summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJedrzej Nowacki <jedrzej.nowacki@nokia.com>2011-06-17 10:43:19 +0200
committerJedrzej Nowacki <jedrzej.nowacki@nokia.com>2011-06-20 15:29:35 +0200
commit9741a88940f8ce7b8f1892295a25006162c4113e (patch)
tree2caa5978a18fe0830d38a3b5a7f186fa74393dcb
parent2c4c3e9517c0bfe4c574691aeeee91f5b33e7240 (diff)
Replace v8 copy by v8 submodule
It is much easier for us to use a git submodule than to constantly copy all source code from v8.
-rw-r--r--.gitmodules3
m---------src/3rdparty/v80
-rw-r--r--src/3rdparty/v8/AUTHORS42
-rw-r--r--src/3rdparty/v8/ChangeLog2656
-rw-r--r--src/3rdparty/v8/LICENSE52
-rw-r--r--src/3rdparty/v8/LICENSE.strongtalk29
-rw-r--r--src/3rdparty/v8/LICENSE.v826
-rw-r--r--src/3rdparty/v8/LICENSE.valgrind45
-rw-r--r--src/3rdparty/v8/VERSION11
-rwxr-xr-xsrc/3rdparty/v8/include/v8-debug.h394
-rw-r--r--src/3rdparty/v8/include/v8-preparser.h116
-rw-r--r--src/3rdparty/v8/include/v8-profiler.h505
-rw-r--r--src/3rdparty/v8/include/v8-testing.h104
-rw-r--r--src/3rdparty/v8/include/v8.h4115
-rw-r--r--src/3rdparty/v8/include/v8stdint.h53
-rw-r--r--src/3rdparty/v8/preparser/preparser-process.cc169
-rw-r--r--src/3rdparty/v8/src/accessors.cc766
-rw-r--r--src/3rdparty/v8/src/accessors.h121
-rw-r--r--src/3rdparty/v8/src/allocation-inl.h49
-rw-r--r--src/3rdparty/v8/src/allocation.cc122
-rw-r--r--src/3rdparty/v8/src/allocation.h143
-rw-r--r--src/3rdparty/v8/src/api.cc5952
-rw-r--r--src/3rdparty/v8/src/api.h572
-rw-r--r--src/3rdparty/v8/src/apinatives.js110
-rw-r--r--src/3rdparty/v8/src/apiutils.h73
-rw-r--r--src/3rdparty/v8/src/arguments.h116
-rw-r--r--src/3rdparty/v8/src/arm/assembler-arm-inl.h353
-rw-r--r--src/3rdparty/v8/src/arm/assembler-arm.cc2795
-rw-r--r--src/3rdparty/v8/src/arm/assembler-arm.h1358
-rw-r--r--src/3rdparty/v8/src/arm/builtins-arm.cc1634
-rw-r--r--src/3rdparty/v8/src/arm/code-stubs-arm.cc6917
-rw-r--r--src/3rdparty/v8/src/arm/code-stubs-arm.h623
-rw-r--r--src/3rdparty/v8/src/arm/codegen-arm-inl.h48
-rw-r--r--src/3rdparty/v8/src/arm/codegen-arm.cc7437
-rw-r--r--src/3rdparty/v8/src/arm/codegen-arm.h595
-rw-r--r--src/3rdparty/v8/src/arm/constants-arm.cc152
-rw-r--r--src/3rdparty/v8/src/arm/constants-arm.h776
-rw-r--r--src/3rdparty/v8/src/arm/cpu-arm.cc149
-rw-r--r--src/3rdparty/v8/src/arm/debug-arm.cc317
-rw-r--r--src/3rdparty/v8/src/arm/deoptimizer-arm.cc737
-rw-r--r--src/3rdparty/v8/src/arm/disasm-arm.cc1471
-rw-r--r--src/3rdparty/v8/src/arm/frames-arm.cc45
-rw-r--r--src/3rdparty/v8/src/arm/frames-arm.h168
-rw-r--r--src/3rdparty/v8/src/arm/full-codegen-arm.cc4374
-rw-r--r--src/3rdparty/v8/src/arm/ic-arm.cc1793
-rw-r--r--src/3rdparty/v8/src/arm/jump-target-arm.cc174
-rw-r--r--src/3rdparty/v8/src/arm/lithium-arm.cc2120
-rw-r--r--src/3rdparty/v8/src/arm/lithium-arm.h2179
-rw-r--r--src/3rdparty/v8/src/arm/lithium-codegen-arm.cc4132
-rw-r--r--src/3rdparty/v8/src/arm/lithium-codegen-arm.h329
-rw-r--r--src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.cc305
-rw-r--r--src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.h84
-rw-r--r--src/3rdparty/v8/src/arm/macro-assembler-arm.cc2939
-rw-r--r--src/3rdparty/v8/src/arm/macro-assembler-arm.h1071
-rw-r--r--src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.cc1287
-rw-r--r--src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.h253
-rw-r--r--src/3rdparty/v8/src/arm/register-allocator-arm-inl.h100
-rw-r--r--src/3rdparty/v8/src/arm/register-allocator-arm.cc63
-rw-r--r--src/3rdparty/v8/src/arm/register-allocator-arm.h44
-rw-r--r--src/3rdparty/v8/src/arm/simulator-arm.cc3215
-rw-r--r--src/3rdparty/v8/src/arm/simulator-arm.h407
-rw-r--r--src/3rdparty/v8/src/arm/stub-cache-arm.cc4034
-rw-r--r--src/3rdparty/v8/src/arm/virtual-frame-arm-inl.h59
-rw-r--r--src/3rdparty/v8/src/arm/virtual-frame-arm.cc843
-rw-r--r--src/3rdparty/v8/src/arm/virtual-frame-arm.h523
-rw-r--r--src/3rdparty/v8/src/array.js1249
-rw-r--r--src/3rdparty/v8/src/assembler.cc1067
-rw-r--r--src/3rdparty/v8/src/assembler.h823
-rw-r--r--src/3rdparty/v8/src/ast-inl.h112
-rw-r--r--src/3rdparty/v8/src/ast.cc1078
-rw-r--r--src/3rdparty/v8/src/ast.h2234
-rw-r--r--src/3rdparty/v8/src/atomicops.h167
-rw-r--r--src/3rdparty/v8/src/atomicops_internals_arm_gcc.h145
-rw-r--r--src/3rdparty/v8/src/atomicops_internals_mips_gcc.h169
-rw-r--r--src/3rdparty/v8/src/atomicops_internals_x86_gcc.cc126
-rw-r--r--src/3rdparty/v8/src/atomicops_internals_x86_gcc.h287
-rw-r--r--src/3rdparty/v8/src/atomicops_internals_x86_macosx.h301
-rw-r--r--src/3rdparty/v8/src/atomicops_internals_x86_msvc.h203
-rw-r--r--src/3rdparty/v8/src/bignum-dtoa.cc655
-rw-r--r--src/3rdparty/v8/src/bignum-dtoa.h81
-rw-r--r--src/3rdparty/v8/src/bignum.cc768
-rw-r--r--src/3rdparty/v8/src/bignum.h140
-rw-r--r--src/3rdparty/v8/src/bootstrapper.cc2138
-rw-r--r--src/3rdparty/v8/src/bootstrapper.h185
-rw-r--r--src/3rdparty/v8/src/builtins.cc1708
-rw-r--r--src/3rdparty/v8/src/builtins.h368
-rw-r--r--src/3rdparty/v8/src/bytecodes-irregexp.h105
-rw-r--r--src/3rdparty/v8/src/cached-powers.cc177
-rw-r--r--src/3rdparty/v8/src/cached-powers.h65
-rw-r--r--src/3rdparty/v8/src/char-predicates-inl.h94
-rw-r--r--src/3rdparty/v8/src/char-predicates.h65
-rw-r--r--src/3rdparty/v8/src/checks.cc110
-rw-r--r--src/3rdparty/v8/src/checks.h296
-rw-r--r--src/3rdparty/v8/src/circular-queue-inl.h53
-rw-r--r--src/3rdparty/v8/src/circular-queue.cc122
-rw-r--r--src/3rdparty/v8/src/circular-queue.h103
-rw-r--r--src/3rdparty/v8/src/code-stubs.cc240
-rw-r--r--src/3rdparty/v8/src/code-stubs.h971
-rw-r--r--src/3rdparty/v8/src/code.h68
-rw-r--r--src/3rdparty/v8/src/codegen-inl.h68
-rw-r--r--src/3rdparty/v8/src/codegen.cc505
-rw-r--r--src/3rdparty/v8/src/codegen.h245
-rw-r--r--src/3rdparty/v8/src/compilation-cache.cc566
-rw-r--r--src/3rdparty/v8/src/compilation-cache.h300
-rwxr-xr-xsrc/3rdparty/v8/src/compiler.cc808
-rw-r--r--src/3rdparty/v8/src/compiler.h312
-rw-r--r--src/3rdparty/v8/src/contexts.cc327
-rw-r--r--src/3rdparty/v8/src/contexts.h382
-rw-r--r--src/3rdparty/v8/src/conversions-inl.h110
-rw-r--r--src/3rdparty/v8/src/conversions.cc1125
-rw-r--r--src/3rdparty/v8/src/conversions.h122
-rw-r--r--src/3rdparty/v8/src/counters.cc93
-rw-r--r--src/3rdparty/v8/src/counters.h254
-rw-r--r--src/3rdparty/v8/src/cpu-profiler-inl.h101
-rw-r--r--src/3rdparty/v8/src/cpu-profiler.cc606
-rw-r--r--src/3rdparty/v8/src/cpu-profiler.h305
-rw-r--r--src/3rdparty/v8/src/cpu.h67
-rw-r--r--src/3rdparty/v8/src/d8-debug.cc367
-rw-r--r--src/3rdparty/v8/src/d8-debug.h158
-rw-r--r--src/3rdparty/v8/src/d8-posix.cc695
-rw-r--r--src/3rdparty/v8/src/d8-readline.cc128
-rw-r--r--src/3rdparty/v8/src/d8-windows.cc42
-rw-r--r--src/3rdparty/v8/src/d8.cc796
-rw-r--r--src/3rdparty/v8/src/d8.h231
-rw-r--r--src/3rdparty/v8/src/d8.js2798
-rw-r--r--src/3rdparty/v8/src/data-flow.cc545
-rw-r--r--src/3rdparty/v8/src/data-flow.h379
-rw-r--r--src/3rdparty/v8/src/date.js1103
-rw-r--r--src/3rdparty/v8/src/dateparser-inl.h125
-rw-r--r--src/3rdparty/v8/src/dateparser.cc178
-rw-r--r--src/3rdparty/v8/src/dateparser.h265
-rw-r--r--src/3rdparty/v8/src/debug-agent.cc447
-rw-r--r--src/3rdparty/v8/src/debug-agent.h129
-rw-r--r--src/3rdparty/v8/src/debug-debugger.js2569
-rw-r--r--src/3rdparty/v8/src/debug.cc3188
-rw-r--r--src/3rdparty/v8/src/debug.h1055
-rw-r--r--src/3rdparty/v8/src/deoptimizer.cc1296
-rw-r--r--src/3rdparty/v8/src/deoptimizer.h629
-rw-r--r--src/3rdparty/v8/src/disasm.h80
-rw-r--r--src/3rdparty/v8/src/disassembler.cc339
-rw-r--r--src/3rdparty/v8/src/disassembler.h56
-rw-r--r--src/3rdparty/v8/src/diy-fp.cc58
-rw-r--r--src/3rdparty/v8/src/diy-fp.h117
-rw-r--r--src/3rdparty/v8/src/double.h238
-rw-r--r--src/3rdparty/v8/src/dtoa.cc103
-rw-r--r--src/3rdparty/v8/src/dtoa.h85
-rw-r--r--src/3rdparty/v8/src/execution.cc835
-rw-r--r--src/3rdparty/v8/src/execution.h303
-rw-r--r--src/3rdparty/v8/src/extensions/experimental/break-iterator.cc250
-rw-r--r--src/3rdparty/v8/src/extensions/experimental/break-iterator.h89
-rw-r--r--src/3rdparty/v8/src/extensions/experimental/experimental.gyp55
-rw-r--r--src/3rdparty/v8/src/extensions/experimental/i18n-extension.cc284
-rw-r--r--src/3rdparty/v8/src/extensions/experimental/i18n-extension.h64
-rw-r--r--src/3rdparty/v8/src/extensions/externalize-string-extension.cc141
-rw-r--r--src/3rdparty/v8/src/extensions/externalize-string-extension.h50
-rw-r--r--src/3rdparty/v8/src/extensions/gc-extension.cc58
-rw-r--r--src/3rdparty/v8/src/extensions/gc-extension.h49
-rw-r--r--src/3rdparty/v8/src/factory.cc1194
-rw-r--r--src/3rdparty/v8/src/factory.h436
-rw-r--r--src/3rdparty/v8/src/fast-dtoa.cc736
-rw-r--r--src/3rdparty/v8/src/fast-dtoa.h83
-rw-r--r--src/3rdparty/v8/src/fixed-dtoa.cc405
-rw-r--r--src/3rdparty/v8/src/fixed-dtoa.h55
-rw-r--r--src/3rdparty/v8/src/flag-definitions.h556
-rw-r--r--src/3rdparty/v8/src/flags.cc551
-rw-r--r--src/3rdparty/v8/src/flags.h79
-rw-r--r--src/3rdparty/v8/src/frame-element.cc37
-rw-r--r--src/3rdparty/v8/src/frame-element.h269
-rw-r--r--src/3rdparty/v8/src/frames-inl.h236
-rw-r--r--src/3rdparty/v8/src/frames.cc1273
-rw-r--r--src/3rdparty/v8/src/frames.h854
-rw-r--r--src/3rdparty/v8/src/full-codegen.cc1385
-rw-r--r--src/3rdparty/v8/src/full-codegen.h753
-rw-r--r--src/3rdparty/v8/src/func-name-inferrer.cc91
-rw-r--r--src/3rdparty/v8/src/func-name-inferrer.h111
-rw-r--r--src/3rdparty/v8/src/gdb-jit.cc1548
-rw-r--r--src/3rdparty/v8/src/gdb-jit.h138
-rw-r--r--src/3rdparty/v8/src/global-handles.cc596
-rw-r--r--src/3rdparty/v8/src/global-handles.h239
-rw-r--r--src/3rdparty/v8/src/globals.h325
-rw-r--r--src/3rdparty/v8/src/handles-inl.h177
-rw-r--r--src/3rdparty/v8/src/handles.cc965
-rw-r--r--src/3rdparty/v8/src/handles.h372
-rw-r--r--src/3rdparty/v8/src/hashmap.cc230
-rw-r--r--src/3rdparty/v8/src/hashmap.h121
-rw-r--r--src/3rdparty/v8/src/heap-inl.h703
-rw-r--r--src/3rdparty/v8/src/heap-profiler.cc1173
-rw-r--r--src/3rdparty/v8/src/heap-profiler.h396
-rw-r--r--src/3rdparty/v8/src/heap.cc5856
-rw-r--r--src/3rdparty/v8/src/heap.h2265
-rw-r--r--src/3rdparty/v8/src/hydrogen-instructions.cc1639
-rw-r--r--src/3rdparty/v8/src/hydrogen-instructions.h3657
-rw-r--r--src/3rdparty/v8/src/hydrogen.cc5976
-rw-r--r--src/3rdparty/v8/src/hydrogen.h1119
-rw-r--r--src/3rdparty/v8/src/ia32/assembler-ia32-inl.h430
-rw-r--r--src/3rdparty/v8/src/ia32/assembler-ia32.cc2846
-rw-r--r--src/3rdparty/v8/src/ia32/assembler-ia32.h1159
-rw-r--r--src/3rdparty/v8/src/ia32/builtins-ia32.cc1596
-rw-r--r--src/3rdparty/v8/src/ia32/code-stubs-ia32.cc6549
-rw-r--r--src/3rdparty/v8/src/ia32/code-stubs-ia32.h495
-rw-r--r--src/3rdparty/v8/src/ia32/codegen-ia32-inl.h46
-rw-r--r--src/3rdparty/v8/src/ia32/codegen-ia32.cc10385
-rw-r--r--src/3rdparty/v8/src/ia32/codegen-ia32.h801
-rw-r--r--src/3rdparty/v8/src/ia32/cpu-ia32.cc88
-rw-r--r--src/3rdparty/v8/src/ia32/debug-ia32.cc312
-rw-r--r--src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc774
-rw-r--r--src/3rdparty/v8/src/ia32/disasm-ia32.cc1620
-rw-r--r--src/3rdparty/v8/src/ia32/frames-ia32.cc45
-rw-r--r--src/3rdparty/v8/src/ia32/frames-ia32.h140
-rw-r--r--src/3rdparty/v8/src/ia32/full-codegen-ia32.cc4357
-rw-r--r--src/3rdparty/v8/src/ia32/ic-ia32.cc1779
-rw-r--r--src/3rdparty/v8/src/ia32/jump-target-ia32.cc437
-rw-r--r--src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc4158
-rw-r--r--src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h318
-rw-r--r--src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.cc466
-rw-r--r--src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.h110
-rw-r--r--src/3rdparty/v8/src/ia32/lithium-ia32.cc2181
-rw-r--r--src/3rdparty/v8/src/ia32/lithium-ia32.h2235
-rw-r--r--src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc2056
-rw-r--r--src/3rdparty/v8/src/ia32/macro-assembler-ia32.h807
-rw-r--r--src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.cc1264
-rw-r--r--src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.h216
-rw-r--r--src/3rdparty/v8/src/ia32/register-allocator-ia32-inl.h82
-rw-r--r--src/3rdparty/v8/src/ia32/register-allocator-ia32.cc157
-rw-r--r--src/3rdparty/v8/src/ia32/register-allocator-ia32.h43
-rw-r--r--src/3rdparty/v8/src/ia32/simulator-ia32.cc30
-rw-r--r--src/3rdparty/v8/src/ia32/simulator-ia32.h72
-rw-r--r--src/3rdparty/v8/src/ia32/stub-cache-ia32.cc3711
-rw-r--r--src/3rdparty/v8/src/ia32/virtual-frame-ia32.cc1366
-rw-r--r--src/3rdparty/v8/src/ia32/virtual-frame-ia32.h650
-rw-r--r--src/3rdparty/v8/src/ic-inl.h130
-rw-r--r--src/3rdparty/v8/src/ic.cc2389
-rw-r--r--src/3rdparty/v8/src/ic.h675
-rw-r--r--src/3rdparty/v8/src/inspector.cc63
-rw-r--r--src/3rdparty/v8/src/inspector.h62
-rw-r--r--src/3rdparty/v8/src/interpreter-irregexp.cc659
-rw-r--r--src/3rdparty/v8/src/interpreter-irregexp.h49
-rw-r--r--src/3rdparty/v8/src/isolate.cc883
-rw-r--r--src/3rdparty/v8/src/isolate.h1306
-rw-r--r--src/3rdparty/v8/src/json.js342
-rw-r--r--src/3rdparty/v8/src/jsregexp.cc5371
-rw-r--r--src/3rdparty/v8/src/jsregexp.h1483
-rw-r--r--src/3rdparty/v8/src/jump-target-heavy-inl.h51
-rw-r--r--src/3rdparty/v8/src/jump-target-heavy.cc427
-rw-r--r--src/3rdparty/v8/src/jump-target-heavy.h238
-rw-r--r--src/3rdparty/v8/src/jump-target-inl.h48
-rw-r--r--src/3rdparty/v8/src/jump-target-light-inl.h56
-rw-r--r--src/3rdparty/v8/src/jump-target-light.cc111
-rw-r--r--src/3rdparty/v8/src/jump-target-light.h193
-rw-r--r--src/3rdparty/v8/src/jump-target.cc91
-rw-r--r--src/3rdparty/v8/src/jump-target.h90
-rw-r--r--src/3rdparty/v8/src/list-inl.h206
-rw-r--r--src/3rdparty/v8/src/list.h164
-rw-r--r--src/3rdparty/v8/src/lithium-allocator-inl.h142
-rw-r--r--src/3rdparty/v8/src/lithium-allocator.cc2105
-rw-r--r--src/3rdparty/v8/src/lithium-allocator.h630
-rw-r--r--src/3rdparty/v8/src/lithium.cc169
-rw-r--r--src/3rdparty/v8/src/lithium.h592
-rw-r--r--src/3rdparty/v8/src/liveedit-debugger.js1082
-rw-r--r--src/3rdparty/v8/src/liveedit.cc1693
-rw-r--r--src/3rdparty/v8/src/liveedit.h179
-rw-r--r--src/3rdparty/v8/src/liveobjectlist-inl.h126
-rw-r--r--src/3rdparty/v8/src/liveobjectlist.cc2589
-rw-r--r--src/3rdparty/v8/src/liveobjectlist.h322
-rw-r--r--src/3rdparty/v8/src/log-inl.h59
-rw-r--r--src/3rdparty/v8/src/log-utils.cc423
-rw-r--r--src/3rdparty/v8/src/log-utils.h229
-rw-r--r--src/3rdparty/v8/src/log.cc1666
-rw-r--r--src/3rdparty/v8/src/log.h446
-rw-r--r--src/3rdparty/v8/src/macro-assembler.h120
-rw-r--r--src/3rdparty/v8/src/macros.py178
-rw-r--r--src/3rdparty/v8/src/mark-compact.cc3092
-rw-r--r--src/3rdparty/v8/src/mark-compact.h506
-rw-r--r--src/3rdparty/v8/src/math.js264
-rw-r--r--src/3rdparty/v8/src/messages.cc166
-rw-r--r--src/3rdparty/v8/src/messages.h114
-rw-r--r--src/3rdparty/v8/src/messages.js1090
-rw-r--r--src/3rdparty/v8/src/mips/assembler-mips-inl.h335
-rw-r--r--src/3rdparty/v8/src/mips/assembler-mips.cc2093
-rw-r--r--src/3rdparty/v8/src/mips/assembler-mips.h1066
-rw-r--r--src/3rdparty/v8/src/mips/builtins-mips.cc148
-rw-r--r--src/3rdparty/v8/src/mips/code-stubs-mips.cc752
-rw-r--r--src/3rdparty/v8/src/mips/code-stubs-mips.h511
-rw-r--r--src/3rdparty/v8/src/mips/codegen-mips-inl.h64
-rw-r--r--src/3rdparty/v8/src/mips/codegen-mips.cc1213
-rw-r--r--src/3rdparty/v8/src/mips/codegen-mips.h633
-rw-r--r--src/3rdparty/v8/src/mips/constants-mips.cc352
-rw-r--r--src/3rdparty/v8/src/mips/constants-mips.h723
-rw-r--r--src/3rdparty/v8/src/mips/cpu-mips.cc90
-rw-r--r--src/3rdparty/v8/src/mips/debug-mips.cc155
-rw-r--r--src/3rdparty/v8/src/mips/deoptimizer-mips.cc91
-rw-r--r--src/3rdparty/v8/src/mips/disasm-mips.cc1023
-rw-r--r--src/3rdparty/v8/src/mips/frames-mips.cc48
-rw-r--r--src/3rdparty/v8/src/mips/frames-mips.h179
-rw-r--r--src/3rdparty/v8/src/mips/full-codegen-mips.cc727
-rw-r--r--src/3rdparty/v8/src/mips/ic-mips.cc244
-rw-r--r--src/3rdparty/v8/src/mips/jump-target-mips.cc80
-rw-r--r--src/3rdparty/v8/src/mips/lithium-codegen-mips.h65
-rw-r--r--src/3rdparty/v8/src/mips/lithium-mips.h304
-rw-r--r--src/3rdparty/v8/src/mips/macro-assembler-mips.cc3327
-rw-r--r--src/3rdparty/v8/src/mips/macro-assembler-mips.h1058
-rw-r--r--src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.cc478
-rw-r--r--src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.h250
-rw-r--r--src/3rdparty/v8/src/mips/register-allocator-mips-inl.h134
-rw-r--r--src/3rdparty/v8/src/mips/register-allocator-mips.cc63
-rw-r--r--src/3rdparty/v8/src/mips/register-allocator-mips.h47
-rw-r--r--src/3rdparty/v8/src/mips/simulator-mips.cc2438
-rw-r--r--src/3rdparty/v8/src/mips/simulator-mips.h394
-rw-r--r--src/3rdparty/v8/src/mips/stub-cache-mips.cc601
-rw-r--r--src/3rdparty/v8/src/mips/virtual-frame-mips-inl.h58
-rw-r--r--src/3rdparty/v8/src/mips/virtual-frame-mips.cc307
-rw-r--r--src/3rdparty/v8/src/mips/virtual-frame-mips.h530
-rw-r--r--src/3rdparty/v8/src/mirror-debugger.js2381
-rw-r--r--src/3rdparty/v8/src/mksnapshot.cc256
-rw-r--r--src/3rdparty/v8/src/natives.h63
-rw-r--r--src/3rdparty/v8/src/objects-debug.cc722
-rw-r--r--src/3rdparty/v8/src/objects-inl.h4166
-rw-r--r--src/3rdparty/v8/src/objects-printer.cc801
-rw-r--r--src/3rdparty/v8/src/objects-visiting.cc142
-rw-r--r--src/3rdparty/v8/src/objects-visiting.h422
-rw-r--r--src/3rdparty/v8/src/objects.cc10296
-rw-r--r--src/3rdparty/v8/src/objects.h6662
-rw-r--r--src/3rdparty/v8/src/parser.cc5168
-rw-r--r--src/3rdparty/v8/src/parser.h823
-rw-r--r--src/3rdparty/v8/src/platform-cygwin.cc811
-rw-r--r--src/3rdparty/v8/src/platform-freebsd.cc854
-rw-r--r--src/3rdparty/v8/src/platform-linux.cc1120
-rw-r--r--src/3rdparty/v8/src/platform-macos.cc865
-rw-r--r--src/3rdparty/v8/src/platform-nullos.cc504
-rw-r--r--src/3rdparty/v8/src/platform-openbsd.cc672
-rw-r--r--src/3rdparty/v8/src/platform-posix.cc424
-rw-r--r--src/3rdparty/v8/src/platform-solaris.cc796
-rw-r--r--src/3rdparty/v8/src/platform-tls-mac.h62
-rw-r--r--src/3rdparty/v8/src/platform-tls-win32.h62
-rw-r--r--src/3rdparty/v8/src/platform-tls.h50
-rw-r--r--src/3rdparty/v8/src/platform-win32.cc2072
-rw-r--r--src/3rdparty/v8/src/platform.h693
-rw-r--r--src/3rdparty/v8/src/preparse-data.cc185
-rw-r--r--src/3rdparty/v8/src/preparse-data.h249
-rw-r--r--src/3rdparty/v8/src/preparser-api.cc219
-rw-r--r--src/3rdparty/v8/src/preparser.cc1205
-rw-r--r--src/3rdparty/v8/src/preparser.h278
-rw-r--r--src/3rdparty/v8/src/prettyprinter.cc1530
-rw-r--r--src/3rdparty/v8/src/prettyprinter.h223
-rw-r--r--src/3rdparty/v8/src/profile-generator-inl.h128
-rw-r--r--src/3rdparty/v8/src/profile-generator.cc3095
-rw-r--r--src/3rdparty/v8/src/profile-generator.h1125
-rw-r--r--src/3rdparty/v8/src/property.cc102
-rw-r--r--src/3rdparty/v8/src/property.h348
-rw-r--r--src/3rdparty/v8/src/regexp-macro-assembler-irregexp-inl.h78
-rw-r--r--src/3rdparty/v8/src/regexp-macro-assembler-irregexp.cc470
-rw-r--r--src/3rdparty/v8/src/regexp-macro-assembler-irregexp.h142
-rw-r--r--src/3rdparty/v8/src/regexp-macro-assembler-tracer.cc373
-rw-r--r--src/3rdparty/v8/src/regexp-macro-assembler-tracer.h104
-rw-r--r--src/3rdparty/v8/src/regexp-macro-assembler.cc266
-rw-r--r--src/3rdparty/v8/src/regexp-macro-assembler.h236
-rw-r--r--src/3rdparty/v8/src/regexp-stack.cc111
-rw-r--r--src/3rdparty/v8/src/regexp-stack.h147
-rw-r--r--src/3rdparty/v8/src/regexp.js483
-rw-r--r--src/3rdparty/v8/src/register-allocator-inl.h141
-rw-r--r--src/3rdparty/v8/src/register-allocator.cc98
-rw-r--r--src/3rdparty/v8/src/register-allocator.h310
-rw-r--r--src/3rdparty/v8/src/rewriter.cc1024
-rw-r--r--src/3rdparty/v8/src/rewriter.h59
-rw-r--r--src/3rdparty/v8/src/runtime-profiler.cc478
-rw-r--r--src/3rdparty/v8/src/runtime-profiler.h192
-rw-r--r--src/3rdparty/v8/src/runtime.cc11949
-rw-r--r--src/3rdparty/v8/src/runtime.h643
-rw-r--r--src/3rdparty/v8/src/runtime.js643
-rw-r--r--src/3rdparty/v8/src/safepoint-table.cc256
-rw-r--r--src/3rdparty/v8/src/safepoint-table.h269
-rw-r--r--src/3rdparty/v8/src/scanner-base.cc964
-rw-r--r--src/3rdparty/v8/src/scanner-base.h664
-rwxr-xr-xsrc/3rdparty/v8/src/scanner.cc584
-rw-r--r--src/3rdparty/v8/src/scanner.h196
-rw-r--r--src/3rdparty/v8/src/scopeinfo.cc631
-rw-r--r--src/3rdparty/v8/src/scopeinfo.h249
-rw-r--r--src/3rdparty/v8/src/scopes.cc1093
-rw-r--r--src/3rdparty/v8/src/scopes.h508
-rw-r--r--src/3rdparty/v8/src/serialize.cc1574
-rw-r--r--src/3rdparty/v8/src/serialize.h589
-rw-r--r--src/3rdparty/v8/src/shell.h55
-rw-r--r--src/3rdparty/v8/src/simulator.h43
-rw-r--r--src/3rdparty/v8/src/small-pointer-list.h163
-rw-r--r--src/3rdparty/v8/src/smart-pointer.h109
-rw-r--r--src/3rdparty/v8/src/snapshot-common.cc82
-rw-r--r--src/3rdparty/v8/src/snapshot-empty.cc50
-rw-r--r--src/3rdparty/v8/src/snapshot.h73
-rw-r--r--src/3rdparty/v8/src/spaces-inl.h529
-rw-r--r--src/3rdparty/v8/src/spaces.cc3147
-rw-r--r--src/3rdparty/v8/src/spaces.h2368
-rw-r--r--src/3rdparty/v8/src/splay-tree-inl.h310
-rw-r--r--src/3rdparty/v8/src/splay-tree.h203
-rw-r--r--src/3rdparty/v8/src/string-search.cc41
-rw-r--r--src/3rdparty/v8/src/string-search.h568
-rw-r--r--src/3rdparty/v8/src/string-stream.cc592
-rw-r--r--src/3rdparty/v8/src/string-stream.h191
-rw-r--r--src/3rdparty/v8/src/string.js915
-rw-r--r--src/3rdparty/v8/src/strtod.cc440
-rw-r--r--src/3rdparty/v8/src/strtod.h40
-rw-r--r--src/3rdparty/v8/src/stub-cache.cc1940
-rw-r--r--src/3rdparty/v8/src/stub-cache.h866
-rw-r--r--src/3rdparty/v8/src/third_party/valgrind/valgrind.h3925
-rw-r--r--src/3rdparty/v8/src/token.cc63
-rw-r--r--src/3rdparty/v8/src/token.h288
-rw-r--r--src/3rdparty/v8/src/top.cc993
-rw-r--r--src/3rdparty/v8/src/type-info.cc472
-rw-r--r--src/3rdparty/v8/src/type-info.h290
-rw-r--r--src/3rdparty/v8/src/unbound-queue-inl.h95
-rw-r--r--src/3rdparty/v8/src/unbound-queue.h67
-rw-r--r--src/3rdparty/v8/src/unicode-inl.h238
-rw-r--r--src/3rdparty/v8/src/unicode.cc1624
-rw-r--r--src/3rdparty/v8/src/unicode.h280
-rw-r--r--src/3rdparty/v8/src/uri.js402
-rw-r--r--src/3rdparty/v8/src/utils.cc371
-rw-r--r--src/3rdparty/v8/src/utils.h796
-rw-r--r--src/3rdparty/v8/src/v8-counters.cc62
-rw-r--r--src/3rdparty/v8/src/v8-counters.h311
-rw-r--r--src/3rdparty/v8/src/v8.cc215
-rw-r--r--src/3rdparty/v8/src/v8.h130
-rw-r--r--src/3rdparty/v8/src/v8checks.h64
-rw-r--r--src/3rdparty/v8/src/v8dll-main.cc39
-rw-r--r--src/3rdparty/v8/src/v8globals.h486
-rw-r--r--src/3rdparty/v8/src/v8memory.h82
-rw-r--r--src/3rdparty/v8/src/v8natives.js1293
-rw-r--r--src/3rdparty/v8/src/v8preparserdll-main.cc39
-rw-r--r--src/3rdparty/v8/src/v8threads.cc453
-rw-r--r--src/3rdparty/v8/src/v8threads.h164
-rw-r--r--src/3rdparty/v8/src/v8utils.h317
-rw-r--r--src/3rdparty/v8/src/variables.cc132
-rw-r--r--src/3rdparty/v8/src/variables.h212
-rw-r--r--src/3rdparty/v8/src/version.cc116
-rw-r--r--src/3rdparty/v8/src/version.h68
-rw-r--r--src/3rdparty/v8/src/virtual-frame-heavy-inl.h190
-rw-r--r--src/3rdparty/v8/src/virtual-frame-heavy.cc312
-rw-r--r--src/3rdparty/v8/src/virtual-frame-inl.h39
-rw-r--r--src/3rdparty/v8/src/virtual-frame-light-inl.h171
-rw-r--r--src/3rdparty/v8/src/virtual-frame-light.cc52
-rw-r--r--src/3rdparty/v8/src/virtual-frame.cc49
-rw-r--r--src/3rdparty/v8/src/virtual-frame.h59
-rw-r--r--src/3rdparty/v8/src/vm-state-inl.h138
-rw-r--r--src/3rdparty/v8/src/vm-state.h70
-rw-r--r--src/3rdparty/v8/src/win32-headers.h96
-rw-r--r--src/3rdparty/v8/src/x64/assembler-x64-inl.h456
-rw-r--r--src/3rdparty/v8/src/x64/assembler-x64.cc3180
-rw-r--r--src/3rdparty/v8/src/x64/assembler-x64.h1632
-rw-r--r--src/3rdparty/v8/src/x64/builtins-x64.cc1493
-rw-r--r--src/3rdparty/v8/src/x64/code-stubs-x64.cc5134
-rw-r--r--src/3rdparty/v8/src/x64/code-stubs-x64.h477
-rw-r--r--src/3rdparty/v8/src/x64/codegen-x64-inl.h46
-rw-r--r--src/3rdparty/v8/src/x64/codegen-x64.cc8843
-rw-r--r--src/3rdparty/v8/src/x64/codegen-x64.h753
-rw-r--r--src/3rdparty/v8/src/x64/cpu-x64.cc88
-rw-r--r--src/3rdparty/v8/src/x64/debug-x64.cc318
-rw-r--r--src/3rdparty/v8/src/x64/deoptimizer-x64.cc816
-rw-r--r--src/3rdparty/v8/src/x64/disasm-x64.cc1752
-rw-r--r--src/3rdparty/v8/src/x64/frames-x64.cc45
-rw-r--r--src/3rdparty/v8/src/x64/frames-x64.h130
-rw-r--r--src/3rdparty/v8/src/x64/full-codegen-x64.cc4339
-rw-r--r--src/3rdparty/v8/src/x64/ic-x64.cc1752
-rw-r--r--src/3rdparty/v8/src/x64/jump-target-x64.cc437
-rw-r--r--src/3rdparty/v8/src/x64/lithium-codegen-x64.cc3970
-rw-r--r--src/3rdparty/v8/src/x64/lithium-codegen-x64.h318
-rw-r--r--src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.cc320
-rw-r--r--src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.h74
-rw-r--r--src/3rdparty/v8/src/x64/lithium-x64.cc2117
-rw-r--r--src/3rdparty/v8/src/x64/lithium-x64.h2161
-rw-r--r--src/3rdparty/v8/src/x64/macro-assembler-x64.cc2912
-rw-r--r--src/3rdparty/v8/src/x64/macro-assembler-x64.h1984
-rw-r--r--src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.cc1398
-rw-r--r--src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.h282
-rw-r--r--src/3rdparty/v8/src/x64/register-allocator-x64-inl.h87
-rw-r--r--src/3rdparty/v8/src/x64/register-allocator-x64.cc95
-rw-r--r--src/3rdparty/v8/src/x64/register-allocator-x64.h43
-rw-r--r--src/3rdparty/v8/src/x64/simulator-x64.cc27
-rw-r--r--src/3rdparty/v8/src/x64/simulator-x64.h71
-rw-r--r--src/3rdparty/v8/src/x64/stub-cache-x64.cc3460
-rw-r--r--src/3rdparty/v8/src/x64/virtual-frame-x64.cc1296
-rw-r--r--src/3rdparty/v8/src/x64/virtual-frame-x64.h597
-rw-r--r--src/3rdparty/v8/src/zone-inl.h129
-rw-r--r--src/3rdparty/v8/src/zone.cc196
-rw-r--r--src/3rdparty/v8/src/zone.h236
-rw-r--r--src/3rdparty/v8/tools/codemap.js265
-rw-r--r--src/3rdparty/v8/tools/consarray.js93
-rw-r--r--src/3rdparty/v8/tools/csvparser.js78
-rw-r--r--src/3rdparty/v8/tools/disasm.py92
-rwxr-xr-xsrc/3rdparty/v8/tools/freebsd-tick-processor10
-rwxr-xr-xsrc/3rdparty/v8/tools/gc-nvp-trace-processor.py328
-rw-r--r--src/3rdparty/v8/tools/generate-ten-powers.scm286
-rwxr-xr-xsrc/3rdparty/v8/tools/grokdump.py840
-rw-r--r--src/3rdparty/v8/tools/gyp/v8.gyp844
-rwxr-xr-xsrc/3rdparty/v8/tools/js2c.py380
-rw-r--r--src/3rdparty/v8/tools/jsmin.py280
-rwxr-xr-xsrc/3rdparty/v8/tools/linux-tick-processor35
-rwxr-xr-xsrc/3rdparty/v8/tools/ll_prof.py919
-rw-r--r--src/3rdparty/v8/tools/logreader.js185
-rwxr-xr-xsrc/3rdparty/v8/tools/mac-nm18
-rwxr-xr-xsrc/3rdparty/v8/tools/mac-tick-processor6
-rw-r--r--src/3rdparty/v8/tools/oom_dump/README31
-rw-r--r--src/3rdparty/v8/tools/oom_dump/SConstruct42
-rw-r--r--src/3rdparty/v8/tools/oom_dump/oom_dump.cc288
-rwxr-xr-xsrc/3rdparty/v8/tools/presubmit.py305
-rwxr-xr-xsrc/3rdparty/v8/tools/process-heap-prof.py120
-rw-r--r--src/3rdparty/v8/tools/profile.js751
-rw-r--r--src/3rdparty/v8/tools/profile_view.js219
-rwxr-xr-xsrc/3rdparty/v8/tools/run-valgrind.py77
-rw-r--r--src/3rdparty/v8/tools/splaytree.js316
-rwxr-xr-xsrc/3rdparty/v8/tools/stats-viewer.py468
-rwxr-xr-xsrc/3rdparty/v8/tools/test.py1490
-rw-r--r--src/3rdparty/v8/tools/tickprocessor-driver.js59
-rw-r--r--src/3rdparty/v8/tools/tickprocessor.js877
-rw-r--r--src/3rdparty/v8/tools/utils.py96
-rw-r--r--src/3rdparty/v8/tools/visual_studio/README.txt70
-rw-r--r--src/3rdparty/v8/tools/visual_studio/arm.vsprops14
-rw-r--r--src/3rdparty/v8/tools/visual_studio/common.vsprops34
-rw-r--r--src/3rdparty/v8/tools/visual_studio/d8js2c.cmd6
-rw-r--r--src/3rdparty/v8/tools/visual_studio/debug.vsprops17
-rw-r--r--src/3rdparty/v8/tools/visual_studio/ia32.vsprops17
-rw-r--r--src/3rdparty/v8/tools/visual_studio/js2c.cmd6
-rw-r--r--src/3rdparty/v8/tools/visual_studio/release.vsprops24
-rw-r--r--src/3rdparty/v8/tools/visual_studio/x64.vsprops18
-rwxr-xr-xsrc/3rdparty/v8/tools/windows-tick-processor.bat30
522 files changed, 3 insertions, 426004 deletions
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..d340669
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "src/3rdparty/v8"]
+ path = src/3rdparty/v8
+ url = git://github.com/stampho/qtscript-backend.git
diff --git a/src/3rdparty/v8 b/src/3rdparty/v8
new file mode 160000
+Subproject 820bcec84a5c00e4e46b21e4ddf0fa1e17dacf5
diff --git a/src/3rdparty/v8/AUTHORS b/src/3rdparty/v8/AUTHORS
deleted file mode 100644
index 843d1d2..0000000
--- a/src/3rdparty/v8/AUTHORS
+++ /dev/null
@@ -1,42 +0,0 @@
-# Below is a list of people and organizations that have contributed
-# to the V8 project. Names should be added to the list like so:
-#
-# Name/Organization <email address>
-
-Google Inc.
-Sigma Designs Inc.
-ARM Ltd.
-Hewlett-Packard Development Company, LP
-
-Alexander Botero-Lowry <alexbl@FreeBSD.org>
-Alexander Karpinsky <homm86@gmail.com>
-Alexandre Vassalotti <avassalotti@gmail.com>
-Andreas Anyuru <andreas.anyuru@gmail.com>
-Bert Belder <bertbelder@gmail.com>
-Burcu Dogan <burcujdogan@gmail.com>
-Craig Schlenter <craig.schlenter@gmail.com>
-Daniel Andersson <kodandersson@gmail.com>
-Daniel James <dnljms@gmail.com>
-Dineel D Sule <dsule@codeaurora.org>
-Erich Ocean <erich.ocean@me.com>
-Jan de Mooij <jandemooij@gmail.com>
-Jay Freeman <saurik@saurik.com>
-Joel Stanley <joel.stan@gmail.com>
-John Jozwiak <jjozwiak@codeaurora.org>
-Kun Zhang <zhangk@codeaurora.org>
-Martyn Capewell <martyn.capewell@arm.com>
-Matt Hanselman <mjhanselman@gmail.com>
-Maxim Mossienko <maxim.mossienko@gmail.com>
-Michael Smith <mike@w3.org>
-Mike Gilbert <floppymaster@gmail.com>
-Paolo Giarrusso <p.giarrusso@gmail.com>
-Patrick Gansterer <paroga@paroga.com>
-Peter Varga <pvarga@inf.u-szeged.hu>
-Rafal Krypa <rafal@krypa.net>
-Rene Rebe <rene@exactcode.de>
-Rodolph Perfetta <rodolph.perfetta@arm.com>
-Ryan Dahl <coldredlemur@gmail.com>
-Sanjoy Das <sanjoy@playingwithpointers.com>
-Subrato K De <subratokde@codeaurora.org>
-Vlad Burlik <vladbph@gmail.com>
-Zaheer Ahmad <zahmad@codeaurora.org>
diff --git a/src/3rdparty/v8/ChangeLog b/src/3rdparty/v8/ChangeLog
deleted file mode 100644
index cfd18fa..0000000
--- a/src/3rdparty/v8/ChangeLog
+++ /dev/null
@@ -1,2656 +0,0 @@
-2011-04-04: Version 3.2.7
-
- Disabled the original 'classic' V8 code generator. Crankshaft is
- now the default on all platforms.
-
- Changed the heap profiler to use more descriptive names.
-
- Performance and stability improvements to isolates on all platforms.
-
-
-2011-03-30: Version 3.2.6
-
- Fixed xcode build warning in shell.cc (out of order initialization).
-
- Fixed null-pointer dereference in the compiler when running without
- SSE3 support (Chromium issue 77654).
-
- Fixed x64 compilation error due to some dead code. (Issue 1286)
-
- Introduced scons target to build the preparser stand-alone example.
-
- Made FreeBSD build and pass all tests.
-
-
-2011-03-28: Version 3.2.5
-
- Fixed build with Irregexp interpreter (issue 1266).
-
- Added Crankshaft support for external arrays.
-
- Fixed two potential crash bugs.
-
-
-2011-03-23: Version 3.2.4
-
- Added isolates which allows several V8 instances in the same process.
- This is controlled through the new Isolate class in the API.
-
- Implemented more of EcmaScript 5 strict mode.
-
- Reduced the time it takes to make detailed heap snapshot.
-
- Added a number of commands to the ARM simulator and enhanced the ARM
- disassembler.
-
-
-2011-03-17: Version 3.2.3
-
- Fixed a number of crash bugs.
-
- Fixed Array::New(length) to return an array with a length (issue 1256).
-
- Fixed FreeBSD build.
-
- Changed __defineGetter__ to not throw (matching the behavior of Safari).
-
- Implemented more of EcmaScript 5 strict mode.
-
- Improved Crankshaft performance on all platforms.
-
-
-2011-03-14: Version 3.2.2
-
- Fixed a number of crash and correctness bugs.
-
- Improved Crankshaft performance on all platforms.
-
- Fixed Crankshaft on Solaris/Illumos.
-
-
-2011-03-10: Version 3.2.1
-
- Fixed a number of crash bugs.
-
- Improved Crankshaft for x64 and ARM.
-
- Implemented more of EcmaScript 5 strict mode.
-
-
-2011-03-07: Version 3.2.0
-
- Fixed a number of crash bugs.
-
- Turned on Crankshaft by default on x64 and ARM.
-
- Improved Crankshaft for x64 and ARM.
-
- Implemented more of EcmaScript 5 strict mode.
-
-
-2011-03-02: Version 3.1.8
-
- Fixed a number of crash bugs.
-
- Improved Crankshaft for x64 and ARM.
-
- Implemented more of EcmaScript 5 strict mode.
-
- Fixed issue with unaligned reads and writes on ARM.
-
- Improved heap profiler support.
-
-
-2011-02-28: Version 3.1.7
-
- Fixed a number of crash bugs.
-
- Improved Crankshaft for x64 and ARM.
-
- Fixed implementation of indexOf/lastIndexOf for sparse
- arrays (http://crbug.com/73940).
-
- Fixed bug in map space compaction (http://crbug.com/59688).
-
- Added support for direct getter accessors calls on ARM.
-
-
-2011-02-24: Version 3.1.6
-
- Fixed a number of crash bugs.
-
- Added support for Cygwin (issue 64).
-
- Improved Crankshaft for x64 and ARM.
-
- Added Crankshaft support for stores to pixel arrays.
-
- Fixed issue in CPU profiler with Crankshaft.
-
-
-2011-02-16: Version 3.1.5
-
- Change RegExp parsing to disallow /(*)/.
-
- Added GDB JIT support for ARM.
-
- Fixed several crash bugs.
-
- Performance improvements on the IA32 platform.
-
-
-2011-02-14: Version 3.1.4
-
- Fixed incorrect compare of prototypes of the global object (issue
- 1082).
-
- Fixed a bug in optimizing calls to global functions (issue 1106).
-
- Made optimized Function.prototype.apply safe for non-JSObject first
- arguments (issue 1128).
-
- Fixed an error related to element accessors on Object.prototype and
- parser errors (issue 1130).
-
- Fixed a bug in sorting an array with large array indices (issue 1131).
-
- Properly treat exceptions thrown while compiling (issue 1132).
-
- Fixed bug in register requirements for function.apply (issue 1133).
-
- Fixed a representation change bug in the Hydrogen graph construction
- (issue 1134).
-
- Fixed the semantics of delete on parameters (issue 1136).
-
- Fixed a optimizer bug related to moving instructions with side effects
- (issue 1138).
-
- Added support for the global object in Object.keys (issue 1150).
-
- Fixed incorrect value for Math.LOG10E
- (issue http://code.google.com/p/chromium/issues/detail?id=72555)
-
- Performance improvements on the IA32 platform.
-
- Implement assignment to undefined reference in ES5 Strict Mode.
-
-
-2011-02-09: Version 3.1.3
-
- Fixed a bug triggered by functions with huge numbers of declared
- arguments.
-
- Fixed zap value aliasing a real object - debug mode only (issue 866).
-
- Fixed issue where Array.prototype.__proto__ had been set to null
- (issue 1121).
-
- Fixed stability bugs in Crankshaft for x86.
-
-
-2011-02-07: Version 3.1.2
-
- Added better security checks when accessing properties via
- Object.getOwnPropertyDescriptor.
-
- Fixed bug in Object.defineProperty and related access bugs (issues
- 992, 1083 and 1092).
-
- Added LICENSE.v8, LICENSE.strongtalk and LICENSE.valgrind to ease
- copyright notice generation for embedders.
-
-
-2011-02-02: Version 3.1.1
-
- Perform security checks before fetching the value in
- Object.getOwnPropertyDescriptor.
-
- Fixed a bug in Array.prototype.splice triggered by passing no
- arguments.
-
- Fixed bugs in -0 in arithmetic and in Math.pow.
-
- Fixed bugs in the register allocator and in switching from optimized
- to unoptimized code.
-
-
-2011-01-31: Version 3.1.0
-
- Performance improvements on all platforms.
-
-
-2011-01-28: Version 3.0.12
-
- Added support for strict mode parameter and object property
- validation.
-
- Fixed a couple of crash bugs.
-
-
-2011-01-25: Version 3.0.11
-
- Fixed a bug in deletion of lookup slots that could cause global
- variables to be accidentally deleted (http://crbug.com/70066).
-
- Added support for strict mode octal literal verification.
-
- Fixed a couple of crash bugs (issues 1070 and 1071).
-
-
-2011-01-24: Version 3.0.10
-
- Fixed External::Wrap for 64-bit addresses (issue 1037).
-
- Fixed incorrect .arguments variable proxy handling in the full
- code generator (issue 1060).
-
- Introduced partial strict mode support.
-
- Changed formatting of recursive error messages to match Firefox and
- Safari (issue http://crbug.com/70334).
-
- Fixed incorrect rounding for float-to-integer conversions for external
- array types, which implement the Typed Array spec
- (issue http://crbug.com/50972).
-
- Performance improvements on the IA32 platform.
-
-
-2011-01-19: Version 3.0.9
-
- Added basic GDB JIT Interface integration.
-
- Make invalid break/continue statements a syntax error instead of a
- runtime error.
-
-
-2011-01-17: Version 3.0.8
-
- Exposed heap size limit to the heap statistics gathered by
- the GetHeapStatistics API.
-
- Wrapped external pointers more carefully (issue 1037).
-
- Hardened the implementation of error objects to avoid setters
- intercepting the properties set then throwing an error.
-
- Avoided trashing the FPSCR when calculating Math.floor on ARM.
-
- Performance improvements on the IA32 platform.
-
-
-2011-01-10: Version 3.0.7
-
- Stopped calling inherited setters when creating object literals
- (issue 1015).
-
- Changed interpretation of malformed \c? escapes in RegExp to match
- JSC.
-
- Enhanced the command-line debugger interface and fixed some minor
- bugs in the debugger.
-
- Performance improvements on the IA32 platform.
-
-
-2011-01-05: Version 3.0.6
-
- Allowed getters and setters on JSArray elements (issue 900).
-
- Stopped JSON objects from hitting inherited setters (part of
- issue 1015).
-
- Allowed numbers and strings as names of getters/setters in object
- initializer (issue 820).
-
- Added use_system_v8 option to gyp (off by default), to make it easier
- for Linux distributions to ship with system-provided V8 library.
-
- Exported external array data accessors (issue 1016).
-
- Added labelled thread names to help with debugging (on Linux).
-
-
-2011-01-03: Version 3.0.5
-
- Fixed a couple of cast errors for gcc-3.4.3.
-
- Performance improvements in GC and IA32 code generator.
-
-
-2010-12-21: Version 3.0.4
-
- Added Date::ResetCache() to the API so that the cached values in the
- Date object can be reset to allow live DST / timezone changes.
-
- Extended existing support for printing (while debugging) the contents
- of objects. Added support for printing objects from release builds.
-
- Fixed V8 issues 989, 1006, and 1007.
-
-
-2010-12-17: Version 3.0.3
-
- Reapplied all changes for version 3.0.1.
-
- Improved debugger protocol for remote debugging.
-
- Added experimental support for using gyp to generate build files
- for V8.
-
- Fixed implementation of String::Write in the API (issue 975).
-
-
-2010-12-15: Version 3.0.2
-
- Revert version 3.0.1 and patch 3.0.1.1.
-
-
-2010-12-13: Version 3.0.1
-
- Added support for an experimental internationalization API as an
- extension. This extension is disabled by default but can be enabled
- when building V8. The ECMAScript internationalization strawman is
- at http://wiki.ecmascript.org/doku.php?id=strawman:i18n_api.
-
- Made RegExp character class parsing stricter. This mirrors a change
- to RegExp parsing in WebKit.
-
- Fixed a bug in Object.defineProperty when used to change attributes
- of an existing property. It incorrectly set the property value to
- undefined (issue 965).
-
- Fixed several different compilation failures on various platforms
- caused by the 3.0.0 release.
-
- Optimized Math.pow so it can work on unboxed doubles.
-
- Sped up quoting of JSON strings by removing one traversal of the
- string.
-
-
-2010-12-07: Version 3.0.0
-
- Improved performance by (partially) addressing issue 957 on
- IA-32. Still needs more work for the other architectures.
-
-
-2010-11-29: Version 2.5.9
-
- Fixed crashes during GC caused by partially initialize heap
- objects.
-
- Fixed bug in process sample that caused memory leaks.
-
- Improved performance on ARM by implementing missing stubs and
- inlining.
-
- Improved heap profiler support.
-
- Added separate seeding on Windows of the random number generator
- used internally by the compiler (issue 936).
-
- Exposed API for getting the name of the function used to construct
- an object.
-
- Fixed date parser to handle one and two digit millisecond
- values (issue 944).
-
- Fixed number parsing to disallow space between sign and
- digits (issue 946).
-
-
-2010-11-23: Version 2.5.8
-
- Removed dependency on Gay's dtoa.
-
- Improved heap profiler precision and speed.
-
- Reduced overhead of callback invocations on ARM.
-
-
-2010-11-18: Version 2.5.7
-
- Fixed obscure evaluation order bug (issue 931).
-
- Split the random number state between JavaScript and the private API.
-
- Fixed performance bug causing GCs when generating stack traces on
- code from very large scripts.
-
- Fixed bug in parser that allowed (foo):42 as a labelled statement
- (issue 918).
-
- Provide more accurate results about used heap size via
- GetHeapStatistics.
-
- Allow build-time customization of the max semispace size.
-
- Made String.prototype.split honor limit when separator is empty
- (issue 929).
-
- Added missing failure check after expecting an identifier in
- preparser (Chromium issue 62639).
-
-
-2010-11-10: Version 2.5.6
-
- Added support for VFP rounding modes to the ARM simulator.
-
- Fixed multiplication overflow bug (issue 927).
-
- Added a limit for the amount of executable memory (issue 925).
-
-
-2010-11-08: Version 2.5.5
-
- Added more aggressive GC of external objects in near out-of-memory
- situations.
-
- Fixed a bug that gave the incorrect result for String.split called
- on the empty string (issue 924).
-
-
-2010-11-03: Version 2.5.4
-
- Improved V8 VFPv3 runtime detection to address issue 914.
-
-
-2010-11-01: Version 2.5.3
-
- Fixed a bug that prevents constants from overwriting function values
- in object literals (issue 907).
-
- Fixed a bug with reporting of impossible nested calls of DOM functions
- (issue http://crbug.com/60753).
-
-
-2010-10-27: Version 2.5.2
-
- Improved sampler resolution on Linux.
-
- Allowed forcing the use of a simulator from the build script
- independently of the host architecture.
-
- Fixed FreeBSD port (issue 912).
-
- Made windows-tick-processor respect D8_PATH.
-
- Implemented --noinline-new flag fully on IA32, X64 and ARM platforms.
-
-
-2010-10-20: Version 2.5.1
-
- Fixed bug causing spurious out of memory exceptions
- (issue http://crbug.com/54580).
-
- Fixed compilation error on Solaris platform (issue 901).
-
- Fixed error in strtod (string to floating point number conversion)
- due to glibc's use of 80-bit floats in the FPU on 32-bit linux.
-
- Adjusted randomized allocations of executable memory to have 64k
- granularity (issue http://crbug.com/56036).
-
- Supported profiling using kernel perf_events on linux. Added ll_prof
- script to tools and --ll-prof flag to V8.
-
-
-2010-10-18: Version 2.5.0
-
- Fixed bug in cache handling of lastIndex on global regexps
- (issue http://crbug.com/58740).
-
- Added USE_SIMULATOR macro that explicitly indicates that we wish to use
- the simulator as the execution engine (by Mark Lam <mark.lam@palm.com>
- from Hewlett-Packard Development Company, LP).
-
- Fixed compilation error on ARM with gcc 4.4 (issue 894).
-
-
-2010-10-13: Version 2.4.9
-
- Fixed a bug in the handling of conditional expressions in test
- contexts in compiler for top-level code.
-
- Added "//@ sourceURL" information to the StackTrace API.
-
- Exposed RegExp construction through the API.
-
-
-2010-10-04: Version 2.4.8
-
- Fixed a bug in ResumeProfilerEx causing it to not always write out the
- whole snapshot (issue 868).
-
- Performance improvements on all platforms.
-
-
-2010-09-30: Version 2.4.7
-
- Changed the command-line flag --max-new-space-size to be in kB and the
- flag --max-old-space-size to be in MB (previously they were in bytes).
-
- Added Debug::CancelDebugBreak to the debugger API.
-
- Fixed a bug in getters for negative numeric property names
- (https://bugs.webkit.org/show_bug.cgi?id=46689).
-
- Performance improvements on all platforms.
-
-
-2010-09-27: Version 2.4.6
-
- Fixed assertion failure related to copy-on-write arrays (issue 876).
-
- Fixed build failure of 64-bit V8 on Windows.
-
- Fixed a bug in RegExp (issue http://crbug.com/52801).
-
- Improved the profiler's coverage to cover more functions (issue 858).
-
- Fixed error in shift operators on 64-bit V8
- (issue http://crbug.com/54521).
-
-
-2010-09-22: Version 2.4.5
-
- Changed the RegExp benchmark to exercise the regexp engine on different
- inputs by scrambling the input strings.
-
- Fixed a bug in keyed loads on strings.
-
- Fixed a bug with loading global function prototypes.
-
- Fixed a bug with profiling RegExp calls (issue http://crbug.com/55999).
-
- Performance improvements on all platforms.
-
-
-2010-09-15: Version 2.4.4
-
- Fixed bug with hangs on very large sparse arrays.
-
- Now tries harder to free up memory when running out of space.
-
- Added heap snapshots to JSON format to API.
-
- Recalibrated benchmarks.
-
-
-2010-09-13: Version 2.4.3
-
- Made Date.parse properly handle TZ offsets (issue 857).
-
- Performance improvements on all platforms.
-
-
-2010-09-08: Version 2.4.2
-
- Fixed GC crash bug.
-
- Fixed stack corruption bug.
-
- Fixed compilation for newer C++ compilers that found Operand(0)
- ambiguous.
-
-
-2010-09-06: Version 2.4.1
-
- Added the ability for an embedding application to receive a callback
- when V8 allocates (V8::AddMemoryAllocationCallback) or deallocates
- (V8::RemoveMemoryAllocationCallback) from the OS.
-
- Fixed several JSON bugs (including issue 855).
-
- Fixed memory overrun crash bug triggered during V8's tick-based
- profiling.
-
- Performance improvements on all platforms.
-
-
-2010-09-01: Version 2.4.0
-
- Fixed bug in Object.freeze and Object.seal when Array.prototype or
- Object.prototype are changed (issue 842).
-
- Updated Array.splice to follow Safari and Firefox when called
- with zero arguments.
-
- Fixed a missing live register when breaking at keyed loads on ARM.
-
- Performance improvements on all platforms.
-
-
-2010-08-25: Version 2.3.11
-
- Fixed bug in RegExp related to copy-on-write arrays.
-
- Refactored tools/test.py script, including the introduction of
- VARIANT_FLAGS that allows specification of sets of flags with which
- all tests should be run.
-
- Fixed a bug in the handling of debug breaks in CallIC.
-
- Performance improvements on all platforms.
-
-
-2010-08-23: Version 2.3.10
-
- Fixed bug in bitops on ARM.
-
- Build fixes for unusual compilers.
-
- Track high water mark for RWX memory.
-
- Performance improvements on all platforms.
-
-
-2010-08-18: Version 2.3.9
-
- Fixed compilation for ARMv4 on OpenBSD/FreeBSD.
-
- Removed specialized handling of GCC 4.4 (issue 830).
-
- Fixed DST cache to take into account the suspension of DST in
- Egypt during the 2010 Ramadan (issue http://crbug.com/51855).
-
- Performance improvements on all platforms.
-
-
-2010-08-16: Version 2.3.8
-
- Fixed build with strict aliasing on GCC 4.4 (issue 463).
-
- Fixed issue with incorrect handling of custom valueOf methods on
- string wrappers (issue 760).
-
- Fixed compilation for ARMv4 (issue 590).
-
- Improved performance.
-
-
-2010-08-11: Version 2.3.7
-
- Reduced size of heap snapshots produced by heap profiler (issue 783).
-
- Introduced v8::Value::IsRegExp method.
-
- Fixed CPU profiler crash in start / stop sequence when non-existent
- name is passed (issue http://crbug.com/51594).
-
- Introduced new indexed property query callbacks API (issue 816). This
- API is guarded by USE_NEW_QUERY_CALLBACK define and is disabled
- by default.
-
- Removed support for object literal get/set with number/string
- property name.
-
- Fixed handling of JSObject::elements in CalculateNetworkSize
- (issue 822).
-
- Allowed compiling with strict aliasing enabled on GCC 4.4 (issue 463).
-
-
-2010-08-09: Version 2.3.6
-
- RegExp literals create a new object every time they are evaluated
- (issue 704).
-
- Object.seal and Object.freeze return the modified object (issue 809).
-
- Fixed building using GCC 4.4.4.
-
-
-2010-08-04: Version 2.3.5
-
- Added support for ES5 property names. Object initialisers and
- dot-notation property access now allows keywords. Also allowed
- non-identifiers after "get" or "set" in an object initialiser.
-
- Randomized the addresses of allocated executable memory on Windows.
-
-
-2010-08-02: Version 2.3.4
-
- Fixed problems in implementation of ES5 function.prototype.bind.
-
- Fixed error when using apply with arguments object on ARM (issue 784).
-
- Added setting of global flags to debugger protocol.
-
- Fixed an error affecting cached results of sin and cos (issue 792).
-
- Removed memory leak from a boundary case where V8 is not initialized.
-
- Fixed issue where debugger could set breakpoints outside the body
- of a function.
-
- Fixed issue in debugger when using both live edit and step in features.
-
- Added Number-letter (Nl) category to Unicode tables. These characters
- can now be used in identifiers.
-
- Fixed an assert failure on X64 (issue 806).
-
- Performance improvements on all platforms.
-
-
-2010-07-26: Version 2.3.3
-
- Fixed error when building the d8 shell in a fresh checkout.
-
- Implemented Function.prototype.bind (ES5 15.3.4.5).
-
- Fixed an error in inlined stores on ia32.
-
- Fixed an error when setting a breakpoint at the end of a function
- that does not end with a newline character.
-
- Performance improvements on all platforms.
-
-
-2010-07-21: Version 2.3.2
-
- Fixed compiler warnings when building with LLVM.
-
- Fixed a bug with for-in applied to strings (issue 785).
-
- Performance improvements on all platforms.
-
-
-2010-07-19: Version 2.3.1
-
- Fixed compilation and linking with V8_INTERPRETED_REGEXP flag.
-
- Fixed bug related to code flushing while compiling a lazy
- compilable function (issue http://crbug.com/49099).
-
- Performance improvements on all platforms.
-
-
-2010-07-15: Version 2.3.0
-
- Added ES5 Object.seal and Object.isSealed.
-
- Added debugger API for scheduling debugger commands from a
- separate thread.
-
-
-2010-07-14: Version 2.2.24
-
- Added API for capturing stack traces for uncaught exceptions.
-
- Fixed crash bug when preparsing from a non-external V8 string
- (issue 775).
-
- Fixed JSON.parse bug causing input not to be converted to string
- (issue 764).
-
- Added ES5 Object.freeze and Object.isFrozen.
-
- Performance improvements on all platforms.
-
-
-2010-07-07: Version 2.2.23
-
- API change: Convert Unicode code points outside the basic multilingual
- plane to the replacement character. Previous behavior was to silently
- truncate the value to 16 bits.
-
- Fixed crash: handle all flat string types in regexp replace.
-
- Prevent invalid pre-parsing data passed in through the API from
- crashing V8.
-
- Performance improvements on all platforms.
-
-
-2010-07-05: Version 2.2.22
-
- Added ES5 Object.isExtensible and Object.preventExtensions.
-
- Enabled building V8 as a DLL.
-
- Fixed a bug in date code where -0 was not interpreted as 0
- (issue 736).
-
- Performance improvements on all platforms.
-
-
-2010-06-30: Version 2.2.21
-
- Fixed bug in externalizing some ASCII strings (Chromium issue 47824).
-
- Updated JSON.stringify to floor the space parameter (issue 753).
-
- Updated the Mozilla test expectations to the newest version.
-
- Updated the ES5 Conformance Test expectations to the latest version.
-
- Updated the V8 benchmark suite.
-
- Provide actual breakpoints locations in response to setBreakpoint
- and listBreakpoints requests.
-
-
-2010-06-28: Version 2.2.20
-
- Fixed bug with for-in on x64 platform (issue 748).
-
- Fixed crash bug on x64 platform (issue 756).
-
- Fixed bug in Object.getOwnPropertyNames. (chromium issue 41243).
-
- Fixed a bug on ARM that caused the result of 1 << x to be
- miscalculated for some inputs.
-
- Performance improvements on all platforms.
-
-
-2010-06-23: Version 2.2.19
-
- Fixed bug that causes the build to break when profillingsupport=off
- (issue 738).
-
- Added expose-externalize-string flag for testing extensions.
-
- Resolve linker issues with using V8 as a DLL causing a number of
- problems with unresolved symbols.
-
- Fixed build failure for cctests when ENABLE_DEBUGGER_SUPPORT is not
- defined.
-
- Performance improvements on all platforms.
-
-
-2010-06-16: Version 2.2.18
-
- Added API functions to retrieve information on indexed properties
- managed by the embedding layer. Fixes bug 737.
-
- Made ES5 Object.defineProperty support array elements. Fixes bug 619.
-
- Added heap profiling to the API.
-
- Removed old named property query from the API.
-
- Incremental performance improvements.
-
-
-2010-06-14: Version 2.2.17
-
- Improved debugger support for stepping out of functions.
-
- Incremental performance improvements.
-
-
-2010-06-09: Version 2.2.16
-
- Removed the SetExternalStringDiposeCallback API. Changed the
- disposal of external string resources to call a virtual Dispose
- method on the resource.
-
- Added support for more precise break points when debugging and
- stepping.
-
- Memory usage improvements on all platforms.
-
-
-2010-06-07: Version 2.2.15
-
- Added an API to control the disposal of external string resources.
-
- Added missing initialization of a couple of variables which makes
- some compilers complaint when compiling with -Werror.
-
- Improved performance on all platforms.
-
-
-2010-06-02: Version 2.2.14
-
- Fixed a crash in code generated for String.charCodeAt.
-
- Fixed a compilation issue with some GCC versions (issue 727).
-
- Performance optimizations on x64 and ARM platforms.
-
-
-2010-05-31: Version 2.2.13
-
- Implemented Object.getOwnPropertyDescriptor for element indices and
- strings (issue 599).
-
- Fixed bug for windows 64 bit C calls from generated code.
-
- Added new scons flag unalignedaccesses for arm builds.
-
- Performance improvements on all platforms.
-
-
-2010-05-26: Version 2.2.12
-
- Allowed accessors to be defined on objects rather than just object
- templates.
-
- Changed the ScriptData API.
-
-
-2010-05-21: Version 2.2.11
-
- Fixed crash bug in liveedit on 64 bit.
-
- Use 'full compiler' when debugging is active. This should increase
- the density of possible break points, making single step more fine
- grained. This will only take effect for functions compiled after
- debugging has been started, so recompilation of all functions is
- required to get the full effect. IA32 and x64 only for now.
-
- Misc. fixes to the Solaris build.
-
- Added new flags --print-cumulative-gc-stat and --trace-gc-nvp.
-
- Added filtering of CPU profiles by security context.
-
- Fixed crash bug on ARM when running without VFP2 or VFP3.
-
- Incremental performance improvements in all backends.
-
-
-2010-05-17: Version 2.2.10
-
- Performance improvements in the x64 and ARM backends.
-
-
-2010-05-10: Version 2.2.9
-
- Allowed Object.create to be called with a function (issue 697).
-
- Fixed bug with Date.parse returning a non-NaN value when called on a
- non date string (issue 696).
-
- Allowed unaligned memory accesses on ARM targets that support it (by
- Subrato K De of CodeAurora <subratokde@codeaurora.org>).
-
- C++ API for retrieving JavaScript stack trace information.
-
-
-2010-05-05: Version 2.2.8
-
- Performance improvements in the x64 and ARM backends.
-
-
-2010-05-03: Version 2.2.7
-
- Added support for ES5 date time string format to Date.parse.
-
- Performance improvements in the x64 backend.
-
-
-2010-04-28: Version 2.2.6
-
- Added "amd64" as recognized architecture in scons build script
- (by Ryan Dahl <coldredlemur@gmail.com>).
-
- Fixed bug in String search and replace with very simple RegExps.
-
- Fixed bug in RegExp containing "\b^".
-
- Performance improvements on all platforms.
-
-
-2010-04-26: Version 2.2.5
-
- Various performance improvements (especially for ARM and x64)
-
- Fixed bug in CPU profiling (http://crbug.com/42137)
-
- Fixed a bug with the natives cache.
-
- Fixed two bugs in the ARM code generator that can cause
- wrong calculations.
-
- Fixed a bug that may cause a wrong result for shift operations.
-
-
-2010-04-21: Version 2.2.4
-
- Fixed warnings on arm on newer GCC versions.
-
- Fixed a number of minor bugs.
-
- Performance improvements on all platforms.
-
-
-2010-04-14: Version 2.2.3
-
- Added stack command and mem command to ARM simulator debugger.
-
- Fixed scons snapshot and ARM build, and Windows X64 build issues.
-
- Performance improvements on all platforms.
-
-
-2010-04-12: Version 2.2.2
-
- Introduced new profiler API.
-
- Fixed random number generator to produce full 32 random bits.
-
-
-2010-04-06: Version 2.2.1
-
- Debugger improvements.
-
- Fixed minor bugs.
-
-
-2010-03-29: Version 2.2.0
-
- Fixed a few minor bugs.
-
- Performance improvements for string operations.
-
-
-2010-03-26: Version 2.1.10
-
- Fixed scons build issues.
-
- Fixed a couple of minor bugs.
-
-
-2010-03-25: Version 2.1.9
-
- Added API support for reattaching a global object to a context.
-
- Extended debugger API with access to the internal debugger context.
-
- Fixed Chromium crashes (issues http://crbug.com/39128 and
- http://crbug.com/39160)
-
-
-2010-03-24: Version 2.1.8
-
- Added fine-grained garbage collection callbacks to the API.
-
- Performance improvements on all platforms.
-
-
-2010-03-22: Version 2.1.7
-
- Fixed issue 650.
-
- Fixed a bug where __proto__ was sometimes enumerated (issue 646).
-
- Performance improvements for arithmetic operations.
-
- Performance improvements for string operations.
-
- Print script name and line number information in stack trace.
-
-
-2010-03-17: Version 2.1.6
-
- Performance improvements for arithmetic operations.
-
- Performance improvements for string operations.
-
-
-2010-03-10: Version 2.1.4
-
- Fixed code cache lookup for keyed IC's (issue http://crbug.com/37853).
-
- Performance improvements on all platforms.
-
-
-2010-03-10: Version 2.1.3
-
- Added API method for context-disposal notifications.
-
- Added API method for accessing elements by integer index.
-
- Added missing implementation of Uint32::Value and Value::IsUint32
- API methods.
-
- Added IsExecutionTerminating API method.
-
- Disabled strict aliasing for GCC 4.4.
-
- Fixed string-concatenation bug (issue 636).
-
- Performance improvements on all platforms.
-
-
-2010-02-23: Version 2.1.2
-
- Fixed a crash bug caused by wrong assert.
-
- Fixed a bug with register names on 64-bit V8 (issue 615).
-
- Performance improvements on all platforms.
-
-
-2010-02-19: Version 2.1.1
-
- [ES5] Implemented Object.defineProperty.
-
- Improved profiler support.
-
- Added SetPrototype method in the public V8 API.
-
- Added GetScriptOrigin and GetScriptLineNumber methods to Function
- objects in the API.
-
- Performance improvements on all platforms.
-
-
-2010-02-03: Version 2.1.0
-
- Values are now always wrapped in objects when used as a receiver.
- (issue 223).
-
- [ES5] Implemented Object.getOwnPropertyNames.
-
- [ES5] Restrict JSON.parse to only accept strings that conforms to the
- JSON grammar.
-
- Improvement of debugger agent (issue 549 and 554).
-
- Fixed problem with skipped stack frame in profiles (issue 553).
-
- Solaris support by Erich Ocean <erich.ocean@me.com> and Ryan Dahl
- <ry@tinyclouds.org>.
-
- Fixed a bug that Math.round() returns incorrect results for huge
- integers.
-
- Fixed enumeration order for objects created from some constructor
- functions (isue http://crbug.com/3867).
-
- Fixed arithmetic on some integer constants (issue 580).
-
- Numerous performance improvements including porting of previous IA-32
- optimizations to x64 and ARM architectures.
-
-
-2010-01-14: Version 2.0.6
-
- Added ES5 Object.getPrototypeOf, GetOwnPropertyDescriptor,
- GetOwnProperty, FromPropertyDescriptor.
-
- Fixed Mac x64 build errors.
-
- Improved performance of some math and string operations.
-
- Improved performance of some regexp operations.
-
- Improved performance of context creation.
-
- Improved performance of hash tables.
-
-
-2009-12-18: Version 2.0.5
-
- Extended to upper limit of map space to allow for 7 times as many map
- to be allocated (issue 524).
-
- Improved performance of code using closures.
-
- Improved performance of some binary operations involving doubles.
-
-
-2009-12-16: Version 2.0.4
-
- Added ECMAScript 5 Object.create.
-
- Improved performance of Math.max and Math.min.
-
- Optimized adding of strings on 64-bit platforms.
-
- Improved handling of external strings by using a separate table
- instead of weak handles. This improves garbage collection
- performance and uses less memory.
-
- Changed code generation for object and array literals in toplevel
- code to be more compact by doing more work in the runtime.
-
- Fixed a crash bug triggered when garbage collection happened during
- generation of a callback load inline cache stub.
-
- Fixed crash bug sometimes triggered when local variables shadowed
- parameters in functions that used the arguments object.
-
-
-2009-12-03: Version 2.0.3
-
- Optimized handling and adding of strings, for-in and Array.join.
-
- Heap serialization is now non-destructive.
-
- Improved profiler support with information on time spend in C++
- callbacks registered through the API.
-
- Added commands to the debugger protocol for starting/stopping
- profiling.
-
- Enabled the non-optimizing compiler for top-level code.
-
- Changed the API to only allow strings to be set as data objects on
- Contexts and scripts to avoid potentially keeping global objects
- around for too long (issue 528).
-
- OpenBSD support patch by Peter Valchev <pvalchev@gmail.com>.
-
- Fixed bugs.
-
-
-2009-11-24: Version 2.0.2
-
- Improved profiler support.
-
- Fixed bug that broke compilation of d8 with readline support.
-
-
-2009-11-20: Version 2.0.1
-
- Fixed crash bug in String.prototype.replace.
-
- Reverted a change which caused Chromium interactive ui test
- failures.
-
-
-2009-11-18: Version 2.0.0
-
- Added support for VFP on ARM.
-
- Added TryCatch::ReThrow method to the API.
-
- Reduced the size of snapshots and improved the snapshot load time.
-
- Improved heap profiler support.
-
- 64-bit version now supported on Windows.
-
- Fixed a number of debugger issues.
-
- Fixed bugs.
-
-
-2009-10-29: Version 1.3.18
-
- Reverted a change which caused crashes in RegExp replace.
-
- Reverted a change which caused Chromium ui_tests failure.
-
-
-2009-10-28: Version 1.3.17
-
- Added API method to get simple heap statistics.
-
- Improved heap profiler support.
-
- Fixed the implementation of the resource constraint API so it
- works when using snapshots.
-
- Fixed a number of issues in the Windows 64-bit version.
-
- Optimized calls to API getters.
-
- Added valgrind notification on code modification to the 64-bit version.
-
- Fixed issue where we logged shared library addresses on Windows at
- startup and never used them.
-
-
-2009-10-16: Version 1.3.16
-
- X64: Convert smis to holding 32 bits of payload.
-
- Introduced v8::Integer::NewFromUnsigned method.
-
- Added missing null check in Context::GetCurrent.
-
- Added trim, trimLeft and trimRight methods to String
- Patch by Jan de Mooij <jandemooij@gmail.com>
-
- Implement ES5 Array.isArray
- Patch by Jan de Mooij <jandemooij@gmail.com>
-
- Skip access checks for hidden properties.
-
- Added String::Concat(Handle<String> left, Handle<String> right) to the
- V8 API.
-
- Fixed GYP-based builds of V8.
-
-
-2009-10-07: Version 1.3.15
-
- Expanded the maximum size of the code space to 512MB for 64-bit mode.
-
- Fixed a crash bug happening when starting profiling (issue
- http://crbug.com/23768).
-
-
-2009-10-07: Version 1.3.14
-
- Added GetRealNamedProperty to the API to lookup real properties
- located on the object or in the prototype chain skipping any
- interceptors.
-
- Fixed the stack limits setting API to work correctly with threads. The
- stack limit now needs to be set to each thread which is used with V8.
-
- Removed the high-priority flag from IdleNotification()
-
- Ensure V8 is initialized before locking and unlocking threads.
-
- Implemented a new JavaScript minifier for compressing the source of
- the built-in JavaScript. This removes non-Open Source code from Douglas
- Crockford from the project.
-
- Added a missing optimization in StringCharAt.
-
- Fixed some flaky socket tests.
-
- Change by Alexander Botero-Lowry to fix profiler sampling on FreeBSD
- in 64-bit mode.
-
- Fixed memory leaks in the thread management code.
-
- Fixed the result of assignment to a pixel array. The assigned value
- is now the result.
-
- Error reporting for invalid left-hand sides in for-in statements, pre-
- and postfix count expressions, and assignments now matches the JSC
- behavior in Safari 4.
-
- Follow the spec in disallowing function declarations without a name.
-
- Always allocate code objects within a 2 GB range. On x64 architecture
- this is used to use near calls (32-bit displacement) in Code objects.
-
- Optimized array construction ported to x64 and ARM architectures.
-
- [ES5] Changed Object.keys to return strings for element indices.
-
-
-2009-09-23: Version 1.3.13
-
- Fixed uninitialized memory problem.
-
- Improved heap profiler support.
-
-
-2009-09-22: Version 1.3.12
-
- Changed behavior of |function|.toString() on built-in functions to
- be compatible with other implementations. Patch by Jan de Mooij.
-
- Added Object::IsDirty in the API.
-
- Optimized array construction; it is now handled purely in native
- code.
-
- [ES5] Made properties of the arguments array enumerable.
-
- [ES5] Added test suite adapter for the es5conform test suite.
-
- [ES5] Added Object.keys function.
-
-
-2009-09-15: Version 1.3.11
-
- Fixed crash in error reporting during bootstrapping.
-
- Optimized generated IA32 math code by using SSE2 instructions when
- available.
-
- Implemented missing pieces of debugger infrastructure on ARM. The
- debugger is now fully functional on ARM.
-
- Made 'hidden' the default visibility for gcc.
-
-
-2009-09-09: Version 1.3.10
-
- Fixed profiler on Mac in 64-bit mode.
-
- Optimized creation of objects from simple constructor functions on
- ARM.
-
- Fixed a number of debugger issues.
-
- Reduced the amount of memory consumed by V8.
-
-
-2009-09-02: Version 1.3.9
-
- Optimized stack guard checks on ARM.
-
- Optimized API operations by inlining more in the API.
-
- Optimized creation of objects from simple constructor functions.
-
- Enabled a number of missing optimizations in the 64-bit port.
-
- Implemented native-code support for regular expressions on ARM.
-
- Stopped using the 'sahf' instruction on 64-bit machines that do
- not support it.
-
- Fixed a bug in the support for forceful termination of JavaScript
- execution.
-
-
-2009-08-26: Version 1.3.8
-
- Changed the handling of idle notifications to allow idle
- notifications when V8 has not yet been initialized.
-
- Fixed ARM simulator compilation problem on Windows.
-
-
-2009-08-25: Version 1.3.7
-
- Reduced the size of generated code on ARM platforms by reducing
- the size of constant pools.
-
- Changed build files to not include the 'ENV' user environment
- variable in the build environment.
-
- Changed the handling of idle notifications.
-
-
-2009-08-21: Version 1.3.6
-
- Added support for forceful termination of JavaScript execution.
-
- Added low memory notification to the API. The embedding host can signal
- a low memory situation to V8.
-
- Changed the handling of global handles (persistent handles in the API
- sense) to avoid issues regarding allocation of new global handles
- during weak handle callbacks.
-
- Changed the growth policy of the young space.
-
- Fixed a GC issue introduced in version 1.3.5.
-
-
-2009-08-19: Version 1.3.5
-
- Optimized initialization of some arrays in the builtins.
-
- Fixed mac-nm script to support filenames with spaces.
-
- Support for using the V8 profiler when V8 is embedded in a Windows DLL.
-
- Changed typeof RegExp from 'object' to 'function' for compatibility.
- Fixed bug where regexps were not callable across contexts.
-
- Added context independent script compilation to the API.
-
- Added API call to get the stack trace for an exception.
-
- Added API for getting object mirrors.
-
- Made sure that SSE3 instructions are used whenever possible even when
- running off a snapshot generated without using SSE3 instructions.
-
- Tweaked the handling of the initial size and growth policy of the heap.
-
- Added native code generation for RegExp to 64-bit version.
-
- Added JavaScript debugger support to 64-bit version.
-
-
-2009-08-13: Version 1.3.4
-
- Added a readline() command to the d8 shell.
-
- Fixed bug in json parsing.
-
- Added idle notification to the API and reduced memory on idle
- notifications.
-
-
-2009-08-12: Version 1.3.3
-
- Fixed issue 417: incorrect %t placeholder expansion.
-
- Added .gitignore file similar to Chromium's one.
-
- Fixed SConstruct file to build with new logging code for Android.
-
- API: added function to find instance of template in prototype
- chain. Inlined Object::IsInstanceOf.
-
- Land change to notify valgrind when we modify code on x86.
-
- Added api call to determine whether a string can be externalized.
-
- Added a write() command to d8.
-
-
-2009-08-05: Version 1.3.2
-
- Started new compiler infrastructure for two-pass compilation using a
- control flow graph constructed from the AST.
-
- Profiler stack sampling for X64.
-
- Safe handling of NaN to Posix platform-dependent time functions.
-
- Added a new profiler control API to unify controlling various aspects
- of profiling.
-
- Fixed issue 392.
-
-
-2009-07-30: Version 1.3.1
-
- Speed improvements to accessors and interceptors.
-
- Added support for capturing stack information on custom errors.
-
- Added support for morphing an object into a pixel array where its
- indexed properties are stored in an external byte array. Values written
- are always clamped to the 0..255 interval.
-
- Profiler on x64 now handles C/C++ functions from shared libraries.
-
- Changed the debugger to avoid stepping into function.call/apply if the
- function is a built-in.
-
- Initial implementation of constructor heap profile for JS objects.
-
- More fine grained control of profiling aspects through the API.
-
- Optimized the called as constructor check for API calls.
-
-
-2009-07-27: Version 1.3.0
-
- Allowed RegExp objects to be called as functions (issue 132).
-
- Fixed issue where global property cells would escape after
- detaching the global object; see http://crbug.com/16276.
-
- Added support for stepping into setters and getters in the
- debugger.
-
- Changed the debugger to avoid stopping in its own JavaScript code
- and in the code of built-in functions.
-
- Fixed issue 345 by avoiding duplicate escaping labels.
-
- Fixed ARM code generator crash in short-circuited boolean
- expressions and added regression tests.
-
- Added an external allocation limit to avoid issues where small V8
- objects would hold on to large amounts of external memory without
- causing garbage collections.
-
- Finished more of the inline caching stubs for x64 targets.
-
-
-2009-07-13: Version 1.2.14
-
- Added separate paged heap space for global property cells and
- avoid updating the write barrier when storing into them.
-
- Improved peep-hole optimization on ARM platforms by not emitting
- unnecessary debug information.
-
- Re-enabled ICs for loads and calls that skip a global object
- during lookup through the prototype chain.
-
- Allowed access through global proxies to use ICs.
-
- Fixed issue 401.
-
-
-2009-07-09: Version 1.2.13
-
- Fixed issue 397, issue 398, and issue 399.
-
- Added support for breakpoint groups.
-
- Fixed bugs introduced with the new global object representation.
-
- Fixed a few bugs in the ARM code generator.
-
-
-2009-07-06: Version 1.2.12
-
- Added stack traces collection to Error objects accessible through
- the e.stack property.
-
- Changed RegExp parser to use a recursive data structure instead of
- stack-based recursion.
-
- Optimized Date object construction and string concatenation.
-
- Improved performance of div, mod, and mul on ARM platforms.
-
-
-2009-07-02: Version 1.2.11
-
- Improved performance on IA-32 and ARM.
-
- Fixed profiler sampler implementation on Mac OS X.
-
- Changed the representation of global objects to improve
- performance of adding a lot of new properties.
-
-
-2009-06-29: Version 1.2.10
-
- Improved debugger support.
-
- Fixed bug in exception message reporting (issue 390).
-
- Improved overall performance.
-
-
-2009-06-23: Version 1.2.9
-
- Improved math performance on ARM.
-
- Fixed profiler name-inference bug.
-
- Fixed handling of shared libraries in the profiler tick processor
- scripts.
-
- Fixed handling of tests that time out in the test scripts.
-
- Fixed compilation on MacOS X version 10.4.
-
- Fixed two bugs in the regular expression engine.
-
- Fixed a bug in the string type inference.
-
- Fixed a bug in the handling of 'constant function' properties.
-
- Improved overall performance.
-
-
-2009-06-16: Version 1.2.8
-
- Optimized math on ARM platforms.
-
- Fixed two crash bugs in the handling of getters and setters.
-
- Improved the debugger support by adding scope chain information.
-
- Improved the profiler support by compressing log data transmitted
- to clients.
-
- Improved overall performance.
-
-
-2009-06-08: Version 1.2.7
-
- Improved debugger and profiler support.
-
- Reduced compilation time by improving the handling of deferred
- code.
-
- Optimized interceptor accesses where the property is on the object
- on which the interceptors is attached.
-
- Fixed compilation problem on GCC 4.4 by changing the stack
- alignment to 16 bytes.
-
- Fixed handle creation to follow strict aliasing rules.
-
- Fixed compilation on FreeBSD.
-
- Introduced API for forcing the deletion of a property ignoring
- interceptors and attributes.
-
-
-2009-05-29: Version 1.2.6
-
- Added a histogram recording hit rates at different levels of the
- compilation cache.
-
- Added stack overflow check for the RegExp analysis phase. Previously a
- very long regexp graph could overflow the stack with recursive calls.
-
- Use a dynamic buffer when collecting log events in memory.
-
- Added start/stop events to the profiler log.
-
- Fixed infinite loop which could happen when setting a debug break while
- executing a RegExp compiled to native code.
-
- Fixed handling of lastIndexOf called with negative index (issue 351).
-
- Fixed irregular crash in profiler test (issue 358).
-
- Fixed compilation issues with some versions of gcc.
-
-
-2009-05-26: Version 1.2.5
-
- Fixed bug in initial boundary check for Boyer-Moore text
- search (issue 349).
-
- Fixed compilation issues with MinGW and gcc 4.3+ and added support
- for armv7 and cortex-a8 architectures. Patches by Lei Zhang and
- Craig Schlenter.
-
- Added a script cache to the debugger.
-
- Optimized compilation performance by improving internal data
- structures and avoiding expensive property load optimizations for
- code that's infrequently executed.
-
- Exposed the calling JavaScript context through the static API
- function Context::GetCalling().
-
-
-2009-05-18: Version 1.2.4
-
- Improved performance of floating point number allocation for ARM
- platforms.
-
- Fixed crash when using the instanceof operator on functions with
- number values in their prototype chain (issue 341).
-
- Optimized virtual frame operations in the code generator to speed
- up compilation time and allocated the frames in the zone.
-
- Made the representation of virtual frames and jump targets in the
- code generator much more compact.
-
- Avoided linear search for non-locals in scope code when resolving
- variables inside with and eval scopes.
-
- Optimized lexical scanner by dealing with whitespace as part of
- the token scanning instead of as a separate step before it.
-
- Changed the scavenging collector so that promoted objects do not
- reside in the old generation while their remembered set is being
- swept for pointers into the young generation.
-
- Fixed numeric overflow handling when compiling count operations.
-
-
-2009-05-11: Version 1.2.3
-
- Fixed bug in reporting of out-of-memory situations.
-
- Introduced hidden prototypes on certain builtin prototype objects
- such as String.prototype to emulate JSC's behavior of restoring
- the original function when deleting functions from those prototype
- objects.
-
- Fixed crash bug in the register allocator.
-
-
-2009-05-04: Version 1.2.2
-
- Fixed bug in array sorting for sparse arrays (issue 326).
-
- Added support for adding a soname when building a shared library
- on Linux (issue 151).
-
- Fixed bug caused by morphing internal ASCII strings to external
- two-byte strings. Slices over ASCII strings have to forward ASCII
- checks to the underlying buffer string.
-
- Allowed API call-as-function handlers to be called as
- constructors.
-
- Fixed a crash bug where an external string was disposed but a
- slice of the external string survived as a symbol.
-
-
-2009-04-27: Version 1.2.1
-
- Added EcmaScript 5 JSON object.
-
- Fixed bug in preemption support on ARM.
-
-
-2009-04-23: Version 1.2.0
-
- Optimized floating-point operations on ARM.
-
- Added a number of extensions to the debugger API.
-
- Changed the enumeration order for unsigned integer keys to always
- be numerical order.
-
- Added a "read" extension to the shell sample.
-
- Added support for Array.prototype.reduce and
- Array.prototype.reduceRight.
-
- Added an option to the SCons build to control Microsoft Visual C++
- link-time code generation.
-
- Fixed a number of bugs (in particular issue 315, issue 316,
- issue 317 and issue 318).
-
-
-2009-04-15: Version 1.1.10
-
- Fixed crash bug that occurred when loading a const variable in the
- presence of eval.
-
- Allowed using with and eval in registered extensions in debug mode
- by fixing bogus assert.
-
- Fixed the source position for function returns to enable the
- debugger to break there.
-
-
-2009-04-14: Version 1.1.9
-
- Made the stack traversal code in the profiler robust by avoiding
- to look into the heap.
-
- Added name inferencing for anonymous functions to facilitate
- debugging and profiling.
-
- Re-enabled stats timers in the developer shell (d8).
-
- Fixed issue 303 by avoiding to shortcut cons-symbols.
-
-
-2009-04-11: Version 1.1.8
-
- Changed test-debug/ThreadedDebugging to be non-flaky (issue 96).
-
- Fixed step-in handling for Function.prototype.apply and call in
- the debugger (issue 269).
-
- Fixed v8::Object::DeleteHiddenValue to not bail out when there
- are no hidden properties.
-
- Added workaround for crash bug, where external symbol table
- entries with deleted resources would lead to NPEs when looking
- up in the symbol table.
-
-
-2009-04-07: Version 1.1.7
-
- Added support for easily importing additional environment
- variables into the SCons build.
-
- Optimized strict equality checks.
-
- Fixed crash in indexed setters on objects without a corresponding
- getter (issue 298).
-
- Re-enabled script compilation cache.
-
-
-2009-04-01: Version 1.1.6
-
- Reverted an unsafe code generator change.
-
-
-2009-04-01: Version 1.1.5
-
- Fixed bug that caused function literals to not be optimized as
- much as other functions.
-
- Improved profiler support.
-
- Fixed a crash bug in connection with debugger unloading.
-
- Fixed a crash bug in the code generator caused by losing the
- information that a frame element was copied.
-
- Fixed an exception propagation bug that could cause non-null
- return values when exceptions were thrown.
-
-
-2009-03-30: Version 1.1.4
-
- Optimized String.prototype.match.
-
- Improved the stack information in profiles.
-
- Fixed bug in ARM port making it possible to compile the runtime
- system for thumb mode again.
-
- Implemented a number of optimizations in the code generator.
-
- Fixed a number of memory leaks in tests.
-
- Fixed crash bug in connection with script source code and external
- strings.
-
-
-2009-03-24: Version 1.1.3
-
- Fixed assertion failures in compilation of loop conditions.
-
- Removed STL dependency from developer shell (d8).
-
- Added infrastructure for protecting the V8 heap from corruption
- caused by memory modifications from the outside.
-
-
-2009-03-24: Version 1.1.2
-
- Improved frame merge code generated by the code generator.
-
- Optimized String.prototype.replace.
-
- Implemented __defineGetter__ and __defineSetter__ for properties
- with integer keys on non-array objects.
-
- Improved debugger and profiler support.
-
- Fixed a number of portability issues to allow compilation for
- smaller ARM devices.
-
- Exposed object cloning through the API.
-
- Implemented hidden properties. This is used to expose an identity
- hash for objects through the API.
-
- Implemented restarting of regular expressions if their input
- string changes representation during preemption.
-
- Fixed a code generator bug that could cause assignments in loops
- to be ignored if using continue to break out of the loop (issue
- 284).
-
-
-2009-03-12: Version 1.1.1
-
- Fixed an assertion in the new compiler to take stack overflow
- exceptions into account.
-
- Removed exception propagation code that could cause crashes.
-
- Fixed minor bug in debugger line number computations.
-
- 8-byte align the C stack on Linux and Windows to speed up floating
- point computations.
-
-
-2009-03-12: Version 1.1.0
-
- Improved code generation infrastructure by doing simple register
- allocation and constant folding and propagation.
-
- Optimized regular expression matching by avoiding to create
- intermediate string arrays and by flattening nested array
- representations of RegExp data.
-
- Traverse a few stack frames when recording profiler samples to
- include partial call graphs in the profiling output.
-
- Added support for using OProfile to profile generated code.
-
- Added remote debugging support to the D8 developer shell.
-
- Optimized creation of nested literals like JSON objects.
-
- Fixed a bug in garbage collecting unused maps and turned it on by
- default (--collect-maps).
-
- Added support for running tests under Valgrind.
-
-
-2009-02-27: Version 1.0.3
-
- Optimized double-to-integer conversions in bit operations by using
- SSE3 instructions if available.
-
- Optimized initialization sequences that store to multiple
- properties of the same object.
-
- Changed the D8 debugger frontend to use JSON messages.
-
- Force garbage collections when disposing contexts.
-
- Align code objects at 32-byte boundaries.
-
-
-2009-02-25: Version 1.0.2
-
- Improved profiling support by performing simple call stack
- sampling for ticks and by fixing a bug in the logging of code
- addresses.
-
- Fixed a number of debugger issues.
-
- Optimized code that uses eval.
-
- Fixed a couple of bugs in the regular expression engine.
-
- Reduced the size of generated code for certain regular expressions.
-
- Removed JSCRE completely.
-
- Fixed issue where test could not be run if there was a dot in the
- checkout path.
-
-
-2009-02-13: Version 1.0.1
-
- Fixed two crash-bugs in irregexp (issue 231 and 233).
-
- Fixed a number of minor bugs (issue 87, 227 and 228).
-
- Added support for morphing strings to external strings on demand
- to avoid having to create copies in the embedding code.
-
- Removed experimental support for external symbol callbacks.
-
-
-2009-02-09: Version 1.0.0
-
- Fixed crash-bug in the code generation for case independent 16 bit
- backreferences.
-
- Made shells more robust in the presence of string conversion
- failures (issue 224).
-
- Fixed a potential infinite loop when attempting to resolve
- eval (issue 221).
-
- Miscellaneous fixes to the new regular expression engine.
-
- Reduced binary by stripping unneeded text from JavaScript library and
- minifying some JavaScript files.
-
-
-2009-01-27: Version 0.4.9
-
- Enabled new regular expression engine.
-
- Made a number of changes to the debugger protocol.
-
- Fixed a number of bugs in the preemption support.
-
- Added -p option to the developer shell to run files in parallel
- using preemption.
-
- Fixed a number of minor bugs (including issues 176, 187, 189, 192,
- 193, 198 and 201).
-
- Fixed a number of bugs in the serialization/deserialization
- support for the ARM platform.
-
-
-2009-01-19: Version 0.4.8.1
-
- Minor patch to debugger support.
-
-
-2009-01-16: Version 0.4.8
-
- Fixed string length bug on ARM (issue 171).
-
- Made most methods in the API const.
-
- Optimized object literals by improving data locality.
-
- Fixed bug that caused incomplete functions to be cached in case of
- stack overflow exceptions.
-
- Fixed bugs that caused catch variables and variables introduced by
- eval to behave incorrectly when using accessors (issues 186, 190
- and 191).
-
-
-2009-01-06: Version 0.4.7
-
- Minor bugfixes and optimizations.
-
- Added command line debugger to D8 shell.
-
- Fixed subtle bug that caused the wrong 'this' to be used when
- calling a caught function in a catch clause.
-
- Inline array loads within loops directly in the code instead of
- always calling a stub.
-
-
-2008-12-11: Version 0.4.6
-
- Fixed exception reporting bug where certain exceptions were
- incorrectly reported as uncaught.
-
- Improved the memory allocation strategy used during compilation to
- make running out of memory when compiling huge scripts less
- likely.
-
- Optimized String.replace by avoiding the construction of certain
- sub strings.
-
- Fixed bug in code generation for large switch statements on ARM.
-
- Fixed bug that caused V8 to change the global object template
- passed in by the user.
-
- Changed the API for creating object groups used during garbage
- collection. Entire object groups are now passed to V8 instead of
- individual members of the groups.
-
-
-2008-12-03: Version 0.4.5
-
- Added experimental API support for allocating V8 symbols as
- external strings.
-
- Fixed bugs in debugging support on ARM.
-
- Changed eval implementation to correctly detect whether or not a
- call to eval is aliased.
-
- Fixed bug caused by a combination of the compilation cache and
- dictionary probing in native code. The bug caused us to sometimes
- call functions that had not yet been compiled.
-
- Added platform support for FreeBSD.
-
- Added support for building V8 on Windows with either the shared or
- static version of MSVCRT.
-
- Added the v8::jscre namespace around the jscre functions to avoid
- link errors (duplicate symbols) when building Google Chrome.
-
- Added support for calling a JavaScript function with the current
- debugger execution context as its argument to the debugger
- interface.
-
- Changed the type of names of counters from wchar_t to char.
-
- Changed the Windows system call used to compute daylight savings
- time. The system call that we used to use became four times
- slower on WinXP SP3.
-
- Added support in the d8 developer shell for memory-mapped counters
- and added a stats-viewer tool.
-
- Fixed bug in upper/lower case mappings (issue 149).
-
-
-2008-11-17: Version 0.4.4
-
- Reduced code size by using shorter instruction encoding when
- possible.
-
- Added a --help option to the shell sample and to the d8 shell.
-
- Added visual studio project files for building the ARM simulator.
-
- Fixed a number of ARM simulator issues.
-
- Fixed bug in out-of-memory handling on ARM.
-
- Implemented shell support for passing arguments to a script from
- the command line.
-
- Fixed bug in date code that made certain date functions return -0
- instead of 0 for dates before the epoch.
-
- Restricted applications of eval so it can only be used in the
- context of the associated global object.
-
- Treat byte-order marks as whitespace characters.
-
-
-2008-11-04: Version 0.4.3
-
- Added support for API accessors that prohibit overwriting by
- accessors defined in JavaScript code by using __defineGetter__ and
- __defineSetter__.
-
- Improved handling of conditionals in test status files.
-
- Introduced access control in propertyIsEnumerable.
-
- Improved performance of some string operations by caching
- information about the type of the string between operations.
-
- Fixed bug in fast-case code for switch statements that only have
- integer labels.
-
-
-2008-10-30: Version 0.4.2
-
- Improved performance of Array.prototype.concat by moving the
- implementation to C++ (issue 123).
-
- Fixed heap growth policy to avoid growing old space to its maximum
- capacity before doing a garbage collection and fixed issue that
- would lead to artificial out of memory situations (issue 129).
-
- Fixed Date.prototype.toLocaleDateString to return the date in the
- same format as WebKit.
-
- Added missing initialization checks to debugger API.
-
- Added removing of unused maps during GC.
-
-
-2008-10-28: Version 0.4.1
-
- Added caching of RegExp data in compilation cache.
-
- Added Visual Studio project file for d8 shell.
-
- Fixed function call performance regression introduced in version
- 0.4.0 when splitting the global object in two parts (issue 120).
-
- Fixed issue 131 by checking for empty handles before throwing and
- reporting exceptions.
-
-
-2008-10-23: Version 0.4.0
-
- Split the global object into two parts: The state holding global
- object and the global object proxy.
-
- Fixed bug that affected the value of an assignment to an element
- in certain cases (issue 116).
-
- Added GetPropertyNames functionality (issue 33) and extra Date
- functions (issue 77) to the API.
-
- Changed WeakReferenceCallback to take a Persistent<Value> instead
- of a Persistent<Object> (issue 101).
-
- Fixed issues with message reporting for exceptions in try-finally
- blocks (issues 73 and 75).
-
- Optimized flattening of strings and string equality checking.
-
- Improved Boyer-Moore implementation for faster indexOf operations.
-
- Added development shell (d8) which includes counters and
- completion support.
-
- Fixed problem with the receiver passed to functions called from
- eval (issue 124).
-
-
-2008-10-16: Version 0.3.5
-
- Improved string hash-code distribution by excluding bit-field bits
- from the hash-code.
-
- Changed string search algorithm used in indexOf from KMP to
- Boyer-Moore.
-
- Improved the generated code for the instanceof operator.
-
- Improved performance of slow-case string equality checks by
- specializing the code based on the string representation.
-
- Improve the handling of out-of-memory situations (issue 70).
-
- Improved performance of strict equality checks.
-
- Improved profiler output to make it easier to see anonymous
- functions.
-
- Improved performance of slow-case keyed loads.
-
- Improved property access performance by allocating a number of
- properties in the front object.
-
- Changed the toString behavior on the built-in object constructors
- to print [native code] instead of the actual source. Some web
- applications do not like constructors with complex toString
- results.
-
-
-2008-10-06: Version 0.3.4
-
- Changed Array.prototype.sort to use quick sort.
-
- Fixed code generation issue where leaving a finally block with
- break or continue would accumulate elements on the expression
- stack (issue 86).
-
- Made sure that the name accessor on functions returns the expected
- names for builtin JavaScript functions and C++ callback functions.
-
- Added fast case code for extending the property storage array of
- JavaScript objects.
-
- Ported switch statement optimizations introduced in version 0.3.3
- to the ARM code generator.
-
- Allowed GCC to use strict-aliasing rules when compiling.
-
- Improved performance of arguments object allocation by taking care
- of arguments adaptor frames in the generated code.
-
- Updated the V8 benchmark suite to version 2.
-
-
-2008-09-25: Version 0.3.3
-
- Improved handling of relocation information to enable more
- peep-hole optimizations.
-
- Optimized switch statements where all labels are constant small
- integers.
-
- Optimized String.prototype.indexOf for common cases.
-
- Fixed more build issues (issue 80).
-
- Fixed a couple of profiler issues.
-
- Fixed bug where the body of a function created using the Function
- constructor was not allowed to end with a single-line comment
- (issue 85).
-
- Improved handling of object literals by canonicalizing object
- literal maps. This will allow JSON objects with the same set of
- properties to share the same map making inline caching work better
- for JSON objects.
-
-
-2008-09-17: Version 0.3.2
-
- Generalized the EvalCache into a CompilationCache and enabled it
- for scripts too. The current strategy is to retire all entries
- whenever a mark-sweep collection is started.
-
- Fixed bug where switch statements containing only a default case
- would lead to an unbalanced stack (issue 69).
-
- Fixed bug that made access to the function in a named function
- expression impossible in certain situations (issue 24).
-
- Fixed even more build issues.
-
- Optimized calling conventions on ARM. The conventions on ARM and
- IA-32 now match.
-
- Removed static initializers for flags and counters.
-
- Improved inline caching behavior for uncommon cases where lazily
- loading Date and RegExp code could force certain code paths go
- megamorphic.
-
- Removed arguments adaption for builtins written in C++. This
- makes Array.prototype.push and Array.prototype.pop slightly
- faster.
-
-
-2008-09-11: Version 0.3.1
-
- Fixed a number of build issues.
-
- Fixed problem with missing I-cache flushing on ARM.
-
- Changed space layout in memory management by splitting up
- code space into old data space and code space.
-
- Added utf-8 conversion support to the API (issue 57).
-
- Optimized repeated calls to eval with the same strings. These
- repeated calls are common in web applications.
-
- Added Xcode project file.
-
- Optimized a couple of Array operations.
-
- Fixed parser bug by checking for end-of-string when parsing break
- and continue (issue 35).
-
- Fixed problem where Asian characters were not categorized as
- letters.
-
- Fixed bug that disallowed calling functions fetched from an array
- using a string as an array index (issue 32).
-
- Fixed bug where the internal field count on object templates were
- sometimes ignored (issue 54).
-
- Added -f option to the shell sample for compatibility with other
- engines (issue 18).
-
- Added source info to TryCatches in the API.
-
- Fixed problem where the seed for the random number generator was
- clipped in a double to unsigned int conversion.
-
- Fixed bug where cons string symbols were sometimes converted to
- non-symbol flat strings during GC.
-
- Fixed bug in error reporting when attempting to convert null to an
- object.
-
-
-2008-09-04: Version 0.3.0
-
- Added support for running tests on the ARM simulator.
-
- Fixed bug in the 'in' operator where negative indices were not
- treated correctly.
-
- Fixed build issues on gcc-4.3.1.
-
- Changed Date.prototype.toLocaleTimeString to not print the
- timezone part of the time.
-
- Renamed debug.h to v8-debug.h to reduce the risk of name conflicts
- with user code.
-
-
-2008-09-02: Version 0.2.5
-
- Renamed the top level directory 'public' to 'include'.
-
- Added 'env' option to the SCons build scripts to support
- overriding the ENV part of the build environment. This is mostly
- to support Windows builds in cases where SCons cannot find the
- correct paths to the Windows SDK, as these paths cannot be passed
- through shell environment variables.
-
- Enabled "Buffer Security Check" on for the Windows SCons build and
- added the linker option /OPT:ICF as an optimization.
-
- Added the V8 benchmark suite to the repository.
-
-
-2008-09-01: Version 0.2.4
-
- Included mjsunit JavaScript test suite and C++ unit tests.
-
- Changed the shell sample to not print the result of executing a
- script provided on the command line.
-
- Fixed issue when building samples on Windows using a shared V8
- library. Added visibility option on Linux build which makes the
- generated library 18% smaller.
-
- Changed build system to accept multiple build modes in one build
- and generate separate objects, libraries and executables for each
- mode.
-
- Removed deferred negation optimization (a * -b => -(a * b)) since
- this visibly changes operand conversion order.
-
- Improved parsing performance by introducing stack guard in
- preparsing. Without a stack guard preparsing always bails out
- with stack overflow.
-
- Changed shell sample to take flags directly from the command-line.
- Added API call that implements this.
-
- Added load, quit and version functions to the shell sample so it's
- easier to run benchmarks and tests.
-
- Fixed issue with building samples and cctests on 64-bit machines.
-
- Fixed bug in the runtime system where the prototype chain was not
- always searched for a setter when setting a property that does not
- exist locally.
-
-
-2008-08-14: Version 0.2.3
-
- Improved performance of garbage collection by moving the
- function that updates pointers during compacting collection
- into the updating visitor. This gives the compiler a better
- chance to inline and avoid a function call per (potential)
- pointer.
-
- Extended the shell sample with a --runtime-flags option.
-
- Added Visual Studio project files for the shell.cc and
- process.cc samples.
-
-
-2008-08-13: Version 0.2.2
-
- Improved performance of garbage collection by changing the way
- we use the marking stack in the event of stack overflow during
- full garbage collection and by changing the way we mark roots.
-
- Cleaned up ARM version by removing top of stack caching and by
- introducing push/pop elimination.
-
- Cleaned up the way runtime functions are called to allow
- runtime calls with no arguments.
-
- Changed Windows build options to make sure that exceptions are
- disabled and that optimization flags are enabled.
-
- Added first version of Visual Studio project files.
-
-
-2008-08-06: Version 0.2.1
-
- Improved performance of unary addition by avoiding runtime calls.
-
- Fixed the handling of '>' and '<=' to use right-to-left conversion
- and left-to-right evaluation as specified by ECMA-262.
-
- Fixed a branch elimination bug on the ARM platform where incorrect
- code was generated because of overly aggressive branch
- elimination.
-
- Improved performance of code that repeatedly assigns the same
- function to the same property of different objects with the same
- map.
-
- Untangled DEBUG and ENABLE_DISASSEMBLER defines. The disassembler
- no longer expects DEBUG to be defined.
-
- Added platform-nullos.cc to serve as the basis for new platform
- implementations.
-
-
-2008-07-30: Version 0.2.0
-
- Changed all text files to have native svn:eol-style.
-
- Added a few samples and support for building them. The samples
- include a simple shell that can be used to benchmark and test V8.
-
- Changed V8::GetVersion to return the version as a string.
-
- Added source for lazily loaded scripts to snapshots and made
- serialization non-destructive.
-
- Improved ARM support by fixing the write barrier code to use
- aligned loads and stores and by removing premature locals
- optimization that relied on broken support for callee-saved
- registers (removed).
-
- Refactored the code for marking live objects during garbage
- collection and the code for allocating objects in paged
- spaces. Introduced an abstraction for the map word of a heap-
- allocated object and changed the memory allocator to allocate
- executable memory only for spaces that may contain code objects.
-
- Moved StringBuilder to utils.h and ScopedLock to platform.h, where
- they can be used by debugging and logging modules. Added
- thread-safe message queues for dealing with debugger events.
-
- Fixed the source code reported by toString for certain builtin
- empty functions and made sure that the prototype property of a
- function is enumerable.
-
- Improved performance of converting values to condition flags in
- generated code.
-
- Merged disassembler-{arch} files.
-
-
-2008-07-28: Version 0.1.4
-
- Added support for storing JavaScript stack traces in a stack
- allocated buffer to make it visible in shallow core dumps.
- Controlled by the --preallocate-message-memory flag which is
- disabled by default.
-
-
-2008-07-25: Version 0.1.3
-
- Fixed bug in JSObject::GetPropertyAttributePostInterceptor where
- map transitions would count as properties.
-
- Allowed aliased eval invocations by treating them as evals in the
- global context. This may change in the future.
-
- Added support for accessing the last entered context through the
- API and renamed Context::Current to Context::GetCurrent and
- Context::GetSecurityContext to Context::GetCurrentSecurityContext.
-
- Fixed bug in the debugger that would cause the debugger scripts to
- be recursively loaded and changed all disabling of interrupts to
- be block-structured.
-
- Made snapshot data read-only to allow it to be more easily shared
- across multiple users of V8 when linked as a shared library.
-
-
-2008-07-16: Version 0.1.2
-
- Fixed building on Mac OS X by recognizing i386 and friends as
- IA-32 platforms.
-
- Added propagation of stack overflow exceptions that occur while
- compiling nested functions.
-
- Improved debugger with support for recursive break points and
- handling of exceptions that occur in the debugger JavaScript code.
-
- Renamed GetInternal to GetInternalField and SetInternal to
- SetInternalField in the API and moved InternalFieldCount and
- SetInternalFieldCount from FunctionTemplate to ObjectTemplate.
-
-
-2008-07-09: Version 0.1.1
-
- Fixed bug in stack overflow check code for IA-32 targets where a
- non-tagged value in register eax was pushed to the stack.
-
- Fixed potential quadratic behavior when converting strings to
- numbers.
-
- Fixed bug where the return value from Object::SetProperty could
- end up being the property holder instead of the written value.
-
- Improved debugger support by allowing nested break points and by
- dealing with stack-overflows when compiling functions before
- setting break points in them.
-
-
-2008-07-03: Version 0.1.0
-
- Initial export.
-
diff --git a/src/3rdparty/v8/LICENSE b/src/3rdparty/v8/LICENSE
deleted file mode 100644
index e435050..0000000
--- a/src/3rdparty/v8/LICENSE
+++ /dev/null
@@ -1,52 +0,0 @@
-This license applies to all parts of V8 that are not externally
-maintained libraries. The externally maintained libraries used by V8
-are:
-
- - PCRE test suite, located in
- test/mjsunit/third_party/regexp-pcre.js. This is based on the
- test suite from PCRE-7.3, which is copyrighted by the University
- of Cambridge and Google, Inc. The copyright notice and license
- are embedded in regexp-pcre.js.
-
- - Layout tests, located in test/mjsunit/third_party. These are
- based on layout tests from webkit.org which are copyrighted by
- Apple Computer, Inc. and released under a 3-clause BSD license.
-
- - Strongtalk assembler, the basis of the files assembler-arm-inl.h,
- assembler-arm.cc, assembler-arm.h, assembler-ia32-inl.h,
- assembler-ia32.cc, assembler-ia32.h, assembler.cc and assembler.h.
- This code is copyrighted by Sun Microsystems Inc. and released
- under a 3-clause BSD license.
-
- - Valgrind client API header, located at third_party/valgrind/valgrind.h
- This is release under the BSD license.
-
-These libraries have their own licenses; we recommend you read them,
-as their terms may differ from the terms below.
-
-Copyright 2006-2011, the V8 project authors. All rights reserved.
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following
- disclaimer in the documentation and/or other materials provided
- with the distribution.
- * Neither the name of Google Inc. nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/3rdparty/v8/LICENSE.strongtalk b/src/3rdparty/v8/LICENSE.strongtalk
deleted file mode 100644
index 9bd62e4..0000000
--- a/src/3rdparty/v8/LICENSE.strongtalk
+++ /dev/null
@@ -1,29 +0,0 @@
-Copyright (c) 1994-2006 Sun Microsystems Inc.
-All Rights Reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-- Redistributions of source code must retain the above copyright notice,
-this list of conditions and the following disclaimer.
-
-- Redistribution in binary form must reproduce the above copyright
-notice, this list of conditions and the following disclaimer in the
-documentation and/or other materials provided with the distribution.
-
-- Neither the name of Sun Microsystems or the names of contributors may
-be used to endorse or promote products derived from this software without
-specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/3rdparty/v8/LICENSE.v8 b/src/3rdparty/v8/LICENSE.v8
deleted file mode 100644
index 933718a..0000000
--- a/src/3rdparty/v8/LICENSE.v8
+++ /dev/null
@@ -1,26 +0,0 @@
-Copyright 2006-2011, the V8 project authors. All rights reserved.
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following
- disclaimer in the documentation and/or other materials provided
- with the distribution.
- * Neither the name of Google Inc. nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/3rdparty/v8/LICENSE.valgrind b/src/3rdparty/v8/LICENSE.valgrind
deleted file mode 100644
index fd8ebaf..0000000
--- a/src/3rdparty/v8/LICENSE.valgrind
+++ /dev/null
@@ -1,45 +0,0 @@
-----------------------------------------------------------------
-
-Notice that the following BSD-style license applies to this one
-file (valgrind.h) only. The rest of Valgrind is licensed under the
-terms of the GNU General Public License, version 2, unless
-otherwise indicated. See the COPYING file in the source
-distribution for details.
-
-----------------------------------------------------------------
-
-This file is part of Valgrind, a dynamic binary instrumentation
-framework.
-
-Copyright (C) 2000-2007 Julian Seward. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-
-1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
-2. The origin of this software must not be misrepresented; you must
- not claim that you wrote the original software. If you use this
- software in a product, an acknowledgment in the product
- documentation would be appreciated but is not required.
-
-3. Altered source versions must be plainly marked as such, and must
- not be misrepresented as being the original software.
-
-4. The name of the author may not be used to endorse or promote
- products derived from this software without specific prior written
- permission.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
-OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
-GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/3rdparty/v8/VERSION b/src/3rdparty/v8/VERSION
deleted file mode 100644
index 4169077..0000000
--- a/src/3rdparty/v8/VERSION
+++ /dev/null
@@ -1,11 +0,0 @@
-This is a snapshot of v8 from
-
- http://v8.googlecode.com/svn/branches/bleeding_edge
-
-The commit imported was from the
-
- v8-snapshot-05042011 branch/tag
-
-and has the sha1 checksum
-
- eab749c43efba1fdd862dd1f3a4faceddf1c8d8f
diff --git a/src/3rdparty/v8/include/v8-debug.h b/src/3rdparty/v8/include/v8-debug.h
deleted file mode 100755
index 0bdff84..0000000
--- a/src/3rdparty/v8/include/v8-debug.h
+++ /dev/null
@@ -1,394 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_V8_DEBUG_H_
-#define V8_V8_DEBUG_H_
-
-#include "v8.h"
-
-#ifdef _WIN32
-typedef int int32_t;
-typedef unsigned int uint32_t;
-typedef unsigned short uint16_t; // NOLINT
-typedef long long int64_t; // NOLINT
-
-// Setup for Windows DLL export/import. See v8.h in this directory for
-// information on how to build/use V8 as a DLL.
-#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
-#error both BUILDING_V8_SHARED and USING_V8_SHARED are set - please check the\
- build configuration to ensure that at most one of these is set
-#endif
-
-#ifdef BUILDING_V8_SHARED
-#define EXPORT __declspec(dllexport)
-#elif USING_V8_SHARED
-#define EXPORT __declspec(dllimport)
-#else
-#define EXPORT
-#endif
-
-#else // _WIN32
-
-// Setup for Linux shared library export. See v8.h in this directory for
-// information on how to build/use V8 as shared library.
-#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
-#define EXPORT __attribute__ ((visibility("default")))
-#else // defined(__GNUC__) && (__GNUC__ >= 4)
-#define EXPORT
-#endif // defined(__GNUC__) && (__GNUC__ >= 4)
-
-#endif // _WIN32
-
-
-/**
- * Debugger support for the V8 JavaScript engine.
- */
-namespace v8 {
-
-// Debug events which can occur in the V8 JavaScript engine.
-enum DebugEvent {
- Break = 1,
- Exception = 2,
- NewFunction = 3,
- BeforeCompile = 4,
- AfterCompile = 5,
- ScriptCollected = 6,
- BreakForCommand = 7
-};
-
-
-class EXPORT Debug {
- public:
- /**
- * A client object passed to the v8 debugger whose ownership will be taken by
- * it. v8 is always responsible for deleting the object.
- */
- class ClientData {
- public:
- virtual ~ClientData() {}
- };
-
-
- /**
- * A message object passed to the debug message handler.
- */
- class Message {
- public:
- /**
- * Check type of message.
- */
- virtual bool IsEvent() const = 0;
- virtual bool IsResponse() const = 0;
- virtual DebugEvent GetEvent() const = 0;
-
- /**
- * Indicate whether this is a response to a continue command which will
- * start the VM running after this is processed.
- */
- virtual bool WillStartRunning() const = 0;
-
- /**
- * Access to execution state and event data. Don't store these cross
- * callbacks as their content becomes invalid. These objects are from the
- * debugger event that started the debug message loop.
- */
- virtual Handle<Object> GetExecutionState() const = 0;
- virtual Handle<Object> GetEventData() const = 0;
-
- /**
- * Get the debugger protocol JSON.
- */
- virtual Handle<String> GetJSON() const = 0;
-
- /**
- * Get the context active when the debug event happened. Note this is not
- * the current active context as the JavaScript part of the debugger is
- * running in it's own context which is entered at this point.
- */
- virtual Handle<Context> GetEventContext() const = 0;
-
- /**
- * Client data passed with the corresponding request if any. This is the
- * client_data data value passed into Debug::SendCommand along with the
- * request that led to the message or NULL if the message is an event. The
- * debugger takes ownership of the data and will delete it even if there is
- * no message handler.
- */
- virtual ClientData* GetClientData() const = 0;
-
- virtual ~Message() {}
- };
-
-
- /**
- * An event details object passed to the debug event listener.
- */
- class EventDetails {
- public:
- /**
- * Event type.
- */
- virtual DebugEvent GetEvent() const = 0;
-
- /**
- * Access to execution state and event data of the debug event. Don't store
- * these cross callbacks as their content becomes invalid.
- */
- virtual Handle<Object> GetExecutionState() const = 0;
- virtual Handle<Object> GetEventData() const = 0;
-
- /**
- * Get the context active when the debug event happened. Note this is not
- * the current active context as the JavaScript part of the debugger is
- * running in it's own context which is entered at this point.
- */
- virtual Handle<Context> GetEventContext() const = 0;
-
- /**
- * Client data passed with the corresponding callbak whet it was registered.
- */
- virtual Handle<Value> GetCallbackData() const = 0;
-
- /**
- * Client data passed to DebugBreakForCommand function. The
- * debugger takes ownership of the data and will delete it even if
- * there is no message handler.
- */
- virtual ClientData* GetClientData() const = 0;
-
- virtual ~EventDetails() {}
- };
-
-
- /**
- * Debug event callback function.
- *
- * \param event the type of the debug event that triggered the callback
- * (enum DebugEvent)
- * \param exec_state execution state (JavaScript object)
- * \param event_data event specific data (JavaScript object)
- * \param data value passed by the user to SetDebugEventListener
- */
- typedef void (*EventCallback)(DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- Handle<Value> data);
-
- /**
- * Debug event callback function.
- *
- * \param event_details object providing information about the debug event
- *
- * A EventCallback2 does not take possession of the event data,
- * and must not rely on the data persisting after the handler returns.
- */
- typedef void (*EventCallback2)(const EventDetails& event_details);
-
- /**
- * Debug message callback function.
- *
- * \param message the debug message handler message object
- * \param length length of the message
- * \param client_data the data value passed when registering the message handler
-
- * A MessageHandler does not take possession of the message string,
- * and must not rely on the data persisting after the handler returns.
- *
- * This message handler is deprecated. Use MessageHandler2 instead.
- */
- typedef void (*MessageHandler)(const uint16_t* message, int length,
- ClientData* client_data);
-
- /**
- * Debug message callback function.
- *
- * \param message the debug message handler message object
- *
- * A MessageHandler does not take possession of the message data,
- * and must not rely on the data persisting after the handler returns.
- */
- typedef void (*MessageHandler2)(const Message& message);
-
- /**
- * Debug host dispatch callback function.
- */
- typedef void (*HostDispatchHandler)();
-
- /**
- * Callback function for the host to ensure debug messages are processed.
- */
- typedef void (*DebugMessageDispatchHandler)();
-
- // Set a C debug event listener.
- static bool SetDebugEventListener(EventCallback that,
- Handle<Value> data = Handle<Value>());
- static bool SetDebugEventListener2(EventCallback2 that,
- Handle<Value> data = Handle<Value>());
-
- // Set a JavaScript debug event listener.
- static bool SetDebugEventListener(v8::Handle<v8::Object> that,
- Handle<Value> data = Handle<Value>());
-
- // Schedule a debugger break to happen when JavaScript code is run
- // in the given isolate. If no isolate is provided the default
- // isolate is used.
- static void DebugBreak(Isolate* isolate = NULL);
-
- // Remove scheduled debugger break in given isolate if it has not
- // happened yet. If no isolate is provided the default isolate is
- // used.
- static void CancelDebugBreak(Isolate* isolate = NULL);
-
- // Break execution of JavaScript in the given isolate (this method
- // can be invoked from a non-VM thread) for further client command
- // execution on a VM thread. Client data is then passed in
- // EventDetails to EventCallback at the moment when the VM actually
- // stops. If no isolate is provided the default isolate is used.
- static void DebugBreakForCommand(ClientData* data = NULL,
- Isolate* isolate = NULL);
-
- // Message based interface. The message protocol is JSON. NOTE the message
- // handler thread is not supported any more parameter must be false.
- static void SetMessageHandler(MessageHandler handler,
- bool message_handler_thread = false);
- static void SetMessageHandler2(MessageHandler2 handler);
-
- // If no isolate is provided the default isolate is
- // used.
- static void SendCommand(const uint16_t* command, int length,
- ClientData* client_data = NULL,
- Isolate* isolate = NULL);
-
- // Dispatch interface.
- static void SetHostDispatchHandler(HostDispatchHandler handler,
- int period = 100);
-
- /**
- * Register a callback function to be called when a debug message has been
- * received and is ready to be processed. For the debug messages to be
- * processed V8 needs to be entered, and in certain embedding scenarios this
- * callback can be used to make sure V8 is entered for the debug message to
- * be processed. Note that debug messages will only be processed if there is
- * a V8 break. This can happen automatically by using the option
- * --debugger-auto-break.
- * \param provide_locker requires that V8 acquires v8::Locker for you before
- * calling handler
- */
- static void SetDebugMessageDispatchHandler(
- DebugMessageDispatchHandler handler, bool provide_locker = false);
-
- /**
- * Run a JavaScript function in the debugger.
- * \param fun the function to call
- * \param data passed as second argument to the function
- * With this call the debugger is entered and the function specified is called
- * with the execution state as the first argument. This makes it possible to
- * get access to information otherwise not available during normal JavaScript
- * execution e.g. details on stack frames. Receiver of the function call will
- * be the debugger context global object, however this is a subject to change.
- * The following example show a JavaScript function which when passed to
- * v8::Debug::Call will return the current line of JavaScript execution.
- *
- * \code
- * function frame_source_line(exec_state) {
- * return exec_state.frame(0).sourceLine();
- * }
- * \endcode
- */
- static Local<Value> Call(v8::Handle<v8::Function> fun,
- Handle<Value> data = Handle<Value>());
-
- /**
- * Returns a mirror object for the given object.
- */
- static Local<Value> GetMirror(v8::Handle<v8::Value> obj);
-
- /**
- * Enable the V8 builtin debug agent. The debugger agent will listen on the
- * supplied TCP/IP port for remote debugger connection.
- * \param name the name of the embedding application
- * \param port the TCP/IP port to listen on
- * \param wait_for_connection whether V8 should pause on a first statement
- * allowing remote debugger to connect before anything interesting happened
- */
- static bool EnableAgent(const char* name, int port,
- bool wait_for_connection = false);
-
- /**
- * Makes V8 process all pending debug messages.
- *
- * From V8 point of view all debug messages come asynchronously (e.g. from
- * remote debugger) but they all must be handled synchronously: V8 cannot
- * do 2 things at one time so normal script execution must be interrupted
- * for a while.
- *
- * Generally when message arrives V8 may be in one of 3 states:
- * 1. V8 is running script; V8 will automatically interrupt and process all
- * pending messages (however auto_break flag should be enabled);
- * 2. V8 is suspended on debug breakpoint; in this state V8 is dedicated
- * to reading and processing debug messages;
- * 3. V8 is not running at all or has called some long-working C++ function;
- * by default it means that processing of all debug message will be deferred
- * until V8 gets control again; however, embedding application may improve
- * this by manually calling this method.
- *
- * It makes sense to call this method whenever a new debug message arrived and
- * V8 is not already running. Method v8::Debug::SetDebugMessageDispatchHandler
- * should help with the former condition.
- *
- * Technically this method in many senses is equivalent to executing empty
- * script:
- * 1. It does nothing except for processing all pending debug messages.
- * 2. It should be invoked with the same precautions and from the same context
- * as V8 script would be invoked from, because:
- * a. with "evaluate" command it can do whatever normal script can do,
- * including all native calls;
- * b. no other thread should call V8 while this method is running
- * (v8::Locker may be used here).
- *
- * "Evaluate" debug command behavior currently is not specified in scope
- * of this method.
- */
- static void ProcessDebugMessages();
-
- /**
- * Debugger is running in it's own context which is entered while debugger
- * messages are being dispatched. This is an explicit getter for this
- * debugger context. Note that the content of the debugger context is subject
- * to change.
- */
- static Local<Context> GetDebugContext();
-};
-
-
-} // namespace v8
-
-
-#undef EXPORT
-
-
-#endif // V8_V8_DEBUG_H_
diff --git a/src/3rdparty/v8/include/v8-preparser.h b/src/3rdparty/v8/include/v8-preparser.h
deleted file mode 100644
index 7baac94..0000000
--- a/src/3rdparty/v8/include/v8-preparser.h
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef PREPARSER_H
-#define PREPARSER_H
-
-#include "v8stdint.h"
-
-#ifdef _WIN32
-
-// Setup for Windows DLL export/import. When building the V8 DLL the
-// BUILDING_V8_SHARED needs to be defined. When building a program which uses
-// the V8 DLL USING_V8_SHARED needs to be defined. When either building the V8
-// static library or building a program which uses the V8 static library neither
-// BUILDING_V8_SHARED nor USING_V8_SHARED should be defined.
-#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
-#error both BUILDING_V8_SHARED and USING_V8_SHARED are set - please check the\
- build configuration to ensure that at most one of these is set
-#endif
-
-#ifdef BUILDING_V8_SHARED
-#define V8EXPORT __declspec(dllexport)
-#elif USING_V8_SHARED
-#define V8EXPORT __declspec(dllimport)
-#else
-#define V8EXPORT
-#endif // BUILDING_V8_SHARED
-
-#else // _WIN32
-
-// Setup for Linux shared library export. There is no need to distinguish
-// between building or using the V8 shared library, but we should not
-// export symbols when we are building a static library.
-#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
-#define V8EXPORT __attribute__ ((visibility("default")))
-#else // defined(__GNUC__) && (__GNUC__ >= 4)
-#define V8EXPORT
-#endif // defined(__GNUC__) && (__GNUC__ >= 4)
-
-#endif // _WIN32
-
-
-namespace v8 {
-
-
-class PreParserData {
- public:
- PreParserData(size_t size, const uint8_t* data)
- : data_(data), size_(size) { }
-
- // Create a PreParserData value where stack_overflow reports true.
- static PreParserData StackOverflow() { return PreParserData(0, NULL); }
- // Whether the pre-parser stopped due to a stack overflow.
- // If this is the case, size() and data() should not be used.
-
- bool stack_overflow() { return size_ == 0u; }
-
- // The size of the data in bytes.
- size_t size() const { return size_; }
-
- // Pointer to the data.
- const uint8_t* data() const { return data_; }
-
- private:
- const uint8_t* const data_;
- const size_t size_;
-};
-
-
-// Interface for a stream of Unicode characters.
-class UnicodeInputStream {
- public:
- virtual ~UnicodeInputStream();
-
- // Returns the next Unicode code-point in the input, or a negative value when
- // there is no more input in the stream.
- virtual int32_t Next() = 0;
-};
-
-
-// Preparse a JavaScript program. The source code is provided as a
-// UnicodeInputStream. The max_stack_size limits the amount of stack
-// space that the preparser is allowed to use. If the preparser uses
-// more stack space than the limit provided, the result's stack_overflow()
-// method will return true. Otherwise the result contains preparser
-// data that can be used by the V8 parser to speed up parsing.
-PreParserData V8EXPORT Preparse(UnicodeInputStream* input,
- size_t max_stack_size);
-
-} // namespace v8.
-
-#endif // PREPARSER_H
diff --git a/src/3rdparty/v8/include/v8-profiler.h b/src/3rdparty/v8/include/v8-profiler.h
deleted file mode 100644
index db56e26..0000000
--- a/src/3rdparty/v8/include/v8-profiler.h
+++ /dev/null
@@ -1,505 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_V8_PROFILER_H_
-#define V8_V8_PROFILER_H_
-
-#include "v8.h"
-
-#ifdef _WIN32
-// Setup for Windows DLL export/import. See v8.h in this directory for
-// information on how to build/use V8 as a DLL.
-#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
-#error both BUILDING_V8_SHARED and USING_V8_SHARED are set - please check the\
- build configuration to ensure that at most one of these is set
-#endif
-
-#ifdef BUILDING_V8_SHARED
-#define V8EXPORT __declspec(dllexport)
-#elif USING_V8_SHARED
-#define V8EXPORT __declspec(dllimport)
-#else
-#define V8EXPORT
-#endif
-
-#else // _WIN32
-
-// Setup for Linux shared library export. See v8.h in this directory for
-// information on how to build/use V8 as shared library.
-#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
-#define V8EXPORT __attribute__ ((visibility("default")))
-#else // defined(__GNUC__) && (__GNUC__ >= 4)
-#define V8EXPORT
-#endif // defined(__GNUC__) && (__GNUC__ >= 4)
-
-#endif // _WIN32
-
-
-/**
- * Profiler support for the V8 JavaScript engine.
- */
-namespace v8 {
-
-
-/**
- * CpuProfileNode represents a node in a call graph.
- */
-class V8EXPORT CpuProfileNode {
- public:
- /** Returns function name (empty string for anonymous functions.) */
- Handle<String> GetFunctionName() const;
-
- /** Returns resource name for script from where the function originates. */
- Handle<String> GetScriptResourceName() const;
-
- /**
- * Returns the number, 1-based, of the line where the function originates.
- * kNoLineNumberInfo if no line number information is available.
- */
- int GetLineNumber() const;
-
- /**
- * Returns total (self + children) execution time of the function,
- * in milliseconds, estimated by samples count.
- */
- double GetTotalTime() const;
-
- /**
- * Returns self execution time of the function, in milliseconds,
- * estimated by samples count.
- */
- double GetSelfTime() const;
-
- /** Returns the count of samples where function exists. */
- double GetTotalSamplesCount() const;
-
- /** Returns the count of samples where function was currently executing. */
- double GetSelfSamplesCount() const;
-
- /** Returns function entry UID. */
- unsigned GetCallUid() const;
-
- /** Returns child nodes count of the node. */
- int GetChildrenCount() const;
-
- /** Retrieves a child node by index. */
- const CpuProfileNode* GetChild(int index) const;
-
- static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
-};
-
-
-/**
- * CpuProfile contains a CPU profile in a form of two call trees:
- * - top-down (from main() down to functions that do all the work);
- * - bottom-up call graph (in backward direction).
- */
-class V8EXPORT CpuProfile {
- public:
- /** Returns CPU profile UID (assigned by the profiler.) */
- unsigned GetUid() const;
-
- /** Returns CPU profile title. */
- Handle<String> GetTitle() const;
-
- /** Returns the root node of the bottom up call tree. */
- const CpuProfileNode* GetBottomUpRoot() const;
-
- /** Returns the root node of the top down call tree. */
- const CpuProfileNode* GetTopDownRoot() const;
-
- /**
- * Deletes the profile and removes it from CpuProfiler's list.
- * All pointers to nodes previously returned become invalid.
- * Profiles with the same uid but obtained using different
- * security token are not deleted, but become inaccessible
- * using FindProfile method. It is embedder's responsibility
- * to call Delete on these profiles.
- */
- void Delete();
-};
-
-
-/**
- * Interface for controlling CPU profiling.
- */
-class V8EXPORT CpuProfiler {
- public:
- /**
- * A note on security tokens usage. As scripts from different
- * origins can run inside a single V8 instance, it is possible to
- * have functions from different security contexts intermixed in a
- * single CPU profile. To avoid exposing function names belonging to
- * other contexts, filtering by security token is performed while
- * obtaining profiling results.
- */
-
- /**
- * Returns the number of profiles collected (doesn't include
- * profiles that are being collected at the moment of call.)
- */
- static int GetProfilesCount();
-
- /** Returns a profile by index. */
- static const CpuProfile* GetProfile(
- int index,
- Handle<Value> security_token = Handle<Value>());
-
- /** Returns a profile by uid. */
- static const CpuProfile* FindProfile(
- unsigned uid,
- Handle<Value> security_token = Handle<Value>());
-
- /**
- * Starts collecting CPU profile. Title may be an empty string. It
- * is allowed to have several profiles being collected at
- * once. Attempts to start collecting several profiles with the same
- * title are silently ignored. While collecting a profile, functions
- * from all security contexts are included in it. The token-based
- * filtering is only performed when querying for a profile.
- */
- static void StartProfiling(Handle<String> title);
-
- /**
- * Stops collecting CPU profile with a given title and returns it.
- * If the title given is empty, finishes the last profile started.
- */
- static const CpuProfile* StopProfiling(
- Handle<String> title,
- Handle<Value> security_token = Handle<Value>());
-
- /**
- * Deletes all existing profiles, also cancelling all profiling
- * activity. All previously returned pointers to profiles and their
- * contents become invalid after this call.
- */
- static void DeleteAllProfiles();
-};
-
-
-class HeapGraphNode;
-
-
-/**
- * HeapSnapshotEdge represents a directed connection between heap
- * graph nodes: from retaners to retained nodes.
- */
-class V8EXPORT HeapGraphEdge {
- public:
- enum Type {
- kContextVariable = 0, // A variable from a function context.
- kElement = 1, // An element of an array.
- kProperty = 2, // A named object property.
- kInternal = 3, // A link that can't be accessed from JS,
- // thus, its name isn't a real property name
- // (e.g. parts of a ConsString).
- kHidden = 4, // A link that is needed for proper sizes
- // calculation, but may be hidden from user.
- kShortcut = 5 // A link that must not be followed during
- // sizes calculation.
- };
-
- /** Returns edge type (see HeapGraphEdge::Type). */
- Type GetType() const;
-
- /**
- * Returns edge name. This can be a variable name, an element index, or
- * a property name.
- */
- Handle<Value> GetName() const;
-
- /** Returns origin node. */
- const HeapGraphNode* GetFromNode() const;
-
- /** Returns destination node. */
- const HeapGraphNode* GetToNode() const;
-};
-
-
-/**
- * HeapGraphNode represents a node in a heap graph.
- */
-class V8EXPORT HeapGraphNode {
- public:
- enum Type {
- kHidden = 0, // Hidden node, may be filtered when shown to user.
- kArray = 1, // An array of elements.
- kString = 2, // A string.
- kObject = 3, // A JS object (except for arrays and strings).
- kCode = 4, // Compiled code.
- kClosure = 5, // Function closure.
- kRegExp = 6, // RegExp.
- kHeapNumber = 7, // Number stored in the heap.
- kNative = 8 // Native object (not from V8 heap).
- };
-
- /** Returns node type (see HeapGraphNode::Type). */
- Type GetType() const;
-
- /**
- * Returns node name. Depending on node's type this can be the name
- * of the constructor (for objects), the name of the function (for
- * closures), string value, or an empty string (for compiled code).
- */
- Handle<String> GetName() const;
-
- /**
- * Returns node id. For the same heap object, the id remains the same
- * across all snapshots. Not applicable to aggregated heap snapshots
- * as they only contain aggregated instances.
- */
- uint64_t GetId() const;
-
- /**
- * Returns the number of instances. Only applicable to aggregated
- * heap snapshots.
- */
- int GetInstancesCount() const;
-
- /** Returns node's own size, in bytes. */
- int GetSelfSize() const;
-
- /**
- * Returns node's retained size, in bytes. That is, self + sizes of
- * the objects that are reachable only from this object. In other
- * words, the size of memory that will be reclaimed having this node
- * collected.
- *
- * Exact retained size calculation has O(N) (number of nodes)
- * computational complexity, while approximate has O(1). It is
- * assumed that initially heap profiling tools provide approximate
- * sizes for all nodes, and then exact sizes are calculated for the
- * most 'interesting' nodes.
- */
- int GetRetainedSize(bool exact) const;
-
- /** Returns child nodes count of the node. */
- int GetChildrenCount() const;
-
- /** Retrieves a child by index. */
- const HeapGraphEdge* GetChild(int index) const;
-
- /** Returns retainer nodes count of the node. */
- int GetRetainersCount() const;
-
- /** Returns a retainer by index. */
- const HeapGraphEdge* GetRetainer(int index) const;
-
- /**
- * Returns a dominator node. This is the node that participates in every
- * path from the snapshot root to the current node.
- */
- const HeapGraphNode* GetDominatorNode() const;
-};
-
-
-/**
- * HeapSnapshots record the state of the JS heap at some moment.
- */
-class V8EXPORT HeapSnapshot {
- public:
- enum Type {
- kFull = 0, // Heap snapshot with all instances and references.
- kAggregated = 1 // Snapshot doesn't contain individual heap entries,
- // instead they are grouped by constructor name.
- };
- enum SerializationFormat {
- kJSON = 0 // See format description near 'Serialize' method.
- };
-
- /** Returns heap snapshot type. */
- Type GetType() const;
-
- /** Returns heap snapshot UID (assigned by the profiler.) */
- unsigned GetUid() const;
-
- /** Returns heap snapshot title. */
- Handle<String> GetTitle() const;
-
- /** Returns the root node of the heap graph. */
- const HeapGraphNode* GetRoot() const;
-
- /** Returns a node by its id. */
- const HeapGraphNode* GetNodeById(uint64_t id) const;
-
- /**
- * Deletes the snapshot and removes it from HeapProfiler's list.
- * All pointers to nodes, edges and paths previously returned become
- * invalid.
- */
- void Delete();
-
- /**
- * Prepare a serialized representation of the snapshot. The result
- * is written into the stream provided in chunks of specified size.
- * The total length of the serialized snapshot is unknown in
- * advance, it is can be roughly equal to JS heap size (that means,
- * it can be really big - tens of megabytes).
- *
- * For the JSON format, heap contents are represented as an object
- * with the following structure:
- *
- * {
- * snapshot: {title: "...", uid: nnn},
- * nodes: [
- * meta-info (JSON string),
- * nodes themselves
- * ],
- * strings: [strings]
- * }
- *
- * Outgoing node links are stored after each node. Nodes reference strings
- * and other nodes by their indexes in corresponding arrays.
- */
- void Serialize(OutputStream* stream, SerializationFormat format) const;
-};
-
-
-class RetainedObjectInfo;
-
-/**
- * Interface for controlling heap profiling.
- */
-class V8EXPORT HeapProfiler {
- public:
- /**
- * Callback function invoked for obtaining RetainedObjectInfo for
- * the given JavaScript wrapper object. It is prohibited to enter V8
- * while the callback is running: only getters on the handle and
- * GetPointerFromInternalField on the objects are allowed.
- */
- typedef RetainedObjectInfo* (*WrapperInfoCallback)
- (uint16_t class_id, Handle<Value> wrapper);
-
- /** Returns the number of snapshots taken. */
- static int GetSnapshotsCount();
-
- /** Returns a snapshot by index. */
- static const HeapSnapshot* GetSnapshot(int index);
-
- /** Returns a profile by uid. */
- static const HeapSnapshot* FindSnapshot(unsigned uid);
-
- /**
- * Takes a heap snapshot and returns it. Title may be an empty string.
- * See HeapSnapshot::Type for types description.
- */
- static const HeapSnapshot* TakeSnapshot(
- Handle<String> title,
- HeapSnapshot::Type type = HeapSnapshot::kFull,
- ActivityControl* control = NULL);
-
- /**
- * Deletes all snapshots taken. All previously returned pointers to
- * snapshots and their contents become invalid after this call.
- */
- static void DeleteAllSnapshots();
-
- /** Binds a callback to embedder's class ID. */
- static void DefineWrapperClass(
- uint16_t class_id,
- WrapperInfoCallback callback);
-
- /**
- * Default value of persistent handle class ID. Must not be used to
- * define a class. Can be used to reset a class of a persistent
- * handle.
- */
- static const uint16_t kPersistentHandleNoClassId = 0;
-};
-
-
-/**
- * Interface for providing information about embedder's objects
- * held by global handles. This information is reported in two ways:
- *
- * 1. When calling AddObjectGroup, an embedder may pass
- * RetainedObjectInfo instance describing the group. To collect
- * this information while taking a heap snapshot, V8 calls GC
- * prologue and epilogue callbacks.
- *
- * 2. When a heap snapshot is collected, V8 additionally
- * requests RetainedObjectInfos for persistent handles that
- * were not previously reported via AddObjectGroup.
- *
- * Thus, if an embedder wants to provide information about native
- * objects for heap snapshots, he can do it in a GC prologue
- * handler, and / or by assigning wrapper class ids in the following way:
- *
- * 1. Bind a callback to class id by calling DefineWrapperClass.
- * 2. Call SetWrapperClassId on certain persistent handles.
- *
- * V8 takes ownership of RetainedObjectInfo instances passed to it and
- * keeps them alive only during snapshot collection. Afterwards, they
- * are freed by calling the Dispose class function.
- */
-class V8EXPORT RetainedObjectInfo { // NOLINT
- public:
- /** Called by V8 when it no longer needs an instance. */
- virtual void Dispose() = 0;
-
- /** Returns whether two instances are equivalent. */
- virtual bool IsEquivalent(RetainedObjectInfo* other) = 0;
-
- /**
- * Returns hash value for the instance. Equivalent instances
- * must have the same hash value.
- */
- virtual intptr_t GetHash() = 0;
-
- /**
- * Returns human-readable label. It must be a NUL-terminated UTF-8
- * encoded string. V8 copies its contents during a call to GetLabel.
- */
- virtual const char* GetLabel() = 0;
-
- /**
- * Returns element count in case if a global handle retains
- * a subgraph by holding one of its nodes.
- */
- virtual intptr_t GetElementCount() { return -1; }
-
- /** Returns embedder's object size in bytes. */
- virtual intptr_t GetSizeInBytes() { return -1; }
-
- protected:
- RetainedObjectInfo() {}
- virtual ~RetainedObjectInfo() {}
-
- private:
- RetainedObjectInfo(const RetainedObjectInfo&);
- RetainedObjectInfo& operator=(const RetainedObjectInfo&);
-};
-
-
-} // namespace v8
-
-
-#undef V8EXPORT
-
-
-#endif // V8_V8_PROFILER_H_
diff --git a/src/3rdparty/v8/include/v8-testing.h b/src/3rdparty/v8/include/v8-testing.h
deleted file mode 100644
index 245f74d..0000000
--- a/src/3rdparty/v8/include/v8-testing.h
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_V8_TEST_H_
-#define V8_V8_TEST_H_
-
-#include "v8.h"
-
-#ifdef _WIN32
-// Setup for Windows DLL export/import. See v8.h in this directory for
-// information on how to build/use V8 as a DLL.
-#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
-#error both BUILDING_V8_SHARED and USING_V8_SHARED are set - please check the\
- build configuration to ensure that at most one of these is set
-#endif
-
-#ifdef BUILDING_V8_SHARED
-#define V8EXPORT __declspec(dllexport)
-#elif USING_V8_SHARED
-#define V8EXPORT __declspec(dllimport)
-#else
-#define V8EXPORT
-#endif
-
-#else // _WIN32
-
-// Setup for Linux shared library export. See v8.h in this directory for
-// information on how to build/use V8 as shared library.
-#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
-#define V8EXPORT __attribute__ ((visibility("default")))
-#else // defined(__GNUC__) && (__GNUC__ >= 4)
-#define V8EXPORT
-#endif // defined(__GNUC__) && (__GNUC__ >= 4)
-
-#endif // _WIN32
-
-
-/**
- * Testing support for the V8 JavaScript engine.
- */
-namespace v8 {
-
-class V8EXPORT Testing {
- public:
- enum StressType {
- kStressTypeOpt,
- kStressTypeDeopt
- };
-
- /**
- * Set the type of stressing to do. The default if not set is kStressTypeOpt.
- */
- static void SetStressRunType(StressType type);
-
- /**
- * Get the number of runs of a given test that is required to get the full
- * stress coverage.
- */
- static int GetStressRuns();
-
- /**
- * Indicate the number of the run which is about to start. The value of run
- * should be between 0 and one less than the result from GetStressRuns()
- */
- static void PrepareStressRun(int run);
-
- /**
- * Force deoptimization of all functions.
- */
- static void DeoptimizeAll();
-};
-
-
-} // namespace v8
-
-
-#undef V8EXPORT
-
-
-#endif // V8_V8_TEST_H_
diff --git a/src/3rdparty/v8/include/v8.h b/src/3rdparty/v8/include/v8.h
deleted file mode 100644
index fb7cc34..0000000
--- a/src/3rdparty/v8/include/v8.h
+++ /dev/null
@@ -1,4115 +0,0 @@
-// Copyright 2007-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/** \mainpage V8 API Reference Guide
- *
- * V8 is Google's open source JavaScript engine.
- *
- * This set of documents provides reference material generated from the
- * V8 header file, include/v8.h.
- *
- * For other documentation see http://code.google.com/apis/v8/
- */
-
-#ifndef V8_H_
-#define V8_H_
-
-#include "v8stdint.h"
-
-#ifdef _WIN32
-
-// Setup for Windows DLL export/import. When building the V8 DLL the
-// BUILDING_V8_SHARED needs to be defined. When building a program which uses
-// the V8 DLL USING_V8_SHARED needs to be defined. When either building the V8
-// static library or building a program which uses the V8 static library neither
-// BUILDING_V8_SHARED nor USING_V8_SHARED should be defined.
-#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
-#error both BUILDING_V8_SHARED and USING_V8_SHARED are set - please check the\
- build configuration to ensure that at most one of these is set
-#endif
-
-#ifdef BUILDING_V8_SHARED
-#define V8EXPORT __declspec(dllexport)
-#elif USING_V8_SHARED
-#define V8EXPORT __declspec(dllimport)
-#else
-#define V8EXPORT
-#endif // BUILDING_V8_SHARED
-
-#else // _WIN32
-
-// Setup for Linux shared library export. There is no need to distinguish
-// between building or using the V8 shared library, but we should not
-// export symbols when we are building a static library.
-#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
-#define V8EXPORT __attribute__ ((visibility("default")))
-#else // defined(__GNUC__) && (__GNUC__ >= 4)
-#define V8EXPORT
-#endif // defined(__GNUC__) && (__GNUC__ >= 4)
-
-#endif // _WIN32
-
-/**
- * The v8 JavaScript engine.
- */
-namespace v8 {
-
-class Context;
-class String;
-class Value;
-class Utils;
-class Number;
-class Object;
-class Array;
-class Int32;
-class Uint32;
-class External;
-class Primitive;
-class Boolean;
-class Integer;
-class Function;
-class Date;
-class ImplementationUtilities;
-class Signature;
-template <class T> class Handle;
-template <class T> class Local;
-template <class T> class Persistent;
-class FunctionTemplate;
-class ObjectTemplate;
-class Data;
-class AccessorInfo;
-class StackTrace;
-class StackFrame;
-
-namespace internal {
-
-class Arguments;
-class Object;
-class Heap;
-class HeapObject;
-class Isolate;
-}
-
-
-// --- W e a k H a n d l e s
-
-
-/**
- * A weak reference callback function.
- *
- * This callback should either explicitly invoke Dispose on |object| if
- * V8 wrapper is not needed anymore, or 'revive' it by invocation of MakeWeak.
- *
- * \param object the weak global object to be reclaimed by the garbage collector
- * \param parameter the value passed in when making the weak global object
- */
-typedef void (*WeakReferenceCallback)(Persistent<Value> object,
- void* parameter);
-
-
-// --- H a n d l e s ---
-
-#define TYPE_CHECK(T, S) \
- while (false) { \
- *(static_cast<T* volatile*>(0)) = static_cast<S*>(0); \
- }
-
-/**
- * An object reference managed by the v8 garbage collector.
- *
- * All objects returned from v8 have to be tracked by the garbage
- * collector so that it knows that the objects are still alive. Also,
- * because the garbage collector may move objects, it is unsafe to
- * point directly to an object. Instead, all objects are stored in
- * handles which are known by the garbage collector and updated
- * whenever an object moves. Handles should always be passed by value
- * (except in cases like out-parameters) and they should never be
- * allocated on the heap.
- *
- * There are two types of handles: local and persistent handles.
- * Local handles are light-weight and transient and typically used in
- * local operations. They are managed by HandleScopes. Persistent
- * handles can be used when storing objects across several independent
- * operations and have to be explicitly deallocated when they're no
- * longer used.
- *
- * It is safe to extract the object stored in the handle by
- * dereferencing the handle (for instance, to extract the Object* from
- * an Handle<Object>); the value will still be governed by a handle
- * behind the scenes and the same rules apply to these values as to
- * their handles.
- */
-template <class T> class Handle {
- public:
-
- /**
- * Creates an empty handle.
- */
- inline Handle();
-
- /**
- * Creates a new handle for the specified value.
- */
- inline explicit Handle(T* val) : val_(val) { }
-
- /**
- * Creates a handle for the contents of the specified handle. This
- * constructor allows you to pass handles as arguments by value and
- * to assign between handles. However, if you try to assign between
- * incompatible handles, for instance from a Handle<String> to a
- * Handle<Number> it will cause a compiletime error. Assigning
- * between compatible handles, for instance assigning a
- * Handle<String> to a variable declared as Handle<Value>, is legal
- * because String is a subclass of Value.
- */
- template <class S> inline Handle(Handle<S> that)
- : val_(reinterpret_cast<T*>(*that)) {
- /**
- * This check fails when trying to convert between incompatible
- * handles. For example, converting from a Handle<String> to a
- * Handle<Number>.
- */
- TYPE_CHECK(T, S);
- }
-
- /**
- * Returns true if the handle is empty.
- */
- inline bool IsEmpty() const { return val_ == 0; }
-
- inline T* operator->() const { return val_; }
-
- inline T* operator*() const { return val_; }
-
- /**
- * Sets the handle to be empty. IsEmpty() will then return true.
- */
- inline void Clear() { this->val_ = 0; }
-
- /**
- * Checks whether two handles are the same.
- * Returns true if both are empty, or if the objects
- * to which they refer are identical.
- * The handles' references are not checked.
- */
- template <class S> inline bool operator==(Handle<S> that) const {
- internal::Object** a = reinterpret_cast<internal::Object**>(**this);
- internal::Object** b = reinterpret_cast<internal::Object**>(*that);
- if (a == 0) return b == 0;
- if (b == 0) return false;
- return *a == *b;
- }
-
- /**
- * Checks whether two handles are different.
- * Returns true if only one of the handles is empty, or if
- * the objects to which they refer are different.
- * The handles' references are not checked.
- */
- template <class S> inline bool operator!=(Handle<S> that) const {
- return !operator==(that);
- }
-
- template <class S> static inline Handle<T> Cast(Handle<S> that) {
-#ifdef V8_ENABLE_CHECKS
- // If we're going to perform the type check then we have to check
- // that the handle isn't empty before doing the checked cast.
- if (that.IsEmpty()) return Handle<T>();
-#endif
- return Handle<T>(T::Cast(*that));
- }
-
- template <class S> inline Handle<S> As() {
- return Handle<S>::Cast(*this);
- }
-
- private:
- T* val_;
-};
-
-
-/**
- * A light-weight stack-allocated object handle. All operations
- * that return objects from within v8 return them in local handles. They
- * are created within HandleScopes, and all local handles allocated within a
- * handle scope are destroyed when the handle scope is destroyed. Hence it
- * is not necessary to explicitly deallocate local handles.
- */
-template <class T> class Local : public Handle<T> {
- public:
- inline Local();
- template <class S> inline Local(Local<S> that)
- : Handle<T>(reinterpret_cast<T*>(*that)) {
- /**
- * This check fails when trying to convert between incompatible
- * handles. For example, converting from a Handle<String> to a
- * Handle<Number>.
- */
- TYPE_CHECK(T, S);
- }
- template <class S> inline Local(S* that) : Handle<T>(that) { }
- template <class S> static inline Local<T> Cast(Local<S> that) {
-#ifdef V8_ENABLE_CHECKS
- // If we're going to perform the type check then we have to check
- // that the handle isn't empty before doing the checked cast.
- if (that.IsEmpty()) return Local<T>();
-#endif
- return Local<T>(T::Cast(*that));
- }
-
- template <class S> inline Local<S> As() {
- return Local<S>::Cast(*this);
- }
-
- /** Create a local handle for the content of another handle.
- * The referee is kept alive by the local handle even when
- * the original handle is destroyed/disposed.
- */
- inline static Local<T> New(Handle<T> that);
-};
-
-
-/**
- * An object reference that is independent of any handle scope. Where
- * a Local handle only lives as long as the HandleScope in which it was
- * allocated, a Persistent handle remains valid until it is explicitly
- * disposed.
- *
- * A persistent handle contains a reference to a storage cell within
- * the v8 engine which holds an object value and which is updated by
- * the garbage collector whenever the object is moved. A new storage
- * cell can be created using Persistent::New and existing handles can
- * be disposed using Persistent::Dispose. Since persistent handles
- * are passed by value you may have many persistent handle objects
- * that point to the same storage cell. For instance, if you pass a
- * persistent handle as an argument to a function you will not get two
- * different storage cells but rather two references to the same
- * storage cell.
- */
-template <class T> class Persistent : public Handle<T> {
- public:
-
- /**
- * Creates an empty persistent handle that doesn't point to any
- * storage cell.
- */
- inline Persistent();
-
- /**
- * Creates a persistent handle for the same storage cell as the
- * specified handle. This constructor allows you to pass persistent
- * handles as arguments by value and to assign between persistent
- * handles. However, attempting to assign between incompatible
- * persistent handles, for instance from a Persistent<String> to a
- * Persistent<Number> will cause a compiletime error. Assigning
- * between compatible persistent handles, for instance assigning a
- * Persistent<String> to a variable declared as Persistent<Value>,
- * is allowed as String is a subclass of Value.
- */
- template <class S> inline Persistent(Persistent<S> that)
- : Handle<T>(reinterpret_cast<T*>(*that)) {
- /**
- * This check fails when trying to convert between incompatible
- * handles. For example, converting from a Handle<String> to a
- * Handle<Number>.
- */
- TYPE_CHECK(T, S);
- }
-
- template <class S> inline Persistent(S* that) : Handle<T>(that) { }
-
- /**
- * "Casts" a plain handle which is known to be a persistent handle
- * to a persistent handle.
- */
- template <class S> explicit inline Persistent(Handle<S> that)
- : Handle<T>(*that) { }
-
- template <class S> static inline Persistent<T> Cast(Persistent<S> that) {
-#ifdef V8_ENABLE_CHECKS
- // If we're going to perform the type check then we have to check
- // that the handle isn't empty before doing the checked cast.
- if (that.IsEmpty()) return Persistent<T>();
-#endif
- return Persistent<T>(T::Cast(*that));
- }
-
- template <class S> inline Persistent<S> As() {
- return Persistent<S>::Cast(*this);
- }
-
- /**
- * Creates a new persistent handle for an existing local or
- * persistent handle.
- */
- inline static Persistent<T> New(Handle<T> that);
-
- /**
- * Releases the storage cell referenced by this persistent handle.
- * Does not remove the reference to the cell from any handles.
- * This handle's reference, and any any other references to the storage
- * cell remain and IsEmpty will still return false.
- */
- inline void Dispose();
-
- /**
- * Make the reference to this object weak. When only weak handles
- * refer to the object, the garbage collector will perform a
- * callback to the given V8::WeakReferenceCallback function, passing
- * it the object reference and the given parameters.
- */
- inline void MakeWeak(void* parameters, WeakReferenceCallback callback);
-
- /** Clears the weak reference to this object.*/
- inline void ClearWeak();
-
- /**
- *Checks if the handle holds the only reference to an object.
- */
- inline bool IsNearDeath() const;
-
- /**
- * Returns true if the handle's reference is weak.
- */
- inline bool IsWeak() const;
-
- /**
- * Assigns a wrapper class ID to the handle. See RetainedObjectInfo
- * interface description in v8-profiler.h for details.
- */
- inline void SetWrapperClassId(uint16_t class_id);
-
- private:
- friend class ImplementationUtilities;
- friend class ObjectTemplate;
-};
-
-
- /**
- * A stack-allocated class that governs a number of local handles.
- * After a handle scope has been created, all local handles will be
- * allocated within that handle scope until either the handle scope is
- * deleted or another handle scope is created. If there is already a
- * handle scope and a new one is created, all allocations will take
- * place in the new handle scope until it is deleted. After that,
- * new handles will again be allocated in the original handle scope.
- *
- * After the handle scope of a local handle has been deleted the
- * garbage collector will no longer track the object stored in the
- * handle and may deallocate it. The behavior of accessing a handle
- * for which the handle scope has been deleted is undefined.
- */
-class V8EXPORT HandleScope {
- public:
- HandleScope();
-
- ~HandleScope();
-
- /**
- * Closes the handle scope and returns the value as a handle in the
- * previous scope, which is the new current scope after the call.
- */
- template <class T> Local<T> Close(Handle<T> value);
-
- /**
- * Counts the number of allocated handles.
- */
- static int NumberOfHandles();
-
- /**
- * Creates a new handle with the given value.
- */
- static internal::Object** CreateHandle(internal::Object* value);
- // Faster version, uses HeapObject to obtain the current Isolate.
- static internal::Object** CreateHandle(internal::HeapObject* value);
-
- private:
- // Make it impossible to create heap-allocated or illegal handle
- // scopes by disallowing certain operations.
- HandleScope(const HandleScope&);
- void operator=(const HandleScope&);
- void* operator new(size_t size);
- void operator delete(void*, size_t);
-
- // This Data class is accessible internally as HandleScopeData through a
- // typedef in the ImplementationUtilities class.
- class V8EXPORT Data {
- public:
- internal::Object** next;
- internal::Object** limit;
- int level;
- inline void Initialize() {
- next = limit = NULL;
- level = 0;
- }
- };
-
- void Leave();
-
- internal::Isolate* isolate_;
- internal::Object** prev_next_;
- internal::Object** prev_limit_;
-
- // Allow for the active closing of HandleScopes which allows to pass a handle
- // from the HandleScope being closed to the next top most HandleScope.
- bool is_closed_;
- internal::Object** RawClose(internal::Object** value);
-
- friend class ImplementationUtilities;
-};
-
-
-// --- S p e c i a l o b j e c t s ---
-
-
-/**
- * The superclass of values and API object templates.
- */
-class V8EXPORT Data {
- private:
- Data();
-};
-
-
-/**
- * Pre-compilation data that can be associated with a script. This
- * data can be calculated for a script in advance of actually
- * compiling it, and can be stored between compilations. When script
- * data is given to the compile method compilation will be faster.
- */
-class V8EXPORT ScriptData { // NOLINT
- public:
- virtual ~ScriptData() { }
-
- /**
- * Pre-compiles the specified script (context-independent).
- *
- * \param input Pointer to UTF-8 script source code.
- * \param length Length of UTF-8 script source code.
- */
- static ScriptData* PreCompile(const char* input, int length);
-
- /**
- * Pre-compiles the specified script (context-independent).
- *
- * NOTE: Pre-compilation using this method cannot happen on another thread
- * without using Lockers.
- *
- * \param source Script source code.
- */
- static ScriptData* PreCompile(Handle<String> source);
-
- /**
- * Load previous pre-compilation data.
- *
- * \param data Pointer to data returned by a call to Data() of a previous
- * ScriptData. Ownership is not transferred.
- * \param length Length of data.
- */
- static ScriptData* New(const char* data, int length);
-
- /**
- * Returns the length of Data().
- */
- virtual int Length() = 0;
-
- /**
- * Returns a serialized representation of this ScriptData that can later be
- * passed to New(). NOTE: Serialized data is platform-dependent.
- */
- virtual const char* Data() = 0;
-
- /**
- * Returns true if the source code could not be parsed.
- */
- virtual bool HasError() = 0;
-};
-
-
-/**
- * The origin, within a file, of a script.
- */
-class ScriptOrigin {
- public:
- inline ScriptOrigin(
- Handle<Value> resource_name,
- Handle<Integer> resource_line_offset = Handle<Integer>(),
- Handle<Integer> resource_column_offset = Handle<Integer>())
- : resource_name_(resource_name),
- resource_line_offset_(resource_line_offset),
- resource_column_offset_(resource_column_offset) { }
- inline Handle<Value> ResourceName() const;
- inline Handle<Integer> ResourceLineOffset() const;
- inline Handle<Integer> ResourceColumnOffset() const;
- private:
- Handle<Value> resource_name_;
- Handle<Integer> resource_line_offset_;
- Handle<Integer> resource_column_offset_;
-};
-
-
-/**
- * A compiled JavaScript script.
- */
-class V8EXPORT Script {
- public:
-
- /**
- * Compiles the specified script (context-independent).
- *
- * \param source Script source code.
- * \param origin Script origin, owned by caller, no references are kept
- * when New() returns
- * \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile()
- * using pre_data speeds compilation if it's done multiple times.
- * Owned by caller, no references are kept when New() returns.
- * \param script_data Arbitrary data associated with script. Using
- * this has same effect as calling SetData(), but allows data to be
- * available to compile event handlers.
- * \return Compiled script object (context independent; when run it
- * will use the currently entered context).
- */
- static Local<Script> New(Handle<String> source,
- ScriptOrigin* origin = NULL,
- ScriptData* pre_data = NULL,
- Handle<String> script_data = Handle<String>());
-
- /**
- * Compiles the specified script using the specified file name
- * object (typically a string) as the script's origin.
- *
- * \param source Script source code.
- * \param file_name file name object (typically a string) to be used
- * as the script's origin.
- * \return Compiled script object (context independent; when run it
- * will use the currently entered context).
- */
- static Local<Script> New(Handle<String> source,
- Handle<Value> file_name);
-
- /**
- * Compiles the specified script (bound to current context).
- *
- * \param source Script source code.
- * \param origin Script origin, owned by caller, no references are kept
- * when Compile() returns
- * \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile()
- * using pre_data speeds compilation if it's done multiple times.
- * Owned by caller, no references are kept when Compile() returns.
- * \param script_data Arbitrary data associated with script. Using
- * this has same effect as calling SetData(), but makes data available
- * earlier (i.e. to compile event handlers).
- * \return Compiled script object, bound to the context that was active
- * when this function was called. When run it will always use this
- * context.
- */
- static Local<Script> Compile(Handle<String> source,
- ScriptOrigin* origin = NULL,
- ScriptData* pre_data = NULL,
- Handle<String> script_data = Handle<String>());
-
- /**
- * Compiles the specified script using the specified file name
- * object (typically a string) as the script's origin.
- *
- * \param source Script source code.
- * \param file_name File name to use as script's origin
- * \param script_data Arbitrary data associated with script. Using
- * this has same effect as calling SetData(), but makes data available
- * earlier (i.e. to compile event handlers).
- * \return Compiled script object, bound to the context that was active
- * when this function was called. When run it will always use this
- * context.
- */
- static Local<Script> Compile(Handle<String> source,
- Handle<Value> file_name,
- Handle<String> script_data = Handle<String>());
-
- /**
- * Runs the script returning the resulting value. If the script is
- * context independent (created using ::New) it will be run in the
- * currently entered context. If it is context specific (created
- * using ::Compile) it will be run in the context in which it was
- * compiled.
- */
- Local<Value> Run();
-
-#ifdef QT_BUILD_SCRIPT_LIB
- /**
- * Same as Run() but allow to give a different value for the 'this' variable
- */
- Local<Value> Run(Handle<Object> receiver);
-#endif
-
- /**
- * Returns the script id value.
- */
- Local<Value> Id();
-
- /**
- * Associate an additional data object with the script. This is mainly used
- * with the debugger as this data object is only available through the
- * debugger API.
- */
- void SetData(Handle<String> data);
-
-#ifdef QT_BUILD_SCRIPT_LIB
- static Local<Script> CompileEval(Handle<String> source,
- ScriptOrigin* origin = NULL,
- ScriptData* pre_data = NULL,
- Handle<String> script_data = Handle<String>());
-
- static Local<Script> CompileEval(Handle<String> source,
- Handle<Value> file_name,
- Handle<String> script_data = Handle<String>());
-#endif
-};
-
-
-/**
- * An error message.
- */
-class V8EXPORT Message {
- public:
- Local<String> Get() const;
- Local<String> GetSourceLine() const;
-
- /**
- * Returns the resource name for the script from where the function causing
- * the error originates.
- */
- Handle<Value> GetScriptResourceName() const;
-
- /**
- * Returns the resource data for the script from where the function causing
- * the error originates.
- */
- Handle<Value> GetScriptData() const;
-
- /**
- * Exception stack trace. By default stack traces are not captured for
- * uncaught exceptions. SetCaptureStackTraceForUncaughtExceptions allows
- * to change this option.
- */
- Handle<StackTrace> GetStackTrace() const;
-
- /**
- * Returns the number, 1-based, of the line where the error occurred.
- */
- int GetLineNumber() const;
-
- /**
- * Returns the index within the script of the first character where
- * the error occurred.
- */
- int GetStartPosition() const;
-
- /**
- * Returns the index within the script of the last character where
- * the error occurred.
- */
- int GetEndPosition() const;
-
- /**
- * Returns the index within the line of the first character where
- * the error occurred.
- */
- int GetStartColumn() const;
-
- /**
- * Returns the index within the line of the last character where
- * the error occurred.
- */
- int GetEndColumn() const;
-
- // TODO(1245381): Print to a string instead of on a FILE.
- static void PrintCurrentStackTrace(FILE* out);
-
- static const int kNoLineNumberInfo = 0;
- static const int kNoColumnInfo = 0;
-};
-
-
-/**
- * Representation of a JavaScript stack trace. The information collected is a
- * snapshot of the execution stack and the information remains valid after
- * execution continues.
- */
-class V8EXPORT StackTrace {
- public:
- /**
- * Flags that determine what information is placed captured for each
- * StackFrame when grabbing the current stack trace.
- */
- enum StackTraceOptions {
- kLineNumber = 1,
- kColumnOffset = 1 << 1 | kLineNumber,
- kScriptName = 1 << 2,
- kFunctionName = 1 << 3,
- kIsEval = 1 << 4,
- kIsConstructor = 1 << 5,
- kScriptNameOrSourceURL = 1 << 6,
-#ifdef QT_BUILD_SCRIPT_LIB
- kScriptId = 1 << 7,
- kOverview = kLineNumber | kColumnOffset | kScriptName | kFunctionName | kScriptId,
-#else
- kOverview = kLineNumber | kColumnOffset | kScriptName | kFunctionName,
-#endif
- kDetailed = kOverview | kIsEval | kIsConstructor | kScriptNameOrSourceURL
- };
-
- /**
- * Returns a StackFrame at a particular index.
- */
- Local<StackFrame> GetFrame(uint32_t index) const;
-
- /**
- * Returns the number of StackFrames.
- */
- int GetFrameCount() const;
-
- /**
- * Returns StackTrace as a v8::Array that contains StackFrame objects.
- */
- Local<Array> AsArray();
-
- /**
- * Grab a snapshot of the the current JavaScript execution stack.
- *
- * \param frame_limit The maximum number of stack frames we want to capture.
- * \param options Enumerates the set of things we will capture for each
- * StackFrame.
- */
- static Local<StackTrace> CurrentStackTrace(
- int frame_limit,
- StackTraceOptions options = kOverview);
-};
-
-
-/**
- * A single JavaScript stack frame.
- */
-class V8EXPORT StackFrame {
- public:
- /**
- * Returns the number, 1-based, of the line for the associate function call.
- * This method will return Message::kNoLineNumberInfo if it is unable to
- * retrieve the line number, or if kLineNumber was not passed as an option
- * when capturing the StackTrace.
- */
- int GetLineNumber() const;
-
- /**
- * Returns the 1-based column offset on the line for the associated function
- * call.
- * This method will return Message::kNoColumnInfo if it is unable to retrieve
- * the column number, or if kColumnOffset was not passed as an option when
- * capturing the StackTrace.
- */
- int GetColumn() const;
-
- /**
- * Returns the name of the resource that contains the script for the
- * function for this StackFrame.
- */
- Local<String> GetScriptName() const;
-
-#ifdef QT_BUILD_SCRIPT_LIB
- /**
- * Returns the id of the resource that contains the script for the
- * function for this StackFrame.
- */
- Local<Value> GetScriptId() const;
-#endif
-
- /**
- * Returns the name of the resource that contains the script for the
- * function for this StackFrame or sourceURL value if the script name
- * is undefined and its source ends with //@ sourceURL=... string.
- */
- Local<String> GetScriptNameOrSourceURL() const;
-
- /**
- * Returns the name of the function associated with this stack frame.
- */
- Local<String> GetFunctionName() const;
-
- /**
- * Returns whether or not the associated function is compiled via a call to
- * eval().
- */
- bool IsEval() const;
-
- /**
- * Returns whther or not the associated function is called as a
- * constructor via "new".
- */
- bool IsConstructor() const;
-};
-
-
-// --- V a l u e ---
-
-
-/**
- * The superclass of all JavaScript values and objects.
- */
-class Value : public Data {
- public:
-
- /**
- * Returns true if this value is the undefined value. See ECMA-262
- * 4.3.10.
- */
- V8EXPORT bool IsUndefined() const;
-
- /**
- * Returns true if this value is the null value. See ECMA-262
- * 4.3.11.
- */
- V8EXPORT bool IsNull() const;
-
- /**
- * Returns true if this value is true.
- */
- V8EXPORT bool IsTrue() const;
-
- /**
- * Returns true if this value is false.
- */
- V8EXPORT bool IsFalse() const;
-
- /**
- * Returns true if this value is an instance of the String type.
- * See ECMA-262 8.4.
- */
- inline bool IsString() const;
-
- /**
- * Returns true if this value is a function.
- */
- V8EXPORT bool IsFunction() const;
-
- /**
- * Returns true if this value is an array.
- */
- V8EXPORT bool IsArray() const;
-
- /**
- * Returns true if this value is an object.
- */
- V8EXPORT bool IsObject() const;
-
- /**
- * Returns true if this value is boolean.
- */
- V8EXPORT bool IsBoolean() const;
-
- /**
- * Returns true if this value is a number.
- */
- V8EXPORT bool IsNumber() const;
-
- /**
- * Returns true if this value is external.
- */
- V8EXPORT bool IsExternal() const;
-
- /**
- * Returns true if this value is a 32-bit signed integer.
- */
- V8EXPORT bool IsInt32() const;
-
- /**
- * Returns true if this value is a 32-bit unsigned integer.
- */
- V8EXPORT bool IsUint32() const;
-
- /**
- * Returns true if this value is a Date.
- */
- V8EXPORT bool IsDate() const;
-
- /**
- * Returns true if this value is a RegExp.
- */
- V8EXPORT bool IsRegExp() const;
-
- /**
- * Returns true if this value is an Error.
- */
- V8EXPORT bool IsError() const;
-
- V8EXPORT Local<Boolean> ToBoolean() const;
- V8EXPORT Local<Number> ToNumber() const;
- V8EXPORT Local<String> ToString() const;
- V8EXPORT Local<String> ToDetailString() const;
- V8EXPORT Local<Object> ToObject() const;
- V8EXPORT Local<Integer> ToInteger() const;
- V8EXPORT Local<Uint32> ToUint32() const;
- V8EXPORT Local<Int32> ToInt32() const;
-
- /**
- * Attempts to convert a string to an array index.
- * Returns an empty handle if the conversion fails.
- */
- V8EXPORT Local<Uint32> ToArrayIndex() const;
-
- V8EXPORT bool BooleanValue() const;
- V8EXPORT double NumberValue() const;
- V8EXPORT int64_t IntegerValue() const;
- V8EXPORT uint32_t Uint32Value() const;
- V8EXPORT int32_t Int32Value() const;
-
- /** JS == */
- V8EXPORT bool Equals(Handle<Value> that) const;
- V8EXPORT bool StrictEquals(Handle<Value> that) const;
-
- private:
- inline bool QuickIsString() const;
- V8EXPORT bool FullIsString() const;
-};
-
-
-/**
- * The superclass of primitive values. See ECMA-262 4.3.2.
- */
-class Primitive : public Value { };
-
-
-/**
- * A primitive boolean value (ECMA-262, 4.3.14). Either the true
- * or false value.
- */
-class Boolean : public Primitive {
- public:
- V8EXPORT bool Value() const;
- static inline Handle<Boolean> New(bool value);
-};
-
-
-/**
- * A JavaScript string value (ECMA-262, 4.3.17).
- */
-class String : public Primitive {
- public:
-
- /**
- * Returns the number of characters in this string.
- */
- V8EXPORT int Length() const;
-
- /**
- * Returns the number of bytes in the UTF-8 encoded
- * representation of this string.
- */
- V8EXPORT int Utf8Length() const;
-
- /**
- * Write the contents of the string to an external buffer.
- * If no arguments are given, expects the buffer to be large
- * enough to hold the entire string and NULL terminator. Copies
- * the contents of the string and the NULL terminator into the
- * buffer.
- *
- * WriteUtf8 will not write partial UTF-8 sequences, preferring to stop
- * before the end of the buffer.
- *
- * Copies up to length characters into the output buffer.
- * Only null-terminates if there is enough space in the buffer.
- *
- * \param buffer The buffer into which the string will be copied.
- * \param start The starting position within the string at which
- * copying begins.
- * \param length The number of characters to copy from the string. For
- * WriteUtf8 the number of bytes in the buffer.
- * \param nchars_ref The number of characters written, can be NULL.
- * \param hints Various hints that might affect performance of this or
- * subsequent operations.
- * \return The number of characters copied to the buffer excluding the null
- * terminator. For WriteUtf8: The number of bytes copied to the buffer
- * including the null terminator.
- */
- enum WriteHints {
- NO_HINTS = 0,
- HINT_MANY_WRITES_EXPECTED = 1
- };
-
- V8EXPORT int Write(uint16_t* buffer,
- int start = 0,
- int length = -1,
- WriteHints hints = NO_HINTS) const; // UTF-16
- V8EXPORT int WriteAscii(char* buffer,
- int start = 0,
- int length = -1,
- WriteHints hints = NO_HINTS) const; // ASCII
- V8EXPORT int WriteUtf8(char* buffer,
- int length = -1,
- int* nchars_ref = NULL,
- WriteHints hints = NO_HINTS) const; // UTF-8
-
- V8EXPORT uint32_t Hash() const;
-
- V8EXPORT bool Equals(Handle<String> other) const;
-
- /**
- * A zero length string.
- */
- V8EXPORT static v8::Local<v8::String> Empty();
-
- /**
- * Returns true if the string is external
- */
- V8EXPORT bool IsExternal() const;
-
- /**
- * Returns true if the string is both external and ascii
- */
- V8EXPORT bool IsExternalAscii() const;
-
- class V8EXPORT ExternalStringResourceBase { // NOLINT
- public:
- virtual ~ExternalStringResourceBase() {}
-
- protected:
- ExternalStringResourceBase() {}
-
- /**
- * Internally V8 will call this Dispose method when the external string
- * resource is no longer needed. The default implementation will use the
- * delete operator. This method can be overridden in subclasses to
- * control how allocated external string resources are disposed.
- */
- virtual void Dispose() { delete this; }
-
- private:
- // Disallow copying and assigning.
- ExternalStringResourceBase(const ExternalStringResourceBase&);
- void operator=(const ExternalStringResourceBase&);
-
- friend class v8::internal::Heap;
- };
-
- /**
- * An ExternalStringResource is a wrapper around a two-byte string
- * buffer that resides outside V8's heap. Implement an
- * ExternalStringResource to manage the life cycle of the underlying
- * buffer. Note that the string data must be immutable.
- */
- class V8EXPORT ExternalStringResource
- : public ExternalStringResourceBase {
- public:
- /**
- * Override the destructor to manage the life cycle of the underlying
- * buffer.
- */
- virtual ~ExternalStringResource() {}
-
- /**
- * The string data from the underlying buffer.
- */
- virtual const uint16_t* data() const = 0;
-
- /**
- * The length of the string. That is, the number of two-byte characters.
- */
- virtual size_t length() const = 0;
-
- protected:
- ExternalStringResource() {}
- };
-
- /**
- * An ExternalAsciiStringResource is a wrapper around an ascii
- * string buffer that resides outside V8's heap. Implement an
- * ExternalAsciiStringResource to manage the life cycle of the
- * underlying buffer. Note that the string data must be immutable
- * and that the data must be strict 7-bit ASCII, not Latin1 or
- * UTF-8, which would require special treatment internally in the
- * engine and, in the case of UTF-8, do not allow efficient indexing.
- * Use String::New or convert to 16 bit data for non-ASCII.
- */
-
- class V8EXPORT ExternalAsciiStringResource
- : public ExternalStringResourceBase {
- public:
- /**
- * Override the destructor to manage the life cycle of the underlying
- * buffer.
- */
- virtual ~ExternalAsciiStringResource() {}
- /** The string data from the underlying buffer.*/
- virtual const char* data() const = 0;
- /** The number of ascii characters in the string.*/
- virtual size_t length() const = 0;
- protected:
- ExternalAsciiStringResource() {}
- };
-
- /**
- * Get the ExternalStringResource for an external string. Returns
- * NULL if IsExternal() doesn't return true.
- */
- inline ExternalStringResource* GetExternalStringResource() const;
-
- /**
- * Get the ExternalAsciiStringResource for an external ascii string.
- * Returns NULL if IsExternalAscii() doesn't return true.
- */
- V8EXPORT ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
-
- static inline String* Cast(v8::Value* obj);
-
- /**
- * Allocates a new string from either utf-8 encoded or ascii data.
- * The second parameter 'length' gives the buffer length.
- * If the data is utf-8 encoded, the caller must
- * be careful to supply the length parameter.
- * If it is not given, the function calls
- * 'strlen' to determine the buffer length, it might be
- * wrong if 'data' contains a null character.
- */
- V8EXPORT static Local<String> New(const char* data, int length = -1);
-
- /** Allocates a new string from utf16 data.*/
- V8EXPORT static Local<String> New(const uint16_t* data, int length = -1);
-
- /** Creates a symbol. Returns one if it exists already.*/
- V8EXPORT static Local<String> NewSymbol(const char* data, int length = -1);
-
- /**
- * Creates a new string by concatenating the left and the right strings
- * passed in as parameters.
- */
- V8EXPORT static Local<String> Concat(Handle<String> left,
- Handle<String>right);
-
- /**
- * Creates a new external string using the data defined in the given
- * resource. When the external string is no longer live on V8's heap the
- * resource will be disposed by calling its Dispose method. The caller of
- * this function should not otherwise delete or modify the resource. Neither
- * should the underlying buffer be deallocated or modified except through the
- * destructor of the external string resource.
- */
- V8EXPORT static Local<String> NewExternal(ExternalStringResource* resource);
-
- /**
- * Associate an external string resource with this string by transforming it
- * in place so that existing references to this string in the JavaScript heap
- * will use the external string resource. The external string resource's
- * character contents needs to be equivalent to this string.
- * Returns true if the string has been changed to be an external string.
- * The string is not modified if the operation fails. See NewExternal for
- * information on the lifetime of the resource.
- */
- V8EXPORT bool MakeExternal(ExternalStringResource* resource);
-
- /**
- * Creates a new external string using the ascii data defined in the given
- * resource. When the external string is no longer live on V8's heap the
- * resource will be disposed by calling its Dispose method. The caller of
- * this function should not otherwise delete or modify the resource. Neither
- * should the underlying buffer be deallocated or modified except through the
- * destructor of the external string resource.
- */
- V8EXPORT static Local<String> NewExternal(
- ExternalAsciiStringResource* resource);
-
- /**
- * Associate an external string resource with this string by transforming it
- * in place so that existing references to this string in the JavaScript heap
- * will use the external string resource. The external string resource's
- * character contents needs to be equivalent to this string.
- * Returns true if the string has been changed to be an external string.
- * The string is not modified if the operation fails. See NewExternal for
- * information on the lifetime of the resource.
- */
- V8EXPORT bool MakeExternal(ExternalAsciiStringResource* resource);
-
- /**
- * Returns true if this string can be made external.
- */
- V8EXPORT bool CanMakeExternal();
-
- /** Creates an undetectable string from the supplied ascii or utf-8 data.*/
- V8EXPORT static Local<String> NewUndetectable(const char* data,
- int length = -1);
-
- /** Creates an undetectable string from the supplied utf-16 data.*/
- V8EXPORT static Local<String> NewUndetectable(const uint16_t* data,
- int length = -1);
-
- /**
- * Converts an object to a utf8-encoded character array. Useful if
- * you want to print the object. If conversion to a string fails
- * (eg. due to an exception in the toString() method of the object)
- * then the length() method returns 0 and the * operator returns
- * NULL.
- */
- class V8EXPORT Utf8Value {
- public:
- explicit Utf8Value(Handle<v8::Value> obj);
- ~Utf8Value();
- char* operator*() { return str_; }
- const char* operator*() const { return str_; }
- int length() const { return length_; }
- private:
- char* str_;
- int length_;
-
- // Disallow copying and assigning.
- Utf8Value(const Utf8Value&);
- void operator=(const Utf8Value&);
- };
-
- /**
- * Converts an object to an ascii string.
- * Useful if you want to print the object.
- * If conversion to a string fails (eg. due to an exception in the toString()
- * method of the object) then the length() method returns 0 and the * operator
- * returns NULL.
- */
- class V8EXPORT AsciiValue {
- public:
- explicit AsciiValue(Handle<v8::Value> obj);
- ~AsciiValue();
- char* operator*() { return str_; }
- const char* operator*() const { return str_; }
- int length() const { return length_; }
- private:
- char* str_;
- int length_;
-
- // Disallow copying and assigning.
- AsciiValue(const AsciiValue&);
- void operator=(const AsciiValue&);
- };
-
- /**
- * Converts an object to a two-byte string.
- * If conversion to a string fails (eg. due to an exception in the toString()
- * method of the object) then the length() method returns 0 and the * operator
- * returns NULL.
- */
- class V8EXPORT Value {
- public:
- explicit Value(Handle<v8::Value> obj);
- ~Value();
- uint16_t* operator*() { return str_; }
- const uint16_t* operator*() const { return str_; }
- int length() const { return length_; }
- private:
- uint16_t* str_;
- int length_;
-
- // Disallow copying and assigning.
- Value(const Value&);
- void operator=(const Value&);
- };
-
- private:
- V8EXPORT void VerifyExternalStringResource(ExternalStringResource* val) const;
- V8EXPORT static void CheckCast(v8::Value* obj);
-};
-
-
-/**
- * A JavaScript number value (ECMA-262, 4.3.20)
- */
-class Number : public Primitive {
- public:
- V8EXPORT double Value() const;
- V8EXPORT static Local<Number> New(double value);
- static inline Number* Cast(v8::Value* obj);
- private:
- V8EXPORT Number();
- static void CheckCast(v8::Value* obj);
-};
-
-
-/**
- * A JavaScript value representing a signed integer.
- */
-class Integer : public Number {
- public:
- V8EXPORT static Local<Integer> New(int32_t value);
- V8EXPORT static Local<Integer> NewFromUnsigned(uint32_t value);
- V8EXPORT int64_t Value() const;
- static inline Integer* Cast(v8::Value* obj);
- private:
- V8EXPORT Integer();
- V8EXPORT static void CheckCast(v8::Value* obj);
-};
-
-
-/**
- * A JavaScript value representing a 32-bit signed integer.
- */
-class Int32 : public Integer {
- public:
- V8EXPORT int32_t Value() const;
- private:
- V8EXPORT Int32();
-};
-
-
-/**
- * A JavaScript value representing a 32-bit unsigned integer.
- */
-class Uint32 : public Integer {
- public:
- V8EXPORT uint32_t Value() const;
- private:
- V8EXPORT Uint32();
-};
-
-
-/**
- * An instance of the built-in Date constructor (ECMA-262, 15.9).
- */
-class Date : public Value {
- public:
- V8EXPORT static Local<Value> New(double time);
-
- /**
- * A specialization of Value::NumberValue that is more efficient
- * because we know the structure of this object.
- */
- V8EXPORT double NumberValue() const;
-
- static inline Date* Cast(v8::Value* obj);
-
- /**
- * Notification that the embedder has changed the time zone,
- * daylight savings time, or other date / time configuration
- * parameters. V8 keeps a cache of various values used for
- * date / time computation. This notification will reset
- * those cached values for the current context so that date /
- * time configuration changes would be reflected in the Date
- * object.
- *
- * This API should not be called more than needed as it will
- * negatively impact the performance of date operations.
- */
- V8EXPORT static void DateTimeConfigurationChangeNotification();
-
- private:
- V8EXPORT static void CheckCast(v8::Value* obj);
-};
-
-
-/**
- * An instance of the built-in RegExp constructor (ECMA-262, 15.10).
- */
-class RegExp : public Value {
- public:
- /**
- * Regular expression flag bits. They can be or'ed to enable a set
- * of flags.
- */
- enum Flags {
- kNone = 0,
- kGlobal = 1,
- kIgnoreCase = 2,
- kMultiline = 4
- };
-
- /**
- * Creates a regular expression from the given pattern string and
- * the flags bit field. May throw a JavaScript exception as
- * described in ECMA-262, 15.10.4.1.
- *
- * For example,
- * RegExp::New(v8::String::New("foo"),
- * static_cast<RegExp::Flags>(kGlobal | kMultiline))
- * is equivalent to evaluating "/foo/gm".
- */
- V8EXPORT static Local<RegExp> New(Handle<String> pattern,
- Flags flags);
-
- /**
- * Returns the value of the source property: a string representing
- * the regular expression.
- */
- V8EXPORT Local<String> GetSource() const;
-
- /**
- * Returns the flags bit field.
- */
- V8EXPORT Flags GetFlags() const;
-
- static inline RegExp* Cast(v8::Value* obj);
-
- private:
- V8EXPORT static void CheckCast(v8::Value* obj);
-};
-
-
-enum PropertyAttribute {
- None = 0,
- ReadOnly = 1 << 0,
- DontEnum = 1 << 1,
- DontDelete = 1 << 2
-};
-
-enum ExternalArrayType {
- kExternalByteArray = 1,
- kExternalUnsignedByteArray,
- kExternalShortArray,
- kExternalUnsignedShortArray,
- kExternalIntArray,
- kExternalUnsignedIntArray,
- kExternalFloatArray,
- kExternalPixelArray
-};
-
-/**
- * Accessor[Getter|Setter] are used as callback functions when
- * setting|getting a particular property. See Object and ObjectTemplate's
- * method SetAccessor.
- */
-typedef Handle<Value> (*AccessorGetter)(Local<String> property,
- const AccessorInfo& info);
-
-
-typedef void (*AccessorSetter)(Local<String> property,
- Local<Value> value,
- const AccessorInfo& info);
-
-
-/**
- * Access control specifications.
- *
- * Some accessors should be accessible across contexts. These
- * accessors have an explicit access control parameter which specifies
- * the kind of cross-context access that should be allowed.
- *
- * Additionally, for security, accessors can prohibit overwriting by
- * accessors defined in JavaScript. For objects that have such
- * accessors either locally or in their prototype chain it is not
- * possible to overwrite the accessor by using __defineGetter__ or
- * __defineSetter__ from JavaScript code.
- */
-enum AccessControl {
- DEFAULT = 0,
- ALL_CAN_READ = 1,
- ALL_CAN_WRITE = 1 << 1,
- PROHIBITS_OVERWRITING = 1 << 2
-};
-
-
-/**
- * A JavaScript object (ECMA-262, 4.3.3)
- */
-class Object : public Value {
- public:
- V8EXPORT bool Set(Handle<Value> key,
- Handle<Value> value,
- PropertyAttribute attribs = None);
-
- V8EXPORT bool Set(uint32_t index,
- Handle<Value> value);
-
- // Sets a local property on this object bypassing interceptors and
- // overriding accessors or read-only properties.
- //
- // Note that if the object has an interceptor the property will be set
- // locally, but since the interceptor takes precedence the local property
- // will only be returned if the interceptor doesn't return a value.
- //
- // Note also that this only works for named properties.
- V8EXPORT bool ForceSet(Handle<Value> key,
- Handle<Value> value,
- PropertyAttribute attribs = None);
-
- V8EXPORT Local<Value> Get(Handle<Value> key);
-
- V8EXPORT Local<Value> Get(uint32_t index);
-
- // TODO(1245389): Replace the type-specific versions of these
- // functions with generic ones that accept a Handle<Value> key.
- V8EXPORT bool Has(Handle<String> key);
-
- V8EXPORT bool Delete(Handle<String> key);
-
- // Delete a property on this object bypassing interceptors and
- // ignoring dont-delete attributes.
- V8EXPORT bool ForceDelete(Handle<Value> key);
-
- V8EXPORT bool Has(uint32_t index);
-
- V8EXPORT bool Delete(uint32_t index);
-
- V8EXPORT bool SetAccessor(Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter = 0,
- Handle<Value> data = Handle<Value>(),
- AccessControl settings = DEFAULT,
- PropertyAttribute attribute = None);
-
- /**
- * Returns an array containing the names of the enumerable properties
- * of this object, including properties from prototype objects. The
- * array returned by this method contains the same values as would
- * be enumerated by a for-in statement over this object.
- */
- V8EXPORT Local<Array> GetPropertyNames();
-
- /**
- * Get the prototype object. This does not skip objects marked to
- * be skipped by __proto__ and it does not consult the security
- * handler.
- */
- V8EXPORT Local<Value> GetPrototype();
-
- /**
- * Set the prototype object. This does not skip objects marked to
- * be skipped by __proto__ and it does not consult the security
- * handler.
- */
- V8EXPORT bool SetPrototype(Handle<Value> prototype);
-
- /**
- * Finds an instance of the given function template in the prototype
- * chain.
- */
- V8EXPORT Local<Object> FindInstanceInPrototypeChain(
- Handle<FunctionTemplate> tmpl);
-
- /**
- * Call builtin Object.prototype.toString on this object.
- * This is different from Value::ToString() that may call
- * user-defined toString function. This one does not.
- */
- V8EXPORT Local<String> ObjectProtoToString();
-
- /**
- * Returns the name of the function invoked as a constructor for this object.
- */
- V8EXPORT Local<String> GetConstructorName();
-
- /** Gets the number of internal fields for this Object. */
- V8EXPORT int InternalFieldCount();
- /** Gets the value in an internal field. */
- inline Local<Value> GetInternalField(int index);
- /** Sets the value in an internal field. */
- V8EXPORT void SetInternalField(int index, Handle<Value> value);
-
- /** Gets a native pointer from an internal field. */
- inline void* GetPointerFromInternalField(int index);
-
- /** Sets a native pointer in an internal field. */
- V8EXPORT void SetPointerInInternalField(int index, void* value);
-
- // Testers for local properties.
- V8EXPORT bool HasRealNamedProperty(Handle<String> key);
- V8EXPORT bool HasRealIndexedProperty(uint32_t index);
- V8EXPORT bool HasRealNamedCallbackProperty(Handle<String> key);
-
- /**
- * If result.IsEmpty() no real property was located in the prototype chain.
- * This means interceptors in the prototype chain are not called.
- */
- V8EXPORT Local<Value> GetRealNamedPropertyInPrototypeChain(
- Handle<String> key);
-
- /**
- * If result.IsEmpty() no real property was located on the object or
- * in the prototype chain.
- * This means interceptors in the prototype chain are not called.
- */
- V8EXPORT Local<Value> GetRealNamedProperty(Handle<String> key);
-
- /** Tests for a named lookup interceptor.*/
- V8EXPORT bool HasNamedLookupInterceptor();
-
- /** Tests for an index lookup interceptor.*/
- V8EXPORT bool HasIndexedLookupInterceptor();
-
- /**
- * Turns on access check on the object if the object is an instance of
- * a template that has access check callbacks. If an object has no
- * access check info, the object cannot be accessed by anyone.
- */
- V8EXPORT void TurnOnAccessCheck();
-
- /**
- * Returns the identity hash for this object. The current implemenation uses
- * a hidden property on the object to store the identity hash.
- *
- * The return value will never be 0. Also, it is not guaranteed to be
- * unique.
- */
- V8EXPORT int GetIdentityHash();
-
- /**
- * Access hidden properties on JavaScript objects. These properties are
- * hidden from the executing JavaScript and only accessible through the V8
- * C++ API. Hidden properties introduced by V8 internally (for example the
- * identity hash) are prefixed with "v8::".
- */
- V8EXPORT bool SetHiddenValue(Handle<String> key, Handle<Value> value);
- V8EXPORT Local<Value> GetHiddenValue(Handle<String> key);
- V8EXPORT bool DeleteHiddenValue(Handle<String> key);
-
- /**
- * Returns true if this is an instance of an api function (one
- * created from a function created from a function template) and has
- * been modified since it was created. Note that this method is
- * conservative and may return true for objects that haven't actually
- * been modified.
- */
- V8EXPORT bool IsDirty();
-
- /**
- * Clone this object with a fast but shallow copy. Values will point
- * to the same values as the original object.
- */
- V8EXPORT Local<Object> Clone();
-
- /**
- * Returns the context in which the object was created.
- */
- V8EXPORT Local<Context> CreationContext();
-
- /**
- * Set the backing store of the indexed properties to be managed by the
- * embedding layer. Access to the indexed properties will follow the rules
- * spelled out in CanvasPixelArray.
- * Note: The embedding program still owns the data and needs to ensure that
- * the backing store is preserved while V8 has a reference.
- */
- V8EXPORT void SetIndexedPropertiesToPixelData(uint8_t* data, int length);
- V8EXPORT bool HasIndexedPropertiesInPixelData();
- V8EXPORT uint8_t* GetIndexedPropertiesPixelData();
- V8EXPORT int GetIndexedPropertiesPixelDataLength();
-
- /**
- * Set the backing store of the indexed properties to be managed by the
- * embedding layer. Access to the indexed properties will follow the rules
- * spelled out for the CanvasArray subtypes in the WebGL specification.
- * Note: The embedding program still owns the data and needs to ensure that
- * the backing store is preserved while V8 has a reference.
- */
- V8EXPORT void SetIndexedPropertiesToExternalArrayData(
- void* data,
- ExternalArrayType array_type,
- int number_of_elements);
- V8EXPORT bool HasIndexedPropertiesInExternalArrayData();
- V8EXPORT void* GetIndexedPropertiesExternalArrayData();
- V8EXPORT ExternalArrayType GetIndexedPropertiesExternalArrayDataType();
- V8EXPORT int GetIndexedPropertiesExternalArrayDataLength();
-
- V8EXPORT static Local<Object> New();
- static inline Object* Cast(Value* obj);
-
-#ifdef QT_BUILD_SCRIPT_LIB
- /**
- * Returns wether the object can be called as a function
- */
- V8EXPORT bool IsCallable();
- /**
- * Call the object as a function
- */
- V8EXPORT Local<Value> Call(Handle<Object> recv,
- int argc,
- Handle<Value> argv[]);
-
- V8EXPORT Local<Object> NewInstance(int argc, Handle<Value> argv[]) const;
-#endif
-
- private:
- V8EXPORT Object();
- V8EXPORT static void CheckCast(Value* obj);
- V8EXPORT Local<Value> CheckedGetInternalField(int index);
- V8EXPORT void* SlowGetPointerFromInternalField(int index);
-
- /**
- * If quick access to the internal field is possible this method
- * returns the value. Otherwise an empty handle is returned.
- */
- inline Local<Value> UncheckedGetInternalField(int index);
-};
-
-
-/**
- * An instance of the built-in array constructor (ECMA-262, 15.4.2).
- */
-class Array : public Object {
- public:
- V8EXPORT uint32_t Length() const;
-
- /**
- * Clones an element at index |index|. Returns an empty
- * handle if cloning fails (for any reason).
- */
- V8EXPORT Local<Object> CloneElementAt(uint32_t index);
-
- /**
- * Creates a JavaScript array with the given length. If the length
- * is negative the returned array will have length 0.
- */
- V8EXPORT static Local<Array> New(int length = 0);
-
- static inline Array* Cast(Value* obj);
- private:
- V8EXPORT Array();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * A JavaScript function object (ECMA-262, 15.3).
- */
-class Function : public Object {
- public:
- V8EXPORT Local<Object> NewInstance() const;
- V8EXPORT Local<Object> NewInstance(int argc, Handle<Value> argv[]) const;
- V8EXPORT Local<Value> Call(Handle<Object> recv,
- int argc,
- Handle<Value> argv[]);
- V8EXPORT void SetName(Handle<String> name);
- V8EXPORT Handle<Value> GetName() const;
-
- /**
- * Returns zero based line number of function body and
- * kLineOffsetNotFound if no information available.
- */
- V8EXPORT int GetScriptLineNumber() const;
- V8EXPORT ScriptOrigin GetScriptOrigin() const;
- static inline Function* Cast(Value* obj);
- V8EXPORT static const int kLineOffsetNotFound;
- private:
- V8EXPORT Function();
- V8EXPORT static void CheckCast(Value* obj);
-};
-
-
-/**
- * A JavaScript value that wraps a C++ void*. This type of value is
- * mainly used to associate C++ data structures with JavaScript
- * objects.
- *
- * The Wrap function V8 will return the most optimal Value object wrapping the
- * C++ void*. The type of the value is not guaranteed to be an External object
- * and no assumptions about its type should be made. To access the wrapped
- * value Unwrap should be used, all other operations on that object will lead
- * to unpredictable results.
- */
-class External : public Value {
- public:
- V8EXPORT static Local<Value> Wrap(void* data);
- static inline void* Unwrap(Handle<Value> obj);
-
- V8EXPORT static Local<External> New(void* value);
- static inline External* Cast(Value* obj);
- V8EXPORT void* Value() const;
- private:
- V8EXPORT External();
- V8EXPORT static void CheckCast(v8::Value* obj);
- static inline void* QuickUnwrap(Handle<v8::Value> obj);
- V8EXPORT static void* FullUnwrap(Handle<v8::Value> obj);
-};
-
-
-// --- T e m p l a t e s ---
-
-
-/**
- * The superclass of object and function templates.
- */
-class V8EXPORT Template : public Data {
- public:
- /** Adds a property to each instance created by this template.*/
- void Set(Handle<String> name, Handle<Data> value,
- PropertyAttribute attributes = None);
- inline void Set(const char* name, Handle<Data> value);
- private:
- Template();
-
- friend class ObjectTemplate;
- friend class FunctionTemplate;
-};
-
-
-/**
- * The argument information given to function call callbacks. This
- * class provides access to information about the context of the call,
- * including the receiver, the number and values of arguments, and
- * the holder of the function.
- */
-class Arguments {
- public:
- inline int Length() const;
- inline Local<Value> operator[](int i) const;
- inline Local<Function> Callee() const;
- inline Local<Object> This() const;
- inline Local<Object> Holder() const;
- inline bool IsConstructCall() const;
- inline Local<Value> Data() const;
- private:
- static const int kDataIndex = 0;
- static const int kCalleeIndex = -1;
- static const int kHolderIndex = -2;
-
- friend class ImplementationUtilities;
- inline Arguments(internal::Object** implicit_args,
- internal::Object** values,
- int length,
- bool is_construct_call);
- internal::Object** implicit_args_;
- internal::Object** values_;
- int length_;
- bool is_construct_call_;
-};
-
-
-/**
- * The information passed to an accessor callback about the context
- * of the property access.
- */
-class V8EXPORT AccessorInfo {
- public:
- inline AccessorInfo(internal::Object** args)
- : args_(args) { }
- inline Local<Value> Data() const;
- inline Local<Object> This() const;
- inline Local<Object> Holder() const;
- private:
- internal::Object** args_;
-};
-
-
-typedef Handle<Value> (*InvocationCallback)(const Arguments& args);
-
-/**
- * NamedProperty[Getter|Setter] are used as interceptors on object.
- * See ObjectTemplate::SetNamedPropertyHandler.
- */
-typedef Handle<Value> (*NamedPropertyGetter)(Local<String> property,
- const AccessorInfo& info);
-
-
-/**
- * Returns the value if the setter intercepts the request.
- * Otherwise, returns an empty handle.
- */
-typedef Handle<Value> (*NamedPropertySetter)(Local<String> property,
- Local<Value> value,
- const AccessorInfo& info);
-
-/**
- * Returns a non-empty handle if the interceptor intercepts the request.
- * The result is an integer encoding property attributes (like v8::None,
- * v8::DontEnum, etc.)
- */
-typedef Handle<Integer> (*NamedPropertyQuery)(Local<String> property,
- const AccessorInfo& info);
-
-
-/**
- * Returns a non-empty handle if the deleter intercepts the request.
- * The return value is true if the property could be deleted and false
- * otherwise.
- */
-typedef Handle<Boolean> (*NamedPropertyDeleter)(Local<String> property,
- const AccessorInfo& info);
-
-/**
- * Returns an array containing the names of the properties the named
- * property getter intercepts.
- */
-typedef Handle<Array> (*NamedPropertyEnumerator)(const AccessorInfo& info);
-
-
-/**
- * Returns the value of the property if the getter intercepts the
- * request. Otherwise, returns an empty handle.
- */
-typedef Handle<Value> (*IndexedPropertyGetter)(uint32_t index,
- const AccessorInfo& info);
-
-
-/**
- * Returns the value if the setter intercepts the request.
- * Otherwise, returns an empty handle.
- */
-typedef Handle<Value> (*IndexedPropertySetter)(uint32_t index,
- Local<Value> value,
- const AccessorInfo& info);
-
-
-/**
- * Returns a non-empty handle if the interceptor intercepts the request.
- * The result is an integer encoding property attributes.
- */
-typedef Handle<Integer> (*IndexedPropertyQuery)(uint32_t index,
- const AccessorInfo& info);
-
-/**
- * Returns a non-empty handle if the deleter intercepts the request.
- * The return value is true if the property could be deleted and false
- * otherwise.
- */
-typedef Handle<Boolean> (*IndexedPropertyDeleter)(uint32_t index,
- const AccessorInfo& info);
-
-/**
- * Returns an array containing the indices of the properties the
- * indexed property getter intercepts.
- */
-typedef Handle<Array> (*IndexedPropertyEnumerator)(const AccessorInfo& info);
-
-
-/**
- * Access type specification.
- */
-enum AccessType {
- ACCESS_GET,
- ACCESS_SET,
- ACCESS_HAS,
- ACCESS_DELETE,
- ACCESS_KEYS
-};
-
-
-/**
- * Returns true if cross-context access should be allowed to the named
- * property with the given key on the host object.
- */
-typedef bool (*NamedSecurityCallback)(Local<Object> host,
- Local<Value> key,
- AccessType type,
- Local<Value> data);
-
-
-/**
- * Returns true if cross-context access should be allowed to the indexed
- * property with the given index on the host object.
- */
-typedef bool (*IndexedSecurityCallback)(Local<Object> host,
- uint32_t index,
- AccessType type,
- Local<Value> data);
-
-
-/**
- * A FunctionTemplate is used to create functions at runtime. There
- * can only be one function created from a FunctionTemplate in a
- * context. The lifetime of the created function is equal to the
- * lifetime of the context. So in case the embedder needs to create
- * temporary functions that can be collected using Scripts is
- * preferred.
- *
- * A FunctionTemplate can have properties, these properties are added to the
- * function object when it is created.
- *
- * A FunctionTemplate has a corresponding instance template which is
- * used to create object instances when the function is used as a
- * constructor. Properties added to the instance template are added to
- * each object instance.
- *
- * A FunctionTemplate can have a prototype template. The prototype template
- * is used to create the prototype object of the function.
- *
- * The following example shows how to use a FunctionTemplate:
- *
- * \code
- * v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
- * t->Set("func_property", v8::Number::New(1));
- *
- * v8::Local<v8::Template> proto_t = t->PrototypeTemplate();
- * proto_t->Set("proto_method", v8::FunctionTemplate::New(InvokeCallback));
- * proto_t->Set("proto_const", v8::Number::New(2));
- *
- * v8::Local<v8::ObjectTemplate> instance_t = t->InstanceTemplate();
- * instance_t->SetAccessor("instance_accessor", InstanceAccessorCallback);
- * instance_t->SetNamedPropertyHandler(PropertyHandlerCallback, ...);
- * instance_t->Set("instance_property", Number::New(3));
- *
- * v8::Local<v8::Function> function = t->GetFunction();
- * v8::Local<v8::Object> instance = function->NewInstance();
- * \endcode
- *
- * Let's use "function" as the JS variable name of the function object
- * and "instance" for the instance object created above. The function
- * and the instance will have the following properties:
- *
- * \code
- * func_property in function == true;
- * function.func_property == 1;
- *
- * function.prototype.proto_method() invokes 'InvokeCallback'
- * function.prototype.proto_const == 2;
- *
- * instance instanceof function == true;
- * instance.instance_accessor calls 'InstanceAccessorCallback'
- * instance.instance_property == 3;
- * \endcode
- *
- * A FunctionTemplate can inherit from another one by calling the
- * FunctionTemplate::Inherit method. The following graph illustrates
- * the semantics of inheritance:
- *
- * \code
- * FunctionTemplate Parent -> Parent() . prototype -> { }
- * ^ ^
- * | Inherit(Parent) | .__proto__
- * | |
- * FunctionTemplate Child -> Child() . prototype -> { }
- * \endcode
- *
- * A FunctionTemplate 'Child' inherits from 'Parent', the prototype
- * object of the Child() function has __proto__ pointing to the
- * Parent() function's prototype object. An instance of the Child
- * function has all properties on Parent's instance templates.
- *
- * Let Parent be the FunctionTemplate initialized in the previous
- * section and create a Child FunctionTemplate by:
- *
- * \code
- * Local<FunctionTemplate> parent = t;
- * Local<FunctionTemplate> child = FunctionTemplate::New();
- * child->Inherit(parent);
- *
- * Local<Function> child_function = child->GetFunction();
- * Local<Object> child_instance = child_function->NewInstance();
- * \endcode
- *
- * The Child function and Child instance will have the following
- * properties:
- *
- * \code
- * child_func.prototype.__proto__ == function.prototype;
- * child_instance.instance_accessor calls 'InstanceAccessorCallback'
- * child_instance.instance_property == 3;
- * \endcode
- */
-class V8EXPORT FunctionTemplate : public Template {
- public:
- /** Creates a function template.*/
- static Local<FunctionTemplate> New(
- InvocationCallback callback = 0,
- Handle<Value> data = Handle<Value>(),
- Handle<Signature> signature = Handle<Signature>());
- /** Returns the unique function instance in the current execution context.*/
- Local<Function> GetFunction();
-
- /**
- * Set the call-handler callback for a FunctionTemplate. This
- * callback is called whenever the function created from this
- * FunctionTemplate is called.
- */
- void SetCallHandler(InvocationCallback callback,
- Handle<Value> data = Handle<Value>());
-
- /** Get the InstanceTemplate. */
- Local<ObjectTemplate> InstanceTemplate();
-
- /** Causes the function template to inherit from a parent function template.*/
- void Inherit(Handle<FunctionTemplate> parent);
-
- /**
- * A PrototypeTemplate is the template used to create the prototype object
- * of the function created by this template.
- */
- Local<ObjectTemplate> PrototypeTemplate();
-
-
- /**
- * Set the class name of the FunctionTemplate. This is used for
- * printing objects created with the function created from the
- * FunctionTemplate as its constructor.
- */
- void SetClassName(Handle<String> name);
-
- /**
- * Determines whether the __proto__ accessor ignores instances of
- * the function template. If instances of the function template are
- * ignored, __proto__ skips all instances and instead returns the
- * next object in the prototype chain.
- *
- * Call with a value of true to make the __proto__ accessor ignore
- * instances of the function template. Call with a value of false
- * to make the __proto__ accessor not ignore instances of the
- * function template. By default, instances of a function template
- * are not ignored.
- */
- void SetHiddenPrototype(bool value);
-
- /**
- * Returns true if the given object is an instance of this function
- * template.
- */
- bool HasInstance(Handle<Value> object);
-
- private:
- FunctionTemplate();
- void AddInstancePropertyAccessor(Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter,
- Handle<Value> data,
- AccessControl settings,
- PropertyAttribute attributes);
- void SetNamedInstancePropertyHandler(NamedPropertyGetter getter,
- NamedPropertySetter setter,
- NamedPropertyQuery query,
- NamedPropertyDeleter remover,
- NamedPropertyEnumerator enumerator,
- Handle<Value> data);
- void SetIndexedInstancePropertyHandler(IndexedPropertyGetter getter,
- IndexedPropertySetter setter,
- IndexedPropertyQuery query,
- IndexedPropertyDeleter remover,
- IndexedPropertyEnumerator enumerator,
- Handle<Value> data);
- void SetInstanceCallAsFunctionHandler(InvocationCallback callback,
- Handle<Value> data);
-
- friend class Context;
- friend class ObjectTemplate;
-};
-
-
-/**
- * An ObjectTemplate is used to create objects at runtime.
- *
- * Properties added to an ObjectTemplate are added to each object
- * created from the ObjectTemplate.
- */
-class V8EXPORT ObjectTemplate : public Template {
- public:
- /** Creates an ObjectTemplate. */
- static Local<ObjectTemplate> New();
-
- /** Creates a new instance of this template.*/
- Local<Object> NewInstance();
-
- /**
- * Sets an accessor on the object template.
- *
- * Whenever the property with the given name is accessed on objects
- * created from this ObjectTemplate the getter and setter callbacks
- * are called instead of getting and setting the property directly
- * on the JavaScript object.
- *
- * \param name The name of the property for which an accessor is added.
- * \param getter The callback to invoke when getting the property.
- * \param setter The callback to invoke when setting the property.
- * \param data A piece of data that will be passed to the getter and setter
- * callbacks whenever they are invoked.
- * \param settings Access control settings for the accessor. This is a bit
- * field consisting of one of more of
- * DEFAULT = 0, ALL_CAN_READ = 1, or ALL_CAN_WRITE = 2.
- * The default is to not allow cross-context access.
- * ALL_CAN_READ means that all cross-context reads are allowed.
- * ALL_CAN_WRITE means that all cross-context writes are allowed.
- * The combination ALL_CAN_READ | ALL_CAN_WRITE can be used to allow all
- * cross-context access.
- * \param attribute The attributes of the property for which an accessor
- * is added.
- */
- void SetAccessor(Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter = 0,
- Handle<Value> data = Handle<Value>(),
- AccessControl settings = DEFAULT,
- PropertyAttribute attribute = None);
-
- /**
- * Sets a named property handler on the object template.
- *
- * Whenever a named property is accessed on objects created from
- * this object template, the provided callback is invoked instead of
- * accessing the property directly on the JavaScript object.
- *
- * \param getter The callback to invoke when getting a property.
- * \param setter The callback to invoke when setting a property.
- * \param query The callback to invoke to check if a property is present,
- * and if present, get its attributes.
- * \param deleter The callback to invoke when deleting a property.
- * \param enumerator The callback to invoke to enumerate all the named
- * properties of an object.
- * \param data A piece of data that will be passed to the callbacks
- * whenever they are invoked.
- */
- void SetNamedPropertyHandler(NamedPropertyGetter getter,
- NamedPropertySetter setter = 0,
- NamedPropertyQuery query = 0,
- NamedPropertyDeleter deleter = 0,
- NamedPropertyEnumerator enumerator = 0,
- Handle<Value> data = Handle<Value>());
-
- /**
- * Sets an indexed property handler on the object template.
- *
- * Whenever an indexed property is accessed on objects created from
- * this object template, the provided callback is invoked instead of
- * accessing the property directly on the JavaScript object.
- *
- * \param getter The callback to invoke when getting a property.
- * \param setter The callback to invoke when setting a property.
- * \param query The callback to invoke to check is an object has a property.
- * \param deleter The callback to invoke when deleting a property.
- * \param enumerator The callback to invoke to enumerate all the indexed
- * properties of an object.
- * \param data A piece of data that will be passed to the callbacks
- * whenever they are invoked.
- */
- void SetIndexedPropertyHandler(IndexedPropertyGetter getter,
- IndexedPropertySetter setter = 0,
- IndexedPropertyQuery query = 0,
- IndexedPropertyDeleter deleter = 0,
- IndexedPropertyEnumerator enumerator = 0,
- Handle<Value> data = Handle<Value>());
-
- /**
- * Sets the callback to be used when calling instances created from
- * this template as a function. If no callback is set, instances
- * behave like normal JavaScript objects that cannot be called as a
- * function.
- */
- void SetCallAsFunctionHandler(InvocationCallback callback,
- Handle<Value> data = Handle<Value>());
-
- /**
- * Mark object instances of the template as undetectable.
- *
- * In many ways, undetectable objects behave as though they are not
- * there. They behave like 'undefined' in conditionals and when
- * printed. However, properties can be accessed and called as on
- * normal objects.
- */
- void MarkAsUndetectable();
-
- /**
- * Sets access check callbacks on the object template.
- *
- * When accessing properties on instances of this object template,
- * the access check callback will be called to determine whether or
- * not to allow cross-context access to the properties.
- * The last parameter specifies whether access checks are turned
- * on by default on instances. If access checks are off by default,
- * they can be turned on on individual instances by calling
- * Object::TurnOnAccessCheck().
- */
- void SetAccessCheckCallbacks(NamedSecurityCallback named_handler,
- IndexedSecurityCallback indexed_handler,
- Handle<Value> data = Handle<Value>(),
- bool turned_on_by_default = true);
-
- /**
- * Gets the number of internal fields for objects generated from
- * this template.
- */
- int InternalFieldCount();
-
- /**
- * Sets the number of internal fields for objects generated from
- * this template.
- */
- void SetInternalFieldCount(int value);
-
- private:
- ObjectTemplate();
- static Local<ObjectTemplate> New(Handle<FunctionTemplate> constructor);
- friend class FunctionTemplate;
-};
-
-
-/**
- * A Signature specifies which receivers and arguments a function can
- * legally be called with.
- */
-class V8EXPORT Signature : public Data {
- public:
- static Local<Signature> New(Handle<FunctionTemplate> receiver =
- Handle<FunctionTemplate>(),
- int argc = 0,
- Handle<FunctionTemplate> argv[] = 0);
- private:
- Signature();
-};
-
-
-/**
- * A utility for determining the type of objects based on the template
- * they were constructed from.
- */
-class V8EXPORT TypeSwitch : public Data {
- public:
- static Local<TypeSwitch> New(Handle<FunctionTemplate> type);
- static Local<TypeSwitch> New(int argc, Handle<FunctionTemplate> types[]);
- int match(Handle<Value> value);
- private:
- TypeSwitch();
-};
-
-
-// --- E x t e n s i o n s ---
-
-
-/**
- * Ignore
- */
-class V8EXPORT Extension { // NOLINT
- public:
- Extension(const char* name,
- const char* source = 0,
- int dep_count = 0,
- const char** deps = 0);
- virtual ~Extension() { }
- virtual v8::Handle<v8::FunctionTemplate>
- GetNativeFunction(v8::Handle<v8::String> name) {
- return v8::Handle<v8::FunctionTemplate>();
- }
-
- const char* name() { return name_; }
- const char* source() { return source_; }
- int dependency_count() { return dep_count_; }
- const char** dependencies() { return deps_; }
- void set_auto_enable(bool value) { auto_enable_ = value; }
- bool auto_enable() { return auto_enable_; }
-
- private:
- const char* name_;
- const char* source_;
- int dep_count_;
- const char** deps_;
- bool auto_enable_;
-
- // Disallow copying and assigning.
- Extension(const Extension&);
- void operator=(const Extension&);
-};
-
-
-void V8EXPORT RegisterExtension(Extension* extension);
-
-
-/**
- * Ignore
- */
-class V8EXPORT DeclareExtension {
- public:
- inline DeclareExtension(Extension* extension) {
- RegisterExtension(extension);
- }
-};
-
-
-// --- S t a t i c s ---
-
-
-Handle<Primitive> V8EXPORT Undefined();
-Handle<Primitive> V8EXPORT Null();
-Handle<Boolean> V8EXPORT True();
-Handle<Boolean> V8EXPORT False();
-
-
-/**
- * A set of constraints that specifies the limits of the runtime's memory use.
- * You must set the heap size before initializing the VM - the size cannot be
- * adjusted after the VM is initialized.
- *
- * If you are using threads then you should hold the V8::Locker lock while
- * setting the stack limit and you must set a non-default stack limit separately
- * for each thread.
- */
-class V8EXPORT ResourceConstraints {
- public:
- ResourceConstraints();
- int max_young_space_size() const { return max_young_space_size_; }
- void set_max_young_space_size(int value) { max_young_space_size_ = value; }
- int max_old_space_size() const { return max_old_space_size_; }
- void set_max_old_space_size(int value) { max_old_space_size_ = value; }
- int max_executable_size() { return max_executable_size_; }
- void set_max_executable_size(int value) { max_executable_size_ = value; }
- uint32_t* stack_limit() const { return stack_limit_; }
- // Sets an address beyond which the VM's stack may not grow.
- void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
- private:
- int max_young_space_size_;
- int max_old_space_size_;
- int max_executable_size_;
- uint32_t* stack_limit_;
-};
-
-
-bool V8EXPORT SetResourceConstraints(ResourceConstraints* constraints);
-
-
-// --- E x c e p t i o n s ---
-
-
-typedef void (*FatalErrorCallback)(const char* location, const char* message);
-
-
-typedef void (*MessageCallback)(Handle<Message> message, Handle<Value> data);
-
-
-/**
- * Schedules an exception to be thrown when returning to JavaScript. When an
- * exception has been scheduled it is illegal to invoke any JavaScript
- * operation; the caller must return immediately and only after the exception
- * has been handled does it become legal to invoke JavaScript operations.
- */
-Handle<Value> V8EXPORT ThrowException(Handle<Value> exception);
-
-/**
- * Create new error objects by calling the corresponding error object
- * constructor with the message.
- */
-class V8EXPORT Exception {
- public:
- static Local<Value> RangeError(Handle<String> message);
- static Local<Value> ReferenceError(Handle<String> message);
- static Local<Value> SyntaxError(Handle<String> message);
- static Local<Value> TypeError(Handle<String> message);
- static Local<Value> Error(Handle<String> message);
-};
-
-
-// --- C o u n t e r s C a l l b a c k s ---
-
-typedef int* (*CounterLookupCallback)(const char* name);
-
-typedef void* (*CreateHistogramCallback)(const char* name,
- int min,
- int max,
- size_t buckets);
-
-typedef void (*AddHistogramSampleCallback)(void* histogram, int sample);
-
-typedef void (*UserCallback)(void *data);
-
-// --- M e m o r y A l l o c a t i o n C a l l b a c k ---
- enum ObjectSpace {
- kObjectSpaceNewSpace = 1 << 0,
- kObjectSpaceOldPointerSpace = 1 << 1,
- kObjectSpaceOldDataSpace = 1 << 2,
- kObjectSpaceCodeSpace = 1 << 3,
- kObjectSpaceMapSpace = 1 << 4,
- kObjectSpaceLoSpace = 1 << 5,
-
- kObjectSpaceAll = kObjectSpaceNewSpace | kObjectSpaceOldPointerSpace |
- kObjectSpaceOldDataSpace | kObjectSpaceCodeSpace | kObjectSpaceMapSpace |
- kObjectSpaceLoSpace
- };
-
- enum AllocationAction {
- kAllocationActionAllocate = 1 << 0,
- kAllocationActionFree = 1 << 1,
- kAllocationActionAll = kAllocationActionAllocate | kAllocationActionFree
- };
-
-typedef void (*MemoryAllocationCallback)(ObjectSpace space,
- AllocationAction action,
- int size);
-
-// --- F a i l e d A c c e s s C h e c k C a l l b a c k ---
-typedef void (*FailedAccessCheckCallback)(Local<Object> target,
- AccessType type,
- Local<Value> data);
-
-// --- G a r b a g e C o l l e c t i o n C a l l b a c k s
-
-/**
- * Applications can register callback functions which will be called
- * before and after a garbage collection. Allocations are not
- * allowed in the callback functions, you therefore cannot manipulate
- * objects (set or delete properties for example) since it is possible
- * such operations will result in the allocation of objects.
- */
-enum GCType {
- kGCTypeScavenge = 1 << 0,
- kGCTypeMarkSweepCompact = 1 << 1,
- kGCTypeAll = kGCTypeScavenge | kGCTypeMarkSweepCompact
-};
-
-enum GCCallbackFlags {
- kNoGCCallbackFlags = 0,
- kGCCallbackFlagCompacted = 1 << 0
-};
-
-typedef void (*GCPrologueCallback)(GCType type, GCCallbackFlags flags);
-typedef void (*GCEpilogueCallback)(GCType type, GCCallbackFlags flags);
-
-typedef void (*GCCallback)();
-
-
-/**
- * Profiler modules.
- *
- * In V8, profiler consists of several modules: CPU profiler, and different
- * kinds of heap profiling. Each can be turned on / off independently.
- * When PROFILER_MODULE_HEAP_SNAPSHOT flag is passed to ResumeProfilerEx,
- * modules are enabled only temporarily for making a snapshot of the heap.
- */
-enum ProfilerModules {
- PROFILER_MODULE_NONE = 0,
- PROFILER_MODULE_CPU = 1,
- PROFILER_MODULE_HEAP_STATS = 1 << 1,
- PROFILER_MODULE_JS_CONSTRUCTORS = 1 << 2,
- PROFILER_MODULE_HEAP_SNAPSHOT = 1 << 16
-};
-
-
-/**
- * Collection of V8 heap information.
- *
- * Instances of this class can be passed to v8::V8::HeapStatistics to
- * get heap statistics from V8.
- */
-class V8EXPORT HeapStatistics {
- public:
- HeapStatistics();
- size_t total_heap_size() { return total_heap_size_; }
- size_t total_heap_size_executable() { return total_heap_size_executable_; }
- size_t used_heap_size() { return used_heap_size_; }
- size_t heap_size_limit() { return heap_size_limit_; }
-
- private:
- void set_total_heap_size(size_t size) { total_heap_size_ = size; }
- void set_total_heap_size_executable(size_t size) {
- total_heap_size_executable_ = size;
- }
- void set_used_heap_size(size_t size) { used_heap_size_ = size; }
- void set_heap_size_limit(size_t size) { heap_size_limit_ = size; }
-
- size_t total_heap_size_;
- size_t total_heap_size_executable_;
- size_t used_heap_size_;
- size_t heap_size_limit_;
-
- friend class V8;
-};
-
-
-class RetainedObjectInfo;
-
-/**
- * Isolate represents an isolated instance of the V8 engine. V8
- * isolates have completely separate states. Objects from one isolate
- * must not be used in other isolates. When V8 is initialized a
- * default isolate is implicitly created and entered. The embedder
- * can create additional isolates and use them in parallel in multiple
- * threads. An isolate can be entered by at most one thread at any
- * given time. The Locker/Unlocker API can be used to synchronize.
- */
-class V8EXPORT Isolate {
- public:
- /**
- * Stack-allocated class which sets the isolate for all operations
- * executed within a local scope.
- */
- class V8EXPORT Scope {
- public:
- explicit Scope(Isolate* isolate) : isolate_(isolate) {
- isolate->Enter();
- }
-
- ~Scope() { isolate_->Exit(); }
-
- private:
- Isolate* const isolate_;
-
- // Prevent copying of Scope objects.
- Scope(const Scope&);
- Scope& operator=(const Scope&);
- };
-
- /**
- * Creates a new isolate. Does not change the currently entered
- * isolate.
- *
- * When an isolate is no longer used its resources should be freed
- * by calling Dispose(). Using the delete operator is not allowed.
- */
- static Isolate* New();
-
- /**
- * Returns the entered isolate for the current thread or NULL in
- * case there is no current isolate.
- */
- static Isolate* GetCurrent();
-
- /**
- * Methods below this point require holding a lock (using Locker) in
- * a multi-threaded environment.
- */
-
- /**
- * Sets this isolate as the entered one for the current thread.
- * Saves the previously entered one (if any), so that it can be
- * restored when exiting. Re-entering an isolate is allowed.
- */
- void Enter();
-
- /**
- * Exits this isolate by restoring the previously entered one in the
- * current thread. The isolate may still stay the same, if it was
- * entered more than once.
- *
- * Requires: this == Isolate::GetCurrent().
- */
- void Exit();
-
- /**
- * Disposes the isolate. The isolate must not be entered by any
- * thread to be disposable.
- */
- void Dispose();
-
- private:
-
- Isolate();
- Isolate(const Isolate&);
- ~Isolate();
- Isolate& operator=(const Isolate&);
- void* operator new(size_t size);
- void operator delete(void*, size_t);
-};
-
-
-/**
- * Container class for static utility functions.
- */
-class V8EXPORT V8 {
- public:
- /** Set the callback to invoke in case of fatal errors. */
- static void SetFatalErrorHandler(FatalErrorCallback that);
-
- /**
- * Ignore out-of-memory exceptions.
- *
- * V8 running out of memory is treated as a fatal error by default.
- * This means that the fatal error handler is called and that V8 is
- * terminated.
- *
- * IgnoreOutOfMemoryException can be used to not treat a
- * out-of-memory situation as a fatal error. This way, the contexts
- * that did not cause the out of memory problem might be able to
- * continue execution.
- */
- static void IgnoreOutOfMemoryException();
-
- /**
- * Check if V8 is dead and therefore unusable. This is the case after
- * fatal errors such as out-of-memory situations.
- */
- static bool IsDead();
-
- /**
- * Adds a message listener.
- *
- * The same message listener can be added more than once and it that
- * case it will be called more than once for each message.
- */
- static bool AddMessageListener(MessageCallback that,
- Handle<Value> data = Handle<Value>());
-
- /**
- * Remove all message listeners from the specified callback function.
- */
- static void RemoveMessageListeners(MessageCallback that);
-
- /**
- * Tells V8 to capture current stack trace when uncaught exception occurs
- * and report it to the message listeners. The option is off by default.
- */
- static void SetCaptureStackTraceForUncaughtExceptions(
- bool capture,
- int frame_limit = 10,
- StackTrace::StackTraceOptions options = StackTrace::kOverview);
-
- /**
- * Sets V8 flags from a string.
- */
- static void SetFlagsFromString(const char* str, int length);
-
- /**
- * Sets V8 flags from the command line.
- */
- static void SetFlagsFromCommandLine(int* argc,
- char** argv,
- bool remove_flags);
-
- /** Get the version string. */
- static const char* GetVersion();
-
- /**
- * Enables the host application to provide a mechanism for recording
- * statistics counters.
- */
- static void SetCounterFunction(CounterLookupCallback);
-
- /**
- * Enables the host application to provide a mechanism for recording
- * histograms. The CreateHistogram function returns a
- * histogram which will later be passed to the AddHistogramSample
- * function.
- */
- static void SetCreateHistogramFunction(CreateHistogramCallback);
- static void SetAddHistogramSampleFunction(AddHistogramSampleCallback);
-
- /**
- * Enables the computation of a sliding window of states. The sliding
- * window information is recorded in statistics counters.
- */
- static void EnableSlidingStateWindow();
-
- /** Callback function for reporting failed access checks.*/
- static void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback);
-
- /**
- * Enables the host application to receive a notification before a
- * garbage collection. Allocations are not allowed in the
- * callback function, you therefore cannot manipulate objects (set
- * or delete properties for example) since it is possible such
- * operations will result in the allocation of objects. It is possible
- * to specify the GCType filter for your callback. But it is not possible to
- * register the same callback function two times with different
- * GCType filters.
- */
- static void AddGCPrologueCallback(
- GCPrologueCallback callback, GCType gc_type_filter = kGCTypeAll);
-
- /**
- * This function removes callback which was installed by
- * AddGCPrologueCallback function.
- */
- static void RemoveGCPrologueCallback(GCPrologueCallback callback);
-
- /**
- * The function is deprecated. Please use AddGCPrologueCallback instead.
- * Enables the host application to receive a notification before a
- * garbage collection. Allocations are not allowed in the
- * callback function, you therefore cannot manipulate objects (set
- * or delete properties for example) since it is possible such
- * operations will result in the allocation of objects.
- */
- static void SetGlobalGCPrologueCallback(GCCallback);
-
- /**
- * Enables the host application to receive a notification after a
- * garbage collection. Allocations are not allowed in the
- * callback function, you therefore cannot manipulate objects (set
- * or delete properties for example) since it is possible such
- * operations will result in the allocation of objects. It is possible
- * to specify the GCType filter for your callback. But it is not possible to
- * register the same callback function two times with different
- * GCType filters.
- */
- static void AddGCEpilogueCallback(
- GCEpilogueCallback callback, GCType gc_type_filter = kGCTypeAll);
-
- /**
- * This function removes callback which was installed by
- * AddGCEpilogueCallback function.
- */
- static void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
-
- /**
- * The function is deprecated. Please use AddGCEpilogueCallback instead.
- * Enables the host application to receive a notification after a
- * major garbage collection. Allocations are not allowed in the
- * callback function, you therefore cannot manipulate objects (set
- * or delete properties for example) since it is possible such
- * operations will result in the allocation of objects.
- */
- static void SetGlobalGCEpilogueCallback(GCCallback);
-
- /**
- * Enables the host application to provide a mechanism to be notified
- * and perform custom logging when V8 Allocates Executable Memory.
- */
- static void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action);
-
- /**
- * This function removes callback which was installed by
- * AddMemoryAllocationCallback function.
- */
- static void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
-
- /**
- * Allows the host application to group objects together. If one
- * object in the group is alive, all objects in the group are alive.
- * After each garbage collection, object groups are removed. It is
- * intended to be used in the before-garbage-collection callback
- * function, for instance to simulate DOM tree connections among JS
- * wrapper objects.
- * See v8-profiler.h for RetainedObjectInfo interface description.
- */
- static void AddObjectGroup(Persistent<Value>* objects,
- size_t length,
- RetainedObjectInfo* info = NULL);
-
- /**
- * Allows the host application to declare implicit references between
- * the objects: if |parent| is alive, all |children| are alive too.
- * After each garbage collection, all implicit references
- * are removed. It is intended to be used in the before-garbage-collection
- * callback function.
- */
- static void AddImplicitReferences(Persistent<Object> parent,
- Persistent<Value>* children,
- size_t length);
-
- /**
- * Initializes from snapshot if possible. Otherwise, attempts to
- * initialize from scratch. This function is called implicitly if
- * you use the API without calling it first.
- */
- static bool Initialize();
-
- /**
- * Adjusts the amount of registered external memory. Used to give
- * V8 an indication of the amount of externally allocated memory
- * that is kept alive by JavaScript objects. V8 uses this to decide
- * when to perform global garbage collections. Registering
- * externally allocated memory will trigger global garbage
- * collections more often than otherwise in an attempt to garbage
- * collect the JavaScript objects keeping the externally allocated
- * memory alive.
- *
- * \param change_in_bytes the change in externally allocated memory
- * that is kept alive by JavaScript objects.
- * \returns the adjusted value.
- */
- static int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
-
- /**
- * Suspends recording of tick samples in the profiler.
- * When the V8 profiling mode is enabled (usually via command line
- * switches) this function suspends recording of tick samples.
- * Profiling ticks are discarded until ResumeProfiler() is called.
- *
- * See also the --prof and --prof_auto command line switches to
- * enable V8 profiling.
- */
- static void PauseProfiler();
-
- /**
- * Resumes recording of tick samples in the profiler.
- * See also PauseProfiler().
- */
- static void ResumeProfiler();
-
- /**
- * Return whether profiler is currently paused.
- */
- static bool IsProfilerPaused();
-
- /**
- * Resumes specified profiler modules. Can be called several times to
- * mark the opening of a profiler events block with the given tag.
- *
- * "ResumeProfiler" is equivalent to "ResumeProfilerEx(PROFILER_MODULE_CPU)".
- * See ProfilerModules enum.
- *
- * \param flags Flags specifying profiler modules.
- * \param tag Profile tag.
- */
- static void ResumeProfilerEx(int flags, int tag = 0);
-
- /**
- * Pauses specified profiler modules. Each call to "PauseProfilerEx" closes
- * a block of profiler events opened by a call to "ResumeProfilerEx" with the
- * same tag value. There is no need for blocks to be properly nested.
- * The profiler is paused when the last opened block is closed.
- *
- * "PauseProfiler" is equivalent to "PauseProfilerEx(PROFILER_MODULE_CPU)".
- * See ProfilerModules enum.
- *
- * \param flags Flags specifying profiler modules.
- * \param tag Profile tag.
- */
- static void PauseProfilerEx(int flags, int tag = 0);
-
- /**
- * Returns active (resumed) profiler modules.
- * See ProfilerModules enum.
- *
- * \returns active profiler modules.
- */
- static int GetActiveProfilerModules();
-
- /**
- * If logging is performed into a memory buffer (via --logfile=*), allows to
- * retrieve previously written messages. This can be used for retrieving
- * profiler log data in the application. This function is thread-safe.
- *
- * Caller provides a destination buffer that must exist during GetLogLines
- * call. Only whole log lines are copied into the buffer.
- *
- * \param from_pos specified a point in a buffer to read from, 0 is the
- * beginning of a buffer. It is assumed that caller updates its current
- * position using returned size value from the previous call.
- * \param dest_buf destination buffer for log data.
- * \param max_size size of the destination buffer.
- * \returns actual size of log data copied into buffer.
- */
- static int GetLogLines(int from_pos, char* dest_buf, int max_size);
-
- /**
- * The minimum allowed size for a log lines buffer. If the size of
- * the buffer given will not be enough to hold a line of the maximum
- * length, an attempt to find a log line end in GetLogLines will
- * fail, and an empty result will be returned.
- */
- static const int kMinimumSizeForLogLinesBuffer = 2048;
-
- /**
- * Retrieve the V8 thread id of the calling thread.
- *
- * The thread id for a thread should only be retrieved after the V8
- * lock has been acquired with a Locker object with that thread.
- */
- static int GetCurrentThreadId();
-
- /**
- * Forcefully terminate execution of a JavaScript thread. This can
- * be used to terminate long-running scripts.
- *
- * TerminateExecution should only be called when then V8 lock has
- * been acquired with a Locker object. Therefore, in order to be
- * able to terminate long-running threads, preemption must be
- * enabled to allow the user of TerminateExecution to acquire the
- * lock.
- *
- * The termination is achieved by throwing an exception that is
- * uncatchable by JavaScript exception handlers. Termination
- * exceptions act as if they were caught by a C++ TryCatch exception
- * handlers. If forceful termination is used, any C++ TryCatch
- * exception handler that catches an exception should check if that
- * exception is a termination exception and immediately return if
- * that is the case. Returning immediately in that case will
- * continue the propagation of the termination exception if needed.
- *
- * The thread id passed to TerminateExecution must have been
- * obtained by calling GetCurrentThreadId on the thread in question.
- *
- * \param thread_id The thread id of the thread to terminate.
- */
- static void TerminateExecution(int thread_id);
-
- /**
- * Forcefully terminate the current thread of JavaScript execution
- * in the given isolate. If no isolate is provided, the default
- * isolate is used.
- *
- * This method can be used by any thread even if that thread has not
- * acquired the V8 lock with a Locker object.
- *
- * \param isolate The isolate in which to terminate the current JS execution.
- */
- static void TerminateExecution(Isolate* isolate = NULL);
-
- /**
- * Is V8 terminating JavaScript execution.
- *
- * Returns true if JavaScript execution is currently terminating
- * because of a call to TerminateExecution. In that case there are
- * still JavaScript frames on the stack and the termination
- * exception is still active.
- */
- static bool IsExecutionTerminating();
-
- /**
- * Releases any resources used by v8 and stops any utility threads
- * that may be running. Note that disposing v8 is permanent, it
- * cannot be reinitialized.
- *
- * It should generally not be necessary to dispose v8 before exiting
- * a process, this should happen automatically. It is only necessary
- * to use if the process needs the resources taken up by v8.
- */
- static bool Dispose();
-
- /**
- * Get statistics about the heap memory usage.
- */
- static void GetHeapStatistics(HeapStatistics* heap_statistics);
-
- /**
- * Optional notification that the embedder is idle.
- * V8 uses the notification to reduce memory footprint.
- * This call can be used repeatedly if the embedder remains idle.
- * Returns true if the embedder should stop calling IdleNotification
- * until real work has been done. This indicates that V8 has done
- * as much cleanup as it will be able to do.
- */
- static bool IdleNotification();
-
- /**
- * Optional notification that the system is running low on memory.
- * V8 uses these notifications to attempt to free memory.
- */
- static void LowMemoryNotification();
-
- /**
- * Optional notification that a context has been disposed. V8 uses
- * these notifications to guide the GC heuristic. Returns the number
- * of context disposals - including this one - since the last time
- * V8 had a chance to clean up.
- */
- static int ContextDisposedNotification();
-
-#ifdef QT_BUILD_SCRIPT_LIB
- /**
- * Will call the callback with the data as parameter as soon as possible
- * from the thread running the script
- * This method can be used by any thread even if that thread has not
- * acquired the V8 lock with a Locker object.
- */
- static void ExecuteUserCallback(UserCallback Callback, void *data);
-#endif
- private:
- V8();
-
- static internal::Object** GlobalizeReference(internal::Object** handle);
- static void DisposeGlobal(internal::Object** global_handle);
- static void MakeWeak(internal::Object** global_handle,
- void* data,
- WeakReferenceCallback);
- static void ClearWeak(internal::Object** global_handle);
- static bool IsGlobalNearDeath(internal::Object** global_handle);
- static bool IsGlobalWeak(internal::Object** global_handle);
- static void SetWrapperClassId(internal::Object** global_handle,
- uint16_t class_id);
-
- template <class T> friend class Handle;
- template <class T> friend class Local;
- template <class T> friend class Persistent;
- friend class Context;
-};
-
-
-/**
- * An external exception handler.
- */
-class V8EXPORT TryCatch {
- public:
-
- /**
- * Creates a new try/catch block and registers it with v8.
- */
- TryCatch();
-
- /**
- * Unregisters and deletes this try/catch block.
- */
- ~TryCatch();
-
- /**
- * Returns true if an exception has been caught by this try/catch block.
- */
- bool HasCaught() const;
-
- /**
- * For certain types of exceptions, it makes no sense to continue
- * execution.
- *
- * Currently, the only type of exception that can be caught by a
- * TryCatch handler and for which it does not make sense to continue
- * is termination exception. Such exceptions are thrown when the
- * TerminateExecution methods are called to terminate a long-running
- * script.
- *
- * If CanContinue returns false, the correct action is to perform
- * any C++ cleanup needed and then return.
- */
- bool CanContinue() const;
-
- /**
- * Throws the exception caught by this TryCatch in a way that avoids
- * it being caught again by this same TryCatch. As with ThrowException
- * it is illegal to execute any JavaScript operations after calling
- * ReThrow; the caller must return immediately to where the exception
- * is caught.
- */
- Handle<Value> ReThrow();
-
- /**
- * Returns the exception caught by this try/catch block. If no exception has
- * been caught an empty handle is returned.
- *
- * The returned handle is valid until this TryCatch block has been destroyed.
- */
- Local<Value> Exception() const;
-
- /**
- * Returns the .stack property of the thrown object. If no .stack
- * property is present an empty handle is returned.
- */
- Local<Value> StackTrace() const;
-
- /**
- * Returns the message associated with this exception. If there is
- * no message associated an empty handle is returned.
- *
- * The returned handle is valid until this TryCatch block has been
- * destroyed.
- */
- Local<v8::Message> Message() const;
-
- /**
- * Clears any exceptions that may have been caught by this try/catch block.
- * After this method has been called, HasCaught() will return false.
- *
- * It is not necessary to clear a try/catch block before using it again; if
- * another exception is thrown the previously caught exception will just be
- * overwritten. However, it is often a good idea since it makes it easier
- * to determine which operation threw a given exception.
- */
- void Reset();
-
- /**
- * Set verbosity of the external exception handler.
- *
- * By default, exceptions that are caught by an external exception
- * handler are not reported. Call SetVerbose with true on an
- * external exception handler to have exceptions caught by the
- * handler reported as if they were not caught.
- */
- void SetVerbose(bool value);
-
- /**
- * Set whether or not this TryCatch should capture a Message object
- * which holds source information about where the exception
- * occurred. True by default.
- */
- void SetCaptureMessage(bool value);
-
- private:
- void* next_;
- void* exception_;
- void* message_;
- bool is_verbose_ : 1;
- bool can_continue_ : 1;
- bool capture_message_ : 1;
- bool rethrow_ : 1;
-
- friend class v8::internal::Isolate;
-};
-
-
-// --- C o n t e x t ---
-
-
-/**
- * Ignore
- */
-class V8EXPORT ExtensionConfiguration {
- public:
- ExtensionConfiguration(int name_count, const char* names[])
- : name_count_(name_count), names_(names) { }
- private:
- friend class ImplementationUtilities;
- int name_count_;
- const char** names_;
-};
-
-
-/**
- * A sandboxed execution context with its own set of built-in objects
- * and functions.
- */
-class V8EXPORT Context {
- public:
- /**
- * Returns the global proxy object or global object itself for
- * detached contexts.
- *
- * Global proxy object is a thin wrapper whose prototype points to
- * actual context's global object with the properties like Object, etc.
- * This is done that way for security reasons (for more details see
- * https://wiki.mozilla.org/Gecko:SplitWindow).
- *
- * Please note that changes to global proxy object prototype most probably
- * would break VM---v8 expects only global object as a prototype of
- * global proxy object.
- *
- * If DetachGlobal() has been invoked, Global() would return actual global
- * object until global is reattached with ReattachGlobal().
- */
- Local<Object> Global();
-
- /**
- * Detaches the global object from its context before
- * the global object can be reused to create a new context.
- */
- void DetachGlobal();
-
- /**
- * Reattaches a global object to a context. This can be used to
- * restore the connection between a global object and a context
- * after DetachGlobal has been called.
- *
- * \param global_object The global object to reattach to the
- * context. For this to work, the global object must be the global
- * object that was associated with this context before a call to
- * DetachGlobal.
- */
- void ReattachGlobal(Handle<Object> global_object);
-
- /** Creates a new context.
- *
- * Returns a persistent handle to the newly allocated context. This
- * persistent handle has to be disposed when the context is no
- * longer used so the context can be garbage collected.
- *
- * \param extensions An optional extension configuration containing
- * the extensions to be installed in the newly created context.
- *
- * \param global_template An optional object template from which the
- * global object for the newly created context will be created.
- *
- * \param global_object An optional global object to be reused for
- * the newly created context. This global object must have been
- * created by a previous call to Context::New with the same global
- * template. The state of the global object will be completely reset
- * and only object identify will remain.
- */
- static Persistent<Context> New(
- ExtensionConfiguration* extensions = NULL,
- Handle<ObjectTemplate> global_template = Handle<ObjectTemplate>(),
- Handle<Value> global_object = Handle<Value>());
-
- /** Returns the last entered context. */
- static Local<Context> GetEntered();
-
- /** Returns the context that is on the top of the stack. */
- static Local<Context> GetCurrent();
-
- /**
- * Returns the context of the calling JavaScript code. That is the
- * context of the top-most JavaScript frame. If there are no
- * JavaScript frames an empty handle is returned.
- */
- static Local<Context> GetCalling();
-
- /**
- * Sets the security token for the context. To access an object in
- * another context, the security tokens must match.
- */
- void SetSecurityToken(Handle<Value> token);
-
- /** Restores the security token to the default value. */
- void UseDefaultSecurityToken();
-
- /** Returns the security token of this context.*/
- Handle<Value> GetSecurityToken();
-
- /**
- * Enter this context. After entering a context, all code compiled
- * and run is compiled and run in this context. If another context
- * is already entered, this old context is saved so it can be
- * restored when the new context is exited.
- */
- void Enter();
-
- /**
- * Exit this context. Exiting the current context restores the
- * context that was in place when entering the current context.
- */
- void Exit();
-
- /** Returns true if the context has experienced an out of memory situation. */
- bool HasOutOfMemoryException();
-
- /** Returns true if V8 has a current context. */
- static bool InContext();
-
- /**
- * Associate an additional data object with the context. This is mainly used
- * with the debugger to provide additional information on the context through
- * the debugger API.
- */
- void SetData(Handle<String> data);
- Local<Value> GetData();
-
- /**
- * Stack-allocated class which sets the execution context for all
- * operations executed within a local scope.
- */
- class Scope {
- public:
- inline Scope(Handle<Context> context) : context_(context) {
- context_->Enter();
- }
- inline ~Scope() { context_->Exit(); }
- private:
- Handle<Context> context_;
- };
-
-#ifdef QT_BUILD_SCRIPT_LIB
- /**
- * Creates a new scope context.
- *
- * The currently entered context will be the new context's previous
- * scope.
- *
- * Properties on the given object, scope_object, are accessible from
- * the new scope.
- */
- static Local<Context> NewScopeContext(Handle<Object> scope_object);
-
- /**
- * Creates a new function context.
- *
- * The currently entered context will be the new context's previous
- * scope.
- */
- static Local<Context> NewFunctionContext();
-
- /**
- * Returns the extension object of this context.
- *
- * For a scope context, the extension object is the object that was
- * passed to NewScopeContext().
- *
- * For a function context, the extension object is the object that's
- * used to hold the context's dynamically instantiated variables
- * (e.g. by eval()).
- */
- Local<Object> GetExtensionObject();
-
- /**
- * Set the extention object
- */
- void SetExtensionObject(Handle<Object>);
-
- /**
- * Gets the previous context.
- */
- Local<Context> GetPrevious();
-
- /**
- * Gets the context corresponding to the top-most JavaScript caller.
- */
- static Local<Context> GetCallerContext();
-#endif
-
- private:
- friend class Value;
- friend class Script;
- friend class Object;
- friend class Function;
-};
-
-
-/**
- * Multiple threads in V8 are allowed, but only one thread at a time
- * is allowed to use any given V8 isolate. See Isolate class
- * comments. The definition of 'using V8 isolate' includes
- * accessing handles or holding onto object pointers obtained
- * from V8 handles while in the particular V8 isolate. It is up
- * to the user of V8 to ensure (perhaps with locking) that this
- * constraint is not violated.
- *
- * More then one thread and multiple V8 isolates can be used
- * without any locking if each isolate is created and accessed
- * by a single thread only. For example, one thread can use
- * multiple isolates or multiple threads can each create and run
- * their own isolate.
- *
- * If you wish to start using V8 isolate in more then one thread
- * you can do this by constructing a v8::Locker object to guard
- * access to the isolate. After the code using V8 has completed
- * for the current thread you can call the destructor. This can
- * be combined with C++ scope-based construction as follows
- * (assumes the default isolate that is used if not specified as
- * a parameter for the Locker):
- *
- * \code
- * ...
- * {
- * v8::Locker locker;
- * ...
- * // Code using V8 goes here.
- * ...
- * } // Destructor called here
- * \endcode
- *
- * If you wish to stop using V8 in a thread A you can do this by either
- * by destroying the v8::Locker object as above or by constructing a
- * v8::Unlocker object:
- *
- * \code
- * {
- * v8::Unlocker unlocker;
- * ...
- * // Code not using V8 goes here while V8 can run in another thread.
- * ...
- * } // Destructor called here.
- * \endcode
- *
- * The Unlocker object is intended for use in a long-running callback
- * from V8, where you want to release the V8 lock for other threads to
- * use.
- *
- * The v8::Locker is a recursive lock. That is, you can lock more than
- * once in a given thread. This can be useful if you have code that can
- * be called either from code that holds the lock or from code that does
- * not. The Unlocker is not recursive so you can not have several
- * Unlockers on the stack at once, and you can not use an Unlocker in a
- * thread that is not inside a Locker's scope.
- *
- * An unlocker will unlock several lockers if it has to and reinstate
- * the correct depth of locking on its destruction. eg.:
- *
- * \code
- * // V8 not locked.
- * {
- * v8::Locker locker;
- * // V8 locked.
- * {
- * v8::Locker another_locker;
- * // V8 still locked (2 levels).
- * {
- * v8::Unlocker unlocker;
- * // V8 not locked.
- * }
- * // V8 locked again (2 levels).
- * }
- * // V8 still locked (1 level).
- * }
- * // V8 Now no longer locked.
- * \endcode
- */
-class V8EXPORT Unlocker {
- public:
- Unlocker();
- ~Unlocker();
-};
-
-
-class V8EXPORT Locker {
- public:
- Locker();
- ~Locker();
-
- /**
- * Start preemption.
- *
- * When preemption is started, a timer is fired every n milli seconds
- * that will switch between multiple threads that are in contention
- * for the V8 lock.
- */
- static void StartPreemption(int every_n_ms);
-
- /**
- * Stop preemption.
- */
- static void StopPreemption();
-
- /**
- * Returns whether or not the locker is locked by the current thread.
- */
- static bool IsLocked();
-
- /**
- * Returns whether v8::Locker is being used by this V8 instance.
- */
- static bool IsActive() { return active_; }
-
- private:
- bool has_lock_;
- bool top_level_;
-
- static bool active_;
-
- // Disallow copying and assigning.
- Locker(const Locker&);
- void operator=(const Locker&);
-};
-
-
-/**
- * An interface for exporting data from V8, using "push" model.
- */
-class V8EXPORT OutputStream { // NOLINT
- public:
- enum OutputEncoding {
- kAscii = 0 // 7-bit ASCII.
- };
- enum WriteResult {
- kContinue = 0,
- kAbort = 1
- };
- virtual ~OutputStream() {}
- /** Notify about the end of stream. */
- virtual void EndOfStream() = 0;
- /** Get preferred output chunk size. Called only once. */
- virtual int GetChunkSize() { return 1024; }
- /** Get preferred output encoding. Called only once. */
- virtual OutputEncoding GetOutputEncoding() { return kAscii; }
- /**
- * Writes the next chunk of snapshot data into the stream. Writing
- * can be stopped by returning kAbort as function result. EndOfStream
- * will not be called in case writing was aborted.
- */
- virtual WriteResult WriteAsciiChunk(char* data, int size) = 0;
-};
-
-
-/**
- * An interface for reporting progress and controlling long-running
- * activities.
- */
-class V8EXPORT ActivityControl { // NOLINT
- public:
- enum ControlOption {
- kContinue = 0,
- kAbort = 1
- };
- virtual ~ActivityControl() {}
- /**
- * Notify about current progress. The activity can be stopped by
- * returning kAbort as the callback result.
- */
- virtual ControlOption ReportProgressValue(int done, int total) = 0;
-};
-
-
-// --- I m p l e m e n t a t i o n ---
-
-
-namespace internal {
-
-static const int kApiPointerSize = sizeof(void*); // NOLINT
-static const int kApiIntSize = sizeof(int); // NOLINT
-
-// Tag information for HeapObject.
-const int kHeapObjectTag = 1;
-const int kHeapObjectTagSize = 2;
-const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
-
-// Tag information for Smi.
-const int kSmiTag = 0;
-const int kSmiTagSize = 1;
-const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
-
-template <size_t ptr_size> struct SmiTagging;
-
-// Smi constants for 32-bit systems.
-template <> struct SmiTagging<4> {
- static const int kSmiShiftSize = 0;
- static const int kSmiValueSize = 31;
- static inline int SmiToInt(internal::Object* value) {
- int shift_bits = kSmiTagSize + kSmiShiftSize;
- // Throw away top 32 bits and shift down (requires >> to be sign extending).
- return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> shift_bits;
- }
-
- // For 32-bit systems any 2 bytes aligned pointer can be encoded as smi
- // with a plain reinterpret_cast.
- static const uintptr_t kEncodablePointerMask = 0x1;
- static const int kPointerToSmiShift = 0;
-};
-
-// Smi constants for 64-bit systems.
-template <> struct SmiTagging<8> {
- static const int kSmiShiftSize = 31;
- static const int kSmiValueSize = 32;
- static inline int SmiToInt(internal::Object* value) {
- int shift_bits = kSmiTagSize + kSmiShiftSize;
- // Shift down and throw away top 32 bits.
- return static_cast<int>(reinterpret_cast<intptr_t>(value) >> shift_bits);
- }
-
- // To maximize the range of pointers that can be encoded
- // in the available 32 bits, we require them to be 8 bytes aligned.
- // This gives 2 ^ (32 + 3) = 32G address space covered.
- // It might be not enough to cover stack allocated objects on some platforms.
- static const int kPointerAlignment = 3;
-
- static const uintptr_t kEncodablePointerMask =
- ~(uintptr_t(0xffffffff) << kPointerAlignment);
-
- static const int kPointerToSmiShift =
- kSmiTagSize + kSmiShiftSize - kPointerAlignment;
-};
-
-typedef SmiTagging<kApiPointerSize> PlatformSmiTagging;
-const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
-const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
-const uintptr_t kEncodablePointerMask =
- PlatformSmiTagging::kEncodablePointerMask;
-const int kPointerToSmiShift = PlatformSmiTagging::kPointerToSmiShift;
-
-template <size_t ptr_size> struct InternalConstants;
-
-// Internal constants for 32-bit systems.
-template <> struct InternalConstants<4> {
- static const int kStringResourceOffset = 3 * kApiPointerSize;
-};
-
-// Internal constants for 64-bit systems.
-template <> struct InternalConstants<8> {
- static const int kStringResourceOffset = 3 * kApiPointerSize;
-};
-
-/**
- * This class exports constants and functionality from within v8 that
- * is necessary to implement inline functions in the v8 api. Don't
- * depend on functions and constants defined here.
- */
-class Internals {
- public:
-
- // These values match non-compiler-dependent values defined within
- // the implementation of v8.
- static const int kHeapObjectMapOffset = 0;
- static const int kMapInstanceTypeOffset = 1 * kApiPointerSize + kApiIntSize;
- static const int kStringResourceOffset =
- InternalConstants<kApiPointerSize>::kStringResourceOffset;
-
- static const int kProxyProxyOffset = kApiPointerSize;
- static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
- static const int kFullStringRepresentationMask = 0x07;
- static const int kExternalTwoByteRepresentationTag = 0x02;
-
- static const int kJSObjectType = 0xa0;
- static const int kFirstNonstringType = 0x80;
- static const int kProxyType = 0x85;
-
- static inline bool HasHeapObjectTag(internal::Object* value) {
- return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
- kHeapObjectTag);
- }
-
- static inline bool HasSmiTag(internal::Object* value) {
- return ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag);
- }
-
- static inline int SmiValue(internal::Object* value) {
- return PlatformSmiTagging::SmiToInt(value);
- }
-
- static inline int GetInstanceType(internal::Object* obj) {
- typedef internal::Object O;
- O* map = ReadField<O*>(obj, kHeapObjectMapOffset);
- return ReadField<uint8_t>(map, kMapInstanceTypeOffset);
- }
-
- static inline void* GetExternalPointerFromSmi(internal::Object* value) {
- const uintptr_t address = reinterpret_cast<uintptr_t>(value);
- return reinterpret_cast<void*>(address >> kPointerToSmiShift);
- }
-
- static inline void* GetExternalPointer(internal::Object* obj) {
- if (HasSmiTag(obj)) {
- return GetExternalPointerFromSmi(obj);
- } else if (GetInstanceType(obj) == kProxyType) {
- return ReadField<void*>(obj, kProxyProxyOffset);
- } else {
- return NULL;
- }
- }
-
- static inline bool IsExternalTwoByteString(int instance_type) {
- int representation = (instance_type & kFullStringRepresentationMask);
- return representation == kExternalTwoByteRepresentationTag;
- }
-
- template <typename T>
- static inline T ReadField(Object* ptr, int offset) {
- uint8_t* addr = reinterpret_cast<uint8_t*>(ptr) + offset - kHeapObjectTag;
- return *reinterpret_cast<T*>(addr);
- }
-
- static inline bool CanCastToHeapObject(void* o) { return false; }
- static inline bool CanCastToHeapObject(Context* o) { return true; }
- static inline bool CanCastToHeapObject(String* o) { return true; }
- static inline bool CanCastToHeapObject(Object* o) { return true; }
- static inline bool CanCastToHeapObject(Message* o) { return true; }
- static inline bool CanCastToHeapObject(StackTrace* o) { return true; }
- static inline bool CanCastToHeapObject(StackFrame* o) { return true; }
-};
-
-} // namespace internal
-
-
-template <class T>
-Handle<T>::Handle() : val_(0) { }
-
-
-template <class T>
-Local<T>::Local() : Handle<T>() { }
-
-
-template <class T>
-Local<T> Local<T>::New(Handle<T> that) {
- if (that.IsEmpty()) return Local<T>();
- T* that_ptr = *that;
- internal::Object** p = reinterpret_cast<internal::Object**>(that_ptr);
- if (internal::Internals::CanCastToHeapObject(that_ptr)) {
- return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(
- reinterpret_cast<internal::HeapObject*>(*p))));
- }
- return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(*p)));
-}
-
-
-template <class T>
-Persistent<T> Persistent<T>::New(Handle<T> that) {
- if (that.IsEmpty()) return Persistent<T>();
- internal::Object** p = reinterpret_cast<internal::Object**>(*that);
- return Persistent<T>(reinterpret_cast<T*>(V8::GlobalizeReference(p)));
-}
-
-
-template <class T>
-bool Persistent<T>::IsNearDeath() const {
- if (this->IsEmpty()) return false;
- return V8::IsGlobalNearDeath(reinterpret_cast<internal::Object**>(**this));
-}
-
-
-template <class T>
-bool Persistent<T>::IsWeak() const {
- if (this->IsEmpty()) return false;
- return V8::IsGlobalWeak(reinterpret_cast<internal::Object**>(**this));
-}
-
-
-template <class T>
-void Persistent<T>::Dispose() {
- if (this->IsEmpty()) return;
- V8::DisposeGlobal(reinterpret_cast<internal::Object**>(**this));
-}
-
-
-template <class T>
-Persistent<T>::Persistent() : Handle<T>() { }
-
-template <class T>
-void Persistent<T>::MakeWeak(void* parameters, WeakReferenceCallback callback) {
- V8::MakeWeak(reinterpret_cast<internal::Object**>(**this),
- parameters,
- callback);
-}
-
-template <class T>
-void Persistent<T>::ClearWeak() {
- V8::ClearWeak(reinterpret_cast<internal::Object**>(**this));
-}
-
-template <class T>
-void Persistent<T>::SetWrapperClassId(uint16_t class_id) {
- V8::SetWrapperClassId(reinterpret_cast<internal::Object**>(**this), class_id);
-}
-
-Arguments::Arguments(internal::Object** implicit_args,
- internal::Object** values, int length,
- bool is_construct_call)
- : implicit_args_(implicit_args),
- values_(values),
- length_(length),
- is_construct_call_(is_construct_call) { }
-
-
-Local<Value> Arguments::operator[](int i) const {
- if (i < 0 || length_ <= i) return Local<Value>(*Undefined());
- return Local<Value>(reinterpret_cast<Value*>(values_ - i));
-}
-
-
-Local<Function> Arguments::Callee() const {
- return Local<Function>(reinterpret_cast<Function*>(
- &implicit_args_[kCalleeIndex]));
-}
-
-
-Local<Object> Arguments::This() const {
- return Local<Object>(reinterpret_cast<Object*>(values_ + 1));
-}
-
-
-Local<Object> Arguments::Holder() const {
- return Local<Object>(reinterpret_cast<Object*>(
- &implicit_args_[kHolderIndex]));
-}
-
-
-Local<Value> Arguments::Data() const {
- return Local<Value>(reinterpret_cast<Value*>(&implicit_args_[kDataIndex]));
-}
-
-
-bool Arguments::IsConstructCall() const {
- return is_construct_call_;
-}
-
-
-int Arguments::Length() const {
- return length_;
-}
-
-
-template <class T>
-Local<T> HandleScope::Close(Handle<T> value) {
- internal::Object** before = reinterpret_cast<internal::Object**>(*value);
- internal::Object** after = RawClose(before);
- return Local<T>(reinterpret_cast<T*>(after));
-}
-
-Handle<Value> ScriptOrigin::ResourceName() const {
- return resource_name_;
-}
-
-
-Handle<Integer> ScriptOrigin::ResourceLineOffset() const {
- return resource_line_offset_;
-}
-
-
-Handle<Integer> ScriptOrigin::ResourceColumnOffset() const {
- return resource_column_offset_;
-}
-
-
-Handle<Boolean> Boolean::New(bool value) {
- return value ? True() : False();
-}
-
-
-void Template::Set(const char* name, v8::Handle<Data> value) {
- Set(v8::String::New(name), value);
-}
-
-
-Local<Value> Object::GetInternalField(int index) {
-#ifndef V8_ENABLE_CHECKS
- Local<Value> quick_result = UncheckedGetInternalField(index);
- if (!quick_result.IsEmpty()) return quick_result;
-#endif
- return CheckedGetInternalField(index);
-}
-
-
-Local<Value> Object::UncheckedGetInternalField(int index) {
- typedef internal::Object O;
- typedef internal::Internals I;
- O* obj = *reinterpret_cast<O**>(this);
- if (I::GetInstanceType(obj) == I::kJSObjectType) {
- // If the object is a plain JSObject, which is the common case,
- // we know where to find the internal fields and can return the
- // value directly.
- int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index);
- O* value = I::ReadField<O*>(obj, offset);
- O** result = HandleScope::CreateHandle(value);
- return Local<Value>(reinterpret_cast<Value*>(result));
- } else {
- return Local<Value>();
- }
-}
-
-
-void* External::Unwrap(Handle<v8::Value> obj) {
-#ifdef V8_ENABLE_CHECKS
- return FullUnwrap(obj);
-#else
- return QuickUnwrap(obj);
-#endif
-}
-
-
-void* External::QuickUnwrap(Handle<v8::Value> wrapper) {
- typedef internal::Object O;
- O* obj = *reinterpret_cast<O**>(const_cast<v8::Value*>(*wrapper));
- return internal::Internals::GetExternalPointer(obj);
-}
-
-
-void* Object::GetPointerFromInternalField(int index) {
- typedef internal::Object O;
- typedef internal::Internals I;
-
- O* obj = *reinterpret_cast<O**>(this);
-
- if (I::GetInstanceType(obj) == I::kJSObjectType) {
- // If the object is a plain JSObject, which is the common case,
- // we know where to find the internal fields and can return the
- // value directly.
- int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index);
- O* value = I::ReadField<O*>(obj, offset);
- return I::GetExternalPointer(value);
- }
-
- return SlowGetPointerFromInternalField(index);
-}
-
-
-String* String::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<String*>(value);
-}
-
-
-String::ExternalStringResource* String::GetExternalStringResource() const {
- typedef internal::Object O;
- typedef internal::Internals I;
- O* obj = *reinterpret_cast<O**>(const_cast<String*>(this));
- String::ExternalStringResource* result;
- if (I::IsExternalTwoByteString(I::GetInstanceType(obj))) {
- void* value = I::ReadField<void*>(obj, I::kStringResourceOffset);
- result = reinterpret_cast<String::ExternalStringResource*>(value);
- } else {
- result = NULL;
- }
-#ifdef V8_ENABLE_CHECKS
- VerifyExternalStringResource(result);
-#endif
- return result;
-}
-
-
-bool Value::IsString() const {
-#ifdef V8_ENABLE_CHECKS
- return FullIsString();
-#else
- return QuickIsString();
-#endif
-}
-
-bool Value::QuickIsString() const {
- typedef internal::Object O;
- typedef internal::Internals I;
- O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this));
- if (!I::HasHeapObjectTag(obj)) return false;
- return (I::GetInstanceType(obj) < I::kFirstNonstringType);
-}
-
-
-Number* Number::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Number*>(value);
-}
-
-
-Integer* Integer::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Integer*>(value);
-}
-
-
-Date* Date::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Date*>(value);
-}
-
-
-RegExp* RegExp::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<RegExp*>(value);
-}
-
-
-Object* Object::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Object*>(value);
-}
-
-
-Array* Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Array*>(value);
-}
-
-
-Function* Function::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Function*>(value);
-}
-
-
-External* External::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<External*>(value);
-}
-
-
-Local<Value> AccessorInfo::Data() const {
- return Local<Value>(reinterpret_cast<Value*>(&args_[-2]));
-}
-
-
-Local<Object> AccessorInfo::This() const {
- return Local<Object>(reinterpret_cast<Object*>(&args_[0]));
-}
-
-
-Local<Object> AccessorInfo::Holder() const {
- return Local<Object>(reinterpret_cast<Object*>(&args_[-1]));
-}
-
-
-/**
- * \example shell.cc
- * A simple shell that takes a list of expressions on the
- * command-line and executes them.
- */
-
-
-/**
- * \example process.cc
- */
-
-
-} // namespace v8
-
-
-#undef V8EXPORT
-#undef TYPE_CHECK
-
-
-#endif // V8_H_
diff --git a/src/3rdparty/v8/include/v8stdint.h b/src/3rdparty/v8/include/v8stdint.h
deleted file mode 100644
index 50b4f29..0000000
--- a/src/3rdparty/v8/include/v8stdint.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Load definitions of standard types.
-
-#ifndef V8STDINT_H_
-#define V8STDINT_H_
-
-#include <stdio.h>
-
-#if defined(_WIN32) && !defined(__MINGW32__)
-
-typedef signed char int8_t;
-typedef unsigned char uint8_t;
-typedef short int16_t; // NOLINT
-typedef unsigned short uint16_t; // NOLINT
-typedef int int32_t;
-typedef unsigned int uint32_t;
-typedef __int64 int64_t;
-typedef unsigned __int64 uint64_t;
-// intptr_t and friends are defined in crtdefs.h through stdio.h.
-
-#else
-
-#include <stdint.h>
-
-#endif
-
-#endif // V8STDINT_H_
diff --git a/src/3rdparty/v8/preparser/preparser-process.cc b/src/3rdparty/v8/preparser/preparser-process.cc
deleted file mode 100644
index fb6e386..0000000
--- a/src/3rdparty/v8/preparser/preparser-process.cc
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-#include <stdarg.h>
-#include <stdio.h>
-
-#include "../include/v8stdint.h"
-#include "../include/v8-preparser.h"
-
-// This file is only used for testing the stand-alone preparser
-// library.
-// The first (and only) argument must be the path of a JavaScript file.
-// This file is preparsed and the resulting preparser data is written
-// to stdout. Diagnostic output is output on stderr.
-// The file must contain only ASCII characters (UTF-8 isn't supported).
-// The file is read into memory, so it should have a reasonable size.
-
-
-// Adapts an ASCII string to the UnicodeInputStream interface.
-class AsciiInputStream : public v8::UnicodeInputStream {
- public:
- AsciiInputStream(uint8_t* buffer, size_t length)
- : buffer_(buffer),
- end_offset_(static_cast<int>(length)),
- offset_(0) { }
-
- virtual ~AsciiInputStream() { }
-
- virtual void PushBack(int32_t ch) {
- offset_--;
-#ifdef DEBUG
- if (offset_ < 0 ||
- (ch != ((offset_ >= end_offset_) ? -1 : buffer_[offset_]))) {
- fprintf(stderr, "Invalid pushback: '%c' at offset %d.", ch, offset_);
- exit(1);
- }
-#endif
- }
-
- virtual int32_t Next() {
- if (offset_ >= end_offset_) {
- offset_++; // Increment anyway to allow symmetric pushbacks.
- return -1;
- }
- uint8_t next_char = buffer_[offset_];
-#ifdef DEBUG
- if (next_char > 0x7fu) {
- fprintf(stderr, "Non-ASCII character in input: '%c'.", next_char);
- exit(1);
- }
-#endif
- offset_++;
- return static_cast<int32_t>(next_char);
- }
-
- private:
- const uint8_t* buffer_;
- const int end_offset_;
- int offset_;
-};
-
-
-bool ReadBuffer(FILE* source, void* buffer, size_t length) {
- size_t actually_read = fread(buffer, 1, length, source);
- return (actually_read == length);
-}
-
-
-bool WriteBuffer(FILE* dest, const void* buffer, size_t length) {
- size_t actually_written = fwrite(buffer, 1, length, dest);
- return (actually_written == length);
-}
-
-
-template <typename T>
-class ScopedPointer {
- public:
- explicit ScopedPointer(T* pointer) : pointer_(pointer) {}
- ~ScopedPointer() { delete[] pointer_; }
- T& operator[](int index) { return pointer_[index]; }
- T* operator*() { return pointer_ ;}
- private:
- T* pointer_;
-};
-
-
-int main(int argc, char* argv[]) {
- // Check for filename argument.
- if (argc < 2) {
- fprintf(stderr, "ERROR: No filename on command line.\n");
- fflush(stderr);
- return EXIT_FAILURE;
- }
- const char* filename = argv[1];
-
- // Open JS file.
- FILE* input = fopen(filename, "rb");
- if (input == NULL) {
- perror("ERROR: Error opening file");
- fflush(stderr);
- return EXIT_FAILURE;
- }
-
- // Find length of JS file.
- if (fseek(input, 0, SEEK_END) != 0) {
- perror("ERROR: Error during seek");
- fflush(stderr);
- return EXIT_FAILURE;
- }
- size_t length = static_cast<size_t>(ftell(input));
- rewind(input);
-
- // Read JS file into memory buffer.
- ScopedPointer<uint8_t> buffer(new uint8_t[length]);
- if (!ReadBuffer(input, *buffer, length)) {
- perror("ERROR: Reading file");
- fflush(stderr);
- return EXIT_FAILURE;
- }
- fclose(input);
-
- // Preparse input file.
- AsciiInputStream input_buffer(*buffer, length);
- size_t kMaxStackSize = 64 * 1024 * sizeof(void*); // NOLINT
- v8::PreParserData data = v8::Preparse(&input_buffer, kMaxStackSize);
-
- // Fail if stack overflow.
- if (data.stack_overflow()) {
- fprintf(stderr, "ERROR: Stack overflow\n");
- fflush(stderr);
- return EXIT_FAILURE;
- }
-
- // Print preparser data to stdout.
- uint32_t size = data.size();
- fprintf(stderr, "LOG: Success, data size: %u\n", size);
- fflush(stderr);
- if (!WriteBuffer(stdout, data.data(), size)) {
- perror("ERROR: Writing data");
- return EXIT_FAILURE;
- }
-
- return EXIT_SUCCESS;
-}
diff --git a/src/3rdparty/v8/src/accessors.cc b/src/3rdparty/v8/src/accessors.cc
deleted file mode 100644
index 5f9bf74..0000000
--- a/src/3rdparty/v8/src/accessors.cc
+++ /dev/null
@@ -1,766 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "accessors.h"
-#include "ast.h"
-#include "deoptimizer.h"
-#include "execution.h"
-#include "factory.h"
-#include "safepoint-table.h"
-#include "scopeinfo.h"
-
-namespace v8 {
-namespace internal {
-
-
-template <class C>
-static C* FindInPrototypeChain(Object* obj, bool* found_it) {
- ASSERT(!*found_it);
- Heap* heap = HEAP;
- while (!Is<C>(obj)) {
- if (obj == heap->null_value()) return NULL;
- obj = obj->GetPrototype();
- }
- *found_it = true;
- return C::cast(obj);
-}
-
-
-// Entry point that never should be called.
-MaybeObject* Accessors::IllegalSetter(JSObject*, Object*, void*) {
- UNREACHABLE();
- return NULL;
-}
-
-
-Object* Accessors::IllegalGetAccessor(Object* object, void*) {
- UNREACHABLE();
- return object;
-}
-
-
-MaybeObject* Accessors::ReadOnlySetAccessor(JSObject*, Object* value, void*) {
- // According to ECMA-262, section 8.6.2.2, page 28, setting
- // read-only properties must be silently ignored.
- return value;
-}
-
-
-//
-// Accessors::ArrayLength
-//
-
-
-MaybeObject* Accessors::ArrayGetLength(Object* object, void*) {
- // Traverse the prototype chain until we reach an array.
- bool found_it = false;
- JSArray* holder = FindInPrototypeChain<JSArray>(object, &found_it);
- if (!found_it) return Smi::FromInt(0);
- return holder->length();
-}
-
-
-// The helper function will 'flatten' Number objects.
-Object* Accessors::FlattenNumber(Object* value) {
- if (value->IsNumber() || !value->IsJSValue()) return value;
- JSValue* wrapper = JSValue::cast(value);
- ASSERT(Isolate::Current()->context()->global_context()->number_function()->
- has_initial_map());
- Map* number_map = Isolate::Current()->context()->global_context()->
- number_function()->initial_map();
- if (wrapper->map() == number_map) return wrapper->value();
- return value;
-}
-
-
-MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
- Isolate* isolate = object->GetIsolate();
- value = FlattenNumber(value);
-
- // Need to call methods that may trigger GC.
- HandleScope scope(isolate);
-
- // Protect raw pointers.
- Handle<JSObject> object_handle(object, isolate);
- Handle<Object> value_handle(value, isolate);
-
- bool has_exception;
- Handle<Object> uint32_v = Execution::ToUint32(value_handle, &has_exception);
- if (has_exception) return Failure::Exception();
- Handle<Object> number_v = Execution::ToNumber(value_handle, &has_exception);
- if (has_exception) return Failure::Exception();
-
- // Restore raw pointers,
- object = *object_handle;
- value = *value_handle;
-
- if (uint32_v->Number() == number_v->Number()) {
- if (object->IsJSArray()) {
- return JSArray::cast(object)->SetElementsLength(*uint32_v);
- } else {
- // This means one of the object's prototypes is a JSArray and
- // the object does not have a 'length' property.
- // Calling SetProperty causes an infinite loop.
- return object->SetLocalPropertyIgnoreAttributes(
- isolate->heap()->length_symbol(), value, NONE);
- }
- }
- return isolate->Throw(
- *isolate->factory()->NewRangeError("invalid_array_length",
- HandleVector<Object>(NULL, 0)));
-}
-
-
-const AccessorDescriptor Accessors::ArrayLength = {
- ArrayGetLength,
- ArraySetLength,
- 0
-};
-
-
-//
-// Accessors::StringLength
-//
-
-
-MaybeObject* Accessors::StringGetLength(Object* object, void*) {
- Object* value = object;
- if (object->IsJSValue()) value = JSValue::cast(object)->value();
- if (value->IsString()) return Smi::FromInt(String::cast(value)->length());
- // If object is not a string we return 0 to be compatible with WebKit.
- // Note: Firefox returns the length of ToString(object).
- return Smi::FromInt(0);
-}
-
-
-const AccessorDescriptor Accessors::StringLength = {
- StringGetLength,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptSource
-//
-
-
-MaybeObject* Accessors::ScriptGetSource(Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->source();
-}
-
-
-const AccessorDescriptor Accessors::ScriptSource = {
- ScriptGetSource,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptName
-//
-
-
-MaybeObject* Accessors::ScriptGetName(Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->name();
-}
-
-
-const AccessorDescriptor Accessors::ScriptName = {
- ScriptGetName,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptId
-//
-
-
-MaybeObject* Accessors::ScriptGetId(Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->id();
-}
-
-
-const AccessorDescriptor Accessors::ScriptId = {
- ScriptGetId,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptLineOffset
-//
-
-
-MaybeObject* Accessors::ScriptGetLineOffset(Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->line_offset();
-}
-
-
-const AccessorDescriptor Accessors::ScriptLineOffset = {
- ScriptGetLineOffset,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptColumnOffset
-//
-
-
-MaybeObject* Accessors::ScriptGetColumnOffset(Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->column_offset();
-}
-
-
-const AccessorDescriptor Accessors::ScriptColumnOffset = {
- ScriptGetColumnOffset,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptData
-//
-
-
-MaybeObject* Accessors::ScriptGetData(Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->data();
-}
-
-
-const AccessorDescriptor Accessors::ScriptData = {
- ScriptGetData,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptType
-//
-
-
-MaybeObject* Accessors::ScriptGetType(Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->type();
-}
-
-
-const AccessorDescriptor Accessors::ScriptType = {
- ScriptGetType,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptCompilationType
-//
-
-
-MaybeObject* Accessors::ScriptGetCompilationType(Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->compilation_type();
-}
-
-
-const AccessorDescriptor Accessors::ScriptCompilationType = {
- ScriptGetCompilationType,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptGetLineEnds
-//
-
-
-MaybeObject* Accessors::ScriptGetLineEnds(Object* object, void*) {
- JSValue* wrapper = JSValue::cast(object);
- Isolate* isolate = wrapper->GetIsolate();
- HandleScope scope(isolate);
- Handle<Script> script(Script::cast(wrapper->value()), isolate);
- InitScriptLineEnds(script);
- ASSERT(script->line_ends()->IsFixedArray());
- Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
- // We do not want anyone to modify this array from JS.
- ASSERT(*line_ends == isolate->heap()->empty_fixed_array() ||
- line_ends->map() == isolate->heap()->fixed_cow_array_map());
- Handle<JSArray> js_array =
- isolate->factory()->NewJSArrayWithElements(line_ends);
- return *js_array;
-}
-
-
-const AccessorDescriptor Accessors::ScriptLineEnds = {
- ScriptGetLineEnds,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptGetContextData
-//
-
-
-MaybeObject* Accessors::ScriptGetContextData(Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->context_data();
-}
-
-
-const AccessorDescriptor Accessors::ScriptContextData = {
- ScriptGetContextData,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptGetEvalFromScript
-//
-
-
-MaybeObject* Accessors::ScriptGetEvalFromScript(Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- if (!Script::cast(script)->eval_from_shared()->IsUndefined()) {
- Handle<SharedFunctionInfo> eval_from_shared(
- SharedFunctionInfo::cast(Script::cast(script)->eval_from_shared()));
-
- if (eval_from_shared->script()->IsScript()) {
- Handle<Script> eval_from_script(Script::cast(eval_from_shared->script()));
- return *GetScriptWrapper(eval_from_script);
- }
- }
- return HEAP->undefined_value();
-}
-
-
-const AccessorDescriptor Accessors::ScriptEvalFromScript = {
- ScriptGetEvalFromScript,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptGetEvalFromScriptPosition
-//
-
-
-MaybeObject* Accessors::ScriptGetEvalFromScriptPosition(Object* object, void*) {
- HandleScope scope;
- Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
-
- // If this is not a script compiled through eval there is no eval position.
- int compilation_type = Smi::cast(script->compilation_type())->value();
- if (compilation_type != Script::COMPILATION_TYPE_EVAL) {
- return HEAP->undefined_value();
- }
-
- // Get the function from where eval was called and find the source position
- // from the instruction offset.
- Handle<Code> code(SharedFunctionInfo::cast(
- script->eval_from_shared())->code());
- return Smi::FromInt(code->SourcePosition(code->instruction_start() +
- script->eval_from_instructions_offset()->value()));
-}
-
-
-const AccessorDescriptor Accessors::ScriptEvalFromScriptPosition = {
- ScriptGetEvalFromScriptPosition,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptGetEvalFromFunctionName
-//
-
-
-MaybeObject* Accessors::ScriptGetEvalFromFunctionName(Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(
- Script::cast(script)->eval_from_shared()));
-
-
- // Find the name of the function calling eval.
- if (!shared->name()->IsUndefined()) {
- return shared->name();
- } else {
- return shared->inferred_name();
- }
-}
-
-
-const AccessorDescriptor Accessors::ScriptEvalFromFunctionName = {
- ScriptGetEvalFromFunctionName,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::FunctionPrototype
-//
-
-
-MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
- Heap* heap = Isolate::Current()->heap();
- bool found_it = false;
- JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return heap->undefined_value();
- while (!function->should_have_prototype()) {
- found_it = false;
- function = FindInPrototypeChain<JSFunction>(object->GetPrototype(),
- &found_it);
- // There has to be one because we hit the getter.
- ASSERT(found_it);
- }
-
- if (!function->has_prototype()) {
- Object* prototype;
- { MaybeObject* maybe_prototype = heap->AllocateFunctionPrototype(function);
- if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
- }
- Object* result;
- { MaybeObject* maybe_result = function->SetPrototype(prototype);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return function->prototype();
-}
-
-
-MaybeObject* Accessors::FunctionSetPrototype(JSObject* object,
- Object* value,
- void*) {
- Heap* heap = object->GetHeap();
- bool found_it = false;
- JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return heap->undefined_value();
- if (!function->should_have_prototype()) {
- // Since we hit this accessor, object will have no prototype property.
- return object->SetLocalPropertyIgnoreAttributes(heap->prototype_symbol(),
- value,
- NONE);
- }
-
- if (function->has_initial_map()) {
- // If the function has allocated the initial map
- // replace it with a copy containing the new prototype.
- Object* new_map;
- { MaybeObject* maybe_new_map =
- function->initial_map()->CopyDropTransitions();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- }
- function->set_initial_map(Map::cast(new_map));
- }
- Object* prototype;
- { MaybeObject* maybe_prototype = function->SetPrototype(value);
- if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
- }
- ASSERT(function->prototype() == value);
- return function;
-}
-
-
-const AccessorDescriptor Accessors::FunctionPrototype = {
- FunctionGetPrototype,
- FunctionSetPrototype,
- 0
-};
-
-
-//
-// Accessors::FunctionLength
-//
-
-
-MaybeObject* Accessors::FunctionGetLength(Object* object, void*) {
- bool found_it = false;
- JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return Smi::FromInt(0);
- // Check if already compiled.
- if (!function->shared()->is_compiled()) {
- // If the function isn't compiled yet, the length is not computed
- // correctly yet. Compile it now and return the right length.
- HandleScope scope;
- Handle<JSFunction> handle(function);
- if (!CompileLazy(handle, KEEP_EXCEPTION)) return Failure::Exception();
- return Smi::FromInt(handle->shared()->length());
- } else {
- return Smi::FromInt(function->shared()->length());
- }
-}
-
-
-const AccessorDescriptor Accessors::FunctionLength = {
- FunctionGetLength,
- ReadOnlySetAccessor,
- 0
-};
-
-
-//
-// Accessors::FunctionName
-//
-
-
-MaybeObject* Accessors::FunctionGetName(Object* object, void*) {
- bool found_it = false;
- JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return HEAP->undefined_value();
- return holder->shared()->name();
-}
-
-
-const AccessorDescriptor Accessors::FunctionName = {
- FunctionGetName,
- ReadOnlySetAccessor,
- 0
-};
-
-
-//
-// Accessors::FunctionArguments
-//
-
-
-static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
- JavaScriptFrame* frame,
- Handle<JSFunction> inlined_function,
- int inlined_frame_index) {
- Factory* factory = Isolate::Current()->factory();
- int args_count = inlined_function->shared()->formal_parameter_count();
- ScopedVector<SlotRef> args_slots(args_count);
- SlotRef::ComputeSlotMappingForArguments(frame,
- inlined_frame_index,
- &args_slots);
- Handle<JSObject> arguments =
- factory->NewArgumentsObject(inlined_function, args_count);
- Handle<FixedArray> array = factory->NewFixedArray(args_count);
- for (int i = 0; i < args_count; ++i) {
- Handle<Object> value = args_slots[i].GetValue();
- array->set(i, *value);
- }
- arguments->set_elements(*array);
-
- // Return the freshly allocated arguments object.
- return *arguments;
-}
-
-
-MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
- Isolate* isolate = Isolate::Current();
- HandleScope scope(isolate);
- bool found_it = false;
- JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return isolate->heap()->undefined_value();
- Handle<JSFunction> function(holder, isolate);
-
- // Find the top invocation of the function by traversing frames.
- List<JSFunction*> functions(2);
- for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- frame->GetFunctions(&functions);
- for (int i = functions.length() - 1; i >= 0; i--) {
- // Skip all frames that aren't invocations of the given function.
- if (functions[i] != *function) continue;
-
- if (i > 0) {
- // The function in question was inlined. Inlined functions have the
- // correct number of arguments and no allocated arguments object, so
- // we can construct a fresh one by interpreting the function's
- // deoptimization input data.
- return ConstructArgumentsObjectForInlinedFunction(frame, function, i);
- }
-
- if (!frame->is_optimized()) {
- // If there is an arguments variable in the stack, we return that.
- Handle<SerializedScopeInfo> info(function->shared()->scope_info());
- int index = info->StackSlotIndex(isolate->heap()->arguments_symbol());
- if (index >= 0) {
- Handle<Object> arguments(frame->GetExpression(index), isolate);
- if (!arguments->IsArgumentsMarker()) return *arguments;
- }
- }
-
- // If there is no arguments variable in the stack or we have an
- // optimized frame, we find the frame that holds the actual arguments
- // passed to the function.
- it.AdvanceToArgumentsFrame();
- frame = it.frame();
-
- // Get the number of arguments and construct an arguments object
- // mirror for the right frame.
- const int length = frame->ComputeParametersCount();
- Handle<JSObject> arguments = isolate->factory()->NewArgumentsObject(
- function, length);
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
-
- // Copy the parameters to the arguments object.
- ASSERT(array->length() == length);
- for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
- arguments->set_elements(*array);
-
- // Return the freshly allocated arguments object.
- return *arguments;
- }
- functions.Rewind(0);
- }
-
- // No frame corresponding to the given function found. Return null.
- return isolate->heap()->null_value();
-}
-
-
-const AccessorDescriptor Accessors::FunctionArguments = {
- FunctionGetArguments,
- ReadOnlySetAccessor,
- 0
-};
-
-
-//
-// Accessors::FunctionCaller
-//
-
-
-static MaybeObject* CheckNonStrictCallerOrThrow(
- Isolate* isolate,
- JSFunction* caller) {
- DisableAssertNoAllocation enable_allocation;
- if (caller->shared()->strict_mode()) {
- return isolate->Throw(
- *isolate->factory()->NewTypeError("strict_caller",
- HandleVector<Object>(NULL, 0)));
- }
- return caller;
-}
-
-
-MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
- Isolate* isolate = Isolate::Current();
- HandleScope scope(isolate);
- AssertNoAllocation no_alloc;
- bool found_it = false;
- JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return isolate->heap()->undefined_value();
- Handle<JSFunction> function(holder, isolate);
-
- List<JSFunction*> functions(2);
- for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- frame->GetFunctions(&functions);
- for (int i = functions.length() - 1; i >= 0; i--) {
- if (functions[i] == *function) {
- // Once we have found the frame, we need to go to the caller
- // frame. This may require skipping through a number of top-level
- // frames, e.g. frames for scripts not functions.
- if (i > 0) {
- ASSERT(!functions[i - 1]->shared()->is_toplevel());
- return CheckNonStrictCallerOrThrow(isolate, functions[i - 1]);
- } else {
- for (it.Advance(); !it.done(); it.Advance()) {
- frame = it.frame();
- functions.Rewind(0);
- frame->GetFunctions(&functions);
- if (!functions.last()->shared()->is_toplevel()) {
- return CheckNonStrictCallerOrThrow(isolate, functions.last());
- }
- ASSERT(functions.length() == 1);
- }
- if (it.done()) return isolate->heap()->null_value();
- break;
- }
- }
- }
- functions.Rewind(0);
- }
-
- // No frame corresponding to the given function found. Return null.
- return isolate->heap()->null_value();
-}
-
-
-const AccessorDescriptor Accessors::FunctionCaller = {
- FunctionGetCaller,
- ReadOnlySetAccessor,
- 0
-};
-
-
-//
-// Accessors::ObjectPrototype
-//
-
-
-MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) {
- Object* current = receiver->GetPrototype();
- while (current->IsJSObject() &&
- JSObject::cast(current)->map()->is_hidden_prototype()) {
- current = current->GetPrototype();
- }
- return current;
-}
-
-
-MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver,
- Object* value,
- void*) {
- const bool skip_hidden_prototypes = true;
- // To be consistent with other Set functions, return the value.
- return receiver->SetPrototype(value, skip_hidden_prototypes);
-}
-
-
-const AccessorDescriptor Accessors::ObjectPrototype = {
- ObjectGetPrototype,
- ObjectSetPrototype,
- 0
-};
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/accessors.h b/src/3rdparty/v8/src/accessors.h
deleted file mode 100644
index 14ccc8f..0000000
--- a/src/3rdparty/v8/src/accessors.h
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ACCESSORS_H_
-#define V8_ACCESSORS_H_
-
-namespace v8 {
-namespace internal {
-
-// The list of accessor descriptors. This is a second-order macro
-// taking a macro to be applied to all accessor descriptor names.
-#define ACCESSOR_DESCRIPTOR_LIST(V) \
- V(FunctionPrototype) \
- V(FunctionLength) \
- V(FunctionName) \
- V(FunctionArguments) \
- V(FunctionCaller) \
- V(ArrayLength) \
- V(StringLength) \
- V(ScriptSource) \
- V(ScriptName) \
- V(ScriptId) \
- V(ScriptLineOffset) \
- V(ScriptColumnOffset) \
- V(ScriptData) \
- V(ScriptType) \
- V(ScriptCompilationType) \
- V(ScriptLineEnds) \
- V(ScriptContextData) \
- V(ScriptEvalFromScript) \
- V(ScriptEvalFromScriptPosition) \
- V(ScriptEvalFromFunctionName) \
- V(ObjectPrototype)
-
-// Accessors contains all predefined proxy accessors.
-
-class Accessors : public AllStatic {
- public:
- // Accessor descriptors.
-#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
- static const AccessorDescriptor name;
- ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
-#undef ACCESSOR_DESCRIPTOR_DECLARATION
-
- enum DescriptorId {
-#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
- k##name,
- ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
-#undef ACCESSOR_DESCRIPTOR_DECLARATION
- descriptorCount
- };
-
- // Accessor functions called directly from the runtime system.
- MUST_USE_RESULT static MaybeObject* FunctionGetPrototype(Object* object,
- void*);
- MUST_USE_RESULT static MaybeObject* FunctionSetPrototype(JSObject* object,
- Object* value,
- void*);
- static MaybeObject* FunctionGetArguments(Object* object, void*);
-
- private:
- // Accessor functions only used through the descriptor.
- static MaybeObject* FunctionGetLength(Object* object, void*);
- static MaybeObject* FunctionGetName(Object* object, void*);
- static MaybeObject* FunctionGetCaller(Object* object, void*);
- MUST_USE_RESULT static MaybeObject* ArraySetLength(JSObject* object,
- Object* value, void*);
- static MaybeObject* ArrayGetLength(Object* object, void*);
- static MaybeObject* StringGetLength(Object* object, void*);
- static MaybeObject* ScriptGetName(Object* object, void*);
- static MaybeObject* ScriptGetId(Object* object, void*);
- static MaybeObject* ScriptGetSource(Object* object, void*);
- static MaybeObject* ScriptGetLineOffset(Object* object, void*);
- static MaybeObject* ScriptGetColumnOffset(Object* object, void*);
- static MaybeObject* ScriptGetData(Object* object, void*);
- static MaybeObject* ScriptGetType(Object* object, void*);
- static MaybeObject* ScriptGetCompilationType(Object* object, void*);
- static MaybeObject* ScriptGetLineEnds(Object* object, void*);
- static MaybeObject* ScriptGetContextData(Object* object, void*);
- static MaybeObject* ScriptGetEvalFromScript(Object* object, void*);
- static MaybeObject* ScriptGetEvalFromScriptPosition(Object* object, void*);
- static MaybeObject* ScriptGetEvalFromFunctionName(Object* object, void*);
- static MaybeObject* ObjectGetPrototype(Object* receiver, void*);
- static MaybeObject* ObjectSetPrototype(JSObject* receiver,
- Object* value,
- void*);
-
- // Helper functions.
- static Object* FlattenNumber(Object* value);
- static MaybeObject* IllegalSetter(JSObject*, Object*, void*);
- static Object* IllegalGetAccessor(Object* object, void*);
- static MaybeObject* ReadOnlySetAccessor(JSObject*, Object* value, void*);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_ACCESSORS_H_
diff --git a/src/3rdparty/v8/src/allocation-inl.h b/src/3rdparty/v8/src/allocation-inl.h
deleted file mode 100644
index 04a3fe6..0000000
--- a/src/3rdparty/v8/src/allocation-inl.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ALLOCATION_INL_H_
-#define V8_ALLOCATION_INL_H_
-
-#include "allocation.h"
-
-namespace v8 {
-namespace internal {
-
-
-void* PreallocatedStorage::New(size_t size) {
- return Isolate::Current()->PreallocatedStorageNew(size);
-}
-
-
-void PreallocatedStorage::Delete(void* p) {
- return Isolate::Current()->PreallocatedStorageDelete(p);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_ALLOCATION_INL_H_
diff --git a/src/3rdparty/v8/src/allocation.cc b/src/3rdparty/v8/src/allocation.cc
deleted file mode 100644
index 119b087..0000000
--- a/src/3rdparty/v8/src/allocation.cc
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "../include/v8stdint.h"
-#include "globals.h"
-#include "checks.h"
-#include "allocation.h"
-#include "utils.h"
-
-namespace v8 {
-namespace internal {
-
-void* Malloced::New(size_t size) {
- void* result = malloc(size);
- if (result == NULL) {
- v8::internal::FatalProcessOutOfMemory("Malloced operator new");
- }
- return result;
-}
-
-
-void Malloced::Delete(void* p) {
- free(p);
-}
-
-
-void Malloced::FatalProcessOutOfMemory() {
- v8::internal::FatalProcessOutOfMemory("Out of memory");
-}
-
-
-#ifdef DEBUG
-
-static void* invalid = static_cast<void*>(NULL);
-
-void* Embedded::operator new(size_t size) {
- UNREACHABLE();
- return invalid;
-}
-
-
-void Embedded::operator delete(void* p) {
- UNREACHABLE();
-}
-
-
-void* AllStatic::operator new(size_t size) {
- UNREACHABLE();
- return invalid;
-}
-
-
-void AllStatic::operator delete(void* p) {
- UNREACHABLE();
-}
-
-#endif
-
-
-char* StrDup(const char* str) {
- int length = StrLength(str);
- char* result = NewArray<char>(length + 1);
- memcpy(result, str, length);
- result[length] = '\0';
- return result;
-}
-
-
-char* StrNDup(const char* str, int n) {
- int length = StrLength(str);
- if (n < length) length = n;
- char* result = NewArray<char>(length + 1);
- memcpy(result, str, length);
- result[length] = '\0';
- return result;
-}
-
-
-void PreallocatedStorage::LinkTo(PreallocatedStorage* other) {
- next_ = other->next_;
- other->next_->previous_ = this;
- previous_ = other;
- other->next_ = this;
-}
-
-
-void PreallocatedStorage::Unlink() {
- next_->previous_ = previous_;
- previous_->next_ = next_;
-}
-
-
-PreallocatedStorage::PreallocatedStorage(size_t size)
- : size_(size) {
- previous_ = next_ = this;
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/allocation.h b/src/3rdparty/v8/src/allocation.h
deleted file mode 100644
index 75aba35..0000000
--- a/src/3rdparty/v8/src/allocation.h
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ALLOCATION_H_
-#define V8_ALLOCATION_H_
-
-#include "checks.h"
-#include "globals.h"
-
-namespace v8 {
-namespace internal {
-
-// Called when allocation routines fail to allocate.
-// This function should not return, but should terminate the current
-// processing.
-void FatalProcessOutOfMemory(const char* message);
-
-// Superclass for classes managed with new & delete.
-class Malloced {
- public:
- void* operator new(size_t size) { return New(size); }
- void operator delete(void* p) { Delete(p); }
-
- static void FatalProcessOutOfMemory();
- static void* New(size_t size);
- static void Delete(void* p);
-};
-
-
-// A macro is used for defining the base class used for embedded instances.
-// The reason is some compilers allocate a minimum of one word for the
-// superclass. The macro prevents the use of new & delete in debug mode.
-// In release mode we are not willing to pay this overhead.
-
-#ifdef DEBUG
-// Superclass for classes with instances allocated inside stack
-// activations or inside other objects.
-class Embedded {
- public:
- void* operator new(size_t size);
- void operator delete(void* p);
-};
-#define BASE_EMBEDDED : public Embedded
-#else
-#define BASE_EMBEDDED
-#endif
-
-
-// Superclass for classes only using statics.
-class AllStatic {
-#ifdef DEBUG
- public:
- void* operator new(size_t size);
- void operator delete(void* p);
-#endif
-};
-
-
-template <typename T>
-static T* NewArray(int size) {
- T* result = new T[size];
- if (result == NULL) Malloced::FatalProcessOutOfMemory();
- return result;
-}
-
-
-template <typename T>
-static void DeleteArray(T* array) {
- delete[] array;
-}
-
-
-// The normal strdup functions use malloc. These versions of StrDup
-// and StrNDup uses new and calls the FatalProcessOutOfMemory handler
-// if allocation fails.
-char* StrDup(const char* str);
-char* StrNDup(const char* str, int n);
-
-
-// Allocation policy for allocating in the C free store using malloc
-// and free. Used as the default policy for lists.
-class FreeStoreAllocationPolicy {
- public:
- INLINE(static void* New(size_t size)) { return Malloced::New(size); }
- INLINE(static void Delete(void* p)) { Malloced::Delete(p); }
-};
-
-
-// Allocation policy for allocating in preallocated space.
-// Used as an allocation policy for ScopeInfo when generating
-// stack traces.
-class PreallocatedStorage {
- public:
- explicit PreallocatedStorage(size_t size);
- size_t size() { return size_; }
-
- // TODO(isolates): Get rid of these-- we'll have to change the allocator
- // interface to include a pointer to an isolate to do this
- // efficiently.
- static inline void* New(size_t size);
- static inline void Delete(void* p);
-
- private:
- size_t size_;
- PreallocatedStorage* previous_;
- PreallocatedStorage* next_;
-
- void LinkTo(PreallocatedStorage* other);
- void Unlink();
-
- friend class Isolate;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(PreallocatedStorage);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_ALLOCATION_H_
diff --git a/src/3rdparty/v8/src/api.cc b/src/3rdparty/v8/src/api.cc
deleted file mode 100644
index ad39da6..0000000
--- a/src/3rdparty/v8/src/api.cc
+++ /dev/null
@@ -1,5952 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-
-#include "arguments.h"
-#include "bootstrapper.h"
-#include "compiler.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "execution.h"
-#include "global-handles.h"
-#include "heap-profiler.h"
-#include "messages.h"
-#include "parser.h"
-#include "platform.h"
-#include "profile-generator-inl.h"
-#include "runtime-profiler.h"
-#include "serialize.h"
-#include "snapshot.h"
-#include "v8threads.h"
-#include "version.h"
-#include "vm-state-inl.h"
-
-#include "../include/v8-profiler.h"
-#include "../include/v8-testing.h"
-
-#define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr))
-
-// TODO(isolates): avoid repeated TLS reads in function prologues.
-#ifdef ENABLE_VMSTATE_TRACKING
-#define ENTER_V8(isolate) \
- ASSERT((isolate)->IsInitialized()); \
- i::VMState __state__((isolate), i::OTHER)
-#define LEAVE_V8(isolate) \
- i::VMState __state__((isolate), i::EXTERNAL)
-#else
-#define ENTER_V8(isolate) ((void) 0)
-#define LEAVE_V8(isolate) ((void) 0)
-#endif
-
-namespace v8 {
-
-#define ON_BAILOUT(isolate, location, code) \
- if (IsDeadCheck(isolate, location) || \
- IsExecutionTerminatingCheck(isolate)) { \
- code; \
- UNREACHABLE(); \
- }
-
-
-#define EXCEPTION_PREAMBLE(isolate) \
- (isolate)->handle_scope_implementer()->IncrementCallDepth(); \
- ASSERT(!(isolate)->external_caught_exception()); \
- bool has_pending_exception = false
-
-
-#define EXCEPTION_BAILOUT_CHECK(isolate, value) \
- do { \
- i::HandleScopeImplementer* handle_scope_implementer = \
- (isolate)->handle_scope_implementer(); \
- handle_scope_implementer->DecrementCallDepth(); \
- if (has_pending_exception) { \
- if (handle_scope_implementer->CallDepthIsZero() && \
- (isolate)->is_out_of_memory()) { \
- if (!handle_scope_implementer->ignore_out_of_memory()) \
- i::V8::FatalProcessOutOfMemory(NULL); \
- } \
- bool call_depth_is_zero = handle_scope_implementer->CallDepthIsZero(); \
- (isolate)->OptionalRescheduleException(call_depth_is_zero); \
- return value; \
- } \
- } while (false)
-
-// TODO(isolates): Add a parameter to this macro for an isolate.
-
-#define API_ENTRY_CHECK(msg) \
- do { \
- if (v8::Locker::IsActive()) { \
- ApiCheck(i::Isolate::Current()->thread_manager()-> \
- IsLockedByCurrentThread(), \
- msg, \
- "Entering the V8 API without proper locking in place"); \
- } \
- } while (false)
-
-
-// --- E x c e p t i o n B e h a v i o r ---
-
-
-static void DefaultFatalErrorHandler(const char* location,
- const char* message) {
-#ifdef ENABLE_VMSTATE_TRACKING
- i::VMState __state__(i::Isolate::Current(), i::OTHER);
-#endif
- API_Fatal(location, message);
-}
-
-
-static FatalErrorCallback GetFatalErrorHandler() {
- i::Isolate* isolate = i::Isolate::Current();
- if (isolate->exception_behavior() == NULL) {
- isolate->set_exception_behavior(DefaultFatalErrorHandler);
- }
- return isolate->exception_behavior();
-}
-
-
-void i::FatalProcessOutOfMemory(const char* location) {
- i::V8::FatalProcessOutOfMemory(location, false);
-}
-
-
-// When V8 cannot allocated memory FatalProcessOutOfMemory is called.
-// The default fatal error handler is called and execution is stopped.
-void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
- i::HeapStats heap_stats;
- int start_marker;
- heap_stats.start_marker = &start_marker;
- int new_space_size;
- heap_stats.new_space_size = &new_space_size;
- int new_space_capacity;
- heap_stats.new_space_capacity = &new_space_capacity;
- intptr_t old_pointer_space_size;
- heap_stats.old_pointer_space_size = &old_pointer_space_size;
- intptr_t old_pointer_space_capacity;
- heap_stats.old_pointer_space_capacity = &old_pointer_space_capacity;
- intptr_t old_data_space_size;
- heap_stats.old_data_space_size = &old_data_space_size;
- intptr_t old_data_space_capacity;
- heap_stats.old_data_space_capacity = &old_data_space_capacity;
- intptr_t code_space_size;
- heap_stats.code_space_size = &code_space_size;
- intptr_t code_space_capacity;
- heap_stats.code_space_capacity = &code_space_capacity;
- intptr_t map_space_size;
- heap_stats.map_space_size = &map_space_size;
- intptr_t map_space_capacity;
- heap_stats.map_space_capacity = &map_space_capacity;
- intptr_t cell_space_size;
- heap_stats.cell_space_size = &cell_space_size;
- intptr_t cell_space_capacity;
- heap_stats.cell_space_capacity = &cell_space_capacity;
- intptr_t lo_space_size;
- heap_stats.lo_space_size = &lo_space_size;
- int global_handle_count;
- heap_stats.global_handle_count = &global_handle_count;
- int weak_global_handle_count;
- heap_stats.weak_global_handle_count = &weak_global_handle_count;
- int pending_global_handle_count;
- heap_stats.pending_global_handle_count = &pending_global_handle_count;
- int near_death_global_handle_count;
- heap_stats.near_death_global_handle_count = &near_death_global_handle_count;
- int destroyed_global_handle_count;
- heap_stats.destroyed_global_handle_count = &destroyed_global_handle_count;
- intptr_t memory_allocator_size;
- heap_stats.memory_allocator_size = &memory_allocator_size;
- intptr_t memory_allocator_capacity;
- heap_stats.memory_allocator_capacity = &memory_allocator_capacity;
- int objects_per_type[LAST_TYPE + 1] = {0};
- heap_stats.objects_per_type = objects_per_type;
- int size_per_type[LAST_TYPE + 1] = {0};
- heap_stats.size_per_type = size_per_type;
- int os_error;
- heap_stats.os_error = &os_error;
- int end_marker;
- heap_stats.end_marker = &end_marker;
- i::Isolate* isolate = i::Isolate::Current();
- isolate->heap()->RecordStats(&heap_stats, take_snapshot);
- i::V8::SetFatalError();
- FatalErrorCallback callback = GetFatalErrorHandler();
- {
- LEAVE_V8(isolate);
- callback(location, "Allocation failed - process out of memory");
- }
- // If the callback returns, we stop execution.
- UNREACHABLE();
-}
-
-
-bool Utils::ReportApiFailure(const char* location, const char* message) {
- FatalErrorCallback callback = GetFatalErrorHandler();
- callback(location, message);
- i::V8::SetFatalError();
- return false;
-}
-
-
-bool V8::IsDead() {
- return i::V8::IsDead();
-}
-
-
-static inline bool ApiCheck(bool condition,
- const char* location,
- const char* message) {
- return condition ? true : Utils::ReportApiFailure(location, message);
-}
-
-
-static bool ReportV8Dead(const char* location) {
- FatalErrorCallback callback = GetFatalErrorHandler();
- callback(location, "V8 is no longer usable");
- return true;
-}
-
-
-static bool ReportEmptyHandle(const char* location) {
- FatalErrorCallback callback = GetFatalErrorHandler();
- callback(location, "Reading from empty handle");
- return true;
-}
-
-
-/**
- * IsDeadCheck checks that the vm is usable. If, for instance, the vm has been
- * out of memory at some point this check will fail. It should be called on
- * entry to all methods that touch anything in the heap, except destructors
- * which you sometimes can't avoid calling after the vm has crashed. Functions
- * that call EnsureInitialized or ON_BAILOUT don't have to also call
- * IsDeadCheck. ON_BAILOUT has the advantage over EnsureInitialized that you
- * can arrange to return if the VM is dead. This is needed to ensure that no VM
- * heap allocations are attempted on a dead VM. EnsureInitialized has the
- * advantage over ON_BAILOUT that it actually initializes the VM if this has not
- * yet been done.
- */
-static inline bool IsDeadCheck(i::Isolate* isolate, const char* location) {
- return !isolate->IsInitialized()
- && i::V8::IsDead() ? ReportV8Dead(location) : false;
-}
-
-
-static inline bool IsExecutionTerminatingCheck(i::Isolate* isolate) {
- if (!isolate->IsInitialized()) return false;
- if (isolate->has_scheduled_exception()) {
- return isolate->scheduled_exception() ==
- isolate->heap()->termination_exception();
- }
- return false;
-}
-
-
-static inline bool EmptyCheck(const char* location, v8::Handle<v8::Data> obj) {
- return obj.IsEmpty() ? ReportEmptyHandle(location) : false;
-}
-
-
-static inline bool EmptyCheck(const char* location, const v8::Data* obj) {
- return (obj == 0) ? ReportEmptyHandle(location) : false;
-}
-
-// --- S t a t i c s ---
-
-
-static bool InitializeHelper() {
- if (i::Snapshot::Initialize()) return true;
- return i::V8::Initialize(NULL);
-}
-
-
-static inline bool EnsureInitializedForIsolate(i::Isolate* isolate,
- const char* location) {
- if (IsDeadCheck(isolate, location)) return false;
- if (isolate != NULL) {
- if (isolate->IsInitialized()) return true;
- }
- return ApiCheck(InitializeHelper(), location, "Error initializing V8");
-}
-
-// Some initializing API functions are called early and may be
-// called on a thread different from static initializer thread.
-// If Isolate API is used, Isolate::Enter() will initialize TLS so
-// Isolate::Current() works. If it's a legacy case, then the thread
-// may not have TLS initialized yet. However, in initializing APIs it
-// may be too early to call EnsureInitialized() - some pre-init
-// parameters still have to be configured.
-static inline i::Isolate* EnterIsolateIfNeeded() {
- i::Isolate* isolate = i::Isolate::UncheckedCurrent();
- if (isolate != NULL)
- return isolate;
-
- i::Isolate::EnterDefaultIsolate();
- isolate = i::Isolate::Current();
- return isolate;
-}
-
-
-void V8::SetFatalErrorHandler(FatalErrorCallback that) {
- i::Isolate* isolate = EnterIsolateIfNeeded();
- isolate->set_exception_behavior(that);
-}
-
-
-#ifdef DEBUG
-void ImplementationUtilities::ZapHandleRange(i::Object** begin,
- i::Object** end) {
- i::HandleScope::ZapRange(begin, end);
-}
-#endif
-
-
-void V8::SetFlagsFromString(const char* str, int length) {
- i::FlagList::SetFlagsFromString(str, length);
-}
-
-
-void V8::SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags) {
- i::FlagList::SetFlagsFromCommandLine(argc, argv, remove_flags);
-}
-
-
-v8::Handle<Value> ThrowException(v8::Handle<v8::Value> value) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::ThrowException()")) {
- return v8::Handle<Value>();
- }
- ENTER_V8(isolate);
- // If we're passed an empty handle, we throw an undefined exception
- // to deal more gracefully with out of memory situations.
- if (value.IsEmpty()) {
- isolate->ScheduleThrow(isolate->heap()->undefined_value());
- } else {
- isolate->ScheduleThrow(*Utils::OpenHandle(*value));
- }
- return v8::Undefined();
-}
-
-
-RegisteredExtension* RegisteredExtension::first_extension_ = NULL;
-
-
-RegisteredExtension::RegisteredExtension(Extension* extension)
- : extension_(extension), state_(UNVISITED) { }
-
-
-void RegisteredExtension::Register(RegisteredExtension* that) {
- that->next_ = first_extension_;
- first_extension_ = that;
-}
-
-
-void RegisterExtension(Extension* that) {
- RegisteredExtension* extension = new RegisteredExtension(that);
- RegisteredExtension::Register(extension);
-}
-
-
-Extension::Extension(const char* name,
- const char* source,
- int dep_count,
- const char** deps)
- : name_(name),
- source_(source),
- dep_count_(dep_count),
- deps_(deps),
- auto_enable_(false) { }
-
-
-v8::Handle<Primitive> Undefined() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::Undefined()")) {
- return v8::Handle<v8::Primitive>();
- }
- return v8::Handle<Primitive>(ToApi<Primitive>(
- isolate->factory()->undefined_value()));
-}
-
-
-v8::Handle<Primitive> Null() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::Null()")) {
- return v8::Handle<v8::Primitive>();
- }
- return v8::Handle<Primitive>(
- ToApi<Primitive>(isolate->factory()->null_value()));
-}
-
-
-v8::Handle<Boolean> True() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::True()")) {
- return v8::Handle<Boolean>();
- }
- return v8::Handle<Boolean>(
- ToApi<Boolean>(isolate->factory()->true_value()));
-}
-
-
-v8::Handle<Boolean> False() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::False()")) {
- return v8::Handle<Boolean>();
- }
- return v8::Handle<Boolean>(
- ToApi<Boolean>(isolate->factory()->false_value()));
-}
-
-
-ResourceConstraints::ResourceConstraints()
- : max_young_space_size_(0),
- max_old_space_size_(0),
- max_executable_size_(0),
- stack_limit_(NULL) { }
-
-
-bool SetResourceConstraints(ResourceConstraints* constraints) {
- i::Isolate* isolate = EnterIsolateIfNeeded();
-
- int young_space_size = constraints->max_young_space_size();
- int old_gen_size = constraints->max_old_space_size();
- int max_executable_size = constraints->max_executable_size();
- if (young_space_size != 0 || old_gen_size != 0 || max_executable_size != 0) {
- // After initialization it's too late to change Heap constraints.
- ASSERT(!isolate->IsInitialized());
- bool result = isolate->heap()->ConfigureHeap(young_space_size / 2,
- old_gen_size,
- max_executable_size);
- if (!result) return false;
- }
- if (constraints->stack_limit() != NULL) {
- uintptr_t limit = reinterpret_cast<uintptr_t>(constraints->stack_limit());
- isolate->stack_guard()->SetStackLimit(limit);
- }
- return true;
-}
-
-
-i::Object** V8::GlobalizeReference(i::Object** obj) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "V8::Persistent::New")) return NULL;
- LOG_API(isolate, "Persistent::New");
- i::Handle<i::Object> result =
- isolate->global_handles()->Create(*obj);
- return result.location();
-}
-
-
-void V8::MakeWeak(i::Object** object, void* parameters,
- WeakReferenceCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "MakeWeak");
- isolate->global_handles()->MakeWeak(object, parameters,
- callback);
-}
-
-
-void V8::ClearWeak(i::Object** obj) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "ClearWeak");
- isolate->global_handles()->ClearWeakness(obj);
-}
-
-
-bool V8::IsGlobalNearDeath(i::Object** obj) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "IsGlobalNearDeath");
- if (!isolate->IsInitialized()) return false;
- return i::GlobalHandles::IsNearDeath(obj);
-}
-
-
-bool V8::IsGlobalWeak(i::Object** obj) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "IsGlobalWeak");
- if (!isolate->IsInitialized()) return false;
- return i::GlobalHandles::IsWeak(obj);
-}
-
-
-void V8::DisposeGlobal(i::Object** obj) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "DisposeGlobal");
- if (!isolate->IsInitialized()) return;
- isolate->global_handles()->Destroy(obj);
-}
-
-// --- H a n d l e s ---
-
-
-HandleScope::HandleScope() {
- API_ENTRY_CHECK("HandleScope::HandleScope");
- i::Isolate* isolate = i::Isolate::Current();
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
- isolate_ = isolate;
- prev_next_ = current->next;
- prev_limit_ = current->limit;
- is_closed_ = false;
- current->level++;
-}
-
-
-HandleScope::~HandleScope() {
- if (!is_closed_) {
- Leave();
- }
-}
-
-
-void HandleScope::Leave() {
- ASSERT(isolate_ == i::Isolate::Current());
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
- current->level--;
- ASSERT(current->level >= 0);
- current->next = prev_next_;
- if (current->limit != prev_limit_) {
- current->limit = prev_limit_;
- i::HandleScope::DeleteExtensions(isolate_);
- }
-
-#ifdef DEBUG
- i::HandleScope::ZapRange(prev_next_, prev_limit_);
-#endif
-}
-
-
-int HandleScope::NumberOfHandles() {
- EnsureInitializedForIsolate(
- i::Isolate::Current(), "HandleScope::NumberOfHandles");
- return i::HandleScope::NumberOfHandles();
-}
-
-
-i::Object** HandleScope::CreateHandle(i::Object* value) {
- return i::HandleScope::CreateHandle(value, i::Isolate::Current());
-}
-
-
-i::Object** HandleScope::CreateHandle(i::HeapObject* value) {
- ASSERT(value->IsHeapObject());
- return reinterpret_cast<i::Object**>(
- i::HandleScope::CreateHandle(value, value->GetIsolate()));
-}
-
-
-void Context::Enter() {
- // TODO(isolates): Context should have a pointer to isolate.
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::Enter()")) return;
- ENTER_V8(isolate);
-
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- isolate->handle_scope_implementer()->EnterContext(env);
-
- isolate->handle_scope_implementer()->SaveContext(isolate->context());
- isolate->set_context(*env);
-}
-
-
-void Context::Exit() {
- // TODO(isolates): Context should have a pointer to isolate.
- i::Isolate* isolate = i::Isolate::Current();
- if (!isolate->IsInitialized()) return;
-
- if (!ApiCheck(isolate->handle_scope_implementer()->LeaveLastContext(),
- "v8::Context::Exit()",
- "Cannot exit non-entered context")) {
- return;
- }
-
- // Content of 'last_context' could be NULL.
- i::Context* last_context =
- isolate->handle_scope_implementer()->RestoreContext();
- isolate->set_context(last_context);
-}
-
-
-void Context::SetData(v8::Handle<String> data) {
- // TODO(isolates): Context should have a pointer to isolate.
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::SetData()")) return;
- ENTER_V8(isolate);
- {
- i::HandleScope scope(isolate);
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
- ASSERT(env->IsGlobalContext());
- if (env->IsGlobalContext()) {
- env->set_data(*raw_data);
- }
- }
-}
-
-
-v8::Local<v8::Value> Context::GetData() {
- // TODO(isolates): Context should have a pointer to isolate.
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetData()")) {
- return v8::Local<Value>();
- }
- ENTER_V8(isolate);
- i::Object* raw_result = NULL;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- ASSERT(env->IsGlobalContext());
- if (env->IsGlobalContext()) {
- raw_result = env->data();
- } else {
- return Local<Value>();
- }
- }
- i::Handle<i::Object> result(raw_result);
- return Utils::ToLocal(result);
-}
-
-
-i::Object** v8::HandleScope::RawClose(i::Object** value) {
- if (!ApiCheck(!is_closed_,
- "v8::HandleScope::Close()",
- "Local scope has already been closed")) {
- return 0;
- }
- LOG_API(isolate_, "CloseHandleScope");
-
- // Read the result before popping the handle block.
- i::Object* result = NULL;
- if (value != NULL) {
- result = *value;
- }
- is_closed_ = true;
- Leave();
-
- if (value == NULL) {
- return NULL;
- }
-
- // Allocate a new handle on the previous handle block.
- i::Handle<i::Object> handle(result);
- return handle.location();
-}
-
-
-// --- N e a n d e r ---
-
-
-// A constructor cannot easily return an error value, therefore it is necessary
-// to check for a dead VM with ON_BAILOUT before constructing any Neander
-// objects. To remind you about this there is no HandleScope in the
-// NeanderObject constructor. When you add one to the site calling the
-// constructor you should check that you ensured the VM was not dead first.
-NeanderObject::NeanderObject(int size) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Nowhere");
- ENTER_V8(isolate);
- value_ = isolate->factory()->NewNeanderObject();
- i::Handle<i::FixedArray> elements = isolate->factory()->NewFixedArray(size);
- value_->set_elements(*elements);
-}
-
-
-int NeanderObject::size() {
- return i::FixedArray::cast(value_->elements())->length();
-}
-
-
-NeanderArray::NeanderArray() : obj_(2) {
- obj_.set(0, i::Smi::FromInt(0));
-}
-
-
-int NeanderArray::length() {
- return i::Smi::cast(obj_.get(0))->value();
-}
-
-
-i::Object* NeanderArray::get(int offset) {
- ASSERT(0 <= offset);
- ASSERT(offset < length());
- return obj_.get(offset + 1);
-}
-
-
-// This method cannot easily return an error value, therefore it is necessary
-// to check for a dead VM with ON_BAILOUT before calling it. To remind you
-// about this there is no HandleScope in this method. When you add one to the
-// site calling this method you should check that you ensured the VM was not
-// dead first.
-void NeanderArray::add(i::Handle<i::Object> value) {
- int length = this->length();
- int size = obj_.size();
- if (length == size - 1) {
- i::Handle<i::FixedArray> new_elms = FACTORY->NewFixedArray(2 * size);
- for (int i = 0; i < length; i++)
- new_elms->set(i + 1, get(i));
- obj_.value()->set_elements(*new_elms);
- }
- obj_.set(length + 1, *value);
- obj_.set(0, i::Smi::FromInt(length + 1));
-}
-
-
-void NeanderArray::set(int index, i::Object* value) {
- if (index < 0 || index >= this->length()) return;
- obj_.set(index + 1, value);
-}
-
-
-// --- T e m p l a t e ---
-
-
-static void InitializeTemplate(i::Handle<i::TemplateInfo> that, int type) {
- that->set_tag(i::Smi::FromInt(type));
-}
-
-
-void Template::Set(v8::Handle<String> name, v8::Handle<Data> value,
- v8::PropertyAttribute attribute) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Template::Set()")) return;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Object> list(Utils::OpenHandle(this)->property_list());
- if (list->IsUndefined()) {
- list = NeanderArray().value();
- Utils::OpenHandle(this)->set_property_list(*list);
- }
- NeanderArray array(list);
- array.add(Utils::OpenHandle(*name));
- array.add(Utils::OpenHandle(*value));
- array.add(Utils::OpenHandle(*v8::Integer::New(attribute)));
-}
-
-
-// --- F u n c t i o n T e m p l a t e ---
-static void InitializeFunctionTemplate(
- i::Handle<i::FunctionTemplateInfo> info) {
- info->set_tag(i::Smi::FromInt(Consts::FUNCTION_TEMPLATE));
- info->set_flag(0);
-}
-
-
-Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::PrototypeTemplate()")) {
- return Local<ObjectTemplate>();
- }
- ENTER_V8(isolate);
- i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template());
- if (result->IsUndefined()) {
- result = Utils::OpenHandle(*ObjectTemplate::New());
- Utils::OpenHandle(this)->set_prototype_template(*result);
- }
- return Local<ObjectTemplate>(ToApi<ObjectTemplate>(result));
-}
-
-
-void FunctionTemplate::Inherit(v8::Handle<FunctionTemplate> value) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::Inherit()")) return;
- ENTER_V8(isolate);
- Utils::OpenHandle(this)->set_parent_template(*Utils::OpenHandle(*value));
-}
-
-
-Local<FunctionTemplate> FunctionTemplate::New(InvocationCallback callback,
- v8::Handle<Value> data, v8::Handle<Signature> signature) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::FunctionTemplate::New()");
- LOG_API(isolate, "FunctionTemplate::New");
- ENTER_V8(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::FUNCTION_TEMPLATE_INFO_TYPE);
- i::Handle<i::FunctionTemplateInfo> obj =
- i::Handle<i::FunctionTemplateInfo>::cast(struct_obj);
- InitializeFunctionTemplate(obj);
- int next_serial_number = isolate->next_serial_number();
- isolate->set_next_serial_number(next_serial_number + 1);
- obj->set_serial_number(i::Smi::FromInt(next_serial_number));
- if (callback != 0) {
- if (data.IsEmpty()) data = v8::Undefined();
- Utils::ToLocal(obj)->SetCallHandler(callback, data);
- }
- obj->set_undetectable(false);
- obj->set_needs_access_check(false);
-
- if (!signature.IsEmpty())
- obj->set_signature(*Utils::OpenHandle(*signature));
- return Utils::ToLocal(obj);
-}
-
-
-Local<Signature> Signature::New(Handle<FunctionTemplate> receiver,
- int argc, Handle<FunctionTemplate> argv[]) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Signature::New()");
- LOG_API(isolate, "Signature::New");
- ENTER_V8(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::SIGNATURE_INFO_TYPE);
- i::Handle<i::SignatureInfo> obj =
- i::Handle<i::SignatureInfo>::cast(struct_obj);
- if (!receiver.IsEmpty()) obj->set_receiver(*Utils::OpenHandle(*receiver));
- if (argc > 0) {
- i::Handle<i::FixedArray> args = isolate->factory()->NewFixedArray(argc);
- for (int i = 0; i < argc; i++) {
- if (!argv[i].IsEmpty())
- args->set(i, *Utils::OpenHandle(*argv[i]));
- }
- obj->set_args(*args);
- }
- return Utils::ToLocal(obj);
-}
-
-
-Local<TypeSwitch> TypeSwitch::New(Handle<FunctionTemplate> type) {
- Handle<FunctionTemplate> types[1] = { type };
- return TypeSwitch::New(1, types);
-}
-
-
-Local<TypeSwitch> TypeSwitch::New(int argc, Handle<FunctionTemplate> types[]) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::TypeSwitch::New()");
- LOG_API(isolate, "TypeSwitch::New");
- ENTER_V8(isolate);
- i::Handle<i::FixedArray> vector = isolate->factory()->NewFixedArray(argc);
- for (int i = 0; i < argc; i++)
- vector->set(i, *Utils::OpenHandle(*types[i]));
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::TYPE_SWITCH_INFO_TYPE);
- i::Handle<i::TypeSwitchInfo> obj =
- i::Handle<i::TypeSwitchInfo>::cast(struct_obj);
- obj->set_types(*vector);
- return Utils::ToLocal(obj);
-}
-
-
-int TypeSwitch::match(v8::Handle<Value> value) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "TypeSwitch::match");
- i::Handle<i::Object> obj = Utils::OpenHandle(*value);
- i::Handle<i::TypeSwitchInfo> info = Utils::OpenHandle(this);
- i::FixedArray* types = i::FixedArray::cast(info->types());
- for (int i = 0; i < types->length(); i++) {
- if (obj->IsInstanceOf(i::FunctionTemplateInfo::cast(types->get(i))))
- return i + 1;
- }
- return 0;
-}
-
-
-#define SET_FIELD_WRAPPED(obj, setter, cdata) do { \
- i::Handle<i::Object> proxy = FromCData(cdata); \
- (obj)->setter(*proxy); \
- } while (false)
-
-
-void FunctionTemplate::SetCallHandler(InvocationCallback callback,
- v8::Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetCallHandler()")) return;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
- i::Handle<i::CallHandlerInfo> obj =
- i::Handle<i::CallHandlerInfo>::cast(struct_obj);
- SET_FIELD_WRAPPED(obj, set_callback, callback);
- if (data.IsEmpty()) data = v8::Undefined();
- obj->set_data(*Utils::OpenHandle(*data));
- Utils::OpenHandle(this)->set_call_code(*obj);
-}
-
-
-static i::Handle<i::AccessorInfo> MakeAccessorInfo(
- v8::Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter,
- v8::Handle<Value> data,
- v8::AccessControl settings,
- v8::PropertyAttribute attributes) {
- i::Handle<i::AccessorInfo> obj = FACTORY->NewAccessorInfo();
- ASSERT(getter != NULL);
- SET_FIELD_WRAPPED(obj, set_getter, getter);
- SET_FIELD_WRAPPED(obj, set_setter, setter);
- if (data.IsEmpty()) data = v8::Undefined();
- obj->set_data(*Utils::OpenHandle(*data));
- obj->set_name(*Utils::OpenHandle(*name));
- if (settings & ALL_CAN_READ) obj->set_all_can_read(true);
- if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
- if (settings & PROHIBITS_OVERWRITING) obj->set_prohibits_overwriting(true);
- obj->set_property_attributes(static_cast<PropertyAttributes>(attributes));
- return obj;
-}
-
-
-void FunctionTemplate::AddInstancePropertyAccessor(
- v8::Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter,
- v8::Handle<Value> data,
- v8::AccessControl settings,
- v8::PropertyAttribute attributes) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::FunctionTemplate::AddInstancePropertyAccessor()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
-
- i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(name,
- getter, setter, data,
- settings, attributes);
- i::Handle<i::Object> list(Utils::OpenHandle(this)->property_accessors());
- if (list->IsUndefined()) {
- list = NeanderArray().value();
- Utils::OpenHandle(this)->set_property_accessors(*list);
- }
- NeanderArray array(list);
- array.add(obj);
-}
-
-
-Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::InstanceTemplate()")
- || EmptyCheck("v8::FunctionTemplate::InstanceTemplate()", this))
- return Local<ObjectTemplate>();
- ENTER_V8(isolate);
- if (Utils::OpenHandle(this)->instance_template()->IsUndefined()) {
- Local<ObjectTemplate> templ =
- ObjectTemplate::New(v8::Handle<FunctionTemplate>(this));
- Utils::OpenHandle(this)->set_instance_template(*Utils::OpenHandle(*templ));
- }
- i::Handle<i::ObjectTemplateInfo> result(i::ObjectTemplateInfo::cast(
- Utils::OpenHandle(this)->instance_template()));
- return Utils::ToLocal(result);
-}
-
-
-void FunctionTemplate::SetClassName(Handle<String> name) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetClassName()")) return;
- ENTER_V8(isolate);
- Utils::OpenHandle(this)->set_class_name(*Utils::OpenHandle(*name));
-}
-
-
-void FunctionTemplate::SetHiddenPrototype(bool value) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetHiddenPrototype()")) {
- return;
- }
- ENTER_V8(isolate);
- Utils::OpenHandle(this)->set_hidden_prototype(value);
-}
-
-
-void FunctionTemplate::SetNamedInstancePropertyHandler(
- NamedPropertyGetter getter,
- NamedPropertySetter setter,
- NamedPropertyQuery query,
- NamedPropertyDeleter remover,
- NamedPropertyEnumerator enumerator,
- Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::FunctionTemplate::SetNamedInstancePropertyHandler()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
- i::Handle<i::InterceptorInfo> obj =
- i::Handle<i::InterceptorInfo>::cast(struct_obj);
-
- if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
- if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
- if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
- if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
- if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
-
- if (data.IsEmpty()) data = v8::Undefined();
- obj->set_data(*Utils::OpenHandle(*data));
- Utils::OpenHandle(this)->set_named_property_handler(*obj);
-}
-
-
-void FunctionTemplate::SetIndexedInstancePropertyHandler(
- IndexedPropertyGetter getter,
- IndexedPropertySetter setter,
- IndexedPropertyQuery query,
- IndexedPropertyDeleter remover,
- IndexedPropertyEnumerator enumerator,
- Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::FunctionTemplate::SetIndexedInstancePropertyHandler()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
- i::Handle<i::InterceptorInfo> obj =
- i::Handle<i::InterceptorInfo>::cast(struct_obj);
-
- if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
- if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
- if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
- if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
- if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
-
- if (data.IsEmpty()) data = v8::Undefined();
- obj->set_data(*Utils::OpenHandle(*data));
- Utils::OpenHandle(this)->set_indexed_property_handler(*obj);
-}
-
-
-void FunctionTemplate::SetInstanceCallAsFunctionHandler(
- InvocationCallback callback,
- Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::FunctionTemplate::SetInstanceCallAsFunctionHandler()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
- i::Handle<i::CallHandlerInfo> obj =
- i::Handle<i::CallHandlerInfo>::cast(struct_obj);
- SET_FIELD_WRAPPED(obj, set_callback, callback);
- if (data.IsEmpty()) data = v8::Undefined();
- obj->set_data(*Utils::OpenHandle(*data));
- Utils::OpenHandle(this)->set_instance_call_handler(*obj);
-}
-
-
-// --- O b j e c t T e m p l a t e ---
-
-
-Local<ObjectTemplate> ObjectTemplate::New() {
- return New(Local<FunctionTemplate>());
-}
-
-
-Local<ObjectTemplate> ObjectTemplate::New(
- v8::Handle<FunctionTemplate> constructor) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::New()")) {
- return Local<ObjectTemplate>();
- }
- EnsureInitializedForIsolate(isolate, "v8::ObjectTemplate::New()");
- LOG_API(isolate, "ObjectTemplate::New");
- ENTER_V8(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::OBJECT_TEMPLATE_INFO_TYPE);
- i::Handle<i::ObjectTemplateInfo> obj =
- i::Handle<i::ObjectTemplateInfo>::cast(struct_obj);
- InitializeTemplate(obj, Consts::OBJECT_TEMPLATE);
- if (!constructor.IsEmpty())
- obj->set_constructor(*Utils::OpenHandle(*constructor));
- obj->set_internal_field_count(i::Smi::FromInt(0));
- return Utils::ToLocal(obj);
-}
-
-
-// Ensure that the object template has a constructor. If no
-// constructor is available we create one.
-static void EnsureConstructor(ObjectTemplate* object_template) {
- if (Utils::OpenHandle(object_template)->constructor()->IsUndefined()) {
- Local<FunctionTemplate> templ = FunctionTemplate::New();
- i::Handle<i::FunctionTemplateInfo> constructor = Utils::OpenHandle(*templ);
- constructor->set_instance_template(*Utils::OpenHandle(object_template));
- Utils::OpenHandle(object_template)->set_constructor(*constructor);
- }
-}
-
-
-void ObjectTemplate::SetAccessor(v8::Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter,
- v8::Handle<Value> data,
- AccessControl settings,
- PropertyAttribute attribute) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessor()")) return;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- EnsureConstructor(this);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
- Utils::ToLocal(cons)->AddInstancePropertyAccessor(name,
- getter,
- setter,
- data,
- settings,
- attribute);
-}
-
-
-void ObjectTemplate::SetNamedPropertyHandler(NamedPropertyGetter getter,
- NamedPropertySetter setter,
- NamedPropertyQuery query,
- NamedPropertyDeleter remover,
- NamedPropertyEnumerator enumerator,
- Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetNamedPropertyHandler()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- EnsureConstructor(this);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
- Utils::ToLocal(cons)->SetNamedInstancePropertyHandler(getter,
- setter,
- query,
- remover,
- enumerator,
- data);
-}
-
-
-void ObjectTemplate::MarkAsUndetectable() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::MarkAsUndetectable()")) return;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- EnsureConstructor(this);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
- cons->set_undetectable(true);
-}
-
-
-void ObjectTemplate::SetAccessCheckCallbacks(
- NamedSecurityCallback named_callback,
- IndexedSecurityCallback indexed_callback,
- Handle<Value> data,
- bool turned_on_by_default) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessCheckCallbacks()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- EnsureConstructor(this);
-
- i::Handle<i::Struct> struct_info =
- isolate->factory()->NewStruct(i::ACCESS_CHECK_INFO_TYPE);
- i::Handle<i::AccessCheckInfo> info =
- i::Handle<i::AccessCheckInfo>::cast(struct_info);
-
- SET_FIELD_WRAPPED(info, set_named_callback, named_callback);
- SET_FIELD_WRAPPED(info, set_indexed_callback, indexed_callback);
-
- if (data.IsEmpty()) data = v8::Undefined();
- info->set_data(*Utils::OpenHandle(*data));
-
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
- cons->set_access_check_info(*info);
- cons->set_needs_access_check(turned_on_by_default);
-}
-
-
-void ObjectTemplate::SetIndexedPropertyHandler(
- IndexedPropertyGetter getter,
- IndexedPropertySetter setter,
- IndexedPropertyQuery query,
- IndexedPropertyDeleter remover,
- IndexedPropertyEnumerator enumerator,
- Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetIndexedPropertyHandler()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- EnsureConstructor(this);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
- Utils::ToLocal(cons)->SetIndexedInstancePropertyHandler(getter,
- setter,
- query,
- remover,
- enumerator,
- data);
-}
-
-
-void ObjectTemplate::SetCallAsFunctionHandler(InvocationCallback callback,
- Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::ObjectTemplate::SetCallAsFunctionHandler()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- EnsureConstructor(this);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
- Utils::ToLocal(cons)->SetInstanceCallAsFunctionHandler(callback, data);
-}
-
-
-int ObjectTemplate::InternalFieldCount() {
- if (IsDeadCheck(Utils::OpenHandle(this)->GetIsolate(),
- "v8::ObjectTemplate::InternalFieldCount()")) {
- return 0;
- }
- return i::Smi::cast(Utils::OpenHandle(this)->internal_field_count())->value();
-}
-
-
-void ObjectTemplate::SetInternalFieldCount(int value) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetInternalFieldCount()")) {
- return;
- }
- if (!ApiCheck(i::Smi::IsValid(value),
- "v8::ObjectTemplate::SetInternalFieldCount()",
- "Invalid internal field count")) {
- return;
- }
- ENTER_V8(isolate);
- if (value > 0) {
- // The internal field count is set by the constructor function's
- // construct code, so we ensure that there is a constructor
- // function to do the setting.
- EnsureConstructor(this);
- }
- Utils::OpenHandle(this)->set_internal_field_count(i::Smi::FromInt(value));
-}
-
-
-// --- S c r i p t D a t a ---
-
-
-ScriptData* ScriptData::PreCompile(const char* input, int length) {
- i::Utf8ToUC16CharacterStream stream(
- reinterpret_cast<const unsigned char*>(input), length);
- return i::ParserApi::PreParse(&stream, NULL);
-}
-
-
-ScriptData* ScriptData::PreCompile(v8::Handle<String> source) {
- i::Handle<i::String> str = Utils::OpenHandle(*source);
- if (str->IsExternalTwoByteString()) {
- i::ExternalTwoByteStringUC16CharacterStream stream(
- i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length());
- return i::ParserApi::PreParse(&stream, NULL);
- } else {
- i::GenericStringUC16CharacterStream stream(str, 0, str->length());
- return i::ParserApi::PreParse(&stream, NULL);
- }
-}
-
-
-ScriptData* ScriptData::New(const char* data, int length) {
- // Return an empty ScriptData if the length is obviously invalid.
- if (length % sizeof(unsigned) != 0) {
- return new i::ScriptDataImpl();
- }
-
- // Copy the data to ensure it is properly aligned.
- int deserialized_data_length = length / sizeof(unsigned);
- // If aligned, don't create a copy of the data.
- if (reinterpret_cast<intptr_t>(data) % sizeof(unsigned) == 0) {
- return new i::ScriptDataImpl(data, length);
- }
- // Copy the data to align it.
- unsigned* deserialized_data = i::NewArray<unsigned>(deserialized_data_length);
- i::OS::MemCopy(deserialized_data, data, length);
-
- return new i::ScriptDataImpl(
- i::Vector<unsigned>(deserialized_data, deserialized_data_length));
-}
-
-
-// --- S c r i p t ---
-
-
-Local<Script> Script::New(v8::Handle<String> source,
- v8::ScriptOrigin* origin,
- v8::ScriptData* pre_data,
- v8::Handle<String> script_data) {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Script::New()", return Local<Script>());
- LOG_API(isolate, "Script::New");
- ENTER_V8(isolate);
- i::Handle<i::String> str = Utils::OpenHandle(*source);
- i::Handle<i::Object> name_obj;
- int line_offset = 0;
- int column_offset = 0;
- if (origin != NULL) {
- if (!origin->ResourceName().IsEmpty()) {
- name_obj = Utils::OpenHandle(*origin->ResourceName());
- }
- if (!origin->ResourceLineOffset().IsEmpty()) {
- line_offset = static_cast<int>(origin->ResourceLineOffset()->Value());
- }
- if (!origin->ResourceColumnOffset().IsEmpty()) {
- column_offset = static_cast<int>(origin->ResourceColumnOffset()->Value());
- }
- }
- EXCEPTION_PREAMBLE(isolate);
- i::ScriptDataImpl* pre_data_impl = static_cast<i::ScriptDataImpl*>(pre_data);
- // We assert that the pre-data is sane, even though we can actually
- // handle it if it turns out not to be in release mode.
- ASSERT(pre_data_impl == NULL || pre_data_impl->SanityCheck());
- // If the pre-data isn't sane we simply ignore it
- if (pre_data_impl != NULL && !pre_data_impl->SanityCheck()) {
- pre_data_impl = NULL;
- }
- i::Handle<i::SharedFunctionInfo> result =
- i::Compiler::Compile(str,
- name_obj,
- line_offset,
- column_offset,
- NULL,
- pre_data_impl,
- Utils::OpenHandle(*script_data),
- i::NOT_NATIVES_CODE);
- has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
- return Local<Script>(ToApi<Script>(result));
-}
-
-
-Local<Script> Script::New(v8::Handle<String> source,
- v8::Handle<Value> file_name) {
- ScriptOrigin origin(file_name);
- return New(source, &origin);
-}
-
-
-Local<Script> Script::Compile(v8::Handle<String> source,
- v8::ScriptOrigin* origin,
- v8::ScriptData* pre_data,
- v8::Handle<String> script_data) {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Script::Compile()", return Local<Script>());
- LOG_API(isolate, "Script::Compile");
- ENTER_V8(isolate);
- Local<Script> generic = New(source, origin, pre_data, script_data);
- if (generic.IsEmpty())
- return generic;
- i::Handle<i::Object> obj = Utils::OpenHandle(*generic);
- i::Handle<i::SharedFunctionInfo> function =
- i::Handle<i::SharedFunctionInfo>(i::SharedFunctionInfo::cast(*obj));
- i::Handle<i::JSFunction> result =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(
- function,
- isolate->global_context());
- return Local<Script>(ToApi<Script>(result));
-}
-
-
-Local<Script> Script::Compile(v8::Handle<String> source,
- v8::Handle<Value> file_name,
- v8::Handle<String> script_data) {
- ScriptOrigin origin(file_name);
- return Compile(source, &origin, 0, script_data);
-}
-
-
-Local<Value> Script::Run() {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Script::Run()", return Local<Value>());
- LOG_API(isolate, "Script::Run");
- ENTER_V8(isolate);
- i::Object* raw_result = NULL;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::JSFunction> fun;
- if (obj->IsSharedFunctionInfo()) {
- i::Handle<i::SharedFunctionInfo>
- function_info(i::SharedFunctionInfo::cast(*obj), isolate);
- fun = isolate->factory()->NewFunctionFromSharedFunctionInfo(
- function_info, isolate->global_context());
- } else {
- fun = i::Handle<i::JSFunction>(i::JSFunction::cast(*obj), isolate);
- }
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> receiver(
- isolate->context()->global_proxy(), isolate);
- i::Handle<i::Object> result =
- i::Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
- raw_result = *result;
- }
- i::Handle<i::Object> result(raw_result, isolate);
- return Utils::ToLocal(result);
-}
-
-#ifdef QT_BUILD_SCRIPT_LIB
-Local<Value> Script::Run(Handle<Object> receiver) {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Script::Run()", return Local<Value>());
- LOG_API(isolate, "Script::Run");
- ENTER_V8(isolate);
- i::Object* raw_result = NULL;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::JSFunction> fun;
- if (obj->IsSharedFunctionInfo()) {
- i::Handle<i::SharedFunctionInfo>
- function_info(i::SharedFunctionInfo::cast(*obj));
- fun = isolate->factory()->NewFunctionFromSharedFunctionInfo(
- function_info, isolate->global_context());
- } else {
- fun = i::Handle<i::JSFunction>(i::JSFunction::cast(*obj));
- }
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> recv = Utils::OpenHandle(*receiver);
- i::Handle<i::Object> result =
- i::Execution::Call(fun, recv, 0, NULL, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
- raw_result = *result;
- }
- i::Handle<i::Object> result(raw_result);
- return Utils::ToLocal(result);
-}
-#endif
-
-static i::Handle<i::SharedFunctionInfo> OpenScript(Script* script) {
- i::Handle<i::Object> obj = Utils::OpenHandle(script);
- i::Handle<i::SharedFunctionInfo> result;
- if (obj->IsSharedFunctionInfo()) {
- result =
- i::Handle<i::SharedFunctionInfo>(i::SharedFunctionInfo::cast(*obj));
- } else {
- result =
- i::Handle<i::SharedFunctionInfo>(i::JSFunction::cast(*obj)->shared());
- }
- return result;
-}
-
-
-Local<Value> Script::Id() {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Script::Id()", return Local<Value>());
- LOG_API(isolate, "Script::Id");
- i::Object* raw_id = NULL;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
- i::Handle<i::Script> script(i::Script::cast(function_info->script()));
- i::Handle<i::Object> id(script->id());
- raw_id = *id;
- }
- i::Handle<i::Object> id(raw_id);
- return Utils::ToLocal(id);
-}
-
-
-void Script::SetData(v8::Handle<String> data) {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Script::SetData()", return);
- LOG_API(isolate, "Script::SetData");
- {
- i::HandleScope scope(isolate);
- i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
- i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
- i::Handle<i::Script> script(i::Script::cast(function_info->script()));
- script->set_data(*raw_data);
- }
-}
-
-
-#ifdef QT_BUILD_SCRIPT_LIB
-Local<Script> Script::CompileEval(v8::Handle<String> source,
- v8::ScriptOrigin* origin,
- v8::ScriptData* pre_data,
- v8::Handle<String> script_data) {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Script::CompileEval()", return Local<Script>());
- LOG_API(isolate, "Script::CompileEval");
- ENTER_V8(isolate);
- i::Handle<i::String> str = Utils::OpenHandle(*source);
- i::Handle<i::Context> context(isolate->context());
-
- i::Handle<i::Object> name_obj;
- int line_offset = 0;
- int column_offset = 0;
- if (origin != NULL) {
- if (!origin->ResourceName().IsEmpty()) {
- name_obj = Utils::OpenHandle(*origin->ResourceName());
- }
- if (!origin->ResourceLineOffset().IsEmpty()) {
- line_offset = static_cast<int>(origin->ResourceLineOffset()->Value());
- }
- if (!origin->ResourceColumnOffset().IsEmpty()) {
- column_offset = static_cast<int>(origin->ResourceColumnOffset()->Value());
- }
- }
-
- i::Handle<i::SharedFunctionInfo> shared = i::Compiler::CompileEval(
- str,
- context,
- context->IsGlobalContext(),
- i::kNonStrictMode,
- name_obj, line_offset, column_offset);
- if (shared.is_null())
- return Local<Script>();
- i::Handle<i::JSFunction> result = isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared,
- context,
- i::NOT_TENURED);
- return Local<Script>(ToApi<Script>(result));
-}
-
-
-Local<Script> Script::CompileEval(v8::Handle<String> source,
- v8::Handle<Value> file_name,
- v8::Handle<String> script_data) {
- ScriptOrigin origin(file_name);
- return CompileEval(source, &origin, 0, script_data);
-}
-#endif
-
-
-// --- E x c e p t i o n s ---
-
-
-v8::TryCatch::TryCatch()
- : next_(i::Isolate::Current()->try_catch_handler_address()),
- exception_(HEAP->the_hole_value()),
- message_(i::Smi::FromInt(0)),
- is_verbose_(false),
- can_continue_(true),
- capture_message_(true),
- rethrow_(false) {
- i::Isolate::Current()->RegisterTryCatchHandler(this);
-}
-
-
-v8::TryCatch::~TryCatch() {
- i::Isolate* isolate = i::Isolate::Current();
- if (rethrow_) {
- v8::HandleScope scope;
- v8::Local<v8::Value> exc = v8::Local<v8::Value>::New(Exception());
- isolate->UnregisterTryCatchHandler(this);
- v8::ThrowException(exc);
- } else {
- isolate->UnregisterTryCatchHandler(this);
- }
-}
-
-
-bool v8::TryCatch::HasCaught() const {
- return !reinterpret_cast<i::Object*>(exception_)->IsTheHole();
-}
-
-
-bool v8::TryCatch::CanContinue() const {
- return can_continue_;
-}
-
-
-v8::Handle<v8::Value> v8::TryCatch::ReThrow() {
- if (!HasCaught()) return v8::Local<v8::Value>();
- rethrow_ = true;
- return v8::Undefined();
-}
-
-
-v8::Local<Value> v8::TryCatch::Exception() const {
- if (HasCaught()) {
- // Check for out of memory exception.
- i::Object* exception = reinterpret_cast<i::Object*>(exception_);
- return v8::Utils::ToLocal(i::Handle<i::Object>(exception));
- } else {
- return v8::Local<Value>();
- }
-}
-
-
-v8::Local<Value> v8::TryCatch::StackTrace() const {
- if (HasCaught()) {
- i::Object* raw_obj = reinterpret_cast<i::Object*>(exception_);
- if (!raw_obj->IsJSObject()) return v8::Local<Value>();
- v8::HandleScope scope;
- i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj));
- i::Handle<i::String> name = FACTORY->LookupAsciiSymbol("stack");
- if (!obj->HasProperty(*name))
- return v8::Local<Value>();
- return scope.Close(v8::Utils::ToLocal(i::GetProperty(obj, name)));
- } else {
- return v8::Local<Value>();
- }
-}
-
-
-v8::Local<v8::Message> v8::TryCatch::Message() const {
- if (HasCaught() && message_ != i::Smi::FromInt(0)) {
- i::Object* message = reinterpret_cast<i::Object*>(message_);
- return v8::Utils::MessageToLocal(i::Handle<i::Object>(message));
- } else {
- return v8::Local<v8::Message>();
- }
-}
-
-
-void v8::TryCatch::Reset() {
- exception_ = HEAP->the_hole_value();
- message_ = i::Smi::FromInt(0);
-}
-
-
-void v8::TryCatch::SetVerbose(bool value) {
- is_verbose_ = value;
-}
-
-
-void v8::TryCatch::SetCaptureMessage(bool value) {
- capture_message_ = value;
-}
-
-
-// --- M e s s a g e ---
-
-
-Local<String> Message::Get() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Message::Get()", return Local<String>());
- ENTER_V8(isolate);
- HandleScope scope;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::String> raw_result = i::MessageHandler::GetMessage(obj);
- Local<String> result = Utils::ToLocal(raw_result);
- return scope.Close(result);
-}
-
-
-v8::Handle<Value> Message::GetScriptResourceName() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetScriptResourceName()")) {
- return Local<String>();
- }
- ENTER_V8(isolate);
- HandleScope scope;
- i::Handle<i::JSMessageObject> message =
- i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- // Return this.script.name.
- i::Handle<i::JSValue> script =
- i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script()));
- i::Handle<i::Object> resource_name(i::Script::cast(script->value())->name());
- return scope.Close(Utils::ToLocal(resource_name));
-}
-
-
-v8::Handle<Value> Message::GetScriptData() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetScriptResourceData()")) {
- return Local<Value>();
- }
- ENTER_V8(isolate);
- HandleScope scope;
- i::Handle<i::JSMessageObject> message =
- i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- // Return this.script.data.
- i::Handle<i::JSValue> script =
- i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script()));
- i::Handle<i::Object> data(i::Script::cast(script->value())->data());
- return scope.Close(Utils::ToLocal(data));
-}
-
-
-v8::Handle<v8::StackTrace> Message::GetStackTrace() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetStackTrace()")) {
- return Local<v8::StackTrace>();
- }
- ENTER_V8(isolate);
- HandleScope scope;
- i::Handle<i::JSMessageObject> message =
- i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- i::Handle<i::Object> stackFramesObj(message->stack_frames());
- if (!stackFramesObj->IsJSArray()) return v8::Handle<v8::StackTrace>();
- i::Handle<i::JSArray> stackTrace =
- i::Handle<i::JSArray>::cast(stackFramesObj);
- return scope.Close(Utils::StackTraceToLocal(stackTrace));
-}
-
-
-static i::Handle<i::Object> CallV8HeapFunction(const char* name,
- i::Handle<i::Object> recv,
- int argc,
- i::Object** argv[],
- bool* has_pending_exception) {
- i::Isolate* isolate = i::Isolate::Current();
- i::Handle<i::String> fmt_str = isolate->factory()->LookupAsciiSymbol(name);
- i::Object* object_fun =
- isolate->js_builtins_object()->GetPropertyNoExceptionThrown(*fmt_str);
- i::Handle<i::JSFunction> fun =
- i::Handle<i::JSFunction>(i::JSFunction::cast(object_fun));
- i::Handle<i::Object> value =
- i::Execution::Call(fun, recv, argc, argv, has_pending_exception);
- return value;
-}
-
-
-static i::Handle<i::Object> CallV8HeapFunction(const char* name,
- i::Handle<i::Object> data,
- bool* has_pending_exception) {
- i::Object** argv[1] = { data.location() };
- return CallV8HeapFunction(name,
- i::Isolate::Current()->js_builtins_object(),
- 1,
- argv,
- has_pending_exception);
-}
-
-
-int Message::GetLineNumber() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Message::GetLineNumber()", return kNoLineNumberInfo);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
-
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = CallV8HeapFunction("GetLineNumber",
- Utils::OpenHandle(this),
- &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, 0);
- return static_cast<int>(result->Number());
-}
-
-
-int Message::GetStartPosition() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetStartPosition()")) return 0;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSMessageObject> message =
- i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- return message->start_position();
-}
-
-
-int Message::GetEndPosition() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetEndPosition()")) return 0;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSMessageObject> message =
- i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- return message->end_position();
-}
-
-
-int Message::GetStartColumn() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetStartColumn()")) {
- return kNoColumnInfo;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> start_col_obj = CallV8HeapFunction(
- "GetPositionInLine",
- data_obj,
- &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, 0);
- return static_cast<int>(start_col_obj->Number());
-}
-
-
-int Message::GetEndColumn() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetEndColumn()")) return kNoColumnInfo;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> start_col_obj = CallV8HeapFunction(
- "GetPositionInLine",
- data_obj,
- &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, 0);
- i::Handle<i::JSMessageObject> message =
- i::Handle<i::JSMessageObject>::cast(data_obj);
- int start = message->start_position();
- int end = message->end_position();
- return static_cast<int>(start_col_obj->Number()) + (end - start);
-}
-
-
-Local<String> Message::GetSourceLine() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Message::GetSourceLine()", return Local<String>());
- ENTER_V8(isolate);
- HandleScope scope;
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = CallV8HeapFunction("GetSourceLine",
- Utils::OpenHandle(this),
- &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::String>());
- if (result->IsString()) {
- return scope.Close(Utils::ToLocal(i::Handle<i::String>::cast(result)));
- } else {
- return Local<String>();
- }
-}
-
-
-void Message::PrintCurrentStackTrace(FILE* out) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Message::PrintCurrentStackTrace()")) return;
- ENTER_V8(isolate);
- isolate->PrintCurrentStackTrace(out);
-}
-
-
-// --- S t a c k T r a c e ---
-
-Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackTrace::GetFrame()")) {
- return Local<StackFrame>();
- }
- ENTER_V8(isolate);
- HandleScope scope;
- i::Handle<i::JSArray> self = Utils::OpenHandle(this);
- i::Object* raw_object = self->GetElementNoExceptionThrown(index);
- i::Handle<i::JSObject> obj(i::JSObject::cast(raw_object));
- return scope.Close(Utils::StackFrameToLocal(obj));
-}
-
-
-int StackTrace::GetFrameCount() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackTrace::GetFrameCount()")) return -1;
- ENTER_V8(isolate);
- return i::Smi::cast(Utils::OpenHandle(this)->length())->value();
-}
-
-
-Local<Array> StackTrace::AsArray() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackTrace::AsArray()")) Local<Array>();
- ENTER_V8(isolate);
- return Utils::ToLocal(Utils::OpenHandle(this));
-}
-
-
-Local<StackTrace> StackTrace::CurrentStackTrace(int frame_limit,
- StackTraceOptions options) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::StackTrace::CurrentStackTrace()")) {
- Local<StackTrace>();
- }
- ENTER_V8(isolate);
- i::Handle<i::JSArray> stackTrace =
- isolate->CaptureCurrentStackTrace(frame_limit, options);
- return Utils::StackTraceToLocal(stackTrace);
-}
-
-
-// --- S t a c k F r a m e ---
-
-int StackFrame::GetLineNumber() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetLineNumber()")) {
- return Message::kNoLineNumberInfo;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> line = GetProperty(self, "lineNumber");
- if (!line->IsSmi()) {
- return Message::kNoLineNumberInfo;
- }
- return i::Smi::cast(*line)->value();
-}
-
-
-int StackFrame::GetColumn() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetColumn()")) {
- return Message::kNoColumnInfo;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> column = GetProperty(self, "column");
- if (!column->IsSmi()) {
- return Message::kNoColumnInfo;
- }
- return i::Smi::cast(*column)->value();
-}
-
-
-Local<String> StackFrame::GetScriptName() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptName()")) {
- return Local<String>();
- }
- ENTER_V8(isolate);
- HandleScope scope;
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> name = GetProperty(self, "scriptName");
- if (!name->IsString()) {
- return Local<String>();
- }
- return scope.Close(Local<String>::Cast(Utils::ToLocal(name)));
-}
-
-#ifdef QT_BUILD_SCRIPT_LIB
-Local<Value> StackFrame::GetScriptId() const {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptId()")) return Local<Value>();
- ENTER_V8(isolate);
- HandleScope scope;
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> id = GetProperty(self, "scriptId");
- if (!id->IsNumber()) {
- return Local<Value>();
- }
- return scope.Close(Utils::ToLocal(id));
-}
-#endif
-
-Local<String> StackFrame::GetScriptNameOrSourceURL() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptNameOrSourceURL()")) {
- return Local<String>();
- }
- ENTER_V8(isolate);
- HandleScope scope;
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> name = GetProperty(self, "scriptNameOrSourceURL");
- if (!name->IsString()) {
- return Local<String>();
- }
- return scope.Close(Local<String>::Cast(Utils::ToLocal(name)));
-}
-
-
-Local<String> StackFrame::GetFunctionName() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetFunctionName()")) {
- return Local<String>();
- }
- ENTER_V8(isolate);
- HandleScope scope;
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> name = GetProperty(self, "functionName");
- if (!name->IsString()) {
- return Local<String>();
- }
- return scope.Close(Local<String>::Cast(Utils::ToLocal(name)));
-}
-
-
-bool StackFrame::IsEval() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::IsEval()")) return false;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> is_eval = GetProperty(self, "isEval");
- return is_eval->IsTrue();
-}
-
-
-bool StackFrame::IsConstructor() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::IsConstructor()")) return false;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> is_constructor = GetProperty(self, "isConstructor");
- return is_constructor->IsTrue();
-}
-
-
-// --- D a t a ---
-
-bool Value::IsUndefined() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsUndefined()")) {
- return false;
- }
- return Utils::OpenHandle(this)->IsUndefined();
-}
-
-
-bool Value::IsNull() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsNull()")) return false;
- return Utils::OpenHandle(this)->IsNull();
-}
-
-
-bool Value::IsTrue() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsTrue()")) return false;
- return Utils::OpenHandle(this)->IsTrue();
-}
-
-
-bool Value::IsFalse() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsFalse()")) return false;
- return Utils::OpenHandle(this)->IsFalse();
-}
-
-
-bool Value::IsFunction() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsFunction()")) {
- return false;
- }
- return Utils::OpenHandle(this)->IsJSFunction();
-}
-
-
-bool Value::FullIsString() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsString()")) return false;
- bool result = Utils::OpenHandle(this)->IsString();
- ASSERT_EQ(result, QuickIsString());
- return result;
-}
-
-
-bool Value::IsArray() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArray()")) return false;
- return Utils::OpenHandle(this)->IsJSArray();
-}
-
-
-bool Value::IsObject() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsObject()")) return false;
- return Utils::OpenHandle(this)->IsJSObject();
-}
-
-
-bool Value::IsNumber() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsNumber()")) return false;
- return Utils::OpenHandle(this)->IsNumber();
-}
-
-
-bool Value::IsBoolean() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsBoolean()")) {
- return false;
- }
- return Utils::OpenHandle(this)->IsBoolean();
-}
-
-
-bool Value::IsExternal() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsExternal()")) {
- return false;
- }
- return Utils::OpenHandle(this)->IsProxy();
-}
-
-
-bool Value::IsInt32() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsInt32()")) return false;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) return true;
- if (obj->IsNumber()) {
- double value = obj->Number();
- return i::FastI2D(i::FastD2I(value)) == value;
- }
- return false;
-}
-
-
-bool Value::IsUint32() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsUint32()")) return false;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) return i::Smi::cast(*obj)->value() >= 0;
- if (obj->IsNumber()) {
- double value = obj->Number();
- return i::FastUI2D(i::FastD2UI(value)) == value;
- }
- return false;
-}
-
-
-bool Value::IsDate() const {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsDate()")) return false;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->HasSpecificClassOf(isolate->heap()->Date_symbol());
-}
-
-
-bool Value::IsRegExp() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsRegExp()")) return false;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->IsJSRegExp();
-}
-
-bool Value::IsError() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsError()")) return false;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->HasSpecificClassOf(HEAP->Error_symbol());
-}
-
-
-Local<String> Value::ToString() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> str;
- if (obj->IsString()) {
- str = obj;
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToString()")) {
- return Local<String>();
- }
- LOG_API(isolate, "ToString");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- str = i::Execution::ToString(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<String>());
- }
- return Local<String>(ToApi<String>(str));
-}
-
-
-Local<String> Value::ToDetailString() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> str;
- if (obj->IsString()) {
- str = obj;
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToDetailString()")) {
- return Local<String>();
- }
- LOG_API(isolate, "ToDetailString");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- str = i::Execution::ToDetailString(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<String>());
- }
- return Local<String>(ToApi<String>(str));
-}
-
-
-Local<v8::Object> Value::ToObject() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> val;
- if (obj->IsJSObject()) {
- val = obj;
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToObject()")) {
- return Local<v8::Object>();
- }
- LOG_API(isolate, "ToObject");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- val = i::Execution::ToObject(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
- }
- return Local<v8::Object>(ToApi<Object>(val));
-}
-
-
-Local<Boolean> Value::ToBoolean() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsBoolean()) {
- return Local<Boolean>(ToApi<Boolean>(obj));
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToBoolean()")) {
- return Local<Boolean>();
- }
- LOG_API(isolate, "ToBoolean");
- ENTER_V8(isolate);
- i::Handle<i::Object> val = i::Execution::ToBoolean(obj);
- return Local<Boolean>(ToApi<Boolean>(val));
- }
-}
-
-
-Local<Number> Value::ToNumber() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> num;
- if (obj->IsNumber()) {
- num = obj;
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToNumber()")) {
- return Local<Number>();
- }
- LOG_API(isolate, "ToNumber");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToNumber(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Number>());
- }
- return Local<Number>(ToApi<Number>(num));
-}
-
-
-Local<Integer> Value::ToInteger() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> num;
- if (obj->IsSmi()) {
- num = obj;
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToInteger()")) return Local<Integer>();
- LOG_API(isolate, "ToInteger");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToInteger(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Integer>());
- }
- return Local<Integer>(ToApi<Integer>(num));
-}
-
-
-void External::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::External::Cast()")) return;
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsProxy(),
- "v8::External::Cast()",
- "Could not convert to external");
-}
-
-
-void v8::Object::CheckCast(Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Object::Cast()")) return;
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSObject(),
- "v8::Object::Cast()",
- "Could not convert to object");
-}
-
-
-void v8::Function::CheckCast(Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Function::Cast()")) return;
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSFunction(),
- "v8::Function::Cast()",
- "Could not convert to function");
-}
-
-
-void v8::String::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::String::Cast()")) return;
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsString(),
- "v8::String::Cast()",
- "Could not convert to string");
-}
-
-
-void v8::Number::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Number::Cast()")) return;
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsNumber(),
- "v8::Number::Cast()",
- "Could not convert to number");
-}
-
-
-void v8::Integer::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Integer::Cast()")) return;
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsNumber(),
- "v8::Integer::Cast()",
- "Could not convert to number");
-}
-
-
-void v8::Array::CheckCast(Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Array::Cast()")) return;
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSArray(),
- "v8::Array::Cast()",
- "Could not convert to array");
-}
-
-
-void v8::Date::CheckCast(v8::Value* that) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Date::Cast()")) return;
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Date_symbol()),
- "v8::Date::Cast()",
- "Could not convert to date");
-}
-
-
-void v8::RegExp::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::RegExp::Cast()")) return;
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSRegExp(),
- "v8::RegExp::Cast()",
- "Could not convert to regular expression");
-}
-
-
-bool Value::BooleanValue() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsBoolean()) {
- return obj->IsTrue();
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::BooleanValue()")) return false;
- LOG_API(isolate, "BooleanValue");
- ENTER_V8(isolate);
- i::Handle<i::Object> value = i::Execution::ToBoolean(obj);
- return value->IsTrue();
- }
-}
-
-
-double Value::NumberValue() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> num;
- if (obj->IsNumber()) {
- num = obj;
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::NumberValue()")) {
- return i::OS::nan_value();
- }
- LOG_API(isolate, "NumberValue");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToNumber(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, i::OS::nan_value());
- }
- return num->Number();
-}
-
-
-int64_t Value::IntegerValue() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> num;
- if (obj->IsNumber()) {
- num = obj;
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IntegerValue()")) return 0;
- LOG_API(isolate, "IntegerValue");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToInteger(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, 0);
- }
- if (num->IsSmi()) {
- return i::Smi::cast(*num)->value();
- } else {
- return static_cast<int64_t>(num->Number());
- }
-}
-
-
-Local<Int32> Value::ToInt32() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> num;
- if (obj->IsSmi()) {
- num = obj;
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToInt32()")) return Local<Int32>();
- LOG_API(isolate, "ToInt32");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToInt32(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Int32>());
- }
- return Local<Int32>(ToApi<Int32>(num));
-}
-
-
-Local<Uint32> Value::ToUint32() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> num;
- if (obj->IsSmi()) {
- num = obj;
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToUint32()")) return Local<Uint32>();
- LOG_API(isolate, "ToUInt32");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToUint32(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Uint32>());
- }
- return Local<Uint32>(ToApi<Uint32>(num));
-}
-
-
-Local<Uint32> Value::ToArrayIndex() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) {
- if (i::Smi::cast(*obj)->value() >= 0) return Utils::Uint32ToLocal(obj);
- return Local<Uint32>();
- }
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToArrayIndex()")) return Local<Uint32>();
- LOG_API(isolate, "ToArrayIndex");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> string_obj =
- i::Execution::ToString(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Uint32>());
- i::Handle<i::String> str = i::Handle<i::String>::cast(string_obj);
- uint32_t index;
- if (str->AsArrayIndex(&index)) {
- i::Handle<i::Object> value;
- if (index <= static_cast<uint32_t>(i::Smi::kMaxValue)) {
- value = i::Handle<i::Object>(i::Smi::FromInt(index));
- } else {
- value = isolate->factory()->NewNumber(index);
- }
- return Utils::Uint32ToLocal(value);
- }
- return Local<Uint32>();
-}
-
-
-int32_t Value::Int32Value() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) {
- return i::Smi::cast(*obj)->value();
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::Int32Value()")) return 0;
- LOG_API(isolate, "Int32Value (slow)");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> num =
- i::Execution::ToInt32(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, 0);
- if (num->IsSmi()) {
- return i::Smi::cast(*num)->value();
- } else {
- return static_cast<int32_t>(num->Number());
- }
- }
-}
-
-
-bool Value::Equals(Handle<Value> that) const {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::Equals()")
- || EmptyCheck("v8::Value::Equals()", this)
- || EmptyCheck("v8::Value::Equals()", that)) {
- return false;
- }
- LOG_API(isolate, "Equals");
- ENTER_V8(isolate);
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> other = Utils::OpenHandle(*that);
- // If both obj and other are JSObjects, we'd better compare by identity
- // immediately when going into JS builtin. The reason is Invoke
- // would overwrite global object receiver with global proxy.
- if (obj->IsJSObject() && other->IsJSObject()) {
- return *obj == *other;
- }
- i::Object** args[1] = { other.location() };
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result =
- CallV8HeapFunction("EQUALS", obj, 1, args, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return *result == i::Smi::FromInt(i::EQUAL);
-}
-
-
-bool Value::StrictEquals(Handle<Value> that) const {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::StrictEquals()")
- || EmptyCheck("v8::Value::StrictEquals()", this)
- || EmptyCheck("v8::Value::StrictEquals()", that)) {
- return false;
- }
- LOG_API(isolate, "StrictEquals");
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> other = Utils::OpenHandle(*that);
- // Must check HeapNumber first, since NaN !== NaN.
- if (obj->IsHeapNumber()) {
- if (!other->IsNumber()) return false;
- double x = obj->Number();
- double y = other->Number();
- // Must check explicitly for NaN:s on Windows, but -0 works fine.
- return x == y && !isnan(x) && !isnan(y);
- } else if (*obj == *other) { // Also covers Booleans.
- return true;
- } else if (obj->IsSmi()) {
- return other->IsNumber() && obj->Number() == other->Number();
- } else if (obj->IsString()) {
- return other->IsString() &&
- i::String::cast(*obj)->Equals(i::String::cast(*other));
- } else if (obj->IsUndefined() || obj->IsUndetectableObject()) {
- return other->IsUndefined() || other->IsUndetectableObject();
- } else {
- return false;
- }
-}
-
-
-uint32_t Value::Uint32Value() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) {
- return i::Smi::cast(*obj)->value();
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::Uint32Value()")) return 0;
- LOG_API(isolate, "Uint32Value");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> num =
- i::Execution::ToUint32(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, 0);
- if (num->IsSmi()) {
- return i::Smi::cast(*num)->value();
- } else {
- return static_cast<uint32_t>(num->Number());
- }
- }
-}
-
-
-bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
- v8::PropertyAttribute attribs) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::Set()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Object> self = Utils::OpenHandle(this);
- i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj = i::SetProperty(
- self,
- key_obj,
- value_obj,
- static_cast<PropertyAttributes>(attribs),
- i::kNonStrictMode);
- has_pending_exception = obj.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return true;
-}
-
-
-bool v8::Object::Set(uint32_t index, v8::Handle<Value> value) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::Set()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj = i::SetElement(
- self,
- index,
- value_obj,
- i::kNonStrictMode);
- has_pending_exception = obj.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return true;
-}
-
-
-bool v8::Object::ForceSet(v8::Handle<Value> key,
- v8::Handle<Value> value,
- v8::PropertyAttribute attribs) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::ForceSet()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj = i::ForceSetProperty(
- self,
- key_obj,
- value_obj,
- static_cast<PropertyAttributes>(attribs));
- has_pending_exception = obj.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return true;
-}
-
-
-bool v8::Object::ForceDelete(v8::Handle<Value> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::ForceDelete()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
-
- // When turning on access checks for a global object deoptimize all functions
- // as optimized code does not always handle access checks.
- i::Deoptimizer::DeoptimizeGlobalObject(*self);
-
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj = i::ForceDeleteProperty(self, key_obj);
- has_pending_exception = obj.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return obj->IsTrue();
-}
-
-
-Local<Value> v8::Object::Get(v8::Handle<Value> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::Get()", return Local<v8::Value>());
- ENTER_V8(isolate);
- i::Handle<i::Object> self = Utils::OpenHandle(this);
- i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = i::GetProperty(self, key_obj);
- has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
- return Utils::ToLocal(result);
-}
-
-
-Local<Value> v8::Object::Get(uint32_t index) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::Get()", return Local<v8::Value>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = i::GetElement(self, index);
- has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
- return Utils::ToLocal(result);
-}
-
-
-Local<Value> v8::Object::GetPrototype() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetPrototype()",
- return Local<v8::Value>());
- ENTER_V8(isolate);
- i::Handle<i::Object> self = Utils::OpenHandle(this);
- i::Handle<i::Object> result = i::GetPrototype(self);
- return Utils::ToLocal(result);
-}
-
-
-bool v8::Object::SetPrototype(Handle<Value> value) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::SetPrototype()", return false);
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = i::SetPrototype(self, value_obj);
- has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return true;
-}
-
-
-Local<Object> v8::Object::FindInstanceInPrototypeChain(
- v8::Handle<FunctionTemplate> tmpl) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate,
- "v8::Object::FindInstanceInPrototypeChain()",
- return Local<v8::Object>());
- ENTER_V8(isolate);
- i::JSObject* object = *Utils::OpenHandle(this);
- i::FunctionTemplateInfo* tmpl_info = *Utils::OpenHandle(*tmpl);
- while (!object->IsInstanceOf(tmpl_info)) {
- i::Object* prototype = object->GetPrototype();
- if (!prototype->IsJSObject()) return Local<Object>();
- object = i::JSObject::cast(prototype);
- }
- return Utils::ToLocal(i::Handle<i::JSObject>(object));
-}
-
-
-Local<Array> v8::Object::GetPropertyNames() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetPropertyNames()",
- return Local<v8::Array>());
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::FixedArray> value =
- i::GetKeysInFixedArrayFor(self, i::INCLUDE_PROTOS);
- // Because we use caching to speed up enumeration it is important
- // to never change the result of the basic enumeration function so
- // we clone the result.
- i::Handle<i::FixedArray> elms = isolate->factory()->CopyFixedArray(value);
- i::Handle<i::JSArray> result =
- isolate->factory()->NewJSArrayWithElements(elms);
- return Utils::ToLocal(scope.CloseAndEscape(result));
-}
-
-
-Local<String> v8::Object::ObjectProtoToString() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::ObjectProtoToString()",
- return Local<v8::String>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-
- i::Handle<i::Object> name(self->class_name());
-
- // Native implementation of Object.prototype.toString (v8natives.js):
- // var c = %ClassOf(this);
- // if (c === 'Arguments') c = 'Object';
- // return "[object " + c + "]";
-
- if (!name->IsString()) {
- return v8::String::New("[object ]");
-
- } else {
- i::Handle<i::String> class_name = i::Handle<i::String>::cast(name);
- if (class_name->IsEqualTo(i::CStrVector("Arguments"))) {
- return v8::String::New("[object Object]");
-
- } else {
- const char* prefix = "[object ";
- Local<String> str = Utils::ToLocal(class_name);
- const char* postfix = "]";
-
- int prefix_len = i::StrLength(prefix);
- int str_len = str->Length();
- int postfix_len = i::StrLength(postfix);
-
- int buf_len = prefix_len + str_len + postfix_len;
- i::ScopedVector<char> buf(buf_len);
-
- // Write prefix.
- char* ptr = buf.start();
- memcpy(ptr, prefix, prefix_len * v8::internal::kCharSize);
- ptr += prefix_len;
-
- // Write real content.
- str->WriteAscii(ptr, 0, str_len);
- ptr += str_len;
-
- // Write postfix.
- memcpy(ptr, postfix, postfix_len * v8::internal::kCharSize);
-
- // Copy the buffer into a heap-allocated string and return it.
- Local<String> result = v8::String::New(buf.start(), buf_len);
- return result;
- }
- }
-}
-
-
-Local<String> v8::Object::GetConstructorName() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetConstructorName()",
- return Local<v8::String>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::String> name(self->constructor_name());
- return Utils::ToLocal(name);
-}
-
-
-bool v8::Object::Delete(v8::Handle<String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::Delete()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- return i::DeleteProperty(self, key_obj)->IsTrue();
-}
-
-
-bool v8::Object::Has(v8::Handle<String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::Has()", return false);
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- return self->HasProperty(*key_obj);
-}
-
-
-bool v8::Object::Delete(uint32_t index) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::DeleteProperty()",
- return false);
- ENTER_V8(isolate);
- HandleScope scope;
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- return i::DeleteElement(self, index)->IsTrue();
-}
-
-
-bool v8::Object::Has(uint32_t index) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::HasProperty()", return false);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- return self->HasElement(index);
-}
-
-
-bool Object::SetAccessor(Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter,
- v8::Handle<Value> data,
- AccessControl settings,
- PropertyAttribute attributes) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::SetAccessor()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name,
- getter, setter, data,
- settings, attributes);
- i::Handle<i::Object> result = i::SetAccessor(Utils::OpenHandle(this), info);
- return !result.is_null() && !result->IsUndefined();
-}
-
-
-bool v8::Object::HasRealNamedProperty(Handle<String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::HasRealNamedProperty()",
- return false);
- return Utils::OpenHandle(this)->HasRealNamedProperty(
- *Utils::OpenHandle(*key));
-}
-
-
-bool v8::Object::HasRealIndexedProperty(uint32_t index) {
- ON_BAILOUT(Utils::OpenHandle(this)->GetIsolate(),
- "v8::Object::HasRealIndexedProperty()",
- return false);
- return Utils::OpenHandle(this)->HasRealElementProperty(index);
-}
-
-
-bool v8::Object::HasRealNamedCallbackProperty(Handle<String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate,
- "v8::Object::HasRealNamedCallbackProperty()",
- return false);
- ENTER_V8(isolate);
- return Utils::OpenHandle(this)->HasRealNamedCallbackProperty(
- *Utils::OpenHandle(*key));
-}
-
-
-bool v8::Object::HasNamedLookupInterceptor() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::HasNamedLookupInterceptor()",
- return false);
- return Utils::OpenHandle(this)->HasNamedInterceptor();
-}
-
-
-bool v8::Object::HasIndexedLookupInterceptor() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::HasIndexedLookupInterceptor()",
- return false);
- return Utils::OpenHandle(this)->HasIndexedInterceptor();
-}
-
-
-Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
- Handle<String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate,
- "v8::Object::GetRealNamedPropertyInPrototypeChain()",
- return Local<Value>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::LookupResult lookup;
- self_obj->LookupRealNamedPropertyInPrototypes(*key_obj, &lookup);
- if (lookup.IsProperty()) {
- PropertyAttributes attributes;
- i::Object* property =
- self_obj->GetProperty(*self_obj,
- &lookup,
- *key_obj,
- &attributes)->ToObjectUnchecked();
- i::Handle<i::Object> result(property);
- return Utils::ToLocal(result);
- }
- return Local<Value>(); // No real property was found in prototype chain.
-}
-
-
-Local<Value> v8::Object::GetRealNamedProperty(Handle<String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetRealNamedProperty()",
- return Local<Value>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::LookupResult lookup;
- self_obj->LookupRealNamedProperty(*key_obj, &lookup);
- if (lookup.IsProperty()) {
- PropertyAttributes attributes;
- i::Object* property =
- self_obj->GetProperty(*self_obj,
- &lookup,
- *key_obj,
- &attributes)->ToObjectUnchecked();
- i::Handle<i::Object> result(property);
- return Utils::ToLocal(result);
- }
- return Local<Value>(); // No real property was found in prototype chain.
-}
-
-
-// Turns on access checks by copying the map and setting the check flag.
-// Because the object gets a new map, existing inline cache caching
-// the old map of this object will fail.
-void v8::Object::TurnOnAccessCheck() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::TurnOnAccessCheck()", return);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
-
- // When turning on access checks for a global object deoptimize all functions
- // as optimized code does not always handle access checks.
- i::Deoptimizer::DeoptimizeGlobalObject(*obj);
-
- i::Handle<i::Map> new_map =
- isolate->factory()->CopyMapDropTransitions(i::Handle<i::Map>(obj->map()));
- new_map->set_is_access_check_needed(true);
- obj->set_map(*new_map);
-}
-
-
-bool v8::Object::IsDirty() {
- return Utils::OpenHandle(this)->IsDirty();
-}
-
-
-Local<v8::Object> v8::Object::Clone() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::Clone()", return Local<Object>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::JSObject> result = i::Copy(self);
- has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
- return Utils::ToLocal(result);
-}
-
-
-static i::Context* GetCreationContext(i::JSObject* object) {
- i::Object* constructor = object->map()->constructor();
- i::JSFunction* function;
- if (!constructor->IsJSFunction()) {
- // API functions have null as a constructor,
- // but any JSFunction knows its context immediately.
- ASSERT(object->IsJSFunction() &&
- i::JSFunction::cast(object)->shared()->IsApiFunction());
- function = i::JSFunction::cast(object);
- } else {
- function = i::JSFunction::cast(constructor);
- }
- return function->context()->global_context();
-}
-
-
-Local<v8::Context> v8::Object::CreationContext() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate,
- "v8::Object::CreationContext()", return Local<v8::Context>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Context* context = GetCreationContext(*self);
- return Utils::ToLocal(i::Handle<i::Context>(context));
-}
-
-
-int v8::Object::GetIdentityHash() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetIdentityHash()", return 0);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> hidden_props_obj(i::GetHiddenProperties(self, true));
- if (!hidden_props_obj->IsJSObject()) {
- // We failed to create hidden properties. That's a detached
- // global proxy.
- ASSERT(hidden_props_obj->IsUndefined());
- return 0;
- }
- i::Handle<i::JSObject> hidden_props =
- i::Handle<i::JSObject>::cast(hidden_props_obj);
- i::Handle<i::String> hash_symbol = isolate->factory()->identity_hash_symbol();
- if (hidden_props->HasLocalProperty(*hash_symbol)) {
- i::Handle<i::Object> hash = i::GetProperty(hidden_props, hash_symbol);
- CHECK(!hash.is_null());
- CHECK(hash->IsSmi());
- return i::Smi::cast(*hash)->value();
- }
-
- int hash_value;
- int attempts = 0;
- do {
- // Generate a random 32-bit hash value but limit range to fit
- // within a smi.
- hash_value = i::V8::Random(self->GetIsolate()) & i::Smi::kMaxValue;
- attempts++;
- } while (hash_value == 0 && attempts < 30);
- hash_value = hash_value != 0 ? hash_value : 1; // never return 0
- CHECK(!i::SetLocalPropertyIgnoreAttributes(
- hidden_props,
- hash_symbol,
- i::Handle<i::Object>(i::Smi::FromInt(hash_value)),
- static_cast<PropertyAttributes>(None)).is_null());
-
- return hash_value;
-}
-
-
-bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
- v8::Handle<v8::Value> value) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::SetHiddenValue()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, true));
- i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj = i::SetProperty(
- hidden_props,
- key_obj,
- value_obj,
- static_cast<PropertyAttributes>(None),
- i::kNonStrictMode);
- has_pending_exception = obj.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return true;
-}
-
-
-v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetHiddenValue()",
- return Local<v8::Value>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, false));
- if (hidden_props->IsUndefined()) {
- return v8::Local<v8::Value>();
- }
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = i::GetProperty(hidden_props, key_obj);
- has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, v8::Local<v8::Value>());
- if (result->IsUndefined()) {
- return v8::Local<v8::Value>();
- }
- return Utils::ToLocal(result);
-}
-
-
-bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::DeleteHiddenValue()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, false));
- if (hidden_props->IsUndefined()) {
- return true;
- }
- i::Handle<i::JSObject> js_obj(i::JSObject::cast(*hidden_props));
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- return i::DeleteProperty(js_obj, key_obj)->IsTrue();
-}
-
-
-namespace {
-
-void PrepareExternalArrayElements(i::Handle<i::JSObject> object,
- void* data,
- ExternalArrayType array_type,
- int length) {
- i::Isolate* isolate = object->GetIsolate();
- i::Handle<i::ExternalArray> array =
- isolate->factory()->NewExternalArray(length, array_type, data);
-
- // If the object already has external elements, create a new, unique
- // map if the element type is now changing, because assumptions about
- // generated code based on the receiver's map will be invalid.
- i::Handle<i::HeapObject> elements(object->elements());
- bool cant_reuse_map =
- elements->map()->IsUndefined() ||
- !elements->map()->has_external_array_elements() ||
- elements->map() != isolate->heap()->MapForExternalArrayType(array_type);
- if (cant_reuse_map) {
- i::Handle<i::Map> external_array_map =
- isolate->factory()->GetExternalArrayElementsMap(
- i::Handle<i::Map>(object->map()),
- array_type,
- object->HasFastProperties());
- object->set_map(*external_array_map);
- }
- object->set_elements(*array);
-}
-
-} // namespace
-
-
-void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::SetElementsToPixelData()", return);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- if (!ApiCheck(length <= i::ExternalPixelArray::kMaxLength,
- "v8::Object::SetIndexedPropertiesToPixelData()",
- "length exceeds max acceptable value")) {
- return;
- }
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- if (!ApiCheck(!self->IsJSArray(),
- "v8::Object::SetIndexedPropertiesToPixelData()",
- "JSArray is not supported")) {
- return;
- }
- PrepareExternalArrayElements(self, data, kExternalPixelArray, length);
-}
-
-
-bool v8::Object::HasIndexedPropertiesInPixelData() {
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- ON_BAILOUT(self->GetIsolate(), "v8::HasIndexedPropertiesInPixelData()",
- return false);
- return self->HasExternalPixelElements();
-}
-
-
-uint8_t* v8::Object::GetIndexedPropertiesPixelData() {
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- ON_BAILOUT(self->GetIsolate(), "v8::GetIndexedPropertiesPixelData()",
- return NULL);
- if (self->HasExternalPixelElements()) {
- return i::ExternalPixelArray::cast(self->elements())->
- external_pixel_pointer();
- } else {
- return NULL;
- }
-}
-
-
-int v8::Object::GetIndexedPropertiesPixelDataLength() {
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- ON_BAILOUT(self->GetIsolate(), "v8::GetIndexedPropertiesPixelDataLength()",
- return -1);
- if (self->HasExternalPixelElements()) {
- return i::ExternalPixelArray::cast(self->elements())->length();
- } else {
- return -1;
- }
-}
-
-void v8::Object::SetIndexedPropertiesToExternalArrayData(
- void* data,
- ExternalArrayType array_type,
- int length) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::SetIndexedPropertiesToExternalArrayData()", return);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- if (!ApiCheck(length <= i::ExternalArray::kMaxLength,
- "v8::Object::SetIndexedPropertiesToExternalArrayData()",
- "length exceeds max acceptable value")) {
- return;
- }
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- if (!ApiCheck(!self->IsJSArray(),
- "v8::Object::SetIndexedPropertiesToExternalArrayData()",
- "JSArray is not supported")) {
- return;
- }
- PrepareExternalArrayElements(self, data, array_type, length);
-}
-
-
-bool v8::Object::HasIndexedPropertiesInExternalArrayData() {
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- ON_BAILOUT(self->GetIsolate(),
- "v8::HasIndexedPropertiesInExternalArrayData()",
- return false);
- return self->HasExternalArrayElements();
-}
-
-
-void* v8::Object::GetIndexedPropertiesExternalArrayData() {
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- ON_BAILOUT(self->GetIsolate(),
- "v8::GetIndexedPropertiesExternalArrayData()",
- return NULL);
- if (self->HasExternalArrayElements()) {
- return i::ExternalArray::cast(self->elements())->external_pointer();
- } else {
- return NULL;
- }
-}
-
-
-ExternalArrayType v8::Object::GetIndexedPropertiesExternalArrayDataType() {
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- ON_BAILOUT(self->GetIsolate(),
- "v8::GetIndexedPropertiesExternalArrayDataType()",
- return static_cast<ExternalArrayType>(-1));
- switch (self->elements()->map()->instance_type()) {
- case i::EXTERNAL_BYTE_ARRAY_TYPE:
- return kExternalByteArray;
- case i::EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- return kExternalUnsignedByteArray;
- case i::EXTERNAL_SHORT_ARRAY_TYPE:
- return kExternalShortArray;
- case i::EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- return kExternalUnsignedShortArray;
- case i::EXTERNAL_INT_ARRAY_TYPE:
- return kExternalIntArray;
- case i::EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- return kExternalUnsignedIntArray;
- case i::EXTERNAL_FLOAT_ARRAY_TYPE:
- return kExternalFloatArray;
- case i::EXTERNAL_PIXEL_ARRAY_TYPE:
- return kExternalPixelArray;
- default:
- return static_cast<ExternalArrayType>(-1);
- }
-}
-
-
-int v8::Object::GetIndexedPropertiesExternalArrayDataLength() {
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- ON_BAILOUT(self->GetIsolate(),
- "v8::GetIndexedPropertiesExternalArrayDataLength()",
- return 0);
- if (self->HasExternalArrayElements()) {
- return i::ExternalArray::cast(self->elements())->length();
- } else {
- return -1;
- }
-}
-
-#ifdef QT_BUILD_SCRIPT_LIB
-bool v8::Object::IsCallable() {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Object::IsCallable()", return false);
- ENTER_V8(isolate);
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- if (obj->IsJSFunction())
- return true;
- HandleScope scope;
- return i::Execution::GetFunctionDelegate(obj)->IsJSFunction();
-}
-
-Local<v8::Value> v8::Object::Call(v8::Handle<v8::Object> recv, int argc,
- v8::Handle<v8::Value> argv[]) {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Object::Call()", return Local<v8::Value>());
- LOG_API(isolate, "Object::Call");
- ENTER_V8(isolate);
- i::Object* raw_result = NULL;
- {
- HandleScope scope;
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
- STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
- i::Object*** args = reinterpret_cast<i::Object***>(argv);
- i::Handle<i::JSFunction> fun;
- if (obj->IsJSFunction()) {
- fun = i::Handle<i::JSFunction>::cast(obj);
- } else {
- fun = i::Handle<i::JSFunction>::cast(i::Execution::GetFunctionDelegate(obj));
- recv_obj = obj;
- }
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned =
- i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
- raw_result = *returned;
- }
- i::Handle<i::Object> result(raw_result);
- return Utils::ToLocal(result);
-}
-
-Local<v8::Object> Object::NewInstance(int argc,
- v8::Handle<v8::Value> argv[]) const {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Object::NewInstance()", return Local<v8::Object>());
- LOG_API(isolate, "Object::NewInstance");
- ENTER_V8(isolate);
- HandleScope scope;
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
- i::Object*** args = reinterpret_cast<i::Object***>(argv);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned;
- if (obj->IsJSFunction()) {
- i::Handle<i::JSFunction> function = i::Handle<i::JSFunction>::cast(obj);
- returned = i::Execution::New(function, argc, args, &has_pending_exception);
- } else {
- i::Handle<i::JSFunction> delegate =
- i::Handle<i::JSFunction>::cast(i::Execution::GetConstructorDelegate(obj));
- returned = i::Execution::Call(delegate, obj, argc, args, &has_pending_exception);
- }
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
- return scope.Close(Utils::ToLocal(i::Handle<i::JSObject>::cast(returned)));
-}
-#endif
-
-
-Local<v8::Object> Function::NewInstance() const {
- return NewInstance(0, NULL);
-}
-
-
-Local<v8::Object> Function::NewInstance(int argc,
- v8::Handle<v8::Value> argv[]) const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Function::NewInstance()",
- return Local<v8::Object>());
- LOG_API(isolate, "Function::NewInstance");
- ENTER_V8(isolate);
- HandleScope scope;
- i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
- STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
- i::Object*** args = reinterpret_cast<i::Object***>(argv);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned =
- i::Execution::New(function, argc, args, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
- return scope.Close(Utils::ToLocal(i::Handle<i::JSObject>::cast(returned)));
-}
-
-
-Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
- v8::Handle<v8::Value> argv[]) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Function::Call()", return Local<v8::Value>());
- LOG_API(isolate, "Function::Call");
- ENTER_V8(isolate);
- i::Object* raw_result = NULL;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
- i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
- STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
- i::Object*** args = reinterpret_cast<i::Object***>(argv);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned =
- i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
- raw_result = *returned;
- }
- i::Handle<i::Object> result(raw_result);
- return Utils::ToLocal(result);
-}
-
-
-void Function::SetName(v8::Handle<v8::String> name) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- func->shared()->set_name(*Utils::OpenHandle(*name));
-}
-
-
-Handle<Value> Function::GetName() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- return Utils::ToLocal(i::Handle<i::Object>(func->shared()->name()));
-}
-
-
-ScriptOrigin Function::GetScriptOrigin() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- if (func->shared()->script()->IsScript()) {
- i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
- v8::ScriptOrigin origin(
- Utils::ToLocal(i::Handle<i::Object>(script->name())),
- v8::Integer::New(script->line_offset()->value()),
- v8::Integer::New(script->column_offset()->value()));
- return origin;
- }
- return v8::ScriptOrigin(Handle<Value>());
-}
-
-
-const int Function::kLineOffsetNotFound = -1;
-
-
-int Function::GetScriptLineNumber() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- if (func->shared()->script()->IsScript()) {
- i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
- return i::GetScriptLineNumber(script, func->shared()->start_position());
- }
- return kLineOffsetNotFound;
-}
-
-
-int String::Length() const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::Length()")) return 0;
- return str->length();
-}
-
-
-int String::Utf8Length() const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::Utf8Length()")) return 0;
- return str->Utf8Length();
-}
-
-
-uint String::Hash() const
-{
- return Utils::OpenHandle(this)->Hash();
-}
-
-bool String::Equals(Handle<String> other) const
-{
- return Utils::OpenHandle(this)->Equals(*Utils::OpenHandle(*other));
-}
-
-int String::WriteUtf8(char* buffer,
- int capacity,
- int* nchars_ref,
- WriteHints hints) const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::WriteUtf8()")) return 0;
- LOG_API(isolate, "String::WriteUtf8");
- ENTER_V8(isolate);
- i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
- i::Handle<i::String> str = Utils::OpenHandle(this);
- isolate->string_tracker()->RecordWrite(str);
- if (hints & HINT_MANY_WRITES_EXPECTED) {
- // Flatten the string for efficiency. This applies whether we are
- // using StringInputBuffer or Get(i) to access the characters.
- str->TryFlatten();
- }
- write_input_buffer.Reset(0, *str);
- int len = str->length();
- // Encode the first K - 3 bytes directly into the buffer since we
- // know there's room for them. If no capacity is given we copy all
- // of them here.
- int fast_end = capacity - (unibrow::Utf8::kMaxEncodedSize - 1);
- int i;
- int pos = 0;
- int nchars = 0;
- for (i = 0; i < len && (capacity == -1 || pos < fast_end); i++) {
- i::uc32 c = write_input_buffer.GetNext();
- int written = unibrow::Utf8::Encode(buffer + pos, c);
- pos += written;
- nchars++;
- }
- if (i < len) {
- // For the last characters we need to check the length for each one
- // because they may be longer than the remaining space in the
- // buffer.
- char intermediate[unibrow::Utf8::kMaxEncodedSize];
- for (; i < len && pos < capacity; i++) {
- i::uc32 c = write_input_buffer.GetNext();
- int written = unibrow::Utf8::Encode(intermediate, c);
- if (pos + written <= capacity) {
- for (int j = 0; j < written; j++)
- buffer[pos + j] = intermediate[j];
- pos += written;
- nchars++;
- } else {
- // We've reached the end of the buffer
- break;
- }
- }
- }
- if (nchars_ref != NULL) *nchars_ref = nchars;
- if (i == len && (capacity == -1 || pos < capacity))
- buffer[pos++] = '\0';
- return pos;
-}
-
-
-int String::WriteAscii(char* buffer,
- int start,
- int length,
- WriteHints hints) const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::WriteAscii()")) return 0;
- LOG_API(isolate, "String::WriteAscii");
- ENTER_V8(isolate);
- i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
- ASSERT(start >= 0 && length >= -1);
- i::Handle<i::String> str = Utils::OpenHandle(this);
- isolate->string_tracker()->RecordWrite(str);
- if (hints & HINT_MANY_WRITES_EXPECTED) {
- // Flatten the string for efficiency. This applies whether we are
- // using StringInputBuffer or Get(i) to access the characters.
- str->TryFlatten();
- }
- int end = length;
- if ( (length == -1) || (length > str->length() - start) )
- end = str->length() - start;
- if (end < 0) return 0;
- write_input_buffer.Reset(start, *str);
- int i;
- for (i = 0; i < end; i++) {
- char c = static_cast<char>(write_input_buffer.GetNext());
- if (c == '\0') c = ' ';
- buffer[i] = c;
- }
- if (length == -1 || i < length)
- buffer[i] = '\0';
- return i;
-}
-
-
-int String::Write(uint16_t* buffer,
- int start,
- int length,
- WriteHints hints) const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::Write()")) return 0;
- LOG_API(isolate, "String::Write");
- ENTER_V8(isolate);
- ASSERT(start >= 0 && length >= -1);
- i::Handle<i::String> str = Utils::OpenHandle(this);
- isolate->string_tracker()->RecordWrite(str);
- if (hints & HINT_MANY_WRITES_EXPECTED) {
- // Flatten the string for efficiency. This applies whether we are
- // using StringInputBuffer or Get(i) to access the characters.
- str->TryFlatten();
- }
- int end = start + length;
- if ((length == -1) || (length > str->length() - start) )
- end = str->length();
- if (end < 0) return 0;
- i::String::WriteToFlat(*str, buffer, start, end);
- if (length == -1 || end - start < length) {
- buffer[end - start] = '\0';
- }
- return end - start;
-}
-
-
-bool v8::String::IsExternal() const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::IsExternal()")) {
- return false;
- }
- EnsureInitializedForIsolate(str->GetIsolate(), "v8::String::IsExternal()");
- return i::StringShape(*str).IsExternalTwoByte();
-}
-
-
-bool v8::String::IsExternalAscii() const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::IsExternalAscii()")) {
- return false;
- }
- return i::StringShape(*str).IsExternalAscii();
-}
-
-
-void v8::String::VerifyExternalStringResource(
- v8::String::ExternalStringResource* value) const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- v8::String::ExternalStringResource* expected;
- if (i::StringShape(*str).IsExternalTwoByte()) {
- void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
- expected = reinterpret_cast<ExternalStringResource*>(resource);
- } else {
- expected = NULL;
- }
- CHECK_EQ(expected, value);
-}
-
-
-v8::String::ExternalAsciiStringResource*
- v8::String::GetExternalAsciiStringResource() const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(),
- "v8::String::GetExternalAsciiStringResource()")) {
- return NULL;
- }
- if (i::StringShape(*str).IsExternalAscii()) {
- void* resource = i::Handle<i::ExternalAsciiString>::cast(str)->resource();
- return reinterpret_cast<ExternalAsciiStringResource*>(resource);
- } else {
- return NULL;
- }
-}
-
-
-double Number::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Number::Value()")) return 0;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->Number();
-}
-
-
-bool Boolean::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Boolean::Value()")) return false;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->IsTrue();
-}
-
-
-int64_t Integer::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Integer::Value()")) return 0;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) {
- return i::Smi::cast(*obj)->value();
- } else {
- return static_cast<int64_t>(obj->Number());
- }
-}
-
-
-int32_t Int32::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Int32::Value()")) return 0;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) {
- return i::Smi::cast(*obj)->value();
- } else {
- return static_cast<int32_t>(obj->Number());
- }
-}
-
-
-uint32_t Uint32::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Uint32::Value()")) return 0;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) {
- return i::Smi::cast(*obj)->value();
- } else {
- return static_cast<uint32_t>(obj->Number());
- }
-}
-
-
-int v8::Object::InternalFieldCount() {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- if (IsDeadCheck(obj->GetIsolate(), "v8::Object::InternalFieldCount()")) {
- return 0;
- }
- return obj->GetInternalFieldCount();
-}
-
-
-Local<Value> v8::Object::CheckedGetInternalField(int index) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- if (IsDeadCheck(obj->GetIsolate(), "v8::Object::GetInternalField()")) {
- return Local<Value>();
- }
- if (!ApiCheck(index < obj->GetInternalFieldCount(),
- "v8::Object::GetInternalField()",
- "Reading internal field out of bounds")) {
- return Local<Value>();
- }
- i::Handle<i::Object> value(obj->GetInternalField(index));
- Local<Value> result = Utils::ToLocal(value);
-#ifdef DEBUG
- Local<Value> unchecked = UncheckedGetInternalField(index);
- ASSERT(unchecked.IsEmpty() || (unchecked == result));
-#endif
- return result;
-}
-
-
-void v8::Object::SetInternalField(int index, v8::Handle<Value> value) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- i::Isolate* isolate = obj->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Object::SetInternalField()")) {
- return;
- }
- if (!ApiCheck(index < obj->GetInternalFieldCount(),
- "v8::Object::SetInternalField()",
- "Writing internal field out of bounds")) {
- return;
- }
- ENTER_V8(isolate);
- i::Handle<i::Object> val = Utils::OpenHandle(*value);
- obj->SetInternalField(index, *val);
-}
-
-
-static bool CanBeEncodedAsSmi(void* ptr) {
- const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
- return ((address & i::kEncodablePointerMask) == 0);
-}
-
-
-static i::Smi* EncodeAsSmi(void* ptr) {
- ASSERT(CanBeEncodedAsSmi(ptr));
- const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
- i::Smi* result = reinterpret_cast<i::Smi*>(address << i::kPointerToSmiShift);
- ASSERT(i::Internals::HasSmiTag(result));
- ASSERT_EQ(result, i::Smi::FromInt(result->value()));
- ASSERT_EQ(ptr, i::Internals::GetExternalPointerFromSmi(result));
- return result;
-}
-
-
-void v8::Object::SetPointerInInternalField(int index, void* value) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
- if (CanBeEncodedAsSmi(value)) {
- Utils::OpenHandle(this)->SetInternalField(index, EncodeAsSmi(value));
- } else {
- HandleScope scope;
- i::Handle<i::Proxy> proxy =
- isolate->factory()->NewProxy(
- reinterpret_cast<i::Address>(value), i::TENURED);
- if (!proxy.is_null())
- Utils::OpenHandle(this)->SetInternalField(index, *proxy);
- }
- ASSERT_EQ(value, GetPointerFromInternalField(index));
-}
-
-
-// --- E n v i r o n m e n t ---
-
-
-bool v8::V8::Initialize() {
- i::Isolate* isolate = i::Isolate::UncheckedCurrent();
- if (isolate != NULL && isolate->IsInitialized()) {
- return true;
- }
- return InitializeHelper();
-}
-
-
-bool v8::V8::Dispose() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!ApiCheck(isolate != NULL && isolate->IsDefaultIsolate(),
- "v8::V8::Dispose()",
- "Use v8::Isolate::Dispose() for a non-default isolate.")) {
- return false;
- }
- i::V8::TearDown();
- return true;
-}
-
-
-HeapStatistics::HeapStatistics(): total_heap_size_(0),
- total_heap_size_executable_(0),
- used_heap_size_(0),
- heap_size_limit_(0) { }
-
-
-void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
- i::Heap* heap = i::Isolate::Current()->heap();
- heap_statistics->set_total_heap_size(heap->CommittedMemory());
- heap_statistics->set_total_heap_size_executable(
- heap->CommittedMemoryExecutable());
- heap_statistics->set_used_heap_size(heap->SizeOfObjects());
- heap_statistics->set_heap_size_limit(heap->MaxReserved());
-}
-
-
-bool v8::V8::IdleNotification() {
- // Returning true tells the caller that it need not
- // continue to call IdleNotification.
- if (!i::Isolate::Current()->IsInitialized()) return true;
- return i::V8::IdleNotification();
-}
-
-
-void v8::V8::LowMemoryNotification() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!isolate->IsInitialized()) return;
- isolate->heap()->CollectAllGarbage(true);
-}
-
-
-int v8::V8::ContextDisposedNotification() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!isolate->IsInitialized()) return 0;
- return isolate->heap()->NotifyContextDisposed();
-}
-
-
-const char* v8::V8::GetVersion() {
- return i::Version::GetVersion();
-}
-
-
-static i::Handle<i::FunctionTemplateInfo>
- EnsureConstructor(i::Handle<i::ObjectTemplateInfo> templ) {
- if (templ->constructor()->IsUndefined()) {
- Local<FunctionTemplate> constructor = FunctionTemplate::New();
- Utils::OpenHandle(*constructor)->set_instance_template(*templ);
- templ->set_constructor(*Utils::OpenHandle(*constructor));
- }
- return i::Handle<i::FunctionTemplateInfo>(
- i::FunctionTemplateInfo::cast(templ->constructor()));
-}
-
-
-Persistent<Context> v8::Context::New(
- v8::ExtensionConfiguration* extensions,
- v8::Handle<ObjectTemplate> global_template,
- v8::Handle<Value> global_object) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Context::New()");
- LOG_API(isolate, "Context::New");
- ON_BAILOUT(isolate, "v8::Context::New()", return Persistent<Context>());
-
- // Enter V8 via an ENTER_V8 scope.
- i::Handle<i::Context> env;
- {
- ENTER_V8(isolate);
- v8::Handle<ObjectTemplate> proxy_template = global_template;
- i::Handle<i::FunctionTemplateInfo> proxy_constructor;
- i::Handle<i::FunctionTemplateInfo> global_constructor;
-
- if (!global_template.IsEmpty()) {
- // Make sure that the global_template has a constructor.
- global_constructor =
- EnsureConstructor(Utils::OpenHandle(*global_template));
-
- // Create a fresh template for the global proxy object.
- proxy_template = ObjectTemplate::New();
- proxy_constructor =
- EnsureConstructor(Utils::OpenHandle(*proxy_template));
-
- // Set the global template to be the prototype template of
- // global proxy template.
- proxy_constructor->set_prototype_template(
- *Utils::OpenHandle(*global_template));
-
- // Migrate security handlers from global_template to
- // proxy_template. Temporarily removing access check
- // information from the global template.
- if (!global_constructor->access_check_info()->IsUndefined()) {
- proxy_constructor->set_access_check_info(
- global_constructor->access_check_info());
- proxy_constructor->set_needs_access_check(
- global_constructor->needs_access_check());
- global_constructor->set_needs_access_check(false);
- global_constructor->set_access_check_info(
- isolate->heap()->undefined_value());
- }
- }
-
- // Create the environment.
- env = isolate->bootstrapper()->CreateEnvironment(
- Utils::OpenHandle(*global_object),
- proxy_template,
- extensions);
-
- // Restore the access check info on the global template.
- if (!global_template.IsEmpty()) {
- ASSERT(!global_constructor.is_null());
- ASSERT(!proxy_constructor.is_null());
- global_constructor->set_access_check_info(
- proxy_constructor->access_check_info());
- global_constructor->set_needs_access_check(
- proxy_constructor->needs_access_check());
- }
- isolate->runtime_profiler()->Reset();
- }
- // Leave V8.
-
- if (env.is_null())
- return Persistent<Context>();
- return Persistent<Context>(Utils::ToLocal(env));
-}
-
-
-void v8::Context::SetSecurityToken(Handle<Value> token) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::SetSecurityToken()")) {
- return;
- }
- ENTER_V8(isolate);
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- i::Handle<i::Object> token_handle = Utils::OpenHandle(*token);
- env->set_security_token(*token_handle);
-}
-
-
-void v8::Context::UseDefaultSecurityToken() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate,
- "v8::Context::UseDefaultSecurityToken()")) {
- return;
- }
- ENTER_V8(isolate);
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- env->set_security_token(env->global());
-}
-
-
-Handle<Value> v8::Context::GetSecurityToken() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetSecurityToken()")) {
- return Handle<Value>();
- }
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- i::Object* security_token = env->security_token();
- i::Handle<i::Object> token_handle(security_token);
- return Utils::ToLocal(token_handle);
-}
-
-
-bool Context::HasOutOfMemoryException() {
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- return env->has_out_of_memory();
-}
-
-
-bool Context::InContext() {
- return i::Isolate::Current()->context() != NULL;
-}
-
-
-v8::Local<v8::Context> Context::GetEntered() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetEntered()")) {
- return Local<Context>();
- }
- i::Handle<i::Object> last =
- isolate->handle_scope_implementer()->LastEnteredContext();
- if (last.is_null()) return Local<Context>();
- i::Handle<i::Context> context = i::Handle<i::Context>::cast(last);
- return Utils::ToLocal(context);
-}
-
-
-v8::Local<v8::Context> Context::GetCurrent() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetCurrent()")) {
- return Local<Context>();
- }
- i::Handle<i::Object> current = isolate->global_context();
- if (current.is_null()) return Local<Context>();
- i::Handle<i::Context> context = i::Handle<i::Context>::cast(current);
- return Utils::ToLocal(context);
-}
-
-
-v8::Local<v8::Context> Context::GetCalling() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetCalling()")) {
- return Local<Context>();
- }
- i::Handle<i::Object> calling =
- isolate->GetCallingGlobalContext();
- if (calling.is_null()) return Local<Context>();
- i::Handle<i::Context> context = i::Handle<i::Context>::cast(calling);
- return Utils::ToLocal(context);
-}
-
-
-v8::Local<v8::Object> Context::Global() {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Context::Global()")) {
- return Local<v8::Object>();
- }
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
- i::Handle<i::Object> global(context->global_proxy());
- return Utils::ToLocal(i::Handle<i::JSObject>::cast(global));
-}
-
-
-void Context::DetachGlobal() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::DetachGlobal()")) return;
- ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
- isolate->bootstrapper()->DetachGlobal(context);
-}
-
-
-#ifdef QT_BUILD_SCRIPT_LIB
-Local<Context> v8::Context::NewScopeContext(v8::Handle<Object> scope_object) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Context::NewScopeContext()");
- ON_BAILOUT(isolate, "v8::Context::NewScopeContext()", return Local<Context>());
- LOG_API(isolate, "Context::NewScopeContext");
-
- ENTER_V8(isolate);
- i::Handle<i::JSObject> obj = Utils::OpenHandle(*scope_object);
- i::Handle<i::Context> current(isolate->context());
- i::Handle<i::Context> context = isolate->factory()->NewWithContext(current, obj, /*is_catch_context=*/false);
- return Utils::ToLocal(context);
-}
-
-
-Local<Context> v8::Context::NewFunctionContext() {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Context::NewFunctionContext()");
- ON_BAILOUT(isolate, "v8::Context::NewFunctionContext()", return Local<Context>());
- LOG_API(isolate, "Context::NewFunctionContext");
-
- ENTER_V8(isolate);
- i::Handle<i::JSFunction> closure(isolate->global_context()->closure());
- i::Handle<i::Context> context = isolate->factory()->NewFunctionContext(i::Context::MIN_CONTEXT_SLOTS,
- closure);
- return Utils::ToLocal(context);
-}
-
-
-Local<Context> v8::Context::GetPrevious() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetPrevious()")) return Local<Context>();
- ENTER_V8(isolate);
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- if (env->IsGlobalContext()) return Local<Context>();
- i::Context* previous = 0;
- if (env->is_function_context())
- previous = env->closure()->context();
- else
- previous = env->previous();
- if (!previous) return Local<Context>();
- i::Handle<i::Context> previous_handle(previous);
- return Utils::ToLocal(previous_handle);
-}
-
-
-Local<Object> v8::Context::GetExtensionObject() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetExtensionObject()")) return Local<Object>();
- ENTER_V8(isolate);
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- if (!env->has_extension()) {
- if (env->is_function_context()) {
- // Create extension object on demand.
- i::Handle<i::JSObject> ext = isolate->factory()->NewJSObject(
- isolate->context_extension_function());
- env->set_extension(*ext);
- } else {
- return Local<Object>();
- }
- }
- i::Handle<i::Object> extension_handle(env->extension());
- return Local<v8::Object>(ToApi<Object>(extension_handle));
-}
-
-void v8::Context::SetExtensionObject(Handle<Object> extension) {
- i::Isolate *isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::SetExtensionObject()")) return;
- ENTER_V8(isolate);
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- env->set_extension(*Utils::OpenHandle(*extension));
-}
-
-v8::Local<v8::Context> Context::GetCallerContext()
-{
- i::JavaScriptFrameIterator it;
- if (it.done())
- return Local<Context>();
- i::JavaScriptFrame *frame = it.frame();
- ASSERT(frame);
- i::Context *context = (i::Context*)frame->context();
- ASSERT(context);
- i::Handle<i::Context> context_handle(context);
- return Utils::ToLocal(context_handle);
-}
-#endif
-
-
-void Context::ReattachGlobal(Handle<Object> global_object) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::ReattachGlobal()")) return;
- ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
- isolate->bootstrapper()->ReattachGlobal(
- context,
- Utils::OpenHandle(*global_object));
-}
-
-
-void V8::SetWrapperClassId(i::Object** global_handle, uint16_t class_id) {
- i::GlobalHandles::SetWrapperClassId(global_handle, class_id);
-}
-
-
-Local<v8::Object> ObjectTemplate::NewInstance() {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::ObjectTemplate::NewInstance()",
- return Local<v8::Object>());
- LOG_API(isolate, "ObjectTemplate::NewInstance");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj =
- i::Execution::InstantiateObject(Utils::OpenHandle(this),
- &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
- return Utils::ToLocal(i::Handle<i::JSObject>::cast(obj));
-}
-
-
-Local<v8::Function> FunctionTemplate::GetFunction() {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::FunctionTemplate::GetFunction()",
- return Local<v8::Function>());
- LOG_API(isolate, "FunctionTemplate::GetFunction");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj =
- i::Execution::InstantiateFunction(Utils::OpenHandle(this),
- &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Function>());
- return Utils::ToLocal(i::Handle<i::JSFunction>::cast(obj));
-}
-
-
-bool FunctionTemplate::HasInstance(v8::Handle<v8::Value> value) {
- ON_BAILOUT(i::Isolate::Current(), "v8::FunctionTemplate::HasInstanceOf()",
- return false);
- i::Object* obj = *Utils::OpenHandle(*value);
- return obj->IsInstanceOf(*Utils::OpenHandle(this));
-}
-
-
-static Local<External> ExternalNewImpl(void* data) {
- return Utils::ToLocal(FACTORY->NewProxy(static_cast<i::Address>(data)));
-}
-
-static void* ExternalValueImpl(i::Handle<i::Object> obj) {
- return reinterpret_cast<void*>(i::Proxy::cast(*obj)->proxy());
-}
-
-
-Local<Value> v8::External::Wrap(void* data) {
- i::Isolate* isolate = i::Isolate::Current();
- STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
- LOG_API(isolate, "External::Wrap");
- EnsureInitializedForIsolate(isolate, "v8::External::Wrap()");
- ENTER_V8(isolate);
-
- v8::Local<v8::Value> result = CanBeEncodedAsSmi(data)
- ? Utils::ToLocal(i::Handle<i::Object>(EncodeAsSmi(data)))
- : v8::Local<v8::Value>(ExternalNewImpl(data));
-
- ASSERT_EQ(data, Unwrap(result));
- return result;
-}
-
-
-void* v8::Object::SlowGetPointerFromInternalField(int index) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- i::Object* value = obj->GetInternalField(index);
- if (value->IsSmi()) {
- return i::Internals::GetExternalPointerFromSmi(value);
- } else if (value->IsProxy()) {
- return reinterpret_cast<void*>(i::Proxy::cast(value)->proxy());
- } else {
- return NULL;
- }
-}
-
-
-void* v8::External::FullUnwrap(v8::Handle<v8::Value> wrapper) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::External::Unwrap()")) return 0;
- i::Handle<i::Object> obj = Utils::OpenHandle(*wrapper);
- void* result;
- if (obj->IsSmi()) {
- result = i::Internals::GetExternalPointerFromSmi(*obj);
- } else if (obj->IsProxy()) {
- result = ExternalValueImpl(obj);
- } else {
- result = NULL;
- }
- ASSERT_EQ(result, QuickUnwrap(wrapper));
- return result;
-}
-
-
-Local<External> v8::External::New(void* data) {
- STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "External::New");
- EnsureInitializedForIsolate(isolate, "v8::External::New()");
- ENTER_V8(isolate);
- return ExternalNewImpl(data);
-}
-
-
-void* External::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::External::Value()")) return 0;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return ExternalValueImpl(obj);
-}
-
-
-Local<String> v8::String::Empty() {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::Empty()");
- LOG_API(isolate, "String::Empty()");
- return Utils::ToLocal(isolate->factory()->empty_symbol());
-}
-
-
-Local<String> v8::String::New(const char* data, int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::New()");
- LOG_API(isolate, "String::New(char)");
- if (length == 0) return Empty();
- ENTER_V8(isolate);
- if (length == -1) length = i::StrLength(data);
- i::Handle<i::String> result =
- isolate->factory()->NewStringFromUtf8(
- i::Vector<const char>(data, length));
- return Utils::ToLocal(result);
-}
-
-
-Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
- i::Handle<i::String> left_string = Utils::OpenHandle(*left);
- i::Isolate* isolate = left_string->GetIsolate();
- EnsureInitializedForIsolate(isolate, "v8::String::New()");
- LOG_API(isolate, "String::New(char)");
- ENTER_V8(isolate);
- i::Handle<i::String> right_string = Utils::OpenHandle(*right);
- i::Handle<i::String> result = isolate->factory()->NewConsString(left_string,
- right_string);
- return Utils::ToLocal(result);
-}
-
-
-Local<String> v8::String::NewUndetectable(const char* data, int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::NewUndetectable()");
- LOG_API(isolate, "String::NewUndetectable(char)");
- ENTER_V8(isolate);
- if (length == -1) length = i::StrLength(data);
- i::Handle<i::String> result =
- isolate->factory()->NewStringFromUtf8(
- i::Vector<const char>(data, length));
- result->MarkAsUndetectable();
- return Utils::ToLocal(result);
-}
-
-
-static int TwoByteStringLength(const uint16_t* data) {
- int length = 0;
- while (data[length] != '\0') length++;
- return length;
-}
-
-
-Local<String> v8::String::New(const uint16_t* data, int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::New()");
- LOG_API(isolate, "String::New(uint16_)");
- if (length == 0) return Empty();
- ENTER_V8(isolate);
- if (length == -1) length = TwoByteStringLength(data);
- i::Handle<i::String> result =
- isolate->factory()->NewStringFromTwoByte(
- i::Vector<const uint16_t>(data, length));
- return Utils::ToLocal(result);
-}
-
-
-Local<String> v8::String::NewUndetectable(const uint16_t* data, int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::NewUndetectable()");
- LOG_API(isolate, "String::NewUndetectable(uint16_)");
- ENTER_V8(isolate);
- if (length == -1) length = TwoByteStringLength(data);
- i::Handle<i::String> result =
- isolate->factory()->NewStringFromTwoByte(
- i::Vector<const uint16_t>(data, length));
- result->MarkAsUndetectable();
- return Utils::ToLocal(result);
-}
-
-
-i::Handle<i::String> NewExternalStringHandle(i::Isolate* isolate,
- v8::String::ExternalStringResource* resource) {
- i::Handle<i::String> result =
- isolate->factory()->NewExternalStringFromTwoByte(resource);
- return result;
-}
-
-
-i::Handle<i::String> NewExternalAsciiStringHandle(i::Isolate* isolate,
- v8::String::ExternalAsciiStringResource* resource) {
- i::Handle<i::String> result =
- isolate->factory()->NewExternalStringFromAscii(resource);
- return result;
-}
-
-
-Local<String> v8::String::NewExternal(
- v8::String::ExternalStringResource* resource) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::NewExternal()");
- LOG_API(isolate, "String::NewExternal");
- ENTER_V8(isolate);
- i::Handle<i::String> result = NewExternalStringHandle(isolate, resource);
- isolate->heap()->external_string_table()->AddString(*result);
- return Utils::ToLocal(result);
-}
-
-
-bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
- i::Handle<i::String> obj = Utils::OpenHandle(this);
- i::Isolate* isolate = obj->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::MakeExternal()")) return false;
- if (i::StringShape(*obj).IsExternalTwoByte()) {
- return false; // Already an external string.
- }
- ENTER_V8(isolate);
- if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
- return false;
- }
- bool result = obj->MakeExternal(resource);
- if (result && !obj->IsSymbol()) {
- isolate->heap()->external_string_table()->AddString(*obj);
- }
- return result;
-}
-
-
-Local<String> v8::String::NewExternal(
- v8::String::ExternalAsciiStringResource* resource) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::NewExternal()");
- LOG_API(isolate, "String::NewExternal");
- ENTER_V8(isolate);
- i::Handle<i::String> result = NewExternalAsciiStringHandle(isolate, resource);
- isolate->heap()->external_string_table()->AddString(*result);
- return Utils::ToLocal(result);
-}
-
-
-bool v8::String::MakeExternal(
- v8::String::ExternalAsciiStringResource* resource) {
- i::Handle<i::String> obj = Utils::OpenHandle(this);
- i::Isolate* isolate = obj->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::MakeExternal()")) return false;
- if (i::StringShape(*obj).IsExternalTwoByte()) {
- return false; // Already an external string.
- }
- ENTER_V8(isolate);
- if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
- return false;
- }
- bool result = obj->MakeExternal(resource);
- if (result && !obj->IsSymbol()) {
- isolate->heap()->external_string_table()->AddString(*obj);
- }
- return result;
-}
-
-
-bool v8::String::CanMakeExternal() {
- i::Handle<i::String> obj = Utils::OpenHandle(this);
- i::Isolate* isolate = obj->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::CanMakeExternal()")) return false;
- if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
- return false;
- }
- int size = obj->Size(); // Byte size of the original string.
- if (size < i::ExternalString::kSize)
- return false;
- i::StringShape shape(*obj);
- return !shape.IsExternal();
-}
-
-
-Local<v8::Object> v8::Object::New() {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Object::New()");
- LOG_API(isolate, "Object::New");
- ENTER_V8(isolate);
- i::Handle<i::JSObject> obj =
- isolate->factory()->NewJSObject(isolate->object_function());
- return Utils::ToLocal(obj);
-}
-
-
-Local<v8::Value> v8::Date::New(double time) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Date::New()");
- LOG_API(isolate, "Date::New");
- if (isnan(time)) {
- // Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
- time = i::OS::nan_value();
- }
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj =
- i::Execution::NewDate(time, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Value>());
- return Utils::ToLocal(obj);
-}
-
-
-double v8::Date::NumberValue() const {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Date::NumberValue()")) return 0;
- LOG_API(isolate, "Date::NumberValue");
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
- return jsvalue->value()->Number();
-}
-
-
-void v8::Date::DateTimeConfigurationChangeNotification() {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Date::DateTimeConfigurationChangeNotification()",
- return);
- LOG_API(isolate, "Date::DateTimeConfigurationChangeNotification");
- ENTER_V8(isolate);
-
- i::HandleScope scope(isolate);
- // Get the function ResetDateCache (defined in date-delay.js).
- i::Handle<i::String> func_name_str =
- isolate->factory()->LookupAsciiSymbol("ResetDateCache");
- i::MaybeObject* result =
- isolate->js_builtins_object()->GetProperty(*func_name_str);
- i::Object* object_func;
- if (!result->ToObject(&object_func)) {
- return;
- }
-
- if (object_func->IsJSFunction()) {
- i::Handle<i::JSFunction> func =
- i::Handle<i::JSFunction>(i::JSFunction::cast(object_func));
-
- // Call ResetDateCache(0 but expect no exceptions:
- bool caught_exception = false;
- i::Execution::TryCall(func,
- isolate->js_builtins_object(),
- 0,
- NULL,
- &caught_exception);
- }
-}
-
-
-static i::Handle<i::String> RegExpFlagsToString(RegExp::Flags flags) {
- char flags_buf[3];
- int num_flags = 0;
- if ((flags & RegExp::kGlobal) != 0) flags_buf[num_flags++] = 'g';
- if ((flags & RegExp::kMultiline) != 0) flags_buf[num_flags++] = 'm';
- if ((flags & RegExp::kIgnoreCase) != 0) flags_buf[num_flags++] = 'i';
- ASSERT(num_flags <= static_cast<int>(ARRAY_SIZE(flags_buf)));
- return FACTORY->LookupSymbol(
- i::Vector<const char>(flags_buf, num_flags));
-}
-
-
-Local<v8::RegExp> v8::RegExp::New(Handle<String> pattern,
- Flags flags) {
- i::Isolate* isolate = Utils::OpenHandle(*pattern)->GetIsolate();
- EnsureInitializedForIsolate(isolate, "v8::RegExp::New()");
- LOG_API(isolate, "RegExp::New");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::JSRegExp> obj = i::Execution::NewJSRegExp(
- Utils::OpenHandle(*pattern),
- RegExpFlagsToString(flags),
- &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::RegExp>());
- return Utils::ToLocal(i::Handle<i::JSRegExp>::cast(obj));
-}
-
-
-Local<v8::String> v8::RegExp::GetSource() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::RegExp::GetSource()")) {
- return Local<v8::String>();
- }
- i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
- return Utils::ToLocal(i::Handle<i::String>(obj->Pattern()));
-}
-
-
-// Assert that the static flags cast in GetFlags is valid.
-#define REGEXP_FLAG_ASSERT_EQ(api_flag, internal_flag) \
- STATIC_ASSERT(static_cast<int>(v8::RegExp::api_flag) == \
- static_cast<int>(i::JSRegExp::internal_flag))
-REGEXP_FLAG_ASSERT_EQ(kNone, NONE);
-REGEXP_FLAG_ASSERT_EQ(kGlobal, GLOBAL);
-REGEXP_FLAG_ASSERT_EQ(kIgnoreCase, IGNORE_CASE);
-REGEXP_FLAG_ASSERT_EQ(kMultiline, MULTILINE);
-#undef REGEXP_FLAG_ASSERT_EQ
-
-v8::RegExp::Flags v8::RegExp::GetFlags() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::RegExp::GetFlags()")) {
- return v8::RegExp::kNone;
- }
- i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
- return static_cast<RegExp::Flags>(obj->GetFlags().value());
-}
-
-
-Local<v8::Array> v8::Array::New(int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Array::New()");
- LOG_API(isolate, "Array::New");
- ENTER_V8(isolate);
- int real_length = length > 0 ? length : 0;
- i::Handle<i::JSArray> obj = isolate->factory()->NewJSArray(real_length);
- i::Handle<i::Object> length_obj =
- isolate->factory()->NewNumberFromInt(real_length);
- obj->set_length(*length_obj);
- return Utils::ToLocal(obj);
-}
-
-
-uint32_t v8::Array::Length() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Array::Length()")) return 0;
- i::Handle<i::JSArray> obj = Utils::OpenHandle(this);
- i::Object* length = obj->length();
- if (length->IsSmi()) {
- return i::Smi::cast(length)->value();
- } else {
- return static_cast<uint32_t>(length->Number());
- }
-}
-
-
-Local<Object> Array::CloneElementAt(uint32_t index) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Array::CloneElementAt()", return Local<Object>());
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- if (!self->HasFastElements()) {
- return Local<Object>();
- }
- i::FixedArray* elms = i::FixedArray::cast(self->elements());
- i::Object* paragon = elms->get(index);
- if (!paragon->IsJSObject()) {
- return Local<Object>();
- }
- i::Handle<i::JSObject> paragon_handle(i::JSObject::cast(paragon));
- EXCEPTION_PREAMBLE(isolate);
- ENTER_V8(isolate);
- i::Handle<i::JSObject> result = i::Copy(paragon_handle);
- has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
- return Utils::ToLocal(result);
-}
-
-
-Local<String> v8::String::NewSymbol(const char* data, int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::NewSymbol()");
- LOG_API(isolate, "String::NewSymbol(char)");
- ENTER_V8(isolate);
- if (length == -1) length = i::StrLength(data);
- i::Handle<i::String> result =
- isolate->factory()->LookupSymbol(i::Vector<const char>(data, length));
- return Utils::ToLocal(result);
-}
-
-
-Local<Number> v8::Number::New(double value) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Number::New()");
- if (isnan(value)) {
- // Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
- value = i::OS::nan_value();
- }
- ENTER_V8(isolate);
- i::Handle<i::Object> result = isolate->factory()->NewNumber(value);
- return Utils::NumberToLocal(result);
-}
-
-
-Local<Integer> v8::Integer::New(int32_t value) {
- i::Isolate* isolate = i::Isolate::UncheckedCurrent();
- EnsureInitializedForIsolate(isolate, "v8::Integer::New()");
- if (i::Smi::IsValid(value)) {
- return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value),
- isolate));
- }
- ENTER_V8(isolate);
- i::Handle<i::Object> result = isolate->factory()->NewNumber(value);
- return Utils::IntegerToLocal(result);
-}
-
-
-Local<Integer> Integer::NewFromUnsigned(uint32_t value) {
- bool fits_into_int32_t = (value & (1 << 31)) == 0;
- if (fits_into_int32_t) {
- return Integer::New(static_cast<int32_t>(value));
- }
- i::Isolate* isolate = i::Isolate::Current();
- ENTER_V8(isolate);
- i::Handle<i::Object> result = isolate->factory()->NewNumber(value);
- return Utils::IntegerToLocal(result);
-}
-
-
-void V8::IgnoreOutOfMemoryException() {
- EnterIsolateIfNeeded()->handle_scope_implementer()->set_ignore_out_of_memory(
- true);
-}
-
-
-bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::V8::AddMessageListener()");
- ON_BAILOUT(isolate, "v8::V8::AddMessageListener()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- NeanderArray listeners(isolate->factory()->message_listeners());
- NeanderObject obj(2);
- obj.set(0, *isolate->factory()->NewProxy(FUNCTION_ADDR(that)));
- obj.set(1, data.IsEmpty() ?
- isolate->heap()->undefined_value() :
- *Utils::OpenHandle(*data));
- listeners.add(obj.value());
- return true;
-}
-
-
-void V8::RemoveMessageListeners(MessageCallback that) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::V8::RemoveMessageListener()");
- ON_BAILOUT(isolate, "v8::V8::RemoveMessageListeners()", return);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- NeanderArray listeners(isolate->factory()->message_listeners());
- for (int i = 0; i < listeners.length(); i++) {
- if (listeners.get(i)->IsUndefined()) continue; // skip deleted ones
-
- NeanderObject listener(i::JSObject::cast(listeners.get(i)));
- i::Handle<i::Proxy> callback_obj(i::Proxy::cast(listener.get(0)));
- if (callback_obj->proxy() == FUNCTION_ADDR(that)) {
- listeners.set(i, isolate->heap()->undefined_value());
- }
- }
-}
-
-
-void V8::SetCaptureStackTraceForUncaughtExceptions(
- bool capture,
- int frame_limit,
- StackTrace::StackTraceOptions options) {
- i::Isolate::Current()->SetCaptureStackTraceForUncaughtExceptions(
- capture,
- frame_limit,
- options);
-}
-
-
-void V8::SetCounterFunction(CounterLookupCallback callback) {
- i::Isolate* isolate = EnterIsolateIfNeeded();
- if (IsDeadCheck(isolate, "v8::V8::SetCounterFunction()")) return;
- isolate->stats_table()->SetCounterFunction(callback);
-}
-
-void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) {
- i::Isolate* isolate = EnterIsolateIfNeeded();
- if (IsDeadCheck(isolate, "v8::V8::SetCreateHistogramFunction()")) return;
- isolate->stats_table()->SetCreateHistogramFunction(callback);
-}
-
-void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
- i::Isolate* isolate = EnterIsolateIfNeeded();
- if (IsDeadCheck(isolate, "v8::V8::SetAddHistogramSampleFunction()")) return;
- isolate->stats_table()->
- SetAddHistogramSampleFunction(callback);
-}
-
-void V8::EnableSlidingStateWindow() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::EnableSlidingStateWindow()")) return;
- isolate->logger()->EnableSlidingStateWindow();
-}
-
-
-void V8::SetFailedAccessCheckCallbackFunction(
- FailedAccessCheckCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::SetFailedAccessCheckCallbackFunction()")) {
- return;
- }
- isolate->SetFailedAccessCheckCallback(callback);
-}
-
-void V8::AddObjectGroup(Persistent<Value>* objects,
- size_t length,
- RetainedObjectInfo* info) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddObjectGroup()")) return;
- STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
- isolate->global_handles()->AddObjectGroup(
- reinterpret_cast<i::Object***>(objects), length, info);
-}
-
-
-void V8::AddImplicitReferences(Persistent<Object> parent,
- Persistent<Value>* children,
- size_t length) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddImplicitReferences()")) return;
- STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
- isolate->global_handles()->AddImplicitReferences(
- *Utils::OpenHandle(*parent),
- reinterpret_cast<i::Object***>(children), length);
-}
-
-
-int V8::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AdjustAmountOfExternalAllocatedMemory()")) {
- return 0;
- }
- return isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
- change_in_bytes);
-}
-
-
-void V8::SetGlobalGCPrologueCallback(GCCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCPrologueCallback()")) return;
- isolate->heap()->SetGlobalGCPrologueCallback(callback);
-}
-
-
-void V8::SetGlobalGCEpilogueCallback(GCCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCEpilogueCallback()")) return;
- isolate->heap()->SetGlobalGCEpilogueCallback(callback);
-}
-
-
-void V8::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddGCPrologueCallback()")) return;
- isolate->heap()->AddGCPrologueCallback(callback, gc_type);
-}
-
-
-void V8::RemoveGCPrologueCallback(GCPrologueCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::RemoveGCPrologueCallback()")) return;
- isolate->heap()->RemoveGCPrologueCallback(callback);
-}
-
-
-void V8::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddGCEpilogueCallback()")) return;
- isolate->heap()->AddGCEpilogueCallback(callback, gc_type);
-}
-
-
-void V8::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::RemoveGCEpilogueCallback()")) return;
- isolate->heap()->RemoveGCEpilogueCallback(callback);
-}
-
-
-void V8::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddMemoryAllocationCallback()")) return;
- isolate->memory_allocator()->AddMemoryAllocationCallback(
- callback, space, action);
-}
-
-
-void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::RemoveMemoryAllocationCallback()")) return;
- isolate->memory_allocator()->RemoveMemoryAllocationCallback(
- callback);
-}
-
-
-void V8::PauseProfiler() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- PauseProfilerEx(PROFILER_MODULE_CPU);
-#endif
-}
-
-
-void V8::ResumeProfiler() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- ResumeProfilerEx(PROFILER_MODULE_CPU);
-#endif
-}
-
-
-bool V8::IsProfilerPaused() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- return LOGGER->GetActiveProfilerModules() & PROFILER_MODULE_CPU;
-#else
- return true;
-#endif
-}
-
-
-void V8::ResumeProfilerEx(int flags, int tag) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- i::Isolate* isolate = i::Isolate::Current();
- if (flags & PROFILER_MODULE_HEAP_SNAPSHOT) {
- // Snapshot mode: resume modules, perform GC, then pause only
- // those modules which haven't been started prior to making a
- // snapshot.
-
- // Make a GC prior to taking a snapshot.
- isolate->heap()->CollectAllGarbage(false);
- // Reset snapshot flag and CPU module flags.
- flags &= ~(PROFILER_MODULE_HEAP_SNAPSHOT | PROFILER_MODULE_CPU);
- const int current_flags = isolate->logger()->GetActiveProfilerModules();
- isolate->logger()->ResumeProfiler(flags, tag);
- isolate->heap()->CollectAllGarbage(false);
- isolate->logger()->PauseProfiler(~current_flags & flags, tag);
- } else {
- isolate->logger()->ResumeProfiler(flags, tag);
- }
-#endif
-}
-
-
-void V8::PauseProfilerEx(int flags, int tag) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- LOGGER->PauseProfiler(flags, tag);
-#endif
-}
-
-
-int V8::GetActiveProfilerModules() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- return LOGGER->GetActiveProfilerModules();
-#else
- return PROFILER_MODULE_NONE;
-#endif
-}
-
-
-int V8::GetLogLines(int from_pos, char* dest_buf, int max_size) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- ASSERT(max_size >= kMinimumSizeForLogLinesBuffer);
- return LOGGER->GetLogLines(from_pos, dest_buf, max_size);
-#endif
- return 0;
-}
-
-
-int V8::GetCurrentThreadId() {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "V8::GetCurrentThreadId()");
- return isolate->thread_id();
-}
-
-
-void V8::TerminateExecution(int thread_id) {
- i::Isolate* isolate = i::Isolate::Current();
- if (!isolate->IsInitialized()) return;
- API_ENTRY_CHECK("V8::TerminateExecution()");
- // If the thread_id identifies the current thread just terminate
- // execution right away. Otherwise, ask the thread manager to
- // terminate the thread with the given id if any.
- if (thread_id == isolate->thread_id()) {
- isolate->stack_guard()->TerminateExecution();
- } else {
- isolate->thread_manager()->TerminateExecution(thread_id);
- }
-}
-
-#ifdef QT_BUILD_SCRIPT_LIB
-void V8::ExecuteUserCallback(UserCallback callback, void *data) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::ExecuteUserCallback()")) return;
- isolate->stack_guard()->ExecuteUserCallback(callback, data);
-}
-#endif
-
-void V8::TerminateExecution(Isolate* isolate) {
- // If no isolate is supplied, use the default isolate.
- if (isolate != NULL) {
- reinterpret_cast<i::Isolate*>(isolate)->stack_guard()->TerminateExecution();
- } else {
- i::Isolate::GetDefaultIsolateStackGuard()->TerminateExecution();
- }
-}
-
-
-bool V8::IsExecutionTerminating() {
- i::Isolate* isolate = i::Isolate::Current();
- return IsExecutionTerminatingCheck(isolate);
-}
-
-
-Isolate* Isolate::GetCurrent() {
- i::Isolate* isolate = i::Isolate::UncheckedCurrent();
- return reinterpret_cast<Isolate*>(isolate);
-}
-
-
-Isolate* Isolate::New() {
- i::Isolate* isolate = new i::Isolate();
- return reinterpret_cast<Isolate*>(isolate);
-}
-
-
-void Isolate::Dispose() {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- if (!ApiCheck(!isolate->IsInUse(),
- "v8::Isolate::Dispose()",
- "Disposing the isolate that is entered by a thread.")) {
- return;
- }
- isolate->TearDown();
-}
-
-
-void Isolate::Enter() {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->Enter();
-}
-
-
-void Isolate::Exit() {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->Exit();
-}
-
-
-String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::String::Utf8Value::Utf8Value()")) return;
- if (obj.IsEmpty()) {
- str_ = NULL;
- length_ = 0;
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- TryCatch try_catch;
- Handle<String> str = obj->ToString();
- if (str.IsEmpty()) {
- str_ = NULL;
- length_ = 0;
- } else {
- length_ = str->Utf8Length();
- str_ = i::NewArray<char>(length_ + 1);
- str->WriteUtf8(str_);
- }
-}
-
-
-String::Utf8Value::~Utf8Value() {
- i::DeleteArray(str_);
-}
-
-
-String::AsciiValue::AsciiValue(v8::Handle<v8::Value> obj) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::String::AsciiValue::AsciiValue()")) return;
- if (obj.IsEmpty()) {
- str_ = NULL;
- length_ = 0;
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- TryCatch try_catch;
- Handle<String> str = obj->ToString();
- if (str.IsEmpty()) {
- str_ = NULL;
- length_ = 0;
- } else {
- length_ = str->Length();
- str_ = i::NewArray<char>(length_ + 1);
- str->WriteAscii(str_);
- }
-}
-
-
-String::AsciiValue::~AsciiValue() {
- i::DeleteArray(str_);
-}
-
-
-String::Value::Value(v8::Handle<v8::Value> obj) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::String::Value::Value()")) return;
- if (obj.IsEmpty()) {
- str_ = NULL;
- length_ = 0;
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- TryCatch try_catch;
- Handle<String> str = obj->ToString();
- if (str.IsEmpty()) {
- str_ = NULL;
- length_ = 0;
- } else {
- length_ = str->Length();
- str_ = i::NewArray<uint16_t>(length_ + 1);
- str->Write(str_);
- }
-}
-
-
-String::Value::~Value() {
- i::DeleteArray(str_);
-}
-
-Local<Value> Exception::RangeError(v8::Handle<v8::String> raw_message) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "RangeError");
- ON_BAILOUT(isolate, "v8::Exception::RangeError()", return Local<Value>());
- ENTER_V8(isolate);
- i::Object* error;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result = isolate->factory()->NewRangeError(message);
- error = *result;
- }
- i::Handle<i::Object> result(error);
- return Utils::ToLocal(result);
-}
-
-Local<Value> Exception::ReferenceError(v8::Handle<v8::String> raw_message) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "ReferenceError");
- ON_BAILOUT(isolate, "v8::Exception::ReferenceError()", return Local<Value>());
- ENTER_V8(isolate);
- i::Object* error;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result =
- isolate->factory()->NewReferenceError(message);
- error = *result;
- }
- i::Handle<i::Object> result(error);
- return Utils::ToLocal(result);
-}
-
-Local<Value> Exception::SyntaxError(v8::Handle<v8::String> raw_message) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "SyntaxError");
- ON_BAILOUT(isolate, "v8::Exception::SyntaxError()", return Local<Value>());
- ENTER_V8(isolate);
- i::Object* error;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result = isolate->factory()->NewSyntaxError(message);
- error = *result;
- }
- i::Handle<i::Object> result(error);
- return Utils::ToLocal(result);
-}
-
-Local<Value> Exception::TypeError(v8::Handle<v8::String> raw_message) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "TypeError");
- ON_BAILOUT(isolate, "v8::Exception::TypeError()", return Local<Value>());
- ENTER_V8(isolate);
- i::Object* error;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result = isolate->factory()->NewTypeError(message);
- error = *result;
- }
- i::Handle<i::Object> result(error);
- return Utils::ToLocal(result);
-}
-
-Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "Error");
- ON_BAILOUT(isolate, "v8::Exception::Error()", return Local<Value>());
- ENTER_V8(isolate);
- i::Object* error;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result = isolate->factory()->NewError(message);
- error = *result;
- }
- i::Handle<i::Object> result(error);
- return Utils::ToLocal(result);
-}
-
-
-// --- D e b u g S u p p o r t ---
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-static void EventCallbackWrapper(const v8::Debug::EventDetails& event_details) {
- i::Isolate* isolate = i::Isolate::Current();
- if (isolate->debug_event_callback() != NULL) {
- isolate->debug_event_callback()(event_details.GetEvent(),
- event_details.GetExecutionState(),
- event_details.GetEventData(),
- event_details.GetCallbackData());
- }
-}
-
-
-bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::SetDebugEventListener()");
- ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener()", return false);
- ENTER_V8(isolate);
-
- isolate->set_debug_event_callback(that);
-
- i::HandleScope scope(isolate);
- i::Handle<i::Object> proxy = isolate->factory()->undefined_value();
- if (that != NULL) {
- proxy = isolate->factory()->NewProxy(FUNCTION_ADDR(EventCallbackWrapper));
- }
- isolate->debugger()->SetEventListener(proxy, Utils::OpenHandle(*data));
- return true;
-}
-
-
-bool Debug::SetDebugEventListener2(EventCallback2 that, Handle<Value> data) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::SetDebugEventListener2()");
- ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener2()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Object> proxy = isolate->factory()->undefined_value();
- if (that != NULL) {
- proxy = isolate->factory()->NewProxy(FUNCTION_ADDR(that));
- }
- isolate->debugger()->SetEventListener(proxy,
- Utils::OpenHandle(*data));
- return true;
-}
-
-
-bool Debug::SetDebugEventListener(v8::Handle<v8::Object> that,
- Handle<Value> data) {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener()", return false);
- ENTER_V8(isolate);
- isolate->debugger()->SetEventListener(Utils::OpenHandle(*that),
- Utils::OpenHandle(*data));
- return true;
-}
-
-
-void Debug::DebugBreak(Isolate* isolate) {
- // If no isolate is supplied, use the default isolate.
- if (isolate != NULL) {
- reinterpret_cast<i::Isolate*>(isolate)->stack_guard()->DebugBreak();
- } else {
- i::Isolate::GetDefaultIsolateStackGuard()->DebugBreak();
- }
-}
-
-
-void Debug::CancelDebugBreak(Isolate* isolate) {
- // If no isolate is supplied, use the default isolate.
- if (isolate != NULL) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- internal_isolate->stack_guard()->Continue(i::DEBUGBREAK);
- } else {
- i::Isolate::GetDefaultIsolateStackGuard()->Continue(i::DEBUGBREAK);
- }
-}
-
-
-void Debug::DebugBreakForCommand(ClientData* data, Isolate* isolate) {
- // If no isolate is supplied, use the default isolate.
- if (isolate != NULL) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- internal_isolate->debugger()->EnqueueDebugCommand(data);
- } else {
- i::Isolate::GetDefaultIsolateDebugger()->EnqueueDebugCommand(data);
- }
-}
-
-
-static void MessageHandlerWrapper(const v8::Debug::Message& message) {
- i::Isolate* isolate = i::Isolate::Current();
- if (isolate->message_handler()) {
- v8::String::Value json(message.GetJSON());
- (isolate->message_handler())(*json, json.length(), message.GetClientData());
- }
-}
-
-
-void Debug::SetMessageHandler(v8::Debug::MessageHandler handler,
- bool message_handler_thread) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::SetMessageHandler");
- ENTER_V8(isolate);
-
- // Message handler thread not supported any more. Parameter temporally left in
- // the API for client compatibility reasons.
- CHECK(!message_handler_thread);
-
- // TODO(sgjesse) support the old message handler API through a simple wrapper.
- isolate->set_message_handler(handler);
- if (handler != NULL) {
- isolate->debugger()->SetMessageHandler(MessageHandlerWrapper);
- } else {
- isolate->debugger()->SetMessageHandler(NULL);
- }
-}
-
-
-void Debug::SetMessageHandler2(v8::Debug::MessageHandler2 handler) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::SetMessageHandler");
- ENTER_V8(isolate);
- isolate->debugger()->SetMessageHandler(handler);
-}
-
-
-void Debug::SendCommand(const uint16_t* command, int length,
- ClientData* client_data,
- Isolate* isolate) {
- // If no isolate is supplied, use the default isolate.
- if (isolate != NULL) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- internal_isolate->debugger()->ProcessCommand(
- i::Vector<const uint16_t>(command, length), client_data);
- } else {
- i::Isolate::GetDefaultIsolateDebugger()->ProcessCommand(
- i::Vector<const uint16_t>(command, length), client_data);
- }
-}
-
-
-void Debug::SetHostDispatchHandler(HostDispatchHandler handler,
- int period) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::SetHostDispatchHandler");
- ENTER_V8(isolate);
- isolate->debugger()->SetHostDispatchHandler(handler, period);
-}
-
-
-void Debug::SetDebugMessageDispatchHandler(
- DebugMessageDispatchHandler handler, bool provide_locker) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate,
- "v8::Debug::SetDebugMessageDispatchHandler");
- ENTER_V8(isolate);
- isolate->debugger()->SetDebugMessageDispatchHandler(
- handler, provide_locker);
-}
-
-
-Local<Value> Debug::Call(v8::Handle<v8::Function> fun,
- v8::Handle<v8::Value> data) {
- i::Isolate* isolate = i::Isolate::Current();
- if (!isolate->IsInitialized()) return Local<Value>();
- ON_BAILOUT(isolate, "v8::Debug::Call()", return Local<Value>());
- ENTER_V8(isolate);
- i::Handle<i::Object> result;
- EXCEPTION_PREAMBLE(isolate);
- if (data.IsEmpty()) {
- result = isolate->debugger()->Call(Utils::OpenHandle(*fun),
- isolate->factory()->undefined_value(),
- &has_pending_exception);
- } else {
- result = isolate->debugger()->Call(Utils::OpenHandle(*fun),
- Utils::OpenHandle(*data),
- &has_pending_exception);
- }
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
- return Utils::ToLocal(result);
-}
-
-
-Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
- i::Isolate* isolate = i::Isolate::Current();
- if (!isolate->IsInitialized()) return Local<Value>();
- ON_BAILOUT(isolate, "v8::Debug::GetMirror()", return Local<Value>());
- ENTER_V8(isolate);
- v8::HandleScope scope;
- i::Debug* isolate_debug = isolate->debug();
- isolate_debug->Load();
- i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global());
- i::Handle<i::String> name =
- isolate->factory()->LookupAsciiSymbol("MakeMirror");
- i::Handle<i::Object> fun_obj = i::GetProperty(debug, name);
- i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(fun_obj);
- v8::Handle<v8::Function> v8_fun = Utils::ToLocal(fun);
- const int kArgc = 1;
- v8::Handle<v8::Value> argv[kArgc] = { obj };
- EXCEPTION_PREAMBLE(isolate);
- v8::Handle<v8::Value> result = v8_fun->Call(Utils::ToLocal(debug),
- kArgc,
- argv);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
- return scope.Close(result);
-}
-
-
-bool Debug::EnableAgent(const char* name, int port, bool wait_for_connection) {
- return i::Isolate::Current()->debugger()->StartAgent(name, port,
- wait_for_connection);
-}
-
-void Debug::ProcessDebugMessages() {
- i::Execution::ProcessDebugMesssages(true);
-}
-
-Local<Context> Debug::GetDebugContext() {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::GetDebugContext()");
- ENTER_V8(isolate);
- return Utils::ToLocal(i::Isolate::Current()->debugger()->GetDebugContext());
-}
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-Handle<String> CpuProfileNode::GetFunctionName() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetFunctionName");
- const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
- const i::CodeEntry* entry = node->entry();
- if (!entry->has_name_prefix()) {
- return Handle<String>(ToApi<String>(
- isolate->factory()->LookupAsciiSymbol(entry->name())));
- } else {
- return Handle<String>(ToApi<String>(isolate->factory()->NewConsString(
- isolate->factory()->LookupAsciiSymbol(entry->name_prefix()),
- isolate->factory()->LookupAsciiSymbol(entry->name()))));
- }
-}
-
-
-Handle<String> CpuProfileNode::GetScriptResourceName() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetScriptResourceName");
- const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
- return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
- node->entry()->resource_name())));
-}
-
-
-int CpuProfileNode::GetLineNumber() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetLineNumber");
- return reinterpret_cast<const i::ProfileNode*>(this)->entry()->line_number();
-}
-
-
-double CpuProfileNode::GetTotalTime() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetTotalTime");
- return reinterpret_cast<const i::ProfileNode*>(this)->GetTotalMillis();
-}
-
-
-double CpuProfileNode::GetSelfTime() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetSelfTime");
- return reinterpret_cast<const i::ProfileNode*>(this)->GetSelfMillis();
-}
-
-
-double CpuProfileNode::GetTotalSamplesCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetTotalSamplesCount");
- return reinterpret_cast<const i::ProfileNode*>(this)->total_ticks();
-}
-
-
-double CpuProfileNode::GetSelfSamplesCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetSelfSamplesCount");
- return reinterpret_cast<const i::ProfileNode*>(this)->self_ticks();
-}
-
-
-unsigned CpuProfileNode::GetCallUid() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetCallUid");
- return reinterpret_cast<const i::ProfileNode*>(this)->entry()->GetCallUid();
-}
-
-
-int CpuProfileNode::GetChildrenCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetChildrenCount");
- return reinterpret_cast<const i::ProfileNode*>(this)->children()->length();
-}
-
-
-const CpuProfileNode* CpuProfileNode::GetChild(int index) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetChild");
- const i::ProfileNode* child =
- reinterpret_cast<const i::ProfileNode*>(this)->children()->at(index);
- return reinterpret_cast<const CpuProfileNode*>(child);
-}
-
-
-void CpuProfile::Delete() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfile::Delete");
- i::CpuProfiler::DeleteProfile(reinterpret_cast<i::CpuProfile*>(this));
- if (i::CpuProfiler::GetProfilesCount() == 0 &&
- !i::CpuProfiler::HasDetachedProfiles()) {
- // If this was the last profile, clean up all accessory data as well.
- i::CpuProfiler::DeleteAllProfiles();
- }
-}
-
-
-unsigned CpuProfile::GetUid() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfile::GetUid");
- return reinterpret_cast<const i::CpuProfile*>(this)->uid();
-}
-
-
-Handle<String> CpuProfile::GetTitle() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfile::GetTitle");
- const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
- profile->title())));
-}
-
-
-const CpuProfileNode* CpuProfile::GetBottomUpRoot() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfile::GetBottomUpRoot");
- const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return reinterpret_cast<const CpuProfileNode*>(profile->bottom_up()->root());
-}
-
-
-const CpuProfileNode* CpuProfile::GetTopDownRoot() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfile::GetTopDownRoot");
- const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return reinterpret_cast<const CpuProfileNode*>(profile->top_down()->root());
-}
-
-
-int CpuProfiler::GetProfilesCount() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::GetProfilesCount");
- return i::CpuProfiler::GetProfilesCount();
-}
-
-
-const CpuProfile* CpuProfiler::GetProfile(int index,
- Handle<Value> security_token) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::GetProfile");
- return reinterpret_cast<const CpuProfile*>(
- i::CpuProfiler::GetProfile(
- security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
- index));
-}
-
-
-const CpuProfile* CpuProfiler::FindProfile(unsigned uid,
- Handle<Value> security_token) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::FindProfile");
- return reinterpret_cast<const CpuProfile*>(
- i::CpuProfiler::FindProfile(
- security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
- uid));
-}
-
-
-void CpuProfiler::StartProfiling(Handle<String> title) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::StartProfiling");
- i::CpuProfiler::StartProfiling(*Utils::OpenHandle(*title));
-}
-
-
-const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title,
- Handle<Value> security_token) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::StopProfiling");
- return reinterpret_cast<const CpuProfile*>(
- i::CpuProfiler::StopProfiling(
- security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
- *Utils::OpenHandle(*title)));
-}
-
-
-void CpuProfiler::DeleteAllProfiles() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::DeleteAllProfiles");
- i::CpuProfiler::DeleteAllProfiles();
-}
-
-
-static i::HeapGraphEdge* ToInternal(const HeapGraphEdge* edge) {
- return const_cast<i::HeapGraphEdge*>(
- reinterpret_cast<const i::HeapGraphEdge*>(edge));
-}
-
-HeapGraphEdge::Type HeapGraphEdge::GetType() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphEdge::GetType");
- return static_cast<HeapGraphEdge::Type>(ToInternal(this)->type());
-}
-
-
-Handle<Value> HeapGraphEdge::GetName() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphEdge::GetName");
- i::HeapGraphEdge* edge = ToInternal(this);
- switch (edge->type()) {
- case i::HeapGraphEdge::kContextVariable:
- case i::HeapGraphEdge::kInternal:
- case i::HeapGraphEdge::kProperty:
- case i::HeapGraphEdge::kShortcut:
- return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
- edge->name())));
- case i::HeapGraphEdge::kElement:
- case i::HeapGraphEdge::kHidden:
- return Handle<Number>(ToApi<Number>(isolate->factory()->NewNumberFromInt(
- edge->index())));
- default: UNREACHABLE();
- }
- return v8::Undefined();
-}
-
-
-const HeapGraphNode* HeapGraphEdge::GetFromNode() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphEdge::GetFromNode");
- const i::HeapEntry* from = ToInternal(this)->From();
- return reinterpret_cast<const HeapGraphNode*>(from);
-}
-
-
-const HeapGraphNode* HeapGraphEdge::GetToNode() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphEdge::GetToNode");
- const i::HeapEntry* to = ToInternal(this)->to();
- return reinterpret_cast<const HeapGraphNode*>(to);
-}
-
-
-static i::HeapEntry* ToInternal(const HeapGraphNode* entry) {
- return const_cast<i::HeapEntry*>(
- reinterpret_cast<const i::HeapEntry*>(entry));
-}
-
-
-HeapGraphNode::Type HeapGraphNode::GetType() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetType");
- return static_cast<HeapGraphNode::Type>(ToInternal(this)->type());
-}
-
-
-Handle<String> HeapGraphNode::GetName() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetName");
- return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
- ToInternal(this)->name())));
-}
-
-
-uint64_t HeapGraphNode::GetId() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetId");
- ASSERT(ToInternal(this)->snapshot()->type() != i::HeapSnapshot::kAggregated);
- return ToInternal(this)->id();
-}
-
-
-int HeapGraphNode::GetInstancesCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetInstancesCount");
- ASSERT(ToInternal(this)->snapshot()->type() == i::HeapSnapshot::kAggregated);
- return static_cast<int>(ToInternal(this)->id());
-}
-
-
-int HeapGraphNode::GetSelfSize() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetSelfSize");
- return ToInternal(this)->self_size();
-}
-
-
-int HeapGraphNode::GetRetainedSize(bool exact) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainedSize");
- return ToInternal(this)->RetainedSize(exact);
-}
-
-
-int HeapGraphNode::GetChildrenCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetChildrenCount");
- return ToInternal(this)->children().length();
-}
-
-
-const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetChild");
- return reinterpret_cast<const HeapGraphEdge*>(
- &ToInternal(this)->children()[index]);
-}
-
-
-int HeapGraphNode::GetRetainersCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainersCount");
- return ToInternal(this)->retainers().length();
-}
-
-
-const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainer");
- return reinterpret_cast<const HeapGraphEdge*>(
- ToInternal(this)->retainers()[index]);
-}
-
-
-const HeapGraphNode* HeapGraphNode::GetDominatorNode() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetDominatorNode");
- return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->dominator());
-}
-
-
-static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
- return const_cast<i::HeapSnapshot*>(
- reinterpret_cast<const i::HeapSnapshot*>(snapshot));
-}
-
-
-void HeapSnapshot::Delete() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::Delete");
- if (i::HeapProfiler::GetSnapshotsCount() > 1) {
- ToInternal(this)->Delete();
- } else {
- // If this is the last snapshot, clean up all accessory data as well.
- i::HeapProfiler::DeleteAllSnapshots();
- }
-}
-
-
-HeapSnapshot::Type HeapSnapshot::GetType() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetType");
- return static_cast<HeapSnapshot::Type>(ToInternal(this)->type());
-}
-
-
-unsigned HeapSnapshot::GetUid() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetUid");
- return ToInternal(this)->uid();
-}
-
-
-Handle<String> HeapSnapshot::GetTitle() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetTitle");
- return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
- ToInternal(this)->title())));
-}
-
-
-const HeapGraphNode* HeapSnapshot::GetRoot() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetHead");
- return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->root());
-}
-
-
-const HeapGraphNode* HeapSnapshot::GetNodeById(uint64_t id) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodeById");
- return reinterpret_cast<const HeapGraphNode*>(
- ToInternal(this)->GetEntryById(id));
-}
-
-
-void HeapSnapshot::Serialize(OutputStream* stream,
- HeapSnapshot::SerializationFormat format) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::Serialize");
- ApiCheck(format == kJSON,
- "v8::HeapSnapshot::Serialize",
- "Unknown serialization format");
- ApiCheck(stream->GetOutputEncoding() == OutputStream::kAscii,
- "v8::HeapSnapshot::Serialize",
- "Unsupported output encoding");
- ApiCheck(stream->GetChunkSize() > 0,
- "v8::HeapSnapshot::Serialize",
- "Invalid stream chunk size");
- i::HeapSnapshotJSONSerializer serializer(ToInternal(this));
- serializer.Serialize(stream);
-}
-
-
-int HeapProfiler::GetSnapshotsCount() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshotsCount");
- return i::HeapProfiler::GetSnapshotsCount();
-}
-
-
-const HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshot");
- return reinterpret_cast<const HeapSnapshot*>(
- i::HeapProfiler::GetSnapshot(index));
-}
-
-
-const HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::FindSnapshot");
- return reinterpret_cast<const HeapSnapshot*>(
- i::HeapProfiler::FindSnapshot(uid));
-}
-
-
-const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
- HeapSnapshot::Type type,
- ActivityControl* control) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::TakeSnapshot");
- i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull;
- switch (type) {
- case HeapSnapshot::kFull:
- internal_type = i::HeapSnapshot::kFull;
- break;
- case HeapSnapshot::kAggregated:
- internal_type = i::HeapSnapshot::kAggregated;
- break;
- default:
- UNREACHABLE();
- }
- return reinterpret_cast<const HeapSnapshot*>(
- i::HeapProfiler::TakeSnapshot(
- *Utils::OpenHandle(*title), internal_type, control));
-}
-
-
-void HeapProfiler::DeleteAllSnapshots() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::DeleteAllSnapshots");
- i::HeapProfiler::DeleteAllSnapshots();
-}
-
-
-void HeapProfiler::DefineWrapperClass(uint16_t class_id,
- WrapperInfoCallback callback) {
- i::Isolate::Current()->heap_profiler()->DefineWrapperClass(class_id,
- callback);
-}
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-
-v8::Testing::StressType internal::Testing::stress_type_ =
- v8::Testing::kStressTypeOpt;
-
-
-void Testing::SetStressRunType(Testing::StressType type) {
- internal::Testing::set_stress_type(type);
-}
-
-int Testing::GetStressRuns() {
- if (internal::FLAG_stress_runs != 0) return internal::FLAG_stress_runs;
-#ifdef DEBUG
- // In debug mode the code runs much slower so stressing will only make two
- // runs.
- return 2;
-#else
- return 5;
-#endif
-}
-
-
-static void SetFlagsFromString(const char* flags) {
- V8::SetFlagsFromString(flags, i::StrLength(flags));
-}
-
-
-void Testing::PrepareStressRun(int run) {
- static const char* kLazyOptimizations =
- "--prepare-always-opt --nolimit-inlining "
- "--noalways-opt --noopt-eagerly";
- static const char* kEagerOptimizations = "--opt-eagerly";
- static const char* kForcedOptimizations = "--always-opt";
-
- // If deoptimization stressed turn on frequent deoptimization. If no value
- // is spefified through --deopt-every-n-times use a default default value.
- static const char* kDeoptEvery13Times = "--deopt-every-n-times=13";
- if (internal::Testing::stress_type() == Testing::kStressTypeDeopt &&
- internal::FLAG_deopt_every_n_times == 0) {
- SetFlagsFromString(kDeoptEvery13Times);
- }
-
-#ifdef DEBUG
- // As stressing in debug mode only make two runs skip the deopt stressing
- // here.
- if (run == GetStressRuns() - 1) {
- SetFlagsFromString(kForcedOptimizations);
- } else {
- SetFlagsFromString(kEagerOptimizations);
- SetFlagsFromString(kLazyOptimizations);
- }
-#else
- if (run == GetStressRuns() - 1) {
- SetFlagsFromString(kForcedOptimizations);
- } else if (run == GetStressRuns() - 2) {
- SetFlagsFromString(kEagerOptimizations);
- } else {
- SetFlagsFromString(kLazyOptimizations);
- }
-#endif
-}
-
-
-void Testing::DeoptimizeAll() {
- internal::Deoptimizer::DeoptimizeAll();
-}
-
-
-namespace internal {
-
-
-void HandleScopeImplementer::FreeThreadResources() {
- Free();
-}
-
-
-char* HandleScopeImplementer::ArchiveThread(char* storage) {
- Isolate* isolate = Isolate::Current();
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
- handle_scope_data_ = *current;
- memcpy(storage, this, sizeof(*this));
-
- ResetAfterArchive();
- current->Initialize();
-
- return storage + ArchiveSpacePerThread();
-}
-
-
-int HandleScopeImplementer::ArchiveSpacePerThread() {
- return sizeof(HandleScopeImplementer);
-}
-
-
-char* HandleScopeImplementer::RestoreThread(char* storage) {
- memcpy(this, storage, sizeof(*this));
- *Isolate::Current()->handle_scope_data() = handle_scope_data_;
- return storage + ArchiveSpacePerThread();
-}
-
-
-void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
- // Iterate over all handles in the blocks except for the last.
- for (int i = blocks()->length() - 2; i >= 0; --i) {
- Object** block = blocks()->at(i);
- v->VisitPointers(block, &block[kHandleBlockSize]);
- }
-
- // Iterate over live handles in the last block (if any).
- if (!blocks()->is_empty()) {
- v->VisitPointers(blocks()->last(), handle_scope_data_.next);
- }
-
- if (!saved_contexts_.is_empty()) {
- Object** start = reinterpret_cast<Object**>(&saved_contexts_.first());
- v->VisitPointers(start, start + saved_contexts_.length());
- }
-}
-
-
-void HandleScopeImplementer::Iterate(ObjectVisitor* v) {
- v8::ImplementationUtilities::HandleScopeData* current =
- Isolate::Current()->handle_scope_data();
- handle_scope_data_ = *current;
- IterateThis(v);
-}
-
-
-char* HandleScopeImplementer::Iterate(ObjectVisitor* v, char* storage) {
- HandleScopeImplementer* scope_implementer =
- reinterpret_cast<HandleScopeImplementer*>(storage);
- scope_implementer->IterateThis(v);
- return storage + ArchiveSpacePerThread();
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/api.h b/src/3rdparty/v8/src/api.h
deleted file mode 100644
index 6d46713..0000000
--- a/src/3rdparty/v8/src/api.h
+++ /dev/null
@@ -1,572 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_API_H_
-#define V8_API_H_
-
-#include "apiutils.h"
-#include "factory.h"
-
-#include "../include/v8-testing.h"
-
-namespace v8 {
-
-// Constants used in the implementation of the API. The most natural thing
-// would usually be to place these with the classes that use them, but
-// we want to keep them out of v8.h because it is an externally
-// visible file.
-class Consts {
- public:
- enum TemplateType {
- FUNCTION_TEMPLATE = 0,
- OBJECT_TEMPLATE = 1
- };
-};
-
-
-// Utilities for working with neander-objects, primitive
-// env-independent JSObjects used by the api.
-class NeanderObject {
- public:
- explicit NeanderObject(int size);
- inline NeanderObject(v8::internal::Handle<v8::internal::Object> obj);
- inline NeanderObject(v8::internal::Object* obj);
- inline v8::internal::Object* get(int index);
- inline void set(int index, v8::internal::Object* value);
- inline v8::internal::Handle<v8::internal::JSObject> value() { return value_; }
- int size();
- private:
- v8::internal::Handle<v8::internal::JSObject> value_;
-};
-
-
-// Utilities for working with neander-arrays, a simple extensible
-// array abstraction built on neander-objects.
-class NeanderArray {
- public:
- NeanderArray();
- inline NeanderArray(v8::internal::Handle<v8::internal::Object> obj);
- inline v8::internal::Handle<v8::internal::JSObject> value() {
- return obj_.value();
- }
-
- void add(v8::internal::Handle<v8::internal::Object> value);
-
- int length();
-
- v8::internal::Object* get(int index);
- // Change the value at an index to undefined value. If the index is
- // out of bounds, the request is ignored. Returns the old value.
- void set(int index, v8::internal::Object* value);
- private:
- NeanderObject obj_;
-};
-
-
-NeanderObject::NeanderObject(v8::internal::Handle<v8::internal::Object> obj)
- : value_(v8::internal::Handle<v8::internal::JSObject>::cast(obj)) { }
-
-
-NeanderObject::NeanderObject(v8::internal::Object* obj)
- : value_(v8::internal::Handle<v8::internal::JSObject>(
- v8::internal::JSObject::cast(obj))) { }
-
-
-NeanderArray::NeanderArray(v8::internal::Handle<v8::internal::Object> obj)
- : obj_(obj) { }
-
-
-v8::internal::Object* NeanderObject::get(int offset) {
- ASSERT(value()->HasFastElements());
- return v8::internal::FixedArray::cast(value()->elements())->get(offset);
-}
-
-
-void NeanderObject::set(int offset, v8::internal::Object* value) {
- ASSERT(value_->HasFastElements());
- v8::internal::FixedArray::cast(value_->elements())->set(offset, value);
-}
-
-
-template <typename T> static inline T ToCData(v8::internal::Object* obj) {
- STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
- return reinterpret_cast<T>(
- reinterpret_cast<intptr_t>(v8::internal::Proxy::cast(obj)->proxy()));
-}
-
-
-template <typename T>
-static inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
- STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
- return FACTORY->NewProxy(
- reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
-}
-
-
-class ApiFunction {
- public:
- explicit ApiFunction(v8::internal::Address addr) : addr_(addr) { }
- v8::internal::Address address() { return addr_; }
- private:
- v8::internal::Address addr_;
-};
-
-
-enum ExtensionTraversalState {
- UNVISITED, VISITED, INSTALLED
-};
-
-
-class RegisteredExtension {
- public:
- explicit RegisteredExtension(Extension* extension);
- static void Register(RegisteredExtension* that);
- Extension* extension() { return extension_; }
- RegisteredExtension* next() { return next_; }
- RegisteredExtension* next_auto() { return next_auto_; }
- ExtensionTraversalState state() { return state_; }
- void set_state(ExtensionTraversalState value) { state_ = value; }
- static RegisteredExtension* first_extension() { return first_extension_; }
- private:
- Extension* extension_;
- RegisteredExtension* next_;
- RegisteredExtension* next_auto_;
- ExtensionTraversalState state_;
- static RegisteredExtension* first_extension_;
-};
-
-
-class Utils {
- public:
- static bool ReportApiFailure(const char* location, const char* message);
-
- static Local<FunctionTemplate> ToFunctionTemplate(NeanderObject obj);
- static Local<ObjectTemplate> ToObjectTemplate(NeanderObject obj);
-
- static inline Local<Context> ToLocal(
- v8::internal::Handle<v8::internal::Context> obj);
- static inline Local<Value> ToLocal(
- v8::internal::Handle<v8::internal::Object> obj);
- static inline Local<Function> ToLocal(
- v8::internal::Handle<v8::internal::JSFunction> obj);
- static inline Local<String> ToLocal(
- v8::internal::Handle<v8::internal::String> obj);
- static inline Local<RegExp> ToLocal(
- v8::internal::Handle<v8::internal::JSRegExp> obj);
- static inline Local<Object> ToLocal(
- v8::internal::Handle<v8::internal::JSObject> obj);
- static inline Local<Array> ToLocal(
- v8::internal::Handle<v8::internal::JSArray> obj);
- static inline Local<External> ToLocal(
- v8::internal::Handle<v8::internal::Proxy> obj);
- static inline Local<Message> MessageToLocal(
- v8::internal::Handle<v8::internal::Object> obj);
- static inline Local<StackTrace> StackTraceToLocal(
- v8::internal::Handle<v8::internal::JSArray> obj);
- static inline Local<StackFrame> StackFrameToLocal(
- v8::internal::Handle<v8::internal::JSObject> obj);
- static inline Local<Number> NumberToLocal(
- v8::internal::Handle<v8::internal::Object> obj);
- static inline Local<Integer> IntegerToLocal(
- v8::internal::Handle<v8::internal::Object> obj);
- static inline Local<Uint32> Uint32ToLocal(
- v8::internal::Handle<v8::internal::Object> obj);
- static inline Local<FunctionTemplate> ToLocal(
- v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
- static inline Local<ObjectTemplate> ToLocal(
- v8::internal::Handle<v8::internal::ObjectTemplateInfo> obj);
- static inline Local<Signature> ToLocal(
- v8::internal::Handle<v8::internal::SignatureInfo> obj);
- static inline Local<TypeSwitch> ToLocal(
- v8::internal::Handle<v8::internal::TypeSwitchInfo> obj);
-
- static inline v8::internal::Handle<v8::internal::TemplateInfo>
- OpenHandle(const Template* that);
- static inline v8::internal::Handle<v8::internal::FunctionTemplateInfo>
- OpenHandle(const FunctionTemplate* that);
- static inline v8::internal::Handle<v8::internal::ObjectTemplateInfo>
- OpenHandle(const ObjectTemplate* that);
- static inline v8::internal::Handle<v8::internal::Object>
- OpenHandle(const Data* data);
- static inline v8::internal::Handle<v8::internal::JSRegExp>
- OpenHandle(const RegExp* data);
- static inline v8::internal::Handle<v8::internal::JSObject>
- OpenHandle(const v8::Object* data);
- static inline v8::internal::Handle<v8::internal::JSArray>
- OpenHandle(const v8::Array* data);
- static inline v8::internal::Handle<v8::internal::String>
- OpenHandle(const String* data);
- static inline v8::internal::Handle<v8::internal::Object>
- OpenHandle(const Script* data);
- static inline v8::internal::Handle<v8::internal::JSFunction>
- OpenHandle(const Function* data);
- static inline v8::internal::Handle<v8::internal::JSObject>
- OpenHandle(const Message* message);
- static inline v8::internal::Handle<v8::internal::JSArray>
- OpenHandle(const StackTrace* stack_trace);
- static inline v8::internal::Handle<v8::internal::JSObject>
- OpenHandle(const StackFrame* stack_frame);
- static inline v8::internal::Handle<v8::internal::Context>
- OpenHandle(const v8::Context* context);
- static inline v8::internal::Handle<v8::internal::SignatureInfo>
- OpenHandle(const v8::Signature* sig);
- static inline v8::internal::Handle<v8::internal::TypeSwitchInfo>
- OpenHandle(const v8::TypeSwitch* that);
- static inline v8::internal::Handle<v8::internal::Proxy>
- OpenHandle(const v8::External* that);
-};
-
-
-template <class T>
-static inline T* ToApi(v8::internal::Handle<v8::internal::Object> obj) {
- return reinterpret_cast<T*>(obj.location());
-}
-
-
-template <class T>
-v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom(
- v8::HandleScope* scope) {
- v8::internal::Handle<T> handle;
- if (!is_null()) {
- handle = *this;
- }
- return Utils::OpenHandle(*scope->Close(Utils::ToLocal(handle)));
-}
-
-
-// Implementations of ToLocal
-
-#define MAKE_TO_LOCAL(Name, From, To) \
- Local<v8::To> Utils::Name(v8::internal::Handle<v8::internal::From> obj) { \
- ASSERT(obj.is_null() || !obj->IsTheHole()); \
- return Local<To>(reinterpret_cast<To*>(obj.location())); \
- }
-
-MAKE_TO_LOCAL(ToLocal, Context, Context)
-MAKE_TO_LOCAL(ToLocal, Object, Value)
-MAKE_TO_LOCAL(ToLocal, JSFunction, Function)
-MAKE_TO_LOCAL(ToLocal, String, String)
-MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
-MAKE_TO_LOCAL(ToLocal, JSObject, Object)
-MAKE_TO_LOCAL(ToLocal, JSArray, Array)
-MAKE_TO_LOCAL(ToLocal, Proxy, External)
-MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
-MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
-MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
-MAKE_TO_LOCAL(ToLocal, TypeSwitchInfo, TypeSwitch)
-MAKE_TO_LOCAL(MessageToLocal, Object, Message)
-MAKE_TO_LOCAL(StackTraceToLocal, JSArray, StackTrace)
-MAKE_TO_LOCAL(StackFrameToLocal, JSObject, StackFrame)
-MAKE_TO_LOCAL(NumberToLocal, Object, Number)
-MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
-MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
-
-#undef MAKE_TO_LOCAL
-
-
-// Implementations of OpenHandle
-
-#define MAKE_OPEN_HANDLE(From, To) \
- v8::internal::Handle<v8::internal::To> Utils::OpenHandle(\
- const v8::From* that) { \
- return v8::internal::Handle<v8::internal::To>( \
- reinterpret_cast<v8::internal::To**>(const_cast<v8::From*>(that))); \
- }
-
-MAKE_OPEN_HANDLE(Template, TemplateInfo)
-MAKE_OPEN_HANDLE(FunctionTemplate, FunctionTemplateInfo)
-MAKE_OPEN_HANDLE(ObjectTemplate, ObjectTemplateInfo)
-MAKE_OPEN_HANDLE(Signature, SignatureInfo)
-MAKE_OPEN_HANDLE(TypeSwitch, TypeSwitchInfo)
-MAKE_OPEN_HANDLE(Data, Object)
-MAKE_OPEN_HANDLE(RegExp, JSRegExp)
-MAKE_OPEN_HANDLE(Object, JSObject)
-MAKE_OPEN_HANDLE(Array, JSArray)
-MAKE_OPEN_HANDLE(String, String)
-MAKE_OPEN_HANDLE(Script, Object)
-MAKE_OPEN_HANDLE(Function, JSFunction)
-MAKE_OPEN_HANDLE(Message, JSObject)
-MAKE_OPEN_HANDLE(Context, Context)
-MAKE_OPEN_HANDLE(External, Proxy)
-MAKE_OPEN_HANDLE(StackTrace, JSArray)
-MAKE_OPEN_HANDLE(StackFrame, JSObject)
-
-#undef MAKE_OPEN_HANDLE
-
-
-namespace internal {
-
-// Tracks string usage to help make better decisions when
-// externalizing strings.
-//
-// Implementation note: internally this class only tracks fresh
-// strings and keeps a single use counter for them.
-class StringTracker {
- public:
- // Records that the given string's characters were copied to some
- // external buffer. If this happens often we should honor
- // externalization requests for the string.
- void RecordWrite(Handle<String> string) {
- Address address = reinterpret_cast<Address>(*string);
- Address top = isolate_->heap()->NewSpaceTop();
- if (IsFreshString(address, top)) {
- IncrementUseCount(top);
- }
- }
-
- // Estimates freshness and use frequency of the given string based
- // on how close it is to the new space top and the recorded usage
- // history.
- inline bool IsFreshUnusedString(Handle<String> string) {
- Address address = reinterpret_cast<Address>(*string);
- Address top = isolate_->heap()->NewSpaceTop();
- return IsFreshString(address, top) && IsUseCountLow(top);
- }
-
- private:
- StringTracker() : use_count_(0), last_top_(NULL), isolate_(NULL) { }
-
- static inline bool IsFreshString(Address string, Address top) {
- return top - kFreshnessLimit <= string && string <= top;
- }
-
- inline bool IsUseCountLow(Address top) {
- if (last_top_ != top) return true;
- return use_count_ < kUseLimit;
- }
-
- inline void IncrementUseCount(Address top) {
- if (last_top_ != top) {
- use_count_ = 0;
- last_top_ = top;
- }
- ++use_count_;
- }
-
- // Single use counter shared by all fresh strings.
- int use_count_;
-
- // Last new space top when the use count above was valid.
- Address last_top_;
-
- Isolate* isolate_;
-
- // How close to the new space top a fresh string has to be.
- static const int kFreshnessLimit = 1024;
-
- // The number of uses required to consider a string useful.
- static const int kUseLimit = 32;
-
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(StringTracker);
-};
-
-
-// This class is here in order to be able to declare it a friend of
-// HandleScope. Moving these methods to be members of HandleScope would be
-// neat in some ways, but it would expose internal implementation details in
-// our public header file, which is undesirable.
-//
-// An isolate has a single instance of this class to hold the current thread's
-// data. In multithreaded V8 programs this data is copied in and out of storage
-// so that the currently executing thread always has its own copy of this
-// data.
-ISOLATED_CLASS HandleScopeImplementer {
- public:
-
- HandleScopeImplementer()
- : blocks_(0),
- entered_contexts_(0),
- saved_contexts_(0),
- spare_(NULL),
- ignore_out_of_memory_(false),
- call_depth_(0) { }
-
- // Threading support for handle data.
- static int ArchiveSpacePerThread();
- char* RestoreThread(char* from);
- char* ArchiveThread(char* to);
- void FreeThreadResources();
-
- // Garbage collection support.
- void Iterate(v8::internal::ObjectVisitor* v);
- static char* Iterate(v8::internal::ObjectVisitor* v, char* data);
-
-
- inline internal::Object** GetSpareOrNewBlock();
- inline void DeleteExtensions(internal::Object** prev_limit);
-
- inline void IncrementCallDepth() {call_depth_++;}
- inline void DecrementCallDepth() {call_depth_--;}
- inline bool CallDepthIsZero() { return call_depth_ == 0; }
-
- inline void EnterContext(Handle<Object> context);
- inline bool LeaveLastContext();
-
- // Returns the last entered context or an empty handle if no
- // contexts have been entered.
- inline Handle<Object> LastEnteredContext();
-
- inline void SaveContext(Context* context);
- inline Context* RestoreContext();
- inline bool HasSavedContexts();
-
- inline List<internal::Object**>* blocks() { return &blocks_; }
- inline bool ignore_out_of_memory() { return ignore_out_of_memory_; }
- inline void set_ignore_out_of_memory(bool value) {
- ignore_out_of_memory_ = value;
- }
-
- private:
- void ResetAfterArchive() {
- blocks_.Initialize(0);
- entered_contexts_.Initialize(0);
- saved_contexts_.Initialize(0);
- spare_ = NULL;
- ignore_out_of_memory_ = false;
- call_depth_ = 0;
- }
-
- void Free() {
- ASSERT(blocks_.length() == 0);
- ASSERT(entered_contexts_.length() == 0);
- ASSERT(saved_contexts_.length() == 0);
- blocks_.Free();
- entered_contexts_.Free();
- saved_contexts_.Free();
- if (spare_ != NULL) {
- DeleteArray(spare_);
- spare_ = NULL;
- }
- ASSERT(call_depth_ == 0);
- }
-
- List<internal::Object**> blocks_;
- // Used as a stack to keep track of entered contexts.
- List<Handle<Object> > entered_contexts_;
- // Used as a stack to keep track of saved contexts.
- List<Context*> saved_contexts_;
- Object** spare_;
- bool ignore_out_of_memory_;
- int call_depth_;
- // This is only used for threading support.
- v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
-
- void IterateThis(ObjectVisitor* v);
- char* RestoreThreadHelper(char* from);
- char* ArchiveThreadHelper(char* to);
-
- DISALLOW_COPY_AND_ASSIGN(HandleScopeImplementer);
-};
-
-
-static const int kHandleBlockSize = v8::internal::KB - 2; // fit in one page
-
-
-void HandleScopeImplementer::SaveContext(Context* context) {
- saved_contexts_.Add(context);
-}
-
-
-Context* HandleScopeImplementer::RestoreContext() {
- return saved_contexts_.RemoveLast();
-}
-
-
-bool HandleScopeImplementer::HasSavedContexts() {
- return !saved_contexts_.is_empty();
-}
-
-
-void HandleScopeImplementer::EnterContext(Handle<Object> context) {
- entered_contexts_.Add(context);
-}
-
-
-bool HandleScopeImplementer::LeaveLastContext() {
- if (entered_contexts_.is_empty()) return false;
- entered_contexts_.RemoveLast();
- return true;
-}
-
-
-Handle<Object> HandleScopeImplementer::LastEnteredContext() {
- if (entered_contexts_.is_empty()) return Handle<Object>::null();
- return entered_contexts_.last();
-}
-
-
-// If there's a spare block, use it for growing the current scope.
-internal::Object** HandleScopeImplementer::GetSpareOrNewBlock() {
- internal::Object** block = (spare_ != NULL) ?
- spare_ :
- NewArray<internal::Object*>(kHandleBlockSize);
- spare_ = NULL;
- return block;
-}
-
-
-void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
- while (!blocks_.is_empty()) {
- internal::Object** block_start = blocks_.last();
- internal::Object** block_limit = block_start + kHandleBlockSize;
-#ifdef DEBUG
- // NoHandleAllocation may make the prev_limit to point inside the block.
- if (block_start <= prev_limit && prev_limit <= block_limit) break;
-#else
- if (prev_limit == block_limit) break;
-#endif
-
- blocks_.RemoveLast();
-#ifdef DEBUG
- v8::ImplementationUtilities::ZapHandleRange(block_start, block_limit);
-#endif
- if (spare_ != NULL) {
- DeleteArray(spare_);
- }
- spare_ = block_start;
- }
- ASSERT((blocks_.is_empty() && prev_limit == NULL) ||
- (!blocks_.is_empty() && prev_limit != NULL));
-}
-
-
-class Testing {
- public:
- static v8::Testing::StressType stress_type() { return stress_type_; }
- static void set_stress_type(v8::Testing::StressType stress_type) {
- stress_type_ = stress_type;
- }
-
- private:
- static v8::Testing::StressType stress_type_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_API_H_
diff --git a/src/3rdparty/v8/src/apinatives.js b/src/3rdparty/v8/src/apinatives.js
deleted file mode 100644
index ca2bbf5..0000000
--- a/src/3rdparty/v8/src/apinatives.js
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file contains infrastructure used by the API. See
-// v8natives.js for an explanation of these files are processed and
-// loaded.
-
-
-function CreateDate(time) {
- var date = new $Date();
- date.setTime(time);
- return date;
-}
-
-
-const kApiFunctionCache = {};
-const functionCache = kApiFunctionCache;
-
-
-function Instantiate(data, name) {
- if (!%IsTemplate(data)) return data;
- var tag = %GetTemplateField(data, kApiTagOffset);
- switch (tag) {
- case kFunctionTag:
- return InstantiateFunction(data, name);
- case kNewObjectTag:
- var Constructor = %GetTemplateField(data, kApiConstructorOffset);
- var result = Constructor ? new (Instantiate(Constructor))() : {};
- ConfigureTemplateInstance(result, data);
- result = %ToFastProperties(result);
- return result;
- default:
- throw 'Unknown API tag <' + tag + '>';
- }
-}
-
-
-function InstantiateFunction(data, name) {
- // We need a reference to kApiFunctionCache in the stack frame
- // if we need to bail out from a stack overflow.
- var cache = kApiFunctionCache;
- var serialNumber = %GetTemplateField(data, kApiSerialNumberOffset);
- var isFunctionCached =
- (serialNumber in cache) && (cache[serialNumber] != kUninitialized);
- if (!isFunctionCached) {
- try {
- cache[serialNumber] = null;
- var fun = %CreateApiFunction(data);
- if (name) %FunctionSetName(fun, name);
- cache[serialNumber] = fun;
- var prototype = %GetTemplateField(data, kApiPrototypeTemplateOffset);
- fun.prototype = prototype ? Instantiate(prototype) : {};
- %SetProperty(fun.prototype, "constructor", fun, DONT_ENUM);
- var parent = %GetTemplateField(data, kApiParentTemplateOffset);
- if (parent) {
- var parent_fun = Instantiate(parent);
- fun.prototype.__proto__ = parent_fun.prototype;
- }
- ConfigureTemplateInstance(fun, data);
- } catch (e) {
- cache[serialNumber] = kUninitialized;
- throw e;
- }
- }
- return cache[serialNumber];
-}
-
-
-function ConfigureTemplateInstance(obj, data) {
- var properties = %GetTemplateField(data, kApiPropertyListOffset);
- if (properties) {
- // Disable access checks while instantiating the object.
- var requires_access_checks = %DisableAccessChecks(obj);
- try {
- for (var i = 0; i < properties[0]; i += 3) {
- var name = properties[i + 1];
- var prop_data = properties[i + 2];
- var attributes = properties[i + 3];
- var value = Instantiate(prop_data, name);
- %SetProperty(obj, name, value, attributes);
- }
- } finally {
- if (requires_access_checks) %EnableAccessChecks(obj);
- }
- }
-}
diff --git a/src/3rdparty/v8/src/apiutils.h b/src/3rdparty/v8/src/apiutils.h
deleted file mode 100644
index 68579af..0000000
--- a/src/3rdparty/v8/src/apiutils.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_APIUTILS_H_
-#define V8_APIUTILS_H_
-
-namespace v8 {
-class ImplementationUtilities {
- public:
- static int GetNameCount(ExtensionConfiguration* that) {
- return that->name_count_;
- }
-
- static const char** GetNames(ExtensionConfiguration* that) {
- return that->names_;
- }
-
- // Packs additional parameters for the NewArguments function. |implicit_args|
- // is a pointer to the last element of 3-elements array controlled by GC.
- static void PrepareArgumentsData(internal::Object** implicit_args,
- internal::Object* data,
- internal::JSFunction* callee,
- internal::Object* holder) {
- implicit_args[v8::Arguments::kDataIndex] = data;
- implicit_args[v8::Arguments::kCalleeIndex] = callee;
- implicit_args[v8::Arguments::kHolderIndex] = holder;
- }
-
- static v8::Arguments NewArguments(internal::Object** implicit_args,
- internal::Object** argv, int argc,
- bool is_construct_call) {
- ASSERT(implicit_args[v8::Arguments::kCalleeIndex]->IsJSFunction());
- ASSERT(implicit_args[v8::Arguments::kHolderIndex]->IsHeapObject());
-
- return v8::Arguments(implicit_args, argv, argc, is_construct_call);
- }
-
- // Introduce an alias for the handle scope data to allow non-friends
- // to access the HandleScope data.
- typedef v8::HandleScope::Data HandleScopeData;
-
-#ifdef DEBUG
- static void ZapHandleRange(internal::Object** begin, internal::Object** end);
-#endif
-};
-
-} // namespace v8
-
-#endif // V8_APIUTILS_H_
diff --git a/src/3rdparty/v8/src/arguments.h b/src/3rdparty/v8/src/arguments.h
deleted file mode 100644
index a7a30e2..0000000
--- a/src/3rdparty/v8/src/arguments.h
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARGUMENTS_H_
-#define V8_ARGUMENTS_H_
-
-namespace v8 {
-namespace internal {
-
-// Arguments provides access to runtime call parameters.
-//
-// It uses the fact that the instance fields of Arguments
-// (length_, arguments_) are "overlayed" with the parameters
-// (no. of parameters, and the parameter pointer) passed so
-// that inside the C++ function, the parameters passed can
-// be accessed conveniently:
-//
-// Object* Runtime_function(Arguments args) {
-// ... use args[i] here ...
-// }
-
-class Arguments BASE_EMBEDDED {
- public:
- Arguments(int length, Object** arguments)
- : length_(length), arguments_(arguments) { }
-
- Object*& operator[] (int index) {
- ASSERT(0 <= index && index < length_);
- return arguments_[-index];
- }
-
- template <class S> Handle<S> at(int index) {
- Object** value = &((*this)[index]);
- // This cast checks that the object we're accessing does indeed have the
- // expected type.
- S::cast(*value);
- return Handle<S>(reinterpret_cast<S**>(value));
- }
-
- // Get the total number of arguments including the receiver.
- int length() const { return length_; }
-
- Object** arguments() { return arguments_; }
- private:
- int length_;
- Object** arguments_;
-};
-
-
-// Custom arguments replicate a small segment of stack that can be
-// accessed through an Arguments object the same way the actual stack
-// can.
-class CustomArguments : public Relocatable {
- public:
- inline CustomArguments(Isolate* isolate,
- Object* data,
- Object* self,
- JSObject* holder) : Relocatable(isolate) {
- values_[2] = self;
- values_[1] = holder;
- values_[0] = data;
- }
-
- inline explicit CustomArguments(Isolate* isolate) : Relocatable(isolate) {
-#ifdef DEBUG
- for (size_t i = 0; i < ARRAY_SIZE(values_); i++) {
- values_[i] = reinterpret_cast<Object*>(kZapValue);
- }
-#endif
- }
-
- void IterateInstance(ObjectVisitor* v);
- Object** end() { return values_ + ARRAY_SIZE(values_) - 1; }
- private:
- Object* values_[3];
-};
-
-
-#define DECLARE_RUNTIME_FUNCTION(Type, Name) \
-Type Name(Arguments args, Isolate* isolate)
-
-
-#define RUNTIME_FUNCTION(Type, Name) \
-Type Name(Arguments args, Isolate* isolate)
-
-
-#define RUNTIME_ARGUMENTS(isolate, args) args, isolate
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARGUMENTS_H_
diff --git a/src/3rdparty/v8/src/arm/assembler-arm-inl.h b/src/3rdparty/v8/src/arm/assembler-arm-inl.h
deleted file mode 100644
index 3e19a45..0000000
--- a/src/3rdparty/v8/src/arm/assembler-arm-inl.h
+++ /dev/null
@@ -1,353 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-
-#ifndef V8_ARM_ASSEMBLER_ARM_INL_H_
-#define V8_ARM_ASSEMBLER_ARM_INL_H_
-
-#include "arm/assembler-arm.h"
-#include "cpu.h"
-#include "debug.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-void RelocInfo::apply(intptr_t delta) {
- if (RelocInfo::IsInternalReference(rmode_)) {
- // absolute code pointer inside code object moves with the code object.
- int32_t* p = reinterpret_cast<int32_t*>(pc_);
- *p += delta; // relocate entry
- }
- // We do not use pc relative addressing on ARM, so there is
- // nothing else to do.
-}
-
-
-Address RelocInfo::target_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- return Assembler::target_address_at(pc_);
-}
-
-
-Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
-}
-
-
-int RelocInfo::target_address_size() {
- return Assembler::kExternalTargetSize;
-}
-
-
-void RelocInfo::set_target_address(Address target) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- Assembler::set_target_address_at(pc_, target);
-}
-
-
-Object* RelocInfo::target_object() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Memory::Object_at(Assembler::target_address_address_at(pc_));
-}
-
-
-Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
-}
-
-
-Object** RelocInfo::target_object_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object**>(Assembler::target_address_address_at(pc_));
-}
-
-
-void RelocInfo::set_target_object(Object* target) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
-}
-
-
-Address* RelocInfo::target_reference_address() {
- ASSERT(rmode_ == EXTERNAL_REFERENCE);
- return reinterpret_cast<Address*>(Assembler::target_address_address_at(pc_));
-}
-
-
-Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- return Handle<JSGlobalPropertyCell>(
- reinterpret_cast<JSGlobalPropertyCell**>(address));
-}
-
-
-JSGlobalPropertyCell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- Object* object = HeapObject::FromAddress(
- address - JSGlobalPropertyCell::kValueOffset);
- return reinterpret_cast<JSGlobalPropertyCell*>(object);
-}
-
-
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
- Memory::Address_at(pc_) = address;
-}
-
-
-Address RelocInfo::call_address() {
- // The 2 instructions offset assumes patched debug break slot or return
- // sequence.
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
-}
-
-
-void RelocInfo::set_call_address(Address target) {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
-}
-
-
-Object* RelocInfo::call_object() {
- return *call_object_address();
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
- *call_object_address() = target;
-}
-
-
-Object** RelocInfo::call_object_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
-}
-
-
-bool RelocInfo::IsPatchedReturnSequence() {
- Instr current_instr = Assembler::instr_at(pc_);
- Instr next_instr = Assembler::instr_at(pc_ + Assembler::kInstrSize);
-#ifdef USE_BLX
- // A patched return sequence is:
- // ldr ip, [pc, #0]
- // blx ip
- return ((current_instr & kLdrPCMask) == kLdrPCPattern)
- && ((next_instr & kBlxRegMask) == kBlxRegPattern);
-#else
- // A patched return sequence is:
- // mov lr, pc
- // ldr pc, [pc, #-4]
- return (current_instr == kMovLrPc)
- && ((next_instr & kLdrPCMask) == kLdrPCPattern);
-#endif
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- Instr current_instr = Assembler::instr_at(pc_);
- return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
-}
-
-
-void RelocInfo::Visit(ObjectVisitor* visitor) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitPointer(target_object_address());
- } else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- visitor->VisitGlobalPropertyCell(this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(target_reference_address());
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
- } else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
- visitor->VisitDebugTarget(this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
- visitor->VisitRuntimeEntry(this);
- }
-}
-
-
-template<typename StaticVisitor>
-void RelocInfo::Visit(Heap* heap) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitPointer(heap, target_object_address());
- } else if (RelocInfo::IsCodeTarget(mode)) {
- StaticVisitor::VisitCodeTarget(heap, this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(heap, this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- StaticVisitor::VisitExternalReference(target_reference_address());
-#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (heap->isolate()->debug()->has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
- StaticVisitor::VisitDebugTarget(heap, this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
- StaticVisitor::VisitRuntimeEntry(this);
- }
-}
-
-
-Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
- rm_ = no_reg;
- imm32_ = immediate;
- rmode_ = rmode;
-}
-
-
-Operand::Operand(const ExternalReference& f) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(f.address());
- rmode_ = RelocInfo::EXTERNAL_REFERENCE;
-}
-
-
-Operand::Operand(Smi* value) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE;
-}
-
-
-Operand::Operand(Register rm) {
- rm_ = rm;
- rs_ = no_reg;
- shift_op_ = LSL;
- shift_imm_ = 0;
-}
-
-
-bool Operand::is_reg() const {
- return rm_.is_valid() &&
- rs_.is(no_reg) &&
- shift_op_ == LSL &&
- shift_imm_ == 0;
-}
-
-
-void Assembler::CheckBuffer() {
- if (buffer_space() <= kGap) {
- GrowBuffer();
- }
- if (pc_offset() >= next_buffer_check_) {
- CheckConstPool(false, true);
- }
-}
-
-
-void Assembler::emit(Instr x) {
- CheckBuffer();
- *reinterpret_cast<Instr*>(pc_) = x;
- pc_ += kInstrSize;
-}
-
-
-Address Assembler::target_address_address_at(Address pc) {
- Address target_pc = pc;
- Instr instr = Memory::int32_at(target_pc);
- // If we have a bx instruction, the instruction before the bx is
- // what we need to patch.
- static const int32_t kBxInstMask = 0x0ffffff0;
- static const int32_t kBxInstPattern = 0x012fff10;
- if ((instr & kBxInstMask) == kBxInstPattern) {
- target_pc -= kInstrSize;
- instr = Memory::int32_at(target_pc);
- }
-
-#ifdef USE_BLX
- // If we have a blx instruction, the instruction before it is
- // what needs to be patched.
- if ((instr & kBlxRegMask) == kBlxRegPattern) {
- target_pc -= kInstrSize;
- instr = Memory::int32_at(target_pc);
- }
-#endif
-
- ASSERT(IsLdrPcImmediateOffset(instr));
- int offset = instr & 0xfff; // offset_12 is unsigned
- if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
- // Verify that the constant pool comes after the instruction referencing it.
- ASSERT(offset >= -4);
- return target_pc + offset + 8;
-}
-
-
-Address Assembler::target_address_at(Address pc) {
- return Memory::Address_at(target_address_address_at(pc));
-}
-
-
-void Assembler::set_target_at(Address constant_pool_entry,
- Address target) {
- Memory::Address_at(constant_pool_entry) = target;
-}
-
-
-void Assembler::set_target_address_at(Address pc, Address target) {
- Memory::Address_at(target_address_address_at(pc)) = target;
- // Intuitively, we would think it is necessary to flush the instruction cache
- // after patching a target address in the code as follows:
- // CPU::FlushICache(pc, sizeof(target));
- // However, on ARM, no instruction was actually patched by the assignment
- // above; the target address is not part of an instruction, it is patched in
- // the constant pool and is read via a data access; the instruction accessing
- // this address in the constant pool remains unchanged.
-}
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_ASSEMBLER_ARM_INL_H_
diff --git a/src/3rdparty/v8/src/arm/assembler-arm.cc b/src/3rdparty/v8/src/arm/assembler-arm.cc
deleted file mode 100644
index 49b1975..0000000
--- a/src/3rdparty/v8/src/arm/assembler-arm.cc
+++ /dev/null
@@ -1,2795 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "arm/assembler-arm-inl.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef DEBUG
-bool CpuFeatures::initialized_ = false;
-#endif
-unsigned CpuFeatures::supported_ = 0;
-unsigned CpuFeatures::found_by_runtime_probing_ = 0;
-
-
-#ifdef __arm__
-static uint64_t CpuFeaturesImpliedByCompiler() {
- uint64_t answer = 0;
-#ifdef CAN_USE_ARMV7_INSTRUCTIONS
- answer |= 1u << ARMv7;
-#endif // def CAN_USE_ARMV7_INSTRUCTIONS
- // If the compiler is allowed to use VFP then we can use VFP too in our code
- // generation even when generating snapshots. This won't work for cross
- // compilation.
-#if defined(__VFP_FP__) && !defined(__SOFTFP__)
- answer |= 1u << VFP3;
-#endif // defined(__VFP_FP__) && !defined(__SOFTFP__)
-#ifdef CAN_USE_VFP_INSTRUCTIONS
- answer |= 1u << VFP3;
-#endif // def CAN_USE_VFP_INSTRUCTIONS
- return answer;
-}
-#endif // def __arm__
-
-
-void CpuFeatures::Probe() {
- ASSERT(!initialized_);
-#ifdef DEBUG
- initialized_ = true;
-#endif
-#ifndef __arm__
- // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
- if (FLAG_enable_vfp3) {
- supported_ |= 1u << VFP3;
- }
- // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
- if (FLAG_enable_armv7) {
- supported_ |= 1u << ARMv7;
- }
-#else // def __arm__
- if (Serializer::enabled()) {
- supported_ |= OS::CpuFeaturesImpliedByPlatform();
- supported_ |= CpuFeaturesImpliedByCompiler();
- return; // No features if we might serialize.
- }
-
- if (OS::ArmCpuHasFeature(VFP3)) {
- // This implementation also sets the VFP flags if
- // runtime detection of VFP returns true.
- supported_ |= 1u << VFP3;
- found_by_runtime_probing_ |= 1u << VFP3;
- }
-
- if (OS::ArmCpuHasFeature(ARMv7)) {
- supported_ |= 1u << ARMv7;
- found_by_runtime_probing_ |= 1u << ARMv7;
- }
-#endif
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo
-
-const int RelocInfo::kApplyMask = 0;
-
-
-bool RelocInfo::IsCodedSpecially() {
- // The deserializer needs to know whether a pointer is specially coded. Being
- // specially coded on ARM means that it is a movw/movt instruction. We don't
- // generate those yet.
- return false;
-}
-
-
-
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- // Patch the code at the current address with the supplied instructions.
- Instr* pc = reinterpret_cast<Instr*>(pc_);
- Instr* instr = reinterpret_cast<Instr*>(instructions);
- for (int i = 0; i < instruction_count; i++) {
- *(pc + i) = *(instr + i);
- }
-
- // Indicate that code has changed.
- CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
-}
-
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Patch the code at the current address with a call to the target.
- UNIMPLEMENTED();
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Operand and MemOperand
-// See assembler-arm-inl.h for inlined constructors
-
-Operand::Operand(Handle<Object> handle) {
- rm_ = no_reg;
- // Verify all Objects referred by code are NOT in new space.
- Object* obj = *handle;
- ASSERT(!HEAP->InNewSpace(obj));
- if (obj->IsHeapObject()) {
- imm32_ = reinterpret_cast<intptr_t>(handle.location());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
- } else {
- // no relocation needed
- imm32_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE;
- }
-}
-
-
-Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
- ASSERT(is_uint5(shift_imm));
- ASSERT(shift_op != ROR || shift_imm != 0); // use RRX if you mean it
- rm_ = rm;
- rs_ = no_reg;
- shift_op_ = shift_op;
- shift_imm_ = shift_imm & 31;
- if (shift_op == RRX) {
- // encoded as ROR with shift_imm == 0
- ASSERT(shift_imm == 0);
- shift_op_ = ROR;
- shift_imm_ = 0;
- }
-}
-
-
-Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
- ASSERT(shift_op != RRX);
- rm_ = rm;
- rs_ = no_reg;
- shift_op_ = shift_op;
- rs_ = rs;
-}
-
-
-MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
- rn_ = rn;
- rm_ = no_reg;
- offset_ = offset;
- am_ = am;
-}
-
-MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
- rn_ = rn;
- rm_ = rm;
- shift_op_ = LSL;
- shift_imm_ = 0;
- am_ = am;
-}
-
-
-MemOperand::MemOperand(Register rn, Register rm,
- ShiftOp shift_op, int shift_imm, AddrMode am) {
- ASSERT(is_uint5(shift_imm));
- rn_ = rn;
- rm_ = rm;
- shift_op_ = shift_op;
- shift_imm_ = shift_imm & 31;
- am_ = am;
-}
-
-
-// -----------------------------------------------------------------------------
-// Specific instructions, constants, and masks.
-
-// add(sp, sp, 4) instruction (aka Pop())
-const Instr kPopInstruction =
- al | PostIndex | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
-// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
-// register r is not encoded.
-const Instr kPushRegPattern =
- al | B26 | 4 | NegPreIndex | sp.code() * B16;
-// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
-// register r is not encoded.
-const Instr kPopRegPattern =
- al | B26 | L | 4 | PostIndex | sp.code() * B16;
-// mov lr, pc
-const Instr kMovLrPc = al | MOV | pc.code() | lr.code() * B12;
-// ldr rd, [pc, #offset]
-const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
-// blxcc rm
-const Instr kBlxRegMask =
- 15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
-const Instr kBlxRegPattern =
- B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
-const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
-const Instr kMovMvnPattern = 0xd * B21;
-const Instr kMovMvnFlip = B22;
-const Instr kMovLeaveCCMask = 0xdff * B16;
-const Instr kMovLeaveCCPattern = 0x1a0 * B16;
-const Instr kMovwMask = 0xff * B20;
-const Instr kMovwPattern = 0x30 * B20;
-const Instr kMovwLeaveCCFlip = 0x5 * B21;
-const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
-const Instr kCmpCmnPattern = 0x15 * B20;
-const Instr kCmpCmnFlip = B21;
-const Instr kAddSubFlip = 0x6 * B21;
-const Instr kAndBicFlip = 0xe * B21;
-
-// A mask for the Rd register for push, pop, ldr, str instructions.
-const Instr kLdrRegFpOffsetPattern =
- al | B26 | L | Offset | fp.code() * B16;
-const Instr kStrRegFpOffsetPattern =
- al | B26 | Offset | fp.code() * B16;
-const Instr kLdrRegFpNegOffsetPattern =
- al | B26 | L | NegOffset | fp.code() * B16;
-const Instr kStrRegFpNegOffsetPattern =
- al | B26 | NegOffset | fp.code() * B16;
-const Instr kLdrStrInstrTypeMask = 0xffff0000;
-const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
-const Instr kLdrStrOffsetMask = 0x00000fff;
-
-
-// Spare buffer.
-static const int kMinimalBufferSize = 4*KB;
-
-
-Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
- : AssemblerBase(arg_isolate),
- positions_recorder_(this),
- allow_peephole_optimization_(false),
- emit_debug_code_(FLAG_debug_code) {
- allow_peephole_optimization_ = FLAG_peephole_optimization;
- if (buffer == NULL) {
- // Do our own buffer management.
- if (buffer_size <= kMinimalBufferSize) {
- buffer_size = kMinimalBufferSize;
-
- if (isolate()->assembler_spare_buffer() != NULL) {
- buffer = isolate()->assembler_spare_buffer();
- isolate()->set_assembler_spare_buffer(NULL);
- }
- }
- if (buffer == NULL) {
- buffer_ = NewArray<byte>(buffer_size);
- } else {
- buffer_ = static_cast<byte*>(buffer);
- }
- buffer_size_ = buffer_size;
- own_buffer_ = true;
-
- } else {
- // Use externally provided buffer instead.
- ASSERT(buffer_size > 0);
- buffer_ = static_cast<byte*>(buffer);
- buffer_size_ = buffer_size;
- own_buffer_ = false;
- }
-
- // Setup buffer pointers.
- ASSERT(buffer_ != NULL);
- pc_ = buffer_;
- reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
- num_prinfo_ = 0;
- next_buffer_check_ = 0;
- const_pool_blocked_nesting_ = 0;
- no_const_pool_before_ = 0;
- last_const_pool_end_ = 0;
- last_bound_pos_ = 0;
-}
-
-
-Assembler::~Assembler() {
- ASSERT(const_pool_blocked_nesting_ == 0);
- if (own_buffer_) {
- if (isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- }
-}
-
-
-void Assembler::GetCode(CodeDesc* desc) {
- // Emit constant pool if necessary.
- CheckConstPool(true, false);
- ASSERT(num_prinfo_ == 0);
-
- // Setup code descriptor.
- desc->buffer = buffer_;
- desc->buffer_size = buffer_size_;
- desc->instr_size = pc_offset();
- desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
-}
-
-
-void Assembler::Align(int m) {
- ASSERT(m >= 4 && IsPowerOf2(m));
- while ((pc_offset() & (m - 1)) != 0) {
- nop();
- }
-}
-
-
-void Assembler::CodeTargetAlign() {
- // Preferred alignment of jump targets on some ARM chips.
- Align(8);
-}
-
-
-Condition Assembler::GetCondition(Instr instr) {
- return Instruction::ConditionField(instr);
-}
-
-
-bool Assembler::IsBranch(Instr instr) {
- return (instr & (B27 | B25)) == (B27 | B25);
-}
-
-
-int Assembler::GetBranchOffset(Instr instr) {
- ASSERT(IsBranch(instr));
- // Take the jump offset in the lower 24 bits, sign extend it and multiply it
- // with 4 to get the offset in bytes.
- return ((instr & kImm24Mask) << 8) >> 6;
-}
-
-
-bool Assembler::IsLdrRegisterImmediate(Instr instr) {
- return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
-}
-
-
-int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
- ASSERT(IsLdrRegisterImmediate(instr));
- bool positive = (instr & B23) == B23;
- int offset = instr & kOff12Mask; // Zero extended offset.
- return positive ? offset : -offset;
-}
-
-
-Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
- ASSERT(IsLdrRegisterImmediate(instr));
- bool positive = offset >= 0;
- if (!positive) offset = -offset;
- ASSERT(is_uint12(offset));
- // Set bit indicating whether the offset should be added.
- instr = (instr & ~B23) | (positive ? B23 : 0);
- // Set the actual offset.
- return (instr & ~kOff12Mask) | offset;
-}
-
-
-bool Assembler::IsStrRegisterImmediate(Instr instr) {
- return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
-}
-
-
-Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
- ASSERT(IsStrRegisterImmediate(instr));
- bool positive = offset >= 0;
- if (!positive) offset = -offset;
- ASSERT(is_uint12(offset));
- // Set bit indicating whether the offset should be added.
- instr = (instr & ~B23) | (positive ? B23 : 0);
- // Set the actual offset.
- return (instr & ~kOff12Mask) | offset;
-}
-
-
-bool Assembler::IsAddRegisterImmediate(Instr instr) {
- return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
-}
-
-
-Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
- ASSERT(IsAddRegisterImmediate(instr));
- ASSERT(offset >= 0);
- ASSERT(is_uint12(offset));
- // Set the offset.
- return (instr & ~kOff12Mask) | offset;
-}
-
-
-Register Assembler::GetRd(Instr instr) {
- Register reg;
- reg.code_ = Instruction::RdValue(instr);
- return reg;
-}
-
-
-Register Assembler::GetRn(Instr instr) {
- Register reg;
- reg.code_ = Instruction::RnValue(instr);
- return reg;
-}
-
-
-Register Assembler::GetRm(Instr instr) {
- Register reg;
- reg.code_ = Instruction::RmValue(instr);
- return reg;
-}
-
-
-bool Assembler::IsPush(Instr instr) {
- return ((instr & ~kRdMask) == kPushRegPattern);
-}
-
-
-bool Assembler::IsPop(Instr instr) {
- return ((instr & ~kRdMask) == kPopRegPattern);
-}
-
-
-bool Assembler::IsStrRegFpOffset(Instr instr) {
- return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
-}
-
-
-bool Assembler::IsLdrRegFpOffset(Instr instr) {
- return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
-}
-
-
-bool Assembler::IsStrRegFpNegOffset(Instr instr) {
- return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
-}
-
-
-bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
- return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
-}
-
-
-bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
- // Check the instruction is indeed a
- // ldr<cond> <Rd>, [pc +/- offset_12].
- return (instr & (kLdrPCMask & ~kCondMask)) == 0x051f0000;
-}
-
-
-bool Assembler::IsTstImmediate(Instr instr) {
- return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
- (I | TST | S);
-}
-
-
-bool Assembler::IsCmpRegister(Instr instr) {
- return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
- (CMP | S);
-}
-
-
-bool Assembler::IsCmpImmediate(Instr instr) {
- return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
- (I | CMP | S);
-}
-
-
-Register Assembler::GetCmpImmediateRegister(Instr instr) {
- ASSERT(IsCmpImmediate(instr));
- return GetRn(instr);
-}
-
-
-int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
- ASSERT(IsCmpImmediate(instr));
- return instr & kOff12Mask;
-}
-
-// Labels refer to positions in the (to be) generated code.
-// There are bound, linked, and unused labels.
-//
-// Bound labels refer to known positions in the already
-// generated code. pos() is the position the label refers to.
-//
-// Linked labels refer to unknown positions in the code
-// to be generated; pos() is the position of the last
-// instruction using the label.
-
-
-// The link chain is terminated by a negative code position (must be aligned)
-const int kEndOfChain = -4;
-
-
-int Assembler::target_at(int pos) {
- Instr instr = instr_at(pos);
- if ((instr & ~kImm24Mask) == 0) {
- // Emitted label constant, not part of a branch.
- return instr - (Code::kHeaderSize - kHeapObjectTag);
- }
- ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
- int imm26 = ((instr & kImm24Mask) << 8) >> 6;
- if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
- ((instr & B24) != 0)) {
- // blx uses bit 24 to encode bit 2 of imm26
- imm26 += 2;
- }
- return pos + kPcLoadDelta + imm26;
-}
-
-
-void Assembler::target_at_put(int pos, int target_pos) {
- Instr instr = instr_at(pos);
- if ((instr & ~kImm24Mask) == 0) {
- ASSERT(target_pos == kEndOfChain || target_pos >= 0);
- // Emitted label constant, not part of a branch.
- // Make label relative to Code* of generated Code object.
- instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
- return;
- }
- int imm26 = target_pos - (pos + kPcLoadDelta);
- ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
- if (Instruction::ConditionField(instr) == kSpecialCondition) {
- // blx uses bit 24 to encode bit 2 of imm26
- ASSERT((imm26 & 1) == 0);
- instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
- } else {
- ASSERT((imm26 & 3) == 0);
- instr &= ~kImm24Mask;
- }
- int imm24 = imm26 >> 2;
- ASSERT(is_int24(imm24));
- instr_at_put(pos, instr | (imm24 & kImm24Mask));
-}
-
-
-void Assembler::print(Label* L) {
- if (L->is_unused()) {
- PrintF("unused label\n");
- } else if (L->is_bound()) {
- PrintF("bound label to %d\n", L->pos());
- } else if (L->is_linked()) {
- Label l = *L;
- PrintF("unbound label");
- while (l.is_linked()) {
- PrintF("@ %d ", l.pos());
- Instr instr = instr_at(l.pos());
- if ((instr & ~kImm24Mask) == 0) {
- PrintF("value\n");
- } else {
- ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx
- Condition cond = Instruction::ConditionField(instr);
- const char* b;
- const char* c;
- if (cond == kSpecialCondition) {
- b = "blx";
- c = "";
- } else {
- if ((instr & B24) != 0)
- b = "bl";
- else
- b = "b";
-
- switch (cond) {
- case eq: c = "eq"; break;
- case ne: c = "ne"; break;
- case hs: c = "hs"; break;
- case lo: c = "lo"; break;
- case mi: c = "mi"; break;
- case pl: c = "pl"; break;
- case vs: c = "vs"; break;
- case vc: c = "vc"; break;
- case hi: c = "hi"; break;
- case ls: c = "ls"; break;
- case ge: c = "ge"; break;
- case lt: c = "lt"; break;
- case gt: c = "gt"; break;
- case le: c = "le"; break;
- case al: c = ""; break;
- default:
- c = "";
- UNREACHABLE();
- }
- }
- PrintF("%s%s\n", b, c);
- }
- next(&l);
- }
- } else {
- PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
- }
-}
-
-
-void Assembler::bind_to(Label* L, int pos) {
- ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
- while (L->is_linked()) {
- int fixup_pos = L->pos();
- next(L); // call next before overwriting link with target at fixup_pos
- target_at_put(fixup_pos, pos);
- }
- L->bind_to(pos);
-
- // Keep track of the last bound label so we don't eliminate any instructions
- // before a bound label.
- if (pos > last_bound_pos_)
- last_bound_pos_ = pos;
-}
-
-
-void Assembler::link_to(Label* L, Label* appendix) {
- if (appendix->is_linked()) {
- if (L->is_linked()) {
- // Append appendix to L's list.
- int fixup_pos;
- int link = L->pos();
- do {
- fixup_pos = link;
- link = target_at(fixup_pos);
- } while (link > 0);
- ASSERT(link == kEndOfChain);
- target_at_put(fixup_pos, appendix->pos());
- } else {
- // L is empty, simply use appendix.
- *L = *appendix;
- }
- }
- appendix->Unuse(); // appendix should not be used anymore
-}
-
-
-void Assembler::bind(Label* L) {
- ASSERT(!L->is_bound()); // label can only be bound once
- bind_to(L, pc_offset());
-}
-
-
-void Assembler::next(Label* L) {
- ASSERT(L->is_linked());
- int link = target_at(L->pos());
- if (link > 0) {
- L->link_to(link);
- } else {
- ASSERT(link == kEndOfChain);
- L->Unuse();
- }
-}
-
-
-static Instr EncodeMovwImmediate(uint32_t immediate) {
- ASSERT(immediate < 0x10000);
- return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
-}
-
-
-// Low-level code emission routines depending on the addressing mode.
-// If this returns true then you have to use the rotate_imm and immed_8
-// that it returns, because it may have already changed the instruction
-// to match them!
-static bool fits_shifter(uint32_t imm32,
- uint32_t* rotate_imm,
- uint32_t* immed_8,
- Instr* instr) {
- // imm32 must be unsigned.
- for (int rot = 0; rot < 16; rot++) {
- uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
- if ((imm8 <= 0xff)) {
- *rotate_imm = rot;
- *immed_8 = imm8;
- return true;
- }
- }
- // If the opcode is one with a complementary version and the complementary
- // immediate fits, change the opcode.
- if (instr != NULL) {
- if ((*instr & kMovMvnMask) == kMovMvnPattern) {
- if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
- *instr ^= kMovMvnFlip;
- return true;
- } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
- if (CpuFeatures::IsSupported(ARMv7)) {
- if (imm32 < 0x10000) {
- *instr ^= kMovwLeaveCCFlip;
- *instr |= EncodeMovwImmediate(imm32);
- *rotate_imm = *immed_8 = 0; // Not used for movw.
- return true;
- }
- }
- }
- } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
- if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
- *instr ^= kCmpCmnFlip;
- return true;
- }
- } else {
- Instr alu_insn = (*instr & kALUMask);
- if (alu_insn == ADD ||
- alu_insn == SUB) {
- if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
- *instr ^= kAddSubFlip;
- return true;
- }
- } else if (alu_insn == AND ||
- alu_insn == BIC) {
- if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
- *instr ^= kAndBicFlip;
- return true;
- }
- }
- }
- }
- return false;
-}
-
-
-// We have to use the temporary register for things that can be relocated even
-// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
-// space. There is no guarantee that the relocated location can be similarly
-// encoded.
-bool Operand::must_use_constant_pool() const {
- if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif // def DEBUG
- return Serializer::enabled();
- } else if (rmode_ == RelocInfo::NONE) {
- return false;
- }
- return true;
-}
-
-
-bool Operand::is_single_instruction(Instr instr) const {
- if (rm_.is_valid()) return true;
- uint32_t dummy1, dummy2;
- if (must_use_constant_pool() ||
- !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
- // The immediate operand cannot be encoded as a shifter operand, or use of
- // constant pool is required. For a mov instruction not setting the
- // condition code additional instruction conventions can be used.
- if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- if (must_use_constant_pool() ||
- !CpuFeatures::IsSupported(ARMv7)) {
- // mov instruction will be an ldr from constant pool (one instruction).
- return true;
- } else {
- // mov instruction will be a mov or movw followed by movt (two
- // instructions).
- return false;
- }
- } else {
- // If this is not a mov or mvn instruction there will always an additional
- // instructions - either mov or ldr. The mov might actually be two
- // instructions mov or movw followed by movt so including the actual
- // instruction two or three instructions will be generated.
- return false;
- }
- } else {
- // No use of constant pool and the immediate operand can be encoded as a
- // shifter operand.
- return true;
- }
-}
-
-
-void Assembler::addrmod1(Instr instr,
- Register rn,
- Register rd,
- const Operand& x) {
- CheckBuffer();
- ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
- if (!x.rm_.is_valid()) {
- // Immediate.
- uint32_t rotate_imm;
- uint32_t immed_8;
- if (x.must_use_constant_pool() ||
- !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
- // The immediate operand cannot be encoded as a shifter operand, so load
- // it first to register ip and change the original instruction to use ip.
- // However, if the original instruction is a 'mov rd, x' (not setting the
- // condition code), then replace it with a 'ldr rd, [pc]'.
- CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
- Condition cond = Instruction::ConditionField(instr);
- if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- if (x.must_use_constant_pool() ||
- !CpuFeatures::IsSupported(ARMv7)) {
- RecordRelocInfo(x.rmode_, x.imm32_);
- ldr(rd, MemOperand(pc, 0), cond);
- } else {
- // Will probably use movw, will certainly not use constant pool.
- mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond);
- movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
- }
- } else {
- // If this is not a mov or mvn instruction we may still be able to avoid
- // a constant pool entry by using mvn or movw.
- if (!x.must_use_constant_pool() &&
- (instr & kMovMvnMask) != kMovMvnPattern) {
- mov(ip, x, LeaveCC, cond);
- } else {
- RecordRelocInfo(x.rmode_, x.imm32_);
- ldr(ip, MemOperand(pc, 0), cond);
- }
- addrmod1(instr, rn, rd, Operand(ip));
- }
- return;
- }
- instr |= I | rotate_imm*B8 | immed_8;
- } else if (!x.rs_.is_valid()) {
- // Immediate shift.
- instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
- } else {
- // Register shift.
- ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
- instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
- }
- emit(instr | rn.code()*B16 | rd.code()*B12);
- if (rn.is(pc) || x.rm_.is(pc)) {
- // Block constant pool emission for one instruction after reading pc.
- BlockConstPoolBefore(pc_offset() + kInstrSize);
- }
-}
-
-
-void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
- ASSERT((instr & ~(kCondMask | B | L)) == B26);
- int am = x.am_;
- if (!x.rm_.is_valid()) {
- // Immediate offset.
- int offset_12 = x.offset_;
- if (offset_12 < 0) {
- offset_12 = -offset_12;
- am ^= U;
- }
- if (!is_uint12(offset_12)) {
- // Immediate offset cannot be encoded, load it first to register ip
- // rn (and rd in a load) should never be ip, or will be trashed.
- ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
- mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
- addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
- return;
- }
- ASSERT(offset_12 >= 0); // no masking needed
- instr |= offset_12;
- } else {
- // Register offset (shift_imm_ and shift_op_ are 0) or scaled
- // register offset the constructors make sure than both shift_imm_
- // and shift_op_ are initialized.
- ASSERT(!x.rm_.is(pc));
- instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
- }
- ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
- emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
-}
-
-
-void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
- ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
- ASSERT(x.rn_.is_valid());
- int am = x.am_;
- if (!x.rm_.is_valid()) {
- // Immediate offset.
- int offset_8 = x.offset_;
- if (offset_8 < 0) {
- offset_8 = -offset_8;
- am ^= U;
- }
- if (!is_uint8(offset_8)) {
- // Immediate offset cannot be encoded, load it first to register ip
- // rn (and rd in a load) should never be ip, or will be trashed.
- ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
- mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
- addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
- return;
- }
- ASSERT(offset_8 >= 0); // no masking needed
- instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
- } else if (x.shift_imm_ != 0) {
- // Scaled register offset not supported, load index first
- // rn (and rd in a load) should never be ip, or will be trashed.
- ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
- mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
- Instruction::ConditionField(instr));
- addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
- return;
- } else {
- // Register offset.
- ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
- instr |= x.rm_.code();
- }
- ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
- emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
-}
-
-
-void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
- ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
- ASSERT(rl != 0);
- ASSERT(!rn.is(pc));
- emit(instr | rn.code()*B16 | rl);
-}
-
-
-void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
- // Unindexed addressing is not encoded by this function.
- ASSERT_EQ((B27 | B26),
- (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
- ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
- int am = x.am_;
- int offset_8 = x.offset_;
- ASSERT((offset_8 & 3) == 0); // offset must be an aligned word offset
- offset_8 >>= 2;
- if (offset_8 < 0) {
- offset_8 = -offset_8;
- am ^= U;
- }
- ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte
- ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
-
- // Post-indexed addressing requires W == 1; different than in addrmod2/3.
- if ((am & P) == 0)
- am |= W;
-
- ASSERT(offset_8 >= 0); // no masking needed
- emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
-}
-
-
-int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
- int target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos(); // L's link
- } else {
- target_pos = kEndOfChain;
- }
- L->link_to(pc_offset());
- }
-
- // Block the emission of the constant pool, since the branch instruction must
- // be emitted at the pc offset recorded by the label.
- BlockConstPoolBefore(pc_offset() + kInstrSize);
- return target_pos - (pc_offset() + kPcLoadDelta);
-}
-
-
-void Assembler::label_at_put(Label* L, int at_offset) {
- int target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos(); // L's link
- } else {
- target_pos = kEndOfChain;
- }
- L->link_to(at_offset);
- instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
- }
-}
-
-
-// Branch instructions.
-void Assembler::b(int branch_offset, Condition cond) {
- ASSERT((branch_offset & 3) == 0);
- int imm24 = branch_offset >> 2;
- ASSERT(is_int24(imm24));
- emit(cond | B27 | B25 | (imm24 & kImm24Mask));
-
- if (cond == al) {
- // Dead code is a good location to emit the constant pool.
- CheckConstPool(false, false);
- }
-}
-
-
-void Assembler::bl(int branch_offset, Condition cond) {
- positions_recorder()->WriteRecordedPositions();
- ASSERT((branch_offset & 3) == 0);
- int imm24 = branch_offset >> 2;
- ASSERT(is_int24(imm24));
- emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
-}
-
-
-void Assembler::blx(int branch_offset) { // v5 and above
- positions_recorder()->WriteRecordedPositions();
- ASSERT((branch_offset & 1) == 0);
- int h = ((branch_offset & 2) >> 1)*B24;
- int imm24 = branch_offset >> 2;
- ASSERT(is_int24(imm24));
- emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
-}
-
-
-void Assembler::blx(Register target, Condition cond) { // v5 and above
- positions_recorder()->WriteRecordedPositions();
- ASSERT(!target.is(pc));
- emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
-}
-
-
-void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
- positions_recorder()->WriteRecordedPositions();
- ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged
- emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
-}
-
-
-// Data-processing instructions.
-
-void Assembler::and_(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | AND | s, src1, dst, src2);
-}
-
-
-void Assembler::eor(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | EOR | s, src1, dst, src2);
-}
-
-
-void Assembler::sub(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | SUB | s, src1, dst, src2);
-}
-
-
-void Assembler::rsb(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | RSB | s, src1, dst, src2);
-}
-
-
-void Assembler::add(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | ADD | s, src1, dst, src2);
-
- // Eliminate pattern: push(r), pop()
- // str(src, MemOperand(sp, 4, NegPreIndex), al);
- // add(sp, sp, Operand(kPointerSize));
- // Both instructions can be eliminated.
- if (can_peephole_optimize(2) &&
- // Pattern.
- instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
- (instr_at(pc_ - 2 * kInstrSize) & ~kRdMask) == kPushRegPattern) {
- pc_ -= 2 * kInstrSize;
- if (FLAG_print_peephole_optimization) {
- PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
- }
- }
-}
-
-
-void Assembler::adc(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | ADC | s, src1, dst, src2);
-}
-
-
-void Assembler::sbc(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | SBC | s, src1, dst, src2);
-}
-
-
-void Assembler::rsc(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | RSC | s, src1, dst, src2);
-}
-
-
-void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | TST | S, src1, r0, src2);
-}
-
-
-void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | TEQ | S, src1, r0, src2);
-}
-
-
-void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | CMP | S, src1, r0, src2);
-}
-
-
-void Assembler::cmp_raw_immediate(
- Register src, int raw_immediate, Condition cond) {
- ASSERT(is_uint12(raw_immediate));
- emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
-}
-
-
-void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | CMN | S, src1, r0, src2);
-}
-
-
-void Assembler::orr(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | ORR | s, src1, dst, src2);
-}
-
-
-void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
- if (dst.is(pc)) {
- positions_recorder()->WriteRecordedPositions();
- }
- // Don't allow nop instructions in the form mov rn, rn to be generated using
- // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
- // or MarkCode(int/NopMarkerTypes) pseudo instructions.
- ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
- addrmod1(cond | MOV | s, r0, dst, src);
-}
-
-
-void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
- ASSERT(immediate < 0x10000);
- mov(reg, Operand(immediate), LeaveCC, cond);
-}
-
-
-void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
- emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
-}
-
-
-void Assembler::bic(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | BIC | s, src1, dst, src2);
-}
-
-
-void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
- addrmod1(cond | MVN | s, r0, dst, src);
-}
-
-
-// Multiply instructions.
-void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
- SBit s, Condition cond) {
- ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
- emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::mul(Register dst, Register src1, Register src2,
- SBit s, Condition cond) {
- ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
- // dst goes in bits 16-19 for this instruction!
- emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::smlal(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
- emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::smull(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
- emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::umlal(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
- emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::umull(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
- emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-// Miscellaneous arithmetic instructions.
-void Assembler::clz(Register dst, Register src, Condition cond) {
- // v5 and above.
- ASSERT(!dst.is(pc) && !src.is(pc));
- emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
- 15*B8 | CLZ | src.code());
-}
-
-
-// Saturating instructions.
-
-// Unsigned saturate.
-void Assembler::usat(Register dst,
- int satpos,
- const Operand& src,
- Condition cond) {
- // v6 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
- ASSERT(!dst.is(pc) && !src.rm_.is(pc));
- ASSERT((satpos >= 0) && (satpos <= 31));
- ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
- ASSERT(src.rs_.is(no_reg));
-
- int sh = 0;
- if (src.shift_op_ == ASR) {
- sh = 1;
- }
-
- emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
- src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
-}
-
-
-// Bitfield manipulation instructions.
-
-// Unsigned bit field extract.
-// Extracts #width adjacent bits from position #lsb in a register, and
-// writes them to the low bits of a destination register.
-// ubfx dst, src, #lsb, #width
-void Assembler::ubfx(Register dst,
- Register src,
- int lsb,
- int width,
- Condition cond) {
- // v7 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
- ASSERT(!dst.is(pc) && !src.is(pc));
- ASSERT((lsb >= 0) && (lsb <= 31));
- ASSERT((width >= 1) && (width <= (32 - lsb)));
- emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
- lsb*B7 | B6 | B4 | src.code());
-}
-
-
-// Signed bit field extract.
-// Extracts #width adjacent bits from position #lsb in a register, and
-// writes them to the low bits of a destination register. The extracted
-// value is sign extended to fill the destination register.
-// sbfx dst, src, #lsb, #width
-void Assembler::sbfx(Register dst,
- Register src,
- int lsb,
- int width,
- Condition cond) {
- // v7 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
- ASSERT(!dst.is(pc) && !src.is(pc));
- ASSERT((lsb >= 0) && (lsb <= 31));
- ASSERT((width >= 1) && (width <= (32 - lsb)));
- emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
- lsb*B7 | B6 | B4 | src.code());
-}
-
-
-// Bit field clear.
-// Sets #width adjacent bits at position #lsb in the destination register
-// to zero, preserving the value of the other bits.
-// bfc dst, #lsb, #width
-void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
- // v7 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
- ASSERT(!dst.is(pc));
- ASSERT((lsb >= 0) && (lsb <= 31));
- ASSERT((width >= 1) && (width <= (32 - lsb)));
- int msb = lsb + width - 1;
- emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
-}
-
-
-// Bit field insert.
-// Inserts #width adjacent bits from the low bits of the source register
-// into position #lsb of the destination register.
-// bfi dst, src, #lsb, #width
-void Assembler::bfi(Register dst,
- Register src,
- int lsb,
- int width,
- Condition cond) {
- // v7 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
- ASSERT(!dst.is(pc) && !src.is(pc));
- ASSERT((lsb >= 0) && (lsb <= 31));
- ASSERT((width >= 1) && (width <= (32 - lsb)));
- int msb = lsb + width - 1;
- emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
- src.code());
-}
-
-
-// Status register access instructions.
-void Assembler::mrs(Register dst, SRegister s, Condition cond) {
- ASSERT(!dst.is(pc));
- emit(cond | B24 | s | 15*B16 | dst.code()*B12);
-}
-
-
-void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
- Condition cond) {
- ASSERT(fields >= B16 && fields < B20); // at least one field set
- Instr instr;
- if (!src.rm_.is_valid()) {
- // Immediate.
- uint32_t rotate_imm;
- uint32_t immed_8;
- if (src.must_use_constant_pool() ||
- !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
- // Immediate operand cannot be encoded, load it first to register ip.
- RecordRelocInfo(src.rmode_, src.imm32_);
- ldr(ip, MemOperand(pc, 0), cond);
- msr(fields, Operand(ip), cond);
- return;
- }
- instr = I | rotate_imm*B8 | immed_8;
- } else {
- ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed
- instr = src.rm_.code();
- }
- emit(cond | instr | B24 | B21 | fields | 15*B12);
-}
-
-
-// Load/Store instructions.
-void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
- if (dst.is(pc)) {
- positions_recorder()->WriteRecordedPositions();
- }
- addrmod2(cond | B26 | L, dst, src);
-
- // Eliminate pattern: push(ry), pop(rx)
- // str(ry, MemOperand(sp, 4, NegPreIndex), al)
- // ldr(rx, MemOperand(sp, 4, PostIndex), al)
- // Both instructions can be eliminated if ry = rx.
- // If ry != rx, a register copy from ry to rx is inserted
- // after eliminating the push and the pop instructions.
- if (can_peephole_optimize(2)) {
- Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
- Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
-
- if (IsPush(push_instr) && IsPop(pop_instr)) {
- if (Instruction::RdValue(pop_instr) != Instruction::RdValue(push_instr)) {
- // For consecutive push and pop on different registers,
- // we delete both the push & pop and insert a register move.
- // push ry, pop rx --> mov rx, ry
- Register reg_pushed, reg_popped;
- reg_pushed = GetRd(push_instr);
- reg_popped = GetRd(pop_instr);
- pc_ -= 2 * kInstrSize;
- // Insert a mov instruction, which is better than a pair of push & pop
- mov(reg_popped, reg_pushed);
- if (FLAG_print_peephole_optimization) {
- PrintF("%x push/pop (diff reg) replaced by a reg move\n",
- pc_offset());
- }
- } else {
- // For consecutive push and pop on the same register,
- // both the push and the pop can be deleted.
- pc_ -= 2 * kInstrSize;
- if (FLAG_print_peephole_optimization) {
- PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
- }
- }
- }
- }
-
- if (can_peephole_optimize(2)) {
- Instr str_instr = instr_at(pc_ - 2 * kInstrSize);
- Instr ldr_instr = instr_at(pc_ - 1 * kInstrSize);
-
- if ((IsStrRegFpOffset(str_instr) &&
- IsLdrRegFpOffset(ldr_instr)) ||
- (IsStrRegFpNegOffset(str_instr) &&
- IsLdrRegFpNegOffset(ldr_instr))) {
- if ((ldr_instr & kLdrStrInstrArgumentMask) ==
- (str_instr & kLdrStrInstrArgumentMask)) {
- // Pattern: Ldr/str same fp+offset, same register.
- //
- // The following:
- // str rx, [fp, #-12]
- // ldr rx, [fp, #-12]
- //
- // Becomes:
- // str rx, [fp, #-12]
-
- pc_ -= 1 * kInstrSize;
- if (FLAG_print_peephole_optimization) {
- PrintF("%x str/ldr (fp + same offset), same reg\n", pc_offset());
- }
- } else if ((ldr_instr & kLdrStrOffsetMask) ==
- (str_instr & kLdrStrOffsetMask)) {
- // Pattern: Ldr/str same fp+offset, different register.
- //
- // The following:
- // str rx, [fp, #-12]
- // ldr ry, [fp, #-12]
- //
- // Becomes:
- // str rx, [fp, #-12]
- // mov ry, rx
-
- Register reg_stored, reg_loaded;
- reg_stored = GetRd(str_instr);
- reg_loaded = GetRd(ldr_instr);
- pc_ -= 1 * kInstrSize;
- // Insert a mov instruction, which is better than ldr.
- mov(reg_loaded, reg_stored);
- if (FLAG_print_peephole_optimization) {
- PrintF("%x str/ldr (fp + same offset), diff reg \n", pc_offset());
- }
- }
- }
- }
-
- if (can_peephole_optimize(3)) {
- Instr mem_write_instr = instr_at(pc_ - 3 * kInstrSize);
- Instr ldr_instr = instr_at(pc_ - 2 * kInstrSize);
- Instr mem_read_instr = instr_at(pc_ - 1 * kInstrSize);
- if (IsPush(mem_write_instr) &&
- IsPop(mem_read_instr)) {
- if ((IsLdrRegFpOffset(ldr_instr) ||
- IsLdrRegFpNegOffset(ldr_instr))) {
- if (Instruction::RdValue(mem_write_instr) ==
- Instruction::RdValue(mem_read_instr)) {
- // Pattern: push & pop from/to same register,
- // with a fp+offset ldr in between
- //
- // The following:
- // str rx, [sp, #-4]!
- // ldr rz, [fp, #-24]
- // ldr rx, [sp], #+4
- //
- // Becomes:
- // if(rx == rz)
- // delete all
- // else
- // ldr rz, [fp, #-24]
-
- if (Instruction::RdValue(mem_write_instr) ==
- Instruction::RdValue(ldr_instr)) {
- pc_ -= 3 * kInstrSize;
- } else {
- pc_ -= 3 * kInstrSize;
- // Reinsert back the ldr rz.
- emit(ldr_instr);
- }
- if (FLAG_print_peephole_optimization) {
- PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
- }
- } else {
- // Pattern: push & pop from/to different registers
- // with a fp+offset ldr in between
- //
- // The following:
- // str rx, [sp, #-4]!
- // ldr rz, [fp, #-24]
- // ldr ry, [sp], #+4
- //
- // Becomes:
- // if(ry == rz)
- // mov ry, rx;
- // else if(rx != rz)
- // ldr rz, [fp, #-24]
- // mov ry, rx
- // else if((ry != rz) || (rx == rz)) becomes:
- // mov ry, rx
- // ldr rz, [fp, #-24]
-
- Register reg_pushed, reg_popped;
- if (Instruction::RdValue(mem_read_instr) ==
- Instruction::RdValue(ldr_instr)) {
- reg_pushed = GetRd(mem_write_instr);
- reg_popped = GetRd(mem_read_instr);
- pc_ -= 3 * kInstrSize;
- mov(reg_popped, reg_pushed);
- } else if (Instruction::RdValue(mem_write_instr) !=
- Instruction::RdValue(ldr_instr)) {
- reg_pushed = GetRd(mem_write_instr);
- reg_popped = GetRd(mem_read_instr);
- pc_ -= 3 * kInstrSize;
- emit(ldr_instr);
- mov(reg_popped, reg_pushed);
- } else if ((Instruction::RdValue(mem_read_instr) !=
- Instruction::RdValue(ldr_instr)) ||
- (Instruction::RdValue(mem_write_instr) ==
- Instruction::RdValue(ldr_instr))) {
- reg_pushed = GetRd(mem_write_instr);
- reg_popped = GetRd(mem_read_instr);
- pc_ -= 3 * kInstrSize;
- mov(reg_popped, reg_pushed);
- emit(ldr_instr);
- }
- if (FLAG_print_peephole_optimization) {
- PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
- }
- }
- }
- }
- }
-}
-
-
-void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
- addrmod2(cond | B26, src, dst);
-
- // Eliminate pattern: pop(), push(r)
- // add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
- // -> str r, [sp, 0], al
- if (can_peephole_optimize(2) &&
- // Pattern.
- instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
- instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
- pc_ -= 2 * kInstrSize;
- emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
- if (FLAG_print_peephole_optimization) {
- PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
- }
- }
-}
-
-
-void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
- addrmod2(cond | B26 | B | L, dst, src);
-}
-
-
-void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
- addrmod2(cond | B26 | B, src, dst);
-}
-
-
-void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
- addrmod3(cond | L | B7 | H | B4, dst, src);
-}
-
-
-void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
- addrmod3(cond | B7 | H | B4, src, dst);
-}
-
-
-void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
- addrmod3(cond | L | B7 | S6 | B4, dst, src);
-}
-
-
-void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
- addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
-}
-
-
-void Assembler::ldrd(Register dst1, Register dst2,
- const MemOperand& src, Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(ARMv7));
- ASSERT(src.rm().is(no_reg));
- ASSERT(!dst1.is(lr)); // r14.
- ASSERT_EQ(0, dst1.code() % 2);
- ASSERT_EQ(dst1.code() + 1, dst2.code());
- addrmod3(cond | B7 | B6 | B4, dst1, src);
-}
-
-
-void Assembler::strd(Register src1, Register src2,
- const MemOperand& dst, Condition cond) {
- ASSERT(dst.rm().is(no_reg));
- ASSERT(!src1.is(lr)); // r14.
- ASSERT_EQ(0, src1.code() % 2);
- ASSERT_EQ(src1.code() + 1, src2.code());
- ASSERT(CpuFeatures::IsEnabled(ARMv7));
- addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
-}
-
-// Load/Store multiple instructions.
-void Assembler::ldm(BlockAddrMode am,
- Register base,
- RegList dst,
- Condition cond) {
- // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
- ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
-
- addrmod4(cond | B27 | am | L, base, dst);
-
- // Emit the constant pool after a function return implemented by ldm ..{..pc}.
- if (cond == al && (dst & pc.bit()) != 0) {
- // There is a slight chance that the ldm instruction was actually a call,
- // in which case it would be wrong to return into the constant pool; we
- // recognize this case by checking if the emission of the pool was blocked
- // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
- // the case, we emit a jump over the pool.
- CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
- }
-}
-
-
-void Assembler::stm(BlockAddrMode am,
- Register base,
- RegList src,
- Condition cond) {
- addrmod4(cond | B27 | am, base, src);
-}
-
-
-// Exception-generating instructions and debugging support.
-// Stops with a non-negative code less than kNumOfWatchedStops support
-// enabling/disabling and a counter feature. See simulator-arm.h .
-void Assembler::stop(const char* msg, Condition cond, int32_t code) {
-#ifndef __arm__
- ASSERT(code >= kDefaultStopCode);
- // The Simulator will handle the stop instruction and get the message address.
- // It expects to find the address just after the svc instruction.
- BlockConstPoolFor(2);
- if (code >= 0) {
- svc(kStopCode + code, cond);
- } else {
- svc(kStopCode + kMaxStopCode, cond);
- }
- emit(reinterpret_cast<Instr>(msg));
-#else // def __arm__
-#ifdef CAN_USE_ARMV5_INSTRUCTIONS
- if (cond != al) {
- Label skip;
- b(&skip, NegateCondition(cond));
- bkpt(0);
- bind(&skip);
- } else {
- bkpt(0);
- }
-#else // ndef CAN_USE_ARMV5_INSTRUCTIONS
- svc(0x9f0001, cond);
-#endif // ndef CAN_USE_ARMV5_INSTRUCTIONS
-#endif // def __arm__
-}
-
-
-void Assembler::bkpt(uint32_t imm16) { // v5 and above
- ASSERT(is_uint16(imm16));
- emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
-}
-
-
-void Assembler::svc(uint32_t imm24, Condition cond) {
- ASSERT(is_uint24(imm24));
- emit(cond | 15*B24 | imm24);
-}
-
-
-// Coprocessor instructions.
-void Assembler::cdp(Coprocessor coproc,
- int opcode_1,
- CRegister crd,
- CRegister crn,
- CRegister crm,
- int opcode_2,
- Condition cond) {
- ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
- emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
- crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
-}
-
-
-void Assembler::cdp2(Coprocessor coproc,
- int opcode_1,
- CRegister crd,
- CRegister crn,
- CRegister crm,
- int opcode_2) { // v5 and above
- cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
-}
-
-
-void Assembler::mcr(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2,
- Condition cond) {
- ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
- emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
- rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
-}
-
-
-void Assembler::mcr2(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2) { // v5 and above
- mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
-}
-
-
-void Assembler::mrc(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2,
- Condition cond) {
- ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
- emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
- rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
-}
-
-
-void Assembler::mrc2(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2) { // v5 and above
- mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
-}
-
-
-void Assembler::ldc(Coprocessor coproc,
- CRegister crd,
- const MemOperand& src,
- LFlag l,
- Condition cond) {
- addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
-}
-
-
-void Assembler::ldc(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l,
- Condition cond) {
- // Unindexed addressing.
- ASSERT(is_uint8(option));
- emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
- coproc*B8 | (option & 255));
-}
-
-
-void Assembler::ldc2(Coprocessor coproc,
- CRegister crd,
- const MemOperand& src,
- LFlag l) { // v5 and above
- ldc(coproc, crd, src, l, kSpecialCondition);
-}
-
-
-void Assembler::ldc2(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l) { // v5 and above
- ldc(coproc, crd, rn, option, l, kSpecialCondition);
-}
-
-
-void Assembler::stc(Coprocessor coproc,
- CRegister crd,
- const MemOperand& dst,
- LFlag l,
- Condition cond) {
- addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
-}
-
-
-void Assembler::stc(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l,
- Condition cond) {
- // Unindexed addressing.
- ASSERT(is_uint8(option));
- emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
- coproc*B8 | (option & 255));
-}
-
-
-void Assembler::stc2(Coprocessor
- coproc, CRegister crd,
- const MemOperand& dst,
- LFlag l) { // v5 and above
- stc(coproc, crd, dst, l, kSpecialCondition);
-}
-
-
-void Assembler::stc2(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l) { // v5 and above
- stc(coproc, crd, rn, option, l, kSpecialCondition);
-}
-
-
-// Support for VFP.
-
-void Assembler::vldr(const DwVfpRegister dst,
- const Register base,
- int offset,
- const Condition cond) {
- // Ddst = MEM(Rbase + offset).
- // Instruction details available in ARM DDI 0406A, A8-628.
- // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
- // Vdst(15-12) | 1011(11-8) | offset
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- int u = 1;
- if (offset < 0) {
- offset = -offset;
- u = 0;
- }
-
- ASSERT(offset >= 0);
- if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
- 0xB*B8 | ((offset / 4) & 255));
- } else {
- // Larger offsets must be handled by computing the correct address
- // in the ip register.
- ASSERT(!base.is(ip));
- if (u == 1) {
- add(ip, base, Operand(offset));
- } else {
- sub(ip, base, Operand(offset));
- }
- emit(cond | 0xD1*B20 | ip.code()*B16 | dst.code()*B12 | 0xB*B8);
- }
-}
-
-
-void Assembler::vldr(const DwVfpRegister dst,
- const MemOperand& operand,
- const Condition cond) {
- ASSERT(!operand.rm().is_valid());
- ASSERT(operand.am_ == Offset);
- vldr(dst, operand.rn(), operand.offset(), cond);
-}
-
-
-void Assembler::vldr(const SwVfpRegister dst,
- const Register base,
- int offset,
- const Condition cond) {
- // Sdst = MEM(Rbase + offset).
- // Instruction details available in ARM DDI 0406A, A8-628.
- // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
- // Vdst(15-12) | 1010(11-8) | offset
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- int u = 1;
- if (offset < 0) {
- offset = -offset;
- u = 0;
- }
- int sd, d;
- dst.split_code(&sd, &d);
- ASSERT(offset >= 0);
-
- if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
- 0xA*B8 | ((offset / 4) & 255));
- } else {
- // Larger offsets must be handled by computing the correct address
- // in the ip register.
- ASSERT(!base.is(ip));
- if (u == 1) {
- add(ip, base, Operand(offset));
- } else {
- sub(ip, base, Operand(offset));
- }
- emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
- }
-}
-
-
-void Assembler::vldr(const SwVfpRegister dst,
- const MemOperand& operand,
- const Condition cond) {
- ASSERT(!operand.rm().is_valid());
- ASSERT(operand.am_ == Offset);
- vldr(dst, operand.rn(), operand.offset(), cond);
-}
-
-
-void Assembler::vstr(const DwVfpRegister src,
- const Register base,
- int offset,
- const Condition cond) {
- // MEM(Rbase + offset) = Dsrc.
- // Instruction details available in ARM DDI 0406A, A8-786.
- // cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) |
- // Vsrc(15-12) | 1011(11-8) | (offset/4)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- int u = 1;
- if (offset < 0) {
- offset = -offset;
- u = 0;
- }
- ASSERT(offset >= 0);
- if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
- 0xB*B8 | ((offset / 4) & 255));
- } else {
- // Larger offsets must be handled by computing the correct address
- // in the ip register.
- ASSERT(!base.is(ip));
- if (u == 1) {
- add(ip, base, Operand(offset));
- } else {
- sub(ip, base, Operand(offset));
- }
- emit(cond | 0xD0*B20 | ip.code()*B16 | src.code()*B12 | 0xB*B8);
- }
-}
-
-
-void Assembler::vstr(const DwVfpRegister src,
- const MemOperand& operand,
- const Condition cond) {
- ASSERT(!operand.rm().is_valid());
- ASSERT(operand.am_ == Offset);
- vstr(src, operand.rn(), operand.offset(), cond);
-}
-
-
-void Assembler::vstr(const SwVfpRegister src,
- const Register base,
- int offset,
- const Condition cond) {
- // MEM(Rbase + offset) = SSrc.
- // Instruction details available in ARM DDI 0406A, A8-786.
- // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
- // Vdst(15-12) | 1010(11-8) | (offset/4)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- int u = 1;
- if (offset < 0) {
- offset = -offset;
- u = 0;
- }
- int sd, d;
- src.split_code(&sd, &d);
- ASSERT(offset >= 0);
- if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
- 0xA*B8 | ((offset / 4) & 255));
- } else {
- // Larger offsets must be handled by computing the correct address
- // in the ip register.
- ASSERT(!base.is(ip));
- if (u == 1) {
- add(ip, base, Operand(offset));
- } else {
- sub(ip, base, Operand(offset));
- }
- emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
- }
-}
-
-
-void Assembler::vstr(const SwVfpRegister src,
- const MemOperand& operand,
- const Condition cond) {
- ASSERT(!operand.rm().is_valid());
- ASSERT(operand.am_ == Offset);
- vldr(src, operand.rn(), operand.offset(), cond);
-}
-
-
-static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
- uint64_t i;
- memcpy(&i, &d, 8);
-
- *lo = i & 0xffffffff;
- *hi = i >> 32;
-}
-
-// Only works for little endian floating point formats.
-// We don't support VFP on the mixed endian floating point platform.
-static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
-
- // VMOV can accept an immediate of the form:
- //
- // +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
- //
- // The immediate is encoded using an 8-bit quantity, comprised of two
- // 4-bit fields. For an 8-bit immediate of the form:
- //
- // [abcdefgh]
- //
- // where a is the MSB and h is the LSB, an immediate 64-bit double can be
- // created of the form:
- //
- // [aBbbbbbb,bbcdefgh,00000000,00000000,
- // 00000000,00000000,00000000,00000000]
- //
- // where B = ~b.
- //
-
- uint32_t lo, hi;
- DoubleAsTwoUInt32(d, &lo, &hi);
-
- // The most obvious constraint is the long block of zeroes.
- if ((lo != 0) || ((hi & 0xffff) != 0)) {
- return false;
- }
-
- // Bits 62:55 must be all clear or all set.
- if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
- return false;
- }
-
- // Bit 63 must be NOT bit 62.
- if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
- return false;
- }
-
- // Create the encoded immediate in the form:
- // [00000000,0000abcd,00000000,0000efgh]
- *encoding = (hi >> 16) & 0xf; // Low nybble.
- *encoding |= (hi >> 4) & 0x70000; // Low three bits of the high nybble.
- *encoding |= (hi >> 12) & 0x80000; // Top bit of the high nybble.
-
- return true;
-}
-
-
-void Assembler::vmov(const DwVfpRegister dst,
- double imm,
- const Condition cond) {
- // Dd = immediate
- // Instruction details available in ARM DDI 0406B, A8-640.
- ASSERT(CpuFeatures::IsEnabled(VFP3));
-
- uint32_t enc;
- if (FitsVMOVDoubleImmediate(imm, &enc)) {
- // The double can be encoded in the instruction.
- emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc);
- } else {
- // Synthesise the double from ARM immediates. This could be implemented
- // using vldr from a constant pool.
- uint32_t lo, hi;
- DoubleAsTwoUInt32(imm, &lo, &hi);
-
- if (lo == hi) {
- // If the lo and hi parts of the double are equal, the literal is easier
- // to create. This is the case with 0.0.
- mov(ip, Operand(lo));
- vmov(dst, ip, ip);
- } else {
- // Move the low part of the double into the lower of the corresponsing S
- // registers of D register dst.
- mov(ip, Operand(lo));
- vmov(dst.low(), ip, cond);
-
- // Move the high part of the double into the higher of the corresponsing S
- // registers of D register dst.
- mov(ip, Operand(hi));
- vmov(dst.high(), ip, cond);
- }
- }
-}
-
-
-void Assembler::vmov(const SwVfpRegister dst,
- const SwVfpRegister src,
- const Condition cond) {
- // Sd = Sm
- // Instruction details available in ARM DDI 0406B, A8-642.
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- int sd, d, sm, m;
- dst.split_code(&sd, &d);
- src.split_code(&sm, &m);
- emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
-}
-
-
-void Assembler::vmov(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond) {
- // Dd = Dm
- // Instruction details available in ARM DDI 0406B, A8-642.
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | 0xB*B20 |
- dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
-}
-
-
-void Assembler::vmov(const DwVfpRegister dst,
- const Register src1,
- const Register src2,
- const Condition cond) {
- // Dm = <Rt,Rt2>.
- // Instruction details available in ARM DDI 0406A, A8-646.
- // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
- // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT(!src1.is(pc) && !src2.is(pc));
- emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
- src1.code()*B12 | 0xB*B8 | B4 | dst.code());
-}
-
-
-void Assembler::vmov(const Register dst1,
- const Register dst2,
- const DwVfpRegister src,
- const Condition cond) {
- // <Rt,Rt2> = Dm.
- // Instruction details available in ARM DDI 0406A, A8-646.
- // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
- // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT(!dst1.is(pc) && !dst2.is(pc));
- emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
- dst1.code()*B12 | 0xB*B8 | B4 | src.code());
-}
-
-
-void Assembler::vmov(const SwVfpRegister dst,
- const Register src,
- const Condition cond) {
- // Sn = Rt.
- // Instruction details available in ARM DDI 0406A, A8-642.
- // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
- // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT(!src.is(pc));
- int sn, n;
- dst.split_code(&sn, &n);
- emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
-}
-
-
-void Assembler::vmov(const Register dst,
- const SwVfpRegister src,
- const Condition cond) {
- // Rt = Sn.
- // Instruction details available in ARM DDI 0406A, A8-642.
- // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
- // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT(!dst.is(pc));
- int sn, n;
- src.split_code(&sn, &n);
- emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
-}
-
-
-// Type of data to read from or write to VFP register.
-// Used as specifier in generic vcvt instruction.
-enum VFPType { S32, U32, F32, F64 };
-
-
-static bool IsSignedVFPType(VFPType type) {
- switch (type) {
- case S32:
- return true;
- case U32:
- return false;
- default:
- UNREACHABLE();
- return false;
- }
-}
-
-
-static bool IsIntegerVFPType(VFPType type) {
- switch (type) {
- case S32:
- case U32:
- return true;
- case F32:
- case F64:
- return false;
- default:
- UNREACHABLE();
- return false;
- }
-}
-
-
-static bool IsDoubleVFPType(VFPType type) {
- switch (type) {
- case F32:
- return false;
- case F64:
- return true;
- default:
- UNREACHABLE();
- return false;
- }
-}
-
-
-// Split five bit reg_code based on size of reg_type.
-// 32-bit register codes are Vm:M
-// 64-bit register codes are M:Vm
-// where Vm is four bits, and M is a single bit.
-static void SplitRegCode(VFPType reg_type,
- int reg_code,
- int* vm,
- int* m) {
- ASSERT((reg_code >= 0) && (reg_code <= 31));
- if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
- // 32 bit type.
- *m = reg_code & 0x1;
- *vm = reg_code >> 1;
- } else {
- // 64 bit type.
- *m = (reg_code & 0x10) >> 4;
- *vm = reg_code & 0x0F;
- }
-}
-
-
-// Encode vcvt.src_type.dst_type instruction.
-static Instr EncodeVCVT(const VFPType dst_type,
- const int dst_code,
- const VFPType src_type,
- const int src_code,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(src_type != dst_type);
- int D, Vd, M, Vm;
- SplitRegCode(src_type, src_code, &Vm, &M);
- SplitRegCode(dst_type, dst_code, &Vd, &D);
-
- if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
- // Conversion between IEEE floating point and 32-bit integer.
- // Instruction details available in ARM DDI 0406B, A8.6.295.
- // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
- // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
-
- int sz, opc2, op;
-
- if (IsIntegerVFPType(dst_type)) {
- opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
- sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
- op = mode;
- } else {
- ASSERT(IsIntegerVFPType(src_type));
- opc2 = 0x0;
- sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
- op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
- }
-
- return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
- Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
- } else {
- // Conversion between IEEE double and single precision.
- // Instruction details available in ARM DDI 0406B, A8.6.298.
- // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
- return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
- Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
- }
-}
-
-
-void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
-}
-
-
-void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
-}
-
-
-void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
-}
-
-
-void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
-}
-
-
-void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
-}
-
-
-void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
-}
-
-
-void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
-}
-
-
-void Assembler::vneg(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond) {
- emit(cond | 0xE*B24 | 0xB*B20 | B16 | dst.code()*B12 |
- 0x5*B9 | B8 | B6 | src.code());
-}
-
-
-void Assembler::vabs(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond) {
- emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 |
- 0x5*B9 | B8 | 0x3*B6 | src.code());
-}
-
-
-void Assembler::vadd(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vadd(Dn, Dm) double precision floating point addition.
- // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406A, A8-536.
- // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
-}
-
-
-void Assembler::vsub(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vsub(Dn, Dm) double precision floating point subtraction.
- // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406A, A8-784.
- // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
-}
-
-
-void Assembler::vmul(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vmul(Dn, Dm) double precision floating point multiplication.
- // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406A, A8-784.
- // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
-}
-
-
-void Assembler::vdiv(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vdiv(Dn, Dm) double precision floating point division.
- // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406A, A8-584.
- // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
-}
-
-
-void Assembler::vcmp(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // vcmp(Dd, Dm) double precision floating point comparison.
- // Instruction details available in ARM DDI 0406A, A8-570.
- // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
- src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
-}
-
-
-void Assembler::vcmp(const DwVfpRegister src1,
- const double src2,
- const Condition cond) {
- // vcmp(Dd, Dm) double precision floating point comparison.
- // Instruction details available in ARM DDI 0406A, A8-570.
- // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT(src2 == 0.0);
- emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
- src1.code()*B12 | 0x5*B9 | B8 | B6);
-}
-
-
-void Assembler::vmsr(Register dst, Condition cond) {
- // Instruction details available in ARM DDI 0406A, A8-652.
- // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
- // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | 0xE*B20 | B16 |
- dst.code()*B12 | 0xA*B8 | B4);
-}
-
-
-void Assembler::vmrs(Register dst, Condition cond) {
- // Instruction details available in ARM DDI 0406A, A8-652.
- // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
- // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | 0xF*B20 | B16 |
- dst.code()*B12 | 0xA*B8 | B4);
-}
-
-
-void Assembler::vsqrt(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond) {
- // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
- dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
-}
-
-
-// Pseudo instructions.
-void Assembler::nop(int type) {
- // This is mov rx, rx.
- ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
- emit(al | 13*B21 | type*B12 | type);
-}
-
-
-bool Assembler::IsNop(Instr instr, int type) {
- // Check for mov rx, rx where x = type.
- ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
- return instr == (al | 13*B21 | type*B12 | type);
-}
-
-
-bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
- uint32_t dummy1;
- uint32_t dummy2;
- return fits_shifter(imm32, &dummy1, &dummy2, NULL);
-}
-
-
-void Assembler::BlockConstPoolFor(int instructions) {
- BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
-}
-
-
-// Debugging.
-void Assembler::RecordJSReturn() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordDebugBreakSlot() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
-}
-
-
-void Assembler::RecordComment(const char* msg) {
- if (FLAG_code_comments) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
-
-void Assembler::GrowBuffer() {
- if (!own_buffer_) FATAL("external code buffer is too small");
-
- // Compute new buffer size.
- CodeDesc desc; // the new buffer
- if (buffer_size_ < 4*KB) {
- desc.buffer_size = 4*KB;
- } else if (buffer_size_ < 1*MB) {
- desc.buffer_size = 2*buffer_size_;
- } else {
- desc.buffer_size = buffer_size_ + 1*MB;
- }
- CHECK_GT(desc.buffer_size, 0); // no overflow
-
- // Setup new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
-
- desc.instr_size = pc_offset();
- desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
-
- // Copy the data.
- int pc_delta = desc.buffer - buffer_;
- int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.pos(), desc.reloc_size);
-
- // Switch buffers.
- DeleteArray(buffer_);
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
- pc_ += pc_delta;
- reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.last_pc() + pc_delta);
-
- // None of our relocation types are pc relative pointing outside the code
- // buffer nor pc absolute pointing inside the code buffer, so there is no need
- // to relocate any emitted relocation entries.
-
- // Relocate pending relocation entries.
- for (int i = 0; i < num_prinfo_; i++) {
- RelocInfo& rinfo = prinfo_[i];
- ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
- rinfo.rmode() != RelocInfo::POSITION);
- if (rinfo.rmode() != RelocInfo::JS_RETURN) {
- rinfo.set_pc(rinfo.pc() + pc_delta);
- }
- }
-}
-
-
-void Assembler::db(uint8_t data) {
- // No relocation info should be pending while using db. db is used
- // to write pure data with no pointers and the constant pool should
- // be emitted before using db.
- ASSERT(num_prinfo_ == 0);
- CheckBuffer();
- *reinterpret_cast<uint8_t*>(pc_) = data;
- pc_ += sizeof(uint8_t);
-}
-
-
-void Assembler::dd(uint32_t data) {
- // No relocation info should be pending while using dd. dd is used
- // to write pure data with no pointers and the constant pool should
- // be emitted before using dd.
- ASSERT(num_prinfo_ == 0);
- CheckBuffer();
- *reinterpret_cast<uint32_t*>(pc_) = data;
- pc_ += sizeof(uint32_t);
-}
-
-
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
- if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
- // Adjust code for new modes.
- ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
- || RelocInfo::IsJSReturn(rmode)
- || RelocInfo::IsComment(rmode)
- || RelocInfo::IsPosition(rmode));
- // These modes do not need an entry in the constant pool.
- } else {
- ASSERT(num_prinfo_ < kMaxNumPRInfo);
- prinfo_[num_prinfo_++] = rinfo;
- // Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info.
- BlockConstPoolBefore(pc_offset() + kInstrSize);
- }
- if (rinfo.rmode() != RelocInfo::NONE) {
- // Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- if (!Serializer::enabled() && !emit_debug_code()) {
- return;
- }
- }
- ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
- reloc_info_writer.Write(&rinfo);
- }
-}
-
-
-void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
- // Calculate the offset of the next check. It will be overwritten
- // when a const pool is generated or when const pools are being
- // blocked for a specific range.
- next_buffer_check_ = pc_offset() + kCheckConstInterval;
-
- // There is nothing to do if there are no pending relocation info entries.
- if (num_prinfo_ == 0) return;
-
- // We emit a constant pool at regular intervals of about kDistBetweenPools
- // or when requested by parameter force_emit (e.g. after each function).
- // We prefer not to emit a jump unless the max distance is reached or if we
- // are running low on slots, which can happen if a lot of constants are being
- // emitted (e.g. --debug-code and many static references).
- int dist = pc_offset() - last_const_pool_end_;
- if (!force_emit && dist < kMaxDistBetweenPools &&
- (require_jump || dist < kDistBetweenPools) &&
- // TODO(1236125): Cleanup the "magic" number below. We know that
- // the code generation will test every kCheckConstIntervalInst.
- // Thus we are safe as long as we generate less than 7 constant
- // entries per instruction.
- (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
- return;
- }
-
- // If we did not return by now, we need to emit the constant pool soon.
-
- // However, some small sequences of instructions must not be broken up by the
- // insertion of a constant pool; such sequences are protected by setting
- // either const_pool_blocked_nesting_ or no_const_pool_before_, which are
- // both checked here. Also, recursive calls to CheckConstPool are blocked by
- // no_const_pool_before_.
- if (const_pool_blocked_nesting_ > 0 || pc_offset() < no_const_pool_before_) {
- // Emission is currently blocked; make sure we try again as soon as
- // possible.
- if (const_pool_blocked_nesting_ > 0) {
- next_buffer_check_ = pc_offset() + kInstrSize;
- } else {
- next_buffer_check_ = no_const_pool_before_;
- }
-
- // Something is wrong if emission is forced and blocked at the same time.
- ASSERT(!force_emit);
- return;
- }
-
- int jump_instr = require_jump ? kInstrSize : 0;
-
- // Check that the code buffer is large enough before emitting the constant
- // pool and relocation information (include the jump over the pool and the
- // constant pool marker).
- int max_needed_space =
- jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
- while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
-
- // Block recursive calls to CheckConstPool.
- BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
- num_prinfo_*kInstrSize);
- // Don't bother to check for the emit calls below.
- next_buffer_check_ = no_const_pool_before_;
-
- // Emit jump over constant pool if necessary.
- Label after_pool;
- if (require_jump) b(&after_pool);
-
- RecordComment("[ Constant Pool");
-
- // Put down constant pool marker "Undefined instruction" as specified by
- // A5.6 (ARMv7) Instruction set encoding.
- emit(kConstantPoolMarker | num_prinfo_);
-
- // Emit constant pool entries.
- for (int i = 0; i < num_prinfo_; i++) {
- RelocInfo& rinfo = prinfo_[i];
- ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
- rinfo.rmode() != RelocInfo::POSITION &&
- rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
- Instr instr = instr_at(rinfo.pc());
-
- // Instruction to patch must be a ldr/str [pc, #offset].
- // P and U set, B and W clear, Rn == pc, offset12 still 0.
- ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | kOff12Mask)) ==
- (2*B25 | P | U | pc.code()*B16));
- int delta = pc_ - rinfo.pc() - 8;
- ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32
- if (delta < 0) {
- instr &= ~U;
- delta = -delta;
- }
- ASSERT(is_uint12(delta));
- instr_at_put(rinfo.pc(), instr + delta);
- emit(rinfo.data());
- }
- num_prinfo_ = 0;
- last_const_pool_end_ = pc_offset();
-
- RecordComment("]");
-
- if (after_pool.is_linked()) {
- bind(&after_pool);
- }
-
- // Since a constant pool was just emitted, move the check offset forward by
- // the standard interval.
- next_buffer_check_ = pc_offset() + kCheckConstInterval;
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/assembler-arm.h b/src/3rdparty/v8/src/arm/assembler-arm.h
deleted file mode 100644
index c9f8cfe..0000000
--- a/src/3rdparty/v8/src/arm/assembler-arm.h
+++ /dev/null
@@ -1,1358 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
-
-// A light-weight ARM Assembler
-// Generates user mode instructions for the ARM architecture up to version 5
-
-#ifndef V8_ARM_ASSEMBLER_ARM_H_
-#define V8_ARM_ASSEMBLER_ARM_H_
-#include <stdio.h>
-#include "assembler.h"
-#include "constants-arm.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-
-// Core register
-struct Register {
- static const int kNumRegisters = 16;
- static const int kNumAllocatableRegisters = 8;
-
- static int ToAllocationIndex(Register reg) {
- ASSERT(reg.code() < kNumAllocatableRegisters);
- return reg.code();
- }
-
- static Register FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- return from_code(index);
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- const char* const names[] = {
- "r0",
- "r1",
- "r2",
- "r3",
- "r4",
- "r5",
- "r6",
- "r7",
- };
- return names[index];
- }
-
- static Register from_code(int code) {
- Register r = { code };
- return r;
- }
-
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
-
- void set_code(int code) {
- code_ = code;
- ASSERT(is_valid());
- }
-
- // Unfortunately we can't make this private in a struct.
- int code_;
-};
-
-const Register no_reg = { -1 };
-
-const Register r0 = { 0 };
-const Register r1 = { 1 };
-const Register r2 = { 2 };
-const Register r3 = { 3 };
-const Register r4 = { 4 };
-const Register r5 = { 5 };
-const Register r6 = { 6 };
-const Register r7 = { 7 };
-const Register r8 = { 8 }; // Used as context register.
-const Register r9 = { 9 }; // Used as lithium codegen scratch register.
-const Register r10 = { 10 }; // Used as roots register.
-const Register fp = { 11 };
-const Register ip = { 12 };
-const Register sp = { 13 };
-const Register lr = { 14 };
-const Register pc = { 15 };
-
-// Single word VFP register.
-struct SwVfpRegister {
- bool is_valid() const { return 0 <= code_ && code_ < 32; }
- bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
- void split_code(int* vm, int* m) const {
- ASSERT(is_valid());
- *m = code_ & 0x1;
- *vm = code_ >> 1;
- }
-
- int code_;
-};
-
-
-// Double word VFP register.
-struct DwVfpRegister {
- // d0 has been excluded from allocation. This is following ia32
- // where xmm0 is excluded. This should be revisited.
- // Currently d0 is used as a scratch register.
- // d1 has also been excluded from allocation to be used as a scratch
- // register as well.
- static const int kNumRegisters = 16;
- static const int kNumAllocatableRegisters = 15;
-
- static int ToAllocationIndex(DwVfpRegister reg) {
- ASSERT(reg.code() != 0);
- return reg.code() - 1;
- }
-
- static DwVfpRegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- return from_code(index + 1);
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- const char* const names[] = {
- "d1",
- "d2",
- "d3",
- "d4",
- "d5",
- "d6",
- "d7",
- "d8",
- "d9",
- "d10",
- "d11",
- "d12",
- "d13",
- "d14",
- "d15"
- };
- return names[index];
- }
-
- static DwVfpRegister from_code(int code) {
- DwVfpRegister r = { code };
- return r;
- }
-
- // Supporting d0 to d15, can be later extended to d31.
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
- bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
- SwVfpRegister low() const {
- SwVfpRegister reg;
- reg.code_ = code_ * 2;
-
- ASSERT(reg.is_valid());
- return reg;
- }
- SwVfpRegister high() const {
- SwVfpRegister reg;
- reg.code_ = (code_ * 2) + 1;
-
- ASSERT(reg.is_valid());
- return reg;
- }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
- void split_code(int* vm, int* m) const {
- ASSERT(is_valid());
- *m = (code_ & 0x10) >> 4;
- *vm = code_ & 0x0F;
- }
-
- int code_;
-};
-
-
-typedef DwVfpRegister DoubleRegister;
-
-
-// Support for the VFP registers s0 to s31 (d0 to d15).
-// Note that "s(N):s(N+1)" is the same as "d(N/2)".
-const SwVfpRegister s0 = { 0 };
-const SwVfpRegister s1 = { 1 };
-const SwVfpRegister s2 = { 2 };
-const SwVfpRegister s3 = { 3 };
-const SwVfpRegister s4 = { 4 };
-const SwVfpRegister s5 = { 5 };
-const SwVfpRegister s6 = { 6 };
-const SwVfpRegister s7 = { 7 };
-const SwVfpRegister s8 = { 8 };
-const SwVfpRegister s9 = { 9 };
-const SwVfpRegister s10 = { 10 };
-const SwVfpRegister s11 = { 11 };
-const SwVfpRegister s12 = { 12 };
-const SwVfpRegister s13 = { 13 };
-const SwVfpRegister s14 = { 14 };
-const SwVfpRegister s15 = { 15 };
-const SwVfpRegister s16 = { 16 };
-const SwVfpRegister s17 = { 17 };
-const SwVfpRegister s18 = { 18 };
-const SwVfpRegister s19 = { 19 };
-const SwVfpRegister s20 = { 20 };
-const SwVfpRegister s21 = { 21 };
-const SwVfpRegister s22 = { 22 };
-const SwVfpRegister s23 = { 23 };
-const SwVfpRegister s24 = { 24 };
-const SwVfpRegister s25 = { 25 };
-const SwVfpRegister s26 = { 26 };
-const SwVfpRegister s27 = { 27 };
-const SwVfpRegister s28 = { 28 };
-const SwVfpRegister s29 = { 29 };
-const SwVfpRegister s30 = { 30 };
-const SwVfpRegister s31 = { 31 };
-
-const DwVfpRegister no_dreg = { -1 };
-const DwVfpRegister d0 = { 0 };
-const DwVfpRegister d1 = { 1 };
-const DwVfpRegister d2 = { 2 };
-const DwVfpRegister d3 = { 3 };
-const DwVfpRegister d4 = { 4 };
-const DwVfpRegister d5 = { 5 };
-const DwVfpRegister d6 = { 6 };
-const DwVfpRegister d7 = { 7 };
-const DwVfpRegister d8 = { 8 };
-const DwVfpRegister d9 = { 9 };
-const DwVfpRegister d10 = { 10 };
-const DwVfpRegister d11 = { 11 };
-const DwVfpRegister d12 = { 12 };
-const DwVfpRegister d13 = { 13 };
-const DwVfpRegister d14 = { 14 };
-const DwVfpRegister d15 = { 15 };
-
-
-// Coprocessor register
-struct CRegister {
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
- bool is(CRegister creg) const { return code_ == creg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
-
- // Unfortunately we can't make this private in a struct.
- int code_;
-};
-
-
-const CRegister no_creg = { -1 };
-
-const CRegister cr0 = { 0 };
-const CRegister cr1 = { 1 };
-const CRegister cr2 = { 2 };
-const CRegister cr3 = { 3 };
-const CRegister cr4 = { 4 };
-const CRegister cr5 = { 5 };
-const CRegister cr6 = { 6 };
-const CRegister cr7 = { 7 };
-const CRegister cr8 = { 8 };
-const CRegister cr9 = { 9 };
-const CRegister cr10 = { 10 };
-const CRegister cr11 = { 11 };
-const CRegister cr12 = { 12 };
-const CRegister cr13 = { 13 };
-const CRegister cr14 = { 14 };
-const CRegister cr15 = { 15 };
-
-
-// Coprocessor number
-enum Coprocessor {
- p0 = 0,
- p1 = 1,
- p2 = 2,
- p3 = 3,
- p4 = 4,
- p5 = 5,
- p6 = 6,
- p7 = 7,
- p8 = 8,
- p9 = 9,
- p10 = 10,
- p11 = 11,
- p12 = 12,
- p13 = 13,
- p14 = 14,
- p15 = 15
-};
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Operands
-
-// Class Operand represents a shifter operand in data processing instructions
-class Operand BASE_EMBEDDED {
- public:
- // immediate
- INLINE(explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE));
- INLINE(explicit Operand(const ExternalReference& f));
- INLINE(explicit Operand(const char* s));
- explicit Operand(Handle<Object> handle);
- INLINE(explicit Operand(Smi* value));
-
- // rm
- INLINE(explicit Operand(Register rm));
-
- // rm <shift_op> shift_imm
- explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
-
- // rm <shift_op> rs
- explicit Operand(Register rm, ShiftOp shift_op, Register rs);
-
- // Return true if this is a register operand.
- INLINE(bool is_reg() const);
-
- // Return true if this operand fits in one instruction so that no
- // 2-instruction solution with a load into the ip register is necessary. If
- // the instruction this operand is used for is a MOV or MVN instruction the
- // actual instruction to use is required for this calculation. For other
- // instructions instr is ignored.
- bool is_single_instruction(Instr instr = 0) const;
- bool must_use_constant_pool() const;
-
- inline int32_t immediate() const {
- ASSERT(!rm_.is_valid());
- return imm32_;
- }
-
- Register rm() const { return rm_; }
- Register rs() const { return rs_; }
- ShiftOp shift_op() const { return shift_op_; }
-
- private:
- Register rm_;
- Register rs_;
- ShiftOp shift_op_;
- int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
- int32_t imm32_; // valid if rm_ == no_reg
- RelocInfo::Mode rmode_;
-
- friend class Assembler;
-};
-
-
-// Class MemOperand represents a memory operand in load and store instructions
-class MemOperand BASE_EMBEDDED {
- public:
- // [rn +/- offset] Offset/NegOffset
- // [rn +/- offset]! PreIndex/NegPreIndex
- // [rn], +/- offset PostIndex/NegPostIndex
- // offset is any signed 32-bit value; offset is first loaded to register ip if
- // it does not fit the addressing mode (12-bit unsigned and sign bit)
- explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);
-
- // [rn +/- rm] Offset/NegOffset
- // [rn +/- rm]! PreIndex/NegPreIndex
- // [rn], +/- rm PostIndex/NegPostIndex
- explicit MemOperand(Register rn, Register rm, AddrMode am = Offset);
-
- // [rn +/- rm <shift_op> shift_imm] Offset/NegOffset
- // [rn +/- rm <shift_op> shift_imm]! PreIndex/NegPreIndex
- // [rn], +/- rm <shift_op> shift_imm PostIndex/NegPostIndex
- explicit MemOperand(Register rn, Register rm,
- ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
-
- void set_offset(int32_t offset) {
- ASSERT(rm_.is(no_reg));
- offset_ = offset;
- }
-
- uint32_t offset() const {
- ASSERT(rm_.is(no_reg));
- return offset_;
- }
-
- Register rn() const { return rn_; }
- Register rm() const { return rm_; }
-
- bool OffsetIsUint12Encodable() const {
- return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_);
- }
-
- private:
- Register rn_; // base
- Register rm_; // register offset
- int32_t offset_; // valid if rm_ == no_reg
- ShiftOp shift_op_;
- int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
- AddrMode am_; // bits P, U, and W
-
- friend class Assembler;
-};
-
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
-
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- ASSERT(initialized_);
- if (f == VFP3 && !FLAG_enable_vfp3) return false;
- return (supported_ & (1u << f)) != 0;
- }
-
-#ifdef DEBUG
- // Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
- ASSERT(initialized_);
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL) {
- // When no isolate is available, work as if we're running in
- // release mode.
- return IsSupported(f);
- }
- unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
- return (enabled & (1u << f)) != 0;
- }
-#endif
-
- // Enable a specified feature within a scope.
- class Scope BASE_EMBEDDED {
-#ifdef DEBUG
- public:
- explicit Scope(CpuFeature f) {
- unsigned mask = 1u << f;
- ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() ||
- (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
- isolate_ = Isolate::UncheckedCurrent();
- old_enabled_ = 0;
- if (isolate_ != NULL) {
- old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
- isolate_->set_enabled_cpu_features(old_enabled_ | mask);
- }
- }
- ~Scope() {
- ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
- if (isolate_ != NULL) {
- isolate_->set_enabled_cpu_features(old_enabled_);
- }
- }
- private:
- Isolate* isolate_;
- unsigned old_enabled_;
-#else
- public:
- explicit Scope(CpuFeature f) {}
-#endif
- };
-
- class TryForceFeatureScope BASE_EMBEDDED {
- public:
- explicit TryForceFeatureScope(CpuFeature f)
- : old_supported_(CpuFeatures::supported_) {
- if (CanForce()) {
- CpuFeatures::supported_ |= (1u << f);
- }
- }
-
- ~TryForceFeatureScope() {
- if (CanForce()) {
- CpuFeatures::supported_ = old_supported_;
- }
- }
-
- private:
- static bool CanForce() {
- // It's only safe to temporarily force support of CPU features
- // when there's only a single isolate, which is guaranteed when
- // the serializer is enabled.
- return Serializer::enabled();
- }
-
- const unsigned old_supported_;
- };
-
- private:
-#ifdef DEBUG
- static bool initialized_;
-#endif
- static unsigned supported_;
- static unsigned found_by_runtime_probing_;
-
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
-};
-
-
-extern const Instr kMovLrPc;
-extern const Instr kLdrPCMask;
-extern const Instr kLdrPCPattern;
-extern const Instr kBlxRegMask;
-extern const Instr kBlxRegPattern;
-
-extern const Instr kMovMvnMask;
-extern const Instr kMovMvnPattern;
-extern const Instr kMovMvnFlip;
-
-extern const Instr kMovLeaveCCMask;
-extern const Instr kMovLeaveCCPattern;
-extern const Instr kMovwMask;
-extern const Instr kMovwPattern;
-extern const Instr kMovwLeaveCCFlip;
-
-extern const Instr kCmpCmnMask;
-extern const Instr kCmpCmnPattern;
-extern const Instr kCmpCmnFlip;
-extern const Instr kAddSubFlip;
-extern const Instr kAndBicFlip;
-
-
-
-class Assembler : public AssemblerBase {
- public:
- // Create an assembler. Instructions and relocation information are emitted
- // into a buffer, with the instructions starting from the beginning and the
- // relocation information starting from the end of the buffer. See CodeDesc
- // for a detailed comment on the layout (globals.h).
- //
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
- //
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
- Assembler(Isolate* isolate, void* buffer, int buffer_size);
- ~Assembler();
-
- // Overrides the default provided by FLAG_debug_code.
- void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
-
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- void GetCode(CodeDesc* desc);
-
- // Label operations & relative jumps (PPUM Appendix D)
- //
- // Takes a branch opcode (cc) and a label (L) and generates
- // either a backward branch or a forward branch and links it
- // to the label fixup chain. Usage:
- //
- // Label L; // unbound label
- // j(cc, &L); // forward branch to unbound label
- // bind(&L); // bind label to the current pc
- // j(cc, &L); // backward branch to bound label
- // bind(&L); // illegal: a label may be bound only once
- //
- // Note: The same Label can be used for forward and backward branches
- // but it may be bound only once.
-
- void bind(Label* L); // binds an unbound label L to the current code position
-
- // Returns the branch offset to the given label from the current code position
- // Links the label to the current position if it is still unbound
- // Manages the jump elimination optimization if the second parameter is true.
- int branch_offset(Label* L, bool jump_elimination_allowed);
-
- // Puts a labels target address at the given position.
- // The high 8 bits are set to zero.
- void label_at_put(Label* L, int at_offset);
-
- // Return the address in the constant pool of the code target address used by
- // the branch/call instruction at pc.
- INLINE(static Address target_address_address_at(Address pc));
-
- // Read/Modify the code target address in the branch/call instruction at pc.
- INLINE(static Address target_address_at(Address pc));
- INLINE(static void set_target_address_at(Address pc, Address target));
-
- // This sets the branch destination (which is in the constant pool on ARM).
- // This is for calls and branches within generated code.
- inline static void set_target_at(Address constant_pool_entry, Address target);
-
- // This sets the branch destination (which is in the constant pool on ARM).
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address constant_pool_entry,
- Address target) {
- set_target_at(constant_pool_entry, target);
- }
-
- // Here we are patching the address in the constant pool, not the actual call
- // instruction. The address in the constant pool is the same size as a
- // pointer.
- static const int kCallTargetSize = kPointerSize;
- static const int kExternalTargetSize = kPointerSize;
-
- // Size of an instruction.
- static const int kInstrSize = sizeof(Instr);
-
- // Distance between the instruction referring to the address of the call
- // target and the return address.
-#ifdef USE_BLX
- // Call sequence is:
- // ldr ip, [pc, #...] @ call address
- // blx ip
- // @ return address
- static const int kCallTargetAddressOffset = 2 * kInstrSize;
-#else
- // Call sequence is:
- // mov lr, pc
- // ldr pc, [pc, #...] @ call address
- // @ return address
- static const int kCallTargetAddressOffset = kInstrSize;
-#endif
-
- // Distance between start of patched return sequence and the emitted address
- // to jump to.
-#ifdef USE_BLX
- // Patched return sequence is:
- // ldr ip, [pc, #0] @ emited address and start
- // blx ip
- static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize;
-#else
- // Patched return sequence is:
- // mov lr, pc @ start of sequence
- // ldr pc, [pc, #-4] @ emited address
- static const int kPatchReturnSequenceAddressOffset = kInstrSize;
-#endif
-
- // Distance between start of patched debug break slot and the emitted address
- // to jump to.
-#ifdef USE_BLX
- // Patched debug break slot code is:
- // ldr ip, [pc, #0] @ emited address and start
- // blx ip
- static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
-#else
- // Patched debug break slot code is:
- // mov lr, pc @ start of sequence
- // ldr pc, [pc, #-4] @ emited address
- static const int kPatchDebugBreakSlotAddressOffset = kInstrSize;
-#endif
-
- // Difference between address of current opcode and value read from pc
- // register.
- static const int kPcLoadDelta = 8;
-
- static const int kJSReturnSequenceInstructions = 4;
- static const int kDebugBreakSlotInstructions = 3;
- static const int kDebugBreakSlotLength =
- kDebugBreakSlotInstructions * kInstrSize;
-
- // ---------------------------------------------------------------------------
- // Code generation
-
- // Insert the smallest number of nop instructions
- // possible to align the pc offset to a multiple
- // of m. m must be a power of 2 (>= 4).
- void Align(int m);
- // Aligns code to something that's optimal for a jump target for the platform.
- void CodeTargetAlign();
-
- // Branch instructions
- void b(int branch_offset, Condition cond = al);
- void bl(int branch_offset, Condition cond = al);
- void blx(int branch_offset); // v5 and above
- void blx(Register target, Condition cond = al); // v5 and above
- void bx(Register target, Condition cond = al); // v5 and above, plus v4t
-
- // Convenience branch instructions using labels
- void b(Label* L, Condition cond = al) {
- b(branch_offset(L, cond == al), cond);
- }
- void b(Condition cond, Label* L) { b(branch_offset(L, cond == al), cond); }
- void bl(Label* L, Condition cond = al) { bl(branch_offset(L, false), cond); }
- void bl(Condition cond, Label* L) { bl(branch_offset(L, false), cond); }
- void blx(Label* L) { blx(branch_offset(L, false)); } // v5 and above
-
- // Data-processing instructions
-
- void and_(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void eor(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void sub(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
- void sub(Register dst, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al) {
- sub(dst, src1, Operand(src2), s, cond);
- }
-
- void rsb(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void add(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
- void add(Register dst, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al) {
- add(dst, src1, Operand(src2), s, cond);
- }
-
- void adc(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void sbc(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void rsc(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void tst(Register src1, const Operand& src2, Condition cond = al);
- void tst(Register src1, Register src2, Condition cond = al) {
- tst(src1, Operand(src2), cond);
- }
-
- void teq(Register src1, const Operand& src2, Condition cond = al);
-
- void cmp(Register src1, const Operand& src2, Condition cond = al);
- void cmp(Register src1, Register src2, Condition cond = al) {
- cmp(src1, Operand(src2), cond);
- }
- void cmp_raw_immediate(Register src1, int raw_immediate, Condition cond = al);
-
- void cmn(Register src1, const Operand& src2, Condition cond = al);
-
- void orr(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
- void orr(Register dst, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al) {
- orr(dst, src1, Operand(src2), s, cond);
- }
-
- void mov(Register dst, const Operand& src,
- SBit s = LeaveCC, Condition cond = al);
- void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al) {
- mov(dst, Operand(src), s, cond);
- }
-
- // ARMv7 instructions for loading a 32 bit immediate in two instructions.
- // This may actually emit a different mov instruction, but on an ARMv7 it
- // is guaranteed to only emit one instruction.
- void movw(Register reg, uint32_t immediate, Condition cond = al);
- // The constant for movt should be in the range 0-0xffff.
- void movt(Register reg, uint32_t immediate, Condition cond = al);
-
- void bic(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void mvn(Register dst, const Operand& src,
- SBit s = LeaveCC, Condition cond = al);
-
- // Multiply instructions
-
- void mla(Register dst, Register src1, Register src2, Register srcA,
- SBit s = LeaveCC, Condition cond = al);
-
- void mul(Register dst, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void smlal(Register dstL, Register dstH, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void smull(Register dstL, Register dstH, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void umlal(Register dstL, Register dstH, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void umull(Register dstL, Register dstH, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- // Miscellaneous arithmetic instructions
-
- void clz(Register dst, Register src, Condition cond = al); // v5 and above
-
- // Saturating instructions. v6 and above.
-
- // Unsigned saturate.
- //
- // Saturate an optionally shifted signed value to an unsigned range.
- //
- // usat dst, #satpos, src
- // usat dst, #satpos, src, lsl #sh
- // usat dst, #satpos, src, asr #sh
- //
- // Register dst will contain:
- //
- // 0, if s < 0
- // (1 << satpos) - 1, if s > ((1 << satpos) - 1)
- // s, otherwise
- //
- // where s is the contents of src after shifting (if used.)
- void usat(Register dst, int satpos, const Operand& src, Condition cond = al);
-
- // Bitfield manipulation instructions. v7 and above.
-
- void ubfx(Register dst, Register src, int lsb, int width,
- Condition cond = al);
-
- void sbfx(Register dst, Register src, int lsb, int width,
- Condition cond = al);
-
- void bfc(Register dst, int lsb, int width, Condition cond = al);
-
- void bfi(Register dst, Register src, int lsb, int width,
- Condition cond = al);
-
- // Status register access instructions
-
- void mrs(Register dst, SRegister s, Condition cond = al);
- void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al);
-
- // Load/Store instructions
- void ldr(Register dst, const MemOperand& src, Condition cond = al);
- void str(Register src, const MemOperand& dst, Condition cond = al);
- void ldrb(Register dst, const MemOperand& src, Condition cond = al);
- void strb(Register src, const MemOperand& dst, Condition cond = al);
- void ldrh(Register dst, const MemOperand& src, Condition cond = al);
- void strh(Register src, const MemOperand& dst, Condition cond = al);
- void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
- void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
- void ldrd(Register dst1,
- Register dst2,
- const MemOperand& src, Condition cond = al);
- void strd(Register src1,
- Register src2,
- const MemOperand& dst, Condition cond = al);
-
- // Load/Store multiple instructions
- void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
- void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
-
- // Exception-generating instructions and debugging support
- void stop(const char* msg,
- Condition cond = al,
- int32_t code = kDefaultStopCode);
-
- void bkpt(uint32_t imm16); // v5 and above
- void svc(uint32_t imm24, Condition cond = al);
-
- // Coprocessor instructions
-
- void cdp(Coprocessor coproc, int opcode_1,
- CRegister crd, CRegister crn, CRegister crm,
- int opcode_2, Condition cond = al);
-
- void cdp2(Coprocessor coproc, int opcode_1,
- CRegister crd, CRegister crn, CRegister crm,
- int opcode_2); // v5 and above
-
- void mcr(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
- int opcode_2 = 0, Condition cond = al);
-
- void mcr2(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
- int opcode_2 = 0); // v5 and above
-
- void mrc(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
- int opcode_2 = 0, Condition cond = al);
-
- void mrc2(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
- int opcode_2 = 0); // v5 and above
-
- void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
- LFlag l = Short, Condition cond = al);
- void ldc(Coprocessor coproc, CRegister crd, Register base, int option,
- LFlag l = Short, Condition cond = al);
-
- void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
- LFlag l = Short); // v5 and above
- void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
- LFlag l = Short); // v5 and above
-
- void stc(Coprocessor coproc, CRegister crd, const MemOperand& dst,
- LFlag l = Short, Condition cond = al);
- void stc(Coprocessor coproc, CRegister crd, Register base, int option,
- LFlag l = Short, Condition cond = al);
-
- void stc2(Coprocessor coproc, CRegister crd, const MemOperand& dst,
- LFlag l = Short); // v5 and above
- void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
- LFlag l = Short); // v5 and above
-
- // Support for VFP.
- // All these APIs support S0 to S31 and D0 to D15.
- // Currently these APIs do not support extended D registers, i.e, D16 to D31.
- // However, some simple modifications can allow
- // these APIs to support D16 to D31.
-
- void vldr(const DwVfpRegister dst,
- const Register base,
- int offset,
- const Condition cond = al);
- void vldr(const DwVfpRegister dst,
- const MemOperand& src,
- const Condition cond = al);
-
- void vldr(const SwVfpRegister dst,
- const Register base,
- int offset,
- const Condition cond = al);
- void vldr(const SwVfpRegister dst,
- const MemOperand& src,
- const Condition cond = al);
-
- void vstr(const DwVfpRegister src,
- const Register base,
- int offset,
- const Condition cond = al);
- void vstr(const DwVfpRegister src,
- const MemOperand& dst,
- const Condition cond = al);
-
- void vstr(const SwVfpRegister src,
- const Register base,
- int offset,
- const Condition cond = al);
- void vstr(const SwVfpRegister src,
- const MemOperand& dst,
- const Condition cond = al);
-
- void vmov(const DwVfpRegister dst,
- double imm,
- const Condition cond = al);
- void vmov(const SwVfpRegister dst,
- const SwVfpRegister src,
- const Condition cond = al);
- void vmov(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond = al);
- void vmov(const DwVfpRegister dst,
- const Register src1,
- const Register src2,
- const Condition cond = al);
- void vmov(const Register dst1,
- const Register dst2,
- const DwVfpRegister src,
- const Condition cond = al);
- void vmov(const SwVfpRegister dst,
- const Register src,
- const Condition cond = al);
- void vmov(const Register dst,
- const SwVfpRegister src,
- const Condition cond = al);
- void vcvt_f64_s32(const DwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode = kDefaultRoundToZero,
- const Condition cond = al);
- void vcvt_f32_s32(const SwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode = kDefaultRoundToZero,
- const Condition cond = al);
- void vcvt_f64_u32(const DwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode = kDefaultRoundToZero,
- const Condition cond = al);
- void vcvt_s32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
- VFPConversionMode mode = kDefaultRoundToZero,
- const Condition cond = al);
- void vcvt_u32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
- VFPConversionMode mode = kDefaultRoundToZero,
- const Condition cond = al);
- void vcvt_f64_f32(const DwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode = kDefaultRoundToZero,
- const Condition cond = al);
- void vcvt_f32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
- VFPConversionMode mode = kDefaultRoundToZero,
- const Condition cond = al);
-
- void vneg(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond = al);
- void vabs(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond = al);
- void vadd(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vsub(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vmul(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vdiv(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vcmp(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vcmp(const DwVfpRegister src1,
- const double src2,
- const Condition cond = al);
- void vmrs(const Register dst,
- const Condition cond = al);
- void vmsr(const Register dst,
- const Condition cond = al);
- void vsqrt(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond = al);
-
- // Pseudo instructions
-
- // Different nop operations are used by the code generator to detect certain
- // states of the generated code.
- enum NopMarkerTypes {
- NON_MARKING_NOP = 0,
- DEBUG_BREAK_NOP,
- // IC markers.
- PROPERTY_ACCESS_INLINED,
- PROPERTY_ACCESS_INLINED_CONTEXT,
- PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
- // Helper values.
- LAST_CODE_MARKER,
- FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
- };
-
- void nop(int type = 0); // 0 is the default non-marking type.
-
- void push(Register src, Condition cond = al) {
- str(src, MemOperand(sp, 4, NegPreIndex), cond);
- }
-
- void pop(Register dst, Condition cond = al) {
- ldr(dst, MemOperand(sp, 4, PostIndex), cond);
- }
-
- void pop() {
- add(sp, sp, Operand(kPointerSize));
- }
-
- // Jump unconditionally to given label.
- void jmp(Label* L) { b(L, al); }
-
- // Check the code size generated from label to here.
- int InstructionsGeneratedSince(Label* l) {
- return (pc_offset() - l->pos()) / kInstrSize;
- }
-
- // Check whether an immediate fits an addressing mode 1 instruction.
- bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
-
- // Class for scoping postponing the constant pool generation.
- class BlockConstPoolScope {
- public:
- explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
- assem_->StartBlockConstPool();
- }
- ~BlockConstPoolScope() {
- assem_->EndBlockConstPool();
- }
-
- private:
- Assembler* assem_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
- };
-
- // Postpone the generation of the constant pool for the specified number of
- // instructions.
- void BlockConstPoolFor(int instructions);
-
- // Debugging
-
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
-
- // Mark address of a debug break slot.
- void RecordDebugBreakSlot();
-
- // Record a comment relocation entry that can be used by a disassembler.
- // Use --code-comments to enable.
- void RecordComment(const char* msg);
-
- // Writes a single byte or word of data in the code stream. Used
- // for inline tables, e.g., jump-tables. The constant pool should be
- // emitted before any use of db and dd to ensure that constant pools
- // are not emitted as part of the tables generated.
- void db(uint8_t data);
- void dd(uint32_t data);
-
- int pc_offset() const { return pc_ - buffer_; }
-
- PositionsRecorder* positions_recorder() { return &positions_recorder_; }
-
- bool can_peephole_optimize(int instructions) {
- if (!allow_peephole_optimization_) return false;
- if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
- return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
- }
-
- // Read/patch instructions
- static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
- static void instr_at_put(byte* pc, Instr instr) {
- *reinterpret_cast<Instr*>(pc) = instr;
- }
- static Condition GetCondition(Instr instr);
- static bool IsBranch(Instr instr);
- static int GetBranchOffset(Instr instr);
- static bool IsLdrRegisterImmediate(Instr instr);
- static int GetLdrRegisterImmediateOffset(Instr instr);
- static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
- static bool IsStrRegisterImmediate(Instr instr);
- static Instr SetStrRegisterImmediateOffset(Instr instr, int offset);
- static bool IsAddRegisterImmediate(Instr instr);
- static Instr SetAddRegisterImmediateOffset(Instr instr, int offset);
- static Register GetRd(Instr instr);
- static Register GetRn(Instr instr);
- static Register GetRm(Instr instr);
- static bool IsPush(Instr instr);
- static bool IsPop(Instr instr);
- static bool IsStrRegFpOffset(Instr instr);
- static bool IsLdrRegFpOffset(Instr instr);
- static bool IsStrRegFpNegOffset(Instr instr);
- static bool IsLdrRegFpNegOffset(Instr instr);
- static bool IsLdrPcImmediateOffset(Instr instr);
- static bool IsTstImmediate(Instr instr);
- static bool IsCmpRegister(Instr instr);
- static bool IsCmpImmediate(Instr instr);
- static Register GetCmpImmediateRegister(Instr instr);
- static int GetCmpImmediateRawImmediate(Instr instr);
- static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
-
- // Check if is time to emit a constant pool for pending reloc info entries
- void CheckConstPool(bool force_emit, bool require_jump);
-
- protected:
- bool emit_debug_code() const { return emit_debug_code_; }
-
- int buffer_space() const { return reloc_info_writer.pos() - pc_; }
-
- // Read/patch instructions
- Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
- void instr_at_put(int pos, Instr instr) {
- *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
- }
-
- // Decode branch instruction at pos and return branch target pos
- int target_at(int pos);
-
- // Patch branch instruction at pos to branch to given branch target pos
- void target_at_put(int pos, int target_pos);
-
- // Block the emission of the constant pool before pc_offset
- void BlockConstPoolBefore(int pc_offset) {
- if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
- }
-
- void StartBlockConstPool() {
- const_pool_blocked_nesting_++;
- }
- void EndBlockConstPool() {
- const_pool_blocked_nesting_--;
- }
- bool is_const_pool_blocked() const { return const_pool_blocked_nesting_ > 0; }
-
- private:
- // Code buffer:
- // The buffer into which code and relocation info are generated.
- byte* buffer_;
- int buffer_size_;
- // True if the assembler owns the buffer, false if buffer is external.
- bool own_buffer_;
-
- // Buffer size and constant pool distance are checked together at regular
- // intervals of kBufferCheckInterval emitted bytes
- static const int kBufferCheckInterval = 1*KB/2;
- int next_buffer_check_; // pc offset of next buffer check
-
- // Code generation
- // The relocation writer's position is at least kGap bytes below the end of
- // the generated instructions. This is so that multi-instruction sequences do
- // not have to check for overflow. The same is true for writes of large
- // relocation info entries.
- static const int kGap = 32;
- byte* pc_; // the program counter; moves forward
-
- // Constant pool generation
- // Pools are emitted in the instruction stream, preferably after unconditional
- // jumps or after returns from functions (in dead code locations).
- // If a long code sequence does not contain unconditional jumps, it is
- // necessary to emit the constant pool before the pool gets too far from the
- // location it is accessed from. In this case, we emit a jump over the emitted
- // constant pool.
- // Constants in the pool may be addresses of functions that gets relocated;
- // if so, a relocation info entry is associated to the constant pool entry.
-
- // Repeated checking whether the constant pool should be emitted is rather
- // expensive. By default we only check again once a number of instructions
- // has been generated. That also means that the sizing of the buffers is not
- // an exact science, and that we rely on some slop to not overrun buffers.
- static const int kCheckConstIntervalInst = 32;
- static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
-
-
- // Pools are emitted after function return and in dead code at (more or less)
- // regular intervals of kDistBetweenPools bytes
- static const int kDistBetweenPools = 1*KB;
-
- // Constants in pools are accessed via pc relative addressing, which can
- // reach +/-4KB thereby defining a maximum distance between the instruction
- // and the accessed constant. We satisfy this constraint by limiting the
- // distance between pools.
- static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
-
- // Emission of the constant pool may be blocked in some code sequences.
- int const_pool_blocked_nesting_; // Block emission if this is not zero.
- int no_const_pool_before_; // Block emission before this pc offset.
-
- // Keep track of the last emitted pool to guarantee a maximal distance
- int last_const_pool_end_; // pc offset following the last constant pool
-
- // Relocation info generation
- // Each relocation is encoded as a variable size value
- static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
- RelocInfoWriter reloc_info_writer;
- // Relocation info records are also used during code generation as temporary
- // containers for constants and code target addresses until they are emitted
- // to the constant pool. These pending relocation info records are temporarily
- // stored in a separate buffer until a constant pool is emitted.
- // If every instruction in a long sequence is accessing the pool, we need one
- // pending relocation entry per instruction.
- static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
- RelocInfo prinfo_[kMaxNumPRInfo]; // the buffer of pending relocation info
- int num_prinfo_; // number of pending reloc info entries in the buffer
-
- // The bound position, before this we cannot do instruction elimination.
- int last_bound_pos_;
-
- // Code emission
- inline void CheckBuffer();
- void GrowBuffer();
- inline void emit(Instr x);
-
- // Instruction generation
- void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
- void addrmod2(Instr instr, Register rd, const MemOperand& x);
- void addrmod3(Instr instr, Register rd, const MemOperand& x);
- void addrmod4(Instr instr, Register rn, RegList rl);
- void addrmod5(Instr instr, CRegister crd, const MemOperand& x);
-
- // Labels
- void print(Label* L);
- void bind_to(Label* L, int pos);
- void link_to(Label* L, Label* appendix);
- void next(Label* L);
-
- // Record reloc info for current pc_
- void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
-
- friend class RegExpMacroAssemblerARM;
- friend class RelocInfo;
- friend class CodePatcher;
- friend class BlockConstPoolScope;
-
- PositionsRecorder positions_recorder_;
- bool allow_peephole_optimization_;
- bool emit_debug_code_;
- friend class PositionsRecorder;
- friend class EnsureSpace;
-};
-
-
-class EnsureSpace BASE_EMBEDDED {
- public:
- explicit EnsureSpace(Assembler* assembler) {
- assembler->CheckBuffer();
- }
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_ASSEMBLER_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/builtins-arm.cc b/src/3rdparty/v8/src/arm/builtins-arm.cc
deleted file mode 100644
index 9cca536..0000000
--- a/src/3rdparty/v8/src/arm/builtins-arm.cc
+++ /dev/null
@@ -1,1634 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen-inl.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "runtime.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
- CFunctionId id,
- BuiltinExtraArguments extra_args) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments excluding receiver
- // -- r1 : called function (only guaranteed when
- // extra_args requires it)
- // -- cp : context
- // -- sp[0] : last argument
- // -- ...
- // -- sp[4 * (argc - 1)] : first argument (argc == r0)
- // -- sp[4 * argc] : receiver
- // -----------------------------------
-
- // Insert extra arguments.
- int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- __ push(r1);
- } else {
- ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
- }
-
- // JumpToExternalReference expects r0 to contain the number of arguments
- // including the receiver and the extra arguments.
- __ add(r0, r0, Operand(num_extra_args + 1));
- __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
-}
-
-
-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the global context.
-
- __ ldr(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(result,
- FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
- // Load the Array function from the global context.
- __ ldr(result,
- MemOperand(result,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
-// This constant has the same value as JSArray::kPreallocatedArrayElements and
-// if JSArray::kPreallocatedArrayElements is changed handling of loop unfolding
-// below should be reconsidered.
-static const int kLoopUnfoldLimit = 4;
-
-
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. An elements backing store is allocated with size initial_capacity
-// and filled with the hole values.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
- Register array_function,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int initial_capacity,
- Label* gc_required) {
- ASSERT(initial_capacity > 0);
- // Load the initial map from the array function.
- __ ldr(scratch1, FieldMemOperand(array_function,
- JSFunction::kPrototypeOrInitialMapOffset));
-
- // Allocate the JSArray object together with space for a fixed array with the
- // requested elements.
- int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
- __ AllocateInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // scratch1: initial map
- // scratch2: start of next object
- __ str(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- __ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
- // Field JSArray::kElementsOffset is initialized later.
- __ mov(scratch3, Operand(0, RelocInfo::NONE));
- __ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // scratch2: start of next object
- __ add(scratch1, result, Operand(JSArray::kSize));
- __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
-
- // Clear the heap tag on the elements array.
- ASSERT(kSmiTag == 0);
- __ sub(scratch1, scratch1, Operand(kHeapObjectTag));
-
- // Initialize the FixedArray and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // scratch1: elements array (untagged)
- // scratch2: start of next object
- __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
- ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
- __ mov(scratch3, Operand(Smi::FromInt(initial_capacity)));
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
-
- // Fill the FixedArray with the hole value.
- ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- ASSERT(initial_capacity <= kLoopUnfoldLimit);
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < initial_capacity; i++) {
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
- }
-}
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array_storage and elements_array_end
-// (see below for when that is not the case). If the parameter fill_with_holes
-// is true the allocated elements backing store is filled with the hole values
-// otherwise it is left uninitialized. When the backing store is filled the
-// register elements_array_storage is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
- Register array_function, // Array function.
- Register array_size, // As a smi.
- Register result,
- Register elements_array_storage,
- Register elements_array_end,
- Register scratch1,
- Register scratch2,
- bool fill_with_hole,
- Label* gc_required) {
- Label not_empty, allocated;
-
- // Load the initial map from the array function.
- __ ldr(elements_array_storage,
- FieldMemOperand(array_function,
- JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check whether an empty sized array is requested.
- __ tst(array_size, array_size);
- __ b(ne, &not_empty);
-
- // If an empty array is requested allocate a small elements array anyway. This
- // keeps the code below free of special casing for the empty array.
- int size = JSArray::kSize +
- FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size,
- result,
- elements_array_end,
- scratch1,
- gc_required,
- TAG_OBJECT);
- __ jmp(&allocated);
-
- // Allocate the JSArray object together with space for a FixedArray with the
- // requested number of elements.
- __ bind(&not_empty);
- ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ mov(elements_array_end,
- Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
- __ add(elements_array_end,
- elements_array_end,
- Operand(array_size, ASR, kSmiTagSize));
- __ AllocateInNewSpace(
- elements_array_end,
- result,
- scratch1,
- scratch2,
- gc_required,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // elements_array_storage: initial map
- // array_size: size of array (smi)
- __ bind(&allocated);
- __ str(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
- __ str(elements_array_storage,
- FieldMemOperand(result, JSArray::kPropertiesOffset));
- // Field JSArray::kElementsOffset is initialized later.
- __ str(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // array_size: size of array (smi)
- __ add(elements_array_storage, result, Operand(JSArray::kSize));
- __ str(elements_array_storage,
- FieldMemOperand(result, JSArray::kElementsOffset));
-
- // Clear the heap tag on the elements array.
- ASSERT(kSmiTag == 0);
- __ sub(elements_array_storage,
- elements_array_storage,
- Operand(kHeapObjectTag));
- // Initialize the fixed array and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // elements_array_storage: elements array (untagged)
- // array_size: size of array (smi)
- __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
- __ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
- ASSERT(kSmiTag == 0);
- __ tst(array_size, array_size);
- // Length of the FixedArray is the number of pre-allocated elements if
- // the actual JSArray has length 0 and the size of the JSArray for non-empty
- // JSArrays. The length of a FixedArray is stored as a smi.
- __ mov(array_size,
- Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)),
- LeaveCC,
- eq);
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ str(array_size,
- MemOperand(elements_array_storage, kPointerSize, PostIndex));
-
- // Calculate elements array and elements array end.
- // result: JSObject
- // elements_array_storage: elements array element storage
- // array_size: smi-tagged size of elements array
- ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ add(elements_array_end,
- elements_array_storage,
- Operand(array_size, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Fill the allocated FixedArray with the hole value if requested.
- // result: JSObject
- // elements_array_storage: elements array element storage
- // elements_array_end: start of next object
- if (fill_with_hole) {
- Label loop, entry;
- __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
- __ jmp(&entry);
- __ bind(&loop);
- __ str(scratch1,
- MemOperand(elements_array_storage, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(elements_array_storage, elements_array_end);
- __ b(lt, &loop);
- }
-}
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-// r0: argc
-// r1: constructor (built-in Array function)
-// lr: return address
-// sp[0]: last argument
-// This function is used for both construct and normal calls of Array. The only
-// difference between handling a construct call and a normal call is that for a
-// construct call the constructor function in r1 needs to be preserved for
-// entering the generic code. In both cases argc in r0 needs to be preserved.
-// Both registers are preserved by this code so no need to differentiate between
-// construct call and normal call.
-static void ArrayNativeCode(MacroAssembler* masm,
- Label* call_generic_code) {
- Counters* counters = masm->isolate()->counters();
- Label argc_one_or_more, argc_two_or_more;
-
- // Check for array construction with zero arguments or one.
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- __ b(ne, &argc_one_or_more);
-
- // Handle construction of an empty array.
- AllocateEmptyJSArray(masm,
- r1,
- r2,
- r3,
- r4,
- r5,
- JSArray::kPreallocatedArrayElements,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, r3, r4);
- // Setup return value, remove receiver from stack and return.
- __ mov(r0, r2);
- __ add(sp, sp, Operand(kPointerSize));
- __ Jump(lr);
-
- // Check for one argument. Bail out if argument is not smi or if it is
- // negative.
- __ bind(&argc_one_or_more);
- __ cmp(r0, Operand(1));
- __ b(ne, &argc_two_or_more);
- ASSERT(kSmiTag == 0);
- __ ldr(r2, MemOperand(sp)); // Get the argument from the stack.
- __ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
- __ b(ne, call_generic_code);
-
- // Handle construction of an empty array of a certain size. Bail out if size
- // is too large to actually allocate an elements array.
- ASSERT(kSmiTag == 0);
- __ cmp(r2, Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
- __ b(ge, call_generic_code);
-
- // r0: argc
- // r1: constructor
- // r2: array_size (smi)
- // sp[0]: argument
- AllocateJSArray(masm,
- r1,
- r2,
- r3,
- r4,
- r5,
- r6,
- r7,
- true,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, r2, r4);
- // Setup return value, remove receiver and argument from stack and return.
- __ mov(r0, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Jump(lr);
-
- // Handle construction of an array from a list of arguments.
- __ bind(&argc_two_or_more);
- __ mov(r2, Operand(r0, LSL, kSmiTagSize)); // Convet argc to a smi.
-
- // r0: argc
- // r1: constructor
- // r2: array_size (smi)
- // sp[0]: last argument
- AllocateJSArray(masm,
- r1,
- r2,
- r3,
- r4,
- r5,
- r6,
- r7,
- false,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, r2, r6);
-
- // Fill arguments as array elements. Copy from the top of the stack (last
- // element) to the array backing store filling it backwards. Note:
- // elements_array_end points after the backing store therefore PreIndex is
- // used when filling the backing store.
- // r0: argc
- // r3: JSArray
- // r4: elements_array storage start (untagged)
- // r5: elements_array_end (untagged)
- // sp[0]: last argument
- Label loop, entry;
- __ jmp(&entry);
- __ bind(&loop);
- __ ldr(r2, MemOperand(sp, kPointerSize, PostIndex));
- __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
- __ bind(&entry);
- __ cmp(r4, r5);
- __ b(lt, &loop);
-
- // Remove caller arguments and receiver from the stack, setup return value and
- // return.
- // r0: argc
- // r3: JSArray
- // sp[0]: receiver
- __ add(sp, sp, Operand(kPointerSize));
- __ mov(r0, r3);
- __ Jump(lr);
-}
-
-
-void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
- Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
-
- // Get the Array function.
- GenerateLoadArrayFunction(masm, r1);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin Array functions should be maps.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for Array function");
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ Assert(eq, "Unexpected initial map for Array function");
- }
-
- // Run the native code for the Array function called as a normal function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code if the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
-
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- r1 : constructor function
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
- Label generic_constructor;
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the builtin and internal
- // Array functions which always have a map.
- // Initial map for the builtin Array function should be a map.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for Array function");
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ Assert(eq, "Unexpected initial map for Array function");
- }
-
- // Run the native code for the Array function called as a constructor.
- ArrayNativeCode(masm, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- r1 : constructor function
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
- // -- sp[argc * 4] : receiver
- // -----------------------------------
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_ctor_calls(), 1, r2, r3);
-
- Register function = r1;
- if (FLAG_debug_code) {
- __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, r2);
- __ cmp(function, Operand(r2));
- __ Assert(eq, "Unexpected String function");
- }
-
- // Load the first arguments in r0 and get rid of the rest.
- Label no_arguments;
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- __ b(eq, &no_arguments);
- // First args = sp[(argc - 1) * 4].
- __ sub(r0, r0, Operand(1));
- __ ldr(r0, MemOperand(sp, r0, LSL, kPointerSizeLog2, PreIndex));
- // sp now point to args[0], drop args[0] + receiver.
- __ Drop(2);
-
- Register argument = r2;
- Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- r0, // Input.
- argument, // Result.
- r3, // Scratch.
- r4, // Scratch.
- r5, // Scratch.
- false, // Is it a Smi?
- &not_cached);
- __ IncrementCounter(counters->string_ctor_cached_number(), 1, r3, r4);
- __ bind(&argument_is_string);
-
- // ----------- S t a t e -------------
- // -- r2 : argument converted to string
- // -- r1 : constructor function
- // -- lr : return address
- // -----------------------------------
-
- Label gc_required;
- __ AllocateInNewSpace(JSValue::kSize,
- r0, // Result.
- r3, // Scratch.
- r4, // Scratch.
- &gc_required,
- TAG_OBJECT);
-
- // Initialising the String Object.
- Register map = r3;
- __ LoadGlobalFunctionInitialMap(function, map, r4);
- if (FLAG_debug_code) {
- __ ldrb(r4, FieldMemOperand(map, Map::kInstanceSizeOffset));
- __ cmp(r4, Operand(JSValue::kSize >> kPointerSizeLog2));
- __ Assert(eq, "Unexpected string wrapper instance size");
- __ ldrb(r4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
- __ cmp(r4, Operand(0, RelocInfo::NONE));
- __ Assert(eq, "Unexpected unused properties of string wrapper");
- }
- __ str(map, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
- __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
-
- __ str(argument, FieldMemOperand(r0, JSValue::kValueOffset));
-
- // Ensure the object is fully initialized.
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
-
- __ Ret();
-
- // The argument was not found in the number to string cache. Check
- // if it's a string already before calling the conversion builtin.
- Label convert_argument;
- __ bind(&not_cached);
- __ JumpIfSmi(r0, &convert_argument);
-
- // Is it a String?
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- ASSERT(kNotStringTag != 0);
- __ tst(r3, Operand(kIsNotStringMask));
- __ b(ne, &convert_argument);
- __ mov(argument, r0);
- __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
- __ b(&argument_is_string);
-
- // Invoke the conversion builtin and put the result into r2.
- __ bind(&convert_argument);
- __ push(function); // Preserve the function.
- __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
- __ EnterInternalFrame();
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_JS);
- __ LeaveInternalFrame();
- __ pop(function);
- __ mov(argument, r0);
- __ b(&argument_is_string);
-
- // Load the empty string into r2, remove the receiver from the
- // stack, and jump back to the case where the argument is a string.
- __ bind(&no_arguments);
- __ LoadRoot(argument, Heap::kEmptyStringRootIndex);
- __ Drop(1);
- __ b(&argument_is_string);
-
- // At this point the argument is already a string. Call runtime to
- // create a string wrapper.
- __ bind(&gc_required);
- __ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
- __ EnterInternalFrame();
- __ push(argument);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
- __ LeaveInternalFrame();
- __ Ret();
-}
-
-
-void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- r1 : constructor function
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
-
- Label non_function_call;
- // Check that the function is not a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &non_function_call);
- // Check that the function is a JSFunction.
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &non_function_call);
-
- // Jump to the function-specific construct stub.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
- __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // r0: number of arguments
- // r1: called object
- __ bind(&non_function_call);
- // Set expected number of arguments to zero (not changing r0).
- __ mov(r2, Operand(0, RelocInfo::NONE));
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
-
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool count_constructions) {
- // Should never count constructions for api objects.
- ASSERT(!is_api_function || !count_constructions);
-
- Isolate* isolate = masm->isolate();
-
- // Enter a construct frame.
- __ EnterConstructFrame();
-
- // Preserve the two incoming parameters on the stack.
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
- __ push(r0); // Smi-tagged arguments count.
- __ push(r1); // Constructor function.
-
- // Try to allocate the object without transitioning into C code. If any of the
- // preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- Label undo_allocation;
-#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ mov(r2, Operand(debug_step_in_fp));
- __ ldr(r2, MemOperand(r2));
- __ tst(r2, r2);
- __ b(ne, &rt_call);
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- // r1: constructor function
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &rt_call);
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ b(ne, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see comments
- // in Runtime_NewObject in runtime.cc). In which case the initial map's
- // instance type would be JS_FUNCTION_TYPE.
- // r1: constructor function
- // r2: initial map
- __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
- __ b(eq, &rt_call);
-
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- MemOperand constructor_count =
- FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
- __ ldrb(r4, constructor_count);
- __ sub(r4, r4, Operand(1), SetCC);
- __ strb(r4, constructor_count);
- __ b(ne, &allocate);
-
- __ Push(r1, r2);
-
- __ push(r1); // constructor
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(r2);
- __ pop(r1);
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- // r1: constructor function
- // r2: initial map
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to initial
- // map and properties and elements are set to empty fixed array.
- // r1: constructor function
- // r2: initial map
- // r3: object size
- // r4: JSObject (not tagged)
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(r5, r4);
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-
- // Fill all the in-object properties with the appropriate filler.
- // r1: constructor function
- // r2: initial map
- // r3: object size (in words)
- // r4: JSObject (not tagged)
- // r5: First in-object property of JSObject (not tagged)
- __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- { Label loop, entry;
- if (count_constructions) {
- // To allow for truncation.
- __ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex);
- } else {
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
- }
- __ b(&entry);
- __ bind(&loop);
- __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(r5, r6);
- __ b(lt, &loop);
- }
-
- // Add the object tag to make the JSObject real, so that we can continue and
- // jump into the continuation code at any time from now on. Any failures
- // need to undo the allocation, so that the heap is in a consistent state
- // and verifiable.
- __ add(r4, r4, Operand(kHeapObjectTag));
-
- // Check if a non-empty properties array is needed. Continue with allocated
- // object if not fall through to runtime call if it is.
- // r1: constructor function
- // r4: JSObject
- // r5: start of next object (not tagged)
- __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
- // The field instance sizes contains both pre-allocated property fields and
- // in-object properties.
- __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
- __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * 8, 8);
- __ add(r3, r3, Operand(r6));
- __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * 8, 8);
- __ sub(r3, r3, Operand(r6), SetCC);
-
- // Done if no extra properties are to be allocated.
- __ b(eq, &allocated);
- __ Assert(pl, "Property allocation count failed.");
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // r1: constructor
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: start of next object
- __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ AllocateInNewSpace(
- r0,
- r5,
- r6,
- r2,
- &undo_allocation,
- static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
-
- // Initialize the FixedArray.
- // r1: constructor
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
- __ mov(r2, r5);
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ mov(r0, Operand(r3, LSL, kSmiTagSize));
- __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
-
- // Initialize the fields to undefined.
- // r1: constructor function
- // r2: First element of FixedArray (not tagged)
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- { Label loop, entry;
- if (count_constructions) {
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
- } else if (FLAG_debug_code) {
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
- __ cmp(r7, r8);
- __ Assert(eq, "Undefined value not loaded.");
- }
- __ b(&entry);
- __ bind(&loop);
- __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(r2, r6);
- __ b(lt, &loop);
- }
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject
- // r1: constructor function
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag.
- __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
-
- // Continue with JSObject being successfully allocated
- // r1: constructor function
- // r4: JSObject
- __ jmp(&allocated);
-
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // r4: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(r4, r5);
- }
-
- // Allocate the new receiver object using the runtime call.
- // r1: constructor function
- __ bind(&rt_call);
- __ push(r1); // argument for Runtime_NewObject
- __ CallRuntime(Runtime::kNewObject, 1);
- __ mov(r4, r0);
-
- // Receiver for constructor call allocated.
- // r4: JSObject
- __ bind(&allocated);
- __ push(r4);
-
- // Push the function and the allocated receiver from the stack.
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(r1, MemOperand(sp, kPointerSize));
- __ push(r1); // Constructor function.
- __ push(r4); // Receiver.
-
- // Reload the number of arguments from the stack.
- // r1: constructor function
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- __ ldr(r3, MemOperand(sp, 4 * kPointerSize));
-
- // Setup pointer to last argument.
- __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Setup number of arguments for function call below
- __ mov(r0, Operand(r3, LSR, kSmiTagSize));
-
- // Copy arguments and receiver to the expression stack.
- // r0: number of arguments
- // r2: address of last argument (caller sp)
- // r1: constructor function
- // r3: number of arguments (smi-tagged)
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- Label loop, entry;
- __ b(&entry);
- __ bind(&loop);
- __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
- __ push(ip);
- __ bind(&entry);
- __ sub(r3, r3, Operand(2), SetCC);
- __ b(ge, &loop);
-
- // Call the function.
- // r0: number of arguments
- // r1: constructor function
- if (is_api_function) {
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION);
- } else {
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION);
- }
-
- // Pop the function from the stack.
- // sp[0]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- __ pop();
-
- // Restore context from the frame.
- // r0: result
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // r0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CompareObjectType(r0, r3, r3, FIRST_JS_OBJECT_TYPE);
- __ b(ge, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ ldr(r0, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // r0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
- __ LeaveConstructFrame();
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
- __ add(sp, sp, Operand(kPointerSize));
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
- __ Jump(lr);
-}
-
-
-void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
-}
-
-
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
-}
-
-
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
-}
-
-
-static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
- bool is_construct) {
- // Called from Generate_JS_Entry
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // r4: argv
- // r5-r7, cp may be clobbered
-
- // Clear the context before we push it when entering the JS frame.
- __ mov(cp, Operand(0, RelocInfo::NONE));
-
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Set up the context from the function argument.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // Set up the roots register.
- ExternalReference roots_address =
- ExternalReference::roots_address(masm->isolate());
- __ mov(r10, Operand(roots_address));
-
- // Push the function and the receiver onto the stack.
- __ push(r1);
- __ push(r2);
-
- // Copy arguments to the stack in a loop.
- // r1: function
- // r3: argc
- // r4: argv, i.e. points to first arg
- Label loop, entry;
- __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
- // r2 points past last arg.
- __ b(&entry);
- __ bind(&loop);
- __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter
- __ ldr(r0, MemOperand(r0)); // dereference handle
- __ push(r0); // push parameter
- __ bind(&entry);
- __ cmp(r4, r2);
- __ b(ne, &loop);
-
- // Initialize all JavaScript callee-saved registers, since they will be seen
- // by the garbage collector as part of handlers.
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ mov(r5, Operand(r4));
- __ mov(r6, Operand(r4));
- __ mov(r7, Operand(r4));
- if (kR9Available == 1) {
- __ mov(r9, Operand(r4));
- }
-
- // Invoke the code and pass argc as r0.
- __ mov(r0, Operand(r3));
- if (is_construct) {
- __ Call(masm->isolate()->builtins()->JSConstructCall(),
- RelocInfo::CODE_TARGET);
- } else {
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION);
- }
-
- // Exit the JS frame and remove the parameters (except function), and return.
- // Respect ABI stack constraint.
- __ LeaveInternalFrame();
- __ Jump(lr);
-
- // r0: result
-}
-
-
-void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, false);
-}
-
-
-void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, true);
-}
-
-
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Preserve the function.
- __ push(r1);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyCompile, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore saved function.
- __ pop(r1);
-
- // Tear down temporary frame.
- __ LeaveInternalFrame();
-
- // Do a tail-call of the compiled function.
- __ Jump(r2);
-}
-
-
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Preserve the function.
- __ push(r1);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore saved function.
- __ pop(r1);
-
- // Tear down temporary frame.
- __ LeaveInternalFrame();
-
- // Do a tail-call of the compiled function.
- __ Jump(r2);
-}
-
-
-static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
- Deoptimizer::BailoutType type) {
- __ EnterInternalFrame();
- // Pass the function and deoptimization type to the runtime system.
- __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
- __ push(r0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
- __ LeaveInternalFrame();
-
- // Get the full codegen state from the stack and untag it -> r6.
- __ ldr(r6, MemOperand(sp, 0 * kPointerSize));
- __ SmiUntag(r6);
- // Switch on the state.
- Label with_tos_register, unknown_state;
- __ cmp(r6, Operand(FullCodeGenerator::NO_REGISTERS));
- __ b(ne, &with_tos_register);
- __ add(sp, sp, Operand(1 * kPointerSize)); // Remove state.
- __ Ret();
-
- __ bind(&with_tos_register);
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
- __ cmp(r6, Operand(FullCodeGenerator::TOS_REG));
- __ b(ne, &unknown_state);
- __ add(sp, sp, Operand(2 * kPointerSize)); // Remove state.
- __ Ret();
-
- __ bind(&unknown_state);
- __ stop("no cases left");
-}
-
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
- __ EnterInternalFrame();
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- __ LeaveInternalFrame();
- __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
- __ Ret();
-}
-
-
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- CpuFeatures::TryForceFeatureScope scope(VFP3);
- if (!CpuFeatures::IsSupported(VFP3)) {
- __ Abort("Unreachable code: Cannot optimize without VFP3 support.");
- return;
- }
-
- // Lookup the function in the JavaScript frame and push it as an
- // argument to the on-stack replacement function.
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ EnterInternalFrame();
- __ push(r0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
- __ LeaveInternalFrame();
-
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
- Label skip;
- __ cmp(r0, Operand(Smi::FromInt(-1)));
- __ b(ne, &skip);
- __ Ret();
-
- __ bind(&skip);
- // Untag the AST id and push it on the stack.
- __ SmiUntag(r0);
- __ push(r0);
-
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
-}
-
-
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
- // 1. Make sure we have at least one argument.
- // r0: actual number of arguments
- { Label done;
- __ tst(r0, Operand(r0));
- __ b(ne, &done);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ push(r2);
- __ add(r0, r0, Operand(1));
- __ bind(&done);
- }
-
- // 2. Get the function to call (passed as receiver) from the stack, check
- // if it is a function.
- // r0: actual number of arguments
- Label non_function;
- __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &non_function);
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &non_function);
-
- // 3a. Patch the first argument if necessary when calling a function.
- // r0: actual number of arguments
- // r1: function
- Label shift_arguments;
- { Label convert_to_object, use_global_receiver, patch_receiver;
- // Change context eagerly in case we need the global receiver.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
- kSmiTagSize)));
- __ b(ne, &shift_arguments);
-
- // Compute the receiver in non-strict mode.
- __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ ldr(r2, MemOperand(r2, -kPointerSize));
- // r0: actual number of arguments
- // r1: function
- // r2: first argument
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &convert_to_object);
-
- __ LoadRoot(r3, Heap::kNullValueRootIndex);
- __ cmp(r2, r3);
- __ b(eq, &use_global_receiver);
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- __ cmp(r2, r3);
- __ b(eq, &use_global_receiver);
-
- __ CompareObjectType(r2, r3, r3, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &convert_to_object);
- __ cmp(r3, Operand(LAST_JS_OBJECT_TYPE));
- __ b(le, &shift_arguments);
-
- __ bind(&convert_to_object);
- __ EnterInternalFrame(); // In order to preserve argument count.
- __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged.
- __ push(r0);
-
- __ push(r2);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
- __ mov(r2, r0);
-
- __ pop(r0);
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
- __ LeaveInternalFrame();
- // Restore the function to r1.
- __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ jmp(&patch_receiver);
-
- // Use the global receiver object from the called function as the
- // receiver.
- __ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ ldr(r2, FieldMemOperand(cp, kGlobalIndex));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
- __ ldr(r2, FieldMemOperand(r2, kGlobalIndex));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
-
- __ bind(&patch_receiver);
- __ add(r3, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ str(r2, MemOperand(r3, -kPointerSize));
-
- __ jmp(&shift_arguments);
- }
-
- // 3b. Patch the first argument when calling a non-function. The
- // CALL_NON_FUNCTION builtin expects the non-function callee as
- // receiver, so overwrite the first argument which will ultimately
- // become the receiver.
- // r0: actual number of arguments
- // r1: function
- __ bind(&non_function);
- __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ str(r1, MemOperand(r2, -kPointerSize));
- // Clear r1 to indicate a non-function being called.
- __ mov(r1, Operand(0, RelocInfo::NONE));
-
- // 4. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- // r0: actual number of arguments
- // r1: function
- __ bind(&shift_arguments);
- { Label loop;
- // Calculate the copy start address (destination). Copy end address is sp.
- __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
-
- __ bind(&loop);
- __ ldr(ip, MemOperand(r2, -kPointerSize));
- __ str(ip, MemOperand(r2));
- __ sub(r2, r2, Operand(kPointerSize));
- __ cmp(r2, sp);
- __ b(ne, &loop);
- // Adjust the actual number of arguments and remove the top element
- // (which is a copy of the last argument).
- __ sub(r0, r0, Operand(1));
- __ pop();
- }
-
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
- // r0: actual number of arguments
- // r1: function
- { Label function;
- __ tst(r1, r1);
- __ b(ne, &function);
- // Expected number of arguments is 0 for CALL_NON_FUNCTION.
- __ mov(r2, Operand(0, RelocInfo::NONE));
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- __ bind(&function);
- }
-
- // 5b. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing. If so, jump
- // (tail-call) to the code in register edx without checking arguments.
- // r0: actual number of arguments
- // r1: function
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2,
- FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(r2, Operand(r2, ASR, kSmiTagSize));
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ cmp(r2, r0); // Check formal and actual parameter counts.
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET,
- ne);
-
- ParameterCount expected(0);
- __ InvokeCode(r3, expected, expected, JUMP_FUNCTION);
-}
-
-
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- const int kIndexOffset = -5 * kPointerSize;
- const int kLimitOffset = -4 * kPointerSize;
- const int kArgsOffset = 2 * kPointerSize;
- const int kRecvOffset = 3 * kPointerSize;
- const int kFunctionOffset = 4 * kPointerSize;
-
- __ EnterInternalFrame();
-
- __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
- __ push(r0);
- __ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array
- __ push(r0);
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_JS);
-
- // Check the stack for overflow. We are not trying need to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
- // Make r2 the space we have left. The stack might already be overflowed
- // here which will cause r2 to become negative.
- __ sub(r2, sp, r2);
- // Check if the arguments will overflow the stack.
- __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ b(gt, &okay); // Signed comparison.
-
- // Out of stack space.
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ push(r1);
- __ push(r0);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_JS);
- // End of stack check.
-
- // Push current limit and index.
- __ bind(&okay);
- __ push(r0); // limit
- __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index
- __ push(r1);
-
- // Change context eagerly to get the right global object if necessary.
- __ ldr(r0, MemOperand(fp, kFunctionOffset));
- __ ldr(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
- // Load the shared function info while the function is still in r0.
- __ ldr(r1, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
-
- // Compute the receiver.
- Label call_to_object, use_global_receiver, push_receiver;
- __ ldr(r0, MemOperand(fp, kRecvOffset));
-
- // Do not transform the receiver for strict mode functions.
- __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
- kSmiTagSize)));
- __ b(ne, &push_receiver);
-
- // Compute the receiver in non-strict mode.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &call_to_object);
- __ LoadRoot(r1, Heap::kNullValueRootIndex);
- __ cmp(r0, r1);
- __ b(eq, &use_global_receiver);
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, r1);
- __ b(eq, &use_global_receiver);
-
- // Check if the receiver is already a JavaScript object.
- // r0: receiver
- __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &call_to_object);
- __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
- __ b(le, &push_receiver);
-
- // Convert the receiver to a regular object.
- // r0: receiver
- __ bind(&call_to_object);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
- __ b(&push_receiver);
-
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
- __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
-
- // Push the receiver.
- // r0: receiver
- __ bind(&push_receiver);
- __ push(r0);
-
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ ldr(r0, MemOperand(fp, kIndexOffset));
- __ b(&entry);
-
- // Load the current argument from the arguments array and push it to the
- // stack.
- // r0: current argument index
- __ bind(&loop);
- __ ldr(r1, MemOperand(fp, kArgsOffset));
- __ push(r1);
- __ push(r0);
-
- // Call the runtime to access the property in the arguments array.
- __ CallRuntime(Runtime::kGetProperty, 2);
- __ push(r0);
-
- // Use inline caching to access the arguments.
- __ ldr(r0, MemOperand(fp, kIndexOffset));
- __ add(r0, r0, Operand(1 << kSmiTagSize));
- __ str(r0, MemOperand(fp, kIndexOffset));
-
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ ldr(r1, MemOperand(fp, kLimitOffset));
- __ cmp(r0, r1);
- __ b(ne, &loop);
-
- // Invoke the function.
- ParameterCount actual(r0);
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ InvokeFunction(r1, actual, CALL_FUNCTION);
-
- // Tear down the internal frame and remove function, receiver and args.
- __ LeaveInternalFrame();
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Jump(lr);
-}
-
-
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
- __ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() | fp.bit() | lr.bit());
- __ add(fp, sp, Operand(3 * kPointerSize));
-}
-
-
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : result being passed through
- // -----------------------------------
- // Get the number of arguments passed (as a smi), tear down the frame and
- // then tear down the parameters.
- __ ldr(r1, MemOperand(fp, -3 * kPointerSize));
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(sp, sp, Operand(kPointerSize)); // adjust for receiver
-}
-
-
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : actual number of arguments
- // -- r1 : function (passed through to callee)
- // -- r2 : expected number of arguments
- // -- r3 : code entry to call
- // -----------------------------------
-
- Label invoke, dont_adapt_arguments;
-
- Label enough, too_few;
- __ cmp(r0, r2);
- __ b(lt, &too_few);
- __ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
- __ b(eq, &dont_adapt_arguments);
-
- { // Enough parameters: actual >= expected
- __ bind(&enough);
- EnterArgumentsAdaptorFrame(masm);
-
- // Calculate copy start address into r0 and copy end address into r2.
- // r0: actual number of arguments as a smi
- // r1: function
- // r2: expected number of arguments
- // r3: code entry to call
- __ add(r0, fp, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- // adjust for return address and receiver
- __ add(r0, r0, Operand(2 * kPointerSize));
- __ sub(r2, r0, Operand(r2, LSL, kPointerSizeLog2));
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // r0: copy start address
- // r1: function
- // r2: copy end address
- // r3: code entry to call
-
- Label copy;
- __ bind(&copy);
- __ ldr(ip, MemOperand(r0, 0));
- __ push(ip);
- __ cmp(r0, r2); // Compare before moving to next argument.
- __ sub(r0, r0, Operand(kPointerSize));
- __ b(ne, &copy);
-
- __ b(&invoke);
- }
-
- { // Too few parameters: Actual < expected
- __ bind(&too_few);
- EnterArgumentsAdaptorFrame(masm);
-
- // Calculate copy start address into r0 and copy end address is fp.
- // r0: actual number of arguments as a smi
- // r1: function
- // r2: expected number of arguments
- // r3: code entry to call
- __ add(r0, fp, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // r0: copy start address
- // r1: function
- // r2: expected number of arguments
- // r3: code entry to call
- Label copy;
- __ bind(&copy);
- // Adjust load for return address and receiver.
- __ ldr(ip, MemOperand(r0, 2 * kPointerSize));
- __ push(ip);
- __ cmp(r0, fp); // Compare before moving to next argument.
- __ sub(r0, r0, Operand(kPointerSize));
- __ b(ne, &copy);
-
- // Fill the remaining expected arguments with undefined.
- // r1: function
- // r2: expected number of arguments
- // r3: code entry to call
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ sub(r2, fp, Operand(r2, LSL, kPointerSizeLog2));
- __ sub(r2, r2, Operand(4 * kPointerSize)); // Adjust for frame.
-
- Label fill;
- __ bind(&fill);
- __ push(ip);
- __ cmp(sp, r2);
- __ b(ne, &fill);
- }
-
- // Call the entry point.
- __ bind(&invoke);
- __ Call(r3);
-
- // Exit frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ Jump(lr);
-
-
- // -------------------------------------------
- // Dont adapt arguments.
- // -------------------------------------------
- __ bind(&dont_adapt_arguments);
- __ Jump(r3);
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/code-stubs-arm.cc b/src/3rdparty/v8/src/arm/code-stubs-arm.cc
deleted file mode 100644
index 328b519..0000000
--- a/src/3rdparty/v8/src/arm/code-stubs-arm.cc
+++ /dev/null
@@ -1,6917 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "regexp-macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Label* slow,
- Condition cond,
- bool never_nan_nan);
-static void EmitSmiNonsmiComparison(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* lhs_not_nan,
- Label* slow,
- bool strict);
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
-static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
- Register lhs,
- Register rhs);
-
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in eax.
- Label check_heap_number, call_builtin;
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &check_heap_number);
- __ Ret();
-
- __ bind(&check_heap_number);
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &call_builtin);
- __ Ret();
-
- __ bind(&call_builtin);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_JS);
-}
-
-
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in cp.
- Label gc;
-
- // Pop the function info from the stack.
- __ pop(r3);
-
- // Attempt to allocate new JSFunction in new space.
- __ AllocateInNewSpace(JSFunction::kSize,
- r0,
- r1,
- r2,
- &gc,
- TAG_OBJECT);
-
- int map_index = strict_mode_ == kStrictMode
- ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
- : Context::FUNCTION_MAP_INDEX;
-
- // Compute the function map in the current global context and set that
- // as the map of the allocated object.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
- __ ldr(r2, MemOperand(r2, Context::SlotOffset(map_index)));
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
- __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
- __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
- __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
- __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
-
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
- __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
-
- // Return result. The argument function info has been popped already.
- __ Ret();
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ LoadRoot(r4, Heap::kFalseValueRootIndex);
- __ Push(cp, r3, r4);
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
-}
-
-
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
-
- // Attempt to allocate the context in new space.
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- r0,
- r1,
- r2,
- &gc,
- TAG_OBJECT);
-
- // Load the function from the stack.
- __ ldr(r3, MemOperand(sp, 0));
-
- // Setup the object header.
- __ LoadRoot(r2, Heap::kContextMapRootIndex);
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ mov(r2, Operand(Smi::FromInt(length)));
- __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
-
- // Setup the fixed slots.
- __ mov(r1, Operand(Smi::FromInt(0)));
- __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
-
- // Copy the global object from the surrounding context.
- __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
-
- // Initialize the rest of the slots to undefined.
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, r0);
- __ pop();
- __ Ret();
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewContext, 1, 1);
-}
-
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: constant elements.
- // [sp + kPointerSize]: literal index.
- // [sp + (2 * kPointerSize)]: literals array.
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
- int size = JSArray::kSize + elements_size;
-
- // Load boilerplate object into r3 and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
- __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r3, ip);
- __ b(eq, &slow_case);
-
- if (FLAG_debug_code) {
- const char* message;
- Heap::RootListIndex expected_map_index;
- if (mode_ == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map_index = Heap::kFixedArrayMapRootIndex;
- } else {
- ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
- }
- __ push(r3);
- __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, expected_map_index);
- __ cmp(r3, ip);
- __ Assert(eq, message);
- __ pop(r3);
- }
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size,
- r0,
- r1,
- r2,
- &slow_case,
- TAG_OBJECT);
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
- __ ldr(r1, FieldMemOperand(r3, i));
- __ str(r1, FieldMemOperand(r0, i));
- }
- }
-
- if (length_ > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ add(r2, r0, Operand(JSArray::kSize));
- __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
-
- // Copy the elements array.
- __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
- }
-
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
-// Takes a Smi and converts to an IEEE 64 bit floating point value in two
-// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
-// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
-// scratch register. Destroys the source register. No GC occurs during this
-// stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public CodeStub {
- public:
- ConvertToDoubleStub(Register result_reg_1,
- Register result_reg_2,
- Register source_reg,
- Register scratch_reg)
- : result1_(result_reg_1),
- result2_(result_reg_2),
- source_(source_reg),
- zeros_(scratch_reg) { }
-
- private:
- Register result1_;
- Register result2_;
- Register source_;
- Register zeros_;
-
- // Minor key encoding in 16 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 14> {};
-
- Major MajorKey() { return ConvertToDouble; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return result1_.code() +
- (result2_.code() << 4) +
- (source_.code() << 8) +
- (zeros_.code() << 12);
- }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "ConvertToDoubleStub"; }
-
-#ifdef DEBUG
- void Print() { PrintF("ConvertToDoubleStub\n"); }
-#endif
-};
-
-
-void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
-#ifndef BIG_ENDIAN_FLOATING_POINT
- Register exponent = result1_;
- Register mantissa = result2_;
-#else
- Register exponent = result2_;
- Register mantissa = result1_;
-#endif
- Label not_special;
- // Convert from Smi to integer.
- __ mov(source_, Operand(source_, ASR, kSmiTagSize));
- // Move sign bit from source to destination. This works because the sign bit
- // in the exponent word of the double has the same position and polarity as
- // the 2's complement sign bit in a Smi.
- STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
- // Subtract from 0 if source was negative.
- __ rsb(source_, source_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
-
- // We have -1, 0 or 1, which we treat specially. Register source_ contains
- // absolute value: it is either equal to 1 (special case of -1 and 1),
- // greater than 1 (not a special case) or less than 1 (special case of 0).
- __ cmp(source_, Operand(1));
- __ b(gt, &not_special);
-
- // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
- static const uint32_t exponent_word_for_1 =
- HeapNumber::kExponentBias << HeapNumber::kExponentShift;
- __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
- // 1, 0 and -1 all have 0 for the second word.
- __ mov(mantissa, Operand(0, RelocInfo::NONE));
- __ Ret();
-
- __ bind(&not_special);
- // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5.
- // Gets the wrong answer for 0, but we already checked for that case above.
- __ CountLeadingZeros(zeros_, source_, mantissa);
- // Compute exponent and or it into the exponent register.
- // We use mantissa as a scratch register here. Use a fudge factor to
- // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
- // that fit in the ARM's constant field.
- int fudge = 0x400;
- __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
- __ add(mantissa, mantissa, Operand(fudge));
- __ orr(exponent,
- exponent,
- Operand(mantissa, LSL, HeapNumber::kExponentShift));
- // Shift up the source chopping the top bit off.
- __ add(zeros_, zeros_, Operand(1));
- // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
- __ mov(source_, Operand(source_, LSL, zeros_));
- // Compute lower part of fraction (last 12 bits).
- __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
- // And the top (top 20 bits).
- __ orr(exponent,
- exponent,
- Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
- __ Ret();
-}
-
-
-class FloatingPointHelper : public AllStatic {
- public:
-
- enum Destination {
- kVFPRegisters,
- kCoreRegisters
- };
-
-
- // Loads smis from r0 and r1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
- // floating point registers VFP3 must be supported. If core registers are
- // requested when VFP3 is supported d6 and d7 will be scratched.
- static void LoadSmis(MacroAssembler* masm,
- Destination destination,
- Register scratch1,
- Register scratch2);
-
- // Loads objects from r0 and r1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
- // floating point registers VFP3 must be supported. If core registers are
- // requested when VFP3 is supported d6 and d7 will still be scratched. If
- // either r0 or r1 is not a number (not smi and not heap number object) the
- // not_number label is jumped to with r0 and r1 intact.
- static void LoadOperands(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-
- // Convert the smi or heap number in object to an int32 using the rules
- // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
- // and brought into the range -2^31 .. +2^31 - 1.
- static void ConvertNumberToInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch,
- Label* not_int32);
-
- // Load the number from object into double_dst in the double format.
- // Control will jump to not_int32 if the value cannot be exactly represented
- // by a 32-bit integer.
- // Floating point value in the 32-bit integer range that are not exact integer
- // won't be loaded.
- static void LoadNumberAsInt32Double(MacroAssembler* masm,
- Register object,
- Destination destination,
- DwVfpRegister double_dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- SwVfpRegister single_scratch,
- Label* not_int32);
-
- // Loads the number from object into dst as a 32-bit integer.
- // Control will jump to not_int32 if the object cannot be exactly represented
- // by a 32-bit integer.
- // Floating point value in the 32-bit integer range that are not exact integer
- // won't be converted.
- // scratch3 is not used when VFP3 is supported.
- static void LoadNumberAsInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch,
- Label* not_int32);
-
- // Generate non VFP3 code to check if a double can be exactly represented by a
- // 32-bit integer. This does not check for 0 or -0, which need
- // to be checked for separately.
- // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
- // through otherwise.
- // src1 and src2 will be cloberred.
- //
- // Expected input:
- // - src1: higher (exponent) part of the double value.
- // - src2: lower (mantissa) part of the double value.
- // Output status:
- // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
- // - src2: contains 1.
- // - other registers are clobbered.
- static void DoubleIs32BitInteger(MacroAssembler* masm,
- Register src1,
- Register src2,
- Register dst,
- Register scratch,
- Label* not_int32);
-
- // Generates code to call a C function to do a double operation using core
- // registers. (Used when VFP3 is not supported.)
- // This code never falls through, but returns with a heap number containing
- // the result in r0.
- // Register heapnumber_result must be a heap number in which the
- // result of the operation will be stored.
- // Requires the following layout on entry:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
- static void CallCCodeForDoubleOperation(MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch);
-
- private:
- static void LoadNumber(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register object,
- DwVfpRegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-};
-
-
-void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register scratch1,
- Register scratch2) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
- __ vmov(d7.high(), scratch1);
- __ vcvt_f64_s32(d7, d7.high());
- __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
- __ vmov(d6.high(), scratch1);
- __ vcvt_f64_s32(d6, d6.high());
- if (destination == kCoreRegisters) {
- __ vmov(r2, r3, d7);
- __ vmov(r0, r1, d6);
- }
- } else {
- ASSERT(destination == kCoreRegisters);
- // Write Smi from r0 to r3 and r2 in double format.
- __ mov(scratch1, Operand(r0));
- ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
- __ push(lr);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
- // Write Smi from r1 to r1 and r0 in double format. r9 is scratch.
- __ mov(scratch1, Operand(r1));
- ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
-}
-
-
-void FloatingPointHelper::LoadOperands(
- MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* slow) {
-
- // Load right operand (r0) to d6 or r2/r3.
- LoadNumber(masm, destination,
- r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);
-
- // Load left operand (r1) to d7 or r0/r1.
- LoadNumber(masm, destination,
- r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
-}
-
-
-void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
- Destination destination,
- Register object,
- DwVfpRegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number) {
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
-
- Label is_smi, done;
-
- __ JumpIfSmi(object, &is_smi);
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
-
- // Handle loading a double from a heap number.
- if (CpuFeatures::IsSupported(VFP3) &&
- destination == kVFPRegisters) {
- CpuFeatures::Scope scope(VFP3);
- // Load the double from tagged HeapNumber to double register.
- __ sub(scratch1, object, Operand(kHeapObjectTag));
- __ vldr(dst, scratch1, HeapNumber::kValueOffset);
- } else {
- ASSERT(destination == kCoreRegisters);
- // Load the double from heap number to dst1 and dst2 in double format.
- __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
- }
- __ jmp(&done);
-
- // Handle loading a double from a smi.
- __ bind(&is_smi);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Convert smi to double using VFP instructions.
- __ SmiUntag(scratch1, object);
- __ vmov(dst.high(), scratch1);
- __ vcvt_f64_s32(dst, dst.high());
- if (destination == kCoreRegisters) {
- // Load the converted smi to dst1 and dst2 in double format.
- __ vmov(dst1, dst2, dst);
- }
- } else {
- ASSERT(destination == kCoreRegisters);
- // Write smi to dst1 and dst2 double format.
- __ mov(scratch1, Operand(object));
- ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
- __ push(lr);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch,
- Label* not_number) {
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
- Label is_smi;
- Label done;
- Label not_in_int32_range;
-
- __ JumpIfSmi(object, &is_smi);
- __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
- __ cmp(scratch1, heap_number_map);
- __ b(ne, not_number);
- __ ConvertToInt32(object,
- dst,
- scratch1,
- scratch2,
- double_scratch,
- &not_in_int32_range);
- __ jmp(&done);
-
- __ bind(&not_in_int32_range);
- __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- __ EmitOutOfInt32RangeTruncate(dst,
- scratch1,
- scratch2,
- scratch3);
- __ jmp(&done);
-
- __ bind(&is_smi);
- __ SmiUntag(dst, object);
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
- Register object,
- Destination destination,
- DwVfpRegister double_dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- SwVfpRegister single_scratch,
- Label* not_int32) {
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!heap_number_map.is(object) &&
- !heap_number_map.is(scratch1) &&
- !heap_number_map.is(scratch2));
-
- Label done, obj_is_not_smi;
-
- __ JumpIfNotSmi(object, &obj_is_not_smi);
- __ SmiUntag(scratch1, object);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ vmov(single_scratch, scratch1);
- __ vcvt_f64_s32(double_dst, single_scratch);
- if (destination == kCoreRegisters) {
- __ vmov(dst1, dst2, double_dst);
- }
- } else {
- Label fewer_than_20_useful_bits;
- // Expected output:
- // | dst1 | dst2 |
- // | s | exp | mantissa |
-
- // Check for zero.
- __ cmp(scratch1, Operand(0));
- __ mov(dst1, scratch1);
- __ mov(dst2, scratch1);
- __ b(eq, &done);
-
- // Preload the sign of the value.
- __ and_(dst1, scratch1, Operand(HeapNumber::kSignMask), SetCC);
- // Get the absolute value of the object (as an unsigned integer).
- __ rsb(scratch1, scratch1, Operand(0), SetCC, mi);
-
- // Get mantisssa[51:20].
-
- // Get the position of the first set bit.
- __ CountLeadingZeros(dst2, scratch1, scratch2);
- __ rsb(dst2, dst2, Operand(31));
-
- // Set the exponent.
- __ add(scratch2, dst2, Operand(HeapNumber::kExponentBias));
- __ Bfi(dst1, scratch2, scratch2,
- HeapNumber::kExponentShift, HeapNumber::kExponentBits);
-
- // Clear the first non null bit.
- __ mov(scratch2, Operand(1));
- __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst2));
-
- __ cmp(dst2, Operand(HeapNumber::kMantissaBitsInTopWord));
- // Get the number of bits to set in the lower part of the mantissa.
- __ sub(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
- __ b(mi, &fewer_than_20_useful_bits);
- // Set the higher 20 bits of the mantissa.
- __ orr(dst1, dst1, Operand(scratch1, LSR, scratch2));
- __ rsb(scratch2, scratch2, Operand(32));
- __ mov(dst2, Operand(scratch1, LSL, scratch2));
- __ b(&done);
-
- __ bind(&fewer_than_20_useful_bits);
- __ rsb(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord));
- __ mov(scratch2, Operand(scratch1, LSL, scratch2));
- __ orr(dst1, dst1, scratch2);
- // Set dst2 to 0.
- __ mov(dst2, Operand(0));
- }
-
- __ b(&done);
-
- __ bind(&obj_is_not_smi);
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
-
- // Load the number.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Load the double value.
- __ sub(scratch1, object, Operand(kHeapObjectTag));
- __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
-
- __ EmitVFPTruncate(kRoundToZero,
- single_scratch,
- double_dst,
- scratch1,
- scratch2,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- __ b(ne, not_int32);
-
- if (destination == kCoreRegisters) {
- __ vmov(dst1, dst2, double_dst);
- }
-
- } else {
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- // Load the double value in the destination registers..
- __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- // Check for 0 and -0.
- __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask));
- __ orr(scratch1, scratch1, Operand(dst2));
- __ cmp(scratch1, Operand(0));
- __ b(eq, &done);
-
- // Check that the value can be exactly represented by a 32-bit integer.
- // Jump to not_int32 if that's not the case.
- DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
-
- // dst1 and dst2 were trashed. Reload the double value.
- __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
- }
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch,
- Label* not_int32) {
- ASSERT(!dst.is(object));
- ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
- ASSERT(!scratch1.is(scratch2) &&
- !scratch1.is(scratch3) &&
- !scratch2.is(scratch3));
-
- Label done;
-
- // Untag the object into the destination register.
- __ SmiUntag(dst, object);
- // Just return if the object is a smi.
- __ JumpIfSmi(object, &done);
-
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
-
- // Object is a heap number.
- // Convert the floating point value to a 32-bit integer.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- SwVfpRegister single_scratch = double_scratch.low();
- // Load the double value.
- __ sub(scratch1, object, Operand(kHeapObjectTag));
- __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset);
-
- __ EmitVFPTruncate(kRoundToZero,
- single_scratch,
- double_scratch,
- scratch1,
- scratch2,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- __ b(ne, not_int32);
- // Get the result in the destination register.
- __ vmov(dst, single_scratch);
-
- } else {
- // Load the double value in the destination registers.
- __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- // Check for 0 and -0.
- __ bic(dst, scratch1, Operand(HeapNumber::kSignMask));
- __ orr(dst, scratch2, Operand(dst));
- __ cmp(dst, Operand(0));
- __ b(eq, &done);
-
- DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
-
- // Registers state after DoubleIs32BitInteger.
- // dst: mantissa[51:20].
- // scratch2: 1
-
- // Shift back the higher bits of the mantissa.
- __ mov(dst, Operand(dst, LSR, scratch3));
- // Set the implicit first bit.
- __ rsb(scratch3, scratch3, Operand(32));
- __ orr(dst, dst, Operand(scratch2, LSL, scratch3));
- // Set the sign.
- __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ tst(scratch1, Operand(HeapNumber::kSignMask));
- __ rsb(dst, dst, Operand(0), LeaveCC, mi);
- }
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
- Register src1,
- Register src2,
- Register dst,
- Register scratch,
- Label* not_int32) {
- // Get exponent alone in scratch.
- __ Ubfx(scratch,
- src1,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // Substract the bias from the exponent.
- __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC);
-
- // src1: higher (exponent) part of the double value.
- // src2: lower (mantissa) part of the double value.
- // scratch: unbiased exponent.
-
- // Fast cases. Check for obvious non 32-bit integer values.
- // Negative exponent cannot yield 32-bit integers.
- __ b(mi, not_int32);
- // Exponent greater than 31 cannot yield 32-bit integers.
- // Also, a positive value with an exponent equal to 31 is outside of the
- // signed 32-bit integer range.
- // Another way to put it is that if (exponent - signbit) > 30 then the
- // number cannot be represented as an int32.
- Register tmp = dst;
- __ sub(tmp, scratch, Operand(src1, LSR, 31));
- __ cmp(tmp, Operand(30));
- __ b(gt, not_int32);
- // - Bits [21:0] in the mantissa are not null.
- __ tst(src2, Operand(0x3fffff));
- __ b(ne, not_int32);
-
- // Otherwise the exponent needs to be big enough to shift left all the
- // non zero bits left. So we need the (30 - exponent) last bits of the
- // 31 higher bits of the mantissa to be null.
- // Because bits [21:0] are null, we can check instead that the
- // (32 - exponent) last bits of the 32 higher bits of the mantisssa are null.
-
- // Get the 32 higher bits of the mantissa in dst.
- __ Ubfx(dst,
- src2,
- HeapNumber::kMantissaBitsInTopWord,
- 32 - HeapNumber::kMantissaBitsInTopWord);
- __ orr(dst,
- dst,
- Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord));
-
- // Create the mask and test the lower bits (of the higher bits).
- __ rsb(scratch, scratch, Operand(32));
- __ mov(src2, Operand(1));
- __ mov(src1, Operand(src2, LSL, scratch));
- __ sub(src1, src1, Operand(1));
- __ tst(dst, src1);
- __ b(ne, not_int32);
-}
-
-
-void FloatingPointHelper::CallCCodeForDoubleOperation(
- MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch) {
- // Using core registers:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
-
- // Assert that heap_number_result is callee-saved.
- // We currently always use r5 to pass it.
- ASSERT(heap_number_result.is(r5));
-
- // Push the current return address before the C call. Return will be
- // through pop(pc) below.
- __ push(lr);
- __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
- // Call C routine that may not cause GC or other trouble.
- __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
- 4);
- // Store answer in the overwritable heap number.
-#if !defined(USE_ARM_EABI)
- // Double returned in fp coprocessor register 0 and 1, encoded as
- // register cr8. Offsets must be divisible by 4 for coprocessor so we
- // need to substract the tag from heap_number_result.
- __ sub(scratch, heap_number_result, Operand(kHeapObjectTag));
- __ stc(p1, cr8, MemOperand(scratch, HeapNumber::kValueOffset));
-#else
- // Double returned in registers 0 and 1.
- __ Strd(r0, r1, FieldMemOperand(heap_number_result,
- HeapNumber::kValueOffset));
-#endif
- // Place heap_number_result in r0 and return to the pushed return address.
- __ mov(r0, Operand(heap_number_result));
- __ pop(pc);
-}
-
-
-// See comment for class.
-void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
- Label max_negative_int;
- // the_int_ has the answer which is a signed int32 but not a Smi.
- // We test for the special value that has a different exponent. This test
- // has the neat side effect of setting the flags according to the sign.
- STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- __ cmp(the_int_, Operand(0x80000000u));
- __ b(eq, &max_negative_int);
- // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
- // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
- uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ mov(scratch_, Operand(non_smi_exponent));
- // Set the sign bit in scratch_ if the value was negative.
- __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
- // Subtract from 0 if the value was negative.
- __ rsb(the_int_, the_int_, Operand(0, RelocInfo::NONE), LeaveCC, cs);
- // We should be masking the implict first digit of the mantissa away here,
- // but it just ends up combining harmlessly with the last digit of the
- // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
- // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
- ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
- __ str(scratch_, FieldMemOperand(the_heap_number_,
- HeapNumber::kExponentOffset));
- __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
- __ str(scratch_, FieldMemOperand(the_heap_number_,
- HeapNumber::kMantissaOffset));
- __ Ret();
-
- __ bind(&max_negative_int);
- // The max negative int32 is stored as a positive number in the mantissa of
- // a double because it uses a sign bit instead of using two's complement.
- // The actual mantissa bits stored are all 0 because the implicit most
- // significant 1 bit is not stored.
- non_smi_exponent += 1 << HeapNumber::kExponentShift;
- __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
- __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
- __ mov(ip, Operand(0, RelocInfo::NONE));
- __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
- __ Ret();
-}
-
-
-// Handle the case where the lhs and rhs are the same object.
-// Equality is almost reflexive (everything but NaN), so this is a test
-// for "identity and not NaN".
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Label* slow,
- Condition cond,
- bool never_nan_nan) {
- Label not_identical;
- Label heap_number, return_equal;
- __ cmp(r0, r1);
- __ b(ne, &not_identical);
-
- // The two objects are identical. If we know that one of them isn't NaN then
- // we now know they test equal.
- if (cond != eq || !never_nan_nan) {
- // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
- // so we do the second best thing - test it ourselves.
- // They are both equal and they are not both Smis so both of them are not
- // Smis. If it's not a heap number, then return equal.
- if (cond == lt || cond == gt) {
- __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
- __ b(ge, slow);
- } else {
- __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
- __ b(eq, &heap_number);
- // Comparing JS objects with <=, >= is complicated.
- if (cond != eq) {
- __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
- __ b(ge, slow);
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if (cond == le || cond == ge) {
- __ cmp(r4, Operand(ODDBALL_TYPE));
- __ b(ne, &return_equal);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, r2);
- __ b(ne, &return_equal);
- if (cond == le) {
- // undefined <= undefined should fail.
- __ mov(r0, Operand(GREATER));
- } else {
- // undefined >= undefined should fail.
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
- }
- }
- }
- }
-
- __ bind(&return_equal);
- if (cond == lt) {
- __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
- } else if (cond == gt) {
- __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
- } else {
- __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
- }
- __ Ret();
-
- if (cond != eq || !never_nan_nan) {
- // For less and greater we don't have to check for NaN since the result of
- // x < x is false regardless. For the others here is some code to check
- // for NaN.
- if (cond != lt && cond != gt) {
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
-
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // Read top bits of double representation (second word of value).
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r3, Operand(-1));
- __ b(ne, &return_equal);
-
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
- // Or with all low-bits of mantissa.
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ orr(r0, r3, Operand(r2), SetCC);
- // For equal we already have the right value in r0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if
- // not (it's a NaN). For <= and >= we need to load r0 with the failing
- // value if it's a NaN.
- if (cond != eq) {
- // All-zero means Infinity means equal.
- __ Ret(eq);
- if (cond == le) {
- __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
- } else {
- __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
- }
- }
- __ Ret();
- }
- // No fall through here.
- }
-
- __ bind(&not_identical);
-}
-
-
-// See comment at call site.
-static void EmitSmiNonsmiComparison(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* lhs_not_nan,
- Label* slow,
- bool strict) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0)));
-
- Label rhs_is_smi;
- __ tst(rhs, Operand(kSmiTagMask));
- __ b(eq, &rhs_is_smi);
-
- // Lhs is a Smi. Check whether the rhs is a heap number.
- __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
- if (strict) {
- // If rhs is not a number and lhs is a Smi then strict equality cannot
- // succeed. Return non-equal
- // If rhs is r0 then there is already a non zero value in it.
- if (!rhs.is(r0)) {
- __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
- }
- __ Ret(ne);
- } else {
- // Smi compared non-strictly with a non-Smi non-heap-number. Call
- // the runtime.
- __ b(ne, slow);
- }
-
- // Lhs is a smi, rhs is a number.
- if (CpuFeatures::IsSupported(VFP3)) {
- // Convert lhs to a double in d7.
- CpuFeatures::Scope scope(VFP3);
- __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
- // Load the double from rhs, tagged HeapNumber r0, to d6.
- __ sub(r7, rhs, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- } else {
- __ push(lr);
- // Convert lhs to a double in r2, r3.
- __ mov(r7, Operand(lhs));
- ConvertToDoubleStub stub1(r3, r2, r7, r6);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
- // Load rhs to a double in r0, r1.
- __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- __ pop(lr);
- }
-
- // We now have both loaded as doubles but we can skip the lhs nan check
- // since it's a smi.
- __ jmp(lhs_not_nan);
-
- __ bind(&rhs_is_smi);
- // Rhs is a smi. Check whether the non-smi lhs is a heap number.
- __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
- if (strict) {
- // If lhs is not a number and rhs is a smi then strict equality cannot
- // succeed. Return non-equal.
- // If lhs is r0 then there is already a non zero value in it.
- if (!lhs.is(r0)) {
- __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
- }
- __ Ret(ne);
- } else {
- // Smi compared non-strictly with a non-smi non-heap-number. Call
- // the runtime.
- __ b(ne, slow);
- }
-
- // Rhs is a smi, lhs is a heap number.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Load the double from lhs, tagged HeapNumber r1, to d7.
- __ sub(r7, lhs, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- // Convert rhs to a double in d6 .
- __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
- } else {
- __ push(lr);
- // Load lhs to a double in r2, r3.
- __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- // Convert rhs to a double in r0, r1.
- __ mov(r7, Operand(rhs));
- ConvertToDoubleStub stub2(r1, r0, r7, r6);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
- // Fall through to both_loaded_as_doubles.
-}
-
-
-void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- Register rhs_exponent = exp_first ? r0 : r1;
- Register lhs_exponent = exp_first ? r2 : r3;
- Register rhs_mantissa = exp_first ? r1 : r0;
- Register lhs_mantissa = exp_first ? r3 : r2;
- Label one_is_nan, neither_is_nan;
-
- __ Sbfx(r4,
- lhs_exponent,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r4, Operand(-1));
- __ b(ne, lhs_not_nan);
- __ mov(r4,
- Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
- SetCC);
- __ b(ne, &one_is_nan);
- __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE));
- __ b(ne, &one_is_nan);
-
- __ bind(lhs_not_nan);
- __ Sbfx(r4,
- rhs_exponent,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r4, Operand(-1));
- __ b(ne, &neither_is_nan);
- __ mov(r4,
- Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
- SetCC);
- __ b(ne, &one_is_nan);
- __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE));
- __ b(eq, &neither_is_nan);
-
- __ bind(&one_is_nan);
- // NaN comparisons always fail.
- // Load whatever we need in r0 to make the comparison fail.
- if (cond == lt || cond == le) {
- __ mov(r0, Operand(GREATER));
- } else {
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
-
- __ bind(&neither_is_nan);
-}
-
-
-// See comment at call site.
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
- Condition cond) {
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- Register rhs_exponent = exp_first ? r0 : r1;
- Register lhs_exponent = exp_first ? r2 : r3;
- Register rhs_mantissa = exp_first ? r1 : r0;
- Register lhs_mantissa = exp_first ? r3 : r2;
-
- // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
- if (cond == eq) {
- // Doubles are not equal unless they have the same bit pattern.
- // Exception: 0 and -0.
- __ cmp(rhs_mantissa, Operand(lhs_mantissa));
- __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
- // Return non-zero if the numbers are unequal.
- __ Ret(ne);
-
- __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
- // If exponents are equal then return 0.
- __ Ret(eq);
-
- // Exponents are unequal. The only way we can return that the numbers
- // are equal is if one is -0 and the other is 0. We already dealt
- // with the case where both are -0 or both are 0.
- // We start by seeing if the mantissas (that are equal) or the bottom
- // 31 bits of the rhs exponent are non-zero. If so we return not
- // equal.
- __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
- __ mov(r0, Operand(r4), LeaveCC, ne);
- __ Ret(ne);
- // Now they are equal if and only if the lhs exponent is zero in its
- // low 31 bits.
- __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
- __ Ret();
- } else {
- // Call a native function to do a comparison between two non-NaNs.
- // Call C routine that may not cause GC or other trouble.
- __ push(lr);
- __ PrepareCallCFunction(4, r5); // Two doubles count as 4 arguments.
- __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
- __ pop(pc); // Return.
- }
-}
-
-
-// See comment at call site.
-static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
- Register lhs,
- Register rhs) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0)));
-
- // If either operand is a JSObject or an oddball value, then they are
- // not equal since their pointers are different.
- // There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- Label first_non_object;
- // Get the type of the first operand into r2 and compare it with
- // FIRST_JS_OBJECT_TYPE.
- __ CompareObjectType(rhs, r2, r2, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &first_non_object);
-
- // Return non-zero (r0 is not zero)
- Label return_not_equal;
- __ bind(&return_not_equal);
- __ Ret();
-
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ cmp(r2, Operand(ODDBALL_TYPE));
- __ b(eq, &return_not_equal);
-
- __ CompareObjectType(lhs, r3, r3, FIRST_JS_OBJECT_TYPE);
- __ b(ge, &return_not_equal);
-
- // Check for oddballs: true, false, null, undefined.
- __ cmp(r3, Operand(ODDBALL_TYPE));
- __ b(eq, &return_not_equal);
-
- // Now that we have the types we might as well check for symbol-symbol.
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- STATIC_ASSERT(kSymbolTag != 0);
- __ and_(r2, r2, Operand(r3));
- __ tst(r2, Operand(kIsSymbolMask));
- __ b(ne, &return_not_equal);
-}
-
-
-// See comment at call site.
-static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* both_loaded_as_doubles,
- Label* not_heap_numbers,
- Label* slow) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0)));
-
- __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
- __ b(ne, not_heap_numbers);
- __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
- __ cmp(r2, r3);
- __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
-
- // Both are heap numbers. Load them up then jump to the code we have
- // for that.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ sub(r7, rhs, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- __ sub(r7, lhs, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- } else {
- __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- }
- __ jmp(both_loaded_as_doubles);
-}
-
-
-// Fast negative check for symbol-to-symbol equality.
-static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* possible_strings,
- Label* not_both_strings) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0)));
-
- // r2 is object type of rhs.
- // Ensure that no non-strings have the symbol bit set.
- Label object_test;
- STATIC_ASSERT(kSymbolTag != 0);
- __ tst(r2, Operand(kIsNotStringMask));
- __ b(ne, &object_test);
- __ tst(r2, Operand(kIsSymbolMask));
- __ b(eq, possible_strings);
- __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
- __ b(ge, not_both_strings);
- __ tst(r3, Operand(kIsSymbolMask));
- __ b(eq, possible_strings);
-
- // Both are symbols. We already checked they weren't the same pointer
- // so they are not equal.
- __ mov(r0, Operand(NOT_EQUAL));
- __ Ret();
-
- __ bind(&object_test);
- __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
- __ b(lt, not_both_strings);
- __ CompareObjectType(lhs, r2, r3, FIRST_JS_OBJECT_TYPE);
- __ b(lt, not_both_strings);
- // If both objects are undetectable, they are equal. Otherwise, they
- // are not equal, since they are different objects and an object is not
- // equal to undefined.
- __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
- __ and_(r0, r2, Operand(r3));
- __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
- __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
- __ Ret();
-}
-
-
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is a smi).
- __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
- __ sub(mask, mask, Operand(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Isolate* isolate = masm->isolate();
- Label is_smi;
- Label load_result_from_cache;
- if (!object_is_smi) {
- __ JumpIfSmi(object, &is_smi);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- true);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ add(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
- __ eor(scratch1, scratch1, Operand(scratch2));
- __ and_(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ add(scratch1,
- number_string_cache,
- Operand(scratch1, LSL, kPointerSizeLog2 + 1));
-
- Register probe = mask;
- __ ldr(probe,
- FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ sub(scratch2, object, Operand(kHeapObjectTag));
- __ vldr(d0, scratch2, HeapNumber::kValueOffset);
- __ sub(probe, probe, Operand(kHeapObjectTag));
- __ vldr(d1, probe, HeapNumber::kValueOffset);
- __ VFPCompareAndSetFlags(d0, d1);
- __ b(ne, not_found); // The cache did not contain this value.
- __ b(&load_result_from_cache);
- } else {
- __ b(not_found);
- }
- }
-
- __ bind(&is_smi);
- Register scratch = scratch1;
- __ and_(scratch, mask, Operand(object, ASR, 1));
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ add(scratch,
- number_string_cache,
- Operand(scratch, LSL, kPointerSizeLog2 + 1));
-
- // Check if the entry is the smi we are looking for.
- Register probe = mask;
- __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- __ cmp(object, probe);
- __ b(ne, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ ldr(result,
- FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(isolate->counters()->number_to_string_native(),
- 1,
- scratch1,
- scratch2);
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ ldr(r1, MemOperand(sp, 0));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
- __ add(sp, sp, Operand(1 * kPointerSize));
- __ Ret();
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
-// On entry lhs_ and rhs_ are the values to be compared.
-// On exit r0 is 0, positive or negative to indicate the result of
-// the comparison.
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
-
- Label slow; // Call builtin.
- Label not_smis, both_loaded_as_doubles, lhs_not_nan;
-
- if (include_smi_compare_) {
- Label not_two_smis, smi_done;
- __ orr(r2, r1, r0);
- __ tst(r2, Operand(kSmiTagMask));
- __ b(ne, &not_two_smis);
- __ mov(r1, Operand(r1, ASR, 1));
- __ sub(r0, r1, Operand(r0, ASR, 1));
- __ Ret();
- __ bind(&not_two_smis);
- } else if (FLAG_debug_code) {
- __ orr(r2, r1, r0);
- __ tst(r2, Operand(kSmiTagMask));
- __ Assert(ne, "CompareStub: unexpected smi operands.");
- }
-
- // NOTICE! This code is only reached after a smi-fast-case check, so
- // it is certain that at least one operand isn't a smi.
-
- // Handle the case where the objects are identical. Either returns the answer
- // or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
-
- // If either is a Smi (we know that not both are), then they can only
- // be strictly equal if the other is a HeapNumber.
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(0, Smi::FromInt(0));
- __ and_(r2, lhs_, Operand(rhs_));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(ne, &not_smis);
- // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
- // 1) Return the answer.
- // 2) Go to slow.
- // 3) Fall through to both_loaded_as_doubles.
- // 4) Jump to lhs_not_nan.
- // In cases 3 and 4 we have found out we were dealing with a number-number
- // comparison. If VFP3 is supported the double values of the numbers have
- // been loaded into d7 and d6. Otherwise, the double values have been loaded
- // into r0, r1, r2, and r3.
- EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);
-
- __ bind(&both_loaded_as_doubles);
- // The arguments have been converted to doubles and stored in d6 and d7, if
- // VFP3 is supported, or in r0, r1, r2, and r3.
- Isolate* isolate = masm->isolate();
- if (CpuFeatures::IsSupported(VFP3)) {
- __ bind(&lhs_not_nan);
- CpuFeatures::Scope scope(VFP3);
- Label no_nan;
- // ARMv7 VFP3 instructions to implement double precision comparison.
- __ VFPCompareAndSetFlags(d7, d6);
- Label nan;
- __ b(vs, &nan);
- __ mov(r0, Operand(EQUAL), LeaveCC, eq);
- __ mov(r0, Operand(LESS), LeaveCC, lt);
- __ mov(r0, Operand(GREATER), LeaveCC, gt);
- __ Ret();
-
- __ bind(&nan);
- // If one of the sides was a NaN then the v flag is set. Load r0 with
- // whatever it takes to make the comparison fail, since comparisons with NaN
- // always fail.
- if (cc_ == lt || cc_ == le) {
- __ mov(r0, Operand(GREATER));
- } else {
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
- } else {
- // Checks for NaN in the doubles we have loaded. Can return the answer or
- // fall through if neither is a NaN. Also binds lhs_not_nan.
- EmitNanCheck(masm, &lhs_not_nan, cc_);
- // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
- // answer. Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc_);
- }
-
- __ bind(&not_smis);
- // At this point we know we are dealing with two different objects,
- // and neither of them is a Smi. The objects are in rhs_ and lhs_.
- if (strict_) {
- // This returns non-equal for some object types, or falls through if it
- // was not lucky.
- EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
- }
-
- Label check_for_symbols;
- Label flat_string_check;
- // Check for heap-number-heap-number comparison. Can jump to slow case,
- // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
- // that case. If the inputs are not doubles then jumps to check_for_symbols.
- // In this case r2 will contain the type of rhs_. Never falls through.
- EmitCheckForTwoHeapNumbers(masm,
- lhs_,
- rhs_,
- &both_loaded_as_doubles,
- &check_for_symbols,
- &flat_string_check);
-
- __ bind(&check_for_symbols);
- // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
- // symbols.
- if (cc_ == eq && !strict_) {
- // Returns an answer for two symbols or two detectable objects.
- // Otherwise jumps to string case or not both strings case.
- // Assumes that r2 is the type of rhs_ on entry.
- EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
- }
-
- // Check for both being sequential ASCII strings, and inline if that is the
- // case.
- __ bind(&flat_string_check);
-
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
-
- __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs_,
- rhs_,
- r2,
- r3,
- r4,
- r5);
- // Never falls through to here.
-
- __ bind(&slow);
-
- __ Push(lhs_, rhs_);
- // Figure out which native to call and setup the arguments.
- Builtins::JavaScript native;
- if (cc_ == eq) {
- native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
- } else {
- native = Builtins::COMPARE;
- int ncr; // NaN compare result
- if (cc_ == lt || cc_ == le) {
- ncr = GREATER;
- } else {
- ASSERT(cc_ == gt || cc_ == ge); // remaining cases
- ncr = LESS;
- }
- __ mov(r0, Operand(Smi::FromInt(ncr)));
- __ push(r0);
- }
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_JS);
-}
-
-
-// This stub does not handle the inlined cases (Smis, Booleans, undefined).
-// The stub returns zero for false, and a non-zero value for true.
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- // This stub uses VFP3 instructions.
- ASSERT(CpuFeatures::IsEnabled(VFP3));
-
- Label false_result;
- Label not_heap_number;
- Register scratch = r9.is(tos_) ? r7 : r9;
-
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(tos_, ip);
- __ b(eq, &false_result);
-
- // HeapNumber => false iff +0, -0, or NaN.
- __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, ip);
- __ b(&not_heap_number, ne);
-
- __ sub(ip, tos_, Operand(kHeapObjectTag));
- __ vldr(d1, ip, HeapNumber::kValueOffset);
- __ VFPCompareAndSetFlags(d1, 0.0);
- // "tos_" is a register, and contains a non zero value by default.
- // Hence we only need to overwrite "tos_" with zero to return false for
- // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN
- __ Ret();
-
- __ bind(&not_heap_number);
-
- // Check if the value is 'null'.
- // 'null' => false.
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(tos_, ip);
- __ b(&false_result, eq);
-
- // It can be an undetectable object.
- // Undetectable => false.
- __ ldr(ip, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(ip, Map::kBitFieldOffset));
- __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
- __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
- __ b(&false_result, eq);
-
- // JavaScript object => true.
- __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE));
- // "tos_" is a register and contains a non-zero value.
- // Hence we implicitly return true if the greater than
- // condition is satisfied.
- __ Ret(gt);
-
- // Check for string
- __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
- // "tos_" is a register and contains a non-zero value.
- // Hence we implicitly return true if the greater than
- // condition is satisfied.
- __ Ret(gt);
-
- // String value => false iff empty, i.e., length is zero
- __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset));
- // If length is zero, "tos_" contains zero ==> false.
- // If length is not zero, "tos_" contains a non-zero value ==> true.
- __ Ret();
-
- // Return 0 in "tos_" for false .
- __ bind(&false_result);
- __ mov(tos_, Operand(0, RelocInfo::NONE));
- __ Ret();
-}
-
-
-// We fall into this code if the operands were Smis, but the result was
-// not (eg. overflow). We branch into this code (to the not_smi label) if
-// the operands were not both Smi. The operands are in r0 and r1. In order
-// to call the C-implemented binary fp operation routines we need to end up
-// with the double precision floating point operands in r0 and r1 (for the
-// value in r1) and r2 and r3 (for the value in r0).
-void GenericBinaryOpStub::HandleBinaryOpSlowCases(
- MacroAssembler* masm,
- Label* not_smi,
- Register lhs,
- Register rhs,
- const Builtins::JavaScript& builtin) {
- Label slow, slow_reverse, do_the_call;
- bool use_fp_registers =
- CpuFeatures::IsSupported(VFP3) &&
- Token::MOD != op_;
-
- ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
- Register heap_number_map = r6;
-
- if (ShouldGenerateSmiCode()) {
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- // Smi-smi case (overflow).
- // Since both are Smis there is no heap number to overwrite, so allocate.
- // The new heap number is in r5. r3 and r7 are scratch.
- __ AllocateHeapNumber(
- r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow);
-
- // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
- // using registers d7 and d6 for the double values.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ mov(r7, Operand(rhs, ASR, kSmiTagSize));
- __ vmov(s15, r7);
- __ vcvt_f64_s32(d7, s15);
- __ mov(r7, Operand(lhs, ASR, kSmiTagSize));
- __ vmov(s13, r7);
- __ vcvt_f64_s32(d6, s13);
- if (!use_fp_registers) {
- __ vmov(r2, r3, d7);
- __ vmov(r0, r1, d6);
- }
- } else {
- // Write Smi from rhs to r3 and r2 in double format. r9 is scratch.
- __ mov(r7, Operand(rhs));
- ConvertToDoubleStub stub1(r3, r2, r7, r9);
- __ push(lr);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
- // Write Smi from lhs to r1 and r0 in double format. r9 is scratch.
- __ mov(r7, Operand(lhs));
- ConvertToDoubleStub stub2(r1, r0, r7, r9);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
- __ jmp(&do_the_call); // Tail call. No return.
- }
-
- // We branch here if at least one of r0 and r1 is not a Smi.
- __ bind(not_smi);
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- // After this point we have the left hand side in r1 and the right hand side
- // in r0.
- if (lhs.is(r0)) {
- __ Swap(r0, r1, ip);
- }
-
- // The type transition also calculates the answer.
- bool generate_code_to_calculate_answer = true;
-
- if (ShouldGenerateFPCode()) {
- // DIV has neither SmiSmi fast code nor specialized slow code.
- // So don't try to patch a DIV Stub.
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- GenerateTypeTransition(masm); // Tail call.
- generate_code_to_calculate_answer = false;
- break;
-
- case Token::DIV:
- // DIV has neither SmiSmi fast code nor specialized slow code.
- // So don't try to patch a DIV Stub.
- break;
-
- default:
- break;
- }
- }
-
- if (generate_code_to_calculate_answer) {
- Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
- if (mode_ == NO_OVERWRITE) {
- // In the case where there is no chance of an overwritable float we may
- // as well do the allocation immediately while r0 and r1 are untouched.
- __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
- }
-
- // Move r0 to a double in r2-r3.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmp(r4, heap_number_map);
- __ b(ne, &slow);
- if (mode_ == OVERWRITE_RIGHT) {
- __ mov(r5, Operand(r0)); // Overwrite this heap number.
- }
- if (use_fp_registers) {
- CpuFeatures::Scope scope(VFP3);
- // Load the double from tagged HeapNumber r0 to d7.
- __ sub(r7, r0, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- } else {
- // Calling convention says that second double is in r2 and r3.
- __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
- }
- __ jmp(&finished_loading_r0);
- __ bind(&r0_is_smi);
- if (mode_ == OVERWRITE_RIGHT) {
- // We can't overwrite a Smi so get address of new heap number into r5.
- __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
- }
-
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Convert smi in r0 to double in d7.
- __ mov(r7, Operand(r0, ASR, kSmiTagSize));
- __ vmov(s15, r7);
- __ vcvt_f64_s32(d7, s15);
- if (!use_fp_registers) {
- __ vmov(r2, r3, d7);
- }
- } else {
- // Write Smi from r0 to r3 and r2 in double format.
- __ mov(r7, Operand(r0));
- ConvertToDoubleStub stub3(r3, r2, r7, r4);
- __ push(lr);
- __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
-
- // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
- // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
- Label r1_is_not_smi;
- if ((runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) &&
- HasSmiSmiFastPath()) {
- __ tst(r1, Operand(kSmiTagMask));
- __ b(ne, &r1_is_not_smi);
- GenerateTypeTransition(masm); // Tail call.
- }
-
- __ bind(&finished_loading_r0);
-
- // Move r1 to a double in r0-r1.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
- __ bind(&r1_is_not_smi);
- __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
- __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmp(r4, heap_number_map);
- __ b(ne, &slow);
- if (mode_ == OVERWRITE_LEFT) {
- __ mov(r5, Operand(r1)); // Overwrite this heap number.
- }
- if (use_fp_registers) {
- CpuFeatures::Scope scope(VFP3);
- // Load the double from tagged HeapNumber r1 to d6.
- __ sub(r7, r1, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- } else {
- // Calling convention says that first double is in r0 and r1.
- __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
- }
- __ jmp(&finished_loading_r1);
- __ bind(&r1_is_smi);
- if (mode_ == OVERWRITE_LEFT) {
- // We can't overwrite a Smi so get address of new heap number into r5.
- __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
- }
-
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Convert smi in r1 to double in d6.
- __ mov(r7, Operand(r1, ASR, kSmiTagSize));
- __ vmov(s13, r7);
- __ vcvt_f64_s32(d6, s13);
- if (!use_fp_registers) {
- __ vmov(r0, r1, d6);
- }
- } else {
- // Write Smi from r1 to r1 and r0 in double format.
- __ mov(r7, Operand(r1));
- ConvertToDoubleStub stub4(r1, r0, r7, r9);
- __ push(lr);
- __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
-
- __ bind(&finished_loading_r1);
- }
-
- if (generate_code_to_calculate_answer || do_the_call.is_linked()) {
- __ bind(&do_the_call);
- // If we are inlining the operation using VFP3 instructions for
- // add, subtract, multiply, or divide, the arguments are in d6 and d7.
- if (use_fp_registers) {
- CpuFeatures::Scope scope(VFP3);
- // ARMv7 VFP3 instructions to implement
- // double precision, add, subtract, multiply, divide.
-
- if (Token::MUL == op_) {
- __ vmul(d5, d6, d7);
- } else if (Token::DIV == op_) {
- __ vdiv(d5, d6, d7);
- } else if (Token::ADD == op_) {
- __ vadd(d5, d6, d7);
- } else if (Token::SUB == op_) {
- __ vsub(d5, d6, d7);
- } else {
- UNREACHABLE();
- }
- __ sub(r0, r5, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ add(r0, r0, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // If we did not inline the operation, then the arguments are in:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
- // r5: Address of heap number for result.
-
- __ push(lr); // For later.
- __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments.
- // Call C routine that may not cause GC or other trouble. r5 is callee
- // save.
- __ CallCFunction(
- ExternalReference::double_fp_operation(op_, masm->isolate()), 4);
- // Store answer in the overwritable heap number.
- #if !defined(USE_ARM_EABI)
- // Double returned in fp coprocessor register 0 and 1, encoded as
- // register cr8. Offsets must be divisible by 4 for coprocessor so we
- // need to substract the tag from r5.
- __ sub(r4, r5, Operand(kHeapObjectTag));
- __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
- #else
- // Double returned in registers 0 and 1.
- __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
- #endif
- __ mov(r0, Operand(r5));
- // And we are done.
- __ pop(pc);
- }
- }
- }
-
- if (!generate_code_to_calculate_answer &&
- !slow_reverse.is_linked() &&
- !slow.is_linked()) {
- return;
- }
-
- if (lhs.is(r0)) {
- __ b(&slow);
- __ bind(&slow_reverse);
- __ Swap(r0, r1, ip);
- }
-
- heap_number_map = no_reg; // Don't use this any more from here on.
-
- // We jump to here if something goes wrong (one param is not a number of any
- // sort or new-space allocation fails).
- __ bind(&slow);
-
- // Push arguments to the stack
- __ Push(r1, r0);
-
- if (Token::ADD == op_) {
- // Test for string arguments before calling runtime.
- // r1 : first argument
- // r0 : second argument
- // sp[0] : second argument
- // sp[4] : first argument
-
- Label not_strings, not_string1, string1, string1_smi2;
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &not_string1);
- __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &not_string1);
-
- // First argument is a a string, test second.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &string1_smi2);
- __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &string1);
-
- // First and second argument are strings.
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&string1_smi2);
- // First argument is a string, second is a smi. Try to lookup the number
- // string for the smi in the number string cache.
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm, r0, r2, r4, r5, r6, true, &string1);
-
- // Replace second argument on stack and tailcall string add stub to make
- // the result.
- __ str(r2, MemOperand(sp, 0));
- __ TailCallStub(&string_add_stub);
-
- // Only first argument is a string.
- __ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
-
- // First argument was not a string, test second.
- __ bind(&not_string1);
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &not_strings);
- __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &not_strings);
-
- // Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
-
- __ bind(&not_strings);
- }
-
- __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
-}
-
-
-// For bitwise ops where the inputs are not both Smis we here try to determine
-// whether both inputs are either Smis or at least heap numbers that can be
-// represented by a 32 bit signed value. We truncate towards zero as required
-// by the ES spec. If this is the case we do the bitwise op and see if the
-// result is a Smi. If so, great, otherwise we try to find a heap number to
-// write the answer into (either by allocating or by overwriting).
-// On entry the operands are in lhs and rhs. On exit the answer is in r0.
-void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
- Register lhs,
- Register rhs) {
- Label slow, result_not_a_smi;
- Label rhs_is_smi, lhs_is_smi;
- Label done_checking_rhs, done_checking_lhs;
-
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- __ tst(lhs, Operand(kSmiTagMask));
- __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number.
- __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
- __ cmp(r4, heap_number_map);
- __ b(ne, &slow);
- __ ConvertToInt32(lhs, r3, r5, r4, d0, &slow);
- __ jmp(&done_checking_lhs);
- __ bind(&lhs_is_smi);
- __ mov(r3, Operand(lhs, ASR, 1));
- __ bind(&done_checking_lhs);
-
- __ tst(rhs, Operand(kSmiTagMask));
- __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number.
- __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
- __ cmp(r4, heap_number_map);
- __ b(ne, &slow);
- __ ConvertToInt32(rhs, r2, r5, r4, d0, &slow);
- __ jmp(&done_checking_rhs);
- __ bind(&rhs_is_smi);
- __ mov(r2, Operand(rhs, ASR, 1));
- __ bind(&done_checking_rhs);
-
- ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));
-
- // r0 and r1: Original operands (Smi or heap numbers).
- // r2 and r3: Signed int32 operands.
- switch (op_) {
- case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break;
- case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
- case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
- case Token::SAR:
- // Use only the 5 least significant bits of the shift count.
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, ASR, r2));
- break;
- case Token::SHR:
- // Use only the 5 least significant bits of the shift count.
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSR, r2), SetCC);
- // SHR is special because it is required to produce a positive answer.
- // The code below for writing into heap numbers isn't capable of writing
- // the register as an unsigned int so we go to slow case if we hit this
- // case.
- if (CpuFeatures::IsSupported(VFP3)) {
- __ b(mi, &result_not_a_smi);
- } else {
- __ b(mi, &slow);
- }
- break;
- case Token::SHL:
- // Use only the 5 least significant bits of the shift count.
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSL, r2));
- break;
- default: UNREACHABLE();
- }
- // check that the *signed* result fits in a smi
- __ add(r3, r2, Operand(0x40000000), SetCC);
- __ b(mi, &result_not_a_smi);
- __ mov(r0, Operand(r2, LSL, kSmiTagSize));
- __ Ret();
-
- Label have_to_allocate, got_a_heap_number;
- __ bind(&result_not_a_smi);
- switch (mode_) {
- case OVERWRITE_RIGHT: {
- __ tst(rhs, Operand(kSmiTagMask));
- __ b(eq, &have_to_allocate);
- __ mov(r5, Operand(rhs));
- break;
- }
- case OVERWRITE_LEFT: {
- __ tst(lhs, Operand(kSmiTagMask));
- __ b(eq, &have_to_allocate);
- __ mov(r5, Operand(lhs));
- break;
- }
- case NO_OVERWRITE: {
- // Get a new heap number in r5. r4 and r7 are scratch.
- __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
- }
- default: break;
- }
- __ bind(&got_a_heap_number);
- // r2: Answer as signed int32.
- // r5: Heap number to write answer into.
-
- // Nothing can go wrong now, so move the heap number to r0, which is the
- // result.
- __ mov(r0, Operand(r5));
-
- if (CpuFeatures::IsSupported(VFP3)) {
- // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, r2);
- if (op_ == Token::SHR) {
- __ vcvt_f64_u32(d0, s0);
- } else {
- __ vcvt_f64_s32(d0, s0);
- }
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r3, HeapNumber::kValueOffset);
- __ Ret();
- } else {
- // Tail call that writes the int32 in r2 to the heap number in r0, using
- // r3 as scratch. r0 is preserved and returned.
- WriteInt32ToHeapNumberStub stub(r2, r0, r3);
- __ TailCallStub(&stub);
- }
-
- if (mode_ != NO_OVERWRITE) {
- __ bind(&have_to_allocate);
- // Get a new heap number in r5. r4 and r7 are scratch.
- __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
- __ jmp(&got_a_heap_number);
- }
-
- // If all else failed then we go to the runtime system.
- __ bind(&slow);
- __ Push(lhs, rhs); // Restore stack.
- switch (op_) {
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-
-
-// This function takes the known int in a register for the cases
-// where it doesn't know a good trick, and may deliver
-// a result that needs shifting.
-static void MultiplyByKnownIntInStub(
- MacroAssembler* masm,
- Register result,
- Register source,
- Register known_int_register, // Smi tagged.
- int known_int,
- int* required_shift) { // Including Smi tag shift
- switch (known_int) {
- case 3:
- __ add(result, source, Operand(source, LSL, 1));
- *required_shift = 1;
- break;
- case 5:
- __ add(result, source, Operand(source, LSL, 2));
- *required_shift = 1;
- break;
- case 6:
- __ add(result, source, Operand(source, LSL, 1));
- *required_shift = 2;
- break;
- case 7:
- __ rsb(result, source, Operand(source, LSL, 3));
- *required_shift = 1;
- break;
- case 9:
- __ add(result, source, Operand(source, LSL, 3));
- *required_shift = 1;
- break;
- case 10:
- __ add(result, source, Operand(source, LSL, 2));
- *required_shift = 2;
- break;
- default:
- ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient.
- __ mul(result, source, known_int_register);
- *required_shift = 0;
- }
-}
-
-
-// This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3
-// trick. See http://en.wikipedia.org/wiki/Divisibility_rule
-// Takes the sum of the digits base (mask + 1) repeatedly until we have a
-// number from 0 to mask. On exit the 'eq' condition flags are set if the
-// answer is exactly the mask.
-void IntegerModStub::DigitSum(MacroAssembler* masm,
- Register lhs,
- int mask,
- int shift,
- Label* entry) {
- ASSERT(mask > 0);
- ASSERT(mask <= 0xff); // This ensures we don't need ip to use it.
- Label loop;
- __ bind(&loop);
- __ and_(ip, lhs, Operand(mask));
- __ add(lhs, ip, Operand(lhs, LSR, shift));
- __ bind(entry);
- __ cmp(lhs, Operand(mask));
- __ b(gt, &loop);
-}
-
-
-void IntegerModStub::DigitSum(MacroAssembler* masm,
- Register lhs,
- Register scratch,
- int mask,
- int shift1,
- int shift2,
- Label* entry) {
- ASSERT(mask > 0);
- ASSERT(mask <= 0xff); // This ensures we don't need ip to use it.
- Label loop;
- __ bind(&loop);
- __ bic(scratch, lhs, Operand(mask));
- __ and_(ip, lhs, Operand(mask));
- __ add(lhs, ip, Operand(lhs, LSR, shift1));
- __ add(lhs, lhs, Operand(scratch, LSR, shift2));
- __ bind(entry);
- __ cmp(lhs, Operand(mask));
- __ b(gt, &loop);
-}
-
-
-// Splits the number into two halves (bottom half has shift bits). The top
-// half is subtracted from the bottom half. If the result is negative then
-// rhs is added.
-void IntegerModStub::ModGetInRangeBySubtraction(MacroAssembler* masm,
- Register lhs,
- int shift,
- int rhs) {
- int mask = (1 << shift) - 1;
- __ and_(ip, lhs, Operand(mask));
- __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC);
- __ add(lhs, lhs, Operand(rhs), LeaveCC, mi);
-}
-
-
-void IntegerModStub::ModReduce(MacroAssembler* masm,
- Register lhs,
- int max,
- int denominator) {
- int limit = denominator;
- while (limit * 2 <= max) limit *= 2;
- while (limit >= denominator) {
- __ cmp(lhs, Operand(limit));
- __ sub(lhs, lhs, Operand(limit), LeaveCC, ge);
- limit >>= 1;
- }
-}
-
-
-void IntegerModStub::ModAnswer(MacroAssembler* masm,
- Register result,
- Register shift_distance,
- Register mask_bits,
- Register sum_of_digits) {
- __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance));
- __ Ret();
-}
-
-
-// See comment for class.
-void IntegerModStub::Generate(MacroAssembler* masm) {
- __ mov(lhs_, Operand(lhs_, LSR, shift_distance_));
- __ bic(odd_number_, odd_number_, Operand(1));
- __ mov(odd_number_, Operand(odd_number_, LSL, 1));
- // We now have (odd_number_ - 1) * 2 in the register.
- // Build a switch out of branches instead of data because it avoids
- // having to teach the assembler about intra-code-object pointers
- // that are not in relative branch instructions.
- Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19;
- Label mod21, mod23, mod25;
- { Assembler::BlockConstPoolScope block_const_pool(masm);
- __ add(pc, pc, Operand(odd_number_));
- // When you read pc it is always 8 ahead, but when you write it you always
- // write the actual value. So we put in two nops to take up the slack.
- __ nop();
- __ nop();
- __ b(&mod3);
- __ b(&mod5);
- __ b(&mod7);
- __ b(&mod9);
- __ b(&mod11);
- __ b(&mod13);
- __ b(&mod15);
- __ b(&mod17);
- __ b(&mod19);
- __ b(&mod21);
- __ b(&mod23);
- __ b(&mod25);
- }
-
- // For each denominator we find a multiple that is almost only ones
- // when expressed in binary. Then we do the sum-of-digits trick for
- // that number. If the multiple is not 1 then we have to do a little
- // more work afterwards to get the answer into the 0-denominator-1
- // range.
- DigitSum(masm, lhs_, 3, 2, &mod3); // 3 = b11.
- __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0xf, 4, &mod5); // 5 * 3 = b1111.
- ModGetInRangeBySubtraction(masm, lhs_, 2, 5);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 7, 3, &mod7); // 7 = b111.
- __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0x3f, 6, &mod9); // 7 * 9 = b111111.
- ModGetInRangeBySubtraction(masm, lhs_, 3, 9);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0x3f, 6, 3, &mod11); // 5 * 11 = b110111.
- ModReduce(masm, lhs_, 0x3f, 11);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod13); // 19 * 13 = b11110111.
- ModReduce(masm, lhs_, 0xff, 13);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0xf, 4, &mod15); // 15 = b1111.
- __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0xff, 8, &mod17); // 15 * 17 = b11111111.
- ModGetInRangeBySubtraction(masm, lhs_, 4, 17);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod19); // 13 * 19 = b11110111.
- ModReduce(masm, lhs_, 0xff, 19);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0x3f, 6, &mod21); // 3 * 21 = b111111.
- ModReduce(masm, lhs_, 0x3f, 21);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0xff, 8, 7, &mod23); // 11 * 23 = b11111101.
- ModReduce(masm, lhs_, 0xff, 23);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0x7f, 7, 6, &mod25); // 5 * 25 = b1111101.
- ModReduce(masm, lhs_, 0x7f, 25);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-}
-
-
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
- // lhs_ : x
- // rhs_ : y
- // r0 : result
-
- Register result = r0;
- Register lhs = lhs_;
- Register rhs = rhs_;
-
- // This code can't cope with other register allocations yet.
- ASSERT(result.is(r0) &&
- ((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0))));
-
- Register smi_test_reg = r7;
- Register scratch = r9;
-
- // All ops need to know whether we are dealing with two Smis. Set up
- // smi_test_reg to tell us that.
- if (ShouldGenerateSmiCode()) {
- __ orr(smi_test_reg, lhs, Operand(rhs));
- }
-
- switch (op_) {
- case Token::ADD: {
- Label not_smi;
- // Fast path.
- if (ShouldGenerateSmiCode()) {
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- __ b(ne, &not_smi);
- __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
- // Return if no overflow.
- __ Ret(vc);
- __ sub(r0, r0, Operand(r1)); // Revert optimistic add.
- }
- HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::ADD);
- break;
- }
-
- case Token::SUB: {
- Label not_smi;
- // Fast path.
- if (ShouldGenerateSmiCode()) {
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- __ b(ne, &not_smi);
- if (lhs.is(r1)) {
- __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically.
- // Return if no overflow.
- __ Ret(vc);
- __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract.
- } else {
- __ sub(r0, r0, Operand(r1), SetCC); // Subtract y optimistically.
- // Return if no overflow.
- __ Ret(vc);
- __ add(r0, r0, Operand(r1)); // Revert optimistic subtract.
- }
- }
- HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::SUB);
- break;
- }
-
- case Token::MUL: {
- Label not_smi, slow;
- if (ShouldGenerateSmiCode()) {
- STATIC_ASSERT(kSmiTag == 0); // adjust code below
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- Register scratch2 = smi_test_reg;
- smi_test_reg = no_reg;
- __ b(ne, &not_smi);
- // Remove tag from one operand (but keep sign), so that result is Smi.
- __ mov(ip, Operand(rhs, ASR, kSmiTagSize));
- // Do multiplication
- // scratch = lower 32 bits of ip * lhs.
- __ smull(scratch, scratch2, lhs, ip);
- // Go slow on overflows (overflow bit is not set).
- __ mov(ip, Operand(scratch, ASR, 31));
- // No overflow if higher 33 bits are identical.
- __ cmp(ip, Operand(scratch2));
- __ b(ne, &slow);
- // Go slow on zero result to handle -0.
- __ tst(scratch, Operand(scratch));
- __ mov(result, Operand(scratch), LeaveCC, ne);
- __ Ret(ne);
- // We need -0 if we were multiplying a negative number with 0 to get 0.
- // We know one of them was zero.
- __ add(scratch2, rhs, Operand(lhs), SetCC);
- __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl);
- __ Ret(pl); // Return Smi 0 if the non-zero one was positive.
- // Slow case. We fall through here if we multiplied a negative number
- // with 0, because that would mean we should produce -0.
- __ bind(&slow);
- }
- HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL);
- break;
- }
-
- case Token::DIV:
- case Token::MOD: {
- Label not_smi;
- if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
- Label lhs_is_unsuitable;
- __ JumpIfNotSmi(lhs, &not_smi);
- if (IsPowerOf2(constant_rhs_)) {
- if (op_ == Token::MOD) {
- __ and_(rhs,
- lhs,
- Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
- SetCC);
- // We now have the answer, but if the input was negative we also
- // have the sign bit. Our work is done if the result is
- // positive or zero:
- if (!rhs.is(r0)) {
- __ mov(r0, rhs, LeaveCC, pl);
- }
- __ Ret(pl);
- // A mod of a negative left hand side must return a negative number.
- // Unfortunately if the answer is 0 then we must return -0. And we
- // already optimistically trashed rhs so we may need to restore it.
- __ eor(rhs, rhs, Operand(0x80000000u), SetCC);
- // Next two instructions are conditional on the answer being -0.
- __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
- __ b(eq, &lhs_is_unsuitable);
- // We need to subtract the dividend. Eg. -3 % 4 == -3.
- __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_)));
- } else {
- ASSERT(op_ == Token::DIV);
- __ tst(lhs,
- Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
- __ b(ne, &lhs_is_unsuitable); // Go slow on negative or remainder.
- int shift = 0;
- int d = constant_rhs_;
- while ((d & 1) == 0) {
- d >>= 1;
- shift++;
- }
- __ mov(r0, Operand(lhs, LSR, shift));
- __ bic(r0, r0, Operand(kSmiTagMask));
- }
- } else {
- // Not a power of 2.
- __ tst(lhs, Operand(0x80000000u));
- __ b(ne, &lhs_is_unsuitable);
- // Find a fixed point reciprocal of the divisor so we can divide by
- // multiplying.
- double divisor = 1.0 / constant_rhs_;
- int shift = 32;
- double scale = 4294967296.0; // 1 << 32.
- uint32_t mul;
- // Maximise the precision of the fixed point reciprocal.
- while (true) {
- mul = static_cast<uint32_t>(scale * divisor);
- if (mul >= 0x7fffffff) break;
- scale *= 2.0;
- shift++;
- }
- mul++;
- Register scratch2 = smi_test_reg;
- smi_test_reg = no_reg;
- __ mov(scratch2, Operand(mul));
- __ umull(scratch, scratch2, scratch2, lhs);
- __ mov(scratch2, Operand(scratch2, LSR, shift - 31));
- // scratch2 is lhs / rhs. scratch2 is not Smi tagged.
- // rhs is still the known rhs. rhs is Smi tagged.
- // lhs is still the unkown lhs. lhs is Smi tagged.
- int required_scratch_shift = 0; // Including the Smi tag shift of 1.
- // scratch = scratch2 * rhs.
- MultiplyByKnownIntInStub(masm,
- scratch,
- scratch2,
- rhs,
- constant_rhs_,
- &required_scratch_shift);
- // scratch << required_scratch_shift is now the Smi tagged rhs *
- // (lhs / rhs) where / indicates integer division.
- if (op_ == Token::DIV) {
- __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift));
- __ b(ne, &lhs_is_unsuitable); // There was a remainder.
- __ mov(result, Operand(scratch2, LSL, kSmiTagSize));
- } else {
- ASSERT(op_ == Token::MOD);
- __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift));
- }
- }
- __ Ret();
- __ bind(&lhs_is_unsuitable);
- } else if (op_ == Token::MOD &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS) {
- // Do generate a bit of smi code for modulus even though the default for
- // modulus is not to do it, but as the ARM processor has no coprocessor
- // support for modulus checking for smis makes sense. We can handle
- // 1 to 25 times any power of 2. This covers over half the numbers from
- // 1 to 100 including all of the first 25. (Actually the constants < 10
- // are handled above by reciprocal multiplication. We only get here for
- // those cases if the right hand side is not a constant or for cases
- // like 192 which is 3*2^6 and ends up in the 3 case in the integer mod
- // stub.)
- Label slow;
- Label not_power_of_2;
- ASSERT(!ShouldGenerateSmiCode());
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
- // Check for two positive smis.
- __ orr(smi_test_reg, lhs, Operand(rhs));
- __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
- __ b(ne, &slow);
- // Check that rhs is a power of two and not zero.
- Register mask_bits = r3;
- __ sub(scratch, rhs, Operand(1), SetCC);
- __ b(mi, &slow);
- __ and_(mask_bits, rhs, Operand(scratch), SetCC);
- __ b(ne, &not_power_of_2);
- // Calculate power of two modulus.
- __ and_(result, lhs, Operand(scratch));
- __ Ret();
-
- __ bind(&not_power_of_2);
- __ eor(scratch, scratch, Operand(mask_bits));
- // At least two bits are set in the modulus. The high one(s) are in
- // mask_bits and the low one is scratch + 1.
- __ and_(mask_bits, scratch, Operand(lhs));
- Register shift_distance = scratch;
- scratch = no_reg;
-
- // The rhs consists of a power of 2 multiplied by some odd number.
- // The power-of-2 part we handle by putting the corresponding bits
- // from the lhs in the mask_bits register, and the power in the
- // shift_distance register. Shift distance is never 0 due to Smi
- // tagging.
- __ CountLeadingZeros(r4, shift_distance, shift_distance);
- __ rsb(shift_distance, r4, Operand(32));
-
- // Now we need to find out what the odd number is. The last bit is
- // always 1.
- Register odd_number = r4;
- __ mov(odd_number, Operand(rhs, LSR, shift_distance));
- __ cmp(odd_number, Operand(25));
- __ b(gt, &slow);
-
- IntegerModStub stub(
- result, shift_distance, odd_number, mask_bits, lhs, r5);
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); // Tail call.
-
- __ bind(&slow);
- }
- HandleBinaryOpSlowCases(
- masm,
- &not_smi,
- lhs,
- rhs,
- op_ == Token::MOD ? Builtins::MOD : Builtins::DIV);
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- Label slow;
- STATIC_ASSERT(kSmiTag == 0); // adjust code below
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- __ b(ne, &slow);
- Register scratch2 = smi_test_reg;
- smi_test_reg = no_reg;
- switch (op_) {
- case Token::BIT_OR: __ orr(result, rhs, Operand(lhs)); break;
- case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break;
- case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break;
- case Token::SAR:
- // Remove tags from right operand.
- __ GetLeastBitsFromSmi(scratch2, rhs, 5);
- __ mov(result, Operand(lhs, ASR, scratch2));
- // Smi tag result.
- __ bic(result, result, Operand(kSmiTagMask));
- break;
- case Token::SHR:
- // Remove tags from operands. We can't do this on a 31 bit number
- // because then the 0s get shifted into bit 30 instead of bit 31.
- __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x
- __ GetLeastBitsFromSmi(scratch2, rhs, 5);
- __ mov(scratch, Operand(scratch, LSR, scratch2));
- // Unsigned shift is not allowed to produce a negative number, so
- // check the sign bit and the sign bit after Smi tagging.
- __ tst(scratch, Operand(0xc0000000));
- __ b(ne, &slow);
- // Smi tag result.
- __ mov(result, Operand(scratch, LSL, kSmiTagSize));
- break;
- case Token::SHL:
- // Remove tags from operands.
- __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x
- __ GetLeastBitsFromSmi(scratch2, rhs, 5);
- __ mov(scratch, Operand(scratch, LSL, scratch2));
- // Check that the signed result fits in a Smi.
- __ add(scratch2, scratch, Operand(0x40000000), SetCC);
- __ b(mi, &slow);
- __ mov(result, Operand(scratch, LSL, kSmiTagSize));
- break;
- default: UNREACHABLE();
- }
- __ Ret();
- __ bind(&slow);
- HandleNonSmiBitwiseOp(masm, lhs, rhs);
- break;
- }
-
- default: UNREACHABLE();
- }
- // This code should be unreachable.
- __ stop("Unreachable");
-
- // Generate an unreachable reference to the DEFAULT stub so that it can be
- // found at the end of this stub when clearing ICs at GC.
- // TODO(kaznacheev): Check performance impact and get rid of this.
- if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
- GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
- __ CallStub(&uninit);
- }
-}
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- __ Push(r1, r0);
-
- __ mov(r2, Operand(Smi::FromInt(MinorKey())));
- __ mov(r1, Operand(Smi::FromInt(op_)));
- __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
- __ Push(r2, r1, r0);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()),
- 5,
- 1);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- GenericBinaryOpStub stub(key, type_info);
- return stub.GetCode();
-}
-
-
-Handle<Code> GetTypeRecordingBinaryOpStub(int key,
- TRBinaryOpIC::TypeInfo type_info,
- TRBinaryOpIC::TypeInfo result_type_info) {
- TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
- return stub.GetCode();
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- __ Push(r1, r0);
-
- __ mov(r2, Operand(Smi::FromInt(MinorKey())));
- __ mov(r1, Operand(Smi::FromInt(op_)));
- __ mov(r0, Operand(Smi::FromInt(operands_type_)));
- __ Push(r2, r1, r0);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch),
- masm->isolate()),
- 5,
- 1);
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
- MacroAssembler* masm) {
- UNIMPLEMENTED();
-}
-
-
-void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
- switch (operands_type_) {
- case TRBinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case TRBinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case TRBinaryOpIC::INT32:
- GenerateInt32Stub(masm);
- break;
- case TRBinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case TRBinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case TRBinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case TRBinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-const char* TypeRecordingBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "TypeRecordingBinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- TRBinaryOpIC::GetName(operands_type_));
- return name_;
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
- MacroAssembler* masm) {
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
- Register scratch2 = r9;
-
- ASSERT(right.is(r0));
- STATIC_ASSERT(kSmiTag == 0);
-
- Label not_smi_result;
- switch (op_) {
- case Token::ADD:
- __ add(right, left, Operand(right), SetCC); // Add optimistically.
- __ Ret(vc);
- __ sub(right, right, Operand(left)); // Revert optimistic add.
- break;
- case Token::SUB:
- __ sub(right, left, Operand(right), SetCC); // Subtract optimistically.
- __ Ret(vc);
- __ sub(right, left, Operand(right)); // Revert optimistic subtract.
- break;
- case Token::MUL:
- // Remove tag from one of the operands. This way the multiplication result
- // will be a smi if it fits the smi range.
- __ SmiUntag(ip, right);
- // Do multiplication
- // scratch1 = lower 32 bits of ip * left.
- // scratch2 = higher 32 bits of ip * left.
- __ smull(scratch1, scratch2, left, ip);
- // Check for overflowing the smi range - no overflow if higher 33 bits of
- // the result are identical.
- __ mov(ip, Operand(scratch1, ASR, 31));
- __ cmp(ip, Operand(scratch2));
- __ b(ne, &not_smi_result);
- // Go slow on zero result to handle -0.
- __ tst(scratch1, Operand(scratch1));
- __ mov(right, Operand(scratch1), LeaveCC, ne);
- __ Ret(ne);
- // We need -0 if we were multiplying a negative number with 0 to get 0.
- // We know one of them was zero.
- __ add(scratch2, right, Operand(left), SetCC);
- __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
- __ Ret(pl); // Return smi 0 if the non-zero one was positive.
- // We fall through here if we multiplied a negative number with 0, because
- // that would mean we should produce -0.
- break;
- case Token::DIV:
- // Check for power of two on the right hand side.
- __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
- // Check for positive and no remainder (scratch1 contains right - 1).
- __ orr(scratch2, scratch1, Operand(0x80000000u));
- __ tst(left, scratch2);
- __ b(ne, &not_smi_result);
-
- // Perform division by shifting.
- __ CountLeadingZeros(scratch1, scratch1, scratch2);
- __ rsb(scratch1, scratch1, Operand(31));
- __ mov(right, Operand(left, LSR, scratch1));
- __ Ret();
- break;
- case Token::MOD:
- // Check for two positive smis.
- __ orr(scratch1, left, Operand(right));
- __ tst(scratch1, Operand(0x80000000u | kSmiTagMask));
- __ b(ne, &not_smi_result);
-
- // Check for power of two on the right hand side.
- __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
-
- // Perform modulus by masking.
- __ and_(right, left, Operand(scratch1));
- __ Ret();
- break;
- case Token::BIT_OR:
- __ orr(right, left, Operand(right));
- __ Ret();
- break;
- case Token::BIT_AND:
- __ and_(right, left, Operand(right));
- __ Ret();
- break;
- case Token::BIT_XOR:
- __ eor(right, left, Operand(right));
- __ Ret();
- break;
- case Token::SAR:
- // Remove tags from right operand.
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ mov(right, Operand(left, ASR, scratch1));
- // Smi tag result.
- __ bic(right, right, Operand(kSmiTagMask));
- __ Ret();
- break;
- case Token::SHR:
- // Remove tags from operands. We can't do this on a 31 bit number
- // because then the 0s get shifted into bit 30 instead of bit 31.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSR, scratch2));
- // Unsigned shift is not allowed to produce a negative number, so
- // check the sign bit and the sign bit after Smi tagging.
- __ tst(scratch1, Operand(0xc0000000));
- __ b(ne, &not_smi_result);
- // Smi tag result.
- __ SmiTag(right, scratch1);
- __ Ret();
- break;
- case Token::SHL:
- // Remove tags from operands.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSL, scratch2));
- // Check that the signed result fits in a Smi.
- __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
- __ b(mi, &not_smi_result);
- __ SmiTag(right, scratch1);
- __ Ret();
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&not_smi_result);
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required) {
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
- Register scratch2 = r9;
- Register scratch3 = r4;
-
- ASSERT(smi_operands || (not_numbers != NULL));
- if (smi_operands && FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
- }
-
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
- // depending on whether VFP3 is available or not.
- FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(VFP3) &&
- op_ != Token::MOD ?
- FloatingPointHelper::kVFPRegisters :
- FloatingPointHelper::kCoreRegisters;
-
- // Allocate new heap number for result.
- Register result = r5;
- GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required);
-
- // Load the operands.
- if (smi_operands) {
- FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
- } else {
- FloatingPointHelper::LoadOperands(masm,
- destination,
- heap_number_map,
- scratch1,
- scratch2,
- not_numbers);
- }
-
- // Calculate the result.
- if (destination == FloatingPointHelper::kVFPRegisters) {
- // Using VFP registers:
- // d6: Left value
- // d7: Right value
- CpuFeatures::Scope scope(VFP3);
- switch (op_) {
- case Token::ADD:
- __ vadd(d5, d6, d7);
- break;
- case Token::SUB:
- __ vsub(d5, d6, d7);
- break;
- case Token::MUL:
- __ vmul(d5, d6, d7);
- break;
- case Token::DIV:
- __ vdiv(d5, d6, d7);
- break;
- default:
- UNREACHABLE();
- }
-
- __ sub(r0, result, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ add(r0, r0, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // Call the C function to handle the double operation.
- FloatingPointHelper::CallCCodeForDoubleOperation(masm,
- op_,
- result,
- scratch1);
- }
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- if (smi_operands) {
- __ SmiUntag(r3, left);
- __ SmiUntag(r2, right);
- } else {
- // Convert operands to 32-bit integers. Right in r2 and left in r3.
- FloatingPointHelper::ConvertNumberToInt32(masm,
- left,
- r3,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- d0,
- not_numbers);
- FloatingPointHelper::ConvertNumberToInt32(masm,
- right,
- r2,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- d0,
- not_numbers);
- }
-
- Label result_not_a_smi;
- switch (op_) {
- case Token::BIT_OR:
- __ orr(r2, r3, Operand(r2));
- break;
- case Token::BIT_XOR:
- __ eor(r2, r3, Operand(r2));
- break;
- case Token::BIT_AND:
- __ and_(r2, r3, Operand(r2));
- break;
- case Token::SAR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, ASR, r2));
- break;
- case Token::SHR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, LSR, r2), SetCC);
- // SHR is special because it is required to produce a positive answer.
- // The code below for writing into heap numbers isn't capable of
- // writing the register as an unsigned int so we go to slow case if we
- // hit this case.
- if (CpuFeatures::IsSupported(VFP3)) {
- __ b(mi, &result_not_a_smi);
- } else {
- __ b(mi, not_numbers);
- }
- break;
- case Token::SHL:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, LSL, r2));
- break;
- default:
- UNREACHABLE();
- }
-
- // Check that the *signed* result fits in a smi.
- __ add(r3, r2, Operand(0x40000000), SetCC);
- __ b(mi, &result_not_a_smi);
- __ SmiTag(r0, r2);
- __ Ret();
-
- // Allocate new heap number for result.
- __ bind(&result_not_a_smi);
- Register result = r5;
- if (smi_operands) {
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- } else {
- GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required);
- }
-
- // r2: Answer as signed int32.
- // r5: Heap number to write answer into.
-
- // Nothing can go wrong now, so move the heap number to r0, which is the
- // result.
- __ mov(r0, Operand(r5));
-
- if (CpuFeatures::IsSupported(VFP3)) {
- // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
- // mentioned above SHR needs to always produce a positive result.
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, r2);
- if (op_ == Token::SHR) {
- __ vcvt_f64_u32(d0, s0);
- } else {
- __ vcvt_f64_s32(d0, s0);
- }
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r3, HeapNumber::kValueOffset);
- __ Ret();
- } else {
- // Tail call that writes the int32 in r2 to the heap number in r0, using
- // r3 as scratch. r0 is preserved and returned.
- WriteInt32ToHeapNumberStub stub(r2, r0, r3);
- __ TailCallStub(&stub);
- }
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-// Generate the smi code. If the operation on smis are successful this return is
-// generated. If the result is not a smi and heap number allocation is not
-// requested the code falls through. If number allocation is requested but a
-// heap number cannot be allocated the code jumps to the lable gc_required.
-void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
- Label* gc_required,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
- Label not_smis;
-
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
- Register scratch2 = r9;
-
- // Perform combined smi check on both operands.
- __ orr(scratch1, left, Operand(right));
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(scratch1, Operand(kSmiTagMask));
- __ b(ne, &not_smis);
-
- // If the smi-smi operation results in a smi return is generated.
- GenerateSmiSmiOperation(masm);
-
- // If heap number results are possible generate the result in an allocated
- // heap number.
- if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
- GenerateFPOperation(masm, true, NULL, gc_required);
- }
- __ bind(&not_smis);
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label not_smis, call_runtime;
-
- if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
- result_type_ == TRBinaryOpIC::SMI) {
- // Only allow smi results.
- GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
- } else {
- // Allow heap number result and don't make a transition if a heap number
- // cannot be allocated.
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- GenerateCallRuntime(masm);
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == TRBinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // Try to add arguments as strings, otherwise, transition to the generic
- // TRBinaryOpIC type.
- GenerateAddStrings(masm);
- GenerateTypeTransition(masm);
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(operands_type_ == TRBinaryOpIC::INT32);
-
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
- Register scratch2 = r9;
- DwVfpRegister double_scratch = d0;
- SwVfpRegister single_scratch = s3;
-
- Register heap_number_result = no_reg;
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- Label call_runtime;
- // Labels for type transition, used for wrong input or output types.
- // Both label are currently actually bound to the same position. We use two
- // different label to differentiate the cause leading to type transition.
- Label transition;
-
- // Smi-smi fast case.
- Label skip;
- __ orr(scratch1, left, right);
- __ JumpIfNotSmi(scratch1, &skip);
- GenerateSmiSmiOperation(masm);
- // Fall through if the result is not a smi.
- __ bind(&skip);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // Load both operands and check that they are 32-bit integer.
- // Jump to type transition if they are not. The registers r0 and r1 (right
- // and left) are preserved for the runtime call.
- FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(VFP3) &&
- op_ != Token::MOD ?
- FloatingPointHelper::kVFPRegisters :
- FloatingPointHelper::kCoreRegisters;
-
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- right,
- destination,
- d7,
- r2,
- r3,
- heap_number_map,
- scratch1,
- scratch2,
- s0,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- left,
- destination,
- d6,
- r4,
- r5,
- heap_number_map,
- scratch1,
- scratch2,
- s0,
- &transition);
-
- if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP3);
- Label return_heap_number;
- switch (op_) {
- case Token::ADD:
- __ vadd(d5, d6, d7);
- break;
- case Token::SUB:
- __ vsub(d5, d6, d7);
- break;
- case Token::MUL:
- __ vmul(d5, d6, d7);
- break;
- case Token::DIV:
- __ vdiv(d5, d6, d7);
- break;
- default:
- UNREACHABLE();
- }
-
- if (op_ != Token::DIV) {
- // These operations produce an integer result.
- // Try to return a smi if we can.
- // Otherwise return a heap number if allowed, or jump to type
- // transition.
-
- __ EmitVFPTruncate(kRoundToZero,
- single_scratch,
- d5,
- scratch1,
- scratch2);
-
- if (result_type_ <= TRBinaryOpIC::INT32) {
- // If the ne condition is set, result does
- // not fit in a 32-bit integer.
- __ b(ne, &transition);
- }
-
- // Check if the result fits in a smi.
- __ vmov(scratch1, single_scratch);
- __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
- // If not try to return a heap number.
- __ b(mi, &return_heap_number);
- // Check for minus zero. Return heap number for minus zero.
- Label not_zero;
- __ cmp(scratch1, Operand(0));
- __ b(ne, &not_zero);
- __ vmov(scratch2, d5.high());
- __ tst(scratch2, Operand(HeapNumber::kSignMask));
- __ b(ne, &return_heap_number);
- __ bind(&not_zero);
-
- // Tag the result and return.
- __ SmiTag(r0, scratch1);
- __ Ret();
- } else {
- // DIV just falls through to allocating a heap number.
- }
-
- if (result_type_ >= (op_ == Token::DIV) ? TRBinaryOpIC::HEAP_NUMBER
- : TRBinaryOpIC::INT32) {
- __ bind(&return_heap_number);
- // We are using vfp registers so r5 is available.
- heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
- __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ mov(r0, heap_number_result);
- __ Ret();
- }
-
- // A DIV operation expecting an integer result falls through
- // to type transition.
-
- } else {
- // We preserved r0 and r1 to be able to call runtime.
- // Save the left value on the stack.
- __ Push(r5, r4);
-
- // Allocate a heap number to store the result.
- heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
-
- // Load the left value from the value saved on the stack.
- __ Pop(r1, r0);
-
- // Call the C function to handle the double operation.
- FloatingPointHelper::CallCCodeForDoubleOperation(
- masm, op_, heap_number_result, scratch1);
- }
-
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- Label return_heap_number;
- Register scratch3 = r5;
- // Convert operands to 32-bit integers. Right in r2 and left in r3. The
- // registers r0 and r1 (right and left) are preserved for the runtime
- // call.
- FloatingPointHelper::LoadNumberAsInt32(masm,
- left,
- r3,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- d0,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32(masm,
- right,
- r2,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- d0,
- &transition);
-
- // The ECMA-262 standard specifies that, for shift operations, only the
- // 5 least significant bits of the shift value should be used.
- switch (op_) {
- case Token::BIT_OR:
- __ orr(r2, r3, Operand(r2));
- break;
- case Token::BIT_XOR:
- __ eor(r2, r3, Operand(r2));
- break;
- case Token::BIT_AND:
- __ and_(r2, r3, Operand(r2));
- break;
- case Token::SAR:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, ASR, r2));
- break;
- case Token::SHR:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSR, r2), SetCC);
- // SHR is special because it is required to produce a positive answer.
- // We only get a negative result if the shift value (r2) is 0.
- // This result cannot be respresented as a signed 32-bit integer, try
- // to return a heap number if we can.
- // The non vfp3 code does not support this special case, so jump to
- // runtime if we don't support it.
- if (CpuFeatures::IsSupported(VFP3)) {
- __ b(mi,
- (result_type_ <= TRBinaryOpIC::INT32) ? &transition
- : &return_heap_number);
- } else {
- __ b(mi, (result_type_ <= TRBinaryOpIC::INT32) ? &transition
- : &call_runtime);
- }
- break;
- case Token::SHL:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSL, r2));
- break;
- default:
- UNREACHABLE();
- }
-
- // Check if the result fits in a smi.
- __ add(scratch1, r2, Operand(0x40000000), SetCC);
- // If not try to return a heap number. (We know the result is an int32.)
- __ b(mi, &return_heap_number);
- // Tag the result and return.
- __ SmiTag(r0, r2);
- __ Ret();
-
- __ bind(&return_heap_number);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
-
- if (op_ != Token::SHR) {
- // Convert the result to a floating point value.
- __ vmov(double_scratch.low(), r2);
- __ vcvt_f64_s32(double_scratch, double_scratch.low());
- } else {
- // The result must be interpreted as an unsigned 32-bit integer.
- __ vmov(double_scratch.low(), r2);
- __ vcvt_f64_u32(double_scratch, double_scratch.low());
- }
-
- // Store the result.
- __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
- __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
- __ mov(r0, heap_number_result);
- __ Ret();
- } else {
- // Tail call that writes the int32 in r2 to the heap number in r0, using
- // r3 as scratch. r0 is preserved and returned.
- WriteInt32ToHeapNumberStub stub(r2, r0, r3);
- __ TailCallStub(&stub);
- }
-
- break;
- }
-
- default:
- UNREACHABLE();
- }
-
- if (transition.is_linked()) {
- __ bind(&transition);
- GenerateTypeTransition(masm);
- }
-
- __ bind(&call_runtime);
- GenerateCallRuntime(masm);
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- Label call_runtime;
-
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- // Convert oddball arguments to numbers.
- Label check, done;
- __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
- __ b(ne, &check);
- if (Token::IsBitOp(op_)) {
- __ mov(r1, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(r1, Heap::kNanValueRootIndex);
- }
- __ jmp(&done);
- __ bind(&check);
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(ne, &done);
- if (Token::IsBitOp(op_)) {
- __ mov(r0, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(r0, Heap::kNanValueRootIndex);
- }
- __ bind(&done);
-
- GenerateHeapNumberStub(masm);
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- Label call_runtime;
- GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
-
- __ bind(&call_runtime);
- GenerateCallRuntime(masm);
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime;
-
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
-
- GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
-
- __ bind(&call_string_add_or_runtime);
- if (op_ == Token::ADD) {
- GenerateAddStrings(masm);
- }
-
- __ bind(&call_runtime);
- GenerateCallRuntime(masm);
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- Register left = r1;
- Register right = r0;
-
- // Check if left argument is a string.
- __ JumpIfSmi(left, &left_not_string);
- __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &left_not_string);
-
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime);
- __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // At least one argument is not a string.
- __ bind(&call_runtime);
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
- GenerateRegisterArgsPush(masm);
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, JUMP_JS);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_JS);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_JS);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_JS);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_JS);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
- MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
-
- // Code below will scratch result if allocation fails. To keep both arguments
- // intact for the runtime call result cannot be one of these.
- ASSERT(!result.is(r0) && !result.is(r1));
-
- if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
- Label skip_allocation, allocated;
- Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0;
- // If the overwritable operand is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
- // Allocate a heap number for the result.
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- __ b(&allocated);
- __ bind(&skip_allocation);
- // Use object holding the overwritable operand for result.
- __ mov(result, Operand(overwritable_operand));
- __ bind(&allocated);
- } else {
- ASSERT(mode_ == NO_OVERWRITE);
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- }
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ Push(r1, r0);
-}
-
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Untagged case: double input in d2, double result goes
- // into d2.
- // Tagged case: tagged input on top of stack and in r0,
- // tagged result (heap number) goes into r0.
-
- Label input_not_smi;
- Label loaded;
- Label calculate;
- Label invalid_cache;
- const Register scratch0 = r9;
- const Register scratch1 = r7;
- const Register cache_entry = r0;
- const bool tagged = (argument_type_ == TAGGED);
-
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- if (tagged) {
- // Argument is a number and is on stack and in r0.
- // Load argument and check if it is a smi.
- __ JumpIfNotSmi(r0, &input_not_smi);
-
- // Input is a smi. Convert to double and load the low and high words
- // of the double into r2, r3.
- __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
- __ b(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ CheckMap(r0,
- r1,
- Heap::kHeapNumberMapRootIndex,
- &calculate,
- true);
- // Input is a HeapNumber. Load it to a double register and store the
- // low and high words into r2, r3.
- __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ vmov(r2, r3, d0);
- } else {
- // Input is untagged double in d2. Output goes to d2.
- __ vmov(r2, r3, d2);
- }
- __ bind(&loaded);
- // r2 = low 32 bits of double value
- // r3 = high 32 bits of double value
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ eor(r1, r2, Operand(r3));
- __ eor(r1, r1, Operand(r1, ASR, 16));
- __ eor(r1, r1, Operand(r1, ASR, 8));
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // r2 = low 32 bits of double value.
- // r3 = high 32 bits of double value.
- // r1 = TranscendentalCache::hash(double value).
- Isolate* isolate = masm->isolate();
- ExternalReference cache_array =
- ExternalReference::transcendental_cache_array_address(isolate);
- __ mov(cache_entry, Operand(cache_array));
- // cache_entry points to cache array.
- int cache_array_index
- = type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
- __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
- // r0 points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ cmp(cache_entry, Operand(0, RelocInfo::NONE));
- __ b(eq, &invalid_cache);
-
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
-#endif
-
- // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
- __ add(r1, r1, Operand(r1, LSL, 1));
- __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
- __ cmp(r2, r4);
- __ b(ne, &calculate);
- __ cmp(r3, r5);
- __ b(ne, &calculate);
- // Cache hit. Load result, cleanup and return.
- if (tagged) {
- // Pop input value from stack and load result into r0.
- __ pop();
- __ mov(r0, Operand(r6));
- } else {
- // Load result into d2.
- __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
- }
- __ Ret();
- } // if (CpuFeatures::IsSupported(VFP3))
-
- __ bind(&calculate);
- if (tagged) {
- __ bind(&invalid_cache);
- ExternalReference runtime_function =
- ExternalReference(RuntimeFunction(), masm->isolate());
- __ TailCallExternalReference(runtime_function, 1, 1);
- } else {
- if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE();
- CpuFeatures::Scope scope(VFP3);
-
- Label no_update;
- Label skip_cache;
- const Register heap_number_map = r5;
-
- // Call C function to calculate the result and update the cache.
- // Register r0 holds precalculated cache entry address; preserve
- // it on the stack and pop it into register cache_entry after the
- // call.
- __ push(cache_entry);
- GenerateCallCFunction(masm, scratch0);
- __ GetCFunctionDoubleResult(d2);
-
- // Try to update the cache. If we cannot allocate a
- // heap number, we return the result without updating.
- __ pop(cache_entry);
- __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
- __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
- __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit());
- __ Ret();
-
- __ bind(&invalid_cache);
- // The cache is invalid. Call runtime which will recreate the
- // cache.
- __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
- __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ EnterInternalFrame();
- __ push(r0);
- __ CallRuntime(RuntimeFunction(), 1);
- __ LeaveInternalFrame();
- __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ Ret();
-
- __ bind(&skip_cache);
- // Call C function to calculate the result and answer directly
- // without updating the cache.
- GenerateCallCFunction(masm, scratch0);
- __ GetCFunctionDoubleResult(d2);
- __ bind(&no_update);
-
- // We return the value in d2 without adding it to the cache, but
- // we cause a scavenging GC so that future allocations will succeed.
- __ EnterInternalFrame();
-
- // Allocate an aligned object larger than a HeapNumber.
- ASSERT(4 * kPointerSize >= HeapNumber::kSize);
- __ mov(scratch0, Operand(4 * kPointerSize));
- __ push(scratch0);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- __ LeaveInternalFrame();
- __ Ret();
- }
-}
-
-
-void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
- Register scratch) {
- Isolate* isolate = masm->isolate();
-
- __ push(lr);
- __ PrepareCallCFunction(2, scratch);
- __ vmov(r0, r1, d2);
- switch (type_) {
- case TranscendentalCache::SIN:
- __ CallCFunction(ExternalReference::math_sin_double_function(isolate), 2);
- break;
- case TranscendentalCache::COS:
- __ CallCFunction(ExternalReference::math_cos_double_function(isolate), 2);
- break;
- case TranscendentalCache::LOG:
- __ CallCFunction(ExternalReference::math_log_double_function(isolate), 2);
- break;
- default:
- UNIMPLEMENTED();
- break;
- }
- __ pop(lr);
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::LOG: return Runtime::kMath_log;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- Label slow, done;
-
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- if (op_ == Token::SUB) {
- if (include_smi_code_) {
- // Check whether the value is a smi.
- Label try_float;
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &try_float);
-
- // Go slow case if the value of the expression is zero
- // to make sure that we switch between 0 and -0.
- if (negative_zero_ == kStrictNegativeZero) {
- // If we have to check for zero, then we can check for the max negative
- // smi while we are at it.
- __ bic(ip, r0, Operand(0x80000000), SetCC);
- __ b(eq, &slow);
- __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
- __ Ret();
- } else {
- // The value of the expression is a smi and 0 is OK for -0. Try
- // optimistic subtraction '0 - value'.
- __ rsb(r0, r0, Operand(0, RelocInfo::NONE), SetCC);
- __ Ret(vc);
- // We don't have to reverse the optimistic neg since the only case
- // where we fall through is the minimum negative Smi, which is the case
- // where the neg leaves the register unchanged.
- __ jmp(&slow); // Go slow on max negative Smi.
- }
- __ bind(&try_float);
- } else if (FLAG_debug_code) {
- __ tst(r0, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected smi operand.");
- }
-
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmp(r1, heap_number_map);
- __ b(ne, &slow);
- // r0 is a heap number. Get a new heap number in r1.
- if (overwrite_ == UNARY_OVERWRITE) {
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- } else {
- __ AllocateHeapNumber(r1, r2, r3, r6, &slow);
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
- __ mov(r0, Operand(r1));
- }
- } else if (op_ == Token::BIT_NOT) {
- if (include_smi_code_) {
- Label non_smi;
- __ JumpIfNotSmi(r0, &non_smi);
- __ mvn(r0, Operand(r0));
- // Bit-clear inverted smi-tag.
- __ bic(r0, r0, Operand(kSmiTagMask));
- __ Ret();
- __ bind(&non_smi);
- } else if (FLAG_debug_code) {
- __ tst(r0, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected smi operand.");
- }
-
- // Check if the operand is a heap number.
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmp(r1, heap_number_map);
- __ b(ne, &slow);
-
- // Convert the heap number is r0 to an untagged integer in r1.
- __ ConvertToInt32(r0, r1, r2, r3, d0, &slow);
-
- // Do the bitwise operation (move negated) and check if the result
- // fits in a smi.
- Label try_float;
- __ mvn(r1, Operand(r1));
- __ add(r2, r1, Operand(0x40000000), SetCC);
- __ b(mi, &try_float);
- __ mov(r0, Operand(r1, LSL, kSmiTagSize));
- __ b(&done);
-
- __ bind(&try_float);
- if (!overwrite_ == UNARY_OVERWRITE) {
- // Allocate a fresh heap number, but don't overwrite r0 until
- // we're sure we can do it without going through the slow case
- // that needs the value in r0.
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
- __ mov(r0, Operand(r2));
- }
-
- if (CpuFeatures::IsSupported(VFP3)) {
- // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, r1);
- __ vcvt_f64_s32(d0, s0);
- __ sub(r2, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r2, HeapNumber::kValueOffset);
- } else {
- // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
- // have to set up a frame.
- WriteInt32ToHeapNumberStub stub(r1, r0, r2);
- __ push(lr);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
- } else {
- UNIMPLEMENTED();
- }
-
- __ bind(&done);
- __ Ret();
-
- // Handle the slow case by jumping to the JavaScript builtin.
- __ bind(&slow);
- __ push(r0);
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void MathPowStub::Generate(MacroAssembler* masm) {
- Label call_runtime;
-
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
-
- Label base_not_smi;
- Label exponent_not_smi;
- Label convert_exponent;
-
- const Register base = r0;
- const Register exponent = r1;
- const Register heapnumbermap = r5;
- const Register heapnumber = r6;
- const DoubleRegister double_base = d0;
- const DoubleRegister double_exponent = d1;
- const DoubleRegister double_result = d2;
- const SwVfpRegister single_scratch = s0;
- const Register scratch = r9;
- const Register scratch2 = r7;
-
- __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
- __ ldr(base, MemOperand(sp, 1 * kPointerSize));
- __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
-
- // Convert base to double value and store it in d0.
- __ JumpIfNotSmi(base, &base_not_smi);
- // Base is a Smi. Untag and convert it.
- __ SmiUntag(base);
- __ vmov(single_scratch, base);
- __ vcvt_f64_s32(double_base, single_scratch);
- __ b(&convert_exponent);
-
- __ bind(&base_not_smi);
- __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
- __ cmp(scratch, heapnumbermap);
- __ b(ne, &call_runtime);
- // Base is a heapnumber. Load it into double register.
- __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
-
- __ bind(&convert_exponent);
- __ JumpIfNotSmi(exponent, &exponent_not_smi);
- __ SmiUntag(exponent);
-
- // The base is in a double register and the exponent is
- // an untagged smi. Allocate a heap number and call a
- // C function for integer exponents. The register containing
- // the heap number is callee-saved.
- __ AllocateHeapNumber(heapnumber,
- scratch,
- scratch2,
- heapnumbermap,
- &call_runtime);
- __ push(lr);
- __ PrepareCallCFunction(3, scratch);
- __ mov(r2, exponent);
- __ vmov(r0, r1, double_base);
- __ CallCFunction(
- ExternalReference::power_double_int_function(masm->isolate()), 3);
- __ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
- __ vstr(double_result,
- FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
- __ mov(r0, heapnumber);
- __ Ret(2 * kPointerSize);
-
- __ bind(&exponent_not_smi);
- __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
- __ cmp(scratch, heapnumbermap);
- __ b(ne, &call_runtime);
- // Exponent is a heapnumber. Load it into double register.
- __ vldr(double_exponent,
- FieldMemOperand(exponent, HeapNumber::kValueOffset));
-
- // The base and the exponent are in double registers.
- // Allocate a heap number and call a C function for
- // double exponents. The register containing
- // the heap number is callee-saved.
- __ AllocateHeapNumber(heapnumber,
- scratch,
- scratch2,
- heapnumbermap,
- &call_runtime);
- __ push(lr);
- __ PrepareCallCFunction(4, scratch);
- __ vmov(r0, r1, double_base);
- __ vmov(r2, r3, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()), 4);
- __ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
- __ vstr(double_result,
- FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
- __ mov(r0, heapnumber);
- __ Ret(2 * kPointerSize);
- }
-
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
-}
-
-
-bool CEntryStub::NeedsImmovableCode() {
- return true;
-}
-
-
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- __ Throw(r0);
-}
-
-
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- __ ThrowUncatchable(type, r0);
-}
-
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate) {
- // r0: result parameter for PerformGC, if any
- // r4: number of arguments including receiver (C callee-saved)
- // r5: pointer to builtin function (C callee-saved)
- // r6: pointer to the first argument (C callee-saved)
- Isolate* isolate = masm->isolate();
-
- if (do_gc) {
- // Passing r0.
- __ PrepareCallCFunction(1, r1);
- __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(isolate);
- if (always_allocate) {
- __ mov(r0, Operand(scope_depth));
- __ ldr(r1, MemOperand(r0));
- __ add(r1, r1, Operand(1));
- __ str(r1, MemOperand(r0));
- }
-
- // Call C built-in.
- // r0 = argc, r1 = argv
- __ mov(r0, Operand(r4));
- __ mov(r1, Operand(r6));
-
-#if defined(V8_HOST_ARCH_ARM)
- int frame_alignment = MacroAssembler::ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
- if (FLAG_debug_code) {
- if (frame_alignment > kPointerSize) {
- Label alignment_as_expected;
- ASSERT(IsPowerOf2(frame_alignment));
- __ tst(sp, Operand(frame_alignment_mask));
- __ b(eq, &alignment_as_expected);
- // Don't use Check here, as it will call Runtime_Abort re-entering here.
- __ stop("Unexpected alignment");
- __ bind(&alignment_as_expected);
- }
- }
-#endif
-
- __ mov(r2, Operand(ExternalReference::isolate_address()));
-
-
- // TODO(1242173): To let the GC traverse the return address of the exit
- // frames, we need to know where the return address is. Right now,
- // we store it on the stack to be able to find it again, but we never
- // restore from it in case of changes, which makes it impossible to
- // support moving the C entry code stub. This should be fixed, but currently
- // this is OK because the CEntryStub gets generated so early in the V8 boot
- // sequence that it is not moving ever.
-
- // Compute the return address in lr to return to after the jump below. Pc is
- // already at '+ 8' from the current instruction but return is after three
- // instructions so add another 4 to pc to get the return address.
- masm->add(lr, pc, Operand(4));
- __ str(lr, MemOperand(sp, 0));
- masm->Jump(r5);
-
- if (always_allocate) {
- // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
- // though (contain the result).
- __ mov(r2, Operand(scope_depth));
- __ ldr(r3, MemOperand(r2));
- __ sub(r3, r3, Operand(1));
- __ str(r3, MemOperand(r2));
- }
-
- // check for failure result
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
- // Lower 2 bits of r2 are 0 iff r0 has failure tag.
- __ add(r2, r0, Operand(1));
- __ tst(r2, Operand(kFailureTagMask));
- __ b(eq, &failure_returned);
-
- // Exit C frame and return.
- // r0:r1: result
- // sp: stack pointer
- // fp: frame pointer
- // Callee-saved register r4 still holds argc.
- __ LeaveExitFrame(save_doubles_, r4);
- __ mov(pc, lr);
-
- // check if we should retry or throw exception
- Label retry;
- __ bind(&failure_returned);
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ b(eq, &retry);
-
- // Special handling of out of memory exceptions.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- __ b(eq, throw_out_of_memory_exception);
-
- // Retrieve the pending exception and clear the variable.
- __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
- __ ldr(r3, MemOperand(ip));
- __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address,
- isolate)));
- __ ldr(r0, MemOperand(ip));
- __ str(r3, MemOperand(ip));
-
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- __ cmp(r0, Operand(isolate->factory()->termination_exception()));
- __ b(eq, throw_termination_exception);
-
- // Handle normal exception.
- __ jmp(throw_normal_exception);
-
- __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // Called from JavaScript; parameters are on stack as if calling JS function
- // r0: number of arguments including receiver
- // r1: pointer to builtin function
- // fp: frame pointer (restored after C call)
- // sp: stack pointer (restored as callee's sp after C call)
- // cp: current context (C callee-saved)
-
- // Result returned in r0 or r0+r1 by default.
-
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
-
- // Compute the argv pointer in a callee-saved register.
- __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ sub(r6, r6, Operand(kPointerSize));
-
- // Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(save_doubles_);
-
- // Setup argc and the builtin function in callee-saved registers.
- __ mov(r4, Operand(r0));
- __ mov(r5, Operand(r1));
-
- // r4: number of arguments (C callee-saved)
- // r5: pointer to builtin function (C callee-saved)
- // r6: pointer to first argument (C callee-saved)
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
-
- __ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
-
- __ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
-}
-
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // [sp+0]: argv
-
- Label invoke, exit;
-
- // Called from C, so do not pop argc and args on exit (preserve sp)
- // No need to save register-passed args
- // Save callee-saved registers (incl. cp and fp), sp, and lr
- __ stm(db_w, sp, kCalleeSaved | lr.bit());
-
- // Get address of argv, see stm above.
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize)); // argv
-
- // Push a frame with special values setup to mark it as an entry frame.
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // r4: argv
- Isolate* isolate = masm->isolate();
- __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ mov(r7, Operand(Smi::FromInt(marker)));
- __ mov(r6, Operand(Smi::FromInt(marker)));
- __ mov(r5,
- Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate)));
- __ ldr(r5, MemOperand(r5));
- __ Push(r8, r7, r6, r5);
-
- // Setup frame pointer for the frame to be pushed.
- __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, isolate);
- __ mov(r5, Operand(ExternalReference(js_entry_sp)));
- __ ldr(r6, MemOperand(r5));
- __ cmp(r6, Operand(0, RelocInfo::NONE));
- __ str(fp, MemOperand(r5), eq);
-#endif
-
- // Call a faked try-block that does the invoke.
- __ bl(&invoke);
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
- // Coming in here the fp will be invalid because the PushTryHandler below
- // sets it to 0 to signal the existence of the JSEntry frame.
- __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address,
- isolate)));
- __ str(r0, MemOperand(ip));
- __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
- __ b(&exit);
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- // Must preserve r0-r4, r5-r7 are available.
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
- // If an exception not caught by another handler occurs, this handler
- // returns control to the code after the bl(&invoke) above, which
- // restores all kCalleeSaved registers (including cp and fp) to their
- // saved values before returning a failure to C.
-
- // Clear any pending exceptions.
- __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
- __ ldr(r5, MemOperand(ip));
- __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address,
- isolate)));
- __ str(r5, MemOperand(ip));
-
- // Invoke the function by calling through JS entry trampoline builtin.
- // Notice that we cannot store a reference to the trampoline code directly in
- // this stub, because runtime stubs are not traversed when doing GC.
-
- // Expected registers by Builtins::JSEntryTrampoline
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // r4: argv
- if (is_construct) {
- ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
- isolate);
- __ mov(ip, Operand(construct_entry));
- } else {
- ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
- __ mov(ip, Operand(entry));
- }
- __ ldr(ip, MemOperand(ip)); // deref address
-
- // Branch and link to JSEntryTrampoline. We don't use the double underscore
- // macro for the add instruction because we don't want the coverage tool
- // inserting instructions here after we read the pc.
- __ mov(lr, Operand(pc));
- masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Unlink this frame from the handler chain. When reading the
- // address of the next handler, there is no need to use the address
- // displacement since the current stack pointer (sp) points directly
- // to the stack handler.
- __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
- __ mov(ip, Operand(ExternalReference(Isolate::k_handler_address, isolate)));
- __ str(r3, MemOperand(ip));
- // No need to restore registers
- __ add(sp, sp, Operand(StackHandlerConstants::kSize));
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If current FP value is the same as js_entry_sp value, it means that
- // the current function is the outermost.
- __ mov(r5, Operand(ExternalReference(js_entry_sp)));
- __ ldr(r6, MemOperand(r5));
- __ cmp(fp, Operand(r6));
- __ mov(r6, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- __ str(r6, MemOperand(r5), eq);
-#endif
-
- __ bind(&exit); // r0 holds result
- // Restore the top frame descriptors from the stack.
- __ pop(r3);
- __ mov(ip,
- Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate)));
- __ str(r3, MemOperand(ip));
-
- // Reset the stack to the callee saved registers.
- __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
-
- // Restore callee-saved registers and return.
-#ifdef DEBUG
- if (FLAG_debug_code) {
- __ mov(lr, Operand(pc));
- }
-#endif
- __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
-}
-
-
-// Uses registers r0 to r4.
-// Expected input (depending on whether args are in registers or on the stack):
-// * object: r0 or at sp + 1 * kPointerSize.
-// * function: r1 or at sp.
-//
-// An inlined call site may have been generated before calling this stub.
-// In this case the offset to the inline site to patch is passed on the stack,
-// in the safepoint slot for register r4.
-// (See LCodeGen::DoInstanceOfKnownGlobal)
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Call site inlining and patching implies arguments in registers.
- ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
- // ReturnTrueFalse is only implemented for inlined call sites.
- ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
-
- // Fixed register usage throughout the stub:
- const Register object = r0; // Object (lhs).
- Register map = r3; // Map of the object.
- const Register function = r1; // Function (rhs).
- const Register prototype = r4; // Prototype of the function.
- const Register inline_site = r9;
- const Register scratch = r2;
-
- const int32_t kDeltaToLoadBoolResult = 3 * kPointerSize;
-
- Label slow, loop, is_instance, is_not_instance, not_js_object;
-
- if (!HasArgsInRegisters()) {
- __ ldr(object, MemOperand(sp, 1 * kPointerSize));
- __ ldr(function, MemOperand(sp, 0));
- }
-
- // Check that the left hand is a JS object and load map.
- __ JumpIfSmi(object, &not_js_object);
- __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
-
- // If there is a call site cache don't look in the global cache, but do the
- // real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck()) {
- Label miss;
- __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
- __ cmp(function, ip);
- __ b(ne, &miss);
- __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
- __ cmp(map, ip);
- __ b(ne, &miss);
- __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&miss);
- }
-
- // Get the prototype of the function.
- __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
-
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(prototype, &slow);
- __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
-
- // Update the global instanceof or call site inlined cache with the current
- // map and function. The cached answer will be set when it is known below.
- if (!HasCallSiteInlineCheck()) {
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
- } else {
- ASSERT(HasArgsInRegisters());
- // Patch the (relocated) inlined map check.
-
- // The offset was stored in r4 safepoint slot.
- // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
- __ LoadFromSafepointRegisterSlot(scratch, r4);
- __ sub(inline_site, lr, scratch);
- // Get the map location in scratch and patch it.
- __ GetRelocatedValueLocation(inline_site, scratch);
- __ str(map, MemOperand(scratch));
- }
-
- // Register mapping: r3 is object map and r4 is function prototype.
- // Get prototype of object into r2.
- __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
-
- // We don't need map any more. Use it as a scratch register.
- Register scratch2 = map;
- map = no_reg;
-
- // Loop through the prototype chain looking for the function prototype.
- __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
- __ bind(&loop);
- __ cmp(scratch, Operand(prototype));
- __ b(eq, &is_instance);
- __ cmp(scratch, scratch2);
- __ b(eq, &is_not_instance);
- __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
- __ jmp(&loop);
-
- __ bind(&is_instance);
- if (!HasCallSiteInlineCheck()) {
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
- } else {
- // Patch the call site to return true.
- __ LoadRoot(r0, Heap::kTrueValueRootIndex);
- __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
- // Get the boolean result location in scratch and patch it.
- __ GetRelocatedValueLocation(inline_site, scratch);
- __ str(r0, MemOperand(scratch));
-
- if (!ReturnTrueFalseObject()) {
- __ mov(r0, Operand(Smi::FromInt(0)));
- }
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&is_not_instance);
- if (!HasCallSiteInlineCheck()) {
- __ mov(r0, Operand(Smi::FromInt(1)));
- __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
- } else {
- // Patch the call site to return false.
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
- __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
- // Get the boolean result location in scratch and patch it.
- __ GetRelocatedValueLocation(inline_site, scratch);
- __ str(r0, MemOperand(scratch));
-
- if (!ReturnTrueFalseObject()) {
- __ mov(r0, Operand(Smi::FromInt(1)));
- }
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- Label object_not_null, object_not_null_or_smi;
- __ bind(&not_js_object);
- // Before null, smi and string value checks, check that the rhs is a function
- // as for a non-function rhs an exception needs to be thrown.
- __ JumpIfSmi(function, &slow);
- __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
-
- // Null is not instance of anything.
- __ cmp(scratch, Operand(FACTORY->null_value()));
- __ b(ne, &object_not_null);
- __ mov(r0, Operand(Smi::FromInt(1)));
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&object_not_null);
- // Smi values are not instances of anything.
- __ JumpIfNotSmi(object, &object_not_null_or_smi);
- __ mov(r0, Operand(Smi::FromInt(1)));
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&object_not_null_or_smi);
- // String values are not instances of anything.
- __ IsObjectJSStringType(object, scratch, &slow);
- __ mov(r0, Operand(Smi::FromInt(1)));
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- // Slow-case. Tail call builtin.
- __ bind(&slow);
- if (!ReturnTrueFalseObject()) {
- if (HasArgsInRegisters()) {
- __ Push(r0, r1);
- }
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
- } else {
- __ EnterInternalFrame();
- __ Push(r0, r1);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_JS);
- __ LeaveInternalFrame();
- __ cmp(r0, Operand(0));
- __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
- __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
- __ Ret(HasArgsInRegisters() ? 0 : 2);
- }
-}
-
-
-Register InstanceofStub::left() { return r0; }
-
-
-Register InstanceofStub::right() { return r1; }
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The displacement is the offset of the last parameter (if any)
- // relative to the frame pointer.
- static const int kDisplacement =
- StandardFrameConstants::kCallerSPOffset - kPointerSize;
-
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(r1, &slow);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adaptor);
-
- // Check index against formal parameters count limit passed in
- // through register r0. Use unsigned comparison to get negative
- // check for free.
- __ cmp(r1, r0);
- __ b(hs, &slow);
-
- // Read the argument from the stack and return it.
- __ sub(r3, r0, r1);
- __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r0, MemOperand(r3, kDisplacement));
- __ Jump(lr);
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmp(r1, r0);
- __ b(cs, &slow);
-
- // Read the argument from the adaptor frame and return it.
- __ sub(r3, r0, r1);
- __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r0, MemOperand(r3, kDisplacement));
- __ Jump(lr);
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ push(r1);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
- // sp[0] : number of parameters
- // sp[4] : receiver displacement
- // sp[8] : function
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adaptor_frame);
-
- // Get the length from the frame.
- __ ldr(r1, MemOperand(sp, 0));
- __ b(&try_allocate);
-
- // Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ str(r1, MemOperand(sp, 0));
- __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ str(r3, MemOperand(sp, 1 * kPointerSize));
-
- // Try the new space allocation. Start out with computing the size
- // of the arguments object and the elements array in words.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ cmp(r1, Operand(0, RelocInfo::NONE));
- __ b(eq, &add_arguments_object);
- __ mov(r1, Operand(r1, LSR, kSmiTagSize));
- __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ bind(&add_arguments_object);
- __ add(r1, r1, Operand(GetArgumentsObjectSize() / kPointerSize));
-
- // Do the allocation of both objects in one go.
- __ AllocateInNewSpace(
- r1,
- r0,
- r2,
- r3,
- &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
- // Get the arguments boilerplate from the current (global) context.
- __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
- __ ldr(r4, MemOperand(r4,
- Context::SlotOffset(GetArgumentsBoilerplateIndex())));
-
- // Copy the JS object part.
- __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
-
- if (type_ == NEW_NON_STRICT) {
- // Setup the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
- const int kCalleeOffset = JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize;
- __ str(r3, FieldMemOperand(r0, kCalleeOffset));
- }
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
- __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
-
- // If there are no actual arguments, we're done.
- Label done;
- __ cmp(r1, Operand(0, RelocInfo::NONE));
- __ b(eq, &done);
-
- // Get the parameters pointer from the stack.
- __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
-
- // Setup the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ add(r4, r0, Operand(GetArgumentsObjectSize()));
- __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
- __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
- __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
- __ mov(r1, Operand(r1, LSR, kSmiTagSize)); // Untag the length for the loop.
-
- // Copy the fixed array slots.
- Label loop;
- // Setup r4 to point to the first array slot.
- __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ bind(&loop);
- // Pre-decrement r2 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
- // Post-increment r4 with kPointerSize on each iteration.
- __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
- __ sub(r1, r1, Operand(1));
- __ cmp(r1, Operand(0, RelocInfo::NONE));
- __ b(ne, &loop);
-
- // Return and remove the on-stack parameters.
- __ bind(&done);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void RegExpExecStub::Generate(MacroAssembler* masm) {
- // Just jump directly to runtime if native RegExp is not selected at compile
- // time or if regexp entry in generated code is turned off runtime switch or
- // at compilation.
-#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else // V8_INTERPRETED_REGEXP
- if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
- return;
- }
-
- // Stack frame on entry.
- // sp[0]: last_match_info (expected JSArray)
- // sp[4]: previous index
- // sp[8]: subject string
- // sp[12]: JSRegExp object
-
- static const int kLastMatchInfoOffset = 0 * kPointerSize;
- static const int kPreviousIndexOffset = 1 * kPointerSize;
- static const int kSubjectOffset = 2 * kPointerSize;
- static const int kJSRegExpOffset = 3 * kPointerSize;
-
- Label runtime, invoke_regexp;
-
- // Allocation of registers for this function. These are in callee save
- // registers and will be preserved by the call to the native RegExp code, as
- // this code is called using the normal C calling convention. When calling
- // directly from generated code the native RegExp code will not do a GC and
- // therefore the content of these registers are safe to use after the call.
- Register subject = r4;
- Register regexp_data = r5;
- Register last_match_info_elements = r6;
-
- // Ensure that a RegExp stack is allocated.
- Isolate* isolate = masm->isolate();
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate);
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate);
- __ mov(r0, Operand(address_of_regexp_stack_memory_size));
- __ ldr(r0, MemOperand(r0, 0));
- __ tst(r0, Operand(r0));
- __ b(eq, &runtime);
-
- // Check that the first argument is a JSRegExp object.
- __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &runtime);
- __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
- __ b(ne, &runtime);
-
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- __ tst(regexp_data, Operand(kSmiTagMask));
- __ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
- __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
- __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
- }
-
- // regexp_data: RegExp data (FixedArray)
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
- __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
- __ b(ne, &runtime);
-
- // regexp_data: RegExp data (FixedArray)
- // Check that the number of captures fit in the static offsets vector buffer.
- __ ldr(r2,
- FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2. This
- // uses the asumption that smis are 2 * their untagged value.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(r2, r2, Operand(2)); // r2 was a smi.
- // Check that the static offsets vector buffer is large enough.
- __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
- __ b(hi, &runtime);
-
- // r2: Number of capture registers
- // regexp_data: RegExp data (FixedArray)
- // Check that the second argument is a string.
- __ ldr(subject, MemOperand(sp, kSubjectOffset));
- __ tst(subject, Operand(kSmiTagMask));
- __ b(eq, &runtime);
- Condition is_string = masm->IsObjectStringType(subject, r0);
- __ b(NegateCondition(is_string), &runtime);
- // Get the length of the string to r3.
- __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));
-
- // r2: Number of capture registers
- // r3: Length of subject string as a smi
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check that the third argument is a positive smi less than the subject
- // string length. A negative value will be greater (unsigned comparison).
- __ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &runtime);
- __ cmp(r3, Operand(r0));
- __ b(ls, &runtime);
-
- // r2: Number of capture registers
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check that the fourth object is a JSArray object.
- __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &runtime);
- __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
- __ b(ne, &runtime);
- // Check that the JSArray is in fast case.
- __ ldr(last_match_info_elements,
- FieldMemOperand(r0, JSArray::kElementsOffset));
- __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(r0, ip);
- __ b(ne, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ ldr(r0,
- FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
- __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
- __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
- __ b(gt, &runtime);
-
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_string;
- __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- // First check for flat string.
- __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask));
- STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
- __ b(eq, &seq_string);
-
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check for flat cons string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- STATIC_ASSERT(kExternalStringTag !=0);
- STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
- __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag));
- __ b(ne, &runtime);
- __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
- __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
- __ cmp(r0, r1);
- __ b(ne, &runtime);
- __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
- __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- // Is first part a flat string?
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r0, Operand(kStringRepresentationMask));
- __ b(ne, &runtime);
-
- __ bind(&seq_string);
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // r0: Instance type of subject string
- STATIC_ASSERT(4 == kAsciiStringTag);
- STATIC_ASSERT(kTwoByteStringTag == 0);
- // Find the code object based on the assumptions above.
- __ and_(r0, r0, Operand(kStringEncodingMask));
- __ mov(r3, Operand(r0, ASR, 2), SetCC);
- __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
- __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
-
- // Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object otherwise it contains
- // the hole.
- __ CompareObjectType(r7, r0, r0, CODE_TYPE);
- __ b(ne, &runtime);
-
- // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
- // r7: code
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Load used arguments before starting to push arguments for call to native
- // RegExp code to avoid handling changing stack height.
- __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
- __ mov(r1, Operand(r1, ASR, kSmiTagSize));
-
- // r1: previous index
- // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
- // r7: code
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
-
- // Isolates: note we add an additional parameter here (isolate pointer).
- static const int kRegExpExecuteArguments = 8;
- static const int kParameterRegisters = 4;
- __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
-
- // Stack pointer now points to cell where return address is to be written.
- // Arguments are before that on the stack or in registers.
-
- // Argument 8 (sp[16]): Pass current isolate address.
- __ mov(r0, Operand(ExternalReference::isolate_address()));
- __ str(r0, MemOperand(sp, 4 * kPointerSize));
-
- // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript.
- __ mov(r0, Operand(1));
- __ str(r0, MemOperand(sp, 3 * kPointerSize));
-
- // Argument 6 (sp[8]): Start (high end) of backtracking stack memory area.
- __ mov(r0, Operand(address_of_regexp_stack_memory_address));
- __ ldr(r0, MemOperand(r0, 0));
- __ mov(r2, Operand(address_of_regexp_stack_memory_size));
- __ ldr(r2, MemOperand(r2, 0));
- __ add(r0, r0, Operand(r2));
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
-
- // Argument 5 (sp[4]): static offsets vector buffer.
- __ mov(r0,
- Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
- __ str(r0, MemOperand(sp, 1 * kPointerSize));
-
- // For arguments 4 and 3 get string length, calculate start of string data and
- // calculate the shift of the index (0 for ASCII and 1 for two byte).
- __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset));
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ eor(r3, r3, Operand(1));
- // Argument 4 (r3): End of string data
- // Argument 3 (r2): Start of string data
- __ add(r2, r9, Operand(r1, LSL, r3));
- __ add(r3, r9, Operand(r0, LSL, r3));
-
- // Argument 2 (r1): Previous index.
- // Already there
-
- // Argument 1 (r0): Subject string.
- __ mov(r0, subject);
-
- // Locate the code entry and call it.
- __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
- DirectCEntryStub stub;
- stub.GenerateCall(masm, r7);
-
- __ LeaveExitFrame(false, no_reg);
-
- // r0: result
- // subject: subject string (callee saved)
- // regexp_data: RegExp data (callee saved)
- // last_match_info_elements: Last match info elements (callee saved)
-
- // Check the result.
- Label success;
-
- __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
- __ b(eq, &success);
- Label failure;
- __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
- __ b(eq, &failure);
- __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
- // If not exception it can only be retry. Handle that in the runtime system.
- __ b(ne, &runtime);
- // Result must now be exception. If there is no pending exception already a
- // stack overflow (on the backtrack stack) was detected in RegExp code but
- // haven't created the exception yet. Handle that in the runtime system.
- // TODO(592): Rerunning the RegExp to get the stack overflow exception.
- __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate)));
- __ ldr(r1, MemOperand(r1, 0));
- __ mov(r2, Operand(ExternalReference(Isolate::k_pending_exception_address,
- isolate)));
- __ ldr(r0, MemOperand(r2, 0));
- __ cmp(r0, r1);
- __ b(eq, &runtime);
-
- __ str(r1, MemOperand(r2, 0)); // Clear pending exception.
-
- // Check if the exception is a termination. If so, throw as uncatchable.
- __ LoadRoot(ip, Heap::kTerminationExceptionRootIndex);
- __ cmp(r0, ip);
- Label termination_exception;
- __ b(eq, &termination_exception);
-
- __ Throw(r0); // Expects thrown value in r0.
-
- __ bind(&termination_exception);
- __ ThrowUncatchable(TERMINATION, r0); // Expects thrown value in r0.
-
- __ bind(&failure);
- // For failure and exception return null.
- __ mov(r0, Operand(FACTORY->null_value()));
- __ add(sp, sp, Operand(4 * kPointerSize));
- __ Ret();
-
- // Process the result from the native regexp code.
- __ bind(&success);
- __ ldr(r1,
- FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(r1, r1, Operand(2)); // r1 was a smi.
-
- // r1: number of capture registers
- // r4: subject string
- // Store the capture count.
- __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi.
- __ str(r2, FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastCaptureCountOffset));
- // Store last subject and last input.
- __ mov(r3, last_match_info_elements); // Moved up to reduce latency.
- __ str(subject,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset));
- __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
- __ str(subject,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastInputOffset));
- __ mov(r3, last_match_info_elements);
- __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
-
- // Get the static offsets vector filled by the native regexp code.
- ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector(isolate);
- __ mov(r2, Operand(address_of_static_offsets_vector));
-
- // r1: number of capture registers
- // r2: offsets vector
- Label next_capture, done;
- // Capture register counter starts from number of capture registers and
- // counts down until wraping after zero.
- __ add(r0,
- last_match_info_elements,
- Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
- __ bind(&next_capture);
- __ sub(r1, r1, Operand(1), SetCC);
- __ b(mi, &done);
- // Read the value from the static offsets vector buffer.
- __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
- // Store the smi value in the last match info.
- __ mov(r3, Operand(r3, LSL, kSmiTagSize));
- __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
- __ jmp(&next_capture);
- __ bind(&done);
-
- // Return last match info.
- __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
- __ add(sp, sp, Operand(4 * kPointerSize));
- __ Ret();
-
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#endif // V8_INTERPRETED_REGEXP
-}
-
-
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- const int kMaxInlineLength = 100;
- Label slowcase;
- Label done;
- __ ldr(r1, MemOperand(sp, kPointerSize * 2));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- __ tst(r1, Operand(kSmiTagMask));
- __ b(ne, &slowcase);
- __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
- __ b(hi, &slowcase);
- // Smi-tagging is equivalent to multiplying by 2.
- // Allocate RegExpResult followed by FixedArray with size in ebx.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- // Size of JSArray with two in-object properties and the header of a
- // FixedArray.
- int objects_size =
- (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
- __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
- __ add(r2, r5, Operand(objects_size));
- __ AllocateInNewSpace(
- r2, // In: Size, in words.
- r0, // Out: Start of allocation (tagged).
- r3, // Scratch register.
- r4, // Scratch register.
- &slowcase,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
- // r0: Start of allocated area, object-tagged.
- // r1: Number of elements in array, as smi.
- // r5: Number of elements, untagged.
-
- // Set JSArray map to global.regexp_result_map().
- // Set empty properties FixedArray.
- // Set elements to point to FixedArray allocated right after the JSArray.
- // Interleave operations for better latency.
- __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ add(r3, r0, Operand(JSRegExpResult::kSize));
- __ mov(r4, Operand(FACTORY->empty_fixed_array()));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
- __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
- __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- // Set input, index and length fields from arguments.
- __ ldr(r1, MemOperand(sp, kPointerSize * 0));
- __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
- __ ldr(r1, MemOperand(sp, kPointerSize * 1));
- __ str(r1, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
- __ ldr(r1, MemOperand(sp, kPointerSize * 2));
- __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
-
- // Fill out the elements FixedArray.
- // r0: JSArray, tagged.
- // r3: FixedArray, tagged.
- // r5: Number of elements in array, untagged.
-
- // Set map.
- __ mov(r2, Operand(FACTORY->fixed_array_map()));
- __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- // Set FixedArray length.
- __ mov(r6, Operand(r5, LSL, kSmiTagSize));
- __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
- // Fill contents of fixed-array with the-hole.
- __ mov(r2, Operand(FACTORY->the_hole_value()));
- __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // Fill fixed array elements with hole.
- // r0: JSArray, tagged.
- // r2: the hole.
- // r3: Start of elements in FixedArray.
- // r5: Number of elements to fill.
- Label loop;
- __ tst(r5, Operand(r5));
- __ bind(&loop);
- __ b(le, &done); // Jump if r1 is negative or zero.
- __ sub(r5, r5, Operand(1), SetCC);
- __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
- __ jmp(&loop);
-
- __ bind(&done);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&slowcase);
- __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- Label slow;
-
- // If the receiver might be a value (string, number or boolean) check for this
- // and box it if it is.
- if (ReceiverMightBeValue()) {
- // Get the receiver from the stack.
- // function, receiver [, arguments]
- Label receiver_is_value, receiver_is_js_object;
- __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
-
- // Check if receiver is a smi (which is a number value).
- __ JumpIfSmi(r1, &receiver_is_value);
-
- // Check if the receiver is a valid JS object.
- __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
- __ b(ge, &receiver_is_js_object);
-
- // Call the runtime to box the value.
- __ bind(&receiver_is_value);
- __ EnterInternalFrame();
- __ push(r1);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
- __ LeaveInternalFrame();
- __ str(r0, MemOperand(sp, argc_ * kPointerSize));
-
- __ bind(&receiver_is_js_object);
- }
-
- // Get the function to call from the stack.
- // function, receiver [, arguments]
- __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
-
- // Check that the function is really a JavaScript function.
- // r1: pushed function (to be verified)
- __ JumpIfSmi(r1, &slow);
- // Get the map of the function object.
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
-
- // Fast-case: Invoke the function now.
- // r1: pushed function
- ParameterCount actual(argc_);
- __ InvokeFunction(r1, actual, JUMP_FUNCTION);
-
- // Slow-case: Non-function called.
- __ bind(&slow);
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ str(r1, MemOperand(sp, argc_ * kPointerSize));
- __ mov(r0, Operand(argc_)); // Setup the number of arguments.
- __ mov(r2, Operand(0, RelocInfo::NONE));
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
-
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
-
- const char* cc_name;
- switch (cc_) {
- case lt: cc_name = "LT"; break;
- case gt: cc_name = "GT"; break;
- case le: cc_name = "LE"; break;
- case ge: cc_name = "GE"; break;
- case eq: cc_name = "EQ"; break;
- case ne: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
-
- const char* lhs_name = lhs_.is(r0) ? "_r0" : "_r1";
- const char* rhs_name = rhs_.is(r0) ? "_r0" : "_r1";
-
- const char* strict_name = "";
- if (strict_ && (cc_ == eq || cc_ == ne)) {
- strict_name = "_STRICT";
- }
-
- const char* never_nan_nan_name = "";
- if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
- never_nan_nan_name = "_NO_NAN";
- }
-
- const char* include_number_compare_name = "";
- if (!include_number_compare_) {
- include_number_compare_name = "_NO_NUMBER";
- }
-
- const char* include_smi_compare_name = "";
- if (!include_smi_compare_) {
- include_smi_compare_name = "_NO_SMI";
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "CompareStub_%s%s%s%s%s%s",
- cc_name,
- lhs_name,
- rhs_name,
- strict_name,
- never_nan_nan_name,
- include_number_compare_name,
- include_smi_compare_name);
- return name_;
-}
-
-
-int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate
- // stubs the never NaN NaN condition is only taken into account if the
- // condition is equals.
- ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
- return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
- | RegisterField::encode(lhs_.is(r0))
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_)
- | IncludeSmiCompareField::encode(include_smi_compare_);
-}
-
-
-// StringCharCodeAtGenerator
-void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- Label flat_string;
- Label ascii_string;
- Label got_char_code;
-
- // If the receiver is a smi trigger the non-string case.
- __ JumpIfSmi(object_, receiver_not_string_);
-
- // Fetch the instance type of the receiver into result register.
- __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- // If the receiver is not a string trigger the non-string case.
- __ tst(result_, Operand(kIsNotStringMask));
- __ b(ne, receiver_not_string_);
-
- // If the index is non-smi trigger the non-smi case.
- __ JumpIfNotSmi(index_, &index_not_smi_);
-
- // Put smi-tagged index into scratch register.
- __ mov(scratch_, index_);
- __ bind(&got_smi_index_);
-
- // Check for index out of range.
- __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
- __ cmp(ip, Operand(scratch_));
- __ b(ls, index_out_of_range_);
-
- // We need special handling for non-flat strings.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(result_, Operand(kStringRepresentationMask));
- __ b(eq, &flat_string);
-
- // Handle non-flat strings.
- __ tst(result_, Operand(kIsConsStringMask));
- __ b(eq, &call_runtime_);
-
- // ConsString.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
- __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
- __ cmp(result_, Operand(ip));
- __ b(ne, &call_runtime_);
- // Get the first of the two strings and load its instance type.
- __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
- __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- // If the first cons component is also non-flat, then go to runtime.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(result_, Operand(kStringRepresentationMask));
- __ b(ne, &call_runtime_);
-
- // Check for 1-byte or 2-byte string.
- __ bind(&flat_string);
- STATIC_ASSERT(kAsciiStringTag != 0);
- __ tst(result_, Operand(kStringEncodingMask));
- __ b(ne, &ascii_string);
-
- // 2-byte string.
- // Load the 2-byte character code into the result register. We can
- // add without shifting since the smi tag size is the log2 of the
- // number of bytes in a two-byte character.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
- __ add(scratch_, object_, Operand(scratch_));
- __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize));
- __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
-
- __ bind(&got_char_code);
- __ mov(result_, Operand(result_, LSL, kSmiTagSize));
- __ bind(&exit_);
-}
-
-
-void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
-
- // Index is not a smi.
- __ bind(&index_not_smi_);
- // If index is a heap number, try converting it to an integer.
- __ CheckMap(index_,
- scratch_,
- Heap::kHeapNumberMapRootIndex,
- index_not_number_,
- true);
- call_helper.BeforeCall(masm);
- __ Push(object_, index_);
- __ push(index_); // Consumed by runtime conversion function.
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
- } else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
- }
- // Save the conversion result before the pop instructions below
- // have a chance to overwrite it.
- __ Move(scratch_, r0);
- __ pop(index_);
- __ pop(object_);
- // Reload the instance type.
- __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- call_helper.AfterCall(masm);
- // If index is still not a smi, it must be out of range.
- __ JumpIfNotSmi(scratch_, index_out_of_range_);
- // Otherwise, return to the fast path.
- __ jmp(&got_smi_index_);
-
- // Call runtime. We get here when the receiver is a string and the
- // index is a number, but the code of getting the actual character
- // is too complex (e.g., when the string needs to be flattened).
- __ bind(&call_runtime_);
- call_helper.BeforeCall(masm);
- __ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- __ Move(result_, r0);
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
- __ tst(code_,
- Operand(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
- __ b(ne, &slow_case_);
-
- __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point code register contains smi tagged ASCII char code.
- STATIC_ASSERT(kSmiTag == 0);
- __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(result_, Operand(ip));
- __ b(eq, &slow_case_);
- __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- __ Move(result_, r0);
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharAtGenerator
-
-void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
- char_code_at_generator_.GenerateFast(masm);
- char_from_code_generator_.GenerateFast(masm);
-}
-
-
-void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, call_helper);
- char_from_code_generator_.GenerateSlow(masm, call_helper);
-}
-
-
-class StringHelper : public AllStatic {
- public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersLong adds too much
- // overhead. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
-
- // Generate code for copying a large number of characters. This function
- // is allowed to spend extra time setting up conditions to make copying
- // faster. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags);
-
-
- // Probe the symbol table for a two character string. If the string is
- // not found by probing a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the symbol table. If the
- // string is found the code falls through with the string in register r0.
- // Contents of both c1 and c2 registers are modified. At the exit c1 is
- // guaranteed to contain halfword with low and high bytes equal to
- // initial contents of c1 and c2 respectively.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found);
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- Label loop;
- Label done;
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (!ascii) {
- __ add(count, count, Operand(count), SetCC);
- } else {
- __ cmp(count, Operand(0, RelocInfo::NONE));
- }
- __ b(eq, &done);
-
- __ bind(&loop);
- __ ldrb(scratch, MemOperand(src, 1, PostIndex));
- // Perform sub between load and dependent store to get the load time to
- // complete.
- __ sub(count, count, Operand(1), SetCC);
- __ strb(scratch, MemOperand(dest, 1, PostIndex));
- // last iteration.
- __ b(gt, &loop);
-
- __ bind(&done);
-}
-
-
-enum CopyCharactersFlags {
- COPY_ASCII = 1,
- DEST_ALWAYS_ALIGNED = 2
-};
-
-
-void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags) {
- bool ascii = (flags & COPY_ASCII) != 0;
- bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
-
- if (dest_always_aligned && FLAG_debug_code) {
- // Check that destination is actually word aligned if the flag says
- // that it is.
- __ tst(dest, Operand(kPointerAlignmentMask));
- __ Check(eq, "Destination of copy not aligned.");
- }
-
- const int kReadAlignment = 4;
- const int kReadAlignmentMask = kReadAlignment - 1;
- // Ensure that reading an entire aligned word containing the last character
- // of a string will not read outside the allocated area (because we pad up
- // to kObjectAlignment).
- STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
- // Assumes word reads and writes are little endian.
- // Nothing to do for zero characters.
- Label done;
- if (!ascii) {
- __ add(count, count, Operand(count), SetCC);
- } else {
- __ cmp(count, Operand(0, RelocInfo::NONE));
- }
- __ b(eq, &done);
-
- // Assume that you cannot read (or write) unaligned.
- Label byte_loop;
- // Must copy at least eight bytes, otherwise just do it one byte at a time.
- __ cmp(count, Operand(8));
- __ add(count, dest, Operand(count));
- Register limit = count; // Read until src equals this.
- __ b(lt, &byte_loop);
-
- if (!dest_always_aligned) {
- // Align dest by byte copying. Copies between zero and three bytes.
- __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
- Label dest_aligned;
- __ b(eq, &dest_aligned);
- __ cmp(scratch4, Operand(2));
- __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
- __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
- __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex));
- __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
- __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
- __ bind(&dest_aligned);
- }
-
- Label simple_loop;
-
- __ sub(scratch4, dest, Operand(src));
- __ and_(scratch4, scratch4, Operand(0x03), SetCC);
- __ b(eq, &simple_loop);
- // Shift register is number of bits in a source word that
- // must be combined with bits in the next source word in order
- // to create a destination word.
-
- // Complex loop for src/dst that are not aligned the same way.
- {
- Label loop;
- __ mov(scratch4, Operand(scratch4, LSL, 3));
- Register left_shift = scratch4;
- __ and_(src, src, Operand(~3)); // Round down to load previous word.
- __ ldr(scratch1, MemOperand(src, 4, PostIndex));
- // Store the "shift" most significant bits of scratch in the least
- // signficant bits (i.e., shift down by (32-shift)).
- __ rsb(scratch2, left_shift, Operand(32));
- Register right_shift = scratch2;
- __ mov(scratch1, Operand(scratch1, LSR, right_shift));
-
- __ bind(&loop);
- __ ldr(scratch3, MemOperand(src, 4, PostIndex));
- __ sub(scratch5, limit, Operand(dest));
- __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
- __ str(scratch1, MemOperand(dest, 4, PostIndex));
- __ mov(scratch1, Operand(scratch3, LSR, right_shift));
- // Loop if four or more bytes left to copy.
- // Compare to eight, because we did the subtract before increasing dst.
- __ sub(scratch5, scratch5, Operand(8), SetCC);
- __ b(ge, &loop);
- }
- // There is now between zero and three bytes left to copy (negative that
- // number is in scratch5), and between one and three bytes already read into
- // scratch1 (eight times that number in scratch4). We may have read past
- // the end of the string, but because objects are aligned, we have not read
- // past the end of the object.
- // Find the minimum of remaining characters to move and preloaded characters
- // and write those as bytes.
- __ add(scratch5, scratch5, Operand(4), SetCC);
- __ b(eq, &done);
- __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
- // Move minimum of bytes read and bytes left to copy to scratch4.
- __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
- // Between one and three (value in scratch5) characters already read into
- // scratch ready to write.
- __ cmp(scratch5, Operand(2));
- __ strb(scratch1, MemOperand(dest, 1, PostIndex));
- __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
- __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
- // Copy any remaining bytes.
- __ b(&byte_loop);
-
- // Simple loop.
- // Copy words from src to dst, until less than four bytes left.
- // Both src and dest are word aligned.
- __ bind(&simple_loop);
- {
- Label loop;
- __ bind(&loop);
- __ ldr(scratch1, MemOperand(src, 4, PostIndex));
- __ sub(scratch3, limit, Operand(dest));
- __ str(scratch1, MemOperand(dest, 4, PostIndex));
- // Compare to 8, not 4, because we do the substraction before increasing
- // dest.
- __ cmp(scratch3, Operand(8));
- __ b(ge, &loop);
- }
-
- // Copy bytes from src to dst until dst hits limit.
- __ bind(&byte_loop);
- __ cmp(dest, Operand(limit));
- __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
- __ b(ge, &done);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex));
- __ b(&byte_loop);
-
- __ bind(&done);
-}
-
-
-void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits as such strings has a
- // different hash algorithm. Don't try to look for these in the symbol table.
- Label not_array_index;
- __ sub(scratch, c1, Operand(static_cast<int>('0')));
- __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
- __ b(hi, &not_array_index);
- __ sub(scratch, c2, Operand(static_cast<int>('0')));
- __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
-
- // If check failed combine both characters into single halfword.
- // This is required by the contract of the method: code at the
- // not_found branch expects this combination in c1 register
- __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
- __ b(ls, not_found);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- StringHelper::GenerateHashInit(masm, hash, c1);
- StringHelper::GenerateHashAddCharacter(masm, hash, c2);
- StringHelper::GenerateHashGetHash(masm, hash);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load symbol table
- // Load address of first element of the symbol table.
- Register symbol_table = c2;
- __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
-
- Register undefined = scratch4;
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- // Calculate capacity mask from the symbol table capacity.
- Register mask = scratch2;
- __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
- __ mov(mask, Operand(mask, ASR, 1));
- __ sub(mask, mask, Operand(1));
-
- // Calculate untagged address of the first element of the symbol table.
- Register first_symbol_table_element = symbol_table;
- __ add(first_symbol_table_element, symbol_table,
- Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string
- // mask: capacity mask
- // first_symbol_table_element: address of the first element of
- // the symbol table
- // undefined: the undefined object
- // scratch: -
-
- // Perform a number of probes in the symbol table.
- static const int kProbes = 4;
- Label found_in_symbol_table;
- Label next_probe[kProbes];
- for (int i = 0; i < kProbes; i++) {
- Register candidate = scratch5; // Scratch register contains candidate.
-
- // Calculate entry in symbol table.
- if (i > 0) {
- __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
- } else {
- __ mov(candidate, hash);
- }
-
- __ and_(candidate, candidate, Operand(mask));
-
- // Load the entry from the symble table.
- STATIC_ASSERT(SymbolTable::kEntrySize == 1);
- __ ldr(candidate,
- MemOperand(first_symbol_table_element,
- candidate,
- LSL,
- kPointerSizeLog2));
-
- // If entry is undefined no string with this hash can be found.
- Label is_string;
- __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE);
- __ b(ne, &is_string);
-
- __ cmp(undefined, candidate);
- __ b(eq, not_found);
- // Must be null (deleted entry).
- if (FLAG_debug_code) {
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(ip, candidate);
- __ Assert(eq, "oddball in symbol table is not undefined or null");
- }
- __ jmp(&next_probe[i]);
-
- __ bind(&is_string);
-
- // Check that the candidate is a non-external ASCII string. The instance
- // type is still in the scratch register from the CompareObjectType
- // operation.
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
-
- // If length is not 2 the string is not a candidate.
- __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
- __ cmp(scratch, Operand(Smi::FromInt(2)));
- __ b(ne, &next_probe[i]);
-
- // Check if the two characters match.
- // Assumes that word load is little endian.
- __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
- __ cmp(chars, scratch);
- __ b(eq, &found_in_symbol_table);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = scratch;
- __ bind(&found_in_symbol_table);
- __ Move(r0, result);
-}
-
-
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash = character + (character << 10);
- __ add(hash, character, Operand(character, LSL, 10));
- // hash ^= hash >> 6;
- __ eor(hash, hash, Operand(hash, ASR, 6));
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash += character;
- __ add(hash, hash, Operand(character));
- // hash += hash << 10;
- __ add(hash, hash, Operand(hash, LSL, 10));
- // hash ^= hash >> 6;
- __ eor(hash, hash, Operand(hash, ASR, 6));
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash) {
- // hash += hash << 3;
- __ add(hash, hash, Operand(hash, LSL, 3));
- // hash ^= hash >> 11;
- __ eor(hash, hash, Operand(hash, ASR, 11));
- // hash += hash << 15;
- __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
-
- // if (hash == 0) hash = 27;
- __ mov(hash, Operand(27), LeaveCC, ne);
-}
-
-
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // lr: return address
- // sp[0]: to
- // sp[4]: from
- // sp[8]: string
-
- // This stub is called from the native-call %_SubString(...), so
- // nothing can be assumed about the arguments. It is tested that:
- // "string" is a sequential string,
- // both "from" and "to" are smis, and
- // 0 <= from <= to <= string.length.
- // If any of these assumptions fail, we call the runtime system.
-
- static const int kToOffset = 0 * kPointerSize;
- static const int kFromOffset = 1 * kPointerSize;
- static const int kStringOffset = 2 * kPointerSize;
-
- // Check bounds and smi-ness.
- Register to = r6;
- Register from = r7;
- __ Ldrd(to, from, MemOperand(sp, kToOffset));
- STATIC_ASSERT(kFromOffset == kToOffset + 4);
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- // I.e., arithmetic shift right by one un-smi-tags.
- __ mov(r2, Operand(to, ASR, 1), SetCC);
- __ mov(r3, Operand(from, ASR, 1), SetCC, cc);
- // If either to or from had the smi tag bit set, then carry is set now.
- __ b(cs, &runtime); // Either "from" or "to" is not a smi.
- __ b(mi, &runtime); // From is negative.
-
- // Both to and from are smis.
-
- __ sub(r2, r2, Operand(r3), SetCC);
- __ b(mi, &runtime); // Fail if from > to.
- // Special handling of sub-strings of length 1 and 2. One character strings
- // are handled in the runtime system (looked up in the single character
- // cache). Two character strings are looked for in the symbol cache.
- __ cmp(r2, Operand(2));
- __ b(lt, &runtime);
-
- // r2: length
- // r3: from index (untaged smi)
- // r6 (a.k.a. to): to (smi)
- // r7 (a.k.a. from): from offset (smi)
-
- // Make sure first argument is a sequential (or flat) string.
- __ ldr(r5, MemOperand(sp, kStringOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(r5, Operand(kSmiTagMask));
- __ b(eq, &runtime);
- Condition is_string = masm->IsObjectStringType(r5, r1);
- __ b(NegateCondition(is_string), &runtime);
-
- // r1: instance type
- // r2: length
- // r3: from index (untagged smi)
- // r5: string
- // r6 (a.k.a. to): to (smi)
- // r7 (a.k.a. from): from offset (smi)
- Label seq_string;
- __ and_(r4, r1, Operand(kStringRepresentationMask));
- STATIC_ASSERT(kSeqStringTag < kConsStringTag);
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- __ cmp(r4, Operand(kConsStringTag));
- __ b(gt, &runtime); // External strings go to runtime.
- __ b(lt, &seq_string); // Sequential strings are handled directly.
-
- // Cons string. Try to recurse (once) on the first substring.
- // (This adds a little more generality than necessary to handle flattened
- // cons strings, but not much).
- __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset));
- __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ tst(r1, Operand(kStringRepresentationMask));
- STATIC_ASSERT(kSeqStringTag == 0);
- __ b(ne, &runtime); // Cons and External strings go to runtime.
-
- // Definitly a sequential string.
- __ bind(&seq_string);
-
- // r1: instance type.
- // r2: length
- // r3: from index (untaged smi)
- // r5: string
- // r6 (a.k.a. to): to (smi)
- // r7 (a.k.a. from): from offset (smi)
- __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
- __ cmp(r4, Operand(to));
- __ b(lt, &runtime); // Fail if to > length.
- to = no_reg;
-
- // r1: instance type.
- // r2: result string length.
- // r3: from index (untaged smi)
- // r5: string.
- // r7 (a.k.a. from): from offset (smi)
- // Check for flat ASCII string.
- Label non_ascii_flat;
- __ tst(r1, Operand(kStringEncodingMask));
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ b(eq, &non_ascii_flat);
-
- Label result_longer_than_two;
- __ cmp(r2, Operand(2));
- __ b(gt, &result_longer_than_two);
-
- // Sub string of length 2 requested.
- // Get the two characters forming the sub string.
- __ add(r5, r5, Operand(r3));
- __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize));
- __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1));
-
- // Try to lookup two character string in symbol table.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- // r2: result string length.
- // r3: two characters combined into halfword in little endian byte order.
- __ bind(&make_two_character_string);
- __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
- __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&result_longer_than_two);
-
- // Allocate the result.
- __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
-
- // r0: result string.
- // r2: result string length.
- // r5: string.
- // r7 (a.k.a. from): from offset (smi)
- // Locate first character of result.
- __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Locate 'from' character of string.
- __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, Operand(from, ASR, 1));
-
- // r0: result string.
- // r1: first character of result string.
- // r2: result string length.
- // r5: first character of sub string to copy.
- STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
- COPY_ASCII | DEST_ALWAYS_ALIGNED);
- __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&non_ascii_flat);
- // r2: result string length.
- // r5: string.
- // r7 (a.k.a. from): from offset (smi)
- // Check for flat two byte string.
-
- // Allocate the result.
- __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime);
-
- // r0: result string.
- // r2: result string length.
- // r5: string.
- // Locate first character of result.
- __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Locate 'from' character of string.
- __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // As "from" is a smi it is 2 times the value which matches the size of a two
- // byte character.
- __ add(r5, r5, Operand(from));
- from = no_reg;
-
- // r0: result string.
- // r1: first character of result.
- // r2: result length.
- // r5: first character of string to copy.
- STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(
- masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
- __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
-}
-
-
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- Label compare_lengths;
- // Find minimum length and length difference.
- __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
- __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
- Register length_delta = scratch3;
- __ mov(scratch1, scratch2, LeaveCC, gt);
- Register min_length = scratch1;
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(min_length, Operand(min_length));
- __ b(eq, &compare_lengths);
-
- // Untag smi.
- __ mov(min_length, Operand(min_length, ASR, kSmiTagSize));
-
- // Setup registers so that we only need to increment one register
- // in the loop.
- __ add(scratch2, min_length,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ add(left, left, Operand(scratch2));
- __ add(right, right, Operand(scratch2));
- // Registers left and right points to the min_length character of strings.
- __ rsb(min_length, min_length, Operand(-1));
- Register index = min_length;
- // Index starts at -min_length.
-
- {
- // Compare loop.
- Label loop;
- __ bind(&loop);
- // Compare characters.
- __ add(index, index, Operand(1), SetCC);
- __ ldrb(scratch2, MemOperand(left, index), ne);
- __ ldrb(scratch4, MemOperand(right, index), ne);
- // Skip to compare lengths with eq condition true.
- __ b(eq, &compare_lengths);
- __ cmp(scratch2, scratch4);
- __ b(eq, &loop);
- // Fallthrough with eq condition false.
- }
- // Compare lengths - strings up to min-length are equal.
- __ bind(&compare_lengths);
- ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
- // Use zero length_delta as result.
- __ mov(r0, Operand(length_delta), SetCC, eq);
- // Fall through to here if characters compare not-equal.
- __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
- __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
- __ Ret();
-}
-
-
-void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- Counters* counters = masm->isolate()->counters();
-
- // Stack frame on entry.
- // sp[0]: right string
- // sp[4]: left string
- __ Ldrd(r0 , r1, MemOperand(sp)); // Load right in r0, left in r1.
-
- Label not_same;
- __ cmp(r0, r1);
- __ b(ne, &not_same);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(r0, Operand(Smi::FromInt(EQUAL)));
- __ IncrementCounter(counters->string_compare_native(), 1, r1, r2);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&not_same);
-
- // Check that both objects are sequential ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);
-
- // Compare flat ASCII strings natively. Remove arguments from stack first.
- __ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
-}
-
-
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label string_add_runtime, call_builtin;
- Builtins::JavaScript builtin_id = Builtins::ADD;
-
- Counters* counters = masm->isolate()->counters();
-
- // Stack on entry:
- // sp[0]: second argument (right).
- // sp[4]: first argument (left).
-
- // Load the two arguments.
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument.
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- if (flags_ == NO_STRING_ADD_FLAGS) {
- __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
- // Load instance types.
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kStringTag == 0);
- // If either is not a string, go to runtime.
- __ tst(r4, Operand(kIsNotStringMask));
- __ tst(r5, Operand(kIsNotStringMask), eq);
- __ b(ne, &string_add_runtime);
- } else {
- // Here at least one of the arguments is definitely a string.
- // We convert the one that is not known to be a string.
- if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
- GenerateConvertArgument(
- masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
- GenerateConvertArgument(
- masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
- }
-
- // Both arguments are strings.
- // r0: first string
- // r1: second string
- // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- {
- Label strings_not_empty;
- // Check if either of the strings are empty. In that case return the other.
- __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
- __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty.
- __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second.
- STATIC_ASSERT(kSmiTag == 0);
- // Else test if second string is empty.
- __ cmp(r3, Operand(Smi::FromInt(0)), ne);
- __ b(ne, &strings_not_empty); // If either string was empty, return r0.
-
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&strings_not_empty);
- }
-
- __ mov(r2, Operand(r2, ASR, kSmiTagSize));
- __ mov(r3, Operand(r3, ASR, kSmiTagSize));
- // Both strings are non-empty.
- // r0: first string
- // r1: second string
- // r2: length of first string
- // r3: length of second string
- // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // Look at the length of the result of adding the two strings.
- Label string_add_flat_result, longer_than_two;
- // Adding two lengths can't overflow.
- STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
- __ add(r6, r2, Operand(r3));
- // Use the symbol table when adding two one character strings, as it
- // helps later optimizations to return a symbol here.
- __ cmp(r6, Operand(2));
- __ b(ne, &longer_than_two);
-
- // Check that both strings are non-external ASCII strings.
- if (flags_ != NO_STRING_ADD_FLAGS) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
- &string_add_runtime);
-
- // Get the two characters forming the sub string.
- __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
-
- // Try to lookup two character string in symbol table. If it is not found
- // just allocate a new one.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&make_two_character_string);
- // Resulting string has length 2 and first chars of two strings
- // are combined into single halfword in r2 register.
- // So we can fill resulting string without two loops by a single
- // halfword store instruction (which assumes that processor is
- // in a little endian mode)
- __ mov(r6, Operand(2));
- __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
- __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ cmp(r6, Operand(String::kMinNonFlatLength));
- __ b(lt, &string_add_flat_result);
- // Handle exceptionally long strings in the runtime system.
- STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
- ASSERT(IsPowerOf2(String::kMaxLength + 1));
- // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
- __ cmp(r6, Operand(String::kMaxLength + 1));
- __ b(hs, &string_add_runtime);
-
- // If result is not supposed to be flat, allocate a cons string object.
- // If both strings are ASCII the result is an ASCII cons string.
- if (flags_ != NO_STRING_ADD_FLAGS) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
- Label non_ascii, allocated, ascii_data;
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ tst(r4, Operand(kStringEncodingMask));
- __ tst(r5, Operand(kStringEncodingMask), ne);
- __ b(eq, &non_ascii);
-
- // Allocate an ASCII cons string.
- __ bind(&ascii_data);
- __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
- __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
- __ mov(r0, Operand(r7));
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only ASCII characters.
- // r4: first instance type.
- // r5: second instance type.
- __ tst(r4, Operand(kAsciiDataHintMask));
- __ tst(r5, Operand(kAsciiDataHintMask), ne);
- __ b(ne, &ascii_data);
- __ eor(r4, r4, Operand(r5));
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
- __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
- __ b(eq, &ascii_data);
-
- // Allocate a two byte cons string.
- __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
- __ jmp(&allocated);
-
- // Handle creating a flat result. First check that both strings are
- // sequential and that they have the same encoding.
- // r0: first string
- // r1: second string
- // r2: length of first string
- // r3: length of second string
- // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r6: sum of lengths.
- __ bind(&string_add_flat_result);
- if (flags_ != NO_STRING_ADD_FLAGS) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
- // Check that both strings are sequential.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r4, Operand(kStringRepresentationMask));
- __ tst(r5, Operand(kStringRepresentationMask), eq);
- __ b(ne, &string_add_runtime);
- // Now check if both strings have the same encoding (ASCII/Two-byte).
- // r0: first string.
- // r1: second string.
- // r2: length of first string.
- // r3: length of second string.
- // r6: sum of lengths..
- Label non_ascii_string_add_flat_result;
- ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test.
- __ eor(r7, r4, Operand(r5));
- __ tst(r7, Operand(kStringEncodingMask));
- __ b(ne, &string_add_runtime);
- // And see if it's ASCII or two-byte.
- __ tst(r4, Operand(kStringEncodingMask));
- __ b(eq, &non_ascii_string_add_flat_result);
-
- // Both strings are sequential ASCII strings. We also know that they are
- // short (since the sum of the lengths is less than kMinNonFlatLength).
- // r6: length of resulting flat string
- __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
- // Locate first character of result.
- __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Locate first character of first argument.
- __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // r0: first character of first string.
- // r1: second string.
- // r2: length of first string.
- // r3: length of second string.
- // r6: first character of result.
- // r7: result string.
- StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
-
- // Load second argument and locate first character.
- __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // r1: first character of second string.
- // r3: length of second string.
- // r6: next character of result.
- // r7: result string.
- StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
- __ mov(r0, Operand(r7));
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&non_ascii_string_add_flat_result);
- // Both strings are sequential two byte strings.
- // r0: first string.
- // r1: second string.
- // r2: length of first string.
- // r3: length of second string.
- // r6: sum of length of strings.
- __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime);
- // r0: first string.
- // r1: second string.
- // r2: length of first string.
- // r3: length of second string.
- // r7: result string.
-
- // Locate first character of result.
- __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Locate first character of first argument.
- __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- // r0: first character of first string.
- // r1: second string.
- // r2: length of first string.
- // r3: length of second string.
- // r6: first character of result.
- // r7: result string.
- StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
-
- // Locate first character of second argument.
- __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- // r1: first character of second string.
- // r3: length of second string.
- // r6: next character of result (after copy of first string).
- // r7: result string.
- StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
-
- __ mov(r0, Operand(r7));
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- // Just jump to runtime to add the two strings.
- __ bind(&string_add_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-
- if (call_builtin.is_linked()) {
- __ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_JS);
- }
-}
-
-
-void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* slow) {
- // First check if the argument is already a string.
- Label not_string, done;
- __ JumpIfSmi(arg, &not_string);
- __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE);
- __ b(lt, &done);
-
- // Check the number to string cache.
- Label not_cached;
- __ bind(&not_string);
- // Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- false,
- &not_cached);
- __ mov(arg, scratch1);
- __ str(arg, MemOperand(sp, stack_offset));
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ CompareObjectType(
- arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1.
- __ b(ne, slow);
- __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
- __ and_(scratch2,
- scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ cmp(scratch2,
- Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ b(ne, slow);
- __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset));
- __ str(arg, MemOperand(sp, stack_offset));
-
- __ bind(&done);
-}
-
-
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMIS);
- Label miss;
- __ orr(r2, r1, r0);
- __ tst(r2, Operand(kSmiTagMask));
- __ b(ne, &miss);
-
- if (GetCondition() == eq) {
- // For equality we do not care about the sign of the result.
- __ sub(r0, r0, r1, SetCC);
- } else {
- // Untag before subtracting to avoid handling overflow.
- __ SmiUntag(r1);
- __ sub(r0, r1, SmiUntagOperand(r0));
- }
- __ Ret();
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBERS);
-
- Label generic_stub;
- Label unordered;
- Label miss;
- __ and_(r2, r1, Operand(r0));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &generic_stub);
-
- __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &miss);
- __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &miss);
-
- // Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or VFP3 is unsupported.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
-
- // Load left and right operand
- __ sub(r2, r1, Operand(kHeapObjectTag));
- __ vldr(d0, r2, HeapNumber::kValueOffset);
- __ sub(r2, r0, Operand(kHeapObjectTag));
- __ vldr(d1, r2, HeapNumber::kValueOffset);
-
- // Compare operands
- __ VFPCompareAndSetFlags(d0, d1);
-
- // Don't base result on status bits when a NaN is involved.
- __ b(vs, &unordered);
-
- // Return a result of -1, 0, or 1, based on status bits.
- __ mov(r0, Operand(EQUAL), LeaveCC, eq);
- __ mov(r0, Operand(LESS), LeaveCC, lt);
- __ mov(r0, Operand(GREATER), LeaveCC, gt);
- __ Ret();
-
- __ bind(&unordered);
- }
-
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
- __ bind(&generic_stub);
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECTS);
- Label miss;
- __ and_(r2, r1, Operand(r0));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &miss);
-
- __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
- __ b(ne, &miss);
- __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
- __ b(ne, &miss);
-
- ASSERT(GetCondition() == eq);
- __ sub(r0, r0, Operand(r1));
- __ Ret();
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- __ Push(r1, r0);
- __ push(lr);
-
- // Call the runtime system in a fresh internal frame.
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
- __ EnterInternalFrame();
- __ Push(r1, r0);
- __ mov(ip, Operand(Smi::FromInt(op_)));
- __ push(ip);
- __ CallExternalReference(miss, 3);
- __ LeaveInternalFrame();
- // Compute the entry point of the rewritten stub.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore registers.
- __ pop(lr);
- __ pop(r0);
- __ pop(r1);
- __ Jump(r2);
-}
-
-
-void DirectCEntryStub::Generate(MacroAssembler* masm) {
- __ ldr(pc, MemOperand(sp, 0));
-}
-
-
-void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
- ExternalReference function) {
- __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
- RelocInfo::CODE_TARGET));
- __ mov(r2, Operand(function));
- // Push return address (accessible to GC through exit frame pc).
- __ str(pc, MemOperand(sp, 0));
- __ Jump(r2); // Call the api function.
-}
-
-
-void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
- Register target) {
- __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
- RelocInfo::CODE_TARGET));
- // Push return address (accessible to GC through exit frame pc).
- __ str(pc, MemOperand(sp, 0));
- __ Jump(target); // Call the C++ function.
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/code-stubs-arm.h b/src/3rdparty/v8/src/arm/code-stubs-arm.h
deleted file mode 100644
index 2b1ce4c..0000000
--- a/src/3rdparty/v8/src/arm/code-stubs-arm.h
+++ /dev/null
@@ -1,623 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_CODE_STUBS_ARM_H_
-#define V8_ARM_CODE_STUBS_ARM_H_
-
-#include "ic-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
- public:
- enum ArgumentType {
- TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
- UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
- };
-
- TranscendentalCacheStub(TranscendentalCache::Type type,
- ArgumentType argument_type)
- : type_(type), argument_type_(argument_type) { }
- void Generate(MacroAssembler* masm);
- private:
- TranscendentalCache::Type type_;
- ArgumentType argument_type_;
- void GenerateCallCFunction(MacroAssembler* masm, Register scratch);
-
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_ | argument_type_; }
- Runtime::FunctionId RuntimeFunction();
-};
-
-
-class ToBooleanStub: public CodeStub {
- public:
- explicit ToBooleanStub(Register tos) : tos_(tos) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Register tos_;
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return tos_.code(); }
-};
-
-
-class GenericBinaryOpStub : public CodeStub {
- public:
- static const int kUnknownIntValue = -1;
-
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- Register lhs,
- Register rhs,
- int constant_rhs = kUnknownIntValue)
- : op_(op),
- mode_(mode),
- lhs_(lhs),
- rhs_(rhs),
- constant_rhs_(constant_rhs),
- specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
- runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
- name_(NULL) { }
-
- GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- lhs_(LhsRegister(RegisterBits::decode(key))),
- rhs_(RhsRegister(RegisterBits::decode(key))),
- constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
- specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
- runtime_operands_type_(type_info),
- name_(NULL) { }
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- Register lhs_;
- Register rhs_;
- int constant_rhs_;
- bool specialized_on_rhs_;
- BinaryOpIC::TypeInfo runtime_operands_type_;
- char* name_;
-
- static const int kMaxKnownRhs = 0x40000000;
- static const int kKnownRhsKeyBits = 6;
-
- // Minor key encoding in 17 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 6> {};
- class TypeInfoBits: public BitField<int, 8, 3> {};
- class RegisterBits: public BitField<bool, 11, 1> {};
- class KnownIntBits: public BitField<int, 12, kKnownRhsKeyBits> {};
-
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
- // Encode the parameters in a unique 18 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | KnownIntBits::encode(MinorKeyForKnownInt())
- | TypeInfoBits::encode(runtime_operands_type_)
- | RegisterBits::encode(lhs_.is(r0));
- }
-
- void Generate(MacroAssembler* masm);
- void HandleNonSmiBitwiseOp(MacroAssembler* masm,
- Register lhs,
- Register rhs);
- void HandleBinaryOpSlowCases(MacroAssembler* masm,
- Label* not_smi,
- Register lhs,
- Register rhs,
- const Builtins::JavaScript& builtin);
- void GenerateTypeTransition(MacroAssembler* masm);
-
- static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
- if (constant_rhs == kUnknownIntValue) return false;
- if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
- if (op == Token::MOD) {
- if (constant_rhs <= 1) return false;
- if (constant_rhs <= 10) return true;
- if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
- return false;
- }
- return false;
- }
-
- int MinorKeyForKnownInt() {
- if (!specialized_on_rhs_) return 0;
- if (constant_rhs_ <= 10) return constant_rhs_ + 1;
- ASSERT(IsPowerOf2(constant_rhs_));
- int key = 12;
- int d = constant_rhs_;
- while ((d & 1) == 0) {
- key++;
- d >>= 1;
- }
- ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
- return key;
- }
-
- int KnownBitsForMinorKey(int key) {
- if (!key) return 0;
- if (key <= 11) return key - 1;
- int d = 1;
- while (key != 12) {
- key--;
- d <<= 1;
- }
- return d;
- }
-
- Register LhsRegister(bool lhs_is_r0) {
- return lhs_is_r0 ? r0 : r1;
- }
-
- Register RhsRegister(bool lhs_is_r0) {
- return lhs_is_r0 ? r1 : r0;
- }
-
- bool HasSmiSmiFastPath() {
- return op_ != Token::DIV;
- }
-
- bool ShouldGenerateSmiCode() {
- return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- bool ShouldGenerateFPCode() {
- return runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(runtime_operands_type_);
- }
-
- const char* GetName();
-
- virtual void FinishCode(Code* code) {
- code->set_binary_op_type(runtime_operands_type_);
- }
-
-#ifdef DEBUG
- void Print() {
- if (!specialized_on_rhs_) {
- PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
- } else {
- PrintF("GenericBinaryOpStub (%s by %d)\n",
- Token::String(op_),
- constant_rhs_);
- }
- }
-#endif
-};
-
-
-class TypeRecordingBinaryOpStub: public CodeStub {
- public:
- TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- operands_type_(TRBinaryOpIC::UNINITIALIZED),
- result_type_(TRBinaryOpIC::UNINITIALIZED),
- name_(NULL) {
- use_vfp3_ = CpuFeatures::IsSupported(VFP3);
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- TypeRecordingBinaryOpStub(
- int key,
- TRBinaryOpIC::TypeInfo operands_type,
- TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- use_vfp3_(VFP3Bits::decode(key)),
- operands_type_(operands_type),
- result_type_(result_type),
- name_(NULL) { }
-
- private:
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- Token::Value op_;
- OverwriteMode mode_;
- bool use_vfp3_;
-
- // Operand type information determined at runtime.
- TRBinaryOpIC::TypeInfo operands_type_;
- TRBinaryOpIC::TypeInfo result_type_;
-
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("TypeRecordingBinaryOpStub %d (op %s), "
- "(mode %d, runtime_type_info %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- TRBinaryOpIC::GetName(operands_type_));
- }
-#endif
-
- // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class VFP3Bits: public BitField<bool, 9, 1> {};
- class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
- class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
-
- Major MajorKey() { return TypeRecordingBinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | VFP3Bits::encode(use_vfp3_)
- | OperandTypeInfoBits::encode(operands_type_)
- | ResultTypeInfoBits::encode(result_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateSmiSmiOperation(MacroAssembler* masm);
- void GenerateFPOperation(MacroAssembler* masm,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required);
- void GenerateSmiCode(MacroAssembler* masm,
- Label* gc_required,
- SmiCodeGenerateHeapNumberResults heapnumber_results);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateAddStrings(MacroAssembler* masm);
- void GenerateCallRuntime(MacroAssembler* masm);
-
- void GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return TRBinaryOpIC::ToState(operands_type_);
- }
-
- virtual void FinishCode(Code* code) {
- code->set_type_recording_binary_op_type(operands_type_);
- code->set_type_recording_binary_op_result_type(result_type_);
- }
-
- friend class CodeGenerator;
-};
-
-
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- // Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
- // Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
- // Omit both string checks in stub.
- NO_STRING_CHECK_IN_STUB =
- NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
-};
-
-
-class StringAddStub: public CodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return flags_; }
-
- void Generate(MacroAssembler* masm);
-
- void GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* slow);
-
- const StringAddFlags flags_;
-};
-
-
-class SubStringStub: public CodeStub {
- public:
- SubStringStub() {}
-
- private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-
-class StringCompareStub: public CodeStub {
- public:
- StringCompareStub() { }
-
- // Compare two flat ASCII strings and returns result in r0.
- // Does not use the stack.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- private:
- Major MajorKey() { return StringCompare; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-// This stub can do a fast mod operation without using fp.
-// It is tail called from the GenericBinaryOpStub and it always
-// returns an answer. It never causes GC so it doesn't need a real frame.
-//
-// The inputs are always positive Smis. This is never called
-// where the denominator is a power of 2. We handle that separately.
-//
-// If we consider the denominator as an odd number multiplied by a power of 2,
-// then:
-// * The exponent (power of 2) is in the shift_distance register.
-// * The odd number is in the odd_number register. It is always in the range
-// of 3 to 25.
-// * The bits from the numerator that are to be copied to the answer (there are
-// shift_distance of them) are in the mask_bits register.
-// * The other bits of the numerator have been shifted down and are in the lhs
-// register.
-class IntegerModStub : public CodeStub {
- public:
- IntegerModStub(Register result,
- Register shift_distance,
- Register odd_number,
- Register mask_bits,
- Register lhs,
- Register scratch)
- : result_(result),
- shift_distance_(shift_distance),
- odd_number_(odd_number),
- mask_bits_(mask_bits),
- lhs_(lhs),
- scratch_(scratch) {
- // We don't code these in the minor key, so they should always be the same.
- // We don't really want to fix that since this stub is rather large and we
- // don't want many copies of it.
- ASSERT(shift_distance_.is(r9));
- ASSERT(odd_number_.is(r4));
- ASSERT(mask_bits_.is(r3));
- ASSERT(scratch_.is(r5));
- }
-
- private:
- Register result_;
- Register shift_distance_;
- Register odd_number_;
- Register mask_bits_;
- Register lhs_;
- Register scratch_;
-
- // Minor key encoding in 16 bits.
- class ResultRegisterBits: public BitField<int, 0, 4> {};
- class LhsRegisterBits: public BitField<int, 4, 4> {};
-
- Major MajorKey() { return IntegerMod; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return ResultRegisterBits::encode(result_.code())
- | LhsRegisterBits::encode(lhs_.code());
- }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "IntegerModStub"; }
-
- // Utility functions.
- void DigitSum(MacroAssembler* masm,
- Register lhs,
- int mask,
- int shift,
- Label* entry);
- void DigitSum(MacroAssembler* masm,
- Register lhs,
- Register scratch,
- int mask,
- int shift1,
- int shift2,
- Label* entry);
- void ModGetInRangeBySubtraction(MacroAssembler* masm,
- Register lhs,
- int shift,
- int rhs);
- void ModReduce(MacroAssembler* masm,
- Register lhs,
- int max,
- int denominator);
- void ModAnswer(MacroAssembler* masm,
- Register result,
- Register shift_distance,
- Register mask_bits,
- Register sum_of_digits);
-
-
-#ifdef DEBUG
- void Print() { PrintF("IntegerModStub\n"); }
-#endif
-};
-
-
-// This stub can convert a signed int32 to a heap number (double). It does
-// not work for int32s that are in Smi range! No GC occurs during this stub
-// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public CodeStub {
- public:
- WriteInt32ToHeapNumberStub(Register the_int,
- Register the_heap_number,
- Register scratch)
- : the_int_(the_int),
- the_heap_number_(the_heap_number),
- scratch_(scratch) { }
-
- private:
- Register the_int_;
- Register the_heap_number_;
- Register scratch_;
-
- // Minor key encoding in 16 bits.
- class IntRegisterBits: public BitField<int, 0, 4> {};
- class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
- class ScratchRegisterBits: public BitField<int, 8, 4> {};
-
- Major MajorKey() { return WriteInt32ToHeapNumber; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return IntRegisterBits::encode(the_int_.code())
- | HeapNumberRegisterBits::encode(the_heap_number_.code())
- | ScratchRegisterBits::encode(scratch_.code());
- }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
-
-#ifdef DEBUG
- void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
-#endif
-};
-
-
-class NumberToStringStub: public CodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- bool object_is_smi,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "NumberToStringStub"; }
-};
-
-
-// Enter C code from generated RegExp code in a way that allows
-// the C code to fix the return address in case of a GC.
-// Currently only needed on ARM.
-class RegExpCEntryStub: public CodeStub {
- public:
- RegExpCEntryStub() {}
- virtual ~RegExpCEntryStub() {}
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return RegExpCEntry; }
- int MinorKey() { return 0; }
-
- bool NeedsImmovableCode() { return true; }
-
- const char* GetName() { return "RegExpCEntryStub"; }
-};
-
-
-// Trampoline stub to call into native code. To call safely into native code
-// in the presence of compacting GC (which can move code objects) we need to
-// keep the code which called into native pinned in the memory. Currently the
-// simplest approach is to generate such stub early enough so it can never be
-// moved by GC
-class DirectCEntryStub: public CodeStub {
- public:
- DirectCEntryStub() {}
- void Generate(MacroAssembler* masm);
- void GenerateCall(MacroAssembler* masm, ExternalReference function);
- void GenerateCall(MacroAssembler* masm, Register target);
-
- private:
- Major MajorKey() { return DirectCEntry; }
- int MinorKey() { return 0; }
-
- bool NeedsImmovableCode() { return true; }
-
- const char* GetName() { return "DirectCEntryStub"; }
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_CODE_STUBS_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/codegen-arm-inl.h b/src/3rdparty/v8/src/arm/codegen-arm-inl.h
deleted file mode 100644
index 81ed2d0..0000000
--- a/src/3rdparty/v8/src/arm/codegen-arm-inl.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_ARM_CODEGEN_ARM_INL_H_
-#define V8_ARM_CODEGEN_ARM_INL_H_
-
-#include "virtual-frame-arm.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// Platform-specific inline functions.
-
-void DeferredCode::Jump() { __ jmp(&entry_label_); }
-void DeferredCode::Branch(Condition cond) { __ b(cond, &entry_label_); }
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_CODEGEN_ARM_INL_H_
diff --git a/src/3rdparty/v8/src/arm/codegen-arm.cc b/src/3rdparty/v8/src/arm/codegen-arm.cc
deleted file mode 100644
index 7b3ea14..0000000
--- a/src/3rdparty/v8/src/arm/codegen-arm.cc
+++ /dev/null
@@ -1,7437 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "jsregexp.h"
-#include "jump-target-inl.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-stack.h"
-#include "register-allocator-inl.h"
-#include "runtime.h"
-#include "scopes.h"
-#include "stub-cache.h"
-#include "virtual-frame-inl.h"
-#include "virtual-frame-arm-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm_)
-
-// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-void DeferredCode::SaveRegisters() {
- // On ARM you either have a completely spilled frame or you
- // handle it yourself, but at the moment there's no automation
- // of registers and deferred code.
-}
-
-
-void DeferredCode::RestoreRegisters() {
-}
-
-
-// -------------------------------------------------------------------------
-// Platform-specific RuntimeCallHelper functions.
-
-void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- frame_state_->frame()->AssertIsSpilled();
-}
-
-
-void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-}
-
-
-void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterInternalFrame();
-}
-
-
-void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveInternalFrame();
-}
-
-
-// -------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
- : owner_(owner),
- previous_(owner->state()) {
- owner->set_state(this);
-}
-
-
-ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
- JumpTarget* true_target,
- JumpTarget* false_target)
- : CodeGenState(owner),
- true_target_(true_target),
- false_target_(false_target) {
- owner->set_state(this);
-}
-
-
-TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
- Slot* slot,
- TypeInfo type_info)
- : CodeGenState(owner),
- slot_(slot) {
- owner->set_state(this);
- old_type_info_ = owner->set_type_info(slot, type_info);
-}
-
-
-CodeGenState::~CodeGenState() {
- ASSERT(owner_->state() == this);
- owner_->set_state(previous_);
-}
-
-
-TypeInfoCodeGenState::~TypeInfoCodeGenState() {
- owner()->set_type_info(slot_, old_type_info_);
-}
-
-// -------------------------------------------------------------------------
-// CodeGenerator implementation
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
- : deferred_(8),
- masm_(masm),
- info_(NULL),
- frame_(NULL),
- allocator_(NULL),
- cc_reg_(al),
- state_(NULL),
- loop_nesting_(0),
- type_info_(NULL),
- function_return_(JumpTarget::BIDIRECTIONAL),
- function_return_is_shadowed_(false) {
-}
-
-
-// Calling conventions:
-// fp: caller's frame pointer
-// sp: stack pointer
-// r1: called JS function
-// cp: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
- // Record the position for debugging purposes.
- CodeForFunctionPosition(info->function());
- Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
-
- // Initialize state.
- info_ = info;
-
- int slots = scope()->num_parameters() + scope()->num_stack_slots();
- ScopedVector<TypeInfo> type_info_array(slots);
- for (int i = 0; i < slots; i++) {
- type_info_array[i] = TypeInfo::Unknown();
- }
- type_info_ = &type_info_array;
-
- ASSERT(allocator_ == NULL);
- RegisterAllocator register_allocator(this);
- allocator_ = &register_allocator;
- ASSERT(frame_ == NULL);
- frame_ = new VirtualFrame();
- cc_reg_ = al;
-
- // Adjust for function-level loop nesting.
- ASSERT_EQ(0, loop_nesting_);
- loop_nesting_ = info->is_in_loop() ? 1 : 0;
-
- {
- CodeGenState state(this);
-
- // Entry:
- // Stack: receiver, arguments
- // lr: return address
- // fp: caller's frame pointer
- // sp: stack pointer
- // r1: called JS function
- // cp: callee's context
- allocator_->Initialize();
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- frame_->SpillAll();
- __ stop("stop-at");
- }
-#endif
-
- frame_->Enter();
- // tos: code slot
-
- // Allocate space for locals and initialize them. This also checks
- // for stack overflow.
- frame_->AllocateStackSlots();
-
- frame_->AssertIsSpilled();
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- // Allocate local context.
- // Get outer context and create a new context based on it.
- __ ldr(r0, frame_->Function());
- frame_->EmitPush(r0);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- frame_->CallStub(&stub, 1);
- } else {
- frame_->CallRuntime(Runtime::kNewContext, 1);
- }
-
-#ifdef DEBUG
- JumpTarget verified_true;
- __ cmp(r0, cp);
- verified_true.Branch(eq);
- __ stop("NewContext: r0 is expected to be the same as cp");
- verified_true.Bind();
-#endif
- // Update context local.
- __ str(cp, frame_->Context());
- }
-
- // TODO(1241774): Improve this code:
- // 1) only needed if we have a context
- // 2) no need to recompute context ptr every single time
- // 3) don't copy parameter operand code from SlotOperand!
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- frame_->AssertIsSpilled();
- for (int i = 0; i < scope()->num_parameters(); i++) {
- Variable* par = scope()->parameter(i);
- Slot* slot = par->AsSlot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- ASSERT(!scope()->is_global_scope()); // No params in global scope.
- __ ldr(r1, frame_->ParameterAt(i));
- // Loads r2 with context; used below in RecordWrite.
- __ str(r1, SlotOperand(slot, r2));
- // Load the offset into r3.
- int slot_offset =
- FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ RecordWrite(r2, Operand(slot_offset), r3, r1);
- }
- }
- }
-
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in
- // the context.
- if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
- StoreArgumentsObject(true);
- }
-
- // Initialize ThisFunction reference if present.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
- StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
- }
-
- // Initialize the function return target after the locals are set
- // up, because it needs the expected frame height from the frame.
- function_return_.SetExpectedHeight();
- function_return_is_shadowed_ = false;
-
- // Generate code to 'execute' declarations and initialize functions
- // (source elements). In case of an illegal redeclaration we need to
- // handle that instead of processing the declarations.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ illegal redeclarations");
- scope()->VisitIllegalRedeclaration(this);
- } else {
- Comment cmnt(masm_, "[ declarations");
- ProcessDeclarations(scope()->declarations());
- // Bail out if a stack-overflow exception occurred when processing
- // declarations.
- if (HasStackOverflow()) return;
- }
-
- if (FLAG_trace) {
- frame_->CallRuntime(Runtime::kTraceEnter, 0);
- // Ignore the return value.
- }
-
- // Compile the body of the function in a vanilla state. Don't
- // bother compiling all the code if the scope has an illegal
- // redeclaration.
- if (!scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ function body");
-#ifdef DEBUG
- bool is_builtin = Isolate::Current()->bootstrapper()->IsActive();
- bool should_trace =
- is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
- if (should_trace) {
- frame_->CallRuntime(Runtime::kDebugTrace, 0);
- // Ignore the return value.
- }
-#endif
- VisitStatements(info->function()->body());
- }
- }
-
- // Handle the return from the function.
- if (has_valid_frame()) {
- // If there is a valid frame, control flow can fall off the end of
- // the body. In that case there is an implicit return statement.
- ASSERT(!function_return_is_shadowed_);
- frame_->PrepareForReturn();
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- if (function_return_.is_bound()) {
- function_return_.Jump();
- } else {
- function_return_.Bind();
- GenerateReturnSequence();
- }
- } else if (function_return_.is_linked()) {
- // If the return target has dangling jumps to it, then we have not
- // yet generated the return sequence. This can happen when (a)
- // control does not flow off the end of the body so we did not
- // compile an artificial return statement just above, and (b) there
- // are return statements in the body but (c) they are all shadowed.
- function_return_.Bind();
- GenerateReturnSequence();
- }
-
- // Adjust for function-level loop nesting.
- ASSERT(loop_nesting_ == info->is_in_loop()? 1 : 0);
- loop_nesting_ = 0;
-
- // Code generation state must be reset.
- ASSERT(!has_cc());
- ASSERT(state_ == NULL);
- ASSERT(loop_nesting() == 0);
- ASSERT(!function_return_is_shadowed_);
- function_return_.Unuse();
- DeleteFrame();
-
- // Process any deferred code using the register allocator.
- if (!HasStackOverflow()) {
- ProcessDeferred();
- }
-
- allocator_ = NULL;
- type_info_ = NULL;
-}
-
-
-int CodeGenerator::NumberOfSlot(Slot* slot) {
- if (slot == NULL) return kInvalidSlotNumber;
- switch (slot->type()) {
- case Slot::PARAMETER:
- return slot->index();
- case Slot::LOCAL:
- return slot->index() + scope()->num_parameters();
- default:
- break;
- }
- return kInvalidSlotNumber;
-}
-
-
-MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
- // Currently, this assertion will fail if we try to assign to
- // a constant variable that is constant because it is read-only
- // (such as the variable referring to a named function expression).
- // We need to implement assignments to read-only variables.
- // Ideally, we should do this during AST generation (by converting
- // such assignments into expression statements); however, in general
- // we may not be able to make the decision until past AST generation,
- // that is when the entire program is known.
- ASSERT(slot != NULL);
- int index = slot->index();
- switch (slot->type()) {
- case Slot::PARAMETER:
- return frame_->ParameterAt(index);
-
- case Slot::LOCAL:
- return frame_->LocalAt(index);
-
- case Slot::CONTEXT: {
- // Follow the context chain if necessary.
- ASSERT(!tmp.is(cp)); // do not overwrite context register
- Register context = cp;
- int chain_length = scope()->ContextChainLength(slot->var()->scope());
- for (int i = 0; i < chain_length; i++) {
- // Load the closure.
- // (All contexts, even 'with' contexts, have a closure,
- // and it is the same for all contexts inside a function.
- // There is no need to go to the function context first.)
- __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- // Load the function context (which is the incoming, outer context).
- __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
- context = tmp;
- }
- // We may have a 'with' context now. Get the function context.
- // (In fact this mov may never be the needed, since the scope analysis
- // may not permit a direct context access in this case and thus we are
- // always at a function context. However it is safe to dereference be-
- // cause the function context of a function context is itself. Before
- // deleting this mov we should try to create a counter-example first,
- // though...)
- __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp, index);
- }
-
- default:
- UNREACHABLE();
- return MemOperand(r0, 0);
- }
-}
-
-
-MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
- Slot* slot,
- Register tmp,
- Register tmp2,
- JumpTarget* slow) {
- ASSERT(slot->type() == Slot::CONTEXT);
- Register context = cp;
-
- for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(tmp2, tmp2);
- slow->Branch(ne);
- }
- __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
- context = tmp;
- }
- }
- // Check that last extension is NULL.
- __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(tmp2, tmp2);
- slow->Branch(ne);
- __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp, slot->index());
-}
-
-
-// Loads a value on TOS. If it is a boolean value, the result may have been
-// (partially) translated into branches, or it may have set the condition
-// code register. If force_cc is set, the value is forced to set the
-// condition code register and no value is pushed. If the condition code
-// register was set, has_cc() is true and cc_reg_ contains the condition to
-// test for 'true'.
-void CodeGenerator::LoadCondition(Expression* x,
- JumpTarget* true_target,
- JumpTarget* false_target,
- bool force_cc) {
- ASSERT(!has_cc());
- int original_height = frame_->height();
-
- { ConditionCodeGenState new_state(this, true_target, false_target);
- Visit(x);
-
- // If we hit a stack overflow, we may not have actually visited
- // the expression. In that case, we ensure that we have a
- // valid-looking frame state because we will continue to generate
- // code as we unwind the C++ stack.
- //
- // It's possible to have both a stack overflow and a valid frame
- // state (eg, a subexpression overflowed, visiting it returned
- // with a dummied frame state, and visiting this expression
- // returned with a normal-looking state).
- if (HasStackOverflow() &&
- has_valid_frame() &&
- !has_cc() &&
- frame_->height() == original_height) {
- true_target->Jump();
- }
- }
- if (force_cc && frame_ != NULL && !has_cc()) {
- // Convert the TOS value to a boolean in the condition code register.
- ToBoolean(true_target, false_target);
- }
- ASSERT(!force_cc || !has_valid_frame() || has_cc());
- ASSERT(!has_valid_frame() ||
- (has_cc() && frame_->height() == original_height) ||
- (!has_cc() && frame_->height() == original_height + 1));
-}
-
-
-void CodeGenerator::Load(Expression* expr) {
- // We generally assume that we are not in a spilled scope for most
- // of the code generator. A failure to ensure this caused issue 815
- // and this assert is designed to catch similar issues.
- frame_->AssertIsNotSpilled();
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- JumpTarget true_target;
- JumpTarget false_target;
- LoadCondition(expr, &true_target, &false_target, false);
-
- if (has_cc()) {
- // Convert cc_reg_ into a boolean value.
- JumpTarget loaded;
- JumpTarget materialize_true;
- materialize_true.Branch(cc_reg_);
- frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
- loaded.Jump();
- materialize_true.Bind();
- frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
- loaded.Bind();
- cc_reg_ = al;
- }
-
- if (true_target.is_linked() || false_target.is_linked()) {
- // We have at least one condition value that has been "translated"
- // into a branch, thus it needs to be loaded explicitly.
- JumpTarget loaded;
- if (frame_ != NULL) {
- loaded.Jump(); // Don't lose the current TOS.
- }
- bool both = true_target.is_linked() && false_target.is_linked();
- // Load "true" if necessary.
- if (true_target.is_linked()) {
- true_target.Bind();
- frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
- }
- // If both "true" and "false" need to be loaded jump across the code for
- // "false".
- if (both) {
- loaded.Jump();
- }
- // Load "false" if necessary.
- if (false_target.is_linked()) {
- false_target.Bind();
- frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
- }
- // A value is loaded on all paths reaching this point.
- loaded.Bind();
- }
- ASSERT(has_valid_frame());
- ASSERT(!has_cc());
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::LoadGlobal() {
- Register reg = frame_->GetTOSRegister();
- __ ldr(reg, GlobalObjectOperand());
- frame_->EmitPush(reg);
-}
-
-
-void CodeGenerator::LoadGlobalReceiver(Register scratch) {
- Register reg = frame_->GetTOSRegister();
- __ ldr(reg, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(reg,
- FieldMemOperand(reg, GlobalObject::kGlobalReceiverOffset));
- frame_->EmitPush(reg);
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
- if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
-
- // In strict mode there is no need for shadow arguments.
- ASSERT(scope()->arguments_shadow() != NULL || scope()->is_strict_mode());
- // We don't want to do lazy arguments allocation for functions that
- // have heap-allocated contexts, because it interfers with the
- // uninitialized const tracking in the context objects.
- return (scope()->num_heap_slots() > 0 || scope()->is_strict_mode())
- ? EAGER_ARGUMENTS_ALLOCATION
- : LAZY_ARGUMENTS_ALLOCATION;
-}
-
-
-void CodeGenerator::StoreArgumentsObject(bool initial) {
- ArgumentsAllocationMode mode = ArgumentsMode();
- ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
-
- Comment cmnt(masm_, "[ store arguments object");
- if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
- // When using lazy arguments allocation, we store the hole value
- // as a sentinel indicating that the arguments object hasn't been
- // allocated yet.
- frame_->EmitPushRoot(Heap::kArgumentsMarkerRootIndex);
- } else {
- frame_->SpillAll();
- ArgumentsAccessStub stub(is_strict_mode()
- ? ArgumentsAccessStub::NEW_STRICT
- : ArgumentsAccessStub::NEW_NON_STRICT);
- __ ldr(r2, frame_->Function());
- // The receiver is below the arguments, the return address, and the
- // frame pointer on the stack.
- const int kReceiverDisplacement = 2 + scope()->num_parameters();
- __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
- __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
- frame_->Adjust(3);
- __ Push(r2, r1, r0);
- frame_->CallStub(&stub, 3);
- frame_->EmitPush(r0);
- }
-
- Variable* arguments = scope()->arguments();
- Variable* shadow = scope()->arguments_shadow();
- ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
- ASSERT((shadow != NULL && shadow->AsSlot() != NULL) ||
- scope()->is_strict_mode());
-
- JumpTarget done;
- if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
- // We have to skip storing into the arguments slot if it has
- // already been written to. This can happen if the a function
- // has a local variable named 'arguments'.
- LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
- Register arguments = frame_->PopToRegister();
- __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
- __ cmp(arguments, ip);
- done.Branch(ne);
- }
- StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
- if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
- if (shadow != NULL) {
- StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
- }
-}
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* expr) {
- // Special handling of identifiers as subexpressions of typeof.
- Variable* variable = expr->AsVariableProxy()->AsVariable();
- if (variable != NULL && !variable->is_this() && variable->is_global()) {
- // For a global variable we build the property reference
- // <global>.<variable> and perform a (regular non-contextual) property
- // load to make sure we do not get reference errors.
- Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
- Literal key(variable->name());
- Property property(&global, &key, RelocInfo::kNoPosition);
- Reference ref(this, &property);
- ref.GetValue();
- } else if (variable != NULL && variable->AsSlot() != NULL) {
- // For a variable that rewrites to a slot, we signal it is the immediate
- // subexpression of a typeof.
- LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
- } else {
- // Anything else can be handled normally.
- Load(expr);
- }
-}
-
-
-Reference::Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get)
- : cgen_(cgen),
- expression_(expression),
- type_(ILLEGAL),
- persist_after_get_(persist_after_get) {
- // We generally assume that we are not in a spilled scope for most
- // of the code generator. A failure to ensure this caused issue 815
- // and this assert is designed to catch similar issues.
- cgen->frame()->AssertIsNotSpilled();
- cgen->LoadReference(this);
-}
-
-
-Reference::~Reference() {
- ASSERT(is_unloaded() || is_illegal());
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
- Comment cmnt(masm_, "[ LoadReference");
- Expression* e = ref->expression();
- Property* property = e->AsProperty();
- Variable* var = e->AsVariableProxy()->AsVariable();
-
- if (property != NULL) {
- // The expression is either a property or a variable proxy that rewrites
- // to a property.
- Load(property->obj());
- if (property->key()->IsPropertyName()) {
- ref->set_type(Reference::NAMED);
- } else {
- Load(property->key());
- ref->set_type(Reference::KEYED);
- }
- } else if (var != NULL) {
- // The expression is a variable proxy that does not rewrite to a
- // property. Global variables are treated as named property references.
- if (var->is_global()) {
- LoadGlobal();
- ref->set_type(Reference::NAMED);
- } else {
- ASSERT(var->AsSlot() != NULL);
- ref->set_type(Reference::SLOT);
- }
- } else {
- // Anything else is a runtime error.
- Load(e);
- frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
- }
-}
-
-
-void CodeGenerator::UnloadReference(Reference* ref) {
- int size = ref->size();
- ref->set_unloaded();
- if (size == 0) return;
-
- // Pop a reference from the stack while preserving TOS.
- VirtualFrame::RegisterAllocationScope scope(this);
- Comment cmnt(masm_, "[ UnloadReference");
- if (size > 0) {
- Register tos = frame_->PopToRegister();
- frame_->Drop(size);
- frame_->EmitPush(tos);
- }
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
-// register to a boolean in the condition code register. The code
-// may jump to 'false_target' in case the register converts to 'false'.
-void CodeGenerator::ToBoolean(JumpTarget* true_target,
- JumpTarget* false_target) {
- // Note: The generated code snippet does not change stack variables.
- // Only the condition code should be set.
- bool known_smi = frame_->KnownSmiAt(0);
- Register tos = frame_->PopToRegister();
-
- // Fast case checks
-
- // Check if the value is 'false'.
- if (!known_smi) {
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(tos, ip);
- false_target->Branch(eq);
-
- // Check if the value is 'true'.
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(tos, ip);
- true_target->Branch(eq);
-
- // Check if the value is 'undefined'.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(tos, ip);
- false_target->Branch(eq);
- }
-
- // Check if the value is a smi.
- __ cmp(tos, Operand(Smi::FromInt(0)));
-
- if (!known_smi) {
- false_target->Branch(eq);
- __ tst(tos, Operand(kSmiTagMask));
- true_target->Branch(eq);
-
- // Slow case.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Implements the slow case by using ToBooleanStub.
- // The ToBooleanStub takes a single argument, and
- // returns a non-zero value for true, or zero for false.
- // Both the argument value and the return value use the
- // register assigned to tos_
- ToBooleanStub stub(tos);
- frame_->CallStub(&stub, 0);
- // Convert the result in "tos" to a condition code.
- __ cmp(tos, Operand(0, RelocInfo::NONE));
- } else {
- // Implements slow case by calling the runtime.
- frame_->EmitPush(tos);
- frame_->CallRuntime(Runtime::kToBool, 1);
- // Convert the result (r0) to a condition code.
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r0, ip);
- }
- }
-
- cc_reg_ = ne;
-}
-
-
-void CodeGenerator::GenericBinaryOperation(Token::Value op,
- OverwriteMode overwrite_mode,
- GenerateInlineSmi inline_smi,
- int constant_rhs) {
- // top of virtual frame: y
- // 2nd elt. on virtual frame : x
- // result : top of virtual frame
-
- // Stub is entered with a call: 'return address' is in lr.
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- if (inline_smi) {
- JumpTarget done;
- Register rhs = frame_->PopToRegister();
- Register lhs = frame_->PopToRegister(rhs);
- Register scratch = VirtualFrame::scratch0();
- __ orr(scratch, rhs, Operand(lhs));
- // Check they are both small and positive.
- __ tst(scratch, Operand(kSmiTagMask | 0xc0000000));
- ASSERT(rhs.is(r0) || lhs.is(r0)); // r0 is free now.
- STATIC_ASSERT(kSmiTag == 0);
- if (op == Token::ADD) {
- __ add(r0, lhs, Operand(rhs), LeaveCC, eq);
- } else {
- __ sub(r0, lhs, Operand(rhs), LeaveCC, eq);
- }
- done.Branch(eq);
- GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
- frame_->SpillAll();
- frame_->CallStub(&stub, 0);
- done.Bind();
- frame_->EmitPush(r0);
- break;
- } else {
- // Fall through!
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- if (inline_smi) {
- bool rhs_is_smi = frame_->KnownSmiAt(0);
- bool lhs_is_smi = frame_->KnownSmiAt(1);
- Register rhs = frame_->PopToRegister();
- Register lhs = frame_->PopToRegister(rhs);
- Register smi_test_reg;
- Condition cond;
- if (!rhs_is_smi || !lhs_is_smi) {
- if (rhs_is_smi) {
- smi_test_reg = lhs;
- } else if (lhs_is_smi) {
- smi_test_reg = rhs;
- } else {
- smi_test_reg = VirtualFrame::scratch0();
- __ orr(smi_test_reg, rhs, Operand(lhs));
- }
- // Check they are both Smis.
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- cond = eq;
- } else {
- cond = al;
- }
- ASSERT(rhs.is(r0) || lhs.is(r0)); // r0 is free now.
- if (op == Token::BIT_OR) {
- __ orr(r0, lhs, Operand(rhs), LeaveCC, cond);
- } else if (op == Token::BIT_AND) {
- __ and_(r0, lhs, Operand(rhs), LeaveCC, cond);
- } else {
- ASSERT(op == Token::BIT_XOR);
- STATIC_ASSERT(kSmiTag == 0);
- __ eor(r0, lhs, Operand(rhs), LeaveCC, cond);
- }
- if (cond != al) {
- JumpTarget done;
- done.Branch(cond);
- GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
- frame_->SpillAll();
- frame_->CallStub(&stub, 0);
- done.Bind();
- }
- frame_->EmitPush(r0);
- break;
- } else {
- // Fall through!
- }
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR: {
- Register rhs = frame_->PopToRegister();
- Register lhs = frame_->PopToRegister(rhs); // Don't pop to rhs register.
- GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
- frame_->SpillAll();
- frame_->CallStub(&stub, 0);
- frame_->EmitPush(r0);
- break;
- }
-
- case Token::COMMA: {
- Register scratch = frame_->PopToRegister();
- // Simply discard left value.
- frame_->Drop();
- frame_->EmitPush(scratch);
- break;
- }
-
- default:
- // Other cases should have been handled before this point.
- UNREACHABLE();
- break;
- }
-}
-
-
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
- DeferredInlineSmiOperation(Token::Value op,
- int value,
- bool reversed,
- OverwriteMode overwrite_mode,
- Register tos)
- : op_(op),
- value_(value),
- reversed_(reversed),
- overwrite_mode_(overwrite_mode),
- tos_register_(tos) {
- set_comment("[ DeferredInlinedSmiOperation");
- }
-
- virtual void Generate();
- // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
- // Exit(). Currently on ARM SaveRegisters() and RestoreRegisters() are empty
- // methods, it is the responsibility of the deferred code to save and restore
- // registers.
- virtual bool AutoSaveAndRestore() { return false; }
-
- void JumpToNonSmiInput(Condition cond);
- void JumpToAnswerOutOfRange(Condition cond);
-
- private:
- void GenerateNonSmiInput();
- void GenerateAnswerOutOfRange();
- void WriteNonSmiAnswer(Register answer,
- Register heap_number,
- Register scratch);
-
- Token::Value op_;
- int value_;
- bool reversed_;
- OverwriteMode overwrite_mode_;
- Register tos_register_;
- Label non_smi_input_;
- Label answer_out_of_range_;
-};
-
-
-// For bit operations we try harder and handle the case where the input is not
-// a Smi but a 32bits integer without calling the generic stub.
-void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond) {
- ASSERT(Token::IsBitOp(op_));
-
- __ b(cond, &non_smi_input_);
-}
-
-
-// For bit operations the result is always 32bits so we handle the case where
-// the result does not fit in a Smi without calling the generic stub.
-void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond) {
- ASSERT(Token::IsBitOp(op_));
-
- if ((op_ == Token::SHR) && !CpuFeatures::IsSupported(VFP3)) {
- // >>> requires an unsigned to double conversion and the non VFP code
- // does not support this conversion.
- __ b(cond, entry_label());
- } else {
- __ b(cond, &answer_out_of_range_);
- }
-}
-
-
-// On entry the non-constant side of the binary operation is in tos_register_
-// and the constant smi side is nowhere. The tos_register_ is not used by the
-// virtual frame. On exit the answer is in the tos_register_ and the virtual
-// frame is unchanged.
-void DeferredInlineSmiOperation::Generate() {
- VirtualFrame copied_frame(*frame_state()->frame());
- copied_frame.SpillAll();
-
- Register lhs = r1;
- Register rhs = r0;
- switch (op_) {
- case Token::ADD: {
- // Revert optimistic add.
- if (reversed_) {
- __ sub(r0, tos_register_, Operand(Smi::FromInt(value_)));
- __ mov(r1, Operand(Smi::FromInt(value_)));
- } else {
- __ sub(r1, tos_register_, Operand(Smi::FromInt(value_)));
- __ mov(r0, Operand(Smi::FromInt(value_)));
- }
- break;
- }
-
- case Token::SUB: {
- // Revert optimistic sub.
- if (reversed_) {
- __ rsb(r0, tos_register_, Operand(Smi::FromInt(value_)));
- __ mov(r1, Operand(Smi::FromInt(value_)));
- } else {
- __ add(r1, tos_register_, Operand(Smi::FromInt(value_)));
- __ mov(r0, Operand(Smi::FromInt(value_)));
- }
- break;
- }
-
- // For these operations there is no optimistic operation that needs to be
- // reverted.
- case Token::MUL:
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR: {
- if (tos_register_.is(r1)) {
- __ mov(r0, Operand(Smi::FromInt(value_)));
- } else {
- ASSERT(tos_register_.is(r0));
- __ mov(r1, Operand(Smi::FromInt(value_)));
- }
- if (reversed_ == tos_register_.is(r1)) {
- lhs = r0;
- rhs = r1;
- }
- break;
- }
-
- default:
- // Other cases should have been handled before this point.
- UNREACHABLE();
- break;
- }
-
- GenericBinaryOpStub stub(op_, overwrite_mode_, lhs, rhs, value_);
- __ CallStub(&stub);
-
- // The generic stub returns its value in r0, but that's not
- // necessarily what we want. We want whatever the inlined code
- // expected, which is that the answer is in the same register as
- // the operand was.
- __ Move(tos_register_, r0);
-
- // The tos register was not in use for the virtual frame that we
- // came into this function with, so we can merge back to that frame
- // without trashing it.
- copied_frame.MergeTo(frame_state()->frame());
-
- Exit();
-
- if (non_smi_input_.is_linked()) {
- GenerateNonSmiInput();
- }
-
- if (answer_out_of_range_.is_linked()) {
- GenerateAnswerOutOfRange();
- }
-}
-
-
-// Convert and write the integer answer into heap_number.
-void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
- Register heap_number,
- Register scratch) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, answer);
- if (op_ == Token::SHR) {
- __ vcvt_f64_u32(d0, s0);
- } else {
- __ vcvt_f64_s32(d0, s0);
- }
- __ sub(scratch, heap_number, Operand(kHeapObjectTag));
- __ vstr(d0, scratch, HeapNumber::kValueOffset);
- } else {
- WriteInt32ToHeapNumberStub stub(answer, heap_number, scratch);
- __ CallStub(&stub);
- }
-}
-
-
-void DeferredInlineSmiOperation::GenerateNonSmiInput() {
- // We know the left hand side is not a Smi and the right hand side is an
- // immediate value (value_) which can be represented as a Smi. We only
- // handle bit operations.
- ASSERT(Token::IsBitOp(op_));
-
- if (FLAG_debug_code) {
- __ Abort("Should not fall through!");
- }
-
- __ bind(&non_smi_input_);
- if (FLAG_debug_code) {
- __ AbortIfSmi(tos_register_);
- }
-
- // This routine uses the registers from r2 to r6. At the moment they are
- // not used by the register allocator, but when they are it should use
- // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
-
- Register heap_number_map = r7;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ ldr(r3, FieldMemOperand(tos_register_, HeapNumber::kMapOffset));
- __ cmp(r3, heap_number_map);
- // Not a number, fall back to the GenericBinaryOpStub.
- __ b(ne, entry_label());
-
- Register int32 = r2;
- // Not a 32bits signed int, fall back to the GenericBinaryOpStub.
- __ ConvertToInt32(tos_register_, int32, r4, r5, d0, entry_label());
-
- // tos_register_ (r0 or r1): Original heap number.
- // int32: signed 32bits int.
-
- Label result_not_a_smi;
- int shift_value = value_ & 0x1f;
- switch (op_) {
- case Token::BIT_OR: __ orr(int32, int32, Operand(value_)); break;
- case Token::BIT_XOR: __ eor(int32, int32, Operand(value_)); break;
- case Token::BIT_AND: __ and_(int32, int32, Operand(value_)); break;
- case Token::SAR:
- ASSERT(!reversed_);
- if (shift_value != 0) {
- __ mov(int32, Operand(int32, ASR, shift_value));
- }
- break;
- case Token::SHR:
- ASSERT(!reversed_);
- if (shift_value != 0) {
- __ mov(int32, Operand(int32, LSR, shift_value), SetCC);
- } else {
- // SHR is special because it is required to produce a positive answer.
- __ cmp(int32, Operand(0, RelocInfo::NONE));
- }
- if (CpuFeatures::IsSupported(VFP3)) {
- __ b(mi, &result_not_a_smi);
- } else {
- // Non VFP code cannot convert from unsigned to double, so fall back
- // to GenericBinaryOpStub.
- __ b(mi, entry_label());
- }
- break;
- case Token::SHL:
- ASSERT(!reversed_);
- if (shift_value != 0) {
- __ mov(int32, Operand(int32, LSL, shift_value));
- }
- break;
- default: UNREACHABLE();
- }
- // Check that the *signed* result fits in a smi. Not necessary for AND, SAR
- // if the shift if more than 0 or SHR if the shit is more than 1.
- if (!( (op_ == Token::AND && value_ >= 0) ||
- ((op_ == Token::SAR) && (shift_value > 0)) ||
- ((op_ == Token::SHR) && (shift_value > 1)))) {
- __ add(r3, int32, Operand(0x40000000), SetCC);
- __ b(mi, &result_not_a_smi);
- }
- __ mov(tos_register_, Operand(int32, LSL, kSmiTagSize));
- Exit();
-
- if (result_not_a_smi.is_linked()) {
- __ bind(&result_not_a_smi);
- if (overwrite_mode_ != OVERWRITE_LEFT) {
- ASSERT((overwrite_mode_ == NO_OVERWRITE) ||
- (overwrite_mode_ == OVERWRITE_RIGHT));
- // If the allocation fails, fall back to the GenericBinaryOpStub.
- __ AllocateHeapNumber(r4, r5, r6, heap_number_map, entry_label());
- // Nothing can go wrong now, so overwrite tos.
- __ mov(tos_register_, Operand(r4));
- }
-
- // int32: answer as signed 32bits integer.
- // tos_register_: Heap number to write the answer into.
- WriteNonSmiAnswer(int32, tos_register_, r3);
-
- Exit();
- }
-}
-
-
-void DeferredInlineSmiOperation::GenerateAnswerOutOfRange() {
- // The input from a bitwise operation were Smis but the result cannot fit
- // into a Smi, so we store it into a heap number. VirtualFrame::scratch0()
- // holds the untagged result to be converted. tos_register_ contains the
- // input. See the calls to JumpToAnswerOutOfRange to see how we got here.
- ASSERT(Token::IsBitOp(op_));
- ASSERT(!reversed_);
-
- Register untagged_result = VirtualFrame::scratch0();
-
- if (FLAG_debug_code) {
- __ Abort("Should not fall through!");
- }
-
- __ bind(&answer_out_of_range_);
- if (((value_ & 0x1f) == 0) && (op_ == Token::SHR)) {
- // >>> 0 is a special case where the untagged_result register is not set up
- // yet. We untag the input to get it.
- __ mov(untagged_result, Operand(tos_register_, ASR, kSmiTagSize));
- }
-
- // This routine uses the registers from r2 to r6. At the moment they are
- // not used by the register allocator, but when they are it should use
- // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
-
- // Allocate the result heap number.
- Register heap_number_map = VirtualFrame::scratch1();
- Register heap_number = r4;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- // If the allocation fails, fall back to the GenericBinaryOpStub.
- __ AllocateHeapNumber(heap_number, r5, r6, heap_number_map, entry_label());
- WriteNonSmiAnswer(untagged_result, heap_number, r3);
- __ mov(tos_register_, Operand(heap_number));
-
- Exit();
-}
-
-
-static bool PopCountLessThanEqual2(unsigned int x) {
- x &= x - 1;
- return (x & (x - 1)) == 0;
-}
-
-
-// Returns the index of the lowest bit set.
-static int BitPosition(unsigned x) {
- int bit_posn = 0;
- while ((x & 0xf) == 0) {
- bit_posn += 4;
- x >>= 4;
- }
- while ((x & 1) == 0) {
- bit_posn++;
- x >>= 1;
- }
- return bit_posn;
-}
-
-
-// Can we multiply by x with max two shifts and an add.
-// This answers yes to all integers from 2 to 10.
-static bool IsEasyToMultiplyBy(int x) {
- if (x < 2) return false; // Avoid special cases.
- if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows.
- if (IsPowerOf2(x)) return true; // Simple shift.
- if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift.
- if (IsPowerOf2(x + 1)) return true; // Patterns like 11111.
- return false;
-}
-
-
-// Can multiply by anything that IsEasyToMultiplyBy returns true for.
-// Source and destination may be the same register. This routine does
-// not set carry and overflow the way a mul instruction would.
-static void InlineMultiplyByKnownInt(MacroAssembler* masm,
- Register source,
- Register destination,
- int known_int) {
- if (IsPowerOf2(known_int)) {
- masm->mov(destination, Operand(source, LSL, BitPosition(known_int)));
- } else if (PopCountLessThanEqual2(known_int)) {
- int first_bit = BitPosition(known_int);
- int second_bit = BitPosition(known_int ^ (1 << first_bit));
- masm->add(destination, source,
- Operand(source, LSL, second_bit - first_bit));
- if (first_bit != 0) {
- masm->mov(destination, Operand(destination, LSL, first_bit));
- }
- } else {
- ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111.
- int the_bit = BitPosition(known_int + 1);
- masm->rsb(destination, source, Operand(source, LSL, the_bit));
- }
-}
-
-
-void CodeGenerator::SmiOperation(Token::Value op,
- Handle<Object> value,
- bool reversed,
- OverwriteMode mode) {
- int int_value = Smi::cast(*value)->value();
-
- bool both_sides_are_smi = frame_->KnownSmiAt(0);
-
- bool something_to_inline;
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::BIT_AND:
- case Token::BIT_OR:
- case Token::BIT_XOR: {
- something_to_inline = true;
- break;
- }
- case Token::SHL: {
- something_to_inline = (both_sides_are_smi || !reversed);
- break;
- }
- case Token::SHR:
- case Token::SAR: {
- if (reversed) {
- something_to_inline = false;
- } else {
- something_to_inline = true;
- }
- break;
- }
- case Token::MOD: {
- if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
- something_to_inline = false;
- } else {
- something_to_inline = true;
- }
- break;
- }
- case Token::MUL: {
- if (!IsEasyToMultiplyBy(int_value)) {
- something_to_inline = false;
- } else {
- something_to_inline = true;
- }
- break;
- }
- default: {
- something_to_inline = false;
- break;
- }
- }
-
- if (!something_to_inline) {
- if (!reversed) {
- // Push the rhs onto the virtual frame by putting it in a TOS register.
- Register rhs = frame_->GetTOSRegister();
- __ mov(rhs, Operand(value));
- frame_->EmitPush(rhs, TypeInfo::Smi());
- GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, int_value);
- } else {
- // Pop the rhs, then push lhs and rhs in the right order. Only performs
- // at most one pop, the rest takes place in TOS registers.
- Register lhs = frame_->GetTOSRegister(); // Get reg for pushing.
- Register rhs = frame_->PopToRegister(lhs); // Don't use lhs for this.
- __ mov(lhs, Operand(value));
- frame_->EmitPush(lhs, TypeInfo::Smi());
- TypeInfo t = both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Unknown();
- frame_->EmitPush(rhs, t);
- GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI,
- GenericBinaryOpStub::kUnknownIntValue);
- }
- return;
- }
-
- // We move the top of stack to a register (normally no move is invoved).
- Register tos = frame_->PopToRegister();
- switch (op) {
- case Token::ADD: {
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
-
- __ add(tos, tos, Operand(value), SetCC);
- deferred->Branch(vs);
- if (!both_sides_are_smi) {
- __ tst(tos, Operand(kSmiTagMask));
- deferred->Branch(ne);
- }
- deferred->BindExit();
- frame_->EmitPush(tos);
- break;
- }
-
- case Token::SUB: {
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
-
- if (reversed) {
- __ rsb(tos, tos, Operand(value), SetCC);
- } else {
- __ sub(tos, tos, Operand(value), SetCC);
- }
- deferred->Branch(vs);
- if (!both_sides_are_smi) {
- __ tst(tos, Operand(kSmiTagMask));
- deferred->Branch(ne);
- }
- deferred->BindExit();
- frame_->EmitPush(tos);
- break;
- }
-
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND: {
- if (both_sides_are_smi) {
- switch (op) {
- case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
- case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
- case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
- default: UNREACHABLE();
- }
- frame_->EmitPush(tos, TypeInfo::Smi());
- } else {
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
- __ tst(tos, Operand(kSmiTagMask));
- deferred->JumpToNonSmiInput(ne);
- switch (op) {
- case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
- case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
- case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
- default: UNREACHABLE();
- }
- deferred->BindExit();
- TypeInfo result_type = TypeInfo::Integer32();
- if (op == Token::BIT_AND && int_value >= 0) {
- result_type = TypeInfo::Smi();
- }
- frame_->EmitPush(tos, result_type);
- }
- break;
- }
-
- case Token::SHL:
- if (reversed) {
- ASSERT(both_sides_are_smi);
- int max_shift = 0;
- int max_result = int_value == 0 ? 1 : int_value;
- while (Smi::IsValid(max_result << 1)) {
- max_shift++;
- max_result <<= 1;
- }
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, true, mode, tos);
- // Mask off the last 5 bits of the shift operand (rhs). This is part
- // of the definition of shift in JS and we know we have a Smi so we
- // can safely do this. The masked version gets passed to the
- // deferred code, but that makes no difference.
- __ and_(tos, tos, Operand(Smi::FromInt(0x1f)));
- __ cmp(tos, Operand(Smi::FromInt(max_shift)));
- deferred->Branch(ge);
- Register scratch = VirtualFrame::scratch0();
- __ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // Untag.
- __ mov(tos, Operand(Smi::FromInt(int_value))); // Load constant.
- __ mov(tos, Operand(tos, LSL, scratch)); // Shift constant.
- deferred->BindExit();
- TypeInfo result = TypeInfo::Integer32();
- frame_->EmitPush(tos, result);
- break;
- }
- // Fall through!
- case Token::SHR:
- case Token::SAR: {
- ASSERT(!reversed);
- int shift_value = int_value & 0x1f;
- TypeInfo result = TypeInfo::Number();
-
- if (op == Token::SHR) {
- if (shift_value > 1) {
- result = TypeInfo::Smi();
- } else if (shift_value > 0) {
- result = TypeInfo::Integer32();
- }
- } else if (op == Token::SAR) {
- if (shift_value > 0) {
- result = TypeInfo::Smi();
- } else {
- result = TypeInfo::Integer32();
- }
- } else {
- ASSERT(op == Token::SHL);
- result = TypeInfo::Integer32();
- }
-
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
- if (!both_sides_are_smi) {
- __ tst(tos, Operand(kSmiTagMask));
- deferred->JumpToNonSmiInput(ne);
- }
- switch (op) {
- case Token::SHL: {
- if (shift_value != 0) {
- Register untagged_result = VirtualFrame::scratch0();
- Register scratch = VirtualFrame::scratch1();
- int adjusted_shift = shift_value - kSmiTagSize;
- ASSERT(adjusted_shift >= 0);
-
- if (adjusted_shift != 0) {
- __ mov(untagged_result, Operand(tos, LSL, adjusted_shift));
- } else {
- __ mov(untagged_result, Operand(tos));
- }
- // Check that the *signed* result fits in a smi.
- __ add(scratch, untagged_result, Operand(0x40000000), SetCC);
- deferred->JumpToAnswerOutOfRange(mi);
- __ mov(tos, Operand(untagged_result, LSL, kSmiTagSize));
- }
- break;
- }
- case Token::SHR: {
- if (shift_value != 0) {
- Register untagged_result = VirtualFrame::scratch0();
- // Remove tag.
- __ mov(untagged_result, Operand(tos, ASR, kSmiTagSize));
- __ mov(untagged_result, Operand(untagged_result, LSR, shift_value));
- if (shift_value == 1) {
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging
- // - 0x40000000: this number would convert to negative when Smi
- // tagging.
- // These two cases can only happen with shifts by 0 or 1 when
- // handed a valid smi.
- __ tst(untagged_result, Operand(0xc0000000));
- deferred->JumpToAnswerOutOfRange(ne);
- }
- __ mov(tos, Operand(untagged_result, LSL, kSmiTagSize));
- } else {
- __ cmp(tos, Operand(0, RelocInfo::NONE));
- deferred->JumpToAnswerOutOfRange(mi);
- }
- break;
- }
- case Token::SAR: {
- if (shift_value != 0) {
- // Do the shift and the tag removal in one operation. If the shift
- // is 31 bits (the highest possible value) then we emit the
- // instruction as a shift by 0 which in the ARM ISA means shift
- // arithmetically by 32.
- __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
- __ mov(tos, Operand(tos, LSL, kSmiTagSize));
- }
- break;
- }
- default: UNREACHABLE();
- }
- deferred->BindExit();
- frame_->EmitPush(tos, result);
- break;
- }
-
- case Token::MOD: {
- ASSERT(!reversed);
- ASSERT(int_value >= 2);
- ASSERT(IsPowerOf2(int_value));
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
- unsigned mask = (0x80000000u | kSmiTagMask);
- __ tst(tos, Operand(mask));
- deferred->Branch(ne); // Go to deferred code on non-Smis and negative.
- mask = (int_value << kSmiTagSize) - 1;
- __ and_(tos, tos, Operand(mask));
- deferred->BindExit();
- // Mod of positive power of 2 Smi gives a Smi if the lhs is an integer.
- frame_->EmitPush(
- tos,
- both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Number());
- break;
- }
-
- case Token::MUL: {
- ASSERT(IsEasyToMultiplyBy(int_value));
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
- unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
- max_smi_that_wont_overflow <<= kSmiTagSize;
- unsigned mask = 0x80000000u;
- while ((mask & max_smi_that_wont_overflow) == 0) {
- mask |= mask >> 1;
- }
- mask |= kSmiTagMask;
- // This does a single mask that checks for a too high value in a
- // conservative way and for a non-Smi. It also filters out negative
- // numbers, unfortunately, but since this code is inline we prefer
- // brevity to comprehensiveness.
- __ tst(tos, Operand(mask));
- deferred->Branch(ne);
- InlineMultiplyByKnownInt(masm_, tos, tos, int_value);
- deferred->BindExit();
- frame_->EmitPush(tos);
- break;
- }
-
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void CodeGenerator::Comparison(Condition cond,
- Expression* left,
- Expression* right,
- bool strict) {
- VirtualFrame::RegisterAllocationScope scope(this);
-
- if (left != NULL) Load(left);
- if (right != NULL) Load(right);
-
- // sp[0] : y
- // sp[1] : x
- // result : cc register
-
- // Strict only makes sense for equality comparisons.
- ASSERT(!strict || cond == eq);
-
- Register lhs;
- Register rhs;
-
- bool lhs_is_smi;
- bool rhs_is_smi;
-
- // We load the top two stack positions into registers chosen by the virtual
- // frame. This should keep the register shuffling to a minimum.
- // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
- if (cond == gt || cond == le) {
- cond = ReverseCondition(cond);
- lhs_is_smi = frame_->KnownSmiAt(0);
- rhs_is_smi = frame_->KnownSmiAt(1);
- lhs = frame_->PopToRegister();
- rhs = frame_->PopToRegister(lhs); // Don't pop to the same register again!
- } else {
- rhs_is_smi = frame_->KnownSmiAt(0);
- lhs_is_smi = frame_->KnownSmiAt(1);
- rhs = frame_->PopToRegister();
- lhs = frame_->PopToRegister(rhs); // Don't pop to the same register again!
- }
-
- bool both_sides_are_smi = (lhs_is_smi && rhs_is_smi);
-
- ASSERT(rhs.is(r0) || rhs.is(r1));
- ASSERT(lhs.is(r0) || lhs.is(r1));
-
- JumpTarget exit;
-
- if (!both_sides_are_smi) {
- // Now we have the two sides in r0 and r1. We flush any other registers
- // because the stub doesn't know about register allocation.
- frame_->SpillAll();
- Register scratch = VirtualFrame::scratch0();
- Register smi_test_reg;
- if (lhs_is_smi) {
- smi_test_reg = rhs;
- } else if (rhs_is_smi) {
- smi_test_reg = lhs;
- } else {
- __ orr(scratch, lhs, Operand(rhs));
- smi_test_reg = scratch;
- }
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- JumpTarget smi;
- smi.Branch(eq);
-
- // Perform non-smi comparison by stub.
- // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
- // We call with 0 args because there are 0 on the stack.
- CompareStub stub(cond, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
- frame_->CallStub(&stub, 0);
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- exit.Jump();
-
- smi.Bind();
- }
-
- // Do smi comparisons by pointer comparison.
- __ cmp(lhs, Operand(rhs));
-
- exit.Bind();
- cc_reg_ = cond;
-}
-
-
-// Call the function on the stack with the given arguments.
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
- CallFunctionFlags flags,
- int position) {
- // Push the arguments ("left-to-right") on the stack.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- // Record the position for debugging purposes.
- CodeForSourcePosition(position);
-
- // Use the shared code stub to call the function.
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop, flags);
- frame_->CallStub(&call_function, arg_count + 1);
-
- // Restore context and pop function from the stack.
- __ ldr(cp, frame_->Context());
- frame_->Drop(); // discard the TOS
-}
-
-
-void CodeGenerator::CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position) {
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments).
- // If the arguments object of the scope has not been allocated,
- // and x.apply is Function.prototype.apply, this optimization
- // just copies y and the arguments of the current function on the
- // stack, as receiver and arguments, and calls x.
- // In the implementation comments, we call x the applicand
- // and y the receiver.
-
- ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
- ASSERT(arguments->IsArguments());
-
- // Load applicand.apply onto the stack. This will usually
- // give us a megamorphic load site. Not super, but it works.
- Load(applicand);
- Handle<String> name = FACTORY->LookupAsciiSymbol("apply");
- frame_->Dup();
- frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
- frame_->EmitPush(r0);
-
- // Load the receiver and the existing arguments object onto the
- // expression stack. Avoid allocating the arguments object here.
- Load(receiver);
- LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
-
- // At this point the top two stack elements are probably in registers
- // since they were just loaded. Ensure they are in regs and get the
- // regs.
- Register receiver_reg = frame_->Peek2();
- Register arguments_reg = frame_->Peek();
-
- // From now on the frame is spilled.
- frame_->SpillAll();
-
- // Emit the source position information after having loaded the
- // receiver and the arguments.
- CodeForSourcePosition(position);
- // Contents of the stack at this point:
- // sp[0]: arguments object of the current function or the hole.
- // sp[1]: receiver
- // sp[2]: applicand.apply
- // sp[3]: applicand.
-
- // Check if the arguments object has been lazily allocated
- // already. If so, just use that instead of copying the arguments
- // from the stack. This also deals with cases where a local variable
- // named 'arguments' has been introduced.
- JumpTarget slow;
- Label done;
- __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
- __ cmp(ip, arguments_reg);
- slow.Branch(ne);
-
- Label build_args;
- // Get rid of the arguments object probe.
- frame_->Drop();
- // Stack now has 3 elements on it.
- // Contents of stack at this point:
- // sp[0]: receiver - in the receiver_reg register.
- // sp[1]: applicand.apply
- // sp[2]: applicand.
-
- // Check that the receiver really is a JavaScript object.
- __ JumpIfSmi(receiver_reg, &build_args);
- // We allow all JSObjects including JSFunctions. As long as
- // JS_FUNCTION_TYPE is the last instance type and it is right
- // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
- // bound.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CompareObjectType(receiver_reg, r2, r3, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &build_args);
-
- // Check that applicand.apply is Function.prototype.apply.
- __ ldr(r0, MemOperand(sp, kPointerSize));
- __ JumpIfSmi(r0, &build_args);
- __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
- __ b(ne, &build_args);
- Handle<Code> apply_code(
- Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply));
- __ ldr(r1, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
- __ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ cmp(r1, Operand(apply_code));
- __ b(ne, &build_args);
-
- // Check that applicand is a function.
- __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
- __ JumpIfSmi(r1, &build_args);
- __ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
- __ b(ne, &build_args);
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- Label invoke, adapted;
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adapted);
-
- // No arguments adaptor frame. Copy fixed number of arguments.
- __ mov(r0, Operand(scope()->num_parameters()));
- for (int i = 0; i < scope()->num_parameters(); i++) {
- __ ldr(r2, frame_->ParameterAt(i));
- __ push(r2);
- }
- __ jmp(&invoke);
-
- // Arguments adaptor frame present. Copy arguments from there, but
- // avoid copying too many arguments to avoid stack overflows.
- __ bind(&adapted);
- static const uint32_t kArgumentsLimit = 1 * KB;
- __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ mov(r0, Operand(r0, LSR, kSmiTagSize));
- __ mov(r3, r0);
- __ cmp(r0, Operand(kArgumentsLimit));
- __ b(gt, &build_args);
-
- // Loop through the arguments pushing them onto the execution
- // stack. We don't inform the virtual frame of the push, so we don't
- // have to worry about getting rid of the elements from the virtual
- // frame.
- Label loop;
- // r3 is a small non-negative integer, due to the test above.
- __ cmp(r3, Operand(0, RelocInfo::NONE));
- __ b(eq, &invoke);
- // Compute the address of the first argument.
- __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2));
- __ add(r2, r2, Operand(kPointerSize));
- __ bind(&loop);
- // Post-decrement argument address by kPointerSize on each iteration.
- __ ldr(r4, MemOperand(r2, kPointerSize, NegPostIndex));
- __ push(r4);
- __ sub(r3, r3, Operand(1), SetCC);
- __ b(gt, &loop);
-
- // Invoke the function.
- __ bind(&invoke);
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION);
- // Drop applicand.apply and applicand from the stack, and push
- // the result of the function call, but leave the spilled frame
- // unchanged, with 3 elements, so it is correct when we compile the
- // slow-case code.
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ push(r0);
- // Stack now has 1 element:
- // sp[0]: result
- __ jmp(&done);
-
- // Slow-case: Allocate the arguments object since we know it isn't
- // there, and fall-through to the slow-case where we call
- // applicand.apply.
- __ bind(&build_args);
- // Stack now has 3 elements, because we have jumped from where:
- // sp[0]: receiver
- // sp[1]: applicand.apply
- // sp[2]: applicand.
- StoreArgumentsObject(false);
-
- // Stack and frame now have 4 elements.
- slow.Bind();
-
- // Generic computation of x.apply(y, args) with no special optimization.
- // Flip applicand.apply and applicand on the stack, so
- // applicand looks like the receiver of the applicand.apply call.
- // Then process it as a normal function call.
- __ ldr(r0, MemOperand(sp, 3 * kPointerSize));
- __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
- __ Strd(r0, r1, MemOperand(sp, 2 * kPointerSize));
-
- CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
- frame_->CallStub(&call_function, 3);
- // The function and its two arguments have been dropped.
- frame_->Drop(); // Drop the receiver as well.
- frame_->EmitPush(r0);
- frame_->SpillAll(); // A spilled frame is also jumping to label done.
- // Stack now has 1 element:
- // sp[0]: result
- __ bind(&done);
-
- // Restore the context register after a call.
- __ ldr(cp, frame_->Context());
-}
-
-
-void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
- ASSERT(has_cc());
- Condition cond = if_true ? cc_reg_ : NegateCondition(cc_reg_);
- target->Branch(cond);
- cc_reg_ = al;
-}
-
-
-void CodeGenerator::CheckStack() {
- frame_->SpillAll();
- Comment cmnt(masm_, "[ check stack");
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- masm_->cmp(sp, Operand(ip));
- StackCheckStub stub;
- // Call the stub if lower.
- masm_->mov(ip,
- Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
- RelocInfo::CODE_TARGET),
- LeaveCC,
- lo);
- masm_->Call(ip, lo);
-}
-
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
- Visit(statements->at(i));
- }
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Block");
- CodeForStatementPosition(node);
- node->break_target()->SetExpectedHeight();
- VisitStatements(node->statements());
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- node->break_target()->Unuse();
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- frame_->EmitPush(cp);
- frame_->EmitPush(Operand(pairs));
- frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));
- frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
-
- frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
- // The result is discarded.
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Declaration");
- Variable* var = node->proxy()->var();
- ASSERT(var != NULL); // must have been resolved
- Slot* slot = var->AsSlot();
-
- // If it was not possible to allocate the variable at compile time,
- // we need to "declare" it at runtime to make sure it actually
- // exists in the local context.
- if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Variables with a "LOOKUP" slot were introduced as non-locals
- // during variable resolution and must have mode DYNAMIC.
- ASSERT(var->is_dynamic());
- // For now, just do a runtime call.
- frame_->EmitPush(cp);
- frame_->EmitPush(Operand(var->name()));
- // Declaration nodes are always declared in only two modes.
- ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
- PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
- frame_->EmitPush(Operand(Smi::FromInt(attr)));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (node->mode() == Variable::CONST) {
- frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
- } else if (node->fun() != NULL) {
- Load(node->fun());
- } else {
- frame_->EmitPush(Operand(0, RelocInfo::NONE));
- }
-
- frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
- // Ignore the return value (declarations are statements).
-
- ASSERT(frame_->height() == original_height);
- return;
- }
-
- ASSERT(!var->is_global());
-
- // If we have a function or a constant, we need to initialize the variable.
- Expression* val = NULL;
- if (node->mode() == Variable::CONST) {
- val = new Literal(FACTORY->the_hole_value());
- } else {
- val = node->fun(); // NULL if we don't have a function
- }
-
-
- if (val != NULL) {
- WriteBarrierCharacter wb_info =
- val->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
- if (val->AsLiteral() != NULL) wb_info = NEVER_NEWSPACE;
- // Set initial value.
- Reference target(this, node->proxy());
- Load(val);
- target.SetValue(NOT_CONST_INIT, wb_info);
-
- // Get rid of the assigned value (declarations are statements).
- frame_->Drop();
- }
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ ExpressionStatement");
- CodeForStatementPosition(node);
- Expression* expression = node->expression();
- expression->MarkAsStatement();
- Load(expression);
- frame_->Drop();
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "// EmptyStatement");
- CodeForStatementPosition(node);
- // nothing to do
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ IfStatement");
- // Generate different code depending on which parts of the if statement
- // are present or not.
- bool has_then_stm = node->HasThenStatement();
- bool has_else_stm = node->HasElseStatement();
-
- CodeForStatementPosition(node);
-
- JumpTarget exit;
- if (has_then_stm && has_else_stm) {
- Comment cmnt(masm_, "[ IfThenElse");
- JumpTarget then;
- JumpTarget else_;
- // if (cond)
- LoadCondition(node->condition(), &then, &else_, true);
- if (frame_ != NULL) {
- Branch(false, &else_);
- }
- // then
- if (frame_ != NULL || then.is_linked()) {
- then.Bind();
- Visit(node->then_statement());
- }
- if (frame_ != NULL) {
- exit.Jump();
- }
- // else
- if (else_.is_linked()) {
- else_.Bind();
- Visit(node->else_statement());
- }
-
- } else if (has_then_stm) {
- Comment cmnt(masm_, "[ IfThen");
- ASSERT(!has_else_stm);
- JumpTarget then;
- // if (cond)
- LoadCondition(node->condition(), &then, &exit, true);
- if (frame_ != NULL) {
- Branch(false, &exit);
- }
- // then
- if (frame_ != NULL || then.is_linked()) {
- then.Bind();
- Visit(node->then_statement());
- }
-
- } else if (has_else_stm) {
- Comment cmnt(masm_, "[ IfElse");
- ASSERT(!has_then_stm);
- JumpTarget else_;
- // if (!cond)
- LoadCondition(node->condition(), &exit, &else_, true);
- if (frame_ != NULL) {
- Branch(true, &exit);
- }
- // else
- if (frame_ != NULL || else_.is_linked()) {
- else_.Bind();
- Visit(node->else_statement());
- }
-
- } else {
- Comment cmnt(masm_, "[ If");
- ASSERT(!has_then_stm && !has_else_stm);
- // if (cond)
- LoadCondition(node->condition(), &exit, &exit, false);
- if (frame_ != NULL) {
- if (has_cc()) {
- cc_reg_ = al;
- } else {
- frame_->Drop();
- }
- }
- }
-
- // end
- if (exit.is_linked()) {
- exit.Bind();
- }
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
- Comment cmnt(masm_, "[ ContinueStatement");
- CodeForStatementPosition(node);
- node->target()->continue_target()->Jump();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
- Comment cmnt(masm_, "[ BreakStatement");
- CodeForStatementPosition(node);
- node->target()->break_target()->Jump();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- Comment cmnt(masm_, "[ ReturnStatement");
-
- CodeForStatementPosition(node);
- Load(node->expression());
- frame_->PopToR0();
- frame_->PrepareForReturn();
- if (function_return_is_shadowed_) {
- function_return_.Jump();
- } else {
- // Pop the result from the frame and prepare the frame for
- // returning thus making it easier to merge.
- if (function_return_.is_bound()) {
- // If the function return label is already bound we reuse the
- // code by jumping to the return site.
- function_return_.Jump();
- } else {
- function_return_.Bind();
- GenerateReturnSequence();
- }
- }
-}
-
-
-void CodeGenerator::GenerateReturnSequence() {
- if (FLAG_trace) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns the parameter as it is.
- frame_->EmitPush(r0);
- frame_->CallRuntime(Runtime::kTraceExit, 1);
- }
-
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
- // Make sure that the constant pool is not emitted inside of the return
- // sequence.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Tear down the frame which will restore the caller's frame pointer and
- // the link register.
- frame_->Exit();
-
- // Here we use masm_-> instead of the __ macro to avoid the code coverage
- // tool from instrumenting as we rely on the code size here.
- int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
- masm_->add(sp, sp, Operand(sp_delta));
- masm_->Jump(lr);
- DeleteFrame();
-
-#ifdef DEBUG
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- ASSERT(Assembler::kJSReturnSequenceInstructions <=
- masm_->InstructionsGeneratedSince(&check_exit_codesize));
-#endif
- }
-}
-
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ WithEnterStatement");
- CodeForStatementPosition(node);
- Load(node->expression());
- if (node->is_catch_block()) {
- frame_->CallRuntime(Runtime::kPushCatchContext, 1);
- } else {
- frame_->CallRuntime(Runtime::kPushContext, 1);
- }
-#ifdef DEBUG
- JumpTarget verified_true;
- __ cmp(r0, cp);
- verified_true.Branch(eq);
- __ stop("PushContext: r0 is expected to be the same as cp");
- verified_true.Bind();
-#endif
- // Update context local.
- __ str(cp, frame_->Context());
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ WithExitStatement");
- CodeForStatementPosition(node);
- // Pop context.
- __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
- // Update context local.
- __ str(cp, frame_->Context());
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ SwitchStatement");
- CodeForStatementPosition(node);
- node->break_target()->SetExpectedHeight();
-
- Load(node->tag());
-
- JumpTarget next_test;
- JumpTarget fall_through;
- JumpTarget default_entry;
- JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
- ZoneList<CaseClause*>* cases = node->cases();
- int length = cases->length();
- CaseClause* default_clause = NULL;
-
- for (int i = 0; i < length; i++) {
- CaseClause* clause = cases->at(i);
- if (clause->is_default()) {
- // Remember the default clause and compile it at the end.
- default_clause = clause;
- continue;
- }
-
- Comment cmnt(masm_, "[ Case clause");
- // Compile the test.
- next_test.Bind();
- next_test.Unuse();
- // Duplicate TOS.
- frame_->Dup();
- Comparison(eq, NULL, clause->label(), true);
- Branch(false, &next_test);
-
- // Before entering the body from the test, remove the switch value from
- // the stack.
- frame_->Drop();
-
- // Label the body so that fall through is enabled.
- if (i > 0 && cases->at(i - 1)->is_default()) {
- default_exit.Bind();
- } else {
- fall_through.Bind();
- fall_through.Unuse();
- }
- VisitStatements(clause->statements());
-
- // If control flow can fall through from the body, jump to the next body
- // or the end of the statement.
- if (frame_ != NULL) {
- if (i < length - 1 && cases->at(i + 1)->is_default()) {
- default_entry.Jump();
- } else {
- fall_through.Jump();
- }
- }
- }
-
- // The final "test" removes the switch value.
- next_test.Bind();
- frame_->Drop();
-
- // If there is a default clause, compile it.
- if (default_clause != NULL) {
- Comment cmnt(masm_, "[ Default clause");
- default_entry.Bind();
- VisitStatements(default_clause->statements());
- // If control flow can fall out of the default and there is a case after
- // it, jump to that case's body.
- if (frame_ != NULL && default_exit.is_bound()) {
- default_exit.Jump();
- }
- }
-
- if (fall_through.is_linked()) {
- fall_through.Bind();
- }
-
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- node->break_target()->Unuse();
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ DoWhileStatement");
- CodeForStatementPosition(node);
- node->break_target()->SetExpectedHeight();
- JumpTarget body(JumpTarget::BIDIRECTIONAL);
- IncrementLoopNesting();
-
- // Label the top of the loop for the backward CFG edge. If the test
- // is always true we can use the continue target, and if the test is
- // always false there is no need.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- switch (info) {
- case ALWAYS_TRUE:
- node->continue_target()->SetExpectedHeight();
- node->continue_target()->Bind();
- break;
- case ALWAYS_FALSE:
- node->continue_target()->SetExpectedHeight();
- break;
- case DONT_KNOW:
- node->continue_target()->SetExpectedHeight();
- body.Bind();
- break;
- }
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- // Compile the test.
- switch (info) {
- case ALWAYS_TRUE:
- // If control can fall off the end of the body, jump back to the
- // top.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- break;
- case ALWAYS_FALSE:
- // If we have a continue in the body, we only have to bind its
- // jump target.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- break;
- case DONT_KNOW:
- // We have to compile the test expression if it can be reached by
- // control flow falling out of the body or via continue.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (has_valid_frame()) {
- Comment cmnt(masm_, "[ DoWhileCondition");
- CodeForDoWhileConditionPosition(node);
- LoadCondition(node->cond(), &body, node->break_target(), true);
- if (has_valid_frame()) {
- // A invalid frame here indicates that control did not
- // fall out of the test expression.
- Branch(true, &body);
- }
- }
- break;
- }
-
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ WhileStatement");
- CodeForStatementPosition(node);
-
- // If the test is never true and has no side effects there is no need
- // to compile the test or body.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- if (info == ALWAYS_FALSE) return;
-
- node->break_target()->SetExpectedHeight();
- IncrementLoopNesting();
-
- // Label the top of the loop with the continue target for the backward
- // CFG edge.
- node->continue_target()->SetExpectedHeight();
- node->continue_target()->Bind();
-
- if (info == DONT_KNOW) {
- JumpTarget body(JumpTarget::BIDIRECTIONAL);
- LoadCondition(node->cond(), &body, node->break_target(), true);
- if (has_valid_frame()) {
- // A NULL frame indicates that control did not fall out of the
- // test expression.
- Branch(false, node->break_target());
- }
- if (has_valid_frame() || body.is_linked()) {
- body.Bind();
- }
- }
-
- if (has_valid_frame()) {
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- // If control flow can fall out of the body, jump back to the top.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ ForStatement");
- CodeForStatementPosition(node);
- if (node->init() != NULL) {
- Visit(node->init());
- }
-
- // If the test is never true there is no need to compile the test or
- // body.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- if (info == ALWAYS_FALSE) return;
-
- node->break_target()->SetExpectedHeight();
- IncrementLoopNesting();
-
- // We know that the loop index is a smi if it is not modified in the
- // loop body and it is checked against a constant limit in the loop
- // condition. In this case, we reset the static type information of the
- // loop index to smi before compiling the body, the update expression, and
- // the bottom check of the loop condition.
- TypeInfoCodeGenState type_info_scope(this,
- node->is_fast_smi_loop() ?
- node->loop_variable()->AsSlot() :
- NULL,
- TypeInfo::Smi());
-
- // If there is no update statement, label the top of the loop with the
- // continue target, otherwise with the loop target.
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
- if (node->next() == NULL) {
- node->continue_target()->SetExpectedHeight();
- node->continue_target()->Bind();
- } else {
- node->continue_target()->SetExpectedHeight();
- loop.Bind();
- }
-
- // If the test is always true, there is no need to compile it.
- if (info == DONT_KNOW) {
- JumpTarget body;
- LoadCondition(node->cond(), &body, node->break_target(), true);
- if (has_valid_frame()) {
- Branch(false, node->break_target());
- }
- if (has_valid_frame() || body.is_linked()) {
- body.Bind();
- }
- }
-
- if (has_valid_frame()) {
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- if (node->next() == NULL) {
- // If there is no update statement and control flow can fall out
- // of the loop, jump directly to the continue label.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- } else {
- // If there is an update statement and control flow can reach it
- // via falling out of the body of the loop or continuing, we
- // compile the update statement.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (has_valid_frame()) {
- // Record source position of the statement as this code which is
- // after the code for the body actually belongs to the loop
- // statement and not the body.
- CodeForStatementPosition(node);
- Visit(node->next());
- loop.Jump();
- }
- }
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ ForInStatement");
- CodeForStatementPosition(node);
-
- JumpTarget primitive;
- JumpTarget jsobject;
- JumpTarget fixed_array;
- JumpTarget entry(JumpTarget::BIDIRECTIONAL);
- JumpTarget end_del_check;
- JumpTarget exit;
-
- // Get the object to enumerate over (converted to JSObject).
- Load(node->enumerable());
-
- VirtualFrame::SpilledScope spilled_scope(frame_);
- // Both SpiderMonkey and kjs ignore null and undefined in contrast
- // to the specification. 12.6.4 mandates a call to ToObject.
- frame_->EmitPop(r0);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
- exit.Branch(eq);
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r0, ip);
- exit.Branch(eq);
-
- // Stack layout in body:
- // [iteration counter (Smi)]
- // [length of array]
- // [FixedArray]
- // [Map or 0]
- // [Object]
-
- // Check if enumerable is already a JSObject
- __ tst(r0, Operand(kSmiTagMask));
- primitive.Branch(eq);
- __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
- jsobject.Branch(hs);
-
- primitive.Bind();
- frame_->EmitPush(r0);
- frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
-
- jsobject.Bind();
- // Get the set of properties (as a FixedArray or Map).
- // r0: value to be iterated over
- frame_->EmitPush(r0); // Push the object being iterated over.
-
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- JumpTarget call_runtime;
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
- JumpTarget check_prototype;
- JumpTarget use_cache;
- __ mov(r1, Operand(r0));
- loop.Bind();
- // Check that there are no elements.
- __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
- __ cmp(r2, r4);
- call_runtime.Branch(ne);
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in r3 for the subsequent
- // prototype load.
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset));
- __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex);
- __ cmp(r2, ip);
- call_runtime.Branch(eq);
- // Check that there in an enum cache in the non-empty instance
- // descriptors. This is the case if the next enumeration index
- // field does not contain a smi.
- __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset));
- __ tst(r2, Operand(kSmiTagMask));
- call_runtime.Branch(eq);
- // For all objects but the receiver, check that the cache is empty.
- // r4: empty fixed array root.
- __ cmp(r1, r0);
- check_prototype.Branch(eq);
- __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
- __ cmp(r2, r4);
- call_runtime.Branch(ne);
- check_prototype.Bind();
- // Load the prototype from the map and loop if non-null.
- __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset));
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r1, ip);
- loop.Branch(ne);
- // The enum cache is valid. Load the map of the object being
- // iterated over and use the cache for the iteration.
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- use_cache.Jump();
-
- call_runtime.Bind();
- // Call the runtime to get the property names for the object.
- frame_->EmitPush(r0); // push the object (slot 4) for the runtime call
- frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
- // If we got a map from the runtime call, we can do a fast
- // modification check. Otherwise, we got a fixed array, and we have
- // to do a slow check.
- // r0: map or fixed array (result from call to
- // Runtime::kGetPropertyNamesFast)
- __ mov(r2, Operand(r0));
- __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kMetaMapRootIndex);
- __ cmp(r1, ip);
- fixed_array.Branch(ne);
-
- use_cache.Bind();
- // Get enum cache
- // r0: map (either the result from a call to
- // Runtime::kGetPropertyNamesFast or has been fetched directly from
- // the object)
- __ mov(r1, Operand(r0));
- __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
- __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
- __ ldr(r2,
- FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
- frame_->EmitPush(r0); // map
- frame_->EmitPush(r2); // enum cache bridge cache
- __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
- frame_->EmitPush(r0);
- __ mov(r0, Operand(Smi::FromInt(0)));
- frame_->EmitPush(r0);
- entry.Jump();
-
- fixed_array.Bind();
- __ mov(r1, Operand(Smi::FromInt(0)));
- frame_->EmitPush(r1); // insert 0 in place of Map
- frame_->EmitPush(r0);
-
- // Push the length of the array and the initial index onto the stack.
- __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
- frame_->EmitPush(r0);
- __ mov(r0, Operand(Smi::FromInt(0))); // init index
- frame_->EmitPush(r0);
-
- // Condition.
- entry.Bind();
- // sp[0] : index
- // sp[1] : array/enum cache length
- // sp[2] : array or enum cache
- // sp[3] : 0 or map
- // sp[4] : enumerable
- // Grab the current frame's height for the break and continue
- // targets only after all the state is pushed on the frame.
- node->break_target()->SetExpectedHeight();
- node->continue_target()->SetExpectedHeight();
-
- // Load the current count to r0, load the length to r1.
- __ Ldrd(r0, r1, frame_->ElementAt(0));
- __ cmp(r0, r1); // compare to the array length
- node->break_target()->Branch(hs);
-
- // Get the i'th entry of the array.
- __ ldr(r2, frame_->ElementAt(2));
- __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Get Map or 0.
- __ ldr(r2, frame_->ElementAt(3));
- // Check if this (still) matches the map of the enumerable.
- // If not, we have to filter the key.
- __ ldr(r1, frame_->ElementAt(4));
- __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r1, Operand(r2));
- end_del_check.Branch(eq);
-
- // Convert the entry to a string (or null if it isn't a property anymore).
- __ ldr(r0, frame_->ElementAt(4)); // push enumerable
- frame_->EmitPush(r0);
- frame_->EmitPush(r3); // push entry
- frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
- __ mov(r3, Operand(r0), SetCC);
- // If the property has been removed while iterating, we just skip it.
- node->continue_target()->Branch(eq);
-
- end_del_check.Bind();
- // Store the entry in the 'each' expression and take another spin in the
- // loop. r3: i'th entry of the enum cache (or string there of)
- frame_->EmitPush(r3); // push entry
- { VirtualFrame::RegisterAllocationScope scope(this);
- Reference each(this, node->each());
- if (!each.is_illegal()) {
- if (each.size() > 0) {
- // Loading a reference may leave the frame in an unspilled state.
- frame_->SpillAll(); // Sync stack to memory.
- // Get the value (under the reference on the stack) from memory.
- __ ldr(r0, frame_->ElementAt(each.size()));
- frame_->EmitPush(r0);
- each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
- frame_->Drop(2); // The result of the set and the extra pushed value.
- } else {
- // If the reference was to a slot we rely on the convenient property
- // that it doesn't matter whether a value (eg, ebx pushed above) is
- // right on top of or right underneath a zero-sized reference.
- each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
- frame_->Drop(1); // Drop the result of the set operation.
- }
- }
- }
- // Body.
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- { VirtualFrame::RegisterAllocationScope scope(this);
- Visit(node->body());
- }
-
- // Next. Reestablish a spilled frame in case we are coming here via
- // a continue in the body.
- node->continue_target()->Bind();
- frame_->SpillAll();
- frame_->EmitPop(r0);
- __ add(r0, r0, Operand(Smi::FromInt(1)));
- frame_->EmitPush(r0);
- entry.Jump();
-
- // Cleanup. No need to spill because VirtualFrame::Drop is safe for
- // any frame.
- node->break_target()->Bind();
- frame_->Drop(5);
-
- // Exit.
- exit.Bind();
- node->continue_target()->Unuse();
- node->break_target()->Unuse();
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
- Comment cmnt(masm_, "[ TryCatchStatement");
- CodeForStatementPosition(node);
-
- JumpTarget try_block;
- JumpTarget exit;
-
- try_block.Call();
- // --- Catch block ---
- frame_->EmitPush(r0);
-
- // Store the caught exception in the catch variable.
- Variable* catch_var = node->catch_var()->var();
- ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
- StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
-
- // Remove the exception from the stack.
- frame_->Drop();
-
- { VirtualFrame::RegisterAllocationScope scope(this);
- VisitStatements(node->catch_block()->statements());
- }
- if (frame_ != NULL) {
- exit.Jump();
- }
-
-
- // --- Try block ---
- try_block.Bind();
-
- frame_->PushTryHandler(TRY_CATCH_HANDLER);
- int handler_height = frame_->height();
-
- // Shadow the labels for all escapes from the try block, including
- // returns. During shadowing, the original label is hidden as the
- // LabelShadow and operations on the original actually affect the
- // shadowing label.
- //
- // We should probably try to unify the escaping labels and the return
- // label.
- int nof_escapes = node->escaping_targets()->length();
- List<ShadowTarget*> shadows(1 + nof_escapes);
-
- // Add the shadow target for the function return.
- static const int kReturnShadowIndex = 0;
- shadows.Add(new ShadowTarget(&function_return_));
- bool function_return_was_shadowed = function_return_is_shadowed_;
- function_return_is_shadowed_ = true;
- ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
- // Add the remaining shadow targets.
- for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
- }
-
- // Generate code for the statements in the try block.
- { VirtualFrame::RegisterAllocationScope scope(this);
- VisitStatements(node->try_block()->statements());
- }
-
- // Stop the introduced shadowing and count the number of required unlinks.
- // After shadowing stops, the original labels are unshadowed and the
- // LabelShadows represent the formerly shadowing labels.
- bool has_unlinks = false;
- for (int i = 0; i < shadows.length(); i++) {
- shadows[i]->StopShadowing();
- has_unlinks = has_unlinks || shadows[i]->is_linked();
- }
- function_return_is_shadowed_ = function_return_was_shadowed;
-
- // Get an external reference to the handler address.
- ExternalReference handler_address(Isolate::k_handler_address, isolate());
-
- // If we can fall off the end of the try block, unlink from try chain.
- if (has_valid_frame()) {
- // The next handler address is on top of the frame. Unlink from
- // the handler list and drop the rest of this handler from the
- // frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(r1); // r0 can contain the return value.
- __ mov(r3, Operand(handler_address));
- __ str(r1, MemOperand(r3));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
- if (has_unlinks) {
- exit.Jump();
- }
- }
-
- // Generate unlink code for the (formerly) shadowing labels that have been
- // jumped to. Deallocate each shadow target.
- for (int i = 0; i < shadows.length(); i++) {
- if (shadows[i]->is_linked()) {
- // Unlink from try chain;
- shadows[i]->Bind();
- // Because we can be jumping here (to spilled code) from unspilled
- // code, we need to reestablish a spilled frame at this block.
- frame_->SpillAll();
-
- // Reload sp from the top handler, because some statements that we
- // break from (eg, for...in) may have left stuff on the stack.
- __ mov(r3, Operand(handler_address));
- __ ldr(sp, MemOperand(r3));
- frame_->Forget(frame_->height() - handler_height);
-
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(r1); // r0 can contain the return value.
- __ str(r1, MemOperand(r3));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
- frame_->PrepareForReturn();
- }
- shadows[i]->other_target()->Jump();
- }
- }
-
- exit.Bind();
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
- Comment cmnt(masm_, "[ TryFinallyStatement");
- CodeForStatementPosition(node);
-
- // State: Used to keep track of reason for entering the finally
- // block. Should probably be extended to hold information for
- // break/continue from within the try block.
- enum { FALLING, THROWING, JUMPING };
-
- JumpTarget try_block;
- JumpTarget finally_block;
-
- try_block.Call();
-
- frame_->EmitPush(r0); // save exception object on the stack
- // In case of thrown exceptions, this is where we continue.
- __ mov(r2, Operand(Smi::FromInt(THROWING)));
- finally_block.Jump();
-
- // --- Try block ---
- try_block.Bind();
-
- frame_->PushTryHandler(TRY_FINALLY_HANDLER);
- int handler_height = frame_->height();
-
- // Shadow the labels for all escapes from the try block, including
- // returns. Shadowing hides the original label as the LabelShadow and
- // operations on the original actually affect the shadowing label.
- //
- // We should probably try to unify the escaping labels and the return
- // label.
- int nof_escapes = node->escaping_targets()->length();
- List<ShadowTarget*> shadows(1 + nof_escapes);
-
- // Add the shadow target for the function return.
- static const int kReturnShadowIndex = 0;
- shadows.Add(new ShadowTarget(&function_return_));
- bool function_return_was_shadowed = function_return_is_shadowed_;
- function_return_is_shadowed_ = true;
- ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
- // Add the remaining shadow targets.
- for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
- }
-
- // Generate code for the statements in the try block.
- { VirtualFrame::RegisterAllocationScope scope(this);
- VisitStatements(node->try_block()->statements());
- }
-
- // Stop the introduced shadowing and count the number of required unlinks.
- // After shadowing stops, the original labels are unshadowed and the
- // LabelShadows represent the formerly shadowing labels.
- int nof_unlinks = 0;
- for (int i = 0; i < shadows.length(); i++) {
- shadows[i]->StopShadowing();
- if (shadows[i]->is_linked()) nof_unlinks++;
- }
- function_return_is_shadowed_ = function_return_was_shadowed;
-
- // Get an external reference to the handler address.
- ExternalReference handler_address(Isolate::k_handler_address, isolate());
-
- // If we can fall off the end of the try block, unlink from the try
- // chain and set the state on the frame to FALLING.
- if (has_valid_frame()) {
- // The next handler address is on top of the frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(r1);
- __ mov(r3, Operand(handler_address));
- __ str(r1, MemOperand(r3));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- // Fake a top of stack value (unneeded when FALLING) and set the
- // state in r2, then jump around the unlink blocks if any.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- frame_->EmitPush(r0);
- __ mov(r2, Operand(Smi::FromInt(FALLING)));
- if (nof_unlinks > 0) {
- finally_block.Jump();
- }
- }
-
- // Generate code to unlink and set the state for the (formerly)
- // shadowing targets that have been jumped to.
- for (int i = 0; i < shadows.length(); i++) {
- if (shadows[i]->is_linked()) {
- // If we have come from the shadowed return, the return value is
- // in (a non-refcounted reference to) r0. We must preserve it
- // until it is pushed.
- //
- // Because we can be jumping here (to spilled code) from
- // unspilled code, we need to reestablish a spilled frame at
- // this block.
- shadows[i]->Bind();
- frame_->SpillAll();
-
- // Reload sp from the top handler, because some statements that
- // we break from (eg, for...in) may have left stuff on the
- // stack.
- __ mov(r3, Operand(handler_address));
- __ ldr(sp, MemOperand(r3));
- frame_->Forget(frame_->height() - handler_height);
-
- // Unlink this handler and drop it from the frame. The next
- // handler address is currently on top of the frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(r1);
- __ str(r1, MemOperand(r3));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- if (i == kReturnShadowIndex) {
- // If this label shadowed the function return, materialize the
- // return value on the stack.
- frame_->EmitPush(r0);
- } else {
- // Fake TOS for targets that shadowed breaks and continues.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- frame_->EmitPush(r0);
- }
- __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
- if (--nof_unlinks > 0) {
- // If this is not the last unlink block, jump around the next.
- finally_block.Jump();
- }
- }
- }
-
- // --- Finally block ---
- finally_block.Bind();
-
- // Push the state on the stack.
- frame_->EmitPush(r2);
-
- // We keep two elements on the stack - the (possibly faked) result
- // and the state - while evaluating the finally block.
- //
- // Generate code for the statements in the finally block.
- { VirtualFrame::RegisterAllocationScope scope(this);
- VisitStatements(node->finally_block()->statements());
- }
-
- if (has_valid_frame()) {
- // Restore state and return value or faked TOS.
- frame_->EmitPop(r2);
- frame_->EmitPop(r0);
- }
-
- // Generate code to jump to the right destination for all used
- // formerly shadowing targets. Deallocate each shadow target.
- for (int i = 0; i < shadows.length(); i++) {
- if (has_valid_frame() && shadows[i]->is_bound()) {
- JumpTarget* original = shadows[i]->other_target();
- __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
- if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
- JumpTarget skip;
- skip.Branch(ne);
- frame_->PrepareForReturn();
- original->Jump();
- skip.Bind();
- } else {
- original->Branch(eq);
- }
- }
- }
-
- if (has_valid_frame()) {
- // Check if we need to rethrow the exception.
- JumpTarget exit;
- __ cmp(r2, Operand(Smi::FromInt(THROWING)));
- exit.Branch(ne);
-
- // Rethrow exception.
- frame_->EmitPush(r0);
- frame_->CallRuntime(Runtime::kReThrow, 1);
-
- // Done.
- exit.Bind();
- }
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ DebuggerStatament");
- CodeForStatementPosition(node);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- frame_->DebugBreak();
-#endif
- // Ignore the return value.
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::InstantiateFunction(
- Handle<SharedFunctionInfo> function_info,
- bool pretenure) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- if (!pretenure &&
- scope()->is_function_scope() &&
- function_info->num_literals() == 0) {
- FastNewClosureStub stub(
- function_info->strict_mode() ? kStrictMode : kNonStrictMode);
- frame_->EmitPush(Operand(function_info));
- frame_->SpillAll();
- frame_->CallStub(&stub, 1);
- frame_->EmitPush(r0);
- } else {
- // Create a new closure.
- frame_->EmitPush(cp);
- frame_->EmitPush(Operand(function_info));
- frame_->EmitPush(Operand(pretenure
- ? FACTORY->true_value()
- : FACTORY->false_value()));
- frame_->CallRuntime(Runtime::kNewClosure, 3);
- frame_->EmitPush(r0);
- }
-}
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ FunctionLiteral");
-
- // Build the function info and instantiate it.
- Handle<SharedFunctionInfo> function_info =
- Compiler::BuildFunctionInfo(node, script());
- if (function_info.is_null()) {
- SetStackOverflow();
- ASSERT(frame_->height() == original_height);
- return;
- }
- InstantiateFunction(function_info, node->pretenure());
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
- InstantiateFunction(node->shared_function_info(), false);
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitConditional(Conditional* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Conditional");
- JumpTarget then;
- JumpTarget else_;
- LoadCondition(node->condition(), &then, &else_, true);
- if (has_valid_frame()) {
- Branch(false, &else_);
- }
- if (has_valid_frame() || then.is_linked()) {
- then.Bind();
- Load(node->then_expression());
- }
- if (else_.is_linked()) {
- JumpTarget exit;
- if (has_valid_frame()) exit.Jump();
- else_.Bind();
- Load(node->else_expression());
- if (exit.is_linked()) exit.Bind();
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
-
- // JumpTargets do not yet support merging frames so the frame must be
- // spilled when jumping to these targets.
- JumpTarget slow;
- JumpTarget done;
-
- // Generate fast case for loading from slots that correspond to
- // local/global variables or arguments unless they are shadowed by
- // eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(slot,
- typeof_state,
- &slow,
- &done);
-
- slow.Bind();
- frame_->EmitPush(cp);
- frame_->EmitPush(Operand(slot->var()->name()));
-
- if (typeof_state == INSIDE_TYPEOF) {
- frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- } else {
- frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
- }
-
- done.Bind();
- frame_->EmitPush(r0);
-
- } else {
- Register scratch = VirtualFrame::scratch0();
- TypeInfo info = type_info(slot);
- frame_->EmitPush(SlotOperand(slot, scratch), info);
-
- if (slot->var()->mode() == Variable::CONST) {
- // Const slots may contain 'the hole' value (the constant hasn't been
- // initialized yet) which needs to be converted into the 'undefined'
- // value.
- Comment cmnt(masm_, "[ Unhole const");
- Register tos = frame_->PopToRegister();
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(tos, ip);
- __ LoadRoot(tos, Heap::kUndefinedValueRootIndex, eq);
- frame_->EmitPush(tos);
- }
- }
-}
-
-
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
- TypeofState state) {
- VirtualFrame::RegisterAllocationScope scope(this);
- LoadFromSlot(slot, state);
-
- // Bail out quickly if we're not using lazy arguments allocation.
- if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
-
- // ... or if the slot isn't a non-parameter arguments slot.
- if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
-
- // Load the loaded value from the stack into a register but leave it on the
- // stack.
- Register tos = frame_->Peek();
-
- // If the loaded value is the sentinel that indicates that we
- // haven't loaded the arguments object yet, we need to do it now.
- JumpTarget exit;
- __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
- __ cmp(tos, ip);
- exit.Branch(ne);
- frame_->Drop();
- StoreArgumentsObject(false);
- exit.Bind();
-}
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
- ASSERT(slot != NULL);
- VirtualFrame::RegisterAllocationScope scope(this);
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
-
- // For now, just do a runtime call.
- frame_->EmitPush(cp);
- frame_->EmitPush(Operand(slot->var()->name()));
-
- if (init_state == CONST_INIT) {
- // Same as the case for a normal store, but ignores attribute
- // (e.g. READ_ONLY) of context slot so that we can initialize
- // const properties (introduced via eval("const foo = (some
- // expr);")). Also, uses the current function context instead of
- // the top context.
- //
- // Note that we must declare the foo upon entry of eval(), via a
- // context slot declaration, but we cannot initialize it at the
- // same time, because the const declaration may be at the end of
- // the eval code (sigh...) and the const variable may have been
- // used before (where its value is 'undefined'). Thus, we can only
- // do the initialization when we actually encounter the expression
- // and when the expression operands are defined and valid, and
- // thus we need the split into 2 operations: declaration of the
- // context slot followed by initialization.
- frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
- frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
- }
- // Storing a variable must keep the (new) value on the expression
- // stack. This is necessary for compiling assignment expressions.
- frame_->EmitPush(r0);
-
- } else {
- ASSERT(!slot->var()->is_dynamic());
- Register scratch = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
-
- // The frame must be spilled when branching to this target.
- JumpTarget exit;
-
- if (init_state == CONST_INIT) {
- ASSERT(slot->var()->mode() == Variable::CONST);
- // Only the first const initialization must be executed (the slot
- // still contains 'the hole' value). When the assignment is
- // executed, the code is identical to a normal store (see below).
- Comment cmnt(masm_, "[ Init const");
- __ ldr(scratch, SlotOperand(slot, scratch));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch, ip);
- exit.Branch(ne);
- }
-
- // We must execute the store. Storing a variable must keep the
- // (new) value on the stack. This is necessary for compiling
- // assignment expressions.
- //
- // Note: We will reach here even with slot->var()->mode() ==
- // Variable::CONST because of const declarations which will
- // initialize consts to 'the hole' value and by doing so, end up
- // calling this code. r2 may be loaded with context; used below in
- // RecordWrite.
- Register tos = frame_->Peek();
- __ str(tos, SlotOperand(slot, scratch));
- if (slot->type() == Slot::CONTEXT) {
- // Skip write barrier if the written value is a smi.
- __ tst(tos, Operand(kSmiTagMask));
- // We don't use tos any more after here.
- exit.Branch(eq);
- // scratch is loaded with context when calling SlotOperand above.
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- // We need an extra register. Until we have a way to do that in the
- // virtual frame we will cheat and ask for a free TOS register.
- Register scratch3 = frame_->GetTOSRegister();
- __ RecordWrite(scratch, Operand(offset), scratch2, scratch3);
- }
- // If we definitely did not jump over the assignment, we do not need
- // to bind the exit label. Doing so can defeat peephole
- // optimization.
- if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
- exit.Bind();
- }
- }
-}
-
-
-void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow) {
- // Check that no extension objects have been created by calls to
- // eval from the current scope to the global scope.
- Register tmp = frame_->scratch0();
- Register tmp2 = frame_->scratch1();
- Register context = cp;
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- frame_->SpillAll();
- // Check that extension is NULL.
- __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(tmp2, tmp2);
- slow->Branch(ne);
- }
- // Load next context in chain.
- __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
- context = tmp;
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s->is_eval_scope()) {
- frame_->SpillAll();
- Label next, fast;
- __ Move(tmp, context);
- __ bind(&next);
- // Terminate at global context.
- __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
- __ cmp(tmp2, ip);
- __ b(eq, &fast);
- // Check that extension is NULL.
- __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
- __ tst(tmp2, tmp2);
- slow->Branch(ne);
- // Load next context in chain.
- __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
- __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
- __ b(&next);
- __ bind(&fast);
- }
-
- // Load the global object.
- LoadGlobal();
- // Setup the name register and call load IC.
- frame_->CallLoadIC(slot->var()->name(),
- typeof_state == INSIDE_TYPEOF
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT);
-}
-
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow,
- JumpTarget* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
- LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
- frame_->SpillAll();
- done->Jump();
-
- } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- frame_->SpillAll();
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
- Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
- if (potential_slot != NULL) {
- // Generate fast case for locals that rewrite to slots.
- __ ldr(r0,
- ContextSlotOperandCheckExtensions(potential_slot,
- r1,
- r2,
- slow));
- if (potential_slot->var()->mode() == Variable::CONST) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r0, ip);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- }
- done->Jump();
- } else if (rewrite != NULL) {
- // Generate fast case for argument loads.
- Property* property = rewrite->AsProperty();
- if (property != NULL) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- Literal* key_literal = property->key()->AsLiteral();
- if (obj_proxy != NULL &&
- key_literal != NULL &&
- obj_proxy->IsArguments() &&
- key_literal->handle()->IsSmi()) {
- // Load arguments object if there are no eval-introduced
- // variables. Then load the argument from the arguments
- // object using keyed load.
- __ ldr(r0,
- ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
- r1,
- r2,
- slow));
- frame_->EmitPush(r0);
- __ mov(r1, Operand(key_literal->handle()));
- frame_->EmitPush(r1);
- EmitKeyedLoad();
- done->Jump();
- }
- }
- }
- }
-}
-
-
-void CodeGenerator::VisitSlot(Slot* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Slot");
- LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ VariableProxy");
-
- Variable* var = node->var();
- Expression* expr = var->rewrite();
- if (expr != NULL) {
- Visit(expr);
- } else {
- ASSERT(var->is_global());
- Reference ref(this, node);
- ref.GetValue();
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Literal");
- Register reg = frame_->GetTOSRegister();
- bool is_smi = node->handle()->IsSmi();
- __ mov(reg, Operand(node->handle()));
- frame_->EmitPush(reg, is_smi ? TypeInfo::Smi() : TypeInfo::Unknown());
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ RexExp Literal");
-
- Register tmp = VirtualFrame::scratch0();
- // Free up a TOS register that can be used to push the literal.
- Register literal = frame_->GetTOSRegister();
-
- // Retrieve the literal array and check the allocated entry.
-
- // Load the function of this activation.
- __ ldr(tmp, frame_->Function());
-
- // Load the literals array of the function.
- __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kLiteralsOffset));
-
- // Load the literal at the ast saved index.
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ ldr(literal, FieldMemOperand(tmp, literal_offset));
-
- JumpTarget materialized;
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(literal, ip);
- // This branch locks the virtual frame at the done label to match the
- // one we have here, where the literal register is not on the stack and
- // nothing is spilled.
- materialized.Branch(ne);
-
- // If the entry is undefined we call the runtime system to compute
- // the literal.
- // literal array (0)
- frame_->EmitPush(tmp);
- // literal index (1)
- frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
- // RegExp pattern (2)
- frame_->EmitPush(Operand(node->pattern()));
- // RegExp flags (3)
- frame_->EmitPush(Operand(node->flags()));
- frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ Move(literal, r0);
-
- materialized.Bind();
-
- frame_->EmitPush(literal);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- frame_->EmitPush(Operand(Smi::FromInt(size)));
- frame_->CallRuntime(Runtime::kAllocateInNewSpace, 1);
- // TODO(lrn): Use AllocateInNewSpace macro with fallback to runtime.
- // r0 is newly allocated space.
-
- // Reuse literal variable with (possibly) a new register, still holding
- // the materialized boilerplate.
- literal = frame_->PopToRegister(r0);
-
- __ CopyFields(r0, literal, tmp.bit(), size / kPointerSize);
-
- // Push the clone.
- frame_->EmitPush(r0);
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ ObjectLiteral");
-
- Register literal = frame_->GetTOSRegister();
- // Load the function of this activation.
- __ ldr(literal, frame_->Function());
- // Literal array.
- __ ldr(literal, FieldMemOperand(literal, JSFunction::kLiteralsOffset));
- frame_->EmitPush(literal);
- // Literal index.
- frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
- // Constant properties.
- frame_->EmitPush(Operand(node->constant_properties()));
- // Should the object literal have fast elements?
- frame_->EmitPush(Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
- if (node->depth() > 1) {
- frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
- frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
- }
- frame_->EmitPush(r0); // save the result
-
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- node->CalculateEmitStore();
-
- for (int i = 0; i < node->properties()->length(); i++) {
- // At the start of each iteration, the top of stack contains
- // the newly created object literal.
- ObjectLiteral::Property* property = node->properties()->at(i);
- Literal* key = property->key();
- Expression* value = property->value();
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- break;
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
- // else fall through
- case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- Builtins::kStoreIC_Initialize));
- Load(value);
- if (property->emit_store()) {
- frame_->PopToR0();
- // Fetch the object literal.
- frame_->SpillAllButCopyTOSToR1();
- __ mov(r2, Operand(key->handle()));
- frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
- } else {
- frame_->Drop();
- }
- break;
- }
- // else fall through
- case ObjectLiteral::Property::PROTOTYPE: {
- frame_->Dup();
- Load(key);
- Load(value);
- if (property->emit_store()) {
- frame_->EmitPush(Operand(Smi::FromInt(NONE))); // PropertyAttributes
- frame_->CallRuntime(Runtime::kSetProperty, 4);
- } else {
- frame_->Drop(3);
- }
- break;
- }
- case ObjectLiteral::Property::SETTER: {
- frame_->Dup();
- Load(key);
- frame_->EmitPush(Operand(Smi::FromInt(1)));
- Load(value);
- frame_->CallRuntime(Runtime::kDefineAccessor, 4);
- break;
- }
- case ObjectLiteral::Property::GETTER: {
- frame_->Dup();
- Load(key);
- frame_->EmitPush(Operand(Smi::FromInt(0)));
- Load(value);
- frame_->CallRuntime(Runtime::kDefineAccessor, 4);
- break;
- }
- }
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- Register tos = frame_->GetTOSRegister();
- // Load the function of this activation.
- __ ldr(tos, frame_->Function());
- // Load the literals array of the function.
- __ ldr(tos, FieldMemOperand(tos, JSFunction::kLiteralsOffset));
- frame_->EmitPush(tos);
- frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
- frame_->EmitPush(Operand(node->constant_elements()));
- int length = node->values()->length();
- if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
- frame_->CallStub(&stub, 3);
- __ IncrementCounter(masm_->isolate()->counters()->cow_arrays_created_stub(),
- 1, r1, r2);
- } else if (node->depth() > 1) {
- frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- } else {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
- frame_->CallStub(&stub, 3);
- }
- frame_->EmitPush(r0); // save the result
- // r0: created object literal
-
- // Generate code to set the elements in the array that are not
- // literals.
- for (int i = 0; i < node->values()->length(); i++) {
- Expression* value = node->values()->at(i);
-
- // If value is a literal the property value is already set in the
- // boilerplate object.
- if (value->AsLiteral() != NULL) continue;
- // If value is a materialized literal the property value is already set
- // in the boilerplate object if it is simple.
- if (CompileTimeValue::IsCompileTimeValue(value)) continue;
-
- // The property must be set by generated code.
- Load(value);
- frame_->PopToR0();
- // Fetch the object literal.
- frame_->SpillAllButCopyTOSToR1();
-
- // Get the elements array.
- __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
-
- // Write to the indexed properties array.
- int offset = i * kPointerSize + FixedArray::kHeaderSize;
- __ str(r0, FieldMemOperand(r1, offset));
-
- // Update the write barrier for the array address.
- __ RecordWrite(r1, Operand(offset), r3, r2);
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- // Call runtime routine to allocate the catch extension object and
- // assign the exception value to the catch variable.
- Comment cmnt(masm_, "[ CatchExtensionObject");
- Load(node->key());
- Load(node->value());
- frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
- frame_->EmitPush(r0);
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::EmitSlotAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm(), "[ Variable Assignment");
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- ASSERT(var != NULL);
- Slot* slot = var->AsSlot();
- ASSERT(slot != NULL);
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-
- // Perform the binary operation.
- Literal* literal = node->value()->AsLiteral();
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- if (literal != NULL && literal->handle()->IsSmi()) {
- SmiOperation(node->binary_op(),
- literal->handle(),
- false,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- GenerateInlineSmi inline_smi =
- loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
- if (literal != NULL) {
- ASSERT(!literal->handle()->IsSmi());
- inline_smi = DONT_GENERATE_INLINE_SMI;
- }
- Load(node->value());
- GenericBinaryOperation(node->binary_op(),
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
- inline_smi);
- }
- } else {
- Load(node->value());
- }
-
- // Perform the assignment.
- if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
- CodeForSourcePosition(node->position());
- StoreToSlot(slot,
- node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm(), "[ Named Property Assignment");
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- Property* prop = node->target()->AsProperty();
- ASSERT(var == NULL || (prop == NULL && var->is_global()));
-
- // Initialize name and evaluate the receiver sub-expression if necessary. If
- // the receiver is trivial it is not placed on the stack at this point, but
- // loaded whenever actually needed.
- Handle<String> name;
- bool is_trivial_receiver = false;
- if (var != NULL) {
- name = var->name();
- } else {
- Literal* lit = prop->key()->AsLiteral();
- ASSERT_NOT_NULL(lit);
- name = Handle<String>::cast(lit->handle());
- // Do not materialize the receiver on the frame if it is trivial.
- is_trivial_receiver = prop->obj()->IsTrivial();
- if (!is_trivial_receiver) Load(prop->obj());
- }
-
- // Change to slow case in the beginning of an initialization block to
- // avoid the quadratic behavior of repeatedly adding fast properties.
- if (node->starts_initialization_block()) {
- // Initialization block consists of assignments of the form expr.x = ..., so
- // this will never be an assignment to a variable, so there must be a
- // receiver object.
- ASSERT_EQ(NULL, var);
- if (is_trivial_receiver) {
- Load(prop->obj());
- } else {
- frame_->Dup();
- }
- frame_->CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- // Change to fast case at the end of an initialization block. To prepare for
- // that add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- if (node->ends_initialization_block() && !is_trivial_receiver) {
- frame_->Dup();
- }
-
- // Stack layout:
- // [tos] : receiver (only materialized if non-trivial)
- // [tos+1] : receiver if at the end of an initialization block
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- if (is_trivial_receiver) {
- Load(prop->obj());
- } else if (var != NULL) {
- LoadGlobal();
- } else {
- frame_->Dup();
- }
- EmitNamedLoad(name, var != NULL);
-
- // Perform the binary operation.
- Literal* literal = node->value()->AsLiteral();
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- if (literal != NULL && literal->handle()->IsSmi()) {
- SmiOperation(node->binary_op(),
- literal->handle(),
- false,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- GenerateInlineSmi inline_smi =
- loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
- if (literal != NULL) {
- ASSERT(!literal->handle()->IsSmi());
- inline_smi = DONT_GENERATE_INLINE_SMI;
- }
- Load(node->value());
- GenericBinaryOperation(node->binary_op(),
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
- inline_smi);
- }
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- }
-
- // Stack layout:
- // [tos] : value
- // [tos+1] : receiver (only materialized if non-trivial)
- // [tos+2] : receiver if at the end of an initialization block
-
- // Perform the assignment. It is safe to ignore constants here.
- ASSERT(var == NULL || var->mode() != Variable::CONST);
- ASSERT_NE(Token::INIT_CONST, node->op());
- if (is_trivial_receiver) {
- // Load the receiver and swap with the value.
- Load(prop->obj());
- Register t0 = frame_->PopToRegister();
- Register t1 = frame_->PopToRegister(t0);
- frame_->EmitPush(t0);
- frame_->EmitPush(t1);
- }
- CodeForSourcePosition(node->position());
- bool is_contextual = (var != NULL);
- EmitNamedStore(name, is_contextual);
- frame_->EmitPush(r0);
-
- // Change to fast case at the end of an initialization block.
- if (node->ends_initialization_block()) {
- ASSERT_EQ(NULL, var);
- // The argument to the runtime call is the receiver.
- if (is_trivial_receiver) {
- Load(prop->obj());
- } else {
- // A copy of the receiver is below the value of the assignment. Swap
- // the receiver and the value of the assignment expression.
- Register t0 = frame_->PopToRegister();
- Register t1 = frame_->PopToRegister(t0);
- frame_->EmitPush(t0);
- frame_->EmitPush(t1);
- }
- frame_->CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- // Stack layout:
- // [tos] : result
-
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Keyed Property Assignment");
- Property* prop = node->target()->AsProperty();
- ASSERT_NOT_NULL(prop);
-
- // Evaluate the receiver subexpression.
- Load(prop->obj());
-
- WriteBarrierCharacter wb_info;
-
- // Change to slow case in the beginning of an initialization block to
- // avoid the quadratic behavior of repeatedly adding fast properties.
- if (node->starts_initialization_block()) {
- frame_->Dup();
- frame_->CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- // Change to fast case at the end of an initialization block. To prepare for
- // that add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- if (node->ends_initialization_block()) {
- frame_->Dup();
- }
-
- // Evaluate the key subexpression.
- Load(prop->key());
-
- // Stack layout:
- // [tos] : key
- // [tos+1] : receiver
- // [tos+2] : receiver if at the end of an initialization block
- //
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- // Duplicate receiver and key for loading the current property value.
- frame_->Dup2();
- EmitKeyedLoad();
- frame_->EmitPush(r0);
-
- // Perform the binary operation.
- Literal* literal = node->value()->AsLiteral();
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- if (literal != NULL && literal->handle()->IsSmi()) {
- SmiOperation(node->binary_op(),
- literal->handle(),
- false,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- GenerateInlineSmi inline_smi =
- loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
- if (literal != NULL) {
- ASSERT(!literal->handle()->IsSmi());
- inline_smi = DONT_GENERATE_INLINE_SMI;
- }
- Load(node->value());
- GenericBinaryOperation(node->binary_op(),
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
- inline_smi);
- }
- wb_info = node->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- wb_info = node->value()->AsLiteral() != NULL ?
- NEVER_NEWSPACE :
- (node->value()->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI);
- }
-
- // Stack layout:
- // [tos] : value
- // [tos+1] : key
- // [tos+2] : receiver
- // [tos+3] : receiver if at the end of an initialization block
-
- // Perform the assignment. It is safe to ignore constants here.
- ASSERT(node->op() != Token::INIT_CONST);
- CodeForSourcePosition(node->position());
- EmitKeyedStore(prop->key()->type(), wb_info);
- frame_->EmitPush(r0);
-
- // Stack layout:
- // [tos] : result
- // [tos+1] : receiver if at the end of an initialization block
-
- // Change to fast case at the end of an initialization block.
- if (node->ends_initialization_block()) {
- // The argument to the runtime call is the extra copy of the receiver,
- // which is below the value of the assignment. Swap the receiver and
- // the value of the assignment expression.
- Register t0 = frame_->PopToRegister();
- Register t1 = frame_->PopToRegister(t0);
- frame_->EmitPush(t1);
- frame_->EmitPush(t0);
- frame_->CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- // Stack layout:
- // [tos] : result
-
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
- VirtualFrame::RegisterAllocationScope scope(this);
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Assignment");
-
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- Property* prop = node->target()->AsProperty();
-
- if (var != NULL && !var->is_global()) {
- EmitSlotAssignment(node);
-
- } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
- (var != NULL && var->is_global())) {
- // Properties whose keys are property names and global variables are
- // treated as named property references. We do not need to consider
- // global 'this' because it is not a valid left-hand side.
- EmitNamedPropertyAssignment(node);
-
- } else if (prop != NULL) {
- // Other properties (including rewritten parameters for a function that
- // uses arguments) are keyed property assignments.
- EmitKeyedPropertyAssignment(node);
-
- } else {
- // Invalid left-hand side.
- Load(node->target());
- frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
- // The runtime call doesn't actually return but the code generator will
- // still generate code and expects a certain frame height.
- frame_->EmitPush(r0);
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Throw");
-
- Load(node->exception());
- CodeForSourcePosition(node->position());
- frame_->CallRuntime(Runtime::kThrow, 1);
- frame_->EmitPush(r0);
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Property");
-
- { Reference property(this, node);
- property.GetValue();
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Call");
-
- Expression* function = node->expression();
- ZoneList<Expression*>* args = node->arguments();
-
- // Standard function call.
- // Check if the function is a variable or a property.
- Variable* var = function->AsVariableProxy()->AsVariable();
- Property* property = function->AsProperty();
-
- // ------------------------------------------------------------------------
- // Fast-case: Use inline caching.
- // ---
- // According to ECMA-262, section 11.2.3, page 44, the function to call
- // must be resolved after the arguments have been evaluated. The IC code
- // automatically handles this by loading the arguments before the function
- // is resolved in cache misses (this also holds for megamorphic calls).
- // ------------------------------------------------------------------------
-
- if (var != NULL && var->is_possibly_eval()) {
- // ----------------------------------
- // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
- // ----------------------------------
-
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
- // call. Then we call the resolved function using the given
- // arguments.
-
- // Prepare stack for call to resolved function.
- Load(function);
-
- // Allocate a frame slot for the receiver.
- frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- VirtualFrame::SpilledScope spilled_scope(frame_);
-
- // If we know that eval can only be shadowed by eval-introduced
- // variables we attempt to load the global eval function directly
- // in generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- JumpTarget done;
- if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
- ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
- JumpTarget slow;
- // Prepare the stack for the call to
- // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
- // function, the first argument to the eval call and the
- // receiver.
- LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &slow);
- frame_->EmitPush(r0);
- if (arg_count > 0) {
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- frame_->EmitPush(r1);
- } else {
- frame_->EmitPush(r2);
- }
- __ ldr(r1, frame_->Receiver());
- frame_->EmitPush(r1);
-
- // Push the strict mode flag.
- frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
-
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
-
- done.Jump();
- slow.Bind();
- }
-
- // Prepare the stack for the call to ResolvePossiblyDirectEval by
- // pushing the loaded function, the first argument to the eval
- // call and the receiver.
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
- frame_->EmitPush(r1);
- if (arg_count > 0) {
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- frame_->EmitPush(r1);
- } else {
- frame_->EmitPush(r2);
- }
- __ ldr(r1, frame_->Receiver());
- frame_->EmitPush(r1);
-
- // Push the strict mode flag.
- frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
-
- // Resolve the call.
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
-
- // If we generated fast-case code bind the jump-target where fast
- // and slow case merge.
- if (done.is_linked()) done.Bind();
-
- // Touch up stack with the right values for the function and the receiver.
- __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ str(r1, MemOperand(sp, arg_count * kPointerSize));
-
- // Call the function.
- CodeForSourcePosition(node->position());
-
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
- frame_->CallStub(&call_function, arg_count + 1);
-
- __ ldr(cp, frame_->Context());
- // Remove the function from the stack.
- frame_->Drop();
- frame_->EmitPush(r0);
-
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is global
- // ----------------------------------
- // Pass the global object as the receiver and let the IC stub
- // patch the stack to use the global proxy as 'this' in the
- // invoked function.
- LoadGlobal();
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- VirtualFrame::SpilledScope spilled_scope(frame_);
- // Setup the name register and call the IC initialization code.
- __ mov(r2, Operand(var->name()));
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> stub =
- ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
- CodeForSourcePosition(node->position());
- frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
- arg_count + 1);
- __ ldr(cp, frame_->Context());
- frame_->EmitPush(r0);
-
- } else if (var != NULL && var->AsSlot() != NULL &&
- var->AsSlot()->type() == Slot::LOOKUP) {
- // ----------------------------------
- // JavaScript examples:
- //
- // with (obj) foo(1, 2, 3) // foo may be in obj.
- //
- // function f() {};
- // function g() {
- // eval(...);
- // f(); // f could be in extension object.
- // }
- // ----------------------------------
-
- JumpTarget slow, done;
-
- // Generate fast case for loading functions from slots that
- // correspond to local/global variables or arguments unless they
- // are shadowed by eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &slow,
- &done);
-
- slow.Bind();
- // Load the function
- frame_->EmitPush(cp);
- frame_->EmitPush(Operand(var->name()));
- frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
- // r0: slot value; r1: receiver
-
- // Load the receiver.
- frame_->EmitPush(r0); // function
- frame_->EmitPush(r1); // receiver
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- JumpTarget call;
- call.Jump();
- done.Bind();
- frame_->EmitPush(r0); // function
- LoadGlobalReceiver(VirtualFrame::scratch0()); // receiver
- call.Bind();
- }
-
- // Call the function. At this point, everything is spilled but the
- // function and receiver are in r0 and r1.
- CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
- frame_->EmitPush(r0);
-
- } else if (property != NULL) {
- // Check if the key is a literal string.
- Literal* literal = property->key()->AsLiteral();
-
- if (literal != NULL && literal->handle()->IsSymbol()) {
- // ------------------------------------------------------------------
- // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
- // ------------------------------------------------------------------
-
- Handle<String> name = Handle<String>::cast(literal->handle());
-
- if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
- name->IsEqualTo(CStrVector("apply")) &&
- args->length() == 2 &&
- args->at(1)->AsVariableProxy() != NULL &&
- args->at(1)->AsVariableProxy()->IsArguments()) {
- // Use the optimized Function.prototype.apply that avoids
- // allocating lazily allocated arguments objects.
- CallApplyLazy(property->obj(),
- args->at(0),
- args->at(1)->AsVariableProxy(),
- node->position());
-
- } else {
- Load(property->obj()); // Receiver.
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- VirtualFrame::SpilledScope spilled_scope(frame_);
- // Set the name register and call the IC initialization code.
- __ mov(r2, Operand(name));
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> stub =
- ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
- CodeForSourcePosition(node->position());
- frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
- __ ldr(cp, frame_->Context());
- frame_->EmitPush(r0);
- }
-
- } else {
- // -------------------------------------------
- // JavaScript example: 'array[index](1, 2, 3)'
- // -------------------------------------------
-
- // Load the receiver and name of the function.
- Load(property->obj());
- Load(property->key());
-
- if (property->is_synthetic()) {
- EmitKeyedLoad();
- // Put the function below the receiver.
- // Use the global receiver.
- frame_->EmitPush(r0); // Function.
- LoadGlobalReceiver(VirtualFrame::scratch0());
- // Call the function.
- CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
- frame_->EmitPush(r0);
- } else {
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- Register key = frame_->PopToRegister();
- Register receiver = frame_->PopToRegister(key);
- frame_->EmitPush(key);
- frame_->EmitPush(receiver);
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- // Load the key into r2 and call the IC initialization code.
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> stub =
- ISOLATE->stub_cache()->ComputeKeyedCallInitialize(arg_count,
- in_loop);
- CodeForSourcePosition(node->position());
- frame_->SpillAll();
- __ ldr(r2, frame_->ElementAt(arg_count + 1));
- frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
- frame_->Drop(); // Drop the key still on the stack.
- __ ldr(cp, frame_->Context());
- frame_->EmitPush(r0);
- }
- }
-
- } else {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is not global
- // ----------------------------------
-
- // Load the function.
- Load(function);
-
- // Pass the global proxy as the receiver.
- LoadGlobalReceiver(VirtualFrame::scratch0());
-
- // Call the function.
- CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
- frame_->EmitPush(r0);
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ CallNew");
-
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments. This is different from ordinary calls, where the
- // actual function to call is resolved after the arguments have been
- // evaluated.
-
- // Push constructor on the stack. If it's not a function it's used as
- // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
- // ignored.
- Load(node->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = node->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- // Spill everything from here to simplify the implementation.
- VirtualFrame::SpilledScope spilled_scope(frame_);
-
- // Load the argument count into r0 and the function into r1 as per
- // calling convention.
- __ mov(r0, Operand(arg_count));
- __ ldr(r1, frame_->ElementAt(arg_count));
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- CodeForSourcePosition(node->position());
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- Builtins::kJSConstructCall));
- frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);
- frame_->EmitPush(r0);
-
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
- Register scratch = VirtualFrame::scratch0();
- JumpTarget null, function, leave, non_function_constructor;
-
- // Load the object into register.
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register tos = frame_->PopToRegister();
-
- // If the object is a smi, we return null.
- __ tst(tos, Operand(kSmiTagMask));
- null.Branch(eq);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- __ CompareObjectType(tos, tos, scratch, FIRST_JS_OBJECT_TYPE);
- null.Branch(lt);
-
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ cmp(scratch, Operand(JS_FUNCTION_TYPE));
- function.Branch(eq);
-
- // Check if the constructor in the map is a function.
- __ ldr(tos, FieldMemOperand(tos, Map::kConstructorOffset));
- __ CompareObjectType(tos, scratch, scratch, JS_FUNCTION_TYPE);
- non_function_constructor.Branch(ne);
-
- // The tos register now contains the constructor function. Grab the
- // instance class name from there.
- __ ldr(tos, FieldMemOperand(tos, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(tos,
- FieldMemOperand(tos, SharedFunctionInfo::kInstanceClassNameOffset));
- frame_->EmitPush(tos);
- leave.Jump();
-
- // Functions have class 'Function'.
- function.Bind();
- __ mov(tos, Operand(FACTORY->function_class_symbol()));
- frame_->EmitPush(tos);
- leave.Jump();
-
- // Objects with a non-function constructor have class 'Object'.
- non_function_constructor.Bind();
- __ mov(tos, Operand(FACTORY->Object_symbol()));
- frame_->EmitPush(tos);
- leave.Jump();
-
- // Non-JS objects have class null.
- null.Bind();
- __ LoadRoot(tos, Heap::kNullValueRootIndex);
- frame_->EmitPush(tos);
-
- // All done.
- leave.Bind();
-}
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
- Register scratch = VirtualFrame::scratch0();
- JumpTarget leave;
-
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register tos = frame_->PopToRegister(); // tos contains object.
- // if (object->IsSmi()) return the object.
- __ tst(tos, Operand(kSmiTagMask));
- leave.Branch(eq);
- // It is a heap object - get map. If (!object->IsJSValue()) return the object.
- __ CompareObjectType(tos, scratch, scratch, JS_VALUE_TYPE);
- leave.Branch(ne);
- // Load the value.
- __ ldr(tos, FieldMemOperand(tos, JSValue::kValueOffset));
- leave.Bind();
- frame_->EmitPush(tos);
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
- JumpTarget leave;
-
- ASSERT(args->length() == 2);
- Load(args->at(0)); // Load the object.
- Load(args->at(1)); // Load the value.
- Register value = frame_->PopToRegister();
- Register object = frame_->PopToRegister(value);
- // if (object->IsSmi()) return object.
- __ tst(object, Operand(kSmiTagMask));
- leave.Branch(eq);
- // It is a heap object - get map. If (!object->IsJSValue()) return the object.
- __ CompareObjectType(object, scratch1, scratch1, JS_VALUE_TYPE);
- leave.Branch(ne);
- // Store the value.
- __ str(value, FieldMemOperand(object, JSValue::kValueOffset));
- // Update the write barrier.
- __ RecordWrite(object,
- Operand(JSValue::kValueOffset - kHeapObjectTag),
- scratch1,
- scratch2);
- // Leave.
- leave.Bind();
- frame_->EmitPush(value);
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register reg = frame_->PopToRegister();
- __ tst(reg, Operand(kSmiTagMask));
- cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
- // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
- ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (ShouldGenerateLog(args->at(0))) {
- Load(args->at(1));
- Load(args->at(2));
- frame_->CallRuntime(Runtime::kLog, 2);
- }
-#endif
- frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register reg = frame_->PopToRegister();
- __ tst(reg, Operand(kSmiTagMask | 0x80000000u));
- cc_reg_ = eq;
-}
-
-
-// Generates the Math.pow method.
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
- Load(args->at(0));
- Load(args->at(1));
-
- if (!CpuFeatures::IsSupported(VFP3)) {
- frame_->CallRuntime(Runtime::kMath_pow, 2);
- frame_->EmitPush(r0);
- } else {
- CpuFeatures::Scope scope(VFP3);
- JumpTarget runtime, done;
- Label exponent_nonsmi, base_nonsmi, powi, not_minus_half, allocate_return;
-
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
-
- // Get base and exponent to registers.
- Register exponent = frame_->PopToRegister();
- Register base = frame_->PopToRegister(exponent);
- Register heap_number_map = no_reg;
-
- // Set the frame for the runtime jump target. The code below jumps to the
- // jump target label so the frame needs to be established before that.
- ASSERT(runtime.entry_frame() == NULL);
- runtime.set_entry_frame(frame_);
-
- __ JumpIfNotSmi(exponent, &exponent_nonsmi);
- __ JumpIfNotSmi(base, &base_nonsmi);
-
- heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- // Exponent is a smi and base is a smi. Get the smi value into vfp register
- // d1.
- __ SmiToDoubleVFPRegister(base, d1, scratch1, s0);
- __ b(&powi);
-
- __ bind(&base_nonsmi);
- // Exponent is smi and base is non smi. Get the double value from the base
- // into vfp register d1.
- __ ObjectToDoubleVFPRegister(base, d1,
- scratch1, scratch2, heap_number_map, s0,
- runtime.entry_label());
-
- __ bind(&powi);
-
- // Load 1.0 into d0.
- __ vmov(d0, 1.0);
-
- // Get the absolute untagged value of the exponent and use that for the
- // calculation.
- __ mov(scratch1, Operand(exponent, ASR, kSmiTagSize), SetCC);
- // Negate if negative.
- __ rsb(scratch1, scratch1, Operand(0, RelocInfo::NONE), LeaveCC, mi);
- __ vmov(d2, d0, mi); // 1.0 needed in d2 later if exponent is negative.
-
- // Run through all the bits in the exponent. The result is calculated in d0
- // and d1 holds base^(bit^2).
- Label more_bits;
- __ bind(&more_bits);
- __ mov(scratch1, Operand(scratch1, LSR, 1), SetCC);
- __ vmul(d0, d0, d1, cs); // Multiply with base^(bit^2) if bit is set.
- __ vmul(d1, d1, d1, ne); // Don't bother calculating next d1 if done.
- __ b(ne, &more_bits);
-
- // If exponent is positive we are done.
- __ cmp(exponent, Operand(0, RelocInfo::NONE));
- __ b(ge, &allocate_return);
-
- // If exponent is negative result is 1/result (d2 already holds 1.0 in that
- // case). However if d0 has reached infinity this will not provide the
- // correct result, so call runtime if that is the case.
- __ mov(scratch2, Operand(0x7FF00000));
- __ mov(scratch1, Operand(0, RelocInfo::NONE));
- __ vmov(d1, scratch1, scratch2); // Load infinity into d1.
- __ VFPCompareAndSetFlags(d0, d1);
- runtime.Branch(eq); // d0 reached infinity.
- __ vdiv(d0, d2, d0);
- __ b(&allocate_return);
-
- __ bind(&exponent_nonsmi);
- // Special handling of raising to the power of -0.5 and 0.5. First check
- // that the value is a heap number and that the lower bits (which for both
- // values are zero).
- heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ ldr(scratch1, FieldMemOperand(exponent, HeapObject::kMapOffset));
- __ ldr(scratch2, FieldMemOperand(exponent, HeapNumber::kMantissaOffset));
- __ cmp(scratch1, heap_number_map);
- runtime.Branch(ne);
- __ tst(scratch2, scratch2);
- runtime.Branch(ne);
-
- // Load the higher bits (which contains the floating point exponent).
- __ ldr(scratch1, FieldMemOperand(exponent, HeapNumber::kExponentOffset));
-
- // Compare exponent with -0.5.
- __ cmp(scratch1, Operand(0xbfe00000));
- __ b(ne, &not_minus_half);
-
- // Get the double value from the base into vfp register d0.
- __ ObjectToDoubleVFPRegister(base, d0,
- scratch1, scratch2, heap_number_map, s0,
- runtime.entry_label(),
- AVOID_NANS_AND_INFINITIES);
-
- // Convert -0 into +0 by adding +0.
- __ vmov(d2, 0.0);
- __ vadd(d0, d2, d0);
- // Load 1.0 into d2.
- __ vmov(d2, 1.0);
-
- // Calculate the reciprocal of the square root.
- __ vsqrt(d0, d0);
- __ vdiv(d0, d2, d0);
-
- __ b(&allocate_return);
-
- __ bind(&not_minus_half);
- // Compare exponent with 0.5.
- __ cmp(scratch1, Operand(0x3fe00000));
- runtime.Branch(ne);
-
- // Get the double value from the base into vfp register d0.
- __ ObjectToDoubleVFPRegister(base, d0,
- scratch1, scratch2, heap_number_map, s0,
- runtime.entry_label(),
- AVOID_NANS_AND_INFINITIES);
- // Convert -0 into +0 by adding +0.
- __ vmov(d2, 0.0);
- __ vadd(d0, d2, d0);
- __ vsqrt(d0, d0);
-
- __ bind(&allocate_return);
- Register scratch3 = r5;
- __ AllocateHeapNumberWithValue(scratch3, d0, scratch1, scratch2,
- heap_number_map, runtime.entry_label());
- __ mov(base, scratch3);
- done.Jump();
-
- runtime.Bind();
-
- // Push back the arguments again for the runtime call.
- frame_->EmitPush(base);
- frame_->EmitPush(exponent);
- frame_->CallRuntime(Runtime::kMath_pow, 2);
- __ Move(base, r0);
-
- done.Bind();
- frame_->EmitPush(base);
- }
-}
-
-
-// Generates the Math.sqrt method.
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
-
- if (!CpuFeatures::IsSupported(VFP3)) {
- frame_->CallRuntime(Runtime::kMath_sqrt, 1);
- frame_->EmitPush(r0);
- } else {
- CpuFeatures::Scope scope(VFP3);
- JumpTarget runtime, done;
-
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
-
- // Get the value from the frame.
- Register tos = frame_->PopToRegister();
-
- // Set the frame for the runtime jump target. The code below jumps to the
- // jump target label so the frame needs to be established before that.
- ASSERT(runtime.entry_frame() == NULL);
- runtime.set_entry_frame(frame_);
-
- Register heap_number_map = r6;
- Register new_heap_number = r5;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- // Get the double value from the heap number into vfp register d0.
- __ ObjectToDoubleVFPRegister(tos, d0,
- scratch1, scratch2, heap_number_map, s0,
- runtime.entry_label());
-
- // Calculate the square root of d0 and place result in a heap number object.
- __ vsqrt(d0, d0);
- __ AllocateHeapNumberWithValue(new_heap_number,
- d0,
- scratch1, scratch2,
- heap_number_map,
- runtime.entry_label());
- __ mov(tos, Operand(new_heap_number));
- done.Jump();
-
- runtime.Bind();
- // Push back the argument again for the runtime call.
- frame_->EmitPush(tos);
- frame_->CallRuntime(Runtime::kMath_sqrt, 1);
- __ Move(tos, r0);
-
- done.Bind();
- frame_->EmitPush(tos);
- }
-}
-
-
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
- DeferredStringCharCodeAt(Register object,
- Register index,
- Register scratch,
- Register result)
- : result_(result),
- char_code_at_generator_(object,
- index,
- scratch,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharCodeAtGenerator* fast_case_generator() {
- return &char_code_at_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_code_at_generator_.GenerateSlow(masm(), call_helper);
-
- __ bind(&need_conversion_);
- // Move the undefined value into the result register, which will
- // trigger conversion.
- __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
- __ jmp(exit_label());
-
- __ bind(&index_out_of_range_);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ LoadRoot(result_, Heap::kNanValueRootIndex);
- __ jmp(exit_label());
- }
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharCodeAtGenerator char_code_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charCodeAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharCodeAt");
- ASSERT(args->length() == 2);
-
- Load(args->at(0));
- Load(args->at(1));
-
- Register index = frame_->PopToRegister();
- Register object = frame_->PopToRegister(index);
-
- // We need two extra registers.
- Register scratch = VirtualFrame::scratch0();
- Register result = VirtualFrame::scratch1();
-
- DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(object,
- index,
- scratch,
- result);
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->EmitPush(result);
-}
-
-
-class DeferredStringCharFromCode : public DeferredCode {
- public:
- DeferredStringCharFromCode(Register code,
- Register result)
- : char_from_code_generator_(code, result) {}
-
- StringCharFromCodeGenerator* fast_case_generator() {
- return &char_from_code_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_from_code_generator_.GenerateSlow(masm(), call_helper);
- }
-
- private:
- StringCharFromCodeGenerator char_from_code_generator_;
-};
-
-
-// Generates code for creating a one-char string from a char code.
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharFromCode");
- ASSERT(args->length() == 1);
-
- Load(args->at(0));
-
- Register result = frame_->GetTOSRegister();
- Register code = frame_->PopToRegister(result);
-
- DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
- code, result);
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->EmitPush(result);
-}
-
-
-class DeferredStringCharAt : public DeferredCode {
- public:
- DeferredStringCharAt(Register object,
- Register index,
- Register scratch1,
- Register scratch2,
- Register result)
- : result_(result),
- char_at_generator_(object,
- index,
- scratch1,
- scratch2,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharAtGenerator* fast_case_generator() {
- return &char_at_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_at_generator_.GenerateSlow(masm(), call_helper);
-
- __ bind(&need_conversion_);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ mov(result_, Operand(Smi::FromInt(0)));
- __ jmp(exit_label());
-
- __ bind(&index_out_of_range_);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
- __ jmp(exit_label());
- }
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharAtGenerator char_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharAt");
- ASSERT(args->length() == 2);
-
- Load(args->at(0));
- Load(args->at(1));
-
- Register index = frame_->PopToRegister();
- Register object = frame_->PopToRegister(index);
-
- // We need three extra registers.
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
- // Use r6 without notifying the virtual frame.
- Register result = r6;
-
- DeferredStringCharAt* deferred =
- new DeferredStringCharAt(object,
- index,
- scratch1,
- scratch2,
- result);
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->EmitPush(result);
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- JumpTarget answer;
- // We need the CC bits to come out as not_equal in the case where the
- // object is a smi. This can't be done with the usual test opcode so
- // we use XOR to get the right CC bits.
- Register possible_array = frame_->PopToRegister();
- Register scratch = VirtualFrame::scratch0();
- __ and_(scratch, possible_array, Operand(kSmiTagMask));
- __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
- answer.Branch(ne);
- // It is a heap object - get the map. Check if the object is a JS array.
- __ CompareObjectType(possible_array, scratch, scratch, JS_ARRAY_TYPE);
- answer.Bind();
- cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- JumpTarget answer;
- // We need the CC bits to come out as not_equal in the case where the
- // object is a smi. This can't be done with the usual test opcode so
- // we use XOR to get the right CC bits.
- Register possible_regexp = frame_->PopToRegister();
- Register scratch = VirtualFrame::scratch0();
- __ and_(scratch, possible_regexp, Operand(kSmiTagMask));
- __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
- answer.Branch(ne);
- // It is a heap object - get the map. Check if the object is a regexp.
- __ CompareObjectType(possible_regexp, scratch, scratch, JS_REGEXP_TYPE);
- answer.Bind();
- cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register possible_object = frame_->PopToRegister();
- __ tst(possible_object, Operand(kSmiTagMask));
- false_target()->Branch(eq);
-
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(possible_object, ip);
- true_target()->Branch(eq);
-
- Register map_reg = VirtualFrame::scratch0();
- __ ldr(map_reg, FieldMemOperand(possible_object, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kBitFieldOffset));
- __ tst(possible_object, Operand(1 << Map::kIsUndetectable));
- false_target()->Branch(ne);
-
- __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
- __ cmp(possible_object, Operand(FIRST_JS_OBJECT_TYPE));
- false_target()->Branch(lt);
- __ cmp(possible_object, Operand(LAST_JS_OBJECT_TYPE));
- cc_reg_ = le;
-}
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
- // typeof(arg) == function).
- // It includes undetectable objects (as opposed to IsObject).
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register value = frame_->PopToRegister();
- __ tst(value, Operand(kSmiTagMask));
- false_target()->Branch(eq);
- // Check that this is an object.
- __ ldr(value, FieldMemOperand(value, HeapObject::kMapOffset));
- __ ldrb(value, FieldMemOperand(value, Map::kInstanceTypeOffset));
- __ cmp(value, Operand(FIRST_JS_OBJECT_TYPE));
- cc_reg_ = ge;
-}
-
-
-// Deferred code to check whether the String JavaScript object is safe for using
-// default value of. This code is called after the bit caching this information
-// in the map has been checked with the map for the object in the map_result_
-// register. On return the register map_result_ contains 1 for true and 0 for
-// false.
-class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
- public:
- DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
- Register map_result,
- Register scratch1,
- Register scratch2)
- : object_(object),
- map_result_(map_result),
- scratch1_(scratch1),
- scratch2_(scratch2) { }
-
- virtual void Generate() {
- Label false_result;
-
- // Check that map is loaded as expected.
- if (FLAG_debug_code) {
- __ ldr(ip, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ cmp(map_result_, ip);
- __ Assert(eq, "Map not in expected register");
- }
-
- // Check for fast case object. Generate false result for slow case object.
- __ ldr(scratch1_, FieldMemOperand(object_, JSObject::kPropertiesOffset));
- __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(scratch1_, ip);
- __ b(eq, &false_result);
-
- // Look for valueOf symbol in the descriptor array, and indicate false if
- // found. The type is not checked, so if it is a transition it is a false
- // negative.
- __ ldr(map_result_,
- FieldMemOperand(map_result_, Map::kInstanceDescriptorsOffset));
- __ ldr(scratch2_, FieldMemOperand(map_result_, FixedArray::kLengthOffset));
- // map_result_: descriptor array
- // scratch2_: length of descriptor array
- // Calculate the end of the descriptor array.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kPointerSize == 4);
- __ add(scratch1_,
- map_result_,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(scratch1_,
- scratch1_,
- Operand(scratch2_, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Calculate location of the first key name.
- __ add(map_result_,
- map_result_,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag +
- DescriptorArray::kFirstIndex * kPointerSize));
- // Loop through all the keys in the descriptor array. If one of these is the
- // symbol valueOf the result is false.
- Label entry, loop;
- // The use of ip to store the valueOf symbol asumes that it is not otherwise
- // used in the loop below.
- __ mov(ip, Operand(FACTORY->value_of_symbol()));
- __ jmp(&entry);
- __ bind(&loop);
- __ ldr(scratch2_, MemOperand(map_result_, 0));
- __ cmp(scratch2_, ip);
- __ b(eq, &false_result);
- __ add(map_result_, map_result_, Operand(kPointerSize));
- __ bind(&entry);
- __ cmp(map_result_, Operand(scratch1_));
- __ b(ne, &loop);
-
- // Reload map as register map_result_ was used as temporary above.
- __ ldr(map_result_, FieldMemOperand(object_, HeapObject::kMapOffset));
-
- // If a valueOf property is not found on the object check that it's
- // prototype is the un-modified String prototype. If not result is false.
- __ ldr(scratch1_, FieldMemOperand(map_result_, Map::kPrototypeOffset));
- __ tst(scratch1_, Operand(kSmiTagMask));
- __ b(eq, &false_result);
- __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset));
- __ ldr(scratch2_,
- ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(scratch2_,
- FieldMemOperand(scratch2_, GlobalObject::kGlobalContextOffset));
- __ ldr(scratch2_,
- ContextOperand(
- scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ cmp(scratch1_, scratch2_);
- __ b(ne, &false_result);
-
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ ldrb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
- __ orr(scratch1_,
- scratch1_,
- Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ strb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
- __ mov(map_result_, Operand(1));
- __ jmp(exit_label());
- __ bind(&false_result);
- // Set false result.
- __ mov(map_result_, Operand(0, RelocInfo::NONE));
- }
-
- private:
- Register object_;
- Register map_result_;
- Register scratch1_;
- Register scratch2_;
-};
-
-
-void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register obj = frame_->PopToRegister(); // Pop the string wrapper.
- if (FLAG_debug_code) {
- __ AbortIfSmi(obj);
- }
-
- // Check whether this map has already been checked to be safe for default
- // valueOf.
- Register map_result = VirtualFrame::scratch0();
- __ ldr(map_result, FieldMemOperand(obj, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(map_result, Map::kBitField2Offset));
- __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- true_target()->Branch(ne);
-
- // We need an additional two scratch registers for the deferred code.
- Register scratch1 = VirtualFrame::scratch1();
- // Use r6 without notifying the virtual frame.
- Register scratch2 = r6;
-
- DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
- new DeferredIsStringWrapperSafeForDefaultValueOf(
- obj, map_result, scratch1, scratch2);
- deferred->Branch(eq);
- deferred->BindExit();
- __ tst(map_result, Operand(map_result));
- cc_reg_ = ne;
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (%_ClassOf(arg) === 'Function')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register possible_function = frame_->PopToRegister();
- __ tst(possible_function, Operand(kSmiTagMask));
- false_target()->Branch(eq);
- Register map_reg = VirtualFrame::scratch0();
- Register scratch = VirtualFrame::scratch1();
- __ CompareObjectType(possible_function, map_reg, scratch, JS_FUNCTION_TYPE);
- cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register possible_undetectable = frame_->PopToRegister();
- __ tst(possible_undetectable, Operand(kSmiTagMask));
- false_target()->Branch(eq);
- Register scratch = VirtualFrame::scratch0();
- __ ldr(scratch,
- FieldMemOperand(possible_undetectable, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsUndetectable));
- cc_reg_ = ne;
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- Register scratch0 = VirtualFrame::scratch0();
- Register scratch1 = VirtualFrame::scratch1();
- // Get the frame pointer for the calling frame.
- __ ldr(scratch0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- __ ldr(scratch1,
- MemOperand(scratch0, StandardFrameConstants::kContextOffset));
- __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ ldr(scratch0,
- MemOperand(scratch0, StandardFrameConstants::kCallerFPOffset), eq);
-
- // Check the marker in the calling frame.
- __ ldr(scratch1,
- MemOperand(scratch0, StandardFrameConstants::kMarkerOffset));
- __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
- cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- Register tos = frame_->GetTOSRegister();
- Register scratch0 = VirtualFrame::scratch0();
- Register scratch1 = VirtualFrame::scratch1();
-
- // Check if the calling frame is an arguments adaptor frame.
- __ ldr(scratch0,
- MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(scratch1,
- MemOperand(scratch0, StandardFrameConstants::kContextOffset));
- __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Get the number of formal parameters.
- __ mov(tos, Operand(Smi::FromInt(scope()->num_parameters())), LeaveCC, ne);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ ldr(tos,
- MemOperand(scratch0, ArgumentsAdaptorFrameConstants::kLengthOffset),
- eq);
-
- frame_->EmitPush(tos);
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- // Satisfy contract with ArgumentsAccessStub:
- // Load the key into r1 and the formal parameters count into r0.
- Load(args->at(0));
- frame_->PopToR1();
- frame_->SpillAll();
- __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
-
- // Call the shared stub to get to arguments[key].
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- frame_->CallStub(&stub, 0);
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateRandomHeapNumber(
- ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
- ASSERT(args->length() == 0);
-
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r4, Operand(r0));
-
- __ bind(&heapnumber_allocated);
-
- // Convert 32 random bits in r0 to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- if (CpuFeatures::IsSupported(VFP3)) {
- __ PrepareCallCFunction(1, r0);
- __ mov(r0, Operand(ExternalReference::isolate_address()));
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- CpuFeatures::Scope scope(VFP3);
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- // Create this constant using mov/orr to avoid PC relative load.
- __ mov(r1, Operand(0x41000000));
- __ orr(r1, r1, Operand(0x300000));
- // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
- __ vmov(d7, r0, r1);
- // Move 0x4130000000000000 to VFP.
- __ mov(r0, Operand(0, RelocInfo::NONE));
- __ vmov(d8, r0, r1);
- // Subtract and store the result in the heap number.
- __ vsub(d7, d7, d8);
- __ sub(r0, r4, Operand(kHeapObjectTag));
- __ vstr(d7, r0, HeapNumber::kValueOffset);
- frame_->EmitPush(r4);
- } else {
- __ PrepareCallCFunction(2, r0);
- __ mov(r0, Operand(r4));
- __ mov(r1, Operand(ExternalReference::isolate_address()));
- __ CallCFunction(
- ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
- frame_->EmitPush(r0);
- }
-}
-
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
-
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- frame_->SpillAll();
- frame_->CallStub(&stub, 2);
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
-
- SubStringStub stub;
- frame_->SpillAll();
- frame_->CallStub(&stub, 3);
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
-
- StringCompareStub stub;
- frame_->SpillAll();
- frame_->CallStub(&stub, 2);
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
- ASSERT_EQ(4, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
- Load(args->at(3));
- RegExpExecStub stub;
- frame_->SpillAll();
- frame_->CallStub(&stub, 4);
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0)); // Size of array, smi.
- Load(args->at(1)); // "index" property value.
- Load(args->at(2)); // "input" property value.
- RegExpConstructResultStub stub;
- frame_->SpillAll();
- frame_->CallStub(&stub, 3);
- frame_->EmitPush(r0);
-}
-
-
-class DeferredSearchCache: public DeferredCode {
- public:
- DeferredSearchCache(Register dst, Register cache, Register key)
- : dst_(dst), cache_(cache), key_(key) {
- set_comment("[ DeferredSearchCache");
- }
-
- virtual void Generate();
-
- private:
- Register dst_, cache_, key_;
-};
-
-
-void DeferredSearchCache::Generate() {
- __ Push(cache_, key_);
- __ CallRuntime(Runtime::kGetFromCache, 2);
- __ Move(dst_, r0);
-}
-
-
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- Isolate::Current()->global_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
- frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
- return;
- }
-
- Load(args->at(1));
-
- frame_->PopToR1();
- frame_->SpillAll();
- Register key = r1; // Just poped to r1
- Register result = r0; // Free, as frame has just been spilled.
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
-
- __ ldr(scratch1, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(scratch1,
- FieldMemOperand(scratch1, GlobalObject::kGlobalContextOffset));
- __ ldr(scratch1,
- ContextOperand(scratch1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ ldr(scratch1,
- FieldMemOperand(scratch1, FixedArray::OffsetOfElementAt(cache_id)));
-
- DeferredSearchCache* deferred =
- new DeferredSearchCache(result, scratch1, key);
-
- const int kFingerOffset =
- FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ ldr(result, FieldMemOperand(scratch1, kFingerOffset));
- // result now holds finger offset as a smi.
- __ add(scratch2, scratch1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // scratch2 now points to the start of fixed array elements.
- __ ldr(result,
- MemOperand(
- scratch2, result, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
- // Note side effect of PreIndex: scratch2 now points to the key of the pair.
- __ cmp(key, result);
- deferred->Branch(ne);
-
- __ ldr(result, MemOperand(scratch2, kPointerSize));
-
- deferred->BindExit();
- frame_->EmitPush(result);
-}
-
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
-
- // Load the argument on the stack and jump to the runtime.
- Load(args->at(0));
-
- NumberToStringStub stub;
- frame_->SpillAll();
- frame_->CallStub(&stub, 1);
- frame_->EmitPush(r0);
-}
-
-
-class DeferredSwapElements: public DeferredCode {
- public:
- DeferredSwapElements(Register object, Register index1, Register index2)
- : object_(object), index1_(index1), index2_(index2) {
- set_comment("[ DeferredSwapElements");
- }
-
- virtual void Generate();
-
- private:
- Register object_, index1_, index2_;
-};
-
-
-void DeferredSwapElements::Generate() {
- __ push(object_);
- __ push(index1_);
- __ push(index2_);
- __ CallRuntime(Runtime::kSwapElements, 3);
-}
-
-
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
- Comment cmnt(masm_, "[ GenerateSwapElements");
-
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
-
- VirtualFrame::SpilledScope spilled_scope(frame_);
-
- Register index2 = r2;
- Register index1 = r1;
- Register object = r0;
- Register tmp1 = r3;
- Register tmp2 = r4;
-
- frame_->EmitPop(index2);
- frame_->EmitPop(index1);
- frame_->EmitPop(object);
-
- DeferredSwapElements* deferred =
- new DeferredSwapElements(object, index1, index2);
-
- // Fetch the map and check if array is in fast case.
- // Check that object doesn't require security checks and
- // has no indexed interceptor.
- __ CompareObjectType(object, tmp1, tmp2, JS_ARRAY_TYPE);
- deferred->Branch(ne);
- __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
- __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
- deferred->Branch(ne);
-
- // Check the object's elements are in fast case and writable.
- __ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
- __ ldr(tmp2, FieldMemOperand(tmp1, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(tmp2, ip);
- deferred->Branch(ne);
-
- // Smi-tagging is equivalent to multiplying by 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- // Check that both indices are smis.
- __ mov(tmp2, index1);
- __ orr(tmp2, tmp2, index2);
- __ tst(tmp2, Operand(kSmiTagMask));
- deferred->Branch(ne);
-
- // Check that both indices are valid.
- __ ldr(tmp2, FieldMemOperand(object, JSArray::kLengthOffset));
- __ cmp(tmp2, index1);
- __ cmp(tmp2, index2, hi);
- deferred->Branch(ls);
-
- // Bring the offsets into the fixed array in tmp1 into index1 and
- // index2.
- __ mov(tmp2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(index1, tmp2, Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(index2, tmp2, Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Swap elements.
- Register tmp3 = object;
- object = no_reg;
- __ ldr(tmp3, MemOperand(tmp1, index1));
- __ ldr(tmp2, MemOperand(tmp1, index2));
- __ str(tmp3, MemOperand(tmp1, index2));
- __ str(tmp2, MemOperand(tmp1, index1));
-
- Label done;
- __ InNewSpace(tmp1, tmp2, eq, &done);
- // Possible optimization: do a check that both values are Smis
- // (or them and test against Smi mask.)
-
- __ mov(tmp2, tmp1);
- __ add(index1, index1, tmp1);
- __ add(index2, index2, tmp1);
- __ RecordWriteHelper(tmp1, index1, tmp3);
- __ RecordWriteHelper(tmp2, index2, tmp3);
- __ bind(&done);
-
- deferred->BindExit();
- __ LoadRoot(tmp1, Heap::kUndefinedValueRootIndex);
- frame_->EmitPush(tmp1);
-}
-
-
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
- Comment cmnt(masm_, "[ GenerateCallFunction");
-
- ASSERT(args->length() >= 2);
-
- int n_args = args->length() - 2; // for receiver and function.
- Load(args->at(0)); // receiver
- for (int i = 0; i < n_args; i++) {
- Load(args->at(i + 1));
- }
- Load(args->at(n_args + 1)); // function
- frame_->CallJSFunction(n_args);
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- if (CpuFeatures::IsSupported(VFP3)) {
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- frame_->SpillAllButCopyTOSToR0();
- frame_->CallStub(&stub, 1);
- } else {
- frame_->CallRuntime(Runtime::kMath_sin, 1);
- }
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- if (CpuFeatures::IsSupported(VFP3)) {
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- frame_->SpillAllButCopyTOSToR0();
- frame_->CallStub(&stub, 1);
- } else {
- frame_->CallRuntime(Runtime::kMath_cos, 1);
- }
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- if (CpuFeatures::IsSupported(VFP3)) {
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- frame_->SpillAllButCopyTOSToR0();
- frame_->CallStub(&stub, 1);
- } else {
- frame_->CallRuntime(Runtime::kMath_log, 1);
- }
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- Load(args->at(0));
- Load(args->at(1));
- Register lhs = frame_->PopToRegister();
- Register rhs = frame_->PopToRegister(lhs);
- __ cmp(lhs, rhs);
- cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- Load(args->at(0));
- Load(args->at(1));
- Register right = frame_->PopToRegister();
- Register left = frame_->PopToRegister(right);
- Register tmp = frame_->scratch0();
- Register tmp2 = frame_->scratch1();
-
- // Jumps to done must have the eq flag set if the test is successful
- // and clear if the test has failed.
- Label done;
-
- // Fail if either is a non-HeapObject.
- __ cmp(left, Operand(right));
- __ b(eq, &done);
- __ and_(tmp, left, Operand(right));
- __ eor(tmp, tmp, Operand(kSmiTagMask));
- __ tst(tmp, Operand(kSmiTagMask));
- __ b(ne, &done);
- __ ldr(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ldrb(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ cmp(tmp2, Operand(JS_REGEXP_TYPE));
- __ b(ne, &done);
- __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ cmp(tmp, Operand(tmp2));
- __ b(ne, &done);
- __ ldr(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
- __ ldr(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
- __ cmp(tmp, tmp2);
- __ bind(&done);
- cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register value = frame_->PopToRegister();
- Register tmp = frame_->scratch0();
- __ ldr(tmp, FieldMemOperand(value, String::kHashFieldOffset));
- __ tst(tmp, Operand(String::kContainsCachedArrayIndexMask));
- cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register value = frame_->PopToRegister();
-
- __ ldr(value, FieldMemOperand(value, String::kHashFieldOffset));
- __ IndexFromHash(value, value);
- frame_->EmitPush(value);
-}
-
-
-void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
- Load(args->at(0));
- Register value = frame_->PopToRegister();
- __ LoadRoot(value, Heap::kUndefinedValueRootIndex);
- frame_->EmitPush(value);
-}
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- if (CheckForInlineRuntimeCall(node)) {
- ASSERT((has_cc() && frame_->height() == original_height) ||
- (!has_cc() && frame_->height() == original_height + 1));
- return;
- }
-
- ZoneList<Expression*>* args = node->arguments();
- Comment cmnt(masm_, "[ CallRuntime");
- const Runtime::Function* function = node->function();
-
- if (function == NULL) {
- // Prepare stack for calling JS runtime function.
- // Push the builtins object found in the current global object.
- Register scratch = VirtualFrame::scratch0();
- __ ldr(scratch, GlobalObjectOperand());
- Register builtins = frame_->GetTOSRegister();
- __ ldr(builtins, FieldMemOperand(scratch, GlobalObject::kBuiltinsOffset));
- frame_->EmitPush(builtins);
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- VirtualFrame::SpilledScope spilled_scope(frame_);
-
- if (function == NULL) {
- // Call the JS runtime function.
- __ mov(r2, Operand(node->name()));
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> stub =
- ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
- frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
- __ ldr(cp, frame_->Context());
- frame_->EmitPush(r0);
- } else {
- // Call the C runtime function.
- frame_->CallRuntime(function, arg_count);
- frame_->EmitPush(r0);
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ UnaryOperation");
-
- Token::Value op = node->op();
-
- if (op == Token::NOT) {
- LoadCondition(node->expression(), false_target(), true_target(), true);
- // LoadCondition may (and usually does) leave a test and branch to
- // be emitted by the caller. In that case, negate the condition.
- if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);
-
- } else if (op == Token::DELETE) {
- Property* property = node->expression()->AsProperty();
- Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
- if (property != NULL) {
- Load(property->obj());
- Load(property->key());
- frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3);
- frame_->EmitPush(r0);
-
- } else if (variable != NULL) {
- // Delete of an unqualified identifier is disallowed in strict mode
- // but "delete this" is.
- ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
- Slot* slot = variable->AsSlot();
- if (variable->is_global()) {
- LoadGlobal();
- frame_->EmitPush(Operand(variable->name()));
- frame_->EmitPush(Operand(Smi::FromInt(kNonStrictMode)));
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3);
- frame_->EmitPush(r0);
-
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Delete from the context holding the named variable.
- frame_->EmitPush(cp);
- frame_->EmitPush(Operand(variable->name()));
- frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
- frame_->EmitPush(r0);
-
- } else {
- // Default: Result of deleting non-global, not dynamically
- // introduced variables is false.
- frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
- }
-
- } else {
- // Default: Result of deleting expressions is true.
- Load(node->expression()); // may have side-effects
- frame_->Drop();
- frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
- }
-
- } else if (op == Token::TYPEOF) {
- // Special case for loading the typeof expression; see comment on
- // LoadTypeofExpression().
- LoadTypeofExpression(node->expression());
- frame_->CallRuntime(Runtime::kTypeof, 1);
- frame_->EmitPush(r0); // r0 has result
-
- } else {
- bool can_overwrite = node->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-
- bool no_negative_zero = node->expression()->no_negative_zero();
- Load(node->expression());
- switch (op) {
- case Token::NOT:
- case Token::DELETE:
- case Token::TYPEOF:
- UNREACHABLE(); // handled above
- break;
-
- case Token::SUB: {
- frame_->PopToR0();
- GenericUnaryOpStub stub(
- Token::SUB,
- overwrite,
- NO_UNARY_FLAGS,
- no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
- frame_->CallStub(&stub, 0);
- frame_->EmitPush(r0); // r0 has result
- break;
- }
-
- case Token::BIT_NOT: {
- Register tos = frame_->PopToRegister();
- JumpTarget not_smi_label;
- JumpTarget continue_label;
- // Smi check.
- __ tst(tos, Operand(kSmiTagMask));
- not_smi_label.Branch(ne);
-
- __ mvn(tos, Operand(tos));
- __ bic(tos, tos, Operand(kSmiTagMask)); // Bit-clear inverted smi-tag.
- frame_->EmitPush(tos);
- // The fast case is the first to jump to the continue label, so it gets
- // to decide the virtual frame layout.
- continue_label.Jump();
-
- not_smi_label.Bind();
- frame_->SpillAll();
- __ Move(r0, tos);
- GenericUnaryOpStub stub(Token::BIT_NOT,
- overwrite,
- NO_UNARY_SMI_CODE_IN_STUB);
- frame_->CallStub(&stub, 0);
- frame_->EmitPush(r0);
-
- continue_label.Bind();
- break;
- }
-
- case Token::VOID:
- frame_->Drop();
- frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
- break;
-
- case Token::ADD: {
- Register tos = frame_->Peek();
- // Smi check.
- JumpTarget continue_label;
- __ tst(tos, Operand(kSmiTagMask));
- continue_label.Branch(eq);
-
- frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
- frame_->EmitPush(r0);
-
- continue_label.Bind();
- break;
- }
- default:
- UNREACHABLE();
- }
- }
- ASSERT(!has_valid_frame() ||
- (has_cc() && frame_->height() == original_height) ||
- (!has_cc() && frame_->height() == original_height + 1));
-}
-
-
-class DeferredCountOperation: public DeferredCode {
- public:
- DeferredCountOperation(Register value,
- bool is_increment,
- bool is_postfix,
- int target_size)
- : value_(value),
- is_increment_(is_increment),
- is_postfix_(is_postfix),
- target_size_(target_size) {}
-
- virtual void Generate() {
- VirtualFrame copied_frame(*frame_state()->frame());
-
- Label slow;
- // Check for smi operand.
- __ tst(value_, Operand(kSmiTagMask));
- __ b(ne, &slow);
-
- // Revert optimistic increment/decrement.
- if (is_increment_) {
- __ sub(value_, value_, Operand(Smi::FromInt(1)));
- } else {
- __ add(value_, value_, Operand(Smi::FromInt(1)));
- }
-
- // Slow case: Convert to number. At this point the
- // value to be incremented is in the value register..
- __ bind(&slow);
-
- // Convert the operand to a number.
- copied_frame.EmitPush(value_);
-
- copied_frame.InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
-
- if (is_postfix_) {
- // Postfix: store to result (on the stack).
- __ str(r0, MemOperand(sp, target_size_ * kPointerSize));
- }
-
- copied_frame.EmitPush(r0);
- copied_frame.EmitPush(Operand(Smi::FromInt(1)));
-
- if (is_increment_) {
- copied_frame.CallRuntime(Runtime::kNumberAdd, 2);
- } else {
- copied_frame.CallRuntime(Runtime::kNumberSub, 2);
- }
-
- __ Move(value_, r0);
-
- copied_frame.MergeTo(frame_state()->frame());
- }
-
- private:
- Register value_;
- bool is_increment_;
- bool is_postfix_;
- int target_size_;
-};
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ CountOperation");
- VirtualFrame::RegisterAllocationScope scope(this);
-
- bool is_postfix = node->is_postfix();
- bool is_increment = node->op() == Token::INC;
-
- Variable* var = node->expression()->AsVariableProxy()->AsVariable();
- bool is_const = (var != NULL && var->mode() == Variable::CONST);
- bool is_slot = (var != NULL && var->mode() == Variable::VAR);
-
- if (!is_const && is_slot && type_info(var->AsSlot()).IsSmi()) {
- // The type info declares that this variable is always a Smi. That
- // means it is a Smi both before and after the increment/decrement.
- // Lets make use of that to make a very minimal count.
- Reference target(this, node->expression(), !is_const);
- ASSERT(!target.is_illegal());
- target.GetValue(); // Pushes the value.
- Register value = frame_->PopToRegister();
- if (is_postfix) frame_->EmitPush(value);
- if (is_increment) {
- __ add(value, value, Operand(Smi::FromInt(1)));
- } else {
- __ sub(value, value, Operand(Smi::FromInt(1)));
- }
- frame_->EmitPush(value);
- target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
- if (is_postfix) frame_->Pop();
- ASSERT_EQ(original_height + 1, frame_->height());
- return;
- }
-
- // If it's a postfix expression and its result is not ignored and the
- // reference is non-trivial, then push a placeholder on the stack now
- // to hold the result of the expression.
- bool placeholder_pushed = false;
- if (!is_slot && is_postfix) {
- frame_->EmitPush(Operand(Smi::FromInt(0)));
- placeholder_pushed = true;
- }
-
- // A constant reference is not saved to, so a constant reference is not a
- // compound assignment reference.
- { Reference target(this, node->expression(), !is_const);
- if (target.is_illegal()) {
- // Spoof the virtual frame to have the expected height (one higher
- // than on entry).
- if (!placeholder_pushed) frame_->EmitPush(Operand(Smi::FromInt(0)));
- ASSERT_EQ(original_height + 1, frame_->height());
- return;
- }
-
- // This pushes 0, 1 or 2 words on the object to be used later when updating
- // the target. It also pushes the current value of the target.
- target.GetValue();
-
- bool value_is_known_smi = frame_->KnownSmiAt(0);
- Register value = frame_->PopToRegister();
-
- // Postfix: Store the old value as the result.
- if (placeholder_pushed) {
- frame_->SetElementAt(value, target.size());
- } else if (is_postfix) {
- frame_->EmitPush(value);
- __ mov(VirtualFrame::scratch0(), value);
- value = VirtualFrame::scratch0();
- }
-
- // We can't use any type information here since the virtual frame from the
- // deferred code may have lost information and we can't merge a virtual
- // frame with less specific type knowledge to a virtual frame with more
- // specific knowledge that has already used that specific knowledge to
- // generate code.
- frame_->ForgetTypeInfo();
-
- // The constructor here will capture the current virtual frame and use it to
- // merge to after the deferred code has run. No virtual frame changes are
- // allowed from here until the 'BindExit' below.
- DeferredCode* deferred =
- new DeferredCountOperation(value,
- is_increment,
- is_postfix,
- target.size());
- if (!value_is_known_smi) {
- // Check for smi operand.
- __ tst(value, Operand(kSmiTagMask));
-
- deferred->Branch(ne);
- }
-
- // Perform optimistic increment/decrement.
- if (is_increment) {
- __ add(value, value, Operand(Smi::FromInt(1)), SetCC);
- } else {
- __ sub(value, value, Operand(Smi::FromInt(1)), SetCC);
- }
-
- // If increment/decrement overflows, go to deferred code.
- deferred->Branch(vs);
-
- deferred->BindExit();
-
- // Store the new value in the target if not const.
- // At this point the answer is in the value register.
- frame_->EmitPush(value);
- // Set the target with the result, leaving the result on
- // top of the stack. Removes the target from the stack if
- // it has a non-zero size.
- if (!is_const) target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
- }
-
- // Postfix: Discard the new value and use the old.
- if (is_postfix) frame_->Pop();
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
- // According to ECMA-262 section 11.11, page 58, the binary logical
- // operators must yield the result of one of the two expressions
- // before any ToBoolean() conversions. This means that the value
- // produced by a && or || operator is not necessarily a boolean.
-
- // NOTE: If the left hand side produces a materialized value (not in
- // the CC register), we force the right hand side to do the
- // same. This is necessary because we may have to branch to the exit
- // after evaluating the left hand side (due to the shortcut
- // semantics), but the compiler must (statically) know if the result
- // of compiling the binary operation is materialized or not.
- if (node->op() == Token::AND) {
- JumpTarget is_true;
- LoadCondition(node->left(), &is_true, false_target(), false);
- if (has_valid_frame() && !has_cc()) {
- // The left-hand side result is on top of the virtual frame.
- JumpTarget pop_and_continue;
- JumpTarget exit;
-
- frame_->Dup();
- // Avoid popping the result if it converts to 'false' using the
- // standard ToBoolean() conversion as described in ECMA-262,
- // section 9.2, page 30.
- ToBoolean(&pop_and_continue, &exit);
- Branch(false, &exit);
-
- // Pop the result of evaluating the first part.
- pop_and_continue.Bind();
- frame_->Pop();
-
- // Evaluate right side expression.
- is_true.Bind();
- Load(node->right());
-
- // Exit (always with a materialized value).
- exit.Bind();
- } else if (has_cc() || is_true.is_linked()) {
- // The left-hand side is either (a) partially compiled to
- // control flow with a final branch left to emit or (b) fully
- // compiled to control flow and possibly true.
- if (has_cc()) {
- Branch(false, false_target());
- }
- is_true.Bind();
- LoadCondition(node->right(), true_target(), false_target(), false);
- } else {
- // Nothing to do.
- ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
- }
-
- } else {
- ASSERT(node->op() == Token::OR);
- JumpTarget is_false;
- LoadCondition(node->left(), true_target(), &is_false, false);
- if (has_valid_frame() && !has_cc()) {
- // The left-hand side result is on top of the virtual frame.
- JumpTarget pop_and_continue;
- JumpTarget exit;
-
- frame_->Dup();
- // Avoid popping the result if it converts to 'true' using the
- // standard ToBoolean() conversion as described in ECMA-262,
- // section 9.2, page 30.
- ToBoolean(&exit, &pop_and_continue);
- Branch(true, &exit);
-
- // Pop the result of evaluating the first part.
- pop_and_continue.Bind();
- frame_->Pop();
-
- // Evaluate right side expression.
- is_false.Bind();
- Load(node->right());
-
- // Exit (always with a materialized value).
- exit.Bind();
- } else if (has_cc() || is_false.is_linked()) {
- // The left-hand side is either (a) partially compiled to
- // control flow with a final branch left to emit or (b) fully
- // compiled to control flow and possibly false.
- if (has_cc()) {
- Branch(true, true_target());
- }
- is_false.Bind();
- LoadCondition(node->right(), true_target(), false_target(), false);
- } else {
- // Nothing to do.
- ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
- }
- }
-}
-
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ BinaryOperation");
-
- if (node->op() == Token::AND || node->op() == Token::OR) {
- GenerateLogicalBooleanOperation(node);
- } else {
- // Optimize for the case where (at least) one of the expressions
- // is a literal small integer.
- Literal* lliteral = node->left()->AsLiteral();
- Literal* rliteral = node->right()->AsLiteral();
- // NOTE: The code below assumes that the slow cases (calls to runtime)
- // never return a constant/immutable object.
- bool overwrite_left = node->left()->ResultOverwriteAllowed();
- bool overwrite_right = node->right()->ResultOverwriteAllowed();
-
- if (rliteral != NULL && rliteral->handle()->IsSmi()) {
- VirtualFrame::RegisterAllocationScope scope(this);
- Load(node->left());
- if (frame_->KnownSmiAt(0)) overwrite_left = false;
- SmiOperation(node->op(),
- rliteral->handle(),
- false,
- overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
- } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
- VirtualFrame::RegisterAllocationScope scope(this);
- Load(node->right());
- if (frame_->KnownSmiAt(0)) overwrite_right = false;
- SmiOperation(node->op(),
- lliteral->handle(),
- true,
- overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- GenerateInlineSmi inline_smi =
- loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
- if (lliteral != NULL) {
- ASSERT(!lliteral->handle()->IsSmi());
- inline_smi = DONT_GENERATE_INLINE_SMI;
- }
- if (rliteral != NULL) {
- ASSERT(!rliteral->handle()->IsSmi());
- inline_smi = DONT_GENERATE_INLINE_SMI;
- }
- VirtualFrame::RegisterAllocationScope scope(this);
- OverwriteMode overwrite_mode = NO_OVERWRITE;
- if (overwrite_left) {
- overwrite_mode = OVERWRITE_LEFT;
- } else if (overwrite_right) {
- overwrite_mode = OVERWRITE_RIGHT;
- }
- Load(node->left());
- Load(node->right());
- GenericBinaryOperation(node->op(), overwrite_mode, inline_smi);
- }
- }
- ASSERT(!has_valid_frame() ||
- (has_cc() && frame_->height() == original_height) ||
- (!has_cc() && frame_->height() == original_height + 1));
-}
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- frame_->EmitPush(MemOperand(frame_->Function()));
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ CompareOperation");
-
- VirtualFrame::RegisterAllocationScope nonspilled_scope(this);
-
- // Get the expressions from the node.
- Expression* left = node->left();
- Expression* right = node->right();
- Token::Value op = node->op();
-
- // To make typeof testing for natives implemented in JavaScript really
- // efficient, we generate special code for expressions of the form:
- // 'typeof <expression> == <string>'.
- UnaryOperation* operation = left->AsUnaryOperation();
- if ((op == Token::EQ || op == Token::EQ_STRICT) &&
- (operation != NULL && operation->op() == Token::TYPEOF) &&
- (right->AsLiteral() != NULL &&
- right->AsLiteral()->handle()->IsString())) {
- Handle<String> check(String::cast(*right->AsLiteral()->handle()));
-
- // Load the operand, move it to a register.
- LoadTypeofExpression(operation->expression());
- Register tos = frame_->PopToRegister();
-
- Register scratch = VirtualFrame::scratch0();
-
- if (check->Equals(HEAP->number_symbol())) {
- __ tst(tos, Operand(kSmiTagMask));
- true_target()->Branch(eq);
- __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(tos, ip);
- cc_reg_ = eq;
-
- } else if (check->Equals(HEAP->string_symbol())) {
- __ tst(tos, Operand(kSmiTagMask));
- false_target()->Branch(eq);
-
- __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
-
- // It can be an undetectable string object.
- __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
- __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
- __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
- false_target()->Branch(eq);
-
- __ ldrb(scratch, FieldMemOperand(tos, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
- cc_reg_ = lt;
-
- } else if (check->Equals(HEAP->boolean_symbol())) {
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(tos, ip);
- true_target()->Branch(eq);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(tos, ip);
- cc_reg_ = eq;
-
- } else if (check->Equals(HEAP->undefined_symbol())) {
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(tos, ip);
- true_target()->Branch(eq);
-
- __ tst(tos, Operand(kSmiTagMask));
- false_target()->Branch(eq);
-
- // It can be an undetectable object.
- __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
- __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
- __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
-
- cc_reg_ = eq;
-
- } else if (check->Equals(HEAP->function_symbol())) {
- __ tst(tos, Operand(kSmiTagMask));
- false_target()->Branch(eq);
- Register map_reg = scratch;
- __ CompareObjectType(tos, map_reg, tos, JS_FUNCTION_TYPE);
- true_target()->Branch(eq);
- // Regular expressions are callable so typeof == 'function'.
- __ CompareInstanceType(map_reg, tos, JS_REGEXP_TYPE);
- cc_reg_ = eq;
-
- } else if (check->Equals(HEAP->object_symbol())) {
- __ tst(tos, Operand(kSmiTagMask));
- false_target()->Branch(eq);
-
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(tos, ip);
- true_target()->Branch(eq);
-
- Register map_reg = scratch;
- __ CompareObjectType(tos, map_reg, tos, JS_REGEXP_TYPE);
- false_target()->Branch(eq);
-
- // It can be an undetectable object.
- __ ldrb(tos, FieldMemOperand(map_reg, Map::kBitFieldOffset));
- __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
- __ cmp(tos, Operand(1 << Map::kIsUndetectable));
- false_target()->Branch(eq);
-
- __ ldrb(tos, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
- __ cmp(tos, Operand(FIRST_JS_OBJECT_TYPE));
- false_target()->Branch(lt);
- __ cmp(tos, Operand(LAST_JS_OBJECT_TYPE));
- cc_reg_ = le;
-
- } else {
- // Uncommon case: typeof testing against a string literal that is
- // never returned from the typeof operator.
- false_target()->Jump();
- }
- ASSERT(!has_valid_frame() ||
- (has_cc() && frame_->height() == original_height));
- return;
- }
-
- switch (op) {
- case Token::EQ:
- Comparison(eq, left, right, false);
- break;
-
- case Token::LT:
- Comparison(lt, left, right);
- break;
-
- case Token::GT:
- Comparison(gt, left, right);
- break;
-
- case Token::LTE:
- Comparison(le, left, right);
- break;
-
- case Token::GTE:
- Comparison(ge, left, right);
- break;
-
- case Token::EQ_STRICT:
- Comparison(eq, left, right, true);
- break;
-
- case Token::IN: {
- Load(left);
- Load(right);
- frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
- frame_->EmitPush(r0);
- break;
- }
-
- case Token::INSTANCEOF: {
- Load(left);
- Load(right);
- InstanceofStub stub(InstanceofStub::kNoFlags);
- frame_->CallStub(&stub, 2);
- // At this point if instanceof succeeded then r0 == 0.
- __ tst(r0, Operand(r0));
- cc_reg_ = eq;
- break;
- }
-
- default:
- UNREACHABLE();
- }
- ASSERT((has_cc() && frame_->height() == original_height) ||
- (!has_cc() && frame_->height() == original_height + 1));
-}
-
-
-void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ CompareToNull");
-
- Load(node->expression());
- Register tos = frame_->PopToRegister();
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(tos, ip);
-
- // The 'null' value is only equal to 'undefined' if using non-strict
- // comparisons.
- if (!node->is_strict()) {
- true_target()->Branch(eq);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(tos, Operand(ip));
- true_target()->Branch(eq);
-
- __ tst(tos, Operand(kSmiTagMask));
- false_target()->Branch(eq);
-
- // It can be an undetectable object.
- __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
- __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset));
- __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
- __ cmp(tos, Operand(1 << Map::kIsUndetectable));
- }
-
- cc_reg_ = eq;
- ASSERT(has_cc() && frame_->height() == original_height);
-}
-
-
-class DeferredReferenceGetNamedValue: public DeferredCode {
- public:
- explicit DeferredReferenceGetNamedValue(Register receiver,
- Handle<String> name,
- bool is_contextual)
- : receiver_(receiver),
- name_(name),
- is_contextual_(is_contextual),
- is_dont_delete_(false) {
- set_comment(is_contextual
- ? "[ DeferredReferenceGetNamedValue (contextual)"
- : "[ DeferredReferenceGetNamedValue");
- }
-
- virtual void Generate();
-
- void set_is_dont_delete(bool value) {
- ASSERT(is_contextual_);
- is_dont_delete_ = value;
- }
-
- private:
- Register receiver_;
- Handle<String> name_;
- bool is_contextual_;
- bool is_dont_delete_;
-};
-
-
-// Convention for this is that on entry the receiver is in a register that
-// is not used by the stack. On exit the answer is found in that same
-// register and the stack has the same height.
-void DeferredReferenceGetNamedValue::Generate() {
-#ifdef DEBUG
- int expected_height = frame_state()->frame()->height();
-#endif
- VirtualFrame copied_frame(*frame_state()->frame());
- copied_frame.SpillAll();
-
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
- ASSERT(!receiver_.is(scratch1) && !receiver_.is(scratch2));
- __ DecrementCounter(masm_->isolate()->counters()->named_load_inline(),
- 1, scratch1, scratch2);
- __ IncrementCounter(masm_->isolate()->counters()->named_load_inline_miss(),
- 1, scratch1, scratch2);
-
- // Ensure receiver in r0 and name in r2 to match load ic calling convention.
- __ Move(r0, receiver_);
- __ mov(r2, Operand(name_));
-
- // The rest of the instructions in the deferred code must be together.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- Builtins::kLoadIC_Initialize));
- RelocInfo::Mode mode = is_contextual_
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- __ Call(ic, mode);
- // We must mark the code just after the call with the correct marker.
- MacroAssembler::NopMarkerTypes code_marker;
- if (is_contextual_) {
- code_marker = is_dont_delete_
- ? MacroAssembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE
- : MacroAssembler::PROPERTY_ACCESS_INLINED_CONTEXT;
- } else {
- code_marker = MacroAssembler::PROPERTY_ACCESS_INLINED;
- }
- __ MarkCode(code_marker);
-
- // At this point the answer is in r0. We move it to the expected register
- // if necessary.
- __ Move(receiver_, r0);
-
- // Now go back to the frame that we entered with. This will not overwrite
- // the receiver register since that register was not in use when we came
- // in. The instructions emitted by this merge are skipped over by the
- // inline load patching mechanism when looking for the branch instruction
- // that tells it where the code to patch is.
- copied_frame.MergeTo(frame_state()->frame());
-
- // Block the constant pool for one more instruction after leaving this
- // constant pool block scope to include the branch instruction ending the
- // deferred code.
- __ BlockConstPoolFor(1);
- }
- ASSERT_EQ(expected_height, frame_state()->frame()->height());
-}
-
-
-class DeferredReferenceGetKeyedValue: public DeferredCode {
- public:
- DeferredReferenceGetKeyedValue(Register key, Register receiver)
- : key_(key), receiver_(receiver) {
- set_comment("[ DeferredReferenceGetKeyedValue");
- }
-
- virtual void Generate();
-
- private:
- Register key_;
- Register receiver_;
-};
-
-
-// Takes key and register in r0 and r1 or vice versa. Returns result
-// in r0.
-void DeferredReferenceGetKeyedValue::Generate() {
- ASSERT((key_.is(r0) && receiver_.is(r1)) ||
- (key_.is(r1) && receiver_.is(r0)));
-
- VirtualFrame copied_frame(*frame_state()->frame());
- copied_frame.SpillAll();
-
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
- __ DecrementCounter(masm_->isolate()->counters()->keyed_load_inline(),
- 1, scratch1, scratch2);
- __ IncrementCounter(masm_->isolate()->counters()->keyed_load_inline_miss(),
- 1, scratch1, scratch2);
-
- // Ensure key in r0 and receiver in r1 to match keyed load ic calling
- // convention.
- if (key_.is(r1)) {
- __ Swap(r0, r1, ip);
- }
-
- // The rest of the instructions in the deferred code must be together.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Call keyed load IC. It has the arguments key and receiver in r0 and r1.
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- Builtins::kKeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The call must be followed by a nop instruction to indicate that the
- // keyed load has been inlined.
- __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);
-
- // Now go back to the frame that we entered with. This will not overwrite
- // the receiver or key registers since they were not in use when we came
- // in. The instructions emitted by this merge are skipped over by the
- // inline load patching mechanism when looking for the branch instruction
- // that tells it where the code to patch is.
- copied_frame.MergeTo(frame_state()->frame());
-
- // Block the constant pool for one more instruction after leaving this
- // constant pool block scope to include the branch instruction ending the
- // deferred code.
- __ BlockConstPoolFor(1);
- }
-}
-
-
-class DeferredReferenceSetKeyedValue: public DeferredCode {
- public:
- DeferredReferenceSetKeyedValue(Register value,
- Register key,
- Register receiver,
- StrictModeFlag strict_mode)
- : value_(value),
- key_(key),
- receiver_(receiver),
- strict_mode_(strict_mode) {
- set_comment("[ DeferredReferenceSetKeyedValue");
- }
-
- virtual void Generate();
-
- private:
- Register value_;
- Register key_;
- Register receiver_;
- StrictModeFlag strict_mode_;
-};
-
-
-void DeferredReferenceSetKeyedValue::Generate() {
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
- __ DecrementCounter(masm_->isolate()->counters()->keyed_store_inline(),
- 1, scratch1, scratch2);
- __ IncrementCounter(masm_->isolate()->counters()->keyed_store_inline_miss(),
- 1, scratch1, scratch2);
-
- // Ensure value in r0, key in r1 and receiver in r2 to match keyed store ic
- // calling convention.
- if (value_.is(r1)) {
- __ Swap(r0, r1, ip);
- }
- ASSERT(receiver_.is(r2));
-
- // The rest of the instructions in the deferred code must be together.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Call keyed store IC. It has the arguments value, key and receiver in r0,
- // r1 and r2.
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- (strict_mode_ == kStrictMode)
- ? Builtins::kKeyedStoreIC_Initialize_Strict
- : Builtins::kKeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The call must be followed by a nop instruction to indicate that the
- // keyed store has been inlined.
- __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);
-
- // Block the constant pool for one more instruction after leaving this
- // constant pool block scope to include the branch instruction ending the
- // deferred code.
- __ BlockConstPoolFor(1);
- }
-}
-
-
-class DeferredReferenceSetNamedValue: public DeferredCode {
- public:
- DeferredReferenceSetNamedValue(Register value,
- Register receiver,
- Handle<String> name,
- StrictModeFlag strict_mode)
- : value_(value),
- receiver_(receiver),
- name_(name),
- strict_mode_(strict_mode) {
- set_comment("[ DeferredReferenceSetNamedValue");
- }
-
- virtual void Generate();
-
- private:
- Register value_;
- Register receiver_;
- Handle<String> name_;
- StrictModeFlag strict_mode_;
-};
-
-
-// Takes value in r0, receiver in r1 and returns the result (the
-// value) in r0.
-void DeferredReferenceSetNamedValue::Generate() {
- // Record the entry frame and spill.
- VirtualFrame copied_frame(*frame_state()->frame());
- copied_frame.SpillAll();
-
- // Ensure value in r0, receiver in r1 to match store ic calling
- // convention.
- ASSERT(value_.is(r0) && receiver_.is(r1));
- __ mov(r2, Operand(name_));
-
- // The rest of the instructions in the deferred code must be together.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Call keyed store IC. It has the arguments value, key and receiver in r0,
- // r1 and r2.
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- (strict_mode_ == kStrictMode) ? Builtins::kStoreIC_Initialize_Strict
- : Builtins::kStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The call must be followed by a nop instruction to indicate that the
- // named store has been inlined.
- __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);
-
- // Go back to the frame we entered with. The instructions
- // generated by this merge are skipped over by the inline store
- // patching mechanism when looking for the branch instruction that
- // tells it where the code to patch is.
- copied_frame.MergeTo(frame_state()->frame());
-
- // Block the constant pool for one more instruction after leaving this
- // constant pool block scope to include the branch instruction ending the
- // deferred code.
- __ BlockConstPoolFor(1);
- }
-}
-
-
-// Consumes the top of stack (the receiver) and pushes the result instead.
-void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
- bool contextual_load_in_builtin =
- is_contextual &&
- (ISOLATE->bootstrapper()->IsActive() ||
- (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
-
- if (scope()->is_global_scope() ||
- loop_nesting() == 0 ||
- contextual_load_in_builtin) {
- Comment cmnt(masm(), "[ Load from named Property");
- // Setup the name register and call load IC.
- frame_->CallLoadIC(name,
- is_contextual
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET);
- frame_->EmitPush(r0); // Push answer.
- } else {
- // Inline the in-object property case.
- Comment cmnt(masm(), is_contextual
- ? "[ Inlined contextual property load"
- : "[ Inlined named property load");
-
- // Counter will be decremented in the deferred code. Placed here to avoid
- // having it in the instruction stream below where patching will occur.
- if (is_contextual) {
- __ IncrementCounter(
- masm_->isolate()->counters()->named_load_global_inline(),
- 1, frame_->scratch0(), frame_->scratch1());
- } else {
- __ IncrementCounter(masm_->isolate()->counters()->named_load_inline(),
- 1, frame_->scratch0(), frame_->scratch1());
- }
-
- // The following instructions are the inlined load of an in-object property.
- // Parts of this code is patched, so the exact instructions generated needs
- // to be fixed. Therefore the instruction pool is blocked when generating
- // this code
-
- // Load the receiver from the stack.
- Register receiver = frame_->PopToRegister();
-
- DeferredReferenceGetNamedValue* deferred =
- new DeferredReferenceGetNamedValue(receiver, name, is_contextual);
-
- bool is_dont_delete = false;
- if (is_contextual) {
- if (!info_->closure().is_null()) {
- // When doing lazy compilation we can check if the global cell
- // already exists and use its "don't delete" status as a hint.
- AssertNoAllocation no_gc;
- v8::internal::GlobalObject* global_object =
- info_->closure()->context()->global();
- LookupResult lookup;
- global_object->LocalLookupRealNamedProperty(*name, &lookup);
- if (lookup.IsProperty() && lookup.type() == NORMAL) {
- ASSERT(lookup.holder() == global_object);
- ASSERT(global_object->property_dictionary()->ValueAt(
- lookup.GetDictionaryEntry())->IsJSGlobalPropertyCell());
- is_dont_delete = lookup.IsDontDelete();
- }
- }
- if (is_dont_delete) {
- __ IncrementCounter(
- masm_->isolate()->counters()->dont_delete_hint_hit(),
- 1, frame_->scratch0(), frame_->scratch1());
- }
- }
-
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- if (!is_contextual) {
- // Check that the receiver is a heap object.
- __ tst(receiver, Operand(kSmiTagMask));
- deferred->Branch(eq);
- }
-
- // Check for the_hole_value if necessary.
- // Below we rely on the number of instructions generated, and we can't
- // cope with the Check macro which does not generate a fixed number of
- // instructions.
- Label skip, check_the_hole, cont;
- if (FLAG_debug_code && is_contextual && is_dont_delete) {
- __ b(&skip);
- __ bind(&check_the_hole);
- __ Check(ne, "DontDelete cells can't contain the hole");
- __ b(&cont);
- __ bind(&skip);
- }
-
-#ifdef DEBUG
- int InlinedNamedLoadInstructions = 5;
- Label check_inlined_codesize;
- masm_->bind(&check_inlined_codesize);
-#endif
-
- Register scratch = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
-
- // Check the map. The null map used below is patched by the inline cache
- // code. Therefore we can't use a LoadRoot call.
- __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ mov(scratch2, Operand(FACTORY->null_value()));
- __ cmp(scratch, scratch2);
- deferred->Branch(ne);
-
- if (is_contextual) {
-#ifdef DEBUG
- InlinedNamedLoadInstructions += 1;
-#endif
- // Load the (initially invalid) cell and get its value.
- masm()->mov(receiver, Operand(FACTORY->null_value()));
- __ ldr(receiver,
- FieldMemOperand(receiver, JSGlobalPropertyCell::kValueOffset));
-
- deferred->set_is_dont_delete(is_dont_delete);
-
- if (!is_dont_delete) {
-#ifdef DEBUG
- InlinedNamedLoadInstructions += 3;
-#endif
- __ cmp(receiver, Operand(FACTORY->the_hole_value()));
- deferred->Branch(eq);
- } else if (FLAG_debug_code) {
-#ifdef DEBUG
- InlinedNamedLoadInstructions += 3;
-#endif
- __ cmp(receiver, Operand(FACTORY->the_hole_value()));
- __ b(&check_the_hole, eq);
- __ bind(&cont);
- }
- } else {
- // Initially use an invalid index. The index will be patched by the
- // inline cache code.
- __ ldr(receiver, MemOperand(receiver, 0));
- }
-
- // Make sure that the expected number of instructions are generated.
- // If the code before is updated, the offsets in ic-arm.cc
- // LoadIC::PatchInlinedContextualLoad and PatchInlinedLoad need
- // to be updated.
- ASSERT_EQ(InlinedNamedLoadInstructions,
- masm_->InstructionsGeneratedSince(&check_inlined_codesize));
- }
-
- deferred->BindExit();
- // At this point the receiver register has the result, either from the
- // deferred code or from the inlined code.
- frame_->EmitPush(receiver);
- }
-}
-
-
-void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
- int expected_height = frame()->height() - (is_contextual ? 1 : 2);
-#endif
-
- Result result;
- if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
- frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
- } else {
- // Inline the in-object property case.
- JumpTarget slow, done;
-
- // Get the value and receiver from the stack.
- frame()->PopToR0();
- Register value = r0;
- frame()->PopToR1();
- Register receiver = r1;
-
- DeferredReferenceSetNamedValue* deferred =
- new DeferredReferenceSetNamedValue(
- value, receiver, name, strict_mode_flag());
-
- // Check that the receiver is a heap object.
- __ tst(receiver, Operand(kSmiTagMask));
- deferred->Branch(eq);
-
- // The following instructions are the part of the inlined
- // in-object property store code which can be patched. Therefore
- // the exact number of instructions generated must be fixed, so
- // the constant pool is blocked while generating this code.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- Register scratch0 = VirtualFrame::scratch0();
- Register scratch1 = VirtualFrame::scratch1();
-
- // Check the map. Initially use an invalid map to force a
- // failure. The map check will be patched in the runtime system.
- __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
-#ifdef DEBUG
- Label check_inlined_codesize;
- masm_->bind(&check_inlined_codesize);
-#endif
- __ mov(scratch0, Operand(FACTORY->null_value()));
- __ cmp(scratch0, scratch1);
- deferred->Branch(ne);
-
- int offset = 0;
- __ str(value, MemOperand(receiver, offset));
-
- // Update the write barrier and record its size. We do not use
- // the RecordWrite macro here because we want the offset
- // addition instruction first to make it easy to patch.
- Label record_write_start, record_write_done;
- __ bind(&record_write_start);
- // Add offset into the object.
- __ add(scratch0, receiver, Operand(offset));
- // Test that the object is not in the new space. We cannot set
- // region marks for new space pages.
- __ InNewSpace(receiver, scratch1, eq, &record_write_done);
- // Record the actual write.
- __ RecordWriteHelper(receiver, scratch0, scratch1);
- __ bind(&record_write_done);
- // Clobber all input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (FLAG_debug_code) {
- __ mov(receiver, Operand(BitCast<int32_t>(kZapValue)));
- __ mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
- __ mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
- }
- // Check that this is the first inlined write barrier or that
- // this inlined write barrier has the same size as all the other
- // inlined write barriers.
- ASSERT((Isolate::Current()->inlined_write_barrier_size() == -1) ||
- (Isolate::Current()->inlined_write_barrier_size() ==
- masm()->InstructionsGeneratedSince(&record_write_start)));
- Isolate::Current()->set_inlined_write_barrier_size(
- masm()->InstructionsGeneratedSince(&record_write_start));
-
- // Make sure that the expected number of instructions are generated.
- ASSERT_EQ(GetInlinedNamedStoreInstructionsAfterPatch(),
- masm()->InstructionsGeneratedSince(&check_inlined_codesize));
- }
- deferred->BindExit();
- }
- ASSERT_EQ(expected_height, frame()->height());
-}
-
-
-void CodeGenerator::EmitKeyedLoad() {
- if (loop_nesting() == 0) {
- Comment cmnt(masm_, "[ Load from keyed property");
- frame_->CallKeyedLoadIC();
- } else {
- // Inline the keyed load.
- Comment cmnt(masm_, "[ Inlined load from keyed property");
-
- // Counter will be decremented in the deferred code. Placed here to avoid
- // having it in the instruction stream below where patching will occur.
- __ IncrementCounter(masm_->isolate()->counters()->keyed_load_inline(),
- 1, frame_->scratch0(), frame_->scratch1());
-
- // Load the key and receiver from the stack.
- bool key_is_known_smi = frame_->KnownSmiAt(0);
- Register key = frame_->PopToRegister();
- Register receiver = frame_->PopToRegister(key);
-
- // The deferred code expects key and receiver in registers.
- DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(key, receiver);
-
- // Check that the receiver is a heap object.
- __ tst(receiver, Operand(kSmiTagMask));
- deferred->Branch(eq);
-
- // The following instructions are the part of the inlined load keyed
- // property code which can be patched. Therefore the exact number of
- // instructions generated need to be fixed, so the constant pool is blocked
- // while generating this code.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
- // Check the map. The null map used below is patched by the inline cache
- // code.
- __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- // Check that the key is a smi.
- if (!key_is_known_smi) {
- __ tst(key, Operand(kSmiTagMask));
- deferred->Branch(ne);
- }
-
-#ifdef DEBUG
- Label check_inlined_codesize;
- masm_->bind(&check_inlined_codesize);
-#endif
- __ mov(scratch2, Operand(FACTORY->null_value()));
- __ cmp(scratch1, scratch2);
- deferred->Branch(ne);
-
- // Get the elements array from the receiver.
- __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ AssertFastElements(scratch1);
-
- // Check that key is within bounds. Use unsigned comparison to handle
- // negative keys.
- __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
- __ cmp(scratch2, key);
- deferred->Branch(ls); // Unsigned less equal.
-
- // Load and check that the result is not the hole (key is a smi).
- __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
- __ add(scratch1,
- scratch1,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(scratch1,
- MemOperand(scratch1, key, LSL,
- kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
- __ cmp(scratch1, scratch2);
- deferred->Branch(eq);
-
- __ mov(r0, scratch1);
- // Make sure that the expected number of instructions are generated.
- ASSERT_EQ(GetInlinedKeyedLoadInstructionsAfterPatch(),
- masm_->InstructionsGeneratedSince(&check_inlined_codesize));
- }
-
- deferred->BindExit();
- }
-}
-
-
-void CodeGenerator::EmitKeyedStore(StaticType* key_type,
- WriteBarrierCharacter wb_info) {
- // Generate inlined version of the keyed store if the code is in a loop
- // and the key is likely to be a smi.
- if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
- // Inline the keyed store.
- Comment cmnt(masm_, "[ Inlined store to keyed property");
-
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
- Register scratch3 = r3;
-
- // Counter will be decremented in the deferred code. Placed here to avoid
- // having it in the instruction stream below where patching will occur.
- __ IncrementCounter(masm_->isolate()->counters()->keyed_store_inline(),
- 1, scratch1, scratch2);
-
-
- // Load the value, key and receiver from the stack.
- bool value_is_harmless = frame_->KnownSmiAt(0);
- if (wb_info == NEVER_NEWSPACE) value_is_harmless = true;
- bool key_is_smi = frame_->KnownSmiAt(1);
- Register value = frame_->PopToRegister();
- Register key = frame_->PopToRegister(value);
- VirtualFrame::SpilledScope spilled(frame_);
- Register receiver = r2;
- frame_->EmitPop(receiver);
-
-#ifdef DEBUG
- bool we_remembered_the_write_barrier = value_is_harmless;
-#endif
-
- // The deferred code expects value, key and receiver in registers.
- DeferredReferenceSetKeyedValue* deferred =
- new DeferredReferenceSetKeyedValue(
- value, key, receiver, strict_mode_flag());
-
- // Check that the value is a smi. As this inlined code does not set the
- // write barrier it is only possible to store smi values.
- if (!value_is_harmless) {
- // If the value is not likely to be a Smi then let's test the fixed array
- // for new space instead. See below.
- if (wb_info == LIKELY_SMI) {
- __ tst(value, Operand(kSmiTagMask));
- deferred->Branch(ne);
-#ifdef DEBUG
- we_remembered_the_write_barrier = true;
-#endif
- }
- }
-
- if (!key_is_smi) {
- // Check that the key is a smi.
- __ tst(key, Operand(kSmiTagMask));
- deferred->Branch(ne);
- }
-
- // Check that the receiver is a heap object.
- __ tst(receiver, Operand(kSmiTagMask));
- deferred->Branch(eq);
-
- // Check that the receiver is a JSArray.
- __ CompareObjectType(receiver, scratch1, scratch1, JS_ARRAY_TYPE);
- deferred->Branch(ne);
-
- // Get the elements array from the receiver.
- __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
- if (!value_is_harmless && wb_info != LIKELY_SMI) {
- Label ok;
- __ and_(scratch2,
- scratch1,
- Operand(ExternalReference::new_space_mask(isolate())));
- __ cmp(scratch2, Operand(ExternalReference::new_space_start(isolate())));
- __ tst(value, Operand(kSmiTagMask), ne);
- deferred->Branch(ne);
-#ifdef DEBUG
- we_remembered_the_write_barrier = true;
-#endif
- }
- // Check that the elements array is not a dictionary.
- __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
-
- // The following instructions are the part of the inlined store keyed
- // property code which can be patched. Therefore the exact number of
- // instructions generated need to be fixed, so the constant pool is blocked
- // while generating this code.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
-#ifdef DEBUG
- Label check_inlined_codesize;
- masm_->bind(&check_inlined_codesize);
-#endif
-
- // Read the fixed array map from the constant pool (not from the root
- // array) so that the value can be patched. When debugging, we patch this
- // comparison to always fail so that we will hit the IC call in the
- // deferred code which will allow the debugger to break for fast case
- // stores.
- __ mov(scratch3, Operand(FACTORY->fixed_array_map()));
- __ cmp(scratch2, scratch3);
- deferred->Branch(ne);
-
- // Check that the key is within bounds. Both the key and the length of
- // the JSArray are smis (because the fixed array check above ensures the
- // elements are in fast case). Use unsigned comparison to handle negative
- // keys.
- __ ldr(scratch3, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ cmp(scratch3, key);
- deferred->Branch(ls); // Unsigned less equal.
-
- // Store the value.
- __ add(scratch1, scratch1,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ str(value,
- MemOperand(scratch1, key, LSL,
- kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
-
- // Make sure that the expected number of instructions are generated.
- ASSERT_EQ(kInlinedKeyedStoreInstructionsAfterPatch,
- masm_->InstructionsGeneratedSince(&check_inlined_codesize));
- }
-
- ASSERT(we_remembered_the_write_barrier);
-
- deferred->BindExit();
- } else {
- frame()->CallKeyedStoreIC(strict_mode_flag());
- }
-}
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() { return true; }
-#endif
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-Handle<String> Reference::GetName() {
- ASSERT(type_ == NAMED);
- Property* property = expression_->AsProperty();
- if (property == NULL) {
- // Global variable reference treated as a named property reference.
- VariableProxy* proxy = expression_->AsVariableProxy();
- ASSERT(proxy->AsVariable() != NULL);
- ASSERT(proxy->AsVariable()->is_global());
- return proxy->name();
- } else {
- Literal* raw_name = property->key()->AsLiteral();
- ASSERT(raw_name != NULL);
- return Handle<String>(String::cast(*raw_name->handle()));
- }
-}
-
-
-void Reference::DupIfPersist() {
- if (persist_after_get_) {
- switch (type_) {
- case KEYED:
- cgen_->frame()->Dup2();
- break;
- case NAMED:
- cgen_->frame()->Dup();
- // Fall through.
- case UNLOADED:
- case ILLEGAL:
- case SLOT:
- // Do nothing.
- ;
- }
- } else {
- set_unloaded();
- }
-}
-
-
-void Reference::GetValue() {
- ASSERT(cgen_->HasValidEntryRegisters());
- ASSERT(!is_illegal());
- ASSERT(!cgen_->has_cc());
- MacroAssembler* masm = cgen_->masm();
- Property* property = expression_->AsProperty();
- if (property != NULL) {
- cgen_->CodeForSourcePosition(property->position());
- }
-
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Load from Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- ASSERT(slot != NULL);
- DupIfPersist();
- cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- break;
- }
-
- case NAMED: {
- Variable* var = expression_->AsVariableProxy()->AsVariable();
- bool is_global = var != NULL;
- ASSERT(!is_global || var->is_global());
- Handle<String> name = GetName();
- DupIfPersist();
- cgen_->EmitNamedLoad(name, is_global);
- break;
- }
-
- case KEYED: {
- ASSERT(property != NULL);
- DupIfPersist();
- cgen_->EmitKeyedLoad();
- cgen_->frame()->EmitPush(r0);
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
- ASSERT(!is_illegal());
- ASSERT(!cgen_->has_cc());
- MacroAssembler* masm = cgen_->masm();
- VirtualFrame* frame = cgen_->frame();
- Property* property = expression_->AsProperty();
- if (property != NULL) {
- cgen_->CodeForSourcePosition(property->position());
- }
-
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Store to Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- cgen_->StoreToSlot(slot, init_state);
- set_unloaded();
- break;
- }
-
- case NAMED: {
- Comment cmnt(masm, "[ Store to named Property");
- cgen_->EmitNamedStore(GetName(), false);
- frame->EmitPush(r0);
- set_unloaded();
- break;
- }
-
- case KEYED: {
- Comment cmnt(masm, "[ Store to keyed Property");
- Property* property = expression_->AsProperty();
- ASSERT(property != NULL);
- cgen_->CodeForSourcePosition(property->position());
- cgen_->EmitKeyedStore(property->key()->type(), wb_info);
- frame->EmitPush(r0);
- set_unloaded();
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-const char* GenericBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int len = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(len);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
-
- OS::SNPrintF(Vector<char>(name_, len),
- "GenericBinaryOpStub_%s_%s%s_%s",
- op_name,
- overwrite_name,
- specialized_on_rhs_ ? "_ConstantRhs" : "",
- BinaryOpIC::GetName(runtime_operands_type_));
- return name_;
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/codegen-arm.h b/src/3rdparty/v8/src/arm/codegen-arm.h
deleted file mode 100644
index 9b1f103..0000000
--- a/src/3rdparty/v8/src/arm/codegen-arm.h
+++ /dev/null
@@ -1,595 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_CODEGEN_ARM_H_
-#define V8_ARM_CODEGEN_ARM_H_
-
-#include "ast.h"
-#include "code-stubs-arm.h"
-#include "ic-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations
-class CompilationInfo;
-class DeferredCode;
-class JumpTarget;
-class RegisterAllocator;
-class RegisterFile;
-
-enum InitState { CONST_INIT, NOT_CONST_INIT };
-enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
-enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE };
-
-
-// -------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that puts a
-// reference on the virtual frame. The reference may be consumed
-// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
-// When the lifetime (scope) of a valid reference ends, it must have
-// been consumed, and be in state UNLOADED.
-class Reference BASE_EMBEDDED {
- public:
- // The values of the types is important, see size().
- enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
- Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get = false);
- ~Reference();
-
- Expression* expression() const { return expression_; }
- Type type() const { return type_; }
- void set_type(Type value) {
- ASSERT_EQ(ILLEGAL, type_);
- type_ = value;
- }
-
- void set_unloaded() {
- ASSERT_NE(ILLEGAL, type_);
- ASSERT_NE(UNLOADED, type_);
- type_ = UNLOADED;
- }
- // The size the reference takes up on the stack.
- int size() const {
- return (type_ < SLOT) ? 0 : type_;
- }
-
- bool is_illegal() const { return type_ == ILLEGAL; }
- bool is_slot() const { return type_ == SLOT; }
- bool is_property() const { return type_ == NAMED || type_ == KEYED; }
- bool is_unloaded() const { return type_ == UNLOADED; }
-
- // Return the name. Only valid for named property references.
- Handle<String> GetName();
-
- // Generate code to push the value of the reference on top of the
- // expression stack. The reference is expected to be already on top of
- // the expression stack, and it is consumed by the call unless the
- // reference is for a compound assignment.
- // If the reference is not consumed, it is left in place under its value.
- void GetValue();
-
- // Generate code to store the value on top of the expression stack in the
- // reference. The reference is expected to be immediately below the value
- // on the expression stack. The value is stored in the location specified
- // by the reference, and is left on top of the stack, after the reference
- // is popped from beneath it (unloaded).
- void SetValue(InitState init_state, WriteBarrierCharacter wb);
-
- // This is in preparation for something that uses the reference on the stack.
- // If we need this reference afterwards get then dup it now. Otherwise mark
- // it as used.
- inline void DupIfPersist();
-
- private:
- CodeGenerator* cgen_;
- Expression* expression_;
- Type type_;
- // Keep the reference on the stack after get, so it can be used by set later.
- bool persist_after_get_;
-};
-
-
-// -------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the label pair). It is threaded through the
-// call stack. Constructing a state implicitly pushes it on the owning code
-// generator's stack of states, and destroying one implicitly pops it.
-
-class CodeGenState BASE_EMBEDDED {
- public:
- // Create an initial code generator state. Destroying the initial state
- // leaves the code generator with a NULL state.
- explicit CodeGenState(CodeGenerator* owner);
-
- // Destroy a code generator state and restore the owning code generator's
- // previous state.
- virtual ~CodeGenState();
-
- virtual JumpTarget* true_target() const { return NULL; }
- virtual JumpTarget* false_target() const { return NULL; }
-
- protected:
- inline CodeGenerator* owner() { return owner_; }
- inline CodeGenState* previous() const { return previous_; }
-
- private:
- CodeGenerator* owner_;
- CodeGenState* previous_;
-};
-
-
-class ConditionCodeGenState : public CodeGenState {
- public:
- // Create a code generator state based on a code generator's current
- // state. The new state has its own pair of branch labels.
- ConditionCodeGenState(CodeGenerator* owner,
- JumpTarget* true_target,
- JumpTarget* false_target);
-
- virtual JumpTarget* true_target() const { return true_target_; }
- virtual JumpTarget* false_target() const { return false_target_; }
-
- private:
- JumpTarget* true_target_;
- JumpTarget* false_target_;
-};
-
-
-class TypeInfoCodeGenState : public CodeGenState {
- public:
- TypeInfoCodeGenState(CodeGenerator* owner,
- Slot* slot_number,
- TypeInfo info);
- ~TypeInfoCodeGenState();
-
- virtual JumpTarget* true_target() const { return previous()->true_target(); }
- virtual JumpTarget* false_target() const {
- return previous()->false_target();
- }
-
- private:
- Slot* slot_;
- TypeInfo old_type_info_;
-};
-
-
-// -------------------------------------------------------------------------
-// Arguments allocation mode
-
-enum ArgumentsAllocationMode {
- NO_ARGUMENTS_ALLOCATION,
- EAGER_ARGUMENTS_ALLOCATION,
- LAZY_ARGUMENTS_ALLOCATION
-};
-
-
-// -------------------------------------------------------------------------
-// CodeGenerator
-
-class CodeGenerator: public AstVisitor {
- public:
- static bool MakeCode(CompilationInfo* info);
-
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info);
-
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- static bool ShouldGenerateLog(Expression* type);
-#endif
-
- static void SetFunctionInfo(Handle<JSFunction> fun,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script);
-
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
- // Accessors
- MacroAssembler* masm() { return masm_; }
- VirtualFrame* frame() const { return frame_; }
- inline Handle<Script> script();
-
- bool has_valid_frame() const { return frame_ != NULL; }
-
- // Set the virtual frame to be new_frame, with non-frame register
- // reference counts given by non_frame_registers. The non-frame
- // register reference counts of the old frame are returned in
- // non_frame_registers.
- void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
- void DeleteFrame();
-
- RegisterAllocator* allocator() const { return allocator_; }
-
- CodeGenState* state() { return state_; }
- void set_state(CodeGenState* state) { state_ = state; }
-
- TypeInfo type_info(Slot* slot) {
- int index = NumberOfSlot(slot);
- if (index == kInvalidSlotNumber) return TypeInfo::Unknown();
- return (*type_info_)[index];
- }
-
- TypeInfo set_type_info(Slot* slot, TypeInfo info) {
- int index = NumberOfSlot(slot);
- ASSERT(index >= kInvalidSlotNumber);
- if (index != kInvalidSlotNumber) {
- TypeInfo previous_value = (*type_info_)[index];
- (*type_info_)[index] = info;
- return previous_value;
- }
- return TypeInfo::Unknown();
- }
-
- void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
- // Constants related to patching of inlined load/store.
- static int GetInlinedKeyedLoadInstructionsAfterPatch() {
- return FLAG_debug_code ? 32 : 13;
- }
- static const int kInlinedKeyedStoreInstructionsAfterPatch = 8;
- static int GetInlinedNamedStoreInstructionsAfterPatch() {
- ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
- return Isolate::Current()->inlined_write_barrier_size() + 4;
- }
-
- private:
- // Type of a member function that generates inline code for a native function.
- typedef void (CodeGenerator::*InlineFunctionGenerator)
- (ZoneList<Expression*>*);
-
- static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
- // Construction/Destruction
- explicit CodeGenerator(MacroAssembler* masm);
-
- // Accessors
- inline bool is_eval();
- inline Scope* scope();
- inline bool is_strict_mode();
- inline StrictModeFlag strict_mode_flag();
-
- // Generating deferred code.
- void ProcessDeferred();
-
- static const int kInvalidSlotNumber = -1;
-
- int NumberOfSlot(Slot* slot);
-
- // State
- bool has_cc() const { return cc_reg_ != al; }
- JumpTarget* true_target() const { return state_->true_target(); }
- JumpTarget* false_target() const { return state_->false_target(); }
-
- // Track loop nesting level.
- int loop_nesting() const { return loop_nesting_; }
- void IncrementLoopNesting() { loop_nesting_++; }
- void DecrementLoopNesting() { loop_nesting_--; }
-
- // Node visitors.
- void VisitStatements(ZoneList<Statement*>* statements);
-
- virtual void VisitSlot(Slot* node);
-#define DEF_VISIT(type) \
- virtual void Visit##type(type* node);
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- // Main code generation function
- void Generate(CompilationInfo* info);
-
- // Generate the return sequence code. Should be called no more than
- // once per compiled function, immediately after binding the return
- // target (which can not be done more than once). The return value should
- // be in r0.
- void GenerateReturnSequence();
-
- // Returns the arguments allocation mode.
- ArgumentsAllocationMode ArgumentsMode();
-
- // Store the arguments object and allocate it if necessary.
- void StoreArgumentsObject(bool initial);
-
- // The following are used by class Reference.
- void LoadReference(Reference* ref);
- void UnloadReference(Reference* ref);
-
- MemOperand SlotOperand(Slot* slot, Register tmp);
-
- MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
- Register tmp,
- Register tmp2,
- JumpTarget* slow);
-
- // Expressions
- void LoadCondition(Expression* x,
- JumpTarget* true_target,
- JumpTarget* false_target,
- bool force_cc);
- void Load(Expression* expr);
- void LoadGlobal();
- void LoadGlobalReceiver(Register scratch);
-
- // Read a value from a slot and leave it on top of the expression stack.
- void LoadFromSlot(Slot* slot, TypeofState typeof_state);
- void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
-
- // Store the value on top of the stack to a slot.
- void StoreToSlot(Slot* slot, InitState init_state);
-
- // Support for compiling assignment expressions.
- void EmitSlotAssignment(Assignment* node);
- void EmitNamedPropertyAssignment(Assignment* node);
- void EmitKeyedPropertyAssignment(Assignment* node);
-
- // Load a named property, returning it in r0. The receiver is passed on the
- // stack, and remains there.
- void EmitNamedLoad(Handle<String> name, bool is_contextual);
-
- // Store to a named property. If the store is contextual, value is passed on
- // the frame and consumed. Otherwise, receiver and value are passed on the
- // frame and consumed. The result is returned in r0.
- void EmitNamedStore(Handle<String> name, bool is_contextual);
-
- // Load a keyed property, leaving it in r0. The receiver and key are
- // passed on the stack, and remain there.
- void EmitKeyedLoad();
-
- // Store a keyed property. Key and receiver are on the stack and the value is
- // in r0. Result is returned in r0.
- void EmitKeyedStore(StaticType* key_type, WriteBarrierCharacter wb_info);
-
- void LoadFromGlobalSlotCheckExtensions(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow);
-
- // Support for loading from local/global variables and arguments
- // whose location is known unless they are shadowed by
- // eval-introduced bindings. Generates no code for unsupported slot
- // types and therefore expects to fall through to the slow jump target.
- void EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow,
- JumpTarget* done);
-
- // Special code for typeof expressions: Unfortunately, we must
- // be careful when loading the expression in 'typeof'
- // expressions. We are not allowed to throw reference errors for
- // non-existing properties of the global object, so we must make it
- // look like an explicit property access, instead of an access
- // through the context chain.
- void LoadTypeofExpression(Expression* x);
-
- void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
-
- // Generate code that computes a shortcutting logical operation.
- void GenerateLogicalBooleanOperation(BinaryOperation* node);
-
- void GenericBinaryOperation(Token::Value op,
- OverwriteMode overwrite_mode,
- GenerateInlineSmi inline_smi,
- int known_rhs =
- GenericBinaryOpStub::kUnknownIntValue);
- void Comparison(Condition cc,
- Expression* left,
- Expression* right,
- bool strict = false);
-
- void SmiOperation(Token::Value op,
- Handle<Object> value,
- bool reversed,
- OverwriteMode mode);
-
- void CallWithArguments(ZoneList<Expression*>* arguments,
- CallFunctionFlags flags,
- int position);
-
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments). We call x the applicand and y the receiver.
- // The optimization avoids allocating an arguments object if possible.
- void CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position);
-
- // Control flow
- void Branch(bool if_true, JumpTarget* target);
- void CheckStack();
-
- bool CheckForInlineRuntimeCall(CallRuntime* node);
-
- static Handle<Code> ComputeLazyCompile(int argc);
- void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
- // Declare global variables and functions in the given array of
- // name/value pairs.
- void DeclareGlobals(Handle<FixedArray> pairs);
-
- // Instantiate the function based on the shared function info.
- void InstantiateFunction(Handle<SharedFunctionInfo> function_info,
- bool pretenure);
-
- // Support for type checks.
- void GenerateIsSmi(ZoneList<Expression*>* args);
- void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
- void GenerateIsArray(ZoneList<Expression*>* args);
- void GenerateIsRegExp(ZoneList<Expression*>* args);
- void GenerateIsObject(ZoneList<Expression*>* args);
- void GenerateIsSpecObject(ZoneList<Expression*>* args);
- void GenerateIsFunction(ZoneList<Expression*>* args);
- void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
- void GenerateIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args);
-
- // Support for construct call checks.
- void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
- // Support for arguments.length and arguments[?].
- void GenerateArgumentsLength(ZoneList<Expression*>* args);
- void GenerateArguments(ZoneList<Expression*>* args);
-
- // Support for accessing the class and value fields of an object.
- void GenerateClassOf(ZoneList<Expression*>* args);
- void GenerateValueOf(ZoneList<Expression*>* args);
- void GenerateSetValueOf(ZoneList<Expression*>* args);
-
- // Fast support for charCodeAt(n).
- void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateStringCharFromCode(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateStringCharAt(ZoneList<Expression*>* args);
-
- // Fast support for object equality testing.
- void GenerateObjectEquals(ZoneList<Expression*>* args);
-
- void GenerateLog(ZoneList<Expression*>* args);
-
- // Fast support for Math.random().
- void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
- // Fast support for StringAdd.
- void GenerateStringAdd(ZoneList<Expression*>* args);
-
- // Fast support for SubString.
- void GenerateSubString(ZoneList<Expression*>* args);
-
- // Fast support for StringCompare.
- void GenerateStringCompare(ZoneList<Expression*>* args);
-
- // Support for direct calls from JavaScript to native RegExp code.
- void GenerateRegExpExec(ZoneList<Expression*>* args);
-
- void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
-
- // Support for fast native caches.
- void GenerateGetFromCache(ZoneList<Expression*>* args);
-
- // Fast support for number to string.
- void GenerateNumberToString(ZoneList<Expression*>* args);
-
- // Fast swapping of elements.
- void GenerateSwapElements(ZoneList<Expression*>* args);
-
- // Fast call for custom callbacks.
- void GenerateCallFunction(ZoneList<Expression*>* args);
-
- // Fast call to math functions.
- void GenerateMathPow(ZoneList<Expression*>* args);
- void GenerateMathSin(ZoneList<Expression*>* args);
- void GenerateMathCos(ZoneList<Expression*>* args);
- void GenerateMathSqrt(ZoneList<Expression*>* args);
- void GenerateMathLog(ZoneList<Expression*>* args);
-
- void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
-
- void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
- void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
- void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
-
- // Simple condition analysis.
- enum ConditionAnalysis {
- ALWAYS_TRUE,
- ALWAYS_FALSE,
- DONT_KNOW
- };
- ConditionAnalysis AnalyzeCondition(Expression* cond);
-
- // Methods used to indicate which source code is generated for. Source
- // positions are collected by the assembler and emitted with the relocation
- // information.
- void CodeForFunctionPosition(FunctionLiteral* fun);
- void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(Statement* node);
- void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
- void CodeForSourcePosition(int pos);
-
-#ifdef DEBUG
- // True if the registers are valid for entry to a block.
- bool HasValidEntryRegisters();
-#endif
-
- List<DeferredCode*> deferred_;
-
- // Assembler
- MacroAssembler* masm_; // to generate code
-
- CompilationInfo* info_;
-
- // Code generation state
- VirtualFrame* frame_;
- RegisterAllocator* allocator_;
- Condition cc_reg_;
- CodeGenState* state_;
- int loop_nesting_;
-
- Vector<TypeInfo>* type_info_;
-
- // Jump targets
- BreakTarget function_return_;
-
- // True if the function return is shadowed (ie, jumping to the target
- // function_return_ does not jump to the true function return, but rather
- // to some unlinking code).
- bool function_return_is_shadowed_;
-
- friend class VirtualFrame;
- friend class Isolate;
- friend class JumpTarget;
- friend class Reference;
- friend class FastCodeGenerator;
- friend class FullCodeGenerator;
- friend class FullCodeGenSyntaxChecker;
- friend class InlineRuntimeFunctionsTable;
- friend class LCodeGen;
-
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_CODEGEN_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/constants-arm.cc b/src/3rdparty/v8/src/arm/constants-arm.cc
deleted file mode 100644
index bf9da23..0000000
--- a/src/3rdparty/v8/src/arm/constants-arm.cc
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "constants-arm.h"
-
-
-namespace v8 {
-namespace internal {
-
-double Instruction::DoubleImmedVmov() const {
- // Reconstruct a double from the immediate encoded in the vmov instruction.
- //
- // instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh]
- // double: [aBbbbbbb,bbcdefgh,00000000,00000000,
- // 00000000,00000000,00000000,00000000]
- //
- // where B = ~b. Only the high 16 bits are affected.
- uint64_t high16;
- high16 = (Bits(17, 16) << 4) | Bits(3, 0); // xxxxxxxx,xxcdefgh.
- high16 |= (0xff * Bit(18)) << 6; // xxbbbbbb,bbxxxxxx.
- high16 |= (Bit(18) ^ 1) << 14; // xBxxxxxx,xxxxxxxx.
- high16 |= Bit(19) << 15; // axxxxxxx,xxxxxxxx.
-
- uint64_t imm = high16 << 48;
- double d;
- memcpy(&d, &imm, 8);
- return d;
-}
-
-
-// These register names are defined in a way to match the native disassembler
-// formatting. See for example the command "objdump -d <binary file>".
-const char* Registers::names_[kNumRegisters] = {
- "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
- "r8", "r9", "r10", "fp", "ip", "sp", "lr", "pc",
-};
-
-
-// List of alias names which can be used when referring to ARM registers.
-const Registers::RegisterAlias Registers::aliases_[] = {
- {10, "sl"},
- {11, "r11"},
- {12, "r12"},
- {13, "r13"},
- {14, "r14"},
- {15, "r15"},
- {kNoRegister, NULL}
-};
-
-
-const char* Registers::Name(int reg) {
- const char* result;
- if ((0 <= reg) && (reg < kNumRegisters)) {
- result = names_[reg];
- } else {
- result = "noreg";
- }
- return result;
-}
-
-
-// Support for VFP registers s0 to s31 (d0 to d15).
-// Note that "sN:sM" is the same as "dN/2"
-// These register names are defined in a way to match the native disassembler
-// formatting. See for example the command "objdump -d <binary file>".
-const char* VFPRegisters::names_[kNumVFPRegisters] = {
- "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
- "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
- "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
- "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
- "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
- "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15"
-};
-
-
-const char* VFPRegisters::Name(int reg, bool is_double) {
- ASSERT((0 <= reg) && (reg < kNumVFPRegisters));
- return names_[reg + (is_double ? kNumVFPSingleRegisters : 0)];
-}
-
-
-int VFPRegisters::Number(const char* name, bool* is_double) {
- for (int i = 0; i < kNumVFPRegisters; i++) {
- if (strcmp(names_[i], name) == 0) {
- if (i < kNumVFPSingleRegisters) {
- *is_double = false;
- return i;
- } else {
- *is_double = true;
- return i - kNumVFPSingleRegisters;
- }
- }
- }
-
- // No register with the requested name found.
- return kNoRegister;
-}
-
-
-int Registers::Number(const char* name) {
- // Look through the canonical names.
- for (int i = 0; i < kNumRegisters; i++) {
- if (strcmp(names_[i], name) == 0) {
- return i;
- }
- }
-
- // Look through the alias names.
- int i = 0;
- while (aliases_[i].reg != kNoRegister) {
- if (strcmp(aliases_[i].name, name) == 0) {
- return aliases_[i].reg;
- }
- i++;
- }
-
- // No register with the requested name found.
- return kNoRegister;
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/constants-arm.h b/src/3rdparty/v8/src/arm/constants-arm.h
deleted file mode 100644
index 0ac567c..0000000
--- a/src/3rdparty/v8/src/arm/constants-arm.h
+++ /dev/null
@@ -1,776 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_CONSTANTS_ARM_H_
-#define V8_ARM_CONSTANTS_ARM_H_
-
-// The simulator emulates the EABI so we define the USE_ARM_EABI macro if we
-// are not running on real ARM hardware. One reason for this is that the
-// old ABI uses fp registers in the calling convention and the simulator does
-// not simulate fp registers or coroutine instructions.
-#if defined(__ARM_EABI__) || !defined(__arm__)
-# define USE_ARM_EABI 1
-#endif
-
-// This means that interwork-compatible jump instructions are generated. We
-// want to generate them on the simulator too so it makes snapshots that can
-// be used on real hardware.
-#if defined(__THUMB_INTERWORK__) || !defined(__arm__)
-# define USE_THUMB_INTERWORK 1
-#endif
-
-#if defined(__ARM_ARCH_7A__) || \
- defined(__ARM_ARCH_7R__) || \
- defined(__ARM_ARCH_7__)
-# define CAN_USE_ARMV7_INSTRUCTIONS 1
-#endif
-
-#if defined(__ARM_ARCH_6__) || \
- defined(__ARM_ARCH_6J__) || \
- defined(__ARM_ARCH_6K__) || \
- defined(__ARM_ARCH_6Z__) || \
- defined(__ARM_ARCH_6ZK__) || \
- defined(__ARM_ARCH_6T2__) || \
- defined(CAN_USE_ARMV7_INSTRUCTIONS)
-# define CAN_USE_ARMV6_INSTRUCTIONS 1
-#endif
-
-#if defined(__ARM_ARCH_5T__) || \
- defined(__ARM_ARCH_5TE__) || \
- defined(CAN_USE_ARMV6_INSTRUCTIONS)
-# define CAN_USE_ARMV5_INSTRUCTIONS 1
-# define CAN_USE_THUMB_INSTRUCTIONS 1
-#endif
-
-// Simulator should support ARM5 instructions and unaligned access by default.
-#if !defined(__arm__)
-# define CAN_USE_ARMV5_INSTRUCTIONS 1
-# define CAN_USE_THUMB_INSTRUCTIONS 1
-
-# ifndef CAN_USE_UNALIGNED_ACCESSES
-# define CAN_USE_UNALIGNED_ACCESSES 1
-# endif
-
-#endif
-
-#if CAN_USE_UNALIGNED_ACCESSES
-#define V8_TARGET_CAN_READ_UNALIGNED 1
-#endif
-
-// Using blx may yield better code, so use it when required or when available
-#if defined(USE_THUMB_INTERWORK) || defined(CAN_USE_ARMV5_INSTRUCTIONS)
-#define USE_BLX 1
-#endif
-
-namespace v8 {
-namespace internal {
-
-// Constant pool marker.
-static const int kConstantPoolMarkerMask = 0xffe00000;
-static const int kConstantPoolMarker = 0x0c000000;
-static const int kConstantPoolLengthMask = 0x001ffff;
-
-// Number of registers in normal ARM mode.
-static const int kNumRegisters = 16;
-
-// VFP support.
-static const int kNumVFPSingleRegisters = 32;
-static const int kNumVFPDoubleRegisters = 16;
-static const int kNumVFPRegisters =
- kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
-
-// PC is register 15.
-static const int kPCRegister = 15;
-static const int kNoRegister = -1;
-
-// -----------------------------------------------------------------------------
-// Conditions.
-
-// Defines constants and accessor classes to assemble, disassemble and
-// simulate ARM instructions.
-//
-// Section references in the code refer to the "ARM Architecture Reference
-// Manual" from July 2005 (available at http://www.arm.com/miscPDFs/14128.pdf)
-//
-// Constants for specific fields are defined in their respective named enums.
-// General constants are in an anonymous enum in class Instr.
-
-// Values for the condition field as defined in section A3.2
-enum Condition {
- kNoCondition = -1,
-
- eq = 0 << 28, // Z set Equal.
- ne = 1 << 28, // Z clear Not equal.
- cs = 2 << 28, // C set Unsigned higher or same.
- cc = 3 << 28, // C clear Unsigned lower.
- mi = 4 << 28, // N set Negative.
- pl = 5 << 28, // N clear Positive or zero.
- vs = 6 << 28, // V set Overflow.
- vc = 7 << 28, // V clear No overflow.
- hi = 8 << 28, // C set, Z clear Unsigned higher.
- ls = 9 << 28, // C clear or Z set Unsigned lower or same.
- ge = 10 << 28, // N == V Greater or equal.
- lt = 11 << 28, // N != V Less than.
- gt = 12 << 28, // Z clear, N == V Greater than.
- le = 13 << 28, // Z set or N != V Less then or equal
- al = 14 << 28, // Always.
-
- kSpecialCondition = 15 << 28, // Special condition (refer to section A3.2.1).
- kNumberOfConditions = 16,
-
- // Aliases.
- hs = cs, // C set Unsigned higher or same.
- lo = cc // C clear Unsigned lower.
-};
-
-
-inline Condition NegateCondition(Condition cond) {
- ASSERT(cond != al);
- return static_cast<Condition>(cond ^ ne);
-}
-
-
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cond) {
- switch (cond) {
- case lo:
- return hi;
- case hi:
- return lo;
- case hs:
- return ls;
- case ls:
- return hs;
- case lt:
- return gt;
- case gt:
- return lt;
- case ge:
- return le;
- case le:
- return ge;
- default:
- return cond;
- };
-}
-
-
-// -----------------------------------------------------------------------------
-// Instructions encoding.
-
-// Instr is merely used by the Assembler to distinguish 32bit integers
-// representing instructions from usual 32 bit values.
-// Instruction objects are pointers to 32bit values, and provide methods to
-// access the various ISA fields.
-typedef int32_t Instr;
-
-
-// Opcodes for Data-processing instructions (instructions with a type 0 and 1)
-// as defined in section A3.4
-enum Opcode {
- AND = 0 << 21, // Logical AND.
- EOR = 1 << 21, // Logical Exclusive OR.
- SUB = 2 << 21, // Subtract.
- RSB = 3 << 21, // Reverse Subtract.
- ADD = 4 << 21, // Add.
- ADC = 5 << 21, // Add with Carry.
- SBC = 6 << 21, // Subtract with Carry.
- RSC = 7 << 21, // Reverse Subtract with Carry.
- TST = 8 << 21, // Test.
- TEQ = 9 << 21, // Test Equivalence.
- CMP = 10 << 21, // Compare.
- CMN = 11 << 21, // Compare Negated.
- ORR = 12 << 21, // Logical (inclusive) OR.
- MOV = 13 << 21, // Move.
- BIC = 14 << 21, // Bit Clear.
- MVN = 15 << 21 // Move Not.
-};
-
-
-// The bits for bit 7-4 for some type 0 miscellaneous instructions.
-enum MiscInstructionsBits74 {
- // With bits 22-21 01.
- BX = 1 << 4,
- BXJ = 2 << 4,
- BLX = 3 << 4,
- BKPT = 7 << 4,
-
- // With bits 22-21 11.
- CLZ = 1 << 4
-};
-
-
-// Instruction encoding bits and masks.
-enum {
- H = 1 << 5, // Halfword (or byte).
- S6 = 1 << 6, // Signed (or unsigned).
- L = 1 << 20, // Load (or store).
- S = 1 << 20, // Set condition code (or leave unchanged).
- W = 1 << 21, // Writeback base register (or leave unchanged).
- A = 1 << 21, // Accumulate in multiply instruction (or not).
- B = 1 << 22, // Unsigned byte (or word).
- N = 1 << 22, // Long (or short).
- U = 1 << 23, // Positive (or negative) offset/index.
- P = 1 << 24, // Offset/pre-indexed addressing (or post-indexed addressing).
- I = 1 << 25, // Immediate shifter operand (or not).
-
- B4 = 1 << 4,
- B5 = 1 << 5,
- B6 = 1 << 6,
- B7 = 1 << 7,
- B8 = 1 << 8,
- B9 = 1 << 9,
- B12 = 1 << 12,
- B16 = 1 << 16,
- B18 = 1 << 18,
- B19 = 1 << 19,
- B20 = 1 << 20,
- B21 = 1 << 21,
- B22 = 1 << 22,
- B23 = 1 << 23,
- B24 = 1 << 24,
- B25 = 1 << 25,
- B26 = 1 << 26,
- B27 = 1 << 27,
- B28 = 1 << 28,
-
- // Instruction bit masks.
- kCondMask = 15 << 28,
- kALUMask = 0x6f << 21,
- kRdMask = 15 << 12, // In str instruction.
- kCoprocessorMask = 15 << 8,
- kOpCodeMask = 15 << 21, // In data-processing instructions.
- kImm24Mask = (1 << 24) - 1,
- kOff12Mask = (1 << 12) - 1
-};
-
-
-// -----------------------------------------------------------------------------
-// Addressing modes and instruction variants.
-
-// Condition code updating mode.
-enum SBit {
- SetCC = 1 << 20, // Set condition code.
- LeaveCC = 0 << 20 // Leave condition code unchanged.
-};
-
-
-// Status register selection.
-enum SRegister {
- CPSR = 0 << 22,
- SPSR = 1 << 22
-};
-
-
-// Shifter types for Data-processing operands as defined in section A5.1.2.
-enum ShiftOp {
- LSL = 0 << 5, // Logical shift left.
- LSR = 1 << 5, // Logical shift right.
- ASR = 2 << 5, // Arithmetic shift right.
- ROR = 3 << 5, // Rotate right.
-
- // RRX is encoded as ROR with shift_imm == 0.
- // Use a special code to make the distinction. The RRX ShiftOp is only used
- // as an argument, and will never actually be encoded. The Assembler will
- // detect it and emit the correct ROR shift operand with shift_imm == 0.
- RRX = -1,
- kNumberOfShifts = 4
-};
-
-
-// Status register fields.
-enum SRegisterField {
- CPSR_c = CPSR | 1 << 16,
- CPSR_x = CPSR | 1 << 17,
- CPSR_s = CPSR | 1 << 18,
- CPSR_f = CPSR | 1 << 19,
- SPSR_c = SPSR | 1 << 16,
- SPSR_x = SPSR | 1 << 17,
- SPSR_s = SPSR | 1 << 18,
- SPSR_f = SPSR | 1 << 19
-};
-
-// Status register field mask (or'ed SRegisterField enum values).
-typedef uint32_t SRegisterFieldMask;
-
-
-// Memory operand addressing mode.
-enum AddrMode {
- // Bit encoding P U W.
- Offset = (8|4|0) << 21, // Offset (without writeback to base).
- PreIndex = (8|4|1) << 21, // Pre-indexed addressing with writeback.
- PostIndex = (0|4|0) << 21, // Post-indexed addressing with writeback.
- NegOffset = (8|0|0) << 21, // Negative offset (without writeback to base).
- NegPreIndex = (8|0|1) << 21, // Negative pre-indexed with writeback.
- NegPostIndex = (0|0|0) << 21 // Negative post-indexed with writeback.
-};
-
-
-// Load/store multiple addressing mode.
-enum BlockAddrMode {
- // Bit encoding P U W .
- da = (0|0|0) << 21, // Decrement after.
- ia = (0|4|0) << 21, // Increment after.
- db = (8|0|0) << 21, // Decrement before.
- ib = (8|4|0) << 21, // Increment before.
- da_w = (0|0|1) << 21, // Decrement after with writeback to base.
- ia_w = (0|4|1) << 21, // Increment after with writeback to base.
- db_w = (8|0|1) << 21, // Decrement before with writeback to base.
- ib_w = (8|4|1) << 21, // Increment before with writeback to base.
-
- // Alias modes for comparison when writeback does not matter.
- da_x = (0|0|0) << 21, // Decrement after.
- ia_x = (0|4|0) << 21, // Increment after.
- db_x = (8|0|0) << 21, // Decrement before.
- ib_x = (8|4|0) << 21 // Increment before.
-};
-
-
-// Coprocessor load/store operand size.
-enum LFlag {
- Long = 1 << 22, // Long load/store coprocessor.
- Short = 0 << 22 // Short load/store coprocessor.
-};
-
-
-// -----------------------------------------------------------------------------
-// Supervisor Call (svc) specific support.
-
-// Special Software Interrupt codes when used in the presence of the ARM
-// simulator.
-// svc (formerly swi) provides a 24bit immediate value. Use bits 22:0 for
-// standard SoftwareInterrupCode. Bit 23 is reserved for the stop feature.
-enum SoftwareInterruptCodes {
- // transition to C code
- kCallRtRedirected= 0x10,
- // break point
- kBreakpoint= 0x20,
- // stop
- kStopCode = 1 << 23
-};
-static const uint32_t kStopCodeMask = kStopCode - 1;
-static const uint32_t kMaxStopCode = kStopCode - 1;
-static const int32_t kDefaultStopCode = -1;
-
-
-// Type of VFP register. Determines register encoding.
-enum VFPRegPrecision {
- kSinglePrecision = 0,
- kDoublePrecision = 1
-};
-
-
-// VFP FPSCR constants.
-enum VFPConversionMode {
- kFPSCRRounding = 0,
- kDefaultRoundToZero = 1
-};
-
-// This mask does not include the "inexact" or "input denormal" cumulative
-// exceptions flags, because we usually don't want to check for it.
-static const uint32_t kVFPExceptionMask = 0xf;
-static const uint32_t kVFPInvalidOpExceptionBit = 1 << 0;
-static const uint32_t kVFPOverflowExceptionBit = 1 << 2;
-static const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
-static const uint32_t kVFPInexactExceptionBit = 1 << 4;
-static const uint32_t kVFPFlushToZeroMask = 1 << 24;
-
-static const uint32_t kVFPNConditionFlagBit = 1 << 31;
-static const uint32_t kVFPZConditionFlagBit = 1 << 30;
-static const uint32_t kVFPCConditionFlagBit = 1 << 29;
-static const uint32_t kVFPVConditionFlagBit = 1 << 28;
-
-
-// VFP rounding modes. See ARM DDI 0406B Page A2-29.
-enum VFPRoundingMode {
- RN = 0 << 22, // Round to Nearest.
- RP = 1 << 22, // Round towards Plus Infinity.
- RM = 2 << 22, // Round towards Minus Infinity.
- RZ = 3 << 22, // Round towards zero.
-
- // Aliases.
- kRoundToNearest = RN,
- kRoundToPlusInf = RP,
- kRoundToMinusInf = RM,
- kRoundToZero = RZ
-};
-
-static const uint32_t kVFPRoundingModeMask = 3 << 22;
-
-enum CheckForInexactConversion {
- kCheckForInexactConversion,
- kDontCheckForInexactConversion
-};
-
-// -----------------------------------------------------------------------------
-// Hints.
-
-// Branch hints are not used on the ARM. They are defined so that they can
-// appear in shared function signatures, but will be ignored in ARM
-// implementations.
-enum Hint { no_hint };
-
-// Hints are not used on the arm. Negating is trivial.
-inline Hint NegateHint(Hint ignored) { return no_hint; }
-
-
-// -----------------------------------------------------------------------------
-// Specific instructions, constants, and masks.
-// These constants are declared in assembler-arm.cc, as they use named registers
-// and other constants.
-
-
-// add(sp, sp, 4) instruction (aka Pop())
-extern const Instr kPopInstruction;
-
-// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
-// register r is not encoded.
-extern const Instr kPushRegPattern;
-
-// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
-// register r is not encoded.
-extern const Instr kPopRegPattern;
-
-// mov lr, pc
-extern const Instr kMovLrPc;
-// ldr rd, [pc, #offset]
-extern const Instr kLdrPCMask;
-extern const Instr kLdrPCPattern;
-// blxcc rm
-extern const Instr kBlxRegMask;
-
-extern const Instr kBlxRegPattern;
-
-extern const Instr kMovMvnMask;
-extern const Instr kMovMvnPattern;
-extern const Instr kMovMvnFlip;
-extern const Instr kMovLeaveCCMask;
-extern const Instr kMovLeaveCCPattern;
-extern const Instr kMovwMask;
-extern const Instr kMovwPattern;
-extern const Instr kMovwLeaveCCFlip;
-extern const Instr kCmpCmnMask;
-extern const Instr kCmpCmnPattern;
-extern const Instr kCmpCmnFlip;
-extern const Instr kAddSubFlip;
-extern const Instr kAndBicFlip;
-
-// A mask for the Rd register for push, pop, ldr, str instructions.
-extern const Instr kLdrRegFpOffsetPattern;
-
-extern const Instr kStrRegFpOffsetPattern;
-
-extern const Instr kLdrRegFpNegOffsetPattern;
-
-extern const Instr kStrRegFpNegOffsetPattern;
-
-extern const Instr kLdrStrInstrTypeMask;
-extern const Instr kLdrStrInstrArgumentMask;
-extern const Instr kLdrStrOffsetMask;
-
-
-// -----------------------------------------------------------------------------
-// Instruction abstraction.
-
-// The class Instruction enables access to individual fields defined in the ARM
-// architecture instruction set encoding as described in figure A3-1.
-// Note that the Assembler uses typedef int32_t Instr.
-//
-// Example: Test whether the instruction at ptr does set the condition code
-// bits.
-//
-// bool InstructionSetsConditionCodes(byte* ptr) {
-// Instruction* instr = Instruction::At(ptr);
-// int type = instr->TypeValue();
-// return ((type == 0) || (type == 1)) && instr->HasS();
-// }
-//
-class Instruction {
- public:
- enum {
- kInstrSize = 4,
- kInstrSizeLog2 = 2,
- kPCReadOffset = 8
- };
-
- // Helper macro to define static accessors.
- // We use the cast to char* trick to bypass the strict anti-aliasing rules.
- #define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
- static inline return_type Name(Instr instr) { \
- char* temp = reinterpret_cast<char*>(&instr); \
- return reinterpret_cast<Instruction*>(temp)->Name(); \
- }
-
- #define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)
-
- // Get the raw instruction bits.
- inline Instr InstructionBits() const {
- return *reinterpret_cast<const Instr*>(this);
- }
-
- // Set the raw instruction bits to value.
- inline void SetInstructionBits(Instr value) {
- *reinterpret_cast<Instr*>(this) = value;
- }
-
- // Read one particular bit out of the instruction bits.
- inline int Bit(int nr) const {
- return (InstructionBits() >> nr) & 1;
- }
-
- // Read a bit field's value out of the instruction bits.
- inline int Bits(int hi, int lo) const {
- return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
- }
-
- // Read a bit field out of the instruction bits.
- inline int BitField(int hi, int lo) const {
- return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
- }
-
- // Static support.
-
- // Read one particular bit out of the instruction bits.
- static inline int Bit(Instr instr, int nr) {
- return (instr >> nr) & 1;
- }
-
- // Read the value of a bit field out of the instruction bits.
- static inline int Bits(Instr instr, int hi, int lo) {
- return (instr >> lo) & ((2 << (hi - lo)) - 1);
- }
-
-
- // Read a bit field out of the instruction bits.
- static inline int BitField(Instr instr, int hi, int lo) {
- return instr & (((2 << (hi - lo)) - 1) << lo);
- }
-
-
- // Accessors for the different named fields used in the ARM encoding.
- // The naming of these accessor corresponds to figure A3-1.
- //
- // Two kind of accessors are declared:
- // - <Name>Field() will return the raw field, ie the field's bits at their
- // original place in the instruction encoding.
- // eg. if instr is the 'addgt r0, r1, r2' instruction, encoded as 0xC0810002
- // ConditionField(instr) will return 0xC0000000.
- // - <Name>Value() will return the field value, shifted back to bit 0.
- // eg. if instr is the 'addgt r0, r1, r2' instruction, encoded as 0xC0810002
- // ConditionField(instr) will return 0xC.
-
-
- // Generally applicable fields
- inline Condition ConditionValue() const {
- return static_cast<Condition>(Bits(31, 28));
- }
- inline Condition ConditionField() const {
- return static_cast<Condition>(BitField(31, 28));
- }
- DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionValue);
- DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionField);
-
- inline int TypeValue() const { return Bits(27, 25); }
-
- inline int RnValue() const { return Bits(19, 16); }
- DECLARE_STATIC_ACCESSOR(RnValue);
- inline int RdValue() const { return Bits(15, 12); }
- DECLARE_STATIC_ACCESSOR(RdValue);
-
- inline int CoprocessorValue() const { return Bits(11, 8); }
- // Support for VFP.
- // Vn(19-16) | Vd(15-12) | Vm(3-0)
- inline int VnValue() const { return Bits(19, 16); }
- inline int VmValue() const { return Bits(3, 0); }
- inline int VdValue() const { return Bits(15, 12); }
- inline int NValue() const { return Bit(7); }
- inline int MValue() const { return Bit(5); }
- inline int DValue() const { return Bit(22); }
- inline int RtValue() const { return Bits(15, 12); }
- inline int PValue() const { return Bit(24); }
- inline int UValue() const { return Bit(23); }
- inline int Opc1Value() const { return (Bit(23) << 2) | Bits(21, 20); }
- inline int Opc2Value() const { return Bits(19, 16); }
- inline int Opc3Value() const { return Bits(7, 6); }
- inline int SzValue() const { return Bit(8); }
- inline int VLValue() const { return Bit(20); }
- inline int VCValue() const { return Bit(8); }
- inline int VAValue() const { return Bits(23, 21); }
- inline int VBValue() const { return Bits(6, 5); }
- inline int VFPNRegValue(VFPRegPrecision pre) {
- return VFPGlueRegValue(pre, 16, 7);
- }
- inline int VFPMRegValue(VFPRegPrecision pre) {
- return VFPGlueRegValue(pre, 0, 5);
- }
- inline int VFPDRegValue(VFPRegPrecision pre) {
- return VFPGlueRegValue(pre, 12, 22);
- }
-
- // Fields used in Data processing instructions
- inline int OpcodeValue() const {
- return static_cast<Opcode>(Bits(24, 21));
- }
- inline Opcode OpcodeField() const {
- return static_cast<Opcode>(BitField(24, 21));
- }
- inline int SValue() const { return Bit(20); }
- // with register
- inline int RmValue() const { return Bits(3, 0); }
- DECLARE_STATIC_ACCESSOR(RmValue);
- inline int ShiftValue() const { return static_cast<ShiftOp>(Bits(6, 5)); }
- inline ShiftOp ShiftField() const {
- return static_cast<ShiftOp>(BitField(6, 5));
- }
- inline int RegShiftValue() const { return Bit(4); }
- inline int RsValue() const { return Bits(11, 8); }
- inline int ShiftAmountValue() const { return Bits(11, 7); }
- // with immediate
- inline int RotateValue() const { return Bits(11, 8); }
- inline int Immed8Value() const { return Bits(7, 0); }
- inline int Immed4Value() const { return Bits(19, 16); }
- inline int ImmedMovwMovtValue() const {
- return Immed4Value() << 12 | Offset12Value(); }
-
- // Fields used in Load/Store instructions
- inline int PUValue() const { return Bits(24, 23); }
- inline int PUField() const { return BitField(24, 23); }
- inline int BValue() const { return Bit(22); }
- inline int WValue() const { return Bit(21); }
- inline int LValue() const { return Bit(20); }
- // with register uses same fields as Data processing instructions above
- // with immediate
- inline int Offset12Value() const { return Bits(11, 0); }
- // multiple
- inline int RlistValue() const { return Bits(15, 0); }
- // extra loads and stores
- inline int SignValue() const { return Bit(6); }
- inline int HValue() const { return Bit(5); }
- inline int ImmedHValue() const { return Bits(11, 8); }
- inline int ImmedLValue() const { return Bits(3, 0); }
-
- // Fields used in Branch instructions
- inline int LinkValue() const { return Bit(24); }
- inline int SImmed24Value() const { return ((InstructionBits() << 8) >> 8); }
-
- // Fields used in Software interrupt instructions
- inline SoftwareInterruptCodes SvcValue() const {
- return static_cast<SoftwareInterruptCodes>(Bits(23, 0));
- }
-
- // Test for special encodings of type 0 instructions (extra loads and stores,
- // as well as multiplications).
- inline bool IsSpecialType0() const { return (Bit(7) == 1) && (Bit(4) == 1); }
-
- // Test for miscellaneous instructions encodings of type 0 instructions.
- inline bool IsMiscType0() const { return (Bit(24) == 1)
- && (Bit(23) == 0)
- && (Bit(20) == 0)
- && ((Bit(7) == 0)); }
-
- // Test for a stop instruction.
- inline bool IsStop() const {
- return (TypeValue() == 7) && (Bit(24) == 1) && (SvcValue() >= kStopCode);
- }
-
- // Special accessors that test for existence of a value.
- inline bool HasS() const { return SValue() == 1; }
- inline bool HasB() const { return BValue() == 1; }
- inline bool HasW() const { return WValue() == 1; }
- inline bool HasL() const { return LValue() == 1; }
- inline bool HasU() const { return UValue() == 1; }
- inline bool HasSign() const { return SignValue() == 1; }
- inline bool HasH() const { return HValue() == 1; }
- inline bool HasLink() const { return LinkValue() == 1; }
-
- // Decoding the double immediate in the vmov instruction.
- double DoubleImmedVmov() const;
-
- // Instructions are read of out a code stream. The only way to get a
- // reference to an instruction is to convert a pointer. There is no way
- // to allocate or create instances of class Instruction.
- // Use the At(pc) function to create references to Instruction.
- static Instruction* At(byte* pc) {
- return reinterpret_cast<Instruction*>(pc);
- }
-
-
- private:
- // Join split register codes, depending on single or double precision.
- // four_bit is the position of the least-significant bit of the four
- // bit specifier. one_bit is the position of the additional single bit
- // specifier.
- inline int VFPGlueRegValue(VFPRegPrecision pre, int four_bit, int one_bit) {
- if (pre == kSinglePrecision) {
- return (Bits(four_bit + 3, four_bit) << 1) | Bit(one_bit);
- }
- return (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit);
- }
-
- // We need to prevent the creation of instances of class Instruction.
- DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
-};
-
-
-// Helper functions for converting between register numbers and names.
-class Registers {
- public:
- // Return the name of the register.
- static const char* Name(int reg);
-
- // Lookup the register number for the name provided.
- static int Number(const char* name);
-
- struct RegisterAlias {
- int reg;
- const char* name;
- };
-
- private:
- static const char* names_[kNumRegisters];
- static const RegisterAlias aliases_[];
-};
-
-// Helper functions for converting between VFP register numbers and names.
-class VFPRegisters {
- public:
- // Return the name of the register.
- static const char* Name(int reg, bool is_double);
-
- // Lookup the register number for the name provided.
- // Set flag pointed by is_double to true if register
- // is double-precision.
- static int Number(const char* name, bool* is_double);
-
- private:
- static const char* names_[kNumVFPRegisters];
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_CONSTANTS_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/cpu-arm.cc b/src/3rdparty/v8/src/arm/cpu-arm.cc
deleted file mode 100644
index 5bd2029..0000000
--- a/src/3rdparty/v8/src/arm/cpu-arm.cc
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// CPU specific code for arm independent of OS goes here.
-#ifdef __arm__
-#include <sys/syscall.h> // for cache flushing.
-#endif
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "cpu.h"
-#include "macro-assembler.h"
-#include "simulator.h" // for cache flushing.
-
-namespace v8 {
-namespace internal {
-
-void CPU::Setup() {
- CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
- return CpuFeatures::IsSupported(VFP3);
-}
-
-
-void CPU::FlushICache(void* start, size_t size) {
- // Nothing to do flushing no instructions.
- if (size == 0) {
- return;
- }
-
-#if defined (USE_SIMULATOR)
- // Not generating ARM instructions for C-code. This means that we are
- // building an ARM emulator based target. We should notify the simulator
- // that the Icache was flushed.
- // None of this code ends up in the snapshot so there are no issues
- // around whether or not to generate the code when building snapshots.
- Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
-#else
- // Ideally, we would call
- // syscall(__ARM_NR_cacheflush, start,
- // reinterpret_cast<intptr_t>(start) + size, 0);
- // however, syscall(int, ...) is not supported on all platforms, especially
- // not when using EABI, so we call the __ARM_NR_cacheflush syscall directly.
-
- register uint32_t beg asm("a1") = reinterpret_cast<uint32_t>(start);
- register uint32_t end asm("a2") =
- reinterpret_cast<uint32_t>(start) + size;
- register uint32_t flg asm("a3") = 0;
- #ifdef __ARM_EABI__
- #if defined (__arm__) && !defined(__thumb__)
- // __arm__ may be defined in thumb mode.
- register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
- asm volatile(
- "svc 0x0"
- : "=r" (beg)
- : "0" (beg), "r" (end), "r" (flg), "r" (scno));
- #else
- // r7 is reserved by the EABI in thumb mode.
- asm volatile(
- "@ Enter ARM Mode \n\t"
- "adr r3, 1f \n\t"
- "bx r3 \n\t"
- ".ALIGN 4 \n\t"
- ".ARM \n"
- "1: push {r7} \n\t"
- "mov r7, %4 \n\t"
- "svc 0x0 \n\t"
- "pop {r7} \n\t"
- "@ Enter THUMB Mode\n\t"
- "adr r3, 2f+1 \n\t"
- "bx r3 \n\t"
- ".THUMB \n"
- "2: \n\t"
- : "=r" (beg)
- : "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
- : "r3");
- #endif
- #else
- #if defined (__arm__) && !defined(__thumb__)
- // __arm__ may be defined in thumb mode.
- asm volatile(
- "svc %1"
- : "=r" (beg)
- : "i" (__ARM_NR_cacheflush), "0" (beg), "r" (end), "r" (flg));
- #else
- // Do not use the value of __ARM_NR_cacheflush in the inline assembly
- // below, because the thumb mode value would be used, which would be
- // wrong, since we switch to ARM mode before executing the svc instruction
- asm volatile(
- "@ Enter ARM Mode \n\t"
- "adr r3, 1f \n\t"
- "bx r3 \n\t"
- ".ALIGN 4 \n\t"
- ".ARM \n"
- "1: svc 0x9f0002 \n"
- "@ Enter THUMB Mode\n\t"
- "adr r3, 2f+1 \n\t"
- "bx r3 \n\t"
- ".THUMB \n"
- "2: \n\t"
- : "=r" (beg)
- : "0" (beg), "r" (end), "r" (flg)
- : "r3");
- #endif
- #endif
-#endif
-}
-
-
-void CPU::DebugBreak() {
-#if !defined (__arm__) || !defined(CAN_USE_ARMV5_INSTRUCTIONS)
- UNIMPLEMENTED(); // when building ARM emulator target
-#else
- asm volatile("bkpt 0");
-#endif
-}
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/debug-arm.cc b/src/3rdparty/v8/src/arm/debug-arm.cc
deleted file mode 100644
index e6ad98c..0000000
--- a/src/3rdparty/v8/src/arm/debug-arm.cc
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen-inl.h"
-#include "debug.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-bool BreakLocationIterator::IsDebugBreakAtReturn() {
- return Debug::IsDebugBreakAtReturn(rinfo());
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtReturn() {
- // Patch the code changing the return from JS function sequence from
- // mov sp, fp
- // ldmia sp!, {fp, lr}
- // add sp, sp, #4
- // bx lr
- // to a call to the debug break return code.
- // #if USE_BLX
- // ldr ip, [pc, #0]
- // blx ip
- // #else
- // mov lr, pc
- // ldr pc, [pc, #-4]
- // #endif
- // <debug break return code entry point address>
- // bktp 0
- CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
-#ifdef USE_BLX
- patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
- patcher.masm()->blx(v8::internal::ip);
-#else
- patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
- patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
-#endif
- patcher.Emit(Isolate::Current()->debug()->debug_break_return()->entry());
- patcher.masm()->bkpt(0);
-}
-
-
-// Restore the JS frame exit code.
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kJSReturnSequenceInstructions);
-}
-
-
-// A debug break in the frame exit code is identified by the JS frame exit code
-// having been patched with a call instruction.
-bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
- return rinfo->IsPatchedReturnSequence();
-}
-
-
-bool BreakLocationIterator::IsDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- // Check whether the debug break slot instructions have been patched.
- return rinfo()->IsPatchedDebugBreakSlotSequence();
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- // Patch the code changing the debug break slot code from
- // mov r2, r2
- // mov r2, r2
- // mov r2, r2
- // to a call to the debug break slot code.
- // #if USE_BLX
- // ldr ip, [pc, #0]
- // blx ip
- // #else
- // mov lr, pc
- // ldr pc, [pc, #-4]
- // #endif
- // <debug break slot code entry point address>
- CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
-#ifdef USE_BLX
- patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
- patcher.masm()->blx(v8::internal::ip);
-#else
- patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
- patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
-#endif
- patcher.Emit(Isolate::Current()->debug()->debug_break_slot()->entry());
-}
-
-
-void BreakLocationIterator::ClearDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kDebugBreakSlotInstructions);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-
-static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList object_regs,
- RegList non_object_regs) {
- __ EnterInternalFrame();
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as a smi causing it to be untouched by GC.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- if ((object_regs | non_object_regs) != 0) {
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- if (FLAG_debug_code) {
- __ tst(reg, Operand(0xc0000000));
- __ Assert(eq, "Unable to encode value as smi");
- }
- __ mov(reg, Operand(reg, LSL, kSmiTagSize));
- }
- }
- __ stm(db_w, sp, object_regs | non_object_regs);
- }
-
-#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
-#endif
- __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments
- __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
-
- CEntryStub ceb(1);
- __ CallStub(&ceb);
-
- // Restore the register values from the expression stack.
- if ((object_regs | non_object_regs) != 0) {
- __ ldm(ia_w, sp, object_regs | non_object_regs);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- __ mov(reg, Operand(reg, LSR, kSmiTagSize));
- }
- if (FLAG_debug_code &&
- (((object_regs |non_object_regs) & (1 << r)) == 0)) {
- __ mov(reg, Operand(kDebugZapValue));
- }
- }
- }
-
- __ LeaveInternalFrame();
-
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
- __ mov(ip, Operand(after_break_target));
- __ ldr(ip, MemOperand(ip));
- __ Jump(ip);
-}
-
-
-void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC load (from ic-arm.cc).
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- [sp] : receiver
- // -----------------------------------
- // Registers r0 and r2 contain objects that need to be pushed on the
- // expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit(), 0);
-}
-
-
-void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC store (from ic-arm.cc).
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- // Registers r0, r1, and r2 contain objects that need to be pushed on the
- // expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit(), 0);
-}
-
-
-void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit(), 0);
-}
-
-
-void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit(), 0);
-}
-
-
-void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC call (from ic-arm.cc)
- // ----------- S t a t e -------------
- // -- r2 : name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r2.bit(), 0);
-}
-
-
-void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
- // Calling convention for construct call (from builtins-arm.cc)
- // -- r0 : number of arguments (not smi)
- // -- r1 : constructor function
- Generate_DebugBreakCallHelper(masm, r1.bit(), r0.bit());
-}
-
-
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
- // In places other than IC call sites it is expected that r0 is TOS which
- // is an object - this is not generally the case so this should be used with
- // care.
- Generate_DebugBreakCallHelper(masm, r0.bit(), 0);
-}
-
-
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // No registers used on entry.
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0, 0);
-}
-
-
-void Debug::GenerateSlot(MacroAssembler* masm) {
- // Generate enough nop's to make space for a call instruction. Avoid emitting
- // the constant pool in the debug break slot code.
- Assembler::BlockConstPoolScope block_const_pool(masm);
- Label check_codesize;
- __ bind(&check_codesize);
- __ RecordDebugBreakSlot();
- for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
- __ nop(MacroAssembler::DEBUG_BREAK_NOP);
- }
- ASSERT_EQ(Assembler::kDebugBreakSlotInstructions,
- masm->InstructionsGeneratedSince(&check_codesize));
-}
-
-
-void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
- // In the places where a debug break slot is inserted no registers can contain
- // object pointers.
- Generate_DebugBreakCallHelper(masm, 0, 0);
-}
-
-
-void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on arm");
-}
-
-
-void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on arm");
-}
-
-const bool Debug::kFrameDropperSupported = false;
-
-#undef __
-
-
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/deoptimizer-arm.cc b/src/3rdparty/v8/src/arm/deoptimizer-arm.cc
deleted file mode 100644
index f0a6937..0000000
--- a/src/3rdparty/v8/src/arm/deoptimizer-arm.cc
+++ /dev/null
@@ -1,737 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "safepoint-table.h"
-
-namespace v8 {
-namespace internal {
-
-int Deoptimizer::table_entry_size_ = 16;
-
-
-int Deoptimizer::patch_size() {
- const int kCallInstructionSizeInWords = 3;
- return kCallInstructionSizeInWords * Assembler::kInstrSize;
-}
-
-
-void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
- // Nothing to do. No new relocation information is written for lazy
- // deoptimization on ARM.
-}
-
-
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- HandleScope scope;
- AssertNoAllocation no_allocation;
-
- if (!function->IsOptimized()) return;
-
- // Get the optimized code.
- Code* code = function->code();
-
- // Invalidate the relocation information, as it will become invalid by the
- // code patching below, and is not needed any more.
- code->InvalidateRelocation();
-
- // For each return after a safepoint insert an absolute call to the
- // corresponding deoptimization entry.
- ASSERT(patch_size() % Assembler::kInstrSize == 0);
- int call_size_in_words = patch_size() / Assembler::kInstrSize;
- unsigned last_pc_offset = 0;
- SafepointTable table(function->code());
- for (unsigned i = 0; i < table.length(); i++) {
- unsigned pc_offset = table.GetPcOffset(i);
- SafepointEntry safepoint_entry = table.GetEntry(i);
- int deoptimization_index = safepoint_entry.deoptimization_index();
- int gap_code_size = safepoint_entry.gap_code_size();
- // Check that we did not shoot past next safepoint.
- CHECK(pc_offset >= last_pc_offset);
-#ifdef DEBUG
- // Destroy the code which is not supposed to be run again.
- int instructions = (pc_offset - last_pc_offset) / Assembler::kInstrSize;
- CodePatcher destroyer(code->instruction_start() + last_pc_offset,
- instructions);
- for (int x = 0; x < instructions; x++) {
- destroyer.masm()->bkpt(0);
- }
-#endif
- last_pc_offset = pc_offset;
- if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
- last_pc_offset += gap_code_size;
- CodePatcher patcher(code->instruction_start() + last_pc_offset,
- call_size_in_words);
- Address deoptimization_entry = Deoptimizer::GetDeoptimizationEntry(
- deoptimization_index, Deoptimizer::LAZY);
- patcher.masm()->Call(deoptimization_entry, RelocInfo::NONE);
- last_pc_offset += patch_size();
- }
- }
-
-
-#ifdef DEBUG
- // Destroy the code which is not supposed to be run again.
- int instructions =
- (code->safepoint_table_offset() - last_pc_offset) / Assembler::kInstrSize;
- CodePatcher destroyer(code->instruction_start() + last_pc_offset,
- instructions);
- for (int x = 0; x < instructions; x++) {
- destroyer.masm()->bkpt(0);
- }
-#endif
-
- // Add the deoptimizing code to the list.
- DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
- node->set_next(data->deoptimizing_code_list_);
- data->deoptimizing_code_list_ = node;
-
- // Set the code for the function to non-optimized version.
- function->ReplaceCode(function->shared()->code());
-
- if (FLAG_trace_deopt) {
- PrintF("[forced deoptimization: ");
- function->PrintName();
- PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
-#ifdef DEBUG
- if (FLAG_print_code) {
- code->PrintLn();
- }
-#endif
- }
-}
-
-
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- const int kInstrSize = Assembler::kInstrSize;
- // The call of the stack guard check has the following form:
- // e1 5d 00 0c cmp sp, <limit>
- // 2a 00 00 01 bcs ok
- // e5 9f c? ?? ldr ip, [pc, <stack guard address>]
- // e1 2f ff 3c blx ip
- ASSERT(Memory::int32_at(pc_after - kInstrSize) ==
- (al | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | ip.code()));
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_after - 2 * kInstrSize)));
-
- // We patch the code to the following form:
- // e1 5d 00 0c cmp sp, <limit>
- // e1 a0 00 00 mov r0, r0 (NOP)
- // e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
- // e1 2f ff 3c blx ip
- // and overwrite the constant containing the
- // address of the stack check stub.
-
- // Replace conditional jump with NOP.
- CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- patcher.masm()->nop();
-
- // Replace the stack check address in the constant pool
- // with the entry address of the replacement code.
- uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
- 2 * kInstrSize) & 0xfff;
- Address stack_check_address_pointer = pc_after + stack_check_address_offset;
- ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
- reinterpret_cast<uint32_t>(check_code->entry()));
- Memory::uint32_at(stack_check_address_pointer) =
- reinterpret_cast<uint32_t>(replacement_code->entry());
-}
-
-
-void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- const int kInstrSize = Assembler::kInstrSize;
- ASSERT(Memory::uint32_at(pc_after - kInstrSize) == 0xe12fff3c);
- ASSERT(Memory::uint8_at(pc_after - kInstrSize - 1) == 0xe5);
- ASSERT(Memory::uint8_at(pc_after - kInstrSize - 2) == 0x9f);
-
- // Replace NOP with conditional jump.
- CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- patcher.masm()->b(+4, cs);
-
- // Replace the stack check address in the constant pool
- // with the entry address of the replacement code.
- uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
- 2 * kInstrSize) & 0xfff;
- Address stack_check_address_pointer = pc_after + stack_check_address_offset;
- ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
- reinterpret_cast<uint32_t>(replacement_code->entry()));
- Memory::uint32_at(stack_check_address_pointer) =
- reinterpret_cast<uint32_t>(check_code->entry());
-}
-
-
-static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
-
- int bailout_id = LookupBailoutId(data, ast_id);
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
- USE(function);
- ASSERT(function == function_);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => node=%u, frame=%d->%d]\n",
- ast_id,
- input_frame_size,
- output_frame_size);
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- uint32_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
-
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
- } else {
- // Setup the frame pointer and the context pointer.
- output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
- output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- uint32_t pc = reinterpret_cast<uint32_t>(
- optimized_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function));
- function->PrintName();
- PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
- }
-}
-
-
-// This code is very similar to ia32 code, but relies on register names (fp, sp)
-// and how the frame is laid out.
-void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
- int frame_index) {
- // Read the ast node id, function, and frame height for this output frame.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- USE(opcode);
- ASSERT(Translation::FRAME == opcode);
- int node_id = iterator->Next();
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating ");
- function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
- }
-
- // The 'fixed' part of the frame consists of the incoming parameters and
- // the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeFixedSize(function);
- unsigned input_frame_size = input_->GetFrameSize();
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
-
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
- ASSERT(frame_index >= 0 && frame_index < output_count_);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address for the bottommost output frame can be computed from
- // the input frame pointer and the output frame's height. For all
- // subsequent output frames, it can be computed from the previous one's
- // top address and the current frame's size.
- uint32_t top_address;
- if (is_bottommost) {
- // 2 = context and function in the frame.
- top_address =
- input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
- } else {
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- }
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = function->shared()->formal_parameter_count() + 1;
- unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- input_offset -= (parameter_count * kPointerSize);
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Synthesize their values and set them up
- // explicitly.
- //
- // The caller's pc for the bottommost output frame is the same as in the
- // input frame. For all subsequent output frames, it can be read from the
- // previous one. This frame's pc can be computed from the non-optimized
- // function code and AST id of the bailout.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- intptr_t value;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetPc();
- }
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The caller's frame pointer for the bottommost output frame is the same
- // as in the input frame. For all subsequent output frames, it can be
- // read from the previous one. Also compute and set this frame's frame
- // pointer.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetFp();
- }
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
- output_frame->SetFp(fp_value);
- if (is_topmost) {
- output_frame->SetRegister(fp.code(), fp_value);
- }
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // For the bottommost output frame the context can be gotten from the input
- // frame. For all subsequent output frames it can be gotten from the function
- // so long as we don't inline functions that need local contexts.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = reinterpret_cast<intptr_t>(function->context());
- }
- output_frame->SetFrameSlot(output_offset, value);
- if (is_topmost) {
- output_frame->SetRegister(cp.code(), value);
- }
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The function was mentioned explicitly in the BEGIN_FRAME.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(function);
- // The function for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Translate the rest of the frame.
- for (unsigned i = 0; i < height; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- ASSERT(0 == output_offset);
-
- // Compute this frame's PC, state, and continuation.
- Code* non_optimized_code = function->shared()->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- Address start = non_optimized_code->instruction_start();
- unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
- uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
- output_frame->SetPc(pc_value);
- if (is_topmost) {
- output_frame->SetRegister(pc.code(), pc_value);
- }
-
- FullCodeGenerator::State state =
- FullCodeGenerator::StateField::decode(pc_and_state);
- output_frame->SetState(Smi::FromInt(state));
-
-
- // Set the continuation for the topmost frame.
- if (is_topmost) {
- Builtins* builtins = isolate_->builtins();
- Code* continuation = (bailout_type_ == EAGER)
- ? builtins->builtin(Builtins::kNotifyDeoptimized)
- : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
- output_frame->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
- }
-
- if (output_count_ - 1 == frame_index) iterator->Done();
-}
-
-
-#define __ masm()->
-
-
-// This code tries to be close to ia32 code so that any changes can be
-// easily ported.
-void Deoptimizer::EntryGenerator::Generate() {
- GeneratePrologue();
-
- Isolate* isolate = masm()->isolate();
-
- CpuFeatures::Scope scope(VFP3);
- // Save all general purpose registers before messing with them.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- // Everything but pc, lr and ip which will be saved but not restored.
- RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
-
- const int kDoubleRegsSize =
- kDoubleSize * DwVfpRegister::kNumAllocatableRegisters;
-
- // Save all general purpose registers before messing with them.
- __ sub(sp, sp, Operand(kDoubleRegsSize));
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
- DwVfpRegister vfp_reg = DwVfpRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ vstr(vfp_reg, sp, offset);
- }
-
- // Push all 16 registers (needed to populate FrameDescription::registers_).
- __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
-
- const int kSavedRegistersAreaSize =
- (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
-
- // Get the bailout id from the stack.
- __ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize));
-
- // Get the address of the location in the code object if possible (r3) (return
- // address for lazy deoptimization) and compute the fp-to-sp delta in
- // register r4.
- if (type() == EAGER) {
- __ mov(r3, Operand(0));
- // Correct one word for bailout id.
- __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else if (type() == OSR) {
- __ mov(r3, lr);
- // Correct one word for bailout id.
- __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else {
- __ mov(r3, lr);
- // Correct two words for bailout id and return address.
- __ add(r4, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
- }
- __ sub(r4, fp, r4);
-
- // Allocate a new deoptimizer object.
- // Pass four arguments in r0 to r3 and fifth argument on stack.
- __ PrepareCallCFunction(6, r5);
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(r1, Operand(type())); // bailout type,
- // r2: bailout id already loaded.
- // r3: code address or 0 already loaded.
- __ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta.
- __ mov(r5, Operand(ExternalReference::isolate_address()));
- __ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
- // Call Deoptimizer::New().
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
-
- // Preserve "deoptimizer" object in register r0 and get the input
- // frame descriptor pointer to r1 (deoptimizer->input_);
- __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
-
- // Copy core registers into FrameDescription::registers_[kNumRegisters].
- ASSERT(Register::kNumRegisters == kNumberOfRegisters);
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ ldr(r2, MemOperand(sp, i * kPointerSize));
- __ str(r2, MemOperand(r1, offset));
- }
-
- // Copy VFP registers to
- // double_registers_[DoubleRegister::kNumAllocatableRegisters]
- int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
- __ vldr(d0, sp, src_offset);
- __ vstr(d0, r1, dst_offset);
- }
-
- // Remove the bailout id, eventually return address, and the saved registers
- // from the stack.
- if (type() == EAGER || type() == OSR) {
- __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else {
- __ add(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
- }
-
- // Compute a pointer to the unwinding limit in register r2; that is
- // the first stack slot not part of the input frame.
- __ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset()));
- __ add(r2, r2, sp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
- Label pop_loop;
- __ bind(&pop_loop);
- __ pop(r4);
- __ str(r4, MemOperand(r3, 0));
- __ add(r3, r3, Operand(sizeof(uint32_t)));
- __ cmp(r2, sp);
- __ b(ne, &pop_loop);
-
- // Compute the output frame in the deoptimizer.
- __ push(r0); // Preserve deoptimizer object across call.
- // r0: deoptimizer object; r1: scratch.
- __ PrepareCallCFunction(1, r1);
- // Call Deoptimizer::ComputeOutputFrames().
- __ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 1);
- __ pop(r0); // Restore deoptimizer object (class Deoptimizer).
-
- // Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop;
- // Outer loop state: r0 = current "FrameDescription** output_",
- // r1 = one past the last FrameDescription**.
- __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
- __ ldr(r0, MemOperand(r0, Deoptimizer::output_offset())); // r0 is output_.
- __ add(r1, r0, Operand(r1, LSL, 2));
- __ bind(&outer_push_loop);
- // Inner loop state: r2 = current FrameDescription*, r3 = loop index.
- __ ldr(r2, MemOperand(r0, 0)); // output_[ix]
- __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
- __ bind(&inner_push_loop);
- __ sub(r3, r3, Operand(sizeof(uint32_t)));
- // __ add(r6, r2, Operand(r3, LSL, 1));
- __ add(r6, r2, Operand(r3));
- __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
- __ push(r7);
- __ cmp(r3, Operand(0));
- __ b(ne, &inner_push_loop); // test for gt?
- __ add(r0, r0, Operand(kPointerSize));
- __ cmp(r0, r1);
- __ b(lt, &outer_push_loop);
-
- // Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
- __ push(r6);
- }
-
- __ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
- __ push(r6);
- __ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
- __ push(r6);
-
- // Push the registers from the last output frame.
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ ldr(r6, MemOperand(r2, offset));
- __ push(r6);
- }
-
- // Restore the registers from the stack.
- __ ldm(ia_w, sp, restored_regs); // all but pc registers.
- __ pop(ip); // remove sp
- __ pop(ip); // remove lr
-
- // Set up the roots register.
- ExternalReference roots_address = ExternalReference::roots_address(isolate);
- __ mov(r10, Operand(roots_address));
-
- __ pop(ip); // remove pc
- __ pop(r7); // get continuation, leave pc on stack
- __ pop(lr);
- __ Jump(r7);
- __ stop("Unreachable.");
-}
-
-
-void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- // Create a sequence of deoptimization entries. Note that any
- // registers may be still live.
- Label done;
- for (int i = 0; i < count(); i++) {
- int start = masm()->pc_offset();
- USE(start);
- if (type() == EAGER) {
- __ nop();
- } else {
- // Emulate ia32 like call by pushing return address to stack.
- __ push(lr);
- }
- __ mov(ip, Operand(i));
- __ push(ip);
- __ b(&done);
- ASSERT(masm()->pc_offset() - start == table_entry_size_);
- }
- __ bind(&done);
-}
-
-#undef __
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/arm/disasm-arm.cc b/src/3rdparty/v8/src/arm/disasm-arm.cc
deleted file mode 100644
index 899b88a..0000000
--- a/src/3rdparty/v8/src/arm/disasm-arm.cc
+++ /dev/null
@@ -1,1471 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// A Disassembler object is used to disassemble a block of code instruction by
-// instruction. The default implementation of the NameConverter object can be
-// overriden to modify register names or to do symbol lookup on addresses.
-//
-// The example below will disassemble a block of code and print it to stdout.
-//
-// NameConverter converter;
-// Disassembler d(converter);
-// for (byte* pc = begin; pc < end;) {
-// v8::internal::EmbeddedVector<char, 256> buffer;
-// byte* prev_pc = pc;
-// pc += d.InstructionDecode(buffer, pc);
-// printf("%p %08x %s\n",
-// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
-// }
-//
-// The Disassembler class also has a convenience method to disassemble a block
-// of code into a FILE*, meaning that the above functionality could also be
-// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
-
-
-#include <assert.h>
-#include <stdio.h>
-#include <stdarg.h>
-#include <string.h>
-#ifndef WIN32
-#include <stdint.h>
-#endif
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "constants-arm.h"
-#include "disasm.h"
-#include "macro-assembler.h"
-#include "platform.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-//------------------------------------------------------------------------------
-
-// Decoder decodes and disassembles instructions into an output buffer.
-// It uses the converter to convert register names and call destinations into
-// more informative description.
-class Decoder {
- public:
- Decoder(const disasm::NameConverter& converter,
- Vector<char> out_buffer)
- : converter_(converter),
- out_buffer_(out_buffer),
- out_buffer_pos_(0) {
- out_buffer_[out_buffer_pos_] = '\0';
- }
-
- ~Decoder() {}
-
- // Writes one disassembled instruction into 'buffer' (0-terminated).
- // Returns the length of the disassembled machine instruction in bytes.
- int InstructionDecode(byte* instruction);
-
- static bool IsConstantPoolAt(byte* instr_ptr);
- static int ConstantPoolSizeAt(byte* instr_ptr);
-
- private:
- // Bottleneck functions to print into the out_buffer.
- void PrintChar(const char ch);
- void Print(const char* str);
-
- // Printing of common values.
- void PrintRegister(int reg);
- void PrintSRegister(int reg);
- void PrintDRegister(int reg);
- int FormatVFPRegister(Instruction* instr, const char* format);
- void PrintMovwMovt(Instruction* instr);
- int FormatVFPinstruction(Instruction* instr, const char* format);
- void PrintCondition(Instruction* instr);
- void PrintShiftRm(Instruction* instr);
- void PrintShiftImm(Instruction* instr);
- void PrintShiftSat(Instruction* instr);
- void PrintPU(Instruction* instr);
- void PrintSoftwareInterrupt(SoftwareInterruptCodes svc);
-
- // Handle formatting of instructions and their options.
- int FormatRegister(Instruction* instr, const char* option);
- int FormatOption(Instruction* instr, const char* option);
- void Format(Instruction* instr, const char* format);
- void Unknown(Instruction* instr);
-
- // Each of these functions decodes one particular instruction type, a 3-bit
- // field in the instruction encoding.
- // Types 0 and 1 are combined as they are largely the same except for the way
- // they interpret the shifter operand.
- void DecodeType01(Instruction* instr);
- void DecodeType2(Instruction* instr);
- void DecodeType3(Instruction* instr);
- void DecodeType4(Instruction* instr);
- void DecodeType5(Instruction* instr);
- void DecodeType6(Instruction* instr);
- // Type 7 includes special Debugger instructions.
- int DecodeType7(Instruction* instr);
- // For VFP support.
- void DecodeTypeVFP(Instruction* instr);
- void DecodeType6CoprocessorIns(Instruction* instr);
-
- void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
- void DecodeVCMP(Instruction* instr);
- void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
- void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
-
- const disasm::NameConverter& converter_;
- Vector<char> out_buffer_;
- int out_buffer_pos_;
-
- DISALLOW_COPY_AND_ASSIGN(Decoder);
-};
-
-
-// Support for assertions in the Decoder formatting functions.
-#define STRING_STARTS_WITH(string, compare_string) \
- (strncmp(string, compare_string, strlen(compare_string)) == 0)
-
-
-// Append the ch to the output buffer.
-void Decoder::PrintChar(const char ch) {
- out_buffer_[out_buffer_pos_++] = ch;
-}
-
-
-// Append the str to the output buffer.
-void Decoder::Print(const char* str) {
- char cur = *str++;
- while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
- PrintChar(cur);
- cur = *str++;
- }
- out_buffer_[out_buffer_pos_] = 0;
-}
-
-
-// These condition names are defined in a way to match the native disassembler
-// formatting. See for example the command "objdump -d <binary file>".
-static const char* cond_names[kNumberOfConditions] = {
- "eq", "ne", "cs" , "cc" , "mi" , "pl" , "vs" , "vc" ,
- "hi", "ls", "ge", "lt", "gt", "le", "", "invalid",
-};
-
-
-// Print the condition guarding the instruction.
-void Decoder::PrintCondition(Instruction* instr) {
- Print(cond_names[instr->ConditionValue()]);
-}
-
-
-// Print the register name according to the active name converter.
-void Decoder::PrintRegister(int reg) {
- Print(converter_.NameOfCPURegister(reg));
-}
-
-// Print the VFP S register name according to the active name converter.
-void Decoder::PrintSRegister(int reg) {
- Print(VFPRegisters::Name(reg, false));
-}
-
-// Print the VFP D register name according to the active name converter.
-void Decoder::PrintDRegister(int reg) {
- Print(VFPRegisters::Name(reg, true));
-}
-
-
-// These shift names are defined in a way to match the native disassembler
-// formatting. See for example the command "objdump -d <binary file>".
-static const char* shift_names[kNumberOfShifts] = {
- "lsl", "lsr", "asr", "ror"
-};
-
-
-// Print the register shift operands for the instruction. Generally used for
-// data processing instructions.
-void Decoder::PrintShiftRm(Instruction* instr) {
- ShiftOp shift = instr->ShiftField();
- int shift_index = instr->ShiftValue();
- int shift_amount = instr->ShiftAmountValue();
- int rm = instr->RmValue();
-
- PrintRegister(rm);
-
- if ((instr->RegShiftValue() == 0) && (shift == LSL) && (shift_amount == 0)) {
- // Special case for using rm only.
- return;
- }
- if (instr->RegShiftValue() == 0) {
- // by immediate
- if ((shift == ROR) && (shift_amount == 0)) {
- Print(", RRX");
- return;
- } else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
- shift_amount = 32;
- }
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s #%d",
- shift_names[shift_index],
- shift_amount);
- } else {
- // by register
- int rs = instr->RsValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s ", shift_names[shift_index]);
- PrintRegister(rs);
- }
-}
-
-
-// Print the immediate operand for the instruction. Generally used for data
-// processing instructions.
-void Decoder::PrintShiftImm(Instruction* instr) {
- int rotate = instr->RotateValue() * 2;
- int immed8 = instr->Immed8Value();
- int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%d", imm);
-}
-
-
-// Print the optional shift and immediate used by saturating instructions.
-void Decoder::PrintShiftSat(Instruction* instr) {
- int shift = instr->Bits(11, 7);
- if (shift > 0) {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s #%d",
- shift_names[instr->Bit(6) * 2],
- instr->Bits(11, 7));
- }
-}
-
-
-// Print PU formatting to reduce complexity of FormatOption.
-void Decoder::PrintPU(Instruction* instr) {
- switch (instr->PUField()) {
- case da_x: {
- Print("da");
- break;
- }
- case ia_x: {
- Print("ia");
- break;
- }
- case db_x: {
- Print("db");
- break;
- }
- case ib_x: {
- Print("ib");
- break;
- }
- default: {
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-// Print SoftwareInterrupt codes. Factoring this out reduces the complexity of
-// the FormatOption method.
-void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
- switch (svc) {
- case kCallRtRedirected:
- Print("call rt redirected");
- return;
- case kBreakpoint:
- Print("breakpoint");
- return;
- default:
- if (svc >= kStopCode) {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d - 0x%x",
- svc & kStopCodeMask,
- svc & kStopCodeMask);
- } else {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- svc);
- }
- return;
- }
-}
-
-
-// Handle all register based formatting in this function to reduce the
-// complexity of FormatOption.
-int Decoder::FormatRegister(Instruction* instr, const char* format) {
- ASSERT(format[0] == 'r');
- if (format[1] == 'n') { // 'rn: Rn register
- int reg = instr->RnValue();
- PrintRegister(reg);
- return 2;
- } else if (format[1] == 'd') { // 'rd: Rd register
- int reg = instr->RdValue();
- PrintRegister(reg);
- return 2;
- } else if (format[1] == 's') { // 'rs: Rs register
- int reg = instr->RsValue();
- PrintRegister(reg);
- return 2;
- } else if (format[1] == 'm') { // 'rm: Rm register
- int reg = instr->RmValue();
- PrintRegister(reg);
- return 2;
- } else if (format[1] == 't') { // 'rt: Rt register
- int reg = instr->RtValue();
- PrintRegister(reg);
- return 2;
- } else if (format[1] == 'l') {
- // 'rlist: register list for load and store multiple instructions
- ASSERT(STRING_STARTS_WITH(format, "rlist"));
- int rlist = instr->RlistValue();
- int reg = 0;
- Print("{");
- // Print register list in ascending order, by scanning the bit mask.
- while (rlist != 0) {
- if ((rlist & 1) != 0) {
- PrintRegister(reg);
- if ((rlist >> 1) != 0) {
- Print(", ");
- }
- }
- reg++;
- rlist >>= 1;
- }
- Print("}");
- return 5;
- }
- UNREACHABLE();
- return -1;
-}
-
-
-// Handle all VFP register based formatting in this function to reduce the
-// complexity of FormatOption.
-int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
- ASSERT((format[0] == 'S') || (format[0] == 'D'));
-
- if (format[1] == 'n') {
- int reg = instr->VnValue();
- if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->NValue()));
- if (format[0] == 'D') PrintDRegister(reg);
- return 2;
- } else if (format[1] == 'm') {
- int reg = instr->VmValue();
- if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->MValue()));
- if (format[0] == 'D') PrintDRegister(reg);
- return 2;
- } else if (format[1] == 'd') {
- int reg = instr->VdValue();
- if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->DValue()));
- if (format[0] == 'D') PrintDRegister(reg);
- return 2;
- }
-
- UNREACHABLE();
- return -1;
-}
-
-
-int Decoder::FormatVFPinstruction(Instruction* instr, const char* format) {
- Print(format);
- return 0;
-}
-
-
-// Print the movw or movt instruction.
-void Decoder::PrintMovwMovt(Instruction* instr) {
- int imm = instr->ImmedMovwMovtValue();
- int rd = instr->RdValue();
- PrintRegister(rd);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", #%d", imm);
-}
-
-
-// FormatOption takes a formatting string and interprets it based on
-// the current instructions. The format string points to the first
-// character of the option string (the option escape has already been
-// consumed by the caller.) FormatOption returns the number of
-// characters that were consumed from the formatting string.
-int Decoder::FormatOption(Instruction* instr, const char* format) {
- switch (format[0]) {
- case 'a': { // 'a: accumulate multiplies
- if (instr->Bit(21) == 0) {
- Print("ul");
- } else {
- Print("la");
- }
- return 1;
- }
- case 'b': { // 'b: byte loads or stores
- if (instr->HasB()) {
- Print("b");
- }
- return 1;
- }
- case 'c': { // 'cond: conditional execution
- ASSERT(STRING_STARTS_WITH(format, "cond"));
- PrintCondition(instr);
- return 4;
- }
- case 'd': { // 'd: vmov double immediate.
- double d = instr->DoubleImmedVmov();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%g", d);
- return 1;
- }
- case 'f': { // 'f: bitfield instructions - v7 and above.
- uint32_t lsbit = instr->Bits(11, 7);
- uint32_t width = instr->Bits(20, 16) + 1;
- if (instr->Bit(21) == 0) {
- // BFC/BFI:
- // Bits 20-16 represent most-significant bit. Covert to width.
- width -= lsbit;
- ASSERT(width > 0);
- }
- ASSERT((width + lsbit) <= 32);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%d, #%d", lsbit, width);
- return 1;
- }
- case 'h': { // 'h: halfword operation for extra loads and stores
- if (instr->HasH()) {
- Print("h");
- } else {
- Print("b");
- }
- return 1;
- }
- case 'i': { // 'i: immediate value from adjacent bits.
- // Expects tokens in the form imm%02d@%02d, ie. imm05@07, imm10@16
- int width = (format[3] - '0') * 10 + (format[4] - '0');
- int lsb = (format[6] - '0') * 10 + (format[7] - '0');
-
- ASSERT((width >= 1) && (width <= 32));
- ASSERT((lsb >= 0) && (lsb <= 31));
- ASSERT((width + lsb) <= 32);
-
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- instr->Bits(width + lsb - 1, lsb));
- return 8;
- }
- case 'l': { // 'l: branch and link
- if (instr->HasLink()) {
- Print("l");
- }
- return 1;
- }
- case 'm': {
- if (format[1] == 'w') {
- // 'mw: movt/movw instructions.
- PrintMovwMovt(instr);
- return 2;
- }
- if (format[1] == 'e') { // 'memop: load/store instructions.
- ASSERT(STRING_STARTS_WITH(format, "memop"));
- if (instr->HasL()) {
- Print("ldr");
- } else if ((instr->Bits(27, 25) == 0) && (instr->Bit(20) == 0)) {
- if (instr->Bits(7, 4) == 0xf) {
- Print("strd");
- } else {
- Print("ldrd");
- }
- } else {
- Print("str");
- }
- return 5;
- }
- // 'msg: for simulator break instructions
- ASSERT(STRING_STARTS_WITH(format, "msg"));
- byte* str =
- reinterpret_cast<byte*>(instr->InstructionBits() & 0x0fffffff);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s", converter_.NameInCode(str));
- return 3;
- }
- case 'o': {
- if ((format[3] == '1') && (format[4] == '2')) {
- // 'off12: 12-bit offset for load and store instructions
- ASSERT(STRING_STARTS_WITH(format, "off12"));
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", instr->Offset12Value());
- return 5;
- } else if (format[3] == '0') {
- // 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0.
- ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19"));
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- (instr->Bits(19, 8) << 4) +
- instr->Bits(3, 0));
- return 15;
- }
- // 'off8: 8-bit offset for extra load and store instructions
- ASSERT(STRING_STARTS_WITH(format, "off8"));
- int offs8 = (instr->ImmedHValue() << 4) | instr->ImmedLValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", offs8);
- return 4;
- }
- case 'p': { // 'pu: P and U bits for load and store instructions
- ASSERT(STRING_STARTS_WITH(format, "pu"));
- PrintPU(instr);
- return 2;
- }
- case 'r': {
- return FormatRegister(instr, format);
- }
- case 's': {
- if (format[1] == 'h') { // 'shift_op or 'shift_rm or 'shift_sat.
- if (format[6] == 'o') { // 'shift_op
- ASSERT(STRING_STARTS_WITH(format, "shift_op"));
- if (instr->TypeValue() == 0) {
- PrintShiftRm(instr);
- } else {
- ASSERT(instr->TypeValue() == 1);
- PrintShiftImm(instr);
- }
- return 8;
- } else if (format[6] == 's') { // 'shift_sat.
- ASSERT(STRING_STARTS_WITH(format, "shift_sat"));
- PrintShiftSat(instr);
- return 9;
- } else { // 'shift_rm
- ASSERT(STRING_STARTS_WITH(format, "shift_rm"));
- PrintShiftRm(instr);
- return 8;
- }
- } else if (format[1] == 'v') { // 'svc
- ASSERT(STRING_STARTS_WITH(format, "svc"));
- PrintSoftwareInterrupt(instr->SvcValue());
- return 3;
- } else if (format[1] == 'i') { // 'sign: signed extra loads and stores
- ASSERT(STRING_STARTS_WITH(format, "sign"));
- if (instr->HasSign()) {
- Print("s");
- }
- return 4;
- }
- // 's: S field of data processing instructions
- if (instr->HasS()) {
- Print("s");
- }
- return 1;
- }
- case 't': { // 'target: target of branch instructions
- ASSERT(STRING_STARTS_WITH(format, "target"));
- int off = (instr->SImmed24Value() << 2) + 8;
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%+d -> %s",
- off,
- converter_.NameOfAddress(
- reinterpret_cast<byte*>(instr) + off));
- return 6;
- }
- case 'u': { // 'u: signed or unsigned multiplies
- // The manual gets the meaning of bit 22 backwards in the multiply
- // instruction overview on page A3.16.2. The instructions that
- // exist in u and s variants are the following:
- // smull A4.1.87
- // umull A4.1.129
- // umlal A4.1.128
- // smlal A4.1.76
- // For these 0 means u and 1 means s. As can be seen on their individual
- // pages. The other 18 mul instructions have the bit set or unset in
- // arbitrary ways that are unrelated to the signedness of the instruction.
- // None of these 18 instructions exist in both a 'u' and an 's' variant.
-
- if (instr->Bit(22) == 0) {
- Print("u");
- } else {
- Print("s");
- }
- return 1;
- }
- case 'v': {
- return FormatVFPinstruction(instr, format);
- }
- case 'S':
- case 'D': {
- return FormatVFPRegister(instr, format);
- }
- case 'w': { // 'w: W field of load and store instructions
- if (instr->HasW()) {
- Print("!");
- }
- return 1;
- }
- default: {
- UNREACHABLE();
- break;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-// Format takes a formatting string for a whole instruction and prints it into
-// the output buffer. All escaped options are handed to FormatOption to be
-// parsed further.
-void Decoder::Format(Instruction* instr, const char* format) {
- char cur = *format++;
- while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
- if (cur == '\'') { // Single quote is used as the formatting escape.
- format += FormatOption(instr, format);
- } else {
- out_buffer_[out_buffer_pos_++] = cur;
- }
- cur = *format++;
- }
- out_buffer_[out_buffer_pos_] = '\0';
-}
-
-
-// For currently unimplemented decodings the disassembler calls Unknown(instr)
-// which will just print "unknown" of the instruction bits.
-void Decoder::Unknown(Instruction* instr) {
- Format(instr, "unknown");
-}
-
-
-void Decoder::DecodeType01(Instruction* instr) {
- int type = instr->TypeValue();
- if ((type == 0) && instr->IsSpecialType0()) {
- // multiply instruction or extra loads and stores
- if (instr->Bits(7, 4) == 9) {
- if (instr->Bit(24) == 0) {
- // multiply instructions
- if (instr->Bit(23) == 0) {
- if (instr->Bit(21) == 0) {
- // The MUL instruction description (A 4.1.33) refers to Rd as being
- // the destination for the operation, but it confusingly uses the
- // Rn field to encode it.
- Format(instr, "mul'cond's 'rn, 'rm, 'rs");
- } else {
- // The MLA instruction description (A 4.1.28) refers to the order
- // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
- // Rn field to encode the Rd register and the Rd field to encode
- // the Rn register.
- Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
- }
- } else {
- // The signed/long multiply instructions use the terms RdHi and RdLo
- // when referring to the target registers. They are mapped to the Rn
- // and Rd fields as follows:
- // RdLo == Rd field
- // RdHi == Rn field
- // The order of registers is: <RdLo>, <RdHi>, <Rm>, <Rs>
- Format(instr, "'um'al'cond's 'rd, 'rn, 'rm, 'rs");
- }
- } else {
- Unknown(instr); // not used by V8
- }
- } else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xd) == 0xd)) {
- // ldrd, strd
- switch (instr->PUField()) {
- case da_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond's 'rd, ['rn], -'rm");
- } else {
- Format(instr, "'memop'cond's 'rd, ['rn], #-'off8");
- }
- break;
- }
- case ia_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond's 'rd, ['rn], +'rm");
- } else {
- Format(instr, "'memop'cond's 'rd, ['rn], #+'off8");
- }
- break;
- }
- case db_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond's 'rd, ['rn, -'rm]'w");
- } else {
- Format(instr, "'memop'cond's 'rd, ['rn, #-'off8]'w");
- }
- break;
- }
- case ib_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond's 'rd, ['rn, +'rm]'w");
- } else {
- Format(instr, "'memop'cond's 'rd, ['rn, #+'off8]'w");
- }
- break;
- }
- default: {
- // The PU field is a 2-bit field.
- UNREACHABLE();
- break;
- }
- }
- } else {
- // extra load/store instructions
- switch (instr->PUField()) {
- case da_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
- } else {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
- }
- break;
- }
- case ia_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
- } else {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
- }
- break;
- }
- case db_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
- } else {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn, #-'off8]'w");
- }
- break;
- }
- case ib_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
- } else {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn, #+'off8]'w");
- }
- break;
- }
- default: {
- // The PU field is a 2-bit field.
- UNREACHABLE();
- break;
- }
- }
- return;
- }
- } else if ((type == 0) && instr->IsMiscType0()) {
- if (instr->Bits(22, 21) == 1) {
- switch (instr->BitField(7, 4)) {
- case BX:
- Format(instr, "bx'cond 'rm");
- break;
- case BLX:
- Format(instr, "blx'cond 'rm");
- break;
- case BKPT:
- Format(instr, "bkpt 'off0to3and8to19");
- break;
- default:
- Unknown(instr); // not used by V8
- break;
- }
- } else if (instr->Bits(22, 21) == 3) {
- switch (instr->BitField(7, 4)) {
- case CLZ:
- Format(instr, "clz'cond 'rd, 'rm");
- break;
- default:
- Unknown(instr); // not used by V8
- break;
- }
- } else {
- Unknown(instr); // not used by V8
- }
- } else {
- switch (instr->OpcodeField()) {
- case AND: {
- Format(instr, "and'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case EOR: {
- Format(instr, "eor'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case SUB: {
- Format(instr, "sub'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case RSB: {
- Format(instr, "rsb'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case ADD: {
- Format(instr, "add'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case ADC: {
- Format(instr, "adc'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case SBC: {
- Format(instr, "sbc'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case RSC: {
- Format(instr, "rsc'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case TST: {
- if (instr->HasS()) {
- Format(instr, "tst'cond 'rn, 'shift_op");
- } else {
- Format(instr, "movw'cond 'mw");
- }
- break;
- }
- case TEQ: {
- if (instr->HasS()) {
- Format(instr, "teq'cond 'rn, 'shift_op");
- } else {
- // Other instructions matching this pattern are handled in the
- // miscellaneous instructions part above.
- UNREACHABLE();
- }
- break;
- }
- case CMP: {
- if (instr->HasS()) {
- Format(instr, "cmp'cond 'rn, 'shift_op");
- } else {
- Format(instr, "movt'cond 'mw");
- }
- break;
- }
- case CMN: {
- if (instr->HasS()) {
- Format(instr, "cmn'cond 'rn, 'shift_op");
- } else {
- // Other instructions matching this pattern are handled in the
- // miscellaneous instructions part above.
- UNREACHABLE();
- }
- break;
- }
- case ORR: {
- Format(instr, "orr'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case MOV: {
- Format(instr, "mov'cond's 'rd, 'shift_op");
- break;
- }
- case BIC: {
- Format(instr, "bic'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case MVN: {
- Format(instr, "mvn'cond's 'rd, 'shift_op");
- break;
- }
- default: {
- // The Opcode field is a 4-bit field.
- UNREACHABLE();
- break;
- }
- }
- }
-}
-
-
-void Decoder::DecodeType2(Instruction* instr) {
- switch (instr->PUField()) {
- case da_x: {
- if (instr->HasW()) {
- Unknown(instr); // not used in V8
- return;
- }
- Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
- break;
- }
- case ia_x: {
- if (instr->HasW()) {
- Unknown(instr); // not used in V8
- return;
- }
- Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
- break;
- }
- case db_x: {
- Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
- break;
- }
- case ib_x: {
- Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
- break;
- }
- default: {
- // The PU field is a 2-bit field.
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void Decoder::DecodeType3(Instruction* instr) {
- switch (instr->PUField()) {
- case da_x: {
- ASSERT(!instr->HasW());
- Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
- break;
- }
- case ia_x: {
- if (instr->HasW()) {
- ASSERT(instr->Bits(5, 4) == 0x1);
- if (instr->Bit(22) == 0x1) {
- Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
- } else {
- UNREACHABLE(); // SSAT.
- }
- } else {
- Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
- }
- break;
- }
- case db_x: {
- Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
- break;
- }
- case ib_x: {
- if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
- uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
- uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
- uint32_t msbit = widthminus1 + lsbit;
- if (msbit <= 31) {
- if (instr->Bit(22)) {
- Format(instr, "ubfx'cond 'rd, 'rm, 'f");
- } else {
- Format(instr, "sbfx'cond 'rd, 'rm, 'f");
- }
- } else {
- UNREACHABLE();
- }
- } else if (!instr->HasW() && (instr->Bits(6, 4) == 0x1)) {
- uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
- uint32_t msbit = static_cast<uint32_t>(instr->Bits(20, 16));
- if (msbit >= lsbit) {
- if (instr->RmValue() == 15) {
- Format(instr, "bfc'cond 'rd, 'f");
- } else {
- Format(instr, "bfi'cond 'rd, 'rm, 'f");
- }
- } else {
- UNREACHABLE();
- }
- } else {
- Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
- }
- break;
- }
- default: {
- // The PU field is a 2-bit field.
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void Decoder::DecodeType4(Instruction* instr) {
- if (instr->Bit(22) != 0) {
- // Privileged mode currently not supported.
- Unknown(instr);
- } else {
- if (instr->HasL()) {
- Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
- } else {
- Format(instr, "stm'cond'pu 'rn'w, 'rlist");
- }
- }
-}
-
-
-void Decoder::DecodeType5(Instruction* instr) {
- Format(instr, "b'l'cond 'target");
-}
-
-
-void Decoder::DecodeType6(Instruction* instr) {
- DecodeType6CoprocessorIns(instr);
-}
-
-
-int Decoder::DecodeType7(Instruction* instr) {
- if (instr->Bit(24) == 1) {
- if (instr->SvcValue() >= kStopCode) {
- Format(instr, "stop'cond 'svc");
- // Also print the stop message. Its address is encoded
- // in the following 4 bytes.
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "\n %p %08x stop message: %s",
- reinterpret_cast<int32_t*>(instr
- + Instruction::kInstrSize),
- *reinterpret_cast<char**>(instr
- + Instruction::kInstrSize),
- *reinterpret_cast<char**>(instr
- + Instruction::kInstrSize));
- // We have decoded 2 * Instruction::kInstrSize bytes.
- return 2 * Instruction::kInstrSize;
- } else {
- Format(instr, "svc'cond 'svc");
- }
- } else {
- DecodeTypeVFP(instr);
- }
- return Instruction::kInstrSize;
-}
-
-
-// void Decoder::DecodeTypeVFP(Instruction* instr)
-// vmov: Sn = Rt
-// vmov: Rt = Sn
-// vcvt: Dd = Sm
-// vcvt: Sd = Dm
-// Dd = vabs(Dm)
-// Dd = vneg(Dm)
-// Dd = vadd(Dn, Dm)
-// Dd = vsub(Dn, Dm)
-// Dd = vmul(Dn, Dm)
-// Dd = vdiv(Dn, Dm)
-// vcmp(Dd, Dm)
-// vmrs
-// vmsr
-// Dd = vsqrt(Dm)
-void Decoder::DecodeTypeVFP(Instruction* instr) {
- ASSERT((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
- ASSERT(instr->Bits(11, 9) == 0x5);
-
- if (instr->Bit(4) == 0) {
- if (instr->Opc1Value() == 0x7) {
- // Other data processing instructions
- if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x1)) {
- // vmov register to register.
- if (instr->SzValue() == 0x1) {
- Format(instr, "vmov.f64'cond 'Dd, 'Dm");
- } else {
- Format(instr, "vmov.f32'cond 'Sd, 'Sm");
- }
- } else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
- // vabs
- Format(instr, "vabs'cond 'Dd, 'Dm");
- } else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
- // vneg
- Format(instr, "vneg'cond 'Dd, 'Dm");
- } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
- DecodeVCVTBetweenDoubleAndSingle(instr);
- } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
- DecodeVCVTBetweenFloatingPointAndInteger(instr);
- } else if (((instr->Opc2Value() >> 1) == 0x6) &&
- (instr->Opc3Value() & 0x1)) {
- DecodeVCVTBetweenFloatingPointAndInteger(instr);
- } else if (((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
- (instr->Opc3Value() & 0x1)) {
- DecodeVCMP(instr);
- } else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
- Format(instr, "vsqrt.f64'cond 'Dd, 'Dm");
- } else if (instr->Opc3Value() == 0x0) {
- if (instr->SzValue() == 0x1) {
- Format(instr, "vmov.f64'cond 'Dd, 'd");
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else if (instr->Opc1Value() == 0x3) {
- if (instr->SzValue() == 0x1) {
- if (instr->Opc3Value() & 0x1) {
- Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm");
- } else {
- Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm");
- }
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
- if (instr->SzValue() == 0x1) {
- Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm");
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
- if (instr->SzValue() == 0x1) {
- Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else {
- if ((instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x0)) {
- DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
- } else if ((instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x7) &&
- (instr->Bits(19, 16) == 0x1)) {
- if (instr->VLValue() == 0) {
- if (instr->Bits(15, 12) == 0xF) {
- Format(instr, "vmsr'cond FPSCR, APSR");
- } else {
- Format(instr, "vmsr'cond FPSCR, 'rt");
- }
- } else {
- if (instr->Bits(15, 12) == 0xF) {
- Format(instr, "vmrs'cond APSR, FPSCR");
- } else {
- Format(instr, "vmrs'cond 'rt, FPSCR");
- }
- }
- }
- }
-}
-
-
-void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
- Instruction* instr) {
- ASSERT((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x0));
-
- bool to_arm_register = (instr->VLValue() == 0x1);
-
- if (to_arm_register) {
- Format(instr, "vmov'cond 'rt, 'Sn");
- } else {
- Format(instr, "vmov'cond 'Sn, 'rt");
- }
-}
-
-
-void Decoder::DecodeVCMP(Instruction* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
- ASSERT(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
- (instr->Opc3Value() & 0x1));
-
- // Comparison.
- bool dp_operation = (instr->SzValue() == 1);
- bool raise_exception_for_qnan = (instr->Bit(7) == 0x1);
-
- if (dp_operation && !raise_exception_for_qnan) {
- if (instr->Opc2Value() == 0x4) {
- Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
- } else if (instr->Opc2Value() == 0x5) {
- Format(instr, "vcmp.f64'cond 'Dd, #0.0");
- } else {
- Unknown(instr); // invalid
- }
- } else {
- Unknown(instr); // Not used by V8.
- }
-}
-
-
-void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
- ASSERT((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
-
- bool double_to_single = (instr->SzValue() == 1);
-
- if (double_to_single) {
- Format(instr, "vcvt.f32.f64'cond 'Sd, 'Dm");
- } else {
- Format(instr, "vcvt.f64.f32'cond 'Dd, 'Sm");
- }
-}
-
-
-void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
- ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
- (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
-
- bool to_integer = (instr->Bit(18) == 1);
- bool dp_operation = (instr->SzValue() == 1);
- if (to_integer) {
- bool unsigned_integer = (instr->Bit(16) == 0);
-
- if (dp_operation) {
- if (unsigned_integer) {
- Format(instr, "vcvt.u32.f64'cond 'Sd, 'Dm");
- } else {
- Format(instr, "vcvt.s32.f64'cond 'Sd, 'Dm");
- }
- } else {
- if (unsigned_integer) {
- Format(instr, "vcvt.u32.f32'cond 'Sd, 'Sm");
- } else {
- Format(instr, "vcvt.s32.f32'cond 'Sd, 'Sm");
- }
- }
- } else {
- bool unsigned_integer = (instr->Bit(7) == 0);
-
- if (dp_operation) {
- if (unsigned_integer) {
- Format(instr, "vcvt.f64.u32'cond 'Dd, 'Sm");
- } else {
- Format(instr, "vcvt.f64.s32'cond 'Dd, 'Sm");
- }
- } else {
- if (unsigned_integer) {
- Format(instr, "vcvt.f32.u32'cond 'Sd, 'Sm");
- } else {
- Format(instr, "vcvt.f32.s32'cond 'Sd, 'Sm");
- }
- }
- }
-}
-
-
-// Decode Type 6 coprocessor instructions.
-// Dm = vmov(Rt, Rt2)
-// <Rt, Rt2> = vmov(Dm)
-// Ddst = MEM(Rbase + 4*offset).
-// MEM(Rbase + 4*offset) = Dsrc.
-void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
- ASSERT(instr->TypeValue() == 6);
-
- if (instr->CoprocessorValue() == 0xA) {
- switch (instr->OpcodeValue()) {
- case 0x8:
- case 0xA:
- if (instr->HasL()) {
- Format(instr, "vldr'cond 'Sd, ['rn - 4*'imm08@00]");
- } else {
- Format(instr, "vstr'cond 'Sd, ['rn - 4*'imm08@00]");
- }
- break;
- case 0xC:
- case 0xE:
- if (instr->HasL()) {
- Format(instr, "vldr'cond 'Sd, ['rn + 4*'imm08@00]");
- } else {
- Format(instr, "vstr'cond 'Sd, ['rn + 4*'imm08@00]");
- }
- break;
- default:
- Unknown(instr); // Not used by V8.
- break;
- }
- } else if (instr->CoprocessorValue() == 0xB) {
- switch (instr->OpcodeValue()) {
- case 0x2:
- // Load and store double to two GP registers
- if (instr->Bits(7, 4) != 0x1) {
- Unknown(instr); // Not used by V8.
- } else if (instr->HasL()) {
- Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
- } else {
- Format(instr, "vmov'cond 'Dm, 'rt, 'rn");
- }
- break;
- case 0x8:
- if (instr->HasL()) {
- Format(instr, "vldr'cond 'Dd, ['rn - 4*'imm08@00]");
- } else {
- Format(instr, "vstr'cond 'Dd, ['rn - 4*'imm08@00]");
- }
- break;
- case 0xC:
- if (instr->HasL()) {
- Format(instr, "vldr'cond 'Dd, ['rn + 4*'imm08@00]");
- } else {
- Format(instr, "vstr'cond 'Dd, ['rn + 4*'imm08@00]");
- }
- break;
- default:
- Unknown(instr); // Not used by V8.
- break;
- }
- } else {
- Unknown(instr); // Not used by V8.
- }
-}
-
-
-bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
- int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
- return (instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker;
-}
-
-
-int Decoder::ConstantPoolSizeAt(byte* instr_ptr) {
- if (IsConstantPoolAt(instr_ptr)) {
- int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
- return instruction_bits & kConstantPoolLengthMask;
- } else {
- return -1;
- }
-}
-
-
-// Disassemble the instruction at *instr_ptr into the output buffer.
-int Decoder::InstructionDecode(byte* instr_ptr) {
- Instruction* instr = Instruction::At(instr_ptr);
- // Print raw instruction bytes.
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%08x ",
- instr->InstructionBits());
- if (instr->ConditionField() == kSpecialCondition) {
- Unknown(instr);
- return Instruction::kInstrSize;
- }
- int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
- if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "constant pool begin (length %d)",
- instruction_bits &
- kConstantPoolLengthMask);
- return Instruction::kInstrSize;
- }
- switch (instr->TypeValue()) {
- case 0:
- case 1: {
- DecodeType01(instr);
- break;
- }
- case 2: {
- DecodeType2(instr);
- break;
- }
- case 3: {
- DecodeType3(instr);
- break;
- }
- case 4: {
- DecodeType4(instr);
- break;
- }
- case 5: {
- DecodeType5(instr);
- break;
- }
- case 6: {
- DecodeType6(instr);
- break;
- }
- case 7: {
- return DecodeType7(instr);
- }
- default: {
- // The type field is 3-bits in the ARM encoding.
- UNREACHABLE();
- break;
- }
- }
- return Instruction::kInstrSize;
-}
-
-
-} } // namespace v8::internal
-
-
-
-//------------------------------------------------------------------------------
-
-namespace disasm {
-
-
-const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
- return tmp_buffer_.start();
-}
-
-
-const char* NameConverter::NameOfConstant(byte* addr) const {
- return NameOfAddress(addr);
-}
-
-
-const char* NameConverter::NameOfCPURegister(int reg) const {
- return v8::internal::Registers::Name(reg);
-}
-
-
-const char* NameConverter::NameOfByteCPURegister(int reg) const {
- UNREACHABLE(); // ARM does not have the concept of a byte register
- return "nobytereg";
-}
-
-
-const char* NameConverter::NameOfXMMRegister(int reg) const {
- UNREACHABLE(); // ARM does not have any XMM registers
- return "noxmmreg";
-}
-
-
-const char* NameConverter::NameInCode(byte* addr) const {
- // The default name converter is called for unknown code. So we will not try
- // to access any memory.
- return "";
-}
-
-
-//------------------------------------------------------------------------------
-
-Disassembler::Disassembler(const NameConverter& converter)
- : converter_(converter) {}
-
-
-Disassembler::~Disassembler() {}
-
-
-int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
- byte* instruction) {
- v8::internal::Decoder d(converter_, buffer);
- return d.InstructionDecode(instruction);
-}
-
-
-int Disassembler::ConstantPoolSizeAt(byte* instruction) {
- return v8::internal::Decoder::ConstantPoolSizeAt(instruction);
-}
-
-
-void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
- NameConverter converter;
- Disassembler d(converter);
- for (byte* pc = begin; pc < end;) {
- v8::internal::EmbeddedVector<char, 128> buffer;
- buffer[0] = '\0';
- byte* prev_pc = pc;
- pc += d.InstructionDecode(buffer, pc);
- fprintf(f, "%p %08x %s\n",
- prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
- }
-}
-
-
-} // namespace disasm
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/frames-arm.cc b/src/3rdparty/v8/src/arm/frames-arm.cc
deleted file mode 100644
index a805d28..0000000
--- a/src/3rdparty/v8/src/arm/frames-arm.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "frames-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-Address ExitFrame::ComputeStackPointer(Address fp) {
- return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/frames-arm.h b/src/3rdparty/v8/src/arm/frames-arm.h
deleted file mode 100644
index d6846c8..0000000
--- a/src/3rdparty/v8/src/arm/frames-arm.h
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_FRAMES_ARM_H_
-#define V8_ARM_FRAMES_ARM_H_
-
-namespace v8 {
-namespace internal {
-
-
-// The ARM ABI does not specify the usage of register r9, which may be reserved
-// as the static base or thread register on some platforms, in which case we
-// leave it alone. Adjust the value of kR9Available accordingly:
-static const int kR9Available = 1; // 1 if available to us, 0 if reserved
-
-
-// Register list in load/store instructions
-// Note that the bit values must match those used in actual instruction encoding
-static const int kNumRegs = 16;
-
-
-// Caller-saved/arguments registers
-static const RegList kJSCallerSaved =
- 1 << 0 | // r0 a1
- 1 << 1 | // r1 a2
- 1 << 2 | // r2 a3
- 1 << 3; // r3 a4
-
-static const int kNumJSCallerSaved = 4;
-
-typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
-
-// Return the code of the n-th caller-saved register available to JavaScript
-// e.g. JSCallerSavedReg(0) returns r0.code() == 0
-int JSCallerSavedCode(int n);
-
-
-// Callee-saved registers preserved when switching from C to JavaScript
-static const RegList kCalleeSaved =
- 1 << 4 | // r4 v1
- 1 << 5 | // r5 v2
- 1 << 6 | // r6 v3
- 1 << 7 | // r7 v4
- 1 << 8 | // r8 v5 (cp in JavaScript code)
- kR9Available << 9 | // r9 v6
- 1 << 10 | // r10 v7
- 1 << 11; // r11 v8 (fp in JavaScript code)
-
-static const int kNumCalleeSaved = 7 + kR9Available;
-
-
-// Number of registers for which space is reserved in safepoints. Must be a
-// multiple of 8.
-// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
-static const int kNumSafepointRegisters = 16;
-
-// Define the list of registers actually saved at safepoints.
-// Note that the number of saved registers may be smaller than the reserved
-// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-static const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-static const int kNumSafepointSavedRegisters =
- kNumJSCallerSaved + kNumCalleeSaved;
-
-// ----------------------------------------------------
-
-
-class StackHandlerConstants : public AllStatic {
- public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kStateOffset = 1 * kPointerSize;
- static const int kFPOffset = 2 * kPointerSize;
- static const int kPCOffset = 3 * kPointerSize;
-
- static const int kSize = kPCOffset + kPointerSize;
-};
-
-
-class EntryFrameConstants : public AllStatic {
- public:
- static const int kCallerFPOffset = -3 * kPointerSize;
-};
-
-
-class ExitFrameConstants : public AllStatic {
- public:
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
-
- // The caller fields are below the frame pointer on the stack.
- static const int kCallerFPOffset = 0 * kPointerSize;
- // The calling JS function is below FP.
- static const int kCallerPCOffset = 1 * kPointerSize;
-
- // FP-relative displacement of the caller's SP. It points just
- // below the saved PC.
- static const int kCallerSPDisplacement = 2 * kPointerSize;
-};
-
-
-class StandardFrameConstants : public AllStatic {
- public:
- static const int kExpressionsOffset = -3 * kPointerSize;
- static const int kMarkerOffset = -2 * kPointerSize;
- static const int kContextOffset = -1 * kPointerSize;
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = 1 * kPointerSize;
- static const int kCallerSPOffset = 2 * kPointerSize;
-};
-
-
-class JavaScriptFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
-
- // Caller SP-relative.
- static const int kParam0Offset = -2 * kPointerSize;
- static const int kReceiverOffset = -1 * kPointerSize;
-};
-
-
-class ArgumentsAdaptorFrameConstants : public AllStatic {
- public:
- static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
-class InternalFrameConstants : public AllStatic {
- public:
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
-inline Object* JavaScriptFrame::function_slot_object() const {
- const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_FRAMES_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/full-codegen-arm.cc b/src/3rdparty/v8/src/arm/full-codegen-arm.cc
deleted file mode 100644
index 3267951..0000000
--- a/src/3rdparty/v8/src/arm/full-codegen-arm.cc
+++ /dev/null
@@ -1,4374 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "parser.h"
-#include "scopes.h"
-#include "stub-cache.h"
-
-#include "arm/code-stubs-arm.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-
-// A patch site is a location in the code which it is possible to patch. This
-// class has a number of methods to emit the code which is patchable and the
-// method EmitPatchInfo to record a marker back to the patchable code. This
-// marker is a cmp rx, #yyy instruction, and x * 0x00000fff + yyy (raw 12 bit
-// immediate value is used) is the delta from the pc to the first instruction of
-// the patchable code.
-class JumpPatchSite BASE_EMBEDDED {
- public:
- explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
-#ifdef DEBUG
- info_emitted_ = false;
-#endif
- }
-
- ~JumpPatchSite() {
- ASSERT(patch_site_.is_bound() == info_emitted_);
- }
-
- // When initially emitting this ensure that a jump is always generated to skip
- // the inlined smi code.
- void EmitJumpIfNotSmi(Register reg, Label* target) {
- ASSERT(!patch_site_.is_bound() && !info_emitted_);
- __ bind(&patch_site_);
- __ cmp(reg, Operand(reg));
- // Don't use b(al, ...) as that might emit the constant pool right after the
- // branch. After patching when the branch is no longer unconditional
- // execution can continue into the constant pool.
- __ b(eq, target); // Always taken before patched.
- }
-
- // When initially emitting this ensure that a jump is never generated to skip
- // the inlined smi code.
- void EmitJumpIfSmi(Register reg, Label* target) {
- ASSERT(!patch_site_.is_bound() && !info_emitted_);
- __ bind(&patch_site_);
- __ cmp(reg, Operand(reg));
- __ b(ne, target); // Never taken before patched.
- }
-
- void EmitPatchInfo() {
- int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
- Register reg;
- reg.set_code(delta_to_patch_site / kOff12Mask);
- __ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask);
-#ifdef DEBUG
- info_emitted_ = true;
-#endif
- }
-
- bool is_bound() const { return patch_site_.is_bound(); }
-
- private:
- MacroAssembler* masm_;
- Label patch_site_;
-#ifdef DEBUG
- bool info_emitted_;
-#endif
-};
-
-
-// Generate code for a JS function. On entry to the function the receiver
-// and arguments have been pushed on the stack left to right. The actual
-// argument count matches the formal parameter count expected by the
-// function.
-//
-// The live registers are:
-// o r1: the JS function object being called (ie, ourselves)
-// o cp: our context
-// o fp: our caller's frame pointer
-// o sp: stack pointer
-// o lr: return address
-//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-arm.h for its layout.
-void FullCodeGenerator::Generate(CompilationInfo* info) {
- ASSERT(info_ == NULL);
- info_ = info;
- SetFunctionPosition(function());
- Comment cmnt(masm_, "[ function compiled by full code generator");
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop-at");
- }
-#endif
-
- int locals_count = scope()->num_stack_slots();
-
- __ Push(lr, fp, cp, r1);
- if (locals_count > 0) {
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- }
- // Adjust fp to point to caller's fp.
- __ add(fp, sp, Operand(2 * kPointerSize));
-
- { Comment cmnt(masm_, "[ Allocate locals");
- for (int i = 0; i < locals_count; i++) {
- __ push(ip);
- }
- }
-
- bool function_in_register = true;
-
- // Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment cmnt(masm_, "[ Allocate local context");
- // Argument to NewContext is the function, which is in r1.
- __ push(r1);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewContext, 1);
- }
- function_in_register = false;
- // Context is returned in both r0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Slot* slot = scope()->parameter(i)->AsSlot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ ldr(r0, MemOperand(fp, parameter_offset));
- // Store it in the context.
- __ mov(r1, Operand(Context::SlotOffset(slot->index())));
- __ str(r0, MemOperand(cp, r1));
- // Update the write barrier. This clobbers all involved
- // registers, so we have to use two more registers to avoid
- // clobbering cp.
- __ mov(r2, Operand(cp));
- __ RecordWrite(r2, Operand(r1), r3, r0);
- }
- }
- }
-
- Variable* arguments = scope()->arguments();
- if (arguments != NULL) {
- // Function uses arguments object.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (!function_in_register) {
- // Load this again, if it's used by the local context below.
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ mov(r3, r1);
- }
- // Receiver is just before the parameters on the caller's stack.
- int offset = scope()->num_parameters() * kPointerSize;
- __ add(r2, fp,
- Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ mov(r1, Operand(Smi::FromInt(scope()->num_parameters())));
- __ Push(r3, r2, r1);
-
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiever and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(
- is_strict_mode() ? ArgumentsAccessStub::NEW_STRICT
- : ArgumentsAccessStub::NEW_NON_STRICT);
- __ CallStub(&stub);
-
- Variable* arguments_shadow = scope()->arguments_shadow();
- if (arguments_shadow != NULL) {
- // Duplicate the value; move-to-slot operation might clobber registers.
- __ mov(r3, r0);
- Move(arguments_shadow->AsSlot(), r3, r1, r2);
- }
- Move(arguments->AsSlot(), r0, r1, r2);
- }
-
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
-
- // Visit the declarations and body unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ Declarations");
- scope()->VisitIllegalRedeclaration(this);
-
- } else {
- { Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- EmitDeclaration(scope()->function(), Variable::CONST, NULL);
- }
- VisitDeclarations(scope()->declarations());
- }
-
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailout(info->function(), NO_REGISTERS);
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ bind(&ok);
- }
-
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(function()->body());
- ASSERT(loop_depth() == 0);
- }
- }
-
- // Always emit a 'return undefined' in case control fell off the end of
- // the body.
- { Comment cmnt(masm_, "[ return <undefined>;");
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- }
- EmitReturnSequence();
-
- // Force emit the constant pool, so it doesn't get emitted in the middle
- // of the stack check table.
- masm()->CheckConstPool(true, false);
-}
-
-
-void FullCodeGenerator::ClearAccumulator() {
- __ mov(r0, Operand(Smi::FromInt(0)));
-}
-
-
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
- Comment cmnt(masm_, "[ Stack check");
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- StackCheckStub stub;
- __ CallStub(&stub);
- // Record a mapping of this PC offset to the OSR id. This is used to find
- // the AST id from the unoptimized code in order to use it as a key into
- // the deoptimization input data found in the optimized code.
- RecordStackCheck(stmt->OsrEntryId());
-
- __ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
- // Record a mapping of the OSR id to this PC. This is used if the OSR
- // entry becomes the target of a bailout. We don't expect it to be, but
- // we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::EmitReturnSequence() {
- Comment cmnt(masm_, "[ Return sequence");
- if (return_label_.is_bound()) {
- __ b(&return_label_);
- } else {
- __ bind(&return_label_);
- if (FLAG_trace) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in r0.
- __ push(r0);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
-
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
- // Make sure that the constant pool is not emitted inside of the return
- // sequence.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Here we use masm_-> instead of the __ macro to avoid the code coverage
- // tool from instrumenting as we rely on the code size here.
- int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
- CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
- __ RecordJSReturn();
- masm_->mov(sp, fp);
- masm_->ldm(ia_w, sp, fp.bit() | lr.bit());
- masm_->add(sp, sp, Operand(sp_delta));
- masm_->Jump(lr);
- }
-
-#ifdef DEBUG
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- ASSERT(Assembler::kJSReturnSequenceInstructions <=
- masm_->InstructionsGeneratedSince(&check_exit_codesize));
-#endif
- }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
- codegen()->Move(result_register(), slot);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
- codegen()->Move(result_register(), slot);
- __ push(result_register());
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
- // For simplicity we always test the accumulator register.
- codegen()->Move(result_register(), slot);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Heap::RootListIndex index) const {
- __ LoadRoot(result_register(), index);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Heap::RootListIndex index) const {
- __ LoadRoot(result_register(), index);
- __ push(result_register());
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
- true,
- true_label_,
- false_label_);
- if (index == Heap::kUndefinedValueRootIndex ||
- index == Heap::kNullValueRootIndex ||
- index == Heap::kFalseValueRootIndex) {
- if (false_label_ != fall_through_) __ b(false_label_);
- } else if (index == Heap::kTrueValueRootIndex) {
- if (true_label_ != fall_through_) __ b(true_label_);
- } else {
- __ LoadRoot(result_register(), index);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Handle<Object> lit) const {
- __ mov(result_register(), Operand(lit));
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
- // Immediates cannot be pushed directly.
- __ mov(result_register(), Operand(lit));
- __ push(result_register());
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
- true,
- true_label_,
- false_label_);
- ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
- if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
- if (false_label_ != fall_through_) __ b(false_label_);
- } else if (lit->IsTrue() || lit->IsJSObject()) {
- if (true_label_ != fall_through_) __ b(true_label_);
- } else if (lit->IsString()) {
- if (String::cast(*lit)->length() == 0) {
- if (false_label_ != fall_through_) __ b(false_label_);
- __ b(false_label_);
- } else {
- if (true_label_ != fall_through_) __ b(true_label_);
- }
- } else if (lit->IsSmi()) {
- if (Smi::cast(*lit)->value() == 0) {
- if (false_label_ != fall_through_) __ b(false_label_);
- } else {
- if (true_label_ != fall_through_) __ b(true_label_);
- }
- } else {
- // For simplicity we always test the accumulator register.
- __ mov(result_register(), Operand(lit));
- codegen()->DoTest(true_label_, false_label_, fall_through_);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count,
- Register reg) const {
- ASSERT(count > 0);
- __ Drop(count);
- __ Move(result_register(), reg);
-}
-
-
-void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- if (count > 1) __ Drop(count - 1);
- __ str(reg, MemOperand(sp, 0));
-}
-
-
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- // For simplicity we always test the accumulator register.
- __ Drop(count);
- __ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- ASSERT(materialize_true == materialize_false);
- __ bind(materialize_true);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ bind(materialize_true);
- __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
- __ jmp(&done);
- __ bind(materialize_false);
- __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ bind(materialize_true);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
- __ jmp(&done);
- __ bind(materialize_false);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- ASSERT(materialize_true == true_label_);
- ASSERT(materialize_false == false_label_);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
- Heap::RootListIndex value_root_index =
- flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- __ LoadRoot(result_register(), value_root_index);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
- Heap::RootListIndex value_root_index =
- flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- __ LoadRoot(ip, value_root_index);
- __ push(ip);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
- true,
- true_label_,
- false_label_);
- if (flag) {
- if (true_label_ != fall_through_) __ b(true_label_);
- } else {
- if (false_label_ != fall_through_) __ b(false_label_);
- }
-}
-
-
-void FullCodeGenerator::DoTest(Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Emit the inlined tests assumed by the stub.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(result_register(), ip);
- __ b(eq, if_false);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(result_register(), ip);
- __ b(eq, if_true);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(result_register(), ip);
- __ b(eq, if_false);
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(result_register(), result_register());
- __ b(eq, if_false);
- __ JumpIfSmi(result_register(), if_true);
-
- // Call the ToBoolean stub for all other cases.
- ToBooleanStub stub(result_register());
- __ CallStub(&stub);
- __ tst(result_register(), result_register());
- } else {
- // Call the runtime to find the boolean value of the source and then
- // translate it into control flow to the pair of labels.
- __ push(result_register());
- __ CallRuntime(Runtime::kToBool, 1);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r0, ip);
- }
-
- // The stub returns nonzero for true.
- Split(ne, if_true, if_false, fall_through);
-}
-
-
-void FullCodeGenerator::Split(Condition cond,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (if_false == fall_through) {
- __ b(cond, if_true);
- } else if (if_true == fall_through) {
- __ b(NegateCondition(cond), if_false);
- } else {
- __ b(cond, if_true);
- __ b(if_false);
- }
-}
-
-
-MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- return MemOperand(fp, SlotOffset(slot));
- case Slot::CONTEXT: {
- int context_chain_length =
- scope()->ContextChainLength(slot->var()->scope());
- __ LoadContext(scratch, context_chain_length);
- return ContextOperand(scratch, slot->index());
- }
- case Slot::LOOKUP:
- UNREACHABLE();
- }
- UNREACHABLE();
- return MemOperand(r0, 0);
-}
-
-
-void FullCodeGenerator::Move(Register destination, Slot* source) {
- // Use destination as scratch.
- MemOperand slot_operand = EmitSlotSearch(source, destination);
- __ ldr(destination, slot_operand);
-}
-
-
-void FullCodeGenerator::Move(Slot* dst,
- Register src,
- Register scratch1,
- Register scratch2) {
- ASSERT(dst->type() != Slot::LOOKUP); // Not yet implemented.
- ASSERT(!scratch1.is(src) && !scratch2.is(src));
- MemOperand location = EmitSlotSearch(dst, scratch1);
- __ str(src, location);
- // Emit the write barrier code if the location is in the heap.
- if (dst->type() == Slot::CONTEXT) {
- __ RecordWrite(scratch1,
- Operand(Context::SlotOffset(dst->index())),
- scratch2,
- src);
- }
-}
-
-
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
- bool should_normalize,
- Label* if_true,
- Label* if_false) {
- // Only prepare for bailouts before splits if we're in a test
- // context. Otherwise, we let the Visit function deal with the
- // preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest() || !info_->IsOptimizable()) return;
-
- Label skip;
- if (should_normalize) __ b(&skip);
-
- ForwardBailoutStack* current = forward_bailout_stack_;
- while (current != NULL) {
- PrepareForBailout(current->expr(), state);
- current = current->parent();
- }
-
- if (should_normalize) {
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- Split(eq, if_true, if_false, NULL);
- __ bind(&skip);
- }
-}
-
-
-void FullCodeGenerator::EmitDeclaration(Variable* variable,
- Variable::Mode mode,
- FunctionLiteral* function) {
- Comment cmnt(masm_, "[ Declaration");
- ASSERT(variable != NULL); // Must have been resolved.
- Slot* slot = variable->AsSlot();
- Property* prop = variable->AsProperty();
-
- if (slot != NULL) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- if (mode == Variable::CONST) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ str(ip, MemOperand(fp, SlotOffset(slot)));
- } else if (function != NULL) {
- VisitForAccumulatorValue(function);
- __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
- }
- break;
-
- case Slot::CONTEXT:
- // We bypass the general EmitSlotSearch because we know more about
- // this specific context.
-
- // The variable in the decl always resides in the current function
- // context.
- ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (FLAG_debug_code) {
- // Check that we're not inside a 'with'.
- __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX));
- __ cmp(r1, cp);
- __ Check(eq, "Unexpected declaration in current context.");
- }
- if (mode == Variable::CONST) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ str(ip, ContextOperand(cp, slot->index()));
- // No write barrier since the_hole_value is in old space.
- } else if (function != NULL) {
- VisitForAccumulatorValue(function);
- __ str(result_register(), ContextOperand(cp, slot->index()));
- int offset = Context::SlotOffset(slot->index());
- // We know that we have written a function, which is not a smi.
- __ mov(r1, Operand(cp));
- __ RecordWrite(r1, Operand(offset), r2, result_register());
- }
- break;
-
- case Slot::LOOKUP: {
- __ mov(r2, Operand(variable->name()));
- // Declaration nodes are always introduced in one of two modes.
- ASSERT(mode == Variable::VAR ||
- mode == Variable::CONST);
- PropertyAttributes attr =
- (mode == Variable::VAR) ? NONE : READ_ONLY;
- __ mov(r1, Operand(Smi::FromInt(attr)));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (mode == Variable::CONST) {
- __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
- __ Push(cp, r2, r1, r0);
- } else if (function != NULL) {
- __ Push(cp, r2, r1);
- // Push initial value for function declaration.
- VisitForStackValue(function);
- } else {
- __ mov(r0, Operand(Smi::FromInt(0))); // No initial value!
- __ Push(cp, r2, r1, r0);
- }
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-
- } else if (prop != NULL) {
- if (function != NULL || mode == Variable::CONST) {
- // We are declaring a function or constant that rewrites to a
- // property. Use (keyed) IC to set the initial value. We
- // cannot visit the rewrite because it's shared and we risk
- // recording duplicate AST IDs for bailouts from optimized code.
- ASSERT(prop->obj()->AsVariableProxy() != NULL);
- { AccumulatorValueContext for_object(this);
- EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
- }
- if (function != NULL) {
- __ push(r0);
- VisitForAccumulatorValue(function);
- __ pop(r2);
- } else {
- __ mov(r2, r0);
- __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
- }
- ASSERT(prop->key()->AsLiteral() != NULL &&
- prop->key()->AsLiteral()->handle()->IsSmi());
- __ mov(r1, Operand(prop->key()->AsLiteral()->handle()));
-
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- // Value in r0 is ignored (declarations are statements).
- }
- }
-}
-
-
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
- EmitDeclaration(decl->proxy()->var(), decl->mode(), decl->fun());
-}
-
-
-void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals.
- // The context is the first argument.
- __ mov(r2, Operand(pairs));
- __ mov(r1, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
- __ mov(r0, Operand(Smi::FromInt(strict_mode_flag())));
- __ Push(cp, r2, r1, r0);
- __ CallRuntime(Runtime::kDeclareGlobals, 4);
- // Return value is ignored.
-}
-
-
-void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
- Comment cmnt(masm_, "[ SwitchStatement");
- Breakable nested_statement(this, stmt);
- SetStatementPosition(stmt);
-
- // Keep the switch value on the stack until a case matches.
- VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
-
- ZoneList<CaseClause*>* clauses = stmt->cases();
- CaseClause* default_clause = NULL; // Can occur anywhere in the list.
-
- Label next_test; // Recycled for each test.
- // Compile all the tests with branches to their bodies.
- for (int i = 0; i < clauses->length(); i++) {
- CaseClause* clause = clauses->at(i);
- clause->body_target()->entry_label()->Unuse();
-
- // The default is not a test, but remember it as final fall through.
- if (clause->is_default()) {
- default_clause = clause;
- continue;
- }
-
- Comment cmnt(masm_, "[ Case comparison");
- __ bind(&next_test);
- next_test.Unuse();
-
- // Compile the label expression.
- VisitForAccumulatorValue(clause->label());
-
- // Perform the comparison as if via '==='.
- __ ldr(r1, MemOperand(sp, 0)); // Switch value.
- bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- Label slow_case;
- __ orr(r2, r1, r0);
- patch_site.EmitJumpIfNotSmi(r2, &slow_case);
-
- __ cmp(r1, r0);
- __ b(ne, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ b(clause->body_target()->entry_label());
- __ bind(&slow_case);
- }
-
- // Record position before stub call for type feedback.
- SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- EmitCallIC(ic, &patch_site);
- __ cmp(r0, Operand(0));
- __ b(ne, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ b(clause->body_target()->entry_label());
- }
-
- // Discard the test value and jump to the default if present, otherwise to
- // the end of the statement.
- __ bind(&next_test);
- __ Drop(1); // Switch value is no longer needed.
- if (default_clause == NULL) {
- __ b(nested_statement.break_target());
- } else {
- __ b(default_clause->body_target()->entry_label());
- }
-
- // Compile all the case bodies.
- for (int i = 0; i < clauses->length(); i++) {
- Comment cmnt(masm_, "[ Case body");
- CaseClause* clause = clauses->at(i);
- __ bind(clause->body_target()->entry_label());
- PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
- VisitStatements(clause->statements());
- }
-
- __ bind(nested_statement.break_target());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
- Comment cmnt(masm_, "[ ForInStatement");
- SetStatementPosition(stmt);
-
- Label loop, exit;
- ForIn loop_statement(this, stmt);
- increment_loop_depth();
-
- // Get the object to enumerate over. Both SpiderMonkey and JSC
- // ignore null and undefined in contrast to the specification; see
- // ECMA-262 section 12.6.4.
- VisitForAccumulatorValue(stmt->enumerable());
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
- __ b(eq, &exit);
- Register null_value = r5;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmp(r0, null_value);
- __ b(eq, &exit);
-
- // Convert the object to a JS object.
- Label convert, done_convert;
- __ JumpIfSmi(r0, &convert);
- __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
- __ b(hs, &done_convert);
- __ bind(&convert);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
- __ bind(&done_convert);
- __ push(r0);
-
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- Label next, call_runtime;
- // Preload a couple of values used in the loop.
- Register empty_fixed_array_value = r6;
- __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- Register empty_descriptor_array_value = r7;
- __ LoadRoot(empty_descriptor_array_value,
- Heap::kEmptyDescriptorArrayRootIndex);
- __ mov(r1, r0);
- __ bind(&next);
-
- // Check that there are no elements. Register r1 contains the
- // current JS object we've reached through the prototype chain.
- __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ cmp(r2, empty_fixed_array_value);
- __ b(ne, &call_runtime);
-
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in r2 for the subsequent
- // prototype load.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOffset));
- __ cmp(r3, empty_descriptor_array_value);
- __ b(eq, &call_runtime);
-
- // Check that there is an enum cache in the non-empty instance
- // descriptors (r3). This is the case if the next enumeration
- // index field does not contain a smi.
- __ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset));
- __ JumpIfSmi(r3, &call_runtime);
-
- // For all objects but the receiver, check that the cache is empty.
- Label check_prototype;
- __ cmp(r1, r0);
- __ b(eq, &check_prototype);
- __ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset));
- __ cmp(r3, empty_fixed_array_value);
- __ b(ne, &call_runtime);
-
- // Load the prototype from the map and loop if non-null.
- __ bind(&check_prototype);
- __ ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset));
- __ cmp(r1, null_value);
- __ b(ne, &next);
-
- // The enum cache is valid. Load the map of the object being
- // iterated over and use the cache for the iteration.
- Label use_cache;
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ b(&use_cache);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(r0); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
- // If we got a map from the runtime call, we can do a fast
- // modification check. Otherwise, we got a fixed array, and we have
- // to do a slow check.
- Label fixed_array;
- __ mov(r2, r0);
- __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kMetaMapRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &fixed_array);
-
- // We got a map in register r0. Get the enumeration cache from it.
- __ bind(&use_cache);
- __ ldr(r1, FieldMemOperand(r0, Map::kInstanceDescriptorsOffset));
- __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
- __ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
- // Setup the four remaining stack slots.
- __ push(r0); // Map.
- __ ldr(r1, FieldMemOperand(r2, FixedArray::kLengthOffset));
- __ mov(r0, Operand(Smi::FromInt(0)));
- // Push enumeration cache, enumeration cache length (as smi) and zero.
- __ Push(r2, r1, r0);
- __ jmp(&loop);
-
- // We got a fixed array in register r0. Iterate through that.
- __ bind(&fixed_array);
- __ mov(r1, Operand(Smi::FromInt(0))); // Map (0) - force slow check.
- __ Push(r1, r0);
- __ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ Push(r1, r0); // Fixed array length (as smi) and initial index.
-
- // Generate code for doing the condition check.
- __ bind(&loop);
- // Load the current count to r0, load the length to r1.
- __ Ldrd(r0, r1, MemOperand(sp, 0 * kPointerSize));
- __ cmp(r0, r1); // Compare to the array length.
- __ b(hs, loop_statement.break_target());
-
- // Get the current entry of the array into register r3.
- __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
- __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Get the expected map from the stack or a zero map in the
- // permanent slow case into register r2.
- __ ldr(r2, MemOperand(sp, 3 * kPointerSize));
-
- // Check if the expected map still matches that of the enumerable.
- // If not, we have to filter the key.
- Label update_each;
- __ ldr(r1, MemOperand(sp, 4 * kPointerSize));
- __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r4, Operand(r2));
- __ b(eq, &update_each);
-
- // Convert the entry to a string or (smi) 0 if it isn't a property
- // any more. If the property has been removed while iterating, we
- // just skip it.
- __ push(r1); // Enumerable.
- __ push(r3); // Current entry.
- __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS);
- __ mov(r3, Operand(r0), SetCC);
- __ b(eq, loop_statement.continue_target());
-
- // Update the 'each' property or variable from the possibly filtered
- // entry in register r3.
- __ bind(&update_each);
- __ mov(result_register(), r3);
- // Perform the assignment as if via '='.
- { EffectContext context(this);
- EmitAssignment(stmt->each(), stmt->AssignmentId());
- }
-
- // Generate code for the body of the loop.
- Visit(stmt->body());
-
- // Generate code for the going to the next element by incrementing
- // the index (smi) stored on top of the stack.
- __ bind(loop_statement.continue_target());
- __ pop(r0);
- __ add(r0, r0, Operand(Smi::FromInt(1)));
- __ push(r0);
-
- EmitStackCheck(stmt);
- __ b(&loop);
-
- // Remove the pointers stored on the stack.
- __ bind(loop_statement.break_target());
- __ Drop(5);
-
- // Exit and decrement the loop depth.
- __ bind(&exit);
- decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
- bool pretenure) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning. If
- // we're running with the --always-opt or the --prepare-always-opt
- // flag, we need to use the runtime function so that the new function
- // we are creating here gets a chance to have its code optimized and
- // doesn't just get a copy of the existing unoptimized code.
- if (!FLAG_always_opt &&
- !FLAG_prepare_always_opt &&
- !pretenure &&
- scope()->is_function_scope() &&
- info->num_literals() == 0) {
- FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
- __ mov(r0, Operand(info));
- __ push(r0);
- __ CallStub(&stub);
- } else {
- __ mov(r0, Operand(info));
- __ LoadRoot(r1, pretenure ? Heap::kTrueValueRootIndex
- : Heap::kFalseValueRootIndex);
- __ Push(cp, r0, r1);
- __ CallRuntime(Runtime::kNewClosure, 3);
- }
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr->var());
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
- Slot* slot,
- Label* slow) {
- ASSERT(slot->type() == Slot::CONTEXT);
- Register context = cp;
- Register next = r3;
- Register temp = r4;
-
- for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
- }
- __ ldr(next, ContextOperand(context, Context::CLOSURE_INDEX));
- __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
- // Walk the rest of the chain without clobbering cp.
- context = next;
- }
- }
- // Check that last extension is NULL.
- __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
-
- // This function is used only for loads, not stores, so it's safe to
- // return an cp-based operand (the write barrier cannot be allowed to
- // destroy the cp register).
- return ContextOperand(context, slot->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
- Slot* slot,
- TypeofState typeof_state,
- Label* slow,
- Label* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
- EmitLoadGlobalSlotCheckExtensions(slot, typeof_state, slow);
- __ jmp(done);
- } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
- Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
- if (potential_slot != NULL) {
- // Generate fast case for locals that rewrite to slots.
- __ ldr(r0, ContextSlotOperandCheckExtensions(potential_slot, slow));
- if (potential_slot->var()->mode() == Variable::CONST) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r0, ip);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- }
- __ jmp(done);
- } else if (rewrite != NULL) {
- // Generate fast case for calls of an argument function.
- Property* property = rewrite->AsProperty();
- if (property != NULL) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- Literal* key_literal = property->key()->AsLiteral();
- if (obj_proxy != NULL &&
- key_literal != NULL &&
- obj_proxy->IsArguments() &&
- key_literal->handle()->IsSmi()) {
- // Load arguments object if there are no eval-introduced
- // variables. Then load the argument from the arguments
- // object using keyed load.
- __ ldr(r1,
- ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
- slow));
- __ mov(r0, Operand(key_literal->handle()));
- Handle<Code> ic =
- isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- __ jmp(done);
- }
- }
- }
- }
-}
-
-
-void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
- Slot* slot,
- TypeofState typeof_state,
- Label* slow) {
- Register current = cp;
- Register next = r1;
- Register temp = r2;
-
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
- }
- // Load next context in chain.
- __ ldr(next, ContextOperand(current, Context::CLOSURE_INDEX));
- __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
- // Walk the rest of the chain without clobbering cp.
- current = next;
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s->is_eval_scope()) {
- Label loop, fast;
- if (!current.is(next)) {
- __ Move(next, current);
- }
- __ bind(&loop);
- // Terminate at global context.
- __ ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
- __ cmp(temp, ip);
- __ b(eq, &fast);
- // Check that extension is NULL.
- __ ldr(temp, ContextOperand(next, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
- // Load next context in chain.
- __ ldr(next, ContextOperand(next, Context::CLOSURE_INDEX));
- __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
- __ b(&loop);
- __ bind(&fast);
- }
-
- __ ldr(r0, GlobalObjectOperand());
- __ mov(r2, Operand(slot->var()->name()));
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, mode);
-}
-
-
-void FullCodeGenerator::EmitVariableLoad(Variable* var) {
- // Four cases: non-this global variables, lookup slots, all other
- // types of slots, and parameters that rewrite to explicit property
- // accesses on the arguments object.
- Slot* slot = var->AsSlot();
- Property* property = var->AsProperty();
-
- if (var->is_global() && !var->is_this()) {
- Comment cmnt(masm_, "Global variable");
- // Use inline caching. Variable name is passed in r2 and the global
- // object (receiver) in r0.
- __ ldr(r0, GlobalObjectOperand());
- __ mov(r2, Operand(var->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
- context()->Plug(r0);
-
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- Label done, slow;
-
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLoadFromSlotFastCase(slot, NOT_INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- Comment cmnt(masm_, "Lookup slot");
- __ mov(r1, Operand(var->name()));
- __ Push(cp, r1); // Context and name.
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ bind(&done);
-
- context()->Plug(r0);
-
- } else if (slot != NULL) {
- Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
- ? "Context slot"
- : "Stack slot");
- if (var->mode() == Variable::CONST) {
- // Constants may be the hole value if they have not been initialized.
- // Unhole them.
- MemOperand slot_operand = EmitSlotSearch(slot, r0);
- __ ldr(r0, slot_operand);
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r0, ip);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- context()->Plug(r0);
- } else {
- context()->Plug(slot);
- }
- } else {
- Comment cmnt(masm_, "Rewritten parameter");
- ASSERT_NOT_NULL(property);
- // Rewritten parameter accesses are of the form "slot[literal]".
-
- // Assert that the object is in a slot.
- Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(object_var);
- Slot* object_slot = object_var->AsSlot();
- ASSERT_NOT_NULL(object_slot);
-
- // Load the object.
- Move(r1, object_slot);
-
- // Assert that the key is a smi.
- Literal* key_literal = property->key()->AsLiteral();
- ASSERT_NOT_NULL(key_literal);
- ASSERT(key_literal->handle()->IsSmi());
-
- // Load the key.
- __ mov(r0, Operand(key_literal->handle()));
-
- // Call keyed load IC. It has arguments key and receiver in r0 and r1.
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- context()->Plug(r0);
- }
-}
-
-
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // r5 = materialized value (RegExp literal)
- // r4 = JS function, literals array
- // r3 = literal index
- // r2 = RegExp pattern
- // r1 = RegExp flags
- // r0 = RegExp literal clone
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ ldr(r5, FieldMemOperand(r4, literal_offset));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r5, ip);
- __ b(ne, &materialized);
-
- // Create regexp literal using runtime function.
- // Result will be in r0.
- __ mov(r3, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r2, Operand(expr->pattern()));
- __ mov(r1, Operand(expr->flags()));
- __ Push(r4, r3, r2, r1);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mov(r5, r0);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(r5);
- __ mov(r0, Operand(Smi::FromInt(size)));
- __ push(r0);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(r5);
-
- __ bind(&allocated);
- // After this, registers are used as follows:
- // r0: Newly allocated regexp.
- // r5: Materialized regexp.
- // r2: temp.
- __ CopyFields(r0, r5, r2.bit(), size / kPointerSize);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- Comment cmnt(masm_, "[ ObjectLiteral");
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r1, Operand(expr->constant_properties()));
- int flags = expr->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- flags |= expr->has_function()
- ? ObjectLiteral::kHasFunction
- : ObjectLiteral::kNoFlags;
- __ mov(r0, Operand(Smi::FromInt(flags)));
- __ Push(r3, r2, r1, r0);
- if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
- }
-
- // If result_saved is true the result is on top of the stack. If
- // result_saved is false the result is in r0.
- bool result_saved = false;
-
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- expr->CalculateEmitStore();
-
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
- if (property->IsCompileTimeValue()) continue;
-
- Literal* key = property->key();
- Expression* value = property->value();
- if (!result_saved) {
- __ push(r0); // Save result on stack
- result_saved = true;
- }
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- UNREACHABLE();
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
- // Fall through.
- case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
- if (property->emit_store()) {
- VisitForAccumulatorValue(value);
- __ mov(r2, Operand(key->handle()));
- __ ldr(r1, MemOperand(sp));
- Handle<Code> ic = isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- PrepareForBailoutForId(key->id(), NO_REGISTERS);
- } else {
- VisitForEffect(value);
- }
- break;
- }
- // Fall through.
- case ObjectLiteral::Property::PROTOTYPE:
- // Duplicate receiver on stack.
- __ ldr(r0, MemOperand(sp));
- __ push(r0);
- VisitForStackValue(key);
- VisitForStackValue(value);
- if (property->emit_store()) {
- __ mov(r0, Operand(Smi::FromInt(NONE))); // PropertyAttributes
- __ push(r0);
- __ CallRuntime(Runtime::kSetProperty, 4);
- } else {
- __ Drop(3);
- }
- break;
- case ObjectLiteral::Property::GETTER:
- case ObjectLiteral::Property::SETTER:
- // Duplicate receiver on stack.
- __ ldr(r0, MemOperand(sp));
- __ push(r0);
- VisitForStackValue(key);
- __ mov(r1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
- Smi::FromInt(1) :
- Smi::FromInt(0)));
- __ push(r1);
- VisitForStackValue(value);
- __ CallRuntime(Runtime::kDefineAccessor, 4);
- break;
- }
- }
-
- if (expr->has_function()) {
- ASSERT(result_saved);
- __ ldr(r0, MemOperand(sp));
- __ push(r0);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(r0);
- }
-}
-
-
-void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- ZoneList<Expression*>* subexprs = expr->values();
- int length = subexprs->length();
-
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r1, Operand(expr->constant_elements()));
- __ Push(r3, r2, r1);
- if (expr->constant_elements()->map() ==
- isolate()->heap()->fixed_cow_array_map()) {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
- __ CallStub(&stub);
- __ IncrementCounter(
- isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2);
- } else if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- } else {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
- __ CallStub(&stub);
- }
-
- bool result_saved = false; // Is the result saved to the stack?
-
- // Emit code to evaluate all the non-constant subexpressions and to store
- // them into the newly cloned array.
- for (int i = 0; i < length; i++) {
- Expression* subexpr = subexprs->at(i);
- // If the subexpression is a literal or a simple materialized literal it
- // is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
-
- if (!result_saved) {
- __ push(r0);
- result_saved = true;
- }
- VisitForAccumulatorValue(subexpr);
-
- // Store the subexpression value in the array's elements.
- __ ldr(r1, MemOperand(sp)); // Copy of array literal.
- __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ str(result_register(), FieldMemOperand(r1, offset));
-
- // Update the write barrier for the array store with r0 as the scratch
- // register.
- __ RecordWrite(r1, Operand(offset), r2, result_register());
-
- PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(r0);
- }
-}
-
-
-void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* property = expr->target()->AsProperty();
- if (property != NULL) {
- assign_type = (property->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- // Evaluate LHS expression.
- switch (assign_type) {
- case VARIABLE:
- // Nothing to do here.
- break;
- case NAMED_PROPERTY:
- if (expr->is_compound()) {
- // We need the receiver both on the stack and in the accumulator.
- VisitForAccumulatorValue(property->obj());
- __ push(result_register());
- } else {
- VisitForStackValue(property->obj());
- }
- break;
- case KEYED_PROPERTY:
- if (expr->is_compound()) {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- __ ldr(r0, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
- __ push(r0);
- __ mov(r0, Operand(property->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
- }
- __ ldr(r1, MemOperand(sp, 0));
- __ push(r0);
- } else {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- __ ldr(r1, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
- __ mov(r0, Operand(property->key()->AsLiteral()->handle()));
- __ Push(r1, r0);
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
- }
- break;
- }
-
- // For compound assignments we need another deoptimization point after the
- // variable/property load.
- if (expr->is_compound()) {
- { AccumulatorValueContext context(this);
- switch (assign_type) {
- case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy()->var());
- PrepareForBailout(expr->target(), TOS_REG);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
- break;
- }
- }
-
- Token::Value op = expr->binary_op();
- __ push(r0); // Left operand goes on the stack.
- VisitForAccumulatorValue(expr->value());
-
- OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
- ? OVERWRITE_RIGHT
- : NO_OVERWRITE;
- SetSourcePosition(expr->position() + 1);
- AccumulatorValueContext context(this);
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr,
- op,
- mode,
- expr->target(),
- expr->value());
- } else {
- EmitBinaryOp(op, mode);
- }
-
- // Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), TOS_REG);
- } else {
- VisitForAccumulatorValue(expr->value());
- }
-
- // Record source position before possible IC call.
- SetSourcePosition(expr->position());
-
- // Store the value.
- switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r0);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyAssignment(expr);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyAssignment(expr);
- break;
- }
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Literal* key = prop->key()->AsLiteral();
- __ mov(r2, Operand(key->handle()));
- // Call load IC. It has arguments receiver and property name r0 and r2.
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- // Call keyed load IC. It has arguments key and receiver in r0 and r1.
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
-}
-
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
- Token::Value op,
- OverwriteMode mode,
- Expression* left_expr,
- Expression* right_expr) {
- Label done, smi_case, stub_call;
-
- Register scratch1 = r2;
- Register scratch2 = r3;
-
- // Get the arguments.
- Register left = r1;
- Register right = r0;
- __ pop(left);
-
- // Perform combined smi check on both operands.
- __ orr(scratch1, left, Operand(right));
- STATIC_ASSERT(kSmiTag == 0);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(scratch1, &smi_case);
-
- __ bind(&stub_call);
- TypeRecordingBinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), &patch_site);
- __ jmp(&done);
-
- __ bind(&smi_case);
- // Smi case. This code works the same way as the smi-smi case in the type
- // recording binary operation stub, see
- // TypeRecordingBinaryOpStub::GenerateSmiSmiOperation for comments.
- switch (op) {
- case Token::SAR:
- __ b(&stub_call);
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ mov(right, Operand(left, ASR, scratch1));
- __ bic(right, right, Operand(kSmiTagMask));
- break;
- case Token::SHL: {
- __ b(&stub_call);
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSL, scratch2));
- __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
- __ b(mi, &stub_call);
- __ SmiTag(right, scratch1);
- break;
- }
- case Token::SHR: {
- __ b(&stub_call);
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSR, scratch2));
- __ tst(scratch1, Operand(0xc0000000));
- __ b(ne, &stub_call);
- __ SmiTag(right, scratch1);
- break;
- }
- case Token::ADD:
- __ add(scratch1, left, Operand(right), SetCC);
- __ b(vs, &stub_call);
- __ mov(right, scratch1);
- break;
- case Token::SUB:
- __ sub(scratch1, left, Operand(right), SetCC);
- __ b(vs, &stub_call);
- __ mov(right, scratch1);
- break;
- case Token::MUL: {
- __ SmiUntag(ip, right);
- __ smull(scratch1, scratch2, left, ip);
- __ mov(ip, Operand(scratch1, ASR, 31));
- __ cmp(ip, Operand(scratch2));
- __ b(ne, &stub_call);
- __ tst(scratch1, Operand(scratch1));
- __ mov(right, Operand(scratch1), LeaveCC, ne);
- __ b(ne, &done);
- __ add(scratch2, right, Operand(left), SetCC);
- __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
- __ b(mi, &stub_call);
- break;
- }
- case Token::BIT_OR:
- __ orr(right, left, Operand(right));
- break;
- case Token::BIT_AND:
- __ and_(right, left, Operand(right));
- break;
- case Token::BIT_XOR:
- __ eor(right, left, Operand(right));
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
- OverwriteMode mode) {
- __ pop(r1);
- TypeRecordingBinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), NULL);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
- // Invalid left-hand sides are rewritten to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->AsProperty();
- if (prop != NULL) {
- assign_type = (prop->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- switch (assign_type) {
- case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
- EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN);
- break;
- }
- case NAMED_PROPERTY: {
- __ push(r0); // Preserve value.
- VisitForAccumulatorValue(prop->obj());
- __ mov(r1, r0);
- __ pop(r0); // Restore value.
- __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- break;
- }
- case KEYED_PROPERTY: {
- __ push(r0); // Preserve value.
- if (prop->is_synthetic()) {
- ASSERT(prop->obj()->AsVariableProxy() != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
- { AccumulatorValueContext for_object(this);
- EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
- }
- __ mov(r2, r0);
- __ mov(r1, Operand(prop->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ mov(r1, r0);
- __ pop(r2);
- }
- __ pop(r0); // Restore value.
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- break;
- }
- }
- PrepareForBailoutForId(bailout_ast_id, TOS_REG);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
- // Left-hand sides that rewrite to explicit property accesses do not reach
- // here.
- ASSERT(var != NULL);
- ASSERT(var->is_global() || var->AsSlot() != NULL);
-
- if (var->is_global()) {
- ASSERT(!var->is_this());
- // Assignment to a global variable. Use inline caching for the
- // assignment. Right-hand-side value is passed in r0, variable name in
- // r2, and the global object in r1.
- __ mov(r2, Operand(var->name()));
- __ ldr(r1, GlobalObjectOperand());
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
-
- } else if (op == Token::INIT_CONST) {
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are able
- // to drill a hole to that function context, even from inside a 'with'
- // context. We thus bypass the normal static scope lookup.
- Slot* slot = var->AsSlot();
- Label skip;
- switch (slot->type()) {
- case Slot::PARAMETER:
- // No const parameters.
- UNREACHABLE();
- break;
- case Slot::LOCAL:
- // Detect const reinitialization by checking for the hole value.
- __ ldr(r1, MemOperand(fp, SlotOffset(slot)));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &skip);
- __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
- break;
- case Slot::CONTEXT: {
- __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX));
- __ ldr(r2, ContextOperand(r1, slot->index()));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r2, ip);
- __ b(ne, &skip);
- __ str(r0, ContextOperand(r1, slot->index()));
- int offset = Context::SlotOffset(slot->index());
- __ mov(r3, r0); // Preserve the stored value in r0.
- __ RecordWrite(r1, Operand(offset), r3, r2);
- break;
- }
- case Slot::LOOKUP:
- __ push(r0);
- __ mov(r0, Operand(slot->var()->name()));
- __ Push(cp, r0); // Context and name.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- break;
- }
- __ bind(&skip);
-
- } else if (var->mode() != Variable::CONST) {
- // Perform the assignment for non-const variables. Const assignments
- // are simply skipped.
- Slot* slot = var->AsSlot();
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- // Perform the assignment.
- __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
- break;
-
- case Slot::CONTEXT: {
- MemOperand target = EmitSlotSearch(slot, r1);
- // Perform the assignment and issue the write barrier.
- __ str(result_register(), target);
- // RecordWrite may destroy all its register arguments.
- __ mov(r3, result_register());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ RecordWrite(r1, Operand(offset), r2, r3);
- break;
- }
-
- case Slot::LOOKUP:
- // Call the runtime for the assignment.
- __ push(r0); // Value.
- __ mov(r1, Operand(slot->var()->name()));
- __ mov(r0, Operand(Smi::FromInt(strict_mode_flag())));
- __ Push(cp, r1, r0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a named store IC.
- Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
-
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- __ push(result_register());
- __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is now under value.
- __ push(ip);
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- __ pop(result_register());
- }
-
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
- __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- // Load receiver to r1. Leave a copy in the stack if needed for turning the
- // receiver into fast case.
- if (expr->ends_initialization_block()) {
- __ ldr(r1, MemOperand(sp));
- } else {
- __ pop(r1);
- }
-
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
-
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ push(r0); // Result of assignment, saved even if not needed.
- // Receiver is under the result value.
- __ ldr(ip, MemOperand(sp, kPointerSize));
- __ push(ip);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(r0);
- __ Drop(1);
- }
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a keyed store IC.
-
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- __ push(result_register());
- // Receiver is now under the key and value.
- __ ldr(ip, MemOperand(sp, 2 * kPointerSize));
- __ push(ip);
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- __ pop(result_register());
- }
-
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
- __ pop(r1); // Key.
- // Load receiver to r2. Leave a copy in the stack if needed for turning the
- // receiver into fast case.
- if (expr->ends_initialization_block()) {
- __ ldr(r2, MemOperand(sp));
- } else {
- __ pop(r2);
- }
-
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
-
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ push(r0); // Result of assignment, saved even if not needed.
- // Receiver is under the result value.
- __ ldr(ip, MemOperand(sp, kPointerSize));
- __ push(ip);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(r0);
- __ Drop(1);
- }
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- Expression* key = expr->key();
-
- if (key->IsPropertyName()) {
- VisitForAccumulatorValue(expr->obj());
- EmitNamedPropertyLoad(expr);
- context()->Plug(r0);
- } else {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ pop(r1);
- EmitKeyedPropertyLoad(expr);
- context()->Plug(r0);
- }
-}
-
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- __ mov(r2, Operand(name));
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
- EmitCallIC(ic, mode);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key,
- RelocInfo::Mode mode) {
- // Load the key.
- VisitForAccumulatorValue(key);
-
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- __ pop(r1);
- __ push(r0);
- __ push(r1);
-
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
- __ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- EmitCallIC(ic, mode);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r0); // Drop the key still on the stack.
-}
-
-
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
- // Code common for calls using the call stub.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r0);
-}
-
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
- int arg_count) {
- // Push copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- } else {
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- }
- __ push(r1);
-
- // Push the receiver of the enclosing function and do runtime call.
- __ ldr(r1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
- __ push(r1);
- // Push the strict mode flag.
- __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
- __ push(r1);
-
- __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
- ? Runtime::kResolvePossiblyDirectEvalNoLookup
- : Runtime::kResolvePossiblyDirectEval, 4);
-}
-
-
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* fun = expr->expression();
- Variable* var = fun->AsVariableProxy()->AsVariable();
-
- if (var != NULL && var->is_possibly_eval()) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
- // call. Then we call the resolved function using the given
- // arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- { PreservePositionScope pos_scope(masm()->positions_recorder());
- VisitForStackValue(fun);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ push(r2); // Reserved receiver slot.
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // If we know that eval can only be shadowed by eval-introduced
- // variables we attempt to load the global eval function directly
- // in generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- Label done;
- if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
- Label slow;
- EmitLoadGlobalSlotCheckExtensions(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &slow);
- // Push the function and resolve eval.
- __ push(r0);
- EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
- __ jmp(&done);
- __ bind(&slow);
- }
-
- // Push copy of the function (found below the arguments) and
- // resolve eval.
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ push(r1);
- EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
- if (done.is_linked()) {
- __ bind(&done);
- }
-
- // The runtime call returns a pair of values in r0 (function) and
- // r1 (receiver). Touch up the stack with the right values.
- __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ str(r1, MemOperand(sp, arg_count * kPointerSize));
- }
-
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r0);
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // Push global object as receiver for the call IC.
- __ ldr(r0, GlobalObjectOperand());
- __ push(r0);
- EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (var != NULL && var->AsSlot() != NULL &&
- var->AsSlot()->type() == Slot::LOOKUP) {
- // Call to a lookup slot (dynamically introduced variable).
- Label slow, done;
-
- { PreservePositionScope scope(masm()->positions_recorder());
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &slow,
- &done);
- }
-
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in r0)
- // and the object holding it (returned in edx).
- __ push(context_register());
- __ mov(r2, Operand(var->name()));
- __ push(r2);
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ Push(r0, r1); // Function, receiver.
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- Label call;
- __ b(&call);
- __ bind(&done);
- // Push function.
- __ push(r0);
- // Push global receiver.
- __ ldr(r1, GlobalObjectOperand());
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- __ push(r1);
- __ bind(&call);
- }
-
- EmitCallWithStub(expr);
- } else if (fun->AsProperty() != NULL) {
- // Call to an object property.
- Property* prop = fun->AsProperty();
- Literal* key = prop->key()->AsLiteral();
- if (key != NULL && key->handle()->IsSymbol()) {
- // Call to a named property, use call IC.
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(prop->obj());
- }
- EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
- } else {
- // Call to a keyed property.
- // For a synthetic property use keyed load IC followed by function call,
- // for a regular property use keyed CallIC.
- if (prop->is_synthetic()) {
- // Do not visit the object and key subexpressions (they are shared
- // by all occurrences of the same rewritten parameter).
- ASSERT(prop->obj()->AsVariableProxy() != NULL);
- ASSERT(prop->obj()->AsVariableProxy()->var()->AsSlot() != NULL);
- Slot* slot = prop->obj()->AsVariableProxy()->var()->AsSlot();
- MemOperand operand = EmitSlotSearch(slot, r1);
- __ ldr(r1, operand);
-
- ASSERT(prop->key()->AsLiteral() != NULL);
- ASSERT(prop->key()->AsLiteral()->handle()->IsSmi());
- __ mov(r0, Operand(prop->key()->AsLiteral()->handle()));
-
- // Record source code position for IC call.
- SetSourcePosition(prop->position());
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- __ ldr(r1, GlobalObjectOperand());
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- __ Push(r0, r1); // Function, receiver.
- EmitCallWithStub(expr);
- } else {
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(prop->obj());
- }
- EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
- }
- }
- } else {
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(fun);
- }
- // Load global receiver object.
- __ ldr(r1, GlobalObjectOperand());
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- __ push(r1);
- // Emit function call.
- EmitCallWithStub(expr);
- }
-
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- ASSERT(expr->return_is_recorded_);
-#endif
-}
-
-
-void FullCodeGenerator::VisitCallNew(CallNew* expr) {
- Comment cmnt(masm_, "[ CallNew");
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments.
-
- // Push constructor on the stack. If it's not a function it's used as
- // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
- // ignored.
- VisitForStackValue(expr->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetSourcePosition(expr->position());
-
- // Load function and argument count into r1 and r0.
- __ mov(r0, Operand(arg_count));
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
-
- Handle<Code> construct_builtin =
- isolate()->builtins()->JSConstructCall();
- __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ tst(r0, Operand(kSmiTagMask));
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ tst(r0, Operand(kSmiTagMask | 0x80000000));
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r0, ip);
- __ b(eq, if_true);
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- __ b(ne, if_false);
- __ ldrb(r1, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
- __ b(lt, if_false);
- __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(le, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(ge, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(ne, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
-
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- if (FLAG_debug_code) __ AbortIfSmi(r0);
-
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset));
- __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ b(ne, if_true);
-
- // Check for fast case object. Generate false result for slow case object.
- __ ldr(r2, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r2, ip);
- __ b(eq, if_false);
-
- // Look for valueOf symbol in the descriptor array, and indicate false if
- // found. The type is not checked, so if it is a transition it is a false
- // negative.
- __ ldr(r4, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
- __ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset));
- // r4: descriptor array
- // r3: length of descriptor array
- // Calculate the end of the descriptor array.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kPointerSize == 4);
- __ add(r2, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Calculate location of the first key name.
- __ add(r4,
- r4,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag +
- DescriptorArray::kFirstIndex * kPointerSize));
- // Loop through all the keys in the descriptor array. If one of these is the
- // symbol valueOf the result is false.
- Label entry, loop;
- // The use of ip to store the valueOf symbol asumes that it is not otherwise
- // used in the loop below.
- __ mov(ip, Operand(FACTORY->value_of_symbol()));
- __ jmp(&entry);
- __ bind(&loop);
- __ ldr(r3, MemOperand(r4, 0));
- __ cmp(r3, ip);
- __ b(eq, if_false);
- __ add(r4, r4, Operand(kPointerSize));
- __ bind(&entry);
- __ cmp(r4, Operand(r2));
- __ b(ne, &loop);
-
- // If a valueOf property is not found on the object check that it's
- // prototype is the un-modified String prototype. If not result is false.
- __ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, if_false);
- __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ ldr(r3, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
- __ ldr(r3, ContextOperand(r3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ cmp(r2, r3);
- __ b(ne, if_false);
-
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ ldrb(r2, FieldMemOperand(r4, Map::kBitField2Offset));
- __ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ strb(r2, FieldMemOperand(r4, Map::kBitField2Offset));
- __ jmp(if_true);
-
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &check_frame_marker);
- __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
- __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ pop(r1);
- __ cmp(r0, r1);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in edx and the formal
- // parameter count in r0.
- VisitForAccumulatorValue(args->at(0));
- __ mov(r1, r0);
- __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- Label exit;
- // Get the number of formal parameters.
- __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &exit);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Label done, null, function, non_function_constructor;
-
- VisitForAccumulatorValue(args->at(0));
-
- // If the object is a smi, we return null.
- __ JumpIfSmi(r0, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE); // Map is now in r0.
- __ b(lt, &null);
-
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ cmp(r1, Operand(JS_FUNCTION_TYPE));
- __ b(eq, &function);
-
- // Check if the constructor in the map is a function.
- __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
- __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
- __ b(ne, &non_function_constructor);
-
- // r0 now contains the constructor function. Grab the
- // instance class name from there.
- __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
- __ b(&done);
-
- // Functions have class 'Function'.
- __ bind(&function);
- __ LoadRoot(r0, Heap::kfunction_class_symbolRootIndex);
- __ jmp(&done);
-
- // Objects with a non-function constructor have class 'Object'.
- __ bind(&non_function_constructor);
- __ LoadRoot(r0, Heap::kfunction_class_symbolRootIndex);
- __ jmp(&done);
-
- // Non-JS objects have class null.
- __ bind(&null);
- __ LoadRoot(r0, Heap::kNullValueRootIndex);
-
- // All done.
- __ bind(&done);
-
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
- }
-#endif
- // Finally, we're expected to leave a value on the top of the stack.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r4, Operand(r0));
-
- __ bind(&heapnumber_allocated);
-
- // Convert 32 random bits in r0 to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- if (CpuFeatures::IsSupported(VFP3)) {
- __ PrepareCallCFunction(1, r0);
- __ mov(r0, Operand(ExternalReference::isolate_address()));
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- CpuFeatures::Scope scope(VFP3);
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- // Create this constant using mov/orr to avoid PC relative load.
- __ mov(r1, Operand(0x41000000));
- __ orr(r1, r1, Operand(0x300000));
- // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
- __ vmov(d7, r0, r1);
- // Move 0x4130000000000000 to VFP.
- __ mov(r0, Operand(0, RelocInfo::NONE));
- __ vmov(d8, r0, r1);
- // Subtract and store the result in the heap number.
- __ vsub(d7, d7, d8);
- __ sub(r0, r4, Operand(kHeapObjectTag));
- __ vstr(d7, r0, HeapNumber::kValueOffset);
- __ mov(r0, r4);
- } else {
- __ PrepareCallCFunction(2, r0);
- __ mov(r0, Operand(r4));
- __ mov(r1, Operand(ExternalReference::isolate_address()));
- __ CallCFunction(
- ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
- }
-
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
- // Load the arguments on the stack and call the stub.
- SubStringStub stub;
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
- // Load the arguments on the stack and call the stub.
- RegExpExecStub stub;
- ASSERT(args->length() == 4);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- VisitForStackValue(args->at(3));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(r0, &done);
- // If the object is not a value type, return the object.
- __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
- __ b(ne, &done);
- __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
- // Load the arguments on the stack and call the runtime function.
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- MathPowStub stub;
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0)); // Load the object.
- VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(r1); // r0 = value. r1 = object.
-
- Label done;
- // If the object is a smi, return the value.
- __ JumpIfSmi(r1, &done);
-
- // If the object is not a value type, return the value.
- __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
- __ b(ne, &done);
-
- // Store the value.
- __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
-
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
-
- NumberToStringStub stub;
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label done;
- StringCharFromCodeGenerator generator(r0, r1);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(r1);
-}
-
-
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = r1;
- Register index = r0;
- Register scratch = r2;
- Register result = r3;
-
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharCodeAtGenerator generator(object,
- index,
- scratch,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ LoadRoot(result, Heap::kNanValueRootIndex);
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Load the undefined value into the result register, which will
- // trigger conversion.
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = r1;
- Register index = r0;
- Register scratch1 = r2;
- Register scratch2 = r3;
- Register result = r0;
-
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch1,
- scratch2,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result, Heap::kEmptyStringRootIndex);
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ mov(result, Operand(Smi::FromInt(0)));
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringCompareStub stub;
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
- // Load the argument on the stack and call the runtime function.
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sqrt, 1);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
- ASSERT(args->length() >= 2);
-
- int arg_count = args->length() - 2; // For receiver and function.
- VisitForStackValue(args->at(0)); // Receiver.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i + 1));
- }
- VisitForAccumulatorValue(args->at(arg_count + 1)); // Function.
-
- // InvokeFunction requires function in r1. Move it in there.
- if (!result_register().is(r1)) __ mov(r1, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(r1, count, CALL_FUNCTION);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
- RegExpConstructResultStub stub;
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- Label done;
- Label slow_case;
- Register object = r0;
- Register index1 = r1;
- Register index2 = r2;
- Register elements = r3;
- Register scratch1 = r4;
- Register scratch2 = r5;
-
- __ ldr(object, MemOperand(sp, 2 * kPointerSize));
- // Fetch the map and check if array is in fast case.
- // Check that object doesn't require security checks and
- // has no indexed interceptor.
- __ CompareObjectType(object, scratch1, scratch2, JS_ARRAY_TYPE);
- __ b(ne, &slow_case);
- // Map is now in scratch1.
-
- __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
- __ tst(scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
- __ b(ne, &slow_case);
-
- // Check the object's elements are in fast case and writable.
- __ ldr(elements, FieldMemOperand(object, JSObject::kElementsOffset));
- __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(scratch1, ip);
- __ b(ne, &slow_case);
-
- // Check that both indices are smis.
- __ ldr(index1, MemOperand(sp, 1 * kPointerSize));
- __ ldr(index2, MemOperand(sp, 0));
- __ JumpIfNotBothSmi(index1, index2, &slow_case);
-
- // Check that both indices are valid.
- __ ldr(scratch1, FieldMemOperand(object, JSArray::kLengthOffset));
- __ cmp(scratch1, index1);
- __ cmp(scratch1, index2, hi);
- __ b(ls, &slow_case);
-
- // Bring the address of the elements into index1 and index2.
- __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(index1,
- scratch1,
- Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(index2,
- scratch1,
- Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Swap elements.
- __ ldr(scratch1, MemOperand(index1, 0));
- __ ldr(scratch2, MemOperand(index2, 0));
- __ str(scratch1, MemOperand(index2, 0));
- __ str(scratch2, MemOperand(index1, 0));
-
- Label new_space;
- __ InNewSpace(elements, scratch1, eq, &new_space);
- // Possible optimization: do a check that both values are Smis
- // (or them and test against Smi mask.)
-
- __ mov(scratch1, elements);
- __ RecordWriteHelper(elements, index1, scratch2);
- __ RecordWriteHelper(scratch1, index2, scratch2); // scratch1 holds elements.
-
- __ bind(&new_space);
- // We are done. Drop elements from the stack, and return undefined.
- __ Drop(3);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
-
- __ bind(&slow_case);
- __ CallRuntime(Runtime::kSwapElements, 3);
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- isolate()->global_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- context()->Plug(r0);
- return;
- }
-
- VisitForAccumulatorValue(args->at(1));
-
- Register key = r0;
- Register cache = r1;
- __ ldr(cache, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(cache, FieldMemOperand(cache, GlobalObject::kGlobalContextOffset));
- __ ldr(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ ldr(cache,
- FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
-
-
- Label done, not_found;
- // tmp now holds finger offset as a smi.
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ ldr(r2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
- // r2 now holds finger offset as a smi.
- __ add(r3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // r3 now points to the start of fixed array elements.
- __ ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
- // Note side effect of PreIndex: r3 now points to the key of the pair.
- __ cmp(key, r2);
- __ b(ne, &not_found);
-
- __ ldr(r0, MemOperand(r3, kPointerSize));
- __ b(&done);
-
- __ bind(&not_found);
- // Call runtime to perform the lookup.
- __ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Register right = r0;
- Register left = r1;
- Register tmp = r2;
- Register tmp2 = r3;
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
- __ pop(left);
-
- Label done, fail, ok;
- __ cmp(left, Operand(right));
- __ b(eq, &ok);
- // Fail if either is a non-HeapObject.
- __ and_(tmp, left, Operand(right));
- __ tst(tmp, Operand(kSmiTagMask));
- __ b(eq, &fail);
- __ ldr(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ldrb(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ cmp(tmp2, Operand(JS_REGEXP_TYPE));
- __ b(ne, &fail);
- __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ cmp(tmp, Operand(tmp2));
- __ b(ne, &fail);
- __ ldr(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
- __ ldr(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
- __ cmp(tmp, tmp2);
- __ b(eq, &ok);
- __ bind(&fail);
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
- __ jmp(&done);
- __ bind(&ok);
- __ LoadRoot(r0, Heap::kTrueValueRootIndex);
- __ bind(&done);
-
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
- __ tst(r0, Operand(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- if (FLAG_debug_code) {
- __ AbortIfNotString(r0);
- }
-
- __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
- __ IndexFromHash(r0, r0);
-
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
- Label bailout, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- empty_separator_loop, one_char_separator_loop,
- one_char_separator_loop_entry, long_separator_loop;
-
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(0));
-
- // All aliases of the same register have disjoint lifetimes.
- Register array = r0;
- Register elements = no_reg; // Will be r0.
- Register result = no_reg; // Will be r0.
- Register separator = r1;
- Register array_length = r2;
- Register result_pos = no_reg; // Will be r2
- Register string_length = r3;
- Register string = r4;
- Register element = r5;
- Register elements_end = r6;
- Register scratch1 = r7;
- Register scratch2 = r9;
-
- // Separator operand is on the stack.
- __ pop(separator);
-
- // Check that the array is a JSArray.
- __ JumpIfSmi(array, &bailout);
- __ CompareObjectType(array, scratch1, scratch2, JS_ARRAY_TYPE);
- __ b(ne, &bailout);
-
- // Check that the array has fast elements.
- __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
- __ tst(scratch2, Operand(1 << Map::kHasFastElements));
- __ b(eq, &bailout);
-
- // If the array has length zero, return the empty string.
- __ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
- __ SmiUntag(array_length, SetCC);
- __ b(ne, &non_trivial_array);
- __ LoadRoot(r0, Heap::kEmptyStringRootIndex);
- __ b(&done);
-
- __ bind(&non_trivial_array);
-
- // Get the FixedArray containing array's elements.
- elements = array;
- __ ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
- array = no_reg; // End of array's live range.
-
- // Check that all array elements are sequential ASCII strings, and
- // accumulate the sum of their lengths, as a smi-encoded value.
- __ mov(string_length, Operand(0));
- __ add(element,
- elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
- // Loop condition: while (element < elements_end).
- // Live values in registers:
- // elements: Fixed array of strings.
- // array_length: Length of the fixed array of strings (not smi)
- // separator: Separator string
- // string_length: Accumulated sum of string lengths (smi).
- // element: Current array element.
- // elements_end: Array end.
- if (FLAG_debug_code) {
- __ cmp(array_length, Operand(0));
- __ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin");
- }
- __ bind(&loop);
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ JumpIfSmi(string, &bailout);
- __ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
- __ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
- __ add(string_length, string_length, Operand(scratch1));
- __ b(vs, &bailout);
- __ cmp(element, elements_end);
- __ b(lt, &loop);
-
- // If array_length is 1, return elements[0], a string.
- __ cmp(array_length, Operand(1));
- __ b(ne, &not_size_one_array);
- __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ b(&done);
-
- __ bind(&not_size_one_array);
-
- // Live values in registers:
- // separator: Separator string
- // array_length: Length of the array.
- // string_length: Sum of string lengths (smi).
- // elements: FixedArray of strings.
-
- // Check that the separator is a flat ASCII string.
- __ JumpIfSmi(separator, &bailout);
- __ ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
-
- // Add (separator length times array_length) - separator length to the
- // string_length to get the length of the result string. array_length is not
- // smi but the other values are, so the result is a smi
- __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
- __ sub(string_length, string_length, Operand(scratch1));
- __ smull(scratch2, ip, array_length, scratch1);
- // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
- // zero.
- __ cmp(ip, Operand(0));
- __ b(ne, &bailout);
- __ tst(scratch2, Operand(0x80000000));
- __ b(ne, &bailout);
- __ add(string_length, string_length, Operand(scratch2));
- __ b(vs, &bailout);
- __ SmiUntag(string_length);
-
- // Get first element in the array to free up the elements register to be used
- // for the result.
- __ add(element,
- elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- result = elements; // End of live range for elements.
- elements = no_reg;
- // Live values in registers:
- // element: First array element
- // separator: Separator string
- // string_length: Length of result string (not smi)
- // array_length: Length of the array.
- __ AllocateAsciiString(result,
- string_length,
- scratch1,
- scratch2,
- elements_end,
- &bailout);
- // Prepare for looping. Set up elements_end to end of the array. Set
- // result_pos to the position of the result where to write the first
- // character.
- __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
- result_pos = array_length; // End of live range for array_length.
- array_length = no_reg;
- __ add(result_pos,
- result,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-
- // Check the length of the separator.
- __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
- __ cmp(scratch1, Operand(Smi::FromInt(1)));
- __ b(eq, &one_char_separator);
- __ b(gt, &long_separator);
-
- // Empty separator case
- __ bind(&empty_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
-
- // Copy next array element to the result.
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
- __ cmp(element, elements_end);
- __ b(lt, &empty_separator_loop); // End while (element < elements_end).
- ASSERT(result.is(r0));
- __ b(&done);
-
- // One-character separator case
- __ bind(&one_char_separator);
- // Replace separator with its ascii character value.
- __ ldrb(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator
- __ jmp(&one_char_separator_loop_entry);
-
- __ bind(&one_char_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Single separator ascii char (in lower byte).
-
- // Copy the separator character to the result.
- __ strb(separator, MemOperand(result_pos, 1, PostIndex));
-
- // Copy next array element to the result.
- __ bind(&one_char_separator_loop_entry);
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
- __ cmp(element, elements_end);
- __ b(lt, &one_char_separator_loop); // End while (element < elements_end).
- ASSERT(result.is(r0));
- __ b(&done);
-
- // Long separator case (separator is more than one character). Entry is at the
- // label long_separator below.
- __ bind(&long_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Separator string.
-
- // Copy the separator to the result.
- __ ldr(string_length, FieldMemOperand(separator, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ add(string,
- separator,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
-
- __ bind(&long_separator);
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
- __ cmp(element, elements_end);
- __ b(lt, &long_separator_loop); // End while (element < elements_end).
- ASSERT(result.is(r0));
- __ b(&done);
-
- __ bind(&bailout);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
- Comment cmnt(masm_, "[ InlineRuntimeCall");
- EmitInlineRuntimeCall(expr);
- return;
- }
-
- Comment cmnt(masm_, "[ CallRuntime");
- ZoneList<Expression*>* args = expr->arguments();
-
- if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
- __ ldr(r0, GlobalObjectOperand());
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kBuiltinsOffset));
- __ push(r0);
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- if (expr->is_jsruntime()) {
- // Call the JS runtime function.
- __ mov(r2, Operand(expr->name()));
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, NOT_IN_LOOP);
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- } else {
- // Call the C runtime function.
- __ CallRuntime(expr->function(), arg_count);
- }
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::DELETE: {
- Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
- Property* prop = expr->expression()->AsProperty();
- Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
-
- if (prop != NULL) {
- if (prop->is_synthetic()) {
- // Result of deleting parameters is false, even when they rewrite
- // to accesses on the arguments object.
- context()->Plug(false);
- } else {
- VisitForStackValue(prop->obj());
- VisitForStackValue(prop->key());
- __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
- __ push(r1);
- __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
- context()->Plug(r0);
- }
- } else if (var != NULL) {
- // Delete of an unqualified identifier is disallowed in strict mode
- // but "delete this" is.
- ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
- if (var->is_global()) {
- __ ldr(r2, GlobalObjectOperand());
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(kNonStrictMode)));
- __ Push(r2, r1, r0);
- __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
- context()->Plug(r0);
- } else if (var->AsSlot() != NULL &&
- var->AsSlot()->type() != Slot::LOOKUP) {
- // Result of deleting non-global, non-dynamic variables is false.
- // The subexpression does not have side effects.
- context()->Plug(false);
- } else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ push(context_register());
- __ mov(r2, Operand(var->name()));
- __ push(r2);
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
- context()->Plug(r0);
- }
- } else {
- // Result of deleting non-property, non-variable reference is true.
- // The subexpression may have side effects.
- VisitForEffect(expr->expression());
- context()->Plug(true);
- }
- break;
- }
-
- case Token::VOID: {
- Comment cmnt(masm_, "[ UnaryOperation (VOID)");
- VisitForEffect(expr->expression());
- context()->Plug(Heap::kUndefinedValueRootIndex);
- break;
- }
-
- case Token::NOT: {
- Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- if (context()->IsEffect()) {
- // Unary NOT has no side effects so it's only necessary to visit the
- // subexpression. Match the optimizing compiler by not branching.
- VisitForEffect(expr->expression());
- } else {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
-
- // Notice that the labels are swapped.
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- VisitForControl(expr->expression(), if_true, if_false, fall_through);
- context()->Plug(if_false, if_true); // Labels swapped.
- }
- break;
- }
-
- case Token::TYPEOF: {
- Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- { StackValueContext context(this);
- VisitForTypeofValue(expr->expression());
- }
- __ CallRuntime(Runtime::kTypeof, 1);
- context()->Plug(r0);
- break;
- }
-
- case Token::ADD: {
- Comment cmt(masm_, "[ UnaryOperation (ADD)");
- VisitForAccumulatorValue(expr->expression());
- Label no_conversion;
- __ tst(result_register(), Operand(kSmiTagMask));
- __ b(eq, &no_conversion);
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
- context()->Plug(result_register());
- break;
- }
-
- case Token::SUB: {
- Comment cmt(masm_, "[ UnaryOperation (SUB)");
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
- // GenericUnaryOpStub expects the argument to be in the
- // accumulator register r0.
- VisitForAccumulatorValue(expr->expression());
- __ CallStub(&stub);
- context()->Plug(r0);
- break;
- }
-
- case Token::BIT_NOT: {
- Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
- // The generic unary operation stub expects the argument to be
- // in the accumulator register r0.
- VisitForAccumulatorValue(expr->expression());
- Label done;
- bool inline_smi_code = ShouldInlineSmiCase(expr->op());
- if (inline_smi_code) {
- Label call_stub;
- __ JumpIfNotSmi(r0, &call_stub);
- __ mvn(r0, Operand(r0));
- // Bit-clear inverted smi-tag.
- __ bic(r0, r0, Operand(kSmiTagMask));
- __ b(&done);
- __ bind(&call_stub);
- }
- bool overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOpFlags flags = inline_smi_code
- ? NO_UNARY_SMI_CODE_IN_STUB
- : NO_UNARY_FLAGS;
- UnaryOverwriteMode mode =
- overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
- __ CallStub(&stub);
- __ bind(&done);
- context()->Plug(r0);
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- Comment cmnt(masm_, "[ CountOperation");
- SetSourcePosition(expr->position());
-
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
- // Expression can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->expression()->AsProperty();
- // In case of a property we use the uninitialized expression context
- // of the key to detect a named property.
- if (prop != NULL) {
- assign_type =
- (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
- }
-
- // Evaluate expression and get value.
- if (assign_type == VARIABLE) {
- ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
- AccumulatorValueContext context(this);
- EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
- } else {
- // Reserve space for result of postfix operation.
- if (expr->is_postfix() && !context()->IsEffect()) {
- __ mov(ip, Operand(Smi::FromInt(0)));
- __ push(ip);
- }
- if (assign_type == NAMED_PROPERTY) {
- // Put the object both on the stack and in the accumulator.
- VisitForAccumulatorValue(prop->obj());
- __ push(r0);
- EmitNamedPropertyLoad(prop);
- } else {
- if (prop->is_arguments_access()) {
- VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
- __ ldr(r0, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
- __ push(r0);
- __ mov(r0, Operand(prop->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- }
- __ ldr(r1, MemOperand(sp, 0));
- __ push(r0);
- EmitKeyedPropertyLoad(prop);
- }
- }
-
- // We need a second deoptimization point after loading the value
- // in case evaluating the property load my have a side effect.
- if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), TOS_REG);
- } else {
- PrepareForBailout(expr->increment(), TOS_REG);
- }
-
- // Call ToNumber only if operand is not a smi.
- Label no_conversion;
- __ JumpIfSmi(r0, &no_conversion);
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(r0);
- break;
- case NAMED_PROPERTY:
- __ str(r0, MemOperand(sp, kPointerSize));
- break;
- case KEYED_PROPERTY:
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
- break;
- }
- }
- }
-
-
- // Inline smi case if we are in a loop.
- Label stub_call, done;
- JumpPatchSite patch_site(masm_);
-
- int count_value = expr->op() == Token::INC ? 1 : -1;
- if (ShouldInlineSmiCase(expr->op())) {
- __ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
- __ b(vs, &stub_call);
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(r0, &done);
-
- __ bind(&stub_call);
- // Call stub. Undo operation first.
- __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
- }
- __ mov(r1, Operand(Smi::FromInt(count_value)));
-
- // Record position before stub call.
- SetSourcePosition(expr->position());
-
- TypeRecordingBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- EmitCallIC(stub.GetCode(), &patch_site);
- __ bind(&done);
-
- // Store the value returned in r0.
- switch (assign_type) {
- case VARIABLE:
- if (expr->is_postfix()) {
- { EffectContext context(this);
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context.Plug(r0);
- }
- // For all contexts except EffectConstant We have the result on
- // top of the stack.
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r0);
- }
- break;
- case NAMED_PROPERTY: {
- __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- __ pop(r1);
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(r0);
- }
- break;
- }
- case KEYED_PROPERTY: {
- __ pop(r1); // Key.
- __ pop(r2); // Receiver.
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(r0);
- }
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
- ASSERT(!context()->IsEffect());
- ASSERT(!context()->IsTest());
- VariableProxy* proxy = expr->AsVariableProxy();
- if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
- Comment cmnt(masm_, "Global variable");
- __ ldr(r0, GlobalObjectOperand());
- __ mov(r2, Operand(proxy->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- PrepareForBailout(expr, TOS_REG);
- context()->Plug(r0);
- } else if (proxy != NULL &&
- proxy->var()->AsSlot() != NULL &&
- proxy->var()->AsSlot()->type() == Slot::LOOKUP) {
- Label done, slow;
-
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- Slot* slot = proxy->var()->AsSlot();
- EmitDynamicLoadFromSlotFastCase(slot, INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- __ mov(r0, Operand(proxy->name()));
- __ Push(cp, r0);
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- PrepareForBailout(expr, TOS_REG);
- __ bind(&done);
-
- context()->Plug(r0);
- } else {
- // This expression cannot throw a reference error at the top level.
- context()->HandleExpression(expr);
- }
-}
-
-
-bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
- Expression* left,
- Expression* right,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (op != Token::EQ && op != Token::EQ_STRICT) return false;
-
- // Check for the pattern: typeof <expression> == <string literal>.
- Literal* right_literal = right->AsLiteral();
- if (right_literal == NULL) return false;
- Handle<Object> right_literal_value = right_literal->handle();
- if (!right_literal_value->IsString()) return false;
- UnaryOperation* left_unary = left->AsUnaryOperation();
- if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
- Handle<String> check = Handle<String>::cast(right_literal_value);
-
- { AccumulatorValueContext context(this);
- VisitForTypeofValue(left_unary->expression());
- }
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
- if (check->Equals(isolate()->heap()->number_symbol())) {
- __ JumpIfSmi(r0, if_true);
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r0, ip);
- Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_symbol())) {
- __ JumpIfSmi(r0, if_false);
- // Check for undetectable objects => false.
- __ CompareObjectType(r0, r0, r1, FIRST_NONSTRING_TYPE);
- __ b(ge, if_false);
- __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
- __ CompareRoot(r0, Heap::kTrueValueRootIndex);
- __ b(eq, if_true);
- __ CompareRoot(r0, Heap::kFalseValueRootIndex);
- Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(eq, if_true);
- __ JumpIfSmi(r0, if_false);
- // Check for undetectable objects => true.
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- Split(ne, if_true, if_false, fall_through);
-
- } else if (check->Equals(isolate()->heap()->function_symbol())) {
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r0, FIRST_FUNCTION_CLASS_TYPE);
- Split(ge, if_true, if_false, fall_through);
-
- } else if (check->Equals(isolate()->heap()->object_symbol())) {
- __ JumpIfSmi(r0, if_false);
- __ CompareRoot(r0, Heap::kNullValueRootIndex);
- __ b(eq, if_true);
- // Check for JS objects => true.
- __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);
- __ b(lo, if_false);
- __ CompareInstanceType(r0, r1, FIRST_FUNCTION_CLASS_TYPE);
- __ b(hs, if_false);
- // Check for undetectable objects => false.
- __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- Split(eq, if_true, if_false, fall_through);
- } else {
- if (if_false != fall_through) __ jmp(if_false);
- }
-
- return true;
-}
-
-
-void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- Comment cmnt(masm_, "[ CompareOperation");
- SetSourcePosition(expr->position());
-
- // Always perform the comparison for its control flow. Pack the result
- // into the expression's context after the comparison is performed.
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- Token::Value op = expr->op();
- Expression* left = expr->left();
- Expression* right = expr->right();
- if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
- context()->Plug(if_true, if_false);
- return;
- }
-
- VisitForStackValue(expr->left());
- switch (op) {
- case Token::IN:
- VisitForStackValue(expr->right());
- __ InvokeBuiltin(Builtins::IN, CALL_JS);
- PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- Split(eq, if_true, if_false, fall_through);
- break;
-
- case Token::INSTANCEOF: {
- VisitForStackValue(expr->right());
- InstanceofStub stub(InstanceofStub::kNoFlags);
- __ CallStub(&stub);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- // The stub returns 0 for true.
- __ tst(r0, r0);
- Split(eq, if_true, if_false, fall_through);
- break;
- }
-
- default: {
- VisitForAccumulatorValue(expr->right());
- Condition cond = eq;
- bool strict = false;
- switch (op) {
- case Token::EQ_STRICT:
- strict = true;
- // Fall through
- case Token::EQ:
- cond = eq;
- __ pop(r1);
- break;
- case Token::LT:
- cond = lt;
- __ pop(r1);
- break;
- case Token::GT:
- // Reverse left and right sides to obtain ECMA-262 conversion order.
- cond = lt;
- __ mov(r1, result_register());
- __ pop(r0);
- break;
- case Token::LTE:
- // Reverse left and right sides to obtain ECMA-262 conversion order.
- cond = ge;
- __ mov(r1, result_register());
- __ pop(r0);
- break;
- case Token::GTE:
- cond = ge;
- __ pop(r1);
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
-
- bool inline_smi_code = ShouldInlineSmiCase(op);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- Label slow_case;
- __ orr(r2, r0, Operand(r1));
- patch_site.EmitJumpIfNotSmi(r2, &slow_case);
- __ cmp(r1, r0);
- Split(cond, if_true, if_false, NULL);
- __ bind(&slow_case);
- }
-
- // Record position and call the compare IC.
- SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
- EmitCallIC(ic, &patch_site);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ cmp(r0, Operand(0));
- Split(cond, if_true, if_false, fall_through);
- }
- }
-
- // Convert the result of the comparison into one expected for this
- // expression's context.
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
- Comment cmnt(masm_, "[ CompareToNull");
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- VisitForAccumulatorValue(expr->expression());
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ LoadRoot(r1, Heap::kNullValueRootIndex);
- __ cmp(r0, r1);
- if (expr->is_strict()) {
- Split(eq, if_true, if_false, fall_through);
- } else {
- __ b(eq, if_true);
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, r1);
- __ b(eq, if_true);
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, if_false);
- // It can be an undetectable object.
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
- __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
- __ cmp(r1, Operand(1 << Map::kIsUndetectable));
- Split(eq, if_true, if_false, fall_through);
- }
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(r0);
-}
-
-
-Register FullCodeGenerator::result_register() {
- return r0;
-}
-
-
-Register FullCodeGenerator::context_register() {
- return cp;
-}
-
-
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
- ASSERT(mode == RelocInfo::CODE_TARGET ||
- mode == RelocInfo::CODE_TARGET_CONTEXT);
- Counters* counters = isolate()->counters();
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(counters->named_load_full(), 1, r1, r2);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(counters->keyed_load_full(), 1, r1, r2);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(counters->named_store_full(), 1, r1, r2);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(counters->keyed_store_full(), 1, r1, r2);
- default:
- break;
- }
-
- __ Call(ic, mode);
-}
-
-
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
- Counters* counters = isolate()->counters();
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(counters->named_load_full(), 1, r1, r2);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(counters->keyed_load_full(), 1, r1, r2);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(counters->named_store_full(), 1, r1, r2);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(counters->keyed_store_full(), 1, r1, r2);
- default:
- break;
- }
-
- __ Call(ic, RelocInfo::CODE_TARGET);
- if (patch_site != NULL && patch_site->is_bound()) {
- patch_site->EmitPatchInfo();
- } else {
- __ nop(); // Signals no inlined code.
- }
-}
-
-
-void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
- __ str(value, MemOperand(fp, frame_offset));
-}
-
-
-void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ ldr(dst, ContextOperand(cp, context_index));
-}
-
-
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
- ASSERT(!result_register().is(r1));
- // Store result register while executing finally block.
- __ push(result_register());
- // Cook return address in link register to stack (smi encoded Code* delta)
- __ sub(r1, lr, Operand(masm_->CodeObject()));
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- ASSERT_EQ(0, kSmiTag);
- __ add(r1, r1, Operand(r1)); // Convert to smi.
- __ push(r1);
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
- ASSERT(!result_register().is(r1));
- // Restore result register from stack.
- __ pop(r1);
- // Uncook return address and return.
- __ pop(result_register());
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- __ mov(r1, Operand(r1, ASR, 1)); // Un-smi-tag value.
- __ add(pc, r1, Operand(masm_->CodeObject()));
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/ic-arm.cc b/src/3rdparty/v8/src/arm/ic-arm.cc
deleted file mode 100644
index dc4f761..0000000
--- a/src/3rdparty/v8/src/arm/ic-arm.cc
+++ /dev/null
@@ -1,1793 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "assembler-arm.h"
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "disasm.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ b(eq, global_object);
- __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
- __ b(eq, global_object);
- __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
- __ b(eq, global_object);
-}
-
-
-// Generated code falls through if the receiver is a regular non-global
-// JS object with slow properties and no interceptors.
-static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register elements,
- Register t0,
- Register t1,
- Label* miss) {
- // Register usage:
- // receiver: holds the receiver on entry and is unchanged.
- // elements: holds the property dictionary on fall through.
- // Scratch registers:
- // t0: used to holds the receiver map.
- // t1: used to holds the receiver instance type, receiver bit mask and
- // elements map.
-
- // Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss);
-
- // Check that the receiver is a valid JS object.
- __ CompareObjectType(receiver, t0, t1, FIRST_JS_OBJECT_TYPE);
- __ b(lt, miss);
-
- // If this assert fails, we have to check upper bound too.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-
- GenerateGlobalInstanceTypeCheck(masm, t1, miss);
-
- // Check that the global object does not require access checks.
- __ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
- __ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasNamedInterceptor)));
- __ b(ne, miss);
-
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(t1, ip);
- __ b(ne, miss);
-}
-
-
-// Probe the string dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found. Jump to
-// the |miss| label otherwise.
-static void GenerateStringDictionaryProbes(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register scratch1,
- Register scratch2) {
- // Assert that name contains a string.
- if (FLAG_debug_code) __ AbortIfNotString(name);
-
- // Compute the capacity mask.
- const int kCapacityOffset = StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
- __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
- __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize)); // convert smi to int
- __ sub(scratch1, scratch1, Operand(1));
-
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
-
- // Generate an unrolled loop that performs a few probes before
- // giving up. Measurements done on Gmail indicate that 2 probes
- // cover ~93% of loads from dictionaries.
- static const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- ASSERT(StringDictionary::GetProbeOffset(i) <
- 1 << (32 - String::kHashFieldOffset));
- __ add(scratch2, scratch2, Operand(
- StringDictionary::GetProbeOffset(i) << String::kHashShift));
- }
- __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift));
-
- // Scale the index by multiplying by the element size.
- ASSERT(StringDictionary::kEntrySize == 3);
- // scratch2 = scratch2 * 3.
- __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
-
- // Check if the key is identical to the name.
- __ add(scratch2, elements, Operand(scratch2, LSL, 2));
- __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
- __ cmp(name, Operand(ip));
- if (i != kProbes - 1) {
- __ b(eq, done);
- } else {
- __ b(ne, miss);
- }
- }
-}
-
-
-// Helper function used from LoadIC/CallIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
-// done
-// result: Register for the result. It is only updated if a jump to the miss
-// label is not done. Can be the same as elements or name clobbering
-// one of these in the case of not jumping to the miss label.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register result,
- Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
- Label done;
-
- // Probe the dictionary.
- GenerateStringDictionaryProbes(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
-
- // If probing finds an entry check that the value is a normal
- // property.
- __ bind(&done); // scratch2 == elements + 4 * index
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ tst(scratch1, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
- __ b(ne, miss);
-
- // Get the value at the masked, scaled index and return.
- __ ldr(result,
- FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
-// done
-// value: The value to store.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register value,
- Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
- Label done;
-
- // Probe the dictionary.
- GenerateStringDictionaryProbes(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
-
- // If probing finds an entry in the dictionary check that the value
- // is a normal property that is not read only.
- __ bind(&done); // scratch2 == elements + 4 * index
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- const int kTypeAndReadOnlyMask
- = (PropertyDetails::TypeField::mask() |
- PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
- __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
- __ b(ne, miss);
-
- // Store the value at the masked, scaled index and return.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
- __ str(value, MemOperand(scratch2));
-
- // Update the write barrier. Make sure not to clobber the value.
- __ mov(scratch1, value);
- __ RecordWrite(elements, scratch2, scratch1);
-}
-
-
-static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register key,
- Register result,
- Register t0,
- Register t1,
- Register t2) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'key' or 'result'.
- // Unchanged on bailout so 'key' or 'result' can be used
- // in further computation.
- //
- // Scratch registers:
- //
- // t0 - holds the untagged key on entry and holds the hash once computed.
- //
- // t1 - used to hold the capacity mask of the dictionary
- //
- // t2 - used for the index into the dictionary.
- Label done;
-
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- __ mvn(t1, Operand(t0));
- __ add(t0, t1, Operand(t0, LSL, 15));
- // hash = hash ^ (hash >> 12);
- __ eor(t0, t0, Operand(t0, LSR, 12));
- // hash = hash + (hash << 2);
- __ add(t0, t0, Operand(t0, LSL, 2));
- // hash = hash ^ (hash >> 4);
- __ eor(t0, t0, Operand(t0, LSR, 4));
- // hash = hash * 2057;
- __ mov(t1, Operand(2057));
- __ mul(t0, t0, t1);
- // hash = hash ^ (hash >> 16);
- __ eor(t0, t0, Operand(t0, LSR, 16));
-
- // Compute the capacity mask.
- __ ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
- __ mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int
- __ sub(t1, t1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before giving up.
- static const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
- // Use t2 for index calculations and keep the hash intact in t0.
- __ mov(t2, t0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- __ add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i)));
- }
- __ and_(t2, t2, Operand(t1));
-
- // Scale the index by multiplying by the element size.
- ASSERT(NumberDictionary::kEntrySize == 3);
- __ add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
-
- // Check if the key is identical to the name.
- __ add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
- __ ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset));
- __ cmp(key, Operand(ip));
- if (i != kProbes - 1) {
- __ b(eq, &done);
- } else {
- __ b(ne, miss);
- }
- }
-
- __ bind(&done);
- // Check that the value is a normal property.
- // t2: elements + (index * kPointerSize)
- const int kDetailsOffset =
- NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- __ ldr(t1, FieldMemOperand(t2, kDetailsOffset));
- __ tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
- __ b(ne, miss);
-
- // Get the value at the masked, scaled index and return.
- const int kValueOffset =
- NumberDictionary::kElementsStartOffset + kPointerSize;
- __ ldr(result, FieldMemOperand(t2, kValueOffset));
-}
-
-
-void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadArrayLength(masm, r0, r3, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss,
- support_wrappers);
- // Cache miss: Jump to runtime.
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadFunctionPrototype(masm, r0, r1, r3, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map,
- Register scratch,
- int interceptor_bit,
- Label* slow) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
- // Get the map of the receiver.
- __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(scratch,
- Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ b(ne, slow);
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing into string
- // objects work as intended.
- ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(JS_OBJECT_TYPE));
- __ b(lt, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register scratch1,
- Register scratch2,
- Register result,
- Label* not_fast_array,
- Label* out_of_range) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // elements - holds the elements of the receiver on exit.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the the same as 'receiver' or 'key'.
- // Unchanged on bailout so 'receiver' and 'key' can be safely
- // used by further computation.
- //
- // Scratch registers:
- //
- // scratch1 - used to hold elements map and elements length.
- // Holds the elements map if not_fast_array branch is taken.
- //
- // scratch2 - used to hold the loaded value.
-
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(scratch1, ip);
- __ b(ne, not_fast_array);
- } else {
- __ AssertFastElements(elements);
- }
- // Check that the key (index) is within bounds.
- __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(key, Operand(scratch1));
- __ b(hs, out_of_range);
- // Fast case: Do the load.
- __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // The key is a smi.
- ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ ldr(scratch2,
- MemOperand(scratch1, key, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch2, ip);
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ b(eq, out_of_range);
- __ mov(result, scratch2);
-}
-
-
-// Checks whether a key is an array index string or a symbol string.
-// Falls through if a key is a symbol.
-static void GenerateKeyStringCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_symbol) {
- // The key is not a smi.
- // Is it a string?
- __ CompareObjectType(key, map, hash, FIRST_NONSTRING_TYPE);
- __ b(ge, not_symbol);
-
- // Is the string an array index, with cached numeric value?
- __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
- __ tst(hash, Operand(String::kContainsCachedArrayIndexMask));
- __ b(eq, index_string);
-
- // Is the string a symbol?
- // map: key map
- __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- ASSERT(kSymbolTag != 0);
- __ tst(hash, Operand(kIsSymbolMask));
- __ b(eq, not_symbol);
-}
-
-
-// Defined in ic.cc.
-Object* CallIC_Miss(Arguments args);
-
-// The generated code does not accept smi keys.
-// The generated code falls through if both probes miss.
-static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind) {
- // ----------- S t a t e -------------
- // -- r1 : receiver
- // -- r2 : name
- // -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- NOT_IN_LOOP,
- MONOMORPHIC,
- Code::kNoExtraICState,
- NORMAL,
- argc);
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, r1, r2, r3, r4, r5);
-
- // If the stub cache probing failed, the receiver might be a value.
- // For value objects, we use the map of the prototype objects for
- // the corresponding JSValue for the cache and that is what we need
- // to probe.
- //
- // Check for number.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &number);
- __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
- __ b(ne, &non_number);
- __ bind(&number);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::NUMBER_FUNCTION_INDEX, r1);
- __ b(&probe);
-
- // Check for string.
- __ bind(&non_number);
- __ cmp(r3, Operand(FIRST_NONSTRING_TYPE));
- __ b(hs, &non_string);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::STRING_FUNCTION_INDEX, r1);
- __ b(&probe);
-
- // Check for boolean.
- __ bind(&non_string);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r1, ip);
- __ b(eq, &boolean);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &miss);
- __ bind(&boolean);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::BOOLEAN_FUNCTION_INDEX, r1);
-
- // Probe the stub cache for the value object.
- __ bind(&probe);
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, r1, r2, r3, r4, r5);
-
- __ bind(&miss);
-}
-
-
-static void GenerateFunctionTailCall(MacroAssembler* masm,
- int argc,
- Label* miss,
- Register scratch) {
- // r1: function
-
- // Check that the value isn't a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, miss);
-
- // Check that the value is a JSFunction.
- __ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE);
- __ b(ne, miss);
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(r1, actual, JUMP_FUNCTION);
-}
-
-
-static void GenerateCallNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Get the receiver of the function from the stack into r1.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- GenerateStringDictionaryReceiverCheck(masm, r1, r0, r3, r4, &miss);
-
- // r0: elements
- // Search the dictionary - put result in register r1.
- GenerateDictionaryLoad(masm, &miss, r0, r2, r1, r3, r4);
-
- GenerateFunctionTailCall(masm, argc, &miss, r4);
-
- __ bind(&miss);
-}
-
-
-static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Isolate* isolate = masm->isolate();
-
- if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(isolate->counters()->call_miss(), 1, r3, r4);
- } else {
- __ IncrementCounter(isolate->counters()->keyed_call_miss(), 1, r3, r4);
- }
-
- // Get the receiver of the function from the stack.
- __ ldr(r3, MemOperand(sp, argc * kPointerSize));
-
- __ EnterInternalFrame();
-
- // Push the receiver and the name of the function.
- __ Push(r3, r2);
-
- // Call the entry.
- __ mov(r0, Operand(2));
- __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
-
- CEntryStub stub(1);
- __ CallStub(&stub);
-
- // Move result to r1 and leave the internal frame.
- __ mov(r1, Operand(r0));
- __ LeaveInternalFrame();
-
- // Check if the receiver is a global object of some sort.
- // This can happen only for regular CallIC but not KeyedCallIC.
- if (id == IC::kCallIC_Miss) {
- Label invoke, global;
- __ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &invoke);
- __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
- __ b(eq, &global);
- __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
- __ b(ne, &invoke);
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
- __ str(r2, MemOperand(sp, argc * kPointerSize));
- __ bind(&invoke);
- }
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(r1, actual, JUMP_FUNCTION);
-}
-
-
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
-}
-
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Get the receiver of the function from the stack into r1.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
- GenerateMiss(masm, argc);
-}
-
-
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- GenerateCallNormal(masm, argc);
- GenerateMiss(masm, argc);
-}
-
-
-void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
-}
-
-
-void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Get the receiver of the function from the stack into r1.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- Label do_call, slow_call, slow_load, slow_reload_receiver;
- Label check_number_dictionary, check_string, lookup_monomorphic_cache;
- Label index_smi, index_string;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(r2, &check_string);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, r1, r0, r3, Map::kHasIndexedInterceptor, &slow_call);
-
- GenerateFastArrayLoad(
- masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1, r0, r3);
-
- __ bind(&do_call);
- // receiver in r1 is not used after this point.
- // r2: key
- // r1: function
- GenerateFunctionTailCall(masm, argc, &slow_call, r0);
-
- __ bind(&check_number_dictionary);
- // r2: key
- // r3: elements map
- // r4: elements
- // Check whether the elements is a number dictionary.
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &slow_load);
- __ mov(r0, Operand(r2, ASR, kSmiTagSize));
- // r0: untagged index
- GenerateNumberDictionaryLoad(masm, &slow_load, r4, r2, r1, r0, r3, r5);
- __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, r0, r3);
- __ jmp(&do_call);
-
- __ bind(&slow_load);
- // This branch is taken when calling KeyedCallIC_Miss is neither required
- // nor beneficial.
- __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3);
- __ EnterInternalFrame();
- __ push(r2); // save the key
- __ Push(r1, r2); // pass the receiver and the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(r2); // restore the key
- __ LeaveInternalFrame();
- __ mov(r1, r0);
- __ jmp(&do_call);
-
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, r2, r0, r3, &index_string, &slow_call);
-
- // The key is known to be a symbol.
- // If the receiver is a regular JS object with slow properties then do
- // a quick inline probe of the receiver's dictionary.
- // Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(
- masm, r1, r0, r3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
-
- __ ldr(r0, FieldMemOperand(r1, JSObject::kPropertiesOffset));
- __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &lookup_monomorphic_cache);
-
- GenerateDictionaryLoad(masm, &slow_load, r0, r2, r1, r3, r4);
- __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1, r0, r3);
- __ jmp(&do_call);
-
- __ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, r0, r3);
- GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
- // Fall through on miss.
-
- __ bind(&slow_call);
- // This branch is taken if:
- // - the receiver requires boxing or access check,
- // - the key is neither smi nor symbol,
- // - the value loaded is not a function,
- // - there is hope that the runtime will create a monomorphic call stub
- // that will get fetched next time.
- __ IncrementCounter(counters->keyed_call_generic_slow(), 1, r0, r3);
- GenerateMiss(masm, argc);
-
- __ bind(&index_string);
- __ IndexFromHash(r3, r2);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Check if the name is a string.
- Label miss;
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &miss);
- __ IsObjectJSStringType(r2, r0, &miss);
-
- GenerateCallNormal(masm, argc);
- __ bind(&miss);
- GenerateMiss(masm, argc);
-}
-
-
-// Defined in ic.cc.
-Object* LoadIC_Miss(Arguments args);
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
- NOT_IN_LOOP,
- MONOMORPHIC);
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, r0, r2, r3, r4, r5);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- GenerateStringDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss);
-
- // r1: elements
- GenerateDictionaryLoad(masm, &miss, r1, r2, r0, r3, r4);
- __ Ret();
-
- // Cache miss: Jump to runtime.
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Isolate* isolate = masm->isolate();
-
- __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);
-
- __ mov(r3, r0);
- __ Push(r3, r2);
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-// Returns the code marker, or the 0 if the code is not marked.
-static inline int InlinedICSiteMarker(Address address,
- Address* inline_end_address) {
- if (V8::UseCrankshaft()) return false;
-
- // If the instruction after the call site is not the pseudo instruction nop1
- // then this is not related to an inlined in-object property load. The nop1
- // instruction is located just after the call to the IC in the deferred code
- // handling the miss in the inlined code. After the nop1 instruction there is
- // a branch instruction for jumping back from the deferred code.
- Address address_after_call = address + Assembler::kCallTargetAddressOffset;
- Instr instr_after_call = Assembler::instr_at(address_after_call);
- int code_marker = MacroAssembler::GetCodeMarker(instr_after_call);
-
- // A negative result means the code is not marked.
- if (code_marker <= 0) return 0;
-
- Address address_after_nop = address_after_call + Assembler::kInstrSize;
- Instr instr_after_nop = Assembler::instr_at(address_after_nop);
- // There may be some reg-reg move and frame merging code to skip over before
- // the branch back from the DeferredReferenceGetKeyedValue code to the inlined
- // code.
- while (!Assembler::IsBranch(instr_after_nop)) {
- address_after_nop += Assembler::kInstrSize;
- instr_after_nop = Assembler::instr_at(address_after_nop);
- }
-
- // Find the end of the inlined code for handling the load.
- int b_offset =
- Assembler::GetBranchOffset(instr_after_nop) + Assembler::kPcLoadDelta;
- ASSERT(b_offset < 0); // Jumping back from deferred code.
- *inline_end_address = address_after_nop + b_offset;
-
- return code_marker;
-}
-
-
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
- if (V8::UseCrankshaft()) return false;
-
- // Find the end of the inlined code for handling the load if this is an
- // inlined IC call site.
- Address inline_end_address = 0;
- if (InlinedICSiteMarker(address, &inline_end_address)
- != Assembler::PROPERTY_ACCESS_INLINED) {
- return false;
- }
-
- // Patch the offset of the property load instruction (ldr r0, [r1, #+XXX]).
- // The immediate must be representable in 12 bits.
- ASSERT((JSObject::kMaxInstanceSize - JSObject::kHeaderSize) < (1 << 12));
- Address ldr_property_instr_address =
- inline_end_address - Assembler::kInstrSize;
- ASSERT(Assembler::IsLdrRegisterImmediate(
- Assembler::instr_at(ldr_property_instr_address)));
- Instr ldr_property_instr = Assembler::instr_at(ldr_property_instr_address);
- ldr_property_instr = Assembler::SetLdrRegisterImmediateOffset(
- ldr_property_instr, offset - kHeapObjectTag);
- Assembler::instr_at_put(ldr_property_instr_address, ldr_property_instr);
-
- // Indicate that code has changed.
- CPU::FlushICache(ldr_property_instr_address, 1 * Assembler::kInstrSize);
-
- // Patch the map check.
- // For PROPERTY_ACCESS_INLINED, the load map instruction is generated
- // 4 instructions before the end of the inlined code.
- // See codgen-arm.cc CodeGenerator::EmitNamedLoad.
- int ldr_map_offset = -4;
- Address ldr_map_instr_address =
- inline_end_address + ldr_map_offset * Assembler::kInstrSize;
- Assembler::set_target_address_at(ldr_map_instr_address,
- reinterpret_cast<Address>(map));
- return true;
-}
-
-
-bool LoadIC::PatchInlinedContextualLoad(Address address,
- Object* map,
- Object* cell,
- bool is_dont_delete) {
- // Find the end of the inlined code for handling the contextual load if
- // this is inlined IC call site.
- Address inline_end_address = 0;
- int marker = InlinedICSiteMarker(address, &inline_end_address);
- if (!((marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT) ||
- (marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE))) {
- return false;
- }
- // On ARM we don't rely on the is_dont_delete argument as the hint is already
- // embedded in the code marker.
- bool marker_is_dont_delete =
- marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE;
-
- // These are the offsets from the end of the inlined code.
- // See codgen-arm.cc CodeGenerator::EmitNamedLoad.
- int ldr_map_offset = marker_is_dont_delete ? -5: -8;
- int ldr_cell_offset = marker_is_dont_delete ? -2: -5;
- if (FLAG_debug_code && marker_is_dont_delete) {
- // Three extra instructions were generated to check for the_hole_value.
- ldr_map_offset -= 3;
- ldr_cell_offset -= 3;
- }
- Address ldr_map_instr_address =
- inline_end_address + ldr_map_offset * Assembler::kInstrSize;
- Address ldr_cell_instr_address =
- inline_end_address + ldr_cell_offset * Assembler::kInstrSize;
-
- // Patch the map check.
- Assembler::set_target_address_at(ldr_map_instr_address,
- reinterpret_cast<Address>(map));
- // Patch the cell address.
- Assembler::set_target_address_at(ldr_cell_instr_address,
- reinterpret_cast<Address>(cell));
-
- return true;
-}
-
-
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
- if (V8::UseCrankshaft()) return false;
-
- // Find the end of the inlined code for the store if there is an
- // inlined version of the store.
- Address inline_end_address = 0;
- if (InlinedICSiteMarker(address, &inline_end_address)
- != Assembler::PROPERTY_ACCESS_INLINED) {
- return false;
- }
-
- // Compute the address of the map load instruction.
- Address ldr_map_instr_address =
- inline_end_address -
- (CodeGenerator::GetInlinedNamedStoreInstructionsAfterPatch() *
- Assembler::kInstrSize);
-
- // Update the offsets if initializing the inlined store. No reason
- // to update the offsets when clearing the inlined version because
- // it will bail out in the map check.
- if (map != HEAP->null_value()) {
- // Patch the offset in the actual store instruction.
- Address str_property_instr_address =
- ldr_map_instr_address + 3 * Assembler::kInstrSize;
- Instr str_property_instr = Assembler::instr_at(str_property_instr_address);
- ASSERT(Assembler::IsStrRegisterImmediate(str_property_instr));
- str_property_instr = Assembler::SetStrRegisterImmediateOffset(
- str_property_instr, offset - kHeapObjectTag);
- Assembler::instr_at_put(str_property_instr_address, str_property_instr);
-
- // Patch the offset in the add instruction that is part of the
- // write barrier.
- Address add_offset_instr_address =
- str_property_instr_address + Assembler::kInstrSize;
- Instr add_offset_instr = Assembler::instr_at(add_offset_instr_address);
- ASSERT(Assembler::IsAddRegisterImmediate(add_offset_instr));
- add_offset_instr = Assembler::SetAddRegisterImmediateOffset(
- add_offset_instr, offset - kHeapObjectTag);
- Assembler::instr_at_put(add_offset_instr_address, add_offset_instr);
-
- // Indicate that code has changed.
- CPU::FlushICache(str_property_instr_address, 2 * Assembler::kInstrSize);
- }
-
- // Patch the map check.
- Assembler::set_target_address_at(ldr_map_instr_address,
- reinterpret_cast<Address>(map));
-
- return true;
-}
-
-
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
- if (V8::UseCrankshaft()) return false;
-
- Address inline_end_address = 0;
- if (InlinedICSiteMarker(address, &inline_end_address)
- != Assembler::PROPERTY_ACCESS_INLINED) {
- return false;
- }
-
- // Patch the map check.
- Address ldr_map_instr_address =
- inline_end_address -
- (CodeGenerator::GetInlinedKeyedLoadInstructionsAfterPatch() *
- Assembler::kInstrSize);
- Assembler::set_target_address_at(ldr_map_instr_address,
- reinterpret_cast<Address>(map));
- return true;
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
- if (V8::UseCrankshaft()) return false;
-
- // Find the end of the inlined code for handling the store if this is an
- // inlined IC call site.
- Address inline_end_address = 0;
- if (InlinedICSiteMarker(address, &inline_end_address)
- != Assembler::PROPERTY_ACCESS_INLINED) {
- return false;
- }
-
- // Patch the map check.
- Address ldr_map_instr_address =
- inline_end_address -
- (CodeGenerator::kInlinedKeyedStoreInstructionsAfterPatch *
- Assembler::kInstrSize);
- Assembler::set_target_address_at(ldr_map_instr_address,
- reinterpret_cast<Address>(map));
- return true;
-}
-
-
-Object* KeyedLoadIC_Miss(Arguments args);
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Isolate* isolate = masm->isolate();
-
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);
-
- __ Push(r1, r0);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
-
- __ Push(r1, r0);
-
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label slow, check_string, index_smi, index_string, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register key = r0;
- Register receiver = r1;
-
- Isolate* isolate = masm->isolate();
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_string);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow);
-
- // Check the "has fast elements" bit in the receiver's map which is
- // now in r2.
- __ ldrb(r3, FieldMemOperand(r2, Map::kBitField2Offset));
- __ tst(r3, Operand(1 << Map::kHasFastElements));
- __ b(eq, &check_number_dictionary);
-
- GenerateFastArrayLoad(
- masm, receiver, key, r4, r3, r2, r0, NULL, &slow);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r2, r3);
- __ Ret();
-
- __ bind(&check_number_dictionary);
- __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));
-
- // Check whether the elements is a number dictionary.
- // r0: key
- // r3: elements map
- // r4: elements
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &slow);
- __ mov(r2, Operand(r0, ASR, kSmiTagSize));
- GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r0, r2, r3, r5);
- __ Ret();
-
- // Slow case, key and receiver still in r0 and r1.
- __ bind(&slow);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
- 1, r2, r3);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, key, r2, r3, &index_string, &slow);
-
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, r2, r3, Map::kHasNamedInterceptor, &slow);
-
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary.
- __ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
- __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r4, ip);
- __ b(eq, &probe_dictionary);
-
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the string hash.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
- __ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset));
- __ eor(r3, r3, Operand(r4, ASR, String::kHashShift));
- __ And(r3, r3, Operand(KeyedLookupCache::kCapacityMask));
-
- // Load the key (consisting of map and symbol) from the cache and
- // check for match.
- ExternalReference cache_keys =
- ExternalReference::keyed_lookup_cache_keys(isolate);
- __ mov(r4, Operand(cache_keys));
- __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
- __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex)); // Move r4 to symbol.
- __ cmp(r2, r5);
- __ b(ne, &slow);
- __ ldr(r5, MemOperand(r4));
- __ cmp(r0, r5);
- __ b(ne, &slow);
-
- // Get field offset.
- // r0 : key
- // r1 : receiver
- // r2 : receiver's map
- // r3 : lookup cache index
- ExternalReference cache_field_offsets =
- ExternalReference::keyed_lookup_cache_field_offsets(isolate);
- __ mov(r4, Operand(cache_field_offsets));
- __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
- __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
- __ sub(r5, r5, r6, SetCC);
- __ b(ge, &property_array_property);
-
- // Load in-object property.
- __ ldrb(r6, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ add(r6, r6, r5); // Index from start of object.
- __ sub(r1, r1, Operand(kHeapObjectTag)); // Remove the heap tag.
- __ ldr(r0, MemOperand(r1, r6, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1, r2, r3);
- __ Ret();
-
- // Load property array property.
- __ bind(&property_array_property);
- __ ldr(r1, FieldMemOperand(r1, JSObject::kPropertiesOffset));
- __ add(r1, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r0, MemOperand(r1, r5, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1, r2, r3);
- __ Ret();
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- // r1: receiver
- // r0: key
- // r3: elements
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
- // Load the property to r0.
- GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
- 1, r2, r3);
- __ Ret();
-
- __ bind(&index_string);
- __ IndexFromHash(r3, key);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key (index)
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- Register receiver = r1;
- Register index = r0;
- Register scratch1 = r2;
- Register scratch2 = r3;
- Register result = r0;
-
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch1,
- scratch2,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX);
- char_at_generator.GenerateFast(masm);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label slow;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(r1, &slow);
-
- // Check that the key is an array index, that is Uint32.
- __ tst(r0, Operand(kSmiTagMask | kSmiSignMask));
- __ b(ne, &slow);
-
- // Get the map of the receiver.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ and_(r3, r3, Operand(kSlowCaseBitFieldMask));
- __ cmp(r3, Operand(1 << Map::kHasIndexedInterceptor));
- __ b(ne, &slow);
-
- // Everything is fine, call runtime.
- __ Push(r1, r0); // Receiver, key.
-
- // Perform tail call to the entry.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
- masm->isolate()),
- 2,
- 1);
-
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
-
- // Push receiver, key and value for runtime call.
- __ Push(r2, r1, r0);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
-
- // Push receiver, key and value for runtime call.
- __ Push(r2, r1, r0);
-
- __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes
- __ mov(r0, Operand(Smi::FromInt(strict_mode))); // Strict mode.
- __ Push(r1, r0);
-
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label slow, fast, array, extra;
-
- // Register usage.
- Register value = r0;
- Register key = r1;
- Register receiver = r2;
- Register elements = r3; // Elements array of the receiver.
- // r4 and r5 are used as general scratch registers.
-
- // Check that the key is a smi.
- __ tst(key, Operand(kSmiTagMask));
- __ b(ne, &slow);
- // Check that the object isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, &slow);
- // Get the map of the object.
- __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
- __ b(ne, &slow);
- // Check if the object is a JS array or not.
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ cmp(r4, Operand(JS_ARRAY_TYPE));
- __ b(eq, &array);
- // Check that the object is some kind of JS object.
- __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
- __ b(lt, &slow);
-
- // Object case: Check key against length in the elements array.
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check that the object is in fast mode and writable.
- __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(r4, ip);
- __ b(ne, &slow);
- // Check array bounds. Both the key and the length of FixedArray are smis.
- __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(key, Operand(ip));
- __ b(lo, &fast);
-
- // Slow case, handle jump to runtime.
- __ bind(&slow);
- // Entry registers are intact.
- // r0: value.
- // r1: key.
- // r2: receiver.
- GenerateRuntimeSetProperty(masm, strict_mode);
-
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
- __ bind(&extra);
- // Condition code from comparing key and array length is still available.
- __ b(ne, &slow); // Only support writing to writing to array[array.length].
- // Check for room in the elements backing store.
- // Both the key and the length of FixedArray are smis.
- __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(key, Operand(ip));
- __ b(hs, &slow);
- // Calculate key + 1 as smi.
- ASSERT_EQ(0, kSmiTag);
- __ add(r4, key, Operand(Smi::FromInt(1)));
- __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ b(&fast);
-
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
- // is the length is always a smi.
- __ bind(&array);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(r4, ip);
- __ b(ne, &slow);
-
- // Check the key against the length in the array.
- __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ cmp(key, Operand(ip));
- __ b(hs, &extra);
- // Fall through to fast case.
-
- __ bind(&fast);
- // Fast case, store the value to the elements backing store.
- __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value, MemOperand(r5));
- // Skip write barrier if the written value is a smi.
- __ tst(value, Operand(kSmiTagMask));
- __ Ret(eq);
- // Update write barrier for the elements array address.
- __ sub(r4, r5, Operand(elements));
- __ RecordWrite(elements, Operand(r4), r5, r6);
-
- __ Ret();
-}
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
- NOT_IN_LOOP,
- MONOMORPHIC,
- strict_mode);
-
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, r1, r2, r3, r4, r5);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- __ Push(r1, r2, r0);
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSObject::SetElementsLength accepts
- // (currently anything except for external and pixel arrays which means
- // anything with elements of FixedArray type.), but currently is restricted
- // to JSArray.
- // Value must be a number, but only smis are accepted as the most common case.
-
- Label miss;
-
- Register receiver = r1;
- Register value = r0;
- Register scratch = r3;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
- __ b(ne, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
- __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
- __ b(ne, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ Push(receiver, value);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- GenerateStringDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss);
-
- GenerateDictionaryStore(masm, &miss, r3, r2, r0, r4, r5);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(),
- 1, r4, r5);
- __ Ret();
-
- __ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5);
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- __ Push(r1, r2, r0);
-
- __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes
- __ mov(r0, Operand(Smi::FromInt(strict_mode)));
- __ Push(r1, r0);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-#undef __
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return eq;
- case Token::LT:
- return lt;
- case Token::GT:
- // Reverse left and right operands to obtain ECMA-262 conversion order.
- return lt;
- case Token::LTE:
- // Reverse left and right operands to obtain ECMA-262 conversion order.
- return ge;
- case Token::GTE:
- return ge;
- default:
- UNREACHABLE();
- return kNoCondition;
- }
-}
-
-
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope;
- Handle<Code> rewritten;
- State previous_state = GetState();
- State state = TargetState(previous_state, false, x, y);
- if (state == GENERIC) {
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
- rewritten = stub.GetCode();
- } else {
- ICCompareStub stub(op_, state);
- rewritten = stub.GetCode();
- }
- set_target(*rewritten);
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[CompareIC (%s->%s)#%s]\n",
- GetStateName(previous_state),
- GetStateName(state),
- Token::Name(op_));
- }
-#endif
-
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address());
- }
-}
-
-
-void PatchInlinedSmiCode(Address address) {
- Address cmp_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a cmp rx, #yyy, nothing
- // was inlined.
- Instr instr = Assembler::instr_at(cmp_instruction_address);
- if (!Assembler::IsCmpImmediate(instr)) {
- return;
- }
-
- // The delta to the start of the map check instruction and the
- // condition code uses at the patched jump.
- int delta = Assembler::GetCmpImmediateRawImmediate(instr);
- delta +=
- Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
- // If the delta is 0 the instruction is cmp r0, #0 which also signals that
- // nothing was inlined.
- if (delta == 0) {
- return;
- }
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
- address, cmp_instruction_address, delta);
- }
-#endif
-
- Address patch_address =
- cmp_instruction_address - delta * Instruction::kInstrSize;
- Instr instr_at_patch = Assembler::instr_at(patch_address);
- Instr branch_instr =
- Assembler::instr_at(patch_address + Instruction::kInstrSize);
- ASSERT(Assembler::IsCmpRegister(instr_at_patch));
- ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
- Assembler::GetRm(instr_at_patch).code());
- ASSERT(Assembler::IsBranch(branch_instr));
- if (Assembler::GetCondition(branch_instr) == eq) {
- // This is patching a "jump if not smi" site to be active.
- // Changing
- // cmp rx, rx
- // b eq, <target>
- // to
- // tst rx, #kSmiTagMask
- // b ne, <target>
- CodePatcher patcher(patch_address, 2);
- Register reg = Assembler::GetRn(instr_at_patch);
- patcher.masm()->tst(reg, Operand(kSmiTagMask));
- patcher.EmitCondition(ne);
- } else {
- ASSERT(Assembler::GetCondition(branch_instr) == ne);
- // This is patching a "jump if smi" site to be active.
- // Changing
- // cmp rx, rx
- // b ne, <target>
- // to
- // tst rx, #kSmiTagMask
- // b eq, <target>
- CodePatcher patcher(patch_address, 2);
- Register reg = Assembler::GetRn(instr_at_patch);
- patcher.masm()->tst(reg, Operand(kSmiTagMask));
- patcher.EmitCondition(eq);
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/jump-target-arm.cc b/src/3rdparty/v8/src/arm/jump-target-arm.cc
deleted file mode 100644
index df370c4..0000000
--- a/src/3rdparty/v8/src/arm/jump-target-arm.cc
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-void JumpTarget::DoJump() {
- ASSERT(cgen()->has_valid_frame());
- // Live non-frame registers are not allowed at unconditional jumps
- // because we have no way of invalidating the corresponding results
- // which are still live in the C++ code.
- ASSERT(cgen()->HasValidEntryRegisters());
-
- if (entry_frame_set_) {
- if (entry_label_.is_bound()) {
- // If we already bound and generated code at the destination then it
- // is too late to ask for less optimistic type assumptions.
- ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
- }
- // There already a frame expectation at the target.
- cgen()->frame()->MergeTo(&entry_frame_);
- cgen()->DeleteFrame();
- } else {
- // Clone the current frame to use as the expected one at the target.
- set_entry_frame(cgen()->frame());
- // Zap the fall-through frame since the jump was unconditional.
- RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
- }
- if (entry_label_.is_bound()) {
- // You can't jump backwards to an already bound label unless you admitted
- // up front that this was a bidirectional jump target. Bidirectional jump
- // targets will zap their type info when bound in case some later virtual
- // frame with less precise type info branches to them.
- ASSERT(direction_ != FORWARD_ONLY);
- }
- __ jmp(&entry_label_);
-}
-
-
-void JumpTarget::DoBranch(Condition cond, Hint ignored) {
- ASSERT(cgen()->has_valid_frame());
-
- if (entry_frame_set_) {
- if (entry_label_.is_bound()) {
- // If we already bound and generated code at the destination then it
- // is too late to ask for less optimistic type assumptions.
- ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
- }
- // We have an expected frame to merge to on the backward edge.
- cgen()->frame()->MergeTo(&entry_frame_, cond);
- } else {
- // Clone the current frame to use as the expected one at the target.
- set_entry_frame(cgen()->frame());
- }
- if (entry_label_.is_bound()) {
- // You can't branch backwards to an already bound label unless you admitted
- // up front that this was a bidirectional jump target. Bidirectional jump
- // targets will zap their type info when bound in case some later virtual
- // frame with less precise type info branches to them.
- ASSERT(direction_ != FORWARD_ONLY);
- }
- __ b(cond, &entry_label_);
- if (cond == al) {
- cgen()->DeleteFrame();
- }
-}
-
-
-void JumpTarget::Call() {
- // Call is used to push the address of the catch block on the stack as
- // a return address when compiling try/catch and try/finally. We
- // fully spill the frame before making the call. The expected frame
- // at the label (which should be the only one) is the spilled current
- // frame plus an in-memory return address. The "fall-through" frame
- // at the return site is the spilled current frame.
- ASSERT(cgen()->has_valid_frame());
- // There are no non-frame references across the call.
- ASSERT(cgen()->HasValidEntryRegisters());
- ASSERT(!is_linked());
-
- // Calls are always 'forward' so we use a copy of the current frame (plus
- // one for a return address) as the expected frame.
- ASSERT(!entry_frame_set_);
- VirtualFrame target_frame = *cgen()->frame();
- target_frame.Adjust(1);
- set_entry_frame(&target_frame);
-
- __ bl(&entry_label_);
-}
-
-
-void JumpTarget::DoBind() {
- ASSERT(!is_bound());
-
- // Live non-frame registers are not allowed at the start of a basic
- // block.
- ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
-
- if (cgen()->has_valid_frame()) {
- if (direction_ != FORWARD_ONLY) cgen()->frame()->ForgetTypeInfo();
- // If there is a current frame we can use it on the fall through.
- if (!entry_frame_set_) {
- entry_frame_ = *cgen()->frame();
- entry_frame_set_ = true;
- } else {
- cgen()->frame()->MergeTo(&entry_frame_);
- // On fall through we may have to merge both ways.
- if (direction_ != FORWARD_ONLY) {
- // This will not need to adjust the virtual frame entries that are
- // register allocated since that was done above and they now match.
- // But it does need to adjust the entry_frame_ of this jump target
- // to make it potentially less optimistic. Later code can branch back
- // to this jump target and we need to assert that that code does not
- // have weaker assumptions about types.
- entry_frame_.MergeTo(cgen()->frame());
- }
- }
- } else {
- // If there is no current frame we must have an entry frame which we can
- // copy.
- ASSERT(entry_frame_set_);
- RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(&entry_frame_), &empty);
- }
-
- __ bind(&entry_label_);
-}
-
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/lithium-arm.cc b/src/3rdparty/v8/src/arm/lithium-arm.cc
deleted file mode 100644
index a5216ad..0000000
--- a/src/3rdparty/v8/src/arm/lithium-arm.cc
+++ /dev/null
@@ -1,2120 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "lithium-allocator-inl.h"
-#include "arm/lithium-arm.h"
-#include "arm/lithium-codegen-arm.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- void L##type::CompileToNative(LCodeGen* generator) { \
- generator->Do##type(this); \
- }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
- register_spills_[i] = NULL;
- }
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
- double_register_spills_[i] = NULL;
- }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsStackSlot());
- ASSERT(register_spills_[allocation_index] == NULL);
- register_spills_[allocation_index] = spill_operand;
-}
-
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as
- // temporaries and outputs because all registers
- // are blocked by the calling convention.
- // Inputs must use a fixed register.
- ASSERT(Output() == NULL ||
- LUnallocated::cast(Output())->HasFixedPolicy() ||
- !LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); it.HasNext(); it.Advance()) {
- LOperand* operand = it.Next();
- ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
- !LUnallocated::cast(operand)->HasRegisterPolicy());
- }
- for (TempIterator it(this); it.HasNext(); it.Advance()) {
- LOperand* operand = it.Next();
- ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
- !LUnallocated::cast(operand)->HasRegisterPolicy());
- }
-}
-#endif
-
-
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsDoubleStackSlot());
- ASSERT(double_register_spills_[allocation_index] == NULL);
- double_register_spills_[allocation_index] = spill_operand;
-}
-
-
-void LInstruction::PrintTo(StringStream* stream) {
- stream->Add("%s ", this->Mnemonic());
-
- PrintOutputOperandTo(stream);
-
- PrintDataTo(stream);
-
- if (HasEnvironment()) {
- stream->Add(" ");
- environment()->PrintTo(stream);
- }
-
- if (HasPointerMap()) {
- stream->Add(" ");
- pointer_map()->PrintTo(stream);
- }
-}
-
-
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- inputs_.PrintOperandsTo(stream);
-}
-
-
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
- results_.PrintOperandsTo(stream);
-}
-
-
-template<typename T, int N>
-void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) {
- for (int i = 0; i < N; i++) {
- if (i > 0) stream->Add(" ");
- elems_[i]->PrintTo(stream);
- }
-}
-
-
-void LLabel::PrintDataTo(StringStream* stream) {
- LGap::PrintDataTo(stream);
- LLabel* rep = replacement();
- if (rep != NULL) {
- stream->Add(" Dead block replaced with B%d", rep->block_id());
- }
-}
-
-
-bool LGap::IsRedundant() const {
- for (int i = 0; i < 4; i++) {
- if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
- return false;
- }
- }
-
- return true;
-}
-
-
-void LGap::PrintDataTo(StringStream* stream) const {
- for (int i = 0; i < 4; i++) {
- stream->Add("(");
- if (parallel_moves_[i] != NULL) {
- parallel_moves_[i]->PrintDataTo(stream);
- }
- stream->Add(") ");
- }
-}
-
-
-const char* LArithmeticD::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-d";
- case Token::SUB: return "sub-d";
- case Token::MUL: return "mul-d";
- case Token::DIV: return "div-d";
- case Token::MOD: return "mod-d";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-const char* LArithmeticT::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-t";
- case Token::SUB: return "sub-t";
- case Token::MUL: return "mul-t";
- case Token::MOD: return "mod-t";
- case Token::DIV: return "div-t";
- case Token::BIT_AND: return "bit-and-t";
- case Token::BIT_OR: return "bit-or-t";
- case Token::BIT_XOR: return "bit-xor-t";
- case Token::SHL: return "shl-t";
- case Token::SAR: return "sar-t";
- case Token::SHR: return "shr-t";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) {
- stream->Add("B%d", block_id());
-}
-
-
-void LBranch::PrintDataTo(StringStream* stream) {
- stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- InputAt(0)->PrintTo(stream);
-}
-
-
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- InputAt(0)->PrintTo(stream);
- stream->Add(" %s ", Token::String(op()));
- InputAt(1)->PrintTo(stream);
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- InputAt(0)->PrintTo(stream);
- stream->Add(is_strict() ? " === null" : " == null");
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_object(");
- InputAt(0)->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_smi(");
- InputAt(0)->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_instance_type(");
- InputAt(0)->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_cached_array_index(");
- InputAt(0)->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if class_of_test(");
- InputAt(0)->PrintTo(stream);
- stream->Add(", \"%o\") then B%d else B%d",
- *hydrogen()->class_name(),
- true_block_id(),
- false_block_id());
-}
-
-
-void LTypeofIs::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
- stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
-}
-
-
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if typeof ");
- InputAt(0)->PrintTo(stream);
- stream->Add(" == \"%s\" then B%d else B%d",
- *hydrogen()->type_literal()->ToCString(),
- true_block_id(), false_block_id());
-}
-
-
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
- stream->Add("/%s ", hydrogen()->OpName());
- InputAt(0)->PrintTo(stream);
-}
-
-
-void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
- stream->Add("[%d]", slot_index());
-}
-
-
-void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
- stream->Add("[%d] <- ", slot_index());
- InputAt(1)->PrintTo(stream);
-}
-
-
-void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[r2] #%d / ", arity());
-}
-
-
-void LCallNamed::PrintDataTo(StringStream* stream) {
- SmartPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallGlobal::PrintDataTo(StringStream* stream) {
- SmartPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- InputAt(0)->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LClassOfTest::PrintDataTo(StringStream* stream) {
- stream->Add("= class_of_test(");
- InputAt(0)->PrintTo(stream);
- stream->Add(", \"%o\")", *hydrogen()->class_name());
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
- arguments()->PrintTo(stream);
-
- stream->Add(" length ");
- length()->PrintTo(stream);
-
- stream->Add(" index ");
- index()->PrintTo(stream);
-}
-
-
-void LStoreNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
-LChunk::LChunk(CompilationInfo* info, HGraph* graph)
- : spill_slot_count_(0),
- info_(info),
- graph_(graph),
- instructions_(32),
- pointer_maps_(8),
- inlined_closures_(1) {
-}
-
-
-int LChunk::GetNextSpillIndex(bool is_double) {
- // Skip a slot if for a double-width slot.
- if (is_double) spill_slot_count_++;
- return spill_slot_count_++;
-}
-
-
-LOperand* LChunk::GetNextSpillSlot(bool is_double) {
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
- return LDoubleStackSlot::Create(index);
- } else {
- return LStackSlot::Create(index);
- }
-}
-
-
-void LChunk::MarkEmptyBlocks() {
- HPhase phase("Mark empty blocks", this);
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- int first = block->first_instruction_index();
- int last = block->last_instruction_index();
- LInstruction* first_instr = instructions()->at(first);
- LInstruction* last_instr = instructions()->at(last);
-
- LLabel* label = LLabel::cast(first_instr);
- if (last_instr->IsGoto()) {
- LGoto* goto_instr = LGoto::cast(last_instr);
- if (!goto_instr->include_stack_check() &&
- label->IsRedundant() &&
- !label->is_loop_header()) {
- bool can_eliminate = true;
- for (int i = first + 1; i < last && can_eliminate; ++i) {
- LInstruction* cur = instructions()->at(i);
- if (cur->IsGap()) {
- LGap* gap = LGap::cast(cur);
- if (!gap->IsRedundant()) {
- can_eliminate = false;
- }
- } else {
- can_eliminate = false;
- }
- }
-
- if (can_eliminate) {
- label->set_replacement(GetLabel(goto_instr->block_id()));
- }
- }
- }
- }
-}
-
-
-void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
- LGap* gap = new LGap(block);
- int index = -1;
- if (instr->IsControl()) {
- instructions_.Add(gap);
- index = instructions_.length();
- instructions_.Add(instr);
- } else {
- index = instructions_.length();
- instructions_.Add(instr);
- instructions_.Add(gap);
- }
- if (instr->HasPointerMap()) {
- pointer_maps_.Add(instr->pointer_map());
- instr->pointer_map()->set_lithium_position(index);
- }
-}
-
-
-LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
- return LConstantOperand::Create(constant->id());
-}
-
-
-int LChunk::GetParameterStackSlot(int index) const {
- // The receiver is at index 0, the first parameter at index 1, so we
- // shift all parameter indexes down by the number of parameters, and
- // make sure they end up negative so they are distinguishable from
- // spill slots.
- int result = index - info()->scope()->num_parameters() - 1;
- ASSERT(result < 0);
- return result;
-}
-
-// A parameter relative to ebp in the arguments stub.
-int LChunk::ParameterAt(int index) {
- ASSERT(-1 <= index); // -1 is the receiver.
- return (1 + info()->scope()->num_parameters() - index) *
- kPointerSize;
-}
-
-
-LGap* LChunk::GetGapAt(int index) const {
- return LGap::cast(instructions_[index]);
-}
-
-
-bool LChunk::IsGapAt(int index) const {
- return instructions_[index]->IsGap();
-}
-
-
-int LChunk::NearestGapPos(int index) const {
- while (!IsGapAt(index)) index--;
- return index;
-}
-
-
-void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
- GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
-}
-
-
-Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
- return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
-}
-
-
-Representation LChunk::LookupLiteralRepresentation(
- LConstantOperand* operand) const {
- return graph_->LookupValue(operand->index())->representation();
-}
-
-
-LChunk* LChunkBuilder::Build() {
- ASSERT(is_unused());
- chunk_ = new LChunk(info(), graph());
- HPhase phase("Building chunk", chunk_);
- status_ = BUILDING;
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- HBasicBlock* next = NULL;
- if (i < blocks->length() - 1) next = blocks->at(i + 1);
- DoBasicBlock(blocks->at(i), next);
- if (is_aborted()) return NULL;
- }
- status_ = DONE;
- return chunk_;
-}
-
-
-void LChunkBuilder::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LChunk building in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
- status_ = ABORTED;
-}
-
-
-LRegister* LChunkBuilder::ToOperand(Register reg) {
- return LRegister::Create(Register::ToAllocationIndex(reg));
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
- return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg));
-}
-
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
- return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
- return Use(value, ToUnallocated(reg));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
- return Use(value,
- new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::NONE));
-}
-
-
-LOperand* LChunkBuilder::UseAtStart(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::NONE,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value);
-}
-
-
-LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegister(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegisterAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseAny(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value, new LUnallocated(LUnallocated::ANY));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
- if (value->EmitAtUses()) {
- HInstruction* instr = HInstruction::cast(value);
- VisitInstruction(instr);
- }
- allocator_->RecordUse(value, operand);
- return operand;
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result) {
- allocator_->RecordDefinition(current_instruction_, result);
- instr->set_result(result);
- return instr;
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::NONE));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateInstruction<1, I, T>* instr, int index) {
- return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixed(
- LTemplateInstruction<1, I, T>* instr, Register reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
- HEnvironment* hydrogen_env = current_block_->last_environment();
- instr->set_environment(CreateEnvironment(hydrogen_env));
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
- LInstruction* instr, int ast_id) {
- ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
- instruction_pending_deoptimization_environment_ = instr;
- pending_deoptimization_ast_id_ = ast_id;
- return instr;
-}
-
-
-void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
- instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = AstNode::kNoNumber;
-}
-
-
-LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize) {
-#ifdef DEBUG
- instr->VerifyCall();
-#endif
- instr->MarkAsCall();
- instr = AssignPointerMap(instr);
-
- if (hinstr->HasSideEffects()) {
- ASSERT(hinstr->next()->IsSimulate());
- HSimulate* sim = HSimulate::cast(hinstr->next());
- instr = SetInstructionPendingDeoptimizationEnvironment(
- instr, sim->ast_id());
- }
-
- // If instruction does not have side-effects lazy deoptimization
- // after the call will try to deoptimize to the point before the call.
- // Thus we still need to attach environment to this call even if
- // call sequence can not deoptimize eagerly.
- bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
- if (needs_environment && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
-
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
- instr->MarkAsSaveDoubles();
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new LPointerMap(position_));
- return instr;
-}
-
-
-LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- allocator_->RecordTemporary(operand);
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(Register reg) {
- LUnallocated* operand = ToUnallocated(reg);
- allocator_->RecordTemporary(operand);
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
- LUnallocated* operand = ToUnallocated(reg);
- allocator_->RecordTemporary(operand);
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- return new LLabel(instr->block());
-}
-
-
-LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoBit(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineSameAsFirst(new LBitI(op, left, right));
- } else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LArithmeticT* result = new LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoShift(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LArithmeticT* result = new LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
- }
-
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->OperandAt(0)->representation().IsInteger32());
- ASSERT(instr->OperandAt(1)->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->OperandAt(0));
-
- HValue* right_value = instr->OperandAt(1);
- LOperand* right = NULL;
- int constant_value = 0;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- } else {
- right = UseRegister(right_value);
- }
-
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- bool can_deopt = (op == Token::SHR && constant_value == 0);
- if (can_deopt) {
- bool can_truncate = true;
- for (int i = 0; i < instr->uses()->length(); i++) {
- if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) {
- can_truncate = false;
- break;
- }
- }
- can_deopt = !can_truncate;
- }
-
- LInstruction* result =
- DefineSameAsFirst(new LShiftI(op, left, right, can_deopt));
- if (can_deopt) AssignEnvironment(result);
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
- HValue* left = instr->left();
- HValue* right = instr->right();
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
- LOperand* left_operand = UseFixed(left, r1);
- LOperand* right_operand = UseFixed(right, r0);
- LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
- ASSERT(is_building());
- current_block_ = block;
- next_block_ = next_block;
- if (block->IsStartBlock()) {
- block->UpdateEnvironment(graph_->start_environment());
- argument_count_ = 0;
- } else if (block->predecessors()->length() == 1) {
- // We have a single predecessor => copy environment and outgoing
- // argument count from the predecessor.
- ASSERT(block->phis()->length() == 0);
- HBasicBlock* pred = block->predecessors()->at(0);
- HEnvironment* last_environment = pred->last_environment();
- ASSERT(last_environment != NULL);
- // Only copy the environment, if it is later used again.
- if (pred->end()->SecondSuccessor() == NULL) {
- ASSERT(pred->end()->FirstSuccessor() == block);
- } else {
- if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
- pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
- last_environment = last_environment->Copy();
- }
- }
- block->UpdateEnvironment(last_environment);
- ASSERT(pred->argument_count() >= 0);
- argument_count_ = pred->argument_count();
- } else {
- // We are at a state join => process phis.
- HBasicBlock* pred = block->predecessors()->at(0);
- // No need to copy the environment, it cannot be used later.
- HEnvironment* last_environment = pred->last_environment();
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- last_environment->SetValueAt(phi->merged_index(), phi);
- }
- for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
- }
- block->UpdateEnvironment(last_environment);
- // Pick up the outgoing argument count of one of the predecessors.
- argument_count_ = pred->argument_count();
- }
- HInstruction* current = block->first();
- int start = chunk_->instructions()->length();
- while (current != NULL && !is_aborted()) {
- // Code for constants in registers is generated lazily.
- if (!current->EmitAtUses()) {
- VisitInstruction(current);
- }
- current = current->next();
- }
- int end = chunk_->instructions()->length() - 1;
- if (end >= start) {
- block->set_first_instruction_index(start);
- block->set_last_instruction_index(end);
- }
- block->set_argument_count(argument_count_);
- next_block_ = NULL;
- current_block_ = NULL;
-}
-
-
-void LChunkBuilder::VisitInstruction(HInstruction* current) {
- HInstruction* old_current = current_instruction_;
- current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
-
- if (instr != NULL) {
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- if (current->IsTest() && !instr->IsGoto()) {
- ASSERT(instr->IsControl());
- HTest* test = HTest::cast(current);
- instr->set_hydrogen_value(test->value());
- HBasicBlock* first = test->FirstSuccessor();
- HBasicBlock* second = test->SecondSuccessor();
- ASSERT(first != NULL && second != NULL);
- instr->SetBranchTargets(first->block_id(), second->block_id());
- } else {
- instr->set_hydrogen_value(current);
- }
-
- chunk_->AddInstruction(instr, current_block_);
- }
- current_instruction_ = old_current;
-}
-
-
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
- if (hydrogen_env == NULL) return NULL;
-
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
- int ast_id = hydrogen_env->ast_id();
- ASSERT(ast_id != AstNode::kNoNumber);
- int value_count = hydrogen_env->length();
- LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer);
- int argument_index = 0;
- for (int i = 0; i < value_count; ++i) {
- HValue* value = hydrogen_env->values()->at(i);
- LOperand* op = NULL;
- if (value->IsArgumentsObject()) {
- op = NULL;
- } else if (value->IsPushArgument()) {
- op = new LArgument(argument_index++);
- } else {
- op = UseAny(value);
- }
- result->AddValue(op, value->representation());
- }
-
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- LInstruction* result = new LGoto(instr->FirstSuccessor()->block_id(),
- instr->include_stack_check());
- if (instr->include_stack_check()) result = AssignPointerMap(result);
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoTest(HTest* instr) {
- HValue* v = instr->value();
- if (v->EmitAtUses()) {
- if (v->IsClassOfTest()) {
- HClassOfTest* compare = HClassOfTest::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
- TempRegister());
- } else if (v->IsCompare()) {
- HCompare* compare = HCompare::cast(v);
- Token::Value op = compare->token();
- HValue* left = compare->left();
- HValue* right = compare->right();
- Representation r = compare->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(left->representation().IsInteger32());
- ASSERT(right->representation().IsInteger32());
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- } else if (r.IsDouble()) {
- ASSERT(left->representation().IsDouble());
- ASSERT(right->representation().IsDouble());
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- } else {
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
- bool reversed = op == Token::GT || op == Token::LTE;
- LOperand* left_operand = UseFixed(left, reversed ? r0 : r1);
- LOperand* right_operand = UseFixed(right, reversed ? r1 : r0);
- LInstruction* result = new LCmpTAndBranch(left_operand,
- right_operand);
- return MarkAsCall(result, instr);
- }
- } else if (v->IsIsSmi()) {
- HIsSmi* compare = HIsSmi::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LIsSmiAndBranch(Use(compare->value()));
- } else if (v->IsHasInstanceType()) {
- HHasInstanceType* compare = HHasInstanceType::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LHasInstanceTypeAndBranch(
- UseRegisterAtStart(compare->value()));
- } else if (v->IsHasCachedArrayIndex()) {
- HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(compare->value()));
- } else if (v->IsIsNull()) {
- HIsNull* compare = HIsNull::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LIsNullAndBranch(UseRegisterAtStart(compare->value()));
- } else if (v->IsIsObject()) {
- HIsObject* compare = HIsObject::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- LOperand* temp = TempRegister();
- return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()), temp);
- } else if (v->IsCompareJSObjectEq()) {
- HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
- return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsInstanceOf()) {
- HInstanceOf* instance_of = HInstanceOf::cast(v);
- LInstruction* result =
- new LInstanceOfAndBranch(UseFixed(instance_of->left(), r0),
- UseFixed(instance_of->right(), r1));
- return MarkAsCall(result, instr);
- } else if (v->IsTypeofIs()) {
- HTypeofIs* typeof_is = HTypeofIs::cast(v);
- return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
- } else if (v->IsIsConstructCall()) {
- return new LIsConstructCallAndBranch(TempRegister());
- } else {
- if (v->IsConstant()) {
- if (HConstant::cast(v)->handle()->IsTrue()) {
- return new LGoto(instr->FirstSuccessor()->block_id());
- } else if (HConstant::cast(v)->handle()->IsFalse()) {
- return new LGoto(instr->SecondSuccessor()->block_id());
- }
- }
- Abort("Undefined compare before branch");
- return NULL;
- }
- }
- return new LBranch(UseRegisterAtStart(v));
-}
-
-
-LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new LCmpMapAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
- return DefineAsRegister(new LArgumentsLength(UseRegister(length->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- return DefineAsRegister(new LArgumentsElements);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LInstanceOf* result =
- new LInstanceOf(UseFixed(instr->left(), r0),
- UseFixed(instr->right(), r1));
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
- HInstanceOfKnownGlobal* instr) {
- LInstanceOfKnownGlobal* result =
- new LInstanceOfKnownGlobal(UseFixed(instr->value(), r0), FixedTemp(r4));
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
- LOperand* function = UseFixed(instr->function(), r1);
- LOperand* receiver = UseFixed(instr->receiver(), r0);
- LOperand* length = UseFixed(instr->length(), r2);
- LOperand* elements = UseFixed(instr->elements(), r3);
- LApplyArguments* result = new LApplyArguments(function,
- receiver,
- length,
- elements);
- return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
- LOperand* argument = Use(instr->argument());
- return new LPushArgument(argument);
-}
-
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return DefineAsRegister(new LContext);
-}
-
-
-LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LOuterContext(context));
-}
-
-
-LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LGlobalObject(context));
-}
-
-
-LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LGlobalReceiver(global_object));
-}
-
-
-LInstruction* LChunkBuilder::DoCallConstantFunction(
- HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallConstantFunction, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- BuiltinFunctionId op = instr->op();
- if (op == kMathLog || op == kMathSin || op == kMathCos) {
- LOperand* input = UseFixedDouble(instr->value(), d2);
- LUnaryMathOperation* result = new LUnaryMathOperation(input, NULL);
- return MarkAsCall(DefineFixedDouble(result, d2), instr);
- } else {
- LOperand* input = UseRegisterAtStart(instr->value());
- LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
- LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
- switch (op) {
- case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- case kMathFloor:
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- case kMathSqrt:
- return DefineSameAsFirst(result);
- case kMathRound:
- return AssignEnvironment(DefineAsRegister(result));
- case kMathPowHalf:
- return DefineSameAsFirst(result);
- default:
- UNREACHABLE();
- return NULL;
- }
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- ASSERT(instr->key()->representation().IsTagged());
- argument_count_ -= instr->argument_count();
- LOperand* key = UseFixed(instr->key(), r2);
- return MarkAsCall(DefineFixed(new LCallKeyed(key), r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallNamed, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallGlobal, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallKnownGlobal, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* constructor = UseFixed(instr->constructor(), r1);
- argument_count_ -= instr->argument_count();
- LCallNew* result = new LCallNew(constructor);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallFunction, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallRuntime, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- return DoShift(Token::SHR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- return DoShift(Token::SAR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- return DoShift(Token::SHL, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
- return DoBit(Token::BIT_AND, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
- ASSERT(instr->value()->representation().IsInteger32());
- ASSERT(instr->representation().IsInteger32());
- return DefineSameAsFirst(new LBitNotI(UseRegisterAtStart(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
- return DoBit(Token::BIT_OR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
- return DoBit(Token::BIT_XOR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsInteger32()) {
- // TODO(1042) The fixed register allocation
- // is needed because we call GenericBinaryOpStub from
- // the generated code, which requires registers r0
- // and r1 to be used. We should remove that
- // when we provide a native implementation.
- LOperand* dividend = UseFixed(instr->left(), r0);
- LOperand* divisor = UseFixed(instr->right(), r1);
- return AssignEnvironment(AssignPointerMap(
- DefineFixed(new LDivI(dividend, divisor), r0)));
- } else {
- return DoArithmeticT(Token::DIV, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LModI* mod;
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- mod = new LModI(value, UseOrConstant(instr->right()));
- } else {
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegisterAtStart(instr->right());
- mod = new LModI(dividend,
- divisor,
- TempRegister(),
- FixedTemp(d1),
- FixedTemp(d2));
- }
-
- return AssignEnvironment(DefineSameAsFirst(mod));
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
- } else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC.
- // We need to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LOperand* left = UseFixedDouble(instr->left(), d1);
- LOperand* right = UseFixedDouble(instr->right(), d2);
- LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
- return MarkAsCall(DefineFixedDouble(result, d1), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
- LOperand* temp = NULL;
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- temp = TempRegister();
- }
- LMulI* mul = new LMulI(left, right, temp);
- return AssignEnvironment(DefineSameAsFirst(mul));
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MUL, instr);
- } else {
- return DoArithmeticT(Token::MUL, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LSubI* sub = new LSubI(left, right);
- LInstruction* result = DefineSameAsFirst(sub);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::SUB, instr);
- } else {
- return DoArithmeticT(Token::SUB, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- LAddI* add = new LAddI(left, right);
- LInstruction* result = DefineSameAsFirst(add);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::ADD, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::ADD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double power. It can't trigger a GC.
- // We need to use fixed result register for the call.
- Representation exponent_type = instr->right()->representation();
- ASSERT(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), d1);
- LOperand* right = exponent_type.IsDouble() ?
- UseFixedDouble(instr->right(), d2) :
- UseFixed(instr->right(), r0);
- LPower* result = new LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, d3),
- instr,
- CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
- Token::Value op = instr->token();
- Representation r = instr->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return DefineAsRegister(new LCmpID(left, right));
- } else if (r.IsDouble()) {
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return DefineAsRegister(new LCmpID(left, right));
- } else {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- bool reversed = (op == Token::GT || op == Token::LTE);
- LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
- LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
- LCmpT* result = new LCmpT(left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareJSObjectEq(
- HCompareJSObjectEq* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsNull(value));
-}
-
-
-LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsObject(value));
-}
-
-
-LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseAtStart(instr->value());
-
- return DefineAsRegister(new LIsSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LHasInstanceType(value));
-}
-
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
- HGetCachedArrayIndex* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
- HHasCachedArrayIndex* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
-
- return DefineAsRegister(new LHasCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseTempRegister(instr->value());
- return DefineSameAsFirst(new LClassOfTest(value));
-}
-
-
-LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LJSArrayLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoExternalArrayLength(
- HExternalArrayLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LExternalArrayLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LFixedArrayLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- LOperand* object = UseRegister(instr->value());
- LValueOf* result = new LValueOf(object, TempRegister());
- return AssignEnvironment(DefineSameAsFirst(result));
-}
-
-
-LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
- UseRegister(instr->length())));
-}
-
-
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- LOperand* value = UseFixed(instr->value(), r0);
- return MarkAsCall(new LThrow(value), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Representation from = instr->from();
- Representation to = instr->to();
- if (from.IsTagged()) {
- if (to.IsDouble()) {
- LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new LNumberUntagD(value);
- return AssignEnvironment(DefineAsRegister(res));
- } else {
- ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- bool needs_check = !instr->value()->type().IsSmi();
- LInstruction* res = NULL;
- if (!needs_check) {
- res = DefineSameAsFirst(new LSmiUntag(value, needs_check));
- } else {
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
- : NULL;
- LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(d3)
- : NULL;
- res = DefineSameAsFirst(new LTaggedToI(value, temp1, temp2, temp3));
- res = AssignEnvironment(res);
- }
- return res;
- }
- } else if (from.IsDouble()) {
- if (to.IsTagged()) {
- LOperand* value = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
-
- // Make sure that the temp and result_temp registers are
- // different.
- LUnallocated* result_temp = TempRegister();
- LNumberTagD* result = new LNumberTagD(value, temp1, temp2);
- Define(result, result_temp);
- return AssignPointerMap(result);
- } else {
- ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- LDoubleToI* res =
- new LDoubleToI(value,
- TempRegister(),
- instr->CanTruncateToInt32() ? TempRegister() : NULL);
- return AssignEnvironment(DefineAsRegister(res));
- }
- } else if (from.IsInteger32()) {
- if (to.IsTagged()) {
- HValue* val = instr->value();
- LOperand* value = UseRegister(val);
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return DefineSameAsFirst(new LSmiTag(value));
- } else {
- LNumberTagI* result = new LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- }
- } else {
- ASSERT(to.IsDouble());
- LOperand* value = Use(instr->value());
- return DefineAsRegister(new LInteger32ToDouble(value));
- }
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckNonSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new LCheckInstanceType(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LInstruction* result = new LCheckPrototypeMaps(temp1, temp2);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckFunction(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new LCheckMap(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- return new LReturn(UseFixed(instr->value(), r0));
-}
-
-
-LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
- Representation r = instr->representation();
- if (r.IsInteger32()) {
- return DefineAsRegister(new LConstantI);
- } else if (r.IsDouble()) {
- return DefineAsRegister(new LConstantD);
- } else if (r.IsTagged()) {
- return DefineAsRegister(new LConstantT);
- } else {
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new LLoadGlobalCell;
- return instr->check_hole_value()
- ? AssignEnvironment(DefineAsRegister(result))
- : DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
- LOperand* global_object = UseFixed(instr->global_object(), r0);
- LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- if (instr->check_hole_value()) {
- LOperand* temp = TempRegister();
- LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(new LStoreGlobalCell(value, temp));
- } else {
- LOperand* value = UseRegisterAtStart(instr->value());
- return new LStoreGlobalCell(value, NULL);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
- LOperand* global_object = UseFixed(instr->global_object(), r1);
- LOperand* value = UseFixed(instr->value(), r0);
- LStoreGlobalGeneric* result =
- new LStoreGlobalGeneric(global_object, value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadContextSlot(context));
-}
-
-
-LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* context;
- LOperand* value;
- if (instr->NeedsWriteBarrier()) {
- context = UseTempRegister(instr->context());
- value = UseTempRegister(instr->value());
- } else {
- context = UseRegister(instr->context());
- value = UseRegister(instr->value());
- }
- return new LStoreContextSlot(context, value);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- return DefineAsRegister(
- new LLoadNamedField(UseRegisterAtStart(instr->object())));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
- HLoadNamedFieldPolymorphic* instr) {
- ASSERT(instr->representation().IsTagged());
- if (instr->need_generic()) {
- LOperand* obj = UseFixed(instr->object(), r0);
- LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
- return MarkAsCall(DefineFixed(result, r0), instr);
- } else {
- LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- LOperand* object = UseFixed(instr->object(), r0);
- LInstruction* result = DefineFixed(new LLoadNamedGeneric(object), r0);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
- HLoadFunctionPrototype* instr) {
- return AssignEnvironment(DefineAsRegister(
- new LLoadFunctionPrototype(UseRegister(instr->function()))));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadElements(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
- HLoadExternalArrayPointer* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadExternalArrayPointer(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
- HLoadKeyedFastElement* instr) {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
- LOperand* obj = UseRegisterAtStart(instr->object());
- LOperand* key = UseRegisterAtStart(instr->key());
- LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
- return AssignEnvironment(DefineSameAsFirst(result));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
- HLoadKeyedSpecializedArrayElement* instr) {
- // TODO(danno): Add support for other external array types.
- if (instr->array_type() != kExternalPixelArray) {
- Abort("unsupported load for external array type.");
- return NULL;
- }
-
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->key()->representation().IsInteger32());
- LOperand* external_pointer =
- UseRegisterAtStart(instr->external_pointer());
- LOperand* key = UseRegisterAtStart(instr->key());
- LLoadKeyedSpecializedArrayElement* result =
- new LLoadKeyedSpecializedArrayElement(external_pointer,
- key);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- LOperand* object = UseFixed(instr->object(), r1);
- LOperand* key = UseFixed(instr->key(), r0);
-
- LInstruction* result =
- DefineFixed(new LLoadKeyedGeneric(object, key), r0);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
- HStoreKeyedFastElement* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- ASSERT(instr->value()->representation().IsTagged());
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
-
- LOperand* obj = UseTempRegister(instr->object());
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- LOperand* key = needs_write_barrier
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
-
- return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
- HStoreKeyedSpecializedArrayElement* instr) {
- // TODO(danno): Add support for other external array types.
- if (instr->array_type() != kExternalPixelArray) {
- Abort("unsupported store for external array type.");
- return NULL;
- }
-
- ASSERT(instr->value()->representation().IsInteger32());
- ASSERT(instr->external_pointer()->representation().IsExternal());
- ASSERT(instr->key()->representation().IsInteger32());
-
- LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* value = UseTempRegister(instr->value()); // changed by clamp.
- LOperand* key = UseRegister(instr->key());
-
- return new LStoreKeyedSpecializedArrayElement(external_pointer,
- key,
- value);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- LOperand* obj = UseFixed(instr->object(), r2);
- LOperand* key = UseFixed(instr->key(), r1);
- LOperand* val = UseFixed(instr->value(), r0);
-
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsTagged());
- ASSERT(instr->value()->representation().IsTagged());
-
- return MarkAsCall(new LStoreKeyedGeneric(obj, key, val), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
-
- LOperand* obj = needs_write_barrier
- ? UseTempRegister(instr->object())
- : UseRegisterAtStart(instr->object());
-
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
-
- return new LStoreNamedField(obj, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- LOperand* obj = UseFixed(instr->object(), r1);
- LOperand* val = UseFixed(instr->value(), r0);
-
- LInstruction* result = new LStoreNamedGeneric(obj, val);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegisterOrConstant(instr->index());
- LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
- LOperand* char_code = UseRegister(instr->value());
- LStringCharFromCode* result = new LStringCharFromCode(char_code);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
- LOperand* string = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LStringLength(string));
-}
-
-
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- return MarkAsCall(DefineFixed(new LArrayLiteral, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- return MarkAsCall(DefineFixed(new LObjectLiteral, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- return MarkAsCall(DefineFixed(new LRegExpLiteral, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- return MarkAsCall(DefineFixed(new LFunctionLiteral, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LOperand* object = UseFixed(instr->object(), r0);
- LOperand* key = UseFixed(instr->key(), r1);
- LDeleteProperty* result = new LDeleteProperty(object, key);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- allocator_->MarkAsOsrEntry();
- current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new LOsrEntry);
-}
-
-
-LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new LParameter, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- return DefineAsSpilled(new LUnknownOSRValue, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallStub, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object.
- // arguments.length and element access are supported directly on
- // stack arguments, and any real arguments object use causes a bailout.
- // So this value is never used.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- LOperand* arguments = UseRegister(instr->arguments());
- LOperand* length = UseTempRegister(instr->length());
- LOperand* index = UseRegister(instr->index());
- LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
- return AssignEnvironment(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), r0);
- LToFastProperties* result = new LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LTypeof* result = new LTypeof(UseFixed(instr->value(), r0));
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
- return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
- return DefineAsRegister(new LIsConstructCall());
-}
-
-
-LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = 0; i < instr->values()->length(); ++i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
-
- // If there is an instruction pending deoptimization environment create a
- // lazy bailout instruction to capture the environment.
- if (pending_deoptimization_ast_id_ == instr->ast_id()) {
- LInstruction* result = new LLazyBailout;
- result = AssignEnvironment(result);
- instruction_pending_deoptimization_environment_->
- set_deoptimization_environment(result->environment());
- ClearInstructionPendingDeoptimizationEnvironment();
- return result;
- }
-
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- return MarkAsCall(new LStackCheck, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
- HEnvironment* outer = current_block_->last_environment();
- HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->function(),
- false,
- undefined);
- current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedClosure(instr->closure());
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- HEnvironment* outer = current_block_->last_environment()->outer();
- current_block_->UpdateEnvironment(outer);
- return NULL;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/arm/lithium-arm.h b/src/3rdparty/v8/src/arm/lithium-arm.h
deleted file mode 100644
index f406f95..0000000
--- a/src/3rdparty/v8/src/arm/lithium-arm.h
+++ /dev/null
@@ -1,2179 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_LITHIUM_ARM_H_
-#define V8_ARM_LITHIUM_ARM_H_
-
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "lithium.h"
-#include "safepoint-table.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LCodeGen;
-
-#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
- V(ControlInstruction) \
- V(Call) \
- LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddI) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(ArrayLiteral) \
- V(BitI) \
- V(BitNotI) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallConstantFunction) \
- V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
- V(CallNew) \
- V(CallRuntime) \
- V(CallStub) \
- V(CheckFunction) \
- V(CheckInstanceType) \
- V(CheckNonSmi) \
- V(CheckMap) \
- V(CheckPrototypeMaps) \
- V(CheckSmi) \
- V(ClassOfTest) \
- V(ClassOfTestAndBranch) \
- V(CmpID) \
- V(CmpIDAndBranch) \
- V(CmpJSObjectEq) \
- V(CmpJSObjectEqAndBranch) \
- V(CmpMapAndBranch) \
- V(CmpT) \
- V(CmpTAndBranch) \
- V(ConstantD) \
- V(ConstantI) \
- V(ConstantT) \
- V(Context) \
- V(DeleteProperty) \
- V(Deoptimize) \
- V(DivI) \
- V(DoubleToI) \
- V(ExternalArrayLength) \
- V(FixedArrayLength) \
- V(FunctionLiteral) \
- V(Gap) \
- V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
- V(Goto) \
- V(HasCachedArrayIndex) \
- V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceType) \
- V(HasInstanceTypeAndBranch) \
- V(InstanceOf) \
- V(InstanceOfAndBranch) \
- V(InstanceOfKnownGlobal) \
- V(Integer32ToDouble) \
- V(IsNull) \
- V(IsNullAndBranch) \
- V(IsObject) \
- V(IsObjectAndBranch) \
- V(IsSmi) \
- V(IsSmiAndBranch) \
- V(JSArrayLength) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadElements) \
- V(LoadExternalArrayPointer) \
- V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
- V(LoadGlobalGeneric) \
- V(LoadKeyedFastElement) \
- V(LoadKeyedGeneric) \
- V(LoadKeyedSpecializedArrayElement) \
- V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
- V(LoadNamedGeneric) \
- V(ModI) \
- V(MulI) \
- V(NumberTagD) \
- V(NumberTagI) \
- V(NumberUntagD) \
- V(ObjectLiteral) \
- V(OsrEntry) \
- V(OuterContext) \
- V(Parameter) \
- V(Power) \
- V(PushArgument) \
- V(RegExpLiteral) \
- V(Return) \
- V(ShiftI) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreContextSlot) \
- V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
- V(StoreKeyedFastElement) \
- V(StoreKeyedGeneric) \
- V(StoreKeyedSpecializedArrayElement) \
- V(StoreNamedField) \
- V(StoreNamedGeneric) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringLength) \
- V(SubI) \
- V(TaggedToI) \
- V(Throw) \
- V(ToFastProperties) \
- V(Typeof) \
- V(TypeofIs) \
- V(TypeofIsAndBranch) \
- V(IsConstructCall) \
- V(IsConstructCallAndBranch) \
- V(UnaryMathOperation) \
- V(UnknownOSRValue) \
- V(ValueOf)
-
-
-#define DECLARE_INSTRUCTION(type) \
- virtual bool Is##type() const { return true; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
- }
-
-
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- DECLARE_INSTRUCTION(type)
-
-
-#define DECLARE_HYDROGEN_ACCESSOR(type) \
- H##type* hydrogen() const { \
- return H##type::cast(hydrogen_value()); \
- }
-
-
-class LInstruction: public ZoneObject {
- public:
- LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- is_call_(false),
- is_save_doubles_(false) { }
- virtual ~LInstruction() { }
-
- virtual void CompileToNative(LCodeGen* generator) = 0;
- virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream) = 0;
- virtual void PrintOutputOperandTo(StringStream* stream) = 0;
-
- // Declare virtual type testers.
-#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
- LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- virtual bool IsControl() const { return false; }
- virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
-
- void set_environment(LEnvironment* env) { environment_ = env; }
- LEnvironment* environment() const { return environment_; }
- bool HasEnvironment() const { return environment_ != NULL; }
-
- void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
- LPointerMap* pointer_map() const { return pointer_map_.get(); }
- bool HasPointerMap() const { return pointer_map_.is_set(); }
-
- void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
- HValue* hydrogen_value() const { return hydrogen_value_; }
-
- void set_deoptimization_environment(LEnvironment* env) {
- deoptimization_environment_.set(env);
- }
- LEnvironment* deoptimization_environment() const {
- return deoptimization_environment_.get();
- }
- bool HasDeoptimizationEnvironment() const {
- return deoptimization_environment_.is_set();
- }
-
- void MarkAsCall() { is_call_ = true; }
- void MarkAsSaveDoubles() { is_save_doubles_ = true; }
-
- // Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return is_call_; }
- bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
-
- virtual bool HasResult() const = 0;
- virtual LOperand* result() = 0;
-
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
- LOperand* FirstInput() { return InputAt(0); }
- LOperand* Output() { return HasResult() ? result() : NULL; }
-
-#ifdef DEBUG
- void VerifyCall();
-#endif
-
- private:
- LEnvironment* environment_;
- SetOncePointer<LPointerMap> pointer_map_;
- HValue* hydrogen_value_;
- SetOncePointer<LEnvironment> deoptimization_environment_;
- bool is_call_;
- bool is_save_doubles_;
-};
-
-
-template<typename ElementType, int NumElements>
-class OperandContainer {
- public:
- OperandContainer() {
- for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
- }
- int length() { return NumElements; }
- ElementType& operator[](int i) {
- ASSERT(i < length());
- return elems_[i];
- }
- void PrintOperandsTo(StringStream* stream);
-
- private:
- ElementType elems_[NumElements];
-};
-
-
-template<typename ElementType>
-class OperandContainer<ElementType, 0> {
- public:
- int length() { return 0; }
- void PrintOperandsTo(StringStream* stream) { }
- ElementType& operator[](int i) {
- UNREACHABLE();
- static ElementType t = 0;
- return t;
- }
-};
-
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
- public:
- // Allow 0 or 1 output operands.
- STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0; }
- void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() { return results_[0]; }
-
- int InputCount() { return I; }
- LOperand* InputAt(int i) { return inputs_[i]; }
-
- int TempCount() { return T; }
- LOperand* TempAt(int i) { return temps_[i]; }
-
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
- protected:
- OperandContainer<LOperand*, R> results_;
- OperandContainer<LOperand*, I> inputs_;
- OperandContainer<LOperand*, T> temps_;
-};
-
-
-class LGap: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGap(HBasicBlock* block)
- : block_(block) {
- parallel_moves_[BEFORE] = NULL;
- parallel_moves_[START] = NULL;
- parallel_moves_[END] = NULL;
- parallel_moves_[AFTER] = NULL;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
- virtual void PrintDataTo(StringStream* stream) const;
-
- bool IsRedundant() const;
-
- HBasicBlock* block() const { return block_; }
-
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
-
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
- if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
- return parallel_moves_[pos];
- }
-
- LParallelMove* GetParallelMove(InnerPosition pos) {
- return parallel_moves_[pos];
- }
-
- private:
- LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
- HBasicBlock* block_;
-};
-
-
-class LGoto: public LTemplateInstruction<0, 0, 0> {
- public:
- LGoto(int block_id, bool include_stack_check = false)
- : block_id_(block_id), include_stack_check_(include_stack_check) { }
-
- DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
-
- int block_id() const { return block_id_; }
- bool include_stack_check() const { return include_stack_check_; }
-
- private:
- int block_id_;
- bool include_stack_check_;
-};
-
-
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
- public:
- LLazyBailout() : gap_instructions_size_(0) { }
-
- DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-
- void set_gap_instructions_size(int gap_instructions_size) {
- gap_instructions_size_ = gap_instructions_size;
- }
- int gap_instructions_size() { return gap_instructions_size_; }
-
- private:
- int gap_instructions_size_;
-};
-
-
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
-};
-
-
-class LLabel: public LGap {
- public:
- explicit LLabel(HBasicBlock* block)
- : LGap(block), replacement_(NULL) { }
-
- DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
- virtual void PrintDataTo(StringStream* stream);
-
- int block_id() const { return block()->block_id(); }
- bool is_loop_header() const { return block()->IsLoopHeader(); }
- Label* label() { return &label_; }
- LLabel* replacement() const { return replacement_; }
- void set_replacement(LLabel* label) { replacement_ = label; }
- bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
- Label label_;
- LLabel* replacement_;
-};
-
-
-class LParameter: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-};
-
-
-class LCallStub: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
- DECLARE_HYDROGEN_ACCESSOR(CallStub)
-
- TranscendentalCache::Type transcendental_type() {
- return hydrogen()->transcendental_type();
- }
-};
-
-
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-
-template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
- public:
- DECLARE_INSTRUCTION(ControlInstruction)
- virtual bool IsControl() const { return true; }
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
- void SetBranchTargets(int true_block_id, int false_block_id) {
- true_block_id_ = true_block_id;
- false_block_id_ = false_block_id;
- }
-
- private:
- int true_block_id_;
- int false_block_id_;
-};
-
-
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
- public:
- LApplyArguments(LOperand* function,
- LOperand* receiver,
- LOperand* length,
- LOperand* elements) {
- inputs_[0] = function;
- inputs_[1] = receiver;
- inputs_[2] = length;
- inputs_[3] = elements;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
-
- LOperand* function() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* length() { return inputs_[2]; }
- LOperand* elements() { return inputs_[3]; }
-};
-
-
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
- public:
- LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
- inputs_[0] = arguments;
- inputs_[1] = length;
- inputs_[2] = index;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
- LOperand* arguments() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArgumentsLength(LOperand* elements) {
- inputs_[0] = elements;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
-};
-
-
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
- public:
- LArgumentsElements() { }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
-};
-
-
-class LModI: public LTemplateInstruction<1, 2, 3> {
- public:
- // Used when the right hand is a constant power of 2.
- LModI(LOperand* left,
- LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = NULL;
- temps_[1] = NULL;
- temps_[2] = NULL;
- }
-
- // Used for the standard case.
- LModI(LOperand* left,
- LOperand* right,
- LOperand* temp1,
- LOperand* temp2,
- LOperand* temp3) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = temp3;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-};
-
-
-class LDivI: public LTemplateInstruction<1, 2, 0> {
- public:
- LDivI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-};
-
-
-class LMulI: public LTemplateInstruction<1, 2, 1> {
- public:
- LMulI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-class LCmpID: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpID(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
- }
-};
-
-
-class LCmpIDAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpIDAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
- public:
- LUnaryMathOperation(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-
- virtual void PrintDataTo(StringStream* stream);
- BuiltinFunctionId op() const { return hydrogen()->op(); }
-};
-
-
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpJSObjectEq(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
-};
-
-
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
- "cmp-jsobject-eq-and-branch")
-};
-
-
-class LIsNull: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsNull(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
- DECLARE_HYDROGEN_ACCESSOR(IsNull)
-
- bool is_strict() const { return hydrogen()->is_strict(); }
-};
-
-class LIsNullAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LIsNullAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNull)
-
- bool is_strict() const { return hydrogen()->is_strict(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsObject: public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LIsObject(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
-};
-
-
-class LIsObjectAndBranch: public LControlInstruction<1, 2> {
- public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsSmi: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
- DECLARE_HYDROGEN_ACCESSOR(IsSmi)
-};
-
-
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LIsSmiAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LHasInstanceType(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
-};
-
-
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LHasInstanceTypeAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
- "has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGetCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LHasCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
- "has-cached-array-index-and-branch")
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LClassOfTest: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClassOfTest(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
- public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
- "class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpT(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LCmpTAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpTAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
- public:
- LInstanceOf(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
-class LInstanceOfAndBranch: public LControlInstruction<2, 0> {
- public:
- LInstanceOfAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
-};
-
-
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
- public:
- LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
- "instance-of-known-global")
- DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
-
- Handle<JSFunction> function() const { return hydrogen()->function(); }
-};
-
-
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
- public:
- LBoundsCheck(LOperand* index, LOperand* length) {
- inputs_[0] = index;
- inputs_[1] = length;
- }
-
- LOperand* index() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
-};
-
-
-class LBitI: public LTemplateInstruction<1, 2, 0> {
- public:
- LBitI(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
-
- DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-
- private:
- Token::Value op_;
-};
-
-
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
- public:
- LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : op_(op), can_deopt_(can_deopt) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
-
- bool can_deopt() const { return can_deopt_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
-
- private:
- Token::Value op_;
- bool can_deopt_;
-};
-
-
-class LSubI: public LTemplateInstruction<1, 2, 0> {
- public:
- LSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- int32_t value() const { return hydrogen()->Integer32Value(); }
-};
-
-
-class LConstantD: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- double value() const { return hydrogen()->DoubleValue(); }
-};
-
-
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Handle<Object> value() const { return hydrogen()->handle(); }
-};
-
-
-class LBranch: public LControlInstruction<1, 0> {
- public:
- explicit LBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Value)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
- public:
- LCmpMapAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
- virtual bool IsControl() const { return true; }
-
- Handle<Map> map() const { return hydrogen()->map(); }
- int true_block_id() const {
- return hydrogen()->FirstSuccessor()->block_id();
- }
- int false_block_id() const {
- return hydrogen()->SecondSuccessor()->block_id();
- }
-};
-
-
-class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LJSArrayLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
- DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
-};
-
-
-class LExternalArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LExternalArrayLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ExternalArrayLength, "external-array-length")
- DECLARE_HYDROGEN_ACCESSOR(ExternalArrayLength)
-};
-
-
-class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFixedArrayLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed-array-length")
- DECLARE_HYDROGEN_ACCESSOR(FixedArrayLength)
-};
-
-
-class LValueOf: public LTemplateInstruction<1, 1, 1> {
- public:
- LValueOf(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-};
-
-
-class LThrow: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LThrow(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
-};
-
-
-class LBitNotI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LBitNotI(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
-};
-
-
-class LAddI: public LTemplateInstruction<1, 2, 0> {
- public:
- LAddI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LPower: public LTemplateInstruction<1, 2, 0> {
- public:
- LPower(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Power, "power")
- DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
-
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
-
- private:
- Token::Value op_;
-};
-
-
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
-
- Token::Value op() const { return op_; }
-
- private:
- Token::Value op_;
-};
-
-
-class LReturn: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LReturn(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Return, "return")
-};
-
-
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedField(LOperand* object) {
- inputs_[0] = object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-};
-
-
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedFieldPolymorphic(LOperand* object) {
- inputs_[0] = object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-
- LOperand* object() { return inputs_[0]; }
-};
-
-
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedGeneric(LOperand* object) {
- inputs_[0] = object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
- LOperand* object() { return inputs_[0]; }
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadFunctionPrototype(LOperand* function) {
- inputs_[0] = function;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
- DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-
- LOperand* function() { return inputs_[0]; }
-};
-
-
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
-};
-
-
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadExternalArrayPointer(LOperand* object) {
- inputs_[0] = object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
- "load-external-array-pointer")
-};
-
-
-class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
- inputs_[0] = elements;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-};
-
-
-class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
- LOperand* key) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
- "load-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
-
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- ExternalArrayType array_type() const {
- return hydrogen()->array_type();
- }
-};
-
-
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-};
-
-
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
-};
-
-
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadGlobalGeneric(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
- LOperand* global_object() { return inputs_[0]; }
- Handle<Object> name() const { return hydrogen()->name(); }
- bool for_typeof() const { return hydrogen()->for_typeof(); }
-};
-
-
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
- public:
- LStoreGlobalCell(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-};
-
-
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
- public:
- explicit LStoreGlobalGeneric(LOperand* global_object,
- LOperand* value) {
- inputs_[0] = global_object;
- inputs_[1] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
-
- LOperand* global_object() { return InputAt(0); }
- Handle<Object> name() const { return hydrogen()->name(); }
- LOperand* value() { return InputAt(1); }
-};
-
-
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadContextSlot(LOperand* context) {
- inputs_[0] = context;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-
- LOperand* context() { return InputAt(0); }
- int slot_index() { return hydrogen()->slot_index(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreContextSlot(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
-
- LOperand* context() { return InputAt(0); }
- LOperand* value() { return InputAt(1); }
- int slot_index() { return hydrogen()->slot_index(); }
- int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LPushArgument(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
-};
-
-
-class LContext: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Context, "context")
-};
-
-
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LOuterContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
-
- LOperand* context() { return InputAt(0); }
-};
-
-
-class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalObject(LOperand* context) {
- inputs_[0] = context;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-
- LOperand* context() { return InputAt(0); }
-};
-
-
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalReceiver(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
-
- LOperand* global() { return InputAt(0); }
-};
-
-
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
- DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> function() { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallKeyed(LOperand* key) {
- inputs_[0] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
- DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-
-class LCallNamed: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
- DECLARE_HYDROGEN_ACCESSOR(CallNamed)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> name() const { return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallFunction: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- int arity() const { return hydrogen()->argument_count() - 2; }
-};
-
-
-class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
- DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> name() const {return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
- DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> target() const { return hydrogen()->target(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNew: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallNew(LOperand* constructor) {
- inputs_[0] = constructor;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
- DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
- const Runtime::Function* function() const { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count(); }
-};
-
-
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
-};
-
-
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagI(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
-};
-
-
-class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
-};
-
-
-// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
- public:
- LDoubleToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
- public:
- LTaggedToI(LOperand* value,
- LOperand* temp1,
- LOperand* temp2,
- LOperand* temp3) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = temp3;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LSmiTag(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
-};
-
-
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberUntagD(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
-};
-
-
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
- public:
- LSmiUntag(LOperand* value, bool needs_check)
- : needs_check_(needs_check) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
- bool needs_check() const { return needs_check_; }
-
- private:
- bool needs_check_;
-};
-
-
-class LStoreNamedField: public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreNamedField(LOperand* obj, LOperand* val) {
- inputs_[0] = obj;
- inputs_[1] = val;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
- virtual void PrintDataTo(StringStream* stream);
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- Handle<Object> name() const { return hydrogen()->name(); }
- bool is_in_object() { return hydrogen()->is_in_object(); }
- int offset() { return hydrogen()->offset(); }
- bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
- Handle<Map> transition() const { return hydrogen()->transition(); }
-};
-
-
-class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreNamedGeneric(LOperand* obj, LOperand* val) {
- inputs_[0] = obj;
- inputs_[1] = val;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
- virtual void PrintDataTo(StringStream* stream);
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
-class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
- inputs_[0] = obj;
- inputs_[1] = key;
- inputs_[2] = val;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
-
- virtual void PrintDataTo(StringStream* stream);
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-};
-
-
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) {
- inputs_[0] = obj;
- inputs_[1] = key;
- inputs_[2] = val;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
- virtual void PrintDataTo(StringStream* stream);
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-};
-
-class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
- LOperand* key,
- LOperand* val) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- inputs_[2] = val;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
- "store-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
-
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- ExternalArrayType array_type() const {
- return hydrogen()->array_type();
- }
-};
-
-
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
- public:
- LStringCharCodeAt(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-};
-
-
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LStringCharFromCode(LOperand* char_code) {
- inputs_[0] = char_code;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-
- LOperand* char_code() { return inputs_[0]; }
-};
-
-
-class LStringLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LStringLength(LOperand* string) {
- inputs_[0] = string;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
- DECLARE_HYDROGEN_ACCESSOR(StringLength)
-
- LOperand* string() { return inputs_[0]; }
-};
-
-
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckFunction(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
-};
-
-
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckInstanceType(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-};
-
-
-class LCheckMap: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckMap(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
- DECLARE_HYDROGEN_ACCESSOR(CheckMap)
-};
-
-
-class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
- public:
- LCheckPrototypeMaps(LOperand* temp1, LOperand* temp2) {
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
-
- Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
- Handle<JSObject> holder() const { return hydrogen()->holder(); }
-};
-
-
-class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
-};
-
-
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckNonSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
-};
-
-
-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
-};
-
-
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
-};
-
-
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
- DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-
- Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
-};
-
-
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LTypeof(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LTypeofIs(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LTypeofIsAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
- DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
-};
-
-
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) {
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
-class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
- public:
- LDeleteProperty(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-};
-
-
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
- public:
- LOsrEntry();
-
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
- LOperand** SpilledRegisterArray() { return register_spills_; }
- LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand);
-
- private:
- // Arrays of spill slot operands for registers with an assigned spill
- // slot, i.e., that must also be restored to the spill slot on OSR entry.
- // NULL if the register has no assigned spill slot. Indexed by allocation
- // index.
- LOperand* register_spills_[Register::kNumAllocatableRegisters];
- LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
-};
-
-
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
-};
-
-
-class LChunkBuilder;
-class LChunk: public ZoneObject {
- public:
- explicit LChunk(CompilationInfo* info, HGraph* graph);
-
- void AddInstruction(LInstruction* instruction, HBasicBlock* block);
- LConstantOperand* DefineConstantOperand(HConstant* constant);
- Handle<Object> LookupLiteral(LConstantOperand* operand) const;
- Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
-
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
-
- int ParameterAt(int index);
- int GetParameterStackSlot(int index) const;
- int spill_slot_count() const { return spill_slot_count_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
- void AddGapMove(int index, LOperand* from, LOperand* to);
- LGap* GetGapAt(int index) const;
- bool IsGapAt(int index) const;
- int NearestGapPos(int index) const;
- void MarkEmptyBlocks();
- const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
- LLabel* GetLabel(int block_id) const {
- HBasicBlock* block = graph_->blocks()->at(block_id);
- int first_instruction = block->first_instruction_index();
- return LLabel::cast(instructions_[first_instruction]);
- }
- int LookupDestination(int block_id) const {
- LLabel* cur = GetLabel(block_id);
- while (cur->replacement() != NULL) {
- cur = cur->replacement();
- }
- return cur->block_id();
- }
- Label* GetAssemblyLabel(int block_id) const {
- LLabel* label = GetLabel(block_id);
- ASSERT(!label->HasReplacement());
- return label->label();
- }
-
- const ZoneList<Handle<JSFunction> >* inlined_closures() const {
- return &inlined_closures_;
- }
-
- void AddInlinedClosure(Handle<JSFunction> closure) {
- inlined_closures_.Add(closure);
- }
-
- private:
- int spill_slot_count_;
- CompilationInfo* info_;
- HGraph* const graph_;
- ZoneList<LInstruction*> instructions_;
- ZoneList<LPointerMap*> pointer_maps_;
- ZoneList<Handle<JSFunction> > inlined_closures_;
-};
-
-
-class LChunkBuilder BASE_EMBEDDED {
- public:
- LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : chunk_(NULL),
- info_(info),
- graph_(graph),
- status_(UNUSED),
- current_instruction_(NULL),
- current_block_(NULL),
- next_block_(NULL),
- argument_count_(0),
- allocator_(allocator),
- position_(RelocInfo::kNoPosition),
- instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
-
- // Build the sequence for the graph.
- LChunk* Build();
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- enum Status {
- UNUSED,
- BUILDING,
- DONE,
- ABORTED
- };
-
- LChunk* chunk() const { return chunk_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_building() const { return status_ == BUILDING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- void Abort(const char* format, ...);
-
- // Methods for getting operands for Use / Define / Temp.
- LRegister* ToOperand(Register reg);
- LUnallocated* ToUnallocated(Register reg);
- LUnallocated* ToUnallocated(DoubleRegister reg);
-
- // Methods for setting up define-use relationships.
- MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
- MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
- MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
- DoubleRegister fixed_register);
-
- // A value that is guaranteed to be allocated to a register.
- // Operand created by UseRegister is guaranteed to be live until the end of
- // instruction. This means that register allocator will not reuse it's
- // register for any other operand inside instruction.
- // Operand created by UseRegisterAtStart is guaranteed to be live only at
- // instruction start. Register allocator is free to assign the same register
- // to some other operand used inside instruction (i.e. temporary or
- // output).
- MUST_USE_RESULT LOperand* UseRegister(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
-
- // An input operand in a register that may be trashed.
- MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
-
- // An input operand in a register or stack slot.
- MUST_USE_RESULT LOperand* Use(HValue* value);
- MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
-
- // An input operand in a register, stack slot or a constant operand.
- MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
-
- // An input operand in a register or a constant operand.
- MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
-
- // An input operand in register, stack slot or a constant operand.
- // Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value);
-
- // Temporary operand that must be in a register.
- MUST_USE_RESULT LUnallocated* TempRegister();
- MUST_USE_RESULT LOperand* FixedTemp(Register reg);
- MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
-
- // Methods for setting up define-use relationships.
- // Return the same instruction that they are passed.
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result);
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
- int index);
- template<int I, int T>
- LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg);
- template<int I, int T>
- LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
- DoubleRegister reg);
- LInstruction* AssignEnvironment(LInstruction* instr);
- LInstruction* AssignPointerMap(LInstruction* instr);
-
- enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
-
- // By default we assume that instruction sequences generated for calls
- // cannot deoptimize eagerly and we do not attach environment to this
- // instruction.
- LInstruction* MarkAsCall(
- LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
- LInstruction* MarkAsSaveDoubles(LInstruction* instr);
-
- LInstruction* SetInstructionPendingDeoptimizationEnvironment(
- LInstruction* instr, int ast_id);
- void ClearInstructionPendingDeoptimizationEnvironment();
-
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
-
- void VisitInstruction(HInstruction* current);
-
- void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr);
- LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
-
- LChunk* chunk_;
- CompilationInfo* info_;
- HGraph* const graph_;
- Status status_;
- HInstruction* current_instruction_;
- HBasicBlock* current_block_;
- HBasicBlock* next_block_;
- int argument_count_;
- LAllocator* allocator_;
- int position_;
- LInstruction* instruction_pending_deoptimization_environment_;
- int pending_deoptimization_ast_id_;
-
- DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
-};
-
-#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_INSTRUCTION
-#undef DECLARE_CONCRETE_INSTRUCTION
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_LITHIUM_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/lithium-codegen-arm.cc b/src/3rdparty/v8/src/arm/lithium-codegen-arm.cc
deleted file mode 100644
index b214169..0000000
--- a/src/3rdparty/v8/src/arm/lithium-codegen-arm.cc
+++ /dev/null
@@ -1,4132 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "arm/lithium-codegen-arm.h"
-#include "arm/lithium-gap-resolver-arm.h"
-#include "code-stubs.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-class SafepointGenerator : public CallWrapper {
- public:
- SafepointGenerator(LCodeGen* codegen,
- LPointerMap* pointers,
- int deoptimization_index)
- : codegen_(codegen),
- pointers_(pointers),
- deoptimization_index_(deoptimization_index) { }
- virtual ~SafepointGenerator() { }
-
- virtual void BeforeCall(int call_size) {
- ASSERT(call_size >= 0);
- // Ensure that we have enough space after the previous safepoint position
- // for the generated code there.
- int call_end = codegen_->masm()->pc_offset() + call_size;
- int prev_jump_end =
- codegen_->LastSafepointEnd() + Deoptimizer::patch_size();
- if (call_end < prev_jump_end) {
- int padding_size = prev_jump_end - call_end;
- ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
- while (padding_size > 0) {
- codegen_->masm()->nop();
- padding_size -= Assembler::kInstrSize;
- }
- }
- }
-
- virtual void AfterCall() {
- codegen_->RecordSafepoint(pointers_, deoptimization_index_);
- }
-
- private:
- LCodeGen* codegen_;
- LPointerMap* pointers_;
- int deoptimization_index_;
-};
-
-
-#define __ masm()->
-
-bool LCodeGen::GenerateCode() {
- HPhase phase("Code generation", chunk());
- ASSERT(is_unused());
- status_ = GENERATING;
- CpuFeatures::Scope scope1(VFP3);
- CpuFeatures::Scope scope2(ARMv7);
- return GeneratePrologue() &&
- GenerateBody() &&
- GenerateDeferredCode() &&
- GenerateSafepointTable();
-}
-
-
-void LCodeGen::FinishCode(Handle<Code> code) {
- ASSERT(is_done());
- code->set_stack_slots(StackSlotCount());
- code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- PopulateDeoptimizationData(code);
- Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
-}
-
-
-void LCodeGen::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LCodeGen in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
- status_ = ABORTED;
-}
-
-
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- size_t length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
-}
-
-
-bool LCodeGen::GeneratePrologue() {
- ASSERT(is_generating());
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop_at");
- }
-#endif
-
- // r1: Callee's JS function.
- // cp: Callee's context.
- // fp: Caller's frame pointer.
- // lr: Caller's pc.
-
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- __ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP.
-
- // Reserve space for the stack slots needed by the code.
- int slots = StackSlotCount();
- if (slots > 0) {
- if (FLAG_debug_code) {
- __ mov(r0, Operand(slots));
- __ mov(r2, Operand(kSlotsZapValue));
- Label loop;
- __ bind(&loop);
- __ push(r2);
- __ sub(r0, r0, Operand(1), SetCC);
- __ b(ne, &loop);
- } else {
- __ sub(sp, sp, Operand(slots * kPointerSize));
- }
- }
-
- // Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment(";;; Allocate local context");
- // Argument to NewContext is the function, which is in r1.
- __ push(r1);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewContext, 1);
- }
- RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
- // Context is returned in both r0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Slot* slot = scope()->parameter(i)->AsSlot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ ldr(r0, MemOperand(fp, parameter_offset));
- // Store it in the context.
- __ mov(r1, Operand(Context::SlotOffset(slot->index())));
- __ str(r0, MemOperand(cp, r1));
- // Update the write barrier. This clobbers all involved
- // registers, so we have to use two more registers to avoid
- // clobbering cp.
- __ mov(r2, Operand(cp));
- __ RecordWrite(r2, Operand(r1), r3, r0);
- }
- }
- Comment(";;; End allocate local context");
- }
-
- // Trace the call.
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
- if (instr->IsLabel()) {
- LLabel* label = LLabel::cast(instr);
- emit_instructions = !label->HasReplacement();
- }
-
- if (emit_instructions) {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
- instr->CompileToNative(this);
- }
- }
- return !is_aborted();
-}
-
-
-LInstruction* LCodeGen::GetNextInstruction() {
- if (current_instruction_ < instructions_->length() - 1) {
- return instructions_->at(current_instruction_ + 1);
- } else {
- return NULL;
- }
-}
-
-
-bool LCodeGen::GenerateDeferredCode() {
- ASSERT(is_generating());
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
- __ bind(code->entry());
- code->Generate();
- __ jmp(code->exit());
- }
-
- // Force constant pool emission at the end of deferred code to make
- // sure that no constant pools are emitted after the official end of
- // the instruction sequence.
- masm()->CheckConstPool(true, false);
-
- // Deferred code is the last part of the instruction sequence. Mark
- // the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateSafepointTable() {
- ASSERT(is_done());
- safepoints_.Emit(masm(), StackSlotCount());
- return !is_aborted();
-}
-
-
-Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
-}
-
-
-DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
- return DoubleRegister::FromAllocationIndex(index);
-}
-
-
-Register LCodeGen::ToRegister(LOperand* op) const {
- ASSERT(op->IsRegister());
- return ToRegister(op->index());
-}
-
-
-Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
- if (op->IsRegister()) {
- return ToRegister(op->index());
- } else if (op->IsConstantOperand()) {
- __ mov(scratch, ToOperand(op));
- return scratch;
- } else if (op->IsStackSlot() || op->IsArgument()) {
- __ ldr(scratch, ToMemOperand(op));
- return scratch;
- }
- UNREACHABLE();
- return scratch;
-}
-
-
-DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
- ASSERT(op->IsDoubleRegister());
- return ToDoubleRegister(op->index());
-}
-
-
-DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
- SwVfpRegister flt_scratch,
- DoubleRegister dbl_scratch) {
- if (op->IsDoubleRegister()) {
- return ToDoubleRegister(op->index());
- } else if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
- __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
- __ vmov(flt_scratch, ip);
- __ vcvt_f64_s32(dbl_scratch, flt_scratch);
- return dbl_scratch;
- } else if (r.IsDouble()) {
- Abort("unsupported double immediate");
- } else if (r.IsTagged()) {
- Abort("unsupported tagged immediate");
- }
- } else if (op->IsStackSlot() || op->IsArgument()) {
- // TODO(regis): Why is vldr not taking a MemOperand?
- // __ vldr(dbl_scratch, ToMemOperand(op));
- MemOperand mem_op = ToMemOperand(op);
- __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
- return dbl_scratch;
- }
- UNREACHABLE();
- return dbl_scratch;
-}
-
-
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
- Handle<Object> value = chunk_->LookupLiteral(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
- ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
- value->Number());
- return static_cast<int32_t>(value->Number());
-}
-
-
-Operand LCodeGen::ToOperand(LOperand* op) {
- if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
- return Operand(static_cast<int32_t>(literal->Number()));
- } else if (r.IsDouble()) {
- Abort("ToOperand Unsupported double immediate.");
- }
- ASSERT(r.IsTagged());
- return Operand(literal);
- } else if (op->IsRegister()) {
- return Operand(ToRegister(op));
- } else if (op->IsDoubleRegister()) {
- Abort("ToOperand IsDoubleRegister unimplemented");
- return Operand(0);
- }
- // Stack slots not implemented, use ToMemOperand instead.
- UNREACHABLE();
- return Operand(0);
-}
-
-
-MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
- ASSERT(!op->IsRegister());
- ASSERT(!op->IsDoubleRegister());
- ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- int index = op->index();
- if (index >= 0) {
- // Local or spill slot. Skip the frame pointer, function, and
- // context in the fixed part of the frame.
- return MemOperand(fp, -(index + 3) * kPointerSize);
- } else {
- // Incoming parameter. Skip the return address.
- return MemOperand(fp, -(index - 1) * kPointerSize);
- }
-}
-
-
-MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
- ASSERT(op->IsDoubleStackSlot());
- int index = op->index();
- if (index >= 0) {
- // Local or spill slot. Skip the frame pointer, function, context,
- // and the first word of the double in the fixed part of the frame.
- return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
- } else {
- // Incoming parameter. Skip the return address and the first word of
- // the double.
- return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
- }
-}
-
-
-void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation) {
- if (environment == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = environment->values()->length();
- // The output frame height does not include the parameters.
- int height = translation_size - environment->parameter_count();
-
- WriteTranslation(environment->outer(), translation);
- int closure_id = DefineDeoptimizationLiteral(environment->closure());
- translation->BeginFrame(environment->ast_id(), closure_id, height);
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = environment->values()->at(i);
- // spilled_registers_ and spilled_double_registers_ are either
- // both NULL or both set.
- if (environment->spilled_registers() != NULL && value != NULL) {
- if (value->IsRegister() &&
- environment->spilled_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(translation,
- environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i));
- } else if (
- value->IsDoubleRegister() &&
- environment->spilled_double_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(
- translation,
- environment->spilled_double_registers()[value->index()],
- false);
- }
- }
-
- AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
- }
-}
-
-
-void LCodeGen::AddToTranslation(Translation* translation,
- LOperand* op,
- bool is_tagged) {
- if (op == NULL) {
- // TODO(twuerthinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed from the deoptimizer. Currently
- // this is only used for the arguments object.
- translation->StoreArgumentsObject();
- } else if (op->IsStackSlot()) {
- if (is_tagged) {
- translation->StoreStackSlot(op->index());
- } else {
- translation->StoreInt32StackSlot(op->index());
- }
- } else if (op->IsDoubleStackSlot()) {
- translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = StackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
- } else if (op->IsRegister()) {
- Register reg = ToRegister(op);
- if (is_tagged) {
- translation->StoreRegister(reg);
- } else {
- translation->StoreInt32Register(reg);
- }
- } else if (op->IsDoubleRegister()) {
- DoubleRegister reg = ToDoubleRegister(op);
- translation->StoreDoubleRegister(reg);
- } else if (op->IsConstantOperand()) {
- Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(literal);
- translation->StoreLiteral(src_index);
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr) {
- ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- __ Call(code, mode);
- RegisterLazyDeoptimization(instr);
-}
-
-
-void LCodeGen::CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr) {
- ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- ASSERT(pointers != NULL);
- RecordPosition(pointers->position());
-
- __ CallRuntime(function, num_arguments);
- RegisterLazyDeoptimization(instr);
-}
-
-
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
- // Create the environment to bailout to. If the call has side effects
- // execution has to continue after the call otherwise execution can continue
- // from a previous bailout point repeating the call.
- LEnvironment* deoptimization_environment;
- if (instr->HasDeoptimizationEnvironment()) {
- deoptimization_environment = instr->deoptimization_environment();
- } else {
- deoptimization_environment = instr->environment();
- }
-
- RegisterEnvironmentForDeoptimization(deoptimization_environment);
- RecordSafepoint(instr->pointer_map(),
- deoptimization_environment->deoptimization_index());
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
- if (!environment->HasBeenRegistered()) {
- // Physical stack frame layout:
- // -x ............. -4 0 ..................................... y
- // [incoming arguments] [spill slots] [pushed outgoing arguments]
-
- // Layout of the environment:
- // 0 ..................................................... size-1
- // [parameters] [locals] [expression stack including arguments]
-
- // Layout of the translation:
- // 0 ........................................................ size - 1 + 4
- // [expression stack including arguments] [locals] [4 words] [parameters]
- // |>------------ translation_size ------------<|
-
- int frame_count = 0;
- for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
- ++frame_count;
- }
- Translation translation(&translations_, frame_count);
- WriteTranslation(environment, &translation);
- int deoptimization_index = deoptimizations_.length();
- environment->Register(deoptimization_index, translation.index());
- deoptimizations_.Add(environment);
- }
-}
-
-
-void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
- RegisterEnvironmentForDeoptimization(environment);
- ASSERT(environment->HasBeenRegistered());
- int id = environment->deoptimization_index();
- Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
- ASSERT(entry != NULL);
- if (entry == NULL) {
- Abort("bailout was not prepared");
- return;
- }
-
- ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM.
-
- if (FLAG_deopt_every_n_times == 1 &&
- info_->shared_info()->opt_count() == id) {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
- return;
- }
-
- if (cc == al) {
- if (FLAG_trap_on_deopt) __ stop("trap_on_deopt");
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- if (FLAG_trap_on_deopt) {
- Label done;
- __ b(&done, NegateCondition(cc));
- __ stop("trap_on_deopt");
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
- __ bind(&done);
- } else {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc);
- }
- }
-}
-
-
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- ASSERT(FLAG_deopt);
- Handle<DeoptimizationInputData> data =
- factory()->NewDeoptimizationInputData(length, TENURED);
-
- Handle<ByteArray> translations = translations_.CreateByteArray();
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, Smi::FromInt(env->ast_id()));
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
- int result = deoptimization_literals_.length();
- for (int i = 0; i < deoptimization_literals_.length(); ++i) {
- if (deoptimization_literals_[i].is_identical_to(literal)) return i;
- }
- deoptimization_literals_.Add(literal);
- return result;
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- ASSERT(deoptimization_literals_.length() == 0);
-
- const ZoneList<Handle<JSFunction> >* inlined_closures =
- chunk()->inlined_closures();
-
- for (int i = 0, length = inlined_closures->length();
- i < length;
- i++) {
- DefineDeoptimizationLiteral(inlined_closures->at(i));
- }
-
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
-void LCodeGen::RecordSafepoint(
- LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- int deoptimization_index) {
- const ZoneList<LOperand*>* operands = pointers->operands();
- Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
- kind, arguments, deoptimization_index);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index());
- } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer));
- }
- }
- if (kind & Safepoint::kWithRegisters) {
- // Register cp always contains a pointer to the context.
- safepoint.DefinePointerRegister(cp);
- }
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- int deoptimization_index) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
-}
-
-
-void LCodeGen::RecordSafepoint(int deoptimization_index) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition);
- RecordSafepoint(&empty_pointers, deoptimization_index);
-}
-
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- int deoptimization_index) {
- RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
- deoptimization_index);
-}
-
-
-void LCodeGen::RecordSafepointWithRegistersAndDoubles(
- LPointerMap* pointers,
- int arguments,
- int deoptimization_index) {
- RecordSafepoint(pointers, Safepoint::kWithRegistersAndDoubles, arguments,
- deoptimization_index);
-}
-
-
-void LCodeGen::RecordPosition(int position) {
- if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
- masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::DoLabel(LLabel* label) {
- if (label->is_loop_header()) {
- Comment(";;; B%d - LOOP entry", label->block_id());
- } else {
- Comment(";;; B%d", label->block_id());
- }
- __ bind(label->label());
- current_block_ = label->block_id();
- LCodeGen::DoGap(label);
-}
-
-
-void LCodeGen::DoParallelMove(LParallelMove* move) {
- resolver_.Resolve(move);
-}
-
-
-void LCodeGen::DoGap(LGap* gap) {
- for (int i = LGap::FIRST_INNER_POSITION;
- i <= LGap::LAST_INNER_POSITION;
- i++) {
- LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
- LParallelMove* move = gap->GetParallelMove(inner_pos);
- if (move != NULL) DoParallelMove(move);
- }
-
- LInstruction* next = GetNextInstruction();
- if (next != NULL && next->IsLazyBailout()) {
- int pc = masm()->pc_offset();
- safepoints_.SetPcAfterGap(pc);
- }
-}
-
-
-void LCodeGen::DoParameter(LParameter* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoCallStub(LCallStub* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
- switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpConstructResult: {
- RegExpConstructResultStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::RegExpExec: {
- RegExpExecStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::SubString: {
- SubStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringAdd: {
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringCompare: {
- StringCompareStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::TranscendentalCache: {
- __ ldr(r0, MemOperand(sp, 0));
- TranscendentalCacheStub stub(instr->transcendental_type(),
- TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->InputAt(0));
-
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
-
- if (divisor < 0) divisor = -divisor;
-
- Label positive_dividend, done;
- __ cmp(dividend, Operand(0));
- __ b(pl, &positive_dividend);
- __ rsb(dividend, dividend, Operand(0));
- __ and_(dividend, dividend, Operand(divisor - 1));
- __ rsb(dividend, dividend, Operand(0), SetCC);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ b(ne, &done);
- DeoptimizeIf(al, instr->environment());
- }
- __ bind(&positive_dividend);
- __ and_(dividend, dividend, Operand(divisor - 1));
- __ bind(&done);
- return;
- }
-
- // These registers hold untagged 32 bit values.
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- Register result = ToRegister(instr->result());
-
- Register scratch = scratch0();
- Register scratch2 = ToRegister(instr->TempAt(0));
- DwVfpRegister dividend = ToDoubleRegister(instr->TempAt(1));
- DwVfpRegister divisor = ToDoubleRegister(instr->TempAt(2));
- DwVfpRegister quotient = double_scratch0();
-
- ASSERT(result.is(left));
-
- ASSERT(!dividend.is(divisor));
- ASSERT(!dividend.is(quotient));
- ASSERT(!divisor.is(quotient));
- ASSERT(!scratch.is(left));
- ASSERT(!scratch.is(right));
- ASSERT(!scratch.is(result));
-
- Label done, vfp_modulo, both_positive, right_negative;
-
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand(0));
- DeoptimizeIf(eq, instr->environment());
- }
-
- // (0 % x) must yield 0 (if x is finite, which is the case here).
- __ cmp(left, Operand(0));
- __ b(eq, &done);
- // Preload right in a vfp register.
- __ vmov(divisor.low(), right);
- __ b(lt, &vfp_modulo);
-
- __ cmp(left, Operand(right));
- __ b(lt, &done);
-
- // Check for (positive) power of two on the right hand side.
- __ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
- scratch,
- &right_negative,
- &both_positive);
- // Perform modulo operation (scratch contains right - 1).
- __ and_(result, scratch, Operand(left));
- __ b(&done);
-
- __ bind(&right_negative);
- // Negate right. The sign of the divisor does not matter.
- __ rsb(right, right, Operand(0));
-
- __ bind(&both_positive);
- const int kUnfolds = 3;
- // If the right hand side is smaller than the (nonnegative)
- // left hand side, the left hand side is the result.
- // Else try a few subtractions of the left hand side.
- __ mov(scratch, left);
- for (int i = 0; i < kUnfolds; i++) {
- // Check if the left hand side is less or equal than the
- // the right hand side.
- __ cmp(scratch, Operand(right));
- __ mov(result, scratch, LeaveCC, lt);
- __ b(lt, &done);
- // If not, reduce the left hand side by the right hand
- // side and check again.
- if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
- }
-
- __ bind(&vfp_modulo);
- // Load the arguments in VFP registers.
- // The divisor value is preloaded before. Be careful that 'right' is only live
- // on entry.
- __ vmov(dividend.low(), left);
- // From here on don't use right as it may have been reallocated (for example
- // to scratch2).
- right = no_reg;
-
- __ vcvt_f64_s32(dividend, dividend.low());
- __ vcvt_f64_s32(divisor, divisor.low());
-
- // We do not care about the sign of the divisor.
- __ vabs(divisor, divisor);
- // Compute the quotient and round it to a 32bit integer.
- __ vdiv(quotient, dividend, divisor);
- __ vcvt_s32_f64(quotient.low(), quotient);
- __ vcvt_f64_s32(quotient, quotient.low());
-
- // Compute the remainder in result.
- DwVfpRegister double_scratch = dividend;
- __ vmul(double_scratch, divisor, quotient);
- __ vcvt_s32_f64(double_scratch.low(), double_scratch);
- __ vmov(scratch, double_scratch.low());
-
- if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ sub(result, left, scratch);
- } else {
- Label ok;
- // Check for -0.
- __ sub(scratch2, left, scratch, SetCC);
- __ b(ne, &ok);
- __ cmp(left, Operand(0));
- DeoptimizeIf(mi, instr->environment());
- __ bind(&ok);
- // Load the result and we are done.
- __ mov(result, scratch2);
- }
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDivI(LDivI* instr) {
- class DeferredDivI: public LDeferredCode {
- public:
- DeferredDivI(LCodeGen* codegen, LDivI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
- }
- private:
- LDivI* instr_;
- };
-
- const Register left = ToRegister(instr->InputAt(0));
- const Register right = ToRegister(instr->InputAt(1));
- const Register scratch = scratch0();
- const Register result = ToRegister(instr->result());
-
- // Check for x / 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand(0));
- DeoptimizeIf(eq, instr->environment());
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ cmp(left, Operand(0));
- __ b(ne, &left_not_zero);
- __ cmp(right, Operand(0));
- DeoptimizeIf(mi, instr->environment());
- __ bind(&left_not_zero);
- }
-
- // Check for (-kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmp(left, Operand(kMinInt));
- __ b(ne, &left_not_min_int);
- __ cmp(right, Operand(-1));
- DeoptimizeIf(eq, instr->environment());
- __ bind(&left_not_min_int);
- }
-
- Label done, deoptimize;
- // Test for a few common cases first.
- __ cmp(right, Operand(1));
- __ mov(result, left, LeaveCC, eq);
- __ b(eq, &done);
-
- __ cmp(right, Operand(2));
- __ tst(left, Operand(1), eq);
- __ mov(result, Operand(left, ASR, 1), LeaveCC, eq);
- __ b(eq, &done);
-
- __ cmp(right, Operand(4));
- __ tst(left, Operand(3), eq);
- __ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
- __ b(eq, &done);
-
- // Call the stub. The numbers in r0 and r1 have
- // to be tagged to Smis. If that is not possible, deoptimize.
- DeferredDivI* deferred = new DeferredDivI(this, instr);
-
- __ TrySmiTag(left, &deoptimize, scratch);
- __ TrySmiTag(right, &deoptimize, scratch);
-
- __ b(al, deferred->entry());
- __ bind(deferred->exit());
-
- // If the result in r0 is a Smi, untag it, else deoptimize.
- __ JumpIfNotSmi(result, &deoptimize);
- __ SmiUntag(result);
- __ b(&done);
-
- __ bind(&deoptimize);
- DeoptimizeIf(al, instr->environment());
- __ bind(&done);
-}
-
-
-template<int T>
-void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
- Token::Value op) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
-
- __ PushSafepointRegistersAndDoubles();
- // Move left to r1 and right to r0 for the stub call.
- if (left.is(r1)) {
- __ Move(r0, right);
- } else if (left.is(r0) && right.is(r1)) {
- __ Swap(r0, r1, r2);
- } else if (left.is(r0)) {
- ASSERT(!right.is(r1));
- __ mov(r1, r0);
- __ mov(r0, right);
- } else {
- ASSERT(!left.is(r0) && !right.is(r0));
- __ mov(r0, right);
- __ mov(r1, left);
- }
- TypeRecordingBinaryOpStub stub(op, OVERWRITE_LEFT);
- __ CallStub(&stub);
- RecordSafepointWithRegistersAndDoubles(instr->pointer_map(),
- 0,
- Safepoint::kNoDeoptimizationIndex);
- // Overwrite the stored value of r0 with the result of the stub.
- __ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
- __ PopSafepointRegistersAndDoubles();
-}
-
-
-void LCodeGen::DoMulI(LMulI* instr) {
- Register scratch = scratch0();
- Register left = ToRegister(instr->InputAt(0));
- Register right = EmitLoadRegister(instr->InputAt(1), scratch);
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero) &&
- !instr->InputAt(1)->IsConstantOperand()) {
- __ orr(ToRegister(instr->TempAt(0)), left, right);
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- // scratch:left = left * right.
- __ smull(left, scratch, left, right);
- __ mov(ip, Operand(left, ASR, 31));
- __ cmp(ip, Operand(scratch));
- DeoptimizeIf(ne, instr->environment());
- } else {
- __ mul(left, left, right);
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Bail out if the result is supposed to be negative zero.
- Label done;
- __ cmp(left, Operand(0));
- __ b(ne, &done);
- if (instr->InputAt(1)->IsConstantOperand()) {
- if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) <= 0) {
- DeoptimizeIf(al, instr->environment());
- }
- } else {
- // Test the non-zero operand for negative sign.
- __ cmp(ToRegister(instr->TempAt(0)), Operand(0));
- DeoptimizeIf(mi, instr->environment());
- }
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
- ASSERT(left->IsRegister());
- Register result = ToRegister(left);
- Operand right_operand(no_reg);
-
- if (right->IsStackSlot() || right->IsArgument()) {
- Register right_reg = EmitLoadRegister(right, ip);
- right_operand = Operand(right_reg);
- } else {
- ASSERT(right->IsRegister() || right->IsConstantOperand());
- right_operand = ToOperand(right);
- }
-
- switch (instr->op()) {
- case Token::BIT_AND:
- __ and_(result, ToRegister(left), right_operand);
- break;
- case Token::BIT_OR:
- __ orr(result, ToRegister(left), right_operand);
- break;
- case Token::BIT_XOR:
- __ eor(result, ToRegister(left), right_operand);
- break;
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
- Register scratch = scratch0();
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
- ASSERT(left->IsRegister());
- Register result = ToRegister(left);
- if (right->IsRegister()) {
- // Mask the right operand.
- __ and_(scratch, ToRegister(right), Operand(0x1F));
- switch (instr->op()) {
- case Token::SAR:
- __ mov(result, Operand(result, ASR, scratch));
- break;
- case Token::SHR:
- if (instr->can_deopt()) {
- __ mov(result, Operand(result, LSR, scratch), SetCC);
- DeoptimizeIf(mi, instr->environment());
- } else {
- __ mov(result, Operand(result, LSR, scratch));
- }
- break;
- case Token::SHL:
- __ mov(result, Operand(result, LSL, scratch));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- int value = ToInteger32(LConstantOperand::cast(right));
- uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
- switch (instr->op()) {
- case Token::SAR:
- if (shift_count != 0) {
- __ mov(result, Operand(result, ASR, shift_count));
- }
- break;
- case Token::SHR:
- if (shift_count == 0 && instr->can_deopt()) {
- __ tst(result, Operand(0x80000000));
- DeoptimizeIf(ne, instr->environment());
- } else {
- __ mov(result, Operand(result, LSR, shift_count));
- }
- break;
- case Token::SHL:
- if (shift_count != 0) {
- __ mov(result, Operand(result, LSL, shift_count));
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- SBit set_cond = can_overflow ? SetCC : LeaveCC;
-
- if (right->IsStackSlot() || right->IsArgument()) {
- Register right_reg = EmitLoadRegister(right, ip);
- __ sub(ToRegister(left), ToRegister(left), Operand(right_reg), set_cond);
- } else {
- ASSERT(right->IsRegister() || right->IsConstantOperand());
- __ sub(ToRegister(left), ToRegister(left), ToOperand(right), set_cond);
- }
-
- if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
- }
-}
-
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
- ASSERT(instr->result()->IsRegister());
- __ mov(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
- ASSERT(instr->result()->IsDoubleRegister());
- DwVfpRegister result = ToDoubleRegister(instr->result());
- double v = instr->value();
- __ vmov(result, v);
-}
-
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
- ASSERT(instr->result()->IsRegister());
- __ mov(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-
-void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
- __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset));
-}
-
-
-void LCodeGen::DoExternalArrayLength(LExternalArrayLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
- __ ldr(result, FieldMemOperand(array, ExternalArray::kLengthOffset));
-}
-
-
-void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
- __ ldr(result, FieldMemOperand(array, FixedArray::kLengthOffset));
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->TempAt(0));
- ASSERT(input.is(result));
- Label done;
-
- // If the object is a smi return the object.
- __ tst(input, Operand(kSmiTagMask));
- __ b(eq, &done);
-
- // If the object is not a value type, return the object.
- __ CompareObjectType(input, map, map, JS_VALUE_TYPE);
- __ b(ne, &done);
- __ ldr(result, FieldMemOperand(input, JSValue::kValueOffset));
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoBitNotI(LBitNotI* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->Equals(instr->result()));
- __ mvn(ToRegister(input), Operand(ToRegister(input)));
-}
-
-
-void LCodeGen::DoThrow(LThrow* instr) {
- Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
- __ push(input_reg);
- CallRuntime(Runtime::kThrow, 1, instr);
-
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
-}
-
-
-void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- SBit set_cond = can_overflow ? SetCC : LeaveCC;
-
- if (right->IsStackSlot() || right->IsArgument()) {
- Register right_reg = EmitLoadRegister(right, ip);
- __ add(ToRegister(left), ToRegister(left), Operand(right_reg), set_cond);
- } else {
- ASSERT(right->IsRegister() || right->IsConstantOperand());
- __ add(ToRegister(left), ToRegister(left), ToOperand(right), set_cond);
- }
-
- if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
- }
-}
-
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
- DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
- switch (instr->op()) {
- case Token::ADD:
- __ vadd(left, left, right);
- break;
- case Token::SUB:
- __ vsub(left, left, right);
- break;
- case Token::MUL:
- __ vmul(left, left, right);
- break;
- case Token::DIV:
- __ vdiv(left, left, right);
- break;
- case Token::MOD: {
- // Save r0-r3 on the stack.
- __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
-
- __ PrepareCallCFunction(4, scratch0());
- __ vmov(r0, r1, left);
- __ vmov(r2, r3, right);
- __ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()), 4);
- // Move the result in the double result register.
- __ GetCFunctionDoubleResult(ToDoubleRegister(instr->result()));
-
- // Restore r0-r3.
- __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(r1));
- ASSERT(ToRegister(instr->InputAt(1)).is(r0));
- ASSERT(ToRegister(instr->result()).is(r0));
-
- TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-int LCodeGen::GetNextEmittedBlock(int block) {
- for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
- LLabel* label = chunk_->GetLabel(i);
- if (!label->HasReplacement()) return i;
- }
- return -1;
-}
-
-
-void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
- int next_block = GetNextEmittedBlock(current_block_);
- right_block = chunk_->LookupDestination(right_block);
- left_block = chunk_->LookupDestination(left_block);
-
- if (right_block == left_block) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- __ b(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
- } else if (right_block == next_block) {
- __ b(cc, chunk_->GetAssemblyLabel(left_block));
- } else {
- __ b(cc, chunk_->GetAssemblyLabel(left_block));
- __ b(chunk_->GetAssemblyLabel(right_block));
- }
-}
-
-
-void LCodeGen::DoBranch(LBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Representation r = instr->hydrogen()->representation();
- if (r.IsInteger32()) {
- Register reg = ToRegister(instr->InputAt(0));
- __ cmp(reg, Operand(0));
- EmitBranch(true_block, false_block, ne);
- } else if (r.IsDouble()) {
- DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
- Register scratch = scratch0();
-
- // Test the double value. Zero and NaN are false.
- __ VFPCompareAndLoadFlags(reg, 0.0, scratch);
- __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
- EmitBranch(true_block, false_block, ne);
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->InputAt(0));
- if (instr->hydrogen()->type().IsBoolean()) {
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(reg, ip);
- EmitBranch(true_block, false_block, eq);
- } else {
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(reg, ip);
- __ b(eq, false_label);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(reg, ip);
- __ b(eq, true_label);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(reg, ip);
- __ b(eq, false_label);
- __ cmp(reg, Operand(0));
- __ b(eq, false_label);
- __ tst(reg, Operand(kSmiTagMask));
- __ b(eq, true_label);
-
- // Test double values. Zero and NaN are false.
- Label call_stub;
- DoubleRegister dbl_scratch = d0;
- Register scratch = scratch0();
- __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, Operand(ip));
- __ b(ne, &call_stub);
- __ sub(ip, reg, Operand(kHeapObjectTag));
- __ vldr(dbl_scratch, ip, HeapNumber::kValueOffset);
- __ VFPCompareAndLoadFlags(dbl_scratch, 0.0, scratch);
- __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
- __ b(ne, false_label);
- __ b(true_label);
-
- // The conversion stub doesn't cause garbage collections so it's
- // safe to not record a safepoint after the call.
- __ bind(&call_stub);
- ToBooleanStub stub(reg);
- RegList saved_regs = kJSCallerSaved | kCalleeSaved;
- __ stm(db_w, sp, saved_regs);
- __ CallStub(&stub);
- __ cmp(reg, Operand(0));
- __ ldm(ia_w, sp, saved_regs);
- EmitBranch(true_block, false_block, ne);
- }
- }
-}
-
-
-void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
- block = chunk_->LookupDestination(block);
- int next_block = GetNextEmittedBlock(current_block_);
- if (block != next_block) {
- // Perform stack overflow check if this goto needs it before jumping.
- if (deferred_stack_check != NULL) {
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, chunk_->GetAssemblyLabel(block));
- __ jmp(deferred_stack_check->entry());
- deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
- } else {
- __ jmp(chunk_->GetAssemblyLabel(block));
- }
- }
-}
-
-
-void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
- __ PushSafepointRegisters();
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
- __ PopSafepointRegisters();
-}
-
-
-void LCodeGen::DoGoto(LGoto* instr) {
- class DeferredStackCheck: public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- private:
- LGoto* instr_;
- };
-
- DeferredStackCheck* deferred = NULL;
- if (instr->include_stack_check()) {
- deferred = new DeferredStackCheck(this, instr);
- }
- EmitGoto(instr->block_id(), deferred);
-}
-
-
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
- Condition cond = kNoCondition;
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT:
- cond = eq;
- break;
- case Token::LT:
- cond = is_unsigned ? lo : lt;
- break;
- case Token::GT:
- cond = is_unsigned ? hi : gt;
- break;
- case Token::LTE:
- cond = is_unsigned ? ls : le;
- break;
- case Token::GTE:
- cond = is_unsigned ? hs : ge;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
- return cond;
-}
-
-
-void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
- __ cmp(ToRegister(left), ToRegister(right));
-}
-
-
-void LCodeGen::DoCmpID(LCmpID* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- LOperand* result = instr->result();
- Register scratch = scratch0();
-
- Label unordered, done;
- if (instr->is_double()) {
- // Compare left and right as doubles and load the
- // resulting flags into the normal status register.
- __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
- // If a NaN is involved, i.e. the result is unordered (V set),
- // jump to unordered to return false.
- __ b(vs, &unordered);
- } else {
- EmitCmpI(left, right);
- }
-
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
- __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
- __ b(cc, &done);
-
- __ bind(&unordered);
- __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- if (instr->is_double()) {
- // Compare left and right as doubles and load the
- // resulting flags into the normal status register.
- __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
- // If a NaN is involved, i.e. the result is unordered (V set),
- // jump to false block label.
- __ b(vs, chunk_->GetAssemblyLabel(false_block));
- } else {
- EmitCmpI(left, right);
- }
-
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
- EmitBranch(true_block, false_block, cc);
-}
-
-
-void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- Register result = ToRegister(instr->result());
-
- __ cmp(left, Operand(right));
- __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
- __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
-}
-
-
-void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- __ cmp(left, Operand(right));
- EmitBranch(true_block, false_block, eq);
-}
-
-
-void LCodeGen::DoIsNull(LIsNull* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(reg, ip);
- if (instr->is_strict()) {
- __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
- __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
- } else {
- Label true_value, false_value, done;
- __ b(eq, &true_value);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(ip, reg);
- __ b(eq, &true_value);
- __ tst(reg, Operand(kSmiTagMask));
- __ b(eq, &false_value);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- Register scratch = result;
- __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsUndetectable));
- __ b(ne, &true_value);
- __ bind(&false_value);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ jmp(&done);
- __ bind(&true_value);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
- Register scratch = scratch0();
- Register reg = ToRegister(instr->InputAt(0));
-
- // TODO(fsc): If the expression is known to be a smi, then it's
- // definitely not null. Jump to the false block.
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(reg, ip);
- if (instr->is_strict()) {
- EmitBranch(true_block, false_block, eq);
- } else {
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ b(eq, true_label);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(reg, ip);
- __ b(eq, true_label);
- __ tst(reg, Operand(kSmiTagMask));
- __ b(eq, false_label);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, ne);
- }
-}
-
-
-Condition LCodeGen::EmitIsObject(Register input,
- Register temp1,
- Register temp2,
- Label* is_not_object,
- Label* is_object) {
- __ JumpIfSmi(input, is_not_object);
-
- __ LoadRoot(temp1, Heap::kNullValueRootIndex);
- __ cmp(input, temp1);
- __ b(eq, is_object);
-
- // Load map.
- __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined.
- __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
- __ tst(temp2, Operand(1 << Map::kIsUndetectable));
- __ b(ne, is_not_object);
-
- // Load instance type and check that it is in object type range.
- __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
- __ cmp(temp2, Operand(FIRST_JS_OBJECT_TYPE));
- __ b(lt, is_not_object);
- __ cmp(temp2, Operand(LAST_JS_OBJECT_TYPE));
- return le;
-}
-
-
-void LCodeGen::DoIsObject(LIsObject* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Register temp = scratch0();
- Label is_false, is_true, done;
-
- Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
- __ b(true_cond, &is_true);
-
- __ bind(&is_false);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ b(&done);
-
- __ bind(&is_true);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register temp1 = ToRegister(instr->TempAt(0));
- Register temp2 = scratch0();
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition true_cond =
- EmitIsObject(reg, temp1, temp2, false_label, true_label);
-
- EmitBranch(true_block, false_block, true_cond);
-}
-
-
-void LCodeGen::DoIsSmi(LIsSmi* instr) {
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- Register result = ToRegister(instr->result());
- Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
- __ tst(input_reg, Operand(kSmiTagMask));
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- Label done;
- __ b(eq, &done);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
- __ tst(input_reg, Operand(kSmiTagMask));
- EmitBranch(true_block, false_block, eq);
-}
-
-
-static InstanceType TestType(HHasInstanceType* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == FIRST_TYPE) return to;
- ASSERT(from == to || to == LAST_TYPE);
- return from;
-}
-
-
-static Condition BranchCondition(HHasInstanceType* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == to) return eq;
- if (to == LAST_TYPE) return hs;
- if (from == FIRST_TYPE) return ls;
- UNREACHABLE();
- return eq;
-}
-
-
-void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- Label done;
- __ tst(input, Operand(kSmiTagMask));
- __ LoadRoot(result, Heap::kFalseValueRootIndex, eq);
- __ b(eq, &done);
- __ CompareObjectType(input, result, result, TestType(instr->hydrogen()));
- Condition cond = BranchCondition(instr->hydrogen());
- __ LoadRoot(result, Heap::kTrueValueRootIndex, cond);
- __ LoadRoot(result, Heap::kFalseValueRootIndex, NegateCondition(cond));
- __ bind(&done);
-}
-
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register scratch = scratch0();
- Register input = ToRegister(instr->InputAt(0));
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- __ tst(input, Operand(kSmiTagMask));
- __ b(eq, false_label);
-
- __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
- EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
-}
-
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- if (FLAG_debug_code) {
- __ AbortIfNotString(input);
- }
-
- __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
- __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ ldr(scratch,
- FieldMemOperand(input, String::kHashFieldOffset));
- __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
- __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
- __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
- LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register scratch = scratch0();
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ ldr(scratch,
- FieldMemOperand(input, String::kHashFieldOffset));
- __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, eq);
-}
-
-
-// Branches to a label or falls through with the answer in flags. Trashes
-// the temp registers, but not the input. Only input and temp2 may alias.
-void LCodeGen::EmitClassOfTest(Label* is_true,
- Label* is_false,
- Handle<String>class_name,
- Register input,
- Register temp,
- Register temp2) {
- ASSERT(!input.is(temp));
- ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
- __ tst(input, Operand(kSmiTagMask));
- __ b(eq, is_false);
- __ CompareObjectType(input, temp, temp2, FIRST_JS_OBJECT_TYPE);
- __ b(lt, is_false);
-
- // Map is now in temp.
- // Functions have class 'Function'.
- __ CompareInstanceType(temp, temp2, JS_FUNCTION_TYPE);
- if (class_name->IsEqualTo(CStrVector("Function"))) {
- __ b(eq, is_true);
- } else {
- __ b(eq, is_false);
- }
-
- // Check if the constructor in the map is a function.
- __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
-
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-
- // Objects with a non-function constructor have class 'Object'.
- __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
- if (class_name->IsEqualTo(CStrVector("Object"))) {
- __ b(ne, is_true);
- } else {
- __ b(ne, is_false);
- }
-
- // temp now contains the constructor function. Grab the
- // instance class name from there.
- __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(temp, FieldMemOperand(temp,
- SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is a symbol because it's a literal.
- // The name in the constructor is a symbol because of the way the context is
- // booted. This routine isn't expected to work for random API-created
- // classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are symbols it is sufficient to use an identity
- // comparison.
- __ cmp(temp, Operand(class_name));
- // End with the answer in flags.
-}
-
-
-void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- ASSERT(input.is(result));
- Handle<String> class_name = instr->hydrogen()->class_name();
-
- Label done, is_true, is_false;
-
- EmitClassOfTest(&is_true, &is_false, class_name, input, scratch0(), input);
- __ b(ne, &is_false);
-
- __ bind(&is_true);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done);
-
- __ bind(&is_false);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register temp = scratch0();
- Register temp2 = ToRegister(instr->TempAt(0));
- Handle<String> class_name = instr->hydrogen()->class_name();
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
-
- EmitBranch(true_block, false_block, eq);
-}
-
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
- int true_block = instr->true_block_id();
- int false_block = instr->false_block_id();
-
- __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ cmp(temp, Operand(instr->map()));
- EmitBranch(true_block, false_block, eq);
-}
-
-
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0.
- ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1.
-
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-
- __ cmp(r0, Operand(0));
- __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
- __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
-}
-
-
-void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0.
- ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1.
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ cmp(r0, Operand(0));
- EmitBranch(true_block, false_block, eq);
-}
-
-
-void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
- public:
- DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
- }
-
- Label* map_check() { return &map_check_; }
-
- private:
- LInstanceOfKnownGlobal* instr_;
- Label map_check_;
- };
-
- DeferredInstanceOfKnownGlobal* deferred;
- deferred = new DeferredInstanceOfKnownGlobal(this, instr);
-
- Label done, false_result;
- Register object = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(object.is(r0));
- ASSERT(result.is(r0));
-
- // A Smi is not instance of anything.
- __ JumpIfSmi(object, &false_result);
-
- // This is the inlined call site instanceof cache. The two occurences of the
- // hole value will be patched to the last map/result pair generated by the
- // instanceof stub.
- Label cache_miss;
- Register map = temp;
- __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
- __ bind(deferred->map_check()); // Label for calculating code patching.
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch with
- // the cached map.
- __ mov(ip, Operand(factory()->the_hole_value()));
- __ cmp(map, Operand(ip));
- __ b(ne, &cache_miss);
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch
- // with true or false.
- __ mov(result, Operand(factory()->the_hole_value()));
- __ b(&done);
-
- // The inlined call site cache did not match. Check null and string before
- // calling the deferred code.
- __ bind(&cache_miss);
- // Null is not instance of anything.
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(object, Operand(ip));
- __ b(eq, &false_result);
-
- // String values is not instance of anything.
- Condition is_string = masm_->IsObjectStringType(object, temp);
- __ b(is_string, &false_result);
-
- // Go to the deferred code.
- __ b(deferred->entry());
-
- __ bind(&false_result);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
-
- // Here result has either true or false. Deferred code also produces true or
- // false object.
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
- Register result = ToRegister(instr->result());
- ASSERT(result.is(r0));
-
- InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kArgsInRegisters);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kCallSiteInlineCheck);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kReturnTrueFalseObject);
- InstanceofStub stub(flags);
-
- __ PushSafepointRegisters();
-
- // Get the temp register reserved by the instruction. This needs to be r4 as
- // its slot of the pushing of safepoint registers is used to communicate the
- // offset to the location of the map check.
- Register temp = ToRegister(instr->TempAt(0));
- ASSERT(temp.is(r4));
- __ mov(InstanceofStub::right(), Operand(instr->function()));
- static const int kAdditionalDelta = 4;
- int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
- Label before_push_delta;
- __ bind(&before_push_delta);
- __ BlockConstPoolFor(kAdditionalDelta);
- __ mov(temp, Operand(delta * kPointerSize));
- __ StoreToSafepointRegisterSlot(temp, temp);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- // Put the result value into the result register slot and
- // restore all registers.
- __ StoreToSafepointRegisterSlot(result, result);
-
- __ PopSafepointRegisters();
-}
-
-
-static Condition ComputeCompareCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return eq;
- case Token::LT:
- return lt;
- case Token::GT:
- return gt;
- case Token::LTE:
- return le;
- case Token::GTE:
- return ge;
- default:
- UNREACHABLE();
- return kNoCondition;
- }
-}
-
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
- Token::Value op = instr->op();
-
- Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
-
- Condition condition = ComputeCompareCondition(op);
- if (op == Token::GT || op == Token::LTE) {
- condition = ReverseCondition(condition);
- }
- __ LoadRoot(ToRegister(instr->result()),
- Heap::kTrueValueRootIndex,
- condition);
- __ LoadRoot(ToRegister(instr->result()),
- Heap::kFalseValueRootIndex,
- NegateCondition(condition));
-}
-
-
-void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
- Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- // The compare stub expects compare condition and the input operands
- // reversed for GT and LTE.
- Condition condition = ComputeCompareCondition(op);
- if (op == Token::GT || op == Token::LTE) {
- condition = ReverseCondition(condition);
- }
- __ cmp(r0, Operand(0));
- EmitBranch(true_block, false_block, condition);
-}
-
-
-void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in r0.
- __ push(r0);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- int32_t sp_delta = (ParameterCount() + 1) * kPointerSize;
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- __ add(sp, sp, Operand(sp_delta));
- __ Jump(lr);
-}
-
-
-void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
- Register result = ToRegister(instr->result());
- __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
- __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
- if (instr->hydrogen()->check_hole_value()) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(result, ip);
- DeoptimizeIf(eq, instr->environment());
- }
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->global_object()).is(r0));
- ASSERT(ToRegister(instr->result()).is(r0));
-
- __ mov(r2, Operand(instr->name()));
- RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr);
-}
-
-
-void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register value = ToRegister(instr->InputAt(0));
- Register scratch = scratch0();
-
- // Load the cell.
- __ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
-
- // If the cell we are storing to contains the hole it could have
- // been deleted from the property dictionary. In that case, we need
- // to update the property details in the property dictionary to mark
- // it as no longer deleted.
- if (instr->hydrogen()->check_hole_value()) {
- Register scratch2 = ToRegister(instr->TempAt(0));
- __ ldr(scratch2,
- FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch2, ip);
- DeoptimizeIf(eq, instr->environment());
- }
-
- // Store the value.
- __ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
-}
-
-
-void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->global_object()).is(r1));
- ASSERT(ToRegister(instr->value()).is(r0));
-
- __ mov(r2, Operand(instr->name()));
- Handle<Code> ic = isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
-
-void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ ldr(result, ContextOperand(context, instr->slot_index()));
-}
-
-
-void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register value = ToRegister(instr->value());
- __ str(value, ContextOperand(context, instr->slot_index()));
- if (instr->needs_write_barrier()) {
- int offset = Context::SlotOffset(instr->slot_index());
- __ RecordWrite(context, Operand(offset), value, scratch0());
- }
-}
-
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Register object = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
- __ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset()));
- } else {
- __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ ldr(result, FieldMemOperand(result, instr->hydrogen()->offset()));
- }
-}
-
-
-void LCodeGen::EmitLoadField(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name) {
- LookupResult lookup;
- type->LookupInDescriptors(NULL, *name, &lookup);
- ASSERT(lookup.IsProperty() && lookup.type() == FIELD);
- int index = lookup.GetLocalFieldIndexFromMap(*type);
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- __ ldr(result, FieldMemOperand(object, offset + type->instance_size()));
- } else {
- // Non-negative property indices are in the properties array.
- __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
- }
-}
-
-
-void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- int map_count = instr->hydrogen()->types()->length();
- Handle<String> name = instr->hydrogen()->name();
- if (map_count == 0) {
- ASSERT(instr->hydrogen()->need_generic());
- __ mov(r2, Operand(name));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- } else {
- Label done;
- __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- for (int i = 0; i < map_count - 1; ++i) {
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- Label next;
- __ cmp(scratch, Operand(map));
- __ b(ne, &next);
- EmitLoadField(result, object, map, name);
- __ b(&done);
- __ bind(&next);
- }
- Handle<Map> map = instr->hydrogen()->types()->last();
- __ cmp(scratch, Operand(map));
- if (instr->hydrogen()->need_generic()) {
- Label generic;
- __ b(ne, &generic);
- EmitLoadField(result, object, map, name);
- __ b(&done);
- __ bind(&generic);
- __ mov(r2, Operand(name));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- } else {
- DeoptimizeIf(ne, instr->environment());
- EmitLoadField(result, object, map, name);
- }
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(r0));
- ASSERT(ToRegister(instr->result()).is(r0));
-
- // Name is always in r2.
- __ mov(r2, Operand(instr->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Register scratch = scratch0();
- Register function = ToRegister(instr->function());
- Register result = ToRegister(instr->result());
-
- // Check that the function really is a function. Load map into the
- // result register.
- __ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
- DeoptimizeIf(ne, instr->environment());
-
- // Make sure that the function has an instance prototype.
- Label non_instance;
- __ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
- __ b(ne, &non_instance);
-
- // Get the prototype or initial map from the function.
- __ ldr(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check that the function has a prototype or an initial map.
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(result, ip);
- DeoptimizeIf(eq, instr->environment());
-
- // If the function does not have an initial map, we're done.
- Label done;
- __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
- __ b(ne, &done);
-
- // Get the prototype from the initial map.
- __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
- __ jmp(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- __ bind(&non_instance);
- __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
-
- // All done.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->InputAt(0));
- Register scratch = scratch0();
-
- __ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- Label done;
- __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(scratch, ip);
- __ b(eq, &done);
- __ LoadRoot(ip, Heap::kExternalPixelArrayMapRootIndex);
- __ cmp(scratch, ip);
- __ b(eq, &done);
- __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
- __ cmp(scratch, ip);
- __ Check(eq, "Check for fast elements failed.");
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadExternalArrayPointer(
- LLoadExternalArrayPointer* instr) {
- Register to_reg = ToRegister(instr->result());
- Register from_reg = ToRegister(instr->InputAt(0));
- __ ldr(to_reg, FieldMemOperand(from_reg,
- ExternalArray::kExternalPointerOffset));
-}
-
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- Register arguments = ToRegister(instr->arguments());
- Register length = ToRegister(instr->length());
- Register index = ToRegister(instr->index());
- Register result = ToRegister(instr->result());
-
- // Bailout index is not a valid argument index. Use unsigned check to get
- // negative check for free.
- __ sub(length, length, index, SetCC);
- DeoptimizeIf(ls, instr->environment());
-
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them add one more.
- __ add(length, length, Operand(1));
- __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
-}
-
-
-void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
- Register elements = ToRegister(instr->elements());
- Register key = EmitLoadRegister(instr->key(), scratch0());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- ASSERT(result.is(elements));
-
- // Load the result.
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
- __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
-
- // Check for the hole value.
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ cmp(result, scratch);
- DeoptimizeIf(eq, instr->environment());
-}
-
-
-void LCodeGen::DoLoadKeyedSpecializedArrayElement(
- LLoadKeyedSpecializedArrayElement* instr) {
- ASSERT(instr->array_type() == kExternalPixelArray);
-
- Register external_pointer = ToRegister(instr->external_pointer());
- Register key = ToRegister(instr->key());
- Register result = ToRegister(instr->result());
-
- // Load the result.
- __ ldrb(result, MemOperand(external_pointer, key));
-}
-
-
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(r1));
- ASSERT(ToRegister(instr->key()).is(r0));
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- Register scratch = scratch0();
- Register result = ToRegister(instr->result());
-
- // Check if the calling frame is an arguments adaptor frame.
- Label done, adapted;
- __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
- __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Result is the frame pointer for the frame if not adapted and for the real
- // frame below the adaptor frame if adapted.
- __ mov(result, fp, LeaveCC, ne);
- __ mov(result, scratch, LeaveCC, eq);
-}
-
-
-void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Register elem = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- Label done;
-
- // If no arguments adaptor frame the number of arguments is fixed.
- __ cmp(fp, elem);
- __ mov(result, Operand(scope()->num_parameters()));
- __ b(eq, &done);
-
- // Arguments adaptor frame present. Get argument length from there.
- __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(result,
- MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(result);
-
- // Argument length is in result register.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
- Register scratch = scratch0();
- ASSERT(receiver.is(r0)); // Used for parameter count.
- ASSERT(function.is(r1)); // Required by InvokeFunction.
- ASSERT(ToRegister(instr->result()).is(r0));
-
- // If the receiver is null or undefined, we have to pass the global object
- // as a receiver.
- Label global_object, receiver_ok;
- __ LoadRoot(scratch, Heap::kNullValueRootIndex);
- __ cmp(receiver, scratch);
- __ b(eq, &global_object);
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- __ cmp(receiver, scratch);
- __ b(eq, &global_object);
-
- // Deoptimize if the receiver is not a JS object.
- __ tst(receiver, Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment());
- __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_OBJECT_TYPE);
- DeoptimizeIf(lo, instr->environment());
- __ jmp(&receiver_ok);
-
- __ bind(&global_object);
- __ ldr(receiver, GlobalObjectOperand());
- __ bind(&receiver_ok);
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- const uint32_t kArgumentsLimit = 1 * KB;
- __ cmp(length, Operand(kArgumentsLimit));
- DeoptimizeIf(hi, instr->environment());
-
- // Push the receiver and use the register to keep the original
- // number of arguments.
- __ push(receiver);
- __ mov(receiver, length);
- // The arguments are at a one pointer size offset from elements.
- __ add(elements, elements, Operand(1 * kPointerSize));
-
- // Loop through the arguments pushing them onto the execution
- // stack.
- Label invoke, loop;
- // length is a small non-negative integer, due to the test above.
- __ cmp(length, Operand(0));
- __ b(eq, &invoke);
- __ bind(&loop);
- __ ldr(scratch, MemOperand(elements, length, LSL, 2));
- __ push(scratch);
- __ sub(length, length, Operand(1), SetCC);
- __ b(ne, &loop);
-
- __ bind(&invoke);
- ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
- LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
- RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
- SafepointGenerator safepoint_generator(this,
- pointers,
- env->deoptimization_index());
- // The number of arguments is stored in receiver which is r0, as expected
- // by InvokeFunction.
- v8::internal::ParameterCount actual(receiver);
- __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->InputAt(0);
- if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
- Abort("DoPushArgument not implemented for double type.");
- } else {
- Register argument_reg = EmitLoadRegister(argument, ip);
- __ push(argument_reg);
- }
-}
-
-
-void LCodeGen::DoContext(LContext* instr) {
- Register result = ToRegister(instr->result());
- __ mov(result, cp);
-}
-
-
-void LCodeGen::DoOuterContext(LOuterContext* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ ldr(result,
- MemOperand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ ldr(result, FieldMemOperand(result, JSFunction::kContextOffset));
-}
-
-
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
-}
-
-
-void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global());
- Register result = ToRegister(instr->result());
- __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
-}
-
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int arity,
- LInstruction* instr) {
- // Change context if needed.
- bool change_context =
- (info()->closure()->context() != function->context()) ||
- scope()->contains_with() ||
- (scope()->num_heap_slots() > 0);
- if (change_context) {
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- }
-
- // Set r0 to arguments count if adaption is not needed. Assumes that r0
- // is available to write to at this point.
- if (!function->NeedsArgumentsAdaption()) {
- __ mov(r0, Operand(arity));
- }
-
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
-
- // Invoke function.
- __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ Call(ip);
-
- // Setup deoptimization.
- RegisterLazyDeoptimization(instr);
-
- // Restore context.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
- __ mov(r1, Operand(instr->function()));
- CallKnownFunction(instr->function(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
- Register input = ToRegister(instr->InputAt(0));
- Register scratch = scratch0();
-
- // Deoptimize if not a heap number.
- __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
-
- Label done;
- Register exponent = scratch0();
- scratch = no_reg;
- __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
- // Check the sign of the argument. If the argument is positive, just
- // return it. We do not need to patch the stack since |input| and
- // |result| are the same register and |input| would be restored
- // unchanged by popping safepoint registers.
- __ tst(exponent, Operand(HeapNumber::kSignMask));
- __ b(eq, &done);
-
- // Input is negative. Reverse its sign.
- // Preserve the value of all registers.
- __ PushSafepointRegisters();
-
- // Registers were saved at the safepoint, so we can use
- // many scratch registers.
- Register tmp1 = input.is(r1) ? r0 : r1;
- Register tmp2 = input.is(r2) ? r0 : r2;
- Register tmp3 = input.is(r3) ? r0 : r3;
- Register tmp4 = input.is(r4) ? r0 : r4;
-
- // exponent: floating point exponent value.
-
- Label allocated, slow;
- __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
- __ b(&allocated);
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
- // Set the pointer to the new heap number in tmp.
- if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
- // Restore input_reg after call to runtime.
- __ LoadFromSafepointRegisterSlot(input, input);
- __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
-
- __ bind(&allocated);
- // exponent: floating point exponent value.
- // tmp1: allocated heap number.
- __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
- __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
- __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
- __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
-
- __ StoreToSafepointRegisterSlot(tmp1, input);
- __ PopSafepointRegisters();
-
- __ bind(&done);
-}
-
-
-void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
- Register input = ToRegister(instr->InputAt(0));
- __ cmp(input, Operand(0));
- // We can make rsb conditional because the previous cmp instruction
- // will clear the V (overflow) flag and rsb won't set this flag
- // if input is positive.
- __ rsb(input, input, Operand(0), SetCC, mi);
- // Deoptimize on overflow.
- DeoptimizeIf(vs, instr->environment());
-}
-
-
-void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
- // Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
- public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
- LUnaryMathOperation* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
- }
- private:
- LUnaryMathOperation* instr_;
- };
-
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsDouble()) {
- DwVfpRegister input = ToDoubleRegister(instr->InputAt(0));
- __ vabs(input, input);
- } else if (r.IsInteger32()) {
- EmitIntegerMathAbs(instr);
- } else {
- // Representation is tagged.
- DeferredMathAbsTaggedHeapNumber* deferred =
- new DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input = ToRegister(instr->InputAt(0));
- // Smi check.
- __ JumpIfNotSmi(input, deferred->entry());
- // If smi, handle it directly.
- EmitIntegerMathAbs(instr);
- __ bind(deferred->exit());
- }
-}
-
-
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- SwVfpRegister single_scratch = double_scratch0().low();
- Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->TempAt(0));
-
- __ EmitVFPTruncate(kRoundToMinusInf,
- single_scratch,
- input,
- scratch1,
- scratch2);
- DeoptimizeIf(ne, instr->environment());
-
- // Move the result back to general purpose register r0.
- __ vmov(result, single_scratch);
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Test for -0.
- Label done;
- __ cmp(result, Operand(0));
- __ b(ne, &done);
- __ vmov(scratch1, input.high());
- __ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Register scratch1 = scratch0();
- Register scratch2 = result;
- __ EmitVFPTruncate(kRoundToNearest,
- double_scratch0().low(),
- input,
- scratch1,
- scratch2);
- DeoptimizeIf(ne, instr->environment());
- __ vmov(result, double_scratch0().low());
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Test for -0.
- Label done;
- __ cmp(result, Operand(0));
- __ b(ne, &done);
- __ vmov(scratch1, input.high());
- __ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
- ASSERT(ToDoubleRegister(instr->result()).is(input));
- __ vsqrt(input, input);
-}
-
-
-void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
- Register scratch = scratch0();
- SwVfpRegister single_scratch = double_scratch0().low();
- DoubleRegister double_scratch = double_scratch0();
- ASSERT(ToDoubleRegister(instr->result()).is(input));
-
- // Add +0 to convert -0 to +0.
- __ mov(scratch, Operand(0));
- __ vmov(single_scratch, scratch);
- __ vcvt_f64_s32(double_scratch, single_scratch);
- __ vadd(input, input, double_scratch);
- __ vsqrt(input, input);
-}
-
-
-void LCodeGen::DoPower(LPower* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- Register scratch = scratch0();
- DoubleRegister result_reg = ToDoubleRegister(instr->result());
- Representation exponent_type = instr->hydrogen()->right()->representation();
- if (exponent_type.IsDouble()) {
- // Prepare arguments and call C function.
- __ PrepareCallCFunction(4, scratch);
- __ vmov(r0, r1, ToDoubleRegister(left));
- __ vmov(r2, r3, ToDoubleRegister(right));
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 4);
- } else if (exponent_type.IsInteger32()) {
- ASSERT(ToRegister(right).is(r0));
- // Prepare arguments and call C function.
- __ PrepareCallCFunction(4, scratch);
- __ mov(r2, ToRegister(right));
- __ vmov(r0, r1, ToDoubleRegister(left));
- __ CallCFunction(
- ExternalReference::power_double_int_function(isolate()), 4);
- } else {
- ASSERT(exponent_type.IsTagged());
- ASSERT(instr->hydrogen()->left()->representation().IsDouble());
-
- Register right_reg = ToRegister(right);
-
- // Check for smi on the right hand side.
- Label non_smi, call;
- __ JumpIfNotSmi(right_reg, &non_smi);
-
- // Untag smi and convert it to a double.
- __ SmiUntag(right_reg);
- SwVfpRegister single_scratch = double_scratch0().low();
- __ vmov(single_scratch, right_reg);
- __ vcvt_f64_s32(result_reg, single_scratch);
- __ jmp(&call);
-
- // Heap number map check.
- __ bind(&non_smi);
- __ ldr(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
- int32_t value_offset = HeapNumber::kValueOffset - kHeapObjectTag;
- __ add(scratch, right_reg, Operand(value_offset));
- __ vldr(result_reg, scratch, 0);
-
- // Prepare arguments and call C function.
- __ bind(&call);
- __ PrepareCallCFunction(4, scratch);
- __ vmov(r0, r1, ToDoubleRegister(left));
- __ vmov(r2, r3, result_reg);
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 4);
- }
- // Store the result in the result register.
- __ GetCFunctionDoubleResult(result_reg);
-}
-
-
-void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(d2));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(d2));
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(d2));
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathAbs:
- DoMathAbs(instr);
- break;
- case kMathFloor:
- DoMathFloor(instr);
- break;
- case kMathRound:
- DoMathRound(instr);
- break;
- case kMathSqrt:
- DoMathSqrt(instr);
- break;
- case kMathPowHalf:
- DoMathPowHalf(instr);
- break;
- case kMathCos:
- DoMathCos(instr);
- break;
- case kMathSin:
- DoMathSin(instr);
- break;
- case kMathLog:
- DoMathLog(instr);
- break;
- default:
- Abort("Unimplemented type of LUnaryMathOperation.");
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
-
- int arity = instr->arity();
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallNamed(LCallNamed* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
-
- int arity = instr->arity();
- Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
- arity, NOT_IN_LOOP);
- __ mov(r2, Operand(instr->name()));
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
-
- int arity = instr->arity();
- CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ Drop(1);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
-
- int arity = instr->arity();
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP);
- __ mov(r2, Operand(instr->name()));
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
- __ mov(r1, Operand(instr->target()));
- CallKnownFunction(instr->target(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoCallNew(LCallNew* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(r1));
- ASSERT(ToRegister(instr->result()).is(r0));
-
- Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
- __ mov(r0, Operand(instr->arity()));
- CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
- Register object = ToRegister(instr->object());
- Register value = ToRegister(instr->value());
- Register scratch = scratch0();
- int offset = instr->offset();
-
- ASSERT(!object.is(value));
-
- if (!instr->transition().is_null()) {
- __ mov(scratch, Operand(instr->transition()));
- __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- }
-
- // Do the store.
- if (instr->is_in_object()) {
- __ str(value, FieldMemOperand(object, offset));
- if (instr->needs_write_barrier()) {
- // Update the write barrier for the object for in-object properties.
- __ RecordWrite(object, Operand(offset), value, scratch);
- }
- } else {
- __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ str(value, FieldMemOperand(scratch, offset));
- if (instr->needs_write_barrier()) {
- // Update the write barrier for the properties array.
- // object is used as a scratch register.
- __ RecordWrite(scratch, Operand(offset), value, object);
- }
- }
-}
-
-
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(r1));
- ASSERT(ToRegister(instr->value()).is(r0));
-
- // Name is always in r2.
- __ mov(r2, Operand(instr->name()));
- Handle<Code> ic = info_->is_strict()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
- DeoptimizeIf(hs, instr->environment());
-}
-
-
-void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->object());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
- Register scratch = scratch0();
-
- // Do the store.
- if (instr->key()->IsConstantOperand()) {
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- int offset =
- ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
- __ str(value, FieldMemOperand(elements, offset));
- } else {
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
- __ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- }
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- // Compute address of modified element and store it into key register.
- __ add(key, scratch, Operand(FixedArray::kHeaderSize));
- __ RecordWrite(elements, key, value);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedSpecializedArrayElement(
- LStoreKeyedSpecializedArrayElement* instr) {
- ASSERT(instr->array_type() == kExternalPixelArray);
-
- Register external_pointer = ToRegister(instr->external_pointer());
- Register key = ToRegister(instr->key());
- Register value = ToRegister(instr->value());
-
- // Clamp the value to [0..255].
- __ Usat(value, 8, Operand(value));
- __ strb(value, MemOperand(external_pointer, key, LSL, 0));
-}
-
-
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(r2));
- ASSERT(ToRegister(instr->key()).is(r1));
- ASSERT(ToRegister(instr->value()).is(r0));
-
- Handle<Code> ic = info_->is_strict()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
- public:
- DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- private:
- LStringCharCodeAt* instr_;
- };
-
- Register scratch = scratch0();
- Register string = ToRegister(instr->string());
- Register index = no_reg;
- int const_index = -1;
- if (instr->index()->IsConstantOperand()) {
- const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
- if (!Smi::IsValid(const_index)) {
- // Guaranteed to be out of bounds because of the assert above.
- // So the bounds check that must dominate this instruction must
- // have deoptimized already.
- if (FLAG_debug_code) {
- __ Abort("StringCharCodeAt: out of bounds index.");
- }
- // No code needs to be generated.
- return;
- }
- } else {
- index = ToRegister(instr->index());
- }
- Register result = ToRegister(instr->result());
-
- DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(this, instr);
-
- Label flat_string, ascii_string, done;
-
- // Fetch the instance type of the receiver into result register.
- __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for non-flat strings.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(result, Operand(kStringRepresentationMask));
- __ b(eq, &flat_string);
-
- // Handle non-flat strings.
- __ tst(result, Operand(kIsConsStringMask));
- __ b(eq, deferred->entry());
-
- // ConsString.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ ldr(scratch, FieldMemOperand(string, ConsString::kSecondOffset));
- __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
- __ cmp(scratch, ip);
- __ b(ne, deferred->entry());
- // Get the first of the two strings and load its instance type.
- __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
- __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
- // If the first cons component is also non-flat, then go to runtime.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(result, Operand(kStringRepresentationMask));
- __ b(ne, deferred->entry());
-
- // Check for 1-byte or 2-byte string.
- __ bind(&flat_string);
- STATIC_ASSERT(kAsciiStringTag != 0);
- __ tst(result, Operand(kStringEncodingMask));
- __ b(ne, &ascii_string);
-
- // 2-byte string.
- // Load the 2-byte character code into the result register.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- if (instr->index()->IsConstantOperand()) {
- __ ldrh(result,
- FieldMemOperand(string,
- SeqTwoByteString::kHeaderSize + 2 * const_index));
- } else {
- __ add(scratch,
- string,
- Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ ldrh(result, MemOperand(scratch, index, LSL, 1));
- }
- __ jmp(&done);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- if (instr->index()->IsConstantOperand()) {
- __ ldrb(result, FieldMemOperand(string,
- SeqAsciiString::kHeaderSize + const_index));
- } else {
- __ add(scratch,
- string,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ ldrb(result, MemOperand(scratch, index));
- }
- __ bind(&done);
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, Operand(0));
-
- __ PushSafepointRegisters();
- __ push(string);
- // Push the index as a smi. This is safe because of the checks in
- // DoStringCharCodeAt above.
- if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- __ mov(scratch, Operand(Smi::FromInt(const_index)));
- __ push(scratch);
- } else {
- Register index = ToRegister(instr->index());
- __ SmiTag(index);
- __ push(index);
- }
- __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(r0);
- }
- __ SmiUntag(r0);
- __ StoreToSafepointRegisterSlot(r0, result);
- __ PopSafepointRegisters();
-}
-
-
-void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
- public:
- DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- private:
- LStringCharFromCode* instr_;
- };
-
- DeferredStringCharFromCode* deferred =
- new DeferredStringCharFromCode(this, instr);
-
- ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
- ASSERT(!char_code.is(result));
-
- __ cmp(char_code, Operand(String::kMaxAsciiCharCode));
- __ b(hi, deferred->entry());
- __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
- __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
- __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(result, ip);
- __ b(eq, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, Operand(0));
-
- __ PushSafepointRegisters();
- __ SmiTag(char_code);
- __ push(char_code);
- __ CallRuntimeSaveDoubles(Runtime::kCharFromCode);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 1, Safepoint::kNoDeoptimizationIndex);
- __ StoreToSafepointRegisterSlot(r0, result);
- __ PopSafepointRegisters();
-}
-
-
-void LCodeGen::DoStringLength(LStringLength* instr) {
- Register string = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- __ ldr(result, FieldMemOperand(string, String::kLengthOffset));
-}
-
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister() || input->IsStackSlot());
- LOperand* output = instr->result();
- ASSERT(output->IsDoubleRegister());
- SwVfpRegister single_scratch = double_scratch0().low();
- if (input->IsStackSlot()) {
- Register scratch = scratch0();
- __ ldr(scratch, ToMemOperand(input));
- __ vmov(single_scratch, scratch);
- } else {
- __ vmov(single_scratch, ToRegister(input));
- }
- __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
-}
-
-
-void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI: public LDeferredCode {
- public:
- DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
- private:
- LNumberTagI* instr_;
- };
-
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
-
- DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
- __ SmiTag(reg, SetCC);
- __ b(vs, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
- Label slow;
- Register reg = ToRegister(instr->InputAt(0));
- DoubleRegister dbl_scratch = d0;
- SwVfpRegister flt_scratch = s0;
-
- // Preserve the value of all registers.
- __ PushSafepointRegisters();
-
- // There was overflow, so bits 30 and 31 of the original integer
- // disagree. Try to allocate a heap number in new space and store
- // the value in there. If that fails, call the runtime system.
- Label done;
- __ SmiUntag(reg);
- __ eor(reg, reg, Operand(0x80000000));
- __ vmov(flt_scratch, reg);
- __ vcvt_f64_s32(dbl_scratch, flt_scratch);
- if (FLAG_inline_new) {
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
- if (!reg.is(r5)) __ mov(reg, r5);
- __ b(&done);
- }
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- // TODO(3095996): Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ mov(ip, Operand(0));
- __ StoreToSafepointRegisterSlot(ip, reg);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
- if (!reg.is(r0)) __ mov(reg, r0);
-
- // Done. Put the value in dbl_scratch into the value of the allocated heap
- // number.
- __ bind(&done);
- __ sub(ip, reg, Operand(kHeapObjectTag));
- __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
- __ StoreToSafepointRegisterSlot(reg, reg);
- __ PopSafepointRegisters();
-}
-
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
- public:
- DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- private:
- LNumberTagD* instr_;
- };
-
- DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
- Register scratch = scratch0();
- Register reg = ToRegister(instr->result());
- Register temp1 = ToRegister(instr->TempAt(0));
- Register temp2 = ToRegister(instr->TempAt(1));
-
- DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
- if (FLAG_inline_new) {
- __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
- } else {
- __ jmp(deferred->entry());
- }
- __ bind(deferred->exit());
- __ sub(ip, reg, Operand(kHeapObjectTag));
- __ vstr(input_reg, ip, HeapNumber::kValueOffset);
-}
-
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register reg = ToRegister(instr->result());
- __ mov(reg, Operand(0));
-
- __ PushSafepointRegisters();
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
- __ StoreToSafepointRegisterSlot(r0, reg);
- __ PopSafepointRegisters();
-}
-
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(input));
-}
-
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- if (instr->needs_check()) {
- __ tst(ToRegister(input), Operand(kSmiTagMask));
- DeoptimizeIf(ne, instr->environment());
- }
- __ SmiUntag(ToRegister(input));
-}
-
-
-void LCodeGen::EmitNumberUntagD(Register input_reg,
- DoubleRegister result_reg,
- LEnvironment* env) {
- Register scratch = scratch0();
- SwVfpRegister flt_scratch = s0;
- ASSERT(!result_reg.is(d0));
-
- Label load_smi, heap_number, done;
-
- // Smi check.
- __ tst(input_reg, Operand(kSmiTagMask));
- __ b(eq, &load_smi);
-
- // Heap number map check.
- __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, Operand(ip));
- __ b(eq, &heap_number);
-
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, env);
-
- // Convert undefined to NaN.
- __ LoadRoot(ip, Heap::kNanValueRootIndex);
- __ sub(ip, ip, Operand(kHeapObjectTag));
- __ vldr(result_reg, ip, HeapNumber::kValueOffset);
- __ jmp(&done);
-
- // Heap number to double register conversion.
- __ bind(&heap_number);
- __ sub(ip, input_reg, Operand(kHeapObjectTag));
- __ vldr(result_reg, ip, HeapNumber::kValueOffset);
- __ jmp(&done);
-
- // Smi to double register conversion
- __ bind(&load_smi);
- __ SmiUntag(input_reg); // Untag smi before converting to float.
- __ vmov(flt_scratch, input_reg);
- __ vcvt_f64_s32(result_reg, flt_scratch);
- __ SmiTag(input_reg); // Retag smi.
- __ bind(&done);
-}
-
-
-class DeferredTaggedToI: public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- private:
- LTaggedToI* instr_;
-};
-
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Register input_reg = ToRegister(instr->InputAt(0));
- Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->TempAt(0));
- DwVfpRegister double_scratch = double_scratch0();
- SwVfpRegister single_scratch = double_scratch.low();
-
- ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
- ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
-
- Label done;
-
- // Heap number map check.
- __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch1, Operand(ip));
-
- if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->TempAt(1));
- DwVfpRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
- ASSERT(!scratch3.is(input_reg) &&
- !scratch3.is(scratch1) &&
- !scratch3.is(scratch2));
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations.
- Label heap_number;
- __ b(eq, &heap_number);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
- __ mov(input_reg, Operand(0));
- __ b(&done);
-
- __ bind(&heap_number);
- __ sub(scratch1, input_reg, Operand(kHeapObjectTag));
- __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset);
-
- __ EmitECMATruncate(input_reg,
- double_scratch2,
- single_scratch,
- scratch1,
- scratch2,
- scratch3);
-
- } else {
- CpuFeatures::Scope scope(VFP3);
- // Deoptimize if we don't have a heap number.
- DeoptimizeIf(ne, instr->environment());
-
- __ sub(ip, input_reg, Operand(kHeapObjectTag));
- __ vldr(double_scratch, ip, HeapNumber::kValueOffset);
- __ EmitVFPTruncate(kRoundToZero,
- single_scratch,
- double_scratch,
- scratch1,
- scratch2,
- kCheckForInexactConversion);
- DeoptimizeIf(ne, instr->environment());
- // Load the result.
- __ vmov(input_reg, single_scratch);
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(input_reg, Operand(0));
- __ b(ne, &done);
- __ vmov(scratch1, double_scratch.high());
- __ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
- }
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister());
- ASSERT(input->Equals(instr->result()));
-
- Register input_reg = ToRegister(input);
-
- DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
-
- // Smi check.
- __ tst(input_reg, Operand(kSmiTagMask));
- __ b(ne, deferred->entry());
-
- // Smi to int32 conversion
- __ SmiUntag(input_reg); // Untag smi.
-
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister());
- LOperand* result = instr->result();
- ASSERT(result->IsDoubleRegister());
-
- Register input_reg = ToRegister(input);
- DoubleRegister result_reg = ToDoubleRegister(result);
-
- EmitNumberUntagD(input_reg, result_reg, instr->environment());
-}
-
-
-void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- Register result_reg = ToRegister(instr->result());
- Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->TempAt(0));
- DwVfpRegister double_input = ToDoubleRegister(instr->InputAt(0));
- DwVfpRegister double_scratch = double_scratch0();
- SwVfpRegister single_scratch = double_scratch0().low();
-
- Label done;
-
- if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->TempAt(1));
- __ EmitECMATruncate(result_reg,
- double_input,
- single_scratch,
- scratch1,
- scratch2,
- scratch3);
- } else {
- VFPRoundingMode rounding_mode = kRoundToMinusInf;
- __ EmitVFPTruncate(rounding_mode,
- single_scratch,
- double_input,
- scratch1,
- scratch2,
- kCheckForInexactConversion);
- // Deoptimize if we had a vfp invalid exception,
- // including inexact operation.
- DeoptimizeIf(ne, instr->environment());
- // Retrieve the result.
- __ vmov(result_reg, single_scratch);
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->InputAt(0);
- __ tst(ToRegister(input), Operand(kSmiTagMask));
- DeoptimizeIf(ne, instr->environment());
-}
-
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->InputAt(0);
- __ tst(ToRegister(input), Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment());
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register scratch = scratch0();
- InstanceType first = instr->hydrogen()->first();
- InstanceType last = instr->hydrogen()->last();
-
- __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(first));
-
- // If there is only one type in the interval check for equality.
- if (first == last) {
- DeoptimizeIf(ne, instr->environment());
- } else {
- DeoptimizeIf(lo, instr->environment());
- // Omit check for the last type.
- if (last != LAST_TYPE) {
- __ cmp(scratch, Operand(last));
- DeoptimizeIf(hi, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- ASSERT(instr->InputAt(0)->IsRegister());
- Register reg = ToRegister(instr->InputAt(0));
- __ cmp(reg, Operand(instr->hydrogen()->target()));
- DeoptimizeIf(ne, instr->environment());
-}
-
-
-void LCodeGen::DoCheckMap(LCheckMap* instr) {
- Register scratch = scratch0();
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister());
- Register reg = ToRegister(input);
- __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ cmp(scratch, Operand(instr->hydrogen()->map()));
- DeoptimizeIf(ne, instr->environment());
-}
-
-
-void LCodeGen::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- if (heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- factory()->NewJSGlobalPropertyCell(object);
- __ mov(result, Operand(cell));
- __ ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
- } else {
- __ mov(result, Operand(object));
- }
-}
-
-
-void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- Register temp1 = ToRegister(instr->TempAt(0));
- Register temp2 = ToRegister(instr->TempAt(1));
-
- Handle<JSObject> holder = instr->holder();
- Handle<JSObject> current_prototype = instr->prototype();
-
- // Load prototype object.
- LoadHeapObject(temp1, current_prototype);
-
- // Check prototype maps up to the holder.
- while (!current_prototype.is_identical_to(holder)) {
- __ ldr(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
- __ cmp(temp2, Operand(Handle<Map>(current_prototype->map())));
- DeoptimizeIf(ne, instr->environment());
- current_prototype =
- Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
- // Load next prototype object.
- LoadHeapObject(temp1, current_prototype);
- }
-
- // Check the holder map.
- __ ldr(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
- __ cmp(temp2, Operand(Handle<Map>(current_prototype->map())));
- DeoptimizeIf(ne, instr->environment());
-}
-
-
-void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r1, Operand(instr->hydrogen()->constant_elements()));
- __ Push(r3, r2, r1);
-
- // Pick the right runtime function or stub to call.
- int length = instr->hydrogen()->length();
- if (instr->hydrogen()->IsCopyOnWrite()) {
- ASSERT(instr->hydrogen()->depth() == 1);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
- } else {
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- __ ldr(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r4, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
- __ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r2, Operand(instr->hydrogen()->constant_properties()));
- __ mov(r1, Operand(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
- __ Push(r4, r3, r2, r1);
-
- // Pick the right runtime function to call.
- if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else {
- CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
- }
-}
-
-
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(r0));
- __ push(r0);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- Label materialized;
- // Registers will be used as follows:
- // r3 = JS function.
- // r7 = literals array.
- // r1 = regexp literal.
- // r0 = regexp literal clone.
- // r2 and r4-r6 are used as temporaries.
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r7, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- int literal_offset = FixedArray::kHeaderSize +
- instr->hydrogen()->literal_index() * kPointerSize;
- __ ldr(r1, FieldMemOperand(r7, literal_offset));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &materialized);
-
- // Create regexp literal using runtime function
- // Result will be in r0.
- __ mov(r6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r5, Operand(instr->hydrogen()->pattern()));
- __ mov(r4, Operand(instr->hydrogen()->flags()));
- __ Push(r7, r6, r5, r4);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ mov(r1, r0);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
-
- __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ mov(r0, Operand(Smi::FromInt(size)));
- __ Push(r1, r0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(r1);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ ldr(r3, FieldMemOperand(r1, i));
- __ ldr(r2, FieldMemOperand(r1, i + kPointerSize));
- __ str(r3, FieldMemOperand(r0, i));
- __ str(r2, FieldMemOperand(r0, i + kPointerSize));
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ ldr(r3, FieldMemOperand(r1, size - kPointerSize));
- __ str(r3, FieldMemOperand(r0, size - kPointerSize));
- }
-}
-
-
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- Handle<SharedFunctionInfo> shared_info = instr->shared_info();
- bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(
- shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
- __ mov(r1, Operand(shared_info));
- __ push(r1);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else {
- __ mov(r2, Operand(shared_info));
- __ mov(r1, Operand(pretenure
- ? factory()->true_value()
- : factory()->false_value()));
- __ Push(cp, r2, r1);
- CallRuntime(Runtime::kNewClosure, 3, instr);
- }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
- Register input = ToRegister(instr->InputAt(0));
- __ push(input);
- CallRuntime(Runtime::kTypeof, 1, instr);
-}
-
-
-void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Label true_label;
- Label false_label;
- Label done;
-
- Condition final_branch_condition = EmitTypeofIs(&true_label,
- &false_label,
- input,
- instr->type_literal());
- __ b(final_branch_condition, &true_label);
- __ bind(&false_label);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ b(&done);
-
- __ bind(&true_label);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition final_branch_condition = EmitTypeofIs(true_label,
- false_label,
- input,
- instr->type_literal());
-
- EmitBranch(true_block, false_block, final_branch_condition);
-}
-
-
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name) {
- Condition final_branch_condition = kNoCondition;
- Register scratch = scratch0();
- if (type_name->Equals(heap()->number_symbol())) {
- __ JumpIfSmi(input, true_label);
- __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(input, Operand(ip));
- final_branch_condition = eq;
-
- } else if (type_name->Equals(heap()->string_symbol())) {
- __ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE);
- __ b(ge, false_label);
- __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
- final_branch_condition = eq;
-
- } else if (type_name->Equals(heap()->boolean_symbol())) {
- __ CompareRoot(input, Heap::kTrueValueRootIndex);
- __ b(eq, true_label);
- __ CompareRoot(input, Heap::kFalseValueRootIndex);
- final_branch_condition = eq;
-
- } else if (type_name->Equals(heap()->undefined_symbol())) {
- __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
- __ b(eq, true_label);
- __ JumpIfSmi(input, false_label);
- // Check for undetectable objects => true.
- __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
- final_branch_condition = ne;
-
- } else if (type_name->Equals(heap()->function_symbol())) {
- __ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, input, scratch, FIRST_FUNCTION_CLASS_TYPE);
- final_branch_condition = ge;
-
- } else if (type_name->Equals(heap()->object_symbol())) {
- __ JumpIfSmi(input, false_label);
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- __ b(eq, true_label);
- __ CompareObjectType(input, input, scratch, FIRST_JS_OBJECT_TYPE);
- __ b(lo, false_label);
- __ CompareInstanceType(input, scratch, FIRST_FUNCTION_CLASS_TYPE);
- __ b(hs, false_label);
- // Check for undetectable objects => false.
- __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
- final_branch_condition = eq;
-
- } else {
- final_branch_condition = ne;
- __ b(false_label);
- // A dead branch instruction will be generated after this point.
- }
-
- return final_branch_condition;
-}
-
-
-void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
- Register result = ToRegister(instr->result());
- Label true_label;
- Label false_label;
- Label done;
-
- EmitIsConstructCall(result, scratch0());
- __ b(eq, &true_label);
-
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ b(&done);
-
-
- __ bind(&true_label);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp1 = ToRegister(instr->TempAt(0));
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- EmitIsConstructCall(temp1, scratch0());
- EmitBranch(true_block, false_block, eq);
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
- ASSERT(!temp1.is(temp2));
- // Get the frame pointer for the calling frame.
- __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
- __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &check_frame_marker);
- __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
- __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
-
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- // No code for lazy bailout instruction. Used to capture environment after a
- // call for populating the safepoint data with deoptimization data.
-}
-
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- DeoptimizeIf(al, instr->environment());
-}
-
-
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- Register object = ToRegister(instr->object());
- Register key = ToRegister(instr->key());
- Register strict = scratch0();
- __ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
- __ Push(object, key, strict);
- ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
- LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
- RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
- SafepointGenerator safepoint_generator(this,
- pointers,
- env->deoptimization_index());
- __ InvokeBuiltin(Builtins::DELETE, CALL_JS, &safepoint_generator);
-}
-
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- // Perform stack overflow check.
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ bind(&ok);
-}
-
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- // This is a pseudo-instruction that ensures that the environment here is
- // properly registered for deoptimization and records the assembler's PC
- // offset.
- LEnvironment* environment = instr->environment();
- environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
- instr->SpilledDoubleRegisterArray());
-
- // If the environment were already registered, we would have no way of
- // backpatching it with the spill slot operands.
- ASSERT(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment);
- ASSERT(osr_pc_offset_ == -1);
- osr_pc_offset_ = masm()->pc_offset();
-}
-
-
-#undef __
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/arm/lithium-codegen-arm.h b/src/3rdparty/v8/src/arm/lithium-codegen-arm.h
deleted file mode 100644
index caa85d2..0000000
--- a/src/3rdparty/v8/src/arm/lithium-codegen-arm.h
+++ /dev/null
@@ -1,329 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_LITHIUM_CODEGEN_ARM_H_
-#define V8_ARM_LITHIUM_CODEGEN_ARM_H_
-
-#include "arm/lithium-arm.h"
-#include "arm/lithium-gap-resolver-arm.h"
-#include "deoptimizer.h"
-#include "safepoint-table.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-class SafepointGenerator;
-
-class LCodeGen BASE_EMBEDDED {
- public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : chunk_(chunk),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
- deoptimizations_(4),
- deoptimization_literals_(8),
- inlined_function_count_(0),
- scope_(info->scope()),
- status_(UNUSED),
- deferred_(8),
- osr_pc_offset_(-1),
- resolver_(this) {
- PopulateDeoptimizationLiteralsWithInlinedFunctions();
- }
-
-
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
-
- // Support for converting LOperands to assembler types.
- // LOperand must be a register.
- Register ToRegister(LOperand* op) const;
-
- // LOperand is loaded into scratch, unless already a register.
- Register EmitLoadRegister(LOperand* op, Register scratch);
-
- // LOperand must be a double register.
- DoubleRegister ToDoubleRegister(LOperand* op) const;
-
- // LOperand is loaded into dbl_scratch, unless already a double register.
- DoubleRegister EmitLoadDoubleRegister(LOperand* op,
- SwVfpRegister flt_scratch,
- DoubleRegister dbl_scratch);
- int ToInteger32(LConstantOperand* op) const;
- Operand ToOperand(LOperand* op);
- MemOperand ToMemOperand(LOperand* op) const;
- // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
- MemOperand ToHighMemOperand(LOperand* op) const;
-
- // Try to generate code for the entire chunk, but it may fail if the
- // chunk contains constructs we cannot handle. Returns true if the
- // code generation attempt succeeded.
- bool GenerateCode();
-
- // Finish the code by setting stack height, safepoint, and bailout
- // information on it.
- void FinishCode(Handle<Code> code);
-
- // Deferred code support.
- template<int T>
- void DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
- Token::Value op);
- void DoDeferredNumberTagD(LNumberTagD* instr);
- void DoDeferredNumberTagI(LNumberTagI* instr);
- void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
- void DoDeferredStackCheck(LGoto* instr);
- void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
- void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
-
- // Parallel move support.
- void DoParallelMove(LParallelMove* move);
-
- // Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment, Translation* translation);
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- int strict_mode_flag() const {
- return info()->is_strict() ? kStrictMode : kNonStrictMode;
- }
-
- LChunk* chunk() const { return chunk_; }
- Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk_->graph(); }
-
- Register scratch0() { return r9; }
- DwVfpRegister double_scratch0() { return d0; }
-
- int GetNextEmittedBlock(int block);
- LInstruction* GetNextInstruction();
-
- void EmitClassOfTest(Label* if_true,
- Label* if_false,
- Handle<String> class_name,
- Register input,
- Register temporary,
- Register temporary2);
-
- int StackSlotCount() const { return chunk()->spill_slot_count(); }
- int ParameterCount() const { return scope()->num_parameters(); }
-
- void Abort(const char* format, ...);
- void Comment(const char* format, ...);
-
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
-
- // Code generation passes. Returns true if code generation should
- // continue.
- bool GeneratePrologue();
- bool GenerateBody();
- bool GenerateDeferredCode();
- bool GenerateSafepointTable();
-
- void CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr);
- void CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr);
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
- LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, num_arguments, instr);
- }
-
- // Generate a direct call to a known function. Expects the function
- // to be in edi.
- void CallKnownFunction(Handle<JSFunction> function,
- int arity,
- LInstruction* instr);
-
- void LoadHeapObject(Register result, Handle<HeapObject> object);
-
- void RegisterLazyDeoptimization(LInstruction* instr);
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
- void DeoptimizeIf(Condition cc, LEnvironment* environment);
-
- void AddToTranslation(Translation* translation,
- LOperand* op,
- bool is_tagged);
- void PopulateDeoptimizationData(Handle<Code> code);
- int DefineDeoptimizationLiteral(Handle<Object> literal);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
-
- Register ToRegister(int index) const;
- DoubleRegister ToDoubleRegister(int index) const;
-
- // Specific math operations - used from DoUnaryMathOperation.
- void EmitIntegerMathAbs(LUnaryMathOperation* instr);
- void DoMathAbs(LUnaryMathOperation* instr);
- void DoMathFloor(LUnaryMathOperation* instr);
- void DoMathRound(LUnaryMathOperation* instr);
- void DoMathSqrt(LUnaryMathOperation* instr);
- void DoMathPowHalf(LUnaryMathOperation* instr);
- void DoMathLog(LUnaryMathOperation* instr);
- void DoMathCos(LUnaryMathOperation* instr);
- void DoMathSin(LUnaryMathOperation* instr);
-
- // Support for recording safepoint and position information.
- void RecordSafepoint(LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- int deoptimization_index);
- void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
- void RecordSafepoint(int deoptimization_index);
- void RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- int deoptimization_index);
- void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
- int arguments,
- int deoptimization_index);
- void RecordPosition(int position);
- int LastSafepointEnd() {
- return static_cast<int>(safepoints_.GetPcAfterGap());
- }
-
- static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
- void EmitBranch(int left_block, int right_block, Condition cc);
- void EmitCmpI(LOperand* left, LOperand* right);
- void EmitNumberUntagD(Register input,
- DoubleRegister result,
- LEnvironment* env);
-
- // Emits optimized code for typeof x == "y". Modifies input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label, Label* false_label,
- Register input, Handle<String> type_name);
-
- // Emits optimized code for %_IsObject(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsObject(Register input,
- Register temp1,
- Register temp2,
- Label* is_not_object,
- Label* is_object);
-
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp1, Register temp2);
-
- void EmitLoadField(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name);
-
- LChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
-
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
- ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<Handle<Object> > deoptimization_literals_;
- int inlined_function_count_;
- Scope* const scope_;
- Status status_;
- TranslationBuffer translations_;
- ZoneList<LDeferredCode*> deferred_;
- int osr_pc_offset_;
-
- // Builder that keeps track of safepoints in the code. The table
- // itself is emitted at the end of the generated code.
- SafepointTableBuilder safepoints_;
-
- // Compiler from a set of parallel moves to a sequential list of moves.
- LGapResolver resolver_;
-
- friend class LDeferredCode;
- friend class LEnvironment;
- friend class SafepointGenerator;
- DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-
-class LDeferredCode: public ZoneObject {
- public:
- explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen), external_exit_(NULL) {
- codegen->AddDeferredCode(this);
- }
-
- virtual ~LDeferredCode() { }
- virtual void Generate() = 0;
-
- void SetExit(Label *exit) { external_exit_ = exit; }
- Label* entry() { return &entry_; }
- Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
-
- protected:
- LCodeGen* codegen() const { return codegen_; }
- MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
- LCodeGen* codegen_;
- Label entry_;
- Label exit_;
- Label* external_exit_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_LITHIUM_CODEGEN_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.cc b/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.cc
deleted file mode 100644
index 02608a6..0000000
--- a/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.cc
+++ /dev/null
@@ -1,305 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "arm/lithium-gap-resolver-arm.h"
-#include "arm/lithium-codegen-arm.h"
-
-namespace v8 {
-namespace internal {
-
-static const Register kSavedValueRegister = { 9 };
-static const DoubleRegister kSavedDoubleValueRegister = { 0 };
-
-LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner), moves_(32), root_index_(0), in_cycle_(false),
- saved_destination_(NULL) { }
-
-
-void LGapResolver::Resolve(LParallelMove* parallel_move) {
- ASSERT(moves_.is_empty());
- // Build up a worklist of moves.
- BuildInitialMoveList(parallel_move);
-
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands move = moves_[i];
- // Skip constants to perform them last. They don't block other moves
- // and skipping such moves with register destinations keeps those
- // registers free for the whole algorithm.
- if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
- root_index_ = i; // Any cycle is found when by reaching this move again.
- PerformMove(i);
- if (in_cycle_) {
- RestoreValue();
- }
- }
- }
-
- // Perform the moves with constant sources.
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated()) {
- ASSERT(moves_[i].source()->IsConstantOperand());
- EmitMove(i);
- }
- }
-
- moves_.Rewind(0);
-}
-
-
-void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
- // Perform a linear sweep of the moves to add them to the initial list of
- // moves to perform, ignoring any move that is redundant (the source is
- // the same as the destination, the destination is ignored and
- // unallocated, or the move was already eliminated).
- const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) moves_.Add(move);
- }
- Verify();
-}
-
-
-void LGapResolver::PerformMove(int index) {
- // Each call to this function performs a move and deletes it from the move
- // graph. We first recursively perform any move blocking this one. We
- // mark a move as "pending" on entry to PerformMove in order to detect
- // cycles in the move graph.
-
- // We can only find a cycle, when doing a depth-first traversal of moves,
- // be encountering the starting move again. So by spilling the source of
- // the starting move, we break the cycle. All moves are then unblocked,
- // and the starting move is completed by writing the spilled value to
- // its destination. All other moves from the spilled source have been
- // completed prior to breaking the cycle.
- // An additional complication is that moves to MemOperands with large
- // offsets (more than 1K or 4K) require us to spill this spilled value to
- // the stack, to free up the register.
- ASSERT(!moves_[index].IsPending());
- ASSERT(!moves_[index].IsRedundant());
-
- // Clear this move's destination to indicate a pending move. The actual
- // destination is saved in a stack allocated local. Multiple moves can
- // be pending because this function is recursive.
- ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
- LOperand* destination = moves_[index].destination();
- moves_[index].set_destination(NULL);
-
- // Perform a depth-first traversal of the move graph to resolve
- // dependencies. Any unperformed, unpending move with a source the same
- // as this one's destination blocks this one so recursively perform all
- // such moves.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination) && !other_move.IsPending()) {
- PerformMove(i);
- // If there is a blocking, pending move it must be moves_[root_index_]
- // and all other moves with the same source as moves_[root_index_] are
- // sucessfully executed (because they are cycle-free) by this loop.
- }
- }
-
- // We are about to resolve this move and don't need it marked as
- // pending, so restore its destination.
- moves_[index].set_destination(destination);
-
- // The move may be blocked on a pending move, which must be the starting move.
- // In this case, we have a cycle, and we save the source of this move to
- // a scratch register to break it.
- LMoveOperands other_move = moves_[root_index_];
- if (other_move.Blocks(destination)) {
- ASSERT(other_move.IsPending());
- BreakCycle(index);
- return;
- }
-
- // This move is no longer blocked.
- EmitMove(index);
-}
-
-
-void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_ASSERTS
- // No operand should be the destination for more than one move.
- for (int i = 0; i < moves_.length(); ++i) {
- LOperand* destination = moves_[i].destination();
- for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
- }
- }
-#endif
-}
-
-#define __ ACCESS_MASM(cgen_->masm())
-
-void LGapResolver::BreakCycle(int index) {
- // We save in a register the value that should end up in the source of
- // moves_[root_index]. After performing all moves in the tree rooted
- // in that move, we save the value to that source.
- ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
- ASSERT(!in_cycle_);
- in_cycle_ = true;
- LOperand* source = moves_[index].source();
- saved_destination_ = moves_[index].destination();
- if (source->IsRegister()) {
- __ mov(kSavedValueRegister, cgen_->ToRegister(source));
- } else if (source->IsStackSlot()) {
- __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
- } else if (source->IsDoubleRegister()) {
- __ vmov(kSavedDoubleValueRegister, cgen_->ToDoubleRegister(source));
- } else if (source->IsDoubleStackSlot()) {
- __ vldr(kSavedDoubleValueRegister, cgen_->ToMemOperand(source));
- } else {
- UNREACHABLE();
- }
- // This move will be done by restoring the saved value to the destination.
- moves_[index].Eliminate();
-}
-
-
-void LGapResolver::RestoreValue() {
- ASSERT(in_cycle_);
- ASSERT(saved_destination_ != NULL);
-
- // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister.
- if (saved_destination_->IsRegister()) {
- __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
- } else if (saved_destination_->IsStackSlot()) {
- __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
- } else if (saved_destination_->IsDoubleRegister()) {
- __ vmov(cgen_->ToDoubleRegister(saved_destination_),
- kSavedDoubleValueRegister);
- } else if (saved_destination_->IsDoubleStackSlot()) {
- __ vstr(kSavedDoubleValueRegister,
- cgen_->ToMemOperand(saved_destination_));
- } else {
- UNREACHABLE();
- }
-
- in_cycle_ = false;
- saved_destination_ = NULL;
-}
-
-
-void LGapResolver::EmitMove(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
-
- if (source->IsRegister()) {
- Register source_register = cgen_->ToRegister(source);
- if (destination->IsRegister()) {
- __ mov(cgen_->ToRegister(destination), source_register);
- } else {
- ASSERT(destination->IsStackSlot());
- __ str(source_register, cgen_->ToMemOperand(destination));
- }
-
- } else if (source->IsStackSlot()) {
- MemOperand source_operand = cgen_->ToMemOperand(source);
- if (destination->IsRegister()) {
- __ ldr(cgen_->ToRegister(destination), source_operand);
- } else {
- ASSERT(destination->IsStackSlot());
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- if (in_cycle_) {
- if (!destination_operand.OffsetIsUint12Encodable()) {
- // ip is overwritten while saving the value to the destination.
- // Therefore we can't use ip. It is OK if the read from the source
- // destroys ip, since that happens before the value is read.
- __ vldr(kSavedDoubleValueRegister.low(), source_operand);
- __ vstr(kSavedDoubleValueRegister.low(), destination_operand);
- } else {
- __ ldr(ip, source_operand);
- __ str(ip, destination_operand);
- }
- } else {
- __ ldr(kSavedValueRegister, source_operand);
- __ str(kSavedValueRegister, destination_operand);
- }
- }
-
- } else if (source->IsConstantOperand()) {
- Operand source_operand = cgen_->ToOperand(source);
- if (destination->IsRegister()) {
- __ mov(cgen_->ToRegister(destination), source_operand);
- } else {
- ASSERT(destination->IsStackSlot());
- ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- __ mov(kSavedValueRegister, source_operand);
- __ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
- }
-
- } else if (source->IsDoubleRegister()) {
- DoubleRegister source_register = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- __ vmov(cgen_->ToDoubleRegister(destination), source_register);
- } else {
- ASSERT(destination->IsDoubleStackSlot());
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- __ vstr(source_register, destination_operand);
- }
-
- } else if (source->IsDoubleStackSlot()) {
- MemOperand source_operand = cgen_->ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
- __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
- } else {
- ASSERT(destination->IsDoubleStackSlot());
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- if (in_cycle_) {
- // kSavedDoubleValueRegister was used to break the cycle,
- // but kSavedValueRegister is free.
- MemOperand source_high_operand =
- cgen_->ToHighMemOperand(source);
- MemOperand destination_high_operand =
- cgen_->ToHighMemOperand(destination);
- __ ldr(kSavedValueRegister, source_operand);
- __ str(kSavedValueRegister, destination_operand);
- __ ldr(kSavedValueRegister, source_high_operand);
- __ str(kSavedValueRegister, destination_high_operand);
- } else {
- __ vldr(kSavedDoubleValueRegister, source_operand);
- __ vstr(kSavedDoubleValueRegister, destination_operand);
- }
- }
- } else {
- UNREACHABLE();
- }
-
- moves_[index].Eliminate();
-}
-
-
-#undef __
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.h b/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.h
deleted file mode 100644
index 334d292..0000000
--- a/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.h
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
-#define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
-
-#include "v8.h"
-
-#include "lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-class LGapResolver;
-
-class LGapResolver BASE_EMBEDDED {
- public:
-
- explicit LGapResolver(LCodeGen* owner);
-
- // Resolve a set of parallel moves, emitting assembler instructions.
- void Resolve(LParallelMove* parallel_move);
-
- private:
- // Build the initial list of moves.
- void BuildInitialMoveList(LParallelMove* parallel_move);
-
- // Perform the move at the moves_ index in question (possibly requiring
- // other moves to satisfy dependencies).
- void PerformMove(int index);
-
- // If a cycle is found in the series of moves, save the blocking value to
- // a scratch register. The cycle must be found by hitting the root of the
- // depth-first search.
- void BreakCycle(int index);
-
- // After a cycle has been resolved, restore the value from the scratch
- // register to its proper destination.
- void RestoreValue();
-
- // Emit a move and remove it from the move graph.
- void EmitMove(int index);
-
- // Verify the move list before performing moves.
- void Verify();
-
- LCodeGen* cgen_;
-
- // List of moves not yet resolved.
- ZoneList<LMoveOperands> moves_;
-
- int root_index_;
- bool in_cycle_;
- LOperand* saved_destination_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/macro-assembler-arm.cc b/src/3rdparty/v8/src/arm/macro-assembler-arm.cc
deleted file mode 100644
index 2ba98f4..0000000
--- a/src/3rdparty/v8/src/arm/macro-assembler-arm.cc
+++ /dev/null
@@ -1,2939 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <limits.h> // For LONG_MIN, LONG_MAX.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "bootstrapper.h"
-#include "codegen-inl.h"
-#include "debug.h"
-#include "runtime.h"
-
-namespace v8 {
-namespace internal {
-
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
- : Assembler(arg_isolate, buffer, size),
- generating_stub_(false),
- allow_stub_calls_(true) {
- if (isolate() != NULL) {
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
- }
-}
-
-
-// We always generate arm code, never thumb code, even if V8 is compiled to
-// thumb, so we require inter-working support
-#if defined(__thumb__) && !defined(USE_THUMB_INTERWORK)
-#error "flag -mthumb-interwork missing"
-#endif
-
-
-// We do not support thumb inter-working with an arm architecture not supporting
-// the blx instruction (below v5t). If you know what CPU you are compiling for
-// you can use -march=armv7 or similar.
-#if defined(USE_THUMB_INTERWORK) && !defined(CAN_USE_THUMB_INSTRUCTIONS)
-# error "For thumb inter-working we require an architecture which supports blx"
-#endif
-
-
-// Using bx does not yield better code, so use it only when required
-#if defined(USE_THUMB_INTERWORK)
-#define USE_BX 1
-#endif
-
-
-void MacroAssembler::Jump(Register target, Condition cond) {
-#if USE_BX
- bx(target, cond);
-#else
- mov(pc, Operand(target), LeaveCC, cond);
-#endif
-}
-
-
-void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
- Condition cond) {
-#if USE_BX
- mov(ip, Operand(target, rmode), LeaveCC, cond);
- bx(ip, cond);
-#else
- mov(pc, Operand(target, rmode), LeaveCC, cond);
-#endif
-}
-
-
-void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
- Condition cond) {
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
-}
-
-
-void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- // 'code' is always generated ARM code, never THUMB code
- Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
-}
-
-
-int MacroAssembler::CallSize(Register target, Condition cond) {
-#if USE_BLX
- return kInstrSize;
-#else
- return 2 * kInstrSize;
-#endif
-}
-
-
-void MacroAssembler::Call(Register target, Condition cond) {
- // Block constant pool for the call instruction sequence.
- BlockConstPoolScope block_const_pool(this);
-#ifdef DEBUG
- int pre_position = pc_offset();
-#endif
-
-#if USE_BLX
- blx(target, cond);
-#else
- // set lr for return at current pc + 8
- mov(lr, Operand(pc), LeaveCC, cond);
- mov(pc, Operand(target), LeaveCC, cond);
-#endif
-
-#ifdef DEBUG
- int post_position = pc_offset();
- CHECK_EQ(pre_position + CallSize(target, cond), post_position);
-#endif
-}
-
-
-int MacroAssembler::CallSize(
- intptr_t target, RelocInfo::Mode rmode, Condition cond) {
- int size = 2 * kInstrSize;
- Instr mov_instr = cond | MOV | LeaveCC;
- if (!Operand(target, rmode).is_single_instruction(mov_instr)) {
- size += kInstrSize;
- }
- return size;
-}
-
-
-void MacroAssembler::Call(
- intptr_t target, RelocInfo::Mode rmode, Condition cond) {
- // Block constant pool for the call instruction sequence.
- BlockConstPoolScope block_const_pool(this);
-#ifdef DEBUG
- int pre_position = pc_offset();
-#endif
-
-#if USE_BLX
- // On ARMv5 and after the recommended call sequence is:
- // ldr ip, [pc, #...]
- // blx ip
-
- // Statement positions are expected to be recorded when the target
- // address is loaded. The mov method will automatically record
- // positions when pc is the target, since this is not the case here
- // we have to do it explicitly.
- positions_recorder()->WriteRecordedPositions();
-
- mov(ip, Operand(target, rmode), LeaveCC, cond);
- blx(ip, cond);
-
- ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
-#else
- // Set lr for return at current pc + 8.
- mov(lr, Operand(pc), LeaveCC, cond);
- // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
- mov(pc, Operand(target, rmode), LeaveCC, cond);
- ASSERT(kCallTargetAddressOffset == kInstrSize);
-#endif
-
-#ifdef DEBUG
- int post_position = pc_offset();
- CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
-#endif
-}
-
-
-int MacroAssembler::CallSize(
- byte* target, RelocInfo::Mode rmode, Condition cond) {
- return CallSize(reinterpret_cast<intptr_t>(target), rmode);
-}
-
-
-void MacroAssembler::Call(
- byte* target, RelocInfo::Mode rmode, Condition cond) {
-#ifdef DEBUG
- int pre_position = pc_offset();
-#endif
-
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- Call(reinterpret_cast<intptr_t>(target), rmode, cond);
-
-#ifdef DEBUG
- int post_position = pc_offset();
- CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
-#endif
-}
-
-
-int MacroAssembler::CallSize(
- Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
- return CallSize(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
-}
-
-
-void MacroAssembler::Call(
- Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
-#ifdef DEBUG
- int pre_position = pc_offset();
-#endif
-
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- // 'code' is always generated ARM code, never THUMB code
- Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
-
-#ifdef DEBUG
- int post_position = pc_offset();
- CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
-#endif
-}
-
-
-void MacroAssembler::Ret(Condition cond) {
-#if USE_BX
- bx(lr, cond);
-#else
- mov(pc, Operand(lr), LeaveCC, cond);
-#endif
-}
-
-
-void MacroAssembler::Drop(int count, Condition cond) {
- if (count > 0) {
- add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
- }
-}
-
-
-void MacroAssembler::Ret(int drop, Condition cond) {
- Drop(drop, cond);
- Ret(cond);
-}
-
-
-void MacroAssembler::Swap(Register reg1,
- Register reg2,
- Register scratch,
- Condition cond) {
- if (scratch.is(no_reg)) {
- eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
- eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
- eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
- } else {
- mov(scratch, reg1, LeaveCC, cond);
- mov(reg1, reg2, LeaveCC, cond);
- mov(reg2, scratch, LeaveCC, cond);
- }
-}
-
-
-void MacroAssembler::Call(Label* target) {
- bl(target);
-}
-
-
-void MacroAssembler::Move(Register dst, Handle<Object> value) {
- mov(dst, Operand(value));
-}
-
-
-void MacroAssembler::Move(Register dst, Register src) {
- if (!dst.is(src)) {
- mov(dst, src);
- }
-}
-
-
-void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
- Condition cond) {
- if (!src2.is_reg() &&
- !src2.must_use_constant_pool() &&
- src2.immediate() == 0) {
- mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);
-
- } else if (!src2.is_single_instruction() &&
- !src2.must_use_constant_pool() &&
- CpuFeatures::IsSupported(ARMv7) &&
- IsPowerOf2(src2.immediate() + 1)) {
- ubfx(dst, src1, 0, WhichPowerOf2(src2.immediate() + 1), cond);
-
- } else {
- and_(dst, src1, src2, LeaveCC, cond);
- }
-}
-
-
-void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
- Condition cond) {
- ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
- int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
- and_(dst, src1, Operand(mask), LeaveCC, cond);
- if (lsb != 0) {
- mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
- }
- } else {
- ubfx(dst, src1, lsb, width, cond);
- }
-}
-
-
-void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
- Condition cond) {
- ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
- int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
- and_(dst, src1, Operand(mask), LeaveCC, cond);
- int shift_up = 32 - lsb - width;
- int shift_down = lsb + shift_up;
- if (shift_up != 0) {
- mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
- }
- if (shift_down != 0) {
- mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
- }
- } else {
- sbfx(dst, src1, lsb, width, cond);
- }
-}
-
-
-void MacroAssembler::Bfi(Register dst,
- Register src,
- Register scratch,
- int lsb,
- int width,
- Condition cond) {
- ASSERT(0 <= lsb && lsb < 32);
- ASSERT(0 <= width && width < 32);
- ASSERT(lsb + width < 32);
- ASSERT(!scratch.is(dst));
- if (width == 0) return;
- if (!CpuFeatures::IsSupported(ARMv7)) {
- int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
- bic(dst, dst, Operand(mask));
- and_(scratch, src, Operand((1 << width) - 1));
- mov(scratch, Operand(scratch, LSL, lsb));
- orr(dst, dst, scratch);
- } else {
- bfi(dst, src, lsb, width, cond);
- }
-}
-
-
-void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
- ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
- int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
- bic(dst, dst, Operand(mask));
- } else {
- bfc(dst, lsb, width, cond);
- }
-}
-
-
-void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
- Condition cond) {
- if (!CpuFeatures::IsSupported(ARMv7)) {
- ASSERT(!dst.is(pc) && !src.rm().is(pc));
- ASSERT((satpos >= 0) && (satpos <= 31));
-
- // These asserts are required to ensure compatibility with the ARMv7
- // implementation.
- ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
- ASSERT(src.rs().is(no_reg));
-
- Label done;
- int satval = (1 << satpos) - 1;
-
- if (cond != al) {
- b(NegateCondition(cond), &done); // Skip saturate if !condition.
- }
- if (!(src.is_reg() && dst.is(src.rm()))) {
- mov(dst, src);
- }
- tst(dst, Operand(~satval));
- b(eq, &done);
- mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, mi); // 0 if negative.
- mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
- bind(&done);
- } else {
- usat(dst, satpos, src, cond);
- }
-}
-
-
-void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
- // Empty the const pool.
- CheckConstPool(true, true);
- add(pc, pc, Operand(index,
- LSL,
- Instruction::kInstrSizeLog2 - kSmiTagSize));
- BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
- nop(); // Jump table alignment.
- for (int i = 0; i < targets.length(); i++) {
- b(targets[i]);
- }
-}
-
-
-void MacroAssembler::LoadRoot(Register destination,
- Heap::RootListIndex index,
- Condition cond) {
- ldr(destination, MemOperand(roots, index << kPointerSizeLog2), cond);
-}
-
-
-void MacroAssembler::StoreRoot(Register source,
- Heap::RootListIndex index,
- Condition cond) {
- str(source, MemOperand(roots, index << kPointerSizeLog2), cond);
-}
-
-
-void MacroAssembler::RecordWriteHelper(Register object,
- Register address,
- Register scratch) {
- if (emit_debug_code()) {
- // Check that the object is not in new space.
- Label not_in_new_space;
- InNewSpace(object, scratch, ne, &not_in_new_space);
- Abort("new-space object passed to RecordWriteHelper");
- bind(&not_in_new_space);
- }
-
- // Calculate page address.
- Bfc(object, 0, kPageSizeBits);
-
- // Calculate region number.
- Ubfx(address, address, Page::kRegionSizeLog2,
- kPageSizeBits - Page::kRegionSizeLog2);
-
- // Mark region dirty.
- ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
- mov(ip, Operand(1));
- orr(scratch, scratch, Operand(ip, LSL, address));
- str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
-}
-
-
-void MacroAssembler::InNewSpace(Register object,
- Register scratch,
- Condition cond,
- Label* branch) {
- ASSERT(cond == eq || cond == ne);
- and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
- cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
- b(cond, branch);
-}
-
-
-// Will clobber 4 registers: object, offset, scratch, ip. The
-// register 'object' contains a heap object pointer. The heap object
-// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object,
- Operand offset,
- Register scratch0,
- Register scratch1) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are cp.
- ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
-
- Label done;
-
- // First, test that the object is not in the new space. We cannot set
- // region marks for new space pages.
- InNewSpace(object, scratch0, eq, &done);
-
- // Add offset into the object.
- add(scratch0, object, offset);
-
- // Record the actual write.
- RecordWriteHelper(object, scratch0, scratch1);
-
- bind(&done);
-
- // Clobber all input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(object, Operand(BitCast<int32_t>(kZapValue)));
- mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
- mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
- }
-}
-
-
-// Will clobber 4 registers: object, address, scratch, ip. The
-// register 'object' contains a heap object pointer. The heap object
-// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register scratch) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are cp.
- ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
-
- Label done;
-
- // First, test that the object is not in the new space. We cannot set
- // region marks for new space pages.
- InNewSpace(object, scratch, eq, &done);
-
- // Record the actual write.
- RecordWriteHelper(object, address, scratch);
-
- bind(&done);
-
- // Clobber all input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(object, Operand(BitCast<int32_t>(kZapValue)));
- mov(address, Operand(BitCast<int32_t>(kZapValue)));
- mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
- }
-}
-
-
-// Push and pop all registers that can hold pointers.
-void MacroAssembler::PushSafepointRegisters() {
- // Safepoints expect a block of contiguous register values starting with r0:
- ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
- // Safepoints expect a block of kNumSafepointRegisters values on the
- // stack, so adjust the stack for unsaved registers.
- const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- ASSERT(num_unsaved >= 0);
- sub(sp, sp, Operand(num_unsaved * kPointerSize));
- stm(db_w, sp, kSafepointSavedRegisters);
-}
-
-
-void MacroAssembler::PopSafepointRegisters() {
- const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- ldm(ia_w, sp, kSafepointSavedRegisters);
- add(sp, sp, Operand(num_unsaved * kPointerSize));
-}
-
-
-void MacroAssembler::PushSafepointRegistersAndDoubles() {
- PushSafepointRegisters();
- sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
- kDoubleSize));
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
- vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
- }
-}
-
-
-void MacroAssembler::PopSafepointRegistersAndDoubles() {
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
- vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
- }
- add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
- kDoubleSize));
- PopSafepointRegisters();
-}
-
-void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
- Register dst) {
- str(src, SafepointRegistersAndDoublesSlot(dst));
-}
-
-
-void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
- str(src, SafepointRegisterSlot(dst));
-}
-
-
-void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
- ldr(dst, SafepointRegisterSlot(src));
-}
-
-
-int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
- // The registers are pushed starting with the highest encoding,
- // which means that lowest encodings are closest to the stack pointer.
- ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
- return reg_code;
-}
-
-
-MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
- return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
-}
-
-
-MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
- // General purpose registers are pushed last on the stack.
- int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize;
- int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
- return MemOperand(sp, doubles_size + register_offset);
-}
-
-
-void MacroAssembler::Ldrd(Register dst1, Register dst2,
- const MemOperand& src, Condition cond) {
- ASSERT(src.rm().is(no_reg));
- ASSERT(!dst1.is(lr)); // r14.
- ASSERT_EQ(0, dst1.code() % 2);
- ASSERT_EQ(dst1.code() + 1, dst2.code());
-
- // Generate two ldr instructions if ldrd is not available.
- if (CpuFeatures::IsSupported(ARMv7)) {
- CpuFeatures::Scope scope(ARMv7);
- ldrd(dst1, dst2, src, cond);
- } else {
- MemOperand src2(src);
- src2.set_offset(src2.offset() + 4);
- if (dst1.is(src.rn())) {
- ldr(dst2, src2, cond);
- ldr(dst1, src, cond);
- } else {
- ldr(dst1, src, cond);
- ldr(dst2, src2, cond);
- }
- }
-}
-
-
-void MacroAssembler::Strd(Register src1, Register src2,
- const MemOperand& dst, Condition cond) {
- ASSERT(dst.rm().is(no_reg));
- ASSERT(!src1.is(lr)); // r14.
- ASSERT_EQ(0, src1.code() % 2);
- ASSERT_EQ(src1.code() + 1, src2.code());
-
- // Generate two str instructions if strd is not available.
- if (CpuFeatures::IsSupported(ARMv7)) {
- CpuFeatures::Scope scope(ARMv7);
- strd(src1, src2, dst, cond);
- } else {
- MemOperand dst2(dst);
- dst2.set_offset(dst2.offset() + 4);
- str(src1, dst, cond);
- str(src2, dst2, cond);
- }
-}
-
-
-void MacroAssembler::ClearFPSCRBits(const uint32_t bits_to_clear,
- const Register scratch,
- const Condition cond) {
- vmrs(scratch, cond);
- bic(scratch, scratch, Operand(bits_to_clear), LeaveCC, cond);
- vmsr(scratch, cond);
-}
-
-
-void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Compare and move FPSCR flags to the normal condition flags.
- VFPCompareAndLoadFlags(src1, src2, pc, cond);
-}
-
-void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
- const double src2,
- const Condition cond) {
- // Compare and move FPSCR flags to the normal condition flags.
- VFPCompareAndLoadFlags(src1, src2, pc, cond);
-}
-
-
-void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Register fpscr_flags,
- const Condition cond) {
- // Compare and load FPSCR.
- vcmp(src1, src2, cond);
- vmrs(fpscr_flags, cond);
-}
-
-void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
- const double src2,
- const Register fpscr_flags,
- const Condition cond) {
- // Compare and load FPSCR.
- vcmp(src1, src2, cond);
- vmrs(fpscr_flags, cond);
-}
-
-
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
- // r0-r3: preserved
- stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
- mov(ip, Operand(Smi::FromInt(type)));
- push(ip);
- mov(ip, Operand(CodeObject()));
- push(ip);
- add(fp, sp, Operand(3 * kPointerSize)); // Adjust FP to point to saved FP.
-}
-
-
-void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- // r0: preserved
- // r1: preserved
- // r2: preserved
-
- // Drop the execution stack down to the frame pointer and restore
- // the caller frame pointer and return address.
- mov(sp, fp);
- ldm(ia_w, sp, fp.bit() | lr.bit());
-}
-
-
-void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
- // Setup the frame structure on the stack.
- ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
- ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
- ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
- Push(lr, fp);
- mov(fp, Operand(sp)); // Setup new frame pointer.
- // Reserve room for saved entry sp and code object.
- sub(sp, sp, Operand(2 * kPointerSize));
- if (emit_debug_code()) {
- mov(ip, Operand(0));
- str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
- }
- mov(ip, Operand(CodeObject()));
- str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
-
- // Save the frame pointer and the context in top.
- mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
- str(fp, MemOperand(ip));
- mov(ip, Operand(ExternalReference(Isolate::k_context_address, isolate())));
- str(cp, MemOperand(ip));
-
- // Optionally save all double registers.
- if (save_doubles) {
- sub(sp, sp, Operand(DwVfpRegister::kNumRegisters * kDoubleSize));
- const int offset = -2 * kPointerSize;
- for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- vstr(reg, fp, offset - ((i + 1) * kDoubleSize));
- }
- // Note that d0 will be accessible at
- // fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
- // since the sp slot and code slot were pushed after the fp.
- }
-
- // Reserve place for the return address and stack space and align the frame
- // preparing for calling the runtime function.
- const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
- sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
- if (frame_alignment > 0) {
- ASSERT(IsPowerOf2(frame_alignment));
- and_(sp, sp, Operand(-frame_alignment));
- }
-
- // Set the exit frame sp value to point just before the return address
- // location.
- add(ip, sp, Operand(kPointerSize));
- str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
-}
-
-
-void MacroAssembler::InitializeNewString(Register string,
- Register length,
- Heap::RootListIndex map_index,
- Register scratch1,
- Register scratch2) {
- mov(scratch1, Operand(length, LSL, kSmiTagSize));
- LoadRoot(scratch2, map_index);
- str(scratch1, FieldMemOperand(string, String::kLengthOffset));
- mov(scratch1, Operand(String::kEmptyHashField));
- str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
- str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
-}
-
-
-int MacroAssembler::ActivationFrameAlignment() {
-#if defined(V8_HOST_ARCH_ARM)
- // Running on the real platform. Use the alignment as mandated by the local
- // environment.
- // Note: This will break if we ever start generating snapshots on one ARM
- // platform for another ARM platform with a different alignment.
- return OS::ActivationFrameAlignment();
-#else // defined(V8_HOST_ARCH_ARM)
- // If we are using the simulator then we should always align to the expected
- // alignment. As the simulator is used to generate snapshots we do not know
- // if the target platform will need alignment, so this is controlled from a
- // flag.
- return FLAG_sim_stack_alignment;
-#endif // defined(V8_HOST_ARCH_ARM)
-}
-
-
-void MacroAssembler::LeaveExitFrame(bool save_doubles,
- Register argument_count) {
- // Optionally restore all double registers.
- if (save_doubles) {
- for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- const int offset = -2 * kPointerSize;
- vldr(reg, fp, offset - ((i + 1) * kDoubleSize));
- }
- }
-
- // Clear top frame.
- mov(r3, Operand(0, RelocInfo::NONE));
- mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
- str(r3, MemOperand(ip));
-
- // Restore current context from top and clear it in debug mode.
- mov(ip, Operand(ExternalReference(Isolate::k_context_address, isolate())));
- ldr(cp, MemOperand(ip));
-#ifdef DEBUG
- str(r3, MemOperand(ip));
-#endif
-
- // Tear down the exit frame, pop the arguments, and return.
- mov(sp, Operand(fp));
- ldm(ia_w, sp, fp.bit() | lr.bit());
- if (argument_count.is_valid()) {
- add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
- }
-}
-
-void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
-#if !defined(USE_ARM_EABI)
- UNREACHABLE();
-#else
- vmov(dst, r0, r1);
-#endif
-}
-
-
-void MacroAssembler::InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
- Label* done,
- InvokeFlag flag,
- CallWrapper* call_wrapper) {
- bool definitely_matches = false;
- Label regular_invoke;
-
- // Check whether the expected and actual arguments count match. If not,
- // setup registers according to contract with ArgumentsAdaptorTrampoline:
- // r0: actual arguments count
- // r1: function (passed through to callee)
- // r2: expected arguments count
- // r3: callee code entry
-
- // The code below is made a lot easier because the calling code already sets
- // up actual and expected registers according to the contract if values are
- // passed in registers.
- ASSERT(actual.is_immediate() || actual.reg().is(r0));
- ASSERT(expected.is_immediate() || expected.reg().is(r2));
- ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
-
- if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
- if (expected.immediate() == actual.immediate()) {
- definitely_matches = true;
- } else {
- mov(r0, Operand(actual.immediate()));
- const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
- if (expected.immediate() == sentinel) {
- // Don't worry about adapting arguments for builtins that
- // don't want that done. Skip adaption code by making it look
- // like we have a match between expected and actual number of
- // arguments.
- definitely_matches = true;
- } else {
- mov(r2, Operand(expected.immediate()));
- }
- }
- } else {
- if (actual.is_immediate()) {
- cmp(expected.reg(), Operand(actual.immediate()));
- b(eq, &regular_invoke);
- mov(r0, Operand(actual.immediate()));
- } else {
- cmp(expected.reg(), Operand(actual.reg()));
- b(eq, &regular_invoke);
- }
- }
-
- if (!definitely_matches) {
- if (!code_constant.is_null()) {
- mov(r3, Operand(code_constant));
- add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
- }
-
- Handle<Code> adaptor =
- isolate()->builtins()->ArgumentsAdaptorTrampoline();
- if (flag == CALL_FUNCTION) {
- if (call_wrapper != NULL) {
- call_wrapper->BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
- }
- Call(adaptor, RelocInfo::CODE_TARGET);
- if (call_wrapper != NULL) call_wrapper->AfterCall();
- b(done);
- } else {
- Jump(adaptor, RelocInfo::CODE_TARGET);
- }
- bind(&regular_invoke);
- }
-}
-
-
-void MacroAssembler::InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- CallWrapper* call_wrapper) {
- Label done;
-
- InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
- call_wrapper);
- if (flag == CALL_FUNCTION) {
- if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(code));
- Call(code);
- if (call_wrapper != NULL) call_wrapper->AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- Jump(code);
- }
-
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
-}
-
-
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag) {
- Label done;
-
- InvokePrologue(expected, actual, code, no_reg, &done, flag);
- if (flag == CALL_FUNCTION) {
- Call(code, rmode);
- } else {
- Jump(code, rmode);
- }
-
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
-}
-
-
-void MacroAssembler::InvokeFunction(Register fun,
- const ParameterCount& actual,
- InvokeFlag flag,
- CallWrapper* call_wrapper) {
- // Contract with called JS functions requires that function is passed in r1.
- ASSERT(fun.is(r1));
-
- Register expected_reg = r2;
- Register code_reg = r3;
-
- ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- ldr(expected_reg,
- FieldMemOperand(code_reg,
- SharedFunctionInfo::kFormalParameterCountOffset));
- mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
- ldr(code_reg,
- FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-
- ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper);
-}
-
-
-void MacroAssembler::InvokeFunction(JSFunction* function,
- const ParameterCount& actual,
- InvokeFlag flag) {
- ASSERT(function->is_compiled());
-
- // Get the function and setup the context.
- mov(r1, Operand(Handle<JSFunction>(function)));
- ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // Invoke the cached code.
- Handle<Code> code(function->code());
- ParameterCount expected(function->shared()->formal_parameter_count());
- if (V8::UseCrankshaft()) {
- // TODO(kasperl): For now, we always call indirectly through the
- // code field in the function to allow recompilation to take effect
- // without changing any of the call sites.
- ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- InvokeCode(r3, expected, actual, flag);
- } else {
- InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
- }
-}
-
-
-void MacroAssembler::IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail) {
- ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
- IsInstanceJSObjectType(map, scratch, fail);
-}
-
-
-void MacroAssembler::IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail) {
- ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE));
- b(lt, fail);
- cmp(scratch, Operand(LAST_JS_OBJECT_TYPE));
- b(gt, fail);
-}
-
-
-void MacroAssembler::IsObjectJSStringType(Register object,
- Register scratch,
- Label* fail) {
- ASSERT(kNotStringTag != 0);
-
- ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- tst(scratch, Operand(kIsNotStringMask));
- b(ne, fail);
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void MacroAssembler::DebugBreak() {
- ASSERT(allow_stub_calls());
- mov(r0, Operand(0, RelocInfo::NONE));
- mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
- CEntryStub ces(1);
- Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
-}
-#endif
-
-
-void MacroAssembler::PushTryHandler(CodeLocation try_location,
- HandlerType type) {
- // Adjust this code if not the case.
- ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
- // The pc (return address) is passed in register lr.
- if (try_location == IN_JAVASCRIPT) {
- if (type == TRY_CATCH_HANDLER) {
- mov(r3, Operand(StackHandler::TRY_CATCH));
- } else {
- mov(r3, Operand(StackHandler::TRY_FINALLY));
- }
- ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
- && StackHandlerConstants::kFPOffset == 2 * kPointerSize
- && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
- stm(db_w, sp, r3.bit() | fp.bit() | lr.bit());
- // Save the current handler as the next handler.
- mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
- ldr(r1, MemOperand(r3));
- ASSERT(StackHandlerConstants::kNextOffset == 0);
- push(r1);
- // Link this handler as the new current one.
- str(sp, MemOperand(r3));
- } else {
- // Must preserve r0-r4, r5-r7 are available.
- ASSERT(try_location == IN_JS_ENTRY);
- // The frame pointer does not point to a JS frame so we save NULL
- // for fp. We expect the code throwing an exception to check fp
- // before dereferencing it to restore the context.
- mov(ip, Operand(0, RelocInfo::NONE)); // To save a NULL frame pointer.
- mov(r6, Operand(StackHandler::ENTRY));
- ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
- && StackHandlerConstants::kFPOffset == 2 * kPointerSize
- && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
- stm(db_w, sp, r6.bit() | ip.bit() | lr.bit());
- // Save the current handler as the next handler.
- mov(r7, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
- ldr(r6, MemOperand(r7));
- ASSERT(StackHandlerConstants::kNextOffset == 0);
- push(r6);
- // Link this handler as the new current one.
- str(sp, MemOperand(r7));
- }
-}
-
-
-void MacroAssembler::PopTryHandler() {
- ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
- pop(r1);
- mov(ip, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
- add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
- str(r1, MemOperand(ip));
-}
-
-
-void MacroAssembler::Throw(Register value) {
- // r0 is expected to hold the exception.
- if (!value.is(r0)) {
- mov(r0, value);
- }
-
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
- // Drop the sp to the top of the handler.
- mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
- ldr(sp, MemOperand(r3));
-
- // Restore the next handler and frame pointer, discard handler state.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- pop(r2);
- str(r2, MemOperand(r3));
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
- ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
-
- // Before returning we restore the context from the frame pointer if
- // not NULL. The frame pointer is NULL in the exception handler of a
- // JS entry frame.
- cmp(fp, Operand(0, RelocInfo::NONE));
- // Set cp to NULL if fp is NULL.
- mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- // Restore cp otherwise.
- ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
-#ifdef DEBUG
- if (emit_debug_code()) {
- mov(lr, Operand(pc));
- }
-#endif
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
- pop(pc);
-}
-
-
-void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
- Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
- // r0 is expected to hold the exception.
- if (!value.is(r0)) {
- mov(r0, value);
- }
-
- // Drop sp to the top stack handler.
- mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
- ldr(sp, MemOperand(r3));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
- bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- ldr(r2, MemOperand(sp, kStateOffset));
- cmp(r2, Operand(StackHandler::ENTRY));
- b(eq, &done);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- ldr(sp, MemOperand(sp, kNextOffset));
- jmp(&loop);
- bind(&done);
-
- // Set the top handler address to next handler past the current ENTRY handler.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- pop(r2);
- str(r2, MemOperand(r3));
-
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(
- Isolate::k_external_caught_exception_address, isolate());
- mov(r0, Operand(false, RelocInfo::NONE));
- mov(r2, Operand(external_caught));
- str(r0, MemOperand(r2));
-
- // Set pending exception and r0 to out of memory exception.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- mov(r2, Operand(ExternalReference(Isolate::k_pending_exception_address,
- isolate())));
- str(r0, MemOperand(r2));
- }
-
- // Stack layout at this point. See also StackHandlerConstants.
- // sp -> state (ENTRY)
- // fp
- // lr
-
- // Discard handler state (r2 is not used) and restore frame pointer.
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
- ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
- // Before returning we restore the context from the frame pointer if
- // not NULL. The frame pointer is NULL in the exception handler of a
- // JS entry frame.
- cmp(fp, Operand(0, RelocInfo::NONE));
- // Set cp to NULL if fp is NULL.
- mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- // Restore cp otherwise.
- ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
-#ifdef DEBUG
- if (emit_debug_code()) {
- mov(lr, Operand(pc));
- }
-#endif
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
- pop(pc);
-}
-
-
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss) {
- Label same_contexts;
-
- ASSERT(!holder_reg.is(scratch));
- ASSERT(!holder_reg.is(ip));
- ASSERT(!scratch.is(ip));
-
- // Load current lexical context from the stack frame.
- ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // In debug mode, make sure the lexical context is set.
-#ifdef DEBUG
- cmp(scratch, Operand(0, RelocInfo::NONE));
- Check(ne, "we should not have an empty lexical context");
-#endif
-
- // Load the global context of the current context.
- int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- ldr(scratch, FieldMemOperand(scratch, offset));
- ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
-
- // Check the context is a global context.
- if (emit_debug_code()) {
- // TODO(119): avoid push(holder_reg)/pop(holder_reg)
- // Cannot use ip as a temporary in this verification code. Due to the fact
- // that ip is clobbered as part of cmp with an object Operand.
- push(holder_reg); // Temporarily save holder on the stack.
- // Read the first word and compare to the global_context_map.
- ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
- cmp(holder_reg, ip);
- Check(eq, "JSGlobalObject::global_context should be a global context.");
- pop(holder_reg); // Restore holder.
- }
-
- // Check if both contexts are the same.
- ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
- cmp(scratch, Operand(ip));
- b(eq, &same_contexts);
-
- // Check the context is a global context.
- if (emit_debug_code()) {
- // TODO(119): avoid push(holder_reg)/pop(holder_reg)
- // Cannot use ip as a temporary in this verification code. Due to the fact
- // that ip is clobbered as part of cmp with an object Operand.
- push(holder_reg); // Temporarily save holder on the stack.
- mov(holder_reg, ip); // Move ip to its holding place.
- LoadRoot(ip, Heap::kNullValueRootIndex);
- cmp(holder_reg, ip);
- Check(ne, "JSGlobalProxy::context() should not be null.");
-
- ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
- cmp(holder_reg, ip);
- Check(eq, "JSGlobalObject::global_context should be a global context.");
- // Restore ip is not needed. ip is reloaded below.
- pop(holder_reg); // Restore holder.
- // Restore ip to holder's context.
- ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
- }
-
- // Check that the security token in the calling global object is
- // compatible with the security token in the receiving global
- // object.
- int token_offset = Context::kHeaderSize +
- Context::SECURITY_TOKEN_INDEX * kPointerSize;
-
- ldr(scratch, FieldMemOperand(scratch, token_offset));
- ldr(ip, FieldMemOperand(ip, token_offset));
- cmp(scratch, Operand(ip));
- b(ne, miss);
-
- bind(&same_contexts);
-}
-
-
-void MacroAssembler::AllocateInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- mov(result, Operand(0x7091));
- mov(scratch1, Operand(0x7191));
- mov(scratch2, Operand(0x7291));
- }
- jmp(gc_required);
- return;
- }
-
- ASSERT(!result.is(scratch1));
- ASSERT(!result.is(scratch2));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!scratch1.is(ip));
- ASSERT(!scratch2.is(ip));
-
- // Make object size into bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- object_size *= kPointerSize;
- }
- ASSERT_EQ(0, object_size & kObjectAlignmentMask);
-
- // Check relative positions of allocation top and limit addresses.
- // The values must be adjacent in memory to allow the use of LDM.
- // Also, assert that the registers are numbered such that the values
- // are loaded in the correct order.
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
- intptr_t top =
- reinterpret_cast<intptr_t>(new_space_allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
- ASSERT((limit - top) == kPointerSize);
- ASSERT(result.code() < ip.code());
-
- // Set up allocation top address and object size registers.
- Register topaddr = scratch1;
- Register obj_size_reg = scratch2;
- mov(topaddr, Operand(new_space_allocation_top));
- mov(obj_size_reg, Operand(object_size));
-
- // This code stores a temporary value in ip. This is OK, as the code below
- // does not need ip for implicit literal generation.
- if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into ip.
- ldm(ia, topaddr, result.bit() | ip.bit());
- } else {
- if (emit_debug_code()) {
- // Assert that result actually contains top on entry. ip is used
- // immediately below so this use of ip does not cause difference with
- // respect to register content between debug and release mode.
- ldr(ip, MemOperand(topaddr));
- cmp(result, ip);
- Check(eq, "Unexpected allocation top");
- }
- // Load allocation limit into ip. Result already contains allocation top.
- ldr(ip, MemOperand(topaddr, limit - top));
- }
-
- // Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top.
- add(scratch2, result, Operand(obj_size_reg), SetCC);
- b(cs, gc_required);
- cmp(scratch2, Operand(ip));
- b(hi, gc_required);
- str(scratch2, MemOperand(topaddr));
-
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- add(result, result, Operand(kHeapObjectTag));
- }
-}
-
-
-void MacroAssembler::AllocateInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- mov(result, Operand(0x7091));
- mov(scratch1, Operand(0x7191));
- mov(scratch2, Operand(0x7291));
- }
- jmp(gc_required);
- return;
- }
-
- // Assert that the register arguments are different and that none of
- // them are ip. ip is used explicitly in the code generated below.
- ASSERT(!result.is(scratch1));
- ASSERT(!result.is(scratch2));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!result.is(ip));
- ASSERT(!scratch1.is(ip));
- ASSERT(!scratch2.is(ip));
-
- // Check relative positions of allocation top and limit addresses.
- // The values must be adjacent in memory to allow the use of LDM.
- // Also, assert that the registers are numbered such that the values
- // are loaded in the correct order.
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
- intptr_t top =
- reinterpret_cast<intptr_t>(new_space_allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
- ASSERT((limit - top) == kPointerSize);
- ASSERT(result.code() < ip.code());
-
- // Set up allocation top address.
- Register topaddr = scratch1;
- mov(topaddr, Operand(new_space_allocation_top));
-
- // This code stores a temporary value in ip. This is OK, as the code below
- // does not need ip for implicit literal generation.
- if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into ip.
- ldm(ia, topaddr, result.bit() | ip.bit());
- } else {
- if (emit_debug_code()) {
- // Assert that result actually contains top on entry. ip is used
- // immediately below so this use of ip does not cause difference with
- // respect to register content between debug and release mode.
- ldr(ip, MemOperand(topaddr));
- cmp(result, ip);
- Check(eq, "Unexpected allocation top");
- }
- // Load allocation limit into ip. Result already contains allocation top.
- ldr(ip, MemOperand(topaddr, limit - top));
- }
-
- // Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top. Object size may be in words so a shift is
- // required to get the number of bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
- } else {
- add(scratch2, result, Operand(object_size), SetCC);
- }
- b(cs, gc_required);
- cmp(scratch2, Operand(ip));
- b(hi, gc_required);
-
- // Update allocation top. result temporarily holds the new top.
- if (emit_debug_code()) {
- tst(scratch2, Operand(kObjectAlignmentMask));
- Check(eq, "Unaligned allocation in new space");
- }
- str(scratch2, MemOperand(topaddr));
-
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- add(result, result, Operand(kHeapObjectTag));
- }
-}
-
-
-void MacroAssembler::UndoAllocationInNewSpace(Register object,
- Register scratch) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Make sure the object has no tag before resetting top.
- and_(object, object, Operand(~kHeapObjectTagMask));
-#ifdef DEBUG
- // Check that the object un-allocated is below the current top.
- mov(scratch, Operand(new_space_allocation_top));
- ldr(scratch, MemOperand(scratch));
- cmp(object, scratch);
- Check(lt, "Undo allocation of non allocated memory");
-#endif
- // Write the address of the object to un-allocate as the current top.
- mov(scratch, Operand(new_space_allocation_top));
- str(object, MemOperand(scratch));
-}
-
-
-void MacroAssembler::AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
- add(scratch1, scratch1,
- Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
- and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate two-byte string in new space.
- AllocateInNewSpace(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
- ASSERT(kCharSize == 1);
- add(scratch1, length,
- Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
- and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate ASCII string in new space.
- AllocateInNewSpace(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kAsciiStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- InitializeNewString(result,
- length,
- Heap::kConsStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- InitializeNewString(result,
- length,
- Heap::kConsAsciiStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::CompareObjectType(Register object,
- Register map,
- Register type_reg,
- InstanceType type) {
- ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareInstanceType(map, type_reg, type);
-}
-
-
-void MacroAssembler::CompareInstanceType(Register map,
- Register type_reg,
- InstanceType type) {
- ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- cmp(type_reg, Operand(type));
-}
-
-
-void MacroAssembler::CompareRoot(Register obj,
- Heap::RootListIndex index) {
- ASSERT(!obj.is(ip));
- LoadRoot(ip, index);
- cmp(obj, ip);
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* fail,
- bool is_heap_object) {
- if (!is_heap_object) {
- JumpIfSmi(obj, fail);
- }
- ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- mov(ip, Operand(map));
- cmp(scratch, ip);
- b(ne, fail);
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Register scratch,
- Heap::RootListIndex index,
- Label* fail,
- bool is_heap_object) {
- if (!is_heap_object) {
- JumpIfSmi(obj, fail);
- }
- ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- LoadRoot(ip, index);
- cmp(scratch, ip);
- b(ne, fail);
-}
-
-
-void MacroAssembler::TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss) {
- // Check that the receiver isn't a smi.
- JumpIfSmi(function, miss);
-
- // Check that the function really is a function. Load map into result reg.
- CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
- b(ne, miss);
-
- // Make sure that the function has an instance prototype.
- Label non_instance;
- ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
- b(ne, &non_instance);
-
- // Get the prototype or initial map from the function.
- ldr(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it and
- // simply miss the cache instead. This will allow us to allocate a
- // prototype object on-demand in the runtime system.
- LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- cmp(result, ip);
- b(eq, miss);
-
- // If the function does not have an initial map, we're done.
- Label done;
- CompareObjectType(result, scratch, scratch, MAP_TYPE);
- b(ne, &done);
-
- // Get the prototype from the initial map.
- ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
- jmp(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
-
- // All done.
- bind(&done);
-}
-
-
-void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
-}
-
-
-void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
-}
-
-
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
- Object* result;
- { MaybeObject* maybe_result = stub->TryGetCode();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
- return result;
-}
-
-
-static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
- return ref0.address() - ref1.address();
-}
-
-
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
- ExternalReference function, int stack_space) {
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address();
- const int kNextOffset = 0;
- const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(),
- next_address);
- const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(),
- next_address);
-
- // Allocate HandleScope in callee-save registers.
- mov(r7, Operand(next_address));
- ldr(r4, MemOperand(r7, kNextOffset));
- ldr(r5, MemOperand(r7, kLimitOffset));
- ldr(r6, MemOperand(r7, kLevelOffset));
- add(r6, r6, Operand(1));
- str(r6, MemOperand(r7, kLevelOffset));
-
- // Native call returns to the DirectCEntry stub which redirects to the
- // return address pushed on stack (could have moved after GC).
- // DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub;
- stub.GenerateCall(this, function);
-
- Label promote_scheduled_exception;
- Label delete_allocated_handles;
- Label leave_exit_frame;
-
- // If result is non-zero, dereference to get the result value
- // otherwise set it to undefined.
- cmp(r0, Operand(0));
- LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- ldr(r0, MemOperand(r0), ne);
-
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- str(r4, MemOperand(r7, kNextOffset));
- if (emit_debug_code()) {
- ldr(r1, MemOperand(r7, kLevelOffset));
- cmp(r1, r6);
- Check(eq, "Unexpected level after return from api call");
- }
- sub(r6, r6, Operand(1));
- str(r6, MemOperand(r7, kLevelOffset));
- ldr(ip, MemOperand(r7, kLimitOffset));
- cmp(r5, ip);
- b(ne, &delete_allocated_handles);
-
- // Check if the function scheduled an exception.
- bind(&leave_exit_frame);
- LoadRoot(r4, Heap::kTheHoleValueRootIndex);
- mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
- ldr(r5, MemOperand(ip));
- cmp(r4, r5);
- b(ne, &promote_scheduled_exception);
-
- // LeaveExitFrame expects unwind space to be in a register.
- mov(r4, Operand(stack_space));
- LeaveExitFrame(false, r4);
- mov(pc, lr);
-
- bind(&promote_scheduled_exception);
- MaybeObject* result
- = TryTailCallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
- 0,
- 1);
- if (result->IsFailure()) {
- return result;
- }
-
- // HandleScope limit has changed. Delete allocated extensions.
- bind(&delete_allocated_handles);
- str(r5, MemOperand(r7, kLimitOffset));
- mov(r4, r0);
- PrepareCallCFunction(1, r5);
- mov(r0, Operand(ExternalReference::isolate_address()));
- CallCFunction(
- ExternalReference::delete_handle_scope_extensions(isolate()), 1);
- mov(r0, r4);
- jmp(&leave_exit_frame);
-
- return result;
-}
-
-
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- add(sp, sp, Operand(num_arguments * kPointerSize));
- }
- LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-}
-
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
- // If the hash field contains an array index pick it out. The assert checks
- // that the constants for the maximum number of digits for an array index
- // cached in the hash field and the number of bits reserved for it does not
- // conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- STATIC_ASSERT(kSmiTag == 0);
- Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
- mov(index, Operand(hash, LSL, kSmiTagSize));
-}
-
-
-void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
- Register outHighReg,
- Register outLowReg) {
- // ARMv7 VFP3 instructions to implement integer to double conversion.
- mov(r7, Operand(inReg, ASR, kSmiTagSize));
- vmov(s15, r7);
- vcvt_f64_s32(d7, s15);
- vmov(outLowReg, outHighReg, d7);
-}
-
-
-void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
- DwVfpRegister result,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- SwVfpRegister scratch3,
- Label* not_number,
- ObjectToDoubleFlags flags) {
- Label done;
- if ((flags & OBJECT_NOT_SMI) == 0) {
- Label not_smi;
- JumpIfNotSmi(object, &not_smi);
- // Remove smi tag and convert to double.
- mov(scratch1, Operand(object, ASR, kSmiTagSize));
- vmov(scratch3, scratch1);
- vcvt_f64_s32(result, scratch3);
- b(&done);
- bind(&not_smi);
- }
- // Check for heap number and load double value from it.
- ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
- sub(scratch2, object, Operand(kHeapObjectTag));
- cmp(scratch1, heap_number_map);
- b(ne, not_number);
- if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
- // If exponent is all ones the number is either a NaN or +/-Infinity.
- ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- Sbfx(scratch1,
- scratch1,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // All-one value sign extend to -1.
- cmp(scratch1, Operand(-1));
- b(eq, not_number);
- }
- vldr(result, scratch2, HeapNumber::kValueOffset);
- bind(&done);
-}
-
-
-void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
- DwVfpRegister value,
- Register scratch1,
- SwVfpRegister scratch2) {
- mov(scratch1, Operand(smi, ASR, kSmiTagSize));
- vmov(scratch2, scratch1);
- vcvt_f64_s32(value, scratch2);
-}
-
-
-// Tries to get a signed int32 out of a double precision floating point heap
-// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
-// 32bits signed integer range.
-void MacroAssembler::ConvertToInt32(Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- DwVfpRegister double_scratch,
- Label *not_int32) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- sub(scratch, source, Operand(kHeapObjectTag));
- vldr(double_scratch, scratch, HeapNumber::kValueOffset);
- vcvt_s32_f64(double_scratch.low(), double_scratch);
- vmov(dest, double_scratch.low());
- // Signed vcvt instruction will saturate to the minimum (0x80000000) or
- // maximun (0x7fffffff) signed 32bits integer when the double is out of
- // range. When substracting one, the minimum signed integer becomes the
- // maximun signed integer.
- sub(scratch, dest, Operand(1));
- cmp(scratch, Operand(LONG_MAX - 1));
- // If equal then dest was LONG_MAX, if greater dest was LONG_MIN.
- b(ge, not_int32);
- } else {
- // This code is faster for doubles that are in the ranges -0x7fffffff to
- // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to
- // the range of signed int32 values that are not Smis. Jumps to the label
- // 'not_int32' if the double isn't in the range -0x80000000.0 to
- // 0x80000000.0 (excluding the endpoints).
- Label right_exponent, done;
- // Get exponent word.
- ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- Ubfx(scratch2,
- scratch,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // Load dest with zero. We use this either for the final shift or
- // for the answer.
- mov(dest, Operand(0, RelocInfo::NONE));
- // Check whether the exponent matches a 32 bit signed int that is not a Smi.
- // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
- // the exponent that we are fastest at and also the highest exponent we can
- // handle here.
- const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
- // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
- // split it up to avoid a constant pool entry. You can't do that in general
- // for cmp because of the overflow flag, but we know the exponent is in the
- // range 0-2047 so there is no overflow.
- int fudge_factor = 0x400;
- sub(scratch2, scratch2, Operand(fudge_factor));
- cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
- // If we have a match of the int32-but-not-Smi exponent then skip some
- // logic.
- b(eq, &right_exponent);
- // If the exponent is higher than that then go to slow case. This catches
- // numbers that don't fit in a signed int32, infinities and NaNs.
- b(gt, not_int32);
-
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
- // it rounds to zero.
- const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
- sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
- // Dest already has a Smi zero.
- b(lt, &done);
-
- // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to
- // get how much to shift down.
- rsb(dest, scratch2, Operand(30));
-
- bind(&right_exponent);
- // Get the top bits of the mantissa.
- and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
- // Put back the implicit 1.
- orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just orred in the implicit bit so that took care of one and
- // we want to leave the sign bit 0 so we subtract 2 bits from the shift
- // distance.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- mov(scratch2, Operand(scratch2, LSL, shift_distance));
- // Put sign in zero flag.
- tst(scratch, Operand(HeapNumber::kSignMask));
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the last 10 bits.
- orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
- // Move down according to the exponent.
- mov(dest, Operand(scratch, LSR, dest));
- // Fix sign if sign bit was set.
- rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne);
- bind(&done);
- }
-}
-
-
-void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
- SwVfpRegister result,
- DwVfpRegister double_input,
- Register scratch1,
- Register scratch2,
- CheckForInexactConversion check_inexact) {
- ASSERT(CpuFeatures::IsSupported(VFP3));
- CpuFeatures::Scope scope(VFP3);
- Register prev_fpscr = scratch1;
- Register scratch = scratch2;
-
- int32_t check_inexact_conversion =
- (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0;
-
- // Set custom FPCSR:
- // - Set rounding mode.
- // - Clear vfp cumulative exception flags.
- // - Make sure Flush-to-zero mode control bit is unset.
- vmrs(prev_fpscr);
- bic(scratch,
- prev_fpscr,
- Operand(kVFPExceptionMask |
- check_inexact_conversion |
- kVFPRoundingModeMask |
- kVFPFlushToZeroMask));
- // 'Round To Nearest' is encoded by 0b00 so no bits need to be set.
- if (rounding_mode != kRoundToNearest) {
- orr(scratch, scratch, Operand(rounding_mode));
- }
- vmsr(scratch);
-
- // Convert the argument to an integer.
- vcvt_s32_f64(result,
- double_input,
- (rounding_mode == kRoundToZero) ? kDefaultRoundToZero
- : kFPSCRRounding);
-
- // Retrieve FPSCR.
- vmrs(scratch);
- // Restore FPSCR.
- vmsr(prev_fpscr);
- // Check for vfp exceptions.
- tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion));
-}
-
-
-void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
- Register input_high,
- Register input_low,
- Register scratch) {
- Label done, normal_exponent, restore_sign;
-
- // Extract the biased exponent in result.
- Ubfx(result,
- input_high,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // Check for Infinity and NaNs, which should return 0.
- cmp(result, Operand(HeapNumber::kExponentMask));
- mov(result, Operand(0), LeaveCC, eq);
- b(eq, &done);
-
- // Express exponent as delta to (number of mantissa bits + 31).
- sub(result,
- result,
- Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31),
- SetCC);
-
- // If the delta is strictly positive, all bits would be shifted away,
- // which means that we can return 0.
- b(le, &normal_exponent);
- mov(result, Operand(0));
- b(&done);
-
- bind(&normal_exponent);
- const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
- // Calculate shift.
- add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC);
-
- // Save the sign.
- Register sign = result;
- result = no_reg;
- and_(sign, input_high, Operand(HeapNumber::kSignMask));
-
- // Set the implicit 1 before the mantissa part in input_high.
- orr(input_high,
- input_high,
- Operand(1 << HeapNumber::kMantissaBitsInTopWord));
- // Shift the mantissa bits to the correct position.
- // We don't need to clear non-mantissa bits as they will be shifted away.
- // If they weren't, it would mean that the answer is in the 32bit range.
- mov(input_high, Operand(input_high, LSL, scratch));
-
- // Replace the shifted bits with bits from the lower mantissa word.
- Label pos_shift, shift_done;
- rsb(scratch, scratch, Operand(32), SetCC);
- b(&pos_shift, ge);
-
- // Negate scratch.
- rsb(scratch, scratch, Operand(0));
- mov(input_low, Operand(input_low, LSL, scratch));
- b(&shift_done);
-
- bind(&pos_shift);
- mov(input_low, Operand(input_low, LSR, scratch));
-
- bind(&shift_done);
- orr(input_high, input_high, Operand(input_low));
- // Restore sign if necessary.
- cmp(sign, Operand(0));
- result = sign;
- sign = no_reg;
- rsb(result, input_high, Operand(0), LeaveCC, ne);
- mov(result, input_high, LeaveCC, eq);
- bind(&done);
-}
-
-
-void MacroAssembler::EmitECMATruncate(Register result,
- DwVfpRegister double_input,
- SwVfpRegister single_scratch,
- Register scratch,
- Register input_high,
- Register input_low) {
- CpuFeatures::Scope scope(VFP3);
- ASSERT(!input_high.is(result));
- ASSERT(!input_low.is(result));
- ASSERT(!input_low.is(input_high));
- ASSERT(!scratch.is(result) &&
- !scratch.is(input_high) &&
- !scratch.is(input_low));
- ASSERT(!single_scratch.is(double_input.low()) &&
- !single_scratch.is(double_input.high()));
-
- Label done;
-
- // Clear cumulative exception flags.
- ClearFPSCRBits(kVFPExceptionMask, scratch);
- // Try a conversion to a signed integer.
- vcvt_s32_f64(single_scratch, double_input);
- vmov(result, single_scratch);
- // Retrieve he FPSCR.
- vmrs(scratch);
- // Check for overflow and NaNs.
- tst(scratch, Operand(kVFPOverflowExceptionBit |
- kVFPUnderflowExceptionBit |
- kVFPInvalidOpExceptionBit));
- // If we had no exceptions we are done.
- b(eq, &done);
-
- // Load the double value and perform a manual truncation.
- vmov(input_low, input_high, double_input);
- EmitOutOfInt32RangeTruncate(result,
- input_high,
- input_low,
- scratch);
- bind(&done);
-}
-
-
-void MacroAssembler::GetLeastBitsFromSmi(Register dst,
- Register src,
- int num_least_bits) {
- if (CpuFeatures::IsSupported(ARMv7)) {
- ubfx(dst, src, kSmiTagSize, num_least_bits);
- } else {
- mov(dst, Operand(src, ASR, kSmiTagSize));
- and_(dst, dst, Operand((1 << num_least_bits) - 1));
- }
-}
-
-
-void MacroAssembler::GetLeastBitsFromInt32(Register dst,
- Register src,
- int num_least_bits) {
- and_(dst, src, Operand((1 << num_least_bits) - 1));
-}
-
-
-void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
- // All parameters are on the stack. r0 has the return value after call.
-
- // If the expected number of arguments of the runtime function is
- // constant, we check that the actual number of arguments match the
- // expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- mov(r0, Operand(num_arguments));
- mov(r1, Operand(ExternalReference(f, isolate())));
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- mov(r0, Operand(function->nargs));
- mov(r1, Operand(ExternalReference(function, isolate())));
- CEntryStub stub(1);
- stub.SaveDoubles();
- CallStub(&stub);
-}
-
-
-void MacroAssembler::CallExternalReference(const ExternalReference& ext,
- int num_arguments) {
- mov(r0, Operand(num_arguments));
- mov(r1, Operand(ext));
-
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- mov(r0, Operand(num_arguments));
- JumpToExternalReference(ext);
-}
-
-
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- mov(r0, Operand(num_arguments));
- return TryJumpToExternalReference(ext);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
-}
-
-
-void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
-#if defined(__thumb__)
- // Thumb mode builtin.
- ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
-#endif
- mov(r1, Operand(builtin));
- CEntryStub stub(1);
- Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
- const ExternalReference& builtin) {
-#if defined(__thumb__)
- // Thumb mode builtin.
- ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
-#endif
- mov(r1, Operand(builtin));
- CEntryStub stub(1);
- return TryTailCallStub(&stub);
-}
-
-
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flags,
- CallWrapper* call_wrapper) {
- GetBuiltinEntry(r2, id);
- if (flags == CALL_JS) {
- if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(r2));
- Call(r2);
- if (call_wrapper != NULL) call_wrapper->AfterCall();
- } else {
- ASSERT(flags == JUMP_JS);
- Jump(r2);
- }
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- Builtins::JavaScript id) {
- // Load the builtins object into target register.
- ldr(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
- // Load the JavaScript builtin function from the builtins object.
- ldr(target, FieldMemOperand(target,
- JSBuiltinsObject::OffsetOfFunctionWithId(id)));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(r1));
- GetBuiltinFunction(r1, id);
- // Load the code entry point from the builtins object.
- ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-}
-
-
-void MacroAssembler::SetCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- if (FLAG_native_code_counters && counter->Enabled()) {
- mov(scratch1, Operand(value));
- mov(scratch2, Operand(ExternalReference(counter)));
- str(scratch1, MemOperand(scratch2));
- }
-}
-
-
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- mov(scratch2, Operand(ExternalReference(counter)));
- ldr(scratch1, MemOperand(scratch2));
- add(scratch1, scratch1, Operand(value));
- str(scratch1, MemOperand(scratch2));
- }
-}
-
-
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- mov(scratch2, Operand(ExternalReference(counter)));
- ldr(scratch1, MemOperand(scratch2));
- sub(scratch1, scratch1, Operand(value));
- str(scratch1, MemOperand(scratch2));
- }
-}
-
-
-void MacroAssembler::Assert(Condition cond, const char* msg) {
- if (emit_debug_code())
- Check(cond, msg);
-}
-
-
-void MacroAssembler::AssertRegisterIsRoot(Register reg,
- Heap::RootListIndex index) {
- if (emit_debug_code()) {
- LoadRoot(ip, index);
- cmp(reg, ip);
- Check(eq, "Register did not match expected root");
- }
-}
-
-
-void MacroAssembler::AssertFastElements(Register elements) {
- if (emit_debug_code()) {
- ASSERT(!elements.is(ip));
- Label ok;
- push(elements);
- ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- cmp(elements, ip);
- b(eq, &ok);
- LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
- cmp(elements, ip);
- b(eq, &ok);
- Abort("JSObject with fast elements map has slow elements");
- bind(&ok);
- pop(elements);
- }
-}
-
-
-void MacroAssembler::Check(Condition cond, const char* msg) {
- Label L;
- b(cond, &L);
- Abort(msg);
- // will not return here
- bind(&L);
-}
-
-
-void MacroAssembler::Abort(const char* msg) {
- Label abort_start;
- bind(&abort_start);
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
-#ifdef DEBUG
- if (msg != NULL) {
- RecordComment("Abort message: ");
- RecordComment(msg);
- }
-#endif
- // Disable stub call restrictions to always allow calls to abort.
- AllowStubCallsScope allow_scope(this, true);
-
- mov(r0, Operand(p0));
- push(r0);
- mov(r0, Operand(Smi::FromInt(p1 - p0)));
- push(r0);
- CallRuntime(Runtime::kAbort, 2);
- // will not return here
- if (is_const_pool_blocked()) {
- // If the calling code cares about the exact number of
- // instructions generated, we insert padding here to keep the size
- // of the Abort macro constant.
- static const int kExpectedAbortInstructions = 10;
- int abort_instructions = InstructionsGeneratedSince(&abort_start);
- ASSERT(abort_instructions <= kExpectedAbortInstructions);
- while (abort_instructions++ < kExpectedAbortInstructions) {
- nop();
- }
- }
-}
-
-
-void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
- if (context_chain_length > 0) {
- // Move up the chain of contexts to the context containing the slot.
- ldr(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
- // Load the function context (which is the incoming, outer context).
- ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
- for (int i = 1; i < context_chain_length; i++) {
- ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
- ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
- }
- } else {
- // Slot is in the current function context. Move it into the
- // destination register in case we store into it (the write barrier
- // cannot be allowed to destroy the context in esi).
- mov(dst, cp);
- }
-
- // We should not have found a 'with' context by walking the context chain
- // (i.e., the static scope chain and runtime context chain do not agree).
- // A variable occurring in such a scope should have slot type LOOKUP and
- // not CONTEXT.
- if (emit_debug_code()) {
- ldr(ip, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- cmp(dst, ip);
- Check(eq, "Yo dawg, I heard you liked function contexts "
- "so I put function contexts in all your contexts");
- }
-}
-
-
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- ldr(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
- ldr(function, FieldMemOperand(function,
- GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
- ldr(function, MemOperand(function, Context::SlotOffset(index)));
-}
-
-
-void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
- Register map,
- Register scratch) {
- // Load the initial map. The global functions all have initial maps.
- ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (emit_debug_code()) {
- Label ok, fail;
- CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false);
- b(&ok);
- bind(&fail);
- Abort("Global functions must have initial map");
- bind(&ok);
- }
-}
-
-
-void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
- Register reg,
- Register scratch,
- Label* not_power_of_two_or_zero) {
- sub(scratch, reg, Operand(1), SetCC);
- b(mi, not_power_of_two_or_zero);
- tst(scratch, reg);
- b(ne, not_power_of_two_or_zero);
-}
-
-
-void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
- Register reg,
- Register scratch,
- Label* zero_and_neg,
- Label* not_power_of_two) {
- sub(scratch, reg, Operand(1), SetCC);
- b(mi, zero_and_neg);
- tst(scratch, reg);
- b(ne, not_power_of_two);
-}
-
-
-void MacroAssembler::JumpIfNotBothSmi(Register reg1,
- Register reg2,
- Label* on_not_both_smi) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(reg1, Operand(kSmiTagMask));
- tst(reg2, Operand(kSmiTagMask), eq);
- b(ne, on_not_both_smi);
-}
-
-
-void MacroAssembler::JumpIfEitherSmi(Register reg1,
- Register reg2,
- Label* on_either_smi) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(reg1, Operand(kSmiTagMask));
- tst(reg2, Operand(kSmiTagMask), ne);
- b(eq, on_either_smi);
-}
-
-
-void MacroAssembler::AbortIfSmi(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Assert(ne, "Operand is a smi");
-}
-
-
-void MacroAssembler::AbortIfNotSmi(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Assert(eq, "Operand is not smi");
-}
-
-
-void MacroAssembler::AbortIfNotString(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Assert(ne, "Operand is not a string");
- push(object);
- ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
- pop(object);
- Assert(lo, "Operand is not a string");
-}
-
-
-
-void MacroAssembler::AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message) {
- CompareRoot(src, root_value_index);
- Assert(eq, message);
-}
-
-
-void MacroAssembler::JumpIfNotHeapNumber(Register object,
- Register heap_number_map,
- Register scratch,
- Label* on_not_heap_number) {
- ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- cmp(scratch, heap_number_map);
- b(ne, on_not_heap_number);
-}
-
-
-void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- // Test that both first and second are sequential ASCII strings.
- // Assume that they are non-smis.
- ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
- ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
- ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
-
- JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
- scratch2,
- scratch1,
- scratch2,
- failure);
-}
-
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- // Check that neither is a smi.
- STATIC_ASSERT(kSmiTag == 0);
- and_(scratch1, first, Operand(second));
- tst(scratch1, Operand(kSmiTagMask));
- b(eq, failure);
- JumpIfNonSmisNotBothSequentialAsciiStrings(first,
- second,
- scratch1,
- scratch2,
- failure);
-}
-
-
-// Allocates a heap number or jumps to the need_gc label if the young space
-// is full and a scavenge is needed.
-void MacroAssembler::AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* gc_required) {
- // Allocate an object in the heap for the heap number and tag it as a heap
- // object.
- AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Store heap number map in the allocated object.
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
-}
-
-
-void MacroAssembler::AllocateHeapNumberWithValue(Register result,
- DwVfpRegister value,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* gc_required) {
- AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
- sub(scratch1, result, Operand(kHeapObjectTag));
- vstr(value, scratch1, HeapNumber::kValueOffset);
-}
-
-
-// Copies a fixed number of fields of heap objects from src to dst.
-void MacroAssembler::CopyFields(Register dst,
- Register src,
- RegList temps,
- int field_count) {
- // At least one bit set in the first 15 registers.
- ASSERT((temps & ((1 << 15) - 1)) != 0);
- ASSERT((temps & dst.bit()) == 0);
- ASSERT((temps & src.bit()) == 0);
- // Primitive implementation using only one temporary register.
-
- Register tmp = no_reg;
- // Find a temp register in temps list.
- for (int i = 0; i < 15; i++) {
- if ((temps & (1 << i)) != 0) {
- tmp.set_code(i);
- break;
- }
- }
- ASSERT(!tmp.is(no_reg));
-
- for (int i = 0; i < field_count; i++) {
- ldr(tmp, FieldMemOperand(src, i * kPointerSize));
- str(tmp, FieldMemOperand(dst, i * kPointerSize));
- }
-}
-
-
-void MacroAssembler::CopyBytes(Register src,
- Register dst,
- Register length,
- Register scratch) {
- Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
-
- // Align src before copying in word size chunks.
- bind(&align_loop);
- cmp(length, Operand(0));
- b(eq, &done);
- bind(&align_loop_1);
- tst(src, Operand(kPointerSize - 1));
- b(eq, &word_loop);
- ldrb(scratch, MemOperand(src, 1, PostIndex));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- sub(length, length, Operand(1), SetCC);
- b(ne, &byte_loop_1);
-
- // Copy bytes in word size chunks.
- bind(&word_loop);
- if (emit_debug_code()) {
- tst(src, Operand(kPointerSize - 1));
- Assert(eq, "Expecting alignment for CopyBytes");
- }
- cmp(length, Operand(kPointerSize));
- b(lt, &byte_loop);
- ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
-#if CAN_USE_UNALIGNED_ACCESSES
- str(scratch, MemOperand(dst, kPointerSize, PostIndex));
-#else
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
-#endif
- sub(length, length, Operand(kPointerSize));
- b(&word_loop);
-
- // Copy the last bytes if any left.
- bind(&byte_loop);
- cmp(length, Operand(0));
- b(eq, &done);
- bind(&byte_loop_1);
- ldrb(scratch, MemOperand(src, 1, PostIndex));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- sub(length, length, Operand(1), SetCC);
- b(ne, &byte_loop_1);
- bind(&done);
-}
-
-
-void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
- Register source, // Input.
- Register scratch) {
- ASSERT(!zeros.is(source) || !source.is(scratch));
- ASSERT(!zeros.is(scratch));
- ASSERT(!scratch.is(ip));
- ASSERT(!source.is(ip));
- ASSERT(!zeros.is(ip));
-#ifdef CAN_USE_ARMV5_INSTRUCTIONS
- clz(zeros, source); // This instruction is only supported after ARM5.
-#else
- mov(zeros, Operand(0, RelocInfo::NONE));
- Move(scratch, source);
- // Top 16.
- tst(scratch, Operand(0xffff0000));
- add(zeros, zeros, Operand(16), LeaveCC, eq);
- mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
- // Top 8.
- tst(scratch, Operand(0xff000000));
- add(zeros, zeros, Operand(8), LeaveCC, eq);
- mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
- // Top 4.
- tst(scratch, Operand(0xf0000000));
- add(zeros, zeros, Operand(4), LeaveCC, eq);
- mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
- // Top 2.
- tst(scratch, Operand(0xc0000000));
- add(zeros, zeros, Operand(2), LeaveCC, eq);
- mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
- // Top bit.
- tst(scratch, Operand(0x80000000u));
- add(zeros, zeros, Operand(1), LeaveCC, eq);
-#endif
-}
-
-
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- and_(scratch1, first, Operand(kFlatAsciiStringMask));
- and_(scratch2, second, Operand(kFlatAsciiStringMask));
- cmp(scratch1, Operand(kFlatAsciiStringTag));
- // Ignore second test if first test failed.
- cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
- b(ne, failure);
-}
-
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure) {
- int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- and_(scratch, type, Operand(kFlatAsciiStringMask));
- cmp(scratch, Operand(kFlatAsciiStringTag));
- b(ne, failure);
-}
-
-static const int kRegisterPassedArguments = 4;
-
-void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
- int frame_alignment = ActivationFrameAlignment();
-
- // Up to four simple arguments are passed in registers r0..r3.
- int stack_passed_arguments = (num_arguments <= kRegisterPassedArguments) ?
- 0 : num_arguments - kRegisterPassedArguments;
- if (frame_alignment > kPointerSize) {
- // Make stack end at alignment and make room for num_arguments - 4 words
- // and the original value of sp.
- mov(scratch, sp);
- sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
- ASSERT(IsPowerOf2(frame_alignment));
- and_(sp, sp, Operand(-frame_alignment));
- str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
- } else {
- sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
- }
-}
-
-
-void MacroAssembler::CallCFunction(ExternalReference function,
- int num_arguments) {
- CallCFunctionHelper(no_reg, function, ip, num_arguments);
-}
-
-void MacroAssembler::CallCFunction(Register function,
- Register scratch,
- int num_arguments) {
- CallCFunctionHelper(function,
- ExternalReference::the_hole_value_location(isolate()),
- scratch,
- num_arguments);
-}
-
-
-void MacroAssembler::CallCFunctionHelper(Register function,
- ExternalReference function_reference,
- Register scratch,
- int num_arguments) {
- // Make sure that the stack is aligned before calling a C function unless
- // running in the simulator. The simulator has its own alignment check which
- // provides more information.
-#if defined(V8_HOST_ARCH_ARM)
- if (emit_debug_code()) {
- int frame_alignment = OS::ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment > kPointerSize) {
- ASSERT(IsPowerOf2(frame_alignment));
- Label alignment_as_expected;
- tst(sp, Operand(frame_alignment_mask));
- b(eq, &alignment_as_expected);
- // Don't use Check here, as it will call Runtime_Abort possibly
- // re-entering here.
- stop("Unexpected alignment");
- bind(&alignment_as_expected);
- }
- }
-#endif
-
- // Just call directly. The function called cannot cause a GC, or
- // allow preemption, so the return address in the link register
- // stays correct.
- if (function.is(no_reg)) {
- mov(scratch, Operand(function_reference));
- function = scratch;
- }
- Call(function);
- int stack_passed_arguments = (num_arguments <= kRegisterPassedArguments) ?
- 0 : num_arguments - kRegisterPassedArguments;
- if (OS::ActivationFrameAlignment() > kPointerSize) {
- ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
- } else {
- add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
- }
-}
-
-
-void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
- Register result) {
- const uint32_t kLdrOffsetMask = (1 << 12) - 1;
- const int32_t kPCRegOffset = 2 * kPointerSize;
- ldr(result, MemOperand(ldr_location));
- if (emit_debug_code()) {
- // Check that the instruction is a ldr reg, [pc + offset] .
- and_(result, result, Operand(kLdrPCPattern));
- cmp(result, Operand(kLdrPCPattern));
- Check(eq, "The instruction to patch should be a load from pc.");
- // Result was clobbered. Restore it.
- ldr(result, MemOperand(ldr_location));
- }
- // Get the address of the constant.
- and_(result, result, Operand(kLdrOffsetMask));
- add(result, ldr_location, Operand(result));
- add(result, result, Operand(kPCRegOffset));
-}
-
-
-CodePatcher::CodePatcher(byte* address, int instructions)
- : address_(address),
- instructions_(instructions),
- size_(instructions * Assembler::kInstrSize),
- masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
- // Create a new macro assembler pointing to the address of the code to patch.
- // The size is adjusted with kGap on order for the assembler to generate size
- // bytes of instructions without failing with buffer size constraints.
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-CodePatcher::~CodePatcher() {
- // Indicate that code has changed.
- CPU::FlushICache(address_, size_);
-
- // Check that the code was patched as expected.
- ASSERT(masm_.pc_ == address_ + size_);
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-void CodePatcher::Emit(Instr instr) {
- masm()->emit(instr);
-}
-
-
-void CodePatcher::Emit(Address addr) {
- masm()->emit(reinterpret_cast<Instr>(addr));
-}
-
-
-void CodePatcher::EmitCondition(Condition cond) {
- Instr instr = Assembler::instr_at(masm_.pc_);
- instr = (instr & ~kCondMask) | cond;
- masm_.emit(instr);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/macro-assembler-arm.h b/src/3rdparty/v8/src/arm/macro-assembler-arm.h
deleted file mode 100644
index ab5efb0..0000000
--- a/src/3rdparty/v8/src/arm/macro-assembler-arm.h
+++ /dev/null
@@ -1,1071 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
-#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
-
-#include "assembler.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declaration.
-class CallWrapper;
-
-// ----------------------------------------------------------------------------
-// Static helper functions
-
-// Generate a MemOperand for loading a field from an object.
-static inline MemOperand FieldMemOperand(Register object, int offset) {
- return MemOperand(object, offset - kHeapObjectTag);
-}
-
-
-static inline Operand SmiUntagOperand(Register object) {
- return Operand(object, ASR, kSmiTagSize);
-}
-
-
-
-// Give alias names to registers
-const Register cp = { 8 }; // JavaScript context pointer
-const Register roots = { 10 }; // Roots array pointer.
-
-enum InvokeJSFlags {
- CALL_JS,
- JUMP_JS
-};
-
-
-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
- // No special flags.
- NO_ALLOCATION_FLAGS = 0,
- // Return the pointer to the allocated already tagged as a heap object.
- TAG_OBJECT = 1 << 0,
- // The content of the result register already contains the allocation top in
- // new space.
- RESULT_CONTAINS_TOP = 1 << 1,
- // Specify that the requested size of the space to allocate is specified in
- // words instead of bytes.
- SIZE_IN_WORDS = 1 << 2
-};
-
-
-// Flags used for the ObjectToDoubleVFPRegister function.
-enum ObjectToDoubleFlags {
- // No special flags.
- NO_OBJECT_TO_DOUBLE_FLAGS = 0,
- // Object is known to be a non smi.
- OBJECT_NOT_SMI = 1 << 0,
- // Don't load NaNs or infinities, branch to the non number case instead.
- AVOID_NANS_AND_INFINITIES = 1 << 1
-};
-
-
-// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler: public Assembler {
- public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
-
- // Jump, Call, and Ret pseudo instructions implementing inter-working.
- void Jump(Register target, Condition cond = al);
- void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al);
- void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
- int CallSize(Register target, Condition cond = al);
- void Call(Register target, Condition cond = al);
- int CallSize(byte* target, RelocInfo::Mode rmode, Condition cond = al);
- void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
- int CallSize(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
- void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
- void Ret(Condition cond = al);
-
- // Emit code to discard a non-negative number of pointer-sized elements
- // from the stack, clobbering only the sp register.
- void Drop(int count, Condition cond = al);
-
- void Ret(int drop, Condition cond = al);
-
- // Swap two registers. If the scratch register is omitted then a slightly
- // less efficient form using xor instead of mov is emitted.
- void Swap(Register reg1,
- Register reg2,
- Register scratch = no_reg,
- Condition cond = al);
-
-
- void And(Register dst, Register src1, const Operand& src2,
- Condition cond = al);
- void Ubfx(Register dst, Register src, int lsb, int width,
- Condition cond = al);
- void Sbfx(Register dst, Register src, int lsb, int width,
- Condition cond = al);
- // The scratch register is not used for ARMv7.
- // scratch can be the same register as src (in which case it is trashed), but
- // not the same as dst.
- void Bfi(Register dst,
- Register src,
- Register scratch,
- int lsb,
- int width,
- Condition cond = al);
- void Bfc(Register dst, int lsb, int width, Condition cond = al);
- void Usat(Register dst, int satpos, const Operand& src,
- Condition cond = al);
-
- void Call(Label* target);
- void Move(Register dst, Handle<Object> value);
- // May do nothing if the registers are identical.
- void Move(Register dst, Register src);
- // Jumps to the label at the index given by the Smi in "index".
- void SmiJumpTable(Register index, Vector<Label*> targets);
- // Load an object from the root table.
- void LoadRoot(Register destination,
- Heap::RootListIndex index,
- Condition cond = al);
- // Store an object to the root table.
- void StoreRoot(Register source,
- Heap::RootListIndex index,
- Condition cond = al);
-
-
- // Check if object is in new space.
- // scratch can be object itself, but it will be clobbered.
- void InNewSpace(Register object,
- Register scratch,
- Condition cond, // eq for new space, ne otherwise
- Label* branch);
-
-
- // For the page containing |object| mark the region covering [address]
- // dirty. The object address must be in the first 8K of an allocated page.
- void RecordWriteHelper(Register object,
- Register address,
- Register scratch);
-
- // For the page containing |object| mark the region covering
- // [object+offset] dirty. The object address must be in the first 8K
- // of an allocated page. The 'scratch' registers are used in the
- // implementation and all 3 registers are clobbered by the
- // operation, as well as the ip register. RecordWrite updates the
- // write barrier even when storing smis.
- void RecordWrite(Register object,
- Operand offset,
- Register scratch0,
- Register scratch1);
-
- // For the page containing |object| mark the region covering
- // [address] dirty. The object address must be in the first 8K of an
- // allocated page. All 3 registers are clobbered by the operation,
- // as well as the ip register. RecordWrite updates the write barrier
- // even when storing smis.
- void RecordWrite(Register object,
- Register address,
- Register scratch);
-
- // Push two registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Condition cond = al) {
- ASSERT(!src1.is(src2));
- if (src1.code() > src2.code()) {
- stm(db_w, sp, src1.bit() | src2.bit(), cond);
- } else {
- str(src1, MemOperand(sp, 4, NegPreIndex), cond);
- str(src2, MemOperand(sp, 4, NegPreIndex), cond);
- }
- }
-
- // Push three registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Register src3, Condition cond = al) {
- ASSERT(!src1.is(src2));
- ASSERT(!src2.is(src3));
- ASSERT(!src1.is(src3));
- if (src1.code() > src2.code()) {
- if (src2.code() > src3.code()) {
- stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
- } else {
- stm(db_w, sp, src1.bit() | src2.bit(), cond);
- str(src3, MemOperand(sp, 4, NegPreIndex), cond);
- }
- } else {
- str(src1, MemOperand(sp, 4, NegPreIndex), cond);
- Push(src2, src3, cond);
- }
- }
-
- // Push four registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2,
- Register src3, Register src4, Condition cond = al) {
- ASSERT(!src1.is(src2));
- ASSERT(!src2.is(src3));
- ASSERT(!src1.is(src3));
- ASSERT(!src1.is(src4));
- ASSERT(!src2.is(src4));
- ASSERT(!src3.is(src4));
- if (src1.code() > src2.code()) {
- if (src2.code() > src3.code()) {
- if (src3.code() > src4.code()) {
- stm(db_w,
- sp,
- src1.bit() | src2.bit() | src3.bit() | src4.bit(),
- cond);
- } else {
- stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
- str(src4, MemOperand(sp, 4, NegPreIndex), cond);
- }
- } else {
- stm(db_w, sp, src1.bit() | src2.bit(), cond);
- Push(src3, src4, cond);
- }
- } else {
- str(src1, MemOperand(sp, 4, NegPreIndex), cond);
- Push(src2, src3, src4, cond);
- }
- }
-
- // Pop two registers. Pops rightmost register first (from lower address).
- void Pop(Register src1, Register src2, Condition cond = al) {
- ASSERT(!src1.is(src2));
- if (src1.code() > src2.code()) {
- ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
- } else {
- ldr(src2, MemOperand(sp, 4, PostIndex), cond);
- ldr(src1, MemOperand(sp, 4, PostIndex), cond);
- }
- }
-
- // Push and pop the registers that can hold pointers, as defined by the
- // RegList constant kSafepointSavedRegisters.
- void PushSafepointRegisters();
- void PopSafepointRegisters();
- void PushSafepointRegistersAndDoubles();
- void PopSafepointRegistersAndDoubles();
- // Store value in register src in the safepoint stack slot for
- // register dst.
- void StoreToSafepointRegisterSlot(Register src, Register dst);
- void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
- // Load the value of the src register from its safepoint stack slot
- // into register dst.
- void LoadFromSafepointRegisterSlot(Register dst, Register src);
-
- // Load two consecutive registers with two consecutive memory locations.
- void Ldrd(Register dst1,
- Register dst2,
- const MemOperand& src,
- Condition cond = al);
-
- // Store two consecutive registers to two consecutive memory locations.
- void Strd(Register src1,
- Register src2,
- const MemOperand& dst,
- Condition cond = al);
-
- // Clear specified FPSCR bits.
- void ClearFPSCRBits(const uint32_t bits_to_clear,
- const Register scratch,
- const Condition cond = al);
-
- // Compare double values and move the result to the normal condition flags.
- void VFPCompareAndSetFlags(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void VFPCompareAndSetFlags(const DwVfpRegister src1,
- const double src2,
- const Condition cond = al);
-
- // Compare double values and then load the fpscr flags to a register.
- void VFPCompareAndLoadFlags(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Register fpscr_flags,
- const Condition cond = al);
- void VFPCompareAndLoadFlags(const DwVfpRegister src1,
- const double src2,
- const Register fpscr_flags,
- const Condition cond = al);
-
-
- // ---------------------------------------------------------------------------
- // Activation frames
-
- void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
- void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
- void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
- void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
- // Enter exit frame.
- // stack_space - extra stack space, used for alignment before call to C.
- void EnterExitFrame(bool save_doubles, int stack_space = 0);
-
- // Leave the current exit frame. Expects the return value in r0.
- // Expect the number of values, pushed prior to the exit frame, to
- // remove in a register (or no_reg, if there is nothing to remove).
- void LeaveExitFrame(bool save_doubles, Register argument_count);
-
- // Get the actual activation frame alignment for target environment.
- static int ActivationFrameAlignment();
-
- void LoadContext(Register dst, int context_chain_length);
-
- void LoadGlobalFunction(int index, Register function);
-
- // Load the initial map from the global function. The registers
- // function and map can be the same, function is then overwritten.
- void LoadGlobalFunctionInitialMap(Register function,
- Register map,
- Register scratch);
-
- // ---------------------------------------------------------------------------
- // JavaScript invokes
-
- // Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- CallWrapper* call_wrapper = NULL);
-
- void InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag);
-
- // Invoke the JavaScript function in the given register. Changes the
- // current context to the context in the function before invoking.
- void InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
- CallWrapper* call_wrapper = NULL);
-
- void InvokeFunction(JSFunction* function,
- const ParameterCount& actual,
- InvokeFlag flag);
-
- void IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail);
-
- void IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail);
-
- void IsObjectJSStringType(Register object,
- Register scratch,
- Label* fail);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // ---------------------------------------------------------------------------
- // Debugger Support
-
- void DebugBreak();
-#endif
-
- // ---------------------------------------------------------------------------
- // Exception handling
-
- // Push a new try handler and link into try handler chain.
- // The return address must be passed in register lr.
- // On exit, r0 contains TOS (code slot).
- void PushTryHandler(CodeLocation try_location, HandlerType type);
-
- // Unlink the stack handler on top of the stack from the try handler chain.
- // Must preserve the result register.
- void PopTryHandler();
-
- // Passes thrown value (in r0) to the handler of top of the try handler chain.
- void Throw(Register value);
-
- // Propagates an uncatchable exception to the top of the current JS stack's
- // handler chain.
- void ThrowUncatchable(UncatchableExceptionType type, Register value);
-
- // ---------------------------------------------------------------------------
- // Inline caching support
-
- // Generate code for checking access rights - used for security checks
- // on access to global objects across environments. The holder register
- // is left untouched, whereas both scratch registers are clobbered.
- void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss);
-
- inline void MarkCode(NopMarkerTypes type) {
- nop(type);
- }
-
- // Check if the given instruction is a 'type' marker.
- // ie. check if is is a mov r<type>, r<type> (referenced as nop(type))
- // These instructions are generated to mark special location in the code,
- // like some special IC code.
- static inline bool IsMarkedCode(Instr instr, int type) {
- ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
- return IsNop(instr, type);
- }
-
-
- static inline int GetCodeMarker(Instr instr) {
- int dst_reg_offset = 12;
- int dst_mask = 0xf << dst_reg_offset;
- int src_mask = 0xf;
- int dst_reg = (instr & dst_mask) >> dst_reg_offset;
- int src_reg = instr & src_mask;
- uint32_t non_register_mask = ~(dst_mask | src_mask);
- uint32_t mov_mask = al | 13 << 21;
-
- // Return <n> if we have a mov rn rn, else return -1.
- int type = ((instr & non_register_mask) == mov_mask) &&
- (dst_reg == src_reg) &&
- (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
- ? src_reg
- : -1;
- ASSERT((type == -1) ||
- ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
- return type;
- }
-
-
- // ---------------------------------------------------------------------------
- // Allocation support
-
- // Allocate an object in new space. The object_size is specified
- // either in bytes or in words if the allocation flag SIZE_IN_WORDS
- // is passed. If the new space is exhausted control continues at the
- // gc_required label. The allocated object is returned in result. If
- // the flag tag_allocated_object is true the result is tagged as as
- // a heap object. All registers are clobbered also when control
- // continues at the gc_required label.
- void AllocateInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
- void AllocateInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
-
- // Undo allocation in new space. The object passed and objects allocated after
- // it will no longer be allocated. The caller must make sure that no pointers
- // are left to the object(s) no longer allocated as they would be invalid when
- // allocation is undone.
- void UndoAllocationInNewSpace(Register object, Register scratch);
-
-
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // Allocates a heap number or jumps to the gc_required label if the young
- // space is full and a scavenge is needed. All registers are clobbered also
- // when control continues at the gc_required label.
- void AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* gc_required);
- void AllocateHeapNumberWithValue(Register result,
- DwVfpRegister value,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* gc_required);
-
- // Copies a fixed number of fields of heap objects from src to dst.
- void CopyFields(Register dst, Register src, RegList temps, int field_count);
-
- // Copies a number of bytes from src to dst. All registers are clobbered. On
- // exit src and dst will point to the place just after where the last byte was
- // read or written and length will be zero.
- void CopyBytes(Register src,
- Register dst,
- Register length,
- Register scratch);
-
- // ---------------------------------------------------------------------------
- // Support functions.
-
- // Try to get function prototype of a function and puts the value in
- // the result register. Checks that the function really is a
- // function and jumps to the miss label if the fast checks fail. The
- // function register will be untouched; the other registers may be
- // clobbered.
- void TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss);
-
- // Compare object type for heap object. heap_object contains a non-Smi
- // whose object type should be compared with the given type. This both
- // sets the flags and leaves the object type in the type_reg register.
- // It leaves the map in the map register (unless the type_reg and map register
- // are the same register). It leaves the heap object in the heap_object
- // register unless the heap_object register is the same register as one of the
- // other registers.
- void CompareObjectType(Register heap_object,
- Register map,
- Register type_reg,
- InstanceType type);
-
- // Compare instance type in a map. map contains a valid map object whose
- // object type should be compared with the given type. This both
- // sets the flags and leaves the object type in the type_reg register. It
- // leaves the heap object in the heap_object register unless the heap_object
- // register is the same register as type_reg.
- void CompareInstanceType(Register map,
- Register type_reg,
- InstanceType type);
-
-
- // Check if the map of an object is equal to a specified map (either
- // given directly or as an index into the root list) and branch to
- // label if not. Skip the smi check if not required (object is known
- // to be a heap object)
- void CheckMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* fail,
- bool is_heap_object);
-
- void CheckMap(Register obj,
- Register scratch,
- Heap::RootListIndex index,
- Label* fail,
- bool is_heap_object);
-
-
- // Compare the object in a register to a value from the root list.
- // Uses the ip register as scratch.
- void CompareRoot(Register obj, Heap::RootListIndex index);
-
-
- // Load and check the instance type of an object for being a string.
- // Loads the type into the second argument register.
- // Returns a condition that will be enabled if the object was a string.
- Condition IsObjectStringType(Register obj,
- Register type) {
- ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset));
- ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
- tst(type, Operand(kIsNotStringMask));
- ASSERT_EQ(0, kStringTag);
- return eq;
- }
-
-
- // Generates code for reporting that an illegal operation has
- // occurred.
- void IllegalOperation(int num_arguments);
-
- // Picks out an array index from the hash field.
- // Register use:
- // hash - holds the index's hash. Clobbered.
- // index - holds the overwritten index on exit.
- void IndexFromHash(Register hash, Register index);
-
- // Get the number of least significant bits from a register
- void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
- void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
-
- // Uses VFP instructions to Convert a Smi to a double.
- void IntegerToDoubleConversionWithVFP3(Register inReg,
- Register outHighReg,
- Register outLowReg);
-
- // Load the value of a number object into a VFP double register. If the object
- // is not a number a jump to the label not_number is performed and the VFP
- // double register is unchanged.
- void ObjectToDoubleVFPRegister(
- Register object,
- DwVfpRegister value,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- SwVfpRegister scratch3,
- Label* not_number,
- ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
-
- // Load the value of a smi object into a VFP double register. The register
- // scratch1 can be the same register as smi in which case smi will hold the
- // untagged value afterwards.
- void SmiToDoubleVFPRegister(Register smi,
- DwVfpRegister value,
- Register scratch1,
- SwVfpRegister scratch2);
-
- // Convert the HeapNumber pointed to by source to a 32bits signed integer
- // dest. If the HeapNumber does not fit into a 32bits signed integer branch
- // to not_int32 label. If VFP3 is available double_scratch is used but not
- // scratch2.
- void ConvertToInt32(Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- DwVfpRegister double_scratch,
- Label *not_int32);
-
- // Truncates a double using a specific rounding mode.
- // Clears the z flag (ne condition) if an overflow occurs.
- // If exact_conversion is true, the z flag is also cleared if the conversion
- // was inexact, ie. if the double value could not be converted exactly
- // to a 32bit integer.
- void EmitVFPTruncate(VFPRoundingMode rounding_mode,
- SwVfpRegister result,
- DwVfpRegister double_input,
- Register scratch1,
- Register scratch2,
- CheckForInexactConversion check
- = kDontCheckForInexactConversion);
-
- // Helper for EmitECMATruncate.
- // This will truncate a floating-point value outside of the singed 32bit
- // integer range to a 32bit signed integer.
- // Expects the double value loaded in input_high and input_low.
- // Exits with the answer in 'result'.
- // Note that this code does not work for values in the 32bit range!
- void EmitOutOfInt32RangeTruncate(Register result,
- Register input_high,
- Register input_low,
- Register scratch);
-
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
- // Exits with 'result' holding the answer and all other registers clobbered.
- void EmitECMATruncate(Register result,
- DwVfpRegister double_input,
- SwVfpRegister single_scratch,
- Register scratch,
- Register scratch2,
- Register scratch3);
-
- // Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
- // instruction. On pre-ARM5 hardware this routine gives the wrong answer
- // for 0 (31 instead of 32). Source and scratch can be the same in which case
- // the source is clobbered. Source and zeros can also be the same in which
- // case scratch should be a different register.
- void CountLeadingZeros(Register zeros,
- Register source,
- Register scratch);
-
- // ---------------------------------------------------------------------------
- // Runtime calls
-
- // Call a code stub.
- void CallStub(CodeStub* stub, Condition cond = al);
-
- // Call a code stub.
- void TailCallStub(CodeStub* stub, Condition cond = al);
-
- // Tail call a code stub (jump) and return the code object called. Try to
- // generate the code if necessary. Do not perform a GC but instead return
- // a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
- Condition cond = al);
-
- // Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
-
- // Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments);
-
- // Convenience function: call an external reference.
- void CallExternalReference(const ExternalReference& ext,
- int num_arguments);
-
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
-
- // Tail call of a runtime routine (jump). Try to generate the code if
- // necessary. Do not perform a GC but instead return a retry after GC
- // failure.
- MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size);
-
- // Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
-
- // Before calling a C-function from generated code, align arguments on stack.
- // After aligning the frame, non-register arguments must be stored in
- // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
- // are word sized.
- // Some compilers/platforms require the stack to be aligned when calling
- // C++ code.
- // Needs a scratch register to do some arithmetic. This register will be
- // trashed.
- void PrepareCallCFunction(int num_arguments, Register scratch);
-
- // Calls a C function and cleans up the space for arguments allocated
- // by PrepareCallCFunction. The called function is not allowed to trigger a
- // garbage collection, since that might move the code and invalidate the
- // return address (unless this is somehow accounted for by the called
- // function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, Register scratch, int num_arguments);
-
- void GetCFunctionDoubleResult(const DoubleRegister dst);
-
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions. Restores context.
- // stack_space - space to be unwound on exit (includes the call js
- // arguments space and the additional space allocated for the fast call).
- MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
- int stack_space);
-
- // Jump to a runtime routine.
- void JumpToExternalReference(const ExternalReference& builtin);
-
- MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
-
- // Invoke specified builtin JavaScript function. Adds an entry to
- // the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flags,
- CallWrapper* call_wrapper = NULL);
-
- // Store the code object for the given builtin in the target register and
- // setup the function in r1.
- void GetBuiltinEntry(Register target, Builtins::JavaScript id);
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, Builtins::JavaScript id);
-
- Handle<Object> CodeObject() {
- ASSERT(!code_object_.is_null());
- return code_object_;
- }
-
-
- // ---------------------------------------------------------------------------
- // StatsCounter support
-
- void SetCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2);
- void IncrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2);
- void DecrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2);
-
-
- // ---------------------------------------------------------------------------
- // Debugging
-
- // Calls Abort(msg) if the condition cond is not satisfied.
- // Use --debug_code to enable.
- void Assert(Condition cond, const char* msg);
- void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
- void AssertFastElements(Register elements);
-
- // Like Assert(), but always enabled.
- void Check(Condition cond, const char* msg);
-
- // Print a message to stdout and abort execution.
- void Abort(const char* msg);
-
- // Verify restrictions about code generated in stubs.
- void set_generating_stub(bool value) { generating_stub_ = value; }
- bool generating_stub() { return generating_stub_; }
- void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
- bool allow_stub_calls() { return allow_stub_calls_; }
-
- // ---------------------------------------------------------------------------
- // Number utilities
-
- // Check whether the value of reg is a power of two and not zero. If not
- // control continues at the label not_power_of_two. If reg is a power of two
- // the register scratch contains the value of (reg - 1) when control falls
- // through.
- void JumpIfNotPowerOfTwoOrZero(Register reg,
- Register scratch,
- Label* not_power_of_two_or_zero);
- // Check whether the value of reg is a power of two and not zero.
- // Control falls through if it is, with scratch containing the mask
- // value (reg - 1).
- // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
- // zero or negative, or jumps to the 'not_power_of_two' label if the value is
- // strictly positive but not a power of two.
- void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
- Register scratch,
- Label* zero_and_neg,
- Label* not_power_of_two);
-
- // ---------------------------------------------------------------------------
- // Smi utilities
-
- void SmiTag(Register reg, SBit s = LeaveCC) {
- add(reg, reg, Operand(reg), s);
- }
- void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
- add(dst, src, Operand(src), s);
- }
-
- // Try to convert int32 to smi. If the value is to large, preserve
- // the original value and jump to not_a_smi. Destroys scratch and
- // sets flags.
- void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) {
- mov(scratch, reg);
- SmiTag(scratch, SetCC);
- b(vs, not_a_smi);
- mov(reg, scratch);
- }
-
- void SmiUntag(Register reg, SBit s = LeaveCC) {
- mov(reg, Operand(reg, ASR, kSmiTagSize), s);
- }
- void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
- mov(dst, Operand(src, ASR, kSmiTagSize), s);
- }
-
- // Jump the register contains a smi.
- inline void JumpIfSmi(Register value, Label* smi_label) {
- tst(value, Operand(kSmiTagMask));
- b(eq, smi_label);
- }
- // Jump if either of the registers contain a non-smi.
- inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
- tst(value, Operand(kSmiTagMask));
- b(ne, not_smi_label);
- }
- // Jump if either of the registers contain a non-smi.
- void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
- // Jump if either of the registers contain a smi.
- void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
-
- // Abort execution if argument is a smi. Used in debug code.
- void AbortIfSmi(Register object);
- void AbortIfNotSmi(Register object);
-
- // Abort execution if argument is a string. Used in debug code.
- void AbortIfNotString(Register object);
-
- // Abort execution if argument is not the root value with the given index.
- void AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message);
-
- // ---------------------------------------------------------------------------
- // HeapNumber utilities
-
- void JumpIfNotHeapNumber(Register object,
- Register heap_number_map,
- Register scratch,
- Label* on_not_heap_number);
-
- // ---------------------------------------------------------------------------
- // String utilities
-
- // Checks if both objects are sequential ASCII strings and jumps to label
- // if either is not. Assumes that neither object is a smi.
- void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label* failure);
-
- // Checks if both objects are sequential ASCII strings and jumps to label
- // if either is not.
- void JumpIfNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* not_flat_ascii_strings);
-
- // Checks if both instance types are sequential ASCII strings and jumps to
- // label if either is not.
- void JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* failure);
-
- // Check if instance type is sequential ASCII string and jump to label if
- // it is not.
- void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure);
-
-
- // ---------------------------------------------------------------------------
- // Patching helpers.
-
- // Get the location of a relocated constant (its address in the constant pool)
- // from its load site.
- void GetRelocatedValueLocation(Register ldr_location,
- Register result);
-
-
- private:
- void CallCFunctionHelper(Register function,
- ExternalReference function_reference,
- Register scratch,
- int num_arguments);
-
- void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
- int CallSize(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
- void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
-
- // Helper functions for generating invokes.
- void InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
- Label* done,
- InvokeFlag flag,
- CallWrapper* call_wrapper = NULL);
-
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
- void InitializeNewString(Register string,
- Register length,
- Heap::RootListIndex map_index,
- Register scratch1,
- Register scratch2);
-
- // Compute memory operands for safepoint stack slots.
- static int SafepointRegisterStackIndex(int reg_code);
- MemOperand SafepointRegisterSlot(Register reg);
- MemOperand SafepointRegistersAndDoublesSlot(Register reg);
-
- bool generating_stub_;
- bool allow_stub_calls_;
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
-
- // Needs access to SafepointRegisterStackIndex for optimized frame
- // traversal.
- friend class OptimizedFrame;
-};
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-// The code patcher is used to patch (typically) small parts of code e.g. for
-// debugging and other types of instrumentation. When using the code patcher
-// the exact number of bytes specified must be emitted. It is not legal to emit
-// relocation information. If any of these constraints are violated it causes
-// an assertion to fail.
-class CodePatcher {
- public:
- CodePatcher(byte* address, int instructions);
- virtual ~CodePatcher();
-
- // Macro assembler to emit code.
- MacroAssembler* masm() { return &masm_; }
-
- // Emit an instruction directly.
- void Emit(Instr instr);
-
- // Emit an address directly.
- void Emit(Address addr);
-
- // Emit the condition part of an instruction leaving the rest of the current
- // instruction unchanged.
- void EmitCondition(Condition cond);
-
- private:
- byte* address_; // The address of the code being patched.
- int instructions_; // Number of instructions of the expected patch size.
- int size_; // Number of bytes of the expected patch size.
- MacroAssembler masm_; // Macro assembler used to generate the code.
-};
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-// Helper class for generating code or data associated with the code
-// right after a call instruction. As an example this can be used to
-// generate safepoint data after calls for crankshaft.
-class CallWrapper {
- public:
- CallWrapper() { }
- virtual ~CallWrapper() { }
- // Called just before emitting a call. Argument is the size of the generated
- // call code.
- virtual void BeforeCall(int call_size) = 0;
- // Called just after emitting a call, i.e., at the return site for the call.
- virtual void AfterCall() = 0;
-};
-
-
-// -----------------------------------------------------------------------------
-// Static helper functions.
-
-static MemOperand ContextOperand(Register context, int index) {
- return MemOperand(context, Context::SlotOffset(index));
-}
-
-
-static inline MemOperand GlobalObjectOperand() {
- return ContextOperand(cp, Context::GLOBAL_INDEX);
-}
-
-
-#ifdef GENERATED_CODE_COVERAGE
-#define CODE_COVERAGE_STRINGIFY(x) #x
-#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
-#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
-#else
-#define ACCESS_MASM(masm) masm->
-#endif
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.cc b/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.cc
deleted file mode 100644
index 4bd8c80..0000000
--- a/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.cc
+++ /dev/null
@@ -1,1287 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "unicode.h"
-#include "log.h"
-#include "code-stubs.h"
-#include "regexp-stack.h"
-#include "macro-assembler.h"
-#include "regexp-macro-assembler.h"
-#include "arm/regexp-macro-assembler-arm.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-/*
- * This assembler uses the following register assignment convention
- * - r5 : Pointer to current code object (Code*) including heap object tag.
- * - r6 : Current position in input, as negative offset from end of string.
- * Please notice that this is the byte offset, not the character offset!
- * - r7 : Currently loaded character. Must be loaded using
- * LoadCurrentCharacter before using any of the dispatch methods.
- * - r8 : points to tip of backtrack stack
- * - r9 : Unused, might be used by C code and expected unchanged.
- * - r10 : End of input (points to byte after last character in input).
- * - r11 : Frame pointer. Used to access arguments, local variables and
- * RegExp registers.
- * - r12 : IP register, used by assembler. Very volatile.
- * - r13/sp : points to tip of C stack.
- *
- * The remaining registers are free for computations.
- * Each call to a public method should retain this convention.
- *
- * The stack will have the following structure:
- * - fp[52] Isolate* isolate (Address of the current isolate)
- * - fp[48] direct_call (if 1, direct call from JavaScript code,
- * if 0, call through the runtime system).
- * - fp[44] stack_area_base (High end of the memory area to use as
- * backtracking stack).
- * - fp[40] int* capture_array (int[num_saved_registers_], for output).
- * - fp[36] secondary link/return address used by native call.
- * --- sp when called ---
- * - fp[32] return address (lr).
- * - fp[28] old frame pointer (r11).
- * - fp[0..24] backup of registers r4..r10.
- * --- frame pointer ----
- * - fp[-4] end of input (Address of end of string).
- * - fp[-8] start of input (Address of first character in string).
- * - fp[-12] start index (character index of start).
- * - fp[-16] void* input_string (location of a handle containing the string).
- * - fp[-20] Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a
- * non-position.
- * - fp[-24] At start (if 1, we are starting at the start of the
- * string, otherwise 0)
- * - fp[-28] register 0 (Only positions must be stored in the first
- * - register 1 num_saved_registers_ registers)
- * - ...
- * - register num_registers-1
- * --- sp ---
- *
- * The first num_saved_registers_ registers are initialized to point to
- * "character -1" in the string (i.e., char_size() bytes before the first
- * character of the string). The remaining registers start out as garbage.
- *
- * The data up to the return address must be placed there by the calling
- * code and the remaining arguments are passed in registers, e.g. by calling the
- * code entry as cast to a function with the signature:
- * int (*match)(String* input_string,
- * int start_index,
- * Address start,
- * Address end,
- * Address secondary_return_address, // Only used by native call.
- * int* capture_output_array,
- * byte* stack_area_base,
- * bool direct_call = false)
- * The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
- * in arm/simulator-arm.h.
- * When calling as a non-direct call (i.e., from C++ code), the return address
- * area is overwritten with the LR register by the RegExp code. When doing a
- * direct call from generated code, the return address is placed there by
- * the calling code, as in a normal exit frame.
- */
-
-#define __ ACCESS_MASM(masm_)
-
-RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
- Mode mode,
- int registers_to_save)
- : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
- mode_(mode),
- num_registers_(registers_to_save),
- num_saved_registers_(registers_to_save),
- entry_label_(),
- start_label_(),
- success_label_(),
- backtrack_label_(),
- exit_label_() {
- ASSERT_EQ(0, registers_to_save % 2);
- __ jmp(&entry_label_); // We'll write the entry code later.
- EmitBacktrackConstantPool();
- __ bind(&start_label_); // And then continue from here.
-}
-
-
-RegExpMacroAssemblerARM::~RegExpMacroAssemblerARM() {
- delete masm_;
- // Unuse labels in case we throw away the assembler without calling GetCode.
- entry_label_.Unuse();
- start_label_.Unuse();
- success_label_.Unuse();
- backtrack_label_.Unuse();
- exit_label_.Unuse();
- check_preempt_label_.Unuse();
- stack_overflow_label_.Unuse();
-}
-
-
-int RegExpMacroAssemblerARM::stack_limit_slack() {
- return RegExpStack::kStackLimitSlack;
-}
-
-
-void RegExpMacroAssemblerARM::AdvanceCurrentPosition(int by) {
- if (by != 0) {
- __ add(current_input_offset(),
- current_input_offset(), Operand(by * char_size()));
- }
-}
-
-
-void RegExpMacroAssemblerARM::AdvanceRegister(int reg, int by) {
- ASSERT(reg >= 0);
- ASSERT(reg < num_registers_);
- if (by != 0) {
- __ ldr(r0, register_location(reg));
- __ add(r0, r0, Operand(by));
- __ str(r0, register_location(reg));
- }
-}
-
-
-void RegExpMacroAssemblerARM::Backtrack() {
- CheckPreemption();
- // Pop Code* offset from backtrack stack, add Code* and jump to location.
- Pop(r0);
- __ add(pc, r0, Operand(code_pointer()));
-}
-
-
-void RegExpMacroAssemblerARM::Bind(Label* label) {
- __ bind(label);
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacter(uint32_t c, Label* on_equal) {
- __ cmp(current_character(), Operand(c));
- BranchOrBacktrack(eq, on_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacterGT(uc16 limit, Label* on_greater) {
- __ cmp(current_character(), Operand(limit));
- BranchOrBacktrack(gt, on_greater);
-}
-
-
-void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- BranchOrBacktrack(eq, &not_at_start);
-
- // If we did, are we still at the start of the input?
- __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
- __ add(r0, end_of_input_address(), Operand(current_input_offset()));
- __ cmp(r0, r1);
- BranchOrBacktrack(eq, on_at_start);
- __ bind(&not_at_start);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- BranchOrBacktrack(eq, on_not_at_start);
- // If we did, are we still at the start of the input?
- __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
- __ add(r0, end_of_input_address(), Operand(current_input_offset()));
- __ cmp(r0, r1);
- BranchOrBacktrack(ne, on_not_at_start);
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacterLT(uc16 limit, Label* on_less) {
- __ cmp(current_character(), Operand(limit));
- BranchOrBacktrack(lt, on_less);
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
- if (on_failure == NULL) {
- // Instead of inlining a backtrack for each test, (re)use the global
- // backtrack target.
- on_failure = &backtrack_label_;
- }
-
- if (check_end_of_string) {
- // Is last character of required match inside string.
- CheckPosition(cp_offset + str.length() - 1, on_failure);
- }
-
- __ add(r0, end_of_input_address(), Operand(current_input_offset()));
- if (cp_offset != 0) {
- int byte_offset = cp_offset * char_size();
- __ add(r0, r0, Operand(byte_offset));
- }
-
- // r0 : Address of characters to match against str.
- int stored_high_byte = 0;
- for (int i = 0; i < str.length(); i++) {
- if (mode_ == ASCII) {
- __ ldrb(r1, MemOperand(r0, char_size(), PostIndex));
- ASSERT(str[i] <= String::kMaxAsciiCharCode);
- __ cmp(r1, Operand(str[i]));
- } else {
- __ ldrh(r1, MemOperand(r0, char_size(), PostIndex));
- uc16 match_char = str[i];
- int match_high_byte = (match_char >> 8);
- if (match_high_byte == 0) {
- __ cmp(r1, Operand(str[i]));
- } else {
- if (match_high_byte != stored_high_byte) {
- __ mov(r2, Operand(match_high_byte));
- stored_high_byte = match_high_byte;
- }
- __ add(r3, r2, Operand(match_char & 0xff));
- __ cmp(r1, r3);
- }
- }
- BranchOrBacktrack(ne, on_failure);
- }
-}
-
-
-void RegExpMacroAssemblerARM::CheckGreedyLoop(Label* on_equal) {
- __ ldr(r0, MemOperand(backtrack_stackpointer(), 0));
- __ cmp(current_input_offset(), r0);
- __ add(backtrack_stackpointer(),
- backtrack_stackpointer(), Operand(kPointerSize), LeaveCC, eq);
- BranchOrBacktrack(eq, on_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
- __ ldr(r0, register_location(start_reg)); // Index of start of capture
- __ ldr(r1, register_location(start_reg + 1)); // Index of end of capture
- __ sub(r1, r1, r0, SetCC); // Length of capture.
-
- // If length is zero, either the capture is empty or it is not participating.
- // In either case succeed immediately.
- __ b(eq, &fallthrough);
-
- // Check that there are enough characters left in the input.
- __ cmn(r1, Operand(current_input_offset()));
- BranchOrBacktrack(gt, on_no_match);
-
- if (mode_ == ASCII) {
- Label success;
- Label fail;
- Label loop_check;
-
- // r0 - offset of start of capture
- // r1 - length of capture
- __ add(r0, r0, Operand(end_of_input_address()));
- __ add(r2, end_of_input_address(), Operand(current_input_offset()));
- __ add(r1, r0, Operand(r1));
-
- // r0 - Address of start of capture.
- // r1 - Address of end of capture
- // r2 - Address of current input position.
-
- Label loop;
- __ bind(&loop);
- __ ldrb(r3, MemOperand(r0, char_size(), PostIndex));
- __ ldrb(r4, MemOperand(r2, char_size(), PostIndex));
- __ cmp(r4, r3);
- __ b(eq, &loop_check);
-
- // Mismatch, try case-insensitive match (converting letters to lower-case).
- __ orr(r3, r3, Operand(0x20)); // Convert capture character to lower-case.
- __ orr(r4, r4, Operand(0x20)); // Also convert input character.
- __ cmp(r4, r3);
- __ b(ne, &fail);
- __ sub(r3, r3, Operand('a'));
- __ cmp(r3, Operand('z' - 'a')); // Is r3 a lowercase letter?
- __ b(hi, &fail);
-
-
- __ bind(&loop_check);
- __ cmp(r0, r1);
- __ b(lt, &loop);
- __ jmp(&success);
-
- __ bind(&fail);
- BranchOrBacktrack(al, on_no_match);
-
- __ bind(&success);
- // Compute new value of character position after the matched part.
- __ sub(current_input_offset(), r2, end_of_input_address());
- } else {
- ASSERT(mode_ == UC16);
- int argument_count = 4;
- __ PrepareCallCFunction(argument_count, r2);
-
- // r0 - offset of start of capture
- // r1 - length of capture
-
- // Put arguments into arguments registers.
- // Parameters are
- // r0: Address byte_offset1 - Address captured substring's start.
- // r1: Address byte_offset2 - Address of current character position.
- // r2: size_t byte_length - length of capture in bytes(!)
- // r3: Isolate* isolate
-
- // Address of start of capture.
- __ add(r0, r0, Operand(end_of_input_address()));
- // Length of capture.
- __ mov(r2, Operand(r1));
- // Save length in callee-save register for use on return.
- __ mov(r4, Operand(r1));
- // Address of current input position.
- __ add(r1, current_input_offset(), Operand(end_of_input_address()));
- // Isolate.
- __ mov(r3, Operand(ExternalReference::isolate_address()));
-
- ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
- __ CallCFunction(function, argument_count);
-
- // Check if function returned non-zero for success or zero for failure.
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- BranchOrBacktrack(eq, on_no_match);
- // On success, increment position by length of capture.
- __ add(current_input_offset(), current_input_offset(), Operand(r4));
- }
-
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
- Label success;
-
- // Find length of back-referenced capture.
- __ ldr(r0, register_location(start_reg));
- __ ldr(r1, register_location(start_reg + 1));
- __ sub(r1, r1, r0, SetCC); // Length to check.
- // Succeed on empty capture (including no capture).
- __ b(eq, &fallthrough);
-
- // Check that there are enough characters left in the input.
- __ cmn(r1, Operand(current_input_offset()));
- BranchOrBacktrack(gt, on_no_match);
-
- // Compute pointers to match string and capture string
- __ add(r0, r0, Operand(end_of_input_address()));
- __ add(r2, end_of_input_address(), Operand(current_input_offset()));
- __ add(r1, r1, Operand(r0));
-
- Label loop;
- __ bind(&loop);
- if (mode_ == ASCII) {
- __ ldrb(r3, MemOperand(r0, char_size(), PostIndex));
- __ ldrb(r4, MemOperand(r2, char_size(), PostIndex));
- } else {
- ASSERT(mode_ == UC16);
- __ ldrh(r3, MemOperand(r0, char_size(), PostIndex));
- __ ldrh(r4, MemOperand(r2, char_size(), PostIndex));
- }
- __ cmp(r3, r4);
- BranchOrBacktrack(ne, on_no_match);
- __ cmp(r0, r1);
- __ b(lt, &loop);
-
- // Move current character position to position after match.
- __ sub(current_input_offset(), r2, end_of_input_address());
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotRegistersEqual(int reg1,
- int reg2,
- Label* on_not_equal) {
- __ ldr(r0, register_location(reg1));
- __ ldr(r1, register_location(reg2));
- __ cmp(r0, r1);
- BranchOrBacktrack(ne, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotCharacter(unsigned c,
- Label* on_not_equal) {
- __ cmp(current_character(), Operand(c));
- BranchOrBacktrack(ne, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal) {
- __ and_(r0, current_character(), Operand(mask));
- __ cmp(r0, Operand(c));
- BranchOrBacktrack(eq, on_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_not_equal) {
- __ and_(r0, current_character(), Operand(mask));
- __ cmp(r0, Operand(c));
- BranchOrBacktrack(ne, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotCharacterAfterMinusAnd(
- uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal) {
- ASSERT(minus < String::kMaxUC16CharCode);
- __ sub(r0, current_character(), Operand(minus));
- __ and_(r0, r0, Operand(mask));
- __ cmp(r0, Operand(c));
- BranchOrBacktrack(ne, on_not_equal);
-}
-
-
-bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match) {
- // Range checks (c in min..max) are generally implemented by an unsigned
- // (c - min) <= (max - min) check
- switch (type) {
- case 's':
- // Match space-characters
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- Label success;
- __ cmp(current_character(), Operand(' '));
- __ b(eq, &success);
- // Check range 0x09..0x0d
- __ sub(r0, current_character(), Operand('\t'));
- __ cmp(r0, Operand('\r' - '\t'));
- BranchOrBacktrack(hi, on_no_match);
- __ bind(&success);
- return true;
- }
- return false;
- case 'S':
- // Match non-space characters.
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- __ cmp(current_character(), Operand(' '));
- BranchOrBacktrack(eq, on_no_match);
- __ sub(r0, current_character(), Operand('\t'));
- __ cmp(r0, Operand('\r' - '\t'));
- BranchOrBacktrack(ls, on_no_match);
- return true;
- }
- return false;
- case 'd':
- // Match ASCII digits ('0'..'9')
- __ sub(r0, current_character(), Operand('0'));
- __ cmp(current_character(), Operand('9' - '0'));
- BranchOrBacktrack(hi, on_no_match);
- return true;
- case 'D':
- // Match non ASCII-digits
- __ sub(r0, current_character(), Operand('0'));
- __ cmp(r0, Operand('9' - '0'));
- BranchOrBacktrack(ls, on_no_match);
- return true;
- case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ eor(r0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(r0, r0, Operand(0x0b));
- __ cmp(r0, Operand(0x0c - 0x0b));
- BranchOrBacktrack(ls, on_no_match);
- if (mode_ == UC16) {
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(r0, r0, Operand(0x2028 - 0x0b));
- __ cmp(r0, Operand(1));
- BranchOrBacktrack(ls, on_no_match);
- }
- return true;
- }
- case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ eor(r0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(r0, r0, Operand(0x0b));
- __ cmp(r0, Operand(0x0c - 0x0b));
- if (mode_ == ASCII) {
- BranchOrBacktrack(hi, on_no_match);
- } else {
- Label done;
- __ b(ls, &done);
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(r0, r0, Operand(0x2028 - 0x0b));
- __ cmp(r0, Operand(1));
- BranchOrBacktrack(hi, on_no_match);
- __ bind(&done);
- }
- return true;
- }
- case 'w': {
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
- __ cmp(current_character(), Operand('z'));
- BranchOrBacktrack(hi, on_no_match);
- }
- ExternalReference map = ExternalReference::re_word_character_map();
- __ mov(r0, Operand(map));
- __ ldrb(r0, MemOperand(r0, current_character()));
- __ tst(r0, Operand(r0));
- BranchOrBacktrack(eq, on_no_match);
- return true;
- }
- case 'W': {
- Label done;
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
- __ cmp(current_character(), Operand('z'));
- __ b(hi, &done);
- }
- ExternalReference map = ExternalReference::re_word_character_map();
- __ mov(r0, Operand(map));
- __ ldrb(r0, MemOperand(r0, current_character()));
- __ tst(r0, Operand(r0));
- BranchOrBacktrack(ne, on_no_match);
- if (mode_ != ASCII) {
- __ bind(&done);
- }
- return true;
- }
- case '*':
- // Match any character.
- return true;
- // No custom implementation (yet): s(UC16), S(UC16).
- default:
- return false;
- }
-}
-
-
-void RegExpMacroAssemblerARM::Fail() {
- __ mov(r0, Operand(FAILURE));
- __ jmp(&exit_label_);
-}
-
-
-Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
- // Finalize code - write the entry point code now we know how many
- // registers we need.
-
- // Entry code:
- __ bind(&entry_label_);
- // Push arguments
- // Save callee-save registers.
- // Start new stack frame.
- // Store link register in existing stack-cell.
- // Order here should correspond to order of offset constants in header file.
- RegList registers_to_retain = r4.bit() | r5.bit() | r6.bit() |
- r7.bit() | r8.bit() | r9.bit() | r10.bit() | fp.bit();
- RegList argument_registers = r0.bit() | r1.bit() | r2.bit() | r3.bit();
- __ stm(db_w, sp, argument_registers | registers_to_retain | lr.bit());
- // Set frame pointer in space for it if this is not a direct call
- // from generated code.
- __ add(frame_pointer(), sp, Operand(4 * kPointerSize));
- __ push(r0); // Make room for "position - 1" constant (value is irrelevant).
- __ push(r0); // Make room for "at start" constant (value is irrelevant).
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_->isolate());
- __ mov(r0, Operand(stack_limit));
- __ ldr(r0, MemOperand(r0));
- __ sub(r0, sp, r0, SetCC);
- // Handle it if the stack pointer is already below the stack limit.
- __ b(ls, &stack_limit_hit);
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ cmp(r0, Operand(num_registers_ * kPointerSize));
- __ b(hs, &stack_ok);
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ mov(r0, Operand(EXCEPTION));
- __ jmp(&exit_label_);
-
- __ bind(&stack_limit_hit);
- CallCheckStackGuardState(r0);
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- // If returned value is non-zero, we exit with the returned value as result.
- __ b(ne, &exit_label_);
-
- __ bind(&stack_ok);
-
- // Allocate space on stack for registers.
- __ sub(sp, sp, Operand(num_registers_ * kPointerSize));
- // Load string end.
- __ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
- // Load input start.
- __ ldr(r0, MemOperand(frame_pointer(), kInputStart));
- // Find negative length (offset of start relative to end).
- __ sub(current_input_offset(), r0, end_of_input_address());
- // Set r0 to address of char before start of the input string
- // (effectively string position -1).
- __ ldr(r1, MemOperand(frame_pointer(), kStartIndex));
- __ sub(r0, current_input_offset(), Operand(char_size()));
- __ sub(r0, r0, Operand(r1, LSL, (mode_ == UC16) ? 1 : 0));
- // Store this value in a local variable, for use when clearing
- // position registers.
- __ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
-
- // Determine whether the start index is zero, that is at the start of the
- // string, and store that value in a local variable.
- __ tst(r1, Operand(r1));
- __ mov(r1, Operand(1), LeaveCC, eq);
- __ mov(r1, Operand(0, RelocInfo::NONE), LeaveCC, ne);
- __ str(r1, MemOperand(frame_pointer(), kAtStart));
-
- if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
- // Fill saved registers with initial value = start offset - 1
-
- // Address of register 0.
- __ add(r1, frame_pointer(), Operand(kRegisterZero));
- __ mov(r2, Operand(num_saved_registers_));
- Label init_loop;
- __ bind(&init_loop);
- __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex));
- __ sub(r2, r2, Operand(1), SetCC);
- __ b(ne, &init_loop);
- }
-
- // Initialize backtrack stack pointer.
- __ ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
- // Initialize code pointer register
- __ mov(code_pointer(), Operand(masm_->CodeObject()));
- // Load previous char as initial value of current character register.
- Label at_start;
- __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- __ b(ne, &at_start);
- LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
- __ jmp(&start_label_);
- __ bind(&at_start);
- __ mov(current_character(), Operand('\n'));
- __ jmp(&start_label_);
-
-
- // Exit code:
- if (success_label_.is_linked()) {
- // Save captures when successful.
- __ bind(&success_label_);
- if (num_saved_registers_ > 0) {
- // copy captures to output
- __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
- __ ldr(r0, MemOperand(frame_pointer(), kRegisterOutput));
- __ ldr(r2, MemOperand(frame_pointer(), kStartIndex));
- __ sub(r1, end_of_input_address(), r1);
- // r1 is length of input in bytes.
- if (mode_ == UC16) {
- __ mov(r1, Operand(r1, LSR, 1));
- }
- // r1 is length of input in characters.
- __ add(r1, r1, Operand(r2));
- // r1 is length of string in characters.
-
- ASSERT_EQ(0, num_saved_registers_ % 2);
- // Always an even number of capture registers. This allows us to
- // unroll the loop once to add an operation between a load of a register
- // and the following use of that register.
- for (int i = 0; i < num_saved_registers_; i += 2) {
- __ ldr(r2, register_location(i));
- __ ldr(r3, register_location(i + 1));
- if (mode_ == UC16) {
- __ add(r2, r1, Operand(r2, ASR, 1));
- __ add(r3, r1, Operand(r3, ASR, 1));
- } else {
- __ add(r2, r1, Operand(r2));
- __ add(r3, r1, Operand(r3));
- }
- __ str(r2, MemOperand(r0, kPointerSize, PostIndex));
- __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
- }
- }
- __ mov(r0, Operand(SUCCESS));
- }
- // Exit and return r0
- __ bind(&exit_label_);
- // Skip sp past regexp registers and local variables..
- __ mov(sp, frame_pointer());
- // Restore registers r4..r11 and return (restoring lr to pc).
- __ ldm(ia_w, sp, registers_to_retain | pc.bit());
-
- // Backtrack code (branch target for conditional backtracks).
- if (backtrack_label_.is_linked()) {
- __ bind(&backtrack_label_);
- Backtrack();
- }
-
- Label exit_with_exception;
-
- // Preempt-code
- if (check_preempt_label_.is_linked()) {
- SafeCallTarget(&check_preempt_label_);
-
- CallCheckStackGuardState(r0);
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- // If returning non-zero, we should end execution with the given
- // result as return value.
- __ b(ne, &exit_label_);
-
- // String might have moved: Reload end of string from frame.
- __ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
- SafeReturn();
- }
-
- // Backtrack stack overflow code.
- if (stack_overflow_label_.is_linked()) {
- SafeCallTarget(&stack_overflow_label_);
- // Reached if the backtrack-stack limit has been hit.
- Label grow_failed;
-
- // Call GrowStack(backtrack_stackpointer(), &stack_base)
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, r0);
- __ mov(r0, backtrack_stackpointer());
- __ add(r1, frame_pointer(), Operand(kStackHighEnd));
- __ mov(r2, Operand(ExternalReference::isolate_address()));
- ExternalReference grow_stack =
- ExternalReference::re_grow_stack(masm_->isolate());
- __ CallCFunction(grow_stack, num_arguments);
- // If return NULL, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- __ b(eq, &exit_with_exception);
- // Otherwise use return value as new stack pointer.
- __ mov(backtrack_stackpointer(), r0);
- // Restore saved registers and continue.
- SafeReturn();
- }
-
- if (exit_with_exception.is_linked()) {
- // If any of the code above needed to exit with an exception.
- __ bind(&exit_with_exception);
- // Exit with Result EXCEPTION(-1) to signal thrown exception.
- __ mov(r0, Operand(EXCEPTION));
- __ jmp(&exit_label_);
- }
-
- CodeDesc code_desc;
- masm_->GetCode(&code_desc);
- Handle<Code> code = FACTORY->NewCode(code_desc,
- Code::ComputeFlags(Code::REGEXP),
- masm_->CodeObject());
- PROFILE(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
- return Handle<Object>::cast(code);
-}
-
-
-void RegExpMacroAssemblerARM::GoTo(Label* to) {
- BranchOrBacktrack(al, to);
-}
-
-
-void RegExpMacroAssemblerARM::IfRegisterGE(int reg,
- int comparand,
- Label* if_ge) {
- __ ldr(r0, register_location(reg));
- __ cmp(r0, Operand(comparand));
- BranchOrBacktrack(ge, if_ge);
-}
-
-
-void RegExpMacroAssemblerARM::IfRegisterLT(int reg,
- int comparand,
- Label* if_lt) {
- __ ldr(r0, register_location(reg));
- __ cmp(r0, Operand(comparand));
- BranchOrBacktrack(lt, if_lt);
-}
-
-
-void RegExpMacroAssemblerARM::IfRegisterEqPos(int reg,
- Label* if_eq) {
- __ ldr(r0, register_location(reg));
- __ cmp(r0, Operand(current_input_offset()));
- BranchOrBacktrack(eq, if_eq);
-}
-
-
-RegExpMacroAssembler::IrregexpImplementation
- RegExpMacroAssemblerARM::Implementation() {
- return kARMImplementation;
-}
-
-
-void RegExpMacroAssemblerARM::LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters) {
- ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
- ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
- if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
- }
- LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
-
-
-void RegExpMacroAssemblerARM::PopCurrentPosition() {
- Pop(current_input_offset());
-}
-
-
-void RegExpMacroAssemblerARM::PopRegister(int register_index) {
- Pop(r0);
- __ str(r0, register_location(register_index));
-}
-
-
-static bool is_valid_memory_offset(int value) {
- if (value < 0) value = -value;
- return value < (1<<12);
-}
-
-
-void RegExpMacroAssemblerARM::PushBacktrack(Label* label) {
- if (label->is_bound()) {
- int target = label->pos();
- __ mov(r0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
- } else {
- int constant_offset = GetBacktrackConstantPoolEntry();
- masm_->label_at_put(label, constant_offset);
- // Reading pc-relative is based on the address 8 bytes ahead of
- // the current opcode.
- unsigned int offset_of_pc_register_read =
- masm_->pc_offset() + Assembler::kPcLoadDelta;
- int pc_offset_of_constant =
- constant_offset - offset_of_pc_register_read;
- ASSERT(pc_offset_of_constant < 0);
- if (is_valid_memory_offset(pc_offset_of_constant)) {
- masm_->BlockConstPoolBefore(masm_->pc_offset() + Assembler::kInstrSize);
- __ ldr(r0, MemOperand(pc, pc_offset_of_constant));
- } else {
- // Not a 12-bit offset, so it needs to be loaded from the constant
- // pool.
- masm_->BlockConstPoolBefore(
- masm_->pc_offset() + 2 * Assembler::kInstrSize);
- __ mov(r0, Operand(pc_offset_of_constant + Assembler::kInstrSize));
- __ ldr(r0, MemOperand(pc, r0));
- }
- }
- Push(r0);
- CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerARM::PushCurrentPosition() {
- Push(current_input_offset());
-}
-
-
-void RegExpMacroAssemblerARM::PushRegister(int register_index,
- StackCheckFlag check_stack_limit) {
- __ ldr(r0, register_location(register_index));
- Push(r0);
- if (check_stack_limit) CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerARM::ReadCurrentPositionFromRegister(int reg) {
- __ ldr(current_input_offset(), register_location(reg));
-}
-
-
-void RegExpMacroAssemblerARM::ReadStackPointerFromRegister(int reg) {
- __ ldr(backtrack_stackpointer(), register_location(reg));
- __ ldr(r0, MemOperand(frame_pointer(), kStackHighEnd));
- __ add(backtrack_stackpointer(), backtrack_stackpointer(), Operand(r0));
-}
-
-
-void RegExpMacroAssemblerARM::SetCurrentPositionFromEnd(int by) {
- Label after_position;
- __ cmp(current_input_offset(), Operand(-by * char_size()));
- __ b(ge, &after_position);
- __ mov(current_input_offset(), Operand(-by * char_size()));
- // On RegExp code entry (where this operation is used), the character before
- // the current position is expected to be already loaded.
- // We have advanced the position, so it's safe to read backwards.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&after_position);
-}
-
-
-void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) {
- ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
- __ mov(r0, Operand(to));
- __ str(r0, register_location(register_index));
-}
-
-
-void RegExpMacroAssemblerARM::Succeed() {
- __ jmp(&success_label_);
-}
-
-
-void RegExpMacroAssemblerARM::WriteCurrentPositionToRegister(int reg,
- int cp_offset) {
- if (cp_offset == 0) {
- __ str(current_input_offset(), register_location(reg));
- } else {
- __ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
- __ str(r0, register_location(reg));
- }
-}
-
-
-void RegExpMacroAssemblerARM::ClearRegisters(int reg_from, int reg_to) {
- ASSERT(reg_from <= reg_to);
- __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
- for (int reg = reg_from; reg <= reg_to; reg++) {
- __ str(r0, register_location(reg));
- }
-}
-
-
-void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) {
- __ ldr(r1, MemOperand(frame_pointer(), kStackHighEnd));
- __ sub(r0, backtrack_stackpointer(), r1);
- __ str(r0, register_location(reg));
-}
-
-
-// Private methods:
-
-void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, scratch);
- // RegExp code frame pointer.
- __ mov(r2, frame_pointer());
- // Code* of self.
- __ mov(r1, Operand(masm_->CodeObject()));
- // r0 becomes return address pointer.
- ExternalReference stack_guard_check =
- ExternalReference::re_check_stack_guard_state(masm_->isolate());
- CallCFunctionUsingStub(stack_guard_check, num_arguments);
-}
-
-
-// Helper function for reading a value out of a stack frame.
-template <typename T>
-static T& frame_entry(Address re_frame, int frame_offset) {
- return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
-}
-
-
-int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame) {
- Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
- if (isolate->stack_guard()->IsStackOverflow()) {
- isolate->StackOverflow();
- return EXCEPTION;
- }
-
- // If not real stack overflow the stack guard was used to interrupt
- // execution for another purpose.
-
- // If this is a direct call from JavaScript retry the RegExp forcing the call
- // through the runtime system. Currently the direct call cannot handle a GC.
- if (frame_entry<int>(re_frame, kDirectCall) == 1) {
- return RETRY;
- }
-
- // Prepare for possible GC.
- HandleScope handles;
- Handle<Code> code_handle(re_code);
-
- Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
- // Current string.
- bool is_ascii = subject->IsAsciiRepresentation();
-
- ASSERT(re_code->instruction_start() <= *return_address);
- ASSERT(*return_address <=
- re_code->instruction_start() + re_code->instruction_size());
-
- MaybeObject* result = Execution::HandleStackGuardInterrupt();
-
- if (*code_handle != re_code) { // Return address no longer valid
- int delta = *code_handle - re_code;
- // Overwrite the return address on the stack.
- *return_address += delta;
- }
-
- if (result->IsException()) {
- return EXCEPTION;
- }
-
- // String might have changed.
- if (subject->IsAsciiRepresentation() != is_ascii) {
- // If we changed between an ASCII and an UC16 string, the specialized
- // code cannot be used, and we need to restart regexp matching from
- // scratch (including, potentially, compiling a new version of the code).
- return RETRY;
- }
-
- // Otherwise, the content of the string might have moved. It must still
- // be a sequential or external string with the same content.
- // Update the start and end pointers in the stack frame to the current
- // location (whether it has actually moved or not).
- ASSERT(StringShape(*subject).IsSequential() ||
- StringShape(*subject).IsExternal());
-
- // The original start address of the characters to match.
- const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
-
- // Find the current start address of the same character at the current string
- // position.
- int start_index = frame_entry<int>(re_frame, kStartIndex);
- const byte* new_address = StringCharacterPosition(*subject, start_index);
-
- if (start_address != new_address) {
- // If there is a difference, update the object pointer and start and end
- // addresses in the RegExp stack frame to match the new value.
- const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
- int byte_length = end_address - start_address;
- frame_entry<const String*>(re_frame, kInputString) = *subject;
- frame_entry<const byte*>(re_frame, kInputStart) = new_address;
- frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
- }
-
- return 0;
-}
-
-
-MemOperand RegExpMacroAssemblerARM::register_location(int register_index) {
- ASSERT(register_index < (1<<30));
- if (num_registers_ <= register_index) {
- num_registers_ = register_index + 1;
- }
- return MemOperand(frame_pointer(),
- kRegisterZero - register_index * kPointerSize);
-}
-
-
-void RegExpMacroAssemblerARM::CheckPosition(int cp_offset,
- Label* on_outside_input) {
- __ cmp(current_input_offset(), Operand(-cp_offset * char_size()));
- BranchOrBacktrack(ge, on_outside_input);
-}
-
-
-void RegExpMacroAssemblerARM::BranchOrBacktrack(Condition condition,
- Label* to) {
- if (condition == al) { // Unconditional.
- if (to == NULL) {
- Backtrack();
- return;
- }
- __ jmp(to);
- return;
- }
- if (to == NULL) {
- __ b(condition, &backtrack_label_);
- return;
- }
- __ b(condition, to);
-}
-
-
-void RegExpMacroAssemblerARM::SafeCall(Label* to, Condition cond) {
- __ bl(to, cond);
-}
-
-
-void RegExpMacroAssemblerARM::SafeReturn() {
- __ pop(lr);
- __ add(pc, lr, Operand(masm_->CodeObject()));
-}
-
-
-void RegExpMacroAssemblerARM::SafeCallTarget(Label* name) {
- __ bind(name);
- __ sub(lr, lr, Operand(masm_->CodeObject()));
- __ push(lr);
-}
-
-
-void RegExpMacroAssemblerARM::Push(Register source) {
- ASSERT(!source.is(backtrack_stackpointer()));
- __ str(source,
- MemOperand(backtrack_stackpointer(), kPointerSize, NegPreIndex));
-}
-
-
-void RegExpMacroAssemblerARM::Pop(Register target) {
- ASSERT(!target.is(backtrack_stackpointer()));
- __ ldr(target,
- MemOperand(backtrack_stackpointer(), kPointerSize, PostIndex));
-}
-
-
-void RegExpMacroAssemblerARM::CheckPreemption() {
- // Check for preemption.
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_->isolate());
- __ mov(r0, Operand(stack_limit));
- __ ldr(r0, MemOperand(r0));
- __ cmp(sp, r0);
- SafeCall(&check_preempt_label_, ls);
-}
-
-
-void RegExpMacroAssemblerARM::CheckStackLimit() {
- ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
- __ mov(r0, Operand(stack_limit));
- __ ldr(r0, MemOperand(r0));
- __ cmp(backtrack_stackpointer(), Operand(r0));
- SafeCall(&stack_overflow_label_, ls);
-}
-
-
-void RegExpMacroAssemblerARM::EmitBacktrackConstantPool() {
- __ CheckConstPool(false, false);
- __ BlockConstPoolBefore(
- masm_->pc_offset() + kBacktrackConstantPoolSize * Assembler::kInstrSize);
- backtrack_constant_pool_offset_ = masm_->pc_offset();
- for (int i = 0; i < kBacktrackConstantPoolSize; i++) {
- __ emit(0);
- }
-
- backtrack_constant_pool_capacity_ = kBacktrackConstantPoolSize;
-}
-
-
-int RegExpMacroAssemblerARM::GetBacktrackConstantPoolEntry() {
- while (backtrack_constant_pool_capacity_ > 0) {
- int offset = backtrack_constant_pool_offset_;
- backtrack_constant_pool_offset_ += kPointerSize;
- backtrack_constant_pool_capacity_--;
- if (masm_->pc_offset() - offset < 2 * KB) {
- return offset;
- }
- }
- Label new_pool_skip;
- __ jmp(&new_pool_skip);
- EmitBacktrackConstantPool();
- __ bind(&new_pool_skip);
- int offset = backtrack_constant_pool_offset_;
- backtrack_constant_pool_offset_ += kPointerSize;
- backtrack_constant_pool_capacity_--;
- return offset;
-}
-
-
-void RegExpMacroAssemblerARM::CallCFunctionUsingStub(
- ExternalReference function,
- int num_arguments) {
- // Must pass all arguments in registers. The stub pushes on the stack.
- ASSERT(num_arguments <= 4);
- __ mov(code_pointer(), Operand(function));
- RegExpCEntryStub stub;
- __ CallStub(&stub);
- if (OS::ActivationFrameAlignment() != 0) {
- __ ldr(sp, MemOperand(sp, 0));
- }
- __ mov(code_pointer(), Operand(masm_->CodeObject()));
-}
-
-
-void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
- int characters) {
- Register offset = current_input_offset();
- if (cp_offset != 0) {
- __ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
- offset = r0;
- }
- // The ldr, str, ldrh, strh instructions can do unaligned accesses, if the CPU
- // and the operating system running on the target allow it.
- // If unaligned load/stores are not supported then this function must only
- // be used to load a single character at a time.
-#if !V8_TARGET_CAN_READ_UNALIGNED
- ASSERT(characters == 1);
-#endif
-
- if (mode_ == ASCII) {
- if (characters == 4) {
- __ ldr(current_character(), MemOperand(end_of_input_address(), offset));
- } else if (characters == 2) {
- __ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
- } else {
- ASSERT(characters == 1);
- __ ldrb(current_character(), MemOperand(end_of_input_address(), offset));
- }
- } else {
- ASSERT(mode_ == UC16);
- if (characters == 2) {
- __ ldr(current_character(), MemOperand(end_of_input_address(), offset));
- } else {
- ASSERT(characters == 1);
- __ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
- }
- }
-}
-
-
-void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
- int stack_alignment = OS::ActivationFrameAlignment();
- if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
- // Stack is already aligned for call, so decrement by alignment
- // to make room for storing the link register.
- __ str(lr, MemOperand(sp, stack_alignment, NegPreIndex));
- __ mov(r0, sp);
- __ Call(r5);
- __ ldr(pc, MemOperand(sp, stack_alignment, PostIndex));
-}
-
-#undef __
-
-#endif // V8_INTERPRETED_REGEXP
-
-}} // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.h b/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.h
deleted file mode 100644
index b57d0eb..0000000
--- a/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.h
+++ /dev/null
@@ -1,253 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
-#define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
-
-namespace v8 {
-namespace internal {
-
-
-#ifdef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerARM: public RegExpMacroAssembler {
- public:
- RegExpMacroAssemblerARM();
- virtual ~RegExpMacroAssemblerARM();
-};
-
-#else // V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
- public:
- RegExpMacroAssemblerARM(Mode mode, int registers_to_save);
- virtual ~RegExpMacroAssemblerARM();
- virtual int stack_limit_slack();
- virtual void AdvanceCurrentPosition(int by);
- virtual void AdvanceRegister(int reg, int by);
- virtual void Backtrack();
- virtual void Bind(Label* label);
- virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckCharacter(unsigned c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_equal);
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
- virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
- // A "greedy loop" is a loop that is both greedy and with a simple
- // body. It has a particularly simple implementation.
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- Label* on_no_match);
- virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
- virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal);
- // Checks whether the given offset from the current position is before
- // the end of the string.
- virtual void CheckPosition(int cp_offset, Label* on_outside_input);
- virtual bool CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match);
- virtual void Fail();
- virtual Handle<Object> GetCode(Handle<String> source);
- virtual void GoTo(Label* label);
- virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
- virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
- virtual void IfRegisterEqPos(int reg, Label* if_eq);
- virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1);
- virtual void PopCurrentPosition();
- virtual void PopRegister(int register_index);
- virtual void PushBacktrack(Label* label);
- virtual void PushCurrentPosition();
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit);
- virtual void ReadCurrentPositionFromRegister(int reg);
- virtual void ReadStackPointerFromRegister(int reg);
- virtual void SetCurrentPositionFromEnd(int by);
- virtual void SetRegister(int register_index, int to);
- virtual void Succeed();
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
- virtual void ClearRegisters(int reg_from, int reg_to);
- virtual void WriteStackPointerToRegister(int reg);
-
- // Called from RegExp if the stack-guard is triggered.
- // If the code object is relocated, the return address is fixed before
- // returning.
- static int CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame);
- private:
- // Offsets from frame_pointer() of function parameters and stored registers.
- static const int kFramePointer = 0;
-
- // Above the frame pointer - Stored registers and stack passed parameters.
- // Register 4..11.
- static const int kStoredRegisters = kFramePointer;
- // Return address (stored from link register, read into pc on return).
- static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;
- static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
- // Stack parameters placed by caller.
- static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;
- static const int kStackHighEnd = kRegisterOutput + kPointerSize;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
-
- // Below the frame pointer.
- // Register parameters stored by setup code.
- static const int kInputEnd = kFramePointer - kPointerSize;
- static const int kInputStart = kInputEnd - kPointerSize;
- static const int kStartIndex = kInputStart - kPointerSize;
- static const int kInputString = kStartIndex - kPointerSize;
- // When adding local variables remember to push space for them in
- // the frame in GetCode.
- static const int kInputStartMinusOne = kInputString - kPointerSize;
- static const int kAtStart = kInputStartMinusOne - kPointerSize;
- // First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kAtStart - kPointerSize;
-
- // Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
-
- static const int kBacktrackConstantPoolSize = 4;
-
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
- // Check whether preemption has been requested.
- void CheckPreemption();
-
- // Check whether we are exceeding the stack limit on the backtrack stack.
- void CheckStackLimit();
-
- void EmitBacktrackConstantPool();
- int GetBacktrackConstantPoolEntry();
-
-
- // Generate a call to CheckStackGuardState.
- void CallCheckStackGuardState(Register scratch);
-
- // The ebp-relative location of a regexp register.
- MemOperand register_location(int register_index);
-
- // Register holding the current input position as negative offset from
- // the end of the string.
- inline Register current_input_offset() { return r6; }
-
- // The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return r7; }
-
- // Register holding address of the end of the input string.
- inline Register end_of_input_address() { return r10; }
-
- // Register holding the frame address. Local variables, parameters and
- // regexp registers are addressed relative to this.
- inline Register frame_pointer() { return fp; }
-
- // The register containing the backtrack stack top. Provides a meaningful
- // name to the register.
- inline Register backtrack_stackpointer() { return r8; }
-
- // Register holding pointer to the current code object.
- inline Register code_pointer() { return r5; }
-
- // Byte size of chars in the string to match (decided by the Mode argument)
- inline int char_size() { return static_cast<int>(mode_); }
-
- // Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
- void BranchOrBacktrack(Condition condition, Label* to);
-
- // Call and return internally in the generated code in a way that
- // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
- inline void SafeCall(Label* to, Condition cond = al);
- inline void SafeReturn();
- inline void SafeCallTarget(Label* name);
-
- // Pushes the value of a register on the backtrack stack. Decrements the
- // stack pointer by a word size and stores the register's value there.
- inline void Push(Register source);
-
- // Pops a value from the backtrack stack. Reads the word at the stack pointer
- // and increments it by a word size.
- inline void Pop(Register target);
-
- // Calls a C function and cleans up the frame alignment done by
- // by FrameAlign. The called function *is* allowed to trigger a garbage
- // collection, but may not take more than four arguments (no arguments
- // passed on the stack), and the first argument will be a pointer to the
- // return address.
- inline void CallCFunctionUsingStub(ExternalReference function,
- int num_arguments);
-
-
- MacroAssembler* masm_;
-
- // Which mode to generate code for (ASCII or UC16).
- Mode mode_;
-
- // One greater than maximal register index actually used.
- int num_registers_;
-
- // Number of registers to output at the end (the saved registers
- // are always 0..num_saved_registers_-1)
- int num_saved_registers_;
-
- // Manage a small pre-allocated pool for writing label targets
- // to for pushing backtrack addresses.
- int backtrack_constant_pool_offset_;
- int backtrack_constant_pool_capacity_;
-
- // Labels used internally.
- Label entry_label_;
- Label start_label_;
- Label success_label_;
- Label backtrack_label_;
- Label exit_label_;
- Label check_preempt_label_;
- Label stack_overflow_label_;
-};
-
-#endif // V8_INTERPRETED_REGEXP
-
-
-}} // namespace v8::internal
-
-#endif // V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/register-allocator-arm-inl.h b/src/3rdparty/v8/src/arm/register-allocator-arm-inl.h
deleted file mode 100644
index 945cdeb..0000000
--- a/src/3rdparty/v8/src/arm/register-allocator-arm-inl.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
-#define V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
- return reg.is(cp) || reg.is(fp) || reg.is(sp) || reg.is(pc);
-}
-
-
-
-// The register allocator uses small integers to represent the
-// non-reserved assembler registers. The mapping is:
-//
-// r0 <-> 0
-// r1 <-> 1
-// r2 <-> 2
-// r3 <-> 3
-// r4 <-> 4
-// r5 <-> 5
-// r6 <-> 6
-// r7 <-> 7
-// r9 <-> 8
-// r10 <-> 9
-// ip <-> 10
-// lr <-> 11
-
-int RegisterAllocator::ToNumber(Register reg) {
- ASSERT(reg.is_valid() && !IsReserved(reg));
- const int kNumbers[] = {
- 0, // r0
- 1, // r1
- 2, // r2
- 3, // r3
- 4, // r4
- 5, // r5
- 6, // r6
- 7, // r7
- -1, // cp
- 8, // r9
- 9, // r10
- -1, // fp
- 10, // ip
- -1, // sp
- 11, // lr
- -1 // pc
- };
- return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
- ASSERT(num >= 0 && num < kNumRegisters);
- const Register kRegisters[] =
- { r0, r1, r2, r3, r4, r5, r6, r7, r9, r10, ip, lr };
- return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
- Reset();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
diff --git a/src/3rdparty/v8/src/arm/register-allocator-arm.cc b/src/3rdparty/v8/src/arm/register-allocator-arm.cc
deleted file mode 100644
index 3b35574..0000000
--- a/src/3rdparty/v8/src/arm/register-allocator-arm.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-void Result::ToRegister() {
- UNIMPLEMENTED();
-}
-
-
-void Result::ToRegister(Register target) {
- UNIMPLEMENTED();
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
- // No byte registers on ARM.
- UNREACHABLE();
- return Result();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/register-allocator-arm.h b/src/3rdparty/v8/src/arm/register-allocator-arm.h
deleted file mode 100644
index fdbc88f..0000000
--- a/src/3rdparty/v8/src/arm/register-allocator-arm.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_H_
-#define V8_ARM_REGISTER_ALLOCATOR_ARM_H_
-
-namespace v8 {
-namespace internal {
-
-class RegisterAllocatorConstants : public AllStatic {
- public:
- // No registers are currently managed by the register allocator on ARM.
- static const int kNumRegisters = 0;
- static const int kInvalidRegister = -1;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_REGISTER_ALLOCATOR_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/simulator-arm.cc b/src/3rdparty/v8/src/arm/simulator-arm.cc
deleted file mode 100644
index 46797d9..0000000
--- a/src/3rdparty/v8/src/arm/simulator-arm.cc
+++ /dev/null
@@ -1,3215 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-#include <math.h>
-#include <cstdarg>
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "disasm.h"
-#include "assembler.h"
-#include "arm/constants-arm.h"
-#include "arm/simulator-arm.h"
-
-#if defined(USE_SIMULATOR)
-
-// Only build the simulator if not compiling for real ARM hardware.
-namespace v8 {
-namespace internal {
-
-// This macro provides a platform independent use of sscanf. The reason for
-// SScanF not being implemented in a platform independent way through
-// ::v8::internal::OS in the same way as SNPrintF is that the
-// Windows C Run-Time Library does not provide vsscanf.
-#define SScanF sscanf // NOLINT
-
-// The ArmDebugger class is used by the simulator while debugging simulated ARM
-// code.
-class ArmDebugger {
- public:
- explicit ArmDebugger(Simulator* sim);
- ~ArmDebugger();
-
- void Stop(Instruction* instr);
- void Debug();
-
- private:
- static const Instr kBreakpointInstr =
- (al | (7*B25) | (1*B24) | kBreakpoint);
- static const Instr kNopInstr = (al | (13*B21));
-
- Simulator* sim_;
-
- int32_t GetRegisterValue(int regnum);
- double GetVFPDoubleRegisterValue(int regnum);
- bool GetValue(const char* desc, int32_t* value);
- bool GetVFPSingleValue(const char* desc, float* value);
- bool GetVFPDoubleValue(const char* desc, double* value);
-
- // Set or delete a breakpoint. Returns true if successful.
- bool SetBreakpoint(Instruction* breakpc);
- bool DeleteBreakpoint(Instruction* breakpc);
-
- // Undo and redo all breakpoints. This is needed to bracket disassembly and
- // execution to skip past breakpoints when run from the debugger.
- void UndoBreakpoints();
- void RedoBreakpoints();
-};
-
-
-ArmDebugger::ArmDebugger(Simulator* sim) {
- sim_ = sim;
-}
-
-
-ArmDebugger::~ArmDebugger() {
-}
-
-
-
-#ifdef GENERATED_CODE_COVERAGE
-static FILE* coverage_log = NULL;
-
-
-static void InitializeCoverage() {
- char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
- if (file_name != NULL) {
- coverage_log = fopen(file_name, "aw+");
- }
-}
-
-
-void ArmDebugger::Stop(Instruction* instr) {
- // Get the stop code.
- uint32_t code = instr->SvcValue() & kStopCodeMask;
- // Retrieve the encoded address, which comes just after this stop.
- char** msg_address =
- reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
- char* msg = *msg_address;
- ASSERT(msg != NULL);
-
- // Update this stop description.
- if (isWatchedStop(code) && !watched_stops[code].desc) {
- watched_stops[code].desc = msg;
- }
-
- if (strlen(msg) > 0) {
- if (coverage_log != NULL) {
- fprintf(coverage_log, "%s\n", msg);
- fflush(coverage_log);
- }
- // Overwrite the instruction and address with nops.
- instr->SetInstructionBits(kNopInstr);
- reinterpret_cast<Instruction*>(msg_address)->SetInstructionBits(kNopInstr);
- }
- sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
-}
-
-#else // ndef GENERATED_CODE_COVERAGE
-
-static void InitializeCoverage() {
-}
-
-
-void ArmDebugger::Stop(Instruction* instr) {
- // Get the stop code.
- uint32_t code = instr->SvcValue() & kStopCodeMask;
- // Retrieve the encoded address, which comes just after this stop.
- char* msg = *reinterpret_cast<char**>(sim_->get_pc()
- + Instruction::kInstrSize);
- // Update this stop description.
- if (sim_->isWatchedStop(code) && !sim_->watched_stops[code].desc) {
- sim_->watched_stops[code].desc = msg;
- }
- // Print the stop message and code if it is not the default code.
- if (code != kMaxStopCode) {
- PrintF("Simulator hit stop %u: %s\n", code, msg);
- } else {
- PrintF("Simulator hit %s\n", msg);
- }
- sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
- Debug();
-}
-#endif
-
-
-int32_t ArmDebugger::GetRegisterValue(int regnum) {
- if (regnum == kPCRegister) {
- return sim_->get_pc();
- } else {
- return sim_->get_register(regnum);
- }
-}
-
-
-double ArmDebugger::GetVFPDoubleRegisterValue(int regnum) {
- return sim_->get_double_from_d_register(regnum);
-}
-
-
-bool ArmDebugger::GetValue(const char* desc, int32_t* value) {
- int regnum = Registers::Number(desc);
- if (regnum != kNoRegister) {
- *value = GetRegisterValue(regnum);
- return true;
- } else {
- if (strncmp(desc, "0x", 2) == 0) {
- return SScanF(desc + 2, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
- } else {
- return SScanF(desc, "%u", reinterpret_cast<uint32_t*>(value)) == 1;
- }
- }
- return false;
-}
-
-
-bool ArmDebugger::GetVFPSingleValue(const char* desc, float* value) {
- bool is_double;
- int regnum = VFPRegisters::Number(desc, &is_double);
- if (regnum != kNoRegister && !is_double) {
- *value = sim_->get_float_from_s_register(regnum);
- return true;
- }
- return false;
-}
-
-
-bool ArmDebugger::GetVFPDoubleValue(const char* desc, double* value) {
- bool is_double;
- int regnum = VFPRegisters::Number(desc, &is_double);
- if (regnum != kNoRegister && is_double) {
- *value = sim_->get_double_from_d_register(regnum);
- return true;
- }
- return false;
-}
-
-
-bool ArmDebugger::SetBreakpoint(Instruction* breakpc) {
- // Check if a breakpoint can be set. If not return without any side-effects.
- if (sim_->break_pc_ != NULL) {
- return false;
- }
-
- // Set the breakpoint.
- sim_->break_pc_ = breakpc;
- sim_->break_instr_ = breakpc->InstructionBits();
- // Not setting the breakpoint instruction in the code itself. It will be set
- // when the debugger shell continues.
- return true;
-}
-
-
-bool ArmDebugger::DeleteBreakpoint(Instruction* breakpc) {
- if (sim_->break_pc_ != NULL) {
- sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
- }
-
- sim_->break_pc_ = NULL;
- sim_->break_instr_ = 0;
- return true;
-}
-
-
-void ArmDebugger::UndoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
- sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
- }
-}
-
-
-void ArmDebugger::RedoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
- sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
- }
-}
-
-
-void ArmDebugger::Debug() {
- intptr_t last_pc = -1;
- bool done = false;
-
-#define COMMAND_SIZE 63
-#define ARG_SIZE 255
-
-#define STR(a) #a
-#define XSTR(a) STR(a)
-
- char cmd[COMMAND_SIZE + 1];
- char arg1[ARG_SIZE + 1];
- char arg2[ARG_SIZE + 1];
- char* argv[3] = { cmd, arg1, arg2 };
-
- // make sure to have a proper terminating character if reaching the limit
- cmd[COMMAND_SIZE] = 0;
- arg1[ARG_SIZE] = 0;
- arg2[ARG_SIZE] = 0;
-
- // Undo all set breakpoints while running in the debugger shell. This will
- // make them invisible to all commands.
- UndoBreakpoints();
-
- while (!done) {
- if (last_pc != sim_->get_pc()) {
- disasm::NameConverter converter;
- disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
- v8::internal::EmbeddedVector<char, 256> buffer;
- dasm.InstructionDecode(buffer,
- reinterpret_cast<byte*>(sim_->get_pc()));
- PrintF(" 0x%08x %s\n", sim_->get_pc(), buffer.start());
- last_pc = sim_->get_pc();
- }
- char* line = ReadLine("sim> ");
- if (line == NULL) {
- break;
- } else {
- // Use sscanf to parse the individual parts of the command line. At the
- // moment no command expects more than two parameters.
- int argc = SScanF(line,
- "%" XSTR(COMMAND_SIZE) "s "
- "%" XSTR(ARG_SIZE) "s "
- "%" XSTR(ARG_SIZE) "s",
- cmd, arg1, arg2);
- if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
- sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
- } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
- // Execute the one instruction we broke at with breakpoints disabled.
- sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
- // Leave the debugger shell.
- done = true;
- } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
- if (argc == 2) {
- int32_t value;
- float svalue;
- double dvalue;
- if (strcmp(arg1, "all") == 0) {
- for (int i = 0; i < kNumRegisters; i++) {
- value = GetRegisterValue(i);
- PrintF("%3s: 0x%08x %10d\n", Registers::Name(i), value, value);
- }
- for (int i = 0; i < kNumVFPDoubleRegisters; i++) {
- dvalue = GetVFPDoubleRegisterValue(i);
- uint64_t as_words = BitCast<uint64_t>(dvalue);
- PrintF("%3s: %f 0x%08x %08x\n",
- VFPRegisters::Name(i, true),
- dvalue,
- static_cast<uint32_t>(as_words >> 32),
- static_cast<uint32_t>(as_words & 0xffffffff));
- }
- } else {
- if (GetValue(arg1, &value)) {
- PrintF("%s: 0x%08x %d \n", arg1, value, value);
- } else if (GetVFPSingleValue(arg1, &svalue)) {
- uint32_t as_word = BitCast<uint32_t>(svalue);
- PrintF("%s: %f 0x%08x\n", arg1, svalue, as_word);
- } else if (GetVFPDoubleValue(arg1, &dvalue)) {
- uint64_t as_words = BitCast<uint64_t>(dvalue);
- PrintF("%s: %f 0x%08x %08x\n",
- arg1,
- dvalue,
- static_cast<uint32_t>(as_words >> 32),
- static_cast<uint32_t>(as_words & 0xffffffff));
- } else {
- PrintF("%s unrecognized\n", arg1);
- }
- }
- } else {
- PrintF("print <register>\n");
- }
- } else if ((strcmp(cmd, "po") == 0)
- || (strcmp(cmd, "printobject") == 0)) {
- if (argc == 2) {
- int32_t value;
- if (GetValue(arg1, &value)) {
- Object* obj = reinterpret_cast<Object*>(value);
- PrintF("%s: \n", arg1);
-#ifdef DEBUG
- obj->PrintLn();
-#else
- obj->ShortPrint();
- PrintF("\n");
-#endif
- } else {
- PrintF("%s unrecognized\n", arg1);
- }
- } else {
- PrintF("printobject <value>\n");
- }
- } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
- int32_t* cur = NULL;
- int32_t* end = NULL;
- int next_arg = 1;
-
- if (strcmp(cmd, "stack") == 0) {
- cur = reinterpret_cast<int32_t*>(sim_->get_register(Simulator::sp));
- } else { // "mem"
- int32_t value;
- if (!GetValue(arg1, &value)) {
- PrintF("%s unrecognized\n", arg1);
- continue;
- }
- cur = reinterpret_cast<int32_t*>(value);
- next_arg++;
- }
-
- int32_t words;
- if (argc == next_arg) {
- words = 10;
- } else if (argc == next_arg + 1) {
- if (!GetValue(argv[next_arg], &words)) {
- words = 10;
- }
- }
- end = cur + words;
-
- while (cur < end) {
- PrintF(" 0x%08x: 0x%08x %10d",
- reinterpret_cast<intptr_t>(cur), *cur, *cur);
- HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
- int value = *cur;
- Heap* current_heap = v8::internal::Isolate::Current()->heap();
- if (current_heap->Contains(obj) || ((value & 1) == 0)) {
- PrintF(" (");
- if ((value & 1) == 0) {
- PrintF("smi %d", value / 2);
- } else {
- obj->ShortPrint();
- }
- PrintF(")");
- }
- PrintF("\n");
- cur++;
- }
- } else if (strcmp(cmd, "disasm") == 0 || strcmp(cmd, "di") == 0) {
- disasm::NameConverter converter;
- disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
- v8::internal::EmbeddedVector<char, 256> buffer;
-
- byte* prev = NULL;
- byte* cur = NULL;
- byte* end = NULL;
-
- if (argc == 1) {
- cur = reinterpret_cast<byte*>(sim_->get_pc());
- end = cur + (10 * Instruction::kInstrSize);
- } else if (argc == 2) {
- int regnum = Registers::Number(arg1);
- if (regnum != kNoRegister || strncmp(arg1, "0x", 2) == 0) {
- // The argument is an address or a register name.
- int32_t value;
- if (GetValue(arg1, &value)) {
- cur = reinterpret_cast<byte*>(value);
- // Disassemble 10 instructions at <arg1>.
- end = cur + (10 * Instruction::kInstrSize);
- }
- } else {
- // The argument is the number of instructions.
- int32_t value;
- if (GetValue(arg1, &value)) {
- cur = reinterpret_cast<byte*>(sim_->get_pc());
- // Disassemble <arg1> instructions.
- end = cur + (value * Instruction::kInstrSize);
- }
- }
- } else {
- int32_t value1;
- int32_t value2;
- if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
- cur = reinterpret_cast<byte*>(value1);
- end = cur + (value2 * Instruction::kInstrSize);
- }
- }
-
- while (cur < end) {
- prev = cur;
- cur += dasm.InstructionDecode(buffer, cur);
- PrintF(" 0x%08x %s\n",
- reinterpret_cast<intptr_t>(prev), buffer.start());
- }
- } else if (strcmp(cmd, "gdb") == 0) {
- PrintF("relinquishing control to gdb\n");
- v8::internal::OS::DebugBreak();
- PrintF("regaining control from gdb\n");
- } else if (strcmp(cmd, "break") == 0) {
- if (argc == 2) {
- int32_t value;
- if (GetValue(arg1, &value)) {
- if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
- PrintF("setting breakpoint failed\n");
- }
- } else {
- PrintF("%s unrecognized\n", arg1);
- }
- } else {
- PrintF("break <address>\n");
- }
- } else if (strcmp(cmd, "del") == 0) {
- if (!DeleteBreakpoint(NULL)) {
- PrintF("deleting breakpoint failed\n");
- }
- } else if (strcmp(cmd, "flags") == 0) {
- PrintF("N flag: %d; ", sim_->n_flag_);
- PrintF("Z flag: %d; ", sim_->z_flag_);
- PrintF("C flag: %d; ", sim_->c_flag_);
- PrintF("V flag: %d\n", sim_->v_flag_);
- PrintF("INVALID OP flag: %d; ", sim_->inv_op_vfp_flag_);
- PrintF("DIV BY ZERO flag: %d; ", sim_->div_zero_vfp_flag_);
- PrintF("OVERFLOW flag: %d; ", sim_->overflow_vfp_flag_);
- PrintF("UNDERFLOW flag: %d; ", sim_->underflow_vfp_flag_);
- PrintF("INEXACT flag: %d;\n", sim_->inexact_vfp_flag_);
- } else if (strcmp(cmd, "stop") == 0) {
- int32_t value;
- intptr_t stop_pc = sim_->get_pc() - 2 * Instruction::kInstrSize;
- Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
- Instruction* msg_address =
- reinterpret_cast<Instruction*>(stop_pc + Instruction::kInstrSize);
- if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
- // Remove the current stop.
- if (sim_->isStopInstruction(stop_instr)) {
- stop_instr->SetInstructionBits(kNopInstr);
- msg_address->SetInstructionBits(kNopInstr);
- } else {
- PrintF("Not at debugger stop.\n");
- }
- } else if (argc == 3) {
- // Print information about all/the specified breakpoint(s).
- if (strcmp(arg1, "info") == 0) {
- if (strcmp(arg2, "all") == 0) {
- PrintF("Stop information:\n");
- for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
- sim_->PrintStopInfo(i);
- }
- } else if (GetValue(arg2, &value)) {
- sim_->PrintStopInfo(value);
- } else {
- PrintF("Unrecognized argument.\n");
- }
- } else if (strcmp(arg1, "enable") == 0) {
- // Enable all/the specified breakpoint(s).
- if (strcmp(arg2, "all") == 0) {
- for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
- sim_->EnableStop(i);
- }
- } else if (GetValue(arg2, &value)) {
- sim_->EnableStop(value);
- } else {
- PrintF("Unrecognized argument.\n");
- }
- } else if (strcmp(arg1, "disable") == 0) {
- // Disable all/the specified breakpoint(s).
- if (strcmp(arg2, "all") == 0) {
- for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
- sim_->DisableStop(i);
- }
- } else if (GetValue(arg2, &value)) {
- sim_->DisableStop(value);
- } else {
- PrintF("Unrecognized argument.\n");
- }
- }
- } else {
- PrintF("Wrong usage. Use help command for more information.\n");
- }
- } else if ((strcmp(cmd, "t") == 0) || strcmp(cmd, "trace") == 0) {
- ::v8::internal::FLAG_trace_sim = !::v8::internal::FLAG_trace_sim;
- PrintF("Trace of executed instructions is %s\n",
- ::v8::internal::FLAG_trace_sim ? "on" : "off");
- } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
- PrintF("cont\n");
- PrintF(" continue execution (alias 'c')\n");
- PrintF("stepi\n");
- PrintF(" step one instruction (alias 'si')\n");
- PrintF("print <register>\n");
- PrintF(" print register content (alias 'p')\n");
- PrintF(" use register name 'all' to print all registers\n");
- PrintF("printobject <register>\n");
- PrintF(" print an object from a register (alias 'po')\n");
- PrintF("flags\n");
- PrintF(" print flags\n");
- PrintF("stack [<words>]\n");
- PrintF(" dump stack content, default dump 10 words)\n");
- PrintF("mem <address> [<words>]\n");
- PrintF(" dump memory content, default dump 10 words)\n");
- PrintF("disasm [<instructions>]\n");
- PrintF("disasm [<address/register>]\n");
- PrintF("disasm [[<address/register>] <instructions>]\n");
- PrintF(" disassemble code, default is 10 instructions\n");
- PrintF(" from pc (alias 'di')\n");
- PrintF("gdb\n");
- PrintF(" enter gdb\n");
- PrintF("break <address>\n");
- PrintF(" set a break point on the address\n");
- PrintF("del\n");
- PrintF(" delete the breakpoint\n");
- PrintF("trace (alias 't')\n");
- PrintF(" toogle the tracing of all executed statements\n");
- PrintF("stop feature:\n");
- PrintF(" Description:\n");
- PrintF(" Stops are debug instructions inserted by\n");
- PrintF(" the Assembler::stop() function.\n");
- PrintF(" When hitting a stop, the Simulator will\n");
- PrintF(" stop and and give control to the ArmDebugger.\n");
- PrintF(" The first %d stop codes are watched:\n",
- Simulator::kNumOfWatchedStops);
- PrintF(" - They can be enabled / disabled: the Simulator\n");
- PrintF(" will / won't stop when hitting them.\n");
- PrintF(" - The Simulator keeps track of how many times they \n");
- PrintF(" are met. (See the info command.) Going over a\n");
- PrintF(" disabled stop still increases its counter. \n");
- PrintF(" Commands:\n");
- PrintF(" stop info all/<code> : print infos about number <code>\n");
- PrintF(" or all stop(s).\n");
- PrintF(" stop enable/disable all/<code> : enables / disables\n");
- PrintF(" all or number <code> stop(s)\n");
- PrintF(" stop unstop\n");
- PrintF(" ignore the stop instruction at the current location\n");
- PrintF(" from now on\n");
- } else {
- PrintF("Unknown command: %s\n", cmd);
- }
- }
- DeleteArray(line);
- }
-
- // Add all the breakpoints back to stop execution and enter the debugger
- // shell when hit.
- RedoBreakpoints();
-
-#undef COMMAND_SIZE
-#undef ARG_SIZE
-
-#undef STR
-#undef XSTR
-}
-
-
-static bool ICacheMatch(void* one, void* two) {
- ASSERT((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
- ASSERT((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
- return one == two;
-}
-
-
-static uint32_t ICacheHash(void* key) {
- return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
-}
-
-
-static bool AllOnOnePage(uintptr_t start, int size) {
- intptr_t start_page = (start & ~CachePage::kPageMask);
- intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
- return start_page == end_page;
-}
-
-
-void Simulator::FlushICache(v8::internal::HashMap* i_cache,
- void* start_addr,
- size_t size) {
- intptr_t start = reinterpret_cast<intptr_t>(start_addr);
- int intra_line = (start & CachePage::kLineMask);
- start -= intra_line;
- size += intra_line;
- size = ((size - 1) | CachePage::kLineMask) + 1;
- int offset = (start & CachePage::kPageMask);
- while (!AllOnOnePage(start, size - 1)) {
- int bytes_to_flush = CachePage::kPageSize - offset;
- FlushOnePage(i_cache, start, bytes_to_flush);
- start += bytes_to_flush;
- size -= bytes_to_flush;
- ASSERT_EQ(0, start & CachePage::kPageMask);
- offset = 0;
- }
- if (size != 0) {
- FlushOnePage(i_cache, start, size);
- }
-}
-
-
-CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
- v8::internal::HashMap::Entry* entry = i_cache->Lookup(page,
- ICacheHash(page),
- true);
- if (entry->value == NULL) {
- CachePage* new_page = new CachePage();
- entry->value = new_page;
- }
- return reinterpret_cast<CachePage*>(entry->value);
-}
-
-
-// Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
- intptr_t start,
- int size) {
- ASSERT(size <= CachePage::kPageSize);
- ASSERT(AllOnOnePage(start, size - 1));
- ASSERT((start & CachePage::kLineMask) == 0);
- ASSERT((size & CachePage::kLineMask) == 0);
- void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
- int offset = (start & CachePage::kPageMask);
- CachePage* cache_page = GetCachePage(i_cache, page);
- char* valid_bytemap = cache_page->ValidityByte(offset);
- memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
-}
-
-
-void Simulator::CheckICache(v8::internal::HashMap* i_cache,
- Instruction* instr) {
- intptr_t address = reinterpret_cast<intptr_t>(instr);
- void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
- void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
- int offset = (address & CachePage::kPageMask);
- CachePage* cache_page = GetCachePage(i_cache, page);
- char* cache_valid_byte = cache_page->ValidityByte(offset);
- bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
- char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
- if (cache_hit) {
- // Check that the data in memory matches the contents of the I-cache.
- CHECK(memcmp(reinterpret_cast<void*>(instr),
- cache_page->CachedData(offset),
- Instruction::kInstrSize) == 0);
- } else {
- // Cache miss. Load memory into the cache.
- memcpy(cached_line, line, CachePage::kLineLength);
- *cache_valid_byte = CachePage::LINE_VALID;
- }
-}
-
-
-void Simulator::Initialize() {
- if (Isolate::Current()->simulator_initialized()) return;
- Isolate::Current()->set_simulator_initialized(true);
- ::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
-}
-
-
-Simulator::Simulator() : isolate_(Isolate::Current()) {
- i_cache_ = isolate_->simulator_i_cache();
- if (i_cache_ == NULL) {
- i_cache_ = new v8::internal::HashMap(&ICacheMatch);
- isolate_->set_simulator_i_cache(i_cache_);
- }
- Initialize();
- // Setup simulator support first. Some of this information is needed to
- // setup the architecture state.
- size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack
- stack_ = reinterpret_cast<char*>(malloc(stack_size));
- pc_modified_ = false;
- icount_ = 0;
- break_pc_ = NULL;
- break_instr_ = 0;
-
- // Setup architecture state.
- // All registers are initialized to zero to start with.
- for (int i = 0; i < num_registers; i++) {
- registers_[i] = 0;
- }
- n_flag_ = false;
- z_flag_ = false;
- c_flag_ = false;
- v_flag_ = false;
-
- // Initializing VFP registers.
- // All registers are initialized to zero to start with
- // even though s_registers_ & d_registers_ share the same
- // physical registers in the target.
- for (int i = 0; i < num_s_registers; i++) {
- vfp_register[i] = 0;
- }
- n_flag_FPSCR_ = false;
- z_flag_FPSCR_ = false;
- c_flag_FPSCR_ = false;
- v_flag_FPSCR_ = false;
- FPSCR_rounding_mode_ = RZ;
-
- inv_op_vfp_flag_ = false;
- div_zero_vfp_flag_ = false;
- overflow_vfp_flag_ = false;
- underflow_vfp_flag_ = false;
- inexact_vfp_flag_ = false;
-
- // The sp is initialized to point to the bottom (high address) of the
- // allocated stack area. To be safe in potential stack underflows we leave
- // some buffer below.
- registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size - 64;
- // The lr and pc are initialized to a known bad value that will cause an
- // access violation if the simulator ever tries to execute it.
- registers_[pc] = bad_lr;
- registers_[lr] = bad_lr;
- InitializeCoverage();
-}
-
-
-// When the generated code calls an external reference we need to catch that in
-// the simulator. The external reference will be a function compiled for the
-// host architecture. We need to call that function instead of trying to
-// execute it with the simulator. We do that by redirecting the external
-// reference to a svc (Supervisor Call) instruction that is handled by
-// the simulator. We write the original destination of the jump just at a known
-// offset from the svc instruction so the simulator knows what to call.
-class Redirection {
- public:
- Redirection(void* external_function, ExternalReference::Type type)
- : external_function_(external_function),
- swi_instruction_(al | (0xf*B24) | kCallRtRedirected),
- type_(type),
- next_(NULL) {
- Isolate* isolate = Isolate::Current();
- next_ = isolate->simulator_redirection();
- Simulator::current(isolate)->
- FlushICache(isolate->simulator_i_cache(),
- reinterpret_cast<void*>(&swi_instruction_),
- Instruction::kInstrSize);
- isolate->set_simulator_redirection(this);
- }
-
- void* address_of_swi_instruction() {
- return reinterpret_cast<void*>(&swi_instruction_);
- }
-
- void* external_function() { return external_function_; }
- ExternalReference::Type type() { return type_; }
-
- static Redirection* Get(void* external_function,
- ExternalReference::Type type) {
- Isolate* isolate = Isolate::Current();
- Redirection* current = isolate->simulator_redirection();
- for (; current != NULL; current = current->next_) {
- if (current->external_function_ == external_function) return current;
- }
- return new Redirection(external_function, type);
- }
-
- static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
- char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
- char* addr_of_redirection =
- addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
- return reinterpret_cast<Redirection*>(addr_of_redirection);
- }
-
- private:
- void* external_function_;
- uint32_t swi_instruction_;
- ExternalReference::Type type_;
- Redirection* next_;
-};
-
-
-void* Simulator::RedirectExternalReference(void* external_function,
- ExternalReference::Type type) {
- Redirection* redirection = Redirection::Get(external_function, type);
- return redirection->address_of_swi_instruction();
-}
-
-
-// Get the active Simulator for the current thread.
-Simulator* Simulator::current(Isolate* isolate) {
- v8::internal::Isolate::PerIsolateThreadData* isolate_data =
- Isolate::CurrentPerIsolateThreadData();
- if (isolate_data == NULL) {
- Isolate::EnterDefaultIsolate();
- isolate_data = Isolate::CurrentPerIsolateThreadData();
- }
- ASSERT(isolate_data != NULL);
-
- Simulator* sim = isolate_data->simulator();
- if (sim == NULL) {
- // TODO(146): delete the simulator object when a thread/isolate goes away.
- sim = new Simulator();
- isolate_data->set_simulator(sim);
- }
- return sim;
-}
-
-
-// Sets the register in the architecture state. It will also deal with updating
-// Simulator internal state for special registers such as PC.
-void Simulator::set_register(int reg, int32_t value) {
- ASSERT((reg >= 0) && (reg < num_registers));
- if (reg == pc) {
- pc_modified_ = true;
- }
- registers_[reg] = value;
-}
-
-
-// Get the register from the architecture state. This function does handle
-// the special case of accessing the PC register.
-int32_t Simulator::get_register(int reg) const {
- ASSERT((reg >= 0) && (reg < num_registers));
- // Stupid code added to avoid bug in GCC.
- // See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
- if (reg >= num_registers) return 0;
- // End stupid code.
- return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0);
-}
-
-
-void Simulator::set_dw_register(int dreg, const int* dbl) {
- ASSERT((dreg >= 0) && (dreg < num_d_registers));
- registers_[dreg] = dbl[0];
- registers_[dreg + 1] = dbl[1];
-}
-
-
-// Raw access to the PC register.
-void Simulator::set_pc(int32_t value) {
- pc_modified_ = true;
- registers_[pc] = value;
-}
-
-
-bool Simulator::has_bad_pc() const {
- return ((registers_[pc] == bad_lr) || (registers_[pc] == end_sim_pc));
-}
-
-
-// Raw access to the PC register without the special adjustment when reading.
-int32_t Simulator::get_pc() const {
- return registers_[pc];
-}
-
-
-// Getting from and setting into VFP registers.
-void Simulator::set_s_register(int sreg, unsigned int value) {
- ASSERT((sreg >= 0) && (sreg < num_s_registers));
- vfp_register[sreg] = value;
-}
-
-
-unsigned int Simulator::get_s_register(int sreg) const {
- ASSERT((sreg >= 0) && (sreg < num_s_registers));
- return vfp_register[sreg];
-}
-
-
-void Simulator::set_s_register_from_float(int sreg, const float flt) {
- ASSERT((sreg >= 0) && (sreg < num_s_registers));
- // Read the bits from the single precision floating point value
- // into the unsigned integer element of vfp_register[] given by index=sreg.
- char buffer[sizeof(vfp_register[0])];
- memcpy(buffer, &flt, sizeof(vfp_register[0]));
- memcpy(&vfp_register[sreg], buffer, sizeof(vfp_register[0]));
-}
-
-
-void Simulator::set_s_register_from_sinteger(int sreg, const int sint) {
- ASSERT((sreg >= 0) && (sreg < num_s_registers));
- // Read the bits from the integer value into the unsigned integer element of
- // vfp_register[] given by index=sreg.
- char buffer[sizeof(vfp_register[0])];
- memcpy(buffer, &sint, sizeof(vfp_register[0]));
- memcpy(&vfp_register[sreg], buffer, sizeof(vfp_register[0]));
-}
-
-
-void Simulator::set_d_register_from_double(int dreg, const double& dbl) {
- ASSERT((dreg >= 0) && (dreg < num_d_registers));
- // Read the bits from the double precision floating point value into the two
- // consecutive unsigned integer elements of vfp_register[] given by index
- // 2*sreg and 2*sreg+1.
- char buffer[2 * sizeof(vfp_register[0])];
- memcpy(buffer, &dbl, 2 * sizeof(vfp_register[0]));
-#ifndef BIG_ENDIAN_FLOATING_POINT
- memcpy(&vfp_register[dreg * 2], buffer, 2 * sizeof(vfp_register[0]));
-#else
- memcpy(&vfp_register[dreg * 2], &buffer[4], sizeof(vfp_register[0]));
- memcpy(&vfp_register[dreg * 2 + 1], &buffer[0], sizeof(vfp_register[0]));
-#endif
-}
-
-
-float Simulator::get_float_from_s_register(int sreg) {
- ASSERT((sreg >= 0) && (sreg < num_s_registers));
-
- float sm_val = 0.0;
- // Read the bits from the unsigned integer vfp_register[] array
- // into the single precision floating point value and return it.
- char buffer[sizeof(vfp_register[0])];
- memcpy(buffer, &vfp_register[sreg], sizeof(vfp_register[0]));
- memcpy(&sm_val, buffer, sizeof(vfp_register[0]));
- return(sm_val);
-}
-
-
-int Simulator::get_sinteger_from_s_register(int sreg) {
- ASSERT((sreg >= 0) && (sreg < num_s_registers));
-
- int sm_val = 0;
- // Read the bits from the unsigned integer vfp_register[] array
- // into the single precision floating point value and return it.
- char buffer[sizeof(vfp_register[0])];
- memcpy(buffer, &vfp_register[sreg], sizeof(vfp_register[0]));
- memcpy(&sm_val, buffer, sizeof(vfp_register[0]));
- return(sm_val);
-}
-
-
-double Simulator::get_double_from_d_register(int dreg) {
- ASSERT((dreg >= 0) && (dreg < num_d_registers));
-
- double dm_val = 0.0;
- // Read the bits from the unsigned integer vfp_register[] array
- // into the double precision floating point value and return it.
- char buffer[2 * sizeof(vfp_register[0])];
-#ifdef BIG_ENDIAN_FLOATING_POINT
- memcpy(&buffer[0], &vfp_register[2 * dreg + 1], sizeof(vfp_register[0]));
- memcpy(&buffer[4], &vfp_register[2 * dreg], sizeof(vfp_register[0]));
-#else
- memcpy(buffer, &vfp_register[2 * dreg], 2 * sizeof(vfp_register[0]));
-#endif
- memcpy(&dm_val, buffer, 2 * sizeof(vfp_register[0]));
- return(dm_val);
-}
-
-
-// For use in calls that take two double values, constructed from r0, r1, r2
-// and r3.
-void Simulator::GetFpArgs(double* x, double* y) {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[2 * sizeof(registers_[0])];
- // Registers 0 and 1 -> x.
- memcpy(buffer, registers_, sizeof(buffer));
- memcpy(x, buffer, sizeof(buffer));
- // Registers 2 and 3 -> y.
- memcpy(buffer, registers_ + 2, sizeof(buffer));
- memcpy(y, buffer, sizeof(buffer));
-}
-
-
-void Simulator::SetFpResult(const double& result) {
- char buffer[2 * sizeof(registers_[0])];
- memcpy(buffer, &result, sizeof(buffer));
- // result -> registers 0 and 1.
- memcpy(registers_, buffer, sizeof(buffer));
-}
-
-
-void Simulator::TrashCallerSaveRegisters() {
- // We don't trash the registers with the return value.
- registers_[2] = 0x50Bad4U;
- registers_[3] = 0x50Bad4U;
- registers_[12] = 0x50Bad4U;
-}
-
-// Some Operating Systems allow unaligned access on ARMv7 targets. We
-// assume that unaligned accesses are not allowed unless the v8 build system
-// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
-// The following statements below describes the behavior of the ARM CPUs
-// that don't support unaligned access.
-// Some ARM platforms raise an interrupt on detecting unaligned access.
-// On others it does a funky rotation thing. For now we
-// simply disallow unaligned reads. Note that simulator runs have the runtime
-// system running directly on the host system and only generated code is
-// executed in the simulator. Since the host is typically IA32 we will not
-// get the correct ARM-like behaviour on unaligned accesses for those ARM
-// targets that don't support unaligned loads and stores.
-
-
-int Simulator::ReadW(int32_t addr, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- return *ptr;
-#else
- if ((addr & 3) == 0) {
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- return *ptr;
- }
- PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
- return 0;
-#endif
-}
-
-
-void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- *ptr = value;
- return;
-#else
- if ((addr & 3) == 0) {
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- *ptr = value;
- return;
- }
- PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
-#endif
-}
-
-
-uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- return *ptr;
-#else
- if ((addr & 1) == 0) {
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- return *ptr;
- }
- PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
- return 0;
-#endif
-}
-
-
-int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- return *ptr;
-#else
- if ((addr & 1) == 0) {
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- return *ptr;
- }
- PrintF("Unaligned signed halfword read at 0x%08x\n", addr);
- UNIMPLEMENTED();
- return 0;
-#endif
-}
-
-
-void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- *ptr = value;
- return;
-#else
- if ((addr & 1) == 0) {
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- *ptr = value;
- return;
- }
- PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
-#endif
-}
-
-
-void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- *ptr = value;
- return;
-#else
- if ((addr & 1) == 0) {
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- *ptr = value;
- return;
- }
- PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
-#endif
-}
-
-
-uint8_t Simulator::ReadBU(int32_t addr) {
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- return *ptr;
-}
-
-
-int8_t Simulator::ReadB(int32_t addr) {
- int8_t* ptr = reinterpret_cast<int8_t*>(addr);
- return *ptr;
-}
-
-
-void Simulator::WriteB(int32_t addr, uint8_t value) {
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- *ptr = value;
-}
-
-
-void Simulator::WriteB(int32_t addr, int8_t value) {
- int8_t* ptr = reinterpret_cast<int8_t*>(addr);
- *ptr = value;
-}
-
-
-int32_t* Simulator::ReadDW(int32_t addr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- return ptr;
-#else
- if ((addr & 3) == 0) {
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- return ptr;
- }
- PrintF("Unaligned read at 0x%08x\n", addr);
- UNIMPLEMENTED();
- return 0;
-#endif
-}
-
-
-void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- *ptr++ = value1;
- *ptr = value2;
- return;
-#else
- if ((addr & 3) == 0) {
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- *ptr++ = value1;
- *ptr = value2;
- return;
- }
- PrintF("Unaligned write at 0x%08x\n", addr);
- UNIMPLEMENTED();
-#endif
-}
-
-
-// Returns the limit of the stack area to enable checking for stack overflows.
-uintptr_t Simulator::StackLimit() const {
- // Leave a safety margin of 256 bytes to prevent overrunning the stack when
- // pushing values.
- return reinterpret_cast<uintptr_t>(stack_) + 256;
-}
-
-
-// Unsupported instructions use Format to print an error and stop execution.
-void Simulator::Format(Instruction* instr, const char* format) {
- PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
- reinterpret_cast<intptr_t>(instr), format);
- UNIMPLEMENTED();
-}
-
-
-// Checks if the current instruction should be executed based on its
-// condition bits.
-bool Simulator::ConditionallyExecute(Instruction* instr) {
- switch (instr->ConditionField()) {
- case eq: return z_flag_;
- case ne: return !z_flag_;
- case cs: return c_flag_;
- case cc: return !c_flag_;
- case mi: return n_flag_;
- case pl: return !n_flag_;
- case vs: return v_flag_;
- case vc: return !v_flag_;
- case hi: return c_flag_ && !z_flag_;
- case ls: return !c_flag_ || z_flag_;
- case ge: return n_flag_ == v_flag_;
- case lt: return n_flag_ != v_flag_;
- case gt: return !z_flag_ && (n_flag_ == v_flag_);
- case le: return z_flag_ || (n_flag_ != v_flag_);
- case al: return true;
- default: UNREACHABLE();
- }
- return false;
-}
-
-
-// Calculate and set the Negative and Zero flags.
-void Simulator::SetNZFlags(int32_t val) {
- n_flag_ = (val < 0);
- z_flag_ = (val == 0);
-}
-
-
-// Set the Carry flag.
-void Simulator::SetCFlag(bool val) {
- c_flag_ = val;
-}
-
-
-// Set the oVerflow flag.
-void Simulator::SetVFlag(bool val) {
- v_flag_ = val;
-}
-
-
-// Calculate C flag value for additions.
-bool Simulator::CarryFrom(int32_t left, int32_t right) {
- uint32_t uleft = static_cast<uint32_t>(left);
- uint32_t uright = static_cast<uint32_t>(right);
- uint32_t urest = 0xffffffffU - uleft;
-
- return (uright > urest);
-}
-
-
-// Calculate C flag value for subtractions.
-bool Simulator::BorrowFrom(int32_t left, int32_t right) {
- uint32_t uleft = static_cast<uint32_t>(left);
- uint32_t uright = static_cast<uint32_t>(right);
-
- return (uright > uleft);
-}
-
-
-// Calculate V flag value for additions and subtractions.
-bool Simulator::OverflowFrom(int32_t alu_out,
- int32_t left, int32_t right, bool addition) {
- bool overflow;
- if (addition) {
- // operands have the same sign
- overflow = ((left >= 0 && right >= 0) || (left < 0 && right < 0))
- // and operands and result have different sign
- && ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
- } else {
- // operands have different signs
- overflow = ((left < 0 && right >= 0) || (left >= 0 && right < 0))
- // and first operand and result have different signs
- && ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
- }
- return overflow;
-}
-
-
-// Support for VFP comparisons.
-void Simulator::Compute_FPSCR_Flags(double val1, double val2) {
- if (isnan(val1) || isnan(val2)) {
- n_flag_FPSCR_ = false;
- z_flag_FPSCR_ = false;
- c_flag_FPSCR_ = true;
- v_flag_FPSCR_ = true;
- // All non-NaN cases.
- } else if (val1 == val2) {
- n_flag_FPSCR_ = false;
- z_flag_FPSCR_ = true;
- c_flag_FPSCR_ = true;
- v_flag_FPSCR_ = false;
- } else if (val1 < val2) {
- n_flag_FPSCR_ = true;
- z_flag_FPSCR_ = false;
- c_flag_FPSCR_ = false;
- v_flag_FPSCR_ = false;
- } else {
- // Case when (val1 > val2).
- n_flag_FPSCR_ = false;
- z_flag_FPSCR_ = false;
- c_flag_FPSCR_ = true;
- v_flag_FPSCR_ = false;
- }
-}
-
-
-void Simulator::Copy_FPSCR_to_APSR() {
- n_flag_ = n_flag_FPSCR_;
- z_flag_ = z_flag_FPSCR_;
- c_flag_ = c_flag_FPSCR_;
- v_flag_ = v_flag_FPSCR_;
-}
-
-
-// Addressing Mode 1 - Data-processing operands:
-// Get the value based on the shifter_operand with register.
-int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
- ShiftOp shift = instr->ShiftField();
- int shift_amount = instr->ShiftAmountValue();
- int32_t result = get_register(instr->RmValue());
- if (instr->Bit(4) == 0) {
- // by immediate
- if ((shift == ROR) && (shift_amount == 0)) {
- UNIMPLEMENTED();
- return result;
- } else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
- shift_amount = 32;
- }
- switch (shift) {
- case ASR: {
- if (shift_amount == 0) {
- if (result < 0) {
- result = 0xffffffff;
- *carry_out = true;
- } else {
- result = 0;
- *carry_out = false;
- }
- } else {
- result >>= (shift_amount - 1);
- *carry_out = (result & 1) == 1;
- result >>= 1;
- }
- break;
- }
-
- case LSL: {
- if (shift_amount == 0) {
- *carry_out = c_flag_;
- } else {
- result <<= (shift_amount - 1);
- *carry_out = (result < 0);
- result <<= 1;
- }
- break;
- }
-
- case LSR: {
- if (shift_amount == 0) {
- result = 0;
- *carry_out = c_flag_;
- } else {
- uint32_t uresult = static_cast<uint32_t>(result);
- uresult >>= (shift_amount - 1);
- *carry_out = (uresult & 1) == 1;
- uresult >>= 1;
- result = static_cast<int32_t>(uresult);
- }
- break;
- }
-
- case ROR: {
- UNIMPLEMENTED();
- break;
- }
-
- default: {
- UNREACHABLE();
- break;
- }
- }
- } else {
- // by register
- int rs = instr->RsValue();
- shift_amount = get_register(rs) &0xff;
- switch (shift) {
- case ASR: {
- if (shift_amount == 0) {
- *carry_out = c_flag_;
- } else if (shift_amount < 32) {
- result >>= (shift_amount - 1);
- *carry_out = (result & 1) == 1;
- result >>= 1;
- } else {
- ASSERT(shift_amount >= 32);
- if (result < 0) {
- *carry_out = true;
- result = 0xffffffff;
- } else {
- *carry_out = false;
- result = 0;
- }
- }
- break;
- }
-
- case LSL: {
- if (shift_amount == 0) {
- *carry_out = c_flag_;
- } else if (shift_amount < 32) {
- result <<= (shift_amount - 1);
- *carry_out = (result < 0);
- result <<= 1;
- } else if (shift_amount == 32) {
- *carry_out = (result & 1) == 1;
- result = 0;
- } else {
- ASSERT(shift_amount > 32);
- *carry_out = false;
- result = 0;
- }
- break;
- }
-
- case LSR: {
- if (shift_amount == 0) {
- *carry_out = c_flag_;
- } else if (shift_amount < 32) {
- uint32_t uresult = static_cast<uint32_t>(result);
- uresult >>= (shift_amount - 1);
- *carry_out = (uresult & 1) == 1;
- uresult >>= 1;
- result = static_cast<int32_t>(uresult);
- } else if (shift_amount == 32) {
- *carry_out = (result < 0);
- result = 0;
- } else {
- *carry_out = false;
- result = 0;
- }
- break;
- }
-
- case ROR: {
- UNIMPLEMENTED();
- break;
- }
-
- default: {
- UNREACHABLE();
- break;
- }
- }
- }
- return result;
-}
-
-
-// Addressing Mode 1 - Data-processing operands:
-// Get the value based on the shifter_operand with immediate.
-int32_t Simulator::GetImm(Instruction* instr, bool* carry_out) {
- int rotate = instr->RotateValue() * 2;
- int immed8 = instr->Immed8Value();
- int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
- *carry_out = (rotate == 0) ? c_flag_ : (imm < 0);
- return imm;
-}
-
-
-static int count_bits(int bit_vector) {
- int count = 0;
- while (bit_vector != 0) {
- if ((bit_vector & 1) != 0) {
- count++;
- }
- bit_vector >>= 1;
- }
- return count;
-}
-
-
-// Addressing Mode 4 - Load and Store Multiple
-void Simulator::HandleRList(Instruction* instr, bool load) {
- int rn = instr->RnValue();
- int32_t rn_val = get_register(rn);
- int rlist = instr->RlistValue();
- int num_regs = count_bits(rlist);
-
- intptr_t start_address = 0;
- intptr_t end_address = 0;
- switch (instr->PUField()) {
- case da_x: {
- UNIMPLEMENTED();
- break;
- }
- case ia_x: {
- start_address = rn_val;
- end_address = rn_val + (num_regs * 4) - 4;
- rn_val = rn_val + (num_regs * 4);
- break;
- }
- case db_x: {
- start_address = rn_val - (num_regs * 4);
- end_address = rn_val - 4;
- rn_val = start_address;
- break;
- }
- case ib_x: {
- start_address = rn_val + 4;
- end_address = rn_val + (num_regs * 4);
- rn_val = end_address;
- break;
- }
- default: {
- UNREACHABLE();
- break;
- }
- }
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
- intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
- int reg = 0;
- while (rlist != 0) {
- if ((rlist & 1) != 0) {
- if (load) {
- set_register(reg, *address);
- } else {
- *address = get_register(reg);
- }
- address += 1;
- }
- reg++;
- rlist >>= 1;
- }
- ASSERT(end_address == ((intptr_t)address) - 4);
-}
-
-
-// Calls into the V8 runtime are based on this very simple interface.
-// Note: To be able to return two values from some calls the code in runtime.cc
-// uses the ObjectPair which is essentially two 32-bit values stuffed into a
-// 64-bit value. With the code below we assume that all runtime calls return
-// 64 bits of result. If they don't, the r1 result register contains a bogus
-// value, which is fine because it is caller-saved.
-typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
- int32_t arg1,
- int32_t arg2,
- int32_t arg3,
- int32_t arg4,
- int32_t arg5);
-typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
- int32_t arg1,
- int32_t arg2,
- int32_t arg3);
-
-// This signature supports direct call in to API function native callback
-// (refer to InvocationCallback in v8.h).
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
-
-// This signature supports direct call to accessor getter callback.
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
- int32_t arg1);
-
-// Software interrupt instructions are used by the simulator to call into the
-// C-based V8 runtime.
-void Simulator::SoftwareInterrupt(Instruction* instr) {
- int svc = instr->SvcValue();
- switch (svc) {
- case kCallRtRedirected: {
- // Check if stack is aligned. Error if not aligned is reported below to
- // include information on the function called.
- bool stack_aligned =
- (get_register(sp)
- & (::v8::internal::FLAG_sim_stack_alignment - 1)) == 0;
- Redirection* redirection = Redirection::FromSwiInstruction(instr);
- int32_t arg0 = get_register(r0);
- int32_t arg1 = get_register(r1);
- int32_t arg2 = get_register(r2);
- int32_t arg3 = get_register(r3);
- int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
- int32_t arg4 = stack_pointer[0];
- int32_t arg5 = stack_pointer[1];
- // This is dodgy but it works because the C entry stubs are never moved.
- // See comment in codegen-arm.cc and bug 1242173.
- int32_t saved_lr = get_register(lr);
- intptr_t external =
- reinterpret_cast<intptr_t>(redirection->external_function());
- if (redirection->type() == ExternalReference::FP_RETURN_CALL) {
- SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- double x, y;
- GetFpArgs(&x, &y);
- PrintF("Call to host function at %p with args %f, %f",
- FUNCTION_ADDR(target), x, y);
- if (!stack_aligned) {
- PrintF(" with unaligned stack %08x\n", get_register(sp));
- }
- PrintF("\n");
- }
- CHECK(stack_aligned);
- double result = target(arg0, arg1, arg2, arg3);
- SetFpResult(result);
- } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
- SimulatorRuntimeDirectApiCall target =
- reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
- if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- PrintF("Call to host function at %p args %08x",
- FUNCTION_ADDR(target), arg0);
- if (!stack_aligned) {
- PrintF(" with unaligned stack %08x\n", get_register(sp));
- }
- PrintF("\n");
- }
- CHECK(stack_aligned);
- v8::Handle<v8::Value> result = target(arg0);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
- }
- set_register(r0, (int32_t) *result);
- } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
- SimulatorRuntimeDirectGetterCall target =
- reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
- if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- PrintF("Call to host function at %p args %08x %08x",
- FUNCTION_ADDR(target), arg0, arg1);
- if (!stack_aligned) {
- PrintF(" with unaligned stack %08x\n", get_register(sp));
- }
- PrintF("\n");
- }
- CHECK(stack_aligned);
- v8::Handle<v8::Value> result = target(arg0, arg1);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
- }
- set_register(r0, (int32_t) *result);
- } else {
- // builtin call.
- ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- PrintF(
- "Call to host function at %p"
- "args %08x, %08x, %08x, %08x, %08x, %08x",
- FUNCTION_ADDR(target),
- arg0,
- arg1,
- arg2,
- arg3,
- arg4,
- arg5);
- if (!stack_aligned) {
- PrintF(" with unaligned stack %08x\n", get_register(sp));
- }
- PrintF("\n");
- }
- CHECK(stack_aligned);
- int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
- int32_t lo_res = static_cast<int32_t>(result);
- int32_t hi_res = static_cast<int32_t>(result >> 32);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08x\n", lo_res);
- }
- set_register(r0, lo_res);
- set_register(r1, hi_res);
- }
- set_register(lr, saved_lr);
- set_pc(get_register(lr));
- break;
- }
- case kBreakpoint: {
- ArmDebugger dbg(this);
- dbg.Debug();
- break;
- }
- // stop uses all codes greater than 1 << 23.
- default: {
- if (svc >= (1 << 23)) {
- uint32_t code = svc & kStopCodeMask;
- if (isWatchedStop(code)) {
- IncreaseStopCounter(code);
- }
- // Stop if it is enabled, otherwise go on jumping over the stop
- // and the message address.
- if (isEnabledStop(code)) {
- ArmDebugger dbg(this);
- dbg.Stop(instr);
- } else {
- set_pc(get_pc() + 2 * Instruction::kInstrSize);
- }
- } else {
- // This is not a valid svc code.
- UNREACHABLE();
- break;
- }
- }
- }
-}
-
-
-// Stop helper functions.
-bool Simulator::isStopInstruction(Instruction* instr) {
- return (instr->Bits(27, 24) == 0xF) && (instr->SvcValue() >= kStopCode);
-}
-
-
-bool Simulator::isWatchedStop(uint32_t code) {
- ASSERT(code <= kMaxStopCode);
- return code < kNumOfWatchedStops;
-}
-
-
-bool Simulator::isEnabledStop(uint32_t code) {
- ASSERT(code <= kMaxStopCode);
- // Unwatched stops are always enabled.
- return !isWatchedStop(code) ||
- !(watched_stops[code].count & kStopDisabledBit);
-}
-
-
-void Simulator::EnableStop(uint32_t code) {
- ASSERT(isWatchedStop(code));
- if (!isEnabledStop(code)) {
- watched_stops[code].count &= ~kStopDisabledBit;
- }
-}
-
-
-void Simulator::DisableStop(uint32_t code) {
- ASSERT(isWatchedStop(code));
- if (isEnabledStop(code)) {
- watched_stops[code].count |= kStopDisabledBit;
- }
-}
-
-
-void Simulator::IncreaseStopCounter(uint32_t code) {
- ASSERT(code <= kMaxStopCode);
- ASSERT(isWatchedStop(code));
- if ((watched_stops[code].count & ~(1 << 31)) == 0x7fffffff) {
- PrintF("Stop counter for code %i has overflowed.\n"
- "Enabling this code and reseting the counter to 0.\n", code);
- watched_stops[code].count = 0;
- EnableStop(code);
- } else {
- watched_stops[code].count++;
- }
-}
-
-
-// Print a stop status.
-void Simulator::PrintStopInfo(uint32_t code) {
- ASSERT(code <= kMaxStopCode);
- if (!isWatchedStop(code)) {
- PrintF("Stop not watched.");
- } else {
- const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
- int32_t count = watched_stops[code].count & ~kStopDisabledBit;
- // Don't print the state of unused breakpoints.
- if (count != 0) {
- if (watched_stops[code].desc) {
- PrintF("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n",
- code, code, state, count, watched_stops[code].desc);
- } else {
- PrintF("stop %i - 0x%x: \t%s, \tcounter = %i\n",
- code, code, state, count);
- }
- }
- }
-}
-
-
-// Handle execution based on instruction types.
-
-// Instruction types 0 and 1 are both rolled into one function because they
-// only differ in the handling of the shifter_operand.
-void Simulator::DecodeType01(Instruction* instr) {
- int type = instr->TypeValue();
- if ((type == 0) && instr->IsSpecialType0()) {
- // multiply instruction or extra loads and stores
- if (instr->Bits(7, 4) == 9) {
- if (instr->Bit(24) == 0) {
- // Raw field decoding here. Multiply instructions have their Rd in
- // funny places.
- int rn = instr->RnValue();
- int rm = instr->RmValue();
- int rs = instr->RsValue();
- int32_t rs_val = get_register(rs);
- int32_t rm_val = get_register(rm);
- if (instr->Bit(23) == 0) {
- if (instr->Bit(21) == 0) {
- // The MUL instruction description (A 4.1.33) refers to Rd as being
- // the destination for the operation, but it confusingly uses the
- // Rn field to encode it.
- // Format(instr, "mul'cond's 'rn, 'rm, 'rs");
- int rd = rn; // Remap the rn field to the Rd register.
- int32_t alu_out = rm_val * rs_val;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- }
- } else {
- // The MLA instruction description (A 4.1.28) refers to the order
- // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
- // Rn field to encode the Rd register and the Rd field to encode
- // the Rn register.
- Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
- }
- } else {
- // The signed/long multiply instructions use the terms RdHi and RdLo
- // when referring to the target registers. They are mapped to the Rn
- // and Rd fields as follows:
- // RdLo == Rd
- // RdHi == Rn (This is confusingly stored in variable rd here
- // because the mul instruction from above uses the
- // Rn field to encode the Rd register. Good luck figuring
- // this out without reading the ARM instruction manual
- // at a very detailed level.)
- // Format(instr, "'um'al'cond's 'rd, 'rn, 'rs, 'rm");
- int rd_hi = rn; // Remap the rn field to the RdHi register.
- int rd_lo = instr->RdValue();
- int32_t hi_res = 0;
- int32_t lo_res = 0;
- if (instr->Bit(22) == 1) {
- int64_t left_op = static_cast<int32_t>(rm_val);
- int64_t right_op = static_cast<int32_t>(rs_val);
- uint64_t result = left_op * right_op;
- hi_res = static_cast<int32_t>(result >> 32);
- lo_res = static_cast<int32_t>(result & 0xffffffff);
- } else {
- // unsigned multiply
- uint64_t left_op = static_cast<uint32_t>(rm_val);
- uint64_t right_op = static_cast<uint32_t>(rs_val);
- uint64_t result = left_op * right_op;
- hi_res = static_cast<int32_t>(result >> 32);
- lo_res = static_cast<int32_t>(result & 0xffffffff);
- }
- set_register(rd_lo, lo_res);
- set_register(rd_hi, hi_res);
- if (instr->HasS()) {
- UNIMPLEMENTED();
- }
- }
- } else {
- UNIMPLEMENTED(); // Not used by V8.
- }
- } else {
- // extra load/store instructions
- int rd = instr->RdValue();
- int rn = instr->RnValue();
- int32_t rn_val = get_register(rn);
- int32_t addr = 0;
- if (instr->Bit(22) == 0) {
- int rm = instr->RmValue();
- int32_t rm_val = get_register(rm);
- switch (instr->PUField()) {
- case da_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
- ASSERT(!instr->HasW());
- addr = rn_val;
- rn_val -= rm_val;
- set_register(rn, rn_val);
- break;
- }
- case ia_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
- ASSERT(!instr->HasW());
- addr = rn_val;
- rn_val += rm_val;
- set_register(rn, rn_val);
- break;
- }
- case db_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
- rn_val -= rm_val;
- addr = rn_val;
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
- break;
- }
- case ib_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
- rn_val += rm_val;
- addr = rn_val;
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
- break;
- }
- default: {
- // The PU field is a 2-bit field.
- UNREACHABLE();
- break;
- }
- }
- } else {
- int32_t imm_val = (instr->ImmedHValue() << 4) | instr->ImmedLValue();
- switch (instr->PUField()) {
- case da_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
- ASSERT(!instr->HasW());
- addr = rn_val;
- rn_val -= imm_val;
- set_register(rn, rn_val);
- break;
- }
- case ia_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
- ASSERT(!instr->HasW());
- addr = rn_val;
- rn_val += imm_val;
- set_register(rn, rn_val);
- break;
- }
- case db_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn, #-'off8]'w");
- rn_val -= imm_val;
- addr = rn_val;
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
- break;
- }
- case ib_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn, #+'off8]'w");
- rn_val += imm_val;
- addr = rn_val;
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
- break;
- }
- default: {
- // The PU field is a 2-bit field.
- UNREACHABLE();
- break;
- }
- }
- }
- if (((instr->Bits(7, 4) & 0xd) == 0xd) && (instr->Bit(20) == 0)) {
- ASSERT((rd % 2) == 0);
- if (instr->HasH()) {
- // The strd instruction.
- int32_t value1 = get_register(rd);
- int32_t value2 = get_register(rd+1);
- WriteDW(addr, value1, value2);
- } else {
- // The ldrd instruction.
- int* rn_data = ReadDW(addr);
- set_dw_register(rd, rn_data);
- }
- } else if (instr->HasH()) {
- if (instr->HasSign()) {
- if (instr->HasL()) {
- int16_t val = ReadH(addr, instr);
- set_register(rd, val);
- } else {
- int16_t val = get_register(rd);
- WriteH(addr, val, instr);
- }
- } else {
- if (instr->HasL()) {
- uint16_t val = ReadHU(addr, instr);
- set_register(rd, val);
- } else {
- uint16_t val = get_register(rd);
- WriteH(addr, val, instr);
- }
- }
- } else {
- // signed byte loads
- ASSERT(instr->HasSign());
- ASSERT(instr->HasL());
- int8_t val = ReadB(addr);
- set_register(rd, val);
- }
- return;
- }
- } else if ((type == 0) && instr->IsMiscType0()) {
- if (instr->Bits(22, 21) == 1) {
- int rm = instr->RmValue();
- switch (instr->BitField(7, 4)) {
- case BX:
- set_pc(get_register(rm));
- break;
- case BLX: {
- uint32_t old_pc = get_pc();
- set_pc(get_register(rm));
- set_register(lr, old_pc + Instruction::kInstrSize);
- break;
- }
- case BKPT: {
- ArmDebugger dbg(this);
- PrintF("Simulator hit BKPT.\n");
- dbg.Debug();
- break;
- }
- default:
- UNIMPLEMENTED();
- }
- } else if (instr->Bits(22, 21) == 3) {
- int rm = instr->RmValue();
- int rd = instr->RdValue();
- switch (instr->BitField(7, 4)) {
- case CLZ: {
- uint32_t bits = get_register(rm);
- int leading_zeros = 0;
- if (bits == 0) {
- leading_zeros = 32;
- } else {
- while ((bits & 0x80000000u) == 0) {
- bits <<= 1;
- leading_zeros++;
- }
- }
- set_register(rd, leading_zeros);
- break;
- }
- default:
- UNIMPLEMENTED();
- }
- } else {
- PrintF("%08x\n", instr->InstructionBits());
- UNIMPLEMENTED();
- }
- } else {
- int rd = instr->RdValue();
- int rn = instr->RnValue();
- int32_t rn_val = get_register(rn);
- int32_t shifter_operand = 0;
- bool shifter_carry_out = 0;
- if (type == 0) {
- shifter_operand = GetShiftRm(instr, &shifter_carry_out);
- } else {
- ASSERT(instr->TypeValue() == 1);
- shifter_operand = GetImm(instr, &shifter_carry_out);
- }
- int32_t alu_out;
-
- switch (instr->OpcodeField()) {
- case AND: {
- // Format(instr, "and'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "and'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val & shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- }
- break;
- }
-
- case EOR: {
- // Format(instr, "eor'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "eor'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val ^ shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- }
- break;
- }
-
- case SUB: {
- // Format(instr, "sub'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "sub'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val - shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(!BorrowFrom(rn_val, shifter_operand));
- SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
- }
- break;
- }
-
- case RSB: {
- // Format(instr, "rsb'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "rsb'cond's 'rd, 'rn, 'imm");
- alu_out = shifter_operand - rn_val;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(!BorrowFrom(shifter_operand, rn_val));
- SetVFlag(OverflowFrom(alu_out, shifter_operand, rn_val, false));
- }
- break;
- }
-
- case ADD: {
- // Format(instr, "add'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "add'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val + shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(CarryFrom(rn_val, shifter_operand));
- SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
- }
- break;
- }
-
- case ADC: {
- Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm");
- Format(instr, "adc'cond's 'rd, 'rn, 'imm");
- break;
- }
-
- case SBC: {
- Format(instr, "sbc'cond's 'rd, 'rn, 'shift_rm");
- Format(instr, "sbc'cond's 'rd, 'rn, 'imm");
- break;
- }
-
- case RSC: {
- Format(instr, "rsc'cond's 'rd, 'rn, 'shift_rm");
- Format(instr, "rsc'cond's 'rd, 'rn, 'imm");
- break;
- }
-
- case TST: {
- if (instr->HasS()) {
- // Format(instr, "tst'cond 'rn, 'shift_rm");
- // Format(instr, "tst'cond 'rn, 'imm");
- alu_out = rn_val & shifter_operand;
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- } else {
- // Format(instr, "movw'cond 'rd, 'imm").
- alu_out = instr->ImmedMovwMovtValue();
- set_register(rd, alu_out);
- }
- break;
- }
-
- case TEQ: {
- if (instr->HasS()) {
- // Format(instr, "teq'cond 'rn, 'shift_rm");
- // Format(instr, "teq'cond 'rn, 'imm");
- alu_out = rn_val ^ shifter_operand;
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- } else {
- // Other instructions matching this pattern are handled in the
- // miscellaneous instructions part above.
- UNREACHABLE();
- }
- break;
- }
-
- case CMP: {
- if (instr->HasS()) {
- // Format(instr, "cmp'cond 'rn, 'shift_rm");
- // Format(instr, "cmp'cond 'rn, 'imm");
- alu_out = rn_val - shifter_operand;
- SetNZFlags(alu_out);
- SetCFlag(!BorrowFrom(rn_val, shifter_operand));
- SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
- } else {
- // Format(instr, "movt'cond 'rd, 'imm").
- alu_out = (get_register(rd) & 0xffff) |
- (instr->ImmedMovwMovtValue() << 16);
- set_register(rd, alu_out);
- }
- break;
- }
-
- case CMN: {
- if (instr->HasS()) {
- // Format(instr, "cmn'cond 'rn, 'shift_rm");
- // Format(instr, "cmn'cond 'rn, 'imm");
- alu_out = rn_val + shifter_operand;
- SetNZFlags(alu_out);
- SetCFlag(!CarryFrom(rn_val, shifter_operand));
- SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
- } else {
- // Other instructions matching this pattern are handled in the
- // miscellaneous instructions part above.
- UNREACHABLE();
- }
- break;
- }
-
- case ORR: {
- // Format(instr, "orr'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "orr'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val | shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- }
- break;
- }
-
- case MOV: {
- // Format(instr, "mov'cond's 'rd, 'shift_rm");
- // Format(instr, "mov'cond's 'rd, 'imm");
- alu_out = shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- }
- break;
- }
-
- case BIC: {
- // Format(instr, "bic'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "bic'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val & ~shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- }
- break;
- }
-
- case MVN: {
- // Format(instr, "mvn'cond's 'rd, 'shift_rm");
- // Format(instr, "mvn'cond's 'rd, 'imm");
- alu_out = ~shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- }
- break;
- }
-
- default: {
- UNREACHABLE();
- break;
- }
- }
- }
-}
-
-
-void Simulator::DecodeType2(Instruction* instr) {
- int rd = instr->RdValue();
- int rn = instr->RnValue();
- int32_t rn_val = get_register(rn);
- int32_t im_val = instr->Offset12Value();
- int32_t addr = 0;
- switch (instr->PUField()) {
- case da_x: {
- // Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
- ASSERT(!instr->HasW());
- addr = rn_val;
- rn_val -= im_val;
- set_register(rn, rn_val);
- break;
- }
- case ia_x: {
- // Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
- ASSERT(!instr->HasW());
- addr = rn_val;
- rn_val += im_val;
- set_register(rn, rn_val);
- break;
- }
- case db_x: {
- // Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
- rn_val -= im_val;
- addr = rn_val;
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
- break;
- }
- case ib_x: {
- // Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
- rn_val += im_val;
- addr = rn_val;
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
- break;
- }
- default: {
- UNREACHABLE();
- break;
- }
- }
- if (instr->HasB()) {
- if (instr->HasL()) {
- byte val = ReadBU(addr);
- set_register(rd, val);
- } else {
- byte val = get_register(rd);
- WriteB(addr, val);
- }
- } else {
- if (instr->HasL()) {
- set_register(rd, ReadW(addr, instr));
- } else {
- WriteW(addr, get_register(rd), instr);
- }
- }
-}
-
-
-void Simulator::DecodeType3(Instruction* instr) {
- int rd = instr->RdValue();
- int rn = instr->RnValue();
- int32_t rn_val = get_register(rn);
- bool shifter_carry_out = 0;
- int32_t shifter_operand = GetShiftRm(instr, &shifter_carry_out);
- int32_t addr = 0;
- switch (instr->PUField()) {
- case da_x: {
- ASSERT(!instr->HasW());
- Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
- UNIMPLEMENTED();
- break;
- }
- case ia_x: {
- if (instr->HasW()) {
- ASSERT(instr->Bits(5, 4) == 0x1);
-
- if (instr->Bit(22) == 0x1) { // USAT.
- int32_t sat_pos = instr->Bits(20, 16);
- int32_t sat_val = (1 << sat_pos) - 1;
- int32_t shift = instr->Bits(11, 7);
- int32_t shift_type = instr->Bit(6);
- int32_t rm_val = get_register(instr->RmValue());
- if (shift_type == 0) { // LSL
- rm_val <<= shift;
- } else { // ASR
- rm_val >>= shift;
- }
- // If saturation occurs, the Q flag should be set in the CPSR.
- // There is no Q flag yet, and no instruction (MRS) to read the
- // CPSR directly.
- if (rm_val > sat_val) {
- rm_val = sat_val;
- } else if (rm_val < 0) {
- rm_val = 0;
- }
- set_register(rd, rm_val);
- } else { // SSAT.
- UNIMPLEMENTED();
- }
- return;
- } else {
- Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
- UNIMPLEMENTED();
- }
- break;
- }
- case db_x: {
- // Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
- addr = rn_val - shifter_operand;
- if (instr->HasW()) {
- set_register(rn, addr);
- }
- break;
- }
- case ib_x: {
- if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
- uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
- uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
- uint32_t msbit = widthminus1 + lsbit;
- if (msbit <= 31) {
- if (instr->Bit(22)) {
- // ubfx - unsigned bitfield extract.
- uint32_t rm_val =
- static_cast<uint32_t>(get_register(instr->RmValue()));
- uint32_t extr_val = rm_val << (31 - msbit);
- extr_val = extr_val >> (31 - widthminus1);
- set_register(instr->RdValue(), extr_val);
- } else {
- // sbfx - signed bitfield extract.
- int32_t rm_val = get_register(instr->RmValue());
- int32_t extr_val = rm_val << (31 - msbit);
- extr_val = extr_val >> (31 - widthminus1);
- set_register(instr->RdValue(), extr_val);
- }
- } else {
- UNREACHABLE();
- }
- return;
- } else if (!instr->HasW() && (instr->Bits(6, 4) == 0x1)) {
- uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
- uint32_t msbit = static_cast<uint32_t>(instr->Bits(20, 16));
- if (msbit >= lsbit) {
- // bfc or bfi - bitfield clear/insert.
- uint32_t rd_val =
- static_cast<uint32_t>(get_register(instr->RdValue()));
- uint32_t bitcount = msbit - lsbit + 1;
- uint32_t mask = (1 << bitcount) - 1;
- rd_val &= ~(mask << lsbit);
- if (instr->RmValue() != 15) {
- // bfi - bitfield insert.
- uint32_t rm_val =
- static_cast<uint32_t>(get_register(instr->RmValue()));
- rm_val &= mask;
- rd_val |= rm_val << lsbit;
- }
- set_register(instr->RdValue(), rd_val);
- } else {
- UNREACHABLE();
- }
- return;
- } else {
- // Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
- addr = rn_val + shifter_operand;
- if (instr->HasW()) {
- set_register(rn, addr);
- }
- }
- break;
- }
- default: {
- UNREACHABLE();
- break;
- }
- }
- if (instr->HasB()) {
- if (instr->HasL()) {
- uint8_t byte = ReadB(addr);
- set_register(rd, byte);
- } else {
- uint8_t byte = get_register(rd);
- WriteB(addr, byte);
- }
- } else {
- if (instr->HasL()) {
- set_register(rd, ReadW(addr, instr));
- } else {
- WriteW(addr, get_register(rd), instr);
- }
- }
-}
-
-
-void Simulator::DecodeType4(Instruction* instr) {
- ASSERT(instr->Bit(22) == 0); // only allowed to be set in privileged mode
- if (instr->HasL()) {
- // Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
- HandleRList(instr, true);
- } else {
- // Format(instr, "stm'cond'pu 'rn'w, 'rlist");
- HandleRList(instr, false);
- }
-}
-
-
-void Simulator::DecodeType5(Instruction* instr) {
- // Format(instr, "b'l'cond 'target");
- int off = (instr->SImmed24Value() << 2);
- intptr_t pc_address = get_pc();
- if (instr->HasLink()) {
- set_register(lr, pc_address + Instruction::kInstrSize);
- }
- int pc_reg = get_register(pc);
- set_pc(pc_reg + off);
-}
-
-
-void Simulator::DecodeType6(Instruction* instr) {
- DecodeType6CoprocessorIns(instr);
-}
-
-
-void Simulator::DecodeType7(Instruction* instr) {
- if (instr->Bit(24) == 1) {
- SoftwareInterrupt(instr);
- } else {
- DecodeTypeVFP(instr);
- }
-}
-
-
-// void Simulator::DecodeTypeVFP(Instruction* instr)
-// The Following ARMv7 VFPv instructions are currently supported.
-// vmov :Sn = Rt
-// vmov :Rt = Sn
-// vcvt: Dd = Sm
-// vcvt: Sd = Dm
-// Dd = vabs(Dm)
-// Dd = vneg(Dm)
-// Dd = vadd(Dn, Dm)
-// Dd = vsub(Dn, Dm)
-// Dd = vmul(Dn, Dm)
-// Dd = vdiv(Dn, Dm)
-// vcmp(Dd, Dm)
-// vmrs
-// Dd = vsqrt(Dm)
-void Simulator::DecodeTypeVFP(Instruction* instr) {
- ASSERT((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
- ASSERT(instr->Bits(11, 9) == 0x5);
-
- // Obtain double precision register codes.
- int vm = instr->VFPMRegValue(kDoublePrecision);
- int vd = instr->VFPDRegValue(kDoublePrecision);
- int vn = instr->VFPNRegValue(kDoublePrecision);
-
- if (instr->Bit(4) == 0) {
- if (instr->Opc1Value() == 0x7) {
- // Other data processing instructions
- if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x1)) {
- // vmov register to register.
- if (instr->SzValue() == 0x1) {
- int m = instr->VFPMRegValue(kDoublePrecision);
- int d = instr->VFPDRegValue(kDoublePrecision);
- set_d_register_from_double(d, get_double_from_d_register(m));
- } else {
- int m = instr->VFPMRegValue(kSinglePrecision);
- int d = instr->VFPDRegValue(kSinglePrecision);
- set_s_register_from_float(d, get_float_from_s_register(m));
- }
- } else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
- // vabs
- double dm_value = get_double_from_d_register(vm);
- double dd_value = fabs(dm_value);
- set_d_register_from_double(vd, dd_value);
- } else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
- // vneg
- double dm_value = get_double_from_d_register(vm);
- double dd_value = -dm_value;
- set_d_register_from_double(vd, dd_value);
- } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
- DecodeVCVTBetweenDoubleAndSingle(instr);
- } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
- DecodeVCVTBetweenFloatingPointAndInteger(instr);
- } else if (((instr->Opc2Value() >> 1) == 0x6) &&
- (instr->Opc3Value() & 0x1)) {
- DecodeVCVTBetweenFloatingPointAndInteger(instr);
- } else if (((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
- (instr->Opc3Value() & 0x1)) {
- DecodeVCMP(instr);
- } else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
- // vsqrt
- double dm_value = get_double_from_d_register(vm);
- double dd_value = sqrt(dm_value);
- set_d_register_from_double(vd, dd_value);
- } else if (instr->Opc3Value() == 0x0) {
- // vmov immediate.
- if (instr->SzValue() == 0x1) {
- set_d_register_from_double(vd, instr->DoubleImmedVmov());
- } else {
- UNREACHABLE(); // Not used by v8.
- }
- } else {
- UNREACHABLE(); // Not used by V8.
- }
- } else if (instr->Opc1Value() == 0x3) {
- if (instr->SzValue() != 0x1) {
- UNREACHABLE(); // Not used by V8.
- }
-
- if (instr->Opc3Value() & 0x1) {
- // vsub
- double dn_value = get_double_from_d_register(vn);
- double dm_value = get_double_from_d_register(vm);
- double dd_value = dn_value - dm_value;
- set_d_register_from_double(vd, dd_value);
- } else {
- // vadd
- double dn_value = get_double_from_d_register(vn);
- double dm_value = get_double_from_d_register(vm);
- double dd_value = dn_value + dm_value;
- set_d_register_from_double(vd, dd_value);
- }
- } else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
- // vmul
- if (instr->SzValue() != 0x1) {
- UNREACHABLE(); // Not used by V8.
- }
-
- double dn_value = get_double_from_d_register(vn);
- double dm_value = get_double_from_d_register(vm);
- double dd_value = dn_value * dm_value;
- set_d_register_from_double(vd, dd_value);
- } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
- // vdiv
- if (instr->SzValue() != 0x1) {
- UNREACHABLE(); // Not used by V8.
- }
-
- double dn_value = get_double_from_d_register(vn);
- double dm_value = get_double_from_d_register(vm);
- double dd_value = dn_value / dm_value;
- div_zero_vfp_flag_ = (dm_value == 0);
- set_d_register_from_double(vd, dd_value);
- } else {
- UNIMPLEMENTED(); // Not used by V8.
- }
- } else {
- if ((instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x0)) {
- DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
- } else if ((instr->VLValue() == 0x1) &&
- (instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x7) &&
- (instr->Bits(19, 16) == 0x1)) {
- // vmrs
- uint32_t rt = instr->RtValue();
- if (rt == 0xF) {
- Copy_FPSCR_to_APSR();
- } else {
- // Emulate FPSCR from the Simulator flags.
- uint32_t fpscr = (n_flag_FPSCR_ << 31) |
- (z_flag_FPSCR_ << 30) |
- (c_flag_FPSCR_ << 29) |
- (v_flag_FPSCR_ << 28) |
- (inexact_vfp_flag_ << 4) |
- (underflow_vfp_flag_ << 3) |
- (overflow_vfp_flag_ << 2) |
- (div_zero_vfp_flag_ << 1) |
- (inv_op_vfp_flag_ << 0) |
- (FPSCR_rounding_mode_);
- set_register(rt, fpscr);
- }
- } else if ((instr->VLValue() == 0x0) &&
- (instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x7) &&
- (instr->Bits(19, 16) == 0x1)) {
- // vmsr
- uint32_t rt = instr->RtValue();
- if (rt == pc) {
- UNREACHABLE();
- } else {
- uint32_t rt_value = get_register(rt);
- n_flag_FPSCR_ = (rt_value >> 31) & 1;
- z_flag_FPSCR_ = (rt_value >> 30) & 1;
- c_flag_FPSCR_ = (rt_value >> 29) & 1;
- v_flag_FPSCR_ = (rt_value >> 28) & 1;
- inexact_vfp_flag_ = (rt_value >> 4) & 1;
- underflow_vfp_flag_ = (rt_value >> 3) & 1;
- overflow_vfp_flag_ = (rt_value >> 2) & 1;
- div_zero_vfp_flag_ = (rt_value >> 1) & 1;
- inv_op_vfp_flag_ = (rt_value >> 0) & 1;
- FPSCR_rounding_mode_ =
- static_cast<VFPRoundingMode>((rt_value) & kVFPRoundingModeMask);
- }
- } else {
- UNIMPLEMENTED(); // Not used by V8.
- }
- }
-}
-
-
-void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
- Instruction* instr) {
- ASSERT((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x0));
-
- int t = instr->RtValue();
- int n = instr->VFPNRegValue(kSinglePrecision);
- bool to_arm_register = (instr->VLValue() == 0x1);
-
- if (to_arm_register) {
- int32_t int_value = get_sinteger_from_s_register(n);
- set_register(t, int_value);
- } else {
- int32_t rs_val = get_register(t);
- set_s_register_from_sinteger(n, rs_val);
- }
-}
-
-
-void Simulator::DecodeVCMP(Instruction* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
- ASSERT(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
- (instr->Opc3Value() & 0x1));
- // Comparison.
-
- VFPRegPrecision precision = kSinglePrecision;
- if (instr->SzValue() == 1) {
- precision = kDoublePrecision;
- }
-
- int d = instr->VFPDRegValue(precision);
- int m = 0;
- if (instr->Opc2Value() == 0x4) {
- m = instr->VFPMRegValue(precision);
- }
-
- if (precision == kDoublePrecision) {
- double dd_value = get_double_from_d_register(d);
- double dm_value = 0.0;
- if (instr->Opc2Value() == 0x4) {
- dm_value = get_double_from_d_register(m);
- }
-
- // Raise exceptions for quiet NaNs if necessary.
- if (instr->Bit(7) == 1) {
- if (isnan(dd_value)) {
- inv_op_vfp_flag_ = true;
- }
- }
-
- Compute_FPSCR_Flags(dd_value, dm_value);
- } else {
- UNIMPLEMENTED(); // Not used by V8.
- }
-}
-
-
-void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
- ASSERT((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
-
- VFPRegPrecision dst_precision = kDoublePrecision;
- VFPRegPrecision src_precision = kSinglePrecision;
- if (instr->SzValue() == 1) {
- dst_precision = kSinglePrecision;
- src_precision = kDoublePrecision;
- }
-
- int dst = instr->VFPDRegValue(dst_precision);
- int src = instr->VFPMRegValue(src_precision);
-
- if (dst_precision == kSinglePrecision) {
- double val = get_double_from_d_register(src);
- set_s_register_from_float(dst, static_cast<float>(val));
- } else {
- float val = get_float_from_s_register(src);
- set_d_register_from_double(dst, static_cast<double>(val));
- }
-}
-
-bool get_inv_op_vfp_flag(VFPRoundingMode mode,
- double val,
- bool unsigned_) {
- ASSERT((mode == RN) || (mode == RM) || (mode == RZ));
- double max_uint = static_cast<double>(0xffffffffu);
- double max_int = static_cast<double>(kMaxInt);
- double min_int = static_cast<double>(kMinInt);
-
- // Check for NaN.
- if (val != val) {
- return true;
- }
-
- // Check for overflow. This code works because 32bit integers can be
- // exactly represented by ieee-754 64bit floating-point values.
- switch (mode) {
- case RN:
- return unsigned_ ? (val >= (max_uint + 0.5)) ||
- (val < -0.5)
- : (val >= (max_int + 0.5)) ||
- (val < (min_int - 0.5));
-
- case RM:
- return unsigned_ ? (val >= (max_uint + 1.0)) ||
- (val < 0)
- : (val >= (max_int + 1.0)) ||
- (val < min_int);
-
- case RZ:
- return unsigned_ ? (val >= (max_uint + 1.0)) ||
- (val <= -1)
- : (val >= (max_int + 1.0)) ||
- (val <= (min_int - 1.0));
- default:
- UNREACHABLE();
- return true;
- }
-}
-
-
-// We call this function only if we had a vfp invalid exception.
-// It returns the correct saturated value.
-int VFPConversionSaturate(double val, bool unsigned_res) {
- if (val != val) {
- return 0;
- } else {
- if (unsigned_res) {
- return (val < 0) ? 0 : 0xffffffffu;
- } else {
- return (val < 0) ? kMinInt : kMaxInt;
- }
- }
-}
-
-
-void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7) &&
- (instr->Bits(27, 23) == 0x1D));
- ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
- (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
-
- // Conversion between floating-point and integer.
- bool to_integer = (instr->Bit(18) == 1);
-
- VFPRegPrecision src_precision = (instr->SzValue() == 1) ? kDoublePrecision
- : kSinglePrecision;
-
- if (to_integer) {
- // We are playing with code close to the C++ standard's limits below,
- // hence the very simple code and heavy checks.
- //
- // Note:
- // C++ defines default type casting from floating point to integer as
- // (close to) rounding toward zero ("fractional part discarded").
-
- int dst = instr->VFPDRegValue(kSinglePrecision);
- int src = instr->VFPMRegValue(src_precision);
-
- // Bit 7 in vcvt instructions indicates if we should use the FPSCR rounding
- // mode or the default Round to Zero mode.
- VFPRoundingMode mode = (instr->Bit(7) != 1) ? FPSCR_rounding_mode_
- : RZ;
- ASSERT((mode == RM) || (mode == RZ) || (mode == RN));
-
- bool unsigned_integer = (instr->Bit(16) == 0);
- bool double_precision = (src_precision == kDoublePrecision);
-
- double val = double_precision ? get_double_from_d_register(src)
- : get_float_from_s_register(src);
-
- int temp = unsigned_integer ? static_cast<uint32_t>(val)
- : static_cast<int32_t>(val);
-
- inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);
-
- double abs_diff =
- unsigned_integer ? fabs(val - static_cast<uint32_t>(temp))
- : fabs(val - temp);
-
- inexact_vfp_flag_ = (abs_diff != 0);
-
- if (inv_op_vfp_flag_) {
- temp = VFPConversionSaturate(val, unsigned_integer);
- } else {
- switch (mode) {
- case RN: {
- int val_sign = (val > 0) ? 1 : -1;
- if (abs_diff > 0.5) {
- temp += val_sign;
- } else if (abs_diff == 0.5) {
- // Round to even if exactly halfway.
- temp = ((temp % 2) == 0) ? temp : temp + val_sign;
- }
- break;
- }
-
- case RM:
- temp = temp > val ? temp - 1 : temp;
- break;
-
- case RZ:
- // Nothing to do.
- break;
-
- default:
- UNREACHABLE();
- }
- }
-
- // Update the destination register.
- set_s_register_from_sinteger(dst, temp);
-
- } else {
- bool unsigned_integer = (instr->Bit(7) == 0);
-
- int dst = instr->VFPDRegValue(src_precision);
- int src = instr->VFPMRegValue(kSinglePrecision);
-
- int val = get_sinteger_from_s_register(src);
-
- if (src_precision == kDoublePrecision) {
- if (unsigned_integer) {
- set_d_register_from_double(dst,
- static_cast<double>((uint32_t)val));
- } else {
- set_d_register_from_double(dst, static_cast<double>(val));
- }
- } else {
- if (unsigned_integer) {
- set_s_register_from_float(dst,
- static_cast<float>((uint32_t)val));
- } else {
- set_s_register_from_float(dst, static_cast<float>(val));
- }
- }
- }
-}
-
-
-// void Simulator::DecodeType6CoprocessorIns(Instruction* instr)
-// Decode Type 6 coprocessor instructions.
-// Dm = vmov(Rt, Rt2)
-// <Rt, Rt2> = vmov(Dm)
-// Ddst = MEM(Rbase + 4*offset).
-// MEM(Rbase + 4*offset) = Dsrc.
-void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
- ASSERT((instr->TypeValue() == 6));
-
- if (instr->CoprocessorValue() == 0xA) {
- switch (instr->OpcodeValue()) {
- case 0x8:
- case 0xA:
- case 0xC:
- case 0xE: { // Load and store single precision float to memory.
- int rn = instr->RnValue();
- int vd = instr->VFPDRegValue(kSinglePrecision);
- int offset = instr->Immed8Value();
- if (!instr->HasU()) {
- offset = -offset;
- }
-
- int32_t address = get_register(rn) + 4 * offset;
- if (instr->HasL()) {
- // Load double from memory: vldr.
- set_s_register_from_sinteger(vd, ReadW(address, instr));
- } else {
- // Store double to memory: vstr.
- WriteW(address, get_sinteger_from_s_register(vd), instr);
- }
- break;
- }
- default:
- UNIMPLEMENTED(); // Not used by V8.
- break;
- }
- } else if (instr->CoprocessorValue() == 0xB) {
- switch (instr->OpcodeValue()) {
- case 0x2:
- // Load and store double to two GP registers
- if (instr->Bits(7, 4) != 0x1) {
- UNIMPLEMENTED(); // Not used by V8.
- } else {
- int rt = instr->RtValue();
- int rn = instr->RnValue();
- int vm = instr->VmValue();
- if (instr->HasL()) {
- int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
- int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
-
- set_register(rt, rt_int_value);
- set_register(rn, rn_int_value);
- } else {
- int32_t rs_val = get_register(rt);
- int32_t rn_val = get_register(rn);
-
- set_s_register_from_sinteger(2*vm, rs_val);
- set_s_register_from_sinteger((2*vm+1), rn_val);
- }
- }
- break;
- case 0x8:
- case 0xC: { // Load and store double to memory.
- int rn = instr->RnValue();
- int vd = instr->VdValue();
- int offset = instr->Immed8Value();
- if (!instr->HasU()) {
- offset = -offset;
- }
- int32_t address = get_register(rn) + 4 * offset;
- if (instr->HasL()) {
- // Load double from memory: vldr.
- set_s_register_from_sinteger(2*vd, ReadW(address, instr));
- set_s_register_from_sinteger(2*vd + 1, ReadW(address + 4, instr));
- } else {
- // Store double to memory: vstr.
- WriteW(address, get_sinteger_from_s_register(2*vd), instr);
- WriteW(address + 4, get_sinteger_from_s_register(2*vd + 1), instr);
- }
- break;
- }
- default:
- UNIMPLEMENTED(); // Not used by V8.
- break;
- }
- } else {
- UNIMPLEMENTED(); // Not used by V8.
- }
-}
-
-
-// Executes the current instruction.
-void Simulator::InstructionDecode(Instruction* instr) {
- if (v8::internal::FLAG_check_icache) {
- CheckICache(isolate_->simulator_i_cache(), instr);
- }
- pc_modified_ = false;
- if (::v8::internal::FLAG_trace_sim) {
- disasm::NameConverter converter;
- disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
- v8::internal::EmbeddedVector<char, 256> buffer;
- dasm.InstructionDecode(buffer,
- reinterpret_cast<byte*>(instr));
- PrintF(" 0x%08x %s\n", reinterpret_cast<intptr_t>(instr), buffer.start());
- }
- if (instr->ConditionField() == kSpecialCondition) {
- UNIMPLEMENTED();
- } else if (ConditionallyExecute(instr)) {
- switch (instr->TypeValue()) {
- case 0:
- case 1: {
- DecodeType01(instr);
- break;
- }
- case 2: {
- DecodeType2(instr);
- break;
- }
- case 3: {
- DecodeType3(instr);
- break;
- }
- case 4: {
- DecodeType4(instr);
- break;
- }
- case 5: {
- DecodeType5(instr);
- break;
- }
- case 6: {
- DecodeType6(instr);
- break;
- }
- case 7: {
- DecodeType7(instr);
- break;
- }
- default: {
- UNIMPLEMENTED();
- break;
- }
- }
- // If the instruction is a non taken conditional stop, we need to skip the
- // inlined message address.
- } else if (instr->IsStop()) {
- set_pc(get_pc() + 2 * Instruction::kInstrSize);
- }
- if (!pc_modified_) {
- set_register(pc, reinterpret_cast<int32_t>(instr)
- + Instruction::kInstrSize);
- }
-}
-
-
-void Simulator::Execute() {
- // Get the PC to simulate. Cannot use the accessor here as we need the
- // raw PC value and not the one used as input to arithmetic instructions.
- int program_counter = get_pc();
-
- if (::v8::internal::FLAG_stop_sim_at == 0) {
- // Fast version of the dispatch loop without checking whether the simulator
- // should be stopping at a particular executed instruction.
- while (program_counter != end_sim_pc) {
- Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
- icount_++;
- InstructionDecode(instr);
- program_counter = get_pc();
- }
- } else {
- // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
- // we reach the particular instuction count.
- while (program_counter != end_sim_pc) {
- Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
- icount_++;
- if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
- ArmDebugger dbg(this);
- dbg.Debug();
- } else {
- InstructionDecode(instr);
- }
- program_counter = get_pc();
- }
- }
-}
-
-
-int32_t Simulator::Call(byte* entry, int argument_count, ...) {
- va_list parameters;
- va_start(parameters, argument_count);
- // Setup arguments
-
- // First four arguments passed in registers.
- ASSERT(argument_count >= 4);
- set_register(r0, va_arg(parameters, int32_t));
- set_register(r1, va_arg(parameters, int32_t));
- set_register(r2, va_arg(parameters, int32_t));
- set_register(r3, va_arg(parameters, int32_t));
-
- // Remaining arguments passed on stack.
- int original_stack = get_register(sp);
- // Compute position of stack on entry to generated code.
- int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
- if (OS::ActivationFrameAlignment() != 0) {
- entry_stack &= -OS::ActivationFrameAlignment();
- }
- // Store remaining arguments on stack, from low to high memory.
- intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
- for (int i = 4; i < argument_count; i++) {
- stack_argument[i - 4] = va_arg(parameters, int32_t);
- }
- va_end(parameters);
- set_register(sp, entry_stack);
-
- // Prepare to execute the code at entry
- set_register(pc, reinterpret_cast<int32_t>(entry));
- // Put down marker for end of simulation. The simulator will stop simulation
- // when the PC reaches this value. By saving the "end simulation" value into
- // the LR the simulation stops when returning to this call point.
- set_register(lr, end_sim_pc);
-
- // Remember the values of callee-saved registers.
- // The code below assumes that r9 is not used as sb (static base) in
- // simulator code and therefore is regarded as a callee-saved register.
- int32_t r4_val = get_register(r4);
- int32_t r5_val = get_register(r5);
- int32_t r6_val = get_register(r6);
- int32_t r7_val = get_register(r7);
- int32_t r8_val = get_register(r8);
- int32_t r9_val = get_register(r9);
- int32_t r10_val = get_register(r10);
- int32_t r11_val = get_register(r11);
-
- // Setup the callee-saved registers with a known value. To be able to check
- // that they are preserved properly across JS execution.
- int32_t callee_saved_value = icount_;
- set_register(r4, callee_saved_value);
- set_register(r5, callee_saved_value);
- set_register(r6, callee_saved_value);
- set_register(r7, callee_saved_value);
- set_register(r8, callee_saved_value);
- set_register(r9, callee_saved_value);
- set_register(r10, callee_saved_value);
- set_register(r11, callee_saved_value);
-
- // Start the simulation
- Execute();
-
- // Check that the callee-saved registers have been preserved.
- CHECK_EQ(callee_saved_value, get_register(r4));
- CHECK_EQ(callee_saved_value, get_register(r5));
- CHECK_EQ(callee_saved_value, get_register(r6));
- CHECK_EQ(callee_saved_value, get_register(r7));
- CHECK_EQ(callee_saved_value, get_register(r8));
- CHECK_EQ(callee_saved_value, get_register(r9));
- CHECK_EQ(callee_saved_value, get_register(r10));
- CHECK_EQ(callee_saved_value, get_register(r11));
-
- // Restore callee-saved registers with the original value.
- set_register(r4, r4_val);
- set_register(r5, r5_val);
- set_register(r6, r6_val);
- set_register(r7, r7_val);
- set_register(r8, r8_val);
- set_register(r9, r9_val);
- set_register(r10, r10_val);
- set_register(r11, r11_val);
-
- // Pop stack passed arguments.
- CHECK_EQ(entry_stack, get_register(sp));
- set_register(sp, original_stack);
-
- int32_t result = get_register(r0);
- return result;
-}
-
-
-uintptr_t Simulator::PushAddress(uintptr_t address) {
- int new_sp = get_register(sp) - sizeof(uintptr_t);
- uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
- *stack_slot = address;
- set_register(sp, new_sp);
- return new_sp;
-}
-
-
-uintptr_t Simulator::PopAddress() {
- int current_sp = get_register(sp);
- uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
- uintptr_t address = *stack_slot;
- set_register(sp, current_sp + sizeof(uintptr_t));
- return address;
-}
-
-} } // namespace v8::internal
-
-#endif // USE_SIMULATOR
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/simulator-arm.h b/src/3rdparty/v8/src/arm/simulator-arm.h
deleted file mode 100644
index b7b1b68..0000000
--- a/src/3rdparty/v8/src/arm/simulator-arm.h
+++ /dev/null
@@ -1,407 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// Declares a Simulator for ARM instructions if we are not generating a native
-// ARM binary. This Simulator allows us to run and debug ARM code generation on
-// regular desktop machines.
-// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
-// which will start execution in the Simulator or forwards to the real entry
-// on a ARM HW platform.
-
-#ifndef V8_ARM_SIMULATOR_ARM_H_
-#define V8_ARM_SIMULATOR_ARM_H_
-
-#include "allocation.h"
-
-#if !defined(USE_SIMULATOR)
-// Running without a simulator on a native arm platform.
-
-namespace v8 {
-namespace internal {
-
-// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
- void*, int*, Address, int, Isolate*);
-
-
-// Call the generated regexp code directly. The code at the entry address
-// should act as a function matching the type arm_regexp_matcher.
-// The fifth argument is a dummy that reserves the space used for
-// the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
- (FUNCTION_CAST<arm_regexp_matcher>(entry)( \
- p0, p1, p2, p3, NULL, p4, p5, p6, p7))
-
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- reinterpret_cast<TryCatch*>(try_catch_address)
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on arm uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch() { }
-};
-
-} } // namespace v8::internal
-
-#else // !defined(USE_SIMULATOR)
-// Running with a simulator.
-
-#include "constants-arm.h"
-#include "hashmap.h"
-#include "assembler.h"
-
-namespace v8 {
-namespace internal {
-
-class CachePage {
- public:
- static const int LINE_VALID = 0;
- static const int LINE_INVALID = 1;
-
- static const int kPageShift = 12;
- static const int kPageSize = 1 << kPageShift;
- static const int kPageMask = kPageSize - 1;
- static const int kLineShift = 2; // The cache line is only 4 bytes right now.
- static const int kLineLength = 1 << kLineShift;
- static const int kLineMask = kLineLength - 1;
-
- CachePage() {
- memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
- }
-
- char* ValidityByte(int offset) {
- return &validity_map_[offset >> kLineShift];
- }
-
- char* CachedData(int offset) {
- return &data_[offset];
- }
-
- private:
- char data_[kPageSize]; // The cached data.
- static const int kValidityMapSize = kPageSize >> kLineShift;
- char validity_map_[kValidityMapSize]; // One byte per line.
-};
-
-
-class Simulator {
- public:
- friend class ArmDebugger;
- enum Register {
- no_reg = -1,
- r0 = 0, r1, r2, r3, r4, r5, r6, r7,
- r8, r9, r10, r11, r12, r13, r14, r15,
- num_registers,
- sp = 13,
- lr = 14,
- pc = 15,
- s0 = 0, s1, s2, s3, s4, s5, s6, s7,
- s8, s9, s10, s11, s12, s13, s14, s15,
- s16, s17, s18, s19, s20, s21, s22, s23,
- s24, s25, s26, s27, s28, s29, s30, s31,
- num_s_registers = 32,
- d0 = 0, d1, d2, d3, d4, d5, d6, d7,
- d8, d9, d10, d11, d12, d13, d14, d15,
- num_d_registers = 16
- };
-
- Simulator();
- ~Simulator();
-
- // The currently executing Simulator instance. Potentially there can be one
- // for each native thread.
- static Simulator* current(v8::internal::Isolate* isolate);
-
- // Accessors for register state. Reading the pc value adheres to the ARM
- // architecture specification and is off by a 8 from the currently executing
- // instruction.
- void set_register(int reg, int32_t value);
- int32_t get_register(int reg) const;
- void set_dw_register(int dreg, const int* dbl);
-
- // Support for VFP.
- void set_s_register(int reg, unsigned int value);
- unsigned int get_s_register(int reg) const;
- void set_d_register_from_double(int dreg, const double& dbl);
- double get_double_from_d_register(int dreg);
- void set_s_register_from_float(int sreg, const float dbl);
- float get_float_from_s_register(int sreg);
- void set_s_register_from_sinteger(int reg, const int value);
- int get_sinteger_from_s_register(int reg);
-
- // Special case of set_register and get_register to access the raw PC value.
- void set_pc(int32_t value);
- int32_t get_pc() const;
-
- // Accessor to the internal simulator stack area.
- uintptr_t StackLimit() const;
-
- // Executes ARM instructions until the PC reaches end_sim_pc.
- void Execute();
-
- // Call on program start.
- static void Initialize();
-
- // V8 generally calls into generated JS code with 5 parameters and into
- // generated RegExp code with 7 parameters. This is a convenience function,
- // which sets up the simulator state and grabs the result on return.
- int32_t Call(byte* entry, int argument_count, ...);
-
- // Push an address onto the JS stack.
- uintptr_t PushAddress(uintptr_t address);
-
- // Pop an address from the JS stack.
- uintptr_t PopAddress();
-
- // ICache checking.
- static void FlushICache(v8::internal::HashMap* i_cache, void* start,
- size_t size);
-
- // Returns true if pc register contains one of the 'special_values' defined
- // below (bad_lr, end_sim_pc).
- bool has_bad_pc() const;
-
- private:
- enum special_values {
- // Known bad pc value to ensure that the simulator does not execute
- // without being properly setup.
- bad_lr = -1,
- // A pc value used to signal the simulator to stop execution. Generally
- // the lr is set to this value on transition from native C code to
- // simulated execution, so that the simulator can "return" to the native
- // C code.
- end_sim_pc = -2
- };
-
- // Unsupported instructions use Format to print an error and stop execution.
- void Format(Instruction* instr, const char* format);
-
- // Checks if the current instruction should be executed based on its
- // condition bits.
- bool ConditionallyExecute(Instruction* instr);
-
- // Helper functions to set the conditional flags in the architecture state.
- void SetNZFlags(int32_t val);
- void SetCFlag(bool val);
- void SetVFlag(bool val);
- bool CarryFrom(int32_t left, int32_t right);
- bool BorrowFrom(int32_t left, int32_t right);
- bool OverflowFrom(int32_t alu_out,
- int32_t left,
- int32_t right,
- bool addition);
-
- // Support for VFP.
- void Compute_FPSCR_Flags(double val1, double val2);
- void Copy_FPSCR_to_APSR();
-
- // Helper functions to decode common "addressing" modes
- int32_t GetShiftRm(Instruction* instr, bool* carry_out);
- int32_t GetImm(Instruction* instr, bool* carry_out);
- void HandleRList(Instruction* instr, bool load);
- void SoftwareInterrupt(Instruction* instr);
-
- // Stop helper functions.
- inline bool isStopInstruction(Instruction* instr);
- inline bool isWatchedStop(uint32_t bkpt_code);
- inline bool isEnabledStop(uint32_t bkpt_code);
- inline void EnableStop(uint32_t bkpt_code);
- inline void DisableStop(uint32_t bkpt_code);
- inline void IncreaseStopCounter(uint32_t bkpt_code);
- void PrintStopInfo(uint32_t code);
-
- // Read and write memory.
- inline uint8_t ReadBU(int32_t addr);
- inline int8_t ReadB(int32_t addr);
- inline void WriteB(int32_t addr, uint8_t value);
- inline void WriteB(int32_t addr, int8_t value);
-
- inline uint16_t ReadHU(int32_t addr, Instruction* instr);
- inline int16_t ReadH(int32_t addr, Instruction* instr);
- // Note: Overloaded on the sign of the value.
- inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
- inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
-
- inline int ReadW(int32_t addr, Instruction* instr);
- inline void WriteW(int32_t addr, int value, Instruction* instr);
-
- int32_t* ReadDW(int32_t addr);
- void WriteDW(int32_t addr, int32_t value1, int32_t value2);
-
- // Executing is handled based on the instruction type.
- // Both type 0 and type 1 rolled into one.
- void DecodeType01(Instruction* instr);
- void DecodeType2(Instruction* instr);
- void DecodeType3(Instruction* instr);
- void DecodeType4(Instruction* instr);
- void DecodeType5(Instruction* instr);
- void DecodeType6(Instruction* instr);
- void DecodeType7(Instruction* instr);
-
- // Support for VFP.
- void DecodeTypeVFP(Instruction* instr);
- void DecodeType6CoprocessorIns(Instruction* instr);
-
- void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
- void DecodeVCMP(Instruction* instr);
- void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
- void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
-
- // Executes one instruction.
- void InstructionDecode(Instruction* instr);
-
- // ICache.
- static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
- static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
- int size);
- static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
-
- // Runtime call support.
- static void* RedirectExternalReference(
- void* external_function,
- v8::internal::ExternalReference::Type type);
-
- // For use in calls that take two double values, constructed from r0, r1, r2
- // and r3.
- void GetFpArgs(double* x, double* y);
- void SetFpResult(const double& result);
- void TrashCallerSaveRegisters();
-
- // Architecture state.
- // Saturating instructions require a Q flag to indicate saturation.
- // There is currently no way to read the CPSR directly, and thus read the Q
- // flag, so this is left unimplemented.
- int32_t registers_[16];
- bool n_flag_;
- bool z_flag_;
- bool c_flag_;
- bool v_flag_;
-
- // VFP architecture state.
- unsigned int vfp_register[num_s_registers];
- bool n_flag_FPSCR_;
- bool z_flag_FPSCR_;
- bool c_flag_FPSCR_;
- bool v_flag_FPSCR_;
-
- // VFP rounding mode. See ARM DDI 0406B Page A2-29.
- VFPRoundingMode FPSCR_rounding_mode_;
-
- // VFP FP exception flags architecture state.
- bool inv_op_vfp_flag_;
- bool div_zero_vfp_flag_;
- bool overflow_vfp_flag_;
- bool underflow_vfp_flag_;
- bool inexact_vfp_flag_;
-
- // Simulator support.
- char* stack_;
- bool pc_modified_;
- int icount_;
-
- // Icache simulation
- v8::internal::HashMap* i_cache_;
-
- // Registered breakpoints.
- Instruction* break_pc_;
- Instr break_instr_;
-
- v8::internal::Isolate* isolate_;
-
- // A stop is watched if its code is less than kNumOfWatchedStops.
- // Only watched stops support enabling/disabling and the counter feature.
- static const uint32_t kNumOfWatchedStops = 256;
-
- // Breakpoint is disabled if bit 31 is set.
- static const uint32_t kStopDisabledBit = 1 << 31;
-
- // A stop is enabled, meaning the simulator will stop when meeting the
- // instruction, if bit 31 of watched_stops[code].count is unset.
- // The value watched_stops[code].count & ~(1 << 31) indicates how many times
- // the breakpoint was hit or gone through.
- struct StopCountAndDesc {
- uint32_t count;
- char* desc;
- };
- StopCountAndDesc watched_stops[kNumOfWatchedStops];
-};
-
-
-// When running with the simulator transition into simulated execution at this
-// point.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
- FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
-
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
- Simulator::current(Isolate::Current())->Call( \
- entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
-
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- try_catch_address == NULL ? \
- NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
-
-
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code. Setting the c_limit to indicate a very small
-// stack cause stack overflow errors, since the simulator ignores the input.
-// This is unlikely to be an issue in practice, though it might cause testing
-// trouble down the line.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
- return Simulator::current(Isolate::Current())->StackLimit();
- }
-
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(Isolate::Current());
- return sim->PushAddress(try_catch_address);
- }
-
- static inline void UnregisterCTryCatch() {
- Simulator::current(Isolate::Current())->PopAddress();
- }
-};
-
-} } // namespace v8::internal
-
-#endif // !defined(USE_SIMULATOR)
-#endif // V8_ARM_SIMULATOR_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/stub-cache-arm.cc b/src/3rdparty/v8/src/arm/stub-cache-arm.cc
deleted file mode 100644
index a71a4c5..0000000
--- a/src/3rdparty/v8/src/arm/stub-cache-arm.cc
+++ /dev/null
@@ -1,4034 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "ic-inl.h"
-#include "codegen-inl.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register name,
- Register offset,
- Register scratch,
- Register scratch2) {
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
-
- uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
- uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
-
- // Check the relative positions of the address fields.
- ASSERT(value_off_addr > key_off_addr);
- ASSERT((value_off_addr - key_off_addr) % 4 == 0);
- ASSERT((value_off_addr - key_off_addr) < (256 * 4));
-
- Label miss;
- Register offsets_base_addr = scratch;
-
- // Check that the key in the entry matches the name.
- __ mov(offsets_base_addr, Operand(key_offset));
- __ ldr(ip, MemOperand(offsets_base_addr, offset, LSL, 1));
- __ cmp(name, ip);
- __ b(ne, &miss);
-
- // Get the code entry from the cache.
- __ add(offsets_base_addr, offsets_base_addr,
- Operand(value_off_addr - key_off_addr));
- __ ldr(scratch2, MemOperand(offsets_base_addr, offset, LSL, 1));
-
- // Check that the flags match what we're looking for.
- __ ldr(scratch2, FieldMemOperand(scratch2, Code::kFlagsOffset));
- __ bic(scratch2, scratch2, Operand(Code::kFlagsNotUsedInLookup));
- __ cmp(scratch2, Operand(flags));
- __ b(ne, &miss);
-
- // Re-load code entry from cache.
- __ ldr(offset, MemOperand(offsets_base_addr, offset, LSL, 1));
-
- // Jump to the first instruction in the code stub.
- __ add(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(offset);
-
- // Miss: fall through.
- __ bind(&miss);
-}
-
-
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be a symbol and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- String* name,
- Register scratch0,
- Register scratch1) {
- ASSERT(name->IsSymbol());
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
- Label done;
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- Register map = scratch1;
- __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
- __ b(ne, miss_label);
-
- // Check that receiver is a JSObject.
- __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmp(scratch0, Operand(FIRST_JS_OBJECT_TYPE));
- __ b(lt, miss_label);
-
- // Load properties array.
- Register properties = scratch0;
- __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- // Check that the properties array is a dictionary.
- __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
- Register tmp = properties;
- __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
- __ cmp(map, tmp);
- __ b(ne, miss_label);
-
- // Restore the temporarily used register.
- __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
- // Compute the capacity mask.
- const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
-
- // Generate an unrolled loop that performs a few probes before
- // giving up.
- static const int kProbes = 4;
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
-
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the null value).
- for (int i = 0; i < kProbes; i++) {
- // scratch0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = scratch1;
- // Capacity is smi 2^n.
- __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
- __ sub(index, index, Operand(1));
- __ and_(index, index, Operand(
- Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
-
- Register entity_name = scratch1;
- // Having undefined at this place means the name is not contained.
- ASSERT_EQ(kSmiTagSize, 1);
- Register tmp = properties;
- __ add(tmp, properties, Operand(index, LSL, 1));
- __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
-
- ASSERT(!tmp.is(entity_name));
- __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
- __ cmp(entity_name, tmp);
- if (i != kProbes - 1) {
- __ b(eq, &done);
-
- // Stop if found the property.
- __ cmp(entity_name, Operand(Handle<String>(name)));
- __ b(eq, miss_label);
-
- // Check if the entry name is not a symbol.
- __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
- __ ldrb(entity_name,
- FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ tst(entity_name, Operand(kIsSymbolMask));
- __ b(eq, miss_label);
-
- // Restore the properties.
- __ ldr(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- } else {
- // Give up probing if still not found the undefined value.
- __ b(ne, miss_label);
- }
- }
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2) {
- Isolate* isolate = masm->isolate();
- Label miss;
-
- // Make sure that code is valid. The shifting code relies on the
- // entry size being 8.
- ASSERT(sizeof(Entry) == 8);
-
- // Make sure the flags does not name a specific type.
- ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Make sure that there are no register conflicts.
- ASSERT(!scratch.is(receiver));
- ASSERT(!scratch.is(name));
- ASSERT(!extra.is(receiver));
- ASSERT(!extra.is(name));
- ASSERT(!extra.is(scratch));
- ASSERT(!extra2.is(receiver));
- ASSERT(!extra2.is(name));
- ASSERT(!extra2.is(scratch));
- ASSERT(!extra2.is(extra));
-
- // Check scratch, extra and extra2 registers are valid.
- ASSERT(!scratch.is(no_reg));
- ASSERT(!extra.is(no_reg));
- ASSERT(!extra2.is(no_reg));
-
- // Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset));
- __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ add(scratch, scratch, Operand(ip));
- __ eor(scratch, scratch, Operand(flags));
- __ and_(scratch,
- scratch,
- Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
-
- // Probe the primary table.
- ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra, extra2);
-
- // Primary miss: Compute hash for secondary probe.
- __ sub(scratch, scratch, Operand(name));
- __ add(scratch, scratch, Operand(flags));
- __ and_(scratch,
- scratch,
- Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
-
- // Probe the secondary table.
- ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra, extra2);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
-}
-
-
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype) {
- // Load the global or builtins object from the current context.
- __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
- __ ldr(prototype,
- FieldMemOperand(prototype, GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
- __ ldr(prototype, MemOperand(prototype, Context::SlotOffset(index)));
- // Load the initial map. The global functions all have initial maps.
- __ ldr(prototype,
- FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the prototype from the initial map.
- __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
- Isolate* isolate = masm->isolate();
- // Check we're still in the same context.
- __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ Move(ip, isolate->global());
- __ cmp(prototype, ip);
- __ b(ne, miss);
- // Get the global function with the given index.
- JSFunction* function =
- JSFunction::cast(isolate->global_context()->get(index));
- // Load its initial map. The global functions all have initial maps.
- __ Move(prototype, Handle<Map>(function->initial_map()));
- // Load the prototype from the initial map.
- __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-// Load a fast property out of a holder object (src). In-object properties
-// are loaded directly otherwise the property is loaded from the properties
-// fixed array.
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst, Register src,
- JSObject* holder, int index) {
- // Adjust for the number of properties stored in the holder.
- index -= holder->map()->inobject_properties();
- if (index < 0) {
- // Get the property straight out of the holder.
- int offset = holder->map()->instance_size() + (index * kPointerSize);
- __ ldr(dst, FieldMemOperand(src, offset));
- } else {
- // Calculate the offset into the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
- __ ldr(dst, FieldMemOperand(dst, offset));
- }
-}
-
-
-void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* miss_label) {
- // Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss_label);
-
- // Check that the object is a JS array.
- __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
- __ b(ne, miss_label);
-
- // Load length directly from the JS array.
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Ret();
-}
-
-
-// Generate code to check if an object is a string. If the object is a
-// heap object, its map's instance type is left in the scratch1 register.
-// If this is not needed, scratch1 and scratch2 may be the same register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* smi,
- Label* non_string_object) {
- // Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, smi);
-
- // Check that the object is a string.
- __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ and_(scratch2, scratch1, Operand(kIsNotStringMask));
- // The cast is to resolve the overload for the argument of 0x0.
- __ cmp(scratch2, Operand(static_cast<int32_t>(kStringTag)));
- __ b(ne, non_string_object);
-}
-
-
-// Generate code to load the length from a string object and return the length.
-// If the receiver object is not a string or a wrapped string object the
-// execution continues at the miss label. The register containing the
-// receiver is potentially clobbered.
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss,
- bool support_wrappers) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
- support_wrappers ? &check_wrapper : miss);
-
- // Load length directly from the string.
- __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
- __ Ret();
-
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, Operand(JS_VALUE_TYPE));
- __ b(ne, miss);
-
- // Unwrap the value and check if the wrapped value is a string.
- __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
- __ Ret();
- }
-}
-
-
-void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- __ mov(r0, scratch1);
- __ Ret();
-}
-
-
-// Generate StoreField code, value is passed in r0 register.
-// When leaving generated code after success, the receiver_reg and name_reg
-// may be clobbered. Upon branch to miss_label, the receiver and name
-// registers have their original values.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- JSObject* object,
- int index,
- Map* transition,
- Register receiver_reg,
- Register name_reg,
- Register scratch,
- Label* miss_label) {
- // r0 : value
- Label exit;
-
- // Check that the receiver isn't a smi.
- __ tst(receiver_reg, Operand(kSmiTagMask));
- __ b(eq, miss_label);
-
- // Check that the map of the receiver hasn't changed.
- __ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
- __ cmp(scratch, Operand(Handle<Map>(object->map())));
- __ b(ne, miss_label);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- // Perform map transition for the receiver if necessary.
- if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
- // The properties must be extended before we can store the value.
- // We jump to a runtime call that extends the properties array.
- __ push(receiver_reg);
- __ mov(r2, Operand(Handle<Map>(transition)));
- __ Push(r2, r0);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
- masm->isolate()),
- 3,
- 1);
- return;
- }
-
- if (transition != NULL) {
- // Update the map of the object; no write barrier updating is
- // needed because the map is never in new space.
- __ mov(ip, Operand(Handle<Map>(transition)));
- __ str(ip, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
- }
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties is not going to change.
- index -= object->map()->inobject_properties();
-
- if (index < 0) {
- // Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ str(r0, FieldMemOperand(receiver_reg, offset));
-
- // Skip updating write barrier if storing a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &exit);
-
- // Update the write barrier for the array address.
- // Pass the now unused name_reg as a scratch register.
- __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
- } else {
- // Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- // Get the properties array
- __ ldr(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ str(r0, FieldMemOperand(scratch, offset));
-
- // Skip updating write barrier if storing a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &exit);
-
- // Update the write barrier for the array address.
- // Ok to clobber receiver_reg and name_reg, since we return.
- __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
- }
-
- // Return the value (register r0).
- __ bind(&exit);
- __ Ret();
-}
-
-
-void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Code* code = NULL;
- if (kind == Code::LOAD_IC) {
- code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
- } else {
- code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
- }
-
- Handle<Code> ic(code);
- __ Jump(ic, RelocInfo::CODE_TARGET);
-}
-
-
-static void GenerateCallFunction(MacroAssembler* masm,
- Object* object,
- const ParameterCount& arguments,
- Label* miss) {
- // ----------- S t a t e -------------
- // -- r0: receiver
- // -- r1: function to call
- // -----------------------------------
-
- // Check that the function really is a function.
- __ JumpIfSmi(r1, miss);
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
- __ b(ne, miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, arguments.immediate() * kPointerSize));
- }
-
- // Invoke the function.
- __ InvokeFunction(r1, arguments, JUMP_FUNCTION);
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- JSObject* holder_obj) {
- __ push(name);
- InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
- Register scratch = name;
- __ mov(scratch, Operand(Handle<Object>(interceptor)));
- __ push(scratch);
- __ push(receiver);
- __ push(holder);
- __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
- __ push(scratch);
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- JSObject* holder_obj) {
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
- masm->isolate());
- __ mov(r0, Operand(5));
- __ mov(r1, Operand(ref));
-
- CEntryStub stub(1);
- __ CallStub(&stub);
-}
-
-static const int kFastApiCallArguments = 3;
-
-// Reserves space for the extra arguments to FastHandleApiCall in the
-// caller's frame.
-//
-// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
-static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
- Register scratch) {
- __ mov(scratch, Operand(Smi::FromInt(0)));
- for (int i = 0; i < kFastApiCallArguments; i++) {
- __ push(scratch);
- }
-}
-
-
-// Undoes the effects of ReserveSpaceForFastApiCall.
-static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
- __ Drop(kFastApiCallArguments);
-}
-
-
-static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
- // ----------- S t a t e -------------
- // -- sp[0] : holder (set by CheckPrototypes)
- // -- sp[4] : callee js function
- // -- sp[8] : call data
- // -- sp[12] : last js argument
- // -- ...
- // -- sp[(argc + 3) * 4] : first js argument
- // -- sp[(argc + 4) * 4] : receiver
- // -----------------------------------
- // Get the function and setup the context.
- JSFunction* function = optimization.constant_function();
- __ mov(r5, Operand(Handle<JSFunction>(function)));
- __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
-
- // Pass the additional arguments FastHandleApiCall expects.
- Object* call_data = optimization.api_call_info()->data();
- Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
- if (masm->isolate()->heap()->InNewSpace(call_data)) {
- __ Move(r0, api_call_info_handle);
- __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
- } else {
- __ Move(r6, Handle<Object>(call_data));
- }
- // Store js function and call data.
- __ stm(ib, sp, r5.bit() | r6.bit());
-
- // r2 points to call data as expected by Arguments
- // (refer to layout above).
- __ add(r2, sp, Operand(2 * kPointerSize));
-
- Object* callback = optimization.api_call_info()->callback();
- Address api_function_address = v8::ToCData<Address>(callback);
- ApiFunction fun(api_function_address);
-
- const int kApiStackSpace = 4;
- __ EnterExitFrame(false, kApiStackSpace);
-
- // r0 = v8::Arguments&
- // Arguments is after the return address.
- __ add(r0, sp, Operand(1 * kPointerSize));
- // v8::Arguments::implicit_args = data
- __ str(r2, MemOperand(r0, 0 * kPointerSize));
- // v8::Arguments::values = last argument
- __ add(ip, r2, Operand(argc * kPointerSize));
- __ str(ip, MemOperand(r0, 1 * kPointerSize));
- // v8::Arguments::length_ = argc
- __ mov(ip, Operand(argc));
- __ str(ip, MemOperand(r0, 2 * kPointerSize));
- // v8::Arguments::is_construct_call = 0
- __ mov(ip, Operand(0));
- __ str(ip, MemOperand(r0, 3 * kPointerSize));
-
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
- const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
- ExternalReference ref = ExternalReference(&fun,
- ExternalReference::DIRECT_API_CALL,
- masm->isolate());
- return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
-}
-
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(StubCompiler* stub_compiler,
- const ParameterCount& arguments,
- Register name)
- : stub_compiler_(stub_compiler),
- arguments_(arguments),
- name_(name) {}
-
- MaybeObject* Compile(MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- CallOptimization optimization(lookup);
-
- if (optimization.is_constant_call()) {
- return CompileCacheable(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- holder,
- lookup,
- name,
- optimization,
- miss);
- } else {
- CompileRegular(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- name,
- holder,
- miss);
- return masm->isolate()->heap()->undefined_value();
- }
- }
-
- private:
- MaybeObject* CompileCacheable(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- const CallOptimization& optimization,
- Label* miss_label) {
- ASSERT(optimization.is_constant_call());
- ASSERT(!lookup->holder()->IsGlobalObject());
-
- Counters* counters = masm->isolate()->counters();
-
- int depth1 = kInvalidProtoDepth;
- int depth2 = kInvalidProtoDepth;
- bool can_do_fast_api_call = false;
- if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 =
- optimization.GetPrototypeDepthOfExpectedType(object,
- interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 =
- optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
- lookup->holder());
- }
- can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
- (depth2 != kInvalidProtoDepth);
- }
-
- __ IncrementCounter(counters->call_const_interceptor(), 1,
- scratch1, scratch2);
-
- if (can_do_fast_api_call) {
- __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
- scratch1, scratch2);
- ReserveSpaceForFastApiCall(masm, scratch1);
- }
-
- // Check that the maps from receiver to interceptor's holder
- // haven't changed and thus we can invoke interceptor.
- Label miss_cleanup;
- Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver,
- interceptor_holder, scratch1,
- scratch2, scratch3, name, depth1, miss);
-
- // Invoke an interceptor and if it provides a value,
- // branch to |regular_invoke|.
- Label regular_invoke;
- LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
- &regular_invoke);
-
- // Interceptor returned nothing for this property. Try to use cached
- // constant function.
-
- // Check that the maps from interceptor's holder to constant function's
- // holder haven't changed and thus we can use cached constant function.
- if (interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- lookup->holder(), scratch1,
- scratch2, scratch3, name, depth2, miss);
- } else {
- // CheckPrototypes has a side effect of fetching a 'holder'
- // for API (object which is instanceof for the signature). It's
- // safe to omit it here, as if present, it should be fetched
- // by the previous CheckPrototypes.
- ASSERT(depth2 == kInvalidProtoDepth);
- }
-
- // Invoke function.
- if (can_do_fast_api_call) {
- MaybeObject* result = GenerateFastApiDirectCall(masm,
- optimization,
- arguments_.immediate());
- if (result->IsFailure()) return result;
- } else {
- __ InvokeFunction(optimization.constant_function(), arguments_,
- JUMP_FUNCTION);
- }
-
- // Deferred code for fast API call case---clean preallocated space.
- if (can_do_fast_api_call) {
- __ bind(&miss_cleanup);
- FreeSpaceForFastApiCall(masm);
- __ b(miss_label);
- }
-
- // Invoke a regular function.
- __ bind(&regular_invoke);
- if (can_do_fast_api_call) {
- FreeSpaceForFastApiCall(masm);
- }
-
- return masm->isolate()->heap()->undefined_value();
- }
-
- void CompileRegular(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- String* name,
- JSObject* interceptor_holder,
- Label* miss_label) {
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3, name,
- miss_label);
-
- // Call a runtime function to load the interceptor property.
- __ EnterInternalFrame();
- // Save the name_ register across the call.
- __ push(name_);
-
- PushInterceptorArguments(masm,
- receiver,
- holder,
- name_,
- interceptor_holder);
-
- __ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
- masm->isolate()),
- 5);
-
- // Restore the name_ register.
- __ pop(name_);
- __ LeaveInternalFrame();
- }
-
- void LoadWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- JSObject* holder_obj,
- Register scratch,
- Label* interceptor_succeeded) {
- __ EnterInternalFrame();
- __ Push(holder, name_);
-
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
-
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- __ LeaveInternalFrame();
-
- // If interceptor returns no-result sentinel, call the constant function.
- __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
- __ cmp(r0, scratch);
- __ b(ne, interceptor_succeeded);
- }
-
- StubCompiler* stub_compiler_;
- const ParameterCount& arguments_;
- Register name_;
-};
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
- MacroAssembler* masm,
- GlobalObject* global,
- String* name,
- Register scratch,
- Label* miss) {
- Object* probe;
- { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
- ASSERT(cell->value()->IsTheHole());
- __ mov(scratch, Operand(Handle<Object>(cell)));
- __ ldr(scratch,
- FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch, ip);
- __ b(ne, miss);
- return cell;
-}
-
-// Calls GenerateCheckPropertyCell for each global object in the prototype chain
-// from object to (but not including) holder.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
- MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- Register scratch,
- Label* miss) {
- JSObject* current = object;
- while (current != holder) {
- if (current->IsGlobalObject()) {
- // Returns a cell or a failure.
- MaybeObject* result = GenerateCheckPropertyCell(
- masm,
- GlobalObject::cast(current),
- name,
- scratch,
- miss);
- if (result->IsFailure()) return result;
- }
- ASSERT(current->IsJSObject());
- current = JSObject::cast(current->GetPrototype());
- }
- return NULL;
-}
-
-
-// Convert and store int passed in register ival to IEEE 754 single precision
-// floating point value at memory location (dst + 4 * wordoffset)
-// If VFP3 is available use it for conversion.
-static void StoreIntAsFloat(MacroAssembler* masm,
- Register dst,
- Register wordoffset,
- Register ival,
- Register fval,
- Register scratch1,
- Register scratch2) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, ival);
- __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
- __ vcvt_f32_s32(s0, s0);
- __ vstr(s0, scratch1, 0);
- } else {
- Label not_special, done;
- // Move sign bit from source to destination. This works because the sign
- // bit in the exponent word of the double has the same position and polarity
- // as the 2's complement sign bit in a Smi.
- ASSERT(kBinary32SignMask == 0x80000000u);
-
- __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
- // Negate value if it is negative.
- __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);
-
- // We have -1, 0 or 1, which we treat specially. Register ival contains
- // absolute value: it is either equal to 1 (special case of -1 and 1),
- // greater than 1 (not a special case) or less than 1 (special case of 0).
- __ cmp(ival, Operand(1));
- __ b(gt, &not_special);
-
- // For 1 or -1 we need to or in the 0 exponent (biased).
- static const uint32_t exponent_word_for_1 =
- kBinary32ExponentBias << kBinary32ExponentShift;
-
- __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
- __ b(&done);
-
- __ bind(&not_special);
- // Count leading zeros.
- // Gets the wrong answer for 0, but we already checked for that case above.
- Register zeros = scratch2;
- __ CountLeadingZeros(zeros, ival, scratch1);
-
- // Compute exponent and or it into the exponent register.
- __ rsb(scratch1,
- zeros,
- Operand((kBitsPerInt - 1) + kBinary32ExponentBias));
-
- __ orr(fval,
- fval,
- Operand(scratch1, LSL, kBinary32ExponentShift));
-
- // Shift up the source chopping the top bit off.
- __ add(zeros, zeros, Operand(1));
- // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
- __ mov(ival, Operand(ival, LSL, zeros));
- // And the top (top 20 bits).
- __ orr(fval,
- fval,
- Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
-
- __ bind(&done);
- __ str(fval, MemOperand(dst, wordoffset, LSL, 2));
- }
-}
-
-
-// Convert unsigned integer with specified number of leading zeroes in binary
-// representation to IEEE 754 double.
-// Integer to convert is passed in register hiword.
-// Resulting double is returned in registers hiword:loword.
-// This functions does not work correctly for 0.
-static void GenerateUInt2Double(MacroAssembler* masm,
- Register hiword,
- Register loword,
- Register scratch,
- int leading_zeroes) {
- const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
- const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
-
- const int mantissa_shift_for_hi_word =
- meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
-
- const int mantissa_shift_for_lo_word =
- kBitsPerInt - mantissa_shift_for_hi_word;
-
- __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
- if (mantissa_shift_for_hi_word > 0) {
- __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
- __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
- } else {
- __ mov(loword, Operand(0, RelocInfo::NONE));
- __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
- }
-
- // If least significant bit of biased exponent was not 1 it was corrupted
- // by most significant bit of mantissa so we should fix that.
- if (!(biased_exponent & 1)) {
- __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
- }
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Register StubCompiler::CheckPrototypes(JSObject* object,
- Register object_reg,
- JSObject* holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- String* name,
- int save_at_depth,
- Label* miss) {
- // Make sure there's no overlap between holder and object registers.
- ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- if (save_at_depth == depth) {
- __ str(reg, MemOperand(sp));
- }
-
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- JSObject* current = object;
- while (current != holder) {
- depth++;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
-
- ASSERT(current->GetPrototype()->IsJSObject());
- JSObject* prototype = JSObject::cast(current->GetPrototype());
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
- if (!name->IsSymbol()) {
- MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
- Object* lookup_result = NULL; // Initialization to please compiler.
- if (!maybe_lookup_result->ToObject(&lookup_result)) {
- set_failure(Failure::cast(maybe_lookup_result));
- return reg;
- }
- name = String::cast(lookup_result);
- }
- ASSERT(current->property_dictionary()->FindEntry(name) ==
- StringDictionary::kNotFound);
-
- GenerateDictionaryNegativeLookup(masm(),
- miss,
- reg,
- name,
- scratch1,
- scratch2);
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // from now the object is in holder_reg
- __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else if (heap()->InNewSpace(prototype)) {
- // Get the map of the current object.
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ cmp(scratch1, Operand(Handle<Map>(current->map())));
-
- // Branch on the result of the map check.
- __ b(ne, miss);
-
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- // Restore scratch register to be the map of the object. In the
- // new space case below, we load the prototype from the map in
- // the scratch register.
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- }
-
- reg = holder_reg; // from now the object is in holder_reg
- // The prototype is in new space; we cannot store a reference
- // to it in the code. Load it from the map.
- __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else {
- // Check the map of the current object.
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ cmp(scratch1, Operand(Handle<Map>(current->map())));
- // Branch on the result of the map check.
- __ b(ne, miss);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
- // The prototype is in old space; load it directly.
- reg = holder_reg; // from now the object is in holder_reg
- __ mov(reg, Operand(Handle<JSObject>(prototype)));
- }
-
- if (save_at_depth == depth) {
- __ str(reg, MemOperand(sp));
- }
-
- // Go to the next object in the prototype chain.
- current = prototype;
- }
-
- // Check the holder map.
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ cmp(scratch1, Operand(Handle<Map>(current->map())));
- __ b(ne, miss);
-
- // Log the check depth.
- LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
-
- // Perform security check for access to the global object.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- if (holder->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- };
-
- // If we've skipped any global objects, it's not enough to verify
- // that their maps haven't changed. We also need to check that the
- // property cell for the property is still empty.
- MaybeObject* result = GenerateCheckPropertyCells(masm(),
- object,
- holder,
- name,
- scratch1,
- miss);
- if (result->IsFailure()) set_failure(Failure::cast(result));
-
- // Return the register containing the holder.
- return reg;
-}
-
-
-void StubCompiler::GenerateLoadField(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int index,
- String* name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss);
-
- // Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
- name, miss);
- GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
- __ Ret();
-}
-
-
-void StubCompiler::GenerateLoadConstant(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Object* value,
- String* name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss);
-
- // Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, scratch3, name, miss);
-
- // Return the constant value.
- __ mov(r0, Operand(Handle<Object>(value)));
- __ Ret();
-}
-
-
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss);
-
- // Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
- name, miss);
-
- // Build AccessorInfo::args_ list on the stack and push property name below
- // the exit frame to make GC aware of them and store pointers to them.
- __ push(receiver);
- __ mov(scratch2, sp); // scratch2 = AccessorInfo::args_
- Handle<AccessorInfo> callback_handle(callback);
- if (heap()->InNewSpace(callback_handle->data())) {
- __ Move(scratch3, callback_handle);
- __ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
- } else {
- __ Move(scratch3, Handle<Object>(callback_handle->data()));
- }
- __ Push(reg, scratch3, name_reg);
- __ mov(r0, sp); // r0 = Handle<String>
-
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
-
- const int kApiStackSpace = 1;
- __ EnterExitFrame(false, kApiStackSpace);
- // Create AccessorInfo instance on the stack above the exit frame with
- // scratch2 (internal::Object **args_) as the data.
- __ str(scratch2, MemOperand(sp, 1 * kPointerSize));
- __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
-
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
- const int kStackUnwindSpace = 4;
- ExternalReference ref =
- ExternalReference(&fun,
- ExternalReference::DIRECT_GETTER_CALL,
- masm()->isolate());
- return masm()->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
-}
-
-
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- String* name,
- Label* miss) {
- ASSERT(interceptor_holder->HasNamedInterceptor());
- ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // So far the most popular follow ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only them, other cases may be added
- // later.
- bool compile_followup_inline = false;
- if (lookup->IsProperty() && lookup->IsCacheable()) {
- if (lookup->type() == FIELD) {
- compile_followup_inline = true;
- } else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo() &&
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
- compile_followup_inline = true;
- }
- }
-
- if (compile_followup_inline) {
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- __ EnterInternalFrame();
-
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- // CALLBACKS case needs a receiver to be passed into C++ callback.
- __ Push(receiver, holder_reg, name_reg);
- } else {
- __ Push(holder_reg, name_reg);
- }
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
- holder_reg,
- name_reg,
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
- __ cmp(r0, scratch1);
- __ b(eq, &interceptor_failed);
- __ LeaveInternalFrame();
- __ Ret();
-
- __ bind(&interceptor_failed);
- __ pop(name_reg);
- __ pop(holder_reg);
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- __ pop(receiver);
- }
-
- __ LeaveInternalFrame();
-
- // Check that the maps from interceptor's holder to lookup's holder
- // haven't changed. And load lookup's holder into |holder| register.
- if (interceptor_holder != lookup->holder()) {
- holder_reg = CheckPrototypes(interceptor_holder,
- holder_reg,
- lookup->holder(),
- scratch1,
- scratch2,
- scratch3,
- name,
- miss);
- }
-
- if (lookup->type() == FIELD) {
- // We found FIELD property in prototype chain of interceptor's holder.
- // Retrieve a field from field's holder.
- GenerateFastPropertyLoad(masm(), r0, holder_reg,
- lookup->holder(), lookup->GetFieldIndex());
- __ Ret();
- } else {
- // We found CALLBACKS property in prototype chain of interceptor's
- // holder.
- ASSERT(lookup->type() == CALLBACKS);
- ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
- ASSERT(callback != NULL);
- ASSERT(callback->getter() != NULL);
-
- // Tail call to runtime.
- // Important invariant in CALLBACKS case: the code above must be
- // structured to never clobber |receiver| register.
- __ Move(scratch2, Handle<AccessorInfo>(callback));
- // holder_reg is either receiver or scratch1.
- if (!receiver.is(holder_reg)) {
- ASSERT(scratch1.is(holder_reg));
- __ Push(receiver, holder_reg);
- __ ldr(scratch3,
- FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
- __ Push(scratch3, scratch2, name_reg);
- } else {
- __ push(receiver);
- __ ldr(scratch3,
- FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
- __ Push(holder_reg, scratch3, scratch2, name_reg);
- }
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
- masm()->isolate());
- __ TailCallExternalReference(ref, 5, 1);
- }
- } else { // !compile_followup_inline
- // Call the runtime system to load the interceptor.
- // Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- PushInterceptorArguments(masm(), receiver, holder_reg,
- name_reg, interceptor_holder);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
- masm()->isolate());
- __ TailCallExternalReference(ref, 5, 1);
- }
-}
-
-
-void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
- if (kind_ == Code::KEYED_CALL_IC) {
- __ cmp(r2, Operand(Handle<String>(name)));
- __ b(ne, miss);
- }
-}
-
-
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
- JSObject* holder,
- String* name,
- Label* miss) {
- ASSERT(holder->IsGlobalObject());
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- // Get the receiver from the stack.
- __ ldr(r0, MemOperand(sp, argc * kPointerSize));
-
- // If the object is the holder then we know that it's a global
- // object which can only happen for contextual calls. In this case,
- // the receiver cannot be a smi.
- if (object != holder) {
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, miss);
- }
-
- // Check that the maps haven't changed.
- CheckPrototypes(object, r0, holder, r3, r1, r4, name, miss);
-}
-
-
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
- JSFunction* function,
- Label* miss) {
- // Get the value from the cell.
- __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
- __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
-
- // Check that the cell contains the same function.
- if (heap()->InNewSpace(function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, miss);
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
- __ b(ne, miss);
-
- // Check the shared function info. Make sure it hasn't changed.
- __ Move(r3, Handle<SharedFunctionInfo>(function->shared()));
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ cmp(r4, r3);
- __ b(ne, miss);
- } else {
- __ cmp(r1, Operand(Handle<JSFunction>(function)));
- __ b(ne, miss);
- }
-}
-
-
-MaybeObject* CallStubCompiler::GenerateMissBranch() {
- MaybeObject* maybe_obj = masm()->isolate()->stub_cache()->ComputeCallMiss(
- arguments().immediate(), kind_);
- Object* obj;
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
- return obj;
-}
-
-
-MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
- JSObject* holder,
- int index,
- String* name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- const int argc = arguments().immediate();
-
- // Get the receiver of the function from the stack into r0.
- __ ldr(r0, MemOperand(sp, argc * kPointerSize));
- // Check that the receiver isn't a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &miss);
-
- // Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
- GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
-
- GenerateCallFunction(masm(), object, arguments(), &miss);
-
- // Handle call cache miss.
- __ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(FIELD, name);
-}
-
-
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
-
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- Register receiver = r1;
-
- // Get the receiver from the stack
- const int argc = arguments().immediate();
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), receiver,
- holder, r3, r0, r4, name, &miss);
-
- if (argc == 0) {
- // Nothing to do, just return the length.
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Drop(argc + 1);
- __ Ret();
- } else {
- Label call_builtin;
-
- Register elements = r3;
- Register end_elements = r5;
-
- // Get the elements array of the object.
- __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements, r0,
- Heap::kFixedArrayMapRootIndex, &call_builtin, true);
-
- if (argc == 1) { // Otherwise fall through to call the builtin.
- Label exit, with_write_barrier, attempt_to_grow_elements;
-
- // Get the array's length into r0 and calculate new length.
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ add(r0, r0, Operand(Smi::FromInt(argc)));
-
- // Get the element's length.
- __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmp(r0, r4);
- __ b(gt, &attempt_to_grow_elements);
-
- // Save new length.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Push the element.
- __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ add(end_elements, elements,
- Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- const int kEndElementsOffset =
- FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
- __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
-
- // Check for a smi.
- __ JumpIfNotSmi(r4, &with_write_barrier);
- __ bind(&exit);
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&with_write_barrier);
- __ InNewSpace(elements, r4, eq, &exit);
- __ RecordWriteHelper(elements, end_elements, r4);
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&attempt_to_grow_elements);
- // r0: array's length + 1.
- // r4: elements' length.
-
- if (!FLAG_inline_new) {
- __ b(&call_builtin);
- }
-
- Isolate* isolate = masm()->isolate();
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate);
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate);
-
- const int kAllocationDelta = 4;
- // Load top and check if it is the end of elements.
- __ add(end_elements, elements,
- Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(end_elements, end_elements, Operand(kEndElementsOffset));
- __ mov(r7, Operand(new_space_allocation_top));
- __ ldr(r6, MemOperand(r7));
- __ cmp(end_elements, r6);
- __ b(ne, &call_builtin);
-
- __ mov(r9, Operand(new_space_allocation_limit));
- __ ldr(r9, MemOperand(r9));
- __ add(r6, r6, Operand(kAllocationDelta * kPointerSize));
- __ cmp(r6, r9);
- __ b(hi, &call_builtin);
-
- // We fit and could grow elements.
- // Update new_space_allocation_top.
- __ str(r6, MemOperand(r7));
- // Push the argument.
- __ ldr(r6, MemOperand(sp, (argc - 1) * kPointerSize));
- __ str(r6, MemOperand(end_elements));
- // Fill the rest with holes.
- __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < kAllocationDelta; i++) {
- __ str(r6, MemOperand(end_elements, i * kPointerSize));
- }
-
- // Update elements' and array's sizes.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta)));
- __ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Elements are in new space, so write barrier is not required.
- __ Drop(argc + 1);
- __ Ret();
- }
- __ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
- masm()->isolate()),
- argc + 1,
- 1);
- }
-
- // Handle call cache miss.
- __ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
-
- Label miss, return_undefined, call_builtin;
-
- Register receiver = r1;
- Register elements = r3;
-
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack
- const int argc = arguments().immediate();
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object),
- receiver, holder, elements, r4, r0, name, &miss);
-
- // Get the elements array of the object.
- __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements, r0, Heap::kFixedArrayMapRootIndex, &call_builtin, true);
-
- // Get the array's length into r4 and calculate new length.
- __ ldr(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ sub(r4, r4, Operand(Smi::FromInt(1)), SetCC);
- __ b(lt, &return_undefined);
-
- // Get the last element.
- __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- // We can't address the last element in one operation. Compute the more
- // expensive shift first, and use an offset later on.
- __ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r0, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
- __ cmp(r0, r6);
- __ b(eq, &call_builtin);
-
- // Set the array's length.
- __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Fill with the hole.
- __ str(r6, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&return_undefined);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop,
- masm()->isolate()),
- argc + 1,
- 1);
-
- // Handle call cache miss.
- __ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return heap()->undefined_value();
-
- const int argc = arguments().immediate();
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
- index_out_of_range_label = &miss;
- }
-
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- r0,
- &miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
- r1, r3, r4, name, &miss);
-
- Register receiver = r1;
- Register index = r4;
- Register scratch = r3;
- Register result = r0;
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
- if (argc > 0) {
- __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharCodeAtGenerator char_code_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_code_at_generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- char_code_at_generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(r0, Heap::kNanValueRootIndex);
- __ Drop(argc + 1);
- __ Ret();
- }
-
- __ bind(&miss);
- // Restore function name in r2.
- __ Move(r2, Handle<String>(name));
- __ bind(&name_miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return heap()->undefined_value();
-
- const int argc = arguments().immediate();
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
- index_out_of_range_label = &miss;
- }
-
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- r0,
- &miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
- r1, r3, r4, name, &miss);
-
- Register receiver = r0;
- Register index = r4;
- Register scratch1 = r1;
- Register scratch2 = r3;
- Register result = r0;
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
- if (argc > 0) {
- __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch1,
- scratch2,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_at_generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(r0, Heap::kEmptyStringRootIndex);
- __ Drop(argc + 1);
- __ Ret();
- }
-
- __ bind(&miss);
- // Restore function name in r2.
- __ Move(r2, Handle<String>(name));
- __ bind(&name_miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell == NULL) {
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
-
- CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
- &miss);
- } else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the char code argument.
- Register code = r1;
- __ ldr(code, MemOperand(sp, 0 * kPointerSize));
-
- // Check the code is a smi.
- Label slow;
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(code, Operand(kSmiTagMask));
- __ b(ne, &slow);
-
- // Convert the smi code to uint16.
- __ and_(code, code, Operand(Smi::FromInt(0xffff)));
-
- StringCharFromCodeGenerator char_from_code_generator(code, r0);
- char_from_code_generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- char_from_code_generator.GenerateSlow(masm(), call_helper);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
-
- __ bind(&miss);
- // r2: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
-}
-
-
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- if (!CpuFeatures::IsSupported(VFP3)) {
- return heap()->undefined_value();
- }
-
- CpuFeatures::Scope scope_vfp3(VFP3);
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
-
- Label miss, slow;
- GenerateNameCheck(name, &miss);
-
- if (cell == NULL) {
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(r1, &miss);
-
- CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
- &miss);
- } else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into r0.
- __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
-
- // If the argument is a smi, just return.
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(r0, Operand(kSmiTagMask));
- __ Drop(argc + 1, eq);
- __ Ret(eq);
-
- __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, true);
-
- Label wont_fit_smi, no_vfp_exception, restore_fpscr_and_return;
-
- // If vfp3 is enabled, we use the fpu rounding with the RM (round towards
- // minus infinity) mode.
-
- // Load the HeapNumber value.
- // We will need access to the value in the core registers, so we load it
- // with ldrd and move it to the fpu. It also spares a sub instruction for
- // updating the HeapNumber value address, as vldr expects a multiple
- // of 4 offset.
- __ Ldrd(r4, r5, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ vmov(d1, r4, r5);
-
- // Backup FPSCR.
- __ vmrs(r3);
- // Set custom FPCSR:
- // - Set rounding mode to "Round towards Minus Infinity"
- // (ie bits [23:22] = 0b10).
- // - Clear vfp cumulative exception flags (bits [3:0]).
- // - Make sure Flush-to-zero mode control bit is unset (bit 22).
- __ bic(r9, r3,
- Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask));
- __ orr(r9, r9, Operand(kRoundToMinusInf));
- __ vmsr(r9);
-
- // Convert the argument to an integer.
- __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
-
- // Use vcvt latency to start checking for special cases.
- // Get the argument exponent and clear the sign bit.
- __ bic(r6, r5, Operand(HeapNumber::kSignMask));
- __ mov(r6, Operand(r6, LSR, HeapNumber::kMantissaBitsInTopWord));
-
- // Retrieve FPSCR and check for vfp exceptions.
- __ vmrs(r9);
- __ tst(r9, Operand(kVFPExceptionMask));
- __ b(&no_vfp_exception, eq);
-
- // Check for NaN, Infinity, and -Infinity.
- // They are invariant through a Math.Floor call, so just
- // return the original argument.
- __ sub(r7, r6, Operand(HeapNumber::kExponentMask
- >> HeapNumber::kMantissaBitsInTopWord), SetCC);
- __ b(&restore_fpscr_and_return, eq);
- // We had an overflow or underflow in the conversion. Check if we
- // have a big exponent.
- __ cmp(r7, Operand(HeapNumber::kMantissaBits));
- // If greater or equal, the argument is already round and in r0.
- __ b(&restore_fpscr_and_return, ge);
- __ b(&wont_fit_smi);
-
- __ bind(&no_vfp_exception);
- // Move the result back to general purpose register r0.
- __ vmov(r0, s0);
- // Check if the result fits into a smi.
- __ add(r1, r0, Operand(0x40000000), SetCC);
- __ b(&wont_fit_smi, mi);
- // Tag the result.
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
-
- // Check for -0.
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- __ b(&restore_fpscr_and_return, ne);
- // r5 already holds the HeapNumber exponent.
- __ tst(r5, Operand(HeapNumber::kSignMask));
- // If our HeapNumber is negative it was -0, so load its address and return.
- // Else r0 is loaded with 0, so we can also just return.
- __ ldr(r0, MemOperand(sp, 0 * kPointerSize), ne);
-
- __ bind(&restore_fpscr_and_return);
- // Restore FPSCR and return.
- __ vmsr(r3);
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&wont_fit_smi);
- // Restore FPCSR and fall to slow case.
- __ vmsr(r3);
-
- __ bind(&slow);
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
-
- __ bind(&miss);
- // r2: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
-}
-
-
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell == NULL) {
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
-
- CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
- &miss);
- } else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into r0.
- __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
-
- // Check if the argument is a smi.
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(r0, &not_smi);
-
- // Do bitwise not or do nothing depending on the sign of the
- // argument.
- __ eor(r1, r0, Operand(r0, ASR, kBitsPerInt - 1));
-
- // Add 1 or do nothing depending on the sign of the argument.
- __ sub(r0, r1, Operand(r0, ASR, kBitsPerInt - 1), SetCC);
-
- // If the result is still negative, go to the slow case.
- // This only happens for the most negative smi.
- Label slow;
- __ b(mi, &slow);
-
- // Smi case done.
- __ Drop(argc + 1);
- __ Ret();
-
- // Check if the argument is a heap number and load its exponent and
- // sign.
- __ bind(&not_smi);
- __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, true);
- __ ldr(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-
- // Check the sign of the argument. If the argument is positive,
- // just return it.
- Label negative_sign;
- __ tst(r1, Operand(HeapNumber::kSignMask));
- __ b(ne, &negative_sign);
- __ Drop(argc + 1);
- __ Ret();
-
- // If the argument is negative, clear the sign, and return a new
- // number.
- __ bind(&negative_sign);
- __ eor(r1, r1, Operand(HeapNumber::kSignMask));
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r0, r4, r5, r6, &slow);
- __ str(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ str(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ Drop(argc + 1);
- __ Ret();
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
-
- __ bind(&miss);
- // r2: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
-}
-
-
-MaybeObject* CallStubCompiler::CompileFastApiCall(
- const CallOptimization& optimization,
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- Counters* counters = isolate()->counters();
-
- ASSERT(optimization.is_simple_api_call());
- // Bail out if object is a global object as we don't want to
- // repatch it to global receiver.
- if (object->IsGlobalObject()) return heap()->undefined_value();
- if (cell != NULL) return heap()->undefined_value();
- int depth = optimization.GetPrototypeDepthOfExpectedType(
- JSObject::cast(object), holder);
- if (depth == kInvalidProtoDepth) return heap()->undefined_value();
-
- Label miss, miss_before_stack_reserved;
-
- GenerateNameCheck(name, &miss_before_stack_reserved);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss_before_stack_reserved);
-
- __ IncrementCounter(counters->call_const(), 1, r0, r3);
- __ IncrementCounter(counters->call_const_fast_api(), 1, r0, r3);
-
- ReserveSpaceForFastApiCall(masm(), r0);
-
- // Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
- depth, &miss);
-
- MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
- if (result->IsFailure()) return result;
-
- __ bind(&miss);
- FreeSpaceForFastApiCall(masm());
-
- __ bind(&miss_before_stack_reserved);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
- CheckType check) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, NULL, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
- }
-
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack
- const int argc = arguments().immediate();
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- if (check != NUMBER_CHECK) {
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
- }
-
- // Make sure that it's okay not to patch the on stack receiver
- // unless we're doing a receiver map check.
- ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
- SharedFunctionInfo* function_info = function->shared();
- switch (check) {
- case RECEIVER_MAP_CHECK:
- __ IncrementCounter(masm()->isolate()->counters()->call_const(),
- 1, r0, r3);
-
- // Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
- &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc * kPointerSize));
- }
- break;
-
- case STRING_CHECK:
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- } else {
- // Check that the object is a two-byte string or a symbol.
- __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
- __ b(hs, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
- r1, r4, name, &miss);
- }
- break;
-
- case NUMBER_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- } else {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &fast);
- __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE);
- __ b(ne, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
- r1, r4, name, &miss);
- }
- break;
- }
-
- case BOOLEAN_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- } else {
- Label fast;
- // Check that the object is a boolean.
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r1, ip);
- __ b(eq, &fast);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
- r1, r4, name, &miss);
- }
- break;
- }
-
- default:
- UNREACHABLE();
- }
-
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
-
- // Handle call cache miss.
- __ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- LookupResult lookup;
- LookupPostInterceptor(holder, name, &lookup);
-
- // Get the receiver from the stack.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- CallInterceptorCompiler compiler(this, arguments(), r2);
- MaybeObject* result = compiler.Compile(masm(),
- object,
- holder,
- name,
- &lookup,
- r1,
- r3,
- r4,
- r0,
- &miss);
- if (result->IsFailure()) {
- return result;
- }
-
- // Move returned value, the function to call, to r1.
- __ mov(r1, r0);
- // Restore receiver.
- __ ldr(r0, MemOperand(sp, argc * kPointerSize));
-
- GenerateCallFunction(masm(), object, arguments(), &miss);
-
- // Handle call cache miss.
- __ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(INTERCEPTOR, name);
-}
-
-
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, cell, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
- }
-
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- GenerateGlobalReceiverCheck(object, holder, name, &miss);
-
- GenerateLoadFunctionFromCell(cell, function, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc * kPointerSize));
- }
-
- // Setup the context (function already in r1).
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->call_global_inline(), 1, r3, r4);
- ASSERT(function->is_compiled());
- Handle<Code> code(function->code());
- ParameterCount expected(function->shared()->formal_parameter_count());
- if (V8::UseCrankshaft()) {
- // TODO(kasperl): For now, we always call indirectly through the
- // code field in the function to allow recompilation to take effect
- // without changing any of the call sites.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION);
- } else {
- __ InvokeCode(code, expected, arguments(),
- RelocInfo::CODE_TARGET, JUMP_FUNCTION);
- }
-
- // Handle call cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->call_global_inline_miss(), 1, r1, r3);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(NORMAL, name);
-}
-
-
-MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
- int index,
- Map* transition,
- String* name) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- r1, r2, r3,
- &miss);
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
-}
-
-
-MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
- AccessorInfo* callback,
- String* name) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Check that the object isn't a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
-
- // Check that the map of the object hasn't changed.
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r3, Operand(Handle<Map>(object->map())));
- __ b(ne, &miss);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(r1, r3, &miss);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- __ push(r1); // receiver
- __ mov(ip, Operand(Handle<AccessorInfo>(callback))); // callback info
- __ Push(ip, r2, r0);
-
- // Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty),
- masm()->isolate());
- __ TailCallExternalReference(store_callback_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(CALLBACKS, name);
-}
-
-
-MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
- String* name) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Check that the object isn't a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
-
- // Check that the map of the object hasn't changed.
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r3, Operand(Handle<Map>(receiver->map())));
- __ b(ne, &miss);
-
- // Perform global security token check if needed.
- if (receiver->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(r1, r3, &miss);
- }
-
- // Stub is never generated for non-global objects that require access
- // checks.
- ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
-
- __ Push(r1, r2, r0); // Receiver, name, value.
-
- __ mov(r0, Operand(Smi::FromInt(strict_mode_)));
- __ push(r0); // strict mode
-
- // Do tail-call to the runtime system.
- ExternalReference store_ic_property =
- ExternalReference(IC_Utility(IC::kStoreInterceptorProperty),
- masm()->isolate());
- __ TailCallExternalReference(store_ic_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(INTERCEPTOR, name);
-}
-
-
-MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
- JSGlobalPropertyCell* cell,
- String* name) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the global has not changed.
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r3, Operand(Handle<Map>(object->map())));
- __ b(ne, &miss);
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted and reintroducing the global needs
- // to update the property details in the property dictionary of the
- // global object. We bail out to the runtime system to do that.
- __ mov(r4, Operand(Handle<JSGlobalPropertyCell>(cell)));
- __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
- __ ldr(r6, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
- __ cmp(r5, r6);
- __ b(eq, &miss);
-
- // Store the value in the cell.
- __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
- __ Ret();
-
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->named_store_global_inline_miss(), 1, r4, r3);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(NORMAL, name);
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
- JSObject* object,
- JSObject* last) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Check that receiver is not a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &miss);
-
- // Check the maps of the full prototype chain.
- CheckPrototypes(object, r0, last, r3, r1, r4, name, &miss);
-
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (last->IsGlobalObject()) {
- MaybeObject* cell = GenerateCheckPropertyCell(masm(),
- GlobalObject::cast(last),
- name,
- r1,
- &miss);
- if (cell->IsFailure()) {
- miss.Unuse();
- return cell;
- }
- }
-
- // Return undefined if maps of the full prototype chain are still the
- // same and no global property with this name contains a value.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ Ret();
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(NONEXISTENT, heap()->empty_string());
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
- JSObject* holder,
- int index,
- String* name) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- GenerateLoadField(object, holder, r0, r3, r1, r4, index, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(FIELD, name);
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- MaybeObject* result = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
- callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(CALLBACKS, name);
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
- JSObject* holder,
- Object* value,
- String* name) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- GenerateLoadConstant(object, holder, r0, r3, r1, r4, value, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- LookupResult lookup;
- LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(object,
- holder,
- &lookup,
- r0,
- r2,
- r3,
- r1,
- r4,
- name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(INTERCEPTOR, name);
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- String* name,
- bool is_dont_delete) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // If the object is the holder then we know that it's a global
- // object which can only happen for contextual calls. In this case,
- // the receiver cannot be a smi.
- if (object != holder) {
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &miss);
- }
-
- // Check that the map of the global has not changed.
- CheckPrototypes(object, r0, holder, r3, r4, r1, name, &miss);
-
- // Get the value from the cell.
- __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
- __ ldr(r4, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
-
- // Check for deleted property if property can actually be deleted.
- if (!is_dont_delete) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r4, ip);
- __ b(eq, &miss);
- }
-
- __ mov(r0, r4);
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
- __ Ret();
-
- __ bind(&miss);
- __ IncrementCounter(counters->named_load_global_stub_miss(), 1, r1, r3);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(NORMAL, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
- int index) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(Handle<String>(name)));
- __ b(ne, &miss);
-
- GenerateLoadField(receiver, holder, r1, r2, r3, r4, index, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(FIELD, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
- String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(Handle<String>(name)));
- __ b(ne, &miss);
-
- MaybeObject* result = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3,
- r4, callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(CALLBACKS, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(Handle<String>(name)));
- __ b(ne, &miss);
-
- GenerateLoadConstant(receiver, holder, r1, r2, r3, r4, value, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
- JSObject* holder,
- String* name) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(Handle<String>(name)));
- __ b(ne, &miss);
-
- LookupResult lookup;
- LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver,
- holder,
- &lookup,
- r1,
- r0,
- r2,
- r3,
- r4,
- name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(INTERCEPTOR, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(Handle<String>(name)));
- __ b(ne, &miss);
-
- GenerateLoadArrayLength(masm(), r1, r2, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(CALLBACKS, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_string_length(), 1, r2, r3);
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(Handle<String>(name)));
- __ b(ne, &miss);
-
- GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_string_length(), 1, r2, r3);
-
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(CALLBACKS, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
-
- // Check the name hasn't changed.
- __ cmp(r0, Operand(Handle<String>(name)));
- __ b(ne, &miss);
-
- GenerateLoadFunctionPrototype(masm(), r1, r2, r3, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(CALLBACKS, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check that the receiver isn't a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
-
- // Check that the map matches.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r2, Operand(Handle<Map>(receiver->map())));
- __ b(ne, &miss);
-
- // Check that the key is a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &miss);
-
- // Get the elements array.
- __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ AssertFastElements(r2);
-
- // Check that the key is within bounds.
- __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
- __ cmp(r0, Operand(r3));
- __ b(hs, &miss);
-
- // Load the result and make sure it's not the hole.
- __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ ldr(r4,
- MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r4, ip);
- __ b(eq, &miss);
- __ mov(r0, r4);
- __ Ret();
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(NORMAL, NULL);
-}
-
-
-MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
- int index,
- Map* transition,
- String* name) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : name
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_store_field(), 1, r3, r4);
-
- // Check that the name has not changed.
- __ cmp(r1, Operand(Handle<String>(name)));
- __ b(ne, &miss);
-
- // r3 is used as scratch register. r1 and r2 keep their values if a jump to
- // the miss label is generated.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- r2, r1, r3,
- &miss);
- __ bind(&miss);
-
- __ DecrementCounter(counters->keyed_store_field(), 1, r3, r4);
- Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
-}
-
-
-MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
- JSObject* receiver) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : scratch
- // -- r4 : scratch (elements)
- // -----------------------------------
- Label miss;
-
- Register value_reg = r0;
- Register key_reg = r1;
- Register receiver_reg = r2;
- Register scratch = r3;
- Register elements_reg = r4;
-
- // Check that the receiver isn't a smi.
- __ tst(receiver_reg, Operand(kSmiTagMask));
- __ b(eq, &miss);
-
- // Check that the map matches.
- __ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
- __ cmp(scratch, Operand(Handle<Map>(receiver->map())));
- __ b(ne, &miss);
-
- // Check that the key is a smi.
- __ tst(key_reg, Operand(kSmiTagMask));
- __ b(ne, &miss);
-
- // Get the elements array and make sure it is a fast element array, not 'cow'.
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ ldr(scratch, FieldMemOperand(elements_reg, HeapObject::kMapOffset));
- __ cmp(scratch, Operand(Handle<Map>(factory()->fixed_array_map())));
- __ b(ne, &miss);
-
- // Check that the key is within bounds.
- if (receiver->IsJSArray()) {
- __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- } else {
- __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- }
- // Compare smis.
- __ cmp(key_reg, scratch);
- __ b(hs, &miss);
-
- __ add(scratch,
- elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ str(value_reg,
- MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ RecordWrite(scratch,
- Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
- receiver_reg , elements_reg);
-
- // value_reg (r0) is preserved.
- // Done.
- __ Ret();
-
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(NORMAL, NULL);
-}
-
-
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
- // ----------- S t a t e -------------
- // -- r0 : argc
- // -- r1 : constructor
- // -- lr : return address
- // -- [sp] : last argument
- // -----------------------------------
- Label generic_stub_call;
-
- // Use r7 for holding undefined which is used in several places below.
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check to see whether there are any break points in the function code. If
- // there are jump to the generic constructor stub which calls the actual
- // code for the function thereby hitting the break points.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
- __ cmp(r2, r7);
- __ b(ne, &generic_stub_call);
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- // r1: constructor function
- // r7: undefined
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &generic_stub_call);
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ b(ne, &generic_stub_call);
-
-#ifdef DEBUG
- // Cannot construct functions this way.
- // r0: argc
- // r1: constructor function
- // r2: initial map
- // r7: undefined
- __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
- __ Check(ne, "Function constructed by construct stub.");
-#endif
-
- // Now allocate the JSObject in new space.
- // r0: argc
- // r1: constructor function
- // r2: initial map
- // r7: undefined
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ AllocateInNewSpace(r3,
- r4,
- r5,
- r6,
- &generic_stub_call,
- SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to initial
- // map and properties and elements are set to empty fixed array.
- // r0: argc
- // r1: constructor function
- // r2: initial map
- // r3: object size (in words)
- // r4: JSObject (not tagged)
- // r7: undefined
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(r5, r4);
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-
- // Calculate the location of the first argument. The stack contains only the
- // argc arguments.
- __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
-
- // Fill all the in-object properties with undefined.
- // r0: argc
- // r1: first argument
- // r3: object size (in words)
- // r4: JSObject (not tagged)
- // r5: First in-object property of JSObject (not tagged)
- // r7: undefined
- // Fill the initialized properties with a constant value or a passed argument
- // depending on the this.x = ...; assignment in the function.
- SharedFunctionInfo* shared = function->shared();
- for (int i = 0; i < shared->this_property_assignments_count(); i++) {
- if (shared->IsThisPropertyAssignmentArgument(i)) {
- Label not_passed, next;
- // Check if the argument assigned to the property is actually passed.
- int arg_number = shared->GetThisPropertyAssignmentArgument(i);
- __ cmp(r0, Operand(arg_number));
- __ b(le, &not_passed);
- // Argument passed - find it on the stack.
- __ ldr(r2, MemOperand(r1, (arg_number + 1) * -kPointerSize));
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- __ b(&next);
- __ bind(&not_passed);
- // Set the property to undefined.
- __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
- __ bind(&next);
- } else {
- // Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
- __ mov(r2, Operand(constant));
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- }
- }
-
- // Fill the unused in-object property fields with undefined.
- ASSERT(function->has_initial_map());
- for (int i = shared->this_property_assignments_count();
- i < function->initial_map()->inobject_properties();
- i++) {
- __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
- }
-
- // r0: argc
- // r4: JSObject (not tagged)
- // Move argc to r1 and the JSObject to return to r0 and tag it.
- __ mov(r1, r0);
- __ mov(r0, r4);
- __ orr(r0, r0, Operand(kHeapObjectTag));
-
- // r0: JSObject
- // r1: argc
- // Remove caller arguments and receiver from the stack and return.
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2));
- __ add(sp, sp, Operand(kPointerSize));
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1, r1, r2);
- __ IncrementCounter(counters->constructed_objects_stub(), 1, r1, r2);
- __ Jump(lr);
-
- // Jump to the generic stub in case the specialized code cannot handle the
- // construction.
- __ bind(&generic_stub_call);
- Handle<Code> code = masm()->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(code, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode();
-}
-
-
-static bool IsElementTypeSigned(ExternalArrayType array_type) {
- switch (array_type) {
- case kExternalByteArray:
- case kExternalShortArray:
- case kExternalIntArray:
- return true;
-
- case kExternalUnsignedByteArray:
- case kExternalUnsignedShortArray:
- case kExternalUnsignedIntArray:
- return false;
-
- default:
- UNREACHABLE();
- return false;
- }
-}
-
-
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
- JSObject* receiver_object,
- ExternalArrayType array_type,
- Code::Flags flags) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label slow, failed_allocation;
-
- Register key = r0;
- Register receiver = r1;
-
- // Check that the object isn't a smi
- __ JumpIfSmi(receiver, &slow);
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &slow);
-
- // Make sure that we've got the right map.
- __ ldr(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ cmp(r2, Operand(Handle<Map>(receiver_object->map())));
- __ b(ne, &slow);
-
- __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // r3: elements array
-
- // Check that the index is in range.
- __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
- __ cmp(ip, Operand(key, ASR, kSmiTagSize));
- // Unsigned comparison catches both negative and too-large values.
- __ b(lo, &slow);
-
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
- // r3: base pointer of external storage
-
- // We are not untagging smi key and instead work with it
- // as if it was premultiplied by 2.
- ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
-
- Register value = r2;
- switch (array_type) {
- case kExternalByteArray:
- __ ldrsb(value, MemOperand(r3, key, LSR, 1));
- break;
- case kExternalPixelArray:
- case kExternalUnsignedByteArray:
- __ ldrb(value, MemOperand(r3, key, LSR, 1));
- break;
- case kExternalShortArray:
- __ ldrsh(value, MemOperand(r3, key, LSL, 0));
- break;
- case kExternalUnsignedShortArray:
- __ ldrh(value, MemOperand(r3, key, LSL, 0));
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ ldr(value, MemOperand(r3, key, LSL, 1));
- break;
- case kExternalFloatArray:
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ add(r2, r3, Operand(key, LSL, 1));
- __ vldr(s0, r2, 0);
- } else {
- __ ldr(value, MemOperand(r3, key, LSL, 1));
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- // For integer array types:
- // r2: value
- // For floating-point array type
- // s0: value (if VFP3 is supported)
- // r2: value (if VFP3 is not supported)
-
- if (array_type == kExternalIntArray) {
- // For the Int and UnsignedInt array types, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- Label box_int;
- __ cmp(value, Operand(0xC0000000));
- __ b(mi, &box_int);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- __ bind(&box_int);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't touch r0 or r1 as they are needed if allocation
- // fails.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
- // Now we can use r0 for the result as key is not needed any more.
- __ mov(r0, r5);
-
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, value);
- __ vcvt_f64_s32(d0, s0);
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r3, HeapNumber::kValueOffset);
- __ Ret();
- } else {
- WriteInt32ToHeapNumberStub stub(value, r0, r3);
- __ TailCallStub(&stub);
- }
- } else if (array_type == kExternalUnsignedIntArray) {
- // The test is different for unsigned int values. Since we need
- // the value to be in the range of a positive smi, we can't
- // handle either of the top two bits being set in the value.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- Label box_int, done;
- __ tst(value, Operand(0xC0000000));
- __ b(ne, &box_int);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- __ bind(&box_int);
- __ vmov(s0, value);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
- // registers - also when jumping due to exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
-
- __ vcvt_f64_u32(d0, s0);
- __ sub(r1, r2, Operand(kHeapObjectTag));
- __ vstr(d0, r1, HeapNumber::kValueOffset);
-
- __ mov(r0, r2);
- __ Ret();
- } else {
- // Check whether unsigned integer fits into smi.
- Label box_int_0, box_int_1, done;
- __ tst(value, Operand(0x80000000));
- __ b(ne, &box_int_0);
- __ tst(value, Operand(0x40000000));
- __ b(ne, &box_int_1);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- Register hiword = value; // r2.
- Register loword = r3;
-
- __ bind(&box_int_0);
- // Integer does not have leading zeros.
- GenerateUInt2Double(masm(), hiword, loword, r4, 0);
- __ b(&done);
-
- __ bind(&box_int_1);
- // Integer has one leading zero.
- GenerateUInt2Double(masm(), hiword, loword, r4, 1);
-
-
- __ bind(&done);
- // Integer was converted to double in registers hiword:loword.
- // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
- // clobbers all registers - also when jumping due to exhausted young
- // space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r5, r7, r6, &slow);
-
- __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
- __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
-
- __ mov(r0, r4);
- __ Ret();
- }
- } else if (array_type == kExternalFloatArray) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
- __ vcvt_f64_f32(d0, s0);
- __ sub(r1, r2, Operand(kHeapObjectTag));
- __ vstr(d0, r1, HeapNumber::kValueOffset);
-
- __ mov(r0, r2);
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r3, r4, r5, r6, &slow);
- // VFP is not available, do manual single to double conversion.
-
- // r2: floating point value (binary32)
- // r3: heap number for result
-
- // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
- // the slow case from here.
- __ and_(r0, value, Operand(kBinary32MantissaMask));
-
- // Extract exponent to r1. OK to clobber r1 now as there are no jumps to
- // the slow case from here.
- __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
- __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
-
- Label exponent_rebiased;
- __ teq(r1, Operand(0x00));
- __ b(eq, &exponent_rebiased);
-
- __ teq(r1, Operand(0xff));
- __ mov(r1, Operand(0x7ff), LeaveCC, eq);
- __ b(eq, &exponent_rebiased);
-
- // Rebias exponent.
- __ add(r1,
- r1,
- Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
-
- __ bind(&exponent_rebiased);
- __ and_(r2, value, Operand(kBinary32SignMask));
- value = no_reg;
- __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
-
- // Shift mantissa.
- static const int kMantissaShiftForHiWord =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaShiftForLoWord =
- kBitsPerInt - kMantissaShiftForHiWord;
-
- __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
- __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
-
- __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
- __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
-
- __ mov(r0, r3);
- __ Ret();
- }
-
- } else {
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
- }
-
- // Slow case, key and receiver still in r0 and r1.
- __ bind(&slow);
- __ IncrementCounter(
- masm()->isolate()->counters()->keyed_load_external_array_slow(),
- 1, r2, r3);
-
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
-
- __ Push(r1, r0);
-
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-
- return GetCode(flags);
-}
-
-
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
- JSObject* receiver_object,
- ExternalArrayType array_type,
- Code::Flags flags) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label slow, check_heap_number;
-
- // Register usage.
- Register value = r0;
- Register key = r1;
- Register receiver = r2;
- // r3 mostly holds the elements array or the destination external array.
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, &slow);
-
- // Make sure that we've got the right map.
- __ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ cmp(r3, Operand(Handle<Map>(receiver_object->map())));
- __ b(ne, &slow);
-
- __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &slow);
-
- // Check that the index is in range
- __ SmiUntag(r4, key);
- __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
- __ cmp(r4, ip);
- // Unsigned comparison catches both negative and too-large values.
- __ b(hs, &slow);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // r3: external array.
- // r4: key (integer).
- if (array_type == kExternalPixelArray) {
- // Double to pixel conversion is only implemented in the runtime for now.
- __ JumpIfNotSmi(value, &slow);
- } else {
- __ JumpIfNotSmi(value, &check_heap_number);
- }
- __ SmiUntag(r5, value);
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
-
- // r3: base pointer of external storage.
- // r4: key (integer).
- // r5: value (integer).
- switch (array_type) {
- case kExternalPixelArray:
- // Clamp the value to [0..255].
- __ Usat(r5, 8, Operand(r5));
- __ strb(r5, MemOperand(r3, r4, LSL, 0));
- break;
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ strb(r5, MemOperand(r3, r4, LSL, 0));
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ strh(r5, MemOperand(r3, r4, LSL, 1));
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ str(r5, MemOperand(r3, r4, LSL, 2));
- break;
- case kExternalFloatArray:
- // Perform int-to-float conversion and store to memory.
- StoreIntAsFloat(masm(), r3, r4, r5, r6, r7, r9);
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- // Entry registers are intact, r0 holds the value which is the return value.
- __ Ret();
-
- if (array_type != kExternalPixelArray) {
- // r3: external array.
- // r4: index (integer).
- __ bind(&check_heap_number);
- __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
- __ b(ne, &slow);
-
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
-
- // r3: base pointer of external storage.
- // r4: key (integer).
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
-
- if (array_type == kExternalFloatArray) {
- // vldr requires offset to be a multiple of 4 so we can not
- // include -kHeapObjectTag into it.
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ add(r5, r3, Operand(r4, LSL, 2));
- __ vcvt_f32_f64(s0, d0);
- __ vstr(s0, r5, 0);
- } else {
- // Need to perform float-to-int conversion.
- // Test for NaN or infinity (both give zero).
- __ ldr(r6, FieldMemOperand(value, HeapNumber::kExponentOffset));
-
- // Hoisted load. vldr requires offset to be a multiple of 4 so we can
- // not include -kHeapObjectTag into it.
- __ sub(r5, value, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
-
- __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- // NaNs and Infinities have all-one exponents so they sign extend to -1.
- __ cmp(r6, Operand(-1));
- __ mov(r5, Operand(0), LeaveCC, eq);
-
- // Not infinity or NaN simply convert to int.
- if (IsElementTypeSigned(array_type)) {
- __ vcvt_s32_f64(s0, d0, kDefaultRoundToZero, ne);
- } else {
- __ vcvt_u32_f64(s0, d0, kDefaultRoundToZero, ne);
- }
- __ vmov(r5, s0, ne);
-
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ strb(r5, MemOperand(r3, r4, LSL, 0));
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ strh(r5, MemOperand(r3, r4, LSL, 1));
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ str(r5, MemOperand(r3, r4, LSL, 2));
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-
- // Entry registers are intact, r0 holds the value which is the return
- // value.
- __ Ret();
- } else {
- // VFP3 is not available do manual conversions.
- __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
- __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
-
- if (array_type == kExternalFloatArray) {
- Label done, nan_or_infinity_or_zero;
- static const int kMantissaInHiWordShift =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaInLoWordShift =
- kBitsPerInt - kMantissaInHiWordShift;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ mov(r7, Operand(HeapNumber::kExponentMask));
- __ and_(r9, r5, Operand(r7), SetCC);
- __ b(eq, &nan_or_infinity_or_zero);
-
- __ teq(r9, Operand(r7));
- __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
- __ b(eq, &nan_or_infinity_or_zero);
-
- // Rebias exponent.
- __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
- __ add(r9,
- r9,
- Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
-
- __ cmp(r9, Operand(kBinary32MaxExponent));
- __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
- __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
- __ b(gt, &done);
-
- __ cmp(r9, Operand(kBinary32MinExponent));
- __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
- __ b(lt, &done);
-
- __ and_(r7, r5, Operand(HeapNumber::kSignMask));
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
- __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
- __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
-
- __ bind(&done);
- __ str(r5, MemOperand(r3, r4, LSL, 2));
- // Entry registers are intact, r0 holds the value which is the return
- // value.
- __ Ret();
-
- __ bind(&nan_or_infinity_or_zero);
- __ and_(r7, r5, Operand(HeapNumber::kSignMask));
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r9, r9, r7);
- __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
- __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
- __ b(&done);
- } else {
- bool is_signed_type = IsElementTypeSigned(array_type);
- int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
- int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
-
- Label done, sign;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ mov(r7, Operand(HeapNumber::kExponentMask));
- __ and_(r9, r5, Operand(r7), SetCC);
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- __ b(eq, &done);
-
- __ teq(r9, Operand(r7));
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- __ b(eq, &done);
-
- // Unbias exponent.
- __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
- __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
- // If exponent is negative then result is 0.
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
- __ b(mi, &done);
-
- // If exponent is too big then result is minimal value.
- __ cmp(r9, Operand(meaningfull_bits - 1));
- __ mov(r5, Operand(min_value), LeaveCC, ge);
- __ b(ge, &done);
-
- __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
-
- __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
- __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
- __ b(pl, &sign);
-
- __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
- __ mov(r5, Operand(r5, LSL, r9));
- __ rsb(r9, r9, Operand(meaningfull_bits));
- __ orr(r5, r5, Operand(r6, LSR, r9));
-
- __ bind(&sign);
- __ teq(r7, Operand(0, RelocInfo::NONE));
- __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
-
- __ bind(&done);
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ strb(r5, MemOperand(r3, r4, LSL, 0));
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ strh(r5, MemOperand(r3, r4, LSL, 1));
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ str(r5, MemOperand(r3, r4, LSL, 2));
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- }
- }
-
- // Slow case: call runtime.
- __ bind(&slow);
-
- // Entry registers are intact.
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
-
- // Push receiver, key and value for runtime call.
- __ Push(r2, r1, r0);
-
- __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes
- __ mov(r0, Operand(Smi::FromInt(
- Code::ExtractExtraICStateFromFlags(flags) & kStrictMode)));
- __ Push(r1, r0);
-
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-
- return GetCode(flags);
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/virtual-frame-arm-inl.h b/src/3rdparty/v8/src/arm/virtual-frame-arm-inl.h
deleted file mode 100644
index 6a7902a..0000000
--- a/src/3rdparty/v8/src/arm/virtual-frame-arm-inl.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VIRTUAL_FRAME_ARM_INL_H_
-#define V8_VIRTUAL_FRAME_ARM_INL_H_
-
-#include "assembler-arm.h"
-#include "virtual-frame-arm.h"
-
-namespace v8 {
-namespace internal {
-
-// These VirtualFrame methods should actually be in a virtual-frame-arm-inl.h
-// file if such a thing existed.
-MemOperand VirtualFrame::ParameterAt(int index) {
- // Index -1 corresponds to the receiver.
- ASSERT(-1 <= index); // -1 is the receiver.
- ASSERT(index <= parameter_count());
- return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
-}
-
- // The receiver frame slot.
-MemOperand VirtualFrame::Receiver() {
- return ParameterAt(-1);
-}
-
-
-void VirtualFrame::Forget(int count) {
- SpillAll();
- LowerHeight(count);
-}
-
-} } // namespace v8::internal
-
-#endif // V8_VIRTUAL_FRAME_ARM_INL_H_
diff --git a/src/3rdparty/v8/src/arm/virtual-frame-arm.cc b/src/3rdparty/v8/src/arm/virtual-frame-arm.cc
deleted file mode 100644
index a852d6e..0000000
--- a/src/3rdparty/v8/src/arm/virtual-frame-arm.cc
+++ /dev/null
@@ -1,843 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::PopToR1R0() {
- // Shuffle things around so the top of stack is in r0 and r1.
- MergeTOSTo(R0_R1_TOS);
- // Pop the two registers off the stack so they are detached from the frame.
- LowerHeight(2);
- top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::PopToR1() {
- // Shuffle things around so the top of stack is only in r1.
- MergeTOSTo(R1_TOS);
- // Pop the register off the stack so it is detached from the frame.
- LowerHeight(1);
- top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::PopToR0() {
- // Shuffle things around so the top of stack only in r0.
- MergeTOSTo(R0_TOS);
- // Pop the register off the stack so it is detached from the frame.
- LowerHeight(1);
- top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::MergeTo(const VirtualFrame* expected, Condition cond) {
- if (Equals(expected)) return;
- ASSERT((expected->tos_known_smi_map_ & tos_known_smi_map_) ==
- expected->tos_known_smi_map_);
- ASSERT(expected->IsCompatibleWith(this));
- MergeTOSTo(expected->top_of_stack_state_, cond);
- ASSERT(register_allocation_map_ == expected->register_allocation_map_);
-}
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected, Condition cond) {
- if (Equals(expected)) return;
- tos_known_smi_map_ &= expected->tos_known_smi_map_;
- MergeTOSTo(expected->top_of_stack_state_, cond);
- ASSERT(register_allocation_map_ == expected->register_allocation_map_);
-}
-
-
-void VirtualFrame::MergeTOSTo(
- VirtualFrame::TopOfStack expected_top_of_stack_state, Condition cond) {
-#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))
- switch (CASE_NUMBER(top_of_stack_state_, expected_top_of_stack_state)) {
- case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS):
- break;
- case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS):
- __ pop(r0, cond);
- break;
- case CASE_NUMBER(NO_TOS_REGISTERS, R1_TOS):
- __ pop(r1, cond);
- break;
- case CASE_NUMBER(NO_TOS_REGISTERS, R0_R1_TOS):
- __ pop(r0, cond);
- __ pop(r1, cond);
- break;
- case CASE_NUMBER(NO_TOS_REGISTERS, R1_R0_TOS):
- __ pop(r1, cond);
- __ pop(r0, cond);
- break;
- case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS):
- __ push(r0, cond);
- break;
- case CASE_NUMBER(R0_TOS, R0_TOS):
- break;
- case CASE_NUMBER(R0_TOS, R1_TOS):
- __ mov(r1, r0, LeaveCC, cond);
- break;
- case CASE_NUMBER(R0_TOS, R0_R1_TOS):
- __ pop(r1, cond);
- break;
- case CASE_NUMBER(R0_TOS, R1_R0_TOS):
- __ mov(r1, r0, LeaveCC, cond);
- __ pop(r0, cond);
- break;
- case CASE_NUMBER(R1_TOS, NO_TOS_REGISTERS):
- __ push(r1, cond);
- break;
- case CASE_NUMBER(R1_TOS, R0_TOS):
- __ mov(r0, r1, LeaveCC, cond);
- break;
- case CASE_NUMBER(R1_TOS, R1_TOS):
- break;
- case CASE_NUMBER(R1_TOS, R0_R1_TOS):
- __ mov(r0, r1, LeaveCC, cond);
- __ pop(r1, cond);
- break;
- case CASE_NUMBER(R1_TOS, R1_R0_TOS):
- __ pop(r0, cond);
- break;
- case CASE_NUMBER(R0_R1_TOS, NO_TOS_REGISTERS):
- __ Push(r1, r0, cond);
- break;
- case CASE_NUMBER(R0_R1_TOS, R0_TOS):
- __ push(r1, cond);
- break;
- case CASE_NUMBER(R0_R1_TOS, R1_TOS):
- __ push(r1, cond);
- __ mov(r1, r0, LeaveCC, cond);
- break;
- case CASE_NUMBER(R0_R1_TOS, R0_R1_TOS):
- break;
- case CASE_NUMBER(R0_R1_TOS, R1_R0_TOS):
- __ Swap(r0, r1, ip, cond);
- break;
- case CASE_NUMBER(R1_R0_TOS, NO_TOS_REGISTERS):
- __ Push(r0, r1, cond);
- break;
- case CASE_NUMBER(R1_R0_TOS, R0_TOS):
- __ push(r0, cond);
- __ mov(r0, r1, LeaveCC, cond);
- break;
- case CASE_NUMBER(R1_R0_TOS, R1_TOS):
- __ push(r0, cond);
- break;
- case CASE_NUMBER(R1_R0_TOS, R0_R1_TOS):
- __ Swap(r0, r1, ip, cond);
- break;
- case CASE_NUMBER(R1_R0_TOS, R1_R0_TOS):
- break;
- default:
- UNREACHABLE();
-#undef CASE_NUMBER
- }
- // A conditional merge will be followed by a conditional branch and the
- // fall-through code will have an unchanged virtual frame state. If the
- // merge is unconditional ('al'ways) then it might be followed by a fall
- // through. We need to update the virtual frame state to match the code we
- // are falling into. The final case is an unconditional merge followed by an
- // unconditional branch, in which case it doesn't matter what we do to the
- // virtual frame state, because the virtual frame will be invalidated.
- if (cond == al) {
- top_of_stack_state_ = expected_top_of_stack_state;
- }
-}
-
-
-void VirtualFrame::Enter() {
- Comment cmnt(masm(), "[ Enter JS frame");
-
-#ifdef DEBUG
- // Verify that r1 contains a JS function. The following code relies
- // on r2 being available for use.
- if (FLAG_debug_code) {
- Label map_check, done;
- __ tst(r1, Operand(kSmiTagMask));
- __ b(ne, &map_check);
- __ stop("VirtualFrame::Enter - r1 is not a function (smi check).");
- __ bind(&map_check);
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(eq, &done);
- __ stop("VirtualFrame::Enter - r1 is not a function (map check).");
- __ bind(&done);
- }
-#endif // DEBUG
-
- // We are about to push four values to the frame.
- Adjust(4);
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(2 * kPointerSize));
-}
-
-
-void VirtualFrame::Exit() {
- Comment cmnt(masm(), "[ Exit JS frame");
- // Record the location of the JS exit code for patching when setting
- // break point.
- __ RecordJSReturn();
-
- // Drop the execution stack down to the frame pointer and restore the caller
- // frame pointer and return address.
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
- int count = local_count();
- if (count > 0) {
- Comment cmnt(masm(), "[ Allocate space for locals");
- Adjust(count);
- // Initialize stack slots with 'undefined' value.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ LoadRoot(r2, Heap::kStackLimitRootIndex);
- if (count < kLocalVarBound) {
- // For less locals the unrolled loop is more compact.
- for (int i = 0; i < count; i++) {
- __ push(ip);
- }
- } else {
- // For more locals a loop in generated code is more compact.
- Label alloc_locals_loop;
- __ mov(r1, Operand(count));
- __ bind(&alloc_locals_loop);
- __ push(ip);
- __ sub(r1, r1, Operand(1), SetCC);
- __ b(ne, &alloc_locals_loop);
- }
- } else {
- __ LoadRoot(r2, Heap::kStackLimitRootIndex);
- }
- // Check the stack for overflow or a break request.
- masm()->cmp(sp, Operand(r2));
- StackCheckStub stub;
- // Call the stub if lower.
- masm()->mov(ip,
- Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
- RelocInfo::CODE_TARGET),
- LeaveCC,
- lo);
- masm()->Call(ip, lo);
-}
-
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
- UNIMPLEMENTED();
-}
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
- // Grow the expression stack by handler size less one (the return
- // address in lr is already counted by a call instruction).
- Adjust(kHandlerSize - 1);
- __ PushTryHandler(IN_JAVASCRIPT, type);
-}
-
-
-void VirtualFrame::CallJSFunction(int arg_count) {
- // InvokeFunction requires function in r1.
- PopToR1();
- SpillAll();
-
- // +1 for receiver.
- Forget(arg_count + 1);
- ASSERT(cgen()->HasValidEntryRegisters());
- ParameterCount count(arg_count);
- __ InvokeFunction(r1, count, CALL_FUNCTION);
- // Restore the context.
- __ ldr(cp, Context());
-}
-
-
-void VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
- SpillAll();
- Forget(arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(f, arg_count);
-}
-
-
-void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
- SpillAll();
- Forget(arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(id, arg_count);
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void VirtualFrame::DebugBreak() {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ DebugBreak();
-}
-#endif
-
-
-void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flags,
- int arg_count) {
- Forget(arg_count);
- __ InvokeBuiltin(id, flags);
-}
-
-
-void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- Builtins::kLoadIC_Initialize));
- PopToR0();
- SpillAll();
- __ mov(r2, Operand(name));
- CallCodeObject(ic, mode, 0);
-}
-
-
-void VirtualFrame::CallStoreIC(Handle<String> name,
- bool is_contextual,
- StrictModeFlag strict_mode) {
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- (strict_mode == kStrictMode) ? Builtins::kStoreIC_Initialize_Strict
- : Builtins::kStoreIC_Initialize));
- PopToR0();
- RelocInfo::Mode mode;
- if (is_contextual) {
- SpillAll();
- __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- mode = RelocInfo::CODE_TARGET_CONTEXT;
- } else {
- EmitPop(r1);
- SpillAll();
- mode = RelocInfo::CODE_TARGET;
- }
- __ mov(r2, Operand(name));
- CallCodeObject(ic, mode, 0);
-}
-
-
-void VirtualFrame::CallKeyedLoadIC() {
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- Builtins::kKeyedLoadIC_Initialize));
- PopToR1R0();
- SpillAll();
- CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
-}
-
-
-void VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- (strict_mode == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict
- : Builtins::kKeyedStoreIC_Initialize));
- PopToR1R0();
- SpillAll();
- EmitPop(r2);
- CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- int dropped_args) {
- switch (code->kind()) {
- case Code::CALL_IC:
- case Code::KEYED_CALL_IC:
- case Code::FUNCTION:
- break;
- case Code::KEYED_LOAD_IC:
- case Code::LOAD_IC:
- case Code::KEYED_STORE_IC:
- case Code::STORE_IC:
- ASSERT(dropped_args == 0);
- break;
- case Code::BUILTIN:
- ASSERT(*code == Isolate::Current()->builtins()->builtin(
- Builtins::kJSConstructCall));
- break;
- default:
- UNREACHABLE();
- break;
- }
- Forget(dropped_args);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ Call(code, rmode);
-}
-
-
-// NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS, R0_R1_TOS.
-const bool VirtualFrame::kR0InUse[TOS_STATES] =
- { false, true, false, true, true };
-const bool VirtualFrame::kR1InUse[TOS_STATES] =
- { false, false, true, true, true };
-const int VirtualFrame::kVirtualElements[TOS_STATES] =
- { 0, 1, 1, 2, 2 };
-const Register VirtualFrame::kTopRegister[TOS_STATES] =
- { r0, r0, r1, r1, r0 };
-const Register VirtualFrame::kBottomRegister[TOS_STATES] =
- { r0, r0, r1, r0, r1 };
-const Register VirtualFrame::kAllocatedRegisters[
- VirtualFrame::kNumberOfAllocatedRegisters] = { r2, r3, r4, r5, r6 };
-// Popping is done by the transition implied by kStateAfterPop. Of course if
-// there were no stack slots allocated to registers then the physical SP must
-// be adjusted.
-const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPop[TOS_STATES] =
- { NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, R0_TOS, R1_TOS };
-// Pushing is done by the transition implied by kStateAfterPush. Of course if
-// the maximum number of registers was already allocated to the top of stack
-// slots then one register must be physically pushed onto the stack.
-const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPush[TOS_STATES] =
- { R0_TOS, R1_R0_TOS, R0_R1_TOS, R0_R1_TOS, R1_R0_TOS };
-
-
-void VirtualFrame::Drop(int count) {
- ASSERT(count >= 0);
- ASSERT(height() >= count);
- // Discard elements from the virtual frame and free any registers.
- int num_virtual_elements = kVirtualElements[top_of_stack_state_];
- while (num_virtual_elements > 0) {
- Pop();
- num_virtual_elements--;
- count--;
- if (count == 0) return;
- }
- if (count == 0) return;
- __ add(sp, sp, Operand(count * kPointerSize));
- LowerHeight(count);
-}
-
-
-void VirtualFrame::Pop() {
- if (top_of_stack_state_ == NO_TOS_REGISTERS) {
- __ add(sp, sp, Operand(kPointerSize));
- } else {
- top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
- }
- LowerHeight(1);
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
- ASSERT(!is_used(RegisterAllocator::ToNumber(reg)));
- if (top_of_stack_state_ == NO_TOS_REGISTERS) {
- __ pop(reg);
- } else {
- __ mov(reg, kTopRegister[top_of_stack_state_]);
- top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
- }
- LowerHeight(1);
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToR0() {
- switch (top_of_stack_state_) {
- case NO_TOS_REGISTERS:
- __ ldr(r0, MemOperand(sp, 0));
- break;
- case R0_TOS:
- __ push(r0);
- break;
- case R1_TOS:
- __ push(r1);
- __ mov(r0, r1);
- break;
- case R0_R1_TOS:
- __ Push(r1, r0);
- break;
- case R1_R0_TOS:
- __ Push(r0, r1);
- __ mov(r0, r1);
- break;
- default:
- UNREACHABLE();
- }
- top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToR1() {
- switch (top_of_stack_state_) {
- case NO_TOS_REGISTERS:
- __ ldr(r1, MemOperand(sp, 0));
- break;
- case R0_TOS:
- __ push(r0);
- __ mov(r1, r0);
- break;
- case R1_TOS:
- __ push(r1);
- break;
- case R0_R1_TOS:
- __ Push(r1, r0);
- __ mov(r1, r0);
- break;
- case R1_R0_TOS:
- __ Push(r0, r1);
- break;
- default:
- UNREACHABLE();
- }
- top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToR1R0() {
- switch (top_of_stack_state_) {
- case NO_TOS_REGISTERS:
- __ ldr(r1, MemOperand(sp, 0));
- __ ldr(r0, MemOperand(sp, kPointerSize));
- break;
- case R0_TOS:
- __ push(r0);
- __ mov(r1, r0);
- __ ldr(r0, MemOperand(sp, kPointerSize));
- break;
- case R1_TOS:
- __ push(r1);
- __ ldr(r0, MemOperand(sp, kPointerSize));
- break;
- case R0_R1_TOS:
- __ Push(r1, r0);
- __ Swap(r0, r1, ip);
- break;
- case R1_R0_TOS:
- __ Push(r0, r1);
- break;
- default:
- UNREACHABLE();
- }
- top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-Register VirtualFrame::Peek() {
- AssertIsNotSpilled();
- if (top_of_stack_state_ == NO_TOS_REGISTERS) {
- top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
- Register answer = kTopRegister[top_of_stack_state_];
- __ pop(answer);
- return answer;
- } else {
- return kTopRegister[top_of_stack_state_];
- }
-}
-
-
-Register VirtualFrame::Peek2() {
- AssertIsNotSpilled();
- switch (top_of_stack_state_) {
- case NO_TOS_REGISTERS:
- case R0_TOS:
- case R0_R1_TOS:
- MergeTOSTo(R0_R1_TOS);
- return r1;
- case R1_TOS:
- case R1_R0_TOS:
- MergeTOSTo(R1_R0_TOS);
- return r0;
- default:
- UNREACHABLE();
- return no_reg;
- }
-}
-
-
-void VirtualFrame::Dup() {
- if (SpilledScope::is_spilled()) {
- __ ldr(ip, MemOperand(sp, 0));
- __ push(ip);
- } else {
- switch (top_of_stack_state_) {
- case NO_TOS_REGISTERS:
- __ ldr(r0, MemOperand(sp, 0));
- top_of_stack_state_ = R0_TOS;
- break;
- case R0_TOS:
- __ mov(r1, r0);
- // r0 and r1 contains the same value. Prefer state with r0 holding TOS.
- top_of_stack_state_ = R0_R1_TOS;
- break;
- case R1_TOS:
- __ mov(r0, r1);
- // r0 and r1 contains the same value. Prefer state with r0 holding TOS.
- top_of_stack_state_ = R0_R1_TOS;
- break;
- case R0_R1_TOS:
- __ push(r1);
- __ mov(r1, r0);
- // r0 and r1 contains the same value. Prefer state with r0 holding TOS.
- top_of_stack_state_ = R0_R1_TOS;
- break;
- case R1_R0_TOS:
- __ push(r0);
- __ mov(r0, r1);
- // r0 and r1 contains the same value. Prefer state with r0 holding TOS.
- top_of_stack_state_ = R0_R1_TOS;
- break;
- default:
- UNREACHABLE();
- }
- }
- RaiseHeight(1, tos_known_smi_map_ & 1);
-}
-
-
-void VirtualFrame::Dup2() {
- if (SpilledScope::is_spilled()) {
- __ ldr(ip, MemOperand(sp, kPointerSize));
- __ push(ip);
- __ ldr(ip, MemOperand(sp, kPointerSize));
- __ push(ip);
- } else {
- switch (top_of_stack_state_) {
- case NO_TOS_REGISTERS:
- __ ldr(r0, MemOperand(sp, 0));
- __ ldr(r1, MemOperand(sp, kPointerSize));
- top_of_stack_state_ = R0_R1_TOS;
- break;
- case R0_TOS:
- __ push(r0);
- __ ldr(r1, MemOperand(sp, kPointerSize));
- top_of_stack_state_ = R0_R1_TOS;
- break;
- case R1_TOS:
- __ push(r1);
- __ ldr(r0, MemOperand(sp, kPointerSize));
- top_of_stack_state_ = R1_R0_TOS;
- break;
- case R0_R1_TOS:
- __ Push(r1, r0);
- top_of_stack_state_ = R0_R1_TOS;
- break;
- case R1_R0_TOS:
- __ Push(r0, r1);
- top_of_stack_state_ = R1_R0_TOS;
- break;
- default:
- UNREACHABLE();
- }
- }
- RaiseHeight(2, tos_known_smi_map_ & 3);
-}
-
-
-Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
- ASSERT(but_not_to_this_one.is(r0) ||
- but_not_to_this_one.is(r1) ||
- but_not_to_this_one.is(no_reg));
- LowerHeight(1);
- if (top_of_stack_state_ == NO_TOS_REGISTERS) {
- if (but_not_to_this_one.is(r0)) {
- __ pop(r1);
- return r1;
- } else {
- __ pop(r0);
- return r0;
- }
- } else {
- Register answer = kTopRegister[top_of_stack_state_];
- ASSERT(!answer.is(but_not_to_this_one));
- top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
- return answer;
- }
-}
-
-
-void VirtualFrame::EnsureOneFreeTOSRegister() {
- if (kVirtualElements[top_of_stack_state_] == kMaxTOSRegisters) {
- __ push(kBottomRegister[top_of_stack_state_]);
- top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
- top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
- }
- ASSERT(kVirtualElements[top_of_stack_state_] != kMaxTOSRegisters);
-}
-
-
-void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
- RaiseHeight(1, info.IsSmi() ? 1 : 0);
- if (reg.is(cp)) {
- // If we are pushing cp then we are about to make a call and things have to
- // be pushed to the physical stack. There's nothing to be gained my moving
- // to a TOS register and then pushing that, we might as well push to the
- // physical stack immediately.
- MergeTOSTo(NO_TOS_REGISTERS);
- __ push(reg);
- return;
- }
- if (SpilledScope::is_spilled()) {
- ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
- __ push(reg);
- return;
- }
- if (top_of_stack_state_ == NO_TOS_REGISTERS) {
- if (reg.is(r0)) {
- top_of_stack_state_ = R0_TOS;
- return;
- }
- if (reg.is(r1)) {
- top_of_stack_state_ = R1_TOS;
- return;
- }
- }
- EnsureOneFreeTOSRegister();
- top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
- Register dest = kTopRegister[top_of_stack_state_];
- __ Move(dest, reg);
-}
-
-
-void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
- if (this_far_down < kTOSKnownSmiMapSize) {
- tos_known_smi_map_ &= ~(1 << this_far_down);
- }
- if (this_far_down == 0) {
- Pop();
- Register dest = GetTOSRegister();
- if (dest.is(reg)) {
- // We already popped one item off the top of the stack. If the only
- // free register is the one we were asked to push then we have been
- // asked to push a register that was already in use, which cannot
- // happen. It therefore folows that there are two free TOS registers:
- ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
- dest = dest.is(r0) ? r1 : r0;
- }
- __ mov(dest, reg);
- EmitPush(dest);
- } else if (this_far_down == 1) {
- int virtual_elements = kVirtualElements[top_of_stack_state_];
- if (virtual_elements < 2) {
- __ str(reg, ElementAt(this_far_down));
- } else {
- ASSERT(virtual_elements == 2);
- ASSERT(!reg.is(r0));
- ASSERT(!reg.is(r1));
- Register dest = kBottomRegister[top_of_stack_state_];
- __ mov(dest, reg);
- }
- } else {
- ASSERT(this_far_down >= 2);
- ASSERT(kVirtualElements[top_of_stack_state_] <= 2);
- __ str(reg, ElementAt(this_far_down));
- }
-}
-
-
-Register VirtualFrame::GetTOSRegister() {
- if (SpilledScope::is_spilled()) return r0;
-
- EnsureOneFreeTOSRegister();
- return kTopRegister[kStateAfterPush[top_of_stack_state_]];
-}
-
-
-void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
- RaiseHeight(1, info.IsSmi() ? 1 : 0);
- if (SpilledScope::is_spilled()) {
- __ mov(r0, operand);
- __ push(r0);
- return;
- }
- EnsureOneFreeTOSRegister();
- top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
- __ mov(kTopRegister[top_of_stack_state_], operand);
-}
-
-
-void VirtualFrame::EmitPush(MemOperand operand, TypeInfo info) {
- RaiseHeight(1, info.IsSmi() ? 1 : 0);
- if (SpilledScope::is_spilled()) {
- __ ldr(r0, operand);
- __ push(r0);
- return;
- }
- EnsureOneFreeTOSRegister();
- top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
- __ ldr(kTopRegister[top_of_stack_state_], operand);
-}
-
-
-void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) {
- RaiseHeight(1, 0);
- if (SpilledScope::is_spilled()) {
- __ LoadRoot(r0, index);
- __ push(r0);
- return;
- }
- EnsureOneFreeTOSRegister();
- top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
- __ LoadRoot(kTopRegister[top_of_stack_state_], index);
-}
-
-
-void VirtualFrame::EmitPushMultiple(int count, int src_regs) {
- ASSERT(SpilledScope::is_spilled());
- Adjust(count);
- __ stm(db_w, sp, src_regs);
-}
-
-
-void VirtualFrame::SpillAll() {
- switch (top_of_stack_state_) {
- case R1_R0_TOS:
- masm()->push(r0);
- // Fall through.
- case R1_TOS:
- masm()->push(r1);
- top_of_stack_state_ = NO_TOS_REGISTERS;
- break;
- case R0_R1_TOS:
- masm()->push(r1);
- // Fall through.
- case R0_TOS:
- masm()->push(r0);
- top_of_stack_state_ = NO_TOS_REGISTERS;
- // Fall through.
- case NO_TOS_REGISTERS:
- break;
- default:
- UNREACHABLE();
- break;
- }
- ASSERT(register_allocation_map_ == 0); // Not yet implemented.
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/virtual-frame-arm.h b/src/3rdparty/v8/src/arm/virtual-frame-arm.h
deleted file mode 100644
index 6d67e70..0000000
--- a/src/3rdparty/v8/src/arm/virtual-frame-arm.h
+++ /dev/null
@@ -1,523 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_VIRTUAL_FRAME_ARM_H_
-#define V8_ARM_VIRTUAL_FRAME_ARM_H_
-
-#include "register-allocator.h"
-
-namespace v8 {
-namespace internal {
-
-// This dummy class is only used to create invalid virtual frames.
-extern class InvalidVirtualFrameInitializer {}* kInvalidVirtualFrameInitializer;
-
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame. It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack. It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
-
-class VirtualFrame : public ZoneObject {
- public:
- class RegisterAllocationScope;
- // A utility class to introduce a scope where the virtual frame is
- // expected to remain spilled. The constructor spills the code
- // generator's current frame, and keeps it spilled.
- class SpilledScope BASE_EMBEDDED {
- public:
- explicit SpilledScope(VirtualFrame* frame)
- : old_is_spilled_(
- Isolate::Current()->is_virtual_frame_in_spilled_scope()) {
- if (frame != NULL) {
- if (!old_is_spilled_) {
- frame->SpillAll();
- } else {
- frame->AssertIsSpilled();
- }
- }
- Isolate::Current()->set_is_virtual_frame_in_spilled_scope(true);
- }
- ~SpilledScope() {
- Isolate::Current()->set_is_virtual_frame_in_spilled_scope(
- old_is_spilled_);
- }
- static bool is_spilled() {
- return Isolate::Current()->is_virtual_frame_in_spilled_scope();
- }
-
- private:
- int old_is_spilled_;
-
- SpilledScope() { }
-
- friend class RegisterAllocationScope;
- };
-
- class RegisterAllocationScope BASE_EMBEDDED {
- public:
- // A utility class to introduce a scope where the virtual frame
- // is not spilled, ie. where register allocation occurs. Eventually
- // when RegisterAllocationScope is ubiquitous it can be removed
- // along with the (by then unused) SpilledScope class.
- inline explicit RegisterAllocationScope(CodeGenerator* cgen);
- inline ~RegisterAllocationScope();
-
- private:
- CodeGenerator* cgen_;
- bool old_is_spilled_;
-
- RegisterAllocationScope() { }
- };
-
- // An illegal index into the virtual frame.
- static const int kIllegalIndex = -1;
-
- // Construct an initial virtual frame on entry to a JS function.
- inline VirtualFrame();
-
- // Construct an invalid virtual frame, used by JumpTargets.
- inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);
-
- // Construct a virtual frame as a clone of an existing one.
- explicit inline VirtualFrame(VirtualFrame* original);
-
- inline CodeGenerator* cgen() const;
- inline MacroAssembler* masm();
-
- // The number of elements on the virtual frame.
- int element_count() const { return element_count_; }
-
- // The height of the virtual expression stack.
- inline int height() const;
-
- bool is_used(int num) {
- switch (num) {
- case 0: { // r0.
- return kR0InUse[top_of_stack_state_];
- }
- case 1: { // r1.
- return kR1InUse[top_of_stack_state_];
- }
- case 2:
- case 3:
- case 4:
- case 5:
- case 6: { // r2 to r6.
- ASSERT(num - kFirstAllocatedRegister < kNumberOfAllocatedRegisters);
- ASSERT(num >= kFirstAllocatedRegister);
- if ((register_allocation_map_ &
- (1 << (num - kFirstAllocatedRegister))) == 0) {
- return false;
- } else {
- return true;
- }
- }
- default: {
- ASSERT(num < kFirstAllocatedRegister ||
- num >= kFirstAllocatedRegister + kNumberOfAllocatedRegisters);
- return false;
- }
- }
- }
-
- // Add extra in-memory elements to the top of the frame to match an actual
- // frame (eg, the frame after an exception handler is pushed). No code is
- // emitted.
- void Adjust(int count);
-
- // Forget elements from the top of the frame to match an actual frame (eg,
- // the frame after a runtime call). No code is emitted except to bring the
- // frame to a spilled state.
- void Forget(int count);
-
- // Spill all values from the frame to memory.
- void SpillAll();
-
- void AssertIsSpilled() const {
- ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
- ASSERT(register_allocation_map_ == 0);
- }
-
- void AssertIsNotSpilled() {
- ASSERT(!SpilledScope::is_spilled());
- }
-
- // Spill all occurrences of a specific register from the frame.
- void Spill(Register reg) {
- UNIMPLEMENTED();
- }
-
- // Spill all occurrences of an arbitrary register if possible. Return the
- // register spilled or no_reg if it was not possible to free any register
- // (ie, they all have frame-external references). Unimplemented.
- Register SpillAnyRegister();
-
- // Make this virtual frame have a state identical to an expected virtual
- // frame. As a side effect, code may be emitted to make this frame match
- // the expected one.
- void MergeTo(VirtualFrame* expected, Condition cond = al);
- void MergeTo(const VirtualFrame* expected, Condition cond = al);
-
- // Checks whether this frame can be branched to by the other frame.
- bool IsCompatibleWith(const VirtualFrame* other) const {
- return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0;
- }
-
- inline void ForgetTypeInfo() {
- tos_known_smi_map_ = 0;
- }
-
- // Detach a frame from its code generator, perhaps temporarily. This
- // tells the register allocator that it is free to use frame-internal
- // registers. Used when the code generator's frame is switched from this
- // one to NULL by an unconditional jump.
- void DetachFromCodeGenerator() {
- }
-
- // (Re)attach a frame to its code generator. This informs the register
- // allocator that the frame-internal register references are active again.
- // Used when a code generator's frame is switched from NULL to this one by
- // binding a label.
- void AttachToCodeGenerator() {
- }
-
- // Emit code for the physical JS entry and exit frame sequences. After
- // calling Enter, the virtual frame is ready for use; and after calling
- // Exit it should not be used. Note that Enter does not allocate space in
- // the physical frame for storing frame-allocated locals.
- void Enter();
- void Exit();
-
- // Prepare for returning from the frame by elements in the virtual frame. This
- // avoids generating unnecessary merge code when jumping to the
- // shared return site. No spill code emitted. Value to return should be in r0.
- inline void PrepareForReturn();
-
- // Number of local variables after when we use a loop for allocating.
- static const int kLocalVarBound = 5;
-
- // Allocate and initialize the frame-allocated locals.
- void AllocateStackSlots();
-
- // The current top of the expression stack as an assembly operand.
- MemOperand Top() {
- AssertIsSpilled();
- return MemOperand(sp, 0);
- }
-
- // An element of the expression stack as an assembly operand.
- MemOperand ElementAt(int index) {
- int adjusted_index = index - kVirtualElements[top_of_stack_state_];
- ASSERT(adjusted_index >= 0);
- return MemOperand(sp, adjusted_index * kPointerSize);
- }
-
- bool KnownSmiAt(int index) {
- if (index >= kTOSKnownSmiMapSize) return false;
- return (tos_known_smi_map_ & (1 << index)) != 0;
- }
-
- // A frame-allocated local as an assembly operand.
- inline MemOperand LocalAt(int index);
-
- // Push the address of the receiver slot on the frame.
- void PushReceiverSlotAddress();
-
- // The function frame slot.
- MemOperand Function() { return MemOperand(fp, kFunctionOffset); }
-
- // The context frame slot.
- MemOperand Context() { return MemOperand(fp, kContextOffset); }
-
- // A parameter as an assembly operand.
- inline MemOperand ParameterAt(int index);
-
- // The receiver frame slot.
- inline MemOperand Receiver();
-
- // Push a try-catch or try-finally handler on top of the virtual frame.
- void PushTryHandler(HandlerType type);
-
- // Call stub given the number of arguments it expects on (and
- // removes from) the stack.
- inline void CallStub(CodeStub* stub, int arg_count);
-
- // Call JS function from top of the stack with arguments
- // taken from the stack.
- void CallJSFunction(int arg_count);
-
- // Call runtime given the number of arguments expected on (and
- // removed from) the stack.
- void CallRuntime(const Runtime::Function* f, int arg_count);
- void CallRuntime(Runtime::FunctionId id, int arg_count);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- void DebugBreak();
-#endif
-
- // Invoke builtin given the number of arguments it expects on (and
- // removes from) the stack.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flag,
- int arg_count);
-
- // Call load IC. Receiver is on the stack and is consumed. Result is returned
- // in r0.
- void CallLoadIC(Handle<String> name, RelocInfo::Mode mode);
-
- // Call store IC. If the load is contextual, value is found on top of the
- // frame. If not, value and receiver are on the frame. Both are consumed.
- // Result is returned in r0.
- void CallStoreIC(Handle<String> name, bool is_contextual,
- StrictModeFlag strict_mode);
-
- // Call keyed load IC. Key and receiver are on the stack. Both are consumed.
- // Result is returned in r0.
- void CallKeyedLoadIC();
-
- // Call keyed store IC. Value, key and receiver are on the stack. All three
- // are consumed. Result is returned in r0.
- void CallKeyedStoreIC(StrictModeFlag strict_mode);
-
- // Call into an IC stub given the number of arguments it removes
- // from the stack. Register arguments to the IC stub are implicit,
- // and depend on the type of IC stub.
- void CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- int dropped_args);
-
- // Drop a number of elements from the top of the expression stack. May
- // emit code to affect the physical frame. Does not clobber any registers
- // excepting possibly the stack pointer.
- void Drop(int count);
-
- // Drop one element.
- void Drop() { Drop(1); }
-
- // Pop an element from the top of the expression stack. Discards
- // the result.
- void Pop();
-
- // Pop an element from the top of the expression stack. The register
- // will be one normally used for the top of stack register allocation
- // so you can't hold on to it if you push on the stack.
- Register PopToRegister(Register but_not_to_this_one = no_reg);
-
- // Look at the top of the stack. The register returned is aliased and
- // must be copied to a scratch register before modification.
- Register Peek();
-
- // Look at the value beneath the top of the stack. The register returned is
- // aliased and must be copied to a scratch register before modification.
- Register Peek2();
-
- // Duplicate the top of stack.
- void Dup();
-
- // Duplicate the two elements on top of stack.
- void Dup2();
-
- // Flushes all registers, but it puts a copy of the top-of-stack in r0.
- void SpillAllButCopyTOSToR0();
-
- // Flushes all registers, but it puts a copy of the top-of-stack in r1.
- void SpillAllButCopyTOSToR1();
-
- // Flushes all registers, but it puts a copy of the top-of-stack in r1
- // and the next value on the stack in r0.
- void SpillAllButCopyTOSToR1R0();
-
- // Pop and save an element from the top of the expression stack and
- // emit a corresponding pop instruction.
- void EmitPop(Register reg);
-
- // Takes the top two elements and puts them in r0 (top element) and r1
- // (second element).
- void PopToR1R0();
-
- // Takes the top element and puts it in r1.
- void PopToR1();
-
- // Takes the top element and puts it in r0.
- void PopToR0();
-
- // Push an element on top of the expression stack and emit a
- // corresponding push instruction.
- void EmitPush(Register reg, TypeInfo type_info = TypeInfo::Unknown());
- void EmitPush(Operand operand, TypeInfo type_info = TypeInfo::Unknown());
- void EmitPush(MemOperand operand, TypeInfo type_info = TypeInfo::Unknown());
- void EmitPushRoot(Heap::RootListIndex index);
-
- // Overwrite the nth thing on the stack. If the nth position is in a
- // register then this turns into a mov, otherwise an str. Afterwards
- // you can still use the register even if it is a register that can be
- // used for TOS (r0 or r1).
- void SetElementAt(Register reg, int this_far_down);
-
- // Get a register which is free and which must be immediately used to
- // push on the top of the stack.
- Register GetTOSRegister();
-
- // Push multiple registers on the stack and the virtual frame
- // Register are selected by setting bit in src_regs and
- // are pushed in decreasing order: r15 .. r0.
- void EmitPushMultiple(int count, int src_regs);
-
- static Register scratch0() { return r7; }
- static Register scratch1() { return r9; }
-
- private:
- static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
- static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
- static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
- static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
- static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
-
- // 5 states for the top of stack, which can be in memory or in r0 and r1.
- enum TopOfStack {
- NO_TOS_REGISTERS,
- R0_TOS,
- R1_TOS,
- R1_R0_TOS,
- R0_R1_TOS,
- TOS_STATES
- };
-
- static const int kMaxTOSRegisters = 2;
-
- static const bool kR0InUse[TOS_STATES];
- static const bool kR1InUse[TOS_STATES];
- static const int kVirtualElements[TOS_STATES];
- static const TopOfStack kStateAfterPop[TOS_STATES];
- static const TopOfStack kStateAfterPush[TOS_STATES];
- static const Register kTopRegister[TOS_STATES];
- static const Register kBottomRegister[TOS_STATES];
-
- // We allocate up to 5 locals in registers.
- static const int kNumberOfAllocatedRegisters = 5;
- // r2 to r6 are allocated to locals.
- static const int kFirstAllocatedRegister = 2;
-
- static const Register kAllocatedRegisters[kNumberOfAllocatedRegisters];
-
- static Register AllocatedRegister(int r) {
- ASSERT(r >= 0 && r < kNumberOfAllocatedRegisters);
- return kAllocatedRegisters[r];
- }
-
- // The number of elements on the stack frame.
- int element_count_;
- TopOfStack top_of_stack_state_:3;
- int register_allocation_map_:kNumberOfAllocatedRegisters;
- static const int kTOSKnownSmiMapSize = 4;
- unsigned tos_known_smi_map_:kTOSKnownSmiMapSize;
-
- // The index of the element that is at the processor's stack pointer
- // (the sp register). For now since everything is in memory it is given
- // by the number of elements on the not-very-virtual stack frame.
- int stack_pointer() { return element_count_ - 1; }
-
- // The number of frame-allocated locals and parameters respectively.
- inline int parameter_count() const;
- inline int local_count() const;
-
- // The index of the element that is at the processor's frame pointer
- // (the fp register). The parameters, receiver, function, and context
- // are below the frame pointer.
- inline int frame_pointer() const;
-
- // The index of the first parameter. The receiver lies below the first
- // parameter.
- int param0_index() { return 1; }
-
- // The index of the context slot in the frame. It is immediately
- // below the frame pointer.
- inline int context_index();
-
- // The index of the function slot in the frame. It is below the frame
- // pointer and context slot.
- inline int function_index();
-
- // The index of the first local. Between the frame pointer and the
- // locals lies the return address.
- inline int local0_index() const;
-
- // The index of the base of the expression stack.
- inline int expression_base_index() const;
-
- // Convert a frame index into a frame pointer relative offset into the
- // actual stack.
- inline int fp_relative(int index);
-
- // Spill all elements in registers. Spill the top spilled_args elements
- // on the frame. Sync all other frame elements.
- // Then drop dropped_args elements from the virtual frame, to match
- // the effect of an upcoming call that will drop them from the stack.
- void PrepareForCall(int spilled_args, int dropped_args);
-
- // If all top-of-stack registers are in use then the lowest one is pushed
- // onto the physical stack and made free.
- void EnsureOneFreeTOSRegister();
-
- // Emit instructions to get the top of stack state from where we are to where
- // we want to be.
- void MergeTOSTo(TopOfStack expected_state, Condition cond = al);
-
- inline bool Equals(const VirtualFrame* other);
-
- inline void LowerHeight(int count) {
- element_count_ -= count;
- if (count >= kTOSKnownSmiMapSize) {
- tos_known_smi_map_ = 0;
- } else {
- tos_known_smi_map_ >>= count;
- }
- }
-
- inline void RaiseHeight(int count, unsigned known_smi_map = 0) {
- ASSERT(count >= 32 || known_smi_map < (1u << count));
- element_count_ += count;
- if (count >= kTOSKnownSmiMapSize) {
- tos_known_smi_map_ = known_smi_map;
- } else {
- tos_known_smi_map_ = ((tos_known_smi_map_ << count) | known_smi_map);
- }
- }
-
- friend class JumpTarget;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_VIRTUAL_FRAME_ARM_H_
diff --git a/src/3rdparty/v8/src/array.js b/src/3rdparty/v8/src/array.js
deleted file mode 100644
index 6ed1476..0000000
--- a/src/3rdparty/v8/src/array.js
+++ /dev/null
@@ -1,1249 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file relies on the fact that the following declarations have been made
-// in runtime.js:
-// const $Array = global.Array;
-
-// -------------------------------------------------------------------
-
-// Global list of arrays visited during toString, toLocaleString and
-// join invocations.
-var visited_arrays = new InternalArray();
-
-
-// Gets a sorted array of array keys. Useful for operations on sparse
-// arrays. Dupes have not been removed.
-function GetSortedArrayKeys(array, intervals) {
- var length = intervals.length;
- var keys = [];
- for (var k = 0; k < length; k++) {
- var key = intervals[k];
- if (key < 0) {
- var j = -1 - key;
- var limit = j + intervals[++k];
- for (; j < limit; j++) {
- var e = array[j];
- if (!IS_UNDEFINED(e) || j in array) {
- keys.push(j);
- }
- }
- } else {
- // The case where key is undefined also ends here.
- if (!IS_UNDEFINED(key)) {
- var e = array[key];
- if (!IS_UNDEFINED(e) || key in array) {
- keys.push(key);
- }
- }
- }
- }
- keys.sort(function(a, b) { return a - b; });
- return keys;
-}
-
-
-// Optimized for sparse arrays if separator is ''.
-function SparseJoin(array, len, convert) {
- var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
- var last_key = -1;
- var keys_length = keys.length;
-
- var elements = new InternalArray(keys_length);
- var elements_length = 0;
-
- for (var i = 0; i < keys_length; i++) {
- var key = keys[i];
- if (key != last_key) {
- var e = array[key];
- if (!IS_STRING(e)) e = convert(e);
- elements[elements_length++] = e;
- last_key = key;
- }
- }
- return %StringBuilderConcat(elements, elements_length, '');
-}
-
-
-function UseSparseVariant(object, length, is_array) {
- return is_array &&
- length > 1000 &&
- (!%_IsSmi(length) ||
- %EstimateNumberOfElements(object) < (length >> 2));
-}
-
-
-function Join(array, length, separator, convert) {
- if (length == 0) return '';
-
- var is_array = IS_ARRAY(array);
-
- if (is_array) {
- // If the array is cyclic, return the empty string for already
- // visited arrays.
- if (!%PushIfAbsent(visited_arrays, array)) return '';
- }
-
- // Attempt to convert the elements.
- try {
- if (UseSparseVariant(array, length, is_array) && (separator.length == 0)) {
- return SparseJoin(array, length, convert);
- }
-
- // Fast case for one-element arrays.
- if (length == 1) {
- var e = array[0];
- if (IS_STRING(e)) return e;
- return convert(e);
- }
-
- // Construct an array for the elements.
- var elements = new InternalArray(length);
-
- // We pull the empty separator check outside the loop for speed!
- if (separator.length == 0) {
- var elements_length = 0;
- for (var i = 0; i < length; i++) {
- var e = array[i];
- if (!IS_UNDEFINED(e)) {
- if (!IS_STRING(e)) e = convert(e);
- elements[elements_length++] = e;
- }
- }
- elements.length = elements_length;
- var result = %_FastAsciiArrayJoin(elements, '');
- if (!IS_UNDEFINED(result)) return result;
- return %StringBuilderConcat(elements, elements_length, '');
- }
- // Non-empty separator case.
- // If the first element is a number then use the heuristic that the
- // remaining elements are also likely to be numbers.
- if (!IS_NUMBER(array[0])) {
- for (var i = 0; i < length; i++) {
- var e = array[i];
- if (!IS_STRING(e)) e = convert(e);
- elements[i] = e;
- }
- } else {
- for (var i = 0; i < length; i++) {
- var e = array[i];
- if (IS_NUMBER(e)) elements[i] = %_NumberToString(e);
- else {
- if (!IS_STRING(e)) e = convert(e);
- elements[i] = e;
- }
- }
- }
- var result = %_FastAsciiArrayJoin(elements, separator);
- if (!IS_UNDEFINED(result)) return result;
-
- return %StringBuilderJoin(elements, length, separator);
- } finally {
- // Make sure to remove the last element of the visited array no
- // matter what happens.
- if (is_array) visited_arrays.length = visited_arrays.length - 1;
- }
-}
-
-
-function ConvertToString(x) {
- // Assumes x is a non-string.
- if (IS_NUMBER(x)) return %_NumberToString(x);
- if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
- return (IS_NULL_OR_UNDEFINED(x)) ? '' : %ToString(%DefaultString(x));
-}
-
-
-function ConvertToLocaleString(e) {
- if (e == null) {
- return '';
- } else {
- // e_obj's toLocaleString might be overwritten, check if it is a function.
- // Call ToString if toLocaleString is not a function.
- // See issue 877615.
- var e_obj = ToObject(e);
- if (IS_FUNCTION(e_obj.toLocaleString))
- return ToString(e_obj.toLocaleString());
- else
- return ToString(e);
- }
-}
-
-
-// This function implements the optimized splice implementation that can use
-// special array operations to handle sparse arrays in a sensible fashion.
-function SmartSlice(array, start_i, del_count, len, deleted_elements) {
- // Move deleted elements to a new array (the return value from splice).
- // Intervals array can contain keys and intervals. See comment in Concat.
- var intervals = %GetArrayKeys(array, start_i + del_count);
- var length = intervals.length;
- for (var k = 0; k < length; k++) {
- var key = intervals[k];
- if (key < 0) {
- var j = -1 - key;
- var interval_limit = j + intervals[++k];
- if (j < start_i) {
- j = start_i;
- }
- for (; j < interval_limit; j++) {
- // ECMA-262 15.4.4.12 line 10. The spec could also be
- // interpreted such that %HasLocalProperty would be the
- // appropriate test. We follow KJS in consulting the
- // prototype.
- var current = array[j];
- if (!IS_UNDEFINED(current) || j in array) {
- deleted_elements[j - start_i] = current;
- }
- }
- } else {
- if (!IS_UNDEFINED(key)) {
- if (key >= start_i) {
- // ECMA-262 15.4.4.12 line 10. The spec could also be
- // interpreted such that %HasLocalProperty would be the
- // appropriate test. We follow KJS in consulting the
- // prototype.
- var current = array[key];
- if (!IS_UNDEFINED(current) || key in array) {
- deleted_elements[key - start_i] = current;
- }
- }
- }
- }
- }
-}
-
-
-// This function implements the optimized splice implementation that can use
-// special array operations to handle sparse arrays in a sensible fashion.
-function SmartMove(array, start_i, del_count, len, num_additional_args) {
- // Move data to new array.
- var new_array = new InternalArray(len - del_count + num_additional_args);
- var intervals = %GetArrayKeys(array, len);
- var length = intervals.length;
- for (var k = 0; k < length; k++) {
- var key = intervals[k];
- if (key < 0) {
- var j = -1 - key;
- var interval_limit = j + intervals[++k];
- while (j < start_i && j < interval_limit) {
- // The spec could also be interpreted such that
- // %HasLocalProperty would be the appropriate test. We follow
- // KJS in consulting the prototype.
- var current = array[j];
- if (!IS_UNDEFINED(current) || j in array) {
- new_array[j] = current;
- }
- j++;
- }
- j = start_i + del_count;
- while (j < interval_limit) {
- // ECMA-262 15.4.4.12 lines 24 and 41. The spec could also be
- // interpreted such that %HasLocalProperty would be the
- // appropriate test. We follow KJS in consulting the
- // prototype.
- var current = array[j];
- if (!IS_UNDEFINED(current) || j in array) {
- new_array[j - del_count + num_additional_args] = current;
- }
- j++;
- }
- } else {
- if (!IS_UNDEFINED(key)) {
- if (key < start_i) {
- // The spec could also be interpreted such that
- // %HasLocalProperty would be the appropriate test. We follow
- // KJS in consulting the prototype.
- var current = array[key];
- if (!IS_UNDEFINED(current) || key in array) {
- new_array[key] = current;
- }
- } else if (key >= start_i + del_count) {
- // ECMA-262 15.4.4.12 lines 24 and 41. The spec could also
- // be interpreted such that %HasLocalProperty would be the
- // appropriate test. We follow KJS in consulting the
- // prototype.
- var current = array[key];
- if (!IS_UNDEFINED(current) || key in array) {
- new_array[key - del_count + num_additional_args] = current;
- }
- }
- }
- }
- }
- // Move contents of new_array into this array
- %MoveArrayContents(new_array, array);
-}
-
-
-// This is part of the old simple-minded splice. We are using it either
-// because the receiver is not an array (so we have no choice) or because we
-// know we are not deleting or moving a lot of elements.
-function SimpleSlice(array, start_i, del_count, len, deleted_elements) {
- for (var i = 0; i < del_count; i++) {
- var index = start_i + i;
- // The spec could also be interpreted such that %HasLocalProperty
- // would be the appropriate test. We follow KJS in consulting the
- // prototype.
- var current = array[index];
- if (!IS_UNDEFINED(current) || index in array)
- deleted_elements[i] = current;
- }
-}
-
-
-function SimpleMove(array, start_i, del_count, len, num_additional_args) {
- if (num_additional_args !== del_count) {
- // Move the existing elements after the elements to be deleted
- // to the right position in the resulting array.
- if (num_additional_args > del_count) {
- for (var i = len - del_count; i > start_i; i--) {
- var from_index = i + del_count - 1;
- var to_index = i + num_additional_args - 1;
- // The spec could also be interpreted such that
- // %HasLocalProperty would be the appropriate test. We follow
- // KJS in consulting the prototype.
- var current = array[from_index];
- if (!IS_UNDEFINED(current) || from_index in array) {
- array[to_index] = current;
- } else {
- delete array[to_index];
- }
- }
- } else {
- for (var i = start_i; i < len - del_count; i++) {
- var from_index = i + del_count;
- var to_index = i + num_additional_args;
- // The spec could also be interpreted such that
- // %HasLocalProperty would be the appropriate test. We follow
- // KJS in consulting the prototype.
- var current = array[from_index];
- if (!IS_UNDEFINED(current) || from_index in array) {
- array[to_index] = current;
- } else {
- delete array[to_index];
- }
- }
- for (var i = len; i > len - del_count + num_additional_args; i--) {
- delete array[i - 1];
- }
- }
- }
-}
-
-
-// -------------------------------------------------------------------
-
-
-function ArrayToString() {
- if (!IS_ARRAY(this)) {
- throw new $TypeError('Array.prototype.toString is not generic');
- }
- return Join(this, this.length, ',', ConvertToString);
-}
-
-
-function ArrayToLocaleString() {
- if (!IS_ARRAY(this)) {
- throw new $TypeError('Array.prototype.toString is not generic');
- }
- return Join(this, this.length, ',', ConvertToLocaleString);
-}
-
-
-function ArrayJoin(separator) {
- if (IS_UNDEFINED(separator)) {
- separator = ',';
- } else if (!IS_STRING(separator)) {
- separator = NonStringToString(separator);
- }
-
- var result = %_FastAsciiArrayJoin(this, separator);
- if (!IS_UNDEFINED(result)) return result;
-
- return Join(this, TO_UINT32(this.length), separator, ConvertToString);
-}
-
-
-// Removes the last element from the array and returns it. See
-// ECMA-262, section 15.4.4.6.
-function ArrayPop() {
- var n = TO_UINT32(this.length);
- if (n == 0) {
- this.length = n;
- return;
- }
- n--;
- var value = this[n];
- this.length = n;
- delete this[n];
- return value;
-}
-
-
-// Appends the arguments to the end of the array and returns the new
-// length of the array. See ECMA-262, section 15.4.4.7.
-function ArrayPush() {
- var n = TO_UINT32(this.length);
- var m = %_ArgumentsLength();
- for (var i = 0; i < m; i++) {
- this[i+n] = %_Arguments(i);
- }
- this.length = n + m;
- return this.length;
-}
-
-
-function ArrayConcat(arg1) { // length == 1
- var arg_count = %_ArgumentsLength();
- var arrays = new InternalArray(1 + arg_count);
- arrays[0] = this;
- for (var i = 0; i < arg_count; i++) {
- arrays[i + 1] = %_Arguments(i);
- }
-
- return %ArrayConcat(arrays);
-}
-
-
-// For implementing reverse() on large, sparse arrays.
-function SparseReverse(array, len) {
- var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
- var high_counter = keys.length - 1;
- var low_counter = 0;
- while (low_counter <= high_counter) {
- var i = keys[low_counter];
- var j = keys[high_counter];
-
- var j_complement = len - j - 1;
- var low, high;
-
- if (j_complement <= i) {
- high = j;
- while (keys[--high_counter] == j);
- low = j_complement;
- }
- if (j_complement >= i) {
- low = i;
- while (keys[++low_counter] == i);
- high = len - i - 1;
- }
-
- var current_i = array[low];
- if (!IS_UNDEFINED(current_i) || low in array) {
- var current_j = array[high];
- if (!IS_UNDEFINED(current_j) || high in array) {
- array[low] = current_j;
- array[high] = current_i;
- } else {
- array[high] = current_i;
- delete array[low];
- }
- } else {
- var current_j = array[high];
- if (!IS_UNDEFINED(current_j) || high in array) {
- array[low] = current_j;
- delete array[high];
- }
- }
- }
-}
-
-
-function ArrayReverse() {
- var j = TO_UINT32(this.length) - 1;
-
- if (UseSparseVariant(this, j, IS_ARRAY(this))) {
- SparseReverse(this, j+1);
- return this;
- }
-
- for (var i = 0; i < j; i++, j--) {
- var current_i = this[i];
- if (!IS_UNDEFINED(current_i) || i in this) {
- var current_j = this[j];
- if (!IS_UNDEFINED(current_j) || j in this) {
- this[i] = current_j;
- this[j] = current_i;
- } else {
- this[j] = current_i;
- delete this[i];
- }
- } else {
- var current_j = this[j];
- if (!IS_UNDEFINED(current_j) || j in this) {
- this[i] = current_j;
- delete this[j];
- }
- }
- }
- return this;
-}
-
-
-function ArrayShift() {
- var len = TO_UINT32(this.length);
-
- if (len === 0) {
- this.length = 0;
- return;
- }
-
- var first = this[0];
-
- if (IS_ARRAY(this))
- SmartMove(this, 0, 1, len, 0);
- else
- SimpleMove(this, 0, 1, len, 0);
-
- this.length = len - 1;
-
- return first;
-}
-
-
-function ArrayUnshift(arg1) { // length == 1
- var len = TO_UINT32(this.length);
- var num_arguments = %_ArgumentsLength();
-
- if (IS_ARRAY(this))
- SmartMove(this, 0, 0, len, num_arguments);
- else
- SimpleMove(this, 0, 0, len, num_arguments);
-
- for (var i = 0; i < num_arguments; i++) {
- this[i] = %_Arguments(i);
- }
-
- this.length = len + num_arguments;
-
- return len + num_arguments;
-}
-
-
-function ArraySlice(start, end) {
- var len = TO_UINT32(this.length);
- var start_i = TO_INTEGER(start);
- var end_i = len;
-
- if (end !== void 0) end_i = TO_INTEGER(end);
-
- if (start_i < 0) {
- start_i += len;
- if (start_i < 0) start_i = 0;
- } else {
- if (start_i > len) start_i = len;
- }
-
- if (end_i < 0) {
- end_i += len;
- if (end_i < 0) end_i = 0;
- } else {
- if (end_i > len) end_i = len;
- }
-
- var result = [];
-
- if (end_i < start_i) return result;
-
- if (IS_ARRAY(this)) {
- SmartSlice(this, start_i, end_i - start_i, len, result);
- } else {
- SimpleSlice(this, start_i, end_i - start_i, len, result);
- }
-
- result.length = end_i - start_i;
-
- return result;
-}
-
-
-function ArraySplice(start, delete_count) {
- var num_arguments = %_ArgumentsLength();
-
- var len = TO_UINT32(this.length);
- var start_i = TO_INTEGER(start);
-
- if (start_i < 0) {
- start_i += len;
- if (start_i < 0) start_i = 0;
- } else {
- if (start_i > len) start_i = len;
- }
-
- // SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
- // given as a request to delete all the elements from the start.
- // And it differs from the case of undefined delete count.
- // This does not follow ECMA-262, but we do the same for
- // compatibility.
- var del_count = 0;
- if (num_arguments == 1) {
- del_count = len - start_i;
- } else {
- del_count = TO_INTEGER(delete_count);
- if (del_count < 0) del_count = 0;
- if (del_count > len - start_i) del_count = len - start_i;
- }
-
- var deleted_elements = [];
- deleted_elements.length = del_count;
-
- // Number of elements to add.
- var num_additional_args = 0;
- if (num_arguments > 2) {
- num_additional_args = num_arguments - 2;
- }
-
- var use_simple_splice = true;
-
- if (IS_ARRAY(this) && num_additional_args !== del_count) {
- // If we are only deleting/moving a few things near the end of the
- // array then the simple version is going to be faster, because it
- // doesn't touch most of the array.
- var estimated_non_hole_elements = %EstimateNumberOfElements(this);
- if (len > 20 && (estimated_non_hole_elements >> 2) < (len - start_i)) {
- use_simple_splice = false;
- }
- }
-
- if (use_simple_splice) {
- SimpleSlice(this, start_i, del_count, len, deleted_elements);
- SimpleMove(this, start_i, del_count, len, num_additional_args);
- } else {
- SmartSlice(this, start_i, del_count, len, deleted_elements);
- SmartMove(this, start_i, del_count, len, num_additional_args);
- }
-
- // Insert the arguments into the resulting array in
- // place of the deleted elements.
- var i = start_i;
- var arguments_index = 2;
- var arguments_length = %_ArgumentsLength();
- while (arguments_index < arguments_length) {
- this[i++] = %_Arguments(arguments_index++);
- }
- this.length = len - del_count + num_additional_args;
-
- // Return the deleted elements.
- return deleted_elements;
-}
-
-
-function ArraySort(comparefn) {
- // In-place QuickSort algorithm.
- // For short (length <= 22) arrays, insertion sort is used for efficiency.
-
- if (!IS_FUNCTION(comparefn)) {
- comparefn = function (x, y) {
- if (x === y) return 0;
- if (%_IsSmi(x) && %_IsSmi(y)) {
- return %SmiLexicographicCompare(x, y);
- }
- x = ToString(x);
- y = ToString(y);
- if (x == y) return 0;
- else return x < y ? -1 : 1;
- };
- }
- var global_receiver = %GetGlobalReceiver();
-
- function InsertionSort(a, from, to) {
- for (var i = from + 1; i < to; i++) {
- var element = a[i];
- for (var j = i - 1; j >= from; j--) {
- var tmp = a[j];
- var order = %_CallFunction(global_receiver, tmp, element, comparefn);
- if (order > 0) {
- a[j + 1] = tmp;
- } else {
- break;
- }
- }
- a[j + 1] = element;
- }
- }
-
- function QuickSort(a, from, to) {
- // Insertion sort is faster for short arrays.
- if (to - from <= 10) {
- InsertionSort(a, from, to);
- return;
- }
- // Find a pivot as the median of first, last and middle element.
- var v0 = a[from];
- var v1 = a[to - 1];
- var middle_index = from + ((to - from) >> 1);
- var v2 = a[middle_index];
- var c01 = %_CallFunction(global_receiver, v0, v1, comparefn);
- if (c01 > 0) {
- // v1 < v0, so swap them.
- var tmp = v0;
- v0 = v1;
- v1 = tmp;
- } // v0 <= v1.
- var c02 = %_CallFunction(global_receiver, v0, v2, comparefn);
- if (c02 >= 0) {
- // v2 <= v0 <= v1.
- var tmp = v0;
- v0 = v2;
- v2 = v1;
- v1 = tmp;
- } else {
- // v0 <= v1 && v0 < v2
- var c12 = %_CallFunction(global_receiver, v1, v2, comparefn);
- if (c12 > 0) {
- // v0 <= v2 < v1
- var tmp = v1;
- v1 = v2;
- v2 = tmp;
- }
- }
- // v0 <= v1 <= v2
- a[from] = v0;
- a[to - 1] = v2;
- var pivot = v1;
- var low_end = from + 1; // Upper bound of elements lower than pivot.
- var high_start = to - 1; // Lower bound of elements greater than pivot.
- a[middle_index] = a[low_end];
- a[low_end] = pivot;
-
- // From low_end to i are elements equal to pivot.
- // From i to high_start are elements that haven't been compared yet.
- partition: for (var i = low_end + 1; i < high_start; i++) {
- var element = a[i];
- var order = %_CallFunction(global_receiver, element, pivot, comparefn);
- if (order < 0) {
- %_SwapElements(a, i, low_end);
- low_end++;
- } else if (order > 0) {
- do {
- high_start--;
- if (high_start == i) break partition;
- var top_elem = a[high_start];
- order = %_CallFunction(global_receiver, top_elem, pivot, comparefn);
- } while (order > 0);
- %_SwapElements(a, i, high_start);
- if (order < 0) {
- %_SwapElements(a, i, low_end);
- low_end++;
- }
- }
- }
- QuickSort(a, from, low_end);
- QuickSort(a, high_start, to);
- }
-
- // Copy elements in the range 0..length from obj's prototype chain
- // to obj itself, if obj has holes. Return one more than the maximal index
- // of a prototype property.
- function CopyFromPrototype(obj, length) {
- var max = 0;
- for (var proto = obj.__proto__; proto; proto = proto.__proto__) {
- var indices = %GetArrayKeys(proto, length);
- if (indices.length > 0) {
- if (indices[0] == -1) {
- // It's an interval.
- var proto_length = indices[1];
- for (var i = 0; i < proto_length; i++) {
- if (!obj.hasOwnProperty(i) && proto.hasOwnProperty(i)) {
- obj[i] = proto[i];
- if (i >= max) { max = i + 1; }
- }
- }
- } else {
- for (var i = 0; i < indices.length; i++) {
- var index = indices[i];
- if (!IS_UNDEFINED(index) &&
- !obj.hasOwnProperty(index) && proto.hasOwnProperty(index)) {
- obj[index] = proto[index];
- if (index >= max) { max = index + 1; }
- }
- }
- }
- }
- }
- return max;
- }
-
- // Set a value of "undefined" on all indices in the range from..to
- // where a prototype of obj has an element. I.e., shadow all prototype
- // elements in that range.
- function ShadowPrototypeElements(obj, from, to) {
- for (var proto = obj.__proto__; proto; proto = proto.__proto__) {
- var indices = %GetArrayKeys(proto, to);
- if (indices.length > 0) {
- if (indices[0] == -1) {
- // It's an interval.
- var proto_length = indices[1];
- for (var i = from; i < proto_length; i++) {
- if (proto.hasOwnProperty(i)) {
- obj[i] = void 0;
- }
- }
- } else {
- for (var i = 0; i < indices.length; i++) {
- var index = indices[i];
- if (!IS_UNDEFINED(index) && from <= index &&
- proto.hasOwnProperty(index)) {
- obj[index] = void 0;
- }
- }
- }
- }
- }
- }
-
- function SafeRemoveArrayHoles(obj) {
- // Copy defined elements from the end to fill in all holes and undefineds
- // in the beginning of the array. Write undefineds and holes at the end
- // after loop is finished.
- var first_undefined = 0;
- var last_defined = length - 1;
- var num_holes = 0;
- while (first_undefined < last_defined) {
- // Find first undefined element.
- while (first_undefined < last_defined &&
- !IS_UNDEFINED(obj[first_undefined])) {
- first_undefined++;
- }
- // Maintain the invariant num_holes = the number of holes in the original
- // array with indices <= first_undefined or > last_defined.
- if (!obj.hasOwnProperty(first_undefined)) {
- num_holes++;
- }
-
- // Find last defined element.
- while (first_undefined < last_defined &&
- IS_UNDEFINED(obj[last_defined])) {
- if (!obj.hasOwnProperty(last_defined)) {
- num_holes++;
- }
- last_defined--;
- }
- if (first_undefined < last_defined) {
- // Fill in hole or undefined.
- obj[first_undefined] = obj[last_defined];
- obj[last_defined] = void 0;
- }
- }
- // If there were any undefineds in the entire array, first_undefined
- // points to one past the last defined element. Make this true if
- // there were no undefineds, as well, so that first_undefined == number
- // of defined elements.
- if (!IS_UNDEFINED(obj[first_undefined])) first_undefined++;
- // Fill in the undefineds and the holes. There may be a hole where
- // an undefined should be and vice versa.
- var i;
- for (i = first_undefined; i < length - num_holes; i++) {
- obj[i] = void 0;
- }
- for (i = length - num_holes; i < length; i++) {
- // For compatability with Webkit, do not expose elements in the prototype.
- if (i in obj.__proto__) {
- obj[i] = void 0;
- } else {
- delete obj[i];
- }
- }
-
- // Return the number of defined elements.
- return first_undefined;
- }
-
- var length = TO_UINT32(this.length);
- if (length < 2) return this;
-
- var is_array = IS_ARRAY(this);
- var max_prototype_element;
- if (!is_array) {
- // For compatibility with JSC, we also sort elements inherited from
- // the prototype chain on non-Array objects.
- // We do this by copying them to this object and sorting only
- // local elements. This is not very efficient, but sorting with
- // inherited elements happens very, very rarely, if at all.
- // The specification allows "implementation dependent" behavior
- // if an element on the prototype chain has an element that
- // might interact with sorting.
- max_prototype_element = CopyFromPrototype(this, length);
- }
-
- var num_non_undefined = %RemoveArrayHoles(this, length);
- if (num_non_undefined == -1) {
- // There were indexed accessors in the array. Move array holes and
- // undefineds to the end using a Javascript function that is safe
- // in the presence of accessors.
- num_non_undefined = SafeRemoveArrayHoles(this);
- }
-
- QuickSort(this, 0, num_non_undefined);
-
- if (!is_array && (num_non_undefined + 1 < max_prototype_element)) {
- // For compatibility with JSC, we shadow any elements in the prototype
- // chain that has become exposed by sort moving a hole to its position.
- ShadowPrototypeElements(this, num_non_undefined, max_prototype_element);
- }
-
- return this;
-}
-
-
-// The following functions cannot be made efficient on sparse arrays while
-// preserving the semantics, since the calls to the receiver function can add
-// or delete elements from the array.
-function ArrayFilter(f, receiver) {
- if (!IS_FUNCTION(f)) {
- throw MakeTypeError('called_non_callable', [ f ]);
- }
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping.
- var length = this.length;
- var result = [];
- var result_length = 0;
- for (var i = 0; i < length; i++) {
- var current = this[i];
- if (!IS_UNDEFINED(current) || i in this) {
- if (f.call(receiver, current, i, this)) {
- result[result_length++] = current;
- }
- }
- }
- return result;
-}
-
-
-function ArrayForEach(f, receiver) {
- if (!IS_FUNCTION(f)) {
- throw MakeTypeError('called_non_callable', [ f ]);
- }
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping.
- var length = TO_UINT32(this.length);
- for (var i = 0; i < length; i++) {
- var current = this[i];
- if (!IS_UNDEFINED(current) || i in this) {
- f.call(receiver, current, i, this);
- }
- }
-}
-
-
-// Executes the function once for each element present in the
-// array until it finds one where callback returns true.
-function ArraySome(f, receiver) {
- if (!IS_FUNCTION(f)) {
- throw MakeTypeError('called_non_callable', [ f ]);
- }
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping.
- var length = TO_UINT32(this.length);
- for (var i = 0; i < length; i++) {
- var current = this[i];
- if (!IS_UNDEFINED(current) || i in this) {
- if (f.call(receiver, current, i, this)) return true;
- }
- }
- return false;
-}
-
-
-function ArrayEvery(f, receiver) {
- if (!IS_FUNCTION(f)) {
- throw MakeTypeError('called_non_callable', [ f ]);
- }
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping.
- var length = TO_UINT32(this.length);
- for (var i = 0; i < length; i++) {
- var current = this[i];
- if (!IS_UNDEFINED(current) || i in this) {
- if (!f.call(receiver, current, i, this)) return false;
- }
- }
- return true;
-}
-
-function ArrayMap(f, receiver) {
- if (!IS_FUNCTION(f)) {
- throw MakeTypeError('called_non_callable', [ f ]);
- }
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping.
- var length = TO_UINT32(this.length);
- var result = new $Array();
- var accumulator = new InternalArray(length);
- for (var i = 0; i < length; i++) {
- var current = this[i];
- if (!IS_UNDEFINED(current) || i in this) {
- accumulator[i] = f.call(receiver, current, i, this);
- }
- }
- %MoveArrayContents(accumulator, result);
- return result;
-}
-
-
-function ArrayIndexOf(element, index) {
- var length = TO_UINT32(this.length);
- if (length == 0) return -1;
- if (IS_UNDEFINED(index)) {
- index = 0;
- } else {
- index = TO_INTEGER(index);
- // If index is negative, index from the end of the array.
- if (index < 0) {
- index = length + index;
- // If index is still negative, search the entire array.
- if (index < 0) index = 0;
- }
- }
- var min = index;
- var max = length;
- if (UseSparseVariant(this, length, IS_ARRAY(this))) {
- var intervals = %GetArrayKeys(this, length);
- if (intervals.length == 2 && intervals[0] < 0) {
- // A single interval.
- var intervalMin = -(intervals[0] + 1);
- var intervalMax = intervalMin + intervals[1];
- if (min < intervalMin) min = intervalMin;
- max = intervalMax; // Capped by length already.
- // Fall through to loop below.
- } else {
- if (intervals.length == 0) return -1;
- // Get all the keys in sorted order.
- var sortedKeys = GetSortedArrayKeys(this, intervals);
- var n = sortedKeys.length;
- var i = 0;
- while (i < n && sortedKeys[i] < index) i++;
- while (i < n) {
- var key = sortedKeys[i];
- if (!IS_UNDEFINED(key) && this[key] === element) return key;
- i++;
- }
- return -1;
- }
- }
- // Lookup through the array.
- if (!IS_UNDEFINED(element)) {
- for (var i = min; i < max; i++) {
- if (this[i] === element) return i;
- }
- return -1;
- }
- // Lookup through the array.
- for (var i = min; i < max; i++) {
- if (IS_UNDEFINED(this[i]) && i in this) {
- return i;
- }
- }
- return -1;
-}
-
-
-function ArrayLastIndexOf(element, index) {
- var length = TO_UINT32(this.length);
- if (length == 0) return -1;
- if (%_ArgumentsLength() < 2) {
- index = length - 1;
- } else {
- index = TO_INTEGER(index);
- // If index is negative, index from end of the array.
- if (index < 0) index += length;
- // If index is still negative, do not search the array.
- if (index < 0) return -1;
- else if (index >= length) index = length - 1;
- }
- var min = 0;
- var max = index;
- if (UseSparseVariant(this, length, IS_ARRAY(this))) {
- var intervals = %GetArrayKeys(this, index + 1);
- if (intervals.length == 2 && intervals[0] < 0) {
- // A single interval.
- var intervalMin = -(intervals[0] + 1);
- var intervalMax = intervalMin + intervals[1];
- if (min < intervalMin) min = intervalMin;
- max = intervalMax; // Capped by index already.
- // Fall through to loop below.
- } else {
- if (intervals.length == 0) return -1;
- // Get all the keys in sorted order.
- var sortedKeys = GetSortedArrayKeys(this, intervals);
- var i = sortedKeys.length - 1;
- while (i >= 0) {
- var key = sortedKeys[i];
- if (!IS_UNDEFINED(key) && this[key] === element) return key;
- i--;
- }
- return -1;
- }
- }
- // Lookup through the array.
- if (!IS_UNDEFINED(element)) {
- for (var i = max; i >= min; i--) {
- if (this[i] === element) return i;
- }
- return -1;
- }
- for (var i = max; i >= min; i--) {
- if (IS_UNDEFINED(this[i]) && i in this) {
- return i;
- }
- }
- return -1;
-}
-
-
-function ArrayReduce(callback, current) {
- if (!IS_FUNCTION(callback)) {
- throw MakeTypeError('called_non_callable', [callback]);
- }
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping.
- var length = this.length;
- var i = 0;
-
- find_initial: if (%_ArgumentsLength() < 2) {
- for (; i < length; i++) {
- current = this[i];
- if (!IS_UNDEFINED(current) || i in this) {
- i++;
- break find_initial;
- }
- }
- throw MakeTypeError('reduce_no_initial', []);
- }
-
- for (; i < length; i++) {
- var element = this[i];
- if (!IS_UNDEFINED(element) || i in this) {
- current = callback.call(null, current, element, i, this);
- }
- }
- return current;
-}
-
-function ArrayReduceRight(callback, current) {
- if (!IS_FUNCTION(callback)) {
- throw MakeTypeError('called_non_callable', [callback]);
- }
- var i = this.length - 1;
-
- find_initial: if (%_ArgumentsLength() < 2) {
- for (; i >= 0; i--) {
- current = this[i];
- if (!IS_UNDEFINED(current) || i in this) {
- i--;
- break find_initial;
- }
- }
- throw MakeTypeError('reduce_no_initial', []);
- }
-
- for (; i >= 0; i--) {
- var element = this[i];
- if (!IS_UNDEFINED(element) || i in this) {
- current = callback.call(null, current, element, i, this);
- }
- }
- return current;
-}
-
-// ES5, 15.4.3.2
-function ArrayIsArray(obj) {
- return IS_ARRAY(obj);
-}
-
-
-// -------------------------------------------------------------------
-function SetupArray() {
- // Setup non-enumerable constructor property on the Array.prototype
- // object.
- %SetProperty($Array.prototype, "constructor", $Array, DONT_ENUM);
-
- // Setup non-enumerable functions on the Array object.
- InstallFunctions($Array, DONT_ENUM, $Array(
- "isArray", ArrayIsArray
- ));
-
- var specialFunctions = %SpecialArrayFunctions({});
-
- function getFunction(name, jsBuiltin, len) {
- var f = jsBuiltin;
- if (specialFunctions.hasOwnProperty(name)) {
- f = specialFunctions[name];
- }
- if (!IS_UNDEFINED(len)) {
- %FunctionSetLength(f, len);
- }
- return f;
- }
-
- // Setup non-enumerable functions of the Array.prototype object and
- // set their names.
- // Manipulate the length of some of the functions to meet
- // expectations set by ECMA-262 or Mozilla.
- InstallFunctionsOnHiddenPrototype($Array.prototype, DONT_ENUM, $Array(
- "toString", getFunction("toString", ArrayToString),
- "toLocaleString", getFunction("toLocaleString", ArrayToLocaleString),
- "join", getFunction("join", ArrayJoin),
- "pop", getFunction("pop", ArrayPop),
- "push", getFunction("push", ArrayPush, 1),
- "concat", getFunction("concat", ArrayConcat, 1),
- "reverse", getFunction("reverse", ArrayReverse),
- "shift", getFunction("shift", ArrayShift),
- "unshift", getFunction("unshift", ArrayUnshift, 1),
- "slice", getFunction("slice", ArraySlice, 2),
- "splice", getFunction("splice", ArraySplice, 2),
- "sort", getFunction("sort", ArraySort),
- "filter", getFunction("filter", ArrayFilter, 1),
- "forEach", getFunction("forEach", ArrayForEach, 1),
- "some", getFunction("some", ArraySome, 1),
- "every", getFunction("every", ArrayEvery, 1),
- "map", getFunction("map", ArrayMap, 1),
- "indexOf", getFunction("indexOf", ArrayIndexOf, 1),
- "lastIndexOf", getFunction("lastIndexOf", ArrayLastIndexOf, 1),
- "reduce", getFunction("reduce", ArrayReduce, 1),
- "reduceRight", getFunction("reduceRight", ArrayReduceRight, 1)
- ));
-
- %FinishArrayPrototypeSetup($Array.prototype);
-
- // The internal Array prototype doesn't need to be fancy, since it's never
- // exposed to user code, so no hidden prototypes or DONT_ENUM attributes
- // are necessary.
- // The null __proto__ ensures that we never inherit any user created
- // getters or setters from, e.g., Object.prototype.
- InternalArray.prototype.__proto__ = null;
- // Adding only the functions that are actually used, and a toString.
- InternalArray.prototype.join = getFunction("join", ArrayJoin);
- InternalArray.prototype.pop = getFunction("pop", ArrayPop);
- InternalArray.prototype.push = getFunction("push", ArrayPush);
- InternalArray.prototype.toString = function() {
- return "Internal Array, length " + this.length;
- };
-}
-
-
-SetupArray();
diff --git a/src/3rdparty/v8/src/assembler.cc b/src/3rdparty/v8/src/assembler.cc
deleted file mode 100644
index ff48772..0000000
--- a/src/3rdparty/v8/src/assembler.cc
+++ /dev/null
@@ -1,1067 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-
-#include "v8.h"
-
-#include "arguments.h"
-#include "deoptimizer.h"
-#include "execution.h"
-#include "ic-inl.h"
-#include "factory.h"
-#include "runtime.h"
-#include "runtime-profiler.h"
-#include "serialize.h"
-#include "stub-cache.h"
-#include "regexp-stack.h"
-#include "ast.h"
-#include "regexp-macro-assembler.h"
-#include "platform.h"
-// Include native regexp-macro-assembler.
-#ifndef V8_INTERPRETED_REGEXP
-#if V8_TARGET_ARCH_IA32
-#include "ia32/regexp-macro-assembler-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/regexp-macro-assembler-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/regexp-macro-assembler-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/regexp-macro-assembler-mips.h"
-#else // Unknown architecture.
-#error "Unknown architecture."
-#endif // Target architecture.
-#endif // V8_INTERPRETED_REGEXP
-
-namespace v8 {
-namespace internal {
-
-
-const double DoubleConstant::min_int = kMinInt;
-const double DoubleConstant::one_half = 0.5;
-const double DoubleConstant::minus_zero = -0.0;
-const double DoubleConstant::nan = OS::nan_value();
-const double DoubleConstant::negative_infinity = -V8_INFINITY;
-const char* RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
-
-// -----------------------------------------------------------------------------
-// Implementation of Label
-
-int Label::pos() const {
- if (pos_ < 0) return -pos_ - 1;
- if (pos_ > 0) return pos_ - 1;
- UNREACHABLE();
- return 0;
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfoWriter and RelocIterator
-//
-// Encoding
-//
-// The most common modes are given single-byte encodings. Also, it is
-// easy to identify the type of reloc info and skip unwanted modes in
-// an iteration.
-//
-// The encoding relies on the fact that there are less than 14
-// different relocation modes.
-//
-// embedded_object: [6 bits pc delta] 00
-//
-// code_taget: [6 bits pc delta] 01
-//
-// position: [6 bits pc delta] 10,
-// [7 bits signed data delta] 0
-//
-// statement_position: [6 bits pc delta] 10,
-// [7 bits signed data delta] 1
-//
-// any nondata mode: 00 [4 bits rmode] 11, // rmode: 0..13 only
-// 00 [6 bits pc delta]
-//
-// pc-jump: 00 1111 11,
-// 00 [6 bits pc delta]
-//
-// pc-jump: 01 1111 11,
-// (variable length) 7 - 26 bit pc delta, written in chunks of 7
-// bits, the lowest 7 bits written first.
-//
-// data-jump + pos: 00 1110 11,
-// signed intptr_t, lowest byte written first
-//
-// data-jump + st.pos: 01 1110 11,
-// signed intptr_t, lowest byte written first
-//
-// data-jump + comm.: 10 1110 11,
-// signed intptr_t, lowest byte written first
-//
-const int kMaxRelocModes = 14;
-
-const int kTagBits = 2;
-const int kTagMask = (1 << kTagBits) - 1;
-const int kExtraTagBits = 4;
-const int kPositionTypeTagBits = 1;
-const int kSmallDataBits = kBitsPerByte - kPositionTypeTagBits;
-
-const int kEmbeddedObjectTag = 0;
-const int kCodeTargetTag = 1;
-const int kPositionTag = 2;
-const int kDefaultTag = 3;
-
-const int kPCJumpTag = (1 << kExtraTagBits) - 1;
-
-const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
-const int kSmallPCDeltaMask = (1 << kSmallPCDeltaBits) - 1;
-const int RelocInfo::kMaxSmallPCDelta = kSmallPCDeltaMask;
-
-const int kVariableLengthPCJumpTopTag = 1;
-const int kChunkBits = 7;
-const int kChunkMask = (1 << kChunkBits) - 1;
-const int kLastChunkTagBits = 1;
-const int kLastChunkTagMask = 1;
-const int kLastChunkTag = 1;
-
-
-const int kDataJumpTag = kPCJumpTag - 1;
-
-const int kNonstatementPositionTag = 0;
-const int kStatementPositionTag = 1;
-const int kCommentTag = 2;
-
-
-uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
- // Return if the pc_delta can fit in kSmallPCDeltaBits bits.
- // Otherwise write a variable length PC jump for the bits that do
- // not fit in the kSmallPCDeltaBits bits.
- if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta;
- WriteExtraTag(kPCJumpTag, kVariableLengthPCJumpTopTag);
- uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
- ASSERT(pc_jump > 0);
- // Write kChunkBits size chunks of the pc_jump.
- for (; pc_jump > 0; pc_jump = pc_jump >> kChunkBits) {
- byte b = pc_jump & kChunkMask;
- *--pos_ = b << kLastChunkTagBits;
- }
- // Tag the last chunk so it can be identified.
- *pos_ = *pos_ | kLastChunkTag;
- // Return the remaining kSmallPCDeltaBits of the pc_delta.
- return pc_delta & kSmallPCDeltaMask;
-}
-
-
-void RelocInfoWriter::WriteTaggedPC(uint32_t pc_delta, int tag) {
- // Write a byte of tagged pc-delta, possibly preceded by var. length pc-jump.
- pc_delta = WriteVariableLengthPCJump(pc_delta);
- *--pos_ = pc_delta << kTagBits | tag;
-}
-
-
-void RelocInfoWriter::WriteTaggedData(intptr_t data_delta, int tag) {
- *--pos_ = static_cast<byte>(data_delta << kPositionTypeTagBits | tag);
-}
-
-
-void RelocInfoWriter::WriteExtraTag(int extra_tag, int top_tag) {
- *--pos_ = static_cast<int>(top_tag << (kTagBits + kExtraTagBits) |
- extra_tag << kTagBits |
- kDefaultTag);
-}
-
-
-void RelocInfoWriter::WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag) {
- // Write two-byte tagged pc-delta, possibly preceded by var. length pc-jump.
- pc_delta = WriteVariableLengthPCJump(pc_delta);
- WriteExtraTag(extra_tag, 0);
- *--pos_ = pc_delta;
-}
-
-
-void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
- WriteExtraTag(kDataJumpTag, top_tag);
- for (int i = 0; i < kIntptrSize; i++) {
- *--pos_ = static_cast<byte>(data_delta);
- // Signed right shift is arithmetic shift. Tested in test-utils.cc.
- data_delta = data_delta >> kBitsPerByte;
- }
-}
-
-
-void RelocInfoWriter::Write(const RelocInfo* rinfo) {
-#ifdef DEBUG
- byte* begin_pos = pos_;
-#endif
- ASSERT(rinfo->pc() - last_pc_ >= 0);
- ASSERT(RelocInfo::NUMBER_OF_MODES <= kMaxRelocModes);
- // Use unsigned delta-encoding for pc.
- uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
- RelocInfo::Mode rmode = rinfo->rmode();
-
- // The two most common modes are given small tags, and usually fit in a byte.
- if (rmode == RelocInfo::EMBEDDED_OBJECT) {
- WriteTaggedPC(pc_delta, kEmbeddedObjectTag);
- } else if (rmode == RelocInfo::CODE_TARGET) {
- WriteTaggedPC(pc_delta, kCodeTargetTag);
- ASSERT(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
- } else if (RelocInfo::IsPosition(rmode)) {
- // Use signed delta-encoding for data.
- intptr_t data_delta = rinfo->data() - last_data_;
- int pos_type_tag = rmode == RelocInfo::POSITION ? kNonstatementPositionTag
- : kStatementPositionTag;
- // Check if data is small enough to fit in a tagged byte.
- // We cannot use is_intn because data_delta is not an int32_t.
- if (data_delta >= -(1 << (kSmallDataBits-1)) &&
- data_delta < 1 << (kSmallDataBits-1)) {
- WriteTaggedPC(pc_delta, kPositionTag);
- WriteTaggedData(data_delta, pos_type_tag);
- last_data_ = rinfo->data();
- } else {
- // Otherwise, use costly encoding.
- WriteExtraTaggedPC(pc_delta, kPCJumpTag);
- WriteExtraTaggedData(data_delta, pos_type_tag);
- last_data_ = rinfo->data();
- }
- } else if (RelocInfo::IsComment(rmode)) {
- // Comments are normally not generated, so we use the costly encoding.
- WriteExtraTaggedPC(pc_delta, kPCJumpTag);
- WriteExtraTaggedData(rinfo->data() - last_data_, kCommentTag);
- last_data_ = rinfo->data();
- ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
- } else {
- // For all other modes we simply use the mode as the extra tag.
- // None of these modes need a data component.
- ASSERT(rmode < kPCJumpTag && rmode < kDataJumpTag);
- WriteExtraTaggedPC(pc_delta, rmode);
- }
- last_pc_ = rinfo->pc();
-#ifdef DEBUG
- ASSERT(begin_pos - pos_ <= kMaxSize);
-#endif
-}
-
-
-inline int RelocIterator::AdvanceGetTag() {
- return *--pos_ & kTagMask;
-}
-
-
-inline int RelocIterator::GetExtraTag() {
- return (*pos_ >> kTagBits) & ((1 << kExtraTagBits) - 1);
-}
-
-
-inline int RelocIterator::GetTopTag() {
- return *pos_ >> (kTagBits + kExtraTagBits);
-}
-
-
-inline void RelocIterator::ReadTaggedPC() {
- rinfo_.pc_ += *pos_ >> kTagBits;
-}
-
-
-inline void RelocIterator::AdvanceReadPC() {
- rinfo_.pc_ += *--pos_;
-}
-
-
-void RelocIterator::AdvanceReadData() {
- intptr_t x = 0;
- for (int i = 0; i < kIntptrSize; i++) {
- x |= static_cast<intptr_t>(*--pos_) << i * kBitsPerByte;
- }
- rinfo_.data_ += x;
-}
-
-
-void RelocIterator::AdvanceReadVariableLengthPCJump() {
- // Read the 32-kSmallPCDeltaBits most significant bits of the
- // pc jump in kChunkBits bit chunks and shift them into place.
- // Stop when the last chunk is encountered.
- uint32_t pc_jump = 0;
- for (int i = 0; i < kIntSize; i++) {
- byte pc_jump_part = *--pos_;
- pc_jump |= (pc_jump_part >> kLastChunkTagBits) << i * kChunkBits;
- if ((pc_jump_part & kLastChunkTagMask) == 1) break;
- }
- // The least significant kSmallPCDeltaBits bits will be added
- // later.
- rinfo_.pc_ += pc_jump << kSmallPCDeltaBits;
-}
-
-
-inline int RelocIterator::GetPositionTypeTag() {
- return *pos_ & ((1 << kPositionTypeTagBits) - 1);
-}
-
-
-inline void RelocIterator::ReadTaggedData() {
- int8_t signed_b = *pos_;
- // Signed right shift is arithmetic shift. Tested in test-utils.cc.
- rinfo_.data_ += signed_b >> kPositionTypeTagBits;
-}
-
-
-inline RelocInfo::Mode RelocIterator::DebugInfoModeFromTag(int tag) {
- if (tag == kStatementPositionTag) {
- return RelocInfo::STATEMENT_POSITION;
- } else if (tag == kNonstatementPositionTag) {
- return RelocInfo::POSITION;
- } else {
- ASSERT(tag == kCommentTag);
- return RelocInfo::COMMENT;
- }
-}
-
-
-void RelocIterator::next() {
- ASSERT(!done());
- // Basically, do the opposite of RelocInfoWriter::Write.
- // Reading of data is as far as possible avoided for unwanted modes,
- // but we must always update the pc.
- //
- // We exit this loop by returning when we find a mode we want.
- while (pos_ > end_) {
- int tag = AdvanceGetTag();
- if (tag == kEmbeddedObjectTag) {
- ReadTaggedPC();
- if (SetMode(RelocInfo::EMBEDDED_OBJECT)) return;
- } else if (tag == kCodeTargetTag) {
- ReadTaggedPC();
- if (SetMode(RelocInfo::CODE_TARGET)) return;
- } else if (tag == kPositionTag) {
- ReadTaggedPC();
- Advance();
- // Check if we want source positions.
- if (mode_mask_ & RelocInfo::kPositionMask) {
- ReadTaggedData();
- if (SetMode(DebugInfoModeFromTag(GetPositionTypeTag()))) return;
- }
- } else {
- ASSERT(tag == kDefaultTag);
- int extra_tag = GetExtraTag();
- if (extra_tag == kPCJumpTag) {
- int top_tag = GetTopTag();
- if (top_tag == kVariableLengthPCJumpTopTag) {
- AdvanceReadVariableLengthPCJump();
- } else {
- AdvanceReadPC();
- }
- } else if (extra_tag == kDataJumpTag) {
- // Check if we want debug modes (the only ones with data).
- if (mode_mask_ & RelocInfo::kDebugMask) {
- int top_tag = GetTopTag();
- AdvanceReadData();
- if (SetMode(DebugInfoModeFromTag(top_tag))) return;
- } else {
- // Otherwise, just skip over the data.
- Advance(kIntptrSize);
- }
- } else {
- AdvanceReadPC();
- if (SetMode(static_cast<RelocInfo::Mode>(extra_tag))) return;
- }
- }
- }
- done_ = true;
-}
-
-
-RelocIterator::RelocIterator(Code* code, int mode_mask) {
- rinfo_.pc_ = code->instruction_start();
- rinfo_.data_ = 0;
- // Relocation info is read backwards.
- pos_ = code->relocation_start() + code->relocation_size();
- end_ = code->relocation_start();
- done_ = false;
- mode_mask_ = mode_mask;
- if (mode_mask_ == 0) pos_ = end_;
- next();
-}
-
-
-RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
- rinfo_.pc_ = desc.buffer;
- rinfo_.data_ = 0;
- // Relocation info is read backwards.
- pos_ = desc.buffer + desc.buffer_size;
- end_ = pos_ - desc.reloc_size;
- done_ = false;
- mode_mask_ = mode_mask;
- if (mode_mask_ == 0) pos_ = end_;
- next();
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo
-
-
-#ifdef ENABLE_DISASSEMBLER
-const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
- switch (rmode) {
- case RelocInfo::NONE:
- return "no reloc";
- case RelocInfo::EMBEDDED_OBJECT:
- return "embedded object";
- case RelocInfo::CONSTRUCT_CALL:
- return "code target (js construct call)";
- case RelocInfo::CODE_TARGET_CONTEXT:
- return "code target (context)";
- case RelocInfo::DEBUG_BREAK:
-#ifndef ENABLE_DEBUGGER_SUPPORT
- UNREACHABLE();
-#endif
- return "debug break";
- case RelocInfo::CODE_TARGET:
- return "code target";
- case RelocInfo::GLOBAL_PROPERTY_CELL:
- return "global property cell";
- case RelocInfo::RUNTIME_ENTRY:
- return "runtime entry";
- case RelocInfo::JS_RETURN:
- return "js return";
- case RelocInfo::COMMENT:
- return "comment";
- case RelocInfo::POSITION:
- return "position";
- case RelocInfo::STATEMENT_POSITION:
- return "statement position";
- case RelocInfo::EXTERNAL_REFERENCE:
- return "external reference";
- case RelocInfo::INTERNAL_REFERENCE:
- return "internal reference";
- case RelocInfo::DEBUG_BREAK_SLOT:
-#ifndef ENABLE_DEBUGGER_SUPPORT
- UNREACHABLE();
-#endif
- return "debug break slot";
- case RelocInfo::NUMBER_OF_MODES:
- UNREACHABLE();
- return "number_of_modes";
- }
- return "unknown relocation type";
-}
-
-
-void RelocInfo::Print(FILE* out) {
- PrintF(out, "%p %s", pc_, RelocModeName(rmode_));
- if (IsComment(rmode_)) {
- PrintF(out, " (%s)", reinterpret_cast<char*>(data_));
- } else if (rmode_ == EMBEDDED_OBJECT) {
- PrintF(out, " (");
- target_object()->ShortPrint(out);
- PrintF(out, ")");
- } else if (rmode_ == EXTERNAL_REFERENCE) {
- ExternalReferenceEncoder ref_encoder;
- PrintF(out, " (%s) (%p)",
- ref_encoder.NameOfAddress(*target_reference_address()),
- *target_reference_address());
- } else if (IsCodeTarget(rmode_)) {
- Code* code = Code::GetCodeFromTargetAddress(target_address());
- PrintF(out, " (%s) (%p)", Code::Kind2String(code->kind()),
- target_address());
- } else if (IsPosition(rmode_)) {
- PrintF(out, " (%" V8_PTR_PREFIX "d)", data());
- } else if (rmode_ == RelocInfo::RUNTIME_ENTRY) {
- // Depotimization bailouts are stored as runtime entries.
- int id = Deoptimizer::GetDeoptimizationId(
- target_address(), Deoptimizer::EAGER);
- if (id != Deoptimizer::kNotDeoptimizationEntry) {
- PrintF(out, " (deoptimization bailout %d)", id);
- }
- }
-
- PrintF(out, "\n");
-}
-#endif // ENABLE_DISASSEMBLER
-
-
-#ifdef DEBUG
-void RelocInfo::Verify() {
- switch (rmode_) {
- case EMBEDDED_OBJECT:
- Object::VerifyPointer(target_object());
- break;
- case GLOBAL_PROPERTY_CELL:
- Object::VerifyPointer(target_cell());
- break;
- case DEBUG_BREAK:
-#ifndef ENABLE_DEBUGGER_SUPPORT
- UNREACHABLE();
- break;
-#endif
- case CONSTRUCT_CALL:
- case CODE_TARGET_CONTEXT:
- case CODE_TARGET: {
- // convert inline target address to code object
- Address addr = target_address();
- ASSERT(addr != NULL);
- // Check that we can find the right code object.
- Code* code = Code::GetCodeFromTargetAddress(addr);
- Object* found = HEAP->FindCodeObject(addr);
- ASSERT(found->IsCode());
- ASSERT(code->address() == HeapObject::cast(found)->address());
- break;
- }
- case RUNTIME_ENTRY:
- case JS_RETURN:
- case COMMENT:
- case POSITION:
- case STATEMENT_POSITION:
- case EXTERNAL_REFERENCE:
- case INTERNAL_REFERENCE:
- case DEBUG_BREAK_SLOT:
- case NONE:
- break;
- case NUMBER_OF_MODES:
- UNREACHABLE();
- break;
- }
-}
-#endif // DEBUG
-
-
-// -----------------------------------------------------------------------------
-// Implementation of ExternalReference
-
-ExternalReference::ExternalReference(Builtins::CFunctionId id, Isolate* isolate)
- : address_(Redirect(isolate, Builtins::c_function_address(id))) {}
-
-
-ExternalReference::ExternalReference(
- ApiFunction* fun,
- Type type = ExternalReference::BUILTIN_CALL,
- Isolate* isolate = NULL)
- : address_(Redirect(isolate, fun->address(), type)) {}
-
-
-ExternalReference::ExternalReference(Builtins::Name name, Isolate* isolate)
- : address_(isolate->builtins()->builtin_address(name)) {}
-
-
-ExternalReference::ExternalReference(Runtime::FunctionId id,
- Isolate* isolate)
- : address_(Redirect(isolate, Runtime::FunctionForId(id)->entry)) {}
-
-
-ExternalReference::ExternalReference(const Runtime::Function* f,
- Isolate* isolate)
- : address_(Redirect(isolate, f->entry)) {}
-
-
-ExternalReference ExternalReference::isolate_address() {
- return ExternalReference(Isolate::Current());
-}
-
-
-ExternalReference::ExternalReference(const IC_Utility& ic_utility,
- Isolate* isolate)
- : address_(Redirect(isolate, ic_utility.address())) {}
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-ExternalReference::ExternalReference(const Debug_Address& debug_address,
- Isolate* isolate)
- : address_(debug_address.address(isolate)) {}
-#endif
-
-ExternalReference::ExternalReference(StatsCounter* counter)
- : address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
-
-
-ExternalReference::ExternalReference(Isolate::AddressId id, Isolate* isolate)
- : address_(isolate->get_address_from_id(id)) {}
-
-
-ExternalReference::ExternalReference(const SCTableReference& table_ref)
- : address_(table_ref.address()) {}
-
-
-ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(Runtime::PerformGC)));
-}
-
-
-ExternalReference ExternalReference::fill_heap_number_with_random_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate,
- FUNCTION_ADDR(V8::FillHeapNumberWithRandom)));
-}
-
-
-ExternalReference ExternalReference::delete_handle_scope_extensions(
- Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate,
- FUNCTION_ADDR(HandleScope::DeleteExtensions)));
-}
-
-
-ExternalReference ExternalReference::random_uint32_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(V8::Random)));
-}
-
-
-ExternalReference ExternalReference::transcendental_cache_array_address(
- Isolate* isolate) {
- return ExternalReference(
- isolate->transcendental_cache()->cache_array_address());
-}
-
-
-ExternalReference ExternalReference::new_deoptimizer_function(
- Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(Deoptimizer::New)));
-}
-
-
-ExternalReference ExternalReference::compute_output_frames_function(
- Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
-}
-
-
-ExternalReference ExternalReference::global_contexts_list(Isolate* isolate) {
- return ExternalReference(isolate->heap()->global_contexts_list_address());
-}
-
-
-ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) {
- return ExternalReference(isolate->keyed_lookup_cache()->keys_address());
-}
-
-
-ExternalReference ExternalReference::keyed_lookup_cache_field_offsets(
- Isolate* isolate) {
- return ExternalReference(
- isolate->keyed_lookup_cache()->field_offsets_address());
-}
-
-
-ExternalReference ExternalReference::the_hole_value_location(Isolate* isolate) {
- return ExternalReference(isolate->factory()->the_hole_value().location());
-}
-
-
-ExternalReference ExternalReference::arguments_marker_location(
- Isolate* isolate) {
- return ExternalReference(isolate->factory()->arguments_marker().location());
-}
-
-
-ExternalReference ExternalReference::roots_address(Isolate* isolate) {
- return ExternalReference(isolate->heap()->roots_address());
-}
-
-
-ExternalReference ExternalReference::address_of_stack_limit(Isolate* isolate) {
- return ExternalReference(isolate->stack_guard()->address_of_jslimit());
-}
-
-
-ExternalReference ExternalReference::address_of_real_stack_limit(
- Isolate* isolate) {
- return ExternalReference(isolate->stack_guard()->address_of_real_jslimit());
-}
-
-
-ExternalReference ExternalReference::address_of_regexp_stack_limit(
- Isolate* isolate) {
- return ExternalReference(isolate->regexp_stack()->limit_address());
-}
-
-
-ExternalReference ExternalReference::new_space_start(Isolate* isolate) {
- return ExternalReference(isolate->heap()->NewSpaceStart());
-}
-
-
-ExternalReference ExternalReference::new_space_mask(Isolate* isolate) {
- Address mask = reinterpret_cast<Address>(isolate->heap()->NewSpaceMask());
- return ExternalReference(mask);
-}
-
-
-ExternalReference ExternalReference::new_space_allocation_top_address(
- Isolate* isolate) {
- return ExternalReference(isolate->heap()->NewSpaceAllocationTopAddress());
-}
-
-
-ExternalReference ExternalReference::heap_always_allocate_scope_depth(
- Isolate* isolate) {
- Heap* heap = isolate->heap();
- return ExternalReference(heap->always_allocate_scope_depth_address());
-}
-
-
-ExternalReference ExternalReference::new_space_allocation_limit_address(
- Isolate* isolate) {
- return ExternalReference(isolate->heap()->NewSpaceAllocationLimitAddress());
-}
-
-
-ExternalReference ExternalReference::handle_scope_level_address() {
- return ExternalReference(HandleScope::current_level_address());
-}
-
-
-ExternalReference ExternalReference::handle_scope_next_address() {
- return ExternalReference(HandleScope::current_next_address());
-}
-
-
-ExternalReference ExternalReference::handle_scope_limit_address() {
- return ExternalReference(HandleScope::current_limit_address());
-}
-
-
-ExternalReference ExternalReference::scheduled_exception_address(
- Isolate* isolate) {
- return ExternalReference(isolate->scheduled_exception_address());
-}
-
-
-ExternalReference ExternalReference::address_of_min_int() {
- return ExternalReference(reinterpret_cast<void*>(
- const_cast<double*>(&DoubleConstant::min_int)));
-}
-
-
-ExternalReference ExternalReference::address_of_one_half() {
- return ExternalReference(reinterpret_cast<void*>(
- const_cast<double*>(&DoubleConstant::one_half)));
-}
-
-
-ExternalReference ExternalReference::address_of_minus_zero() {
- return ExternalReference(reinterpret_cast<void*>(
- const_cast<double*>(&DoubleConstant::minus_zero)));
-}
-
-
-ExternalReference ExternalReference::address_of_negative_infinity() {
- return ExternalReference(reinterpret_cast<void*>(
- const_cast<double*>(&DoubleConstant::negative_infinity)));
-}
-
-
-ExternalReference ExternalReference::address_of_nan() {
- return ExternalReference(reinterpret_cast<void*>(
- const_cast<double*>(&DoubleConstant::nan)));
-}
-
-
-#ifndef V8_INTERPRETED_REGEXP
-
-ExternalReference ExternalReference::re_check_stack_guard_state(
- Isolate* isolate) {
- Address function;
-#ifdef V8_TARGET_ARCH_X64
- function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
-#elif V8_TARGET_ARCH_IA32
- function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
-#elif V8_TARGET_ARCH_ARM
- function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
-#elif V8_TARGET_ARCH_MIPS
- function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
-#else
- UNREACHABLE();
-#endif
- return ExternalReference(Redirect(isolate, function));
-}
-
-ExternalReference ExternalReference::re_grow_stack(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack)));
-}
-
-ExternalReference ExternalReference::re_case_insensitive_compare_uc16(
- Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate,
- FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
-}
-
-ExternalReference ExternalReference::re_word_character_map() {
- return ExternalReference(
- NativeRegExpMacroAssembler::word_character_map_address());
-}
-
-ExternalReference ExternalReference::address_of_static_offsets_vector(
- Isolate* isolate) {
- return ExternalReference(
- OffsetsVector::static_offsets_vector_address(isolate));
-}
-
-ExternalReference ExternalReference::address_of_regexp_stack_memory_address(
- Isolate* isolate) {
- return ExternalReference(
- isolate->regexp_stack()->memory_address());
-}
-
-ExternalReference ExternalReference::address_of_regexp_stack_memory_size(
- Isolate* isolate) {
- return ExternalReference(isolate->regexp_stack()->memory_size_address());
-}
-
-#endif // V8_INTERPRETED_REGEXP
-
-
-static double add_two_doubles(double x, double y) {
- return x + y;
-}
-
-
-static double sub_two_doubles(double x, double y) {
- return x - y;
-}
-
-
-static double mul_two_doubles(double x, double y) {
- return x * y;
-}
-
-
-static double div_two_doubles(double x, double y) {
- return x / y;
-}
-
-
-static double mod_two_doubles(double x, double y) {
- return modulo(x, y);
-}
-
-
-static double math_sin_double(double x) {
- return sin(x);
-}
-
-
-static double math_cos_double(double x) {
- return cos(x);
-}
-
-
-static double math_log_double(double x) {
- return log(x);
-}
-
-
-ExternalReference ExternalReference::math_sin_double_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(math_sin_double),
- FP_RETURN_CALL));
-}
-
-
-ExternalReference ExternalReference::math_cos_double_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(math_cos_double),
- FP_RETURN_CALL));
-}
-
-
-ExternalReference ExternalReference::math_log_double_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(math_log_double),
- FP_RETURN_CALL));
-}
-
-
-// Helper function to compute x^y, where y is known to be an
-// integer. Uses binary decomposition to limit the number of
-// multiplications; see the discussion in "Hacker's Delight" by Henry
-// S. Warren, Jr., figure 11-6, page 213.
-double power_double_int(double x, int y) {
- double m = (y < 0) ? 1 / x : x;
- unsigned n = (y < 0) ? -y : y;
- double p = 1;
- while (n != 0) {
- if ((n & 1) != 0) p *= m;
- m *= m;
- if ((n & 2) != 0) p *= m;
- m *= m;
- n >>= 2;
- }
- return p;
-}
-
-
-double power_double_double(double x, double y) {
- int y_int = static_cast<int>(y);
- if (y == y_int) {
- return power_double_int(x, y_int); // Returns 1.0 for exponent 0.
- }
- if (!isinf(x)) {
- if (y == 0.5) return sqrt(x + 0.0); // -0 must be converted to +0.
- if (y == -0.5) return 1.0 / sqrt(x + 0.0);
- }
- if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
- return OS::nan_value();
- }
- return pow(x, y);
-}
-
-
-ExternalReference ExternalReference::power_double_double_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(power_double_double),
- FP_RETURN_CALL));
-}
-
-
-ExternalReference ExternalReference::power_double_int_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(power_double_int),
- FP_RETURN_CALL));
-}
-
-
-static int native_compare_doubles(double y, double x) {
- if (x == y) return EQUAL;
- return x < y ? LESS : GREATER;
-}
-
-
-ExternalReference ExternalReference::double_fp_operation(
- Token::Value operation, Isolate* isolate) {
- typedef double BinaryFPOperation(double x, double y);
- BinaryFPOperation* function = NULL;
- switch (operation) {
- case Token::ADD:
- function = &add_two_doubles;
- break;
- case Token::SUB:
- function = &sub_two_doubles;
- break;
- case Token::MUL:
- function = &mul_two_doubles;
- break;
- case Token::DIV:
- function = &div_two_doubles;
- break;
- case Token::MOD:
- function = &mod_two_doubles;
- break;
- default:
- UNREACHABLE();
- }
- // Passing true as 2nd parameter indicates that they return an fp value.
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(function),
- FP_RETURN_CALL));
-}
-
-
-ExternalReference ExternalReference::compare_doubles(Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(native_compare_doubles),
- BUILTIN_CALL));
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-ExternalReference ExternalReference::debug_break(Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(Debug_Break)));
-}
-
-
-ExternalReference ExternalReference::debug_step_in_fp_address(
- Isolate* isolate) {
- return ExternalReference(isolate->debug()->step_in_fp_addr());
-}
-#endif
-
-
-void PositionsRecorder::RecordPosition(int pos) {
- ASSERT(pos != RelocInfo::kNoPosition);
- ASSERT(pos >= 0);
- state_.current_position = pos;
-#ifdef ENABLE_GDB_JIT_INTERFACE
- if (gdbjit_lineinfo_ != NULL) {
- gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, false);
- }
-#endif
-}
-
-
-void PositionsRecorder::RecordStatementPosition(int pos) {
- ASSERT(pos != RelocInfo::kNoPosition);
- ASSERT(pos >= 0);
- state_.current_statement_position = pos;
-#ifdef ENABLE_GDB_JIT_INTERFACE
- if (gdbjit_lineinfo_ != NULL) {
- gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, true);
- }
-#endif
-}
-
-
-bool PositionsRecorder::WriteRecordedPositions() {
- bool written = false;
-
- // Write the statement position if it is different from what was written last
- // time.
- if (state_.current_statement_position != state_.written_statement_position) {
- EnsureSpace ensure_space(assembler_);
- assembler_->RecordRelocInfo(RelocInfo::STATEMENT_POSITION,
- state_.current_statement_position);
- state_.written_statement_position = state_.current_statement_position;
- written = true;
- }
-
- // Write the position if it is different from what was written last time and
- // also different from the written statement position.
- if (state_.current_position != state_.written_position &&
- state_.current_position != state_.written_statement_position) {
- EnsureSpace ensure_space(assembler_);
- assembler_->RecordRelocInfo(RelocInfo::POSITION, state_.current_position);
- state_.written_position = state_.current_position;
- written = true;
- }
-
- // Return whether something was written.
- return written;
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/assembler.h b/src/3rdparty/v8/src/assembler.h
deleted file mode 100644
index 62fe04d..0000000
--- a/src/3rdparty/v8/src/assembler.h
+++ /dev/null
@@ -1,823 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-
-#ifndef V8_ASSEMBLER_H_
-#define V8_ASSEMBLER_H_
-
-#include "gdb-jit.h"
-#include "runtime.h"
-#include "token.h"
-
-namespace v8 {
-namespace internal {
-
-
-// -----------------------------------------------------------------------------
-// Platform independent assembler base class.
-
-class AssemblerBase: public Malloced {
- public:
- explicit AssemblerBase(Isolate* isolate) : isolate_(isolate) {}
-
- Isolate* isolate() const { return isolate_; }
-
- private:
- Isolate* isolate_;
-};
-
-// -----------------------------------------------------------------------------
-// Common double constants.
-
-class DoubleConstant: public AllStatic {
- public:
- static const double min_int;
- static const double one_half;
- static const double minus_zero;
- static const double negative_infinity;
- static const double nan;
-};
-
-
-// -----------------------------------------------------------------------------
-// Labels represent pc locations; they are typically jump or call targets.
-// After declaration, a label can be freely used to denote known or (yet)
-// unknown pc location. Assembler::bind() is used to bind a label to the
-// current pc. A label can be bound only once.
-
-class Label BASE_EMBEDDED {
- public:
- INLINE(Label()) { Unuse(); }
- INLINE(~Label()) { ASSERT(!is_linked()); }
-
- INLINE(void Unuse()) { pos_ = 0; }
-
- INLINE(bool is_bound() const) { return pos_ < 0; }
- INLINE(bool is_unused() const) { return pos_ == 0; }
- INLINE(bool is_linked() const) { return pos_ > 0; }
-
- // Returns the position of bound or linked labels. Cannot be used
- // for unused labels.
- int pos() const;
-
- private:
- // pos_ encodes both the binding state (via its sign)
- // and the binding position (via its value) of a label.
- //
- // pos_ < 0 bound label, pos() returns the jump target position
- // pos_ == 0 unused label
- // pos_ > 0 linked label, pos() returns the last reference position
- int pos_;
-
- void bind_to(int pos) {
- pos_ = -pos - 1;
- ASSERT(is_bound());
- }
- void link_to(int pos) {
- pos_ = pos + 1;
- ASSERT(is_linked());
- }
-
- friend class Assembler;
- friend class RegexpAssembler;
- friend class Displacement;
- friend class ShadowTarget;
- friend class RegExpMacroAssemblerIrregexp;
-};
-
-
-// -----------------------------------------------------------------------------
-// NearLabels are labels used for short jumps (in Intel jargon).
-// NearLabels should be used if it can be guaranteed that the jump range is
-// within -128 to +127. We already use short jumps when jumping backwards,
-// so using a NearLabel will only have performance impact if used for forward
-// jumps.
-class NearLabel BASE_EMBEDDED {
- public:
- NearLabel() { Unuse(); }
- ~NearLabel() { ASSERT(!is_linked()); }
-
- void Unuse() {
- pos_ = -1;
- unresolved_branches_ = 0;
-#ifdef DEBUG
- for (int i = 0; i < kMaxUnresolvedBranches; i++) {
- unresolved_positions_[i] = -1;
- }
-#endif
- }
-
- int pos() {
- ASSERT(is_bound());
- return pos_;
- }
-
- bool is_bound() { return pos_ >= 0; }
- bool is_linked() { return !is_bound() && unresolved_branches_ > 0; }
- bool is_unused() { return !is_bound() && unresolved_branches_ == 0; }
-
- void bind_to(int position) {
- ASSERT(!is_bound());
- pos_ = position;
- }
-
- void link_to(int position) {
- ASSERT(!is_bound());
- ASSERT(unresolved_branches_ < kMaxUnresolvedBranches);
- unresolved_positions_[unresolved_branches_++] = position;
- }
-
- private:
- static const int kMaxUnresolvedBranches = 8;
- int pos_;
- int unresolved_branches_;
- int unresolved_positions_[kMaxUnresolvedBranches];
-
- friend class Assembler;
-};
-
-
-// -----------------------------------------------------------------------------
-// Relocation information
-
-
-// Relocation information consists of the address (pc) of the datum
-// to which the relocation information applies, the relocation mode
-// (rmode), and an optional data field. The relocation mode may be
-// "descriptive" and not indicate a need for relocation, but simply
-// describe a property of the datum. Such rmodes are useful for GC
-// and nice disassembly output.
-
-class RelocInfo BASE_EMBEDDED {
- public:
- // The constant kNoPosition is used with the collecting of source positions
- // in the relocation information. Two types of source positions are collected
- // "position" (RelocMode position) and "statement position" (RelocMode
- // statement_position). The "position" is collected at places in the source
- // code which are of interest when making stack traces to pin-point the source
- // location of a stack frame as close as possible. The "statement position" is
- // collected at the beginning at each statement, and is used to indicate
- // possible break locations. kNoPosition is used to indicate an
- // invalid/uninitialized position value.
- static const int kNoPosition = -1;
-
- // This string is used to add padding comments to the reloc info in cases
- // where we are not sure to have enough space for patching in during
- // lazy deoptimization. This is the case if we have indirect calls for which
- // we do not normally record relocation info.
- static const char* kFillerCommentString;
-
- // The minimum size of a comment is equal to three bytes for the extra tagged
- // pc + the tag for the data, and kPointerSize for the actual pointer to the
- // comment.
- static const int kMinRelocCommentSize = 3 + kPointerSize;
-
- // The maximum size for a call instruction including pc-jump.
- static const int kMaxCallSize = 6;
-
- // The maximum pc delta that will use the short encoding.
- static const int kMaxSmallPCDelta;
-
- enum Mode {
- // Please note the order is important (see IsCodeTarget, IsGCRelocMode).
- CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.
- CODE_TARGET_CONTEXT, // Code target used for contextual loads and stores.
- DEBUG_BREAK, // Code target for the debugger statement.
- CODE_TARGET, // Code target which is not any of the above.
- EMBEDDED_OBJECT,
- GLOBAL_PROPERTY_CELL,
-
- // Everything after runtime_entry (inclusive) is not GC'ed.
- RUNTIME_ENTRY,
- JS_RETURN, // Marks start of the ExitJSFrame code.
- COMMENT,
- POSITION, // See comment for kNoPosition above.
- STATEMENT_POSITION, // See comment for kNoPosition above.
- DEBUG_BREAK_SLOT, // Additional code inserted for debug break slot.
- EXTERNAL_REFERENCE, // The address of an external C++ function.
- INTERNAL_REFERENCE, // An address inside the same function.
-
- // add more as needed
- // Pseudo-types
- NUMBER_OF_MODES, // must be no greater than 14 - see RelocInfoWriter
- NONE, // never recorded
- LAST_CODE_ENUM = CODE_TARGET,
- LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL
- };
-
-
- RelocInfo() {}
- RelocInfo(byte* pc, Mode rmode, intptr_t data)
- : pc_(pc), rmode_(rmode), data_(data) {
- }
-
- static inline bool IsConstructCall(Mode mode) {
- return mode == CONSTRUCT_CALL;
- }
- static inline bool IsCodeTarget(Mode mode) {
- return mode <= LAST_CODE_ENUM;
- }
- // Is the relocation mode affected by GC?
- static inline bool IsGCRelocMode(Mode mode) {
- return mode <= LAST_GCED_ENUM;
- }
- static inline bool IsJSReturn(Mode mode) {
- return mode == JS_RETURN;
- }
- static inline bool IsComment(Mode mode) {
- return mode == COMMENT;
- }
- static inline bool IsPosition(Mode mode) {
- return mode == POSITION || mode == STATEMENT_POSITION;
- }
- static inline bool IsStatementPosition(Mode mode) {
- return mode == STATEMENT_POSITION;
- }
- static inline bool IsExternalReference(Mode mode) {
- return mode == EXTERNAL_REFERENCE;
- }
- static inline bool IsInternalReference(Mode mode) {
- return mode == INTERNAL_REFERENCE;
- }
- static inline bool IsDebugBreakSlot(Mode mode) {
- return mode == DEBUG_BREAK_SLOT;
- }
- static inline int ModeMask(Mode mode) { return 1 << mode; }
-
- // Accessors
- byte* pc() const { return pc_; }
- void set_pc(byte* pc) { pc_ = pc; }
- Mode rmode() const { return rmode_; }
- intptr_t data() const { return data_; }
-
- // Apply a relocation by delta bytes
- INLINE(void apply(intptr_t delta));
-
- // Is the pointer this relocation info refers to coded like a plain pointer
- // or is it strange in some way (eg relative or patched into a series of
- // instructions).
- bool IsCodedSpecially();
-
- // Read/modify the code target in the branch/call instruction
- // this relocation applies to;
- // can only be called if IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
- INLINE(Address target_address());
- INLINE(void set_target_address(Address target));
- INLINE(Object* target_object());
- INLINE(Handle<Object> target_object_handle(Assembler* origin));
- INLINE(Object** target_object_address());
- INLINE(void set_target_object(Object* target));
- INLINE(JSGlobalPropertyCell* target_cell());
- INLINE(Handle<JSGlobalPropertyCell> target_cell_handle());
- INLINE(void set_target_cell(JSGlobalPropertyCell* cell));
-
-
- // Read the address of the word containing the target_address in an
- // instruction stream. What this means exactly is architecture-independent.
- // The only architecture-independent user of this function is the serializer.
- // The serializer uses it to find out how many raw bytes of instruction to
- // output before the next target. Architecture-independent code shouldn't
- // dereference the pointer it gets back from this.
- INLINE(Address target_address_address());
- // This indicates how much space a target takes up when deserializing a code
- // stream. For most architectures this is just the size of a pointer. For
- // an instruction like movw/movt where the target bits are mixed into the
- // instruction bits the size of the target will be zero, indicating that the
- // serializer should not step forwards in memory after a target is resolved
- // and written. In this case the target_address_address function above
- // should return the end of the instructions to be patched, allowing the
- // deserializer to deserialize the instructions as raw bytes and put them in
- // place, ready to be patched with the target.
- INLINE(int target_address_size());
-
- // Read/modify the reference in the instruction this relocation
- // applies to; can only be called if rmode_ is external_reference
- INLINE(Address* target_reference_address());
-
- // Read/modify the address of a call instruction. This is used to relocate
- // the break points where straight-line code is patched with a call
- // instruction.
- INLINE(Address call_address());
- INLINE(void set_call_address(Address target));
- INLINE(Object* call_object());
- INLINE(void set_call_object(Object* target));
- INLINE(Object** call_object_address());
-
- template<typename StaticVisitor> inline void Visit(Heap* heap);
- inline void Visit(ObjectVisitor* v);
-
- // Patch the code with some other code.
- void PatchCode(byte* instructions, int instruction_count);
-
- // Patch the code with a call.
- void PatchCodeWithCall(Address target, int guard_bytes);
-
- // Check whether this return sequence has been patched
- // with a call to the debugger.
- INLINE(bool IsPatchedReturnSequence());
-
- // Check whether this debug break slot has been patched with a call to the
- // debugger.
- INLINE(bool IsPatchedDebugBreakSlotSequence());
-
-#ifdef ENABLE_DISASSEMBLER
- // Printing
- static const char* RelocModeName(Mode rmode);
- void Print(FILE* out);
-#endif // ENABLE_DISASSEMBLER
-#ifdef DEBUG
- // Debugging
- void Verify();
-#endif
-
- static const int kCodeTargetMask = (1 << (LAST_CODE_ENUM + 1)) - 1;
- static const int kPositionMask = 1 << POSITION | 1 << STATEMENT_POSITION;
- static const int kDebugMask = kPositionMask | 1 << COMMENT;
- static const int kApplyMask; // Modes affected by apply. Depends on arch.
-
- private:
- // On ARM, note that pc_ is the address of the constant pool entry
- // to be relocated and not the address of the instruction
- // referencing the constant pool entry (except when rmode_ ==
- // comment).
- byte* pc_;
- Mode rmode_;
- intptr_t data_;
- friend class RelocIterator;
-};
-
-
-// RelocInfoWriter serializes a stream of relocation info. It writes towards
-// lower addresses.
-class RelocInfoWriter BASE_EMBEDDED {
- public:
- RelocInfoWriter() : pos_(NULL), last_pc_(NULL), last_data_(0) {}
- RelocInfoWriter(byte* pos, byte* pc) : pos_(pos), last_pc_(pc),
- last_data_(0) {}
-
- byte* pos() const { return pos_; }
- byte* last_pc() const { return last_pc_; }
-
- void Write(const RelocInfo* rinfo);
-
- // Update the state of the stream after reloc info buffer
- // and/or code is moved while the stream is active.
- void Reposition(byte* pos, byte* pc) {
- pos_ = pos;
- last_pc_ = pc;
- }
-
- // Max size (bytes) of a written RelocInfo. Longest encoding is
- // ExtraTag, VariableLengthPCJump, ExtraTag, pc_delta, ExtraTag, data_delta.
- // On ia32 and arm this is 1 + 4 + 1 + 1 + 1 + 4 = 12.
- // On x64 this is 1 + 4 + 1 + 1 + 1 + 8 == 16;
- // Here we use the maximum of the two.
- static const int kMaxSize = 16;
-
- private:
- inline uint32_t WriteVariableLengthPCJump(uint32_t pc_delta);
- inline void WriteTaggedPC(uint32_t pc_delta, int tag);
- inline void WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag);
- inline void WriteExtraTaggedData(intptr_t data_delta, int top_tag);
- inline void WriteTaggedData(intptr_t data_delta, int tag);
- inline void WriteExtraTag(int extra_tag, int top_tag);
-
- byte* pos_;
- byte* last_pc_;
- intptr_t last_data_;
- DISALLOW_COPY_AND_ASSIGN(RelocInfoWriter);
-};
-
-
-// A RelocIterator iterates over relocation information.
-// Typical use:
-//
-// for (RelocIterator it(code); !it.done(); it.next()) {
-// // do something with it.rinfo() here
-// }
-//
-// A mask can be specified to skip unwanted modes.
-class RelocIterator: public Malloced {
- public:
- // Create a new iterator positioned at
- // the beginning of the reloc info.
- // Relocation information with mode k is included in the
- // iteration iff bit k of mode_mask is set.
- explicit RelocIterator(Code* code, int mode_mask = -1);
- explicit RelocIterator(const CodeDesc& desc, int mode_mask = -1);
-
- // Iteration
- bool done() const { return done_; }
- void next();
-
- // Return pointer valid until next next().
- RelocInfo* rinfo() {
- ASSERT(!done());
- return &rinfo_;
- }
-
- private:
- // Advance* moves the position before/after reading.
- // *Read* reads from current byte(s) into rinfo_.
- // *Get* just reads and returns info on current byte.
- void Advance(int bytes = 1) { pos_ -= bytes; }
- int AdvanceGetTag();
- int GetExtraTag();
- int GetTopTag();
- void ReadTaggedPC();
- void AdvanceReadPC();
- void AdvanceReadData();
- void AdvanceReadVariableLengthPCJump();
- int GetPositionTypeTag();
- void ReadTaggedData();
-
- static RelocInfo::Mode DebugInfoModeFromTag(int tag);
-
- // If the given mode is wanted, set it in rinfo_ and return true.
- // Else return false. Used for efficiently skipping unwanted modes.
- bool SetMode(RelocInfo::Mode mode) {
- return (mode_mask_ & (1 << mode)) ? (rinfo_.rmode_ = mode, true) : false;
- }
-
- byte* pos_;
- byte* end_;
- RelocInfo rinfo_;
- bool done_;
- int mode_mask_;
- DISALLOW_COPY_AND_ASSIGN(RelocIterator);
-};
-
-
-//------------------------------------------------------------------------------
-// External function
-
-//----------------------------------------------------------------------------
-class IC_Utility;
-class SCTableReference;
-#ifdef ENABLE_DEBUGGER_SUPPORT
-class Debug_Address;
-#endif
-
-
-// An ExternalReference represents a C++ address used in the generated
-// code. All references to C++ functions and variables must be encapsulated in
-// an ExternalReference instance. This is done in order to track the origin of
-// all external references in the code so that they can be bound to the correct
-// addresses when deserializing a heap.
-class ExternalReference BASE_EMBEDDED {
- public:
- // Used in the simulator to support different native api calls.
- enum Type {
- // Builtin call.
- // MaybeObject* f(v8::internal::Arguments).
- BUILTIN_CALL, // default
-
- // Builtin call that returns floating point.
- // double f(double, double).
- FP_RETURN_CALL,
-
- // Direct call to API function callback.
- // Handle<Value> f(v8::Arguments&)
- DIRECT_API_CALL,
-
- // Direct call to accessor getter callback.
- // Handle<value> f(Local<String> property, AccessorInfo& info)
- DIRECT_GETTER_CALL
- };
-
- typedef void* ExternalReferenceRedirector(void* original, Type type);
-
- ExternalReference(Builtins::CFunctionId id, Isolate* isolate);
-
- ExternalReference(ApiFunction* ptr, Type type, Isolate* isolate);
-
- ExternalReference(Builtins::Name name, Isolate* isolate);
-
- ExternalReference(Runtime::FunctionId id, Isolate* isolate);
-
- ExternalReference(const Runtime::Function* f, Isolate* isolate);
-
- ExternalReference(const IC_Utility& ic_utility, Isolate* isolate);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference(const Debug_Address& debug_address, Isolate* isolate);
-#endif
-
- explicit ExternalReference(StatsCounter* counter);
-
- ExternalReference(Isolate::AddressId id, Isolate* isolate);
-
- explicit ExternalReference(const SCTableReference& table_ref);
-
- // Isolate::Current() as an external reference.
- static ExternalReference isolate_address();
-
- // One-of-a-kind references. These references are not part of a general
- // pattern. This means that they have to be added to the
- // ExternalReferenceTable in serialize.cc manually.
-
- static ExternalReference perform_gc_function(Isolate* isolate);
- static ExternalReference fill_heap_number_with_random_function(
- Isolate* isolate);
- static ExternalReference random_uint32_function(Isolate* isolate);
- static ExternalReference transcendental_cache_array_address(Isolate* isolate);
- static ExternalReference delete_handle_scope_extensions(Isolate* isolate);
-
- // Deoptimization support.
- static ExternalReference new_deoptimizer_function(Isolate* isolate);
- static ExternalReference compute_output_frames_function(Isolate* isolate);
- static ExternalReference global_contexts_list(Isolate* isolate);
-
- // Static data in the keyed lookup cache.
- static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
- static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
-
- // Static variable Factory::the_hole_value.location()
- static ExternalReference the_hole_value_location(Isolate* isolate);
-
- // Static variable Factory::arguments_marker.location()
- static ExternalReference arguments_marker_location(Isolate* isolate);
-
- // Static variable Heap::roots_address()
- static ExternalReference roots_address(Isolate* isolate);
-
- // Static variable StackGuard::address_of_jslimit()
- static ExternalReference address_of_stack_limit(Isolate* isolate);
-
- // Static variable StackGuard::address_of_real_jslimit()
- static ExternalReference address_of_real_stack_limit(Isolate* isolate);
-
- // Static variable RegExpStack::limit_address()
- static ExternalReference address_of_regexp_stack_limit(Isolate* isolate);
-
- // Static variables for RegExp.
- static ExternalReference address_of_static_offsets_vector(Isolate* isolate);
- static ExternalReference address_of_regexp_stack_memory_address(
- Isolate* isolate);
- static ExternalReference address_of_regexp_stack_memory_size(
- Isolate* isolate);
-
- // Static variable Heap::NewSpaceStart()
- static ExternalReference new_space_start(Isolate* isolate);
- static ExternalReference new_space_mask(Isolate* isolate);
- static ExternalReference heap_always_allocate_scope_depth(Isolate* isolate);
-
- // Used for fast allocation in generated code.
- static ExternalReference new_space_allocation_top_address(Isolate* isolate);
- static ExternalReference new_space_allocation_limit_address(Isolate* isolate);
-
- static ExternalReference double_fp_operation(Token::Value operation,
- Isolate* isolate);
- static ExternalReference compare_doubles(Isolate* isolate);
- static ExternalReference power_double_double_function(Isolate* isolate);
- static ExternalReference power_double_int_function(Isolate* isolate);
-
- static ExternalReference handle_scope_next_address();
- static ExternalReference handle_scope_limit_address();
- static ExternalReference handle_scope_level_address();
-
- static ExternalReference scheduled_exception_address(Isolate* isolate);
-
- // Static variables containing common double constants.
- static ExternalReference address_of_min_int();
- static ExternalReference address_of_one_half();
- static ExternalReference address_of_minus_zero();
- static ExternalReference address_of_negative_infinity();
- static ExternalReference address_of_nan();
-
- static ExternalReference math_sin_double_function(Isolate* isolate);
- static ExternalReference math_cos_double_function(Isolate* isolate);
- static ExternalReference math_log_double_function(Isolate* isolate);
-
- Address address() const {return reinterpret_cast<Address>(address_);}
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Function Debug::Break()
- static ExternalReference debug_break(Isolate* isolate);
-
- // Used to check if single stepping is enabled in generated code.
- static ExternalReference debug_step_in_fp_address(Isolate* isolate);
-#endif
-
-#ifndef V8_INTERPRETED_REGEXP
- // C functions called from RegExp generated code.
-
- // Function NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()
- static ExternalReference re_case_insensitive_compare_uc16(Isolate* isolate);
-
- // Function RegExpMacroAssembler*::CheckStackGuardState()
- static ExternalReference re_check_stack_guard_state(Isolate* isolate);
-
- // Function NativeRegExpMacroAssembler::GrowStack()
- static ExternalReference re_grow_stack(Isolate* isolate);
-
- // byte NativeRegExpMacroAssembler::word_character_bitmap
- static ExternalReference re_word_character_map();
-
-#endif
-
- // This lets you register a function that rewrites all external references.
- // Used by the ARM simulator to catch calls to external references.
- static void set_redirector(ExternalReferenceRedirector* redirector) {
- // We can't stack them.
- ASSERT(Isolate::Current()->external_reference_redirector() == NULL);
- Isolate::Current()->set_external_reference_redirector(
- reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector));
- }
-
- private:
- explicit ExternalReference(void* address)
- : address_(address) {}
-
- static void* Redirect(Isolate* isolate,
- void* address,
- Type type = ExternalReference::BUILTIN_CALL) {
- ExternalReferenceRedirector* redirector =
- reinterpret_cast<ExternalReferenceRedirector*>(
- isolate->external_reference_redirector());
- if (redirector == NULL) return address;
- void* answer = (*redirector)(address, type);
- return answer;
- }
-
- static void* Redirect(Isolate* isolate,
- Address address_arg,
- Type type = ExternalReference::BUILTIN_CALL) {
- ExternalReferenceRedirector* redirector =
- reinterpret_cast<ExternalReferenceRedirector*>(
- isolate->external_reference_redirector());
- void* address = reinterpret_cast<void*>(address_arg);
- void* answer = (redirector == NULL) ?
- address :
- (*redirector)(address, type);
- return answer;
- }
-
- void* address_;
-};
-
-
-// -----------------------------------------------------------------------------
-// Position recording support
-
-struct PositionState {
- PositionState() : current_position(RelocInfo::kNoPosition),
- written_position(RelocInfo::kNoPosition),
- current_statement_position(RelocInfo::kNoPosition),
- written_statement_position(RelocInfo::kNoPosition) {}
-
- int current_position;
- int written_position;
-
- int current_statement_position;
- int written_statement_position;
-};
-
-
-class PositionsRecorder BASE_EMBEDDED {
- public:
- explicit PositionsRecorder(Assembler* assembler)
- : assembler_(assembler) {
-#ifdef ENABLE_GDB_JIT_INTERFACE
- gdbjit_lineinfo_ = NULL;
-#endif
- }
-
-#ifdef ENABLE_GDB_JIT_INTERFACE
- ~PositionsRecorder() {
- delete gdbjit_lineinfo_;
- }
-
- void StartGDBJITLineInfoRecording() {
- if (FLAG_gdbjit) {
- gdbjit_lineinfo_ = new GDBJITLineInfo();
- }
- }
-
- GDBJITLineInfo* DetachGDBJITLineInfo() {
- GDBJITLineInfo* lineinfo = gdbjit_lineinfo_;
- gdbjit_lineinfo_ = NULL; // To prevent deallocation in destructor.
- return lineinfo;
- }
-#endif
-
- // Set current position to pos.
- void RecordPosition(int pos);
-
- // Set current statement position to pos.
- void RecordStatementPosition(int pos);
-
- // Write recorded positions to relocation information.
- bool WriteRecordedPositions();
-
- int current_position() const { return state_.current_position; }
-
- int current_statement_position() const {
- return state_.current_statement_position;
- }
-
- private:
- Assembler* assembler_;
- PositionState state_;
-#ifdef ENABLE_GDB_JIT_INTERFACE
- GDBJITLineInfo* gdbjit_lineinfo_;
-#endif
-
- friend class PreservePositionScope;
-
- DISALLOW_COPY_AND_ASSIGN(PositionsRecorder);
-};
-
-
-class PreservePositionScope BASE_EMBEDDED {
- public:
- explicit PreservePositionScope(PositionsRecorder* positions_recorder)
- : positions_recorder_(positions_recorder),
- saved_state_(positions_recorder->state_) {}
-
- ~PreservePositionScope() {
- positions_recorder_->state_ = saved_state_;
- }
-
- private:
- PositionsRecorder* positions_recorder_;
- const PositionState saved_state_;
-
- DISALLOW_COPY_AND_ASSIGN(PreservePositionScope);
-};
-
-
-// -----------------------------------------------------------------------------
-// Utility functions
-
-static inline bool is_intn(int x, int n) {
- return -(1 << (n-1)) <= x && x < (1 << (n-1));
-}
-
-static inline bool is_int8(int x) { return is_intn(x, 8); }
-static inline bool is_int16(int x) { return is_intn(x, 16); }
-static inline bool is_int18(int x) { return is_intn(x, 18); }
-static inline bool is_int24(int x) { return is_intn(x, 24); }
-
-static inline bool is_uintn(int x, int n) {
- return (x & -(1 << n)) == 0;
-}
-
-static inline bool is_uint2(int x) { return is_uintn(x, 2); }
-static inline bool is_uint3(int x) { return is_uintn(x, 3); }
-static inline bool is_uint4(int x) { return is_uintn(x, 4); }
-static inline bool is_uint5(int x) { return is_uintn(x, 5); }
-static inline bool is_uint6(int x) { return is_uintn(x, 6); }
-static inline bool is_uint8(int x) { return is_uintn(x, 8); }
-static inline bool is_uint10(int x) { return is_uintn(x, 10); }
-static inline bool is_uint12(int x) { return is_uintn(x, 12); }
-static inline bool is_uint16(int x) { return is_uintn(x, 16); }
-static inline bool is_uint24(int x) { return is_uintn(x, 24); }
-static inline bool is_uint26(int x) { return is_uintn(x, 26); }
-static inline bool is_uint28(int x) { return is_uintn(x, 28); }
-
-static inline int NumberOfBitsSet(uint32_t x) {
- unsigned int num_bits_set;
- for (num_bits_set = 0; x; x >>= 1) {
- num_bits_set += x & 1;
- }
- return num_bits_set;
-}
-
-// Computes pow(x, y) with the special cases in the spec for Math.pow.
-double power_double_int(double x, int y);
-double power_double_double(double x, double y);
-
-} } // namespace v8::internal
-
-#endif // V8_ASSEMBLER_H_
diff --git a/src/3rdparty/v8/src/ast-inl.h b/src/3rdparty/v8/src/ast-inl.h
deleted file mode 100644
index 6021fd9..0000000
--- a/src/3rdparty/v8/src/ast-inl.h
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_AST_INL_H_
-#define V8_AST_INL_H_
-
-#include "v8.h"
-
-#include "ast.h"
-#include "jump-target-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-SwitchStatement::SwitchStatement(ZoneStringList* labels)
- : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
- tag_(NULL), cases_(NULL) {
-}
-
-
-Block::Block(ZoneStringList* labels, int capacity, bool is_initializer_block)
- : BreakableStatement(labels, TARGET_FOR_NAMED_ONLY),
- statements_(capacity),
- is_initializer_block_(is_initializer_block) {
-}
-
-
-BreakableStatement::BreakableStatement(ZoneStringList* labels, Type type)
- : labels_(labels),
- type_(type),
- entry_id_(GetNextId()),
- exit_id_(GetNextId()) {
- ASSERT(labels == NULL || labels->length() > 0);
-}
-
-
-IterationStatement::IterationStatement(ZoneStringList* labels)
- : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
- body_(NULL),
- continue_target_(JumpTarget::BIDIRECTIONAL),
- osr_entry_id_(GetNextId()) {
-}
-
-
-DoWhileStatement::DoWhileStatement(ZoneStringList* labels)
- : IterationStatement(labels),
- cond_(NULL),
- condition_position_(-1),
- continue_id_(GetNextId()),
- back_edge_id_(GetNextId()) {
-}
-
-
-WhileStatement::WhileStatement(ZoneStringList* labels)
- : IterationStatement(labels),
- cond_(NULL),
- may_have_function_literal_(true),
- body_id_(GetNextId()) {
-}
-
-
-ForStatement::ForStatement(ZoneStringList* labels)
- : IterationStatement(labels),
- init_(NULL),
- cond_(NULL),
- next_(NULL),
- may_have_function_literal_(true),
- loop_variable_(NULL),
- continue_id_(GetNextId()),
- body_id_(GetNextId()) {
-}
-
-
-ForInStatement::ForInStatement(ZoneStringList* labels)
- : IterationStatement(labels), each_(NULL), enumerable_(NULL),
- assignment_id_(GetNextId()) {
-}
-
-
-bool FunctionLiteral::strict_mode() const {
- return scope()->is_strict_mode();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_AST_INL_H_
diff --git a/src/3rdparty/v8/src/ast.cc b/src/3rdparty/v8/src/ast.cc
deleted file mode 100644
index 9a263a5..0000000
--- a/src/3rdparty/v8/src/ast.cc
+++ /dev/null
@@ -1,1078 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "ast.h"
-#include "jump-target-inl.h"
-#include "parser.h"
-#include "scopes.h"
-#include "string-stream.h"
-
-namespace v8 {
-namespace internal {
-
-AstSentinels::AstSentinels()
- : this_proxy_(true),
- identifier_proxy_(false),
- valid_left_hand_side_sentinel_(),
- this_property_(&this_proxy_, NULL, 0),
- call_sentinel_(NULL, NULL, 0) {
-}
-
-
-// ----------------------------------------------------------------------------
-// All the Accept member functions for each syntax tree node type.
-
-void Slot::Accept(AstVisitor* v) { v->VisitSlot(this); }
-
-#define DECL_ACCEPT(type) \
- void type::Accept(AstVisitor* v) { v->Visit##type(this); }
-AST_NODE_LIST(DECL_ACCEPT)
-#undef DECL_ACCEPT
-
-
-// ----------------------------------------------------------------------------
-// Implementation of other node functionality.
-
-Assignment* ExpressionStatement::StatementAsSimpleAssignment() {
- return (expression()->AsAssignment() != NULL &&
- !expression()->AsAssignment()->is_compound())
- ? expression()->AsAssignment()
- : NULL;
-}
-
-
-CountOperation* ExpressionStatement::StatementAsCountOperation() {
- return expression()->AsCountOperation();
-}
-
-
-VariableProxy::VariableProxy(Variable* var)
- : name_(var->name()),
- var_(NULL), // Will be set by the call to BindTo.
- is_this_(var->is_this()),
- inside_with_(false),
- is_trivial_(false),
- position_(RelocInfo::kNoPosition) {
- BindTo(var);
-}
-
-
-VariableProxy::VariableProxy(Handle<String> name,
- bool is_this,
- bool inside_with,
- int position)
- : name_(name),
- var_(NULL),
- is_this_(is_this),
- inside_with_(inside_with),
- is_trivial_(false),
- position_(position) {
- // Names must be canonicalized for fast equality checks.
- ASSERT(name->IsSymbol());
-}
-
-
-VariableProxy::VariableProxy(bool is_this)
- : var_(NULL),
- is_this_(is_this),
- inside_with_(false),
- is_trivial_(false) {
-}
-
-
-void VariableProxy::BindTo(Variable* var) {
- ASSERT(var_ == NULL); // must be bound only once
- ASSERT(var != NULL); // must bind
- ASSERT((is_this() && var->is_this()) || name_.is_identical_to(var->name()));
- // Ideally CONST-ness should match. However, this is very hard to achieve
- // because we don't know the exact semantics of conflicting (const and
- // non-const) multiple variable declarations, const vars introduced via
- // eval() etc. Const-ness and variable declarations are a complete mess
- // in JS. Sigh...
- var_ = var;
- var->set_is_used(true);
-}
-
-
-Assignment::Assignment(Token::Value op,
- Expression* target,
- Expression* value,
- int pos)
- : op_(op),
- target_(target),
- value_(value),
- pos_(pos),
- binary_operation_(NULL),
- compound_load_id_(kNoNumber),
- assignment_id_(GetNextId()),
- block_start_(false),
- block_end_(false),
- is_monomorphic_(false),
- receiver_types_(NULL) {
- ASSERT(Token::IsAssignmentOp(op));
- if (is_compound()) {
- binary_operation_ =
- new BinaryOperation(binary_op(), target, value, pos + 1);
- compound_load_id_ = GetNextId();
- }
-}
-
-
-Token::Value Assignment::binary_op() const {
- switch (op_) {
- case Token::ASSIGN_BIT_OR: return Token::BIT_OR;
- case Token::ASSIGN_BIT_XOR: return Token::BIT_XOR;
- case Token::ASSIGN_BIT_AND: return Token::BIT_AND;
- case Token::ASSIGN_SHL: return Token::SHL;
- case Token::ASSIGN_SAR: return Token::SAR;
- case Token::ASSIGN_SHR: return Token::SHR;
- case Token::ASSIGN_ADD: return Token::ADD;
- case Token::ASSIGN_SUB: return Token::SUB;
- case Token::ASSIGN_MUL: return Token::MUL;
- case Token::ASSIGN_DIV: return Token::DIV;
- case Token::ASSIGN_MOD: return Token::MOD;
- default: UNREACHABLE();
- }
- return Token::ILLEGAL;
-}
-
-
-bool FunctionLiteral::AllowsLazyCompilation() {
- return scope()->AllowsLazyCompilation();
-}
-
-
-ObjectLiteral::Property::Property(Literal* key, Expression* value) {
- emit_store_ = true;
- key_ = key;
- value_ = value;
- Object* k = *key->handle();
- if (k->IsSymbol() && HEAP->Proto_symbol()->Equals(String::cast(k))) {
- kind_ = PROTOTYPE;
- } else if (value_->AsMaterializedLiteral() != NULL) {
- kind_ = MATERIALIZED_LITERAL;
- } else if (value_->AsLiteral() != NULL) {
- kind_ = CONSTANT;
- } else {
- kind_ = COMPUTED;
- }
-}
-
-
-ObjectLiteral::Property::Property(bool is_getter, FunctionLiteral* value) {
- emit_store_ = true;
- key_ = new Literal(value->name());
- value_ = value;
- kind_ = is_getter ? GETTER : SETTER;
-}
-
-
-bool ObjectLiteral::Property::IsCompileTimeValue() {
- return kind_ == CONSTANT ||
- (kind_ == MATERIALIZED_LITERAL &&
- CompileTimeValue::IsCompileTimeValue(value_));
-}
-
-
-void ObjectLiteral::Property::set_emit_store(bool emit_store) {
- emit_store_ = emit_store;
-}
-
-
-bool ObjectLiteral::Property::emit_store() {
- return emit_store_;
-}
-
-
-bool IsEqualString(void* first, void* second) {
- ASSERT((*reinterpret_cast<String**>(first))->IsString());
- ASSERT((*reinterpret_cast<String**>(second))->IsString());
- Handle<String> h1(reinterpret_cast<String**>(first));
- Handle<String> h2(reinterpret_cast<String**>(second));
- return (*h1)->Equals(*h2);
-}
-
-
-bool IsEqualNumber(void* first, void* second) {
- ASSERT((*reinterpret_cast<Object**>(first))->IsNumber());
- ASSERT((*reinterpret_cast<Object**>(second))->IsNumber());
-
- Handle<Object> h1(reinterpret_cast<Object**>(first));
- Handle<Object> h2(reinterpret_cast<Object**>(second));
- if (h1->IsSmi()) {
- return h2->IsSmi() && *h1 == *h2;
- }
- if (h2->IsSmi()) return false;
- Handle<HeapNumber> n1 = Handle<HeapNumber>::cast(h1);
- Handle<HeapNumber> n2 = Handle<HeapNumber>::cast(h2);
- ASSERT(isfinite(n1->value()));
- ASSERT(isfinite(n2->value()));
- return n1->value() == n2->value();
-}
-
-
-void ObjectLiteral::CalculateEmitStore() {
- HashMap properties(&IsEqualString);
- HashMap elements(&IsEqualNumber);
- for (int i = this->properties()->length() - 1; i >= 0; i--) {
- ObjectLiteral::Property* property = this->properties()->at(i);
- Literal* literal = property->key();
- Handle<Object> handle = literal->handle();
-
- if (handle->IsNull()) {
- continue;
- }
-
- uint32_t hash;
- HashMap* table;
- void* key;
- Factory* factory = Isolate::Current()->factory();
- if (handle->IsSymbol()) {
- Handle<String> name(String::cast(*handle));
- if (name->AsArrayIndex(&hash)) {
- Handle<Object> key_handle = factory->NewNumberFromUint(hash);
- key = key_handle.location();
- table = &elements;
- } else {
- key = name.location();
- hash = name->Hash();
- table = &properties;
- }
- } else if (handle->ToArrayIndex(&hash)) {
- key = handle.location();
- table = &elements;
- } else {
- ASSERT(handle->IsNumber());
- double num = handle->Number();
- char arr[100];
- Vector<char> buffer(arr, ARRAY_SIZE(arr));
- const char* str = DoubleToCString(num, buffer);
- Handle<String> name = factory->NewStringFromAscii(CStrVector(str));
- key = name.location();
- hash = name->Hash();
- table = &properties;
- }
- // If the key of a computed property is in the table, do not emit
- // a store for the property later.
- if (property->kind() == ObjectLiteral::Property::COMPUTED) {
- if (table->Lookup(key, hash, false) != NULL) {
- property->set_emit_store(false);
- }
- }
- // Add key to the table.
- table->Lookup(key, hash, true);
- }
-}
-
-
-void TargetCollector::AddTarget(BreakTarget* target) {
- // Add the label to the collector, but discard duplicates.
- int length = targets_->length();
- for (int i = 0; i < length; i++) {
- if (targets_->at(i) == target) return;
- }
- targets_->Add(target);
-}
-
-
-bool Expression::GuaranteedSmiResult() {
- BinaryOperation* node = AsBinaryOperation();
- if (node == NULL) return false;
- Token::Value op = node->op();
- switch (op) {
- case Token::COMMA:
- case Token::OR:
- case Token::AND:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- case Token::BIT_XOR:
- case Token::SHL:
- return false;
- break;
- case Token::BIT_OR:
- case Token::BIT_AND: {
- Literal* left = node->left()->AsLiteral();
- Literal* right = node->right()->AsLiteral();
- if (left != NULL && left->handle()->IsSmi()) {
- int value = Smi::cast(*left->handle())->value();
- if (op == Token::BIT_OR && ((value & 0xc0000000) == 0xc0000000)) {
- // Result of bitwise or is always a negative Smi.
- return true;
- }
- if (op == Token::BIT_AND && ((value & 0xc0000000) == 0)) {
- // Result of bitwise and is always a positive Smi.
- return true;
- }
- }
- if (right != NULL && right->handle()->IsSmi()) {
- int value = Smi::cast(*right->handle())->value();
- if (op == Token::BIT_OR && ((value & 0xc0000000) == 0xc0000000)) {
- // Result of bitwise or is always a negative Smi.
- return true;
- }
- if (op == Token::BIT_AND && ((value & 0xc0000000) == 0)) {
- // Result of bitwise and is always a positive Smi.
- return true;
- }
- }
- return false;
- break;
- }
- case Token::SAR:
- case Token::SHR: {
- Literal* right = node->right()->AsLiteral();
- if (right != NULL && right->handle()->IsSmi()) {
- int value = Smi::cast(*right->handle())->value();
- if ((value & 0x1F) > 1 ||
- (op == Token::SAR && (value & 0x1F) == 1)) {
- return true;
- }
- }
- return false;
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- return false;
-}
-
-
-void Expression::CopyAnalysisResultsFrom(Expression* other) {
- bitfields_ = other->bitfields_;
- type_ = other->type_;
-}
-
-
-bool UnaryOperation::ResultOverwriteAllowed() {
- switch (op_) {
- case Token::BIT_NOT:
- case Token::SUB:
- return true;
- default:
- return false;
- }
-}
-
-
-bool BinaryOperation::ResultOverwriteAllowed() {
- switch (op_) {
- case Token::COMMA:
- case Token::OR:
- case Token::AND:
- return false;
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- return true;
- default:
- UNREACHABLE();
- }
- return false;
-}
-
-
-BinaryOperation::BinaryOperation(Assignment* assignment) {
- ASSERT(assignment->is_compound());
- op_ = assignment->binary_op();
- left_ = assignment->target();
- right_ = assignment->value();
- pos_ = assignment->position();
- CopyAnalysisResultsFrom(assignment);
-}
-
-
-// ----------------------------------------------------------------------------
-// Inlining support
-
-bool Block::IsInlineable() const {
- const int count = statements_.length();
- for (int i = 0; i < count; ++i) {
- if (!statements_[i]->IsInlineable()) return false;
- }
- return true;
-}
-
-
-bool ExpressionStatement::IsInlineable() const {
- return expression()->IsInlineable();
-}
-
-
-bool IfStatement::IsInlineable() const {
- return condition()->IsInlineable() && then_statement()->IsInlineable() &&
- else_statement()->IsInlineable();
-}
-
-
-bool ReturnStatement::IsInlineable() const {
- return expression()->IsInlineable();
-}
-
-
-bool Conditional::IsInlineable() const {
- return condition()->IsInlineable() && then_expression()->IsInlineable() &&
- else_expression()->IsInlineable();
-}
-
-
-bool VariableProxy::IsInlineable() const {
- return var()->is_global() || var()->IsStackAllocated();
-}
-
-
-bool Assignment::IsInlineable() const {
- return target()->IsInlineable() && value()->IsInlineable();
-}
-
-
-bool Property::IsInlineable() const {
- return obj()->IsInlineable() && key()->IsInlineable();
-}
-
-
-bool Call::IsInlineable() const {
- if (!expression()->IsInlineable()) return false;
- const int count = arguments()->length();
- for (int i = 0; i < count; ++i) {
- if (!arguments()->at(i)->IsInlineable()) return false;
- }
- return true;
-}
-
-
-bool CallNew::IsInlineable() const {
- if (!expression()->IsInlineable()) return false;
- const int count = arguments()->length();
- for (int i = 0; i < count; ++i) {
- if (!arguments()->at(i)->IsInlineable()) return false;
- }
- return true;
-}
-
-
-bool CallRuntime::IsInlineable() const {
- const int count = arguments()->length();
- for (int i = 0; i < count; ++i) {
- if (!arguments()->at(i)->IsInlineable()) return false;
- }
- return true;
-}
-
-
-bool UnaryOperation::IsInlineable() const {
- return expression()->IsInlineable();
-}
-
-
-bool BinaryOperation::IsInlineable() const {
- return left()->IsInlineable() && right()->IsInlineable();
-}
-
-
-bool CompareOperation::IsInlineable() const {
- return left()->IsInlineable() && right()->IsInlineable();
-}
-
-
-bool CompareToNull::IsInlineable() const {
- return expression()->IsInlineable();
-}
-
-
-bool CountOperation::IsInlineable() const {
- return expression()->IsInlineable();
-}
-
-
-// ----------------------------------------------------------------------------
-// Recording of type feedback
-
-void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- // Record type feedback from the oracle in the AST.
- is_monomorphic_ = oracle->LoadIsMonomorphic(this);
- if (key()->IsPropertyName()) {
- if (oracle->LoadIsBuiltin(this, Builtins::kLoadIC_ArrayLength)) {
- is_array_length_ = true;
- } else if (oracle->LoadIsBuiltin(this, Builtins::kLoadIC_StringLength)) {
- is_string_length_ = true;
- } else if (oracle->LoadIsBuiltin(this,
- Builtins::kLoadIC_FunctionPrototype)) {
- is_function_prototype_ = true;
- } else {
- Literal* lit_key = key()->AsLiteral();
- ASSERT(lit_key != NULL && lit_key->handle()->IsString());
- Handle<String> name = Handle<String>::cast(lit_key->handle());
- ZoneMapList* types = oracle->LoadReceiverTypes(this, name);
- receiver_types_ = types;
- }
- } else if (oracle->LoadIsBuiltin(this, Builtins::kKeyedLoadIC_String)) {
- is_string_access_ = true;
- } else if (is_monomorphic_) {
- monomorphic_receiver_type_ = oracle->LoadMonomorphicReceiverType(this);
- if (monomorphic_receiver_type_->has_external_array_elements()) {
- SetExternalArrayType(oracle->GetKeyedLoadExternalArrayType(this));
- }
- }
-}
-
-
-void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- Property* prop = target()->AsProperty();
- ASSERT(prop != NULL);
- is_monomorphic_ = oracle->StoreIsMonomorphic(this);
- if (prop->key()->IsPropertyName()) {
- Literal* lit_key = prop->key()->AsLiteral();
- ASSERT(lit_key != NULL && lit_key->handle()->IsString());
- Handle<String> name = Handle<String>::cast(lit_key->handle());
- ZoneMapList* types = oracle->StoreReceiverTypes(this, name);
- receiver_types_ = types;
- } else if (is_monomorphic_) {
- // Record receiver type for monomorphic keyed loads.
- monomorphic_receiver_type_ = oracle->StoreMonomorphicReceiverType(this);
- if (monomorphic_receiver_type_->has_external_array_elements()) {
- SetExternalArrayType(oracle->GetKeyedStoreExternalArrayType(this));
- }
- }
-}
-
-
-void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- TypeInfo info = oracle->SwitchType(this);
- if (info.IsSmi()) {
- compare_type_ = SMI_ONLY;
- } else if (info.IsNonPrimitive()) {
- compare_type_ = OBJECT_ONLY;
- } else {
- ASSERT(compare_type_ == NONE);
- }
-}
-
-
-static bool CanCallWithoutIC(Handle<JSFunction> target, int arity) {
- SharedFunctionInfo* info = target->shared();
- // If the number of formal parameters of the target function does
- // not match the number of arguments we're passing, we don't want to
- // deal with it. Otherwise, we can call it directly.
- return !target->NeedsArgumentsAdaption() ||
- info->formal_parameter_count() == arity;
-}
-
-
-bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
- if (check_type_ == RECEIVER_MAP_CHECK) {
- // For primitive checks the holder is set up to point to the
- // corresponding prototype object, i.e. one step of the algorithm
- // below has been already performed.
- // For non-primitive checks we clear it to allow computing targets
- // for polymorphic calls.
- holder_ = Handle<JSObject>::null();
- }
- while (true) {
- LookupResult lookup;
- type->LookupInDescriptors(NULL, *name, &lookup);
- // If the function wasn't found directly in the map, we start
- // looking upwards through the prototype chain.
- if (!lookup.IsFound() && type->prototype()->IsJSObject()) {
- holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
- type = Handle<Map>(holder()->map());
- } else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) {
- target_ = Handle<JSFunction>(lookup.GetConstantFunctionFromMap(*type));
- return CanCallWithoutIC(target_, arguments()->length());
- } else {
- return false;
- }
- }
-}
-
-
-bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
- LookupResult* lookup) {
- target_ = Handle<JSFunction>::null();
- cell_ = Handle<JSGlobalPropertyCell>::null();
- ASSERT(lookup->IsProperty() &&
- lookup->type() == NORMAL &&
- lookup->holder() == *global);
- cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(lookup));
- if (cell_->value()->IsJSFunction()) {
- Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
- // If the function is in new space we assume it's more likely to
- // change and thus prefer the general IC code.
- if (!HEAP->InNewSpace(*candidate) &&
- CanCallWithoutIC(candidate, arguments()->length())) {
- target_ = candidate;
- return true;
- }
- }
- return false;
-}
-
-
-void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- Property* property = expression()->AsProperty();
- ASSERT(property != NULL);
- // Specialize for the receiver types seen at runtime.
- Literal* key = property->key()->AsLiteral();
- ASSERT(key != NULL && key->handle()->IsString());
- Handle<String> name = Handle<String>::cast(key->handle());
- receiver_types_ = oracle->CallReceiverTypes(this, name);
-#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- if (receiver_types_ != NULL) {
- int length = receiver_types_->length();
- for (int i = 0; i < length; i++) {
- Handle<Map> map = receiver_types_->at(i);
- ASSERT(!map.is_null() && *map != NULL);
- }
- }
- }
-#endif
- is_monomorphic_ = oracle->CallIsMonomorphic(this);
- check_type_ = oracle->GetCallCheckType(this);
- if (is_monomorphic_) {
- Handle<Map> map;
- if (receiver_types_ != NULL && receiver_types_->length() > 0) {
- ASSERT(check_type_ == RECEIVER_MAP_CHECK);
- map = receiver_types_->at(0);
- } else {
- ASSERT(check_type_ != RECEIVER_MAP_CHECK);
- holder_ = Handle<JSObject>(
- oracle->GetPrototypeForPrimitiveCheck(check_type_));
- map = Handle<Map>(holder_->map());
- }
- is_monomorphic_ = ComputeTarget(map, name);
- }
-}
-
-
-void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- TypeInfo info = oracle->CompareType(this);
- if (info.IsSmi()) {
- compare_type_ = SMI_ONLY;
- } else if (info.IsNonPrimitive()) {
- compare_type_ = OBJECT_ONLY;
- } else {
- ASSERT(compare_type_ == NONE);
- }
-}
-
-
-// ----------------------------------------------------------------------------
-// Implementation of AstVisitor
-
-bool AstVisitor::CheckStackOverflow() {
- if (stack_overflow_) return true;
- StackLimitCheck check(isolate_);
- if (!check.HasOverflowed()) return false;
- return (stack_overflow_ = true);
-}
-
-
-void AstVisitor::VisitDeclarations(ZoneList<Declaration*>* declarations) {
- for (int i = 0; i < declarations->length(); i++) {
- Visit(declarations->at(i));
- }
-}
-
-
-void AstVisitor::VisitStatements(ZoneList<Statement*>* statements) {
- for (int i = 0; i < statements->length(); i++) {
- Visit(statements->at(i));
- }
-}
-
-
-void AstVisitor::VisitExpressions(ZoneList<Expression*>* expressions) {
- for (int i = 0; i < expressions->length(); i++) {
- // The variable statement visiting code may pass NULL expressions
- // to this code. Maybe this should be handled by introducing an
- // undefined expression or literal? Revisit this code if this
- // changes
- Expression* expression = expressions->at(i);
- if (expression != NULL) Visit(expression);
- }
-}
-
-
-// ----------------------------------------------------------------------------
-// Regular expressions
-
-#define MAKE_ACCEPT(Name) \
- void* RegExp##Name::Accept(RegExpVisitor* visitor, void* data) { \
- return visitor->Visit##Name(this, data); \
- }
-FOR_EACH_REG_EXP_TREE_TYPE(MAKE_ACCEPT)
-#undef MAKE_ACCEPT
-
-#define MAKE_TYPE_CASE(Name) \
- RegExp##Name* RegExpTree::As##Name() { \
- return NULL; \
- } \
- bool RegExpTree::Is##Name() { return false; }
-FOR_EACH_REG_EXP_TREE_TYPE(MAKE_TYPE_CASE)
-#undef MAKE_TYPE_CASE
-
-#define MAKE_TYPE_CASE(Name) \
- RegExp##Name* RegExp##Name::As##Name() { \
- return this; \
- } \
- bool RegExp##Name::Is##Name() { return true; }
-FOR_EACH_REG_EXP_TREE_TYPE(MAKE_TYPE_CASE)
-#undef MAKE_TYPE_CASE
-
-RegExpEmpty RegExpEmpty::kInstance;
-
-
-static Interval ListCaptureRegisters(ZoneList<RegExpTree*>* children) {
- Interval result = Interval::Empty();
- for (int i = 0; i < children->length(); i++)
- result = result.Union(children->at(i)->CaptureRegisters());
- return result;
-}
-
-
-Interval RegExpAlternative::CaptureRegisters() {
- return ListCaptureRegisters(nodes());
-}
-
-
-Interval RegExpDisjunction::CaptureRegisters() {
- return ListCaptureRegisters(alternatives());
-}
-
-
-Interval RegExpLookahead::CaptureRegisters() {
- return body()->CaptureRegisters();
-}
-
-
-Interval RegExpCapture::CaptureRegisters() {
- Interval self(StartRegister(index()), EndRegister(index()));
- return self.Union(body()->CaptureRegisters());
-}
-
-
-Interval RegExpQuantifier::CaptureRegisters() {
- return body()->CaptureRegisters();
-}
-
-
-bool RegExpAssertion::IsAnchoredAtStart() {
- return type() == RegExpAssertion::START_OF_INPUT;
-}
-
-
-bool RegExpAssertion::IsAnchoredAtEnd() {
- return type() == RegExpAssertion::END_OF_INPUT;
-}
-
-
-bool RegExpAlternative::IsAnchoredAtStart() {
- ZoneList<RegExpTree*>* nodes = this->nodes();
- for (int i = 0; i < nodes->length(); i++) {
- RegExpTree* node = nodes->at(i);
- if (node->IsAnchoredAtStart()) { return true; }
- if (node->max_match() > 0) { return false; }
- }
- return false;
-}
-
-
-bool RegExpAlternative::IsAnchoredAtEnd() {
- ZoneList<RegExpTree*>* nodes = this->nodes();
- for (int i = nodes->length() - 1; i >= 0; i--) {
- RegExpTree* node = nodes->at(i);
- if (node->IsAnchoredAtEnd()) { return true; }
- if (node->max_match() > 0) { return false; }
- }
- return false;
-}
-
-
-bool RegExpDisjunction::IsAnchoredAtStart() {
- ZoneList<RegExpTree*>* alternatives = this->alternatives();
- for (int i = 0; i < alternatives->length(); i++) {
- if (!alternatives->at(i)->IsAnchoredAtStart())
- return false;
- }
- return true;
-}
-
-
-bool RegExpDisjunction::IsAnchoredAtEnd() {
- ZoneList<RegExpTree*>* alternatives = this->alternatives();
- for (int i = 0; i < alternatives->length(); i++) {
- if (!alternatives->at(i)->IsAnchoredAtEnd())
- return false;
- }
- return true;
-}
-
-
-bool RegExpLookahead::IsAnchoredAtStart() {
- return is_positive() && body()->IsAnchoredAtStart();
-}
-
-
-bool RegExpCapture::IsAnchoredAtStart() {
- return body()->IsAnchoredAtStart();
-}
-
-
-bool RegExpCapture::IsAnchoredAtEnd() {
- return body()->IsAnchoredAtEnd();
-}
-
-
-// Convert regular expression trees to a simple sexp representation.
-// This representation should be different from the input grammar
-// in as many cases as possible, to make it more difficult for incorrect
-// parses to look as correct ones which is likely if the input and
-// output formats are alike.
-class RegExpUnparser: public RegExpVisitor {
- public:
- RegExpUnparser();
- void VisitCharacterRange(CharacterRange that);
- SmartPointer<const char> ToString() { return stream_.ToCString(); }
-#define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, void* data);
- FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
-#undef MAKE_CASE
- private:
- StringStream* stream() { return &stream_; }
- HeapStringAllocator alloc_;
- StringStream stream_;
-};
-
-
-RegExpUnparser::RegExpUnparser() : stream_(&alloc_) {
-}
-
-
-void* RegExpUnparser::VisitDisjunction(RegExpDisjunction* that, void* data) {
- stream()->Add("(|");
- for (int i = 0; i < that->alternatives()->length(); i++) {
- stream()->Add(" ");
- that->alternatives()->at(i)->Accept(this, data);
- }
- stream()->Add(")");
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitAlternative(RegExpAlternative* that, void* data) {
- stream()->Add("(:");
- for (int i = 0; i < that->nodes()->length(); i++) {
- stream()->Add(" ");
- that->nodes()->at(i)->Accept(this, data);
- }
- stream()->Add(")");
- return NULL;
-}
-
-
-void RegExpUnparser::VisitCharacterRange(CharacterRange that) {
- stream()->Add("%k", that.from());
- if (!that.IsSingleton()) {
- stream()->Add("-%k", that.to());
- }
-}
-
-
-
-void* RegExpUnparser::VisitCharacterClass(RegExpCharacterClass* that,
- void* data) {
- if (that->is_negated())
- stream()->Add("^");
- stream()->Add("[");
- for (int i = 0; i < that->ranges()->length(); i++) {
- if (i > 0) stream()->Add(" ");
- VisitCharacterRange(that->ranges()->at(i));
- }
- stream()->Add("]");
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitAssertion(RegExpAssertion* that, void* data) {
- switch (that->type()) {
- case RegExpAssertion::START_OF_INPUT:
- stream()->Add("@^i");
- break;
- case RegExpAssertion::END_OF_INPUT:
- stream()->Add("@$i");
- break;
- case RegExpAssertion::START_OF_LINE:
- stream()->Add("@^l");
- break;
- case RegExpAssertion::END_OF_LINE:
- stream()->Add("@$l");
- break;
- case RegExpAssertion::BOUNDARY:
- stream()->Add("@b");
- break;
- case RegExpAssertion::NON_BOUNDARY:
- stream()->Add("@B");
- break;
- }
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitAtom(RegExpAtom* that, void* data) {
- stream()->Add("'");
- Vector<const uc16> chardata = that->data();
- for (int i = 0; i < chardata.length(); i++) {
- stream()->Add("%k", chardata[i]);
- }
- stream()->Add("'");
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitText(RegExpText* that, void* data) {
- if (that->elements()->length() == 1) {
- that->elements()->at(0).data.u_atom->Accept(this, data);
- } else {
- stream()->Add("(!");
- for (int i = 0; i < that->elements()->length(); i++) {
- stream()->Add(" ");
- that->elements()->at(i).data.u_atom->Accept(this, data);
- }
- stream()->Add(")");
- }
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitQuantifier(RegExpQuantifier* that, void* data) {
- stream()->Add("(# %i ", that->min());
- if (that->max() == RegExpTree::kInfinity) {
- stream()->Add("- ");
- } else {
- stream()->Add("%i ", that->max());
- }
- stream()->Add(that->is_greedy() ? "g " : that->is_possessive() ? "p " : "n ");
- that->body()->Accept(this, data);
- stream()->Add(")");
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitCapture(RegExpCapture* that, void* data) {
- stream()->Add("(^ ");
- that->body()->Accept(this, data);
- stream()->Add(")");
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitLookahead(RegExpLookahead* that, void* data) {
- stream()->Add("(-> ");
- stream()->Add(that->is_positive() ? "+ " : "- ");
- that->body()->Accept(this, data);
- stream()->Add(")");
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitBackReference(RegExpBackReference* that,
- void* data) {
- stream()->Add("(<- %i)", that->index());
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitEmpty(RegExpEmpty* that, void* data) {
- stream()->Put('%');
- return NULL;
-}
-
-
-SmartPointer<const char> RegExpTree::ToString() {
- RegExpUnparser unparser;
- Accept(&unparser, NULL);
- return unparser.ToString();
-}
-
-
-RegExpDisjunction::RegExpDisjunction(ZoneList<RegExpTree*>* alternatives)
- : alternatives_(alternatives) {
- ASSERT(alternatives->length() > 1);
- RegExpTree* first_alternative = alternatives->at(0);
- min_match_ = first_alternative->min_match();
- max_match_ = first_alternative->max_match();
- for (int i = 1; i < alternatives->length(); i++) {
- RegExpTree* alternative = alternatives->at(i);
- min_match_ = Min(min_match_, alternative->min_match());
- max_match_ = Max(max_match_, alternative->max_match());
- }
-}
-
-
-RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
- : nodes_(nodes) {
- ASSERT(nodes->length() > 1);
- min_match_ = 0;
- max_match_ = 0;
- for (int i = 0; i < nodes->length(); i++) {
- RegExpTree* node = nodes->at(i);
- min_match_ += node->min_match();
- int node_max_match = node->max_match();
- if (kInfinity - max_match_ < node_max_match) {
- max_match_ = kInfinity;
- } else {
- max_match_ += node->max_match();
- }
- }
-}
-
-
-CaseClause::CaseClause(Expression* label,
- ZoneList<Statement*>* statements,
- int pos)
- : label_(label),
- statements_(statements),
- position_(pos),
- compare_type_(NONE),
- entry_id_(AstNode::GetNextId()) {
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/ast.h b/src/3rdparty/v8/src/ast.h
deleted file mode 100644
index d8bc18e..0000000
--- a/src/3rdparty/v8/src/ast.h
+++ /dev/null
@@ -1,2234 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_AST_H_
-#define V8_AST_H_
-
-#include "execution.h"
-#include "factory.h"
-#include "jsregexp.h"
-#include "jump-target.h"
-#include "runtime.h"
-#include "token.h"
-#include "variables.h"
-
-namespace v8 {
-namespace internal {
-
-// The abstract syntax tree is an intermediate, light-weight
-// representation of the parsed JavaScript code suitable for
-// compilation to native code.
-
-// Nodes are allocated in a separate zone, which allows faster
-// allocation and constant-time deallocation of the entire syntax
-// tree.
-
-
-// ----------------------------------------------------------------------------
-// Nodes of the abstract syntax tree. Only concrete classes are
-// enumerated here.
-
-#define STATEMENT_NODE_LIST(V) \
- V(Block) \
- V(ExpressionStatement) \
- V(EmptyStatement) \
- V(IfStatement) \
- V(ContinueStatement) \
- V(BreakStatement) \
- V(ReturnStatement) \
- V(WithEnterStatement) \
- V(WithExitStatement) \
- V(SwitchStatement) \
- V(DoWhileStatement) \
- V(WhileStatement) \
- V(ForStatement) \
- V(ForInStatement) \
- V(TryCatchStatement) \
- V(TryFinallyStatement) \
- V(DebuggerStatement)
-
-#define EXPRESSION_NODE_LIST(V) \
- V(FunctionLiteral) \
- V(SharedFunctionInfoLiteral) \
- V(Conditional) \
- V(VariableProxy) \
- V(Literal) \
- V(RegExpLiteral) \
- V(ObjectLiteral) \
- V(ArrayLiteral) \
- V(CatchExtensionObject) \
- V(Assignment) \
- V(Throw) \
- V(Property) \
- V(Call) \
- V(CallNew) \
- V(CallRuntime) \
- V(UnaryOperation) \
- V(IncrementOperation) \
- V(CountOperation) \
- V(BinaryOperation) \
- V(CompareOperation) \
- V(CompareToNull) \
- V(ThisFunction)
-
-#define AST_NODE_LIST(V) \
- V(Declaration) \
- STATEMENT_NODE_LIST(V) \
- EXPRESSION_NODE_LIST(V)
-
-// Forward declarations
-class BitVector;
-class DefinitionInfo;
-class MaterializedLiteral;
-class TargetCollector;
-class TypeFeedbackOracle;
-
-#define DEF_FORWARD_DECLARATION(type) class type;
-AST_NODE_LIST(DEF_FORWARD_DECLARATION)
-#undef DEF_FORWARD_DECLARATION
-
-
-// Typedef only introduced to avoid unreadable code.
-// Please do appreciate the required space in "> >".
-typedef ZoneList<Handle<String> > ZoneStringList;
-typedef ZoneList<Handle<Object> > ZoneObjectList;
-
-
-#define DECLARE_NODE_TYPE(type) \
- virtual void Accept(AstVisitor* v); \
- virtual AstNode::Type node_type() const { return AstNode::k##type; } \
- virtual type* As##type() { return this; }
-
-
-class AstNode: public ZoneObject {
- public:
-#define DECLARE_TYPE_ENUM(type) k##type,
- enum Type {
- AST_NODE_LIST(DECLARE_TYPE_ENUM)
- kInvalid = -1
- };
-#undef DECLARE_TYPE_ENUM
-
- static const int kNoNumber = -1;
-
- AstNode() : id_(GetNextId()) {
- Isolate* isolate = Isolate::Current();
- isolate->set_ast_node_count(isolate->ast_node_count() + 1);
- }
-
- virtual ~AstNode() { }
-
- virtual void Accept(AstVisitor* v) = 0;
- virtual Type node_type() const { return kInvalid; }
-
- // Type testing & conversion functions overridden by concrete subclasses.
-#define DECLARE_NODE_FUNCTIONS(type) \
- virtual type* As##type() { return NULL; }
- AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
-#undef DECLARE_NODE_FUNCTIONS
-
- virtual Statement* AsStatement() { return NULL; }
- virtual Expression* AsExpression() { return NULL; }
- virtual TargetCollector* AsTargetCollector() { return NULL; }
- virtual BreakableStatement* AsBreakableStatement() { return NULL; }
- virtual IterationStatement* AsIterationStatement() { return NULL; }
- virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
- virtual Slot* AsSlot() { return NULL; }
-
- // True if the node is simple enough for us to inline calls containing it.
- virtual bool IsInlineable() const { return false; }
-
- static int Count() { return Isolate::Current()->ast_node_count(); }
- static void ResetIds() { Isolate::Current()->set_ast_node_id(0); }
- unsigned id() const { return id_; }
-
- protected:
- static unsigned GetNextId() {
- Isolate* isolate = Isolate::Current();
- unsigned tmp = isolate->ast_node_id();
- isolate->set_ast_node_id(tmp + 1);
- return tmp;
- }
- static unsigned ReserveIdRange(int n) {
- Isolate* isolate = Isolate::Current();
- unsigned tmp = isolate->ast_node_id();
- isolate->set_ast_node_id(tmp + n);
- return tmp;
- }
-
- private:
- unsigned id_;
-
- friend class CaseClause; // Generates AST IDs.
-};
-
-
-class Statement: public AstNode {
- public:
- Statement() : statement_pos_(RelocInfo::kNoPosition) {}
-
- virtual Statement* AsStatement() { return this; }
-
- virtual Assignment* StatementAsSimpleAssignment() { return NULL; }
- virtual CountOperation* StatementAsCountOperation() { return NULL; }
-
- bool IsEmpty() { return AsEmptyStatement() != NULL; }
-
- void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
- int statement_pos() const { return statement_pos_; }
-
- private:
- int statement_pos_;
-};
-
-
-class Expression: public AstNode {
- public:
- enum Context {
- // Not assigned a context yet, or else will not be visited during
- // code generation.
- kUninitialized,
- // Evaluated for its side effects.
- kEffect,
- // Evaluated for its value (and side effects).
- kValue,
- // Evaluated for control flow (and side effects).
- kTest
- };
-
- Expression() : bitfields_(0) {}
-
- virtual Expression* AsExpression() { return this; }
-
- virtual bool IsTrivial() { return false; }
- virtual bool IsValidLeftHandSide() { return false; }
-
- // Helpers for ToBoolean conversion.
- virtual bool ToBooleanIsTrue() { return false; }
- virtual bool ToBooleanIsFalse() { return false; }
-
- // Symbols that cannot be parsed as array indices are considered property
- // names. We do not treat symbols that can be array indexes as property
- // names because [] for string objects is handled only by keyed ICs.
- virtual bool IsPropertyName() { return false; }
-
- // Mark the expression as being compiled as an expression
- // statement. This is used to transform postfix increments to
- // (faster) prefix increments.
- virtual void MarkAsStatement() { /* do nothing */ }
-
- // True iff the result can be safely overwritten (to avoid allocation).
- // False for operations that can return one of their operands.
- virtual bool ResultOverwriteAllowed() { return false; }
-
- // True iff the expression is a literal represented as a smi.
- virtual bool IsSmiLiteral() { return false; }
-
- // Type feedback information for assignments and properties.
- virtual bool IsMonomorphic() {
- UNREACHABLE();
- return false;
- }
- virtual bool IsArrayLength() {
- UNREACHABLE();
- return false;
- }
- virtual ZoneMapList* GetReceiverTypes() {
- UNREACHABLE();
- return NULL;
- }
- virtual Handle<Map> GetMonomorphicReceiverType() {
- UNREACHABLE();
- return Handle<Map>();
- }
-
- // Static type information for this expression.
- StaticType* type() { return &type_; }
-
- // True if the expression is a loop condition.
- bool is_loop_condition() const {
- return LoopConditionField::decode(bitfields_);
- }
- void set_is_loop_condition(bool flag) {
- bitfields_ = (bitfields_ & ~LoopConditionField::mask()) |
- LoopConditionField::encode(flag);
- }
-
- // The value of the expression is guaranteed to be a smi, because the
- // top operation is a bit operation with a mask, or a shift.
- bool GuaranteedSmiResult();
-
- // AST analysis results.
- void CopyAnalysisResultsFrom(Expression* other);
-
- // True if the expression rooted at this node can be compiled by the
- // side-effect free compiler.
- bool side_effect_free() { return SideEffectFreeField::decode(bitfields_); }
- void set_side_effect_free(bool is_side_effect_free) {
- bitfields_ &= ~SideEffectFreeField::mask();
- bitfields_ |= SideEffectFreeField::encode(is_side_effect_free);
- }
-
- // Will the use of this expression treat -0 the same as 0 in all cases?
- // If so, we can return 0 instead of -0 if we want to, to optimize code.
- bool no_negative_zero() { return NoNegativeZeroField::decode(bitfields_); }
- void set_no_negative_zero(bool no_negative_zero) {
- bitfields_ &= ~NoNegativeZeroField::mask();
- bitfields_ |= NoNegativeZeroField::encode(no_negative_zero);
- }
-
- // Will ToInt32 (ECMA 262-3 9.5) or ToUint32 (ECMA 262-3 9.6)
- // be applied to the value of this expression?
- // If so, we may be able to optimize the calculation of the value.
- bool to_int32() { return ToInt32Field::decode(bitfields_); }
- void set_to_int32(bool to_int32) {
- bitfields_ &= ~ToInt32Field::mask();
- bitfields_ |= ToInt32Field::encode(to_int32);
- }
-
- // How many bitwise logical or shift operators are used in this expression?
- int num_bit_ops() { return NumBitOpsField::decode(bitfields_); }
- void set_num_bit_ops(int num_bit_ops) {
- bitfields_ &= ~NumBitOpsField::mask();
- num_bit_ops = Min(num_bit_ops, kMaxNumBitOps);
- bitfields_ |= NumBitOpsField::encode(num_bit_ops);
- }
-
- private:
- static const int kMaxNumBitOps = (1 << 5) - 1;
-
- uint32_t bitfields_;
- StaticType type_;
-
- // Using template BitField<type, start, size>.
- class SideEffectFreeField : public BitField<bool, 0, 1> {};
- class NoNegativeZeroField : public BitField<bool, 1, 1> {};
- class ToInt32Field : public BitField<bool, 2, 1> {};
- class NumBitOpsField : public BitField<int, 3, 5> {};
- class LoopConditionField: public BitField<bool, 8, 1> {};
-};
-
-
-/**
- * A sentinel used during pre parsing that represents some expression
- * that is a valid left hand side without having to actually build
- * the expression.
- */
-class ValidLeftHandSideSentinel: public Expression {
- public:
- virtual bool IsValidLeftHandSide() { return true; }
- virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
-};
-
-
-class BreakableStatement: public Statement {
- public:
- enum Type {
- TARGET_FOR_ANONYMOUS,
- TARGET_FOR_NAMED_ONLY
- };
-
- // The labels associated with this statement. May be NULL;
- // if it is != NULL, guaranteed to contain at least one entry.
- ZoneStringList* labels() const { return labels_; }
-
- // Type testing & conversion.
- virtual BreakableStatement* AsBreakableStatement() { return this; }
-
- // Code generation
- BreakTarget* break_target() { return &break_target_; }
-
- // Testers.
- bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; }
-
- // Bailout support.
- int EntryId() const { return entry_id_; }
- int ExitId() const { return exit_id_; }
-
- protected:
- inline BreakableStatement(ZoneStringList* labels, Type type);
-
- private:
- ZoneStringList* labels_;
- Type type_;
- BreakTarget break_target_;
- int entry_id_;
- int exit_id_;
-};
-
-
-class Block: public BreakableStatement {
- public:
- inline Block(ZoneStringList* labels, int capacity, bool is_initializer_block);
-
- DECLARE_NODE_TYPE(Block)
-
- virtual Assignment* StatementAsSimpleAssignment() {
- if (statements_.length() != 1) return NULL;
- return statements_[0]->StatementAsSimpleAssignment();
- }
-
- virtual CountOperation* StatementAsCountOperation() {
- if (statements_.length() != 1) return NULL;
- return statements_[0]->StatementAsCountOperation();
- }
-
- virtual bool IsInlineable() const;
-
- void AddStatement(Statement* statement) { statements_.Add(statement); }
-
- ZoneList<Statement*>* statements() { return &statements_; }
- bool is_initializer_block() const { return is_initializer_block_; }
-
- private:
- ZoneList<Statement*> statements_;
- bool is_initializer_block_;
-};
-
-
-class Declaration: public AstNode {
- public:
- Declaration(VariableProxy* proxy, Variable::Mode mode, FunctionLiteral* fun)
- : proxy_(proxy),
- mode_(mode),
- fun_(fun) {
- ASSERT(mode == Variable::VAR || mode == Variable::CONST);
- // At the moment there are no "const functions"'s in JavaScript...
- ASSERT(fun == NULL || mode == Variable::VAR);
- }
-
- DECLARE_NODE_TYPE(Declaration)
-
- VariableProxy* proxy() const { return proxy_; }
- Variable::Mode mode() const { return mode_; }
- FunctionLiteral* fun() const { return fun_; } // may be NULL
-
- private:
- VariableProxy* proxy_;
- Variable::Mode mode_;
- FunctionLiteral* fun_;
-};
-
-
-class IterationStatement: public BreakableStatement {
- public:
- // Type testing & conversion.
- virtual IterationStatement* AsIterationStatement() { return this; }
-
- Statement* body() const { return body_; }
-
- // Bailout support.
- int OsrEntryId() const { return osr_entry_id_; }
- virtual int ContinueId() const = 0;
-
- // Code generation
- BreakTarget* continue_target() { return &continue_target_; }
-
- protected:
- explicit inline IterationStatement(ZoneStringList* labels);
-
- void Initialize(Statement* body) {
- body_ = body;
- }
-
- private:
- Statement* body_;
- BreakTarget continue_target_;
- int osr_entry_id_;
-};
-
-
-class DoWhileStatement: public IterationStatement {
- public:
- explicit inline DoWhileStatement(ZoneStringList* labels);
-
- DECLARE_NODE_TYPE(DoWhileStatement)
-
- void Initialize(Expression* cond, Statement* body) {
- IterationStatement::Initialize(body);
- cond_ = cond;
- }
-
- Expression* cond() const { return cond_; }
-
- // Position where condition expression starts. We need it to make
- // the loop's condition a breakable location.
- int condition_position() { return condition_position_; }
- void set_condition_position(int pos) { condition_position_ = pos; }
-
- // Bailout support.
- virtual int ContinueId() const { return continue_id_; }
- int BackEdgeId() const { return back_edge_id_; }
-
- private:
- Expression* cond_;
- int condition_position_;
- int continue_id_;
- int back_edge_id_;
-};
-
-
-class WhileStatement: public IterationStatement {
- public:
- explicit inline WhileStatement(ZoneStringList* labels);
-
- DECLARE_NODE_TYPE(WhileStatement)
-
- void Initialize(Expression* cond, Statement* body) {
- IterationStatement::Initialize(body);
- cond_ = cond;
- }
-
- Expression* cond() const { return cond_; }
- bool may_have_function_literal() const {
- return may_have_function_literal_;
- }
- void set_may_have_function_literal(bool value) {
- may_have_function_literal_ = value;
- }
-
- // Bailout support.
- virtual int ContinueId() const { return EntryId(); }
- int BodyId() const { return body_id_; }
-
- private:
- Expression* cond_;
- // True if there is a function literal subexpression in the condition.
- bool may_have_function_literal_;
- int body_id_;
-};
-
-
-class ForStatement: public IterationStatement {
- public:
- explicit inline ForStatement(ZoneStringList* labels);
-
- DECLARE_NODE_TYPE(ForStatement)
-
- void Initialize(Statement* init,
- Expression* cond,
- Statement* next,
- Statement* body) {
- IterationStatement::Initialize(body);
- init_ = init;
- cond_ = cond;
- next_ = next;
- }
-
- Statement* init() const { return init_; }
- Expression* cond() const { return cond_; }
- Statement* next() const { return next_; }
-
- bool may_have_function_literal() const {
- return may_have_function_literal_;
- }
- void set_may_have_function_literal(bool value) {
- may_have_function_literal_ = value;
- }
-
- // Bailout support.
- virtual int ContinueId() const { return continue_id_; }
- int BodyId() const { return body_id_; }
-
- bool is_fast_smi_loop() { return loop_variable_ != NULL; }
- Variable* loop_variable() { return loop_variable_; }
- void set_loop_variable(Variable* var) { loop_variable_ = var; }
-
- private:
- Statement* init_;
- Expression* cond_;
- Statement* next_;
- // True if there is a function literal subexpression in the condition.
- bool may_have_function_literal_;
- Variable* loop_variable_;
- int continue_id_;
- int body_id_;
-};
-
-
-class ForInStatement: public IterationStatement {
- public:
- explicit inline ForInStatement(ZoneStringList* labels);
-
- DECLARE_NODE_TYPE(ForInStatement)
-
- void Initialize(Expression* each, Expression* enumerable, Statement* body) {
- IterationStatement::Initialize(body);
- each_ = each;
- enumerable_ = enumerable;
- }
-
- Expression* each() const { return each_; }
- Expression* enumerable() const { return enumerable_; }
-
- // Bailout support.
- int AssignmentId() const { return assignment_id_; }
- virtual int ContinueId() const { return EntryId(); }
-
- private:
- Expression* each_;
- Expression* enumerable_;
- int assignment_id_;
-};
-
-
-class ExpressionStatement: public Statement {
- public:
- explicit ExpressionStatement(Expression* expression)
- : expression_(expression) { }
-
- DECLARE_NODE_TYPE(ExpressionStatement)
-
- virtual bool IsInlineable() const;
-
- virtual Assignment* StatementAsSimpleAssignment();
- virtual CountOperation* StatementAsCountOperation();
-
- void set_expression(Expression* e) { expression_ = e; }
- Expression* expression() const { return expression_; }
-
- private:
- Expression* expression_;
-};
-
-
-class ContinueStatement: public Statement {
- public:
- explicit ContinueStatement(IterationStatement* target)
- : target_(target) { }
-
- DECLARE_NODE_TYPE(ContinueStatement)
-
- IterationStatement* target() const { return target_; }
-
- private:
- IterationStatement* target_;
-};
-
-
-class BreakStatement: public Statement {
- public:
- explicit BreakStatement(BreakableStatement* target)
- : target_(target) { }
-
- DECLARE_NODE_TYPE(BreakStatement)
-
- BreakableStatement* target() const { return target_; }
-
- private:
- BreakableStatement* target_;
-};
-
-
-class ReturnStatement: public Statement {
- public:
- explicit ReturnStatement(Expression* expression)
- : expression_(expression) { }
-
- DECLARE_NODE_TYPE(ReturnStatement)
-
- Expression* expression() const { return expression_; }
- virtual bool IsInlineable() const;
-
- private:
- Expression* expression_;
-};
-
-
-class WithEnterStatement: public Statement {
- public:
- explicit WithEnterStatement(Expression* expression, bool is_catch_block)
- : expression_(expression), is_catch_block_(is_catch_block) { }
-
- DECLARE_NODE_TYPE(WithEnterStatement)
-
- Expression* expression() const { return expression_; }
-
- bool is_catch_block() const { return is_catch_block_; }
-
- private:
- Expression* expression_;
- bool is_catch_block_;
-};
-
-
-class WithExitStatement: public Statement {
- public:
- WithExitStatement() { }
-
- DECLARE_NODE_TYPE(WithExitStatement)
-};
-
-
-class CaseClause: public ZoneObject {
- public:
- CaseClause(Expression* label, ZoneList<Statement*>* statements, int pos);
-
- bool is_default() const { return label_ == NULL; }
- Expression* label() const {
- CHECK(!is_default());
- return label_;
- }
- JumpTarget* body_target() { return &body_target_; }
- ZoneList<Statement*>* statements() const { return statements_; }
-
- int position() { return position_; }
- void set_position(int pos) { position_ = pos; }
-
- int EntryId() { return entry_id_; }
-
- // Type feedback information.
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
- bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
-
- private:
- Expression* label_;
- JumpTarget body_target_;
- ZoneList<Statement*>* statements_;
- int position_;
- enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
- CompareTypeFeedback compare_type_;
- int entry_id_;
-};
-
-
-class SwitchStatement: public BreakableStatement {
- public:
- explicit inline SwitchStatement(ZoneStringList* labels);
-
- DECLARE_NODE_TYPE(SwitchStatement)
-
- void Initialize(Expression* tag, ZoneList<CaseClause*>* cases) {
- tag_ = tag;
- cases_ = cases;
- }
-
- Expression* tag() const { return tag_; }
- ZoneList<CaseClause*>* cases() const { return cases_; }
-
- private:
- Expression* tag_;
- ZoneList<CaseClause*>* cases_;
-};
-
-
-// If-statements always have non-null references to their then- and
-// else-parts. When parsing if-statements with no explicit else-part,
-// the parser implicitly creates an empty statement. Use the
-// HasThenStatement() and HasElseStatement() functions to check if a
-// given if-statement has a then- or an else-part containing code.
-class IfStatement: public Statement {
- public:
- IfStatement(Expression* condition,
- Statement* then_statement,
- Statement* else_statement)
- : condition_(condition),
- then_statement_(then_statement),
- else_statement_(else_statement),
- then_id_(GetNextId()),
- else_id_(GetNextId()) {
- }
-
- DECLARE_NODE_TYPE(IfStatement)
-
- virtual bool IsInlineable() const;
-
- bool HasThenStatement() const { return !then_statement()->IsEmpty(); }
- bool HasElseStatement() const { return !else_statement()->IsEmpty(); }
-
- Expression* condition() const { return condition_; }
- Statement* then_statement() const { return then_statement_; }
- Statement* else_statement() const { return else_statement_; }
-
- int ThenId() const { return then_id_; }
- int ElseId() const { return else_id_; }
-
- private:
- Expression* condition_;
- Statement* then_statement_;
- Statement* else_statement_;
- int then_id_;
- int else_id_;
-};
-
-
-// NOTE: TargetCollectors are represented as nodes to fit in the target
-// stack in the compiler; this should probably be reworked.
-class TargetCollector: public AstNode {
- public:
- explicit TargetCollector(ZoneList<BreakTarget*>* targets)
- : targets_(targets) {
- }
-
- // Adds a jump target to the collector. The collector stores a pointer not
- // a copy of the target to make binding work, so make sure not to pass in
- // references to something on the stack.
- void AddTarget(BreakTarget* target);
-
- // Virtual behaviour. TargetCollectors are never part of the AST.
- virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
- virtual TargetCollector* AsTargetCollector() { return this; }
-
- ZoneList<BreakTarget*>* targets() { return targets_; }
-
- private:
- ZoneList<BreakTarget*>* targets_;
-};
-
-
-class TryStatement: public Statement {
- public:
- explicit TryStatement(Block* try_block)
- : try_block_(try_block), escaping_targets_(NULL) { }
-
- void set_escaping_targets(ZoneList<BreakTarget*>* targets) {
- escaping_targets_ = targets;
- }
-
- Block* try_block() const { return try_block_; }
- ZoneList<BreakTarget*>* escaping_targets() const { return escaping_targets_; }
-
- private:
- Block* try_block_;
- ZoneList<BreakTarget*>* escaping_targets_;
-};
-
-
-class TryCatchStatement: public TryStatement {
- public:
- TryCatchStatement(Block* try_block,
- VariableProxy* catch_var,
- Block* catch_block)
- : TryStatement(try_block),
- catch_var_(catch_var),
- catch_block_(catch_block) {
- }
-
- DECLARE_NODE_TYPE(TryCatchStatement)
-
- VariableProxy* catch_var() const { return catch_var_; }
- Block* catch_block() const { return catch_block_; }
-
- private:
- VariableProxy* catch_var_;
- Block* catch_block_;
-};
-
-
-class TryFinallyStatement: public TryStatement {
- public:
- TryFinallyStatement(Block* try_block, Block* finally_block)
- : TryStatement(try_block),
- finally_block_(finally_block) { }
-
- DECLARE_NODE_TYPE(TryFinallyStatement)
-
- Block* finally_block() const { return finally_block_; }
-
- private:
- Block* finally_block_;
-};
-
-
-class DebuggerStatement: public Statement {
- public:
- DECLARE_NODE_TYPE(DebuggerStatement)
-};
-
-
-class EmptyStatement: public Statement {
- public:
- DECLARE_NODE_TYPE(EmptyStatement)
-
- virtual bool IsInlineable() const { return true; }
-};
-
-
-class Literal: public Expression {
- public:
- explicit Literal(Handle<Object> handle) : handle_(handle) { }
-
- DECLARE_NODE_TYPE(Literal)
-
- virtual bool IsTrivial() { return true; }
- virtual bool IsInlineable() const { return true; }
- virtual bool IsSmiLiteral() { return handle_->IsSmi(); }
-
- // Check if this literal is identical to the other literal.
- bool IsIdenticalTo(const Literal* other) const {
- return handle_.is_identical_to(other->handle_);
- }
-
- virtual bool IsPropertyName() {
- if (handle_->IsSymbol()) {
- uint32_t ignored;
- return !String::cast(*handle_)->AsArrayIndex(&ignored);
- }
- return false;
- }
-
- Handle<String> AsPropertyName() {
- ASSERT(IsPropertyName());
- return Handle<String>::cast(handle_);
- }
-
- virtual bool ToBooleanIsTrue() { return handle_->ToBoolean()->IsTrue(); }
- virtual bool ToBooleanIsFalse() { return handle_->ToBoolean()->IsFalse(); }
-
- // Identity testers.
- bool IsNull() const {
- ASSERT(!handle_.is_null());
- return handle_->IsNull();
- }
- bool IsTrue() const {
- ASSERT(!handle_.is_null());
- return handle_->IsTrue();
- }
- bool IsFalse() const {
- ASSERT(!handle_.is_null());
- return handle_->IsFalse();
- }
-
- Handle<Object> handle() const { return handle_; }
-
- private:
- Handle<Object> handle_;
-};
-
-
-// Base class for literals that needs space in the corresponding JSFunction.
-class MaterializedLiteral: public Expression {
- public:
- explicit MaterializedLiteral(int literal_index, bool is_simple, int depth)
- : literal_index_(literal_index), is_simple_(is_simple), depth_(depth) {}
-
- virtual MaterializedLiteral* AsMaterializedLiteral() { return this; }
-
- int literal_index() { return literal_index_; }
-
- // A materialized literal is simple if the values consist of only
- // constants and simple object and array literals.
- bool is_simple() const { return is_simple_; }
-
- int depth() const { return depth_; }
-
- private:
- int literal_index_;
- bool is_simple_;
- int depth_;
-};
-
-
-// An object literal has a boilerplate object that is used
-// for minimizing the work when constructing it at runtime.
-class ObjectLiteral: public MaterializedLiteral {
- public:
- // Property is used for passing information
- // about an object literal's properties from the parser
- // to the code generator.
- class Property: public ZoneObject {
- public:
- enum Kind {
- CONSTANT, // Property with constant value (compile time).
- COMPUTED, // Property with computed value (execution time).
- MATERIALIZED_LITERAL, // Property value is a materialized literal.
- GETTER, SETTER, // Property is an accessor function.
- PROTOTYPE // Property is __proto__.
- };
-
- Property(Literal* key, Expression* value);
- Property(bool is_getter, FunctionLiteral* value);
-
- Literal* key() { return key_; }
- Expression* value() { return value_; }
- Kind kind() { return kind_; }
-
- bool IsCompileTimeValue();
-
- void set_emit_store(bool emit_store);
- bool emit_store();
-
- private:
- Literal* key_;
- Expression* value_;
- Kind kind_;
- bool emit_store_;
- };
-
- ObjectLiteral(Handle<FixedArray> constant_properties,
- ZoneList<Property*>* properties,
- int literal_index,
- bool is_simple,
- bool fast_elements,
- int depth,
- bool has_function)
- : MaterializedLiteral(literal_index, is_simple, depth),
- constant_properties_(constant_properties),
- properties_(properties),
- fast_elements_(fast_elements),
- has_function_(has_function) {}
-
- DECLARE_NODE_TYPE(ObjectLiteral)
-
- Handle<FixedArray> constant_properties() const {
- return constant_properties_;
- }
- ZoneList<Property*>* properties() const { return properties_; }
-
- bool fast_elements() const { return fast_elements_; }
-
- bool has_function() { return has_function_; }
-
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- void CalculateEmitStore();
-
- enum Flags {
- kNoFlags = 0,
- kFastElements = 1,
- kHasFunction = 1 << 1
- };
-
- private:
- Handle<FixedArray> constant_properties_;
- ZoneList<Property*>* properties_;
- bool fast_elements_;
- bool has_function_;
-};
-
-
-// Node for capturing a regexp literal.
-class RegExpLiteral: public MaterializedLiteral {
- public:
- RegExpLiteral(Handle<String> pattern,
- Handle<String> flags,
- int literal_index)
- : MaterializedLiteral(literal_index, false, 1),
- pattern_(pattern),
- flags_(flags) {}
-
- DECLARE_NODE_TYPE(RegExpLiteral)
-
- Handle<String> pattern() const { return pattern_; }
- Handle<String> flags() const { return flags_; }
-
- private:
- Handle<String> pattern_;
- Handle<String> flags_;
-};
-
-// An array literal has a literals object that is used
-// for minimizing the work when constructing it at runtime.
-class ArrayLiteral: public MaterializedLiteral {
- public:
- ArrayLiteral(Handle<FixedArray> constant_elements,
- ZoneList<Expression*>* values,
- int literal_index,
- bool is_simple,
- int depth)
- : MaterializedLiteral(literal_index, is_simple, depth),
- constant_elements_(constant_elements),
- values_(values),
- first_element_id_(ReserveIdRange(values->length())) {}
-
- DECLARE_NODE_TYPE(ArrayLiteral)
-
- Handle<FixedArray> constant_elements() const { return constant_elements_; }
- ZoneList<Expression*>* values() const { return values_; }
-
- // Return an AST id for an element that is used in simulate instructions.
- int GetIdForElement(int i) { return first_element_id_ + i; }
-
- private:
- Handle<FixedArray> constant_elements_;
- ZoneList<Expression*>* values_;
- int first_element_id_;
-};
-
-
-// Node for constructing a context extension object for a catch block.
-// The catch context extension object has one property, the catch
-// variable, which should be DontDelete.
-class CatchExtensionObject: public Expression {
- public:
- CatchExtensionObject(Literal* key, VariableProxy* value)
- : key_(key), value_(value) {
- }
-
- DECLARE_NODE_TYPE(CatchExtensionObject)
-
- Literal* key() const { return key_; }
- VariableProxy* value() const { return value_; }
-
- private:
- Literal* key_;
- VariableProxy* value_;
-};
-
-
-class VariableProxy: public Expression {
- public:
- explicit VariableProxy(Variable* var);
-
- DECLARE_NODE_TYPE(VariableProxy)
-
- // Type testing & conversion
- virtual Property* AsProperty() {
- return var_ == NULL ? NULL : var_->AsProperty();
- }
-
- Variable* AsVariable() {
- if (this == NULL || var_ == NULL) return NULL;
- Expression* rewrite = var_->rewrite();
- if (rewrite == NULL || rewrite->AsSlot() != NULL) return var_;
- return NULL;
- }
-
- virtual bool IsValidLeftHandSide() {
- return var_ == NULL ? true : var_->IsValidLeftHandSide();
- }
-
- virtual bool IsTrivial() {
- // Reading from a mutable variable is a side effect, but the
- // variable for 'this' is immutable.
- return is_this_ || is_trivial_;
- }
-
- virtual bool IsInlineable() const;
-
- bool IsVariable(Handle<String> n) {
- return !is_this() && name().is_identical_to(n);
- }
-
- bool IsArguments() {
- Variable* variable = AsVariable();
- return (variable == NULL) ? false : variable->is_arguments();
- }
-
- Handle<String> name() const { return name_; }
- Variable* var() const { return var_; }
- bool is_this() const { return is_this_; }
- bool inside_with() const { return inside_with_; }
- int position() const { return position_; }
-
- void MarkAsTrivial() { is_trivial_ = true; }
-
- // Bind this proxy to the variable var.
- void BindTo(Variable* var);
-
- protected:
- Handle<String> name_;
- Variable* var_; // resolved variable, or NULL
- bool is_this_;
- bool inside_with_;
- bool is_trivial_;
- int position_;
-
- VariableProxy(Handle<String> name,
- bool is_this,
- bool inside_with,
- int position = RelocInfo::kNoPosition);
- explicit VariableProxy(bool is_this);
-
- friend class Scope;
-};
-
-
-class VariableProxySentinel: public VariableProxy {
- public:
- virtual bool IsValidLeftHandSide() { return !is_this(); }
-
- private:
- explicit VariableProxySentinel(bool is_this) : VariableProxy(is_this) { }
-
- friend class AstSentinels;
-};
-
-
-class Slot: public Expression {
- public:
- enum Type {
- // A slot in the parameter section on the stack. index() is
- // the parameter index, counting left-to-right, starting at 0.
- PARAMETER,
-
- // A slot in the local section on the stack. index() is
- // the variable index in the stack frame, starting at 0.
- LOCAL,
-
- // An indexed slot in a heap context. index() is the
- // variable index in the context object on the heap,
- // starting at 0. var()->scope() is the corresponding
- // scope.
- CONTEXT,
-
- // A named slot in a heap context. var()->name() is the
- // variable name in the context object on the heap,
- // with lookup starting at the current context. index()
- // is invalid.
- LOOKUP
- };
-
- Slot(Variable* var, Type type, int index)
- : var_(var), type_(type), index_(index) {
- ASSERT(var != NULL);
- }
-
- virtual void Accept(AstVisitor* v);
-
- virtual Slot* AsSlot() { return this; }
-
- bool IsStackAllocated() { return type_ == PARAMETER || type_ == LOCAL; }
-
- // Accessors
- Variable* var() const { return var_; }
- Type type() const { return type_; }
- int index() const { return index_; }
- bool is_arguments() const { return var_->is_arguments(); }
-
- private:
- Variable* var_;
- Type type_;
- int index_;
-};
-
-
-class Property: public Expression {
- public:
- // Synthetic properties are property lookups introduced by the system,
- // to objects that aren't visible to the user. Function calls to synthetic
- // properties should use the global object as receiver, not the base object
- // of the resolved Reference.
- enum Type { NORMAL, SYNTHETIC };
- Property(Expression* obj, Expression* key, int pos, Type type = NORMAL)
- : obj_(obj),
- key_(key),
- pos_(pos),
- type_(type),
- receiver_types_(NULL),
- is_monomorphic_(false),
- is_array_length_(false),
- is_string_length_(false),
- is_string_access_(false),
- is_function_prototype_(false),
- is_arguments_access_(false) { }
-
- DECLARE_NODE_TYPE(Property)
-
- virtual bool IsValidLeftHandSide() { return true; }
- virtual bool IsInlineable() const;
-
- Expression* obj() const { return obj_; }
- Expression* key() const { return key_; }
- int position() const { return pos_; }
- bool is_synthetic() const { return type_ == SYNTHETIC; }
-
- bool IsStringLength() const { return is_string_length_; }
- bool IsStringAccess() const { return is_string_access_; }
- bool IsFunctionPrototype() const { return is_function_prototype_; }
-
- // Marks that this is actually an argument rewritten to a keyed property
- // accessing the argument through the arguments shadow object.
- void set_is_arguments_access(bool is_arguments_access) {
- is_arguments_access_ = is_arguments_access;
- }
- bool is_arguments_access() const { return is_arguments_access_; }
-
- ExternalArrayType GetExternalArrayType() const { return array_type_; }
- void SetExternalArrayType(ExternalArrayType array_type) {
- array_type_ = array_type;
- }
-
- // Type feedback information.
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- virtual bool IsMonomorphic() { return is_monomorphic_; }
- virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
- virtual bool IsArrayLength() { return is_array_length_; }
- virtual Handle<Map> GetMonomorphicReceiverType() {
- return monomorphic_receiver_type_;
- }
-
- private:
- Expression* obj_;
- Expression* key_;
- int pos_;
- Type type_;
-
- ZoneMapList* receiver_types_;
- bool is_monomorphic_ : 1;
- bool is_array_length_ : 1;
- bool is_string_length_ : 1;
- bool is_string_access_ : 1;
- bool is_function_prototype_ : 1;
- bool is_arguments_access_ : 1;
- Handle<Map> monomorphic_receiver_type_;
- ExternalArrayType array_type_;
-};
-
-
-class Call: public Expression {
- public:
- Call(Expression* expression, ZoneList<Expression*>* arguments, int pos)
- : expression_(expression),
- arguments_(arguments),
- pos_(pos),
- is_monomorphic_(false),
- check_type_(RECEIVER_MAP_CHECK),
- receiver_types_(NULL),
- return_id_(GetNextId()) {
- }
-
- DECLARE_NODE_TYPE(Call)
-
- virtual bool IsInlineable() const;
-
- Expression* expression() const { return expression_; }
- ZoneList<Expression*>* arguments() const { return arguments_; }
- int position() { return pos_; }
-
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
- virtual bool IsMonomorphic() { return is_monomorphic_; }
- CheckType check_type() const { return check_type_; }
- Handle<JSFunction> target() { return target_; }
- Handle<JSObject> holder() { return holder_; }
- Handle<JSGlobalPropertyCell> cell() { return cell_; }
-
- bool ComputeTarget(Handle<Map> type, Handle<String> name);
- bool ComputeGlobalTarget(Handle<GlobalObject> global, LookupResult* lookup);
-
- // Bailout support.
- int ReturnId() const { return return_id_; }
-
-#ifdef DEBUG
- // Used to assert that the FullCodeGenerator records the return site.
- bool return_is_recorded_;
-#endif
-
- private:
- Expression* expression_;
- ZoneList<Expression*>* arguments_;
- int pos_;
-
- bool is_monomorphic_;
- CheckType check_type_;
- ZoneMapList* receiver_types_;
- Handle<JSFunction> target_;
- Handle<JSObject> holder_;
- Handle<JSGlobalPropertyCell> cell_;
-
- int return_id_;
-};
-
-
-class AstSentinels {
- public:
- ~AstSentinels() { }
-
- // Returns a property singleton property access on 'this'. Used
- // during preparsing.
- Property* this_property() { return &this_property_; }
- VariableProxySentinel* this_proxy() { return &this_proxy_; }
- VariableProxySentinel* identifier_proxy() { return &identifier_proxy_; }
- ValidLeftHandSideSentinel* valid_left_hand_side_sentinel() {
- return &valid_left_hand_side_sentinel_;
- }
- Call* call_sentinel() { return &call_sentinel_; }
- EmptyStatement* empty_statement() { return &empty_statement_; }
-
- private:
- AstSentinels();
- VariableProxySentinel this_proxy_;
- VariableProxySentinel identifier_proxy_;
- ValidLeftHandSideSentinel valid_left_hand_side_sentinel_;
- Property this_property_;
- Call call_sentinel_;
- EmptyStatement empty_statement_;
-
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(AstSentinels);
-};
-
-
-class CallNew: public Expression {
- public:
- CallNew(Expression* expression, ZoneList<Expression*>* arguments, int pos)
- : expression_(expression), arguments_(arguments), pos_(pos) { }
-
- DECLARE_NODE_TYPE(CallNew)
-
- virtual bool IsInlineable() const;
-
- Expression* expression() const { return expression_; }
- ZoneList<Expression*>* arguments() const { return arguments_; }
- int position() { return pos_; }
-
- private:
- Expression* expression_;
- ZoneList<Expression*>* arguments_;
- int pos_;
-};
-
-
-// The CallRuntime class does not represent any official JavaScript
-// language construct. Instead it is used to call a C or JS function
-// with a set of arguments. This is used from the builtins that are
-// implemented in JavaScript (see "v8natives.js").
-class CallRuntime: public Expression {
- public:
- CallRuntime(Handle<String> name,
- const Runtime::Function* function,
- ZoneList<Expression*>* arguments)
- : name_(name), function_(function), arguments_(arguments) { }
-
- DECLARE_NODE_TYPE(CallRuntime)
-
- virtual bool IsInlineable() const;
-
- Handle<String> name() const { return name_; }
- const Runtime::Function* function() const { return function_; }
- ZoneList<Expression*>* arguments() const { return arguments_; }
- bool is_jsruntime() const { return function_ == NULL; }
-
- private:
- Handle<String> name_;
- const Runtime::Function* function_;
- ZoneList<Expression*>* arguments_;
-};
-
-
-class UnaryOperation: public Expression {
- public:
- UnaryOperation(Token::Value op, Expression* expression)
- : op_(op), expression_(expression) {
- ASSERT(Token::IsUnaryOp(op));
- }
-
- DECLARE_NODE_TYPE(UnaryOperation)
-
- virtual bool IsInlineable() const;
-
- virtual bool ResultOverwriteAllowed();
-
- Token::Value op() const { return op_; }
- Expression* expression() const { return expression_; }
-
- private:
- Token::Value op_;
- Expression* expression_;
-};
-
-
-class BinaryOperation: public Expression {
- public:
- BinaryOperation(Token::Value op,
- Expression* left,
- Expression* right,
- int pos)
- : op_(op), left_(left), right_(right), pos_(pos) {
- ASSERT(Token::IsBinaryOp(op));
- right_id_ = (op == Token::AND || op == Token::OR)
- ? static_cast<int>(GetNextId())
- : AstNode::kNoNumber;
- }
-
- // Create the binary operation corresponding to a compound assignment.
- explicit BinaryOperation(Assignment* assignment);
-
- DECLARE_NODE_TYPE(BinaryOperation)
-
- virtual bool IsInlineable() const;
-
- virtual bool ResultOverwriteAllowed();
-
- Token::Value op() const { return op_; }
- Expression* left() const { return left_; }
- Expression* right() const { return right_; }
- int position() const { return pos_; }
-
- // Bailout support.
- int RightId() const { return right_id_; }
-
- private:
- Token::Value op_;
- Expression* left_;
- Expression* right_;
- int pos_;
- // The short-circuit logical operations have an AST ID for their
- // right-hand subexpression.
- int right_id_;
-};
-
-
-class IncrementOperation: public Expression {
- public:
- IncrementOperation(Token::Value op, Expression* expr)
- : op_(op), expression_(expr) {
- ASSERT(Token::IsCountOp(op));
- }
-
- DECLARE_NODE_TYPE(IncrementOperation)
-
- Token::Value op() const { return op_; }
- bool is_increment() { return op_ == Token::INC; }
- Expression* expression() const { return expression_; }
-
- private:
- Token::Value op_;
- Expression* expression_;
- int pos_;
-};
-
-
-class CountOperation: public Expression {
- public:
- CountOperation(bool is_prefix, IncrementOperation* increment, int pos)
- : is_prefix_(is_prefix), increment_(increment), pos_(pos),
- assignment_id_(GetNextId()) {
- }
-
- DECLARE_NODE_TYPE(CountOperation)
-
- bool is_prefix() const { return is_prefix_; }
- bool is_postfix() const { return !is_prefix_; }
-
- Token::Value op() const { return increment_->op(); }
- Token::Value binary_op() {
- return (op() == Token::INC) ? Token::ADD : Token::SUB;
- }
-
- Expression* expression() const { return increment_->expression(); }
- IncrementOperation* increment() const { return increment_; }
- int position() const { return pos_; }
-
- virtual void MarkAsStatement() { is_prefix_ = true; }
-
- virtual bool IsInlineable() const;
-
- // Bailout support.
- int AssignmentId() const { return assignment_id_; }
-
- private:
- bool is_prefix_;
- IncrementOperation* increment_;
- int pos_;
- int assignment_id_;
-};
-
-
-class CompareOperation: public Expression {
- public:
- CompareOperation(Token::Value op,
- Expression* left,
- Expression* right,
- int pos)
- : op_(op), left_(left), right_(right), pos_(pos), compare_type_(NONE) {
- ASSERT(Token::IsCompareOp(op));
- }
-
- DECLARE_NODE_TYPE(CompareOperation)
-
- Token::Value op() const { return op_; }
- Expression* left() const { return left_; }
- Expression* right() const { return right_; }
- int position() const { return pos_; }
-
- virtual bool IsInlineable() const;
-
- // Type feedback information.
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
- bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
-
- private:
- Token::Value op_;
- Expression* left_;
- Expression* right_;
- int pos_;
-
- enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
- CompareTypeFeedback compare_type_;
-};
-
-
-class CompareToNull: public Expression {
- public:
- CompareToNull(bool is_strict, Expression* expression)
- : is_strict_(is_strict), expression_(expression) { }
-
- DECLARE_NODE_TYPE(CompareToNull)
-
- virtual bool IsInlineable() const;
-
- bool is_strict() const { return is_strict_; }
- Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; }
- Expression* expression() const { return expression_; }
-
- private:
- bool is_strict_;
- Expression* expression_;
-};
-
-
-class Conditional: public Expression {
- public:
- Conditional(Expression* condition,
- Expression* then_expression,
- Expression* else_expression,
- int then_expression_position,
- int else_expression_position)
- : condition_(condition),
- then_expression_(then_expression),
- else_expression_(else_expression),
- then_expression_position_(then_expression_position),
- else_expression_position_(else_expression_position),
- then_id_(GetNextId()),
- else_id_(GetNextId()) {
- }
-
- DECLARE_NODE_TYPE(Conditional)
-
- virtual bool IsInlineable() const;
-
- Expression* condition() const { return condition_; }
- Expression* then_expression() const { return then_expression_; }
- Expression* else_expression() const { return else_expression_; }
-
- int then_expression_position() const { return then_expression_position_; }
- int else_expression_position() const { return else_expression_position_; }
-
- int ThenId() const { return then_id_; }
- int ElseId() const { return else_id_; }
-
- private:
- Expression* condition_;
- Expression* then_expression_;
- Expression* else_expression_;
- int then_expression_position_;
- int else_expression_position_;
- int then_id_;
- int else_id_;
-};
-
-
-class Assignment: public Expression {
- public:
- Assignment(Token::Value op, Expression* target, Expression* value, int pos);
-
- DECLARE_NODE_TYPE(Assignment)
-
- virtual bool IsInlineable() const;
-
- Assignment* AsSimpleAssignment() { return !is_compound() ? this : NULL; }
-
- Token::Value binary_op() const;
-
- Token::Value op() const { return op_; }
- Expression* target() const { return target_; }
- Expression* value() const { return value_; }
- int position() { return pos_; }
- BinaryOperation* binary_operation() const { return binary_operation_; }
-
- // This check relies on the definition order of token in token.h.
- bool is_compound() const { return op() > Token::ASSIGN; }
-
- // An initialization block is a series of statments of the form
- // x.y.z.a = ...; x.y.z.b = ...; etc. The parser marks the beginning and
- // ending of these blocks to allow for optimizations of initialization
- // blocks.
- bool starts_initialization_block() { return block_start_; }
- bool ends_initialization_block() { return block_end_; }
- void mark_block_start() { block_start_ = true; }
- void mark_block_end() { block_end_ = true; }
-
- // Type feedback information.
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- virtual bool IsMonomorphic() { return is_monomorphic_; }
- virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
- virtual Handle<Map> GetMonomorphicReceiverType() {
- return monomorphic_receiver_type_;
- }
- ExternalArrayType GetExternalArrayType() const { return array_type_; }
- void SetExternalArrayType(ExternalArrayType array_type) {
- array_type_ = array_type;
- }
-
- // Bailout support.
- int CompoundLoadId() const { return compound_load_id_; }
- int AssignmentId() const { return assignment_id_; }
-
- private:
- Token::Value op_;
- Expression* target_;
- Expression* value_;
- int pos_;
- BinaryOperation* binary_operation_;
- int compound_load_id_;
- int assignment_id_;
-
- bool block_start_;
- bool block_end_;
-
- bool is_monomorphic_;
- ZoneMapList* receiver_types_;
- Handle<Map> monomorphic_receiver_type_;
- ExternalArrayType array_type_;
-};
-
-
-class Throw: public Expression {
- public:
- Throw(Expression* exception, int pos)
- : exception_(exception), pos_(pos) {}
-
- DECLARE_NODE_TYPE(Throw)
-
- Expression* exception() const { return exception_; }
- int position() const { return pos_; }
-
- private:
- Expression* exception_;
- int pos_;
-};
-
-
-class FunctionLiteral: public Expression {
- public:
- FunctionLiteral(Handle<String> name,
- Scope* scope,
- ZoneList<Statement*>* body,
- int materialized_literal_count,
- int expected_property_count,
- bool has_only_simple_this_property_assignments,
- Handle<FixedArray> this_property_assignments,
- int num_parameters,
- int start_position,
- int end_position,
- bool is_expression,
- bool contains_loops)
- : name_(name),
- scope_(scope),
- body_(body),
- materialized_literal_count_(materialized_literal_count),
- expected_property_count_(expected_property_count),
- has_only_simple_this_property_assignments_(
- has_only_simple_this_property_assignments),
- this_property_assignments_(this_property_assignments),
- num_parameters_(num_parameters),
- start_position_(start_position),
- end_position_(end_position),
- is_expression_(is_expression),
- contains_loops_(contains_loops),
- function_token_position_(RelocInfo::kNoPosition),
- inferred_name_(HEAP->empty_string()),
- pretenure_(false) { }
-
- DECLARE_NODE_TYPE(FunctionLiteral)
-
- Handle<String> name() const { return name_; }
- Scope* scope() const { return scope_; }
- ZoneList<Statement*>* body() const { return body_; }
- void set_function_token_position(int pos) { function_token_position_ = pos; }
- int function_token_position() const { return function_token_position_; }
- int start_position() const { return start_position_; }
- int end_position() const { return end_position_; }
- bool is_expression() const { return is_expression_; }
- bool contains_loops() const { return contains_loops_; }
- bool strict_mode() const;
-
- int materialized_literal_count() { return materialized_literal_count_; }
- int expected_property_count() { return expected_property_count_; }
- bool has_only_simple_this_property_assignments() {
- return has_only_simple_this_property_assignments_;
- }
- Handle<FixedArray> this_property_assignments() {
- return this_property_assignments_;
- }
- int num_parameters() { return num_parameters_; }
-
- bool AllowsLazyCompilation();
-
- Handle<String> debug_name() const {
- if (name_->length() > 0) return name_;
- return inferred_name();
- }
-
- Handle<String> inferred_name() const { return inferred_name_; }
- void set_inferred_name(Handle<String> inferred_name) {
- inferred_name_ = inferred_name;
- }
-
- bool pretenure() { return pretenure_; }
- void set_pretenure(bool value) { pretenure_ = value; }
-
- private:
- Handle<String> name_;
- Scope* scope_;
- ZoneList<Statement*>* body_;
- int materialized_literal_count_;
- int expected_property_count_;
- bool has_only_simple_this_property_assignments_;
- Handle<FixedArray> this_property_assignments_;
- int num_parameters_;
- int start_position_;
- int end_position_;
- bool is_expression_;
- bool contains_loops_;
- bool strict_mode_;
- int function_token_position_;
- Handle<String> inferred_name_;
- bool pretenure_;
-};
-
-
-class SharedFunctionInfoLiteral: public Expression {
- public:
- explicit SharedFunctionInfoLiteral(
- Handle<SharedFunctionInfo> shared_function_info)
- : shared_function_info_(shared_function_info) { }
-
- DECLARE_NODE_TYPE(SharedFunctionInfoLiteral)
-
- Handle<SharedFunctionInfo> shared_function_info() const {
- return shared_function_info_;
- }
-
- private:
- Handle<SharedFunctionInfo> shared_function_info_;
-};
-
-
-class ThisFunction: public Expression {
- public:
- DECLARE_NODE_TYPE(ThisFunction)
-};
-
-
-// ----------------------------------------------------------------------------
-// Regular expressions
-
-
-class RegExpVisitor BASE_EMBEDDED {
- public:
- virtual ~RegExpVisitor() { }
-#define MAKE_CASE(Name) \
- virtual void* Visit##Name(RegExp##Name*, void* data) = 0;
- FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
-#undef MAKE_CASE
-};
-
-
-class RegExpTree: public ZoneObject {
- public:
- static const int kInfinity = kMaxInt;
- virtual ~RegExpTree() { }
- virtual void* Accept(RegExpVisitor* visitor, void* data) = 0;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) = 0;
- virtual bool IsTextElement() { return false; }
- virtual bool IsAnchoredAtStart() { return false; }
- virtual bool IsAnchoredAtEnd() { return false; }
- virtual int min_match() = 0;
- virtual int max_match() = 0;
- // Returns the interval of registers used for captures within this
- // expression.
- virtual Interval CaptureRegisters() { return Interval::Empty(); }
- virtual void AppendToText(RegExpText* text);
- SmartPointer<const char> ToString();
-#define MAKE_ASTYPE(Name) \
- virtual RegExp##Name* As##Name(); \
- virtual bool Is##Name();
- FOR_EACH_REG_EXP_TREE_TYPE(MAKE_ASTYPE)
-#undef MAKE_ASTYPE
-};
-
-
-class RegExpDisjunction: public RegExpTree {
- public:
- explicit RegExpDisjunction(ZoneList<RegExpTree*>* alternatives);
- virtual void* Accept(RegExpVisitor* visitor, void* data);
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpDisjunction* AsDisjunction();
- virtual Interval CaptureRegisters();
- virtual bool IsDisjunction();
- virtual bool IsAnchoredAtStart();
- virtual bool IsAnchoredAtEnd();
- virtual int min_match() { return min_match_; }
- virtual int max_match() { return max_match_; }
- ZoneList<RegExpTree*>* alternatives() { return alternatives_; }
- private:
- ZoneList<RegExpTree*>* alternatives_;
- int min_match_;
- int max_match_;
-};
-
-
-class RegExpAlternative: public RegExpTree {
- public:
- explicit RegExpAlternative(ZoneList<RegExpTree*>* nodes);
- virtual void* Accept(RegExpVisitor* visitor, void* data);
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpAlternative* AsAlternative();
- virtual Interval CaptureRegisters();
- virtual bool IsAlternative();
- virtual bool IsAnchoredAtStart();
- virtual bool IsAnchoredAtEnd();
- virtual int min_match() { return min_match_; }
- virtual int max_match() { return max_match_; }
- ZoneList<RegExpTree*>* nodes() { return nodes_; }
- private:
- ZoneList<RegExpTree*>* nodes_;
- int min_match_;
- int max_match_;
-};
-
-
-class RegExpAssertion: public RegExpTree {
- public:
- enum Type {
- START_OF_LINE,
- START_OF_INPUT,
- END_OF_LINE,
- END_OF_INPUT,
- BOUNDARY,
- NON_BOUNDARY
- };
- explicit RegExpAssertion(Type type) : type_(type) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpAssertion* AsAssertion();
- virtual bool IsAssertion();
- virtual bool IsAnchoredAtStart();
- virtual bool IsAnchoredAtEnd();
- virtual int min_match() { return 0; }
- virtual int max_match() { return 0; }
- Type type() { return type_; }
- private:
- Type type_;
-};
-
-
-class CharacterSet BASE_EMBEDDED {
- public:
- explicit CharacterSet(uc16 standard_set_type)
- : ranges_(NULL),
- standard_set_type_(standard_set_type) {}
- explicit CharacterSet(ZoneList<CharacterRange>* ranges)
- : ranges_(ranges),
- standard_set_type_(0) {}
- ZoneList<CharacterRange>* ranges();
- uc16 standard_set_type() { return standard_set_type_; }
- void set_standard_set_type(uc16 special_set_type) {
- standard_set_type_ = special_set_type;
- }
- bool is_standard() { return standard_set_type_ != 0; }
- void Canonicalize();
- private:
- ZoneList<CharacterRange>* ranges_;
- // If non-zero, the value represents a standard set (e.g., all whitespace
- // characters) without having to expand the ranges.
- uc16 standard_set_type_;
-};
-
-
-class RegExpCharacterClass: public RegExpTree {
- public:
- RegExpCharacterClass(ZoneList<CharacterRange>* ranges, bool is_negated)
- : set_(ranges),
- is_negated_(is_negated) { }
- explicit RegExpCharacterClass(uc16 type)
- : set_(type),
- is_negated_(false) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpCharacterClass* AsCharacterClass();
- virtual bool IsCharacterClass();
- virtual bool IsTextElement() { return true; }
- virtual int min_match() { return 1; }
- virtual int max_match() { return 1; }
- virtual void AppendToText(RegExpText* text);
- CharacterSet character_set() { return set_; }
- // TODO(lrn): Remove need for complex version if is_standard that
- // recognizes a mangled standard set and just do { return set_.is_special(); }
- bool is_standard();
- // Returns a value representing the standard character set if is_standard()
- // returns true.
- // Currently used values are:
- // s : unicode whitespace
- // S : unicode non-whitespace
- // w : ASCII word character (digit, letter, underscore)
- // W : non-ASCII word character
- // d : ASCII digit
- // D : non-ASCII digit
- // . : non-unicode non-newline
- // * : All characters
- uc16 standard_type() { return set_.standard_set_type(); }
- ZoneList<CharacterRange>* ranges() { return set_.ranges(); }
- bool is_negated() { return is_negated_; }
- private:
- CharacterSet set_;
- bool is_negated_;
-};
-
-
-class RegExpAtom: public RegExpTree {
- public:
- explicit RegExpAtom(Vector<const uc16> data) : data_(data) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpAtom* AsAtom();
- virtual bool IsAtom();
- virtual bool IsTextElement() { return true; }
- virtual int min_match() { return data_.length(); }
- virtual int max_match() { return data_.length(); }
- virtual void AppendToText(RegExpText* text);
- Vector<const uc16> data() { return data_; }
- int length() { return data_.length(); }
- private:
- Vector<const uc16> data_;
-};
-
-
-class RegExpText: public RegExpTree {
- public:
- RegExpText() : elements_(2), length_(0) {}
- virtual void* Accept(RegExpVisitor* visitor, void* data);
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpText* AsText();
- virtual bool IsText();
- virtual bool IsTextElement() { return true; }
- virtual int min_match() { return length_; }
- virtual int max_match() { return length_; }
- virtual void AppendToText(RegExpText* text);
- void AddElement(TextElement elm) {
- elements_.Add(elm);
- length_ += elm.length();
- }
- ZoneList<TextElement>* elements() { return &elements_; }
- private:
- ZoneList<TextElement> elements_;
- int length_;
-};
-
-
-class RegExpQuantifier: public RegExpTree {
- public:
- enum Type { GREEDY, NON_GREEDY, POSSESSIVE };
- RegExpQuantifier(int min, int max, Type type, RegExpTree* body)
- : body_(body),
- min_(min),
- max_(max),
- min_match_(min * body->min_match()),
- type_(type) {
- if (max > 0 && body->max_match() > kInfinity / max) {
- max_match_ = kInfinity;
- } else {
- max_match_ = max * body->max_match();
- }
- }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- static RegExpNode* ToNode(int min,
- int max,
- bool is_greedy,
- RegExpTree* body,
- RegExpCompiler* compiler,
- RegExpNode* on_success,
- bool not_at_start = false);
- virtual RegExpQuantifier* AsQuantifier();
- virtual Interval CaptureRegisters();
- virtual bool IsQuantifier();
- virtual int min_match() { return min_match_; }
- virtual int max_match() { return max_match_; }
- int min() { return min_; }
- int max() { return max_; }
- bool is_possessive() { return type_ == POSSESSIVE; }
- bool is_non_greedy() { return type_ == NON_GREEDY; }
- bool is_greedy() { return type_ == GREEDY; }
- RegExpTree* body() { return body_; }
- private:
- RegExpTree* body_;
- int min_;
- int max_;
- int min_match_;
- int max_match_;
- Type type_;
-};
-
-
-class RegExpCapture: public RegExpTree {
- public:
- explicit RegExpCapture(RegExpTree* body, int index)
- : body_(body), index_(index) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- static RegExpNode* ToNode(RegExpTree* body,
- int index,
- RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpCapture* AsCapture();
- virtual bool IsAnchoredAtStart();
- virtual bool IsAnchoredAtEnd();
- virtual Interval CaptureRegisters();
- virtual bool IsCapture();
- virtual int min_match() { return body_->min_match(); }
- virtual int max_match() { return body_->max_match(); }
- RegExpTree* body() { return body_; }
- int index() { return index_; }
- static int StartRegister(int index) { return index * 2; }
- static int EndRegister(int index) { return index * 2 + 1; }
- private:
- RegExpTree* body_;
- int index_;
-};
-
-
-class RegExpLookahead: public RegExpTree {
- public:
- RegExpLookahead(RegExpTree* body,
- bool is_positive,
- int capture_count,
- int capture_from)
- : body_(body),
- is_positive_(is_positive),
- capture_count_(capture_count),
- capture_from_(capture_from) { }
-
- virtual void* Accept(RegExpVisitor* visitor, void* data);
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpLookahead* AsLookahead();
- virtual Interval CaptureRegisters();
- virtual bool IsLookahead();
- virtual bool IsAnchoredAtStart();
- virtual int min_match() { return 0; }
- virtual int max_match() { return 0; }
- RegExpTree* body() { return body_; }
- bool is_positive() { return is_positive_; }
- int capture_count() { return capture_count_; }
- int capture_from() { return capture_from_; }
- private:
- RegExpTree* body_;
- bool is_positive_;
- int capture_count_;
- int capture_from_;
-};
-
-
-class RegExpBackReference: public RegExpTree {
- public:
- explicit RegExpBackReference(RegExpCapture* capture)
- : capture_(capture) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpBackReference* AsBackReference();
- virtual bool IsBackReference();
- virtual int min_match() { return 0; }
- virtual int max_match() { return capture_->max_match(); }
- int index() { return capture_->index(); }
- RegExpCapture* capture() { return capture_; }
- private:
- RegExpCapture* capture_;
-};
-
-
-class RegExpEmpty: public RegExpTree {
- public:
- RegExpEmpty() { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpEmpty* AsEmpty();
- virtual bool IsEmpty();
- virtual int min_match() { return 0; }
- virtual int max_match() { return 0; }
- static RegExpEmpty* GetInstance() { return &kInstance; }
- private:
- static RegExpEmpty kInstance;
-};
-
-
-// ----------------------------------------------------------------------------
-// Basic visitor
-// - leaf node visitors are abstract.
-
-class AstVisitor BASE_EMBEDDED {
- public:
- AstVisitor() : isolate_(Isolate::Current()), stack_overflow_(false) { }
- virtual ~AstVisitor() { }
-
- // Stack overflow check and dynamic dispatch.
- void Visit(AstNode* node) { if (!CheckStackOverflow()) node->Accept(this); }
-
- // Iteration left-to-right.
- virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
- virtual void VisitStatements(ZoneList<Statement*>* statements);
- virtual void VisitExpressions(ZoneList<Expression*>* expressions);
-
- // Stack overflow tracking support.
- bool HasStackOverflow() const { return stack_overflow_; }
- bool CheckStackOverflow();
-
- // If a stack-overflow exception is encountered when visiting a
- // node, calling SetStackOverflow will make sure that the visitor
- // bails out without visiting more nodes.
- void SetStackOverflow() { stack_overflow_ = true; }
- void ClearStackOverflow() { stack_overflow_ = false; }
-
- // Nodes not appearing in the AST, including slots.
- virtual void VisitSlot(Slot* node) { UNREACHABLE(); }
-
- // Individual AST nodes.
-#define DEF_VISIT(type) \
- virtual void Visit##type(type* node) = 0;
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- protected:
- Isolate* isolate() { return isolate_; }
-
- private:
- Isolate* isolate_;
- bool stack_overflow_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_AST_H_
diff --git a/src/3rdparty/v8/src/atomicops.h b/src/3rdparty/v8/src/atomicops.h
deleted file mode 100644
index e2057ed..0000000
--- a/src/3rdparty/v8/src/atomicops.h
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The routines exported by this module are subtle. If you use them, even if
-// you get the code right, it will depend on careful reasoning about atomicity
-// and memory ordering; it will be less readable, and harder to maintain. If
-// you plan to use these routines, you should have a good reason, such as solid
-// evidence that performance would otherwise suffer, or there being no
-// alternative. You should assume only properties explicitly guaranteed by the
-// specifications in this file. You are almost certainly _not_ writing code
-// just for the x86; if you assume x86 semantics, x86 hardware bugs and
-// implementations on other archtectures will cause your code to break. If you
-// do not know what you are doing, avoid these routines, and use a Mutex.
-//
-// It is incorrect to make direct assignments to/from an atomic variable.
-// You should use one of the Load or Store routines. The NoBarrier
-// versions are provided when no barriers are needed:
-// NoBarrier_Store()
-// NoBarrier_Load()
-// Although there are currently no compiler enforcement, you are encouraged
-// to use these.
-//
-
-#ifndef V8_ATOMICOPS_H_
-#define V8_ATOMICOPS_H_
-
-#include "../include/v8.h"
-#include "globals.h"
-
-namespace v8 {
-namespace internal {
-
-typedef int32_t Atomic32;
-#ifdef V8_HOST_ARCH_64_BIT
-// We need to be able to go between Atomic64 and AtomicWord implicitly. This
-// means Atomic64 and AtomicWord should be the same type on 64-bit.
-#if defined(__APPLE__)
-// MacOS is an exception to the implicit conversion rule above,
-// because it uses long for intptr_t.
-typedef int64_t Atomic64;
-#else
-typedef intptr_t Atomic64;
-#endif
-#endif
-
-// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
-// Atomic64 routines below, depending on your architecture.
-typedef intptr_t AtomicWord;
-
-// Atomically execute:
-// result = *ptr;
-// if (*ptr == old_value)
-// *ptr = new_value;
-// return result;
-//
-// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
-// Always return the old value of "*ptr"
-//
-// This routine implies no memory barriers.
-Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value);
-
-// Atomically store new_value into *ptr, returning the previous value held in
-// *ptr. This routine implies no memory barriers.
-Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
-
-// Atomically increment *ptr by "increment". Returns the new value of
-// *ptr with the increment applied. This routine implies no memory barriers.
-Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
-
-Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment);
-
-// These following lower-level operations are typically useful only to people
-// implementing higher-level synchronization operations like spinlocks,
-// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
-// a store with appropriate memory-ordering instructions. "Acquire" operations
-// ensure that no later memory access can be reordered ahead of the operation.
-// "Release" operations ensure that no previous memory access can be reordered
-// after the operation. "Barrier" operations have both "Acquire" and "Release"
-// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
-// access.
-Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value);
-Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value);
-
-void MemoryBarrier();
-void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
-void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
-void Release_Store(volatile Atomic32* ptr, Atomic32 value);
-
-Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
-Atomic32 Acquire_Load(volatile const Atomic32* ptr);
-Atomic32 Release_Load(volatile const Atomic32* ptr);
-
-// 64-bit atomic operations (only available on 64-bit processors).
-#ifdef V8_HOST_ARCH_64_BIT
-Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value);
-Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
-Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
-Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
-
-Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value);
-Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value);
-void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
-void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
-void Release_Store(volatile Atomic64* ptr, Atomic64 value);
-Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
-Atomic64 Acquire_Load(volatile const Atomic64* ptr);
-Atomic64 Release_Load(volatile const Atomic64* ptr);
-#endif // V8_HOST_ARCH_64_BIT
-
-} } // namespace v8::internal
-
-// Include our platform specific implementation.
-#if defined(_MSC_VER) && \
- (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
-#include "atomicops_internals_x86_msvc.h"
-#elif defined(__APPLE__) && \
- (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
-#include "atomicops_internals_x86_macosx.h"
-#elif defined(__GNUC__) && \
- (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
-#include "atomicops_internals_x86_gcc.h"
-#elif defined(__GNUC__) && defined(V8_HOST_ARCH_ARM)
-#include "atomicops_internals_arm_gcc.h"
-#elif defined(__GNUC__) && defined(V8_HOST_ARCH_MIPS)
-#include "atomicops_internals_mips_gcc.h"
-#else
-#error "Atomic operations are not supported on your platform"
-#endif
-
-#endif // V8_ATOMICOPS_H_
diff --git a/src/3rdparty/v8/src/atomicops_internals_arm_gcc.h b/src/3rdparty/v8/src/atomicops_internals_arm_gcc.h
deleted file mode 100644
index 6c30256..0000000
--- a/src/3rdparty/v8/src/atomicops_internals_arm_gcc.h
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-//
-// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
-
-#ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
-#define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
-
-namespace v8 {
-namespace internal {
-
-// 0xffff0fc0 is the hard coded address of a function provided by
-// the kernel which implements an atomic compare-exchange. On older
-// ARM architecture revisions (pre-v6) this may be implemented using
-// a syscall. This address is stable, and in active use (hard coded)
-// by at least glibc-2.7 and the Android C library.
-typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
- Atomic32 new_value,
- volatile Atomic32* ptr);
-LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) =
- (LinuxKernelCmpxchgFunc) 0xffff0fc0;
-
-typedef void (*LinuxKernelMemoryBarrierFunc)(void);
-LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
- (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
-
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev_value = *ptr;
- do {
- if (!pLinuxKernelCmpxchg(old_value, new_value,
- const_cast<Atomic32*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 old_value;
- do {
- old_value = *ptr;
- } while (pLinuxKernelCmpxchg(old_value, new_value,
- const_cast<Atomic32*>(ptr)));
- return old_value;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return Barrier_AtomicIncrement(ptr, increment);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- for (;;) {
- // Atomic exchange the old value with an incremented one.
- Atomic32 old_value = *ptr;
- Atomic32 new_value = old_value + increment;
- if (pLinuxKernelCmpxchg(old_value, new_value,
- const_cast<Atomic32*>(ptr)) == 0) {
- // The exchange took place as expected.
- return new_value;
- }
- // Otherwise, *ptr changed mid-loop and we need to retry.
- }
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void MemoryBarrier() {
- pLinuxKernelMemoryBarrier();
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-} } // namespace v8::internal
-
-#endif // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
diff --git a/src/3rdparty/v8/src/atomicops_internals_mips_gcc.h b/src/3rdparty/v8/src/atomicops_internals_mips_gcc.h
deleted file mode 100644
index 5113de2..0000000
--- a/src/3rdparty/v8/src/atomicops_internals_mips_gcc.h
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
-#define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
-
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("sync" : : : "memory")
-
-namespace v8 {
-namespace internal {
-
-// Atomically execute:
-// result = *ptr;
-// if (*ptr == old_value)
-// *ptr = new_value;
-// return result;
-//
-// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
-// Always return the old value of "*ptr"
-//
-// This routine implies no memory barriers.
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev;
- __asm__ __volatile__("1:\n"
- "ll %0, %1\n" // prev = *ptr
- "bne %0, %3, 2f\n" // if (prev != old_value) goto 2
- "nop\n" // delay slot nop
- "sc %2, %1\n" // *ptr = new_value (with atomic check)
- "beqz %2, 1b\n" // start again on atomic error
- "nop\n" // delay slot nop
- "2:\n"
- : "=&r" (prev), "=m" (*ptr), "+&r" (new_value)
- : "Ir" (old_value), "r" (new_value), "m" (*ptr)
- : "memory");
- return prev;
-}
-
-// Atomically store new_value into *ptr, returning the previous value held in
-// *ptr. This routine implies no memory barriers.
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 temp, old;
- __asm__ __volatile__("1:\n"
- "ll %1, %2\n" // old = *ptr
- "move %0, %3\n" // temp = new_value
- "sc %0, %2\n" // *ptr = temp (with atomic check)
- "beqz %0, 1b\n" // start again on atomic error
- "nop\n" // delay slot nop
- : "=&r" (temp), "=&r" (old), "=m" (*ptr)
- : "r" (new_value), "m" (*ptr)
- : "memory");
-
- return old;
-}
-
-// Atomically increment *ptr by "increment". Returns the new value of
-// *ptr with the increment applied. This routine implies no memory barriers.
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 temp, temp2;
-
- __asm__ __volatile__("1:\n"
- "ll %0, %2\n" // temp = *ptr
- "addu %0, %3\n" // temp = temp + increment
- "move %1, %0\n" // temp2 = temp
- "sc %0, %2\n" // *ptr = temp (with atomic check)
- "beqz %0, 1b\n" // start again on atomic error
- "nop\n" // delay slot nop
- : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
- : "Ir" (increment), "m" (*ptr)
- : "memory");
- // temp2 now holds the final value.
- return temp2;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
- ATOMICOPS_COMPILER_BARRIER();
- return res;
-}
-
-// "Acquire" operations
-// ensure that no later memory access can be reordered ahead of the operation.
-// "Release" operations ensure that no previous memory access can be reordered
-// after the operation. "Barrier" operations have both "Acquire" and "Release"
-// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
-// access.
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- ATOMICOPS_COMPILER_BARRIER();
- return x;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- ATOMICOPS_COMPILER_BARRIER();
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void MemoryBarrier() {
- ATOMICOPS_COMPILER_BARRIER();
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-} } // namespace v8::internal
-
-#undef ATOMICOPS_COMPILER_BARRIER
-
-#endif // V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
diff --git a/src/3rdparty/v8/src/atomicops_internals_x86_gcc.cc b/src/3rdparty/v8/src/atomicops_internals_x86_gcc.cc
deleted file mode 100644
index a572564..0000000
--- a/src/3rdparty/v8/src/atomicops_internals_x86_gcc.cc
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This module gets enough CPU information to optimize the
-// atomicops module on x86.
-
-#include <string.h>
-
-#include "atomicops.h"
-
-// This file only makes sense with atomicops_internals_x86_gcc.h -- it
-// depends on structs that are defined in that file. If atomicops.h
-// doesn't sub-include that file, then we aren't needed, and shouldn't
-// try to do anything.
-#ifdef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
-
-// Inline cpuid instruction. In PIC compilations, %ebx contains the address
-// of the global offset table. To avoid breaking such executables, this code
-// must preserve that register's value across cpuid instructions.
-#if defined(__i386__)
-#define cpuid(a, b, c, d, inp) \
- asm("mov %%ebx, %%edi\n" \
- "cpuid\n" \
- "xchg %%edi, %%ebx\n" \
- : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
-#elif defined(__x86_64__)
-#define cpuid(a, b, c, d, inp) \
- asm("mov %%rbx, %%rdi\n" \
- "cpuid\n" \
- "xchg %%rdi, %%rbx\n" \
- : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
-#endif
-
-#if defined(cpuid) // initialize the struct only on x86
-
-// Set the flags so that code will run correctly and conservatively, so even
-// if we haven't been initialized yet, we're probably single threaded, and our
-// default values should hopefully be pretty safe.
-struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
- false, // bug can't exist before process spawns multiple threads
- false, // no SSE2
-};
-
-// Initialize the AtomicOps_Internalx86CPUFeatures struct.
-static void AtomicOps_Internalx86CPUFeaturesInit() {
- uint32_t eax;
- uint32_t ebx;
- uint32_t ecx;
- uint32_t edx;
-
- // Get vendor string (issue CPUID with eax = 0)
- cpuid(eax, ebx, ecx, edx, 0);
- char vendor[13];
- memcpy(vendor, &ebx, 4);
- memcpy(vendor + 4, &edx, 4);
- memcpy(vendor + 8, &ecx, 4);
- vendor[12] = 0;
-
- // get feature flags in ecx/edx, and family/model in eax
- cpuid(eax, ebx, ecx, edx, 1);
-
- int family = (eax >> 8) & 0xf; // family and model fields
- int model = (eax >> 4) & 0xf;
- if (family == 0xf) { // use extended family and model fields
- family += (eax >> 20) & 0xff;
- model += ((eax >> 16) & 0xf) << 4;
- }
-
- // Opteron Rev E has a bug in which on very rare occasions a locked
- // instruction doesn't act as a read-acquire barrier if followed by a
- // non-locked read-modify-write instruction. Rev F has this bug in
- // pre-release versions, but not in versions released to customers,
- // so we test only for Rev E, which is family 15, model 32..63 inclusive.
- if (strcmp(vendor, "AuthenticAMD") == 0 && // AMD
- family == 15 &&
- 32 <= model && model <= 63) {
- AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true;
- } else {
- AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
- }
-
- // edx bit 26 is SSE2 which we use to tell use whether we can use mfence
- AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
-}
-
-namespace {
-
-class AtomicOpsx86Initializer {
- public:
- AtomicOpsx86Initializer() {
- AtomicOps_Internalx86CPUFeaturesInit();
- }
-};
-
-// A global to get use initialized on startup via static initialization :/
-AtomicOpsx86Initializer g_initer;
-
-} // namespace
-
-#endif // if x86
-
-#endif // ifdef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
diff --git a/src/3rdparty/v8/src/atomicops_internals_x86_gcc.h b/src/3rdparty/v8/src/atomicops_internals_x86_gcc.h
deleted file mode 100644
index 3f17fa0..0000000
--- a/src/3rdparty/v8/src/atomicops_internals_x86_gcc.h
+++ /dev/null
@@ -1,287 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
-#define V8_ATOMICOPS_INTERNALS_X86_GCC_H_
-
-// This struct is not part of the public API of this module; clients may not
-// use it.
-// Features of this x86. Values may not be correct before main() is run,
-// but are set conservatively.
-struct AtomicOps_x86CPUFeatureStruct {
- bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
- // after acquire compare-and-swap.
- bool has_sse2; // Processor has SSE2.
-};
-extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
-
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
-
-namespace v8 {
-namespace internal {
-
-// 32-bit low-level operations on any platform.
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev;
- __asm__ __volatile__("lock; cmpxchgl %1,%2"
- : "=a" (prev)
- : "q" (new_value), "m" (*ptr), "0" (old_value)
- : "memory");
- return prev;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- __asm__ __volatile__("xchgl %1,%0" // The lock prefix is implicit for xchg.
- : "=r" (new_value)
- : "m" (*ptr), "0" (new_value)
- : "memory");
- return new_value; // Now it's the previous value.
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 temp = increment;
- __asm__ __volatile__("lock; xaddl %0,%1"
- : "+r" (temp), "+m" (*ptr)
- : : "memory");
- // temp now holds the old value of *ptr
- return temp + increment;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 temp = increment;
- __asm__ __volatile__("lock; xaddl %0,%1"
- : "+r" (temp), "+m" (*ptr)
- : : "memory");
- // temp now holds the old value of *ptr
- if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
- __asm__ __volatile__("lfence" : : : "memory");
- }
- return temp + increment;
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
- __asm__ __volatile__("lfence" : : : "memory");
- }
- return x;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-#if defined(__x86_64__)
-
-// 64-bit implementations of memory barrier can be simpler, because it
-// "mfence" is guaranteed to exist.
-inline void MemoryBarrier() {
- __asm__ __volatile__("mfence" : : : "memory");
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-#else
-
-inline void MemoryBarrier() {
- if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
- __asm__ __volatile__("mfence" : : : "memory");
- } else { // mfence is faster but not present on PIII
- Atomic32 x = 0;
- NoBarrier_AtomicExchange(&x, 0); // acts as a barrier on PIII
- }
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
- *ptr = value;
- __asm__ __volatile__("mfence" : : : "memory");
- } else {
- NoBarrier_AtomicExchange(ptr, value);
- // acts as a barrier on PIII
- }
-}
-#endif
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- ATOMICOPS_COMPILER_BARRIER();
- *ptr = value; // An x86 store acts as a release barrier.
- // See comments in Atomic64 version of Release_Store(), below.
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr; // An x86 load acts as a acquire barrier.
- // See comments in Atomic64 version of Release_Store(), below.
- ATOMICOPS_COMPILER_BARRIER();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#if defined(__x86_64__)
-
-// 64-bit low-level operations on 64-bit platform.
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev;
- __asm__ __volatile__("lock; cmpxchgq %1,%2"
- : "=a" (prev)
- : "q" (new_value), "m" (*ptr), "0" (old_value)
- : "memory");
- return prev;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- __asm__ __volatile__("xchgq %1,%0" // The lock prefix is implicit for xchg.
- : "=r" (new_value)
- : "m" (*ptr), "0" (new_value)
- : "memory");
- return new_value; // Now it's the previous value.
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- Atomic64 temp = increment;
- __asm__ __volatile__("lock; xaddq %0,%1"
- : "+r" (temp), "+m" (*ptr)
- : : "memory");
- // temp now contains the previous value of *ptr
- return temp + increment;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- Atomic64 temp = increment;
- __asm__ __volatile__("lock; xaddq %0,%1"
- : "+r" (temp), "+m" (*ptr)
- : : "memory");
- // temp now contains the previous value of *ptr
- if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
- __asm__ __volatile__("lfence" : : : "memory");
- }
- return temp + increment;
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- ATOMICOPS_COMPILER_BARRIER();
-
- *ptr = value; // An x86 store acts as a release barrier
- // for current AMD/Intel chips as of Jan 2008.
- // See also Acquire_Load(), below.
-
- // When new chips come out, check:
- // IA-32 Intel Architecture Software Developer's Manual, Volume 3:
- // System Programming Guide, Chatper 7: Multiple-processor management,
- // Section 7.2, Memory Ordering.
- // Last seen at:
- // http://developer.intel.com/design/pentium4/manuals/index_new.htm
- //
- // x86 stores/loads fail to act as barriers for a few instructions (clflush
- // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
- // not generated by the compiler, and are rare. Users of these instructions
- // need to know about cache behaviour in any case since all of these involve
- // either flushing cache lines or non-temporal cache hints.
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- Atomic64 value = *ptr; // An x86 load acts as a acquire barrier,
- // for current AMD/Intel chips as of Jan 2008.
- // See also Release_Store(), above.
- ATOMICOPS_COMPILER_BARRIER();
- return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
- __asm__ __volatile__("lfence" : : : "memory");
- }
- return x;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-#endif // defined(__x86_64__)
-
-} } // namespace v8::internal
-
-#undef ATOMICOPS_COMPILER_BARRIER
-
-#endif // V8_ATOMICOPS_INTERNALS_X86_GCC_H_
diff --git a/src/3rdparty/v8/src/atomicops_internals_x86_macosx.h b/src/3rdparty/v8/src/atomicops_internals_x86_macosx.h
deleted file mode 100644
index 2bac006..0000000
--- a/src/3rdparty/v8/src/atomicops_internals_x86_macosx.h
+++ /dev/null
@@ -1,301 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
-#define V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
-
-#include <libkern/OSAtomic.h>
-
-namespace v8 {
-namespace internal {
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev_value;
- do {
- if (OSAtomicCompareAndSwap32(old_value, new_value,
- const_cast<Atomic32*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
- Atomic32 new_value) {
- Atomic32 old_value;
- do {
- old_value = *ptr;
- } while (!OSAtomicCompareAndSwap32(old_value, new_value,
- const_cast<Atomic32*>(ptr)));
- return old_value;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
- Atomic32 increment) {
- return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
- Atomic32 increment) {
- return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
-}
-
-inline void MemoryBarrier() {
- OSMemoryBarrier();
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev_value;
- do {
- if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
- const_cast<Atomic32*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return Acquire_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
- Atomic32 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#ifdef __LP64__
-
-// 64-bit implementation on 64-bit platform
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev_value;
- do {
- if (OSAtomicCompareAndSwap64(old_value, new_value,
- const_cast<Atomic64*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
- Atomic64 new_value) {
- Atomic64 old_value;
- do {
- old_value = *ptr;
- } while (!OSAtomicCompareAndSwap64(old_value, new_value,
- const_cast<Atomic64*>(ptr)));
- return old_value;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
- Atomic64 increment) {
- return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
- Atomic64 increment) {
- return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev_value;
- do {
- if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
- const_cast<Atomic64*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- // The lib kern interface does not distinguish between
- // Acquire and Release memory barriers; they are equivalent.
- return Acquire_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
- Atomic64 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#endif // defined(__LP64__)
-
-// MacOS uses long for intptr_t, AtomicWord and Atomic32 are always different
-// on the Mac, even when they are the same size. We need to explicitly cast
-// from AtomicWord to Atomic32/64 to implement the AtomicWord interface.
-#ifdef __LP64__
-#define AtomicWordCastType Atomic64
-#else
-#define AtomicWordCastType Atomic32
-#endif
-
-inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
- AtomicWord old_value,
- AtomicWord new_value) {
- return NoBarrier_CompareAndSwap(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr),
- old_value, new_value);
-}
-
-inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
- AtomicWord new_value) {
- return NoBarrier_AtomicExchange(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
-}
-
-inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
- AtomicWord increment) {
- return NoBarrier_AtomicIncrement(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
-}
-
-inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
- AtomicWord increment) {
- return Barrier_AtomicIncrement(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
-}
-
-inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
- AtomicWord old_value,
- AtomicWord new_value) {
- return v8::internal::Acquire_CompareAndSwap(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr),
- old_value, new_value);
-}
-
-inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
- AtomicWord old_value,
- AtomicWord new_value) {
- return v8::internal::Release_CompareAndSwap(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr),
- old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
- NoBarrier_Store(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
-}
-
-inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
- return v8::internal::Acquire_Store(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
-}
-
-inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
- return v8::internal::Release_Store(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
-}
-
-inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
- return NoBarrier_Load(
- reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
-}
-
-inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
- return v8::internal::Acquire_Load(
- reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
-}
-
-inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
- return v8::internal::Release_Load(
- reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
-}
-
-#undef AtomicWordCastType
-
-} } // namespace v8::internal
-
-#endif // V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
diff --git a/src/3rdparty/v8/src/atomicops_internals_x86_msvc.h b/src/3rdparty/v8/src/atomicops_internals_x86_msvc.h
deleted file mode 100644
index fcf6a65..0000000
--- a/src/3rdparty/v8/src/atomicops_internals_x86_msvc.h
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
-#define V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
-
-#include "checks.h"
-#include "win32-headers.h"
-
-namespace v8 {
-namespace internal {
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- LONG result = InterlockedCompareExchange(
- reinterpret_cast<volatile LONG*>(ptr),
- static_cast<LONG>(new_value),
- static_cast<LONG>(old_value));
- return static_cast<Atomic32>(result);
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- LONG result = InterlockedExchange(
- reinterpret_cast<volatile LONG*>(ptr),
- static_cast<LONG>(new_value));
- return static_cast<Atomic32>(result);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return InterlockedExchangeAdd(
- reinterpret_cast<volatile LONG*>(ptr),
- static_cast<LONG>(increment)) + increment;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return Barrier_AtomicIncrement(ptr, increment);
-}
-
-#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
-#error "We require at least vs2005 for MemoryBarrier"
-#endif
-inline void MemoryBarrier() {
- // We use MemoryBarrier from WinNT.h
- ::MemoryBarrier();
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- NoBarrier_AtomicExchange(ptr, value);
- // acts as a barrier in this implementation
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value; // works w/o barrier for current Intel chips as of June 2005
- // See comments in Atomic64 version of Release_Store() below.
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#if defined(_WIN64)
-
-// 64-bit low-level operations on 64-bit platform.
-
-STATIC_ASSERT(sizeof(Atomic64) == sizeof(PVOID));
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- PVOID result = InterlockedCompareExchangePointer(
- reinterpret_cast<volatile PVOID*>(ptr),
- reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
- return reinterpret_cast<Atomic64>(result);
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- PVOID result = InterlockedExchangePointer(
- reinterpret_cast<volatile PVOID*>(ptr),
- reinterpret_cast<PVOID>(new_value));
- return reinterpret_cast<Atomic64>(result);
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return InterlockedExchangeAdd64(
- reinterpret_cast<volatile LONGLONG*>(ptr),
- static_cast<LONGLONG>(increment)) + increment;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return Barrier_AtomicIncrement(ptr, increment);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- NoBarrier_AtomicExchange(ptr, value);
- // acts as a barrier in this implementation
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value; // works w/o barrier for current Intel chips as of June 2005
-
- // When new chips come out, check:
- // IA-32 Intel Architecture Software Developer's Manual, Volume 3:
- // System Programming Guide, Chatper 7: Multiple-processor management,
- // Section 7.2, Memory Ordering.
- // Last seen at:
- // http://developer.intel.com/design/pentium4/manuals/index_new.htm
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- Atomic64 value = *ptr;
- return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-
-#endif // defined(_WIN64)
-
-} } // namespace v8::internal
-
-#endif // V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
diff --git a/src/3rdparty/v8/src/bignum-dtoa.cc b/src/3rdparty/v8/src/bignum-dtoa.cc
deleted file mode 100644
index 088dd79..0000000
--- a/src/3rdparty/v8/src/bignum-dtoa.cc
+++ /dev/null
@@ -1,655 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <math.h>
-
-#include "v8.h"
-#include "bignum-dtoa.h"
-
-#include "bignum.h"
-#include "double.h"
-
-namespace v8 {
-namespace internal {
-
-static int NormalizedExponent(uint64_t significand, int exponent) {
- ASSERT(significand != 0);
- while ((significand & Double::kHiddenBit) == 0) {
- significand = significand << 1;
- exponent = exponent - 1;
- }
- return exponent;
-}
-
-
-// Forward declarations:
-// Returns an estimation of k such that 10^(k-1) <= v < 10^k.
-static int EstimatePower(int exponent);
-// Computes v / 10^estimated_power exactly, as a ratio of two bignums, numerator
-// and denominator.
-static void InitialScaledStartValues(double v,
- int estimated_power,
- bool need_boundary_deltas,
- Bignum* numerator,
- Bignum* denominator,
- Bignum* delta_minus,
- Bignum* delta_plus);
-// Multiplies numerator/denominator so that its values lies in the range 1-10.
-// Returns decimal_point s.t.
-// v = numerator'/denominator' * 10^(decimal_point-1)
-// where numerator' and denominator' are the values of numerator and
-// denominator after the call to this function.
-static void FixupMultiply10(int estimated_power, bool is_even,
- int* decimal_point,
- Bignum* numerator, Bignum* denominator,
- Bignum* delta_minus, Bignum* delta_plus);
-// Generates digits from the left to the right and stops when the generated
-// digits yield the shortest decimal representation of v.
-static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
- Bignum* delta_minus, Bignum* delta_plus,
- bool is_even,
- Vector<char> buffer, int* length);
-// Generates 'requested_digits' after the decimal point.
-static void BignumToFixed(int requested_digits, int* decimal_point,
- Bignum* numerator, Bignum* denominator,
- Vector<char>(buffer), int* length);
-// Generates 'count' digits of numerator/denominator.
-// Once 'count' digits have been produced rounds the result depending on the
-// remainder (remainders of exactly .5 round upwards). Might update the
-// decimal_point when rounding up (for example for 0.9999).
-static void GenerateCountedDigits(int count, int* decimal_point,
- Bignum* numerator, Bignum* denominator,
- Vector<char>(buffer), int* length);
-
-
-void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
- Vector<char> buffer, int* length, int* decimal_point) {
- ASSERT(v > 0);
- ASSERT(!Double(v).IsSpecial());
- uint64_t significand = Double(v).Significand();
- bool is_even = (significand & 1) == 0;
- int exponent = Double(v).Exponent();
- int normalized_exponent = NormalizedExponent(significand, exponent);
- // estimated_power might be too low by 1.
- int estimated_power = EstimatePower(normalized_exponent);
-
- // Shortcut for Fixed.
- // The requested digits correspond to the digits after the point. If the
- // number is much too small, then there is no need in trying to get any
- // digits.
- if (mode == BIGNUM_DTOA_FIXED && -estimated_power - 1 > requested_digits) {
- buffer[0] = '\0';
- *length = 0;
- // Set decimal-point to -requested_digits. This is what Gay does.
- // Note that it should not have any effect anyways since the string is
- // empty.
- *decimal_point = -requested_digits;
- return;
- }
-
- Bignum numerator;
- Bignum denominator;
- Bignum delta_minus;
- Bignum delta_plus;
- // Make sure the bignum can grow large enough. The smallest double equals
- // 4e-324. In this case the denominator needs fewer than 324*4 binary digits.
- // The maximum double is 1.7976931348623157e308 which needs fewer than
- // 308*4 binary digits.
- ASSERT(Bignum::kMaxSignificantBits >= 324*4);
- bool need_boundary_deltas = (mode == BIGNUM_DTOA_SHORTEST);
- InitialScaledStartValues(v, estimated_power, need_boundary_deltas,
- &numerator, &denominator,
- &delta_minus, &delta_plus);
- // We now have v = (numerator / denominator) * 10^estimated_power.
- FixupMultiply10(estimated_power, is_even, decimal_point,
- &numerator, &denominator,
- &delta_minus, &delta_plus);
- // We now have v = (numerator / denominator) * 10^(decimal_point-1), and
- // 1 <= (numerator + delta_plus) / denominator < 10
- switch (mode) {
- case BIGNUM_DTOA_SHORTEST:
- GenerateShortestDigits(&numerator, &denominator,
- &delta_minus, &delta_plus,
- is_even, buffer, length);
- break;
- case BIGNUM_DTOA_FIXED:
- BignumToFixed(requested_digits, decimal_point,
- &numerator, &denominator,
- buffer, length);
- break;
- case BIGNUM_DTOA_PRECISION:
- GenerateCountedDigits(requested_digits, decimal_point,
- &numerator, &denominator,
- buffer, length);
- break;
- default:
- UNREACHABLE();
- }
- buffer[*length] = '\0';
-}
-
-
-// The procedure starts generating digits from the left to the right and stops
-// when the generated digits yield the shortest decimal representation of v. A
-// decimal representation of v is a number lying closer to v than to any other
-// double, so it converts to v when read.
-//
-// This is true if d, the decimal representation, is between m- and m+, the
-// upper and lower boundaries. d must be strictly between them if !is_even.
-// m- := (numerator - delta_minus) / denominator
-// m+ := (numerator + delta_plus) / denominator
-//
-// Precondition: 0 <= (numerator+delta_plus) / denominator < 10.
-// If 1 <= (numerator+delta_plus) / denominator < 10 then no leading 0 digit
-// will be produced. This should be the standard precondition.
-static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
- Bignum* delta_minus, Bignum* delta_plus,
- bool is_even,
- Vector<char> buffer, int* length) {
- // Small optimization: if delta_minus and delta_plus are the same just reuse
- // one of the two bignums.
- if (Bignum::Equal(*delta_minus, *delta_plus)) {
- delta_plus = delta_minus;
- }
- *length = 0;
- while (true) {
- uint16_t digit;
- digit = numerator->DivideModuloIntBignum(*denominator);
- ASSERT(digit <= 9); // digit is a uint16_t and therefore always positive.
- // digit = numerator / denominator (integer division).
- // numerator = numerator % denominator.
- buffer[(*length)++] = digit + '0';
-
- // Can we stop already?
- // If the remainder of the division is less than the distance to the lower
- // boundary we can stop. In this case we simply round down (discarding the
- // remainder).
- // Similarly we test if we can round up (using the upper boundary).
- bool in_delta_room_minus;
- bool in_delta_room_plus;
- if (is_even) {
- in_delta_room_minus = Bignum::LessEqual(*numerator, *delta_minus);
- } else {
- in_delta_room_minus = Bignum::Less(*numerator, *delta_minus);
- }
- if (is_even) {
- in_delta_room_plus =
- Bignum::PlusCompare(*numerator, *delta_plus, *denominator) >= 0;
- } else {
- in_delta_room_plus =
- Bignum::PlusCompare(*numerator, *delta_plus, *denominator) > 0;
- }
- if (!in_delta_room_minus && !in_delta_room_plus) {
- // Prepare for next iteration.
- numerator->Times10();
- delta_minus->Times10();
- // We optimized delta_plus to be equal to delta_minus (if they share the
- // same value). So don't multiply delta_plus if they point to the same
- // object.
- if (delta_minus != delta_plus) {
- delta_plus->Times10();
- }
- } else if (in_delta_room_minus && in_delta_room_plus) {
- // Let's see if 2*numerator < denominator.
- // If yes, then the next digit would be < 5 and we can round down.
- int compare = Bignum::PlusCompare(*numerator, *numerator, *denominator);
- if (compare < 0) {
- // Remaining digits are less than .5. -> Round down (== do nothing).
- } else if (compare > 0) {
- // Remaining digits are more than .5 of denominator. -> Round up.
- // Note that the last digit could not be a '9' as otherwise the whole
- // loop would have stopped earlier.
- // We still have an assert here in case the preconditions were not
- // satisfied.
- ASSERT(buffer[(*length) - 1] != '9');
- buffer[(*length) - 1]++;
- } else {
- // Halfway case.
- // TODO(floitsch): need a way to solve half-way cases.
- // For now let's round towards even (since this is what Gay seems to
- // do).
-
- if ((buffer[(*length) - 1] - '0') % 2 == 0) {
- // Round down => Do nothing.
- } else {
- ASSERT(buffer[(*length) - 1] != '9');
- buffer[(*length) - 1]++;
- }
- }
- return;
- } else if (in_delta_room_minus) {
- // Round down (== do nothing).
- return;
- } else { // in_delta_room_plus
- // Round up.
- // Note again that the last digit could not be '9' since this would have
- // stopped the loop earlier.
- // We still have an ASSERT here, in case the preconditions were not
- // satisfied.
- ASSERT(buffer[(*length) -1] != '9');
- buffer[(*length) - 1]++;
- return;
- }
- }
-}
-
-
-// Let v = numerator / denominator < 10.
-// Then we generate 'count' digits of d = x.xxxxx... (without the decimal point)
-// from left to right. Once 'count' digits have been produced we decide wether
-// to round up or down. Remainders of exactly .5 round upwards. Numbers such
-// as 9.999999 propagate a carry all the way, and change the
-// exponent (decimal_point), when rounding upwards.
-static void GenerateCountedDigits(int count, int* decimal_point,
- Bignum* numerator, Bignum* denominator,
- Vector<char>(buffer), int* length) {
- ASSERT(count >= 0);
- for (int i = 0; i < count - 1; ++i) {
- uint16_t digit;
- digit = numerator->DivideModuloIntBignum(*denominator);
- ASSERT(digit <= 9); // digit is a uint16_t and therefore always positive.
- // digit = numerator / denominator (integer division).
- // numerator = numerator % denominator.
- buffer[i] = digit + '0';
- // Prepare for next iteration.
- numerator->Times10();
- }
- // Generate the last digit.
- uint16_t digit;
- digit = numerator->DivideModuloIntBignum(*denominator);
- if (Bignum::PlusCompare(*numerator, *numerator, *denominator) >= 0) {
- digit++;
- }
- buffer[count - 1] = digit + '0';
- // Correct bad digits (in case we had a sequence of '9's). Propagate the
- // carry until we hat a non-'9' or til we reach the first digit.
- for (int i = count - 1; i > 0; --i) {
- if (buffer[i] != '0' + 10) break;
- buffer[i] = '0';
- buffer[i - 1]++;
- }
- if (buffer[0] == '0' + 10) {
- // Propagate a carry past the top place.
- buffer[0] = '1';
- (*decimal_point)++;
- }
- *length = count;
-}
-
-
-// Generates 'requested_digits' after the decimal point. It might omit
-// trailing '0's. If the input number is too small then no digits at all are
-// generated (ex.: 2 fixed digits for 0.00001).
-//
-// Input verifies: 1 <= (numerator + delta) / denominator < 10.
-static void BignumToFixed(int requested_digits, int* decimal_point,
- Bignum* numerator, Bignum* denominator,
- Vector<char>(buffer), int* length) {
- // Note that we have to look at more than just the requested_digits, since
- // a number could be rounded up. Example: v=0.5 with requested_digits=0.
- // Even though the power of v equals 0 we can't just stop here.
- if (-(*decimal_point) > requested_digits) {
- // The number is definitively too small.
- // Ex: 0.001 with requested_digits == 1.
- // Set decimal-point to -requested_digits. This is what Gay does.
- // Note that it should not have any effect anyways since the string is
- // empty.
- *decimal_point = -requested_digits;
- *length = 0;
- return;
- } else if (-(*decimal_point) == requested_digits) {
- // We only need to verify if the number rounds down or up.
- // Ex: 0.04 and 0.06 with requested_digits == 1.
- ASSERT(*decimal_point == -requested_digits);
- // Initially the fraction lies in range (1, 10]. Multiply the denominator
- // by 10 so that we can compare more easily.
- denominator->Times10();
- if (Bignum::PlusCompare(*numerator, *numerator, *denominator) >= 0) {
- // If the fraction is >= 0.5 then we have to include the rounded
- // digit.
- buffer[0] = '1';
- *length = 1;
- (*decimal_point)++;
- } else {
- // Note that we caught most of similar cases earlier.
- *length = 0;
- }
- return;
- } else {
- // The requested digits correspond to the digits after the point.
- // The variable 'needed_digits' includes the digits before the point.
- int needed_digits = (*decimal_point) + requested_digits;
- GenerateCountedDigits(needed_digits, decimal_point,
- numerator, denominator,
- buffer, length);
- }
-}
-
-
-// Returns an estimation of k such that 10^(k-1) <= v < 10^k where
-// v = f * 2^exponent and 2^52 <= f < 2^53.
-// v is hence a normalized double with the given exponent. The output is an
-// approximation for the exponent of the decimal approimation .digits * 10^k.
-//
-// The result might undershoot by 1 in which case 10^k <= v < 10^k+1.
-// Note: this property holds for v's upper boundary m+ too.
-// 10^k <= m+ < 10^k+1.
-// (see explanation below).
-//
-// Examples:
-// EstimatePower(0) => 16
-// EstimatePower(-52) => 0
-//
-// Note: e >= 0 => EstimatedPower(e) > 0. No similar claim can be made for e<0.
-static int EstimatePower(int exponent) {
- // This function estimates log10 of v where v = f*2^e (with e == exponent).
- // Note that 10^floor(log10(v)) <= v, but v <= 10^ceil(log10(v)).
- // Note that f is bounded by its container size. Let p = 53 (the double's
- // significand size). Then 2^(p-1) <= f < 2^p.
- //
- // Given that log10(v) == log2(v)/log2(10) and e+(len(f)-1) is quite close
- // to log2(v) the function is simplified to (e+(len(f)-1)/log2(10)).
- // The computed number undershoots by less than 0.631 (when we compute log3
- // and not log10).
- //
- // Optimization: since we only need an approximated result this computation
- // can be performed on 64 bit integers. On x86/x64 architecture the speedup is
- // not really measurable, though.
- //
- // Since we want to avoid overshooting we decrement by 1e10 so that
- // floating-point imprecisions don't affect us.
- //
- // Explanation for v's boundary m+: the computation takes advantage of
- // the fact that 2^(p-1) <= f < 2^p. Boundaries still satisfy this requirement
- // (even for denormals where the delta can be much more important).
-
- const double k1Log10 = 0.30102999566398114; // 1/lg(10)
-
- // For doubles len(f) == 53 (don't forget the hidden bit).
- const int kSignificandSize = 53;
- double estimate = ceil((exponent + kSignificandSize - 1) * k1Log10 - 1e-10);
- return static_cast<int>(estimate);
-}
-
-
-// See comments for InitialScaledStartValues.
-static void InitialScaledStartValuesPositiveExponent(
- double v, int estimated_power, bool need_boundary_deltas,
- Bignum* numerator, Bignum* denominator,
- Bignum* delta_minus, Bignum* delta_plus) {
- // A positive exponent implies a positive power.
- ASSERT(estimated_power >= 0);
- // Since the estimated_power is positive we simply multiply the denominator
- // by 10^estimated_power.
-
- // numerator = v.
- numerator->AssignUInt64(Double(v).Significand());
- numerator->ShiftLeft(Double(v).Exponent());
- // denominator = 10^estimated_power.
- denominator->AssignPowerUInt16(10, estimated_power);
-
- if (need_boundary_deltas) {
- // Introduce a common denominator so that the deltas to the boundaries are
- // integers.
- denominator->ShiftLeft(1);
- numerator->ShiftLeft(1);
- // Let v = f * 2^e, then m+ - v = 1/2 * 2^e; With the common
- // denominator (of 2) delta_plus equals 2^e.
- delta_plus->AssignUInt16(1);
- delta_plus->ShiftLeft(Double(v).Exponent());
- // Same for delta_minus (with adjustments below if f == 2^p-1).
- delta_minus->AssignUInt16(1);
- delta_minus->ShiftLeft(Double(v).Exponent());
-
- // If the significand (without the hidden bit) is 0, then the lower
- // boundary is closer than just half a ulp (unit in the last place).
- // There is only one exception: if the next lower number is a denormal then
- // the distance is 1 ulp. This cannot be the case for exponent >= 0 (but we
- // have to test it in the other function where exponent < 0).
- uint64_t v_bits = Double(v).AsUint64();
- if ((v_bits & Double::kSignificandMask) == 0) {
- // The lower boundary is closer at half the distance of "normal" numbers.
- // Increase the common denominator and adapt all but the delta_minus.
- denominator->ShiftLeft(1); // *2
- numerator->ShiftLeft(1); // *2
- delta_plus->ShiftLeft(1); // *2
- }
- }
-}
-
-
-// See comments for InitialScaledStartValues
-static void InitialScaledStartValuesNegativeExponentPositivePower(
- double v, int estimated_power, bool need_boundary_deltas,
- Bignum* numerator, Bignum* denominator,
- Bignum* delta_minus, Bignum* delta_plus) {
- uint64_t significand = Double(v).Significand();
- int exponent = Double(v).Exponent();
- // v = f * 2^e with e < 0, and with estimated_power >= 0.
- // This means that e is close to 0 (have a look at how estimated_power is
- // computed).
-
- // numerator = significand
- // since v = significand * 2^exponent this is equivalent to
- // numerator = v * / 2^-exponent
- numerator->AssignUInt64(significand);
- // denominator = 10^estimated_power * 2^-exponent (with exponent < 0)
- denominator->AssignPowerUInt16(10, estimated_power);
- denominator->ShiftLeft(-exponent);
-
- if (need_boundary_deltas) {
- // Introduce a common denominator so that the deltas to the boundaries are
- // integers.
- denominator->ShiftLeft(1);
- numerator->ShiftLeft(1);
- // Let v = f * 2^e, then m+ - v = 1/2 * 2^e; With the common
- // denominator (of 2) delta_plus equals 2^e.
- // Given that the denominator already includes v's exponent the distance
- // to the boundaries is simply 1.
- delta_plus->AssignUInt16(1);
- // Same for delta_minus (with adjustments below if f == 2^p-1).
- delta_minus->AssignUInt16(1);
-
- // If the significand (without the hidden bit) is 0, then the lower
- // boundary is closer than just one ulp (unit in the last place).
- // There is only one exception: if the next lower number is a denormal
- // then the distance is 1 ulp. Since the exponent is close to zero
- // (otherwise estimated_power would have been negative) this cannot happen
- // here either.
- uint64_t v_bits = Double(v).AsUint64();
- if ((v_bits & Double::kSignificandMask) == 0) {
- // The lower boundary is closer at half the distance of "normal" numbers.
- // Increase the denominator and adapt all but the delta_minus.
- denominator->ShiftLeft(1); // *2
- numerator->ShiftLeft(1); // *2
- delta_plus->ShiftLeft(1); // *2
- }
- }
-}
-
-
-// See comments for InitialScaledStartValues
-static void InitialScaledStartValuesNegativeExponentNegativePower(
- double v, int estimated_power, bool need_boundary_deltas,
- Bignum* numerator, Bignum* denominator,
- Bignum* delta_minus, Bignum* delta_plus) {
- const uint64_t kMinimalNormalizedExponent =
- V8_2PART_UINT64_C(0x00100000, 00000000);
- uint64_t significand = Double(v).Significand();
- int exponent = Double(v).Exponent();
- // Instead of multiplying the denominator with 10^estimated_power we
- // multiply all values (numerator and deltas) by 10^-estimated_power.
-
- // Use numerator as temporary container for power_ten.
- Bignum* power_ten = numerator;
- power_ten->AssignPowerUInt16(10, -estimated_power);
-
- if (need_boundary_deltas) {
- // Since power_ten == numerator we must make a copy of 10^estimated_power
- // before we complete the computation of the numerator.
- // delta_plus = delta_minus = 10^estimated_power
- delta_plus->AssignBignum(*power_ten);
- delta_minus->AssignBignum(*power_ten);
- }
-
- // numerator = significand * 2 * 10^-estimated_power
- // since v = significand * 2^exponent this is equivalent to
- // numerator = v * 10^-estimated_power * 2 * 2^-exponent.
- // Remember: numerator has been abused as power_ten. So no need to assign it
- // to itself.
- ASSERT(numerator == power_ten);
- numerator->MultiplyByUInt64(significand);
-
- // denominator = 2 * 2^-exponent with exponent < 0.
- denominator->AssignUInt16(1);
- denominator->ShiftLeft(-exponent);
-
- if (need_boundary_deltas) {
- // Introduce a common denominator so that the deltas to the boundaries are
- // integers.
- numerator->ShiftLeft(1);
- denominator->ShiftLeft(1);
- // With this shift the boundaries have their correct value, since
- // delta_plus = 10^-estimated_power, and
- // delta_minus = 10^-estimated_power.
- // These assignments have been done earlier.
-
- // The special case where the lower boundary is twice as close.
- // This time we have to look out for the exception too.
- uint64_t v_bits = Double(v).AsUint64();
- if ((v_bits & Double::kSignificandMask) == 0 &&
- // The only exception where a significand == 0 has its boundaries at
- // "normal" distances:
- (v_bits & Double::kExponentMask) != kMinimalNormalizedExponent) {
- numerator->ShiftLeft(1); // *2
- denominator->ShiftLeft(1); // *2
- delta_plus->ShiftLeft(1); // *2
- }
- }
-}
-
-
-// Let v = significand * 2^exponent.
-// Computes v / 10^estimated_power exactly, as a ratio of two bignums, numerator
-// and denominator. The functions GenerateShortestDigits and
-// GenerateCountedDigits will then convert this ratio to its decimal
-// representation d, with the required accuracy.
-// Then d * 10^estimated_power is the representation of v.
-// (Note: the fraction and the estimated_power might get adjusted before
-// generating the decimal representation.)
-//
-// The initial start values consist of:
-// - a scaled numerator: s.t. numerator/denominator == v / 10^estimated_power.
-// - a scaled (common) denominator.
-// optionally (used by GenerateShortestDigits to decide if it has the shortest
-// decimal converting back to v):
-// - v - m-: the distance to the lower boundary.
-// - m+ - v: the distance to the upper boundary.
-//
-// v, m+, m-, and therefore v - m- and m+ - v all share the same denominator.
-//
-// Let ep == estimated_power, then the returned values will satisfy:
-// v / 10^ep = numerator / denominator.
-// v's boundarys m- and m+:
-// m- / 10^ep == v / 10^ep - delta_minus / denominator
-// m+ / 10^ep == v / 10^ep + delta_plus / denominator
-// Or in other words:
-// m- == v - delta_minus * 10^ep / denominator;
-// m+ == v + delta_plus * 10^ep / denominator;
-//
-// Since 10^(k-1) <= v < 10^k (with k == estimated_power)
-// or 10^k <= v < 10^(k+1)
-// we then have 0.1 <= numerator/denominator < 1
-// or 1 <= numerator/denominator < 10
-//
-// It is then easy to kickstart the digit-generation routine.
-//
-// The boundary-deltas are only filled if need_boundary_deltas is set.
-static void InitialScaledStartValues(double v,
- int estimated_power,
- bool need_boundary_deltas,
- Bignum* numerator,
- Bignum* denominator,
- Bignum* delta_minus,
- Bignum* delta_plus) {
- if (Double(v).Exponent() >= 0) {
- InitialScaledStartValuesPositiveExponent(
- v, estimated_power, need_boundary_deltas,
- numerator, denominator, delta_minus, delta_plus);
- } else if (estimated_power >= 0) {
- InitialScaledStartValuesNegativeExponentPositivePower(
- v, estimated_power, need_boundary_deltas,
- numerator, denominator, delta_minus, delta_plus);
- } else {
- InitialScaledStartValuesNegativeExponentNegativePower(
- v, estimated_power, need_boundary_deltas,
- numerator, denominator, delta_minus, delta_plus);
- }
-}
-
-
-// This routine multiplies numerator/denominator so that its values lies in the
-// range 1-10. That is after a call to this function we have:
-// 1 <= (numerator + delta_plus) /denominator < 10.
-// Let numerator the input before modification and numerator' the argument
-// after modification, then the output-parameter decimal_point is such that
-// numerator / denominator * 10^estimated_power ==
-// numerator' / denominator' * 10^(decimal_point - 1)
-// In some cases estimated_power was too low, and this is already the case. We
-// then simply adjust the power so that 10^(k-1) <= v < 10^k (with k ==
-// estimated_power) but do not touch the numerator or denominator.
-// Otherwise the routine multiplies the numerator and the deltas by 10.
-static void FixupMultiply10(int estimated_power, bool is_even,
- int* decimal_point,
- Bignum* numerator, Bignum* denominator,
- Bignum* delta_minus, Bignum* delta_plus) {
- bool in_range;
- if (is_even) {
- // For IEEE doubles half-way cases (in decimal system numbers ending with 5)
- // are rounded to the closest floating-point number with even significand.
- in_range = Bignum::PlusCompare(*numerator, *delta_plus, *denominator) >= 0;
- } else {
- in_range = Bignum::PlusCompare(*numerator, *delta_plus, *denominator) > 0;
- }
- if (in_range) {
- // Since numerator + delta_plus >= denominator we already have
- // 1 <= numerator/denominator < 10. Simply update the estimated_power.
- *decimal_point = estimated_power + 1;
- } else {
- *decimal_point = estimated_power;
- numerator->Times10();
- if (Bignum::Equal(*delta_minus, *delta_plus)) {
- delta_minus->Times10();
- delta_plus->AssignBignum(*delta_minus);
- } else {
- delta_minus->Times10();
- delta_plus->Times10();
- }
- }
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/bignum-dtoa.h b/src/3rdparty/v8/src/bignum-dtoa.h
deleted file mode 100644
index ea1acbb..0000000
--- a/src/3rdparty/v8/src/bignum-dtoa.h
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_BIGNUM_DTOA_H_
-#define V8_BIGNUM_DTOA_H_
-
-namespace v8 {
-namespace internal {
-
-enum BignumDtoaMode {
- // Return the shortest correct representation.
- // For example the output of 0.299999999999999988897 is (the less accurate but
- // correct) 0.3.
- BIGNUM_DTOA_SHORTEST,
- // Return a fixed number of digits after the decimal point.
- // For instance fixed(0.1, 4) becomes 0.1000
- // If the input number is big, the output will be big.
- BIGNUM_DTOA_FIXED,
- // Return a fixed number of digits, no matter what the exponent is.
- BIGNUM_DTOA_PRECISION
-};
-
-// Converts the given double 'v' to ascii.
-// The result should be interpreted as buffer * 10^(point-length).
-// The buffer will be null-terminated.
-//
-// The input v must be > 0 and different from NaN, and Infinity.
-//
-// The output depends on the given mode:
-// - SHORTEST: produce the least amount of digits for which the internal
-// identity requirement is still satisfied. If the digits are printed
-// (together with the correct exponent) then reading this number will give
-// 'v' again. The buffer will choose the representation that is closest to
-// 'v'. If there are two at the same distance, than the number is round up.
-// In this mode the 'requested_digits' parameter is ignored.
-// - FIXED: produces digits necessary to print a given number with
-// 'requested_digits' digits after the decimal point. The produced digits
-// might be too short in which case the caller has to fill the gaps with '0's.
-// Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2.
-// Halfway cases are rounded up. The call toFixed(0.15, 2) thus returns
-// buffer="2", point=0.
-// Note: the length of the returned buffer has no meaning wrt the significance
-// of its digits. That is, just because it contains '0's does not mean that
-// any other digit would not satisfy the internal identity requirement.
-// - PRECISION: produces 'requested_digits' where the first digit is not '0'.
-// Even though the length of produced digits usually equals
-// 'requested_digits', the function is allowed to return fewer digits, in
-// which case the caller has to fill the missing digits with '0's.
-// Halfway cases are again rounded up.
-// 'BignumDtoa' expects the given buffer to be big enough to hold all digits
-// and a terminating null-character.
-void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
- Vector<char> buffer, int* length, int* point);
-
-} } // namespace v8::internal
-
-#endif // V8_BIGNUM_DTOA_H_
diff --git a/src/3rdparty/v8/src/bignum.cc b/src/3rdparty/v8/src/bignum.cc
deleted file mode 100644
index a973974..0000000
--- a/src/3rdparty/v8/src/bignum.cc
+++ /dev/null
@@ -1,768 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "bignum.h"
-#include "utils.h"
-
-namespace v8 {
-namespace internal {
-
-Bignum::Bignum()
- : bigits_(bigits_buffer_, kBigitCapacity), used_digits_(0), exponent_(0) {
- for (int i = 0; i < kBigitCapacity; ++i) {
- bigits_[i] = 0;
- }
-}
-
-
-template<typename S>
-static int BitSize(S value) {
- return 8 * sizeof(value);
-}
-
-// Guaranteed to lie in one Bigit.
-void Bignum::AssignUInt16(uint16_t value) {
- ASSERT(kBigitSize >= BitSize(value));
- Zero();
- if (value == 0) return;
-
- EnsureCapacity(1);
- bigits_[0] = value;
- used_digits_ = 1;
-}
-
-
-void Bignum::AssignUInt64(uint64_t value) {
- const int kUInt64Size = 64;
-
- Zero();
- if (value == 0) return;
-
- int needed_bigits = kUInt64Size / kBigitSize + 1;
- EnsureCapacity(needed_bigits);
- for (int i = 0; i < needed_bigits; ++i) {
- bigits_[i] = static_cast<Chunk>(value & kBigitMask);
- value = value >> kBigitSize;
- }
- used_digits_ = needed_bigits;
- Clamp();
-}
-
-
-void Bignum::AssignBignum(const Bignum& other) {
- exponent_ = other.exponent_;
- for (int i = 0; i < other.used_digits_; ++i) {
- bigits_[i] = other.bigits_[i];
- }
- // Clear the excess digits (if there were any).
- for (int i = other.used_digits_; i < used_digits_; ++i) {
- bigits_[i] = 0;
- }
- used_digits_ = other.used_digits_;
-}
-
-
-static uint64_t ReadUInt64(Vector<const char> buffer,
- int from,
- int digits_to_read) {
- uint64_t result = 0;
- for (int i = from; i < from + digits_to_read; ++i) {
- int digit = buffer[i] - '0';
- ASSERT(0 <= digit && digit <= 9);
- result = result * 10 + digit;
- }
- return result;
-}
-
-
-void Bignum::AssignDecimalString(Vector<const char> value) {
- // 2^64 = 18446744073709551616 > 10^19
- const int kMaxUint64DecimalDigits = 19;
- Zero();
- int length = value.length();
- int pos = 0;
- // Let's just say that each digit needs 4 bits.
- while (length >= kMaxUint64DecimalDigits) {
- uint64_t digits = ReadUInt64(value, pos, kMaxUint64DecimalDigits);
- pos += kMaxUint64DecimalDigits;
- length -= kMaxUint64DecimalDigits;
- MultiplyByPowerOfTen(kMaxUint64DecimalDigits);
- AddUInt64(digits);
- }
- uint64_t digits = ReadUInt64(value, pos, length);
- MultiplyByPowerOfTen(length);
- AddUInt64(digits);
- Clamp();
-}
-
-
-static int HexCharValue(char c) {
- if ('0' <= c && c <= '9') return c - '0';
- if ('a' <= c && c <= 'f') return 10 + c - 'a';
- if ('A' <= c && c <= 'F') return 10 + c - 'A';
- UNREACHABLE();
- return 0; // To make compiler happy.
-}
-
-
-void Bignum::AssignHexString(Vector<const char> value) {
- Zero();
- int length = value.length();
-
- int needed_bigits = length * 4 / kBigitSize + 1;
- EnsureCapacity(needed_bigits);
- int string_index = length - 1;
- for (int i = 0; i < needed_bigits - 1; ++i) {
- // These bigits are guaranteed to be "full".
- Chunk current_bigit = 0;
- for (int j = 0; j < kBigitSize / 4; j++) {
- current_bigit += HexCharValue(value[string_index--]) << (j * 4);
- }
- bigits_[i] = current_bigit;
- }
- used_digits_ = needed_bigits - 1;
-
- Chunk most_significant_bigit = 0; // Could be = 0;
- for (int j = 0; j <= string_index; ++j) {
- most_significant_bigit <<= 4;
- most_significant_bigit += HexCharValue(value[j]);
- }
- if (most_significant_bigit != 0) {
- bigits_[used_digits_] = most_significant_bigit;
- used_digits_++;
- }
- Clamp();
-}
-
-
-void Bignum::AddUInt64(uint64_t operand) {
- if (operand == 0) return;
- Bignum other;
- other.AssignUInt64(operand);
- AddBignum(other);
-}
-
-
-void Bignum::AddBignum(const Bignum& other) {
- ASSERT(IsClamped());
- ASSERT(other.IsClamped());
-
- // If this has a greater exponent than other append zero-bigits to this.
- // After this call exponent_ <= other.exponent_.
- Align(other);
-
- // There are two possibilities:
- // aaaaaaaaaaa 0000 (where the 0s represent a's exponent)
- // bbbbb 00000000
- // ----------------
- // ccccccccccc 0000
- // or
- // aaaaaaaaaa 0000
- // bbbbbbbbb 0000000
- // -----------------
- // cccccccccccc 0000
- // In both cases we might need a carry bigit.
-
- EnsureCapacity(1 + Max(BigitLength(), other.BigitLength()) - exponent_);
- Chunk carry = 0;
- int bigit_pos = other.exponent_ - exponent_;
- ASSERT(bigit_pos >= 0);
- for (int i = 0; i < other.used_digits_; ++i) {
- Chunk sum = bigits_[bigit_pos] + other.bigits_[i] + carry;
- bigits_[bigit_pos] = sum & kBigitMask;
- carry = sum >> kBigitSize;
- bigit_pos++;
- }
-
- while (carry != 0) {
- Chunk sum = bigits_[bigit_pos] + carry;
- bigits_[bigit_pos] = sum & kBigitMask;
- carry = sum >> kBigitSize;
- bigit_pos++;
- }
- used_digits_ = Max(bigit_pos, used_digits_);
- ASSERT(IsClamped());
-}
-
-
-void Bignum::SubtractBignum(const Bignum& other) {
- ASSERT(IsClamped());
- ASSERT(other.IsClamped());
- // We require this to be bigger than other.
- ASSERT(LessEqual(other, *this));
-
- Align(other);
-
- int offset = other.exponent_ - exponent_;
- Chunk borrow = 0;
- int i;
- for (i = 0; i < other.used_digits_; ++i) {
- ASSERT((borrow == 0) || (borrow == 1));
- Chunk difference = bigits_[i + offset] - other.bigits_[i] - borrow;
- bigits_[i + offset] = difference & kBigitMask;
- borrow = difference >> (kChunkSize - 1);
- }
- while (borrow != 0) {
- Chunk difference = bigits_[i + offset] - borrow;
- bigits_[i + offset] = difference & kBigitMask;
- borrow = difference >> (kChunkSize - 1);
- ++i;
- }
- Clamp();
-}
-
-
-void Bignum::ShiftLeft(int shift_amount) {
- if (used_digits_ == 0) return;
- exponent_ += shift_amount / kBigitSize;
- int local_shift = shift_amount % kBigitSize;
- EnsureCapacity(used_digits_ + 1);
- BigitsShiftLeft(local_shift);
-}
-
-
-void Bignum::MultiplyByUInt32(uint32_t factor) {
- if (factor == 1) return;
- if (factor == 0) {
- Zero();
- return;
- }
- if (used_digits_ == 0) return;
-
- // The product of a bigit with the factor is of size kBigitSize + 32.
- // Assert that this number + 1 (for the carry) fits into double chunk.
- ASSERT(kDoubleChunkSize >= kBigitSize + 32 + 1);
- DoubleChunk carry = 0;
- for (int i = 0; i < used_digits_; ++i) {
- DoubleChunk product = static_cast<DoubleChunk>(factor) * bigits_[i] + carry;
- bigits_[i] = static_cast<Chunk>(product & kBigitMask);
- carry = (product >> kBigitSize);
- }
- while (carry != 0) {
- EnsureCapacity(used_digits_ + 1);
- bigits_[used_digits_] = static_cast<Chunk>(carry & kBigitMask);
- used_digits_++;
- carry >>= kBigitSize;
- }
-}
-
-
-void Bignum::MultiplyByUInt64(uint64_t factor) {
- if (factor == 1) return;
- if (factor == 0) {
- Zero();
- return;
- }
- ASSERT(kBigitSize < 32);
- uint64_t carry = 0;
- uint64_t low = factor & 0xFFFFFFFF;
- uint64_t high = factor >> 32;
- for (int i = 0; i < used_digits_; ++i) {
- uint64_t product_low = low * bigits_[i];
- uint64_t product_high = high * bigits_[i];
- uint64_t tmp = (carry & kBigitMask) + product_low;
- bigits_[i] = static_cast<Chunk>(tmp & kBigitMask);
- carry = (carry >> kBigitSize) + (tmp >> kBigitSize) +
- (product_high << (32 - kBigitSize));
- }
- while (carry != 0) {
- EnsureCapacity(used_digits_ + 1);
- bigits_[used_digits_] = static_cast<Chunk>(carry & kBigitMask);
- used_digits_++;
- carry >>= kBigitSize;
- }
-}
-
-
-void Bignum::MultiplyByPowerOfTen(int exponent) {
- const uint64_t kFive27 = V8_2PART_UINT64_C(0x6765c793, fa10079d);
- const uint16_t kFive1 = 5;
- const uint16_t kFive2 = kFive1 * 5;
- const uint16_t kFive3 = kFive2 * 5;
- const uint16_t kFive4 = kFive3 * 5;
- const uint16_t kFive5 = kFive4 * 5;
- const uint16_t kFive6 = kFive5 * 5;
- const uint32_t kFive7 = kFive6 * 5;
- const uint32_t kFive8 = kFive7 * 5;
- const uint32_t kFive9 = kFive8 * 5;
- const uint32_t kFive10 = kFive9 * 5;
- const uint32_t kFive11 = kFive10 * 5;
- const uint32_t kFive12 = kFive11 * 5;
- const uint32_t kFive13 = kFive12 * 5;
- const uint32_t kFive1_to_12[] =
- { kFive1, kFive2, kFive3, kFive4, kFive5, kFive6,
- kFive7, kFive8, kFive9, kFive10, kFive11, kFive12 };
-
- ASSERT(exponent >= 0);
- if (exponent == 0) return;
- if (used_digits_ == 0) return;
-
- // We shift by exponent at the end just before returning.
- int remaining_exponent = exponent;
- while (remaining_exponent >= 27) {
- MultiplyByUInt64(kFive27);
- remaining_exponent -= 27;
- }
- while (remaining_exponent >= 13) {
- MultiplyByUInt32(kFive13);
- remaining_exponent -= 13;
- }
- if (remaining_exponent > 0) {
- MultiplyByUInt32(kFive1_to_12[remaining_exponent - 1]);
- }
- ShiftLeft(exponent);
-}
-
-
-void Bignum::Square() {
- ASSERT(IsClamped());
- int product_length = 2 * used_digits_;
- EnsureCapacity(product_length);
-
- // Comba multiplication: compute each column separately.
- // Example: r = a2a1a0 * b2b1b0.
- // r = 1 * a0b0 +
- // 10 * (a1b0 + a0b1) +
- // 100 * (a2b0 + a1b1 + a0b2) +
- // 1000 * (a2b1 + a1b2) +
- // 10000 * a2b2
- //
- // In the worst case we have to accumulate nb-digits products of digit*digit.
- //
- // Assert that the additional number of bits in a DoubleChunk are enough to
- // sum up used_digits of Bigit*Bigit.
- if ((1 << (2 * (kChunkSize - kBigitSize))) <= used_digits_) {
- UNIMPLEMENTED();
- }
- DoubleChunk accumulator = 0;
- // First shift the digits so we don't overwrite them.
- int copy_offset = used_digits_;
- for (int i = 0; i < used_digits_; ++i) {
- bigits_[copy_offset + i] = bigits_[i];
- }
- // We have two loops to avoid some 'if's in the loop.
- for (int i = 0; i < used_digits_; ++i) {
- // Process temporary digit i with power i.
- // The sum of the two indices must be equal to i.
- int bigit_index1 = i;
- int bigit_index2 = 0;
- // Sum all of the sub-products.
- while (bigit_index1 >= 0) {
- Chunk chunk1 = bigits_[copy_offset + bigit_index1];
- Chunk chunk2 = bigits_[copy_offset + bigit_index2];
- accumulator += static_cast<DoubleChunk>(chunk1) * chunk2;
- bigit_index1--;
- bigit_index2++;
- }
- bigits_[i] = static_cast<Chunk>(accumulator) & kBigitMask;
- accumulator >>= kBigitSize;
- }
- for (int i = used_digits_; i < product_length; ++i) {
- int bigit_index1 = used_digits_ - 1;
- int bigit_index2 = i - bigit_index1;
- // Invariant: sum of both indices is again equal to i.
- // Inner loop runs 0 times on last iteration, emptying accumulator.
- while (bigit_index2 < used_digits_) {
- Chunk chunk1 = bigits_[copy_offset + bigit_index1];
- Chunk chunk2 = bigits_[copy_offset + bigit_index2];
- accumulator += static_cast<DoubleChunk>(chunk1) * chunk2;
- bigit_index1--;
- bigit_index2++;
- }
- // The overwritten bigits_[i] will never be read in further loop iterations,
- // because bigit_index1 and bigit_index2 are always greater
- // than i - used_digits_.
- bigits_[i] = static_cast<Chunk>(accumulator) & kBigitMask;
- accumulator >>= kBigitSize;
- }
- // Since the result was guaranteed to lie inside the number the
- // accumulator must be 0 now.
- ASSERT(accumulator == 0);
-
- // Don't forget to update the used_digits and the exponent.
- used_digits_ = product_length;
- exponent_ *= 2;
- Clamp();
-}
-
-
-void Bignum::AssignPowerUInt16(uint16_t base, int power_exponent) {
- ASSERT(base != 0);
- ASSERT(power_exponent >= 0);
- if (power_exponent == 0) {
- AssignUInt16(1);
- return;
- }
- Zero();
- int shifts = 0;
- // We expect base to be in range 2-32, and most often to be 10.
- // It does not make much sense to implement different algorithms for counting
- // the bits.
- while ((base & 1) == 0) {
- base >>= 1;
- shifts++;
- }
- int bit_size = 0;
- int tmp_base = base;
- while (tmp_base != 0) {
- tmp_base >>= 1;
- bit_size++;
- }
- int final_size = bit_size * power_exponent;
- // 1 extra bigit for the shifting, and one for rounded final_size.
- EnsureCapacity(final_size / kBigitSize + 2);
-
- // Left to Right exponentiation.
- int mask = 1;
- while (power_exponent >= mask) mask <<= 1;
-
- // The mask is now pointing to the bit above the most significant 1-bit of
- // power_exponent.
- // Get rid of first 1-bit;
- mask >>= 2;
- uint64_t this_value = base;
-
- bool delayed_multipliciation = false;
- const uint64_t max_32bits = 0xFFFFFFFF;
- while (mask != 0 && this_value <= max_32bits) {
- this_value = this_value * this_value;
- // Verify that there is enough space in this_value to perform the
- // multiplication. The first bit_size bits must be 0.
- if ((power_exponent & mask) != 0) {
- uint64_t base_bits_mask =
- ~((static_cast<uint64_t>(1) << (64 - bit_size)) - 1);
- bool high_bits_zero = (this_value & base_bits_mask) == 0;
- if (high_bits_zero) {
- this_value *= base;
- } else {
- delayed_multipliciation = true;
- }
- }
- mask >>= 1;
- }
- AssignUInt64(this_value);
- if (delayed_multipliciation) {
- MultiplyByUInt32(base);
- }
-
- // Now do the same thing as a bignum.
- while (mask != 0) {
- Square();
- if ((power_exponent & mask) != 0) {
- MultiplyByUInt32(base);
- }
- mask >>= 1;
- }
-
- // And finally add the saved shifts.
- ShiftLeft(shifts * power_exponent);
-}
-
-
-// Precondition: this/other < 16bit.
-uint16_t Bignum::DivideModuloIntBignum(const Bignum& other) {
- ASSERT(IsClamped());
- ASSERT(other.IsClamped());
- ASSERT(other.used_digits_ > 0);
-
- // Easy case: if we have less digits than the divisor than the result is 0.
- // Note: this handles the case where this == 0, too.
- if (BigitLength() < other.BigitLength()) {
- return 0;
- }
-
- Align(other);
-
- uint16_t result = 0;
-
- // Start by removing multiples of 'other' until both numbers have the same
- // number of digits.
- while (BigitLength() > other.BigitLength()) {
- // This naive approach is extremely inefficient if the this divided other
- // might be big. This function is implemented for doubleToString where
- // the result should be small (less than 10).
- ASSERT(other.bigits_[other.used_digits_ - 1] >= ((1 << kBigitSize) / 16));
- // Remove the multiples of the first digit.
- // Example this = 23 and other equals 9. -> Remove 2 multiples.
- result += bigits_[used_digits_ - 1];
- SubtractTimes(other, bigits_[used_digits_ - 1]);
- }
-
- ASSERT(BigitLength() == other.BigitLength());
-
- // Both bignums are at the same length now.
- // Since other has more than 0 digits we know that the access to
- // bigits_[used_digits_ - 1] is safe.
- Chunk this_bigit = bigits_[used_digits_ - 1];
- Chunk other_bigit = other.bigits_[other.used_digits_ - 1];
-
- if (other.used_digits_ == 1) {
- // Shortcut for easy (and common) case.
- int quotient = this_bigit / other_bigit;
- bigits_[used_digits_ - 1] = this_bigit - other_bigit * quotient;
- result += quotient;
- Clamp();
- return result;
- }
-
- int division_estimate = this_bigit / (other_bigit + 1);
- result += division_estimate;
- SubtractTimes(other, division_estimate);
-
- if (other_bigit * (division_estimate + 1) > this_bigit) {
- // No need to even try to subtract. Even if other's remaining digits were 0
- // another subtraction would be too much.
- return result;
- }
-
- while (LessEqual(other, *this)) {
- SubtractBignum(other);
- result++;
- }
- return result;
-}
-
-
-template<typename S>
-static int SizeInHexChars(S number) {
- ASSERT(number > 0);
- int result = 0;
- while (number != 0) {
- number >>= 4;
- result++;
- }
- return result;
-}
-
-
-static char HexCharOfValue(int value) {
- ASSERT(0 <= value && value <= 16);
- if (value < 10) return value + '0';
- return value - 10 + 'A';
-}
-
-
-bool Bignum::ToHexString(char* buffer, int buffer_size) const {
- ASSERT(IsClamped());
- // Each bigit must be printable as separate hex-character.
- ASSERT(kBigitSize % 4 == 0);
- const int kHexCharsPerBigit = kBigitSize / 4;
-
- if (used_digits_ == 0) {
- if (buffer_size < 2) return false;
- buffer[0] = '0';
- buffer[1] = '\0';
- return true;
- }
- // We add 1 for the terminating '\0' character.
- int needed_chars = (BigitLength() - 1) * kHexCharsPerBigit +
- SizeInHexChars(bigits_[used_digits_ - 1]) + 1;
- if (needed_chars > buffer_size) return false;
- int string_index = needed_chars - 1;
- buffer[string_index--] = '\0';
- for (int i = 0; i < exponent_; ++i) {
- for (int j = 0; j < kHexCharsPerBigit; ++j) {
- buffer[string_index--] = '0';
- }
- }
- for (int i = 0; i < used_digits_ - 1; ++i) {
- Chunk current_bigit = bigits_[i];
- for (int j = 0; j < kHexCharsPerBigit; ++j) {
- buffer[string_index--] = HexCharOfValue(current_bigit & 0xF);
- current_bigit >>= 4;
- }
- }
- // And finally the last bigit.
- Chunk most_significant_bigit = bigits_[used_digits_ - 1];
- while (most_significant_bigit != 0) {
- buffer[string_index--] = HexCharOfValue(most_significant_bigit & 0xF);
- most_significant_bigit >>= 4;
- }
- return true;
-}
-
-
-Bignum::Chunk Bignum::BigitAt(int index) const {
- if (index >= BigitLength()) return 0;
- if (index < exponent_) return 0;
- return bigits_[index - exponent_];
-}
-
-
-int Bignum::Compare(const Bignum& a, const Bignum& b) {
- ASSERT(a.IsClamped());
- ASSERT(b.IsClamped());
- int bigit_length_a = a.BigitLength();
- int bigit_length_b = b.BigitLength();
- if (bigit_length_a < bigit_length_b) return -1;
- if (bigit_length_a > bigit_length_b) return +1;
- for (int i = bigit_length_a - 1; i >= Min(a.exponent_, b.exponent_); --i) {
- Chunk bigit_a = a.BigitAt(i);
- Chunk bigit_b = b.BigitAt(i);
- if (bigit_a < bigit_b) return -1;
- if (bigit_a > bigit_b) return +1;
- // Otherwise they are equal up to this digit. Try the next digit.
- }
- return 0;
-}
-
-
-int Bignum::PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c) {
- ASSERT(a.IsClamped());
- ASSERT(b.IsClamped());
- ASSERT(c.IsClamped());
- if (a.BigitLength() < b.BigitLength()) {
- return PlusCompare(b, a, c);
- }
- if (a.BigitLength() + 1 < c.BigitLength()) return -1;
- if (a.BigitLength() > c.BigitLength()) return +1;
- // The exponent encodes 0-bigits. So if there are more 0-digits in 'a' than
- // 'b' has digits, then the bigit-length of 'a'+'b' must be equal to the one
- // of 'a'.
- if (a.exponent_ >= b.BigitLength() && a.BigitLength() < c.BigitLength()) {
- return -1;
- }
-
- Chunk borrow = 0;
- // Starting at min_exponent all digits are == 0. So no need to compare them.
- int min_exponent = Min(Min(a.exponent_, b.exponent_), c.exponent_);
- for (int i = c.BigitLength() - 1; i >= min_exponent; --i) {
- Chunk chunk_a = a.BigitAt(i);
- Chunk chunk_b = b.BigitAt(i);
- Chunk chunk_c = c.BigitAt(i);
- Chunk sum = chunk_a + chunk_b;
- if (sum > chunk_c + borrow) {
- return +1;
- } else {
- borrow = chunk_c + borrow - sum;
- if (borrow > 1) return -1;
- borrow <<= kBigitSize;
- }
- }
- if (borrow == 0) return 0;
- return -1;
-}
-
-
-void Bignum::Clamp() {
- while (used_digits_ > 0 && bigits_[used_digits_ - 1] == 0) {
- used_digits_--;
- }
- if (used_digits_ == 0) {
- // Zero.
- exponent_ = 0;
- }
-}
-
-
-bool Bignum::IsClamped() const {
- return used_digits_ == 0 || bigits_[used_digits_ - 1] != 0;
-}
-
-
-void Bignum::Zero() {
- for (int i = 0; i < used_digits_; ++i) {
- bigits_[i] = 0;
- }
- used_digits_ = 0;
- exponent_ = 0;
-}
-
-
-void Bignum::Align(const Bignum& other) {
- if (exponent_ > other.exponent_) {
- // If "X" represents a "hidden" digit (by the exponent) then we are in the
- // following case (a == this, b == other):
- // a: aaaaaaXXXX or a: aaaaaXXX
- // b: bbbbbbX b: bbbbbbbbXX
- // We replace some of the hidden digits (X) of a with 0 digits.
- // a: aaaaaa000X or a: aaaaa0XX
- int zero_digits = exponent_ - other.exponent_;
- EnsureCapacity(used_digits_ + zero_digits);
- for (int i = used_digits_ - 1; i >= 0; --i) {
- bigits_[i + zero_digits] = bigits_[i];
- }
- for (int i = 0; i < zero_digits; ++i) {
- bigits_[i] = 0;
- }
- used_digits_ += zero_digits;
- exponent_ -= zero_digits;
- ASSERT(used_digits_ >= 0);
- ASSERT(exponent_ >= 0);
- }
-}
-
-
-void Bignum::BigitsShiftLeft(int shift_amount) {
- ASSERT(shift_amount < kBigitSize);
- ASSERT(shift_amount >= 0);
- Chunk carry = 0;
- for (int i = 0; i < used_digits_; ++i) {
- Chunk new_carry = bigits_[i] >> (kBigitSize - shift_amount);
- bigits_[i] = ((bigits_[i] << shift_amount) + carry) & kBigitMask;
- carry = new_carry;
- }
- if (carry != 0) {
- bigits_[used_digits_] = carry;
- used_digits_++;
- }
-}
-
-
-void Bignum::SubtractTimes(const Bignum& other, int factor) {
- ASSERT(exponent_ <= other.exponent_);
- if (factor < 3) {
- for (int i = 0; i < factor; ++i) {
- SubtractBignum(other);
- }
- return;
- }
- Chunk borrow = 0;
- int exponent_diff = other.exponent_ - exponent_;
- for (int i = 0; i < other.used_digits_; ++i) {
- DoubleChunk product = static_cast<DoubleChunk>(factor) * other.bigits_[i];
- DoubleChunk remove = borrow + product;
- Chunk difference =
- bigits_[i + exponent_diff] - static_cast<Chunk>(remove & kBigitMask);
- bigits_[i + exponent_diff] = difference & kBigitMask;
- borrow = static_cast<Chunk>((difference >> (kChunkSize - 1)) +
- (remove >> kBigitSize));
- }
- for (int i = other.used_digits_ + exponent_diff; i < used_digits_; ++i) {
- if (borrow == 0) return;
- Chunk difference = bigits_[i] - borrow;
- bigits_[i] = difference & kBigitMask;
- borrow = difference >> (kChunkSize - 1);
- ++i;
- }
- Clamp();
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/bignum.h b/src/3rdparty/v8/src/bignum.h
deleted file mode 100644
index 1d2bff6..0000000
--- a/src/3rdparty/v8/src/bignum.h
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_BIGNUM_H_
-#define V8_BIGNUM_H_
-
-namespace v8 {
-namespace internal {
-
-class Bignum {
- public:
- // 3584 = 128 * 28. We can represent 2^3584 > 10^1000 accurately.
- // This bignum can encode much bigger numbers, since it contains an
- // exponent.
- static const int kMaxSignificantBits = 3584;
-
- Bignum();
- void AssignUInt16(uint16_t value);
- void AssignUInt64(uint64_t value);
- void AssignBignum(const Bignum& other);
-
- void AssignDecimalString(Vector<const char> value);
- void AssignHexString(Vector<const char> value);
-
- void AssignPowerUInt16(uint16_t base, int exponent);
-
- void AddUInt16(uint16_t operand);
- void AddUInt64(uint64_t operand);
- void AddBignum(const Bignum& other);
- // Precondition: this >= other.
- void SubtractBignum(const Bignum& other);
-
- void Square();
- void ShiftLeft(int shift_amount);
- void MultiplyByUInt32(uint32_t factor);
- void MultiplyByUInt64(uint64_t factor);
- void MultiplyByPowerOfTen(int exponent);
- void Times10() { return MultiplyByUInt32(10); }
- // Pseudocode:
- // int result = this / other;
- // this = this % other;
- // In the worst case this function is in O(this/other).
- uint16_t DivideModuloIntBignum(const Bignum& other);
-
- bool ToHexString(char* buffer, int buffer_size) const;
-
- static int Compare(const Bignum& a, const Bignum& b);
- static bool Equal(const Bignum& a, const Bignum& b) {
- return Compare(a, b) == 0;
- }
- static bool LessEqual(const Bignum& a, const Bignum& b) {
- return Compare(a, b) <= 0;
- }
- static bool Less(const Bignum& a, const Bignum& b) {
- return Compare(a, b) < 0;
- }
- // Returns Compare(a + b, c);
- static int PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c);
- // Returns a + b == c
- static bool PlusEqual(const Bignum& a, const Bignum& b, const Bignum& c) {
- return PlusCompare(a, b, c) == 0;
- }
- // Returns a + b <= c
- static bool PlusLessEqual(const Bignum& a, const Bignum& b, const Bignum& c) {
- return PlusCompare(a, b, c) <= 0;
- }
- // Returns a + b < c
- static bool PlusLess(const Bignum& a, const Bignum& b, const Bignum& c) {
- return PlusCompare(a, b, c) < 0;
- }
- private:
- typedef uint32_t Chunk;
- typedef uint64_t DoubleChunk;
-
- static const int kChunkSize = sizeof(Chunk) * 8;
- static const int kDoubleChunkSize = sizeof(DoubleChunk) * 8;
- // With bigit size of 28 we loose some bits, but a double still fits easily
- // into two chunks, and more importantly we can use the Comba multiplication.
- static const int kBigitSize = 28;
- static const Chunk kBigitMask = (1 << kBigitSize) - 1;
- // Every instance allocates kBigitLength chunks on the stack. Bignums cannot
- // grow. There are no checks if the stack-allocated space is sufficient.
- static const int kBigitCapacity = kMaxSignificantBits / kBigitSize;
-
- void EnsureCapacity(int size) {
- if (size > kBigitCapacity) {
- UNREACHABLE();
- }
- }
- void Align(const Bignum& other);
- void Clamp();
- bool IsClamped() const;
- void Zero();
- // Requires this to have enough capacity (no tests done).
- // Updates used_digits_ if necessary.
- // by must be < kBigitSize.
- void BigitsShiftLeft(int shift_amount);
- // BigitLength includes the "hidden" digits encoded in the exponent.
- int BigitLength() const { return used_digits_ + exponent_; }
- Chunk BigitAt(int index) const;
- void SubtractTimes(const Bignum& other, int factor);
-
- Chunk bigits_buffer_[kBigitCapacity];
- // A vector backed by bigits_buffer_. This way accesses to the array are
- // checked for out-of-bounds errors.
- Vector<Chunk> bigits_;
- int used_digits_;
- // The Bignum's value equals value(bigits_) * 2^(exponent_ * kBigitSize).
- int exponent_;
-
- DISALLOW_COPY_AND_ASSIGN(Bignum);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_BIGNUM_H_
diff --git a/src/3rdparty/v8/src/bootstrapper.cc b/src/3rdparty/v8/src/bootstrapper.cc
deleted file mode 100644
index a30ffc0..0000000
--- a/src/3rdparty/v8/src/bootstrapper.cc
+++ /dev/null
@@ -1,2138 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "accessors.h"
-#include "api.h"
-#include "bootstrapper.h"
-#include "compiler.h"
-#include "debug.h"
-#include "execution.h"
-#include "global-handles.h"
-#include "macro-assembler.h"
-#include "natives.h"
-#include "objects-visiting.h"
-#include "snapshot.h"
-#include "extensions/externalize-string-extension.h"
-#include "extensions/gc-extension.h"
-
-namespace v8 {
-namespace internal {
-
-
-NativesExternalStringResource::NativesExternalStringResource(
- Bootstrapper* bootstrapper,
- const char* source)
- : data_(source), length_(StrLength(source)) {
- if (bootstrapper->delete_these_non_arrays_on_tear_down_ == NULL) {
- bootstrapper->delete_these_non_arrays_on_tear_down_ = new List<char*>(2);
- }
- // The resources are small objects and we only make a fixed number of
- // them, but let's clean them up on exit for neatness.
- bootstrapper->delete_these_non_arrays_on_tear_down_->
- Add(reinterpret_cast<char*>(this));
-}
-
-
-Bootstrapper::Bootstrapper()
- : nesting_(0),
- extensions_cache_(Script::TYPE_EXTENSION),
- delete_these_non_arrays_on_tear_down_(NULL),
- delete_these_arrays_on_tear_down_(NULL) {
-}
-
-
-Handle<String> Bootstrapper::NativesSourceLookup(int index) {
- ASSERT(0 <= index && index < Natives::GetBuiltinsCount());
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
- if (heap->natives_source_cache()->get(index)->IsUndefined()) {
- if (!Snapshot::IsEnabled() || FLAG_new_snapshot) {
- // We can use external strings for the natives.
- NativesExternalStringResource* resource =
- new NativesExternalStringResource(this,
- Natives::GetScriptSource(index).start());
- Handle<String> source_code =
- factory->NewExternalStringFromAscii(resource);
- heap->natives_source_cache()->set(index, *source_code);
- } else {
- // Old snapshot code can't cope with external strings at all.
- Handle<String> source_code =
- factory->NewStringFromAscii(Natives::GetScriptSource(index));
- heap->natives_source_cache()->set(index, *source_code);
- }
- }
- Handle<Object> cached_source(heap->natives_source_cache()->get(index));
- return Handle<String>::cast(cached_source);
-}
-
-
-void Bootstrapper::Initialize(bool create_heap_objects) {
- extensions_cache_.Initialize(create_heap_objects);
- GCExtension::Register();
- ExternalizeStringExtension::Register();
-}
-
-
-char* Bootstrapper::AllocateAutoDeletedArray(int bytes) {
- char* memory = new char[bytes];
- if (memory != NULL) {
- if (delete_these_arrays_on_tear_down_ == NULL) {
- delete_these_arrays_on_tear_down_ = new List<char*>(2);
- }
- delete_these_arrays_on_tear_down_->Add(memory);
- }
- return memory;
-}
-
-
-void Bootstrapper::TearDown() {
- if (delete_these_non_arrays_on_tear_down_ != NULL) {
- int len = delete_these_non_arrays_on_tear_down_->length();
- ASSERT(len < 20); // Don't use this mechanism for unbounded allocations.
- for (int i = 0; i < len; i++) {
- delete delete_these_non_arrays_on_tear_down_->at(i);
- delete_these_non_arrays_on_tear_down_->at(i) = NULL;
- }
- delete delete_these_non_arrays_on_tear_down_;
- delete_these_non_arrays_on_tear_down_ = NULL;
- }
-
- if (delete_these_arrays_on_tear_down_ != NULL) {
- int len = delete_these_arrays_on_tear_down_->length();
- ASSERT(len < 1000); // Don't use this mechanism for unbounded allocations.
- for (int i = 0; i < len; i++) {
- delete[] delete_these_arrays_on_tear_down_->at(i);
- delete_these_arrays_on_tear_down_->at(i) = NULL;
- }
- delete delete_these_arrays_on_tear_down_;
- delete_these_arrays_on_tear_down_ = NULL;
- }
-
- extensions_cache_.Initialize(false); // Yes, symmetrical
-}
-
-
-class Genesis BASE_EMBEDDED {
- public:
- Genesis(Handle<Object> global_object,
- v8::Handle<v8::ObjectTemplate> global_template,
- v8::ExtensionConfiguration* extensions);
- ~Genesis() { }
-
- Handle<Context> result() { return result_; }
-
- Genesis* previous() { return previous_; }
-
- private:
- Handle<Context> global_context_;
-
- // There may be more than one active genesis object: When GC is
- // triggered during environment creation there may be weak handle
- // processing callbacks which may create new environments.
- Genesis* previous_;
-
- Handle<Context> global_context() { return global_context_; }
-
- // Creates some basic objects. Used for creating a context from scratch.
- void CreateRoots();
- // Creates the empty function. Used for creating a context from scratch.
- Handle<JSFunction> CreateEmptyFunction();
- // Creates the ThrowTypeError function. ECMA 5th Ed. 13.2.3
- Handle<JSFunction> CreateThrowTypeErrorFunction(Builtins::Name builtin);
-
- void CreateStrictModeFunctionMaps(Handle<JSFunction> empty);
- // Creates the global objects using the global and the template passed in
- // through the API. We call this regardless of whether we are building a
- // context from scratch or using a deserialized one from the partial snapshot
- // but in the latter case we don't use the objects it produces directly, as
- // we have to used the deserialized ones that are linked together with the
- // rest of the context snapshot.
- Handle<JSGlobalProxy> CreateNewGlobals(
- v8::Handle<v8::ObjectTemplate> global_template,
- Handle<Object> global_object,
- Handle<GlobalObject>* global_proxy_out);
- // Hooks the given global proxy into the context. If the context was created
- // by deserialization then this will unhook the global proxy that was
- // deserialized, leaving the GC to pick it up.
- void HookUpGlobalProxy(Handle<GlobalObject> inner_global,
- Handle<JSGlobalProxy> global_proxy);
- // Similarly, we want to use the inner global that has been created by the
- // templates passed through the API. The inner global from the snapshot is
- // detached from the other objects in the snapshot.
- void HookUpInnerGlobal(Handle<GlobalObject> inner_global);
- // New context initialization. Used for creating a context from scratch.
- void InitializeGlobal(Handle<GlobalObject> inner_global,
- Handle<JSFunction> empty_function);
- // Installs the contents of the native .js files on the global objects.
- // Used for creating a context from scratch.
- void InstallNativeFunctions();
- bool InstallNatives();
- void InstallBuiltinFunctionIds();
- void InstallJSFunctionResultCaches();
- void InitializeNormalizedMapCaches();
- // Used both for deserialized and from-scratch contexts to add the extensions
- // provided.
- static bool InstallExtensions(Handle<Context> global_context,
- v8::ExtensionConfiguration* extensions);
- static bool InstallExtension(const char* name);
- static bool InstallExtension(v8::RegisteredExtension* current);
- static void InstallSpecialObjects(Handle<Context> global_context);
- bool InstallJSBuiltins(Handle<JSBuiltinsObject> builtins);
- bool ConfigureApiObject(Handle<JSObject> object,
- Handle<ObjectTemplateInfo> object_template);
- bool ConfigureGlobalObjects(v8::Handle<v8::ObjectTemplate> global_template);
-
- // Migrates all properties from the 'from' object to the 'to'
- // object and overrides the prototype in 'to' with the one from
- // 'from'.
- void TransferObject(Handle<JSObject> from, Handle<JSObject> to);
- void TransferNamedProperties(Handle<JSObject> from, Handle<JSObject> to);
- void TransferIndexedProperties(Handle<JSObject> from, Handle<JSObject> to);
-
- enum PrototypePropertyMode {
- DONT_ADD_PROTOTYPE,
- ADD_READONLY_PROTOTYPE,
- ADD_WRITEABLE_PROTOTYPE
- };
-
- Handle<Map> CreateFunctionMap(PrototypePropertyMode prototype_mode);
-
- Handle<DescriptorArray> ComputeFunctionInstanceDescriptor(
- PrototypePropertyMode prototypeMode);
- void MakeFunctionInstancePrototypeWritable();
-
- Handle<Map> CreateStrictModeFunctionMap(
- PrototypePropertyMode prototype_mode,
- Handle<JSFunction> empty_function,
- Handle<FixedArray> arguments_callbacks,
- Handle<FixedArray> caller_callbacks);
-
- Handle<DescriptorArray> ComputeStrictFunctionInstanceDescriptor(
- PrototypePropertyMode propertyMode,
- Handle<FixedArray> arguments,
- Handle<FixedArray> caller);
-
- static bool CompileBuiltin(int index);
- static bool CompileNative(Vector<const char> name, Handle<String> source);
- static bool CompileScriptCached(Vector<const char> name,
- Handle<String> source,
- SourceCodeCache* cache,
- v8::Extension* extension,
- Handle<Context> top_context,
- bool use_runtime_context);
-
- Handle<Context> result_;
-
- // Function instance maps. Function literal maps are created initially with
- // a read only prototype for the processing of JS builtins. Later the function
- // instance maps are replaced in order to make prototype writable.
- // These are the final, writable prototype, maps.
- Handle<Map> function_instance_map_writable_prototype_;
- Handle<Map> strict_mode_function_instance_map_writable_prototype_;
-
- BootstrapperActive active_;
- friend class Bootstrapper;
-};
-
-
-void Bootstrapper::Iterate(ObjectVisitor* v) {
- extensions_cache_.Iterate(v);
- v->Synchronize("Extensions");
-}
-
-
-Handle<Context> Bootstrapper::CreateEnvironment(
- Handle<Object> global_object,
- v8::Handle<v8::ObjectTemplate> global_template,
- v8::ExtensionConfiguration* extensions) {
- HandleScope scope;
- Handle<Context> env;
- Genesis genesis(global_object, global_template, extensions);
- env = genesis.result();
- if (!env.is_null()) {
- if (InstallExtensions(env, extensions)) {
- return env;
- }
- }
- return Handle<Context>();
-}
-
-
-static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
- // object.__proto__ = proto;
- Handle<Map> old_to_map = Handle<Map>(object->map());
- Handle<Map> new_to_map = FACTORY->CopyMapDropTransitions(old_to_map);
- new_to_map->set_prototype(*proto);
- object->set_map(*new_to_map);
-}
-
-
-void Bootstrapper::DetachGlobal(Handle<Context> env) {
- Factory* factory = Isolate::Current()->factory();
- JSGlobalProxy::cast(env->global_proxy())->set_context(*factory->null_value());
- SetObjectPrototype(Handle<JSObject>(env->global_proxy()),
- factory->null_value());
- env->set_global_proxy(env->global());
- env->global()->set_global_receiver(env->global());
-}
-
-
-void Bootstrapper::ReattachGlobal(Handle<Context> env,
- Handle<Object> global_object) {
- ASSERT(global_object->IsJSGlobalProxy());
- Handle<JSGlobalProxy> global = Handle<JSGlobalProxy>::cast(global_object);
- env->global()->set_global_receiver(*global);
- env->set_global_proxy(*global);
- SetObjectPrototype(global, Handle<JSObject>(env->global()));
- global->set_context(*env);
-}
-
-
-static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
- const char* name,
- InstanceType type,
- int instance_size,
- Handle<JSObject> prototype,
- Builtins::Name call,
- bool is_ecma_native) {
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- Handle<String> symbol = factory->LookupAsciiSymbol(name);
- Handle<Code> call_code = Handle<Code>(isolate->builtins()->builtin(call));
- Handle<JSFunction> function = prototype.is_null() ?
- factory->NewFunctionWithoutPrototype(symbol, call_code) :
- factory->NewFunctionWithPrototype(symbol,
- type,
- instance_size,
- prototype,
- call_code,
- is_ecma_native);
- SetLocalPropertyNoThrow(target, symbol, function, DONT_ENUM);
- if (is_ecma_native) {
- function->shared()->set_instance_class_name(*symbol);
- }
- return function;
-}
-
-
-Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor(
- PrototypePropertyMode prototypeMode) {
- Factory* factory = Isolate::Current()->factory();
- Handle<DescriptorArray> descriptors =
- factory->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE ? 4 : 5);
- PropertyAttributes attributes =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- { // Add length.
- Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionLength);
- CallbacksDescriptor d(*factory->length_symbol(), *proxy, attributes);
- descriptors->Set(0, &d);
- }
- { // Add name.
- Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionName);
- CallbacksDescriptor d(*factory->name_symbol(), *proxy, attributes);
- descriptors->Set(1, &d);
- }
- { // Add arguments.
- Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionArguments);
- CallbacksDescriptor d(*factory->arguments_symbol(), *proxy, attributes);
- descriptors->Set(2, &d);
- }
- { // Add caller.
- Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionCaller);
- CallbacksDescriptor d(*factory->caller_symbol(), *proxy, attributes);
- descriptors->Set(3, &d);
- }
- if (prototypeMode != DONT_ADD_PROTOTYPE) {
- // Add prototype.
- if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
- attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY);
- }
- Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionPrototype);
- CallbacksDescriptor d(*factory->prototype_symbol(), *proxy, attributes);
- descriptors->Set(4, &d);
- }
- descriptors->Sort();
- return descriptors;
-}
-
-
-Handle<Map> Genesis::CreateFunctionMap(PrototypePropertyMode prototype_mode) {
- Handle<Map> map = FACTORY->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
- Handle<DescriptorArray> descriptors =
- ComputeFunctionInstanceDescriptor(prototype_mode);
- map->set_instance_descriptors(*descriptors);
- map->set_function_with_prototype(prototype_mode != DONT_ADD_PROTOTYPE);
- return map;
-}
-
-
-Handle<JSFunction> Genesis::CreateEmptyFunction() {
- // Allocate the map for function instances. Maps are allocated first and their
- // prototypes patched later, once empty function is created.
-
- // Please note that the prototype property for function instances must be
- // writable.
- Handle<Map> function_instance_map =
- CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
- global_context()->set_function_instance_map(*function_instance_map);
-
- // Functions with this map will not have a 'prototype' property, and
- // can not be used as constructors.
- Handle<Map> function_without_prototype_map =
- CreateFunctionMap(DONT_ADD_PROTOTYPE);
- global_context()->set_function_without_prototype_map(
- *function_without_prototype_map);
-
- // Allocate the function map. This map is temporary, used only for processing
- // of builtins.
- // Later the map is replaced with writable prototype map, allocated below.
- Handle<Map> function_map = CreateFunctionMap(ADD_READONLY_PROTOTYPE);
- global_context()->set_function_map(*function_map);
-
- // The final map for functions. Writeable prototype.
- // This map is installed in MakeFunctionInstancePrototypeWritable.
- function_instance_map_writable_prototype_ =
- CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
-
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
-
- Handle<String> object_name = Handle<String>(heap->Object_symbol());
-
- { // --- O b j e c t ---
- Handle<JSFunction> object_fun =
- factory->NewFunction(object_name, factory->null_value());
- Handle<Map> object_function_map =
- factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
- object_fun->set_initial_map(*object_function_map);
- object_function_map->set_constructor(*object_fun);
-
- global_context()->set_object_function(*object_fun);
-
- // Allocate a new prototype for the object function.
- Handle<JSObject> prototype = factory->NewJSObject(
- isolate->object_function(),
- TENURED);
-
- global_context()->set_initial_object_prototype(*prototype);
- SetPrototype(object_fun, prototype);
- object_function_map->
- set_instance_descriptors(heap->empty_descriptor_array());
- }
-
- // Allocate the empty function as the prototype for function ECMAScript
- // 262 15.3.4.
- Handle<String> symbol = factory->LookupAsciiSymbol("Empty");
- Handle<JSFunction> empty_function =
- factory->NewFunctionWithoutPrototype(symbol, kNonStrictMode);
-
- // --- E m p t y ---
- Handle<Code> code =
- Handle<Code>(isolate->builtins()->builtin(
- Builtins::kEmptyFunction));
- empty_function->set_code(*code);
- empty_function->shared()->set_code(*code);
- Handle<String> source = factory->NewStringFromAscii(CStrVector("() {}"));
- Handle<Script> script = factory->NewScript(source);
- script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
- empty_function->shared()->set_script(*script);
- empty_function->shared()->set_start_position(0);
- empty_function->shared()->set_end_position(source->length());
- empty_function->shared()->DontAdaptArguments();
-
- // Set prototypes for the function maps.
- global_context()->function_map()->set_prototype(*empty_function);
- global_context()->function_instance_map()->set_prototype(*empty_function);
- global_context()->function_without_prototype_map()->
- set_prototype(*empty_function);
- function_instance_map_writable_prototype_->set_prototype(*empty_function);
-
- // Allocate the function map first and then patch the prototype later
- Handle<Map> empty_fm = factory->CopyMapDropDescriptors(
- function_without_prototype_map);
- empty_fm->set_instance_descriptors(
- function_without_prototype_map->instance_descriptors());
- empty_fm->set_prototype(global_context()->object_function()->prototype());
- empty_function->set_map(*empty_fm);
- return empty_function;
-}
-
-
-Handle<DescriptorArray> Genesis::ComputeStrictFunctionInstanceDescriptor(
- PrototypePropertyMode prototypeMode,
- Handle<FixedArray> arguments,
- Handle<FixedArray> caller) {
- Factory* factory = Isolate::Current()->factory();
- Handle<DescriptorArray> descriptors =
- factory->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE ? 4 : 5);
- PropertyAttributes attributes = static_cast<PropertyAttributes>(
- DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- { // length
- Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionLength);
- CallbacksDescriptor d(*factory->length_symbol(), *proxy, attributes);
- descriptors->Set(0, &d);
- }
- { // name
- Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionName);
- CallbacksDescriptor d(*factory->name_symbol(), *proxy, attributes);
- descriptors->Set(1, &d);
- }
- { // arguments
- CallbacksDescriptor d(*factory->arguments_symbol(), *arguments, attributes);
- descriptors->Set(2, &d);
- }
- { // caller
- CallbacksDescriptor d(*factory->caller_symbol(), *caller, attributes);
- descriptors->Set(3, &d);
- }
-
- // prototype
- if (prototypeMode != DONT_ADD_PROTOTYPE) {
- if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
- attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY);
- }
- Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionPrototype);
- CallbacksDescriptor d(*factory->prototype_symbol(), *proxy, attributes);
- descriptors->Set(4, &d);
- }
-
- descriptors->Sort();
- return descriptors;
-}
-
-
-// ECMAScript 5th Edition, 13.2.3
-Handle<JSFunction> Genesis::CreateThrowTypeErrorFunction(
- Builtins::Name builtin) {
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
-
- Handle<String> name = factory->LookupAsciiSymbol("ThrowTypeError");
- Handle<JSFunction> throw_type_error =
- factory->NewFunctionWithoutPrototype(name, kStrictMode);
- Handle<Code> code = Handle<Code>(
- isolate->builtins()->builtin(builtin));
-
- throw_type_error->set_map(global_context()->strict_mode_function_map());
- throw_type_error->set_code(*code);
- throw_type_error->shared()->set_code(*code);
- throw_type_error->shared()->DontAdaptArguments();
-
- PreventExtensions(throw_type_error);
-
- return throw_type_error;
-}
-
-
-Handle<Map> Genesis::CreateStrictModeFunctionMap(
- PrototypePropertyMode prototype_mode,
- Handle<JSFunction> empty_function,
- Handle<FixedArray> arguments_callbacks,
- Handle<FixedArray> caller_callbacks) {
- Handle<Map> map = FACTORY->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
- Handle<DescriptorArray> descriptors =
- ComputeStrictFunctionInstanceDescriptor(prototype_mode,
- arguments_callbacks,
- caller_callbacks);
- map->set_instance_descriptors(*descriptors);
- map->set_function_with_prototype(prototype_mode != DONT_ADD_PROTOTYPE);
- map->set_prototype(*empty_function);
- return map;
-}
-
-
-void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
- // Create the callbacks arrays for ThrowTypeError functions.
- // The get/set callacks are filled in after the maps are created below.
- Factory* factory = Isolate::Current()->factory();
- Handle<FixedArray> arguments = factory->NewFixedArray(2, TENURED);
- Handle<FixedArray> caller = factory->NewFixedArray(2, TENURED);
-
- // Allocate map for the strict mode function instances.
- Handle<Map> strict_mode_function_instance_map =
- CreateStrictModeFunctionMap(
- ADD_WRITEABLE_PROTOTYPE, empty, arguments, caller);
- global_context()->set_strict_mode_function_instance_map(
- *strict_mode_function_instance_map);
-
- // Allocate map for the prototype-less strict mode instances.
- Handle<Map> strict_mode_function_without_prototype_map =
- CreateStrictModeFunctionMap(
- DONT_ADD_PROTOTYPE, empty, arguments, caller);
- global_context()->set_strict_mode_function_without_prototype_map(
- *strict_mode_function_without_prototype_map);
-
- // Allocate map for the strict mode functions. This map is temporary, used
- // only for processing of builtins.
- // Later the map is replaced with writable prototype map, allocated below.
- Handle<Map> strict_mode_function_map =
- CreateStrictModeFunctionMap(
- ADD_READONLY_PROTOTYPE, empty, arguments, caller);
- global_context()->set_strict_mode_function_map(
- *strict_mode_function_map);
-
- // The final map for the strict mode functions. Writeable prototype.
- // This map is installed in MakeFunctionInstancePrototypeWritable.
- strict_mode_function_instance_map_writable_prototype_ =
- CreateStrictModeFunctionMap(
- ADD_WRITEABLE_PROTOTYPE, empty, arguments, caller);
-
- // Create the ThrowTypeError function instances.
- Handle<JSFunction> arguments_throw =
- CreateThrowTypeErrorFunction(Builtins::kStrictFunctionArguments);
- Handle<JSFunction> caller_throw =
- CreateThrowTypeErrorFunction(Builtins::kStrictFunctionCaller);
-
- // Complete the callback fixed arrays.
- arguments->set(0, *arguments_throw);
- arguments->set(1, *arguments_throw);
- caller->set(0, *caller_throw);
- caller->set(1, *caller_throw);
-}
-
-
-static void AddToWeakGlobalContextList(Context* context) {
- ASSERT(context->IsGlobalContext());
- Heap* heap = Isolate::Current()->heap();
-#ifdef DEBUG
- { // NOLINT
- ASSERT(context->get(Context::NEXT_CONTEXT_LINK)->IsUndefined());
- // Check that context is not in the list yet.
- for (Object* current = heap->global_contexts_list();
- !current->IsUndefined();
- current = Context::cast(current)->get(Context::NEXT_CONTEXT_LINK)) {
- ASSERT(current != context);
- }
- }
-#endif
- context->set(Context::NEXT_CONTEXT_LINK, heap->global_contexts_list());
- heap->set_global_contexts_list(context);
-}
-
-
-void Genesis::CreateRoots() {
- Isolate* isolate = Isolate::Current();
- // Allocate the global context FixedArray first and then patch the
- // closure and extension object later (we need the empty function
- // and the global object, but in order to create those, we need the
- // global context).
- global_context_ = Handle<Context>::cast(isolate->global_handles()->Create(
- *isolate->factory()->NewGlobalContext()));
- AddToWeakGlobalContextList(*global_context_);
- isolate->set_context(*global_context());
-
- // Allocate the message listeners object.
- {
- v8::NeanderArray listeners;
- global_context()->set_message_listeners(*listeners.value());
- }
-}
-
-
-Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
- v8::Handle<v8::ObjectTemplate> global_template,
- Handle<Object> global_object,
- Handle<GlobalObject>* inner_global_out) {
- // The argument global_template aka data is an ObjectTemplateInfo.
- // It has a constructor pointer that points at global_constructor which is a
- // FunctionTemplateInfo.
- // The global_constructor is used to create or reinitialize the global_proxy.
- // The global_constructor also has a prototype_template pointer that points at
- // js_global_template which is an ObjectTemplateInfo.
- // That in turn has a constructor pointer that points at
- // js_global_constructor which is a FunctionTemplateInfo.
- // js_global_constructor is used to make js_global_function
- // js_global_function is used to make the new inner_global.
- //
- // --- G l o b a l ---
- // Step 1: Create a fresh inner JSGlobalObject.
- Handle<JSFunction> js_global_function;
- Handle<ObjectTemplateInfo> js_global_template;
- if (!global_template.IsEmpty()) {
- // Get prototype template of the global_template.
- Handle<ObjectTemplateInfo> data =
- v8::Utils::OpenHandle(*global_template);
- Handle<FunctionTemplateInfo> global_constructor =
- Handle<FunctionTemplateInfo>(
- FunctionTemplateInfo::cast(data->constructor()));
- Handle<Object> proto_template(global_constructor->prototype_template());
- if (!proto_template->IsUndefined()) {
- js_global_template =
- Handle<ObjectTemplateInfo>::cast(proto_template);
- }
- }
-
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
-
- if (js_global_template.is_null()) {
- Handle<String> name = Handle<String>(heap->empty_symbol());
- Handle<Code> code = Handle<Code>(isolate->builtins()->builtin(
- Builtins::kIllegal));
- js_global_function =
- factory->NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
- JSGlobalObject::kSize, code, true);
- // Change the constructor property of the prototype of the
- // hidden global function to refer to the Object function.
- Handle<JSObject> prototype =
- Handle<JSObject>(
- JSObject::cast(js_global_function->instance_prototype()));
- SetLocalPropertyNoThrow(
- prototype,
- factory->constructor_symbol(),
- isolate->object_function(),
- NONE);
- } else {
- Handle<FunctionTemplateInfo> js_global_constructor(
- FunctionTemplateInfo::cast(js_global_template->constructor()));
- js_global_function =
- factory->CreateApiFunction(js_global_constructor,
- factory->InnerGlobalObject);
- }
-
- js_global_function->initial_map()->set_is_hidden_prototype();
- Handle<GlobalObject> inner_global =
- factory->NewGlobalObject(js_global_function);
- if (inner_global_out != NULL) {
- *inner_global_out = inner_global;
- }
-
- // Step 2: create or re-initialize the global proxy object.
- Handle<JSFunction> global_proxy_function;
- if (global_template.IsEmpty()) {
- Handle<String> name = Handle<String>(heap->empty_symbol());
- Handle<Code> code = Handle<Code>(isolate->builtins()->builtin(
- Builtins::kIllegal));
- global_proxy_function =
- factory->NewFunction(name, JS_GLOBAL_PROXY_TYPE,
- JSGlobalProxy::kSize, code, true);
- } else {
- Handle<ObjectTemplateInfo> data =
- v8::Utils::OpenHandle(*global_template);
- Handle<FunctionTemplateInfo> global_constructor(
- FunctionTemplateInfo::cast(data->constructor()));
- global_proxy_function =
- factory->CreateApiFunction(global_constructor,
- factory->OuterGlobalObject);
- }
-
- Handle<String> global_name = factory->LookupAsciiSymbol("global");
- global_proxy_function->shared()->set_instance_class_name(*global_name);
- global_proxy_function->initial_map()->set_is_access_check_needed(true);
-
- // Set global_proxy.__proto__ to js_global after ConfigureGlobalObjects
- // Return the global proxy.
-
- if (global_object.location() != NULL) {
- ASSERT(global_object->IsJSGlobalProxy());
- return ReinitializeJSGlobalProxy(
- global_proxy_function,
- Handle<JSGlobalProxy>::cast(global_object));
- } else {
- return Handle<JSGlobalProxy>::cast(
- factory->NewJSObject(global_proxy_function, TENURED));
- }
-}
-
-
-void Genesis::HookUpGlobalProxy(Handle<GlobalObject> inner_global,
- Handle<JSGlobalProxy> global_proxy) {
- // Set the global context for the global object.
- inner_global->set_global_context(*global_context());
- inner_global->set_global_receiver(*global_proxy);
- global_proxy->set_context(*global_context());
- global_context()->set_global_proxy(*global_proxy);
-}
-
-
-void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) {
- Handle<GlobalObject> inner_global_from_snapshot(
- GlobalObject::cast(global_context_->extension()));
- Handle<JSBuiltinsObject> builtins_global(global_context_->builtins());
- global_context_->set_extension(*inner_global);
- global_context_->set_global(*inner_global);
- global_context_->set_security_token(*inner_global);
- static const PropertyAttributes attributes =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- ForceSetProperty(builtins_global,
- FACTORY->LookupAsciiSymbol("global"),
- inner_global,
- attributes);
- // Setup the reference from the global object to the builtins object.
- JSGlobalObject::cast(*inner_global)->set_builtins(*builtins_global);
- TransferNamedProperties(inner_global_from_snapshot, inner_global);
- TransferIndexedProperties(inner_global_from_snapshot, inner_global);
-}
-
-
-// This is only called if we are not using snapshots. The equivalent
-// work in the snapshot case is done in HookUpInnerGlobal.
-void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
- Handle<JSFunction> empty_function) {
- // --- G l o b a l C o n t e x t ---
- // Use the empty function as closure (no scope info).
- global_context()->set_closure(*empty_function);
- global_context()->set_fcontext(*global_context());
- global_context()->set_previous(NULL);
- // Set extension and global object.
- global_context()->set_extension(*inner_global);
- global_context()->set_global(*inner_global);
- // Security setup: Set the security token of the global object to
- // its the inner global. This makes the security check between two
- // different contexts fail by default even in case of global
- // object reinitialization.
- global_context()->set_security_token(*inner_global);
-
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
-
- Handle<String> object_name = Handle<String>(heap->Object_symbol());
- SetLocalPropertyNoThrow(inner_global, object_name,
- isolate->object_function(), DONT_ENUM);
-
- Handle<JSObject> global = Handle<JSObject>(global_context()->global());
-
- // Install global Function object
- InstallFunction(global, "Function", JS_FUNCTION_TYPE, JSFunction::kSize,
- empty_function, Builtins::kIllegal, true); // ECMA native.
-
- { // --- A r r a y ---
- Handle<JSFunction> array_function =
- InstallFunction(global, "Array", JS_ARRAY_TYPE, JSArray::kSize,
- isolate->initial_object_prototype(),
- Builtins::kArrayCode, true);
- array_function->shared()->set_construct_stub(
- isolate->builtins()->builtin(Builtins::kArrayConstructCode));
- array_function->shared()->DontAdaptArguments();
-
- // This seems a bit hackish, but we need to make sure Array.length
- // is 1.
- array_function->shared()->set_length(1);
- Handle<DescriptorArray> array_descriptors =
- factory->CopyAppendProxyDescriptor(
- factory->empty_descriptor_array(),
- factory->length_symbol(),
- factory->NewProxy(&Accessors::ArrayLength),
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
-
- // Cache the fast JavaScript array map
- global_context()->set_js_array_map(array_function->initial_map());
- global_context()->js_array_map()->set_instance_descriptors(
- *array_descriptors);
- // array_function is used internally. JS code creating array object should
- // search for the 'Array' property on the global object and use that one
- // as the constructor. 'Array' property on a global object can be
- // overwritten by JS code.
- global_context()->set_array_function(*array_function);
- }
-
- { // --- N u m b e r ---
- Handle<JSFunction> number_fun =
- InstallFunction(global, "Number", JS_VALUE_TYPE, JSValue::kSize,
- isolate->initial_object_prototype(),
- Builtins::kIllegal, true);
- global_context()->set_number_function(*number_fun);
- }
-
- { // --- B o o l e a n ---
- Handle<JSFunction> boolean_fun =
- InstallFunction(global, "Boolean", JS_VALUE_TYPE, JSValue::kSize,
- isolate->initial_object_prototype(),
- Builtins::kIllegal, true);
- global_context()->set_boolean_function(*boolean_fun);
- }
-
- { // --- S t r i n g ---
- Handle<JSFunction> string_fun =
- InstallFunction(global, "String", JS_VALUE_TYPE, JSValue::kSize,
- isolate->initial_object_prototype(),
- Builtins::kIllegal, true);
- string_fun->shared()->set_construct_stub(
- isolate->builtins()->builtin(Builtins::kStringConstructCode));
- global_context()->set_string_function(*string_fun);
- // Add 'length' property to strings.
- Handle<DescriptorArray> string_descriptors =
- factory->CopyAppendProxyDescriptor(
- factory->empty_descriptor_array(),
- factory->length_symbol(),
- factory->NewProxy(&Accessors::StringLength),
- static_cast<PropertyAttributes>(DONT_ENUM |
- DONT_DELETE |
- READ_ONLY));
-
- Handle<Map> string_map =
- Handle<Map>(global_context()->string_function()->initial_map());
- string_map->set_instance_descriptors(*string_descriptors);
- }
-
- { // --- D a t e ---
- // Builtin functions for Date.prototype.
- Handle<JSFunction> date_fun =
- InstallFunction(global, "Date", JS_VALUE_TYPE, JSValue::kSize,
- isolate->initial_object_prototype(),
- Builtins::kIllegal, true);
-
- global_context()->set_date_function(*date_fun);
- }
-
-
- { // -- R e g E x p
- // Builtin functions for RegExp.prototype.
- Handle<JSFunction> regexp_fun =
- InstallFunction(global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize,
- isolate->initial_object_prototype(),
- Builtins::kIllegal, true);
- global_context()->set_regexp_function(*regexp_fun);
-
- ASSERT(regexp_fun->has_initial_map());
- Handle<Map> initial_map(regexp_fun->initial_map());
-
- ASSERT_EQ(0, initial_map->inobject_properties());
-
- Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(5);
- PropertyAttributes final =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- int enum_index = 0;
- {
- // ECMA-262, section 15.10.7.1.
- FieldDescriptor field(heap->source_symbol(),
- JSRegExp::kSourceFieldIndex,
- final,
- enum_index++);
- descriptors->Set(0, &field);
- }
- {
- // ECMA-262, section 15.10.7.2.
- FieldDescriptor field(heap->global_symbol(),
- JSRegExp::kGlobalFieldIndex,
- final,
- enum_index++);
- descriptors->Set(1, &field);
- }
- {
- // ECMA-262, section 15.10.7.3.
- FieldDescriptor field(heap->ignore_case_symbol(),
- JSRegExp::kIgnoreCaseFieldIndex,
- final,
- enum_index++);
- descriptors->Set(2, &field);
- }
- {
- // ECMA-262, section 15.10.7.4.
- FieldDescriptor field(heap->multiline_symbol(),
- JSRegExp::kMultilineFieldIndex,
- final,
- enum_index++);
- descriptors->Set(3, &field);
- }
- {
- // ECMA-262, section 15.10.7.5.
- PropertyAttributes writable =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- FieldDescriptor field(heap->last_index_symbol(),
- JSRegExp::kLastIndexFieldIndex,
- writable,
- enum_index++);
- descriptors->Set(4, &field);
- }
- descriptors->SetNextEnumerationIndex(enum_index);
- descriptors->Sort();
-
- initial_map->set_inobject_properties(5);
- initial_map->set_pre_allocated_property_fields(5);
- initial_map->set_unused_property_fields(0);
- initial_map->set_instance_size(
- initial_map->instance_size() + 5 * kPointerSize);
- initial_map->set_instance_descriptors(*descriptors);
- initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map));
- }
-
- { // -- J S O N
- Handle<String> name = factory->NewStringFromAscii(CStrVector("JSON"));
- Handle<JSFunction> cons = factory->NewFunction(
- name,
- factory->the_hole_value());
- cons->SetInstancePrototype(global_context()->initial_object_prototype());
- cons->SetInstanceClassName(*name);
- Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
- ASSERT(json_object->IsJSObject());
- SetLocalPropertyNoThrow(global, name, json_object, DONT_ENUM);
- global_context()->set_json_object(*json_object);
- }
-
- { // --- arguments_boilerplate_
- // Make sure we can recognize argument objects at runtime.
- // This is done by introducing an anonymous function with
- // class_name equals 'Arguments'.
- Handle<String> symbol = factory->LookupAsciiSymbol("Arguments");
- Handle<Code> code = Handle<Code>(
- isolate->builtins()->builtin(Builtins::kIllegal));
- Handle<JSObject> prototype =
- Handle<JSObject>(
- JSObject::cast(global_context()->object_function()->prototype()));
-
- Handle<JSFunction> function =
- factory->NewFunctionWithPrototype(symbol,
- JS_OBJECT_TYPE,
- JSObject::kHeaderSize,
- prototype,
- code,
- false);
- ASSERT(!function->has_initial_map());
- function->shared()->set_instance_class_name(*symbol);
- function->shared()->set_expected_nof_properties(2);
- Handle<JSObject> result = factory->NewJSObject(function);
-
- global_context()->set_arguments_boilerplate(*result);
- // Note: length must be added as the first property and
- // callee must be added as the second property.
- SetLocalPropertyNoThrow(result, factory->length_symbol(),
- factory->undefined_value(),
- DONT_ENUM);
- SetLocalPropertyNoThrow(result, factory->callee_symbol(),
- factory->undefined_value(),
- DONT_ENUM);
-
-#ifdef DEBUG
- LookupResult lookup;
- result->LocalLookup(heap->callee_symbol(), &lookup);
- ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
- ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsCalleeIndex);
-
- result->LocalLookup(heap->length_symbol(), &lookup);
- ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
- ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
-
- ASSERT(result->map()->inobject_properties() > Heap::kArgumentsCalleeIndex);
- ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
-
- // Check the state of the object.
- ASSERT(result->HasFastProperties());
- ASSERT(result->HasFastElements());
-#endif
- }
-
- { // --- strict mode arguments boilerplate
- const PropertyAttributes attributes =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- // Create the ThrowTypeError functions.
- Handle<FixedArray> callee = factory->NewFixedArray(2, TENURED);
- Handle<FixedArray> caller = factory->NewFixedArray(2, TENURED);
-
- Handle<JSFunction> callee_throw =
- CreateThrowTypeErrorFunction(Builtins::kStrictArgumentsCallee);
- Handle<JSFunction> caller_throw =
- CreateThrowTypeErrorFunction(Builtins::kStrictArgumentsCaller);
-
- // Install the ThrowTypeError functions.
- callee->set(0, *callee_throw);
- callee->set(1, *callee_throw);
- caller->set(0, *caller_throw);
- caller->set(1, *caller_throw);
-
- // Create the descriptor array for the arguments object.
- Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(3);
- { // length
- FieldDescriptor d(*factory->length_symbol(), 0, DONT_ENUM);
- descriptors->Set(0, &d);
- }
- { // callee
- CallbacksDescriptor d(*factory->callee_symbol(), *callee, attributes);
- descriptors->Set(1, &d);
- }
- { // caller
- CallbacksDescriptor d(*factory->caller_symbol(), *caller, attributes);
- descriptors->Set(2, &d);
- }
- descriptors->Sort();
-
- // Create the map. Allocate one in-object field for length.
- Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE,
- Heap::kArgumentsObjectSizeStrict);
- map->set_instance_descriptors(*descriptors);
- map->set_function_with_prototype(true);
- map->set_prototype(global_context()->object_function()->prototype());
- map->set_pre_allocated_property_fields(1);
- map->set_inobject_properties(1);
-
- // Copy constructor from the non-strict arguments boilerplate.
- map->set_constructor(
- global_context()->arguments_boilerplate()->map()->constructor());
-
- // Allocate the arguments boilerplate object.
- Handle<JSObject> result = factory->NewJSObjectFromMap(map);
- global_context()->set_strict_mode_arguments_boilerplate(*result);
-
- // Add length property only for strict mode boilerplate.
- SetLocalPropertyNoThrow(result, factory->length_symbol(),
- factory->undefined_value(),
- DONT_ENUM);
-
-#ifdef DEBUG
- LookupResult lookup;
- result->LocalLookup(heap->length_symbol(), &lookup);
- ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
- ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
-
- ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
-
- // Check the state of the object.
- ASSERT(result->HasFastProperties());
- ASSERT(result->HasFastElements());
-#endif
- }
-
- { // --- context extension
- // Create a function for the context extension objects.
- Handle<Code> code = Handle<Code>(
- isolate->builtins()->builtin(Builtins::kIllegal));
- Handle<JSFunction> context_extension_fun =
- factory->NewFunction(factory->empty_symbol(),
- JS_CONTEXT_EXTENSION_OBJECT_TYPE,
- JSObject::kHeaderSize,
- code,
- true);
-
- Handle<String> name = factory->LookupAsciiSymbol("context_extension");
- context_extension_fun->shared()->set_instance_class_name(*name);
- global_context()->set_context_extension_function(*context_extension_fun);
- }
-
-
- {
- // Setup the call-as-function delegate.
- Handle<Code> code =
- Handle<Code>(isolate->builtins()->builtin(
- Builtins::kHandleApiCallAsFunction));
- Handle<JSFunction> delegate =
- factory->NewFunction(factory->empty_symbol(), JS_OBJECT_TYPE,
- JSObject::kHeaderSize, code, true);
- global_context()->set_call_as_function_delegate(*delegate);
- delegate->shared()->DontAdaptArguments();
- }
-
- {
- // Setup the call-as-constructor delegate.
- Handle<Code> code =
- Handle<Code>(isolate->builtins()->builtin(
- Builtins::kHandleApiCallAsConstructor));
- Handle<JSFunction> delegate =
- factory->NewFunction(factory->empty_symbol(), JS_OBJECT_TYPE,
- JSObject::kHeaderSize, code, true);
- global_context()->set_call_as_constructor_delegate(*delegate);
- delegate->shared()->DontAdaptArguments();
- }
-
- // Initialize the out of memory slot.
- global_context()->set_out_of_memory(heap->false_value());
-
- // Initialize the data slot.
- global_context()->set_data(heap->undefined_value());
-}
-
-
-bool Genesis::CompileBuiltin(int index) {
- Vector<const char> name = Natives::GetScriptName(index);
- Handle<String> source_code =
- Isolate::Current()->bootstrapper()->NativesSourceLookup(index);
- return CompileNative(name, source_code);
-}
-
-
-bool Genesis::CompileNative(Vector<const char> name, Handle<String> source) {
- HandleScope scope;
- Isolate* isolate = Isolate::Current();
-#ifdef ENABLE_DEBUGGER_SUPPORT
- isolate->debugger()->set_compiling_natives(true);
-#endif
- bool result = CompileScriptCached(name,
- source,
- NULL,
- NULL,
- Handle<Context>(isolate->context()),
- true);
- ASSERT(isolate->has_pending_exception() != result);
- if (!result) isolate->clear_pending_exception();
-#ifdef ENABLE_DEBUGGER_SUPPORT
- isolate->debugger()->set_compiling_natives(false);
-#endif
- return result;
-}
-
-
-bool Genesis::CompileScriptCached(Vector<const char> name,
- Handle<String> source,
- SourceCodeCache* cache,
- v8::Extension* extension,
- Handle<Context> top_context,
- bool use_runtime_context) {
- Factory* factory = Isolate::Current()->factory();
- HandleScope scope;
- Handle<SharedFunctionInfo> function_info;
-
- // If we can't find the function in the cache, we compile a new
- // function and insert it into the cache.
- if (cache == NULL || !cache->Lookup(name, &function_info)) {
- ASSERT(source->IsAsciiRepresentation());
- Handle<String> script_name = factory->NewStringFromUtf8(name);
- function_info = Compiler::Compile(
- source,
- script_name,
- 0,
- 0,
- extension,
- NULL,
- Handle<String>::null(),
- use_runtime_context ? NATIVES_CODE : NOT_NATIVES_CODE);
- if (function_info.is_null()) return false;
- if (cache != NULL) cache->Add(name, function_info);
- }
-
- // Setup the function context. Conceptually, we should clone the
- // function before overwriting the context but since we're in a
- // single-threaded environment it is not strictly necessary.
- ASSERT(top_context->IsGlobalContext());
- Handle<Context> context =
- Handle<Context>(use_runtime_context
- ? Handle<Context>(top_context->runtime_context())
- : top_context);
- Handle<JSFunction> fun =
- factory->NewFunctionFromSharedFunctionInfo(function_info, context);
-
- // Call function using either the runtime object or the global
- // object as the receiver. Provide no parameters.
- Handle<Object> receiver =
- Handle<Object>(use_runtime_context
- ? top_context->builtins()
- : top_context->global());
- bool has_pending_exception;
- Handle<Object> result =
- Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
- if (has_pending_exception) return false;
- return true;
-}
-
-
-#define INSTALL_NATIVE(Type, name, var) \
- Handle<String> var##_name = factory->LookupAsciiSymbol(name); \
- Object* var##_native = \
- global_context()->builtins()->GetPropertyNoExceptionThrown(*var##_name); \
- global_context()->set_##var(Type::cast(var##_native));
-
-
-void Genesis::InstallNativeFunctions() {
- Factory* factory = Isolate::Current()->factory();
- HandleScope scope;
- INSTALL_NATIVE(JSFunction, "CreateDate", create_date_fun);
- INSTALL_NATIVE(JSFunction, "ToNumber", to_number_fun);
- INSTALL_NATIVE(JSFunction, "ToString", to_string_fun);
- INSTALL_NATIVE(JSFunction, "ToDetailString", to_detail_string_fun);
- INSTALL_NATIVE(JSFunction, "ToObject", to_object_fun);
- INSTALL_NATIVE(JSFunction, "ToInteger", to_integer_fun);
- INSTALL_NATIVE(JSFunction, "ToUint32", to_uint32_fun);
- INSTALL_NATIVE(JSFunction, "ToInt32", to_int32_fun);
- INSTALL_NATIVE(JSFunction, "GlobalEval", global_eval_fun);
- INSTALL_NATIVE(JSFunction, "Instantiate", instantiate_fun);
- INSTALL_NATIVE(JSFunction, "ConfigureTemplateInstance",
- configure_instance_fun);
- INSTALL_NATIVE(JSFunction, "GetStackTraceLine", get_stack_trace_line_fun);
- INSTALL_NATIVE(JSObject, "functionCache", function_cache);
-}
-
-#undef INSTALL_NATIVE
-
-
-bool Genesis::InstallNatives() {
- HandleScope scope;
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
-
- // Create a function for the builtins object. Allocate space for the
- // JavaScript builtins, a reference to the builtins object
- // (itself) and a reference to the global_context directly in the object.
- Handle<Code> code = Handle<Code>(
- isolate->builtins()->builtin(Builtins::kIllegal));
- Handle<JSFunction> builtins_fun =
- factory->NewFunction(factory->empty_symbol(), JS_BUILTINS_OBJECT_TYPE,
- JSBuiltinsObject::kSize, code, true);
-
- Handle<String> name = factory->LookupAsciiSymbol("builtins");
- builtins_fun->shared()->set_instance_class_name(*name);
-
- // Allocate the builtins object.
- Handle<JSBuiltinsObject> builtins =
- Handle<JSBuiltinsObject>::cast(factory->NewGlobalObject(builtins_fun));
- builtins->set_builtins(*builtins);
- builtins->set_global_context(*global_context());
- builtins->set_global_receiver(*builtins);
-
- // Setup the 'global' properties of the builtins object. The
- // 'global' property that refers to the global object is the only
- // way to get from code running in the builtins context to the
- // global object.
- static const PropertyAttributes attributes =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- Handle<String> global_symbol = factory->LookupAsciiSymbol("global");
- Handle<Object> global_obj(global_context()->global());
- SetLocalPropertyNoThrow(builtins, global_symbol, global_obj, attributes);
-
- // Setup the reference from the global object to the builtins object.
- JSGlobalObject::cast(global_context()->global())->set_builtins(*builtins);
-
- // Create a bridge function that has context in the global context.
- Handle<JSFunction> bridge =
- factory->NewFunction(factory->empty_symbol(), factory->undefined_value());
- ASSERT(bridge->context() == *isolate->global_context());
-
- // Allocate the builtins context.
- Handle<Context> context =
- factory->NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
- context->set_global(*builtins); // override builtins global object
-
- global_context()->set_runtime_context(*context);
-
- { // -- S c r i p t
- // Builtin functions for Script.
- Handle<JSFunction> script_fun =
- InstallFunction(builtins, "Script", JS_VALUE_TYPE, JSValue::kSize,
- isolate->initial_object_prototype(),
- Builtins::kIllegal, false);
- Handle<JSObject> prototype =
- factory->NewJSObject(isolate->object_function(), TENURED);
- SetPrototype(script_fun, prototype);
- global_context()->set_script_function(*script_fun);
-
- // Add 'source' and 'data' property to scripts.
- PropertyAttributes common_attributes =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- Handle<Proxy> proxy_source = factory->NewProxy(&Accessors::ScriptSource);
- Handle<DescriptorArray> script_descriptors =
- factory->CopyAppendProxyDescriptor(
- factory->empty_descriptor_array(),
- factory->LookupAsciiSymbol("source"),
- proxy_source,
- common_attributes);
- Handle<Proxy> proxy_name = factory->NewProxy(&Accessors::ScriptName);
- script_descriptors =
- factory->CopyAppendProxyDescriptor(
- script_descriptors,
- factory->LookupAsciiSymbol("name"),
- proxy_name,
- common_attributes);
- Handle<Proxy> proxy_id = factory->NewProxy(&Accessors::ScriptId);
- script_descriptors =
- factory->CopyAppendProxyDescriptor(
- script_descriptors,
- factory->LookupAsciiSymbol("id"),
- proxy_id,
- common_attributes);
- Handle<Proxy> proxy_line_offset =
- factory->NewProxy(&Accessors::ScriptLineOffset);
- script_descriptors =
- factory->CopyAppendProxyDescriptor(
- script_descriptors,
- factory->LookupAsciiSymbol("line_offset"),
- proxy_line_offset,
- common_attributes);
- Handle<Proxy> proxy_column_offset =
- factory->NewProxy(&Accessors::ScriptColumnOffset);
- script_descriptors =
- factory->CopyAppendProxyDescriptor(
- script_descriptors,
- factory->LookupAsciiSymbol("column_offset"),
- proxy_column_offset,
- common_attributes);
- Handle<Proxy> proxy_data = factory->NewProxy(&Accessors::ScriptData);
- script_descriptors =
- factory->CopyAppendProxyDescriptor(
- script_descriptors,
- factory->LookupAsciiSymbol("data"),
- proxy_data,
- common_attributes);
- Handle<Proxy> proxy_type = factory->NewProxy(&Accessors::ScriptType);
- script_descriptors =
- factory->CopyAppendProxyDescriptor(
- script_descriptors,
- factory->LookupAsciiSymbol("type"),
- proxy_type,
- common_attributes);
- Handle<Proxy> proxy_compilation_type =
- factory->NewProxy(&Accessors::ScriptCompilationType);
- script_descriptors =
- factory->CopyAppendProxyDescriptor(
- script_descriptors,
- factory->LookupAsciiSymbol("compilation_type"),
- proxy_compilation_type,
- common_attributes);
- Handle<Proxy> proxy_line_ends =
- factory->NewProxy(&Accessors::ScriptLineEnds);
- script_descriptors =
- factory->CopyAppendProxyDescriptor(
- script_descriptors,
- factory->LookupAsciiSymbol("line_ends"),
- proxy_line_ends,
- common_attributes);
- Handle<Proxy> proxy_context_data =
- factory->NewProxy(&Accessors::ScriptContextData);
- script_descriptors =
- factory->CopyAppendProxyDescriptor(
- script_descriptors,
- factory->LookupAsciiSymbol("context_data"),
- proxy_context_data,
- common_attributes);
- Handle<Proxy> proxy_eval_from_script =
- factory->NewProxy(&Accessors::ScriptEvalFromScript);
- script_descriptors =
- factory->CopyAppendProxyDescriptor(
- script_descriptors,
- factory->LookupAsciiSymbol("eval_from_script"),
- proxy_eval_from_script,
- common_attributes);
- Handle<Proxy> proxy_eval_from_script_position =
- factory->NewProxy(&Accessors::ScriptEvalFromScriptPosition);
- script_descriptors =
- factory->CopyAppendProxyDescriptor(
- script_descriptors,
- factory->LookupAsciiSymbol("eval_from_script_position"),
- proxy_eval_from_script_position,
- common_attributes);
- Handle<Proxy> proxy_eval_from_function_name =
- factory->NewProxy(&Accessors::ScriptEvalFromFunctionName);
- script_descriptors =
- factory->CopyAppendProxyDescriptor(
- script_descriptors,
- factory->LookupAsciiSymbol("eval_from_function_name"),
- proxy_eval_from_function_name,
- common_attributes);
-
- Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
- script_map->set_instance_descriptors(*script_descriptors);
-
- // Allocate the empty script.
- Handle<Script> script = factory->NewScript(factory->empty_string());
- script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
- heap->public_set_empty_script(*script);
- }
- {
- // Builtin function for OpaqueReference -- a JSValue-based object,
- // that keeps its field isolated from JavaScript code. It may store
- // objects, that JavaScript code may not access.
- Handle<JSFunction> opaque_reference_fun =
- InstallFunction(builtins, "OpaqueReference", JS_VALUE_TYPE,
- JSValue::kSize,
- isolate->initial_object_prototype(),
- Builtins::kIllegal, false);
- Handle<JSObject> prototype =
- factory->NewJSObject(isolate->object_function(), TENURED);
- SetPrototype(opaque_reference_fun, prototype);
- global_context()->set_opaque_reference_function(*opaque_reference_fun);
- }
-
- { // --- I n t e r n a l A r r a y ---
- // An array constructor on the builtins object that works like
- // the public Array constructor, except that its prototype
- // doesn't inherit from Object.prototype.
- // To be used only for internal work by builtins. Instances
- // must not be leaked to user code.
- // Only works correctly when called as a constructor. The normal
- // Array code uses Array.prototype as prototype when called as
- // a function.
- Handle<JSFunction> array_function =
- InstallFunction(builtins,
- "InternalArray",
- JS_ARRAY_TYPE,
- JSArray::kSize,
- isolate->initial_object_prototype(),
- Builtins::kArrayCode,
- true);
- Handle<JSObject> prototype =
- factory->NewJSObject(isolate->object_function(), TENURED);
- SetPrototype(array_function, prototype);
-
- array_function->shared()->set_construct_stub(
- isolate->builtins()->builtin(Builtins::kArrayConstructCode));
- array_function->shared()->DontAdaptArguments();
-
- // Make "length" magic on instances.
- Handle<DescriptorArray> array_descriptors =
- factory->CopyAppendProxyDescriptor(
- factory->empty_descriptor_array(),
- factory->length_symbol(),
- factory->NewProxy(&Accessors::ArrayLength),
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
-
- array_function->initial_map()->set_instance_descriptors(
- *array_descriptors);
- }
-
- if (FLAG_disable_native_files) {
- PrintF("Warning: Running without installed natives!\n");
- return true;
- }
-
- // Install natives.
- for (int i = Natives::GetDebuggerCount();
- i < Natives::GetBuiltinsCount();
- i++) {
- Vector<const char> name = Natives::GetScriptName(i);
- if (!CompileBuiltin(i)) return false;
- // TODO(ager): We really only need to install the JS builtin
- // functions on the builtins object after compiling and running
- // runtime.js.
- if (!InstallJSBuiltins(builtins)) return false;
- }
-
- InstallNativeFunctions();
-
- // Store the map for the string prototype after the natives has been compiled
- // and the String function has been setup.
- Handle<JSFunction> string_function(global_context()->string_function());
- ASSERT(JSObject::cast(
- string_function->initial_map()->prototype())->HasFastProperties());
- global_context()->set_string_function_prototype_map(
- HeapObject::cast(string_function->initial_map()->prototype())->map());
-
- InstallBuiltinFunctionIds();
-
- // Install Function.prototype.call and apply.
- { Handle<String> key = factory->function_class_symbol();
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(GetProperty(isolate->global(), key));
- Handle<JSObject> proto =
- Handle<JSObject>(JSObject::cast(function->instance_prototype()));
-
- // Install the call and the apply functions.
- Handle<JSFunction> call =
- InstallFunction(proto, "call", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- Handle<JSObject>::null(),
- Builtins::kFunctionCall,
- false);
- Handle<JSFunction> apply =
- InstallFunction(proto, "apply", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- Handle<JSObject>::null(),
- Builtins::kFunctionApply,
- false);
-
- // Make sure that Function.prototype.call appears to be compiled.
- // The code will never be called, but inline caching for call will
- // only work if it appears to be compiled.
- call->shared()->DontAdaptArguments();
- ASSERT(call->is_compiled());
-
- // Set the expected parameters for apply to 2; required by builtin.
- apply->shared()->set_formal_parameter_count(2);
-
- // Set the lengths for the functions to satisfy ECMA-262.
- call->shared()->set_length(1);
- apply->shared()->set_length(2);
- }
-
- // Create a constructor for RegExp results (a variant of Array that
- // predefines the two properties index and match).
- {
- // RegExpResult initial map.
-
- // Find global.Array.prototype to inherit from.
- Handle<JSFunction> array_constructor(global_context()->array_function());
- Handle<JSObject> array_prototype(
- JSObject::cast(array_constructor->instance_prototype()));
-
- // Add initial map.
- Handle<Map> initial_map =
- factory->NewMap(JS_ARRAY_TYPE, JSRegExpResult::kSize);
- initial_map->set_constructor(*array_constructor);
-
- // Set prototype on map.
- initial_map->set_non_instance_prototype(false);
- initial_map->set_prototype(*array_prototype);
-
- // Update map with length accessor from Array and add "index" and "input".
- Handle<Map> array_map(global_context()->js_array_map());
- Handle<DescriptorArray> array_descriptors(
- array_map->instance_descriptors());
- ASSERT_EQ(1, array_descriptors->number_of_descriptors());
-
- Handle<DescriptorArray> reresult_descriptors =
- factory->NewDescriptorArray(3);
-
- reresult_descriptors->CopyFrom(0, *array_descriptors, 0);
-
- int enum_index = 0;
- {
- FieldDescriptor index_field(heap->index_symbol(),
- JSRegExpResult::kIndexIndex,
- NONE,
- enum_index++);
- reresult_descriptors->Set(1, &index_field);
- }
-
- {
- FieldDescriptor input_field(heap->input_symbol(),
- JSRegExpResult::kInputIndex,
- NONE,
- enum_index++);
- reresult_descriptors->Set(2, &input_field);
- }
- reresult_descriptors->Sort();
-
- initial_map->set_inobject_properties(2);
- initial_map->set_pre_allocated_property_fields(2);
- initial_map->set_unused_property_fields(0);
- initial_map->set_instance_descriptors(*reresult_descriptors);
-
- global_context()->set_regexp_result_map(*initial_map);
- }
-
-
-#ifdef DEBUG
- builtins->Verify();
-#endif
-
- return true;
-}
-
-
-static Handle<JSObject> ResolveBuiltinIdHolder(
- Handle<Context> global_context,
- const char* holder_expr) {
- Factory* factory = Isolate::Current()->factory();
- Handle<GlobalObject> global(global_context->global());
- const char* period_pos = strchr(holder_expr, '.');
- if (period_pos == NULL) {
- return Handle<JSObject>::cast(
- GetProperty(global, factory->LookupAsciiSymbol(holder_expr)));
- }
- ASSERT_EQ(".prototype", period_pos);
- Vector<const char> property(holder_expr,
- static_cast<int>(period_pos - holder_expr));
- Handle<JSFunction> function = Handle<JSFunction>::cast(
- GetProperty(global, factory->LookupSymbol(property)));
- return Handle<JSObject>(JSObject::cast(function->prototype()));
-}
-
-
-static void InstallBuiltinFunctionId(Handle<JSObject> holder,
- const char* function_name,
- BuiltinFunctionId id) {
- Handle<String> name = FACTORY->LookupAsciiSymbol(function_name);
- Object* function_object = holder->GetProperty(*name)->ToObjectUnchecked();
- Handle<JSFunction> function(JSFunction::cast(function_object));
- function->shared()->set_function_data(Smi::FromInt(id));
-}
-
-
-void Genesis::InstallBuiltinFunctionIds() {
- HandleScope scope;
-#define INSTALL_BUILTIN_ID(holder_expr, fun_name, name) \
- { \
- Handle<JSObject> holder = ResolveBuiltinIdHolder( \
- global_context(), #holder_expr); \
- BuiltinFunctionId id = k##name; \
- InstallBuiltinFunctionId(holder, #fun_name, id); \
- }
- FUNCTIONS_WITH_ID_LIST(INSTALL_BUILTIN_ID)
-#undef INSTALL_BUILTIN_ID
-}
-
-
-// Do not forget to update macros.py with named constant
-// of cache id.
-#define JSFUNCTION_RESULT_CACHE_LIST(F) \
- F(16, global_context()->regexp_function())
-
-
-static FixedArray* CreateCache(int size, JSFunction* factory) {
- // Caches are supposed to live for a long time, allocate in old space.
- int array_size = JSFunctionResultCache::kEntriesIndex + 2 * size;
- // Cannot use cast as object is not fully initialized yet.
- JSFunctionResultCache* cache = reinterpret_cast<JSFunctionResultCache*>(
- *FACTORY->NewFixedArrayWithHoles(array_size, TENURED));
- cache->set(JSFunctionResultCache::kFactoryIndex, factory);
- cache->MakeZeroSize();
- return cache;
-}
-
-
-void Genesis::InstallJSFunctionResultCaches() {
- const int kNumberOfCaches = 0 +
-#define F(size, func) + 1
- JSFUNCTION_RESULT_CACHE_LIST(F)
-#undef F
- ;
-
- Handle<FixedArray> caches = FACTORY->NewFixedArray(kNumberOfCaches, TENURED);
-
- int index = 0;
-
-#define F(size, func) do { \
- FixedArray* cache = CreateCache((size), (func)); \
- caches->set(index++, cache); \
- } while (false)
-
- JSFUNCTION_RESULT_CACHE_LIST(F);
-
-#undef F
-
- global_context()->set_jsfunction_result_caches(*caches);
-}
-
-
-void Genesis::InitializeNormalizedMapCaches() {
- Handle<FixedArray> array(
- FACTORY->NewFixedArray(NormalizedMapCache::kEntries, TENURED));
- global_context()->set_normalized_map_cache(NormalizedMapCache::cast(*array));
-}
-
-
-bool Bootstrapper::InstallExtensions(Handle<Context> global_context,
- v8::ExtensionConfiguration* extensions) {
- Isolate* isolate = Isolate::Current();
- BootstrapperActive active;
- SaveContext saved_context(isolate);
- isolate->set_context(*global_context);
- if (!Genesis::InstallExtensions(global_context, extensions)) return false;
- Genesis::InstallSpecialObjects(global_context);
- return true;
-}
-
-
-void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
- Factory* factory = Isolate::Current()->factory();
- HandleScope scope;
- Handle<JSGlobalObject> js_global(
- JSGlobalObject::cast(global_context->global()));
- // Expose the natives in global if a name for it is specified.
- if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
- Handle<String> natives_string =
- factory->LookupAsciiSymbol(FLAG_expose_natives_as);
- SetLocalPropertyNoThrow(js_global, natives_string,
- Handle<JSObject>(js_global->builtins()), DONT_ENUM);
- }
-
- Handle<Object> Error = GetProperty(js_global, "Error");
- if (Error->IsJSObject()) {
- Handle<String> name = factory->LookupAsciiSymbol("stackTraceLimit");
- SetLocalPropertyNoThrow(Handle<JSObject>::cast(Error),
- name,
- Handle<Smi>(Smi::FromInt(FLAG_stack_trace_limit)),
- NONE);
- }
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Expose the debug global object in global if a name for it is specified.
- if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
- Debug* debug = Isolate::Current()->debug();
- // If loading fails we just bail out without installing the
- // debugger but without tanking the whole context.
- if (!debug->Load()) return;
- // Set the security token for the debugger context to the same as
- // the shell global context to allow calling between these (otherwise
- // exposing debug global object doesn't make much sense).
- debug->debug_context()->set_security_token(
- global_context->security_token());
-
- Handle<String> debug_string =
- factory->LookupAsciiSymbol(FLAG_expose_debug_as);
- Handle<Object> global_proxy(debug->debug_context()->global_proxy());
- SetLocalPropertyNoThrow(js_global, debug_string, global_proxy, DONT_ENUM);
- }
-#endif
-}
-
-
-bool Genesis::InstallExtensions(Handle<Context> global_context,
- v8::ExtensionConfiguration* extensions) {
- // TODO(isolates): Extensions on multiple isolates may take a little more
- // effort. (The external API reads 'ignore'-- does that mean
- // we can break the interface?)
-
- // Clear coloring of extension list
- v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
- while (current != NULL) {
- current->set_state(v8::UNVISITED);
- current = current->next();
- }
- // Install auto extensions.
- current = v8::RegisteredExtension::first_extension();
- while (current != NULL) {
- if (current->extension()->auto_enable())
- InstallExtension(current);
- current = current->next();
- }
-
- if (FLAG_expose_gc) InstallExtension("v8/gc");
- if (FLAG_expose_externalize_string) InstallExtension("v8/externalize");
-
- if (extensions == NULL) return true;
- // Install required extensions
- int count = v8::ImplementationUtilities::GetNameCount(extensions);
- const char** names = v8::ImplementationUtilities::GetNames(extensions);
- for (int i = 0; i < count; i++) {
- if (!InstallExtension(names[i]))
- return false;
- }
-
- return true;
-}
-
-
-// Installs a named extension. This methods is unoptimized and does
-// not scale well if we want to support a large number of extensions.
-bool Genesis::InstallExtension(const char* name) {
- v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
- // Loop until we find the relevant extension
- while (current != NULL) {
- if (strcmp(name, current->extension()->name()) == 0) break;
- current = current->next();
- }
- // Didn't find the extension; fail.
- if (current == NULL) {
- v8::Utils::ReportApiFailure(
- "v8::Context::New()", "Cannot find required extension");
- return false;
- }
- return InstallExtension(current);
-}
-
-
-bool Genesis::InstallExtension(v8::RegisteredExtension* current) {
- HandleScope scope;
-
- if (current->state() == v8::INSTALLED) return true;
- // The current node has already been visited so there must be a
- // cycle in the dependency graph; fail.
- if (current->state() == v8::VISITED) {
- v8::Utils::ReportApiFailure(
- "v8::Context::New()", "Circular extension dependency");
- return false;
- }
- ASSERT(current->state() == v8::UNVISITED);
- current->set_state(v8::VISITED);
- v8::Extension* extension = current->extension();
- // Install the extension's dependencies
- for (int i = 0; i < extension->dependency_count(); i++) {
- if (!InstallExtension(extension->dependencies()[i])) return false;
- }
- Isolate* isolate = Isolate::Current();
- Vector<const char> source = CStrVector(extension->source());
- Handle<String> source_code = isolate->factory()->NewStringFromAscii(source);
- bool result = CompileScriptCached(CStrVector(extension->name()),
- source_code,
- isolate->bootstrapper()->extensions_cache(),
- extension,
- Handle<Context>(isolate->context()),
- false);
- ASSERT(isolate->has_pending_exception() != result);
- if (!result) {
- isolate->clear_pending_exception();
- }
- current->set_state(v8::INSTALLED);
- return result;
-}
-
-
-bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
- HandleScope scope;
- for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
- Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
- Handle<String> name = FACTORY->LookupAsciiSymbol(Builtins::GetName(id));
- Object* function_object = builtins->GetPropertyNoExceptionThrown(*name);
- Handle<JSFunction> function
- = Handle<JSFunction>(JSFunction::cast(function_object));
- builtins->set_javascript_builtin(id, *function);
- Handle<SharedFunctionInfo> shared
- = Handle<SharedFunctionInfo>(function->shared());
- if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
- // Set the code object on the function object.
- function->ReplaceCode(function->shared()->code());
- builtins->set_javascript_builtin_code(id, shared->code());
- }
- return true;
-}
-
-
-bool Genesis::ConfigureGlobalObjects(
- v8::Handle<v8::ObjectTemplate> global_proxy_template) {
- Handle<JSObject> global_proxy(
- JSObject::cast(global_context()->global_proxy()));
- Handle<JSObject> inner_global(JSObject::cast(global_context()->global()));
-
- if (!global_proxy_template.IsEmpty()) {
- // Configure the global proxy object.
- Handle<ObjectTemplateInfo> proxy_data =
- v8::Utils::OpenHandle(*global_proxy_template);
- if (!ConfigureApiObject(global_proxy, proxy_data)) return false;
-
- // Configure the inner global object.
- Handle<FunctionTemplateInfo> proxy_constructor(
- FunctionTemplateInfo::cast(proxy_data->constructor()));
- if (!proxy_constructor->prototype_template()->IsUndefined()) {
- Handle<ObjectTemplateInfo> inner_data(
- ObjectTemplateInfo::cast(proxy_constructor->prototype_template()));
- if (!ConfigureApiObject(inner_global, inner_data)) return false;
- }
- }
-
- SetObjectPrototype(global_proxy, inner_global);
- return true;
-}
-
-
-bool Genesis::ConfigureApiObject(Handle<JSObject> object,
- Handle<ObjectTemplateInfo> object_template) {
- ASSERT(!object_template.is_null());
- ASSERT(object->IsInstanceOf(
- FunctionTemplateInfo::cast(object_template->constructor())));
-
- Isolate* isolate = Isolate::Current();
- bool pending_exception = false;
- Handle<JSObject> obj =
- Execution::InstantiateObject(object_template, &pending_exception);
- if (pending_exception) {
- ASSERT(isolate->has_pending_exception());
- isolate->clear_pending_exception();
- return false;
- }
- TransferObject(obj, object);
- return true;
-}
-
-
-void Genesis::TransferNamedProperties(Handle<JSObject> from,
- Handle<JSObject> to) {
- if (from->HasFastProperties()) {
- Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(from->map()->instance_descriptors());
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- PropertyDetails details = PropertyDetails(descs->GetDetails(i));
- switch (details.type()) {
- case FIELD: {
- HandleScope inner;
- Handle<String> key = Handle<String>(descs->GetKey(i));
- int index = descs->GetFieldIndex(i);
- Handle<Object> value = Handle<Object>(from->FastPropertyAt(index));
- SetLocalPropertyNoThrow(to, key, value, details.attributes());
- break;
- }
- case CONSTANT_FUNCTION: {
- HandleScope inner;
- Handle<String> key = Handle<String>(descs->GetKey(i));
- Handle<JSFunction> fun =
- Handle<JSFunction>(descs->GetConstantFunction(i));
- SetLocalPropertyNoThrow(to, key, fun, details.attributes());
- break;
- }
- case CALLBACKS: {
- LookupResult result;
- to->LocalLookup(descs->GetKey(i), &result);
- // If the property is already there we skip it
- if (result.IsProperty()) continue;
- HandleScope inner;
- ASSERT(!to->HasFastProperties());
- // Add to dictionary.
- Handle<String> key = Handle<String>(descs->GetKey(i));
- Handle<Object> callbacks(descs->GetCallbacksObject(i));
- PropertyDetails d =
- PropertyDetails(details.attributes(), CALLBACKS, details.index());
- SetNormalizedProperty(to, key, callbacks, d);
- break;
- }
- case MAP_TRANSITION:
- case EXTERNAL_ARRAY_TRANSITION:
- case CONSTANT_TRANSITION:
- case NULL_DESCRIPTOR:
- // Ignore non-properties.
- break;
- case NORMAL:
- // Do not occur since the from object has fast properties.
- case INTERCEPTOR:
- // No element in instance descriptors have interceptor type.
- UNREACHABLE();
- break;
- }
- }
- } else {
- Handle<StringDictionary> properties =
- Handle<StringDictionary>(from->property_dictionary());
- int capacity = properties->Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* raw_key(properties->KeyAt(i));
- if (properties->IsKey(raw_key)) {
- ASSERT(raw_key->IsString());
- // If the property is already there we skip it.
- LookupResult result;
- to->LocalLookup(String::cast(raw_key), &result);
- if (result.IsProperty()) continue;
- // Set the property.
- Handle<String> key = Handle<String>(String::cast(raw_key));
- Handle<Object> value = Handle<Object>(properties->ValueAt(i));
- if (value->IsJSGlobalPropertyCell()) {
- value = Handle<Object>(JSGlobalPropertyCell::cast(*value)->value());
- }
- PropertyDetails details = properties->DetailsAt(i);
- SetLocalPropertyNoThrow(to, key, value, details.attributes());
- }
- }
- }
-}
-
-
-void Genesis::TransferIndexedProperties(Handle<JSObject> from,
- Handle<JSObject> to) {
- // Cloning the elements array is sufficient.
- Handle<FixedArray> from_elements =
- Handle<FixedArray>(FixedArray::cast(from->elements()));
- Handle<FixedArray> to_elements = FACTORY->CopyFixedArray(from_elements);
- to->set_elements(*to_elements);
-}
-
-
-void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
- HandleScope outer;
-
- ASSERT(!from->IsJSArray());
- ASSERT(!to->IsJSArray());
-
- TransferNamedProperties(from, to);
- TransferIndexedProperties(from, to);
-
- // Transfer the prototype (new map is needed).
- Handle<Map> old_to_map = Handle<Map>(to->map());
- Handle<Map> new_to_map = FACTORY->CopyMapDropTransitions(old_to_map);
- new_to_map->set_prototype(from->map()->prototype());
- to->set_map(*new_to_map);
-}
-
-
-void Genesis::MakeFunctionInstancePrototypeWritable() {
- // The maps with writable prototype are created in CreateEmptyFunction
- // and CreateStrictModeFunctionMaps respectively. Initially the maps are
- // created with read-only prototype for JS builtins processing.
- ASSERT(!function_instance_map_writable_prototype_.is_null());
- ASSERT(!strict_mode_function_instance_map_writable_prototype_.is_null());
-
- // Replace function instance maps to make prototype writable.
- global_context()->set_function_map(
- *function_instance_map_writable_prototype_);
- global_context()->set_strict_mode_function_map(
- *strict_mode_function_instance_map_writable_prototype_);
-}
-
-
-Genesis::Genesis(Handle<Object> global_object,
- v8::Handle<v8::ObjectTemplate> global_template,
- v8::ExtensionConfiguration* extensions) {
- Isolate* isolate = Isolate::Current();
- result_ = Handle<Context>::null();
- // If V8 isn't running and cannot be initialized, just return.
- if (!V8::IsRunning() && !V8::Initialize(NULL)) return;
-
- // Before creating the roots we must save the context and restore it
- // on all function exits.
- HandleScope scope;
- SaveContext saved_context(isolate);
-
- Handle<Context> new_context = Snapshot::NewContextFromSnapshot();
- if (!new_context.is_null()) {
- global_context_ =
- Handle<Context>::cast(isolate->global_handles()->Create(*new_context));
- AddToWeakGlobalContextList(*global_context_);
- isolate->set_context(*global_context_);
- isolate->counters()->contexts_created_by_snapshot()->Increment();
- Handle<GlobalObject> inner_global;
- Handle<JSGlobalProxy> global_proxy =
- CreateNewGlobals(global_template,
- global_object,
- &inner_global);
-
- HookUpGlobalProxy(inner_global, global_proxy);
- HookUpInnerGlobal(inner_global);
-
- if (!ConfigureGlobalObjects(global_template)) return;
- } else {
- // We get here if there was no context snapshot.
- CreateRoots();
- Handle<JSFunction> empty_function = CreateEmptyFunction();
- CreateStrictModeFunctionMaps(empty_function);
- Handle<GlobalObject> inner_global;
- Handle<JSGlobalProxy> global_proxy =
- CreateNewGlobals(global_template, global_object, &inner_global);
- HookUpGlobalProxy(inner_global, global_proxy);
- InitializeGlobal(inner_global, empty_function);
- InstallJSFunctionResultCaches();
- InitializeNormalizedMapCaches();
- if (!InstallNatives()) return;
-
- MakeFunctionInstancePrototypeWritable();
-
- if (!ConfigureGlobalObjects(global_template)) return;
- isolate->counters()->contexts_created_from_scratch()->Increment();
- }
-
- result_ = global_context_;
-}
-
-
-// Support for thread preemption.
-
-// Reserve space for statics needing saving and restoring.
-int Bootstrapper::ArchiveSpacePerThread() {
- return sizeof(NestingCounterType);
-}
-
-
-// Archive statics that are thread local.
-char* Bootstrapper::ArchiveState(char* to) {
- *reinterpret_cast<NestingCounterType*>(to) = nesting_;
- nesting_ = 0;
- return to + sizeof(NestingCounterType);
-}
-
-
-// Restore statics that are thread local.
-char* Bootstrapper::RestoreState(char* from) {
- nesting_ = *reinterpret_cast<NestingCounterType*>(from);
- return from + sizeof(NestingCounterType);
-}
-
-
-// Called when the top-level V8 mutex is destroyed.
-void Bootstrapper::FreeThreadResources() {
- ASSERT(!IsActive());
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/bootstrapper.h b/src/3rdparty/v8/src/bootstrapper.h
deleted file mode 100644
index 3e158d6..0000000
--- a/src/3rdparty/v8/src/bootstrapper.h
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_BOOTSTRAPPER_H_
-#define V8_BOOTSTRAPPER_H_
-
-namespace v8 {
-namespace internal {
-
-
-// A SourceCodeCache uses a FixedArray to store pairs of
-// (AsciiString*, JSFunction*), mapping names of native code files
-// (runtime.js, etc.) to precompiled functions. Instead of mapping
-// names to functions it might make sense to let the JS2C tool
-// generate an index for each native JS file.
-class SourceCodeCache BASE_EMBEDDED {
- public:
- explicit SourceCodeCache(Script::Type type): type_(type), cache_(NULL) { }
-
- void Initialize(bool create_heap_objects) {
- cache_ = create_heap_objects ? HEAP->empty_fixed_array() : NULL;
- }
-
- void Iterate(ObjectVisitor* v) {
- v->VisitPointer(BitCast<Object**, FixedArray**>(&cache_));
- }
-
- bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
- for (int i = 0; i < cache_->length(); i+=2) {
- SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i));
- if (str->IsEqualTo(name)) {
- *handle = Handle<SharedFunctionInfo>(
- SharedFunctionInfo::cast(cache_->get(i + 1)));
- return true;
- }
- }
- return false;
- }
-
- void Add(Vector<const char> name, Handle<SharedFunctionInfo> shared) {
- HandleScope scope;
- int length = cache_->length();
- Handle<FixedArray> new_array =
- FACTORY->NewFixedArray(length + 2, TENURED);
- cache_->CopyTo(0, *new_array, 0, cache_->length());
- cache_ = *new_array;
- Handle<String> str = FACTORY->NewStringFromAscii(name, TENURED);
- cache_->set(length, *str);
- cache_->set(length + 1, *shared);
- Script::cast(shared->script())->set_type(Smi::FromInt(type_));
- }
-
- private:
- Script::Type type_;
- FixedArray* cache_;
- DISALLOW_COPY_AND_ASSIGN(SourceCodeCache);
-};
-
-
-// The Boostrapper is the public interface for creating a JavaScript global
-// context.
-class Bootstrapper {
- public:
- // Requires: Heap::Setup has been called.
- void Initialize(bool create_heap_objects);
- void TearDown();
-
- // Creates a JavaScript Global Context with initial object graph.
- // The returned value is a global handle casted to V8Environment*.
- Handle<Context> CreateEnvironment(
- Handle<Object> global_object,
- v8::Handle<v8::ObjectTemplate> global_template,
- v8::ExtensionConfiguration* extensions);
-
- // Detach the environment from its outer global object.
- void DetachGlobal(Handle<Context> env);
-
- // Reattach an outer global object to an environment.
- void ReattachGlobal(Handle<Context> env, Handle<Object> global_object);
-
- // Traverses the pointers for memory management.
- void Iterate(ObjectVisitor* v);
-
- // Accessor for the native scripts source code.
- Handle<String> NativesSourceLookup(int index);
-
- // Tells whether bootstrapping is active.
- bool IsActive() const { return nesting_ != 0; }
-
- // Support for thread preemption.
- RLYSTC int ArchiveSpacePerThread();
- char* ArchiveState(char* to);
- char* RestoreState(char* from);
- void FreeThreadResources();
-
- // This will allocate a char array that is deleted when V8 is shut down.
- // It should only be used for strictly finite allocations.
- char* AllocateAutoDeletedArray(int bytes);
-
- // Used for new context creation.
- bool InstallExtensions(Handle<Context> global_context,
- v8::ExtensionConfiguration* extensions);
-
- SourceCodeCache* extensions_cache() { return &extensions_cache_; }
-
- private:
- typedef int NestingCounterType;
- NestingCounterType nesting_;
- SourceCodeCache extensions_cache_;
- // This is for delete, not delete[].
- List<char*>* delete_these_non_arrays_on_tear_down_;
- // This is for delete[]
- List<char*>* delete_these_arrays_on_tear_down_;
-
- friend class BootstrapperActive;
- friend class Isolate;
- friend class NativesExternalStringResource;
-
- Bootstrapper();
-
- DISALLOW_COPY_AND_ASSIGN(Bootstrapper);
-};
-
-
-class BootstrapperActive BASE_EMBEDDED {
- public:
- BootstrapperActive() {
- ++Isolate::Current()->bootstrapper()->nesting_;
- }
-
- ~BootstrapperActive() {
- --Isolate::Current()->bootstrapper()->nesting_;
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(BootstrapperActive);
-};
-
-
-class NativesExternalStringResource
- : public v8::String::ExternalAsciiStringResource {
- public:
- explicit NativesExternalStringResource(Bootstrapper* bootstrapper,
- const char* source);
-
- const char* data() const {
- return data_;
- }
-
- size_t length() const {
- return length_;
- }
- private:
- const char* data_;
- size_t length_;
-};
-
-}} // namespace v8::internal
-
-#endif // V8_BOOTSTRAPPER_H_
diff --git a/src/3rdparty/v8/src/builtins.cc b/src/3rdparty/v8/src/builtins.cc
deleted file mode 100644
index ae3dab4..0000000
--- a/src/3rdparty/v8/src/builtins.cc
+++ /dev/null
@@ -1,1708 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "arguments.h"
-#include "bootstrapper.h"
-#include "builtins.h"
-#include "gdb-jit.h"
-#include "ic-inl.h"
-#include "vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-// Arguments object passed to C++ builtins.
-template <BuiltinExtraArguments extra_args>
-class BuiltinArguments : public Arguments {
- public:
- BuiltinArguments(int length, Object** arguments)
- : Arguments(length, arguments) { }
-
- Object*& operator[] (int index) {
- ASSERT(index < length());
- return Arguments::operator[](index);
- }
-
- template <class S> Handle<S> at(int index) {
- ASSERT(index < length());
- return Arguments::at<S>(index);
- }
-
- Handle<Object> receiver() {
- return Arguments::at<Object>(0);
- }
-
- Handle<JSFunction> called_function() {
- STATIC_ASSERT(extra_args == NEEDS_CALLED_FUNCTION);
- return Arguments::at<JSFunction>(Arguments::length() - 1);
- }
-
- // Gets the total number of arguments including the receiver (but
- // excluding extra arguments).
- int length() const {
- STATIC_ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
- return Arguments::length();
- }
-
-#ifdef DEBUG
- void Verify() {
- // Check we have at least the receiver.
- ASSERT(Arguments::length() >= 1);
- }
-#endif
-};
-
-
-// Specialize BuiltinArguments for the called function extra argument.
-
-template <>
-int BuiltinArguments<NEEDS_CALLED_FUNCTION>::length() const {
- return Arguments::length() - 1;
-}
-
-#ifdef DEBUG
-template <>
-void BuiltinArguments<NEEDS_CALLED_FUNCTION>::Verify() {
- // Check we have at least the receiver and the called function.
- ASSERT(Arguments::length() >= 2);
- // Make sure cast to JSFunction succeeds.
- called_function();
-}
-#endif
-
-
-#define DEF_ARG_TYPE(name, spec) \
- typedef BuiltinArguments<spec> name##ArgumentsType;
-BUILTIN_LIST_C(DEF_ARG_TYPE)
-#undef DEF_ARG_TYPE
-
-} // namespace
-
-// ----------------------------------------------------------------------------
-// Support macro for defining builtins in C++.
-// ----------------------------------------------------------------------------
-//
-// A builtin function is defined by writing:
-//
-// BUILTIN(name) {
-// ...
-// }
-//
-// In the body of the builtin function the arguments can be accessed
-// through the BuiltinArguments object args.
-
-#ifdef DEBUG
-
-#define BUILTIN(name) \
- MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
- name##ArgumentsType args, Isolate* isolate); \
- MUST_USE_RESULT static MaybeObject* Builtin_##name( \
- name##ArgumentsType args, Isolate* isolate) { \
- ASSERT(isolate == Isolate::Current()); \
- args.Verify(); \
- return Builtin_Impl_##name(args, isolate); \
- } \
- MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
- name##ArgumentsType args, Isolate* isolate)
-
-#else // For release mode.
-
-#define BUILTIN(name) \
- static MaybeObject* Builtin_##name(name##ArgumentsType args, Isolate* isolate)
-
-#endif
-
-
-static inline bool CalledAsConstructor(Isolate* isolate) {
-#ifdef DEBUG
- // Calculate the result using a full stack frame iterator and check
- // that the state of the stack is as we assume it to be in the
- // code below.
- StackFrameIterator it;
- ASSERT(it.frame()->is_exit());
- it.Advance();
- StackFrame* frame = it.frame();
- bool reference_result = frame->is_construct();
-#endif
- Address fp = Isolate::c_entry_fp(isolate->thread_local_top());
- // Because we know fp points to an exit frame we can use the relevant
- // part of ExitFrame::ComputeCallerState directly.
- const int kCallerOffset = ExitFrameConstants::kCallerFPOffset;
- Address caller_fp = Memory::Address_at(fp + kCallerOffset);
- // This inlines the part of StackFrame::ComputeType that grabs the
- // type of the current frame. Note that StackFrame::ComputeType
- // has been specialized for each architecture so if any one of them
- // changes this code has to be changed as well.
- const int kMarkerOffset = StandardFrameConstants::kMarkerOffset;
- const Smi* kConstructMarker = Smi::FromInt(StackFrame::CONSTRUCT);
- Object* marker = Memory::Object_at(caller_fp + kMarkerOffset);
- bool result = (marker == kConstructMarker);
- ASSERT_EQ(result, reference_result);
- return result;
-}
-
-// ----------------------------------------------------------------------------
-
-BUILTIN(Illegal) {
- UNREACHABLE();
- return isolate->heap()->undefined_value(); // Make compiler happy.
-}
-
-
-BUILTIN(EmptyFunction) {
- return isolate->heap()->undefined_value();
-}
-
-
-BUILTIN(ArrayCodeGeneric) {
- Heap* heap = isolate->heap();
- isolate->counters()->array_function_runtime()->Increment();
-
- JSArray* array;
- if (CalledAsConstructor(isolate)) {
- array = JSArray::cast(*args.receiver());
- } else {
- // Allocate the JS Array
- JSFunction* constructor =
- isolate->context()->global_context()->array_function();
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateJSObject(constructor);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- array = JSArray::cast(obj);
- }
-
- // 'array' now contains the JSArray we should initialize.
- ASSERT(array->HasFastElements());
-
- // Optimize the case where there is one argument and the argument is a
- // small smi.
- if (args.length() == 2) {
- Object* obj = args[1];
- if (obj->IsSmi()) {
- int len = Smi::cast(obj)->value();
- if (len >= 0 && len < JSObject::kInitialMaxFastElementArray) {
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- array->SetContent(FixedArray::cast(obj));
- return array;
- }
- }
- // Take the argument as the length.
- { MaybeObject* maybe_obj = array->Initialize(0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- return array->SetElementsLength(args[1]);
- }
-
- // Optimize the case where there are no parameters passed.
- if (args.length() == 1) {
- return array->Initialize(JSArray::kPreallocatedArrayElements);
- }
-
- // Take the arguments as elements.
- int number_of_elements = args.length() - 1;
- Smi* len = Smi::FromInt(number_of_elements);
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len->value());
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- AssertNoAllocation no_gc;
- FixedArray* elms = FixedArray::cast(obj);
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- // Fill in the content
- for (int index = 0; index < number_of_elements; index++) {
- elms->set(index, args[index+1], mode);
- }
-
- // Set length and elements on the array.
- array->set_elements(FixedArray::cast(obj));
- array->set_length(len);
-
- return array;
-}
-
-
-MUST_USE_RESULT static MaybeObject* AllocateJSArray(Heap* heap) {
- JSFunction* array_function =
- heap->isolate()->context()->global_context()->array_function();
- Object* result;
- { MaybeObject* maybe_result = heap->AllocateJSObject(array_function);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- return result;
-}
-
-
-MUST_USE_RESULT static MaybeObject* AllocateEmptyJSArray(Heap* heap) {
- Object* result;
- { MaybeObject* maybe_result = AllocateJSArray(heap);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- JSArray* result_array = JSArray::cast(result);
- result_array->set_length(Smi::FromInt(0));
- result_array->set_elements(heap->empty_fixed_array());
- return result_array;
-}
-
-
-static void CopyElements(Heap* heap,
- AssertNoAllocation* no_gc,
- FixedArray* dst,
- int dst_index,
- FixedArray* src,
- int src_index,
- int len) {
- ASSERT(dst != src); // Use MoveElements instead.
- ASSERT(dst->map() != HEAP->fixed_cow_array_map());
- ASSERT(len > 0);
- CopyWords(dst->data_start() + dst_index,
- src->data_start() + src_index,
- len);
- WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc);
- if (mode == UPDATE_WRITE_BARRIER) {
- heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
- }
-}
-
-
-static void MoveElements(Heap* heap,
- AssertNoAllocation* no_gc,
- FixedArray* dst,
- int dst_index,
- FixedArray* src,
- int src_index,
- int len) {
- ASSERT(dst->map() != HEAP->fixed_cow_array_map());
- memmove(dst->data_start() + dst_index,
- src->data_start() + src_index,
- len * kPointerSize);
- WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc);
- if (mode == UPDATE_WRITE_BARRIER) {
- heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
- }
-}
-
-
-static void FillWithHoles(Heap* heap, FixedArray* dst, int from, int to) {
- ASSERT(dst->map() != heap->fixed_cow_array_map());
- MemsetPointer(dst->data_start() + from, heap->the_hole_value(), to - from);
-}
-
-
-static FixedArray* LeftTrimFixedArray(Heap* heap,
- FixedArray* elms,
- int to_trim) {
- ASSERT(elms->map() != HEAP->fixed_cow_array_map());
- // For now this trick is only applied to fixed arrays in new and paged space.
- // In large object space the object's start must coincide with chunk
- // and thus the trick is just not applicable.
- ASSERT(!HEAP->lo_space()->Contains(elms));
-
- STATIC_ASSERT(FixedArray::kMapOffset == 0);
- STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize);
- STATIC_ASSERT(FixedArray::kHeaderSize == 2 * kPointerSize);
-
- Object** former_start = HeapObject::RawField(elms, 0);
-
- const int len = elms->length();
-
- if (to_trim > FixedArray::kHeaderSize / kPointerSize &&
- !heap->new_space()->Contains(elms)) {
- // If we are doing a big trim in old space then we zap the space that was
- // formerly part of the array so that the GC (aided by the card-based
- // remembered set) won't find pointers to new-space there.
- Object** zap = reinterpret_cast<Object**>(elms->address());
- zap++; // Header of filler must be at least one word so skip that.
- for (int i = 1; i < to_trim; i++) {
- *zap++ = Smi::FromInt(0);
- }
- }
- // Technically in new space this write might be omitted (except for
- // debug mode which iterates through the heap), but to play safer
- // we still do it.
- heap->CreateFillerObjectAt(elms->address(), to_trim * kPointerSize);
-
- former_start[to_trim] = heap->fixed_array_map();
- former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
-
- return FixedArray::cast(HeapObject::FromAddress(
- elms->address() + to_trim * kPointerSize));
-}
-
-
-static bool ArrayPrototypeHasNoElements(Heap* heap,
- Context* global_context,
- JSObject* array_proto) {
- // This method depends on non writability of Object and Array prototype
- // fields.
- if (array_proto->elements() != heap->empty_fixed_array()) return false;
- // Hidden prototype
- array_proto = JSObject::cast(array_proto->GetPrototype());
- ASSERT(array_proto->elements() == heap->empty_fixed_array());
- // Object.prototype
- Object* proto = array_proto->GetPrototype();
- if (proto == heap->null_value()) return false;
- array_proto = JSObject::cast(proto);
- if (array_proto != global_context->initial_object_prototype()) return false;
- if (array_proto->elements() != heap->empty_fixed_array()) return false;
- ASSERT(array_proto->GetPrototype()->IsNull());
- return true;
-}
-
-
-MUST_USE_RESULT
-static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
- Heap* heap, Object* receiver) {
- if (!receiver->IsJSArray()) return NULL;
- JSArray* array = JSArray::cast(receiver);
- HeapObject* elms = array->elements();
- if (elms->map() == heap->fixed_array_map()) return elms;
- if (elms->map() == heap->fixed_cow_array_map()) {
- return array->EnsureWritableFastElements();
- }
- return NULL;
-}
-
-
-static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
- JSArray* receiver) {
- Context* global_context = heap->isolate()->context()->global_context();
- JSObject* array_proto =
- JSObject::cast(global_context->array_function()->prototype());
- return receiver->GetPrototype() == array_proto &&
- ArrayPrototypeHasNoElements(heap, global_context, array_proto);
-}
-
-
-MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
- Isolate* isolate,
- const char* name,
- BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
- HandleScope handleScope(isolate);
-
- Handle<Object> js_builtin =
- GetProperty(Handle<JSObject>(
- isolate->global_context()->builtins()),
- name);
- ASSERT(js_builtin->IsJSFunction());
- Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin));
- ScopedVector<Object**> argv(args.length() - 1);
- int n_args = args.length() - 1;
- for (int i = 0; i < n_args; i++) {
- argv[i] = args.at<Object>(i + 1).location();
- }
- bool pending_exception = false;
- Handle<Object> result = Execution::Call(function,
- args.receiver(),
- n_args,
- argv.start(),
- &pending_exception);
- if (pending_exception) return Failure::Exception();
- return *result;
-}
-
-
-BUILTIN(ArrayPush) {
- Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver);
- if (maybe_elms_obj == NULL) {
- return CallJsBuiltin(isolate, "ArrayPush", args);
- }
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
- }
- FixedArray* elms = FixedArray::cast(elms_obj);
- JSArray* array = JSArray::cast(receiver);
-
- int len = Smi::cast(array->length())->value();
- int to_add = args.length() - 1;
- if (to_add == 0) {
- return Smi::FromInt(len);
- }
- // Currently fixed arrays cannot grow too big, so
- // we should never hit this case.
- ASSERT(to_add <= (Smi::kMaxValue - len));
-
- int new_length = len + to_add;
-
- if (new_length > elms->length()) {
- // New backing storage is needed.
- int capacity = new_length + (new_length >> 1) + 16;
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* new_elms = FixedArray::cast(obj);
-
- AssertNoAllocation no_gc;
- if (len > 0) {
- CopyElements(heap, &no_gc, new_elms, 0, elms, 0, len);
- }
- FillWithHoles(heap, new_elms, new_length, capacity);
-
- elms = new_elms;
- array->set_elements(elms);
- }
-
- // Add the provided values.
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- for (int index = 0; index < to_add; index++) {
- elms->set(index + len, args[index + 1], mode);
- }
-
- // Set the length.
- array->set_length(Smi::FromInt(new_length));
- return Smi::FromInt(new_length);
-}
-
-
-BUILTIN(ArrayPop) {
- Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver);
- if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
- }
- FixedArray* elms = FixedArray::cast(elms_obj);
- JSArray* array = JSArray::cast(receiver);
-
- int len = Smi::cast(array->length())->value();
- if (len == 0) return heap->undefined_value();
-
- // Get top element
- MaybeObject* top = elms->get(len - 1);
-
- // Set the length.
- array->set_length(Smi::FromInt(len - 1));
-
- if (!top->IsTheHole()) {
- // Delete the top element.
- elms->set_the_hole(len - 1);
- return top;
- }
-
- top = array->GetPrototype()->GetElement(len - 1);
-
- return top;
-}
-
-
-BUILTIN(ArrayShift) {
- Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver);
- if (maybe_elms_obj == NULL)
- return CallJsBuiltin(isolate, "ArrayShift", args);
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
- }
- if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
- return CallJsBuiltin(isolate, "ArrayShift", args);
- }
- FixedArray* elms = FixedArray::cast(elms_obj);
- JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastElements());
-
- int len = Smi::cast(array->length())->value();
- if (len == 0) return heap->undefined_value();
-
- // Get first element
- Object* first = elms->get(0);
- if (first->IsTheHole()) {
- first = heap->undefined_value();
- }
-
- if (!heap->lo_space()->Contains(elms)) {
- // As elms still in the same space they used to be,
- // there is no need to update region dirty mark.
- array->set_elements(LeftTrimFixedArray(heap, elms, 1), SKIP_WRITE_BARRIER);
- } else {
- // Shift the elements.
- AssertNoAllocation no_gc;
- MoveElements(heap, &no_gc, elms, 0, elms, 1, len - 1);
- elms->set(len - 1, heap->the_hole_value());
- }
-
- // Set the length.
- array->set_length(Smi::FromInt(len - 1));
-
- return first;
-}
-
-
-BUILTIN(ArrayUnshift) {
- Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver);
- if (maybe_elms_obj == NULL)
- return CallJsBuiltin(isolate, "ArrayUnshift", args);
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
- }
- if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
- return CallJsBuiltin(isolate, "ArrayUnshift", args);
- }
- FixedArray* elms = FixedArray::cast(elms_obj);
- JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastElements());
-
- int len = Smi::cast(array->length())->value();
- int to_add = args.length() - 1;
- int new_length = len + to_add;
- // Currently fixed arrays cannot grow too big, so
- // we should never hit this case.
- ASSERT(to_add <= (Smi::kMaxValue - len));
-
- if (new_length > elms->length()) {
- // New backing storage is needed.
- int capacity = new_length + (new_length >> 1) + 16;
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* new_elms = FixedArray::cast(obj);
-
- AssertNoAllocation no_gc;
- if (len > 0) {
- CopyElements(heap, &no_gc, new_elms, to_add, elms, 0, len);
- }
- FillWithHoles(heap, new_elms, new_length, capacity);
-
- elms = new_elms;
- array->set_elements(elms);
- } else {
- AssertNoAllocation no_gc;
- MoveElements(heap, &no_gc, elms, to_add, elms, 0, len);
- }
-
- // Add the provided values.
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < to_add; i++) {
- elms->set(i, args[i + 1], mode);
- }
-
- // Set the length.
- array->set_length(Smi::FromInt(new_length));
- return Smi::FromInt(new_length);
-}
-
-
-BUILTIN(ArraySlice) {
- Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArray* elms;
- int len = -1;
- if (receiver->IsJSArray()) {
- JSArray* array = JSArray::cast(receiver);
- if (!array->HasFastElements() ||
- !IsJSArrayFastElementMovingAllowed(heap, array)) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
-
- elms = FixedArray::cast(array->elements());
- len = Smi::cast(array->length())->value();
- } else {
- // Array.slice(arguments, ...) is quite a common idiom (notably more
- // than 50% of invocations in Web apps). Treat it in C++ as well.
- Map* arguments_map =
- isolate->context()->global_context()->arguments_boilerplate()->map();
-
- bool is_arguments_object_with_fast_elements =
- receiver->IsJSObject()
- && JSObject::cast(receiver)->map() == arguments_map
- && JSObject::cast(receiver)->HasFastElements();
- if (!is_arguments_object_with_fast_elements) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- elms = FixedArray::cast(JSObject::cast(receiver)->elements());
- Object* len_obj = JSObject::cast(receiver)
- ->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
- if (!len_obj->IsSmi()) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- len = Smi::cast(len_obj)->value();
- if (len > elms->length()) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- for (int i = 0; i < len; i++) {
- if (elms->get(i) == heap->the_hole_value()) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- }
- }
- ASSERT(len >= 0);
- int n_arguments = args.length() - 1;
-
- // Note carefully choosen defaults---if argument is missing,
- // it's undefined which gets converted to 0 for relative_start
- // and to len for relative_end.
- int relative_start = 0;
- int relative_end = len;
- if (n_arguments > 0) {
- Object* arg1 = args[1];
- if (arg1->IsSmi()) {
- relative_start = Smi::cast(arg1)->value();
- } else if (!arg1->IsUndefined()) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- if (n_arguments > 1) {
- Object* arg2 = args[2];
- if (arg2->IsSmi()) {
- relative_end = Smi::cast(arg2)->value();
- } else if (!arg2->IsUndefined()) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- }
- }
-
- // ECMAScript 232, 3rd Edition, Section 15.4.4.10, step 6.
- int k = (relative_start < 0) ? Max(len + relative_start, 0)
- : Min(relative_start, len);
-
- // ECMAScript 232, 3rd Edition, Section 15.4.4.10, step 8.
- int final = (relative_end < 0) ? Max(len + relative_end, 0)
- : Min(relative_end, len);
-
- // Calculate the length of result array.
- int result_len = final - k;
- if (result_len <= 0) {
- return AllocateEmptyJSArray(heap);
- }
-
- Object* result;
- { MaybeObject* maybe_result = AllocateJSArray(heap);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- JSArray* result_array = JSArray::cast(result);
-
- { MaybeObject* maybe_result =
- heap->AllocateUninitializedFixedArray(result_len);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- FixedArray* result_elms = FixedArray::cast(result);
-
- AssertNoAllocation no_gc;
- CopyElements(heap, &no_gc, result_elms, 0, elms, k, result_len);
-
- // Set elements.
- result_array->set_elements(result_elms);
-
- // Set the length.
- result_array->set_length(Smi::FromInt(result_len));
- return result_array;
-}
-
-
-BUILTIN(ArraySplice) {
- Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver);
- if (maybe_elms_obj == NULL)
- return CallJsBuiltin(isolate, "ArraySplice", args);
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
- }
- if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
- return CallJsBuiltin(isolate, "ArraySplice", args);
- }
- FixedArray* elms = FixedArray::cast(elms_obj);
- JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastElements());
-
- int len = Smi::cast(array->length())->value();
-
- int n_arguments = args.length() - 1;
-
- int relative_start = 0;
- if (n_arguments > 0) {
- Object* arg1 = args[1];
- if (arg1->IsSmi()) {
- relative_start = Smi::cast(arg1)->value();
- } else if (!arg1->IsUndefined()) {
- return CallJsBuiltin(isolate, "ArraySplice", args);
- }
- }
- int actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
- : Min(relative_start, len);
-
- // SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
- // given as a request to delete all the elements from the start.
- // And it differs from the case of undefined delete count.
- // This does not follow ECMA-262, but we do the same for
- // compatibility.
- int actual_delete_count;
- if (n_arguments == 1) {
- ASSERT(len - actual_start >= 0);
- actual_delete_count = len - actual_start;
- } else {
- int value = 0; // ToInteger(undefined) == 0
- if (n_arguments > 1) {
- Object* arg2 = args[2];
- if (arg2->IsSmi()) {
- value = Smi::cast(arg2)->value();
- } else {
- return CallJsBuiltin(isolate, "ArraySplice", args);
- }
- }
- actual_delete_count = Min(Max(value, 0), len - actual_start);
- }
-
- JSArray* result_array = NULL;
- if (actual_delete_count == 0) {
- Object* result;
- { MaybeObject* maybe_result = AllocateEmptyJSArray(heap);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- result_array = JSArray::cast(result);
- } else {
- // Allocate result array.
- Object* result;
- { MaybeObject* maybe_result = AllocateJSArray(heap);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- result_array = JSArray::cast(result);
-
- { MaybeObject* maybe_result =
- heap->AllocateUninitializedFixedArray(actual_delete_count);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- FixedArray* result_elms = FixedArray::cast(result);
-
- AssertNoAllocation no_gc;
- // Fill newly created array.
- CopyElements(heap,
- &no_gc,
- result_elms, 0,
- elms, actual_start,
- actual_delete_count);
-
- // Set elements.
- result_array->set_elements(result_elms);
-
- // Set the length.
- result_array->set_length(Smi::FromInt(actual_delete_count));
- }
-
- int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
-
- int new_length = len - actual_delete_count + item_count;
-
- if (item_count < actual_delete_count) {
- // Shrink the array.
- const bool trim_array = !heap->lo_space()->Contains(elms) &&
- ((actual_start + item_count) <
- (len - actual_delete_count - actual_start));
- if (trim_array) {
- const int delta = actual_delete_count - item_count;
-
- if (actual_start > 0) {
- Object** start = elms->data_start();
- memmove(start + delta, start, actual_start * kPointerSize);
- }
-
- elms = LeftTrimFixedArray(heap, elms, delta);
- array->set_elements(elms, SKIP_WRITE_BARRIER);
- } else {
- AssertNoAllocation no_gc;
- MoveElements(heap, &no_gc,
- elms, actual_start + item_count,
- elms, actual_start + actual_delete_count,
- (len - actual_delete_count - actual_start));
- FillWithHoles(heap, elms, new_length, len);
- }
- } else if (item_count > actual_delete_count) {
- // Currently fixed arrays cannot grow too big, so
- // we should never hit this case.
- ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
-
- // Check if array need to grow.
- if (new_length > elms->length()) {
- // New backing storage is needed.
- int capacity = new_length + (new_length >> 1) + 16;
- Object* obj;
- { MaybeObject* maybe_obj =
- heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* new_elms = FixedArray::cast(obj);
-
- AssertNoAllocation no_gc;
- // Copy the part before actual_start as is.
- if (actual_start > 0) {
- CopyElements(heap, &no_gc, new_elms, 0, elms, 0, actual_start);
- }
- const int to_copy = len - actual_delete_count - actual_start;
- if (to_copy > 0) {
- CopyElements(heap, &no_gc,
- new_elms, actual_start + item_count,
- elms, actual_start + actual_delete_count,
- to_copy);
- }
- FillWithHoles(heap, new_elms, new_length, capacity);
-
- elms = new_elms;
- array->set_elements(elms);
- } else {
- AssertNoAllocation no_gc;
- MoveElements(heap, &no_gc,
- elms, actual_start + item_count,
- elms, actual_start + actual_delete_count,
- (len - actual_delete_count - actual_start));
- }
- }
-
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- for (int k = actual_start; k < actual_start + item_count; k++) {
- elms->set(k, args[3 + k - actual_start], mode);
- }
-
- // Set the length.
- array->set_length(Smi::FromInt(new_length));
-
- return result_array;
-}
-
-
-BUILTIN(ArrayConcat) {
- Heap* heap = isolate->heap();
- Context* global_context = isolate->context()->global_context();
- JSObject* array_proto =
- JSObject::cast(global_context->array_function()->prototype());
- if (!ArrayPrototypeHasNoElements(heap, global_context, array_proto)) {
- return CallJsBuiltin(isolate, "ArrayConcat", args);
- }
-
- // Iterate through all the arguments performing checks
- // and calculating total length.
- int n_arguments = args.length();
- int result_len = 0;
- for (int i = 0; i < n_arguments; i++) {
- Object* arg = args[i];
- if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()
- || JSArray::cast(arg)->GetPrototype() != array_proto) {
- return CallJsBuiltin(isolate, "ArrayConcat", args);
- }
-
- int len = Smi::cast(JSArray::cast(arg)->length())->value();
-
- // We shouldn't overflow when adding another len.
- const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
- STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
- USE(kHalfOfMaxInt);
- result_len += len;
- ASSERT(result_len >= 0);
-
- if (result_len > FixedArray::kMaxLength) {
- return CallJsBuiltin(isolate, "ArrayConcat", args);
- }
- }
-
- if (result_len == 0) {
- return AllocateEmptyJSArray(heap);
- }
-
- // Allocate result.
- Object* result;
- { MaybeObject* maybe_result = AllocateJSArray(heap);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- JSArray* result_array = JSArray::cast(result);
-
- { MaybeObject* maybe_result =
- heap->AllocateUninitializedFixedArray(result_len);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- FixedArray* result_elms = FixedArray::cast(result);
-
- // Copy data.
- AssertNoAllocation no_gc;
- int start_pos = 0;
- for (int i = 0; i < n_arguments; i++) {
- JSArray* array = JSArray::cast(args[i]);
- int len = Smi::cast(array->length())->value();
- if (len > 0) {
- FixedArray* elms = FixedArray::cast(array->elements());
- CopyElements(heap, &no_gc, result_elms, start_pos, elms, 0, len);
- start_pos += len;
- }
- }
- ASSERT(start_pos == result_len);
-
- // Set the length and elements.
- result_array->set_length(Smi::FromInt(result_len));
- result_array->set_elements(result_elms);
-
- return result_array;
-}
-
-
-// -----------------------------------------------------------------------------
-// Strict mode poison pills
-
-
-BUILTIN(StrictArgumentsCallee) {
- HandleScope scope;
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_arguments_callee", HandleVector<Object>(NULL, 0)));
-}
-
-
-BUILTIN(StrictArgumentsCaller) {
- HandleScope scope;
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_arguments_caller", HandleVector<Object>(NULL, 0)));
-}
-
-
-BUILTIN(StrictFunctionCaller) {
- HandleScope scope;
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_function_caller", HandleVector<Object>(NULL, 0)));
-}
-
-
-BUILTIN(StrictFunctionArguments) {
- HandleScope scope;
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_function_arguments", HandleVector<Object>(NULL, 0)));
-}
-
-
-// -----------------------------------------------------------------------------
-//
-
-
-// Returns the holder JSObject if the function can legally be called
-// with this receiver. Returns Heap::null_value() if the call is
-// illegal. Any arguments that don't fit the expected type is
-// overwritten with undefined. Arguments that do fit the expected
-// type is overwritten with the object in the prototype chain that
-// actually has that type.
-static inline Object* TypeCheck(Heap* heap,
- int argc,
- Object** argv,
- FunctionTemplateInfo* info) {
- Object* recv = argv[0];
- Object* sig_obj = info->signature();
- if (sig_obj->IsUndefined()) return recv;
- SignatureInfo* sig = SignatureInfo::cast(sig_obj);
- // If necessary, check the receiver
- Object* recv_type = sig->receiver();
-
- Object* holder = recv;
- if (!recv_type->IsUndefined()) {
- for (; holder != heap->null_value(); holder = holder->GetPrototype()) {
- if (holder->IsInstanceOf(FunctionTemplateInfo::cast(recv_type))) {
- break;
- }
- }
- if (holder == heap->null_value()) return holder;
- }
- Object* args_obj = sig->args();
- // If there is no argument signature we're done
- if (args_obj->IsUndefined()) return holder;
- FixedArray* args = FixedArray::cast(args_obj);
- int length = args->length();
- if (argc <= length) length = argc - 1;
- for (int i = 0; i < length; i++) {
- Object* argtype = args->get(i);
- if (argtype->IsUndefined()) continue;
- Object** arg = &argv[-1 - i];
- Object* current = *arg;
- for (; current != heap->null_value(); current = current->GetPrototype()) {
- if (current->IsInstanceOf(FunctionTemplateInfo::cast(argtype))) {
- *arg = current;
- break;
- }
- }
- if (current == heap->null_value()) *arg = heap->undefined_value();
- }
- return holder;
-}
-
-
-template <bool is_construct>
-MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
- BuiltinArguments<NEEDS_CALLED_FUNCTION> args, Isolate* isolate) {
- ASSERT(is_construct == CalledAsConstructor(isolate));
- Heap* heap = isolate->heap();
-
- HandleScope scope(isolate);
- Handle<JSFunction> function = args.called_function();
- ASSERT(function->shared()->IsApiFunction());
-
- FunctionTemplateInfo* fun_data = function->shared()->get_api_func_data();
- if (is_construct) {
- Handle<FunctionTemplateInfo> desc(fun_data, isolate);
- bool pending_exception = false;
- isolate->factory()->ConfigureInstance(
- desc, Handle<JSObject>::cast(args.receiver()), &pending_exception);
- ASSERT(isolate->has_pending_exception() == pending_exception);
- if (pending_exception) return Failure::Exception();
- fun_data = *desc;
- }
-
- Object* raw_holder = TypeCheck(heap, args.length(), &args[0], fun_data);
-
- if (raw_holder->IsNull()) {
- // This function cannot be called with the given receiver. Abort!
- Handle<Object> obj =
- isolate->factory()->NewTypeError(
- "illegal_invocation", HandleVector(&function, 1));
- return isolate->Throw(*obj);
- }
-
- Object* raw_call_data = fun_data->call_code();
- if (!raw_call_data->IsUndefined()) {
- CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
- Object* callback_obj = call_data->callback();
- v8::InvocationCallback callback =
- v8::ToCData<v8::InvocationCallback>(callback_obj);
- Object* data_obj = call_data->data();
- Object* result;
-
- LOG(isolate, ApiObjectAccess("call", JSObject::cast(*args.receiver())));
- ASSERT(raw_holder->IsJSObject());
-
- CustomArguments custom(isolate);
- v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
- data_obj, *function, raw_holder);
-
- v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
- custom.end(),
- &args[0] - 1,
- args.length() - 1,
- is_construct);
-
- v8::Handle<v8::Value> value;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- ExternalCallbackScope call_scope(isolate,
- v8::ToCData<Address>(callback_obj));
- value = callback(new_args);
- }
- if (value.IsEmpty()) {
- result = heap->undefined_value();
- } else {
- result = *reinterpret_cast<Object**>(*value);
- }
-
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!is_construct || result->IsJSObject()) return result;
- }
-
- return *args.receiver();
-}
-
-
-BUILTIN(HandleApiCall) {
- return HandleApiCallHelper<false>(args, isolate);
-}
-
-
-BUILTIN(HandleApiCallConstruct) {
- return HandleApiCallHelper<true>(args, isolate);
-}
-
-
-#ifdef DEBUG
-
-static void VerifyTypeCheck(Handle<JSObject> object,
- Handle<JSFunction> function) {
- ASSERT(function->shared()->IsApiFunction());
- FunctionTemplateInfo* info = function->shared()->get_api_func_data();
- if (info->signature()->IsUndefined()) return;
- SignatureInfo* signature = SignatureInfo::cast(info->signature());
- Object* receiver_type = signature->receiver();
- if (receiver_type->IsUndefined()) return;
- FunctionTemplateInfo* type = FunctionTemplateInfo::cast(receiver_type);
- ASSERT(object->IsInstanceOf(type));
-}
-
-#endif
-
-
-BUILTIN(FastHandleApiCall) {
- ASSERT(!CalledAsConstructor(isolate));
- Heap* heap = isolate->heap();
- const bool is_construct = false;
-
- // We expect four more arguments: callback, function, call data, and holder.
- const int args_length = args.length() - 4;
- ASSERT(args_length >= 0);
-
- Object* callback_obj = args[args_length];
-
- v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
- &args[args_length + 1],
- &args[0] - 1,
- args_length - 1,
- is_construct);
-
-#ifdef DEBUG
- VerifyTypeCheck(Utils::OpenHandle(*new_args.Holder()),
- Utils::OpenHandle(*new_args.Callee()));
-#endif
- HandleScope scope(isolate);
- Object* result;
- v8::Handle<v8::Value> value;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- ExternalCallbackScope call_scope(isolate,
- v8::ToCData<Address>(callback_obj));
- v8::InvocationCallback callback =
- v8::ToCData<v8::InvocationCallback>(callback_obj);
-
- value = callback(new_args);
- }
- if (value.IsEmpty()) {
- result = heap->undefined_value();
- } else {
- result = *reinterpret_cast<Object**>(*value);
- }
-
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return result;
-}
-
-
-// Helper function to handle calls to non-function objects created through the
-// API. The object can be called as either a constructor (using new) or just as
-// a function (without new).
-MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
- Isolate* isolate,
- bool is_construct_call,
- BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
- // Non-functions are never called as constructors. Even if this is an object
- // called as a constructor the delegate call is not a construct call.
- ASSERT(!CalledAsConstructor(isolate));
- Heap* heap = isolate->heap();
-
- Handle<Object> receiver = args.at<Object>(0);
-
- // Get the object called.
- JSObject* obj = JSObject::cast(*args.receiver());
-
- // Get the invocation callback from the function descriptor that was
- // used to create the called object.
- ASSERT(obj->map()->has_instance_call_handler());
- JSFunction* constructor = JSFunction::cast(obj->map()->constructor());
- ASSERT(constructor->shared()->IsApiFunction());
- Object* handler =
- constructor->shared()->get_api_func_data()->instance_call_handler();
- ASSERT(!handler->IsUndefined());
- CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
- Object* callback_obj = call_data->callback();
- v8::InvocationCallback callback =
- v8::ToCData<v8::InvocationCallback>(callback_obj);
-
- // Get the data for the call and perform the callback.
- Object* result;
- {
- HandleScope scope(isolate);
- LOG(isolate, ApiObjectAccess("call non-function", obj));
-
- CustomArguments custom(isolate);
- v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
- call_data->data(), constructor, obj);
- v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
- custom.end(),
- &args[0] - 1,
- args.length() - 1,
- is_construct_call);
- v8::Handle<v8::Value> value;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- ExternalCallbackScope call_scope(isolate,
- v8::ToCData<Address>(callback_obj));
- value = callback(new_args);
- }
- if (value.IsEmpty()) {
- result = heap->undefined_value();
- } else {
- result = *reinterpret_cast<Object**>(*value);
- }
- }
- // Check for exceptions and return result.
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return result;
-}
-
-
-// Handle calls to non-function objects created through the API. This delegate
-// function is used when the call is a normal function call.
-BUILTIN(HandleApiCallAsFunction) {
- return HandleApiCallAsFunctionOrConstructor(isolate, false, args);
-}
-
-
-// Handle calls to non-function objects created through the API. This delegate
-// function is used when the call is a construct call.
-BUILTIN(HandleApiCallAsConstructor) {
- return HandleApiCallAsFunctionOrConstructor(isolate, true, args);
-}
-
-
-static void Generate_LoadIC_ArrayLength(MacroAssembler* masm) {
- LoadIC::GenerateArrayLength(masm);
-}
-
-
-static void Generate_LoadIC_StringLength(MacroAssembler* masm) {
- LoadIC::GenerateStringLength(masm, false);
-}
-
-
-static void Generate_LoadIC_StringWrapperLength(MacroAssembler* masm) {
- LoadIC::GenerateStringLength(masm, true);
-}
-
-
-static void Generate_LoadIC_FunctionPrototype(MacroAssembler* masm) {
- LoadIC::GenerateFunctionPrototype(masm);
-}
-
-
-static void Generate_LoadIC_Initialize(MacroAssembler* masm) {
- LoadIC::GenerateInitialize(masm);
-}
-
-
-static void Generate_LoadIC_PreMonomorphic(MacroAssembler* masm) {
- LoadIC::GeneratePreMonomorphic(masm);
-}
-
-
-static void Generate_LoadIC_Miss(MacroAssembler* masm) {
- LoadIC::GenerateMiss(masm);
-}
-
-
-static void Generate_LoadIC_Megamorphic(MacroAssembler* masm) {
- LoadIC::GenerateMegamorphic(masm);
-}
-
-
-static void Generate_LoadIC_Normal(MacroAssembler* masm) {
- LoadIC::GenerateNormal(masm);
-}
-
-
-static void Generate_KeyedLoadIC_Initialize(MacroAssembler* masm) {
- KeyedLoadIC::GenerateInitialize(masm);
-}
-
-
-static void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
- KeyedLoadIC::GenerateMiss(masm);
-}
-
-
-static void Generate_KeyedLoadIC_Generic(MacroAssembler* masm) {
- KeyedLoadIC::GenerateGeneric(masm);
-}
-
-
-static void Generate_KeyedLoadIC_String(MacroAssembler* masm) {
- KeyedLoadIC::GenerateString(masm);
-}
-
-
-static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
- KeyedLoadIC::GeneratePreMonomorphic(masm);
-}
-
-static void Generate_KeyedLoadIC_IndexedInterceptor(MacroAssembler* masm) {
- KeyedLoadIC::GenerateIndexedInterceptor(masm);
-}
-
-
-static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
- StoreIC::GenerateInitialize(masm);
-}
-
-
-static void Generate_StoreIC_Initialize_Strict(MacroAssembler* masm) {
- StoreIC::GenerateInitialize(masm);
-}
-
-
-static void Generate_StoreIC_Miss(MacroAssembler* masm) {
- StoreIC::GenerateMiss(masm);
-}
-
-
-static void Generate_StoreIC_Normal(MacroAssembler* masm) {
- StoreIC::GenerateNormal(masm);
-}
-
-
-static void Generate_StoreIC_Normal_Strict(MacroAssembler* masm) {
- StoreIC::GenerateNormal(masm);
-}
-
-
-static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
- StoreIC::GenerateMegamorphic(masm, kNonStrictMode);
-}
-
-
-static void Generate_StoreIC_Megamorphic_Strict(MacroAssembler* masm) {
- StoreIC::GenerateMegamorphic(masm, kStrictMode);
-}
-
-
-static void Generate_StoreIC_ArrayLength(MacroAssembler* masm) {
- StoreIC::GenerateArrayLength(masm);
-}
-
-
-static void Generate_StoreIC_ArrayLength_Strict(MacroAssembler* masm) {
- StoreIC::GenerateArrayLength(masm);
-}
-
-
-static void Generate_StoreIC_GlobalProxy(MacroAssembler* masm) {
- StoreIC::GenerateGlobalProxy(masm, kNonStrictMode);
-}
-
-
-static void Generate_StoreIC_GlobalProxy_Strict(MacroAssembler* masm) {
- StoreIC::GenerateGlobalProxy(masm, kStrictMode);
-}
-
-
-static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
- KeyedStoreIC::GenerateGeneric(masm, kNonStrictMode);
-}
-
-
-static void Generate_KeyedStoreIC_Generic_Strict(MacroAssembler* masm) {
- KeyedStoreIC::GenerateGeneric(masm, kStrictMode);
-}
-
-
-static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
- KeyedStoreIC::GenerateMiss(masm);
-}
-
-
-static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) {
- KeyedStoreIC::GenerateInitialize(masm);
-}
-
-
-static void Generate_KeyedStoreIC_Initialize_Strict(MacroAssembler* masm) {
- KeyedStoreIC::GenerateInitialize(masm);
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateLoadICDebugBreak(masm);
-}
-
-
-static void Generate_StoreIC_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateStoreICDebugBreak(masm);
-}
-
-
-static void Generate_KeyedLoadIC_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateKeyedLoadICDebugBreak(masm);
-}
-
-
-static void Generate_KeyedStoreIC_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateKeyedStoreICDebugBreak(masm);
-}
-
-
-static void Generate_ConstructCall_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateConstructCallDebugBreak(masm);
-}
-
-
-static void Generate_Return_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateReturnDebugBreak(masm);
-}
-
-
-static void Generate_StubNoRegisters_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateStubNoRegistersDebugBreak(masm);
-}
-
-
-static void Generate_Slot_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateSlotDebugBreak(masm);
-}
-
-
-static void Generate_PlainReturn_LiveEdit(MacroAssembler* masm) {
- Debug::GeneratePlainReturnLiveEdit(masm);
-}
-
-
-static void Generate_FrameDropper_LiveEdit(MacroAssembler* masm) {
- Debug::GenerateFrameDropperLiveEdit(masm);
-}
-#endif
-
-
-Builtins::Builtins() : initialized_(false) {
- memset(builtins_, 0, sizeof(builtins_[0]) * builtin_count);
- memset(names_, 0, sizeof(names_[0]) * builtin_count);
-}
-
-
-Builtins::~Builtins() {
-}
-
-
-#define DEF_ENUM_C(name, ignore) FUNCTION_ADDR(Builtin_##name),
-Address const Builtins::c_functions_[cfunction_count] = {
- BUILTIN_LIST_C(DEF_ENUM_C)
-};
-#undef DEF_ENUM_C
-
-#define DEF_JS_NAME(name, ignore) #name,
-#define DEF_JS_ARGC(ignore, argc) argc,
-const char* const Builtins::javascript_names_[id_count] = {
- BUILTINS_LIST_JS(DEF_JS_NAME)
-};
-
-int const Builtins::javascript_argc_[id_count] = {
- BUILTINS_LIST_JS(DEF_JS_ARGC)
-};
-#undef DEF_JS_NAME
-#undef DEF_JS_ARGC
-
-struct BuiltinDesc {
- byte* generator;
- byte* c_code;
- const char* s_name; // name is only used for generating log information.
- int name;
- Code::Flags flags;
- BuiltinExtraArguments extra_args;
-};
-
-class BuiltinFunctionTable {
- public:
- BuiltinFunctionTable() {
- Builtins::InitBuiltinFunctionTable();
- }
-
- static const BuiltinDesc* functions() { return functions_; }
-
- private:
- static BuiltinDesc functions_[Builtins::builtin_count + 1];
-
- friend class Builtins;
-};
-
-BuiltinDesc BuiltinFunctionTable::functions_[Builtins::builtin_count + 1];
-
-static const BuiltinFunctionTable builtin_function_table_init;
-
-// Define array of pointers to generators and C builtin functions.
-// We do this in a sort of roundabout way so that we can do the initialization
-// within the lexical scope of Builtins:: and within a context where
-// Code::Flags names a non-abstract type.
-void Builtins::InitBuiltinFunctionTable() {
- BuiltinDesc* functions = BuiltinFunctionTable::functions_;
- functions[builtin_count].generator = NULL;
- functions[builtin_count].c_code = NULL;
- functions[builtin_count].s_name = NULL;
- functions[builtin_count].name = builtin_count;
- functions[builtin_count].flags = static_cast<Code::Flags>(0);
- functions[builtin_count].extra_args = NO_EXTRA_ARGUMENTS;
-
-#define DEF_FUNCTION_PTR_C(aname, aextra_args) \
- functions->generator = FUNCTION_ADDR(Generate_Adaptor); \
- functions->c_code = FUNCTION_ADDR(Builtin_##aname); \
- functions->s_name = #aname; \
- functions->name = c_##aname; \
- functions->flags = Code::ComputeFlags(Code::BUILTIN); \
- functions->extra_args = aextra_args; \
- ++functions;
-
-#define DEF_FUNCTION_PTR_A(aname, kind, state, extra) \
- functions->generator = FUNCTION_ADDR(Generate_##aname); \
- functions->c_code = NULL; \
- functions->s_name = #aname; \
- functions->name = k##aname; \
- functions->flags = Code::ComputeFlags(Code::kind, \
- NOT_IN_LOOP, \
- state, \
- extra); \
- functions->extra_args = NO_EXTRA_ARGUMENTS; \
- ++functions;
-
- BUILTIN_LIST_C(DEF_FUNCTION_PTR_C)
- BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
- BUILTIN_LIST_DEBUG_A(DEF_FUNCTION_PTR_A)
-
-#undef DEF_FUNCTION_PTR_C
-#undef DEF_FUNCTION_PTR_A
-}
-
-void Builtins::Setup(bool create_heap_objects) {
- ASSERT(!initialized_);
- Isolate* isolate = Isolate::Current();
- Heap* heap = isolate->heap();
-
- // Create a scope for the handles in the builtins.
- HandleScope scope(isolate);
-
- const BuiltinDesc* functions = BuiltinFunctionTable::functions();
-
- // For now we generate builtin adaptor code into a stack-allocated
- // buffer, before copying it into individual code objects.
- byte buffer[4*KB];
-
- // Traverse the list of builtins and generate an adaptor in a
- // separate code object for each one.
- for (int i = 0; i < builtin_count; i++) {
- if (create_heap_objects) {
- MacroAssembler masm(isolate, buffer, sizeof buffer);
- // Generate the code/adaptor.
- typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
- Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
- // We pass all arguments to the generator, but it may not use all of
- // them. This works because the first arguments are on top of the
- // stack.
- g(&masm, functions[i].name, functions[i].extra_args);
- // Move the code into the object heap.
- CodeDesc desc;
- masm.GetCode(&desc);
- Code::Flags flags = functions[i].flags;
- Object* code = NULL;
- {
- // During startup it's OK to always allocate and defer GC to later.
- // This simplifies things because we don't need to retry.
- AlwaysAllocateScope __scope__;
- { MaybeObject* maybe_code =
- heap->CreateCode(desc, flags, masm.CodeObject());
- if (!maybe_code->ToObject(&code)) {
- v8::internal::V8::FatalProcessOutOfMemory("CreateCode");
- }
- }
- }
- // Log the event and add the code to the builtins array.
- PROFILE(isolate,
- CodeCreateEvent(Logger::BUILTIN_TAG,
- Code::cast(code),
- functions[i].s_name));
- GDBJIT(AddCode(GDBJITInterface::BUILTIN,
- functions[i].s_name,
- Code::cast(code)));
- builtins_[i] = code;
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_builtin_code) {
- PrintF("Builtin: %s\n", functions[i].s_name);
- Code::cast(code)->Disassemble(functions[i].s_name);
- PrintF("\n");
- }
-#endif
- } else {
- // Deserializing. The values will be filled in during IterateBuiltins.
- builtins_[i] = NULL;
- }
- names_[i] = functions[i].s_name;
- }
-
- // Mark as initialized.
- initialized_ = true;
-}
-
-
-void Builtins::TearDown() {
- initialized_ = false;
-}
-
-
-void Builtins::IterateBuiltins(ObjectVisitor* v) {
- v->VisitPointers(&builtins_[0], &builtins_[0] + builtin_count);
-}
-
-
-const char* Builtins::Lookup(byte* pc) {
- // may be called during initialization (disassembler!)
- if (initialized_) {
- for (int i = 0; i < builtin_count; i++) {
- Code* entry = Code::cast(builtins_[i]);
- if (entry->contains(pc)) {
- return names_[i];
- }
- }
- }
- return NULL;
-}
-
-
-#define DEFINE_BUILTIN_ACCESSOR_C(name, ignore) \
-Handle<Code> Builtins::name() { \
- Code** code_address = \
- reinterpret_cast<Code**>(builtin_address(k##name)); \
- return Handle<Code>(code_address); \
-}
-#define DEFINE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
-Handle<Code> Builtins::name() { \
- Code** code_address = \
- reinterpret_cast<Code**>(builtin_address(k##name)); \
- return Handle<Code>(code_address); \
-}
-BUILTIN_LIST_C(DEFINE_BUILTIN_ACCESSOR_C)
-BUILTIN_LIST_A(DEFINE_BUILTIN_ACCESSOR_A)
-BUILTIN_LIST_DEBUG_A(DEFINE_BUILTIN_ACCESSOR_A)
-#undef DEFINE_BUILTIN_ACCESSOR_C
-#undef DEFINE_BUILTIN_ACCESSOR_A
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/builtins.h b/src/3rdparty/v8/src/builtins.h
deleted file mode 100644
index bc0facb..0000000
--- a/src/3rdparty/v8/src/builtins.h
+++ /dev/null
@@ -1,368 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_BUILTINS_H_
-#define V8_BUILTINS_H_
-
-namespace v8 {
-namespace internal {
-
-// Specifies extra arguments required by a C++ builtin.
-enum BuiltinExtraArguments {
- NO_EXTRA_ARGUMENTS = 0,
- NEEDS_CALLED_FUNCTION = 1
-};
-
-
-// Define list of builtins implemented in C++.
-#define BUILTIN_LIST_C(V) \
- V(Illegal, NO_EXTRA_ARGUMENTS) \
- \
- V(EmptyFunction, NO_EXTRA_ARGUMENTS) \
- \
- V(ArrayCodeGeneric, NO_EXTRA_ARGUMENTS) \
- \
- V(ArrayPush, NO_EXTRA_ARGUMENTS) \
- V(ArrayPop, NO_EXTRA_ARGUMENTS) \
- V(ArrayShift, NO_EXTRA_ARGUMENTS) \
- V(ArrayUnshift, NO_EXTRA_ARGUMENTS) \
- V(ArraySlice, NO_EXTRA_ARGUMENTS) \
- V(ArraySplice, NO_EXTRA_ARGUMENTS) \
- V(ArrayConcat, NO_EXTRA_ARGUMENTS) \
- \
- V(HandleApiCall, NEEDS_CALLED_FUNCTION) \
- V(FastHandleApiCall, NO_EXTRA_ARGUMENTS) \
- V(HandleApiCallConstruct, NEEDS_CALLED_FUNCTION) \
- V(HandleApiCallAsFunction, NO_EXTRA_ARGUMENTS) \
- V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS) \
- \
- V(StrictArgumentsCallee, NO_EXTRA_ARGUMENTS) \
- V(StrictArgumentsCaller, NO_EXTRA_ARGUMENTS) \
- V(StrictFunctionCaller, NO_EXTRA_ARGUMENTS) \
- V(StrictFunctionArguments, NO_EXTRA_ARGUMENTS)
-
-
-// Define list of builtins implemented in assembly.
-#define BUILTIN_LIST_A(V) \
- V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(JSConstructCall, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(JSConstructStubApi, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(LazyCompile, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(LazyRecompile, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(NotifyOSR, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- \
- V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- \
- V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
- \
- V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
- \
- V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
- V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
- V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \
- kStrictMode) \
- V(StoreIC_ArrayLength_Strict, STORE_IC, MONOMORPHIC, \
- kStrictMode) \
- V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \
- kStrictMode) \
- V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
- kStrictMode) \
- V(StoreIC_GlobalProxy_Strict, STORE_IC, MEGAMORPHIC, \
- kStrictMode) \
- \
- V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
- \
- V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
- kStrictMode) \
- V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \
- kStrictMode) \
- \
- /* Uses KeyedLoadIC_Initialize; must be after in list. */ \
- V(FunctionCall, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(FunctionApply, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- \
- V(ArrayCode, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(ArrayConstructCode, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- \
- V(StringConstructCode, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- \
- V(OnStackReplacement, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState)
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-// Define list of builtins used by the debugger implemented in assembly.
-#define BUILTIN_LIST_DEBUG_A(V) \
- V(Return_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(ConstructCall_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(StubNoRegisters_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(Slot_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(PlainReturn_LiveEdit, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(FrameDropper_LiveEdit, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState)
-#else
-#define BUILTIN_LIST_DEBUG_A(V)
-#endif
-
-// Define list of builtins implemented in JavaScript.
-#define BUILTINS_LIST_JS(V) \
- V(EQUALS, 1) \
- V(STRICT_EQUALS, 1) \
- V(COMPARE, 2) \
- V(ADD, 1) \
- V(SUB, 1) \
- V(MUL, 1) \
- V(DIV, 1) \
- V(MOD, 1) \
- V(BIT_OR, 1) \
- V(BIT_AND, 1) \
- V(BIT_XOR, 1) \
- V(UNARY_MINUS, 0) \
- V(BIT_NOT, 0) \
- V(SHL, 1) \
- V(SAR, 1) \
- V(SHR, 1) \
- V(DELETE, 2) \
- V(IN, 1) \
- V(INSTANCE_OF, 1) \
- V(GET_KEYS, 0) \
- V(FILTER_KEY, 1) \
- V(CALL_NON_FUNCTION, 0) \
- V(CALL_NON_FUNCTION_AS_CONSTRUCTOR, 0) \
- V(TO_OBJECT, 0) \
- V(TO_NUMBER, 0) \
- V(TO_STRING, 0) \
- V(STRING_ADD_LEFT, 1) \
- V(STRING_ADD_RIGHT, 1) \
- V(APPLY_PREPARE, 1) \
- V(APPLY_OVERFLOW, 1)
-
-
-class BuiltinFunctionTable;
-class ObjectVisitor;
-
-
-class Builtins {
- public:
- ~Builtins();
-
- // Generate all builtin code objects. Should be called once during
- // isolate initialization.
- void Setup(bool create_heap_objects);
- void TearDown();
-
- // Garbage collection support.
- void IterateBuiltins(ObjectVisitor* v);
-
- // Disassembler support.
- const char* Lookup(byte* pc);
-
- enum Name {
-#define DEF_ENUM_C(name, ignore) k##name,
-#define DEF_ENUM_A(name, kind, state, extra) k##name,
- BUILTIN_LIST_C(DEF_ENUM_C)
- BUILTIN_LIST_A(DEF_ENUM_A)
- BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
-#undef DEF_ENUM_C
-#undef DEF_ENUM_A
- builtin_count
- };
-
- enum CFunctionId {
-#define DEF_ENUM_C(name, ignore) c_##name,
- BUILTIN_LIST_C(DEF_ENUM_C)
-#undef DEF_ENUM_C
- cfunction_count
- };
-
- enum JavaScript {
-#define DEF_ENUM(name, ignore) name,
- BUILTINS_LIST_JS(DEF_ENUM)
-#undef DEF_ENUM
- id_count
- };
-
-#define DECLARE_BUILTIN_ACCESSOR_C(name, ignore) Handle<Code> name();
-#define DECLARE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
- Handle<Code> name();
- BUILTIN_LIST_C(DECLARE_BUILTIN_ACCESSOR_C)
- BUILTIN_LIST_A(DECLARE_BUILTIN_ACCESSOR_A)
- BUILTIN_LIST_DEBUG_A(DECLARE_BUILTIN_ACCESSOR_A)
-#undef DECLARE_BUILTIN_ACCESSOR_C
-#undef DECLARE_BUILTIN_ACCESSOR_A
-
- Code* builtin(Name name) {
- // Code::cast cannot be used here since we access builtins
- // during the marking phase of mark sweep. See IC::Clear.
- return reinterpret_cast<Code*>(builtins_[name]);
- }
-
- Address builtin_address(Name name) {
- return reinterpret_cast<Address>(&builtins_[name]);
- }
-
- static Address c_function_address(CFunctionId id) {
- return c_functions_[id];
- }
-
- static const char* GetName(JavaScript id) { return javascript_names_[id]; }
- static int GetArgumentsCount(JavaScript id) { return javascript_argc_[id]; }
- Handle<Code> GetCode(JavaScript id, bool* resolved);
- static int NumberOfJavaScriptBuiltins() { return id_count; }
-
- bool is_initialized() const { return initialized_; }
-
- private:
- Builtins();
-
- // The external C++ functions called from the code.
- static Address const c_functions_[cfunction_count];
-
- // Note: These are always Code objects, but to conform with
- // IterateBuiltins() above which assumes Object**'s for the callback
- // function f, we use an Object* array here.
- Object* builtins_[builtin_count];
- const char* names_[builtin_count];
- static const char* const javascript_names_[id_count];
- static int const javascript_argc_[id_count];
-
- static void Generate_Adaptor(MacroAssembler* masm,
- CFunctionId id,
- BuiltinExtraArguments extra_args);
- static void Generate_JSConstructCall(MacroAssembler* masm);
- static void Generate_JSConstructStubCountdown(MacroAssembler* masm);
- static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
- static void Generate_JSConstructStubApi(MacroAssembler* masm);
- static void Generate_JSEntryTrampoline(MacroAssembler* masm);
- static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
- static void Generate_LazyCompile(MacroAssembler* masm);
- static void Generate_LazyRecompile(MacroAssembler* masm);
- static void Generate_NotifyDeoptimized(MacroAssembler* masm);
- static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
- static void Generate_NotifyOSR(MacroAssembler* masm);
- static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
-
- static void Generate_FunctionCall(MacroAssembler* masm);
- static void Generate_FunctionApply(MacroAssembler* masm);
-
- static void Generate_ArrayCode(MacroAssembler* masm);
- static void Generate_ArrayConstructCode(MacroAssembler* masm);
-
- static void Generate_StringConstructCode(MacroAssembler* masm);
- static void Generate_OnStackReplacement(MacroAssembler* masm);
-
- static void InitBuiltinFunctionTable();
-
- bool initialized_;
-
- friend class BuiltinFunctionTable;
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(Builtins);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_BUILTINS_H_
diff --git a/src/3rdparty/v8/src/bytecodes-irregexp.h b/src/3rdparty/v8/src/bytecodes-irregexp.h
deleted file mode 100644
index 93218ea..0000000
--- a/src/3rdparty/v8/src/bytecodes-irregexp.h
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2008-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_BYTECODES_IRREGEXP_H_
-#define V8_BYTECODES_IRREGEXP_H_
-
-namespace v8 {
-namespace internal {
-
-
-static const int BYTECODE_MASK = 0xff;
-// The first argument is packed in with the byte code in one word, but so it
-// has 24 bits, but it can be positive and negative so only use 23 bits for
-// positive values.
-static const unsigned int MAX_FIRST_ARG = 0x7fffffu;
-static const int BYTECODE_SHIFT = 8;
-
-#define BYTECODE_ITERATOR(V) \
-V(BREAK, 0, 4) /* bc8 */ \
-V(PUSH_CP, 1, 4) /* bc8 pad24 */ \
-V(PUSH_BT, 2, 8) /* bc8 pad24 offset32 */ \
-V(PUSH_REGISTER, 3, 4) /* bc8 reg_idx24 */ \
-V(SET_REGISTER_TO_CP, 4, 8) /* bc8 reg_idx24 offset32 */ \
-V(SET_CP_TO_REGISTER, 5, 4) /* bc8 reg_idx24 */ \
-V(SET_REGISTER_TO_SP, 6, 4) /* bc8 reg_idx24 */ \
-V(SET_SP_TO_REGISTER, 7, 4) /* bc8 reg_idx24 */ \
-V(SET_REGISTER, 8, 8) /* bc8 reg_idx24 value32 */ \
-V(ADVANCE_REGISTER, 9, 8) /* bc8 reg_idx24 value32 */ \
-V(POP_CP, 10, 4) /* bc8 pad24 */ \
-V(POP_BT, 11, 4) /* bc8 pad24 */ \
-V(POP_REGISTER, 12, 4) /* bc8 reg_idx24 */ \
-V(FAIL, 13, 4) /* bc8 pad24 */ \
-V(SUCCEED, 14, 4) /* bc8 pad24 */ \
-V(ADVANCE_CP, 15, 4) /* bc8 offset24 */ \
-V(GOTO, 16, 8) /* bc8 pad24 addr32 */ \
-V(LOAD_CURRENT_CHAR, 17, 8) /* bc8 offset24 addr32 */ \
-V(LOAD_CURRENT_CHAR_UNCHECKED, 18, 4) /* bc8 offset24 */ \
-V(LOAD_2_CURRENT_CHARS, 19, 8) /* bc8 offset24 addr32 */ \
-V(LOAD_2_CURRENT_CHARS_UNCHECKED, 20, 4) /* bc8 offset24 */ \
-V(LOAD_4_CURRENT_CHARS, 21, 8) /* bc8 offset24 addr32 */ \
-V(LOAD_4_CURRENT_CHARS_UNCHECKED, 22, 4) /* bc8 offset24 */ \
-V(CHECK_4_CHARS, 23, 12) /* bc8 pad24 uint32 addr32 */ \
-V(CHECK_CHAR, 24, 8) /* bc8 pad8 uint16 addr32 */ \
-V(CHECK_NOT_4_CHARS, 25, 12) /* bc8 pad24 uint32 addr32 */ \
-V(CHECK_NOT_CHAR, 26, 8) /* bc8 pad8 uint16 addr32 */ \
-V(AND_CHECK_4_CHARS, 27, 16) /* bc8 pad24 uint32 uint32 addr32 */ \
-V(AND_CHECK_CHAR, 28, 12) /* bc8 pad8 uint16 uint32 addr32 */ \
-V(AND_CHECK_NOT_4_CHARS, 29, 16) /* bc8 pad24 uint32 uint32 addr32 */ \
-V(AND_CHECK_NOT_CHAR, 30, 12) /* bc8 pad8 uint16 uint32 addr32 */ \
-V(MINUS_AND_CHECK_NOT_CHAR, 31, 12) /* bc8 pad8 uc16 uc16 addr32 */ \
-V(CHECK_LT, 32, 8) /* bc8 pad8 uc16 addr32 */ \
-V(CHECK_GT, 33, 8) /* bc8 pad8 uc16 addr32 */ \
-V(CHECK_NOT_BACK_REF, 34, 8) /* bc8 reg_idx24 addr32 */ \
-V(CHECK_NOT_BACK_REF_NO_CASE, 35, 8) /* bc8 reg_idx24 addr32 */ \
-V(CHECK_NOT_REGS_EQUAL, 36, 12) /* bc8 regidx24 reg_idx32 addr32 */ \
-V(LOOKUP_MAP1, 37, 12) /* bc8 pad8 start16 bit_map_addr32 addr32 */ \
-V(LOOKUP_MAP2, 38, 96) /* bc8 pad8 start16 half_nibble_map_addr32* */ \
-V(LOOKUP_MAP8, 39, 96) /* bc8 pad8 start16 byte_map addr32* */ \
-V(LOOKUP_HI_MAP8, 40, 96) /* bc8 start24 byte_map_addr32 addr32* */ \
-V(CHECK_REGISTER_LT, 41, 12) /* bc8 reg_idx24 value32 addr32 */ \
-V(CHECK_REGISTER_GE, 42, 12) /* bc8 reg_idx24 value32 addr32 */ \
-V(CHECK_REGISTER_EQ_POS, 43, 8) /* bc8 reg_idx24 addr32 */ \
-V(CHECK_AT_START, 44, 8) /* bc8 pad24 addr32 */ \
-V(CHECK_NOT_AT_START, 45, 8) /* bc8 pad24 addr32 */ \
-V(CHECK_GREEDY, 46, 8) /* bc8 pad24 addr32 */ \
-V(ADVANCE_CP_AND_GOTO, 47, 8) /* bc8 offset24 addr32 */ \
-V(SET_CURRENT_POSITION_FROM_END, 48, 4) /* bc8 idx24 */
-
-#define DECLARE_BYTECODES(name, code, length) \
- static const int BC_##name = code;
-BYTECODE_ITERATOR(DECLARE_BYTECODES)
-#undef DECLARE_BYTECODES
-
-#define DECLARE_BYTECODE_LENGTH(name, code, length) \
- static const int BC_##name##_LENGTH = length;
-BYTECODE_ITERATOR(DECLARE_BYTECODE_LENGTH)
-#undef DECLARE_BYTECODE_LENGTH
-} }
-
-#endif // V8_BYTECODES_IRREGEXP_H_
diff --git a/src/3rdparty/v8/src/cached-powers.cc b/src/3rdparty/v8/src/cached-powers.cc
deleted file mode 100644
index 43dbc78..0000000
--- a/src/3rdparty/v8/src/cached-powers.cc
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdarg.h>
-#include <limits.h>
-
-#include "v8.h"
-
-#include "cached-powers.h"
-
-namespace v8 {
-namespace internal {
-
-struct CachedPower {
- uint64_t significand;
- int16_t binary_exponent;
- int16_t decimal_exponent;
-};
-
-static const CachedPower kCachedPowers[] = {
- {V8_2PART_UINT64_C(0xfa8fd5a0, 081c0288), -1220, -348},
- {V8_2PART_UINT64_C(0xbaaee17f, a23ebf76), -1193, -340},
- {V8_2PART_UINT64_C(0x8b16fb20, 3055ac76), -1166, -332},
- {V8_2PART_UINT64_C(0xcf42894a, 5dce35ea), -1140, -324},
- {V8_2PART_UINT64_C(0x9a6bb0aa, 55653b2d), -1113, -316},
- {V8_2PART_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
- {V8_2PART_UINT64_C(0xab70fe17, c79ac6ca), -1060, -300},
- {V8_2PART_UINT64_C(0xff77b1fc, bebcdc4f), -1034, -292},
- {V8_2PART_UINT64_C(0xbe5691ef, 416bd60c), -1007, -284},
- {V8_2PART_UINT64_C(0x8dd01fad, 907ffc3c), -980, -276},
- {V8_2PART_UINT64_C(0xd3515c28, 31559a83), -954, -268},
- {V8_2PART_UINT64_C(0x9d71ac8f, ada6c9b5), -927, -260},
- {V8_2PART_UINT64_C(0xea9c2277, 23ee8bcb), -901, -252},
- {V8_2PART_UINT64_C(0xaecc4991, 4078536d), -874, -244},
- {V8_2PART_UINT64_C(0x823c1279, 5db6ce57), -847, -236},
- {V8_2PART_UINT64_C(0xc2109436, 4dfb5637), -821, -228},
- {V8_2PART_UINT64_C(0x9096ea6f, 3848984f), -794, -220},
- {V8_2PART_UINT64_C(0xd77485cb, 25823ac7), -768, -212},
- {V8_2PART_UINT64_C(0xa086cfcd, 97bf97f4), -741, -204},
- {V8_2PART_UINT64_C(0xef340a98, 172aace5), -715, -196},
- {V8_2PART_UINT64_C(0xb23867fb, 2a35b28e), -688, -188},
- {V8_2PART_UINT64_C(0x84c8d4df, d2c63f3b), -661, -180},
- {V8_2PART_UINT64_C(0xc5dd4427, 1ad3cdba), -635, -172},
- {V8_2PART_UINT64_C(0x936b9fce, bb25c996), -608, -164},
- {V8_2PART_UINT64_C(0xdbac6c24, 7d62a584), -582, -156},
- {V8_2PART_UINT64_C(0xa3ab6658, 0d5fdaf6), -555, -148},
- {V8_2PART_UINT64_C(0xf3e2f893, dec3f126), -529, -140},
- {V8_2PART_UINT64_C(0xb5b5ada8, aaff80b8), -502, -132},
- {V8_2PART_UINT64_C(0x87625f05, 6c7c4a8b), -475, -124},
- {V8_2PART_UINT64_C(0xc9bcff60, 34c13053), -449, -116},
- {V8_2PART_UINT64_C(0x964e858c, 91ba2655), -422, -108},
- {V8_2PART_UINT64_C(0xdff97724, 70297ebd), -396, -100},
- {V8_2PART_UINT64_C(0xa6dfbd9f, b8e5b88f), -369, -92},
- {V8_2PART_UINT64_C(0xf8a95fcf, 88747d94), -343, -84},
- {V8_2PART_UINT64_C(0xb9447093, 8fa89bcf), -316, -76},
- {V8_2PART_UINT64_C(0x8a08f0f8, bf0f156b), -289, -68},
- {V8_2PART_UINT64_C(0xcdb02555, 653131b6), -263, -60},
- {V8_2PART_UINT64_C(0x993fe2c6, d07b7fac), -236, -52},
- {V8_2PART_UINT64_C(0xe45c10c4, 2a2b3b06), -210, -44},
- {V8_2PART_UINT64_C(0xaa242499, 697392d3), -183, -36},
- {V8_2PART_UINT64_C(0xfd87b5f2, 8300ca0e), -157, -28},
- {V8_2PART_UINT64_C(0xbce50864, 92111aeb), -130, -20},
- {V8_2PART_UINT64_C(0x8cbccc09, 6f5088cc), -103, -12},
- {V8_2PART_UINT64_C(0xd1b71758, e219652c), -77, -4},
- {V8_2PART_UINT64_C(0x9c400000, 00000000), -50, 4},
- {V8_2PART_UINT64_C(0xe8d4a510, 00000000), -24, 12},
- {V8_2PART_UINT64_C(0xad78ebc5, ac620000), 3, 20},
- {V8_2PART_UINT64_C(0x813f3978, f8940984), 30, 28},
- {V8_2PART_UINT64_C(0xc097ce7b, c90715b3), 56, 36},
- {V8_2PART_UINT64_C(0x8f7e32ce, 7bea5c70), 83, 44},
- {V8_2PART_UINT64_C(0xd5d238a4, abe98068), 109, 52},
- {V8_2PART_UINT64_C(0x9f4f2726, 179a2245), 136, 60},
- {V8_2PART_UINT64_C(0xed63a231, d4c4fb27), 162, 68},
- {V8_2PART_UINT64_C(0xb0de6538, 8cc8ada8), 189, 76},
- {V8_2PART_UINT64_C(0x83c7088e, 1aab65db), 216, 84},
- {V8_2PART_UINT64_C(0xc45d1df9, 42711d9a), 242, 92},
- {V8_2PART_UINT64_C(0x924d692c, a61be758), 269, 100},
- {V8_2PART_UINT64_C(0xda01ee64, 1a708dea), 295, 108},
- {V8_2PART_UINT64_C(0xa26da399, 9aef774a), 322, 116},
- {V8_2PART_UINT64_C(0xf209787b, b47d6b85), 348, 124},
- {V8_2PART_UINT64_C(0xb454e4a1, 79dd1877), 375, 132},
- {V8_2PART_UINT64_C(0x865b8692, 5b9bc5c2), 402, 140},
- {V8_2PART_UINT64_C(0xc83553c5, c8965d3d), 428, 148},
- {V8_2PART_UINT64_C(0x952ab45c, fa97a0b3), 455, 156},
- {V8_2PART_UINT64_C(0xde469fbd, 99a05fe3), 481, 164},
- {V8_2PART_UINT64_C(0xa59bc234, db398c25), 508, 172},
- {V8_2PART_UINT64_C(0xf6c69a72, a3989f5c), 534, 180},
- {V8_2PART_UINT64_C(0xb7dcbf53, 54e9bece), 561, 188},
- {V8_2PART_UINT64_C(0x88fcf317, f22241e2), 588, 196},
- {V8_2PART_UINT64_C(0xcc20ce9b, d35c78a5), 614, 204},
- {V8_2PART_UINT64_C(0x98165af3, 7b2153df), 641, 212},
- {V8_2PART_UINT64_C(0xe2a0b5dc, 971f303a), 667, 220},
- {V8_2PART_UINT64_C(0xa8d9d153, 5ce3b396), 694, 228},
- {V8_2PART_UINT64_C(0xfb9b7cd9, a4a7443c), 720, 236},
- {V8_2PART_UINT64_C(0xbb764c4c, a7a44410), 747, 244},
- {V8_2PART_UINT64_C(0x8bab8eef, b6409c1a), 774, 252},
- {V8_2PART_UINT64_C(0xd01fef10, a657842c), 800, 260},
- {V8_2PART_UINT64_C(0x9b10a4e5, e9913129), 827, 268},
- {V8_2PART_UINT64_C(0xe7109bfb, a19c0c9d), 853, 276},
- {V8_2PART_UINT64_C(0xac2820d9, 623bf429), 880, 284},
- {V8_2PART_UINT64_C(0x80444b5e, 7aa7cf85), 907, 292},
- {V8_2PART_UINT64_C(0xbf21e440, 03acdd2d), 933, 300},
- {V8_2PART_UINT64_C(0x8e679c2f, 5e44ff8f), 960, 308},
- {V8_2PART_UINT64_C(0xd433179d, 9c8cb841), 986, 316},
- {V8_2PART_UINT64_C(0x9e19db92, b4e31ba9), 1013, 324},
- {V8_2PART_UINT64_C(0xeb96bf6e, badf77d9), 1039, 332},
- {V8_2PART_UINT64_C(0xaf87023b, 9bf0ee6b), 1066, 340},
-};
-
-static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers);
-static const int kCachedPowersOffset = -kCachedPowers[0].decimal_exponent;
-static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / lg(10)
-const int PowersOfTenCache::kDecimalExponentDistance =
- kCachedPowers[1].decimal_exponent - kCachedPowers[0].decimal_exponent;
-const int PowersOfTenCache::kMinDecimalExponent =
- kCachedPowers[0].decimal_exponent;
-const int PowersOfTenCache::kMaxDecimalExponent =
- kCachedPowers[kCachedPowersLength - 1].decimal_exponent;
-
-void PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
- int min_exponent,
- int max_exponent,
- DiyFp* power,
- int* decimal_exponent) {
- int kQ = DiyFp::kSignificandSize;
- double k = ceiling((min_exponent + kQ - 1) * kD_1_LOG2_10);
- int foo = kCachedPowersOffset;
- int index =
- (foo + static_cast<int>(k) - 1) / kDecimalExponentDistance + 1;
- ASSERT(0 <= index && index < kCachedPowersLength);
- CachedPower cached_power = kCachedPowers[index];
- ASSERT(min_exponent <= cached_power.binary_exponent);
- ASSERT(cached_power.binary_exponent <= max_exponent);
- *decimal_exponent = cached_power.decimal_exponent;
- *power = DiyFp(cached_power.significand, cached_power.binary_exponent);
-}
-
-
-void PowersOfTenCache::GetCachedPowerForDecimalExponent(int requested_exponent,
- DiyFp* power,
- int* found_exponent) {
- ASSERT(kMinDecimalExponent <= requested_exponent);
- ASSERT(requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance);
- int index =
- (requested_exponent + kCachedPowersOffset) / kDecimalExponentDistance;
- CachedPower cached_power = kCachedPowers[index];
- *power = DiyFp(cached_power.significand, cached_power.binary_exponent);
- *found_exponent = cached_power.decimal_exponent;
- ASSERT(*found_exponent <= requested_exponent);
- ASSERT(requested_exponent < *found_exponent + kDecimalExponentDistance);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/cached-powers.h b/src/3rdparty/v8/src/cached-powers.h
deleted file mode 100644
index 2ae5619..0000000
--- a/src/3rdparty/v8/src/cached-powers.h
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CACHED_POWERS_H_
-#define V8_CACHED_POWERS_H_
-
-#include "diy-fp.h"
-
-namespace v8 {
-namespace internal {
-
-class PowersOfTenCache {
- public:
-
- // Not all powers of ten are cached. The decimal exponent of two neighboring
- // cached numbers will differ by kDecimalExponentDistance.
- static const int kDecimalExponentDistance;
-
- static const int kMinDecimalExponent;
- static const int kMaxDecimalExponent;
-
- // Returns a cached power-of-ten with a binary exponent in the range
- // [min_exponent; max_exponent] (boundaries included).
- static void GetCachedPowerForBinaryExponentRange(int min_exponent,
- int max_exponent,
- DiyFp* power,
- int* decimal_exponent);
-
- // Returns a cached power of ten x ~= 10^k such that
- // k <= decimal_exponent < k + kCachedPowersDecimalDistance.
- // The given decimal_exponent must satisfy
- // kMinDecimalExponent <= requested_exponent, and
- // requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance.
- static void GetCachedPowerForDecimalExponent(int requested_exponent,
- DiyFp* power,
- int* found_exponent);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_CACHED_POWERS_H_
diff --git a/src/3rdparty/v8/src/char-predicates-inl.h b/src/3rdparty/v8/src/char-predicates-inl.h
deleted file mode 100644
index 0dfc80d..0000000
--- a/src/3rdparty/v8/src/char-predicates-inl.h
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CHAR_PREDICATES_INL_H_
-#define V8_CHAR_PREDICATES_INL_H_
-
-#include "char-predicates.h"
-
-namespace v8 {
-namespace internal {
-
-
-// If c is in 'A'-'Z' or 'a'-'z', return its lower-case.
-// Else, return something outside of 'A'-'Z' and 'a'-'z'.
-// Note: it ignores LOCALE.
-inline int AsciiAlphaToLower(uc32 c) {
- return c | 0x20;
-}
-
-
-inline bool IsCarriageReturn(uc32 c) {
- return c == 0x000D;
-}
-
-
-inline bool IsLineFeed(uc32 c) {
- return c == 0x000A;
-}
-
-
-static inline bool IsInRange(int value, int lower_limit, int higher_limit) {
- ASSERT(lower_limit <= higher_limit);
- return static_cast<unsigned int>(value - lower_limit) <=
- static_cast<unsigned int>(higher_limit - lower_limit);
-}
-
-
-inline bool IsDecimalDigit(uc32 c) {
- // ECMA-262, 3rd, 7.8.3 (p 16)
- return IsInRange(c, '0', '9');
-}
-
-
-inline bool IsHexDigit(uc32 c) {
- // ECMA-262, 3rd, 7.6 (p 15)
- return IsDecimalDigit(c) || IsInRange(AsciiAlphaToLower(c), 'a', 'f');
-}
-
-
-inline bool IsRegExpWord(uc16 c) {
- return IsInRange(AsciiAlphaToLower(c), 'a', 'z')
- || IsDecimalDigit(c)
- || (c == '_');
-}
-
-
-inline bool IsRegExpNewline(uc16 c) {
- switch (c) {
- // CR LF LS PS
- case 0x000A: case 0x000D: case 0x2028: case 0x2029:
- return false;
- default:
- return true;
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_CHAR_PREDICATES_INL_H_
diff --git a/src/3rdparty/v8/src/char-predicates.h b/src/3rdparty/v8/src/char-predicates.h
deleted file mode 100644
index dac1eb8..0000000
--- a/src/3rdparty/v8/src/char-predicates.h
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CHAR_PREDICATES_H_
-#define V8_CHAR_PREDICATES_H_
-
-namespace v8 {
-namespace internal {
-
-// Unicode character predicates as defined by ECMA-262, 3rd,
-// used for lexical analysis.
-
-inline bool IsCarriageReturn(uc32 c);
-inline bool IsLineFeed(uc32 c);
-inline bool IsDecimalDigit(uc32 c);
-inline bool IsHexDigit(uc32 c);
-inline bool IsRegExpWord(uc32 c);
-inline bool IsRegExpNewline(uc32 c);
-
-struct IdentifierStart {
- static inline bool Is(uc32 c) {
- switch (c) {
- case '$': case '_': case '\\': return true;
- default: return unibrow::Letter::Is(c);
- }
- }
-};
-
-
-struct IdentifierPart {
- static inline bool Is(uc32 c) {
- return IdentifierStart::Is(c)
- || unibrow::Number::Is(c)
- || unibrow::CombiningMark::Is(c)
- || unibrow::ConnectorPunctuation::Is(c);
- }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_CHAR_PREDICATES_H_
diff --git a/src/3rdparty/v8/src/checks.cc b/src/3rdparty/v8/src/checks.cc
deleted file mode 100644
index 320fd6b..0000000
--- a/src/3rdparty/v8/src/checks.cc
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdarg.h>
-
-#include "v8.h"
-
-#include "platform.h"
-
-// TODO(isolates): is it necessary to lift this?
-static int fatal_error_handler_nesting_depth = 0;
-
-// Contains protection against recursive calls (faults while handling faults).
-extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
- fflush(stdout);
- fflush(stderr);
- fatal_error_handler_nesting_depth++;
- // First time we try to print an error message
- if (fatal_error_handler_nesting_depth < 2) {
- i::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file, line);
- va_list arguments;
- va_start(arguments, format);
- i::OS::VPrintError(format, arguments);
- va_end(arguments);
- i::OS::PrintError("\n#\n\n");
- }
- // First two times we may try to print a stack dump.
- if (fatal_error_handler_nesting_depth < 3) {
- if (i::FLAG_stack_trace_on_abort) {
- // Call this one twice on double fault
- i::Isolate::Current()->PrintStack();
- }
- }
- i::OS::Abort();
-}
-
-
-void CheckEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- v8::Handle<v8::Value> expected,
- const char* value_source,
- v8::Handle<v8::Value> value) {
- if (!expected->Equals(value)) {
- v8::String::Utf8Value value_str(value);
- v8::String::Utf8Value expected_str(expected);
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n# Expected: %s\n# Found: %s",
- expected_source, value_source, *expected_str, *value_str);
- }
-}
-
-
-void CheckNonEqualsHelper(const char* file,
- int line,
- const char* unexpected_source,
- v8::Handle<v8::Value> unexpected,
- const char* value_source,
- v8::Handle<v8::Value> value) {
- if (unexpected->Equals(value)) {
- v8::String::Utf8Value value_str(value);
- V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %s",
- unexpected_source, value_source, *value_str);
- }
-}
-
-
-void API_Fatal(const char* location, const char* format, ...) {
- i::OS::PrintError("\n#\n# Fatal error in %s\n# ", location);
- va_list arguments;
- va_start(arguments, format);
- i::OS::VPrintError(format, arguments);
- va_end(arguments);
- i::OS::PrintError("\n#\n\n");
- i::OS::Abort();
-}
-
-
-namespace v8 { namespace internal {
-
- bool EnableSlowAsserts() { return FLAG_enable_slow_asserts; }
-
- intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
-
-} } // namespace v8::internal
-
diff --git a/src/3rdparty/v8/src/checks.h b/src/3rdparty/v8/src/checks.h
deleted file mode 100644
index a560b2f..0000000
--- a/src/3rdparty/v8/src/checks.h
+++ /dev/null
@@ -1,296 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CHECKS_H_
-#define V8_CHECKS_H_
-
-#include <string.h>
-
-#include "../include/v8stdint.h"
-extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
-
-// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
-// development, but they should not be relied on in the final product.
-#ifdef DEBUG
-#define FATAL(msg) \
- V8_Fatal(__FILE__, __LINE__, "%s", (msg))
-#define UNIMPLEMENTED() \
- V8_Fatal(__FILE__, __LINE__, "unimplemented code")
-#define UNREACHABLE() \
- V8_Fatal(__FILE__, __LINE__, "unreachable code")
-#else
-#define FATAL(msg) \
- V8_Fatal("", 0, "%s", (msg))
-#define UNIMPLEMENTED() \
- V8_Fatal("", 0, "unimplemented code")
-#define UNREACHABLE() ((void) 0)
-#endif
-
-
-// Used by the CHECK macro -- should not be called directly.
-static inline void CheckHelper(const char* file,
- int line,
- const char* source,
- bool condition) {
- if (!condition)
- V8_Fatal(file, line, "CHECK(%s) failed", source);
-}
-
-
-// The CHECK macro checks that the given condition is true; if not, it
-// prints a message to stderr and aborts.
-#define CHECK(condition) CheckHelper(__FILE__, __LINE__, #condition, condition)
-
-
-// Helper function used by the CHECK_EQ function when given int
-// arguments. Should not be called directly.
-static inline void CheckEqualsHelper(const char* file, int line,
- const char* expected_source, int expected,
- const char* value_source, int value) {
- if (expected != value) {
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n# Expected: %i\n# Found: %i",
- expected_source, value_source, expected, value);
- }
-}
-
-
-// Helper function used by the CHECK_EQ function when given int64_t
-// arguments. Should not be called directly.
-static inline void CheckEqualsHelper(const char* file, int line,
- const char* expected_source,
- int64_t expected,
- const char* value_source,
- int64_t value) {
- if (expected != value) {
- // Print int64_t values in hex, as two int32s,
- // to avoid platform-dependencies.
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n#"
- " Expected: 0x%08x%08x\n# Found: 0x%08x%08x",
- expected_source, value_source,
- static_cast<uint32_t>(expected >> 32),
- static_cast<uint32_t>(expected),
- static_cast<uint32_t>(value >> 32),
- static_cast<uint32_t>(value));
- }
-}
-
-
-// Helper function used by the CHECK_NE function when given int
-// arguments. Should not be called directly.
-static inline void CheckNonEqualsHelper(const char* file,
- int line,
- const char* unexpected_source,
- int unexpected,
- const char* value_source,
- int value) {
- if (unexpected == value) {
- V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %i",
- unexpected_source, value_source, value);
- }
-}
-
-
-// Helper function used by the CHECK function when given string
-// arguments. Should not be called directly.
-static inline void CheckEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- const char* expected,
- const char* value_source,
- const char* value) {
- if ((expected == NULL && value != NULL) ||
- (expected != NULL && value == NULL) ||
- (expected != NULL && value != NULL && strcmp(expected, value) != 0)) {
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n# Expected: %s\n# Found: %s",
- expected_source, value_source, expected, value);
- }
-}
-
-
-static inline void CheckNonEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- const char* expected,
- const char* value_source,
- const char* value) {
- if (expected == value ||
- (expected != NULL && value != NULL && strcmp(expected, value) == 0)) {
- V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %s",
- expected_source, value_source, value);
- }
-}
-
-
-// Helper function used by the CHECK function when given pointer
-// arguments. Should not be called directly.
-static inline void CheckEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- const void* expected,
- const char* value_source,
- const void* value) {
- if (expected != value) {
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n# Expected: %p\n# Found: %p",
- expected_source, value_source,
- expected, value);
- }
-}
-
-
-static inline void CheckNonEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- const void* expected,
- const char* value_source,
- const void* value) {
- if (expected == value) {
- V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %p",
- expected_source, value_source, value);
- }
-}
-
-
-// Helper function used by the CHECK function when given floating
-// point arguments. Should not be called directly.
-static inline void CheckEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- double expected,
- const char* value_source,
- double value) {
- // Force values to 64 bit memory to truncate 80 bit precision on IA32.
- volatile double* exp = new double[1];
- *exp = expected;
- volatile double* val = new double[1];
- *val = value;
- if (*exp != *val) {
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n# Expected: %f\n# Found: %f",
- expected_source, value_source, *exp, *val);
- }
- delete[] exp;
- delete[] val;
-}
-
-
-static inline void CheckNonEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- double expected,
- const char* value_source,
- double value) {
- // Force values to 64 bit memory to truncate 80 bit precision on IA32.
- volatile double* exp = new double[1];
- *exp = expected;
- volatile double* val = new double[1];
- *val = value;
- if (*exp == *val) {
- V8_Fatal(file, line,
- "CHECK_NE(%s, %s) failed\n# Value: %f",
- expected_source, value_source, *val);
- }
- delete[] exp;
- delete[] val;
-}
-
-
-#define CHECK_EQ(expected, value) CheckEqualsHelper(__FILE__, __LINE__, \
- #expected, expected, #value, value)
-
-
-#define CHECK_NE(unexpected, value) CheckNonEqualsHelper(__FILE__, __LINE__, \
- #unexpected, unexpected, #value, value)
-
-
-#define CHECK_GT(a, b) CHECK((a) > (b))
-#define CHECK_GE(a, b) CHECK((a) >= (b))
-#define CHECK_LT(a, b) CHECK((a) < (b))
-#define CHECK_LE(a, b) CHECK((a) <= (b))
-
-
-// This is inspired by the static assertion facility in boost. This
-// is pretty magical. If it causes you trouble on a platform you may
-// find a fix in the boost code.
-template <bool> class StaticAssertion;
-template <> class StaticAssertion<true> { };
-// This macro joins two tokens. If one of the tokens is a macro the
-// helper call causes it to be resolved before joining.
-#define SEMI_STATIC_JOIN(a, b) SEMI_STATIC_JOIN_HELPER(a, b)
-#define SEMI_STATIC_JOIN_HELPER(a, b) a##b
-// Causes an error during compilation of the condition is not
-// statically known to be true. It is formulated as a typedef so that
-// it can be used wherever a typedef can be used. Beware that this
-// actually causes each use to introduce a new defined type with a
-// name depending on the source line.
-template <int> class StaticAssertionHelper { };
-#define STATIC_CHECK(test) \
- typedef \
- StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>(test)>)> \
- SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__)
-
-
-namespace v8 { namespace internal {
-
-bool EnableSlowAsserts();
-
-} } // namespace v8::internal
-
-// The ASSERT macro is equivalent to CHECK except that it only
-// generates code in debug builds.
-#ifdef DEBUG
-#define ASSERT_RESULT(expr) CHECK(expr)
-#define ASSERT(condition) CHECK(condition)
-#define ASSERT_EQ(v1, v2) CHECK_EQ(v1, v2)
-#define ASSERT_NE(v1, v2) CHECK_NE(v1, v2)
-#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
-#define ASSERT_LT(v1, v2) CHECK_LT(v1, v2)
-#define ASSERT_LE(v1, v2) CHECK_LE(v1, v2)
-#define SLOW_ASSERT(condition) if (EnableSlowAsserts()) CHECK(condition)
-#else
-#define ASSERT_RESULT(expr) (expr)
-#define ASSERT(condition) ((void) 0)
-#define ASSERT_EQ(v1, v2) ((void) 0)
-#define ASSERT_NE(v1, v2) ((void) 0)
-#define ASSERT_GE(v1, v2) ((void) 0)
-#define ASSERT_LT(v1, v2) ((void) 0)
-#define ASSERT_LE(v1, v2) ((void) 0)
-#define SLOW_ASSERT(condition) ((void) 0)
-#endif
-// Static asserts has no impact on runtime performance, so they can be
-// safely enabled in release mode. Moreover, the ((void) 0) expression
-// obeys different syntax rules than typedef's, e.g. it can't appear
-// inside class declaration, this leads to inconsistency between debug
-// and release compilation modes behavior.
-#define STATIC_ASSERT(test) STATIC_CHECK(test)
-
-#define ASSERT_NOT_NULL(p) ASSERT_NE(NULL, p)
-
-#endif // V8_CHECKS_H_
diff --git a/src/3rdparty/v8/src/circular-queue-inl.h b/src/3rdparty/v8/src/circular-queue-inl.h
deleted file mode 100644
index 349f222..0000000
--- a/src/3rdparty/v8/src/circular-queue-inl.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CIRCULAR_BUFFER_INL_H_
-#define V8_CIRCULAR_BUFFER_INL_H_
-
-#include "circular-queue.h"
-
-namespace v8 {
-namespace internal {
-
-
-void* SamplingCircularQueue::Enqueue() {
- WrapPositionIfNeeded(&producer_pos_->enqueue_pos);
- void* result = producer_pos_->enqueue_pos;
- producer_pos_->enqueue_pos += record_size_;
- return result;
-}
-
-
-void SamplingCircularQueue::WrapPositionIfNeeded(
- SamplingCircularQueue::Cell** pos) {
- if (**pos == kEnd) *pos = buffer_;
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_CIRCULAR_BUFFER_INL_H_
diff --git a/src/3rdparty/v8/src/circular-queue.cc b/src/3rdparty/v8/src/circular-queue.cc
deleted file mode 100644
index 928c3f0..0000000
--- a/src/3rdparty/v8/src/circular-queue.cc
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "circular-queue-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-SamplingCircularQueue::SamplingCircularQueue(int record_size_in_bytes,
- int desired_chunk_size_in_bytes,
- int buffer_size_in_chunks)
- : record_size_(record_size_in_bytes / sizeof(Cell)),
- chunk_size_in_bytes_(desired_chunk_size_in_bytes / record_size_in_bytes *
- record_size_in_bytes),
- chunk_size_(chunk_size_in_bytes_ / sizeof(Cell)),
- buffer_size_(chunk_size_ * buffer_size_in_chunks),
- // The distance ensures that producer and consumer never step on
- // each other's chunks and helps eviction of produced data from
- // the CPU cache (having that chunk size is bigger than the cache.)
- producer_consumer_distance_(2 * chunk_size_),
- buffer_(NewArray<Cell>(buffer_size_ + 1)) {
- ASSERT(buffer_size_in_chunks > 2);
- // Clean up the whole buffer to avoid encountering a random kEnd
- // while enqueuing.
- for (int i = 0; i < buffer_size_; ++i) {
- buffer_[i] = kClear;
- }
- buffer_[buffer_size_] = kEnd;
-
- // Layout producer and consumer position pointers each on their own
- // cache lines to avoid cache lines thrashing due to simultaneous
- // updates of positions by different processor cores.
- const int positions_size =
- RoundUp(1, kProcessorCacheLineSize) +
- RoundUp(static_cast<int>(sizeof(ProducerPosition)),
- kProcessorCacheLineSize) +
- RoundUp(static_cast<int>(sizeof(ConsumerPosition)),
- kProcessorCacheLineSize);
- positions_ = NewArray<byte>(positions_size);
-
- producer_pos_ = reinterpret_cast<ProducerPosition*>(
- RoundUp(positions_, kProcessorCacheLineSize));
- producer_pos_->enqueue_pos = buffer_;
-
- consumer_pos_ = reinterpret_cast<ConsumerPosition*>(
- reinterpret_cast<byte*>(producer_pos_) + kProcessorCacheLineSize);
- ASSERT(reinterpret_cast<byte*>(consumer_pos_ + 1) <=
- positions_ + positions_size);
- consumer_pos_->dequeue_chunk_pos = buffer_;
- consumer_pos_->dequeue_chunk_poll_pos = buffer_ + producer_consumer_distance_;
- consumer_pos_->dequeue_pos = NULL;
-}
-
-
-SamplingCircularQueue::~SamplingCircularQueue() {
- DeleteArray(positions_);
- DeleteArray(buffer_);
-}
-
-
-void* SamplingCircularQueue::StartDequeue() {
- if (consumer_pos_->dequeue_pos != NULL) {
- return consumer_pos_->dequeue_pos;
- } else {
- if (*consumer_pos_->dequeue_chunk_poll_pos != kClear) {
- consumer_pos_->dequeue_pos = consumer_pos_->dequeue_chunk_pos;
- consumer_pos_->dequeue_end_pos = consumer_pos_->dequeue_pos + chunk_size_;
- return consumer_pos_->dequeue_pos;
- } else {
- return NULL;
- }
- }
-}
-
-
-void SamplingCircularQueue::FinishDequeue() {
- consumer_pos_->dequeue_pos += record_size_;
- if (consumer_pos_->dequeue_pos < consumer_pos_->dequeue_end_pos) return;
- // Move to next chunk.
- consumer_pos_->dequeue_pos = NULL;
- *consumer_pos_->dequeue_chunk_pos = kClear;
- consumer_pos_->dequeue_chunk_pos += chunk_size_;
- WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_pos);
- consumer_pos_->dequeue_chunk_poll_pos += chunk_size_;
- WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_poll_pos);
-}
-
-
-void SamplingCircularQueue::FlushResidualRecords() {
- // Eliminate producer / consumer distance.
- consumer_pos_->dequeue_chunk_poll_pos = consumer_pos_->dequeue_chunk_pos;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/circular-queue.h b/src/3rdparty/v8/src/circular-queue.h
deleted file mode 100644
index 73afc68..0000000
--- a/src/3rdparty/v8/src/circular-queue.h
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CIRCULAR_QUEUE_H_
-#define V8_CIRCULAR_QUEUE_H_
-
-namespace v8 {
-namespace internal {
-
-
-// Lock-free cache-friendly sampling circular queue for large
-// records. Intended for fast transfer of large records between a
-// single producer and a single consumer. If the queue is full,
-// previous unread records are overwritten. The queue is designed with
-// a goal in mind to evade cache lines thrashing by preventing
-// simultaneous reads and writes to adjanced memory locations.
-//
-// IMPORTANT: as a producer never checks for chunks cleanness, it is
-// possible that it can catch up and overwrite a chunk that a consumer
-// is currently reading, resulting in a corrupt record being read.
-class SamplingCircularQueue {
- public:
- // Executed on the application thread.
- SamplingCircularQueue(int record_size_in_bytes,
- int desired_chunk_size_in_bytes,
- int buffer_size_in_chunks);
- ~SamplingCircularQueue();
-
- // Enqueue returns a pointer to a memory location for storing the next
- // record.
- INLINE(void* Enqueue());
-
- // Executed on the consumer (analyzer) thread.
- // StartDequeue returns a pointer to a memory location for retrieving
- // the next record. After the record had been read by a consumer,
- // FinishDequeue must be called. Until that moment, subsequent calls
- // to StartDequeue will return the same pointer.
- void* StartDequeue();
- void FinishDequeue();
- // Due to a presence of slipping between the producer and the consumer,
- // the queue must be notified whether producing has been finished in order
- // to process remaining records from the buffer.
- void FlushResidualRecords();
-
- typedef AtomicWord Cell;
- // Reserved values for the first cell of a record.
- static const Cell kClear = 0; // Marks clean (processed) chunks.
- static const Cell kEnd = -1; // Marks the end of the buffer.
-
- private:
- struct ProducerPosition {
- Cell* enqueue_pos;
- };
- struct ConsumerPosition {
- Cell* dequeue_chunk_pos;
- Cell* dequeue_chunk_poll_pos;
- Cell* dequeue_pos;
- Cell* dequeue_end_pos;
- };
-
- INLINE(void WrapPositionIfNeeded(Cell** pos));
-
- const int record_size_;
- const int chunk_size_in_bytes_;
- const int chunk_size_;
- const int buffer_size_;
- const int producer_consumer_distance_;
- Cell* buffer_;
- byte* positions_;
- ProducerPosition* producer_pos_;
- ConsumerPosition* consumer_pos_;
-
- DISALLOW_COPY_AND_ASSIGN(SamplingCircularQueue);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_CIRCULAR_QUEUE_H_
diff --git a/src/3rdparty/v8/src/code-stubs.cc b/src/3rdparty/v8/src/code-stubs.cc
deleted file mode 100644
index f680c60..0000000
--- a/src/3rdparty/v8/src/code-stubs.cc
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "factory.h"
-#include "gdb-jit.h"
-#include "macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-bool CodeStub::FindCodeInCache(Code** code_out) {
- Heap* heap = Isolate::Current()->heap();
- int index = heap->code_stubs()->FindEntry(GetKey());
- if (index != NumberDictionary::kNotFound) {
- *code_out = Code::cast(heap->code_stubs()->ValueAt(index));
- return true;
- }
- return false;
-}
-
-
-void CodeStub::GenerateCode(MacroAssembler* masm) {
- // Update the static counter each time a new code stub is generated.
- masm->isolate()->counters()->code_stubs()->Increment();
-
- // Nested stubs are not allowed for leafs.
- AllowStubCallsScope allow_scope(masm, AllowsStubCalls());
-
- // Generate the code for the stub.
- masm->set_generating_stub(true);
- Generate(masm);
-}
-
-
-void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
- code->set_major_key(MajorKey());
-
- Isolate* isolate = masm->isolate();
- PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
- GDBJIT(AddCode(GDBJITInterface::STUB, GetName(), code));
- Counters* counters = isolate->counters();
- counters->total_stubs_code_size()->Increment(code->instruction_size());
-
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code_stubs) {
-#ifdef DEBUG
- Print();
-#endif
- code->Disassemble(GetName());
- PrintF("\n");
- }
-#endif
-}
-
-
-int CodeStub::GetCodeKind() {
- return Code::STUB;
-}
-
-
-Handle<Code> CodeStub::GetCode() {
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
- Code* code;
- if (!FindCodeInCache(&code)) {
- HandleScope scope(isolate);
-
- // Generate the new code.
- MacroAssembler masm(isolate, NULL, 256);
- GenerateCode(&masm);
-
- // Create the code object.
- CodeDesc desc;
- masm.GetCode(&desc);
-
- // Copy the generated code into a heap object.
- Code::Flags flags = Code::ComputeFlags(
- static_cast<Code::Kind>(GetCodeKind()),
- InLoop(),
- GetICState());
- Handle<Code> new_object = factory->NewCode(
- desc, flags, masm.CodeObject(), NeedsImmovableCode());
- RecordCodeGeneration(*new_object, &masm);
- FinishCode(*new_object);
-
- // Update the dictionary and the root in Heap.
- Handle<NumberDictionary> dict =
- factory->DictionaryAtNumberPut(
- Handle<NumberDictionary>(heap->code_stubs()),
- GetKey(),
- new_object);
- heap->public_set_code_stubs(*dict);
-
- code = *new_object;
- }
-
- ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
- return Handle<Code>(code, isolate);
-}
-
-
-MaybeObject* CodeStub::TryGetCode() {
- Code* code;
- if (!FindCodeInCache(&code)) {
- // Generate the new code.
- MacroAssembler masm(Isolate::Current(), NULL, 256);
- GenerateCode(&masm);
- Heap* heap = masm.isolate()->heap();
-
- // Create the code object.
- CodeDesc desc;
- masm.GetCode(&desc);
-
- // Try to copy the generated code into a heap object.
- Code::Flags flags = Code::ComputeFlags(
- static_cast<Code::Kind>(GetCodeKind()),
- InLoop(),
- GetICState());
- Object* new_object;
- { MaybeObject* maybe_new_object =
- heap->CreateCode(desc, flags, masm.CodeObject());
- if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
- }
- code = Code::cast(new_object);
- RecordCodeGeneration(code, &masm);
- FinishCode(code);
-
- // Try to update the code cache but do not fail if unable.
- MaybeObject* maybe_new_object =
- heap->code_stubs()->AtNumberPut(GetKey(), code);
- if (maybe_new_object->ToObject(&new_object)) {
- heap->public_set_code_stubs(NumberDictionary::cast(new_object));
- }
- }
-
- return code;
-}
-
-
-const char* CodeStub::MajorName(CodeStub::Major major_key,
- bool allow_unknown_keys) {
- switch (major_key) {
-#define DEF_CASE(name) case name: return #name;
- CODE_STUB_LIST(DEF_CASE)
-#undef DEF_CASE
- default:
- if (!allow_unknown_keys) {
- UNREACHABLE();
- }
- return NULL;
- }
-}
-
-
-int ICCompareStub::MinorKey() {
- return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
-}
-
-
-void ICCompareStub::Generate(MacroAssembler* masm) {
- switch (state_) {
- case CompareIC::UNINITIALIZED:
- GenerateMiss(masm);
- break;
- case CompareIC::SMIS:
- GenerateSmis(masm);
- break;
- case CompareIC::HEAP_NUMBERS:
- GenerateHeapNumbers(masm);
- break;
- case CompareIC::OBJECTS:
- GenerateObjects(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-const char* InstanceofStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
-
- const char* args = "";
- if (HasArgsInRegisters()) {
- args = "_REGS";
- }
-
- const char* inline_check = "";
- if (HasCallSiteInlineCheck()) {
- inline_check = "_INLINE";
- }
-
- const char* return_true_false_object = "";
- if (ReturnTrueFalseObject()) {
- return_true_false_object = "_TRUEFALSE";
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "InstanceofStub%s%s%s",
- args,
- inline_check,
- return_true_false_object);
- return name_;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/code-stubs.h b/src/3rdparty/v8/src/code-stubs.h
deleted file mode 100644
index d408034..0000000
--- a/src/3rdparty/v8/src/code-stubs.h
+++ /dev/null
@@ -1,971 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CODE_STUBS_H_
-#define V8_CODE_STUBS_H_
-
-#include "globals.h"
-
-namespace v8 {
-namespace internal {
-
-// List of code stubs used on all platforms. The order in this list is important
-// as only the stubs up to and including Instanceof allows nested stub calls.
-#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
- V(CallFunction) \
- V(GenericBinaryOp) \
- V(TypeRecordingBinaryOp) \
- V(StringAdd) \
- V(SubString) \
- V(StringCompare) \
- V(SmiOp) \
- V(Compare) \
- V(CompareIC) \
- V(MathPow) \
- V(TranscendentalCache) \
- V(Instanceof) \
- V(ConvertToDouble) \
- V(WriteInt32ToHeapNumber) \
- V(IntegerMod) \
- V(StackCheck) \
- V(FastNewClosure) \
- V(FastNewContext) \
- V(FastCloneShallowArray) \
- V(GenericUnaryOp) \
- V(RevertToNumber) \
- V(ToBoolean) \
- V(ToNumber) \
- V(CounterOp) \
- V(ArgumentsAccess) \
- V(RegExpExec) \
- V(RegExpConstructResult) \
- V(NumberToString) \
- V(CEntry) \
- V(JSEntry) \
- V(DebuggerStatement)
-
-// List of code stubs only used on ARM platforms.
-#ifdef V8_TARGET_ARCH_ARM
-#define CODE_STUB_LIST_ARM(V) \
- V(GetProperty) \
- V(SetProperty) \
- V(InvokeBuiltin) \
- V(RegExpCEntry) \
- V(DirectCEntry)
-#else
-#define CODE_STUB_LIST_ARM(V)
-#endif
-
-// List of code stubs only used on MIPS platforms.
-#ifdef V8_TARGET_ARCH_MIPS
-#define CODE_STUB_LIST_MIPS(V) \
- V(RegExpCEntry)
-#else
-#define CODE_STUB_LIST_MIPS(V)
-#endif
-
-// Combined list of code stubs.
-#define CODE_STUB_LIST(V) \
- CODE_STUB_LIST_ALL_PLATFORMS(V) \
- CODE_STUB_LIST_ARM(V) \
- CODE_STUB_LIST_MIPS(V)
-
-// Mode to overwrite BinaryExpression values.
-enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
-enum UnaryOverwriteMode { UNARY_OVERWRITE, UNARY_NO_OVERWRITE };
-
-
-// Stub is base classes of all stubs.
-class CodeStub BASE_EMBEDDED {
- public:
- enum Major {
-#define DEF_ENUM(name) name,
- CODE_STUB_LIST(DEF_ENUM)
-#undef DEF_ENUM
- NoCache, // marker for stubs that do custom caching
- NUMBER_OF_IDS
- };
-
- // Retrieve the code for the stub. Generate the code if needed.
- Handle<Code> GetCode();
-
- // Retrieve the code for the stub if already generated. Do not
- // generate the code if not already generated and instead return a
- // retry after GC Failure object.
- MUST_USE_RESULT MaybeObject* TryGetCode();
-
- static Major MajorKeyFromKey(uint32_t key) {
- return static_cast<Major>(MajorKeyBits::decode(key));
- }
- static int MinorKeyFromKey(uint32_t key) {
- return MinorKeyBits::decode(key);
- }
-
- // Gets the major key from a code object that is a code stub or binary op IC.
- static Major GetMajorKey(Code* code_stub) {
- return static_cast<Major>(code_stub->major_key());
- }
-
- static const char* MajorName(Major major_key, bool allow_unknown_keys);
-
- virtual ~CodeStub() {}
-
- protected:
- static const int kMajorBits = 6;
- static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
-
- private:
- // Lookup the code in the (possibly custom) cache.
- bool FindCodeInCache(Code** code_out);
-
- // Nonvirtual wrapper around the stub-specific Generate function. Call
- // this function to set up the macro assembler and generate the code.
- void GenerateCode(MacroAssembler* masm);
-
- // Generates the assembler code for the stub.
- virtual void Generate(MacroAssembler* masm) = 0;
-
- // Perform bookkeeping required after code generation when stub code is
- // initially generated.
- void RecordCodeGeneration(Code* code, MacroAssembler* masm);
-
- // Finish the code object after it has been generated.
- virtual void FinishCode(Code* code) { }
-
- // Returns information for computing the number key.
- virtual Major MajorKey() = 0;
- virtual int MinorKey() = 0;
-
- // The CallFunctionStub needs to override this so it can encode whether a
- // lazily generated function should be fully optimized or not.
- virtual InLoopFlag InLoop() { return NOT_IN_LOOP; }
-
- // GenericBinaryOpStub needs to override this.
- virtual int GetCodeKind();
-
- // GenericBinaryOpStub needs to override this.
- virtual InlineCacheState GetICState() {
- return UNINITIALIZED;
- }
-
- // Returns a name for logging/debugging purposes.
- virtual const char* GetName() { return MajorName(MajorKey(), false); }
-
- // Returns whether the code generated for this stub needs to be allocated as
- // a fixed (non-moveable) code object.
- virtual bool NeedsImmovableCode() { return false; }
-
- #ifdef DEBUG
- virtual void Print() { PrintF("%s\n", GetName()); }
-#endif
-
- // Computes the key based on major and minor.
- uint32_t GetKey() {
- ASSERT(static_cast<int>(MajorKey()) < NUMBER_OF_IDS);
- return MinorKeyBits::encode(MinorKey()) |
- MajorKeyBits::encode(MajorKey());
- }
-
- bool AllowsStubCalls() { return MajorKey() <= Instanceof; }
-
- class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {};
- class MinorKeyBits: public BitField<uint32_t, kMajorBits, kMinorBits> {};
-
- friend class BreakPointIterator;
-};
-
-
-// Helper interface to prepare to/restore after making runtime calls.
-class RuntimeCallHelper {
- public:
- virtual ~RuntimeCallHelper() {}
-
- virtual void BeforeCall(MacroAssembler* masm) const = 0;
-
- virtual void AfterCall(MacroAssembler* masm) const = 0;
-
- protected:
- RuntimeCallHelper() {}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(RuntimeCallHelper);
-};
-
-} } // namespace v8::internal
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/code-stubs-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/code-stubs-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/code-stubs-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/code-stubs-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-
-// RuntimeCallHelper implementation used in stubs: enters/leaves a
-// newly created internal frame before/after the runtime call.
-class StubRuntimeCallHelper : public RuntimeCallHelper {
- public:
- StubRuntimeCallHelper() {}
-
- virtual void BeforeCall(MacroAssembler* masm) const;
-
- virtual void AfterCall(MacroAssembler* masm) const;
-};
-
-
-// Trivial RuntimeCallHelper implementation.
-class NopRuntimeCallHelper : public RuntimeCallHelper {
- public:
- NopRuntimeCallHelper() {}
-
- virtual void BeforeCall(MacroAssembler* masm) const {}
-
- virtual void AfterCall(MacroAssembler* masm) const {}
-};
-
-
-class StackCheckStub : public CodeStub {
- public:
- StackCheckStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
-
- const char* GetName() { return "StackCheckStub"; }
-
- Major MajorKey() { return StackCheck; }
- int MinorKey() { return 0; }
-};
-
-
-class ToNumberStub: public CodeStub {
- public:
- ToNumberStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToNumber; }
- int MinorKey() { return 0; }
- const char* GetName() { return "ToNumberStub"; }
-};
-
-
-class FastNewClosureStub : public CodeStub {
- public:
- explicit FastNewClosureStub(StrictModeFlag strict_mode)
- : strict_mode_(strict_mode) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- const char* GetName() { return "FastNewClosureStub"; }
- Major MajorKey() { return FastNewClosure; }
- int MinorKey() { return strict_mode_; }
-
- StrictModeFlag strict_mode_;
-};
-
-
-class FastNewContextStub : public CodeStub {
- public:
- static const int kMaximumSlots = 64;
-
- explicit FastNewContextStub(int slots) : slots_(slots) {
- ASSERT(slots_ > 0 && slots <= kMaximumSlots);
- }
-
- void Generate(MacroAssembler* masm);
-
- private:
- int slots_;
-
- const char* GetName() { return "FastNewContextStub"; }
- Major MajorKey() { return FastNewContext; }
- int MinorKey() { return slots_; }
-};
-
-
-class FastCloneShallowArrayStub : public CodeStub {
- public:
- // Maximum length of copied elements array.
- static const int kMaximumClonedLength = 8;
-
- enum Mode {
- CLONE_ELEMENTS,
- COPY_ON_WRITE_ELEMENTS
- };
-
- FastCloneShallowArrayStub(Mode mode, int length)
- : mode_(mode),
- length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) {
- ASSERT(length_ >= 0);
- ASSERT(length_ <= kMaximumClonedLength);
- }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Mode mode_;
- int length_;
-
- const char* GetName() { return "FastCloneShallowArrayStub"; }
- Major MajorKey() { return FastCloneShallowArray; }
- int MinorKey() {
- ASSERT(mode_ == 0 || mode_ == 1);
- return (length_ << 1) | mode_;
- }
-};
-
-
-class InstanceofStub: public CodeStub {
- public:
- enum Flags {
- kNoFlags = 0,
- kArgsInRegisters = 1 << 0,
- kCallSiteInlineCheck = 1 << 1,
- kReturnTrueFalseObject = 1 << 2
- };
-
- explicit InstanceofStub(Flags flags) : flags_(flags), name_(NULL) { }
-
- static Register left();
- static Register right();
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return Instanceof; }
- int MinorKey() { return static_cast<int>(flags_); }
-
- bool HasArgsInRegisters() const {
- return (flags_ & kArgsInRegisters) != 0;
- }
-
- bool HasCallSiteInlineCheck() const {
- return (flags_ & kCallSiteInlineCheck) != 0;
- }
-
- bool ReturnTrueFalseObject() const {
- return (flags_ & kReturnTrueFalseObject) != 0;
- }
-
- const char* GetName();
-
- Flags flags_;
- char* name_;
-};
-
-
-enum NegativeZeroHandling {
- kStrictNegativeZero,
- kIgnoreNegativeZero
-};
-
-
-enum UnaryOpFlags {
- NO_UNARY_FLAGS = 0,
- NO_UNARY_SMI_CODE_IN_STUB = 1 << 0
-};
-
-
-class GenericUnaryOpStub : public CodeStub {
- public:
- GenericUnaryOpStub(Token::Value op,
- UnaryOverwriteMode overwrite,
- UnaryOpFlags flags,
- NegativeZeroHandling negative_zero = kStrictNegativeZero)
- : op_(op),
- overwrite_(overwrite),
- include_smi_code_((flags & NO_UNARY_SMI_CODE_IN_STUB) == 0),
- negative_zero_(negative_zero) { }
-
- private:
- Token::Value op_;
- UnaryOverwriteMode overwrite_;
- bool include_smi_code_;
- NegativeZeroHandling negative_zero_;
-
- class OverwriteField: public BitField<UnaryOverwriteMode, 0, 1> {};
- class IncludeSmiCodeField: public BitField<bool, 1, 1> {};
- class NegativeZeroField: public BitField<NegativeZeroHandling, 2, 1> {};
- class OpField: public BitField<Token::Value, 3, kMinorBits - 3> {};
-
- Major MajorKey() { return GenericUnaryOp; }
- int MinorKey() {
- return OpField::encode(op_) |
- OverwriteField::encode(overwrite_) |
- IncludeSmiCodeField::encode(include_smi_code_) |
- NegativeZeroField::encode(negative_zero_);
- }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName();
-};
-
-
-class MathPowStub: public CodeStub {
- public:
- MathPowStub() {}
- virtual void Generate(MacroAssembler* masm);
-
- private:
- virtual CodeStub::Major MajorKey() { return MathPow; }
- virtual int MinorKey() { return 0; }
-
- const char* GetName() { return "MathPowStub"; }
-};
-
-
-class ICCompareStub: public CodeStub {
- public:
- ICCompareStub(Token::Value op, CompareIC::State state)
- : op_(op), state_(state) {
- ASSERT(Token::IsCompareOp(op));
- }
-
- virtual void Generate(MacroAssembler* masm);
-
- private:
- class OpField: public BitField<int, 0, 3> { };
- class StateField: public BitField<int, 3, 5> { };
-
- virtual void FinishCode(Code* code) { code->set_compare_state(state_); }
-
- virtual CodeStub::Major MajorKey() { return CompareIC; }
- virtual int MinorKey();
-
- virtual int GetCodeKind() { return Code::COMPARE_IC; }
-
- void GenerateSmis(MacroAssembler* masm);
- void GenerateHeapNumbers(MacroAssembler* masm);
- void GenerateObjects(MacroAssembler* masm);
- void GenerateMiss(MacroAssembler* masm);
-
- bool strict() const { return op_ == Token::EQ_STRICT; }
- Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }
-
- Token::Value op_;
- CompareIC::State state_;
-};
-
-
-// Flags that control the compare stub code generation.
-enum CompareFlags {
- NO_COMPARE_FLAGS = 0,
- NO_SMI_COMPARE_IN_STUB = 1 << 0,
- NO_NUMBER_COMPARE_IN_STUB = 1 << 1,
- CANT_BOTH_BE_NAN = 1 << 2
-};
-
-
-enum NaNInformation {
- kBothCouldBeNaN,
- kCantBothBeNaN
-};
-
-
-class CompareStub: public CodeStub {
- public:
- CompareStub(Condition cc,
- bool strict,
- CompareFlags flags,
- Register lhs,
- Register rhs) :
- cc_(cc),
- strict_(strict),
- never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
- include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
- include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
- lhs_(lhs),
- rhs_(rhs),
- name_(NULL) { }
-
- CompareStub(Condition cc,
- bool strict,
- CompareFlags flags) :
- cc_(cc),
- strict_(strict),
- never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
- include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
- include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
- lhs_(no_reg),
- rhs_(no_reg),
- name_(NULL) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Condition cc_;
- bool strict_;
- // Only used for 'equal' comparisons. Tells the stub that we already know
- // that at least one side of the comparison is not NaN. This allows the
- // stub to use object identity in the positive case. We ignore it when
- // generating the minor key for other comparisons to avoid creating more
- // stubs.
- bool never_nan_nan_;
- // Do generate the number comparison code in the stub. Stubs without number
- // comparison code is used when the number comparison has been inlined, and
- // the stub will be called if one of the operands is not a number.
- bool include_number_compare_;
-
- // Generate the comparison code for two smi operands in the stub.
- bool include_smi_compare_;
-
- // Register holding the left hand side of the comparison if the stub gives
- // a choice, no_reg otherwise.
-
- Register lhs_;
- // Register holding the right hand side of the comparison if the stub gives
- // a choice, no_reg otherwise.
- Register rhs_;
-
- // Encoding of the minor key in 16 bits.
- class StrictField: public BitField<bool, 0, 1> {};
- class NeverNanNanField: public BitField<bool, 1, 1> {};
- class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
- class IncludeSmiCompareField: public BitField<bool, 3, 1> {};
- class RegisterField: public BitField<bool, 4, 1> {};
- class ConditionField: public BitField<int, 5, 11> {};
-
- Major MajorKey() { return Compare; }
-
- int MinorKey();
-
- virtual int GetCodeKind() { return Code::COMPARE_IC; }
- virtual void FinishCode(Code* code) {
- code->set_compare_state(CompareIC::GENERIC);
- }
-
- // Branch to the label if the given object isn't a symbol.
- void BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch);
-
- // Unfortunately you have to run without snapshots to see most of these
- // names in the profile since most compare stubs end up in the snapshot.
- char* name_;
- const char* GetName();
-#ifdef DEBUG
- void Print() {
- PrintF("CompareStub (minor %d) (cc %d), (strict %s), "
- "(never_nan_nan %s), (smi_compare %s) (number_compare %s) ",
- MinorKey(),
- static_cast<int>(cc_),
- strict_ ? "true" : "false",
- never_nan_nan_ ? "true" : "false",
- include_smi_compare_ ? "inluded" : "not included",
- include_number_compare_ ? "included" : "not included");
-
- if (!lhs_.is(no_reg) && !rhs_.is(no_reg)) {
- PrintF("(lhs r%d), (rhs r%d)\n", lhs_.code(), rhs_.code());
- } else {
- PrintF("\n");
- }
- }
-#endif
-};
-
-
-class CEntryStub : public CodeStub {
- public:
- explicit CEntryStub(int result_size)
- : result_size_(result_size), save_doubles_(false) { }
-
- void Generate(MacroAssembler* masm);
- void SaveDoubles() { save_doubles_ = true; }
-
- private:
- void GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate_scope);
- void GenerateThrowTOS(MacroAssembler* masm);
- void GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type);
-
- // Number of pointers/values returned.
- const int result_size_;
- bool save_doubles_;
-
- Major MajorKey() { return CEntry; }
- int MinorKey();
-
- bool NeedsImmovableCode();
-
- const char* GetName() { return "CEntryStub"; }
-};
-
-
-class JSEntryStub : public CodeStub {
- public:
- JSEntryStub() { }
-
- void Generate(MacroAssembler* masm) { GenerateBody(masm, false); }
-
- protected:
- void GenerateBody(MacroAssembler* masm, bool is_construct);
-
- private:
- Major MajorKey() { return JSEntry; }
- int MinorKey() { return 0; }
-
- const char* GetName() { return "JSEntryStub"; }
-};
-
-
-class JSConstructEntryStub : public JSEntryStub {
- public:
- JSConstructEntryStub() { }
-
- void Generate(MacroAssembler* masm) { GenerateBody(masm, true); }
-
- private:
- int MinorKey() { return 1; }
-
- const char* GetName() { return "JSConstructEntryStub"; }
-};
-
-
-class ArgumentsAccessStub: public CodeStub {
- public:
- enum Type {
- READ_ELEMENT,
- NEW_NON_STRICT,
- NEW_STRICT
- };
-
- explicit ArgumentsAccessStub(Type type) : type_(type) { }
-
- private:
- Type type_;
-
- Major MajorKey() { return ArgumentsAccess; }
- int MinorKey() { return type_; }
-
- void Generate(MacroAssembler* masm);
- void GenerateReadElement(MacroAssembler* masm);
- void GenerateNewObject(MacroAssembler* masm);
-
- int GetArgumentsBoilerplateIndex() const {
- return (type_ == NEW_STRICT)
- ? Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX
- : Context::ARGUMENTS_BOILERPLATE_INDEX;
- }
-
- int GetArgumentsObjectSize() const {
- if (type_ == NEW_STRICT)
- return Heap::kArgumentsObjectSizeStrict;
- else
- return Heap::kArgumentsObjectSize;
- }
-
- const char* GetName() { return "ArgumentsAccessStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("ArgumentsAccessStub (type %d)\n", type_);
- }
-#endif
-};
-
-
-class RegExpExecStub: public CodeStub {
- public:
- RegExpExecStub() { }
-
- private:
- Major MajorKey() { return RegExpExec; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "RegExpExecStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("RegExpExecStub\n");
- }
-#endif
-};
-
-
-class RegExpConstructResultStub: public CodeStub {
- public:
- RegExpConstructResultStub() { }
-
- private:
- Major MajorKey() { return RegExpConstructResult; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "RegExpConstructResultStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("RegExpConstructResultStub\n");
- }
-#endif
-};
-
-
-class CallFunctionStub: public CodeStub {
- public:
- CallFunctionStub(int argc, InLoopFlag in_loop, CallFunctionFlags flags)
- : argc_(argc), in_loop_(in_loop), flags_(flags) { }
-
- void Generate(MacroAssembler* masm);
-
- static int ExtractArgcFromMinorKey(int minor_key) {
- return ArgcBits::decode(minor_key);
- }
-
- private:
- int argc_;
- InLoopFlag in_loop_;
- CallFunctionFlags flags_;
-
-#ifdef DEBUG
- void Print() {
- PrintF("CallFunctionStub (args %d, in_loop %d, flags %d)\n",
- argc_,
- static_cast<int>(in_loop_),
- static_cast<int>(flags_));
- }
-#endif
-
- // Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
- class InLoopBits: public BitField<InLoopFlag, 0, 1> {};
- class FlagBits: public BitField<CallFunctionFlags, 1, 1> {};
- class ArgcBits: public BitField<int, 2, 32 - 2> {};
-
- Major MajorKey() { return CallFunction; }
- int MinorKey() {
- // Encode the parameters in a unique 32 bit value.
- return InLoopBits::encode(in_loop_)
- | FlagBits::encode(flags_)
- | ArgcBits::encode(argc_);
- }
-
- InLoopFlag InLoop() { return in_loop_; }
- bool ReceiverMightBeValue() {
- return (flags_ & RECEIVER_MIGHT_BE_VALUE) != 0;
- }
-};
-
-
-enum StringIndexFlags {
- // Accepts smis or heap numbers.
- STRING_INDEX_IS_NUMBER,
-
- // Accepts smis or heap numbers that are valid array indices
- // (ECMA-262 15.4). Invalid indices are reported as being out of
- // range.
- STRING_INDEX_IS_ARRAY_INDEX
-};
-
-
-// Generates code implementing String.prototype.charCodeAt.
-//
-// Only supports the case when the receiver is a string and the index
-// is a number (smi or heap number) that is a valid index into the
-// string. Additional index constraints are specified by the
-// flags. Otherwise, bails out to the provided labels.
-//
-// Register usage: |object| may be changed to another string in a way
-// that doesn't affect charCodeAt/charAt semantics, |index| is
-// preserved, |scratch| and |result| are clobbered.
-class StringCharCodeAtGenerator {
- public:
- StringCharCodeAtGenerator(Register object,
- Register index,
- Register scratch,
- Register result,
- Label* receiver_not_string,
- Label* index_not_number,
- Label* index_out_of_range,
- StringIndexFlags index_flags)
- : object_(object),
- index_(index),
- scratch_(scratch),
- result_(result),
- receiver_not_string_(receiver_not_string),
- index_not_number_(index_not_number),
- index_out_of_range_(index_out_of_range),
- index_flags_(index_flags) {
- ASSERT(!scratch_.is(object_));
- ASSERT(!scratch_.is(index_));
- ASSERT(!scratch_.is(result_));
- ASSERT(!result_.is(object_));
- ASSERT(!result_.is(index_));
- }
-
- // Generates the fast case code. On the fallthrough path |result|
- // register contains the result.
- void GenerateFast(MacroAssembler* masm);
-
- // Generates the slow case code. Must not be naturally
- // reachable. Expected to be put after a ret instruction (e.g., in
- // deferred code). Always jumps back to the fast case.
- void GenerateSlow(MacroAssembler* masm,
- const RuntimeCallHelper& call_helper);
-
- private:
- Register object_;
- Register index_;
- Register scratch_;
- Register result_;
-
- Label* receiver_not_string_;
- Label* index_not_number_;
- Label* index_out_of_range_;
-
- StringIndexFlags index_flags_;
-
- Label call_runtime_;
- Label index_not_smi_;
- Label got_smi_index_;
- Label exit_;
-
- DISALLOW_COPY_AND_ASSIGN(StringCharCodeAtGenerator);
-};
-
-
-// Generates code for creating a one-char string from a char code.
-class StringCharFromCodeGenerator {
- public:
- StringCharFromCodeGenerator(Register code,
- Register result)
- : code_(code),
- result_(result) {
- ASSERT(!code_.is(result_));
- }
-
- // Generates the fast case code. On the fallthrough path |result|
- // register contains the result.
- void GenerateFast(MacroAssembler* masm);
-
- // Generates the slow case code. Must not be naturally
- // reachable. Expected to be put after a ret instruction (e.g., in
- // deferred code). Always jumps back to the fast case.
- void GenerateSlow(MacroAssembler* masm,
- const RuntimeCallHelper& call_helper);
-
- private:
- Register code_;
- Register result_;
-
- Label slow_case_;
- Label exit_;
-
- DISALLOW_COPY_AND_ASSIGN(StringCharFromCodeGenerator);
-};
-
-
-// Generates code implementing String.prototype.charAt.
-//
-// Only supports the case when the receiver is a string and the index
-// is a number (smi or heap number) that is a valid index into the
-// string. Additional index constraints are specified by the
-// flags. Otherwise, bails out to the provided labels.
-//
-// Register usage: |object| may be changed to another string in a way
-// that doesn't affect charCodeAt/charAt semantics, |index| is
-// preserved, |scratch1|, |scratch2|, and |result| are clobbered.
-class StringCharAtGenerator {
- public:
- StringCharAtGenerator(Register object,
- Register index,
- Register scratch1,
- Register scratch2,
- Register result,
- Label* receiver_not_string,
- Label* index_not_number,
- Label* index_out_of_range,
- StringIndexFlags index_flags)
- : char_code_at_generator_(object,
- index,
- scratch1,
- scratch2,
- receiver_not_string,
- index_not_number,
- index_out_of_range,
- index_flags),
- char_from_code_generator_(scratch2, result) {}
-
- // Generates the fast case code. On the fallthrough path |result|
- // register contains the result.
- void GenerateFast(MacroAssembler* masm);
-
- // Generates the slow case code. Must not be naturally
- // reachable. Expected to be put after a ret instruction (e.g., in
- // deferred code). Always jumps back to the fast case.
- void GenerateSlow(MacroAssembler* masm,
- const RuntimeCallHelper& call_helper);
-
- private:
- StringCharCodeAtGenerator char_code_at_generator_;
- StringCharFromCodeGenerator char_from_code_generator_;
-
- DISALLOW_COPY_AND_ASSIGN(StringCharAtGenerator);
-};
-
-
-class AllowStubCallsScope {
- public:
- AllowStubCallsScope(MacroAssembler* masm, bool allow)
- : masm_(masm), previous_allow_(masm->allow_stub_calls()) {
- masm_->set_allow_stub_calls(allow);
- }
- ~AllowStubCallsScope() {
- masm_->set_allow_stub_calls(previous_allow_);
- }
-
- private:
- MacroAssembler* masm_;
- bool previous_allow_;
-
- DISALLOW_COPY_AND_ASSIGN(AllowStubCallsScope);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_CODE_STUBS_H_
diff --git a/src/3rdparty/v8/src/code.h b/src/3rdparty/v8/src/code.h
deleted file mode 100644
index 072344b..0000000
--- a/src/3rdparty/v8/src/code.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CODE_H_
-#define V8_CODE_H_
-
-namespace v8 {
-namespace internal {
-
-
-// Wrapper class for passing expected and actual parameter counts as
-// either registers or immediate values. Used to make sure that the
-// caller provides exactly the expected number of parameters to the
-// callee.
-class ParameterCount BASE_EMBEDDED {
- public:
- explicit ParameterCount(Register reg)
- : reg_(reg), immediate_(0) { }
- explicit ParameterCount(int immediate)
- : reg_(no_reg), immediate_(immediate) { }
-
- bool is_reg() const { return !reg_.is(no_reg); }
- bool is_immediate() const { return !is_reg(); }
-
- Register reg() const {
- ASSERT(is_reg());
- return reg_;
- }
- int immediate() const {
- ASSERT(is_immediate());
- return immediate_;
- }
-
- private:
- const Register reg_;
- const int immediate_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(ParameterCount);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_CODE_H_
diff --git a/src/3rdparty/v8/src/codegen-inl.h b/src/3rdparty/v8/src/codegen-inl.h
deleted file mode 100644
index f7da54a..0000000
--- a/src/3rdparty/v8/src/codegen-inl.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_CODEGEN_INL_H_
-#define V8_CODEGEN_INL_H_
-
-#include "codegen.h"
-#include "compiler.h"
-#include "register-allocator-inl.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/codegen-ia32-inl.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/codegen-x64-inl.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/codegen-arm-inl.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/codegen-mips-inl.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-
-namespace v8 {
-namespace internal {
-
-Handle<Script> CodeGenerator::script() { return info_->script(); }
-
-bool CodeGenerator::is_eval() { return info_->is_eval(); }
-
-Scope* CodeGenerator::scope() { return info_->function()->scope(); }
-
-bool CodeGenerator::is_strict_mode() {
- return info_->function()->strict_mode();
-}
-
-StrictModeFlag CodeGenerator::strict_mode_flag() {
- return is_strict_mode() ? kStrictMode : kNonStrictMode;
-}
-
-} } // namespace v8::internal
-
-#endif // V8_CODEGEN_INL_H_
diff --git a/src/3rdparty/v8/src/codegen.cc b/src/3rdparty/v8/src/codegen.cc
deleted file mode 100644
index d2e7f23..0000000
--- a/src/3rdparty/v8/src/codegen.cc
+++ /dev/null
@@ -1,505 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "bootstrapper.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "prettyprinter.h"
-#include "register-allocator-inl.h"
-#include "rewriter.h"
-#include "runtime.h"
-#include "scopeinfo.h"
-#include "stub-cache.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-#ifdef DEBUG
-
-Comment::Comment(MacroAssembler* masm, const char* msg)
- : masm_(masm), msg_(msg) {
- __ RecordComment(msg);
-}
-
-
-Comment::~Comment() {
- if (msg_[0] == '[') __ RecordComment("]");
-}
-
-#endif // DEBUG
-
-#undef __
-
-
-void CodeGenerator::ProcessDeferred() {
- while (!deferred_.is_empty()) {
- DeferredCode* code = deferred_.RemoveLast();
- ASSERT(masm_ == code->masm());
- // Record position of deferred code stub.
- masm_->positions_recorder()->RecordStatementPosition(
- code->statement_position());
- if (code->position() != RelocInfo::kNoPosition) {
- masm_->positions_recorder()->RecordPosition(code->position());
- }
- // Generate the code.
- Comment cmnt(masm_, code->comment());
- masm_->bind(code->entry_label());
- if (code->AutoSaveAndRestore()) {
- code->SaveRegisters();
- }
- code->Generate();
- if (code->AutoSaveAndRestore()) {
- code->RestoreRegisters();
- code->Exit();
- }
- }
-}
-
-
-void DeferredCode::Exit() {
- masm_->jmp(exit_label());
-}
-
-
-void CodeGenerator::SetFrame(VirtualFrame* new_frame,
- RegisterFile* non_frame_registers) {
- RegisterFile saved_counts;
- if (has_valid_frame()) {
- frame_->DetachFromCodeGenerator();
- // The remaining register reference counts are the non-frame ones.
- allocator_->SaveTo(&saved_counts);
- }
-
- if (new_frame != NULL) {
- // Restore the non-frame register references that go with the new frame.
- allocator_->RestoreFrom(non_frame_registers);
- new_frame->AttachToCodeGenerator();
- }
-
- frame_ = new_frame;
- saved_counts.CopyTo(non_frame_registers);
-}
-
-
-void CodeGenerator::DeleteFrame() {
- if (has_valid_frame()) {
- frame_->DetachFromCodeGenerator();
- frame_ = NULL;
- }
-}
-
-
-void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
-#ifdef DEBUG
- bool print_source = false;
- bool print_ast = false;
- bool print_json_ast = false;
- const char* ftype;
-
- if (Isolate::Current()->bootstrapper()->IsActive()) {
- print_source = FLAG_print_builtin_source;
- print_ast = FLAG_print_builtin_ast;
- print_json_ast = FLAG_print_builtin_json_ast;
- ftype = "builtin";
- } else {
- print_source = FLAG_print_source;
- print_ast = FLAG_print_ast;
- print_json_ast = FLAG_print_json_ast;
- Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
- if (print_source && !filter.is_empty()) {
- print_source = info->function()->name()->IsEqualTo(filter);
- }
- if (print_ast && !filter.is_empty()) {
- print_ast = info->function()->name()->IsEqualTo(filter);
- }
- if (print_json_ast && !filter.is_empty()) {
- print_json_ast = info->function()->name()->IsEqualTo(filter);
- }
- ftype = "user-defined";
- }
-
- if (FLAG_trace_codegen || print_source || print_ast) {
- PrintF("*** Generate code for %s function: ", ftype);
- info->function()->name()->ShortPrint();
- PrintF(" ***\n");
- }
-
- if (print_source) {
- PrintF("--- Source from AST ---\n%s\n",
- PrettyPrinter().PrintProgram(info->function()));
- }
-
- if (print_ast) {
- PrintF("--- AST ---\n%s\n",
- AstPrinter().PrintProgram(info->function()));
- }
-
- if (print_json_ast) {
- JsonAstBuilder builder;
- PrintF("%s", builder.BuildProgram(info->function()));
- }
-#endif // DEBUG
-}
-
-
-Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info) {
- Isolate* isolate = info->isolate();
-
- // Allocate and install the code.
- CodeDesc desc;
- masm->GetCode(&desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, flags, masm->CodeObject());
-
- if (!code.is_null()) {
- isolate->counters()->total_compiled_code_size()->Increment(
- code->instruction_size());
- }
- return code;
-}
-
-
-void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
-#ifdef ENABLE_DISASSEMBLER
- bool print_code = Isolate::Current()->bootstrapper()->IsActive()
- ? FLAG_print_builtin_code
- : (FLAG_print_code || (info->IsOptimizing() && FLAG_print_opt_code));
- Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
- FunctionLiteral* function = info->function();
- bool match = filter.is_empty() || function->debug_name()->IsEqualTo(filter);
- if (print_code && match) {
- // Print the source code if available.
- Handle<Script> script = info->script();
- if (!script->IsUndefined() && !script->source()->IsUndefined()) {
- PrintF("--- Raw source ---\n");
- StringInputBuffer stream(String::cast(script->source()));
- stream.Seek(function->start_position());
- // fun->end_position() points to the last character in the stream. We
- // need to compensate by adding one to calculate the length.
- int source_len =
- function->end_position() - function->start_position() + 1;
- for (int i = 0; i < source_len; i++) {
- if (stream.has_more()) PrintF("%c", stream.GetNext());
- }
- PrintF("\n\n");
- }
- if (info->IsOptimizing()) {
- if (FLAG_print_unopt_code) {
- PrintF("--- Unoptimized code ---\n");
- info->closure()->shared()->code()->Disassemble(
- *function->debug_name()->ToCString());
- }
- PrintF("--- Optimized code ---\n");
- } else {
- PrintF("--- Code ---\n");
- }
- code->Disassemble(*function->debug_name()->ToCString());
- }
-#endif // ENABLE_DISASSEMBLER
-}
-
-
-// Generate the code. Compile the AST and assemble all the pieces into a
-// Code object.
-bool CodeGenerator::MakeCode(CompilationInfo* info) {
- // When using Crankshaft the classic backend should never be used.
- ASSERT(!V8::UseCrankshaft());
- Handle<Script> script = info->script();
- if (!script->IsUndefined() && !script->source()->IsUndefined()) {
- int len = String::cast(script->source())->length();
- Counters* counters = info->isolate()->counters();
- counters->total_old_codegen_source_size()->Increment(len);
- }
- if (FLAG_trace_codegen) {
- PrintF("Classic Compiler - ");
- }
- MakeCodePrologue(info);
- // Generate code.
- const int kInitialBufferSize = 4 * KB;
- MacroAssembler masm(info->isolate(), NULL, kInitialBufferSize);
-#ifdef ENABLE_GDB_JIT_INTERFACE
- masm.positions_recorder()->StartGDBJITLineInfoRecording();
-#endif
- CodeGenerator cgen(&masm);
- CodeGeneratorScope scope(Isolate::Current(), &cgen);
- cgen.Generate(info);
- if (cgen.HasStackOverflow()) {
- ASSERT(!Isolate::Current()->has_pending_exception());
- return false;
- }
-
- InLoopFlag in_loop = info->is_in_loop() ? IN_LOOP : NOT_IN_LOOP;
- Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
- Handle<Code> code = MakeCodeEpilogue(cgen.masm(), flags, info);
- // There is no stack check table in code generated by the classic backend.
- code->SetNoStackCheckTable();
- CodeGenerator::PrintCode(code, info);
- info->SetCode(code); // May be an empty handle.
-#ifdef ENABLE_GDB_JIT_INTERFACE
- if (FLAG_gdbjit && !code.is_null()) {
- GDBJITLineInfo* lineinfo =
- masm.positions_recorder()->DetachGDBJITLineInfo();
-
- GDBJIT(RegisterDetailedLineInfo(*code, lineinfo));
- }
-#endif
- return !code.is_null();
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-
-static Vector<const char> kRegexp = CStrVector("regexp");
-
-
-bool CodeGenerator::ShouldGenerateLog(Expression* type) {
- ASSERT(type != NULL);
- if (!LOGGER->is_logging() && !CpuProfiler::is_profiling()) return false;
- Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle());
- if (FLAG_log_regexp) {
- if (name->IsEqualTo(kRegexp))
- return true;
- }
- return false;
-}
-
-#endif
-
-
-void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
- int length = declarations->length();
- int globals = 0;
- for (int i = 0; i < length; i++) {
- Declaration* node = declarations->at(i);
- Variable* var = node->proxy()->var();
- Slot* slot = var->AsSlot();
-
- // If it was not possible to allocate the variable at compile
- // time, we need to "declare" it at runtime to make sure it
- // actually exists in the local context.
- if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
- VisitDeclaration(node);
- } else {
- // Count global variables and functions for later processing
- globals++;
- }
- }
-
- // Return in case of no declared global functions or variables.
- if (globals == 0) return;
-
- // Compute array of global variable and function declarations.
- Handle<FixedArray> array = FACTORY->NewFixedArray(2 * globals, TENURED);
- for (int j = 0, i = 0; i < length; i++) {
- Declaration* node = declarations->at(i);
- Variable* var = node->proxy()->var();
- Slot* slot = var->AsSlot();
-
- if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
- // Skip - already processed.
- } else {
- array->set(j++, *(var->name()));
- if (node->fun() == NULL) {
- if (var->mode() == Variable::CONST) {
- // In case this is const property use the hole.
- array->set_the_hole(j++);
- } else {
- array->set_undefined(j++);
- }
- } else {
- Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(node->fun(), script());
- // Check for stack-overflow exception.
- if (function.is_null()) {
- SetStackOverflow();
- return;
- }
- array->set(j++, *function);
- }
- }
- }
-
- // Invoke the platform-dependent code generator to do the actual
- // declaration the global variables and functions.
- DeclareGlobals(array);
-}
-
-
-void CodeGenerator::VisitIncrementOperation(IncrementOperation* expr) {
- UNREACHABLE();
-}
-
-
-// Lookup table for code generators for special runtime calls which are
-// generated inline.
-#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \
- &CodeGenerator::Generate##Name,
-
-const CodeGenerator::InlineFunctionGenerator
- CodeGenerator::kInlineFunctionGenerators[] = {
- INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
- INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
-};
-#undef INLINE_FUNCTION_GENERATOR_ADDRESS
-
-
-bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) {
- ZoneList<Expression*>* args = node->arguments();
- Handle<String> name = node->name();
- const Runtime::Function* function = node->function();
- if (function != NULL && function->intrinsic_type == Runtime::INLINE) {
- int lookup_index = static_cast<int>(function->function_id) -
- static_cast<int>(Runtime::kFirstInlineFunction);
- ASSERT(lookup_index >= 0);
- ASSERT(static_cast<size_t>(lookup_index) <
- ARRAY_SIZE(kInlineFunctionGenerators));
- InlineFunctionGenerator generator = kInlineFunctionGenerators[lookup_index];
- (this->*generator)(args);
- return true;
- }
- return false;
-}
-
-
-// Simple condition analysis. ALWAYS_TRUE and ALWAYS_FALSE represent a
-// known result for the test expression, with no side effects.
-CodeGenerator::ConditionAnalysis CodeGenerator::AnalyzeCondition(
- Expression* cond) {
- if (cond == NULL) return ALWAYS_TRUE;
-
- Literal* lit = cond->AsLiteral();
- if (lit == NULL) return DONT_KNOW;
-
- if (lit->IsTrue()) {
- return ALWAYS_TRUE;
- } else if (lit->IsFalse()) {
- return ALWAYS_FALSE;
- }
-
- return DONT_KNOW;
-}
-
-
-bool CodeGenerator::RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here) {
- if (pos != RelocInfo::kNoPosition) {
- masm->positions_recorder()->RecordStatementPosition(pos);
- masm->positions_recorder()->RecordPosition(pos);
- if (right_here) {
- return masm->positions_recorder()->WriteRecordedPositions();
- }
- }
- return false;
-}
-
-
-void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
- if (FLAG_debug_info) RecordPositions(masm(), fun->start_position(), false);
-}
-
-
-void CodeGenerator::CodeForReturnPosition(FunctionLiteral* fun) {
- if (FLAG_debug_info) RecordPositions(masm(), fun->end_position() - 1, false);
-}
-
-
-void CodeGenerator::CodeForStatementPosition(Statement* stmt) {
- if (FLAG_debug_info) RecordPositions(masm(), stmt->statement_pos(), false);
-}
-
-
-void CodeGenerator::CodeForDoWhileConditionPosition(DoWhileStatement* stmt) {
- if (FLAG_debug_info)
- RecordPositions(masm(), stmt->condition_position(), false);
-}
-
-
-void CodeGenerator::CodeForSourcePosition(int pos) {
- if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
- masm()->positions_recorder()->RecordPosition(pos);
- }
-}
-
-
-const char* GenericUnaryOpStub::GetName() {
- switch (op_) {
- case Token::SUB:
- if (negative_zero_ == kStrictNegativeZero) {
- return overwrite_ == UNARY_OVERWRITE
- ? "GenericUnaryOpStub_SUB_Overwrite_Strict0"
- : "GenericUnaryOpStub_SUB_Alloc_Strict0";
- } else {
- return overwrite_ == UNARY_OVERWRITE
- ? "GenericUnaryOpStub_SUB_Overwrite_Ignore0"
- : "GenericUnaryOpStub_SUB_Alloc_Ignore0";
- }
- case Token::BIT_NOT:
- return overwrite_ == UNARY_OVERWRITE
- ? "GenericUnaryOpStub_BIT_NOT_Overwrite"
- : "GenericUnaryOpStub_BIT_NOT_Alloc";
- default:
- UNREACHABLE();
- return "<unknown>";
- }
-}
-
-
-void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
- switch (type_) {
- case READ_ELEMENT:
- GenerateReadElement(masm);
- break;
- case NEW_NON_STRICT:
- case NEW_STRICT:
- GenerateNewObject(masm);
- break;
- }
-}
-
-
-int CEntryStub::MinorKey() {
- ASSERT(result_size_ == 1 || result_size_ == 2);
- int result = save_doubles_ ? 1 : 0;
-#ifdef _WIN64
- return result | ((result_size_ == 1) ? 0 : 2);
-#else
- return result;
-#endif
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/codegen.h b/src/3rdparty/v8/src/codegen.h
deleted file mode 100644
index aa31999..0000000
--- a/src/3rdparty/v8/src/codegen.h
+++ /dev/null
@@ -1,245 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CODEGEN_H_
-#define V8_CODEGEN_H_
-
-#include "code-stubs.h"
-#include "runtime.h"
-#include "type-info.h"
-
-// Include the declaration of the architecture defined class CodeGenerator.
-// The contract to the shared code is that the the CodeGenerator is a subclass
-// of Visitor and that the following methods are available publicly:
-// MakeCode
-// MakeCodePrologue
-// MakeCodeEpilogue
-// masm
-// frame
-// script
-// has_valid_frame
-// SetFrame
-// DeleteFrame
-// allocator
-// AddDeferred
-// in_spilled_code
-// set_in_spilled_code
-// RecordPositions
-//
-// These methods are either used privately by the shared code or implemented as
-// shared code:
-// CodeGenerator
-// ~CodeGenerator
-// ProcessDeferred
-// Generate
-// ComputeLazyCompile
-// BuildFunctionInfo
-// ProcessDeclarations
-// DeclareGlobals
-// CheckForInlineRuntimeCall
-// AnalyzeCondition
-// CodeForFunctionPosition
-// CodeForReturnPosition
-// CodeForStatementPosition
-// CodeForDoWhileConditionPosition
-// CodeForSourcePosition
-
-enum InitState { CONST_INIT, NOT_CONST_INIT };
-enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/codegen-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/codegen-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/codegen-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/codegen-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-#include "register-allocator.h"
-
-namespace v8 {
-namespace internal {
-
-// Code generation can be nested. Code generation scopes form a stack
-// of active code generators.
-class CodeGeneratorScope BASE_EMBEDDED {
- public:
- explicit CodeGeneratorScope(Isolate* isolate, CodeGenerator* cgen)
- : isolate_(isolate) {
- previous_ = isolate->current_code_generator();
- isolate->set_current_code_generator(cgen);
- }
-
- ~CodeGeneratorScope() {
- isolate_->set_current_code_generator(previous_);
- }
-
- static CodeGenerator* Current(Isolate* isolate) {
- ASSERT(isolate->current_code_generator() != NULL);
- return isolate->current_code_generator();
- }
-
- private:
- CodeGenerator* previous_;
- Isolate* isolate_;
-};
-
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
-
-// State of used registers in a virtual frame.
-class FrameRegisterState {
- public:
- // Captures the current state of the given frame.
- explicit FrameRegisterState(VirtualFrame* frame);
-
- // Saves the state in the stack.
- void Save(MacroAssembler* masm) const;
-
- // Restores the state from the stack.
- void Restore(MacroAssembler* masm) const;
-
- private:
- // Constants indicating special actions. They should not be multiples
- // of kPointerSize so they will not collide with valid offsets from
- // the frame pointer.
- static const int kIgnore = -1;
- static const int kPush = 1;
-
- // This flag is ored with a valid offset from the frame pointer, so
- // it should fit in the low zero bits of a valid offset.
- static const int kSyncedFlag = 2;
-
- int registers_[RegisterAllocator::kNumRegisters];
-};
-
-#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
-
-
-class FrameRegisterState {
- public:
- inline FrameRegisterState(VirtualFrame frame) : frame_(frame) { }
-
- inline const VirtualFrame* frame() const { return &frame_; }
-
- private:
- VirtualFrame frame_;
-};
-
-#else
-
-#error Unsupported target architecture.
-
-#endif
-
-
-// RuntimeCallHelper implementation that saves/restores state of a
-// virtual frame.
-class VirtualFrameRuntimeCallHelper : public RuntimeCallHelper {
- public:
- // Does not take ownership of |frame_state|.
- explicit VirtualFrameRuntimeCallHelper(const FrameRegisterState* frame_state)
- : frame_state_(frame_state) {}
-
- virtual void BeforeCall(MacroAssembler* masm) const;
-
- virtual void AfterCall(MacroAssembler* masm) const;
-
- private:
- const FrameRegisterState* frame_state_;
-};
-
-
-// Deferred code objects are small pieces of code that are compiled
-// out of line. They are used to defer the compilation of uncommon
-// paths thereby avoiding expensive jumps around uncommon code parts.
-class DeferredCode: public ZoneObject {
- public:
- DeferredCode();
- virtual ~DeferredCode() { }
-
- virtual void Generate() = 0;
-
- MacroAssembler* masm() { return masm_; }
-
- int statement_position() const { return statement_position_; }
- int position() const { return position_; }
-
- Label* entry_label() { return &entry_label_; }
- Label* exit_label() { return &exit_label_; }
-
-#ifdef DEBUG
- void set_comment(const char* comment) { comment_ = comment; }
- const char* comment() const { return comment_; }
-#else
- void set_comment(const char* comment) { }
- const char* comment() const { return ""; }
-#endif
-
- inline void Jump();
- inline void Branch(Condition cc);
- void BindExit() { masm_->bind(&exit_label_); }
-
- const FrameRegisterState* frame_state() const { return &frame_state_; }
-
- void SaveRegisters();
- void RestoreRegisters();
- void Exit();
-
- // If this returns true then all registers will be saved for the duration
- // of the Generate() call. Otherwise the registers are not saved and the
- // Generate() call must bracket runtime any runtime calls with calls to
- // SaveRegisters() and RestoreRegisters(). In this case the Generate
- // method must also call Exit() in order to return to the non-deferred
- // code.
- virtual bool AutoSaveAndRestore() { return true; }
-
- protected:
- MacroAssembler* masm_;
-
- private:
- int statement_position_;
- int position_;
-
- Label entry_label_;
- Label exit_label_;
-
- FrameRegisterState frame_state_;
-
-#ifdef DEBUG
- const char* comment_;
-#endif
- DISALLOW_COPY_AND_ASSIGN(DeferredCode);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_CODEGEN_H_
diff --git a/src/3rdparty/v8/src/compilation-cache.cc b/src/3rdparty/v8/src/compilation-cache.cc
deleted file mode 100644
index bc23903..0000000
--- a/src/3rdparty/v8/src/compilation-cache.cc
+++ /dev/null
@@ -1,566 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "compilation-cache.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-
-// The number of generations for each sub cache.
-// The number of ScriptGenerations is carefully chosen based on histograms.
-// See issue 458: http://code.google.com/p/v8/issues/detail?id=458
-static const int kScriptGenerations = 5;
-static const int kEvalGlobalGenerations = 2;
-static const int kEvalContextualGenerations = 2;
-static const int kRegExpGenerations = 2;
-
-// Initial size of each compilation cache table allocated.
-static const int kInitialCacheSize = 64;
-
-
-CompilationCache::CompilationCache(Isolate* isolate)
- : isolate_(isolate),
- script_(isolate, kScriptGenerations),
- eval_global_(isolate, kEvalGlobalGenerations),
- eval_contextual_(isolate, kEvalContextualGenerations),
- reg_exp_(isolate, kRegExpGenerations),
- enabled_(true),
- eager_optimizing_set_(NULL) {
- CompilationSubCache* subcaches[kSubCacheCount] =
- {&script_, &eval_global_, &eval_contextual_, &reg_exp_};
- for (int i = 0; i < kSubCacheCount; ++i) {
- subcaches_[i] = subcaches[i];
- }
-}
-
-
-CompilationCache::~CompilationCache() {
- delete eager_optimizing_set_;
- eager_optimizing_set_ = NULL;
-}
-
-
-static Handle<CompilationCacheTable> AllocateTable(Isolate* isolate, int size) {
- CALL_HEAP_FUNCTION(isolate,
- CompilationCacheTable::Allocate(size),
- CompilationCacheTable);
-}
-
-
-Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
- ASSERT(generation < generations_);
- Handle<CompilationCacheTable> result;
- if (tables_[generation]->IsUndefined()) {
- result = AllocateTable(isolate(), kInitialCacheSize);
- tables_[generation] = *result;
- } else {
- CompilationCacheTable* table =
- CompilationCacheTable::cast(tables_[generation]);
- result = Handle<CompilationCacheTable>(table, isolate());
- }
- return result;
-}
-
-void CompilationSubCache::Age() {
- // Age the generations implicitly killing off the oldest.
- for (int i = generations_ - 1; i > 0; i--) {
- tables_[i] = tables_[i - 1];
- }
-
- // Set the first generation as unborn.
- tables_[0] = isolate()->heap()->undefined_value();
-}
-
-
-void CompilationSubCache::IterateFunctions(ObjectVisitor* v) {
- Object* undefined = isolate()->heap()->raw_unchecked_undefined_value();
- for (int i = 0; i < generations_; i++) {
- if (tables_[i] != undefined) {
- reinterpret_cast<CompilationCacheTable*>(tables_[i])->IterateElements(v);
- }
- }
-}
-
-
-void CompilationSubCache::Iterate(ObjectVisitor* v) {
- v->VisitPointers(&tables_[0], &tables_[generations_]);
-}
-
-
-void CompilationSubCache::Clear() {
- MemsetPointer(tables_, isolate()->heap()->undefined_value(), generations_);
-}
-
-
-void CompilationSubCache::Remove(Handle<SharedFunctionInfo> function_info) {
- // Probe the script generation tables. Make sure not to leak handles
- // into the caller's handle scope.
- { HandleScope scope(isolate());
- for (int generation = 0; generation < generations(); generation++) {
- Handle<CompilationCacheTable> table = GetTable(generation);
- table->Remove(*function_info);
- }
- }
-}
-
-
-CompilationCacheScript::CompilationCacheScript(Isolate* isolate,
- int generations)
- : CompilationSubCache(isolate, generations),
- script_histogram_(NULL),
- script_histogram_initialized_(false) { }
-
-
-// We only re-use a cached function for some script source code if the
-// script originates from the same place. This is to avoid issues
-// when reporting errors, etc.
-bool CompilationCacheScript::HasOrigin(
- Handle<SharedFunctionInfo> function_info,
- Handle<Object> name,
- int line_offset,
- int column_offset) {
- Handle<Script> script =
- Handle<Script>(Script::cast(function_info->script()), internal::Isolate::Current());
- // If the script name isn't set, the boilerplate script should have
- // an undefined name to have the same origin.
- if (name.is_null()) {
- return script->name()->IsUndefined();
- }
- // Do the fast bailout checks first.
- if (line_offset != script->line_offset()->value()) return false;
- if (column_offset != script->column_offset()->value()) return false;
- // Check that both names are strings. If not, no match.
- if (!name->IsString() || !script->name()->IsString()) return false;
- // Compare the two name strings for equality.
- return String::cast(*name)->Equals(String::cast(script->name()));
-}
-
-
-// TODO(245): Need to allow identical code from different contexts to
-// be cached in the same script generation. Currently the first use
-// will be cached, but subsequent code from different source / line
-// won't.
-Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
- Handle<Object> name,
- int line_offset,
- int column_offset) {
- Object* result = NULL;
- int generation;
-
- // Probe the script generation tables. Make sure not to leak handles
- // into the caller's handle scope.
- { HandleScope scope(isolate());
- for (generation = 0; generation < generations(); generation++) {
- Handle<CompilationCacheTable> table = GetTable(generation);
- Handle<Object> probe(table->Lookup(*source), isolate());
- if (probe->IsSharedFunctionInfo()) {
- Handle<SharedFunctionInfo> function_info =
- Handle<SharedFunctionInfo>::cast(probe);
- // Break when we've found a suitable shared function info that
- // matches the origin.
- if (HasOrigin(function_info, name, line_offset, column_offset)) {
- result = *function_info;
- break;
- }
- }
- }
- }
-
- if (!script_histogram_initialized_) {
- script_histogram_ = isolate()->stats_table()->CreateHistogram(
- "V8.ScriptCache",
- 0,
- kScriptGenerations,
- kScriptGenerations + 1);
- script_histogram_initialized_ = true;
- }
-
- if (script_histogram_ != NULL) {
- // The level NUMBER_OF_SCRIPT_GENERATIONS is equivalent to a cache miss.
- isolate()->stats_table()->AddHistogramSample(script_histogram_, generation);
- }
-
- // Once outside the manacles of the handle scope, we need to recheck
- // to see if we actually found a cached script. If so, we return a
- // handle created in the caller's handle scope.
- if (result != NULL) {
- Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result),
- isolate());
- ASSERT(HasOrigin(shared, name, line_offset, column_offset));
- // If the script was found in a later generation, we promote it to
- // the first generation to let it survive longer in the cache.
- if (generation != 0) Put(source, shared);
- isolate()->counters()->compilation_cache_hits()->Increment();
- return shared;
- } else {
- isolate()->counters()->compilation_cache_misses()->Increment();
- return Handle<SharedFunctionInfo>::null();
- }
-}
-
-
-MaybeObject* CompilationCacheScript::TryTablePut(
- Handle<String> source,
- Handle<SharedFunctionInfo> function_info) {
- Handle<CompilationCacheTable> table = GetFirstTable();
- return table->Put(*source, *function_info);
-}
-
-
-Handle<CompilationCacheTable> CompilationCacheScript::TablePut(
- Handle<String> source,
- Handle<SharedFunctionInfo> function_info) {
- CALL_HEAP_FUNCTION(isolate(),
- TryTablePut(source, function_info),
- CompilationCacheTable);
-}
-
-
-void CompilationCacheScript::Put(Handle<String> source,
- Handle<SharedFunctionInfo> function_info) {
- HandleScope scope(isolate());
- SetFirstTable(TablePut(source, function_info));
-}
-
-
-Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
- Handle<String> source,
- Handle<Context> context,
- StrictModeFlag strict_mode
-#ifdef QT_BUILD_SCRIPT_LIB
- , Handle<Object> name, int line_offset, int column_offset
-#endif
- ) {
- // Make sure not to leak the table into the surrounding handle
- // scope. Otherwise, we risk keeping old tables around even after
- // having cleared the cache.
- Object* result = NULL;
- int generation;
- { HandleScope scope(isolate());
- for (generation = 0; generation < generations(); generation++) {
- Handle<CompilationCacheTable> table = GetTable(generation);
- Handle<Object> probe(table->LookupEval(*source, *context, strict_mode));
- if (probe->IsSharedFunctionInfo()) {
-#ifdef QT_BUILD_SCRIPT_LIB
- Handle<SharedFunctionInfo> function_info =
- Handle<SharedFunctionInfo>::cast(probe);
- // Break when we've found a suitable shared function info that
- // matches the origin.
- if (CompilationCacheScript::HasOrigin(function_info, name, line_offset, column_offset)) {
- result = *function_info;
- break;
- }
-#endif
- }
- }
- }
- if (result->IsSharedFunctionInfo()) {
- Handle<SharedFunctionInfo>
- function_info(SharedFunctionInfo::cast(result), isolate());
- if (generation != 0) {
- Put(source, context, function_info);
- }
- isolate()->counters()->compilation_cache_hits()->Increment();
- return function_info;
- } else {
- isolate()->counters()->compilation_cache_misses()->Increment();
- return Handle<SharedFunctionInfo>::null();
- }
-}
-
-
-MaybeObject* CompilationCacheEval::TryTablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info) {
- Handle<CompilationCacheTable> table = GetFirstTable();
- return table->PutEval(*source, *context, *function_info);
-}
-
-
-Handle<CompilationCacheTable> CompilationCacheEval::TablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info) {
- CALL_HEAP_FUNCTION(isolate(),
- TryTablePut(source, context, function_info),
- CompilationCacheTable);
-}
-
-
-void CompilationCacheEval::Put(Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info) {
- HandleScope scope(isolate());
- SetFirstTable(TablePut(source, context, function_info));
-}
-
-
-Handle<FixedArray> CompilationCacheRegExp::Lookup(Handle<String> source,
- JSRegExp::Flags flags) {
- // Make sure not to leak the table into the surrounding handle
- // scope. Otherwise, we risk keeping old tables around even after
- // having cleared the cache.
- Object* result = NULL;
- int generation;
- { HandleScope scope(isolate());
- for (generation = 0; generation < generations(); generation++) {
- Handle<CompilationCacheTable> table = GetTable(generation);
- result = table->LookupRegExp(*source, flags);
- if (result->IsFixedArray()) {
- break;
- }
- }
- }
- if (result->IsFixedArray()) {
- Handle<FixedArray> data(FixedArray::cast(result), isolate());
- if (generation != 0) {
- Put(source, flags, data);
- }
- isolate()->counters()->compilation_cache_hits()->Increment();
- return data;
- } else {
- isolate()->counters()->compilation_cache_misses()->Increment();
- return Handle<FixedArray>::null();
- }
-}
-
-
-MaybeObject* CompilationCacheRegExp::TryTablePut(
- Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data) {
- Handle<CompilationCacheTable> table = GetFirstTable();
- return table->PutRegExp(*source, flags, *data);
-}
-
-
-Handle<CompilationCacheTable> CompilationCacheRegExp::TablePut(
- Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data) {
- CALL_HEAP_FUNCTION(isolate(),
- TryTablePut(source, flags, data),
- CompilationCacheTable);
-}
-
-
-void CompilationCacheRegExp::Put(Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data) {
- HandleScope scope(isolate());
- SetFirstTable(TablePut(source, flags, data));
-}
-
-
-void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
- if (!IsEnabled()) return;
-
- eval_global_.Remove(function_info);
- eval_contextual_.Remove(function_info);
- script_.Remove(function_info);
-}
-
-
-Handle<SharedFunctionInfo> CompilationCache::LookupScript(Handle<String> source,
- Handle<Object> name,
- int line_offset,
- int column_offset) {
- if (!IsEnabled()) {
- return Handle<SharedFunctionInfo>::null();
- }
-
- return script_.Lookup(source, name, line_offset, column_offset);
-}
-
-
-Handle<SharedFunctionInfo> CompilationCache::LookupEval(
- Handle<String> source,
- Handle<Context> context,
- bool is_global,
- StrictModeFlag strict_mode
-#ifdef QT_BUILD_SCRIPT_LIB
- , Handle<Object> script_name,
- int line_offset, int column_offset
-#endif
-) {
- if (!IsEnabled()) {
- return Handle<SharedFunctionInfo>::null();
- }
-
- Handle<SharedFunctionInfo> result;
- if (is_global) {
- result = eval_global_.Lookup(source, context, strict_mode
-#ifdef QT_BUILD_SCRIPT_LIB
- ,script_name, line_offset, column_offset
-#endif
- );
- } else {
- result = eval_contextual_.Lookup(source, context, strict_mode
-#ifdef QT_BUILD_SCRIPT_LIB
- ,script_name, line_offset, column_offset
-#endif
- );
- }
- return result;
-}
-
-
-Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
- JSRegExp::Flags flags) {
- if (!IsEnabled()) {
- return Handle<FixedArray>::null();
- }
-
- return reg_exp_.Lookup(source, flags);
-}
-
-
-void CompilationCache::PutScript(Handle<String> source,
- Handle<SharedFunctionInfo> function_info) {
- if (!IsEnabled()) {
- return;
- }
-
- script_.Put(source, function_info);
-}
-
-
-void CompilationCache::PutEval(Handle<String> source,
- Handle<Context> context,
- bool is_global,
- Handle<SharedFunctionInfo> function_info) {
- if (!IsEnabled()) {
- return;
- }
-
- HandleScope scope(isolate());
- if (is_global) {
- eval_global_.Put(source, context, function_info);
- } else {
- eval_contextual_.Put(source, context, function_info);
- }
-}
-
-
-
-void CompilationCache::PutRegExp(Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data) {
- if (!IsEnabled()) {
- return;
- }
-
- reg_exp_.Put(source, flags, data);
-}
-
-
-static bool SourceHashCompare(void* key1, void* key2) {
- return key1 == key2;
-}
-
-
-HashMap* CompilationCache::EagerOptimizingSet() {
- if (eager_optimizing_set_ == NULL) {
- eager_optimizing_set_ = new HashMap(&SourceHashCompare);
- }
- return eager_optimizing_set_;
-}
-
-
-bool CompilationCache::ShouldOptimizeEagerly(Handle<JSFunction> function) {
- if (FLAG_opt_eagerly) return true;
- uint32_t hash = function->SourceHash();
- void* key = reinterpret_cast<void*>(hash);
- return EagerOptimizingSet()->Lookup(key, hash, false) != NULL;
-}
-
-
-void CompilationCache::MarkForEagerOptimizing(Handle<JSFunction> function) {
- uint32_t hash = function->SourceHash();
- void* key = reinterpret_cast<void*>(hash);
- EagerOptimizingSet()->Lookup(key, hash, true);
-}
-
-
-void CompilationCache::MarkForLazyOptimizing(Handle<JSFunction> function) {
- uint32_t hash = function->SourceHash();
- void* key = reinterpret_cast<void*>(hash);
- EagerOptimizingSet()->Remove(key, hash);
-}
-
-
-void CompilationCache::ResetEagerOptimizingData() {
- HashMap* set = EagerOptimizingSet();
- if (set->occupancy() > 0) set->Clear();
-}
-
-
-void CompilationCache::Clear() {
- for (int i = 0; i < kSubCacheCount; i++) {
- subcaches_[i]->Clear();
- }
-}
-
-
-void CompilationCache::Iterate(ObjectVisitor* v) {
- for (int i = 0; i < kSubCacheCount; i++) {
- subcaches_[i]->Iterate(v);
- }
-}
-
-
-void CompilationCache::IterateFunctions(ObjectVisitor* v) {
- for (int i = 0; i < kSubCacheCount; i++) {
- subcaches_[i]->IterateFunctions(v);
- }
-}
-
-
-void CompilationCache::MarkCompactPrologue() {
- for (int i = 0; i < kSubCacheCount; i++) {
- subcaches_[i]->Age();
- }
-}
-
-
-void CompilationCache::Enable() {
- enabled_ = true;
-}
-
-
-void CompilationCache::Disable() {
- enabled_ = false;
- Clear();
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/compilation-cache.h b/src/3rdparty/v8/src/compilation-cache.h
deleted file mode 100644
index 8f4af62..0000000
--- a/src/3rdparty/v8/src/compilation-cache.h
+++ /dev/null
@@ -1,300 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_COMPILATION_CACHE_H_
-#define V8_COMPILATION_CACHE_H_
-
-namespace v8 {
-namespace internal {
-
-class HashMap;
-
-// The compilation cache consists of several generational sub-caches which uses
-// this class as a base class. A sub-cache contains a compilation cache tables
-// for each generation of the sub-cache. Since the same source code string has
-// different compiled code for scripts and evals, we use separate sub-caches
-// for different compilation modes, to avoid retrieving the wrong result.
-class CompilationSubCache {
- public:
- CompilationSubCache(Isolate* isolate, int generations)
- : isolate_(isolate),
- generations_(generations) {
- tables_ = NewArray<Object*>(generations);
- }
-
- ~CompilationSubCache() { DeleteArray(tables_); }
-
- // Index for the first generation in the cache.
- static const int kFirstGeneration = 0;
-
- // Get the compilation cache tables for a specific generation.
- Handle<CompilationCacheTable> GetTable(int generation);
-
- // Accessors for first generation.
- Handle<CompilationCacheTable> GetFirstTable() {
- return GetTable(kFirstGeneration);
- }
- void SetFirstTable(Handle<CompilationCacheTable> value) {
- ASSERT(kFirstGeneration < generations_);
- tables_[kFirstGeneration] = *value;
- }
-
- // Age the sub-cache by evicting the oldest generation and creating a new
- // young generation.
- void Age();
-
- // GC support.
- void Iterate(ObjectVisitor* v);
- void IterateFunctions(ObjectVisitor* v);
-
- // Clear this sub-cache evicting all its content.
- void Clear();
-
- // Remove given shared function info from sub-cache.
- void Remove(Handle<SharedFunctionInfo> function_info);
-
- // Number of generations in this sub-cache.
- inline int generations() { return generations_; }
-
- protected:
- Isolate* isolate() { return isolate_; }
-
- private:
- Isolate* isolate_;
- int generations_; // Number of generations.
- Object** tables_; // Compilation cache tables - one for each generation.
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationSubCache);
-};
-
-
-// Sub-cache for scripts.
-class CompilationCacheScript : public CompilationSubCache {
- public:
- CompilationCacheScript(Isolate* isolate, int generations);
-
- Handle<SharedFunctionInfo> Lookup(Handle<String> source,
- Handle<Object> name,
- int line_offset,
- int column_offset);
- void Put(Handle<String> source, Handle<SharedFunctionInfo> function_info);
-
- private:
- MUST_USE_RESULT MaybeObject* TryTablePut(
- Handle<String> source, Handle<SharedFunctionInfo> function_info);
-
- // Note: Returns a new hash table if operation results in expansion.
- Handle<CompilationCacheTable> TablePut(
- Handle<String> source, Handle<SharedFunctionInfo> function_info);
-
-#ifdef QT_BUILD_SCRIPT_LIB
-public:
- static
-#endif
- bool HasOrigin(Handle<SharedFunctionInfo> function_info,
- Handle<Object> name,
- int line_offset,
- int column_offset);
-
- void* script_histogram_;
- bool script_histogram_initialized_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheScript);
-};
-
-
-// Sub-cache for eval scripts.
-class CompilationCacheEval: public CompilationSubCache {
- public:
- CompilationCacheEval(Isolate* isolate, int generations)
- : CompilationSubCache(isolate, generations) { }
-
- Handle<SharedFunctionInfo> Lookup(Handle<String> source,
- Handle<Context> context,
- StrictModeFlag strict_mode
-#ifdef QT_BUILD_SCRIPT_LIB
- , Handle<Object> script_name, int line_offset, int column_offset
-#endif
- );
-
- void Put(Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info);
-
- private:
- MUST_USE_RESULT MaybeObject* TryTablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info);
-
- // Note: Returns a new hash table if operation results in expansion.
- Handle<CompilationCacheTable> TablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
-};
-
-
-// Sub-cache for regular expressions.
-class CompilationCacheRegExp: public CompilationSubCache {
- public:
- CompilationCacheRegExp(Isolate* isolate, int generations)
- : CompilationSubCache(isolate, generations) { }
-
- Handle<FixedArray> Lookup(Handle<String> source, JSRegExp::Flags flags);
-
- void Put(Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data);
- private:
- MUST_USE_RESULT MaybeObject* TryTablePut(Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data);
-
- // Note: Returns a new hash table if operation results in expansion.
- Handle<CompilationCacheTable> TablePut(Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
-};
-
-
-// The compilation cache keeps shared function infos for compiled
-// scripts and evals. The shared function infos are looked up using
-// the source string as the key. For regular expressions the
-// compilation data is cached.
-class CompilationCache {
- public:
- // Finds the script shared function info for a source
- // string. Returns an empty handle if the cache doesn't contain a
- // script for the given source string with the right origin.
- Handle<SharedFunctionInfo> LookupScript(Handle<String> source,
- Handle<Object> name,
- int line_offset,
- int column_offset);
-
- // Finds the shared function info for a source string for eval in a
- // given context. Returns an empty handle if the cache doesn't
- // contain a script for the given source string.
- Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
- Handle<Context> context,
- bool is_global,
- StrictModeFlag strict_mode
-#ifdef QT_BUILD_SCRIPT_LIB
- , Handle<Object> script_name = Handle<Object>(),
- int line_offset = 0, int column_offset = 0
-#endif
- );
-
- // Returns the regexp data associated with the given regexp if it
- // is in cache, otherwise an empty handle.
- Handle<FixedArray> LookupRegExp(Handle<String> source,
- JSRegExp::Flags flags);
-
- // Associate the (source, kind) pair to the shared function
- // info. This may overwrite an existing mapping.
- void PutScript(Handle<String> source,
- Handle<SharedFunctionInfo> function_info);
-
- // Associate the (source, context->closure()->shared(), kind) triple
- // with the shared function info. This may overwrite an existing mapping.
- void PutEval(Handle<String> source,
- Handle<Context> context,
- bool is_global,
- Handle<SharedFunctionInfo> function_info);
-
- // Associate the (source, flags) pair to the given regexp data.
- // This may overwrite an existing mapping.
- void PutRegExp(Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data);
-
- // Support for eager optimization tracking.
- bool ShouldOptimizeEagerly(Handle<JSFunction> function);
- void MarkForEagerOptimizing(Handle<JSFunction> function);
- void MarkForLazyOptimizing(Handle<JSFunction> function);
-
- // Reset the eager optimization tracking data.
- void ResetEagerOptimizingData();
-
- // Clear the cache - also used to initialize the cache at startup.
- void Clear();
-
- // Remove given shared function info from all caches.
- void Remove(Handle<SharedFunctionInfo> function_info);
-
- // GC support.
- void Iterate(ObjectVisitor* v);
- void IterateFunctions(ObjectVisitor* v);
-
- // Notify the cache that a mark-sweep garbage collection is about to
- // take place. This is used to retire entries from the cache to
- // avoid keeping them alive too long without using them.
- void MarkCompactPrologue();
-
- // Enable/disable compilation cache. Used by debugger to disable compilation
- // cache during debugging to make sure new scripts are always compiled.
- void Enable();
- void Disable();
- private:
- explicit CompilationCache(Isolate* isolate);
- ~CompilationCache();
-
- HashMap* EagerOptimizingSet();
-
- // The number of sub caches covering the different types to cache.
- static const int kSubCacheCount = 4;
-
- bool IsEnabled() { return FLAG_compilation_cache && enabled_; }
-
- Isolate* isolate() { return isolate_; }
-
- Isolate* isolate_;
-
- CompilationCacheScript script_;
- CompilationCacheEval eval_global_;
- CompilationCacheEval eval_contextual_;
- CompilationCacheRegExp reg_exp_;
- CompilationSubCache* subcaches_[kSubCacheCount];
-
- // Current enable state of the compilation cache.
- bool enabled_;
-
- HashMap* eager_optimizing_set_;
-
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(CompilationCache);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_COMPILATION_CACHE_H_
diff --git a/src/3rdparty/v8/src/compiler.cc b/src/3rdparty/v8/src/compiler.cc
deleted file mode 100755
index c36dab8..0000000
--- a/src/3rdparty/v8/src/compiler.cc
+++ /dev/null
@@ -1,808 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "compiler.h"
-
-#include "bootstrapper.h"
-#include "codegen-inl.h"
-#include "compilation-cache.h"
-#include "data-flow.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "gdb-jit.h"
-#include "hydrogen.h"
-#include "lithium.h"
-#include "liveedit.h"
-#include "parser.h"
-#include "rewriter.h"
-#include "runtime-profiler.h"
-#include "scopeinfo.h"
-#include "scopes.h"
-#include "vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-CompilationInfo::CompilationInfo(Handle<Script> script)
- : isolate_(script->GetIsolate()),
- flags_(0),
- function_(NULL),
- scope_(NULL),
- script_(script),
- extension_(NULL),
- pre_parse_data_(NULL),
- supports_deoptimization_(false),
- osr_ast_id_(AstNode::kNoNumber) {
- Initialize(NONOPT);
-}
-
-
-CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)
- : isolate_(shared_info->GetIsolate()),
- flags_(IsLazy::encode(true)),
- function_(NULL),
- scope_(NULL),
- shared_info_(shared_info),
- script_(Handle<Script>(Script::cast(shared_info->script()))),
- extension_(NULL),
- pre_parse_data_(NULL),
- supports_deoptimization_(false),
- osr_ast_id_(AstNode::kNoNumber) {
- Initialize(BASE);
-}
-
-
-CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
- : isolate_(closure->GetIsolate()),
- flags_(IsLazy::encode(true)),
- function_(NULL),
- scope_(NULL),
- closure_(closure),
- shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
- script_(Handle<Script>(Script::cast(shared_info_->script()))),
- extension_(NULL),
- pre_parse_data_(NULL),
- supports_deoptimization_(false),
- osr_ast_id_(AstNode::kNoNumber) {
- Initialize(BASE);
-}
-
-
-void CompilationInfo::DisableOptimization() {
- if (FLAG_optimize_closures) {
- // If we allow closures optimizations and it's an optimizable closure
- // mark it correspondingly.
- bool is_closure = closure_.is_null() && !scope_->HasTrivialOuterContext();
- if (is_closure) {
- bool is_optimizable_closure =
- !scope_->outer_scope_calls_eval() && !scope_->inside_with();
- if (is_optimizable_closure) {
- SetMode(BASE);
- return;
- }
- }
- }
-
- SetMode(NONOPT);
-}
-
-
-// Determine whether to use the full compiler for all code. If the flag
-// --always-full-compiler is specified this is the case. For the virtual frame
-// based compiler the full compiler is also used if a debugger is connected, as
-// the code from the full compiler supports mode precise break points. For the
-// crankshaft adaptive compiler debugging the optimized code is not possible at
-// all. However crankshaft support recompilation of functions, so in this case
-// the full compiler need not be be used if a debugger is attached, but only if
-// break points has actually been set.
-static bool AlwaysFullCompiler() {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Isolate* isolate = Isolate::Current();
- if (V8::UseCrankshaft()) {
- return FLAG_always_full_compiler || isolate->debug()->has_break_points();
- } else {
- return FLAG_always_full_compiler || isolate->debugger()->IsDebuggerActive();
- }
-#else
- return FLAG_always_full_compiler;
-#endif
-}
-
-
-static void FinishOptimization(Handle<JSFunction> function, int64_t start) {
- int opt_count = function->shared()->opt_count();
- function->shared()->set_opt_count(opt_count + 1);
- double ms = static_cast<double>(OS::Ticks() - start) / 1000;
- if (FLAG_trace_opt) {
- PrintF("[optimizing: ");
- function->PrintName();
- PrintF(" / %" V8PRIxPTR, reinterpret_cast<intptr_t>(*function));
- PrintF(" - took %0.3f ms]\n", ms);
- }
- if (FLAG_trace_opt_stats) {
- static double compilation_time = 0.0;
- static int compiled_functions = 0;
- static int code_size = 0;
-
- compilation_time += ms;
- compiled_functions++;
- code_size += function->shared()->SourceSize();
- PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
- compiled_functions,
- code_size,
- compilation_time);
- }
-}
-
-
-static void AbortAndDisable(CompilationInfo* info) {
- // Disable optimization for the shared function info and mark the
- // code as non-optimizable. The marker on the shared function info
- // is there because we flush non-optimized code thereby loosing the
- // non-optimizable information for the code. When the code is
- // regenerated and set on the shared function info it is marked as
- // non-optimizable if optimization is disabled for the shared
- // function info.
- Handle<SharedFunctionInfo> shared = info->shared_info();
- shared->set_optimization_disabled(true);
- Handle<Code> code = Handle<Code>(shared->code());
- ASSERT(code->kind() == Code::FUNCTION);
- code->set_optimizable(false);
- info->SetCode(code);
- Isolate* isolate = code->GetIsolate();
- isolate->compilation_cache()->MarkForLazyOptimizing(info->closure());
- if (FLAG_trace_opt) {
- PrintF("[disabled optimization for: ");
- info->closure()->PrintName();
- PrintF(" / %" V8PRIxPTR "]\n",
- reinterpret_cast<intptr_t>(*info->closure()));
- }
-}
-
-
-static bool MakeCrankshaftCode(CompilationInfo* info) {
- // Test if we can optimize this function when asked to. We can only
- // do this after the scopes are computed.
- if (!info->AllowOptimize()) info->DisableOptimization();
-
- // In case we are not optimizing simply return the code from
- // the full code generator.
- if (!info->IsOptimizing()) {
- return FullCodeGenerator::MakeCode(info);
- }
-
- // We should never arrive here if there is not code object on the
- // shared function object.
- Handle<Code> code(info->shared_info()->code());
- ASSERT(code->kind() == Code::FUNCTION);
-
- // We should never arrive here if optimization has been disabled on the
- // shared function info.
- ASSERT(!info->shared_info()->optimization_disabled());
-
- // Fall back to using the full code generator if it's not possible
- // to use the Hydrogen-based optimizing compiler. We already have
- // generated code for this from the shared function object.
- if (AlwaysFullCompiler() || !FLAG_use_hydrogen) {
- info->SetCode(code);
- return true;
- }
-
- // Limit the number of times we re-compile a functions with
- // the optimizing compiler.
- const int kMaxOptCount =
- FLAG_deopt_every_n_times == 0 ? Compiler::kDefaultMaxOptCount : 1000;
- if (info->shared_info()->opt_count() > kMaxOptCount) {
- AbortAndDisable(info);
- // True indicates the compilation pipeline is still going, not
- // necessarily that we optimized the code.
- return true;
- }
-
- // Due to an encoding limit on LUnallocated operands in the Lithium
- // language, we cannot optimize functions with too many formal parameters
- // or perform on-stack replacement for function with too many
- // stack-allocated local variables.
- //
- // The encoding is as a signed value, with parameters and receiver using
- // the negative indices and locals the non-negative ones.
- const int limit = LUnallocated::kMaxFixedIndices / 2;
- Scope* scope = info->scope();
- if ((scope->num_parameters() + 1) > limit ||
- scope->num_stack_slots() > limit) {
- AbortAndDisable(info);
- // True indicates the compilation pipeline is still going, not
- // necessarily that we optimized the code.
- return true;
- }
-
- // Take --hydrogen-filter into account.
- Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
- Handle<String> name = info->function()->debug_name();
- bool match = filter.is_empty() || name->IsEqualTo(filter);
- if (!match) {
- info->SetCode(code);
- return true;
- }
-
- // Recompile the unoptimized version of the code if the current version
- // doesn't have deoptimization support. Alternatively, we may decide to
- // run the full code generator to get a baseline for the compile-time
- // performance of the hydrogen-based compiler.
- int64_t start = OS::Ticks();
- bool should_recompile = !info->shared_info()->has_deoptimization_support();
- if (should_recompile || FLAG_hydrogen_stats) {
- HPhase phase(HPhase::kFullCodeGen);
- CompilationInfo unoptimized(info->shared_info());
- // Note that we use the same AST that we will use for generating the
- // optimized code.
- unoptimized.SetFunction(info->function());
- unoptimized.SetScope(info->scope());
- if (should_recompile) unoptimized.EnableDeoptimizationSupport();
- bool succeeded = FullCodeGenerator::MakeCode(&unoptimized);
- if (should_recompile) {
- if (!succeeded) return false;
- Handle<SharedFunctionInfo> shared = info->shared_info();
- shared->EnableDeoptimizationSupport(*unoptimized.code());
- // The existing unoptimized code was replaced with the new one.
- Compiler::RecordFunctionCompilation(
- Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
- }
- }
-
- // Check that the unoptimized, shared code is ready for
- // optimizations. When using the always_opt flag we disregard the
- // optimizable marker in the code object and optimize anyway. This
- // is safe as long as the unoptimized code has deoptimization
- // support.
- ASSERT(FLAG_always_opt || code->optimizable());
- ASSERT(info->shared_info()->has_deoptimization_support());
-
- if (FLAG_trace_hydrogen) {
- PrintF("-----------------------------------------------------------\n");
- PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
- HTracer::Instance()->TraceCompilation(info->function());
- }
-
- Handle<Context> global_context(info->closure()->context()->global_context());
- TypeFeedbackOracle oracle(code, global_context);
- HGraphBuilder builder(info, &oracle);
- HPhase phase(HPhase::kTotal);
- HGraph* graph = builder.CreateGraph();
- if (info->isolate()->has_pending_exception()) {
- info->SetCode(Handle<Code>::null());
- return false;
- }
-
- if (graph != NULL && FLAG_build_lithium) {
- Handle<Code> optimized_code = graph->Compile(info);
- if (!optimized_code.is_null()) {
- info->SetCode(optimized_code);
- FinishOptimization(info->closure(), start);
- return true;
- }
- }
-
- // Compilation with the Hydrogen compiler failed. Keep using the
- // shared code but mark it as unoptimizable.
- AbortAndDisable(info);
- // True indicates the compilation pipeline is still going, not necessarily
- // that we optimized the code.
- return true;
-}
-
-
-static bool MakeCode(CompilationInfo* info) {
- // Precondition: code has been parsed. Postcondition: the code field in
- // the compilation info is set if compilation succeeded.
- ASSERT(info->function() != NULL);
-
- if (Rewriter::Rewrite(info) && Scope::Analyze(info)) {
- if (V8::UseCrankshaft()) return MakeCrankshaftCode(info);
- // If crankshaft is not supported fall back to full code generator
- // for all compilation.
- return FullCodeGenerator::MakeCode(info);
- }
-
- return false;
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-bool Compiler::MakeCodeForLiveEdit(CompilationInfo* info) {
- // Precondition: code has been parsed. Postcondition: the code field in
- // the compilation info is set if compilation succeeded.
- bool succeeded = MakeCode(info);
- if (!info->shared_info().is_null()) {
- Handle<SerializedScopeInfo> scope_info =
- SerializedScopeInfo::Create(info->scope());
- info->shared_info()->set_scope_info(*scope_info);
- }
- return succeeded;
-}
-#endif
-
-
-static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
- CompilationZoneScope zone_scope(DELETE_ON_EXIT);
-
- Isolate* isolate = info->isolate();
- PostponeInterruptsScope postpone(isolate);
-
- ASSERT(!isolate->global_context().is_null());
- Handle<Script> script = info->script();
- script->set_context_data((*isolate->global_context())->data());
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (info->is_eval()) {
- Script::CompilationType compilation_type = Script::COMPILATION_TYPE_EVAL;
- script->set_compilation_type(Smi::FromInt(compilation_type));
- // For eval scripts add information on the function from which eval was
- // called.
- if (info->is_eval()) {
- StackTraceFrameIterator it(isolate);
- if (!it.done()) {
- script->set_eval_from_shared(
- JSFunction::cast(it.frame()->function())->shared());
- Code* code = it.frame()->LookupCode();
- int offset = static_cast<int>(
- it.frame()->pc() - code->instruction_start());
- script->set_eval_from_instructions_offset(Smi::FromInt(offset));
- }
- }
- }
-
- // Notify debugger
- isolate->debugger()->OnBeforeCompile(script);
-#endif
-
- // Only allow non-global compiles for eval.
- ASSERT(info->is_eval() || info->is_global());
-
- if (!ParserApi::Parse(info)) return Handle<SharedFunctionInfo>::null();
-
- // Measure how long it takes to do the compilation; only take the
- // rest of the function into account to avoid overlap with the
- // parsing statistics.
- HistogramTimer* rate = info->is_eval()
- ? info->isolate()->counters()->compile_eval()
- : info->isolate()->counters()->compile();
- HistogramTimerScope timer(rate);
-
- // Compile the code.
- FunctionLiteral* lit = info->function();
- LiveEditFunctionTracker live_edit_tracker(isolate, lit);
- if (!MakeCode(info)) {
- isolate->StackOverflow();
- return Handle<SharedFunctionInfo>::null();
- }
-
- // Allocate function.
- ASSERT(!info->code().is_null());
- Handle<SharedFunctionInfo> result =
- isolate->factory()->NewSharedFunctionInfo(
- lit->name(),
- lit->materialized_literal_count(),
- info->code(),
- SerializedScopeInfo::Create(info->scope()));
-
- ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
- Compiler::SetFunctionInfo(result, lit, true, script);
-
- if (script->name()->IsString()) {
- PROFILE(isolate, CodeCreateEvent(
- info->is_eval()
- ? Logger::EVAL_TAG
- : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
- *info->code(),
- *result,
- String::cast(script->name())));
- GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
- script,
- info->code()));
- } else {
- PROFILE(isolate, CodeCreateEvent(
- info->is_eval()
- ? Logger::EVAL_TAG
- : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
- *info->code(),
- *result,
- isolate->heap()->empty_string()));
- GDBJIT(AddCode(Handle<String>(), script, info->code()));
- }
-
- // Hint to the runtime system used when allocating space for initial
- // property space by setting the expected number of properties for
- // the instances of the function.
- SetExpectedNofPropertiesFromEstimate(result, lit->expected_property_count());
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Notify debugger
- isolate->debugger()->OnAfterCompile(
- script, Debugger::NO_AFTER_COMPILE_FLAGS);
-#endif
-
- live_edit_tracker.RecordFunctionInfo(result, lit);
-
- return result;
-}
-
-
-Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
- Handle<Object> script_name,
- int line_offset,
- int column_offset,
- v8::Extension* extension,
- ScriptDataImpl* input_pre_data,
- Handle<Object> script_data,
- NativesFlag natives) {
- Isolate* isolate = source->GetIsolate();
- int source_length = source->length();
- isolate->counters()->total_load_size()->Increment(source_length);
- isolate->counters()->total_compile_size()->Increment(source_length);
-
- // The VM is in the COMPILER state until exiting this function.
- VMState state(isolate, COMPILER);
-
- CompilationCache* compilation_cache = isolate->compilation_cache();
-
- // Do a lookup in the compilation cache but not for extensions.
- Handle<SharedFunctionInfo> result;
- if (extension == NULL) {
- result = compilation_cache->LookupScript(source,
- script_name,
- line_offset,
- column_offset);
- }
-
- if (result.is_null()) {
- // No cache entry found. Do pre-parsing, if it makes sense, and compile
- // the script.
- // Building preparse data that is only used immediately after is only a
- // saving if we might skip building the AST for lazily compiled functions.
- // I.e., preparse data isn't relevant when the lazy flag is off, and
- // for small sources, odds are that there aren't many functions
- // that would be compiled lazily anyway, so we skip the preparse step
- // in that case too.
- ScriptDataImpl* pre_data = input_pre_data;
- if (pre_data == NULL
- && source_length >= FLAG_min_preparse_length) {
- if (source->IsExternalTwoByteString()) {
- ExternalTwoByteStringUC16CharacterStream stream(
- Handle<ExternalTwoByteString>::cast(source), 0, source->length());
- pre_data = ParserApi::PartialPreParse(&stream, extension);
- } else {
- GenericStringUC16CharacterStream stream(source, 0, source->length());
- pre_data = ParserApi::PartialPreParse(&stream, extension);
- }
- }
-
- // Create a script object describing the script to be compiled.
- Handle<Script> script = FACTORY->NewScript(source);
- if (natives == NATIVES_CODE) {
- script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
- }
- if (!script_name.is_null()) {
- script->set_name(*script_name);
- script->set_line_offset(Smi::FromInt(line_offset));
- script->set_column_offset(Smi::FromInt(column_offset));
- }
-
- script->set_data(script_data.is_null() ? HEAP->undefined_value()
- : *script_data);
-
- // Compile the function and add it to the cache.
- CompilationInfo info(script);
- info.MarkAsGlobal();
- info.SetExtension(extension);
- info.SetPreParseData(pre_data);
- if (natives == NATIVES_CODE) info.MarkAsAllowingNativesSyntax();
- result = MakeFunctionInfo(&info);
- if (extension == NULL && !result.is_null()) {
- compilation_cache->PutScript(source, result);
- }
-
- // Get rid of the pre-parsing data (if necessary).
- if (input_pre_data == NULL && pre_data != NULL) {
- delete pre_data;
- }
- }
-
- if (result.is_null()) isolate->ReportPendingMessages();
- return result;
-}
-
-
-Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
- Handle<Context> context,
- bool is_global,
- StrictModeFlag strict_mode
-#ifdef QT_BUILD_SCRIPT_LIB
- , Handle<Object> script_name,
- int line_offset, int column_offset
-#endif
- ) {
- Isolate* isolate = source->GetIsolate();
- int source_length = source->length();
- isolate->counters()->total_eval_size()->Increment(source_length);
- isolate->counters()->total_compile_size()->Increment(source_length);
-
- // The VM is in the COMPILER state until exiting this function.
- VMState state(isolate, COMPILER);
-
- // Do a lookup in the compilation cache; if the entry is not there, invoke
- // the compiler and add the result to the cache.
- Handle<SharedFunctionInfo> result;
- CompilationCache* compilation_cache = isolate->compilation_cache();
- result = compilation_cache->LookupEval(source,
- context,
- is_global,
- strict_mode
-#ifdef QT_BUILD_SCRIPT_LIB
- ,script_name, line_offset, column_offset
-#endif
- );
-
- if (result.is_null()) {
- // Create a script object describing the script to be compiled.
- Handle<Script> script = isolate->factory()->NewScript(source);
-#ifdef QT_BUILD_SCRIPT_LIB
- if (!script_name.is_null()) {
- script->set_name(*script_name);
- script->set_line_offset(Smi::FromInt(line_offset));
- script->set_column_offset(Smi::FromInt(column_offset));
- }
-#endif
- CompilationInfo info(script);
- info.MarkAsEval();
- if (is_global) info.MarkAsGlobal();
- if (strict_mode == kStrictMode) info.MarkAsStrict();
- info.SetCallingContext(context);
- result = MakeFunctionInfo(&info);
- if (!result.is_null()) {
- CompilationCache* compilation_cache = isolate->compilation_cache();
- // If caller is strict mode, the result must be strict as well,
- // but not the other way around. Consider:
- // eval("'use strict'; ...");
- ASSERT(strict_mode == kNonStrictMode || result->strict_mode());
- compilation_cache->PutEval(source, context, is_global, result);
- }
- }
-
- return result;
-}
-
-
-bool Compiler::CompileLazy(CompilationInfo* info) {
- CompilationZoneScope zone_scope(DELETE_ON_EXIT);
-
- // The VM is in the COMPILER state until exiting this function.
- VMState state(info->isolate(), COMPILER);
-
- Isolate* isolate = info->isolate();
- PostponeInterruptsScope postpone(isolate);
-
- Handle<SharedFunctionInfo> shared = info->shared_info();
- int compiled_size = shared->end_position() - shared->start_position();
- isolate->counters()->total_compile_size()->Increment(compiled_size);
-
- // Generate the AST for the lazily compiled function.
- if (ParserApi::Parse(info)) {
- // Measure how long it takes to do the lazy compilation; only take the
- // rest of the function into account to avoid overlap with the lazy
- // parsing statistics.
- HistogramTimerScope timer(isolate->counters()->compile_lazy());
-
- // Compile the code.
- if (!MakeCode(info)) {
- if (!isolate->has_pending_exception()) {
- isolate->StackOverflow();
- }
- } else {
- ASSERT(!info->code().is_null());
- Handle<Code> code = info->code();
- // Set optimizable to false if this is disallowed by the shared
- // function info, e.g., we might have flushed the code and must
- // reset this bit when lazy compiling the code again.
- if (shared->optimization_disabled()) code->set_optimizable(false);
-
- Handle<JSFunction> function = info->closure();
- RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
-
- if (info->IsOptimizing()) {
- function->ReplaceCode(*code);
- } else {
- // Update the shared function info with the compiled code and the
- // scope info. Please note, that the order of the shared function
- // info initialization is important since set_scope_info might
- // trigger a GC, causing the ASSERT below to be invalid if the code
- // was flushed. By settting the code object last we avoid this.
- Handle<SerializedScopeInfo> scope_info =
- SerializedScopeInfo::Create(info->scope());
- shared->set_scope_info(*scope_info);
- shared->set_code(*code);
- if (!function.is_null()) {
- function->ReplaceCode(*code);
- ASSERT(!function->IsOptimized());
- }
-
- // Set the expected number of properties for instances.
- FunctionLiteral* lit = info->function();
- int expected = lit->expected_property_count();
- SetExpectedNofPropertiesFromEstimate(shared, expected);
-
- // Set the optimization hints after performing lazy compilation, as
- // these are not set when the function is set up as a lazily
- // compiled function.
- shared->SetThisPropertyAssignmentsInfo(
- lit->has_only_simple_this_property_assignments(),
- *lit->this_property_assignments());
-
- // Check the function has compiled code.
- ASSERT(shared->is_compiled());
- shared->set_code_age(0);
-
- if (info->AllowOptimize() && !shared->optimization_disabled()) {
- // If we're asked to always optimize, we compile the optimized
- // version of the function right away - unless the debugger is
- // active as it makes no sense to compile optimized code then.
- if (FLAG_always_opt &&
- !Isolate::Current()->debug()->has_break_points()) {
- CompilationInfo optimized(function);
- optimized.SetOptimizing(AstNode::kNoNumber);
- return CompileLazy(&optimized);
- } else if (isolate->compilation_cache()->ShouldOptimizeEagerly(
- function)) {
- isolate->runtime_profiler()->OptimizeSoon(*function);
- }
- }
- }
-
- return true;
- }
- }
-
- ASSERT(info->code().is_null());
- return false;
-}
-
-
-Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
- Handle<Script> script) {
- // Precondition: code has been parsed and scopes have been analyzed.
- CompilationInfo info(script);
- info.SetFunction(literal);
- info.SetScope(literal->scope());
-
- LiveEditFunctionTracker live_edit_tracker(info.isolate(), literal);
- // Determine if the function can be lazily compiled. This is necessary to
- // allow some of our builtin JS files to be lazily compiled. These
- // builtins cannot be handled lazily by the parser, since we have to know
- // if a function uses the special natives syntax, which is something the
- // parser records.
- bool allow_lazy = literal->AllowsLazyCompilation() &&
- !LiveEditFunctionTracker::IsActive(info.isolate());
-
- Handle<SerializedScopeInfo> scope_info(SerializedScopeInfo::Empty());
-
- // Generate code
- if (FLAG_lazy && allow_lazy) {
- Handle<Code> code = info.isolate()->builtins()->LazyCompile();
- info.SetCode(code);
- } else if ((V8::UseCrankshaft() && MakeCrankshaftCode(&info)) ||
- (!V8::UseCrankshaft() && FullCodeGenerator::MakeCode(&info))) {
- ASSERT(!info.code().is_null());
- scope_info = SerializedScopeInfo::Create(info.scope());
- } else {
- return Handle<SharedFunctionInfo>::null();
- }
-
- // Create a shared function info object.
- Handle<SharedFunctionInfo> result =
- FACTORY->NewSharedFunctionInfo(literal->name(),
- literal->materialized_literal_count(),
- info.code(),
- scope_info);
- SetFunctionInfo(result, literal, false, script);
- RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
- result->set_allows_lazy_compilation(allow_lazy);
-
- // Set the expected number of properties for instances and return
- // the resulting function.
- SetExpectedNofPropertiesFromEstimate(result,
- literal->expected_property_count());
- live_edit_tracker.RecordFunctionInfo(result, literal);
- return result;
-}
-
-
-// Sets the function info on a function.
-// The start_position points to the first '(' character after the function name
-// in the full script source. When counting characters in the script source the
-// the first character is number 0 (not 1).
-void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script) {
- function_info->set_length(lit->num_parameters());
- function_info->set_formal_parameter_count(lit->num_parameters());
- function_info->set_script(*script);
- function_info->set_function_token_position(lit->function_token_position());
- function_info->set_start_position(lit->start_position());
- function_info->set_end_position(lit->end_position());
- function_info->set_is_expression(lit->is_expression());
- function_info->set_is_toplevel(is_toplevel);
- function_info->set_inferred_name(*lit->inferred_name());
- function_info->SetThisPropertyAssignmentsInfo(
- lit->has_only_simple_this_property_assignments(),
- *lit->this_property_assignments());
- function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
- function_info->set_strict_mode(lit->strict_mode());
-}
-
-
-void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
- CompilationInfo* info,
- Handle<SharedFunctionInfo> shared) {
- // SharedFunctionInfo is passed separately, because if CompilationInfo
- // was created using Script object, it will not have it.
-
- // Log the code generation. If source information is available include
- // script name and line number. Check explicitly whether logging is
- // enabled as finding the line number is not free.
- if (info->isolate()->logger()->is_logging() || CpuProfiler::is_profiling()) {
- Handle<Script> script = info->script();
- Handle<Code> code = info->code();
- if (*code == info->isolate()->builtins()->builtin(Builtins::kLazyCompile))
- return;
- if (script->name()->IsString()) {
- int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
- USE(line_num);
- PROFILE(info->isolate(),
- CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
- *code,
- *shared,
- String::cast(script->name()),
- line_num));
- } else {
- PROFILE(info->isolate(),
- CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
- *code,
- *shared,
- shared->DebugName()));
- }
- }
-
- GDBJIT(AddCode(name,
- Handle<Script>(info->script()),
- Handle<Code>(info->code())));
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/compiler.h b/src/3rdparty/v8/src/compiler.h
deleted file mode 100644
index 8ea314c..0000000
--- a/src/3rdparty/v8/src/compiler.h
+++ /dev/null
@@ -1,312 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_COMPILER_H_
-#define V8_COMPILER_H_
-
-#include "ast.h"
-#include "frame-element.h"
-#include "register-allocator.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-class ScriptDataImpl;
-
-// CompilationInfo encapsulates some information known at compile time. It
-// is constructed based on the resources available at compile-time.
-class CompilationInfo BASE_EMBEDDED {
- public:
- explicit CompilationInfo(Handle<Script> script);
- explicit CompilationInfo(Handle<SharedFunctionInfo> shared_info);
- explicit CompilationInfo(Handle<JSFunction> closure);
-
- Isolate* isolate() {
- ASSERT(Isolate::Current() == isolate_);
- return isolate_;
- }
- bool is_lazy() const { return (flags_ & IsLazy::mask()) != 0; }
- bool is_eval() const { return (flags_ & IsEval::mask()) != 0; }
- bool is_global() const { return (flags_ & IsGlobal::mask()) != 0; }
- bool is_strict() const { return (flags_ & IsStrict::mask()) != 0; }
- bool is_in_loop() const { return (flags_ & IsInLoop::mask()) != 0; }
- FunctionLiteral* function() const { return function_; }
- Scope* scope() const { return scope_; }
- Handle<Code> code() const { return code_; }
- Handle<JSFunction> closure() const { return closure_; }
- Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
- Handle<Script> script() const { return script_; }
- v8::Extension* extension() const { return extension_; }
- ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
- Handle<Context> calling_context() const { return calling_context_; }
- int osr_ast_id() const { return osr_ast_id_; }
-
- void MarkAsEval() {
- ASSERT(!is_lazy());
- flags_ |= IsEval::encode(true);
- }
- void MarkAsGlobal() {
- ASSERT(!is_lazy());
- flags_ |= IsGlobal::encode(true);
- }
- void MarkAsStrict() {
- flags_ |= IsStrict::encode(true);
- }
- StrictModeFlag StrictMode() {
- return is_strict() ? kStrictMode : kNonStrictMode;
- }
- void MarkAsInLoop() {
- ASSERT(is_lazy());
- flags_ |= IsInLoop::encode(true);
- }
- void MarkAsAllowingNativesSyntax() {
- flags_ |= IsNativesSyntaxAllowed::encode(true);
- }
- bool allows_natives_syntax() const {
- return IsNativesSyntaxAllowed::decode(flags_);
- }
- void SetFunction(FunctionLiteral* literal) {
- ASSERT(function_ == NULL);
- function_ = literal;
- }
- void SetScope(Scope* scope) {
- ASSERT(scope_ == NULL);
- scope_ = scope;
- }
- void SetCode(Handle<Code> code) { code_ = code; }
- void SetExtension(v8::Extension* extension) {
- ASSERT(!is_lazy());
- extension_ = extension;
- }
- void SetPreParseData(ScriptDataImpl* pre_parse_data) {
- ASSERT(!is_lazy());
- pre_parse_data_ = pre_parse_data;
- }
- void SetCallingContext(Handle<Context> context) {
- ASSERT(is_eval());
- calling_context_ = context;
- }
- void SetOsrAstId(int osr_ast_id) {
- ASSERT(IsOptimizing());
- osr_ast_id_ = osr_ast_id;
- }
-
- bool has_global_object() const {
- return !closure().is_null() && (closure()->context()->global() != NULL);
- }
-
- GlobalObject* global_object() const {
- return has_global_object() ? closure()->context()->global() : NULL;
- }
-
- // Accessors for the different compilation modes.
- bool IsOptimizing() const { return mode_ == OPTIMIZE; }
- bool IsOptimizable() const { return mode_ == BASE; }
- void SetOptimizing(int osr_ast_id) {
- SetMode(OPTIMIZE);
- osr_ast_id_ = osr_ast_id;
- }
- void DisableOptimization();
-
- // Deoptimization support.
- bool HasDeoptimizationSupport() const { return supports_deoptimization_; }
- void EnableDeoptimizationSupport() {
- ASSERT(IsOptimizable());
- supports_deoptimization_ = true;
- }
-
- // Determine whether or not we can adaptively optimize.
- bool AllowOptimize() {
- return V8::UseCrankshaft() && !closure_.is_null();
- }
-
- private:
- Isolate* isolate_;
-
- // Compilation mode.
- // BASE is generated by the full codegen, optionally prepared for bailouts.
- // OPTIMIZE is optimized code generated by the Hydrogen-based backend.
- // NONOPT is generated by the full codegen or the classic backend
- // and is not prepared for recompilation/bailouts. These functions
- // are never recompiled.
- enum Mode {
- BASE,
- OPTIMIZE,
- NONOPT
- };
-
- CompilationInfo() : function_(NULL) {}
-
- void Initialize(Mode mode) {
- mode_ = V8::UseCrankshaft() ? mode : NONOPT;
- if (!shared_info_.is_null() && shared_info_->strict_mode()) {
- MarkAsStrict();
- }
- }
-
- void SetMode(Mode mode) {
- ASSERT(V8::UseCrankshaft());
- mode_ = mode;
- }
-
- // Flags using template class BitField<type, start, length>. All are
- // false by default.
- //
- // Compilation is either eager or lazy.
- class IsLazy: public BitField<bool, 0, 1> {};
- // Flags that can be set for eager compilation.
- class IsEval: public BitField<bool, 1, 1> {};
- class IsGlobal: public BitField<bool, 2, 1> {};
- // Flags that can be set for lazy compilation.
- class IsInLoop: public BitField<bool, 3, 1> {};
- // Strict mode - used in eager compilation.
- class IsStrict: public BitField<bool, 4, 1> {};
- // Native syntax (%-stuff) allowed?
- class IsNativesSyntaxAllowed: public BitField<bool, 5, 1> {};
-
- unsigned flags_;
-
- // Fields filled in by the compilation pipeline.
- // AST filled in by the parser.
- FunctionLiteral* function_;
- // The scope of the function literal as a convenience. Set to indicate
- // that scopes have been analyzed.
- Scope* scope_;
- // The compiled code.
- Handle<Code> code_;
-
- // Possible initial inputs to the compilation process.
- Handle<JSFunction> closure_;
- Handle<SharedFunctionInfo> shared_info_;
- Handle<Script> script_;
-
- // Fields possibly needed for eager compilation, NULL by default.
- v8::Extension* extension_;
- ScriptDataImpl* pre_parse_data_;
-
- // The context of the caller is needed for eval code, and will be a null
- // handle otherwise.
- Handle<Context> calling_context_;
-
- // Compilation mode flag and whether deoptimization is allowed.
- Mode mode_;
- bool supports_deoptimization_;
- int osr_ast_id_;
-
- DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
-};
-
-
-// The V8 compiler
-//
-// General strategy: Source code is translated into an anonymous function w/o
-// parameters which then can be executed. If the source code contains other
-// functions, they will be compiled and allocated as part of the compilation
-// of the source code.
-
-// Please note this interface returns shared function infos. This means you
-// need to call Factory::NewFunctionFromSharedFunctionInfo before you have a
-// real function with a context.
-
-class Compiler : public AllStatic {
- public:
- // Default maximum number of function optimization attempts before we
- // give up.
- static const int kDefaultMaxOptCount = 10;
-
- static const int kMaxInliningLevels = 3;
-
- // All routines return a SharedFunctionInfo.
- // If an error occurs an exception is raised and the return handle
- // contains NULL.
-
- // Compile a String source within a context.
- static Handle<SharedFunctionInfo> Compile(Handle<String> source,
- Handle<Object> script_name,
- int line_offset,
- int column_offset,
- v8::Extension* extension,
- ScriptDataImpl* pre_data,
- Handle<Object> script_data,
- NativesFlag is_natives_code);
-
- // Compile a String source within a context for Eval.
- static Handle<SharedFunctionInfo> CompileEval(Handle<String> source,
- Handle<Context> context,
- bool is_global,
- StrictModeFlag strict_mode
-#ifdef QT_BUILD_SCRIPT_LIB
- , Handle<Object> script_name = Handle<Object>(),
- int line_offset = 0, int column_offset = 0
-#endif
- );
-
- // Compile from function info (used for lazy compilation). Returns true on
- // success and false if the compilation resulted in a stack overflow.
- static bool CompileLazy(CompilationInfo* info);
-
- // Compile a shared function info object (the function is possibly lazily
- // compiled).
- static Handle<SharedFunctionInfo> BuildFunctionInfo(FunctionLiteral* node,
- Handle<Script> script);
-
- // Set the function info for a newly compiled function.
- static void SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- static bool MakeCodeForLiveEdit(CompilationInfo* info);
-#endif
-
- static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
- CompilationInfo* info,
- Handle<SharedFunctionInfo> shared);
-};
-
-
-// During compilation we need a global list of handles to constants
-// for frame elements. When the zone gets deleted, we make sure to
-// clear this list of handles as well.
-class CompilationZoneScope : public ZoneScope {
- public:
- explicit CompilationZoneScope(ZoneScopeMode mode) : ZoneScope(mode) { }
- virtual ~CompilationZoneScope() {
- if (ShouldDeleteOnExit()) {
- Isolate* isolate = Isolate::Current();
- isolate->frame_element_constant_list()->Clear();
- isolate->result_constant_list()->Clear();
- }
- }
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_COMPILER_H_
diff --git a/src/3rdparty/v8/src/contexts.cc b/src/3rdparty/v8/src/contexts.cc
deleted file mode 100644
index 520f3dd..0000000
--- a/src/3rdparty/v8/src/contexts.cc
+++ /dev/null
@@ -1,327 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "bootstrapper.h"
-#include "debug.h"
-#include "scopeinfo.h"
-
-namespace v8 {
-namespace internal {
-
-JSBuiltinsObject* Context::builtins() {
- GlobalObject* object = global();
- if (object->IsJSGlobalObject()) {
- return JSGlobalObject::cast(object)->builtins();
- } else {
- ASSERT(object->IsJSBuiltinsObject());
- return JSBuiltinsObject::cast(object);
- }
-}
-
-
-Context* Context::global_context() {
- // Fast case: the global object for this context has been set. In
- // that case, the global object has a direct pointer to the global
- // context.
- if (global()->IsGlobalObject()) {
- return global()->global_context();
- }
-
- // During bootstrapping, the global object might not be set and we
- // have to search the context chain to find the global context.
- ASSERT(Isolate::Current()->bootstrapper()->IsActive());
- Context* current = this;
- while (!current->IsGlobalContext()) {
- JSFunction* closure = JSFunction::cast(current->closure());
- current = Context::cast(closure->context());
- }
- return current;
-}
-
-
-JSObject* Context::global_proxy() {
- return global_context()->global_proxy_object();
-}
-
-void Context::set_global_proxy(JSObject* object) {
- global_context()->set_global_proxy_object(object);
-}
-
-
-Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
- int* index_, PropertyAttributes* attributes) {
- Isolate* isolate = GetIsolate();
- Handle<Context> context(this, isolate);
-
- bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
- *index_ = -1;
- *attributes = ABSENT;
-
- if (FLAG_trace_contexts) {
- PrintF("Context::Lookup(");
- name->ShortPrint();
- PrintF(")\n");
- }
-
- do {
- if (FLAG_trace_contexts) {
- PrintF(" - looking in context %p", reinterpret_cast<void*>(*context));
- if (context->IsGlobalContext()) PrintF(" (global context)");
- PrintF("\n");
- }
-
- // check extension/with object
- if (context->has_extension()) {
- Handle<JSObject> extension = Handle<JSObject>(context->extension(),
- isolate);
- // Context extension objects needs to behave as if they have no
- // prototype. So even if we want to follow prototype chains, we
- // need to only do a local lookup for context extension objects.
- if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
- extension->IsJSContextExtensionObject()) {
- *attributes = extension->GetLocalPropertyAttribute(*name);
- } else {
- *attributes = extension->GetPropertyAttribute(*name);
- }
- if (*attributes != ABSENT) {
- // property found
- if (FLAG_trace_contexts) {
- PrintF("=> found property in context object %p\n",
- reinterpret_cast<void*>(*extension));
- }
- return extension;
- }
- }
-
- if (context->is_function_context()) {
- // we have context-local slots
-
- // check non-parameter locals in context
- Handle<SerializedScopeInfo> scope_info(
- context->closure()->shared()->scope_info(), isolate);
- Variable::Mode mode;
- int index = scope_info->ContextSlotIndex(*name, &mode);
- ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
- if (index >= 0) {
- // slot found
- if (FLAG_trace_contexts) {
- PrintF("=> found local in context slot %d (mode = %d)\n",
- index, mode);
- }
- *index_ = index;
- // Note: Fixed context slots are statically allocated by the compiler.
- // Statically allocated variables always have a statically known mode,
- // which is the mode with which they were declared when added to the
- // scope. Thus, the DYNAMIC mode (which corresponds to dynamically
- // declared variables that were introduced through declaration nodes)
- // must not appear here.
- switch (mode) {
- case Variable::INTERNAL: // fall through
- case Variable::VAR: *attributes = NONE; break;
- case Variable::CONST: *attributes = READ_ONLY; break;
- case Variable::DYNAMIC: UNREACHABLE(); break;
- case Variable::DYNAMIC_GLOBAL: UNREACHABLE(); break;
- case Variable::DYNAMIC_LOCAL: UNREACHABLE(); break;
- case Variable::TEMPORARY: UNREACHABLE(); break;
- }
- return context;
- }
-
- // check parameter locals in context
- int param_index = scope_info->ParameterIndex(*name);
- if (param_index >= 0) {
- // slot found.
- int index = scope_info->ContextSlotIndex(
- isolate->heap()->arguments_shadow_symbol(), NULL);
- ASSERT(index >= 0); // arguments must exist and be in the heap context
- Handle<JSObject> arguments(JSObject::cast(context->get(index)),
- isolate);
- ASSERT(arguments->HasLocalProperty(isolate->heap()->length_symbol()));
- if (FLAG_trace_contexts) {
- PrintF("=> found parameter %d in arguments object\n", param_index);
- }
- *index_ = param_index;
- *attributes = NONE;
- return arguments;
- }
-
- // check intermediate context (holding only the function name variable)
- if (follow_context_chain) {
- int index = scope_info->FunctionContextSlotIndex(*name);
- if (index >= 0) {
- // slot found
- if (FLAG_trace_contexts) {
- PrintF("=> found intermediate function in context slot %d\n",
- index);
- }
- *index_ = index;
- *attributes = READ_ONLY;
- return context;
- }
- }
- }
-
- // proceed with enclosing context
- if (context->IsGlobalContext()) {
- follow_context_chain = false;
- } else if (context->is_function_context()) {
- context = Handle<Context>(Context::cast(context->closure()->context()),
- isolate);
- } else {
- context = Handle<Context>(context->previous(), isolate);
- }
- } while (follow_context_chain);
-
- // slot not found
- if (FLAG_trace_contexts) {
- PrintF("=> no property/slot found\n");
- }
- return Handle<Object>::null();
-}
-
-
-bool Context::GlobalIfNotShadowedByEval(Handle<String> name) {
- Context* context = this;
-
- // Check that there is no local with the given name in contexts
- // before the global context and check that there are no context
- // extension objects (conservative check for with statements).
- while (!context->IsGlobalContext()) {
- // Check if the context is a potentially a with context.
- if (context->has_extension()) return false;
-
- // Not a with context so it must be a function context.
- ASSERT(context->is_function_context());
-
- // Check non-parameter locals.
- Handle<SerializedScopeInfo> scope_info(
- context->closure()->shared()->scope_info());
- Variable::Mode mode;
- int index = scope_info->ContextSlotIndex(*name, &mode);
- ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
- if (index >= 0) return false;
-
- // Check parameter locals.
- int param_index = scope_info->ParameterIndex(*name);
- if (param_index >= 0) return false;
-
- // Check context only holding the function name variable.
- index = scope_info->FunctionContextSlotIndex(*name);
- if (index >= 0) return false;
- context = Context::cast(context->closure()->context());
- }
-
- // No local or potential with statement found so the variable is
- // global unless it is shadowed by an eval-introduced variable.
- return true;
-}
-
-
-void Context::AddOptimizedFunction(JSFunction* function) {
- ASSERT(IsGlobalContext());
-#ifdef DEBUG
- Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
- while (!element->IsUndefined()) {
- CHECK(element != function);
- element = JSFunction::cast(element)->next_function_link();
- }
-
- CHECK(function->next_function_link()->IsUndefined());
-
- // Check that the context belongs to the weak global contexts list.
- bool found = false;
- Object* context = GetHeap()->global_contexts_list();
- while (!context->IsUndefined()) {
- if (context == this) {
- found = true;
- break;
- }
- context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
- }
- CHECK(found);
-#endif
- function->set_next_function_link(get(OPTIMIZED_FUNCTIONS_LIST));
- set(OPTIMIZED_FUNCTIONS_LIST, function);
-}
-
-
-void Context::RemoveOptimizedFunction(JSFunction* function) {
- ASSERT(IsGlobalContext());
- Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
- JSFunction* prev = NULL;
- while (!element->IsUndefined()) {
- JSFunction* element_function = JSFunction::cast(element);
- ASSERT(element_function->next_function_link()->IsUndefined() ||
- element_function->next_function_link()->IsJSFunction());
- if (element_function == function) {
- if (prev == NULL) {
- set(OPTIMIZED_FUNCTIONS_LIST, element_function->next_function_link());
- } else {
- prev->set_next_function_link(element_function->next_function_link());
- }
- element_function->set_next_function_link(GetHeap()->undefined_value());
- return;
- }
- prev = element_function;
- element = element_function->next_function_link();
- }
- UNREACHABLE();
-}
-
-
-Object* Context::OptimizedFunctionsListHead() {
- ASSERT(IsGlobalContext());
- return get(OPTIMIZED_FUNCTIONS_LIST);
-}
-
-
-void Context::ClearOptimizedFunctions() {
- set(OPTIMIZED_FUNCTIONS_LIST, GetHeap()->undefined_value());
-}
-
-
-#ifdef DEBUG
-bool Context::IsBootstrappingOrContext(Object* object) {
- // During bootstrapping we allow all objects to pass as
- // contexts. This is necessary to fix circular dependencies.
- return Isolate::Current()->bootstrapper()->IsActive() || object->IsContext();
-}
-
-
-bool Context::IsBootstrappingOrGlobalObject(Object* object) {
- // During bootstrapping we allow all objects to pass as global
- // objects. This is necessary to fix circular dependencies.
- Isolate* isolate = Isolate::Current();
- return isolate->heap()->gc_state() != Heap::NOT_IN_GC ||
- isolate->bootstrapper()->IsActive() ||
- object->IsGlobalObject();
-}
-#endif
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/contexts.h b/src/3rdparty/v8/src/contexts.h
deleted file mode 100644
index e46619e..0000000
--- a/src/3rdparty/v8/src/contexts.h
+++ /dev/null
@@ -1,382 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CONTEXTS_H_
-#define V8_CONTEXTS_H_
-
-#include "heap.h"
-#include "objects.h"
-
-namespace v8 {
-namespace internal {
-
-
-enum ContextLookupFlags {
- FOLLOW_CONTEXT_CHAIN = 1,
- FOLLOW_PROTOTYPE_CHAIN = 2,
-
- DONT_FOLLOW_CHAINS = 0,
- FOLLOW_CHAINS = FOLLOW_CONTEXT_CHAIN | FOLLOW_PROTOTYPE_CHAIN
-};
-
-
-// Heap-allocated activation contexts.
-//
-// Contexts are implemented as FixedArray objects; the Context
-// class is a convenience interface casted on a FixedArray object.
-//
-// Note: Context must have no virtual functions and Context objects
-// must always be allocated via Heap::AllocateContext() or
-// Factory::NewContext.
-
-#define GLOBAL_CONTEXT_FIELDS(V) \
- V(GLOBAL_PROXY_INDEX, JSObject, global_proxy_object) \
- V(SECURITY_TOKEN_INDEX, Object, security_token) \
- V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \
- V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \
- V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
- V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
- V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
- V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
- V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
- V(JSON_OBJECT_INDEX, JSObject, json_object) \
- V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
- V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
- V(CREATE_DATE_FUN_INDEX, JSFunction, create_date_fun) \
- V(TO_NUMBER_FUN_INDEX, JSFunction, to_number_fun) \
- V(TO_STRING_FUN_INDEX, JSFunction, to_string_fun) \
- V(TO_DETAIL_STRING_FUN_INDEX, JSFunction, to_detail_string_fun) \
- V(TO_OBJECT_FUN_INDEX, JSFunction, to_object_fun) \
- V(TO_INTEGER_FUN_INDEX, JSFunction, to_integer_fun) \
- V(TO_UINT32_FUN_INDEX, JSFunction, to_uint32_fun) \
- V(TO_INT32_FUN_INDEX, JSFunction, to_int32_fun) \
- V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
- V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \
- V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \
- V(FUNCTION_MAP_INDEX, Map, function_map) \
- V(STRICT_MODE_FUNCTION_MAP_INDEX, Map, strict_mode_function_map) \
- V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \
- V(STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
- strict_mode_function_without_prototype_map) \
- V(FUNCTION_INSTANCE_MAP_INDEX, Map, function_instance_map) \
- V(STRICT_MODE_FUNCTION_INSTANCE_MAP_INDEX, Map, \
- strict_mode_function_instance_map) \
- V(JS_ARRAY_MAP_INDEX, Map, js_array_map)\
- V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)\
- V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \
- V(STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
- strict_mode_arguments_boilerplate) \
- V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \
- V(MAKE_MESSAGE_FUN_INDEX, JSFunction, make_message_fun) \
- V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun) \
- V(CONFIGURE_GLOBAL_INDEX, JSFunction, configure_global_fun) \
- V(FUNCTION_CACHE_INDEX, JSObject, function_cache) \
- V(JSFUNCTION_RESULT_CACHES_INDEX, FixedArray, jsfunction_result_caches) \
- V(NORMALIZED_MAP_CACHE_INDEX, NormalizedMapCache, normalized_map_cache) \
- V(RUNTIME_CONTEXT_INDEX, Context, runtime_context) \
- V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
- V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \
- call_as_constructor_delegate) \
- V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
- V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \
- V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
- V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
- V(MAP_CACHE_INDEX, Object, map_cache) \
- V(CONTEXT_DATA_INDEX, Object, data)
-
-// JSFunctions are pairs (context, function code), sometimes also called
-// closures. A Context object is used to represent function contexts and
-// dynamically pushed 'with' contexts (or 'scopes' in ECMA-262 speak).
-//
-// At runtime, the contexts build a stack in parallel to the execution
-// stack, with the top-most context being the current context. All contexts
-// have the following slots:
-//
-// [ closure ] This is the current function. It is the same for all
-// contexts inside a function. It provides access to the
-// incoming context (i.e., the outer context, which may
-// or may not become the current function's context), and
-// it provides access to the functions code and thus it's
-// scope information, which in turn contains the names of
-// statically allocated context slots. The names are needed
-// for dynamic lookups in the presence of 'with' or 'eval'.
-//
-// [ fcontext ] A pointer to the innermost enclosing function context.
-// It is the same for all contexts *allocated* inside a
-// function, and the function context's fcontext points
-// to itself. It is only needed for fast access of the
-// function context (used for declarations, and static
-// context slot access).
-//
-// [ previous ] A pointer to the previous context. It is NULL for
-// function contexts, and non-NULL for 'with' contexts.
-// Used to implement the 'with' statement.
-//
-// [ extension ] A pointer to an extension JSObject, or NULL. Used to
-// implement 'with' statements and dynamic declarations
-// (through 'eval'). The object in a 'with' statement is
-// stored in the extension slot of a 'with' context.
-// Dynamically declared variables/functions are also added
-// to lazily allocated extension object. Context::Lookup
-// searches the extension object for properties.
-//
-// [ global ] A pointer to the global object. Provided for quick
-// access to the global object from inside the code (since
-// we always have a context pointer).
-//
-// In addition, function contexts may have statically allocated context slots
-// to store local variables/functions that are accessed from inner functions
-// (via static context addresses) or through 'eval' (dynamic context lookups).
-// Finally, the global context contains additional slots for fast access to
-// global properties.
-//
-// We may be able to simplify the implementation:
-//
-// - We may be able to get rid of 'fcontext': We can always use the fact that
-// previous == NULL for function contexts and so we can search for them. They
-// are only needed when doing dynamic declarations, and the context chains
-// tend to be very very short (depth of nesting of 'with' statements). At
-// the moment we also use it in generated code for context slot accesses -
-// and there we don't want a loop because of code bloat - but we may not
-// need it there after all (see comment in codegen_*.cc).
-//
-// - If we cannot get rid of fcontext, consider making 'previous' never NULL
-// except for the global context. This could simplify Context::Lookup.
-
-class Context: public FixedArray {
- public:
- // Conversions.
- static Context* cast(Object* context) {
- ASSERT(context->IsContext());
- return reinterpret_cast<Context*>(context);
- }
-
- // The default context slot layout; indices are FixedArray slot indices.
- enum {
- // These slots are in all contexts.
- CLOSURE_INDEX,
- FCONTEXT_INDEX,
- PREVIOUS_INDEX,
- EXTENSION_INDEX,
- GLOBAL_INDEX,
- MIN_CONTEXT_SLOTS,
-
- // These slots are only in global contexts.
- GLOBAL_PROXY_INDEX = MIN_CONTEXT_SLOTS,
- SECURITY_TOKEN_INDEX,
- ARGUMENTS_BOILERPLATE_INDEX,
- STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX,
- JS_ARRAY_MAP_INDEX,
- REGEXP_RESULT_MAP_INDEX,
- FUNCTION_MAP_INDEX,
- STRICT_MODE_FUNCTION_MAP_INDEX,
- FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
- STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
- FUNCTION_INSTANCE_MAP_INDEX,
- STRICT_MODE_FUNCTION_INSTANCE_MAP_INDEX,
- INITIAL_OBJECT_PROTOTYPE_INDEX,
- BOOLEAN_FUNCTION_INDEX,
- NUMBER_FUNCTION_INDEX,
- STRING_FUNCTION_INDEX,
- STRING_FUNCTION_PROTOTYPE_MAP_INDEX,
- OBJECT_FUNCTION_INDEX,
- ARRAY_FUNCTION_INDEX,
- DATE_FUNCTION_INDEX,
- JSON_OBJECT_INDEX,
- REGEXP_FUNCTION_INDEX,
- CREATE_DATE_FUN_INDEX,
- TO_NUMBER_FUN_INDEX,
- TO_STRING_FUN_INDEX,
- TO_DETAIL_STRING_FUN_INDEX,
- TO_OBJECT_FUN_INDEX,
- TO_INTEGER_FUN_INDEX,
- TO_UINT32_FUN_INDEX,
- TO_INT32_FUN_INDEX,
- TO_BOOLEAN_FUN_INDEX,
- GLOBAL_EVAL_FUN_INDEX,
- INSTANTIATE_FUN_INDEX,
- CONFIGURE_INSTANCE_FUN_INDEX,
- MESSAGE_LISTENERS_INDEX,
- MAKE_MESSAGE_FUN_INDEX,
- GET_STACK_TRACE_LINE_INDEX,
- CONFIGURE_GLOBAL_INDEX,
- FUNCTION_CACHE_INDEX,
- JSFUNCTION_RESULT_CACHES_INDEX,
- NORMALIZED_MAP_CACHE_INDEX,
- RUNTIME_CONTEXT_INDEX,
- CALL_AS_FUNCTION_DELEGATE_INDEX,
- CALL_AS_CONSTRUCTOR_DELEGATE_INDEX,
- SCRIPT_FUNCTION_INDEX,
- OPAQUE_REFERENCE_FUNCTION_INDEX,
- CONTEXT_EXTENSION_FUNCTION_INDEX,
- OUT_OF_MEMORY_INDEX,
- MAP_CACHE_INDEX,
- CONTEXT_DATA_INDEX,
-
- // Properties from here are treated as weak references by the full GC.
- // Scavenge treats them as strong references.
- OPTIMIZED_FUNCTIONS_LIST, // Weak.
- NEXT_CONTEXT_LINK, // Weak.
-
- // Total number of slots.
- GLOBAL_CONTEXT_SLOTS,
-
- FIRST_WEAK_SLOT = OPTIMIZED_FUNCTIONS_LIST
- };
-
- // Direct slot access.
- JSFunction* closure() { return JSFunction::cast(get(CLOSURE_INDEX)); }
- void set_closure(JSFunction* closure) { set(CLOSURE_INDEX, closure); }
-
- Context* fcontext() { return Context::cast(get(FCONTEXT_INDEX)); }
- void set_fcontext(Context* context) { set(FCONTEXT_INDEX, context); }
-
- Context* previous() {
- Object* result = unchecked_previous();
- ASSERT(IsBootstrappingOrContext(result));
- return reinterpret_cast<Context*>(result);
- }
- void set_previous(Context* context) { set(PREVIOUS_INDEX, context); }
-
- bool has_extension() { return unchecked_extension() != NULL; }
- JSObject* extension() { return JSObject::cast(unchecked_extension()); }
- void set_extension(JSObject* object) { set(EXTENSION_INDEX, object); }
-
- GlobalObject* global() {
- Object* result = get(GLOBAL_INDEX);
- ASSERT(IsBootstrappingOrGlobalObject(result));
- return reinterpret_cast<GlobalObject*>(result);
- }
- void set_global(GlobalObject* global) { set(GLOBAL_INDEX, global); }
-
- // Returns a JSGlobalProxy object or null.
- JSObject* global_proxy();
- void set_global_proxy(JSObject* global);
-
- // The builtins object.
- JSBuiltinsObject* builtins();
-
- // Compute the global context by traversing the context chain.
- Context* global_context();
-
- // Tells if this is a function context (as opposed to a 'with' context).
- bool is_function_context() { return unchecked_previous() == NULL; }
-
- // Tells whether the global context is marked with out of memory.
- inline bool has_out_of_memory();
-
- // Mark the global context with out of memory.
- inline void mark_out_of_memory();
-
- // The exception holder is the object used as a with object in
- // the implementation of a catch block.
- bool is_exception_holder(Object* object) {
- return IsCatchContext() && extension() == object;
- }
-
- // A global context hold a list of all functions which have been optimized.
- void AddOptimizedFunction(JSFunction* function);
- void RemoveOptimizedFunction(JSFunction* function);
- Object* OptimizedFunctionsListHead();
- void ClearOptimizedFunctions();
-
-#define GLOBAL_CONTEXT_FIELD_ACCESSORS(index, type, name) \
- void set_##name(type* value) { \
- ASSERT(IsGlobalContext()); \
- set(index, value); \
- } \
- type* name() { \
- ASSERT(IsGlobalContext()); \
- return type::cast(get(index)); \
- }
- GLOBAL_CONTEXT_FIELDS(GLOBAL_CONTEXT_FIELD_ACCESSORS)
-#undef GLOBAL_CONTEXT_FIELD_ACCESSORS
-
- // Lookup the the slot called name, starting with the current context.
- // There are 4 possible outcomes:
- //
- // 1) index_ >= 0 && result->IsContext():
- // most common case, the result is a Context, and index is the
- // context slot index, and the slot exists.
- // attributes == READ_ONLY for the function name variable, NONE otherwise.
- //
- // 2) index_ >= 0 && result->IsJSObject():
- // the result is the JSObject arguments object, the index is the parameter
- // index, i.e., key into the arguments object, and the property exists.
- // attributes != ABSENT.
- //
- // 3) index_ < 0 && result->IsJSObject():
- // the result is the JSObject extension context or the global object,
- // and the name is the property name, and the property exists.
- // attributes != ABSENT.
- //
- // 4) index_ < 0 && result.is_null():
- // there was no context found with the corresponding property.
- // attributes == ABSENT.
- Handle<Object> Lookup(Handle<String> name, ContextLookupFlags flags,
- int* index_, PropertyAttributes* attributes);
-
- // Determine if a local variable with the given name exists in a
- // context. Do not consider context extension objects. This is
- // used for compiling code using eval. If the context surrounding
- // the eval call does not have a local variable with this name and
- // does not contain a with statement the property is global unless
- // it is shadowed by a property in an extension object introduced by
- // eval.
- bool GlobalIfNotShadowedByEval(Handle<String> name);
-
- // Code generation support.
- static int SlotOffset(int index) {
- return kHeaderSize + index * kPointerSize - kHeapObjectTag;
- }
-
- static const int kSize = kHeaderSize + GLOBAL_CONTEXT_SLOTS * kPointerSize;
-
- // GC support.
- typedef FixedBodyDescriptor<
- kHeaderSize, kSize, kSize> ScavengeBodyDescriptor;
-
- typedef FixedBodyDescriptor<
- kHeaderSize,
- kHeaderSize + FIRST_WEAK_SLOT * kPointerSize,
- kSize> MarkCompactBodyDescriptor;
-
- private:
- // Unchecked access to the slots.
- Object* unchecked_previous() { return get(PREVIOUS_INDEX); }
- Object* unchecked_extension() { return get(EXTENSION_INDEX); }
-
-#ifdef DEBUG
- // Bootstrapping-aware type checks.
- static bool IsBootstrappingOrContext(Object* object);
- static bool IsBootstrappingOrGlobalObject(Object* object);
-#endif
-};
-
-} } // namespace v8::internal
-
-#endif // V8_CONTEXTS_H_
diff --git a/src/3rdparty/v8/src/conversions-inl.h b/src/3rdparty/v8/src/conversions-inl.h
deleted file mode 100644
index bf02947..0000000
--- a/src/3rdparty/v8/src/conversions-inl.h
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CONVERSIONS_INL_H_
-#define V8_CONVERSIONS_INL_H_
-
-#include <math.h>
-#include <float.h> // required for DBL_MAX and on Win32 for finite()
-#include <stdarg.h>
-
-// ----------------------------------------------------------------------------
-// Extra POSIX/ANSI functions for Win32/MSVC.
-
-#include "conversions.h"
-#include "platform.h"
-
-namespace v8 {
-namespace internal {
-
-// The fast double-to-unsigned-int conversion routine does not guarantee
-// rounding towards zero, or any reasonable value if the argument is larger
-// than what fits in an unsigned 32-bit integer.
-static inline unsigned int FastD2UI(double x) {
- // There is no unsigned version of lrint, so there is no fast path
- // in this function as there is in FastD2I. Using lrint doesn't work
- // for values of 2^31 and above.
-
- // Convert "small enough" doubles to uint32_t by fixing the 32
- // least significant non-fractional bits in the low 32 bits of the
- // double, and reading them from there.
- const double k2Pow52 = 4503599627370496.0;
- bool negative = x < 0;
- if (negative) {
- x = -x;
- }
- if (x < k2Pow52) {
- x += k2Pow52;
- uint32_t result;
-#ifdef BIG_ENDIAN_FLOATING_POINT
- Address mantissa_ptr = reinterpret_cast<Address>(&x) + kIntSize;
-#else
- Address mantissa_ptr = reinterpret_cast<Address>(&x);
-#endif
- // Copy least significant 32 bits of mantissa.
- memcpy(&result, mantissa_ptr, sizeof(result));
- return negative ? ~result + 1 : result;
- }
- // Large number (outside uint32 range), Infinity or NaN.
- return 0x80000000u; // Return integer indefinite.
-}
-
-
-static inline double DoubleToInteger(double x) {
- if (isnan(x)) return 0;
- if (!isfinite(x) || x == 0) return x;
- return (x >= 0) ? floor(x) : ceil(x);
-}
-
-
-int32_t NumberToInt32(Object* number) {
- if (number->IsSmi()) return Smi::cast(number)->value();
- return DoubleToInt32(number->Number());
-}
-
-
-uint32_t NumberToUint32(Object* number) {
- if (number->IsSmi()) return Smi::cast(number)->value();
- return DoubleToUint32(number->Number());
-}
-
-
-int32_t DoubleToInt32(double x) {
- int32_t i = FastD2I(x);
- if (FastI2D(i) == x) return i;
- static const double two32 = 4294967296.0;
- static const double two31 = 2147483648.0;
- if (!isfinite(x) || x == 0) return 0;
- if (x < 0 || x >= two32) x = modulo(x, two32);
- x = (x >= 0) ? floor(x) : ceil(x) + two32;
- return (int32_t) ((x >= two31) ? x - two32 : x);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_CONVERSIONS_INL_H_
diff --git a/src/3rdparty/v8/src/conversions.cc b/src/3rdparty/v8/src/conversions.cc
deleted file mode 100644
index c3d7bdf..0000000
--- a/src/3rdparty/v8/src/conversions.cc
+++ /dev/null
@@ -1,1125 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdarg.h>
-#include <limits.h>
-
-#include "v8.h"
-
-#include "conversions-inl.h"
-#include "dtoa.h"
-#include "factory.h"
-#include "scanner-base.h"
-#include "strtod.h"
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-// C++-style iterator adaptor for StringInputBuffer
-// (unlike C++ iterators the end-marker has different type).
-class StringInputBufferIterator {
- public:
- class EndMarker {};
-
- explicit StringInputBufferIterator(StringInputBuffer* buffer);
-
- int operator*() const;
- void operator++();
- bool operator==(EndMarker const&) const { return end_; }
- bool operator!=(EndMarker const& m) const { return !end_; }
-
- private:
- StringInputBuffer* const buffer_;
- int current_;
- bool end_;
-};
-
-
-StringInputBufferIterator::StringInputBufferIterator(
- StringInputBuffer* buffer) : buffer_(buffer) {
- ++(*this);
-}
-
-int StringInputBufferIterator::operator*() const {
- return current_;
-}
-
-
-void StringInputBufferIterator::operator++() {
- end_ = !buffer_->has_more();
- if (!end_) {
- current_ = buffer_->GetNext();
- }
-}
-}
-
-
-template <class Iterator, class EndMark>
-static bool SubStringEquals(Iterator* current,
- EndMark end,
- const char* substring) {
- ASSERT(**current == *substring);
- for (substring++; *substring != '\0'; substring++) {
- ++*current;
- if (*current == end || **current != *substring) return false;
- }
- ++*current;
- return true;
-}
-
-
-// Maximum number of significant digits in decimal representation.
-// The longest possible double in decimal representation is
-// (2^53 - 1) * 2 ^ -1074 that is (2 ^ 53 - 1) * 5 ^ 1074 / 10 ^ 1074
-// (768 digits). If we parse a number whose first digits are equal to a
-// mean of 2 adjacent doubles (that could have up to 769 digits) the result
-// must be rounded to the bigger one unless the tail consists of zeros, so
-// we don't need to preserve all the digits.
-const int kMaxSignificantDigits = 772;
-
-
-static const double JUNK_STRING_VALUE = OS::nan_value();
-
-
-// Returns true if a nonspace found and false if the end has reached.
-template <class Iterator, class EndMark>
-static inline bool AdvanceToNonspace(ScannerConstants* scanner_constants,
- Iterator* current,
- EndMark end) {
- while (*current != end) {
- if (!scanner_constants->IsWhiteSpace(**current)) return true;
- ++*current;
- }
- return false;
-}
-
-
-static bool isDigit(int x, int radix) {
- return (x >= '0' && x <= '9' && x < '0' + radix)
- || (radix > 10 && x >= 'a' && x < 'a' + radix - 10)
- || (radix > 10 && x >= 'A' && x < 'A' + radix - 10);
-}
-
-
-static double SignedZero(bool negative) {
- return negative ? -0.0 : 0.0;
-}
-
-
-// Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
-template <int radix_log_2, class Iterator, class EndMark>
-static double InternalStringToIntDouble(ScannerConstants* scanner_constants,
- Iterator current,
- EndMark end,
- bool negative,
- bool allow_trailing_junk) {
- ASSERT(current != end);
-
- // Skip leading 0s.
- while (*current == '0') {
- ++current;
- if (current == end) return SignedZero(negative);
- }
-
- int64_t number = 0;
- int exponent = 0;
- const int radix = (1 << radix_log_2);
-
- do {
- int digit;
- if (*current >= '0' && *current <= '9' && *current < '0' + radix) {
- digit = static_cast<char>(*current) - '0';
- } else if (radix > 10 && *current >= 'a' && *current < 'a' + radix - 10) {
- digit = static_cast<char>(*current) - 'a' + 10;
- } else if (radix > 10 && *current >= 'A' && *current < 'A' + radix - 10) {
- digit = static_cast<char>(*current) - 'A' + 10;
- } else {
- if (allow_trailing_junk ||
- !AdvanceToNonspace(scanner_constants, &current, end)) {
- break;
- } else {
- return JUNK_STRING_VALUE;
- }
- }
-
- number = number * radix + digit;
- int overflow = static_cast<int>(number >> 53);
- if (overflow != 0) {
- // Overflow occurred. Need to determine which direction to round the
- // result.
- int overflow_bits_count = 1;
- while (overflow > 1) {
- overflow_bits_count++;
- overflow >>= 1;
- }
-
- int dropped_bits_mask = ((1 << overflow_bits_count) - 1);
- int dropped_bits = static_cast<int>(number) & dropped_bits_mask;
- number >>= overflow_bits_count;
- exponent = overflow_bits_count;
-
- bool zero_tail = true;
- while (true) {
- ++current;
- if (current == end || !isDigit(*current, radix)) break;
- zero_tail = zero_tail && *current == '0';
- exponent += radix_log_2;
- }
-
- if (!allow_trailing_junk &&
- AdvanceToNonspace(scanner_constants, &current, end)) {
- return JUNK_STRING_VALUE;
- }
-
- int middle_value = (1 << (overflow_bits_count - 1));
- if (dropped_bits > middle_value) {
- number++; // Rounding up.
- } else if (dropped_bits == middle_value) {
- // Rounding to even to consistency with decimals: half-way case rounds
- // up if significant part is odd and down otherwise.
- if ((number & 1) != 0 || !zero_tail) {
- number++; // Rounding up.
- }
- }
-
- // Rounding up may cause overflow.
- if ((number & ((int64_t)1 << 53)) != 0) {
- exponent++;
- number >>= 1;
- }
- break;
- }
- ++current;
- } while (current != end);
-
- ASSERT(number < ((int64_t)1 << 53));
- ASSERT(static_cast<int64_t>(static_cast<double>(number)) == number);
-
- if (exponent == 0) {
- if (negative) {
- if (number == 0) return -0.0;
- number = -number;
- }
- return static_cast<double>(number);
- }
-
- ASSERT(number != 0);
- // The double could be constructed faster from number (mantissa), exponent
- // and sign. Assuming it's a rare case more simple code is used.
- return static_cast<double>(negative ? -number : number) * pow(2.0, exponent);
-}
-
-
-template <class Iterator, class EndMark>
-static double InternalStringToInt(ScannerConstants* scanner_constants,
- Iterator current,
- EndMark end,
- int radix) {
- const bool allow_trailing_junk = true;
- const double empty_string_val = JUNK_STRING_VALUE;
-
- if (!AdvanceToNonspace(scanner_constants, &current, end)) {
- return empty_string_val;
- }
-
- bool negative = false;
- bool leading_zero = false;
-
- if (*current == '+') {
- // Ignore leading sign; skip following spaces.
- ++current;
- if (!AdvanceToNonspace(scanner_constants, &current, end)) {
- return JUNK_STRING_VALUE;
- }
- } else if (*current == '-') {
- ++current;
- if (!AdvanceToNonspace(scanner_constants, &current, end)) {
- return JUNK_STRING_VALUE;
- }
- negative = true;
- }
-
- if (radix == 0) {
- // Radix detection.
- if (*current == '0') {
- ++current;
- if (current == end) return SignedZero(negative);
- if (*current == 'x' || *current == 'X') {
- radix = 16;
- ++current;
- if (current == end) return JUNK_STRING_VALUE;
- } else {
- radix = 8;
- leading_zero = true;
- }
- } else {
- radix = 10;
- }
- } else if (radix == 16) {
- if (*current == '0') {
- // Allow "0x" prefix.
- ++current;
- if (current == end) return SignedZero(negative);
- if (*current == 'x' || *current == 'X') {
- ++current;
- if (current == end) return JUNK_STRING_VALUE;
- } else {
- leading_zero = true;
- }
- }
- }
-
- if (radix < 2 || radix > 36) return JUNK_STRING_VALUE;
-
- // Skip leading zeros.
- while (*current == '0') {
- leading_zero = true;
- ++current;
- if (current == end) return SignedZero(negative);
- }
-
- if (!leading_zero && !isDigit(*current, radix)) {
- return JUNK_STRING_VALUE;
- }
-
- if (IsPowerOf2(radix)) {
- switch (radix) {
- case 2:
- return InternalStringToIntDouble<1>(
- scanner_constants, current, end, negative, allow_trailing_junk);
- case 4:
- return InternalStringToIntDouble<2>(
- scanner_constants, current, end, negative, allow_trailing_junk);
- case 8:
- return InternalStringToIntDouble<3>(
- scanner_constants, current, end, negative, allow_trailing_junk);
-
- case 16:
- return InternalStringToIntDouble<4>(
- scanner_constants, current, end, negative, allow_trailing_junk);
-
- case 32:
- return InternalStringToIntDouble<5>(
- scanner_constants, current, end, negative, allow_trailing_junk);
- default:
- UNREACHABLE();
- }
- }
-
- if (radix == 10) {
- // Parsing with strtod.
- const int kMaxSignificantDigits = 309; // Doubles are less than 1.8e308.
- // The buffer may contain up to kMaxSignificantDigits + 1 digits and a zero
- // end.
- const int kBufferSize = kMaxSignificantDigits + 2;
- char buffer[kBufferSize];
- int buffer_pos = 0;
- while (*current >= '0' && *current <= '9') {
- if (buffer_pos <= kMaxSignificantDigits) {
- // If the number has more than kMaxSignificantDigits it will be parsed
- // as infinity.
- ASSERT(buffer_pos < kBufferSize);
- buffer[buffer_pos++] = static_cast<char>(*current);
- }
- ++current;
- if (current == end) break;
- }
-
- if (!allow_trailing_junk &&
- AdvanceToNonspace(scanner_constants, &current, end)) {
- return JUNK_STRING_VALUE;
- }
-
- ASSERT(buffer_pos < kBufferSize);
- buffer[buffer_pos] = '\0';
- Vector<const char> buffer_vector(buffer, buffer_pos);
- return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
- }
-
- // The following code causes accumulating rounding error for numbers greater
- // than ~2^56. It's explicitly allowed in the spec: "if R is not 2, 4, 8, 10,
- // 16, or 32, then mathInt may be an implementation-dependent approximation to
- // the mathematical integer value" (15.1.2.2).
-
- int lim_0 = '0' + (radix < 10 ? radix : 10);
- int lim_a = 'a' + (radix - 10);
- int lim_A = 'A' + (radix - 10);
-
- // NOTE: The code for computing the value may seem a bit complex at
- // first glance. It is structured to use 32-bit multiply-and-add
- // loops as long as possible to avoid loosing precision.
-
- double v = 0.0;
- bool done = false;
- do {
- // Parse the longest part of the string starting at index j
- // possible while keeping the multiplier, and thus the part
- // itself, within 32 bits.
- unsigned int part = 0, multiplier = 1;
- while (true) {
- int d;
- if (*current >= '0' && *current < lim_0) {
- d = *current - '0';
- } else if (*current >= 'a' && *current < lim_a) {
- d = *current - 'a' + 10;
- } else if (*current >= 'A' && *current < lim_A) {
- d = *current - 'A' + 10;
- } else {
- done = true;
- break;
- }
-
- // Update the value of the part as long as the multiplier fits
- // in 32 bits. When we can't guarantee that the next iteration
- // will not overflow the multiplier, we stop parsing the part
- // by leaving the loop.
- const unsigned int kMaximumMultiplier = 0xffffffffU / 36;
- uint32_t m = multiplier * radix;
- if (m > kMaximumMultiplier) break;
- part = part * radix + d;
- multiplier = m;
- ASSERT(multiplier > part);
-
- ++current;
- if (current == end) {
- done = true;
- break;
- }
- }
-
- // Update the value and skip the part in the string.
- v = v * multiplier + part;
- } while (!done);
-
- if (!allow_trailing_junk &&
- AdvanceToNonspace(scanner_constants, &current, end)) {
- return JUNK_STRING_VALUE;
- }
-
- return negative ? -v : v;
-}
-
-
-// Converts a string to a double value. Assumes the Iterator supports
-// the following operations:
-// 1. current == end (other ops are not allowed), current != end.
-// 2. *current - gets the current character in the sequence.
-// 3. ++current (advances the position).
-template <class Iterator, class EndMark>
-static double InternalStringToDouble(ScannerConstants* scanner_constants,
- Iterator current,
- EndMark end,
- int flags,
- double empty_string_val) {
- // To make sure that iterator dereferencing is valid the following
- // convention is used:
- // 1. Each '++current' statement is followed by check for equality to 'end'.
- // 2. If AdvanceToNonspace returned false then current == end.
- // 3. If 'current' becomes be equal to 'end' the function returns or goes to
- // 'parsing_done'.
- // 4. 'current' is not dereferenced after the 'parsing_done' label.
- // 5. Code before 'parsing_done' may rely on 'current != end'.
- if (!AdvanceToNonspace(scanner_constants, &current, end)) {
- return empty_string_val;
- }
-
- const bool allow_trailing_junk = (flags & ALLOW_TRAILING_JUNK) != 0;
-
- // The longest form of simplified number is: "-<significant digits>'.1eXXX\0".
- const int kBufferSize = kMaxSignificantDigits + 10;
- char buffer[kBufferSize]; // NOLINT: size is known at compile time.
- int buffer_pos = 0;
-
- // Exponent will be adjusted if insignificant digits of the integer part
- // or insignificant leading zeros of the fractional part are dropped.
- int exponent = 0;
- int significant_digits = 0;
- int insignificant_digits = 0;
- bool nonzero_digit_dropped = false;
- bool fractional_part = false;
-
- bool negative = false;
-
- if (*current == '+') {
- // Ignore leading sign.
- ++current;
- if (current == end) return JUNK_STRING_VALUE;
- } else if (*current == '-') {
- ++current;
- if (current == end) return JUNK_STRING_VALUE;
- negative = true;
- }
-
- static const char kInfinitySymbol[] = "Infinity";
- if (*current == kInfinitySymbol[0]) {
- if (!SubStringEquals(&current, end, kInfinitySymbol)) {
- return JUNK_STRING_VALUE;
- }
-
- if (!allow_trailing_junk &&
- AdvanceToNonspace(scanner_constants, &current, end)) {
- return JUNK_STRING_VALUE;
- }
-
- ASSERT(buffer_pos == 0);
- return negative ? -V8_INFINITY : V8_INFINITY;
- }
-
- bool leading_zero = false;
- if (*current == '0') {
- ++current;
- if (current == end) return SignedZero(negative);
-
- leading_zero = true;
-
- // It could be hexadecimal value.
- if ((flags & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
- ++current;
- if (current == end || !isDigit(*current, 16)) {
- return JUNK_STRING_VALUE; // "0x".
- }
-
- return InternalStringToIntDouble<4>(scanner_constants,
- current,
- end,
- negative,
- allow_trailing_junk);
- }
-
- // Ignore leading zeros in the integer part.
- while (*current == '0') {
- ++current;
- if (current == end) return SignedZero(negative);
- }
- }
-
- bool octal = leading_zero && (flags & ALLOW_OCTALS) != 0;
-
- // Copy significant digits of the integer part (if any) to the buffer.
- while (*current >= '0' && *current <= '9') {
- if (significant_digits < kMaxSignificantDigits) {
- ASSERT(buffer_pos < kBufferSize);
- buffer[buffer_pos++] = static_cast<char>(*current);
- significant_digits++;
- // Will later check if it's an octal in the buffer.
- } else {
- insignificant_digits++; // Move the digit into the exponential part.
- nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
- }
- octal = octal && *current < '8';
- ++current;
- if (current == end) goto parsing_done;
- }
-
- if (significant_digits == 0) {
- octal = false;
- }
-
- if (*current == '.') {
- if (octal && !allow_trailing_junk) return JUNK_STRING_VALUE;
- if (octal) goto parsing_done;
-
- ++current;
- if (current == end) {
- if (significant_digits == 0 && !leading_zero) {
- return JUNK_STRING_VALUE;
- } else {
- goto parsing_done;
- }
- }
-
- if (significant_digits == 0) {
- // octal = false;
- // Integer part consists of 0 or is absent. Significant digits start after
- // leading zeros (if any).
- while (*current == '0') {
- ++current;
- if (current == end) return SignedZero(negative);
- exponent--; // Move this 0 into the exponent.
- }
- }
-
- // We don't emit a '.', but adjust the exponent instead.
- fractional_part = true;
-
- // There is a fractional part.
- while (*current >= '0' && *current <= '9') {
- if (significant_digits < kMaxSignificantDigits) {
- ASSERT(buffer_pos < kBufferSize);
- buffer[buffer_pos++] = static_cast<char>(*current);
- significant_digits++;
- exponent--;
- } else {
- // Ignore insignificant digits in the fractional part.
- nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
- }
- ++current;
- if (current == end) goto parsing_done;
- }
- }
-
- if (!leading_zero && exponent == 0 && significant_digits == 0) {
- // If leading_zeros is true then the string contains zeros.
- // If exponent < 0 then string was [+-]\.0*...
- // If significant_digits != 0 the string is not equal to 0.
- // Otherwise there are no digits in the string.
- return JUNK_STRING_VALUE;
- }
-
- // Parse exponential part.
- if (*current == 'e' || *current == 'E') {
- if (octal) return JUNK_STRING_VALUE;
- ++current;
- if (current == end) {
- if (allow_trailing_junk) {
- goto parsing_done;
- } else {
- return JUNK_STRING_VALUE;
- }
- }
- char sign = '+';
- if (*current == '+' || *current == '-') {
- sign = static_cast<char>(*current);
- ++current;
- if (current == end) {
- if (allow_trailing_junk) {
- goto parsing_done;
- } else {
- return JUNK_STRING_VALUE;
- }
- }
- }
-
- if (current == end || *current < '0' || *current > '9') {
- if (allow_trailing_junk) {
- goto parsing_done;
- } else {
- return JUNK_STRING_VALUE;
- }
- }
-
- const int max_exponent = INT_MAX / 2;
- ASSERT(-max_exponent / 2 <= exponent && exponent <= max_exponent / 2);
- int num = 0;
- do {
- // Check overflow.
- int digit = *current - '0';
- if (num >= max_exponent / 10
- && !(num == max_exponent / 10 && digit <= max_exponent % 10)) {
- num = max_exponent;
- } else {
- num = num * 10 + digit;
- }
- ++current;
- } while (current != end && *current >= '0' && *current <= '9');
-
- exponent += (sign == '-' ? -num : num);
- }
-
- if (!allow_trailing_junk &&
- AdvanceToNonspace(scanner_constants, &current, end)) {
- return JUNK_STRING_VALUE;
- }
-
- parsing_done:
- exponent += insignificant_digits;
-
- if (octal) {
- return InternalStringToIntDouble<3>(scanner_constants,
- buffer,
- buffer + buffer_pos,
- negative,
- allow_trailing_junk);
- }
-
- if (nonzero_digit_dropped) {
- buffer[buffer_pos++] = '1';
- exponent--;
- }
-
- ASSERT(buffer_pos < kBufferSize);
- buffer[buffer_pos] = '\0';
-
- double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
- return negative ? -converted : converted;
-}
-
-
-double StringToDouble(String* str, int flags, double empty_string_val) {
- ScannerConstants* scanner_constants =
- Isolate::Current()->scanner_constants();
- StringShape shape(str);
- if (shape.IsSequentialAscii()) {
- const char* begin = SeqAsciiString::cast(str)->GetChars();
- const char* end = begin + str->length();
- return InternalStringToDouble(scanner_constants, begin, end, flags,
- empty_string_val);
- } else if (shape.IsSequentialTwoByte()) {
- const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
- const uc16* end = begin + str->length();
- return InternalStringToDouble(scanner_constants, begin, end, flags,
- empty_string_val);
- } else {
- StringInputBuffer buffer(str);
- return InternalStringToDouble(scanner_constants,
- StringInputBufferIterator(&buffer),
- StringInputBufferIterator::EndMarker(),
- flags,
- empty_string_val);
- }
-}
-
-
-double StringToInt(String* str, int radix) {
- ScannerConstants* scanner_constants =
- Isolate::Current()->scanner_constants();
- StringShape shape(str);
- if (shape.IsSequentialAscii()) {
- const char* begin = SeqAsciiString::cast(str)->GetChars();
- const char* end = begin + str->length();
- return InternalStringToInt(scanner_constants, begin, end, radix);
- } else if (shape.IsSequentialTwoByte()) {
- const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
- const uc16* end = begin + str->length();
- return InternalStringToInt(scanner_constants, begin, end, radix);
- } else {
- StringInputBuffer buffer(str);
- return InternalStringToInt(scanner_constants,
- StringInputBufferIterator(&buffer),
- StringInputBufferIterator::EndMarker(),
- radix);
- }
-}
-
-
-double StringToDouble(const char* str, int flags, double empty_string_val) {
- ScannerConstants* scanner_constants =
- Isolate::Current()->scanner_constants();
- const char* end = str + StrLength(str);
- return InternalStringToDouble(scanner_constants, str, end, flags,
- empty_string_val);
-}
-
-
-double StringToDouble(Vector<const char> str,
- int flags,
- double empty_string_val) {
- ScannerConstants* scanner_constants =
- Isolate::Current()->scanner_constants();
- const char* end = str.start() + str.length();
- return InternalStringToDouble(scanner_constants, str.start(), end, flags,
- empty_string_val);
-}
-
-
-const char* DoubleToCString(double v, Vector<char> buffer) {
- switch (fpclassify(v)) {
- case FP_NAN: return "NaN";
- case FP_INFINITE: return (v < 0.0 ? "-Infinity" : "Infinity");
- case FP_ZERO: return "0";
- default: {
- StringBuilder builder(buffer.start(), buffer.length());
- int decimal_point;
- int sign;
- const int kV8DtoaBufferCapacity = kBase10MaximalLength + 1;
- char decimal_rep[kV8DtoaBufferCapacity];
- int length;
-
- DoubleToAscii(v, DTOA_SHORTEST, 0,
- Vector<char>(decimal_rep, kV8DtoaBufferCapacity),
- &sign, &length, &decimal_point);
-
- if (sign) builder.AddCharacter('-');
-
- if (length <= decimal_point && decimal_point <= 21) {
- // ECMA-262 section 9.8.1 step 6.
- builder.AddString(decimal_rep);
- builder.AddPadding('0', decimal_point - length);
-
- } else if (0 < decimal_point && decimal_point <= 21) {
- // ECMA-262 section 9.8.1 step 7.
- builder.AddSubstring(decimal_rep, decimal_point);
- builder.AddCharacter('.');
- builder.AddString(decimal_rep + decimal_point);
-
- } else if (decimal_point <= 0 && decimal_point > -6) {
- // ECMA-262 section 9.8.1 step 8.
- builder.AddString("0.");
- builder.AddPadding('0', -decimal_point);
- builder.AddString(decimal_rep);
-
- } else {
- // ECMA-262 section 9.8.1 step 9 and 10 combined.
- builder.AddCharacter(decimal_rep[0]);
- if (length != 1) {
- builder.AddCharacter('.');
- builder.AddString(decimal_rep + 1);
- }
- builder.AddCharacter('e');
- builder.AddCharacter((decimal_point >= 0) ? '+' : '-');
- int exponent = decimal_point - 1;
- if (exponent < 0) exponent = -exponent;
- builder.AddFormatted("%d", exponent);
- }
- return builder.Finalize();
- }
- }
-}
-
-
-const char* IntToCString(int n, Vector<char> buffer) {
- bool negative = false;
- if (n < 0) {
- // We must not negate the most negative int.
- if (n == kMinInt) return DoubleToCString(n, buffer);
- negative = true;
- n = -n;
- }
- // Build the string backwards from the least significant digit.
- int i = buffer.length();
- buffer[--i] = '\0';
- do {
- buffer[--i] = '0' + (n % 10);
- n /= 10;
- } while (n);
- if (negative) buffer[--i] = '-';
- return buffer.start() + i;
-}
-
-
-char* DoubleToFixedCString(double value, int f) {
- const int kMaxDigitsBeforePoint = 21;
- const double kFirstNonFixed = 1e21;
- const int kMaxDigitsAfterPoint = 20;
- ASSERT(f >= 0);
- ASSERT(f <= kMaxDigitsAfterPoint);
-
- bool negative = false;
- double abs_value = value;
- if (value < 0) {
- abs_value = -value;
- negative = true;
- }
-
- // If abs_value has more than kMaxDigitsBeforePoint digits before the point
- // use the non-fixed conversion routine.
- if (abs_value >= kFirstNonFixed) {
- char arr[100];
- Vector<char> buffer(arr, ARRAY_SIZE(arr));
- return StrDup(DoubleToCString(value, buffer));
- }
-
- // Find a sufficiently precise decimal representation of n.
- int decimal_point;
- int sign;
- // Add space for the '\0' byte.
- const int kDecimalRepCapacity =
- kMaxDigitsBeforePoint + kMaxDigitsAfterPoint + 1;
- char decimal_rep[kDecimalRepCapacity];
- int decimal_rep_length;
- DoubleToAscii(value, DTOA_FIXED, f,
- Vector<char>(decimal_rep, kDecimalRepCapacity),
- &sign, &decimal_rep_length, &decimal_point);
-
- // Create a representation that is padded with zeros if needed.
- int zero_prefix_length = 0;
- int zero_postfix_length = 0;
-
- if (decimal_point <= 0) {
- zero_prefix_length = -decimal_point + 1;
- decimal_point = 1;
- }
-
- if (zero_prefix_length + decimal_rep_length < decimal_point + f) {
- zero_postfix_length = decimal_point + f - decimal_rep_length -
- zero_prefix_length;
- }
-
- unsigned rep_length =
- zero_prefix_length + decimal_rep_length + zero_postfix_length;
- StringBuilder rep_builder(rep_length + 1);
- rep_builder.AddPadding('0', zero_prefix_length);
- rep_builder.AddString(decimal_rep);
- rep_builder.AddPadding('0', zero_postfix_length);
- char* rep = rep_builder.Finalize();
-
- // Create the result string by appending a minus and putting in a
- // decimal point if needed.
- unsigned result_size = decimal_point + f + 2;
- StringBuilder builder(result_size + 1);
- if (negative) builder.AddCharacter('-');
- builder.AddSubstring(rep, decimal_point);
- if (f > 0) {
- builder.AddCharacter('.');
- builder.AddSubstring(rep + decimal_point, f);
- }
- DeleteArray(rep);
- return builder.Finalize();
-}
-
-
-static char* CreateExponentialRepresentation(char* decimal_rep,
- int exponent,
- bool negative,
- int significant_digits) {
- bool negative_exponent = false;
- if (exponent < 0) {
- negative_exponent = true;
- exponent = -exponent;
- }
-
- // Leave room in the result for appending a minus, for a period, the
- // letter 'e', a minus or a plus depending on the exponent, and a
- // three digit exponent.
- unsigned result_size = significant_digits + 7;
- StringBuilder builder(result_size + 1);
-
- if (negative) builder.AddCharacter('-');
- builder.AddCharacter(decimal_rep[0]);
- if (significant_digits != 1) {
- builder.AddCharacter('.');
- builder.AddString(decimal_rep + 1);
- int rep_length = StrLength(decimal_rep);
- builder.AddPadding('0', significant_digits - rep_length);
- }
-
- builder.AddCharacter('e');
- builder.AddCharacter(negative_exponent ? '-' : '+');
- builder.AddFormatted("%d", exponent);
- return builder.Finalize();
-}
-
-
-
-char* DoubleToExponentialCString(double value, int f) {
- const int kMaxDigitsAfterPoint = 20;
- // f might be -1 to signal that f was undefined in JavaScript.
- ASSERT(f >= -1 && f <= kMaxDigitsAfterPoint);
-
- bool negative = false;
- if (value < 0) {
- value = -value;
- negative = true;
- }
-
- // Find a sufficiently precise decimal representation of n.
- int decimal_point;
- int sign;
- // f corresponds to the digits after the point. There is always one digit
- // before the point. The number of requested_digits equals hence f + 1.
- // And we have to add one character for the null-terminator.
- const int kV8DtoaBufferCapacity = kMaxDigitsAfterPoint + 1 + 1;
- // Make sure that the buffer is big enough, even if we fall back to the
- // shortest representation (which happens when f equals -1).
- ASSERT(kBase10MaximalLength <= kMaxDigitsAfterPoint + 1);
- char decimal_rep[kV8DtoaBufferCapacity];
- int decimal_rep_length;
-
- if (f == -1) {
- DoubleToAscii(value, DTOA_SHORTEST, 0,
- Vector<char>(decimal_rep, kV8DtoaBufferCapacity),
- &sign, &decimal_rep_length, &decimal_point);
- f = decimal_rep_length - 1;
- } else {
- DoubleToAscii(value, DTOA_PRECISION, f + 1,
- Vector<char>(decimal_rep, kV8DtoaBufferCapacity),
- &sign, &decimal_rep_length, &decimal_point);
- }
- ASSERT(decimal_rep_length > 0);
- ASSERT(decimal_rep_length <= f + 1);
-
- int exponent = decimal_point - 1;
- char* result =
- CreateExponentialRepresentation(decimal_rep, exponent, negative, f+1);
-
- return result;
-}
-
-
-char* DoubleToPrecisionCString(double value, int p) {
- const int kMinimalDigits = 1;
- const int kMaximalDigits = 21;
- ASSERT(p >= kMinimalDigits && p <= kMaximalDigits);
- USE(kMinimalDigits);
-
- bool negative = false;
- if (value < 0) {
- value = -value;
- negative = true;
- }
-
- // Find a sufficiently precise decimal representation of n.
- int decimal_point;
- int sign;
- // Add one for the terminating null character.
- const int kV8DtoaBufferCapacity = kMaximalDigits + 1;
- char decimal_rep[kV8DtoaBufferCapacity];
- int decimal_rep_length;
-
- DoubleToAscii(value, DTOA_PRECISION, p,
- Vector<char>(decimal_rep, kV8DtoaBufferCapacity),
- &sign, &decimal_rep_length, &decimal_point);
- ASSERT(decimal_rep_length <= p);
-
- int exponent = decimal_point - 1;
-
- char* result = NULL;
-
- if (exponent < -6 || exponent >= p) {
- result =
- CreateExponentialRepresentation(decimal_rep, exponent, negative, p);
- } else {
- // Use fixed notation.
- //
- // Leave room in the result for appending a minus, a period and in
- // the case where decimal_point is not positive for a zero in
- // front of the period.
- unsigned result_size = (decimal_point <= 0)
- ? -decimal_point + p + 3
- : p + 2;
- StringBuilder builder(result_size + 1);
- if (negative) builder.AddCharacter('-');
- if (decimal_point <= 0) {
- builder.AddString("0.");
- builder.AddPadding('0', -decimal_point);
- builder.AddString(decimal_rep);
- builder.AddPadding('0', p - decimal_rep_length);
- } else {
- const int m = Min(decimal_rep_length, decimal_point);
- builder.AddSubstring(decimal_rep, m);
- builder.AddPadding('0', decimal_point - decimal_rep_length);
- if (decimal_point < p) {
- builder.AddCharacter('.');
- const int extra = negative ? 2 : 1;
- if (decimal_rep_length > decimal_point) {
- const int len = StrLength(decimal_rep + decimal_point);
- const int n = Min(len, p - (builder.position() - extra));
- builder.AddSubstring(decimal_rep + decimal_point, n);
- }
- builder.AddPadding('0', extra + (p - builder.position()));
- }
- }
- result = builder.Finalize();
- }
-
- return result;
-}
-
-
-char* DoubleToRadixCString(double value, int radix) {
- ASSERT(radix >= 2 && radix <= 36);
-
- // Character array used for conversion.
- static const char chars[] = "0123456789abcdefghijklmnopqrstuvwxyz";
-
- // Buffer for the integer part of the result. 1024 chars is enough
- // for max integer value in radix 2. We need room for a sign too.
- static const int kBufferSize = 1100;
- char integer_buffer[kBufferSize];
- integer_buffer[kBufferSize - 1] = '\0';
-
- // Buffer for the decimal part of the result. We only generate up
- // to kBufferSize - 1 chars for the decimal part.
- char decimal_buffer[kBufferSize];
- decimal_buffer[kBufferSize - 1] = '\0';
-
- // Make sure the value is positive.
- bool is_negative = value < 0.0;
- if (is_negative) value = -value;
-
- // Get the integer part and the decimal part.
- double integer_part = floor(value);
- double decimal_part = value - integer_part;
-
- // Convert the integer part starting from the back. Always generate
- // at least one digit.
- int integer_pos = kBufferSize - 2;
- do {
- integer_buffer[integer_pos--] =
- chars[static_cast<int>(modulo(integer_part, radix))];
- integer_part /= radix;
- } while (integer_part >= 1.0);
- // Sanity check.
- ASSERT(integer_pos > 0);
- // Add sign if needed.
- if (is_negative) integer_buffer[integer_pos--] = '-';
-
- // Convert the decimal part. Repeatedly multiply by the radix to
- // generate the next char. Never generate more than kBufferSize - 1
- // chars.
- //
- // TODO(1093998): We will often generate a full decimal_buffer of
- // chars because hitting zero will often not happen. The right
- // solution would be to continue until the string representation can
- // be read back and yield the original value. To implement this
- // efficiently, we probably have to modify dtoa.
- int decimal_pos = 0;
- while ((decimal_part > 0.0) && (decimal_pos < kBufferSize - 1)) {
- decimal_part *= radix;
- decimal_buffer[decimal_pos++] =
- chars[static_cast<int>(floor(decimal_part))];
- decimal_part -= floor(decimal_part);
- }
- decimal_buffer[decimal_pos] = '\0';
-
- // Compute the result size.
- int integer_part_size = kBufferSize - 2 - integer_pos;
- // Make room for zero termination.
- unsigned result_size = integer_part_size + decimal_pos;
- // If the number has a decimal part, leave room for the period.
- if (decimal_pos > 0) result_size++;
- // Allocate result and fill in the parts.
- StringBuilder builder(result_size + 1);
- builder.AddSubstring(integer_buffer + integer_pos + 1, integer_part_size);
- if (decimal_pos > 0) builder.AddCharacter('.');
- builder.AddSubstring(decimal_buffer, decimal_pos);
- return builder.Finalize();
-}
-
-
-static Mutex* dtoa_lock_one = OS::CreateMutex();
-static Mutex* dtoa_lock_zero = OS::CreateMutex();
-
-
-} } // namespace v8::internal
-
-
-extern "C" {
-void ACQUIRE_DTOA_LOCK(int n) {
- ASSERT(n == 0 || n == 1);
- (n == 0 ? v8::internal::dtoa_lock_zero : v8::internal::dtoa_lock_one)->Lock();
-}
-
-
-void FREE_DTOA_LOCK(int n) {
- ASSERT(n == 0 || n == 1);
- (n == 0 ? v8::internal::dtoa_lock_zero : v8::internal::dtoa_lock_one)->
- Unlock();
-}
-}
diff --git a/src/3rdparty/v8/src/conversions.h b/src/3rdparty/v8/src/conversions.h
deleted file mode 100644
index 312e6ae..0000000
--- a/src/3rdparty/v8/src/conversions.h
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CONVERSIONS_H_
-#define V8_CONVERSIONS_H_
-
-namespace v8 {
-namespace internal {
-
-
-// The fast double-to-(unsigned-)int conversion routine does not guarantee
-// rounding towards zero.
-// The result is unspecified if x is infinite or NaN, or if the rounded
-// integer value is outside the range of type int.
-static inline int FastD2I(double x) {
- // The static_cast convertion from double to int used to be slow, but
- // as new benchmarks show, now it is much faster than lrint().
- return static_cast<int>(x);
-}
-
-static inline unsigned int FastD2UI(double x);
-
-
-static inline double FastI2D(int x) {
- // There is no rounding involved in converting an integer to a
- // double, so this code should compile to a few instructions without
- // any FPU pipeline stalls.
- return static_cast<double>(x);
-}
-
-
-static inline double FastUI2D(unsigned x) {
- // There is no rounding involved in converting an unsigned integer to a
- // double, so this code should compile to a few instructions without
- // any FPU pipeline stalls.
- return static_cast<double>(x);
-}
-
-
-// This function should match the exact semantics of ECMA-262 9.4.
-static inline double DoubleToInteger(double x);
-
-
-// This function should match the exact semantics of ECMA-262 9.5.
-static inline int32_t DoubleToInt32(double x);
-
-
-// This function should match the exact semantics of ECMA-262 9.6.
-static inline uint32_t DoubleToUint32(double x) {
- return static_cast<uint32_t>(DoubleToInt32(x));
-}
-
-
-// Enumeration for allowing octals and ignoring junk when converting
-// strings to numbers.
-enum ConversionFlags {
- NO_FLAGS = 0,
- ALLOW_HEX = 1,
- ALLOW_OCTALS = 2,
- ALLOW_TRAILING_JUNK = 4
-};
-
-
-// Convert from Number object to C integer.
-static inline int32_t NumberToInt32(Object* number);
-static inline uint32_t NumberToUint32(Object* number);
-
-
-// Converts a string into a double value according to ECMA-262 9.3.1
-double StringToDouble(String* str, int flags, double empty_string_val = 0);
-double StringToDouble(Vector<const char> str,
- int flags,
- double empty_string_val = 0);
-// This version expects a zero-terminated character array.
-double StringToDouble(const char* str, int flags, double empty_string_val = 0);
-
-// Converts a string into an integer.
-double StringToInt(String* str, int radix);
-
-// Converts a double to a string value according to ECMA-262 9.8.1.
-// The buffer should be large enough for any floating point number.
-// 100 characters is enough.
-const char* DoubleToCString(double value, Vector<char> buffer);
-
-// Convert an int to a null-terminated string. The returned string is
-// located inside the buffer, but not necessarily at the start.
-const char* IntToCString(int n, Vector<char> buffer);
-
-// Additional number to string conversions for the number type.
-// The caller is responsible for calling free on the returned pointer.
-char* DoubleToFixedCString(double value, int f);
-char* DoubleToExponentialCString(double value, int f);
-char* DoubleToPrecisionCString(double value, int f);
-char* DoubleToRadixCString(double value, int radix);
-
-} } // namespace v8::internal
-
-#endif // V8_CONVERSIONS_H_
diff --git a/src/3rdparty/v8/src/counters.cc b/src/3rdparty/v8/src/counters.cc
deleted file mode 100644
index faad6d4..0000000
--- a/src/3rdparty/v8/src/counters.cc
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "counters.h"
-#include "isolate.h"
-#include "platform.h"
-
-namespace v8 {
-namespace internal {
-
-StatsTable::StatsTable()
- : lookup_function_(NULL),
- create_histogram_function_(NULL),
- add_histogram_sample_function_(NULL) {}
-
-
-int* StatsCounter::FindLocationInStatsTable() const {
- return Isolate::Current()->stats_table()->FindLocation(name_);
-}
-
-
-// Start the timer.
-void StatsCounterTimer::Start() {
- if (!counter_.Enabled())
- return;
- stop_time_ = 0;
- start_time_ = OS::Ticks();
-}
-
-// Stop the timer and record the results.
-void StatsCounterTimer::Stop() {
- if (!counter_.Enabled())
- return;
- stop_time_ = OS::Ticks();
-
- // Compute the delta between start and stop, in milliseconds.
- int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
- counter_.Increment(milliseconds);
-}
-
-// Start the timer.
-void HistogramTimer::Start() {
- if (GetHistogram() != NULL) {
- stop_time_ = 0;
- start_time_ = OS::Ticks();
- }
-}
-
-// Stop the timer and record the results.
-void HistogramTimer::Stop() {
- if (histogram_ != NULL) {
- stop_time_ = OS::Ticks();
-
- // Compute the delta between start and stop, in milliseconds.
- int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
- Isolate::Current()->stats_table()->
- AddHistogramSample(histogram_, milliseconds);
- }
-}
-
-
-void* HistogramTimer::CreateHistogram() const {
- return Isolate::Current()->stats_table()->
- CreateHistogram(name_, 0, 10000, 50);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/counters.h b/src/3rdparty/v8/src/counters.h
deleted file mode 100644
index 6498a02..0000000
--- a/src/3rdparty/v8/src/counters.h
+++ /dev/null
@@ -1,254 +0,0 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_COUNTERS_H_
-#define V8_COUNTERS_H_
-
-#include "../include/v8.h"
-#include "allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// StatsCounters is an interface for plugging into external
-// counters for monitoring. Counters can be looked up and
-// manipulated by name.
-
-class StatsTable {
- public:
- // Register an application-defined function where
- // counters can be looked up.
- void SetCounterFunction(CounterLookupCallback f) {
- lookup_function_ = f;
- }
-
- // Register an application-defined function to create
- // a histogram for passing to the AddHistogramSample function
- void SetCreateHistogramFunction(CreateHistogramCallback f) {
- create_histogram_function_ = f;
- }
-
- // Register an application-defined function to add a sample
- // to a histogram created with CreateHistogram function
- void SetAddHistogramSampleFunction(AddHistogramSampleCallback f) {
- add_histogram_sample_function_ = f;
- }
-
- bool HasCounterFunction() const {
- return lookup_function_ != NULL;
- }
-
- // Lookup the location of a counter by name. If the lookup
- // is successful, returns a non-NULL pointer for writing the
- // value of the counter. Each thread calling this function
- // may receive a different location to store it's counter.
- // The return value must not be cached and re-used across
- // threads, although a single thread is free to cache it.
- int* FindLocation(const char* name) {
- if (!lookup_function_) return NULL;
- return lookup_function_(name);
- }
-
- // Create a histogram by name. If the create is successful,
- // returns a non-NULL pointer for use with AddHistogramSample
- // function. min and max define the expected minimum and maximum
- // sample values. buckets is the maximum number of buckets
- // that the samples will be grouped into.
- void* CreateHistogram(const char* name,
- int min,
- int max,
- size_t buckets) {
- if (!create_histogram_function_) return NULL;
- return create_histogram_function_(name, min, max, buckets);
- }
-
- // Add a sample to a histogram created with the CreateHistogram
- // function.
- void AddHistogramSample(void* histogram, int sample) {
- if (!add_histogram_sample_function_) return;
- return add_histogram_sample_function_(histogram, sample);
- }
-
- private:
- StatsTable();
-
- CounterLookupCallback lookup_function_;
- CreateHistogramCallback create_histogram_function_;
- AddHistogramSampleCallback add_histogram_sample_function_;
-
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(StatsTable);
-};
-
-// StatsCounters are dynamically created values which can be tracked in
-// the StatsTable. They are designed to be lightweight to create and
-// easy to use.
-//
-// Internally, a counter represents a value in a row of a StatsTable.
-// The row has a 32bit value for each process/thread in the table and also
-// a name (stored in the table metadata). Since the storage location can be
-// thread-specific, this class cannot be shared across threads.
-//
-// This class is designed to be POD initialized. It will be registered with
-// the counter system on first use. For example:
-// StatsCounter c = { "c:myctr", NULL, false };
-struct StatsCounter {
- const char* name_;
- int* ptr_;
- bool lookup_done_;
-
- // Sets the counter to a specific value.
- void Set(int value) {
- int* loc = GetPtr();
- if (loc) *loc = value;
- }
-
- // Increments the counter.
- void Increment() {
- int* loc = GetPtr();
- if (loc) (*loc)++;
- }
-
- void Increment(int value) {
- int* loc = GetPtr();
- if (loc)
- (*loc) += value;
- }
-
- // Decrements the counter.
- void Decrement() {
- int* loc = GetPtr();
- if (loc) (*loc)--;
- }
-
- void Decrement(int value) {
- int* loc = GetPtr();
- if (loc) (*loc) -= value;
- }
-
- // Is this counter enabled?
- // Returns false if table is full.
- bool Enabled() {
- return GetPtr() != NULL;
- }
-
- // Get the internal pointer to the counter. This is used
- // by the code generator to emit code that manipulates a
- // given counter without calling the runtime system.
- int* GetInternalPointer() {
- int* loc = GetPtr();
- ASSERT(loc != NULL);
- return loc;
- }
-
- protected:
- // Returns the cached address of this counter location.
- int* GetPtr() {
- if (lookup_done_)
- return ptr_;
- lookup_done_ = true;
- ptr_ = FindLocationInStatsTable();
- return ptr_;
- }
-
- private:
- int* FindLocationInStatsTable() const;
-};
-
-// StatsCounterTimer t = { { L"t:foo", NULL, false }, 0, 0 };
-struct StatsCounterTimer {
- StatsCounter counter_;
-
- int64_t start_time_;
- int64_t stop_time_;
-
- // Start the timer.
- void Start();
-
- // Stop the timer and record the results.
- void Stop();
-
- // Returns true if the timer is running.
- bool Running() {
- return counter_.Enabled() && start_time_ != 0 && stop_time_ == 0;
- }
-};
-
-// A HistogramTimer allows distributions of results to be created
-// HistogramTimer t = { L"foo", NULL, false, 0, 0 };
-struct HistogramTimer {
- const char* name_;
- void* histogram_;
- bool lookup_done_;
-
- int64_t start_time_;
- int64_t stop_time_;
-
- // Start the timer.
- void Start();
-
- // Stop the timer and record the results.
- void Stop();
-
- // Returns true if the timer is running.
- bool Running() {
- return (histogram_ != NULL) && (start_time_ != 0) && (stop_time_ == 0);
- }
-
- protected:
- // Returns the handle to the histogram.
- void* GetHistogram() {
- if (!lookup_done_) {
- lookup_done_ = true;
- histogram_ = CreateHistogram();
- }
- return histogram_;
- }
-
- private:
- void* CreateHistogram() const;
-};
-
-// Helper class for scoping a HistogramTimer.
-class HistogramTimerScope BASE_EMBEDDED {
- public:
- explicit HistogramTimerScope(HistogramTimer* timer) :
- timer_(timer) {
- timer_->Start();
- }
- ~HistogramTimerScope() {
- timer_->Stop();
- }
- private:
- HistogramTimer* timer_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_COUNTERS_H_
diff --git a/src/3rdparty/v8/src/cpu-profiler-inl.h b/src/3rdparty/v8/src/cpu-profiler-inl.h
deleted file mode 100644
index b704417..0000000
--- a/src/3rdparty/v8/src/cpu-profiler-inl.h
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CPU_PROFILER_INL_H_
-#define V8_CPU_PROFILER_INL_H_
-
-#include "cpu-profiler.h"
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-#include "circular-queue-inl.h"
-#include "profile-generator-inl.h"
-#include "unbound-queue-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void CodeCreateEventRecord::UpdateCodeMap(CodeMap* code_map) {
- code_map->AddCode(start, entry, size);
- if (shared != NULL) {
- entry->set_shared_id(code_map->GetSharedId(shared));
- }
-}
-
-
-void CodeMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
- code_map->MoveCode(from, to);
-}
-
-
-void CodeDeleteEventRecord::UpdateCodeMap(CodeMap* code_map) {
- code_map->DeleteCode(start);
-}
-
-
-void SharedFunctionInfoMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
- code_map->MoveCode(from, to);
-}
-
-
-TickSampleEventRecord* TickSampleEventRecord::init(void* value) {
- TickSampleEventRecord* result =
- reinterpret_cast<TickSampleEventRecord*>(value);
- result->filler = 1;
- ASSERT(result->filler != SamplingCircularQueue::kClear);
- // Init the required fields only.
- result->sample.pc = NULL;
- result->sample.frames_count = 0;
- result->sample.has_external_callback = false;
- return result;
-}
-
-
-TickSample* ProfilerEventsProcessor::TickSampleEvent() {
- generator_->Tick();
- TickSampleEventRecord* evt =
- TickSampleEventRecord::init(ticks_buffer_.Enqueue());
- evt->order = enqueue_order_; // No increment!
- return &evt->sample;
-}
-
-
-bool ProfilerEventsProcessor::FilterOutCodeCreateEvent(
- Logger::LogEventsAndTags tag) {
- return FLAG_prof_browser_mode
- && (tag != Logger::CALLBACK_TAG
- && tag != Logger::FUNCTION_TAG
- && tag != Logger::LAZY_COMPILE_TAG
- && tag != Logger::REG_EXP_TAG
- && tag != Logger::SCRIPT_TAG);
-}
-
-} } // namespace v8::internal
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-#endif // V8_CPU_PROFILER_INL_H_
diff --git a/src/3rdparty/v8/src/cpu-profiler.cc b/src/3rdparty/v8/src/cpu-profiler.cc
deleted file mode 100644
index 3894748..0000000
--- a/src/3rdparty/v8/src/cpu-profiler.cc
+++ /dev/null
@@ -1,606 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "cpu-profiler-inl.h"
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-#include "frames-inl.h"
-#include "hashmap.h"
-#include "log-inl.h"
-#include "vm-state-inl.h"
-
-#include "../include/v8-profiler.h"
-
-namespace v8 {
-namespace internal {
-
-static const int kEventsBufferSize = 256*KB;
-static const int kTickSamplesBufferChunkSize = 64*KB;
-static const int kTickSamplesBufferChunksCount = 16;
-
-
-ProfilerEventsProcessor::ProfilerEventsProcessor(Isolate* isolate,
- ProfileGenerator* generator)
- : Thread(isolate, "v8:ProfEvntProc"),
- generator_(generator),
- running_(true),
- ticks_buffer_(sizeof(TickSampleEventRecord),
- kTickSamplesBufferChunkSize,
- kTickSamplesBufferChunksCount),
- enqueue_order_(0) {
-}
-
-
-void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
- const char* prefix,
- String* name,
- Address start) {
- if (FilterOutCodeCreateEvent(tag)) return;
- CodeEventsContainer evt_rec;
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->type = CodeEventRecord::CODE_CREATION;
- rec->order = ++enqueue_order_;
- rec->start = start;
- rec->entry = generator_->NewCodeEntry(tag, prefix, name);
- rec->size = 1;
- rec->shared = NULL;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
- String* name,
- String* resource_name,
- int line_number,
- Address start,
- unsigned size,
- Address shared) {
- if (FilterOutCodeCreateEvent(tag)) return;
- CodeEventsContainer evt_rec;
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->type = CodeEventRecord::CODE_CREATION;
- rec->order = ++enqueue_order_;
- rec->start = start;
- rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number);
- rec->size = size;
- rec->shared = shared;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
- const char* name,
- Address start,
- unsigned size) {
- if (FilterOutCodeCreateEvent(tag)) return;
- CodeEventsContainer evt_rec;
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->type = CodeEventRecord::CODE_CREATION;
- rec->order = ++enqueue_order_;
- rec->start = start;
- rec->entry = generator_->NewCodeEntry(tag, name);
- rec->size = size;
- rec->shared = NULL;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
- int args_count,
- Address start,
- unsigned size) {
- if (FilterOutCodeCreateEvent(tag)) return;
- CodeEventsContainer evt_rec;
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->type = CodeEventRecord::CODE_CREATION;
- rec->order = ++enqueue_order_;
- rec->start = start;
- rec->entry = generator_->NewCodeEntry(tag, args_count);
- rec->size = size;
- rec->shared = NULL;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::CodeMoveEvent(Address from, Address to) {
- CodeEventsContainer evt_rec;
- CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
- rec->type = CodeEventRecord::CODE_MOVE;
- rec->order = ++enqueue_order_;
- rec->from = from;
- rec->to = to;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::CodeDeleteEvent(Address from) {
- CodeEventsContainer evt_rec;
- CodeDeleteEventRecord* rec = &evt_rec.CodeDeleteEventRecord_;
- rec->type = CodeEventRecord::CODE_DELETE;
- rec->order = ++enqueue_order_;
- rec->start = from;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::SharedFunctionInfoMoveEvent(Address from,
- Address to) {
- CodeEventsContainer evt_rec;
- SharedFunctionInfoMoveEventRecord* rec =
- &evt_rec.SharedFunctionInfoMoveEventRecord_;
- rec->type = CodeEventRecord::SHARED_FUNC_MOVE;
- rec->order = ++enqueue_order_;
- rec->from = from;
- rec->to = to;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::RegExpCodeCreateEvent(
- Logger::LogEventsAndTags tag,
- const char* prefix,
- String* name,
- Address start,
- unsigned size) {
- if (FilterOutCodeCreateEvent(tag)) return;
- CodeEventsContainer evt_rec;
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->type = CodeEventRecord::CODE_CREATION;
- rec->order = ++enqueue_order_;
- rec->start = start;
- rec->entry = generator_->NewCodeEntry(tag, prefix, name);
- rec->size = size;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::AddCurrentStack() {
- TickSampleEventRecord record;
- TickSample* sample = &record.sample;
- Isolate* isolate = Isolate::Current();
- sample->state = isolate->current_vm_state();
- sample->pc = reinterpret_cast<Address>(sample); // Not NULL.
- sample->tos = NULL;
- sample->has_external_callback = false;
- sample->frames_count = 0;
- for (StackTraceFrameIterator it(isolate);
- !it.done() && sample->frames_count < TickSample::kMaxFramesCount;
- it.Advance()) {
- sample->stack[sample->frames_count++] = it.frame()->pc();
- }
- record.order = enqueue_order_;
- ticks_from_vm_buffer_.Enqueue(record);
-}
-
-
-bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
- if (!events_buffer_.IsEmpty()) {
- CodeEventsContainer record;
- events_buffer_.Dequeue(&record);
- switch (record.generic.type) {
-#define PROFILER_TYPE_CASE(type, clss) \
- case CodeEventRecord::type: \
- record.clss##_.UpdateCodeMap(generator_->code_map()); \
- break;
-
- CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)
-
-#undef PROFILER_TYPE_CASE
- default: return true; // Skip record.
- }
- *dequeue_order = record.generic.order;
- return true;
- }
- return false;
-}
-
-
-bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
- while (true) {
- if (!ticks_from_vm_buffer_.IsEmpty()
- && ticks_from_vm_buffer_.Peek()->order == dequeue_order) {
- TickSampleEventRecord record;
- ticks_from_vm_buffer_.Dequeue(&record);
- generator_->RecordTickSample(record.sample);
- }
-
- const TickSampleEventRecord* rec =
- TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
- if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
- // Make a local copy of tick sample record to ensure that it won't
- // be modified as we are processing it. This is possible as the
- // sampler writes w/o any sync to the queue, so if the processor
- // will get far behind, a record may be modified right under its
- // feet.
- TickSampleEventRecord record = *rec;
- if (record.order == dequeue_order) {
- // A paranoid check to make sure that we don't get a memory overrun
- // in case of frames_count having a wild value.
- if (record.sample.frames_count < 0
- || record.sample.frames_count >= TickSample::kMaxFramesCount)
- record.sample.frames_count = 0;
- generator_->RecordTickSample(record.sample);
- ticks_buffer_.FinishDequeue();
- } else {
- return true;
- }
- }
-}
-
-
-void ProfilerEventsProcessor::Run() {
- unsigned dequeue_order = 0;
-
- while (running_) {
- // Process ticks until we have any.
- if (ProcessTicks(dequeue_order)) {
- // All ticks of the current dequeue_order are processed,
- // proceed to the next code event.
- ProcessCodeEvent(&dequeue_order);
- }
- YieldCPU();
- }
-
- // Process remaining tick events.
- ticks_buffer_.FlushResidualRecords();
- // Perform processing until we have tick events, skip remaining code events.
- while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
-}
-
-
-void CpuProfiler::StartProfiling(const char* title) {
- ASSERT(Isolate::Current()->cpu_profiler() != NULL);
- Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
-}
-
-
-void CpuProfiler::StartProfiling(String* title) {
- ASSERT(Isolate::Current()->cpu_profiler() != NULL);
- Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
-}
-
-
-CpuProfile* CpuProfiler::StopProfiling(const char* title) {
- return is_profiling() ?
- Isolate::Current()->cpu_profiler()->StopCollectingProfile(title) : NULL;
-}
-
-
-CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
- return is_profiling() ?
- Isolate::Current()->cpu_profiler()->StopCollectingProfile(
- security_token, title) : NULL;
-}
-
-
-int CpuProfiler::GetProfilesCount() {
- ASSERT(Isolate::Current()->cpu_profiler() != NULL);
- // The count of profiles doesn't depend on a security token.
- return Isolate::Current()->cpu_profiler()->profiles_->Profiles(
- TokenEnumerator::kNoSecurityToken)->length();
-}
-
-
-CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) {
- ASSERT(Isolate::Current()->cpu_profiler() != NULL);
- CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
- const int token = profiler->token_enumerator_->GetTokenId(security_token);
- return profiler->profiles_->Profiles(token)->at(index);
-}
-
-
-CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) {
- ASSERT(Isolate::Current()->cpu_profiler() != NULL);
- CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
- const int token = profiler->token_enumerator_->GetTokenId(security_token);
- return profiler->profiles_->GetProfile(token, uid);
-}
-
-
-TickSample* CpuProfiler::TickSampleEvent(Isolate* isolate) {
- if (CpuProfiler::is_profiling(isolate)) {
- return isolate->cpu_profiler()->processor_->TickSampleEvent();
- } else {
- return NULL;
- }
-}
-
-
-void CpuProfiler::DeleteAllProfiles() {
- Isolate* isolate = Isolate::Current();
- ASSERT(isolate->cpu_profiler() != NULL);
- if (is_profiling())
- isolate->cpu_profiler()->StopProcessor();
- isolate->cpu_profiler()->ResetProfiles();
-}
-
-
-void CpuProfiler::DeleteProfile(CpuProfile* profile) {
- ASSERT(Isolate::Current()->cpu_profiler() != NULL);
- Isolate::Current()->cpu_profiler()->profiles_->RemoveProfile(profile);
- delete profile;
-}
-
-
-bool CpuProfiler::HasDetachedProfiles() {
- ASSERT(Isolate::Current()->cpu_profiler() != NULL);
- return Isolate::Current()->cpu_profiler()->profiles_->HasDetachedProfiles();
-}
-
-
-void CpuProfiler::CallbackEvent(String* name, Address entry_point) {
- Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
- Logger::CALLBACK_TAG, CodeEntry::kEmptyNamePrefix, name, entry_point);
-}
-
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, const char* comment) {
- Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
- tag, comment, code->address(), code->ExecutableSize());
-}
-
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, String* name) {
- Isolate* isolate = Isolate::Current();
- isolate->cpu_profiler()->processor_->CodeCreateEvent(
- tag,
- name,
- isolate->heap()->empty_string(),
- v8::CpuProfileNode::kNoLineNumberInfo,
- code->address(),
- code->ExecutableSize(),
- NULL);
-}
-
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- String* name) {
- Isolate* isolate = Isolate::Current();
- isolate->cpu_profiler()->processor_->CodeCreateEvent(
- tag,
- name,
- isolate->heap()->empty_string(),
- v8::CpuProfileNode::kNoLineNumberInfo,
- code->address(),
- code->ExecutableSize(),
- shared->address());
-}
-
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- String* source, int line) {
- Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
- tag,
- shared->DebugName(),
- source,
- line,
- code->address(),
- code->ExecutableSize(),
- shared->address());
-}
-
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, int args_count) {
- Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
- tag,
- args_count,
- code->address(),
- code->ExecutableSize());
-}
-
-
-void CpuProfiler::CodeMoveEvent(Address from, Address to) {
- Isolate::Current()->cpu_profiler()->processor_->CodeMoveEvent(from, to);
-}
-
-
-void CpuProfiler::CodeDeleteEvent(Address from) {
- Isolate::Current()->cpu_profiler()->processor_->CodeDeleteEvent(from);
-}
-
-
-void CpuProfiler::SharedFunctionInfoMoveEvent(Address from, Address to) {
- CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
- profiler->processor_->SharedFunctionInfoMoveEvent(from, to);
-}
-
-
-void CpuProfiler::GetterCallbackEvent(String* name, Address entry_point) {
- Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
- Logger::CALLBACK_TAG, "get ", name, entry_point);
-}
-
-
-void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
- Isolate::Current()->cpu_profiler()->processor_->RegExpCodeCreateEvent(
- Logger::REG_EXP_TAG,
- "RegExp: ",
- source,
- code->address(),
- code->ExecutableSize());
-}
-
-
-void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) {
- Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
- Logger::CALLBACK_TAG, "set ", name, entry_point);
-}
-
-
-CpuProfiler::CpuProfiler()
- : profiles_(new CpuProfilesCollection()),
- next_profile_uid_(1),
- token_enumerator_(new TokenEnumerator()),
- generator_(NULL),
- processor_(NULL),
- need_to_stop_sampler_(false),
- is_profiling_(false) {
-}
-
-
-CpuProfiler::~CpuProfiler() {
- delete token_enumerator_;
- delete profiles_;
-}
-
-
-void CpuProfiler::ResetProfiles() {
- delete profiles_;
- profiles_ = new CpuProfilesCollection();
-}
-
-void CpuProfiler::StartCollectingProfile(const char* title) {
- if (profiles_->StartProfiling(title, next_profile_uid_++)) {
- StartProcessorIfNotStarted();
- }
- processor_->AddCurrentStack();
-}
-
-
-void CpuProfiler::StartCollectingProfile(String* title) {
- StartCollectingProfile(profiles_->GetName(title));
-}
-
-
-void CpuProfiler::StartProcessorIfNotStarted() {
- if (processor_ == NULL) {
- Isolate* isolate = Isolate::Current();
-
- // Disable logging when using the new implementation.
- saved_logging_nesting_ = isolate->logger()->logging_nesting_;
- isolate->logger()->logging_nesting_ = 0;
- generator_ = new ProfileGenerator(profiles_);
- processor_ = new ProfilerEventsProcessor(isolate, generator_);
- NoBarrier_Store(&is_profiling_, true);
- processor_->Start();
- // Enumerate stuff we already have in the heap.
- if (isolate->heap()->HasBeenSetup()) {
- if (!FLAG_prof_browser_mode) {
- bool saved_log_code_flag = FLAG_log_code;
- FLAG_log_code = true;
- isolate->logger()->LogCodeObjects();
- FLAG_log_code = saved_log_code_flag;
- }
- isolate->logger()->LogCompiledFunctions();
- isolate->logger()->LogAccessorCallbacks();
- }
- // Enable stack sampling.
- Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
- if (!sampler->IsActive()) {
- sampler->Start();
- need_to_stop_sampler_ = true;
- }
- sampler->IncreaseProfilingDepth();
- }
-}
-
-
-CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
- const double actual_sampling_rate = generator_->actual_sampling_rate();
- StopProcessorIfLastProfile(title);
- CpuProfile* result =
- profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken,
- title,
- actual_sampling_rate);
- if (result != NULL) {
- result->Print();
- }
- return result;
-}
-
-
-CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
- String* title) {
- const double actual_sampling_rate = generator_->actual_sampling_rate();
- const char* profile_title = profiles_->GetName(title);
- StopProcessorIfLastProfile(profile_title);
- int token = token_enumerator_->GetTokenId(security_token);
- return profiles_->StopProfiling(token, profile_title, actual_sampling_rate);
-}
-
-
-void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
- if (profiles_->IsLastProfile(title)) StopProcessor();
-}
-
-
-void CpuProfiler::StopProcessor() {
- Logger* logger = Isolate::Current()->logger();
- Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
- sampler->DecreaseProfilingDepth();
- if (need_to_stop_sampler_) {
- sampler->Stop();
- need_to_stop_sampler_ = false;
- }
- processor_->Stop();
- processor_->Join();
- delete processor_;
- delete generator_;
- processor_ = NULL;
- NoBarrier_Store(&is_profiling_, false);
- generator_ = NULL;
- logger->logging_nesting_ = saved_logging_nesting_;
-}
-
-} } // namespace v8::internal
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-namespace v8 {
-namespace internal {
-
-void CpuProfiler::Setup() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- Isolate* isolate = Isolate::Current();
- if (isolate->cpu_profiler() == NULL) {
- isolate->set_cpu_profiler(new CpuProfiler());
- }
-#endif
-}
-
-
-void CpuProfiler::TearDown() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- Isolate* isolate = Isolate::Current();
- if (isolate->cpu_profiler() != NULL) {
- delete isolate->cpu_profiler();
- }
- isolate->set_cpu_profiler(NULL);
-#endif
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/cpu-profiler.h b/src/3rdparty/v8/src/cpu-profiler.h
deleted file mode 100644
index e04cf85..0000000
--- a/src/3rdparty/v8/src/cpu-profiler.h
+++ /dev/null
@@ -1,305 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CPU_PROFILER_H_
-#define V8_CPU_PROFILER_H_
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-#include "atomicops.h"
-#include "circular-queue.h"
-#include "unbound-queue.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class CodeEntry;
-class CodeMap;
-class CpuProfile;
-class CpuProfilesCollection;
-class HashMap;
-class ProfileGenerator;
-class TokenEnumerator;
-
-#define CODE_EVENTS_TYPE_LIST(V) \
- V(CODE_CREATION, CodeCreateEventRecord) \
- V(CODE_MOVE, CodeMoveEventRecord) \
- V(CODE_DELETE, CodeDeleteEventRecord) \
- V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord)
-
-
-class CodeEventRecord {
- public:
-#define DECLARE_TYPE(type, ignore) type,
- enum Type {
- NONE = 0,
- CODE_EVENTS_TYPE_LIST(DECLARE_TYPE)
- NUMBER_OF_TYPES
- };
-#undef DECLARE_TYPE
-
- Type type;
- unsigned order;
-};
-
-
-class CodeCreateEventRecord : public CodeEventRecord {
- public:
- Address start;
- CodeEntry* entry;
- unsigned size;
- Address shared;
-
- INLINE(void UpdateCodeMap(CodeMap* code_map));
-};
-
-
-class CodeMoveEventRecord : public CodeEventRecord {
- public:
- Address from;
- Address to;
-
- INLINE(void UpdateCodeMap(CodeMap* code_map));
-};
-
-
-class CodeDeleteEventRecord : public CodeEventRecord {
- public:
- Address start;
-
- INLINE(void UpdateCodeMap(CodeMap* code_map));
-};
-
-
-class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
- public:
- Address from;
- Address to;
-
- INLINE(void UpdateCodeMap(CodeMap* code_map));
-};
-
-
-class TickSampleEventRecord BASE_EMBEDDED {
- public:
- TickSampleEventRecord()
- : filler(1) {
- ASSERT(filler != SamplingCircularQueue::kClear);
- }
-
- // The first machine word of a TickSampleEventRecord must not ever
- // become equal to SamplingCircularQueue::kClear. As both order and
- // TickSample's first field are not reliable in this sense (order
- // can overflow, TickSample can have all fields reset), we are
- // forced to use an artificial filler field.
- int filler;
- unsigned order;
- TickSample sample;
-
- static TickSampleEventRecord* cast(void* value) {
- return reinterpret_cast<TickSampleEventRecord*>(value);
- }
-
- INLINE(static TickSampleEventRecord* init(void* value));
-};
-
-
-// This class implements both the profile events processor thread and
-// methods called by event producers: VM and stack sampler threads.
-class ProfilerEventsProcessor : public Thread {
- public:
- explicit ProfilerEventsProcessor(Isolate* isolate,
- ProfileGenerator* generator);
- virtual ~ProfilerEventsProcessor() {}
-
- // Thread control.
- virtual void Run();
- inline void Stop() { running_ = false; }
- INLINE(bool running()) { return running_; }
-
- // Events adding methods. Called by VM threads.
- void CallbackCreateEvent(Logger::LogEventsAndTags tag,
- const char* prefix, String* name,
- Address start);
- void CodeCreateEvent(Logger::LogEventsAndTags tag,
- String* name,
- String* resource_name, int line_number,
- Address start, unsigned size,
- Address shared);
- void CodeCreateEvent(Logger::LogEventsAndTags tag,
- const char* name,
- Address start, unsigned size);
- void CodeCreateEvent(Logger::LogEventsAndTags tag,
- int args_count,
- Address start, unsigned size);
- void CodeMoveEvent(Address from, Address to);
- void CodeDeleteEvent(Address from);
- void SharedFunctionInfoMoveEvent(Address from, Address to);
- void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
- const char* prefix, String* name,
- Address start, unsigned size);
- // Puts current stack into tick sample events buffer.
- void AddCurrentStack();
-
- // Tick sample events are filled directly in the buffer of the circular
- // queue (because the structure is of fixed width, but usually not all
- // stack frame entries are filled.) This method returns a pointer to the
- // next record of the buffer.
- INLINE(TickSample* TickSampleEvent());
-
- private:
- union CodeEventsContainer {
- CodeEventRecord generic;
-#define DECLARE_CLASS(ignore, type) type type##_;
- CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
-#undef DECLARE_TYPE
- };
-
- // Called from events processing thread (Run() method.)
- bool ProcessCodeEvent(unsigned* dequeue_order);
- bool ProcessTicks(unsigned dequeue_order);
-
- INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));
-
- ProfileGenerator* generator_;
- bool running_;
- UnboundQueue<CodeEventsContainer> events_buffer_;
- SamplingCircularQueue ticks_buffer_;
- UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
- unsigned enqueue_order_;
-};
-
-} } // namespace v8::internal
-
-
-#define PROFILE(isolate, Call) \
- LOG(isolate, Call); \
- do { \
- if (v8::internal::CpuProfiler::is_profiling()) { \
- v8::internal::CpuProfiler::Call; \
- } \
- } while (false)
-#else
-#define PROFILE(isolate, Call) LOG(isolate, Call)
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-
-namespace v8 {
-namespace internal {
-
-
-// TODO(isolates): isolatify this class.
-class CpuProfiler {
- public:
- static void Setup();
- static void TearDown();
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- static void StartProfiling(const char* title);
- static void StartProfiling(String* title);
- static CpuProfile* StopProfiling(const char* title);
- static CpuProfile* StopProfiling(Object* security_token, String* title);
- static int GetProfilesCount();
- static CpuProfile* GetProfile(Object* security_token, int index);
- static CpuProfile* FindProfile(Object* security_token, unsigned uid);
- static void DeleteAllProfiles();
- static void DeleteProfile(CpuProfile* profile);
- static bool HasDetachedProfiles();
-
- // Invoked from stack sampler (thread or signal handler.)
- static TickSample* TickSampleEvent(Isolate* isolate);
-
- // Must be called via PROFILE macro, otherwise will crash when
- // profiling is not enabled.
- static void CallbackEvent(String* name, Address entry_point);
- static void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, const char* comment);
- static void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, String* name);
- static void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo *shared,
- String* name);
- static void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo *shared,
- String* source, int line);
- static void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, int args_count);
- static void CodeMovingGCEvent() {}
- static void CodeMoveEvent(Address from, Address to);
- static void CodeDeleteEvent(Address from);
- static void GetterCallbackEvent(String* name, Address entry_point);
- static void RegExpCodeCreateEvent(Code* code, String* source);
- static void SetterCallbackEvent(String* name, Address entry_point);
- static void SharedFunctionInfoMoveEvent(Address from, Address to);
-
- // TODO(isolates): this doesn't have to use atomics anymore.
-
- static INLINE(bool is_profiling()) {
- return is_profiling(Isolate::Current());
- }
-
- static INLINE(bool is_profiling(Isolate* isolate)) {
- CpuProfiler* profiler = isolate->cpu_profiler();
- return profiler != NULL && NoBarrier_Load(&profiler->is_profiling_);
- }
-
- private:
- CpuProfiler();
- ~CpuProfiler();
- void StartCollectingProfile(const char* title);
- void StartCollectingProfile(String* title);
- void StartProcessorIfNotStarted();
- CpuProfile* StopCollectingProfile(const char* title);
- CpuProfile* StopCollectingProfile(Object* security_token, String* title);
- void StopProcessorIfLastProfile(const char* title);
- void StopProcessor();
- void ResetProfiles();
-
- CpuProfilesCollection* profiles_;
- unsigned next_profile_uid_;
- TokenEnumerator* token_enumerator_;
- ProfileGenerator* generator_;
- ProfilerEventsProcessor* processor_;
- int saved_logging_nesting_;
- bool need_to_stop_sampler_;
- Atomic32 is_profiling_;
-
-#else
- static INLINE(bool is_profiling()) { return false; }
-#endif // ENABLE_LOGGING_AND_PROFILING
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
-};
-
-} } // namespace v8::internal
-
-
-#endif // V8_CPU_PROFILER_H_
diff --git a/src/3rdparty/v8/src/cpu.h b/src/3rdparty/v8/src/cpu.h
deleted file mode 100644
index e307302..0000000
--- a/src/3rdparty/v8/src/cpu.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This module contains the architecture-specific code. This make the rest of
-// the code less dependent on differences between different processor
-// architecture.
-// The classes have the same definition for all architectures. The
-// implementation for a particular architecture is put in cpu_<arch>.cc.
-// The build system then uses the implementation for the target architecture.
-//
-
-#ifndef V8_CPU_H_
-#define V8_CPU_H_
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// CPU
-//
-// This class has static methods for the architecture specific functions. Add
-// methods here to cope with differences between the supported architectures.
-//
-// For each architecture the file cpu_<arch>.cc contains the implementation of
-// these functions.
-
-class CPU : public AllStatic {
- public:
- // Initializes the cpu architecture support. Called once at VM startup.
- static void Setup();
-
- static bool SupportsCrankshaft();
-
- // Flush instruction cache.
- static void FlushICache(void* start, size_t size);
-
- // Try to activate a system level debugger.
- static void DebugBreak();
-};
-
-} } // namespace v8::internal
-
-#endif // V8_CPU_H_
diff --git a/src/3rdparty/v8/src/d8-debug.cc b/src/3rdparty/v8/src/d8-debug.cc
deleted file mode 100644
index 3df8693..0000000
--- a/src/3rdparty/v8/src/d8-debug.cc
+++ /dev/null
@@ -1,367 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "d8.h"
-#include "d8-debug.h"
-#include "platform.h"
-#include "debug-agent.h"
-
-
-namespace v8 {
-
-static bool was_running = true;
-
-void PrintPrompt(bool is_running) {
- const char* prompt = is_running? "> " : "dbg> ";
- was_running = is_running;
- printf("%s", prompt);
- fflush(stdout);
-}
-
-
-void PrintPrompt() {
- PrintPrompt(was_running);
-}
-
-
-void HandleDebugEvent(DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- Handle<Value> data) {
- HandleScope scope;
-
- // Check for handled event.
- if (event != Break && event != Exception && event != AfterCompile) {
- return;
- }
-
- TryCatch try_catch;
-
- // Get the toJSONProtocol function on the event and get the JSON format.
- Local<String> to_json_fun_name = String::New("toJSONProtocol");
- Local<Function> to_json_fun =
- Function::Cast(*event_data->Get(to_json_fun_name));
- Local<Value> event_json = to_json_fun->Call(event_data, 0, NULL);
- if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
- return;
- }
-
- // Print the event details.
- Handle<Object> details =
- Shell::DebugMessageDetails(Handle<String>::Cast(event_json));
- if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
- return;
- }
- String::Utf8Value str(details->Get(String::New("text")));
- if (str.length() == 0) {
- // Empty string is used to signal not to process this event.
- return;
- }
- printf("%s\n", *str);
-
- // Get the debug command processor.
- Local<String> fun_name = String::New("debugCommandProcessor");
- Local<Function> fun = Function::Cast(*exec_state->Get(fun_name));
- Local<Object> cmd_processor =
- Object::Cast(*fun->Call(exec_state, 0, NULL));
- if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
- return;
- }
-
- static const int kBufferSize = 256;
- bool running = false;
- while (!running) {
- char command[kBufferSize];
- PrintPrompt(running);
- char* str = fgets(command, kBufferSize, stdin);
- if (str == NULL) break;
-
- // Ignore empty commands.
- if (strlen(command) == 0) continue;
-
- TryCatch try_catch;
-
- // Convert the debugger command to a JSON debugger request.
- Handle<Value> request =
- Shell::DebugCommandToJSONRequest(String::New(command));
- if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
- continue;
- }
-
- // If undefined is returned the command was handled internally and there is
- // no JSON to send.
- if (request->IsUndefined()) {
- continue;
- }
-
- Handle<String> fun_name;
- Handle<Function> fun;
- // All the functions used below take one argument.
- static const int kArgc = 1;
- Handle<Value> args[kArgc];
-
- // Invoke the JavaScript to convert the debug command line to a JSON
- // request, invoke the JSON request and convert the JSON respose to a text
- // representation.
- fun_name = String::New("processDebugRequest");
- fun = Handle<Function>::Cast(cmd_processor->Get(fun_name));
- args[0] = request;
- Handle<Value> response_val = fun->Call(cmd_processor, kArgc, args);
- if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
- continue;
- }
- Handle<String> response = Handle<String>::Cast(response_val);
-
- // Convert the debugger response into text details and the running state.
- Handle<Object> response_details = Shell::DebugMessageDetails(response);
- if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
- continue;
- }
- String::Utf8Value text_str(response_details->Get(String::New("text")));
- if (text_str.length() > 0) {
- printf("%s\n", *text_str);
- }
- running =
- response_details->Get(String::New("running"))->ToBoolean()->Value();
- }
-}
-
-
-void RunRemoteDebugger(int port) {
- RemoteDebugger debugger(i::Isolate::Current(), port);
- debugger.Run();
-}
-
-
-void RemoteDebugger::Run() {
- bool ok;
-
- // Make sure that socket support is initialized.
- ok = i::Socket::Setup();
- if (!ok) {
- printf("Unable to initialize socket support %d\n", i::Socket::LastError());
- return;
- }
-
- // Connect to the debugger agent.
- conn_ = i::OS::CreateSocket();
- static const int kPortStrSize = 6;
- char port_str[kPortStrSize];
- i::OS::SNPrintF(i::Vector<char>(port_str, kPortStrSize), "%d", port_);
- ok = conn_->Connect("localhost", port_str);
- if (!ok) {
- printf("Unable to connect to debug agent %d\n", i::Socket::LastError());
- return;
- }
-
- // Start the receiver thread.
- ReceiverThread receiver(isolate_, this);
- receiver.Start();
-
- // Start the keyboard thread.
- KeyboardThread keyboard(isolate_, this);
- keyboard.Start();
- PrintPrompt();
-
- // Process events received from debugged VM and from the keyboard.
- bool terminate = false;
- while (!terminate) {
- event_available_->Wait();
- RemoteDebuggerEvent* event = GetEvent();
- switch (event->type()) {
- case RemoteDebuggerEvent::kMessage:
- HandleMessageReceived(event->data());
- break;
- case RemoteDebuggerEvent::kKeyboard:
- HandleKeyboardCommand(event->data());
- break;
- case RemoteDebuggerEvent::kDisconnect:
- terminate = true;
- break;
-
- default:
- UNREACHABLE();
- }
- delete event;
- }
-
- // Wait for the receiver thread to end.
- receiver.Join();
-}
-
-
-void RemoteDebugger::MessageReceived(i::SmartPointer<char> message) {
- RemoteDebuggerEvent* event =
- new RemoteDebuggerEvent(RemoteDebuggerEvent::kMessage, message);
- AddEvent(event);
-}
-
-
-void RemoteDebugger::KeyboardCommand(i::SmartPointer<char> command) {
- RemoteDebuggerEvent* event =
- new RemoteDebuggerEvent(RemoteDebuggerEvent::kKeyboard, command);
- AddEvent(event);
-}
-
-
-void RemoteDebugger::ConnectionClosed() {
- RemoteDebuggerEvent* event =
- new RemoteDebuggerEvent(RemoteDebuggerEvent::kDisconnect,
- i::SmartPointer<char>());
- AddEvent(event);
-}
-
-
-void RemoteDebugger::AddEvent(RemoteDebuggerEvent* event) {
- i::ScopedLock lock(event_access_);
- if (head_ == NULL) {
- ASSERT(tail_ == NULL);
- head_ = event;
- tail_ = event;
- } else {
- ASSERT(tail_ != NULL);
- tail_->set_next(event);
- tail_ = event;
- }
- event_available_->Signal();
-}
-
-
-RemoteDebuggerEvent* RemoteDebugger::GetEvent() {
- i::ScopedLock lock(event_access_);
- ASSERT(head_ != NULL);
- RemoteDebuggerEvent* result = head_;
- head_ = head_->next();
- if (head_ == NULL) {
- ASSERT(tail_ == result);
- tail_ = NULL;
- }
- return result;
-}
-
-
-void RemoteDebugger::HandleMessageReceived(char* message) {
- HandleScope scope;
-
- // Print the event details.
- TryCatch try_catch;
- Handle<Object> details =
- Shell::DebugMessageDetails(Handle<String>::Cast(String::New(message)));
- if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
- PrintPrompt();
- return;
- }
- String::Utf8Value str(details->Get(String::New("text")));
- if (str.length() == 0) {
- // Empty string is used to signal not to process this event.
- return;
- }
- if (*str != NULL) {
- printf("%s\n", *str);
- } else {
- printf("???\n");
- }
-
- bool is_running = details->Get(String::New("running"))->ToBoolean()->Value();
- PrintPrompt(is_running);
-}
-
-
-void RemoteDebugger::HandleKeyboardCommand(char* command) {
- HandleScope scope;
-
- // Convert the debugger command to a JSON debugger request.
- TryCatch try_catch;
- Handle<Value> request =
- Shell::DebugCommandToJSONRequest(String::New(command));
- if (try_catch.HasCaught()) {
- v8::String::Utf8Value exception(try_catch.Exception());
- const char* exception_string = Shell::ToCString(exception);
- printf("%s\n", exception_string);
- PrintPrompt();
- return;
- }
-
- // If undefined is returned the command was handled internally and there is
- // no JSON to send.
- if (request->IsUndefined()) {
- PrintPrompt();
- return;
- }
-
- // Send the JSON debugger request.
- i::DebuggerAgentUtil::SendMessage(conn_, Handle<String>::Cast(request));
-}
-
-
-void ReceiverThread::Run() {
- // Receive the connect message (with empty body).
- i::SmartPointer<char> message =
- i::DebuggerAgentUtil::ReceiveMessage(remote_debugger_->conn());
- ASSERT(*message == NULL);
-
- while (true) {
- // Receive a message.
- i::SmartPointer<char> message =
- i::DebuggerAgentUtil::ReceiveMessage(remote_debugger_->conn());
- if (*message == NULL) {
- remote_debugger_->ConnectionClosed();
- return;
- }
-
- // Pass the message to the main thread.
- remote_debugger_->MessageReceived(message);
- }
-}
-
-
-void KeyboardThread::Run() {
- static const int kBufferSize = 256;
- while (true) {
- // read keyboard input.
- char command[kBufferSize];
- char* str = fgets(command, kBufferSize, stdin);
- if (str == NULL) {
- break;
- }
-
- // Pass the keyboard command to the main thread.
- remote_debugger_->KeyboardCommand(
- i::SmartPointer<char>(i::StrDup(command)));
- }
-}
-
-
-} // namespace v8
diff --git a/src/3rdparty/v8/src/d8-debug.h b/src/3rdparty/v8/src/d8-debug.h
deleted file mode 100644
index ceb9e36..0000000
--- a/src/3rdparty/v8/src/d8-debug.h
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_D8_DEBUG_H_
-#define V8_D8_DEBUG_H_
-
-
-#include "d8.h"
-#include "debug.h"
-
-
-namespace v8 {
-
-
-void HandleDebugEvent(DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- Handle<Value> data);
-
-// Start the remove debugger connecting to a V8 debugger agent on the specified
-// port.
-void RunRemoteDebugger(int port);
-
-// Forward declerations.
-class RemoteDebuggerEvent;
-class ReceiverThread;
-
-
-// Remote debugging class.
-class RemoteDebugger {
- public:
- RemoteDebugger(i::Isolate* isolate, int port)
- : port_(port),
- event_access_(i::OS::CreateMutex()),
- event_available_(i::OS::CreateSemaphore(0)),
- head_(NULL), tail_(NULL), isolate_(isolate) {}
- void Run();
-
- // Handle events from the subordinate threads.
- void MessageReceived(i::SmartPointer<char> message);
- void KeyboardCommand(i::SmartPointer<char> command);
- void ConnectionClosed();
-
- private:
- // Add new debugger event to the list.
- void AddEvent(RemoteDebuggerEvent* event);
- // Read next debugger event from the list.
- RemoteDebuggerEvent* GetEvent();
-
- // Handle a message from the debugged V8.
- void HandleMessageReceived(char* message);
- // Handle a keyboard command.
- void HandleKeyboardCommand(char* command);
-
- // Get connection to agent in debugged V8.
- i::Socket* conn() { return conn_; }
-
- int port_; // Port used to connect to debugger V8.
- i::Socket* conn_; // Connection to debugger agent in debugged V8.
-
- // Linked list of events from debugged V8 and from keyboard input. Access to
- // the list is guarded by a mutex and a semaphore signals new items in the
- // list.
- i::Mutex* event_access_;
- i::Semaphore* event_available_;
- RemoteDebuggerEvent* head_;
- RemoteDebuggerEvent* tail_;
- i::Isolate* isolate_;
-
- friend class ReceiverThread;
-};
-
-
-// Thread reading from debugged V8 instance.
-class ReceiverThread: public i::Thread {
- public:
- ReceiverThread(i::Isolate* isolate, RemoteDebugger* remote_debugger)
- : Thread(isolate, "d8:ReceiverThrd"),
- remote_debugger_(remote_debugger) {}
- ~ReceiverThread() {}
-
- void Run();
-
- private:
- RemoteDebugger* remote_debugger_;
-};
-
-
-// Thread reading keyboard input.
-class KeyboardThread: public i::Thread {
- public:
- explicit KeyboardThread(i::Isolate* isolate, RemoteDebugger* remote_debugger)
- : Thread(isolate, "d8:KeyboardThrd"),
- remote_debugger_(remote_debugger) {}
- ~KeyboardThread() {}
-
- void Run();
-
- private:
- RemoteDebugger* remote_debugger_;
-};
-
-
-// Events processed by the main deubgger thread.
-class RemoteDebuggerEvent {
- public:
- RemoteDebuggerEvent(int type, i::SmartPointer<char> data)
- : type_(type), data_(data), next_(NULL) {
- ASSERT(type == kMessage || type == kKeyboard || type == kDisconnect);
- }
-
- static const int kMessage = 1;
- static const int kKeyboard = 2;
- static const int kDisconnect = 3;
-
- int type() { return type_; }
- char* data() { return *data_; }
-
- private:
- void set_next(RemoteDebuggerEvent* event) { next_ = event; }
- RemoteDebuggerEvent* next() { return next_; }
-
- int type_;
- i::SmartPointer<char> data_;
- RemoteDebuggerEvent* next_;
-
- friend class RemoteDebugger;
-};
-
-
-} // namespace v8
-
-
-#endif // V8_D8_DEBUG_H_
diff --git a/src/3rdparty/v8/src/d8-posix.cc b/src/3rdparty/v8/src/d8-posix.cc
deleted file mode 100644
index a7a4049..0000000
--- a/src/3rdparty/v8/src/d8-posix.cc
+++ /dev/null
@@ -1,695 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include <stdlib.h>
-#include <errno.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <time.h>
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/wait.h>
-#include <signal.h>
-
-
-#include "d8.h"
-#include "d8-debug.h"
-#include "debug.h"
-
-
-namespace v8 {
-
-
-// If the buffer ends in the middle of a UTF-8 sequence then we return
-// the length of the string up to but not including the incomplete UTF-8
-// sequence. If the buffer ends with a valid UTF-8 sequence then we
-// return the whole buffer.
-static int LengthWithoutIncompleteUtf8(char* buffer, int len) {
- int answer = len;
- // 1-byte encoding.
- static const int kUtf8SingleByteMask = 0x80;
- static const int kUtf8SingleByteValue = 0x00;
- // 2-byte encoding.
- static const int kUtf8TwoByteMask = 0xe0;
- static const int kUtf8TwoByteValue = 0xc0;
- // 3-byte encoding.
- static const int kUtf8ThreeByteMask = 0xf0;
- static const int kUtf8ThreeByteValue = 0xe0;
- // 4-byte encoding.
- static const int kUtf8FourByteMask = 0xf8;
- static const int kUtf8FourByteValue = 0xf0;
- // Subsequent bytes of a multi-byte encoding.
- static const int kMultiByteMask = 0xc0;
- static const int kMultiByteValue = 0x80;
- int multi_byte_bytes_seen = 0;
- while (answer > 0) {
- int c = buffer[answer - 1];
- // Ends in valid single-byte sequence?
- if ((c & kUtf8SingleByteMask) == kUtf8SingleByteValue) return answer;
- // Ends in one or more subsequent bytes of a multi-byte value?
- if ((c & kMultiByteMask) == kMultiByteValue) {
- multi_byte_bytes_seen++;
- answer--;
- } else {
- if ((c & kUtf8TwoByteMask) == kUtf8TwoByteValue) {
- if (multi_byte_bytes_seen >= 1) {
- return answer + 2;
- }
- return answer - 1;
- } else if ((c & kUtf8ThreeByteMask) == kUtf8ThreeByteValue) {
- if (multi_byte_bytes_seen >= 2) {
- return answer + 3;
- }
- return answer - 1;
- } else if ((c & kUtf8FourByteMask) == kUtf8FourByteValue) {
- if (multi_byte_bytes_seen >= 3) {
- return answer + 4;
- }
- return answer - 1;
- } else {
- return answer; // Malformed UTF-8.
- }
- }
- }
- return 0;
-}
-
-
-// Suspends the thread until there is data available from the child process.
-// Returns false on timeout, true on data ready.
-static bool WaitOnFD(int fd,
- int read_timeout,
- int total_timeout,
- struct timeval& start_time) {
- fd_set readfds, writefds, exceptfds;
- struct timeval timeout;
- int gone = 0;
- if (total_timeout != -1) {
- struct timeval time_now;
- gettimeofday(&time_now, NULL);
- int seconds = time_now.tv_sec - start_time.tv_sec;
- gone = seconds * 1000 + (time_now.tv_usec - start_time.tv_usec) / 1000;
- if (gone >= total_timeout) return false;
- }
- FD_ZERO(&readfds);
- FD_ZERO(&writefds);
- FD_ZERO(&exceptfds);
- FD_SET(fd, &readfds);
- FD_SET(fd, &exceptfds);
- if (read_timeout == -1 ||
- (total_timeout != -1 && total_timeout - gone < read_timeout)) {
- read_timeout = total_timeout - gone;
- }
- timeout.tv_usec = (read_timeout % 1000) * 1000;
- timeout.tv_sec = read_timeout / 1000;
- int number_of_fds_ready = select(fd + 1,
- &readfds,
- &writefds,
- &exceptfds,
- read_timeout != -1 ? &timeout : NULL);
- return number_of_fds_ready == 1;
-}
-
-
-// Checks whether we ran out of time on the timeout. Returns true if we ran out
-// of time, false if we still have time.
-static bool TimeIsOut(const struct timeval& start_time, const int& total_time) {
- if (total_time == -1) return false;
- struct timeval time_now;
- gettimeofday(&time_now, NULL);
- // Careful about overflow.
- int seconds = time_now.tv_sec - start_time.tv_sec;
- if (seconds > 100) {
- if (seconds * 1000 > total_time) return true;
- return false;
- }
- int useconds = time_now.tv_usec - start_time.tv_usec;
- if (seconds * 1000000 + useconds > total_time * 1000) {
- return true;
- }
- return false;
-}
-
-
-// A utility class that does a non-hanging waitpid on the child process if we
-// bail out of the System() function early. If you don't ever do a waitpid on
-// a subprocess then it turns into one of those annoying 'zombie processes'.
-class ZombieProtector {
- public:
- explicit ZombieProtector(int pid): pid_(pid) { }
- ~ZombieProtector() { if (pid_ != 0) waitpid(pid_, NULL, 0); }
- void ChildIsDeadNow() { pid_ = 0; }
- private:
- int pid_;
-};
-
-
-// A utility class that closes a file descriptor when it goes out of scope.
-class OpenFDCloser {
- public:
- explicit OpenFDCloser(int fd): fd_(fd) { }
- ~OpenFDCloser() { close(fd_); }
- private:
- int fd_;
-};
-
-
-// A utility class that takes the array of command arguments and puts then in an
-// array of new[]ed UTF-8 C strings. Deallocates them again when it goes out of
-// scope.
-class ExecArgs {
- public:
- ExecArgs() {
- exec_args_[0] = NULL;
- }
- bool Init(Handle<Value> arg0, Handle<Array> command_args) {
- String::Utf8Value prog(arg0);
- if (*prog == NULL) {
- const char* message =
- "os.system(): String conversion of program name failed";
- ThrowException(String::New(message));
- return false;
- }
- int len = prog.length() + 3;
- char* c_arg = new char[len];
- snprintf(c_arg, len, "%s", *prog);
- exec_args_[0] = c_arg;
- int i = 1;
- for (unsigned j = 0; j < command_args->Length(); i++, j++) {
- Handle<Value> arg(command_args->Get(Integer::New(j)));
- String::Utf8Value utf8_arg(arg);
- if (*utf8_arg == NULL) {
- exec_args_[i] = NULL; // Consistent state for destructor.
- const char* message =
- "os.system(): String conversion of argument failed.";
- ThrowException(String::New(message));
- return false;
- }
- int len = utf8_arg.length() + 1;
- char* c_arg = new char[len];
- snprintf(c_arg, len, "%s", *utf8_arg);
- exec_args_[i] = c_arg;
- }
- exec_args_[i] = NULL;
- return true;
- }
- ~ExecArgs() {
- for (unsigned i = 0; i < kMaxArgs; i++) {
- if (exec_args_[i] == NULL) {
- return;
- }
- delete [] exec_args_[i];
- exec_args_[i] = 0;
- }
- }
- static const unsigned kMaxArgs = 1000;
- char** arg_array() { return exec_args_; }
- char* arg0() { return exec_args_[0]; }
- private:
- char* exec_args_[kMaxArgs + 1];
-};
-
-
-// Gets the optional timeouts from the arguments to the system() call.
-static bool GetTimeouts(const Arguments& args,
- int* read_timeout,
- int* total_timeout) {
- if (args.Length() > 3) {
- if (args[3]->IsNumber()) {
- *total_timeout = args[3]->Int32Value();
- } else {
- ThrowException(String::New("system: Argument 4 must be a number"));
- return false;
- }
- }
- if (args.Length() > 2) {
- if (args[2]->IsNumber()) {
- *read_timeout = args[2]->Int32Value();
- } else {
- ThrowException(String::New("system: Argument 3 must be a number"));
- return false;
- }
- }
- return true;
-}
-
-
-static const int kReadFD = 0;
-static const int kWriteFD = 1;
-
-
-// This is run in the child process after fork() but before exec(). It normally
-// ends with the child process being replaced with the desired child program.
-// It only returns if an error occurred.
-static void ExecSubprocess(int* exec_error_fds,
- int* stdout_fds,
- ExecArgs& exec_args) {
- close(exec_error_fds[kReadFD]); // Don't need this in the child.
- close(stdout_fds[kReadFD]); // Don't need this in the child.
- close(1); // Close stdout.
- dup2(stdout_fds[kWriteFD], 1); // Dup pipe fd to stdout.
- close(stdout_fds[kWriteFD]); // Don't need the original fd now.
- fcntl(exec_error_fds[kWriteFD], F_SETFD, FD_CLOEXEC);
- execvp(exec_args.arg0(), exec_args.arg_array());
- // Only get here if the exec failed. Write errno to the parent to tell
- // them it went wrong. If it went well the pipe is closed.
- int err = errno;
- int bytes_written;
- do {
- bytes_written = write(exec_error_fds[kWriteFD], &err, sizeof(err));
- } while (bytes_written == -1 && errno == EINTR);
- // Return (and exit child process).
-}
-
-
-// Runs in the parent process. Checks that the child was able to exec (closing
-// the file desriptor), or reports an error if it failed.
-static bool ChildLaunchedOK(int* exec_error_fds) {
- int bytes_read;
- int err;
- do {
- bytes_read = read(exec_error_fds[kReadFD], &err, sizeof(err));
- } while (bytes_read == -1 && errno == EINTR);
- if (bytes_read != 0) {
- ThrowException(String::New(strerror(err)));
- return false;
- }
- return true;
-}
-
-
-// Accumulates the output from the child in a string handle. Returns true if it
-// succeeded or false if an exception was thrown.
-static Handle<Value> GetStdout(int child_fd,
- struct timeval& start_time,
- int read_timeout,
- int total_timeout) {
- Handle<String> accumulator = String::Empty();
- const char* source = "(function(a, b) { return a + b; })";
- Handle<Value> cons_as_obj(Script::Compile(String::New(source))->Run());
- Handle<Function> cons_function(Function::Cast(*cons_as_obj));
- Handle<Value> cons_args[2];
-
- int fullness = 0;
- static const int kStdoutReadBufferSize = 4096;
- char buffer[kStdoutReadBufferSize];
-
- if (fcntl(child_fd, F_SETFL, O_NONBLOCK) != 0) {
- return ThrowException(String::New(strerror(errno)));
- }
-
- int bytes_read;
- do {
- bytes_read = read(child_fd,
- buffer + fullness,
- kStdoutReadBufferSize - fullness);
- if (bytes_read == -1) {
- if (errno == EAGAIN) {
- if (!WaitOnFD(child_fd,
- read_timeout,
- total_timeout,
- start_time) ||
- (TimeIsOut(start_time, total_timeout))) {
- return ThrowException(String::New("Timed out waiting for output"));
- }
- continue;
- } else if (errno == EINTR) {
- continue;
- } else {
- break;
- }
- }
- if (bytes_read + fullness > 0) {
- int length = bytes_read == 0 ?
- bytes_read + fullness :
- LengthWithoutIncompleteUtf8(buffer, bytes_read + fullness);
- Handle<String> addition = String::New(buffer, length);
- cons_args[0] = accumulator;
- cons_args[1] = addition;
- accumulator = Handle<String>::Cast(cons_function->Call(
- Shell::utility_context()->Global(),
- 2,
- cons_args));
- fullness = bytes_read + fullness - length;
- memcpy(buffer, buffer + length, fullness);
- }
- } while (bytes_read != 0);
- return accumulator;
-}
-
-
-// Modern Linux has the waitid call, which is like waitpid, but more useful
-// if you want a timeout. If we don't have waitid we can't limit the time
-// waiting for the process to exit without losing the information about
-// whether it exited normally. In the common case this doesn't matter because
-// we don't get here before the child has closed stdout and most programs don't
-// do that before they exit.
-//
-// We're disabling usage of waitid in Mac OS X because it doens't work for us:
-// a parent process hangs on waiting while a child process is already a zombie.
-// See http://code.google.com/p/v8/issues/detail?id=401.
-#if defined(WNOWAIT) && !defined(ANDROID) && !defined(__APPLE__)
-#if !defined(__FreeBSD__)
-#define HAS_WAITID 1
-#endif
-#endif
-
-
-// Get exit status of child.
-static bool WaitForChild(int pid,
- ZombieProtector& child_waiter,
- struct timeval& start_time,
- int read_timeout,
- int total_timeout) {
-#ifdef HAS_WAITID
-
- siginfo_t child_info;
- child_info.si_pid = 0;
- int useconds = 1;
- // Wait for child to exit.
- while (child_info.si_pid == 0) {
- waitid(P_PID, pid, &child_info, WEXITED | WNOHANG | WNOWAIT);
- usleep(useconds);
- if (useconds < 1000000) useconds <<= 1;
- if ((read_timeout != -1 && useconds / 1000 > read_timeout) ||
- (TimeIsOut(start_time, total_timeout))) {
- ThrowException(String::New("Timed out waiting for process to terminate"));
- kill(pid, SIGINT);
- return false;
- }
- }
- if (child_info.si_code == CLD_KILLED) {
- char message[999];
- snprintf(message,
- sizeof(message),
- "Child killed by signal %d",
- child_info.si_status);
- ThrowException(String::New(message));
- return false;
- }
- if (child_info.si_code == CLD_EXITED && child_info.si_status != 0) {
- char message[999];
- snprintf(message,
- sizeof(message),
- "Child exited with status %d",
- child_info.si_status);
- ThrowException(String::New(message));
- return false;
- }
-
-#else // No waitid call.
-
- int child_status;
- waitpid(pid, &child_status, 0); // We hang here if the child doesn't exit.
- child_waiter.ChildIsDeadNow();
- if (WIFSIGNALED(child_status)) {
- char message[999];
- snprintf(message,
- sizeof(message),
- "Child killed by signal %d",
- WTERMSIG(child_status));
- ThrowException(String::New(message));
- return false;
- }
- if (WEXITSTATUS(child_status) != 0) {
- char message[999];
- int exit_status = WEXITSTATUS(child_status);
- snprintf(message,
- sizeof(message),
- "Child exited with status %d",
- exit_status);
- ThrowException(String::New(message));
- return false;
- }
-
-#endif // No waitid call.
-
- return true;
-}
-
-
-// Implementation of the system() function (see d8.h for details).
-Handle<Value> Shell::System(const Arguments& args) {
- HandleScope scope;
- int read_timeout = -1;
- int total_timeout = -1;
- if (!GetTimeouts(args, &read_timeout, &total_timeout)) return v8::Undefined();
- Handle<Array> command_args;
- if (args.Length() > 1) {
- if (!args[1]->IsArray()) {
- return ThrowException(String::New("system: Argument 2 must be an array"));
- }
- command_args = Handle<Array>::Cast(args[1]);
- } else {
- command_args = Array::New(0);
- }
- if (command_args->Length() > ExecArgs::kMaxArgs) {
- return ThrowException(String::New("Too many arguments to system()"));
- }
- if (args.Length() < 1) {
- return ThrowException(String::New("Too few arguments to system()"));
- }
-
- struct timeval start_time;
- gettimeofday(&start_time, NULL);
-
- ExecArgs exec_args;
- if (!exec_args.Init(args[0], command_args)) {
- return v8::Undefined();
- }
- int exec_error_fds[2];
- int stdout_fds[2];
-
- if (pipe(exec_error_fds) != 0) {
- return ThrowException(String::New("pipe syscall failed."));
- }
- if (pipe(stdout_fds) != 0) {
- return ThrowException(String::New("pipe syscall failed."));
- }
-
- pid_t pid = fork();
- if (pid == 0) { // Child process.
- ExecSubprocess(exec_error_fds, stdout_fds, exec_args);
- exit(1);
- }
-
- // Parent process. Ensure that we clean up if we exit this function early.
- ZombieProtector child_waiter(pid);
- close(exec_error_fds[kWriteFD]);
- close(stdout_fds[kWriteFD]);
- OpenFDCloser error_read_closer(exec_error_fds[kReadFD]);
- OpenFDCloser stdout_read_closer(stdout_fds[kReadFD]);
-
- if (!ChildLaunchedOK(exec_error_fds)) return v8::Undefined();
-
- Handle<Value> accumulator = GetStdout(stdout_fds[kReadFD],
- start_time,
- read_timeout,
- total_timeout);
- if (accumulator->IsUndefined()) {
- kill(pid, SIGINT); // On timeout, kill the subprocess.
- return accumulator;
- }
-
- if (!WaitForChild(pid,
- child_waiter,
- start_time,
- read_timeout,
- total_timeout)) {
- return v8::Undefined();
- }
-
- return scope.Close(accumulator);
-}
-
-
-Handle<Value> Shell::ChangeDirectory(const Arguments& args) {
- if (args.Length() != 1) {
- const char* message = "chdir() takes one argument";
- return ThrowException(String::New(message));
- }
- String::Utf8Value directory(args[0]);
- if (*directory == NULL) {
- const char* message = "os.chdir(): String conversion of argument failed.";
- return ThrowException(String::New(message));
- }
- if (chdir(*directory) != 0) {
- return ThrowException(String::New(strerror(errno)));
- }
- return v8::Undefined();
-}
-
-
-Handle<Value> Shell::SetUMask(const Arguments& args) {
- if (args.Length() != 1) {
- const char* message = "umask() takes one argument";
- return ThrowException(String::New(message));
- }
- if (args[0]->IsNumber()) {
- mode_t mask = args[0]->Int32Value();
- int previous = umask(mask);
- return Number::New(previous);
- } else {
- const char* message = "umask() argument must be numeric";
- return ThrowException(String::New(message));
- }
-}
-
-
-static bool CheckItsADirectory(char* directory) {
- struct stat stat_buf;
- int stat_result = stat(directory, &stat_buf);
- if (stat_result != 0) {
- ThrowException(String::New(strerror(errno)));
- return false;
- }
- if ((stat_buf.st_mode & S_IFDIR) != 0) return true;
- ThrowException(String::New(strerror(EEXIST)));
- return false;
-}
-
-
-// Returns true for success. Creates intermediate directories as needed. No
-// error if the directory exists already.
-static bool mkdirp(char* directory, mode_t mask) {
- int result = mkdir(directory, mask);
- if (result == 0) return true;
- if (errno == EEXIST) {
- return CheckItsADirectory(directory);
- } else if (errno == ENOENT) { // Intermediate path element is missing.
- char* last_slash = strrchr(directory, '/');
- if (last_slash == NULL) {
- ThrowException(String::New(strerror(errno)));
- return false;
- }
- *last_slash = 0;
- if (!mkdirp(directory, mask)) return false;
- *last_slash = '/';
- result = mkdir(directory, mask);
- if (result == 0) return true;
- if (errno == EEXIST) {
- return CheckItsADirectory(directory);
- }
- ThrowException(String::New(strerror(errno)));
- return false;
- } else {
- ThrowException(String::New(strerror(errno)));
- return false;
- }
-}
-
-
-Handle<Value> Shell::MakeDirectory(const Arguments& args) {
- mode_t mask = 0777;
- if (args.Length() == 2) {
- if (args[1]->IsNumber()) {
- mask = args[1]->Int32Value();
- } else {
- const char* message = "mkdirp() second argument must be numeric";
- return ThrowException(String::New(message));
- }
- } else if (args.Length() != 1) {
- const char* message = "mkdirp() takes one or two arguments";
- return ThrowException(String::New(message));
- }
- String::Utf8Value directory(args[0]);
- if (*directory == NULL) {
- const char* message = "os.mkdirp(): String conversion of argument failed.";
- return ThrowException(String::New(message));
- }
- mkdirp(*directory, mask);
- return v8::Undefined();
-}
-
-
-Handle<Value> Shell::RemoveDirectory(const Arguments& args) {
- if (args.Length() != 1) {
- const char* message = "rmdir() takes one or two arguments";
- return ThrowException(String::New(message));
- }
- String::Utf8Value directory(args[0]);
- if (*directory == NULL) {
- const char* message = "os.rmdir(): String conversion of argument failed.";
- return ThrowException(String::New(message));
- }
- rmdir(*directory);
- return v8::Undefined();
-}
-
-
-Handle<Value> Shell::SetEnvironment(const Arguments& args) {
- if (args.Length() != 2) {
- const char* message = "setenv() takes two arguments";
- return ThrowException(String::New(message));
- }
- String::Utf8Value var(args[0]);
- String::Utf8Value value(args[1]);
- if (*var == NULL) {
- const char* message =
- "os.setenv(): String conversion of variable name failed.";
- return ThrowException(String::New(message));
- }
- if (*value == NULL) {
- const char* message =
- "os.setenv(): String conversion of variable contents failed.";
- return ThrowException(String::New(message));
- }
- setenv(*var, *value, 1);
- return v8::Undefined();
-}
-
-
-Handle<Value> Shell::UnsetEnvironment(const Arguments& args) {
- if (args.Length() != 1) {
- const char* message = "unsetenv() takes one argument";
- return ThrowException(String::New(message));
- }
- String::Utf8Value var(args[0]);
- if (*var == NULL) {
- const char* message =
- "os.setenv(): String conversion of variable name failed.";
- return ThrowException(String::New(message));
- }
- unsetenv(*var);
- return v8::Undefined();
-}
-
-
-void Shell::AddOSMethods(Handle<ObjectTemplate> os_templ) {
- os_templ->Set(String::New("system"), FunctionTemplate::New(System));
- os_templ->Set(String::New("chdir"), FunctionTemplate::New(ChangeDirectory));
- os_templ->Set(String::New("setenv"), FunctionTemplate::New(SetEnvironment));
- os_templ->Set(String::New("unsetenv"),
- FunctionTemplate::New(UnsetEnvironment));
- os_templ->Set(String::New("umask"), FunctionTemplate::New(SetUMask));
- os_templ->Set(String::New("mkdirp"), FunctionTemplate::New(MakeDirectory));
- os_templ->Set(String::New("rmdir"), FunctionTemplate::New(RemoveDirectory));
-}
-
-} // namespace v8
diff --git a/src/3rdparty/v8/src/d8-readline.cc b/src/3rdparty/v8/src/d8-readline.cc
deleted file mode 100644
index 67fc9ef..0000000
--- a/src/3rdparty/v8/src/d8-readline.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include <cstdio> // NOLINT
-#include <readline/readline.h> // NOLINT
-#include <readline/history.h> // NOLINT
-
-
-#include "d8.h"
-
-
-// There are incompatibilities between different versions and different
-// implementations of readline. This smooths out one known incompatibility.
-#if RL_READLINE_VERSION >= 0x0500
-#define completion_matches rl_completion_matches
-#endif
-
-
-namespace v8 {
-
-
-class ReadLineEditor: public LineEditor {
- public:
- ReadLineEditor() : LineEditor(LineEditor::READLINE, "readline") { }
- virtual i::SmartPointer<char> Prompt(const char* prompt);
- virtual bool Open();
- virtual bool Close();
- virtual void AddHistory(const char* str);
- private:
- static char** AttemptedCompletion(const char* text, int start, int end);
- static char* CompletionGenerator(const char* text, int state);
- static char kWordBreakCharacters[];
-};
-
-
-static ReadLineEditor read_line_editor;
-char ReadLineEditor::kWordBreakCharacters[] = {' ', '\t', '\n', '"',
- '\\', '\'', '`', '@', '.', '>', '<', '=', ';', '|', '&', '{', '(',
- '\0'};
-
-
-bool ReadLineEditor::Open() {
- rl_initialize();
- rl_attempted_completion_function = AttemptedCompletion;
- rl_completer_word_break_characters = kWordBreakCharacters;
- rl_bind_key('\t', rl_complete);
- using_history();
- return read_history(Shell::kHistoryFileName) == 0;
-}
-
-
-bool ReadLineEditor::Close() {
- return write_history(Shell::kHistoryFileName) == 0;
-}
-
-
-i::SmartPointer<char> ReadLineEditor::Prompt(const char* prompt) {
- char* result = readline(prompt);
- return i::SmartPointer<char>(result);
-}
-
-
-void ReadLineEditor::AddHistory(const char* str) {
- add_history(str);
-}
-
-
-char** ReadLineEditor::AttemptedCompletion(const char* text,
- int start,
- int end) {
- char** result = completion_matches(text, CompletionGenerator);
- rl_attempted_completion_over = true;
- return result;
-}
-
-
-char* ReadLineEditor::CompletionGenerator(const char* text, int state) {
- static unsigned current_index;
- static Persistent<Array> current_completions;
- if (state == 0) {
- i::SmartPointer<char> full_text(i::StrNDup(rl_line_buffer, rl_point));
- HandleScope scope;
- Handle<Array> completions =
- Shell::GetCompletions(String::New(text), String::New(*full_text));
- current_completions = Persistent<Array>::New(completions);
- current_index = 0;
- }
- if (current_index < current_completions->Length()) {
- HandleScope scope;
- Handle<Integer> index = Integer::New(current_index);
- Handle<Value> str_obj = current_completions->Get(index);
- current_index++;
- String::Utf8Value str(str_obj);
- return strdup(*str);
- } else {
- current_completions.Dispose();
- current_completions.Clear();
- return NULL;
- }
-}
-
-
-} // namespace v8
diff --git a/src/3rdparty/v8/src/d8-windows.cc b/src/3rdparty/v8/src/d8-windows.cc
deleted file mode 100644
index eeb4735..0000000
--- a/src/3rdparty/v8/src/d8-windows.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "d8.h"
-#include "d8-debug.h"
-#include "debug.h"
-#include "api.h"
-
-
-namespace v8 {
-
-
-void Shell::AddOSMethods(Handle<ObjectTemplate> os_templ) {
-}
-
-
-} // namespace v8
diff --git a/src/3rdparty/v8/src/d8.cc b/src/3rdparty/v8/src/d8.cc
deleted file mode 100644
index 7de82b7..0000000
--- a/src/3rdparty/v8/src/d8.cc
+++ /dev/null
@@ -1,796 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include <stdlib.h>
-#include <errno.h>
-
-#include "v8.h"
-
-#include "d8.h"
-#include "d8-debug.h"
-#include "debug.h"
-#include "api.h"
-#include "natives.h"
-#include "platform.h"
-
-
-namespace v8 {
-
-
-const char* Shell::kHistoryFileName = ".d8_history";
-const char* Shell::kPrompt = "d8> ";
-
-
-LineEditor *LineEditor::first_ = NULL;
-
-
-LineEditor::LineEditor(Type type, const char* name)
- : type_(type),
- name_(name),
- next_(first_) {
- first_ = this;
-}
-
-
-LineEditor* LineEditor::Get() {
- LineEditor* current = first_;
- LineEditor* best = current;
- while (current != NULL) {
- if (current->type_ > best->type_)
- best = current;
- current = current->next_;
- }
- return best;
-}
-
-
-class DumbLineEditor: public LineEditor {
- public:
- DumbLineEditor() : LineEditor(LineEditor::DUMB, "dumb") { }
- virtual i::SmartPointer<char> Prompt(const char* prompt);
-};
-
-
-static DumbLineEditor dumb_line_editor;
-
-
-i::SmartPointer<char> DumbLineEditor::Prompt(const char* prompt) {
- static const int kBufferSize = 256;
- char buffer[kBufferSize];
- printf("%s", prompt);
- char* str = fgets(buffer, kBufferSize, stdin);
- return i::SmartPointer<char>(str ? i::StrDup(str) : str);
-}
-
-
-CounterMap* Shell::counter_map_;
-i::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
-CounterCollection Shell::local_counters_;
-CounterCollection* Shell::counters_ = &local_counters_;
-Persistent<Context> Shell::utility_context_;
-Persistent<Context> Shell::evaluation_context_;
-
-
-bool CounterMap::Match(void* key1, void* key2) {
- const char* name1 = reinterpret_cast<const char*>(key1);
- const char* name2 = reinterpret_cast<const char*>(key2);
- return strcmp(name1, name2) == 0;
-}
-
-
-// Converts a V8 value to a C string.
-const char* Shell::ToCString(const v8::String::Utf8Value& value) {
- return *value ? *value : "<string conversion failed>";
-}
-
-
-// Executes a string within the current v8 context.
-bool Shell::ExecuteString(Handle<String> source,
- Handle<Value> name,
- bool print_result,
- bool report_exceptions) {
- HandleScope handle_scope;
- TryCatch try_catch;
- if (i::FLAG_debugger) {
- // When debugging make exceptions appear to be uncaught.
- try_catch.SetVerbose(true);
- }
- Handle<Script> script = Script::Compile(source, name);
- if (script.IsEmpty()) {
- // Print errors that happened during compilation.
- if (report_exceptions && !i::FLAG_debugger)
- ReportException(&try_catch);
- return false;
- } else {
- Handle<Value> result = script->Run();
- if (result.IsEmpty()) {
- ASSERT(try_catch.HasCaught());
- // Print errors that happened during execution.
- if (report_exceptions && !i::FLAG_debugger)
- ReportException(&try_catch);
- return false;
- } else {
- ASSERT(!try_catch.HasCaught());
- if (print_result && !result->IsUndefined()) {
- // If all went well and the result wasn't undefined then print
- // the returned value.
- v8::String::Utf8Value str(result);
- const char* cstr = ToCString(str);
- printf("%s\n", cstr);
- }
- return true;
- }
- }
-}
-
-
-Handle<Value> Shell::Print(const Arguments& args) {
- Handle<Value> val = Write(args);
- printf("\n");
- return val;
-}
-
-
-Handle<Value> Shell::Write(const Arguments& args) {
- for (int i = 0; i < args.Length(); i++) {
- HandleScope handle_scope;
- if (i != 0) {
- printf(" ");
- }
- v8::String::Utf8Value str(args[i]);
- int n = fwrite(*str, sizeof(**str), str.length(), stdout);
- if (n != str.length()) {
- printf("Error in fwrite\n");
- exit(1);
- }
- }
- return Undefined();
-}
-
-
-Handle<Value> Shell::Read(const Arguments& args) {
- String::Utf8Value file(args[0]);
- if (*file == NULL) {
- return ThrowException(String::New("Error loading file"));
- }
- Handle<String> source = ReadFile(*file);
- if (source.IsEmpty()) {
- return ThrowException(String::New("Error loading file"));
- }
- return source;
-}
-
-
-Handle<Value> Shell::ReadLine(const Arguments& args) {
- i::SmartPointer<char> line(i::ReadLine(""));
- if (*line == NULL) {
- return Null();
- }
- size_t len = strlen(*line);
- if (len > 0 && line[len - 1] == '\n') {
- --len;
- }
- return String::New(*line, len);
-}
-
-
-Handle<Value> Shell::Load(const Arguments& args) {
- for (int i = 0; i < args.Length(); i++) {
- HandleScope handle_scope;
- String::Utf8Value file(args[i]);
- if (*file == NULL) {
- return ThrowException(String::New("Error loading file"));
- }
- Handle<String> source = ReadFile(*file);
- if (source.IsEmpty()) {
- return ThrowException(String::New("Error loading file"));
- }
- if (!ExecuteString(source, String::New(*file), false, false)) {
- return ThrowException(String::New("Error executing file"));
- }
- }
- return Undefined();
-}
-
-
-Handle<Value> Shell::Yield(const Arguments& args) {
- v8::Unlocker unlocker;
- return Undefined();
-}
-
-
-Handle<Value> Shell::Quit(const Arguments& args) {
- int exit_code = args[0]->Int32Value();
- OnExit();
- exit(exit_code);
- return Undefined();
-}
-
-
-Handle<Value> Shell::Version(const Arguments& args) {
- return String::New(V8::GetVersion());
-}
-
-
-void Shell::ReportException(v8::TryCatch* try_catch) {
- HandleScope handle_scope;
- v8::String::Utf8Value exception(try_catch->Exception());
- const char* exception_string = ToCString(exception);
- Handle<Message> message = try_catch->Message();
- if (message.IsEmpty()) {
- // V8 didn't provide any extra information about this error; just
- // print the exception.
- printf("%s\n", exception_string);
- } else {
- // Print (filename):(line number): (message).
- v8::String::Utf8Value filename(message->GetScriptResourceName());
- const char* filename_string = ToCString(filename);
- int linenum = message->GetLineNumber();
- printf("%s:%i: %s\n", filename_string, linenum, exception_string);
- // Print line of source code.
- v8::String::Utf8Value sourceline(message->GetSourceLine());
- const char* sourceline_string = ToCString(sourceline);
- printf("%s\n", sourceline_string);
- // Print wavy underline (GetUnderline is deprecated).
- int start = message->GetStartColumn();
- for (int i = 0; i < start; i++) {
- printf(" ");
- }
- int end = message->GetEndColumn();
- for (int i = start; i < end; i++) {
- printf("^");
- }
- printf("\n");
- }
-}
-
-
-Handle<Array> Shell::GetCompletions(Handle<String> text, Handle<String> full) {
- HandleScope handle_scope;
- Context::Scope context_scope(utility_context_);
- Handle<Object> global = utility_context_->Global();
- Handle<Value> fun = global->Get(String::New("GetCompletions"));
- static const int kArgc = 3;
- Handle<Value> argv[kArgc] = { evaluation_context_->Global(), text, full };
- Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
- return handle_scope.Close(Handle<Array>::Cast(val));
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-Handle<Object> Shell::DebugMessageDetails(Handle<String> message) {
- Context::Scope context_scope(utility_context_);
- Handle<Object> global = utility_context_->Global();
- Handle<Value> fun = global->Get(String::New("DebugMessageDetails"));
- static const int kArgc = 1;
- Handle<Value> argv[kArgc] = { message };
- Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
- return Handle<Object>::Cast(val);
-}
-
-
-Handle<Value> Shell::DebugCommandToJSONRequest(Handle<String> command) {
- Context::Scope context_scope(utility_context_);
- Handle<Object> global = utility_context_->Global();
- Handle<Value> fun = global->Get(String::New("DebugCommandToJSONRequest"));
- static const int kArgc = 1;
- Handle<Value> argv[kArgc] = { command };
- Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
- return val;
-}
-#endif
-
-
-int32_t* Counter::Bind(const char* name, bool is_histogram) {
- int i;
- for (i = 0; i < kMaxNameSize - 1 && name[i]; i++)
- name_[i] = static_cast<char>(name[i]);
- name_[i] = '\0';
- is_histogram_ = is_histogram;
- return ptr();
-}
-
-
-void Counter::AddSample(int32_t sample) {
- count_++;
- sample_total_ += sample;
-}
-
-
-CounterCollection::CounterCollection() {
- magic_number_ = 0xDEADFACE;
- max_counters_ = kMaxCounters;
- max_name_size_ = Counter::kMaxNameSize;
- counters_in_use_ = 0;
-}
-
-
-Counter* CounterCollection::GetNextCounter() {
- if (counters_in_use_ == kMaxCounters) return NULL;
- return &counters_[counters_in_use_++];
-}
-
-
-void Shell::MapCounters(const char* name) {
- counters_file_ = i::OS::MemoryMappedFile::create(name,
- sizeof(CounterCollection), &local_counters_);
- void* memory = (counters_file_ == NULL) ?
- NULL : counters_file_->memory();
- if (memory == NULL) {
- printf("Could not map counters file %s\n", name);
- exit(1);
- }
- counters_ = static_cast<CounterCollection*>(memory);
- V8::SetCounterFunction(LookupCounter);
- V8::SetCreateHistogramFunction(CreateHistogram);
- V8::SetAddHistogramSampleFunction(AddHistogramSample);
-}
-
-
-int CounterMap::Hash(const char* name) {
- int h = 0;
- int c;
- while ((c = *name++) != 0) {
- h += h << 5;
- h += c;
- }
- return h;
-}
-
-
-Counter* Shell::GetCounter(const char* name, bool is_histogram) {
- Counter* counter = counter_map_->Lookup(name);
-
- if (counter == NULL) {
- counter = counters_->GetNextCounter();
- if (counter != NULL) {
- counter_map_->Set(name, counter);
- counter->Bind(name, is_histogram);
- }
- } else {
- ASSERT(counter->is_histogram() == is_histogram);
- }
- return counter;
-}
-
-
-int* Shell::LookupCounter(const char* name) {
- Counter* counter = GetCounter(name, false);
-
- if (counter != NULL) {
- return counter->ptr();
- } else {
- return NULL;
- }
-}
-
-
-void* Shell::CreateHistogram(const char* name,
- int min,
- int max,
- size_t buckets) {
- return GetCounter(name, true);
-}
-
-
-void Shell::AddHistogramSample(void* histogram, int sample) {
- Counter* counter = reinterpret_cast<Counter*>(histogram);
- counter->AddSample(sample);
-}
-
-
-void Shell::Initialize() {
- Shell::counter_map_ = new CounterMap();
- // Set up counters
- if (i::StrLength(i::FLAG_map_counters) != 0)
- MapCounters(i::FLAG_map_counters);
- if (i::FLAG_dump_counters) {
- V8::SetCounterFunction(LookupCounter);
- V8::SetCreateHistogramFunction(CreateHistogram);
- V8::SetAddHistogramSampleFunction(AddHistogramSample);
- }
-
- // Initialize the global objects
- HandleScope scope;
- Handle<ObjectTemplate> global_template = ObjectTemplate::New();
- global_template->Set(String::New("print"), FunctionTemplate::New(Print));
- global_template->Set(String::New("write"), FunctionTemplate::New(Write));
- global_template->Set(String::New("read"), FunctionTemplate::New(Read));
- global_template->Set(String::New("readline"),
- FunctionTemplate::New(ReadLine));
- global_template->Set(String::New("load"), FunctionTemplate::New(Load));
- global_template->Set(String::New("quit"), FunctionTemplate::New(Quit));
- global_template->Set(String::New("version"), FunctionTemplate::New(Version));
-
-#ifdef LIVE_OBJECT_LIST
- global_template->Set(String::New("lol_is_enabled"), Boolean::New(true));
-#else
- global_template->Set(String::New("lol_is_enabled"), Boolean::New(false));
-#endif
-
- Handle<ObjectTemplate> os_templ = ObjectTemplate::New();
- AddOSMethods(os_templ);
- global_template->Set(String::New("os"), os_templ);
-
- utility_context_ = Context::New(NULL, global_template);
- utility_context_->SetSecurityToken(Undefined());
- Context::Scope utility_scope(utility_context_);
-
- i::JSArguments js_args = i::FLAG_js_arguments;
- i::Handle<i::FixedArray> arguments_array =
- FACTORY->NewFixedArray(js_args.argc());
- for (int j = 0; j < js_args.argc(); j++) {
- i::Handle<i::String> arg =
- FACTORY->NewStringFromUtf8(i::CStrVector(js_args[j]));
- arguments_array->set(j, *arg);
- }
- i::Handle<i::JSArray> arguments_jsarray =
- FACTORY->NewJSArrayWithElements(arguments_array);
- global_template->Set(String::New("arguments"),
- Utils::ToLocal(arguments_jsarray));
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Install the debugger object in the utility scope
- i::Debug* debug = i::Isolate::Current()->debug();
- debug->Load();
- i::Handle<i::JSObject> js_debug
- = i::Handle<i::JSObject>(debug->debug_context()->global());
- utility_context_->Global()->Set(String::New("$debug"),
- Utils::ToLocal(js_debug));
-#endif
-
- // Run the d8 shell utility script in the utility context
- int source_index = i::NativesCollection<i::D8>::GetIndex("d8");
- i::Vector<const char> shell_source
- = i::NativesCollection<i::D8>::GetScriptSource(source_index);
- i::Vector<const char> shell_source_name
- = i::NativesCollection<i::D8>::GetScriptName(source_index);
- Handle<String> source = String::New(shell_source.start(),
- shell_source.length());
- Handle<String> name = String::New(shell_source_name.start(),
- shell_source_name.length());
- Handle<Script> script = Script::Compile(source, name);
- script->Run();
-
- // Mark the d8 shell script as native to avoid it showing up as normal source
- // in the debugger.
- i::Handle<i::Object> compiled_script = Utils::OpenHandle(*script);
- i::Handle<i::Script> script_object = compiled_script->IsJSFunction()
- ? i::Handle<i::Script>(i::Script::cast(
- i::JSFunction::cast(*compiled_script)->shared()->script()))
- : i::Handle<i::Script>(i::Script::cast(
- i::SharedFunctionInfo::cast(*compiled_script)->script()));
- script_object->set_type(i::Smi::FromInt(i::Script::TYPE_NATIVE));
-
- // Create the evaluation context
- evaluation_context_ = Context::New(NULL, global_template);
- evaluation_context_->SetSecurityToken(Undefined());
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Set the security token of the debug context to allow access.
- debug->debug_context()->set_security_token(HEAP->undefined_value());
-
- // Start the debugger agent if requested.
- if (i::FLAG_debugger_agent) {
- v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port, true);
- }
-
- // Start the in-process debugger if requested.
- if (i::FLAG_debugger && !i::FLAG_debugger_agent) {
- v8::Debug::SetDebugEventListener(HandleDebugEvent);
- }
-#endif
-}
-
-
-void Shell::OnExit() {
- if (i::FLAG_dump_counters) {
- ::printf("+----------------------------------------+-------------+\n");
- ::printf("| Name | Value |\n");
- ::printf("+----------------------------------------+-------------+\n");
- for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
- Counter* counter = i.CurrentValue();
- if (counter->is_histogram()) {
- ::printf("| c:%-36s | %11i |\n", i.CurrentKey(), counter->count());
- ::printf("| t:%-36s | %11i |\n",
- i.CurrentKey(),
- counter->sample_total());
- } else {
- ::printf("| %-38s | %11i |\n", i.CurrentKey(), counter->count());
- }
- }
- ::printf("+----------------------------------------+-------------+\n");
- }
- if (counters_file_ != NULL)
- delete counters_file_;
-}
-
-
-static char* ReadChars(const char* name, int* size_out) {
- v8::Unlocker unlocker; // Release the V8 lock while reading files.
- FILE* file = i::OS::FOpen(name, "rb");
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
- rewind(file);
-
- char* chars = new char[size + 1];
- chars[size] = '\0';
- for (int i = 0; i < size;) {
- int read = fread(&chars[i], 1, size - i, file);
- i += read;
- }
- fclose(file);
- *size_out = size;
- return chars;
-}
-
-
-static char* ReadToken(char* data, char token) {
- char* next = i::OS::StrChr(data, token);
- if (next != NULL) {
- *next = '\0';
- return (next + 1);
- }
-
- return NULL;
-}
-
-
-static char* ReadLine(char* data) {
- return ReadToken(data, '\n');
-}
-
-
-static char* ReadWord(char* data) {
- return ReadToken(data, ' ');
-}
-
-
-// Reads a file into a v8 string.
-Handle<String> Shell::ReadFile(const char* name) {
- int size = 0;
- char* chars = ReadChars(name, &size);
- if (chars == NULL) return Handle<String>();
- Handle<String> result = String::New(chars);
- delete[] chars;
- return result;
-}
-
-
-void Shell::RunShell() {
- LineEditor* editor = LineEditor::Get();
- printf("V8 version %s [console: %s]\n", V8::GetVersion(), editor->name());
- if (i::FLAG_debugger) {
- printf("JavaScript debugger enabled\n");
- }
- editor->Open();
- while (true) {
- Locker locker;
- HandleScope handle_scope;
- Context::Scope context_scope(evaluation_context_);
- i::SmartPointer<char> input = editor->Prompt(Shell::kPrompt);
- if (input.is_empty())
- break;
- editor->AddHistory(*input);
- Handle<String> name = String::New("(d8)");
- ExecuteString(String::New(*input), name, true, true);
- }
- editor->Close();
- printf("\n");
-}
-
-
-class ShellThread : public i::Thread {
- public:
- ShellThread(i::Isolate* isolate, int no, i::Vector<const char> files)
- : Thread(isolate, "d8:ShellThread"),
- no_(no), files_(files) { }
- virtual void Run();
- private:
- int no_;
- i::Vector<const char> files_;
-};
-
-
-void ShellThread::Run() {
- // Prepare the context for this thread.
- Locker locker;
- HandleScope scope;
- Handle<ObjectTemplate> global_template = ObjectTemplate::New();
- global_template->Set(String::New("print"),
- FunctionTemplate::New(Shell::Print));
- global_template->Set(String::New("write"),
- FunctionTemplate::New(Shell::Write));
- global_template->Set(String::New("read"),
- FunctionTemplate::New(Shell::Read));
- global_template->Set(String::New("readline"),
- FunctionTemplate::New(Shell::ReadLine));
- global_template->Set(String::New("load"),
- FunctionTemplate::New(Shell::Load));
- global_template->Set(String::New("yield"),
- FunctionTemplate::New(Shell::Yield));
- global_template->Set(String::New("version"),
- FunctionTemplate::New(Shell::Version));
-
- char* ptr = const_cast<char*>(files_.start());
- while ((ptr != NULL) && (*ptr != '\0')) {
- // For each newline-separated line.
- char* next_line = ReadLine(ptr);
-
- if (*ptr == '#') {
- // Skip comment lines.
- ptr = next_line;
- continue;
- }
-
- Persistent<Context> thread_context = Context::New(NULL, global_template);
- thread_context->SetSecurityToken(Undefined());
- Context::Scope context_scope(thread_context);
-
- while ((ptr != NULL) && (*ptr != '\0')) {
- char* filename = ptr;
- ptr = ReadWord(ptr);
-
- // Skip empty strings.
- if (strlen(filename) == 0) {
- break;
- }
-
- Handle<String> str = Shell::ReadFile(filename);
- if (str.IsEmpty()) {
- printf("WARNING: %s not found\n", filename);
- break;
- }
-
- Shell::ExecuteString(str, String::New(filename), false, false);
- }
-
- thread_context.Dispose();
- ptr = next_line;
- }
-}
-
-
-int Shell::Main(int argc, char* argv[]) {
- i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
- if (i::FLAG_help) {
- return 1;
- }
- Initialize();
- bool run_shell = (argc == 1);
-
- // Default use preemption if threads are created.
- bool use_preemption = true;
-
- // Default to use lowest possible thread preemption interval to test as many
- // edgecases as possible.
- int preemption_interval = 1;
-
- i::List<i::Thread*> threads(1);
-
- {
- // Acquire the V8 lock once initialization has finished. Since the thread
- // below may spawn new threads accessing V8 holding the V8 lock here is
- // mandatory.
- Locker locker;
- Context::Scope context_scope(evaluation_context_);
- for (int i = 1; i < argc; i++) {
- char* str = argv[i];
- if (strcmp(str, "--shell") == 0) {
- run_shell = true;
- } else if (strcmp(str, "--preemption") == 0) {
- use_preemption = true;
- } else if (strcmp(str, "--no-preemption") == 0) {
- use_preemption = false;
- } else if (strcmp(str, "--preemption-interval") == 0) {
- if (i + 1 < argc) {
- char* end = NULL;
- preemption_interval = strtol(argv[++i], &end, 10); // NOLINT
- if (preemption_interval <= 0 || *end != '\0' || errno == ERANGE) {
- printf("Invalid value for --preemption-interval '%s'\n", argv[i]);
- return 1;
- }
- } else {
- printf("Missing value for --preemption-interval\n");
- return 1;
- }
- } else if (strcmp(str, "-f") == 0) {
- // Ignore any -f flags for compatibility with other stand-alone
- // JavaScript engines.
- continue;
- } else if (strncmp(str, "--", 2) == 0) {
- printf("Warning: unknown flag %s.\nTry --help for options\n", str);
- } else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
- // Execute argument given to -e option directly.
- v8::HandleScope handle_scope;
- v8::Handle<v8::String> file_name = v8::String::New("unnamed");
- v8::Handle<v8::String> source = v8::String::New(argv[i + 1]);
- if (!ExecuteString(source, file_name, false, true)) {
- OnExit();
- return 1;
- }
- i++;
- } else if (strcmp(str, "-p") == 0 && i + 1 < argc) {
- int size = 0;
- const char* files = ReadChars(argv[++i], &size);
- if (files == NULL) return 1;
- ShellThread* thread =
- new ShellThread(i::Isolate::Current(),
- threads.length(),
- i::Vector<const char>(files, size));
- thread->Start();
- threads.Add(thread);
- } else {
- // Use all other arguments as names of files to load and run.
- HandleScope handle_scope;
- Handle<String> file_name = v8::String::New(str);
- Handle<String> source = ReadFile(str);
- if (source.IsEmpty()) {
- printf("Error reading '%s'\n", str);
- return 1;
- }
- if (!ExecuteString(source, file_name, false, true)) {
- OnExit();
- return 1;
- }
- }
- }
-
- // Start preemption if threads have been created and preemption is enabled.
- if (threads.length() > 0 && use_preemption) {
- Locker::StartPreemption(preemption_interval);
- }
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Run the remote debugger if requested.
- if (i::FLAG_remote_debugger) {
- RunRemoteDebugger(i::FLAG_debugger_port);
- return 0;
- }
-#endif
- }
- if (run_shell)
- RunShell();
- for (int i = 0; i < threads.length(); i++) {
- i::Thread* thread = threads[i];
- thread->Join();
- delete thread;
- }
- OnExit();
- return 0;
-}
-
-
-} // namespace v8
-
-
-int main(int argc, char* argv[]) {
- return v8::Shell::Main(argc, argv);
-}
diff --git a/src/3rdparty/v8/src/d8.h b/src/3rdparty/v8/src/d8.h
deleted file mode 100644
index de1fe0d..0000000
--- a/src/3rdparty/v8/src/d8.h
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_D8_H_
-#define V8_D8_H_
-
-#include "v8.h"
-#include "hashmap.h"
-
-
-namespace v8 {
-
-
-namespace i = v8::internal;
-
-
-// A single counter in a counter collection.
-class Counter {
- public:
- static const int kMaxNameSize = 64;
- int32_t* Bind(const char* name, bool histogram);
- int32_t* ptr() { return &count_; }
- int32_t count() { return count_; }
- int32_t sample_total() { return sample_total_; }
- bool is_histogram() { return is_histogram_; }
- void AddSample(int32_t sample);
- private:
- int32_t count_;
- int32_t sample_total_;
- bool is_histogram_;
- uint8_t name_[kMaxNameSize];
-};
-
-
-// A set of counters and associated information. An instance of this
-// class is stored directly in the memory-mapped counters file if
-// the --map-counters options is used
-class CounterCollection {
- public:
- CounterCollection();
- Counter* GetNextCounter();
- private:
- static const unsigned kMaxCounters = 256;
- uint32_t magic_number_;
- uint32_t max_counters_;
- uint32_t max_name_size_;
- uint32_t counters_in_use_;
- Counter counters_[kMaxCounters];
-};
-
-
-class CounterMap {
- public:
- CounterMap(): hash_map_(Match) { }
- Counter* Lookup(const char* name) {
- i::HashMap::Entry* answer = hash_map_.Lookup(
- const_cast<char*>(name),
- Hash(name),
- false);
- if (!answer) return NULL;
- return reinterpret_cast<Counter*>(answer->value);
- }
- void Set(const char* name, Counter* value) {
- i::HashMap::Entry* answer = hash_map_.Lookup(
- const_cast<char*>(name),
- Hash(name),
- true);
- ASSERT(answer != NULL);
- answer->value = value;
- }
- class Iterator {
- public:
- explicit Iterator(CounterMap* map)
- : map_(&map->hash_map_), entry_(map_->Start()) { }
- void Next() { entry_ = map_->Next(entry_); }
- bool More() { return entry_ != NULL; }
- const char* CurrentKey() { return static_cast<const char*>(entry_->key); }
- Counter* CurrentValue() { return static_cast<Counter*>(entry_->value); }
- private:
- i::HashMap* map_;
- i::HashMap::Entry* entry_;
- };
- private:
- static int Hash(const char* name);
- static bool Match(void* key1, void* key2);
- i::HashMap hash_map_;
-};
-
-
-class Shell: public i::AllStatic {
- public:
- static bool ExecuteString(Handle<String> source,
- Handle<Value> name,
- bool print_result,
- bool report_exceptions);
- static const char* ToCString(const v8::String::Utf8Value& value);
- static void ReportException(TryCatch* try_catch);
- static void Initialize();
- static void OnExit();
- static int* LookupCounter(const char* name);
- static void* CreateHistogram(const char* name,
- int min,
- int max,
- size_t buckets);
- static void AddHistogramSample(void* histogram, int sample);
- static void MapCounters(const char* name);
- static Handle<String> ReadFile(const char* name);
- static void RunShell();
- static int Main(int argc, char* argv[]);
- static Handle<Array> GetCompletions(Handle<String> text,
- Handle<String> full);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- static Handle<Object> DebugMessageDetails(Handle<String> message);
- static Handle<Value> DebugCommandToJSONRequest(Handle<String> command);
-#endif
-
-#ifdef WIN32
-#undef Yield
-#endif
-
- static Handle<Value> Print(const Arguments& args);
- static Handle<Value> Write(const Arguments& args);
- static Handle<Value> Yield(const Arguments& args);
- static Handle<Value> Quit(const Arguments& args);
- static Handle<Value> Version(const Arguments& args);
- static Handle<Value> Read(const Arguments& args);
- static Handle<Value> ReadLine(const Arguments& args);
- static Handle<Value> Load(const Arguments& args);
- // The OS object on the global object contains methods for performing
- // operating system calls:
- //
- // os.system("program_name", ["arg1", "arg2", ...], timeout1, timeout2) will
- // run the command, passing the arguments to the program. The standard output
- // of the program will be picked up and returned as a multiline string. If
- // timeout1 is present then it should be a number. -1 indicates no timeout
- // and a positive number is used as a timeout in milliseconds that limits the
- // time spent waiting between receiving output characters from the program.
- // timeout2, if present, should be a number indicating the limit in
- // milliseconds on the total running time of the program. Exceptions are
- // thrown on timeouts or other errors or if the exit status of the program
- // indicates an error.
- //
- // os.chdir(dir) changes directory to the given directory. Throws an
- // exception/ on error.
- //
- // os.setenv(variable, value) sets an environment variable. Repeated calls to
- // this method leak memory due to the API of setenv in the standard C library.
- //
- // os.umask(alue) calls the umask system call and returns the old umask.
- //
- // os.mkdirp(name, mask) creates a directory. The mask (if present) is anded
- // with the current umask. Intermediate directories are created if necessary.
- // An exception is not thrown if the directory already exists. Analogous to
- // the "mkdir -p" command.
- static Handle<Value> OSObject(const Arguments& args);
- static Handle<Value> System(const Arguments& args);
- static Handle<Value> ChangeDirectory(const Arguments& args);
- static Handle<Value> SetEnvironment(const Arguments& args);
- static Handle<Value> UnsetEnvironment(const Arguments& args);
- static Handle<Value> SetUMask(const Arguments& args);
- static Handle<Value> MakeDirectory(const Arguments& args);
- static Handle<Value> RemoveDirectory(const Arguments& args);
-
- static void AddOSMethods(Handle<ObjectTemplate> os_template);
-
- static Handle<Context> utility_context() { return utility_context_; }
-
- static const char* kHistoryFileName;
- static const char* kPrompt;
- private:
- static Persistent<Context> utility_context_;
- static Persistent<Context> evaluation_context_;
- static CounterMap* counter_map_;
- // We statically allocate a set of local counters to be used if we
- // don't want to store the stats in a memory-mapped file
- static CounterCollection local_counters_;
- static CounterCollection* counters_;
- static i::OS::MemoryMappedFile* counters_file_;
- static Counter* GetCounter(const char* name, bool is_histogram);
-};
-
-
-class LineEditor {
- public:
- enum Type { DUMB = 0, READLINE = 1 };
- LineEditor(Type type, const char* name);
- virtual ~LineEditor() { }
-
- virtual i::SmartPointer<char> Prompt(const char* prompt) = 0;
- virtual bool Open() { return true; }
- virtual bool Close() { return true; }
- virtual void AddHistory(const char* str) { }
-
- const char* name() { return name_; }
- static LineEditor* Get();
- private:
- Type type_;
- const char* name_;
- LineEditor* next_;
- static LineEditor* first_;
-};
-
-
-} // namespace v8
-
-
-#endif // V8_D8_H_
diff --git a/src/3rdparty/v8/src/d8.js b/src/3rdparty/v8/src/d8.js
deleted file mode 100644
index 9798078..0000000
--- a/src/3rdparty/v8/src/d8.js
+++ /dev/null
@@ -1,2798 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-String.prototype.startsWith = function (str) {
- if (str.length > this.length)
- return false;
- return this.substr(0, str.length) == str;
-}
-
-function log10(num) {
- return Math.log(num)/Math.log(10);
-}
-
-function ToInspectableObject(obj) {
- if (!obj && typeof obj === 'object') {
- return void 0;
- } else {
- return Object(obj);
- }
-}
-
-function GetCompletions(global, last, full) {
- var full_tokens = full.split();
- full = full_tokens.pop();
- var parts = full.split('.');
- parts.pop();
- var current = global;
- for (var i = 0; i < parts.length; i++) {
- var part = parts[i];
- var next = current[part];
- if (!next)
- return [];
- current = next;
- }
- var result = [];
- current = ToInspectableObject(current);
- while (typeof current !== 'undefined') {
- var mirror = new $debug.ObjectMirror(current);
- var properties = mirror.properties();
- for (var i = 0; i < properties.length; i++) {
- var name = properties[i].name();
- if (typeof name === 'string' && name.startsWith(last))
- result.push(name);
- }
- current = ToInspectableObject(current.__proto__);
- }
- return result;
-}
-
-
-// Global object holding debugger related constants and state.
-const Debug = {};
-
-
-// Debug events which can occour in the V8 JavaScript engine. These originate
-// from the API include file v8-debug.h.
-Debug.DebugEvent = { Break: 1,
- Exception: 2,
- NewFunction: 3,
- BeforeCompile: 4,
- AfterCompile: 5 };
-
-
-// The different types of scripts matching enum ScriptType in objects.h.
-Debug.ScriptType = { Native: 0,
- Extension: 1,
- Normal: 2 };
-
-
-// The different types of script compilations matching enum
-// Script::CompilationType in objects.h.
-Debug.ScriptCompilationType = { Host: 0,
- Eval: 1,
- JSON: 2 };
-
-
-// The different types of scopes matching constants runtime.cc.
-Debug.ScopeType = { Global: 0,
- Local: 1,
- With: 2,
- Closure: 3,
- Catch: 4 };
-
-
-// Current debug state.
-const kNoFrame = -1;
-Debug.State = {
- currentFrame: kNoFrame,
- displaySourceStartLine: -1,
- displaySourceEndLine: -1,
- currentSourceLine: -1
-}
-var trace_compile = false; // Tracing all compile events?
-var trace_debug_json = false; // Tracing all debug json packets?
-var last_cmd_line = '';
-//var lol_is_enabled; // Set to true in d8.cc if LIVE_OBJECT_LIST is defined.
-var lol_next_dump_index = 0;
-const kDefaultLolLinesToPrintAtATime = 10;
-const kMaxLolLinesToPrintAtATime = 1000;
-var repeat_cmd_line = '';
-var is_running = true;
-
-// Copied from debug-delay.js. This is needed below:
-function ScriptTypeFlag(type) {
- return (1 << type);
-}
-
-
-// Process a debugger JSON message into a display text and a running status.
-// This function returns an object with properties "text" and "running" holding
-// this information.
-function DebugMessageDetails(message) {
- if (trace_debug_json) {
- print("received: '" + message + "'");
- }
- // Convert the JSON string to an object.
- var response = new ProtocolPackage(message);
- is_running = response.running();
-
- if (response.type() == 'event') {
- return DebugEventDetails(response);
- } else {
- return DebugResponseDetails(response);
- }
-}
-
-function DebugEventDetails(response) {
- details = {text:'', running:false}
-
- // Get the running state.
- details.running = response.running();
-
- var body = response.body();
- var result = '';
- switch (response.event()) {
- case 'break':
- if (body.breakpoints) {
- result += 'breakpoint';
- if (body.breakpoints.length > 1) {
- result += 's';
- }
- result += ' #';
- for (var i = 0; i < body.breakpoints.length; i++) {
- if (i > 0) {
- result += ', #';
- }
- result += body.breakpoints[i];
- }
- } else {
- result += 'break';
- }
- result += ' in ';
- result += body.invocationText;
- result += ', ';
- result += SourceInfo(body);
- result += '\n';
- result += SourceUnderline(body.sourceLineText, body.sourceColumn);
- Debug.State.currentSourceLine = body.sourceLine;
- Debug.State.displaySourceStartLine = -1;
- Debug.State.displaySourceEndLine = -1;
- Debug.State.currentFrame = 0;
- details.text = result;
- break;
-
- case 'exception':
- if (body.uncaught) {
- result += 'Uncaught: ';
- } else {
- result += 'Exception: ';
- }
- result += '"';
- result += body.exception.text;
- result += '"';
- if (body.sourceLine >= 0) {
- result += ', ';
- result += SourceInfo(body);
- result += '\n';
- result += SourceUnderline(body.sourceLineText, body.sourceColumn);
- Debug.State.currentSourceLine = body.sourceLine;
- Debug.State.displaySourceStartLine = -1;
- Debug.State.displaySourceEndLine = -1;
- Debug.State.currentFrame = 0;
- } else {
- result += ' (empty stack)';
- Debug.State.currentSourceLine = -1;
- Debug.State.displaySourceStartLine = -1;
- Debug.State.displaySourceEndLine = -1;
- Debug.State.currentFrame = kNoFrame;
- }
- details.text = result;
- break;
-
- case 'afterCompile':
- if (trace_compile) {
- result = 'Source ' + body.script.name + ' compiled:\n'
- var source = body.script.source;
- if (!(source[source.length - 1] == '\n')) {
- result += source;
- } else {
- result += source.substring(0, source.length - 1);
- }
- }
- details.text = result;
- break;
-
- case 'scriptCollected':
- details.text = result;
- break;
-
- default:
- details.text = 'Unknown debug event ' + response.event();
- }
-
- return details;
-};
-
-
-function SourceInfo(body) {
- var result = '';
-
- if (body.script) {
- if (body.script.name) {
- result += body.script.name;
- } else {
- result += '[unnamed]';
- }
- }
- result += ' line ';
- result += body.sourceLine + 1;
- result += ' column ';
- result += body.sourceColumn + 1;
-
- return result;
-}
-
-
-function SourceUnderline(source_text, position) {
- if (!source_text) {
- return;
- }
-
- // Create an underline with a caret pointing to the source position. If the
- // source contains a tab character the underline will have a tab character in
- // the same place otherwise the underline will have a space character.
- var underline = '';
- for (var i = 0; i < position; i++) {
- if (source_text[i] == '\t') {
- underline += '\t';
- } else {
- underline += ' ';
- }
- }
- underline += '^';
-
- // Return the source line text with the underline beneath.
- return source_text + '\n' + underline;
-};
-
-
-// Converts a text command to a JSON request.
-function DebugCommandToJSONRequest(cmd_line) {
- var result = new DebugRequest(cmd_line).JSONRequest();
- if (trace_debug_json && result) {
- print("sending: '" + result + "'");
- }
- return result;
-};
-
-
-function DebugRequest(cmd_line) {
- // If the very first character is a { assume that a JSON request have been
- // entered as a command. Converting that to a JSON request is trivial.
- if (cmd_line && cmd_line.length > 0 && cmd_line.charAt(0) == '{') {
- this.request_ = cmd_line;
- return;
- }
-
- // Check for a simple carriage return to repeat the last command:
- var is_repeating = false;
- if (cmd_line == '\n') {
- if (is_running) {
- cmd_line = 'break'; // Not in debugger mode, break with a frame request.
- } else {
- cmd_line = repeat_cmd_line; // use command to repeat.
- is_repeating = true;
- }
- }
- if (!is_running) { // Only save the command if in debugger mode.
- repeat_cmd_line = cmd_line; // save last command.
- }
-
- // Trim string for leading and trailing whitespace.
- cmd_line = cmd_line.replace(/^\s+|\s+$/g, '');
-
- // Find the command.
- var pos = cmd_line.indexOf(' ');
- var cmd;
- var args;
- if (pos == -1) {
- cmd = cmd_line;
- args = '';
- } else {
- cmd = cmd_line.slice(0, pos);
- args = cmd_line.slice(pos).replace(/^\s+|\s+$/g, '');
- }
-
- if ((cmd === undefined) || !cmd) {
- this.request_ = void 0;
- return;
- }
-
- last_cmd = cmd;
-
- // Switch on command.
- switch (cmd) {
- case 'continue':
- case 'c':
- this.request_ = this.continueCommandToJSONRequest_(args);
- break;
-
- case 'step':
- case 's':
- this.request_ = this.stepCommandToJSONRequest_(args, 'in');
- break;
-
- case 'stepi':
- case 'si':
- this.request_ = this.stepCommandToJSONRequest_(args, 'min');
- break;
-
- case 'next':
- case 'n':
- this.request_ = this.stepCommandToJSONRequest_(args, 'next');
- break;
-
- case 'finish':
- case 'fin':
- this.request_ = this.stepCommandToJSONRequest_(args, 'out');
- break;
-
- case 'backtrace':
- case 'bt':
- this.request_ = this.backtraceCommandToJSONRequest_(args);
- break;
-
- case 'frame':
- case 'f':
- this.request_ = this.frameCommandToJSONRequest_(args);
- break;
-
- case 'scopes':
- this.request_ = this.scopesCommandToJSONRequest_(args);
- break;
-
- case 'scope':
- this.request_ = this.scopeCommandToJSONRequest_(args);
- break;
-
- case 'disconnect':
- case 'exit':
- case 'quit':
- this.request_ = this.disconnectCommandToJSONRequest_(args);
- break;
-
- case 'up':
- this.request_ =
- this.frameCommandToJSONRequest_('' +
- (Debug.State.currentFrame + 1));
- break;
-
- case 'down':
- case 'do':
- this.request_ =
- this.frameCommandToJSONRequest_('' +
- (Debug.State.currentFrame - 1));
- break;
-
- case 'set':
- case 'print':
- case 'p':
- this.request_ = this.printCommandToJSONRequest_(args);
- break;
-
- case 'dir':
- this.request_ = this.dirCommandToJSONRequest_(args);
- break;
-
- case 'references':
- this.request_ = this.referencesCommandToJSONRequest_(args);
- break;
-
- case 'instances':
- this.request_ = this.instancesCommandToJSONRequest_(args);
- break;
-
- case 'list':
- case 'l':
- this.request_ = this.listCommandToJSONRequest_(args);
- break;
- case 'source':
- this.request_ = this.sourceCommandToJSONRequest_(args);
- break;
-
- case 'scripts':
- case 'script':
- case 'scr':
- this.request_ = this.scriptsCommandToJSONRequest_(args);
- break;
-
- case 'break':
- case 'b':
- this.request_ = this.breakCommandToJSONRequest_(args);
- break;
-
- case 'breakpoints':
- case 'bb':
- this.request_ = this.breakpointsCommandToJSONRequest_(args);
- break;
-
- case 'clear':
- case 'delete':
- case 'd':
- this.request_ = this.clearCommandToJSONRequest_(args);
- break;
-
- case 'threads':
- this.request_ = this.threadsCommandToJSONRequest_(args);
- break;
-
- case 'cond':
- this.request_ = this.changeBreakpointCommandToJSONRequest_(args, 'cond');
- break;
-
- case 'enable':
- case 'en':
- this.request_ =
- this.changeBreakpointCommandToJSONRequest_(args, 'enable');
- break;
-
- case 'disable':
- case 'dis':
- this.request_ =
- this.changeBreakpointCommandToJSONRequest_(args, 'disable');
- break;
-
- case 'ignore':
- this.request_ =
- this.changeBreakpointCommandToJSONRequest_(args, 'ignore');
- break;
-
- case 'info':
- case 'inf':
- this.request_ = this.infoCommandToJSONRequest_(args);
- break;
-
- case 'flags':
- this.request_ = this.v8FlagsToJSONRequest_(args);
- break;
-
- case 'gc':
- this.request_ = this.gcToJSONRequest_(args);
- break;
-
- case 'trace':
- case 'tr':
- // Return undefined to indicate command handled internally (no JSON).
- this.request_ = void 0;
- this.traceCommand_(args);
- break;
-
- case 'help':
- case '?':
- this.helpCommand_(args);
- // Return undefined to indicate command handled internally (no JSON).
- this.request_ = void 0;
- break;
-
- case 'liveobjectlist':
- case 'lol':
- if (lol_is_enabled) {
- this.request_ = this.lolToJSONRequest_(args, is_repeating);
- break;
- }
-
- default:
- throw new Error('Unknown command "' + cmd + '"');
- }
-}
-
-DebugRequest.prototype.JSONRequest = function() {
- return this.request_;
-}
-
-
-function RequestPacket(command) {
- this.seq = 0;
- this.type = 'request';
- this.command = command;
-}
-
-
-RequestPacket.prototype.toJSONProtocol = function() {
- // Encode the protocol header.
- var json = '{';
- json += '"seq":' + this.seq;
- json += ',"type":"' + this.type + '"';
- if (this.command) {
- json += ',"command":' + StringToJSON_(this.command);
- }
- if (this.arguments) {
- json += ',"arguments":';
- // Encode the arguments part.
- if (this.arguments.toJSONProtocol) {
- json += this.arguments.toJSONProtocol()
- } else {
- json += SimpleObjectToJSON_(this.arguments);
- }
- }
- json += '}';
- return json;
-}
-
-
-DebugRequest.prototype.createRequest = function(command) {
- return new RequestPacket(command);
-};
-
-
-// Note: we use detected command repetition as a signal for continuation here.
-DebugRequest.prototype.createLOLRequest = function(command,
- start_index,
- lines_to_dump,
- is_continuation) {
- if (is_continuation) {
- start_index = lol_next_dump_index;
- }
-
- if (lines_to_dump) {
- lines_to_dump = parseInt(lines_to_dump);
- } else {
- lines_to_dump = kDefaultLolLinesToPrintAtATime;
- }
- if (lines_to_dump > kMaxLolLinesToPrintAtATime) {
- lines_to_dump = kMaxLolLinesToPrintAtATime;
- }
-
- // Save the next start_index to dump from:
- lol_next_dump_index = start_index + lines_to_dump;
-
- var request = this.createRequest(command);
- request.arguments = {};
- request.arguments.start = start_index;
- request.arguments.count = lines_to_dump;
-
- return request;
-};
-
-
-// Create a JSON request for the evaluation command.
-DebugRequest.prototype.makeEvaluateJSONRequest_ = function(expression) {
- // Global varaible used to store whether a handle was requested.
- lookup_handle = null;
-
- if (lol_is_enabled) {
- // Check if the expression is a obj id in the form @<obj id>.
- var obj_id_match = expression.match(/^@([0-9]+)$/);
- if (obj_id_match) {
- var obj_id = parseInt(obj_id_match[1]);
- // Build a dump request.
- var request = this.createRequest('getobj');
- request.arguments = {};
- request.arguments.obj_id = obj_id;
- return request.toJSONProtocol();
- }
- }
-
- // Check if the expression is a handle id in the form #<handle>#.
- var handle_match = expression.match(/^#([0-9]*)#$/);
- if (handle_match) {
- // Remember the handle requested in a global variable.
- lookup_handle = parseInt(handle_match[1]);
- // Build a lookup request.
- var request = this.createRequest('lookup');
- request.arguments = {};
- request.arguments.handles = [ lookup_handle ];
- return request.toJSONProtocol();
- } else {
- // Build an evaluate request.
- var request = this.createRequest('evaluate');
- request.arguments = {};
- request.arguments.expression = expression;
- // Request a global evaluation if there is no current frame.
- if (Debug.State.currentFrame == kNoFrame) {
- request.arguments.global = true;
- }
- return request.toJSONProtocol();
- }
-};
-
-
-// Create a JSON request for the references/instances command.
-DebugRequest.prototype.makeReferencesJSONRequest_ = function(handle, type) {
- // Build a references request.
- var handle_match = handle.match(/^#([0-9]*)#$/);
- if (handle_match) {
- var request = this.createRequest('references');
- request.arguments = {};
- request.arguments.type = type;
- request.arguments.handle = parseInt(handle_match[1]);
- return request.toJSONProtocol();
- } else {
- throw new Error('Invalid object id.');
- }
-};
-
-
-// Create a JSON request for the continue command.
-DebugRequest.prototype.continueCommandToJSONRequest_ = function(args) {
- var request = this.createRequest('continue');
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the step command.
-DebugRequest.prototype.stepCommandToJSONRequest_ = function(args, type) {
- // Requesting a step is through the continue command with additional
- // arguments.
- var request = this.createRequest('continue');
- request.arguments = {};
-
- // Process arguments if any.
-
- // Only process args if the command is 'step' which is indicated by type being
- // set to 'in'. For all other commands, ignore the args.
- if (args && args.length > 0) {
- args = args.split(/\s+/g);
-
- if (args.length > 2) {
- throw new Error('Invalid step arguments.');
- }
-
- if (args.length > 0) {
- // Check if we have a gdb stype step command. If so, the 1st arg would
- // be the step count. If it's not a number, then assume that we're
- // parsing for the legacy v8 step command.
- var stepcount = Number(args[0]);
- if (stepcount == Number.NaN) {
- // No step count at arg 1. Process as legacy d8 step command:
- if (args.length == 2) {
- var stepcount = parseInt(args[1]);
- if (isNaN(stepcount) || stepcount <= 0) {
- throw new Error('Invalid step count argument "' + args[0] + '".');
- }
- request.arguments.stepcount = stepcount;
- }
-
- // Get the step action.
- switch (args[0]) {
- case 'in':
- case 'i':
- request.arguments.stepaction = 'in';
- break;
-
- case 'min':
- case 'm':
- request.arguments.stepaction = 'min';
- break;
-
- case 'next':
- case 'n':
- request.arguments.stepaction = 'next';
- break;
-
- case 'out':
- case 'o':
- request.arguments.stepaction = 'out';
- break;
-
- default:
- throw new Error('Invalid step argument "' + args[0] + '".');
- }
-
- } else {
- // gdb style step commands:
- request.arguments.stepaction = type;
- request.arguments.stepcount = stepcount;
- }
- }
- } else {
- // Default is step of the specified type.
- request.arguments.stepaction = type;
- }
-
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the backtrace command.
-DebugRequest.prototype.backtraceCommandToJSONRequest_ = function(args) {
- // Build a backtrace request from the text command.
- var request = this.createRequest('backtrace');
-
- // Default is to show top 10 frames.
- request.arguments = {};
- request.arguments.fromFrame = 0;
- request.arguments.toFrame = 10;
-
- args = args.split(/\s*[ ]+\s*/g);
- if (args.length == 1 && args[0].length > 0) {
- var frameCount = parseInt(args[0]);
- if (frameCount > 0) {
- // Show top frames.
- request.arguments.fromFrame = 0;
- request.arguments.toFrame = frameCount;
- } else {
- // Show bottom frames.
- request.arguments.fromFrame = 0;
- request.arguments.toFrame = -frameCount;
- request.arguments.bottom = true;
- }
- } else if (args.length == 2) {
- var fromFrame = parseInt(args[0]);
- var toFrame = parseInt(args[1]);
- if (isNaN(fromFrame) || fromFrame < 0) {
- throw new Error('Invalid start frame argument "' + args[0] + '".');
- }
- if (isNaN(toFrame) || toFrame < 0) {
- throw new Error('Invalid end frame argument "' + args[1] + '".');
- }
- if (fromFrame > toFrame) {
- throw new Error('Invalid arguments start frame cannot be larger ' +
- 'than end frame.');
- }
- // Show frame range.
- request.arguments.fromFrame = fromFrame;
- request.arguments.toFrame = toFrame + 1;
- } else if (args.length > 2) {
- throw new Error('Invalid backtrace arguments.');
- }
-
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the frame command.
-DebugRequest.prototype.frameCommandToJSONRequest_ = function(args) {
- // Build a frame request from the text command.
- var request = this.createRequest('frame');
- args = args.split(/\s*[ ]+\s*/g);
- if (args.length > 0 && args[0].length > 0) {
- request.arguments = {};
- request.arguments.number = args[0];
- }
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the scopes command.
-DebugRequest.prototype.scopesCommandToJSONRequest_ = function(args) {
- // Build a scopes request from the text command.
- var request = this.createRequest('scopes');
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the scope command.
-DebugRequest.prototype.scopeCommandToJSONRequest_ = function(args) {
- // Build a scope request from the text command.
- var request = this.createRequest('scope');
- args = args.split(/\s*[ ]+\s*/g);
- if (args.length > 0 && args[0].length > 0) {
- request.arguments = {};
- request.arguments.number = args[0];
- }
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the print command.
-DebugRequest.prototype.printCommandToJSONRequest_ = function(args) {
- // Build an evaluate request from the text command.
- if (args.length == 0) {
- throw new Error('Missing expression.');
- }
- return this.makeEvaluateJSONRequest_(args);
-};
-
-
-// Create a JSON request for the dir command.
-DebugRequest.prototype.dirCommandToJSONRequest_ = function(args) {
- // Build an evaluate request from the text command.
- if (args.length == 0) {
- throw new Error('Missing expression.');
- }
- return this.makeEvaluateJSONRequest_(args);
-};
-
-
-// Create a JSON request for the references command.
-DebugRequest.prototype.referencesCommandToJSONRequest_ = function(args) {
- // Build an evaluate request from the text command.
- if (args.length == 0) {
- throw new Error('Missing object id.');
- }
-
- return this.makeReferencesJSONRequest_(args, 'referencedBy');
-};
-
-
-// Create a JSON request for the instances command.
-DebugRequest.prototype.instancesCommandToJSONRequest_ = function(args) {
- // Build an evaluate request from the text command.
- if (args.length == 0) {
- throw new Error('Missing object id.');
- }
-
- // Build a references request.
- return this.makeReferencesJSONRequest_(args, 'constructedBy');
-};
-
-
-// Create a JSON request for the list command.
-DebugRequest.prototype.listCommandToJSONRequest_ = function(args) {
-
- // Default is ten lines starting five lines before the current location.
- if (Debug.State.displaySourceEndLine == -1) {
- // If we list forwards, we will start listing after the last source end
- // line. Set it to start from 5 lines before the current location.
- Debug.State.displaySourceEndLine = Debug.State.currentSourceLine - 5;
- // If we list backwards, we will start listing backwards from the last
- // source start line. Set it to start from 1 lines before the current
- // location.
- Debug.State.displaySourceStartLine = Debug.State.currentSourceLine + 1;
- }
-
- var from = Debug.State.displaySourceEndLine + 1;
- var lines = 10;
-
- // Parse the arguments.
- args = args.split(/\s*,\s*/g);
- if (args == '') {
- } else if ((args.length == 1) && (args[0] == '-')) {
- from = Debug.State.displaySourceStartLine - lines;
- } else if (args.length == 2) {
- from = parseInt(args[0]);
- lines = parseInt(args[1]) - from + 1; // inclusive of the ending line.
- } else {
- throw new Error('Invalid list arguments.');
- }
- Debug.State.displaySourceStartLine = from;
- Debug.State.displaySourceEndLine = from + lines - 1;
- var sourceArgs = '' + from + ' ' + lines;
- return this.sourceCommandToJSONRequest_(sourceArgs);
-};
-
-
-// Create a JSON request for the source command.
-DebugRequest.prototype.sourceCommandToJSONRequest_ = function(args) {
- // Build a evaluate request from the text command.
- var request = this.createRequest('source');
-
- // Default is ten lines starting five lines before the current location.
- var from = Debug.State.currentSourceLine - 5;
- var lines = 10;
-
- // Parse the arguments.
- args = args.split(/\s*[ ]+\s*/g);
- if (args.length > 1 && args[0].length > 0 && args[1].length > 0) {
- from = parseInt(args[0]) - 1;
- lines = parseInt(args[1]);
- } else if (args.length > 0 && args[0].length > 0) {
- from = parseInt(args[0]) - 1;
- }
-
- if (from < 0) from = 0;
- if (lines < 0) lines = 10;
-
- // Request source arround current source location.
- request.arguments = {};
- request.arguments.fromLine = from;
- request.arguments.toLine = from + lines;
-
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the scripts command.
-DebugRequest.prototype.scriptsCommandToJSONRequest_ = function(args) {
- // Build a evaluate request from the text command.
- var request = this.createRequest('scripts');
-
- // Process arguments if any.
- if (args && args.length > 0) {
- args = args.split(/\s*[ ]+\s*/g);
-
- if (args.length > 1) {
- throw new Error('Invalid scripts arguments.');
- }
-
- request.arguments = {};
- switch (args[0]) {
- case 'natives':
- request.arguments.types = ScriptTypeFlag(Debug.ScriptType.Native);
- break;
-
- case 'extensions':
- request.arguments.types = ScriptTypeFlag(Debug.ScriptType.Extension);
- break;
-
- case 'all':
- request.arguments.types =
- ScriptTypeFlag(Debug.ScriptType.Normal) |
- ScriptTypeFlag(Debug.ScriptType.Native) |
- ScriptTypeFlag(Debug.ScriptType.Extension);
- break;
-
- default:
- // If the arg is not one of the know one aboves, then it must be a
- // filter used for filtering the results:
- request.arguments.filter = args[0];
- break;
- }
- }
-
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the break command.
-DebugRequest.prototype.breakCommandToJSONRequest_ = function(args) {
- // Build a evaluate request from the text command.
- // Process arguments if any.
- if (args && args.length > 0) {
- var target = args;
- var type = 'function';
- var line;
- var column;
- var condition;
- var pos;
-
- var request = this.createRequest('setbreakpoint');
-
- // Break the args into target spec and condition if appropriate.
-
- // Check for breakpoint condition.
- pos = args.indexOf(' ');
- if (pos > 0) {
- target = args.substring(0, pos);
- condition = args.substring(pos + 1, args.length);
- }
-
- // Check for script breakpoint (name:line[:column]). If no ':' in break
- // specification it is considered a function break point.
- pos = target.indexOf(':');
- if (pos > 0) {
- type = 'script';
- var tmp = target.substring(pos + 1, target.length);
- target = target.substring(0, pos);
-
- // Check for both line and column.
- pos = tmp.indexOf(':');
- if (pos > 0) {
- column = parseInt(tmp.substring(pos + 1, tmp.length)) - 1;
- line = parseInt(tmp.substring(0, pos)) - 1;
- } else {
- line = parseInt(tmp) - 1;
- }
- } else if (target[0] == '#' && target[target.length - 1] == '#') {
- type = 'handle';
- target = target.substring(1, target.length - 1);
- } else {
- type = 'function';
- }
-
- request.arguments = {};
- request.arguments.type = type;
- request.arguments.target = target;
- request.arguments.line = line;
- request.arguments.column = column;
- request.arguments.condition = condition;
- } else {
- var request = this.createRequest('suspend');
- }
-
- return request.toJSONProtocol();
-};
-
-
-DebugRequest.prototype.breakpointsCommandToJSONRequest_ = function(args) {
- if (args && args.length > 0) {
- throw new Error('Unexpected arguments.');
- }
- var request = this.createRequest('listbreakpoints');
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the clear command.
-DebugRequest.prototype.clearCommandToJSONRequest_ = function(args) {
- // Build a evaluate request from the text command.
- var request = this.createRequest('clearbreakpoint');
-
- // Process arguments if any.
- if (args && args.length > 0) {
- request.arguments = {};
- request.arguments.breakpoint = parseInt(args);
- } else {
- throw new Error('Invalid break arguments.');
- }
-
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the change breakpoint command.
-DebugRequest.prototype.changeBreakpointCommandToJSONRequest_ =
- function(args, command) {
-
- var request;
-
- // Check for exception breaks first:
- // en[able] exc[eptions] [all|unc[aught]]
- // en[able] [all|unc[aught]] exc[eptions]
- // dis[able] exc[eptions] [all|unc[aught]]
- // dis[able] [all|unc[aught]] exc[eptions]
- if ((command == 'enable' || command == 'disable') &&
- args && args.length > 1) {
- var nextPos = args.indexOf(' ');
- var arg1 = (nextPos > 0) ? args.substring(0, nextPos) : args;
- var excType = null;
-
- // Check for:
- // en[able] exc[eptions] [all|unc[aught]]
- // dis[able] exc[eptions] [all|unc[aught]]
- if (arg1 == 'exc' || arg1 == 'exception' || arg1 == 'exceptions') {
-
- var arg2 = (nextPos > 0) ?
- args.substring(nextPos + 1, args.length) : 'all';
- if (!arg2) {
- arg2 = 'all'; // if unspecified, set for all.
- } if (arg2 == 'unc') { // check for short cut.
- arg2 = 'uncaught';
- }
- excType = arg2;
-
- // Check for:
- // en[able] [all|unc[aught]] exc[eptions]
- // dis[able] [all|unc[aught]] exc[eptions]
- } else if (arg1 == 'all' || arg1 == 'unc' || arg1 == 'uncaught') {
-
- var arg2 = (nextPos > 0) ?
- args.substring(nextPos + 1, args.length) : null;
- if (arg2 == 'exc' || arg1 == 'exception' || arg1 == 'exceptions') {
- excType = arg1;
- if (excType == 'unc') {
- excType = 'uncaught';
- }
- }
- }
-
- // If we matched one of the command formats, then excType will be non-null:
- if (excType) {
- // Build a evaluate request from the text command.
- request = this.createRequest('setexceptionbreak');
-
- request.arguments = {};
- request.arguments.type = excType;
- request.arguments.enabled = (command == 'enable');
-
- return request.toJSONProtocol();
- }
- }
-
- // Build a evaluate request from the text command.
- request = this.createRequest('changebreakpoint');
-
- // Process arguments if any.
- if (args && args.length > 0) {
- request.arguments = {};
- var pos = args.indexOf(' ');
- var breakpointArg = args;
- var otherArgs;
- if (pos > 0) {
- breakpointArg = args.substring(0, pos);
- otherArgs = args.substring(pos + 1, args.length);
- }
-
- request.arguments.breakpoint = parseInt(breakpointArg);
-
- switch(command) {
- case 'cond':
- request.arguments.condition = otherArgs ? otherArgs : null;
- break;
- case 'enable':
- request.arguments.enabled = true;
- break;
- case 'disable':
- request.arguments.enabled = false;
- break;
- case 'ignore':
- request.arguments.ignoreCount = parseInt(otherArgs);
- break;
- default:
- throw new Error('Invalid arguments.');
- }
- } else {
- throw new Error('Invalid arguments.');
- }
-
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the disconnect command.
-DebugRequest.prototype.disconnectCommandToJSONRequest_ = function(args) {
- var request;
- request = this.createRequest('disconnect');
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the info command.
-DebugRequest.prototype.infoCommandToJSONRequest_ = function(args) {
- var request;
- if (args && (args == 'break' || args == 'br')) {
- // Build a evaluate request from the text command.
- request = this.createRequest('listbreakpoints');
- last_cmd = 'info break';
- } else if (args && (args == 'locals' || args == 'lo')) {
- // Build a evaluate request from the text command.
- request = this.createRequest('frame');
- last_cmd = 'info locals';
- } else if (args && (args == 'args' || args == 'ar')) {
- // Build a evaluate request from the text command.
- request = this.createRequest('frame');
- last_cmd = 'info args';
- } else if (lol_is_enabled &&
- args && (args == 'liveobjectlist' || args == 'lol')) {
- // Build a evaluate request from the text command.
- return this.liveObjectListToJSONRequest_(null);
- } else {
- throw new Error('Invalid info arguments.');
- }
-
- return request.toJSONProtocol();
-};
-
-
-DebugRequest.prototype.v8FlagsToJSONRequest_ = function(args) {
- var request;
- request = this.createRequest('v8flags');
- request.arguments = {};
- request.arguments.flags = args;
- return request.toJSONProtocol();
-};
-
-
-DebugRequest.prototype.gcToJSONRequest_ = function(args) {
- var request;
- if (!args) {
- args = 'all';
- }
- var args = args.split(/\s+/g);
- var cmd = args[0];
-
- switch(cmd) {
- case 'all':
- case 'quick':
- case 'full':
- case 'young':
- case 'old':
- case 'compact':
- case 'sweep':
- case 'scavenge': {
- if (cmd == 'young') { cmd = 'quick'; }
- else if (cmd == 'old') { cmd = 'full'; }
-
- request = this.createRequest('gc');
- request.arguments = {};
- request.arguments.type = cmd;
- break;
- }
- // Else fall thru to the default case below to report the error.
- default:
- throw new Error('Missing arguments after ' + cmd + '.');
- }
- return request.toJSONProtocol();
-};
-
-
-// Args: [v[erbose]] [<N>] [i[ndex] <i>] [t[ype] <type>] [sp[ace] <space>]
-DebugRequest.prototype.lolMakeListRequest =
- function(cmd, args, first_arg_index, is_repeating) {
-
- var request;
- var start_index = 0;
- var dump_limit = void 0;
- var type_filter = void 0;
- var space_filter = void 0;
- var prop_filter = void 0;
- var is_verbose = false;
- var i;
-
- for (i = first_arg_index; i < args.length; i++) {
- var arg = args[i];
- // Check for [v[erbose]]:
- if (arg === 'verbose' || arg === 'v') {
- // Nothing to do. This is already implied by args.length > 3.
- is_verbose = true;
-
- // Check for [<N>]:
- } else if (arg.match(/^[0-9]+$/)) {
- dump_limit = arg;
- is_verbose = true;
-
- // Check for i[ndex] <i>:
- } else if (arg === 'index' || arg === 'i') {
- i++;
- if (args.length < i) {
- throw new Error('Missing index after ' + arg + '.');
- }
- start_index = parseInt(args[i]);
- // The user input start index starts at 1:
- if (start_index <= 0) {
- throw new Error('Invalid index ' + args[i] + '.');
- }
- start_index -= 1;
- is_verbose = true;
-
- // Check for t[ype] <type>:
- } else if (arg === 'type' || arg === 't') {
- i++;
- if (args.length < i) {
- throw new Error('Missing type after ' + arg + '.');
- }
- type_filter = args[i];
-
- // Check for space <heap space name>:
- } else if (arg === 'space' || arg === 'sp') {
- i++;
- if (args.length < i) {
- throw new Error('Missing space name after ' + arg + '.');
- }
- space_filter = args[i];
-
- // Check for property <prop name>:
- } else if (arg === 'property' || arg === 'prop') {
- i++;
- if (args.length < i) {
- throw new Error('Missing property name after ' + arg + '.');
- }
- prop_filter = args[i];
-
- } else {
- throw new Error('Unknown args at ' + arg + '.');
- }
- }
-
- // Build the verbose request:
- if (is_verbose) {
- request = this.createLOLRequest('lol-'+cmd,
- start_index,
- dump_limit,
- is_repeating);
- request.arguments.verbose = true;
- } else {
- request = this.createRequest('lol-'+cmd);
- request.arguments = {};
- }
-
- request.arguments.filter = {};
- if (type_filter) {
- request.arguments.filter.type = type_filter;
- }
- if (space_filter) {
- request.arguments.filter.space = space_filter;
- }
- if (prop_filter) {
- request.arguments.filter.prop = prop_filter;
- }
-
- return request;
-}
-
-
-function extractObjId(args) {
- var id = args;
- id = id.match(/^@([0-9]+)$/);
- if (id) {
- id = id[1];
- } else {
- throw new Error('Invalid obj id ' + args + '.');
- }
- return parseInt(id);
-}
-
-
-DebugRequest.prototype.lolToJSONRequest_ = function(args, is_repeating) {
- var request;
- // Use default command if one is not specified:
- if (!args) {
- args = 'info';
- }
-
- var orig_args = args;
- var first_arg_index;
-
- var arg, i;
- var args = args.split(/\s+/g);
- var cmd = args[0];
- var id;
-
- // Command: <id> [v[erbose]] ...
- if (cmd.match(/^[0-9]+$/)) {
- // Convert to the padded list command:
- // Command: l[ist] <dummy> <id> [v[erbose]] ...
-
- // Insert the implicit 'list' in front and process as normal:
- cmd = 'list';
- args.unshift(cmd);
- }
-
- switch(cmd) {
- // Command: c[apture]
- case 'capture':
- case 'c':
- request = this.createRequest('lol-capture');
- break;
-
- // Command: clear|d[elete] <id>|all
- case 'clear':
- case 'delete':
- case 'del': {
- if (args.length < 2) {
- throw new Error('Missing argument after ' + cmd + '.');
- } else if (args.length > 2) {
- throw new Error('Too many arguments after ' + cmd + '.');
- }
- id = args[1];
- if (id.match(/^[0-9]+$/)) {
- // Delete a specific lol record:
- request = this.createRequest('lol-delete');
- request.arguments = {};
- request.arguments.id = parseInt(id);
- } else if (id === 'all') {
- // Delete all:
- request = this.createRequest('lol-reset');
- } else {
- throw new Error('Invalid argument after ' + cmd + '.');
- }
- break;
- }
-
- // Command: diff <id1> <id2> [<dump options>]
- case 'diff':
- first_arg_index = 3;
-
- // Command: list <dummy> <id> [<dump options>]
- case 'list':
-
- // Command: ret[ainers] <obj id> [<dump options>]
- case 'retainers':
- case 'ret':
- case 'retaining-paths':
- case 'rp': {
- if (cmd === 'ret') cmd = 'retainers';
- else if (cmd === 'rp') cmd = 'retaining-paths';
-
- if (!first_arg_index) first_arg_index = 2;
-
- if (args.length < first_arg_index) {
- throw new Error('Too few arguments after ' + cmd + '.');
- }
-
- var request_cmd = (cmd === 'list') ? 'diff':cmd;
- request = this.lolMakeListRequest(request_cmd,
- args,
- first_arg_index,
- is_repeating);
-
- if (cmd === 'diff') {
- request.arguments.id1 = parseInt(args[1]);
- request.arguments.id2 = parseInt(args[2]);
- } else if (cmd == 'list') {
- request.arguments.id1 = 0;
- request.arguments.id2 = parseInt(args[1]);
- } else {
- request.arguments.id = extractObjId(args[1]);
- }
- break;
- }
-
- // Command: getid
- case 'getid': {
- request = this.createRequest('lol-getid');
- request.arguments = {};
- request.arguments.address = args[1];
- break;
- }
-
- // Command: inf[o] [<N>]
- case 'info':
- case 'inf': {
- if (args.length > 2) {
- throw new Error('Too many arguments after ' + cmd + '.');
- }
- // Built the info request:
- request = this.createLOLRequest('lol-info', 0, args[1], is_repeating);
- break;
- }
-
- // Command: path <obj id 1> <obj id 2>
- case 'path': {
- request = this.createRequest('lol-path');
- request.arguments = {};
- if (args.length > 2) {
- request.arguments.id1 = extractObjId(args[1]);
- request.arguments.id2 = extractObjId(args[2]);
- } else {
- request.arguments.id1 = 0;
- request.arguments.id2 = extractObjId(args[1]);
- }
- break;
- }
-
- // Command: print
- case 'print': {
- request = this.createRequest('lol-print');
- request.arguments = {};
- request.arguments.id = extractObjId(args[1]);
- break;
- }
-
- // Command: reset
- case 'reset': {
- request = this.createRequest('lol-reset');
- break;
- }
-
- default:
- throw new Error('Invalid arguments.');
- }
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the threads command.
-DebugRequest.prototype.threadsCommandToJSONRequest_ = function(args) {
- // Build a threads request from the text command.
- var request = this.createRequest('threads');
- return request.toJSONProtocol();
-};
-
-
-// Handle the trace command.
-DebugRequest.prototype.traceCommand_ = function(args) {
- // Process arguments.
- if (args && args.length > 0) {
- if (args == 'compile') {
- trace_compile = !trace_compile;
- print('Tracing of compiled scripts ' + (trace_compile ? 'on' : 'off'));
- } else if (args === 'debug json' || args === 'json' || args === 'packets') {
- trace_debug_json = !trace_debug_json;
- print('Tracing of debug json packets ' +
- (trace_debug_json ? 'on' : 'off'));
- } else {
- throw new Error('Invalid trace arguments.');
- }
- } else {
- throw new Error('Invalid trace arguments.');
- }
-}
-
-// Handle the help command.
-DebugRequest.prototype.helpCommand_ = function(args) {
- // Help os quite simple.
- if (args && args.length > 0) {
- print('warning: arguments to \'help\' are ignored');
- }
-
- print('Note: <> denotes symbollic values to be replaced with real values.');
- print('Note: [] denotes optional parts of commands, or optional options / arguments.');
- print(' e.g. d[elete] - you get the same command if you type d or delete.');
- print('');
- print('[break] - break as soon as possible');
- print('b[reak] location [condition]');
- print(' - break on named function: location is a function name');
- print(' - break on function: location is #<id>#');
- print(' - break on script position: location is name:line[:column]');
- print('');
- print('clear <breakpoint #> - deletes the specified user defined breakpoint');
- print('d[elete] <breakpoint #> - deletes the specified user defined breakpoint');
- print('dis[able] <breakpoint #> - disables the specified user defined breakpoint');
- print('dis[able] exc[eptions] [[all] | unc[aught]]');
- print(' - disables breaking on exceptions');
- print('en[able] <breakpoint #> - enables the specified user defined breakpoint');
- print('en[able] exc[eptions] [[all] | unc[aught]]');
- print(' - enables breaking on exceptions');
- print('');
- print('b[ack]t[race] [n] | [-n] | [from to]');
- print(' - prints the stack back trace');
- print('f[rame] - prints info about the current frame context');
- print('f[rame] <frame #> - set context to specified frame #');
- print('scopes');
- print('scope <scope #>');
- print('');
- print('up - set context to caller of current frame');
- print('do[wn] - set context to callee of current frame');
- print('inf[o] br[eak] - prints info about breakpoints in use');
- print('inf[o] ar[gs] - prints info about arguments of the current function');
- print('inf[o] lo[cals] - prints info about locals in the current function');
- print('inf[o] liveobjectlist|lol - same as \'lol info\'');
- print('');
- print('step [in | next | out| min [step count]]');
- print('c[ontinue] - continue executing after a breakpoint');
- print('s[tep] [<N>] - step into the next N callees (default N is 1)');
- print('s[tep]i [<N>] - step into the next N callees (default N is 1)');
- print('n[ext] [<N>] - step over the next N callees (default N is 1)');
- print('fin[ish] [<N>] - step out of N frames (default N is 1)');
- print('');
- print('p[rint] <expression> - prints the result of the specified expression');
- print('dir <expression> - prints the object structure of the result');
- print('set <var> = <expression> - executes the specified statement');
- print('');
- print('l[ist] - list the source code around for the current pc');
- print('l[ist] [- | <start>,<end>] - list the specified range of source code');
- print('source [from line [num lines]]');
- print('scr[ipts] [native|extensions|all]');
- print('scr[ipts] [<filter text>] - list scripts with the specified text in its description');
- print('');
- print('gc - runs the garbage collector');
- print('');
-
- if (lol_is_enabled) {
- print('liveobjectlist|lol <command> - live object list tracking.');
- print(' where <command> can be:');
- print(' c[apture] - captures a LOL list.');
- print(' clear|del[ete] <id>|all - clears LOL of id <id>.');
- print(' If \'all\' is unspecified instead, will clear all.');
- print(' diff <id1> <id2> [<dump options>]');
- print(' - prints the diff between LOLs id1 and id2.');
- print(' - also see <dump options> below.');
- print(' getid <address> - gets the obj id for the specified address if available.');
- print(' The address must be in hex form prefixed with 0x.');
- print(' inf[o] [<N>] - lists summary info of all LOL lists.');
- print(' If N is specified, will print N items at a time.');
- print(' [l[ist]] <id> [<dump options>]');
- print(' - prints the listing of objects in LOL id.');
- print(' - also see <dump options> below.');
- print(' reset - clears all LOL lists.');
- print(' ret[ainers] <id> [<dump options>]');
- print(' - prints the list of retainers of obj id.');
- print(' - also see <dump options> below.');
- print(' path <id1> <id2> - prints the retaining path from obj id1 to id2.');
- print(' If only one id is specified, will print the path from');
- print(' roots to the specified object if available.');
- print(' print <id> - prints the obj for the specified obj id if available.');
- print('');
- print(' <dump options> includes:');
- print(' [v[erbose]] - do verbose dump.');
- print(' [<N>] - dump N items at a time. Implies verbose dump.');
- print(' If unspecified, N will default to '+
- kDefaultLolLinesToPrintAtATime+'. Max N is '+
- kMaxLolLinesToPrintAtATime+'.');
- print(' [i[ndex] <i>] - start dump from index i. Implies verbose dump.');
- print(' [t[ype] <type>] - filter by type.');
- print(' [sp[ace] <space name>] - filter by heap space where <space name> is one of');
- print(' { cell, code, lo, map, new, old-data, old-pointer }.');
- print('');
- print(' If the verbose option, or an option that implies a verbose dump');
- print(' is specified, then a verbose dump will requested. Else, a summary dump');
- print(' will be requested.');
- print('');
- }
-
- print('trace compile');
- // hidden command: trace debug json - toggles tracing of debug json packets
- print('');
- print('disconnect|exit|quit - disconnects and quits the debugger');
- print('help - prints this help information');
-}
-
-
-function formatHandleReference_(value) {
- if (value.handle() >= 0) {
- return '#' + value.handle() + '#';
- } else {
- return '#Transient#';
- }
-}
-
-
-function formatObject_(value, include_properties) {
- var result = '';
- result += formatHandleReference_(value);
- result += ', type: object'
- result += ', constructor ';
- var ctor = value.constructorFunctionValue();
- result += formatHandleReference_(ctor);
- result += ', __proto__ ';
- var proto = value.protoObjectValue();
- result += formatHandleReference_(proto);
- result += ', ';
- result += value.propertyCount();
- result += ' properties.';
- if (include_properties) {
- result += '\n';
- for (var i = 0; i < value.propertyCount(); i++) {
- result += ' ';
- result += value.propertyName(i);
- result += ': ';
- var property_value = value.propertyValue(i);
- if (property_value instanceof ProtocolReference) {
- result += '<no type>';
- } else {
- if (property_value && property_value.type()) {
- result += property_value.type();
- } else {
- result += '<no type>';
- }
- }
- result += ' ';
- result += formatHandleReference_(property_value);
- result += '\n';
- }
- }
- return result;
-}
-
-
-function formatScope_(scope) {
- var result = '';
- var index = scope.index;
- result += '#' + (index <= 9 ? '0' : '') + index;
- result += ' ';
- switch (scope.type) {
- case Debug.ScopeType.Global:
- result += 'Global, ';
- result += '#' + scope.object.ref + '#';
- break;
- case Debug.ScopeType.Local:
- result += 'Local';
- break;
- case Debug.ScopeType.With:
- result += 'With, ';
- result += '#' + scope.object.ref + '#';
- break;
- case Debug.ScopeType.Catch:
- result += 'Catch, ';
- result += '#' + scope.object.ref + '#';
- break;
- case Debug.ScopeType.Closure:
- result += 'Closure';
- break;
- default:
- result += 'UNKNOWN';
- }
- return result;
-}
-
-
-function refObjectToString_(protocolPackage, handle) {
- var value = protocolPackage.lookup(handle);
- var result = '';
- if (value.isString()) {
- result = '"' + value.value() + '"';
- } else if (value.isPrimitive()) {
- result = value.valueString();
- } else if (value.isObject()) {
- result += formatObject_(value, true);
- }
- return result;
-}
-
-
-function decodeLolCaptureResponse(body) {
- var result;
- result = 'Captured live object list '+ body.id +
- ': count '+ body.count + ' size ' + body.size;
- return result;
-}
-
-
-function decodeLolDeleteResponse(body) {
- var result;
- result = 'Deleted live object list '+ body.id;
- return result;
-}
-
-
-function digitsIn(value) {
- var digits = 0;
- if (value === 0) value = 1;
- while (value >= 1) {
- digits++;
- value /= 10;
- }
- return digits;
-}
-
-
-function padding(value, max_digits) {
- var padding_digits = max_digits - digitsIn(value);
- var padding = '';
- while (padding_digits > 0) {
- padding += ' ';
- padding_digits--;
- }
- return padding;
-}
-
-
-function decodeLolInfoResponse(body) {
- var result;
- var lists = body.lists;
- var length = lists.length;
- var first_index = body.first_index + 1;
- var has_more = ((first_index + length) <= body.count);
- result = 'captured live object lists';
- if (has_more || (first_index != 1)) {
- result += ' ['+ length +' of '+ body.count +
- ': starting from '+ first_index +']';
- }
- result += ':\n';
- var max_digits = digitsIn(body.count);
- var last_count = 0;
- var last_size = 0;
- for (var i = 0; i < length; i++) {
- var entry = lists[i];
- var count = entry.count;
- var size = entry.size;
- var index = first_index + i;
- result += ' [' + padding(index, max_digits) + index + '] id '+ entry.id +
- ': count '+ count;
- if (last_count > 0) {
- result += '(+' + (count - last_count) + ')';
- }
- result += ' size '+ size;
- if (last_size > 0) {
- result += '(+' + (size - last_size) + ')';
- }
- result += '\n';
- last_count = count;
- last_size = size;
- }
- result += ' total: '+length+' lists\n';
- if (has_more) {
- result += ' -- press <enter> for more --\n';
- } else {
- repeat_cmd_line = '';
- }
- if (length === 0) result += ' none\n';
-
- return result;
-}
-
-
-function decodeLolListResponse(body, title) {
-
- var result;
- var total_count = body.count;
- var total_size = body.size;
- var length;
- var max_digits;
- var i;
- var entry;
- var index;
-
- var max_count_digits = digitsIn(total_count);
- var max_size_digits;
-
- var summary = body.summary;
- if (summary) {
-
- var roots_count = 0;
- var found_root = body.found_root || 0;
- var found_weak_root = body.found_weak_root || 0;
-
- // Print the summary result:
- result = 'summary of objects:\n';
- length = summary.length;
- if (found_root !== 0) {
- roots_count++;
- }
- if (found_weak_root !== 0) {
- roots_count++;
- }
- max_digits = digitsIn(length + roots_count);
- max_size_digits = digitsIn(total_size);
-
- index = 1;
- if (found_root !== 0) {
- result += ' [' + padding(index, max_digits) + index + '] ' +
- ' count '+ 1 + padding(0, max_count_digits) +
- ' '+ padding(0, max_size_digits+1) +
- ' : <root>\n';
- index++;
- }
- if (found_weak_root !== 0) {
- result += ' [' + padding(index, max_digits) + index + '] ' +
- ' count '+ 1 + padding(0, max_count_digits) +
- ' '+ padding(0, max_size_digits+1) +
- ' : <weak root>\n';
- index++;
- }
-
- for (i = 0; i < length; i++) {
- entry = summary[i];
- var count = entry.count;
- var size = entry.size;
- result += ' [' + padding(index, max_digits) + index + '] ' +
- ' count '+ count + padding(count, max_count_digits) +
- ' size '+ size + padding(size, max_size_digits) +
- ' : <' + entry.desc + '>\n';
- index++;
- }
- result += '\n total count: '+(total_count+roots_count)+'\n';
- if (body.size) {
- result += ' total size: '+body.size+'\n';
- }
-
- } else {
- // Print the full dump result:
- var first_index = body.first_index + 1;
- var elements = body.elements;
- length = elements.length;
- var has_more = ((first_index + length) <= total_count);
- result = title;
- if (has_more || (first_index != 1)) {
- result += ' ['+ length +' of '+ total_count +
- ': starting from '+ first_index +']';
- }
- result += ':\n';
- if (length === 0) result += ' none\n';
- max_digits = digitsIn(length);
-
- var max_id = 0;
- var max_size = 0;
- for (i = 0; i < length; i++) {
- entry = elements[i];
- if (entry.id > max_id) max_id = entry.id;
- if (entry.size > max_size) max_size = entry.size;
- }
- var max_id_digits = digitsIn(max_id);
- max_size_digits = digitsIn(max_size);
-
- for (i = 0; i < length; i++) {
- entry = elements[i];
- index = first_index + i;
- result += ' ['+ padding(index, max_digits) + index +']';
- if (entry.id !== 0) {
- result += ' @' + entry.id + padding(entry.id, max_id_digits) +
- ': size ' + entry.size + ', ' +
- padding(entry.size, max_size_digits) + entry.desc + '\n';
- } else {
- // Must be a root or weak root:
- result += ' ' + entry.desc + '\n';
- }
- }
- if (has_more) {
- result += ' -- press <enter> for more --\n';
- } else {
- repeat_cmd_line = '';
- }
- if (length === 0) result += ' none\n';
- }
-
- return result;
-}
-
-
-function decodeLolDiffResponse(body) {
- var title = 'objects';
- return decodeLolListResponse(body, title);
-}
-
-
-function decodeLolRetainersResponse(body) {
- var title = 'retainers for @' + body.id;
- return decodeLolListResponse(body, title);
-}
-
-
-function decodeLolPathResponse(body) {
- return body.path;
-}
-
-
-function decodeLolResetResponse(body) {
- return 'Reset all live object lists.';
-}
-
-
-function decodeLolGetIdResponse(body) {
- if (body.id == 0) {
- return 'Address is invalid, or object has been moved or collected';
- }
- return 'obj id is @' + body.id;
-}
-
-
-function decodeLolPrintResponse(body) {
- return body.dump;
-}
-
-
-// Rounds number 'num' to 'length' decimal places.
-function roundNumber(num, length) {
- var factor = Math.pow(10, length);
- return Math.round(num * factor) / factor;
-}
-
-
-// Convert a JSON response to text for display in a text based debugger.
-function DebugResponseDetails(response) {
- details = {text:'', running:false}
-
- try {
- if (!response.success()) {
- details.text = response.message();
- return details;
- }
-
- // Get the running state.
- details.running = response.running();
-
- var body = response.body();
- var result = '';
- switch (response.command()) {
- case 'suspend':
- details.text = 'stopped';
- break;
-
- case 'setbreakpoint':
- result = 'set breakpoint #';
- result += body.breakpoint;
- details.text = result;
- break;
-
- case 'clearbreakpoint':
- result = 'cleared breakpoint #';
- result += body.breakpoint;
- details.text = result;
- break;
-
- case 'changebreakpoint':
- result = 'successfully changed breakpoint';
- details.text = result;
- break;
-
- case 'listbreakpoints':
- result = 'breakpoints: (' + body.breakpoints.length + ')';
- for (var i = 0; i < body.breakpoints.length; i++) {
- var breakpoint = body.breakpoints[i];
- result += '\n id=' + breakpoint.number;
- result += ' type=' + breakpoint.type;
- if (breakpoint.script_id) {
- result += ' script_id=' + breakpoint.script_id;
- }
- if (breakpoint.script_name) {
- result += ' script_name=' + breakpoint.script_name;
- }
- result += ' line=' + (breakpoint.line + 1);
- if (breakpoint.column != null) {
- result += ' column=' + (breakpoint.column + 1);
- }
- if (breakpoint.groupId) {
- result += ' groupId=' + breakpoint.groupId;
- }
- if (breakpoint.ignoreCount) {
- result += ' ignoreCount=' + breakpoint.ignoreCount;
- }
- if (breakpoint.active === false) {
- result += ' inactive';
- }
- if (breakpoint.condition) {
- result += ' condition=' + breakpoint.condition;
- }
- result += ' hit_count=' + breakpoint.hit_count;
- }
- if (body.breakpoints.length === 0) {
- result = "No user defined breakpoints\n";
- } else {
- result += '\n';
- }
- if (body.breakOnExceptions) {
- result += '* breaking on ALL exceptions is enabled\n';
- } else if (body.breakOnUncaughtExceptions) {
- result += '* breaking on UNCAUGHT exceptions is enabled\n';
- } else {
- result += '* all exception breakpoints are disabled\n';
- }
- details.text = result;
- break;
-
- case 'setexceptionbreak':
- result = 'Break on ' + body.type + ' exceptions: ';
- result += body.enabled ? 'enabled' : 'disabled';
- details.text = result;
- break;
-
- case 'backtrace':
- if (body.totalFrames == 0) {
- result = '(empty stack)';
- } else {
- var result = 'Frames #' + body.fromFrame + ' to #' +
- (body.toFrame - 1) + ' of ' + body.totalFrames + '\n';
- for (i = 0; i < body.frames.length; i++) {
- if (i != 0) result += '\n';
- result += body.frames[i].text;
- }
- }
- details.text = result;
- break;
-
- case 'frame':
- if (last_cmd === 'info locals') {
- var locals = body.locals;
- if (locals.length === 0) {
- result = 'No locals';
- } else {
- for (var i = 0; i < locals.length; i++) {
- var local = locals[i];
- result += local.name + ' = ';
- result += refObjectToString_(response, local.value.ref);
- result += '\n';
- }
- }
- } else if (last_cmd === 'info args') {
- var args = body.arguments;
- if (args.length === 0) {
- result = 'No arguments';
- } else {
- for (var i = 0; i < args.length; i++) {
- var arg = args[i];
- result += arg.name + ' = ';
- result += refObjectToString_(response, arg.value.ref);
- result += '\n';
- }
- }
- } else {
- result = SourceUnderline(body.sourceLineText,
- body.column);
- Debug.State.currentSourceLine = body.line;
- Debug.State.currentFrame = body.index;
- Debug.State.displaySourceStartLine = -1;
- Debug.State.displaySourceEndLine = -1;
- }
- details.text = result;
- break;
-
- case 'scopes':
- if (body.totalScopes == 0) {
- result = '(no scopes)';
- } else {
- result = 'Scopes #' + body.fromScope + ' to #' +
- (body.toScope - 1) + ' of ' + body.totalScopes + '\n';
- for (i = 0; i < body.scopes.length; i++) {
- if (i != 0) {
- result += '\n';
- }
- result += formatScope_(body.scopes[i]);
- }
- }
- details.text = result;
- break;
-
- case 'scope':
- result += formatScope_(body);
- result += '\n';
- var scope_object_value = response.lookup(body.object.ref);
- result += formatObject_(scope_object_value, true);
- details.text = result;
- break;
-
- case 'evaluate':
- case 'lookup':
- case 'getobj':
- if (last_cmd == 'p' || last_cmd == 'print') {
- result = body.text;
- } else {
- var value;
- if (lookup_handle) {
- value = response.bodyValue(lookup_handle);
- } else {
- value = response.bodyValue();
- }
- if (value.isObject()) {
- result += formatObject_(value, true);
- } else {
- result += 'type: ';
- result += value.type();
- if (!value.isUndefined() && !value.isNull()) {
- result += ', ';
- if (value.isString()) {
- result += '"';
- }
- result += value.value();
- if (value.isString()) {
- result += '"';
- }
- }
- result += '\n';
- }
- }
- details.text = result;
- break;
-
- case 'references':
- var count = body.length;
- result += 'found ' + count + ' objects';
- result += '\n';
- for (var i = 0; i < count; i++) {
- var value = response.bodyValue(i);
- result += formatObject_(value, false);
- result += '\n';
- }
- details.text = result;
- break;
-
- case 'source':
- // Get the source from the response.
- var source = body.source;
- var from_line = body.fromLine + 1;
- var lines = source.split('\n');
- var maxdigits = 1 + Math.floor(log10(from_line + lines.length));
- if (maxdigits < 3) {
- maxdigits = 3;
- }
- var result = '';
- for (var num = 0; num < lines.length; num++) {
- // Check if there's an extra newline at the end.
- if (num == (lines.length - 1) && lines[num].length == 0) {
- break;
- }
-
- var current_line = from_line + num;
- spacer = maxdigits - (1 + Math.floor(log10(current_line)));
- if (current_line == Debug.State.currentSourceLine + 1) {
- for (var i = 0; i < maxdigits; i++) {
- result += '>';
- }
- result += ' ';
- } else {
- for (var i = 0; i < spacer; i++) {
- result += ' ';
- }
- result += current_line + ': ';
- }
- result += lines[num];
- result += '\n';
- }
- details.text = result;
- break;
-
- case 'scripts':
- var result = '';
- for (i = 0; i < body.length; i++) {
- if (i != 0) result += '\n';
- if (body[i].id) {
- result += body[i].id;
- } else {
- result += '[no id]';
- }
- result += ', ';
- if (body[i].name) {
- result += body[i].name;
- } else {
- if (body[i].compilationType == Debug.ScriptCompilationType.Eval
- && body[i].evalFromScript
- ) {
- result += 'eval from ';
- var script_value = response.lookup(body[i].evalFromScript.ref);
- result += ' ' + script_value.field('name');
- result += ':' + (body[i].evalFromLocation.line + 1);
- result += ':' + body[i].evalFromLocation.column;
- } else if (body[i].compilationType ==
- Debug.ScriptCompilationType.JSON) {
- result += 'JSON ';
- } else { // body[i].compilation == Debug.ScriptCompilationType.Host
- result += '[unnamed] ';
- }
- }
- result += ' (lines: ';
- result += body[i].lineCount;
- result += ', length: ';
- result += body[i].sourceLength;
- if (body[i].type == Debug.ScriptType.Native) {
- result += ', native';
- } else if (body[i].type == Debug.ScriptType.Extension) {
- result += ', extension';
- }
- result += '), [';
- var sourceStart = body[i].sourceStart;
- if (sourceStart.length > 40) {
- sourceStart = sourceStart.substring(0, 37) + '...';
- }
- result += sourceStart;
- result += ']';
- }
- if (body.length == 0) {
- result = "no matching scripts found";
- }
- details.text = result;
- break;
-
- case 'threads':
- var result = 'Active V8 threads: ' + body.totalThreads + '\n';
- body.threads.sort(function(a, b) { return a.id - b.id; });
- for (i = 0; i < body.threads.length; i++) {
- result += body.threads[i].current ? '*' : ' ';
- result += ' ';
- result += body.threads[i].id;
- result += '\n';
- }
- details.text = result;
- break;
-
- case 'continue':
- details.text = "(running)";
- break;
-
- case 'v8flags':
- details.text = "flags set";
- break;
-
- case 'gc':
- details.text = "GC " + body.before + " => " + body.after;
- if (body.after > (1024*1024)) {
- details.text +=
- " (" + roundNumber(body.before/(1024*1024), 1) + "M => " +
- roundNumber(body.after/(1024*1024), 1) + "M)";
- } else if (body.after > 1024) {
- details.text +=
- " (" + roundNumber(body.before/1024, 1) + "K => " +
- roundNumber(body.after/1024, 1) + "K)";
- }
- break;
-
- case 'lol-capture':
- details.text = decodeLolCaptureResponse(body);
- break;
- case 'lol-delete':
- details.text = decodeLolDeleteResponse(body);
- break;
- case 'lol-diff':
- details.text = decodeLolDiffResponse(body);
- break;
- case 'lol-getid':
- details.text = decodeLolGetIdResponse(body);
- break;
- case 'lol-info':
- details.text = decodeLolInfoResponse(body);
- break;
- case 'lol-print':
- details.text = decodeLolPrintResponse(body);
- break;
- case 'lol-reset':
- details.text = decodeLolResetResponse(body);
- break;
- case 'lol-retainers':
- details.text = decodeLolRetainersResponse(body);
- break;
- case 'lol-path':
- details.text = decodeLolPathResponse(body);
- break;
-
- default:
- details.text =
- 'Response for unknown command \'' + response.command() + '\'' +
- ' (' + response.raw_json() + ')';
- }
- } catch (e) {
- details.text = 'Error: "' + e + '" formatting response';
- }
-
- return details;
-};
-
-
-/**
- * Protocol packages send from the debugger.
- * @param {string} json - raw protocol packet as JSON string.
- * @constructor
- */
-function ProtocolPackage(json) {
- this.raw_json_ = json;
- this.packet_ = JSON.parse(json);
- this.refs_ = [];
- if (this.packet_.refs) {
- for (var i = 0; i < this.packet_.refs.length; i++) {
- this.refs_[this.packet_.refs[i].handle] = this.packet_.refs[i];
- }
- }
-}
-
-
-/**
- * Get the packet type.
- * @return {String} the packet type
- */
-ProtocolPackage.prototype.type = function() {
- return this.packet_.type;
-}
-
-
-/**
- * Get the packet event.
- * @return {Object} the packet event
- */
-ProtocolPackage.prototype.event = function() {
- return this.packet_.event;
-}
-
-
-/**
- * Get the packet request sequence.
- * @return {number} the packet request sequence
- */
-ProtocolPackage.prototype.requestSeq = function() {
- return this.packet_.request_seq;
-}
-
-
-/**
- * Get the packet request sequence.
- * @return {number} the packet request sequence
- */
-ProtocolPackage.prototype.running = function() {
- return this.packet_.running ? true : false;
-}
-
-
-ProtocolPackage.prototype.success = function() {
- return this.packet_.success ? true : false;
-}
-
-
-ProtocolPackage.prototype.message = function() {
- return this.packet_.message;
-}
-
-
-ProtocolPackage.prototype.command = function() {
- return this.packet_.command;
-}
-
-
-ProtocolPackage.prototype.body = function() {
- return this.packet_.body;
-}
-
-
-ProtocolPackage.prototype.bodyValue = function(index) {
- if (index != null) {
- return new ProtocolValue(this.packet_.body[index], this);
- } else {
- return new ProtocolValue(this.packet_.body, this);
- }
-}
-
-
-ProtocolPackage.prototype.body = function() {
- return this.packet_.body;
-}
-
-
-ProtocolPackage.prototype.lookup = function(handle) {
- var value = this.refs_[handle];
- if (value) {
- return new ProtocolValue(value, this);
- } else {
- return new ProtocolReference(handle);
- }
-}
-
-
-ProtocolPackage.prototype.raw_json = function() {
- return this.raw_json_;
-}
-
-
-function ProtocolValue(value, packet) {
- this.value_ = value;
- this.packet_ = packet;
-}
-
-
-/**
- * Get the value type.
- * @return {String} the value type
- */
-ProtocolValue.prototype.type = function() {
- return this.value_.type;
-}
-
-
-/**
- * Get a metadata field from a protocol value.
- * @return {Object} the metadata field value
- */
-ProtocolValue.prototype.field = function(name) {
- return this.value_[name];
-}
-
-
-/**
- * Check is the value is a primitive value.
- * @return {boolean} true if the value is primitive
- */
-ProtocolValue.prototype.isPrimitive = function() {
- return this.isUndefined() || this.isNull() || this.isBoolean() ||
- this.isNumber() || this.isString();
-}
-
-
-/**
- * Get the object handle.
- * @return {number} the value handle
- */
-ProtocolValue.prototype.handle = function() {
- return this.value_.handle;
-}
-
-
-/**
- * Check is the value is undefined.
- * @return {boolean} true if the value is undefined
- */
-ProtocolValue.prototype.isUndefined = function() {
- return this.value_.type == 'undefined';
-}
-
-
-/**
- * Check is the value is null.
- * @return {boolean} true if the value is null
- */
-ProtocolValue.prototype.isNull = function() {
- return this.value_.type == 'null';
-}
-
-
-/**
- * Check is the value is a boolean.
- * @return {boolean} true if the value is a boolean
- */
-ProtocolValue.prototype.isBoolean = function() {
- return this.value_.type == 'boolean';
-}
-
-
-/**
- * Check is the value is a number.
- * @return {boolean} true if the value is a number
- */
-ProtocolValue.prototype.isNumber = function() {
- return this.value_.type == 'number';
-}
-
-
-/**
- * Check is the value is a string.
- * @return {boolean} true if the value is a string
- */
-ProtocolValue.prototype.isString = function() {
- return this.value_.type == 'string';
-}
-
-
-/**
- * Check is the value is an object.
- * @return {boolean} true if the value is an object
- */
-ProtocolValue.prototype.isObject = function() {
- return this.value_.type == 'object' || this.value_.type == 'function' ||
- this.value_.type == 'error' || this.value_.type == 'regexp';
-}
-
-
-/**
- * Get the constructor function
- * @return {ProtocolValue} constructor function
- */
-ProtocolValue.prototype.constructorFunctionValue = function() {
- var ctor = this.value_.constructorFunction;
- return this.packet_.lookup(ctor.ref);
-}
-
-
-/**
- * Get the __proto__ value
- * @return {ProtocolValue} __proto__ value
- */
-ProtocolValue.prototype.protoObjectValue = function() {
- var proto = this.value_.protoObject;
- return this.packet_.lookup(proto.ref);
-}
-
-
-/**
- * Get the number og properties.
- * @return {number} the number of properties
- */
-ProtocolValue.prototype.propertyCount = function() {
- return this.value_.properties ? this.value_.properties.length : 0;
-}
-
-
-/**
- * Get the specified property name.
- * @return {string} property name
- */
-ProtocolValue.prototype.propertyName = function(index) {
- var property = this.value_.properties[index];
- return property.name;
-}
-
-
-/**
- * Return index for the property name.
- * @param name The property name to look for
- * @return {number} index for the property name
- */
-ProtocolValue.prototype.propertyIndex = function(name) {
- for (var i = 0; i < this.propertyCount(); i++) {
- if (this.value_.properties[i].name == name) {
- return i;
- }
- }
- return null;
-}
-
-
-/**
- * Get the specified property value.
- * @return {ProtocolValue} property value
- */
-ProtocolValue.prototype.propertyValue = function(index) {
- var property = this.value_.properties[index];
- return this.packet_.lookup(property.ref);
-}
-
-
-/**
- * Check is the value is a string.
- * @return {boolean} true if the value is a string
- */
-ProtocolValue.prototype.value = function() {
- return this.value_.value;
-}
-
-
-ProtocolValue.prototype.valueString = function() {
- return this.value_.text;
-}
-
-
-function ProtocolReference(handle) {
- this.handle_ = handle;
-}
-
-
-ProtocolReference.prototype.handle = function() {
- return this.handle_;
-}
-
-
-function MakeJSONPair_(name, value) {
- return '"' + name + '":' + value;
-}
-
-
-function ArrayToJSONObject_(content) {
- return '{' + content.join(',') + '}';
-}
-
-
-function ArrayToJSONArray_(content) {
- return '[' + content.join(',') + ']';
-}
-
-
-function BooleanToJSON_(value) {
- return String(value);
-}
-
-
-function NumberToJSON_(value) {
- return String(value);
-}
-
-
-// Mapping of some control characters to avoid the \uXXXX syntax for most
-// commonly used control cahracters.
-const ctrlCharMap_ = {
- '\b': '\\b',
- '\t': '\\t',
- '\n': '\\n',
- '\f': '\\f',
- '\r': '\\r',
- '"' : '\\"',
- '\\': '\\\\'
-};
-
-
-// Regular expression testing for ", \ and control characters (0x00 - 0x1F).
-const ctrlCharTest_ = new RegExp('["\\\\\x00-\x1F]');
-
-
-// Regular expression matching ", \ and control characters (0x00 - 0x1F)
-// globally.
-const ctrlCharMatch_ = new RegExp('["\\\\\x00-\x1F]', 'g');
-
-
-/**
- * Convert a String to its JSON representation (see http://www.json.org/). To
- * avoid depending on the String object this method calls the functions in
- * string.js directly and not through the value.
- * @param {String} value The String value to format as JSON
- * @return {string} JSON formatted String value
- */
-function StringToJSON_(value) {
- // Check for" , \ and control characters (0x00 - 0x1F). No need to call
- // RegExpTest as ctrlchar is constructed using RegExp.
- if (ctrlCharTest_.test(value)) {
- // Replace ", \ and control characters (0x00 - 0x1F).
- return '"' +
- value.replace(ctrlCharMatch_, function (char) {
- // Use charmap if possible.
- var mapped = ctrlCharMap_[char];
- if (mapped) return mapped;
- mapped = char.charCodeAt();
- // Convert control character to unicode escape sequence.
- return '\\u00' +
- '0' + // TODO %NumberToRadixString(Math.floor(mapped / 16), 16) +
- '0' // TODO %NumberToRadixString(mapped % 16, 16);
- })
- + '"';
- }
-
- // Simple string with no special characters.
- return '"' + value + '"';
-}
-
-
-/**
- * Convert a Date to ISO 8601 format. To avoid depending on the Date object
- * this method calls the functions in date.js directly and not through the
- * value.
- * @param {Date} value The Date value to format as JSON
- * @return {string} JSON formatted Date value
- */
-function DateToISO8601_(value) {
- function f(n) {
- return n < 10 ? '0' + n : n;
- }
- function g(n) {
- return n < 10 ? '00' + n : n < 100 ? '0' + n : n;
- }
- return builtins.GetUTCFullYearFrom(value) + '-' +
- f(builtins.GetUTCMonthFrom(value) + 1) + '-' +
- f(builtins.GetUTCDateFrom(value)) + 'T' +
- f(builtins.GetUTCHoursFrom(value)) + ':' +
- f(builtins.GetUTCMinutesFrom(value)) + ':' +
- f(builtins.GetUTCSecondsFrom(value)) + '.' +
- g(builtins.GetUTCMillisecondsFrom(value)) + 'Z';
-}
-
-
-/**
- * Convert a Date to ISO 8601 format. To avoid depending on the Date object
- * this method calls the functions in date.js directly and not through the
- * value.
- * @param {Date} value The Date value to format as JSON
- * @return {string} JSON formatted Date value
- */
-function DateToJSON_(value) {
- return '"' + DateToISO8601_(value) + '"';
-}
-
-
-/**
- * Convert an Object to its JSON representation (see http://www.json.org/).
- * This implementation simply runs through all string property names and adds
- * each property to the JSON representation for some predefined types. For type
- * "object" the function calls itself recursively unless the object has the
- * function property "toJSONProtocol" in which case that is used. This is not
- * a general implementation but sufficient for the debugger. Note that circular
- * structures will cause infinite recursion.
- * @param {Object} object The object to format as JSON
- * @return {string} JSON formatted object value
- */
-function SimpleObjectToJSON_(object) {
- var content = [];
- for (var key in object) {
- // Only consider string keys.
- if (typeof key == 'string') {
- var property_value = object[key];
-
- // Format the value based on its type.
- var property_value_json;
- switch (typeof property_value) {
- case 'object':
- if (property_value === null) {
- property_value_json = 'null';
- } else if (typeof property_value.toJSONProtocol == 'function') {
- property_value_json = property_value.toJSONProtocol(true)
- } else if (property_value.constructor.name == 'Array'){
- property_value_json = SimpleArrayToJSON_(property_value);
- } else {
- property_value_json = SimpleObjectToJSON_(property_value);
- }
- break;
-
- case 'boolean':
- property_value_json = BooleanToJSON_(property_value);
- break;
-
- case 'number':
- property_value_json = NumberToJSON_(property_value);
- break;
-
- case 'string':
- property_value_json = StringToJSON_(property_value);
- break;
-
- default:
- property_value_json = null;
- }
-
- // Add the property if relevant.
- if (property_value_json) {
- content.push(StringToJSON_(key) + ':' + property_value_json);
- }
- }
- }
-
- // Make JSON object representation.
- return '{' + content.join(',') + '}';
-}
-
-
-/**
- * Convert an array to its JSON representation. This is a VERY simple
- * implementation just to support what is needed for the debugger.
- * @param {Array} arrya The array to format as JSON
- * @return {string} JSON formatted array value
- */
-function SimpleArrayToJSON_(array) {
- // Make JSON array representation.
- var json = '[';
- for (var i = 0; i < array.length; i++) {
- if (i != 0) {
- json += ',';
- }
- var elem = array[i];
- if (elem.toJSONProtocol) {
- json += elem.toJSONProtocol(true)
- } else if (typeof(elem) === 'object') {
- json += SimpleObjectToJSON_(elem);
- } else if (typeof(elem) === 'boolean') {
- json += BooleanToJSON_(elem);
- } else if (typeof(elem) === 'number') {
- json += NumberToJSON_(elem);
- } else if (typeof(elem) === 'string') {
- json += StringToJSON_(elem);
- } else {
- json += elem;
- }
- }
- json += ']';
- return json;
-}
diff --git a/src/3rdparty/v8/src/data-flow.cc b/src/3rdparty/v8/src/data-flow.cc
deleted file mode 100644
index 9c02ff4..0000000
--- a/src/3rdparty/v8/src/data-flow.cc
+++ /dev/null
@@ -1,545 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "data-flow.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef DEBUG
-void BitVector::Print() {
- bool first = true;
- PrintF("{");
- for (int i = 0; i < length(); i++) {
- if (Contains(i)) {
- if (!first) PrintF(",");
- first = false;
- PrintF("%d", i);
- }
- }
- PrintF("}");
-}
-#endif
-
-
-void BitVector::Iterator::Advance() {
- current_++;
- uint32_t val = current_value_;
- while (val == 0) {
- current_index_++;
- if (Done()) return;
- val = target_->data_[current_index_];
- current_ = current_index_ << 5;
- }
- val = SkipZeroBytes(val);
- val = SkipZeroBits(val);
- current_value_ = val >> 1;
-}
-
-
-bool AssignedVariablesAnalyzer::Analyze(CompilationInfo* info) {
- Scope* scope = info->scope();
- int size = scope->num_parameters() + scope->num_stack_slots();
- if (size == 0) return true;
- AssignedVariablesAnalyzer analyzer(info, size);
- return analyzer.Analyze();
-}
-
-
-AssignedVariablesAnalyzer::AssignedVariablesAnalyzer(CompilationInfo* info,
- int size)
- : info_(info), av_(size) {
-}
-
-
-bool AssignedVariablesAnalyzer::Analyze() {
- ASSERT(av_.length() > 0);
- VisitStatements(info_->function()->body());
- return !HasStackOverflow();
-}
-
-
-Variable* AssignedVariablesAnalyzer::FindSmiLoopVariable(ForStatement* stmt) {
- // The loop must have all necessary parts.
- if (stmt->init() == NULL || stmt->cond() == NULL || stmt->next() == NULL) {
- return NULL;
- }
- // The initialization statement has to be a simple assignment.
- Assignment* init = stmt->init()->StatementAsSimpleAssignment();
- if (init == NULL) return NULL;
-
- // We only deal with local variables.
- Variable* loop_var = init->target()->AsVariableProxy()->AsVariable();
- if (loop_var == NULL || !loop_var->IsStackAllocated()) return NULL;
-
- // Don't try to get clever with const or dynamic variables.
- if (loop_var->mode() != Variable::VAR) return NULL;
-
- // The initial value has to be a smi.
- Literal* init_lit = init->value()->AsLiteral();
- if (init_lit == NULL || !init_lit->handle()->IsSmi()) return NULL;
- int init_value = Smi::cast(*init_lit->handle())->value();
-
- // The condition must be a compare of variable with <, <=, >, or >=.
- CompareOperation* cond = stmt->cond()->AsCompareOperation();
- if (cond == NULL) return NULL;
- if (cond->op() != Token::LT
- && cond->op() != Token::LTE
- && cond->op() != Token::GT
- && cond->op() != Token::GTE) return NULL;
-
- // The lhs must be the same variable as in the init expression.
- if (cond->left()->AsVariableProxy()->AsVariable() != loop_var) return NULL;
-
- // The rhs must be a smi.
- Literal* term_lit = cond->right()->AsLiteral();
- if (term_lit == NULL || !term_lit->handle()->IsSmi()) return NULL;
- int term_value = Smi::cast(*term_lit->handle())->value();
-
- // The count operation updates the same variable as in the init expression.
- CountOperation* update = stmt->next()->StatementAsCountOperation();
- if (update == NULL) return NULL;
- if (update->expression()->AsVariableProxy()->AsVariable() != loop_var) {
- return NULL;
- }
-
- // The direction of the count operation must agree with the start and the end
- // value. We currently do not allow the initial value to be the same as the
- // terminal value. This _would_ be ok as long as the loop body never executes
- // or executes exactly one time.
- if (init_value == term_value) return NULL;
- if (init_value < term_value && update->op() != Token::INC) return NULL;
- if (init_value > term_value && update->op() != Token::DEC) return NULL;
-
- // Check that the update operation cannot overflow the smi range. This can
- // occur in the two cases where the loop bound is equal to the largest or
- // smallest smi.
- if (update->op() == Token::INC && term_value == Smi::kMaxValue) return NULL;
- if (update->op() == Token::DEC && term_value == Smi::kMinValue) return NULL;
-
- // Found a smi loop variable.
- return loop_var;
-}
-
-int AssignedVariablesAnalyzer::BitIndex(Variable* var) {
- ASSERT(var != NULL);
- ASSERT(var->IsStackAllocated());
- Slot* slot = var->AsSlot();
- if (slot->type() == Slot::PARAMETER) {
- return slot->index();
- } else {
- return info_->scope()->num_parameters() + slot->index();
- }
-}
-
-
-void AssignedVariablesAnalyzer::RecordAssignedVar(Variable* var) {
- ASSERT(var != NULL);
- if (var->IsStackAllocated()) {
- av_.Add(BitIndex(var));
- }
-}
-
-
-void AssignedVariablesAnalyzer::MarkIfTrivial(Expression* expr) {
- Variable* var = expr->AsVariableProxy()->AsVariable();
- if (var != NULL &&
- var->IsStackAllocated() &&
- !var->is_arguments() &&
- var->mode() != Variable::CONST &&
- (var->is_this() || !av_.Contains(BitIndex(var)))) {
- expr->AsVariableProxy()->MarkAsTrivial();
- }
-}
-
-
-void AssignedVariablesAnalyzer::ProcessExpression(Expression* expr) {
- BitVector saved_av(av_);
- av_.Clear();
- Visit(expr);
- av_.Union(saved_av);
-}
-
-void AssignedVariablesAnalyzer::VisitBlock(Block* stmt) {
- VisitStatements(stmt->statements());
-}
-
-
-void AssignedVariablesAnalyzer::VisitExpressionStatement(
- ExpressionStatement* stmt) {
- ProcessExpression(stmt->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitEmptyStatement(EmptyStatement* stmt) {
- // Do nothing.
-}
-
-
-void AssignedVariablesAnalyzer::VisitIfStatement(IfStatement* stmt) {
- ProcessExpression(stmt->condition());
- Visit(stmt->then_statement());
- Visit(stmt->else_statement());
-}
-
-
-void AssignedVariablesAnalyzer::VisitContinueStatement(
- ContinueStatement* stmt) {
- // Nothing to do.
-}
-
-
-void AssignedVariablesAnalyzer::VisitBreakStatement(BreakStatement* stmt) {
- // Nothing to do.
-}
-
-
-void AssignedVariablesAnalyzer::VisitReturnStatement(ReturnStatement* stmt) {
- ProcessExpression(stmt->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitWithEnterStatement(
- WithEnterStatement* stmt) {
- ProcessExpression(stmt->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitWithExitStatement(
- WithExitStatement* stmt) {
- // Nothing to do.
-}
-
-
-void AssignedVariablesAnalyzer::VisitSwitchStatement(SwitchStatement* stmt) {
- BitVector result(av_);
- av_.Clear();
- Visit(stmt->tag());
- result.Union(av_);
- for (int i = 0; i < stmt->cases()->length(); i++) {
- CaseClause* clause = stmt->cases()->at(i);
- if (!clause->is_default()) {
- av_.Clear();
- Visit(clause->label());
- result.Union(av_);
- }
- VisitStatements(clause->statements());
- }
- av_.Union(result);
-}
-
-
-void AssignedVariablesAnalyzer::VisitDoWhileStatement(DoWhileStatement* stmt) {
- ProcessExpression(stmt->cond());
- Visit(stmt->body());
-}
-
-
-void AssignedVariablesAnalyzer::VisitWhileStatement(WhileStatement* stmt) {
- ProcessExpression(stmt->cond());
- Visit(stmt->body());
-}
-
-
-void AssignedVariablesAnalyzer::VisitForStatement(ForStatement* stmt) {
- if (stmt->init() != NULL) Visit(stmt->init());
- if (stmt->cond() != NULL) ProcessExpression(stmt->cond());
- if (stmt->next() != NULL) Visit(stmt->next());
-
- // Process loop body. After visiting the loop body av_ contains
- // the assigned variables of the loop body.
- BitVector saved_av(av_);
- av_.Clear();
- Visit(stmt->body());
-
- Variable* var = FindSmiLoopVariable(stmt);
- if (var != NULL && !av_.Contains(BitIndex(var))) {
- stmt->set_loop_variable(var);
- }
- av_.Union(saved_av);
-}
-
-
-void AssignedVariablesAnalyzer::VisitForInStatement(ForInStatement* stmt) {
- ProcessExpression(stmt->each());
- ProcessExpression(stmt->enumerable());
- Visit(stmt->body());
-}
-
-
-void AssignedVariablesAnalyzer::VisitTryCatchStatement(
- TryCatchStatement* stmt) {
- Visit(stmt->try_block());
- Visit(stmt->catch_block());
-}
-
-
-void AssignedVariablesAnalyzer::VisitTryFinallyStatement(
- TryFinallyStatement* stmt) {
- Visit(stmt->try_block());
- Visit(stmt->finally_block());
-}
-
-
-void AssignedVariablesAnalyzer::VisitDebuggerStatement(
- DebuggerStatement* stmt) {
- // Nothing to do.
-}
-
-
-void AssignedVariablesAnalyzer::VisitFunctionLiteral(FunctionLiteral* expr) {
- // Nothing to do.
- ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- // Nothing to do.
- ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitConditional(Conditional* expr) {
- ASSERT(av_.IsEmpty());
-
- Visit(expr->condition());
-
- BitVector result(av_);
- av_.Clear();
- Visit(expr->then_expression());
- result.Union(av_);
-
- av_.Clear();
- Visit(expr->else_expression());
- av_.Union(result);
-}
-
-
-void AssignedVariablesAnalyzer::VisitVariableProxy(VariableProxy* expr) {
- // Nothing to do.
- ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitLiteral(Literal* expr) {
- // Nothing to do.
- ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitRegExpLiteral(RegExpLiteral* expr) {
- // Nothing to do.
- ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitObjectLiteral(ObjectLiteral* expr) {
- ASSERT(av_.IsEmpty());
- BitVector result(av_.length());
- for (int i = 0; i < expr->properties()->length(); i++) {
- Visit(expr->properties()->at(i)->value());
- result.Union(av_);
- av_.Clear();
- }
- av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitArrayLiteral(ArrayLiteral* expr) {
- ASSERT(av_.IsEmpty());
- BitVector result(av_.length());
- for (int i = 0; i < expr->values()->length(); i++) {
- Visit(expr->values()->at(i));
- result.Union(av_);
- av_.Clear();
- }
- av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitCatchExtensionObject(
- CatchExtensionObject* expr) {
- ASSERT(av_.IsEmpty());
- Visit(expr->key());
- ProcessExpression(expr->value());
-}
-
-
-void AssignedVariablesAnalyzer::VisitAssignment(Assignment* expr) {
- ASSERT(av_.IsEmpty());
-
- // There are three kinds of assignments: variable assignments, property
- // assignments, and reference errors (invalid left-hand sides).
- Variable* var = expr->target()->AsVariableProxy()->AsVariable();
- Property* prop = expr->target()->AsProperty();
- ASSERT(var == NULL || prop == NULL);
-
- if (var != NULL) {
- MarkIfTrivial(expr->value());
- Visit(expr->value());
- if (expr->is_compound()) {
- // Left-hand side occurs also as an rvalue.
- MarkIfTrivial(expr->target());
- ProcessExpression(expr->target());
- }
- RecordAssignedVar(var);
-
- } else if (prop != NULL) {
- MarkIfTrivial(expr->value());
- Visit(expr->value());
- if (!prop->key()->IsPropertyName()) {
- MarkIfTrivial(prop->key());
- ProcessExpression(prop->key());
- }
- MarkIfTrivial(prop->obj());
- ProcessExpression(prop->obj());
-
- } else {
- Visit(expr->target());
- }
-}
-
-
-void AssignedVariablesAnalyzer::VisitThrow(Throw* expr) {
- ASSERT(av_.IsEmpty());
- Visit(expr->exception());
-}
-
-
-void AssignedVariablesAnalyzer::VisitProperty(Property* expr) {
- ASSERT(av_.IsEmpty());
- if (!expr->key()->IsPropertyName()) {
- MarkIfTrivial(expr->key());
- Visit(expr->key());
- }
- MarkIfTrivial(expr->obj());
- ProcessExpression(expr->obj());
-}
-
-
-void AssignedVariablesAnalyzer::VisitCall(Call* expr) {
- ASSERT(av_.IsEmpty());
- Visit(expr->expression());
- BitVector result(av_);
- for (int i = 0; i < expr->arguments()->length(); i++) {
- av_.Clear();
- Visit(expr->arguments()->at(i));
- result.Union(av_);
- }
- av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitCallNew(CallNew* expr) {
- ASSERT(av_.IsEmpty());
- Visit(expr->expression());
- BitVector result(av_);
- for (int i = 0; i < expr->arguments()->length(); i++) {
- av_.Clear();
- Visit(expr->arguments()->at(i));
- result.Union(av_);
- }
- av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitCallRuntime(CallRuntime* expr) {
- ASSERT(av_.IsEmpty());
- BitVector result(av_);
- for (int i = 0; i < expr->arguments()->length(); i++) {
- av_.Clear();
- Visit(expr->arguments()->at(i));
- result.Union(av_);
- }
- av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitUnaryOperation(UnaryOperation* expr) {
- ASSERT(av_.IsEmpty());
- MarkIfTrivial(expr->expression());
- Visit(expr->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitIncrementOperation(
- IncrementOperation* expr) {
- UNREACHABLE();
-}
-
-
-void AssignedVariablesAnalyzer::VisitCountOperation(CountOperation* expr) {
- ASSERT(av_.IsEmpty());
- if (expr->is_prefix()) MarkIfTrivial(expr->expression());
- Visit(expr->expression());
-
- Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
- if (var != NULL) RecordAssignedVar(var);
-}
-
-
-void AssignedVariablesAnalyzer::VisitBinaryOperation(BinaryOperation* expr) {
- ASSERT(av_.IsEmpty());
- MarkIfTrivial(expr->right());
- Visit(expr->right());
- MarkIfTrivial(expr->left());
- ProcessExpression(expr->left());
-}
-
-
-void AssignedVariablesAnalyzer::VisitCompareOperation(CompareOperation* expr) {
- ASSERT(av_.IsEmpty());
- MarkIfTrivial(expr->right());
- Visit(expr->right());
- MarkIfTrivial(expr->left());
- ProcessExpression(expr->left());
-}
-
-
-void AssignedVariablesAnalyzer::VisitCompareToNull(CompareToNull* expr) {
- ASSERT(av_.IsEmpty());
- MarkIfTrivial(expr->expression());
- Visit(expr->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitThisFunction(ThisFunction* expr) {
- // Nothing to do.
- ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitDeclaration(Declaration* decl) {
- UNREACHABLE();
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/data-flow.h b/src/3rdparty/v8/src/data-flow.h
deleted file mode 100644
index 573d7d8..0000000
--- a/src/3rdparty/v8/src/data-flow.h
+++ /dev/null
@@ -1,379 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DATAFLOW_H_
-#define V8_DATAFLOW_H_
-
-#include "v8.h"
-
-#include "ast.h"
-#include "compiler.h"
-#include "zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class Node;
-
-class BitVector: public ZoneObject {
- public:
- // Iterator for the elements of this BitVector.
- class Iterator BASE_EMBEDDED {
- public:
- explicit Iterator(BitVector* target)
- : target_(target),
- current_index_(0),
- current_value_(target->data_[0]),
- current_(-1) {
- ASSERT(target->data_length_ > 0);
- Advance();
- }
- ~Iterator() { }
-
- bool Done() const { return current_index_ >= target_->data_length_; }
- void Advance();
-
- int Current() const {
- ASSERT(!Done());
- return current_;
- }
-
- private:
- uint32_t SkipZeroBytes(uint32_t val) {
- while ((val & 0xFF) == 0) {
- val >>= 8;
- current_ += 8;
- }
- return val;
- }
- uint32_t SkipZeroBits(uint32_t val) {
- while ((val & 0x1) == 0) {
- val >>= 1;
- current_++;
- }
- return val;
- }
-
- BitVector* target_;
- int current_index_;
- uint32_t current_value_;
- int current_;
-
- friend class BitVector;
- };
-
- explicit BitVector(int length)
- : length_(length),
- data_length_(SizeFor(length)),
- data_(ZONE->NewArray<uint32_t>(data_length_)) {
- ASSERT(length > 0);
- Clear();
- }
-
- BitVector(const BitVector& other)
- : length_(other.length()),
- data_length_(SizeFor(length_)),
- data_(ZONE->NewArray<uint32_t>(data_length_)) {
- CopyFrom(other);
- }
-
- static int SizeFor(int length) {
- return 1 + ((length - 1) / 32);
- }
-
- BitVector& operator=(const BitVector& rhs) {
- if (this != &rhs) CopyFrom(rhs);
- return *this;
- }
-
- void CopyFrom(const BitVector& other) {
- ASSERT(other.length() <= length());
- for (int i = 0; i < other.data_length_; i++) {
- data_[i] = other.data_[i];
- }
- for (int i = other.data_length_; i < data_length_; i++) {
- data_[i] = 0;
- }
- }
-
- bool Contains(int i) const {
- ASSERT(i >= 0 && i < length());
- uint32_t block = data_[i / 32];
- return (block & (1U << (i % 32))) != 0;
- }
-
- void Add(int i) {
- ASSERT(i >= 0 && i < length());
- data_[i / 32] |= (1U << (i % 32));
- }
-
- void Remove(int i) {
- ASSERT(i >= 0 && i < length());
- data_[i / 32] &= ~(1U << (i % 32));
- }
-
- void Union(const BitVector& other) {
- ASSERT(other.length() == length());
- for (int i = 0; i < data_length_; i++) {
- data_[i] |= other.data_[i];
- }
- }
-
- bool UnionIsChanged(const BitVector& other) {
- ASSERT(other.length() == length());
- bool changed = false;
- for (int i = 0; i < data_length_; i++) {
- uint32_t old_data = data_[i];
- data_[i] |= other.data_[i];
- if (data_[i] != old_data) changed = true;
- }
- return changed;
- }
-
- void Intersect(const BitVector& other) {
- ASSERT(other.length() == length());
- for (int i = 0; i < data_length_; i++) {
- data_[i] &= other.data_[i];
- }
- }
-
- void Subtract(const BitVector& other) {
- ASSERT(other.length() == length());
- for (int i = 0; i < data_length_; i++) {
- data_[i] &= ~other.data_[i];
- }
- }
-
- void Clear() {
- for (int i = 0; i < data_length_; i++) {
- data_[i] = 0;
- }
- }
-
- bool IsEmpty() const {
- for (int i = 0; i < data_length_; i++) {
- if (data_[i] != 0) return false;
- }
- return true;
- }
-
- bool Equals(const BitVector& other) {
- for (int i = 0; i < data_length_; i++) {
- if (data_[i] != other.data_[i]) return false;
- }
- return true;
- }
-
- int length() const { return length_; }
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- int length_;
- int data_length_;
- uint32_t* data_;
-};
-
-
-// An implementation of a sparse set whose elements are drawn from integers
-// in the range [0..universe_size[. It supports constant-time Contains,
-// destructive Add, and destructuve Remove operations and linear-time (in
-// the number of elements) destructive Union.
-class SparseSet: public ZoneObject {
- public:
- // Iterator for sparse set elements. Elements should not be added or
- // removed during iteration.
- class Iterator BASE_EMBEDDED {
- public:
- explicit Iterator(SparseSet* target) : target_(target), current_(0) {
- ASSERT(++target->iterator_count_ > 0);
- }
- ~Iterator() {
- ASSERT(target_->iterator_count_-- > 0);
- }
- bool Done() const { return current_ >= target_->dense_.length(); }
- void Advance() {
- ASSERT(!Done());
- ++current_;
- }
- int Current() {
- ASSERT(!Done());
- return target_->dense_[current_];
- }
-
- private:
- SparseSet* target_;
- int current_;
-
- friend class SparseSet;
- };
-
- explicit SparseSet(int universe_size)
- : dense_(4),
- sparse_(ZONE->NewArray<int>(universe_size)) {
-#ifdef DEBUG
- size_ = universe_size;
- iterator_count_ = 0;
-#endif
- }
-
- bool Contains(int n) const {
- ASSERT(0 <= n && n < size_);
- int dense_index = sparse_[n];
- return (0 <= dense_index) &&
- (dense_index < dense_.length()) &&
- (dense_[dense_index] == n);
- }
-
- void Add(int n) {
- ASSERT(0 <= n && n < size_);
- ASSERT(iterator_count_ == 0);
- if (!Contains(n)) {
- sparse_[n] = dense_.length();
- dense_.Add(n);
- }
- }
-
- void Remove(int n) {
- ASSERT(0 <= n && n < size_);
- ASSERT(iterator_count_ == 0);
- if (Contains(n)) {
- int dense_index = sparse_[n];
- int last = dense_.RemoveLast();
- if (dense_index < dense_.length()) {
- dense_[dense_index] = last;
- sparse_[last] = dense_index;
- }
- }
- }
-
- void Union(const SparseSet& other) {
- for (int i = 0; i < other.dense_.length(); ++i) {
- Add(other.dense_[i]);
- }
- }
-
- private:
- // The set is implemented as a pair of a growable dense list and an
- // uninitialized sparse array.
- ZoneList<int> dense_;
- int* sparse_;
-#ifdef DEBUG
- int size_;
- int iterator_count_;
-#endif
-};
-
-
-// Simple fixed-capacity list-based worklist (managed as a queue) of
-// pointers to T.
-template<typename T>
-class WorkList BASE_EMBEDDED {
- public:
- // The worklist cannot grow bigger than size. We keep one item empty to
- // distinguish between empty and full.
- explicit WorkList(int size)
- : capacity_(size + 1), head_(0), tail_(0), queue_(capacity_) {
- for (int i = 0; i < capacity_; i++) queue_.Add(NULL);
- }
-
- bool is_empty() { return head_ == tail_; }
-
- bool is_full() {
- // The worklist is full if head is at 0 and tail is at capacity - 1:
- // head == 0 && tail == capacity-1 ==> tail - head == capacity - 1
- // or if tail is immediately to the left of head:
- // tail+1 == head ==> tail - head == -1
- int diff = tail_ - head_;
- return (diff == -1 || diff == capacity_ - 1);
- }
-
- void Insert(T* item) {
- ASSERT(!is_full());
- queue_[tail_++] = item;
- if (tail_ == capacity_) tail_ = 0;
- }
-
- T* Remove() {
- ASSERT(!is_empty());
- T* item = queue_[head_++];
- if (head_ == capacity_) head_ = 0;
- return item;
- }
-
- private:
- int capacity_; // Including one empty slot.
- int head_; // Where the first item is.
- int tail_; // Where the next inserted item will go.
- List<T*> queue_;
-};
-
-
-// Computes the set of assigned variables and annotates variables proxies
-// that are trivial sub-expressions and for-loops where the loop variable
-// is guaranteed to be a smi.
-class AssignedVariablesAnalyzer : public AstVisitor {
- public:
- static bool Analyze(CompilationInfo* info);
-
- private:
- AssignedVariablesAnalyzer(CompilationInfo* info, int bits);
- bool Analyze();
-
- Variable* FindSmiLoopVariable(ForStatement* stmt);
-
- int BitIndex(Variable* var);
-
- void RecordAssignedVar(Variable* var);
-
- void MarkIfTrivial(Expression* expr);
-
- // Visits an expression saving the accumulator before, clearing
- // it before visting and restoring it after visiting.
- void ProcessExpression(Expression* expr);
-
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- CompilationInfo* info_;
-
- // Accumulator for assigned variables set.
- BitVector av_;
-
- DISALLOW_COPY_AND_ASSIGN(AssignedVariablesAnalyzer);
-};
-
-
-} } // namespace v8::internal
-
-
-#endif // V8_DATAFLOW_H_
diff --git a/src/3rdparty/v8/src/date.js b/src/3rdparty/v8/src/date.js
deleted file mode 100644
index 242ab7b..0000000
--- a/src/3rdparty/v8/src/date.js
+++ /dev/null
@@ -1,1103 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// This file relies on the fact that the following declarations have been made
-// in v8natives.js:
-// const $isFinite = GlobalIsFinite;
-
-// -------------------------------------------------------------------
-
-// This file contains date support implemented in JavaScript.
-
-
-// Keep reference to original values of some global properties. This
-// has the added benefit that the code in this file is isolated from
-// changes to these properties.
-const $Date = global.Date;
-
-// Helper function to throw error.
-function ThrowDateTypeError() {
- throw new $TypeError('this is not a Date object.');
-}
-
-// ECMA 262 - 5.2
-function Modulo(value, remainder) {
- var mod = value % remainder;
- // Guard against returning -0.
- if (mod == 0) return 0;
- return mod >= 0 ? mod : mod + remainder;
-}
-
-
-function TimeWithinDay(time) {
- return Modulo(time, msPerDay);
-}
-
-
-// ECMA 262 - 15.9.1.3
-function DaysInYear(year) {
- if (year % 4 != 0) return 365;
- if ((year % 100 == 0) && (year % 400 != 0)) return 365;
- return 366;
-}
-
-
-function DayFromYear(year) {
- return 365 * (year-1970)
- + FLOOR((year-1969)/4)
- - FLOOR((year-1901)/100)
- + FLOOR((year-1601)/400);
-}
-
-
-function TimeFromYear(year) {
- return msPerDay * DayFromYear(year);
-}
-
-
-function InLeapYear(time) {
- return DaysInYear(YearFromTime(time)) - 365; // Returns 1 or 0.
-}
-
-
-// ECMA 262 - 15.9.1.9
-function EquivalentYear(year) {
- // Returns an equivalent year in the range [2008-2035] matching
- // - leap year.
- // - week day of first day.
- var time = TimeFromYear(year);
- var recent_year = (InLeapYear(time) == 0 ? 1967 : 1956) +
- (WeekDay(time) * 12) % 28;
- // Find the year in the range 2008..2037 that is equivalent mod 28.
- // Add 3*28 to give a positive argument to the modulus operator.
- return 2008 + (recent_year + 3*28 - 2008) % 28;
-}
-
-
-function EquivalentTime(t) {
- // The issue here is that some library calls don't work right for dates
- // that cannot be represented using a non-negative signed 32 bit integer
- // (measured in whole seconds based on the 1970 epoch).
- // We solve this by mapping the time to a year with same leap-year-ness
- // and same starting day for the year. The ECMAscript specification says
- // we must do this, but for compatibility with other browsers, we use
- // the actual year if it is in the range 1970..2037
- if (t >= 0 && t <= 2.1e12) return t;
-
- var day = MakeDay(EquivalentYear(YearFromTime(t)),
- MonthFromTime(t),
- DateFromTime(t));
- return MakeDate(day, TimeWithinDay(t));
-}
-
-
-// local_time_offset is initialized when the DST_offset_cache is missed.
-// It must not be used until after a call to DaylightSavingsOffset().
-// In this way, only one check, for a DST cache miss, is needed.
-var local_time_offset;
-
-
-// Because computing the DST offset is an expensive operation,
-// we keep a cache of the last computed DST offset along with a time interval
-// where we know the cache is valid.
-// When the cache is valid, local_time_offset is also valid.
-var DST_offset_cache = {
- // Cached DST offset.
- offset: 0,
- // Time interval where the cached offset is valid.
- start: 0, end: -1,
- // Size of next interval expansion.
- increment: 0,
- initial_increment: 19 * msPerDay
-};
-
-
-// NOTE: The implementation relies on the fact that no time zones have
-// more than one daylight savings offset change per 19 days.
-//
-// In Egypt in 2010 they decided to suspend DST during Ramadan. This
-// led to a short interval where DST is in effect from September 10 to
-// September 30.
-//
-// If this function is called with NaN it returns NaN.
-function DaylightSavingsOffset(t) {
- // Load the cache object from the builtins object.
- var cache = DST_offset_cache;
-
- // Cache the start and the end in local variables for fast access.
- var start = cache.start;
- var end = cache.end;
-
- if (start <= t) {
- // If the time fits in the cached interval, return the cached offset.
- if (t <= end) return cache.offset;
-
- // If the cache misses, the local_time_offset may not be initialized.
- if (IS_UNDEFINED(local_time_offset)) {
- local_time_offset = %DateLocalTimeOffset();
- }
-
- // Compute a possible new interval end.
- var new_end = end + cache.increment;
-
- if (t <= new_end) {
- var end_offset = %DateDaylightSavingsOffset(EquivalentTime(new_end));
- if (cache.offset == end_offset) {
- // If the offset at the end of the new interval still matches
- // the offset in the cache, we grow the cached time interval
- // and return the offset.
- cache.end = new_end;
- cache.increment = cache.initial_increment;
- return end_offset;
- } else {
- var offset = %DateDaylightSavingsOffset(EquivalentTime(t));
- if (offset == end_offset) {
- // The offset at the given time is equal to the offset at the
- // new end of the interval, so that means that we've just skipped
- // the point in time where the DST offset change occurred. Updated
- // the interval to reflect this and reset the increment.
- cache.start = t;
- cache.end = new_end;
- cache.increment = cache.initial_increment;
- } else {
- // The interval contains a DST offset change and the given time is
- // before it. Adjust the increment to avoid a linear search for
- // the offset change point and change the end of the interval.
- cache.increment /= 3;
- cache.end = t;
- }
- // Update the offset in the cache and return it.
- cache.offset = offset;
- return offset;
- }
- }
- }
-
- // If the cache misses, the local_time_offset may not be initialized.
- if (IS_UNDEFINED(local_time_offset)) {
- local_time_offset = %DateLocalTimeOffset();
- }
- // Compute the DST offset for the time and shrink the cache interval
- // to only contain the time. This allows fast repeated DST offset
- // computations for the same time.
- var offset = %DateDaylightSavingsOffset(EquivalentTime(t));
- cache.offset = offset;
- cache.start = cache.end = t;
- cache.increment = cache.initial_increment;
- return offset;
-}
-
-
-var timezone_cache_time = $NaN;
-var timezone_cache_timezone;
-
-function LocalTimezone(t) {
- if (NUMBER_IS_NAN(t)) return "";
- if (t == timezone_cache_time) {
- return timezone_cache_timezone;
- }
- var timezone = %DateLocalTimezone(EquivalentTime(t));
- timezone_cache_time = t;
- timezone_cache_timezone = timezone;
- return timezone;
-}
-
-
-function WeekDay(time) {
- return Modulo(DAY(time) + 4, 7);
-}
-
-
-function LocalTime(time) {
- if (NUMBER_IS_NAN(time)) return time;
- // DaylightSavingsOffset called before local_time_offset used.
- return time + DaylightSavingsOffset(time) + local_time_offset;
-}
-
-
-var ltcache = {
- key: null,
- val: null
-};
-
-function LocalTimeNoCheck(time) {
- var ltc = ltcache;
- if (%_ObjectEquals(time, ltc.key)) return ltc.val;
-
- // Inline the DST offset cache checks for speed.
- // The cache is hit, or DaylightSavingsOffset is called,
- // before local_time_offset is used.
- var cache = DST_offset_cache;
- if (cache.start <= time && time <= cache.end) {
- var dst_offset = cache.offset;
- } else {
- var dst_offset = DaylightSavingsOffset(time);
- }
- ltc.key = time;
- return (ltc.val = time + local_time_offset + dst_offset);
-}
-
-
-function UTC(time) {
- if (NUMBER_IS_NAN(time)) return time;
- // local_time_offset is needed before the call to DaylightSavingsOffset,
- // so it may be uninitialized.
- if (IS_UNDEFINED(local_time_offset)) {
- local_time_offset = %DateLocalTimeOffset();
- }
- var tmp = time - local_time_offset;
- return tmp - DaylightSavingsOffset(tmp);
-}
-
-
-// ECMA 262 - 15.9.1.11
-function MakeTime(hour, min, sec, ms) {
- if (!$isFinite(hour)) return $NaN;
- if (!$isFinite(min)) return $NaN;
- if (!$isFinite(sec)) return $NaN;
- if (!$isFinite(ms)) return $NaN;
- return TO_INTEGER(hour) * msPerHour
- + TO_INTEGER(min) * msPerMinute
- + TO_INTEGER(sec) * msPerSecond
- + TO_INTEGER(ms);
-}
-
-
-// ECMA 262 - 15.9.1.12
-function TimeInYear(year) {
- return DaysInYear(year) * msPerDay;
-}
-
-
-var ymd_from_time_cache = [$NaN, $NaN, $NaN];
-var ymd_from_time_cached_time = $NaN;
-
-function YearFromTime(t) {
- if (t !== ymd_from_time_cached_time) {
- if (!$isFinite(t)) {
- return $NaN;
- }
-
- %DateYMDFromTime(t, ymd_from_time_cache);
- ymd_from_time_cached_time = t
- }
-
- return ymd_from_time_cache[0];
-}
-
-function MonthFromTime(t) {
- if (t !== ymd_from_time_cached_time) {
- if (!$isFinite(t)) {
- return $NaN;
- }
- %DateYMDFromTime(t, ymd_from_time_cache);
- ymd_from_time_cached_time = t
- }
-
- return ymd_from_time_cache[1];
-}
-
-function DateFromTime(t) {
- if (t !== ymd_from_time_cached_time) {
- if (!$isFinite(t)) {
- return $NaN;
- }
-
- %DateYMDFromTime(t, ymd_from_time_cache);
- ymd_from_time_cached_time = t
- }
-
- return ymd_from_time_cache[2];
-}
-
-
-// Compute number of days given a year, month, date.
-// Note that month and date can lie outside the normal range.
-// For example:
-// MakeDay(2007, -4, 20) --> MakeDay(2006, 8, 20)
-// MakeDay(2007, -33, 1) --> MakeDay(2004, 3, 1)
-// MakeDay(2007, 14, -50) --> MakeDay(2007, 8, 11)
-function MakeDay(year, month, date) {
- if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return $NaN;
-
- // Convert to integer and map -0 to 0.
- year = TO_INTEGER_MAP_MINUS_ZERO(year);
- month = TO_INTEGER_MAP_MINUS_ZERO(month);
- date = TO_INTEGER_MAP_MINUS_ZERO(date);
-
- if (year < kMinYear || year > kMaxYear ||
- month < kMinMonth || month > kMaxMonth ||
- date < kMinDate || date > kMaxDate) {
- return $NaN;
- }
-
- // Now we rely on year, month and date being SMIs.
- return %DateMakeDay(year, month, date);
-}
-
-
-// ECMA 262 - 15.9.1.13
-function MakeDate(day, time) {
- var time = day * msPerDay + time;
- // Some of our runtime funtions for computing UTC(time) rely on
- // times not being significantly larger than MAX_TIME_MS. If there
- // is no way that the time can be within range even after UTC
- // conversion we return NaN immediately instead of relying on
- // TimeClip to do it.
- if ($abs(time) > MAX_TIME_BEFORE_UTC) return $NaN;
- return time;
-}
-
-
-// ECMA 262 - 15.9.1.14
-function TimeClip(time) {
- if (!$isFinite(time)) return $NaN;
- if ($abs(time) > MAX_TIME_MS) return $NaN;
- return TO_INTEGER(time);
-}
-
-
-// The Date cache is used to limit the cost of parsing the same Date
-// strings over and over again.
-var Date_cache = {
- // Cached time value.
- time: $NaN,
- // Cached year when interpreting the time as a local time. Only
- // valid when the time matches cached time.
- year: $NaN,
- // String input for which the cached time is valid.
- string: null
-};
-
-
-%SetCode($Date, function(year, month, date, hours, minutes, seconds, ms) {
- if (!%_IsConstructCall()) {
- // ECMA 262 - 15.9.2
- return (new $Date()).toString();
- }
-
- // ECMA 262 - 15.9.3
- var argc = %_ArgumentsLength();
- var value;
- if (argc == 0) {
- value = %DateCurrentTime();
-
- } else if (argc == 1) {
- if (IS_NUMBER(year)) {
- value = TimeClip(year);
-
- } else if (IS_STRING(year)) {
- // Probe the Date cache. If we already have a time value for the
- // given time, we re-use that instead of parsing the string again.
- var cache = Date_cache;
- if (cache.string === year) {
- value = cache.time;
- } else {
- value = DateParse(year);
- if (!NUMBER_IS_NAN(value)) {
- cache.time = value;
- cache.year = YearFromTime(LocalTimeNoCheck(value));
- cache.string = year;
- }
- }
-
- } else {
- // According to ECMA 262, no hint should be given for this
- // conversion. However, ToPrimitive defaults to STRING_HINT for
- // Date objects which will lose precision when the Date
- // constructor is called with another Date object as its
- // argument. We therefore use NUMBER_HINT for the conversion,
- // which is the default for everything else than Date objects.
- // This makes us behave like KJS and SpiderMonkey.
- var time = ToPrimitive(year, NUMBER_HINT);
- value = IS_STRING(time) ? DateParse(time) : TimeClip(ToNumber(time));
- }
-
- } else {
- year = ToNumber(year);
- month = ToNumber(month);
- date = argc > 2 ? ToNumber(date) : 1;
- hours = argc > 3 ? ToNumber(hours) : 0;
- minutes = argc > 4 ? ToNumber(minutes) : 0;
- seconds = argc > 5 ? ToNumber(seconds) : 0;
- ms = argc > 6 ? ToNumber(ms) : 0;
- year = (!NUMBER_IS_NAN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
- ? 1900 + TO_INTEGER(year) : year;
- var day = MakeDay(year, month, date);
- var time = MakeTime(hours, minutes, seconds, ms);
- value = TimeClip(UTC(MakeDate(day, time)));
- }
- %_SetValueOf(this, value);
-});
-
-
-%FunctionSetPrototype($Date, new $Date($NaN));
-
-
-var WeekDays = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'];
-var Months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
-
-
-function TwoDigitString(value) {
- return value < 10 ? "0" + value : "" + value;
-}
-
-
-function DateString(time) {
- return WeekDays[WeekDay(time)] + ' '
- + Months[MonthFromTime(time)] + ' '
- + TwoDigitString(DateFromTime(time)) + ' '
- + YearFromTime(time);
-}
-
-
-var LongWeekDays = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'];
-var LongMonths = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'];
-
-
-function LongDateString(time) {
- return LongWeekDays[WeekDay(time)] + ', '
- + LongMonths[MonthFromTime(time)] + ' '
- + TwoDigitString(DateFromTime(time)) + ', '
- + YearFromTime(time);
-}
-
-
-function TimeString(time) {
- return TwoDigitString(HOUR_FROM_TIME(time)) + ':'
- + TwoDigitString(MIN_FROM_TIME(time)) + ':'
- + TwoDigitString(SEC_FROM_TIME(time));
-}
-
-
-function LocalTimezoneString(time) {
- var old_timezone = timezone_cache_timezone;
- var timezone = LocalTimezone(time);
- if (old_timezone && timezone != old_timezone) {
- // If the timezone string has changed from the one that we cached,
- // the local time offset may now be wrong. So we need to update it
- // and try again.
- local_time_offset = %DateLocalTimeOffset();
- // We also need to invalidate the DST cache as the new timezone may have
- // different DST times.
- var dst_cache = DST_offset_cache;
- dst_cache.start = 0;
- dst_cache.end = -1;
- }
-
- var timezoneOffset =
- (DaylightSavingsOffset(time) + local_time_offset) / msPerMinute;
- var sign = (timezoneOffset >= 0) ? 1 : -1;
- var hours = FLOOR((sign * timezoneOffset)/60);
- var min = FLOOR((sign * timezoneOffset)%60);
- var gmt = ' GMT' + ((sign == 1) ? '+' : '-') +
- TwoDigitString(hours) + TwoDigitString(min);
- return gmt + ' (' + timezone + ')';
-}
-
-
-function DatePrintString(time) {
- return DateString(time) + ' ' + TimeString(time);
-}
-
-// -------------------------------------------------------------------
-
-// Reused output buffer. Used when parsing date strings.
-var parse_buffer = $Array(8);
-
-// ECMA 262 - 15.9.4.2
-function DateParse(string) {
- var arr = %DateParseString(ToString(string), parse_buffer);
- if (IS_NULL(arr)) return $NaN;
-
- var day = MakeDay(arr[0], arr[1], arr[2]);
- var time = MakeTime(arr[3], arr[4], arr[5], arr[6]);
- var date = MakeDate(day, time);
-
- if (IS_NULL(arr[7])) {
- return TimeClip(UTC(date));
- } else {
- return TimeClip(date - arr[7] * 1000);
- }
-}
-
-
-// ECMA 262 - 15.9.4.3
-function DateUTC(year, month, date, hours, minutes, seconds, ms) {
- year = ToNumber(year);
- month = ToNumber(month);
- var argc = %_ArgumentsLength();
- date = argc > 2 ? ToNumber(date) : 1;
- hours = argc > 3 ? ToNumber(hours) : 0;
- minutes = argc > 4 ? ToNumber(minutes) : 0;
- seconds = argc > 5 ? ToNumber(seconds) : 0;
- ms = argc > 6 ? ToNumber(ms) : 0;
- year = (!NUMBER_IS_NAN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
- ? 1900 + TO_INTEGER(year) : year;
- var day = MakeDay(year, month, date);
- var time = MakeTime(hours, minutes, seconds, ms);
- return %_SetValueOf(this, TimeClip(MakeDate(day, time)));
-}
-
-
-// Mozilla-specific extension. Returns the number of milliseconds
-// elapsed since 1 January 1970 00:00:00 UTC.
-function DateNow() {
- return %DateCurrentTime();
-}
-
-
-// ECMA 262 - 15.9.5.2
-function DateToString() {
- var t = DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- var time_zone_string = LocalTimezoneString(t); // May update local offset.
- return DatePrintString(LocalTimeNoCheck(t)) + time_zone_string;
-}
-
-
-// ECMA 262 - 15.9.5.3
-function DateToDateString() {
- var t = DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- return DateString(LocalTimeNoCheck(t));
-}
-
-
-// ECMA 262 - 15.9.5.4
-function DateToTimeString() {
- var t = DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- var time_zone_string = LocalTimezoneString(t); // May update local offset.
- return TimeString(LocalTimeNoCheck(t)) + time_zone_string;
-}
-
-
-// ECMA 262 - 15.9.5.5
-function DateToLocaleString() {
- return %_CallFunction(this, DateToString);
-}
-
-
-// ECMA 262 - 15.9.5.6
-function DateToLocaleDateString() {
- var t = DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- return LongDateString(LocalTimeNoCheck(t));
-}
-
-
-// ECMA 262 - 15.9.5.7
-function DateToLocaleTimeString() {
- var t = DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- var lt = LocalTimeNoCheck(t);
- return TimeString(lt);
-}
-
-
-// ECMA 262 - 15.9.5.8
-function DateValueOf() {
- return DATE_VALUE(this);
-}
-
-
-// ECMA 262 - 15.9.5.9
-function DateGetTime() {
- return DATE_VALUE(this);
-}
-
-
-// ECMA 262 - 15.9.5.10
-function DateGetFullYear() {
- var t = DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return t;
- var cache = Date_cache;
- if (cache.time === t) return cache.year;
- return YearFromTime(LocalTimeNoCheck(t));
-}
-
-
-// ECMA 262 - 15.9.5.11
-function DateGetUTCFullYear() {
- var t = DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return t;
- return YearFromTime(t);
-}
-
-
-// ECMA 262 - 15.9.5.12
-function DateGetMonth() {
- var t = DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return t;
- return MonthFromTime(LocalTimeNoCheck(t));
-}
-
-
-// ECMA 262 - 15.9.5.13
-function DateGetUTCMonth() {
- var t = DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return t;
- return MonthFromTime(t);
-}
-
-
-// ECMA 262 - 15.9.5.14
-function DateGetDate() {
- var t = DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return t;
- return DateFromTime(LocalTimeNoCheck(t));
-}
-
-
-// ECMA 262 - 15.9.5.15
-function DateGetUTCDate() {
- var t = DATE_VALUE(this);
- return NAN_OR_DATE_FROM_TIME(t);
-}
-
-
-// ECMA 262 - 15.9.5.16
-function DateGetDay() {
- var t = %_ValueOf(this);
- if (NUMBER_IS_NAN(t)) return t;
- return WeekDay(LocalTimeNoCheck(t));
-}
-
-
-// ECMA 262 - 15.9.5.17
-function DateGetUTCDay() {
- var t = %_ValueOf(this);
- if (NUMBER_IS_NAN(t)) return t;
- return WeekDay(t);
-}
-
-
-// ECMA 262 - 15.9.5.18
-function DateGetHours() {
- var t = DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return t;
- return HOUR_FROM_TIME(LocalTimeNoCheck(t));
-}
-
-
-// ECMA 262 - 15.9.5.19
-function DateGetUTCHours() {
- var t = DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return t;
- return HOUR_FROM_TIME(t);
-}
-
-
-// ECMA 262 - 15.9.5.20
-function DateGetMinutes() {
- var t = DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return t;
- return MIN_FROM_TIME(LocalTimeNoCheck(t));
-}
-
-
-// ECMA 262 - 15.9.5.21
-function DateGetUTCMinutes() {
- var t = DATE_VALUE(this);
- return NAN_OR_MIN_FROM_TIME(t);
-}
-
-
-// ECMA 262 - 15.9.5.22
-function DateGetSeconds() {
- var t = DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return t;
- return SEC_FROM_TIME(LocalTimeNoCheck(t));
-}
-
-
-// ECMA 262 - 15.9.5.23
-function DateGetUTCSeconds() {
- var t = DATE_VALUE(this);
- return NAN_OR_SEC_FROM_TIME(t);
-}
-
-
-// ECMA 262 - 15.9.5.24
-function DateGetMilliseconds() {
- var t = DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return t;
- return MS_FROM_TIME(LocalTimeNoCheck(t));
-}
-
-
-// ECMA 262 - 15.9.5.25
-function DateGetUTCMilliseconds() {
- var t = DATE_VALUE(this);
- return NAN_OR_MS_FROM_TIME(t);
-}
-
-
-// ECMA 262 - 15.9.5.26
-function DateGetTimezoneOffset() {
- var t = DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return t;
- return (t - LocalTimeNoCheck(t)) / msPerMinute;
-}
-
-
-// ECMA 262 - 15.9.5.27
-function DateSetTime(ms) {
- if (!IS_DATE(this)) ThrowDateTypeError();
- return %_SetValueOf(this, TimeClip(ToNumber(ms)));
-}
-
-
-// ECMA 262 - 15.9.5.28
-function DateSetMilliseconds(ms) {
- var t = LocalTime(DATE_VALUE(this));
- ms = ToNumber(ms);
- var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), SEC_FROM_TIME(t), ms);
- return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
-}
-
-
-// ECMA 262 - 15.9.5.29
-function DateSetUTCMilliseconds(ms) {
- var t = DATE_VALUE(this);
- ms = ToNumber(ms);
- var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), SEC_FROM_TIME(t), ms);
- return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
-}
-
-
-// ECMA 262 - 15.9.5.30
-function DateSetSeconds(sec, ms) {
- var t = LocalTime(DATE_VALUE(this));
- sec = ToNumber(sec);
- ms = %_ArgumentsLength() < 2 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
- var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), sec, ms);
- return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
-}
-
-
-// ECMA 262 - 15.9.5.31
-function DateSetUTCSeconds(sec, ms) {
- var t = DATE_VALUE(this);
- sec = ToNumber(sec);
- ms = %_ArgumentsLength() < 2 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
- var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), sec, ms);
- return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
-}
-
-
-// ECMA 262 - 15.9.5.33
-function DateSetMinutes(min, sec, ms) {
- var t = LocalTime(DATE_VALUE(this));
- min = ToNumber(min);
- var argc = %_ArgumentsLength();
- sec = argc < 2 ? NAN_OR_SEC_FROM_TIME(t) : ToNumber(sec);
- ms = argc < 3 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
- var time = MakeTime(HOUR_FROM_TIME(t), min, sec, ms);
- return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
-}
-
-
-// ECMA 262 - 15.9.5.34
-function DateSetUTCMinutes(min, sec, ms) {
- var t = DATE_VALUE(this);
- min = ToNumber(min);
- var argc = %_ArgumentsLength();
- sec = argc < 2 ? NAN_OR_SEC_FROM_TIME(t) : ToNumber(sec);
- ms = argc < 3 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
- var time = MakeTime(HOUR_FROM_TIME(t), min, sec, ms);
- return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
-}
-
-
-// ECMA 262 - 15.9.5.35
-function DateSetHours(hour, min, sec, ms) {
- var t = LocalTime(DATE_VALUE(this));
- hour = ToNumber(hour);
- var argc = %_ArgumentsLength();
- min = argc < 2 ? NAN_OR_MIN_FROM_TIME(t) : ToNumber(min);
- sec = argc < 3 ? NAN_OR_SEC_FROM_TIME(t) : ToNumber(sec);
- ms = argc < 4 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
- var time = MakeTime(hour, min, sec, ms);
- return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
-}
-
-
-// ECMA 262 - 15.9.5.34
-function DateSetUTCHours(hour, min, sec, ms) {
- var t = DATE_VALUE(this);
- hour = ToNumber(hour);
- var argc = %_ArgumentsLength();
- min = argc < 2 ? NAN_OR_MIN_FROM_TIME(t) : ToNumber(min);
- sec = argc < 3 ? NAN_OR_SEC_FROM_TIME(t) : ToNumber(sec);
- ms = argc < 4 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
- var time = MakeTime(hour, min, sec, ms);
- return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
-}
-
-
-// ECMA 262 - 15.9.5.36
-function DateSetDate(date) {
- var t = LocalTime(DATE_VALUE(this));
- date = ToNumber(date);
- var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
- return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
-}
-
-
-// ECMA 262 - 15.9.5.37
-function DateSetUTCDate(date) {
- var t = DATE_VALUE(this);
- date = ToNumber(date);
- var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
- return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
-}
-
-
-// ECMA 262 - 15.9.5.38
-function DateSetMonth(month, date) {
- var t = LocalTime(DATE_VALUE(this));
- month = ToNumber(month);
- date = %_ArgumentsLength() < 2 ? NAN_OR_DATE_FROM_TIME(t) : ToNumber(date);
- var day = MakeDay(YearFromTime(t), month, date);
- return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
-}
-
-
-// ECMA 262 - 15.9.5.39
-function DateSetUTCMonth(month, date) {
- var t = DATE_VALUE(this);
- month = ToNumber(month);
- date = %_ArgumentsLength() < 2 ? NAN_OR_DATE_FROM_TIME(t) : ToNumber(date);
- var day = MakeDay(YearFromTime(t), month, date);
- return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
-}
-
-
-// ECMA 262 - 15.9.5.40
-function DateSetFullYear(year, month, date) {
- var t = DATE_VALUE(this);
- t = NUMBER_IS_NAN(t) ? 0 : LocalTimeNoCheck(t);
- year = ToNumber(year);
- var argc = %_ArgumentsLength();
- month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
- date = argc < 3 ? DateFromTime(t) : ToNumber(date);
- var day = MakeDay(year, month, date);
- return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
-}
-
-
-// ECMA 262 - 15.9.5.41
-function DateSetUTCFullYear(year, month, date) {
- var t = DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) t = 0;
- var argc = %_ArgumentsLength();
- year = ToNumber(year);
- month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
- date = argc < 3 ? DateFromTime(t) : ToNumber(date);
- var day = MakeDay(year, month, date);
- return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
-}
-
-
-// ECMA 262 - 15.9.5.42
-function DateToUTCString() {
- var t = DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- // Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT
- return WeekDays[WeekDay(t)] + ', '
- + TwoDigitString(DateFromTime(t)) + ' '
- + Months[MonthFromTime(t)] + ' '
- + YearFromTime(t) + ' '
- + TimeString(t) + ' GMT';
-}
-
-
-// ECMA 262 - B.2.4
-function DateGetYear() {
- var t = DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return $NaN;
- return YearFromTime(LocalTimeNoCheck(t)) - 1900;
-}
-
-
-// ECMA 262 - B.2.5
-function DateSetYear(year) {
- var t = LocalTime(DATE_VALUE(this));
- if (NUMBER_IS_NAN(t)) t = 0;
- year = ToNumber(year);
- if (NUMBER_IS_NAN(year)) return %_SetValueOf(this, $NaN);
- year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
- ? 1900 + TO_INTEGER(year) : year;
- var day = MakeDay(year, MonthFromTime(t), DateFromTime(t));
- return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
-}
-
-
-// ECMA 262 - B.2.6
-//
-// Notice that this does not follow ECMA 262 completely. ECMA 262
-// says that toGMTString should be the same Function object as
-// toUTCString. JSC does not do this, so for compatibility we do not
-// do that either. Instead, we create a new function whose name
-// property will return toGMTString.
-function DateToGMTString() {
- return %_CallFunction(this, DateToUTCString);
-}
-
-
-function PadInt(n, digits) {
- if (digits == 1) return n;
- return n < MathPow(10, digits - 1) ? '0' + PadInt(n, digits - 1) : n;
-}
-
-
-function DateToISOString() {
- var t = DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- return this.getUTCFullYear() +
- '-' + PadInt(this.getUTCMonth() + 1, 2) +
- '-' + PadInt(this.getUTCDate(), 2) +
- 'T' + PadInt(this.getUTCHours(), 2) +
- ':' + PadInt(this.getUTCMinutes(), 2) +
- ':' + PadInt(this.getUTCSeconds(), 2) +
- '.' + PadInt(this.getUTCMilliseconds(), 3) +
- 'Z';
-}
-
-
-function DateToJSON(key) {
- var o = ToObject(this);
- var tv = DefaultNumber(o);
- if (IS_NUMBER(tv) && !NUMBER_IS_FINITE(tv)) {
- return null;
- }
- return o.toISOString();
-}
-
-
-function ResetDateCache() {
-
- // Reset the local_time_offset:
- local_time_offset = %DateLocalTimeOffset();
-
- // Reset the DST offset cache:
- var cache = DST_offset_cache;
- cache.offset = 0;
- cache.start = 0;
- cache.end = -1;
- cache.increment = 0;
- cache.initial_increment = 19 * msPerDay;
-
- // Reset the timezone cache:
- timezone_cache_time = $NaN;
- timezone_cache_timezone = undefined;
-
- // Reset the ltcache:
- ltcache.key = null;
- ltcache.val = null;
-
- // Reset the ymd_from_time_cache:
- ymd_from_time_cache = [$NaN, $NaN, $NaN];
- ymd_from_time_cached_time = $NaN;
-
- // Reset the date cache:
- cache = Date_cache;
- cache.time = $NaN;
- cache.year = $NaN;
- cache.string = null;
-}
-
-
-// -------------------------------------------------------------------
-
-function SetupDate() {
- // Setup non-enumerable properties of the Date object itself.
- InstallFunctions($Date, DONT_ENUM, $Array(
- "UTC", DateUTC,
- "parse", DateParse,
- "now", DateNow
- ));
-
- // Setup non-enumerable constructor property of the Date prototype object.
- %SetProperty($Date.prototype, "constructor", $Date, DONT_ENUM);
-
- // Setup non-enumerable functions of the Date prototype object and
- // set their names.
- InstallFunctionsOnHiddenPrototype($Date.prototype, DONT_ENUM, $Array(
- "toString", DateToString,
- "toDateString", DateToDateString,
- "toTimeString", DateToTimeString,
- "toLocaleString", DateToLocaleString,
- "toLocaleDateString", DateToLocaleDateString,
- "toLocaleTimeString", DateToLocaleTimeString,
- "valueOf", DateValueOf,
- "getTime", DateGetTime,
- "getFullYear", DateGetFullYear,
- "getUTCFullYear", DateGetUTCFullYear,
- "getMonth", DateGetMonth,
- "getUTCMonth", DateGetUTCMonth,
- "getDate", DateGetDate,
- "getUTCDate", DateGetUTCDate,
- "getDay", DateGetDay,
- "getUTCDay", DateGetUTCDay,
- "getHours", DateGetHours,
- "getUTCHours", DateGetUTCHours,
- "getMinutes", DateGetMinutes,
- "getUTCMinutes", DateGetUTCMinutes,
- "getSeconds", DateGetSeconds,
- "getUTCSeconds", DateGetUTCSeconds,
- "getMilliseconds", DateGetMilliseconds,
- "getUTCMilliseconds", DateGetUTCMilliseconds,
- "getTimezoneOffset", DateGetTimezoneOffset,
- "setTime", DateSetTime,
- "setMilliseconds", DateSetMilliseconds,
- "setUTCMilliseconds", DateSetUTCMilliseconds,
- "setSeconds", DateSetSeconds,
- "setUTCSeconds", DateSetUTCSeconds,
- "setMinutes", DateSetMinutes,
- "setUTCMinutes", DateSetUTCMinutes,
- "setHours", DateSetHours,
- "setUTCHours", DateSetUTCHours,
- "setDate", DateSetDate,
- "setUTCDate", DateSetUTCDate,
- "setMonth", DateSetMonth,
- "setUTCMonth", DateSetUTCMonth,
- "setFullYear", DateSetFullYear,
- "setUTCFullYear", DateSetUTCFullYear,
- "toGMTString", DateToGMTString,
- "toUTCString", DateToUTCString,
- "getYear", DateGetYear,
- "setYear", DateSetYear,
- "toISOString", DateToISOString,
- "toJSON", DateToJSON
- ));
-}
-
-SetupDate();
diff --git a/src/3rdparty/v8/src/dateparser-inl.h b/src/3rdparty/v8/src/dateparser-inl.h
deleted file mode 100644
index ac28c62..0000000
--- a/src/3rdparty/v8/src/dateparser-inl.h
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DATEPARSER_INL_H_
-#define V8_DATEPARSER_INL_H_
-
-#include "dateparser.h"
-
-namespace v8 {
-namespace internal {
-
-template <typename Char>
-bool DateParser::Parse(Vector<Char> str, FixedArray* out) {
- ASSERT(out->length() >= OUTPUT_SIZE);
- InputReader<Char> in(str);
- TimeZoneComposer tz;
- TimeComposer time;
- DayComposer day;
-
- while (!in.IsEnd()) {
- if (in.IsAsciiDigit()) {
- // Parse a number (possibly with 1 or 2 trailing colons).
- int n = in.ReadUnsignedNumber();
- if (in.Skip(':')) {
- if (in.Skip(':')) {
- // n + "::"
- if (!time.IsEmpty()) return false;
- time.Add(n);
- time.Add(0);
- } else {
- // n + ":"
- if (!time.Add(n)) return false;
- in.Skip('.');
- }
- } else if (in.Skip('.') && time.IsExpecting(n)) {
- time.Add(n);
- if (!in.IsAsciiDigit()) return false;
- int n = in.ReadMilliseconds();
- time.AddFinal(n);
- } else if (tz.IsExpecting(n)) {
- tz.SetAbsoluteMinute(n);
- } else if (time.IsExpecting(n)) {
- time.AddFinal(n);
- // Require end, white space, "Z", "+" or "-" immediately after
- // finalizing time.
- if (!in.IsEnd() && !in.SkipWhiteSpace() && !in.Is('Z') &&
- !in.IsAsciiSign()) return false;
- } else {
- if (!day.Add(n)) return false;
- in.Skip('-'); // Ignore suffix '-' for year, month, or day.
- // Skip trailing 'T' for ECMAScript 5 date string format but make
- // sure that it is followed by a digit (for the time).
- if (in.Skip('T') && !in.IsAsciiDigit()) return false;
- }
- } else if (in.IsAsciiAlphaOrAbove()) {
- // Parse a "word" (sequence of chars. >= 'A').
- uint32_t pre[KeywordTable::kPrefixLength];
- int len = in.ReadWord(pre, KeywordTable::kPrefixLength);
- int index = KeywordTable::Lookup(pre, len);
- KeywordType type = KeywordTable::GetType(index);
-
- if (type == AM_PM && !time.IsEmpty()) {
- time.SetHourOffset(KeywordTable::GetValue(index));
- } else if (type == MONTH_NAME) {
- day.SetNamedMonth(KeywordTable::GetValue(index));
- in.Skip('-'); // Ignore suffix '-' for month names
- } else if (type == TIME_ZONE_NAME && in.HasReadNumber()) {
- tz.Set(KeywordTable::GetValue(index));
- } else {
- // Garbage words are illegal if a number has been read.
- if (in.HasReadNumber()) return false;
- }
- } else if (in.IsAsciiSign() && (tz.IsUTC() || !time.IsEmpty())) {
- // Parse UTC offset (only after UTC or time).
- tz.SetSign(in.GetAsciiSignValue());
- in.Next();
- int n = in.ReadUnsignedNumber();
- if (in.Skip(':')) {
- tz.SetAbsoluteHour(n);
- tz.SetAbsoluteMinute(kNone);
- } else {
- tz.SetAbsoluteHour(n / 100);
- tz.SetAbsoluteMinute(n % 100);
- }
- } else if (in.Is('(')) {
- // Ignore anything from '(' to a matching ')' or end of string.
- in.SkipParentheses();
- } else if ((in.IsAsciiSign() || in.Is(')')) && in.HasReadNumber()) {
- // Extra sign or ')' is illegal if a number has been read.
- return false;
- } else {
- // Ignore other characters.
- in.Next();
- }
- }
- return day.Write(out) && time.Write(out) && tz.Write(out);
-}
-
-} } // namespace v8::internal
-
-#endif // V8_DATEPARSER_INL_H_
diff --git a/src/3rdparty/v8/src/dateparser.cc b/src/3rdparty/v8/src/dateparser.cc
deleted file mode 100644
index 6d80488..0000000
--- a/src/3rdparty/v8/src/dateparser.cc
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "dateparser.h"
-
-namespace v8 {
-namespace internal {
-
-bool DateParser::DayComposer::Write(FixedArray* output) {
- if (index_ < 1) return false;
- // Day and month defaults to 1.
- while (index_ < kSize) {
- comp_[index_++] = 1;
- }
-
- int year = 0; // Default year is 0 (=> 2000) for KJS compatibility.
- int month = kNone;
- int day = kNone;
-
- if (named_month_ == kNone) {
- if (index_ == 3 && !IsDay(comp_[0])) {
- // YMD
- year = comp_[0];
- month = comp_[1];
- day = comp_[2];
- } else {
- // MD(Y)
- month = comp_[0];
- day = comp_[1];
- if (index_ == 3) year = comp_[2];
- }
- } else {
- month = named_month_;
- if (index_ == 1) {
- // MD or DM
- day = comp_[0];
- } else if (!IsDay(comp_[0])) {
- // YMD, MYD, or YDM
- year = comp_[0];
- day = comp_[1];
- } else {
- // DMY, MDY, or DYM
- day = comp_[0];
- year = comp_[1];
- }
- }
-
- if (Between(year, 0, 49)) year += 2000;
- else if (Between(year, 50, 99)) year += 1900;
-
- if (!Smi::IsValid(year) || !IsMonth(month) || !IsDay(day)) return false;
-
- output->set(YEAR, Smi::FromInt(year));
- output->set(MONTH, Smi::FromInt(month - 1)); // 0-based
- output->set(DAY, Smi::FromInt(day));
- return true;
-}
-
-
-bool DateParser::TimeComposer::Write(FixedArray* output) {
- // All time slots default to 0
- while (index_ < kSize) {
- comp_[index_++] = 0;
- }
-
- int& hour = comp_[0];
- int& minute = comp_[1];
- int& second = comp_[2];
- int& millisecond = comp_[3];
-
- if (hour_offset_ != kNone) {
- if (!IsHour12(hour)) return false;
- hour %= 12;
- hour += hour_offset_;
- }
-
- if (!IsHour(hour) || !IsMinute(minute) ||
- !IsSecond(second) || !IsMillisecond(millisecond)) return false;
-
- output->set(HOUR, Smi::FromInt(hour));
- output->set(MINUTE, Smi::FromInt(minute));
- output->set(SECOND, Smi::FromInt(second));
- output->set(MILLISECOND, Smi::FromInt(millisecond));
- return true;
-}
-
-bool DateParser::TimeZoneComposer::Write(FixedArray* output) {
- if (sign_ != kNone) {
- if (hour_ == kNone) hour_ = 0;
- if (minute_ == kNone) minute_ = 0;
- int total_seconds = sign_ * (hour_ * 3600 + minute_ * 60);
- if (!Smi::IsValid(total_seconds)) return false;
- output->set(UTC_OFFSET, Smi::FromInt(total_seconds));
- } else {
- output->set_null(UTC_OFFSET);
- }
- return true;
-}
-
-const int8_t DateParser::KeywordTable::
- array[][DateParser::KeywordTable::kEntrySize] = {
- {'j', 'a', 'n', DateParser::MONTH_NAME, 1},
- {'f', 'e', 'b', DateParser::MONTH_NAME, 2},
- {'m', 'a', 'r', DateParser::MONTH_NAME, 3},
- {'a', 'p', 'r', DateParser::MONTH_NAME, 4},
- {'m', 'a', 'y', DateParser::MONTH_NAME, 5},
- {'j', 'u', 'n', DateParser::MONTH_NAME, 6},
- {'j', 'u', 'l', DateParser::MONTH_NAME, 7},
- {'a', 'u', 'g', DateParser::MONTH_NAME, 8},
- {'s', 'e', 'p', DateParser::MONTH_NAME, 9},
- {'o', 'c', 't', DateParser::MONTH_NAME, 10},
- {'n', 'o', 'v', DateParser::MONTH_NAME, 11},
- {'d', 'e', 'c', DateParser::MONTH_NAME, 12},
- {'a', 'm', '\0', DateParser::AM_PM, 0},
- {'p', 'm', '\0', DateParser::AM_PM, 12},
- {'u', 't', '\0', DateParser::TIME_ZONE_NAME, 0},
- {'u', 't', 'c', DateParser::TIME_ZONE_NAME, 0},
- {'z', '\0', '\0', DateParser::TIME_ZONE_NAME, 0},
- {'g', 'm', 't', DateParser::TIME_ZONE_NAME, 0},
- {'c', 'd', 't', DateParser::TIME_ZONE_NAME, -5},
- {'c', 's', 't', DateParser::TIME_ZONE_NAME, -6},
- {'e', 'd', 't', DateParser::TIME_ZONE_NAME, -4},
- {'e', 's', 't', DateParser::TIME_ZONE_NAME, -5},
- {'m', 'd', 't', DateParser::TIME_ZONE_NAME, -6},
- {'m', 's', 't', DateParser::TIME_ZONE_NAME, -7},
- {'p', 'd', 't', DateParser::TIME_ZONE_NAME, -7},
- {'p', 's', 't', DateParser::TIME_ZONE_NAME, -8},
- {'\0', '\0', '\0', DateParser::INVALID, 0},
-};
-
-
-// We could use perfect hashing here, but this is not a bottleneck.
-int DateParser::KeywordTable::Lookup(const uint32_t* pre, int len) {
- int i;
- for (i = 0; array[i][kTypeOffset] != INVALID; i++) {
- int j = 0;
- while (j < kPrefixLength &&
- pre[j] == static_cast<uint32_t>(array[i][j])) {
- j++;
- }
- // Check if we have a match and the length is legal.
- // Word longer than keyword is only allowed for month names.
- if (j == kPrefixLength &&
- (len <= kPrefixLength || array[i][kTypeOffset] == MONTH_NAME)) {
- return i;
- }
- }
- return i;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/dateparser.h b/src/3rdparty/v8/src/dateparser.h
deleted file mode 100644
index 51109ee..0000000
--- a/src/3rdparty/v8/src/dateparser.h
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DATEPARSER_H_
-#define V8_DATEPARSER_H_
-
-#include "char-predicates-inl.h"
-#include "scanner-base.h"
-
-namespace v8 {
-namespace internal {
-
-class DateParser : public AllStatic {
- public:
-
- // Parse the string as a date. If parsing succeeds, return true after
- // filling out the output array as follows (all integers are Smis):
- // [0]: year
- // [1]: month (0 = Jan, 1 = Feb, ...)
- // [2]: day
- // [3]: hour
- // [4]: minute
- // [5]: second
- // [6]: millisecond
- // [7]: UTC offset in seconds, or null value if no timezone specified
- // If parsing fails, return false (content of output array is not defined).
- template <typename Char>
- static bool Parse(Vector<Char> str, FixedArray* output);
-
- enum {
- YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, MILLISECOND, UTC_OFFSET, OUTPUT_SIZE
- };
-
- private:
- // Range testing
- static inline bool Between(int x, int lo, int hi) {
- return static_cast<unsigned>(x - lo) <= static_cast<unsigned>(hi - lo);
- }
- // Indicates a missing value.
- static const int kNone = kMaxInt;
-
- // InputReader provides basic string parsing and character classification.
- template <typename Char>
- class InputReader BASE_EMBEDDED {
- public:
- explicit InputReader(Vector<Char> s)
- : index_(0),
- buffer_(s),
- has_read_number_(false),
- scanner_constants_(Isolate::Current()->scanner_constants()) {
- Next();
- }
-
- // Advance to the next character of the string.
- void Next() { ch_ = (index_ < buffer_.length()) ? buffer_[index_++] : 0; }
-
- // Read a string of digits as an unsigned number (cap just below kMaxInt).
- int ReadUnsignedNumber() {
- has_read_number_ = true;
- int n;
- for (n = 0; IsAsciiDigit() && n < kMaxInt / 10 - 1; Next()) {
- n = n * 10 + ch_ - '0';
- }
- return n;
- }
-
- // Read a string of digits, take the first three or fewer as an unsigned
- // number of milliseconds, and ignore any digits after the first three.
- int ReadMilliseconds() {
- has_read_number_ = true;
- int n = 0;
- int power;
- for (power = 100; IsAsciiDigit(); Next(), power = power / 10) {
- n = n + power * (ch_ - '0');
- }
- return n;
- }
-
- // Read a word (sequence of chars. >= 'A'), fill the given buffer with a
- // lower-case prefix, and pad any remainder of the buffer with zeroes.
- // Return word length.
- int ReadWord(uint32_t* prefix, int prefix_size) {
- int len;
- for (len = 0; IsAsciiAlphaOrAbove(); Next(), len++) {
- if (len < prefix_size) prefix[len] = AsciiAlphaToLower(ch_);
- }
- for (int i = len; i < prefix_size; i++) prefix[i] = 0;
- return len;
- }
-
- // The skip methods return whether they actually skipped something.
- bool Skip(uint32_t c) {
- if (ch_ == c) {
- Next();
- return true;
- }
- return false;
- }
-
- bool SkipWhiteSpace() {
- if (scanner_constants_->IsWhiteSpace(ch_)) {
- Next();
- return true;
- }
- return false;
- }
-
- bool SkipParentheses() {
- if (ch_ != '(') return false;
- int balance = 0;
- do {
- if (ch_ == ')') --balance;
- else if (ch_ == '(') ++balance;
- Next();
- } while (balance > 0 && ch_);
- return true;
- }
-
- // Character testing/classification. Non-ASCII digits are not supported.
- bool Is(uint32_t c) const { return ch_ == c; }
- bool IsEnd() const { return ch_ == 0; }
- bool IsAsciiDigit() const { return IsDecimalDigit(ch_); }
- bool IsAsciiAlphaOrAbove() const { return ch_ >= 'A'; }
- bool IsAsciiSign() const { return ch_ == '+' || ch_ == '-'; }
-
- // Return 1 for '+' and -1 for '-'.
- int GetAsciiSignValue() const { return 44 - static_cast<int>(ch_); }
-
- // Indicates whether any (possibly empty!) numbers have been read.
- bool HasReadNumber() const { return has_read_number_; }
-
- private:
- int index_;
- Vector<Char> buffer_;
- bool has_read_number_;
- uint32_t ch_;
- ScannerConstants* scanner_constants_;
- };
-
- enum KeywordType { INVALID, MONTH_NAME, TIME_ZONE_NAME, AM_PM };
-
- // KeywordTable maps names of months, time zones, am/pm to numbers.
- class KeywordTable : public AllStatic {
- public:
- // Look up a word in the keyword table and return an index.
- // 'pre' contains a prefix of the word, zero-padded to size kPrefixLength
- // and 'len' is the word length.
- static int Lookup(const uint32_t* pre, int len);
- // Get the type of the keyword at index i.
- static KeywordType GetType(int i) {
- return static_cast<KeywordType>(array[i][kTypeOffset]);
- }
- // Get the value of the keyword at index i.
- static int GetValue(int i) { return array[i][kValueOffset]; }
-
- static const int kPrefixLength = 3;
- static const int kTypeOffset = kPrefixLength;
- static const int kValueOffset = kTypeOffset + 1;
- static const int kEntrySize = kValueOffset + 1;
- static const int8_t array[][kEntrySize];
- };
-
- class TimeZoneComposer BASE_EMBEDDED {
- public:
- TimeZoneComposer() : sign_(kNone), hour_(kNone), minute_(kNone) {}
- void Set(int offset_in_hours) {
- sign_ = offset_in_hours < 0 ? -1 : 1;
- hour_ = offset_in_hours * sign_;
- minute_ = 0;
- }
- void SetSign(int sign) { sign_ = sign < 0 ? -1 : 1; }
- void SetAbsoluteHour(int hour) { hour_ = hour; }
- void SetAbsoluteMinute(int minute) { minute_ = minute; }
- bool IsExpecting(int n) const {
- return hour_ != kNone && minute_ == kNone && TimeComposer::IsMinute(n);
- }
- bool IsUTC() const { return hour_ == 0 && minute_ == 0; }
- bool Write(FixedArray* output);
- private:
- int sign_;
- int hour_;
- int minute_;
- };
-
- class TimeComposer BASE_EMBEDDED {
- public:
- TimeComposer() : index_(0), hour_offset_(kNone) {}
- bool IsEmpty() const { return index_ == 0; }
- bool IsExpecting(int n) const {
- return (index_ == 1 && IsMinute(n)) ||
- (index_ == 2 && IsSecond(n)) ||
- (index_ == 3 && IsMillisecond(n));
- }
- bool Add(int n) {
- return index_ < kSize ? (comp_[index_++] = n, true) : false;
- }
- bool AddFinal(int n) {
- if (!Add(n)) return false;
- while (index_ < kSize) comp_[index_++] = 0;
- return true;
- }
- void SetHourOffset(int n) { hour_offset_ = n; }
- bool Write(FixedArray* output);
-
- static bool IsMinute(int x) { return Between(x, 0, 59); }
- private:
- static bool IsHour(int x) { return Between(x, 0, 23); }
- static bool IsHour12(int x) { return Between(x, 0, 12); }
- static bool IsSecond(int x) { return Between(x, 0, 59); }
- static bool IsMillisecond(int x) { return Between(x, 0, 999); }
-
- static const int kSize = 4;
- int comp_[kSize];
- int index_;
- int hour_offset_;
- };
-
- class DayComposer BASE_EMBEDDED {
- public:
- DayComposer() : index_(0), named_month_(kNone) {}
- bool IsEmpty() const { return index_ == 0; }
- bool Add(int n) {
- return index_ < kSize ? (comp_[index_++] = n, true) : false;
- }
- void SetNamedMonth(int n) { named_month_ = n; }
- bool Write(FixedArray* output);
- private:
- static bool IsMonth(int x) { return Between(x, 1, 12); }
- static bool IsDay(int x) { return Between(x, 1, 31); }
-
- static const int kSize = 3;
- int comp_[kSize];
- int index_;
- int named_month_;
- };
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_DATEPARSER_H_
diff --git a/src/3rdparty/v8/src/debug-agent.cc b/src/3rdparty/v8/src/debug-agent.cc
deleted file mode 100644
index 498b88a..0000000
--- a/src/3rdparty/v8/src/debug-agent.cc
+++ /dev/null
@@ -1,447 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "v8.h"
-#include "debug.h"
-#include "debug-agent.h"
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-namespace v8 {
-namespace internal {
-
-// Public V8 debugger API message handler function. This function just delegates
-// to the debugger agent through it's data parameter.
-void DebuggerAgentMessageHandler(const v8::Debug::Message& message) {
- DebuggerAgent* agent = Isolate::Current()->debugger_agent_instance();
- ASSERT(agent != NULL);
- agent->DebuggerMessage(message);
-}
-
-
-// Debugger agent main thread.
-void DebuggerAgent::Run() {
- const int kOneSecondInMicros = 1000000;
-
- // Allow this socket to reuse port even if still in TIME_WAIT.
- server_->SetReuseAddress(true);
-
- // First bind the socket to the requested port.
- bool bound = false;
- while (!bound && !terminate_) {
- bound = server_->Bind(port_);
-
- // If an error occurred wait a bit before retrying. The most common error
- // would be that the port is already in use so this avoids a busy loop and
- // make the agent take over the port when it becomes free.
- if (!bound) {
- PrintF("Failed to open socket on port %d, "
- "waiting %d ms before retrying\n", port_, kOneSecondInMicros / 1000);
- terminate_now_->Wait(kOneSecondInMicros);
- }
- }
-
- // Accept connections on the bound port.
- while (!terminate_) {
- bool ok = server_->Listen(1);
- listening_->Signal();
- if (ok) {
- // Accept the new connection.
- Socket* client = server_->Accept();
- ok = client != NULL;
- if (ok) {
- // Create and start a new session.
- CreateSession(client);
- }
- }
- }
-}
-
-
-void DebuggerAgent::Shutdown() {
- // Set the termination flag.
- terminate_ = true;
-
- // Signal termination and make the server exit either its listen call or its
- // binding loop. This makes sure that no new sessions can be established.
- terminate_now_->Signal();
- server_->Shutdown();
- Join();
-
- // Close existing session if any.
- CloseSession();
-}
-
-
-void DebuggerAgent::WaitUntilListening() {
- listening_->Wait();
-}
-
-static const char* kCreateSessionMessage =
- "Remote debugging session already active\r\n";
-
-void DebuggerAgent::CreateSession(Socket* client) {
- ScopedLock with(session_access_);
-
- // If another session is already established terminate this one.
- if (session_ != NULL) {
- client->Send(kCreateSessionMessage, StrLength(kCreateSessionMessage));
- delete client;
- return;
- }
-
- // Create a new session and hook up the debug message handler.
- session_ = new DebuggerAgentSession(isolate(), this, client);
- v8::Debug::SetMessageHandler2(DebuggerAgentMessageHandler);
- session_->Start();
-}
-
-
-void DebuggerAgent::CloseSession() {
- ScopedLock with(session_access_);
-
- // Terminate the session.
- if (session_ != NULL) {
- session_->Shutdown();
- session_->Join();
- delete session_;
- session_ = NULL;
- }
-}
-
-
-void DebuggerAgent::DebuggerMessage(const v8::Debug::Message& message) {
- ScopedLock with(session_access_);
-
- // Forward the message handling to the session.
- if (session_ != NULL) {
- v8::String::Value val(message.GetJSON());
- session_->DebuggerMessage(Vector<uint16_t>(const_cast<uint16_t*>(*val),
- val.length()));
- }
-}
-
-
-void DebuggerAgent::OnSessionClosed(DebuggerAgentSession* session) {
- // Don't do anything during termination.
- if (terminate_) {
- return;
- }
-
- // Terminate the session.
- ScopedLock with(session_access_);
- ASSERT(session == session_);
- if (session == session_) {
- CloseSession();
- }
-}
-
-
-void DebuggerAgentSession::Run() {
- // Send the hello message.
- bool ok = DebuggerAgentUtil::SendConnectMessage(client_, *agent_->name_);
- if (!ok) return;
-
- while (true) {
- // Read data from the debugger front end.
- SmartPointer<char> message = DebuggerAgentUtil::ReceiveMessage(client_);
-
- const char* msg = *message;
- bool is_closing_session = (msg == NULL);
-
- if (msg == NULL) {
- // If we lost the connection, then simulate a disconnect msg:
- msg = "{\"seq\":1,\"type\":\"request\",\"command\":\"disconnect\"}";
-
- } else {
- // Check if we're getting a disconnect request:
- const char* disconnectRequestStr =
- "\"type\":\"request\",\"command\":\"disconnect\"}";
- const char* result = strstr(msg, disconnectRequestStr);
- if (result != NULL) {
- is_closing_session = true;
- }
- }
-
- // Convert UTF-8 to UTF-16.
- unibrow::Utf8InputBuffer<> buf(msg, StrLength(msg));
- int len = 0;
- while (buf.has_more()) {
- buf.GetNext();
- len++;
- }
- ScopedVector<int16_t> temp(len + 1);
- buf.Reset(msg, StrLength(msg));
- for (int i = 0; i < len; i++) {
- temp[i] = buf.GetNext();
- }
-
- // Send the request received to the debugger.
- v8::Debug::SendCommand(reinterpret_cast<const uint16_t *>(temp.start()),
- len);
-
- if (is_closing_session) {
- // Session is closed.
- agent_->OnSessionClosed(this);
- return;
- }
- }
-}
-
-
-void DebuggerAgentSession::DebuggerMessage(Vector<uint16_t> message) {
- DebuggerAgentUtil::SendMessage(client_, message);
-}
-
-
-void DebuggerAgentSession::Shutdown() {
- // Shutdown the socket to end the blocking receive.
- client_->Shutdown();
-}
-
-
-const char* const DebuggerAgentUtil::kContentLength = "Content-Length";
-const int DebuggerAgentUtil::kContentLengthSize =
- StrLength(kContentLength);
-
-
-SmartPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
- int received;
-
- // Read header.
- int content_length = 0;
- while (true) {
- const int kHeaderBufferSize = 80;
- char header_buffer[kHeaderBufferSize];
- int header_buffer_position = 0;
- char c = '\0'; // One character receive buffer.
- char prev_c = '\0'; // Previous character.
-
- // Read until CRLF.
- while (!(c == '\n' && prev_c == '\r')) {
- prev_c = c;
- received = conn->Receive(&c, 1);
- if (received <= 0) {
- PrintF("Error %d\n", Socket::LastError());
- return SmartPointer<char>();
- }
-
- // Add character to header buffer.
- if (header_buffer_position < kHeaderBufferSize) {
- header_buffer[header_buffer_position++] = c;
- }
- }
-
- // Check for end of header (empty header line).
- if (header_buffer_position == 2) { // Receive buffer contains CRLF.
- break;
- }
-
- // Terminate header.
- ASSERT(header_buffer_position > 1); // At least CRLF is received.
- ASSERT(header_buffer_position <= kHeaderBufferSize);
- header_buffer[header_buffer_position - 2] = '\0';
-
- // Split header.
- char* key = header_buffer;
- char* value = NULL;
- for (int i = 0; header_buffer[i] != '\0'; i++) {
- if (header_buffer[i] == ':') {
- header_buffer[i] = '\0';
- value = header_buffer + i + 1;
- while (*value == ' ') {
- value++;
- }
- break;
- }
- }
-
- // Check that key is Content-Length.
- if (strcmp(key, kContentLength) == 0) {
- // Get the content length value if present and within a sensible range.
- if (value == NULL || strlen(value) > 7) {
- return SmartPointer<char>();
- }
- for (int i = 0; value[i] != '\0'; i++) {
- // Bail out if illegal data.
- if (value[i] < '0' || value[i] > '9') {
- return SmartPointer<char>();
- }
- content_length = 10 * content_length + (value[i] - '0');
- }
- } else {
- // For now just print all other headers than Content-Length.
- PrintF("%s: %s\n", key, value != NULL ? value : "(no value)");
- }
- }
-
- // Return now if no body.
- if (content_length == 0) {
- return SmartPointer<char>();
- }
-
- // Read body.
- char* buffer = NewArray<char>(content_length + 1);
- received = ReceiveAll(conn, buffer, content_length);
- if (received < content_length) {
- PrintF("Error %d\n", Socket::LastError());
- return SmartPointer<char>();
- }
- buffer[content_length] = '\0';
-
- return SmartPointer<char>(buffer);
-}
-
-
-bool DebuggerAgentUtil::SendConnectMessage(const Socket* conn,
- const char* embedding_host) {
- static const int kBufferSize = 80;
- char buffer[kBufferSize]; // Sending buffer.
- bool ok;
- int len;
-
- // Send the header.
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "Type: connect\r\n");
- ok = conn->Send(buffer, len);
- if (!ok) return false;
-
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "V8-Version: %s\r\n", v8::V8::GetVersion());
- ok = conn->Send(buffer, len);
- if (!ok) return false;
-
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "Protocol-Version: 1\r\n");
- ok = conn->Send(buffer, len);
- if (!ok) return false;
-
- if (embedding_host != NULL) {
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "Embedding-Host: %s\r\n", embedding_host);
- ok = conn->Send(buffer, len);
- if (!ok) return false;
- }
-
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "%s: 0\r\n", kContentLength);
- ok = conn->Send(buffer, len);
- if (!ok) return false;
-
- // Terminate header with empty line.
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
- ok = conn->Send(buffer, len);
- if (!ok) return false;
-
- // No body for connect message.
-
- return true;
-}
-
-
-bool DebuggerAgentUtil::SendMessage(const Socket* conn,
- const Vector<uint16_t> message) {
- static const int kBufferSize = 80;
- char buffer[kBufferSize]; // Sending buffer both for header and body.
-
- // Calculate the message size in UTF-8 encoding.
- int utf8_len = 0;
- for (int i = 0; i < message.length(); i++) {
- utf8_len += unibrow::Utf8::Length(message[i]);
- }
-
- // Send the header.
- int len;
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "%s: %d\r\n", kContentLength, utf8_len);
- conn->Send(buffer, len);
-
- // Terminate header with empty line.
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
- conn->Send(buffer, len);
-
- // Send message body as UTF-8.
- int buffer_position = 0; // Current buffer position.
- for (int i = 0; i < message.length(); i++) {
- // Write next UTF-8 encoded character to buffer.
- buffer_position +=
- unibrow::Utf8::Encode(buffer + buffer_position, message[i]);
- ASSERT(buffer_position < kBufferSize);
-
- // Send buffer if full or last character is encoded.
- if (kBufferSize - buffer_position < 3 || i == message.length() - 1) {
- conn->Send(buffer, buffer_position);
- buffer_position = 0;
- }
- }
-
- return true;
-}
-
-
-bool DebuggerAgentUtil::SendMessage(const Socket* conn,
- const v8::Handle<v8::String> request) {
- static const int kBufferSize = 80;
- char buffer[kBufferSize]; // Sending buffer both for header and body.
-
- // Convert the request to UTF-8 encoding.
- v8::String::Utf8Value utf8_request(request);
-
- // Send the header.
- int len;
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "Content-Length: %d\r\n", utf8_request.length());
- conn->Send(buffer, len);
-
- // Terminate header with empty line.
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
- conn->Send(buffer, len);
-
- // Send message body as UTF-8.
- conn->Send(*utf8_request, utf8_request.length());
-
- return true;
-}
-
-
-// Receive the full buffer before returning unless an error occours.
-int DebuggerAgentUtil::ReceiveAll(const Socket* conn, char* data, int len) {
- int total_received = 0;
- while (total_received < len) {
- int received = conn->Receive(data + total_received, len - total_received);
- if (received <= 0) {
- return total_received;
- }
- total_received += received;
- }
- return total_received;
-}
-
-} } // namespace v8::internal
-
-#endif // ENABLE_DEBUGGER_SUPPORT
diff --git a/src/3rdparty/v8/src/debug-agent.h b/src/3rdparty/v8/src/debug-agent.h
deleted file mode 100644
index a25002e..0000000
--- a/src/3rdparty/v8/src/debug-agent.h
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DEBUG_AGENT_H_
-#define V8_DEBUG_AGENT_H_
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-#include "../include/v8-debug.h"
-#include "platform.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward decelrations.
-class DebuggerAgentSession;
-
-
-// Debugger agent which starts a socket listener on the debugger port and
-// handles connection from a remote debugger.
-class DebuggerAgent: public Thread {
- public:
- DebuggerAgent(Isolate* isolate, const char* name, int port)
- : Thread(isolate, name),
- name_(StrDup(name)), port_(port),
- server_(OS::CreateSocket()), terminate_(false),
- session_access_(OS::CreateMutex()), session_(NULL),
- terminate_now_(OS::CreateSemaphore(0)),
- listening_(OS::CreateSemaphore(0)) {
- ASSERT(Isolate::Current()->debugger_agent_instance() == NULL);
- Isolate::Current()->set_debugger_agent_instance(this);
- }
- ~DebuggerAgent() {
- Isolate::Current()->set_debugger_agent_instance(NULL);
- delete server_;
- }
-
- void Shutdown();
- void WaitUntilListening();
-
- private:
- void Run();
- void CreateSession(Socket* socket);
- void DebuggerMessage(const v8::Debug::Message& message);
- void CloseSession();
- void OnSessionClosed(DebuggerAgentSession* session);
-
- SmartPointer<const char> name_; // Name of the embedding application.
- int port_; // Port to use for the agent.
- Socket* server_; // Server socket for listen/accept.
- bool terminate_; // Termination flag.
- Mutex* session_access_; // Mutex guarging access to session_.
- DebuggerAgentSession* session_; // Current active session if any.
- Semaphore* terminate_now_; // Semaphore to signal termination.
- Semaphore* listening_;
-
- friend class DebuggerAgentSession;
- friend void DebuggerAgentMessageHandler(const v8::Debug::Message& message);
-
- DISALLOW_COPY_AND_ASSIGN(DebuggerAgent);
-};
-
-
-// Debugger agent session. The session receives requests from the remote
-// debugger and sends debugger events/responses to the remote debugger.
-class DebuggerAgentSession: public Thread {
- public:
- DebuggerAgentSession(Isolate* isolate, DebuggerAgent* agent, Socket* client)
- : Thread(isolate, "v8:DbgAgntSessn"),
- agent_(agent), client_(client) {}
-
- void DebuggerMessage(Vector<uint16_t> message);
- void Shutdown();
-
- private:
- void Run();
-
- void DebuggerMessage(Vector<char> message);
-
- DebuggerAgent* agent_;
- Socket* client_;
-
- DISALLOW_COPY_AND_ASSIGN(DebuggerAgentSession);
-};
-
-
-// Utility methods factored out to be used by the D8 shell as well.
-class DebuggerAgentUtil {
- public:
- static const char* const kContentLength;
- static const int kContentLengthSize;
-
- static SmartPointer<char> ReceiveMessage(const Socket* conn);
- static bool SendConnectMessage(const Socket* conn,
- const char* embedding_host);
- static bool SendMessage(const Socket* conn, const Vector<uint16_t> message);
- static bool SendMessage(const Socket* conn,
- const v8::Handle<v8::String> message);
- static int ReceiveAll(const Socket* conn, char* data, int len);
-};
-
-} } // namespace v8::internal
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-#endif // V8_DEBUG_AGENT_H_
diff --git a/src/3rdparty/v8/src/debug-debugger.js b/src/3rdparty/v8/src/debug-debugger.js
deleted file mode 100644
index bc0f966..0000000
--- a/src/3rdparty/v8/src/debug-debugger.js
+++ /dev/null
@@ -1,2569 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Default number of frames to include in the response to backtrace request.
-const kDefaultBacktraceLength = 10;
-
-const Debug = {};
-
-// Regular expression to skip "crud" at the beginning of a source line which is
-// not really code. Currently the regular expression matches whitespace and
-// comments.
-const sourceLineBeginningSkip = /^(?:\s*(?:\/\*.*?\*\/)*)*/;
-
-// Debug events which can occour in the V8 JavaScript engine. These originate
-// from the API include file debug.h.
-Debug.DebugEvent = { Break: 1,
- Exception: 2,
- NewFunction: 3,
- BeforeCompile: 4,
- AfterCompile: 5,
- ScriptCollected: 6 };
-
-// Types of exceptions that can be broken upon.
-Debug.ExceptionBreak = { Caught : 0,
- Uncaught: 1 };
-
-// The different types of steps.
-Debug.StepAction = { StepOut: 0,
- StepNext: 1,
- StepIn: 2,
- StepMin: 3,
- StepInMin: 4 };
-
-// The different types of scripts matching enum ScriptType in objects.h.
-Debug.ScriptType = { Native: 0,
- Extension: 1,
- Normal: 2 };
-
-// The different types of script compilations matching enum
-// Script::CompilationType in objects.h.
-Debug.ScriptCompilationType = { Host: 0,
- Eval: 1,
- JSON: 2 };
-
-// The different script break point types.
-Debug.ScriptBreakPointType = { ScriptId: 0,
- ScriptName: 1 };
-
-function ScriptTypeFlag(type) {
- return (1 << type);
-}
-
-// Globals.
-var next_response_seq = 0;
-var next_break_point_number = 1;
-var break_points = [];
-var script_break_points = [];
-var debugger_flags = {
- breakPointsActive: {
- value: true,
- getValue: function() { return this.value; },
- setValue: function(value) {
- this.value = !!value;
- %SetDisableBreak(!this.value);
- }
- },
- breakOnCaughtException: {
- getValue: function() { return Debug.isBreakOnException(); },
- setValue: function(value) {
- if (value) {
- Debug.setBreakOnException();
- } else {
- Debug.clearBreakOnException();
- }
- }
- },
- breakOnUncaughtException: {
- getValue: function() { return Debug.isBreakOnUncaughtException(); },
- setValue: function(value) {
- if (value) {
- Debug.setBreakOnUncaughtException();
- } else {
- Debug.clearBreakOnUncaughtException();
- }
- }
- },
-};
-var lol_is_enabled = %HasLOLEnabled();
-
-
-// Create a new break point object and add it to the list of break points.
-function MakeBreakPoint(source_position, opt_script_break_point) {
- var break_point = new BreakPoint(source_position, opt_script_break_point);
- break_points.push(break_point);
- return break_point;
-}
-
-
-// Object representing a break point.
-// NOTE: This object does not have a reference to the function having break
-// point as this would cause function not to be garbage collected when it is
-// not used any more. We do not want break points to keep functions alive.
-function BreakPoint(source_position, opt_script_break_point) {
- this.source_position_ = source_position;
- if (opt_script_break_point) {
- this.script_break_point_ = opt_script_break_point;
- } else {
- this.number_ = next_break_point_number++;
- }
- this.hit_count_ = 0;
- this.active_ = true;
- this.condition_ = null;
- this.ignoreCount_ = 0;
-}
-
-
-BreakPoint.prototype.number = function() {
- return this.number_;
-};
-
-
-BreakPoint.prototype.func = function() {
- return this.func_;
-};
-
-
-BreakPoint.prototype.source_position = function() {
- return this.source_position_;
-};
-
-
-BreakPoint.prototype.hit_count = function() {
- return this.hit_count_;
-};
-
-
-BreakPoint.prototype.active = function() {
- if (this.script_break_point()) {
- return this.script_break_point().active();
- }
- return this.active_;
-};
-
-
-BreakPoint.prototype.condition = function() {
- if (this.script_break_point() && this.script_break_point().condition()) {
- return this.script_break_point().condition();
- }
- return this.condition_;
-};
-
-
-BreakPoint.prototype.ignoreCount = function() {
- return this.ignoreCount_;
-};
-
-
-BreakPoint.prototype.script_break_point = function() {
- return this.script_break_point_;
-};
-
-
-BreakPoint.prototype.enable = function() {
- this.active_ = true;
-};
-
-
-BreakPoint.prototype.disable = function() {
- this.active_ = false;
-};
-
-
-BreakPoint.prototype.setCondition = function(condition) {
- this.condition_ = condition;
-};
-
-
-BreakPoint.prototype.setIgnoreCount = function(ignoreCount) {
- this.ignoreCount_ = ignoreCount;
-};
-
-
-BreakPoint.prototype.isTriggered = function(exec_state) {
- // Break point not active - not triggered.
- if (!this.active()) return false;
-
- // Check for conditional break point.
- if (this.condition()) {
- // If break point has condition try to evaluate it in the top frame.
- try {
- var mirror = exec_state.frame(0).evaluate(this.condition());
- // If no sensible mirror or non true value break point not triggered.
- if (!(mirror instanceof ValueMirror) || !%ToBoolean(mirror.value_)) {
- return false;
- }
- } catch (e) {
- // Exception evaluating condition counts as not triggered.
- return false;
- }
- }
-
- // Update the hit count.
- this.hit_count_++;
- if (this.script_break_point_) {
- this.script_break_point_.hit_count_++;
- }
-
- // If the break point has an ignore count it is not triggered.
- if (this.ignoreCount_ > 0) {
- this.ignoreCount_--;
- return false;
- }
-
- // Break point triggered.
- return true;
-};
-
-
-// Function called from the runtime when a break point is hit. Returns true if
-// the break point is triggered and supposed to break execution.
-function IsBreakPointTriggered(break_id, break_point) {
- return break_point.isTriggered(MakeExecutionState(break_id));
-}
-
-
-// Object representing a script break point. The script is referenced by its
-// script name or script id and the break point is represented as line and
-// column.
-function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
- opt_groupId) {
- this.type_ = type;
- if (type == Debug.ScriptBreakPointType.ScriptId) {
- this.script_id_ = script_id_or_name;
- } else { // type == Debug.ScriptBreakPointType.ScriptName
- this.script_name_ = script_id_or_name;
- }
- this.line_ = opt_line || 0;
- this.column_ = opt_column;
- this.groupId_ = opt_groupId;
- this.hit_count_ = 0;
- this.active_ = true;
- this.condition_ = null;
- this.ignoreCount_ = 0;
- this.break_points_ = [];
-}
-
-
-//Creates a clone of script breakpoint that is linked to another script.
-ScriptBreakPoint.prototype.cloneForOtherScript = function (other_script) {
- var copy = new ScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
- other_script.id, this.line_, this.column_, this.groupId_);
- copy.number_ = next_break_point_number++;
- script_break_points.push(copy);
-
- copy.hit_count_ = this.hit_count_;
- copy.active_ = this.active_;
- copy.condition_ = this.condition_;
- copy.ignoreCount_ = this.ignoreCount_;
- return copy;
-}
-
-
-ScriptBreakPoint.prototype.number = function() {
- return this.number_;
-};
-
-
-ScriptBreakPoint.prototype.groupId = function() {
- return this.groupId_;
-};
-
-
-ScriptBreakPoint.prototype.type = function() {
- return this.type_;
-};
-
-
-ScriptBreakPoint.prototype.script_id = function() {
- return this.script_id_;
-};
-
-
-ScriptBreakPoint.prototype.script_name = function() {
- return this.script_name_;
-};
-
-
-ScriptBreakPoint.prototype.line = function() {
- return this.line_;
-};
-
-
-ScriptBreakPoint.prototype.column = function() {
- return this.column_;
-};
-
-
-ScriptBreakPoint.prototype.actual_locations = function() {
- var locations = [];
- for (var i = 0; i < this.break_points_.length; i++) {
- locations.push(this.break_points_[i].actual_location);
- }
- return locations;
-}
-
-
-ScriptBreakPoint.prototype.update_positions = function(line, column) {
- this.line_ = line;
- this.column_ = column;
-}
-
-
-ScriptBreakPoint.prototype.hit_count = function() {
- return this.hit_count_;
-};
-
-
-ScriptBreakPoint.prototype.active = function() {
- return this.active_;
-};
-
-
-ScriptBreakPoint.prototype.condition = function() {
- return this.condition_;
-};
-
-
-ScriptBreakPoint.prototype.ignoreCount = function() {
- return this.ignoreCount_;
-};
-
-
-ScriptBreakPoint.prototype.enable = function() {
- this.active_ = true;
-};
-
-
-ScriptBreakPoint.prototype.disable = function() {
- this.active_ = false;
-};
-
-
-ScriptBreakPoint.prototype.setCondition = function(condition) {
- this.condition_ = condition;
-};
-
-
-ScriptBreakPoint.prototype.setIgnoreCount = function(ignoreCount) {
- this.ignoreCount_ = ignoreCount;
-
- // Set ignore count on all break points created from this script break point.
- for (var i = 0; i < this.break_points_.length; i++) {
- this.break_points_[i].setIgnoreCount(ignoreCount);
- }
-};
-
-
-// Check whether a script matches this script break point. Currently this is
-// only based on script name.
-ScriptBreakPoint.prototype.matchesScript = function(script) {
- if (this.type_ == Debug.ScriptBreakPointType.ScriptId) {
- return this.script_id_ == script.id;
- } else { // this.type_ == Debug.ScriptBreakPointType.ScriptName
- return this.script_name_ == script.nameOrSourceURL() &&
- script.line_offset <= this.line_ &&
- this.line_ < script.line_offset + script.lineCount();
- }
-};
-
-
-// Set the script break point in a script.
-ScriptBreakPoint.prototype.set = function (script) {
- var column = this.column();
- var line = this.line();
- // If the column is undefined the break is on the line. To help locate the
- // first piece of breakable code on the line try to find the column on the
- // line which contains some source.
- if (IS_UNDEFINED(column)) {
- var source_line = script.sourceLine(this.line());
-
- // Allocate array for caching the columns where the actual source starts.
- if (!script.sourceColumnStart_) {
- script.sourceColumnStart_ = new Array(script.lineCount());
- }
-
- // Fill cache if needed and get column where the actual source starts.
- if (IS_UNDEFINED(script.sourceColumnStart_[line])) {
- script.sourceColumnStart_[line] =
- source_line.match(sourceLineBeginningSkip)[0].length;
- }
- column = script.sourceColumnStart_[line];
- }
-
- // Convert the line and column into an absolute position within the script.
- var position = Debug.findScriptSourcePosition(script, this.line(), column);
-
- // If the position is not found in the script (the script might be shorter
- // than it used to be) just ignore it.
- if (position === null) return;
-
- // Create a break point object and set the break point.
- break_point = MakeBreakPoint(position, this);
- break_point.setIgnoreCount(this.ignoreCount());
- var actual_position = %SetScriptBreakPoint(script, position, break_point);
- if (IS_UNDEFINED(actual_position)) {
- actual_position = position;
- }
- var actual_location = script.locationFromPosition(actual_position, true);
- break_point.actual_location = { line: actual_location.line,
- column: actual_location.column };
- this.break_points_.push(break_point);
- return break_point;
-};
-
-
-// Clear all the break points created from this script break point
-ScriptBreakPoint.prototype.clear = function () {
- var remaining_break_points = [];
- for (var i = 0; i < break_points.length; i++) {
- if (break_points[i].script_break_point() &&
- break_points[i].script_break_point() === this) {
- %ClearBreakPoint(break_points[i]);
- } else {
- remaining_break_points.push(break_points[i]);
- }
- }
- break_points = remaining_break_points;
- this.break_points_ = [];
-};
-
-
-// Function called from runtime when a new script is compiled to set any script
-// break points set in this script.
-function UpdateScriptBreakPoints(script) {
- for (var i = 0; i < script_break_points.length; i++) {
- if (script_break_points[i].type() == Debug.ScriptBreakPointType.ScriptName &&
- script_break_points[i].matchesScript(script)) {
- script_break_points[i].set(script);
- }
- }
-}
-
-
-function GetScriptBreakPoints(script) {
- var result = [];
- for (var i = 0; i < script_break_points.length; i++) {
- if (script_break_points[i].matchesScript(script)) {
- result.push(script_break_points[i]);
- }
- }
- return result;
-}
-
-
-Debug.setListener = function(listener, opt_data) {
- if (!IS_FUNCTION(listener) && !IS_UNDEFINED(listener) && !IS_NULL(listener)) {
- throw new Error('Parameters have wrong types.');
- }
- %SetDebugEventListener(listener, opt_data);
-};
-
-
-Debug.breakExecution = function(f) {
- %Break();
-};
-
-Debug.breakLocations = function(f) {
- if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
- return %GetBreakLocations(f);
-};
-
-// Returns a Script object. If the parameter is a function the return value
-// is the script in which the function is defined. If the parameter is a string
-// the return value is the script for which the script name has that string
-// value. If it is a regexp and there is a unique script whose name matches
-// we return that, otherwise undefined.
-Debug.findScript = function(func_or_script_name) {
- if (IS_FUNCTION(func_or_script_name)) {
- return %FunctionGetScript(func_or_script_name);
- } else if (IS_REGEXP(func_or_script_name)) {
- var scripts = Debug.scripts();
- var last_result = null;
- var result_count = 0;
- for (var i in scripts) {
- var script = scripts[i];
- if (func_or_script_name.test(script.name)) {
- last_result = script;
- result_count++;
- }
- }
- // Return the unique script matching the regexp. If there are more
- // than one we don't return a value since there is no good way to
- // decide which one to return. Returning a "random" one, say the
- // first, would introduce nondeterminism (or something close to it)
- // because the order is the heap iteration order.
- if (result_count == 1) {
- return last_result;
- } else {
- return undefined;
- }
- } else {
- return %GetScript(func_or_script_name);
- }
-};
-
-// Returns the script source. If the parameter is a function the return value
-// is the script source for the script in which the function is defined. If the
-// parameter is a string the return value is the script for which the script
-// name has that string value.
-Debug.scriptSource = function(func_or_script_name) {
- return this.findScript(func_or_script_name).source;
-};
-
-Debug.source = function(f) {
- if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
- return %FunctionGetSourceCode(f);
-};
-
-Debug.disassemble = function(f) {
- if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
- return %DebugDisassembleFunction(f);
-};
-
-Debug.disassembleConstructor = function(f) {
- if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
- return %DebugDisassembleConstructor(f);
-};
-
-Debug.ExecuteInDebugContext = function(f, without_debugger) {
- if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
- return %ExecuteInDebugContext(f, !!without_debugger);
-};
-
-Debug.sourcePosition = function(f) {
- if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
- return %FunctionGetScriptSourcePosition(f);
-};
-
-
-Debug.findFunctionSourceLocation = function(func, opt_line, opt_column) {
- var script = %FunctionGetScript(func);
- var script_offset = %FunctionGetScriptSourcePosition(func);
- return script.locationFromLine(opt_line, opt_column, script_offset);
-}
-
-
-// Returns the character position in a script based on a line number and an
-// optional position within that line.
-Debug.findScriptSourcePosition = function(script, opt_line, opt_column) {
- var location = script.locationFromLine(opt_line, opt_column);
- return location ? location.position : null;
-}
-
-
-Debug.findBreakPoint = function(break_point_number, remove) {
- var break_point;
- for (var i = 0; i < break_points.length; i++) {
- if (break_points[i].number() == break_point_number) {
- break_point = break_points[i];
- // Remove the break point from the list if requested.
- if (remove) {
- break_points.splice(i, 1);
- }
- break;
- }
- }
- if (break_point) {
- return break_point;
- } else {
- return this.findScriptBreakPoint(break_point_number, remove);
- }
-};
-
-Debug.findBreakPointActualLocations = function(break_point_number) {
- for (var i = 0; i < script_break_points.length; i++) {
- if (script_break_points[i].number() == break_point_number) {
- return script_break_points[i].actual_locations();
- }
- }
- for (var i = 0; i < break_points.length; i++) {
- if (break_points[i].number() == break_point_number) {
- return [break_points[i].actual_location];
- }
- }
- return [];
-}
-
-Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
- if (!IS_FUNCTION(func)) throw new Error('Parameters have wrong types.');
- // Break points in API functions are not supported.
- if (%FunctionIsAPIFunction(func)) {
- throw new Error('Cannot set break point in native code.');
- }
- // Find source position relative to start of the function
- var break_position =
- this.findFunctionSourceLocation(func, opt_line, opt_column).position;
- var source_position = break_position - this.sourcePosition(func);
- // Find the script for the function.
- var script = %FunctionGetScript(func);
- // Break in builtin JavaScript code is not supported.
- if (script.type == Debug.ScriptType.Native) {
- throw new Error('Cannot set break point in native code.');
- }
- // If the script for the function has a name convert this to a script break
- // point.
- if (script && script.id) {
- // Adjust the source position to be script relative.
- source_position += %FunctionGetScriptSourcePosition(func);
- // Find line and column for the position in the script and set a script
- // break point from that.
- var location = script.locationFromPosition(source_position, false);
- return this.setScriptBreakPointById(script.id,
- location.line, location.column,
- opt_condition);
- } else {
- // Set a break point directly on the function.
- var break_point = MakeBreakPoint(source_position);
- var actual_position =
- %SetFunctionBreakPoint(func, source_position, break_point);
- actual_position += this.sourcePosition(func);
- var actual_location = script.locationFromPosition(actual_position, true);
- break_point.actual_location = { line: actual_location.line,
- column: actual_location.column };
- break_point.setCondition(opt_condition);
- return break_point.number();
- }
-};
-
-
-Debug.setBreakPointByScriptIdAndPosition = function(script_id, position,
- condition, enabled)
-{
- break_point = MakeBreakPoint(position);
- break_point.setCondition(condition);
- if (!enabled)
- break_point.disable();
- var scripts = this.scripts();
- for (var i = 0; i < scripts.length; i++) {
- if (script_id == scripts[i].id) {
- break_point.actual_position = %SetScriptBreakPoint(scripts[i], position,
- break_point);
- break;
- }
- }
- return break_point;
-};
-
-
-Debug.enableBreakPoint = function(break_point_number) {
- var break_point = this.findBreakPoint(break_point_number, false);
- // Only enable if the breakpoint hasn't been deleted:
- if (break_point) {
- break_point.enable();
- }
-};
-
-
-Debug.disableBreakPoint = function(break_point_number) {
- var break_point = this.findBreakPoint(break_point_number, false);
- // Only enable if the breakpoint hasn't been deleted:
- if (break_point) {
- break_point.disable();
- }
-};
-
-
-Debug.changeBreakPointCondition = function(break_point_number, condition) {
- var break_point = this.findBreakPoint(break_point_number, false);
- break_point.setCondition(condition);
-};
-
-
-Debug.changeBreakPointIgnoreCount = function(break_point_number, ignoreCount) {
- if (ignoreCount < 0) {
- throw new Error('Invalid argument');
- }
- var break_point = this.findBreakPoint(break_point_number, false);
- break_point.setIgnoreCount(ignoreCount);
-};
-
-
-Debug.clearBreakPoint = function(break_point_number) {
- var break_point = this.findBreakPoint(break_point_number, true);
- if (break_point) {
- return %ClearBreakPoint(break_point);
- } else {
- break_point = this.findScriptBreakPoint(break_point_number, true);
- if (!break_point) {
- throw new Error('Invalid breakpoint');
- }
- }
-};
-
-
-Debug.clearAllBreakPoints = function() {
- for (var i = 0; i < break_points.length; i++) {
- break_point = break_points[i];
- %ClearBreakPoint(break_point);
- }
- break_points = [];
-};
-
-
-Debug.disableAllBreakPoints = function() {
- // Disable all user defined breakpoints:
- for (var i = 1; i < next_break_point_number; i++) {
- Debug.disableBreakPoint(i);
- }
- // Disable all exception breakpoints:
- %ChangeBreakOnException(Debug.ExceptionBreak.Caught, false);
- %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, false);
-};
-
-
-Debug.findScriptBreakPoint = function(break_point_number, remove) {
- var script_break_point;
- for (var i = 0; i < script_break_points.length; i++) {
- if (script_break_points[i].number() == break_point_number) {
- script_break_point = script_break_points[i];
- // Remove the break point from the list if requested.
- if (remove) {
- script_break_point.clear();
- script_break_points.splice(i,1);
- }
- break;
- }
- }
- return script_break_point;
-}
-
-
-// Sets a breakpoint in a script identified through id or name at the
-// specified source line and column within that line.
-Debug.setScriptBreakPoint = function(type, script_id_or_name,
- opt_line, opt_column, opt_condition,
- opt_groupId) {
- // Create script break point object.
- var script_break_point =
- new ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
- opt_groupId);
-
- // Assign number to the new script break point and add it.
- script_break_point.number_ = next_break_point_number++;
- script_break_point.setCondition(opt_condition);
- script_break_points.push(script_break_point);
-
- // Run through all scripts to see if this script break point matches any
- // loaded scripts.
- var scripts = this.scripts();
- for (var i = 0; i < scripts.length; i++) {
- if (script_break_point.matchesScript(scripts[i])) {
- script_break_point.set(scripts[i]);
- }
- }
-
- return script_break_point.number();
-}
-
-
-Debug.setScriptBreakPointById = function(script_id,
- opt_line, opt_column,
- opt_condition, opt_groupId) {
- return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
- script_id, opt_line, opt_column,
- opt_condition, opt_groupId);
-}
-
-
-Debug.setScriptBreakPointByName = function(script_name,
- opt_line, opt_column,
- opt_condition, opt_groupId) {
- return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptName,
- script_name, opt_line, opt_column,
- opt_condition, opt_groupId);
-}
-
-
-Debug.enableScriptBreakPoint = function(break_point_number) {
- var script_break_point = this.findScriptBreakPoint(break_point_number, false);
- script_break_point.enable();
-};
-
-
-Debug.disableScriptBreakPoint = function(break_point_number) {
- var script_break_point = this.findScriptBreakPoint(break_point_number, false);
- script_break_point.disable();
-};
-
-
-Debug.changeScriptBreakPointCondition = function(break_point_number, condition) {
- var script_break_point = this.findScriptBreakPoint(break_point_number, false);
- script_break_point.setCondition(condition);
-};
-
-
-Debug.changeScriptBreakPointIgnoreCount = function(break_point_number, ignoreCount) {
- if (ignoreCount < 0) {
- throw new Error('Invalid argument');
- }
- var script_break_point = this.findScriptBreakPoint(break_point_number, false);
- script_break_point.setIgnoreCount(ignoreCount);
-};
-
-
-Debug.scriptBreakPoints = function() {
- return script_break_points;
-}
-
-
-Debug.clearStepping = function() {
- %ClearStepping();
-}
-
-Debug.setBreakOnException = function() {
- return %ChangeBreakOnException(Debug.ExceptionBreak.Caught, true);
-};
-
-Debug.clearBreakOnException = function() {
- return %ChangeBreakOnException(Debug.ExceptionBreak.Caught, false);
-};
-
-Debug.isBreakOnException = function() {
- return !!%IsBreakOnException(Debug.ExceptionBreak.Caught);
-};
-
-Debug.setBreakOnUncaughtException = function() {
- return %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, true);
-};
-
-Debug.clearBreakOnUncaughtException = function() {
- return %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, false);
-};
-
-Debug.isBreakOnUncaughtException = function() {
- return !!%IsBreakOnException(Debug.ExceptionBreak.Uncaught);
-};
-
-Debug.showBreakPoints = function(f, full) {
- if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
- var source = full ? this.scriptSource(f) : this.source(f);
- var offset = full ? this.sourcePosition(f) : 0;
- var locations = this.breakLocations(f);
- if (!locations) return source;
- locations.sort(function(x, y) { return x - y; });
- var result = "";
- var prev_pos = 0;
- var pos;
- for (var i = 0; i < locations.length; i++) {
- pos = locations[i] - offset;
- result += source.slice(prev_pos, pos);
- result += "[B" + i + "]";
- prev_pos = pos;
- }
- pos = source.length;
- result += source.substring(prev_pos, pos);
- return result;
-};
-
-
-// Get all the scripts currently loaded. Locating all the scripts is based on
-// scanning the heap.
-Debug.scripts = function() {
- // Collect all scripts in the heap.
- return %DebugGetLoadedScripts();
-};
-
-
-Debug.debuggerFlags = function() {
- return debugger_flags;
-};
-
-Debug.MakeMirror = MakeMirror;
-
-function MakeExecutionState(break_id) {
- return new ExecutionState(break_id);
-}
-
-function ExecutionState(break_id) {
- this.break_id = break_id;
- this.selected_frame = 0;
-}
-
-ExecutionState.prototype.prepareStep = function(opt_action, opt_count) {
- var action = Debug.StepAction.StepIn;
- if (!IS_UNDEFINED(opt_action)) action = %ToNumber(opt_action);
- var count = opt_count ? %ToNumber(opt_count) : 1;
-
- return %PrepareStep(this.break_id, action, count);
-}
-
-ExecutionState.prototype.evaluateGlobal = function(source, disable_break,
- opt_additional_context) {
- return MakeMirror(%DebugEvaluateGlobal(this.break_id, source,
- Boolean(disable_break),
- opt_additional_context));
-};
-
-ExecutionState.prototype.frameCount = function() {
- return %GetFrameCount(this.break_id);
-};
-
-ExecutionState.prototype.threadCount = function() {
- return %GetThreadCount(this.break_id);
-};
-
-ExecutionState.prototype.frame = function(opt_index) {
- // If no index supplied return the selected frame.
- if (opt_index == null) opt_index = this.selected_frame;
- if (opt_index < 0 || opt_index >= this.frameCount())
- throw new Error('Illegal frame index.');
- return new FrameMirror(this.break_id, opt_index);
-};
-
-ExecutionState.prototype.setSelectedFrame = function(index) {
- var i = %ToNumber(index);
- if (i < 0 || i >= this.frameCount()) throw new Error('Illegal frame index.');
- this.selected_frame = i;
-};
-
-ExecutionState.prototype.selectedFrame = function() {
- return this.selected_frame;
-};
-
-ExecutionState.prototype.debugCommandProcessor = function(opt_is_running) {
- return new DebugCommandProcessor(this, opt_is_running);
-};
-
-
-function MakeBreakEvent(exec_state, break_points_hit) {
- return new BreakEvent(exec_state, break_points_hit);
-}
-
-
-function BreakEvent(exec_state, break_points_hit) {
- this.exec_state_ = exec_state;
- this.break_points_hit_ = break_points_hit;
-}
-
-
-BreakEvent.prototype.executionState = function() {
- return this.exec_state_;
-};
-
-
-BreakEvent.prototype.eventType = function() {
- return Debug.DebugEvent.Break;
-};
-
-
-BreakEvent.prototype.func = function() {
- return this.exec_state_.frame(0).func();
-};
-
-
-BreakEvent.prototype.sourceLine = function() {
- return this.exec_state_.frame(0).sourceLine();
-};
-
-
-BreakEvent.prototype.sourceColumn = function() {
- return this.exec_state_.frame(0).sourceColumn();
-};
-
-
-BreakEvent.prototype.sourceLineText = function() {
- return this.exec_state_.frame(0).sourceLineText();
-};
-
-
-BreakEvent.prototype.breakPointsHit = function() {
- return this.break_points_hit_;
-};
-
-
-BreakEvent.prototype.toJSONProtocol = function() {
- var o = { seq: next_response_seq++,
- type: "event",
- event: "break",
- body: { invocationText: this.exec_state_.frame(0).invocationText(),
- }
- };
-
- // Add script related information to the event if available.
- var script = this.func().script();
- if (script) {
- o.body.sourceLine = this.sourceLine(),
- o.body.sourceColumn = this.sourceColumn(),
- o.body.sourceLineText = this.sourceLineText(),
- o.body.script = MakeScriptObject_(script, false);
- }
-
- // Add an Array of break points hit if any.
- if (this.breakPointsHit()) {
- o.body.breakpoints = [];
- for (var i = 0; i < this.breakPointsHit().length; i++) {
- // Find the break point number. For break points originating from a
- // script break point supply the script break point number.
- var breakpoint = this.breakPointsHit()[i];
- var script_break_point = breakpoint.script_break_point();
- var number;
- if (script_break_point) {
- number = script_break_point.number();
- } else {
- number = breakpoint.number();
- }
- o.body.breakpoints.push(number);
- }
- }
- return JSON.stringify(ObjectToProtocolObject_(o));
-};
-
-
-function MakeExceptionEvent(exec_state, exception, uncaught) {
- return new ExceptionEvent(exec_state, exception, uncaught);
-}
-
-
-function ExceptionEvent(exec_state, exception, uncaught) {
- this.exec_state_ = exec_state;
- this.exception_ = exception;
- this.uncaught_ = uncaught;
-}
-
-
-ExceptionEvent.prototype.executionState = function() {
- return this.exec_state_;
-};
-
-
-ExceptionEvent.prototype.eventType = function() {
- return Debug.DebugEvent.Exception;
-};
-
-
-ExceptionEvent.prototype.exception = function() {
- return this.exception_;
-}
-
-
-ExceptionEvent.prototype.uncaught = function() {
- return this.uncaught_;
-}
-
-
-ExceptionEvent.prototype.func = function() {
- return this.exec_state_.frame(0).func();
-};
-
-
-ExceptionEvent.prototype.sourceLine = function() {
- return this.exec_state_.frame(0).sourceLine();
-};
-
-
-ExceptionEvent.prototype.sourceColumn = function() {
- return this.exec_state_.frame(0).sourceColumn();
-};
-
-
-ExceptionEvent.prototype.sourceLineText = function() {
- return this.exec_state_.frame(0).sourceLineText();
-};
-
-
-ExceptionEvent.prototype.toJSONProtocol = function() {
- var o = new ProtocolMessage();
- o.event = "exception";
- o.body = { uncaught: this.uncaught_,
- exception: MakeMirror(this.exception_)
- };
-
- // Exceptions might happen whithout any JavaScript frames.
- if (this.exec_state_.frameCount() > 0) {
- o.body.sourceLine = this.sourceLine();
- o.body.sourceColumn = this.sourceColumn();
- o.body.sourceLineText = this.sourceLineText();
-
- // Add script information to the event if available.
- var script = this.func().script();
- if (script) {
- o.body.script = MakeScriptObject_(script, false);
- }
- } else {
- o.body.sourceLine = -1;
- }
-
- return o.toJSONProtocol();
-};
-
-
-function MakeCompileEvent(exec_state, script, before) {
- return new CompileEvent(exec_state, script, before);
-}
-
-
-function CompileEvent(exec_state, script, before) {
- this.exec_state_ = exec_state;
- this.script_ = MakeMirror(script);
- this.before_ = before;
-}
-
-
-CompileEvent.prototype.executionState = function() {
- return this.exec_state_;
-};
-
-
-CompileEvent.prototype.eventType = function() {
- if (this.before_) {
- return Debug.DebugEvent.BeforeCompile;
- } else {
- return Debug.DebugEvent.AfterCompile;
- }
-};
-
-
-CompileEvent.prototype.script = function() {
- return this.script_;
-};
-
-
-CompileEvent.prototype.toJSONProtocol = function() {
- var o = new ProtocolMessage();
- o.running = true;
- if (this.before_) {
- o.event = "beforeCompile";
- } else {
- o.event = "afterCompile";
- }
- o.body = {};
- o.body.script = this.script_;
-
- return o.toJSONProtocol();
-}
-
-
-function MakeNewFunctionEvent(func) {
- return new NewFunctionEvent(func);
-}
-
-
-function NewFunctionEvent(func) {
- this.func = func;
-}
-
-
-NewFunctionEvent.prototype.eventType = function() {
- return Debug.DebugEvent.NewFunction;
-};
-
-
-NewFunctionEvent.prototype.name = function() {
- return this.func.name;
-};
-
-
-NewFunctionEvent.prototype.setBreakPoint = function(p) {
- Debug.setBreakPoint(this.func, p || 0);
-};
-
-
-function MakeScriptCollectedEvent(exec_state, id) {
- return new ScriptCollectedEvent(exec_state, id);
-}
-
-
-function ScriptCollectedEvent(exec_state, id) {
- this.exec_state_ = exec_state;
- this.id_ = id;
-}
-
-
-ScriptCollectedEvent.prototype.id = function() {
- return this.id_;
-};
-
-
-ScriptCollectedEvent.prototype.executionState = function() {
- return this.exec_state_;
-};
-
-
-ScriptCollectedEvent.prototype.toJSONProtocol = function() {
- var o = new ProtocolMessage();
- o.running = true;
- o.event = "scriptCollected";
- o.body = {};
- o.body.script = { id: this.id() };
- return o.toJSONProtocol();
-}
-
-
-function MakeScriptObject_(script, include_source) {
- var o = { id: script.id(),
- name: script.name(),
- lineOffset: script.lineOffset(),
- columnOffset: script.columnOffset(),
- lineCount: script.lineCount(),
- };
- if (!IS_UNDEFINED(script.data())) {
- o.data = script.data();
- }
- if (include_source) {
- o.source = script.source();
- }
- return o;
-};
-
-
-function DebugCommandProcessor(exec_state, opt_is_running) {
- this.exec_state_ = exec_state;
- this.running_ = opt_is_running || false;
-};
-
-
-DebugCommandProcessor.prototype.processDebugRequest = function (request) {
- return this.processDebugJSONRequest(request);
-}
-
-
-function ProtocolMessage(request) {
- // Update sequence number.
- this.seq = next_response_seq++;
-
- if (request) {
- // If message is based on a request this is a response. Fill the initial
- // response from the request.
- this.type = 'response';
- this.request_seq = request.seq;
- this.command = request.command;
- } else {
- // If message is not based on a request it is a dabugger generated event.
- this.type = 'event';
- }
- this.success = true;
- // Handler may set this field to control debugger state.
- this.running = undefined;
-}
-
-
-ProtocolMessage.prototype.setOption = function(name, value) {
- if (!this.options_) {
- this.options_ = {};
- }
- this.options_[name] = value;
-}
-
-
-ProtocolMessage.prototype.failed = function(message) {
- this.success = false;
- this.message = message;
-}
-
-
-ProtocolMessage.prototype.toJSONProtocol = function() {
- // Encode the protocol header.
- var json = {};
- json.seq= this.seq;
- if (this.request_seq) {
- json.request_seq = this.request_seq;
- }
- json.type = this.type;
- if (this.event) {
- json.event = this.event;
- }
- if (this.command) {
- json.command = this.command;
- }
- if (this.success) {
- json.success = this.success;
- } else {
- json.success = false;
- }
- if (this.body) {
- // Encode the body part.
- var bodyJson;
- var serializer = MakeMirrorSerializer(true, this.options_);
- if (this.body instanceof Mirror) {
- bodyJson = serializer.serializeValue(this.body);
- } else if (this.body instanceof Array) {
- bodyJson = [];
- for (var i = 0; i < this.body.length; i++) {
- if (this.body[i] instanceof Mirror) {
- bodyJson.push(serializer.serializeValue(this.body[i]));
- } else {
- bodyJson.push(ObjectToProtocolObject_(this.body[i], serializer));
- }
- }
- } else {
- bodyJson = ObjectToProtocolObject_(this.body, serializer);
- }
- json.body = bodyJson;
- json.refs = serializer.serializeReferencedObjects();
- }
- if (this.message) {
- json.message = this.message;
- }
- json.running = this.running;
- return JSON.stringify(json);
-}
-
-
-DebugCommandProcessor.prototype.createResponse = function(request) {
- return new ProtocolMessage(request);
-};
-
-
-DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request) {
- var request; // Current request.
- var response; // Generated response.
- try {
- try {
- // Convert the JSON string to an object.
- request = %CompileString('(' + json_request + ')')();
-
- // Create an initial response.
- response = this.createResponse(request);
-
- if (!request.type) {
- throw new Error('Type not specified');
- }
-
- if (request.type != 'request') {
- throw new Error("Illegal type '" + request.type + "' in request");
- }
-
- if (!request.command) {
- throw new Error('Command not specified');
- }
-
- if (request.arguments) {
- var args = request.arguments;
- // TODO(yurys): remove request.arguments.compactFormat check once
- // ChromeDevTools are switched to 'inlineRefs'
- if (args.inlineRefs || args.compactFormat) {
- response.setOption('inlineRefs', true);
- }
- if (!IS_UNDEFINED(args.maxStringLength)) {
- response.setOption('maxStringLength', args.maxStringLength);
- }
- }
-
- if (request.command == 'continue') {
- this.continueRequest_(request, response);
- } else if (request.command == 'break') {
- this.breakRequest_(request, response);
- } else if (request.command == 'setbreakpoint') {
- this.setBreakPointRequest_(request, response);
- } else if (request.command == 'changebreakpoint') {
- this.changeBreakPointRequest_(request, response);
- } else if (request.command == 'clearbreakpoint') {
- this.clearBreakPointRequest_(request, response);
- } else if (request.command == 'clearbreakpointgroup') {
- this.clearBreakPointGroupRequest_(request, response);
- } else if (request.command == 'disconnect') {
- this.disconnectRequest_(request, response);
- } else if (request.command == 'setexceptionbreak') {
- this.setExceptionBreakRequest_(request, response);
- } else if (request.command == 'listbreakpoints') {
- this.listBreakpointsRequest_(request, response);
- } else if (request.command == 'backtrace') {
- this.backtraceRequest_(request, response);
- } else if (request.command == 'frame') {
- this.frameRequest_(request, response);
- } else if (request.command == 'scopes') {
- this.scopesRequest_(request, response);
- } else if (request.command == 'scope') {
- this.scopeRequest_(request, response);
- } else if (request.command == 'evaluate') {
- this.evaluateRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'getobj') {
- this.getobjRequest_(request, response);
- } else if (request.command == 'lookup') {
- this.lookupRequest_(request, response);
- } else if (request.command == 'references') {
- this.referencesRequest_(request, response);
- } else if (request.command == 'source') {
- this.sourceRequest_(request, response);
- } else if (request.command == 'scripts') {
- this.scriptsRequest_(request, response);
- } else if (request.command == 'threads') {
- this.threadsRequest_(request, response);
- } else if (request.command == 'suspend') {
- this.suspendRequest_(request, response);
- } else if (request.command == 'version') {
- this.versionRequest_(request, response);
- } else if (request.command == 'profile') {
- this.profileRequest_(request, response);
- } else if (request.command == 'changelive') {
- this.changeLiveRequest_(request, response);
- } else if (request.command == 'flags') {
- this.debuggerFlagsRequest_(request, response);
- } else if (request.command == 'v8flags') {
- this.v8FlagsRequest_(request, response);
-
- // GC tools:
- } else if (request.command == 'gc') {
- this.gcRequest_(request, response);
-
- // LiveObjectList tools:
- } else if (lol_is_enabled && request.command == 'lol-capture') {
- this.lolCaptureRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-delete') {
- this.lolDeleteRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-diff') {
- this.lolDiffRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-getid') {
- this.lolGetIdRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-info') {
- this.lolInfoRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-reset') {
- this.lolResetRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-retainers') {
- this.lolRetainersRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-path') {
- this.lolPathRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-print') {
- this.lolPrintRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-stats') {
- this.lolStatsRequest_(request, response);
-
- } else {
- throw new Error('Unknown command "' + request.command + '" in request');
- }
- } catch (e) {
- // If there is no response object created one (without command).
- if (!response) {
- response = this.createResponse();
- }
- response.success = false;
- response.message = %ToString(e);
- }
-
- // Return the response as a JSON encoded string.
- try {
- if (!IS_UNDEFINED(response.running)) {
- // Response controls running state.
- this.running_ = response.running;
- }
- response.running = this.running_;
- return response.toJSONProtocol();
- } catch (e) {
- // Failed to generate response - return generic error.
- return '{"seq":' + response.seq + ',' +
- '"request_seq":' + request.seq + ',' +
- '"type":"response",' +
- '"success":false,' +
- '"message":"Internal error: ' + %ToString(e) + '"}';
- }
- } catch (e) {
- // Failed in one of the catch blocks above - most generic error.
- return '{"seq":0,"type":"response","success":false,"message":"Internal error"}';
- }
-};
-
-
-DebugCommandProcessor.prototype.continueRequest_ = function(request, response) {
- // Check for arguments for continue.
- if (request.arguments) {
- var count = 1;
- var action = Debug.StepAction.StepIn;
-
- // Pull out arguments.
- var stepaction = request.arguments.stepaction;
- var stepcount = request.arguments.stepcount;
-
- // Get the stepcount argument if any.
- if (stepcount) {
- count = %ToNumber(stepcount);
- if (count < 0) {
- throw new Error('Invalid stepcount argument "' + stepcount + '".');
- }
- }
-
- // Get the stepaction argument.
- if (stepaction) {
- if (stepaction == 'in') {
- action = Debug.StepAction.StepIn;
- } else if (stepaction == 'min') {
- action = Debug.StepAction.StepMin;
- } else if (stepaction == 'next') {
- action = Debug.StepAction.StepNext;
- } else if (stepaction == 'out') {
- action = Debug.StepAction.StepOut;
- } else {
- throw new Error('Invalid stepaction argument "' + stepaction + '".');
- }
- }
-
- // Setup the VM for stepping.
- this.exec_state_.prepareStep(action, count);
- }
-
- // VM should be running after executing this request.
- response.running = true;
-};
-
-
-DebugCommandProcessor.prototype.breakRequest_ = function(request, response) {
- // Ignore as break command does not do anything when broken.
-};
-
-
-DebugCommandProcessor.prototype.setBreakPointRequest_ =
- function(request, response) {
- // Check for legal request.
- if (!request.arguments) {
- response.failed('Missing arguments');
- return;
- }
-
- // Pull out arguments.
- var type = request.arguments.type;
- var target = request.arguments.target;
- var line = request.arguments.line;
- var column = request.arguments.column;
- var enabled = IS_UNDEFINED(request.arguments.enabled) ?
- true : request.arguments.enabled;
- var condition = request.arguments.condition;
- var ignoreCount = request.arguments.ignoreCount;
- var groupId = request.arguments.groupId;
-
- // Check for legal arguments.
- if (!type || IS_UNDEFINED(target)) {
- response.failed('Missing argument "type" or "target"');
- return;
- }
- if (type != 'function' && type != 'handle' &&
- type != 'script' && type != 'scriptId') {
- response.failed('Illegal type "' + type + '"');
- return;
- }
-
- // Either function or script break point.
- var break_point_number;
- if (type == 'function') {
- // Handle function break point.
- if (!IS_STRING(target)) {
- response.failed('Argument "target" is not a string value');
- return;
- }
- var f;
- try {
- // Find the function through a global evaluate.
- f = this.exec_state_.evaluateGlobal(target).value();
- } catch (e) {
- response.failed('Error: "' + %ToString(e) +
- '" evaluating "' + target + '"');
- return;
- }
- if (!IS_FUNCTION(f)) {
- response.failed('"' + target + '" does not evaluate to a function');
- return;
- }
-
- // Set function break point.
- break_point_number = Debug.setBreakPoint(f, line, column, condition);
- } else if (type == 'handle') {
- // Find the object pointed by the specified handle.
- var handle = parseInt(target, 10);
- var mirror = LookupMirror(handle);
- if (!mirror) {
- return response.failed('Object #' + handle + '# not found');
- }
- if (!mirror.isFunction()) {
- return response.failed('Object #' + handle + '# is not a function');
- }
-
- // Set function break point.
- break_point_number = Debug.setBreakPoint(mirror.value(),
- line, column, condition);
- } else if (type == 'script') {
- // set script break point.
- break_point_number =
- Debug.setScriptBreakPointByName(target, line, column, condition,
- groupId);
- } else { // type == 'scriptId.
- break_point_number =
- Debug.setScriptBreakPointById(target, line, column, condition, groupId);
- }
-
- // Set additional break point properties.
- var break_point = Debug.findBreakPoint(break_point_number);
- if (ignoreCount) {
- Debug.changeBreakPointIgnoreCount(break_point_number, ignoreCount);
- }
- if (!enabled) {
- Debug.disableBreakPoint(break_point_number);
- }
-
- // Add the break point number to the response.
- response.body = { type: type,
- breakpoint: break_point_number }
-
- // Add break point information to the response.
- if (break_point instanceof ScriptBreakPoint) {
- if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
- response.body.type = 'scriptId';
- response.body.script_id = break_point.script_id();
- } else {
- response.body.type = 'scriptName';
- response.body.script_name = break_point.script_name();
- }
- response.body.line = break_point.line();
- response.body.column = break_point.column();
- response.body.actual_locations = break_point.actual_locations();
- } else {
- response.body.type = 'function';
- response.body.actual_locations = [break_point.actual_location];
- }
-};
-
-
-DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(request, response) {
- // Check for legal request.
- if (!request.arguments) {
- response.failed('Missing arguments');
- return;
- }
-
- // Pull out arguments.
- var break_point = %ToNumber(request.arguments.breakpoint);
- var enabled = request.arguments.enabled;
- var condition = request.arguments.condition;
- var ignoreCount = request.arguments.ignoreCount;
-
- // Check for legal arguments.
- if (!break_point) {
- response.failed('Missing argument "breakpoint"');
- return;
- }
-
- // Change enabled state if supplied.
- if (!IS_UNDEFINED(enabled)) {
- if (enabled) {
- Debug.enableBreakPoint(break_point);
- } else {
- Debug.disableBreakPoint(break_point);
- }
- }
-
- // Change condition if supplied
- if (!IS_UNDEFINED(condition)) {
- Debug.changeBreakPointCondition(break_point, condition);
- }
-
- // Change ignore count if supplied
- if (!IS_UNDEFINED(ignoreCount)) {
- Debug.changeBreakPointIgnoreCount(break_point, ignoreCount);
- }
-}
-
-
-DebugCommandProcessor.prototype.clearBreakPointGroupRequest_ = function(request, response) {
- // Check for legal request.
- if (!request.arguments) {
- response.failed('Missing arguments');
- return;
- }
-
- // Pull out arguments.
- var group_id = request.arguments.groupId;
-
- // Check for legal arguments.
- if (!group_id) {
- response.failed('Missing argument "groupId"');
- return;
- }
-
- var cleared_break_points = [];
- var new_script_break_points = [];
- for (var i = 0; i < script_break_points.length; i++) {
- var next_break_point = script_break_points[i];
- if (next_break_point.groupId() == group_id) {
- cleared_break_points.push(next_break_point.number());
- next_break_point.clear();
- } else {
- new_script_break_points.push(next_break_point);
- }
- }
- script_break_points = new_script_break_points;
-
- // Add the cleared break point numbers to the response.
- response.body = { breakpoints: cleared_break_points };
-}
-
-
-DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(request, response) {
- // Check for legal request.
- if (!request.arguments) {
- response.failed('Missing arguments');
- return;
- }
-
- // Pull out arguments.
- var break_point = %ToNumber(request.arguments.breakpoint);
-
- // Check for legal arguments.
- if (!break_point) {
- response.failed('Missing argument "breakpoint"');
- return;
- }
-
- // Clear break point.
- Debug.clearBreakPoint(break_point);
-
- // Add the cleared break point number to the response.
- response.body = { breakpoint: break_point }
-}
-
-
-DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(request, response) {
- var array = [];
- for (var i = 0; i < script_break_points.length; i++) {
- var break_point = script_break_points[i];
-
- var description = {
- number: break_point.number(),
- line: break_point.line(),
- column: break_point.column(),
- groupId: break_point.groupId(),
- hit_count: break_point.hit_count(),
- active: break_point.active(),
- condition: break_point.condition(),
- ignoreCount: break_point.ignoreCount(),
- actual_locations: break_point.actual_locations()
- }
-
- if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
- description.type = 'scriptId';
- description.script_id = break_point.script_id();
- } else {
- description.type = 'scriptName';
- description.script_name = break_point.script_name();
- }
- array.push(description);
- }
-
- response.body = {
- breakpoints: array,
- breakOnExceptions: Debug.isBreakOnException(),
- breakOnUncaughtExceptions: Debug.isBreakOnUncaughtException()
- }
-}
-
-
-DebugCommandProcessor.prototype.disconnectRequest_ =
- function(request, response) {
- Debug.disableAllBreakPoints();
- this.continueRequest_(request, response);
-}
-
-
-DebugCommandProcessor.prototype.setExceptionBreakRequest_ =
- function(request, response) {
- // Check for legal request.
- if (!request.arguments) {
- response.failed('Missing arguments');
- return;
- }
-
- // Pull out and check the 'type' argument:
- var type = request.arguments.type;
- if (!type) {
- response.failed('Missing argument "type"');
- return;
- }
-
- // Initialize the default value of enable:
- var enabled;
- if (type == 'all') {
- enabled = !Debug.isBreakOnException();
- } else if (type == 'uncaught') {
- enabled = !Debug.isBreakOnUncaughtException();
- }
-
- // Pull out and check the 'enabled' argument if present:
- if (!IS_UNDEFINED(request.arguments.enabled)) {
- enabled = request.arguments.enabled;
- if ((enabled != true) && (enabled != false)) {
- response.failed('Illegal value for "enabled":"' + enabled + '"');
- }
- }
-
- // Now set the exception break state:
- if (type == 'all') {
- %ChangeBreakOnException(Debug.ExceptionBreak.Caught, enabled);
- } else if (type == 'uncaught') {
- %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, enabled);
- } else {
- response.failed('Unknown "type":"' + type + '"');
- }
-
- // Add the cleared break point number to the response.
- response.body = { 'type': type, 'enabled': enabled };
-}
-
-
-DebugCommandProcessor.prototype.backtraceRequest_ = function(request, response) {
- // Get the number of frames.
- var total_frames = this.exec_state_.frameCount();
-
- // Create simple response if there are no frames.
- if (total_frames == 0) {
- response.body = {
- totalFrames: total_frames
- }
- return;
- }
-
- // Default frame range to include in backtrace.
- var from_index = 0
- var to_index = kDefaultBacktraceLength;
-
- // Get the range from the arguments.
- if (request.arguments) {
- if (request.arguments.fromFrame) {
- from_index = request.arguments.fromFrame;
- }
- if (request.arguments.toFrame) {
- to_index = request.arguments.toFrame;
- }
- if (request.arguments.bottom) {
- var tmp_index = total_frames - from_index;
- from_index = total_frames - to_index
- to_index = tmp_index;
- }
- if (from_index < 0 || to_index < 0) {
- return response.failed('Invalid frame number');
- }
- }
-
- // Adjust the index.
- to_index = Math.min(total_frames, to_index);
-
- if (to_index <= from_index) {
- var error = 'Invalid frame range';
- return response.failed(error);
- }
-
- // Create the response body.
- var frames = [];
- for (var i = from_index; i < to_index; i++) {
- frames.push(this.exec_state_.frame(i));
- }
- response.body = {
- fromFrame: from_index,
- toFrame: to_index,
- totalFrames: total_frames,
- frames: frames
- }
-};
-
-
-DebugCommandProcessor.prototype.frameRequest_ = function(request, response) {
- // No frames no source.
- if (this.exec_state_.frameCount() == 0) {
- return response.failed('No frames');
- }
-
- // With no arguments just keep the selected frame.
- if (request.arguments) {
- var index = request.arguments.number;
- if (index < 0 || this.exec_state_.frameCount() <= index) {
- return response.failed('Invalid frame number');
- }
-
- this.exec_state_.setSelectedFrame(request.arguments.number);
- }
- response.body = this.exec_state_.frame();
-};
-
-
-DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
- // Get the frame for which the scope or scopes are requested. With no frameNumber
- // argument use the currently selected frame.
- if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) {
- frame_index = request.arguments.frameNumber;
- if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
- return response.failed('Invalid frame number');
- }
- return this.exec_state_.frame(frame_index);
- } else {
- return this.exec_state_.frame();
- }
-}
-
-
-DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
- // No frames no scopes.
- if (this.exec_state_.frameCount() == 0) {
- return response.failed('No scopes');
- }
-
- // Get the frame for which the scopes are requested.
- var frame = this.frameForScopeRequest_(request);
-
- // Fill all scopes for this frame.
- var total_scopes = frame.scopeCount();
- var scopes = [];
- for (var i = 0; i < total_scopes; i++) {
- scopes.push(frame.scope(i));
- }
- response.body = {
- fromScope: 0,
- toScope: total_scopes,
- totalScopes: total_scopes,
- scopes: scopes
- }
-};
-
-
-DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
- // No frames no scopes.
- if (this.exec_state_.frameCount() == 0) {
- return response.failed('No scopes');
- }
-
- // Get the frame for which the scope is requested.
- var frame = this.frameForScopeRequest_(request);
-
- // With no scope argument just return top scope.
- var scope_index = 0;
- if (request.arguments && !IS_UNDEFINED(request.arguments.number)) {
- scope_index = %ToNumber(request.arguments.number);
- if (scope_index < 0 || frame.scopeCount() <= scope_index) {
- return response.failed('Invalid scope number');
- }
- }
-
- response.body = frame.scope(scope_index);
-};
-
-
-DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
- if (!request.arguments) {
- return response.failed('Missing arguments');
- }
-
- // Pull out arguments.
- var expression = request.arguments.expression;
- var frame = request.arguments.frame;
- var global = request.arguments.global;
- var disable_break = request.arguments.disable_break;
- var additional_context = request.arguments.additional_context;
-
- // The expression argument could be an integer so we convert it to a
- // string.
- try {
- expression = String(expression);
- } catch(e) {
- return response.failed('Failed to convert expression argument to string');
- }
-
- // Check for legal arguments.
- if (!IS_UNDEFINED(frame) && global) {
- return response.failed('Arguments "frame" and "global" are exclusive');
- }
-
- var additional_context_object;
- if (additional_context) {
- additional_context_object = {};
- for (var i = 0; i < additional_context.length; i++) {
- var mapping = additional_context[i];
- if (!IS_STRING(mapping.name) || !IS_NUMBER(mapping.handle)) {
- return response.failed("Context element #" + i +
- " must contain name:string and handle:number");
- }
- var context_value_mirror = LookupMirror(mapping.handle);
- if (!context_value_mirror) {
- return response.failed("Context object '" + mapping.name +
- "' #" + mapping.handle + "# not found");
- }
- additional_context_object[mapping.name] = context_value_mirror.value();
- }
- }
-
- // Global evaluate.
- if (global) {
- // Evaluate in the global context.
- response.body = this.exec_state_.evaluateGlobal(
- expression, Boolean(disable_break), additional_context_object);
- return;
- }
-
- // Default value for disable_break is true.
- if (IS_UNDEFINED(disable_break)) {
- disable_break = true;
- }
-
- // No frames no evaluate in frame.
- if (this.exec_state_.frameCount() == 0) {
- return response.failed('No frames');
- }
-
- // Check whether a frame was specified.
- if (!IS_UNDEFINED(frame)) {
- var frame_number = %ToNumber(frame);
- if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
- return response.failed('Invalid frame "' + frame + '"');
- }
- // Evaluate in the specified frame.
- response.body = this.exec_state_.frame(frame_number).evaluate(
- expression, Boolean(disable_break), additional_context_object);
- return;
- } else {
- // Evaluate in the selected frame.
- response.body = this.exec_state_.frame().evaluate(
- expression, Boolean(disable_break), additional_context_object);
- return;
- }
-};
-
-
-DebugCommandProcessor.prototype.getobjRequest_ = function(request, response) {
- if (!request.arguments) {
- return response.failed('Missing arguments');
- }
-
- // Pull out arguments.
- var obj_id = request.arguments.obj_id;
-
- // Check for legal arguments.
- if (IS_UNDEFINED(obj_id)) {
- return response.failed('Argument "obj_id" missing');
- }
-
- // Dump the object.
- response.body = MakeMirror(%GetLOLObj(obj_id));
-};
-
-
-DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) {
- if (!request.arguments) {
- return response.failed('Missing arguments');
- }
-
- // Pull out arguments.
- var handles = request.arguments.handles;
-
- // Check for legal arguments.
- if (IS_UNDEFINED(handles)) {
- return response.failed('Argument "handles" missing');
- }
-
- // Set 'includeSource' option for script lookup.
- if (!IS_UNDEFINED(request.arguments.includeSource)) {
- includeSource = %ToBoolean(request.arguments.includeSource);
- response.setOption('includeSource', includeSource);
- }
-
- // Lookup handles.
- var mirrors = {};
- for (var i = 0; i < handles.length; i++) {
- var handle = handles[i];
- var mirror = LookupMirror(handle);
- if (!mirror) {
- return response.failed('Object #' + handle + '# not found');
- }
- mirrors[handle] = mirror;
- }
- response.body = mirrors;
-};
-
-
-DebugCommandProcessor.prototype.referencesRequest_ =
- function(request, response) {
- if (!request.arguments) {
- return response.failed('Missing arguments');
- }
-
- // Pull out arguments.
- var type = request.arguments.type;
- var handle = request.arguments.handle;
-
- // Check for legal arguments.
- if (IS_UNDEFINED(type)) {
- return response.failed('Argument "type" missing');
- }
- if (IS_UNDEFINED(handle)) {
- return response.failed('Argument "handle" missing');
- }
- if (type != 'referencedBy' && type != 'constructedBy') {
- return response.failed('Invalid type "' + type + '"');
- }
-
- // Lookup handle and return objects with references the object.
- var mirror = LookupMirror(handle);
- if (mirror) {
- if (type == 'referencedBy') {
- response.body = mirror.referencedBy();
- } else {
- response.body = mirror.constructedBy();
- }
- } else {
- return response.failed('Object #' + handle + '# not found');
- }
-};
-
-
-DebugCommandProcessor.prototype.sourceRequest_ = function(request, response) {
- // No frames no source.
- if (this.exec_state_.frameCount() == 0) {
- return response.failed('No source');
- }
-
- var from_line;
- var to_line;
- var frame = this.exec_state_.frame();
- if (request.arguments) {
- // Pull out arguments.
- from_line = request.arguments.fromLine;
- to_line = request.arguments.toLine;
-
- if (!IS_UNDEFINED(request.arguments.frame)) {
- var frame_number = %ToNumber(request.arguments.frame);
- if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
- return response.failed('Invalid frame "' + frame + '"');
- }
- frame = this.exec_state_.frame(frame_number);
- }
- }
-
- // Get the script selected.
- var script = frame.func().script();
- if (!script) {
- return response.failed('No source');
- }
-
- // Get the source slice and fill it into the response.
- var slice = script.sourceSlice(from_line, to_line);
- if (!slice) {
- return response.failed('Invalid line interval');
- }
- response.body = {};
- response.body.source = slice.sourceText();
- response.body.fromLine = slice.from_line;
- response.body.toLine = slice.to_line;
- response.body.fromPosition = slice.from_position;
- response.body.toPosition = slice.to_position;
- response.body.totalLines = script.lineCount();
-};
-
-
-DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
- var types = ScriptTypeFlag(Debug.ScriptType.Normal);
- var includeSource = false;
- var idsToInclude = null;
- if (request.arguments) {
- // Pull out arguments.
- if (!IS_UNDEFINED(request.arguments.types)) {
- types = %ToNumber(request.arguments.types);
- if (isNaN(types) || types < 0) {
- return response.failed('Invalid types "' + request.arguments.types + '"');
- }
- }
-
- if (!IS_UNDEFINED(request.arguments.includeSource)) {
- includeSource = %ToBoolean(request.arguments.includeSource);
- response.setOption('includeSource', includeSource);
- }
-
- if (IS_ARRAY(request.arguments.ids)) {
- idsToInclude = {};
- var ids = request.arguments.ids;
- for (var i = 0; i < ids.length; i++) {
- idsToInclude[ids[i]] = true;
- }
- }
-
- var filterStr = null;
- var filterNum = null;
- if (!IS_UNDEFINED(request.arguments.filter)) {
- var num = %ToNumber(request.arguments.filter);
- if (!isNaN(num)) {
- filterNum = num;
- }
- filterStr = request.arguments.filter;
- }
- }
-
- // Collect all scripts in the heap.
- var scripts = %DebugGetLoadedScripts();
-
- response.body = [];
-
- for (var i = 0; i < scripts.length; i++) {
- if (idsToInclude && !idsToInclude[scripts[i].id]) {
- continue;
- }
- if (filterStr || filterNum) {
- var script = scripts[i];
- var found = false;
- if (filterNum && !found) {
- if (script.id && script.id === filterNum) {
- found = true;
- }
- }
- if (filterStr && !found) {
- if (script.name && script.name.indexOf(filterStr) >= 0) {
- found = true;
- }
- }
- if (!found) continue;
- }
- if (types & ScriptTypeFlag(scripts[i].type)) {
- response.body.push(MakeMirror(scripts[i]));
- }
- }
-};
-
-
-DebugCommandProcessor.prototype.threadsRequest_ = function(request, response) {
- // Get the number of threads.
- var total_threads = this.exec_state_.threadCount();
-
- // Get information for all threads.
- var threads = [];
- for (var i = 0; i < total_threads; i++) {
- var details = %GetThreadDetails(this.exec_state_.break_id, i);
- var thread_info = { current: details[0],
- id: details[1]
- }
- threads.push(thread_info);
- }
-
- // Create the response body.
- response.body = {
- totalThreads: total_threads,
- threads: threads
- }
-};
-
-
-DebugCommandProcessor.prototype.suspendRequest_ = function(request, response) {
- response.running = false;
-};
-
-
-DebugCommandProcessor.prototype.versionRequest_ = function(request, response) {
- response.body = {
- V8Version: %GetV8Version()
- }
-};
-
-
-DebugCommandProcessor.prototype.profileRequest_ = function(request, response) {
- if (!request.arguments) {
- return response.failed('Missing arguments');
- }
- var modules = parseInt(request.arguments.modules);
- if (isNaN(modules)) {
- return response.failed('Modules is not an integer');
- }
- var tag = parseInt(request.arguments.tag);
- if (isNaN(tag)) {
- tag = 0;
- }
- if (request.arguments.command == 'resume') {
- %ProfilerResume(modules, tag);
- } else if (request.arguments.command == 'pause') {
- %ProfilerPause(modules, tag);
- } else {
- return response.failed('Unknown command');
- }
- response.body = {};
-};
-
-
-DebugCommandProcessor.prototype.changeLiveRequest_ = function(request, response) {
- if (!Debug.LiveEdit) {
- return response.failed('LiveEdit feature is not supported');
- }
- if (!request.arguments) {
- return response.failed('Missing arguments');
- }
- var script_id = request.arguments.script_id;
- var preview_only = !!request.arguments.preview_only;
-
- var scripts = %DebugGetLoadedScripts();
-
- var the_script = null;
- for (var i = 0; i < scripts.length; i++) {
- if (scripts[i].id == script_id) {
- the_script = scripts[i];
- }
- }
- if (!the_script) {
- response.failed('Script not found');
- return;
- }
-
- var change_log = new Array();
-
- if (!IS_STRING(request.arguments.new_source)) {
- throw "new_source argument expected";
- }
-
- var new_source = request.arguments.new_source;
-
- var result_description = Debug.LiveEdit.SetScriptSource(the_script,
- new_source, preview_only, change_log);
- response.body = {change_log: change_log, result: result_description};
-
- if (!preview_only && !this.running_ && result_description.stack_modified) {
- response.body.stepin_recommended = true;
- }
-};
-
-
-DebugCommandProcessor.prototype.debuggerFlagsRequest_ = function(request,
- response) {
- // Check for legal request.
- if (!request.arguments) {
- response.failed('Missing arguments');
- return;
- }
-
- // Pull out arguments.
- var flags = request.arguments.flags;
-
- response.body = { flags: [] };
- if (!IS_UNDEFINED(flags)) {
- for (var i = 0; i < flags.length; i++) {
- var name = flags[i].name;
- var debugger_flag = debugger_flags[name];
- if (!debugger_flag) {
- continue;
- }
- if ('value' in flags[i]) {
- debugger_flag.setValue(flags[i].value);
- }
- response.body.flags.push({ name: name, value: debugger_flag.getValue() });
- }
- } else {
- for (var name in debugger_flags) {
- var value = debugger_flags[name].getValue();
- response.body.flags.push({ name: name, value: value });
- }
- }
-}
-
-
-DebugCommandProcessor.prototype.v8FlagsRequest_ = function(request, response) {
- var flags = request.arguments.flags;
- if (!flags) flags = '';
- %SetFlags(flags);
-};
-
-
-DebugCommandProcessor.prototype.gcRequest_ = function(request, response) {
- var type = request.arguments.type;
- if (!type) type = 'all';
-
- var before = %GetHeapUsage();
- %CollectGarbage(type);
- var after = %GetHeapUsage();
-
- response.body = { "before": before, "after": after };
-};
-
-
-DebugCommandProcessor.prototype.lolCaptureRequest_ =
- function(request, response) {
- response.body = %CaptureLOL();
-};
-
-
-DebugCommandProcessor.prototype.lolDeleteRequest_ =
- function(request, response) {
- var id = request.arguments.id;
- var result = %DeleteLOL(id);
- if (result) {
- response.body = { id: id };
- } else {
- response.failed('Failed to delete: live object list ' + id + ' not found.');
- }
-};
-
-
-DebugCommandProcessor.prototype.lolDiffRequest_ = function(request, response) {
- var id1 = request.arguments.id1;
- var id2 = request.arguments.id2;
- var verbose = request.arguments.verbose;
- var filter = request.arguments.filter;
- if (verbose === true) {
- var start = request.arguments.start;
- var count = request.arguments.count;
- response.body = %DumpLOL(id1, id2, start, count, filter);
- } else {
- response.body = %SummarizeLOL(id1, id2, filter);
- }
-};
-
-
-DebugCommandProcessor.prototype.lolGetIdRequest_ = function(request, response) {
- var address = request.arguments.address;
- response.body = {};
- response.body.id = %GetLOLObjId(address);
-};
-
-
-DebugCommandProcessor.prototype.lolInfoRequest_ = function(request, response) {
- var start = request.arguments.start;
- var count = request.arguments.count;
- response.body = %InfoLOL(start, count);
-};
-
-
-DebugCommandProcessor.prototype.lolResetRequest_ = function(request, response) {
- %ResetLOL();
-};
-
-
-DebugCommandProcessor.prototype.lolRetainersRequest_ =
- function(request, response) {
- var id = request.arguments.id;
- var verbose = request.arguments.verbose;
- var start = request.arguments.start;
- var count = request.arguments.count;
- var filter = request.arguments.filter;
-
- response.body = %GetLOLObjRetainers(id, Mirror.prototype, verbose,
- start, count, filter);
-};
-
-
-DebugCommandProcessor.prototype.lolPathRequest_ = function(request, response) {
- var id1 = request.arguments.id1;
- var id2 = request.arguments.id2;
- response.body = {};
- response.body.path = %GetLOLPath(id1, id2, Mirror.prototype);
-};
-
-
-DebugCommandProcessor.prototype.lolPrintRequest_ = function(request, response) {
- var id = request.arguments.id;
- response.body = {};
- response.body.dump = %PrintLOLObj(id);
-};
-
-
-// Check whether the previously processed command caused the VM to become
-// running.
-DebugCommandProcessor.prototype.isRunning = function() {
- return this.running_;
-}
-
-
-DebugCommandProcessor.prototype.systemBreak = function(cmd, args) {
- return %SystemBreak();
-};
-
-
-function NumberToHex8Str(n) {
- var r = "";
- for (var i = 0; i < 8; ++i) {
- var c = hexCharArray[n & 0x0F]; // hexCharArray is defined in uri.js
- r = c + r;
- n = n >>> 4;
- }
- return r;
-};
-
-
-/**
- * Convert an Object to its debugger protocol representation. The representation
- * may be serilized to a JSON object using JSON.stringify().
- * This implementation simply runs through all string property names, converts
- * each property value to a protocol value and adds the property to the result
- * object. For type "object" the function will be called recursively. Note that
- * circular structures will cause infinite recursion.
- * @param {Object} object The object to format as protocol object.
- * @param {MirrorSerializer} mirror_serializer The serializer to use if any
- * mirror objects are encountered.
- * @return {Object} Protocol object value.
- */
-function ObjectToProtocolObject_(object, mirror_serializer) {
- var content = {};
- for (var key in object) {
- // Only consider string keys.
- if (typeof key == 'string') {
- // Format the value based on its type.
- var property_value_json = ValueToProtocolValue_(object[key],
- mirror_serializer);
- // Add the property if relevant.
- if (!IS_UNDEFINED(property_value_json)) {
- content[key] = property_value_json;
- }
- }
- }
-
- return content;
-}
-
-
-/**
- * Convert an array to its debugger protocol representation. It will convert
- * each array element to a protocol value.
- * @param {Array} array The array to format as protocol array.
- * @param {MirrorSerializer} mirror_serializer The serializer to use if any
- * mirror objects are encountered.
- * @return {Array} Protocol array value.
- */
-function ArrayToProtocolArray_(array, mirror_serializer) {
- var json = [];
- for (var i = 0; i < array.length; i++) {
- json.push(ValueToProtocolValue_(array[i], mirror_serializer));
- }
- return json;
-}
-
-
-/**
- * Convert a value to its debugger protocol representation.
- * @param {*} value The value to format as protocol value.
- * @param {MirrorSerializer} mirror_serializer The serializer to use if any
- * mirror objects are encountered.
- * @return {*} Protocol value.
- */
-function ValueToProtocolValue_(value, mirror_serializer) {
- // Format the value based on its type.
- var json;
- switch (typeof value) {
- case 'object':
- if (value instanceof Mirror) {
- json = mirror_serializer.serializeValue(value);
- } else if (IS_ARRAY(value)){
- json = ArrayToProtocolArray_(value, mirror_serializer);
- } else {
- json = ObjectToProtocolObject_(value, mirror_serializer);
- }
- break;
-
- case 'boolean':
- case 'string':
- case 'number':
- json = value;
- break
-
- default:
- json = null;
- }
- return json;
-}
diff --git a/src/3rdparty/v8/src/debug.cc b/src/3rdparty/v8/src/debug.cc
deleted file mode 100644
index d6f91d8..0000000
--- a/src/3rdparty/v8/src/debug.cc
+++ /dev/null
@@ -1,3188 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "arguments.h"
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen.h"
-#include "compilation-cache.h"
-#include "compiler.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "execution.h"
-#include "global-handles.h"
-#include "ic.h"
-#include "ic-inl.h"
-#include "messages.h"
-#include "natives.h"
-#include "stub-cache.h"
-#include "log.h"
-
-#include "../include/v8-debug.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-
-Debug::Debug(Isolate* isolate)
- : has_break_points_(false),
- script_cache_(NULL),
- debug_info_list_(NULL),
- disable_break_(false),
- break_on_exception_(false),
- break_on_uncaught_exception_(false),
- debug_break_return_(NULL),
- debug_break_slot_(NULL),
- isolate_(isolate) {
- memset(registers_, 0, sizeof(JSCallerSavedBuffer));
-}
-
-
-Debug::~Debug() {
-}
-
-
-static void PrintLn(v8::Local<v8::Value> value) {
- v8::Local<v8::String> s = value->ToString();
- ScopedVector<char> data(s->Length() + 1);
- if (data.start() == NULL) {
- V8::FatalProcessOutOfMemory("PrintLn");
- return;
- }
- s->WriteAscii(data.start());
- PrintF("%s\n", data.start());
-}
-
-
-static Handle<Code> ComputeCallDebugBreak(int argc, Code::Kind kind) {
- Isolate* isolate = Isolate::Current();
- CALL_HEAP_FUNCTION(
- isolate,
- isolate->stub_cache()->ComputeCallDebugBreak(argc, kind),
- Code);
-}
-
-
-static Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
- Isolate* isolate = Isolate::Current();
- CALL_HEAP_FUNCTION(
- isolate,
- isolate->stub_cache()->ComputeCallDebugPrepareStepIn(argc, kind),
- Code);
-}
-
-
-static v8::Handle<v8::Context> GetDebugEventContext(Isolate* isolate) {
- Handle<Context> context = isolate->debug()->debugger_entry()->GetContext();
- // Isolate::context() may have been NULL when "script collected" event
- // occured.
- if (context.is_null()) return v8::Local<v8::Context>();
- Handle<Context> global_context(context->global_context());
- return v8::Utils::ToLocal(global_context);
-}
-
-
-BreakLocationIterator::BreakLocationIterator(Handle<DebugInfo> debug_info,
- BreakLocatorType type) {
- debug_info_ = debug_info;
- type_ = type;
- reloc_iterator_ = NULL;
- reloc_iterator_original_ = NULL;
- Reset(); // Initialize the rest of the member variables.
-}
-
-
-BreakLocationIterator::~BreakLocationIterator() {
- ASSERT(reloc_iterator_ != NULL);
- ASSERT(reloc_iterator_original_ != NULL);
- delete reloc_iterator_;
- delete reloc_iterator_original_;
-}
-
-
-void BreakLocationIterator::Next() {
- AssertNoAllocation nogc;
- ASSERT(!RinfoDone());
-
- // Iterate through reloc info for code and original code stopping at each
- // breakable code target.
- bool first = break_point_ == -1;
- while (!RinfoDone()) {
- if (!first) RinfoNext();
- first = false;
- if (RinfoDone()) return;
-
- // Whenever a statement position or (plain) position is passed update the
- // current value of these.
- if (RelocInfo::IsPosition(rmode())) {
- if (RelocInfo::IsStatementPosition(rmode())) {
- statement_position_ = static_cast<int>(
- rinfo()->data() - debug_info_->shared()->start_position());
- }
- // Always update the position as we don't want that to be before the
- // statement position.
- position_ = static_cast<int>(
- rinfo()->data() - debug_info_->shared()->start_position());
- ASSERT(position_ >= 0);
- ASSERT(statement_position_ >= 0);
- }
-
- if (IsDebugBreakSlot()) {
- // There is always a possible break point at a debug break slot.
- break_point_++;
- return;
- } else if (RelocInfo::IsCodeTarget(rmode())) {
- // Check for breakable code target. Look in the original code as setting
- // break points can cause the code targets in the running (debugged) code
- // to be of a different kind than in the original code.
- Address target = original_rinfo()->target_address();
- Code* code = Code::GetCodeFromTargetAddress(target);
- if ((code->is_inline_cache_stub() &&
- !code->is_binary_op_stub() &&
- !code->is_type_recording_binary_op_stub() &&
- !code->is_compare_ic_stub()) ||
- RelocInfo::IsConstructCall(rmode())) {
- break_point_++;
- return;
- }
- if (code->kind() == Code::STUB) {
- if (IsDebuggerStatement()) {
- break_point_++;
- return;
- }
- if (type_ == ALL_BREAK_LOCATIONS) {
- if (Debug::IsBreakStub(code)) {
- break_point_++;
- return;
- }
- } else {
- ASSERT(type_ == SOURCE_BREAK_LOCATIONS);
- if (Debug::IsSourceBreakStub(code)) {
- break_point_++;
- return;
- }
- }
- }
- }
-
- // Check for break at return.
- if (RelocInfo::IsJSReturn(rmode())) {
- // Set the positions to the end of the function.
- if (debug_info_->shared()->HasSourceCode()) {
- position_ = debug_info_->shared()->end_position() -
- debug_info_->shared()->start_position() - 1;
- } else {
- position_ = 0;
- }
- statement_position_ = position_;
- break_point_++;
- return;
- }
- }
-}
-
-
-void BreakLocationIterator::Next(int count) {
- while (count > 0) {
- Next();
- count--;
- }
-}
-
-
-// Find the break point closest to the supplied address.
-void BreakLocationIterator::FindBreakLocationFromAddress(Address pc) {
- // Run through all break points to locate the one closest to the address.
- int closest_break_point = 0;
- int distance = kMaxInt;
- while (!Done()) {
- // Check if this break point is closer that what was previously found.
- if (this->pc() < pc && pc - this->pc() < distance) {
- closest_break_point = break_point();
- distance = static_cast<int>(pc - this->pc());
- // Check whether we can't get any closer.
- if (distance == 0) break;
- }
- Next();
- }
-
- // Move to the break point found.
- Reset();
- Next(closest_break_point);
-}
-
-
-// Find the break point closest to the supplied source position.
-void BreakLocationIterator::FindBreakLocationFromPosition(int position) {
- // Run through all break points to locate the one closest to the source
- // position.
- int closest_break_point = 0;
- int distance = kMaxInt;
- while (!Done()) {
- // Check if this break point is closer that what was previously found.
- if (position <= statement_position() &&
- statement_position() - position < distance) {
- closest_break_point = break_point();
- distance = statement_position() - position;
- // Check whether we can't get any closer.
- if (distance == 0) break;
- }
- Next();
- }
-
- // Move to the break point found.
- Reset();
- Next(closest_break_point);
-}
-
-
-void BreakLocationIterator::Reset() {
- // Create relocation iterators for the two code objects.
- if (reloc_iterator_ != NULL) delete reloc_iterator_;
- if (reloc_iterator_original_ != NULL) delete reloc_iterator_original_;
- reloc_iterator_ = new RelocIterator(debug_info_->code());
- reloc_iterator_original_ = new RelocIterator(debug_info_->original_code());
-
- // Position at the first break point.
- break_point_ = -1;
- position_ = 1;
- statement_position_ = 1;
- Next();
-}
-
-
-bool BreakLocationIterator::Done() const {
- return RinfoDone();
-}
-
-
-void BreakLocationIterator::SetBreakPoint(Handle<Object> break_point_object) {
- // If there is not already a real break point here patch code with debug
- // break.
- if (!HasBreakPoint()) {
- SetDebugBreak();
- }
- ASSERT(IsDebugBreak() || IsDebuggerStatement());
- // Set the break point information.
- DebugInfo::SetBreakPoint(debug_info_, code_position(),
- position(), statement_position(),
- break_point_object);
-}
-
-
-void BreakLocationIterator::ClearBreakPoint(Handle<Object> break_point_object) {
- // Clear the break point information.
- DebugInfo::ClearBreakPoint(debug_info_, code_position(), break_point_object);
- // If there are no more break points here remove the debug break.
- if (!HasBreakPoint()) {
- ClearDebugBreak();
- ASSERT(!IsDebugBreak());
- }
-}
-
-
-void BreakLocationIterator::SetOneShot() {
- // Debugger statement always calls debugger. No need to modify it.
- if (IsDebuggerStatement()) {
- return;
- }
-
- // If there is a real break point here no more to do.
- if (HasBreakPoint()) {
- ASSERT(IsDebugBreak());
- return;
- }
-
- // Patch code with debug break.
- SetDebugBreak();
-}
-
-
-void BreakLocationIterator::ClearOneShot() {
- // Debugger statement always calls debugger. No need to modify it.
- if (IsDebuggerStatement()) {
- return;
- }
-
- // If there is a real break point here no more to do.
- if (HasBreakPoint()) {
- ASSERT(IsDebugBreak());
- return;
- }
-
- // Patch code removing debug break.
- ClearDebugBreak();
- ASSERT(!IsDebugBreak());
-}
-
-
-void BreakLocationIterator::SetDebugBreak() {
- // Debugger statement always calls debugger. No need to modify it.
- if (IsDebuggerStatement()) {
- return;
- }
-
- // If there is already a break point here just return. This might happen if
- // the same code is flooded with break points twice. Flooding the same
- // function twice might happen when stepping in a function with an exception
- // handler as the handler and the function is the same.
- if (IsDebugBreak()) {
- return;
- }
-
- if (RelocInfo::IsJSReturn(rmode())) {
- // Patch the frame exit code with a break point.
- SetDebugBreakAtReturn();
- } else if (IsDebugBreakSlot()) {
- // Patch the code in the break slot.
- SetDebugBreakAtSlot();
- } else {
- // Patch the IC call.
- SetDebugBreakAtIC();
- }
- ASSERT(IsDebugBreak());
-}
-
-
-void BreakLocationIterator::ClearDebugBreak() {
- // Debugger statement always calls debugger. No need to modify it.
- if (IsDebuggerStatement()) {
- return;
- }
-
- if (RelocInfo::IsJSReturn(rmode())) {
- // Restore the frame exit code.
- ClearDebugBreakAtReturn();
- } else if (IsDebugBreakSlot()) {
- // Restore the code in the break slot.
- ClearDebugBreakAtSlot();
- } else {
- // Patch the IC call.
- ClearDebugBreakAtIC();
- }
- ASSERT(!IsDebugBreak());
-}
-
-
-void BreakLocationIterator::PrepareStepIn() {
- HandleScope scope;
-
- // Step in can only be prepared if currently positioned on an IC call,
- // construct call or CallFunction stub call.
- Address target = rinfo()->target_address();
- Handle<Code> code(Code::GetCodeFromTargetAddress(target));
- if (code->is_call_stub() || code->is_keyed_call_stub()) {
- // Step in through IC call is handled by the runtime system. Therefore make
- // sure that the any current IC is cleared and the runtime system is
- // called. If the executing code has a debug break at the location change
- // the call in the original code as it is the code there that will be
- // executed in place of the debug break call.
- Handle<Code> stub = ComputeCallDebugPrepareStepIn(code->arguments_count(),
- code->kind());
- if (IsDebugBreak()) {
- original_rinfo()->set_target_address(stub->entry());
- } else {
- rinfo()->set_target_address(stub->entry());
- }
- } else {
-#ifdef DEBUG
- // All the following stuff is needed only for assertion checks so the code
- // is wrapped in ifdef.
- Handle<Code> maybe_call_function_stub = code;
- if (IsDebugBreak()) {
- Address original_target = original_rinfo()->target_address();
- maybe_call_function_stub =
- Handle<Code>(Code::GetCodeFromTargetAddress(original_target));
- }
- bool is_call_function_stub =
- (maybe_call_function_stub->kind() == Code::STUB &&
- maybe_call_function_stub->major_key() == CodeStub::CallFunction);
-
- // Step in through construct call requires no changes to the running code.
- // Step in through getters/setters should already be prepared as well
- // because caller of this function (Debug::PrepareStep) is expected to
- // flood the top frame's function with one shot breakpoints.
- // Step in through CallFunction stub should also be prepared by caller of
- // this function (Debug::PrepareStep) which should flood target function
- // with breakpoints.
- ASSERT(RelocInfo::IsConstructCall(rmode()) || code->is_inline_cache_stub()
- || is_call_function_stub);
-#endif
- }
-}
-
-
-// Check whether the break point is at a position which will exit the function.
-bool BreakLocationIterator::IsExit() const {
- return (RelocInfo::IsJSReturn(rmode()));
-}
-
-
-bool BreakLocationIterator::HasBreakPoint() {
- return debug_info_->HasBreakPoint(code_position());
-}
-
-
-// Check whether there is a debug break at the current position.
-bool BreakLocationIterator::IsDebugBreak() {
- if (RelocInfo::IsJSReturn(rmode())) {
- return IsDebugBreakAtReturn();
- } else if (IsDebugBreakSlot()) {
- return IsDebugBreakAtSlot();
- } else {
- return Debug::IsDebugBreak(rinfo()->target_address());
- }
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtIC() {
- // Patch the original code with the current address as the current address
- // might have changed by the inline caching since the code was copied.
- original_rinfo()->set_target_address(rinfo()->target_address());
-
- RelocInfo::Mode mode = rmode();
- if (RelocInfo::IsCodeTarget(mode)) {
- Address target = rinfo()->target_address();
- Handle<Code> code(Code::GetCodeFromTargetAddress(target));
-
- // Patch the code to invoke the builtin debug break function matching the
- // calling convention used by the call site.
- Handle<Code> dbgbrk_code(Debug::FindDebugBreak(code, mode));
- rinfo()->set_target_address(dbgbrk_code->entry());
-
- // For stubs that refer back to an inlined version clear the cached map for
- // the inlined case to always go through the IC. As long as the break point
- // is set the patching performed by the runtime system will take place in
- // the code copy and will therefore have no effect on the running code
- // keeping it from using the inlined code.
- if (code->is_keyed_load_stub()) {
- KeyedLoadIC::ClearInlinedVersion(pc());
- } else if (code->is_keyed_store_stub()) {
- KeyedStoreIC::ClearInlinedVersion(pc());
- } else if (code->is_load_stub()) {
- LoadIC::ClearInlinedVersion(pc());
- } else if (code->is_store_stub()) {
- StoreIC::ClearInlinedVersion(pc());
- }
- }
-}
-
-
-void BreakLocationIterator::ClearDebugBreakAtIC() {
- // Patch the code to the original invoke.
- rinfo()->set_target_address(original_rinfo()->target_address());
-
- RelocInfo::Mode mode = rmode();
- if (RelocInfo::IsCodeTarget(mode)) {
- AssertNoAllocation nogc;
- Address target = original_rinfo()->target_address();
- Code* code = Code::GetCodeFromTargetAddress(target);
-
- // Restore the inlined version of keyed stores to get back to the
- // fast case. We need to patch back the keyed store because no
- // patching happens when running normally. For keyed loads, the
- // map check will get patched back when running normally after ICs
- // have been cleared at GC.
- if (code->is_keyed_store_stub()) KeyedStoreIC::RestoreInlinedVersion(pc());
- }
-}
-
-
-bool BreakLocationIterator::IsDebuggerStatement() {
- return RelocInfo::DEBUG_BREAK == rmode();
-}
-
-
-bool BreakLocationIterator::IsDebugBreakSlot() {
- return RelocInfo::DEBUG_BREAK_SLOT == rmode();
-}
-
-
-Object* BreakLocationIterator::BreakPointObjects() {
- return debug_info_->GetBreakPointObjects(code_position());
-}
-
-
-// Clear out all the debug break code. This is ONLY supposed to be used when
-// shutting down the debugger as it will leave the break point information in
-// DebugInfo even though the code is patched back to the non break point state.
-void BreakLocationIterator::ClearAllDebugBreak() {
- while (!Done()) {
- ClearDebugBreak();
- Next();
- }
-}
-
-
-bool BreakLocationIterator::RinfoDone() const {
- ASSERT(reloc_iterator_->done() == reloc_iterator_original_->done());
- return reloc_iterator_->done();
-}
-
-
-void BreakLocationIterator::RinfoNext() {
- reloc_iterator_->next();
- reloc_iterator_original_->next();
-#ifdef DEBUG
- ASSERT(reloc_iterator_->done() == reloc_iterator_original_->done());
- if (!reloc_iterator_->done()) {
- ASSERT(rmode() == original_rmode());
- }
-#endif
-}
-
-
-// Threading support.
-void Debug::ThreadInit() {
- thread_local_.break_count_ = 0;
- thread_local_.break_id_ = 0;
- thread_local_.break_frame_id_ = StackFrame::NO_ID;
- thread_local_.last_step_action_ = StepNone;
- thread_local_.last_statement_position_ = RelocInfo::kNoPosition;
- thread_local_.step_count_ = 0;
- thread_local_.last_fp_ = 0;
- thread_local_.step_into_fp_ = 0;
- thread_local_.step_out_fp_ = 0;
- thread_local_.after_break_target_ = 0;
- // TODO(isolates): frames_are_dropped_?
- thread_local_.debugger_entry_ = NULL;
- thread_local_.pending_interrupts_ = 0;
- thread_local_.restarter_frame_function_pointer_ = NULL;
-}
-
-
-char* Debug::ArchiveDebug(char* storage) {
- char* to = storage;
- memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
- to += sizeof(ThreadLocal);
- memcpy(to, reinterpret_cast<char*>(&registers_), sizeof(registers_));
- ThreadInit();
- ASSERT(to <= storage + ArchiveSpacePerThread());
- return storage + ArchiveSpacePerThread();
-}
-
-
-char* Debug::RestoreDebug(char* storage) {
- char* from = storage;
- memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
- from += sizeof(ThreadLocal);
- memcpy(reinterpret_cast<char*>(&registers_), from, sizeof(registers_));
- ASSERT(from <= storage + ArchiveSpacePerThread());
- return storage + ArchiveSpacePerThread();
-}
-
-
-int Debug::ArchiveSpacePerThread() {
- return sizeof(ThreadLocal) + sizeof(JSCallerSavedBuffer);
-}
-
-
-// Frame structure (conforms InternalFrame structure):
-// -- code
-// -- SMI maker
-// -- function (slot is called "context")
-// -- frame base
-Object** Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
- Handle<Code> code) {
- ASSERT(bottom_js_frame->is_java_script());
-
- Address fp = bottom_js_frame->fp();
-
- // Move function pointer into "context" slot.
- Memory::Object_at(fp + StandardFrameConstants::kContextOffset) =
- Memory::Object_at(fp + JavaScriptFrameConstants::kFunctionOffset);
-
- Memory::Object_at(fp + InternalFrameConstants::kCodeOffset) = *code;
- Memory::Object_at(fp + StandardFrameConstants::kMarkerOffset) =
- Smi::FromInt(StackFrame::INTERNAL);
-
- return reinterpret_cast<Object**>(&Memory::Object_at(
- fp + StandardFrameConstants::kContextOffset));
-}
-
-const int Debug::kFrameDropperFrameSize = 4;
-
-
-void ScriptCache::Add(Handle<Script> script) {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
- // Create an entry in the hash map for the script.
- int id = Smi::cast(script->id())->value();
- HashMap::Entry* entry =
- HashMap::Lookup(reinterpret_cast<void*>(id), Hash(id), true);
- if (entry->value != NULL) {
- ASSERT(*script == *reinterpret_cast<Script**>(entry->value));
- return;
- }
-
- // Globalize the script object, make it weak and use the location of the
- // global handle as the value in the hash map.
- Handle<Script> script_ =
- Handle<Script>::cast(
- (global_handles->Create(*script)));
- global_handles->MakeWeak(
- reinterpret_cast<Object**>(script_.location()),
- this,
- ScriptCache::HandleWeakScript);
- entry->value = script_.location();
-}
-
-
-Handle<FixedArray> ScriptCache::GetScripts() {
- Handle<FixedArray> instances = FACTORY->NewFixedArray(occupancy());
- int count = 0;
- for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
- ASSERT(entry->value != NULL);
- if (entry->value != NULL) {
- instances->set(count, *reinterpret_cast<Script**>(entry->value));
- count++;
- }
- }
- return instances;
-}
-
-
-void ScriptCache::ProcessCollectedScripts() {
- Debugger* debugger = Isolate::Current()->debugger();
- for (int i = 0; i < collected_scripts_.length(); i++) {
- debugger->OnScriptCollected(collected_scripts_[i]);
- }
- collected_scripts_.Clear();
-}
-
-
-void ScriptCache::Clear() {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
- // Iterate the script cache to get rid of all the weak handles.
- for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
- ASSERT(entry != NULL);
- Object** location = reinterpret_cast<Object**>(entry->value);
- ASSERT((*location)->IsScript());
- global_handles->ClearWeakness(location);
- global_handles->Destroy(location);
- }
- // Clear the content of the hash map.
- HashMap::Clear();
-}
-
-
-void ScriptCache::HandleWeakScript(v8::Persistent<v8::Value> obj, void* data) {
- ScriptCache* script_cache = reinterpret_cast<ScriptCache*>(data);
- // Find the location of the global handle.
- Script** location =
- reinterpret_cast<Script**>(Utils::OpenHandle(*obj).location());
- ASSERT((*location)->IsScript());
-
- // Remove the entry from the cache.
- int id = Smi::cast((*location)->id())->value();
- script_cache->Remove(reinterpret_cast<void*>(id), Hash(id));
- script_cache->collected_scripts_.Add(id);
-
- // Clear the weak handle.
- obj.Dispose();
- obj.Clear();
-}
-
-
-void Debug::Setup(bool create_heap_objects) {
- ThreadInit();
- if (create_heap_objects) {
- // Get code to handle debug break on return.
- debug_break_return_ =
- isolate_->builtins()->builtin(Builtins::kReturn_DebugBreak);
- ASSERT(debug_break_return_->IsCode());
- // Get code to handle debug break in debug break slots.
- debug_break_slot_ =
- isolate_->builtins()->builtin(Builtins::kSlot_DebugBreak);
- ASSERT(debug_break_slot_->IsCode());
- }
-}
-
-
-void Debug::HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data) {
- Debug* debug = Isolate::Current()->debug();
- DebugInfoListNode* node = reinterpret_cast<DebugInfoListNode*>(data);
- // We need to clear all breakpoints associated with the function to restore
- // original code and avoid patching the code twice later because
- // the function will live in the heap until next gc, and can be found by
- // Runtime::FindSharedFunctionInfoInScript.
- BreakLocationIterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
- it.ClearAllDebugBreak();
- debug->RemoveDebugInfo(node->debug_info());
-#ifdef DEBUG
- node = debug->debug_info_list_;
- while (node != NULL) {
- ASSERT(node != reinterpret_cast<DebugInfoListNode*>(data));
- node = node->next();
- }
-#endif
-}
-
-
-DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
- // Globalize the request debug info object and make it weak.
- debug_info_ = Handle<DebugInfo>::cast(
- (global_handles->Create(debug_info)));
- global_handles->MakeWeak(
- reinterpret_cast<Object**>(debug_info_.location()),
- this,
- Debug::HandleWeakDebugInfo);
-}
-
-
-DebugInfoListNode::~DebugInfoListNode() {
- Isolate::Current()->global_handles()->Destroy(
- reinterpret_cast<Object**>(debug_info_.location()));
-}
-
-
-bool Debug::CompileDebuggerScript(int index) {
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- HandleScope scope(isolate);
-
- // Bail out if the index is invalid.
- if (index == -1) {
- return false;
- }
-
- // Find source and name for the requested script.
- Handle<String> source_code =
- isolate->bootstrapper()->NativesSourceLookup(index);
- Vector<const char> name = Natives::GetScriptName(index);
- Handle<String> script_name = factory->NewStringFromAscii(name);
-
- // Compile the script.
- Handle<SharedFunctionInfo> function_info;
- function_info = Compiler::Compile(source_code,
- script_name,
- 0, 0, NULL, NULL,
- Handle<String>::null(),
- NATIVES_CODE);
-
- // Silently ignore stack overflows during compilation.
- if (function_info.is_null()) {
- ASSERT(isolate->has_pending_exception());
- isolate->clear_pending_exception();
- return false;
- }
-
- // Execute the shared function in the debugger context.
- Handle<Context> context = isolate->global_context();
- bool caught_exception = false;
- Handle<JSFunction> function =
- factory->NewFunctionFromSharedFunctionInfo(function_info, context);
- Handle<Object> result =
- Execution::TryCall(function, Handle<Object>(context->global()),
- 0, NULL, &caught_exception);
-
- // Check for caught exceptions.
- if (caught_exception) {
- Handle<Object> message = MessageHandler::MakeMessageObject(
- "error_loading_debugger", NULL, Vector<Handle<Object> >::empty(),
- Handle<String>(), Handle<JSArray>());
- MessageHandler::ReportMessage(NULL, message);
- return false;
- }
-
- // Mark this script as native and return successfully.
- Handle<Script> script(Script::cast(function->shared()->script()));
- script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
- return true;
-}
-
-
-bool Debug::Load() {
- // Return if debugger is already loaded.
- if (IsLoaded()) return true;
-
- ASSERT(Isolate::Current() == isolate_);
- Debugger* debugger = isolate_->debugger();
-
- // Bail out if we're already in the process of compiling the native
- // JavaScript source code for the debugger.
- if (debugger->compiling_natives() ||
- debugger->is_loading_debugger())
- return false;
- debugger->set_loading_debugger(true);
-
- // Disable breakpoints and interrupts while compiling and running the
- // debugger scripts including the context creation code.
- DisableBreak disable(true);
- PostponeInterruptsScope postpone(isolate_);
-
- // Create the debugger context.
- HandleScope scope(isolate_);
- Handle<Context> context =
- isolate_->bootstrapper()->CreateEnvironment(
- Handle<Object>::null(),
- v8::Handle<ObjectTemplate>(),
- NULL);
-
- // Use the debugger context.
- SaveContext save(isolate_);
- isolate_->set_context(*context);
-
- // Expose the builtins object in the debugger context.
- Handle<String> key = isolate_->factory()->LookupAsciiSymbol("builtins");
- Handle<GlobalObject> global = Handle<GlobalObject>(context->global());
- RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate_,
- SetProperty(global, key, Handle<Object>(global->builtins()),
- NONE, kNonStrictMode),
- false);
-
- // Compile the JavaScript for the debugger in the debugger context.
- debugger->set_compiling_natives(true);
- bool caught_exception =
- !CompileDebuggerScript(Natives::GetIndex("mirror")) ||
- !CompileDebuggerScript(Natives::GetIndex("debug"));
-
- if (FLAG_enable_liveedit) {
- caught_exception = caught_exception ||
- !CompileDebuggerScript(Natives::GetIndex("liveedit"));
- }
-
- debugger->set_compiling_natives(false);
-
- // Make sure we mark the debugger as not loading before we might
- // return.
- debugger->set_loading_debugger(false);
-
- // Check for caught exceptions.
- if (caught_exception) return false;
-
- // Debugger loaded.
- debug_context_ = context;
-
- return true;
-}
-
-
-void Debug::Unload() {
- // Return debugger is not loaded.
- if (!IsLoaded()) {
- return;
- }
-
- // Clear the script cache.
- DestroyScriptCache();
-
- // Clear debugger context global handle.
- Isolate::Current()->global_handles()->Destroy(
- reinterpret_cast<Object**>(debug_context_.location()));
- debug_context_ = Handle<Context>();
-}
-
-
-// Set the flag indicating that preemption happened during debugging.
-void Debug::PreemptionWhileInDebugger() {
- ASSERT(InDebugger());
- Debug::set_interrupts_pending(PREEMPT);
-}
-
-
-void Debug::Iterate(ObjectVisitor* v) {
- v->VisitPointer(BitCast<Object**>(&(debug_break_return_)));
- v->VisitPointer(BitCast<Object**>(&(debug_break_slot_)));
-}
-
-
-Object* Debug::Break(Arguments args) {
- Heap* heap = isolate_->heap();
- HandleScope scope(isolate_);
- ASSERT(args.length() == 0);
-
- thread_local_.frame_drop_mode_ = FRAMES_UNTOUCHED;
-
- // Get the top-most JavaScript frame.
- JavaScriptFrameIterator it(isolate_);
- JavaScriptFrame* frame = it.frame();
-
- // Just continue if breaks are disabled or debugger cannot be loaded.
- if (disable_break() || !Load()) {
- SetAfterBreakTarget(frame);
- return heap->undefined_value();
- }
-
- // Enter the debugger.
- EnterDebugger debugger;
- if (debugger.FailedToEnter()) {
- return heap->undefined_value();
- }
-
- // Postpone interrupt during breakpoint processing.
- PostponeInterruptsScope postpone(isolate_);
-
- // Get the debug info (create it if it does not exist).
- Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
- Handle<DebugInfo> debug_info = GetDebugInfo(shared);
-
- // Find the break point where execution has stopped.
- BreakLocationIterator break_location_iterator(debug_info,
- ALL_BREAK_LOCATIONS);
- break_location_iterator.FindBreakLocationFromAddress(frame->pc());
-
- // Check whether step next reached a new statement.
- if (!StepNextContinue(&break_location_iterator, frame)) {
- // Decrease steps left if performing multiple steps.
- if (thread_local_.step_count_ > 0) {
- thread_local_.step_count_--;
- }
- }
-
- // If there is one or more real break points check whether any of these are
- // triggered.
- Handle<Object> break_points_hit(heap->undefined_value());
- if (break_location_iterator.HasBreakPoint()) {
- Handle<Object> break_point_objects =
- Handle<Object>(break_location_iterator.BreakPointObjects());
- break_points_hit = CheckBreakPoints(break_point_objects);
- }
-
- // If step out is active skip everything until the frame where we need to step
- // out to is reached, unless real breakpoint is hit.
- if (StepOutActive() && frame->fp() != step_out_fp() &&
- break_points_hit->IsUndefined() ) {
- // Step count should always be 0 for StepOut.
- ASSERT(thread_local_.step_count_ == 0);
- } else if (!break_points_hit->IsUndefined() ||
- (thread_local_.last_step_action_ != StepNone &&
- thread_local_.step_count_ == 0)) {
- // Notify debugger if a real break point is triggered or if performing
- // single stepping with no more steps to perform. Otherwise do another step.
-
- // Clear all current stepping setup.
- ClearStepping();
-
- // Notify the debug event listeners.
- isolate_->debugger()->OnDebugBreak(break_points_hit, false);
- } else if (thread_local_.last_step_action_ != StepNone) {
- // Hold on to last step action as it is cleared by the call to
- // ClearStepping.
- StepAction step_action = thread_local_.last_step_action_;
- int step_count = thread_local_.step_count_;
-
- // Clear all current stepping setup.
- ClearStepping();
-
- // Set up for the remaining steps.
- PrepareStep(step_action, step_count);
- }
-
- if (thread_local_.frame_drop_mode_ == FRAMES_UNTOUCHED) {
- SetAfterBreakTarget(frame);
- } else if (thread_local_.frame_drop_mode_ ==
- FRAME_DROPPED_IN_IC_CALL) {
- // We must have been calling IC stub. Do not go there anymore.
- Code* plain_return = isolate_->builtins()->builtin(
- Builtins::kPlainReturn_LiveEdit);
- thread_local_.after_break_target_ = plain_return->entry();
- } else if (thread_local_.frame_drop_mode_ ==
- FRAME_DROPPED_IN_DEBUG_SLOT_CALL) {
- // Debug break slot stub does not return normally, instead it manually
- // cleans the stack and jumps. We should patch the jump address.
- Code* plain_return = isolate_->builtins()->builtin(
- Builtins::kFrameDropper_LiveEdit);
- thread_local_.after_break_target_ = plain_return->entry();
- } else if (thread_local_.frame_drop_mode_ ==
- FRAME_DROPPED_IN_DIRECT_CALL) {
- // Nothing to do, after_break_target is not used here.
- } else {
- UNREACHABLE();
- }
-
- return heap->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Object*, Debug_Break) {
- return isolate->debug()->Break(args);
-}
-
-
-// Check the break point objects for whether one or more are actually
-// triggered. This function returns a JSArray with the break point objects
-// which is triggered.
-Handle<Object> Debug::CheckBreakPoints(Handle<Object> break_point_objects) {
- Factory* factory = isolate_->factory();
-
- // Count the number of break points hit. If there are multiple break points
- // they are in a FixedArray.
- Handle<FixedArray> break_points_hit;
- int break_points_hit_count = 0;
- ASSERT(!break_point_objects->IsUndefined());
- if (break_point_objects->IsFixedArray()) {
- Handle<FixedArray> array(FixedArray::cast(*break_point_objects));
- break_points_hit = factory->NewFixedArray(array->length());
- for (int i = 0; i < array->length(); i++) {
- Handle<Object> o(array->get(i));
- if (CheckBreakPoint(o)) {
- break_points_hit->set(break_points_hit_count++, *o);
- }
- }
- } else {
- break_points_hit = factory->NewFixedArray(1);
- if (CheckBreakPoint(break_point_objects)) {
- break_points_hit->set(break_points_hit_count++, *break_point_objects);
- }
- }
-
- // Return undefined if no break points were triggered.
- if (break_points_hit_count == 0) {
- return factory->undefined_value();
- }
- // Return break points hit as a JSArray.
- Handle<JSArray> result = factory->NewJSArrayWithElements(break_points_hit);
- result->set_length(Smi::FromInt(break_points_hit_count));
- return result;
-}
-
-
-// Check whether a single break point object is triggered.
-bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
- ASSERT(Isolate::Current() == isolate_);
- Factory* factory = isolate_->factory();
- HandleScope scope(isolate_);
-
- // Ignore check if break point object is not a JSObject.
- if (!break_point_object->IsJSObject()) return true;
-
- // Get the function IsBreakPointTriggered (defined in debug-debugger.js).
- Handle<String> is_break_point_triggered_symbol =
- factory->LookupAsciiSymbol("IsBreakPointTriggered");
- Handle<JSFunction> check_break_point =
- Handle<JSFunction>(JSFunction::cast(
- debug_context()->global()->GetPropertyNoExceptionThrown(
- *is_break_point_triggered_symbol)));
-
- // Get the break id as an object.
- Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id());
-
- // Call HandleBreakPointx.
- bool caught_exception = false;
- const int argc = 2;
- Object** argv[argc] = {
- break_id.location(),
- reinterpret_cast<Object**>(break_point_object.location())
- };
- Handle<Object> result = Execution::TryCall(check_break_point,
- isolate_->js_builtins_object(), argc, argv, &caught_exception);
-
- // If exception or non boolean result handle as not triggered
- if (caught_exception || !result->IsBoolean()) {
- return false;
- }
-
- // Return whether the break point is triggered.
- ASSERT(!result.is_null());
- return (*result)->IsTrue();
-}
-
-
-// Check whether the function has debug information.
-bool Debug::HasDebugInfo(Handle<SharedFunctionInfo> shared) {
- return !shared->debug_info()->IsUndefined();
-}
-
-
-// Return the debug info for this function. EnsureDebugInfo must be called
-// prior to ensure the debug info has been generated for shared.
-Handle<DebugInfo> Debug::GetDebugInfo(Handle<SharedFunctionInfo> shared) {
- ASSERT(HasDebugInfo(shared));
- return Handle<DebugInfo>(DebugInfo::cast(shared->debug_info()));
-}
-
-
-void Debug::SetBreakPoint(Handle<SharedFunctionInfo> shared,
- Handle<Object> break_point_object,
- int* source_position) {
- HandleScope scope(isolate_);
-
- if (!EnsureDebugInfo(shared)) {
- // Return if retrieving debug info failed.
- return;
- }
-
- Handle<DebugInfo> debug_info = GetDebugInfo(shared);
- // Source positions starts with zero.
- ASSERT(source_position >= 0);
-
- // Find the break point and change it.
- BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
- it.FindBreakLocationFromPosition(*source_position);
- it.SetBreakPoint(break_point_object);
-
- *source_position = it.position();
-
- // At least one active break point now.
- ASSERT(debug_info->GetBreakPointCount() > 0);
-}
-
-
-void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
- HandleScope scope(isolate_);
-
- DebugInfoListNode* node = debug_info_list_;
- while (node != NULL) {
- Object* result = DebugInfo::FindBreakPointInfo(node->debug_info(),
- break_point_object);
- if (!result->IsUndefined()) {
- // Get information in the break point.
- BreakPointInfo* break_point_info = BreakPointInfo::cast(result);
- Handle<DebugInfo> debug_info = node->debug_info();
- Handle<SharedFunctionInfo> shared(debug_info->shared());
- int source_position = break_point_info->statement_position()->value();
-
- // Source positions starts with zero.
- ASSERT(source_position >= 0);
-
- // Find the break point and clear it.
- BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
- it.FindBreakLocationFromPosition(source_position);
- it.ClearBreakPoint(break_point_object);
-
- // If there are no more break points left remove the debug info for this
- // function.
- if (debug_info->GetBreakPointCount() == 0) {
- RemoveDebugInfo(debug_info);
- }
-
- return;
- }
- node = node->next();
- }
-}
-
-
-void Debug::ClearAllBreakPoints() {
- DebugInfoListNode* node = debug_info_list_;
- while (node != NULL) {
- // Remove all debug break code.
- BreakLocationIterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
- it.ClearAllDebugBreak();
- node = node->next();
- }
-
- // Remove all debug info.
- while (debug_info_list_ != NULL) {
- RemoveDebugInfo(debug_info_list_->debug_info());
- }
-}
-
-
-void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared) {
- // Make sure the function has setup the debug info.
- if (!EnsureDebugInfo(shared)) {
- // Return if we failed to retrieve the debug info.
- return;
- }
-
- // Flood the function with break points.
- BreakLocationIterator it(GetDebugInfo(shared), ALL_BREAK_LOCATIONS);
- while (!it.Done()) {
- it.SetOneShot();
- it.Next();
- }
-}
-
-
-void Debug::FloodHandlerWithOneShot() {
- // Iterate through the JavaScript stack looking for handlers.
- StackFrame::Id id = break_frame_id();
- if (id == StackFrame::NO_ID) {
- // If there is no JavaScript stack don't do anything.
- return;
- }
- for (JavaScriptFrameIterator it(isolate_, id); !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- if (frame->HasHandler()) {
- Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>(
- JSFunction::cast(frame->function())->shared());
- // Flood the function with the catch block with break points
- FloodWithOneShot(shared);
- return;
- }
- }
-}
-
-
-void Debug::ChangeBreakOnException(ExceptionBreakType type, bool enable) {
- if (type == BreakUncaughtException) {
- break_on_uncaught_exception_ = enable;
- } else {
- break_on_exception_ = enable;
- }
-}
-
-
-bool Debug::IsBreakOnException(ExceptionBreakType type) {
- if (type == BreakUncaughtException) {
- return break_on_uncaught_exception_;
- } else {
- return break_on_exception_;
- }
-}
-
-
-void Debug::PrepareStep(StepAction step_action, int step_count) {
- ASSERT(Isolate::Current() == isolate_);
- HandleScope scope(isolate_);
- ASSERT(Debug::InDebugger());
-
- // Remember this step action and count.
- thread_local_.last_step_action_ = step_action;
- if (step_action == StepOut) {
- // For step out target frame will be found on the stack so there is no need
- // to set step counter for it. It's expected to always be 0 for StepOut.
- thread_local_.step_count_ = 0;
- } else {
- thread_local_.step_count_ = step_count;
- }
-
- // Get the frame where the execution has stopped and skip the debug frame if
- // any. The debug frame will only be present if execution was stopped due to
- // hitting a break point. In other situations (e.g. unhandled exception) the
- // debug frame is not present.
- StackFrame::Id id = break_frame_id();
- if (id == StackFrame::NO_ID) {
- // If there is no JavaScript stack don't do anything.
- return;
- }
- JavaScriptFrameIterator frames_it(isolate_, id);
- JavaScriptFrame* frame = frames_it.frame();
-
- // First of all ensure there is one-shot break points in the top handler
- // if any.
- FloodHandlerWithOneShot();
-
- // If the function on the top frame is unresolved perform step out. This will
- // be the case when calling unknown functions and having the debugger stopped
- // in an unhandled exception.
- if (!frame->function()->IsJSFunction()) {
- // Step out: Find the calling JavaScript frame and flood it with
- // breakpoints.
- frames_it.Advance();
- // Fill the function to return to with one-shot break points.
- JSFunction* function = JSFunction::cast(frames_it.frame()->function());
- FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));
- return;
- }
-
- // Get the debug info (create it if it does not exist).
- Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
- if (!EnsureDebugInfo(shared)) {
- // Return if ensuring debug info failed.
- return;
- }
- Handle<DebugInfo> debug_info = GetDebugInfo(shared);
-
- // Find the break location where execution has stopped.
- BreakLocationIterator it(debug_info, ALL_BREAK_LOCATIONS);
- it.FindBreakLocationFromAddress(frame->pc());
-
- // Compute whether or not the target is a call target.
- bool is_load_or_store = false;
- bool is_inline_cache_stub = false;
- bool is_at_restarted_function = false;
- Handle<Code> call_function_stub;
-
- if (thread_local_.restarter_frame_function_pointer_ == NULL) {
- if (RelocInfo::IsCodeTarget(it.rinfo()->rmode())) {
- bool is_call_target = false;
- Address target = it.rinfo()->target_address();
- Code* code = Code::GetCodeFromTargetAddress(target);
- if (code->is_call_stub() || code->is_keyed_call_stub()) {
- is_call_target = true;
- }
- if (code->is_inline_cache_stub()) {
- is_inline_cache_stub = true;
- is_load_or_store = !is_call_target;
- }
-
- // Check if target code is CallFunction stub.
- Code* maybe_call_function_stub = code;
- // If there is a breakpoint at this line look at the original code to
- // check if it is a CallFunction stub.
- if (it.IsDebugBreak()) {
- Address original_target = it.original_rinfo()->target_address();
- maybe_call_function_stub =
- Code::GetCodeFromTargetAddress(original_target);
- }
- if (maybe_call_function_stub->kind() == Code::STUB &&
- maybe_call_function_stub->major_key() == CodeStub::CallFunction) {
- // Save reference to the code as we may need it to find out arguments
- // count for 'step in' later.
- call_function_stub = Handle<Code>(maybe_call_function_stub);
- }
- }
- } else {
- is_at_restarted_function = true;
- }
-
- // If this is the last break code target step out is the only possibility.
- if (it.IsExit() || step_action == StepOut) {
- if (step_action == StepOut) {
- // Skip step_count frames starting with the current one.
- while (step_count-- > 0 && !frames_it.done()) {
- frames_it.Advance();
- }
- } else {
- ASSERT(it.IsExit());
- frames_it.Advance();
- }
- // Skip builtin functions on the stack.
- while (!frames_it.done() &&
- JSFunction::cast(frames_it.frame()->function())->IsBuiltin()) {
- frames_it.Advance();
- }
- // Step out: If there is a JavaScript caller frame, we need to
- // flood it with breakpoints.
- if (!frames_it.done()) {
- // Fill the function to return to with one-shot break points.
- JSFunction* function = JSFunction::cast(frames_it.frame()->function());
- FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));
- // Set target frame pointer.
- ActivateStepOut(frames_it.frame());
- }
- } else if (!(is_inline_cache_stub || RelocInfo::IsConstructCall(it.rmode()) ||
- !call_function_stub.is_null() || is_at_restarted_function)
- || step_action == StepNext || step_action == StepMin) {
- // Step next or step min.
-
- // Fill the current function with one-shot break points.
- FloodWithOneShot(shared);
-
- // Remember source position and frame to handle step next.
- thread_local_.last_statement_position_ =
- debug_info->code()->SourceStatementPosition(frame->pc());
- thread_local_.last_fp_ = frame->fp();
- } else {
- // If there's restarter frame on top of the stack, just get the pointer
- // to function which is going to be restarted.
- if (is_at_restarted_function) {
- Handle<JSFunction> restarted_function(
- JSFunction::cast(*thread_local_.restarter_frame_function_pointer_));
- Handle<SharedFunctionInfo> restarted_shared(
- restarted_function->shared());
- FloodWithOneShot(restarted_shared);
- } else if (!call_function_stub.is_null()) {
- // If it's CallFunction stub ensure target function is compiled and flood
- // it with one shot breakpoints.
-
- // Find out number of arguments from the stub minor key.
- // Reverse lookup required as the minor key cannot be retrieved
- // from the code object.
- Handle<Object> obj(
- isolate_->heap()->code_stubs()->SlowReverseLookup(
- *call_function_stub));
- ASSERT(!obj.is_null());
- ASSERT(!(*obj)->IsUndefined());
- ASSERT(obj->IsSmi());
- // Get the STUB key and extract major and minor key.
- uint32_t key = Smi::cast(*obj)->value();
- // Argc in the stub is the number of arguments passed - not the
- // expected arguments of the called function.
- int call_function_arg_count =
- CallFunctionStub::ExtractArgcFromMinorKey(
- CodeStub::MinorKeyFromKey(key));
- ASSERT(call_function_stub->major_key() ==
- CodeStub::MajorKeyFromKey(key));
-
- // Find target function on the expression stack.
- // Expression stack looks like this (top to bottom):
- // argN
- // ...
- // arg0
- // Receiver
- // Function to call
- int expressions_count = frame->ComputeExpressionsCount();
- ASSERT(expressions_count - 2 - call_function_arg_count >= 0);
- Object* fun = frame->GetExpression(
- expressions_count - 2 - call_function_arg_count);
- if (fun->IsJSFunction()) {
- Handle<JSFunction> js_function(JSFunction::cast(fun));
- // Don't step into builtins.
- if (!js_function->IsBuiltin()) {
- // It will also compile target function if it's not compiled yet.
- FloodWithOneShot(Handle<SharedFunctionInfo>(js_function->shared()));
- }
- }
- }
-
- // Fill the current function with one-shot break points even for step in on
- // a call target as the function called might be a native function for
- // which step in will not stop. It also prepares for stepping in
- // getters/setters.
- FloodWithOneShot(shared);
-
- if (is_load_or_store) {
- // Remember source position and frame to handle step in getter/setter. If
- // there is a custom getter/setter it will be handled in
- // Object::Get/SetPropertyWithCallback, otherwise the step action will be
- // propagated on the next Debug::Break.
- thread_local_.last_statement_position_ =
- debug_info->code()->SourceStatementPosition(frame->pc());
- thread_local_.last_fp_ = frame->fp();
- }
-
- // Step in or Step in min
- it.PrepareStepIn();
- ActivateStepIn(frame);
- }
-}
-
-
-// Check whether the current debug break should be reported to the debugger. It
-// is used to have step next and step in only report break back to the debugger
-// if on a different frame or in a different statement. In some situations
-// there will be several break points in the same statement when the code is
-// flooded with one-shot break points. This function helps to perform several
-// steps before reporting break back to the debugger.
-bool Debug::StepNextContinue(BreakLocationIterator* break_location_iterator,
- JavaScriptFrame* frame) {
- // If the step last action was step next or step in make sure that a new
- // statement is hit.
- if (thread_local_.last_step_action_ == StepNext ||
- thread_local_.last_step_action_ == StepIn) {
- // Never continue if returning from function.
- if (break_location_iterator->IsExit()) return false;
-
- // Continue if we are still on the same frame and in the same statement.
- int current_statement_position =
- break_location_iterator->code()->SourceStatementPosition(frame->pc());
- return thread_local_.last_fp_ == frame->fp() &&
- thread_local_.last_statement_position_ == current_statement_position;
- }
-
- // No step next action - don't continue.
- return false;
-}
-
-
-// Check whether the code object at the specified address is a debug break code
-// object.
-bool Debug::IsDebugBreak(Address addr) {
- Code* code = Code::GetCodeFromTargetAddress(addr);
- return code->ic_state() == DEBUG_BREAK;
-}
-
-
-// Check whether a code stub with the specified major key is a possible break
-// point location when looking for source break locations.
-bool Debug::IsSourceBreakStub(Code* code) {
- CodeStub::Major major_key = CodeStub::GetMajorKey(code);
- return major_key == CodeStub::CallFunction;
-}
-
-
-// Check whether a code stub with the specified major key is a possible break
-// location.
-bool Debug::IsBreakStub(Code* code) {
- CodeStub::Major major_key = CodeStub::GetMajorKey(code);
- return major_key == CodeStub::CallFunction;
-}
-
-
-// Find the builtin to use for invoking the debug break
-Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
- // Find the builtin debug break function matching the calling convention
- // used by the call site.
- if (code->is_inline_cache_stub()) {
- switch (code->kind()) {
- case Code::CALL_IC:
- case Code::KEYED_CALL_IC:
- return ComputeCallDebugBreak(code->arguments_count(), code->kind());
-
- case Code::LOAD_IC:
- return Isolate::Current()->builtins()->LoadIC_DebugBreak();
-
- case Code::STORE_IC:
- return Isolate::Current()->builtins()->StoreIC_DebugBreak();
-
- case Code::KEYED_LOAD_IC:
- return Isolate::Current()->builtins()->KeyedLoadIC_DebugBreak();
-
- case Code::KEYED_STORE_IC:
- return Isolate::Current()->builtins()->KeyedStoreIC_DebugBreak();
-
- default:
- UNREACHABLE();
- }
- }
- if (RelocInfo::IsConstructCall(mode)) {
- Handle<Code> result =
- Isolate::Current()->builtins()->ConstructCall_DebugBreak();
- return result;
- }
- if (code->kind() == Code::STUB) {
- ASSERT(code->major_key() == CodeStub::CallFunction);
- Handle<Code> result =
- Isolate::Current()->builtins()->StubNoRegisters_DebugBreak();
- return result;
- }
-
- UNREACHABLE();
- return Handle<Code>::null();
-}
-
-
-// Simple function for returning the source positions for active break points.
-Handle<Object> Debug::GetSourceBreakLocations(
- Handle<SharedFunctionInfo> shared) {
- Isolate* isolate = Isolate::Current();
- Heap* heap = isolate->heap();
- if (!HasDebugInfo(shared)) return Handle<Object>(heap->undefined_value());
- Handle<DebugInfo> debug_info = GetDebugInfo(shared);
- if (debug_info->GetBreakPointCount() == 0) {
- return Handle<Object>(heap->undefined_value());
- }
- Handle<FixedArray> locations =
- isolate->factory()->NewFixedArray(debug_info->GetBreakPointCount());
- int count = 0;
- for (int i = 0; i < debug_info->break_points()->length(); i++) {
- if (!debug_info->break_points()->get(i)->IsUndefined()) {
- BreakPointInfo* break_point_info =
- BreakPointInfo::cast(debug_info->break_points()->get(i));
- if (break_point_info->GetBreakPointCount() > 0) {
- locations->set(count++, break_point_info->statement_position());
- }
- }
- }
- return locations;
-}
-
-
-void Debug::NewBreak(StackFrame::Id break_frame_id) {
- thread_local_.break_frame_id_ = break_frame_id;
- thread_local_.break_id_ = ++thread_local_.break_count_;
-}
-
-
-void Debug::SetBreak(StackFrame::Id break_frame_id, int break_id) {
- thread_local_.break_frame_id_ = break_frame_id;
- thread_local_.break_id_ = break_id;
-}
-
-
-// Handle stepping into a function.
-void Debug::HandleStepIn(Handle<JSFunction> function,
- Handle<Object> holder,
- Address fp,
- bool is_constructor) {
- // If the frame pointer is not supplied by the caller find it.
- if (fp == 0) {
- StackFrameIterator it;
- it.Advance();
- // For constructor functions skip another frame.
- if (is_constructor) {
- ASSERT(it.frame()->is_construct());
- it.Advance();
- }
- fp = it.frame()->fp();
- }
-
- // Flood the function with one-shot break points if it is called from where
- // step into was requested.
- if (fp == step_in_fp()) {
- // Don't allow step into functions in the native context.
- if (!function->IsBuiltin()) {
- if (function->shared()->code() ==
- Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply) ||
- function->shared()->code() ==
- Isolate::Current()->builtins()->builtin(Builtins::kFunctionCall)) {
- // Handle function.apply and function.call separately to flood the
- // function to be called and not the code for Builtins::FunctionApply or
- // Builtins::FunctionCall. The receiver of call/apply is the target
- // function.
- if (!holder.is_null() && holder->IsJSFunction() &&
- !JSFunction::cast(*holder)->IsBuiltin()) {
- Handle<SharedFunctionInfo> shared_info(
- JSFunction::cast(*holder)->shared());
- Debug::FloodWithOneShot(shared_info);
- }
- } else {
- Debug::FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));
- }
- }
- }
-}
-
-
-void Debug::ClearStepping() {
- // Clear the various stepping setup.
- ClearOneShot();
- ClearStepIn();
- ClearStepOut();
- ClearStepNext();
-
- // Clear multiple step counter.
- thread_local_.step_count_ = 0;
-}
-
-// Clears all the one-shot break points that are currently set. Normally this
-// function is called each time a break point is hit as one shot break points
-// are used to support stepping.
-void Debug::ClearOneShot() {
- // The current implementation just runs through all the breakpoints. When the
- // last break point for a function is removed that function is automatically
- // removed from the list.
-
- DebugInfoListNode* node = debug_info_list_;
- while (node != NULL) {
- BreakLocationIterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
- while (!it.Done()) {
- it.ClearOneShot();
- it.Next();
- }
- node = node->next();
- }
-}
-
-
-void Debug::ActivateStepIn(StackFrame* frame) {
- ASSERT(!StepOutActive());
- thread_local_.step_into_fp_ = frame->fp();
-}
-
-
-void Debug::ClearStepIn() {
- thread_local_.step_into_fp_ = 0;
-}
-
-
-void Debug::ActivateStepOut(StackFrame* frame) {
- ASSERT(!StepInActive());
- thread_local_.step_out_fp_ = frame->fp();
-}
-
-
-void Debug::ClearStepOut() {
- thread_local_.step_out_fp_ = 0;
-}
-
-
-void Debug::ClearStepNext() {
- thread_local_.last_step_action_ = StepNone;
- thread_local_.last_statement_position_ = RelocInfo::kNoPosition;
- thread_local_.last_fp_ = 0;
-}
-
-
-// Ensures the debug information is present for shared.
-bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared) {
- // Return if we already have the debug info for shared.
- if (HasDebugInfo(shared)) return true;
-
- // Ensure shared in compiled. Return false if this failed.
- if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
-
- // If preparing for the first break point make sure to deoptimize all
- // functions as debugging does not work with optimized code.
- if (!has_break_points_) {
- Deoptimizer::DeoptimizeAll();
- }
-
- // Create the debug info object.
- Handle<DebugInfo> debug_info = FACTORY->NewDebugInfo(shared);
-
- // Add debug info to the list.
- DebugInfoListNode* node = new DebugInfoListNode(*debug_info);
- node->set_next(debug_info_list_);
- debug_info_list_ = node;
-
- // Now there is at least one break point.
- has_break_points_ = true;
-
- return true;
-}
-
-
-void Debug::RemoveDebugInfo(Handle<DebugInfo> debug_info) {
- ASSERT(debug_info_list_ != NULL);
- // Run through the debug info objects to find this one and remove it.
- DebugInfoListNode* prev = NULL;
- DebugInfoListNode* current = debug_info_list_;
- while (current != NULL) {
- if (*current->debug_info() == *debug_info) {
- // Unlink from list. If prev is NULL we are looking at the first element.
- if (prev == NULL) {
- debug_info_list_ = current->next();
- } else {
- prev->set_next(current->next());
- }
- current->debug_info()->shared()->set_debug_info(
- isolate_->heap()->undefined_value());
- delete current;
-
- // If there are no more debug info objects there are not more break
- // points.
- has_break_points_ = debug_info_list_ != NULL;
-
- return;
- }
- // Move to next in list.
- prev = current;
- current = current->next();
- }
- UNREACHABLE();
-}
-
-
-void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
- ASSERT(Isolate::Current() == isolate_);
- HandleScope scope(isolate_);
-
- // Get the executing function in which the debug break occurred.
- Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
- if (!EnsureDebugInfo(shared)) {
- // Return if we failed to retrieve the debug info.
- return;
- }
- Handle<DebugInfo> debug_info = GetDebugInfo(shared);
- Handle<Code> code(debug_info->code());
- Handle<Code> original_code(debug_info->original_code());
-#ifdef DEBUG
- // Get the code which is actually executing.
- Handle<Code> frame_code(frame->LookupCode());
- ASSERT(frame_code.is_identical_to(code));
-#endif
-
- // Find the call address in the running code. This address holds the call to
- // either a DebugBreakXXX or to the debug break return entry code if the
- // break point is still active after processing the break point.
- Address addr = frame->pc() - Assembler::kCallTargetAddressOffset;
-
- // Check if the location is at JS exit or debug break slot.
- bool at_js_return = false;
- bool break_at_js_return_active = false;
- bool at_debug_break_slot = false;
- RelocIterator it(debug_info->code());
- while (!it.done() && !at_js_return && !at_debug_break_slot) {
- if (RelocInfo::IsJSReturn(it.rinfo()->rmode())) {
- at_js_return = (it.rinfo()->pc() ==
- addr - Assembler::kPatchReturnSequenceAddressOffset);
- break_at_js_return_active = it.rinfo()->IsPatchedReturnSequence();
- }
- if (RelocInfo::IsDebugBreakSlot(it.rinfo()->rmode())) {
- at_debug_break_slot = (it.rinfo()->pc() ==
- addr - Assembler::kPatchDebugBreakSlotAddressOffset);
- }
- it.next();
- }
-
- // Handle the jump to continue execution after break point depending on the
- // break location.
- if (at_js_return) {
- // If the break point as return is still active jump to the corresponding
- // place in the original code. If not the break point was removed during
- // break point processing.
- if (break_at_js_return_active) {
- addr += original_code->instruction_start() - code->instruction_start();
- }
-
- // Move back to where the call instruction sequence started.
- thread_local_.after_break_target_ =
- addr - Assembler::kPatchReturnSequenceAddressOffset;
- } else if (at_debug_break_slot) {
- // Address of where the debug break slot starts.
- addr = addr - Assembler::kPatchDebugBreakSlotAddressOffset;
-
- // Continue just after the slot.
- thread_local_.after_break_target_ = addr + Assembler::kDebugBreakSlotLength;
- } else if (IsDebugBreak(Assembler::target_address_at(addr))) {
- // We now know that there is still a debug break call at the target address,
- // so the break point is still there and the original code will hold the
- // address to jump to in order to complete the call which is replaced by a
- // call to DebugBreakXXX.
-
- // Find the corresponding address in the original code.
- addr += original_code->instruction_start() - code->instruction_start();
-
- // Install jump to the call address in the original code. This will be the
- // call which was overwritten by the call to DebugBreakXXX.
- thread_local_.after_break_target_ = Assembler::target_address_at(addr);
- } else {
- // There is no longer a break point present. Don't try to look in the
- // original code as the running code will have the right address. This takes
- // care of the case where the last break point is removed from the function
- // and therefore no "original code" is available.
- thread_local_.after_break_target_ = Assembler::target_address_at(addr);
- }
-}
-
-
-bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
- HandleScope scope(isolate_);
-
- // Get the executing function in which the debug break occurred.
- Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
- if (!EnsureDebugInfo(shared)) {
- // Return if we failed to retrieve the debug info.
- return false;
- }
- Handle<DebugInfo> debug_info = GetDebugInfo(shared);
- Handle<Code> code(debug_info->code());
-#ifdef DEBUG
- // Get the code which is actually executing.
- Handle<Code> frame_code(frame->LookupCode());
- ASSERT(frame_code.is_identical_to(code));
-#endif
-
- // Find the call address in the running code.
- Address addr = frame->pc() - Assembler::kCallTargetAddressOffset;
-
- // Check if the location is at JS return.
- RelocIterator it(debug_info->code());
- while (!it.done()) {
- if (RelocInfo::IsJSReturn(it.rinfo()->rmode())) {
- return (it.rinfo()->pc() ==
- addr - Assembler::kPatchReturnSequenceAddressOffset);
- }
- it.next();
- }
- return false;
-}
-
-
-void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
- FrameDropMode mode,
- Object** restarter_frame_function_pointer) {
- thread_local_.frame_drop_mode_ = mode;
- thread_local_.break_frame_id_ = new_break_frame_id;
- thread_local_.restarter_frame_function_pointer_ =
- restarter_frame_function_pointer;
-}
-
-
-bool Debug::IsDebugGlobal(GlobalObject* global) {
- return IsLoaded() && global == debug_context()->global();
-}
-
-
-void Debug::ClearMirrorCache() {
- ASSERT(Isolate::Current() == isolate_);
- PostponeInterruptsScope postpone(isolate_);
- HandleScope scope(isolate_);
- ASSERT(isolate_->context() == *Debug::debug_context());
-
- // Clear the mirror cache.
- Handle<String> function_name =
- isolate_->factory()->LookupSymbol(CStrVector("ClearMirrorCache"));
- Handle<Object> fun(Isolate::Current()->global()->GetPropertyNoExceptionThrown(
- *function_name));
- ASSERT(fun->IsJSFunction());
- bool caught_exception;
- Handle<Object> js_object = Execution::TryCall(
- Handle<JSFunction>::cast(fun),
- Handle<JSObject>(Debug::debug_context()->global()),
- 0, NULL, &caught_exception);
-}
-
-
-void Debug::CreateScriptCache() {
- ASSERT(Isolate::Current() == isolate_);
- Heap* heap = isolate_->heap();
- HandleScope scope(isolate_);
-
- // Perform two GCs to get rid of all unreferenced scripts. The first GC gets
- // rid of all the cached script wrappers and the second gets rid of the
- // scripts which are no longer referenced.
- heap->CollectAllGarbage(false);
- heap->CollectAllGarbage(false);
-
- ASSERT(script_cache_ == NULL);
- script_cache_ = new ScriptCache();
-
- // Scan heap for Script objects.
- int count = 0;
- HeapIterator iterator;
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
- script_cache_->Add(Handle<Script>(Script::cast(obj)));
- count++;
- }
- }
-}
-
-
-void Debug::DestroyScriptCache() {
- // Get rid of the script cache if it was created.
- if (script_cache_ != NULL) {
- delete script_cache_;
- script_cache_ = NULL;
- }
-}
-
-
-void Debug::AddScriptToScriptCache(Handle<Script> script) {
- if (script_cache_ != NULL) {
- script_cache_->Add(script);
- }
-}
-
-
-Handle<FixedArray> Debug::GetLoadedScripts() {
- ASSERT(Isolate::Current() == isolate_);
- // Create and fill the script cache when the loaded scripts is requested for
- // the first time.
- if (script_cache_ == NULL) {
- CreateScriptCache();
- }
-
- // If the script cache is not active just return an empty array.
- ASSERT(script_cache_ != NULL);
- if (script_cache_ == NULL) {
- isolate_->factory()->NewFixedArray(0);
- }
-
- // Perform GC to get unreferenced scripts evicted from the cache before
- // returning the content.
- isolate_->heap()->CollectAllGarbage(false);
-
- // Get the scripts from the cache.
- return script_cache_->GetScripts();
-}
-
-
-void Debug::AfterGarbageCollection() {
- // Generate events for collected scripts.
- if (script_cache_ != NULL) {
- script_cache_->ProcessCollectedScripts();
- }
-}
-
-
-Debugger::Debugger()
- : debugger_access_(OS::CreateMutex()),
- event_listener_(Handle<Object>()),
- event_listener_data_(Handle<Object>()),
- compiling_natives_(false),
- is_loading_debugger_(false),
- never_unload_debugger_(false),
- message_handler_(NULL),
- debugger_unload_pending_(false),
- host_dispatch_handler_(NULL),
- dispatch_handler_access_(OS::CreateMutex()),
- debug_message_dispatch_handler_(NULL),
- message_dispatch_helper_thread_(NULL),
- host_dispatch_micros_(100 * 1000),
- agent_(NULL),
- command_queue_(kQueueInitialSize),
- command_received_(OS::CreateSemaphore(0)),
- event_command_queue_(kQueueInitialSize) {
-}
-
-
-Debugger::~Debugger() {
- delete debugger_access_;
- debugger_access_ = 0;
- delete dispatch_handler_access_;
- dispatch_handler_access_ = 0;
- delete command_received_;
- command_received_ = 0;
-}
-
-
-Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
- int argc, Object*** argv,
- bool* caught_exception) {
- ASSERT(Isolate::Current() == isolate_);
- ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
-
- // Create the execution state object.
- Handle<String> constructor_str =
- isolate_->factory()->LookupSymbol(constructor_name);
- Handle<Object> constructor(
- isolate_->global()->GetPropertyNoExceptionThrown(*constructor_str));
- ASSERT(constructor->IsJSFunction());
- if (!constructor->IsJSFunction()) {
- *caught_exception = true;
- return isolate_->factory()->undefined_value();
- }
- Handle<Object> js_object = Execution::TryCall(
- Handle<JSFunction>::cast(constructor),
- Handle<JSObject>(isolate_->debug()->debug_context()->global()),
- argc, argv, caught_exception);
- return js_object;
-}
-
-
-Handle<Object> Debugger::MakeExecutionState(bool* caught_exception) {
- ASSERT(Isolate::Current() == isolate_);
- // Create the execution state object.
- Handle<Object> break_id = isolate_->factory()->NewNumberFromInt(
- isolate_->debug()->break_id());
- const int argc = 1;
- Object** argv[argc] = { break_id.location() };
- return MakeJSObject(CStrVector("MakeExecutionState"),
- argc, argv, caught_exception);
-}
-
-
-Handle<Object> Debugger::MakeBreakEvent(Handle<Object> exec_state,
- Handle<Object> break_points_hit,
- bool* caught_exception) {
- ASSERT(Isolate::Current() == isolate_);
- // Create the new break event object.
- const int argc = 2;
- Object** argv[argc] = { exec_state.location(),
- break_points_hit.location() };
- return MakeJSObject(CStrVector("MakeBreakEvent"),
- argc,
- argv,
- caught_exception);
-}
-
-
-Handle<Object> Debugger::MakeExceptionEvent(Handle<Object> exec_state,
- Handle<Object> exception,
- bool uncaught,
- bool* caught_exception) {
- ASSERT(Isolate::Current() == isolate_);
- Factory* factory = isolate_->factory();
- // Create the new exception event object.
- const int argc = 3;
- Object** argv[argc] = { exec_state.location(),
- exception.location(),
- uncaught ? factory->true_value().location() :
- factory->false_value().location()};
- return MakeJSObject(CStrVector("MakeExceptionEvent"),
- argc, argv, caught_exception);
-}
-
-
-Handle<Object> Debugger::MakeNewFunctionEvent(Handle<Object> function,
- bool* caught_exception) {
- ASSERT(Isolate::Current() == isolate_);
- // Create the new function event object.
- const int argc = 1;
- Object** argv[argc] = { function.location() };
- return MakeJSObject(CStrVector("MakeNewFunctionEvent"),
- argc, argv, caught_exception);
-}
-
-
-Handle<Object> Debugger::MakeCompileEvent(Handle<Script> script,
- bool before,
- bool* caught_exception) {
- ASSERT(Isolate::Current() == isolate_);
- Factory* factory = isolate_->factory();
- // Create the compile event object.
- Handle<Object> exec_state = MakeExecutionState(caught_exception);
- Handle<Object> script_wrapper = GetScriptWrapper(script);
- const int argc = 3;
- Object** argv[argc] = { exec_state.location(),
- script_wrapper.location(),
- before ? factory->true_value().location() :
- factory->false_value().location() };
-
- return MakeJSObject(CStrVector("MakeCompileEvent"),
- argc,
- argv,
- caught_exception);
-}
-
-
-Handle<Object> Debugger::MakeScriptCollectedEvent(int id,
- bool* caught_exception) {
- ASSERT(Isolate::Current() == isolate_);
- // Create the script collected event object.
- Handle<Object> exec_state = MakeExecutionState(caught_exception);
- Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id));
- const int argc = 2;
- Object** argv[argc] = { exec_state.location(), id_object.location() };
-
- return MakeJSObject(CStrVector("MakeScriptCollectedEvent"),
- argc,
- argv,
- caught_exception);
-}
-
-
-void Debugger::OnException(Handle<Object> exception, bool uncaught) {
- ASSERT(Isolate::Current() == isolate_);
- HandleScope scope(isolate_);
- Debug* debug = isolate_->debug();
-
- // Bail out based on state or if there is no listener for this event
- if (debug->InDebugger()) return;
- if (!Debugger::EventActive(v8::Exception)) return;
-
- // Bail out if exception breaks are not active
- if (uncaught) {
- // Uncaught exceptions are reported by either flags.
- if (!(debug->break_on_uncaught_exception() ||
- debug->break_on_exception())) return;
- } else {
- // Caught exceptions are reported is activated.
- if (!debug->break_on_exception()) return;
- }
-
- // Enter the debugger.
- EnterDebugger debugger;
- if (debugger.FailedToEnter()) return;
-
- // Clear all current stepping setup.
- debug->ClearStepping();
- // Create the event data object.
- bool caught_exception = false;
- Handle<Object> exec_state = MakeExecutionState(&caught_exception);
- Handle<Object> event_data;
- if (!caught_exception) {
- event_data = MakeExceptionEvent(exec_state, exception, uncaught,
- &caught_exception);
- }
- // Bail out and don't call debugger if exception.
- if (caught_exception) {
- return;
- }
-
- // Process debug event.
- ProcessDebugEvent(v8::Exception, Handle<JSObject>::cast(event_data), false);
- // Return to continue execution from where the exception was thrown.
-}
-
-
-void Debugger::OnDebugBreak(Handle<Object> break_points_hit,
- bool auto_continue) {
- ASSERT(Isolate::Current() == isolate_);
- HandleScope scope(isolate_);
-
- // Debugger has already been entered by caller.
- ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
-
- // Bail out if there is no listener for this event
- if (!Debugger::EventActive(v8::Break)) return;
-
- // Debugger must be entered in advance.
- ASSERT(Isolate::Current()->context() == *isolate_->debug()->debug_context());
-
- // Create the event data object.
- bool caught_exception = false;
- Handle<Object> exec_state = MakeExecutionState(&caught_exception);
- Handle<Object> event_data;
- if (!caught_exception) {
- event_data = MakeBreakEvent(exec_state, break_points_hit,
- &caught_exception);
- }
- // Bail out and don't call debugger if exception.
- if (caught_exception) {
- return;
- }
-
- // Process debug event.
- ProcessDebugEvent(v8::Break,
- Handle<JSObject>::cast(event_data),
- auto_continue);
-}
-
-
-void Debugger::OnBeforeCompile(Handle<Script> script) {
- ASSERT(Isolate::Current() == isolate_);
- HandleScope scope(isolate_);
-
- // Bail out based on state or if there is no listener for this event
- if (isolate_->debug()->InDebugger()) return;
- if (compiling_natives()) return;
- if (!EventActive(v8::BeforeCompile)) return;
-
- // Enter the debugger.
- EnterDebugger debugger;
- if (debugger.FailedToEnter()) return;
-
- // Create the event data object.
- bool caught_exception = false;
- Handle<Object> event_data = MakeCompileEvent(script, true, &caught_exception);
- // Bail out and don't call debugger if exception.
- if (caught_exception) {
- return;
- }
-
- // Process debug event.
- ProcessDebugEvent(v8::BeforeCompile,
- Handle<JSObject>::cast(event_data),
- true);
-}
-
-
-// Handle debugger actions when a new script is compiled.
-void Debugger::OnAfterCompile(Handle<Script> script,
- AfterCompileFlags after_compile_flags) {
- ASSERT(Isolate::Current() == isolate_);
- HandleScope scope(isolate_);
- Debug* debug = isolate_->debug();
-
- // Add the newly compiled script to the script cache.
- debug->AddScriptToScriptCache(script);
-
- // No more to do if not debugging.
- if (!IsDebuggerActive()) return;
-
- // No compile events while compiling natives.
- if (compiling_natives()) return;
-
- // Store whether in debugger before entering debugger.
- bool in_debugger = debug->InDebugger();
-
- // Enter the debugger.
- EnterDebugger debugger;
- if (debugger.FailedToEnter()) return;
-
- // If debugging there might be script break points registered for this
- // script. Make sure that these break points are set.
-
- // Get the function UpdateScriptBreakPoints (defined in debug-debugger.js).
- Handle<String> update_script_break_points_symbol =
- isolate_->factory()->LookupAsciiSymbol("UpdateScriptBreakPoints");
- Handle<Object> update_script_break_points =
- Handle<Object>(debug->debug_context()->global()->
- GetPropertyNoExceptionThrown(*update_script_break_points_symbol));
- if (!update_script_break_points->IsJSFunction()) {
- return;
- }
- ASSERT(update_script_break_points->IsJSFunction());
-
- // Wrap the script object in a proper JS object before passing it
- // to JavaScript.
- Handle<JSValue> wrapper = GetScriptWrapper(script);
-
- // Call UpdateScriptBreakPoints expect no exceptions.
- bool caught_exception = false;
- const int argc = 1;
- Object** argv[argc] = { reinterpret_cast<Object**>(wrapper.location()) };
- Handle<Object> result = Execution::TryCall(
- Handle<JSFunction>::cast(update_script_break_points),
- Isolate::Current()->js_builtins_object(), argc, argv,
- &caught_exception);
- if (caught_exception) {
- return;
- }
- // Bail out based on state or if there is no listener for this event
- if (in_debugger && (after_compile_flags & SEND_WHEN_DEBUGGING) == 0) return;
- if (!Debugger::EventActive(v8::AfterCompile)) return;
-
- // Create the compile state object.
- Handle<Object> event_data = MakeCompileEvent(script,
- false,
- &caught_exception);
- // Bail out and don't call debugger if exception.
- if (caught_exception) {
- return;
- }
- // Process debug event.
- ProcessDebugEvent(v8::AfterCompile,
- Handle<JSObject>::cast(event_data),
- true);
-}
-
-
-void Debugger::OnScriptCollected(int id) {
- ASSERT(Isolate::Current() == isolate_);
- HandleScope scope(isolate_);
-
- // No more to do if not debugging.
- if (!IsDebuggerActive()) return;
- if (!Debugger::EventActive(v8::ScriptCollected)) return;
-
- // Enter the debugger.
- EnterDebugger debugger;
- if (debugger.FailedToEnter()) return;
-
- // Create the script collected state object.
- bool caught_exception = false;
- Handle<Object> event_data = MakeScriptCollectedEvent(id,
- &caught_exception);
- // Bail out and don't call debugger if exception.
- if (caught_exception) {
- return;
- }
-
- // Process debug event.
- ProcessDebugEvent(v8::ScriptCollected,
- Handle<JSObject>::cast(event_data),
- true);
-}
-
-
-void Debugger::ProcessDebugEvent(v8::DebugEvent event,
- Handle<JSObject> event_data,
- bool auto_continue) {
- ASSERT(Isolate::Current() == isolate_);
- HandleScope scope(isolate_);
-
- // Clear any pending debug break if this is a real break.
- if (!auto_continue) {
- isolate_->debug()->clear_interrupt_pending(DEBUGBREAK);
- }
-
- // Create the execution state.
- bool caught_exception = false;
- Handle<Object> exec_state = MakeExecutionState(&caught_exception);
- if (caught_exception) {
- return;
- }
- // First notify the message handler if any.
- if (message_handler_ != NULL) {
- NotifyMessageHandler(event,
- Handle<JSObject>::cast(exec_state),
- event_data,
- auto_continue);
- }
- // Notify registered debug event listener. This can be either a C or
- // a JavaScript function. Don't call event listener for v8::Break
- // here, if it's only a debug command -- they will be processed later.
- if ((event != v8::Break || !auto_continue) && !event_listener_.is_null()) {
- CallEventCallback(event, exec_state, event_data, NULL);
- }
- // Process pending debug commands.
- if (event == v8::Break) {
- while (!event_command_queue_.IsEmpty()) {
- CommandMessage command = event_command_queue_.Get();
- if (!event_listener_.is_null()) {
- CallEventCallback(v8::BreakForCommand,
- exec_state,
- event_data,
- command.client_data());
- }
- command.Dispose();
- }
- }
-}
-
-
-void Debugger::CallEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- v8::Debug::ClientData* client_data) {
- if (event_listener_->IsProxy()) {
- CallCEventCallback(event, exec_state, event_data, client_data);
- } else {
- CallJSEventCallback(event, exec_state, event_data);
- }
-}
-
-
-void Debugger::CallCEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- v8::Debug::ClientData* client_data) {
- Handle<Proxy> callback_obj(Handle<Proxy>::cast(event_listener_));
- v8::Debug::EventCallback2 callback =
- FUNCTION_CAST<v8::Debug::EventCallback2>(callback_obj->proxy());
- EventDetailsImpl event_details(
- event,
- Handle<JSObject>::cast(exec_state),
- Handle<JSObject>::cast(event_data),
- event_listener_data_,
- client_data);
- callback(event_details);
-}
-
-
-void Debugger::CallJSEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data) {
- ASSERT(event_listener_->IsJSFunction());
- ASSERT(Isolate::Current() == isolate_);
- Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_));
-
- // Invoke the JavaScript debug event listener.
- const int argc = 4;
- Object** argv[argc] = { Handle<Object>(Smi::FromInt(event)).location(),
- exec_state.location(),
- Handle<Object>::cast(event_data).location(),
- event_listener_data_.location() };
- bool caught_exception = false;
- Execution::TryCall(fun, isolate_->global(), argc, argv, &caught_exception);
- // Silently ignore exceptions from debug event listeners.
-}
-
-
-Handle<Context> Debugger::GetDebugContext() {
- ASSERT(Isolate::Current() == isolate_);
- never_unload_debugger_ = true;
- EnterDebugger debugger;
- return isolate_->debug()->debug_context();
-}
-
-
-void Debugger::UnloadDebugger() {
- ASSERT(Isolate::Current() == isolate_);
- Debug* debug = isolate_->debug();
-
- // Make sure that there are no breakpoints left.
- debug->ClearAllBreakPoints();
-
- // Unload the debugger if feasible.
- if (!never_unload_debugger_) {
- debug->Unload();
- }
-
- // Clear the flag indicating that the debugger should be unloaded.
- debugger_unload_pending_ = false;
-}
-
-
-void Debugger::NotifyMessageHandler(v8::DebugEvent event,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- bool auto_continue) {
- ASSERT(Isolate::Current() == isolate_);
- HandleScope scope(isolate_);
-
- if (!isolate_->debug()->Load()) return;
-
- // Process the individual events.
- bool sendEventMessage = false;
- switch (event) {
- case v8::Break:
- case v8::BreakForCommand:
- sendEventMessage = !auto_continue;
- break;
- case v8::Exception:
- sendEventMessage = true;
- break;
- case v8::BeforeCompile:
- break;
- case v8::AfterCompile:
- sendEventMessage = true;
- break;
- case v8::ScriptCollected:
- sendEventMessage = true;
- break;
- case v8::NewFunction:
- break;
- default:
- UNREACHABLE();
- }
-
- // The debug command interrupt flag might have been set when the command was
- // added. It should be enough to clear the flag only once while we are in the
- // debugger.
- ASSERT(isolate_->debug()->InDebugger());
- isolate_->stack_guard()->Continue(DEBUGCOMMAND);
-
- // Notify the debugger that a debug event has occurred unless auto continue is
- // active in which case no event is send.
- if (sendEventMessage) {
- MessageImpl message = MessageImpl::NewEvent(
- event,
- auto_continue,
- Handle<JSObject>::cast(exec_state),
- Handle<JSObject>::cast(event_data));
- InvokeMessageHandler(message);
- }
-
- // If auto continue don't make the event cause a break, but process messages
- // in the queue if any. For script collected events don't even process
- // messages in the queue as the execution state might not be what is expected
- // by the client.
- if ((auto_continue && !HasCommands()) || event == v8::ScriptCollected) {
- return;
- }
-
- v8::TryCatch try_catch;
-
- // DebugCommandProcessor goes here.
- v8::Local<v8::Object> cmd_processor;
- {
- v8::Local<v8::Object> api_exec_state =
- v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state));
- v8::Local<v8::String> fun_name =
- v8::String::New("debugCommandProcessor");
- v8::Local<v8::Function> fun =
- v8::Function::Cast(*api_exec_state->Get(fun_name));
-
- v8::Handle<v8::Boolean> running =
- auto_continue ? v8::True() : v8::False();
- static const int kArgc = 1;
- v8::Handle<Value> argv[kArgc] = { running };
- cmd_processor = v8::Object::Cast(*fun->Call(api_exec_state, kArgc, argv));
- if (try_catch.HasCaught()) {
- PrintLn(try_catch.Exception());
- return;
- }
- }
-
- bool running = auto_continue;
-
- // Process requests from the debugger.
- while (true) {
- // Wait for new command in the queue.
- if (Debugger::host_dispatch_handler_) {
- // In case there is a host dispatch - do periodic dispatches.
- if (!command_received_->Wait(host_dispatch_micros_)) {
- // Timout expired, do the dispatch.
- Debugger::host_dispatch_handler_();
- continue;
- }
- } else {
- // In case there is no host dispatch - just wait.
- command_received_->Wait();
- }
-
- // Get the command from the queue.
- CommandMessage command = command_queue_.Get();
- LOGGER->DebugTag("Got request from command queue, in interactive loop.");
- if (!Debugger::IsDebuggerActive()) {
- // Delete command text and user data.
- command.Dispose();
- return;
- }
-
- // Invoke JavaScript to process the debug request.
- v8::Local<v8::String> fun_name;
- v8::Local<v8::Function> fun;
- v8::Local<v8::Value> request;
- v8::TryCatch try_catch;
- fun_name = v8::String::New("processDebugRequest");
- fun = v8::Function::Cast(*cmd_processor->Get(fun_name));
-
- request = v8::String::New(command.text().start(),
- command.text().length());
- static const int kArgc = 1;
- v8::Handle<Value> argv[kArgc] = { request };
- v8::Local<v8::Value> response_val = fun->Call(cmd_processor, kArgc, argv);
-
- // Get the response.
- v8::Local<v8::String> response;
- if (!try_catch.HasCaught()) {
- // Get response string.
- if (!response_val->IsUndefined()) {
- response = v8::String::Cast(*response_val);
- } else {
- response = v8::String::New("");
- }
-
- // Log the JSON request/response.
- if (FLAG_trace_debug_json) {
- PrintLn(request);
- PrintLn(response);
- }
-
- // Get the running state.
- fun_name = v8::String::New("isRunning");
- fun = v8::Function::Cast(*cmd_processor->Get(fun_name));
- static const int kArgc = 1;
- v8::Handle<Value> argv[kArgc] = { response };
- v8::Local<v8::Value> running_val = fun->Call(cmd_processor, kArgc, argv);
- if (!try_catch.HasCaught()) {
- running = running_val->ToBoolean()->Value();
- }
- } else {
- // In case of failure the result text is the exception text.
- response = try_catch.Exception()->ToString();
- }
-
- // Return the result.
- MessageImpl message = MessageImpl::NewResponse(
- event,
- running,
- Handle<JSObject>::cast(exec_state),
- Handle<JSObject>::cast(event_data),
- Handle<String>(Utils::OpenHandle(*response)),
- command.client_data());
- InvokeMessageHandler(message);
- command.Dispose();
-
- // Return from debug event processing if either the VM is put into the
- // runnning state (through a continue command) or auto continue is active
- // and there are no more commands queued.
- if (running && !HasCommands()) {
- return;
- }
- }
-}
-
-
-void Debugger::SetEventListener(Handle<Object> callback,
- Handle<Object> data) {
- ASSERT(Isolate::Current() == isolate_);
- HandleScope scope(isolate_);
- GlobalHandles* global_handles = isolate_->global_handles();
-
- // Clear the global handles for the event listener and the event listener data
- // object.
- if (!event_listener_.is_null()) {
- global_handles->Destroy(
- reinterpret_cast<Object**>(event_listener_.location()));
- event_listener_ = Handle<Object>();
- }
- if (!event_listener_data_.is_null()) {
- global_handles->Destroy(
- reinterpret_cast<Object**>(event_listener_data_.location()));
- event_listener_data_ = Handle<Object>();
- }
-
- // If there is a new debug event listener register it together with its data
- // object.
- if (!callback->IsUndefined() && !callback->IsNull()) {
- event_listener_ = Handle<Object>::cast(
- global_handles->Create(*callback));
- if (data.is_null()) {
- data = isolate_->factory()->undefined_value();
- }
- event_listener_data_ = Handle<Object>::cast(
- global_handles->Create(*data));
- }
-
- ListenersChanged();
-}
-
-
-void Debugger::SetMessageHandler(v8::Debug::MessageHandler2 handler) {
- ASSERT(Isolate::Current() == isolate_);
- ScopedLock with(debugger_access_);
-
- message_handler_ = handler;
- ListenersChanged();
- if (handler == NULL) {
- // Send an empty command to the debugger if in a break to make JavaScript
- // run again if the debugger is closed.
- if (isolate_->debug()->InDebugger()) {
- ProcessCommand(Vector<const uint16_t>::empty());
- }
- }
-}
-
-
-void Debugger::ListenersChanged() {
- ASSERT(Isolate::Current() == isolate_);
- if (IsDebuggerActive()) {
- // Disable the compilation cache when the debugger is active.
- isolate_->compilation_cache()->Disable();
- debugger_unload_pending_ = false;
- } else {
- isolate_->compilation_cache()->Enable();
- // Unload the debugger if event listener and message handler cleared.
- // Schedule this for later, because we may be in non-V8 thread.
- debugger_unload_pending_ = true;
- }
-}
-
-
-void Debugger::SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
- int period) {
- ASSERT(Isolate::Current() == isolate_);
- host_dispatch_handler_ = handler;
- host_dispatch_micros_ = period * 1000;
-}
-
-
-void Debugger::SetDebugMessageDispatchHandler(
- v8::Debug::DebugMessageDispatchHandler handler, bool provide_locker) {
- ASSERT(Isolate::Current() == isolate_);
- ScopedLock with(dispatch_handler_access_);
- debug_message_dispatch_handler_ = handler;
-
- if (provide_locker && message_dispatch_helper_thread_ == NULL) {
- message_dispatch_helper_thread_ = new MessageDispatchHelperThread(isolate_);
- message_dispatch_helper_thread_->Start();
- }
-}
-
-
-// Calls the registered debug message handler. This callback is part of the
-// public API.
-void Debugger::InvokeMessageHandler(MessageImpl message) {
- ASSERT(Isolate::Current() == isolate_);
- ScopedLock with(debugger_access_);
-
- if (message_handler_ != NULL) {
- message_handler_(message);
- }
-}
-
-
-// Puts a command coming from the public API on the queue. Creates
-// a copy of the command string managed by the debugger. Up to this
-// point, the command data was managed by the API client. Called
-// by the API client thread.
-void Debugger::ProcessCommand(Vector<const uint16_t> command,
- v8::Debug::ClientData* client_data) {
- ASSERT(Isolate::Current() == isolate_);
- // Need to cast away const.
- CommandMessage message = CommandMessage::New(
- Vector<uint16_t>(const_cast<uint16_t*>(command.start()),
- command.length()),
- client_data);
- LOGGER->DebugTag("Put command on command_queue.");
- command_queue_.Put(message);
- command_received_->Signal();
-
- // Set the debug command break flag to have the command processed.
- if (!isolate_->debug()->InDebugger()) {
- isolate_->stack_guard()->DebugCommand();
- }
-
- MessageDispatchHelperThread* dispatch_thread;
- {
- ScopedLock with(dispatch_handler_access_);
- dispatch_thread = message_dispatch_helper_thread_;
- }
-
- if (dispatch_thread == NULL) {
- CallMessageDispatchHandler();
- } else {
- dispatch_thread->Schedule();
- }
-}
-
-
-bool Debugger::HasCommands() {
- ASSERT(Isolate::Current() == isolate_);
- return !command_queue_.IsEmpty();
-}
-
-
-void Debugger::EnqueueDebugCommand(v8::Debug::ClientData* client_data) {
- ASSERT(Isolate::Current() == isolate_);
- CommandMessage message = CommandMessage::New(Vector<uint16_t>(), client_data);
- event_command_queue_.Put(message);
-
- // Set the debug command break flag to have the command processed.
- if (!isolate_->debug()->InDebugger()) {
- isolate_->stack_guard()->DebugCommand();
- }
-}
-
-
-bool Debugger::IsDebuggerActive() {
- ASSERT(Isolate::Current() == isolate_);
- ScopedLock with(debugger_access_);
-
- return message_handler_ != NULL || !event_listener_.is_null();
-}
-
-
-Handle<Object> Debugger::Call(Handle<JSFunction> fun,
- Handle<Object> data,
- bool* pending_exception) {
- ASSERT(Isolate::Current() == isolate_);
- // When calling functions in the debugger prevent it from beeing unloaded.
- Debugger::never_unload_debugger_ = true;
-
- // Enter the debugger.
- EnterDebugger debugger;
- if (debugger.FailedToEnter()) {
- return isolate_->factory()->undefined_value();
- }
-
- // Create the execution state.
- bool caught_exception = false;
- Handle<Object> exec_state = MakeExecutionState(&caught_exception);
- if (caught_exception) {
- return isolate_->factory()->undefined_value();
- }
-
- static const int kArgc = 2;
- Object** argv[kArgc] = { exec_state.location(), data.location() };
- Handle<Object> result = Execution::Call(
- fun,
- Handle<Object>(isolate_->debug()->debug_context_->global_proxy()),
- kArgc,
- argv,
- pending_exception);
- return result;
-}
-
-
-static void StubMessageHandler2(const v8::Debug::Message& message) {
- // Simply ignore message.
-}
-
-
-bool Debugger::StartAgent(const char* name, int port,
- bool wait_for_connection) {
- ASSERT(Isolate::Current() == isolate_);
- if (wait_for_connection) {
- // Suspend V8 if it is already running or set V8 to suspend whenever
- // it starts.
- // Provide stub message handler; V8 auto-continues each suspend
- // when there is no message handler; we doesn't need it.
- // Once become suspended, V8 will stay so indefinitely long, until remote
- // debugger connects and issues "continue" command.
- Debugger::message_handler_ = StubMessageHandler2;
- v8::Debug::DebugBreak();
- }
-
- if (Socket::Setup()) {
- if (agent_ == NULL) {
- agent_ = new DebuggerAgent(isolate_, name, port);
- agent_->Start();
- }
- return true;
- }
-
- return false;
-}
-
-
-void Debugger::StopAgent() {
- ASSERT(Isolate::Current() == isolate_);
- if (agent_ != NULL) {
- agent_->Shutdown();
- agent_->Join();
- delete agent_;
- agent_ = NULL;
- }
-}
-
-
-void Debugger::WaitForAgent() {
- ASSERT(Isolate::Current() == isolate_);
- if (agent_ != NULL)
- agent_->WaitUntilListening();
-}
-
-
-void Debugger::CallMessageDispatchHandler() {
- ASSERT(Isolate::Current() == isolate_);
- v8::Debug::DebugMessageDispatchHandler handler;
- {
- ScopedLock with(dispatch_handler_access_);
- handler = Debugger::debug_message_dispatch_handler_;
- }
- if (handler != NULL) {
- handler();
- }
-}
-
-
-MessageImpl MessageImpl::NewEvent(DebugEvent event,
- bool running,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data) {
- MessageImpl message(true, event, running,
- exec_state, event_data, Handle<String>(), NULL);
- return message;
-}
-
-
-MessageImpl MessageImpl::NewResponse(DebugEvent event,
- bool running,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- Handle<String> response_json,
- v8::Debug::ClientData* client_data) {
- MessageImpl message(false, event, running,
- exec_state, event_data, response_json, client_data);
- return message;
-}
-
-
-MessageImpl::MessageImpl(bool is_event,
- DebugEvent event,
- bool running,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- Handle<String> response_json,
- v8::Debug::ClientData* client_data)
- : is_event_(is_event),
- event_(event),
- running_(running),
- exec_state_(exec_state),
- event_data_(event_data),
- response_json_(response_json),
- client_data_(client_data) {}
-
-
-bool MessageImpl::IsEvent() const {
- return is_event_;
-}
-
-
-bool MessageImpl::IsResponse() const {
- return !is_event_;
-}
-
-
-DebugEvent MessageImpl::GetEvent() const {
- return event_;
-}
-
-
-bool MessageImpl::WillStartRunning() const {
- return running_;
-}
-
-
-v8::Handle<v8::Object> MessageImpl::GetExecutionState() const {
- return v8::Utils::ToLocal(exec_state_);
-}
-
-
-v8::Handle<v8::Object> MessageImpl::GetEventData() const {
- return v8::Utils::ToLocal(event_data_);
-}
-
-
-v8::Handle<v8::String> MessageImpl::GetJSON() const {
- v8::HandleScope scope;
-
- if (IsEvent()) {
- // Call toJSONProtocol on the debug event object.
- Handle<Object> fun = GetProperty(event_data_, "toJSONProtocol");
- if (!fun->IsJSFunction()) {
- return v8::Handle<v8::String>();
- }
- bool caught_exception;
- Handle<Object> json = Execution::TryCall(Handle<JSFunction>::cast(fun),
- event_data_,
- 0, NULL, &caught_exception);
- if (caught_exception || !json->IsString()) {
- return v8::Handle<v8::String>();
- }
- return scope.Close(v8::Utils::ToLocal(Handle<String>::cast(json)));
- } else {
- return v8::Utils::ToLocal(response_json_);
- }
-}
-
-
-v8::Handle<v8::Context> MessageImpl::GetEventContext() const {
- Isolate* isolate = Isolate::Current();
- v8::Handle<v8::Context> context = GetDebugEventContext(isolate);
- // Isolate::context() may be NULL when "script collected" event occures.
- ASSERT(!context.IsEmpty() || event_ == v8::ScriptCollected);
- return GetDebugEventContext(isolate);
-}
-
-
-v8::Debug::ClientData* MessageImpl::GetClientData() const {
- return client_data_;
-}
-
-
-EventDetailsImpl::EventDetailsImpl(DebugEvent event,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- Handle<Object> callback_data,
- v8::Debug::ClientData* client_data)
- : event_(event),
- exec_state_(exec_state),
- event_data_(event_data),
- callback_data_(callback_data),
- client_data_(client_data) {}
-
-
-DebugEvent EventDetailsImpl::GetEvent() const {
- return event_;
-}
-
-
-v8::Handle<v8::Object> EventDetailsImpl::GetExecutionState() const {
- return v8::Utils::ToLocal(exec_state_);
-}
-
-
-v8::Handle<v8::Object> EventDetailsImpl::GetEventData() const {
- return v8::Utils::ToLocal(event_data_);
-}
-
-
-v8::Handle<v8::Context> EventDetailsImpl::GetEventContext() const {
- return GetDebugEventContext(Isolate::Current());
-}
-
-
-v8::Handle<v8::Value> EventDetailsImpl::GetCallbackData() const {
- return v8::Utils::ToLocal(callback_data_);
-}
-
-
-v8::Debug::ClientData* EventDetailsImpl::GetClientData() const {
- return client_data_;
-}
-
-
-CommandMessage::CommandMessage() : text_(Vector<uint16_t>::empty()),
- client_data_(NULL) {
-}
-
-
-CommandMessage::CommandMessage(const Vector<uint16_t>& text,
- v8::Debug::ClientData* data)
- : text_(text),
- client_data_(data) {
-}
-
-
-CommandMessage::~CommandMessage() {
-}
-
-
-void CommandMessage::Dispose() {
- text_.Dispose();
- delete client_data_;
- client_data_ = NULL;
-}
-
-
-CommandMessage CommandMessage::New(const Vector<uint16_t>& command,
- v8::Debug::ClientData* data) {
- return CommandMessage(command.Clone(), data);
-}
-
-
-CommandMessageQueue::CommandMessageQueue(int size) : start_(0), end_(0),
- size_(size) {
- messages_ = NewArray<CommandMessage>(size);
-}
-
-
-CommandMessageQueue::~CommandMessageQueue() {
- while (!IsEmpty()) {
- CommandMessage m = Get();
- m.Dispose();
- }
- DeleteArray(messages_);
-}
-
-
-CommandMessage CommandMessageQueue::Get() {
- ASSERT(!IsEmpty());
- int result = start_;
- start_ = (start_ + 1) % size_;
- return messages_[result];
-}
-
-
-void CommandMessageQueue::Put(const CommandMessage& message) {
- if ((end_ + 1) % size_ == start_) {
- Expand();
- }
- messages_[end_] = message;
- end_ = (end_ + 1) % size_;
-}
-
-
-void CommandMessageQueue::Expand() {
- CommandMessageQueue new_queue(size_ * 2);
- while (!IsEmpty()) {
- new_queue.Put(Get());
- }
- CommandMessage* array_to_free = messages_;
- *this = new_queue;
- new_queue.messages_ = array_to_free;
- // Make the new_queue empty so that it doesn't call Dispose on any messages.
- new_queue.start_ = new_queue.end_;
- // Automatic destructor called on new_queue, freeing array_to_free.
-}
-
-
-LockingCommandMessageQueue::LockingCommandMessageQueue(int size)
- : queue_(size) {
- lock_ = OS::CreateMutex();
-}
-
-
-LockingCommandMessageQueue::~LockingCommandMessageQueue() {
- delete lock_;
-}
-
-
-bool LockingCommandMessageQueue::IsEmpty() const {
- ScopedLock sl(lock_);
- return queue_.IsEmpty();
-}
-
-
-CommandMessage LockingCommandMessageQueue::Get() {
- ScopedLock sl(lock_);
- CommandMessage result = queue_.Get();
- LOGGER->DebugEvent("Get", result.text());
- return result;
-}
-
-
-void LockingCommandMessageQueue::Put(const CommandMessage& message) {
- ScopedLock sl(lock_);
- queue_.Put(message);
- LOGGER->DebugEvent("Put", message.text());
-}
-
-
-void LockingCommandMessageQueue::Clear() {
- ScopedLock sl(lock_);
- queue_.Clear();
-}
-
-
-MessageDispatchHelperThread::MessageDispatchHelperThread(Isolate* isolate)
- : Thread(isolate, "v8:MsgDispHelpr"),
- sem_(OS::CreateSemaphore(0)), mutex_(OS::CreateMutex()),
- already_signalled_(false) {
-}
-
-
-MessageDispatchHelperThread::~MessageDispatchHelperThread() {
- delete mutex_;
- delete sem_;
-}
-
-
-void MessageDispatchHelperThread::Schedule() {
- {
- ScopedLock lock(mutex_);
- if (already_signalled_) {
- return;
- }
- already_signalled_ = true;
- }
- sem_->Signal();
-}
-
-
-void MessageDispatchHelperThread::Run() {
- while (true) {
- sem_->Wait();
- {
- ScopedLock lock(mutex_);
- already_signalled_ = false;
- }
- {
- Locker locker;
- Isolate::Current()->debugger()->CallMessageDispatchHandler();
- }
- }
-}
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/debug.h b/src/3rdparty/v8/src/debug.h
deleted file mode 100644
index 9366fc3..0000000
--- a/src/3rdparty/v8/src/debug.h
+++ /dev/null
@@ -1,1055 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DEBUG_H_
-#define V8_DEBUG_H_
-
-#include "arguments.h"
-#include "assembler.h"
-#include "debug-agent.h"
-#include "execution.h"
-#include "factory.h"
-#include "flags.h"
-#include "hashmap.h"
-#include "platform.h"
-#include "string-stream.h"
-#include "v8threads.h"
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-#include "../include/v8-debug.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Forward declarations.
-class EnterDebugger;
-
-
-// Step actions. NOTE: These values are in macros.py as well.
-enum StepAction {
- StepNone = -1, // Stepping not prepared.
- StepOut = 0, // Step out of the current function.
- StepNext = 1, // Step to the next statement in the current function.
- StepIn = 2, // Step into new functions invoked or the next statement
- // in the current function.
- StepMin = 3, // Perform a minimum step in the current function.
- StepInMin = 4 // Step into new functions invoked or perform a minimum step
- // in the current function.
-};
-
-
-// Type of exception break. NOTE: These values are in macros.py as well.
-enum ExceptionBreakType {
- BreakException = 0,
- BreakUncaughtException = 1
-};
-
-
-// Type of exception break. NOTE: These values are in macros.py as well.
-enum BreakLocatorType {
- ALL_BREAK_LOCATIONS = 0,
- SOURCE_BREAK_LOCATIONS = 1
-};
-
-
-// Class for iterating through the break points in a function and changing
-// them.
-class BreakLocationIterator {
- public:
- explicit BreakLocationIterator(Handle<DebugInfo> debug_info,
- BreakLocatorType type);
- virtual ~BreakLocationIterator();
-
- void Next();
- void Next(int count);
- void FindBreakLocationFromAddress(Address pc);
- void FindBreakLocationFromPosition(int position);
- void Reset();
- bool Done() const;
- void SetBreakPoint(Handle<Object> break_point_object);
- void ClearBreakPoint(Handle<Object> break_point_object);
- void SetOneShot();
- void ClearOneShot();
- void PrepareStepIn();
- bool IsExit() const;
- bool HasBreakPoint();
- bool IsDebugBreak();
- Object* BreakPointObjects();
- void ClearAllDebugBreak();
-
-
- inline int code_position() {
- return static_cast<int>(pc() - debug_info_->code()->entry());
- }
- inline int break_point() { return break_point_; }
- inline int position() { return position_; }
- inline int statement_position() { return statement_position_; }
- inline Address pc() { return reloc_iterator_->rinfo()->pc(); }
- inline Code* code() { return debug_info_->code(); }
- inline RelocInfo* rinfo() { return reloc_iterator_->rinfo(); }
- inline RelocInfo::Mode rmode() const {
- return reloc_iterator_->rinfo()->rmode();
- }
- inline RelocInfo* original_rinfo() {
- return reloc_iterator_original_->rinfo();
- }
- inline RelocInfo::Mode original_rmode() const {
- return reloc_iterator_original_->rinfo()->rmode();
- }
-
- bool IsDebuggerStatement();
-
- protected:
- bool RinfoDone() const;
- void RinfoNext();
-
- BreakLocatorType type_;
- int break_point_;
- int position_;
- int statement_position_;
- Handle<DebugInfo> debug_info_;
- RelocIterator* reloc_iterator_;
- RelocIterator* reloc_iterator_original_;
-
- private:
- void SetDebugBreak();
- void ClearDebugBreak();
-
- void SetDebugBreakAtIC();
- void ClearDebugBreakAtIC();
-
- bool IsDebugBreakAtReturn();
- void SetDebugBreakAtReturn();
- void ClearDebugBreakAtReturn();
-
- bool IsDebugBreakSlot();
- bool IsDebugBreakAtSlot();
- void SetDebugBreakAtSlot();
- void ClearDebugBreakAtSlot();
-
- DISALLOW_COPY_AND_ASSIGN(BreakLocationIterator);
-};
-
-
-// Cache of all script objects in the heap. When a script is added a weak handle
-// to it is created and that weak handle is stored in the cache. The weak handle
-// callback takes care of removing the script from the cache. The key used in
-// the cache is the script id.
-class ScriptCache : private HashMap {
- public:
- ScriptCache() : HashMap(ScriptMatch), collected_scripts_(10) {}
- virtual ~ScriptCache() { Clear(); }
-
- // Add script to the cache.
- void Add(Handle<Script> script);
-
- // Return the scripts in the cache.
- Handle<FixedArray> GetScripts();
-
- // Generate debugger events for collected scripts.
- void ProcessCollectedScripts();
-
- private:
- // Calculate the hash value from the key (script id).
- static uint32_t Hash(int key) { return ComputeIntegerHash(key); }
-
- // Scripts match if their keys (script id) match.
- static bool ScriptMatch(void* key1, void* key2) { return key1 == key2; }
-
- // Clear the cache releasing all the weak handles.
- void Clear();
-
- // Weak handle callback for scripts in the cache.
- static void HandleWeakScript(v8::Persistent<v8::Value> obj, void* data);
-
- // List used during GC to temporarily store id's of collected scripts.
- List<int> collected_scripts_;
-};
-
-
-// Linked list holding debug info objects. The debug info objects are kept as
-// weak handles to avoid a debug info object to keep a function alive.
-class DebugInfoListNode {
- public:
- explicit DebugInfoListNode(DebugInfo* debug_info);
- virtual ~DebugInfoListNode();
-
- DebugInfoListNode* next() { return next_; }
- void set_next(DebugInfoListNode* next) { next_ = next; }
- Handle<DebugInfo> debug_info() { return debug_info_; }
-
- private:
- // Global (weak) handle to the debug info object.
- Handle<DebugInfo> debug_info_;
-
- // Next pointer for linked list.
- DebugInfoListNode* next_;
-};
-
-// This class contains the debugger support. The main purpose is to handle
-// setting break points in the code.
-//
-// This class controls the debug info for all functions which currently have
-// active breakpoints in them. This debug info is held in the heap root object
-// debug_info which is a FixedArray. Each entry in this list is of class
-// DebugInfo.
-class Debug {
- public:
- void Setup(bool create_heap_objects);
- bool Load();
- void Unload();
- bool IsLoaded() { return !debug_context_.is_null(); }
- bool InDebugger() { return thread_local_.debugger_entry_ != NULL; }
- void PreemptionWhileInDebugger();
- void Iterate(ObjectVisitor* v);
-
- Object* Break(Arguments args);
- void SetBreakPoint(Handle<SharedFunctionInfo> shared,
- Handle<Object> break_point_object,
- int* source_position);
- void ClearBreakPoint(Handle<Object> break_point_object);
- void ClearAllBreakPoints();
- void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
- void FloodHandlerWithOneShot();
- void ChangeBreakOnException(ExceptionBreakType type, bool enable);
- bool IsBreakOnException(ExceptionBreakType type);
- void PrepareStep(StepAction step_action, int step_count);
- void ClearStepping();
- bool StepNextContinue(BreakLocationIterator* break_location_iterator,
- JavaScriptFrame* frame);
- static Handle<DebugInfo> GetDebugInfo(Handle<SharedFunctionInfo> shared);
- static bool HasDebugInfo(Handle<SharedFunctionInfo> shared);
-
- // Returns whether the operation succeeded.
- bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared);
-
- // Returns true if the current stub call is patched to call the debugger.
- static bool IsDebugBreak(Address addr);
- // Returns true if the current return statement has been patched to be
- // a debugger breakpoint.
- static bool IsDebugBreakAtReturn(RelocInfo* rinfo);
-
- // Check whether a code stub with the specified major key is a possible break
- // point location.
- static bool IsSourceBreakStub(Code* code);
- static bool IsBreakStub(Code* code);
-
- // Find the builtin to use for invoking the debug break
- static Handle<Code> FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode);
-
- static Handle<Object> GetSourceBreakLocations(
- Handle<SharedFunctionInfo> shared);
-
- // Getter for the debug_context.
- inline Handle<Context> debug_context() { return debug_context_; }
-
- // Check whether a global object is the debug global object.
- bool IsDebugGlobal(GlobalObject* global);
-
- // Check whether this frame is just about to return.
- bool IsBreakAtReturn(JavaScriptFrame* frame);
-
- // Fast check to see if any break points are active.
- inline bool has_break_points() { return has_break_points_; }
-
- void NewBreak(StackFrame::Id break_frame_id);
- void SetBreak(StackFrame::Id break_frame_id, int break_id);
- StackFrame::Id break_frame_id() {
- return thread_local_.break_frame_id_;
- }
- int break_id() { return thread_local_.break_id_; }
-
- bool StepInActive() { return thread_local_.step_into_fp_ != 0; }
- void HandleStepIn(Handle<JSFunction> function,
- Handle<Object> holder,
- Address fp,
- bool is_constructor);
- Address step_in_fp() { return thread_local_.step_into_fp_; }
- Address* step_in_fp_addr() { return &thread_local_.step_into_fp_; }
-
- bool StepOutActive() { return thread_local_.step_out_fp_ != 0; }
- Address step_out_fp() { return thread_local_.step_out_fp_; }
-
- EnterDebugger* debugger_entry() {
- return thread_local_.debugger_entry_;
- }
- void set_debugger_entry(EnterDebugger* entry) {
- thread_local_.debugger_entry_ = entry;
- }
-
- // Check whether any of the specified interrupts are pending.
- bool is_interrupt_pending(InterruptFlag what) {
- return (thread_local_.pending_interrupts_ & what) != 0;
- }
-
- // Set specified interrupts as pending.
- void set_interrupts_pending(InterruptFlag what) {
- thread_local_.pending_interrupts_ |= what;
- }
-
- // Clear specified interrupts from pending.
- void clear_interrupt_pending(InterruptFlag what) {
- thread_local_.pending_interrupts_ &= ~static_cast<int>(what);
- }
-
- // Getter and setter for the disable break state.
- bool disable_break() { return disable_break_; }
- void set_disable_break(bool disable_break) {
- disable_break_ = disable_break;
- }
-
- // Getters for the current exception break state.
- bool break_on_exception() { return break_on_exception_; }
- bool break_on_uncaught_exception() {
- return break_on_uncaught_exception_;
- }
-
- enum AddressId {
- k_after_break_target_address,
- k_debug_break_return_address,
- k_debug_break_slot_address,
- k_restarter_frame_function_pointer
- };
-
- // Support for setting the address to jump to when returning from break point.
- Address* after_break_target_address() {
- return reinterpret_cast<Address*>(&thread_local_.after_break_target_);
- }
- Address* restarter_frame_function_pointer_address() {
- Object*** address = &thread_local_.restarter_frame_function_pointer_;
- return reinterpret_cast<Address*>(address);
- }
-
- // Support for saving/restoring registers when handling debug break calls.
- Object** register_address(int r) {
- return &registers_[r];
- }
-
- // Access to the debug break on return code.
- Code* debug_break_return() { return debug_break_return_; }
- Code** debug_break_return_address() {
- return &debug_break_return_;
- }
-
- // Access to the debug break in debug break slot code.
- Code* debug_break_slot() { return debug_break_slot_; }
- Code** debug_break_slot_address() {
- return &debug_break_slot_;
- }
-
- static const int kEstimatedNofDebugInfoEntries = 16;
- static const int kEstimatedNofBreakPointsInFunction = 16;
-
- // Passed to MakeWeak.
- static void HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data);
-
- friend class Debugger;
- friend Handle<FixedArray> GetDebuggedFunctions(); // In test-debug.cc
- friend void CheckDebuggerUnloaded(bool check_functions); // In test-debug.cc
-
- // Threading support.
- char* ArchiveDebug(char* to);
- char* RestoreDebug(char* from);
- static int ArchiveSpacePerThread();
- void FreeThreadResources() { }
-
- // Mirror cache handling.
- void ClearMirrorCache();
-
- // Script cache handling.
- void CreateScriptCache();
- void DestroyScriptCache();
- void AddScriptToScriptCache(Handle<Script> script);
- Handle<FixedArray> GetLoadedScripts();
-
- // Garbage collection notifications.
- void AfterGarbageCollection();
-
- // Code generator routines.
- static void GenerateSlot(MacroAssembler* masm);
- static void GenerateLoadICDebugBreak(MacroAssembler* masm);
- static void GenerateStoreICDebugBreak(MacroAssembler* masm);
- static void GenerateKeyedLoadICDebugBreak(MacroAssembler* masm);
- static void GenerateKeyedStoreICDebugBreak(MacroAssembler* masm);
- static void GenerateConstructCallDebugBreak(MacroAssembler* masm);
- static void GenerateReturnDebugBreak(MacroAssembler* masm);
- static void GenerateStubNoRegistersDebugBreak(MacroAssembler* masm);
- static void GenerateSlotDebugBreak(MacroAssembler* masm);
- static void GeneratePlainReturnLiveEdit(MacroAssembler* masm);
-
- // FrameDropper is a code replacement for a JavaScript frame with possibly
- // several frames above.
- // There is no calling conventions here, because it never actually gets
- // called, it only gets returned to.
- static void GenerateFrameDropperLiveEdit(MacroAssembler* masm);
-
- // Called from stub-cache.cc.
- static void GenerateCallICDebugBreak(MacroAssembler* masm);
-
- // Describes how exactly a frame has been dropped from stack.
- enum FrameDropMode {
- // No frame has been dropped.
- FRAMES_UNTOUCHED,
- // The top JS frame had been calling IC stub. IC stub mustn't be called now.
- FRAME_DROPPED_IN_IC_CALL,
- // The top JS frame had been calling debug break slot stub. Patch the
- // address this stub jumps to in the end.
- FRAME_DROPPED_IN_DEBUG_SLOT_CALL,
- // The top JS frame had been calling some C++ function. The return address
- // gets patched automatically.
- FRAME_DROPPED_IN_DIRECT_CALL
- };
-
- void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
- FrameDropMode mode,
- Object** restarter_frame_function_pointer);
-
- // Initializes an artificial stack frame. The data it contains is used for:
- // a. successful work of frame dropper code which eventually gets control,
- // b. being compatible with regular stack structure for various stack
- // iterators.
- // Returns address of stack allocated pointer to restarted function,
- // the value that is called 'restarter_frame_function_pointer'. The value
- // at this address (possibly updated by GC) may be used later when preparing
- // 'step in' operation.
- static Object** SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
- Handle<Code> code);
-
- static const int kFrameDropperFrameSize;
-
- // Architecture-specific constant.
- static const bool kFrameDropperSupported;
-
- private:
- explicit Debug(Isolate* isolate);
- ~Debug();
-
- static bool CompileDebuggerScript(int index);
- void ClearOneShot();
- void ActivateStepIn(StackFrame* frame);
- void ClearStepIn();
- void ActivateStepOut(StackFrame* frame);
- void ClearStepOut();
- void ClearStepNext();
- // Returns whether the compile succeeded.
- void RemoveDebugInfo(Handle<DebugInfo> debug_info);
- void SetAfterBreakTarget(JavaScriptFrame* frame);
- Handle<Object> CheckBreakPoints(Handle<Object> break_point);
- bool CheckBreakPoint(Handle<Object> break_point_object);
-
- // Global handle to debug context where all the debugger JavaScript code is
- // loaded.
- Handle<Context> debug_context_;
-
- // Boolean state indicating whether any break points are set.
- bool has_break_points_;
-
- // Cache of all scripts in the heap.
- ScriptCache* script_cache_;
-
- // List of active debug info objects.
- DebugInfoListNode* debug_info_list_;
-
- bool disable_break_;
- bool break_on_exception_;
- bool break_on_uncaught_exception_;
-
- // Per-thread data.
- class ThreadLocal {
- public:
- // Counter for generating next break id.
- int break_count_;
-
- // Current break id.
- int break_id_;
-
- // Frame id for the frame of the current break.
- StackFrame::Id break_frame_id_;
-
- // Step action for last step performed.
- StepAction last_step_action_;
-
- // Source statement position from last step next action.
- int last_statement_position_;
-
- // Number of steps left to perform before debug event.
- int step_count_;
-
- // Frame pointer from last step next action.
- Address last_fp_;
-
- // Frame pointer for frame from which step in was performed.
- Address step_into_fp_;
-
- // Frame pointer for the frame where debugger should be called when current
- // step out action is completed.
- Address step_out_fp_;
-
- // Storage location for jump when exiting debug break calls.
- Address after_break_target_;
-
- // Stores the way how LiveEdit has patched the stack. It is used when
- // debugger returns control back to user script.
- FrameDropMode frame_drop_mode_;
-
- // Top debugger entry.
- EnterDebugger* debugger_entry_;
-
- // Pending interrupts scheduled while debugging.
- int pending_interrupts_;
-
- // When restarter frame is on stack, stores the address
- // of the pointer to function being restarted. Otherwise (most of the time)
- // stores NULL. This pointer is used with 'step in' implementation.
- Object** restarter_frame_function_pointer_;
- };
-
- // Storage location for registers when handling debug break calls
- JSCallerSavedBuffer registers_;
- ThreadLocal thread_local_;
- void ThreadInit();
-
- // Code to call for handling debug break on return.
- Code* debug_break_return_;
-
- // Code to call for handling debug break in debug break slots.
- Code* debug_break_slot_;
-
- Isolate* isolate_;
-
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(Debug);
-};
-
-
-DECLARE_RUNTIME_FUNCTION(Object*, Debug_Break);
-
-
-// Message delivered to the message handler callback. This is either a debugger
-// event or the response to a command.
-class MessageImpl: public v8::Debug::Message {
- public:
- // Create a message object for a debug event.
- static MessageImpl NewEvent(DebugEvent event,
- bool running,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data);
-
- // Create a message object for the response to a debug command.
- static MessageImpl NewResponse(DebugEvent event,
- bool running,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- Handle<String> response_json,
- v8::Debug::ClientData* client_data);
-
- // Implementation of interface v8::Debug::Message.
- virtual bool IsEvent() const;
- virtual bool IsResponse() const;
- virtual DebugEvent GetEvent() const;
- virtual bool WillStartRunning() const;
- virtual v8::Handle<v8::Object> GetExecutionState() const;
- virtual v8::Handle<v8::Object> GetEventData() const;
- virtual v8::Handle<v8::String> GetJSON() const;
- virtual v8::Handle<v8::Context> GetEventContext() const;
- virtual v8::Debug::ClientData* GetClientData() const;
-
- private:
- MessageImpl(bool is_event,
- DebugEvent event,
- bool running,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- Handle<String> response_json,
- v8::Debug::ClientData* client_data);
-
- bool is_event_; // Does this message represent a debug event?
- DebugEvent event_; // Debug event causing the break.
- bool running_; // Will the VM start running after this event?
- Handle<JSObject> exec_state_; // Current execution state.
- Handle<JSObject> event_data_; // Data associated with the event.
- Handle<String> response_json_; // Response JSON if message holds a response.
- v8::Debug::ClientData* client_data_; // Client data passed with the request.
-};
-
-
-// Details of the debug event delivered to the debug event listener.
-class EventDetailsImpl : public v8::Debug::EventDetails {
- public:
- EventDetailsImpl(DebugEvent event,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- Handle<Object> callback_data,
- v8::Debug::ClientData* client_data);
- virtual DebugEvent GetEvent() const;
- virtual v8::Handle<v8::Object> GetExecutionState() const;
- virtual v8::Handle<v8::Object> GetEventData() const;
- virtual v8::Handle<v8::Context> GetEventContext() const;
- virtual v8::Handle<v8::Value> GetCallbackData() const;
- virtual v8::Debug::ClientData* GetClientData() const;
- private:
- DebugEvent event_; // Debug event causing the break.
- Handle<JSObject> exec_state_; // Current execution state.
- Handle<JSObject> event_data_; // Data associated with the event.
- Handle<Object> callback_data_; // User data passed with the callback
- // when it was registered.
- v8::Debug::ClientData* client_data_; // Data passed to DebugBreakForCommand.
-};
-
-
-// Message send by user to v8 debugger or debugger output message.
-// In addition to command text it may contain a pointer to some user data
-// which are expected to be passed along with the command reponse to message
-// handler.
-class CommandMessage {
- public:
- static CommandMessage New(const Vector<uint16_t>& command,
- v8::Debug::ClientData* data);
- CommandMessage();
- ~CommandMessage();
-
- // Deletes user data and disposes of the text.
- void Dispose();
- Vector<uint16_t> text() const { return text_; }
- v8::Debug::ClientData* client_data() const { return client_data_; }
- private:
- CommandMessage(const Vector<uint16_t>& text,
- v8::Debug::ClientData* data);
-
- Vector<uint16_t> text_;
- v8::Debug::ClientData* client_data_;
-};
-
-// A Queue of CommandMessage objects. A thread-safe version is
-// LockingCommandMessageQueue, based on this class.
-class CommandMessageQueue BASE_EMBEDDED {
- public:
- explicit CommandMessageQueue(int size);
- ~CommandMessageQueue();
- bool IsEmpty() const { return start_ == end_; }
- CommandMessage Get();
- void Put(const CommandMessage& message);
- void Clear() { start_ = end_ = 0; } // Queue is empty after Clear().
- private:
- // Doubles the size of the message queue, and copies the messages.
- void Expand();
-
- CommandMessage* messages_;
- int start_;
- int end_;
- int size_; // The size of the queue buffer. Queue can hold size-1 messages.
-};
-
-
-class MessageDispatchHelperThread;
-
-
-// LockingCommandMessageQueue is a thread-safe circular buffer of CommandMessage
-// messages. The message data is not managed by LockingCommandMessageQueue.
-// Pointers to the data are passed in and out. Implemented by adding a
-// Mutex to CommandMessageQueue. Includes logging of all puts and gets.
-class LockingCommandMessageQueue BASE_EMBEDDED {
- public:
- explicit LockingCommandMessageQueue(int size);
- ~LockingCommandMessageQueue();
- bool IsEmpty() const;
- CommandMessage Get();
- void Put(const CommandMessage& message);
- void Clear();
- private:
- CommandMessageQueue queue_;
- Mutex* lock_;
- DISALLOW_COPY_AND_ASSIGN(LockingCommandMessageQueue);
-};
-
-
-class Debugger {
- public:
- ~Debugger();
-
- void DebugRequest(const uint16_t* json_request, int length);
-
- Handle<Object> MakeJSObject(Vector<const char> constructor_name,
- int argc, Object*** argv,
- bool* caught_exception);
- Handle<Object> MakeExecutionState(bool* caught_exception);
- Handle<Object> MakeBreakEvent(Handle<Object> exec_state,
- Handle<Object> break_points_hit,
- bool* caught_exception);
- Handle<Object> MakeExceptionEvent(Handle<Object> exec_state,
- Handle<Object> exception,
- bool uncaught,
- bool* caught_exception);
- Handle<Object> MakeNewFunctionEvent(Handle<Object> func,
- bool* caught_exception);
- Handle<Object> MakeCompileEvent(Handle<Script> script,
- bool before,
- bool* caught_exception);
- Handle<Object> MakeScriptCollectedEvent(int id,
- bool* caught_exception);
- void OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue);
- void OnException(Handle<Object> exception, bool uncaught);
- void OnBeforeCompile(Handle<Script> script);
-
- enum AfterCompileFlags {
- NO_AFTER_COMPILE_FLAGS,
- SEND_WHEN_DEBUGGING
- };
- void OnAfterCompile(Handle<Script> script,
- AfterCompileFlags after_compile_flags);
- void OnNewFunction(Handle<JSFunction> fun);
- void OnScriptCollected(int id);
- void ProcessDebugEvent(v8::DebugEvent event,
- Handle<JSObject> event_data,
- bool auto_continue);
- void NotifyMessageHandler(v8::DebugEvent event,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- bool auto_continue);
- void SetEventListener(Handle<Object> callback, Handle<Object> data);
- void SetMessageHandler(v8::Debug::MessageHandler2 handler);
- void SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
- int period);
- void SetDebugMessageDispatchHandler(
- v8::Debug::DebugMessageDispatchHandler handler,
- bool provide_locker);
-
- // Invoke the message handler function.
- void InvokeMessageHandler(MessageImpl message);
-
- // Add a debugger command to the command queue.
- void ProcessCommand(Vector<const uint16_t> command,
- v8::Debug::ClientData* client_data = NULL);
-
- // Check whether there are commands in the command queue.
- bool HasCommands();
-
- // Enqueue a debugger command to the command queue for event listeners.
- void EnqueueDebugCommand(v8::Debug::ClientData* client_data = NULL);
-
- Handle<Object> Call(Handle<JSFunction> fun,
- Handle<Object> data,
- bool* pending_exception);
-
- // Start the debugger agent listening on the provided port.
- bool StartAgent(const char* name, int port,
- bool wait_for_connection = false);
-
- // Stop the debugger agent.
- void StopAgent();
-
- // Blocks until the agent has started listening for connections
- void WaitForAgent();
-
- void CallMessageDispatchHandler();
-
- Handle<Context> GetDebugContext();
-
- // Unload the debugger if possible. Only called when no debugger is currently
- // active.
- void UnloadDebugger();
- friend void ForceUnloadDebugger(); // In test-debug.cc
-
- inline bool EventActive(v8::DebugEvent event) {
- ScopedLock with(debugger_access_);
-
- // Check whether the message handler was been cleared.
- if (debugger_unload_pending_) {
- if (isolate_->debug()->debugger_entry() == NULL) {
- UnloadDebugger();
- }
- }
-
- if (((event == v8::BeforeCompile) || (event == v8::AfterCompile)) &&
- !FLAG_debug_compile_events) {
- return false;
-
- } else if ((event == v8::ScriptCollected) &&
- !FLAG_debug_script_collected_events) {
- return false;
- }
-
- // Currently argument event is not used.
- return !compiling_natives_ && Debugger::IsDebuggerActive();
- }
-
- void set_compiling_natives(bool compiling_natives) {
- Debugger::compiling_natives_ = compiling_natives;
- }
- bool compiling_natives() const { return compiling_natives_; }
- void set_loading_debugger(bool v) { is_loading_debugger_ = v; }
- bool is_loading_debugger() const { return is_loading_debugger_; }
-
- bool IsDebuggerActive();
-
- private:
- Debugger();
-
- void CallEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- v8::Debug::ClientData* client_data);
- void CallCEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- v8::Debug::ClientData* client_data);
- void CallJSEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data);
- void ListenersChanged();
-
- Mutex* debugger_access_; // Mutex guarding debugger variables.
- Handle<Object> event_listener_; // Global handle to listener.
- Handle<Object> event_listener_data_;
- bool compiling_natives_; // Are we compiling natives?
- bool is_loading_debugger_; // Are we loading the debugger?
- bool never_unload_debugger_; // Can we unload the debugger?
- v8::Debug::MessageHandler2 message_handler_;
- bool debugger_unload_pending_; // Was message handler cleared?
- v8::Debug::HostDispatchHandler host_dispatch_handler_;
- Mutex* dispatch_handler_access_; // Mutex guarding dispatch handler.
- v8::Debug::DebugMessageDispatchHandler debug_message_dispatch_handler_;
- MessageDispatchHelperThread* message_dispatch_helper_thread_;
- int host_dispatch_micros_;
-
- DebuggerAgent* agent_;
-
- static const int kQueueInitialSize = 4;
- LockingCommandMessageQueue command_queue_;
- Semaphore* command_received_; // Signaled for each command received.
- LockingCommandMessageQueue event_command_queue_;
-
- Isolate* isolate_;
-
- friend class EnterDebugger;
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(Debugger);
-};
-
-
-// This class is used for entering the debugger. Create an instance in the stack
-// to enter the debugger. This will set the current break state, make sure the
-// debugger is loaded and switch to the debugger context. If the debugger for
-// some reason could not be entered FailedToEnter will return true.
-class EnterDebugger BASE_EMBEDDED {
- public:
- EnterDebugger()
- : isolate_(Isolate::Current()),
- prev_(isolate_->debug()->debugger_entry()),
- it_(isolate_),
- has_js_frames_(!it_.done()),
- save_(isolate_) {
- Debug* debug = isolate_->debug();
- ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
- ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
-
- // Link recursive debugger entry.
- debug->set_debugger_entry(this);
-
- // Store the previous break id and frame id.
- break_id_ = debug->break_id();
- break_frame_id_ = debug->break_frame_id();
-
- // Create the new break info. If there is no JavaScript frames there is no
- // break frame id.
- if (has_js_frames_) {
- debug->NewBreak(it_.frame()->id());
- } else {
- debug->NewBreak(StackFrame::NO_ID);
- }
-
- // Make sure that debugger is loaded and enter the debugger context.
- load_failed_ = !debug->Load();
- if (!load_failed_) {
- // NOTE the member variable save which saves the previous context before
- // this change.
- isolate_->set_context(*debug->debug_context());
- }
- }
-
- ~EnterDebugger() {
- ASSERT(Isolate::Current() == isolate_);
- Debug* debug = isolate_->debug();
-
- // Restore to the previous break state.
- debug->SetBreak(break_frame_id_, break_id_);
-
- // Check for leaving the debugger.
- if (prev_ == NULL) {
- // Clear mirror cache when leaving the debugger. Skip this if there is a
- // pending exception as clearing the mirror cache calls back into
- // JavaScript. This can happen if the v8::Debug::Call is used in which
- // case the exception should end up in the calling code.
- if (!isolate_->has_pending_exception()) {
- // Try to avoid any pending debug break breaking in the clear mirror
- // cache JavaScript code.
- if (isolate_->stack_guard()->IsDebugBreak()) {
- debug->set_interrupts_pending(DEBUGBREAK);
- isolate_->stack_guard()->Continue(DEBUGBREAK);
- }
- debug->ClearMirrorCache();
- }
-
- // Request preemption and debug break when leaving the last debugger entry
- // if any of these where recorded while debugging.
- if (debug->is_interrupt_pending(PREEMPT)) {
- // This re-scheduling of preemption is to avoid starvation in some
- // debugging scenarios.
- debug->clear_interrupt_pending(PREEMPT);
- isolate_->stack_guard()->Preempt();
- }
- if (debug->is_interrupt_pending(DEBUGBREAK)) {
- debug->clear_interrupt_pending(DEBUGBREAK);
- isolate_->stack_guard()->DebugBreak();
- }
-
- // If there are commands in the queue when leaving the debugger request
- // that these commands are processed.
- if (isolate_->debugger()->HasCommands()) {
- isolate_->stack_guard()->DebugCommand();
- }
-
- // If leaving the debugger with the debugger no longer active unload it.
- if (!isolate_->debugger()->IsDebuggerActive()) {
- isolate_->debugger()->UnloadDebugger();
- }
- }
-
- // Leaving this debugger entry.
- debug->set_debugger_entry(prev_);
- }
-
- // Check whether the debugger could be entered.
- inline bool FailedToEnter() { return load_failed_; }
-
- // Check whether there are any JavaScript frames on the stack.
- inline bool HasJavaScriptFrames() { return has_js_frames_; }
-
- // Get the active context from before entering the debugger.
- inline Handle<Context> GetContext() { return save_.context(); }
-
- private:
- Isolate* isolate_;
- EnterDebugger* prev_; // Previous debugger entry if entered recursively.
- JavaScriptFrameIterator it_;
- const bool has_js_frames_; // Were there any JavaScript frames?
- StackFrame::Id break_frame_id_; // Previous break frame id.
- int break_id_; // Previous break id.
- bool load_failed_; // Did the debugger fail to load?
- SaveContext save_; // Saves previous context.
-};
-
-
-// Stack allocated class for disabling break.
-class DisableBreak BASE_EMBEDDED {
- public:
- explicit DisableBreak(bool disable_break) : isolate_(Isolate::Current()) {
- prev_disable_break_ = isolate_->debug()->disable_break();
- isolate_->debug()->set_disable_break(disable_break);
- }
- ~DisableBreak() {
- ASSERT(Isolate::Current() == isolate_);
- isolate_->debug()->set_disable_break(prev_disable_break_);
- }
-
- private:
- Isolate* isolate_;
- // The previous state of the disable break used to restore the value when this
- // object is destructed.
- bool prev_disable_break_;
-};
-
-
-// Debug_Address encapsulates the Address pointers used in generating debug
-// code.
-class Debug_Address {
- public:
- explicit Debug_Address(Debug::AddressId id) : id_(id) { }
-
- static Debug_Address AfterBreakTarget() {
- return Debug_Address(Debug::k_after_break_target_address);
- }
-
- static Debug_Address DebugBreakReturn() {
- return Debug_Address(Debug::k_debug_break_return_address);
- }
-
- static Debug_Address RestarterFrameFunctionPointer() {
- return Debug_Address(Debug::k_restarter_frame_function_pointer);
- }
-
- Address address(Isolate* isolate) const {
- Debug* debug = isolate->debug();
- switch (id_) {
- case Debug::k_after_break_target_address:
- return reinterpret_cast<Address>(debug->after_break_target_address());
- case Debug::k_debug_break_return_address:
- return reinterpret_cast<Address>(debug->debug_break_return_address());
- case Debug::k_debug_break_slot_address:
- return reinterpret_cast<Address>(debug->debug_break_slot_address());
- case Debug::k_restarter_frame_function_pointer:
- return reinterpret_cast<Address>(
- debug->restarter_frame_function_pointer_address());
- default:
- UNREACHABLE();
- return NULL;
- }
- }
- private:
- Debug::AddressId id_;
-};
-
-// The optional thread that Debug Agent may use to temporary call V8 to process
-// pending debug requests if debuggee is not running V8 at the moment.
-// Techincally it does not call V8 itself, rather it asks embedding program
-// to do this via v8::Debug::HostDispatchHandler
-class MessageDispatchHelperThread: public Thread {
- public:
- explicit MessageDispatchHelperThread(Isolate* isolate);
- ~MessageDispatchHelperThread();
-
- void Schedule();
-
- private:
- void Run();
-
- Semaphore* const sem_;
- Mutex* const mutex_;
- bool already_signalled_;
-
- DISALLOW_COPY_AND_ASSIGN(MessageDispatchHelperThread);
-};
-
-
-} } // namespace v8::internal
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-#endif // V8_DEBUG_H_
diff --git a/src/3rdparty/v8/src/deoptimizer.cc b/src/3rdparty/v8/src/deoptimizer.cc
deleted file mode 100644
index 0fed391..0000000
--- a/src/3rdparty/v8/src/deoptimizer.cc
+++ /dev/null
@@ -1,1296 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "disasm.h"
-#include "full-codegen.h"
-#include "global-handles.h"
-#include "macro-assembler.h"
-#include "prettyprinter.h"
-
-
-namespace v8 {
-namespace internal {
-
-DeoptimizerData::DeoptimizerData() {
- eager_deoptimization_entry_code_ = NULL;
- lazy_deoptimization_entry_code_ = NULL;
- current_ = NULL;
- deoptimizing_code_list_ = NULL;
-}
-
-
-DeoptimizerData::~DeoptimizerData() {
- if (eager_deoptimization_entry_code_ != NULL) {
- eager_deoptimization_entry_code_->Free(EXECUTABLE);
- eager_deoptimization_entry_code_ = NULL;
- }
- if (lazy_deoptimization_entry_code_ != NULL) {
- lazy_deoptimization_entry_code_->Free(EXECUTABLE);
- lazy_deoptimization_entry_code_ = NULL;
- }
-}
-
-Deoptimizer* Deoptimizer::New(JSFunction* function,
- BailoutType type,
- unsigned bailout_id,
- Address from,
- int fp_to_sp_delta,
- Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- Deoptimizer* deoptimizer = new Deoptimizer(isolate,
- function,
- type,
- bailout_id,
- from,
- fp_to_sp_delta);
- ASSERT(isolate->deoptimizer_data()->current_ == NULL);
- isolate->deoptimizer_data()->current_ = deoptimizer;
- return deoptimizer;
-}
-
-
-Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- Deoptimizer* result = isolate->deoptimizer_data()->current_;
- ASSERT(result != NULL);
- result->DeleteFrameDescriptions();
- isolate->deoptimizer_data()->current_ = NULL;
- return result;
-}
-
-
-void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
- int count,
- BailoutType type) {
- TableEntryGenerator generator(masm, type, count);
- generator.Generate();
-}
-
-
-class DeoptimizingVisitor : public OptimizedFunctionVisitor {
- public:
- virtual void EnterContext(Context* context) {
- if (FLAG_trace_deopt) {
- PrintF("[deoptimize context: %" V8PRIxPTR "]\n",
- reinterpret_cast<intptr_t>(context));
- }
- }
-
- virtual void VisitFunction(JSFunction* function) {
- Deoptimizer::DeoptimizeFunction(function);
- }
-
- virtual void LeaveContext(Context* context) {
- context->ClearOptimizedFunctions();
- }
-};
-
-
-void Deoptimizer::DeoptimizeAll() {
- AssertNoAllocation no_allocation;
-
- if (FLAG_trace_deopt) {
- PrintF("[deoptimize all contexts]\n");
- }
-
- DeoptimizingVisitor visitor;
- VisitAllOptimizedFunctions(&visitor);
-}
-
-
-void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
- AssertNoAllocation no_allocation;
-
- DeoptimizingVisitor visitor;
- VisitAllOptimizedFunctionsForGlobalObject(object, &visitor);
-}
-
-
-void Deoptimizer::VisitAllOptimizedFunctionsForContext(
- Context* context, OptimizedFunctionVisitor* visitor) {
- AssertNoAllocation no_allocation;
-
- ASSERT(context->IsGlobalContext());
-
- visitor->EnterContext(context);
- // Run through the list of optimized functions and deoptimize them.
- Object* element = context->OptimizedFunctionsListHead();
- while (!element->IsUndefined()) {
- JSFunction* element_function = JSFunction::cast(element);
- // Get the next link before deoptimizing as deoptimizing will clear the
- // next link.
- element = element_function->next_function_link();
- visitor->VisitFunction(element_function);
- }
- visitor->LeaveContext(context);
-}
-
-
-void Deoptimizer::VisitAllOptimizedFunctionsForGlobalObject(
- JSObject* object, OptimizedFunctionVisitor* visitor) {
- AssertNoAllocation no_allocation;
-
- if (object->IsJSGlobalProxy()) {
- Object* proto = object->GetPrototype();
- ASSERT(proto->IsJSGlobalObject());
- VisitAllOptimizedFunctionsForContext(
- GlobalObject::cast(proto)->global_context(), visitor);
- } else if (object->IsGlobalObject()) {
- VisitAllOptimizedFunctionsForContext(
- GlobalObject::cast(object)->global_context(), visitor);
- }
-}
-
-
-void Deoptimizer::VisitAllOptimizedFunctions(
- OptimizedFunctionVisitor* visitor) {
- AssertNoAllocation no_allocation;
-
- // Run through the list of all global contexts and deoptimize.
- Object* global = Isolate::Current()->heap()->global_contexts_list();
- while (!global->IsUndefined()) {
- VisitAllOptimizedFunctionsForGlobalObject(Context::cast(global)->global(),
- visitor);
- global = Context::cast(global)->get(Context::NEXT_CONTEXT_LINK);
- }
-}
-
-
-void Deoptimizer::HandleWeakDeoptimizedCode(
- v8::Persistent<v8::Value> obj, void* data) {
- DeoptimizingCodeListNode* node =
- reinterpret_cast<DeoptimizingCodeListNode*>(data);
- RemoveDeoptimizingCode(*node->code());
-#ifdef DEBUG
- node = Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_;
- while (node != NULL) {
- ASSERT(node != reinterpret_cast<DeoptimizingCodeListNode*>(data));
- node = node->next();
- }
-#endif
-}
-
-
-void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
- deoptimizer->DoComputeOutputFrames();
-}
-
-
-Deoptimizer::Deoptimizer(Isolate* isolate,
- JSFunction* function,
- BailoutType type,
- unsigned bailout_id,
- Address from,
- int fp_to_sp_delta)
- : isolate_(isolate),
- function_(function),
- bailout_id_(bailout_id),
- bailout_type_(type),
- from_(from),
- fp_to_sp_delta_(fp_to_sp_delta),
- output_count_(0),
- output_(NULL),
- integer32_values_(NULL),
- double_values_(NULL) {
- if (FLAG_trace_deopt && type != OSR) {
- PrintF("**** DEOPT: ");
- function->PrintName();
- PrintF(" at bailout #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
- bailout_id,
- reinterpret_cast<intptr_t>(from),
- fp_to_sp_delta - (2 * kPointerSize));
- } else if (FLAG_trace_osr && type == OSR) {
- PrintF("**** OSR: ");
- function->PrintName();
- PrintF(" at ast id #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
- bailout_id,
- reinterpret_cast<intptr_t>(from),
- fp_to_sp_delta - (2 * kPointerSize));
- }
- // Find the optimized code.
- if (type == EAGER) {
- ASSERT(from == NULL);
- optimized_code_ = function_->code();
- } else if (type == LAZY) {
- optimized_code_ = FindDeoptimizingCodeFromAddress(from);
- ASSERT(optimized_code_ != NULL);
- } else if (type == OSR) {
- // The function has already been optimized and we're transitioning
- // from the unoptimized shared version to the optimized one in the
- // function. The return address (from) points to unoptimized code.
- optimized_code_ = function_->code();
- ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION);
- ASSERT(!optimized_code_->contains(from));
- }
- ASSERT(HEAP->allow_allocation(false));
- unsigned size = ComputeInputFrameSize();
- input_ = new(size) FrameDescription(size, function);
-}
-
-
-Deoptimizer::~Deoptimizer() {
- ASSERT(input_ == NULL && output_ == NULL);
- delete[] integer32_values_;
- delete[] double_values_;
-}
-
-
-void Deoptimizer::DeleteFrameDescriptions() {
- delete input_;
- for (int i = 0; i < output_count_; ++i) {
- if (output_[i] != input_) delete output_[i];
- }
- delete[] output_;
- input_ = NULL;
- output_ = NULL;
- ASSERT(!HEAP->allow_allocation(true));
-}
-
-
-Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
- ASSERT(id >= 0);
- if (id >= kNumberOfEntries) return NULL;
- LargeObjectChunk* base = NULL;
- DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
- if (type == EAGER) {
- if (data->eager_deoptimization_entry_code_ == NULL) {
- data->eager_deoptimization_entry_code_ = CreateCode(type);
- }
- base = data->eager_deoptimization_entry_code_;
- } else {
- if (data->lazy_deoptimization_entry_code_ == NULL) {
- data->lazy_deoptimization_entry_code_ = CreateCode(type);
- }
- base = data->lazy_deoptimization_entry_code_;
- }
- return
- static_cast<Address>(base->GetStartAddress()) + (id * table_entry_size_);
-}
-
-
-int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
- LargeObjectChunk* base = NULL;
- DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
- if (type == EAGER) {
- base = data->eager_deoptimization_entry_code_;
- } else {
- base = data->lazy_deoptimization_entry_code_;
- }
- if (base == NULL ||
- addr < base->GetStartAddress() ||
- addr >= base->GetStartAddress() +
- (kNumberOfEntries * table_entry_size_)) {
- return kNotDeoptimizationEntry;
- }
- ASSERT_EQ(0,
- static_cast<int>(addr - base->GetStartAddress()) % table_entry_size_);
- return static_cast<int>(addr - base->GetStartAddress()) / table_entry_size_;
-}
-
-
-int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
- unsigned id,
- SharedFunctionInfo* shared) {
- // TODO(kasperl): For now, we do a simple linear search for the PC
- // offset associated with the given node id. This should probably be
- // changed to a binary search.
- int length = data->DeoptPoints();
- Smi* smi_id = Smi::FromInt(id);
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == smi_id) {
- return data->PcAndState(i)->value();
- }
- }
- PrintF("[couldn't find pc offset for node=%u]\n", id);
- PrintF("[method: %s]\n", *shared->DebugName()->ToCString());
- // Print the source code if available.
- HeapStringAllocator string_allocator;
- StringStream stream(&string_allocator);
- shared->SourceCodePrint(&stream, -1);
- PrintF("[source:\n%s\n]", *stream.ToCString());
-
- UNREACHABLE();
- return -1;
-}
-
-
-int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
- int length = 0;
- DeoptimizingCodeListNode* node =
- isolate->deoptimizer_data()->deoptimizing_code_list_;
- while (node != NULL) {
- length++;
- node = node->next();
- }
- return length;
-}
-
-
-void Deoptimizer::DoComputeOutputFrames() {
- if (bailout_type_ == OSR) {
- DoComputeOsrOutputFrame();
- return;
- }
-
- // Print some helpful diagnostic information.
- int64_t start = OS::Ticks();
- if (FLAG_trace_deopt) {
- PrintF("[deoptimizing%s: begin 0x%08" V8PRIxPTR " ",
- (bailout_type_ == LAZY ? " (lazy)" : ""),
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" @%d]\n", bailout_id_);
- }
-
- // Determine basic deoptimization information. The optimized frame is
- // described by the input data.
- DeoptimizationInputData* input_data =
- DeoptimizationInputData::cast(optimized_code_->deoptimization_data());
- unsigned node_id = input_data->AstId(bailout_id_)->value();
- ByteArray* translations = input_data->TranslationByteArray();
- unsigned translation_index =
- input_data->TranslationIndex(bailout_id_)->value();
-
- // Do the input frame to output frame(s) translation.
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- // Read the number of output frames and allocate an array for their
- // descriptions.
- int count = iterator.Next();
- ASSERT(output_ == NULL);
- output_ = new FrameDescription*[count];
- // Per-frame lists of untagged and unboxed int32 and double values.
- integer32_values_ = new List<ValueDescriptionInteger32>[count];
- double_values_ = new List<ValueDescriptionDouble>[count];
- for (int i = 0; i < count; ++i) {
- output_[i] = NULL;
- integer32_values_[i].Initialize(0);
- double_values_[i].Initialize(0);
- }
- output_count_ = count;
-
- // Translate each output frame.
- for (int i = 0; i < count; ++i) {
- DoComputeFrame(&iterator, i);
- }
-
- // Print some helpful diagnostic information.
- if (FLAG_trace_deopt) {
- double ms = static_cast<double>(OS::Ticks() - start) / 1000;
- int index = output_count_ - 1; // Index of the topmost frame.
- JSFunction* function = output_[index]->GetFunction();
- PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function));
- function->PrintName();
- PrintF(" => node=%u, pc=0x%08" V8PRIxPTR ", state=%s, took %0.3f ms]\n",
- node_id,
- output_[index]->GetPc(),
- FullCodeGenerator::State2String(
- static_cast<FullCodeGenerator::State>(
- output_[index]->GetState()->value())),
- ms);
- }
-}
-
-
-void Deoptimizer::InsertHeapNumberValues(int index, JavaScriptFrame* frame) {
- // We need to adjust the stack index by one for the top-most frame.
- int extra_slot_count = (index == output_count() - 1) ? 1 : 0;
- List<ValueDescriptionInteger32>* ints = &integer32_values_[index];
- for (int i = 0; i < ints->length(); i++) {
- ValueDescriptionInteger32 value = ints->at(i);
- double val = static_cast<double>(value.int32_value());
- InsertHeapNumberValue(frame, value.stack_index(), val, extra_slot_count);
- }
-
- // Iterate over double values and convert them to a heap number.
- List<ValueDescriptionDouble>* doubles = &double_values_[index];
- for (int i = 0; i < doubles->length(); ++i) {
- ValueDescriptionDouble value = doubles->at(i);
- InsertHeapNumberValue(frame, value.stack_index(), value.double_value(),
- extra_slot_count);
- }
-}
-
-
-void Deoptimizer::InsertHeapNumberValue(JavaScriptFrame* frame,
- int stack_index,
- double val,
- int extra_slot_count) {
- // Add one to the TOS index to take the 'state' pushed before jumping
- // to the stub that calls Runtime::NotifyDeoptimized into account.
- int tos_index = stack_index + extra_slot_count;
- int index = (frame->ComputeExpressionsCount() - 1) - tos_index;
- if (FLAG_trace_deopt) PrintF("Allocating a new heap number: %e\n", val);
- Handle<Object> num = isolate_->factory()->NewNumber(val);
- frame->SetExpression(index, *num);
-}
-
-
-void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
- int frame_index,
- unsigned output_offset) {
- disasm::NameConverter converter;
- // A GC-safe temporary placeholder that we can put in the output frame.
- const intptr_t kPlaceholder = reinterpret_cast<intptr_t>(Smi::FromInt(0));
-
- // Ignore commands marked as duplicate and act on the first non-duplicate.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- while (opcode == Translation::DUPLICATE) {
- opcode = static_cast<Translation::Opcode>(iterator->Next());
- iterator->Skip(Translation::NumberOfOperandsFor(opcode));
- opcode = static_cast<Translation::Opcode>(iterator->Next());
- }
-
- switch (opcode) {
- case Translation::BEGIN:
- case Translation::FRAME:
- case Translation::DUPLICATE:
- UNREACHABLE();
- return;
-
- case Translation::REGISTER: {
- int input_reg = iterator->Next();
- intptr_t input_value = input_->GetRegister(input_reg);
- if (FLAG_trace_deopt) {
- PrintF(
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s\n",
- output_[frame_index]->GetTop() + output_offset,
- output_offset,
- input_value,
- converter.NameOfCPURegister(input_reg));
- }
- output_[frame_index]->SetFrameSlot(output_offset, input_value);
- return;
- }
-
- case Translation::INT32_REGISTER: {
- int input_reg = iterator->Next();
- intptr_t value = input_->GetRegister(input_reg);
- bool is_smi = Smi::IsValid(value);
- unsigned output_index = output_offset / kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(
- " 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n",
- output_[frame_index]->GetTop() + output_offset,
- output_offset,
- value,
- converter.NameOfCPURegister(input_reg),
- is_smi ? "smi" : "heap number");
- }
- if (is_smi) {
- intptr_t tagged_value =
- reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
- output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
- } else {
- // We save the untagged value on the side and store a GC-safe
- // temporary placeholder in the frame.
- AddInteger32Value(frame_index,
- output_index,
- static_cast<int32_t>(value));
- output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
- }
- return;
- }
-
- case Translation::DOUBLE_REGISTER: {
- int input_reg = iterator->Next();
- double value = input_->GetDoubleRegister(input_reg);
- unsigned output_index = output_offset / kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; %s\n",
- output_[frame_index]->GetTop() + output_offset,
- output_offset,
- value,
- DoubleRegister::AllocationIndexToString(input_reg));
- }
- // We save the untagged value on the side and store a GC-safe
- // temporary placeholder in the frame.
- AddDoubleValue(frame_index, output_index, value);
- output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
- return;
- }
-
- case Translation::STACK_SLOT: {
- int input_slot_index = iterator->Next();
- unsigned input_offset =
- input_->GetOffsetFromSlotIndex(this, input_slot_index);
- intptr_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": ",
- output_[frame_index]->GetTop() + output_offset);
- PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
- output_offset,
- input_value,
- input_offset);
- }
- output_[frame_index]->SetFrameSlot(output_offset, input_value);
- return;
- }
-
- case Translation::INT32_STACK_SLOT: {
- int input_slot_index = iterator->Next();
- unsigned input_offset =
- input_->GetOffsetFromSlotIndex(this, input_slot_index);
- intptr_t value = input_->GetFrameSlot(input_offset);
- bool is_smi = Smi::IsValid(value);
- unsigned output_index = output_offset / kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": ",
- output_[frame_index]->GetTop() + output_offset);
- PrintF("[top + %d] <- %" V8PRIdPTR " ; [esp + %d] (%s)\n",
- output_offset,
- value,
- input_offset,
- is_smi ? "smi" : "heap number");
- }
- if (is_smi) {
- intptr_t tagged_value =
- reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
- output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
- } else {
- // We save the untagged value on the side and store a GC-safe
- // temporary placeholder in the frame.
- AddInteger32Value(frame_index,
- output_index,
- static_cast<int32_t>(value));
- output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
- }
- return;
- }
-
- case Translation::DOUBLE_STACK_SLOT: {
- int input_slot_index = iterator->Next();
- unsigned input_offset =
- input_->GetOffsetFromSlotIndex(this, input_slot_index);
- double value = input_->GetDoubleFrameSlot(input_offset);
- unsigned output_index = output_offset / kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [esp + %d]\n",
- output_[frame_index]->GetTop() + output_offset,
- output_offset,
- value,
- input_offset);
- }
- // We save the untagged value on the side and store a GC-safe
- // temporary placeholder in the frame.
- AddDoubleValue(frame_index, output_index, value);
- output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
- return;
- }
-
- case Translation::LITERAL: {
- Object* literal = ComputeLiteral(iterator->Next());
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
- output_[frame_index]->GetTop() + output_offset,
- output_offset);
- literal->ShortPrint();
- PrintF(" ; literal\n");
- }
- intptr_t value = reinterpret_cast<intptr_t>(literal);
- output_[frame_index]->SetFrameSlot(output_offset, value);
- return;
- }
-
- case Translation::ARGUMENTS_OBJECT: {
- // Use the arguments marker value as a sentinel and fill in the arguments
- // object after the deoptimized frame is built.
- ASSERT(frame_index == 0); // Only supported for first frame.
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
- output_[frame_index]->GetTop() + output_offset,
- output_offset);
- isolate_->heap()->arguments_marker()->ShortPrint();
- PrintF(" ; arguments object\n");
- }
- intptr_t value = reinterpret_cast<intptr_t>(
- isolate_->heap()->arguments_marker());
- output_[frame_index]->SetFrameSlot(output_offset, value);
- return;
- }
- }
-}
-
-
-bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
- int* input_offset) {
- disasm::NameConverter converter;
- FrameDescription* output = output_[0];
-
- // The input values are all part of the unoptimized frame so they
- // are all tagged pointers.
- uintptr_t input_value = input_->GetFrameSlot(*input_offset);
- Object* input_object = reinterpret_cast<Object*>(input_value);
-
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- bool duplicate = (opcode == Translation::DUPLICATE);
- if (duplicate) {
- opcode = static_cast<Translation::Opcode>(iterator->Next());
- }
-
- switch (opcode) {
- case Translation::BEGIN:
- case Translation::FRAME:
- case Translation::DUPLICATE:
- UNREACHABLE(); // Malformed input.
- return false;
-
- case Translation::REGISTER: {
- int output_reg = iterator->Next();
- if (FLAG_trace_osr) {
- PrintF(" %s <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
- converter.NameOfCPURegister(output_reg),
- input_value,
- *input_offset);
- }
- output->SetRegister(output_reg, input_value);
- break;
- }
-
- case Translation::INT32_REGISTER: {
- // Abort OSR if we don't have a number.
- if (!input_object->IsNumber()) return false;
-
- int output_reg = iterator->Next();
- int int32_value = input_object->IsSmi()
- ? Smi::cast(input_object)->value()
- : FastD2I(input_object->Number());
- // Abort the translation if the conversion lost information.
- if (!input_object->IsSmi() &&
- FastI2D(int32_value) != input_object->Number()) {
- if (FLAG_trace_osr) {
- PrintF("**** %g could not be converted to int32 ****\n",
- input_object->Number());
- }
- return false;
- }
- if (FLAG_trace_osr) {
- PrintF(" %s <- %d (int32) ; [sp + %d]\n",
- converter.NameOfCPURegister(output_reg),
- int32_value,
- *input_offset);
- }
- output->SetRegister(output_reg, int32_value);
- break;
- }
-
- case Translation::DOUBLE_REGISTER: {
- // Abort OSR if we don't have a number.
- if (!input_object->IsNumber()) return false;
-
- int output_reg = iterator->Next();
- double double_value = input_object->Number();
- if (FLAG_trace_osr) {
- PrintF(" %s <- %g (double) ; [sp + %d]\n",
- DoubleRegister::AllocationIndexToString(output_reg),
- double_value,
- *input_offset);
- }
- output->SetDoubleRegister(output_reg, double_value);
- break;
- }
-
- case Translation::STACK_SLOT: {
- int output_index = iterator->Next();
- unsigned output_offset =
- output->GetOffsetFromSlotIndex(this, output_index);
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
- output_offset,
- input_value,
- *input_offset);
- }
- output->SetFrameSlot(output_offset, input_value);
- break;
- }
-
- case Translation::INT32_STACK_SLOT: {
- // Abort OSR if we don't have a number.
- if (!input_object->IsNumber()) return false;
-
- int output_index = iterator->Next();
- unsigned output_offset =
- output->GetOffsetFromSlotIndex(this, output_index);
- int int32_value = input_object->IsSmi()
- ? Smi::cast(input_object)->value()
- : DoubleToInt32(input_object->Number());
- // Abort the translation if the conversion lost information.
- if (!input_object->IsSmi() &&
- FastI2D(int32_value) != input_object->Number()) {
- if (FLAG_trace_osr) {
- PrintF("**** %g could not be converted to int32 ****\n",
- input_object->Number());
- }
- return false;
- }
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- %d (int32) ; [sp + %d]\n",
- output_offset,
- int32_value,
- *input_offset);
- }
- output->SetFrameSlot(output_offset, int32_value);
- break;
- }
-
- case Translation::DOUBLE_STACK_SLOT: {
- static const int kLowerOffset = 0 * kPointerSize;
- static const int kUpperOffset = 1 * kPointerSize;
-
- // Abort OSR if we don't have a number.
- if (!input_object->IsNumber()) return false;
-
- int output_index = iterator->Next();
- unsigned output_offset =
- output->GetOffsetFromSlotIndex(this, output_index);
- double double_value = input_object->Number();
- uint64_t int_value = BitCast<uint64_t, double>(double_value);
- int32_t lower = static_cast<int32_t>(int_value);
- int32_t upper = static_cast<int32_t>(int_value >> kBitsPerInt);
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- 0x%08x (upper bits of %g) ; [sp + %d]\n",
- output_offset + kUpperOffset,
- upper,
- double_value,
- *input_offset);
- PrintF(" [sp + %d] <- 0x%08x (lower bits of %g) ; [sp + %d]\n",
- output_offset + kLowerOffset,
- lower,
- double_value,
- *input_offset);
- }
- output->SetFrameSlot(output_offset + kLowerOffset, lower);
- output->SetFrameSlot(output_offset + kUpperOffset, upper);
- break;
- }
-
- case Translation::LITERAL: {
- // Just ignore non-materialized literals.
- iterator->Next();
- break;
- }
-
- case Translation::ARGUMENTS_OBJECT: {
- // Optimized code assumes that the argument object has not been
- // materialized and so bypasses it when doing arguments access.
- // We should have bailed out before starting the frame
- // translation.
- UNREACHABLE();
- return false;
- }
- }
-
- if (!duplicate) *input_offset -= kPointerSize;
- return true;
-}
-
-
-void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
- Code* check_code,
- Code* replacement_code) {
- // Iterate over the stack check table and patch every stack check
- // call to an unconditional call to the replacement code.
- ASSERT(unoptimized_code->kind() == Code::FUNCTION);
- Address stack_check_cursor = unoptimized_code->instruction_start() +
- unoptimized_code->stack_check_table_offset();
- uint32_t table_length = Memory::uint32_at(stack_check_cursor);
- stack_check_cursor += kIntSize;
- for (uint32_t i = 0; i < table_length; ++i) {
- uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
- Address pc_after = unoptimized_code->instruction_start() + pc_offset;
- PatchStackCheckCodeAt(pc_after, check_code, replacement_code);
- stack_check_cursor += 2 * kIntSize;
- }
-}
-
-
-void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
- Code* check_code,
- Code* replacement_code) {
- // Iterate over the stack check table and revert the patched
- // stack check calls.
- ASSERT(unoptimized_code->kind() == Code::FUNCTION);
- Address stack_check_cursor = unoptimized_code->instruction_start() +
- unoptimized_code->stack_check_table_offset();
- uint32_t table_length = Memory::uint32_at(stack_check_cursor);
- stack_check_cursor += kIntSize;
- for (uint32_t i = 0; i < table_length; ++i) {
- uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
- Address pc_after = unoptimized_code->instruction_start() + pc_offset;
- RevertStackCheckCodeAt(pc_after, check_code, replacement_code);
- stack_check_cursor += 2 * kIntSize;
- }
-}
-
-
-unsigned Deoptimizer::ComputeInputFrameSize() const {
- unsigned fixed_size = ComputeFixedSize(function_);
- // The fp-to-sp delta already takes the context and the function
- // into account so we have to avoid double counting them (-2).
- unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize);
-#ifdef DEBUG
- if (bailout_type_ == OSR) {
- // TODO(kasperl): It would be nice if we could verify that the
- // size matches with the stack height we can compute based on the
- // environment at the OSR entry. The code for that his built into
- // the DoComputeOsrOutputFrame function for now.
- } else {
- unsigned stack_slots = optimized_code_->stack_slots();
- unsigned outgoing_size = ComputeOutgoingArgumentSize();
- ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
- }
-#endif
- return result;
-}
-
-
-unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
- // The fixed part of the frame consists of the return address, frame
- // pointer, function, context, and all the incoming arguments.
- static const unsigned kFixedSlotSize = 4 * kPointerSize;
- return ComputeIncomingArgumentSize(function) + kFixedSlotSize;
-}
-
-
-unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
- // The incoming arguments is the values for formal parameters and
- // the receiver. Every slot contains a pointer.
- unsigned arguments = function->shared()->formal_parameter_count() + 1;
- return arguments * kPointerSize;
-}
-
-
-unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
- unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
- return height * kPointerSize;
-}
-
-
-Object* Deoptimizer::ComputeLiteral(int index) const {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
- FixedArray* literals = data->LiteralArray();
- return literals->get(index);
-}
-
-
-void Deoptimizer::AddInteger32Value(int frame_index,
- int slot_index,
- int32_t value) {
- ValueDescriptionInteger32 value_desc(slot_index, value);
- integer32_values_[frame_index].Add(value_desc);
-}
-
-
-void Deoptimizer::AddDoubleValue(int frame_index,
- int slot_index,
- double value) {
- ValueDescriptionDouble value_desc(slot_index, value);
- double_values_[frame_index].Add(value_desc);
-}
-
-
-LargeObjectChunk* Deoptimizer::CreateCode(BailoutType type) {
- // We cannot run this if the serializer is enabled because this will
- // cause us to emit relocation information for the external
- // references. This is fine because the deoptimizer's code section
- // isn't meant to be serialized at all.
- ASSERT(!Serializer::enabled());
-
- MacroAssembler masm(Isolate::Current(), NULL, 16 * KB);
- masm.set_emit_debug_code(false);
- GenerateDeoptimizationEntries(&masm, kNumberOfEntries, type);
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(desc.reloc_size == 0);
-
- LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
- memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
- CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
- return chunk;
-}
-
-
-Code* Deoptimizer::FindDeoptimizingCodeFromAddress(Address addr) {
- DeoptimizingCodeListNode* node =
- Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_;
- while (node != NULL) {
- if (node->code()->contains(addr)) return *node->code();
- node = node->next();
- }
- return NULL;
-}
-
-
-void Deoptimizer::RemoveDeoptimizingCode(Code* code) {
- DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
- ASSERT(data->deoptimizing_code_list_ != NULL);
- // Run through the code objects to find this one and remove it.
- DeoptimizingCodeListNode* prev = NULL;
- DeoptimizingCodeListNode* current = data->deoptimizing_code_list_;
- while (current != NULL) {
- if (*current->code() == code) {
- // Unlink from list. If prev is NULL we are looking at the first element.
- if (prev == NULL) {
- data->deoptimizing_code_list_ = current->next();
- } else {
- prev->set_next(current->next());
- }
- delete current;
- return;
- }
- // Move to next in list.
- prev = current;
- current = current->next();
- }
- // Deoptimizing code is removed through weak callback. Each object is expected
- // to be removed once and only once.
- UNREACHABLE();
-}
-
-
-FrameDescription::FrameDescription(uint32_t frame_size,
- JSFunction* function)
- : frame_size_(frame_size),
- function_(function),
- top_(kZapUint32),
- pc_(kZapUint32),
- fp_(kZapUint32) {
- // Zap all the registers.
- for (int r = 0; r < Register::kNumRegisters; r++) {
- SetRegister(r, kZapUint32);
- }
-
- // Zap all the slots.
- for (unsigned o = 0; o < frame_size; o += kPointerSize) {
- SetFrameSlot(o, kZapUint32);
- }
-}
-
-
-unsigned FrameDescription::GetOffsetFromSlotIndex(Deoptimizer* deoptimizer,
- int slot_index) {
- if (slot_index >= 0) {
- // Local or spill slots. Skip the fixed part of the frame
- // including all arguments.
- unsigned base = static_cast<unsigned>(
- GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction()));
- return base - ((slot_index + 1) * kPointerSize);
- } else {
- // Incoming parameter.
- unsigned base = static_cast<unsigned>(GetFrameSize() -
- deoptimizer->ComputeIncomingArgumentSize(GetFunction()));
- return base - ((slot_index + 1) * kPointerSize);
- }
-}
-
-
-void TranslationBuffer::Add(int32_t value) {
- // Encode the sign bit in the least significant bit.
- bool is_negative = (value < 0);
- uint32_t bits = ((is_negative ? -value : value) << 1) |
- static_cast<int32_t>(is_negative);
- // Encode the individual bytes using the least significant bit of
- // each byte to indicate whether or not more bytes follow.
- do {
- uint32_t next = bits >> 7;
- contents_.Add(((bits << 1) & 0xFF) | (next != 0));
- bits = next;
- } while (bits != 0);
-}
-
-
-int32_t TranslationIterator::Next() {
- ASSERT(HasNext());
- // Run through the bytes until we reach one with a least significant
- // bit of zero (marks the end).
- uint32_t bits = 0;
- for (int i = 0; true; i += 7) {
- uint8_t next = buffer_->get(index_++);
- bits |= (next >> 1) << i;
- if ((next & 1) == 0) break;
- }
- // The bits encode the sign in the least significant bit.
- bool is_negative = (bits & 1) == 1;
- int32_t result = bits >> 1;
- return is_negative ? -result : result;
-}
-
-
-Handle<ByteArray> TranslationBuffer::CreateByteArray() {
- int length = contents_.length();
- Handle<ByteArray> result =
- Isolate::Current()->factory()->NewByteArray(length, TENURED);
- memcpy(result->GetDataStartAddress(), contents_.ToVector().start(), length);
- return result;
-}
-
-
-void Translation::BeginFrame(int node_id, int literal_id, unsigned height) {
- buffer_->Add(FRAME);
- buffer_->Add(node_id);
- buffer_->Add(literal_id);
- buffer_->Add(height);
-}
-
-
-void Translation::StoreRegister(Register reg) {
- buffer_->Add(REGISTER);
- buffer_->Add(reg.code());
-}
-
-
-void Translation::StoreInt32Register(Register reg) {
- buffer_->Add(INT32_REGISTER);
- buffer_->Add(reg.code());
-}
-
-
-void Translation::StoreDoubleRegister(DoubleRegister reg) {
- buffer_->Add(DOUBLE_REGISTER);
- buffer_->Add(DoubleRegister::ToAllocationIndex(reg));
-}
-
-
-void Translation::StoreStackSlot(int index) {
- buffer_->Add(STACK_SLOT);
- buffer_->Add(index);
-}
-
-
-void Translation::StoreInt32StackSlot(int index) {
- buffer_->Add(INT32_STACK_SLOT);
- buffer_->Add(index);
-}
-
-
-void Translation::StoreDoubleStackSlot(int index) {
- buffer_->Add(DOUBLE_STACK_SLOT);
- buffer_->Add(index);
-}
-
-
-void Translation::StoreLiteral(int literal_id) {
- buffer_->Add(LITERAL);
- buffer_->Add(literal_id);
-}
-
-
-void Translation::StoreArgumentsObject() {
- buffer_->Add(ARGUMENTS_OBJECT);
-}
-
-
-void Translation::MarkDuplicate() {
- buffer_->Add(DUPLICATE);
-}
-
-
-int Translation::NumberOfOperandsFor(Opcode opcode) {
- switch (opcode) {
- case ARGUMENTS_OBJECT:
- case DUPLICATE:
- return 0;
- case BEGIN:
- case REGISTER:
- case INT32_REGISTER:
- case DOUBLE_REGISTER:
- case STACK_SLOT:
- case INT32_STACK_SLOT:
- case DOUBLE_STACK_SLOT:
- case LITERAL:
- return 1;
- case FRAME:
- return 3;
- }
- UNREACHABLE();
- return -1;
-}
-
-
-#ifdef OBJECT_PRINT
-
-const char* Translation::StringFor(Opcode opcode) {
- switch (opcode) {
- case BEGIN:
- return "BEGIN";
- case FRAME:
- return "FRAME";
- case REGISTER:
- return "REGISTER";
- case INT32_REGISTER:
- return "INT32_REGISTER";
- case DOUBLE_REGISTER:
- return "DOUBLE_REGISTER";
- case STACK_SLOT:
- return "STACK_SLOT";
- case INT32_STACK_SLOT:
- return "INT32_STACK_SLOT";
- case DOUBLE_STACK_SLOT:
- return "DOUBLE_STACK_SLOT";
- case LITERAL:
- return "LITERAL";
- case ARGUMENTS_OBJECT:
- return "ARGUMENTS_OBJECT";
- case DUPLICATE:
- return "DUPLICATE";
- }
- UNREACHABLE();
- return "";
-}
-
-#endif
-
-
-DeoptimizingCodeListNode::DeoptimizingCodeListNode(Code* code): next_(NULL) {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
- // Globalize the code object and make it weak.
- code_ = Handle<Code>::cast(global_handles->Create(code));
- global_handles->MakeWeak(reinterpret_cast<Object**>(code_.location()),
- this,
- Deoptimizer::HandleWeakDeoptimizedCode);
-}
-
-
-DeoptimizingCodeListNode::~DeoptimizingCodeListNode() {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
- global_handles->Destroy(reinterpret_cast<Object**>(code_.location()));
-}
-
-
-// We can't intermix stack decoding and allocations because
-// deoptimization infrastracture is not GC safe.
-// Thus we build a temporary structure in malloced space.
-SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
- DeoptimizationInputData* data,
- JavaScriptFrame* frame) {
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
-
- switch (opcode) {
- case Translation::BEGIN:
- case Translation::FRAME:
- // Peeled off before getting here.
- break;
-
- case Translation::ARGUMENTS_OBJECT:
- // This can be only emitted for local slots not for argument slots.
- break;
-
- case Translation::REGISTER:
- case Translation::INT32_REGISTER:
- case Translation::DOUBLE_REGISTER:
- case Translation::DUPLICATE:
- // We are at safepoint which corresponds to call. All registers are
- // saved by caller so there would be no live registers at this
- // point. Thus these translation commands should not be used.
- break;
-
- case Translation::STACK_SLOT: {
- int slot_index = iterator->Next();
- Address slot_addr = SlotAddress(frame, slot_index);
- return SlotRef(slot_addr, SlotRef::TAGGED);
- }
-
- case Translation::INT32_STACK_SLOT: {
- int slot_index = iterator->Next();
- Address slot_addr = SlotAddress(frame, slot_index);
- return SlotRef(slot_addr, SlotRef::INT32);
- }
-
- case Translation::DOUBLE_STACK_SLOT: {
- int slot_index = iterator->Next();
- Address slot_addr = SlotAddress(frame, slot_index);
- return SlotRef(slot_addr, SlotRef::DOUBLE);
- }
-
- case Translation::LITERAL: {
- int literal_index = iterator->Next();
- return SlotRef(data->LiteralArray()->get(literal_index));
- }
- }
-
- UNREACHABLE();
- return SlotRef();
-}
-
-
-void SlotRef::ComputeSlotMappingForArguments(JavaScriptFrame* frame,
- int inlined_frame_index,
- Vector<SlotRef>* args_slots) {
- AssertNoAllocation no_gc;
- int deopt_index = AstNode::kNoNumber;
- DeoptimizationInputData* data =
- static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
- TranslationIterator it(data->TranslationByteArray(),
- data->TranslationIndex(deopt_index)->value());
- Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
- ASSERT(opcode == Translation::BEGIN);
- int frame_count = it.Next();
- USE(frame_count);
- ASSERT(frame_count > inlined_frame_index);
- int frames_to_skip = inlined_frame_index;
- while (true) {
- opcode = static_cast<Translation::Opcode>(it.Next());
- // Skip over operands to advance to the next opcode.
- it.Skip(Translation::NumberOfOperandsFor(opcode));
- if (opcode == Translation::FRAME) {
- if (frames_to_skip == 0) {
- // We reached the frame corresponding to the inlined function
- // in question. Process the translation commands for the
- // arguments.
- //
- // Skip the translation command for the receiver.
- it.Skip(Translation::NumberOfOperandsFor(
- static_cast<Translation::Opcode>(it.Next())));
- // Compute slots for arguments.
- for (int i = 0; i < args_slots->length(); ++i) {
- (*args_slots)[i] = ComputeSlotForNextArgument(&it, data, frame);
- }
- return;
- }
- frames_to_skip--;
- }
- }
-
- UNREACHABLE();
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/deoptimizer.h b/src/3rdparty/v8/src/deoptimizer.h
deleted file mode 100644
index 514de05..0000000
--- a/src/3rdparty/v8/src/deoptimizer.h
+++ /dev/null
@@ -1,629 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DEOPTIMIZER_H_
-#define V8_DEOPTIMIZER_H_
-
-#include "v8.h"
-
-#include "macro-assembler.h"
-#include "zone-inl.h"
-
-
-namespace v8 {
-namespace internal {
-
-class FrameDescription;
-class TranslationIterator;
-class DeoptimizingCodeListNode;
-
-
-class ValueDescription BASE_EMBEDDED {
- public:
- explicit ValueDescription(int index) : stack_index_(index) { }
- int stack_index() const { return stack_index_; }
-
- private:
- // Offset relative to the top of the stack.
- int stack_index_;
-};
-
-
-class ValueDescriptionInteger32: public ValueDescription {
- public:
- ValueDescriptionInteger32(int index, int32_t value)
- : ValueDescription(index), int32_value_(value) { }
- int32_t int32_value() const { return int32_value_; }
-
- private:
- // Raw value.
- int32_t int32_value_;
-};
-
-
-class ValueDescriptionDouble: public ValueDescription {
- public:
- ValueDescriptionDouble(int index, double value)
- : ValueDescription(index), double_value_(value) { }
- double double_value() const { return double_value_; }
-
- private:
- // Raw value.
- double double_value_;
-};
-
-
-class OptimizedFunctionVisitor BASE_EMBEDDED {
- public:
- virtual ~OptimizedFunctionVisitor() {}
-
- // Function which is called before iteration of any optimized functions
- // from given global context.
- virtual void EnterContext(Context* context) = 0;
-
- virtual void VisitFunction(JSFunction* function) = 0;
-
- // Function which is called after iteration of all optimized functions
- // from given global context.
- virtual void LeaveContext(Context* context) = 0;
-};
-
-
-class Deoptimizer;
-
-
-class DeoptimizerData {
- public:
- DeoptimizerData();
- ~DeoptimizerData();
-
- private:
- LargeObjectChunk* eager_deoptimization_entry_code_;
- LargeObjectChunk* lazy_deoptimization_entry_code_;
- Deoptimizer* current_;
-
- // List of deoptimized code which still have references from active stack
- // frames. These code objects are needed by the deoptimizer when deoptimizing
- // a frame for which the code object for the function function has been
- // changed from the code present when deoptimizing was done.
- DeoptimizingCodeListNode* deoptimizing_code_list_;
-
- friend class Deoptimizer;
-
- DISALLOW_COPY_AND_ASSIGN(DeoptimizerData);
-};
-
-
-class Deoptimizer : public Malloced {
- public:
- enum BailoutType {
- EAGER,
- LAZY,
- OSR
- };
-
- int output_count() const { return output_count_; }
-
- static Deoptimizer* New(JSFunction* function,
- BailoutType type,
- unsigned bailout_id,
- Address from,
- int fp_to_sp_delta,
- Isolate* isolate);
- static Deoptimizer* Grab(Isolate* isolate);
-
- // Makes sure that there is enough room in the relocation
- // information of a code object to perform lazy deoptimization
- // patching. If there is not enough room a new relocation
- // information object is allocated and comments are added until it
- // is big enough.
- static void EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code);
-
- // Deoptimize the function now. Its current optimized code will never be run
- // again and any activations of the optimized code will get deoptimized when
- // execution returns.
- static void DeoptimizeFunction(JSFunction* function);
-
- // Deoptimize all functions in the heap.
- static void DeoptimizeAll();
-
- static void DeoptimizeGlobalObject(JSObject* object);
-
- static void VisitAllOptimizedFunctionsForContext(
- Context* context, OptimizedFunctionVisitor* visitor);
-
- static void VisitAllOptimizedFunctionsForGlobalObject(
- JSObject* object, OptimizedFunctionVisitor* visitor);
-
- static void VisitAllOptimizedFunctions(OptimizedFunctionVisitor* visitor);
-
- // The size in bytes of the code required at a lazy deopt patch site.
- static int patch_size();
-
- // Patch all stack guard checks in the unoptimized code to
- // unconditionally call replacement_code.
- static void PatchStackCheckCode(Code* unoptimized_code,
- Code* check_code,
- Code* replacement_code);
-
- // Patch stack guard check at instruction before pc_after in
- // the unoptimized code to unconditionally call replacement_code.
- static void PatchStackCheckCodeAt(Address pc_after,
- Code* check_code,
- Code* replacement_code);
-
- // Change all patched stack guard checks in the unoptimized code
- // back to a normal stack guard check.
- static void RevertStackCheckCode(Code* unoptimized_code,
- Code* check_code,
- Code* replacement_code);
-
- // Change all patched stack guard checks in the unoptimized code
- // back to a normal stack guard check.
- static void RevertStackCheckCodeAt(Address pc_after,
- Code* check_code,
- Code* replacement_code);
-
- ~Deoptimizer();
-
- void InsertHeapNumberValues(int index, JavaScriptFrame* frame);
-
- static void ComputeOutputFrames(Deoptimizer* deoptimizer);
-
- static Address GetDeoptimizationEntry(int id, BailoutType type);
- static int GetDeoptimizationId(Address addr, BailoutType type);
- static int GetOutputInfo(DeoptimizationOutputData* data,
- unsigned node_id,
- SharedFunctionInfo* shared);
-
- // Code generation support.
- static int input_offset() { return OFFSET_OF(Deoptimizer, input_); }
- static int output_count_offset() {
- return OFFSET_OF(Deoptimizer, output_count_);
- }
- static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
-
- static int GetDeoptimizedCodeCount(Isolate* isolate);
-
- static const int kNotDeoptimizationEntry = -1;
-
- // Generators for the deoptimization entry code.
- class EntryGenerator BASE_EMBEDDED {
- public:
- EntryGenerator(MacroAssembler* masm, BailoutType type)
- : masm_(masm), type_(type) { }
- virtual ~EntryGenerator() { }
-
- void Generate();
-
- protected:
- MacroAssembler* masm() const { return masm_; }
- BailoutType type() const { return type_; }
-
- virtual void GeneratePrologue() { }
-
- private:
- MacroAssembler* masm_;
- Deoptimizer::BailoutType type_;
- };
-
- class TableEntryGenerator : public EntryGenerator {
- public:
- TableEntryGenerator(MacroAssembler* masm, BailoutType type, int count)
- : EntryGenerator(masm, type), count_(count) { }
-
- protected:
- virtual void GeneratePrologue();
-
- private:
- int count() const { return count_; }
-
- int count_;
- };
-
- private:
- static const int kNumberOfEntries = 4096;
-
- Deoptimizer(Isolate* isolate,
- JSFunction* function,
- BailoutType type,
- unsigned bailout_id,
- Address from,
- int fp_to_sp_delta);
- void DeleteFrameDescriptions();
-
- void DoComputeOutputFrames();
- void DoComputeOsrOutputFrame();
- void DoComputeFrame(TranslationIterator* iterator, int frame_index);
- void DoTranslateCommand(TranslationIterator* iterator,
- int frame_index,
- unsigned output_offset);
- // Translate a command for OSR. Updates the input offset to be used for
- // the next command. Returns false if translation of the command failed
- // (e.g., a number conversion failed) and may or may not have updated the
- // input offset.
- bool DoOsrTranslateCommand(TranslationIterator* iterator,
- int* input_offset);
-
- unsigned ComputeInputFrameSize() const;
- unsigned ComputeFixedSize(JSFunction* function) const;
-
- unsigned ComputeIncomingArgumentSize(JSFunction* function) const;
- unsigned ComputeOutgoingArgumentSize() const;
-
- Object* ComputeLiteral(int index) const;
-
- void InsertHeapNumberValue(JavaScriptFrame* frame,
- int stack_index,
- double val,
- int extra_slot_count);
-
- void AddInteger32Value(int frame_index, int slot_index, int32_t value);
- void AddDoubleValue(int frame_index, int slot_index, double value);
-
- static LargeObjectChunk* CreateCode(BailoutType type);
- static void GenerateDeoptimizationEntries(
- MacroAssembler* masm, int count, BailoutType type);
-
- // Weak handle callback for deoptimizing code objects.
- static void HandleWeakDeoptimizedCode(
- v8::Persistent<v8::Value> obj, void* data);
- static Code* FindDeoptimizingCodeFromAddress(Address addr);
- static void RemoveDeoptimizingCode(Code* code);
-
- Isolate* isolate_;
- JSFunction* function_;
- Code* optimized_code_;
- unsigned bailout_id_;
- BailoutType bailout_type_;
- Address from_;
- int fp_to_sp_delta_;
-
- // Input frame description.
- FrameDescription* input_;
- // Number of output frames.
- int output_count_;
- // Array of output frame descriptions.
- FrameDescription** output_;
-
- List<ValueDescriptionInteger32>* integer32_values_;
- List<ValueDescriptionDouble>* double_values_;
-
- static int table_entry_size_;
-
- friend class FrameDescription;
- friend class DeoptimizingCodeListNode;
-};
-
-
-class FrameDescription {
- public:
- FrameDescription(uint32_t frame_size,
- JSFunction* function);
-
- void* operator new(size_t size, uint32_t frame_size) {
- // Subtracts kPointerSize, as the member frame_content_ already supplies
- // the first element of the area to store the frame.
- return malloc(size + frame_size - kPointerSize);
- }
-
- void operator delete(void* description) {
- free(description);
- }
-
- intptr_t GetFrameSize() const { return frame_size_; }
-
- JSFunction* GetFunction() const { return function_; }
-
- unsigned GetOffsetFromSlotIndex(Deoptimizer* deoptimizer, int slot_index);
-
- intptr_t GetFrameSlot(unsigned offset) {
- return *GetFrameSlotPointer(offset);
- }
-
- double GetDoubleFrameSlot(unsigned offset) {
- return *reinterpret_cast<double*>(GetFrameSlotPointer(offset));
- }
-
- void SetFrameSlot(unsigned offset, intptr_t value) {
- *GetFrameSlotPointer(offset) = value;
- }
-
- intptr_t GetRegister(unsigned n) const {
- ASSERT(n < ARRAY_SIZE(registers_));
- return registers_[n];
- }
-
- double GetDoubleRegister(unsigned n) const {
- ASSERT(n < ARRAY_SIZE(double_registers_));
- return double_registers_[n];
- }
-
- void SetRegister(unsigned n, intptr_t value) {
- ASSERT(n < ARRAY_SIZE(registers_));
- registers_[n] = value;
- }
-
- void SetDoubleRegister(unsigned n, double value) {
- ASSERT(n < ARRAY_SIZE(double_registers_));
- double_registers_[n] = value;
- }
-
- intptr_t GetTop() const { return top_; }
- void SetTop(intptr_t top) { top_ = top; }
-
- intptr_t GetPc() const { return pc_; }
- void SetPc(intptr_t pc) { pc_ = pc; }
-
- intptr_t GetFp() const { return fp_; }
- void SetFp(intptr_t fp) { fp_ = fp; }
-
- Smi* GetState() const { return state_; }
- void SetState(Smi* state) { state_ = state; }
-
- void SetContinuation(intptr_t pc) { continuation_ = pc; }
-
- static int registers_offset() {
- return OFFSET_OF(FrameDescription, registers_);
- }
-
- static int double_registers_offset() {
- return OFFSET_OF(FrameDescription, double_registers_);
- }
-
- static int frame_size_offset() {
- return OFFSET_OF(FrameDescription, frame_size_);
- }
-
- static int pc_offset() {
- return OFFSET_OF(FrameDescription, pc_);
- }
-
- static int state_offset() {
- return OFFSET_OF(FrameDescription, state_);
- }
-
- static int continuation_offset() {
- return OFFSET_OF(FrameDescription, continuation_);
- }
-
- static int frame_content_offset() {
- return OFFSET_OF(FrameDescription, frame_content_);
- }
-
- private:
- static const uint32_t kZapUint32 = 0xbeeddead;
-
- uintptr_t frame_size_; // Number of bytes.
- JSFunction* function_;
- intptr_t registers_[Register::kNumRegisters];
- double double_registers_[DoubleRegister::kNumAllocatableRegisters];
- intptr_t top_;
- intptr_t pc_;
- intptr_t fp_;
- Smi* state_;
-
- // Continuation is the PC where the execution continues after
- // deoptimizing.
- intptr_t continuation_;
-
- // This must be at the end of the object as the object is allocated larger
- // than it's definition indicate to extend this array.
- intptr_t frame_content_[1];
-
- intptr_t* GetFrameSlotPointer(unsigned offset) {
- ASSERT(offset < frame_size_);
- return reinterpret_cast<intptr_t*>(
- reinterpret_cast<Address>(this) + frame_content_offset() + offset);
- }
-};
-
-
-class TranslationBuffer BASE_EMBEDDED {
- public:
- TranslationBuffer() : contents_(256) { }
-
- int CurrentIndex() const { return contents_.length(); }
- void Add(int32_t value);
-
- Handle<ByteArray> CreateByteArray();
-
- private:
- ZoneList<uint8_t> contents_;
-};
-
-
-class TranslationIterator BASE_EMBEDDED {
- public:
- TranslationIterator(ByteArray* buffer, int index)
- : buffer_(buffer), index_(index) {
- ASSERT(index >= 0 && index < buffer->length());
- }
-
- int32_t Next();
-
- bool HasNext() const { return index_ >= 0; }
-
- void Done() { index_ = -1; }
-
- void Skip(int n) {
- for (int i = 0; i < n; i++) Next();
- }
-
- private:
- ByteArray* buffer_;
- int index_;
-};
-
-
-class Translation BASE_EMBEDDED {
- public:
- enum Opcode {
- BEGIN,
- FRAME,
- REGISTER,
- INT32_REGISTER,
- DOUBLE_REGISTER,
- STACK_SLOT,
- INT32_STACK_SLOT,
- DOUBLE_STACK_SLOT,
- LITERAL,
- ARGUMENTS_OBJECT,
-
- // A prefix indicating that the next command is a duplicate of the one
- // that follows it.
- DUPLICATE
- };
-
- Translation(TranslationBuffer* buffer, int frame_count)
- : buffer_(buffer),
- index_(buffer->CurrentIndex()) {
- buffer_->Add(BEGIN);
- buffer_->Add(frame_count);
- }
-
- int index() const { return index_; }
-
- // Commands.
- void BeginFrame(int node_id, int literal_id, unsigned height);
- void StoreRegister(Register reg);
- void StoreInt32Register(Register reg);
- void StoreDoubleRegister(DoubleRegister reg);
- void StoreStackSlot(int index);
- void StoreInt32StackSlot(int index);
- void StoreDoubleStackSlot(int index);
- void StoreLiteral(int literal_id);
- void StoreArgumentsObject();
- void MarkDuplicate();
-
- static int NumberOfOperandsFor(Opcode opcode);
-
-#ifdef OBJECT_PRINT
- static const char* StringFor(Opcode opcode);
-#endif
-
- private:
- TranslationBuffer* buffer_;
- int index_;
-};
-
-
-// Linked list holding deoptimizing code objects. The deoptimizing code objects
-// are kept as weak handles until they are no longer activated on the stack.
-class DeoptimizingCodeListNode : public Malloced {
- public:
- explicit DeoptimizingCodeListNode(Code* code);
- ~DeoptimizingCodeListNode();
-
- DeoptimizingCodeListNode* next() const { return next_; }
- void set_next(DeoptimizingCodeListNode* next) { next_ = next; }
- Handle<Code> code() const { return code_; }
-
- private:
- // Global (weak) handle to the deoptimizing code object.
- Handle<Code> code_;
-
- // Next pointer for linked list.
- DeoptimizingCodeListNode* next_;
-};
-
-
-class SlotRef BASE_EMBEDDED {
- public:
- enum SlotRepresentation {
- UNKNOWN,
- TAGGED,
- INT32,
- DOUBLE,
- LITERAL
- };
-
- SlotRef()
- : addr_(NULL), representation_(UNKNOWN) { }
-
- SlotRef(Address addr, SlotRepresentation representation)
- : addr_(addr), representation_(representation) { }
-
- explicit SlotRef(Object* literal)
- : literal_(literal), representation_(LITERAL) { }
-
- Handle<Object> GetValue() {
- switch (representation_) {
- case TAGGED:
- return Handle<Object>(Memory::Object_at(addr_));
-
- case INT32: {
- int value = Memory::int32_at(addr_);
- if (Smi::IsValid(value)) {
- return Handle<Object>(Smi::FromInt(value));
- } else {
- return Isolate::Current()->factory()->NewNumberFromInt(value);
- }
- }
-
- case DOUBLE: {
- double value = Memory::double_at(addr_);
- return Isolate::Current()->factory()->NewNumber(value);
- }
-
- case LITERAL:
- return literal_;
-
- default:
- UNREACHABLE();
- return Handle<Object>::null();
- }
- }
-
- static void ComputeSlotMappingForArguments(JavaScriptFrame* frame,
- int inlined_frame_index,
- Vector<SlotRef>* args_slots);
-
- private:
- Address addr_;
- Handle<Object> literal_;
- SlotRepresentation representation_;
-
- static Address SlotAddress(JavaScriptFrame* frame, int slot_index) {
- if (slot_index >= 0) {
- const int offset = JavaScriptFrameConstants::kLocal0Offset;
- return frame->fp() + offset - (slot_index * kPointerSize);
- } else {
- const int offset = JavaScriptFrameConstants::kLastParameterOffset;
- return frame->fp() + offset - ((slot_index + 1) * kPointerSize);
- }
- }
-
- static SlotRef ComputeSlotForNextArgument(TranslationIterator* iterator,
- DeoptimizationInputData* data,
- JavaScriptFrame* frame);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_DEOPTIMIZER_H_
diff --git a/src/3rdparty/v8/src/disasm.h b/src/3rdparty/v8/src/disasm.h
deleted file mode 100644
index f7f2d41..0000000
--- a/src/3rdparty/v8/src/disasm.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DISASM_H_
-#define V8_DISASM_H_
-
-namespace disasm {
-
-typedef unsigned char byte;
-
-// Interface and default implementation for converting addresses and
-// register-numbers to text. The default implementation is machine
-// specific.
-class NameConverter {
- public:
- virtual ~NameConverter() {}
- virtual const char* NameOfCPURegister(int reg) const;
- virtual const char* NameOfByteCPURegister(int reg) const;
- virtual const char* NameOfXMMRegister(int reg) const;
- virtual const char* NameOfAddress(byte* addr) const;
- virtual const char* NameOfConstant(byte* addr) const;
- virtual const char* NameInCode(byte* addr) const;
-
- protected:
- v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
-};
-
-
-// A generic Disassembler interface
-class Disassembler {
- public:
- // Caller deallocates converter.
- explicit Disassembler(const NameConverter& converter);
-
- virtual ~Disassembler();
-
- // Writes one disassembled instruction into 'buffer' (0-terminated).
- // Returns the length of the disassembled machine instruction in bytes.
- int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction);
-
- // Returns -1 if instruction does not mark the beginning of a constant pool,
- // or the number of entries in the constant pool beginning here.
- int ConstantPoolSizeAt(byte* instruction);
-
- // Write disassembly into specified file 'f' using specified NameConverter
- // (see constructor).
- static void Disassemble(FILE* f, byte* begin, byte* end);
- private:
- const NameConverter& converter_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(Disassembler);
-};
-
-} // namespace disasm
-
-#endif // V8_DISASM_H_
diff --git a/src/3rdparty/v8/src/disassembler.cc b/src/3rdparty/v8/src/disassembler.cc
deleted file mode 100644
index d142ef6..0000000
--- a/src/3rdparty/v8/src/disassembler.cc
+++ /dev/null
@@ -1,339 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "disasm.h"
-#include "disassembler.h"
-#include "macro-assembler.h"
-#include "serialize.h"
-#include "string-stream.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_DISASSEMBLER
-
-void Disassembler::Dump(FILE* f, byte* begin, byte* end) {
- for (byte* pc = begin; pc < end; pc++) {
- if (f == NULL) {
- PrintF("%" V8PRIxPTR " %4" V8PRIdPTR " %02x\n",
- reinterpret_cast<intptr_t>(pc),
- pc - begin,
- *pc);
- } else {
- fprintf(f, "%" V8PRIxPTR " %4" V8PRIdPTR " %02x\n",
- reinterpret_cast<uintptr_t>(pc), pc - begin, *pc);
- }
- }
-}
-
-
-class V8NameConverter: public disasm::NameConverter {
- public:
- explicit V8NameConverter(Code* code) : code_(code) {}
- virtual const char* NameOfAddress(byte* pc) const;
- virtual const char* NameInCode(byte* addr) const;
- Code* code() const { return code_; }
- private:
- Code* code_;
-
- EmbeddedVector<char, 128> v8_buffer_;
-};
-
-
-const char* V8NameConverter::NameOfAddress(byte* pc) const {
- const char* name = Isolate::Current()->builtins()->Lookup(pc);
- if (name != NULL) {
- OS::SNPrintF(v8_buffer_, "%s (%p)", name, pc);
- return v8_buffer_.start();
- }
-
- if (code_ != NULL) {
- int offs = static_cast<int>(pc - code_->instruction_start());
- // print as code offset, if it seems reasonable
- if (0 <= offs && offs < code_->instruction_size()) {
- OS::SNPrintF(v8_buffer_, "%d (%p)", offs, pc);
- return v8_buffer_.start();
- }
- }
-
- return disasm::NameConverter::NameOfAddress(pc);
-}
-
-
-const char* V8NameConverter::NameInCode(byte* addr) const {
- // The V8NameConverter is used for well known code, so we can "safely"
- // dereference pointers in generated code.
- return (code_ != NULL) ? reinterpret_cast<const char*>(addr) : "";
-}
-
-
-static void DumpBuffer(FILE* f, char* buff) {
- if (f == NULL) {
- PrintF("%s", buff);
- } else {
- fprintf(f, "%s", buff);
- }
-}
-
-static const int kOutBufferSize = 2048 + String::kMaxShortPrintLength;
-static const int kRelocInfoPosition = 57;
-
-static int DecodeIt(FILE* f,
- const V8NameConverter& converter,
- byte* begin,
- byte* end) {
- NoHandleAllocation ha;
- AssertNoAllocation no_alloc;
- ExternalReferenceEncoder ref_encoder;
- Heap* heap = HEAP;
-
- v8::internal::EmbeddedVector<char, 128> decode_buffer;
- v8::internal::EmbeddedVector<char, kOutBufferSize> out_buffer;
- byte* pc = begin;
- disasm::Disassembler d(converter);
- RelocIterator* it = NULL;
- if (converter.code() != NULL) {
- it = new RelocIterator(converter.code());
- } else {
- // No relocation information when printing code stubs.
- }
- int constants = -1; // no constants being decoded at the start
-
- while (pc < end) {
- // First decode instruction so that we know its length.
- byte* prev_pc = pc;
- if (constants > 0) {
- OS::SNPrintF(decode_buffer,
- "%08x constant",
- *reinterpret_cast<int32_t*>(pc));
- constants--;
- pc += 4;
- } else {
- int num_const = d.ConstantPoolSizeAt(pc);
- if (num_const >= 0) {
- OS::SNPrintF(decode_buffer,
- "%08x constant pool begin",
- *reinterpret_cast<int32_t*>(pc));
- constants = num_const;
- pc += 4;
- } else if (it != NULL && !it->done() && it->rinfo()->pc() == pc &&
- it->rinfo()->rmode() == RelocInfo::INTERNAL_REFERENCE) {
- // raw pointer embedded in code stream, e.g., jump table
- byte* ptr = *reinterpret_cast<byte**>(pc);
- OS::SNPrintF(decode_buffer,
- "%08" V8PRIxPTR " jump table entry %4" V8PRIdPTR,
- ptr,
- ptr - begin);
- pc += 4;
- } else {
- decode_buffer[0] = '\0';
- pc += d.InstructionDecode(decode_buffer, pc);
- }
- }
-
- // Collect RelocInfo for this instruction (prev_pc .. pc-1)
- List<const char*> comments(4);
- List<byte*> pcs(1);
- List<RelocInfo::Mode> rmodes(1);
- List<intptr_t> datas(1);
- if (it != NULL) {
- while (!it->done() && it->rinfo()->pc() < pc) {
- if (RelocInfo::IsComment(it->rinfo()->rmode())) {
- // For comments just collect the text.
- comments.Add(reinterpret_cast<const char*>(it->rinfo()->data()));
- } else {
- // For other reloc info collect all data.
- pcs.Add(it->rinfo()->pc());
- rmodes.Add(it->rinfo()->rmode());
- datas.Add(it->rinfo()->data());
- }
- it->next();
- }
- }
-
- StringBuilder out(out_buffer.start(), out_buffer.length());
-
- // Comments.
- for (int i = 0; i < comments.length(); i++) {
- out.AddFormatted(" %s\n", comments[i]);
- }
-
- // Write out comments, resets outp so that we can format the next line.
- DumpBuffer(f, out.Finalize());
- out.Reset();
-
- // Instruction address and instruction offset.
- out.AddFormatted("%p %4d ", prev_pc, prev_pc - begin);
-
- // Instruction.
- out.AddFormatted("%s", decode_buffer.start());
-
- // Print all the reloc info for this instruction which are not comments.
- for (int i = 0; i < pcs.length(); i++) {
- // Put together the reloc info
- RelocInfo relocinfo(pcs[i], rmodes[i], datas[i]);
-
- // Indent the printing of the reloc info.
- if (i == 0) {
- // The first reloc info is printed after the disassembled instruction.
- out.AddPadding(' ', kRelocInfoPosition - out.position());
- } else {
- // Additional reloc infos are printed on separate lines.
- out.AddFormatted("\n");
- out.AddPadding(' ', kRelocInfoPosition);
- }
-
- RelocInfo::Mode rmode = relocinfo.rmode();
- if (RelocInfo::IsPosition(rmode)) {
- if (RelocInfo::IsStatementPosition(rmode)) {
- out.AddFormatted(" ;; debug: statement %d", relocinfo.data());
- } else {
- out.AddFormatted(" ;; debug: position %d", relocinfo.data());
- }
- } else if (rmode == RelocInfo::EMBEDDED_OBJECT) {
- HeapStringAllocator allocator;
- StringStream accumulator(&allocator);
- relocinfo.target_object()->ShortPrint(&accumulator);
- SmartPointer<const char> obj_name = accumulator.ToCString();
- out.AddFormatted(" ;; object: %s", *obj_name);
- } else if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
- const char* reference_name =
- ref_encoder.NameOfAddress(*relocinfo.target_reference_address());
- out.AddFormatted(" ;; external reference (%s)", reference_name);
- } else if (RelocInfo::IsCodeTarget(rmode)) {
- out.AddFormatted(" ;; code:");
- if (rmode == RelocInfo::CONSTRUCT_CALL) {
- out.AddFormatted(" constructor,");
- }
- Code* code = Code::GetCodeFromTargetAddress(relocinfo.target_address());
- Code::Kind kind = code->kind();
- if (code->is_inline_cache_stub()) {
- if (rmode == RelocInfo::CODE_TARGET_CONTEXT) {
- out.AddFormatted(" contextual,");
- }
- InlineCacheState ic_state = code->ic_state();
- out.AddFormatted(" %s, %s", Code::Kind2String(kind),
- Code::ICState2String(ic_state));
- if (ic_state == MONOMORPHIC) {
- PropertyType type = code->type();
- out.AddFormatted(", %s", Code::PropertyType2String(type));
- }
- if (code->ic_in_loop() == IN_LOOP) {
- out.AddFormatted(", in_loop");
- }
- if (kind == Code::CALL_IC || kind == Code::KEYED_CALL_IC) {
- out.AddFormatted(", argc = %d", code->arguments_count());
- }
- } else if (kind == Code::STUB) {
- // Reverse lookup required as the minor key cannot be retrieved
- // from the code object.
- Object* obj = heap->code_stubs()->SlowReverseLookup(code);
- if (obj != heap->undefined_value()) {
- ASSERT(obj->IsSmi());
- // Get the STUB key and extract major and minor key.
- uint32_t key = Smi::cast(obj)->value();
- uint32_t minor_key = CodeStub::MinorKeyFromKey(key);
- CodeStub::Major major_key = CodeStub::GetMajorKey(code);
- ASSERT(major_key == CodeStub::MajorKeyFromKey(key));
- out.AddFormatted(" %s, %s, ",
- Code::Kind2String(kind),
- CodeStub::MajorName(major_key, false));
- switch (major_key) {
- case CodeStub::CallFunction: {
- int argc =
- CallFunctionStub::ExtractArgcFromMinorKey(minor_key);
- out.AddFormatted("argc = %d", argc);
- break;
- }
- default:
- out.AddFormatted("minor: %d", minor_key);
- }
- }
- } else {
- out.AddFormatted(" %s", Code::Kind2String(kind));
- }
- } else if (rmode == RelocInfo::RUNTIME_ENTRY) {
- // A runtime entry reloinfo might be a deoptimization bailout.
- Address addr = relocinfo.target_address();
- int id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::EAGER);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
- } else {
- out.AddFormatted(" ;; deoptimization bailout %d", id);
- }
- } else {
- out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
- }
- }
- out.AddString("\n");
- DumpBuffer(f, out.Finalize());
- out.Reset();
- }
-
- delete it;
- return static_cast<int>(pc - begin);
-}
-
-
-int Disassembler::Decode(FILE* f, byte* begin, byte* end) {
- V8NameConverter defaultConverter(NULL);
- return DecodeIt(f, defaultConverter, begin, end);
-}
-
-
-// Called by Code::CodePrint.
-void Disassembler::Decode(FILE* f, Code* code) {
- int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION)
- ? static_cast<int>(code->safepoint_table_offset())
- : code->instruction_size();
- // If there might be a stack check table, stop before reaching it.
- if (code->kind() == Code::FUNCTION) {
- decode_size =
- Min(decode_size, static_cast<int>(code->stack_check_table_offset()));
- }
-
- byte* begin = code->instruction_start();
- byte* end = begin + decode_size;
- V8NameConverter v8NameConverter(code);
- DecodeIt(f, v8NameConverter, begin, end);
-}
-
-#else // ENABLE_DISASSEMBLER
-
-void Disassembler::Dump(FILE* f, byte* begin, byte* end) {}
-int Disassembler::Decode(FILE* f, byte* begin, byte* end) { return 0; }
-void Disassembler::Decode(FILE* f, Code* code) {}
-
-#endif // ENABLE_DISASSEMBLER
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/disassembler.h b/src/3rdparty/v8/src/disassembler.h
deleted file mode 100644
index 68a338d..0000000
--- a/src/3rdparty/v8/src/disassembler.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DISASSEMBLER_H_
-#define V8_DISASSEMBLER_H_
-
-namespace v8 {
-namespace internal {
-
-class Disassembler : public AllStatic {
- public:
- // Print the bytes in the interval [begin, end) into f.
- static void Dump(FILE* f, byte* begin, byte* end);
-
- // Decode instructions in the the interval [begin, end) and print the
- // code into f. Returns the number of bytes disassembled or 1 if no
- // instruction could be decoded.
- static int Decode(FILE* f, byte* begin, byte* end);
-
- // Decode instructions in code.
- static void Decode(FILE* f, Code* code);
- private:
- // Decode instruction at pc and print disassembled instruction into f.
- // Returns the instruction length in bytes, or 1 if the instruction could
- // not be decoded. The number of characters written is written into
- // the out parameter char_count.
- static int Decode(FILE* f, byte* pc, int* char_count);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_DISASSEMBLER_H_
diff --git a/src/3rdparty/v8/src/diy-fp.cc b/src/3rdparty/v8/src/diy-fp.cc
deleted file mode 100644
index c54bd1d..0000000
--- a/src/3rdparty/v8/src/diy-fp.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "diy-fp.h"
-
-namespace v8 {
-namespace internal {
-
-void DiyFp::Multiply(const DiyFp& other) {
- // Simply "emulates" a 128 bit multiplication.
- // However: the resulting number only contains 64 bits. The least
- // significant 64 bits are only used for rounding the most significant 64
- // bits.
- const uint64_t kM32 = 0xFFFFFFFFu;
- uint64_t a = f_ >> 32;
- uint64_t b = f_ & kM32;
- uint64_t c = other.f_ >> 32;
- uint64_t d = other.f_ & kM32;
- uint64_t ac = a * c;
- uint64_t bc = b * c;
- uint64_t ad = a * d;
- uint64_t bd = b * d;
- uint64_t tmp = (bd >> 32) + (ad & kM32) + (bc & kM32);
- // By adding 1U << 31 to tmp we round the final result.
- // Halfway cases will be round up.
- tmp += 1U << 31;
- uint64_t result_f = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32);
- e_ += other.e_ + 64;
- f_ = result_f;
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/diy-fp.h b/src/3rdparty/v8/src/diy-fp.h
deleted file mode 100644
index cfe05ef..0000000
--- a/src/3rdparty/v8/src/diy-fp.h
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DIY_FP_H_
-#define V8_DIY_FP_H_
-
-namespace v8 {
-namespace internal {
-
-// This "Do It Yourself Floating Point" class implements a floating-point number
-// with a uint64 significand and an int exponent. Normalized DiyFp numbers will
-// have the most significant bit of the significand set.
-// Multiplication and Subtraction do not normalize their results.
-// DiyFp are not designed to contain special doubles (NaN and Infinity).
-class DiyFp {
- public:
- static const int kSignificandSize = 64;
-
- DiyFp() : f_(0), e_(0) {}
- DiyFp(uint64_t f, int e) : f_(f), e_(e) {}
-
- // this = this - other.
- // The exponents of both numbers must be the same and the significand of this
- // must be bigger than the significand of other.
- // The result will not be normalized.
- void Subtract(const DiyFp& other) {
- ASSERT(e_ == other.e_);
- ASSERT(f_ >= other.f_);
- f_ -= other.f_;
- }
-
- // Returns a - b.
- // The exponents of both numbers must be the same and this must be bigger
- // than other. The result will not be normalized.
- static DiyFp Minus(const DiyFp& a, const DiyFp& b) {
- DiyFp result = a;
- result.Subtract(b);
- return result;
- }
-
-
- // this = this * other.
- void Multiply(const DiyFp& other);
-
- // returns a * b;
- static DiyFp Times(const DiyFp& a, const DiyFp& b) {
- DiyFp result = a;
- result.Multiply(b);
- return result;
- }
-
- void Normalize() {
- ASSERT(f_ != 0);
- uint64_t f = f_;
- int e = e_;
-
- // This method is mainly called for normalizing boundaries. In general
- // boundaries need to be shifted by 10 bits. We thus optimize for this case.
- const uint64_t k10MSBits = V8_2PART_UINT64_C(0xFFC00000, 00000000);
- while ((f & k10MSBits) == 0) {
- f <<= 10;
- e -= 10;
- }
- while ((f & kUint64MSB) == 0) {
- f <<= 1;
- e--;
- }
- f_ = f;
- e_ = e;
- }
-
- static DiyFp Normalize(const DiyFp& a) {
- DiyFp result = a;
- result.Normalize();
- return result;
- }
-
- uint64_t f() const { return f_; }
- int e() const { return e_; }
-
- void set_f(uint64_t new_value) { f_ = new_value; }
- void set_e(int new_value) { e_ = new_value; }
-
- private:
- static const uint64_t kUint64MSB = V8_2PART_UINT64_C(0x80000000, 00000000);
-
- uint64_t f_;
- int e_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_DIY_FP_H_
diff --git a/src/3rdparty/v8/src/double.h b/src/3rdparty/v8/src/double.h
deleted file mode 100644
index 65eded9..0000000
--- a/src/3rdparty/v8/src/double.h
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DOUBLE_H_
-#define V8_DOUBLE_H_
-
-#include "diy-fp.h"
-
-namespace v8 {
-namespace internal {
-
-// We assume that doubles and uint64_t have the same endianness.
-static uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
-static double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
-
-// Helper functions for doubles.
-class Double {
- public:
- static const uint64_t kSignMask = V8_2PART_UINT64_C(0x80000000, 00000000);
- static const uint64_t kExponentMask = V8_2PART_UINT64_C(0x7FF00000, 00000000);
- static const uint64_t kSignificandMask =
- V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
- static const uint64_t kHiddenBit = V8_2PART_UINT64_C(0x00100000, 00000000);
- static const int kPhysicalSignificandSize = 52; // Excludes the hidden bit.
- static const int kSignificandSize = 53;
-
- Double() : d64_(0) {}
- explicit Double(double d) : d64_(double_to_uint64(d)) {}
- explicit Double(uint64_t d64) : d64_(d64) {}
- explicit Double(DiyFp diy_fp)
- : d64_(DiyFpToUint64(diy_fp)) {}
-
- // The value encoded by this Double must be greater or equal to +0.0.
- // It must not be special (infinity, or NaN).
- DiyFp AsDiyFp() const {
- ASSERT(Sign() > 0);
- ASSERT(!IsSpecial());
- return DiyFp(Significand(), Exponent());
- }
-
- // The value encoded by this Double must be strictly greater than 0.
- DiyFp AsNormalizedDiyFp() const {
- ASSERT(value() > 0.0);
- uint64_t f = Significand();
- int e = Exponent();
-
- // The current double could be a denormal.
- while ((f & kHiddenBit) == 0) {
- f <<= 1;
- e--;
- }
- // Do the final shifts in one go.
- f <<= DiyFp::kSignificandSize - kSignificandSize;
- e -= DiyFp::kSignificandSize - kSignificandSize;
- return DiyFp(f, e);
- }
-
- // Returns the double's bit as uint64.
- uint64_t AsUint64() const {
- return d64_;
- }
-
- // Returns the next greater double. Returns +infinity on input +infinity.
- double NextDouble() const {
- if (d64_ == kInfinity) return Double(kInfinity).value();
- if (Sign() < 0 && Significand() == 0) {
- // -0.0
- return 0.0;
- }
- if (Sign() < 0) {
- return Double(d64_ - 1).value();
- } else {
- return Double(d64_ + 1).value();
- }
- }
-
- int Exponent() const {
- if (IsDenormal()) return kDenormalExponent;
-
- uint64_t d64 = AsUint64();
- int biased_e =
- static_cast<int>((d64 & kExponentMask) >> kPhysicalSignificandSize);
- return biased_e - kExponentBias;
- }
-
- uint64_t Significand() const {
- uint64_t d64 = AsUint64();
- uint64_t significand = d64 & kSignificandMask;
- if (!IsDenormal()) {
- return significand + kHiddenBit;
- } else {
- return significand;
- }
- }
-
- // Returns true if the double is a denormal.
- bool IsDenormal() const {
- uint64_t d64 = AsUint64();
- return (d64 & kExponentMask) == 0;
- }
-
- // We consider denormals not to be special.
- // Hence only Infinity and NaN are special.
- bool IsSpecial() const {
- uint64_t d64 = AsUint64();
- return (d64 & kExponentMask) == kExponentMask;
- }
-
- bool IsNan() const {
- uint64_t d64 = AsUint64();
- return ((d64 & kExponentMask) == kExponentMask) &&
- ((d64 & kSignificandMask) != 0);
- }
-
- bool IsInfinite() const {
- uint64_t d64 = AsUint64();
- return ((d64 & kExponentMask) == kExponentMask) &&
- ((d64 & kSignificandMask) == 0);
- }
-
- int Sign() const {
- uint64_t d64 = AsUint64();
- return (d64 & kSignMask) == 0? 1: -1;
- }
-
- // Precondition: the value encoded by this Double must be greater or equal
- // than +0.0.
- DiyFp UpperBoundary() const {
- ASSERT(Sign() > 0);
- return DiyFp(Significand() * 2 + 1, Exponent() - 1);
- }
-
- // Returns the two boundaries of this.
- // The bigger boundary (m_plus) is normalized. The lower boundary has the same
- // exponent as m_plus.
- // Precondition: the value encoded by this Double must be greater than 0.
- void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
- ASSERT(value() > 0.0);
- DiyFp v = this->AsDiyFp();
- bool significand_is_zero = (v.f() == kHiddenBit);
- DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
- DiyFp m_minus;
- if (significand_is_zero && v.e() != kDenormalExponent) {
- // The boundary is closer. Think of v = 1000e10 and v- = 9999e9.
- // Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but
- // at a distance of 1e8.
- // The only exception is for the smallest normal: the largest denormal is
- // at the same distance as its successor.
- // Note: denormals have the same exponent as the smallest normals.
- m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2);
- } else {
- m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1);
- }
- m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e()));
- m_minus.set_e(m_plus.e());
- *out_m_plus = m_plus;
- *out_m_minus = m_minus;
- }
-
- double value() const { return uint64_to_double(d64_); }
-
- // Returns the significand size for a given order of magnitude.
- // If v = f*2^e with 2^p-1 <= f <= 2^p then p+e is v's order of magnitude.
- // This function returns the number of significant binary digits v will have
- // once its encoded into a double. In almost all cases this is equal to
- // kSignificandSize. The only exception are denormals. They start with leading
- // zeroes and their effective significand-size is hence smaller.
- static int SignificandSizeForOrderOfMagnitude(int order) {
- if (order >= (kDenormalExponent + kSignificandSize)) {
- return kSignificandSize;
- }
- if (order <= kDenormalExponent) return 0;
- return order - kDenormalExponent;
- }
-
- private:
- static const int kExponentBias = 0x3FF + kPhysicalSignificandSize;
- static const int kDenormalExponent = -kExponentBias + 1;
- static const int kMaxExponent = 0x7FF - kExponentBias;
- static const uint64_t kInfinity = V8_2PART_UINT64_C(0x7FF00000, 00000000);
-
- const uint64_t d64_;
-
- static uint64_t DiyFpToUint64(DiyFp diy_fp) {
- uint64_t significand = diy_fp.f();
- int exponent = diy_fp.e();
- while (significand > kHiddenBit + kSignificandMask) {
- significand >>= 1;
- exponent++;
- }
- if (exponent >= kMaxExponent) {
- return kInfinity;
- }
- if (exponent < kDenormalExponent) {
- return 0;
- }
- while (exponent > kDenormalExponent && (significand & kHiddenBit) == 0) {
- significand <<= 1;
- exponent--;
- }
- uint64_t biased_exponent;
- if (exponent == kDenormalExponent && (significand & kHiddenBit) == 0) {
- biased_exponent = 0;
- } else {
- biased_exponent = static_cast<uint64_t>(exponent + kExponentBias);
- }
- return (significand & kSignificandMask) |
- (biased_exponent << kPhysicalSignificandSize);
- }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_DOUBLE_H_
diff --git a/src/3rdparty/v8/src/dtoa.cc b/src/3rdparty/v8/src/dtoa.cc
deleted file mode 100644
index b857a5d..0000000
--- a/src/3rdparty/v8/src/dtoa.cc
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <math.h>
-
-#include "v8.h"
-#include "dtoa.h"
-
-#include "bignum-dtoa.h"
-#include "double.h"
-#include "fast-dtoa.h"
-#include "fixed-dtoa.h"
-
-namespace v8 {
-namespace internal {
-
-static BignumDtoaMode DtoaToBignumDtoaMode(DtoaMode dtoa_mode) {
- switch (dtoa_mode) {
- case DTOA_SHORTEST: return BIGNUM_DTOA_SHORTEST;
- case DTOA_FIXED: return BIGNUM_DTOA_FIXED;
- case DTOA_PRECISION: return BIGNUM_DTOA_PRECISION;
- default:
- UNREACHABLE();
- return BIGNUM_DTOA_SHORTEST; // To silence compiler.
- }
-}
-
-
-void DoubleToAscii(double v, DtoaMode mode, int requested_digits,
- Vector<char> buffer, int* sign, int* length, int* point) {
- ASSERT(!Double(v).IsSpecial());
- ASSERT(mode == DTOA_SHORTEST || requested_digits >= 0);
-
- if (Double(v).Sign() < 0) {
- *sign = 1;
- v = -v;
- } else {
- *sign = 0;
- }
-
- if (v == 0) {
- buffer[0] = '0';
- buffer[1] = '\0';
- *length = 1;
- *point = 1;
- return;
- }
-
- if (mode == DTOA_PRECISION && requested_digits == 0) {
- buffer[0] = '\0';
- *length = 0;
- return;
- }
-
- bool fast_worked;
- switch (mode) {
- case DTOA_SHORTEST:
- fast_worked = FastDtoa(v, FAST_DTOA_SHORTEST, 0, buffer, length, point);
- break;
- case DTOA_FIXED:
- fast_worked = FastFixedDtoa(v, requested_digits, buffer, length, point);
- break;
- case DTOA_PRECISION:
- fast_worked = FastDtoa(v, FAST_DTOA_PRECISION, requested_digits,
- buffer, length, point);
- break;
- default:
- UNREACHABLE();
- fast_worked = false;
- }
- if (fast_worked) return;
-
- // If the fast dtoa didn't succeed use the slower bignum version.
- BignumDtoaMode bignum_mode = DtoaToBignumDtoaMode(mode);
- BignumDtoa(v, bignum_mode, requested_digits, buffer, length, point);
- buffer[*length] = '\0';
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/dtoa.h b/src/3rdparty/v8/src/dtoa.h
deleted file mode 100644
index b3e79af..0000000
--- a/src/3rdparty/v8/src/dtoa.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DTOA_H_
-#define V8_DTOA_H_
-
-namespace v8 {
-namespace internal {
-
-enum DtoaMode {
- // Return the shortest correct representation.
- // For example the output of 0.299999999999999988897 is (the less accurate but
- // correct) 0.3.
- DTOA_SHORTEST,
- // Return a fixed number of digits after the decimal point.
- // For instance fixed(0.1, 4) becomes 0.1000
- // If the input number is big, the output will be big.
- DTOA_FIXED,
- // Return a fixed number of digits, no matter what the exponent is.
- DTOA_PRECISION
-};
-
-// The maximal length of digits a double can have in base 10.
-// Note that DoubleToAscii null-terminates its input. So the given buffer should
-// be at least kBase10MaximalLength + 1 characters long.
-static const int kBase10MaximalLength = 17;
-
-// Converts the given double 'v' to ascii.
-// The result should be interpreted as buffer * 10^(point-length).
-//
-// The output depends on the given mode:
-// - SHORTEST: produce the least amount of digits for which the internal
-// identity requirement is still satisfied. If the digits are printed
-// (together with the correct exponent) then reading this number will give
-// 'v' again. The buffer will choose the representation that is closest to
-// 'v'. If there are two at the same distance, than the one farther away
-// from 0 is chosen (halfway cases - ending with 5 - are rounded up).
-// In this mode the 'requested_digits' parameter is ignored.
-// - FIXED: produces digits necessary to print a given number with
-// 'requested_digits' digits after the decimal point. The produced digits
-// might be too short in which case the caller has to fill the gaps with '0's.
-// Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2.
-// Halfway cases are rounded towards +/-Infinity (away from 0). The call
-// toFixed(0.15, 2) thus returns buffer="2", point=0.
-// The returned buffer may contain digits that would be truncated from the
-// shortest representation of the input.
-// - PRECISION: produces 'requested_digits' where the first digit is not '0'.
-// Even though the length of produced digits usually equals
-// 'requested_digits', the function is allowed to return fewer digits, in
-// which case the caller has to fill the missing digits with '0's.
-// Halfway cases are again rounded away from 0.
-// 'DoubleToAscii' expects the given buffer to be big enough to hold all digits
-// and a terminating null-character. In SHORTEST-mode it expects a buffer of
-// at least kBase10MaximalLength + 1. Otherwise, the size of the output is
-// limited to requested_digits digits plus the null terminator.
-void DoubleToAscii(double v, DtoaMode mode, int requested_digits,
- Vector<char> buffer, int* sign, int* length, int* point);
-
-} } // namespace v8::internal
-
-#endif // V8_DTOA_H_
diff --git a/src/3rdparty/v8/src/execution.cc b/src/3rdparty/v8/src/execution.cc
deleted file mode 100644
index ea53a2a..0000000
--- a/src/3rdparty/v8/src/execution.cc
+++ /dev/null
@@ -1,835 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include "v8.h"
-
-#include "api.h"
-#include "bootstrapper.h"
-#include "codegen-inl.h"
-#include "debug.h"
-#include "runtime-profiler.h"
-#include "simulator.h"
-#include "v8threads.h"
-#include "vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-StackGuard::StackGuard()
- : isolate_(NULL) {
-}
-
-
-void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
- ASSERT(isolate_ != NULL);
- // Ignore attempts to interrupt when interrupts are postponed.
- if (should_postpone_interrupts(lock)) return;
- thread_local_.jslimit_ = kInterruptLimit;
- thread_local_.climit_ = kInterruptLimit;
- isolate_->heap()->SetStackLimits();
-}
-
-
-void StackGuard::reset_limits(const ExecutionAccess& lock) {
- ASSERT(isolate_ != NULL);
- thread_local_.jslimit_ = thread_local_.real_jslimit_;
- thread_local_.climit_ = thread_local_.real_climit_;
- isolate_->heap()->SetStackLimits();
-}
-
-
-static Handle<Object> Invoke(bool construct,
- Handle<JSFunction> func,
- Handle<Object> receiver,
- int argc,
- Object*** args,
- bool* has_pending_exception) {
- Isolate* isolate = func->GetIsolate();
-
- // Entering JavaScript.
- VMState state(isolate, JS);
-
- // Placeholder for return value.
- MaybeObject* value = reinterpret_cast<Object*>(kZapValue);
-
- typedef Object* (*JSEntryFunction)(
- byte* entry,
- Object* function,
- Object* receiver,
- int argc,
- Object*** args);
-
- Handle<Code> code;
- if (construct) {
- JSConstructEntryStub stub;
- code = stub.GetCode();
- } else {
- JSEntryStub stub;
- code = stub.GetCode();
- }
-
- // Convert calls on global objects to be calls on the global
- // receiver instead to avoid having a 'this' pointer which refers
- // directly to a global object.
- if (receiver->IsGlobalObject()) {
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
- receiver = Handle<JSObject>(global->global_receiver());
- }
-
- // Make sure that the global object of the context we're about to
- // make the current one is indeed a global object.
- ASSERT(func->context()->global()->IsGlobalObject());
-
- {
- // Save and restore context around invocation and block the
- // allocation of handles without explicit handle scopes.
- SaveContext save(isolate);
- NoHandleAllocation na;
- JSEntryFunction entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
-
- // Call the function through the right JS entry stub.
- byte* entry_address = func->code()->entry();
- JSFunction* function = *func;
- Object* receiver_pointer = *receiver;
- value = CALL_GENERATED_CODE(entry, entry_address, function,
- receiver_pointer, argc, args);
- }
-
-#ifdef DEBUG
- value->Verify();
-#endif
-
- // Update the pending exception flag and return the value.
- *has_pending_exception = value->IsException();
- ASSERT(*has_pending_exception == Isolate::Current()->has_pending_exception());
- if (*has_pending_exception) {
- isolate->ReportPendingMessages();
- if (isolate->pending_exception() == Failure::OutOfMemoryException()) {
- if (!isolate->handle_scope_implementer()->ignore_out_of_memory()) {
- V8::FatalProcessOutOfMemory("JS", true);
- }
- }
- return Handle<Object>();
- } else {
- isolate->clear_pending_message();
- }
-
- return Handle<Object>(value->ToObjectUnchecked(), isolate);
-}
-
-
-Handle<Object> Execution::Call(Handle<JSFunction> func,
- Handle<Object> receiver,
- int argc,
- Object*** args,
- bool* pending_exception) {
- return Invoke(false, func, receiver, argc, args, pending_exception);
-}
-
-
-Handle<Object> Execution::New(Handle<JSFunction> func, int argc,
- Object*** args, bool* pending_exception) {
- return Invoke(true, func, Isolate::Current()->global(), argc, args,
- pending_exception);
-}
-
-
-Handle<Object> Execution::TryCall(Handle<JSFunction> func,
- Handle<Object> receiver,
- int argc,
- Object*** args,
- bool* caught_exception) {
- // Enter a try-block while executing the JavaScript code. To avoid
- // duplicate error printing it must be non-verbose. Also, to avoid
- // creating message objects during stack overflow we shouldn't
- // capture messages.
- v8::TryCatch catcher;
- catcher.SetVerbose(false);
- catcher.SetCaptureMessage(false);
-
- Handle<Object> result = Invoke(false, func, receiver, argc, args,
- caught_exception);
-
- if (*caught_exception) {
- ASSERT(catcher.HasCaught());
- Isolate* isolate = Isolate::Current();
- ASSERT(isolate->has_pending_exception());
- ASSERT(isolate->external_caught_exception());
- if (isolate->pending_exception() ==
- isolate->heap()->termination_exception()) {
- result = isolate->factory()->termination_exception();
- } else {
- result = v8::Utils::OpenHandle(*catcher.Exception());
- }
- isolate->OptionalRescheduleException(true);
- }
-
- ASSERT(!Isolate::Current()->has_pending_exception());
- ASSERT(!Isolate::Current()->external_caught_exception());
- return result;
-}
-
-
-Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
- ASSERT(!object->IsJSFunction());
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
-
- // If you return a function from here, it will be called when an
- // attempt is made to call the given object as a function.
-
- // Regular expressions can be called as functions in both Firefox
- // and Safari so we allow it too.
- if (object->IsJSRegExp()) {
- Handle<String> exec = factory->exec_symbol();
- // TODO(lrn): Bug 617. We should use the default function here, not the
- // one on the RegExp object.
- Object* exec_function;
- { MaybeObject* maybe_exec_function = object->GetProperty(*exec);
- // This can lose an exception, but the alternative is to put a failure
- // object in a handle, which is not GC safe.
- if (!maybe_exec_function->ToObject(&exec_function)) {
- return factory->undefined_value();
- }
- }
- return Handle<Object>(exec_function);
- }
-
- // Objects created through the API can have an instance-call handler
- // that should be used when calling the object as a function.
- if (object->IsHeapObject() &&
- HeapObject::cast(*object)->map()->has_instance_call_handler()) {
- return Handle<JSFunction>(
- isolate->global_context()->call_as_function_delegate());
- }
-
- return factory->undefined_value();
-}
-
-
-Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
- ASSERT(!object->IsJSFunction());
- Isolate* isolate = Isolate::Current();
-
- // If you return a function from here, it will be called when an
- // attempt is made to call the given object as a constructor.
-
- // Objects created through the API can have an instance-call handler
- // that should be used when calling the object as a function.
- if (object->IsHeapObject() &&
- HeapObject::cast(*object)->map()->has_instance_call_handler()) {
- return Handle<JSFunction>(
- isolate->global_context()->call_as_constructor_delegate());
- }
-
- return isolate->factory()->undefined_value();
-}
-
-
-bool StackGuard::IsStackOverflow() {
- ExecutionAccess access(isolate_);
- return (thread_local_.jslimit_ != kInterruptLimit &&
- thread_local_.climit_ != kInterruptLimit);
-}
-
-
-void StackGuard::EnableInterrupts() {
- ExecutionAccess access(isolate_);
- if (has_pending_interrupts(access)) {
- set_interrupt_limits(access);
- }
-}
-
-
-void StackGuard::SetStackLimit(uintptr_t limit) {
- ExecutionAccess access(isolate_);
- // If the current limits are special (eg due to a pending interrupt) then
- // leave them alone.
- uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(limit);
- if (thread_local_.jslimit_ == thread_local_.real_jslimit_) {
- thread_local_.jslimit_ = jslimit;
- }
- if (thread_local_.climit_ == thread_local_.real_climit_) {
- thread_local_.climit_ = limit;
- }
- thread_local_.real_climit_ = limit;
- thread_local_.real_jslimit_ = jslimit;
-}
-
-
-void StackGuard::DisableInterrupts() {
- ExecutionAccess access(isolate_);
- reset_limits(access);
-}
-
-
-bool StackGuard::IsInterrupted() {
- ExecutionAccess access(isolate_);
- return thread_local_.interrupt_flags_ & INTERRUPT;
-}
-
-
-void StackGuard::Interrupt() {
- ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= INTERRUPT;
- set_interrupt_limits(access);
-}
-
-
-bool StackGuard::IsPreempted() {
- ExecutionAccess access(isolate_);
- return thread_local_.interrupt_flags_ & PREEMPT;
-}
-
-
-void StackGuard::Preempt() {
- ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= PREEMPT;
- set_interrupt_limits(access);
-}
-
-
-bool StackGuard::IsTerminateExecution() {
- ExecutionAccess access(isolate_);
- return thread_local_.interrupt_flags_ & TERMINATE;
-}
-
-#ifdef QT_BUILD_SCRIPT_LIB
-bool StackGuard::IsUserCallback()
-{
- ExecutionAccess access(isolate_);
- return thread_local_.interrupt_flags_ & USERCALLBACK;
-}
-
-void StackGuard::RunUserCallbackNow()
-{
- UserCallback cb;
- void *data;
- {
- ExecutionAccess access(isolate_);
- cb = thread_local_.user_callback_;
- data = thread_local_.user_data_;
- }
- if (cb)
- cb(data);
-}
-#endif
-
-void StackGuard::TerminateExecution() {
- ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= TERMINATE;
- set_interrupt_limits(access);
-}
-
-#ifdef QT_BUILD_SCRIPT_LIB
-void StackGuard::ExecuteUserCallback(UserCallback callback, void *data)
-{
- ExecutionAccess access(isolate_);
- thread_local_.user_callback_ = callback;
- thread_local_.user_data_ = data;
- thread_local_.interrupt_flags_ |= USERCALLBACK;
- set_interrupt_limits(access);
-}
-#endif
-
-
-
-bool StackGuard::IsRuntimeProfilerTick() {
- ExecutionAccess access(isolate_);
- return thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK;
-}
-
-
-void StackGuard::RequestRuntimeProfilerTick() {
- // Ignore calls if we're not optimizing or if we can't get the lock.
- if (FLAG_opt && ExecutionAccess::TryLock(isolate_)) {
- thread_local_.interrupt_flags_ |= RUNTIME_PROFILER_TICK;
- if (thread_local_.postpone_interrupts_nesting_ == 0) {
- thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
- isolate_->heap()->SetStackLimits();
- }
- ExecutionAccess::Unlock(isolate_);
- }
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-bool StackGuard::IsDebugBreak() {
- ExecutionAccess access(isolate_);
- return thread_local_.interrupt_flags_ & DEBUGBREAK;
-}
-
-
-void StackGuard::DebugBreak() {
- ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= DEBUGBREAK;
- set_interrupt_limits(access);
-}
-
-
-bool StackGuard::IsDebugCommand() {
- ExecutionAccess access(isolate_);
- return thread_local_.interrupt_flags_ & DEBUGCOMMAND;
-}
-
-
-void StackGuard::DebugCommand() {
- if (FLAG_debugger_auto_break) {
- ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= DEBUGCOMMAND;
- set_interrupt_limits(access);
- }
-}
-#endif
-
-void StackGuard::Continue(InterruptFlag after_what) {
- ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ &= ~static_cast<int>(after_what);
- if (!should_postpone_interrupts(access) && !has_pending_interrupts(access)) {
- reset_limits(access);
- }
-}
-
-
-char* StackGuard::ArchiveStackGuard(char* to) {
- ExecutionAccess access(isolate_);
- memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
- ThreadLocal blank;
-
- // Set the stack limits using the old thread_local_.
- // TODO(isolates): This was the old semantics of constructing a ThreadLocal
- // (as the ctor called SetStackLimits, which looked at the
- // current thread_local_ from StackGuard)-- but is this
- // really what was intended?
- isolate_->heap()->SetStackLimits();
- thread_local_ = blank;
-
- return to + sizeof(ThreadLocal);
-}
-
-
-char* StackGuard::RestoreStackGuard(char* from) {
- ExecutionAccess access(isolate_);
- memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
- isolate_->heap()->SetStackLimits();
- return from + sizeof(ThreadLocal);
-}
-
-
-void StackGuard::FreeThreadResources() {
- Isolate::CurrentPerIsolateThreadData()->set_stack_limit(
- thread_local_.real_climit_);
-}
-
-
-void StackGuard::ThreadLocal::Clear() {
- real_jslimit_ = kIllegalLimit;
- jslimit_ = kIllegalLimit;
- real_climit_ = kIllegalLimit;
- climit_ = kIllegalLimit;
- nesting_ = 0;
- postpone_interrupts_nesting_ = 0;
- interrupt_flags_ = 0;
-#ifdef QT_BUILD_SCRIPT_LIB
- user_callback_ = 0;
- user_data_ = 0;
-#endif
-}
-
-
-bool StackGuard::ThreadLocal::Initialize() {
- bool should_set_stack_limits = false;
- if (real_climit_ == kIllegalLimit) {
- // Takes the address of the limit variable in order to find out where
- // the top of stack is right now.
- const uintptr_t kLimitSize = FLAG_stack_size * KB;
- uintptr_t limit = reinterpret_cast<uintptr_t>(&limit) - kLimitSize;
- ASSERT(reinterpret_cast<uintptr_t>(&limit) > kLimitSize);
- real_jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
- jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
- real_climit_ = limit;
- climit_ = limit;
- should_set_stack_limits = true;
- }
- nesting_ = 0;
- postpone_interrupts_nesting_ = 0;
- interrupt_flags_ = 0;
- return should_set_stack_limits;
-}
-
-
-void StackGuard::ClearThread(const ExecutionAccess& lock) {
- thread_local_.Clear();
- isolate_->heap()->SetStackLimits();
-}
-
-
-void StackGuard::InitThread(const ExecutionAccess& lock) {
- if (thread_local_.Initialize()) isolate_->heap()->SetStackLimits();
- uintptr_t stored_limit =
- Isolate::CurrentPerIsolateThreadData()->stack_limit();
- // You should hold the ExecutionAccess lock when you call this.
- if (stored_limit != 0) {
- StackGuard::SetStackLimit(stored_limit);
- }
-}
-
-
-// --- C a l l s t o n a t i v e s ---
-
-#define RETURN_NATIVE_CALL(name, argc, argv, has_pending_exception) \
- do { \
- Isolate* isolate = Isolate::Current(); \
- Object** args[argc] = argv; \
- ASSERT(has_pending_exception != NULL); \
- return Call(isolate->name##_fun(), \
- isolate->js_builtins_object(), argc, args, \
- has_pending_exception); \
- } while (false)
-
-
-Handle<Object> Execution::ToBoolean(Handle<Object> obj) {
- // See the similar code in runtime.js:ToBoolean.
- if (obj->IsBoolean()) return obj;
- bool result = true;
- if (obj->IsString()) {
- result = Handle<String>::cast(obj)->length() != 0;
- } else if (obj->IsNull() || obj->IsUndefined()) {
- result = false;
- } else if (obj->IsNumber()) {
- double value = obj->Number();
- result = !((value == 0) || isnan(value));
- }
- return Handle<Object>(HEAP->ToBoolean(result));
-}
-
-
-Handle<Object> Execution::ToNumber(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_number, 1, { obj.location() }, exc);
-}
-
-
-Handle<Object> Execution::ToString(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_string, 1, { obj.location() }, exc);
-}
-
-
-Handle<Object> Execution::ToDetailString(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_detail_string, 1, { obj.location() }, exc);
-}
-
-
-Handle<Object> Execution::ToObject(Handle<Object> obj, bool* exc) {
- if (obj->IsJSObject()) return obj;
- RETURN_NATIVE_CALL(to_object, 1, { obj.location() }, exc);
-}
-
-
-Handle<Object> Execution::ToInteger(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_integer, 1, { obj.location() }, exc);
-}
-
-
-Handle<Object> Execution::ToUint32(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_uint32, 1, { obj.location() }, exc);
-}
-
-
-Handle<Object> Execution::ToInt32(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_int32, 1, { obj.location() }, exc);
-}
-
-
-Handle<Object> Execution::NewDate(double time, bool* exc) {
- Handle<Object> time_obj = FACTORY->NewNumber(time);
- RETURN_NATIVE_CALL(create_date, 1, { time_obj.location() }, exc);
-}
-
-
-#undef RETURN_NATIVE_CALL
-
-
-Handle<JSRegExp> Execution::NewJSRegExp(Handle<String> pattern,
- Handle<String> flags,
- bool* exc) {
- Handle<JSFunction> function = Handle<JSFunction>(
- pattern->GetIsolate()->global_context()->regexp_function());
- Handle<Object> re_obj = RegExpImpl::CreateRegExpLiteral(
- function, pattern, flags, exc);
- if (*exc) return Handle<JSRegExp>();
- return Handle<JSRegExp>::cast(re_obj);
-}
-
-
-Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
- Isolate* isolate = string->GetIsolate();
- Factory* factory = isolate->factory();
-
- int int_index = static_cast<int>(index);
- if (int_index < 0 || int_index >= string->length()) {
- return factory->undefined_value();
- }
-
- Handle<Object> char_at =
- GetProperty(isolate->js_builtins_object(),
- factory->char_at_symbol());
- if (!char_at->IsJSFunction()) {
- return factory->undefined_value();
- }
-
- bool caught_exception;
- Handle<Object> index_object = factory->NewNumberFromInt(int_index);
- Object** index_arg[] = { index_object.location() };
- Handle<Object> result = TryCall(Handle<JSFunction>::cast(char_at),
- string,
- ARRAY_SIZE(index_arg),
- index_arg,
- &caught_exception);
- if (caught_exception) {
- return factory->undefined_value();
- }
- return result;
-}
-
-
-Handle<JSFunction> Execution::InstantiateFunction(
- Handle<FunctionTemplateInfo> data, bool* exc) {
- Isolate* isolate = data->GetIsolate();
- // Fast case: see if the function has already been instantiated
- int serial_number = Smi::cast(data->serial_number())->value();
- Object* elm =
- isolate->global_context()->function_cache()->
- GetElementNoExceptionThrown(serial_number);
- if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
- // The function has not yet been instantiated in this context; do it.
- Object** args[1] = { Handle<Object>::cast(data).location() };
- Handle<Object> result =
- Call(isolate->instantiate_fun(),
- isolate->js_builtins_object(), 1, args, exc);
- if (*exc) return Handle<JSFunction>::null();
- return Handle<JSFunction>::cast(result);
-}
-
-
-Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
- bool* exc) {
- Isolate* isolate = data->GetIsolate();
- if (data->property_list()->IsUndefined() &&
- !data->constructor()->IsUndefined()) {
- // Initialization to make gcc happy.
- Object* result = NULL;
- {
- HandleScope scope(isolate);
- Handle<FunctionTemplateInfo> cons_template =
- Handle<FunctionTemplateInfo>(
- FunctionTemplateInfo::cast(data->constructor()));
- Handle<JSFunction> cons = InstantiateFunction(cons_template, exc);
- if (*exc) return Handle<JSObject>::null();
- Handle<Object> value = New(cons, 0, NULL, exc);
- if (*exc) return Handle<JSObject>::null();
- result = *value;
- }
- ASSERT(!*exc);
- return Handle<JSObject>(JSObject::cast(result));
- } else {
- Object** args[1] = { Handle<Object>::cast(data).location() };
- Handle<Object> result =
- Call(isolate->instantiate_fun(),
- isolate->js_builtins_object(), 1, args, exc);
- if (*exc) return Handle<JSObject>::null();
- return Handle<JSObject>::cast(result);
- }
-}
-
-
-void Execution::ConfigureInstance(Handle<Object> instance,
- Handle<Object> instance_template,
- bool* exc) {
- Isolate* isolate = Isolate::Current();
- Object** args[2] = { instance.location(), instance_template.location() };
- Execution::Call(isolate->configure_instance_fun(),
- isolate->js_builtins_object(), 2, args, exc);
-}
-
-
-Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
- Handle<JSFunction> fun,
- Handle<Object> pos,
- Handle<Object> is_global) {
- Isolate* isolate = fun->GetIsolate();
- const int argc = 4;
- Object** args[argc] = { recv.location(),
- Handle<Object>::cast(fun).location(),
- pos.location(),
- is_global.location() };
- bool caught_exception = false;
- Handle<Object> result =
- TryCall(isolate->get_stack_trace_line_fun(),
- isolate->js_builtins_object(), argc, args,
- &caught_exception);
- if (caught_exception || !result->IsString()) {
- return isolate->factory()->empty_symbol();
- }
-
- return Handle<String>::cast(result);
-}
-
-
-static Object* RuntimePreempt() {
- Isolate* isolate = Isolate::Current();
-
- // Clear the preempt request flag.
- isolate->stack_guard()->Continue(PREEMPT);
-
- ContextSwitcher::PreemptionReceived();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (isolate->debug()->InDebugger()) {
- // If currently in the debugger don't do any actual preemption but record
- // that preemption occoured while in the debugger.
- isolate->debug()->PreemptionWhileInDebugger();
- } else {
- // Perform preemption.
- v8::Unlocker unlocker;
- Thread::YieldCPU();
- }
-#else
- { // NOLINT
- // Perform preemption.
- v8::Unlocker unlocker;
- Thread::YieldCPU();
- }
-#endif
-
- return isolate->heap()->undefined_value();
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-Object* Execution::DebugBreakHelper() {
- Isolate* isolate = Isolate::Current();
-
- // Just continue if breaks are disabled.
- if (isolate->debug()->disable_break()) {
- return isolate->heap()->undefined_value();
- }
-
- // Ignore debug break during bootstrapping.
- if (isolate->bootstrapper()->IsActive()) {
- return isolate->heap()->undefined_value();
- }
-
- {
- JavaScriptFrameIterator it(isolate);
- ASSERT(!it.done());
- Object* fun = it.frame()->function();
- if (fun && fun->IsJSFunction()) {
- // Don't stop in builtin functions.
- if (JSFunction::cast(fun)->IsBuiltin()) {
- return isolate->heap()->undefined_value();
- }
- GlobalObject* global = JSFunction::cast(fun)->context()->global();
- // Don't stop in debugger functions.
- if (isolate->debug()->IsDebugGlobal(global)) {
- return isolate->heap()->undefined_value();
- }
- }
- }
-
- // Collect the break state before clearing the flags.
- bool debug_command_only =
- isolate->stack_guard()->IsDebugCommand() &&
- !isolate->stack_guard()->IsDebugBreak();
-
- // Clear the debug break request flag.
- isolate->stack_guard()->Continue(DEBUGBREAK);
-
- ProcessDebugMesssages(debug_command_only);
-
- // Return to continue execution.
- return isolate->heap()->undefined_value();
-}
-
-void Execution::ProcessDebugMesssages(bool debug_command_only) {
- Isolate* isolate = Isolate::Current();
- // Clear the debug command request flag.
- isolate->stack_guard()->Continue(DEBUGCOMMAND);
-
- HandleScope scope(isolate);
- // Enter the debugger. Just continue if we fail to enter the debugger.
- EnterDebugger debugger;
- if (debugger.FailedToEnter()) {
- return;
- }
-
- // Notify the debug event listeners. Indicate auto continue if the break was
- // a debug command break.
- isolate->debugger()->OnDebugBreak(isolate->factory()->undefined_value(),
- debug_command_only);
-}
-
-
-#endif
-
-MaybeObject* Execution::HandleStackGuardInterrupt() {
- Isolate* isolate = Isolate::Current();
- StackGuard* stack_guard = isolate->stack_guard();
- isolate->counters()->stack_interrupts()->Increment();
- if (stack_guard->IsRuntimeProfilerTick()) {
- isolate->counters()->runtime_profiler_ticks()->Increment();
- stack_guard->Continue(RUNTIME_PROFILER_TICK);
- isolate->runtime_profiler()->OptimizeNow();
- }
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (stack_guard->IsDebugBreak() || stack_guard->IsDebugCommand()) {
- DebugBreakHelper();
- }
-#endif
-#ifdef QT_BUILD_SCRIPT_LIB
- if (stack_guard->IsUserCallback()) {
- stack_guard->Continue(USERCALLBACK);
- stack_guard->RunUserCallbackNow();
- if (isolate->has_scheduled_exception() && !stack_guard->IsTerminateExecution())
- return isolate->PromoteScheduledException();
- }
-#endif
- if (stack_guard->IsPreempted()) RuntimePreempt();
- if (stack_guard->IsTerminateExecution()) {
- stack_guard->Continue(TERMINATE);
- return isolate->TerminateExecution();
- }
- if (stack_guard->IsInterrupted()) {
- stack_guard->Continue(INTERRUPT);
- return isolate->StackOverflow();
- }
- return isolate->heap()->undefined_value();
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/execution.h b/src/3rdparty/v8/src/execution.h
deleted file mode 100644
index 7e5bcf7..0000000
--- a/src/3rdparty/v8/src/execution.h
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXECUTION_H_
-#define V8_EXECUTION_H_
-
-namespace v8 {
-namespace internal {
-
-
-// Flag used to set the interrupt causes.
-enum InterruptFlag {
- INTERRUPT = 1 << 0,
- DEBUGBREAK = 1 << 1,
- DEBUGCOMMAND = 1 << 2,
- PREEMPT = 1 << 3,
- TERMINATE = 1 << 4,
- RUNTIME_PROFILER_TICK = 1 << 5
-#ifdef QT_BUILD_SCRIPT_LIB
- , USERCALLBACK = 1 << 6
-#endif
-};
-
-class Execution : public AllStatic {
- public:
- // Call a function, the caller supplies a receiver and an array
- // of arguments. Arguments are Object* type. After function returns,
- // pointers in 'args' might be invalid.
- //
- // *pending_exception tells whether the invoke resulted in
- // a pending exception.
- //
- static Handle<Object> Call(Handle<JSFunction> func,
- Handle<Object> receiver,
- int argc,
- Object*** args,
- bool* pending_exception);
-
- // Construct object from function, the caller supplies an array of
- // arguments. Arguments are Object* type. After function returns,
- // pointers in 'args' might be invalid.
- //
- // *pending_exception tells whether the invoke resulted in
- // a pending exception.
- //
- static Handle<Object> New(Handle<JSFunction> func,
- int argc,
- Object*** args,
- bool* pending_exception);
-
- // Call a function, just like Call(), but make sure to silently catch
- // any thrown exceptions. The return value is either the result of
- // calling the function (if caught exception is false) or the exception
- // that occurred (if caught exception is true).
- static Handle<Object> TryCall(Handle<JSFunction> func,
- Handle<Object> receiver,
- int argc,
- Object*** args,
- bool* caught_exception);
-
- // ECMA-262 9.2
- static Handle<Object> ToBoolean(Handle<Object> obj);
-
- // ECMA-262 9.3
- static Handle<Object> ToNumber(Handle<Object> obj, bool* exc);
-
- // ECMA-262 9.4
- static Handle<Object> ToInteger(Handle<Object> obj, bool* exc);
-
- // ECMA-262 9.5
- static Handle<Object> ToInt32(Handle<Object> obj, bool* exc);
-
- // ECMA-262 9.6
- static Handle<Object> ToUint32(Handle<Object> obj, bool* exc);
-
- // ECMA-262 9.8
- static Handle<Object> ToString(Handle<Object> obj, bool* exc);
-
- // ECMA-262 9.8
- static Handle<Object> ToDetailString(Handle<Object> obj, bool* exc);
-
- // ECMA-262 9.9
- static Handle<Object> ToObject(Handle<Object> obj, bool* exc);
-
- // Create a new date object from 'time'.
- static Handle<Object> NewDate(double time, bool* exc);
-
- // Create a new regular expression object from 'pattern' and 'flags'.
- static Handle<JSRegExp> NewJSRegExp(Handle<String> pattern,
- Handle<String> flags,
- bool* exc);
-
- // Used to implement [] notation on strings (calls JS code)
- static Handle<Object> CharAt(Handle<String> str, uint32_t index);
-
- static Handle<Object> GetFunctionFor();
- static Handle<JSFunction> InstantiateFunction(
- Handle<FunctionTemplateInfo> data, bool* exc);
- static Handle<JSObject> InstantiateObject(Handle<ObjectTemplateInfo> data,
- bool* exc);
- static void ConfigureInstance(Handle<Object> instance,
- Handle<Object> data,
- bool* exc);
- static Handle<String> GetStackTraceLine(Handle<Object> recv,
- Handle<JSFunction> fun,
- Handle<Object> pos,
- Handle<Object> is_global);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- static Object* DebugBreakHelper();
- static void ProcessDebugMesssages(bool debug_command_only);
-#endif
-
- // If the stack guard is triggered, but it is not an actual
- // stack overflow, then handle the interruption accordingly.
- MUST_USE_RESULT static MaybeObject* HandleStackGuardInterrupt();
-
- // Get a function delegate (or undefined) for the given non-function
- // object. Used for support calling objects as functions.
- static Handle<Object> GetFunctionDelegate(Handle<Object> object);
-
- // Get a function delegate (or undefined) for the given non-function
- // object. Used for support calling objects as constructors.
- static Handle<Object> GetConstructorDelegate(Handle<Object> object);
-};
-
-
-class ExecutionAccess;
-class Isolate;
-
-
-// StackGuard contains the handling of the limits that are used to limit the
-// number of nested invocations of JavaScript and the stack size used in each
-// invocation.
-class StackGuard {
- public:
- // Pass the address beyond which the stack should not grow. The stack
- // is assumed to grow downwards.
- void SetStackLimit(uintptr_t limit);
-
- // Threading support.
- char* ArchiveStackGuard(char* to);
- char* RestoreStackGuard(char* from);
- static int ArchiveSpacePerThread() { return sizeof(ThreadLocal); }
- void FreeThreadResources();
- // Sets up the default stack guard for this thread if it has not
- // already been set up.
- void InitThread(const ExecutionAccess& lock);
- // Clears the stack guard for this thread so it does not look as if
- // it has been set up.
- void ClearThread(const ExecutionAccess& lock);
-
- bool IsStackOverflow();
- bool IsPreempted();
- void Preempt();
- bool IsInterrupted();
- void Interrupt();
- bool IsTerminateExecution();
- void TerminateExecution();
- bool IsRuntimeProfilerTick();
- void RequestRuntimeProfilerTick();
-#ifdef QT_BUILD_SCRIPT_LIB
- bool IsUserCallback();
- void ExecuteUserCallback(UserCallback callback, void *data);
- void RunUserCallbackNow();
-#endif
-#ifdef ENABLE_DEBUGGER_SUPPORT
- bool IsDebugBreak();
- void DebugBreak();
- bool IsDebugCommand();
- void DebugCommand();
-#endif
- void Continue(InterruptFlag after_what);
-
- // This provides an asynchronous read of the stack limits for the current
- // thread. There are no locks protecting this, but it is assumed that you
- // have the global V8 lock if you are using multiple V8 threads.
- uintptr_t climit() {
- return thread_local_.climit_;
- }
- uintptr_t real_climit() {
- return thread_local_.real_climit_;
- }
- uintptr_t jslimit() {
- return thread_local_.jslimit_;
- }
- uintptr_t real_jslimit() {
- return thread_local_.real_jslimit_;
- }
- Address address_of_jslimit() {
- return reinterpret_cast<Address>(&thread_local_.jslimit_);
- }
- Address address_of_real_jslimit() {
- return reinterpret_cast<Address>(&thread_local_.real_jslimit_);
- }
-
- private:
- StackGuard();
-
- // You should hold the ExecutionAccess lock when calling this method.
- bool has_pending_interrupts(const ExecutionAccess& lock) {
- // Sanity check: We shouldn't be asking about pending interrupts
- // unless we're not postponing them anymore.
- ASSERT(!should_postpone_interrupts(lock));
- return thread_local_.interrupt_flags_ != 0;
- }
-
- // You should hold the ExecutionAccess lock when calling this method.
- bool should_postpone_interrupts(const ExecutionAccess& lock) {
- return thread_local_.postpone_interrupts_nesting_ > 0;
- }
-
- // You should hold the ExecutionAccess lock when calling this method.
- inline void set_interrupt_limits(const ExecutionAccess& lock);
-
- // Reset limits to actual values. For example after handling interrupt.
- // You should hold the ExecutionAccess lock when calling this method.
- inline void reset_limits(const ExecutionAccess& lock);
-
- // Enable or disable interrupts.
- void EnableInterrupts();
- void DisableInterrupts();
-
-#ifdef V8_TARGET_ARCH_X64
- static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
- static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
-#else
- static const uintptr_t kInterruptLimit = 0xfffffffe;
- static const uintptr_t kIllegalLimit = 0xfffffff8;
-#endif
-
- class ThreadLocal {
- public:
- ThreadLocal() { Clear(); }
- // You should hold the ExecutionAccess lock when you call Initialize or
- // Clear.
- void Clear();
-
- // Returns true if the heap's stack limits should be set, false if not.
- bool Initialize();
-
- // The stack limit is split into a JavaScript and a C++ stack limit. These
- // two are the same except when running on a simulator where the C++ and
- // JavaScript stacks are separate. Each of the two stack limits have two
- // values. The one eith the real_ prefix is the actual stack limit
- // set for the VM. The one without the real_ prefix has the same value as
- // the actual stack limit except when there is an interruption (e.g. debug
- // break or preemption) in which case it is lowered to make stack checks
- // fail. Both the generated code and the runtime system check against the
- // one without the real_ prefix.
- uintptr_t real_jslimit_; // Actual JavaScript stack limit set for the VM.
- uintptr_t jslimit_;
- uintptr_t real_climit_; // Actual C++ stack limit set for the VM.
- uintptr_t climit_;
-
- int nesting_;
- int postpone_interrupts_nesting_;
- int interrupt_flags_;
-#ifdef QT_BUILD_SCRIPT_LIB
- UserCallback user_callback_;
- void *user_data_;
-#endif
- };
-
- // TODO(isolates): Technically this could be calculated directly from a
- // pointer to StackGuard.
- Isolate* isolate_;
- ThreadLocal thread_local_;
-
- friend class Isolate;
- friend class StackLimitCheck;
- friend class PostponeInterruptsScope;
-
- DISALLOW_COPY_AND_ASSIGN(StackGuard);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_EXECUTION_H_
diff --git a/src/3rdparty/v8/src/extensions/experimental/break-iterator.cc b/src/3rdparty/v8/src/extensions/experimental/break-iterator.cc
deleted file mode 100644
index e8baea7..0000000
--- a/src/3rdparty/v8/src/extensions/experimental/break-iterator.cc
+++ /dev/null
@@ -1,250 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "break-iterator.h"
-
-#include "unicode/brkiter.h"
-#include "unicode/locid.h"
-#include "unicode/rbbi.h"
-
-namespace v8 {
-namespace internal {
-
-v8::Persistent<v8::FunctionTemplate> BreakIterator::break_iterator_template_;
-
-icu::BreakIterator* BreakIterator::UnpackBreakIterator(
- v8::Handle<v8::Object> obj) {
- if (break_iterator_template_->HasInstance(obj)) {
- return static_cast<icu::BreakIterator*>(
- obj->GetPointerFromInternalField(0));
- }
-
- return NULL;
-}
-
-icu::UnicodeString* BreakIterator::ResetAdoptedText(
- v8::Handle<v8::Object> obj, v8::Handle<v8::Value> value) {
- // Get the previous value from the internal field.
- icu::UnicodeString* text = static_cast<icu::UnicodeString*>(
- obj->GetPointerFromInternalField(1));
- delete text;
-
- // Assign new value to the internal pointer.
- v8::String::Value text_value(value);
- text = new icu::UnicodeString(
- reinterpret_cast<const UChar*>(*text_value), text_value.length());
- obj->SetPointerInInternalField(1, text);
-
- // Return new unicode string pointer.
- return text;
-}
-
-void BreakIterator::DeleteBreakIterator(v8::Persistent<v8::Value> object,
- void* param) {
- v8::Persistent<v8::Object> persistent_object =
- v8::Persistent<v8::Object>::Cast(object);
-
- // First delete the hidden C++ object.
- // Unpacking should never return NULL here. That would only happen if
- // this method is used as the weak callback for persistent handles not
- // pointing to a break iterator.
- delete UnpackBreakIterator(persistent_object);
-
- delete static_cast<icu::UnicodeString*>(
- persistent_object->GetPointerFromInternalField(1));
-
- // Then dispose of the persistent handle to JS object.
- persistent_object.Dispose();
-}
-
-// Throws a JavaScript exception.
-static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
- // Returns undefined, and schedules an exception to be thrown.
- return v8::ThrowException(v8::Exception::Error(
- v8::String::New("BreakIterator method called on an object "
- "that is not a BreakIterator.")));
-}
-
-v8::Handle<v8::Value> BreakIterator::BreakIteratorAdoptText(
- const v8::Arguments& args) {
- if (args.Length() != 1 || !args[0]->IsString()) {
- return v8::ThrowException(v8::Exception::SyntaxError(
- v8::String::New("Text input is required.")));
- }
-
- icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
- if (!break_iterator) {
- return ThrowUnexpectedObjectError();
- }
-
- break_iterator->setText(*ResetAdoptedText(args.Holder(), args[0]));
-
- return v8::Undefined();
-}
-
-v8::Handle<v8::Value> BreakIterator::BreakIteratorFirst(
- const v8::Arguments& args) {
- icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
- if (!break_iterator) {
- return ThrowUnexpectedObjectError();
- }
-
- return v8::Int32::New(break_iterator->first());
-}
-
-v8::Handle<v8::Value> BreakIterator::BreakIteratorNext(
- const v8::Arguments& args) {
- icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
- if (!break_iterator) {
- return ThrowUnexpectedObjectError();
- }
-
- return v8::Int32::New(break_iterator->next());
-}
-
-v8::Handle<v8::Value> BreakIterator::BreakIteratorCurrent(
- const v8::Arguments& args) {
- icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
- if (!break_iterator) {
- return ThrowUnexpectedObjectError();
- }
-
- return v8::Int32::New(break_iterator->current());
-}
-
-v8::Handle<v8::Value> BreakIterator::BreakIteratorBreakType(
- const v8::Arguments& args) {
- icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
- if (!break_iterator) {
- return ThrowUnexpectedObjectError();
- }
-
- // TODO(cira): Remove cast once ICU fixes base BreakIterator class.
- icu::RuleBasedBreakIterator* rule_based_iterator =
- static_cast<icu::RuleBasedBreakIterator*>(break_iterator);
- int32_t status = rule_based_iterator->getRuleStatus();
- // Keep return values in sync with JavaScript BreakType enum.
- if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
- return v8::Int32::New(UBRK_WORD_NONE);
- } else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
- return v8::Int32::New(UBRK_WORD_NUMBER);
- } else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
- return v8::Int32::New(UBRK_WORD_LETTER);
- } else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
- return v8::Int32::New(UBRK_WORD_KANA);
- } else if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) {
- return v8::Int32::New(UBRK_WORD_IDEO);
- } else {
- return v8::Int32::New(-1);
- }
-}
-
-v8::Handle<v8::Value> BreakIterator::JSBreakIterator(
- const v8::Arguments& args) {
- v8::HandleScope handle_scope;
-
- if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
- return v8::ThrowException(v8::Exception::SyntaxError(
- v8::String::New("Locale and iterator type are required.")));
- }
-
- v8::String::Utf8Value locale(args[0]);
- icu::Locale icu_locale(*locale);
-
- UErrorCode status = U_ZERO_ERROR;
- icu::BreakIterator* break_iterator = NULL;
- v8::String::Utf8Value type(args[1]);
- if (!strcmp(*type, "character")) {
- break_iterator =
- icu::BreakIterator::createCharacterInstance(icu_locale, status);
- } else if (!strcmp(*type, "word")) {
- break_iterator =
- icu::BreakIterator::createWordInstance(icu_locale, status);
- } else if (!strcmp(*type, "sentence")) {
- break_iterator =
- icu::BreakIterator::createSentenceInstance(icu_locale, status);
- } else if (!strcmp(*type, "line")) {
- break_iterator =
- icu::BreakIterator::createLineInstance(icu_locale, status);
- } else {
- return v8::ThrowException(v8::Exception::SyntaxError(
- v8::String::New("Invalid iterator type.")));
- }
-
- if (U_FAILURE(status)) {
- delete break_iterator;
- return v8::ThrowException(v8::Exception::Error(
- v8::String::New("Failed to create break iterator.")));
- }
-
- if (break_iterator_template_.IsEmpty()) {
- v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
-
- raw_template->SetClassName(v8::String::New("v8Locale.v8BreakIterator"));
-
- // Define internal field count on instance template.
- v8::Local<v8::ObjectTemplate> object_template =
- raw_template->InstanceTemplate();
-
- // Set aside internal fields for icu break iterator and adopted text.
- object_template->SetInternalFieldCount(2);
-
- // Define all of the prototype methods on prototype template.
- v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
- proto->Set(v8::String::New("adoptText"),
- v8::FunctionTemplate::New(BreakIteratorAdoptText));
- proto->Set(v8::String::New("first"),
- v8::FunctionTemplate::New(BreakIteratorFirst));
- proto->Set(v8::String::New("next"),
- v8::FunctionTemplate::New(BreakIteratorNext));
- proto->Set(v8::String::New("current"),
- v8::FunctionTemplate::New(BreakIteratorCurrent));
- proto->Set(v8::String::New("breakType"),
- v8::FunctionTemplate::New(BreakIteratorBreakType));
-
- break_iterator_template_ =
- v8::Persistent<v8::FunctionTemplate>::New(raw_template);
- }
-
- // Create an empty object wrapper.
- v8::Local<v8::Object> local_object =
- break_iterator_template_->GetFunction()->NewInstance();
- v8::Persistent<v8::Object> wrapper =
- v8::Persistent<v8::Object>::New(local_object);
-
- // Set break iterator as internal field of the resulting JS object.
- wrapper->SetPointerInInternalField(0, break_iterator);
- // Make sure that the pointer to adopted text is NULL.
- wrapper->SetPointerInInternalField(1, NULL);
-
- // Make object handle weak so we can delete iterator once GC kicks in.
- wrapper.MakeWeak(NULL, DeleteBreakIterator);
-
- return wrapper;
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/extensions/experimental/break-iterator.h b/src/3rdparty/v8/src/extensions/experimental/break-iterator.h
deleted file mode 100644
index fac1ed8..0000000
--- a/src/3rdparty/v8/src/extensions/experimental/break-iterator.h
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_BREAK_ITERATOR_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_BREAK_ITERATOR_H_
-
-#include <v8.h>
-
-#include "unicode/uversion.h"
-
-namespace U_ICU_NAMESPACE {
-class BreakIterator;
-class UnicodeString;
-}
-
-namespace v8 {
-namespace internal {
-
-class BreakIterator {
- public:
- static v8::Handle<v8::Value> JSBreakIterator(const v8::Arguments& args);
-
- // Helper methods for various bindings.
-
- // Unpacks break iterator object from corresponding JavaScript object.
- static icu::BreakIterator* UnpackBreakIterator(v8::Handle<v8::Object> obj);
-
- // Deletes the old value and sets the adopted text in
- // corresponding JavaScript object.
- static icu::UnicodeString* ResetAdoptedText(v8::Handle<v8::Object> obj,
- v8::Handle<v8::Value> text_value);
-
- // Release memory we allocated for the BreakIterator once the JS object that
- // holds the pointer gets garbage collected.
- static void DeleteBreakIterator(v8::Persistent<v8::Value> object,
- void* param);
-
- // Assigns new text to the iterator.
- static v8::Handle<v8::Value> BreakIteratorAdoptText(
- const v8::Arguments& args);
-
- // Moves iterator to the beginning of the string and returns new position.
- static v8::Handle<v8::Value> BreakIteratorFirst(const v8::Arguments& args);
-
- // Moves iterator to the next position and returns it.
- static v8::Handle<v8::Value> BreakIteratorNext(const v8::Arguments& args);
-
- // Returns current iterator's current position.
- static v8::Handle<v8::Value> BreakIteratorCurrent(
- const v8::Arguments& args);
-
- // Returns type of the item from current position.
- // This call is only valid for word break iterators. Others just return 0.
- static v8::Handle<v8::Value> BreakIteratorBreakType(
- const v8::Arguments& args);
-
- private:
- BreakIterator() {}
-
- static v8::Persistent<v8::FunctionTemplate> break_iterator_template_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_EXTENSIONS_EXPERIMENTAL_BREAK_ITERATOR_H_
diff --git a/src/3rdparty/v8/src/extensions/experimental/experimental.gyp b/src/3rdparty/v8/src/extensions/experimental/experimental.gyp
deleted file mode 100644
index 761f4c7..0000000
--- a/src/3rdparty/v8/src/extensions/experimental/experimental.gyp
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-{
- 'variables': {
- # TODO(cira): Find out how to pass this value for arbitrary embedder.
- # Chromium sets it in common.gypi and does force include of that file for
- # all sub projects.
- 'icu_src_dir%': '../../../../third_party/icu',
- },
- 'targets': [
- {
- 'target_name': 'i18n_api',
- 'type': 'static_library',
- 'sources': [
- 'break-iterator.cc',
- 'break-iterator.h',
- 'i18n-extension.cc',
- 'i18n-extension.h',
- ],
- 'include_dirs': [
- '<(icu_src_dir)/public/common',
- '../..',
- ],
- 'dependencies': [
- '<(icu_src_dir)/icu.gyp:*',
- '../../../tools/gyp/v8.gyp:v8',
- ],
- },
- ], # targets
-}
diff --git a/src/3rdparty/v8/src/extensions/experimental/i18n-extension.cc b/src/3rdparty/v8/src/extensions/experimental/i18n-extension.cc
deleted file mode 100644
index f14fd9e..0000000
--- a/src/3rdparty/v8/src/extensions/experimental/i18n-extension.cc
+++ /dev/null
@@ -1,284 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "i18n-extension.h"
-
-#include <algorithm>
-#include <string>
-
-#include "break-iterator.h"
-#include "unicode/locid.h"
-#include "unicode/uloc.h"
-
-namespace v8 {
-namespace internal {
-
-I18NExtension* I18NExtension::extension_ = NULL;
-
-// TODO(cira): maybe move JS code to a .js file and generata cc files from it?
-// TODO(cira): Remove v8 prefix from v8Locale once we have stable API.
-const char* const I18NExtension::kSource =
- "v8Locale = function(optLocale) {"
- " native function NativeJSLocale();"
- " var properties = NativeJSLocale(optLocale);"
- " this.locale = properties.locale;"
- " this.language = properties.language;"
- " this.script = properties.script;"
- " this.region = properties.region;"
- "};"
- "v8Locale.availableLocales = function() {"
- " native function NativeJSAvailableLocales();"
- " return NativeJSAvailableLocales();"
- "};"
- "v8Locale.prototype.maximizedLocale = function() {"
- " native function NativeJSMaximizedLocale();"
- " return new v8Locale(NativeJSMaximizedLocale(this.locale));"
- "};"
- "v8Locale.prototype.minimizedLocale = function() {"
- " native function NativeJSMinimizedLocale();"
- " return new v8Locale(NativeJSMinimizedLocale(this.locale));"
- "};"
- "v8Locale.prototype.displayLocale_ = function(displayLocale) {"
- " var result = this.locale;"
- " if (displayLocale !== undefined) {"
- " result = displayLocale.locale;"
- " }"
- " return result;"
- "};"
- "v8Locale.prototype.displayLanguage = function(optDisplayLocale) {"
- " var displayLocale = this.displayLocale_(optDisplayLocale);"
- " native function NativeJSDisplayLanguage();"
- " return NativeJSDisplayLanguage(this.locale, displayLocale);"
- "};"
- "v8Locale.prototype.displayScript = function(optDisplayLocale) {"
- " var displayLocale = this.displayLocale_(optDisplayLocale);"
- " native function NativeJSDisplayScript();"
- " return NativeJSDisplayScript(this.locale, displayLocale);"
- "};"
- "v8Locale.prototype.displayRegion = function(optDisplayLocale) {"
- " var displayLocale = this.displayLocale_(optDisplayLocale);"
- " native function NativeJSDisplayRegion();"
- " return NativeJSDisplayRegion(this.locale, displayLocale);"
- "};"
- "v8Locale.prototype.displayName = function(optDisplayLocale) {"
- " var displayLocale = this.displayLocale_(optDisplayLocale);"
- " native function NativeJSDisplayName();"
- " return NativeJSDisplayName(this.locale, displayLocale);"
- "};"
- "v8Locale.v8BreakIterator = function(locale, type) {"
- " native function NativeJSBreakIterator();"
- " var iterator = NativeJSBreakIterator(locale, type);"
- " iterator.type = type;"
- " return iterator;"
- "};"
- "v8Locale.v8BreakIterator.BreakType = {"
- " 'unknown': -1,"
- " 'none': 0,"
- " 'number': 100,"
- " 'word': 200,"
- " 'kana': 300,"
- " 'ideo': 400"
- "};"
- "v8Locale.prototype.v8CreateBreakIterator = function(type) {"
- " return new v8Locale.v8BreakIterator(this.locale, type);"
- "};";
-
-v8::Handle<v8::FunctionTemplate> I18NExtension::GetNativeFunction(
- v8::Handle<v8::String> name) {
- if (name->Equals(v8::String::New("NativeJSLocale"))) {
- return v8::FunctionTemplate::New(JSLocale);
- } else if (name->Equals(v8::String::New("NativeJSAvailableLocales"))) {
- return v8::FunctionTemplate::New(JSAvailableLocales);
- } else if (name->Equals(v8::String::New("NativeJSMaximizedLocale"))) {
- return v8::FunctionTemplate::New(JSMaximizedLocale);
- } else if (name->Equals(v8::String::New("NativeJSMinimizedLocale"))) {
- return v8::FunctionTemplate::New(JSMinimizedLocale);
- } else if (name->Equals(v8::String::New("NativeJSDisplayLanguage"))) {
- return v8::FunctionTemplate::New(JSDisplayLanguage);
- } else if (name->Equals(v8::String::New("NativeJSDisplayScript"))) {
- return v8::FunctionTemplate::New(JSDisplayScript);
- } else if (name->Equals(v8::String::New("NativeJSDisplayRegion"))) {
- return v8::FunctionTemplate::New(JSDisplayRegion);
- } else if (name->Equals(v8::String::New("NativeJSDisplayName"))) {
- return v8::FunctionTemplate::New(JSDisplayName);
- } else if (name->Equals(v8::String::New("NativeJSBreakIterator"))) {
- return v8::FunctionTemplate::New(BreakIterator::JSBreakIterator);
- }
-
- return v8::Handle<v8::FunctionTemplate>();
-}
-
-v8::Handle<v8::Value> I18NExtension::JSLocale(const v8::Arguments& args) {
- // TODO(cira): Fetch browser locale. Accept en-US as good default for now.
- // We could possibly pass browser locale as a parameter in the constructor.
- std::string locale_name("en-US");
- if (args.Length() == 1 && args[0]->IsString()) {
- locale_name = *v8::String::Utf8Value(args[0]->ToString());
- }
-
- v8::Local<v8::Object> locale = v8::Object::New();
- locale->Set(v8::String::New("locale"), v8::String::New(locale_name.c_str()));
-
- icu::Locale icu_locale(locale_name.c_str());
-
- const char* language = icu_locale.getLanguage();
- locale->Set(v8::String::New("language"), v8::String::New(language));
-
- const char* script = icu_locale.getScript();
- if (strlen(script)) {
- locale->Set(v8::String::New("script"), v8::String::New(script));
- }
-
- const char* region = icu_locale.getCountry();
- if (strlen(region)) {
- locale->Set(v8::String::New("region"), v8::String::New(region));
- }
-
- return locale;
-}
-
-// TODO(cira): Filter out locales that Chrome doesn't support.
-v8::Handle<v8::Value> I18NExtension::JSAvailableLocales(
- const v8::Arguments& args) {
- v8::Local<v8::Array> all_locales = v8::Array::New();
-
- int count = 0;
- const icu::Locale* icu_locales = icu::Locale::getAvailableLocales(count);
- for (int i = 0; i < count; ++i) {
- all_locales->Set(i, v8::String::New(icu_locales[i].getName()));
- }
-
- return all_locales;
-}
-
-// Use - as tag separator, not _ that ICU uses.
-static std::string NormalizeLocale(const std::string& locale) {
- std::string result(locale);
- // TODO(cira): remove STL dependency.
- std::replace(result.begin(), result.end(), '_', '-');
- return result;
-}
-
-v8::Handle<v8::Value> I18NExtension::JSMaximizedLocale(
- const v8::Arguments& args) {
- if (!args.Length() || !args[0]->IsString()) {
- return v8::Undefined();
- }
-
- UErrorCode status = U_ZERO_ERROR;
- std::string locale_name = *v8::String::Utf8Value(args[0]->ToString());
- char max_locale[ULOC_FULLNAME_CAPACITY];
- uloc_addLikelySubtags(locale_name.c_str(), max_locale,
- sizeof(max_locale), &status);
- if (U_FAILURE(status)) {
- return v8::Undefined();
- }
-
- return v8::String::New(NormalizeLocale(max_locale).c_str());
-}
-
-v8::Handle<v8::Value> I18NExtension::JSMinimizedLocale(
- const v8::Arguments& args) {
- if (!args.Length() || !args[0]->IsString()) {
- return v8::Undefined();
- }
-
- UErrorCode status = U_ZERO_ERROR;
- std::string locale_name = *v8::String::Utf8Value(args[0]->ToString());
- char min_locale[ULOC_FULLNAME_CAPACITY];
- uloc_minimizeSubtags(locale_name.c_str(), min_locale,
- sizeof(min_locale), &status);
- if (U_FAILURE(status)) {
- return v8::Undefined();
- }
-
- return v8::String::New(NormalizeLocale(min_locale).c_str());
-}
-
-// Common code for JSDisplayXXX methods.
-static v8::Handle<v8::Value> GetDisplayItem(const v8::Arguments& args,
- const std::string& item) {
- if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
- return v8::Undefined();
- }
-
- std::string base_locale = *v8::String::Utf8Value(args[0]->ToString());
- icu::Locale icu_locale(base_locale.c_str());
- icu::Locale display_locale =
- icu::Locale(*v8::String::Utf8Value(args[1]->ToString()));
- icu::UnicodeString result;
- if (item == "language") {
- icu_locale.getDisplayLanguage(display_locale, result);
- } else if (item == "script") {
- icu_locale.getDisplayScript(display_locale, result);
- } else if (item == "region") {
- icu_locale.getDisplayCountry(display_locale, result);
- } else if (item == "name") {
- icu_locale.getDisplayName(display_locale, result);
- } else {
- return v8::Undefined();
- }
-
- if (result.length()) {
- return v8::String::New(
- reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length());
- }
-
- return v8::Undefined();
-}
-
-v8::Handle<v8::Value> I18NExtension::JSDisplayLanguage(
- const v8::Arguments& args) {
- return GetDisplayItem(args, "language");
-}
-
-v8::Handle<v8::Value> I18NExtension::JSDisplayScript(
- const v8::Arguments& args) {
- return GetDisplayItem(args, "script");
-}
-
-v8::Handle<v8::Value> I18NExtension::JSDisplayRegion(
- const v8::Arguments& args) {
- return GetDisplayItem(args, "region");
-}
-
-v8::Handle<v8::Value> I18NExtension::JSDisplayName(const v8::Arguments& args) {
- return GetDisplayItem(args, "name");
-}
-
-I18NExtension* I18NExtension::get() {
- if (!extension_) {
- extension_ = new I18NExtension();
- }
- return extension_;
-}
-
-void I18NExtension::Register() {
- static v8::DeclareExtension i18n_extension_declaration(I18NExtension::get());
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/extensions/experimental/i18n-extension.h b/src/3rdparty/v8/src/extensions/experimental/i18n-extension.h
deleted file mode 100644
index 629332b..0000000
--- a/src/3rdparty/v8/src/extensions/experimental/i18n-extension.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
-
-#include <v8.h>
-
-namespace v8 {
-namespace internal {
-
-
-class I18NExtension : public v8::Extension {
- public:
- I18NExtension() : v8::Extension("v8/i18n", kSource) {}
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
- v8::Handle<v8::String> name);
-
- // Implementations of window.Locale methods.
- static v8::Handle<v8::Value> JSLocale(const v8::Arguments& args);
- static v8::Handle<v8::Value> JSAvailableLocales(const v8::Arguments& args);
- static v8::Handle<v8::Value> JSMaximizedLocale(const v8::Arguments& args);
- static v8::Handle<v8::Value> JSMinimizedLocale(const v8::Arguments& args);
- static v8::Handle<v8::Value> JSDisplayLanguage(const v8::Arguments& args);
- static v8::Handle<v8::Value> JSDisplayScript(const v8::Arguments& args);
- static v8::Handle<v8::Value> JSDisplayRegion(const v8::Arguments& args);
- static v8::Handle<v8::Value> JSDisplayName(const v8::Arguments& args);
-
- // V8 code prefers Register, while Chrome and WebKit use get kind of methods.
- static void Register();
- static I18NExtension* get();
-
- private:
- static const char* const kSource;
- static I18NExtension* extension_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
diff --git a/src/3rdparty/v8/src/extensions/externalize-string-extension.cc b/src/3rdparty/v8/src/extensions/externalize-string-extension.cc
deleted file mode 100644
index b3f83fe..0000000
--- a/src/3rdparty/v8/src/extensions/externalize-string-extension.cc
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "externalize-string-extension.h"
-
-namespace v8 {
-namespace internal {
-
-template <typename Char, typename Base>
-class SimpleStringResource : public Base {
- public:
- // Takes ownership of |data|.
- SimpleStringResource(Char* data, size_t length)
- : data_(data),
- length_(length) {}
-
- virtual ~SimpleStringResource() { delete[] data_; }
-
- virtual const Char* data() const { return data_; }
-
- virtual size_t length() const { return length_; }
-
- private:
- Char* const data_;
- const size_t length_;
-};
-
-
-typedef SimpleStringResource<char, v8::String::ExternalAsciiStringResource>
- SimpleAsciiStringResource;
-typedef SimpleStringResource<uc16, v8::String::ExternalStringResource>
- SimpleTwoByteStringResource;
-
-
-const char* const ExternalizeStringExtension::kSource =
- "native function externalizeString();"
- "native function isAsciiString();";
-
-
-v8::Handle<v8::FunctionTemplate> ExternalizeStringExtension::GetNativeFunction(
- v8::Handle<v8::String> str) {
- if (strcmp(*v8::String::AsciiValue(str), "externalizeString") == 0) {
- return v8::FunctionTemplate::New(ExternalizeStringExtension::Externalize);
- } else {
- ASSERT(strcmp(*v8::String::AsciiValue(str), "isAsciiString") == 0);
- return v8::FunctionTemplate::New(ExternalizeStringExtension::IsAscii);
- }
-}
-
-
-v8::Handle<v8::Value> ExternalizeStringExtension::Externalize(
- const v8::Arguments& args) {
- if (args.Length() < 1 || !args[0]->IsString()) {
- return v8::ThrowException(v8::String::New(
- "First parameter to externalizeString() must be a string."));
- }
- bool force_two_byte = false;
- if (args.Length() >= 2) {
- if (args[1]->IsBoolean()) {
- force_two_byte = args[1]->BooleanValue();
- } else {
- return v8::ThrowException(v8::String::New(
- "Second parameter to externalizeString() must be a boolean."));
- }
- }
- bool result = false;
- Handle<String> string = Utils::OpenHandle(*args[0].As<v8::String>());
- if (string->IsExternalString()) {
- return v8::ThrowException(v8::String::New(
- "externalizeString() can't externalize twice."));
- }
- if (string->IsAsciiRepresentation() && !force_two_byte) {
- char* data = new char[string->length()];
- String::WriteToFlat(*string, data, 0, string->length());
- SimpleAsciiStringResource* resource = new SimpleAsciiStringResource(
- data, string->length());
- result = string->MakeExternal(resource);
- if (result && !string->IsSymbol()) {
- HEAP->external_string_table()->AddString(*string);
- }
- if (!result) delete resource;
- } else {
- uc16* data = new uc16[string->length()];
- String::WriteToFlat(*string, data, 0, string->length());
- SimpleTwoByteStringResource* resource = new SimpleTwoByteStringResource(
- data, string->length());
- result = string->MakeExternal(resource);
- if (result && !string->IsSymbol()) {
- HEAP->external_string_table()->AddString(*string);
- }
- if (!result) delete resource;
- }
- if (!result) {
- return v8::ThrowException(v8::String::New("externalizeString() failed."));
- }
- return v8::Undefined();
-}
-
-
-v8::Handle<v8::Value> ExternalizeStringExtension::IsAscii(
- const v8::Arguments& args) {
- if (args.Length() != 1 || !args[0]->IsString()) {
- return v8::ThrowException(v8::String::New(
- "isAsciiString() requires a single string argument."));
- }
- return Utils::OpenHandle(*args[0].As<v8::String>())->IsAsciiRepresentation() ?
- v8::True() : v8::False();
-}
-
-
-void ExternalizeStringExtension::Register() {
- static ExternalizeStringExtension externalize_extension;
- static v8::DeclareExtension externalize_extension_declaration(
- &externalize_extension);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/extensions/externalize-string-extension.h b/src/3rdparty/v8/src/extensions/externalize-string-extension.h
deleted file mode 100644
index b97b496..0000000
--- a/src/3rdparty/v8/src/extensions/externalize-string-extension.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_
-#define V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-class ExternalizeStringExtension : public v8::Extension {
- public:
- ExternalizeStringExtension() : v8::Extension("v8/externalize", kSource) {}
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
- v8::Handle<v8::String> name);
- static v8::Handle<v8::Value> Externalize(const v8::Arguments& args);
- static v8::Handle<v8::Value> IsAscii(const v8::Arguments& args);
- static void Register();
- private:
- static const char* const kSource;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_
diff --git a/src/3rdparty/v8/src/extensions/gc-extension.cc b/src/3rdparty/v8/src/extensions/gc-extension.cc
deleted file mode 100644
index 3740c27..0000000
--- a/src/3rdparty/v8/src/extensions/gc-extension.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "gc-extension.h"
-
-namespace v8 {
-namespace internal {
-
-const char* const GCExtension::kSource = "native function gc();";
-
-
-v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
- v8::Handle<v8::String> str) {
- return v8::FunctionTemplate::New(GCExtension::GC);
-}
-
-
-v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
- bool compact = false;
- // All allocation spaces other than NEW_SPACE have the same effect.
- if (args.Length() >= 1 && args[0]->IsBoolean()) {
- compact = args[0]->BooleanValue();
- }
- HEAP->CollectAllGarbage(compact);
- return v8::Undefined();
-}
-
-
-void GCExtension::Register() {
- static GCExtension gc_extension;
- static v8::DeclareExtension gc_extension_declaration(&gc_extension);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/extensions/gc-extension.h b/src/3rdparty/v8/src/extensions/gc-extension.h
deleted file mode 100644
index 06ea4ed..0000000
--- a/src/3rdparty/v8/src/extensions/gc-extension.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_GC_EXTENSION_H_
-#define V8_EXTENSIONS_GC_EXTENSION_H_
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-class GCExtension : public v8::Extension {
- public:
- GCExtension() : v8::Extension("v8/gc", kSource) {}
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
- v8::Handle<v8::String> name);
- static v8::Handle<v8::Value> GC(const v8::Arguments& args);
- static void Register();
- private:
- static const char* const kSource;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_EXTENSIONS_GC_EXTENSION_H_
diff --git a/src/3rdparty/v8/src/factory.cc b/src/3rdparty/v8/src/factory.cc
deleted file mode 100644
index 7dee66f..0000000
--- a/src/3rdparty/v8/src/factory.cc
+++ /dev/null
@@ -1,1194 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "debug.h"
-#include "execution.h"
-#include "factory.h"
-#include "macro-assembler.h"
-#include "objects.h"
-#include "objects-visiting.h"
-
-namespace v8 {
-namespace internal {
-
-
-Handle<FixedArray> Factory::NewFixedArray(int size, PretenureFlag pretenure) {
- ASSERT(0 <= size);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateFixedArray(size, pretenure),
- FixedArray);
-}
-
-
-Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size,
- PretenureFlag pretenure) {
- ASSERT(0 <= size);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateFixedArrayWithHoles(size, pretenure),
- FixedArray);
-}
-
-
-Handle<StringDictionary> Factory::NewStringDictionary(int at_least_space_for) {
- ASSERT(0 <= at_least_space_for);
- CALL_HEAP_FUNCTION(isolate(),
- StringDictionary::Allocate(at_least_space_for),
- StringDictionary);
-}
-
-
-Handle<NumberDictionary> Factory::NewNumberDictionary(int at_least_space_for) {
- ASSERT(0 <= at_least_space_for);
- CALL_HEAP_FUNCTION(isolate(),
- NumberDictionary::Allocate(at_least_space_for),
- NumberDictionary);
-}
-
-
-Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors) {
- ASSERT(0 <= number_of_descriptors);
- CALL_HEAP_FUNCTION(isolate(),
- DescriptorArray::Allocate(number_of_descriptors),
- DescriptorArray);
-}
-
-
-Handle<DeoptimizationInputData> Factory::NewDeoptimizationInputData(
- int deopt_entry_count,
- PretenureFlag pretenure) {
- ASSERT(deopt_entry_count > 0);
- CALL_HEAP_FUNCTION(isolate(),
- DeoptimizationInputData::Allocate(deopt_entry_count,
- pretenure),
- DeoptimizationInputData);
-}
-
-
-Handle<DeoptimizationOutputData> Factory::NewDeoptimizationOutputData(
- int deopt_entry_count,
- PretenureFlag pretenure) {
- ASSERT(deopt_entry_count > 0);
- CALL_HEAP_FUNCTION(isolate(),
- DeoptimizationOutputData::Allocate(deopt_entry_count,
- pretenure),
- DeoptimizationOutputData);
-}
-
-
-// Symbols are created in the old generation (data space).
-Handle<String> Factory::LookupSymbol(Vector<const char> string) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->LookupSymbol(string),
- String);
-}
-
-Handle<String> Factory::LookupAsciiSymbol(Vector<const char> string) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->LookupAsciiSymbol(string),
- String);
-}
-
-Handle<String> Factory::LookupTwoByteSymbol(Vector<const uc16> string) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->LookupTwoByteSymbol(string),
- String);
-}
-
-
-Handle<String> Factory::NewStringFromAscii(Vector<const char> string,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateStringFromAscii(string, pretenure),
- String);
-}
-
-Handle<String> Factory::NewStringFromUtf8(Vector<const char> string,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateStringFromUtf8(string, pretenure),
- String);
-}
-
-
-Handle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateStringFromTwoByte(string, pretenure),
- String);
-}
-
-
-Handle<String> Factory::NewRawAsciiString(int length,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateRawAsciiString(length, pretenure),
- String);
-}
-
-
-Handle<String> Factory::NewRawTwoByteString(int length,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateRawTwoByteString(length, pretenure),
- String);
-}
-
-
-Handle<String> Factory::NewConsString(Handle<String> first,
- Handle<String> second) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateConsString(*first, *second),
- String);
-}
-
-
-Handle<String> Factory::NewSubString(Handle<String> str,
- int begin,
- int end) {
- CALL_HEAP_FUNCTION(isolate(),
- str->SubString(begin, end),
- String);
-}
-
-
-Handle<String> Factory::NewExternalStringFromAscii(
- ExternalAsciiString::Resource* resource) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateExternalStringFromAscii(resource),
- String);
-}
-
-
-Handle<String> Factory::NewExternalStringFromTwoByte(
- ExternalTwoByteString::Resource* resource) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateExternalStringFromTwoByte(resource),
- String);
-}
-
-
-Handle<Context> Factory::NewGlobalContext() {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateGlobalContext(),
- Context);
-}
-
-
-Handle<Context> Factory::NewFunctionContext(int length,
- Handle<JSFunction> closure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateFunctionContext(length, *closure),
- Context);
-}
-
-
-Handle<Context> Factory::NewWithContext(Handle<Context> previous,
- Handle<JSObject> extension,
- bool is_catch_context) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateWithContext(*previous,
- *extension,
- is_catch_context),
- Context);
-}
-
-
-Handle<Struct> Factory::NewStruct(InstanceType type) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateStruct(type),
- Struct);
-}
-
-
-Handle<AccessorInfo> Factory::NewAccessorInfo() {
- Handle<AccessorInfo> info =
- Handle<AccessorInfo>::cast(NewStruct(ACCESSOR_INFO_TYPE));
- info->set_flag(0); // Must clear the flag, it was initialized as undefined.
- return info;
-}
-
-
-Handle<Script> Factory::NewScript(Handle<String> source) {
- // Generate id for this script.
- int id;
- Heap* heap = isolate()->heap();
- if (heap->last_script_id()->IsUndefined()) {
- // Script ids start from one.
- id = 1;
- } else {
- // Increment id, wrap when positive smi is exhausted.
- id = Smi::cast(heap->last_script_id())->value();
- id++;
- if (!Smi::IsValid(id)) {
- id = 0;
- }
- }
- heap->SetLastScriptId(Smi::FromInt(id));
-
- // Create and initialize script object.
- Handle<Proxy> wrapper = NewProxy(0, TENURED);
- Handle<Script> script = Handle<Script>::cast(NewStruct(SCRIPT_TYPE));
- script->set_source(*source);
- script->set_name(heap->undefined_value());
- script->set_id(heap->last_script_id());
- script->set_line_offset(Smi::FromInt(0));
- script->set_column_offset(Smi::FromInt(0));
- script->set_data(heap->undefined_value());
- script->set_context_data(heap->undefined_value());
- script->set_type(Smi::FromInt(Script::TYPE_NORMAL));
- script->set_compilation_type(Smi::FromInt(Script::COMPILATION_TYPE_HOST));
- script->set_wrapper(*wrapper);
- script->set_line_ends(heap->undefined_value());
- script->set_eval_from_shared(heap->undefined_value());
- script->set_eval_from_instructions_offset(Smi::FromInt(0));
-
- return script;
-}
-
-
-Handle<Proxy> Factory::NewProxy(Address addr, PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateProxy(addr, pretenure),
- Proxy);
-}
-
-
-Handle<Proxy> Factory::NewProxy(const AccessorDescriptor* desc) {
- return NewProxy((Address) desc, TENURED);
-}
-
-
-Handle<ByteArray> Factory::NewByteArray(int length, PretenureFlag pretenure) {
- ASSERT(0 <= length);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateByteArray(length, pretenure),
- ByteArray);
-}
-
-
-Handle<ExternalArray> Factory::NewExternalArray(int length,
- ExternalArrayType array_type,
- void* external_pointer,
- PretenureFlag pretenure) {
- ASSERT(0 <= length);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateExternalArray(length,
- array_type,
- external_pointer,
- pretenure),
- ExternalArray);
-}
-
-
-Handle<JSGlobalPropertyCell> Factory::NewJSGlobalPropertyCell(
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateJSGlobalPropertyCell(*value),
- JSGlobalPropertyCell);
-}
-
-
-Handle<Map> Factory::NewMap(InstanceType type, int instance_size) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateMap(type, instance_size),
- Map);
-}
-
-
-Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateFunctionPrototype(*function),
- JSObject);
-}
-
-
-Handle<Map> Factory::CopyMapDropDescriptors(Handle<Map> src) {
- CALL_HEAP_FUNCTION(isolate(), src->CopyDropDescriptors(), Map);
-}
-
-
-Handle<Map> Factory::CopyMap(Handle<Map> src,
- int extra_inobject_properties) {
- Handle<Map> copy = CopyMapDropDescriptors(src);
- // Check that we do not overflow the instance size when adding the
- // extra inobject properties.
- int instance_size_delta = extra_inobject_properties * kPointerSize;
- int max_instance_size_delta =
- JSObject::kMaxInstanceSize - copy->instance_size();
- if (instance_size_delta > max_instance_size_delta) {
- // If the instance size overflows, we allocate as many properties
- // as we can as inobject properties.
- instance_size_delta = max_instance_size_delta;
- extra_inobject_properties = max_instance_size_delta >> kPointerSizeLog2;
- }
- // Adjust the map with the extra inobject properties.
- int inobject_properties =
- copy->inobject_properties() + extra_inobject_properties;
- copy->set_inobject_properties(inobject_properties);
- copy->set_unused_property_fields(inobject_properties);
- copy->set_instance_size(copy->instance_size() + instance_size_delta);
- copy->set_visitor_id(StaticVisitorBase::GetVisitorId(*copy));
- return copy;
-}
-
-
-Handle<Map> Factory::CopyMapDropTransitions(Handle<Map> src) {
- CALL_HEAP_FUNCTION(isolate(), src->CopyDropTransitions(), Map);
-}
-
-
-Handle<Map> Factory::GetFastElementsMap(Handle<Map> src) {
- CALL_HEAP_FUNCTION(isolate(), src->GetFastElementsMap(), Map);
-}
-
-
-Handle<Map> Factory::GetSlowElementsMap(Handle<Map> src) {
- CALL_HEAP_FUNCTION(isolate(), src->GetSlowElementsMap(), Map);
-}
-
-
-Handle<Map> Factory::GetExternalArrayElementsMap(
- Handle<Map> src,
- ExternalArrayType array_type,
- bool safe_to_add_transition) {
- CALL_HEAP_FUNCTION(isolate(),
- src->GetExternalArrayElementsMap(array_type,
- safe_to_add_transition),
- Map);
-}
-
-
-Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
- CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedArray);
-}
-
-
-Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo(
- Handle<SharedFunctionInfo> function_info,
- Handle<Map> function_map,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateFunction(*function_map,
- *function_info,
- isolate()->heap()->the_hole_value(),
- pretenure),
- JSFunction);
-}
-
-
-Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
- Handle<SharedFunctionInfo> function_info,
- Handle<Context> context,
- PretenureFlag pretenure) {
- Handle<JSFunction> result = BaseNewFunctionFromSharedFunctionInfo(
- function_info,
- function_info->strict_mode()
- ? isolate()->strict_mode_function_map()
- : isolate()->function_map(),
- pretenure);
-
- result->set_context(*context);
- int number_of_literals = function_info->num_literals();
- Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
- if (number_of_literals > 0) {
- // Store the object, regexp and array functions in the literals
- // array prefix. These functions will be used when creating
- // object, regexp and array literals in this function.
- literals->set(JSFunction::kLiteralGlobalContextIndex,
- context->global_context());
- }
- result->set_literals(*literals);
- result->set_next_function_link(isolate()->heap()->undefined_value());
-
- if (V8::UseCrankshaft() &&
- FLAG_always_opt &&
- result->is_compiled() &&
- !function_info->is_toplevel() &&
- function_info->allows_lazy_compilation()) {
- result->MarkForLazyRecompilation();
- }
- return result;
-}
-
-
-Handle<Object> Factory::NewNumber(double value,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->NumberFromDouble(value, pretenure), Object);
-}
-
-
-Handle<Object> Factory::NewNumberFromInt(int value) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->NumberFromInt32(value), Object);
-}
-
-
-Handle<Object> Factory::NewNumberFromUint(uint32_t value) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->NumberFromUint32(value), Object);
-}
-
-
-Handle<JSObject> Factory::NewNeanderObject() {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateJSObjectFromMap(
- isolate()->heap()->neander_map()),
- JSObject);
-}
-
-
-Handle<Object> Factory::NewTypeError(const char* type,
- Vector< Handle<Object> > args) {
- return NewError("MakeTypeError", type, args);
-}
-
-
-Handle<Object> Factory::NewTypeError(Handle<String> message) {
- return NewError("$TypeError", message);
-}
-
-
-Handle<Object> Factory::NewRangeError(const char* type,
- Vector< Handle<Object> > args) {
- return NewError("MakeRangeError", type, args);
-}
-
-
-Handle<Object> Factory::NewRangeError(Handle<String> message) {
- return NewError("$RangeError", message);
-}
-
-
-Handle<Object> Factory::NewSyntaxError(const char* type, Handle<JSArray> args) {
- return NewError("MakeSyntaxError", type, args);
-}
-
-
-Handle<Object> Factory::NewSyntaxError(Handle<String> message) {
- return NewError("$SyntaxError", message);
-}
-
-
-Handle<Object> Factory::NewReferenceError(const char* type,
- Vector< Handle<Object> > args) {
- return NewError("MakeReferenceError", type, args);
-}
-
-
-Handle<Object> Factory::NewReferenceError(Handle<String> message) {
- return NewError("$ReferenceError", message);
-}
-
-
-Handle<Object> Factory::NewError(const char* maker, const char* type,
- Vector< Handle<Object> > args) {
- v8::HandleScope scope; // Instantiate a closeable HandleScope for EscapeFrom.
- Handle<FixedArray> array = NewFixedArray(args.length());
- for (int i = 0; i < args.length(); i++) {
- array->set(i, *args[i]);
- }
- Handle<JSArray> object = NewJSArrayWithElements(array);
- Handle<Object> result = NewError(maker, type, object);
- return result.EscapeFrom(&scope);
-}
-
-
-Handle<Object> Factory::NewEvalError(const char* type,
- Vector< Handle<Object> > args) {
- return NewError("MakeEvalError", type, args);
-}
-
-
-Handle<Object> Factory::NewError(const char* type,
- Vector< Handle<Object> > args) {
- return NewError("MakeError", type, args);
-}
-
-
-Handle<Object> Factory::NewError(const char* maker,
- const char* type,
- Handle<JSArray> args) {
- Handle<String> make_str = LookupAsciiSymbol(maker);
- Handle<Object> fun_obj(
- isolate()->js_builtins_object()->GetPropertyNoExceptionThrown(*make_str));
- // If the builtins haven't been properly configured yet this error
- // constructor may not have been defined. Bail out.
- if (!fun_obj->IsJSFunction())
- return undefined_value();
- Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
- Handle<Object> type_obj = LookupAsciiSymbol(type);
- Object** argv[2] = { type_obj.location(),
- Handle<Object>::cast(args).location() };
-
- // Invoke the JavaScript factory method. If an exception is thrown while
- // running the factory method, use the exception as the result.
- bool caught_exception;
- Handle<Object> result = Execution::TryCall(fun,
- isolate()->js_builtins_object(), 2, argv, &caught_exception);
- return result;
-}
-
-
-Handle<Object> Factory::NewError(Handle<String> message) {
- return NewError("$Error", message);
-}
-
-
-Handle<Object> Factory::NewError(const char* constructor,
- Handle<String> message) {
- Handle<String> constr = LookupAsciiSymbol(constructor);
- Handle<JSFunction> fun = Handle<JSFunction>(
- JSFunction::cast(isolate()->js_builtins_object()->
- GetPropertyNoExceptionThrown(*constr)));
- Object** argv[1] = { Handle<Object>::cast(message).location() };
-
- // Invoke the JavaScript factory method. If an exception is thrown while
- // running the factory method, use the exception as the result.
- bool caught_exception;
- Handle<Object> result = Execution::TryCall(fun,
- isolate()->js_builtins_object(), 1, argv, &caught_exception);
- return result;
-}
-
-
-Handle<JSFunction> Factory::NewFunction(Handle<String> name,
- InstanceType type,
- int instance_size,
- Handle<Code> code,
- bool force_initial_map) {
- // Allocate the function
- Handle<JSFunction> function = NewFunction(name, the_hole_value());
-
- // Setup the code pointer in both the shared function info and in
- // the function itself.
- function->shared()->set_code(*code);
- function->set_code(*code);
-
- if (force_initial_map ||
- type != JS_OBJECT_TYPE ||
- instance_size != JSObject::kHeaderSize) {
- Handle<Map> initial_map = NewMap(type, instance_size);
- Handle<JSObject> prototype = NewFunctionPrototype(function);
- initial_map->set_prototype(*prototype);
- function->set_initial_map(*initial_map);
- initial_map->set_constructor(*function);
- } else {
- ASSERT(!function->has_initial_map());
- ASSERT(!function->has_prototype());
- }
-
- return function;
-}
-
-
-Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
- InstanceType type,
- int instance_size,
- Handle<JSObject> prototype,
- Handle<Code> code,
- bool force_initial_map) {
- // Allocate the function.
- Handle<JSFunction> function = NewFunction(name, prototype);
-
- // Setup the code pointer in both the shared function info and in
- // the function itself.
- function->shared()->set_code(*code);
- function->set_code(*code);
-
- if (force_initial_map ||
- type != JS_OBJECT_TYPE ||
- instance_size != JSObject::kHeaderSize) {
- Handle<Map> initial_map = NewMap(type, instance_size);
- function->set_initial_map(*initial_map);
- initial_map->set_constructor(*function);
- }
-
- // Set function.prototype and give the prototype a constructor
- // property that refers to the function.
- SetPrototypeProperty(function, prototype);
- // Currently safe because it is only invoked from Genesis.
- SetLocalPropertyNoThrow(prototype, constructor_symbol(), function, DONT_ENUM);
- return function;
-}
-
-
-Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
- Handle<Code> code) {
- Handle<JSFunction> function = NewFunctionWithoutPrototype(name,
- kNonStrictMode);
- function->shared()->set_code(*code);
- function->set_code(*code);
- ASSERT(!function->has_initial_map());
- ASSERT(!function->has_prototype());
- return function;
-}
-
-
-Handle<Code> Factory::NewCode(const CodeDesc& desc,
- Code::Flags flags,
- Handle<Object> self_ref,
- bool immovable) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->CreateCode(
- desc, flags, self_ref, immovable),
- Code);
-}
-
-
-Handle<Code> Factory::CopyCode(Handle<Code> code) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->CopyCode(*code),
- Code);
-}
-
-
-Handle<Code> Factory::CopyCode(Handle<Code> code, Vector<byte> reloc_info) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->CopyCode(*code, reloc_info),
- Code);
-}
-
-
-MUST_USE_RESULT static inline MaybeObject* DoCopyInsert(
- DescriptorArray* array,
- String* key,
- Object* value,
- PropertyAttributes attributes) {
- CallbacksDescriptor desc(key, value, attributes);
- MaybeObject* obj = array->CopyInsert(&desc, REMOVE_TRANSITIONS);
- return obj;
-}
-
-
-// Allocate the new array.
-Handle<DescriptorArray> Factory::CopyAppendProxyDescriptor(
- Handle<DescriptorArray> array,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes) {
- CALL_HEAP_FUNCTION(isolate(),
- DoCopyInsert(*array, *key, *value, attributes),
- DescriptorArray);
-}
-
-
-Handle<String> Factory::SymbolFromString(Handle<String> value) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->LookupSymbol(*value), String);
-}
-
-
-Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors(
- Handle<DescriptorArray> array,
- Handle<Object> descriptors) {
- v8::NeanderArray callbacks(descriptors);
- int nof_callbacks = callbacks.length();
- Handle<DescriptorArray> result =
- NewDescriptorArray(array->number_of_descriptors() + nof_callbacks);
-
- // Number of descriptors added to the result so far.
- int descriptor_count = 0;
-
- // Copy the descriptors from the array.
- for (int i = 0; i < array->number_of_descriptors(); i++) {
- if (array->GetType(i) != NULL_DESCRIPTOR) {
- result->CopyFrom(descriptor_count++, *array, i);
- }
- }
-
- // Number of duplicates detected.
- int duplicates = 0;
-
- // Fill in new callback descriptors. Process the callbacks from
- // back to front so that the last callback with a given name takes
- // precedence over previously added callbacks with that name.
- for (int i = nof_callbacks - 1; i >= 0; i--) {
- Handle<AccessorInfo> entry =
- Handle<AccessorInfo>(AccessorInfo::cast(callbacks.get(i)));
- // Ensure the key is a symbol before writing into the instance descriptor.
- Handle<String> key =
- SymbolFromString(Handle<String>(String::cast(entry->name())));
- // Check if a descriptor with this name already exists before writing.
- if (result->LinearSearch(*key, descriptor_count) ==
- DescriptorArray::kNotFound) {
- CallbacksDescriptor desc(*key, *entry, entry->property_attributes());
- result->Set(descriptor_count, &desc);
- descriptor_count++;
- } else {
- duplicates++;
- }
- }
-
- // If duplicates were detected, allocate a result of the right size
- // and transfer the elements.
- if (duplicates > 0) {
- int number_of_descriptors = result->number_of_descriptors() - duplicates;
- Handle<DescriptorArray> new_result =
- NewDescriptorArray(number_of_descriptors);
- for (int i = 0; i < number_of_descriptors; i++) {
- new_result->CopyFrom(i, *result, i);
- }
- result = new_result;
- }
-
- // Sort the result before returning.
- result->Sort();
- return result;
-}
-
-
-Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateJSObject(*constructor, pretenure), JSObject);
-}
-
-
-Handle<GlobalObject> Factory::NewGlobalObject(
- Handle<JSFunction> constructor) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateGlobalObject(*constructor),
- GlobalObject);
-}
-
-
-
-Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateJSObjectFromMap(*map, NOT_TENURED),
- JSObject);
-}
-
-
-Handle<JSArray> Factory::NewJSArray(int capacity,
- PretenureFlag pretenure) {
- Handle<JSObject> obj = NewJSObject(isolate()->array_function(), pretenure);
- CALL_HEAP_FUNCTION(isolate(),
- Handle<JSArray>::cast(obj)->Initialize(capacity),
- JSArray);
-}
-
-
-Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArray> elements,
- PretenureFlag pretenure) {
- Handle<JSArray> result =
- Handle<JSArray>::cast(NewJSObject(isolate()->array_function(),
- pretenure));
- result->SetContent(*elements);
- return result;
-}
-
-
-Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
- Handle<String> name,
- int number_of_literals,
- Handle<Code> code,
- Handle<SerializedScopeInfo> scope_info) {
- Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name);
- shared->set_code(*code);
- shared->set_scope_info(*scope_info);
- int literals_array_size = number_of_literals;
- // If the function contains object, regexp or array literals,
- // allocate extra space for a literals array prefix containing the
- // context.
- if (number_of_literals > 0) {
- literals_array_size += JSFunction::kLiteralsPrefixSize;
- }
- shared->set_num_literals(literals_array_size);
- return shared;
-}
-
-
-Handle<JSMessageObject> Factory::NewJSMessageObject(
- Handle<String> type,
- Handle<JSArray> arguments,
- int start_position,
- int end_position,
- Handle<Object> script,
- Handle<Object> stack_trace,
- Handle<Object> stack_frames) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateJSMessageObject(*type,
- *arguments,
- start_position,
- end_position,
- *script,
- *stack_trace,
- *stack_frames),
- JSMessageObject);
-}
-
-Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(Handle<String> name) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateSharedFunctionInfo(*name),
- SharedFunctionInfo);
-}
-
-
-Handle<String> Factory::NumberToString(Handle<Object> number) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->NumberToString(*number), String);
-}
-
-
-Handle<NumberDictionary> Factory::DictionaryAtNumberPut(
- Handle<NumberDictionary> dictionary,
- uint32_t key,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(isolate(),
- dictionary->AtNumberPut(key, *value),
- NumberDictionary);
-}
-
-
-Handle<JSFunction> Factory::NewFunctionHelper(Handle<String> name,
- Handle<Object> prototype) {
- Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateFunction(*isolate()->function_map(),
- *function_share,
- *prototype),
- JSFunction);
-}
-
-
-Handle<JSFunction> Factory::NewFunction(Handle<String> name,
- Handle<Object> prototype) {
- Handle<JSFunction> fun = NewFunctionHelper(name, prototype);
- fun->set_context(isolate()->context()->global_context());
- return fun;
-}
-
-
-Handle<JSFunction> Factory::NewFunctionWithoutPrototypeHelper(
- Handle<String> name,
- StrictModeFlag strict_mode) {
- Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name);
- Handle<Map> map = strict_mode == kStrictMode
- ? isolate()->strict_mode_function_without_prototype_map()
- : isolate()->function_without_prototype_map();
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateFunction(
- *map,
- *function_share,
- *the_hole_value()),
- JSFunction);
-}
-
-
-Handle<JSFunction> Factory::NewFunctionWithoutPrototype(
- Handle<String> name,
- StrictModeFlag strict_mode) {
- Handle<JSFunction> fun = NewFunctionWithoutPrototypeHelper(name, strict_mode);
- fun->set_context(isolate()->context()->global_context());
- return fun;
-}
-
-
-Handle<Object> Factory::ToObject(Handle<Object> object) {
- CALL_HEAP_FUNCTION(isolate(), object->ToObject(), Object);
-}
-
-
-Handle<Object> Factory::ToObject(Handle<Object> object,
- Handle<Context> global_context) {
- CALL_HEAP_FUNCTION(isolate(), object->ToObject(*global_context), Object);
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
- // Get the original code of the function.
- Handle<Code> code(shared->code());
-
- // Create a copy of the code before allocating the debug info object to avoid
- // allocation while setting up the debug info object.
- Handle<Code> original_code(*Factory::CopyCode(code));
-
- // Allocate initial fixed array for active break points before allocating the
- // debug info object to avoid allocation while setting up the debug info
- // object.
- Handle<FixedArray> break_points(
- NewFixedArray(Debug::kEstimatedNofBreakPointsInFunction));
-
- // Create and set up the debug info object. Debug info contains function, a
- // copy of the original code, the executing code and initial fixed array for
- // active break points.
- Handle<DebugInfo> debug_info =
- Handle<DebugInfo>::cast(NewStruct(DEBUG_INFO_TYPE));
- debug_info->set_shared(*shared);
- debug_info->set_original_code(*original_code);
- debug_info->set_code(*code);
- debug_info->set_break_points(*break_points);
-
- // Link debug info to function.
- shared->set_debug_info(*debug_info);
-
- return debug_info;
-}
-#endif
-
-
-Handle<JSObject> Factory::NewArgumentsObject(Handle<Object> callee,
- int length) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateArgumentsObject(*callee, length), JSObject);
-}
-
-
-Handle<JSFunction> Factory::CreateApiFunction(
- Handle<FunctionTemplateInfo> obj, ApiInstanceType instance_type) {
- Handle<Code> code = isolate()->builtins()->HandleApiCall();
- Handle<Code> construct_stub = isolate()->builtins()->JSConstructStubApi();
-
- int internal_field_count = 0;
- if (!obj->instance_template()->IsUndefined()) {
- Handle<ObjectTemplateInfo> instance_template =
- Handle<ObjectTemplateInfo>(
- ObjectTemplateInfo::cast(obj->instance_template()));
- internal_field_count =
- Smi::cast(instance_template->internal_field_count())->value();
- }
-
- int instance_size = kPointerSize * internal_field_count;
- InstanceType type = INVALID_TYPE;
- switch (instance_type) {
- case JavaScriptObject:
- type = JS_OBJECT_TYPE;
- instance_size += JSObject::kHeaderSize;
- break;
- case InnerGlobalObject:
- type = JS_GLOBAL_OBJECT_TYPE;
- instance_size += JSGlobalObject::kSize;
- break;
- case OuterGlobalObject:
- type = JS_GLOBAL_PROXY_TYPE;
- instance_size += JSGlobalProxy::kSize;
- break;
- default:
- break;
- }
- ASSERT(type != INVALID_TYPE);
-
- Handle<JSFunction> result =
- NewFunction(Factory::empty_symbol(),
- type,
- instance_size,
- code,
- true);
- // Set class name.
- Handle<Object> class_name = Handle<Object>(obj->class_name());
- if (class_name->IsString()) {
- result->shared()->set_instance_class_name(*class_name);
- result->shared()->set_name(*class_name);
- }
-
- Handle<Map> map = Handle<Map>(result->initial_map());
-
- // Mark as undetectable if needed.
- if (obj->undetectable()) {
- map->set_is_undetectable();
- }
-
- // Mark as hidden for the __proto__ accessor if needed.
- if (obj->hidden_prototype()) {
- map->set_is_hidden_prototype();
- }
-
- // Mark as needs_access_check if needed.
- if (obj->needs_access_check()) {
- map->set_is_access_check_needed(true);
- }
-
- // Set interceptor information in the map.
- if (!obj->named_property_handler()->IsUndefined()) {
- map->set_has_named_interceptor();
- }
- if (!obj->indexed_property_handler()->IsUndefined()) {
- map->set_has_indexed_interceptor();
- }
-
- // Set instance call-as-function information in the map.
- if (!obj->instance_call_handler()->IsUndefined()) {
- map->set_has_instance_call_handler();
- }
-
- result->shared()->set_function_data(*obj);
- result->shared()->set_construct_stub(*construct_stub);
- result->shared()->DontAdaptArguments();
-
- // Recursively copy parent templates' accessors, 'data' may be modified.
- Handle<DescriptorArray> array =
- Handle<DescriptorArray>(map->instance_descriptors());
- while (true) {
- Handle<Object> props = Handle<Object>(obj->property_accessors());
- if (!props->IsUndefined()) {
- array = CopyAppendCallbackDescriptors(array, props);
- }
- Handle<Object> parent = Handle<Object>(obj->parent_template());
- if (parent->IsUndefined()) break;
- obj = Handle<FunctionTemplateInfo>::cast(parent);
- }
- if (!array->IsEmpty()) {
- map->set_instance_descriptors(*array);
- }
-
- ASSERT(result->shared()->IsApiFunction());
- return result;
-}
-
-
-Handle<MapCache> Factory::NewMapCache(int at_least_space_for) {
- CALL_HEAP_FUNCTION(isolate(),
- MapCache::Allocate(at_least_space_for), MapCache);
-}
-
-
-MUST_USE_RESULT static MaybeObject* UpdateMapCacheWith(Context* context,
- FixedArray* keys,
- Map* map) {
- Object* result;
- { MaybeObject* maybe_result =
- MapCache::cast(context->map_cache())->Put(keys, map);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- context->set_map_cache(MapCache::cast(result));
- return result;
-}
-
-
-Handle<MapCache> Factory::AddToMapCache(Handle<Context> context,
- Handle<FixedArray> keys,
- Handle<Map> map) {
- CALL_HEAP_FUNCTION(isolate(),
- UpdateMapCacheWith(*context, *keys, *map), MapCache);
-}
-
-
-Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
- Handle<FixedArray> keys) {
- if (context->map_cache()->IsUndefined()) {
- // Allocate the new map cache for the global context.
- Handle<MapCache> new_cache = NewMapCache(24);
- context->set_map_cache(*new_cache);
- }
- // Check to see whether there is a matching element in the cache.
- Handle<MapCache> cache =
- Handle<MapCache>(MapCache::cast(context->map_cache()));
- Handle<Object> result = Handle<Object>(cache->Lookup(*keys));
- if (result->IsMap()) return Handle<Map>::cast(result);
- // Create a new map and add it to the cache.
- Handle<Map> map =
- CopyMap(Handle<Map>(context->object_function()->initial_map()),
- keys->length());
- AddToMapCache(context, keys, map);
- return Handle<Map>(map);
-}
-
-
-void Factory::SetRegExpAtomData(Handle<JSRegExp> regexp,
- JSRegExp::Type type,
- Handle<String> source,
- JSRegExp::Flags flags,
- Handle<Object> data) {
- Handle<FixedArray> store = NewFixedArray(JSRegExp::kAtomDataSize);
-
- store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
- store->set(JSRegExp::kSourceIndex, *source);
- store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags.value()));
- store->set(JSRegExp::kAtomPatternIndex, *data);
- regexp->set_data(*store);
-}
-
-void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
- JSRegExp::Type type,
- Handle<String> source,
- JSRegExp::Flags flags,
- int capture_count) {
- Handle<FixedArray> store = NewFixedArray(JSRegExp::kIrregexpDataSize);
-
- store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
- store->set(JSRegExp::kSourceIndex, *source);
- store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags.value()));
- store->set(JSRegExp::kIrregexpASCIICodeIndex, HEAP->the_hole_value());
- store->set(JSRegExp::kIrregexpUC16CodeIndex, HEAP->the_hole_value());
- store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(0));
- store->set(JSRegExp::kIrregexpCaptureCountIndex,
- Smi::FromInt(capture_count));
- regexp->set_data(*store);
-}
-
-
-
-void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc,
- Handle<JSObject> instance,
- bool* pending_exception) {
- // Configure the instance by adding the properties specified by the
- // instance template.
- Handle<Object> instance_template = Handle<Object>(desc->instance_template());
- if (!instance_template->IsUndefined()) {
- Execution::ConfigureInstance(instance,
- instance_template,
- pending_exception);
- } else {
- *pending_exception = false;
- }
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/factory.h b/src/3rdparty/v8/src/factory.h
deleted file mode 100644
index 71bfdc4..0000000
--- a/src/3rdparty/v8/src/factory.h
+++ /dev/null
@@ -1,436 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FACTORY_H_
-#define V8_FACTORY_H_
-
-#include "globals.h"
-#include "handles.h"
-#include "heap.h"
-
-namespace v8 {
-namespace internal {
-
-// Interface for handle based allocation.
-
-class Factory {
- public:
- // Allocate a new fixed array with undefined entries.
- Handle<FixedArray> NewFixedArray(
- int size,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Allocate a new fixed array with non-existing entries (the hole).
- Handle<FixedArray> NewFixedArrayWithHoles(
- int size,
- PretenureFlag pretenure = NOT_TENURED);
-
- Handle<NumberDictionary> NewNumberDictionary(int at_least_space_for);
-
- Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
-
- Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors);
- Handle<DeoptimizationInputData> NewDeoptimizationInputData(
- int deopt_entry_count,
- PretenureFlag pretenure);
- Handle<DeoptimizationOutputData> NewDeoptimizationOutputData(
- int deopt_entry_count,
- PretenureFlag pretenure);
-
- Handle<String> LookupSymbol(Vector<const char> str);
- Handle<String> LookupAsciiSymbol(Vector<const char> str);
- Handle<String> LookupTwoByteSymbol(Vector<const uc16> str);
- Handle<String> LookupAsciiSymbol(const char* str) {
- return LookupSymbol(CStrVector(str));
- }
-
-
- // String creation functions. Most of the string creation functions take
- // a Heap::PretenureFlag argument to optionally request that they be
- // allocated in the old generation. The pretenure flag defaults to
- // DONT_TENURE.
- //
- // Creates a new String object. There are two String encodings: ASCII and
- // two byte. One should choose between the three string factory functions
- // based on the encoding of the string buffer that the string is
- // initialized from.
- // - ...FromAscii initializes the string from a buffer that is ASCII
- // encoded (it does not check that the buffer is ASCII encoded) and
- // the result will be ASCII encoded.
- // - ...FromUtf8 initializes the string from a buffer that is UTF-8
- // encoded. If the characters are all single-byte characters, the
- // result will be ASCII encoded, otherwise it will converted to two
- // byte.
- // - ...FromTwoByte initializes the string from a buffer that is two
- // byte encoded. If the characters are all single-byte characters,
- // the result will be converted to ASCII, otherwise it will be left as
- // two byte.
- //
- // ASCII strings are pretenured when used as keys in the SourceCodeCache.
- Handle<String> NewStringFromAscii(
- Vector<const char> str,
- PretenureFlag pretenure = NOT_TENURED);
-
- // UTF8 strings are pretenured when used for regexp literal patterns and
- // flags in the parser.
- Handle<String> NewStringFromUtf8(
- Vector<const char> str,
- PretenureFlag pretenure = NOT_TENURED);
-
- Handle<String> NewStringFromTwoByte(
- Vector<const uc16> str,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates and partially initializes an ASCII or TwoByte String. The
- // characters of the string are uninitialized. Currently used in regexp code
- // only, where they are pretenured.
- Handle<String> NewRawAsciiString(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
- Handle<String> NewRawTwoByteString(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Create a new cons string object which consists of a pair of strings.
- Handle<String> NewConsString(Handle<String> first,
- Handle<String> second);
-
- // Create a new string object which holds a substring of a string.
- Handle<String> NewSubString(Handle<String> str,
- int begin,
- int end);
-
- // Creates a new external String object. There are two String encodings
- // in the system: ASCII and two byte. Unlike other String types, it does
- // not make sense to have a UTF-8 factory function for external strings,
- // because we cannot change the underlying buffer.
- Handle<String> NewExternalStringFromAscii(
- ExternalAsciiString::Resource* resource);
- Handle<String> NewExternalStringFromTwoByte(
- ExternalTwoByteString::Resource* resource);
-
- // Create a global (but otherwise uninitialized) context.
- Handle<Context> NewGlobalContext();
-
- // Create a function context.
- Handle<Context> NewFunctionContext(int length,
- Handle<JSFunction> closure);
-
- // Create a 'with' context.
- Handle<Context> NewWithContext(Handle<Context> previous,
- Handle<JSObject> extension,
- bool is_catch_context);
-
- // Return the Symbol matching the passed in string.
- Handle<String> SymbolFromString(Handle<String> value);
-
- // Allocate a new struct. The struct is pretenured (allocated directly in
- // the old generation).
- Handle<Struct> NewStruct(InstanceType type);
-
- Handle<AccessorInfo> NewAccessorInfo();
-
- Handle<Script> NewScript(Handle<String> source);
-
- // Proxies are pretenured when allocated by the bootstrapper.
- Handle<Proxy> NewProxy(Address addr,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Allocate a new proxy. The proxy is pretenured (allocated directly in
- // the old generation).
- Handle<Proxy> NewProxy(const AccessorDescriptor* proxy);
-
- Handle<ByteArray> NewByteArray(int length,
- PretenureFlag pretenure = NOT_TENURED);
-
- Handle<ExternalArray> NewExternalArray(
- int length,
- ExternalArrayType array_type,
- void* external_pointer,
- PretenureFlag pretenure = NOT_TENURED);
-
- Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
- Handle<Object> value);
-
- Handle<Map> NewMap(InstanceType type, int instance_size);
-
- Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
-
- Handle<Map> CopyMapDropDescriptors(Handle<Map> map);
-
- // Copy the map adding more inobject properties if possible without
- // overflowing the instance size.
- Handle<Map> CopyMap(Handle<Map> map, int extra_inobject_props);
-
- Handle<Map> CopyMapDropTransitions(Handle<Map> map);
-
- Handle<Map> GetFastElementsMap(Handle<Map> map);
-
- Handle<Map> GetSlowElementsMap(Handle<Map> map);
-
- Handle<Map> GetExternalArrayElementsMap(Handle<Map> map,
- ExternalArrayType array_type,
- bool safe_to_add_transition);
-
- Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
-
- // Numbers (eg, literals) are pretenured by the parser.
- Handle<Object> NewNumber(double value,
- PretenureFlag pretenure = NOT_TENURED);
-
- Handle<Object> NewNumberFromInt(int value);
- Handle<Object> NewNumberFromUint(uint32_t value);
-
- // These objects are used by the api to create env-independent data
- // structures in the heap.
- Handle<JSObject> NewNeanderObject();
-
- Handle<JSObject> NewArgumentsObject(Handle<Object> callee, int length);
-
- // JS objects are pretenured when allocated by the bootstrapper and
- // runtime.
- Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Global objects are pretenured.
- Handle<GlobalObject> NewGlobalObject(Handle<JSFunction> constructor);
-
- // JS objects are pretenured when allocated by the bootstrapper and
- // runtime.
- Handle<JSObject> NewJSObjectFromMap(Handle<Map> map);
-
- // JS arrays are pretenured when allocated by the parser.
- Handle<JSArray> NewJSArray(int capacity,
- PretenureFlag pretenure = NOT_TENURED);
-
- Handle<JSArray> NewJSArrayWithElements(
- Handle<FixedArray> elements,
- PretenureFlag pretenure = NOT_TENURED);
-
- Handle<JSFunction> NewFunction(Handle<String> name,
- Handle<Object> prototype);
-
- Handle<JSFunction> NewFunctionWithoutPrototype(
- Handle<String> name,
- StrictModeFlag strict_mode);
-
- Handle<JSFunction> NewFunction(Handle<Object> super, bool is_global);
-
- Handle<JSFunction> BaseNewFunctionFromSharedFunctionInfo(
- Handle<SharedFunctionInfo> function_info,
- Handle<Map> function_map,
- PretenureFlag pretenure);
-
- Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
- Handle<SharedFunctionInfo> function_info,
- Handle<Context> context,
- PretenureFlag pretenure = TENURED);
-
- Handle<Code> NewCode(const CodeDesc& desc,
- Code::Flags flags,
- Handle<Object> self_reference,
- bool immovable = false);
-
- Handle<Code> CopyCode(Handle<Code> code);
-
- Handle<Code> CopyCode(Handle<Code> code, Vector<byte> reloc_info);
-
- Handle<Object> ToObject(Handle<Object> object);
- Handle<Object> ToObject(Handle<Object> object,
- Handle<Context> global_context);
-
- // Interface for creating error objects.
-
- Handle<Object> NewError(const char* maker, const char* type,
- Handle<JSArray> args);
- Handle<Object> NewError(const char* maker, const char* type,
- Vector< Handle<Object> > args);
- Handle<Object> NewError(const char* type,
- Vector< Handle<Object> > args);
- Handle<Object> NewError(Handle<String> message);
- Handle<Object> NewError(const char* constructor,
- Handle<String> message);
-
- Handle<Object> NewTypeError(const char* type,
- Vector< Handle<Object> > args);
- Handle<Object> NewTypeError(Handle<String> message);
-
- Handle<Object> NewRangeError(const char* type,
- Vector< Handle<Object> > args);
- Handle<Object> NewRangeError(Handle<String> message);
-
- Handle<Object> NewSyntaxError(const char* type, Handle<JSArray> args);
- Handle<Object> NewSyntaxError(Handle<String> message);
-
- Handle<Object> NewReferenceError(const char* type,
- Vector< Handle<Object> > args);
- Handle<Object> NewReferenceError(Handle<String> message);
-
- Handle<Object> NewEvalError(const char* type,
- Vector< Handle<Object> > args);
-
-
- Handle<JSFunction> NewFunction(Handle<String> name,
- InstanceType type,
- int instance_size,
- Handle<Code> code,
- bool force_initial_map);
-
- Handle<JSFunction> NewFunction(Handle<Map> function_map,
- Handle<SharedFunctionInfo> shared, Handle<Object> prototype);
-
-
- Handle<JSFunction> NewFunctionWithPrototype(Handle<String> name,
- InstanceType type,
- int instance_size,
- Handle<JSObject> prototype,
- Handle<Code> code,
- bool force_initial_map);
-
- Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name,
- Handle<Code> code);
-
- Handle<DescriptorArray> CopyAppendProxyDescriptor(
- Handle<DescriptorArray> array,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes);
-
- Handle<String> NumberToString(Handle<Object> number);
-
- enum ApiInstanceType {
- JavaScriptObject,
- InnerGlobalObject,
- OuterGlobalObject
- };
-
- Handle<JSFunction> CreateApiFunction(
- Handle<FunctionTemplateInfo> data,
- ApiInstanceType type = JavaScriptObject);
-
- Handle<JSFunction> InstallMembers(Handle<JSFunction> function);
-
- // Installs interceptors on the instance. 'desc' is a function template,
- // and instance is an object instance created by the function of this
- // function template.
- void ConfigureInstance(Handle<FunctionTemplateInfo> desc,
- Handle<JSObject> instance,
- bool* pending_exception);
-
-#define ROOT_ACCESSOR(type, name, camel_name) \
- inline Handle<type> name() { \
- return Handle<type>(BitCast<type**>( \
- &isolate()->heap()->roots_[Heap::k##camel_name##RootIndex])); \
- }
- ROOT_LIST(ROOT_ACCESSOR)
-#undef ROOT_ACCESSOR_ACCESSOR
-
-#define SYMBOL_ACCESSOR(name, str) \
- inline Handle<String> name() { \
- return Handle<String>(BitCast<String**>( \
- &isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
- }
- SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
-
- Handle<String> hidden_symbol() {
- return Handle<String>(&isolate()->heap()->hidden_symbol_);
- }
-
- Handle<SharedFunctionInfo> NewSharedFunctionInfo(
- Handle<String> name,
- int number_of_literals,
- Handle<Code> code,
- Handle<SerializedScopeInfo> scope_info);
- Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
-
- Handle<JSMessageObject> NewJSMessageObject(
- Handle<String> type,
- Handle<JSArray> arguments,
- int start_position,
- int end_position,
- Handle<Object> script,
- Handle<Object> stack_trace,
- Handle<Object> stack_frames);
-
- Handle<NumberDictionary> DictionaryAtNumberPut(
- Handle<NumberDictionary>,
- uint32_t key,
- Handle<Object> value);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
-#endif
-
- // Return a map using the map cache in the global context.
- // The key the an ordered set of property names.
- Handle<Map> ObjectLiteralMapFromCache(Handle<Context> context,
- Handle<FixedArray> keys);
-
- // Creates a new FixedArray that holds the data associated with the
- // atom regexp and stores it in the regexp.
- void SetRegExpAtomData(Handle<JSRegExp> regexp,
- JSRegExp::Type type,
- Handle<String> source,
- JSRegExp::Flags flags,
- Handle<Object> match_pattern);
-
- // Creates a new FixedArray that holds the data associated with the
- // irregexp regexp and stores it in the regexp.
- void SetRegExpIrregexpData(Handle<JSRegExp> regexp,
- JSRegExp::Type type,
- Handle<String> source,
- JSRegExp::Flags flags,
- int capture_count);
-
- private:
- Isolate* isolate() { return reinterpret_cast<Isolate*>(this); }
-
- Handle<JSFunction> NewFunctionHelper(Handle<String> name,
- Handle<Object> prototype);
-
- Handle<JSFunction> NewFunctionWithoutPrototypeHelper(
- Handle<String> name,
- StrictModeFlag strict_mode);
-
- Handle<DescriptorArray> CopyAppendCallbackDescriptors(
- Handle<DescriptorArray> array,
- Handle<Object> descriptors);
-
- // Create a new map cache.
- Handle<MapCache> NewMapCache(int at_least_space_for);
-
- // Update the map cache in the global context with (keys, map)
- Handle<MapCache> AddToMapCache(Handle<Context> context,
- Handle<FixedArray> keys,
- Handle<Map> map);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_FACTORY_H_
diff --git a/src/3rdparty/v8/src/fast-dtoa.cc b/src/3rdparty/v8/src/fast-dtoa.cc
deleted file mode 100644
index c7f6aa1..0000000
--- a/src/3rdparty/v8/src/fast-dtoa.cc
+++ /dev/null
@@ -1,736 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "fast-dtoa.h"
-
-#include "cached-powers.h"
-#include "diy-fp.h"
-#include "double.h"
-
-namespace v8 {
-namespace internal {
-
-// The minimal and maximal target exponent define the range of w's binary
-// exponent, where 'w' is the result of multiplying the input by a cached power
-// of ten.
-//
-// A different range might be chosen on a different platform, to optimize digit
-// generation, but a smaller range requires more powers of ten to be cached.
-static const int kMinimalTargetExponent = -60;
-static const int kMaximalTargetExponent = -32;
-
-
-// Adjusts the last digit of the generated number, and screens out generated
-// solutions that may be inaccurate. A solution may be inaccurate if it is
-// outside the safe interval, or if we ctannot prove that it is closer to the
-// input than a neighboring representation of the same length.
-//
-// Input: * buffer containing the digits of too_high / 10^kappa
-// * the buffer's length
-// * distance_too_high_w == (too_high - w).f() * unit
-// * unsafe_interval == (too_high - too_low).f() * unit
-// * rest = (too_high - buffer * 10^kappa).f() * unit
-// * ten_kappa = 10^kappa * unit
-// * unit = the common multiplier
-// Output: returns true if the buffer is guaranteed to contain the closest
-// representable number to the input.
-// Modifies the generated digits in the buffer to approach (round towards) w.
-static bool RoundWeed(Vector<char> buffer,
- int length,
- uint64_t distance_too_high_w,
- uint64_t unsafe_interval,
- uint64_t rest,
- uint64_t ten_kappa,
- uint64_t unit) {
- uint64_t small_distance = distance_too_high_w - unit;
- uint64_t big_distance = distance_too_high_w + unit;
- // Let w_low = too_high - big_distance, and
- // w_high = too_high - small_distance.
- // Note: w_low < w < w_high
- //
- // The real w (* unit) must lie somewhere inside the interval
- // ]w_low; w_high[ (often written as "(w_low; w_high)")
-
- // Basically the buffer currently contains a number in the unsafe interval
- // ]too_low; too_high[ with too_low < w < too_high
- //
- // too_high - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- // ^v 1 unit ^ ^ ^ ^
- // boundary_high --------------------- . . . .
- // ^v 1 unit . . . .
- // - - - - - - - - - - - - - - - - - - - + - - + - - - - - - . .
- // . . ^ . .
- // . big_distance . . .
- // . . . . rest
- // small_distance . . . .
- // v . . . .
- // w_high - - - - - - - - - - - - - - - - - - . . . .
- // ^v 1 unit . . . .
- // w ---------------------------------------- . . . .
- // ^v 1 unit v . . .
- // w_low - - - - - - - - - - - - - - - - - - - - - . . .
- // . . v
- // buffer --------------------------------------------------+-------+--------
- // . .
- // safe_interval .
- // v .
- // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - .
- // ^v 1 unit .
- // boundary_low ------------------------- unsafe_interval
- // ^v 1 unit v
- // too_low - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- //
- //
- // Note that the value of buffer could lie anywhere inside the range too_low
- // to too_high.
- //
- // boundary_low, boundary_high and w are approximations of the real boundaries
- // and v (the input number). They are guaranteed to be precise up to one unit.
- // In fact the error is guaranteed to be strictly less than one unit.
- //
- // Anything that lies outside the unsafe interval is guaranteed not to round
- // to v when read again.
- // Anything that lies inside the safe interval is guaranteed to round to v
- // when read again.
- // If the number inside the buffer lies inside the unsafe interval but not
- // inside the safe interval then we simply do not know and bail out (returning
- // false).
- //
- // Similarly we have to take into account the imprecision of 'w' when finding
- // the closest representation of 'w'. If we have two potential
- // representations, and one is closer to both w_low and w_high, then we know
- // it is closer to the actual value v.
- //
- // By generating the digits of too_high we got the largest (closest to
- // too_high) buffer that is still in the unsafe interval. In the case where
- // w_high < buffer < too_high we try to decrement the buffer.
- // This way the buffer approaches (rounds towards) w.
- // There are 3 conditions that stop the decrementation process:
- // 1) the buffer is already below w_high
- // 2) decrementing the buffer would make it leave the unsafe interval
- // 3) decrementing the buffer would yield a number below w_high and farther
- // away than the current number. In other words:
- // (buffer{-1} < w_high) && w_high - buffer{-1} > buffer - w_high
- // Instead of using the buffer directly we use its distance to too_high.
- // Conceptually rest ~= too_high - buffer
- // We need to do the following tests in this order to avoid over- and
- // underflows.
- ASSERT(rest <= unsafe_interval);
- while (rest < small_distance && // Negated condition 1
- unsafe_interval - rest >= ten_kappa && // Negated condition 2
- (rest + ten_kappa < small_distance || // buffer{-1} > w_high
- small_distance - rest >= rest + ten_kappa - small_distance)) {
- buffer[length - 1]--;
- rest += ten_kappa;
- }
-
- // We have approached w+ as much as possible. We now test if approaching w-
- // would require changing the buffer. If yes, then we have two possible
- // representations close to w, but we cannot decide which one is closer.
- if (rest < big_distance &&
- unsafe_interval - rest >= ten_kappa &&
- (rest + ten_kappa < big_distance ||
- big_distance - rest > rest + ten_kappa - big_distance)) {
- return false;
- }
-
- // Weeding test.
- // The safe interval is [too_low + 2 ulp; too_high - 2 ulp]
- // Since too_low = too_high - unsafe_interval this is equivalent to
- // [too_high - unsafe_interval + 4 ulp; too_high - 2 ulp]
- // Conceptually we have: rest ~= too_high - buffer
- return (2 * unit <= rest) && (rest <= unsafe_interval - 4 * unit);
-}
-
-
-// Rounds the buffer upwards if the result is closer to v by possibly adding
-// 1 to the buffer. If the precision of the calculation is not sufficient to
-// round correctly, return false.
-// The rounding might shift the whole buffer in which case the kappa is
-// adjusted. For example "99", kappa = 3 might become "10", kappa = 4.
-//
-// If 2*rest > ten_kappa then the buffer needs to be round up.
-// rest can have an error of +/- 1 unit. This function accounts for the
-// imprecision and returns false, if the rounding direction cannot be
-// unambiguously determined.
-//
-// Precondition: rest < ten_kappa.
-static bool RoundWeedCounted(Vector<char> buffer,
- int length,
- uint64_t rest,
- uint64_t ten_kappa,
- uint64_t unit,
- int* kappa) {
- ASSERT(rest < ten_kappa);
- // The following tests are done in a specific order to avoid overflows. They
- // will work correctly with any uint64 values of rest < ten_kappa and unit.
- //
- // If the unit is too big, then we don't know which way to round. For example
- // a unit of 50 means that the real number lies within rest +/- 50. If
- // 10^kappa == 40 then there is no way to tell which way to round.
- if (unit >= ten_kappa) return false;
- // Even if unit is just half the size of 10^kappa we are already completely
- // lost. (And after the previous test we know that the expression will not
- // over/underflow.)
- if (ten_kappa - unit <= unit) return false;
- // If 2 * (rest + unit) <= 10^kappa we can safely round down.
- if ((ten_kappa - rest > rest) && (ten_kappa - 2 * rest >= 2 * unit)) {
- return true;
- }
- // If 2 * (rest - unit) >= 10^kappa, then we can safely round up.
- if ((rest > unit) && (ten_kappa - (rest - unit) <= (rest - unit))) {
- // Increment the last digit recursively until we find a non '9' digit.
- buffer[length - 1]++;
- for (int i = length - 1; i > 0; --i) {
- if (buffer[i] != '0' + 10) break;
- buffer[i] = '0';
- buffer[i - 1]++;
- }
- // If the first digit is now '0'+ 10 we had a buffer with all '9's. With the
- // exception of the first digit all digits are now '0'. Simply switch the
- // first digit to '1' and adjust the kappa. Example: "99" becomes "10" and
- // the power (the kappa) is increased.
- if (buffer[0] == '0' + 10) {
- buffer[0] = '1';
- (*kappa) += 1;
- }
- return true;
- }
- return false;
-}
-
-
-static const uint32_t kTen4 = 10000;
-static const uint32_t kTen5 = 100000;
-static const uint32_t kTen6 = 1000000;
-static const uint32_t kTen7 = 10000000;
-static const uint32_t kTen8 = 100000000;
-static const uint32_t kTen9 = 1000000000;
-
-// Returns the biggest power of ten that is less than or equal than the given
-// number. We furthermore receive the maximum number of bits 'number' has.
-// If number_bits == 0 then 0^-1 is returned
-// The number of bits must be <= 32.
-// Precondition: number < (1 << (number_bits + 1)).
-static void BiggestPowerTen(uint32_t number,
- int number_bits,
- uint32_t* power,
- int* exponent) {
- switch (number_bits) {
- case 32:
- case 31:
- case 30:
- if (kTen9 <= number) {
- *power = kTen9;
- *exponent = 9;
- break;
- } // else fallthrough
- case 29:
- case 28:
- case 27:
- if (kTen8 <= number) {
- *power = kTen8;
- *exponent = 8;
- break;
- } // else fallthrough
- case 26:
- case 25:
- case 24:
- if (kTen7 <= number) {
- *power = kTen7;
- *exponent = 7;
- break;
- } // else fallthrough
- case 23:
- case 22:
- case 21:
- case 20:
- if (kTen6 <= number) {
- *power = kTen6;
- *exponent = 6;
- break;
- } // else fallthrough
- case 19:
- case 18:
- case 17:
- if (kTen5 <= number) {
- *power = kTen5;
- *exponent = 5;
- break;
- } // else fallthrough
- case 16:
- case 15:
- case 14:
- if (kTen4 <= number) {
- *power = kTen4;
- *exponent = 4;
- break;
- } // else fallthrough
- case 13:
- case 12:
- case 11:
- case 10:
- if (1000 <= number) {
- *power = 1000;
- *exponent = 3;
- break;
- } // else fallthrough
- case 9:
- case 8:
- case 7:
- if (100 <= number) {
- *power = 100;
- *exponent = 2;
- break;
- } // else fallthrough
- case 6:
- case 5:
- case 4:
- if (10 <= number) {
- *power = 10;
- *exponent = 1;
- break;
- } // else fallthrough
- case 3:
- case 2:
- case 1:
- if (1 <= number) {
- *power = 1;
- *exponent = 0;
- break;
- } // else fallthrough
- case 0:
- *power = 0;
- *exponent = -1;
- break;
- default:
- // Following assignments are here to silence compiler warnings.
- *power = 0;
- *exponent = 0;
- UNREACHABLE();
- }
-}
-
-
-// Generates the digits of input number w.
-// w is a floating-point number (DiyFp), consisting of a significand and an
-// exponent. Its exponent is bounded by kMinimalTargetExponent and
-// kMaximalTargetExponent.
-// Hence -60 <= w.e() <= -32.
-//
-// Returns false if it fails, in which case the generated digits in the buffer
-// should not be used.
-// Preconditions:
-// * low, w and high are correct up to 1 ulp (unit in the last place). That
-// is, their error must be less than a unit of their last digits.
-// * low.e() == w.e() == high.e()
-// * low < w < high, and taking into account their error: low~ <= high~
-// * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent
-// Postconditions: returns false if procedure fails.
-// otherwise:
-// * buffer is not null-terminated, but len contains the number of digits.
-// * buffer contains the shortest possible decimal digit-sequence
-// such that LOW < buffer * 10^kappa < HIGH, where LOW and HIGH are the
-// correct values of low and high (without their error).
-// * if more than one decimal representation gives the minimal number of
-// decimal digits then the one closest to W (where W is the correct value
-// of w) is chosen.
-// Remark: this procedure takes into account the imprecision of its input
-// numbers. If the precision is not enough to guarantee all the postconditions
-// then false is returned. This usually happens rarely (~0.5%).
-//
-// Say, for the sake of example, that
-// w.e() == -48, and w.f() == 0x1234567890abcdef
-// w's value can be computed by w.f() * 2^w.e()
-// We can obtain w's integral digits by simply shifting w.f() by -w.e().
-// -> w's integral part is 0x1234
-// w's fractional part is therefore 0x567890abcdef.
-// Printing w's integral part is easy (simply print 0x1234 in decimal).
-// In order to print its fraction we repeatedly multiply the fraction by 10 and
-// get each digit. Example the first digit after the point would be computed by
-// (0x567890abcdef * 10) >> 48. -> 3
-// The whole thing becomes slightly more complicated because we want to stop
-// once we have enough digits. That is, once the digits inside the buffer
-// represent 'w' we can stop. Everything inside the interval low - high
-// represents w. However we have to pay attention to low, high and w's
-// imprecision.
-static bool DigitGen(DiyFp low,
- DiyFp w,
- DiyFp high,
- Vector<char> buffer,
- int* length,
- int* kappa) {
- ASSERT(low.e() == w.e() && w.e() == high.e());
- ASSERT(low.f() + 1 <= high.f() - 1);
- ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
- // low, w and high are imprecise, but by less than one ulp (unit in the last
- // place).
- // If we remove (resp. add) 1 ulp from low (resp. high) we are certain that
- // the new numbers are outside of the interval we want the final
- // representation to lie in.
- // Inversely adding (resp. removing) 1 ulp from low (resp. high) would yield
- // numbers that are certain to lie in the interval. We will use this fact
- // later on.
- // We will now start by generating the digits within the uncertain
- // interval. Later we will weed out representations that lie outside the safe
- // interval and thus _might_ lie outside the correct interval.
- uint64_t unit = 1;
- DiyFp too_low = DiyFp(low.f() - unit, low.e());
- DiyFp too_high = DiyFp(high.f() + unit, high.e());
- // too_low and too_high are guaranteed to lie outside the interval we want the
- // generated number in.
- DiyFp unsafe_interval = DiyFp::Minus(too_high, too_low);
- // We now cut the input number into two parts: the integral digits and the
- // fractionals. We will not write any decimal separator though, but adapt
- // kappa instead.
- // Reminder: we are currently computing the digits (stored inside the buffer)
- // such that: too_low < buffer * 10^kappa < too_high
- // We use too_high for the digit_generation and stop as soon as possible.
- // If we stop early we effectively round down.
- DiyFp one = DiyFp(static_cast<uint64_t>(1) << -w.e(), w.e());
- // Division by one is a shift.
- uint32_t integrals = static_cast<uint32_t>(too_high.f() >> -one.e());
- // Modulo by one is an and.
- uint64_t fractionals = too_high.f() & (one.f() - 1);
- uint32_t divisor;
- int divisor_exponent;
- BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
- &divisor, &divisor_exponent);
- *kappa = divisor_exponent + 1;
- *length = 0;
- // Loop invariant: buffer = too_high / 10^kappa (integer division)
- // The invariant holds for the first iteration: kappa has been initialized
- // with the divisor exponent + 1. And the divisor is the biggest power of ten
- // that is smaller than integrals.
- while (*kappa > 0) {
- int digit = integrals / divisor;
- buffer[*length] = '0' + digit;
- (*length)++;
- integrals %= divisor;
- (*kappa)--;
- // Note that kappa now equals the exponent of the divisor and that the
- // invariant thus holds again.
- uint64_t rest =
- (static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
- // Invariant: too_high = buffer * 10^kappa + DiyFp(rest, one.e())
- // Reminder: unsafe_interval.e() == one.e()
- if (rest < unsafe_interval.f()) {
- // Rounding down (by not emitting the remaining digits) yields a number
- // that lies within the unsafe interval.
- return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f(),
- unsafe_interval.f(), rest,
- static_cast<uint64_t>(divisor) << -one.e(), unit);
- }
- divisor /= 10;
- }
-
- // The integrals have been generated. We are at the point of the decimal
- // separator. In the following loop we simply multiply the remaining digits by
- // 10 and divide by one. We just need to pay attention to multiply associated
- // data (like the interval or 'unit'), too.
- // Note that the multiplication by 10 does not overflow, because w.e >= -60
- // and thus one.e >= -60.
- ASSERT(one.e() >= -60);
- ASSERT(fractionals < one.f());
- ASSERT(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
- while (true) {
- fractionals *= 10;
- unit *= 10;
- unsafe_interval.set_f(unsafe_interval.f() * 10);
- // Integer division by one.
- int digit = static_cast<int>(fractionals >> -one.e());
- buffer[*length] = '0' + digit;
- (*length)++;
- fractionals &= one.f() - 1; // Modulo by one.
- (*kappa)--;
- if (fractionals < unsafe_interval.f()) {
- return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f() * unit,
- unsafe_interval.f(), fractionals, one.f(), unit);
- }
- }
-}
-
-
-
-// Generates (at most) requested_digits of input number w.
-// w is a floating-point number (DiyFp), consisting of a significand and an
-// exponent. Its exponent is bounded by kMinimalTargetExponent and
-// kMaximalTargetExponent.
-// Hence -60 <= w.e() <= -32.
-//
-// Returns false if it fails, in which case the generated digits in the buffer
-// should not be used.
-// Preconditions:
-// * w is correct up to 1 ulp (unit in the last place). That
-// is, its error must be strictly less than a unit of its last digit.
-// * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent
-//
-// Postconditions: returns false if procedure fails.
-// otherwise:
-// * buffer is not null-terminated, but length contains the number of
-// digits.
-// * the representation in buffer is the most precise representation of
-// requested_digits digits.
-// * buffer contains at most requested_digits digits of w. If there are less
-// than requested_digits digits then some trailing '0's have been removed.
-// * kappa is such that
-// w = buffer * 10^kappa + eps with |eps| < 10^kappa / 2.
-//
-// Remark: This procedure takes into account the imprecision of its input
-// numbers. If the precision is not enough to guarantee all the postconditions
-// then false is returned. This usually happens rarely, but the failure-rate
-// increases with higher requested_digits.
-static bool DigitGenCounted(DiyFp w,
- int requested_digits,
- Vector<char> buffer,
- int* length,
- int* kappa) {
- ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
- ASSERT(kMinimalTargetExponent >= -60);
- ASSERT(kMaximalTargetExponent <= -32);
- // w is assumed to have an error less than 1 unit. Whenever w is scaled we
- // also scale its error.
- uint64_t w_error = 1;
- // We cut the input number into two parts: the integral digits and the
- // fractional digits. We don't emit any decimal separator, but adapt kappa
- // instead. Example: instead of writing "1.2" we put "12" into the buffer and
- // increase kappa by 1.
- DiyFp one = DiyFp(static_cast<uint64_t>(1) << -w.e(), w.e());
- // Division by one is a shift.
- uint32_t integrals = static_cast<uint32_t>(w.f() >> -one.e());
- // Modulo by one is an and.
- uint64_t fractionals = w.f() & (one.f() - 1);
- uint32_t divisor;
- int divisor_exponent;
- BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
- &divisor, &divisor_exponent);
- *kappa = divisor_exponent + 1;
- *length = 0;
-
- // Loop invariant: buffer = w / 10^kappa (integer division)
- // The invariant holds for the first iteration: kappa has been initialized
- // with the divisor exponent + 1. And the divisor is the biggest power of ten
- // that is smaller than 'integrals'.
- while (*kappa > 0) {
- int digit = integrals / divisor;
- buffer[*length] = '0' + digit;
- (*length)++;
- requested_digits--;
- integrals %= divisor;
- (*kappa)--;
- // Note that kappa now equals the exponent of the divisor and that the
- // invariant thus holds again.
- if (requested_digits == 0) break;
- divisor /= 10;
- }
-
- if (requested_digits == 0) {
- uint64_t rest =
- (static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
- return RoundWeedCounted(buffer, *length, rest,
- static_cast<uint64_t>(divisor) << -one.e(), w_error,
- kappa);
- }
-
- // The integrals have been generated. We are at the point of the decimal
- // separator. In the following loop we simply multiply the remaining digits by
- // 10 and divide by one. We just need to pay attention to multiply associated
- // data (the 'unit'), too.
- // Note that the multiplication by 10 does not overflow, because w.e >= -60
- // and thus one.e >= -60.
- ASSERT(one.e() >= -60);
- ASSERT(fractionals < one.f());
- ASSERT(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
- while (requested_digits > 0 && fractionals > w_error) {
- fractionals *= 10;
- w_error *= 10;
- // Integer division by one.
- int digit = static_cast<int>(fractionals >> -one.e());
- buffer[*length] = '0' + digit;
- (*length)++;
- requested_digits--;
- fractionals &= one.f() - 1; // Modulo by one.
- (*kappa)--;
- }
- if (requested_digits != 0) return false;
- return RoundWeedCounted(buffer, *length, fractionals, one.f(), w_error,
- kappa);
-}
-
-
-// Provides a decimal representation of v.
-// Returns true if it succeeds, otherwise the result cannot be trusted.
-// There will be *length digits inside the buffer (not null-terminated).
-// If the function returns true then
-// v == (double) (buffer * 10^decimal_exponent).
-// The digits in the buffer are the shortest representation possible: no
-// 0.09999999999999999 instead of 0.1. The shorter representation will even be
-// chosen even if the longer one would be closer to v.
-// The last digit will be closest to the actual v. That is, even if several
-// digits might correctly yield 'v' when read again, the closest will be
-// computed.
-static bool Grisu3(double v,
- Vector<char> buffer,
- int* length,
- int* decimal_exponent) {
- DiyFp w = Double(v).AsNormalizedDiyFp();
- // boundary_minus and boundary_plus are the boundaries between v and its
- // closest floating-point neighbors. Any number strictly between
- // boundary_minus and boundary_plus will round to v when convert to a double.
- // Grisu3 will never output representations that lie exactly on a boundary.
- DiyFp boundary_minus, boundary_plus;
- Double(v).NormalizedBoundaries(&boundary_minus, &boundary_plus);
- ASSERT(boundary_plus.e() == w.e());
- DiyFp ten_mk; // Cached power of ten: 10^-k
- int mk; // -k
- int ten_mk_minimal_binary_exponent =
- kMinimalTargetExponent - (w.e() + DiyFp::kSignificandSize);
- int ten_mk_maximal_binary_exponent =
- kMaximalTargetExponent - (w.e() + DiyFp::kSignificandSize);
- PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
- ten_mk_minimal_binary_exponent,
- ten_mk_maximal_binary_exponent,
- &ten_mk, &mk);
- ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
- DiyFp::kSignificandSize) &&
- (kMaximalTargetExponent >= w.e() + ten_mk.e() +
- DiyFp::kSignificandSize));
- // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
- // 64 bit significand and ten_mk is thus only precise up to 64 bits.
-
- // The DiyFp::Times procedure rounds its result, and ten_mk is approximated
- // too. The variable scaled_w (as well as scaled_boundary_minus/plus) are now
- // off by a small amount.
- // In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w.
- // In other words: let f = scaled_w.f() and e = scaled_w.e(), then
- // (f-1) * 2^e < w*10^k < (f+1) * 2^e
- DiyFp scaled_w = DiyFp::Times(w, ten_mk);
- ASSERT(scaled_w.e() ==
- boundary_plus.e() + ten_mk.e() + DiyFp::kSignificandSize);
- // In theory it would be possible to avoid some recomputations by computing
- // the difference between w and boundary_minus/plus (a power of 2) and to
- // compute scaled_boundary_minus/plus by subtracting/adding from
- // scaled_w. However the code becomes much less readable and the speed
- // enhancements are not terriffic.
- DiyFp scaled_boundary_minus = DiyFp::Times(boundary_minus, ten_mk);
- DiyFp scaled_boundary_plus = DiyFp::Times(boundary_plus, ten_mk);
-
- // DigitGen will generate the digits of scaled_w. Therefore we have
- // v == (double) (scaled_w * 10^-mk).
- // Set decimal_exponent == -mk and pass it to DigitGen. If scaled_w is not an
- // integer than it will be updated. For instance if scaled_w == 1.23 then
- // the buffer will be filled with "123" und the decimal_exponent will be
- // decreased by 2.
- int kappa;
- bool result = DigitGen(scaled_boundary_minus, scaled_w, scaled_boundary_plus,
- buffer, length, &kappa);
- *decimal_exponent = -mk + kappa;
- return result;
-}
-
-
-// The "counted" version of grisu3 (see above) only generates requested_digits
-// number of digits. This version does not generate the shortest representation,
-// and with enough requested digits 0.1 will at some point print as 0.9999999...
-// Grisu3 is too imprecise for real halfway cases (1.5 will not work) and
-// therefore the rounding strategy for halfway cases is irrelevant.
-static bool Grisu3Counted(double v,
- int requested_digits,
- Vector<char> buffer,
- int* length,
- int* decimal_exponent) {
- DiyFp w = Double(v).AsNormalizedDiyFp();
- DiyFp ten_mk; // Cached power of ten: 10^-k
- int mk; // -k
- int ten_mk_minimal_binary_exponent =
- kMinimalTargetExponent - (w.e() + DiyFp::kSignificandSize);
- int ten_mk_maximal_binary_exponent =
- kMaximalTargetExponent - (w.e() + DiyFp::kSignificandSize);
- PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
- ten_mk_minimal_binary_exponent,
- ten_mk_maximal_binary_exponent,
- &ten_mk, &mk);
- ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
- DiyFp::kSignificandSize) &&
- (kMaximalTargetExponent >= w.e() + ten_mk.e() +
- DiyFp::kSignificandSize));
- // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
- // 64 bit significand and ten_mk is thus only precise up to 64 bits.
-
- // The DiyFp::Times procedure rounds its result, and ten_mk is approximated
- // too. The variable scaled_w (as well as scaled_boundary_minus/plus) are now
- // off by a small amount.
- // In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w.
- // In other words: let f = scaled_w.f() and e = scaled_w.e(), then
- // (f-1) * 2^e < w*10^k < (f+1) * 2^e
- DiyFp scaled_w = DiyFp::Times(w, ten_mk);
-
- // We now have (double) (scaled_w * 10^-mk).
- // DigitGen will generate the first requested_digits digits of scaled_w and
- // return together with a kappa such that scaled_w ~= buffer * 10^kappa. (It
- // will not always be exactly the same since DigitGenCounted only produces a
- // limited number of digits.)
- int kappa;
- bool result = DigitGenCounted(scaled_w, requested_digits,
- buffer, length, &kappa);
- *decimal_exponent = -mk + kappa;
- return result;
-}
-
-
-bool FastDtoa(double v,
- FastDtoaMode mode,
- int requested_digits,
- Vector<char> buffer,
- int* length,
- int* decimal_point) {
- ASSERT(v > 0);
- ASSERT(!Double(v).IsSpecial());
-
- bool result = false;
- int decimal_exponent = 0;
- switch (mode) {
- case FAST_DTOA_SHORTEST:
- result = Grisu3(v, buffer, length, &decimal_exponent);
- break;
- case FAST_DTOA_PRECISION:
- result = Grisu3Counted(v, requested_digits,
- buffer, length, &decimal_exponent);
- break;
- default:
- UNREACHABLE();
- }
- if (result) {
- *decimal_point = *length + decimal_exponent;
- buffer[*length] = '\0';
- }
- return result;
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/fast-dtoa.h b/src/3rdparty/v8/src/fast-dtoa.h
deleted file mode 100644
index 94c22ec..0000000
--- a/src/3rdparty/v8/src/fast-dtoa.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FAST_DTOA_H_
-#define V8_FAST_DTOA_H_
-
-namespace v8 {
-namespace internal {
-
-enum FastDtoaMode {
- // Computes the shortest representation of the given input. The returned
- // result will be the most accurate number of this length. Longer
- // representations might be more accurate.
- FAST_DTOA_SHORTEST,
- // Computes a representation where the precision (number of digits) is
- // given as input. The precision is independent of the decimal point.
- FAST_DTOA_PRECISION
-};
-
-// FastDtoa will produce at most kFastDtoaMaximalLength digits. This does not
-// include the terminating '\0' character.
-static const int kFastDtoaMaximalLength = 17;
-
-// Provides a decimal representation of v.
-// The result should be interpreted as buffer * 10^(point - length).
-//
-// Precondition:
-// * v must be a strictly positive finite double.
-//
-// Returns true if it succeeds, otherwise the result can not be trusted.
-// There will be *length digits inside the buffer followed by a null terminator.
-// If the function returns true and mode equals
-// - FAST_DTOA_SHORTEST, then
-// the parameter requested_digits is ignored.
-// The result satisfies
-// v == (double) (buffer * 10^(point - length)).
-// The digits in the buffer are the shortest representation possible. E.g.
-// if 0.099999999999 and 0.1 represent the same double then "1" is returned
-// with point = 0.
-// The last digit will be closest to the actual v. That is, even if several
-// digits might correctly yield 'v' when read again, the buffer will contain
-// the one closest to v.
-// - FAST_DTOA_PRECISION, then
-// the buffer contains requested_digits digits.
-// the difference v - (buffer * 10^(point-length)) is closest to zero for
-// all possible representations of requested_digits digits.
-// If there are two values that are equally close, then FastDtoa returns
-// false.
-// For both modes the buffer must be large enough to hold the result.
-bool FastDtoa(double d,
- FastDtoaMode mode,
- int requested_digits,
- Vector<char> buffer,
- int* length,
- int* decimal_point);
-
-} } // namespace v8::internal
-
-#endif // V8_FAST_DTOA_H_
diff --git a/src/3rdparty/v8/src/fixed-dtoa.cc b/src/3rdparty/v8/src/fixed-dtoa.cc
deleted file mode 100644
index 8ad88f6..0000000
--- a/src/3rdparty/v8/src/fixed-dtoa.cc
+++ /dev/null
@@ -1,405 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <math.h>
-
-#include "v8.h"
-
-#include "double.h"
-#include "fixed-dtoa.h"
-
-namespace v8 {
-namespace internal {
-
-// Represents a 128bit type. This class should be replaced by a native type on
-// platforms that support 128bit integers.
-class UInt128 {
- public:
- UInt128() : high_bits_(0), low_bits_(0) { }
- UInt128(uint64_t high, uint64_t low) : high_bits_(high), low_bits_(low) { }
-
- void Multiply(uint32_t multiplicand) {
- uint64_t accumulator;
-
- accumulator = (low_bits_ & kMask32) * multiplicand;
- uint32_t part = static_cast<uint32_t>(accumulator & kMask32);
- accumulator >>= 32;
- accumulator = accumulator + (low_bits_ >> 32) * multiplicand;
- low_bits_ = (accumulator << 32) + part;
- accumulator >>= 32;
- accumulator = accumulator + (high_bits_ & kMask32) * multiplicand;
- part = static_cast<uint32_t>(accumulator & kMask32);
- accumulator >>= 32;
- accumulator = accumulator + (high_bits_ >> 32) * multiplicand;
- high_bits_ = (accumulator << 32) + part;
- ASSERT((accumulator >> 32) == 0);
- }
-
- void Shift(int shift_amount) {
- ASSERT(-64 <= shift_amount && shift_amount <= 64);
- if (shift_amount == 0) {
- return;
- } else if (shift_amount == -64) {
- high_bits_ = low_bits_;
- low_bits_ = 0;
- } else if (shift_amount == 64) {
- low_bits_ = high_bits_;
- high_bits_ = 0;
- } else if (shift_amount <= 0) {
- high_bits_ <<= -shift_amount;
- high_bits_ += low_bits_ >> (64 + shift_amount);
- low_bits_ <<= -shift_amount;
- } else {
- low_bits_ >>= shift_amount;
- low_bits_ += high_bits_ << (64 - shift_amount);
- high_bits_ >>= shift_amount;
- }
- }
-
- // Modifies *this to *this MOD (2^power).
- // Returns *this DIV (2^power).
- int DivModPowerOf2(int power) {
- if (power >= 64) {
- int result = static_cast<int>(high_bits_ >> (power - 64));
- high_bits_ -= static_cast<uint64_t>(result) << (power - 64);
- return result;
- } else {
- uint64_t part_low = low_bits_ >> power;
- uint64_t part_high = high_bits_ << (64 - power);
- int result = static_cast<int>(part_low + part_high);
- high_bits_ = 0;
- low_bits_ -= part_low << power;
- return result;
- }
- }
-
- bool IsZero() const {
- return high_bits_ == 0 && low_bits_ == 0;
- }
-
- int BitAt(int position) {
- if (position >= 64) {
- return static_cast<int>(high_bits_ >> (position - 64)) & 1;
- } else {
- return static_cast<int>(low_bits_ >> position) & 1;
- }
- }
-
- private:
- static const uint64_t kMask32 = 0xFFFFFFFF;
- // Value == (high_bits_ << 64) + low_bits_
- uint64_t high_bits_;
- uint64_t low_bits_;
-};
-
-
-static const int kDoubleSignificandSize = 53; // Includes the hidden bit.
-
-
-static void FillDigits32FixedLength(uint32_t number, int requested_length,
- Vector<char> buffer, int* length) {
- for (int i = requested_length - 1; i >= 0; --i) {
- buffer[(*length) + i] = '0' + number % 10;
- number /= 10;
- }
- *length += requested_length;
-}
-
-
-static void FillDigits32(uint32_t number, Vector<char> buffer, int* length) {
- int number_length = 0;
- // We fill the digits in reverse order and exchange them afterwards.
- while (number != 0) {
- int digit = number % 10;
- number /= 10;
- buffer[(*length) + number_length] = '0' + digit;
- number_length++;
- }
- // Exchange the digits.
- int i = *length;
- int j = *length + number_length - 1;
- while (i < j) {
- char tmp = buffer[i];
- buffer[i] = buffer[j];
- buffer[j] = tmp;
- i++;
- j--;
- }
- *length += number_length;
-}
-
-
-static void FillDigits64FixedLength(uint64_t number, int requested_length,
- Vector<char> buffer, int* length) {
- const uint32_t kTen7 = 10000000;
- // For efficiency cut the number into 3 uint32_t parts, and print those.
- uint32_t part2 = static_cast<uint32_t>(number % kTen7);
- number /= kTen7;
- uint32_t part1 = static_cast<uint32_t>(number % kTen7);
- uint32_t part0 = static_cast<uint32_t>(number / kTen7);
-
- FillDigits32FixedLength(part0, 3, buffer, length);
- FillDigits32FixedLength(part1, 7, buffer, length);
- FillDigits32FixedLength(part2, 7, buffer, length);
-}
-
-
-static void FillDigits64(uint64_t number, Vector<char> buffer, int* length) {
- const uint32_t kTen7 = 10000000;
- // For efficiency cut the number into 3 uint32_t parts, and print those.
- uint32_t part2 = static_cast<uint32_t>(number % kTen7);
- number /= kTen7;
- uint32_t part1 = static_cast<uint32_t>(number % kTen7);
- uint32_t part0 = static_cast<uint32_t>(number / kTen7);
-
- if (part0 != 0) {
- FillDigits32(part0, buffer, length);
- FillDigits32FixedLength(part1, 7, buffer, length);
- FillDigits32FixedLength(part2, 7, buffer, length);
- } else if (part1 != 0) {
- FillDigits32(part1, buffer, length);
- FillDigits32FixedLength(part2, 7, buffer, length);
- } else {
- FillDigits32(part2, buffer, length);
- }
-}
-
-
-static void RoundUp(Vector<char> buffer, int* length, int* decimal_point) {
- // An empty buffer represents 0.
- if (*length == 0) {
- buffer[0] = '1';
- *decimal_point = 1;
- *length = 1;
- return;
- }
- // Round the last digit until we either have a digit that was not '9' or until
- // we reached the first digit.
- buffer[(*length) - 1]++;
- for (int i = (*length) - 1; i > 0; --i) {
- if (buffer[i] != '0' + 10) {
- return;
- }
- buffer[i] = '0';
- buffer[i - 1]++;
- }
- // If the first digit is now '0' + 10, we would need to set it to '0' and add
- // a '1' in front. However we reach the first digit only if all following
- // digits had been '9' before rounding up. Now all trailing digits are '0' and
- // we simply switch the first digit to '1' and update the decimal-point
- // (indicating that the point is now one digit to the right).
- if (buffer[0] == '0' + 10) {
- buffer[0] = '1';
- (*decimal_point)++;
- }
-}
-
-
-// The given fractionals number represents a fixed-point number with binary
-// point at bit (-exponent).
-// Preconditions:
-// -128 <= exponent <= 0.
-// 0 <= fractionals * 2^exponent < 1
-// The buffer holds the result.
-// The function will round its result. During the rounding-process digits not
-// generated by this function might be updated, and the decimal-point variable
-// might be updated. If this function generates the digits 99 and the buffer
-// already contained "199" (thus yielding a buffer of "19999") then a
-// rounding-up will change the contents of the buffer to "20000".
-static void FillFractionals(uint64_t fractionals, int exponent,
- int fractional_count, Vector<char> buffer,
- int* length, int* decimal_point) {
- ASSERT(-128 <= exponent && exponent <= 0);
- // 'fractionals' is a fixed-point number, with binary point at bit
- // (-exponent). Inside the function the non-converted remainder of fractionals
- // is a fixed-point number, with binary point at bit 'point'.
- if (-exponent <= 64) {
- // One 64 bit number is sufficient.
- ASSERT(fractionals >> 56 == 0);
- int point = -exponent;
- for (int i = 0; i < fractional_count; ++i) {
- if (fractionals == 0) break;
- // Instead of multiplying by 10 we multiply by 5 and adjust the point
- // location. This way the fractionals variable will not overflow.
- // Invariant at the beginning of the loop: fractionals < 2^point.
- // Initially we have: point <= 64 and fractionals < 2^56
- // After each iteration the point is decremented by one.
- // Note that 5^3 = 125 < 128 = 2^7.
- // Therefore three iterations of this loop will not overflow fractionals
- // (even without the subtraction at the end of the loop body). At this
- // time point will satisfy point <= 61 and therefore fractionals < 2^point
- // and any further multiplication of fractionals by 5 will not overflow.
- fractionals *= 5;
- point--;
- int digit = static_cast<int>(fractionals >> point);
- buffer[*length] = '0' + digit;
- (*length)++;
- fractionals -= static_cast<uint64_t>(digit) << point;
- }
- // If the first bit after the point is set we have to round up.
- if (((fractionals >> (point - 1)) & 1) == 1) {
- RoundUp(buffer, length, decimal_point);
- }
- } else { // We need 128 bits.
- ASSERT(64 < -exponent && -exponent <= 128);
- UInt128 fractionals128 = UInt128(fractionals, 0);
- fractionals128.Shift(-exponent - 64);
- int point = 128;
- for (int i = 0; i < fractional_count; ++i) {
- if (fractionals128.IsZero()) break;
- // As before: instead of multiplying by 10 we multiply by 5 and adjust the
- // point location.
- // This multiplication will not overflow for the same reasons as before.
- fractionals128.Multiply(5);
- point--;
- int digit = fractionals128.DivModPowerOf2(point);
- buffer[*length] = '0' + digit;
- (*length)++;
- }
- if (fractionals128.BitAt(point - 1) == 1) {
- RoundUp(buffer, length, decimal_point);
- }
- }
-}
-
-
-// Removes leading and trailing zeros.
-// If leading zeros are removed then the decimal point position is adjusted.
-static void TrimZeros(Vector<char> buffer, int* length, int* decimal_point) {
- while (*length > 0 && buffer[(*length) - 1] == '0') {
- (*length)--;
- }
- int first_non_zero = 0;
- while (first_non_zero < *length && buffer[first_non_zero] == '0') {
- first_non_zero++;
- }
- if (first_non_zero != 0) {
- for (int i = first_non_zero; i < *length; ++i) {
- buffer[i - first_non_zero] = buffer[i];
- }
- *length -= first_non_zero;
- *decimal_point -= first_non_zero;
- }
-}
-
-
-bool FastFixedDtoa(double v,
- int fractional_count,
- Vector<char> buffer,
- int* length,
- int* decimal_point) {
- const uint32_t kMaxUInt32 = 0xFFFFFFFF;
- uint64_t significand = Double(v).Significand();
- int exponent = Double(v).Exponent();
- // v = significand * 2^exponent (with significand a 53bit integer).
- // If the exponent is larger than 20 (i.e. we may have a 73bit number) then we
- // don't know how to compute the representation. 2^73 ~= 9.5*10^21.
- // If necessary this limit could probably be increased, but we don't need
- // more.
- if (exponent > 20) return false;
- if (fractional_count > 20) return false;
- *length = 0;
- // At most kDoubleSignificandSize bits of the significand are non-zero.
- // Given a 64 bit integer we have 11 0s followed by 53 potentially non-zero
- // bits: 0..11*..0xxx..53*..xx
- if (exponent + kDoubleSignificandSize > 64) {
- // The exponent must be > 11.
- //
- // We know that v = significand * 2^exponent.
- // And the exponent > 11.
- // We simplify the task by dividing v by 10^17.
- // The quotient delivers the first digits, and the remainder fits into a 64
- // bit number.
- // Dividing by 10^17 is equivalent to dividing by 5^17*2^17.
- const uint64_t kFive17 = V8_2PART_UINT64_C(0xB1, A2BC2EC5); // 5^17
- uint64_t divisor = kFive17;
- int divisor_power = 17;
- uint64_t dividend = significand;
- uint32_t quotient;
- uint64_t remainder;
- // Let v = f * 2^e with f == significand and e == exponent.
- // Then need q (quotient) and r (remainder) as follows:
- // v = q * 10^17 + r
- // f * 2^e = q * 10^17 + r
- // f * 2^e = q * 5^17 * 2^17 + r
- // If e > 17 then
- // f * 2^(e-17) = q * 5^17 + r/2^17
- // else
- // f = q * 5^17 * 2^(17-e) + r/2^e
- if (exponent > divisor_power) {
- // We only allow exponents of up to 20 and therefore (17 - e) <= 3
- dividend <<= exponent - divisor_power;
- quotient = static_cast<uint32_t>(dividend / divisor);
- remainder = (dividend % divisor) << divisor_power;
- } else {
- divisor <<= divisor_power - exponent;
- quotient = static_cast<uint32_t>(dividend / divisor);
- remainder = (dividend % divisor) << exponent;
- }
- FillDigits32(quotient, buffer, length);
- FillDigits64FixedLength(remainder, divisor_power, buffer, length);
- *decimal_point = *length;
- } else if (exponent >= 0) {
- // 0 <= exponent <= 11
- significand <<= exponent;
- FillDigits64(significand, buffer, length);
- *decimal_point = *length;
- } else if (exponent > -kDoubleSignificandSize) {
- // We have to cut the number.
- uint64_t integrals = significand >> -exponent;
- uint64_t fractionals = significand - (integrals << -exponent);
- if (integrals > kMaxUInt32) {
- FillDigits64(integrals, buffer, length);
- } else {
- FillDigits32(static_cast<uint32_t>(integrals), buffer, length);
- }
- *decimal_point = *length;
- FillFractionals(fractionals, exponent, fractional_count,
- buffer, length, decimal_point);
- } else if (exponent < -128) {
- // This configuration (with at most 20 digits) means that all digits must be
- // 0.
- ASSERT(fractional_count <= 20);
- buffer[0] = '\0';
- *length = 0;
- *decimal_point = -fractional_count;
- } else {
- *decimal_point = 0;
- FillFractionals(significand, exponent, fractional_count,
- buffer, length, decimal_point);
- }
- TrimZeros(buffer, length, decimal_point);
- buffer[*length] = '\0';
- if ((*length) == 0) {
- // The string is empty and the decimal_point thus has no importance. Mimick
- // Gay's dtoa and and set it to -fractional_count.
- *decimal_point = -fractional_count;
- }
- return true;
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/fixed-dtoa.h b/src/3rdparty/v8/src/fixed-dtoa.h
deleted file mode 100644
index 93f826f..0000000
--- a/src/3rdparty/v8/src/fixed-dtoa.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FIXED_DTOA_H_
-#define V8_FIXED_DTOA_H_
-
-namespace v8 {
-namespace internal {
-
-// Produces digits necessary to print a given number with
-// 'fractional_count' digits after the decimal point.
-// The buffer must be big enough to hold the result plus one terminating null
-// character.
-//
-// The produced digits might be too short in which case the caller has to fill
-// the gaps with '0's.
-// Example: FastFixedDtoa(0.001, 5, ...) is allowed to return buffer = "1", and
-// decimal_point = -2.
-// Halfway cases are rounded towards +/-Infinity (away from 0). The call
-// FastFixedDtoa(0.15, 2, ...) thus returns buffer = "2", decimal_point = 0.
-// The returned buffer may contain digits that would be truncated from the
-// shortest representation of the input.
-//
-// This method only works for some parameters. If it can't handle the input it
-// returns false. The output is null-terminated when the function succeeds.
-bool FastFixedDtoa(double v, int fractional_count,
- Vector<char> buffer, int* length, int* decimal_point);
-
-} } // namespace v8::internal
-
-#endif // V8_FIXED_DTOA_H_
diff --git a/src/3rdparty/v8/src/flag-definitions.h b/src/3rdparty/v8/src/flag-definitions.h
deleted file mode 100644
index d6cb6e3..0000000
--- a/src/3rdparty/v8/src/flag-definitions.h
+++ /dev/null
@@ -1,556 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file defines all of the flags. It is separated into different section,
-// for Debug, Release, Logging and Profiling, etc. To add a new flag, find the
-// correct section, and use one of the DEFINE_ macros, without a trailing ';'.
-//
-// This include does not have a guard, because it is a template-style include,
-// which can be included multiple times in different modes. It expects to have
-// a mode defined before it's included. The modes are FLAG_MODE_... below:
-
-// We want to declare the names of the variables for the header file. Normally
-// this will just be an extern declaration, but for a readonly flag we let the
-// compiler make better optimizations by giving it the value.
-#if defined(FLAG_MODE_DECLARE)
-#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
- extern ctype FLAG_##nam;
-#define FLAG_READONLY(ftype, ctype, nam, def, cmt) \
- static ctype const FLAG_##nam = def;
-
-// We want to supply the actual storage and value for the flag variable in the
-// .cc file. We only do this for writable flags.
-#elif defined(FLAG_MODE_DEFINE)
-#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
- ctype FLAG_##nam = def;
-#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
-
-// We need to define all of our default values so that the Flag structure can
-// access them by pointer. These are just used internally inside of one .cc,
-// for MODE_META, so there is no impact on the flags interface.
-#elif defined(FLAG_MODE_DEFINE_DEFAULTS)
-#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
- static ctype const FLAGDEFAULT_##nam = def;
-#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
-
-
-// We want to write entries into our meta data table, for internal parsing and
-// printing / etc in the flag parser code. We only do this for writable flags.
-#elif defined(FLAG_MODE_META)
-#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
- { Flag::TYPE_##ftype, #nam, &FLAG_##nam, &FLAGDEFAULT_##nam, cmt, false },
-#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
-
-#else
-#error No mode supplied when including flags.defs
-#endif
-
-#ifdef FLAG_MODE_DECLARE
-// Structure used to hold a collection of arguments to the JavaScript code.
-struct JSArguments {
-public:
- JSArguments();
- JSArguments(int argc, const char** argv);
- int argc() const;
- const char** argv();
- const char*& operator[](int idx);
- JSArguments& operator=(JSArguments args);
-private:
- int argc_;
- const char** argv_;
-};
-#endif
-
-#define DEFINE_bool(nam, def, cmt) FLAG(BOOL, bool, nam, def, cmt)
-#define DEFINE_int(nam, def, cmt) FLAG(INT, int, nam, def, cmt)
-#define DEFINE_float(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
-#define DEFINE_string(nam, def, cmt) FLAG(STRING, const char*, nam, def, cmt)
-#define DEFINE_args(nam, def, cmt) FLAG(ARGS, JSArguments, nam, def, cmt)
-
-//
-// Flags in all modes.
-//
-#define FLAG FLAG_FULL
-
-// Flags for Crankshaft.
-#ifdef V8_TARGET_ARCH_MIPS
- DEFINE_bool(crankshaft, false, "use crankshaft")
-#else
- DEFINE_bool(crankshaft, true, "use crankshaft")
-#endif
-DEFINE_string(hydrogen_filter, "", "hydrogen use/trace filter")
-DEFINE_bool(use_hydrogen, true, "use generated hydrogen for compilation")
-DEFINE_bool(build_lithium, true, "use lithium chunk builder")
-DEFINE_bool(alloc_lithium, true, "use lithium register allocator")
-DEFINE_bool(use_lithium, true, "use lithium code generator")
-DEFINE_bool(use_range, true, "use hydrogen range analysis")
-DEFINE_bool(eliminate_dead_phis, true, "eliminate dead phis")
-DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
-DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
-DEFINE_bool(use_inlining, true, "use function inlining")
-DEFINE_bool(limit_inlining, true, "limit code size growth from inlining")
-DEFINE_bool(eliminate_empty_blocks, true, "eliminate empty blocks")
-DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
-DEFINE_bool(hydrogen_stats, false, "print statistics for hydrogen")
-DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file")
-DEFINE_bool(trace_inlining, false, "trace inlining decisions")
-DEFINE_bool(trace_alloc, false, "trace register allocator")
-DEFINE_bool(trace_all_uses, false, "trace all use positions")
-DEFINE_bool(trace_range, false, "trace range analysis")
-DEFINE_bool(trace_gvn, false, "trace global value numbering")
-DEFINE_bool(trace_representation, false, "trace representation types")
-DEFINE_bool(stress_pointer_maps, false, "pointer map for every instruction")
-DEFINE_bool(stress_environments, false, "environment for every instruction")
-DEFINE_int(deopt_every_n_times,
- 0,
- "deoptimize every n times a deopt point is passed")
-DEFINE_bool(process_arguments_object, true, "try to deal with arguments object")
-DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
-DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
-DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
-DEFINE_bool(aggressive_loop_invariant_motion, true,
- "aggressive motion of instructions out of loops")
-DEFINE_bool(use_osr, true, "use on-stack replacement")
-
-DEFINE_bool(trace_osr, false, "trace on-stack replacement")
-DEFINE_int(stress_runs, 0, "number of stress runs")
-DEFINE_bool(optimize_closures, true, "optimize closures")
-
-// assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
-DEFINE_bool(debug_code, false,
- "generate extra code (assertions) for debugging")
-DEFINE_bool(code_comments, false, "emit comments in code disassembly")
-DEFINE_bool(emit_branch_hints, false, "emit branch hints")
-DEFINE_bool(peephole_optimization, true,
- "perform peephole optimizations in assembly code")
-DEFINE_bool(print_peephole_optimization, false,
- "print peephole optimizations in assembly code")
-DEFINE_bool(enable_sse2, true,
- "enable use of SSE2 instructions if available")
-DEFINE_bool(enable_sse3, true,
- "enable use of SSE3 instructions if available")
-DEFINE_bool(enable_sse4_1, true,
- "enable use of SSE4.1 instructions if available")
-DEFINE_bool(enable_cmov, true,
- "enable use of CMOV instruction if available")
-DEFINE_bool(enable_rdtsc, true,
- "enable use of RDTSC instruction if available")
-DEFINE_bool(enable_sahf, true,
- "enable use of SAHF instruction if available (X64 only)")
-DEFINE_bool(enable_vfp3, true,
- "enable use of VFP3 instructions if available (ARM only)")
-DEFINE_bool(enable_armv7, true,
- "enable use of ARMv7 instructions if available (ARM only)")
-DEFINE_bool(enable_fpu, true,
- "enable use of MIPS FPU instructions if available (MIPS only)")
-
-// bootstrapper.cc
-DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
-DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
-DEFINE_bool(expose_gc, false, "expose gc extension")
-DEFINE_bool(expose_externalize_string, false,
- "expose externalize string extension")
-DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
-DEFINE_bool(disable_native_files, false, "disable builtin natives files")
-
-// builtins-ia32.cc
-DEFINE_bool(inline_new, true, "use fast inline allocation")
-
-// checks.cc
-DEFINE_bool(stack_trace_on_abort, true,
- "print a stack trace if an assertion failure occurs")
-
-// codegen-ia32.cc / codegen-arm.cc
-DEFINE_bool(trace, false, "trace function calls")
-DEFINE_bool(defer_negation, true, "defer negation operation")
-DEFINE_bool(mask_constants_with_cookie,
- true,
- "use random jit cookie to mask large constants")
-
-// codegen.cc
-DEFINE_bool(lazy, true, "use lazy compilation")
-DEFINE_bool(trace_opt, false, "trace lazy optimization")
-DEFINE_bool(trace_opt_stats, false, "trace lazy optimization statistics")
-DEFINE_bool(opt, true, "use adaptive optimizations")
-DEFINE_bool(opt_eagerly, false, "be more eager when adaptively optimizing")
-DEFINE_bool(always_opt, false, "always try to optimize functions")
-DEFINE_bool(prepare_always_opt, false, "prepare for turning on always opt")
-DEFINE_bool(debug_info, true, "add debug information to compiled functions")
-DEFINE_bool(deopt, true, "support deoptimization")
-DEFINE_bool(trace_deopt, false, "trace deoptimization")
-
-// compiler.cc
-DEFINE_bool(strict, false, "strict error checking")
-DEFINE_int(min_preparse_length, 1024,
- "minimum length for automatic enable preparsing")
-DEFINE_bool(full_compiler, true, "enable dedicated backend for run-once code")
-DEFINE_bool(always_full_compiler, false,
- "try to use the dedicated run-once backend for all code")
-DEFINE_bool(trace_bailout, false,
- "print reasons for falling back to using the classic V8 backend")
-DEFINE_bool(safe_int32_compiler, true,
- "enable optimized side-effect-free int32 expressions.")
-DEFINE_bool(use_flow_graph, false, "perform flow-graph based optimizations")
-
-// compilation-cache.cc
-DEFINE_bool(compilation_cache, true, "enable compilation cache")
-
-// data-flow.cc
-DEFINE_bool(loop_peeling, false, "Peel off the first iteration of loops.")
-
-// debug.cc
-DEFINE_bool(remote_debugging, false, "enable remote debugging")
-DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
-DEFINE_bool(debugger_auto_break, true,
- "automatically set the debug break flag when debugger commands are "
- "in the queue")
-DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature")
-
-// execution.cc
-DEFINE_int(stack_size, kPointerSize * 128,
- "default size of stack region v8 is allowed to use (in KkBytes)")
-
-// frames.cc
-DEFINE_int(max_stack_trace_source_length, 300,
- "maximum length of function source code printed in a stack trace.")
-
-// full-codegen.cc
-DEFINE_bool(always_inline_smi_code, false,
- "always inline smi code in non-opt code")
-
-// heap.cc
-DEFINE_int(max_new_space_size, 0, "max size of the new generation (in kBytes)")
-DEFINE_int(max_old_space_size, 0, "max size of the old generation (in Mbytes)")
-DEFINE_int(max_executable_size, 0, "max size of executable memory (in Mbytes)")
-DEFINE_bool(gc_global, false, "always perform global GCs")
-DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations")
-DEFINE_bool(trace_gc, false,
- "print one trace line following each garbage collection")
-DEFINE_bool(trace_gc_nvp, false,
- "print one detailed trace line in name=value format "
- "after each garbage collection")
-DEFINE_bool(print_cumulative_gc_stat, false,
- "print cumulative GC statistics in name=value format on exit")
-DEFINE_bool(trace_gc_verbose, false,
- "print more details following each garbage collection")
-DEFINE_bool(collect_maps, true,
- "garbage collect maps from which no objects can be reached")
-DEFINE_bool(flush_code, true,
- "flush code that we expect not to use again before full gc")
-
-// v8.cc
-DEFINE_bool(use_idle_notification, true,
- "Use idle notification to reduce memory footprint.")
-// ic.cc
-DEFINE_bool(use_ic, true, "use inline caching")
-
-#ifdef LIVE_OBJECT_LIST
-// liveobjectlist.cc
-DEFINE_string(lol_workdir, NULL, "path for lol temp files")
-DEFINE_bool(verify_lol, false, "perform debugging verification for lol")
-#endif
-
-// macro-assembler-ia32.cc
-DEFINE_bool(native_code_counters, false,
- "generate extra code for manipulating stats counters")
-
-// mark-compact.cc
-DEFINE_bool(always_compact, false, "Perform compaction on every full GC")
-DEFINE_bool(never_compact, false,
- "Never perform compaction on full GC - testing only")
-DEFINE_bool(cleanup_ics_at_gc, true,
- "Flush inline caches prior to mark compact collection.")
-DEFINE_bool(cleanup_caches_in_maps_at_gc, true,
- "Flush code caches in maps during mark compact cycle.")
-DEFINE_int(random_seed, 0,
- "Default seed for initializing random generator "
- "(0, the default, means to use system random).")
-
-DEFINE_bool(canonicalize_object_literal_maps, true,
- "Canonicalize maps for object literals.")
-
-DEFINE_bool(use_big_map_space, true,
- "Use big map space, but don't compact if it grew too big.")
-
-DEFINE_int(max_map_space_pages, MapSpace::kMaxMapPageIndex - 1,
- "Maximum number of pages in map space which still allows to encode "
- "forwarding pointers. That's actually a constant, but it's useful "
- "to control it with a flag for better testing.")
-
-// mksnapshot.cc
-DEFINE_bool(h, false, "print this message")
-DEFINE_bool(new_snapshot, true, "use new snapshot implementation")
-
-// objects.cc
-DEFINE_bool(use_verbose_printer, true, "allows verbose printing")
-
-// parser.cc
-DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
-DEFINE_bool(strict_mode, true, "allow strict mode directives")
-
-// rewriter.cc
-DEFINE_bool(optimize_ast, true, "optimize the ast")
-
-// simulator-arm.cc and simulator-mips.cc
-DEFINE_bool(trace_sim, false, "Trace simulator execution")
-DEFINE_bool(check_icache, false, "Check icache flushes in ARM simulator")
-DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
-DEFINE_int(sim_stack_alignment, 8,
- "Stack alingment in bytes in simulator (4 or 8, 8 is default)")
-
-// top.cc
-DEFINE_bool(trace_exception, false,
- "print stack trace when throwing exceptions")
-DEFINE_bool(preallocate_message_memory, false,
- "preallocate some memory to build stack traces.")
-
-// v8.cc
-DEFINE_bool(preemption, false,
- "activate a 100ms timer that switches between V8 threads")
-
-// Regexp
-DEFINE_bool(trace_regexps, false, "trace regexp execution")
-DEFINE_bool(regexp_optimization, true, "generate optimized regexp code")
-DEFINE_bool(regexp_entry_native, true, "use native code to enter regexp")
-
-// Testing flags test/cctest/test-{flags,api,serialization}.cc
-DEFINE_bool(testing_bool_flag, true, "testing_bool_flag")
-DEFINE_int(testing_int_flag, 13, "testing_int_flag")
-DEFINE_float(testing_float_flag, 2.5, "float-flag")
-DEFINE_string(testing_string_flag, "Hello, world!", "string-flag")
-DEFINE_int(testing_prng_seed, 42, "Seed used for threading test randomness")
-#ifdef WIN32
-DEFINE_string(testing_serialization_file, "C:\\Windows\\Temp\\serdes",
- "file in which to testing_serialize heap")
-#else
-DEFINE_string(testing_serialization_file, "/tmp/serdes",
- "file in which to serialize heap")
-#endif
-
-//
-// Dev shell flags
-//
-
-DEFINE_bool(help, false, "Print usage message, including flags, on console")
-DEFINE_bool(dump_counters, false, "Dump counters on exit")
-DEFINE_bool(debugger, false, "Enable JavaScript debugger")
-DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the "
- "debugger agent in another process")
-DEFINE_bool(debugger_agent, false, "Enable debugger agent")
-DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
-DEFINE_string(map_counters, "", "Map counters to a file")
-DEFINE_args(js_arguments, JSArguments(),
- "Pass all remaining arguments to the script. Alias for \"--\".")
-
-#if defined(WEBOS__)
-DEFINE_bool(debug_compile_events, false, "Enable debugger compile events")
-DEFINE_bool(debug_script_collected_events, false,
- "Enable debugger script collected events")
-#else
-DEFINE_bool(debug_compile_events, true, "Enable debugger compile events")
-DEFINE_bool(debug_script_collected_events, true,
- "Enable debugger script collected events")
-#endif
-
-
-//
-// GDB JIT integration flags.
-//
-
-DEFINE_bool(gdbjit, false, "enable GDBJIT interface (disables compacting GC)")
-DEFINE_bool(gdbjit_full, false, "enable GDBJIT interface for all code objects")
-DEFINE_bool(gdbjit_dump, false, "dump elf objects with debug info to disk")
-
-//
-// Debug only flags
-//
-#undef FLAG
-#ifdef DEBUG
-#define FLAG FLAG_FULL
-#else
-#define FLAG FLAG_READONLY
-#endif
-
-// checks.cc
-DEFINE_bool(enable_slow_asserts, false,
- "enable asserts that are slow to execute")
-
-// codegen-ia32.cc / codegen-arm.cc
-DEFINE_bool(trace_codegen, false,
- "print name of functions for which code is generated")
-DEFINE_bool(print_source, false, "pretty print source code")
-DEFINE_bool(print_builtin_source, false,
- "pretty print source code for builtins")
-DEFINE_bool(print_ast, false, "print source AST")
-DEFINE_bool(print_builtin_ast, false, "print source AST for builtins")
-DEFINE_bool(print_json_ast, false, "print source AST as JSON")
-DEFINE_bool(print_builtin_json_ast, false,
- "print source AST for builtins as JSON")
-DEFINE_bool(trace_calls, false, "trace calls")
-DEFINE_bool(trace_builtin_calls, false, "trace builtins calls")
-DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
-
-// compiler.cc
-DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
-DEFINE_bool(print_scopes, false, "print scopes")
-DEFINE_bool(print_ir, false, "print the AST as seen by the backend")
-DEFINE_bool(print_graph_text, false,
- "print a text representation of the flow graph")
-
-// contexts.cc
-DEFINE_bool(trace_contexts, false, "trace contexts operations")
-
-// heap.cc
-DEFINE_bool(gc_greedy, false, "perform GC prior to some allocations")
-DEFINE_bool(gc_verbose, false, "print stuff during garbage collection")
-DEFINE_bool(heap_stats, false, "report heap statistics before and after GC")
-DEFINE_bool(code_stats, false, "report code statistics after GC")
-DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
-DEFINE_bool(print_handles, false, "report handles after GC")
-DEFINE_bool(print_global_handles, false, "report global handles after GC")
-
-// ic.cc
-DEFINE_bool(trace_ic, false, "trace inline cache state transitions")
-
-// objects.cc
-DEFINE_bool(trace_normalization,
- false,
- "prints when objects are turned into dictionaries.")
-
-// runtime.cc
-DEFINE_bool(trace_lazy, false, "trace lazy compilation")
-
-// serialize.cc
-DEFINE_bool(debug_serialization, false,
- "write debug information into the snapshot.")
-
-// spaces.cc
-DEFINE_bool(collect_heap_spill_statistics, false,
- "report heap spill statistics along with heap_stats "
- "(requires heap_stats)")
-
-DEFINE_bool(trace_isolates, false, "trace isolate state changes")
-
-// VM state
-DEFINE_bool(log_state_changes, false, "Log state changes.")
-
-// Regexp
-DEFINE_bool(regexp_possessive_quantifier,
- false,
- "enable possessive quantifier syntax for testing")
-DEFINE_bool(trace_regexp_bytecodes, false, "trace regexp bytecode execution")
-DEFINE_bool(trace_regexp_assembler,
- false,
- "trace regexp macro assembler calls.")
-
-//
-// Logging and profiling only flags
-//
-#undef FLAG
-#ifdef ENABLE_LOGGING_AND_PROFILING
-#define FLAG FLAG_FULL
-#else
-#define FLAG FLAG_READONLY
-#endif
-
-// log.cc
-DEFINE_bool(log, false,
- "Minimal logging (no API, code, GC, suspect, or handles samples).")
-DEFINE_bool(log_all, false, "Log all events to the log file.")
-DEFINE_bool(log_runtime, false, "Activate runtime system %Log call.")
-DEFINE_bool(log_api, false, "Log API events to the log file.")
-DEFINE_bool(log_code, false,
- "Log code events to the log file without profiling.")
-DEFINE_bool(log_gc, false,
- "Log heap samples on garbage collection for the hp2ps tool.")
-DEFINE_bool(log_handles, false, "Log global handle events.")
-DEFINE_bool(log_snapshot_positions, false,
- "log positions of (de)serialized objects in the snapshot.")
-DEFINE_bool(log_suspect, false, "Log suspect operations.")
-DEFINE_bool(log_producers, false, "Log stack traces of JS objects allocations.")
-DEFINE_bool(prof, false,
- "Log statistical profiling information (implies --log-code).")
-DEFINE_bool(prof_auto, true,
- "Used with --prof, starts profiling automatically")
-DEFINE_bool(prof_lazy, false,
- "Used with --prof, only does sampling and logging"
- " when profiler is active (implies --noprof_auto).")
-DEFINE_bool(prof_browser_mode, true,
- "Used with --prof, turns on browser-compatible mode for profiling.")
-DEFINE_bool(log_regexp, false, "Log regular expression execution.")
-DEFINE_bool(sliding_state_window, false,
- "Update sliding state window counters.")
-DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
-DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
-
-//
-// Heap protection flags
-// Using heap protection requires ENABLE_LOGGING_AND_PROFILING as well.
-//
-#ifdef ENABLE_HEAP_PROTECTION
-#undef FLAG
-#define FLAG FLAG_FULL
-
-DEFINE_bool(protect_heap, false,
- "Protect/unprotect V8's heap when leaving/entring the VM.")
-
-#endif
-
-//
-// Disassembler only flags
-//
-#undef FLAG
-#ifdef ENABLE_DISASSEMBLER
-#define FLAG FLAG_FULL
-#else
-#define FLAG FLAG_READONLY
-#endif
-
-// code-stubs.cc
-DEFINE_bool(print_code_stubs, false, "print code stubs")
-
-// codegen-ia32.cc / codegen-arm.cc
-DEFINE_bool(print_code, false, "print generated code")
-DEFINE_bool(print_opt_code, false, "print optimized code")
-DEFINE_bool(print_unopt_code, false, "print unoptimized code before "
- "printing optimized code based on it")
-DEFINE_bool(print_code_verbose, false, "print more information for code")
-DEFINE_bool(print_builtin_code, false, "print generated code for builtins")
-
-// Cleanup...
-#undef FLAG_FULL
-#undef FLAG_READONLY
-#undef FLAG
-
-#undef DEFINE_bool
-#undef DEFINE_int
-#undef DEFINE_string
-
-#undef FLAG_MODE_DECLARE
-#undef FLAG_MODE_DEFINE
-#undef FLAG_MODE_DEFINE_DEFAULTS
-#undef FLAG_MODE_META
diff --git a/src/3rdparty/v8/src/flags.cc b/src/3rdparty/v8/src/flags.cc
deleted file mode 100644
index c20f5ee..0000000
--- a/src/3rdparty/v8/src/flags.cc
+++ /dev/null
@@ -1,551 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <ctype.h>
-#include <stdlib.h>
-
-#include "v8.h"
-
-#include "platform.h"
-#include "smart-pointer.h"
-#include "string-stream.h"
-
-
-namespace v8 {
-namespace internal {
-
-// Define all of our flags.
-#define FLAG_MODE_DEFINE
-#include "flag-definitions.h"
-
-// Define all of our flags default values.
-#define FLAG_MODE_DEFINE_DEFAULTS
-#include "flag-definitions.h"
-
-namespace {
-
-// This structure represents a single entry in the flag system, with a pointer
-// to the actual flag, default value, comment, etc. This is designed to be POD
-// initialized as to avoid requiring static constructors.
-struct Flag {
- enum FlagType { TYPE_BOOL, TYPE_INT, TYPE_FLOAT, TYPE_STRING, TYPE_ARGS };
-
- FlagType type_; // What type of flag, bool, int, or string.
- const char* name_; // Name of the flag, ex "my_flag".
- void* valptr_; // Pointer to the global flag variable.
- const void* defptr_; // Pointer to the default value.
- const char* cmt_; // A comment about the flags purpose.
- bool owns_ptr_; // Does the flag own its string value?
-
- FlagType type() const { return type_; }
-
- const char* name() const { return name_; }
-
- const char* comment() const { return cmt_; }
-
- bool* bool_variable() const {
- ASSERT(type_ == TYPE_BOOL);
- return reinterpret_cast<bool*>(valptr_);
- }
-
- int* int_variable() const {
- ASSERT(type_ == TYPE_INT);
- return reinterpret_cast<int*>(valptr_);
- }
-
- double* float_variable() const {
- ASSERT(type_ == TYPE_FLOAT);
- return reinterpret_cast<double*>(valptr_);
- }
-
- const char* string_value() const {
- ASSERT(type_ == TYPE_STRING);
- return *reinterpret_cast<const char**>(valptr_);
- }
-
- void set_string_value(const char* value, bool owns_ptr) {
- ASSERT(type_ == TYPE_STRING);
- const char** ptr = reinterpret_cast<const char**>(valptr_);
- if (owns_ptr_ && *ptr != NULL) DeleteArray(*ptr);
- *ptr = value;
- owns_ptr_ = owns_ptr;
- }
-
- JSArguments* args_variable() const {
- ASSERT(type_ == TYPE_ARGS);
- return reinterpret_cast<JSArguments*>(valptr_);
- }
-
- bool bool_default() const {
- ASSERT(type_ == TYPE_BOOL);
- return *reinterpret_cast<const bool*>(defptr_);
- }
-
- int int_default() const {
- ASSERT(type_ == TYPE_INT);
- return *reinterpret_cast<const int*>(defptr_);
- }
-
- double float_default() const {
- ASSERT(type_ == TYPE_FLOAT);
- return *reinterpret_cast<const double*>(defptr_);
- }
-
- const char* string_default() const {
- ASSERT(type_ == TYPE_STRING);
- return *reinterpret_cast<const char* const *>(defptr_);
- }
-
- JSArguments args_default() const {
- ASSERT(type_ == TYPE_ARGS);
- return *reinterpret_cast<const JSArguments*>(defptr_);
- }
-
- // Compare this flag's current value against the default.
- bool IsDefault() const {
- switch (type_) {
- case TYPE_BOOL:
- return *bool_variable() == bool_default();
- case TYPE_INT:
- return *int_variable() == int_default();
- case TYPE_FLOAT:
- return *float_variable() == float_default();
- case TYPE_STRING: {
- const char* str1 = string_value();
- const char* str2 = string_default();
- if (str2 == NULL) return str1 == NULL;
- if (str1 == NULL) return str2 == NULL;
- return strcmp(str1, str2) == 0;
- }
- case TYPE_ARGS:
- return args_variable()->argc() == 0;
- }
- UNREACHABLE();
- return true;
- }
-
- // Set a flag back to it's default value.
- void Reset() {
- switch (type_) {
- case TYPE_BOOL:
- *bool_variable() = bool_default();
- break;
- case TYPE_INT:
- *int_variable() = int_default();
- break;
- case TYPE_FLOAT:
- *float_variable() = float_default();
- break;
- case TYPE_STRING:
- set_string_value(string_default(), false);
- break;
- case TYPE_ARGS:
- *args_variable() = args_default();
- break;
- }
- }
-};
-
-Flag flags[] = {
-#define FLAG_MODE_META
-#include "flag-definitions.h"
-};
-
-const size_t num_flags = sizeof(flags) / sizeof(*flags);
-
-} // namespace
-
-
-static const char* Type2String(Flag::FlagType type) {
- switch (type) {
- case Flag::TYPE_BOOL: return "bool";
- case Flag::TYPE_INT: return "int";
- case Flag::TYPE_FLOAT: return "float";
- case Flag::TYPE_STRING: return "string";
- case Flag::TYPE_ARGS: return "arguments";
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-static SmartPointer<const char> ToString(Flag* flag) {
- HeapStringAllocator string_allocator;
- StringStream buffer(&string_allocator);
- switch (flag->type()) {
- case Flag::TYPE_BOOL:
- buffer.Add("%s", (*flag->bool_variable() ? "true" : "false"));
- break;
- case Flag::TYPE_INT:
- buffer.Add("%d", *flag->int_variable());
- break;
- case Flag::TYPE_FLOAT:
- buffer.Add("%f", FmtElm(*flag->float_variable()));
- break;
- case Flag::TYPE_STRING: {
- const char* str = flag->string_value();
- buffer.Add("%s", str ? str : "NULL");
- break;
- }
- case Flag::TYPE_ARGS: {
- JSArguments args = *flag->args_variable();
- if (args.argc() > 0) {
- buffer.Add("%s", args[0]);
- for (int i = 1; i < args.argc(); i++) {
- buffer.Add(" %s", args[i]);
- }
- }
- break;
- }
- }
- return buffer.ToCString();
-}
-
-
-// static
-List<const char*>* FlagList::argv() {
- List<const char*>* args = new List<const char*>(8);
- Flag* args_flag = NULL;
- for (size_t i = 0; i < num_flags; ++i) {
- Flag* f = &flags[i];
- if (!f->IsDefault()) {
- if (f->type() == Flag::TYPE_ARGS) {
- ASSERT(args_flag == NULL);
- args_flag = f; // Must be last in arguments.
- continue;
- }
- HeapStringAllocator string_allocator;
- StringStream buffer(&string_allocator);
- if (f->type() != Flag::TYPE_BOOL || *(f->bool_variable())) {
- buffer.Add("--%s", f->name());
- } else {
- buffer.Add("--no%s", f->name());
- }
- args->Add(buffer.ToCString().Detach());
- if (f->type() != Flag::TYPE_BOOL) {
- args->Add(ToString(f).Detach());
- }
- }
- }
- if (args_flag != NULL) {
- HeapStringAllocator string_allocator;
- StringStream buffer(&string_allocator);
- buffer.Add("--%s", args_flag->name());
- args->Add(buffer.ToCString().Detach());
- JSArguments jsargs = *args_flag->args_variable();
- for (int j = 0; j < jsargs.argc(); j++) {
- args->Add(StrDup(jsargs[j]));
- }
- }
- return args;
-}
-
-
-// Helper function to parse flags: Takes an argument arg and splits it into
-// a flag name and flag value (or NULL if they are missing). is_bool is set
-// if the arg started with "-no" or "--no". The buffer may be used to NUL-
-// terminate the name, it must be large enough to hold any possible name.
-static void SplitArgument(const char* arg,
- char* buffer,
- int buffer_size,
- const char** name,
- const char** value,
- bool* is_bool) {
- *name = NULL;
- *value = NULL;
- *is_bool = false;
-
- if (arg != NULL && *arg == '-') {
- // find the begin of the flag name
- arg++; // remove 1st '-'
- if (*arg == '-') {
- arg++; // remove 2nd '-'
- if (arg[0] == '\0') {
- const char* kJSArgumentsFlagName = "js_arguments";
- *name = kJSArgumentsFlagName;
- return;
- }
- }
- if (arg[0] == 'n' && arg[1] == 'o') {
- arg += 2; // remove "no"
- *is_bool = true;
- }
- *name = arg;
-
- // find the end of the flag name
- while (*arg != '\0' && *arg != '=')
- arg++;
-
- // get the value if any
- if (*arg == '=') {
- // make a copy so we can NUL-terminate flag name
- size_t n = arg - *name;
- CHECK(n < static_cast<size_t>(buffer_size)); // buffer is too small
- memcpy(buffer, *name, n);
- buffer[n] = '\0';
- *name = buffer;
- // get the value
- *value = arg + 1;
- }
- }
-}
-
-
-inline char NormalizeChar(char ch) {
- return ch == '_' ? '-' : ch;
-}
-
-
-static bool EqualNames(const char* a, const char* b) {
- for (int i = 0; NormalizeChar(a[i]) == NormalizeChar(b[i]); i++) {
- if (a[i] == '\0') {
- return true;
- }
- }
- return false;
-}
-
-
-static Flag* FindFlag(const char* name) {
- for (size_t i = 0; i < num_flags; ++i) {
- if (EqualNames(name, flags[i].name()))
- return &flags[i];
- }
- return NULL;
-}
-
-
-// static
-int FlagList::SetFlagsFromCommandLine(int* argc,
- char** argv,
- bool remove_flags) {
- // parse arguments
- for (int i = 1; i < *argc;) {
- int j = i; // j > 0
- const char* arg = argv[i++];
-
- // split arg into flag components
- char buffer[1*KB];
- const char* name;
- const char* value;
- bool is_bool;
- SplitArgument(arg, buffer, sizeof buffer, &name, &value, &is_bool);
-
- if (name != NULL) {
- // lookup the flag
- Flag* flag = FindFlag(name);
- if (flag == NULL) {
- if (remove_flags) {
- // We don't recognize this flag but since we're removing
- // the flags we recognize we assume that the remaining flags
- // will be processed somewhere else so this flag might make
- // sense there.
- continue;
- } else {
- fprintf(stderr, "Error: unrecognized flag %s\n"
- "Try --help for options\n", arg);
- return j;
- }
- }
-
- // if we still need a flag value, use the next argument if available
- if (flag->type() != Flag::TYPE_BOOL &&
- flag->type() != Flag::TYPE_ARGS &&
- value == NULL) {
- if (i < *argc) {
- value = argv[i++];
- } else {
- fprintf(stderr, "Error: missing value for flag %s of type %s\n"
- "Try --help for options\n",
- arg, Type2String(flag->type()));
- return j;
- }
- }
-
- // set the flag
- char* endp = const_cast<char*>(""); // *endp is only read
- switch (flag->type()) {
- case Flag::TYPE_BOOL:
- *flag->bool_variable() = !is_bool;
- break;
- case Flag::TYPE_INT:
- *flag->int_variable() = strtol(value, &endp, 10); // NOLINT
- break;
- case Flag::TYPE_FLOAT:
- *flag->float_variable() = strtod(value, &endp);
- break;
- case Flag::TYPE_STRING:
- flag->set_string_value(value ? StrDup(value) : NULL, true);
- break;
- case Flag::TYPE_ARGS: {
- int start_pos = (value == NULL) ? i : i - 1;
- int js_argc = *argc - start_pos;
- const char** js_argv = NewArray<const char*>(js_argc);
- if (value != NULL) {
- js_argv[0] = StrDup(value);
- }
- for (int k = i; k < *argc; k++) {
- js_argv[k - start_pos] = StrDup(argv[k]);
- }
- *flag->args_variable() = JSArguments(js_argc, js_argv);
- i = *argc; // Consume all arguments
- break;
- }
- }
-
- // handle errors
- if ((flag->type() == Flag::TYPE_BOOL && value != NULL) ||
- (flag->type() != Flag::TYPE_BOOL && is_bool) ||
- *endp != '\0') {
- fprintf(stderr, "Error: illegal value for flag %s of type %s\n"
- "Try --help for options\n",
- arg, Type2String(flag->type()));
- return j;
- }
-
- // remove the flag & value from the command
- if (remove_flags) {
- while (j < i) {
- argv[j++] = NULL;
- }
- }
- }
- }
-
- // shrink the argument list
- if (remove_flags) {
- int j = 1;
- for (int i = 1; i < *argc; i++) {
- if (argv[i] != NULL)
- argv[j++] = argv[i];
- }
- *argc = j;
- }
-
- if (FLAG_help) {
- PrintHelp();
- exit(0);
- }
- // parsed all flags successfully
- return 0;
-}
-
-
-static char* SkipWhiteSpace(char* p) {
- while (*p != '\0' && isspace(*p) != 0) p++;
- return p;
-}
-
-
-static char* SkipBlackSpace(char* p) {
- while (*p != '\0' && isspace(*p) == 0) p++;
- return p;
-}
-
-
-// static
-int FlagList::SetFlagsFromString(const char* str, int len) {
- // make a 0-terminated copy of str
- ScopedVector<char> copy0(len + 1);
- memcpy(copy0.start(), str, len);
- copy0[len] = '\0';
-
- // strip leading white space
- char* copy = SkipWhiteSpace(copy0.start());
-
- // count the number of 'arguments'
- int argc = 1; // be compatible with SetFlagsFromCommandLine()
- for (char* p = copy; *p != '\0'; argc++) {
- p = SkipBlackSpace(p);
- p = SkipWhiteSpace(p);
- }
-
- // allocate argument array
- ScopedVector<char*> argv(argc);
-
- // split the flags string into arguments
- argc = 1; // be compatible with SetFlagsFromCommandLine()
- for (char* p = copy; *p != '\0'; argc++) {
- argv[argc] = p;
- p = SkipBlackSpace(p);
- if (*p != '\0') *p++ = '\0'; // 0-terminate argument
- p = SkipWhiteSpace(p);
- }
-
- // set the flags
- int result = SetFlagsFromCommandLine(&argc, argv.start(), false);
-
- return result;
-}
-
-
-// static
-void FlagList::ResetAllFlags() {
- for (size_t i = 0; i < num_flags; ++i) {
- flags[i].Reset();
- }
-}
-
-
-// static
-void FlagList::PrintHelp() {
- printf("Usage:\n");
- printf(" shell [options] -e string\n");
- printf(" execute string in V8\n");
- printf(" shell [options] file1 file2 ... filek\n");
- printf(" run JavaScript scripts in file1, file2, ..., filek\n");
- printf(" shell [options]\n");
- printf(" shell [options] --shell [file1 file2 ... filek]\n");
- printf(" run an interactive JavaScript shell\n");
- printf(" d8 [options] file1 file2 ... filek\n");
- printf(" d8 [options]\n");
- printf(" d8 [options] --shell [file1 file2 ... filek]\n");
- printf(" run the new debugging shell\n\n");
- printf("Options:\n");
- for (size_t i = 0; i < num_flags; ++i) {
- Flag* f = &flags[i];
- SmartPointer<const char> value = ToString(f);
- printf(" --%s (%s)\n type: %s default: %s\n",
- f->name(), f->comment(), Type2String(f->type()), *value);
- }
-}
-
-JSArguments::JSArguments()
- : argc_(0), argv_(NULL) {}
-JSArguments::JSArguments(int argc, const char** argv)
- : argc_(argc), argv_(argv) {}
-int JSArguments::argc() const { return argc_; }
-const char** JSArguments::argv() { return argv_; }
-const char*& JSArguments::operator[](int idx) { return argv_[idx]; }
-JSArguments& JSArguments::operator=(JSArguments args) {
- argc_ = args.argc_;
- argv_ = args.argv_;
- return *this;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/flags.h b/src/3rdparty/v8/src/flags.h
deleted file mode 100644
index f9cbde0..0000000
--- a/src/3rdparty/v8/src/flags.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_FLAGS_H_
-#define V8_FLAGS_H_
-
-namespace v8 {
-namespace internal {
-
-// Declare all of our flags.
-#define FLAG_MODE_DECLARE
-#include "flag-definitions.h"
-
-// The global list of all flags.
-class FlagList {
- public:
- // The list of all flags with a value different from the default
- // and their values. The format of the list is like the format of the
- // argv array passed to the main function, e.g.
- // ("--prof", "--log-file", "v8.prof", "--nolazy").
- //
- // The caller is responsible for disposing the list, as well
- // as every element of it.
- static List<const char*>* argv();
-
- // Set the flag values by parsing the command line. If remove_flags is
- // set, the flags and associated values are removed from (argc,
- // argv). Returns 0 if no error occurred. Otherwise, returns the argv
- // index > 0 for the argument where an error occurred. In that case,
- // (argc, argv) will remain unchanged independent of the remove_flags
- // value, and no assumptions about flag settings should be made.
- //
- // The following syntax for flags is accepted (both '-' and '--' are ok):
- //
- // --flag (bool flags only)
- // --noflag (bool flags only)
- // --flag=value (non-bool flags only, no spaces around '=')
- // --flag value (non-bool flags only)
- // -- (equivalent to --js_arguments, captures all remaining args)
- static int SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags);
-
- // Set the flag values by parsing the string str. Splits string into argc
- // substrings argv[], each of which consisting of non-white-space chars,
- // and then calls SetFlagsFromCommandLine() and returns its result.
- static int SetFlagsFromString(const char* str, int len);
-
- // Reset all flags to their default value.
- static void ResetAllFlags();
-
- // Print help to stdout with flags, types, and default values.
- static void PrintHelp();
-};
-
-} } // namespace v8::internal
-
-#endif // V8_FLAGS_H_
diff --git a/src/3rdparty/v8/src/frame-element.cc b/src/3rdparty/v8/src/frame-element.cc
deleted file mode 100644
index f629900..0000000
--- a/src/3rdparty/v8/src/frame-element.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "frame-element.h"
-#include "zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/frame-element.h b/src/3rdparty/v8/src/frame-element.h
deleted file mode 100644
index 0c7d010..0000000
--- a/src/3rdparty/v8/src/frame-element.h
+++ /dev/null
@@ -1,269 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FRAME_ELEMENT_H_
-#define V8_FRAME_ELEMENT_H_
-
-#include "type-info.h"
-#include "macro-assembler.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Virtual frame elements
-//
-// The internal elements of the virtual frames. There are several kinds of
-// elements:
-// * Invalid: elements that are uninitialized or not actually part
-// of the virtual frame. They should not be read.
-// * Memory: an element that resides in the actual frame. Its address is
-// given by its position in the virtual frame.
-// * Register: an element that resides in a register.
-// * Constant: an element whose value is known at compile time.
-
-class FrameElement BASE_EMBEDDED {
- public:
- enum SyncFlag {
- NOT_SYNCED,
- SYNCED
- };
-
- inline TypeInfo type_info() {
- // Copied elements do not have type info. Instead
- // we have to inspect their backing element in the frame.
- ASSERT(!is_copy());
- return TypeInfo::FromInt(TypeInfoField::decode(value_));
- }
-
- inline void set_type_info(TypeInfo info) {
- // Copied elements do not have type info. Instead
- // we have to inspect their backing element in the frame.
- ASSERT(!is_copy());
- value_ = value_ & ~TypeInfoField::mask();
- value_ = value_ | TypeInfoField::encode(info.ToInt());
- }
-
- // The default constructor creates an invalid frame element.
- FrameElement() {
- value_ = TypeField::encode(INVALID)
- | CopiedField::encode(false)
- | SyncedField::encode(false)
- | TypeInfoField::encode(TypeInfo::Uninitialized().ToInt())
- | DataField::encode(0);
- }
-
- // Factory function to construct an invalid frame element.
- static FrameElement InvalidElement() {
- FrameElement result;
- return result;
- }
-
- // Factory function to construct an in-memory frame element.
- static FrameElement MemoryElement(TypeInfo info) {
- FrameElement result(MEMORY, no_reg, SYNCED, info);
- return result;
- }
-
- // Factory function to construct an in-register frame element.
- static FrameElement RegisterElement(Register reg,
- SyncFlag is_synced,
- TypeInfo info) {
- return FrameElement(REGISTER, reg, is_synced, info);
- }
-
- // Factory function to construct a frame element whose value is known at
- // compile time.
- static FrameElement ConstantElement(Handle<Object> value,
- SyncFlag is_synced) {
- TypeInfo info = TypeInfo::TypeFromValue(value);
- FrameElement result(value, is_synced, info);
- return result;
- }
-
- static bool ConstantPoolOverflowed() {
- return !DataField::is_valid(
- Isolate::Current()->frame_element_constant_list()->length());
- }
-
- bool is_synced() const { return SyncedField::decode(value_); }
-
- void set_sync() {
- ASSERT(type() != MEMORY);
- value_ = value_ | SyncedField::encode(true);
- }
-
- void clear_sync() {
- ASSERT(type() != MEMORY);
- value_ = value_ & ~SyncedField::mask();
- }
-
- bool is_valid() const { return type() != INVALID; }
- bool is_memory() const { return type() == MEMORY; }
- bool is_register() const { return type() == REGISTER; }
- bool is_constant() const { return type() == CONSTANT; }
- bool is_copy() const { return type() == COPY; }
-
- bool is_copied() const { return CopiedField::decode(value_); }
- void set_copied() { value_ = value_ | CopiedField::encode(true); }
- void clear_copied() { value_ = value_ & ~CopiedField::mask(); }
-
- // An untagged int32 FrameElement represents a signed int32
- // on the stack. These are only allowed in a side-effect-free
- // int32 calculation, and if a non-int32 input shows up or an overflow
- // occurs, we bail out and drop all the int32 values.
- void set_untagged_int32(bool value) {
- value_ &= ~UntaggedInt32Field::mask();
- value_ |= UntaggedInt32Field::encode(value);
- }
- bool is_untagged_int32() const { return UntaggedInt32Field::decode(value_); }
-
- Register reg() const {
- ASSERT(is_register());
- uint32_t reg = DataField::decode(value_);
- Register result;
- result.code_ = reg;
- return result;
- }
-
- Handle<Object> handle() const {
- ASSERT(is_constant());
- return Isolate::Current()->frame_element_constant_list()->
- at(DataField::decode(value_));
- }
-
- int index() const {
- ASSERT(is_copy());
- return DataField::decode(value_);
- }
-
- bool Equals(FrameElement other) {
- uint32_t masked_difference = (value_ ^ other.value_) & ~CopiedField::mask();
- if (!masked_difference) {
- // The elements are equal if they agree exactly except on copied field.
- return true;
- } else {
- // If two constants have the same value, and agree otherwise, return true.
- return !(masked_difference & ~DataField::mask()) &&
- is_constant() &&
- handle().is_identical_to(other.handle());
- }
- }
-
- // Test if two FrameElements refer to the same memory or register location.
- bool SameLocation(FrameElement* other) {
- if (type() == other->type()) {
- if (value_ == other->value_) return true;
- if (is_constant() && handle().is_identical_to(other->handle())) {
- return true;
- }
- }
- return false;
- }
-
- // Given a pair of non-null frame element pointers, return one of them
- // as an entry frame candidate or null if they are incompatible.
- FrameElement* Combine(FrameElement* other) {
- // If either is invalid, the result is.
- if (!is_valid()) return this;
- if (!other->is_valid()) return other;
-
- if (!SameLocation(other)) return NULL;
- // If either is unsynced, the result is.
- FrameElement* result = is_synced() ? other : this;
- return result;
- }
-
- private:
- enum Type {
- INVALID,
- MEMORY,
- REGISTER,
- CONSTANT,
- COPY
- };
-
- // Used to construct memory and register elements.
- FrameElement(Type type,
- Register reg,
- SyncFlag is_synced,
- TypeInfo info) {
- value_ = TypeField::encode(type)
- | CopiedField::encode(false)
- | SyncedField::encode(is_synced != NOT_SYNCED)
- | TypeInfoField::encode(info.ToInt())
- | DataField::encode(reg.code_ > 0 ? reg.code_ : 0);
- }
-
- // Used to construct constant elements.
- FrameElement(Handle<Object> value, SyncFlag is_synced, TypeInfo info) {
- ZoneObjectList* constant_list =
- Isolate::Current()->frame_element_constant_list();
- value_ = TypeField::encode(CONSTANT)
- | CopiedField::encode(false)
- | SyncedField::encode(is_synced != NOT_SYNCED)
- | TypeInfoField::encode(info.ToInt())
- | DataField::encode(constant_list->length());
- constant_list->Add(value);
- }
-
- Type type() const { return TypeField::decode(value_); }
- void set_type(Type type) {
- value_ = value_ & ~TypeField::mask();
- value_ = value_ | TypeField::encode(type);
- }
-
- void set_index(int new_index) {
- ASSERT(is_copy());
- value_ = value_ & ~DataField::mask();
- value_ = value_ | DataField::encode(new_index);
- }
-
- void set_reg(Register new_reg) {
- ASSERT(is_register());
- value_ = value_ & ~DataField::mask();
- value_ = value_ | DataField::encode(new_reg.code_);
- }
-
- // Encode type, copied, synced and data in one 32 bit integer.
- uint32_t value_;
-
- // Declare BitFields with template parameters <type, start, size>.
- class TypeField: public BitField<Type, 0, 3> {};
- class CopiedField: public BitField<bool, 3, 1> {};
- class SyncedField: public BitField<bool, 4, 1> {};
- class UntaggedInt32Field: public BitField<bool, 5, 1> {};
- class TypeInfoField: public BitField<int, 6, 7> {};
- class DataField: public BitField<uint32_t, 13, 32 - 13> {};
-
- friend class VirtualFrame;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_FRAME_ELEMENT_H_
diff --git a/src/3rdparty/v8/src/frames-inl.h b/src/3rdparty/v8/src/frames-inl.h
deleted file mode 100644
index 236db05..0000000
--- a/src/3rdparty/v8/src/frames-inl.h
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FRAMES_INL_H_
-#define V8_FRAMES_INL_H_
-
-#include "frames.h"
-#include "isolate.h"
-#include "v8memory.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/frames-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/frames-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/frames-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/frames-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-
-inline Address StackHandler::address() const {
- return reinterpret_cast<Address>(const_cast<StackHandler*>(this));
-}
-
-
-inline StackHandler* StackHandler::next() const {
- const int offset = StackHandlerConstants::kNextOffset;
- return FromAddress(Memory::Address_at(address() + offset));
-}
-
-
-inline bool StackHandler::includes(Address address) const {
- Address start = this->address();
- Address end = start + StackHandlerConstants::kSize;
- return start <= address && address <= end;
-}
-
-
-inline void StackHandler::Iterate(ObjectVisitor* v, Code* holder) const {
- StackFrame::IteratePc(v, pc_address(), holder);
-}
-
-
-inline StackHandler* StackHandler::FromAddress(Address address) {
- return reinterpret_cast<StackHandler*>(address);
-}
-
-
-inline StackHandler::State StackHandler::state() const {
- const int offset = StackHandlerConstants::kStateOffset;
- return static_cast<State>(Memory::int_at(address() + offset));
-}
-
-
-inline Address* StackHandler::pc_address() const {
- const int offset = StackHandlerConstants::kPCOffset;
- return reinterpret_cast<Address*>(address() + offset);
-}
-
-
-inline StackFrame::StackFrame(StackFrameIterator* iterator)
- : iterator_(iterator), isolate_(iterator_->isolate()) {
-}
-
-
-inline StackHandler* StackFrame::top_handler() const {
- return iterator_->handler();
-}
-
-
-inline Code* StackFrame::GetContainingCode(Isolate* isolate, Address pc) {
- return isolate->pc_to_code_cache()->GetCacheEntry(pc)->code;
-}
-
-
-inline Object* StandardFrame::GetExpression(int index) const {
- return Memory::Object_at(GetExpressionAddress(index));
-}
-
-
-inline void StandardFrame::SetExpression(int index, Object* value) {
- Memory::Object_at(GetExpressionAddress(index)) = value;
-}
-
-
-inline Object* StandardFrame::context() const {
- const int offset = StandardFrameConstants::kContextOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
-inline Address StandardFrame::caller_fp() const {
- return Memory::Address_at(fp() + StandardFrameConstants::kCallerFPOffset);
-}
-
-
-inline Address StandardFrame::caller_pc() const {
- return Memory::Address_at(ComputePCAddress(fp()));
-}
-
-
-inline Address StandardFrame::ComputePCAddress(Address fp) {
- return fp + StandardFrameConstants::kCallerPCOffset;
-}
-
-
-inline bool StandardFrame::IsArgumentsAdaptorFrame(Address fp) {
- Object* marker =
- Memory::Object_at(fp + StandardFrameConstants::kContextOffset);
- return marker == Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR);
-}
-
-
-inline bool StandardFrame::IsConstructFrame(Address fp) {
- Object* marker =
- Memory::Object_at(fp + StandardFrameConstants::kMarkerOffset);
- return marker == Smi::FromInt(CONSTRUCT);
-}
-
-
-inline Object* JavaScriptFrame::receiver() const {
- const int offset = JavaScriptFrameConstants::kReceiverOffset;
- return Memory::Object_at(caller_sp() + offset);
-}
-
-
-inline void JavaScriptFrame::set_receiver(Object* value) {
- const int offset = JavaScriptFrameConstants::kReceiverOffset;
- Memory::Object_at(caller_sp() + offset) = value;
-}
-
-
-inline bool JavaScriptFrame::has_adapted_arguments() const {
- return IsArgumentsAdaptorFrame(caller_fp());
-}
-
-
-inline Object* JavaScriptFrame::function() const {
- Object* result = function_slot_object();
- ASSERT(result->IsJSFunction());
- return result;
-}
-
-
-template<typename Iterator>
-inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
- Isolate* isolate)
- : iterator_(isolate) {
- if (!done()) Advance();
-}
-
-template<typename Iterator>
-inline JavaScriptFrame* JavaScriptFrameIteratorTemp<Iterator>::frame() const {
- // TODO(1233797): The frame hierarchy needs to change. It's
- // problematic that we can't use the safe-cast operator to cast to
- // the JavaScript frame type, because we may encounter arguments
- // adaptor frames.
- StackFrame* frame = iterator_.frame();
- ASSERT(frame->is_java_script() || frame->is_arguments_adaptor());
- return static_cast<JavaScriptFrame*>(frame);
-}
-
-
-template<typename Iterator>
-JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
- Isolate* isolate, StackFrame::Id id)
- : iterator_(isolate) {
- AdvanceToId(id);
-}
-
-
-template<typename Iterator>
-void JavaScriptFrameIteratorTemp<Iterator>::Advance() {
- do {
- iterator_.Advance();
- } while (!iterator_.done() && !iterator_.frame()->is_java_script());
-}
-
-
-template<typename Iterator>
-void JavaScriptFrameIteratorTemp<Iterator>::AdvanceToArgumentsFrame() {
- if (!frame()->has_adapted_arguments()) return;
- iterator_.Advance();
- ASSERT(iterator_.frame()->is_arguments_adaptor());
-}
-
-
-template<typename Iterator>
-void JavaScriptFrameIteratorTemp<Iterator>::AdvanceToId(StackFrame::Id id) {
- while (!done()) {
- Advance();
- if (frame()->id() == id) return;
- }
-}
-
-
-template<typename Iterator>
-void JavaScriptFrameIteratorTemp<Iterator>::Reset() {
- iterator_.Reset();
- if (!done()) Advance();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_FRAMES_INL_H_
diff --git a/src/3rdparty/v8/src/frames.cc b/src/3rdparty/v8/src/frames.cc
deleted file mode 100644
index 1672b1d..0000000
--- a/src/3rdparty/v8/src/frames.cc
+++ /dev/null
@@ -1,1273 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "ast.h"
-#include "deoptimizer.h"
-#include "frames-inl.h"
-#include "full-codegen.h"
-#include "mark-compact.h"
-#include "safepoint-table.h"
-#include "scopeinfo.h"
-#include "string-stream.h"
-
-namespace v8 {
-namespace internal {
-
-// Iterator that supports traversing the stack handlers of a
-// particular frame. Needs to know the top of the handler chain.
-class StackHandlerIterator BASE_EMBEDDED {
- public:
- StackHandlerIterator(const StackFrame* frame, StackHandler* handler)
- : limit_(frame->fp()), handler_(handler) {
- // Make sure the handler has already been unwound to this frame.
- ASSERT(frame->sp() <= handler->address());
- }
-
- StackHandler* handler() const { return handler_; }
-
- bool done() {
- return handler_ == NULL || handler_->address() > limit_;
- }
- void Advance() {
- ASSERT(!done());
- handler_ = handler_->next();
- }
-
- private:
- const Address limit_;
- StackHandler* handler_;
-};
-
-
-// -------------------------------------------------------------------------
-
-
-#define INITIALIZE_SINGLETON(type, field) field##_(this),
-StackFrameIterator::StackFrameIterator()
- : isolate_(Isolate::Current()),
- STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
- frame_(NULL), handler_(NULL),
- thread_(isolate_->thread_local_top()),
- fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
- Reset();
-}
-StackFrameIterator::StackFrameIterator(Isolate* isolate)
- : isolate_(isolate),
- STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
- frame_(NULL), handler_(NULL),
- thread_(isolate_->thread_local_top()),
- fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
- Reset();
-}
-StackFrameIterator::StackFrameIterator(Isolate* isolate, ThreadLocalTop* t)
- : isolate_(isolate),
- STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
- frame_(NULL), handler_(NULL), thread_(t),
- fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
- Reset();
-}
-StackFrameIterator::StackFrameIterator(Isolate* isolate,
- bool use_top, Address fp, Address sp)
- : isolate_(isolate),
- STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
- frame_(NULL), handler_(NULL),
- thread_(use_top ? isolate_->thread_local_top() : NULL),
- fp_(use_top ? NULL : fp), sp_(sp),
- advance_(use_top ? &StackFrameIterator::AdvanceWithHandler :
- &StackFrameIterator::AdvanceWithoutHandler) {
- if (use_top || fp != NULL) {
- Reset();
- }
-}
-
-#undef INITIALIZE_SINGLETON
-
-
-void StackFrameIterator::AdvanceWithHandler() {
- ASSERT(!done());
- // Compute the state of the calling frame before restoring
- // callee-saved registers and unwinding handlers. This allows the
- // frame code that computes the caller state to access the top
- // handler and the value of any callee-saved register if needed.
- StackFrame::State state;
- StackFrame::Type type = frame_->GetCallerState(&state);
-
- // Unwind handlers corresponding to the current frame.
- StackHandlerIterator it(frame_, handler_);
- while (!it.done()) it.Advance();
- handler_ = it.handler();
-
- // Advance to the calling frame.
- frame_ = SingletonFor(type, &state);
-
- // When we're done iterating over the stack frames, the handler
- // chain must have been completely unwound.
- ASSERT(!done() || handler_ == NULL);
-}
-
-
-void StackFrameIterator::AdvanceWithoutHandler() {
- // A simpler version of Advance which doesn't care about handler.
- ASSERT(!done());
- StackFrame::State state;
- StackFrame::Type type = frame_->GetCallerState(&state);
- frame_ = SingletonFor(type, &state);
-}
-
-
-void StackFrameIterator::Reset() {
- StackFrame::State state;
- StackFrame::Type type;
- if (thread_ != NULL) {
- type = ExitFrame::GetStateForFramePointer(
- Isolate::c_entry_fp(thread_), &state);
- handler_ = StackHandler::FromAddress(
- Isolate::handler(thread_));
- } else {
- ASSERT(fp_ != NULL);
- state.fp = fp_;
- state.sp = sp_;
- state.pc_address =
- reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp_));
- type = StackFrame::ComputeType(isolate(), &state);
- }
- if (SingletonFor(type) == NULL) return;
- frame_ = SingletonFor(type, &state);
-}
-
-
-StackFrame* StackFrameIterator::SingletonFor(StackFrame::Type type,
- StackFrame::State* state) {
- if (type == StackFrame::NONE) return NULL;
- StackFrame* result = SingletonFor(type);
- ASSERT(result != NULL);
- result->state_ = *state;
- return result;
-}
-
-
-StackFrame* StackFrameIterator::SingletonFor(StackFrame::Type type) {
-#define FRAME_TYPE_CASE(type, field) \
- case StackFrame::type: result = &field##_; break;
-
- StackFrame* result = NULL;
- switch (type) {
- case StackFrame::NONE: return NULL;
- STACK_FRAME_TYPE_LIST(FRAME_TYPE_CASE)
- default: break;
- }
- return result;
-
-#undef FRAME_TYPE_CASE
-}
-
-
-// -------------------------------------------------------------------------
-
-
-StackTraceFrameIterator::StackTraceFrameIterator() {
- if (!done() && !IsValidFrame()) Advance();
-}
-
-
-StackTraceFrameIterator::StackTraceFrameIterator(Isolate* isolate)
- : JavaScriptFrameIterator(isolate) {
- if (!done() && !IsValidFrame()) Advance();
-}
-
-
-void StackTraceFrameIterator::Advance() {
- while (true) {
- JavaScriptFrameIterator::Advance();
- if (done()) return;
- if (IsValidFrame()) return;
- }
-}
-
-bool StackTraceFrameIterator::IsValidFrame() {
- if (!frame()->function()->IsJSFunction()) return false;
- Object* script = JSFunction::cast(frame()->function())->shared()->script();
- // Don't show functions from native scripts to user.
- return (script->IsScript() &&
- Script::TYPE_NATIVE != Script::cast(script)->type()->value());
-}
-
-
-// -------------------------------------------------------------------------
-
-
-bool SafeStackFrameIterator::ExitFrameValidator::IsValidFP(Address fp) {
- if (!validator_.IsValid(fp)) return false;
- Address sp = ExitFrame::ComputeStackPointer(fp);
- if (!validator_.IsValid(sp)) return false;
- StackFrame::State state;
- ExitFrame::FillState(fp, sp, &state);
- if (!validator_.IsValid(reinterpret_cast<Address>(state.pc_address))) {
- return false;
- }
- return *state.pc_address != NULL;
-}
-
-
-SafeStackFrameIterator::ActiveCountMaintainer::ActiveCountMaintainer(
- Isolate* isolate)
- : isolate_(isolate) {
- isolate_->set_safe_stack_iterator_counter(
- isolate_->safe_stack_iterator_counter() + 1);
-}
-
-
-SafeStackFrameIterator::ActiveCountMaintainer::~ActiveCountMaintainer() {
- isolate_->set_safe_stack_iterator_counter(
- isolate_->safe_stack_iterator_counter() - 1);
-}
-
-
-SafeStackFrameIterator::SafeStackFrameIterator(
- Isolate* isolate,
- Address fp, Address sp, Address low_bound, Address high_bound) :
- maintainer_(isolate),
- stack_validator_(low_bound, high_bound),
- is_valid_top_(IsValidTop(isolate, low_bound, high_bound)),
- is_valid_fp_(IsWithinBounds(low_bound, high_bound, fp)),
- is_working_iterator_(is_valid_top_ || is_valid_fp_),
- iteration_done_(!is_working_iterator_),
- iterator_(isolate, is_valid_top_, is_valid_fp_ ? fp : NULL, sp) {
-}
-
-bool SafeStackFrameIterator::is_active(Isolate* isolate) {
- return isolate->safe_stack_iterator_counter() > 0;
-}
-
-
-bool SafeStackFrameIterator::IsValidTop(Isolate* isolate,
- Address low_bound, Address high_bound) {
- ThreadLocalTop* top = isolate->thread_local_top();
- Address fp = Isolate::c_entry_fp(top);
- ExitFrameValidator validator(low_bound, high_bound);
- if (!validator.IsValidFP(fp)) return false;
- return Isolate::handler(top) != NULL;
-}
-
-
-void SafeStackFrameIterator::Advance() {
- ASSERT(is_working_iterator_);
- ASSERT(!done());
- StackFrame* last_frame = iterator_.frame();
- Address last_sp = last_frame->sp(), last_fp = last_frame->fp();
- // Before advancing to the next stack frame, perform pointer validity tests
- iteration_done_ = !IsValidFrame(last_frame) ||
- !CanIterateHandles(last_frame, iterator_.handler()) ||
- !IsValidCaller(last_frame);
- if (iteration_done_) return;
-
- iterator_.Advance();
- if (iterator_.done()) return;
- // Check that we have actually moved to the previous frame in the stack
- StackFrame* prev_frame = iterator_.frame();
- iteration_done_ = prev_frame->sp() < last_sp || prev_frame->fp() < last_fp;
-}
-
-
-bool SafeStackFrameIterator::CanIterateHandles(StackFrame* frame,
- StackHandler* handler) {
- // If StackIterator iterates over StackHandles, verify that
- // StackHandlerIterator can be instantiated (see StackHandlerIterator
- // constructor.)
- return !is_valid_top_ || (frame->sp() <= handler->address());
-}
-
-
-bool SafeStackFrameIterator::IsValidFrame(StackFrame* frame) const {
- return IsValidStackAddress(frame->sp()) && IsValidStackAddress(frame->fp());
-}
-
-
-bool SafeStackFrameIterator::IsValidCaller(StackFrame* frame) {
- StackFrame::State state;
- if (frame->is_entry() || frame->is_entry_construct()) {
- // See EntryFrame::GetCallerState. It computes the caller FP address
- // and calls ExitFrame::GetStateForFramePointer on it. We need to be
- // sure that caller FP address is valid.
- Address caller_fp = Memory::Address_at(
- frame->fp() + EntryFrameConstants::kCallerFPOffset);
- ExitFrameValidator validator(stack_validator_);
- if (!validator.IsValidFP(caller_fp)) return false;
- } else if (frame->is_arguments_adaptor()) {
- // See ArgumentsAdaptorFrame::GetCallerStackPointer. It assumes that
- // the number of arguments is stored on stack as Smi. We need to check
- // that it really an Smi.
- Object* number_of_args = reinterpret_cast<ArgumentsAdaptorFrame*>(frame)->
- GetExpression(0);
- if (!number_of_args->IsSmi()) {
- return false;
- }
- }
- frame->ComputeCallerState(&state);
- return IsValidStackAddress(state.sp) && IsValidStackAddress(state.fp) &&
- iterator_.SingletonFor(frame->GetCallerState(&state)) != NULL;
-}
-
-
-void SafeStackFrameIterator::Reset() {
- if (is_working_iterator_) {
- iterator_.Reset();
- iteration_done_ = false;
- }
-}
-
-
-// -------------------------------------------------------------------------
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-SafeStackTraceFrameIterator::SafeStackTraceFrameIterator(
- Isolate* isolate,
- Address fp, Address sp, Address low_bound, Address high_bound) :
- SafeJavaScriptFrameIterator(isolate, fp, sp, low_bound, high_bound) {
- if (!done() && !frame()->is_java_script()) Advance();
-}
-
-
-void SafeStackTraceFrameIterator::Advance() {
- while (true) {
- SafeJavaScriptFrameIterator::Advance();
- if (done()) return;
- if (frame()->is_java_script()) return;
- }
-}
-#endif
-
-
-Code* StackFrame::GetSafepointData(Isolate* isolate,
- Address pc,
- SafepointEntry* safepoint_entry,
- unsigned* stack_slots) {
- PcToCodeCache::PcToCodeCacheEntry* entry =
- isolate->pc_to_code_cache()->GetCacheEntry(pc);
- SafepointEntry cached_safepoint_entry = entry->safepoint_entry;
- if (!entry->safepoint_entry.is_valid()) {
- entry->safepoint_entry = entry->code->GetSafepointEntry(pc);
- ASSERT(entry->safepoint_entry.is_valid());
- } else {
- ASSERT(entry->safepoint_entry.Equals(entry->code->GetSafepointEntry(pc)));
- }
-
- // Fill in the results and return the code.
- Code* code = entry->code;
- *safepoint_entry = entry->safepoint_entry;
- *stack_slots = code->stack_slots();
- return code;
-}
-
-
-bool StackFrame::HasHandler() const {
- StackHandlerIterator it(this, top_handler());
- return !it.done();
-}
-
-
-void StackFrame::IteratePc(ObjectVisitor* v,
- Address* pc_address,
- Code* holder) {
- Address pc = *pc_address;
- ASSERT(holder->contains(pc));
- unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start());
- Object* code = holder;
- v->VisitPointer(&code);
- if (code != holder) {
- holder = reinterpret_cast<Code*>(code);
- pc = holder->instruction_start() + pc_offset;
- *pc_address = pc;
- }
-}
-
-
-StackFrame::Type StackFrame::ComputeType(Isolate* isolate, State* state) {
- ASSERT(state->fp != NULL);
- if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
- return ARGUMENTS_ADAPTOR;
- }
- // The marker and function offsets overlap. If the marker isn't a
- // smi then the frame is a JavaScript frame -- and the marker is
- // really the function.
- const int offset = StandardFrameConstants::kMarkerOffset;
- Object* marker = Memory::Object_at(state->fp + offset);
- if (!marker->IsSmi()) {
- // If we're using a "safe" stack iterator, we treat optimized
- // frames as normal JavaScript frames to avoid having to look
- // into the heap to determine the state. This is safe as long
- // as nobody tries to GC...
- if (SafeStackFrameIterator::is_active(isolate)) return JAVA_SCRIPT;
- Code::Kind kind = GetContainingCode(isolate, *(state->pc_address))->kind();
- ASSERT(kind == Code::FUNCTION || kind == Code::OPTIMIZED_FUNCTION);
- return (kind == Code::OPTIMIZED_FUNCTION) ? OPTIMIZED : JAVA_SCRIPT;
- }
- return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
-}
-
-
-
-StackFrame::Type StackFrame::GetCallerState(State* state) const {
- ComputeCallerState(state);
- return ComputeType(isolate(), state);
-}
-
-
-Code* EntryFrame::unchecked_code() const {
- return HEAP->raw_unchecked_js_entry_code();
-}
-
-
-void EntryFrame::ComputeCallerState(State* state) const {
- GetCallerState(state);
-}
-
-
-void EntryFrame::SetCallerFp(Address caller_fp) {
- const int offset = EntryFrameConstants::kCallerFPOffset;
- Memory::Address_at(this->fp() + offset) = caller_fp;
-}
-
-
-StackFrame::Type EntryFrame::GetCallerState(State* state) const {
- const int offset = EntryFrameConstants::kCallerFPOffset;
- Address fp = Memory::Address_at(this->fp() + offset);
- return ExitFrame::GetStateForFramePointer(fp, state);
-}
-
-
-Code* EntryConstructFrame::unchecked_code() const {
- return HEAP->raw_unchecked_js_construct_entry_code();
-}
-
-
-Object*& ExitFrame::code_slot() const {
- const int offset = ExitFrameConstants::kCodeOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
-Code* ExitFrame::unchecked_code() const {
- return reinterpret_cast<Code*>(code_slot());
-}
-
-
-void ExitFrame::ComputeCallerState(State* state) const {
- // Setup the caller state.
- state->sp = caller_sp();
- state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset);
- state->pc_address
- = reinterpret_cast<Address*>(fp() + ExitFrameConstants::kCallerPCOffset);
-}
-
-
-void ExitFrame::SetCallerFp(Address caller_fp) {
- Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset) = caller_fp;
-}
-
-
-void ExitFrame::Iterate(ObjectVisitor* v) const {
- // The arguments are traversed as part of the expression stack of
- // the calling frame.
- IteratePc(v, pc_address(), LookupCode());
- v->VisitPointer(&code_slot());
-}
-
-
-Address ExitFrame::GetCallerStackPointer() const {
- return fp() + ExitFrameConstants::kCallerSPDisplacement;
-}
-
-
-StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
- if (fp == 0) return NONE;
- Address sp = ComputeStackPointer(fp);
- FillState(fp, sp, state);
- ASSERT(*state->pc_address != NULL);
- return EXIT;
-}
-
-
-void ExitFrame::FillState(Address fp, Address sp, State* state) {
- state->sp = sp;
- state->fp = fp;
- state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
-}
-
-
-Address StandardFrame::GetExpressionAddress(int n) const {
- const int offset = StandardFrameConstants::kExpressionsOffset;
- return fp() + offset - n * kPointerSize;
-}
-
-
-int StandardFrame::ComputeExpressionsCount() const {
- const int offset =
- StandardFrameConstants::kExpressionsOffset + kPointerSize;
- Address base = fp() + offset;
- Address limit = sp();
- ASSERT(base >= limit); // stack grows downwards
- // Include register-allocated locals in number of expressions.
- return static_cast<int>((base - limit) / kPointerSize);
-}
-
-
-void StandardFrame::ComputeCallerState(State* state) const {
- state->sp = caller_sp();
- state->fp = caller_fp();
- state->pc_address = reinterpret_cast<Address*>(ComputePCAddress(fp()));
-}
-
-
-void StandardFrame::SetCallerFp(Address caller_fp) {
- Memory::Address_at(fp() + StandardFrameConstants::kCallerFPOffset) =
- caller_fp;
-}
-
-
-bool StandardFrame::IsExpressionInsideHandler(int n) const {
- Address address = GetExpressionAddress(n);
- for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
- if (it.handler()->includes(address)) return true;
- }
- return false;
-}
-
-
-void OptimizedFrame::Iterate(ObjectVisitor* v) const {
-#ifdef DEBUG
- // Make sure that optimized frames do not contain any stack handlers.
- StackHandlerIterator it(this, top_handler());
- ASSERT(it.done());
-#endif
-
- // Make sure that we're not doing "safe" stack frame iteration. We cannot
- // possibly find pointers in optimized frames in that state.
- ASSERT(!SafeStackFrameIterator::is_active(isolate()));
-
- // Compute the safepoint information.
- unsigned stack_slots = 0;
- SafepointEntry safepoint_entry;
- Code* code = StackFrame::GetSafepointData(
- isolate(), pc(), &safepoint_entry, &stack_slots);
- unsigned slot_space = stack_slots * kPointerSize;
-
- // Visit the outgoing parameters. This is usually dealt with by the
- // callee, but while GC'ing we artificially lower the number of
- // arguments to zero and let the caller deal with it.
- Object** parameters_base = &Memory::Object_at(sp());
- Object** parameters_limit = &Memory::Object_at(
- fp() + JavaScriptFrameConstants::kFunctionOffset - slot_space);
-
- // Visit the parameters that may be on top of the saved registers.
- if (safepoint_entry.argument_count() > 0) {
- v->VisitPointers(parameters_base,
- parameters_base + safepoint_entry.argument_count());
- parameters_base += safepoint_entry.argument_count();
- }
-
- // Skip saved double registers.
- if (safepoint_entry.has_doubles()) {
- parameters_base += DoubleRegister::kNumAllocatableRegisters *
- kDoubleSize / kPointerSize;
- }
-
- // Visit the registers that contain pointers if any.
- if (safepoint_entry.HasRegisters()) {
- for (int i = kNumSafepointRegisters - 1; i >=0; i--) {
- if (safepoint_entry.HasRegisterAt(i)) {
- int reg_stack_index = MacroAssembler::SafepointRegisterStackIndex(i);
- v->VisitPointer(parameters_base + reg_stack_index);
- }
- }
- // Skip the words containing the register values.
- parameters_base += kNumSafepointRegisters;
- }
-
- // We're done dealing with the register bits.
- uint8_t* safepoint_bits = safepoint_entry.bits();
- safepoint_bits += kNumSafepointRegisters >> kBitsPerByteLog2;
-
- // Visit the rest of the parameters.
- v->VisitPointers(parameters_base, parameters_limit);
-
- // Visit pointer spill slots and locals.
- for (unsigned index = 0; index < stack_slots; index++) {
- int byte_index = index >> kBitsPerByteLog2;
- int bit_index = index & (kBitsPerByte - 1);
- if ((safepoint_bits[byte_index] & (1U << bit_index)) != 0) {
- v->VisitPointer(parameters_limit + index);
- }
- }
-
- // Visit the context and the function.
- Object** fixed_base = &Memory::Object_at(
- fp() + JavaScriptFrameConstants::kFunctionOffset);
- Object** fixed_limit = &Memory::Object_at(fp());
- v->VisitPointers(fixed_base, fixed_limit);
-
- // Visit the return address in the callee and incoming arguments.
- IteratePc(v, pc_address(), code);
- IterateArguments(v);
-}
-
-
-Object* JavaScriptFrame::GetParameter(int index) const {
- ASSERT(index >= 0 && index < ComputeParametersCount());
- const int offset = JavaScriptFrameConstants::kParam0Offset;
- return Memory::Object_at(caller_sp() + offset - (index * kPointerSize));
-}
-
-
-int JavaScriptFrame::ComputeParametersCount() const {
- Address base = caller_sp() + JavaScriptFrameConstants::kReceiverOffset;
- Address limit = fp() + JavaScriptFrameConstants::kLastParameterOffset;
- return static_cast<int>((base - limit) / kPointerSize);
-}
-
-
-bool JavaScriptFrame::IsConstructor() const {
- Address fp = caller_fp();
- if (has_adapted_arguments()) {
- // Skip the arguments adaptor frame and look at the real caller.
- fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
- }
- return IsConstructFrame(fp);
-}
-
-
-Code* JavaScriptFrame::unchecked_code() const {
- JSFunction* function = JSFunction::cast(this->function());
- return function->unchecked_code();
-}
-
-
-Address JavaScriptFrame::GetCallerStackPointer() const {
- int arguments;
- if (SafeStackFrameIterator::is_active(isolate()) ||
- isolate()->heap()->gc_state() != Heap::NOT_IN_GC) {
- // If the we are currently iterating the safe stack the
- // arguments for frames are traversed as if they were
- // expression stack elements of the calling frame. The reason for
- // this rather strange decision is that we cannot access the
- // function during mark-compact GCs when objects may have been marked.
- // In fact accessing heap objects (like function->shared() below)
- // at all during GC is problematic.
- arguments = 0;
- } else {
- // Compute the number of arguments by getting the number of formal
- // parameters of the function. We must remember to take the
- // receiver into account (+1).
- JSFunction* function = JSFunction::cast(this->function());
- arguments = function->shared()->formal_parameter_count() + 1;
- }
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments * kPointerSize);
-}
-
-
-void JavaScriptFrame::GetFunctions(List<JSFunction*>* functions) {
- ASSERT(functions->length() == 0);
- functions->Add(JSFunction::cast(function()));
-}
-
-
-void JavaScriptFrame::Summarize(List<FrameSummary>* functions) {
- ASSERT(functions->length() == 0);
- Code* code_pointer = LookupCode();
- int offset = static_cast<int>(pc() - code_pointer->address());
- FrameSummary summary(receiver(),
- JSFunction::cast(function()),
- code_pointer,
- offset,
- IsConstructor());
- functions->Add(summary);
-}
-
-
-void FrameSummary::Print() {
- PrintF("receiver: ");
- receiver_->ShortPrint();
- PrintF("\nfunction: ");
- function_->shared()->DebugName()->ShortPrint();
- PrintF("\ncode: ");
- code_->ShortPrint();
- if (code_->kind() == Code::FUNCTION) PrintF(" NON-OPT");
- if (code_->kind() == Code::OPTIMIZED_FUNCTION) PrintF(" OPT");
- PrintF("\npc: %d\n", offset_);
-}
-
-
-void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
- ASSERT(frames->length() == 0);
- ASSERT(is_optimized());
-
- int deopt_index = Safepoint::kNoDeoptimizationIndex;
- DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
-
- // BUG(3243555): Since we don't have a lazy-deopt registered at
- // throw-statements, we can't use the translation at the call-site of
- // throw. An entry with no deoptimization index indicates a call-site
- // without a lazy-deopt. As a consequence we are not allowed to inline
- // functions containing throw.
- if (deopt_index == Safepoint::kNoDeoptimizationIndex) {
- JavaScriptFrame::Summarize(frames);
- return;
- }
-
- TranslationIterator it(data->TranslationByteArray(),
- data->TranslationIndex(deopt_index)->value());
- Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
- ASSERT(opcode == Translation::BEGIN);
- int frame_count = it.Next();
-
- // We create the summary in reverse order because the frames
- // in the deoptimization translation are ordered bottom-to-top.
- int i = frame_count;
- while (i > 0) {
- opcode = static_cast<Translation::Opcode>(it.Next());
- if (opcode == Translation::FRAME) {
- // We don't inline constructor calls, so only the first, outermost
- // frame can be a constructor frame in case of inlining.
- bool is_constructor = (i == frame_count) && IsConstructor();
-
- i--;
- int ast_id = it.Next();
- int function_id = it.Next();
- it.Next(); // Skip height.
- JSFunction* function =
- JSFunction::cast(data->LiteralArray()->get(function_id));
-
- // The translation commands are ordered and the receiver is always
- // at the first position. Since we are always at a call when we need
- // to construct a stack trace, the receiver is always in a stack slot.
- opcode = static_cast<Translation::Opcode>(it.Next());
- ASSERT(opcode == Translation::STACK_SLOT);
- int input_slot_index = it.Next();
-
- // Get the correct receiver in the optimized frame.
- Object* receiver = NULL;
- // Positive index means the value is spilled to the locals area. Negative
- // means it is stored in the incoming parameter area.
- if (input_slot_index >= 0) {
- receiver = GetExpression(input_slot_index);
- } else {
- // Index -1 overlaps with last parameter, -n with the first parameter,
- // (-n - 1) with the receiver with n being the number of parameters
- // of the outermost, optimized frame.
- int parameter_count = ComputeParametersCount();
- int parameter_index = input_slot_index + parameter_count;
- receiver = (parameter_index == -1)
- ? this->receiver()
- : this->GetParameter(parameter_index);
- }
-
- Code* code = function->shared()->code();
- DeoptimizationOutputData* output_data =
- DeoptimizationOutputData::cast(code->deoptimization_data());
- unsigned entry = Deoptimizer::GetOutputInfo(output_data,
- ast_id,
- function->shared());
- unsigned pc_offset =
- FullCodeGenerator::PcField::decode(entry) + Code::kHeaderSize;
- ASSERT(pc_offset > 0);
-
- FrameSummary summary(receiver, function, code, pc_offset, is_constructor);
- frames->Add(summary);
- } else {
- // Skip over operands to advance to the next opcode.
- it.Skip(Translation::NumberOfOperandsFor(opcode));
- }
- }
-}
-
-
-DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
- int* deopt_index) {
- ASSERT(is_optimized());
-
- JSFunction* opt_function = JSFunction::cast(function());
- Code* code = opt_function->code();
-
- // The code object may have been replaced by lazy deoptimization. Fall
- // back to a slow search in this case to find the original optimized
- // code object.
- if (!code->contains(pc())) {
- code = isolate()->pc_to_code_cache()->GcSafeFindCodeForPc(pc());
- }
- ASSERT(code != NULL);
- ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
-
- SafepointEntry safepoint_entry = code->GetSafepointEntry(pc());
- *deopt_index = safepoint_entry.deoptimization_index();
- ASSERT(*deopt_index != Safepoint::kNoDeoptimizationIndex);
-
- return DeoptimizationInputData::cast(code->deoptimization_data());
-}
-
-
-void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
- ASSERT(functions->length() == 0);
- ASSERT(is_optimized());
-
- int deopt_index = Safepoint::kNoDeoptimizationIndex;
- DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
-
- TranslationIterator it(data->TranslationByteArray(),
- data->TranslationIndex(deopt_index)->value());
- Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
- ASSERT(opcode == Translation::BEGIN);
- int frame_count = it.Next();
-
- // We insert the frames in reverse order because the frames
- // in the deoptimization translation are ordered bottom-to-top.
- while (frame_count > 0) {
- opcode = static_cast<Translation::Opcode>(it.Next());
- if (opcode == Translation::FRAME) {
- frame_count--;
- it.Next(); // Skip ast id.
- int function_id = it.Next();
- it.Next(); // Skip height.
- JSFunction* function =
- JSFunction::cast(data->LiteralArray()->get(function_id));
- functions->Add(function);
- } else {
- // Skip over operands to advance to the next opcode.
- it.Skip(Translation::NumberOfOperandsFor(opcode));
- }
- }
-}
-
-
-Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
- const int arguments = Smi::cast(GetExpression(0))->value();
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments + 1) * kPointerSize;
-}
-
-
-Address InternalFrame::GetCallerStackPointer() const {
- // Internal frames have no arguments. The stack pointer of the
- // caller is at a fixed offset from the frame pointer.
- return fp() + StandardFrameConstants::kCallerSPOffset;
-}
-
-
-Code* ArgumentsAdaptorFrame::unchecked_code() const {
- return isolate()->builtins()->builtin(
- Builtins::kArgumentsAdaptorTrampoline);
-}
-
-
-Code* InternalFrame::unchecked_code() const {
- const int offset = InternalFrameConstants::kCodeOffset;
- Object* code = Memory::Object_at(fp() + offset);
- ASSERT(code != NULL);
- return reinterpret_cast<Code*>(code);
-}
-
-
-void StackFrame::PrintIndex(StringStream* accumulator,
- PrintMode mode,
- int index) {
- accumulator->Add((mode == OVERVIEW) ? "%5d: " : "[%d]: ", index);
-}
-
-
-void JavaScriptFrame::Print(StringStream* accumulator,
- PrintMode mode,
- int index) const {
- HandleScope scope;
- Object* receiver = this->receiver();
- Object* function = this->function();
-
- accumulator->PrintSecurityTokenIfChanged(function);
- PrintIndex(accumulator, mode, index);
- Code* code = NULL;
- if (IsConstructor()) accumulator->Add("new ");
- accumulator->PrintFunction(function, receiver, &code);
-
- Handle<SerializedScopeInfo> scope_info(SerializedScopeInfo::Empty());
-
- if (function->IsJSFunction()) {
- Handle<SharedFunctionInfo> shared(JSFunction::cast(function)->shared());
- scope_info = Handle<SerializedScopeInfo>(shared->scope_info());
- Object* script_obj = shared->script();
- if (script_obj->IsScript()) {
- Handle<Script> script(Script::cast(script_obj));
- accumulator->Add(" [");
- accumulator->PrintName(script->name());
-
- Address pc = this->pc();
- if (code != NULL && code->kind() == Code::FUNCTION &&
- pc >= code->instruction_start() && pc < code->instruction_end()) {
- int source_pos = code->SourcePosition(pc);
- int line = GetScriptLineNumberSafe(script, source_pos) + 1;
- accumulator->Add(":%d", line);
- } else {
- int function_start_pos = shared->start_position();
- int line = GetScriptLineNumberSafe(script, function_start_pos) + 1;
- accumulator->Add(":~%d", line);
- }
-
- accumulator->Add("] ");
- }
- }
-
- accumulator->Add("(this=%o", receiver);
-
- // Get scope information for nicer output, if possible. If code is
- // NULL, or doesn't contain scope info, info will return 0 for the
- // number of parameters, stack slots, or context slots.
- ScopeInfo<PreallocatedStorage> info(*scope_info);
-
- // Print the parameters.
- int parameters_count = ComputeParametersCount();
- for (int i = 0; i < parameters_count; i++) {
- accumulator->Add(",");
- // If we have a name for the parameter we print it. Nameless
- // parameters are either because we have more actual parameters
- // than formal parameters or because we have no scope information.
- if (i < info.number_of_parameters()) {
- accumulator->PrintName(*info.parameter_name(i));
- accumulator->Add("=");
- }
- accumulator->Add("%o", GetParameter(i));
- }
-
- accumulator->Add(")");
- if (mode == OVERVIEW) {
- accumulator->Add("\n");
- return;
- }
- accumulator->Add(" {\n");
-
- // Compute the number of locals and expression stack elements.
- int stack_locals_count = info.number_of_stack_slots();
- int heap_locals_count = info.number_of_context_slots();
- int expressions_count = ComputeExpressionsCount();
-
- // Print stack-allocated local variables.
- if (stack_locals_count > 0) {
- accumulator->Add(" // stack-allocated locals\n");
- }
- for (int i = 0; i < stack_locals_count; i++) {
- accumulator->Add(" var ");
- accumulator->PrintName(*info.stack_slot_name(i));
- accumulator->Add(" = ");
- if (i < expressions_count) {
- accumulator->Add("%o", GetExpression(i));
- } else {
- accumulator->Add("// no expression found - inconsistent frame?");
- }
- accumulator->Add("\n");
- }
-
- // Try to get hold of the context of this frame.
- Context* context = NULL;
- if (this->context() != NULL && this->context()->IsContext()) {
- context = Context::cast(this->context());
- }
-
- // Print heap-allocated local variables.
- if (heap_locals_count > Context::MIN_CONTEXT_SLOTS) {
- accumulator->Add(" // heap-allocated locals\n");
- }
- for (int i = Context::MIN_CONTEXT_SLOTS; i < heap_locals_count; i++) {
- accumulator->Add(" var ");
- accumulator->PrintName(*info.context_slot_name(i));
- accumulator->Add(" = ");
- if (context != NULL) {
- if (i < context->length()) {
- accumulator->Add("%o", context->get(i));
- } else {
- accumulator->Add(
- "// warning: missing context slot - inconsistent frame?");
- }
- } else {
- accumulator->Add("// warning: no context found - inconsistent frame?");
- }
- accumulator->Add("\n");
- }
-
- // Print the expression stack.
- int expressions_start = stack_locals_count;
- if (expressions_start < expressions_count) {
- accumulator->Add(" // expression stack (top to bottom)\n");
- }
- for (int i = expressions_count - 1; i >= expressions_start; i--) {
- if (IsExpressionInsideHandler(i)) continue;
- accumulator->Add(" [%02d] : %o\n", i, GetExpression(i));
- }
-
- // Print details about the function.
- if (FLAG_max_stack_trace_source_length != 0 && code != NULL) {
- SharedFunctionInfo* shared = JSFunction::cast(function)->shared();
- accumulator->Add("--------- s o u r c e c o d e ---------\n");
- shared->SourceCodePrint(accumulator, FLAG_max_stack_trace_source_length);
- accumulator->Add("\n-----------------------------------------\n");
- }
-
- accumulator->Add("}\n\n");
-}
-
-
-void ArgumentsAdaptorFrame::Print(StringStream* accumulator,
- PrintMode mode,
- int index) const {
- int actual = ComputeParametersCount();
- int expected = -1;
- Object* function = this->function();
- if (function->IsJSFunction()) {
- expected = JSFunction::cast(function)->shared()->formal_parameter_count();
- }
-
- PrintIndex(accumulator, mode, index);
- accumulator->Add("arguments adaptor frame: %d->%d", actual, expected);
- if (mode == OVERVIEW) {
- accumulator->Add("\n");
- return;
- }
- accumulator->Add(" {\n");
-
- // Print actual arguments.
- if (actual > 0) accumulator->Add(" // actual arguments\n");
- for (int i = 0; i < actual; i++) {
- accumulator->Add(" [%02d] : %o", i, GetParameter(i));
- if (expected != -1 && i >= expected) {
- accumulator->Add(" // not passed to callee");
- }
- accumulator->Add("\n");
- }
-
- accumulator->Add("}\n\n");
-}
-
-
-void EntryFrame::Iterate(ObjectVisitor* v) const {
- StackHandlerIterator it(this, top_handler());
- ASSERT(!it.done());
- StackHandler* handler = it.handler();
- ASSERT(handler->is_entry());
- handler->Iterate(v, LookupCode());
-#ifdef DEBUG
- // Make sure that the entry frame does not contain more than one
- // stack handler.
- it.Advance();
- ASSERT(it.done());
-#endif
- IteratePc(v, pc_address(), LookupCode());
-}
-
-
-void StandardFrame::IterateExpressions(ObjectVisitor* v) const {
- const int offset = StandardFrameConstants::kContextOffset;
- Object** base = &Memory::Object_at(sp());
- Object** limit = &Memory::Object_at(fp() + offset) + 1;
- for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
- StackHandler* handler = it.handler();
- // Traverse pointers down to - but not including - the next
- // handler in the handler chain. Update the base to skip the
- // handler and allow the handler to traverse its own pointers.
- const Address address = handler->address();
- v->VisitPointers(base, reinterpret_cast<Object**>(address));
- base = reinterpret_cast<Object**>(address + StackHandlerConstants::kSize);
- // Traverse the pointers in the handler itself.
- handler->Iterate(v, LookupCode());
- }
- v->VisitPointers(base, limit);
-}
-
-
-void JavaScriptFrame::Iterate(ObjectVisitor* v) const {
- IterateExpressions(v);
- IteratePc(v, pc_address(), LookupCode());
- IterateArguments(v);
-}
-
-
-void JavaScriptFrame::IterateArguments(ObjectVisitor* v) const {
- // Traverse callee-saved registers, receiver, and parameters.
- const int kBaseOffset = JavaScriptFrameConstants::kLastParameterOffset;
- const int kLimitOffset = JavaScriptFrameConstants::kReceiverOffset;
- Object** base = &Memory::Object_at(fp() + kBaseOffset);
- Object** limit = &Memory::Object_at(caller_sp() + kLimitOffset) + 1;
- v->VisitPointers(base, limit);
-}
-
-
-void InternalFrame::Iterate(ObjectVisitor* v) const {
- // Internal frames only have object pointers on the expression stack
- // as they never have any arguments.
- IterateExpressions(v);
- IteratePc(v, pc_address(), LookupCode());
-}
-
-
-// -------------------------------------------------------------------------
-
-
-JavaScriptFrame* StackFrameLocator::FindJavaScriptFrame(int n) {
- ASSERT(n >= 0);
- for (int i = 0; i <= n; i++) {
- while (!iterator_.frame()->is_java_script()) iterator_.Advance();
- if (i == n) return JavaScriptFrame::cast(iterator_.frame());
- iterator_.Advance();
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-// -------------------------------------------------------------------------
-
-
-Code* PcToCodeCache::GcSafeCastToCode(HeapObject* object, Address pc) {
- Code* code = reinterpret_cast<Code*>(object);
- ASSERT(code != NULL && code->contains(pc));
- return code;
-}
-
-
-Code* PcToCodeCache::GcSafeFindCodeForPc(Address pc) {
- Heap* heap = isolate_->heap();
- // Check if the pc points into a large object chunk.
- LargeObjectChunk* chunk = heap->lo_space()->FindChunkContainingPc(pc);
- if (chunk != NULL) return GcSafeCastToCode(chunk->GetObject(), pc);
-
- // Iterate through the 8K page until we reach the end or find an
- // object starting after the pc.
- Page* page = Page::FromAddress(pc);
- HeapObjectIterator iterator(page, heap->GcSafeSizeOfOldObjectFunction());
- HeapObject* previous = NULL;
- while (true) {
- HeapObject* next = iterator.next();
- if (next == NULL || next->address() >= pc) {
- return GcSafeCastToCode(previous, pc);
- }
- previous = next;
- }
-}
-
-
-PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
- isolate_->counters()->pc_to_code()->Increment();
- ASSERT(IsPowerOf2(kPcToCodeCacheSize));
- uint32_t hash = ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pc)));
- uint32_t index = hash & (kPcToCodeCacheSize - 1);
- PcToCodeCacheEntry* entry = cache(index);
- if (entry->pc == pc) {
- isolate_->counters()->pc_to_code_cached()->Increment();
- ASSERT(entry->code == GcSafeFindCodeForPc(pc));
- } else {
- // Because this code may be interrupted by a profiling signal that
- // also queries the cache, we cannot update pc before the code has
- // been set. Otherwise, we risk trying to use a cache entry before
- // the code has been computed.
- entry->code = GcSafeFindCodeForPc(pc);
- entry->safepoint_entry.Reset();
- entry->pc = pc;
- }
- return entry;
-}
-
-
-// -------------------------------------------------------------------------
-
-int NumRegs(RegList reglist) {
- int n = 0;
- while (reglist != 0) {
- n++;
- reglist &= reglist - 1; // clear one bit
- }
- return n;
-}
-
-
-struct JSCallerSavedCodeData {
- JSCallerSavedCodeData() {
- int i = 0;
- for (int r = 0; r < kNumRegs; r++)
- if ((kJSCallerSaved & (1 << r)) != 0)
- reg_code[i++] = r;
-
- ASSERT(i == kNumJSCallerSaved);
- }
- int reg_code[kNumJSCallerSaved];
-};
-
-
-static const JSCallerSavedCodeData kCallerSavedCodeData;
-
-
-int JSCallerSavedCode(int n) {
- ASSERT(0 <= n && n < kNumJSCallerSaved);
- return kCallerSavedCodeData.reg_code[n];
-}
-
-
-#define DEFINE_WRAPPER(type, field) \
-class field##_Wrapper : public ZoneObject { \
- public: /* NOLINT */ \
- field##_Wrapper(const field& original) : frame_(original) { \
- } \
- field frame_; \
-};
-STACK_FRAME_TYPE_LIST(DEFINE_WRAPPER)
-#undef DEFINE_WRAPPER
-
-static StackFrame* AllocateFrameCopy(StackFrame* frame) {
-#define FRAME_TYPE_CASE(type, field) \
- case StackFrame::type: { \
- field##_Wrapper* wrapper = \
- new field##_Wrapper(*(reinterpret_cast<field*>(frame))); \
- return &wrapper->frame_; \
- }
-
- switch (frame->type()) {
- STACK_FRAME_TYPE_LIST(FRAME_TYPE_CASE)
- default: UNREACHABLE();
- }
-#undef FRAME_TYPE_CASE
- return NULL;
-}
-
-Vector<StackFrame*> CreateStackMap() {
- ZoneList<StackFrame*> list(10);
- for (StackFrameIterator it; !it.done(); it.Advance()) {
- StackFrame* frame = AllocateFrameCopy(it.frame());
- list.Add(frame);
- }
- return list.ToVector();
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/frames.h b/src/3rdparty/v8/src/frames.h
deleted file mode 100644
index d6307f0..0000000
--- a/src/3rdparty/v8/src/frames.h
+++ /dev/null
@@ -1,854 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FRAMES_H_
-#define V8_FRAMES_H_
-
-#include "handles.h"
-#include "safepoint-table.h"
-
-namespace v8 {
-namespace internal {
-
-typedef uint32_t RegList;
-
-// Get the number of registers in a given register list.
-int NumRegs(RegList list);
-
-// Return the code of the n-th saved register available to JavaScript.
-int JSCallerSavedCode(int n);
-
-
-// Forward declarations.
-class StackFrameIterator;
-class ThreadLocalTop;
-class Isolate;
-
-class PcToCodeCache {
- public:
- struct PcToCodeCacheEntry {
- Address pc;
- Code* code;
- SafepointEntry safepoint_entry;
- };
-
- explicit PcToCodeCache(Isolate* isolate) : isolate_(isolate) {
- Flush();
- }
-
- Code* GcSafeFindCodeForPc(Address pc);
- Code* GcSafeCastToCode(HeapObject* object, Address pc);
-
- void Flush() {
- memset(&cache_[0], 0, sizeof(cache_));
- }
-
- PcToCodeCacheEntry* GetCacheEntry(Address pc);
-
- private:
- PcToCodeCacheEntry* cache(int index) { return &cache_[index]; }
-
- Isolate* isolate_;
-
- static const int kPcToCodeCacheSize = 1024;
- PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
-
- DISALLOW_COPY_AND_ASSIGN(PcToCodeCache);
-};
-
-
-class StackHandler BASE_EMBEDDED {
- public:
- enum State {
- ENTRY,
- TRY_CATCH,
- TRY_FINALLY
- };
-
- // Get the address of this stack handler.
- inline Address address() const;
-
- // Get the next stack handler in the chain.
- inline StackHandler* next() const;
-
- // Tells whether the given address is inside this handler.
- inline bool includes(Address address) const;
-
- // Garbage collection support.
- inline void Iterate(ObjectVisitor* v, Code* holder) const;
-
- // Conversion support.
- static inline StackHandler* FromAddress(Address address);
-
- // Testers
- bool is_entry() { return state() == ENTRY; }
- bool is_try_catch() { return state() == TRY_CATCH; }
- bool is_try_finally() { return state() == TRY_FINALLY; }
-
- private:
- // Accessors.
- inline State state() const;
-
- inline Address* pc_address() const;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(StackHandler);
-};
-
-
-#define STACK_FRAME_TYPE_LIST(V) \
- V(ENTRY, EntryFrame) \
- V(ENTRY_CONSTRUCT, EntryConstructFrame) \
- V(EXIT, ExitFrame) \
- V(JAVA_SCRIPT, JavaScriptFrame) \
- V(OPTIMIZED, OptimizedFrame) \
- V(INTERNAL, InternalFrame) \
- V(CONSTRUCT, ConstructFrame) \
- V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
-
-
-// Abstract base class for all stack frames.
-class StackFrame BASE_EMBEDDED {
- public:
-#define DECLARE_TYPE(type, ignore) type,
- enum Type {
- NONE = 0,
- STACK_FRAME_TYPE_LIST(DECLARE_TYPE)
- NUMBER_OF_TYPES
- };
-#undef DECLARE_TYPE
-
- // Opaque data type for identifying stack frames. Used extensively
- // by the debugger.
- // ID_MIN_VALUE and ID_MAX_VALUE are specified to ensure that enumeration type
- // has correct value range (see Issue 830 for more details).
- enum Id {
- ID_MIN_VALUE = kMinInt,
- ID_MAX_VALUE = kMaxInt,
- NO_ID = 0
- };
-
- struct State {
- State() : sp(NULL), fp(NULL), pc_address(NULL) { }
- Address sp;
- Address fp;
- Address* pc_address;
- };
-
- // Copy constructor; it breaks the connection to host iterator
- // (as an iterator usually lives on stack).
- StackFrame(const StackFrame& original) {
- this->state_ = original.state_;
- this->iterator_ = NULL;
- this->isolate_ = original.isolate_;
- }
-
- // Type testers.
- bool is_entry() const { return type() == ENTRY; }
- bool is_entry_construct() const { return type() == ENTRY_CONSTRUCT; }
- bool is_exit() const { return type() == EXIT; }
- bool is_optimized() const { return type() == OPTIMIZED; }
- bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
- bool is_internal() const { return type() == INTERNAL; }
- bool is_construct() const { return type() == CONSTRUCT; }
- virtual bool is_standard() const { return false; }
-
- bool is_java_script() const {
- Type type = this->type();
- return (type == JAVA_SCRIPT) || (type == OPTIMIZED);
- }
-
- // Accessors.
- Address sp() const { return state_.sp; }
- Address fp() const { return state_.fp; }
- Address caller_sp() const { return GetCallerStackPointer(); }
-
- Address pc() const { return *pc_address(); }
- void set_pc(Address pc) { *pc_address() = pc; }
-
- virtual void SetCallerFp(Address caller_fp) = 0;
-
- Address* pc_address() const { return state_.pc_address; }
-
- // Get the id of this stack frame.
- Id id() const { return static_cast<Id>(OffsetFrom(caller_sp())); }
-
- // Checks if this frame includes any stack handlers.
- bool HasHandler() const;
-
- // Get the type of this frame.
- virtual Type type() const = 0;
-
- // Get the code associated with this frame.
- // This method could be called during marking phase of GC.
- virtual Code* unchecked_code() const = 0;
-
- // Get the code associated with this frame.
- Code* LookupCode() const {
- return GetContainingCode(isolate(), pc());
- }
-
- // Get the code object that contains the given pc.
- static inline Code* GetContainingCode(Isolate* isolate, Address pc);
-
- // Get the code object containing the given pc and fill in the
- // safepoint entry and the number of stack slots. The pc must be at
- // a safepoint.
- static Code* GetSafepointData(Isolate* isolate,
- Address pc,
- SafepointEntry* safepoint_entry,
- unsigned* stack_slots);
-
- virtual void Iterate(ObjectVisitor* v) const = 0;
- static void IteratePc(ObjectVisitor* v, Address* pc_address, Code* holder);
-
-
- // Printing support.
- enum PrintMode { OVERVIEW, DETAILS };
- virtual void Print(StringStream* accumulator,
- PrintMode mode,
- int index) const { }
-
- protected:
- inline explicit StackFrame(StackFrameIterator* iterator);
- virtual ~StackFrame() { }
-
- Isolate* isolate() const { return isolate_; }
-
- // Compute the stack pointer for the calling frame.
- virtual Address GetCallerStackPointer() const = 0;
-
- // Printing support.
- static void PrintIndex(StringStream* accumulator,
- PrintMode mode,
- int index);
-
- // Get the top handler from the current stack iterator.
- inline StackHandler* top_handler() const;
-
- // Compute the stack frame type for the given state.
- static Type ComputeType(Isolate* isolate, State* state);
-
- private:
- const StackFrameIterator* iterator_;
- Isolate* isolate_;
- State state_;
-
- // Fill in the state of the calling frame.
- virtual void ComputeCallerState(State* state) const = 0;
-
- // Get the type and the state of the calling frame.
- virtual Type GetCallerState(State* state) const;
-
- static const intptr_t kIsolateTag = 1;
-
- friend class StackFrameIterator;
- friend class StackHandlerIterator;
- friend class SafeStackFrameIterator;
-
- private:
- void operator=(const StackFrame& original);
-};
-
-
-// Entry frames are used to enter JavaScript execution from C.
-class EntryFrame: public StackFrame {
- public:
- virtual Type type() const { return ENTRY; }
-
- virtual Code* unchecked_code() const;
-
- // Garbage collection support.
- virtual void Iterate(ObjectVisitor* v) const;
-
- static EntryFrame* cast(StackFrame* frame) {
- ASSERT(frame->is_entry());
- return static_cast<EntryFrame*>(frame);
- }
- virtual void SetCallerFp(Address caller_fp);
-
- protected:
- explicit EntryFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
-
- // The caller stack pointer for entry frames is always zero. The
- // real information about the caller frame is available through the
- // link to the top exit frame.
- virtual Address GetCallerStackPointer() const { return 0; }
-
- private:
- virtual void ComputeCallerState(State* state) const;
- virtual Type GetCallerState(State* state) const;
-
- friend class StackFrameIterator;
-};
-
-
-class EntryConstructFrame: public EntryFrame {
- public:
- virtual Type type() const { return ENTRY_CONSTRUCT; }
-
- virtual Code* unchecked_code() const;
-
- static EntryConstructFrame* cast(StackFrame* frame) {
- ASSERT(frame->is_entry_construct());
- return static_cast<EntryConstructFrame*>(frame);
- }
-
- protected:
- explicit EntryConstructFrame(StackFrameIterator* iterator)
- : EntryFrame(iterator) { }
-
- private:
- friend class StackFrameIterator;
-};
-
-
-// Exit frames are used to exit JavaScript execution and go to C.
-class ExitFrame: public StackFrame {
- public:
- virtual Type type() const { return EXIT; }
-
- virtual Code* unchecked_code() const;
-
- Object*& code_slot() const;
-
- // Garbage collection support.
- virtual void Iterate(ObjectVisitor* v) const;
-
- virtual void SetCallerFp(Address caller_fp);
-
- static ExitFrame* cast(StackFrame* frame) {
- ASSERT(frame->is_exit());
- return static_cast<ExitFrame*>(frame);
- }
-
- // Compute the state and type of an exit frame given a frame
- // pointer. Used when constructing the first stack frame seen by an
- // iterator and the frames following entry frames.
- static Type GetStateForFramePointer(Address fp, State* state);
- static Address ComputeStackPointer(Address fp);
- static void FillState(Address fp, Address sp, State* state);
-
- protected:
- explicit ExitFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
-
- virtual Address GetCallerStackPointer() const;
-
- private:
- virtual void ComputeCallerState(State* state) const;
-
- friend class StackFrameIterator;
-};
-
-
-class StandardFrame: public StackFrame {
- public:
- // Testers.
- virtual bool is_standard() const { return true; }
-
- // Accessors.
- inline Object* context() const;
-
- // Access the expressions in the stack frame including locals.
- inline Object* GetExpression(int index) const;
- inline void SetExpression(int index, Object* value);
- int ComputeExpressionsCount() const;
-
- virtual void SetCallerFp(Address caller_fp);
-
- static StandardFrame* cast(StackFrame* frame) {
- ASSERT(frame->is_standard());
- return static_cast<StandardFrame*>(frame);
- }
-
- protected:
- explicit StandardFrame(StackFrameIterator* iterator)
- : StackFrame(iterator) { }
-
- virtual void ComputeCallerState(State* state) const;
-
- // Accessors.
- inline Address caller_fp() const;
- inline Address caller_pc() const;
-
- // Computes the address of the PC field in the standard frame given
- // by the provided frame pointer.
- static inline Address ComputePCAddress(Address fp);
-
- // Iterate over expression stack including stack handlers, locals,
- // and parts of the fixed part including context and code fields.
- void IterateExpressions(ObjectVisitor* v) const;
-
- // Returns the address of the n'th expression stack element.
- Address GetExpressionAddress(int n) const;
-
- // Determines if the n'th expression stack element is in a stack
- // handler or not. Requires traversing all handlers in this frame.
- bool IsExpressionInsideHandler(int n) const;
-
- // Determines if the standard frame for the given frame pointer is
- // an arguments adaptor frame.
- static inline bool IsArgumentsAdaptorFrame(Address fp);
-
- // Determines if the standard frame for the given frame pointer is a
- // construct frame.
- static inline bool IsConstructFrame(Address fp);
-
- private:
- friend class StackFrame;
- friend class StackFrameIterator;
-};
-
-
-class FrameSummary BASE_EMBEDDED {
- public:
- FrameSummary(Object* receiver,
- JSFunction* function,
- Code* code,
- int offset,
- bool is_constructor)
- : receiver_(receiver),
- function_(function),
- code_(code),
- offset_(offset),
- is_constructor_(is_constructor) { }
- Handle<Object> receiver() { return receiver_; }
- Handle<JSFunction> function() { return function_; }
- Handle<Code> code() { return code_; }
- Address pc() { return code_->address() + offset_; }
- int offset() { return offset_; }
- bool is_constructor() { return is_constructor_; }
-
- void Print();
-
- private:
- Handle<Object> receiver_;
- Handle<JSFunction> function_;
- Handle<Code> code_;
- int offset_;
- bool is_constructor_;
-};
-
-
-class JavaScriptFrame: public StandardFrame {
- public:
- virtual Type type() const { return JAVA_SCRIPT; }
-
- // Accessors.
- inline Object* function() const;
- inline Object* receiver() const;
- inline void set_receiver(Object* value);
-
- // Access the parameters.
- Object* GetParameter(int index) const;
- int ComputeParametersCount() const;
-
- // Check if this frame is a constructor frame invoked through 'new'.
- bool IsConstructor() const;
-
- // Check if this frame has "adapted" arguments in the sense that the
- // actual passed arguments are available in an arguments adaptor
- // frame below it on the stack.
- inline bool has_adapted_arguments() const;
-
- // Garbage collection support.
- virtual void Iterate(ObjectVisitor* v) const;
-
- // Printing support.
- virtual void Print(StringStream* accumulator,
- PrintMode mode,
- int index) const;
-
- // Determine the code for the frame.
- virtual Code* unchecked_code() const;
-
- // Return a list with JSFunctions of this frame.
- virtual void GetFunctions(List<JSFunction*>* functions);
-
- // Build a list with summaries for this frame including all inlined frames.
- virtual void Summarize(List<FrameSummary>* frames);
-
- static JavaScriptFrame* cast(StackFrame* frame) {
- ASSERT(frame->is_java_script());
- return static_cast<JavaScriptFrame*>(frame);
- }
-
- protected:
- explicit JavaScriptFrame(StackFrameIterator* iterator)
- : StandardFrame(iterator) { }
-
- virtual Address GetCallerStackPointer() const;
-
- // Garbage collection support. Iterates over incoming arguments,
- // receiver, and any callee-saved registers.
- void IterateArguments(ObjectVisitor* v) const;
-
- private:
- inline Object* function_slot_object() const;
-
- friend class StackFrameIterator;
- friend class StackTracer;
-};
-
-
-class OptimizedFrame : public JavaScriptFrame {
- public:
- virtual Type type() const { return OPTIMIZED; }
-
- // GC support.
- virtual void Iterate(ObjectVisitor* v) const;
-
- // Return a list with JSFunctions of this frame.
- // The functions are ordered bottom-to-top (i.e. functions.last()
- // is the top-most activation)
- virtual void GetFunctions(List<JSFunction*>* functions);
-
- virtual void Summarize(List<FrameSummary>* frames);
-
- DeoptimizationInputData* GetDeoptimizationData(int* deopt_index);
-
- protected:
- explicit OptimizedFrame(StackFrameIterator* iterator)
- : JavaScriptFrame(iterator) { }
-
- private:
- friend class StackFrameIterator;
-};
-
-
-// Arguments adaptor frames are automatically inserted below
-// JavaScript frames when the actual number of parameters does not
-// match the formal number of parameters.
-class ArgumentsAdaptorFrame: public JavaScriptFrame {
- public:
- virtual Type type() const { return ARGUMENTS_ADAPTOR; }
-
- // Determine the code for the frame.
- virtual Code* unchecked_code() const;
-
- static ArgumentsAdaptorFrame* cast(StackFrame* frame) {
- ASSERT(frame->is_arguments_adaptor());
- return static_cast<ArgumentsAdaptorFrame*>(frame);
- }
-
- // Printing support.
- virtual void Print(StringStream* accumulator,
- PrintMode mode,
- int index) const;
- protected:
- explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator)
- : JavaScriptFrame(iterator) { }
-
- virtual Address GetCallerStackPointer() const;
-
- private:
- friend class StackFrameIterator;
-};
-
-
-class InternalFrame: public StandardFrame {
- public:
- virtual Type type() const { return INTERNAL; }
-
- // Garbage collection support.
- virtual void Iterate(ObjectVisitor* v) const;
-
- // Determine the code for the frame.
- virtual Code* unchecked_code() const;
-
- static InternalFrame* cast(StackFrame* frame) {
- ASSERT(frame->is_internal());
- return static_cast<InternalFrame*>(frame);
- }
-
- protected:
- explicit InternalFrame(StackFrameIterator* iterator)
- : StandardFrame(iterator) { }
-
- virtual Address GetCallerStackPointer() const;
-
- private:
- friend class StackFrameIterator;
-};
-
-
-// Construct frames are special trampoline frames introduced to handle
-// function invocations through 'new'.
-class ConstructFrame: public InternalFrame {
- public:
- virtual Type type() const { return CONSTRUCT; }
-
- static ConstructFrame* cast(StackFrame* frame) {
- ASSERT(frame->is_construct());
- return static_cast<ConstructFrame*>(frame);
- }
-
- protected:
- explicit ConstructFrame(StackFrameIterator* iterator)
- : InternalFrame(iterator) { }
-
- private:
- friend class StackFrameIterator;
-};
-
-
-class StackFrameIterator BASE_EMBEDDED {
- public:
- // An iterator that iterates over the current thread's stack,
- // and uses current isolate.
- StackFrameIterator();
-
- // An iterator that iterates over the isolate's current thread's stack,
- explicit StackFrameIterator(Isolate* isolate);
-
- // An iterator that iterates over a given thread's stack.
- StackFrameIterator(Isolate* isolate, ThreadLocalTop* t);
-
- // An iterator that can start from a given FP address.
- // If use_top, then work as usual, if fp isn't NULL, use it,
- // otherwise, do nothing.
- StackFrameIterator(Isolate* isolate, bool use_top, Address fp, Address sp);
-
- StackFrame* frame() const {
- ASSERT(!done());
- return frame_;
- }
-
- Isolate* isolate() const { return isolate_; }
-
- bool done() const { return frame_ == NULL; }
- void Advance() { (this->*advance_)(); }
-
- // Go back to the first frame.
- void Reset();
-
- private:
- Isolate* isolate_;
-#define DECLARE_SINGLETON(ignore, type) type type##_;
- STACK_FRAME_TYPE_LIST(DECLARE_SINGLETON)
-#undef DECLARE_SINGLETON
- StackFrame* frame_;
- StackHandler* handler_;
- ThreadLocalTop* thread_;
- Address fp_;
- Address sp_;
- void (StackFrameIterator::*advance_)();
-
- StackHandler* handler() const {
- ASSERT(!done());
- return handler_;
- }
-
- // Get the type-specific frame singleton in a given state.
- StackFrame* SingletonFor(StackFrame::Type type, StackFrame::State* state);
- // A helper function, can return a NULL pointer.
- StackFrame* SingletonFor(StackFrame::Type type);
-
- void AdvanceWithHandler();
- void AdvanceWithoutHandler();
-
- friend class StackFrame;
- friend class SafeStackFrameIterator;
- DISALLOW_COPY_AND_ASSIGN(StackFrameIterator);
-};
-
-
-// Iterator that supports iterating through all JavaScript frames.
-template<typename Iterator>
-class JavaScriptFrameIteratorTemp BASE_EMBEDDED {
- public:
- JavaScriptFrameIteratorTemp() { if (!done()) Advance(); }
-
- inline explicit JavaScriptFrameIteratorTemp(Isolate* isolate);
-
- // Skip frames until the frame with the given id is reached.
- explicit JavaScriptFrameIteratorTemp(StackFrame::Id id) { AdvanceToId(id); }
-
- inline JavaScriptFrameIteratorTemp(Isolate* isolate, StackFrame::Id id);
-
- JavaScriptFrameIteratorTemp(Address fp, Address sp,
- Address low_bound, Address high_bound) :
- iterator_(fp, sp, low_bound, high_bound) {
- if (!done()) Advance();
- }
-
- JavaScriptFrameIteratorTemp(Isolate* isolate,
- Address fp, Address sp,
- Address low_bound, Address high_bound) :
- iterator_(isolate, fp, sp, low_bound, high_bound) {
- if (!done()) Advance();
- }
-
- inline JavaScriptFrame* frame() const;
-
- bool done() const { return iterator_.done(); }
- void Advance();
-
- // Advance to the frame holding the arguments for the current
- // frame. This only affects the current frame if it has adapted
- // arguments.
- void AdvanceToArgumentsFrame();
-
- // Go back to the first frame.
- void Reset();
-
- private:
- inline void AdvanceToId(StackFrame::Id id);
-
- Iterator iterator_;
-};
-
-
-typedef JavaScriptFrameIteratorTemp<StackFrameIterator> JavaScriptFrameIterator;
-
-
-// NOTE: The stack trace frame iterator is an iterator that only
-// traverse proper JavaScript frames; that is JavaScript frames that
-// have proper JavaScript functions. This excludes the problematic
-// functions in runtime.js.
-class StackTraceFrameIterator: public JavaScriptFrameIterator {
- public:
- StackTraceFrameIterator();
- explicit StackTraceFrameIterator(Isolate* isolate);
- void Advance();
-
- private:
- bool IsValidFrame();
-};
-
-
-class SafeStackFrameIterator BASE_EMBEDDED {
- public:
- SafeStackFrameIterator(Isolate* isolate,
- Address fp, Address sp,
- Address low_bound, Address high_bound);
-
- StackFrame* frame() const {
- ASSERT(is_working_iterator_);
- return iterator_.frame();
- }
-
- bool done() const { return iteration_done_ ? true : iterator_.done(); }
-
- void Advance();
- void Reset();
-
- static bool is_active(Isolate* isolate);
-
- static bool IsWithinBounds(
- Address low_bound, Address high_bound, Address addr) {
- return low_bound <= addr && addr <= high_bound;
- }
-
- private:
- class StackAddressValidator {
- public:
- StackAddressValidator(Address low_bound, Address high_bound)
- : low_bound_(low_bound), high_bound_(high_bound) { }
- bool IsValid(Address addr) const {
- return IsWithinBounds(low_bound_, high_bound_, addr);
- }
- private:
- Address low_bound_;
- Address high_bound_;
- };
-
- class ExitFrameValidator {
- public:
- explicit ExitFrameValidator(const StackAddressValidator& validator)
- : validator_(validator) { }
- ExitFrameValidator(Address low_bound, Address high_bound)
- : validator_(low_bound, high_bound) { }
- bool IsValidFP(Address fp);
- private:
- StackAddressValidator validator_;
- };
-
- bool IsValidStackAddress(Address addr) const {
- return stack_validator_.IsValid(addr);
- }
- bool CanIterateHandles(StackFrame* frame, StackHandler* handler);
- bool IsValidFrame(StackFrame* frame) const;
- bool IsValidCaller(StackFrame* frame);
- static bool IsValidTop(Isolate* isolate,
- Address low_bound, Address high_bound);
-
- // This is a nasty hack to make sure the active count is incremented
- // before the constructor for the embedded iterator is invoked. This
- // is needed because the constructor will start looking at frames
- // right away and we need to make sure it doesn't start inspecting
- // heap objects.
- class ActiveCountMaintainer BASE_EMBEDDED {
- public:
- explicit ActiveCountMaintainer(Isolate* isolate);
- ~ActiveCountMaintainer();
- private:
- Isolate* isolate_;
- };
-
- ActiveCountMaintainer maintainer_;
- StackAddressValidator stack_validator_;
- const bool is_valid_top_;
- const bool is_valid_fp_;
- const bool is_working_iterator_;
- bool iteration_done_;
- StackFrameIterator iterator_;
-};
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-typedef JavaScriptFrameIteratorTemp<SafeStackFrameIterator>
- SafeJavaScriptFrameIterator;
-
-
-class SafeStackTraceFrameIterator: public SafeJavaScriptFrameIterator {
- public:
- explicit SafeStackTraceFrameIterator(Isolate* isolate,
- Address fp, Address sp,
- Address low_bound, Address high_bound);
- void Advance();
-};
-#endif
-
-
-class StackFrameLocator BASE_EMBEDDED {
- public:
- // Find the nth JavaScript frame on the stack. The caller must
- // guarantee that such a frame exists.
- JavaScriptFrame* FindJavaScriptFrame(int n);
-
- private:
- StackFrameIterator iterator_;
-};
-
-
-// Reads all frames on the current stack and copies them into the current
-// zone memory.
-Vector<StackFrame*> CreateStackMap();
-
-} } // namespace v8::internal
-
-#endif // V8_FRAMES_H_
diff --git a/src/3rdparty/v8/src/full-codegen.cc b/src/3rdparty/v8/src/full-codegen.cc
deleted file mode 100644
index b896fc8..0000000
--- a/src/3rdparty/v8/src/full-codegen.cc
+++ /dev/null
@@ -1,1385 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "liveedit.h"
-#include "macro-assembler.h"
-#include "prettyprinter.h"
-#include "scopes.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-void BreakableStatementChecker::Check(Statement* stmt) {
- Visit(stmt);
-}
-
-
-void BreakableStatementChecker::Check(Expression* expr) {
- Visit(expr);
-}
-
-
-void BreakableStatementChecker::VisitDeclaration(Declaration* decl) {
-}
-
-
-void BreakableStatementChecker::VisitBlock(Block* stmt) {
-}
-
-
-void BreakableStatementChecker::VisitExpressionStatement(
- ExpressionStatement* stmt) {
- // Check if expression is breakable.
- Visit(stmt->expression());
-}
-
-
-void BreakableStatementChecker::VisitEmptyStatement(EmptyStatement* stmt) {
-}
-
-
-void BreakableStatementChecker::VisitIfStatement(IfStatement* stmt) {
- // If the condition is breakable the if statement is breakable.
- Visit(stmt->condition());
-}
-
-
-void BreakableStatementChecker::VisitContinueStatement(
- ContinueStatement* stmt) {
-}
-
-
-void BreakableStatementChecker::VisitBreakStatement(BreakStatement* stmt) {
-}
-
-
-void BreakableStatementChecker::VisitReturnStatement(ReturnStatement* stmt) {
- // Return is breakable if the expression is.
- Visit(stmt->expression());
-}
-
-
-void BreakableStatementChecker::VisitWithEnterStatement(
- WithEnterStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void BreakableStatementChecker::VisitWithExitStatement(
- WithExitStatement* stmt) {
-}
-
-
-void BreakableStatementChecker::VisitSwitchStatement(SwitchStatement* stmt) {
- // Switch statements breakable if the tag expression is.
- Visit(stmt->tag());
-}
-
-
-void BreakableStatementChecker::VisitDoWhileStatement(DoWhileStatement* stmt) {
- // Mark do while as breakable to avoid adding a break slot in front of it.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitWhileStatement(WhileStatement* stmt) {
- // Mark while statements breakable if the condition expression is.
- Visit(stmt->cond());
-}
-
-
-void BreakableStatementChecker::VisitForStatement(ForStatement* stmt) {
- // Mark for statements breakable if the condition expression is.
- if (stmt->cond() != NULL) {
- Visit(stmt->cond());
- }
-}
-
-
-void BreakableStatementChecker::VisitForInStatement(ForInStatement* stmt) {
- // Mark for in statements breakable if the enumerable expression is.
- Visit(stmt->enumerable());
-}
-
-
-void BreakableStatementChecker::VisitTryCatchStatement(
- TryCatchStatement* stmt) {
- // Mark try catch as breakable to avoid adding a break slot in front of it.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitTryFinallyStatement(
- TryFinallyStatement* stmt) {
- // Mark try finally as breakable to avoid adding a break slot in front of it.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitDebuggerStatement(
- DebuggerStatement* stmt) {
- // The debugger statement is breakable.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
-}
-
-
-void BreakableStatementChecker::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
-}
-
-
-void BreakableStatementChecker::VisitConditional(Conditional* expr) {
-}
-
-
-void BreakableStatementChecker::VisitVariableProxy(VariableProxy* expr) {
-}
-
-
-void BreakableStatementChecker::VisitLiteral(Literal* expr) {
-}
-
-
-void BreakableStatementChecker::VisitRegExpLiteral(RegExpLiteral* expr) {
-}
-
-
-void BreakableStatementChecker::VisitObjectLiteral(ObjectLiteral* expr) {
-}
-
-
-void BreakableStatementChecker::VisitArrayLiteral(ArrayLiteral* expr) {
-}
-
-
-void BreakableStatementChecker::VisitCatchExtensionObject(
- CatchExtensionObject* expr) {
-}
-
-
-void BreakableStatementChecker::VisitAssignment(Assignment* expr) {
- // If assigning to a property (including a global property) the assignment is
- // breakable.
- Variable* var = expr->target()->AsVariableProxy()->AsVariable();
- Property* prop = expr->target()->AsProperty();
- if (prop != NULL || (var != NULL && var->is_global())) {
- is_breakable_ = true;
- return;
- }
-
- // Otherwise the assignment is breakable if the assigned value is.
- Visit(expr->value());
-}
-
-
-void BreakableStatementChecker::VisitThrow(Throw* expr) {
- // Throw is breakable if the expression is.
- Visit(expr->exception());
-}
-
-
-void BreakableStatementChecker::VisitIncrementOperation(
- IncrementOperation* expr) {
- UNREACHABLE();
-}
-
-
-void BreakableStatementChecker::VisitProperty(Property* expr) {
- // Property load is breakable.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitCall(Call* expr) {
- // Function calls both through IC and call stub are breakable.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitCallNew(CallNew* expr) {
- // Function calls through new are breakable.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitCallRuntime(CallRuntime* expr) {
-}
-
-
-void BreakableStatementChecker::VisitUnaryOperation(UnaryOperation* expr) {
- Visit(expr->expression());
-}
-
-
-void BreakableStatementChecker::VisitCountOperation(CountOperation* expr) {
- Visit(expr->expression());
-}
-
-
-void BreakableStatementChecker::VisitBinaryOperation(BinaryOperation* expr) {
- Visit(expr->left());
- Visit(expr->right());
-}
-
-
-void BreakableStatementChecker::VisitCompareToNull(CompareToNull* expr) {
- Visit(expr->expression());
-}
-
-
-void BreakableStatementChecker::VisitCompareOperation(CompareOperation* expr) {
- Visit(expr->left());
- Visit(expr->right());
-}
-
-
-void BreakableStatementChecker::VisitThisFunction(ThisFunction* expr) {
-}
-
-
-#define __ ACCESS_MASM(masm())
-
-bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
- Isolate* isolate = info->isolate();
- Handle<Script> script = info->script();
- if (!script->IsUndefined() && !script->source()->IsUndefined()) {
- int len = String::cast(script->source())->length();
- isolate->counters()->total_full_codegen_source_size()->Increment(len);
- }
- if (FLAG_trace_codegen) {
- PrintF("Full Compiler - ");
- }
- CodeGenerator::MakeCodePrologue(info);
- const int kInitialBufferSize = 4 * KB;
- MacroAssembler masm(info->isolate(), NULL, kInitialBufferSize);
-#ifdef ENABLE_GDB_JIT_INTERFACE
- masm.positions_recorder()->StartGDBJITLineInfoRecording();
-#endif
-
- FullCodeGenerator cgen(&masm);
- cgen.Generate(info);
- if (cgen.HasStackOverflow()) {
- ASSERT(!isolate->has_pending_exception());
- return false;
- }
- unsigned table_offset = cgen.EmitStackCheckTable();
-
- Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
- Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
- code->set_optimizable(info->IsOptimizable());
- cgen.PopulateDeoptimizationData(code);
- code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
- code->set_allow_osr_at_loop_nesting_level(0);
- code->set_stack_check_table_offset(table_offset);
- CodeGenerator::PrintCode(code, info);
- info->SetCode(code); // may be an empty handle.
-#ifdef ENABLE_GDB_JIT_INTERFACE
- if (FLAG_gdbjit && !code.is_null()) {
- GDBJITLineInfo* lineinfo =
- masm.positions_recorder()->DetachGDBJITLineInfo();
-
- GDBJIT(RegisterDetailedLineInfo(*code, lineinfo));
- }
-#endif
- return !code.is_null();
-}
-
-
-unsigned FullCodeGenerator::EmitStackCheckTable() {
- // The stack check table consists of a length (in number of entries)
- // field, and then a sequence of entries. Each entry is a pair of AST id
- // and code-relative pc offset.
- masm()->Align(kIntSize);
- masm()->RecordComment("[ Stack check table");
- unsigned offset = masm()->pc_offset();
- unsigned length = stack_checks_.length();
- __ dd(length);
- for (unsigned i = 0; i < length; ++i) {
- __ dd(stack_checks_[i].id);
- __ dd(stack_checks_[i].pc_and_state);
- }
- masm()->RecordComment("]");
- return offset;
-}
-
-
-void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
- // Fill in the deoptimization information.
- ASSERT(info_->HasDeoptimizationSupport() || bailout_entries_.is_empty());
- if (!info_->HasDeoptimizationSupport()) return;
- int length = bailout_entries_.length();
- Handle<DeoptimizationOutputData> data =
- isolate()->factory()->
- NewDeoptimizationOutputData(length, TENURED);
- for (int i = 0; i < length; i++) {
- data->SetAstId(i, Smi::FromInt(bailout_entries_[i].id));
- data->SetPcAndState(i, Smi::FromInt(bailout_entries_[i].pc_and_state));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-void FullCodeGenerator::PrepareForBailout(AstNode* node, State state) {
- PrepareForBailoutForId(node->id(), state);
-}
-
-
-void FullCodeGenerator::RecordJSReturnSite(Call* call) {
- // We record the offset of the function return so we can rebuild the frame
- // if the function was inlined, i.e., this is the return address in the
- // inlined function's frame.
- //
- // The state is ignored. We defensively set it to TOS_REG, which is the
- // real state of the unoptimized code at the return site.
- PrepareForBailoutForId(call->ReturnId(), TOS_REG);
-#ifdef DEBUG
- // In debug builds, mark the return so we can verify that this function
- // was called.
- ASSERT(!call->return_is_recorded_);
- call->return_is_recorded_ = true;
-#endif
-}
-
-
-void FullCodeGenerator::PrepareForBailoutForId(int id, State state) {
- // There's no need to prepare this code for bailouts from already optimized
- // code or code that can't be optimized.
- if (!FLAG_deopt || !info_->HasDeoptimizationSupport()) return;
- unsigned pc_and_state =
- StateField::encode(state) | PcField::encode(masm_->pc_offset());
- BailoutEntry entry = { id, pc_and_state };
-#ifdef DEBUG
- // Assert that we don't have multiple bailout entries for the same node.
- for (int i = 0; i < bailout_entries_.length(); i++) {
- if (bailout_entries_.at(i).id == entry.id) {
- AstPrinter printer;
- PrintF("%s", printer.PrintProgram(info_->function()));
- UNREACHABLE();
- }
- }
-#endif // DEBUG
- bailout_entries_.Add(entry);
-}
-
-
-void FullCodeGenerator::RecordStackCheck(int ast_id) {
- // The pc offset does not need to be encoded and packed together with a
- // state.
- BailoutEntry entry = { ast_id, masm_->pc_offset() };
- stack_checks_.Add(entry);
-}
-
-
-int FullCodeGenerator::SlotOffset(Slot* slot) {
- ASSERT(slot != NULL);
- // Offset is negative because higher indexes are at lower addresses.
- int offset = -slot->index() * kPointerSize;
- // Adjust by a (parameter or local) base offset.
- switch (slot->type()) {
- case Slot::PARAMETER:
- offset += (scope()->num_parameters() + 1) * kPointerSize;
- break;
- case Slot::LOCAL:
- offset += JavaScriptFrameConstants::kLocal0Offset;
- break;
- case Slot::CONTEXT:
- case Slot::LOOKUP:
- UNREACHABLE();
- }
- return offset;
-}
-
-
-bool FullCodeGenerator::ShouldInlineSmiCase(Token::Value op) {
- // Inline smi case inside loops, but not division and modulo which
- // are too complicated and take up too much space.
- if (op == Token::DIV ||op == Token::MOD) return false;
- if (FLAG_always_inline_smi_code) return true;
- return loop_depth_ > 0;
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Register reg) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Register reg) const {
- __ Move(result_register(), reg);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
- __ push(reg);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Register reg) const {
- // For simplicity we always test the accumulator register.
- __ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
-}
-
-
-void FullCodeGenerator::EffectContext::PlugTOS() const {
- __ Drop(1);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::PlugTOS() const {
- __ pop(result_register());
-}
-
-
-void FullCodeGenerator::StackValueContext::PlugTOS() const {
-}
-
-
-void FullCodeGenerator::TestContext::PlugTOS() const {
- // For simplicity we always test the accumulator register.
- __ pop(result_register());
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
-}
-
-
-void FullCodeGenerator::EffectContext::PrepareTest(
- Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const {
- // In an effect context, the true and the false case branch to the
- // same label.
- *if_true = *if_false = *fall_through = materialize_true;
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::PrepareTest(
- Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const {
- *if_true = *fall_through = materialize_true;
- *if_false = materialize_false;
-}
-
-
-void FullCodeGenerator::StackValueContext::PrepareTest(
- Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const {
- *if_true = *fall_through = materialize_true;
- *if_false = materialize_false;
-}
-
-
-void FullCodeGenerator::TestContext::PrepareTest(
- Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const {
- *if_true = true_label_;
- *if_false = false_label_;
- *fall_through = fall_through_;
-}
-
-
-void FullCodeGenerator::VisitDeclarations(
- ZoneList<Declaration*>* declarations) {
- int length = declarations->length();
- int globals = 0;
- for (int i = 0; i < length; i++) {
- Declaration* decl = declarations->at(i);
- Variable* var = decl->proxy()->var();
- Slot* slot = var->AsSlot();
-
- // If it was not possible to allocate the variable at compile
- // time, we need to "declare" it at runtime to make sure it
- // actually exists in the local context.
- if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
- VisitDeclaration(decl);
- } else {
- // Count global variables and functions for later processing
- globals++;
- }
- }
-
- // Compute array of global variable and function declarations.
- // Do nothing in case of no declared global functions or variables.
- if (globals > 0) {
- Handle<FixedArray> array =
- isolate()->factory()->NewFixedArray(2 * globals, TENURED);
- for (int j = 0, i = 0; i < length; i++) {
- Declaration* decl = declarations->at(i);
- Variable* var = decl->proxy()->var();
- Slot* slot = var->AsSlot();
-
- if ((slot == NULL || slot->type() != Slot::LOOKUP) && var->is_global()) {
- array->set(j++, *(var->name()));
- if (decl->fun() == NULL) {
- if (var->mode() == Variable::CONST) {
- // In case this is const property use the hole.
- array->set_the_hole(j++);
- } else {
- array->set_undefined(j++);
- }
- } else {
- Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(decl->fun(), script());
- // Check for stack-overflow exception.
- if (function.is_null()) {
- SetStackOverflow();
- return;
- }
- array->set(j++, *function);
- }
- }
- }
- // Invoke the platform-dependent code generator to do the actual
- // declaration the global variables and functions.
- DeclareGlobals(array);
- }
-}
-
-
-void FullCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
- if (FLAG_debug_info) {
- CodeGenerator::RecordPositions(masm_, fun->start_position());
- }
-}
-
-
-void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
- if (FLAG_debug_info) {
- CodeGenerator::RecordPositions(masm_, fun->end_position() - 1);
- }
-}
-
-
-void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
- if (FLAG_debug_info) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (!isolate()->debugger()->IsDebuggerActive()) {
- CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
- } else {
- // Check if the statement will be breakable without adding a debug break
- // slot.
- BreakableStatementChecker checker;
- checker.Check(stmt);
- // Record the statement position right here if the statement is not
- // breakable. For breakable statements the actual recording of the
- // position will be postponed to the breakable code (typically an IC).
- bool position_recorded = CodeGenerator::RecordPositions(
- masm_, stmt->statement_pos(), !checker.is_breakable());
- // If the position recording did record a new position generate a debug
- // break slot to make the statement breakable.
- if (position_recorded) {
- Debug::GenerateSlot(masm_);
- }
- }
-#else
- CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
-#endif
- }
-}
-
-
-void FullCodeGenerator::SetExpressionPosition(Expression* expr, int pos) {
- if (FLAG_debug_info) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (!isolate()->debugger()->IsDebuggerActive()) {
- CodeGenerator::RecordPositions(masm_, pos);
- } else {
- // Check if the expression will be breakable without adding a debug break
- // slot.
- BreakableStatementChecker checker;
- checker.Check(expr);
- // Record a statement position right here if the expression is not
- // breakable. For breakable expressions the actual recording of the
- // position will be postponed to the breakable code (typically an IC).
- // NOTE this will record a statement position for something which might
- // not be a statement. As stepping in the debugger will only stop at
- // statement positions this is used for e.g. the condition expression of
- // a do while loop.
- bool position_recorded = CodeGenerator::RecordPositions(
- masm_, pos, !checker.is_breakable());
- // If the position recording did record a new position generate a debug
- // break slot to make the statement breakable.
- if (position_recorded) {
- Debug::GenerateSlot(masm_);
- }
- }
-#else
- CodeGenerator::RecordPositions(masm_, pos);
-#endif
- }
-}
-
-
-void FullCodeGenerator::SetStatementPosition(int pos) {
- if (FLAG_debug_info) {
- CodeGenerator::RecordPositions(masm_, pos);
- }
-}
-
-
-void FullCodeGenerator::SetSourcePosition(int pos) {
- if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
- masm_->positions_recorder()->RecordPosition(pos);
- }
-}
-
-
-// Lookup table for code generators for special runtime calls which are
-// generated inline.
-#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \
- &FullCodeGenerator::Emit##Name,
-
-const FullCodeGenerator::InlineFunctionGenerator
- FullCodeGenerator::kInlineFunctionGenerators[] = {
- INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
- INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
- };
-#undef INLINE_FUNCTION_GENERATOR_ADDRESS
-
-
-FullCodeGenerator::InlineFunctionGenerator
- FullCodeGenerator::FindInlineFunctionGenerator(Runtime::FunctionId id) {
- int lookup_index =
- static_cast<int>(id) - static_cast<int>(Runtime::kFirstInlineFunction);
- ASSERT(lookup_index >= 0);
- ASSERT(static_cast<size_t>(lookup_index) <
- ARRAY_SIZE(kInlineFunctionGenerators));
- return kInlineFunctionGenerators[lookup_index];
-}
-
-
-void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* node) {
- ZoneList<Expression*>* args = node->arguments();
- Handle<String> name = node->name();
- const Runtime::Function* function = node->function();
- ASSERT(function != NULL);
- ASSERT(function->intrinsic_type == Runtime::INLINE);
- InlineFunctionGenerator generator =
- FindInlineFunctionGenerator(function->function_id);
- ((*this).*(generator))(args);
-}
-
-
-void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- Comment cmnt(masm_, "[ BinaryOperation");
- Token::Value op = expr->op();
- Expression* left = expr->left();
- Expression* right = expr->right();
-
- OverwriteMode mode = NO_OVERWRITE;
- if (left->ResultOverwriteAllowed()) {
- mode = OVERWRITE_LEFT;
- } else if (right->ResultOverwriteAllowed()) {
- mode = OVERWRITE_RIGHT;
- }
-
- switch (op) {
- case Token::COMMA:
- VisitForEffect(left);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- context()->HandleExpression(right);
- break;
-
- case Token::OR:
- case Token::AND:
- EmitLogicalOperation(expr);
- break;
-
- case Token::ADD:
- case Token::SUB:
- case Token::DIV:
- case Token::MOD:
- case Token::MUL:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR: {
- // Load both operands.
- VisitForStackValue(left);
- VisitForAccumulatorValue(right);
-
- SetSourcePosition(expr->position());
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr, op, mode, left, right);
- } else {
- EmitBinaryOp(op, mode);
- }
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
- Label eval_right, done;
-
- context()->EmitLogicalLeft(expr, &eval_right, &done);
-
- PrepareForBailoutForId(expr->RightId(), NO_REGISTERS);
- __ bind(&eval_right);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- context()->HandleExpression(expr->right());
-
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::EffectContext::EmitLogicalLeft(BinaryOperation* expr,
- Label* eval_right,
- Label* done) const {
- if (expr->op() == Token::OR) {
- codegen()->VisitForControl(expr->left(), done, eval_right, eval_right);
- } else {
- ASSERT(expr->op() == Token::AND);
- codegen()->VisitForControl(expr->left(), eval_right, done, eval_right);
- }
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::EmitLogicalLeft(
- BinaryOperation* expr,
- Label* eval_right,
- Label* done) const {
- HandleExpression(expr->left());
- // We want the value in the accumulator for the test, and on the stack in case
- // we need it.
- __ push(result_register());
- Label discard, restore;
- if (expr->op() == Token::OR) {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(&restore, &discard, &restore);
- } else {
- ASSERT(expr->op() == Token::AND);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(&discard, &restore, &restore);
- }
- __ bind(&restore);
- __ pop(result_register());
- __ jmp(done);
- __ bind(&discard);
- __ Drop(1);
-}
-
-
-void FullCodeGenerator::StackValueContext::EmitLogicalLeft(
- BinaryOperation* expr,
- Label* eval_right,
- Label* done) const {
- codegen()->VisitForAccumulatorValue(expr->left());
- // We want the value in the accumulator for the test, and on the stack in case
- // we need it.
- __ push(result_register());
- Label discard;
- if (expr->op() == Token::OR) {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(done, &discard, &discard);
- } else {
- ASSERT(expr->op() == Token::AND);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(&discard, done, &discard);
- }
- __ bind(&discard);
- __ Drop(1);
-}
-
-
-void FullCodeGenerator::TestContext::EmitLogicalLeft(BinaryOperation* expr,
- Label* eval_right,
- Label* done) const {
- if (expr->op() == Token::OR) {
- codegen()->VisitForControl(expr->left(),
- true_label_, eval_right, eval_right);
- } else {
- ASSERT(expr->op() == Token::AND);
- codegen()->VisitForControl(expr->left(),
- eval_right, false_label_, eval_right);
- }
-}
-
-
-void FullCodeGenerator::ForwardBailoutToChild(Expression* expr) {
- if (!info_->HasDeoptimizationSupport()) return;
- ASSERT(context()->IsTest());
- ASSERT(expr == forward_bailout_stack_->expr());
- forward_bailout_pending_ = forward_bailout_stack_;
-}
-
-
-void FullCodeGenerator::EffectContext::HandleExpression(
- Expression* expr) const {
- codegen()->HandleInNonTestContext(expr, NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::HandleExpression(
- Expression* expr) const {
- codegen()->HandleInNonTestContext(expr, TOS_REG);
-}
-
-
-void FullCodeGenerator::StackValueContext::HandleExpression(
- Expression* expr) const {
- codegen()->HandleInNonTestContext(expr, NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::TestContext::HandleExpression(Expression* expr) const {
- codegen()->VisitInTestContext(expr);
-}
-
-
-void FullCodeGenerator::HandleInNonTestContext(Expression* expr, State state) {
- ASSERT(forward_bailout_pending_ == NULL);
- AstVisitor::Visit(expr);
- PrepareForBailout(expr, state);
- // Forwarding bailouts to children is a one shot operation. It
- // should have been processed at this point.
- ASSERT(forward_bailout_pending_ == NULL);
-}
-
-
-void FullCodeGenerator::VisitInTestContext(Expression* expr) {
- ForwardBailoutStack stack(expr, forward_bailout_pending_);
- ForwardBailoutStack* saved = forward_bailout_stack_;
- forward_bailout_pending_ = NULL;
- forward_bailout_stack_ = &stack;
- AstVisitor::Visit(expr);
- forward_bailout_stack_ = saved;
-}
-
-
-void FullCodeGenerator::VisitBlock(Block* stmt) {
- Comment cmnt(masm_, "[ Block");
- Breakable nested_statement(this, stmt);
- SetStatementPosition(stmt);
-
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
- VisitStatements(stmt->statements());
- __ bind(nested_statement.break_target());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
- Comment cmnt(masm_, "[ ExpressionStatement");
- SetStatementPosition(stmt);
- VisitForEffect(stmt->expression());
-}
-
-
-void FullCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
- Comment cmnt(masm_, "[ EmptyStatement");
- SetStatementPosition(stmt);
-}
-
-
-void FullCodeGenerator::VisitIfStatement(IfStatement* stmt) {
- Comment cmnt(masm_, "[ IfStatement");
- SetStatementPosition(stmt);
- Label then_part, else_part, done;
-
- if (stmt->HasElseStatement()) {
- VisitForControl(stmt->condition(), &then_part, &else_part, &then_part);
- PrepareForBailoutForId(stmt->ThenId(), NO_REGISTERS);
- __ bind(&then_part);
- Visit(stmt->then_statement());
- __ jmp(&done);
-
- PrepareForBailoutForId(stmt->ElseId(), NO_REGISTERS);
- __ bind(&else_part);
- Visit(stmt->else_statement());
- } else {
- VisitForControl(stmt->condition(), &then_part, &done, &then_part);
- PrepareForBailoutForId(stmt->ThenId(), NO_REGISTERS);
- __ bind(&then_part);
- Visit(stmt->then_statement());
-
- PrepareForBailoutForId(stmt->ElseId(), NO_REGISTERS);
- }
- __ bind(&done);
- PrepareForBailoutForId(stmt->id(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
- Comment cmnt(masm_, "[ ContinueStatement");
- SetStatementPosition(stmt);
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- // When continuing, we clobber the unpredictable value in the accumulator
- // with one that's safe for GC. If we hit an exit from the try block of
- // try...finally on our way out, we will unconditionally preserve the
- // accumulator on the stack.
- ClearAccumulator();
- while (!current->IsContinueTarget(stmt->target())) {
- stack_depth = current->Exit(stack_depth);
- current = current->outer();
- }
- __ Drop(stack_depth);
-
- Iteration* loop = current->AsIteration();
- __ jmp(loop->continue_target());
-}
-
-
-void FullCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
- Comment cmnt(masm_, "[ BreakStatement");
- SetStatementPosition(stmt);
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- // When breaking, we clobber the unpredictable value in the accumulator
- // with one that's safe for GC. If we hit an exit from the try block of
- // try...finally on our way out, we will unconditionally preserve the
- // accumulator on the stack.
- ClearAccumulator();
- while (!current->IsBreakTarget(stmt->target())) {
- stack_depth = current->Exit(stack_depth);
- current = current->outer();
- }
- __ Drop(stack_depth);
-
- Breakable* target = current->AsBreakable();
- __ jmp(target->break_target());
-}
-
-
-void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
- Comment cmnt(masm_, "[ ReturnStatement");
- SetStatementPosition(stmt);
- Expression* expr = stmt->expression();
- VisitForAccumulatorValue(expr);
-
- // Exit all nested statements.
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- while (current != NULL) {
- stack_depth = current->Exit(stack_depth);
- current = current->outer();
- }
- __ Drop(stack_depth);
-
- EmitReturnSequence();
-}
-
-
-void FullCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
- Comment cmnt(masm_, "[ WithEnterStatement");
- SetStatementPosition(stmt);
-
- VisitForStackValue(stmt->expression());
- if (stmt->is_catch_block()) {
- __ CallRuntime(Runtime::kPushCatchContext, 1);
- } else {
- __ CallRuntime(Runtime::kPushContext, 1);
- }
- // Both runtime calls return the new context in both the context and the
- // result registers.
-
- // Update local stack frame context field.
- StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
-}
-
-
-void FullCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
- Comment cmnt(masm_, "[ WithExitStatement");
- SetStatementPosition(stmt);
-
- // Pop context.
- LoadContextField(context_register(), Context::PREVIOUS_INDEX);
- // Update local stack frame context field.
- StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
-}
-
-
-void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
- Comment cmnt(masm_, "[ DoWhileStatement");
- SetStatementPosition(stmt);
- Label body, stack_check;
-
- Iteration loop_statement(this, stmt);
- increment_loop_depth();
-
- __ bind(&body);
- Visit(stmt->body());
-
- // Record the position of the do while condition and make sure it is
- // possible to break on the condition.
- __ bind(loop_statement.continue_target());
- PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
- SetExpressionPosition(stmt->cond(), stmt->condition_position());
- VisitForControl(stmt->cond(),
- &stack_check,
- loop_statement.break_target(),
- &stack_check);
-
- // Check stack before looping.
- PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
- __ bind(&stack_check);
- EmitStackCheck(stmt);
- __ jmp(&body);
-
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
- __ bind(loop_statement.break_target());
- decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
- Comment cmnt(masm_, "[ WhileStatement");
- Label test, body;
-
- Iteration loop_statement(this, stmt);
- increment_loop_depth();
-
- // Emit the test at the bottom of the loop.
- __ jmp(&test);
-
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
- __ bind(&body);
- Visit(stmt->body());
-
- // Emit the statement position here as this is where the while
- // statement code starts.
- __ bind(loop_statement.continue_target());
- SetStatementPosition(stmt);
-
- // Check stack before looping.
- EmitStackCheck(stmt);
-
- __ bind(&test);
- VisitForControl(stmt->cond(),
- &body,
- loop_statement.break_target(),
- loop_statement.break_target());
-
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
- __ bind(loop_statement.break_target());
- decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
- Comment cmnt(masm_, "[ ForStatement");
- Label test, body;
-
- Iteration loop_statement(this, stmt);
- if (stmt->init() != NULL) {
- Visit(stmt->init());
- }
-
- increment_loop_depth();
- // Emit the test at the bottom of the loop (even if empty).
- __ jmp(&test);
-
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
- __ bind(&body);
- Visit(stmt->body());
-
- PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
- __ bind(loop_statement.continue_target());
- SetStatementPosition(stmt);
- if (stmt->next() != NULL) {
- Visit(stmt->next());
- }
-
- // Emit the statement position here as this is where the for
- // statement code starts.
- SetStatementPosition(stmt);
-
- // Check stack before looping.
- EmitStackCheck(stmt);
-
- __ bind(&test);
- if (stmt->cond() != NULL) {
- VisitForControl(stmt->cond(),
- &body,
- loop_statement.break_target(),
- loop_statement.break_target());
- } else {
- __ jmp(&body);
- }
-
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
- __ bind(loop_statement.break_target());
- decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
- Comment cmnt(masm_, "[ TryCatchStatement");
- SetStatementPosition(stmt);
- // The try block adds a handler to the exception handler chain
- // before entering, and removes it again when exiting normally.
- // If an exception is thrown during execution of the try block,
- // control is passed to the handler, which also consumes the handler.
- // At this point, the exception is in a register, and store it in
- // the temporary local variable (prints as ".catch-var") before
- // executing the catch block. The catch block has been rewritten
- // to introduce a new scope to bind the catch variable and to remove
- // that scope again afterwards.
-
- Label try_handler_setup, catch_entry, done;
- __ Call(&try_handler_setup);
- // Try handler code, exception in result register.
-
- // Store exception in local .catch variable before executing catch block.
- {
- // The catch variable is *always* a variable proxy for a local variable.
- Variable* catch_var = stmt->catch_var()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(catch_var);
- Slot* variable_slot = catch_var->AsSlot();
- ASSERT_NOT_NULL(variable_slot);
- ASSERT_EQ(Slot::LOCAL, variable_slot->type());
- StoreToFrameField(SlotOffset(variable_slot), result_register());
- }
-
- Visit(stmt->catch_block());
- __ jmp(&done);
-
- // Try block code. Sets up the exception handler chain.
- __ bind(&try_handler_setup);
- {
- TryCatch try_block(this, &catch_entry);
- __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
- Visit(stmt->try_block());
- __ PopTryHandler();
- }
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- Comment cmnt(masm_, "[ TryFinallyStatement");
- SetStatementPosition(stmt);
- // Try finally is compiled by setting up a try-handler on the stack while
- // executing the try body, and removing it again afterwards.
- //
- // The try-finally construct can enter the finally block in three ways:
- // 1. By exiting the try-block normally. This removes the try-handler and
- // calls the finally block code before continuing.
- // 2. By exiting the try-block with a function-local control flow transfer
- // (break/continue/return). The site of the, e.g., break removes the
- // try handler and calls the finally block code before continuing
- // its outward control transfer.
- // 3. by exiting the try-block with a thrown exception.
- // This can happen in nested function calls. It traverses the try-handler
- // chain and consumes the try-handler entry before jumping to the
- // handler code. The handler code then calls the finally-block before
- // rethrowing the exception.
- //
- // The finally block must assume a return address on top of the stack
- // (or in the link register on ARM chips) and a value (return value or
- // exception) in the result register (rax/eax/r0), both of which must
- // be preserved. The return address isn't GC-safe, so it should be
- // cooked before GC.
- Label finally_entry;
- Label try_handler_setup;
-
- // Setup the try-handler chain. Use a call to
- // Jump to try-handler setup and try-block code. Use call to put try-handler
- // address on stack.
- __ Call(&try_handler_setup);
- // Try handler code. Return address of call is pushed on handler stack.
- {
- // This code is only executed during stack-handler traversal when an
- // exception is thrown. The execption is in the result register, which
- // is retained by the finally block.
- // Call the finally block and then rethrow the exception.
- __ Call(&finally_entry);
- __ push(result_register());
- __ CallRuntime(Runtime::kReThrow, 1);
- }
-
- __ bind(&finally_entry);
- {
- // Finally block implementation.
- Finally finally_block(this);
- EnterFinallyBlock();
- Visit(stmt->finally_block());
- ExitFinallyBlock(); // Return to the calling code.
- }
-
- __ bind(&try_handler_setup);
- {
- // Setup try handler (stack pointer registers).
- TryFinally try_block(this, &finally_entry);
- __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
- Visit(stmt->try_block());
- __ PopTryHandler();
- }
- // Execute the finally block on the way out. Clobber the unpredictable
- // value in the accumulator with one that's safe for GC. The finally
- // block will unconditionally preserve the accumulator on the stack.
- ClearAccumulator();
- __ Call(&finally_entry);
-}
-
-
-void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Comment cmnt(masm_, "[ DebuggerStatement");
- SetStatementPosition(stmt);
-
- __ DebugBreak();
- // Ignore the return value.
-#endif
-}
-
-
-void FullCodeGenerator::VisitConditional(Conditional* expr) {
- Comment cmnt(masm_, "[ Conditional");
- Label true_case, false_case, done;
- VisitForControl(expr->condition(), &true_case, &false_case, &true_case);
-
- PrepareForBailoutForId(expr->ThenId(), NO_REGISTERS);
- __ bind(&true_case);
- SetExpressionPosition(expr->then_expression(),
- expr->then_expression_position());
- if (context()->IsTest()) {
- const TestContext* for_test = TestContext::cast(context());
- VisitForControl(expr->then_expression(),
- for_test->true_label(),
- for_test->false_label(),
- NULL);
- } else {
- context()->HandleExpression(expr->then_expression());
- __ jmp(&done);
- }
-
- PrepareForBailoutForId(expr->ElseId(), NO_REGISTERS);
- __ bind(&false_case);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- SetExpressionPosition(expr->else_expression(),
- expr->else_expression_position());
- context()->HandleExpression(expr->else_expression());
- // If control flow falls through Visit, merge it with true case here.
- if (!context()->IsTest()) {
- __ bind(&done);
- }
-}
-
-
-void FullCodeGenerator::VisitLiteral(Literal* expr) {
- Comment cmnt(masm_, "[ Literal");
- context()->Plug(expr->handle());
-}
-
-
-void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
- Comment cmnt(masm_, "[ FunctionLiteral");
-
- // Build the function boilerplate and instantiate it.
- Handle<SharedFunctionInfo> function_info =
- Compiler::BuildFunctionInfo(expr, script());
- if (function_info.is_null()) {
- SetStackOverflow();
- return;
- }
- EmitNewClosure(function_info, expr->pretenure());
-}
-
-
-void FullCodeGenerator::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
- EmitNewClosure(expr->shared_function_info(), false);
-}
-
-
-void FullCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- // Call runtime routine to allocate the catch extension object and
- // assign the exception value to the catch variable.
- Comment cmnt(masm_, "[ CatchExtensionObject");
- VisitForStackValue(expr->key());
- VisitForStackValue(expr->value());
- // Create catch extension object.
- __ CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
- context()->Plug(result_register());
-}
-
-
-void FullCodeGenerator::VisitThrow(Throw* expr) {
- Comment cmnt(masm_, "[ Throw");
- VisitForStackValue(expr->exception());
- __ CallRuntime(Runtime::kThrow, 1);
- // Never returns here.
-}
-
-
-void FullCodeGenerator::VisitIncrementOperation(IncrementOperation* expr) {
- UNREACHABLE();
-}
-
-
-int FullCodeGenerator::TryFinally::Exit(int stack_depth) {
- // The macros used here must preserve the result register.
- __ Drop(stack_depth);
- __ PopTryHandler();
- __ Call(finally_entry_);
- return 0;
-}
-
-
-int FullCodeGenerator::TryCatch::Exit(int stack_depth) {
- // The macros used here must preserve the result register.
- __ Drop(stack_depth);
- __ PopTryHandler();
- return 0;
-}
-
-
-#undef __
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/full-codegen.h b/src/3rdparty/v8/src/full-codegen.h
deleted file mode 100644
index d6ed1b9..0000000
--- a/src/3rdparty/v8/src/full-codegen.h
+++ /dev/null
@@ -1,753 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FULL_CODEGEN_H_
-#define V8_FULL_CODEGEN_H_
-
-#include "v8.h"
-
-#include "ast.h"
-#include "code-stubs.h"
-#include "codegen.h"
-#include "compiler.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class JumpPatchSite;
-
-// AST node visitor which can tell whether a given statement will be breakable
-// when the code is compiled by the full compiler in the debugger. This means
-// that there will be an IC (load/store/call) in the code generated for the
-// debugger to piggybag on.
-class BreakableStatementChecker: public AstVisitor {
- public:
- BreakableStatementChecker() : is_breakable_(false) {}
-
- void Check(Statement* stmt);
- void Check(Expression* stmt);
-
- bool is_breakable() { return is_breakable_; }
-
- private:
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- bool is_breakable_;
-
- DISALLOW_COPY_AND_ASSIGN(BreakableStatementChecker);
-};
-
-
-// -----------------------------------------------------------------------------
-// Full code generator.
-
-class FullCodeGenerator: public AstVisitor {
- public:
- enum State {
- NO_REGISTERS,
- TOS_REG
- };
-
- explicit FullCodeGenerator(MacroAssembler* masm)
- : masm_(masm),
- info_(NULL),
- nesting_stack_(NULL),
- loop_depth_(0),
- context_(NULL),
- bailout_entries_(0),
- stack_checks_(2), // There's always at least one.
- forward_bailout_stack_(NULL),
- forward_bailout_pending_(NULL) {
- }
-
- static bool MakeCode(CompilationInfo* info);
-
- void Generate(CompilationInfo* info);
- void PopulateDeoptimizationData(Handle<Code> code);
-
- class StateField : public BitField<State, 0, 8> { };
- class PcField : public BitField<unsigned, 8, 32-8> { };
-
- static const char* State2String(State state) {
- switch (state) {
- case NO_REGISTERS: return "NO_REGISTERS";
- case TOS_REG: return "TOS_REG";
- }
- UNREACHABLE();
- return NULL;
- }
-
- private:
- class Breakable;
- class Iteration;
- class TryCatch;
- class TryFinally;
- class Finally;
- class ForIn;
-
- class NestedStatement BASE_EMBEDDED {
- public:
- explicit NestedStatement(FullCodeGenerator* codegen) : codegen_(codegen) {
- // Link into codegen's nesting stack.
- previous_ = codegen->nesting_stack_;
- codegen->nesting_stack_ = this;
- }
- virtual ~NestedStatement() {
- // Unlink from codegen's nesting stack.
- ASSERT_EQ(this, codegen_->nesting_stack_);
- codegen_->nesting_stack_ = previous_;
- }
-
- virtual Breakable* AsBreakable() { return NULL; }
- virtual Iteration* AsIteration() { return NULL; }
- virtual TryCatch* AsTryCatch() { return NULL; }
- virtual TryFinally* AsTryFinally() { return NULL; }
- virtual Finally* AsFinally() { return NULL; }
- virtual ForIn* AsForIn() { return NULL; }
-
- virtual bool IsContinueTarget(Statement* target) { return false; }
- virtual bool IsBreakTarget(Statement* target) { return false; }
-
- // Generate code to leave the nested statement. This includes
- // cleaning up any stack elements in use and restoring the
- // stack to the expectations of the surrounding statements.
- // Takes a number of stack elements currently on top of the
- // nested statement's stack, and returns a number of stack
- // elements left on top of the surrounding statement's stack.
- // The generated code must preserve the result register (which
- // contains the value in case of a return).
- virtual int Exit(int stack_depth) {
- // Default implementation for the case where there is
- // nothing to clean up.
- return stack_depth;
- }
- NestedStatement* outer() { return previous_; }
- protected:
- MacroAssembler* masm() { return codegen_->masm(); }
- private:
- FullCodeGenerator* codegen_;
- NestedStatement* previous_;
- DISALLOW_COPY_AND_ASSIGN(NestedStatement);
- };
-
- class Breakable : public NestedStatement {
- public:
- Breakable(FullCodeGenerator* codegen,
- BreakableStatement* break_target)
- : NestedStatement(codegen),
- target_(break_target) {}
- virtual ~Breakable() {}
- virtual Breakable* AsBreakable() { return this; }
- virtual bool IsBreakTarget(Statement* statement) {
- return target_ == statement;
- }
- BreakableStatement* statement() { return target_; }
- Label* break_target() { return &break_target_label_; }
- private:
- BreakableStatement* target_;
- Label break_target_label_;
- DISALLOW_COPY_AND_ASSIGN(Breakable);
- };
-
- class Iteration : public Breakable {
- public:
- Iteration(FullCodeGenerator* codegen,
- IterationStatement* iteration_statement)
- : Breakable(codegen, iteration_statement) {}
- virtual ~Iteration() {}
- virtual Iteration* AsIteration() { return this; }
- virtual bool IsContinueTarget(Statement* statement) {
- return this->statement() == statement;
- }
- Label* continue_target() { return &continue_target_label_; }
- private:
- Label continue_target_label_;
- DISALLOW_COPY_AND_ASSIGN(Iteration);
- };
-
- // The environment inside the try block of a try/catch statement.
- class TryCatch : public NestedStatement {
- public:
- explicit TryCatch(FullCodeGenerator* codegen, Label* catch_entry)
- : NestedStatement(codegen), catch_entry_(catch_entry) { }
- virtual ~TryCatch() {}
- virtual TryCatch* AsTryCatch() { return this; }
- Label* catch_entry() { return catch_entry_; }
- virtual int Exit(int stack_depth);
- private:
- Label* catch_entry_;
- DISALLOW_COPY_AND_ASSIGN(TryCatch);
- };
-
- // The environment inside the try block of a try/finally statement.
- class TryFinally : public NestedStatement {
- public:
- explicit TryFinally(FullCodeGenerator* codegen, Label* finally_entry)
- : NestedStatement(codegen), finally_entry_(finally_entry) { }
- virtual ~TryFinally() {}
- virtual TryFinally* AsTryFinally() { return this; }
- Label* finally_entry() { return finally_entry_; }
- virtual int Exit(int stack_depth);
- private:
- Label* finally_entry_;
- DISALLOW_COPY_AND_ASSIGN(TryFinally);
- };
-
- // A FinallyEnvironment represents being inside a finally block.
- // Abnormal termination of the finally block needs to clean up
- // the block's parameters from the stack.
- class Finally : public NestedStatement {
- public:
- explicit Finally(FullCodeGenerator* codegen) : NestedStatement(codegen) { }
- virtual ~Finally() {}
- virtual Finally* AsFinally() { return this; }
- virtual int Exit(int stack_depth) {
- return stack_depth + kFinallyStackElementCount;
- }
- private:
- // Number of extra stack slots occupied during a finally block.
- static const int kFinallyStackElementCount = 2;
- DISALLOW_COPY_AND_ASSIGN(Finally);
- };
-
- // A ForInEnvironment represents being inside a for-in loop.
- // Abnormal termination of the for-in block needs to clean up
- // the block's temporary storage from the stack.
- class ForIn : public Iteration {
- public:
- ForIn(FullCodeGenerator* codegen,
- ForInStatement* statement)
- : Iteration(codegen, statement) { }
- virtual ~ForIn() {}
- virtual ForIn* AsForIn() { return this; }
- virtual int Exit(int stack_depth) {
- return stack_depth + kForInStackElementCount;
- }
- private:
- static const int kForInStackElementCount = 5;
- DISALLOW_COPY_AND_ASSIGN(ForIn);
- };
-
- // The forward bailout stack keeps track of the expressions that can
- // bail out to just before the control flow is split in a child
- // node. The stack elements are linked together through the parent
- // link when visiting expressions in test contexts after requesting
- // bailout in child forwarding.
- class ForwardBailoutStack BASE_EMBEDDED {
- public:
- ForwardBailoutStack(Expression* expr, ForwardBailoutStack* parent)
- : expr_(expr), parent_(parent) { }
-
- Expression* expr() const { return expr_; }
- ForwardBailoutStack* parent() const { return parent_; }
-
- private:
- Expression* const expr_;
- ForwardBailoutStack* const parent_;
- };
-
- // Type of a member function that generates inline code for a native function.
- typedef void (FullCodeGenerator::*InlineFunctionGenerator)
- (ZoneList<Expression*>*);
-
- static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
- // A platform-specific utility to overwrite the accumulator register
- // with a GC-safe value.
- void ClearAccumulator();
-
- // Compute the frame pointer relative offset for a given local or
- // parameter slot.
- int SlotOffset(Slot* slot);
-
- // Determine whether or not to inline the smi case for the given
- // operation.
- bool ShouldInlineSmiCase(Token::Value op);
-
- // Helper function to convert a pure value into a test context. The value
- // is expected on the stack or the accumulator, depending on the platform.
- // See the platform-specific implementation for details.
- void DoTest(Label* if_true, Label* if_false, Label* fall_through);
-
- // Helper function to split control flow and avoid a branch to the
- // fall-through label if it is set up.
- void Split(Condition cc,
- Label* if_true,
- Label* if_false,
- Label* fall_through);
-
- void Move(Slot* dst, Register source, Register scratch1, Register scratch2);
- void Move(Register dst, Slot* source);
-
- // Return an operand used to read/write to a known (ie, non-LOOKUP) slot.
- // May emit code to traverse the context chain, destroying the scratch
- // register.
- MemOperand EmitSlotSearch(Slot* slot, Register scratch);
-
- // Forward the bailout responsibility for the given expression to
- // the next child visited (which must be in a test context).
- void ForwardBailoutToChild(Expression* expr);
-
- void VisitForEffect(Expression* expr) {
- EffectContext context(this);
- HandleInNonTestContext(expr, NO_REGISTERS);
- }
-
- void VisitForAccumulatorValue(Expression* expr) {
- AccumulatorValueContext context(this);
- HandleInNonTestContext(expr, TOS_REG);
- }
-
- void VisitForStackValue(Expression* expr) {
- StackValueContext context(this);
- HandleInNonTestContext(expr, NO_REGISTERS);
- }
-
- void VisitForControl(Expression* expr,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- TestContext context(this, if_true, if_false, fall_through);
- VisitInTestContext(expr);
- // Forwarding bailouts to children is a one shot operation. It
- // should have been processed at this point.
- ASSERT(forward_bailout_pending_ == NULL);
- }
-
- void HandleInNonTestContext(Expression* expr, State state);
- void VisitInTestContext(Expression* expr);
-
- void VisitDeclarations(ZoneList<Declaration*>* declarations);
- void DeclareGlobals(Handle<FixedArray> pairs);
-
- // Try to perform a comparison as a fast inlined literal compare if
- // the operands allow it. Returns true if the compare operations
- // has been matched and all code generated; false otherwise.
- bool TryLiteralCompare(Token::Value op,
- Expression* left,
- Expression* right,
- Label* if_true,
- Label* if_false,
- Label* fall_through);
-
- // Bailout support.
- void PrepareForBailout(AstNode* node, State state);
- void PrepareForBailoutForId(int id, State state);
-
- // Record a call's return site offset, used to rebuild the frame if the
- // called function was inlined at the site.
- void RecordJSReturnSite(Call* call);
-
- // Prepare for bailout before a test (or compare) and branch. If
- // should_normalize, then the following comparison will not handle the
- // canonical JS true value so we will insert a (dead) test against true at
- // the actual bailout target from the optimized code. If not
- // should_normalize, the true and false labels are ignored.
- void PrepareForBailoutBeforeSplit(State state,
- bool should_normalize,
- Label* if_true,
- Label* if_false);
-
- // Platform-specific code for a variable, constant, or function
- // declaration. Functions have an initial value.
- void EmitDeclaration(Variable* variable,
- Variable::Mode mode,
- FunctionLiteral* function);
-
- // Platform-specific code for checking the stack limit at the back edge of
- // a loop.
- void EmitStackCheck(IterationStatement* stmt);
- // Record the OSR AST id corresponding to a stack check in the code.
- void RecordStackCheck(int osr_ast_id);
- // Emit a table of stack check ids and pcs into the code stream. Return
- // the offset of the start of the table.
- unsigned EmitStackCheckTable();
-
- // Platform-specific return sequence
- void EmitReturnSequence();
-
- // Platform-specific code sequences for calls
- void EmitCallWithStub(Call* expr);
- void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
- void EmitKeyedCallWithIC(Call* expr, Expression* key, RelocInfo::Mode mode);
-
- // Platform-specific code for inline runtime calls.
- InlineFunctionGenerator FindInlineFunctionGenerator(Runtime::FunctionId id);
-
- void EmitInlineRuntimeCall(CallRuntime* expr);
-
-#define EMIT_INLINE_RUNTIME_CALL(name, x, y) \
- void Emit##name(ZoneList<Expression*>* arguments);
- INLINE_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
- INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
-#undef EMIT_INLINE_RUNTIME_CALL
-
- // Platform-specific code for loading variables.
- void EmitLoadGlobalSlotCheckExtensions(Slot* slot,
- TypeofState typeof_state,
- Label* slow);
- MemOperand ContextSlotOperandCheckExtensions(Slot* slot, Label* slow);
- void EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- Label* slow,
- Label* done);
- void EmitVariableLoad(Variable* expr);
-
- enum ResolveEvalFlag {
- SKIP_CONTEXT_LOOKUP,
- PERFORM_CONTEXT_LOOKUP
- };
-
- // Expects the arguments and the function already pushed.
- void EmitResolvePossiblyDirectEval(ResolveEvalFlag flag, int arg_count);
-
- // Platform-specific support for allocating a new closure based on
- // the given function info.
- void EmitNewClosure(Handle<SharedFunctionInfo> info, bool pretenure);
-
- // Platform-specific support for compiling assignments.
-
- // Load a value from a named property.
- // The receiver is left on the stack by the IC.
- void EmitNamedPropertyLoad(Property* expr);
-
- // Load a value from a keyed property.
- // The receiver and the key is left on the stack by the IC.
- void EmitKeyedPropertyLoad(Property* expr);
-
- // Apply the compound assignment operator. Expects the left operand on top
- // of the stack and the right one in the accumulator.
- void EmitBinaryOp(Token::Value op,
- OverwriteMode mode);
-
- // Helper functions for generating inlined smi code for certain
- // binary operations.
- void EmitInlineSmiBinaryOp(Expression* expr,
- Token::Value op,
- OverwriteMode mode,
- Expression* left,
- Expression* right);
-
- // Assign to the given expression as if via '='. The right-hand-side value
- // is expected in the accumulator.
- void EmitAssignment(Expression* expr, int bailout_ast_id);
-
- // Complete a variable assignment. The right-hand-side value is expected
- // in the accumulator.
- void EmitVariableAssignment(Variable* var,
- Token::Value op);
-
- // Complete a named property assignment. The receiver is expected on top
- // of the stack and the right-hand-side value in the accumulator.
- void EmitNamedPropertyAssignment(Assignment* expr);
-
- // Complete a keyed property assignment. The receiver and key are
- // expected on top of the stack and the right-hand-side value in the
- // accumulator.
- void EmitKeyedPropertyAssignment(Assignment* expr);
-
- void SetFunctionPosition(FunctionLiteral* fun);
- void SetReturnPosition(FunctionLiteral* fun);
- void SetStatementPosition(Statement* stmt);
- void SetExpressionPosition(Expression* expr, int pos);
- void SetStatementPosition(int pos);
- void SetSourcePosition(int pos);
-
- // Non-local control flow support.
- void EnterFinallyBlock();
- void ExitFinallyBlock();
-
- // Loop nesting counter.
- int loop_depth() { return loop_depth_; }
- void increment_loop_depth() { loop_depth_++; }
- void decrement_loop_depth() {
- ASSERT(loop_depth_ > 0);
- loop_depth_--;
- }
-
- MacroAssembler* masm() { return masm_; }
-
- class ExpressionContext;
- const ExpressionContext* context() { return context_; }
- void set_new_context(const ExpressionContext* context) { context_ = context; }
-
- Handle<Script> script() { return info_->script(); }
- bool is_eval() { return info_->is_eval(); }
- bool is_strict_mode() { return function()->strict_mode(); }
- StrictModeFlag strict_mode_flag() {
- return is_strict_mode() ? kStrictMode : kNonStrictMode;
- }
- FunctionLiteral* function() { return info_->function(); }
- Scope* scope() { return info_->scope(); }
-
- static Register result_register();
- static Register context_register();
-
- // Helper for calling an IC stub.
- void EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode);
-
- // Calling an IC stub with a patch site. Passing NULL for patch_site
- // or non NULL patch_site which is not activated indicates no inlined smi code
- // and emits a nop after the IC call.
- void EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site);
-
- // Set fields in the stack frame. Offsets are the frame pointer relative
- // offsets defined in, e.g., StandardFrameConstants.
- void StoreToFrameField(int frame_offset, Register value);
-
- // Load a value from the current context. Indices are defined as an enum
- // in v8::internal::Context.
- void LoadContextField(Register dst, int context_index);
-
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
- // Handles the shortcutted logical binary operations in VisitBinaryOperation.
- void EmitLogicalOperation(BinaryOperation* expr);
-
- void VisitForTypeofValue(Expression* expr);
-
- struct BailoutEntry {
- unsigned id;
- unsigned pc_and_state;
- };
-
-
- class ExpressionContext BASE_EMBEDDED {
- public:
- explicit ExpressionContext(FullCodeGenerator* codegen)
- : masm_(codegen->masm()), old_(codegen->context()), codegen_(codegen) {
- codegen->set_new_context(this);
- }
-
- virtual ~ExpressionContext() {
- codegen_->set_new_context(old_);
- }
-
- Isolate* isolate() const { return codegen_->isolate(); }
-
- // Convert constant control flow (true or false) to the result expected for
- // this expression context.
- virtual void Plug(bool flag) const = 0;
-
- // Emit code to convert a pure value (in a register, slot, as a literal,
- // or on top of the stack) into the result expected according to this
- // expression context.
- virtual void Plug(Register reg) const = 0;
- virtual void Plug(Slot* slot) const = 0;
- virtual void Plug(Handle<Object> lit) const = 0;
- virtual void Plug(Heap::RootListIndex index) const = 0;
- virtual void PlugTOS() const = 0;
-
- // Emit code to convert pure control flow to a pair of unbound labels into
- // the result expected according to this expression context. The
- // implementation will bind both labels unless it's a TestContext, which
- // won't bind them at this point.
- virtual void Plug(Label* materialize_true,
- Label* materialize_false) const = 0;
-
- // Emit code to discard count elements from the top of stack, then convert
- // a pure value into the result expected according to this expression
- // context.
- virtual void DropAndPlug(int count, Register reg) const = 0;
-
- // For shortcutting operations || and &&.
- virtual void EmitLogicalLeft(BinaryOperation* expr,
- Label* eval_right,
- Label* done) const = 0;
-
- // Set up branch labels for a test expression. The three Label** parameters
- // are output parameters.
- virtual void PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const = 0;
-
- virtual void HandleExpression(Expression* expr) const = 0;
-
- // Returns true if we are evaluating only for side effects (ie if the result
- // will be discarded).
- virtual bool IsEffect() const { return false; }
-
- // Returns true if we are branching on the value rather than materializing
- // it. Only used for asserts.
- virtual bool IsTest() const { return false; }
-
- protected:
- FullCodeGenerator* codegen() const { return codegen_; }
- MacroAssembler* masm() const { return masm_; }
- MacroAssembler* masm_;
-
- private:
- const ExpressionContext* old_;
- FullCodeGenerator* codegen_;
- };
-
- class AccumulatorValueContext : public ExpressionContext {
- public:
- explicit AccumulatorValueContext(FullCodeGenerator* codegen)
- : ExpressionContext(codegen) { }
-
- virtual void Plug(bool flag) const;
- virtual void Plug(Register reg) const;
- virtual void Plug(Label* materialize_true, Label* materialize_false) const;
- virtual void Plug(Slot* slot) const;
- virtual void Plug(Handle<Object> lit) const;
- virtual void Plug(Heap::RootListIndex) const;
- virtual void PlugTOS() const;
- virtual void DropAndPlug(int count, Register reg) const;
- virtual void EmitLogicalLeft(BinaryOperation* expr,
- Label* eval_right,
- Label* done) const;
- virtual void PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const;
- virtual void HandleExpression(Expression* expr) const;
- };
-
- class StackValueContext : public ExpressionContext {
- public:
- explicit StackValueContext(FullCodeGenerator* codegen)
- : ExpressionContext(codegen) { }
-
- virtual void Plug(bool flag) const;
- virtual void Plug(Register reg) const;
- virtual void Plug(Label* materialize_true, Label* materialize_false) const;
- virtual void Plug(Slot* slot) const;
- virtual void Plug(Handle<Object> lit) const;
- virtual void Plug(Heap::RootListIndex) const;
- virtual void PlugTOS() const;
- virtual void DropAndPlug(int count, Register reg) const;
- virtual void EmitLogicalLeft(BinaryOperation* expr,
- Label* eval_right,
- Label* done) const;
- virtual void PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const;
- virtual void HandleExpression(Expression* expr) const;
- };
-
- class TestContext : public ExpressionContext {
- public:
- explicit TestContext(FullCodeGenerator* codegen,
- Label* true_label,
- Label* false_label,
- Label* fall_through)
- : ExpressionContext(codegen),
- true_label_(true_label),
- false_label_(false_label),
- fall_through_(fall_through) { }
-
- static const TestContext* cast(const ExpressionContext* context) {
- ASSERT(context->IsTest());
- return reinterpret_cast<const TestContext*>(context);
- }
-
- Label* true_label() const { return true_label_; }
- Label* false_label() const { return false_label_; }
- Label* fall_through() const { return fall_through_; }
-
- virtual void Plug(bool flag) const;
- virtual void Plug(Register reg) const;
- virtual void Plug(Label* materialize_true, Label* materialize_false) const;
- virtual void Plug(Slot* slot) const;
- virtual void Plug(Handle<Object> lit) const;
- virtual void Plug(Heap::RootListIndex) const;
- virtual void PlugTOS() const;
- virtual void DropAndPlug(int count, Register reg) const;
- virtual void EmitLogicalLeft(BinaryOperation* expr,
- Label* eval_right,
- Label* done) const;
- virtual void PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const;
- virtual void HandleExpression(Expression* expr) const;
- virtual bool IsTest() const { return true; }
-
- private:
- Label* true_label_;
- Label* false_label_;
- Label* fall_through_;
- };
-
- class EffectContext : public ExpressionContext {
- public:
- explicit EffectContext(FullCodeGenerator* codegen)
- : ExpressionContext(codegen) { }
-
- virtual void Plug(bool flag) const;
- virtual void Plug(Register reg) const;
- virtual void Plug(Label* materialize_true, Label* materialize_false) const;
- virtual void Plug(Slot* slot) const;
- virtual void Plug(Handle<Object> lit) const;
- virtual void Plug(Heap::RootListIndex) const;
- virtual void PlugTOS() const;
- virtual void DropAndPlug(int count, Register reg) const;
- virtual void EmitLogicalLeft(BinaryOperation* expr,
- Label* eval_right,
- Label* done) const;
- virtual void PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const;
- virtual void HandleExpression(Expression* expr) const;
- virtual bool IsEffect() const { return true; }
- };
-
- MacroAssembler* masm_;
- CompilationInfo* info_;
- Label return_label_;
- NestedStatement* nesting_stack_;
- int loop_depth_;
- const ExpressionContext* context_;
- ZoneList<BailoutEntry> bailout_entries_;
- ZoneList<BailoutEntry> stack_checks_;
- ForwardBailoutStack* forward_bailout_stack_;
- ForwardBailoutStack* forward_bailout_pending_;
-
- friend class NestedStatement;
-
- DISALLOW_COPY_AND_ASSIGN(FullCodeGenerator);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_FULL_CODEGEN_H_
diff --git a/src/3rdparty/v8/src/func-name-inferrer.cc b/src/3rdparty/v8/src/func-name-inferrer.cc
deleted file mode 100644
index c094251..0000000
--- a/src/3rdparty/v8/src/func-name-inferrer.cc
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "ast.h"
-#include "func-name-inferrer.h"
-
-namespace v8 {
-namespace internal {
-
-
-void FuncNameInferrer::PushEnclosingName(Handle<String> name) {
- // Enclosing name is a name of a constructor function. To check
- // that it is really a constructor, we check that it is not empty
- // and starts with a capital letter.
- if (name->length() > 0 && Runtime::IsUpperCaseChar(
- Isolate::Current()->runtime_state(), name->Get(0))) {
- names_stack_.Add(name);
- }
-}
-
-
-void FuncNameInferrer::PushLiteralName(Handle<String> name) {
- if (IsOpen() && !HEAP->prototype_symbol()->Equals(*name)) {
- names_stack_.Add(name);
- }
-}
-
-
-void FuncNameInferrer::PushVariableName(Handle<String> name) {
- if (IsOpen() && !HEAP->result_symbol()->Equals(*name)) {
- names_stack_.Add(name);
- }
-}
-
-
-Handle<String> FuncNameInferrer::MakeNameFromStack() {
- if (names_stack_.is_empty()) {
- return FACTORY->empty_string();
- } else {
- return MakeNameFromStackHelper(1, names_stack_.at(0));
- }
-}
-
-
-Handle<String> FuncNameInferrer::MakeNameFromStackHelper(int pos,
- Handle<String> prev) {
- if (pos >= names_stack_.length()) {
- return prev;
- } else {
- Handle<String> curr = FACTORY->NewConsString(dot_, names_stack_.at(pos));
- return MakeNameFromStackHelper(pos + 1, FACTORY->NewConsString(prev, curr));
- }
-}
-
-
-void FuncNameInferrer::InferFunctionsNames() {
- Handle<String> func_name = MakeNameFromStack();
- for (int i = 0; i < funcs_to_infer_.length(); ++i) {
- funcs_to_infer_[i]->set_inferred_name(func_name);
- }
- funcs_to_infer_.Rewind(0);
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/func-name-inferrer.h b/src/3rdparty/v8/src/func-name-inferrer.h
deleted file mode 100644
index 5aa2b35..0000000
--- a/src/3rdparty/v8/src/func-name-inferrer.h
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FUNC_NAME_INFERRER_H_
-#define V8_FUNC_NAME_INFERRER_H_
-
-namespace v8 {
-namespace internal {
-
-// FuncNameInferrer is a stateful class that is used to perform name
-// inference for anonymous functions during static analysis of source code.
-// Inference is performed in cases when an anonymous function is assigned
-// to a variable or a property (see test-func-name-inference.cc for examples.)
-//
-// The basic idea is that during parsing of LHSs of certain expressions
-// (assignments, declarations, object literals) we collect name strings,
-// and during parsing of the RHS, a function literal can be collected. After
-// parsing the RHS we can infer a name for function literals that do not have
-// a name.
-class FuncNameInferrer : public ZoneObject {
- public:
- FuncNameInferrer()
- : entries_stack_(10),
- names_stack_(5),
- funcs_to_infer_(4),
- dot_(FACTORY->NewStringFromAscii(CStrVector("."))) {
- }
-
- // Returns whether we have entered name collection state.
- bool IsOpen() const { return !entries_stack_.is_empty(); }
-
- // Pushes an enclosing the name of enclosing function onto names stack.
- void PushEnclosingName(Handle<String> name);
-
- // Enters name collection state.
- void Enter() {
- entries_stack_.Add(names_stack_.length());
- }
-
- // Pushes an encountered name onto names stack when in collection state.
- void PushLiteralName(Handle<String> name);
-
- void PushVariableName(Handle<String> name);
-
- // Adds a function to infer name for.
- void AddFunction(FunctionLiteral* func_to_infer) {
- if (IsOpen()) {
- funcs_to_infer_.Add(func_to_infer);
- }
- }
-
- // Infers a function name and leaves names collection state.
- void Infer() {
- ASSERT(IsOpen());
- if (!funcs_to_infer_.is_empty()) {
- InferFunctionsNames();
- }
- }
-
- // Infers a function name and leaves names collection state.
- void Leave() {
- ASSERT(IsOpen());
- names_stack_.Rewind(entries_stack_.RemoveLast());
- }
-
- private:
- // Constructs a full name in dotted notation from gathered names.
- Handle<String> MakeNameFromStack();
-
- // A helper function for MakeNameFromStack.
- Handle<String> MakeNameFromStackHelper(int pos, Handle<String> prev);
-
- // Performs name inferring for added functions.
- void InferFunctionsNames();
-
- ZoneList<int> entries_stack_;
- ZoneList<Handle<String> > names_stack_;
- ZoneList<FunctionLiteral*> funcs_to_infer_;
- Handle<String> dot_;
-
- DISALLOW_COPY_AND_ASSIGN(FuncNameInferrer);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_FUNC_NAME_INFERRER_H_
diff --git a/src/3rdparty/v8/src/gdb-jit.cc b/src/3rdparty/v8/src/gdb-jit.cc
deleted file mode 100644
index c8dbf5d..0000000
--- a/src/3rdparty/v8/src/gdb-jit.cc
+++ /dev/null
@@ -1,1548 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifdef ENABLE_GDB_JIT_INTERFACE
-#include "v8.h"
-#include "gdb-jit.h"
-
-#include "bootstrapper.h"
-#include "compiler.h"
-#include "global-handles.h"
-#include "messages.h"
-#include "natives.h"
-
-namespace v8 {
-namespace internal {
-
-class ELF;
-
-class Writer BASE_EMBEDDED {
- public:
- explicit Writer(ELF* elf)
- : elf_(elf),
- position_(0),
- capacity_(1024),
- buffer_(reinterpret_cast<byte*>(malloc(capacity_))) {
- }
-
- ~Writer() {
- free(buffer_);
- }
-
- uintptr_t position() const {
- return position_;
- }
-
- template<typename T>
- class Slot {
- public:
- Slot(Writer* w, uintptr_t offset) : w_(w), offset_(offset) { }
-
- T* operator-> () {
- return w_->RawSlotAt<T>(offset_);
- }
-
- void set(const T& value) {
- *w_->RawSlotAt<T>(offset_) = value;
- }
-
- Slot<T> at(int i) {
- return Slot<T>(w_, offset_ + sizeof(T) * i);
- }
-
- private:
- Writer* w_;
- uintptr_t offset_;
- };
-
- template<typename T>
- void Write(const T& val) {
- Ensure(position_ + sizeof(T));
- *RawSlotAt<T>(position_) = val;
- position_ += sizeof(T);
- }
-
- template<typename T>
- Slot<T> SlotAt(uintptr_t offset) {
- Ensure(offset + sizeof(T));
- return Slot<T>(this, offset);
- }
-
- template<typename T>
- Slot<T> CreateSlotHere() {
- return CreateSlotsHere<T>(1);
- }
-
- template<typename T>
- Slot<T> CreateSlotsHere(uint32_t count) {
- uintptr_t slot_position = position_;
- position_ += sizeof(T) * count;
- Ensure(position_);
- return SlotAt<T>(slot_position);
- }
-
- void Ensure(uintptr_t pos) {
- if (capacity_ < pos) {
- while (capacity_ < pos) capacity_ *= 2;
- buffer_ = reinterpret_cast<byte*>(realloc(buffer_, capacity_));
- }
- }
-
- ELF* elf() { return elf_; }
-
- byte* buffer() { return buffer_; }
-
- void Align(uintptr_t align) {
- uintptr_t delta = position_ % align;
- if (delta == 0) return;
- uintptr_t padding = align - delta;
- Ensure(position_ += padding);
- ASSERT((position_ % align) == 0);
- }
-
- void WriteULEB128(uintptr_t value) {
- do {
- uint8_t byte = value & 0x7F;
- value >>= 7;
- if (value != 0) byte |= 0x80;
- Write<uint8_t>(byte);
- } while (value != 0);
- }
-
- void WriteSLEB128(intptr_t value) {
- bool more = true;
- while (more) {
- int8_t byte = value & 0x7F;
- bool byte_sign = byte & 0x40;
- value >>= 7;
-
- if ((value == 0 && !byte_sign) || (value == -1 && byte_sign)) {
- more = false;
- } else {
- byte |= 0x80;
- }
-
- Write<int8_t>(byte);
- }
- }
-
- void WriteString(const char* str) {
- do {
- Write<char>(*str);
- } while (*str++);
- }
-
- private:
- template<typename T> friend class Slot;
-
- template<typename T>
- T* RawSlotAt(uintptr_t offset) {
- ASSERT(offset < capacity_ && offset + sizeof(T) <= capacity_);
- return reinterpret_cast<T*>(&buffer_[offset]);
- }
-
- ELF* elf_;
- uintptr_t position_;
- uintptr_t capacity_;
- byte* buffer_;
-};
-
-class StringTable;
-
-class ELFSection : public ZoneObject {
- public:
- struct Header {
- uint32_t name;
- uint32_t type;
- uintptr_t flags;
- uintptr_t address;
- uintptr_t offset;
- uintptr_t size;
- uint32_t link;
- uint32_t info;
- uintptr_t alignment;
- uintptr_t entry_size;
- };
-
- enum Type {
- TYPE_NULL = 0,
- TYPE_PROGBITS = 1,
- TYPE_SYMTAB = 2,
- TYPE_STRTAB = 3,
- TYPE_RELA = 4,
- TYPE_HASH = 5,
- TYPE_DYNAMIC = 6,
- TYPE_NOTE = 7,
- TYPE_NOBITS = 8,
- TYPE_REL = 9,
- TYPE_SHLIB = 10,
- TYPE_DYNSYM = 11,
- TYPE_LOPROC = 0x70000000,
- TYPE_X86_64_UNWIND = 0x70000001,
- TYPE_HIPROC = 0x7fffffff,
- TYPE_LOUSER = 0x80000000,
- TYPE_HIUSER = 0xffffffff
- };
-
- enum Flags {
- FLAG_WRITE = 1,
- FLAG_ALLOC = 2,
- FLAG_EXEC = 4
- };
-
- enum SpecialIndexes {
- INDEX_ABSOLUTE = 0xfff1
- };
-
- ELFSection(const char* name, Type type, uintptr_t align)
- : name_(name), type_(type), align_(align) { }
-
- virtual ~ELFSection() { }
-
- void PopulateHeader(Writer::Slot<Header> header, StringTable* strtab);
-
- virtual void WriteBody(Writer::Slot<Header> header, Writer* w) {
- uintptr_t start = w->position();
- if (WriteBody(w)) {
- uintptr_t end = w->position();
- header->offset = start;
- header->size = end - start;
- }
- }
-
- virtual bool WriteBody(Writer* w) {
- return false;
- }
-
- uint16_t index() const { return index_; }
- void set_index(uint16_t index) { index_ = index; }
-
- protected:
- virtual void PopulateHeader(Writer::Slot<Header> header) {
- header->flags = 0;
- header->address = 0;
- header->offset = 0;
- header->size = 0;
- header->link = 0;
- header->info = 0;
- header->entry_size = 0;
- }
-
-
- private:
- const char* name_;
- Type type_;
- uintptr_t align_;
- uint16_t index_;
-};
-
-
-class FullHeaderELFSection : public ELFSection {
- public:
- FullHeaderELFSection(const char* name,
- Type type,
- uintptr_t align,
- uintptr_t addr,
- uintptr_t offset,
- uintptr_t size,
- uintptr_t flags)
- : ELFSection(name, type, align),
- addr_(addr),
- offset_(offset),
- size_(size),
- flags_(flags) { }
-
- protected:
- virtual void PopulateHeader(Writer::Slot<Header> header) {
- ELFSection::PopulateHeader(header);
- header->address = addr_;
- header->offset = offset_;
- header->size = size_;
- header->flags = flags_;
- }
-
- private:
- uintptr_t addr_;
- uintptr_t offset_;
- uintptr_t size_;
- uintptr_t flags_;
-};
-
-
-class StringTable : public ELFSection {
- public:
- explicit StringTable(const char* name)
- : ELFSection(name, TYPE_STRTAB, 1), writer_(NULL), offset_(0), size_(0) {
- }
-
- uintptr_t Add(const char* str) {
- if (*str == '\0') return 0;
-
- uintptr_t offset = size_;
- WriteString(str);
- return offset;
- }
-
- void AttachWriter(Writer* w) {
- writer_ = w;
- offset_ = writer_->position();
-
- // First entry in the string table should be an empty string.
- WriteString("");
- }
-
- void DetachWriter() {
- writer_ = NULL;
- }
-
- virtual void WriteBody(Writer::Slot<Header> header, Writer* w) {
- ASSERT(writer_ == NULL);
- header->offset = offset_;
- header->size = size_;
- }
-
- private:
- void WriteString(const char* str) {
- uintptr_t written = 0;
- do {
- writer_->Write(*str);
- written++;
- } while (*str++);
- size_ += written;
- }
-
- Writer* writer_;
-
- uintptr_t offset_;
- uintptr_t size_;
-};
-
-
-void ELFSection::PopulateHeader(Writer::Slot<ELFSection::Header> header,
- StringTable* strtab) {
- header->name = strtab->Add(name_);
- header->type = type_;
- header->alignment = align_;
- PopulateHeader(header);
-}
-
-
-class ELF BASE_EMBEDDED {
- public:
- ELF() : sections_(6) {
- sections_.Add(new ELFSection("", ELFSection::TYPE_NULL, 0));
- sections_.Add(new StringTable(".shstrtab"));
- }
-
- void Write(Writer* w) {
- WriteHeader(w);
- WriteSectionTable(w);
- WriteSections(w);
- }
-
- ELFSection* SectionAt(uint32_t index) {
- return sections_[index];
- }
-
- uint32_t AddSection(ELFSection* section) {
- sections_.Add(section);
- section->set_index(sections_.length() - 1);
- return sections_.length() - 1;
- }
-
- private:
- struct ELFHeader {
- uint8_t ident[16];
- uint16_t type;
- uint16_t machine;
- uint32_t version;
- uintptr_t entry;
- uintptr_t pht_offset;
- uintptr_t sht_offset;
- uint32_t flags;
- uint16_t header_size;
- uint16_t pht_entry_size;
- uint16_t pht_entry_num;
- uint16_t sht_entry_size;
- uint16_t sht_entry_num;
- uint16_t sht_strtab_index;
- };
-
-
- void WriteHeader(Writer* w) {
- ASSERT(w->position() == 0);
- Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>();
-#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM)
- const uint8_t ident[16] =
- { 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-#elif defined(V8_TARGET_ARCH_X64)
- const uint8_t ident[16] =
- { 0x7f, 'E', 'L', 'F', 2, 1, 1, 0, 0, 0 , 0, 0, 0, 0, 0, 0};
-#else
-#error Unsupported target architecture.
-#endif
- memcpy(header->ident, ident, 16);
- header->type = 1;
-#if defined(V8_TARGET_ARCH_IA32)
- header->machine = 3;
-#elif defined(V8_TARGET_ARCH_X64)
- // Processor identification value for x64 is 62 as defined in
- // System V ABI, AMD64 Supplement
- // http://www.x86-64.org/documentation/abi.pdf
- header->machine = 62;
-#elif defined(V8_TARGET_ARCH_ARM)
- // Set to EM_ARM, defined as 40, in "ARM ELF File Format" at
- // infocenter.arm.com/help/topic/com.arm.doc.dui0101a/DUI0101A_Elf.pdf
- header->machine = 40;
-#else
-#error Unsupported target architecture.
-#endif
- header->version = 1;
- header->entry = 0;
- header->pht_offset = 0;
- header->sht_offset = sizeof(ELFHeader); // Section table follows header.
- header->flags = 0;
- header->header_size = sizeof(ELFHeader);
- header->pht_entry_size = 0;
- header->pht_entry_num = 0;
- header->sht_entry_size = sizeof(ELFSection::Header);
- header->sht_entry_num = sections_.length();
- header->sht_strtab_index = 1;
- }
-
- void WriteSectionTable(Writer* w) {
- // Section headers table immediately follows file header.
- ASSERT(w->position() == sizeof(ELFHeader));
-
- Writer::Slot<ELFSection::Header> headers =
- w->CreateSlotsHere<ELFSection::Header>(sections_.length());
-
- // String table for section table is the first section.
- StringTable* strtab = static_cast<StringTable*>(SectionAt(1));
- strtab->AttachWriter(w);
- for (int i = 0, length = sections_.length();
- i < length;
- i++) {
- sections_[i]->PopulateHeader(headers.at(i), strtab);
- }
- strtab->DetachWriter();
- }
-
- int SectionHeaderPosition(uint32_t section_index) {
- return sizeof(ELFHeader) + sizeof(ELFSection::Header) * section_index;
- }
-
- void WriteSections(Writer* w) {
- Writer::Slot<ELFSection::Header> headers =
- w->SlotAt<ELFSection::Header>(sizeof(ELFHeader));
-
- for (int i = 0, length = sections_.length();
- i < length;
- i++) {
- sections_[i]->WriteBody(headers.at(i), w);
- }
- }
-
- ZoneList<ELFSection*> sections_;
-};
-
-
-class ELFSymbol BASE_EMBEDDED {
- public:
- enum Type {
- TYPE_NOTYPE = 0,
- TYPE_OBJECT = 1,
- TYPE_FUNC = 2,
- TYPE_SECTION = 3,
- TYPE_FILE = 4,
- TYPE_LOPROC = 13,
- TYPE_HIPROC = 15
- };
-
- enum Binding {
- BIND_LOCAL = 0,
- BIND_GLOBAL = 1,
- BIND_WEAK = 2,
- BIND_LOPROC = 13,
- BIND_HIPROC = 15
- };
-
- ELFSymbol(const char* name,
- uintptr_t value,
- uintptr_t size,
- Binding binding,
- Type type,
- uint16_t section)
- : name(name),
- value(value),
- size(size),
- info((binding << 4) | type),
- other(0),
- section(section) {
- }
-
- Binding binding() const {
- return static_cast<Binding>(info >> 4);
- }
-#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM)
- struct SerializedLayout {
- SerializedLayout(uint32_t name,
- uintptr_t value,
- uintptr_t size,
- Binding binding,
- Type type,
- uint16_t section)
- : name(name),
- value(value),
- size(size),
- info((binding << 4) | type),
- other(0),
- section(section) {
- }
-
- uint32_t name;
- uintptr_t value;
- uintptr_t size;
- uint8_t info;
- uint8_t other;
- uint16_t section;
- };
-#elif defined(V8_TARGET_ARCH_X64)
- struct SerializedLayout {
- SerializedLayout(uint32_t name,
- uintptr_t value,
- uintptr_t size,
- Binding binding,
- Type type,
- uint16_t section)
- : name(name),
- info((binding << 4) | type),
- other(0),
- section(section),
- value(value),
- size(size) {
- }
-
- uint32_t name;
- uint8_t info;
- uint8_t other;
- uint16_t section;
- uintptr_t value;
- uintptr_t size;
- };
-#endif
-
- void Write(Writer::Slot<SerializedLayout> s, StringTable* t) {
- // Convert symbol names from strings to indexes in the string table.
- s->name = t->Add(name);
- s->value = value;
- s->size = size;
- s->info = info;
- s->other = other;
- s->section = section;
- }
-
- private:
- const char* name;
- uintptr_t value;
- uintptr_t size;
- uint8_t info;
- uint8_t other;
- uint16_t section;
-};
-
-
-class ELFSymbolTable : public ELFSection {
- public:
- explicit ELFSymbolTable(const char* name)
- : ELFSection(name, TYPE_SYMTAB, sizeof(uintptr_t)),
- locals_(1),
- globals_(1) {
- }
-
- virtual void WriteBody(Writer::Slot<Header> header, Writer* w) {
- w->Align(header->alignment);
- int total_symbols = locals_.length() + globals_.length() + 1;
- header->offset = w->position();
-
- Writer::Slot<ELFSymbol::SerializedLayout> symbols =
- w->CreateSlotsHere<ELFSymbol::SerializedLayout>(total_symbols);
-
- header->size = w->position() - header->offset;
-
- // String table for this symbol table should follow it in the section table.
- StringTable* strtab =
- static_cast<StringTable*>(w->elf()->SectionAt(index() + 1));
- strtab->AttachWriter(w);
- symbols.at(0).set(ELFSymbol::SerializedLayout(0,
- 0,
- 0,
- ELFSymbol::BIND_LOCAL,
- ELFSymbol::TYPE_NOTYPE,
- 0));
- WriteSymbolsList(&locals_, symbols.at(1), strtab);
- WriteSymbolsList(&globals_, symbols.at(locals_.length() + 1), strtab);
- strtab->DetachWriter();
- }
-
- void Add(const ELFSymbol& symbol) {
- if (symbol.binding() == ELFSymbol::BIND_LOCAL) {
- locals_.Add(symbol);
- } else {
- globals_.Add(symbol);
- }
- }
-
- protected:
- virtual void PopulateHeader(Writer::Slot<Header> header) {
- ELFSection::PopulateHeader(header);
- // We are assuming that string table will follow symbol table.
- header->link = index() + 1;
- header->info = locals_.length() + 1;
- header->entry_size = sizeof(ELFSymbol::SerializedLayout);
- }
-
- private:
- void WriteSymbolsList(const ZoneList<ELFSymbol>* src,
- Writer::Slot<ELFSymbol::SerializedLayout> dst,
- StringTable* strtab) {
- for (int i = 0, len = src->length();
- i < len;
- i++) {
- src->at(i).Write(dst.at(i), strtab);
- }
- }
-
- ZoneList<ELFSymbol> locals_;
- ZoneList<ELFSymbol> globals_;
-};
-
-
-class CodeDescription BASE_EMBEDDED {
- public:
-
-#ifdef V8_TARGET_ARCH_X64
- enum StackState {
- POST_RBP_PUSH,
- POST_RBP_SET,
- POST_RBP_POP,
- STACK_STATE_MAX
- };
-#endif
-
- CodeDescription(const char* name,
- Code* code,
- Handle<Script> script,
- GDBJITLineInfo* lineinfo,
- GDBJITInterface::CodeTag tag)
- : name_(name),
- code_(code),
- script_(script),
- lineinfo_(lineinfo),
- tag_(tag) {
- }
-
- const char* name() const {
- return name_;
- }
-
- GDBJITLineInfo* lineinfo() const {
- return lineinfo_;
- }
-
- GDBJITInterface::CodeTag tag() const {
- return tag_;
- }
-
- uintptr_t CodeStart() const {
- return reinterpret_cast<uintptr_t>(code_->instruction_start());
- }
-
- uintptr_t CodeEnd() const {
- return reinterpret_cast<uintptr_t>(code_->instruction_end());
- }
-
- uintptr_t CodeSize() const {
- return CodeEnd() - CodeStart();
- }
-
- bool IsLineInfoAvailable() {
- return !script_.is_null() &&
- script_->source()->IsString() &&
- script_->HasValidSource() &&
- script_->name()->IsString() &&
- lineinfo_ != NULL;
- }
-
-#ifdef V8_TARGET_ARCH_X64
- uintptr_t GetStackStateStartAddress(StackState state) const {
- ASSERT(state < STACK_STATE_MAX);
- return stack_state_start_addresses_[state];
- }
-
- void SetStackStateStartAddress(StackState state, uintptr_t addr) {
- ASSERT(state < STACK_STATE_MAX);
- stack_state_start_addresses_[state] = addr;
- }
-#endif
-
- SmartPointer<char> GetFilename() {
- return String::cast(script_->name())->ToCString();
- }
-
- int GetScriptLineNumber(int pos) {
- return GetScriptLineNumberSafe(script_, pos) + 1;
- }
-
-
- private:
- const char* name_;
- Code* code_;
- Handle<Script> script_;
- GDBJITLineInfo* lineinfo_;
- GDBJITInterface::CodeTag tag_;
-#ifdef V8_TARGET_ARCH_X64
- uintptr_t stack_state_start_addresses_[STACK_STATE_MAX];
-#endif
-};
-
-
-static void CreateSymbolsTable(CodeDescription* desc,
- ELF* elf,
- int text_section_index) {
- ELFSymbolTable* symtab = new ELFSymbolTable(".symtab");
- StringTable* strtab = new StringTable(".strtab");
-
- // Symbol table should be followed by the linked string table.
- elf->AddSection(symtab);
- elf->AddSection(strtab);
-
- symtab->Add(ELFSymbol("V8 Code",
- 0,
- 0,
- ELFSymbol::BIND_LOCAL,
- ELFSymbol::TYPE_FILE,
- ELFSection::INDEX_ABSOLUTE));
-
- symtab->Add(ELFSymbol(desc->name(),
- 0,
- desc->CodeSize(),
- ELFSymbol::BIND_GLOBAL,
- ELFSymbol::TYPE_FUNC,
- text_section_index));
-}
-
-
-class DebugInfoSection : public ELFSection {
- public:
- explicit DebugInfoSection(CodeDescription* desc)
- : ELFSection(".debug_info", TYPE_PROGBITS, 1), desc_(desc) { }
-
- bool WriteBody(Writer* w) {
- Writer::Slot<uint32_t> size = w->CreateSlotHere<uint32_t>();
- uintptr_t start = w->position();
- w->Write<uint16_t>(2); // DWARF version.
- w->Write<uint32_t>(0); // Abbreviation table offset.
- w->Write<uint8_t>(sizeof(intptr_t));
-
- w->WriteULEB128(1); // Abbreviation code.
- w->WriteString(*desc_->GetFilename());
- w->Write<intptr_t>(desc_->CodeStart());
- w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
- w->Write<uint32_t>(0);
- size.set(static_cast<uint32_t>(w->position() - start));
- return true;
- }
-
- private:
- CodeDescription* desc_;
-};
-
-
-class DebugAbbrevSection : public ELFSection {
- public:
- DebugAbbrevSection() : ELFSection(".debug_abbrev", TYPE_PROGBITS, 1) { }
-
- // DWARF2 standard, figure 14.
- enum DWARF2Tags {
- DW_TAG_COMPILE_UNIT = 0x11
- };
-
- // DWARF2 standard, figure 16.
- enum DWARF2ChildrenDetermination {
- DW_CHILDREN_NO = 0,
- DW_CHILDREN_YES = 1
- };
-
- // DWARF standard, figure 17.
- enum DWARF2Attribute {
- DW_AT_NAME = 0x3,
- DW_AT_STMT_LIST = 0x10,
- DW_AT_LOW_PC = 0x11,
- DW_AT_HIGH_PC = 0x12
- };
-
- // DWARF2 standard, figure 19.
- enum DWARF2AttributeForm {
- DW_FORM_ADDR = 0x1,
- DW_FORM_STRING = 0x8,
- DW_FORM_DATA4 = 0x6
- };
-
- bool WriteBody(Writer* w) {
- w->WriteULEB128(1);
- w->WriteULEB128(DW_TAG_COMPILE_UNIT);
- w->Write<uint8_t>(DW_CHILDREN_NO);
- w->WriteULEB128(DW_AT_NAME);
- w->WriteULEB128(DW_FORM_STRING);
- w->WriteULEB128(DW_AT_LOW_PC);
- w->WriteULEB128(DW_FORM_ADDR);
- w->WriteULEB128(DW_AT_HIGH_PC);
- w->WriteULEB128(DW_FORM_ADDR);
- w->WriteULEB128(DW_AT_STMT_LIST);
- w->WriteULEB128(DW_FORM_DATA4);
- w->WriteULEB128(0);
- w->WriteULEB128(0);
- w->WriteULEB128(0);
- return true;
- }
-};
-
-
-class DebugLineSection : public ELFSection {
- public:
- explicit DebugLineSection(CodeDescription* desc)
- : ELFSection(".debug_line", TYPE_PROGBITS, 1),
- desc_(desc) { }
-
- // DWARF2 standard, figure 34.
- enum DWARF2Opcodes {
- DW_LNS_COPY = 1,
- DW_LNS_ADVANCE_PC = 2,
- DW_LNS_ADVANCE_LINE = 3,
- DW_LNS_SET_FILE = 4,
- DW_LNS_SET_COLUMN = 5,
- DW_LNS_NEGATE_STMT = 6
- };
-
- // DWARF2 standard, figure 35.
- enum DWARF2ExtendedOpcode {
- DW_LNE_END_SEQUENCE = 1,
- DW_LNE_SET_ADDRESS = 2,
- DW_LNE_DEFINE_FILE = 3
- };
-
- bool WriteBody(Writer* w) {
- // Write prologue.
- Writer::Slot<uint32_t> total_length = w->CreateSlotHere<uint32_t>();
- uintptr_t start = w->position();
-
- // Used for special opcodes
- const int8_t line_base = 1;
- const uint8_t line_range = 7;
- const int8_t max_line_incr = (line_base + line_range - 1);
- const uint8_t opcode_base = DW_LNS_NEGATE_STMT + 1;
-
- w->Write<uint16_t>(2); // Field version.
- Writer::Slot<uint32_t> prologue_length = w->CreateSlotHere<uint32_t>();
- uintptr_t prologue_start = w->position();
- w->Write<uint8_t>(1); // Field minimum_instruction_length.
- w->Write<uint8_t>(1); // Field default_is_stmt.
- w->Write<int8_t>(line_base); // Field line_base.
- w->Write<uint8_t>(line_range); // Field line_range.
- w->Write<uint8_t>(opcode_base); // Field opcode_base.
- w->Write<uint8_t>(0); // DW_LNS_COPY operands count.
- w->Write<uint8_t>(1); // DW_LNS_ADVANCE_PC operands count.
- w->Write<uint8_t>(1); // DW_LNS_ADVANCE_LINE operands count.
- w->Write<uint8_t>(1); // DW_LNS_SET_FILE operands count.
- w->Write<uint8_t>(1); // DW_LNS_SET_COLUMN operands count.
- w->Write<uint8_t>(0); // DW_LNS_NEGATE_STMT operands count.
- w->Write<uint8_t>(0); // Empty include_directories sequence.
- w->WriteString(*desc_->GetFilename()); // File name.
- w->WriteULEB128(0); // Current directory.
- w->WriteULEB128(0); // Unknown modification time.
- w->WriteULEB128(0); // Unknown file size.
- w->Write<uint8_t>(0);
- prologue_length.set(static_cast<uint32_t>(w->position() - prologue_start));
-
- WriteExtendedOpcode(w, DW_LNE_SET_ADDRESS, sizeof(intptr_t));
- w->Write<intptr_t>(desc_->CodeStart());
- w->Write<uint8_t>(DW_LNS_COPY);
-
- intptr_t pc = 0;
- intptr_t line = 1;
- bool is_statement = true;
-
- List<GDBJITLineInfo::PCInfo>* pc_info = desc_->lineinfo()->pc_info();
- pc_info->Sort(&ComparePCInfo);
-
- int pc_info_length = pc_info->length();
- for (int i = 0; i < pc_info_length; i++) {
- GDBJITLineInfo::PCInfo* info = &pc_info->at(i);
- ASSERT(info->pc_ >= pc);
-
- // Reduce bloating in the debug line table by removing duplicate line
- // entries (per DWARF2 standard).
- intptr_t new_line = desc_->GetScriptLineNumber(info->pos_);
- if (new_line == line) {
- continue;
- }
-
- // Mark statement boundaries. For a better debugging experience, mark
- // the last pc address in the function as a statement (e.g. "}"), so that
- // a user can see the result of the last line executed in the function,
- // should control reach the end.
- if ((i+1) == pc_info_length) {
- if (!is_statement) {
- w->Write<uint8_t>(DW_LNS_NEGATE_STMT);
- }
- } else if (is_statement != info->is_statement_) {
- w->Write<uint8_t>(DW_LNS_NEGATE_STMT);
- is_statement = !is_statement;
- }
-
- // Generate special opcodes, if possible. This results in more compact
- // debug line tables. See the DWARF 2.0 standard to learn more about
- // special opcodes.
- uintptr_t pc_diff = info->pc_ - pc;
- intptr_t line_diff = new_line - line;
-
- // Compute special opcode (see DWARF 2.0 standard)
- intptr_t special_opcode = (line_diff - line_base) +
- (line_range * pc_diff) + opcode_base;
-
- // If special_opcode is less than or equal to 255, it can be used as a
- // special opcode. If line_diff is larger than the max line increment
- // allowed for a special opcode, or if line_diff is less than the minimum
- // line that can be added to the line register (i.e. line_base), then
- // special_opcode can't be used.
- if ((special_opcode >= opcode_base) && (special_opcode <= 255) &&
- (line_diff <= max_line_incr) && (line_diff >= line_base)) {
- w->Write<uint8_t>(special_opcode);
- } else {
- w->Write<uint8_t>(DW_LNS_ADVANCE_PC);
- w->WriteSLEB128(pc_diff);
- w->Write<uint8_t>(DW_LNS_ADVANCE_LINE);
- w->WriteSLEB128(line_diff);
- w->Write<uint8_t>(DW_LNS_COPY);
- }
-
- // Increment the pc and line operands.
- pc += pc_diff;
- line += line_diff;
- }
- // Advance the pc to the end of the routine, since the end sequence opcode
- // requires this.
- w->Write<uint8_t>(DW_LNS_ADVANCE_PC);
- w->WriteSLEB128(desc_->CodeSize() - pc);
- WriteExtendedOpcode(w, DW_LNE_END_SEQUENCE, 0);
- total_length.set(static_cast<uint32_t>(w->position() - start));
- return true;
- }
-
- private:
- void WriteExtendedOpcode(Writer* w,
- DWARF2ExtendedOpcode op,
- size_t operands_size) {
- w->Write<uint8_t>(0);
- w->WriteULEB128(operands_size + 1);
- w->Write<uint8_t>(op);
- }
-
- static int ComparePCInfo(const GDBJITLineInfo::PCInfo* a,
- const GDBJITLineInfo::PCInfo* b) {
- if (a->pc_ == b->pc_) {
- if (a->is_statement_ != b->is_statement_) {
- return b->is_statement_ ? +1 : -1;
- }
- return 0;
- } else if (a->pc_ > b->pc_) {
- return +1;
- } else {
- return -1;
- }
- }
-
- CodeDescription* desc_;
-};
-
-
-#ifdef V8_TARGET_ARCH_X64
-
-
-class UnwindInfoSection : public ELFSection {
- public:
- explicit UnwindInfoSection(CodeDescription *desc);
- virtual bool WriteBody(Writer *w);
-
- int WriteCIE(Writer *w);
- void WriteFDE(Writer *w, int);
-
- void WriteFDEStateOnEntry(Writer *w);
- void WriteFDEStateAfterRBPPush(Writer *w);
- void WriteFDEStateAfterRBPSet(Writer *w);
- void WriteFDEStateAfterRBPPop(Writer *w);
-
- void WriteLength(Writer *w,
- Writer::Slot<uint32_t>* length_slot,
- int initial_position);
-
- private:
- CodeDescription *desc_;
-
- // DWARF3 Specification, Table 7.23
- enum CFIInstructions {
- DW_CFA_ADVANCE_LOC = 0x40,
- DW_CFA_OFFSET = 0x80,
- DW_CFA_RESTORE = 0xC0,
- DW_CFA_NOP = 0x00,
- DW_CFA_SET_LOC = 0x01,
- DW_CFA_ADVANCE_LOC1 = 0x02,
- DW_CFA_ADVANCE_LOC2 = 0x03,
- DW_CFA_ADVANCE_LOC4 = 0x04,
- DW_CFA_OFFSET_EXTENDED = 0x05,
- DW_CFA_RESTORE_EXTENDED = 0x06,
- DW_CFA_UNDEFINED = 0x07,
- DW_CFA_SAME_VALUE = 0x08,
- DW_CFA_REGISTER = 0x09,
- DW_CFA_REMEMBER_STATE = 0x0A,
- DW_CFA_RESTORE_STATE = 0x0B,
- DW_CFA_DEF_CFA = 0x0C,
- DW_CFA_DEF_CFA_REGISTER = 0x0D,
- DW_CFA_DEF_CFA_OFFSET = 0x0E,
-
- DW_CFA_DEF_CFA_EXPRESSION = 0x0F,
- DW_CFA_EXPRESSION = 0x10,
- DW_CFA_OFFSET_EXTENDED_SF = 0x11,
- DW_CFA_DEF_CFA_SF = 0x12,
- DW_CFA_DEF_CFA_OFFSET_SF = 0x13,
- DW_CFA_VAL_OFFSET = 0x14,
- DW_CFA_VAL_OFFSET_SF = 0x15,
- DW_CFA_VAL_EXPRESSION = 0x16
- };
-
- // System V ABI, AMD64 Supplement, Version 0.99.5, Figure 3.36
- enum RegisterMapping {
- // Only the relevant ones have been added to reduce clutter.
- AMD64_RBP = 6,
- AMD64_RSP = 7,
- AMD64_RA = 16
- };
-
- enum CFIConstants {
- CIE_ID = 0,
- CIE_VERSION = 1,
- CODE_ALIGN_FACTOR = 1,
- DATA_ALIGN_FACTOR = 1,
- RETURN_ADDRESS_REGISTER = AMD64_RA
- };
-};
-
-
-void UnwindInfoSection::WriteLength(Writer *w,
- Writer::Slot<uint32_t>* length_slot,
- int initial_position) {
- uint32_t align = (w->position() - initial_position) % kPointerSize;
-
- if (align != 0) {
- for (uint32_t i = 0; i < (kPointerSize - align); i++) {
- w->Write<uint8_t>(DW_CFA_NOP);
- }
- }
-
- ASSERT((w->position() - initial_position) % kPointerSize == 0);
- length_slot->set(w->position() - initial_position);
-}
-
-
-UnwindInfoSection::UnwindInfoSection(CodeDescription *desc)
- : ELFSection(".eh_frame", TYPE_X86_64_UNWIND, 1), desc_(desc)
-{ }
-
-int UnwindInfoSection::WriteCIE(Writer *w) {
- Writer::Slot<uint32_t> cie_length_slot = w->CreateSlotHere<uint32_t>();
- uint32_t cie_position = w->position();
-
- // Write out the CIE header. Currently no 'common instructions' are
- // emitted onto the CIE; every FDE has its own set of instructions.
-
- w->Write<uint32_t>(CIE_ID);
- w->Write<uint8_t>(CIE_VERSION);
- w->Write<uint8_t>(0); // Null augmentation string.
- w->WriteSLEB128(CODE_ALIGN_FACTOR);
- w->WriteSLEB128(DATA_ALIGN_FACTOR);
- w->Write<uint8_t>(RETURN_ADDRESS_REGISTER);
-
- WriteLength(w, &cie_length_slot, cie_position);
-
- return cie_position;
-}
-
-
-void UnwindInfoSection::WriteFDE(Writer *w, int cie_position) {
- // The only FDE for this function. The CFA is the current RBP.
- Writer::Slot<uint32_t> fde_length_slot = w->CreateSlotHere<uint32_t>();
- int fde_position = w->position();
- w->Write<int32_t>(fde_position - cie_position + 4);
-
- w->Write<uintptr_t>(desc_->CodeStart());
- w->Write<uintptr_t>(desc_->CodeSize());
-
- WriteFDEStateOnEntry(w);
- WriteFDEStateAfterRBPPush(w);
- WriteFDEStateAfterRBPSet(w);
- WriteFDEStateAfterRBPPop(w);
-
- WriteLength(w, &fde_length_slot, fde_position);
-}
-
-
-void UnwindInfoSection::WriteFDEStateOnEntry(Writer *w) {
- // The first state, just after the control has been transferred to the the
- // function.
-
- // RBP for this function will be the value of RSP after pushing the RBP
- // for the previous function. The previous RBP has not been pushed yet.
- w->Write<uint8_t>(DW_CFA_DEF_CFA_SF);
- w->WriteULEB128(AMD64_RSP);
- w->WriteSLEB128(-kPointerSize);
-
- // The RA is stored at location CFA + kCallerPCOffset. This is an invariant,
- // and hence omitted from the next states.
- w->Write<uint8_t>(DW_CFA_OFFSET_EXTENDED);
- w->WriteULEB128(AMD64_RA);
- w->WriteSLEB128(StandardFrameConstants::kCallerPCOffset);
-
- // The RBP of the previous function is still in RBP.
- w->Write<uint8_t>(DW_CFA_SAME_VALUE);
- w->WriteULEB128(AMD64_RBP);
-
- // Last location described by this entry.
- w->Write<uint8_t>(DW_CFA_SET_LOC);
- w->Write<uint64_t>(
- desc_->GetStackStateStartAddress(CodeDescription::POST_RBP_PUSH));
-}
-
-
-void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer *w) {
- // The second state, just after RBP has been pushed.
-
- // RBP / CFA for this function is now the current RSP, so just set the
- // offset from the previous rule (from -8) to 0.
- w->Write<uint8_t>(DW_CFA_DEF_CFA_OFFSET);
- w->WriteULEB128(0);
-
- // The previous RBP is stored at CFA + kCallerFPOffset. This is an invariant
- // in this and the next state, and hence omitted in the next state.
- w->Write<uint8_t>(DW_CFA_OFFSET_EXTENDED);
- w->WriteULEB128(AMD64_RBP);
- w->WriteSLEB128(StandardFrameConstants::kCallerFPOffset);
-
- // Last location described by this entry.
- w->Write<uint8_t>(DW_CFA_SET_LOC);
- w->Write<uint64_t>(
- desc_->GetStackStateStartAddress(CodeDescription::POST_RBP_SET));
-}
-
-
-void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer *w) {
- // The third state, after the RBP has been set.
-
- // The CFA can now directly be set to RBP.
- w->Write<uint8_t>(DW_CFA_DEF_CFA);
- w->WriteULEB128(AMD64_RBP);
- w->WriteULEB128(0);
-
- // Last location described by this entry.
- w->Write<uint8_t>(DW_CFA_SET_LOC);
- w->Write<uint64_t>(
- desc_->GetStackStateStartAddress(CodeDescription::POST_RBP_POP));
-}
-
-
-void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer *w) {
- // The fourth (final) state. The RBP has been popped (just before issuing a
- // return).
-
- // The CFA can is now calculated in the same way as in the first state.
- w->Write<uint8_t>(DW_CFA_DEF_CFA_SF);
- w->WriteULEB128(AMD64_RSP);
- w->WriteSLEB128(-kPointerSize);
-
- // The RBP
- w->Write<uint8_t>(DW_CFA_OFFSET_EXTENDED);
- w->WriteULEB128(AMD64_RBP);
- w->WriteSLEB128(StandardFrameConstants::kCallerFPOffset);
-
- // Last location described by this entry.
- w->Write<uint8_t>(DW_CFA_SET_LOC);
- w->Write<uint64_t>(desc_->CodeEnd());
-}
-
-
-bool UnwindInfoSection::WriteBody(Writer *w) {
- uint32_t cie_position = WriteCIE(w);
- WriteFDE(w, cie_position);
- return true;
-}
-
-
-#endif // V8_TARGET_ARCH_X64
-
-
-static void CreateDWARFSections(CodeDescription* desc, ELF* elf) {
- if (desc->IsLineInfoAvailable()) {
- elf->AddSection(new DebugInfoSection(desc));
- elf->AddSection(new DebugAbbrevSection);
- elf->AddSection(new DebugLineSection(desc));
- }
-#ifdef V8_TARGET_ARCH_X64
- elf->AddSection(new UnwindInfoSection(desc));
-#endif
-}
-
-
-// -------------------------------------------------------------------
-// Binary GDB JIT Interface as described in
-// http://sourceware.org/gdb/onlinedocs/gdb/Declarations.html
-extern "C" {
- typedef enum {
- JIT_NOACTION = 0,
- JIT_REGISTER_FN,
- JIT_UNREGISTER_FN
- } JITAction;
-
- struct JITCodeEntry {
- JITCodeEntry* next_;
- JITCodeEntry* prev_;
- Address symfile_addr_;
- uint64_t symfile_size_;
- };
-
- struct JITDescriptor {
- uint32_t version_;
- uint32_t action_flag_;
- JITCodeEntry *relevant_entry_;
- JITCodeEntry *first_entry_;
- };
-
- // GDB will place breakpoint into this function.
- // To prevent GCC from inlining or removing it we place noinline attribute
- // and inline assembler statement inside.
- void __attribute__((noinline)) __jit_debug_register_code() {
- __asm__("");
- }
-
- // GDB will inspect contents of this descriptor.
- // Static initialization is necessary to prevent GDB from seeing
- // uninitialized descriptor.
- JITDescriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
-}
-
-
-static JITCodeEntry* CreateCodeEntry(Address symfile_addr,
- uintptr_t symfile_size) {
- JITCodeEntry* entry = static_cast<JITCodeEntry*>(
- malloc(sizeof(JITCodeEntry) + symfile_size));
-
- entry->symfile_addr_ = reinterpret_cast<Address>(entry + 1);
- entry->symfile_size_ = symfile_size;
- memcpy(entry->symfile_addr_, symfile_addr, symfile_size);
-
- entry->prev_ = entry->next_ = NULL;
-
- return entry;
-}
-
-
-static void DestroyCodeEntry(JITCodeEntry* entry) {
- free(entry);
-}
-
-
-static void RegisterCodeEntry(JITCodeEntry* entry) {
-#if defined(DEBUG) && !defined(WIN32)
- static int file_num = 0;
- if (FLAG_gdbjit_dump) {
- static const int kMaxFileNameSize = 64;
- static const char* kElfFilePrefix = "/tmp/elfdump";
- static const char* kObjFileExt = ".o";
- char file_name[64];
-
- OS::SNPrintF(Vector<char>(file_name, kMaxFileNameSize), "%s%d%s",
- kElfFilePrefix, file_num++, kObjFileExt);
- WriteBytes(file_name, entry->symfile_addr_, entry->symfile_size_);
- }
-#endif
-
- entry->next_ = __jit_debug_descriptor.first_entry_;
- if (entry->next_ != NULL) entry->next_->prev_ = entry;
- __jit_debug_descriptor.first_entry_ =
- __jit_debug_descriptor.relevant_entry_ = entry;
-
- __jit_debug_descriptor.action_flag_ = JIT_REGISTER_FN;
- __jit_debug_register_code();
-}
-
-
-static void UnregisterCodeEntry(JITCodeEntry* entry) {
- if (entry->prev_ != NULL) {
- entry->prev_->next_ = entry->next_;
- } else {
- __jit_debug_descriptor.first_entry_ = entry->next_;
- }
-
- if (entry->next_ != NULL) {
- entry->next_->prev_ = entry->prev_;
- }
-
- __jit_debug_descriptor.relevant_entry_ = entry;
- __jit_debug_descriptor.action_flag_ = JIT_UNREGISTER_FN;
- __jit_debug_register_code();
-}
-
-
-static JITCodeEntry* CreateELFObject(CodeDescription* desc) {
- ZoneScope zone_scope(DELETE_ON_EXIT);
-
- ELF elf;
- Writer w(&elf);
-
- int text_section_index = elf.AddSection(
- new FullHeaderELFSection(".text",
- ELFSection::TYPE_NOBITS,
- kCodeAlignment,
- desc->CodeStart(),
- 0,
- desc->CodeSize(),
- ELFSection::FLAG_ALLOC | ELFSection::FLAG_EXEC));
-
- CreateSymbolsTable(desc, &elf, text_section_index);
-
- CreateDWARFSections(desc, &elf);
-
- elf.Write(&w);
-
- return CreateCodeEntry(w.buffer(), w.position());
-}
-
-
-static bool SameCodeObjects(void* key1, void* key2) {
- return key1 == key2;
-}
-
-
-static HashMap* GetEntries() {
- static HashMap* entries = NULL;
- if (entries == NULL) {
- entries = new HashMap(&SameCodeObjects);
- }
- return entries;
-}
-
-
-static uint32_t HashForCodeObject(Code* code) {
- static const uintptr_t kGoldenRatio = 2654435761u;
- uintptr_t hash = reinterpret_cast<uintptr_t>(code->address());
- return static_cast<uint32_t>((hash >> kCodeAlignmentBits) * kGoldenRatio);
-}
-
-
-static const intptr_t kLineInfoTag = 0x1;
-
-
-static bool IsLineInfoTagged(void* ptr) {
- return 0 != (reinterpret_cast<intptr_t>(ptr) & kLineInfoTag);
-}
-
-
-static void* TagLineInfo(GDBJITLineInfo* ptr) {
- return reinterpret_cast<void*>(
- reinterpret_cast<intptr_t>(ptr) | kLineInfoTag);
-}
-
-
-static GDBJITLineInfo* UntagLineInfo(void* ptr) {
- return reinterpret_cast<GDBJITLineInfo*>(
- reinterpret_cast<intptr_t>(ptr) & ~kLineInfoTag);
-}
-
-
-void GDBJITInterface::AddCode(Handle<String> name,
- Handle<Script> script,
- Handle<Code> code) {
- if (!FLAG_gdbjit) return;
-
- // Force initialization of line_ends array.
- GetScriptLineNumber(script, 0);
-
- if (!name.is_null()) {
- SmartPointer<char> name_cstring = name->ToCString(DISALLOW_NULLS);
- AddCode(*name_cstring, *code, GDBJITInterface::FUNCTION, *script);
- } else {
- AddCode("", *code, GDBJITInterface::FUNCTION, *script);
- }
-}
-
-static void AddUnwindInfo(CodeDescription *desc) {
-#ifdef V8_TARGET_ARCH_X64
- if (desc->tag() == GDBJITInterface::FUNCTION) {
- // To avoid propagating unwinding information through
- // compilation pipeline we use an approximation.
- // For most use cases this should not affect usability.
- static const int kFramePointerPushOffset = 1;
- static const int kFramePointerSetOffset = 4;
- static const int kFramePointerPopOffset = -3;
-
- uintptr_t frame_pointer_push_address =
- desc->CodeStart() + kFramePointerPushOffset;
-
- uintptr_t frame_pointer_set_address =
- desc->CodeStart() + kFramePointerSetOffset;
-
- uintptr_t frame_pointer_pop_address =
- desc->CodeEnd() + kFramePointerPopOffset;
-
- desc->SetStackStateStartAddress(CodeDescription::POST_RBP_PUSH,
- frame_pointer_push_address);
- desc->SetStackStateStartAddress(CodeDescription::POST_RBP_SET,
- frame_pointer_set_address);
- desc->SetStackStateStartAddress(CodeDescription::POST_RBP_POP,
- frame_pointer_pop_address);
- } else {
- desc->SetStackStateStartAddress(CodeDescription::POST_RBP_PUSH,
- desc->CodeStart());
- desc->SetStackStateStartAddress(CodeDescription::POST_RBP_SET,
- desc->CodeStart());
- desc->SetStackStateStartAddress(CodeDescription::POST_RBP_POP,
- desc->CodeEnd());
- }
-#endif // V8_TARGET_ARCH_X64
-}
-
-
-void GDBJITInterface::AddCode(const char* name,
- Code* code,
- GDBJITInterface::CodeTag tag,
- Script* script) {
- if (!FLAG_gdbjit) return;
- AssertNoAllocation no_gc;
-
- HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
- if (e->value != NULL && !IsLineInfoTagged(e->value)) return;
-
- GDBJITLineInfo* lineinfo = UntagLineInfo(e->value);
- CodeDescription code_desc(name,
- code,
- script != NULL ? Handle<Script>(script)
- : Handle<Script>(),
- lineinfo,
- tag);
-
- if (!FLAG_gdbjit_full && !code_desc.IsLineInfoAvailable()) {
- delete lineinfo;
- GetEntries()->Remove(code, HashForCodeObject(code));
- return;
- }
-
- AddUnwindInfo(&code_desc);
- JITCodeEntry* entry = CreateELFObject(&code_desc);
- ASSERT(!IsLineInfoTagged(entry));
-
- delete lineinfo;
- e->value = entry;
-
- RegisterCodeEntry(entry);
-}
-
-
-void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag,
- const char* name,
- Code* code) {
- if (!FLAG_gdbjit) return;
-
- EmbeddedVector<char, 256> buffer;
- StringBuilder builder(buffer.start(), buffer.length());
-
- builder.AddString(Tag2String(tag));
- if ((name != NULL) && (*name != '\0')) {
- builder.AddString(": ");
- builder.AddString(name);
- } else {
- builder.AddFormatted(": code object %p", static_cast<void*>(code));
- }
-
- AddCode(builder.Finalize(), code, tag);
-}
-
-
-void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag,
- String* name,
- Code* code) {
- if (!FLAG_gdbjit) return;
- AddCode(tag, name != NULL ? *name->ToCString(DISALLOW_NULLS) : NULL, code);
-}
-
-
-void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag, Code* code) {
- if (!FLAG_gdbjit) return;
-
- AddCode(tag, "", code);
-}
-
-
-void GDBJITInterface::RemoveCode(Code* code) {
- if (!FLAG_gdbjit) return;
-
- HashMap::Entry* e = GetEntries()->Lookup(code,
- HashForCodeObject(code),
- false);
- if (e == NULL) return;
-
- if (IsLineInfoTagged(e->value)) {
- delete UntagLineInfo(e->value);
- } else {
- JITCodeEntry* entry = static_cast<JITCodeEntry*>(e->value);
- UnregisterCodeEntry(entry);
- DestroyCodeEntry(entry);
- }
- e->value = NULL;
- GetEntries()->Remove(code, HashForCodeObject(code));
-}
-
-
-void GDBJITInterface::RegisterDetailedLineInfo(Code* code,
- GDBJITLineInfo* line_info) {
- ASSERT(!IsLineInfoTagged(line_info));
- HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
- ASSERT(e->value == NULL);
- e->value = TagLineInfo(line_info);
-}
-
-
-} } // namespace v8::internal
-#endif
diff --git a/src/3rdparty/v8/src/gdb-jit.h b/src/3rdparty/v8/src/gdb-jit.h
deleted file mode 100644
index d46fec6..0000000
--- a/src/3rdparty/v8/src/gdb-jit.h
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_GDB_JIT_H_
-#define V8_GDB_JIT_H_
-
-//
-// Basic implementation of GDB JIT Interface client.
-// GBD JIT Interface is supported in GDB 7.0 and above.
-// Currently on x64 and ia32 architectures and Linux OS are supported.
-//
-
-#ifdef ENABLE_GDB_JIT_INTERFACE
-#include "v8.h"
-#include "factory.h"
-
-namespace v8 {
-namespace internal {
-
-#define CODE_TAGS_LIST(V) \
- V(LOAD_IC) \
- V(KEYED_LOAD_IC) \
- V(STORE_IC) \
- V(KEYED_STORE_IC) \
- V(CALL_IC) \
- V(CALL_INITIALIZE) \
- V(CALL_PRE_MONOMORPHIC) \
- V(CALL_NORMAL) \
- V(CALL_MEGAMORPHIC) \
- V(CALL_MISS) \
- V(STUB) \
- V(BUILTIN) \
- V(SCRIPT) \
- V(EVAL) \
- V(FUNCTION)
-
-class GDBJITLineInfo : public Malloced {
- public:
- GDBJITLineInfo()
- : pc_info_(10) { }
-
- void SetPosition(intptr_t pc, int pos, bool is_statement) {
- AddPCInfo(PCInfo(pc, pos, is_statement));
- }
-
- struct PCInfo {
- PCInfo(intptr_t pc, int pos, bool is_statement)
- : pc_(pc), pos_(pos), is_statement_(is_statement) { }
-
- intptr_t pc_;
- int pos_;
- bool is_statement_;
- };
-
- List<PCInfo>* pc_info() {
- return &pc_info_;
- }
-
- private:
- void AddPCInfo(const PCInfo& pc_info) {
- pc_info_.Add(pc_info);
- }
-
- List<PCInfo> pc_info_;
-};
-
-
-class GDBJITInterface: public AllStatic {
- public:
- enum CodeTag {
-#define V(x) x,
- CODE_TAGS_LIST(V)
-#undef V
- TAG_COUNT
- };
-
- static const char* Tag2String(CodeTag tag) {
- switch (tag) {
-#define V(x) case x: return #x;
- CODE_TAGS_LIST(V)
-#undef V
- default:
- return NULL;
- }
- }
-
- static void AddCode(const char* name,
- Code* code,
- CodeTag tag,
- Script* script = NULL);
-
- static void AddCode(Handle<String> name,
- Handle<Script> script,
- Handle<Code> code);
-
- static void AddCode(CodeTag tag, String* name, Code* code);
-
- static void AddCode(CodeTag tag, const char* name, Code* code);
-
- static void AddCode(CodeTag tag, Code* code);
-
- static void RemoveCode(Code* code);
-
- static void RegisterDetailedLineInfo(Code* code, GDBJITLineInfo* line_info);
-};
-
-#define GDBJIT(action) GDBJITInterface::action
-
-} } // namespace v8::internal
-#else
-#define GDBJIT(action) ((void) 0)
-#endif
-
-#endif
diff --git a/src/3rdparty/v8/src/global-handles.cc b/src/3rdparty/v8/src/global-handles.cc
deleted file mode 100644
index 4d13859..0000000
--- a/src/3rdparty/v8/src/global-handles.cc
+++ /dev/null
@@ -1,596 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "global-handles.h"
-
-#include "vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-ObjectGroup::~ObjectGroup() {
- if (info_ != NULL) info_->Dispose();
-}
-
-
-class GlobalHandles::Node : public Malloced {
- public:
-
- void Initialize(Object* object) {
- // Set the initial value of the handle.
- object_ = object;
- class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
- state_ = NORMAL;
- parameter_or_next_free_.parameter = NULL;
- callback_ = NULL;
- }
-
- Node() {
- state_ = DESTROYED;
- }
-
- explicit Node(Object* object) {
- Initialize(object);
- // Initialize link structure.
- next_ = NULL;
- }
-
- ~Node() {
- if (state_ != DESTROYED) Destroy(Isolate::Current()->global_handles());
-#ifdef DEBUG
- // Zap the values for eager trapping.
- object_ = NULL;
- next_ = NULL;
- parameter_or_next_free_.next_free = NULL;
-#endif
- }
-
- void Destroy(GlobalHandles* global_handles) {
- if (state_ == WEAK || IsNearDeath()) {
- global_handles->number_of_weak_handles_--;
- if (object_->IsJSGlobalObject()) {
- global_handles->number_of_global_object_weak_handles_--;
- }
- }
- state_ = DESTROYED;
- }
-
- // Accessors for next_.
- Node* next() { return next_; }
- void set_next(Node* value) { next_ = value; }
- Node** next_addr() { return &next_; }
-
- // Accessors for next free node in the free list.
- Node* next_free() {
- ASSERT(state_ == DESTROYED);
- return parameter_or_next_free_.next_free;
- }
- void set_next_free(Node* value) {
- ASSERT(state_ == DESTROYED);
- parameter_or_next_free_.next_free = value;
- }
-
- // Returns a link from the handle.
- static Node* FromLocation(Object** location) {
- ASSERT(OFFSET_OF(Node, object_) == 0);
- return reinterpret_cast<Node*>(location);
- }
-
- // Returns the handle.
- Handle<Object> handle() { return Handle<Object>(&object_); }
-
- // Make this handle weak.
- void MakeWeak(GlobalHandles* global_handles, void* parameter,
- WeakReferenceCallback callback) {
- LOG(global_handles->isolate(),
- HandleEvent("GlobalHandle::MakeWeak", handle().location()));
- ASSERT(state_ != DESTROYED);
- if (state_ != WEAK && !IsNearDeath()) {
- global_handles->number_of_weak_handles_++;
- if (object_->IsJSGlobalObject()) {
- global_handles->number_of_global_object_weak_handles_++;
- }
- }
- state_ = WEAK;
- set_parameter(parameter);
- callback_ = callback;
- }
-
- void ClearWeakness(GlobalHandles* global_handles) {
- LOG(global_handles->isolate(),
- HandleEvent("GlobalHandle::ClearWeakness", handle().location()));
- ASSERT(state_ != DESTROYED);
- if (state_ == WEAK || IsNearDeath()) {
- global_handles->number_of_weak_handles_--;
- if (object_->IsJSGlobalObject()) {
- global_handles->number_of_global_object_weak_handles_--;
- }
- }
- state_ = NORMAL;
- set_parameter(NULL);
- }
-
- bool IsNearDeath() {
- // Check for PENDING to ensure correct answer when processing callbacks.
- return state_ == PENDING || state_ == NEAR_DEATH;
- }
-
- bool IsWeak() {
- return state_ == WEAK;
- }
-
- bool CanBeRetainer() {
- return state_ != DESTROYED && state_ != NEAR_DEATH;
- }
-
- void SetWrapperClassId(uint16_t class_id) {
- class_id_ = class_id;
- }
-
- // Returns the id for this weak handle.
- void set_parameter(void* parameter) {
- ASSERT(state_ != DESTROYED);
- parameter_or_next_free_.parameter = parameter;
- }
- void* parameter() {
- ASSERT(state_ != DESTROYED);
- return parameter_or_next_free_.parameter;
- }
-
- // Returns the callback for this weak handle.
- WeakReferenceCallback callback() { return callback_; }
-
- bool PostGarbageCollectionProcessing(Isolate* isolate,
- GlobalHandles* global_handles) {
- if (state_ != Node::PENDING) return false;
- LOG(isolate, HandleEvent("GlobalHandle::Processing", handle().location()));
- WeakReferenceCallback func = callback();
- if (func == NULL) {
- Destroy(global_handles);
- return false;
- }
- void* par = parameter();
- state_ = NEAR_DEATH;
- set_parameter(NULL);
-
- v8::Persistent<v8::Object> object = ToApi<v8::Object>(handle());
- {
- // Forbid reuse of destroyed nodes as they might be already deallocated.
- // It's fine though to reuse nodes that were destroyed in weak callback
- // as those cannot be deallocated until we are back from the callback.
- global_handles->set_first_free(NULL);
- if (global_handles->first_deallocated()) {
- global_handles->first_deallocated()->set_next(global_handles->head());
- }
- // Check that we are not passing a finalized external string to
- // the callback.
- ASSERT(!object_->IsExternalAsciiString() ||
- ExternalAsciiString::cast(object_)->resource() != NULL);
- ASSERT(!object_->IsExternalTwoByteString() ||
- ExternalTwoByteString::cast(object_)->resource() != NULL);
- // Leaving V8.
- VMState state(isolate, EXTERNAL);
- func(object, par);
- }
- // Absense of explicit cleanup or revival of weak handle
- // in most of the cases would lead to memory leak.
- ASSERT(state_ != NEAR_DEATH);
- return true;
- }
-
- // Place the handle address first to avoid offset computation.
- Object* object_; // Storage for object pointer.
-
- uint16_t class_id_;
-
- // Transition diagram:
- // NORMAL <-> WEAK -> PENDING -> NEAR_DEATH -> { NORMAL, WEAK, DESTROYED }
- enum State {
- NORMAL, // Normal global handle.
- WEAK, // Flagged as weak but not yet finalized.
- PENDING, // Has been recognized as only reachable by weak handles.
- NEAR_DEATH, // Callback has informed the handle is near death.
- DESTROYED
- };
- State state_ : 4; // Need one more bit for MSVC as it treats enums as signed.
-
- private:
- // Handle specific callback.
- WeakReferenceCallback callback_;
- // Provided data for callback. In DESTROYED state, this is used for
- // the free list link.
- union {
- void* parameter;
- Node* next_free;
- } parameter_or_next_free_;
-
- // Linkage for the list.
- Node* next_;
-
- public:
- TRACK_MEMORY("GlobalHandles::Node")
-};
-
-
-class GlobalHandles::Pool {
- public:
- Pool() {
- current_ = new Chunk();
- current_->previous = NULL;
- next_ = current_->nodes;
- limit_ = current_->nodes + kNodesPerChunk;
- }
-
- ~Pool() {
- if (current_ != NULL) {
- Release();
- }
- }
-
- Node* Allocate() {
- if (next_ < limit_) {
- return next_++;
- }
- return SlowAllocate();
- }
-
- void Release() {
- Chunk* current = current_;
- ASSERT(current != NULL); // At least a single block must by allocated
- do {
- Chunk* previous = current->previous;
- delete current;
- current = previous;
- } while (current != NULL);
- current_ = NULL;
- next_ = limit_ = NULL;
- }
-
- private:
- static const int kNodesPerChunk = (1 << 12) - 1;
- struct Chunk : public Malloced {
- Chunk* previous;
- Node nodes[kNodesPerChunk];
- };
-
- Node* SlowAllocate() {
- Chunk* chunk = new Chunk();
- chunk->previous = current_;
- current_ = chunk;
-
- Node* new_nodes = current_->nodes;
- next_ = new_nodes + 1;
- limit_ = new_nodes + kNodesPerChunk;
- return new_nodes;
- }
-
- Chunk* current_;
- Node* next_;
- Node* limit_;
-};
-
-
-GlobalHandles::GlobalHandles(Isolate* isolate)
- : isolate_(isolate),
- number_of_weak_handles_(0),
- number_of_global_object_weak_handles_(0),
- head_(NULL),
- first_free_(NULL),
- first_deallocated_(NULL),
- pool_(new Pool()),
- post_gc_processing_count_(0),
- object_groups_(4) {
-}
-
-
-GlobalHandles::~GlobalHandles() {
- delete pool_;
- pool_ = 0;
-}
-
-
-Handle<Object> GlobalHandles::Create(Object* value) {
- isolate_->counters()->global_handles()->Increment();
- Node* result;
- if (first_free()) {
- // Take the first node in the free list.
- result = first_free();
- set_first_free(result->next_free());
- } else if (first_deallocated()) {
- // Next try deallocated list
- result = first_deallocated();
- set_first_deallocated(result->next_free());
- ASSERT(result->next() == head());
- set_head(result);
- } else {
- // Allocate a new node.
- result = pool_->Allocate();
- result->set_next(head());
- set_head(result);
- }
- result->Initialize(value);
- return result->handle();
-}
-
-
-void GlobalHandles::Destroy(Object** location) {
- isolate_->counters()->global_handles()->Decrement();
- if (location == NULL) return;
- Node* node = Node::FromLocation(location);
- node->Destroy(this);
- // Link the destroyed.
- node->set_next_free(first_free());
- set_first_free(node);
-}
-
-
-void GlobalHandles::MakeWeak(Object** location, void* parameter,
- WeakReferenceCallback callback) {
- ASSERT(callback != NULL);
- Node::FromLocation(location)->MakeWeak(this, parameter, callback);
-}
-
-
-void GlobalHandles::ClearWeakness(Object** location) {
- Node::FromLocation(location)->ClearWeakness(this);
-}
-
-
-bool GlobalHandles::IsNearDeath(Object** location) {
- return Node::FromLocation(location)->IsNearDeath();
-}
-
-
-bool GlobalHandles::IsWeak(Object** location) {
- return Node::FromLocation(location)->IsWeak();
-}
-
-
-void GlobalHandles::SetWrapperClassId(Object** location, uint16_t class_id) {
- Node::FromLocation(location)->SetWrapperClassId(class_id);
-}
-
-
-void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
- // Traversal of GC roots in the global handle list that are marked as
- // WEAK or PENDING.
- for (Node* current = head_; current != NULL; current = current->next()) {
- if (current->state_ == Node::WEAK
- || current->state_ == Node::PENDING
- || current->state_ == Node::NEAR_DEATH) {
- v->VisitPointer(&current->object_);
- }
- }
-}
-
-
-void GlobalHandles::IterateWeakRoots(WeakReferenceGuest f,
- WeakReferenceCallback callback) {
- for (Node* current = head_; current != NULL; current = current->next()) {
- if (current->IsWeak() && current->callback() == callback) {
- f(current->object_, current->parameter());
- }
- }
-}
-
-
-void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {
- for (Node* current = head_; current != NULL; current = current->next()) {
- if (current->state_ == Node::WEAK) {
- if (f(&current->object_)) {
- current->state_ = Node::PENDING;
- LOG(isolate_,
- HandleEvent("GlobalHandle::Pending", current->handle().location()));
- }
- }
- }
-}
-
-
-bool GlobalHandles::PostGarbageCollectionProcessing() {
- // Process weak global handle callbacks. This must be done after the
- // GC is completely done, because the callbacks may invoke arbitrary
- // API functions.
- // At the same time deallocate all DESTROYED nodes.
- ASSERT(isolate_->heap()->gc_state() == Heap::NOT_IN_GC);
- const int initial_post_gc_processing_count = ++post_gc_processing_count_;
- bool next_gc_likely_to_collect_more = false;
- Node** p = &head_;
- while (*p != NULL) {
- if ((*p)->PostGarbageCollectionProcessing(isolate_, this)) {
- if (initial_post_gc_processing_count != post_gc_processing_count_) {
- // Weak callback triggered another GC and another round of
- // PostGarbageCollection processing. The current node might
- // have been deleted in that round, so we need to bail out (or
- // restart the processing).
- break;
- }
- }
- if ((*p)->state_ == Node::DESTROYED) {
- // Delete the link.
- Node* node = *p;
- *p = node->next(); // Update the link.
- if (first_deallocated()) {
- first_deallocated()->set_next(node);
- }
- node->set_next_free(first_deallocated());
- set_first_deallocated(node);
- next_gc_likely_to_collect_more = true;
- } else {
- p = (*p)->next_addr();
- }
- }
- set_first_free(NULL);
- if (first_deallocated()) {
- first_deallocated()->set_next(head());
- }
-
- return next_gc_likely_to_collect_more;
-}
-
-
-void GlobalHandles::IterateStrongRoots(ObjectVisitor* v) {
- // Traversal of global handles marked as NORMAL.
- for (Node* current = head_; current != NULL; current = current->next()) {
- if (current->state_ == Node::NORMAL) {
- v->VisitPointer(&current->object_);
- }
- }
-}
-
-
-void GlobalHandles::IterateAllRoots(ObjectVisitor* v) {
- for (Node* current = head_; current != NULL; current = current->next()) {
- if (current->state_ != Node::DESTROYED) {
- v->VisitPointer(&current->object_);
- }
- }
-}
-
-
-void GlobalHandles::IterateAllRootsWithClassIds(ObjectVisitor* v) {
- for (Node* current = head_; current != NULL; current = current->next()) {
- if (current->class_id_ != v8::HeapProfiler::kPersistentHandleNoClassId &&
- current->CanBeRetainer()) {
- v->VisitEmbedderReference(&current->object_, current->class_id_);
- }
- }
-}
-
-
-void GlobalHandles::TearDown() {
- // Reset all the lists.
- set_head(NULL);
- set_first_free(NULL);
- set_first_deallocated(NULL);
- pool_->Release();
-}
-
-
-void GlobalHandles::RecordStats(HeapStats* stats) {
- *stats->global_handle_count = 0;
- *stats->weak_global_handle_count = 0;
- *stats->pending_global_handle_count = 0;
- *stats->near_death_global_handle_count = 0;
- *stats->destroyed_global_handle_count = 0;
- for (Node* current = head_; current != NULL; current = current->next()) {
- *stats->global_handle_count += 1;
- if (current->state_ == Node::WEAK) {
- *stats->weak_global_handle_count += 1;
- } else if (current->state_ == Node::PENDING) {
- *stats->pending_global_handle_count += 1;
- } else if (current->state_ == Node::NEAR_DEATH) {
- *stats->near_death_global_handle_count += 1;
- } else if (current->state_ == Node::DESTROYED) {
- *stats->destroyed_global_handle_count += 1;
- }
- }
-}
-
-#ifdef DEBUG
-
-void GlobalHandles::PrintStats() {
- int total = 0;
- int weak = 0;
- int pending = 0;
- int near_death = 0;
- int destroyed = 0;
-
- for (Node* current = head_; current != NULL; current = current->next()) {
- total++;
- if (current->state_ == Node::WEAK) weak++;
- if (current->state_ == Node::PENDING) pending++;
- if (current->state_ == Node::NEAR_DEATH) near_death++;
- if (current->state_ == Node::DESTROYED) destroyed++;
- }
-
- PrintF("Global Handle Statistics:\n");
- PrintF(" allocated memory = %" V8_PTR_PREFIX "dB\n", sizeof(Node) * total);
- PrintF(" # weak = %d\n", weak);
- PrintF(" # pending = %d\n", pending);
- PrintF(" # near_death = %d\n", near_death);
- PrintF(" # destroyed = %d\n", destroyed);
- PrintF(" # total = %d\n", total);
-}
-
-void GlobalHandles::Print() {
- PrintF("Global handles:\n");
- for (Node* current = head_; current != NULL; current = current->next()) {
- PrintF(" handle %p to %p (weak=%d)\n",
- reinterpret_cast<void*>(current->handle().location()),
- reinterpret_cast<void*>(*current->handle()),
- current->state_ == Node::WEAK);
- }
-}
-
-#endif
-
-
-
-void GlobalHandles::AddObjectGroup(Object*** handles,
- size_t length,
- v8::RetainedObjectInfo* info) {
- ObjectGroup* new_entry = new ObjectGroup(length, info);
- for (size_t i = 0; i < length; ++i) {
- new_entry->objects_.Add(handles[i]);
- }
- object_groups_.Add(new_entry);
-}
-
-
-void GlobalHandles::AddImplicitReferences(HeapObject* parent,
- Object*** children,
- size_t length) {
- ImplicitRefGroup* new_entry = new ImplicitRefGroup(parent, length);
- for (size_t i = 0; i < length; ++i) {
- new_entry->children_.Add(children[i]);
- }
- implicit_ref_groups_.Add(new_entry);
-}
-
-
-void GlobalHandles::RemoveObjectGroups() {
- for (int i = 0; i < object_groups_.length(); i++) {
- delete object_groups_.at(i);
- }
- object_groups_.Clear();
-}
-
-
-void GlobalHandles::RemoveImplicitRefGroups() {
- for (int i = 0; i < implicit_ref_groups_.length(); i++) {
- delete implicit_ref_groups_.at(i);
- }
- implicit_ref_groups_.Clear();
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/global-handles.h b/src/3rdparty/v8/src/global-handles.h
deleted file mode 100644
index a6afb2d..0000000
--- a/src/3rdparty/v8/src/global-handles.h
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_GLOBAL_HANDLES_H_
-#define V8_GLOBAL_HANDLES_H_
-
-#include "list-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Structure for tracking global handles.
-// A single list keeps all the allocated global handles.
-// Destroyed handles stay in the list but is added to the free list.
-// At GC the destroyed global handles are removed from the free list
-// and deallocated.
-
-// An object group is treated like a single JS object: if one of object in
-// the group is alive, all objects in the same group are considered alive.
-// An object group is used to simulate object relationship in a DOM tree.
-class ObjectGroup : public Malloced {
- public:
- ObjectGroup() : objects_(4) {}
- ObjectGroup(size_t capacity, v8::RetainedObjectInfo* info)
- : objects_(static_cast<int>(capacity)),
- info_(info) { }
- ~ObjectGroup();
-
- List<Object**> objects_;
- v8::RetainedObjectInfo* info_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ObjectGroup);
-};
-
-
-// An implicit references group consists of two parts: a parent object and
-// a list of children objects. If the parent is alive, all the children
-// are alive too.
-class ImplicitRefGroup : public Malloced {
- public:
- ImplicitRefGroup() : children_(4) {}
- ImplicitRefGroup(HeapObject* parent, size_t capacity)
- : parent_(parent),
- children_(static_cast<int>(capacity)) { }
-
- HeapObject* parent_;
- List<Object**> children_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ImplicitRefGroup);
-};
-
-
-typedef void (*WeakReferenceGuest)(Object* object, void* parameter);
-
-class GlobalHandles {
- public:
- ~GlobalHandles();
-
- // Creates a new global handle that is alive until Destroy is called.
- Handle<Object> Create(Object* value);
-
- // Destroy a global handle.
- void Destroy(Object** location);
-
- // Make the global handle weak and set the callback parameter for the
- // handle. When the garbage collector recognizes that only weak global
- // handles point to an object the handles are cleared and the callback
- // function is invoked (for each handle) with the handle and corresponding
- // parameter as arguments. Note: cleared means set to Smi::FromInt(0). The
- // reason is that Smi::FromInt(0) does not change during garage collection.
- void MakeWeak(Object** location,
- void* parameter,
- WeakReferenceCallback callback);
-
- static void SetWrapperClassId(Object** location, uint16_t class_id);
-
- // Returns the current number of weak handles.
- int NumberOfWeakHandles() { return number_of_weak_handles_; }
-
- void RecordStats(HeapStats* stats);
-
- // Returns the current number of weak handles to global objects.
- // These handles are also included in NumberOfWeakHandles().
- int NumberOfGlobalObjectWeakHandles() {
- return number_of_global_object_weak_handles_;
- }
-
- // Clear the weakness of a global handle.
- void ClearWeakness(Object** location);
-
- // Tells whether global handle is near death.
- static bool IsNearDeath(Object** location);
-
- // Tells whether global handle is weak.
- static bool IsWeak(Object** location);
-
- // Process pending weak handles.
- // Returns true if next major GC is likely to collect more garbage.
- bool PostGarbageCollectionProcessing();
-
- // Iterates over all strong handles.
- void IterateStrongRoots(ObjectVisitor* v);
-
- // Iterates over all handles.
- void IterateAllRoots(ObjectVisitor* v);
-
- // Iterates over all handles that have embedder-assigned class ID.
- void IterateAllRootsWithClassIds(ObjectVisitor* v);
-
- // Iterates over all weak roots in heap.
- void IterateWeakRoots(ObjectVisitor* v);
-
- // Iterates over weak roots that are bound to a given callback.
- void IterateWeakRoots(WeakReferenceGuest f,
- WeakReferenceCallback callback);
-
- // Find all weak handles satisfying the callback predicate, mark
- // them as pending.
- void IdentifyWeakHandles(WeakSlotCallback f);
-
- // Add an object group.
- // Should be only used in GC callback function before a collection.
- // All groups are destroyed after a mark-compact collection.
- void AddObjectGroup(Object*** handles,
- size_t length,
- v8::RetainedObjectInfo* info);
-
- // Add an implicit references' group.
- // Should be only used in GC callback function before a collection.
- // All groups are destroyed after a mark-compact collection.
- void AddImplicitReferences(HeapObject* parent,
- Object*** children,
- size_t length);
-
- // Returns the object groups.
- List<ObjectGroup*>* object_groups() { return &object_groups_; }
-
- // Returns the implicit references' groups.
- List<ImplicitRefGroup*>* implicit_ref_groups() {
- return &implicit_ref_groups_;
- }
-
- // Remove bags, this should only happen after GC.
- void RemoveObjectGroups();
- void RemoveImplicitRefGroups();
-
- // Tear down the global handle structure.
- void TearDown();
-
- Isolate* isolate() { return isolate_; }
-
-#ifdef DEBUG
- void PrintStats();
- void Print();
-#endif
- class Pool;
- private:
- explicit GlobalHandles(Isolate* isolate);
-
- // Internal node structure, one for each global handle.
- class Node;
-
- Isolate* isolate_;
-
- // Field always containing the number of weak and near-death handles.
- int number_of_weak_handles_;
-
- // Field always containing the number of weak and near-death handles
- // to global objects. These objects are also included in
- // number_of_weak_handles_.
- int number_of_global_object_weak_handles_;
-
- // Global handles are kept in a single linked list pointed to by head_.
- Node* head_;
- Node* head() { return head_; }
- void set_head(Node* value) { head_ = value; }
-
- // Free list for DESTROYED global handles not yet deallocated.
- Node* first_free_;
- Node* first_free() { return first_free_; }
- void set_first_free(Node* value) { first_free_ = value; }
-
- // List of deallocated nodes.
- // Deallocated nodes form a prefix of all the nodes and
- // |first_deallocated| points to last deallocated node before
- // |head|. Those deallocated nodes are additionally linked
- // by |next_free|:
- // 1st deallocated head
- // | |
- // V V
- // node node ... node node
- // .next -> .next -> .next ->
- // <- .next_free <- .next_free <- .next_free
- Node* first_deallocated_;
- Node* first_deallocated() { return first_deallocated_; }
- void set_first_deallocated(Node* value) {
- first_deallocated_ = value;
- }
-
- Pool* pool_;
- int post_gc_processing_count_;
- List<ObjectGroup*> object_groups_;
- List<ImplicitRefGroup*> implicit_ref_groups_;
-
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(GlobalHandles);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_GLOBAL_HANDLES_H_
diff --git a/src/3rdparty/v8/src/globals.h b/src/3rdparty/v8/src/globals.h
deleted file mode 100644
index 5ab9806..0000000
--- a/src/3rdparty/v8/src/globals.h
+++ /dev/null
@@ -1,325 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_GLOBALS_H_
-#define V8_GLOBALS_H_
-
-#include "../include/v8stdint.h"
-
-namespace v8 {
-namespace internal {
-
-// Processor architecture detection. For more info on what's defined, see:
-// http://msdn.microsoft.com/en-us/library/b0084kay.aspx
-// http://www.agner.org/optimize/calling_conventions.pdf
-// or with gcc, run: "echo | gcc -E -dM -"
-#if defined(_M_X64) || defined(__x86_64__)
-#define V8_HOST_ARCH_X64 1
-#define V8_HOST_ARCH_64_BIT 1
-#define V8_HOST_CAN_READ_UNALIGNED 1
-#elif defined(_M_IX86) || defined(__i386__)
-#define V8_HOST_ARCH_IA32 1
-#define V8_HOST_ARCH_32_BIT 1
-#define V8_HOST_CAN_READ_UNALIGNED 1
-#elif defined(__ARMEL__)
-#define V8_HOST_ARCH_ARM 1
-#define V8_HOST_ARCH_32_BIT 1
-// Some CPU-OS combinations allow unaligned access on ARM. We assume
-// that unaligned accesses are not allowed unless the build system
-// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
-#if CAN_USE_UNALIGNED_ACCESSES
-#define V8_HOST_CAN_READ_UNALIGNED 1
-#endif
-#elif defined(__MIPSEL__)
-#define V8_HOST_ARCH_MIPS 1
-#define V8_HOST_ARCH_32_BIT 1
-#else
-#error Host architecture was not detected as supported by v8
-#endif
-
-// Target architecture detection. This may be set externally. If not, detect
-// in the same way as the host architecture, that is, target the native
-// environment as presented by the compiler.
-#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_IA32) && \
- !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)
-#if defined(_M_X64) || defined(__x86_64__)
-#define V8_TARGET_ARCH_X64 1
-#elif defined(_M_IX86) || defined(__i386__)
-#define V8_TARGET_ARCH_IA32 1
-#elif defined(__ARMEL__)
-#define V8_TARGET_ARCH_ARM 1
-#elif defined(__MIPSEL__)
-#define V8_TARGET_ARCH_MIPS 1
-#else
-#error Target architecture was not detected as supported by v8
-#endif
-#endif
-
-// Check for supported combinations of host and target architectures.
-#if defined(V8_TARGET_ARCH_IA32) && !defined(V8_HOST_ARCH_IA32)
-#error Target architecture ia32 is only supported on ia32 host
-#endif
-#if defined(V8_TARGET_ARCH_X64) && !defined(V8_HOST_ARCH_X64)
-#error Target architecture x64 is only supported on x64 host
-#endif
-#if (defined(V8_TARGET_ARCH_ARM) && \
- !(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_ARM)))
-#error Target architecture arm is only supported on arm and ia32 host
-#endif
-#if (defined(V8_TARGET_ARCH_MIPS) && \
- !(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_MIPS)))
-#error Target architecture mips is only supported on mips and ia32 host
-#endif
-
-// Determine whether we are running in a simulated environment.
-// Setting USE_SIMULATOR explicitly from the build script will force
-// the use of a simulated environment.
-#if !defined(USE_SIMULATOR)
-#if (defined(V8_TARGET_ARCH_ARM) && !defined(V8_HOST_ARCH_ARM))
-#define USE_SIMULATOR 1
-#endif
-#if (defined(V8_TARGET_ARCH_MIPS) && !defined(V8_HOST_ARCH_MIPS))
-#define USE_SIMULATOR 1
-#endif
-#endif
-
-// Define unaligned read for the target architectures supporting it.
-#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
-#define V8_TARGET_CAN_READ_UNALIGNED 1
-#elif V8_TARGET_ARCH_ARM
-// Some CPU-OS combinations allow unaligned access on ARM. We assume
-// that unaligned accesses are not allowed unless the build system
-// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
-#if CAN_USE_UNALIGNED_ACCESSES
-#define V8_TARGET_CAN_READ_UNALIGNED 1
-#endif
-#elif V8_TARGET_ARCH_MIPS
-#else
-#error Target architecture is not supported by v8
-#endif
-
-// Support for alternative bool type. This is only enabled if the code is
-// compiled with USE_MYBOOL defined. This catches some nasty type bugs.
-// For instance, 'bool b = "false";' results in b == true! This is a hidden
-// source of bugs.
-// However, redefining the bool type does have some negative impact on some
-// platforms. It gives rise to compiler warnings (i.e. with
-// MSVC) in the API header files when mixing code that uses the standard
-// bool with code that uses the redefined version.
-// This does not actually belong in the platform code, but needs to be
-// defined here because the platform code uses bool, and platform.h is
-// include very early in the main include file.
-
-#ifdef USE_MYBOOL
-typedef unsigned int __my_bool__;
-#define bool __my_bool__ // use 'indirection' to avoid name clashes
-#endif
-
-typedef uint8_t byte;
-typedef byte* Address;
-
-// Define our own macros for writing 64-bit constants. This is less fragile
-// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
-// works on compilers that don't have it (like MSVC).
-#if V8_HOST_ARCH_64_BIT
-#ifdef _MSC_VER
-#define V8_UINT64_C(x) (x ## UI64)
-#define V8_INT64_C(x) (x ## I64)
-#define V8_INTPTR_C(x) (x ## I64)
-#define V8_PTR_PREFIX "ll"
-#else // _MSC_VER
-#define V8_UINT64_C(x) (x ## UL)
-#define V8_INT64_C(x) (x ## L)
-#define V8_INTPTR_C(x) (x ## L)
-#define V8_PTR_PREFIX "l"
-#endif // _MSC_VER
-#else // V8_HOST_ARCH_64_BIT
-#define V8_INTPTR_C(x) (x)
-#define V8_PTR_PREFIX ""
-#endif // V8_HOST_ARCH_64_BIT
-
-// The following macro works on both 32 and 64-bit platforms.
-// Usage: instead of writing 0x1234567890123456
-// write V8_2PART_UINT64_C(0x12345678,90123456);
-#define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
-
-#define V8PRIxPTR V8_PTR_PREFIX "x"
-#define V8PRIdPTR V8_PTR_PREFIX "d"
-
-// Fix for Mac OS X defining uintptr_t as "unsigned long":
-#if defined(__APPLE__) && defined(__MACH__)
-#undef V8PRIxPTR
-#define V8PRIxPTR "lx"
-#endif
-
-#if (defined(__APPLE__) && defined(__MACH__)) || \
- defined(__FreeBSD__) || defined(__OpenBSD__)
-#define USING_BSD_ABI
-#endif
-
-// -----------------------------------------------------------------------------
-// Constants
-
-const int KB = 1024;
-const int MB = KB * KB;
-const int GB = KB * KB * KB;
-const int kMaxInt = 0x7FFFFFFF;
-const int kMinInt = -kMaxInt - 1;
-
-const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
-
-const int kCharSize = sizeof(char); // NOLINT
-const int kShortSize = sizeof(short); // NOLINT
-const int kIntSize = sizeof(int); // NOLINT
-const int kDoubleSize = sizeof(double); // NOLINT
-const int kIntptrSize = sizeof(intptr_t); // NOLINT
-const int kPointerSize = sizeof(void*); // NOLINT
-
-#if V8_HOST_ARCH_64_BIT
-const int kPointerSizeLog2 = 3;
-const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
-const uintptr_t kUintptrAllBitsSet = V8_UINT64_C(0xFFFFFFFFFFFFFFFF);
-#else
-const int kPointerSizeLog2 = 2;
-const intptr_t kIntptrSignBit = 0x80000000;
-const uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
-#endif
-
-const int kBitsPerByte = 8;
-const int kBitsPerByteLog2 = 3;
-const int kBitsPerPointer = kPointerSize * kBitsPerByte;
-const int kBitsPerInt = kIntSize * kBitsPerByte;
-
-// IEEE 754 single precision floating point number bit layout.
-const uint32_t kBinary32SignMask = 0x80000000u;
-const uint32_t kBinary32ExponentMask = 0x7f800000u;
-const uint32_t kBinary32MantissaMask = 0x007fffffu;
-const int kBinary32ExponentBias = 127;
-const int kBinary32MaxExponent = 0xFE;
-const int kBinary32MinExponent = 0x01;
-const int kBinary32MantissaBits = 23;
-const int kBinary32ExponentShift = 23;
-
-// ASCII/UC16 constants
-// Code-point values in Unicode 4.0 are 21 bits wide.
-typedef uint16_t uc16;
-typedef int32_t uc32;
-const int kASCIISize = kCharSize;
-const int kUC16Size = sizeof(uc16); // NOLINT
-const uc32 kMaxAsciiCharCode = 0x7f;
-const uint32_t kMaxAsciiCharCodeU = 0x7fu;
-
-
-// The expression OFFSET_OF(type, field) computes the byte-offset
-// of the specified field relative to the containing type. This
-// corresponds to 'offsetof' (in stddef.h), except that it doesn't
-// use 0 or NULL, which causes a problem with the compiler warnings
-// we have enabled (which is also why 'offsetof' doesn't seem to work).
-// Here we simply use the non-zero value 4, which seems to work.
-#define OFFSET_OF(type, field) \
- (reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(4)->field)) - 4)
-
-
-// The expression ARRAY_SIZE(a) is a compile-time constant of type
-// size_t which represents the number of elements of the given
-// array. You should only use ARRAY_SIZE on statically allocated
-// arrays.
-#define ARRAY_SIZE(a) \
- ((sizeof(a) / sizeof(*(a))) / \
- static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
-
-
-// The USE(x) template is used to silence C++ compiler warnings
-// issued for (yet) unused variables (typically parameters).
-template <typename T>
-static inline void USE(T) { }
-
-
-// FUNCTION_ADDR(f) gets the address of a C function f.
-#define FUNCTION_ADDR(f) \
- (reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(f)))
-
-
-// FUNCTION_CAST<F>(addr) casts an address into a function
-// of type F. Used to invoke generated code from within C.
-template <typename F>
-F FUNCTION_CAST(Address addr) {
- return reinterpret_cast<F>(reinterpret_cast<intptr_t>(addr));
-}
-
-
-// A macro to disallow the evil copy constructor and operator= functions
-// This should be used in the private: declarations for a class
-#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
- TypeName(const TypeName&); \
- void operator=(const TypeName&)
-
-
-// A macro to disallow all the implicit constructors, namely the
-// default constructor, copy constructor and operator= functions.
-//
-// This should be used in the private: declarations for a class
-// that wants to prevent anyone from instantiating it. This is
-// especially useful for classes containing only static methods.
-#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
- TypeName(); \
- DISALLOW_COPY_AND_ASSIGN(TypeName)
-
-
-// Define used for helping GCC to make better inlining. Don't bother for debug
-// builds. On GCC 3.4.5 using __attribute__((always_inline)) causes compilation
-// errors in debug build.
-#if defined(__GNUC__) && !defined(DEBUG)
-#if (__GNUC__ >= 4)
-#define INLINE(header) inline header __attribute__((always_inline))
-#define NO_INLINE(header) header __attribute__((noinline))
-#else
-#define INLINE(header) inline __attribute__((always_inline)) header
-#define NO_INLINE(header) __attribute__((noinline)) header
-#endif
-#else
-#define INLINE(header) inline header
-#define NO_INLINE(header) header
-#endif
-
-
-#if defined(__GNUC__) && __GNUC__ >= 4
-#define MUST_USE_RESULT __attribute__ ((warn_unused_result))
-#else
-#define MUST_USE_RESULT
-#endif
-
-// -----------------------------------------------------------------------------
-// Forward declarations for frequently used classes
-// (sorted alphabetically)
-
-class FreeStoreAllocationPolicy;
-template <typename T, class P = FreeStoreAllocationPolicy> class List;
-
-} } // namespace v8::internal
-
-#endif // V8_GLOBALS_H_
diff --git a/src/3rdparty/v8/src/handles-inl.h b/src/3rdparty/v8/src/handles-inl.h
deleted file mode 100644
index a5c81ce..0000000
--- a/src/3rdparty/v8/src/handles-inl.h
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#ifndef V8_HANDLES_INL_H_
-#define V8_HANDLES_INL_H_
-
-#include "api.h"
-#include "apiutils.h"
-#include "handles.h"
-#include "isolate.h"
-
-namespace v8 {
-namespace internal {
-
-inline Isolate* GetIsolateForHandle(Object* obj) {
- return Isolate::Current();
-}
-
-inline Isolate* GetIsolateForHandle(HeapObject* obj) {
- return obj->GetIsolate();
-}
-
-template<typename T>
-Handle<T>::Handle(T* obj) {
- ASSERT(!obj->IsFailure());
- location_ = HandleScope::CreateHandle(obj, GetIsolateForHandle(obj));
-}
-
-
-template<typename T>
-Handle<T>::Handle(T* obj, Isolate* isolate) {
- ASSERT(!obj->IsFailure());
- location_ = HandleScope::CreateHandle(obj, isolate);
-}
-
-
-template <typename T>
-inline T* Handle<T>::operator*() const {
- ASSERT(location_ != NULL);
- ASSERT(reinterpret_cast<Address>(*location_) != kHandleZapValue);
- return *BitCast<T**>(location_);
-}
-
-
-HandleScope::HandleScope() {
- Isolate* isolate = Isolate::Current();
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
- isolate_ = isolate;
- prev_next_ = current->next;
- prev_limit_ = current->limit;
- current->level++;
-}
-
-
-HandleScope::HandleScope(Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
- isolate_ = isolate;
- prev_next_ = current->next;
- prev_limit_ = current->limit;
- current->level++;
-}
-
-
-HandleScope::~HandleScope() {
- CloseScope();
-}
-
-void HandleScope::CloseScope() {
- ASSERT(isolate_ == Isolate::Current());
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
- current->next = prev_next_;
- current->level--;
- if (current->limit != prev_limit_) {
- current->limit = prev_limit_;
- DeleteExtensions(isolate_);
- }
-#ifdef DEBUG
- ZapRange(prev_next_, prev_limit_);
-#endif
-}
-
-
-template <typename T>
-Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
- T* value = *handle_value;
- // Throw away all handles in the current scope.
- CloseScope();
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
- // Allocate one handle in the parent scope.
- ASSERT(current->level > 0);
- Handle<T> result(CreateHandle<T>(value, isolate_));
- // Reinitialize the current scope (so that it's ready
- // to be used or closed again).
- prev_next_ = current->next;
- prev_limit_ = current->limit;
- current->level++;
- return result;
-}
-
-
-template <typename T>
-T** HandleScope::CreateHandle(T* value, Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
-
- internal::Object** cur = current->next;
- if (cur == current->limit) cur = Extend();
- // Update the current next field, set the value in the created
- // handle, and return the result.
- ASSERT(cur < current->limit);
- current->next = cur + 1;
-
- T** result = reinterpret_cast<T**>(cur);
- *result = value;
- return result;
-}
-
-
-#ifdef DEBUG
-inline NoHandleAllocation::NoHandleAllocation() {
- v8::ImplementationUtilities::HandleScopeData* current =
- Isolate::Current()->handle_scope_data();
-
- // Shrink the current handle scope to make it impossible to do
- // handle allocations without an explicit handle scope.
- current->limit = current->next;
-
- level_ = current->level;
- current->level = 0;
-}
-
-
-inline NoHandleAllocation::~NoHandleAllocation() {
- // Restore state in current handle scope to re-enable handle
- // allocations.
- v8::ImplementationUtilities::HandleScopeData* data =
- Isolate::Current()->handle_scope_data();
- ASSERT_EQ(0, data->level);
- data->level = level_;
-}
-#endif
-
-
-} } // namespace v8::internal
-
-#endif // V8_HANDLES_INL_H_
diff --git a/src/3rdparty/v8/src/handles.cc b/src/3rdparty/v8/src/handles.cc
deleted file mode 100644
index 97a06d9..0000000
--- a/src/3rdparty/v8/src/handles.cc
+++ /dev/null
@@ -1,965 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "accessors.h"
-#include "api.h"
-#include "arguments.h"
-#include "bootstrapper.h"
-#include "compiler.h"
-#include "debug.h"
-#include "execution.h"
-#include "global-handles.h"
-#include "natives.h"
-#include "runtime.h"
-#include "string-search.h"
-#include "stub-cache.h"
-#include "vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-int HandleScope::NumberOfHandles() {
- Isolate* isolate = Isolate::Current();
- HandleScopeImplementer* impl = isolate->handle_scope_implementer();
- int n = impl->blocks()->length();
- if (n == 0) return 0;
- return ((n - 1) * kHandleBlockSize) + static_cast<int>(
- (isolate->handle_scope_data()->next - impl->blocks()->last()));
-}
-
-
-Object** HandleScope::Extend() {
- Isolate* isolate = Isolate::Current();
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
-
- Object** result = current->next;
-
- ASSERT(result == current->limit);
- // Make sure there's at least one scope on the stack and that the
- // top of the scope stack isn't a barrier.
- if (current->level == 0) {
- Utils::ReportApiFailure("v8::HandleScope::CreateHandle()",
- "Cannot create a handle without a HandleScope");
- return NULL;
- }
- HandleScopeImplementer* impl = isolate->handle_scope_implementer();
- // If there's more room in the last block, we use that. This is used
- // for fast creation of scopes after scope barriers.
- if (!impl->blocks()->is_empty()) {
- Object** limit = &impl->blocks()->last()[kHandleBlockSize];
- if (current->limit != limit) {
- current->limit = limit;
- ASSERT(limit - current->next < kHandleBlockSize);
- }
- }
-
- // If we still haven't found a slot for the handle, we extend the
- // current handle scope by allocating a new handle block.
- if (result == current->limit) {
- // If there's a spare block, use it for growing the current scope.
- result = impl->GetSpareOrNewBlock();
- // Add the extension to the global list of blocks, but count the
- // extension as part of the current scope.
- impl->blocks()->Add(result);
- current->limit = &result[kHandleBlockSize];
- }
-
- return result;
-}
-
-
-void HandleScope::DeleteExtensions(Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
- isolate->handle_scope_implementer()->DeleteExtensions(current->limit);
-}
-
-
-void HandleScope::ZapRange(Object** start, Object** end) {
- ASSERT(end - start <= kHandleBlockSize);
- for (Object** p = start; p != end; p++) {
- *reinterpret_cast<Address*>(p) = v8::internal::kHandleZapValue;
- }
-}
-
-
-Address HandleScope::current_level_address() {
- return reinterpret_cast<Address>(
- &Isolate::Current()->handle_scope_data()->level);
-}
-
-
-Address HandleScope::current_next_address() {
- return reinterpret_cast<Address>(
- &Isolate::Current()->handle_scope_data()->next);
-}
-
-
-Address HandleScope::current_limit_address() {
- return reinterpret_cast<Address>(
- &Isolate::Current()->handle_scope_data()->limit);
-}
-
-
-Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray> content,
- Handle<JSArray> array) {
- CALL_HEAP_FUNCTION(content->GetIsolate(),
- content->AddKeysFromJSArray(*array), FixedArray);
-}
-
-
-Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first,
- Handle<FixedArray> second) {
- CALL_HEAP_FUNCTION(first->GetIsolate(),
- first->UnionOfKeys(*second), FixedArray);
-}
-
-
-Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
- Handle<JSFunction> constructor,
- Handle<JSGlobalProxy> global) {
- CALL_HEAP_FUNCTION(
- constructor->GetIsolate(),
- constructor->GetHeap()->ReinitializeJSGlobalProxy(*constructor, *global),
- JSGlobalProxy);
-}
-
-
-void SetExpectedNofProperties(Handle<JSFunction> func, int nof) {
- // If objects constructed from this function exist then changing
- // 'estimated_nof_properties' is dangerous since the previous value might
- // have been compiled into the fast construct stub. More over, the inobject
- // slack tracking logic might have adjusted the previous value, so even
- // passing the same value is risky.
- if (func->shared()->live_objects_may_exist()) return;
-
- func->shared()->set_expected_nof_properties(nof);
- if (func->has_initial_map()) {
- Handle<Map> new_initial_map =
- func->GetIsolate()->factory()->CopyMapDropTransitions(
- Handle<Map>(func->initial_map()));
- new_initial_map->set_unused_property_fields(nof);
- func->set_initial_map(*new_initial_map);
- }
-}
-
-
-void SetPrototypeProperty(Handle<JSFunction> func, Handle<JSObject> value) {
- CALL_HEAP_FUNCTION_VOID(func->GetIsolate(),
- func->SetPrototype(*value));
-}
-
-
-static int ExpectedNofPropertiesFromEstimate(int estimate) {
- // If no properties are added in the constructor, they are more likely
- // to be added later.
- if (estimate == 0) estimate = 2;
-
- // We do not shrink objects that go into a snapshot (yet), so we adjust
- // the estimate conservatively.
- if (Serializer::enabled()) return estimate + 2;
-
- // Inobject slack tracking will reclaim redundant inobject space later,
- // so we can afford to adjust the estimate generously.
- return estimate + 8;
-}
-
-
-void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
- int estimate) {
- // See the comment in SetExpectedNofProperties.
- if (shared->live_objects_may_exist()) return;
-
- shared->set_expected_nof_properties(
- ExpectedNofPropertiesFromEstimate(estimate));
-}
-
-
-void NormalizeProperties(Handle<JSObject> object,
- PropertyNormalizationMode mode,
- int expected_additional_properties) {
- CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
- object->NormalizeProperties(
- mode,
- expected_additional_properties));
-}
-
-
-void NormalizeElements(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
- object->NormalizeElements());
-}
-
-
-void TransformToFastProperties(Handle<JSObject> object,
- int unused_property_fields) {
- CALL_HEAP_FUNCTION_VOID(
- object->GetIsolate(),
- object->TransformToFastProperties(unused_property_fields));
-}
-
-
-void NumberDictionarySet(Handle<NumberDictionary> dictionary,
- uint32_t index,
- Handle<Object> value,
- PropertyDetails details) {
- CALL_HEAP_FUNCTION_VOID(dictionary->GetIsolate(),
- dictionary->Set(index, *value, details));
-}
-
-
-void FlattenString(Handle<String> string) {
- CALL_HEAP_FUNCTION_VOID(string->GetIsolate(), string->TryFlatten());
-}
-
-
-Handle<String> FlattenGetString(Handle<String> string) {
- CALL_HEAP_FUNCTION(string->GetIsolate(), string->TryFlatten(), String);
-}
-
-
-Handle<Object> SetPrototype(Handle<JSFunction> function,
- Handle<Object> prototype) {
- ASSERT(function->should_have_prototype());
- CALL_HEAP_FUNCTION(function->GetIsolate(),
- Accessors::FunctionSetPrototype(*function,
- *prototype,
- NULL),
- Object);
-}
-
-
-Handle<Object> SetProperty(Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetProperty(*key, *value, attributes, strict_mode),
- Object);
-}
-
-
-Handle<Object> SetProperty(Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- Isolate* isolate = Isolate::Current();
- CALL_HEAP_FUNCTION(
- isolate,
- Runtime::SetObjectProperty(
- isolate, object, key, value, attributes, strict_mode),
- Object);
-}
-
-
-Handle<Object> ForceSetProperty(Handle<JSObject> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attributes) {
- Isolate* isolate = object->GetIsolate();
- CALL_HEAP_FUNCTION(
- isolate,
- Runtime::ForceSetObjectProperty(
- isolate, object, key, value, attributes),
- Object);
-}
-
-
-Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyDetails details) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetNormalizedProperty(*key, *value, details),
- Object);
-}
-
-
-Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
- Handle<Object> key) {
- Isolate* isolate = object->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- Runtime::ForceDeleteObjectProperty(isolate, object, key),
- Object);
-}
-
-
-Handle<Object> SetLocalPropertyIgnoreAttributes(
- Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes) {
- CALL_HEAP_FUNCTION(
- object->GetIsolate(),
- object->SetLocalPropertyIgnoreAttributes(*key, *value, attributes),
- Object);
-}
-
-
-void SetLocalPropertyNoThrow(Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes) {
- Isolate* isolate = object->GetIsolate();
- ASSERT(!isolate->has_pending_exception());
- CHECK(!SetLocalPropertyIgnoreAttributes(
- object, key, value, attributes).is_null());
- CHECK(!isolate->has_pending_exception());
-}
-
-
-Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetPropertyWithInterceptor(*key,
- *value,
- attributes,
- strict_mode),
- Object);
-}
-
-
-Handle<Object> GetProperty(Handle<JSObject> obj,
- const char* name) {
- Isolate* isolate = obj->GetIsolate();
- Handle<String> str = isolate->factory()->LookupAsciiSymbol(name);
- CALL_HEAP_FUNCTION(isolate, obj->GetProperty(*str), Object);
-}
-
-
-Handle<Object> GetProperty(Handle<Object> obj,
- Handle<Object> key) {
- Isolate* isolate = Isolate::Current();
- CALL_HEAP_FUNCTION(isolate,
- Runtime::GetObjectProperty(isolate, obj, key), Object);
-}
-
-
-Handle<Object> GetElement(Handle<Object> obj,
- uint32_t index) {
- Isolate* isolate = Isolate::Current();
- CALL_HEAP_FUNCTION(isolate, Runtime::GetElement(obj, index), Object);
-}
-
-
-Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<String> name,
- PropertyAttributes* attributes) {
- Isolate* isolate = receiver->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- holder->GetPropertyWithInterceptor(*receiver,
- *name,
- attributes),
- Object);
-}
-
-
-Handle<Object> GetPrototype(Handle<Object> obj) {
- Handle<Object> result(obj->GetPrototype());
- return result;
-}
-
-
-Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value) {
- const bool skip_hidden_prototypes = false;
- CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->SetPrototype(*value, skip_hidden_prototypes), Object);
-}
-
-
-Handle<Object> PreventExtensions(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION(object->GetIsolate(), object->PreventExtensions(), Object);
-}
-
-
-Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
- bool create_if_needed) {
- Isolate* isolate = obj->GetIsolate();
- Object* holder = obj->BypassGlobalProxy();
- if (holder->IsUndefined()) return isolate->factory()->undefined_value();
- obj = Handle<JSObject>(JSObject::cast(holder), isolate);
-
- if (obj->HasFastProperties()) {
- // If the object has fast properties, check whether the first slot
- // in the descriptor array matches the hidden symbol. Since the
- // hidden symbols hash code is zero (and no other string has hash
- // code zero) it will always occupy the first entry if present.
- DescriptorArray* descriptors = obj->map()->instance_descriptors();
- if ((descriptors->number_of_descriptors() > 0) &&
- (descriptors->GetKey(0) == isolate->heap()->hidden_symbol()) &&
- descriptors->IsProperty(0)) {
- ASSERT(descriptors->GetType(0) == FIELD);
- return Handle<Object>(obj->FastPropertyAt(descriptors->GetFieldIndex(0)),
- isolate);
- }
- }
-
- // Only attempt to find the hidden properties in the local object and not
- // in the prototype chain. Note that HasLocalProperty() can cause a GC in
- // the general case in the presence of interceptors.
- if (!obj->HasHiddenPropertiesObject()) {
- // Hidden properties object not found. Allocate a new hidden properties
- // object if requested. Otherwise return the undefined value.
- if (create_if_needed) {
- Handle<Object> hidden_obj =
- isolate->factory()->NewJSObject(isolate->object_function());
- CALL_HEAP_FUNCTION(isolate,
- obj->SetHiddenPropertiesObject(*hidden_obj), Object);
- } else {
- return isolate->factory()->undefined_value();
- }
- }
- return Handle<Object>(obj->GetHiddenPropertiesObject(), isolate);
-}
-
-
-Handle<Object> DeleteElement(Handle<JSObject> obj,
- uint32_t index) {
- CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->DeleteElement(index, JSObject::NORMAL_DELETION),
- Object);
-}
-
-
-Handle<Object> DeleteProperty(Handle<JSObject> obj,
- Handle<String> prop) {
- CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->DeleteProperty(*prop, JSObject::NORMAL_DELETION),
- Object);
-}
-
-
-Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index) {
- Isolate* isolate = Isolate::Current();
- CALL_HEAP_FUNCTION(
- isolate,
- isolate->heap()->LookupSingleCharacterStringFromCode(index), Object);
-}
-
-
-Handle<String> SubString(Handle<String> str,
- int start,
- int end,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(str->GetIsolate(),
- str->SubString(start, end, pretenure), String);
-}
-
-
-Handle<Object> SetElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- StrictModeFlag strict_mode) {
- if (object->HasExternalArrayElements()) {
- if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) {
- bool has_exception;
- Handle<Object> number = Execution::ToNumber(value, &has_exception);
- if (has_exception) return Handle<Object>();
- value = number;
- }
- }
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetElement(index, *value, strict_mode), Object);
-}
-
-
-Handle<Object> SetOwnElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- StrictModeFlag strict_mode) {
- ASSERT(!object->HasExternalArrayElements());
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetElement(index, *value, strict_mode, false),
- Object);
-}
-
-
-Handle<JSObject> Copy(Handle<JSObject> obj) {
- Isolate* isolate = obj->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- isolate->heap()->CopyJSObject(*obj), JSObject);
-}
-
-
-Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info) {
- CALL_HEAP_FUNCTION(obj->GetIsolate(), obj->DefineAccessor(*info), Object);
-}
-
-
-// Wrappers for scripts are kept alive and cached in weak global
-// handles referred from proxy objects held by the scripts as long as
-// they are used. When they are not used anymore, the garbage
-// collector will call the weak callback on the global handle
-// associated with the wrapper and get rid of both the wrapper and the
-// handle.
-static void ClearWrapperCache(Persistent<v8::Value> handle, void*) {
-#ifdef ENABLE_HEAP_PROTECTION
- // Weak reference callbacks are called as if from outside V8. We
- // need to reeenter to unprotect the heap.
- VMState state(OTHER);
-#endif
- Handle<Object> cache = Utils::OpenHandle(*handle);
- JSValue* wrapper = JSValue::cast(*cache);
- Proxy* proxy = Script::cast(wrapper->value())->wrapper();
- ASSERT(proxy->proxy() == reinterpret_cast<Address>(cache.location()));
- proxy->set_proxy(0);
- Isolate* isolate = Isolate::Current();
- isolate->global_handles()->Destroy(cache.location());
- isolate->counters()->script_wrappers()->Decrement();
-}
-
-
-Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
- if (script->wrapper()->proxy() != NULL) {
- // Return the script wrapper directly from the cache.
- return Handle<JSValue>(
- reinterpret_cast<JSValue**>(script->wrapper()->proxy()));
- }
- Isolate* isolate = Isolate::Current();
- // Construct a new script wrapper.
- isolate->counters()->script_wrappers()->Increment();
- Handle<JSFunction> constructor = isolate->script_function();
- Handle<JSValue> result =
- Handle<JSValue>::cast(isolate->factory()->NewJSObject(constructor));
- result->set_value(*script);
-
- // Create a new weak global handle and use it to cache the wrapper
- // for future use. The cache will automatically be cleared by the
- // garbage collector when it is not used anymore.
- Handle<Object> handle = isolate->global_handles()->Create(*result);
- isolate->global_handles()->MakeWeak(handle.location(), NULL,
- &ClearWrapperCache);
- script->wrapper()->set_proxy(reinterpret_cast<Address>(handle.location()));
- return result;
-}
-
-
-// Init line_ends array with code positions of line ends inside script
-// source.
-void InitScriptLineEnds(Handle<Script> script) {
- if (!script->line_ends()->IsUndefined()) return;
-
- Isolate* isolate = script->GetIsolate();
-
- if (!script->source()->IsString()) {
- ASSERT(script->source()->IsUndefined());
- Handle<FixedArray> empty = isolate->factory()->NewFixedArray(0);
- script->set_line_ends(*empty);
- ASSERT(script->line_ends()->IsFixedArray());
- return;
- }
-
- Handle<String> src(String::cast(script->source()), isolate);
-
- Handle<FixedArray> array = CalculateLineEnds(src, true);
-
- if (*array != isolate->heap()->empty_fixed_array()) {
- array->set_map(isolate->heap()->fixed_cow_array_map());
- }
-
- script->set_line_ends(*array);
- ASSERT(script->line_ends()->IsFixedArray());
-}
-
-
-template <typename SourceChar>
-static void CalculateLineEnds(Isolate* isolate,
- List<int>* line_ends,
- Vector<const SourceChar> src,
- bool with_last_line) {
- const int src_len = src.length();
- StringSearch<char, SourceChar> search(isolate, CStrVector("\n"));
-
- // Find and record line ends.
- int position = 0;
- while (position != -1 && position < src_len) {
- position = search.Search(src, position);
- if (position != -1) {
- line_ends->Add(position);
- position++;
- } else if (with_last_line) {
- // Even if the last line misses a line end, it is counted.
- line_ends->Add(src_len);
- return;
- }
- }
-}
-
-
-Handle<FixedArray> CalculateLineEnds(Handle<String> src,
- bool with_last_line) {
- src = FlattenGetString(src);
- // Rough estimate of line count based on a roughly estimated average
- // length of (unpacked) code.
- int line_count_estimate = src->length() >> 4;
- List<int> line_ends(line_count_estimate);
- Isolate* isolate = src->GetIsolate();
- {
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid.
- // Dispatch on type of strings.
- if (src->IsAsciiRepresentation()) {
- CalculateLineEnds(isolate,
- &line_ends,
- src->ToAsciiVector(),
- with_last_line);
- } else {
- CalculateLineEnds(isolate,
- &line_ends,
- src->ToUC16Vector(),
- with_last_line);
- }
- }
- int line_count = line_ends.length();
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(line_count);
- for (int i = 0; i < line_count; i++) {
- array->set(i, Smi::FromInt(line_ends[i]));
- }
- return array;
-}
-
-
-// Convert code position into line number.
-int GetScriptLineNumber(Handle<Script> script, int code_pos) {
- InitScriptLineEnds(script);
- AssertNoAllocation no_allocation;
- FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
- const int line_ends_len = line_ends_array->length();
-
- if (!line_ends_len) return -1;
-
- if ((Smi::cast(line_ends_array->get(0)))->value() >= code_pos) {
- return script->line_offset()->value();
- }
-
- int left = 0;
- int right = line_ends_len;
- while (int half = (right - left) / 2) {
- if ((Smi::cast(line_ends_array->get(left + half)))->value() > code_pos) {
- right -= half;
- } else {
- left += half;
- }
- }
- return right + script->line_offset()->value();
-}
-
-
-int GetScriptLineNumberSafe(Handle<Script> script, int code_pos) {
- AssertNoAllocation no_allocation;
- if (!script->line_ends()->IsUndefined()) {
- return GetScriptLineNumber(script, code_pos);
- }
- // Slow mode: we do not have line_ends. We have to iterate through source.
- if (!script->source()->IsString()) {
- return -1;
- }
- String* source = String::cast(script->source());
- int line = 0;
- int len = source->length();
- for (int pos = 0; pos < len; pos++) {
- if (pos == code_pos) {
- break;
- }
- if (source->Get(pos) == '\n') {
- line++;
- }
- }
- return line;
-}
-
-
-void CustomArguments::IterateInstance(ObjectVisitor* v) {
- v->VisitPointers(values_, values_ + ARRAY_SIZE(values_));
-}
-
-
-// Compute the property keys from the interceptor.
-v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
- Handle<JSObject> object) {
- Isolate* isolate = receiver->GetIsolate();
- Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
- CustomArguments args(isolate, interceptor->data(), *receiver, *object);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Array> result;
- if (!interceptor->enumerator()->IsUndefined()) {
- v8::NamedPropertyEnumerator enum_fun =
- v8::ToCData<v8::NamedPropertyEnumerator>(interceptor->enumerator());
- LOG(isolate, ApiObjectAccess("interceptor-named-enum", *object));
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = enum_fun(info);
- }
- }
- return result;
-}
-
-
-// Compute the element keys from the interceptor.
-v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
- Handle<JSObject> object) {
- Isolate* isolate = receiver->GetIsolate();
- Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
- CustomArguments args(isolate, interceptor->data(), *receiver, *object);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Array> result;
- if (!interceptor->enumerator()->IsUndefined()) {
- v8::IndexedPropertyEnumerator enum_fun =
- v8::ToCData<v8::IndexedPropertyEnumerator>(interceptor->enumerator());
- LOG(isolate, ApiObjectAccess("interceptor-indexed-enum", *object));
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = enum_fun(info);
- }
- }
- return result;
-}
-
-
-static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
- int len = array->length();
- for (int i = 0; i < len; i++) {
- Object* e = array->get(i);
- if (!(e->IsString() || e->IsNumber())) return false;
- }
- return true;
-}
-
-
-Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
- KeyCollectionType type) {
- USE(ContainsOnlyValidKeys);
- Isolate* isolate = object->GetIsolate();
- Handle<FixedArray> content = isolate->factory()->empty_fixed_array();
- Handle<JSObject> arguments_boilerplate = Handle<JSObject>(
- isolate->context()->global_context()->arguments_boilerplate(),
- isolate);
- Handle<JSFunction> arguments_function = Handle<JSFunction>(
- JSFunction::cast(arguments_boilerplate->map()->constructor()),
- isolate);
-
- // Only collect keys if access is permitted.
- for (Handle<Object> p = object;
- *p != isolate->heap()->null_value();
- p = Handle<Object>(p->GetPrototype(), isolate)) {
- Handle<JSObject> current(JSObject::cast(*p), isolate);
-
- // Check access rights if required.
- if (current->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*current,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*current, v8::ACCESS_KEYS);
- break;
- }
-
- // Compute the element keys.
- Handle<FixedArray> element_keys =
- isolate->factory()->NewFixedArray(current->NumberOfEnumElements());
- current->GetEnumElementKeys(*element_keys);
- content = UnionOfKeys(content, element_keys);
- ASSERT(ContainsOnlyValidKeys(content));
-
- // Add the element keys from the interceptor.
- if (current->HasIndexedInterceptor()) {
- v8::Handle<v8::Array> result =
- GetKeysForIndexedInterceptor(object, current);
- if (!result.IsEmpty())
- content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
- ASSERT(ContainsOnlyValidKeys(content));
- }
-
- // We can cache the computed property keys if access checks are
- // not needed and no interceptors are involved.
- //
- // We do not use the cache if the object has elements and
- // therefore it does not make sense to cache the property names
- // for arguments objects. Arguments objects will always have
- // elements.
- // Wrapped strings have elements, but don't have an elements
- // array or dictionary. So the fast inline test for whether to
- // use the cache says yes, so we should not create a cache.
- bool cache_enum_keys =
- ((current->map()->constructor() != *arguments_function) &&
- !current->IsJSValue() &&
- !current->IsAccessCheckNeeded() &&
- !current->HasNamedInterceptor() &&
- !current->HasIndexedInterceptor());
- // Compute the property keys and cache them if possible.
- content =
- UnionOfKeys(content, GetEnumPropertyKeys(current, cache_enum_keys));
- ASSERT(ContainsOnlyValidKeys(content));
-
- // Add the property keys from the interceptor.
- if (current->HasNamedInterceptor()) {
- v8::Handle<v8::Array> result =
- GetKeysForNamedInterceptor(object, current);
- if (!result.IsEmpty())
- content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
- ASSERT(ContainsOnlyValidKeys(content));
- }
-
- // If we only want local properties we bail out after the first
- // iteration.
- if (type == LOCAL_ONLY)
- break;
- }
- return content;
-}
-
-
-Handle<JSArray> GetKeysFor(Handle<JSObject> object) {
- Isolate* isolate = object->GetIsolate();
- isolate->counters()->for_in()->Increment();
- Handle<FixedArray> elements = GetKeysInFixedArrayFor(object,
- INCLUDE_PROTOS);
- return isolate->factory()->NewJSArrayWithElements(elements);
-}
-
-
-Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
- bool cache_result) {
- int index = 0;
- Isolate* isolate = object->GetIsolate();
- if (object->HasFastProperties()) {
- if (object->map()->instance_descriptors()->HasEnumCache()) {
- isolate->counters()->enum_cache_hits()->Increment();
- DescriptorArray* desc = object->map()->instance_descriptors();
- return Handle<FixedArray>(FixedArray::cast(desc->GetEnumCache()),
- isolate);
- }
- isolate->counters()->enum_cache_misses()->Increment();
- int num_enum = object->NumberOfEnumProperties();
- Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
- Handle<FixedArray> sort_array = isolate->factory()->NewFixedArray(num_enum);
- Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(object->map()->instance_descriptors(), isolate);
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (descs->IsProperty(i) && !descs->IsDontEnum(i)) {
- (*storage)->set(index, descs->GetKey(i));
- PropertyDetails details(descs->GetDetails(i));
- (*sort_array)->set(index, Smi::FromInt(details.index()));
- index++;
- }
- }
- (*storage)->SortPairs(*sort_array, sort_array->length());
- if (cache_result) {
- Handle<FixedArray> bridge_storage =
- isolate->factory()->NewFixedArray(
- DescriptorArray::kEnumCacheBridgeLength);
- DescriptorArray* desc = object->map()->instance_descriptors();
- desc->SetEnumCache(*bridge_storage, *storage);
- }
- ASSERT(storage->length() == index);
- return storage;
- } else {
- int num_enum = object->NumberOfEnumProperties();
- Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
- Handle<FixedArray> sort_array = isolate->factory()->NewFixedArray(num_enum);
- object->property_dictionary()->CopyEnumKeysTo(*storage, *sort_array);
- return storage;
- }
-}
-
-
-bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
- ClearExceptionFlag flag) {
- return shared->is_compiled() || CompileLazyShared(shared, flag);
-}
-
-
-static bool CompileLazyHelper(CompilationInfo* info,
- ClearExceptionFlag flag) {
- // Compile the source information to a code object.
- ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
- ASSERT(!info->isolate()->has_pending_exception());
- bool result = Compiler::CompileLazy(info);
- ASSERT(result != Isolate::Current()->has_pending_exception());
- if (!result && flag == CLEAR_EXCEPTION) {
- info->isolate()->clear_pending_exception();
- }
- return result;
-}
-
-
-bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
- ClearExceptionFlag flag) {
- CompilationInfo info(shared);
- return CompileLazyHelper(&info, flag);
-}
-
-
-static bool CompileLazyFunction(Handle<JSFunction> function,
- ClearExceptionFlag flag,
- InLoopFlag in_loop_flag) {
- bool result = true;
- if (function->shared()->is_compiled()) {
- function->ReplaceCode(function->shared()->code());
- function->shared()->set_code_age(0);
- } else {
- CompilationInfo info(function);
- if (in_loop_flag == IN_LOOP) info.MarkAsInLoop();
- result = CompileLazyHelper(&info, flag);
- ASSERT(!result || function->is_compiled());
- }
- return result;
-}
-
-
-bool CompileLazy(Handle<JSFunction> function,
- ClearExceptionFlag flag) {
- return CompileLazyFunction(function, flag, NOT_IN_LOOP);
-}
-
-
-bool CompileLazyInLoop(Handle<JSFunction> function,
- ClearExceptionFlag flag) {
- return CompileLazyFunction(function, flag, IN_LOOP);
-}
-
-
-bool CompileOptimized(Handle<JSFunction> function,
- int osr_ast_id,
- ClearExceptionFlag flag) {
- CompilationInfo info(function);
- info.SetOptimizing(osr_ast_id);
- return CompileLazyHelper(&info, flag);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/handles.h b/src/3rdparty/v8/src/handles.h
deleted file mode 100644
index a357a00..0000000
--- a/src/3rdparty/v8/src/handles.h
+++ /dev/null
@@ -1,372 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_HANDLES_H_
-#define V8_HANDLES_H_
-
-#include "apiutils.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// A Handle provides a reference to an object that survives relocation by
-// the garbage collector.
-// Handles are only valid within a HandleScope.
-// When a handle is created for an object a cell is allocated in the heap.
-
-template<typename T>
-class Handle {
- public:
- INLINE(explicit Handle(T** location)) { location_ = location; }
- INLINE(explicit Handle(T* obj));
- INLINE(Handle(T* obj, Isolate* isolate));
-
- INLINE(Handle()) : location_(NULL) {}
-
- // Constructor for handling automatic up casting.
- // Ex. Handle<JSFunction> can be passed when Handle<Object> is expected.
- template <class S> Handle(Handle<S> handle) {
-#ifdef DEBUG
- T* a = NULL;
- S* b = NULL;
- a = b; // Fake assignment to enforce type checks.
- USE(a);
-#endif
- location_ = reinterpret_cast<T**>(handle.location());
- }
-
- INLINE(T* operator ->() const) { return operator*(); }
-
- // Check if this handle refers to the exact same object as the other handle.
- bool is_identical_to(const Handle<T> other) const {
- return operator*() == *other;
- }
-
- // Provides the C++ dereference operator.
- INLINE(T* operator*() const);
-
- // Returns the address to where the raw pointer is stored.
- T** location() const {
- ASSERT(location_ == NULL ||
- reinterpret_cast<Address>(*location_) != kZapValue);
- return location_;
- }
-
- template <class S> static Handle<T> cast(Handle<S> that) {
- T::cast(*that);
- return Handle<T>(reinterpret_cast<T**>(that.location()));
- }
-
- static Handle<T> null() { return Handle<T>(); }
- bool is_null() const { return location_ == NULL; }
-
- // Closes the given scope, but lets this handle escape. See
- // implementation in api.h.
- inline Handle<T> EscapeFrom(v8::HandleScope* scope);
-
- private:
- T** location_;
-};
-
-
-// A stack-allocated class that governs a number of local handles.
-// After a handle scope has been created, all local handles will be
-// allocated within that handle scope until either the handle scope is
-// deleted or another handle scope is created. If there is already a
-// handle scope and a new one is created, all allocations will take
-// place in the new handle scope until it is deleted. After that,
-// new handles will again be allocated in the original handle scope.
-//
-// After the handle scope of a local handle has been deleted the
-// garbage collector will no longer track the object stored in the
-// handle and may deallocate it. The behavior of accessing a handle
-// for which the handle scope has been deleted is undefined.
-class HandleScope {
- public:
- inline HandleScope();
- explicit inline HandleScope(Isolate* isolate);
-
- inline ~HandleScope();
-
- // Counts the number of allocated handles.
- static int NumberOfHandles();
-
- // Creates a new handle with the given value.
- template <typename T>
- static inline T** CreateHandle(T* value, Isolate* isolate);
-
- // Deallocates any extensions used by the current scope.
- static void DeleteExtensions(Isolate* isolate);
-
- static Address current_next_address();
- static Address current_limit_address();
- static Address current_level_address();
-
- // Closes the HandleScope (invalidating all handles
- // created in the scope of the HandleScope) and returns
- // a Handle backed by the parent scope holding the
- // value of the argument handle.
- template <typename T>
- Handle<T> CloseAndEscape(Handle<T> handle_value);
-
- Isolate* isolate() { return isolate_; }
-
- private:
- // Prevent heap allocation or illegal handle scopes.
- HandleScope(const HandleScope&);
- void operator=(const HandleScope&);
- void* operator new(size_t size);
- void operator delete(void* size_t);
-
- inline void CloseScope();
-
- Isolate* isolate_;
- Object** prev_next_;
- Object** prev_limit_;
-
- // Extend the handle scope making room for more handles.
- static internal::Object** Extend();
-
- // Zaps the handles in the half-open interval [start, end).
- static void ZapRange(internal::Object** start, internal::Object** end);
-
- friend class v8::HandleScope;
- friend class v8::ImplementationUtilities;
-};
-
-
-// ----------------------------------------------------------------------------
-// Handle operations.
-// They might invoke garbage collection. The result is an handle to
-// an object of expected type, or the handle is an error if running out
-// of space or encountering an internal error.
-
-void NormalizeProperties(Handle<JSObject> object,
- PropertyNormalizationMode mode,
- int expected_additional_properties);
-void NormalizeElements(Handle<JSObject> object);
-void TransformToFastProperties(Handle<JSObject> object,
- int unused_property_fields);
-void NumberDictionarySet(Handle<NumberDictionary> dictionary,
- uint32_t index,
- Handle<Object> value,
- PropertyDetails details);
-
-// Flattens a string.
-void FlattenString(Handle<String> str);
-
-// Flattens a string and returns the underlying external or sequential
-// string.
-Handle<String> FlattenGetString(Handle<String> str);
-
-Handle<Object> SetProperty(Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
-
-Handle<Object> SetProperty(Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
-
-Handle<Object> ForceSetProperty(Handle<JSObject> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attributes);
-
-Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyDetails details);
-
-Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
- Handle<Object> key);
-
-Handle<Object> SetLocalPropertyIgnoreAttributes(
- Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes);
-
-// Used to set local properties on the object we totally control
-// and which therefore has no accessors and alikes.
-void SetLocalPropertyNoThrow(Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes = NONE);
-
-Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
-
-MUST_USE_RESULT Handle<Object> SetElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- StrictModeFlag strict_mode);
-
-Handle<Object> SetOwnElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- StrictModeFlag strict_mode);
-
-Handle<Object> GetProperty(Handle<JSObject> obj,
- const char* name);
-
-Handle<Object> GetProperty(Handle<Object> obj,
- Handle<Object> key);
-
-Handle<Object> GetElement(Handle<Object> obj,
- uint32_t index);
-
-Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<String> name,
- PropertyAttributes* attributes);
-
-Handle<Object> GetPrototype(Handle<Object> obj);
-
-Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);
-
-// Return the object's hidden properties object. If the object has no hidden
-// properties and create_if_needed is true, then a new hidden property object
-// will be allocated. Otherwise the Heap::undefined_value is returned.
-Handle<Object> GetHiddenProperties(Handle<JSObject> obj, bool create_if_needed);
-
-Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
-Handle<Object> DeleteProperty(Handle<JSObject> obj, Handle<String> prop);
-
-Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index);
-
-Handle<JSObject> Copy(Handle<JSObject> obj);
-
-Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info);
-
-Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>,
- Handle<JSArray> array);
-
-// Get the JS object corresponding to the given script; create it
-// if none exists.
-Handle<JSValue> GetScriptWrapper(Handle<Script> script);
-
-// Script line number computations.
-void InitScriptLineEnds(Handle<Script> script);
-// For string calculates an array of line end positions. If the string
-// does not end with a new line character, this character may optionally be
-// imagined.
-Handle<FixedArray> CalculateLineEnds(Handle<String> string,
- bool with_imaginary_last_new_line);
-int GetScriptLineNumber(Handle<Script> script, int code_position);
-// The safe version does not make heap allocations but may work much slower.
-int GetScriptLineNumberSafe(Handle<Script> script, int code_position);
-
-// Computes the enumerable keys from interceptors. Used for debug mirrors and
-// by GetKeysInFixedArrayFor below.
-v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
- Handle<JSObject> object);
-v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
- Handle<JSObject> object);
-
-enum KeyCollectionType { LOCAL_ONLY, INCLUDE_PROTOS };
-
-// Computes the enumerable keys for a JSObject. Used for implementing
-// "for (n in object) { }".
-Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
- KeyCollectionType type);
-Handle<JSArray> GetKeysFor(Handle<JSObject> object);
-Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
- bool cache_result);
-
-// Computes the union of keys and return the result.
-// Used for implementing "for (n in object) { }"
-Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first,
- Handle<FixedArray> second);
-
-Handle<String> SubString(Handle<String> str,
- int start,
- int end,
- PretenureFlag pretenure = NOT_TENURED);
-
-
-// Sets the expected number of properties for the function's instances.
-void SetExpectedNofProperties(Handle<JSFunction> func, int nof);
-
-// Sets the prototype property for a function instance.
-void SetPrototypeProperty(Handle<JSFunction> func, Handle<JSObject> value);
-
-// Sets the expected number of properties based on estimate from compiler.
-void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
- int estimate);
-
-
-Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
- Handle<JSFunction> constructor,
- Handle<JSGlobalProxy> global);
-
-Handle<Object> SetPrototype(Handle<JSFunction> function,
- Handle<Object> prototype);
-
-Handle<Object> PreventExtensions(Handle<JSObject> object);
-
-// Does lazy compilation of the given function. Returns true on success and
-// false if the compilation resulted in a stack overflow.
-enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
-
-bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
- ClearExceptionFlag flag);
-
-bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
- ClearExceptionFlag flag);
-
-bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
-
-bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag);
-
-bool CompileOptimized(Handle<JSFunction> function,
- int osr_ast_id,
- ClearExceptionFlag flag);
-
-class NoHandleAllocation BASE_EMBEDDED {
- public:
-#ifndef DEBUG
- NoHandleAllocation() {}
- ~NoHandleAllocation() {}
-#else
- inline NoHandleAllocation();
- inline ~NoHandleAllocation();
- private:
- int level_;
-#endif
-};
-
-} } // namespace v8::internal
-
-#endif // V8_HANDLES_H_
diff --git a/src/3rdparty/v8/src/hashmap.cc b/src/3rdparty/v8/src/hashmap.cc
deleted file mode 100644
index 1422afd..0000000
--- a/src/3rdparty/v8/src/hashmap.cc
+++ /dev/null
@@ -1,230 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "../include/v8stdint.h"
-#include "globals.h"
-#include "checks.h"
-#include "utils.h"
-#include "allocation.h"
-
-#include "hashmap.h"
-
-namespace v8 {
-namespace internal {
-
-Allocator HashMap::DefaultAllocator;
-
-
-HashMap::HashMap() {
- allocator_ = NULL;
- match_ = NULL;
-}
-
-
-HashMap::HashMap(MatchFun match,
- Allocator* allocator,
- uint32_t initial_capacity) {
- allocator_ = allocator;
- match_ = match;
- Initialize(initial_capacity);
-}
-
-
-HashMap::~HashMap() {
- if (allocator_) {
- allocator_->Delete(map_);
- }
-}
-
-
-HashMap::Entry* HashMap::Lookup(void* key, uint32_t hash, bool insert) {
- // Find a matching entry.
- Entry* p = Probe(key, hash);
- if (p->key != NULL) {
- return p;
- }
-
- // No entry found; insert one if necessary.
- if (insert) {
- p->key = key;
- p->value = NULL;
- p->hash = hash;
- occupancy_++;
-
- // Grow the map if we reached >= 80% occupancy.
- if (occupancy_ + occupancy_/4 >= capacity_) {
- Resize();
- p = Probe(key, hash);
- }
-
- return p;
- }
-
- // No entry found and none inserted.
- return NULL;
-}
-
-
-void HashMap::Remove(void* key, uint32_t hash) {
- // Lookup the entry for the key to remove.
- Entry* p = Probe(key, hash);
- if (p->key == NULL) {
- // Key not found nothing to remove.
- return;
- }
-
- // To remove an entry we need to ensure that it does not create an empty
- // entry that will cause the search for another entry to stop too soon. If all
- // the entries between the entry to remove and the next empty slot have their
- // initial position inside this interval, clearing the entry to remove will
- // not break the search. If, while searching for the next empty entry, an
- // entry is encountered which does not have its initial position between the
- // entry to remove and the position looked at, then this entry can be moved to
- // the place of the entry to remove without breaking the search for it. The
- // entry made vacant by this move is now the entry to remove and the process
- // starts over.
- // Algorithm from http://en.wikipedia.org/wiki/Open_addressing.
-
- // This guarantees loop termination as there is at least one empty entry so
- // eventually the removed entry will have an empty entry after it.
- ASSERT(occupancy_ < capacity_);
-
- // p is the candidate entry to clear. q is used to scan forwards.
- Entry* q = p; // Start at the entry to remove.
- while (true) {
- // Move q to the next entry.
- q = q + 1;
- if (q == map_end()) {
- q = map_;
- }
-
- // All entries between p and q have their initial position between p and q
- // and the entry p can be cleared without breaking the search for these
- // entries.
- if (q->key == NULL) {
- break;
- }
-
- // Find the initial position for the entry at position q.
- Entry* r = map_ + (q->hash & (capacity_ - 1));
-
- // If the entry at position q has its initial position outside the range
- // between p and q it can be moved forward to position p and will still be
- // found. There is now a new candidate entry for clearing.
- if ((q > p && (r <= p || r > q)) ||
- (q < p && (r <= p && r > q))) {
- *p = *q;
- p = q;
- }
- }
-
- // Clear the entry which is allowed to en emptied.
- p->key = NULL;
- occupancy_--;
-}
-
-
-void HashMap::Clear() {
- // Mark all entries as empty.
- const Entry* end = map_end();
- for (Entry* p = map_; p < end; p++) {
- p->key = NULL;
- }
- occupancy_ = 0;
-}
-
-
-HashMap::Entry* HashMap::Start() const {
- return Next(map_ - 1);
-}
-
-
-HashMap::Entry* HashMap::Next(Entry* p) const {
- const Entry* end = map_end();
- ASSERT(map_ - 1 <= p && p < end);
- for (p++; p < end; p++) {
- if (p->key != NULL) {
- return p;
- }
- }
- return NULL;
-}
-
-
-HashMap::Entry* HashMap::Probe(void* key, uint32_t hash) {
- ASSERT(key != NULL);
-
- ASSERT(IsPowerOf2(capacity_));
- Entry* p = map_ + (hash & (capacity_ - 1));
- const Entry* end = map_end();
- ASSERT(map_ <= p && p < end);
-
- ASSERT(occupancy_ < capacity_); // Guarantees loop termination.
- while (p->key != NULL && (hash != p->hash || !match_(key, p->key))) {
- p++;
- if (p >= end) {
- p = map_;
- }
- }
-
- return p;
-}
-
-
-void HashMap::Initialize(uint32_t capacity) {
- ASSERT(IsPowerOf2(capacity));
- map_ = reinterpret_cast<Entry*>(allocator_->New(capacity * sizeof(Entry)));
- if (map_ == NULL) {
- v8::internal::FatalProcessOutOfMemory("HashMap::Initialize");
- return;
- }
- capacity_ = capacity;
- Clear();
-}
-
-
-void HashMap::Resize() {
- Entry* map = map_;
- uint32_t n = occupancy_;
-
- // Allocate larger map.
- Initialize(capacity_ * 2);
-
- // Rehash all current entries.
- for (Entry* p = map; n > 0; p++) {
- if (p->key != NULL) {
- Lookup(p->key, p->hash, true)->value = p->value;
- n--;
- }
- }
-
- // Delete old map.
- allocator_->Delete(map);
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/hashmap.h b/src/3rdparty/v8/src/hashmap.h
deleted file mode 100644
index bb3e3ce..0000000
--- a/src/3rdparty/v8/src/hashmap.h
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_HASHMAP_H_
-#define V8_HASHMAP_H_
-
-namespace v8 {
-namespace internal {
-
-
-// Allocator defines the memory allocator interface
-// used by HashMap and implements a default allocator.
-class Allocator BASE_EMBEDDED {
- public:
- virtual ~Allocator() {}
- virtual void* New(size_t size) { return Malloced::New(size); }
- virtual void Delete(void* p) { Malloced::Delete(p); }
-};
-
-
-class HashMap {
- public:
- static Allocator DefaultAllocator;
-
- typedef bool (*MatchFun) (void* key1, void* key2);
-
- // Dummy constructor. This constructor doesn't set up the hash
- // map properly so don't use it unless you have good reason (e.g.,
- // you know that the HashMap will never be used).
- HashMap();
-
- // initial_capacity is the size of the initial hash map;
- // it must be a power of 2 (and thus must not be 0).
- explicit HashMap(MatchFun match,
- Allocator* allocator = &DefaultAllocator,
- uint32_t initial_capacity = 8);
-
- ~HashMap();
-
- // HashMap entries are (key, value, hash) triplets.
- // Some clients may not need to use the value slot
- // (e.g. implementers of sets, where the key is the value).
- struct Entry {
- void* key;
- void* value;
- uint32_t hash; // the full hash value for key
- };
-
- // If an entry with matching key is found, Lookup()
- // returns that entry. If no matching entry is found,
- // but insert is set, a new entry is inserted with
- // corresponding key, key hash, and NULL value.
- // Otherwise, NULL is returned.
- Entry* Lookup(void* key, uint32_t hash, bool insert);
-
- // Removes the entry with matching key.
- void Remove(void* key, uint32_t hash);
-
- // Empties the hash map (occupancy() == 0).
- void Clear();
-
- // The number of (non-empty) entries in the table.
- uint32_t occupancy() const { return occupancy_; }
-
- // The capacity of the table. The implementation
- // makes sure that occupancy is at most 80% of
- // the table capacity.
- uint32_t capacity() const { return capacity_; }
-
- // Iteration
- //
- // for (Entry* p = map.Start(); p != NULL; p = map.Next(p)) {
- // ...
- // }
- //
- // If entries are inserted during iteration, the effect of
- // calling Next() is undefined.
- Entry* Start() const;
- Entry* Next(Entry* p) const;
-
- private:
- Allocator* allocator_;
- MatchFun match_;
- Entry* map_;
- uint32_t capacity_;
- uint32_t occupancy_;
-
- Entry* map_end() const { return map_ + capacity_; }
- Entry* Probe(void* key, uint32_t hash);
- void Initialize(uint32_t capacity);
- void Resize();
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_HASHMAP_H_
diff --git a/src/3rdparty/v8/src/heap-inl.h b/src/3rdparty/v8/src/heap-inl.h
deleted file mode 100644
index 99737ed..0000000
--- a/src/3rdparty/v8/src/heap-inl.h
+++ /dev/null
@@ -1,703 +0,0 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_HEAP_INL_H_
-#define V8_HEAP_INL_H_
-
-#include "heap.h"
-#include "objects.h"
-#include "isolate.h"
-#include "v8-counters.h"
-
-namespace v8 {
-namespace internal {
-
-void PromotionQueue::insert(HeapObject* target, int size) {
- *(--rear_) = reinterpret_cast<intptr_t>(target);
- *(--rear_) = size;
- // Assert no overflow into live objects.
- ASSERT(reinterpret_cast<Address>(rear_) >= HEAP->new_space()->top());
-}
-
-
-int Heap::MaxObjectSizeInPagedSpace() {
- return Page::kMaxHeapObjectSize;
-}
-
-
-MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> str,
- PretenureFlag pretenure) {
- // Check for ASCII first since this is the common case.
- if (String::IsAscii(str.start(), str.length())) {
- // If the string is ASCII, we do not need to convert the characters
- // since UTF8 is backwards compatible with ASCII.
- return AllocateStringFromAscii(str, pretenure);
- }
- // Non-ASCII and we need to decode.
- return AllocateStringFromUtf8Slow(str, pretenure);
-}
-
-
-MaybeObject* Heap::AllocateSymbol(Vector<const char> str,
- int chars,
- uint32_t hash_field) {
- unibrow::Utf8InputBuffer<> buffer(str.start(),
- static_cast<unsigned>(str.length()));
- return AllocateInternalSymbol(&buffer, chars, hash_field);
-}
-
-
-MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
- uint32_t hash_field) {
- if (str.length() > SeqAsciiString::kMaxLength) {
- return Failure::OutOfMemoryException();
- }
- // Compute map and object size.
- Map* map = ascii_symbol_map();
- int size = SeqAsciiString::SizeFor(str.length());
-
- // Allocate string.
- Object* result;
- { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
- ? lo_space_->AllocateRaw(size)
- : old_data_space_->AllocateRaw(size);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- reinterpret_cast<HeapObject*>(result)->set_map(map);
- // Set length and hash fields of the allocated string.
- String* answer = String::cast(result);
- answer->set_length(str.length());
- answer->set_hash_field(hash_field);
-
- ASSERT_EQ(size, answer->Size());
-
- // Fill in the characters.
- memcpy(answer->address() + SeqAsciiString::kHeaderSize,
- str.start(), str.length());
-
- return answer;
-}
-
-
-MaybeObject* Heap::AllocateTwoByteSymbol(Vector<const uc16> str,
- uint32_t hash_field) {
- if (str.length() > SeqTwoByteString::kMaxLength) {
- return Failure::OutOfMemoryException();
- }
- // Compute map and object size.
- Map* map = symbol_map();
- int size = SeqTwoByteString::SizeFor(str.length());
-
- // Allocate string.
- Object* result;
- { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
- ? lo_space_->AllocateRaw(size)
- : old_data_space_->AllocateRaw(size);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- reinterpret_cast<HeapObject*>(result)->set_map(map);
- // Set length and hash fields of the allocated string.
- String* answer = String::cast(result);
- answer->set_length(str.length());
- answer->set_hash_field(hash_field);
-
- ASSERT_EQ(size, answer->Size());
-
- // Fill in the characters.
- memcpy(answer->address() + SeqTwoByteString::kHeaderSize,
- str.start(), str.length() * kUC16Size);
-
- return answer;
-}
-
-MaybeObject* Heap::CopyFixedArray(FixedArray* src) {
- return CopyFixedArrayWithMap(src, src->map());
-}
-
-
-MaybeObject* Heap::AllocateRaw(int size_in_bytes,
- AllocationSpace space,
- AllocationSpace retry_space) {
- ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
- ASSERT(space != NEW_SPACE ||
- retry_space == OLD_POINTER_SPACE ||
- retry_space == OLD_DATA_SPACE ||
- retry_space == LO_SPACE);
-#ifdef DEBUG
- if (FLAG_gc_interval >= 0 &&
- !disallow_allocation_failure_ &&
- Heap::allocation_timeout_-- <= 0) {
- return Failure::RetryAfterGC(space);
- }
- isolate_->counters()->objs_since_last_full()->Increment();
- isolate_->counters()->objs_since_last_young()->Increment();
-#endif
- MaybeObject* result;
- if (NEW_SPACE == space) {
- result = new_space_.AllocateRaw(size_in_bytes);
- if (always_allocate() && result->IsFailure()) {
- space = retry_space;
- } else {
- return result;
- }
- }
-
- if (OLD_POINTER_SPACE == space) {
- result = old_pointer_space_->AllocateRaw(size_in_bytes);
- } else if (OLD_DATA_SPACE == space) {
- result = old_data_space_->AllocateRaw(size_in_bytes);
- } else if (CODE_SPACE == space) {
- result = code_space_->AllocateRaw(size_in_bytes);
- } else if (LO_SPACE == space) {
- result = lo_space_->AllocateRaw(size_in_bytes);
- } else if (CELL_SPACE == space) {
- result = cell_space_->AllocateRaw(size_in_bytes);
- } else {
- ASSERT(MAP_SPACE == space);
- result = map_space_->AllocateRaw(size_in_bytes);
- }
- if (result->IsFailure()) old_gen_exhausted_ = true;
- return result;
-}
-
-
-MaybeObject* Heap::NumberFromInt32(int32_t value) {
- if (Smi::IsValid(value)) return Smi::FromInt(value);
- // Bypass NumberFromDouble to avoid various redundant checks.
- return AllocateHeapNumber(FastI2D(value));
-}
-
-
-MaybeObject* Heap::NumberFromUint32(uint32_t value) {
- if ((int32_t)value >= 0 && Smi::IsValid((int32_t)value)) {
- return Smi::FromInt((int32_t)value);
- }
- // Bypass NumberFromDouble to avoid various redundant checks.
- return AllocateHeapNumber(FastUI2D(value));
-}
-
-
-void Heap::FinalizeExternalString(String* string) {
- ASSERT(string->IsExternalString());
- v8::String::ExternalStringResourceBase** resource_addr =
- reinterpret_cast<v8::String::ExternalStringResourceBase**>(
- reinterpret_cast<byte*>(string) +
- ExternalString::kResourceOffset -
- kHeapObjectTag);
-
- // Dispose of the C++ object if it has not already been disposed.
- if (*resource_addr != NULL) {
- (*resource_addr)->Dispose();
- }
-
- // Clear the resource pointer in the string.
- *resource_addr = NULL;
-}
-
-
-MaybeObject* Heap::AllocateRawMap() {
-#ifdef DEBUG
- isolate_->counters()->objs_since_last_full()->Increment();
- isolate_->counters()->objs_since_last_young()->Increment();
-#endif
- MaybeObject* result = map_space_->AllocateRaw(Map::kSize);
- if (result->IsFailure()) old_gen_exhausted_ = true;
-#ifdef DEBUG
- if (!result->IsFailure()) {
- // Maps have their own alignment.
- CHECK((reinterpret_cast<intptr_t>(result) & kMapAlignmentMask) ==
- static_cast<intptr_t>(kHeapObjectTag));
- }
-#endif
- return result;
-}
-
-
-MaybeObject* Heap::AllocateRawCell() {
-#ifdef DEBUG
- isolate_->counters()->objs_since_last_full()->Increment();
- isolate_->counters()->objs_since_last_young()->Increment();
-#endif
- MaybeObject* result = cell_space_->AllocateRaw(JSGlobalPropertyCell::kSize);
- if (result->IsFailure()) old_gen_exhausted_ = true;
- return result;
-}
-
-
-bool Heap::InNewSpace(Object* object) {
- bool result = new_space_.Contains(object);
- ASSERT(!result || // Either not in new space
- gc_state_ != NOT_IN_GC || // ... or in the middle of GC
- InToSpace(object)); // ... or in to-space (where we allocate).
- return result;
-}
-
-
-bool Heap::InFromSpace(Object* object) {
- return new_space_.FromSpaceContains(object);
-}
-
-
-bool Heap::InToSpace(Object* object) {
- return new_space_.ToSpaceContains(object);
-}
-
-
-bool Heap::ShouldBePromoted(Address old_address, int object_size) {
- // An object should be promoted if:
- // - the object has survived a scavenge operation or
- // - to space is already 25% full.
- return old_address < new_space_.age_mark()
- || (new_space_.Size() + object_size) >= (new_space_.Capacity() >> 2);
-}
-
-
-void Heap::RecordWrite(Address address, int offset) {
- if (new_space_.Contains(address)) return;
- ASSERT(!new_space_.FromSpaceContains(address));
- SLOW_ASSERT(Contains(address + offset));
- Page::FromAddress(address)->MarkRegionDirty(address + offset);
-}
-
-
-void Heap::RecordWrites(Address address, int start, int len) {
- if (new_space_.Contains(address)) return;
- ASSERT(!new_space_.FromSpaceContains(address));
- Page* page = Page::FromAddress(address);
- page->SetRegionMarks(page->GetRegionMarks() |
- page->GetRegionMaskForSpan(address + start, len * kPointerSize));
-}
-
-
-OldSpace* Heap::TargetSpace(HeapObject* object) {
- InstanceType type = object->map()->instance_type();
- AllocationSpace space = TargetSpaceId(type);
- return (space == OLD_POINTER_SPACE)
- ? old_pointer_space_
- : old_data_space_;
-}
-
-
-AllocationSpace Heap::TargetSpaceId(InstanceType type) {
- // Heap numbers and sequential strings are promoted to old data space, all
- // other object types are promoted to old pointer space. We do not use
- // object->IsHeapNumber() and object->IsSeqString() because we already
- // know that object has the heap object tag.
-
- // These objects are never allocated in new space.
- ASSERT(type != MAP_TYPE);
- ASSERT(type != CODE_TYPE);
- ASSERT(type != ODDBALL_TYPE);
- ASSERT(type != JS_GLOBAL_PROPERTY_CELL_TYPE);
-
- if (type < FIRST_NONSTRING_TYPE) {
- // There are three string representations: sequential strings, cons
- // strings, and external strings. Only cons strings contain
- // non-map-word pointers to heap objects.
- return ((type & kStringRepresentationMask) == kConsStringTag)
- ? OLD_POINTER_SPACE
- : OLD_DATA_SPACE;
- } else {
- return (type <= LAST_DATA_TYPE) ? OLD_DATA_SPACE : OLD_POINTER_SPACE;
- }
-}
-
-
-void Heap::CopyBlock(Address dst, Address src, int byte_size) {
- ASSERT(IsAligned(byte_size, kPointerSize));
- CopyWords(reinterpret_cast<Object**>(dst),
- reinterpret_cast<Object**>(src),
- byte_size / kPointerSize);
-}
-
-
-void Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
- Address src,
- int byte_size) {
- ASSERT(IsAligned(byte_size, kPointerSize));
-
- Page* page = Page::FromAddress(dst);
- uint32_t marks = page->GetRegionMarks();
-
- for (int remaining = byte_size / kPointerSize;
- remaining > 0;
- remaining--) {
- Memory::Object_at(dst) = Memory::Object_at(src);
-
- if (InNewSpace(Memory::Object_at(dst))) {
- marks |= page->GetRegionMaskForAddress(dst);
- }
-
- dst += kPointerSize;
- src += kPointerSize;
- }
-
- page->SetRegionMarks(marks);
-}
-
-
-void Heap::MoveBlock(Address dst, Address src, int byte_size) {
- ASSERT(IsAligned(byte_size, kPointerSize));
-
- int size_in_words = byte_size / kPointerSize;
-
- if ((dst < src) || (dst >= (src + size_in_words))) {
- ASSERT((dst >= (src + size_in_words)) ||
- ((OffsetFrom(reinterpret_cast<Address>(src)) -
- OffsetFrom(reinterpret_cast<Address>(dst))) >= kPointerSize));
-
- Object** src_slot = reinterpret_cast<Object**>(src);
- Object** dst_slot = reinterpret_cast<Object**>(dst);
- Object** end_slot = src_slot + size_in_words;
-
- while (src_slot != end_slot) {
- *dst_slot++ = *src_slot++;
- }
- } else {
- memmove(dst, src, byte_size);
- }
-}
-
-
-void Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
- Address src,
- int byte_size) {
- ASSERT(IsAligned(byte_size, kPointerSize));
- ASSERT((dst >= (src + byte_size)) ||
- ((OffsetFrom(src) - OffsetFrom(dst)) >= kPointerSize));
-
- CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, byte_size);
-}
-
-
-void Heap::ScavengePointer(HeapObject** p) {
- ScavengeObject(p, *p);
-}
-
-
-void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
- ASSERT(HEAP->InFromSpace(object));
-
- // We use the first word (where the map pointer usually is) of a heap
- // object to record the forwarding pointer. A forwarding pointer can
- // point to an old space, the code space, or the to space of the new
- // generation.
- MapWord first_word = object->map_word();
-
- // If the first word is a forwarding address, the object has already been
- // copied.
- if (first_word.IsForwardingAddress()) {
- *p = first_word.ToForwardingAddress();
- return;
- }
-
- // Call the slow part of scavenge object.
- return ScavengeObjectSlow(p, object);
-}
-
-
-bool Heap::CollectGarbage(AllocationSpace space) {
- return CollectGarbage(space, SelectGarbageCollector(space));
-}
-
-
-MaybeObject* Heap::PrepareForCompare(String* str) {
- // Always flatten small strings and force flattening of long strings
- // after we have accumulated a certain amount we failed to flatten.
- static const int kMaxAlwaysFlattenLength = 32;
- static const int kFlattenLongThreshold = 16*KB;
-
- const int length = str->length();
- MaybeObject* obj = str->TryFlatten();
- if (length <= kMaxAlwaysFlattenLength ||
- unflattened_strings_length_ >= kFlattenLongThreshold) {
- return obj;
- }
- if (obj->IsFailure()) {
- unflattened_strings_length_ += length;
- }
- return str;
-}
-
-
-int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
- ASSERT(HasBeenSetup());
- int amount = amount_of_external_allocated_memory_ + change_in_bytes;
- if (change_in_bytes >= 0) {
- // Avoid overflow.
- if (amount > amount_of_external_allocated_memory_) {
- amount_of_external_allocated_memory_ = amount;
- }
- int amount_since_last_global_gc =
- amount_of_external_allocated_memory_ -
- amount_of_external_allocated_memory_at_last_global_gc_;
- if (amount_since_last_global_gc > external_allocation_limit_) {
- CollectAllGarbage(false);
- }
- } else {
- // Avoid underflow.
- if (amount >= 0) {
- amount_of_external_allocated_memory_ = amount;
- }
- }
- ASSERT(amount_of_external_allocated_memory_ >= 0);
- return amount_of_external_allocated_memory_;
-}
-
-
-void Heap::SetLastScriptId(Object* last_script_id) {
- roots_[kLastScriptIdRootIndex] = last_script_id;
-}
-
-Isolate* Heap::isolate() {
- return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) -
- reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
-}
-
-
-#ifdef DEBUG
-#define GC_GREEDY_CHECK() \
- if (FLAG_gc_greedy) HEAP->GarbageCollectionGreedyCheck()
-#else
-#define GC_GREEDY_CHECK() { }
-#endif
-
-
-// Calls the FUNCTION_CALL function and retries it up to three times
-// to guarantee that any allocations performed during the call will
-// succeed if there's enough memory.
-
-// Warning: Do not use the identifiers __object__, __maybe_object__ or
-// __scope__ in a call to this macro.
-
-#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)\
- do { \
- GC_GREEDY_CHECK(); \
- MaybeObject* __maybe_object__ = FUNCTION_CALL; \
- Object* __object__ = NULL; \
- if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
- if (__maybe_object__->IsOutOfMemory()) { \
- v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_0", true);\
- } \
- if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
- ISOLATE->heap()->CollectGarbage(Failure::cast(__maybe_object__)-> \
- allocation_space()); \
- __maybe_object__ = FUNCTION_CALL; \
- if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
- if (__maybe_object__->IsOutOfMemory()) { \
- v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_1", true);\
- } \
- if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
- ISOLATE->counters()->gc_last_resort_from_handles()->Increment(); \
- ISOLATE->heap()->CollectAllAvailableGarbage(); \
- { \
- AlwaysAllocateScope __scope__; \
- __maybe_object__ = FUNCTION_CALL; \
- } \
- if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
- if (__maybe_object__->IsOutOfMemory() || \
- __maybe_object__->IsRetryAfterGC()) { \
- /* TODO(1181417): Fix this. */ \
- v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_2", true);\
- } \
- RETURN_EMPTY; \
- } while (false)
-
-
-// TODO(isolates): cache isolate: either accept as a parameter or
-// set to some known symbol (__CUR_ISOLATE__?)
-#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \
- CALL_AND_RETRY(ISOLATE, \
- FUNCTION_CALL, \
- return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \
- return Handle<TYPE>())
-
-
-#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL) \
- CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, return, return)
-
-
-#ifdef DEBUG
-
-inline bool Heap::allow_allocation(bool new_state) {
- bool old = allocation_allowed_;
- allocation_allowed_ = new_state;
- return old;
-}
-
-#endif
-
-
-void ExternalStringTable::AddString(String* string) {
- ASSERT(string->IsExternalString());
- if (heap_->InNewSpace(string)) {
- new_space_strings_.Add(string);
- } else {
- old_space_strings_.Add(string);
- }
-}
-
-
-void ExternalStringTable::Iterate(ObjectVisitor* v) {
- if (!new_space_strings_.is_empty()) {
- Object** start = &new_space_strings_[0];
- v->VisitPointers(start, start + new_space_strings_.length());
- }
- if (!old_space_strings_.is_empty()) {
- Object** start = &old_space_strings_[0];
- v->VisitPointers(start, start + old_space_strings_.length());
- }
-}
-
-
-// Verify() is inline to avoid ifdef-s around its calls in release
-// mode.
-void ExternalStringTable::Verify() {
-#ifdef DEBUG
- for (int i = 0; i < new_space_strings_.length(); ++i) {
- ASSERT(heap_->InNewSpace(new_space_strings_[i]));
- ASSERT(new_space_strings_[i] != HEAP->raw_unchecked_null_value());
- }
- for (int i = 0; i < old_space_strings_.length(); ++i) {
- ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
- ASSERT(old_space_strings_[i] != HEAP->raw_unchecked_null_value());
- }
-#endif
-}
-
-
-void ExternalStringTable::AddOldString(String* string) {
- ASSERT(string->IsExternalString());
- ASSERT(!heap_->InNewSpace(string));
- old_space_strings_.Add(string);
-}
-
-
-void ExternalStringTable::ShrinkNewStrings(int position) {
- new_space_strings_.Rewind(position);
- Verify();
-}
-
-
-void Heap::ClearInstanceofCache() {
- set_instanceof_cache_function(the_hole_value());
-}
-
-
-Object* Heap::ToBoolean(bool condition) {
- return condition ? true_value() : false_value();
-}
-
-
-void Heap::CompletelyClearInstanceofCache() {
- set_instanceof_cache_map(the_hole_value());
- set_instanceof_cache_function(the_hole_value());
-}
-
-
-MaybeObject* TranscendentalCache::Get(Type type, double input) {
- SubCache* cache = caches_[type];
- if (cache == NULL) {
- caches_[type] = cache = new SubCache(type);
- }
- return cache->Get(input);
-}
-
-
-Address TranscendentalCache::cache_array_address() {
- return reinterpret_cast<Address>(caches_);
-}
-
-
-double TranscendentalCache::SubCache::Calculate(double input) {
- switch (type_) {
- case ACOS:
- return acos(input);
- case ASIN:
- return asin(input);
- case ATAN:
- return atan(input);
- case COS:
- return cos(input);
- case EXP:
- return exp(input);
- case LOG:
- return log(input);
- case SIN:
- return sin(input);
- case TAN:
- return tan(input);
- default:
- return 0.0; // Never happens.
- }
-}
-
-
-MaybeObject* TranscendentalCache::SubCache::Get(double input) {
- Converter c;
- c.dbl = input;
- int hash = Hash(c);
- Element e = elements_[hash];
- if (e.in[0] == c.integers[0] &&
- e.in[1] == c.integers[1]) {
- ASSERT(e.output != NULL);
- isolate_->counters()->transcendental_cache_hit()->Increment();
- return e.output;
- }
- double answer = Calculate(input);
- isolate_->counters()->transcendental_cache_miss()->Increment();
- Object* heap_number;
- { MaybeObject* maybe_heap_number =
- isolate_->heap()->AllocateHeapNumber(answer);
- if (!maybe_heap_number->ToObject(&heap_number)) return maybe_heap_number;
- }
- elements_[hash].in[0] = c.integers[0];
- elements_[hash].in[1] = c.integers[1];
- elements_[hash].output = heap_number;
- return heap_number;
-}
-
-
-Heap* _inline_get_heap_() {
- return HEAP;
-}
-
-
-void MarkCompactCollector::SetMark(HeapObject* obj) {
- tracer_->increment_marked_count();
-#ifdef DEBUG
- UpdateLiveObjectCount(obj);
-#endif
- obj->SetMark();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_HEAP_INL_H_
diff --git a/src/3rdparty/v8/src/heap-profiler.cc b/src/3rdparty/v8/src/heap-profiler.cc
deleted file mode 100644
index 4815f82..0000000
--- a/src/3rdparty/v8/src/heap-profiler.cc
+++ /dev/null
@@ -1,1173 +0,0 @@
-// Copyright 2009-2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "heap-profiler.h"
-#include "frames-inl.h"
-#include "global-handles.h"
-#include "profile-generator.h"
-#include "string-stream.h"
-
-namespace v8 {
-namespace internal {
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-namespace {
-
-// Clusterizer is a set of helper functions for converting
-// object references into clusters.
-class Clusterizer : public AllStatic {
- public:
- static JSObjectsCluster Clusterize(HeapObject* obj) {
- return Clusterize(obj, true);
- }
- static void InsertIntoTree(JSObjectsClusterTree* tree,
- HeapObject* obj, bool fine_grain);
- static void InsertReferenceIntoTree(JSObjectsClusterTree* tree,
- const JSObjectsCluster& cluster) {
- InsertIntoTree(tree, cluster, 0);
- }
-
- private:
- static JSObjectsCluster Clusterize(HeapObject* obj, bool fine_grain);
- static int CalculateNetworkSize(JSObject* obj);
- static int GetObjectSize(HeapObject* obj) {
- return obj->IsJSObject() ?
- CalculateNetworkSize(JSObject::cast(obj)) : obj->Size();
- }
- static void InsertIntoTree(JSObjectsClusterTree* tree,
- const JSObjectsCluster& cluster, int size);
-};
-
-
-JSObjectsCluster Clusterizer::Clusterize(HeapObject* obj, bool fine_grain) {
- if (obj->IsJSObject()) {
- JSObject* js_obj = JSObject::cast(obj);
- String* constructor = GetConstructorNameForHeapProfile(
- JSObject::cast(js_obj));
- // Differentiate Object and Array instances.
- if (fine_grain && (constructor == HEAP->Object_symbol() ||
- constructor == HEAP->Array_symbol())) {
- return JSObjectsCluster(constructor, obj);
- } else {
- return JSObjectsCluster(constructor);
- }
- } else if (obj->IsString()) {
- return JSObjectsCluster(HEAP->String_symbol());
- } else if (obj->IsJSGlobalPropertyCell()) {
- return JSObjectsCluster(JSObjectsCluster::GLOBAL_PROPERTY);
- } else if (obj->IsCode() || obj->IsSharedFunctionInfo() || obj->IsScript()) {
- return JSObjectsCluster(JSObjectsCluster::CODE);
- }
- return JSObjectsCluster();
-}
-
-
-void Clusterizer::InsertIntoTree(JSObjectsClusterTree* tree,
- HeapObject* obj, bool fine_grain) {
- JSObjectsCluster cluster = Clusterize(obj, fine_grain);
- if (cluster.is_null()) return;
- InsertIntoTree(tree, cluster, GetObjectSize(obj));
-}
-
-
-void Clusterizer::InsertIntoTree(JSObjectsClusterTree* tree,
- const JSObjectsCluster& cluster, int size) {
- JSObjectsClusterTree::Locator loc;
- tree->Insert(cluster, &loc);
- NumberAndSizeInfo number_and_size = loc.value();
- number_and_size.increment_number(1);
- number_and_size.increment_bytes(size);
- loc.set_value(number_and_size);
-}
-
-
-int Clusterizer::CalculateNetworkSize(JSObject* obj) {
- int size = obj->Size();
- // If 'properties' and 'elements' are non-empty (thus, non-shared),
- // take their size into account.
- if (obj->properties() != HEAP->empty_fixed_array()) {
- size += obj->properties()->Size();
- }
- if (obj->elements() != HEAP->empty_fixed_array()) {
- size += obj->elements()->Size();
- }
- // For functions, also account non-empty context and literals sizes.
- if (obj->IsJSFunction()) {
- JSFunction* f = JSFunction::cast(obj);
- if (f->unchecked_context()->IsContext()) {
- size += f->context()->Size();
- }
- if (f->literals()->length() != 0) {
- size += f->literals()->Size();
- }
- }
- return size;
-}
-
-
-// A helper class for recording back references.
-class ReferencesExtractor : public ObjectVisitor {
- public:
- ReferencesExtractor(const JSObjectsCluster& cluster,
- RetainerHeapProfile* profile)
- : cluster_(cluster),
- profile_(profile),
- inside_array_(false) {
- }
-
- void VisitPointer(Object** o) {
- if ((*o)->IsFixedArray() && !inside_array_) {
- // Traverse one level deep for data members that are fixed arrays.
- // This covers the case of 'elements' and 'properties' of JSObject,
- // and function contexts.
- inside_array_ = true;
- FixedArray::cast(*o)->Iterate(this);
- inside_array_ = false;
- } else if ((*o)->IsHeapObject()) {
- profile_->StoreReference(cluster_, HeapObject::cast(*o));
- }
- }
-
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) VisitPointer(p);
- }
-
- private:
- const JSObjectsCluster& cluster_;
- RetainerHeapProfile* profile_;
- bool inside_array_;
-};
-
-
-// A printer interface implementation for the Retainers profile.
-class RetainersPrinter : public RetainerHeapProfile::Printer {
- public:
- void PrintRetainers(const JSObjectsCluster& cluster,
- const StringStream& retainers) {
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- cluster.Print(&stream);
- LOG(ISOLATE,
- HeapSampleJSRetainersEvent(
- *(stream.ToCString()), *(retainers.ToCString())));
- }
-};
-
-
-// Visitor for printing a cluster tree.
-class ClusterTreePrinter BASE_EMBEDDED {
- public:
- explicit ClusterTreePrinter(StringStream* stream) : stream_(stream) {}
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- Print(stream_, cluster, number_and_size);
- }
- static void Print(StringStream* stream,
- const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size);
-
- private:
- StringStream* stream_;
-};
-
-
-void ClusterTreePrinter::Print(StringStream* stream,
- const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- stream->Put(',');
- cluster.Print(stream);
- stream->Add(";%d", number_and_size.number());
-}
-
-
-// Visitor for printing a retainer tree.
-class SimpleRetainerTreePrinter BASE_EMBEDDED {
- public:
- explicit SimpleRetainerTreePrinter(RetainerHeapProfile::Printer* printer)
- : printer_(printer) {}
- void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
-
- private:
- RetainerHeapProfile::Printer* printer_;
-};
-
-
-void SimpleRetainerTreePrinter::Call(const JSObjectsCluster& cluster,
- JSObjectsClusterTree* tree) {
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- ClusterTreePrinter retainers_printer(&stream);
- tree->ForEach(&retainers_printer);
- printer_->PrintRetainers(cluster, stream);
-}
-
-
-// Visitor for aggregating references count of equivalent clusters.
-class RetainersAggregator BASE_EMBEDDED {
- public:
- RetainersAggregator(ClustersCoarser* coarser, JSObjectsClusterTree* dest_tree)
- : coarser_(coarser), dest_tree_(dest_tree) {}
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size);
-
- private:
- ClustersCoarser* coarser_;
- JSObjectsClusterTree* dest_tree_;
-};
-
-
-void RetainersAggregator::Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- JSObjectsCluster eq = coarser_->GetCoarseEquivalent(cluster);
- if (eq.is_null()) eq = cluster;
- JSObjectsClusterTree::Locator loc;
- dest_tree_->Insert(eq, &loc);
- NumberAndSizeInfo aggregated_number = loc.value();
- aggregated_number.increment_number(number_and_size.number());
- loc.set_value(aggregated_number);
-}
-
-
-// Visitor for printing retainers tree. Aggregates equivalent retainer clusters.
-class AggregatingRetainerTreePrinter BASE_EMBEDDED {
- public:
- AggregatingRetainerTreePrinter(ClustersCoarser* coarser,
- RetainerHeapProfile::Printer* printer)
- : coarser_(coarser), printer_(printer) {}
- void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
-
- private:
- ClustersCoarser* coarser_;
- RetainerHeapProfile::Printer* printer_;
-};
-
-
-void AggregatingRetainerTreePrinter::Call(const JSObjectsCluster& cluster,
- JSObjectsClusterTree* tree) {
- if (!coarser_->GetCoarseEquivalent(cluster).is_null()) return;
- JSObjectsClusterTree dest_tree_;
- RetainersAggregator retainers_aggregator(coarser_, &dest_tree_);
- tree->ForEach(&retainers_aggregator);
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- ClusterTreePrinter retainers_printer(&stream);
- dest_tree_.ForEach(&retainers_printer);
- printer_->PrintRetainers(cluster, stream);
-}
-
-} // namespace
-
-
-// A helper class for building a retainers tree, that aggregates
-// all equivalent clusters.
-class RetainerTreeAggregator {
- public:
- explicit RetainerTreeAggregator(ClustersCoarser* coarser)
- : coarser_(coarser) {}
- void Process(JSObjectsRetainerTree* input_tree) {
- input_tree->ForEach(this);
- }
- void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
- JSObjectsRetainerTree& output_tree() { return output_tree_; }
-
- private:
- ClustersCoarser* coarser_;
- JSObjectsRetainerTree output_tree_;
-};
-
-
-void RetainerTreeAggregator::Call(const JSObjectsCluster& cluster,
- JSObjectsClusterTree* tree) {
- JSObjectsCluster eq = coarser_->GetCoarseEquivalent(cluster);
- if (eq.is_null()) return;
- JSObjectsRetainerTree::Locator loc;
- if (output_tree_.Insert(eq, &loc)) {
- loc.set_value(new JSObjectsClusterTree());
- }
- RetainersAggregator retainers_aggregator(coarser_, loc.value());
- tree->ForEach(&retainers_aggregator);
-}
-
-
-HeapProfiler::HeapProfiler()
- : snapshots_(new HeapSnapshotsCollection()),
- next_snapshot_uid_(1) {
-}
-
-
-HeapProfiler::~HeapProfiler() {
- delete snapshots_;
-}
-
-
-void HeapProfiler::ResetSnapshots() {
- delete snapshots_;
- snapshots_ = new HeapSnapshotsCollection();
-}
-
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-void HeapProfiler::Setup() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- Isolate* isolate = Isolate::Current();
- if (isolate->heap_profiler() == NULL) {
- isolate->set_heap_profiler(new HeapProfiler());
- }
-#endif
-}
-
-
-void HeapProfiler::TearDown() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- Isolate* isolate = Isolate::Current();
- delete isolate->heap_profiler();
- isolate->set_heap_profiler(NULL);
-#endif
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name,
- int type,
- v8::ActivityControl* control) {
- ASSERT(Isolate::Current()->heap_profiler() != NULL);
- return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
- type,
- control);
-}
-
-
-HeapSnapshot* HeapProfiler::TakeSnapshot(String* name,
- int type,
- v8::ActivityControl* control) {
- ASSERT(Isolate::Current()->heap_profiler() != NULL);
- return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
- type,
- control);
-}
-
-
-void HeapProfiler::DefineWrapperClass(
- uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback) {
- ASSERT(class_id != v8::HeapProfiler::kPersistentHandleNoClassId);
- if (wrapper_callbacks_.length() <= class_id) {
- wrapper_callbacks_.AddBlock(
- NULL, class_id - wrapper_callbacks_.length() + 1);
- }
- wrapper_callbacks_[class_id] = callback;
-}
-
-
-v8::RetainedObjectInfo* HeapProfiler::ExecuteWrapperClassCallback(
- uint16_t class_id, Object** wrapper) {
- if (wrapper_callbacks_.length() <= class_id) return NULL;
- return wrapper_callbacks_[class_id](
- class_id, Utils::ToLocal(Handle<Object>(wrapper)));
-}
-
-
-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name,
- int type,
- v8::ActivityControl* control) {
- HeapSnapshot::Type s_type = static_cast<HeapSnapshot::Type>(type);
- HeapSnapshot* result =
- snapshots_->NewSnapshot(s_type, name, next_snapshot_uid_++);
- bool generation_completed = true;
- switch (s_type) {
- case HeapSnapshot::kFull: {
- HEAP->CollectAllGarbage(true);
- HeapSnapshotGenerator generator(result, control);
- generation_completed = generator.GenerateSnapshot();
- break;
- }
- case HeapSnapshot::kAggregated: {
- HEAP->CollectAllGarbage(true);
- AggregatedHeapSnapshot agg_snapshot;
- AggregatedHeapSnapshotGenerator generator(&agg_snapshot);
- generator.GenerateSnapshot();
- generator.FillHeapSnapshot(result);
- break;
- }
- default:
- UNREACHABLE();
- }
- if (!generation_completed) {
- delete result;
- result = NULL;
- }
- snapshots_->SnapshotGenerationFinished(result);
- return result;
-}
-
-
-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name,
- int type,
- v8::ActivityControl* control) {
- return TakeSnapshotImpl(snapshots_->names()->GetName(name), type, control);
-}
-
-
-int HeapProfiler::GetSnapshotsCount() {
- HeapProfiler* profiler = Isolate::Current()->heap_profiler();
- ASSERT(profiler != NULL);
- return profiler->snapshots_->snapshots()->length();
-}
-
-
-HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
- HeapProfiler* profiler = Isolate::Current()->heap_profiler();
- ASSERT(profiler != NULL);
- return profiler->snapshots_->snapshots()->at(index);
-}
-
-
-HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
- HeapProfiler* profiler = Isolate::Current()->heap_profiler();
- ASSERT(profiler != NULL);
- return profiler->snapshots_->GetSnapshot(uid);
-}
-
-
-void HeapProfiler::DeleteAllSnapshots() {
- HeapProfiler* profiler = Isolate::Current()->heap_profiler();
- ASSERT(profiler != NULL);
- profiler->ResetSnapshots();
-}
-
-
-void HeapProfiler::ObjectMoveEvent(Address from, Address to) {
- snapshots_->ObjectMoveEvent(from, to);
-}
-
-
-const JSObjectsClusterTreeConfig::Key JSObjectsClusterTreeConfig::kNoKey;
-const JSObjectsClusterTreeConfig::Value JSObjectsClusterTreeConfig::kNoValue;
-
-
-ConstructorHeapProfile::ConstructorHeapProfile()
- : zscope_(DELETE_ON_EXIT) {
-}
-
-
-void ConstructorHeapProfile::Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- cluster.Print(&stream);
- LOG(ISOLATE,
- HeapSampleJSConstructorEvent(*(stream.ToCString()),
- number_and_size.number(),
- number_and_size.bytes()));
-}
-
-
-void ConstructorHeapProfile::CollectStats(HeapObject* obj) {
- Clusterizer::InsertIntoTree(&js_objects_info_tree_, obj, false);
-}
-
-
-void ConstructorHeapProfile::PrintStats() {
- js_objects_info_tree_.ForEach(this);
-}
-
-
-static const char* GetConstructorName(const char* name) {
- return name[0] != '\0' ? name : "(anonymous)";
-}
-
-
-const char* JSObjectsCluster::GetSpecialCaseName() const {
- if (constructor_ == FromSpecialCase(ROOTS)) {
- return "(roots)";
- } else if (constructor_ == FromSpecialCase(GLOBAL_PROPERTY)) {
- return "(global property)";
- } else if (constructor_ == FromSpecialCase(CODE)) {
- return "(code)";
- } else if (constructor_ == FromSpecialCase(SELF)) {
- return "(self)";
- }
- return NULL;
-}
-
-
-void JSObjectsCluster::Print(StringStream* accumulator) const {
- ASSERT(!is_null());
- const char* special_case_name = GetSpecialCaseName();
- if (special_case_name != NULL) {
- accumulator->Add(special_case_name);
- } else {
- SmartPointer<char> s_name(
- constructor_->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL));
- accumulator->Add("%s", GetConstructorName(*s_name));
- if (instance_ != NULL) {
- accumulator->Add(":%p", static_cast<void*>(instance_));
- }
- }
-}
-
-
-void JSObjectsCluster::DebugPrint(StringStream* accumulator) const {
- if (!is_null()) {
- Print(accumulator);
- } else {
- accumulator->Add("(null cluster)");
- }
-}
-
-
-inline ClustersCoarser::ClusterBackRefs::ClusterBackRefs(
- const JSObjectsCluster& cluster_)
- : cluster(cluster_), refs(kInitialBackrefsListCapacity) {
-}
-
-
-inline ClustersCoarser::ClusterBackRefs::ClusterBackRefs(
- const ClustersCoarser::ClusterBackRefs& src)
- : cluster(src.cluster), refs(src.refs.capacity()) {
- refs.AddAll(src.refs);
-}
-
-
-inline ClustersCoarser::ClusterBackRefs&
- ClustersCoarser::ClusterBackRefs::operator=(
- const ClustersCoarser::ClusterBackRefs& src) {
- if (this == &src) return *this;
- cluster = src.cluster;
- refs.Clear();
- refs.AddAll(src.refs);
- return *this;
-}
-
-
-inline int ClustersCoarser::ClusterBackRefs::Compare(
- const ClustersCoarser::ClusterBackRefs& a,
- const ClustersCoarser::ClusterBackRefs& b) {
- int cmp = JSObjectsCluster::CompareConstructors(a.cluster, b.cluster);
- if (cmp != 0) return cmp;
- if (a.refs.length() < b.refs.length()) return -1;
- if (a.refs.length() > b.refs.length()) return 1;
- for (int i = 0; i < a.refs.length(); ++i) {
- int cmp = JSObjectsCluster::Compare(a.refs[i], b.refs[i]);
- if (cmp != 0) return cmp;
- }
- return 0;
-}
-
-
-ClustersCoarser::ClustersCoarser()
- : zscope_(DELETE_ON_EXIT),
- sim_list_(ClustersCoarser::kInitialSimilarityListCapacity),
- current_pair_(NULL),
- current_set_(NULL),
- self_(NULL) {
-}
-
-
-void ClustersCoarser::Call(const JSObjectsCluster& cluster,
- JSObjectsClusterTree* tree) {
- if (!cluster.can_be_coarsed()) return;
- ClusterBackRefs pair(cluster);
- ASSERT(current_pair_ == NULL);
- current_pair_ = &pair;
- current_set_ = new JSObjectsRetainerTree();
- self_ = &cluster;
- tree->ForEach(this);
- sim_list_.Add(pair);
- current_pair_ = NULL;
- current_set_ = NULL;
- self_ = NULL;
-}
-
-
-void ClustersCoarser::Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- ASSERT(current_pair_ != NULL);
- ASSERT(current_set_ != NULL);
- ASSERT(self_ != NULL);
- JSObjectsRetainerTree::Locator loc;
- if (JSObjectsCluster::Compare(*self_, cluster) == 0) {
- current_pair_->refs.Add(JSObjectsCluster(JSObjectsCluster::SELF));
- return;
- }
- JSObjectsCluster eq = GetCoarseEquivalent(cluster);
- if (!eq.is_null()) {
- if (current_set_->Find(eq, &loc)) return;
- current_pair_->refs.Add(eq);
- current_set_->Insert(eq, &loc);
- } else {
- current_pair_->refs.Add(cluster);
- }
-}
-
-
-void ClustersCoarser::Process(JSObjectsRetainerTree* tree) {
- int last_eq_clusters = -1;
- for (int i = 0; i < kMaxPassesCount; ++i) {
- sim_list_.Clear();
- const int curr_eq_clusters = DoProcess(tree);
- // If no new cluster equivalents discovered, abort processing.
- if (last_eq_clusters == curr_eq_clusters) break;
- last_eq_clusters = curr_eq_clusters;
- }
-}
-
-
-int ClustersCoarser::DoProcess(JSObjectsRetainerTree* tree) {
- tree->ForEach(this);
- sim_list_.Iterate(ClusterBackRefs::SortRefsIterator);
- sim_list_.Sort(ClusterBackRefsCmp);
- return FillEqualityTree();
-}
-
-
-JSObjectsCluster ClustersCoarser::GetCoarseEquivalent(
- const JSObjectsCluster& cluster) {
- if (!cluster.can_be_coarsed()) return JSObjectsCluster();
- EqualityTree::Locator loc;
- return eq_tree_.Find(cluster, &loc) ? loc.value() : JSObjectsCluster();
-}
-
-
-bool ClustersCoarser::HasAnEquivalent(const JSObjectsCluster& cluster) {
- // Return true for coarsible clusters that have a non-identical equivalent.
- if (!cluster.can_be_coarsed()) return false;
- JSObjectsCluster eq = GetCoarseEquivalent(cluster);
- return !eq.is_null() && JSObjectsCluster::Compare(cluster, eq) != 0;
-}
-
-
-int ClustersCoarser::FillEqualityTree() {
- int eq_clusters_count = 0;
- int eq_to = 0;
- bool first_added = false;
- for (int i = 1; i < sim_list_.length(); ++i) {
- if (ClusterBackRefs::Compare(sim_list_[i], sim_list_[eq_to]) == 0) {
- EqualityTree::Locator loc;
- if (!first_added) {
- // Add self-equivalence, if we have more than one item in this
- // equivalence class.
- eq_tree_.Insert(sim_list_[eq_to].cluster, &loc);
- loc.set_value(sim_list_[eq_to].cluster);
- first_added = true;
- }
- eq_tree_.Insert(sim_list_[i].cluster, &loc);
- loc.set_value(sim_list_[eq_to].cluster);
- ++eq_clusters_count;
- } else {
- eq_to = i;
- first_added = false;
- }
- }
- return eq_clusters_count;
-}
-
-
-const JSObjectsCluster ClustersCoarser::ClusterEqualityConfig::kNoKey;
-const JSObjectsCluster ClustersCoarser::ClusterEqualityConfig::kNoValue;
-const JSObjectsRetainerTreeConfig::Key JSObjectsRetainerTreeConfig::kNoKey;
-const JSObjectsRetainerTreeConfig::Value JSObjectsRetainerTreeConfig::kNoValue =
- NULL;
-
-
-RetainerHeapProfile::RetainerHeapProfile()
- : zscope_(DELETE_ON_EXIT),
- aggregator_(NULL) {
- JSObjectsCluster roots(JSObjectsCluster::ROOTS);
- ReferencesExtractor extractor(roots, this);
- HEAP->IterateRoots(&extractor, VISIT_ONLY_STRONG);
-}
-
-
-RetainerHeapProfile::~RetainerHeapProfile() {
- delete aggregator_;
-}
-
-
-void RetainerHeapProfile::StoreReference(const JSObjectsCluster& cluster,
- HeapObject* ref) {
- JSObjectsCluster ref_cluster = Clusterizer::Clusterize(ref);
- if (ref_cluster.is_null()) return;
- JSObjectsRetainerTree::Locator ref_loc;
- if (retainers_tree_.Insert(ref_cluster, &ref_loc)) {
- ref_loc.set_value(new JSObjectsClusterTree());
- }
- JSObjectsClusterTree* referenced_by = ref_loc.value();
- Clusterizer::InsertReferenceIntoTree(referenced_by, cluster);
-}
-
-
-void RetainerHeapProfile::CollectStats(HeapObject* obj) {
- const JSObjectsCluster cluster = Clusterizer::Clusterize(obj);
- if (cluster.is_null()) return;
- ReferencesExtractor extractor(cluster, this);
- obj->Iterate(&extractor);
-}
-
-
-void RetainerHeapProfile::CoarseAndAggregate() {
- coarser_.Process(&retainers_tree_);
- ASSERT(aggregator_ == NULL);
- aggregator_ = new RetainerTreeAggregator(&coarser_);
- aggregator_->Process(&retainers_tree_);
-}
-
-
-void RetainerHeapProfile::DebugPrintStats(
- RetainerHeapProfile::Printer* printer) {
- // Print clusters that have no equivalents, aggregating their retainers.
- AggregatingRetainerTreePrinter agg_printer(&coarser_, printer);
- retainers_tree_.ForEach(&agg_printer);
- // Print clusters that have equivalents.
- SimpleRetainerTreePrinter s_printer(printer);
- aggregator_->output_tree().ForEach(&s_printer);
-}
-
-
-void RetainerHeapProfile::PrintStats() {
- RetainersPrinter printer;
- DebugPrintStats(&printer);
-}
-
-
-//
-// HeapProfiler class implementation.
-//
-static void StackWeakReferenceCallback(Persistent<Value> object,
- void* trace) {
- DeleteArray(static_cast<Address*>(trace));
- object.Dispose();
-}
-
-
-static void PrintProducerStackTrace(Object* obj, void* trace) {
- if (!obj->IsJSObject()) return;
- String* constructor = GetConstructorNameForHeapProfile(JSObject::cast(obj));
- SmartPointer<char> s_name(
- constructor->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL));
- LOG(ISOLATE,
- HeapSampleJSProducerEvent(GetConstructorName(*s_name),
- reinterpret_cast<Address*>(trace)));
-}
-
-
-void HeapProfiler::WriteSample() {
- Isolate* isolate = Isolate::Current();
- LOG(isolate, HeapSampleBeginEvent("Heap", "allocated"));
- LOG(isolate,
- HeapSampleStats(
- "Heap", "allocated", HEAP->CommittedMemory(), HEAP->SizeOfObjects()));
-
- AggregatedHeapSnapshot snapshot;
- AggregatedHeapSnapshotGenerator generator(&snapshot);
- generator.GenerateSnapshot();
-
- HistogramInfo* info = snapshot.info();
- for (int i = FIRST_NONSTRING_TYPE;
- i <= AggregatedHeapSnapshotGenerator::kAllStringsType;
- ++i) {
- if (info[i].bytes() > 0) {
- LOG(isolate,
- HeapSampleItemEvent(info[i].name(), info[i].number(),
- info[i].bytes()));
- }
- }
-
- snapshot.js_cons_profile()->PrintStats();
- snapshot.js_retainer_profile()->PrintStats();
-
- isolate->global_handles()->IterateWeakRoots(PrintProducerStackTrace,
- StackWeakReferenceCallback);
-
- LOG(isolate, HeapSampleEndEvent("Heap", "allocated"));
-}
-
-
-AggregatedHeapSnapshot::AggregatedHeapSnapshot()
- : info_(NewArray<HistogramInfo>(
- AggregatedHeapSnapshotGenerator::kAllStringsType + 1)) {
-#define DEF_TYPE_NAME(name) info_[name].set_name(#name);
- INSTANCE_TYPE_LIST(DEF_TYPE_NAME);
-#undef DEF_TYPE_NAME
- info_[AggregatedHeapSnapshotGenerator::kAllStringsType].set_name(
- "STRING_TYPE");
-}
-
-
-AggregatedHeapSnapshot::~AggregatedHeapSnapshot() {
- DeleteArray(info_);
-}
-
-
-AggregatedHeapSnapshotGenerator::AggregatedHeapSnapshotGenerator(
- AggregatedHeapSnapshot* agg_snapshot)
- : agg_snapshot_(agg_snapshot) {
-}
-
-
-void AggregatedHeapSnapshotGenerator::CalculateStringsStats() {
- HistogramInfo* info = agg_snapshot_->info();
- HistogramInfo& strings = info[kAllStringsType];
- // Lump all the string types together.
-#define INCREMENT_SIZE(type, size, name, camel_name) \
- strings.increment_number(info[type].number()); \
- strings.increment_bytes(info[type].bytes());
- STRING_TYPE_LIST(INCREMENT_SIZE);
-#undef INCREMENT_SIZE
-}
-
-
-void AggregatedHeapSnapshotGenerator::CollectStats(HeapObject* obj) {
- InstanceType type = obj->map()->instance_type();
- ASSERT(0 <= type && type <= LAST_TYPE);
- agg_snapshot_->info()[type].increment_number(1);
- agg_snapshot_->info()[type].increment_bytes(obj->Size());
-}
-
-
-void AggregatedHeapSnapshotGenerator::GenerateSnapshot() {
- HeapIterator iterator(HeapIterator::kFilterUnreachable);
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- CollectStats(obj);
- agg_snapshot_->js_cons_profile()->CollectStats(obj);
- agg_snapshot_->js_retainer_profile()->CollectStats(obj);
- }
- CalculateStringsStats();
- agg_snapshot_->js_retainer_profile()->CoarseAndAggregate();
-}
-
-
-class CountingConstructorHeapProfileIterator {
- public:
- CountingConstructorHeapProfileIterator()
- : entities_count_(0), children_count_(0) {
- }
-
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- ++entities_count_;
- children_count_ += number_and_size.number();
- }
-
- int entities_count() { return entities_count_; }
- int children_count() { return children_count_; }
-
- private:
- int entities_count_;
- int children_count_;
-};
-
-
-static HeapEntry* AddEntryFromAggregatedSnapshot(HeapSnapshot* snapshot,
- int* root_child_index,
- HeapEntry::Type type,
- const char* name,
- int count,
- int size,
- int children_count,
- int retainers_count) {
- HeapEntry* entry = snapshot->AddEntry(
- type, name, count, size, children_count, retainers_count);
- ASSERT(entry != NULL);
- snapshot->root()->SetUnidirElementReference(*root_child_index,
- *root_child_index + 1,
- entry);
- *root_child_index = *root_child_index + 1;
- return entry;
-}
-
-
-class AllocatingConstructorHeapProfileIterator {
- public:
- AllocatingConstructorHeapProfileIterator(HeapSnapshot* snapshot,
- int* root_child_index)
- : snapshot_(snapshot),
- root_child_index_(root_child_index) {
- }
-
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- const char* name = cluster.GetSpecialCaseName();
- if (name == NULL) {
- name = snapshot_->collection()->names()->GetFunctionName(
- cluster.constructor());
- }
- AddEntryFromAggregatedSnapshot(snapshot_,
- root_child_index_,
- HeapEntry::kObject,
- name,
- number_and_size.number(),
- number_and_size.bytes(),
- 0,
- 0);
- }
-
- private:
- HeapSnapshot* snapshot_;
- int* root_child_index_;
-};
-
-
-static HeapObject* ClusterAsHeapObject(const JSObjectsCluster& cluster) {
- return cluster.can_be_coarsed() ?
- reinterpret_cast<HeapObject*>(cluster.instance()) : cluster.constructor();
-}
-
-
-static JSObjectsCluster HeapObjectAsCluster(HeapObject* object) {
- if (object->IsString()) {
- return JSObjectsCluster(String::cast(object));
- } else {
- JSObject* js_obj = JSObject::cast(object);
- String* constructor = GetConstructorNameForHeapProfile(
- JSObject::cast(js_obj));
- return JSObjectsCluster(constructor, object);
- }
-}
-
-
-class CountingRetainersIterator {
- public:
- CountingRetainersIterator(const JSObjectsCluster& child_cluster,
- HeapEntriesAllocator* allocator,
- HeapEntriesMap* map)
- : child_(ClusterAsHeapObject(child_cluster)),
- allocator_(allocator),
- map_(map) {
- if (map_->Map(child_) == NULL)
- map_->Pair(child_, allocator_, HeapEntriesMap::kHeapEntryPlaceholder);
- }
-
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- if (map_->Map(ClusterAsHeapObject(cluster)) == NULL)
- map_->Pair(ClusterAsHeapObject(cluster),
- allocator_,
- HeapEntriesMap::kHeapEntryPlaceholder);
- map_->CountReference(ClusterAsHeapObject(cluster), child_);
- }
-
- private:
- HeapObject* child_;
- HeapEntriesAllocator* allocator_;
- HeapEntriesMap* map_;
-};
-
-
-class AllocatingRetainersIterator {
- public:
- AllocatingRetainersIterator(const JSObjectsCluster& child_cluster,
- HeapEntriesAllocator*,
- HeapEntriesMap* map)
- : child_(ClusterAsHeapObject(child_cluster)), map_(map) {
- child_entry_ = map_->Map(child_);
- ASSERT(child_entry_ != NULL);
- }
-
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- int child_index, retainer_index;
- map_->CountReference(ClusterAsHeapObject(cluster),
- child_,
- &child_index,
- &retainer_index);
- map_->Map(ClusterAsHeapObject(cluster))->SetIndexedReference(
- HeapGraphEdge::kElement,
- child_index,
- number_and_size.number(),
- child_entry_,
- retainer_index);
- }
-
- private:
- HeapObject* child_;
- HeapEntriesMap* map_;
- HeapEntry* child_entry_;
-};
-
-
-template<class RetainersIterator>
-class AggregatingRetainerTreeIterator {
- public:
- explicit AggregatingRetainerTreeIterator(ClustersCoarser* coarser,
- HeapEntriesAllocator* allocator,
- HeapEntriesMap* map)
- : coarser_(coarser), allocator_(allocator), map_(map) {
- }
-
- void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree) {
- if (coarser_ != NULL &&
- !coarser_->GetCoarseEquivalent(cluster).is_null()) return;
- JSObjectsClusterTree* tree_to_iterate = tree;
- ZoneScope zs(DELETE_ON_EXIT);
- JSObjectsClusterTree dest_tree_;
- if (coarser_ != NULL) {
- RetainersAggregator retainers_aggregator(coarser_, &dest_tree_);
- tree->ForEach(&retainers_aggregator);
- tree_to_iterate = &dest_tree_;
- }
- RetainersIterator iterator(cluster, allocator_, map_);
- tree_to_iterate->ForEach(&iterator);
- }
-
- private:
- ClustersCoarser* coarser_;
- HeapEntriesAllocator* allocator_;
- HeapEntriesMap* map_;
-};
-
-
-class AggregatedRetainerTreeAllocator : public HeapEntriesAllocator {
- public:
- AggregatedRetainerTreeAllocator(HeapSnapshot* snapshot,
- int* root_child_index)
- : snapshot_(snapshot), root_child_index_(root_child_index) {
- }
- ~AggregatedRetainerTreeAllocator() { }
-
- HeapEntry* AllocateEntry(
- HeapThing ptr, int children_count, int retainers_count) {
- HeapObject* obj = reinterpret_cast<HeapObject*>(ptr);
- JSObjectsCluster cluster = HeapObjectAsCluster(obj);
- const char* name = cluster.GetSpecialCaseName();
- if (name == NULL) {
- name = snapshot_->collection()->names()->GetFunctionName(
- cluster.constructor());
- }
- return AddEntryFromAggregatedSnapshot(
- snapshot_, root_child_index_, HeapEntry::kObject, name,
- 0, 0, children_count, retainers_count);
- }
-
- private:
- HeapSnapshot* snapshot_;
- int* root_child_index_;
-};
-
-
-template<class Iterator>
-void AggregatedHeapSnapshotGenerator::IterateRetainers(
- HeapEntriesAllocator* allocator, HeapEntriesMap* entries_map) {
- RetainerHeapProfile* p = agg_snapshot_->js_retainer_profile();
- AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_1(
- p->coarser(), allocator, entries_map);
- p->retainers_tree()->ForEach(&agg_ret_iter_1);
- AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_2(
- NULL, allocator, entries_map);
- p->aggregator()->output_tree().ForEach(&agg_ret_iter_2);
-}
-
-
-void AggregatedHeapSnapshotGenerator::FillHeapSnapshot(HeapSnapshot* snapshot) {
- // Count the number of entities.
- int histogram_entities_count = 0;
- int histogram_children_count = 0;
- int histogram_retainers_count = 0;
- for (int i = FIRST_NONSTRING_TYPE; i <= kAllStringsType; ++i) {
- if (agg_snapshot_->info()[i].bytes() > 0) {
- ++histogram_entities_count;
- }
- }
- CountingConstructorHeapProfileIterator counting_cons_iter;
- agg_snapshot_->js_cons_profile()->ForEach(&counting_cons_iter);
- histogram_entities_count += counting_cons_iter.entities_count();
- HeapEntriesMap entries_map;
- int root_child_index = 0;
- AggregatedRetainerTreeAllocator allocator(snapshot, &root_child_index);
- IterateRetainers<CountingRetainersIterator>(&allocator, &entries_map);
- histogram_entities_count += entries_map.entries_count();
- histogram_children_count += entries_map.total_children_count();
- histogram_retainers_count += entries_map.total_retainers_count();
-
- // Root entry references all other entries.
- histogram_children_count += histogram_entities_count;
- int root_children_count = histogram_entities_count;
- ++histogram_entities_count;
-
- // Allocate and fill entries in the snapshot, allocate references.
- snapshot->AllocateEntries(histogram_entities_count,
- histogram_children_count,
- histogram_retainers_count);
- snapshot->AddRootEntry(root_children_count);
- for (int i = FIRST_NONSTRING_TYPE; i <= kAllStringsType; ++i) {
- if (agg_snapshot_->info()[i].bytes() > 0) {
- AddEntryFromAggregatedSnapshot(snapshot,
- &root_child_index,
- HeapEntry::kHidden,
- agg_snapshot_->info()[i].name(),
- agg_snapshot_->info()[i].number(),
- agg_snapshot_->info()[i].bytes(),
- 0,
- 0);
- }
- }
- AllocatingConstructorHeapProfileIterator alloc_cons_iter(
- snapshot, &root_child_index);
- agg_snapshot_->js_cons_profile()->ForEach(&alloc_cons_iter);
- entries_map.AllocateEntries();
-
- // Fill up references.
- IterateRetainers<AllocatingRetainersIterator>(&allocator, &entries_map);
-
- snapshot->SetDominatorsToSelf();
-}
-
-
-void ProducerHeapProfile::Setup() {
- can_log_ = true;
-}
-
-void ProducerHeapProfile::DoRecordJSObjectAllocation(Object* obj) {
- ASSERT(FLAG_log_producers);
- if (!can_log_) return;
- int framesCount = 0;
- for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
- ++framesCount;
- }
- if (framesCount == 0) return;
- ++framesCount; // Reserve place for the terminator item.
- Vector<Address> stack(NewArray<Address>(framesCount), framesCount);
- int i = 0;
- for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
- stack[i++] = it.frame()->pc();
- }
- stack[i] = NULL;
- Handle<Object> handle = isolate_->global_handles()->Create(obj);
- isolate_->global_handles()->MakeWeak(handle.location(),
- static_cast<void*>(stack.start()),
- StackWeakReferenceCallback);
-}
-
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/heap-profiler.h b/src/3rdparty/v8/src/heap-profiler.h
deleted file mode 100644
index 89a2e8a..0000000
--- a/src/3rdparty/v8/src/heap-profiler.h
+++ /dev/null
@@ -1,396 +0,0 @@
-// Copyright 2009-2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_HEAP_PROFILER_H_
-#define V8_HEAP_PROFILER_H_
-
-#include "isolate.h"
-#include "zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-class HeapSnapshot;
-class HeapSnapshotsCollection;
-
-#define HEAP_PROFILE(heap, call) \
- do { \
- v8::internal::HeapProfiler* profiler = heap->isolate()->heap_profiler(); \
- if (profiler != NULL && profiler->is_profiling()) { \
- profiler->call; \
- } \
- } while (false)
-#else
-#define HEAP_PROFILE(heap, call) ((void) 0)
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-// The HeapProfiler writes data to the log files, which can be postprocessed
-// to generate .hp files for use by the GHC/Valgrind tool hp2ps.
-class HeapProfiler {
- public:
- static void Setup();
- static void TearDown();
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- static HeapSnapshot* TakeSnapshot(const char* name,
- int type,
- v8::ActivityControl* control);
- static HeapSnapshot* TakeSnapshot(String* name,
- int type,
- v8::ActivityControl* control);
- static int GetSnapshotsCount();
- static HeapSnapshot* GetSnapshot(int index);
- static HeapSnapshot* FindSnapshot(unsigned uid);
- static void DeleteAllSnapshots();
-
- void ObjectMoveEvent(Address from, Address to);
-
- void DefineWrapperClass(
- uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback);
-
- v8::RetainedObjectInfo* ExecuteWrapperClassCallback(uint16_t class_id,
- Object** wrapper);
- INLINE(bool is_profiling()) {
- return snapshots_->is_tracking_objects();
- }
-
- // Obsolete interface.
- // Write a single heap sample to the log file.
- static void WriteSample();
-
- private:
- HeapProfiler();
- ~HeapProfiler();
- HeapSnapshot* TakeSnapshotImpl(const char* name,
- int type,
- v8::ActivityControl* control);
- HeapSnapshot* TakeSnapshotImpl(String* name,
- int type,
- v8::ActivityControl* control);
- void ResetSnapshots();
-
- HeapSnapshotsCollection* snapshots_;
- unsigned next_snapshot_uid_;
- List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-};
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-// JSObjectsCluster describes a group of JS objects that are
-// considered equivalent in terms of a particular profile.
-class JSObjectsCluster BASE_EMBEDDED {
- public:
- // These special cases are used in retainer profile.
- enum SpecialCase {
- ROOTS = 1,
- GLOBAL_PROPERTY = 2,
- CODE = 3,
- SELF = 100 // This case is used in ClustersCoarser only.
- };
-
- JSObjectsCluster() : constructor_(NULL), instance_(NULL) {}
- explicit JSObjectsCluster(String* constructor)
- : constructor_(constructor), instance_(NULL) {}
- explicit JSObjectsCluster(SpecialCase special)
- : constructor_(FromSpecialCase(special)), instance_(NULL) {}
- JSObjectsCluster(String* constructor, Object* instance)
- : constructor_(constructor), instance_(instance) {}
-
- static int CompareConstructors(const JSObjectsCluster& a,
- const JSObjectsCluster& b) {
- // Strings are unique, so it is sufficient to compare their pointers.
- return a.constructor_ == b.constructor_ ? 0
- : (a.constructor_ < b.constructor_ ? -1 : 1);
- }
- static int Compare(const JSObjectsCluster& a, const JSObjectsCluster& b) {
- // Strings are unique, so it is sufficient to compare their pointers.
- const int cons_cmp = CompareConstructors(a, b);
- return cons_cmp == 0 ?
- (a.instance_ == b.instance_ ? 0 : (a.instance_ < b.instance_ ? -1 : 1))
- : cons_cmp;
- }
- static int Compare(const JSObjectsCluster* a, const JSObjectsCluster* b) {
- return Compare(*a, *b);
- }
-
- bool is_null() const { return constructor_ == NULL; }
- bool can_be_coarsed() const { return instance_ != NULL; }
- String* constructor() const { return constructor_; }
- Object* instance() const { return instance_; }
-
- const char* GetSpecialCaseName() const;
- void Print(StringStream* accumulator) const;
- // Allows null clusters to be printed.
- void DebugPrint(StringStream* accumulator) const;
-
- private:
- static String* FromSpecialCase(SpecialCase special) {
- // We use symbols that are illegal JS identifiers to identify special cases.
- // Their actual value is irrelevant for us.
- switch (special) {
- case ROOTS: return HEAP->result_symbol();
- case GLOBAL_PROPERTY: return HEAP->code_symbol();
- case CODE: return HEAP->arguments_shadow_symbol();
- case SELF: return HEAP->catch_var_symbol();
- default:
- UNREACHABLE();
- return NULL;
- }
- }
-
- String* constructor_;
- Object* instance_;
-};
-
-
-struct JSObjectsClusterTreeConfig {
- typedef JSObjectsCluster Key;
- typedef NumberAndSizeInfo Value;
- static const Key kNoKey;
- static const Value kNoValue;
- static int Compare(const Key& a, const Key& b) {
- return Key::Compare(a, b);
- }
-};
-typedef ZoneSplayTree<JSObjectsClusterTreeConfig> JSObjectsClusterTree;
-
-
-// ConstructorHeapProfile is responsible for gathering and logging
-// "constructor profile" of JS objects allocated on heap.
-// It is run during garbage collection cycle, thus it doesn't need
-// to use handles.
-class ConstructorHeapProfile BASE_EMBEDDED {
- public:
- ConstructorHeapProfile();
- virtual ~ConstructorHeapProfile() {}
- void CollectStats(HeapObject* obj);
- void PrintStats();
-
- template<class Callback>
- void ForEach(Callback* callback) { js_objects_info_tree_.ForEach(callback); }
- // Used by ZoneSplayTree::ForEach. Made virtual to allow overriding in tests.
- virtual void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size);
-
- private:
- ZoneScope zscope_;
- JSObjectsClusterTree js_objects_info_tree_;
-};
-
-
-// JSObjectsRetainerTree is used to represent retainer graphs using
-// adjacency list form:
-//
-// Cluster -> (Cluster -> NumberAndSizeInfo)
-//
-// Subordinate splay trees are stored by pointer. They are zone-allocated,
-// so it isn't needed to manage their lifetime.
-//
-struct JSObjectsRetainerTreeConfig {
- typedef JSObjectsCluster Key;
- typedef JSObjectsClusterTree* Value;
- static const Key kNoKey;
- static const Value kNoValue;
- static int Compare(const Key& a, const Key& b) {
- return Key::Compare(a, b);
- }
-};
-typedef ZoneSplayTree<JSObjectsRetainerTreeConfig> JSObjectsRetainerTree;
-
-
-class ClustersCoarser BASE_EMBEDDED {
- public:
- ClustersCoarser();
-
- // Processes a given retainer graph.
- void Process(JSObjectsRetainerTree* tree);
-
- // Returns an equivalent cluster (can be the cluster itself).
- // If the given cluster doesn't have an equivalent, returns null cluster.
- JSObjectsCluster GetCoarseEquivalent(const JSObjectsCluster& cluster);
- // Returns whether a cluster can be substitued with an equivalent and thus,
- // skipped in some cases.
- bool HasAnEquivalent(const JSObjectsCluster& cluster);
-
- // Used by JSObjectsRetainerTree::ForEach.
- void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size);
-
- private:
- // Stores a list of back references for a cluster.
- struct ClusterBackRefs {
- explicit ClusterBackRefs(const JSObjectsCluster& cluster_);
- ClusterBackRefs(const ClusterBackRefs& src);
- ClusterBackRefs& operator=(const ClusterBackRefs& src);
-
- static int Compare(const ClusterBackRefs& a, const ClusterBackRefs& b);
- void SortRefs() { refs.Sort(JSObjectsCluster::Compare); }
- static void SortRefsIterator(ClusterBackRefs* ref) { ref->SortRefs(); }
-
- JSObjectsCluster cluster;
- ZoneList<JSObjectsCluster> refs;
- };
- typedef ZoneList<ClusterBackRefs> SimilarityList;
-
- // A tree for storing a list of equivalents for a cluster.
- struct ClusterEqualityConfig {
- typedef JSObjectsCluster Key;
- typedef JSObjectsCluster Value;
- static const Key kNoKey;
- static const Value kNoValue;
- static int Compare(const Key& a, const Key& b) {
- return Key::Compare(a, b);
- }
- };
- typedef ZoneSplayTree<ClusterEqualityConfig> EqualityTree;
-
- static int ClusterBackRefsCmp(const ClusterBackRefs* a,
- const ClusterBackRefs* b) {
- return ClusterBackRefs::Compare(*a, *b);
- }
- int DoProcess(JSObjectsRetainerTree* tree);
- int FillEqualityTree();
-
- static const int kInitialBackrefsListCapacity = 2;
- static const int kInitialSimilarityListCapacity = 2000;
- // Number of passes for finding equivalents. Limits the length of paths
- // that can be considered equivalent.
- static const int kMaxPassesCount = 10;
-
- ZoneScope zscope_;
- SimilarityList sim_list_;
- EqualityTree eq_tree_;
- ClusterBackRefs* current_pair_;
- JSObjectsRetainerTree* current_set_;
- const JSObjectsCluster* self_;
-};
-
-
-// RetainerHeapProfile is responsible for gathering and logging
-// "retainer profile" of JS objects allocated on heap.
-// It is run during garbage collection cycle, thus it doesn't need
-// to use handles.
-class RetainerTreeAggregator;
-
-class RetainerHeapProfile BASE_EMBEDDED {
- public:
- class Printer {
- public:
- virtual ~Printer() {}
- virtual void PrintRetainers(const JSObjectsCluster& cluster,
- const StringStream& retainers) = 0;
- };
-
- RetainerHeapProfile();
- ~RetainerHeapProfile();
-
- RetainerTreeAggregator* aggregator() { return aggregator_; }
- ClustersCoarser* coarser() { return &coarser_; }
- JSObjectsRetainerTree* retainers_tree() { return &retainers_tree_; }
-
- void CollectStats(HeapObject* obj);
- void CoarseAndAggregate();
- void PrintStats();
- void DebugPrintStats(Printer* printer);
- void StoreReference(const JSObjectsCluster& cluster, HeapObject* ref);
-
- private:
- ZoneScope zscope_;
- JSObjectsRetainerTree retainers_tree_;
- ClustersCoarser coarser_;
- RetainerTreeAggregator* aggregator_;
-};
-
-
-class AggregatedHeapSnapshot {
- public:
- AggregatedHeapSnapshot();
- ~AggregatedHeapSnapshot();
-
- HistogramInfo* info() { return info_; }
- ConstructorHeapProfile* js_cons_profile() { return &js_cons_profile_; }
- RetainerHeapProfile* js_retainer_profile() { return &js_retainer_profile_; }
-
- private:
- HistogramInfo* info_;
- ConstructorHeapProfile js_cons_profile_;
- RetainerHeapProfile js_retainer_profile_;
-};
-
-
-class HeapEntriesMap;
-class HeapEntriesAllocator;
-
-class AggregatedHeapSnapshotGenerator {
- public:
- explicit AggregatedHeapSnapshotGenerator(AggregatedHeapSnapshot* snapshot);
- void GenerateSnapshot();
- void FillHeapSnapshot(HeapSnapshot* snapshot);
-
- static const int kAllStringsType = LAST_TYPE + 1;
-
- private:
- void CalculateStringsStats();
- void CollectStats(HeapObject* obj);
- template<class Iterator>
- void IterateRetainers(
- HeapEntriesAllocator* allocator, HeapEntriesMap* entries_map);
-
- AggregatedHeapSnapshot* agg_snapshot_;
-};
-
-
-class ProducerHeapProfile {
- public:
- void Setup();
- void RecordJSObjectAllocation(Object* obj) {
- if (FLAG_log_producers) DoRecordJSObjectAllocation(obj);
- }
-
- private:
- ProducerHeapProfile() : can_log_(false) { }
-
- void DoRecordJSObjectAllocation(Object* obj);
- Isolate* isolate_;
- bool can_log_;
-
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(ProducerHeapProfile);
-};
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-} } // namespace v8::internal
-
-#endif // V8_HEAP_PROFILER_H_
diff --git a/src/3rdparty/v8/src/heap.cc b/src/3rdparty/v8/src/heap.cc
deleted file mode 100644
index 6250172..0000000
--- a/src/3rdparty/v8/src/heap.cc
+++ /dev/null
@@ -1,5856 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "accessors.h"
-#include "api.h"
-#include "bootstrapper.h"
-#include "codegen-inl.h"
-#include "compilation-cache.h"
-#include "debug.h"
-#include "heap-profiler.h"
-#include "global-handles.h"
-#include "liveobjectlist-inl.h"
-#include "mark-compact.h"
-#include "natives.h"
-#include "objects-visiting.h"
-#include "runtime-profiler.h"
-#include "scanner-base.h"
-#include "scopeinfo.h"
-#include "snapshot.h"
-#include "v8threads.h"
-#include "vm-state-inl.h"
-#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
-#include "regexp-macro-assembler.h"
-#include "arm/regexp-macro-assembler-arm.h"
-#endif
-#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
-#include "regexp-macro-assembler.h"
-#include "mips/regexp-macro-assembler-mips.h"
-#endif
-
-namespace v8 {
-namespace internal {
-
-
-static const intptr_t kMinimumPromotionLimit = 2 * MB;
-static const intptr_t kMinimumAllocationLimit = 8 * MB;
-
-
-static Mutex* gc_initializer_mutex = OS::CreateMutex();
-
-
-Heap::Heap()
- : isolate_(NULL),
-// semispace_size_ should be a power of 2 and old_generation_size_ should be
-// a multiple of Page::kPageSize.
-#if defined(ANDROID)
- reserved_semispace_size_(2*MB),
- max_semispace_size_(2*MB),
- initial_semispace_size_(128*KB),
- max_old_generation_size_(192*MB),
- max_executable_size_(max_old_generation_size_),
- code_range_size_(0),
-#elif defined(V8_TARGET_ARCH_X64)
- reserved_semispace_size_(16*MB),
- max_semispace_size_(16*MB),
- initial_semispace_size_(1*MB),
- max_old_generation_size_(1*GB),
- max_executable_size_(256*MB),
- code_range_size_(512*MB),
-#else
- reserved_semispace_size_(8*MB),
- max_semispace_size_(8*MB),
- initial_semispace_size_(512*KB),
- max_old_generation_size_(512*MB),
- max_executable_size_(128*MB),
- code_range_size_(0),
-#endif
-// Variables set based on semispace_size_ and old_generation_size_ in
-// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
-// Will be 4 * reserved_semispace_size_ to ensure that young
-// generation can be aligned to its size.
- survived_since_last_expansion_(0),
- always_allocate_scope_depth_(0),
- linear_allocation_scope_depth_(0),
- contexts_disposed_(0),
- new_space_(this),
- old_pointer_space_(NULL),
- old_data_space_(NULL),
- code_space_(NULL),
- map_space_(NULL),
- cell_space_(NULL),
- lo_space_(NULL),
- gc_state_(NOT_IN_GC),
- mc_count_(0),
- ms_count_(0),
- gc_count_(0),
- unflattened_strings_length_(0),
-#ifdef DEBUG
- allocation_allowed_(true),
- allocation_timeout_(0),
- disallow_allocation_failure_(false),
- debug_utils_(NULL),
-#endif // DEBUG
- old_gen_promotion_limit_(kMinimumPromotionLimit),
- old_gen_allocation_limit_(kMinimumAllocationLimit),
- external_allocation_limit_(0),
- amount_of_external_allocated_memory_(0),
- amount_of_external_allocated_memory_at_last_global_gc_(0),
- old_gen_exhausted_(false),
- hidden_symbol_(NULL),
- global_gc_prologue_callback_(NULL),
- global_gc_epilogue_callback_(NULL),
- gc_safe_size_of_old_object_(NULL),
- tracer_(NULL),
- young_survivors_after_last_gc_(0),
- high_survival_rate_period_length_(0),
- survival_rate_(0),
- previous_survival_rate_trend_(Heap::STABLE),
- survival_rate_trend_(Heap::STABLE),
- max_gc_pause_(0),
- max_alive_after_gc_(0),
- min_in_mutator_(kMaxInt),
- alive_after_last_gc_(0),
- last_gc_end_timestamp_(0.0),
- page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED),
- number_idle_notifications_(0),
- last_idle_notification_gc_count_(0),
- last_idle_notification_gc_count_init_(false),
- configured_(false),
- is_safe_to_read_maps_(true) {
- // Allow build-time customization of the max semispace size. Building
- // V8 with snapshots and a non-default max semispace size is much
- // easier if you can define it as part of the build environment.
-#if defined(V8_MAX_SEMISPACE_SIZE)
- max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
-#endif
-
- memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
- global_contexts_list_ = NULL;
- mark_compact_collector_.heap_ = this;
- external_string_table_.heap_ = this;
-}
-
-
-intptr_t Heap::Capacity() {
- if (!HasBeenSetup()) return 0;
-
- return new_space_.Capacity() +
- old_pointer_space_->Capacity() +
- old_data_space_->Capacity() +
- code_space_->Capacity() +
- map_space_->Capacity() +
- cell_space_->Capacity();
-}
-
-
-intptr_t Heap::CommittedMemory() {
- if (!HasBeenSetup()) return 0;
-
- return new_space_.CommittedMemory() +
- old_pointer_space_->CommittedMemory() +
- old_data_space_->CommittedMemory() +
- code_space_->CommittedMemory() +
- map_space_->CommittedMemory() +
- cell_space_->CommittedMemory() +
- lo_space_->Size();
-}
-
-intptr_t Heap::CommittedMemoryExecutable() {
- if (!HasBeenSetup()) return 0;
-
- return isolate()->memory_allocator()->SizeExecutable();
-}
-
-
-intptr_t Heap::Available() {
- if (!HasBeenSetup()) return 0;
-
- return new_space_.Available() +
- old_pointer_space_->Available() +
- old_data_space_->Available() +
- code_space_->Available() +
- map_space_->Available() +
- cell_space_->Available();
-}
-
-
-bool Heap::HasBeenSetup() {
- return old_pointer_space_ != NULL &&
- old_data_space_ != NULL &&
- code_space_ != NULL &&
- map_space_ != NULL &&
- cell_space_ != NULL &&
- lo_space_ != NULL;
-}
-
-
-int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
- ASSERT(!HEAP->InNewSpace(object)); // Code only works for old objects.
- ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded());
- MapWord map_word = object->map_word();
- map_word.ClearMark();
- map_word.ClearOverflow();
- return object->SizeFromMap(map_word.ToMap());
-}
-
-
-int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
- ASSERT(!HEAP->InNewSpace(object)); // Code only works for old objects.
- ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
- uint32_t marker = Memory::uint32_at(object->address());
- if (marker == MarkCompactCollector::kSingleFreeEncoding) {
- return kIntSize;
- } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
- return Memory::int_at(object->address() + kIntSize);
- } else {
- MapWord map_word = object->map_word();
- Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
- Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
- return object->SizeFromMap(map);
- }
-}
-
-
-GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
- // Is global GC requested?
- if (space != NEW_SPACE || FLAG_gc_global) {
- isolate_->counters()->gc_compactor_caused_by_request()->Increment();
- return MARK_COMPACTOR;
- }
-
- // Is enough data promoted to justify a global GC?
- if (OldGenerationPromotionLimitReached()) {
- isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
- return MARK_COMPACTOR;
- }
-
- // Have allocation in OLD and LO failed?
- if (old_gen_exhausted_) {
- isolate_->counters()->
- gc_compactor_caused_by_oldspace_exhaustion()->Increment();
- return MARK_COMPACTOR;
- }
-
- // Is there enough space left in OLD to guarantee that a scavenge can
- // succeed?
- //
- // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
- // for object promotion. It counts only the bytes that the memory
- // allocator has not yet allocated from the OS and assigned to any space,
- // and does not count available bytes already in the old space or code
- // space. Undercounting is safe---we may get an unrequested full GC when
- // a scavenge would have succeeded.
- if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
- isolate_->counters()->
- gc_compactor_caused_by_oldspace_exhaustion()->Increment();
- return MARK_COMPACTOR;
- }
-
- // Default
- return SCAVENGER;
-}
-
-
-// TODO(1238405): Combine the infrastructure for --heap-stats and
-// --log-gc to avoid the complicated preprocessor and flag testing.
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
-void Heap::ReportStatisticsBeforeGC() {
- // Heap::ReportHeapStatistics will also log NewSpace statistics when
- // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set. The
- // following logic is used to avoid double logging.
-#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
- if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
- if (FLAG_heap_stats) {
- ReportHeapStatistics("Before GC");
- } else if (FLAG_log_gc) {
- new_space_.ReportStatistics();
- }
- if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
-#elif defined(DEBUG)
- if (FLAG_heap_stats) {
- new_space_.CollectStatistics();
- ReportHeapStatistics("Before GC");
- new_space_.ClearHistograms();
- }
-#elif defined(ENABLE_LOGGING_AND_PROFILING)
- if (FLAG_log_gc) {
- new_space_.CollectStatistics();
- new_space_.ReportStatistics();
- new_space_.ClearHistograms();
- }
-#endif
-}
-
-
-#if defined(ENABLE_LOGGING_AND_PROFILING)
-void Heap::PrintShortHeapStatistics() {
- if (!FLAG_trace_gc_verbose) return;
- PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d\n",
- isolate_->memory_allocator()->Size(),
- isolate_->memory_allocator()->Available());
- PrintF("New space, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d\n",
- Heap::new_space_.Size(),
- new_space_.Available());
- PrintF("Old pointers, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d"
- ", waste: %8" V8_PTR_PREFIX "d\n",
- old_pointer_space_->Size(),
- old_pointer_space_->Available(),
- old_pointer_space_->Waste());
- PrintF("Old data space, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d"
- ", waste: %8" V8_PTR_PREFIX "d\n",
- old_data_space_->Size(),
- old_data_space_->Available(),
- old_data_space_->Waste());
- PrintF("Code space, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d"
- ", waste: %8" V8_PTR_PREFIX "d\n",
- code_space_->Size(),
- code_space_->Available(),
- code_space_->Waste());
- PrintF("Map space, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d"
- ", waste: %8" V8_PTR_PREFIX "d\n",
- map_space_->Size(),
- map_space_->Available(),
- map_space_->Waste());
- PrintF("Cell space, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d"
- ", waste: %8" V8_PTR_PREFIX "d\n",
- cell_space_->Size(),
- cell_space_->Available(),
- cell_space_->Waste());
- PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d\n",
- lo_space_->Size(),
- lo_space_->Available());
-}
-#endif
-
-
-// TODO(1238405): Combine the infrastructure for --heap-stats and
-// --log-gc to avoid the complicated preprocessor and flag testing.
-void Heap::ReportStatisticsAfterGC() {
- // Similar to the before GC, we use some complicated logic to ensure that
- // NewSpace statistics are logged exactly once when --log-gc is turned on.
-#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
- if (FLAG_heap_stats) {
- new_space_.CollectStatistics();
- ReportHeapStatistics("After GC");
- } else if (FLAG_log_gc) {
- new_space_.ReportStatistics();
- }
-#elif defined(DEBUG)
- if (FLAG_heap_stats) ReportHeapStatistics("After GC");
-#elif defined(ENABLE_LOGGING_AND_PROFILING)
- if (FLAG_log_gc) new_space_.ReportStatistics();
-#endif
-}
-#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
-
-
-void Heap::GarbageCollectionPrologue() {
- isolate_->transcendental_cache()->Clear();
- ClearJSFunctionResultCaches();
- gc_count_++;
- unflattened_strings_length_ = 0;
-#ifdef DEBUG
- ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
- allow_allocation(false);
-
- if (FLAG_verify_heap) {
- Verify();
- }
-
- if (FLAG_gc_verbose) Print();
-#endif
-
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
- ReportStatisticsBeforeGC();
-#endif
-
- LiveObjectList::GCPrologue();
-}
-
-intptr_t Heap::SizeOfObjects() {
- intptr_t total = 0;
- AllSpaces spaces;
- for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
- total += space->SizeOfObjects();
- }
- return total;
-}
-
-void Heap::GarbageCollectionEpilogue() {
- LiveObjectList::GCEpilogue();
-#ifdef DEBUG
- allow_allocation(true);
- ZapFromSpace();
-
- if (FLAG_verify_heap) {
- Verify();
- }
-
- if (FLAG_print_global_handles) isolate_->global_handles()->Print();
- if (FLAG_print_handles) PrintHandles();
- if (FLAG_gc_verbose) Print();
- if (FLAG_code_stats) ReportCodeStatistics("After GC");
-#endif
-
- isolate_->counters()->alive_after_last_gc()->Set(
- static_cast<int>(SizeOfObjects()));
-
- isolate_->counters()->symbol_table_capacity()->Set(
- symbol_table()->Capacity());
- isolate_->counters()->number_of_symbols()->Set(
- symbol_table()->NumberOfElements());
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
- ReportStatisticsAfterGC();
-#endif
-#ifdef ENABLE_DEBUGGER_SUPPORT
- isolate_->debug()->AfterGarbageCollection();
-#endif
-}
-
-
-void Heap::CollectAllGarbage(bool force_compaction) {
- // Since we are ignoring the return value, the exact choice of space does
- // not matter, so long as we do not specify NEW_SPACE, which would not
- // cause a full GC.
- mark_compact_collector_.SetForceCompaction(force_compaction);
- CollectGarbage(OLD_POINTER_SPACE);
- mark_compact_collector_.SetForceCompaction(false);
-}
-
-
-void Heap::CollectAllAvailableGarbage() {
- // Since we are ignoring the return value, the exact choice of space does
- // not matter, so long as we do not specify NEW_SPACE, which would not
- // cause a full GC.
- mark_compact_collector()->SetForceCompaction(true);
-
- // Major GC would invoke weak handle callbacks on weakly reachable
- // handles, but won't collect weakly reachable objects until next
- // major GC. Therefore if we collect aggressively and weak handle callback
- // has been invoked, we rerun major GC to release objects which become
- // garbage.
- // Note: as weak callbacks can execute arbitrary code, we cannot
- // hope that eventually there will be no weak callbacks invocations.
- // Therefore stop recollecting after several attempts.
- const int kMaxNumberOfAttempts = 7;
- for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
- if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
- break;
- }
- }
- mark_compact_collector()->SetForceCompaction(false);
-}
-
-
-bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
- // The VM is in the GC state until exiting this function.
- VMState state(isolate_, GC);
-
-#ifdef DEBUG
- // Reset the allocation timeout to the GC interval, but make sure to
- // allow at least a few allocations after a collection. The reason
- // for this is that we have a lot of allocation sequences and we
- // assume that a garbage collection will allow the subsequent
- // allocation attempts to go through.
- allocation_timeout_ = Max(6, FLAG_gc_interval);
-#endif
-
- bool next_gc_likely_to_collect_more = false;
-
- { GCTracer tracer(this);
- GarbageCollectionPrologue();
- // The GC count was incremented in the prologue. Tell the tracer about
- // it.
- tracer.set_gc_count(gc_count_);
-
- // Tell the tracer which collector we've selected.
- tracer.set_collector(collector);
-
- HistogramTimer* rate = (collector == SCAVENGER)
- ? isolate_->counters()->gc_scavenger()
- : isolate_->counters()->gc_compactor();
- rate->Start();
- next_gc_likely_to_collect_more =
- PerformGarbageCollection(collector, &tracer);
- rate->Stop();
-
- GarbageCollectionEpilogue();
- }
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (FLAG_log_gc) HeapProfiler::WriteSample();
-#endif
-
- return next_gc_likely_to_collect_more;
-}
-
-
-void Heap::PerformScavenge() {
- GCTracer tracer(this);
- PerformGarbageCollection(SCAVENGER, &tracer);
-}
-
-
-#ifdef DEBUG
-// Helper class for verifying the symbol table.
-class SymbolTableVerifier : public ObjectVisitor {
- public:
- void VisitPointers(Object** start, Object** end) {
- // Visit all HeapObject pointers in [start, end).
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsHeapObject()) {
- // Check that the symbol is actually a symbol.
- ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
- }
- }
- }
-};
-#endif // DEBUG
-
-
-static void VerifySymbolTable() {
-#ifdef DEBUG
- SymbolTableVerifier verifier;
- HEAP->symbol_table()->IterateElements(&verifier);
-#endif // DEBUG
-}
-
-
-void Heap::ReserveSpace(
- int new_space_size,
- int pointer_space_size,
- int data_space_size,
- int code_space_size,
- int map_space_size,
- int cell_space_size,
- int large_object_size) {
- NewSpace* new_space = Heap::new_space();
- PagedSpace* old_pointer_space = Heap::old_pointer_space();
- PagedSpace* old_data_space = Heap::old_data_space();
- PagedSpace* code_space = Heap::code_space();
- PagedSpace* map_space = Heap::map_space();
- PagedSpace* cell_space = Heap::cell_space();
- LargeObjectSpace* lo_space = Heap::lo_space();
- bool gc_performed = true;
- while (gc_performed) {
- gc_performed = false;
- if (!new_space->ReserveSpace(new_space_size)) {
- Heap::CollectGarbage(NEW_SPACE);
- gc_performed = true;
- }
- if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
- Heap::CollectGarbage(OLD_POINTER_SPACE);
- gc_performed = true;
- }
- if (!(old_data_space->ReserveSpace(data_space_size))) {
- Heap::CollectGarbage(OLD_DATA_SPACE);
- gc_performed = true;
- }
- if (!(code_space->ReserveSpace(code_space_size))) {
- Heap::CollectGarbage(CODE_SPACE);
- gc_performed = true;
- }
- if (!(map_space->ReserveSpace(map_space_size))) {
- Heap::CollectGarbage(MAP_SPACE);
- gc_performed = true;
- }
- if (!(cell_space->ReserveSpace(cell_space_size))) {
- Heap::CollectGarbage(CELL_SPACE);
- gc_performed = true;
- }
- // We add a slack-factor of 2 in order to have space for a series of
- // large-object allocations that are only just larger than the page size.
- large_object_size *= 2;
- // The ReserveSpace method on the large object space checks how much
- // we can expand the old generation. This includes expansion caused by
- // allocation in the other spaces.
- large_object_size += cell_space_size + map_space_size + code_space_size +
- data_space_size + pointer_space_size;
- if (!(lo_space->ReserveSpace(large_object_size))) {
- Heap::CollectGarbage(LO_SPACE);
- gc_performed = true;
- }
- }
-}
-
-
-void Heap::EnsureFromSpaceIsCommitted() {
- if (new_space_.CommitFromSpaceIfNeeded()) return;
-
- // Committing memory to from space failed.
- // Try shrinking and try again.
- PagedSpaces spaces;
- for (PagedSpace* space = spaces.next();
- space != NULL;
- space = spaces.next()) {
- space->RelinkPageListInChunkOrder(true);
- }
-
- Shrink();
- if (new_space_.CommitFromSpaceIfNeeded()) return;
-
- // Committing memory to from space failed again.
- // Memory is exhausted and we will die.
- V8::FatalProcessOutOfMemory("Committing semi space failed.");
-}
-
-
-void Heap::ClearJSFunctionResultCaches() {
- if (isolate_->bootstrapper()->IsActive()) return;
-
- Object* context = global_contexts_list_;
- while (!context->IsUndefined()) {
- // Get the caches for this context:
- FixedArray* caches =
- Context::cast(context)->jsfunction_result_caches();
- // Clear the caches:
- int length = caches->length();
- for (int i = 0; i < length; i++) {
- JSFunctionResultCache::cast(caches->get(i))->Clear();
- }
- // Get the next context:
- context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
- }
-}
-
-
-
-void Heap::ClearNormalizedMapCaches() {
- if (isolate_->bootstrapper()->IsActive()) return;
-
- Object* context = global_contexts_list_;
- while (!context->IsUndefined()) {
- Context::cast(context)->normalized_map_cache()->Clear();
- context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
- }
-}
-
-
-#ifdef DEBUG
-
-enum PageWatermarkValidity {
- ALL_VALID,
- ALL_INVALID
-};
-
-static void VerifyPageWatermarkValidity(PagedSpace* space,
- PageWatermarkValidity validity) {
- PageIterator it(space, PageIterator::PAGES_IN_USE);
- bool expected_value = (validity == ALL_VALID);
- while (it.has_next()) {
- Page* page = it.next();
- ASSERT(page->IsWatermarkValid() == expected_value);
- }
-}
-#endif
-
-void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
- double survival_rate =
- (static_cast<double>(young_survivors_after_last_gc_) * 100) /
- start_new_space_size;
-
- if (survival_rate > kYoungSurvivalRateThreshold) {
- high_survival_rate_period_length_++;
- } else {
- high_survival_rate_period_length_ = 0;
- }
-
- double survival_rate_diff = survival_rate_ - survival_rate;
-
- if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
- set_survival_rate_trend(DECREASING);
- } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
- set_survival_rate_trend(INCREASING);
- } else {
- set_survival_rate_trend(STABLE);
- }
-
- survival_rate_ = survival_rate;
-}
-
-bool Heap::PerformGarbageCollection(GarbageCollector collector,
- GCTracer* tracer) {
- bool next_gc_likely_to_collect_more = false;
-
- if (collector != SCAVENGER) {
- PROFILE(isolate_, CodeMovingGCEvent());
- }
-
- VerifySymbolTable();
- if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
- ASSERT(!allocation_allowed_);
- GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- global_gc_prologue_callback_();
- }
-
- GCType gc_type =
- collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
-
- for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
- if (gc_type & gc_prologue_callbacks_[i].gc_type) {
- gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
- }
- }
-
- EnsureFromSpaceIsCommitted();
-
- int start_new_space_size = Heap::new_space()->SizeAsInt();
-
- if (collector == MARK_COMPACTOR) {
- // Perform mark-sweep with optional compaction.
- MarkCompact(tracer);
-
- bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
- IsStableOrIncreasingSurvivalTrend();
-
- UpdateSurvivalRateTrend(start_new_space_size);
-
- intptr_t old_gen_size = PromotedSpaceSize();
- old_gen_promotion_limit_ =
- old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
- old_gen_allocation_limit_ =
- old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
-
- if (high_survival_rate_during_scavenges &&
- IsStableOrIncreasingSurvivalTrend()) {
- // Stable high survival rates of young objects both during partial and
- // full collection indicate that mutator is either building or modifying
- // a structure with a long lifetime.
- // In this case we aggressively raise old generation memory limits to
- // postpone subsequent mark-sweep collection and thus trade memory
- // space for the mutation speed.
- old_gen_promotion_limit_ *= 2;
- old_gen_allocation_limit_ *= 2;
- }
-
- old_gen_exhausted_ = false;
- } else {
- tracer_ = tracer;
- Scavenge();
- tracer_ = NULL;
-
- UpdateSurvivalRateTrend(start_new_space_size);
- }
-
- isolate_->counters()->objs_since_last_young()->Set(0);
-
- if (collector == MARK_COMPACTOR) {
- DisableAssertNoAllocation allow_allocation;
- GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- next_gc_likely_to_collect_more =
- isolate_->global_handles()->PostGarbageCollectionProcessing();
- }
-
- // Update relocatables.
- Relocatable::PostGarbageCollectionProcessing();
-
- if (collector == MARK_COMPACTOR) {
- // Register the amount of external allocated memory.
- amount_of_external_allocated_memory_at_last_global_gc_ =
- amount_of_external_allocated_memory_;
- }
-
- GCCallbackFlags callback_flags = tracer->is_compacting()
- ? kGCCallbackFlagCompacted
- : kNoGCCallbackFlags;
- for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
- if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
- gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
- }
- }
-
- if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
- ASSERT(!allocation_allowed_);
- GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- global_gc_epilogue_callback_();
- }
- VerifySymbolTable();
-
- return next_gc_likely_to_collect_more;
-}
-
-
-void Heap::MarkCompact(GCTracer* tracer) {
- gc_state_ = MARK_COMPACT;
- LOG(isolate_, ResourceEvent("markcompact", "begin"));
-
- mark_compact_collector_.Prepare(tracer);
-
- bool is_compacting = mark_compact_collector_.IsCompacting();
-
- if (is_compacting) {
- mc_count_++;
- } else {
- ms_count_++;
- }
- tracer->set_full_gc_count(mc_count_ + ms_count_);
-
- MarkCompactPrologue(is_compacting);
-
- is_safe_to_read_maps_ = false;
- mark_compact_collector_.CollectGarbage();
- is_safe_to_read_maps_ = true;
-
- LOG(isolate_, ResourceEvent("markcompact", "end"));
-
- gc_state_ = NOT_IN_GC;
-
- Shrink();
-
- isolate_->counters()->objs_since_last_full()->Set(0);
-
- contexts_disposed_ = 0;
-}
-
-
-void Heap::MarkCompactPrologue(bool is_compacting) {
- // At any old GC clear the keyed lookup cache to enable collection of unused
- // maps.
- isolate_->keyed_lookup_cache()->Clear();
- isolate_->context_slot_cache()->Clear();
- isolate_->descriptor_lookup_cache()->Clear();
-
- isolate_->compilation_cache()->MarkCompactPrologue();
-
- CompletelyClearInstanceofCache();
-
- if (is_compacting) FlushNumberStringCache();
-
- ClearNormalizedMapCaches();
-}
-
-
-Object* Heap::FindCodeObject(Address a) {
- Object* obj = NULL; // Initialization to please compiler.
- { MaybeObject* maybe_obj = code_space_->FindObject(a);
- if (!maybe_obj->ToObject(&obj)) {
- obj = lo_space_->FindObject(a)->ToObjectUnchecked();
- }
- }
- return obj;
-}
-
-
-// Helper class for copying HeapObjects
-class ScavengeVisitor: public ObjectVisitor {
- public:
- explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
-
- void VisitPointer(Object** p) { ScavengePointer(p); }
-
- void VisitPointers(Object** start, Object** end) {
- // Copy all HeapObject pointers in [start, end)
- for (Object** p = start; p < end; p++) ScavengePointer(p);
- }
-
- private:
- void ScavengePointer(Object** p) {
- Object* object = *p;
- if (!heap_->InNewSpace(object)) return;
- Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
- reinterpret_cast<HeapObject*>(object));
- }
-
- Heap* heap_;
-};
-
-
-#ifdef DEBUG
-// Visitor class to verify pointers in code or data space do not point into
-// new space.
-class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
- public:
- void VisitPointers(Object** start, Object**end) {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- ASSERT(!HEAP->InNewSpace(HeapObject::cast(*current)));
- }
- }
- }
-};
-
-
-static void VerifyNonPointerSpacePointers() {
- // Verify that there are no pointers to new space in spaces where we
- // do not expect them.
- VerifyNonPointerSpacePointersVisitor v;
- HeapObjectIterator code_it(HEAP->code_space());
- for (HeapObject* object = code_it.next();
- object != NULL; object = code_it.next())
- object->Iterate(&v);
-
- HeapObjectIterator data_it(HEAP->old_data_space());
- for (HeapObject* object = data_it.next();
- object != NULL; object = data_it.next())
- object->Iterate(&v);
-}
-#endif
-
-
-void Heap::CheckNewSpaceExpansionCriteria() {
- if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
- survived_since_last_expansion_ > new_space_.Capacity()) {
- // Grow the size of new space if there is room to grow and enough
- // data has survived scavenge since the last expansion.
- new_space_.Grow();
- survived_since_last_expansion_ = 0;
- }
-}
-
-
-void Heap::Scavenge() {
-#ifdef DEBUG
- if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
-#endif
-
- gc_state_ = SCAVENGE;
-
- SwitchScavengingVisitorsTableIfProfilingWasEnabled();
-
- Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
-#ifdef DEBUG
- VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
- VerifyPageWatermarkValidity(map_space_, ALL_VALID);
-#endif
-
- // We do not update an allocation watermark of the top page during linear
- // allocation to avoid overhead. So to maintain the watermark invariant
- // we have to manually cache the watermark and mark the top page as having an
- // invalid watermark. This guarantees that dirty regions iteration will use a
- // correct watermark even if a linear allocation happens.
- old_pointer_space_->FlushTopPageWatermark();
- map_space_->FlushTopPageWatermark();
-
- // Implements Cheney's copying algorithm
- LOG(isolate_, ResourceEvent("scavenge", "begin"));
-
- // Clear descriptor cache.
- isolate_->descriptor_lookup_cache()->Clear();
-
- // Used for updating survived_since_last_expansion_ at function end.
- intptr_t survived_watermark = PromotedSpaceSize();
-
- CheckNewSpaceExpansionCriteria();
-
- // Flip the semispaces. After flipping, to space is empty, from space has
- // live objects.
- new_space_.Flip();
- new_space_.ResetAllocationInfo();
-
- // We need to sweep newly copied objects which can be either in the
- // to space or promoted to the old generation. For to-space
- // objects, we treat the bottom of the to space as a queue. Newly
- // copied and unswept objects lie between a 'front' mark and the
- // allocation pointer.
- //
- // Promoted objects can go into various old-generation spaces, and
- // can be allocated internally in the spaces (from the free list).
- // We treat the top of the to space as a queue of addresses of
- // promoted objects. The addresses of newly promoted and unswept
- // objects lie between a 'front' mark and a 'rear' mark that is
- // updated as a side effect of promoting an object.
- //
- // There is guaranteed to be enough room at the top of the to space
- // for the addresses of promoted objects: every object promoted
- // frees up its size in bytes from the top of the new space, and
- // objects are at least one pointer in size.
- Address new_space_front = new_space_.ToSpaceLow();
- promotion_queue_.Initialize(new_space_.ToSpaceHigh());
-
- is_safe_to_read_maps_ = false;
- ScavengeVisitor scavenge_visitor(this);
- // Copy roots.
- IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
-
- // Copy objects reachable from the old generation. By definition,
- // there are no intergenerational pointers in code or data spaces.
- IterateDirtyRegions(old_pointer_space_,
- &Heap::IteratePointersInDirtyRegion,
- &ScavengePointer,
- WATERMARK_CAN_BE_INVALID);
-
- IterateDirtyRegions(map_space_,
- &IteratePointersInDirtyMapsRegion,
- &ScavengePointer,
- WATERMARK_CAN_BE_INVALID);
-
- lo_space_->IterateDirtyRegions(&ScavengePointer);
-
- // Copy objects reachable from cells by scavenging cell values directly.
- HeapObjectIterator cell_iterator(cell_space_);
- for (HeapObject* cell = cell_iterator.next();
- cell != NULL; cell = cell_iterator.next()) {
- if (cell->IsJSGlobalPropertyCell()) {
- Address value_address =
- reinterpret_cast<Address>(cell) +
- (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
- scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
- }
- }
-
- // Scavenge object reachable from the global contexts list directly.
- scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
-
- new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
-
- UpdateNewSpaceReferencesInExternalStringTable(
- &UpdateNewSpaceReferenceInExternalStringTableEntry);
-
- LiveObjectList::UpdateReferencesForScavengeGC();
- isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
-
- ASSERT(new_space_front == new_space_.top());
-
- is_safe_to_read_maps_ = true;
-
- // Set age mark.
- new_space_.set_age_mark(new_space_.top());
-
- // Update how much has survived scavenge.
- IncrementYoungSurvivorsCounter(static_cast<int>(
- (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
-
- LOG(isolate_, ResourceEvent("scavenge", "end"));
-
- gc_state_ = NOT_IN_GC;
-}
-
-
-String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
- Object** p) {
- MapWord first_word = HeapObject::cast(*p)->map_word();
-
- if (!first_word.IsForwardingAddress()) {
- // Unreachable external string can be finalized.
- heap->FinalizeExternalString(String::cast(*p));
- return NULL;
- }
-
- // String is still reachable.
- return String::cast(first_word.ToForwardingAddress());
-}
-
-
-void Heap::UpdateNewSpaceReferencesInExternalStringTable(
- ExternalStringTableUpdaterCallback updater_func) {
- external_string_table_.Verify();
-
- if (external_string_table_.new_space_strings_.is_empty()) return;
-
- Object** start = &external_string_table_.new_space_strings_[0];
- Object** end = start + external_string_table_.new_space_strings_.length();
- Object** last = start;
-
- for (Object** p = start; p < end; ++p) {
- ASSERT(InFromSpace(*p));
- String* target = updater_func(this, p);
-
- if (target == NULL) continue;
-
- ASSERT(target->IsExternalString());
-
- if (InNewSpace(target)) {
- // String is still in new space. Update the table entry.
- *last = target;
- ++last;
- } else {
- // String got promoted. Move it to the old string list.
- external_string_table_.AddOldString(target);
- }
- }
-
- ASSERT(last <= end);
- external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
-}
-
-
-static Object* ProcessFunctionWeakReferences(Heap* heap,
- Object* function,
- WeakObjectRetainer* retainer) {
- Object* head = heap->undefined_value();
- JSFunction* tail = NULL;
- Object* candidate = function;
- while (candidate != heap->undefined_value()) {
- // Check whether to keep the candidate in the list.
- JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
- Object* retain = retainer->RetainAs(candidate);
- if (retain != NULL) {
- if (head == heap->undefined_value()) {
- // First element in the list.
- head = candidate_function;
- } else {
- // Subsequent elements in the list.
- ASSERT(tail != NULL);
- tail->set_next_function_link(candidate_function);
- }
- // Retained function is new tail.
- tail = candidate_function;
- }
- // Move to next element in the list.
- candidate = candidate_function->next_function_link();
- }
-
- // Terminate the list if there is one or more elements.
- if (tail != NULL) {
- tail->set_next_function_link(heap->undefined_value());
- }
-
- return head;
-}
-
-
-void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
- Object* head = undefined_value();
- Context* tail = NULL;
- Object* candidate = global_contexts_list_;
- while (candidate != undefined_value()) {
- // Check whether to keep the candidate in the list.
- Context* candidate_context = reinterpret_cast<Context*>(candidate);
- Object* retain = retainer->RetainAs(candidate);
- if (retain != NULL) {
- if (head == undefined_value()) {
- // First element in the list.
- head = candidate_context;
- } else {
- // Subsequent elements in the list.
- ASSERT(tail != NULL);
- tail->set_unchecked(this,
- Context::NEXT_CONTEXT_LINK,
- candidate_context,
- UPDATE_WRITE_BARRIER);
- }
- // Retained context is new tail.
- tail = candidate_context;
-
- // Process the weak list of optimized functions for the context.
- Object* function_list_head =
- ProcessFunctionWeakReferences(
- this,
- candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
- retainer);
- candidate_context->set_unchecked(this,
- Context::OPTIMIZED_FUNCTIONS_LIST,
- function_list_head,
- UPDATE_WRITE_BARRIER);
- }
- // Move to next element in the list.
- candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
- }
-
- // Terminate the list if there is one or more elements.
- if (tail != NULL) {
- tail->set_unchecked(this,
- Context::NEXT_CONTEXT_LINK,
- Heap::undefined_value(),
- UPDATE_WRITE_BARRIER);
- }
-
- // Update the head of the list of contexts.
- global_contexts_list_ = head;
-}
-
-
-class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
- public:
- static inline void VisitPointer(Heap* heap, Object** p) {
- Object* object = *p;
- if (!heap->InNewSpace(object)) return;
- Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
- reinterpret_cast<HeapObject*>(object));
- }
-};
-
-
-Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
- Address new_space_front) {
- do {
- ASSERT(new_space_front <= new_space_.top());
-
- // The addresses new_space_front and new_space_.top() define a
- // queue of unprocessed copied objects. Process them until the
- // queue is empty.
- while (new_space_front < new_space_.top()) {
- HeapObject* object = HeapObject::FromAddress(new_space_front);
- new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
- }
-
- // Promote and process all the to-be-promoted objects.
- while (!promotion_queue_.is_empty()) {
- HeapObject* target;
- int size;
- promotion_queue_.remove(&target, &size);
-
- // Promoted object might be already partially visited
- // during dirty regions iteration. Thus we search specificly
- // for pointers to from semispace instead of looking for pointers
- // to new space.
- ASSERT(!target->IsMap());
- IterateAndMarkPointersToFromSpace(target->address(),
- target->address() + size,
- &ScavengePointer);
- }
-
- // Take another spin if there are now unswept objects in new space
- // (there are currently no more unswept promoted objects).
- } while (new_space_front < new_space_.top());
-
- return new_space_front;
-}
-
-
-enum LoggingAndProfiling {
- LOGGING_AND_PROFILING_ENABLED,
- LOGGING_AND_PROFILING_DISABLED
-};
-
-
-typedef void (*ScavengingCallback)(Map* map,
- HeapObject** slot,
- HeapObject* object);
-
-
-static Atomic32 scavenging_visitors_table_mode_;
-static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
-
-
-INLINE(static void DoScavengeObject(Map* map,
- HeapObject** slot,
- HeapObject* obj));
-
-
-void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
- scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
-}
-
-
-template<LoggingAndProfiling logging_and_profiling_mode>
-class ScavengingVisitor : public StaticVisitorBase {
- public:
- static void Initialize() {
- table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
- table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
- table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
- table_.Register(kVisitByteArray, &EvacuateByteArray);
- table_.Register(kVisitFixedArray, &EvacuateFixedArray);
-
- table_.Register(kVisitGlobalContext,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- template VisitSpecialized<Context::kSize>);
-
- table_.Register(kVisitConsString,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- template VisitSpecialized<ConsString::kSize>);
-
- table_.Register(kVisitSharedFunctionInfo,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- template VisitSpecialized<SharedFunctionInfo::kSize>);
-
- table_.Register(kVisitJSFunction,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- template VisitSpecialized<JSFunction::kSize>);
-
- table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
- kVisitDataObject,
- kVisitDataObjectGeneric>();
-
- table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
- kVisitJSObject,
- kVisitJSObjectGeneric>();
-
- table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
- kVisitStruct,
- kVisitStructGeneric>();
- }
-
- static VisitorDispatchTable<ScavengingCallback>* GetTable() {
- return &table_;
- }
-
- private:
- enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
- enum SizeRestriction { SMALL, UNKNOWN_SIZE };
-
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
- static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
- bool should_record = false;
-#ifdef DEBUG
- should_record = FLAG_heap_stats;
-#endif
-#ifdef ENABLE_LOGGING_AND_PROFILING
- should_record = should_record || FLAG_log_gc;
-#endif
- if (should_record) {
- if (heap->new_space()->Contains(obj)) {
- heap->new_space()->RecordAllocation(obj);
- } else {
- heap->new_space()->RecordPromotion(obj);
- }
- }
- }
-#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
-
- // Helper function used by CopyObject to copy a source object to an
- // allocated target object and update the forwarding pointer in the source
- // object. Returns the target object.
- INLINE(static HeapObject* MigrateObject(Heap* heap,
- HeapObject* source,
- HeapObject* target,
- int size)) {
- // Copy the content of source to target.
- heap->CopyBlock(target->address(), source->address(), size);
-
- // Set the forwarding address.
- source->set_map_word(MapWord::FromForwardingAddress(target));
-
- if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
- // Update NewSpace stats if necessary.
- RecordCopiedObject(heap, target);
-#endif
- HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
-#if defined(ENABLE_LOGGING_AND_PROFILING)
- Isolate* isolate = heap->isolate();
- if (isolate->logger()->is_logging() ||
- isolate->cpu_profiler()->is_profiling()) {
- if (target->IsSharedFunctionInfo()) {
- PROFILE(isolate, SharedFunctionInfoMoveEvent(
- source->address(), target->address()));
- }
- }
-#endif
- }
-
- return target;
- }
-
-
- template<ObjectContents object_contents, SizeRestriction size_restriction>
- static inline void EvacuateObject(Map* map,
- HeapObject** slot,
- HeapObject* object,
- int object_size) {
- ASSERT((size_restriction != SMALL) ||
- (object_size <= Page::kMaxHeapObjectSize));
- ASSERT(object->Size() == object_size);
-
- Heap* heap = map->heap();
- if (heap->ShouldBePromoted(object->address(), object_size)) {
- MaybeObject* maybe_result;
-
- if ((size_restriction != SMALL) &&
- (object_size > Page::kMaxHeapObjectSize)) {
- maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size);
- } else {
- if (object_contents == DATA_OBJECT) {
- maybe_result = heap->old_data_space()->AllocateRaw(object_size);
- } else {
- maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
- }
- }
-
- Object* result = NULL; // Initialization to please compiler.
- if (maybe_result->ToObject(&result)) {
- HeapObject* target = HeapObject::cast(result);
- *slot = MigrateObject(heap, object , target, object_size);
-
- if (object_contents == POINTER_OBJECT) {
- heap->promotion_queue()->insert(target, object_size);
- }
-
- heap->tracer()->increment_promoted_objects_size(object_size);
- return;
- }
- }
- Object* result =
- heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
- *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
- return;
- }
-
-
- static inline void EvacuateFixedArray(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
- EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
- slot,
- object,
- object_size);
- }
-
-
- static inline void EvacuateByteArray(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
- EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
- }
-
-
- static inline void EvacuateSeqAsciiString(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- int object_size = SeqAsciiString::cast(object)->
- SeqAsciiStringSize(map->instance_type());
- EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
- }
-
-
- static inline void EvacuateSeqTwoByteString(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- int object_size = SeqTwoByteString::cast(object)->
- SeqTwoByteStringSize(map->instance_type());
- EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
- }
-
-
- static inline bool IsShortcutCandidate(int type) {
- return ((type & kShortcutTypeMask) == kShortcutTypeTag);
- }
-
- static inline void EvacuateShortcutCandidate(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- ASSERT(IsShortcutCandidate(map->instance_type()));
-
- if (ConsString::cast(object)->unchecked_second() ==
- map->heap()->empty_string()) {
- HeapObject* first =
- HeapObject::cast(ConsString::cast(object)->unchecked_first());
-
- *slot = first;
-
- if (!map->heap()->InNewSpace(first)) {
- object->set_map_word(MapWord::FromForwardingAddress(first));
- return;
- }
-
- MapWord first_word = first->map_word();
- if (first_word.IsForwardingAddress()) {
- HeapObject* target = first_word.ToForwardingAddress();
-
- *slot = target;
- object->set_map_word(MapWord::FromForwardingAddress(target));
- return;
- }
-
- DoScavengeObject(first->map(), slot, first);
- object->set_map_word(MapWord::FromForwardingAddress(*slot));
- return;
- }
-
- int object_size = ConsString::kSize;
- EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
- }
-
- template<ObjectContents object_contents>
- class ObjectEvacuationStrategy {
- public:
- template<int object_size>
- static inline void VisitSpecialized(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
- }
-
- static inline void Visit(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- int object_size = map->instance_size();
- EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
- }
- };
-
- static VisitorDispatchTable<ScavengingCallback> table_;
-};
-
-
-template<LoggingAndProfiling logging_and_profiling_mode>
-VisitorDispatchTable<ScavengingCallback>
- ScavengingVisitor<logging_and_profiling_mode>::table_;
-
-
-static void InitializeScavengingVisitorsTables() {
- ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize();
- ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize();
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable());
- scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED;
-}
-
-
-void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
- if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) {
- // Table was already updated by some isolate.
- return;
- }
-
- if (isolate()->logger()->is_logging() ||
- isolate()->cpu_profiler()->is_profiling() ||
- (isolate()->heap_profiler() != NULL &&
- isolate()->heap_profiler()->is_profiling())) {
- // If one of the isolates is doing scavenge at this moment of time
- // it might see this table in an inconsitent state when
- // some of the callbacks point to
- // ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others
- // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>.
- // However this does not lead to any bugs as such isolate does not have
- // profiling enabled and any isolate with enabled profiling is guaranteed
- // to see the table in the consistent state.
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());
-
- // We use Release_Store to prevent reordering of this write before writes
- // to the table.
- Release_Store(&scavenging_visitors_table_mode_,
- LOGGING_AND_PROFILING_ENABLED);
- }
-}
-
-
-void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
- ASSERT(HEAP->InFromSpace(object));
- MapWord first_word = object->map_word();
- ASSERT(!first_word.IsForwardingAddress());
- Map* map = first_word.ToMap();
- DoScavengeObject(map, p, object);
-}
-
-
-MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
- int instance_size) {
- Object* result;
- { MaybeObject* maybe_result = AllocateRawMap();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- // Map::cast cannot be used due to uninitialized map field.
- reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
- reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
- reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
- reinterpret_cast<Map*>(result)->set_visitor_id(
- StaticVisitorBase::GetVisitorId(instance_type, instance_size));
- reinterpret_cast<Map*>(result)->set_inobject_properties(0);
- reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
- reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
- reinterpret_cast<Map*>(result)->set_bit_field(0);
- reinterpret_cast<Map*>(result)->set_bit_field2(0);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
- Object* result;
- { MaybeObject* maybe_result = AllocateRawMap();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- Map* map = reinterpret_cast<Map*>(result);
- map->set_map(meta_map());
- map->set_instance_type(instance_type);
- map->set_visitor_id(
- StaticVisitorBase::GetVisitorId(instance_type, instance_size));
- map->set_prototype(null_value());
- map->set_constructor(null_value());
- map->set_instance_size(instance_size);
- map->set_inobject_properties(0);
- map->set_pre_allocated_property_fields(0);
- map->set_instance_descriptors(empty_descriptor_array());
- map->set_code_cache(empty_fixed_array());
- map->set_unused_property_fields(0);
- map->set_bit_field(0);
- map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));
-
- // If the map object is aligned fill the padding area with Smi 0 objects.
- if (Map::kPadStart < Map::kSize) {
- memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
- 0,
- Map::kSize - Map::kPadStart);
- }
- return map;
-}
-
-
-MaybeObject* Heap::AllocateCodeCache() {
- Object* result;
- { MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- CodeCache* code_cache = CodeCache::cast(result);
- code_cache->set_default_cache(empty_fixed_array());
- code_cache->set_normal_type_cache(undefined_value());
- return code_cache;
-}
-
-
-const Heap::StringTypeTable Heap::string_type_table[] = {
-#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
- {type, size, k##camel_name##MapRootIndex},
- STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
-#undef STRING_TYPE_ELEMENT
-};
-
-
-const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
-#define CONSTANT_SYMBOL_ELEMENT(name, contents) \
- {contents, k##name##RootIndex},
- SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
-#undef CONSTANT_SYMBOL_ELEMENT
-};
-
-
-const Heap::StructTable Heap::struct_table[] = {
-#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
- { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
- STRUCT_LIST(STRUCT_TABLE_ELEMENT)
-#undef STRUCT_TABLE_ELEMENT
-};
-
-
-bool Heap::CreateInitialMaps() {
- Object* obj;
- { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- // Map::cast cannot be used due to uninitialized map field.
- Map* new_meta_map = reinterpret_cast<Map*>(obj);
- set_meta_map(new_meta_map);
- new_meta_map->set_map(new_meta_map);
-
- { MaybeObject* maybe_obj =
- AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_fixed_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_oddball_map(Map::cast(obj));
-
- // Allocate the empty array.
- { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_fixed_array(FixedArray::cast(obj));
-
- { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_null_value(obj);
- Oddball::cast(obj)->set_kind(Oddball::kNull);
-
- // Allocate the empty descriptor array.
- { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_descriptor_array(DescriptorArray::cast(obj));
-
- // Fix the instance_descriptors for the existing maps.
- meta_map()->set_instance_descriptors(empty_descriptor_array());
- meta_map()->set_code_cache(empty_fixed_array());
-
- fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
- fixed_array_map()->set_code_cache(empty_fixed_array());
-
- oddball_map()->set_instance_descriptors(empty_descriptor_array());
- oddball_map()->set_code_cache(empty_fixed_array());
-
- // Fix prototype object for existing maps.
- meta_map()->set_prototype(null_value());
- meta_map()->set_constructor(null_value());
-
- fixed_array_map()->set_prototype(null_value());
- fixed_array_map()->set_constructor(null_value());
-
- oddball_map()->set_prototype(null_value());
- oddball_map()->set_constructor(null_value());
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_fixed_cow_array_map(Map::cast(obj));
- ASSERT(fixed_array_map() != fixed_cow_array_map());
-
- { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_heap_number_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_proxy_map(Map::cast(obj));
-
- for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
- const StringTypeTable& entry = string_type_table[i];
- { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- roots_[entry.index] = Map::cast(obj);
- }
-
- { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_undetectable_string_map(Map::cast(obj));
- Map::cast(obj)->set_is_undetectable();
-
- { MaybeObject* maybe_obj =
- AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_undetectable_ascii_string_map(Map::cast(obj));
- Map::cast(obj)->set_is_undetectable();
-
- { MaybeObject* maybe_obj =
- AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_byte_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_byte_array(ByteArray::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_pixel_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_byte_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_unsigned_byte_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_short_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_unsigned_short_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_int_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_unsigned_int_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_float_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_code_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
- JSGlobalPropertyCell::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_global_property_cell_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_one_pointer_filler_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_two_pointer_filler_map(Map::cast(obj));
-
- for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
- const StructTable& entry = struct_table[i];
- { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- roots_[entry.index] = Map::cast(obj);
- }
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_hash_table_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_context_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_catch_context_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- Map* global_context_map = Map::cast(obj);
- global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
- set_global_context_map(global_context_map);
-
- { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
- SharedFunctionInfo::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_shared_function_info_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
- JSMessageObject::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_message_object_map(Map::cast(obj));
-
- ASSERT(!InNewSpace(empty_fixed_array()));
- return true;
-}
-
-
-MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
- // Statically ensure that it is safe to allocate heap numbers in paged
- // spaces.
- STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
-
- Object* result;
- { MaybeObject* maybe_result =
- AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- HeapObject::cast(result)->set_map(heap_number_map());
- HeapNumber::cast(result)->set_value(value);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateHeapNumber(double value) {
- // Use general version, if we're forced to always allocate.
- if (always_allocate()) return AllocateHeapNumber(value, TENURED);
-
- // This version of AllocateHeapNumber is optimized for
- // allocation in new space.
- STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
- ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
- Object* result;
- { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- HeapObject::cast(result)->set_map(heap_number_map());
- HeapNumber::cast(result)->set_value(value);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
- Object* result;
- { MaybeObject* maybe_result = AllocateRawCell();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- HeapObject::cast(result)->set_map(global_property_cell_map());
- JSGlobalPropertyCell::cast(result)->set_value(value);
- return result;
-}
-
-
-MaybeObject* Heap::CreateOddball(const char* to_string,
- Object* to_number,
- byte kind) {
- Object* result;
- { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- return Oddball::cast(result)->Initialize(to_string, to_number, kind);
-}
-
-
-bool Heap::CreateApiObjects() {
- Object* obj;
-
- { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_neander_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- Object* elements;
- { MaybeObject* maybe_elements = AllocateFixedArray(2);
- if (!maybe_elements->ToObject(&elements)) return false;
- }
- FixedArray::cast(elements)->set(0, Smi::FromInt(0));
- JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
- set_message_listeners(JSObject::cast(obj));
-
- return true;
-}
-
-
-void Heap::CreateJSEntryStub() {
- JSEntryStub stub;
- set_js_entry_code(*stub.GetCode());
-}
-
-
-void Heap::CreateJSConstructEntryStub() {
- JSConstructEntryStub stub;
- set_js_construct_entry_code(*stub.GetCode());
-}
-
-
-void Heap::CreateFixedStubs() {
- // Here we create roots for fixed stubs. They are needed at GC
- // for cooking and uncooking (check out frames.cc).
- // The eliminates the need for doing dictionary lookup in the
- // stub cache for these stubs.
- HandleScope scope;
- // gcc-4.4 has problem generating correct code of following snippet:
- // { JSEntryStub stub;
- // js_entry_code_ = *stub.GetCode();
- // }
- // { JSConstructEntryStub stub;
- // js_construct_entry_code_ = *stub.GetCode();
- // }
- // To workaround the problem, make separate functions without inlining.
- Heap::CreateJSEntryStub();
- Heap::CreateJSConstructEntryStub();
-}
-
-
-bool Heap::CreateInitialObjects() {
- Object* obj;
-
- // The -0 value must be set before NumberFromDouble works.
- { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_minus_zero_value(obj);
- ASSERT(signbit(minus_zero_value()->Number()) != 0);
-
- { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_nan_value(obj);
-
- { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_undefined_value(obj);
- Oddball::cast(obj)->set_kind(Oddball::kUndefined);
- ASSERT(!InNewSpace(undefined_value()));
-
- // Allocate initial symbol table.
- { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- // Don't use set_symbol_table() due to asserts.
- roots_[kSymbolTableRootIndex] = obj;
-
- // Assign the print strings for oddballs after creating symboltable.
- Object* symbol;
- { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
- if (!maybe_symbol->ToObject(&symbol)) return false;
- }
- Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
- Oddball::cast(undefined_value())->set_to_number(nan_value());
-
- // Allocate the null_value
- { MaybeObject* maybe_obj =
- Oddball::cast(null_value())->Initialize("null",
- Smi::FromInt(0),
- Oddball::kNull);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
-
- { MaybeObject* maybe_obj = CreateOddball("true",
- Smi::FromInt(1),
- Oddball::kTrue);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_true_value(obj);
-
- { MaybeObject* maybe_obj = CreateOddball("false",
- Smi::FromInt(0),
- Oddball::kFalse);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_false_value(obj);
-
- { MaybeObject* maybe_obj = CreateOddball("hole",
- Smi::FromInt(-1),
- Oddball::kTheHole);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_the_hole_value(obj);
-
- { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
- Smi::FromInt(-4),
- Oddball::kArgumentMarker);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_arguments_marker(obj);
-
- { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
- Smi::FromInt(-2),
- Oddball::kOther);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_no_interceptor_result_sentinel(obj);
-
- { MaybeObject* maybe_obj = CreateOddball("termination_exception",
- Smi::FromInt(-3),
- Oddball::kOther);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_termination_exception(obj);
-
- // Allocate the empty string.
- { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_string(String::cast(obj));
-
- for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
- { MaybeObject* maybe_obj =
- LookupAsciiSymbol(constant_symbol_table[i].contents);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- roots_[constant_symbol_table[i].index] = String::cast(obj);
- }
-
- // Allocate the hidden symbol which is used to identify the hidden properties
- // in JSObjects. The hash code has a special value so that it will not match
- // the empty string when searching for the property. It cannot be part of the
- // loop above because it needs to be allocated manually with the special
- // hash code in place. The hash code for the hidden_symbol is zero to ensure
- // that it will always be at the first entry in property descriptors.
- { MaybeObject* maybe_obj =
- AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- hidden_symbol_ = String::cast(obj);
-
- // Allocate the proxy for __proto__.
- { MaybeObject* maybe_obj =
- AllocateProxy((Address) &Accessors::ObjectPrototype);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_prototype_accessors(Proxy::cast(obj));
-
- // Allocate the code_stubs dictionary. The initial size is set to avoid
- // expanding the dictionary during bootstrapping.
- { MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_code_stubs(NumberDictionary::cast(obj));
-
- // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
- // is set to avoid expanding the dictionary during bootstrapping.
- { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_non_monomorphic_cache(NumberDictionary::cast(obj));
-
- set_instanceof_cache_function(Smi::FromInt(0));
- set_instanceof_cache_map(Smi::FromInt(0));
- set_instanceof_cache_answer(Smi::FromInt(0));
-
- CreateFixedStubs();
-
- // Allocate the dictionary of intrinsic function names.
- { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
- obj);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_intrinsic_function_names(StringDictionary::cast(obj));
-
- if (InitializeNumberStringCache()->IsFailure()) return false;
-
- // Allocate cache for single character ASCII strings.
- { MaybeObject* maybe_obj =
- AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_single_character_string_cache(FixedArray::cast(obj));
-
- // Allocate cache for external strings pointing to native source code.
- { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_natives_source_cache(FixedArray::cast(obj));
-
- // Handling of script id generation is in FACTORY->NewScript.
- set_last_script_id(undefined_value());
-
- // Initialize keyed lookup cache.
- isolate_->keyed_lookup_cache()->Clear();
-
- // Initialize context slot cache.
- isolate_->context_slot_cache()->Clear();
-
- // Initialize descriptor cache.
- isolate_->descriptor_lookup_cache()->Clear();
-
- // Initialize compilation cache.
- isolate_->compilation_cache()->Clear();
-
- return true;
-}
-
-
-MaybeObject* Heap::InitializeNumberStringCache() {
- // Compute the size of the number string cache based on the max heap size.
- // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
- // max_semispace_size_ == 8 MB => number_string_cache_size = 16KB.
- int number_string_cache_size = max_semispace_size_ / 512;
- number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
- Object* obj;
- MaybeObject* maybe_obj =
- AllocateFixedArray(number_string_cache_size * 2, TENURED);
- if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
- return maybe_obj;
-}
-
-
-void Heap::FlushNumberStringCache() {
- // Flush the number to string cache.
- int len = number_string_cache()->length();
- for (int i = 0; i < len; i++) {
- number_string_cache()->set_undefined(this, i);
- }
-}
-
-
-static inline int double_get_hash(double d) {
- DoubleRepresentation rep(d);
- return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
-}
-
-
-static inline int smi_get_hash(Smi* smi) {
- return smi->value();
-}
-
-
-Object* Heap::GetNumberStringCache(Object* number) {
- int hash;
- int mask = (number_string_cache()->length() >> 1) - 1;
- if (number->IsSmi()) {
- hash = smi_get_hash(Smi::cast(number)) & mask;
- } else {
- hash = double_get_hash(number->Number()) & mask;
- }
- Object* key = number_string_cache()->get(hash * 2);
- if (key == number) {
- return String::cast(number_string_cache()->get(hash * 2 + 1));
- } else if (key->IsHeapNumber() &&
- number->IsHeapNumber() &&
- key->Number() == number->Number()) {
- return String::cast(number_string_cache()->get(hash * 2 + 1));
- }
- return undefined_value();
-}
-
-
-void Heap::SetNumberStringCache(Object* number, String* string) {
- int hash;
- int mask = (number_string_cache()->length() >> 1) - 1;
- if (number->IsSmi()) {
- hash = smi_get_hash(Smi::cast(number)) & mask;
- number_string_cache()->set(hash * 2, Smi::cast(number));
- } else {
- hash = double_get_hash(number->Number()) & mask;
- number_string_cache()->set(hash * 2, number);
- }
- number_string_cache()->set(hash * 2 + 1, string);
-}
-
-
-MaybeObject* Heap::NumberToString(Object* number,
- bool check_number_string_cache) {
- isolate_->counters()->number_to_string_runtime()->Increment();
- if (check_number_string_cache) {
- Object* cached = GetNumberStringCache(number);
- if (cached != undefined_value()) {
- return cached;
- }
- }
-
- char arr[100];
- Vector<char> buffer(arr, ARRAY_SIZE(arr));
- const char* str;
- if (number->IsSmi()) {
- int num = Smi::cast(number)->value();
- str = IntToCString(num, buffer);
- } else {
- double num = HeapNumber::cast(number)->value();
- str = DoubleToCString(num, buffer);
- }
-
- Object* js_string;
- MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
- if (maybe_js_string->ToObject(&js_string)) {
- SetNumberStringCache(number, String::cast(js_string));
- }
- return maybe_js_string;
-}
-
-
-Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
- return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
-}
-
-
-Heap::RootListIndex Heap::RootIndexForExternalArrayType(
- ExternalArrayType array_type) {
- switch (array_type) {
- case kExternalByteArray:
- return kExternalByteArrayMapRootIndex;
- case kExternalUnsignedByteArray:
- return kExternalUnsignedByteArrayMapRootIndex;
- case kExternalShortArray:
- return kExternalShortArrayMapRootIndex;
- case kExternalUnsignedShortArray:
- return kExternalUnsignedShortArrayMapRootIndex;
- case kExternalIntArray:
- return kExternalIntArrayMapRootIndex;
- case kExternalUnsignedIntArray:
- return kExternalUnsignedIntArrayMapRootIndex;
- case kExternalFloatArray:
- return kExternalFloatArrayMapRootIndex;
- case kExternalPixelArray:
- return kExternalPixelArrayMapRootIndex;
- default:
- UNREACHABLE();
- return kUndefinedValueRootIndex;
- }
-}
-
-
-MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
- // We need to distinguish the minus zero value and this cannot be
- // done after conversion to int. Doing this by comparing bit
- // patterns is faster than using fpclassify() et al.
- static const DoubleRepresentation minus_zero(-0.0);
-
- DoubleRepresentation rep(value);
- if (rep.bits == minus_zero.bits) {
- return AllocateHeapNumber(-0.0, pretenure);
- }
-
- int int_value = FastD2I(value);
- if (value == int_value && Smi::IsValid(int_value)) {
- return Smi::FromInt(int_value);
- }
-
- // Materialize the value in the heap.
- return AllocateHeapNumber(value, pretenure);
-}
-
-
-MaybeObject* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
- // Statically ensure that it is safe to allocate proxies in paged spaces.
- STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- Object* result;
- { MaybeObject* maybe_result = Allocate(proxy_map(), space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- Proxy::cast(result)->set_proxy(proxy);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
- Object* result;
- { MaybeObject* maybe_result =
- Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
- share->set_name(name);
- Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
- share->set_code(illegal);
- share->set_scope_info(SerializedScopeInfo::Empty());
- Code* construct_stub = isolate_->builtins()->builtin(
- Builtins::kJSConstructStubGeneric);
- share->set_construct_stub(construct_stub);
- share->set_expected_nof_properties(0);
- share->set_length(0);
- share->set_formal_parameter_count(0);
- share->set_instance_class_name(Object_symbol());
- share->set_function_data(undefined_value());
- share->set_script(undefined_value());
- share->set_start_position_and_type(0);
- share->set_debug_info(undefined_value());
- share->set_inferred_name(empty_string());
- share->set_compiler_hints(0);
- share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
- share->set_initial_map(undefined_value());
- share->set_this_property_assignments_count(0);
- share->set_this_property_assignments(undefined_value());
- share->set_opt_count(0);
- share->set_num_literals(0);
- share->set_end_position(0);
- share->set_function_token_position(0);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateJSMessageObject(String* type,
- JSArray* arguments,
- int start_position,
- int end_position,
- Object* script,
- Object* stack_trace,
- Object* stack_frames) {
- Object* result;
- { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- JSMessageObject* message = JSMessageObject::cast(result);
- message->set_properties(Heap::empty_fixed_array());
- message->set_elements(Heap::empty_fixed_array());
- message->set_type(type);
- message->set_arguments(arguments);
- message->set_start_position(start_position);
- message->set_end_position(end_position);
- message->set_script(script);
- message->set_stack_trace(stack_trace);
- message->set_stack_frames(stack_frames);
- return result;
-}
-
-
-
-// Returns true for a character in a range. Both limits are inclusive.
-static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
- // This makes uses of the the unsigned wraparound.
- return character - from <= to - from;
-}
-
-
-MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
- Heap* heap,
- uint32_t c1,
- uint32_t c2) {
- String* symbol;
- // Numeric strings have a different hash algorithm not known by
- // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
- if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
- heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
- return symbol;
- // Now we know the length is 2, we might as well make use of that fact
- // when building the new string.
- } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
- Object* result;
- { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- char* dest = SeqAsciiString::cast(result)->GetChars();
- dest[0] = c1;
- dest[1] = c2;
- return result;
- } else {
- Object* result;
- { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- uc16* dest = SeqTwoByteString::cast(result)->GetChars();
- dest[0] = c1;
- dest[1] = c2;
- return result;
- }
-}
-
-
-MaybeObject* Heap::AllocateConsString(String* first, String* second) {
- int first_length = first->length();
- if (first_length == 0) {
- return second;
- }
-
- int second_length = second->length();
- if (second_length == 0) {
- return first;
- }
-
- int length = first_length + second_length;
-
- // Optimization for 2-byte strings often used as keys in a decompression
- // dictionary. Check whether we already have the string in the symbol
- // table to prevent creation of many unneccesary strings.
- if (length == 2) {
- unsigned c1 = first->Get(0);
- unsigned c2 = second->Get(0);
- return MakeOrFindTwoCharacterString(this, c1, c2);
- }
-
- bool first_is_ascii = first->IsAsciiRepresentation();
- bool second_is_ascii = second->IsAsciiRepresentation();
- bool is_ascii = first_is_ascii && second_is_ascii;
-
- // Make sure that an out of memory exception is thrown if the length
- // of the new cons string is too large.
- if (length > String::kMaxLength || length < 0) {
- isolate()->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
- }
-
- bool is_ascii_data_in_two_byte_string = false;
- if (!is_ascii) {
- // At least one of the strings uses two-byte representation so we
- // can't use the fast case code for short ascii strings below, but
- // we can try to save memory if all chars actually fit in ascii.
- is_ascii_data_in_two_byte_string =
- first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
- if (is_ascii_data_in_two_byte_string) {
- isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
- }
- }
-
- // If the resulting string is small make a flat string.
- if (length < String::kMinNonFlatLength) {
- ASSERT(first->IsFlat());
- ASSERT(second->IsFlat());
- if (is_ascii) {
- Object* result;
- { MaybeObject* maybe_result = AllocateRawAsciiString(length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Copy the characters into the new object.
- char* dest = SeqAsciiString::cast(result)->GetChars();
- // Copy first part.
- const char* src;
- if (first->IsExternalString()) {
- src = ExternalAsciiString::cast(first)->resource()->data();
- } else {
- src = SeqAsciiString::cast(first)->GetChars();
- }
- for (int i = 0; i < first_length; i++) *dest++ = src[i];
- // Copy second part.
- if (second->IsExternalString()) {
- src = ExternalAsciiString::cast(second)->resource()->data();
- } else {
- src = SeqAsciiString::cast(second)->GetChars();
- }
- for (int i = 0; i < second_length; i++) *dest++ = src[i];
- return result;
- } else {
- if (is_ascii_data_in_two_byte_string) {
- Object* result;
- { MaybeObject* maybe_result = AllocateRawAsciiString(length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Copy the characters into the new object.
- char* dest = SeqAsciiString::cast(result)->GetChars();
- String::WriteToFlat(first, dest, 0, first_length);
- String::WriteToFlat(second, dest + first_length, 0, second_length);
- isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
- return result;
- }
-
- Object* result;
- { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Copy the characters into the new object.
- uc16* dest = SeqTwoByteString::cast(result)->GetChars();
- String::WriteToFlat(first, dest, 0, first_length);
- String::WriteToFlat(second, dest + first_length, 0, second_length);
- return result;
- }
- }
-
- Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
- cons_ascii_string_map() : cons_string_map();
-
- Object* result;
- { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- AssertNoAllocation no_gc;
- ConsString* cons_string = ConsString::cast(result);
- WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
- cons_string->set_length(length);
- cons_string->set_hash_field(String::kEmptyHashField);
- cons_string->set_first(first, mode);
- cons_string->set_second(second, mode);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateSubString(String* buffer,
- int start,
- int end,
- PretenureFlag pretenure) {
- int length = end - start;
-
- if (length == 1) {
- return LookupSingleCharacterStringFromCode(buffer->Get(start));
- } else if (length == 2) {
- // Optimization for 2-byte strings often used as keys in a decompression
- // dictionary. Check whether we already have the string in the symbol
- // table to prevent creation of many unneccesary strings.
- unsigned c1 = buffer->Get(start);
- unsigned c2 = buffer->Get(start + 1);
- return MakeOrFindTwoCharacterString(this, c1, c2);
- }
-
- // Make an attempt to flatten the buffer to reduce access time.
- buffer = buffer->TryFlattenGetString();
-
- Object* result;
- { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
- ? AllocateRawAsciiString(length, pretenure )
- : AllocateRawTwoByteString(length, pretenure);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- String* string_result = String::cast(result);
- // Copy the characters into the new object.
- if (buffer->IsAsciiRepresentation()) {
- ASSERT(string_result->IsAsciiRepresentation());
- char* dest = SeqAsciiString::cast(string_result)->GetChars();
- String::WriteToFlat(buffer, dest, start, end);
- } else {
- ASSERT(string_result->IsTwoByteRepresentation());
- uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
- String::WriteToFlat(buffer, dest, start, end);
- }
-
- return result;
-}
-
-
-MaybeObject* Heap::AllocateExternalStringFromAscii(
- ExternalAsciiString::Resource* resource) {
- size_t length = resource->length();
- if (length > static_cast<size_t>(String::kMaxLength)) {
- isolate()->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
- }
-
- Map* map = external_ascii_string_map();
- Object* result;
- { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
- external_string->set_length(static_cast<int>(length));
- external_string->set_hash_field(String::kEmptyHashField);
- external_string->set_resource(resource);
-
- return result;
-}
-
-
-MaybeObject* Heap::AllocateExternalStringFromTwoByte(
- ExternalTwoByteString::Resource* resource) {
- size_t length = resource->length();
- if (length > static_cast<size_t>(String::kMaxLength)) {
- isolate()->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
- }
-
- // For small strings we check whether the resource contains only
- // ASCII characters. If yes, we use a different string map.
- static const size_t kAsciiCheckLengthLimit = 32;
- bool is_ascii = length <= kAsciiCheckLengthLimit &&
- String::IsAscii(resource->data(), static_cast<int>(length));
- Map* map = is_ascii ?
- external_string_with_ascii_data_map() : external_string_map();
- Object* result;
- { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
- external_string->set_length(static_cast<int>(length));
- external_string->set_hash_field(String::kEmptyHashField);
- external_string->set_resource(resource);
-
- return result;
-}
-
-
-MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
- if (code <= String::kMaxAsciiCharCode) {
- Object* value = single_character_string_cache()->get(code);
- if (value != undefined_value()) return value;
-
- char buffer[1];
- buffer[0] = static_cast<char>(code);
- Object* result;
- MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
-
- if (!maybe_result->ToObject(&result)) return maybe_result;
- single_character_string_cache()->set(code, result);
- return result;
- }
-
- Object* result;
- { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- String* answer = String::cast(result);
- answer->Set(0, code);
- return answer;
-}
-
-
-MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
- if (length < 0 || length > ByteArray::kMaxLength) {
- return Failure::OutOfMemoryException();
- }
- if (pretenure == NOT_TENURED) {
- return AllocateByteArray(length);
- }
- int size = ByteArray::SizeFor(length);
- Object* result;
- { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
- ? old_data_space_->AllocateRaw(size)
- : lo_space_->AllocateRaw(size);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
- reinterpret_cast<ByteArray*>(result)->set_length(length);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateByteArray(int length) {
- if (length < 0 || length > ByteArray::kMaxLength) {
- return Failure::OutOfMemoryException();
- }
- int size = ByteArray::SizeFor(length);
- AllocationSpace space =
- (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
- Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
- reinterpret_cast<ByteArray*>(result)->set_length(length);
- return result;
-}
-
-
-void Heap::CreateFillerObjectAt(Address addr, int size) {
- if (size == 0) return;
- HeapObject* filler = HeapObject::FromAddress(addr);
- if (size == kPointerSize) {
- filler->set_map(one_pointer_filler_map());
- } else if (size == 2 * kPointerSize) {
- filler->set_map(two_pointer_filler_map());
- } else {
- filler->set_map(byte_array_map());
- ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
- }
-}
-
-
-MaybeObject* Heap::AllocateExternalArray(int length,
- ExternalArrayType array_type,
- void* external_pointer,
- PretenureFlag pretenure) {
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- Object* result;
- { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
- space,
- OLD_DATA_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- reinterpret_cast<ExternalArray*>(result)->set_map(
- MapForExternalArrayType(array_type));
- reinterpret_cast<ExternalArray*>(result)->set_length(length);
- reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
- external_pointer);
-
- return result;
-}
-
-
-MaybeObject* Heap::CreateCode(const CodeDesc& desc,
- Code::Flags flags,
- Handle<Object> self_reference,
- bool immovable) {
- // Allocate ByteArray before the Code object, so that we do not risk
- // leaving uninitialized Code object (and breaking the heap).
- Object* reloc_info;
- { MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
- if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
- }
-
- // Compute size.
- int body_size = RoundUp(desc.instr_size, kObjectAlignment);
- int obj_size = Code::SizeFor(body_size);
- ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
- MaybeObject* maybe_result;
- // Large code objects and code objects which should stay at a fixed address
- // are allocated in large object space.
- if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
- maybe_result = lo_space_->AllocateRawCode(obj_size);
- } else {
- maybe_result = code_space_->AllocateRaw(obj_size);
- }
-
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
-
- // Initialize the object
- HeapObject::cast(result)->set_map(code_map());
- Code* code = Code::cast(result);
- ASSERT(!isolate_->code_range()->exists() ||
- isolate_->code_range()->contains(code->address()));
- code->set_instruction_size(desc.instr_size);
- code->set_relocation_info(ByteArray::cast(reloc_info));
- code->set_flags(flags);
- if (code->is_call_stub() || code->is_keyed_call_stub()) {
- code->set_check_type(RECEIVER_MAP_CHECK);
- }
- code->set_deoptimization_data(empty_fixed_array());
- // Allow self references to created code object by patching the handle to
- // point to the newly allocated Code object.
- if (!self_reference.is_null()) {
- *(self_reference.location()) = code;
- }
- // Migrate generated code.
- // The generated code can contain Object** values (typically from handles)
- // that are dereferenced during the copy to point directly to the actual heap
- // objects. These pointers can include references to the code object itself,
- // through the self_reference parameter.
- code->CopyFrom(desc);
-
-#ifdef DEBUG
- code->Verify();
-#endif
- return code;
-}
-
-
-MaybeObject* Heap::CopyCode(Code* code) {
- // Allocate an object the same size as the code object.
- int obj_size = code->Size();
- MaybeObject* maybe_result;
- if (obj_size > MaxObjectSizeInPagedSpace()) {
- maybe_result = lo_space_->AllocateRawCode(obj_size);
- } else {
- maybe_result = code_space_->AllocateRaw(obj_size);
- }
-
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
-
- // Copy code object.
- Address old_addr = code->address();
- Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
- CopyBlock(new_addr, old_addr, obj_size);
- // Relocate the copy.
- Code* new_code = Code::cast(result);
- ASSERT(!isolate_->code_range()->exists() ||
- isolate_->code_range()->contains(code->address()));
- new_code->Relocate(new_addr - old_addr);
- return new_code;
-}
-
-
-MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
- // Allocate ByteArray before the Code object, so that we do not risk
- // leaving uninitialized Code object (and breaking the heap).
- Object* reloc_info_array;
- { MaybeObject* maybe_reloc_info_array =
- AllocateByteArray(reloc_info.length(), TENURED);
- if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
- return maybe_reloc_info_array;
- }
- }
-
- int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
-
- int new_obj_size = Code::SizeFor(new_body_size);
-
- Address old_addr = code->address();
-
- size_t relocation_offset =
- static_cast<size_t>(code->instruction_end() - old_addr);
-
- MaybeObject* maybe_result;
- if (new_obj_size > MaxObjectSizeInPagedSpace()) {
- maybe_result = lo_space_->AllocateRawCode(new_obj_size);
- } else {
- maybe_result = code_space_->AllocateRaw(new_obj_size);
- }
-
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
-
- // Copy code object.
- Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
-
- // Copy header and instructions.
- memcpy(new_addr, old_addr, relocation_offset);
-
- Code* new_code = Code::cast(result);
- new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
-
- // Copy patched rinfo.
- memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
-
- // Relocate the copy.
- ASSERT(!isolate_->code_range()->exists() ||
- isolate_->code_range()->contains(code->address()));
- new_code->Relocate(new_addr - old_addr);
-
-#ifdef DEBUG
- code->Verify();
-#endif
- return new_code;
-}
-
-
-MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
- ASSERT(gc_state_ == NOT_IN_GC);
- ASSERT(map->instance_type() != MAP_TYPE);
- // If allocation failures are disallowed, we may allocate in a different
- // space when new space is full and the object is not a large object.
- AllocationSpace retry_space =
- (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
- Object* result;
- { MaybeObject* maybe_result =
- AllocateRaw(map->instance_size(), space, retry_space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- HeapObject::cast(result)->set_map(map);
-#ifdef ENABLE_LOGGING_AND_PROFILING
- isolate_->producer_heap_profile()->RecordJSObjectAllocation(result);
-#endif
- return result;
-}
-
-
-MaybeObject* Heap::InitializeFunction(JSFunction* function,
- SharedFunctionInfo* shared,
- Object* prototype) {
- ASSERT(!prototype->IsMap());
- function->initialize_properties();
- function->initialize_elements();
- function->set_shared(shared);
- function->set_code(shared->code());
- function->set_prototype_or_initial_map(prototype);
- function->set_context(undefined_value());
- function->set_literals(empty_fixed_array());
- function->set_next_function_link(undefined_value());
- return function;
-}
-
-
-MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
- // Allocate the prototype. Make sure to use the object function
- // from the function's context, since the function can be from a
- // different context.
- JSFunction* object_function =
- function->context()->global_context()->object_function();
- Object* prototype;
- { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
- if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
- }
- // When creating the prototype for the function we must set its
- // constructor to the function.
- Object* result;
- { MaybeObject* maybe_result =
- JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
- constructor_symbol(), function, DONT_ENUM);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- return prototype;
-}
-
-
-MaybeObject* Heap::AllocateFunction(Map* function_map,
- SharedFunctionInfo* shared,
- Object* prototype,
- PretenureFlag pretenure) {
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- Object* result;
- { MaybeObject* maybe_result = Allocate(function_map, space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- return InitializeFunction(JSFunction::cast(result), shared, prototype);
-}
-
-
-MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
- // To get fast allocation and map sharing for arguments objects we
- // allocate them based on an arguments boilerplate.
-
- JSObject* boilerplate;
- int arguments_object_size;
- bool strict_mode_callee = callee->IsJSFunction() &&
- JSFunction::cast(callee)->shared()->strict_mode();
- if (strict_mode_callee) {
- boilerplate =
- isolate()->context()->global_context()->
- strict_mode_arguments_boilerplate();
- arguments_object_size = kArgumentsObjectSizeStrict;
- } else {
- boilerplate =
- isolate()->context()->global_context()->arguments_boilerplate();
- arguments_object_size = kArgumentsObjectSize;
- }
-
- // This calls Copy directly rather than using Heap::AllocateRaw so we
- // duplicate the check here.
- ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
-
- // Check that the size of the boilerplate matches our
- // expectations. The ArgumentsAccessStub::GenerateNewObject relies
- // on the size being a known constant.
- ASSERT(arguments_object_size == boilerplate->map()->instance_size());
-
- // Do the allocation.
- Object* result;
- { MaybeObject* maybe_result =
- AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- // Copy the content. The arguments boilerplate doesn't have any
- // fields that point to new space so it's safe to skip the write
- // barrier here.
- CopyBlock(HeapObject::cast(result)->address(),
- boilerplate->address(),
- JSObject::kHeaderSize);
-
- // Set the length property.
- JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
- Smi::FromInt(length),
- SKIP_WRITE_BARRIER);
- // Set the callee property for non-strict mode arguments object only.
- if (!strict_mode_callee) {
- JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
- callee);
- }
-
- // Check the state of the object
- ASSERT(JSObject::cast(result)->HasFastProperties());
- ASSERT(JSObject::cast(result)->HasFastElements());
-
- return result;
-}
-
-
-static bool HasDuplicates(DescriptorArray* descriptors) {
- int count = descriptors->number_of_descriptors();
- if (count > 1) {
- String* prev_key = descriptors->GetKey(0);
- for (int i = 1; i != count; i++) {
- String* current_key = descriptors->GetKey(i);
- if (prev_key == current_key) return true;
- prev_key = current_key;
- }
- }
- return false;
-}
-
-
-MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
- ASSERT(!fun->has_initial_map());
-
- // First create a new map with the size and number of in-object properties
- // suggested by the function.
- int instance_size = fun->shared()->CalculateInstanceSize();
- int in_object_properties = fun->shared()->CalculateInObjectProperties();
- Object* map_obj;
- { MaybeObject* maybe_map_obj = AllocateMap(JS_OBJECT_TYPE, instance_size);
- if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
- }
-
- // Fetch or allocate prototype.
- Object* prototype;
- if (fun->has_instance_prototype()) {
- prototype = fun->instance_prototype();
- } else {
- { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
- if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
- }
- }
- Map* map = Map::cast(map_obj);
- map->set_inobject_properties(in_object_properties);
- map->set_unused_property_fields(in_object_properties);
- map->set_prototype(prototype);
- ASSERT(map->has_fast_elements());
-
- // If the function has only simple this property assignments add
- // field descriptors for these to the initial map as the object
- // cannot be constructed without having these properties. Guard by
- // the inline_new flag so we only change the map if we generate a
- // specialized construct stub.
- ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
- if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
- int count = fun->shared()->this_property_assignments_count();
- if (count > in_object_properties) {
- // Inline constructor can only handle inobject properties.
- fun->shared()->ForbidInlineConstructor();
- } else {
- Object* descriptors_obj;
- { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
- if (!maybe_descriptors_obj->ToObject(&descriptors_obj)) {
- return maybe_descriptors_obj;
- }
- }
- DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
- for (int i = 0; i < count; i++) {
- String* name = fun->shared()->GetThisPropertyAssignmentName(i);
- ASSERT(name->IsSymbol());
- FieldDescriptor field(name, i, NONE);
- field.SetEnumerationIndex(i);
- descriptors->Set(i, &field);
- }
- descriptors->SetNextEnumerationIndex(count);
- descriptors->SortUnchecked();
-
- // The descriptors may contain duplicates because the compiler does not
- // guarantee the uniqueness of property names (it would have required
- // quadratic time). Once the descriptors are sorted we can check for
- // duplicates in linear time.
- if (HasDuplicates(descriptors)) {
- fun->shared()->ForbidInlineConstructor();
- } else {
- map->set_instance_descriptors(descriptors);
- map->set_pre_allocated_property_fields(count);
- map->set_unused_property_fields(in_object_properties - count);
- }
- }
- }
-
- fun->shared()->StartInobjectSlackTracking(map);
-
- return map;
-}
-
-
-void Heap::InitializeJSObjectFromMap(JSObject* obj,
- FixedArray* properties,
- Map* map) {
- obj->set_properties(properties);
- obj->initialize_elements();
- // TODO(1240798): Initialize the object's body using valid initial values
- // according to the object's initial map. For example, if the map's
- // instance type is JS_ARRAY_TYPE, the length field should be initialized
- // to a number (eg, Smi::FromInt(0)) and the elements initialized to a
- // fixed array (eg, Heap::empty_fixed_array()). Currently, the object
- // verification code has to cope with (temporarily) invalid objects. See
- // for example, JSArray::JSArrayVerify).
- Object* filler;
- // We cannot always fill with one_pointer_filler_map because objects
- // created from API functions expect their internal fields to be initialized
- // with undefined_value.
- if (map->constructor()->IsJSFunction() &&
- JSFunction::cast(map->constructor())->shared()->
- IsInobjectSlackTrackingInProgress()) {
- // We might want to shrink the object later.
- ASSERT(obj->GetInternalFieldCount() == 0);
- filler = Heap::one_pointer_filler_map();
- } else {
- filler = Heap::undefined_value();
- }
- obj->InitializeBody(map->instance_size(), filler);
-}
-
-
-MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
- // JSFunctions should be allocated using AllocateFunction to be
- // properly initialized.
- ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
-
- // Both types of global objects should be allocated using
- // AllocateGlobalObject to be properly initialized.
- ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
- ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
-
- // Allocate the backing storage for the properties.
- int prop_size =
- map->pre_allocated_property_fields() +
- map->unused_property_fields() -
- map->inobject_properties();
- ASSERT(prop_size >= 0);
- Object* properties;
- { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
- if (!maybe_properties->ToObject(&properties)) return maybe_properties;
- }
-
- // Allocate the JSObject.
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
- Object* obj;
- { MaybeObject* maybe_obj = Allocate(map, space);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- // Initialize the JSObject.
- InitializeJSObjectFromMap(JSObject::cast(obj),
- FixedArray::cast(properties),
- map);
- ASSERT(JSObject::cast(obj)->HasFastElements());
- return obj;
-}
-
-
-MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
- PretenureFlag pretenure) {
- // Allocate the initial map if absent.
- if (!constructor->has_initial_map()) {
- Object* initial_map;
- { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
- if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
- }
- constructor->set_initial_map(Map::cast(initial_map));
- Map::cast(initial_map)->set_constructor(constructor);
- }
- // Allocate the object based on the constructors initial map.
- MaybeObject* result =
- AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
-#ifdef DEBUG
- // Make sure result is NOT a global object if valid.
- Object* non_failure;
- ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
-#endif
- return result;
-}
-
-
-MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
- ASSERT(constructor->has_initial_map());
- Map* map = constructor->initial_map();
-
- // Make sure no field properties are described in the initial map.
- // This guarantees us that normalizing the properties does not
- // require us to change property values to JSGlobalPropertyCells.
- ASSERT(map->NextFreePropertyIndex() == 0);
-
- // Make sure we don't have a ton of pre-allocated slots in the
- // global objects. They will be unused once we normalize the object.
- ASSERT(map->unused_property_fields() == 0);
- ASSERT(map->inobject_properties() == 0);
-
- // Initial size of the backing store to avoid resize of the storage during
- // bootstrapping. The size differs between the JS global object ad the
- // builtins object.
- int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
-
- // Allocate a dictionary object for backing storage.
- Object* obj;
- { MaybeObject* maybe_obj =
- StringDictionary::Allocate(
- map->NumberOfDescribedProperties() * 2 + initial_size);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- StringDictionary* dictionary = StringDictionary::cast(obj);
-
- // The global object might be created from an object template with accessors.
- // Fill these accessors into the dictionary.
- DescriptorArray* descs = map->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- PropertyDetails details = descs->GetDetails(i);
- ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
- PropertyDetails d =
- PropertyDetails(details.attributes(), CALLBACKS, details.index());
- Object* value = descs->GetCallbacksObject(i);
- { MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
- if (!maybe_value->ToObject(&value)) return maybe_value;
- }
-
- Object* result;
- { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- dictionary = StringDictionary::cast(result);
- }
-
- // Allocate the global object and initialize it with the backing store.
- { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- JSObject* global = JSObject::cast(obj);
- InitializeJSObjectFromMap(global, dictionary, map);
-
- // Create a new map for the global object.
- { MaybeObject* maybe_obj = map->CopyDropDescriptors();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- Map* new_map = Map::cast(obj);
-
- // Setup the global object as a normalized object.
- global->set_map(new_map);
- global->map()->set_instance_descriptors(empty_descriptor_array());
- global->set_properties(dictionary);
-
- // Make sure result is a global object with properties in dictionary.
- ASSERT(global->IsGlobalObject());
- ASSERT(!global->HasFastProperties());
- return global;
-}
-
-
-MaybeObject* Heap::CopyJSObject(JSObject* source) {
- // Never used to copy functions. If functions need to be copied we
- // have to be careful to clear the literals array.
- ASSERT(!source->IsJSFunction());
-
- // Make the clone.
- Map* map = source->map();
- int object_size = map->instance_size();
- Object* clone;
-
- // If we're forced to always allocate, we use the general allocation
- // functions which may leave us with an object in old space.
- if (always_allocate()) {
- { MaybeObject* maybe_clone =
- AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
- if (!maybe_clone->ToObject(&clone)) return maybe_clone;
- }
- Address clone_address = HeapObject::cast(clone)->address();
- CopyBlock(clone_address,
- source->address(),
- object_size);
- // Update write barrier for all fields that lie beyond the header.
- RecordWrites(clone_address,
- JSObject::kHeaderSize,
- (object_size - JSObject::kHeaderSize) / kPointerSize);
- } else {
- { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
- if (!maybe_clone->ToObject(&clone)) return maybe_clone;
- }
- ASSERT(InNewSpace(clone));
- // Since we know the clone is allocated in new space, we can copy
- // the contents without worrying about updating the write barrier.
- CopyBlock(HeapObject::cast(clone)->address(),
- source->address(),
- object_size);
- }
-
- FixedArray* elements = FixedArray::cast(source->elements());
- FixedArray* properties = FixedArray::cast(source->properties());
- // Update elements if necessary.
- if (elements->length() > 0) {
- Object* elem;
- { MaybeObject* maybe_elem =
- (elements->map() == fixed_cow_array_map()) ?
- elements : CopyFixedArray(elements);
- if (!maybe_elem->ToObject(&elem)) return maybe_elem;
- }
- JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
- }
- // Update properties if necessary.
- if (properties->length() > 0) {
- Object* prop;
- { MaybeObject* maybe_prop = CopyFixedArray(properties);
- if (!maybe_prop->ToObject(&prop)) return maybe_prop;
- }
- JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
- }
- // Return the new clone.
-#ifdef ENABLE_LOGGING_AND_PROFILING
- isolate_->producer_heap_profile()->RecordJSObjectAllocation(clone);
-#endif
- return clone;
-}
-
-
-MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
- JSGlobalProxy* object) {
- ASSERT(constructor->has_initial_map());
- Map* map = constructor->initial_map();
-
- // Check that the already allocated object has the same size and type as
- // objects allocated using the constructor.
- ASSERT(map->instance_size() == object->map()->instance_size());
- ASSERT(map->instance_type() == object->map()->instance_type());
-
- // Allocate the backing storage for the properties.
- int prop_size = map->unused_property_fields() - map->inobject_properties();
- Object* properties;
- { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
- if (!maybe_properties->ToObject(&properties)) return maybe_properties;
- }
-
- // Reset the map for the object.
- object->set_map(constructor->initial_map());
-
- // Reinitialize the object from the constructor map.
- InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
- return object;
-}
-
-
-MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
- PretenureFlag pretenure) {
- Object* result;
- { MaybeObject* maybe_result =
- AllocateRawAsciiString(string.length(), pretenure);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- // Copy the characters into the new object.
- SeqAsciiString* string_result = SeqAsciiString::cast(result);
- for (int i = 0; i < string.length(); i++) {
- string_result->SeqAsciiStringSet(i, string[i]);
- }
- return result;
-}
-
-
-MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
- PretenureFlag pretenure) {
- // V8 only supports characters in the Basic Multilingual Plane.
- const uc32 kMaxSupportedChar = 0xFFFF;
- // Count the number of characters in the UTF-8 string and check if
- // it is an ASCII string.
- Access<ScannerConstants::Utf8Decoder>
- decoder(isolate_->scanner_constants()->utf8_decoder());
- decoder->Reset(string.start(), string.length());
- int chars = 0;
- while (decoder->has_more()) {
- decoder->GetNext();
- chars++;
- }
-
- Object* result;
- { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- // Convert and copy the characters into the new object.
- String* string_result = String::cast(result);
- decoder->Reset(string.start(), string.length());
- for (int i = 0; i < chars; i++) {
- uc32 r = decoder->GetNext();
- if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
- string_result->Set(i, r);
- }
- return result;
-}
-
-
-MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
- PretenureFlag pretenure) {
- // Check if the string is an ASCII string.
- MaybeObject* maybe_result;
- if (String::IsAscii(string.start(), string.length())) {
- maybe_result = AllocateRawAsciiString(string.length(), pretenure);
- } else { // It's not an ASCII string.
- maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
- }
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
-
- // Copy the characters into the new object, which may be either ASCII or
- // UTF-16.
- String* string_result = String::cast(result);
- for (int i = 0; i < string.length(); i++) {
- string_result->Set(i, string[i]);
- }
- return result;
-}
-
-
-Map* Heap::SymbolMapForString(String* string) {
- // If the string is in new space it cannot be used as a symbol.
- if (InNewSpace(string)) return NULL;
-
- // Find the corresponding symbol map for strings.
- Map* map = string->map();
- if (map == ascii_string_map()) {
- return ascii_symbol_map();
- }
- if (map == string_map()) {
- return symbol_map();
- }
- if (map == cons_string_map()) {
- return cons_symbol_map();
- }
- if (map == cons_ascii_string_map()) {
- return cons_ascii_symbol_map();
- }
- if (map == external_string_map()) {
- return external_symbol_map();
- }
- if (map == external_ascii_string_map()) {
- return external_ascii_symbol_map();
- }
- if (map == external_string_with_ascii_data_map()) {
- return external_symbol_with_ascii_data_map();
- }
-
- // No match found.
- return NULL;
-}
-
-
-MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
- int chars,
- uint32_t hash_field) {
- ASSERT(chars >= 0);
- // Ensure the chars matches the number of characters in the buffer.
- ASSERT(static_cast<unsigned>(chars) == buffer->Length());
- // Determine whether the string is ascii.
- bool is_ascii = true;
- while (buffer->has_more()) {
- if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
- is_ascii = false;
- break;
- }
- }
- buffer->Rewind();
-
- // Compute map and object size.
- int size;
- Map* map;
-
- if (is_ascii) {
- if (chars > SeqAsciiString::kMaxLength) {
- return Failure::OutOfMemoryException();
- }
- map = ascii_symbol_map();
- size = SeqAsciiString::SizeFor(chars);
- } else {
- if (chars > SeqTwoByteString::kMaxLength) {
- return Failure::OutOfMemoryException();
- }
- map = symbol_map();
- size = SeqTwoByteString::SizeFor(chars);
- }
-
- // Allocate string.
- Object* result;
- { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
- ? lo_space_->AllocateRaw(size)
- : old_data_space_->AllocateRaw(size);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- reinterpret_cast<HeapObject*>(result)->set_map(map);
- // Set length and hash fields of the allocated string.
- String* answer = String::cast(result);
- answer->set_length(chars);
- answer->set_hash_field(hash_field);
-
- ASSERT_EQ(size, answer->Size());
-
- // Fill in the characters.
- for (int i = 0; i < chars; i++) {
- answer->Set(i, buffer->GetNext());
- }
- return answer;
-}
-
-
-MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
- if (length < 0 || length > SeqAsciiString::kMaxLength) {
- return Failure::OutOfMemoryException();
- }
-
- int size = SeqAsciiString::SizeFor(length);
- ASSERT(size <= SeqAsciiString::kMaxSize);
-
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- AllocationSpace retry_space = OLD_DATA_SPACE;
-
- if (space == NEW_SPACE) {
- if (size > kMaxObjectSizeInNewSpace) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
- } else if (size > MaxObjectSizeInPagedSpace()) {
- // Allocate in new space, retry in large object space.
- retry_space = LO_SPACE;
- }
- } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
- space = LO_SPACE;
- }
- Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- // Partially initialize the object.
- HeapObject::cast(result)->set_map(ascii_string_map());
- String::cast(result)->set_length(length);
- String::cast(result)->set_hash_field(String::kEmptyHashField);
- ASSERT_EQ(size, HeapObject::cast(result)->Size());
- return result;
-}
-
-
-MaybeObject* Heap::AllocateRawTwoByteString(int length,
- PretenureFlag pretenure) {
- if (length < 0 || length > SeqTwoByteString::kMaxLength) {
- return Failure::OutOfMemoryException();
- }
- int size = SeqTwoByteString::SizeFor(length);
- ASSERT(size <= SeqTwoByteString::kMaxSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- AllocationSpace retry_space = OLD_DATA_SPACE;
-
- if (space == NEW_SPACE) {
- if (size > kMaxObjectSizeInNewSpace) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
- } else if (size > MaxObjectSizeInPagedSpace()) {
- // Allocate in new space, retry in large object space.
- retry_space = LO_SPACE;
- }
- } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
- space = LO_SPACE;
- }
- Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- // Partially initialize the object.
- HeapObject::cast(result)->set_map(string_map());
- String::cast(result)->set_length(length);
- String::cast(result)->set_hash_field(String::kEmptyHashField);
- ASSERT_EQ(size, HeapObject::cast(result)->Size());
- return result;
-}
-
-
-MaybeObject* Heap::AllocateEmptyFixedArray() {
- int size = FixedArray::SizeFor(0);
- Object* result;
- { MaybeObject* maybe_result =
- AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Initialize the object.
- reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
- reinterpret_cast<FixedArray*>(result)->set_length(0);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateRawFixedArray(int length) {
- if (length < 0 || length > FixedArray::kMaxLength) {
- return Failure::OutOfMemoryException();
- }
- ASSERT(length > 0);
- // Use the general function if we're forced to always allocate.
- if (always_allocate()) return AllocateFixedArray(length, TENURED);
- // Allocate the raw data for a fixed array.
- int size = FixedArray::SizeFor(length);
- return size <= kMaxObjectSizeInNewSpace
- ? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRawFixedArray(size);
-}
-
-
-MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
- int len = src->length();
- Object* obj;
- { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- if (InNewSpace(obj)) {
- HeapObject* dst = HeapObject::cast(obj);
- dst->set_map(map);
- CopyBlock(dst->address() + kPointerSize,
- src->address() + kPointerSize,
- FixedArray::SizeFor(len) - kPointerSize);
- return obj;
- }
- HeapObject::cast(obj)->set_map(map);
- FixedArray* result = FixedArray::cast(obj);
- result->set_length(len);
-
- // Copy the content
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateFixedArray(int length) {
- ASSERT(length >= 0);
- if (length == 0) return empty_fixed_array();
- Object* result;
- { MaybeObject* maybe_result = AllocateRawFixedArray(length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Initialize header.
- FixedArray* array = reinterpret_cast<FixedArray*>(result);
- array->set_map(fixed_array_map());
- array->set_length(length);
- // Initialize body.
- ASSERT(!InNewSpace(undefined_value()));
- MemsetPointer(array->data_start(), undefined_value(), length);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
- if (length < 0 || length > FixedArray::kMaxLength) {
- return Failure::OutOfMemoryException();
- }
-
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- int size = FixedArray::SizeFor(length);
- if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
- // Too big for new space.
- space = LO_SPACE;
- } else if (space == OLD_POINTER_SPACE &&
- size > MaxObjectSizeInPagedSpace()) {
- // Too big for old pointer space.
- space = LO_SPACE;
- }
-
- AllocationSpace retry_space =
- (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
-
- return AllocateRaw(size, space, retry_space);
-}
-
-
-MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
- Heap* heap,
- int length,
- PretenureFlag pretenure,
- Object* filler) {
- ASSERT(length >= 0);
- ASSERT(heap->empty_fixed_array()->IsFixedArray());
- if (length == 0) return heap->empty_fixed_array();
-
- ASSERT(!heap->InNewSpace(filler));
- Object* result;
- { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- HeapObject::cast(result)->set_map(heap->fixed_array_map());
- FixedArray* array = FixedArray::cast(result);
- array->set_length(length);
- MemsetPointer(array->data_start(), filler, length);
- return array;
-}
-
-
-MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(this,
- length,
- pretenure,
- undefined_value());
-}
-
-
-MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
- PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(this,
- length,
- pretenure,
- the_hole_value());
-}
-
-
-MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
- if (length == 0) return empty_fixed_array();
-
- Object* obj;
- { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
- FixedArray::cast(obj)->set_length(length);
- return obj;
-}
-
-
-MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
- Object* result;
- { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
- ASSERT(result->IsHashTable());
- return result;
-}
-
-
-MaybeObject* Heap::AllocateGlobalContext() {
- Object* result;
- { MaybeObject* maybe_result =
- AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Context* context = reinterpret_cast<Context*>(result);
- context->set_map(global_context_map());
- ASSERT(context->IsGlobalContext());
- ASSERT(result->IsContext());
- return result;
-}
-
-
-MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
- ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
- Object* result;
- { MaybeObject* maybe_result = AllocateFixedArray(length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Context* context = reinterpret_cast<Context*>(result);
- context->set_map(context_map());
- context->set_closure(function);
- context->set_fcontext(context);
- context->set_previous(NULL);
- context->set_extension(NULL);
- context->set_global(function->context()->global());
- ASSERT(!context->IsGlobalContext());
- ASSERT(context->is_function_context());
- ASSERT(result->IsContext());
- return result;
-}
-
-
-MaybeObject* Heap::AllocateWithContext(Context* previous,
- JSObject* extension,
- bool is_catch_context) {
- Object* result;
- { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Context* context = reinterpret_cast<Context*>(result);
- context->set_map(is_catch_context ? catch_context_map() :
- context_map());
- context->set_closure(previous->closure());
- context->set_fcontext(previous->fcontext());
- context->set_previous(previous);
- context->set_extension(extension);
- context->set_global(previous->global());
- ASSERT(!context->IsGlobalContext());
- ASSERT(!context->is_function_context());
- ASSERT(result->IsContext());
- return result;
-}
-
-
-MaybeObject* Heap::AllocateStruct(InstanceType type) {
- Map* map;
- switch (type) {
-#define MAKE_CASE(NAME, Name, name) \
- case NAME##_TYPE: map = name##_map(); break;
-STRUCT_LIST(MAKE_CASE)
-#undef MAKE_CASE
- default:
- UNREACHABLE();
- return Failure::InternalError();
- }
- int size = map->instance_size();
- AllocationSpace space =
- (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
- Object* result;
- { MaybeObject* maybe_result = Allocate(map, space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Struct::cast(result)->InitializeBody(size);
- return result;
-}
-
-
-bool Heap::IdleNotification() {
- static const int kIdlesBeforeScavenge = 4;
- static const int kIdlesBeforeMarkSweep = 7;
- static const int kIdlesBeforeMarkCompact = 8;
- static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
- static const unsigned int kGCsBetweenCleanup = 4;
-
- if (!last_idle_notification_gc_count_init_) {
- last_idle_notification_gc_count_ = gc_count_;
- last_idle_notification_gc_count_init_ = true;
- }
-
- bool uncommit = true;
- bool finished = false;
-
- // Reset the number of idle notifications received when a number of
- // GCs have taken place. This allows another round of cleanup based
- // on idle notifications if enough work has been carried out to
- // provoke a number of garbage collections.
- if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
- number_idle_notifications_ =
- Min(number_idle_notifications_ + 1, kMaxIdleCount);
- } else {
- number_idle_notifications_ = 0;
- last_idle_notification_gc_count_ = gc_count_;
- }
-
- if (number_idle_notifications_ == kIdlesBeforeScavenge) {
- if (contexts_disposed_ > 0) {
- HistogramTimerScope scope(isolate_->counters()->gc_context());
- CollectAllGarbage(false);
- } else {
- CollectGarbage(NEW_SPACE);
- }
- new_space_.Shrink();
- last_idle_notification_gc_count_ = gc_count_;
- } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
- // Before doing the mark-sweep collections we clear the
- // compilation cache to avoid hanging on to source code and
- // generated code for cached functions.
- isolate_->compilation_cache()->Clear();
-
- CollectAllGarbage(false);
- new_space_.Shrink();
- last_idle_notification_gc_count_ = gc_count_;
-
- } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
- CollectAllGarbage(true);
- new_space_.Shrink();
- last_idle_notification_gc_count_ = gc_count_;
- number_idle_notifications_ = 0;
- finished = true;
- } else if (contexts_disposed_ > 0) {
- if (FLAG_expose_gc) {
- contexts_disposed_ = 0;
- } else {
- HistogramTimerScope scope(isolate_->counters()->gc_context());
- CollectAllGarbage(false);
- last_idle_notification_gc_count_ = gc_count_;
- }
- // If this is the first idle notification, we reset the
- // notification count to avoid letting idle notifications for
- // context disposal garbage collections start a potentially too
- // aggressive idle GC cycle.
- if (number_idle_notifications_ <= 1) {
- number_idle_notifications_ = 0;
- uncommit = false;
- }
- } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
- // If we have received more than kIdlesBeforeMarkCompact idle
- // notifications we do not perform any cleanup because we don't
- // expect to gain much by doing so.
- finished = true;
- }
-
- // Make sure that we have no pending context disposals and
- // conditionally uncommit from space.
- ASSERT(contexts_disposed_ == 0);
- if (uncommit) UncommitFromSpace();
- return finished;
-}
-
-
-#ifdef DEBUG
-
-void Heap::Print() {
- if (!HasBeenSetup()) return;
- isolate()->PrintStack();
- AllSpaces spaces;
- for (Space* space = spaces.next(); space != NULL; space = spaces.next())
- space->Print();
-}
-
-
-void Heap::ReportCodeStatistics(const char* title) {
- PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
- PagedSpace::ResetCodeStatistics();
- // We do not look for code in new space, map space, or old space. If code
- // somehow ends up in those spaces, we would miss it here.
- code_space_->CollectCodeStatistics();
- lo_space_->CollectCodeStatistics();
- PagedSpace::ReportCodeStatistics();
-}
-
-
-// This function expects that NewSpace's allocated objects histogram is
-// populated (via a call to CollectStatistics or else as a side effect of a
-// just-completed scavenge collection).
-void Heap::ReportHeapStatistics(const char* title) {
- USE(title);
- PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
- title, gc_count_);
- PrintF("mark-compact GC : %d\n", mc_count_);
- PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
- old_gen_promotion_limit_);
- PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
- old_gen_allocation_limit_);
-
- PrintF("\n");
- PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
- isolate_->global_handles()->PrintStats();
- PrintF("\n");
-
- PrintF("Heap statistics : ");
- isolate_->memory_allocator()->ReportStatistics();
- PrintF("To space : ");
- new_space_.ReportStatistics();
- PrintF("Old pointer space : ");
- old_pointer_space_->ReportStatistics();
- PrintF("Old data space : ");
- old_data_space_->ReportStatistics();
- PrintF("Code space : ");
- code_space_->ReportStatistics();
- PrintF("Map space : ");
- map_space_->ReportStatistics();
- PrintF("Cell space : ");
- cell_space_->ReportStatistics();
- PrintF("Large object space : ");
- lo_space_->ReportStatistics();
- PrintF(">>>>>> ========================================= >>>>>>\n");
-}
-
-#endif // DEBUG
-
-bool Heap::Contains(HeapObject* value) {
- return Contains(value->address());
-}
-
-
-bool Heap::Contains(Address addr) {
- if (OS::IsOutsideAllocatedSpace(addr)) return false;
- return HasBeenSetup() &&
- (new_space_.ToSpaceContains(addr) ||
- old_pointer_space_->Contains(addr) ||
- old_data_space_->Contains(addr) ||
- code_space_->Contains(addr) ||
- map_space_->Contains(addr) ||
- cell_space_->Contains(addr) ||
- lo_space_->SlowContains(addr));
-}
-
-
-bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
- return InSpace(value->address(), space);
-}
-
-
-bool Heap::InSpace(Address addr, AllocationSpace space) {
- if (OS::IsOutsideAllocatedSpace(addr)) return false;
- if (!HasBeenSetup()) return false;
-
- switch (space) {
- case NEW_SPACE:
- return new_space_.ToSpaceContains(addr);
- case OLD_POINTER_SPACE:
- return old_pointer_space_->Contains(addr);
- case OLD_DATA_SPACE:
- return old_data_space_->Contains(addr);
- case CODE_SPACE:
- return code_space_->Contains(addr);
- case MAP_SPACE:
- return map_space_->Contains(addr);
- case CELL_SPACE:
- return cell_space_->Contains(addr);
- case LO_SPACE:
- return lo_space_->SlowContains(addr);
- }
-
- return false;
-}
-
-
-#ifdef DEBUG
-static void DummyScavengePointer(HeapObject** p) {
-}
-
-
-static void VerifyPointersUnderWatermark(
- PagedSpace* space,
- DirtyRegionCallback visit_dirty_region) {
- PageIterator it(space, PageIterator::PAGES_IN_USE);
-
- while (it.has_next()) {
- Page* page = it.next();
- Address start = page->ObjectAreaStart();
- Address end = page->AllocationWatermark();
-
- HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
- start,
- end,
- visit_dirty_region,
- &DummyScavengePointer);
- }
-}
-
-
-static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
- LargeObjectIterator it(space);
- for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
- if (object->IsFixedArray()) {
- Address slot_address = object->address();
- Address end = object->address() + object->Size();
-
- while (slot_address < end) {
- HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
- // When we are not in GC the Heap::InNewSpace() predicate
- // checks that pointers which satisfy predicate point into
- // the active semispace.
- HEAP->InNewSpace(*slot);
- slot_address += kPointerSize;
- }
- }
- }
-}
-
-
-void Heap::Verify() {
- ASSERT(HasBeenSetup());
-
- VerifyPointersVisitor visitor;
- IterateRoots(&visitor, VISIT_ONLY_STRONG);
-
- new_space_.Verify();
-
- VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
- old_pointer_space_->Verify(&dirty_regions_visitor);
- map_space_->Verify(&dirty_regions_visitor);
-
- VerifyPointersUnderWatermark(old_pointer_space_,
- &IteratePointersInDirtyRegion);
- VerifyPointersUnderWatermark(map_space_,
- &IteratePointersInDirtyMapsRegion);
- VerifyPointersUnderWatermark(lo_space_);
-
- VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
- VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
-
- VerifyPointersVisitor no_dirty_regions_visitor;
- old_data_space_->Verify(&no_dirty_regions_visitor);
- code_space_->Verify(&no_dirty_regions_visitor);
- cell_space_->Verify(&no_dirty_regions_visitor);
-
- lo_space_->Verify();
-}
-#endif // DEBUG
-
-
-MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
- Object* symbol = NULL;
- Object* new_table;
- { MaybeObject* maybe_new_table =
- symbol_table()->LookupSymbol(string, &symbol);
- if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
- }
- // Can't use set_symbol_table because SymbolTable::cast knows that
- // SymbolTable is a singleton and checks for identity.
- roots_[kSymbolTableRootIndex] = new_table;
- ASSERT(symbol != NULL);
- return symbol;
-}
-
-
-MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
- Object* symbol = NULL;
- Object* new_table;
- { MaybeObject* maybe_new_table =
- symbol_table()->LookupAsciiSymbol(string, &symbol);
- if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
- }
- // Can't use set_symbol_table because SymbolTable::cast knows that
- // SymbolTable is a singleton and checks for identity.
- roots_[kSymbolTableRootIndex] = new_table;
- ASSERT(symbol != NULL);
- return symbol;
-}
-
-
-MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
- Object* symbol = NULL;
- Object* new_table;
- { MaybeObject* maybe_new_table =
- symbol_table()->LookupTwoByteSymbol(string, &symbol);
- if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
- }
- // Can't use set_symbol_table because SymbolTable::cast knows that
- // SymbolTable is a singleton and checks for identity.
- roots_[kSymbolTableRootIndex] = new_table;
- ASSERT(symbol != NULL);
- return symbol;
-}
-
-
-MaybeObject* Heap::LookupSymbol(String* string) {
- if (string->IsSymbol()) return string;
- Object* symbol = NULL;
- Object* new_table;
- { MaybeObject* maybe_new_table =
- symbol_table()->LookupString(string, &symbol);
- if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
- }
- // Can't use set_symbol_table because SymbolTable::cast knows that
- // SymbolTable is a singleton and checks for identity.
- roots_[kSymbolTableRootIndex] = new_table;
- ASSERT(symbol != NULL);
- return symbol;
-}
-
-
-bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
- if (string->IsSymbol()) {
- *symbol = string;
- return true;
- }
- return symbol_table()->LookupSymbolIfExists(string, symbol);
-}
-
-
-#ifdef DEBUG
-void Heap::ZapFromSpace() {
- ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
- for (Address a = new_space_.FromSpaceLow();
- a < new_space_.FromSpaceHigh();
- a += kPointerSize) {
- Memory::Address_at(a) = kFromSpaceZapValue;
- }
-}
-#endif // DEBUG
-
-
-bool Heap::IteratePointersInDirtyRegion(Heap* heap,
- Address start,
- Address end,
- ObjectSlotCallback copy_object_func) {
- Address slot_address = start;
- bool pointers_to_new_space_found = false;
-
- while (slot_address < end) {
- Object** slot = reinterpret_cast<Object**>(slot_address);
- if (heap->InNewSpace(*slot)) {
- ASSERT((*slot)->IsHeapObject());
- copy_object_func(reinterpret_cast<HeapObject**>(slot));
- if (heap->InNewSpace(*slot)) {
- ASSERT((*slot)->IsHeapObject());
- pointers_to_new_space_found = true;
- }
- }
- slot_address += kPointerSize;
- }
- return pointers_to_new_space_found;
-}
-
-
-// Compute start address of the first map following given addr.
-static inline Address MapStartAlign(Address addr) {
- Address page = Page::FromAddress(addr)->ObjectAreaStart();
- return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
-}
-
-
-// Compute end address of the first map preceding given addr.
-static inline Address MapEndAlign(Address addr) {
- Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
- return page + ((addr - page) / Map::kSize * Map::kSize);
-}
-
-
-static bool IteratePointersInDirtyMaps(Address start,
- Address end,
- ObjectSlotCallback copy_object_func) {
- ASSERT(MapStartAlign(start) == start);
- ASSERT(MapEndAlign(end) == end);
-
- Address map_address = start;
- bool pointers_to_new_space_found = false;
-
- Heap* heap = HEAP;
- while (map_address < end) {
- ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
- ASSERT(Memory::Object_at(map_address)->IsMap());
-
- Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
- Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
-
- if (Heap::IteratePointersInDirtyRegion(heap,
- pointer_fields_start,
- pointer_fields_end,
- copy_object_func)) {
- pointers_to_new_space_found = true;
- }
-
- map_address += Map::kSize;
- }
-
- return pointers_to_new_space_found;
-}
-
-
-bool Heap::IteratePointersInDirtyMapsRegion(
- Heap* heap,
- Address start,
- Address end,
- ObjectSlotCallback copy_object_func) {
- Address map_aligned_start = MapStartAlign(start);
- Address map_aligned_end = MapEndAlign(end);
-
- bool contains_pointers_to_new_space = false;
-
- if (map_aligned_start != start) {
- Address prev_map = map_aligned_start - Map::kSize;
- ASSERT(Memory::Object_at(prev_map)->IsMap());
-
- Address pointer_fields_start =
- Max(start, prev_map + Map::kPointerFieldsBeginOffset);
-
- Address pointer_fields_end =
- Min(prev_map + Map::kPointerFieldsEndOffset, end);
-
- contains_pointers_to_new_space =
- IteratePointersInDirtyRegion(heap,
- pointer_fields_start,
- pointer_fields_end,
- copy_object_func)
- || contains_pointers_to_new_space;
- }
-
- contains_pointers_to_new_space =
- IteratePointersInDirtyMaps(map_aligned_start,
- map_aligned_end,
- copy_object_func)
- || contains_pointers_to_new_space;
-
- if (map_aligned_end != end) {
- ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
-
- Address pointer_fields_start =
- map_aligned_end + Map::kPointerFieldsBeginOffset;
-
- Address pointer_fields_end =
- Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
-
- contains_pointers_to_new_space =
- IteratePointersInDirtyRegion(heap,
- pointer_fields_start,
- pointer_fields_end,
- copy_object_func)
- || contains_pointers_to_new_space;
- }
-
- return contains_pointers_to_new_space;
-}
-
-
-void Heap::IterateAndMarkPointersToFromSpace(Address start,
- Address end,
- ObjectSlotCallback callback) {
- Address slot_address = start;
- Page* page = Page::FromAddress(start);
-
- uint32_t marks = page->GetRegionMarks();
-
- while (slot_address < end) {
- Object** slot = reinterpret_cast<Object**>(slot_address);
- if (InFromSpace(*slot)) {
- ASSERT((*slot)->IsHeapObject());
- callback(reinterpret_cast<HeapObject**>(slot));
- if (InNewSpace(*slot)) {
- ASSERT((*slot)->IsHeapObject());
- marks |= page->GetRegionMaskForAddress(slot_address);
- }
- }
- slot_address += kPointerSize;
- }
-
- page->SetRegionMarks(marks);
-}
-
-
-uint32_t Heap::IterateDirtyRegions(
- uint32_t marks,
- Address area_start,
- Address area_end,
- DirtyRegionCallback visit_dirty_region,
- ObjectSlotCallback copy_object_func) {
- uint32_t newmarks = 0;
- uint32_t mask = 1;
-
- if (area_start >= area_end) {
- return newmarks;
- }
-
- Address region_start = area_start;
-
- // area_start does not necessarily coincide with start of the first region.
- // Thus to calculate the beginning of the next region we have to align
- // area_start by Page::kRegionSize.
- Address second_region =
- reinterpret_cast<Address>(
- reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
- ~Page::kRegionAlignmentMask);
-
- // Next region might be beyond area_end.
- Address region_end = Min(second_region, area_end);
-
- if (marks & mask) {
- if (visit_dirty_region(this, region_start, region_end, copy_object_func)) {
- newmarks |= mask;
- }
- }
- mask <<= 1;
-
- // Iterate subsequent regions which fully lay inside [area_start, area_end[.
- region_start = region_end;
- region_end = region_start + Page::kRegionSize;
-
- while (region_end <= area_end) {
- if (marks & mask) {
- if (visit_dirty_region(this,
- region_start,
- region_end,
- copy_object_func)) {
- newmarks |= mask;
- }
- }
-
- region_start = region_end;
- region_end = region_start + Page::kRegionSize;
-
- mask <<= 1;
- }
-
- if (region_start != area_end) {
- // A small piece of area left uniterated because area_end does not coincide
- // with region end. Check whether region covering last part of area is
- // dirty.
- if (marks & mask) {
- if (visit_dirty_region(this, region_start, area_end, copy_object_func)) {
- newmarks |= mask;
- }
- }
- }
-
- return newmarks;
-}
-
-
-
-void Heap::IterateDirtyRegions(
- PagedSpace* space,
- DirtyRegionCallback visit_dirty_region,
- ObjectSlotCallback copy_object_func,
- ExpectedPageWatermarkState expected_page_watermark_state) {
-
- PageIterator it(space, PageIterator::PAGES_IN_USE);
-
- while (it.has_next()) {
- Page* page = it.next();
- uint32_t marks = page->GetRegionMarks();
-
- if (marks != Page::kAllRegionsCleanMarks) {
- Address start = page->ObjectAreaStart();
-
- // Do not try to visit pointers beyond page allocation watermark.
- // Page can contain garbage pointers there.
- Address end;
-
- if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
- page->IsWatermarkValid()) {
- end = page->AllocationWatermark();
- } else {
- end = page->CachedAllocationWatermark();
- }
-
- ASSERT(space == old_pointer_space_ ||
- (space == map_space_ &&
- ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
-
- page->SetRegionMarks(IterateDirtyRegions(marks,
- start,
- end,
- visit_dirty_region,
- copy_object_func));
- }
-
- // Mark page watermark as invalid to maintain watermark validity invariant.
- // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
- page->InvalidateWatermark(true);
- }
-}
-
-
-void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
- IterateStrongRoots(v, mode);
- IterateWeakRoots(v, mode);
-}
-
-
-void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
- v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
- v->Synchronize("symbol_table");
- if (mode != VISIT_ALL_IN_SCAVENGE) {
- // Scavenge collections have special processing for this.
- external_string_table_.Iterate(v);
- }
- v->Synchronize("external_string_table");
-}
-
-
-void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
- v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
- v->Synchronize("strong_root_list");
-
- v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
- v->Synchronize("symbol");
-
- isolate_->bootstrapper()->Iterate(v);
- v->Synchronize("bootstrapper");
- isolate_->Iterate(v);
- v->Synchronize("top");
- Relocatable::Iterate(v);
- v->Synchronize("relocatable");
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- isolate_->debug()->Iterate(v);
-#endif
- v->Synchronize("debug");
- isolate_->compilation_cache()->Iterate(v);
- v->Synchronize("compilationcache");
-
- // Iterate over local handles in handle scopes.
- isolate_->handle_scope_implementer()->Iterate(v);
- v->Synchronize("handlescope");
-
- // Iterate over the builtin code objects and code stubs in the
- // heap. Note that it is not necessary to iterate over code objects
- // on scavenge collections.
- if (mode != VISIT_ALL_IN_SCAVENGE) {
- isolate_->builtins()->IterateBuiltins(v);
- }
- v->Synchronize("builtins");
-
- // Iterate over global handles.
- if (mode == VISIT_ONLY_STRONG) {
- isolate_->global_handles()->IterateStrongRoots(v);
- } else {
- isolate_->global_handles()->IterateAllRoots(v);
- }
- v->Synchronize("globalhandles");
-
- // Iterate over pointers being held by inactive threads.
- isolate_->thread_manager()->Iterate(v);
- v->Synchronize("threadmanager");
-
- // Iterate over the pointers the Serialization/Deserialization code is
- // holding.
- // During garbage collection this keeps the partial snapshot cache alive.
- // During deserialization of the startup snapshot this creates the partial
- // snapshot cache and deserializes the objects it refers to. During
- // serialization this does nothing, since the partial snapshot cache is
- // empty. However the next thing we do is create the partial snapshot,
- // filling up the partial snapshot cache with objects it needs as we go.
- SerializerDeserializer::Iterate(v);
- // We don't do a v->Synchronize call here, because in debug mode that will
- // output a flag to the snapshot. However at this point the serializer and
- // deserializer are deliberately a little unsynchronized (see above) so the
- // checking of the sync flag in the snapshot would fail.
-}
-
-
-// TODO(1236194): Since the heap size is configurable on the command line
-// and through the API, we should gracefully handle the case that the heap
-// size is not big enough to fit all the initial objects.
-bool Heap::ConfigureHeap(int max_semispace_size,
- int max_old_gen_size,
- int max_executable_size) {
- if (HasBeenSetup()) return false;
-
- if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
-
- if (Snapshot::IsEnabled()) {
- // If we are using a snapshot we always reserve the default amount
- // of memory for each semispace because code in the snapshot has
- // write-barrier code that relies on the size and alignment of new
- // space. We therefore cannot use a larger max semispace size
- // than the default reserved semispace size.
- if (max_semispace_size_ > reserved_semispace_size_) {
- max_semispace_size_ = reserved_semispace_size_;
- }
- } else {
- // If we are not using snapshots we reserve space for the actual
- // max semispace size.
- reserved_semispace_size_ = max_semispace_size_;
- }
-
- if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
- if (max_executable_size > 0) {
- max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
- }
-
- // The max executable size must be less than or equal to the max old
- // generation size.
- if (max_executable_size_ > max_old_generation_size_) {
- max_executable_size_ = max_old_generation_size_;
- }
-
- // The new space size must be a power of two to support single-bit testing
- // for containment.
- max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
- reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
- initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
- external_allocation_limit_ = 10 * max_semispace_size_;
-
- // The old generation is paged.
- max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
-
- configured_ = true;
- return true;
-}
-
-
-bool Heap::ConfigureHeapDefault() {
- return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
- FLAG_max_old_space_size * MB,
- FLAG_max_executable_size * MB);
-}
-
-
-void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
- *stats->start_marker = HeapStats::kStartMarker;
- *stats->end_marker = HeapStats::kEndMarker;
- *stats->new_space_size = new_space_.SizeAsInt();
- *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
- *stats->old_pointer_space_size = old_pointer_space_->Size();
- *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
- *stats->old_data_space_size = old_data_space_->Size();
- *stats->old_data_space_capacity = old_data_space_->Capacity();
- *stats->code_space_size = code_space_->Size();
- *stats->code_space_capacity = code_space_->Capacity();
- *stats->map_space_size = map_space_->Size();
- *stats->map_space_capacity = map_space_->Capacity();
- *stats->cell_space_size = cell_space_->Size();
- *stats->cell_space_capacity = cell_space_->Capacity();
- *stats->lo_space_size = lo_space_->Size();
- isolate_->global_handles()->RecordStats(stats);
- *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
- *stats->memory_allocator_capacity =
- isolate()->memory_allocator()->Size() +
- isolate()->memory_allocator()->Available();
- *stats->os_error = OS::GetLastError();
- isolate()->memory_allocator()->Available();
- if (take_snapshot) {
- HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
- for (HeapObject* obj = iterator.next();
- obj != NULL;
- obj = iterator.next()) {
- InstanceType type = obj->map()->instance_type();
- ASSERT(0 <= type && type <= LAST_TYPE);
- stats->objects_per_type[type]++;
- stats->size_per_type[type] += obj->Size();
- }
- }
-}
-
-
-intptr_t Heap::PromotedSpaceSize() {
- return old_pointer_space_->Size()
- + old_data_space_->Size()
- + code_space_->Size()
- + map_space_->Size()
- + cell_space_->Size()
- + lo_space_->Size();
-}
-
-
-int Heap::PromotedExternalMemorySize() {
- if (amount_of_external_allocated_memory_
- <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
- return amount_of_external_allocated_memory_
- - amount_of_external_allocated_memory_at_last_global_gc_;
-}
-
-#ifdef DEBUG
-
-// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
-static const int kMarkTag = 2;
-
-
-class HeapDebugUtils {
- public:
- explicit HeapDebugUtils(Heap* heap)
- : search_for_any_global_(false),
- search_target_(NULL),
- found_target_(false),
- object_stack_(20),
- heap_(heap) {
- }
-
- class MarkObjectVisitor : public ObjectVisitor {
- public:
- explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
-
- void VisitPointers(Object** start, Object** end) {
- // Copy all HeapObject pointers in [start, end)
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsHeapObject())
- utils_->MarkObjectRecursively(p);
- }
- }
-
- HeapDebugUtils* utils_;
- };
-
- void MarkObjectRecursively(Object** p) {
- if (!(*p)->IsHeapObject()) return;
-
- HeapObject* obj = HeapObject::cast(*p);
-
- Object* map = obj->map();
-
- if (!map->IsHeapObject()) return; // visited before
-
- if (found_target_) return; // stop if target found
- object_stack_.Add(obj);
- if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
- (!search_for_any_global_ && (obj == search_target_))) {
- found_target_ = true;
- return;
- }
-
- // not visited yet
- Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
-
- Address map_addr = map_p->address();
-
- obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
-
- MarkObjectRecursively(&map);
-
- MarkObjectVisitor mark_visitor(this);
-
- obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
- &mark_visitor);
-
- if (!found_target_) // don't pop if found the target
- object_stack_.RemoveLast();
- }
-
-
- class UnmarkObjectVisitor : public ObjectVisitor {
- public:
- explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
-
- void VisitPointers(Object** start, Object** end) {
- // Copy all HeapObject pointers in [start, end)
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsHeapObject())
- utils_->UnmarkObjectRecursively(p);
- }
- }
-
- HeapDebugUtils* utils_;
- };
-
-
- void UnmarkObjectRecursively(Object** p) {
- if (!(*p)->IsHeapObject()) return;
-
- HeapObject* obj = HeapObject::cast(*p);
-
- Object* map = obj->map();
-
- if (map->IsHeapObject()) return; // unmarked already
-
- Address map_addr = reinterpret_cast<Address>(map);
-
- map_addr -= kMarkTag;
-
- ASSERT_TAG_ALIGNED(map_addr);
-
- HeapObject* map_p = HeapObject::FromAddress(map_addr);
-
- obj->set_map(reinterpret_cast<Map*>(map_p));
-
- UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
-
- UnmarkObjectVisitor unmark_visitor(this);
-
- obj->IterateBody(Map::cast(map_p)->instance_type(),
- obj->SizeFromMap(Map::cast(map_p)),
- &unmark_visitor);
- }
-
-
- void MarkRootObjectRecursively(Object** root) {
- if (search_for_any_global_) {
- ASSERT(search_target_ == NULL);
- } else {
- ASSERT(search_target_->IsHeapObject());
- }
- found_target_ = false;
- object_stack_.Clear();
-
- MarkObjectRecursively(root);
- UnmarkObjectRecursively(root);
-
- if (found_target_) {
- PrintF("=====================================\n");
- PrintF("==== Path to object ====\n");
- PrintF("=====================================\n\n");
-
- ASSERT(!object_stack_.is_empty());
- for (int i = 0; i < object_stack_.length(); i++) {
- if (i > 0) PrintF("\n |\n |\n V\n\n");
- Object* obj = object_stack_[i];
- obj->Print();
- }
- PrintF("=====================================\n");
- }
- }
-
- // Helper class for visiting HeapObjects recursively.
- class MarkRootVisitor: public ObjectVisitor {
- public:
- explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }
-
- void VisitPointers(Object** start, Object** end) {
- // Visit all HeapObject pointers in [start, end)
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsHeapObject())
- utils_->MarkRootObjectRecursively(p);
- }
- }
-
- HeapDebugUtils* utils_;
- };
-
- bool search_for_any_global_;
- Object* search_target_;
- bool found_target_;
- List<Object*> object_stack_;
- Heap* heap_;
-
- friend class Heap;
-};
-
-#endif
-
-bool Heap::Setup(bool create_heap_objects) {
-#ifdef DEBUG
- debug_utils_ = new HeapDebugUtils(this);
-#endif
-
- // Initialize heap spaces and initial maps and objects. Whenever something
- // goes wrong, just return false. The caller should check the results and
- // call Heap::TearDown() to release allocated memory.
- //
- // If the heap is not yet configured (eg, through the API), configure it.
- // Configuration is based on the flags new-space-size (really the semispace
- // size) and old-space-size if set or the initial values of semispace_size_
- // and old_generation_size_ otherwise.
- if (!configured_) {
- if (!ConfigureHeapDefault()) return false;
- }
-
- gc_initializer_mutex->Lock();
- static bool initialized_gc = false;
- if (!initialized_gc) {
- initialized_gc = true;
- InitializeScavengingVisitorsTables();
- NewSpaceScavenger::Initialize();
- MarkCompactCollector::Initialize();
- }
- gc_initializer_mutex->Unlock();
-
- MarkMapPointersAsEncoded(false);
-
- // Setup memory allocator and reserve a chunk of memory for new
- // space. The chunk is double the size of the requested reserved
- // new space size to ensure that we can find a pair of semispaces that
- // are contiguous and aligned to their size.
- if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
- return false;
- void* chunk =
- isolate_->memory_allocator()->ReserveInitialChunk(
- 4 * reserved_semispace_size_);
- if (chunk == NULL) return false;
-
- // Align the pair of semispaces to their size, which must be a power
- // of 2.
- Address new_space_start =
- RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
- if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
- return false;
- }
-
- // Initialize old pointer space.
- old_pointer_space_ =
- new OldSpace(this,
- max_old_generation_size_,
- OLD_POINTER_SPACE,
- NOT_EXECUTABLE);
- if (old_pointer_space_ == NULL) return false;
- if (!old_pointer_space_->Setup(NULL, 0)) return false;
-
- // Initialize old data space.
- old_data_space_ =
- new OldSpace(this,
- max_old_generation_size_,
- OLD_DATA_SPACE,
- NOT_EXECUTABLE);
- if (old_data_space_ == NULL) return false;
- if (!old_data_space_->Setup(NULL, 0)) return false;
-
- // Initialize the code space, set its maximum capacity to the old
- // generation size. It needs executable memory.
- // On 64-bit platform(s), we put all code objects in a 2 GB range of
- // virtual address space, so that they can call each other with near calls.
- if (code_range_size_ > 0) {
- if (!isolate_->code_range()->Setup(code_range_size_)) {
- return false;
- }
- }
-
- code_space_ =
- new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
- if (code_space_ == NULL) return false;
- if (!code_space_->Setup(NULL, 0)) return false;
-
- // Initialize map space.
- map_space_ = new MapSpace(this, FLAG_use_big_map_space
- ? max_old_generation_size_
- : MapSpace::kMaxMapPageIndex * Page::kPageSize,
- FLAG_max_map_space_pages,
- MAP_SPACE);
- if (map_space_ == NULL) return false;
- if (!map_space_->Setup(NULL, 0)) return false;
-
- // Initialize global property cell space.
- cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
- if (cell_space_ == NULL) return false;
- if (!cell_space_->Setup(NULL, 0)) return false;
-
- // The large object code space may contain code or data. We set the memory
- // to be non-executable here for safety, but this means we need to enable it
- // explicitly when allocating large code objects.
- lo_space_ = new LargeObjectSpace(this, LO_SPACE);
- if (lo_space_ == NULL) return false;
- if (!lo_space_->Setup()) return false;
-
- if (create_heap_objects) {
- // Create initial maps.
- if (!CreateInitialMaps()) return false;
- if (!CreateApiObjects()) return false;
-
- // Create initial objects
- if (!CreateInitialObjects()) return false;
-
- global_contexts_list_ = undefined_value();
- }
-
- LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
- LOG(isolate_, IntPtrTEvent("heap-available", Available()));
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // This should be called only after initial objects have been created.
- isolate_->producer_heap_profile()->Setup();
-#endif
-
- return true;
-}
-
-
-void Heap::SetStackLimits() {
- ASSERT(isolate_ != NULL);
- ASSERT(isolate_ == isolate());
- // On 64 bit machines, pointers are generally out of range of Smis. We write
- // something that looks like an out of range Smi to the GC.
-
- // Set up the special root array entries containing the stack limits.
- // These are actually addresses, but the tag makes the GC ignore it.
- roots_[kStackLimitRootIndex] =
- reinterpret_cast<Object*>(
- (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
- roots_[kRealStackLimitRootIndex] =
- reinterpret_cast<Object*>(
- (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
-}
-
-
-void Heap::TearDown() {
- if (FLAG_print_cumulative_gc_stat) {
- PrintF("\n\n");
- PrintF("gc_count=%d ", gc_count_);
- PrintF("mark_sweep_count=%d ", ms_count_);
- PrintF("mark_compact_count=%d ", mc_count_);
- PrintF("max_gc_pause=%d ", get_max_gc_pause());
- PrintF("min_in_mutator=%d ", get_min_in_mutator());
- PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
- get_max_alive_after_gc());
- PrintF("\n\n");
- }
-
- isolate_->global_handles()->TearDown();
-
- external_string_table_.TearDown();
-
- new_space_.TearDown();
-
- if (old_pointer_space_ != NULL) {
- old_pointer_space_->TearDown();
- delete old_pointer_space_;
- old_pointer_space_ = NULL;
- }
-
- if (old_data_space_ != NULL) {
- old_data_space_->TearDown();
- delete old_data_space_;
- old_data_space_ = NULL;
- }
-
- if (code_space_ != NULL) {
- code_space_->TearDown();
- delete code_space_;
- code_space_ = NULL;
- }
-
- if (map_space_ != NULL) {
- map_space_->TearDown();
- delete map_space_;
- map_space_ = NULL;
- }
-
- if (cell_space_ != NULL) {
- cell_space_->TearDown();
- delete cell_space_;
- cell_space_ = NULL;
- }
-
- if (lo_space_ != NULL) {
- lo_space_->TearDown();
- delete lo_space_;
- lo_space_ = NULL;
- }
-
- isolate_->memory_allocator()->TearDown();
-
-#ifdef DEBUG
- delete debug_utils_;
- debug_utils_ = NULL;
-#endif
-}
-
-
-void Heap::Shrink() {
- // Try to shrink all paged spaces.
- PagedSpaces spaces;
- for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
- space->Shrink();
-}
-
-
-#ifdef ENABLE_HEAP_PROTECTION
-
-void Heap::Protect() {
- if (HasBeenSetup()) {
- AllSpaces spaces;
- for (Space* space = spaces.next(); space != NULL; space = spaces.next())
- space->Protect();
- }
-}
-
-
-void Heap::Unprotect() {
- if (HasBeenSetup()) {
- AllSpaces spaces;
- for (Space* space = spaces.next(); space != NULL; space = spaces.next())
- space->Unprotect();
- }
-}
-
-#endif
-
-
-void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
- ASSERT(callback != NULL);
- GCPrologueCallbackPair pair(callback, gc_type);
- ASSERT(!gc_prologue_callbacks_.Contains(pair));
- return gc_prologue_callbacks_.Add(pair);
-}
-
-
-void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
- ASSERT(callback != NULL);
- for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
- if (gc_prologue_callbacks_[i].callback == callback) {
- gc_prologue_callbacks_.Remove(i);
- return;
- }
- }
- UNREACHABLE();
-}
-
-
-void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
- ASSERT(callback != NULL);
- GCEpilogueCallbackPair pair(callback, gc_type);
- ASSERT(!gc_epilogue_callbacks_.Contains(pair));
- return gc_epilogue_callbacks_.Add(pair);
-}
-
-
-void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
- ASSERT(callback != NULL);
- for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
- if (gc_epilogue_callbacks_[i].callback == callback) {
- gc_epilogue_callbacks_.Remove(i);
- return;
- }
- }
- UNREACHABLE();
-}
-
-
-#ifdef DEBUG
-
-class PrintHandleVisitor: public ObjectVisitor {
- public:
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++)
- PrintF(" handle %p to %p\n",
- reinterpret_cast<void*>(p),
- reinterpret_cast<void*>(*p));
- }
-};
-
-void Heap::PrintHandles() {
- PrintF("Handles:\n");
- PrintHandleVisitor v;
- isolate_->handle_scope_implementer()->Iterate(&v);
-}
-
-#endif
-
-
-Space* AllSpaces::next() {
- switch (counter_++) {
- case NEW_SPACE:
- return HEAP->new_space();
- case OLD_POINTER_SPACE:
- return HEAP->old_pointer_space();
- case OLD_DATA_SPACE:
- return HEAP->old_data_space();
- case CODE_SPACE:
- return HEAP->code_space();
- case MAP_SPACE:
- return HEAP->map_space();
- case CELL_SPACE:
- return HEAP->cell_space();
- case LO_SPACE:
- return HEAP->lo_space();
- default:
- return NULL;
- }
-}
-
-
-PagedSpace* PagedSpaces::next() {
- switch (counter_++) {
- case OLD_POINTER_SPACE:
- return HEAP->old_pointer_space();
- case OLD_DATA_SPACE:
- return HEAP->old_data_space();
- case CODE_SPACE:
- return HEAP->code_space();
- case MAP_SPACE:
- return HEAP->map_space();
- case CELL_SPACE:
- return HEAP->cell_space();
- default:
- return NULL;
- }
-}
-
-
-
-OldSpace* OldSpaces::next() {
- switch (counter_++) {
- case OLD_POINTER_SPACE:
- return HEAP->old_pointer_space();
- case OLD_DATA_SPACE:
- return HEAP->old_data_space();
- case CODE_SPACE:
- return HEAP->code_space();
- default:
- return NULL;
- }
-}
-
-
-SpaceIterator::SpaceIterator()
- : current_space_(FIRST_SPACE),
- iterator_(NULL),
- size_func_(NULL) {
-}
-
-
-SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
- : current_space_(FIRST_SPACE),
- iterator_(NULL),
- size_func_(size_func) {
-}
-
-
-SpaceIterator::~SpaceIterator() {
- // Delete active iterator if any.
- delete iterator_;
-}
-
-
-bool SpaceIterator::has_next() {
- // Iterate until no more spaces.
- return current_space_ != LAST_SPACE;
-}
-
-
-ObjectIterator* SpaceIterator::next() {
- if (iterator_ != NULL) {
- delete iterator_;
- iterator_ = NULL;
- // Move to the next space
- current_space_++;
- if (current_space_ > LAST_SPACE) {
- return NULL;
- }
- }
-
- // Return iterator for the new current space.
- return CreateIterator();
-}
-
-
-// Create an iterator for the space to iterate.
-ObjectIterator* SpaceIterator::CreateIterator() {
- ASSERT(iterator_ == NULL);
-
- switch (current_space_) {
- case NEW_SPACE:
- iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
- break;
- case OLD_POINTER_SPACE:
- iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
- break;
- case OLD_DATA_SPACE:
- iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
- break;
- case CODE_SPACE:
- iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
- break;
- case MAP_SPACE:
- iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
- break;
- case CELL_SPACE:
- iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
- break;
- case LO_SPACE:
- iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
- break;
- }
-
- // Return the newly allocated iterator;
- ASSERT(iterator_ != NULL);
- return iterator_;
-}
-
-
-class HeapObjectsFilter {
- public:
- virtual ~HeapObjectsFilter() {}
- virtual bool SkipObject(HeapObject* object) = 0;
-};
-
-
-class FreeListNodesFilter : public HeapObjectsFilter {
- public:
- FreeListNodesFilter() {
- MarkFreeListNodes();
- }
-
- bool SkipObject(HeapObject* object) {
- if (object->IsMarked()) {
- object->ClearMark();
- return true;
- } else {
- return false;
- }
- }
-
- private:
- void MarkFreeListNodes() {
- Heap* heap = HEAP;
- heap->old_pointer_space()->MarkFreeListNodes();
- heap->old_data_space()->MarkFreeListNodes();
- MarkCodeSpaceFreeListNodes(heap);
- heap->map_space()->MarkFreeListNodes();
- heap->cell_space()->MarkFreeListNodes();
- }
-
- void MarkCodeSpaceFreeListNodes(Heap* heap) {
- // For code space, using FreeListNode::IsFreeListNode is OK.
- HeapObjectIterator iter(heap->code_space());
- for (HeapObject* obj = iter.next_object();
- obj != NULL;
- obj = iter.next_object()) {
- if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
- }
- }
-
- AssertNoAllocation no_alloc;
-};
-
-
-class UnreachableObjectsFilter : public HeapObjectsFilter {
- public:
- UnreachableObjectsFilter() {
- MarkUnreachableObjects();
- }
-
- bool SkipObject(HeapObject* object) {
- if (object->IsMarked()) {
- object->ClearMark();
- return true;
- } else {
- return false;
- }
- }
-
- private:
- class UnmarkingVisitor : public ObjectVisitor {
- public:
- UnmarkingVisitor() : list_(10) {}
-
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
- if (!(*p)->IsHeapObject()) continue;
- HeapObject* obj = HeapObject::cast(*p);
- if (obj->IsMarked()) {
- obj->ClearMark();
- list_.Add(obj);
- }
- }
- }
-
- bool can_process() { return !list_.is_empty(); }
-
- void ProcessNext() {
- HeapObject* obj = list_.RemoveLast();
- obj->Iterate(this);
- }
-
- private:
- List<HeapObject*> list_;
- };
-
- void MarkUnreachableObjects() {
- HeapIterator iterator;
- for (HeapObject* obj = iterator.next();
- obj != NULL;
- obj = iterator.next()) {
- obj->SetMark();
- }
- UnmarkingVisitor visitor;
- HEAP->IterateRoots(&visitor, VISIT_ALL);
- while (visitor.can_process())
- visitor.ProcessNext();
- }
-
- AssertNoAllocation no_alloc;
-};
-
-
-HeapIterator::HeapIterator()
- : filtering_(HeapIterator::kNoFiltering),
- filter_(NULL) {
- Init();
-}
-
-
-HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
- : filtering_(filtering),
- filter_(NULL) {
- Init();
-}
-
-
-HeapIterator::~HeapIterator() {
- Shutdown();
-}
-
-
-void HeapIterator::Init() {
- // Start the iteration.
- space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
- new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
- switch (filtering_) {
- case kFilterFreeListNodes:
- filter_ = new FreeListNodesFilter;
- break;
- case kFilterUnreachable:
- filter_ = new UnreachableObjectsFilter;
- break;
- default:
- break;
- }
- object_iterator_ = space_iterator_->next();
-}
-
-
-void HeapIterator::Shutdown() {
-#ifdef DEBUG
- // Assert that in filtering mode we have iterated through all
- // objects. Otherwise, heap will be left in an inconsistent state.
- if (filtering_ != kNoFiltering) {
- ASSERT(object_iterator_ == NULL);
- }
-#endif
- // Make sure the last iterator is deallocated.
- delete space_iterator_;
- space_iterator_ = NULL;
- object_iterator_ = NULL;
- delete filter_;
- filter_ = NULL;
-}
-
-
-HeapObject* HeapIterator::next() {
- if (filter_ == NULL) return NextObject();
-
- HeapObject* obj = NextObject();
- while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
- return obj;
-}
-
-
-HeapObject* HeapIterator::NextObject() {
- // No iterator means we are done.
- if (object_iterator_ == NULL) return NULL;
-
- if (HeapObject* obj = object_iterator_->next_object()) {
- // If the current iterator has more objects we are fine.
- return obj;
- } else {
- // Go though the spaces looking for one that has objects.
- while (space_iterator_->has_next()) {
- object_iterator_ = space_iterator_->next();
- if (HeapObject* obj = object_iterator_->next_object()) {
- return obj;
- }
- }
- }
- // Done with the last space.
- object_iterator_ = NULL;
- return NULL;
-}
-
-
-void HeapIterator::reset() {
- // Restart the iterator.
- Shutdown();
- Init();
-}
-
-
-#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
-
-Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
-
-class PathTracer::MarkVisitor: public ObjectVisitor {
- public:
- explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
- void VisitPointers(Object** start, Object** end) {
- // Scan all HeapObject pointers in [start, end)
- for (Object** p = start; !tracer_->found() && (p < end); p++) {
- if ((*p)->IsHeapObject())
- tracer_->MarkRecursively(p, this);
- }
- }
-
- private:
- PathTracer* tracer_;
-};
-
-
-class PathTracer::UnmarkVisitor: public ObjectVisitor {
- public:
- explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
- void VisitPointers(Object** start, Object** end) {
- // Scan all HeapObject pointers in [start, end)
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsHeapObject())
- tracer_->UnmarkRecursively(p, this);
- }
- }
-
- private:
- PathTracer* tracer_;
-};
-
-
-void PathTracer::VisitPointers(Object** start, Object** end) {
- bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
- // Visit all HeapObject pointers in [start, end)
- for (Object** p = start; !done && (p < end); p++) {
- if ((*p)->IsHeapObject()) {
- TracePathFrom(p);
- done = ((what_to_find_ == FIND_FIRST) && found_target_);
- }
- }
-}
-
-
-void PathTracer::Reset() {
- found_target_ = false;
- object_stack_.Clear();
-}
-
-
-void PathTracer::TracePathFrom(Object** root) {
- ASSERT((search_target_ == kAnyGlobalObject) ||
- search_target_->IsHeapObject());
- found_target_in_trace_ = false;
- object_stack_.Clear();
-
- MarkVisitor mark_visitor(this);
- MarkRecursively(root, &mark_visitor);
-
- UnmarkVisitor unmark_visitor(this);
- UnmarkRecursively(root, &unmark_visitor);
-
- ProcessResults();
-}
-
-
-void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
- if (!(*p)->IsHeapObject()) return;
-
- HeapObject* obj = HeapObject::cast(*p);
-
- Object* map = obj->map();
-
- if (!map->IsHeapObject()) return; // visited before
-
- if (found_target_in_trace_) return; // stop if target found
- object_stack_.Add(obj);
- if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
- (obj == search_target_)) {
- found_target_in_trace_ = true;
- found_target_ = true;
- return;
- }
-
- bool is_global_context = obj->IsGlobalContext();
-
- // not visited yet
- Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
-
- Address map_addr = map_p->address();
-
- obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
-
- // Scan the object body.
- if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
- // This is specialized to scan Context's properly.
- Object** start = reinterpret_cast<Object**>(obj->address() +
- Context::kHeaderSize);
- Object** end = reinterpret_cast<Object**>(obj->address() +
- Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
- mark_visitor->VisitPointers(start, end);
- } else {
- obj->IterateBody(map_p->instance_type(),
- obj->SizeFromMap(map_p),
- mark_visitor);
- }
-
- // Scan the map after the body because the body is a lot more interesting
- // when doing leak detection.
- MarkRecursively(&map, mark_visitor);
-
- if (!found_target_in_trace_) // don't pop if found the target
- object_stack_.RemoveLast();
-}
-
-
-void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
- if (!(*p)->IsHeapObject()) return;
-
- HeapObject* obj = HeapObject::cast(*p);
-
- Object* map = obj->map();
-
- if (map->IsHeapObject()) return; // unmarked already
-
- Address map_addr = reinterpret_cast<Address>(map);
-
- map_addr -= kMarkTag;
-
- ASSERT_TAG_ALIGNED(map_addr);
-
- HeapObject* map_p = HeapObject::FromAddress(map_addr);
-
- obj->set_map(reinterpret_cast<Map*>(map_p));
-
- UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
-
- obj->IterateBody(Map::cast(map_p)->instance_type(),
- obj->SizeFromMap(Map::cast(map_p)),
- unmark_visitor);
-}
-
-
-void PathTracer::ProcessResults() {
- if (found_target_) {
- PrintF("=====================================\n");
- PrintF("==== Path to object ====\n");
- PrintF("=====================================\n\n");
-
- ASSERT(!object_stack_.is_empty());
- for (int i = 0; i < object_stack_.length(); i++) {
- if (i > 0) PrintF("\n |\n |\n V\n\n");
- Object* obj = object_stack_[i];
-#ifdef OBJECT_PRINT
- obj->Print();
-#else
- obj->ShortPrint();
-#endif
- }
- PrintF("=====================================\n");
- }
-}
-#endif // DEBUG || LIVE_OBJECT_LIST
-
-
-#ifdef DEBUG
-// Triggers a depth-first traversal of reachable objects from roots
-// and finds a path to a specific heap object and prints it.
-void Heap::TracePathToObject(Object* target) {
- PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
- IterateRoots(&tracer, VISIT_ONLY_STRONG);
-}
-
-
-// Triggers a depth-first traversal of reachable objects from roots
-// and finds a path to any global object and prints it. Useful for
-// determining the source for leaks of global objects.
-void Heap::TracePathToGlobal() {
- PathTracer tracer(PathTracer::kAnyGlobalObject,
- PathTracer::FIND_ALL,
- VISIT_ALL);
- IterateRoots(&tracer, VISIT_ONLY_STRONG);
-}
-#endif
-
-
-static intptr_t CountTotalHolesSize() {
- intptr_t holes_size = 0;
- OldSpaces spaces;
- for (OldSpace* space = spaces.next();
- space != NULL;
- space = spaces.next()) {
- holes_size += space->Waste() + space->AvailableFree();
- }
- return holes_size;
-}
-
-
-GCTracer::GCTracer(Heap* heap)
- : start_time_(0.0),
- start_size_(0),
- gc_count_(0),
- full_gc_count_(0),
- is_compacting_(false),
- marked_count_(0),
- allocated_since_last_gc_(0),
- spent_in_mutator_(0),
- promoted_objects_size_(0),
- heap_(heap) {
- // These two fields reflect the state of the previous full collection.
- // Set them before they are changed by the collector.
- previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
- previous_marked_count_ =
- heap_->mark_compact_collector_.previous_marked_count();
- if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
- start_time_ = OS::TimeCurrentMillis();
- start_size_ = heap_->SizeOfObjects();
-
- for (int i = 0; i < Scope::kNumberOfScopes; i++) {
- scopes_[i] = 0;
- }
-
- in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
-
- allocated_since_last_gc_ =
- heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
-
- if (heap_->last_gc_end_timestamp_ > 0) {
- spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
- }
-}
-
-
-GCTracer::~GCTracer() {
- // Printf ONE line iff flag is set.
- if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
-
- bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
-
- heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
- heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
-
- int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);
-
- // Update cumulative GC statistics if required.
- if (FLAG_print_cumulative_gc_stat) {
- heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
- heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
- heap_->alive_after_last_gc_);
- if (!first_gc) {
- heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
- static_cast<int>(spent_in_mutator_));
- }
- }
-
- if (!FLAG_trace_gc_nvp) {
- int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
-
- PrintF("%s %.1f -> %.1f MB, ",
- CollectorString(),
- static_cast<double>(start_size_) / MB,
- SizeOfHeapObjects());
-
- if (external_time > 0) PrintF("%d / ", external_time);
- PrintF("%d ms.\n", time);
- } else {
- PrintF("pause=%d ", time);
- PrintF("mutator=%d ",
- static_cast<int>(spent_in_mutator_));
-
- PrintF("gc=");
- switch (collector_) {
- case SCAVENGER:
- PrintF("s");
- break;
- case MARK_COMPACTOR:
- PrintF("%s",
- heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
- break;
- default:
- UNREACHABLE();
- }
- PrintF(" ");
-
- PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
- PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
- PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
- PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
- PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
-
- PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
- PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
- PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
- in_free_list_or_wasted_before_gc_);
- PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
-
- PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
- PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
-
- PrintF("\n");
- }
-
-#if defined(ENABLE_LOGGING_AND_PROFILING)
- heap_->PrintShortHeapStatistics();
-#endif
-}
-
-
-const char* GCTracer::CollectorString() {
- switch (collector_) {
- case SCAVENGER:
- return "Scavenge";
- case MARK_COMPACTOR:
- return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact"
- : "Mark-sweep";
- }
- return "Unknown GC";
-}
-
-
-int KeyedLookupCache::Hash(Map* map, String* name) {
- // Uses only lower 32 bits if pointers are larger.
- uintptr_t addr_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
- return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
-}
-
-
-int KeyedLookupCache::Lookup(Map* map, String* name) {
- int index = Hash(map, name);
- Key& key = keys_[index];
- if ((key.map == map) && key.name->Equals(name)) {
- return field_offsets_[index];
- }
- return kNotFound;
-}
-
-
-void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
- String* symbol;
- if (HEAP->LookupSymbolIfExists(name, &symbol)) {
- int index = Hash(map, symbol);
- Key& key = keys_[index];
- key.map = map;
- key.name = symbol;
- field_offsets_[index] = field_offset;
- }
-}
-
-
-void KeyedLookupCache::Clear() {
- for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
-}
-
-
-void DescriptorLookupCache::Clear() {
- for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
-}
-
-
-#ifdef DEBUG
-void Heap::GarbageCollectionGreedyCheck() {
- ASSERT(FLAG_gc_greedy);
- if (isolate_->bootstrapper()->IsActive()) return;
- if (disallow_allocation_failure()) return;
- CollectGarbage(NEW_SPACE);
-}
-#endif
-
-
-TranscendentalCache::SubCache::SubCache(Type t)
- : type_(t),
- isolate_(Isolate::Current()) {
- uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
- uint32_t in1 = 0xffffffffu; // generated by the FPU.
- for (int i = 0; i < kCacheSize; i++) {
- elements_[i].in[0] = in0;
- elements_[i].in[1] = in1;
- elements_[i].output = NULL;
- }
-}
-
-
-void TranscendentalCache::Clear() {
- for (int i = 0; i < kNumberOfCaches; i++) {
- if (caches_[i] != NULL) {
- delete caches_[i];
- caches_[i] = NULL;
- }
- }
-}
-
-
-void ExternalStringTable::CleanUp() {
- int last = 0;
- for (int i = 0; i < new_space_strings_.length(); ++i) {
- if (new_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
- if (heap_->InNewSpace(new_space_strings_[i])) {
- new_space_strings_[last++] = new_space_strings_[i];
- } else {
- old_space_strings_.Add(new_space_strings_[i]);
- }
- }
- new_space_strings_.Rewind(last);
- last = 0;
- for (int i = 0; i < old_space_strings_.length(); ++i) {
- if (old_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
- ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
- old_space_strings_[last++] = old_space_strings_[i];
- }
- old_space_strings_.Rewind(last);
- Verify();
-}
-
-
-void ExternalStringTable::TearDown() {
- new_space_strings_.Free();
- old_space_strings_.Free();
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/heap.h b/src/3rdparty/v8/src/heap.h
deleted file mode 100644
index ee1c9f6..0000000
--- a/src/3rdparty/v8/src/heap.h
+++ /dev/null
@@ -1,2265 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_HEAP_H_
-#define V8_HEAP_H_
-
-#include <math.h>
-
-#include "globals.h"
-#include "list.h"
-#include "mark-compact.h"
-#include "spaces.h"
-#include "splay-tree-inl.h"
-#include "v8-counters.h"
-
-namespace v8 {
-namespace internal {
-
-// TODO(isolates): remove HEAP here
-#define HEAP (_inline_get_heap_())
-class Heap;
-inline Heap* _inline_get_heap_();
-
-
-// Defines all the roots in Heap.
-#define STRONG_ROOT_LIST(V) \
- /* Put the byte array map early. We need it to be in place by the time */ \
- /* the deserializer hits the next page, since it wants to put a byte */ \
- /* array in the unused space at the end of the page. */ \
- V(Map, byte_array_map, ByteArrayMap) \
- V(Map, one_pointer_filler_map, OnePointerFillerMap) \
- V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
- /* Cluster the most popular ones in a few cache lines here at the top. */ \
- V(Object, undefined_value, UndefinedValue) \
- V(Object, the_hole_value, TheHoleValue) \
- V(Object, null_value, NullValue) \
- V(Object, true_value, TrueValue) \
- V(Object, false_value, FalseValue) \
- V(Object, arguments_marker, ArgumentsMarker) \
- V(Map, heap_number_map, HeapNumberMap) \
- V(Map, global_context_map, GlobalContextMap) \
- V(Map, fixed_array_map, FixedArrayMap) \
- V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
- V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
- V(Map, meta_map, MetaMap) \
- V(Map, hash_table_map, HashTableMap) \
- V(Smi, stack_limit, StackLimit) \
- V(FixedArray, number_string_cache, NumberStringCache) \
- V(Object, instanceof_cache_function, InstanceofCacheFunction) \
- V(Object, instanceof_cache_map, InstanceofCacheMap) \
- V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \
- V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
- V(Object, termination_exception, TerminationException) \
- V(FixedArray, empty_fixed_array, EmptyFixedArray) \
- V(ByteArray, empty_byte_array, EmptyByteArray) \
- V(String, empty_string, EmptyString) \
- V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
- V(Map, string_map, StringMap) \
- V(Map, ascii_string_map, AsciiStringMap) \
- V(Map, symbol_map, SymbolMap) \
- V(Map, cons_string_map, ConsStringMap) \
- V(Map, cons_ascii_string_map, ConsAsciiStringMap) \
- V(Map, ascii_symbol_map, AsciiSymbolMap) \
- V(Map, cons_symbol_map, ConsSymbolMap) \
- V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap) \
- V(Map, external_symbol_map, ExternalSymbolMap) \
- V(Map, external_symbol_with_ascii_data_map, ExternalSymbolWithAsciiDataMap) \
- V(Map, external_ascii_symbol_map, ExternalAsciiSymbolMap) \
- V(Map, external_string_map, ExternalStringMap) \
- V(Map, external_string_with_ascii_data_map, ExternalStringWithAsciiDataMap) \
- V(Map, external_ascii_string_map, ExternalAsciiStringMap) \
- V(Map, undetectable_string_map, UndetectableStringMap) \
- V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \
- V(Map, external_pixel_array_map, ExternalPixelArrayMap) \
- V(Map, external_byte_array_map, ExternalByteArrayMap) \
- V(Map, external_unsigned_byte_array_map, ExternalUnsignedByteArrayMap) \
- V(Map, external_short_array_map, ExternalShortArrayMap) \
- V(Map, external_unsigned_short_array_map, ExternalUnsignedShortArrayMap) \
- V(Map, external_int_array_map, ExternalIntArrayMap) \
- V(Map, external_unsigned_int_array_map, ExternalUnsignedIntArrayMap) \
- V(Map, external_float_array_map, ExternalFloatArrayMap) \
- V(Map, context_map, ContextMap) \
- V(Map, catch_context_map, CatchContextMap) \
- V(Map, code_map, CodeMap) \
- V(Map, oddball_map, OddballMap) \
- V(Map, global_property_cell_map, GlobalPropertyCellMap) \
- V(Map, shared_function_info_map, SharedFunctionInfoMap) \
- V(Map, message_object_map, JSMessageObjectMap) \
- V(Map, proxy_map, ProxyMap) \
- V(Object, nan_value, NanValue) \
- V(Object, minus_zero_value, MinusZeroValue) \
- V(Map, neander_map, NeanderMap) \
- V(JSObject, message_listeners, MessageListeners) \
- V(Proxy, prototype_accessors, PrototypeAccessors) \
- V(NumberDictionary, code_stubs, CodeStubs) \
- V(NumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \
- V(Code, js_entry_code, JsEntryCode) \
- V(Code, js_construct_entry_code, JsConstructEntryCode) \
- V(FixedArray, natives_source_cache, NativesSourceCache) \
- V(Object, last_script_id, LastScriptId) \
- V(Script, empty_script, EmptyScript) \
- V(Smi, real_stack_limit, RealStackLimit) \
- V(StringDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
-
-#define ROOT_LIST(V) \
- STRONG_ROOT_LIST(V) \
- V(SymbolTable, symbol_table, SymbolTable)
-
-#define SYMBOL_LIST(V) \
- V(Array_symbol, "Array") \
- V(Object_symbol, "Object") \
- V(Proto_symbol, "__proto__") \
- V(StringImpl_symbol, "StringImpl") \
- V(arguments_symbol, "arguments") \
- V(Arguments_symbol, "Arguments") \
- V(arguments_shadow_symbol, ".arguments") \
- V(call_symbol, "call") \
- V(apply_symbol, "apply") \
- V(caller_symbol, "caller") \
- V(boolean_symbol, "boolean") \
- V(Boolean_symbol, "Boolean") \
- V(callee_symbol, "callee") \
- V(constructor_symbol, "constructor") \
- V(code_symbol, ".code") \
- V(result_symbol, ".result") \
- V(catch_var_symbol, ".catch-var") \
- V(empty_symbol, "") \
- V(eval_symbol, "eval") \
- V(function_symbol, "function") \
- V(length_symbol, "length") \
- V(name_symbol, "name") \
- V(number_symbol, "number") \
- V(Number_symbol, "Number") \
- V(nan_symbol, "NaN") \
- V(RegExp_symbol, "RegExp") \
- V(source_symbol, "source") \
- V(global_symbol, "global") \
- V(ignore_case_symbol, "ignoreCase") \
- V(multiline_symbol, "multiline") \
- V(input_symbol, "input") \
- V(index_symbol, "index") \
- V(last_index_symbol, "lastIndex") \
- V(object_symbol, "object") \
- V(prototype_symbol, "prototype") \
- V(string_symbol, "string") \
- V(String_symbol, "String") \
- V(Date_symbol, "Date") \
- V(Error_symbol, "Error") \
- V(this_symbol, "this") \
- V(to_string_symbol, "toString") \
- V(char_at_symbol, "CharAt") \
- V(undefined_symbol, "undefined") \
- V(value_of_symbol, "valueOf") \
- V(InitializeVarGlobal_symbol, "InitializeVarGlobal") \
- V(InitializeConstGlobal_symbol, "InitializeConstGlobal") \
- V(KeyedLoadSpecialized_symbol, "KeyedLoadSpecialized") \
- V(KeyedStoreSpecialized_symbol, "KeyedStoreSpecialized") \
- V(stack_overflow_symbol, "kStackOverflowBoilerplate") \
- V(illegal_access_symbol, "illegal access") \
- V(out_of_memory_symbol, "out-of-memory") \
- V(illegal_execution_state_symbol, "illegal execution state") \
- V(get_symbol, "get") \
- V(set_symbol, "set") \
- V(function_class_symbol, "Function") \
- V(illegal_argument_symbol, "illegal argument") \
- V(MakeReferenceError_symbol, "MakeReferenceError") \
- V(MakeSyntaxError_symbol, "MakeSyntaxError") \
- V(MakeTypeError_symbol, "MakeTypeError") \
- V(invalid_lhs_in_assignment_symbol, "invalid_lhs_in_assignment") \
- V(invalid_lhs_in_for_in_symbol, "invalid_lhs_in_for_in") \
- V(invalid_lhs_in_postfix_op_symbol, "invalid_lhs_in_postfix_op") \
- V(invalid_lhs_in_prefix_op_symbol, "invalid_lhs_in_prefix_op") \
- V(illegal_return_symbol, "illegal_return") \
- V(illegal_break_symbol, "illegal_break") \
- V(illegal_continue_symbol, "illegal_continue") \
- V(unknown_label_symbol, "unknown_label") \
- V(redeclaration_symbol, "redeclaration") \
- V(failure_symbol, "<failure>") \
- V(space_symbol, " ") \
- V(exec_symbol, "exec") \
- V(zero_symbol, "0") \
- V(global_eval_symbol, "GlobalEval") \
- V(identity_hash_symbol, "v8::IdentityHash") \
- V(closure_symbol, "(closure)") \
- V(use_strict, "use strict") \
- V(KeyedLoadExternalByteArray_symbol, "KeyedLoadExternalByteArray") \
- V(KeyedLoadExternalUnsignedByteArray_symbol, \
- "KeyedLoadExternalUnsignedByteArray") \
- V(KeyedLoadExternalShortArray_symbol, \
- "KeyedLoadExternalShortArray") \
- V(KeyedLoadExternalUnsignedShortArray_symbol, \
- "KeyedLoadExternalUnsignedShortArray") \
- V(KeyedLoadExternalIntArray_symbol, "KeyedLoadExternalIntArray") \
- V(KeyedLoadExternalUnsignedIntArray_symbol, \
- "KeyedLoadExternalUnsignedIntArray") \
- V(KeyedLoadExternalFloatArray_symbol, "KeyedLoadExternalFloatArray") \
- V(KeyedLoadExternalPixelArray_symbol, "KeyedLoadExternalPixelArray") \
- V(KeyedStoreExternalByteArray_symbol, "KeyedStoreExternalByteArray") \
- V(KeyedStoreExternalUnsignedByteArray_symbol, \
- "KeyedStoreExternalUnsignedByteArray") \
- V(KeyedStoreExternalShortArray_symbol, "KeyedStoreExternalShortArray") \
- V(KeyedStoreExternalUnsignedShortArray_symbol, \
- "KeyedStoreExternalUnsignedShortArray") \
- V(KeyedStoreExternalIntArray_symbol, "KeyedStoreExternalIntArray") \
- V(KeyedStoreExternalUnsignedIntArray_symbol, \
- "KeyedStoreExternalUnsignedIntArray") \
- V(KeyedStoreExternalFloatArray_symbol, "KeyedStoreExternalFloatArray") \
- V(KeyedStoreExternalPixelArray_symbol, "KeyedStoreExternalPixelArray")
-
-// Forward declarations.
-class GCTracer;
-class HeapStats;
-class Isolate;
-class WeakObjectRetainer;
-
-
-typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
- Object** pointer);
-
-typedef bool (*DirtyRegionCallback)(Heap* heap,
- Address start,
- Address end,
- ObjectSlotCallback copy_object_func);
-
-
-// The all static Heap captures the interface to the global object heap.
-// All JavaScript contexts by this process share the same object heap.
-
-#ifdef DEBUG
-class HeapDebugUtils;
-#endif
-
-
-// A queue of objects promoted during scavenge. Each object is accompanied
-// by it's size to avoid dereferencing a map pointer for scanning.
-class PromotionQueue {
- public:
- PromotionQueue() : front_(NULL), rear_(NULL) { }
-
- void Initialize(Address start_address) {
- front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
- }
-
- bool is_empty() { return front_ <= rear_; }
-
- inline void insert(HeapObject* target, int size);
-
- void remove(HeapObject** target, int* size) {
- *target = reinterpret_cast<HeapObject*>(*(--front_));
- *size = static_cast<int>(*(--front_));
- // Assert no underflow.
- ASSERT(front_ >= rear_);
- }
-
- private:
- // The front of the queue is higher in memory than the rear.
- intptr_t* front_;
- intptr_t* rear_;
-
- DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
-};
-
-
-// External strings table is a place where all external strings are
-// registered. We need to keep track of such strings to properly
-// finalize them.
-class ExternalStringTable {
- public:
- // Registers an external string.
- inline void AddString(String* string);
-
- inline void Iterate(ObjectVisitor* v);
-
- // Restores internal invariant and gets rid of collected strings.
- // Must be called after each Iterate() that modified the strings.
- void CleanUp();
-
- // Destroys all allocated memory.
- void TearDown();
-
- private:
- ExternalStringTable() { }
-
- friend class Heap;
-
- inline void Verify();
-
- inline void AddOldString(String* string);
-
- // Notifies the table that only a prefix of the new list is valid.
- inline void ShrinkNewStrings(int position);
-
- // To speed up scavenge collections new space string are kept
- // separate from old space strings.
- List<Object*> new_space_strings_;
- List<Object*> old_space_strings_;
-
- Heap* heap_;
-
- DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
-};
-
-
-class Heap {
- public:
- // Configure heap size before setup. Return false if the heap has been
- // setup already.
- bool ConfigureHeap(int max_semispace_size,
- int max_old_gen_size,
- int max_executable_size);
- bool ConfigureHeapDefault();
-
- // Initializes the global object heap. If create_heap_objects is true,
- // also creates the basic non-mutable objects.
- // Returns whether it succeeded.
- bool Setup(bool create_heap_objects);
-
- // Destroys all memory allocated by the heap.
- void TearDown();
-
- // Set the stack limit in the roots_ array. Some architectures generate
- // code that looks here, because it is faster than loading from the static
- // jslimit_/real_jslimit_ variable in the StackGuard.
- void SetStackLimits();
-
- // Returns whether Setup has been called.
- bool HasBeenSetup();
-
- // Returns the maximum amount of memory reserved for the heap. For
- // the young generation, we reserve 4 times the amount needed for a
- // semi space. The young generation consists of two semi spaces and
- // we reserve twice the amount needed for those in order to ensure
- // that new space can be aligned to its size.
- intptr_t MaxReserved() {
- return 4 * reserved_semispace_size_ + max_old_generation_size_;
- }
- int MaxSemiSpaceSize() { return max_semispace_size_; }
- int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
- int InitialSemiSpaceSize() { return initial_semispace_size_; }
- intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
- intptr_t MaxExecutableSize() { return max_executable_size_; }
-
- // Returns the capacity of the heap in bytes w/o growing. Heap grows when
- // more spaces are needed until it reaches the limit.
- intptr_t Capacity();
-
- // Returns the amount of memory currently committed for the heap.
- intptr_t CommittedMemory();
-
- // Returns the amount of executable memory currently committed for the heap.
- intptr_t CommittedMemoryExecutable();
-
- // Returns the available bytes in space w/o growing.
- // Heap doesn't guarantee that it can allocate an object that requires
- // all available bytes. Check MaxHeapObjectSize() instead.
- intptr_t Available();
-
- // Returns the maximum object size in paged space.
- inline int MaxObjectSizeInPagedSpace();
-
- // Returns of size of all objects residing in the heap.
- intptr_t SizeOfObjects();
-
- // Return the starting address and a mask for the new space. And-masking an
- // address with the mask will result in the start address of the new space
- // for all addresses in either semispace.
- Address NewSpaceStart() { return new_space_.start(); }
- uintptr_t NewSpaceMask() { return new_space_.mask(); }
- Address NewSpaceTop() { return new_space_.top(); }
-
- NewSpace* new_space() { return &new_space_; }
- OldSpace* old_pointer_space() { return old_pointer_space_; }
- OldSpace* old_data_space() { return old_data_space_; }
- OldSpace* code_space() { return code_space_; }
- MapSpace* map_space() { return map_space_; }
- CellSpace* cell_space() { return cell_space_; }
- LargeObjectSpace* lo_space() { return lo_space_; }
-
- bool always_allocate() { return always_allocate_scope_depth_ != 0; }
- Address always_allocate_scope_depth_address() {
- return reinterpret_cast<Address>(&always_allocate_scope_depth_);
- }
- bool linear_allocation() {
- return linear_allocation_scope_depth_ != 0;
- }
-
- Address* NewSpaceAllocationTopAddress() {
- return new_space_.allocation_top_address();
- }
- Address* NewSpaceAllocationLimitAddress() {
- return new_space_.allocation_limit_address();
- }
-
- // Uncommit unused semi space.
- bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
-
-#ifdef ENABLE_HEAP_PROTECTION
- // Protect/unprotect the heap by marking all spaces read-only/writable.
- void Protect();
- void Unprotect();
-#endif
-
- // Allocates and initializes a new JavaScript object based on a
- // constructor.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateJSObject(
- JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates and initializes a new global object based on a constructor.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateGlobalObject(JSFunction* constructor);
-
- // Returns a deep copy of the JavaScript object.
- // Properties and elements are copied too.
- // Returns failure if allocation failed.
- MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source);
-
- // Allocates the function prototype.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateFunctionPrototype(JSFunction* function);
-
- // Reinitialize an JSGlobalProxy based on a constructor. The object
- // must have the same size as objects allocated using the
- // constructor. The object is reinitialized and behaves as an
- // object that has been freshly allocated using the constructor.
- MUST_USE_RESULT MaybeObject* ReinitializeJSGlobalProxy(
- JSFunction* constructor, JSGlobalProxy* global);
-
- // Allocates and initializes a new JavaScript object based on a map.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMap(
- Map* map, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates a heap object based on the map.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* Allocate(Map* map, AllocationSpace space);
-
- // Allocates a JS Map in the heap.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateMap(InstanceType instance_type,
- int instance_size);
-
- // Allocates a partial map for bootstrapping.
- MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
- int instance_size);
-
- // Allocate a map for the specified function
- MUST_USE_RESULT MaybeObject* AllocateInitialMap(JSFunction* fun);
-
- // Allocates an empty code cache.
- MUST_USE_RESULT MaybeObject* AllocateCodeCache();
-
- // Clear the Instanceof cache (used when a prototype changes).
- inline void ClearInstanceofCache();
-
- // Allocates and fully initializes a String. There are two String
- // encodings: ASCII and two byte. One should choose between the three string
- // allocation functions based on the encoding of the string buffer used to
- // initialized the string.
- // - ...FromAscii initializes the string from a buffer that is ASCII
- // encoded (it does not check that the buffer is ASCII encoded) and the
- // result will be ASCII encoded.
- // - ...FromUTF8 initializes the string from a buffer that is UTF-8
- // encoded. If the characters are all single-byte characters, the
- // result will be ASCII encoded, otherwise it will converted to two
- // byte.
- // - ...FromTwoByte initializes the string from a buffer that is two-byte
- // encoded. If the characters are all single-byte characters, the
- // result will be converted to ASCII, otherwise it will be left as
- // two-byte.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateStringFromAscii(
- Vector<const char> str,
- PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT inline MaybeObject* AllocateStringFromUtf8(
- Vector<const char> str,
- PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeObject* AllocateStringFromUtf8Slow(
- Vector<const char> str,
- PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeObject* AllocateStringFromTwoByte(
- Vector<const uc16> str,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates a symbol in old space based on the character stream.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT inline MaybeObject* AllocateSymbol(Vector<const char> str,
- int chars,
- uint32_t hash_field);
-
- MUST_USE_RESULT inline MaybeObject* AllocateAsciiSymbol(
- Vector<const char> str,
- uint32_t hash_field);
-
- MUST_USE_RESULT inline MaybeObject* AllocateTwoByteSymbol(
- Vector<const uc16> str,
- uint32_t hash_field);
-
- MUST_USE_RESULT MaybeObject* AllocateInternalSymbol(
- unibrow::CharacterStream* buffer, int chars, uint32_t hash_field);
-
- MUST_USE_RESULT MaybeObject* AllocateExternalSymbol(
- Vector<const char> str,
- int chars);
-
- // Allocates and partially initializes a String. There are two String
- // encodings: ASCII and two byte. These functions allocate a string of the
- // given length and set its map and length fields. The characters of the
- // string are uninitialized.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateRawAsciiString(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeObject* AllocateRawTwoByteString(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Computes a single character string where the character has code.
- // A cache is used for ascii codes.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed. Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* LookupSingleCharacterStringFromCode(
- uint16_t code);
-
- // Allocate a byte array of the specified length
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateByteArray(int length,
- PretenureFlag pretenure);
-
- // Allocate a non-tenured byte array of the specified length
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateByteArray(int length);
-
- // Allocates an external array of the specified length and type.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateExternalArray(
- int length,
- ExternalArrayType array_type,
- void* external_pointer,
- PretenureFlag pretenure);
-
- // Allocate a tenured JS global property cell.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateJSGlobalPropertyCell(Object* value);
-
- // Allocates a fixed array initialized with undefined values
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateFixedArray(int length,
- PretenureFlag pretenure);
- // Allocates a fixed array initialized with undefined values
- MUST_USE_RESULT MaybeObject* AllocateFixedArray(int length);
-
- // Allocates an uninitialized fixed array. It must be filled by the caller.
- //
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateUninitializedFixedArray(int length);
-
- // Make a copy of src and return it. Returns
- // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- MUST_USE_RESULT inline MaybeObject* CopyFixedArray(FixedArray* src);
-
- // Make a copy of src, set the map, and return the copy. Returns
- // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- MUST_USE_RESULT MaybeObject* CopyFixedArrayWithMap(FixedArray* src, Map* map);
-
- // Allocates a fixed array initialized with the hole values.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateFixedArrayWithHoles(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
-
- // AllocateHashTable is identical to AllocateFixedArray except
- // that the resulting object has hash_table_map as map.
- MUST_USE_RESULT MaybeObject* AllocateHashTable(
- int length, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocate a global (but otherwise uninitialized) context.
- MUST_USE_RESULT MaybeObject* AllocateGlobalContext();
-
- // Allocate a function context.
- MUST_USE_RESULT MaybeObject* AllocateFunctionContext(int length,
- JSFunction* closure);
-
- // Allocate a 'with' context.
- MUST_USE_RESULT MaybeObject* AllocateWithContext(Context* previous,
- JSObject* extension,
- bool is_catch_context);
-
- // Allocates a new utility object in the old generation.
- MUST_USE_RESULT MaybeObject* AllocateStruct(InstanceType type);
-
- // Allocates a function initialized with a shared part.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateFunction(
- Map* function_map,
- SharedFunctionInfo* shared,
- Object* prototype,
- PretenureFlag pretenure = TENURED);
-
- // Arguments object size.
- static const int kArgumentsObjectSize =
- JSObject::kHeaderSize + 2 * kPointerSize;
- // Strict mode arguments has no callee so it is smaller.
- static const int kArgumentsObjectSizeStrict =
- JSObject::kHeaderSize + 1 * kPointerSize;
- // Indicies for direct access into argument objects.
- static const int kArgumentsLengthIndex = 0;
- // callee is only valid in non-strict mode.
- static const int kArgumentsCalleeIndex = 1;
-
- // Allocates an arguments object - optionally with an elements array.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateArgumentsObject(
- Object* callee, int length);
-
- // Same as NewNumberFromDouble, but may return a preallocated/immutable
- // number object (e.g., minus_zero_value_, nan_value_)
- MUST_USE_RESULT MaybeObject* NumberFromDouble(
- double value, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocated a HeapNumber from value.
- MUST_USE_RESULT MaybeObject* AllocateHeapNumber(
- double value,
- PretenureFlag pretenure);
- // pretenure = NOT_TENURED
- MUST_USE_RESULT MaybeObject* AllocateHeapNumber(double value);
-
- // Converts an int into either a Smi or a HeapNumber object.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT inline MaybeObject* NumberFromInt32(int32_t value);
-
- // Converts an int into either a Smi or a HeapNumber object.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT inline MaybeObject* NumberFromUint32(uint32_t value);
-
- // Allocates a new proxy object.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateProxy(
- Address proxy, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates a new SharedFunctionInfo object.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateSharedFunctionInfo(Object* name);
-
- // Allocates a new JSMessageObject object.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note that this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateJSMessageObject(
- String* type,
- JSArray* arguments,
- int start_position,
- int end_position,
- Object* script,
- Object* stack_trace,
- Object* stack_frames);
-
- // Allocates a new cons string object.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateConsString(String* first,
- String* second);
-
- // Allocates a new sub string object which is a substring of an underlying
- // string buffer stretching from the index start (inclusive) to the index
- // end (exclusive).
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateSubString(
- String* buffer,
- int start,
- int end,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Allocate a new external string object, which is backed by a string
- // resource that resides outside the V8 heap.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateExternalStringFromAscii(
- ExternalAsciiString::Resource* resource);
- MUST_USE_RESULT MaybeObject* AllocateExternalStringFromTwoByte(
- ExternalTwoByteString::Resource* resource);
-
- // Finalizes an external string by deleting the associated external
- // data and clearing the resource pointer.
- inline void FinalizeExternalString(String* string);
-
- // Allocates an uninitialized object. The memory is non-executable if the
- // hardware and OS allow.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes,
- AllocationSpace space,
- AllocationSpace retry_space);
-
- // Initialize a filler object to keep the ability to iterate over the heap
- // when shortening objects.
- void CreateFillerObjectAt(Address addr, int size);
-
- // Makes a new native code object
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed. On success, the pointer to the Code object is stored in the
- // self_reference. This allows generated code to reference its own Code
- // object by containing this pointer.
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* CreateCode(const CodeDesc& desc,
- Code::Flags flags,
- Handle<Object> self_reference,
- bool immovable = false);
-
- MUST_USE_RESULT MaybeObject* CopyCode(Code* code);
-
- // Copy the code and scope info part of the code object, but insert
- // the provided data as the relocation information.
- MUST_USE_RESULT MaybeObject* CopyCode(Code* code, Vector<byte> reloc_info);
-
- // Finds the symbol for string in the symbol table.
- // If not found, a new symbol is added to the table and returned.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if allocation
- // failed.
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* LookupSymbol(Vector<const char> str);
- MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Vector<const char> str);
- MUST_USE_RESULT MaybeObject* LookupTwoByteSymbol(
- Vector<const uc16> str);
- MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(const char* str) {
- return LookupSymbol(CStrVector(str));
- }
- MUST_USE_RESULT MaybeObject* LookupSymbol(String* str);
- bool LookupSymbolIfExists(String* str, String** symbol);
- bool LookupTwoCharsSymbolIfExists(String* str, String** symbol);
-
- // Compute the matching symbol map for a string if possible.
- // NULL is returned if string is in new space or not flattened.
- Map* SymbolMapForString(String* str);
-
- // Tries to flatten a string before compare operation.
- //
- // Returns a failure in case it was decided that flattening was
- // necessary and failed. Note, if flattening is not necessary the
- // string might stay non-flat even when not a failure is returned.
- //
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT inline MaybeObject* PrepareForCompare(String* str);
-
- // Converts the given boolean condition to JavaScript boolean value.
- inline Object* ToBoolean(bool condition);
-
- // Code that should be run before and after each GC. Includes some
- // reporting/verification activities when compiled with DEBUG set.
- void GarbageCollectionPrologue();
- void GarbageCollectionEpilogue();
-
- // Performs garbage collection operation.
- // Returns whether there is a chance that another major GC could
- // collect more garbage.
- bool CollectGarbage(AllocationSpace space, GarbageCollector collector);
-
- // Performs garbage collection operation.
- // Returns whether there is a chance that another major GC could
- // collect more garbage.
- inline bool CollectGarbage(AllocationSpace space);
-
- // Performs a full garbage collection. Force compaction if the
- // parameter is true.
- void CollectAllGarbage(bool force_compaction);
-
- // Last hope GC, should try to squeeze as much as possible.
- void CollectAllAvailableGarbage();
-
- // Notify the heap that a context has been disposed.
- int NotifyContextDisposed() { return ++contexts_disposed_; }
-
- // Utility to invoke the scavenger. This is needed in test code to
- // ensure correct callback for weak global handles.
- void PerformScavenge();
-
- PromotionQueue* promotion_queue() { return &promotion_queue_; }
-
-#ifdef DEBUG
- // Utility used with flag gc-greedy.
- void GarbageCollectionGreedyCheck();
-#endif
-
- void AddGCPrologueCallback(
- GCEpilogueCallback callback, GCType gc_type_filter);
- void RemoveGCPrologueCallback(GCEpilogueCallback callback);
-
- void AddGCEpilogueCallback(
- GCEpilogueCallback callback, GCType gc_type_filter);
- void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
-
- void SetGlobalGCPrologueCallback(GCCallback callback) {
- ASSERT((callback == NULL) ^ (global_gc_prologue_callback_ == NULL));
- global_gc_prologue_callback_ = callback;
- }
- void SetGlobalGCEpilogueCallback(GCCallback callback) {
- ASSERT((callback == NULL) ^ (global_gc_epilogue_callback_ == NULL));
- global_gc_epilogue_callback_ = callback;
- }
-
- // Heap root getters. We have versions with and without type::cast() here.
- // You can't use type::cast during GC because the assert fails.
-#define ROOT_ACCESSOR(type, name, camel_name) \
- type* name() { \
- return type::cast(roots_[k##camel_name##RootIndex]); \
- } \
- type* raw_unchecked_##name() { \
- return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \
- }
- ROOT_LIST(ROOT_ACCESSOR)
-#undef ROOT_ACCESSOR
-
-// Utility type maps
-#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
- Map* name##_map() { \
- return Map::cast(roots_[k##Name##MapRootIndex]); \
- }
- STRUCT_LIST(STRUCT_MAP_ACCESSOR)
-#undef STRUCT_MAP_ACCESSOR
-
-#define SYMBOL_ACCESSOR(name, str) String* name() { \
- return String::cast(roots_[k##name##RootIndex]); \
- }
- SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
-
- // The hidden_symbol is special because it is the empty string, but does
- // not match the empty string.
- String* hidden_symbol() { return hidden_symbol_; }
-
- void set_global_contexts_list(Object* object) {
- global_contexts_list_ = object;
- }
- Object* global_contexts_list() { return global_contexts_list_; }
-
- // Iterates over all roots in the heap.
- void IterateRoots(ObjectVisitor* v, VisitMode mode);
- // Iterates over all strong roots in the heap.
- void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
- // Iterates over all the other roots in the heap.
- void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
-
- enum ExpectedPageWatermarkState {
- WATERMARK_SHOULD_BE_VALID,
- WATERMARK_CAN_BE_INVALID
- };
-
- // For each dirty region on a page in use from an old space call
- // visit_dirty_region callback.
- // If either visit_dirty_region or callback can cause an allocation
- // in old space and changes in allocation watermark then
- // can_preallocate_during_iteration should be set to true.
- // All pages will be marked as having invalid watermark upon
- // iteration completion.
- void IterateDirtyRegions(
- PagedSpace* space,
- DirtyRegionCallback visit_dirty_region,
- ObjectSlotCallback callback,
- ExpectedPageWatermarkState expected_page_watermark_state);
-
- // Interpret marks as a bitvector of dirty marks for regions of size
- // Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering
- // memory interval from start to top. For each dirty region call a
- // visit_dirty_region callback. Return updated bitvector of dirty marks.
- uint32_t IterateDirtyRegions(uint32_t marks,
- Address start,
- Address end,
- DirtyRegionCallback visit_dirty_region,
- ObjectSlotCallback callback);
-
- // Iterate pointers to from semispace of new space found in memory interval
- // from start to end.
- // Update dirty marks for page containing start address.
- void IterateAndMarkPointersToFromSpace(Address start,
- Address end,
- ObjectSlotCallback callback);
-
- // Iterate pointers to new space found in memory interval from start to end.
- // Return true if pointers to new space was found.
- static bool IteratePointersInDirtyRegion(Heap* heap,
- Address start,
- Address end,
- ObjectSlotCallback callback);
-
-
- // Iterate pointers to new space found in memory interval from start to end.
- // This interval is considered to belong to the map space.
- // Return true if pointers to new space was found.
- static bool IteratePointersInDirtyMapsRegion(Heap* heap,
- Address start,
- Address end,
- ObjectSlotCallback callback);
-
-
- // Returns whether the object resides in new space.
- inline bool InNewSpace(Object* object);
- inline bool InFromSpace(Object* object);
- inline bool InToSpace(Object* object);
-
- // Checks whether an address/object in the heap (including auxiliary
- // area and unused area).
- bool Contains(Address addr);
- bool Contains(HeapObject* value);
-
- // Checks whether an address/object in a space.
- // Currently used by tests, serialization and heap verification only.
- bool InSpace(Address addr, AllocationSpace space);
- bool InSpace(HeapObject* value, AllocationSpace space);
-
- // Finds out which space an object should get promoted to based on its type.
- inline OldSpace* TargetSpace(HeapObject* object);
- inline AllocationSpace TargetSpaceId(InstanceType type);
-
- // Sets the stub_cache_ (only used when expanding the dictionary).
- void public_set_code_stubs(NumberDictionary* value) {
- roots_[kCodeStubsRootIndex] = value;
- }
-
- // Support for computing object sizes for old objects during GCs. Returns
- // a function that is guaranteed to be safe for computing object sizes in
- // the current GC phase.
- HeapObjectCallback GcSafeSizeOfOldObjectFunction() {
- return gc_safe_size_of_old_object_;
- }
-
- // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
- void public_set_non_monomorphic_cache(NumberDictionary* value) {
- roots_[kNonMonomorphicCacheRootIndex] = value;
- }
-
- void public_set_empty_script(Script* script) {
- roots_[kEmptyScriptRootIndex] = script;
- }
-
- // Update the next script id.
- inline void SetLastScriptId(Object* last_script_id);
-
- // Generated code can embed this address to get access to the roots.
- Object** roots_address() { return roots_; }
-
- // Get address of global contexts list for serialization support.
- Object** global_contexts_list_address() {
- return &global_contexts_list_;
- }
-
-#ifdef DEBUG
- void Print();
- void PrintHandles();
-
- // Verify the heap is in its normal state before or after a GC.
- void Verify();
-
- // Report heap statistics.
- void ReportHeapStatistics(const char* title);
- void ReportCodeStatistics(const char* title);
-
- // Fill in bogus values in from space
- void ZapFromSpace();
-#endif
-
-#if defined(ENABLE_LOGGING_AND_PROFILING)
- // Print short heap statistics.
- void PrintShortHeapStatistics();
-#endif
-
- // Makes a new symbol object
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* CreateSymbol(
- const char* str, int length, int hash);
- MUST_USE_RESULT MaybeObject* CreateSymbol(String* str);
-
- // Write barrier support for address[offset] = o.
- inline void RecordWrite(Address address, int offset);
-
- // Write barrier support for address[start : start + len[ = o.
- inline void RecordWrites(Address address, int start, int len);
-
- // Given an address occupied by a live code object, return that object.
- Object* FindCodeObject(Address a);
-
- // Invoke Shrink on shrinkable spaces.
- void Shrink();
-
- enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
- inline HeapState gc_state() { return gc_state_; }
-
-#ifdef DEBUG
- bool IsAllocationAllowed() { return allocation_allowed_; }
- inline bool allow_allocation(bool enable);
-
- bool disallow_allocation_failure() {
- return disallow_allocation_failure_;
- }
-
- void TracePathToObject(Object* target);
- void TracePathToGlobal();
-#endif
-
- // Callback function passed to Heap::Iterate etc. Copies an object if
- // necessary, the object might be promoted to an old space. The caller must
- // ensure the precondition that the object is (a) a heap object and (b) in
- // the heap's from space.
- static inline void ScavengePointer(HeapObject** p);
- static inline void ScavengeObject(HeapObject** p, HeapObject* object);
-
- // Commits from space if it is uncommitted.
- void EnsureFromSpaceIsCommitted();
-
- // Support for partial snapshots. After calling this we can allocate a
- // certain number of bytes using only linear allocation (with a
- // LinearAllocationScope and an AlwaysAllocateScope) without using freelists
- // or causing a GC. It returns true of space was reserved or false if a GC is
- // needed. For paged spaces the space requested must include the space wasted
- // at the end of each page when allocating linearly.
- void ReserveSpace(
- int new_space_size,
- int pointer_space_size,
- int data_space_size,
- int code_space_size,
- int map_space_size,
- int cell_space_size,
- int large_object_size);
-
- //
- // Support for the API.
- //
-
- bool CreateApiObjects();
-
- // Attempt to find the number in a small cache. If we finds it, return
- // the string representation of the number. Otherwise return undefined.
- Object* GetNumberStringCache(Object* number);
-
- // Update the cache with a new number-string pair.
- void SetNumberStringCache(Object* number, String* str);
-
- // Adjusts the amount of registered external memory.
- // Returns the adjusted value.
- inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
-
- // Allocate uninitialized fixed array.
- MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length);
- MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
- PretenureFlag pretenure);
-
- // True if we have reached the allocation limit in the old generation that
- // should force the next GC (caused normally) to be a full one.
- bool OldGenerationPromotionLimitReached() {
- return (PromotedSpaceSize() + PromotedExternalMemorySize())
- > old_gen_promotion_limit_;
- }
-
- intptr_t OldGenerationSpaceAvailable() {
- return old_gen_allocation_limit_ -
- (PromotedSpaceSize() + PromotedExternalMemorySize());
- }
-
- // True if we have reached the allocation limit in the old generation that
- // should artificially cause a GC right now.
- bool OldGenerationAllocationLimitReached() {
- return OldGenerationSpaceAvailable() < 0;
- }
-
- // Can be called when the embedding application is idle.
- bool IdleNotification();
-
- // Declare all the root indices.
- enum RootListIndex {
-#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
- STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
-#undef ROOT_INDEX_DECLARATION
-
-// Utility type maps
-#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
- STRUCT_LIST(DECLARE_STRUCT_MAP)
-#undef DECLARE_STRUCT_MAP
-
-#define SYMBOL_INDEX_DECLARATION(name, str) k##name##RootIndex,
- SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
-#undef SYMBOL_DECLARATION
-
- kSymbolTableRootIndex,
- kStrongRootListLength = kSymbolTableRootIndex,
- kRootListLength
- };
-
- MUST_USE_RESULT MaybeObject* NumberToString(
- Object* number, bool check_number_string_cache = true);
-
- Map* MapForExternalArrayType(ExternalArrayType array_type);
- RootListIndex RootIndexForExternalArrayType(
- ExternalArrayType array_type);
-
- void RecordStats(HeapStats* stats, bool take_snapshot = false);
-
- // Copy block of memory from src to dst. Size of block should be aligned
- // by pointer size.
- static inline void CopyBlock(Address dst, Address src, int byte_size);
-
- inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
- Address src,
- int byte_size);
-
- // Optimized version of memmove for blocks with pointer size aligned sizes and
- // pointer size aligned addresses.
- static inline void MoveBlock(Address dst, Address src, int byte_size);
-
- inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
- Address src,
- int byte_size);
-
- // Check new space expansion criteria and expand semispaces if it was hit.
- void CheckNewSpaceExpansionCriteria();
-
- inline void IncrementYoungSurvivorsCounter(int survived) {
- young_survivors_after_last_gc_ = survived;
- survived_since_last_expansion_ += survived;
- }
-
- void UpdateNewSpaceReferencesInExternalStringTable(
- ExternalStringTableUpdaterCallback updater_func);
-
- void ProcessWeakReferences(WeakObjectRetainer* retainer);
-
- // Helper function that governs the promotion policy from new space to
- // old. If the object's old address lies below the new space's age
- // mark or if we've already filled the bottom 1/16th of the to space,
- // we try to promote this object.
- inline bool ShouldBePromoted(Address old_address, int object_size);
-
- int MaxObjectSizeInNewSpace() { return kMaxObjectSizeInNewSpace; }
-
- void ClearJSFunctionResultCaches();
-
- void ClearNormalizedMapCaches();
-
- GCTracer* tracer() { return tracer_; }
-
- // Returns maximum GC pause.
- int get_max_gc_pause() { return max_gc_pause_; }
-
- // Returns maximum size of objects alive after GC.
- intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }
-
- // Returns minimal interval between two subsequent collections.
- int get_min_in_mutator() { return min_in_mutator_; }
-
- MarkCompactCollector* mark_compact_collector() {
- return &mark_compact_collector_;
- }
-
- ExternalStringTable* external_string_table() {
- return &external_string_table_;
- }
-
- inline Isolate* isolate();
- bool is_safe_to_read_maps() { return is_safe_to_read_maps_; }
-
- void CallGlobalGCPrologueCallback() {
- if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_();
- }
-
- void CallGlobalGCEpilogueCallback() {
- if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_();
- }
-
- private:
- Heap();
-
- // This can be calculated directly from a pointer to the heap; however, it is
- // more expedient to get at the isolate directly from within Heap methods.
- Isolate* isolate_;
-
- int reserved_semispace_size_;
- int max_semispace_size_;
- int initial_semispace_size_;
- intptr_t max_old_generation_size_;
- intptr_t max_executable_size_;
- intptr_t code_range_size_;
-
- // For keeping track of how much data has survived
- // scavenge since last new space expansion.
- int survived_since_last_expansion_;
-
- int always_allocate_scope_depth_;
- int linear_allocation_scope_depth_;
-
- // For keeping track of context disposals.
- int contexts_disposed_;
-
-#if defined(V8_TARGET_ARCH_X64)
- static const int kMaxObjectSizeInNewSpace = 1024*KB;
-#else
- static const int kMaxObjectSizeInNewSpace = 512*KB;
-#endif
-
- NewSpace new_space_;
- OldSpace* old_pointer_space_;
- OldSpace* old_data_space_;
- OldSpace* code_space_;
- MapSpace* map_space_;
- CellSpace* cell_space_;
- LargeObjectSpace* lo_space_;
- HeapState gc_state_;
-
- // Returns the size of object residing in non new spaces.
- intptr_t PromotedSpaceSize();
-
- // Returns the amount of external memory registered since last global gc.
- int PromotedExternalMemorySize();
-
- int mc_count_; // how many mark-compact collections happened
- int ms_count_; // how many mark-sweep collections happened
- unsigned int gc_count_; // how many gc happened
-
- // Total length of the strings we failed to flatten since the last GC.
- int unflattened_strings_length_;
-
-#define ROOT_ACCESSOR(type, name, camel_name) \
- inline void set_##name(type* value) { \
- roots_[k##camel_name##RootIndex] = value; \
- }
- ROOT_LIST(ROOT_ACCESSOR)
-#undef ROOT_ACCESSOR
-
-#ifdef DEBUG
- bool allocation_allowed_;
-
- // If the --gc-interval flag is set to a positive value, this
- // variable holds the value indicating the number of allocations
- // remain until the next failure and garbage collection.
- int allocation_timeout_;
-
- // Do we expect to be able to handle allocation failure at this
- // time?
- bool disallow_allocation_failure_;
-
- HeapDebugUtils* debug_utils_;
-#endif // DEBUG
-
- // Limit that triggers a global GC on the next (normally caused) GC. This
- // is checked when we have already decided to do a GC to help determine
- // which collector to invoke.
- intptr_t old_gen_promotion_limit_;
-
- // Limit that triggers a global GC as soon as is reasonable. This is
- // checked before expanding a paged space in the old generation and on
- // every allocation in large object space.
- intptr_t old_gen_allocation_limit_;
-
- // Limit on the amount of externally allocated memory allowed
- // between global GCs. If reached a global GC is forced.
- intptr_t external_allocation_limit_;
-
- // The amount of external memory registered through the API kept alive
- // by global handles
- int amount_of_external_allocated_memory_;
-
- // Caches the amount of external memory registered at the last global gc.
- int amount_of_external_allocated_memory_at_last_global_gc_;
-
- // Indicates that an allocation has failed in the old generation since the
- // last GC.
- int old_gen_exhausted_;
-
- Object* roots_[kRootListLength];
-
- Object* global_contexts_list_;
-
- struct StringTypeTable {
- InstanceType type;
- int size;
- RootListIndex index;
- };
-
- struct ConstantSymbolTable {
- const char* contents;
- RootListIndex index;
- };
-
- struct StructTable {
- InstanceType type;
- int size;
- RootListIndex index;
- };
-
- static const StringTypeTable string_type_table[];
- static const ConstantSymbolTable constant_symbol_table[];
- static const StructTable struct_table[];
-
- // The special hidden symbol which is an empty string, but does not match
- // any string when looked up in properties.
- String* hidden_symbol_;
-
- // GC callback function, called before and after mark-compact GC.
- // Allocations in the callback function are disallowed.
- struct GCPrologueCallbackPair {
- GCPrologueCallbackPair(GCPrologueCallback callback, GCType gc_type)
- : callback(callback), gc_type(gc_type) {
- }
- bool operator==(const GCPrologueCallbackPair& pair) const {
- return pair.callback == callback;
- }
- GCPrologueCallback callback;
- GCType gc_type;
- };
- List<GCPrologueCallbackPair> gc_prologue_callbacks_;
-
- struct GCEpilogueCallbackPair {
- GCEpilogueCallbackPair(GCEpilogueCallback callback, GCType gc_type)
- : callback(callback), gc_type(gc_type) {
- }
- bool operator==(const GCEpilogueCallbackPair& pair) const {
- return pair.callback == callback;
- }
- GCEpilogueCallback callback;
- GCType gc_type;
- };
- List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
-
- GCCallback global_gc_prologue_callback_;
- GCCallback global_gc_epilogue_callback_;
-
- // Support for computing object sizes during GC.
- HeapObjectCallback gc_safe_size_of_old_object_;
- static int GcSafeSizeOfOldObject(HeapObject* object);
- static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object);
-
- // Update the GC state. Called from the mark-compact collector.
- void MarkMapPointersAsEncoded(bool encoded) {
- gc_safe_size_of_old_object_ = encoded
- ? &GcSafeSizeOfOldObjectWithEncodedMap
- : &GcSafeSizeOfOldObject;
- }
-
- // Checks whether a global GC is necessary
- GarbageCollector SelectGarbageCollector(AllocationSpace space);
-
- // Performs garbage collection
- // Returns whether there is a chance another major GC could
- // collect more garbage.
- bool PerformGarbageCollection(GarbageCollector collector,
- GCTracer* tracer);
-
- static const intptr_t kMinimumPromotionLimit = 2 * MB;
- static const intptr_t kMinimumAllocationLimit = 8 * MB;
-
- inline void UpdateOldSpaceLimits();
-
- // Allocate an uninitialized object in map space. The behavior is identical
- // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
- // have to test the allocation space argument and (b) can reduce code size
- // (since both AllocateRaw and AllocateRawMap are inlined).
- MUST_USE_RESULT inline MaybeObject* AllocateRawMap();
-
- // Allocate an uninitialized object in the global property cell space.
- MUST_USE_RESULT inline MaybeObject* AllocateRawCell();
-
- // Initializes a JSObject based on its map.
- void InitializeJSObjectFromMap(JSObject* obj,
- FixedArray* properties,
- Map* map);
-
- bool CreateInitialMaps();
- bool CreateInitialObjects();
-
- // These five Create*EntryStub functions are here and forced to not be inlined
- // because of a gcc-4.4 bug that assigns wrong vtable entries.
- NO_INLINE(void CreateJSEntryStub());
- NO_INLINE(void CreateJSConstructEntryStub());
-
- void CreateFixedStubs();
-
- MaybeObject* CreateOddball(const char* to_string,
- Object* to_number,
- byte kind);
-
- // Allocate empty fixed array.
- MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
-
- void SwitchScavengingVisitorsTableIfProfilingWasEnabled();
-
- // Performs a minor collection in new generation.
- void Scavenge();
-
- static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
- Heap* heap,
- Object** pointer);
-
- Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
-
- // Performs a major collection in the whole heap.
- void MarkCompact(GCTracer* tracer);
-
- // Code to be run before and after mark-compact.
- void MarkCompactPrologue(bool is_compacting);
-
- // Completely clear the Instanceof cache (to stop it keeping objects alive
- // around a GC).
- inline void CompletelyClearInstanceofCache();
-
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
- // Record statistics before and after garbage collection.
- void ReportStatisticsBeforeGC();
- void ReportStatisticsAfterGC();
-#endif
-
- // Slow part of scavenge object.
- static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
-
- // Initializes a function with a shared part and prototype.
- // Returns the function.
- // Note: this code was factored out of AllocateFunction such that
- // other parts of the VM could use it. Specifically, a function that creates
- // instances of type JS_FUNCTION_TYPE benefit from the use of this function.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT inline MaybeObject* InitializeFunction(
- JSFunction* function,
- SharedFunctionInfo* shared,
- Object* prototype);
-
- GCTracer* tracer_;
-
-
- // Initializes the number to string cache based on the max semispace size.
- MUST_USE_RESULT MaybeObject* InitializeNumberStringCache();
- // Flush the number to string cache.
- void FlushNumberStringCache();
-
- void UpdateSurvivalRateTrend(int start_new_space_size);
-
- enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };
-
- static const int kYoungSurvivalRateThreshold = 90;
- static const int kYoungSurvivalRateAllowedDeviation = 15;
-
- int young_survivors_after_last_gc_;
- int high_survival_rate_period_length_;
- double survival_rate_;
- SurvivalRateTrend previous_survival_rate_trend_;
- SurvivalRateTrend survival_rate_trend_;
-
- void set_survival_rate_trend(SurvivalRateTrend survival_rate_trend) {
- ASSERT(survival_rate_trend != FLUCTUATING);
- previous_survival_rate_trend_ = survival_rate_trend_;
- survival_rate_trend_ = survival_rate_trend;
- }
-
- SurvivalRateTrend survival_rate_trend() {
- if (survival_rate_trend_ == STABLE) {
- return STABLE;
- } else if (previous_survival_rate_trend_ == STABLE) {
- return survival_rate_trend_;
- } else if (survival_rate_trend_ != previous_survival_rate_trend_) {
- return FLUCTUATING;
- } else {
- return survival_rate_trend_;
- }
- }
-
- bool IsStableOrIncreasingSurvivalTrend() {
- switch (survival_rate_trend()) {
- case STABLE:
- case INCREASING:
- return true;
- default:
- return false;
- }
- }
-
- bool IsIncreasingSurvivalTrend() {
- return survival_rate_trend() == INCREASING;
- }
-
- bool IsHighSurvivalRate() {
- return high_survival_rate_period_length_ > 0;
- }
-
- static const int kInitialSymbolTableSize = 2048;
- static const int kInitialEvalCacheSize = 64;
-
- // Maximum GC pause.
- int max_gc_pause_;
-
- // Maximum size of objects alive after GC.
- intptr_t max_alive_after_gc_;
-
- // Minimal interval between two subsequent collections.
- int min_in_mutator_;
-
- // Size of objects alive after last GC.
- intptr_t alive_after_last_gc_;
-
- double last_gc_end_timestamp_;
-
- MarkCompactCollector mark_compact_collector_;
-
- // This field contains the meaning of the WATERMARK_INVALIDATED flag.
- // Instead of clearing this flag from all pages we just flip
- // its meaning at the beginning of a scavenge.
- intptr_t page_watermark_invalidated_mark_;
-
- int number_idle_notifications_;
- unsigned int last_idle_notification_gc_count_;
- bool last_idle_notification_gc_count_init_;
-
- // Shared state read by the scavenge collector and set by ScavengeObject.
- PromotionQueue promotion_queue_;
-
- // Flag is set when the heap has been configured. The heap can be repeatedly
- // configured through the API until it is setup.
- bool configured_;
-
- ExternalStringTable external_string_table_;
-
- bool is_safe_to_read_maps_;
-
- friend class Factory;
- friend class GCTracer;
- friend class DisallowAllocationFailure;
- friend class AlwaysAllocateScope;
- friend class LinearAllocationScope;
- friend class Page;
- friend class Isolate;
- friend class MarkCompactCollector;
- friend class MapCompact;
-
- DISALLOW_COPY_AND_ASSIGN(Heap);
-};
-
-
-class HeapStats {
- public:
- static const int kStartMarker = 0xDECADE00;
- static const int kEndMarker = 0xDECADE01;
-
- int* start_marker; // 0
- int* new_space_size; // 1
- int* new_space_capacity; // 2
- intptr_t* old_pointer_space_size; // 3
- intptr_t* old_pointer_space_capacity; // 4
- intptr_t* old_data_space_size; // 5
- intptr_t* old_data_space_capacity; // 6
- intptr_t* code_space_size; // 7
- intptr_t* code_space_capacity; // 8
- intptr_t* map_space_size; // 9
- intptr_t* map_space_capacity; // 10
- intptr_t* cell_space_size; // 11
- intptr_t* cell_space_capacity; // 12
- intptr_t* lo_space_size; // 13
- int* global_handle_count; // 14
- int* weak_global_handle_count; // 15
- int* pending_global_handle_count; // 16
- int* near_death_global_handle_count; // 17
- int* destroyed_global_handle_count; // 18
- intptr_t* memory_allocator_size; // 19
- intptr_t* memory_allocator_capacity; // 20
- int* objects_per_type; // 21
- int* size_per_type; // 22
- int* os_error; // 23
- int* end_marker; // 24
-};
-
-
-class AlwaysAllocateScope {
- public:
- AlwaysAllocateScope() {
- // We shouldn't hit any nested scopes, because that requires
- // non-handle code to call handle code. The code still works but
- // performance will degrade, so we want to catch this situation
- // in debug mode.
- ASSERT(HEAP->always_allocate_scope_depth_ == 0);
- HEAP->always_allocate_scope_depth_++;
- }
-
- ~AlwaysAllocateScope() {
- HEAP->always_allocate_scope_depth_--;
- ASSERT(HEAP->always_allocate_scope_depth_ == 0);
- }
-};
-
-
-class LinearAllocationScope {
- public:
- LinearAllocationScope() {
- HEAP->linear_allocation_scope_depth_++;
- }
-
- ~LinearAllocationScope() {
- HEAP->linear_allocation_scope_depth_--;
- ASSERT(HEAP->linear_allocation_scope_depth_ >= 0);
- }
-};
-
-
-#ifdef DEBUG
-// Visitor class to verify interior pointers in spaces that do not contain
-// or care about intergenerational references. All heap object pointers have to
-// point into the heap to a location that has a map pointer at its first word.
-// Caveat: Heap::Contains is an approximation because it can return true for
-// objects in a heap space but above the allocation pointer.
-class VerifyPointersVisitor: public ObjectVisitor {
- public:
- void VisitPointers(Object** start, Object** end) {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- ASSERT(HEAP->Contains(object));
- ASSERT(object->map()->IsMap());
- }
- }
- }
-};
-
-
-// Visitor class to verify interior pointers in spaces that use region marks
-// to keep track of intergenerational references.
-// As VerifyPointersVisitor but also checks that dirty marks are set
-// for regions covering intergenerational references.
-class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
- public:
- void VisitPointers(Object** start, Object** end) {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- ASSERT(HEAP->Contains(object));
- ASSERT(object->map()->IsMap());
- if (HEAP->InNewSpace(object)) {
- ASSERT(HEAP->InToSpace(object));
- Address addr = reinterpret_cast<Address>(current);
- ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
- }
- }
- }
- }
-};
-#endif
-
-
-// Space iterator for iterating over all spaces of the heap.
-// Returns each space in turn, and null when it is done.
-class AllSpaces BASE_EMBEDDED {
- public:
- Space* next();
- AllSpaces() { counter_ = FIRST_SPACE; }
- private:
- int counter_;
-};
-
-
-// Space iterator for iterating over all old spaces of the heap: Old pointer
-// space, old data space and code space.
-// Returns each space in turn, and null when it is done.
-class OldSpaces BASE_EMBEDDED {
- public:
- OldSpace* next();
- OldSpaces() { counter_ = OLD_POINTER_SPACE; }
- private:
- int counter_;
-};
-
-
-// Space iterator for iterating over all the paged spaces of the heap:
-// Map space, old pointer space, old data space, code space and cell space.
-// Returns each space in turn, and null when it is done.
-class PagedSpaces BASE_EMBEDDED {
- public:
- PagedSpace* next();
- PagedSpaces() { counter_ = OLD_POINTER_SPACE; }
- private:
- int counter_;
-};
-
-
-// Space iterator for iterating over all spaces of the heap.
-// For each space an object iterator is provided. The deallocation of the
-// returned object iterators is handled by the space iterator.
-class SpaceIterator : public Malloced {
- public:
- SpaceIterator();
- explicit SpaceIterator(HeapObjectCallback size_func);
- virtual ~SpaceIterator();
-
- bool has_next();
- ObjectIterator* next();
-
- private:
- ObjectIterator* CreateIterator();
-
- int current_space_; // from enum AllocationSpace.
- ObjectIterator* iterator_; // object iterator for the current space.
- HeapObjectCallback size_func_;
-};
-
-
-// A HeapIterator provides iteration over the whole heap. It
-// aggregates the specific iterators for the different spaces as
-// these can only iterate over one space only.
-//
-// HeapIterator can skip free list nodes (that is, de-allocated heap
-// objects that still remain in the heap). As implementation of free
-// nodes filtering uses GC marks, it can't be used during MS/MC GC
-// phases. Also, it is forbidden to interrupt iteration in this mode,
-// as this will leave heap objects marked (and thus, unusable).
-class HeapObjectsFilter;
-
-class HeapIterator BASE_EMBEDDED {
- public:
- enum HeapObjectsFiltering {
- kNoFiltering,
- kFilterFreeListNodes,
- kFilterUnreachable
- };
-
- HeapIterator();
- explicit HeapIterator(HeapObjectsFiltering filtering);
- ~HeapIterator();
-
- HeapObject* next();
- void reset();
-
- private:
- // Perform the initialization.
- void Init();
- // Perform all necessary shutdown (destruction) work.
- void Shutdown();
- HeapObject* NextObject();
-
- HeapObjectsFiltering filtering_;
- HeapObjectsFilter* filter_;
- // Space iterator for iterating all the spaces.
- SpaceIterator* space_iterator_;
- // Object iterator for the space currently being iterated.
- ObjectIterator* object_iterator_;
-};
-
-
-// Cache for mapping (map, property name) into field offset.
-// Cleared at startup and prior to mark sweep collection.
-class KeyedLookupCache {
- public:
- // Lookup field offset for (map, name). If absent, -1 is returned.
- int Lookup(Map* map, String* name);
-
- // Update an element in the cache.
- void Update(Map* map, String* name, int field_offset);
-
- // Clear the cache.
- void Clear();
-
- static const int kLength = 64;
- static const int kCapacityMask = kLength - 1;
- static const int kMapHashShift = 2;
- static const int kNotFound = -1;
-
- private:
- KeyedLookupCache() {
- for (int i = 0; i < kLength; ++i) {
- keys_[i].map = NULL;
- keys_[i].name = NULL;
- field_offsets_[i] = kNotFound;
- }
- }
-
- static inline int Hash(Map* map, String* name);
-
- // Get the address of the keys and field_offsets arrays. Used in
- // generated code to perform cache lookups.
- Address keys_address() {
- return reinterpret_cast<Address>(&keys_);
- }
-
- Address field_offsets_address() {
- return reinterpret_cast<Address>(&field_offsets_);
- }
-
- struct Key {
- Map* map;
- String* name;
- };
-
- Key keys_[kLength];
- int field_offsets_[kLength];
-
- friend class ExternalReference;
- friend class Isolate;
- DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
-};
-
-
-// Cache for mapping (array, property name) into descriptor index.
-// The cache contains both positive and negative results.
-// Descriptor index equals kNotFound means the property is absent.
-// Cleared at startup and prior to any gc.
-class DescriptorLookupCache {
- public:
- // Lookup descriptor index for (map, name).
- // If absent, kAbsent is returned.
- int Lookup(DescriptorArray* array, String* name) {
- if (!StringShape(name).IsSymbol()) return kAbsent;
- int index = Hash(array, name);
- Key& key = keys_[index];
- if ((key.array == array) && (key.name == name)) return results_[index];
- return kAbsent;
- }
-
- // Update an element in the cache.
- void Update(DescriptorArray* array, String* name, int result) {
- ASSERT(result != kAbsent);
- if (StringShape(name).IsSymbol()) {
- int index = Hash(array, name);
- Key& key = keys_[index];
- key.array = array;
- key.name = name;
- results_[index] = result;
- }
- }
-
- // Clear the cache.
- void Clear();
-
- static const int kAbsent = -2;
- private:
- DescriptorLookupCache() {
- for (int i = 0; i < kLength; ++i) {
- keys_[i].array = NULL;
- keys_[i].name = NULL;
- results_[i] = kAbsent;
- }
- }
-
- static int Hash(DescriptorArray* array, String* name) {
- // Uses only lower 32 bits if pointers are larger.
- uint32_t array_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(array)) >> 2;
- uint32_t name_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >> 2;
- return (array_hash ^ name_hash) % kLength;
- }
-
- static const int kLength = 64;
- struct Key {
- DescriptorArray* array;
- String* name;
- };
-
- Key keys_[kLength];
- int results_[kLength];
-
- friend class Isolate;
- DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
-};
-
-
-// A helper class to document/test C++ scopes where we do not
-// expect a GC. Usage:
-//
-// /* Allocation not allowed: we cannot handle a GC in this scope. */
-// { AssertNoAllocation nogc;
-// ...
-// }
-
-#ifdef DEBUG
-
-class DisallowAllocationFailure {
- public:
- DisallowAllocationFailure() {
- old_state_ = HEAP->disallow_allocation_failure_;
- HEAP->disallow_allocation_failure_ = true;
- }
- ~DisallowAllocationFailure() {
- HEAP->disallow_allocation_failure_ = old_state_;
- }
- private:
- bool old_state_;
-};
-
-class AssertNoAllocation {
- public:
- AssertNoAllocation() {
- old_state_ = HEAP->allow_allocation(false);
- }
-
- ~AssertNoAllocation() {
- HEAP->allow_allocation(old_state_);
- }
-
- private:
- bool old_state_;
-};
-
-class DisableAssertNoAllocation {
- public:
- DisableAssertNoAllocation() {
- old_state_ = HEAP->allow_allocation(true);
- }
-
- ~DisableAssertNoAllocation() {
- HEAP->allow_allocation(old_state_);
- }
-
- private:
- bool old_state_;
-};
-
-#else // ndef DEBUG
-
-class AssertNoAllocation {
- public:
- AssertNoAllocation() { }
- ~AssertNoAllocation() { }
-};
-
-class DisableAssertNoAllocation {
- public:
- DisableAssertNoAllocation() { }
- ~DisableAssertNoAllocation() { }
-};
-
-#endif
-
-// GCTracer collects and prints ONE line after each garbage collector
-// invocation IFF --trace_gc is used.
-
-class GCTracer BASE_EMBEDDED {
- public:
- class Scope BASE_EMBEDDED {
- public:
- enum ScopeId {
- EXTERNAL,
- MC_MARK,
- MC_SWEEP,
- MC_SWEEP_NEWSPACE,
- MC_COMPACT,
- MC_FLUSH_CODE,
- kNumberOfScopes
- };
-
- Scope(GCTracer* tracer, ScopeId scope)
- : tracer_(tracer),
- scope_(scope) {
- start_time_ = OS::TimeCurrentMillis();
- }
-
- ~Scope() {
- ASSERT(scope_ < kNumberOfScopes); // scope_ is unsigned.
- tracer_->scopes_[scope_] += OS::TimeCurrentMillis() - start_time_;
- }
-
- private:
- GCTracer* tracer_;
- ScopeId scope_;
- double start_time_;
- };
-
- explicit GCTracer(Heap* heap);
- ~GCTracer();
-
- // Sets the collector.
- void set_collector(GarbageCollector collector) { collector_ = collector; }
-
- // Sets the GC count.
- void set_gc_count(unsigned int count) { gc_count_ = count; }
-
- // Sets the full GC count.
- void set_full_gc_count(int count) { full_gc_count_ = count; }
-
- // Sets the flag that this is a compacting full GC.
- void set_is_compacting() { is_compacting_ = true; }
- bool is_compacting() const { return is_compacting_; }
-
- // Increment and decrement the count of marked objects.
- void increment_marked_count() { ++marked_count_; }
- void decrement_marked_count() { --marked_count_; }
-
- int marked_count() { return marked_count_; }
-
- void increment_promoted_objects_size(int object_size) {
- promoted_objects_size_ += object_size;
- }
-
- private:
- // Returns a string matching the collector.
- const char* CollectorString();
-
- // Returns size of object in heap (in MB).
- double SizeOfHeapObjects() {
- return (static_cast<double>(HEAP->SizeOfObjects())) / MB;
- }
-
- double start_time_; // Timestamp set in the constructor.
- intptr_t start_size_; // Size of objects in heap set in constructor.
- GarbageCollector collector_; // Type of collector.
-
- // A count (including this one, eg, the first collection is 1) of the
- // number of garbage collections.
- unsigned int gc_count_;
-
- // A count (including this one) of the number of full garbage collections.
- int full_gc_count_;
-
- // True if the current GC is a compacting full collection, false
- // otherwise.
- bool is_compacting_;
-
- // True if the *previous* full GC cwas a compacting collection (will be
- // false if there has not been a previous full GC).
- bool previous_has_compacted_;
-
- // On a full GC, a count of the number of marked objects. Incremented
- // when an object is marked and decremented when an object's mark bit is
- // cleared. Will be zero on a scavenge collection.
- int marked_count_;
-
- // The count from the end of the previous full GC. Will be zero if there
- // was no previous full GC.
- int previous_marked_count_;
-
- // Amounts of time spent in different scopes during GC.
- double scopes_[Scope::kNumberOfScopes];
-
- // Total amount of space either wasted or contained in one of free lists
- // before the current GC.
- intptr_t in_free_list_or_wasted_before_gc_;
-
- // Difference between space used in the heap at the beginning of the current
- // collection and the end of the previous collection.
- intptr_t allocated_since_last_gc_;
-
- // Amount of time spent in mutator that is time elapsed between end of the
- // previous collection and the beginning of the current one.
- double spent_in_mutator_;
-
- // Size of objects promoted during the current collection.
- intptr_t promoted_objects_size_;
-
- Heap* heap_;
-};
-
-
-class TranscendentalCache {
- public:
- enum Type {ACOS, ASIN, ATAN, COS, EXP, LOG, SIN, TAN, kNumberOfCaches};
- static const int kTranscendentalTypeBits = 3;
- STATIC_ASSERT((1 << kTranscendentalTypeBits) >= kNumberOfCaches);
-
- // Returns a heap number with f(input), where f is a math function specified
- // by the 'type' argument.
- MUST_USE_RESULT inline MaybeObject* Get(Type type, double input);
-
- // The cache contains raw Object pointers. This method disposes of
- // them before a garbage collection.
- void Clear();
-
- private:
- class SubCache {
- static const int kCacheSize = 512;
-
- explicit SubCache(Type t);
-
- MUST_USE_RESULT inline MaybeObject* Get(double input);
-
- inline double Calculate(double input);
-
- struct Element {
- uint32_t in[2];
- Object* output;
- };
-
- union Converter {
- double dbl;
- uint32_t integers[2];
- };
-
- inline static int Hash(const Converter& c) {
- uint32_t hash = (c.integers[0] ^ c.integers[1]);
- hash ^= static_cast<int32_t>(hash) >> 16;
- hash ^= static_cast<int32_t>(hash) >> 8;
- return (hash & (kCacheSize - 1));
- }
-
- Element elements_[kCacheSize];
- Type type_;
- Isolate* isolate_;
-
- // Allow access to the caches_ array as an ExternalReference.
- friend class ExternalReference;
- // Inline implementation of the cache.
- friend class TranscendentalCacheStub;
- // For evaluating value.
- friend class TranscendentalCache;
-
- DISALLOW_COPY_AND_ASSIGN(SubCache);
- };
-
- TranscendentalCache() {
- for (int i = 0; i < kNumberOfCaches; ++i) caches_[i] = NULL;
- }
-
- // Used to create an external reference.
- inline Address cache_array_address();
-
- // Instantiation
- friend class Isolate;
- // Inline implementation of the caching.
- friend class TranscendentalCacheStub;
- // Allow access to the caches_ array as an ExternalReference.
- friend class ExternalReference;
-
- SubCache* caches_[kNumberOfCaches];
- DISALLOW_COPY_AND_ASSIGN(TranscendentalCache);
-};
-
-
-// Abstract base class for checking whether a weak object should be retained.
-class WeakObjectRetainer {
- public:
- virtual ~WeakObjectRetainer() {}
-
- // Return whether this object should be retained. If NULL is returned the
- // object has no references. Otherwise the address of the retained object
- // should be returned as in some GC situations the object has been moved.
- virtual Object* RetainAs(Object* object) = 0;
-};
-
-
-#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
-// Helper class for tracing paths to a search target Object from all roots.
-// The TracePathFrom() method can be used to trace paths from a specific
-// object to the search target object.
-class PathTracer : public ObjectVisitor {
- public:
- enum WhatToFind {
- FIND_ALL, // Will find all matches.
- FIND_FIRST // Will stop the search after first match.
- };
-
- // For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop
- // after the first match. If FIND_ALL is specified, then tracing will be
- // done for all matches.
- PathTracer(Object* search_target,
- WhatToFind what_to_find,
- VisitMode visit_mode)
- : search_target_(search_target),
- found_target_(false),
- found_target_in_trace_(false),
- what_to_find_(what_to_find),
- visit_mode_(visit_mode),
- object_stack_(20),
- no_alloc() {}
-
- virtual void VisitPointers(Object** start, Object** end);
-
- void Reset();
- void TracePathFrom(Object** root);
-
- bool found() const { return found_target_; }
-
- static Object* const kAnyGlobalObject;
-
- protected:
- class MarkVisitor;
- class UnmarkVisitor;
-
- void MarkRecursively(Object** p, MarkVisitor* mark_visitor);
- void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor);
- virtual void ProcessResults();
-
- // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
- static const int kMarkTag = 2;
-
- Object* search_target_;
- bool found_target_;
- bool found_target_in_trace_;
- WhatToFind what_to_find_;
- VisitMode visit_mode_;
- List<Object*> object_stack_;
-
- AssertNoAllocation no_alloc; // i.e. no gc allowed.
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
-};
-#endif // DEBUG || LIVE_OBJECT_LIST
-
-
-} } // namespace v8::internal
-
-#undef HEAP
-
-#endif // V8_HEAP_H_
diff --git a/src/3rdparty/v8/src/hydrogen-instructions.cc b/src/3rdparty/v8/src/hydrogen-instructions.cc
deleted file mode 100644
index f7adea6..0000000
--- a/src/3rdparty/v8/src/hydrogen-instructions.cc
+++ /dev/null
@@ -1,1639 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "factory.h"
-#include "hydrogen.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/lithium-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/lithium-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/lithium-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/lithium-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- LInstruction* H##type::CompileToLithium(LChunkBuilder* builder) { \
- return builder->Do##type(this); \
- }
-HYDROGEN_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-
-const char* Representation::Mnemonic() const {
- switch (kind_) {
- case kNone: return "v";
- case kTagged: return "t";
- case kDouble: return "d";
- case kInteger32: return "i";
- case kExternal: return "x";
- case kNumRepresentations:
- UNREACHABLE();
- return NULL;
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-static int32_t ConvertAndSetOverflow(int64_t result, bool* overflow) {
- if (result > kMaxInt) {
- *overflow = true;
- return kMaxInt;
- }
- if (result < kMinInt) {
- *overflow = true;
- return kMinInt;
- }
- return static_cast<int32_t>(result);
-}
-
-
-static int32_t AddWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
- int64_t result = static_cast<int64_t>(a) + static_cast<int64_t>(b);
- return ConvertAndSetOverflow(result, overflow);
-}
-
-
-static int32_t SubWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
- int64_t result = static_cast<int64_t>(a) - static_cast<int64_t>(b);
- return ConvertAndSetOverflow(result, overflow);
-}
-
-
-static int32_t MulWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
- int64_t result = static_cast<int64_t>(a) * static_cast<int64_t>(b);
- return ConvertAndSetOverflow(result, overflow);
-}
-
-
-int32_t Range::Mask() const {
- if (lower_ == upper_) return lower_;
- if (lower_ >= 0) {
- int32_t res = 1;
- while (res < upper_) {
- res = (res << 1) | 1;
- }
- return res;
- }
- return 0xffffffff;
-}
-
-
-void Range::AddConstant(int32_t value) {
- if (value == 0) return;
- bool may_overflow = false; // Overflow is ignored here.
- lower_ = AddWithoutOverflow(lower_, value, &may_overflow);
- upper_ = AddWithoutOverflow(upper_, value, &may_overflow);
- Verify();
-}
-
-
-void Range::Intersect(Range* other) {
- upper_ = Min(upper_, other->upper_);
- lower_ = Max(lower_, other->lower_);
- bool b = CanBeMinusZero() && other->CanBeMinusZero();
- set_can_be_minus_zero(b);
-}
-
-
-void Range::Union(Range* other) {
- upper_ = Max(upper_, other->upper_);
- lower_ = Min(lower_, other->lower_);
- bool b = CanBeMinusZero() || other->CanBeMinusZero();
- set_can_be_minus_zero(b);
-}
-
-
-void Range::Sar(int32_t value) {
- int32_t bits = value & 0x1F;
- lower_ = lower_ >> bits;
- upper_ = upper_ >> bits;
- set_can_be_minus_zero(false);
-}
-
-
-void Range::Shl(int32_t value) {
- int32_t bits = value & 0x1F;
- int old_lower = lower_;
- int old_upper = upper_;
- lower_ = lower_ << bits;
- upper_ = upper_ << bits;
- if (old_lower != lower_ >> bits || old_upper != upper_ >> bits) {
- upper_ = kMaxInt;
- lower_ = kMinInt;
- }
- set_can_be_minus_zero(false);
-}
-
-
-bool Range::AddAndCheckOverflow(Range* other) {
- bool may_overflow = false;
- lower_ = AddWithoutOverflow(lower_, other->lower(), &may_overflow);
- upper_ = AddWithoutOverflow(upper_, other->upper(), &may_overflow);
- KeepOrder();
- Verify();
- return may_overflow;
-}
-
-
-bool Range::SubAndCheckOverflow(Range* other) {
- bool may_overflow = false;
- lower_ = SubWithoutOverflow(lower_, other->upper(), &may_overflow);
- upper_ = SubWithoutOverflow(upper_, other->lower(), &may_overflow);
- KeepOrder();
- Verify();
- return may_overflow;
-}
-
-
-void Range::KeepOrder() {
- if (lower_ > upper_) {
- int32_t tmp = lower_;
- lower_ = upper_;
- upper_ = tmp;
- }
-}
-
-
-void Range::Verify() const {
- ASSERT(lower_ <= upper_);
-}
-
-
-bool Range::MulAndCheckOverflow(Range* other) {
- bool may_overflow = false;
- int v1 = MulWithoutOverflow(lower_, other->lower(), &may_overflow);
- int v2 = MulWithoutOverflow(lower_, other->upper(), &may_overflow);
- int v3 = MulWithoutOverflow(upper_, other->lower(), &may_overflow);
- int v4 = MulWithoutOverflow(upper_, other->upper(), &may_overflow);
- lower_ = Min(Min(v1, v2), Min(v3, v4));
- upper_ = Max(Max(v1, v2), Max(v3, v4));
- Verify();
- return may_overflow;
-}
-
-
-const char* HType::ToString() {
- switch (type_) {
- case kTagged: return "tagged";
- case kTaggedPrimitive: return "primitive";
- case kTaggedNumber: return "number";
- case kSmi: return "smi";
- case kHeapNumber: return "heap-number";
- case kString: return "string";
- case kBoolean: return "boolean";
- case kNonPrimitive: return "non-primitive";
- case kJSArray: return "array";
- case kJSObject: return "object";
- case kUninitialized: return "uninitialized";
- }
- UNREACHABLE();
- return "Unreachable code";
-}
-
-
-const char* HType::ToShortString() {
- switch (type_) {
- case kTagged: return "t";
- case kTaggedPrimitive: return "p";
- case kTaggedNumber: return "n";
- case kSmi: return "m";
- case kHeapNumber: return "h";
- case kString: return "s";
- case kBoolean: return "b";
- case kNonPrimitive: return "r";
- case kJSArray: return "a";
- case kJSObject: return "o";
- case kUninitialized: return "z";
- }
- UNREACHABLE();
- return "Unreachable code";
-}
-
-
-HType HType::TypeFromValue(Handle<Object> value) {
- HType result = HType::Tagged();
- if (value->IsSmi()) {
- result = HType::Smi();
- } else if (value->IsHeapNumber()) {
- result = HType::HeapNumber();
- } else if (value->IsString()) {
- result = HType::String();
- } else if (value->IsBoolean()) {
- result = HType::Boolean();
- } else if (value->IsJSObject()) {
- result = HType::JSObject();
- } else if (value->IsJSArray()) {
- result = HType::JSArray();
- }
- return result;
-}
-
-
-int HValue::LookupOperandIndex(int occurrence_index, HValue* op) {
- for (int i = 0; i < OperandCount(); ++i) {
- if (OperandAt(i) == op) {
- if (occurrence_index == 0) return i;
- --occurrence_index;
- }
- }
- return -1;
-}
-
-
-bool HValue::IsDefinedAfter(HBasicBlock* other) const {
- return block()->block_id() > other->block_id();
-}
-
-
-bool HValue::UsesMultipleTimes(HValue* op) {
- bool seen = false;
- for (int i = 0; i < OperandCount(); ++i) {
- if (OperandAt(i) == op) {
- if (seen) return true;
- seen = true;
- }
- }
- return false;
-}
-
-
-bool HValue::Equals(HValue* other) {
- if (other->opcode() != opcode()) return false;
- if (!other->representation().Equals(representation())) return false;
- if (!other->type_.Equals(type_)) return false;
- if (other->flags() != flags()) return false;
- if (OperandCount() != other->OperandCount()) return false;
- for (int i = 0; i < OperandCount(); ++i) {
- if (OperandAt(i)->id() != other->OperandAt(i)->id()) return false;
- }
- bool result = DataEquals(other);
- ASSERT(!result || Hashcode() == other->Hashcode());
- return result;
-}
-
-
-intptr_t HValue::Hashcode() {
- intptr_t result = opcode();
- int count = OperandCount();
- for (int i = 0; i < count; ++i) {
- result = result * 19 + OperandAt(i)->id() + (result >> 7);
- }
- return result;
-}
-
-
-void HValue::SetOperandAt(int index, HValue* value) {
- ASSERT(value == NULL || !value->representation().IsNone());
- RegisterUse(index, value);
- InternalSetOperandAt(index, value);
-}
-
-
-void HValue::ReplaceAndDelete(HValue* other) {
- if (other != NULL) ReplaceValue(other);
- Delete();
-}
-
-
-void HValue::ReplaceValue(HValue* other) {
- for (int i = 0; i < uses_.length(); ++i) {
- HValue* use = uses_[i];
- ASSERT(!use->block()->IsStartBlock());
- InternalReplaceAtUse(use, other);
- other->uses_.Add(use);
- }
- uses_.Rewind(0);
-}
-
-
-void HValue::ClearOperands() {
- for (int i = 0; i < OperandCount(); ++i) {
- SetOperandAt(i, NULL);
- }
-}
-
-
-void HValue::Delete() {
- ASSERT(HasNoUses());
- ClearOperands();
- DeleteFromGraph();
-}
-
-
-void HValue::ReplaceAtUse(HValue* use, HValue* other) {
- for (int i = 0; i < use->OperandCount(); ++i) {
- if (use->OperandAt(i) == this) {
- use->SetOperandAt(i, other);
- }
- }
-}
-
-
-void HValue::ReplaceFirstAtUse(HValue* use, HValue* other, Representation r) {
- for (int i = 0; i < use->OperandCount(); ++i) {
- if (use->RequiredInputRepresentation(i).Equals(r) &&
- use->OperandAt(i) == this) {
- use->SetOperandAt(i, other);
- return;
- }
- }
-}
-
-
-void HValue::InternalReplaceAtUse(HValue* use, HValue* other) {
- for (int i = 0; i < use->OperandCount(); ++i) {
- if (use->OperandAt(i) == this) {
- // Call internal method that does not update use lists. The caller is
- // responsible for doing so.
- use->InternalSetOperandAt(i, other);
- }
- }
-}
-
-
-void HValue::SetBlock(HBasicBlock* block) {
- ASSERT(block_ == NULL || block == NULL);
- block_ = block;
- if (id_ == kNoNumber && block != NULL) {
- id_ = block->graph()->GetNextValueID(this);
- }
-}
-
-
-void HValue::PrintTypeTo(HType type, StringStream* stream) {
- stream->Add(type.ToShortString());
-}
-
-
-void HValue::PrintNameTo(StringStream* stream) {
- stream->Add("%s%d", representation_.Mnemonic(), id());
-}
-
-
-bool HValue::UpdateInferredType() {
- HType type = CalculateInferredType();
- bool result = (!type.Equals(type_));
- type_ = type;
- return result;
-}
-
-
-void HValue::RegisterUse(int index, HValue* new_value) {
- HValue* old_value = OperandAt(index);
- if (old_value == new_value) return;
- if (old_value != NULL) old_value->uses_.RemoveElement(this);
- if (new_value != NULL) {
- new_value->uses_.Add(this);
- }
-}
-
-
-void HValue::AddNewRange(Range* r) {
- if (!HasRange()) ComputeInitialRange();
- if (!HasRange()) range_ = new Range();
- ASSERT(HasRange());
- r->StackUpon(range_);
- range_ = r;
-}
-
-
-void HValue::RemoveLastAddedRange() {
- ASSERT(HasRange());
- ASSERT(range_->next() != NULL);
- range_ = range_->next();
-}
-
-
-void HValue::ComputeInitialRange() {
- ASSERT(!HasRange());
- range_ = InferRange();
- ASSERT(HasRange());
-}
-
-
-void HInstruction::PrintTo(StringStream* stream) {
- stream->Add("%s", Mnemonic());
- if (HasSideEffects()) stream->Add("*");
- stream->Add(" ");
- PrintDataTo(stream);
-
- if (range() != NULL &&
- !range()->IsMostGeneric() &&
- !range()->CanBeMinusZero()) {
- stream->Add(" range[%d,%d,m0=%d]",
- range()->lower(),
- range()->upper(),
- static_cast<int>(range()->CanBeMinusZero()));
- }
-
- int changes_flags = (flags() & HValue::ChangesFlagsMask());
- if (changes_flags != 0) {
- stream->Add(" changes[0x%x]", changes_flags);
- }
-
- if (representation().IsTagged() && !type().Equals(HType::Tagged())) {
- stream->Add(" type[%s]", type().ToString());
- }
-}
-
-
-void HInstruction::Unlink() {
- ASSERT(IsLinked());
- ASSERT(!IsControlInstruction()); // Must never move control instructions.
- ASSERT(!IsBlockEntry()); // Doesn't make sense to delete these.
- ASSERT(previous_ != NULL);
- previous_->next_ = next_;
- if (next_ == NULL) {
- ASSERT(block()->last() == this);
- block()->set_last(previous_);
- } else {
- next_->previous_ = previous_;
- }
- clear_block();
-}
-
-
-void HInstruction::InsertBefore(HInstruction* next) {
- ASSERT(!IsLinked());
- ASSERT(!next->IsBlockEntry());
- ASSERT(!IsControlInstruction());
- ASSERT(!next->block()->IsStartBlock());
- ASSERT(next->previous_ != NULL);
- HInstruction* prev = next->previous();
- prev->next_ = this;
- next->previous_ = this;
- next_ = next;
- previous_ = prev;
- SetBlock(next->block());
-}
-
-
-void HInstruction::InsertAfter(HInstruction* previous) {
- ASSERT(!IsLinked());
- ASSERT(!previous->IsControlInstruction());
- ASSERT(!IsControlInstruction() || previous->next_ == NULL);
- HBasicBlock* block = previous->block();
- // Never insert anything except constants into the start block after finishing
- // it.
- if (block->IsStartBlock() && block->IsFinished() && !IsConstant()) {
- ASSERT(block->end()->SecondSuccessor() == NULL);
- InsertAfter(block->end()->FirstSuccessor()->first());
- return;
- }
-
- // If we're inserting after an instruction with side-effects that is
- // followed by a simulate instruction, we need to insert after the
- // simulate instruction instead.
- HInstruction* next = previous->next_;
- if (previous->HasSideEffects() && next != NULL) {
- ASSERT(next->IsSimulate());
- previous = next;
- next = previous->next_;
- }
-
- previous_ = previous;
- next_ = next;
- SetBlock(block);
- previous->next_ = this;
- if (next != NULL) next->previous_ = this;
-}
-
-
-#ifdef DEBUG
-void HInstruction::Verify() {
- // Verify that input operands are defined before use.
- HBasicBlock* cur_block = block();
- for (int i = 0; i < OperandCount(); ++i) {
- HValue* other_operand = OperandAt(i);
- HBasicBlock* other_block = other_operand->block();
- if (cur_block == other_block) {
- if (!other_operand->IsPhi()) {
- HInstruction* cur = cur_block->first();
- while (cur != NULL) {
- ASSERT(cur != this); // We should reach other_operand before!
- if (cur == other_operand) break;
- cur = cur->next();
- }
- // Must reach other operand in the same block!
- ASSERT(cur == other_operand);
- }
- } else {
- ASSERT(other_block->Dominates(cur_block));
- }
- }
-
- // Verify that instructions that may have side-effects are followed
- // by a simulate instruction.
- if (HasSideEffects() && !IsOsrEntry()) {
- ASSERT(next()->IsSimulate());
- }
-
- // Verify that instructions that can be eliminated by GVN have overridden
- // HValue::DataEquals. The default implementation is UNREACHABLE. We
- // don't actually care whether DataEquals returns true or false here.
- if (CheckFlag(kUseGVN)) DataEquals(this);
-}
-#endif
-
-
-void HUnaryCall::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- stream->Add(" ");
- stream->Add("#%d", argument_count());
-}
-
-
-void HBinaryCall::PrintDataTo(StringStream* stream) {
- first()->PrintNameTo(stream);
- stream->Add(" ");
- second()->PrintNameTo(stream);
- stream->Add(" ");
- stream->Add("#%d", argument_count());
-}
-
-
-void HCallConstantFunction::PrintDataTo(StringStream* stream) {
- if (IsApplyFunction()) {
- stream->Add("optimized apply ");
- } else {
- stream->Add("%o ", function()->shared()->DebugName());
- }
- stream->Add("#%d", argument_count());
-}
-
-
-void HCallNamed::PrintDataTo(StringStream* stream) {
- stream->Add("%o ", *name());
- HUnaryCall::PrintDataTo(stream);
-}
-
-
-void HCallGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("%o ", *name());
- HUnaryCall::PrintDataTo(stream);
-}
-
-
-void HCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("o ", target()->shared()->DebugName());
- stream->Add("#%d", argument_count());
-}
-
-
-void HCallRuntime::PrintDataTo(StringStream* stream) {
- stream->Add("%o ", *name());
- stream->Add("#%d", argument_count());
-}
-
-
-void HClassOfTest::PrintDataTo(StringStream* stream) {
- stream->Add("class_of_test(");
- value()->PrintNameTo(stream);
- stream->Add(", \"%o\")", *class_name());
-}
-
-
-void HAccessArgumentsAt::PrintDataTo(StringStream* stream) {
- arguments()->PrintNameTo(stream);
- stream->Add("[");
- index()->PrintNameTo(stream);
- stream->Add("], length ");
- length()->PrintNameTo(stream);
-}
-
-
-void HControlInstruction::PrintDataTo(StringStream* stream) {
- if (FirstSuccessor() != NULL) {
- int first_id = FirstSuccessor()->block_id();
- if (SecondSuccessor() == NULL) {
- stream->Add(" B%d", first_id);
- } else {
- int second_id = SecondSuccessor()->block_id();
- stream->Add(" goto (B%d, B%d)", first_id, second_id);
- }
- }
-}
-
-
-void HUnaryControlInstruction::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- HControlInstruction::PrintDataTo(stream);
-}
-
-
-void HCompareMap::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- stream->Add(" (%p)", *map());
- HControlInstruction::PrintDataTo(stream);
-}
-
-
-const char* HUnaryMathOperation::OpName() const {
- switch (op()) {
- case kMathFloor: return "floor";
- case kMathRound: return "round";
- case kMathCeil: return "ceil";
- case kMathAbs: return "abs";
- case kMathLog: return "log";
- case kMathSin: return "sin";
- case kMathCos: return "cos";
- case kMathTan: return "tan";
- case kMathASin: return "asin";
- case kMathACos: return "acos";
- case kMathATan: return "atan";
- case kMathExp: return "exp";
- case kMathSqrt: return "sqrt";
- default: break;
- }
- return "(unknown operation)";
-}
-
-
-void HUnaryMathOperation::PrintDataTo(StringStream* stream) {
- const char* name = OpName();
- stream->Add("%s ", name);
- value()->PrintNameTo(stream);
-}
-
-
-void HUnaryOperation::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
-}
-
-
-void HHasInstanceType::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- switch (from_) {
- case FIRST_JS_OBJECT_TYPE:
- if (to_ == LAST_TYPE) stream->Add(" spec_object");
- break;
- case JS_REGEXP_TYPE:
- if (to_ == JS_REGEXP_TYPE) stream->Add(" reg_exp");
- break;
- case JS_ARRAY_TYPE:
- if (to_ == JS_ARRAY_TYPE) stream->Add(" array");
- break;
- case JS_FUNCTION_TYPE:
- if (to_ == JS_FUNCTION_TYPE) stream->Add(" function");
- break;
- default:
- break;
- }
-}
-
-
-void HTypeofIs::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- stream->Add(" == ");
- stream->Add(type_literal_->ToAsciiVector());
-}
-
-
-void HChange::PrintDataTo(StringStream* stream) {
- HUnaryOperation::PrintDataTo(stream);
- stream->Add(" %s to %s", from_.Mnemonic(), to().Mnemonic());
-
- if (CanTruncateToInt32()) stream->Add(" truncating-int32");
- if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
-}
-
-
-HCheckInstanceType* HCheckInstanceType::NewIsJSObjectOrJSFunction(
- HValue* value) {
- STATIC_ASSERT((LAST_JS_OBJECT_TYPE + 1) == JS_FUNCTION_TYPE);
- return new HCheckInstanceType(value, FIRST_JS_OBJECT_TYPE, JS_FUNCTION_TYPE);
-}
-
-
-void HCheckMap::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- stream->Add(" %p", *map());
-}
-
-
-void HCheckFunction::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- stream->Add(" %p", *target());
-}
-
-
-void HCallStub::PrintDataTo(StringStream* stream) {
- stream->Add("%s ",
- CodeStub::MajorName(major_key_, false));
- HUnaryCall::PrintDataTo(stream);
-}
-
-
-void HInstanceOf::PrintDataTo(StringStream* stream) {
- left()->PrintNameTo(stream);
- stream->Add(" ");
- right()->PrintNameTo(stream);
- stream->Add(" ");
- context()->PrintNameTo(stream);
-}
-
-
-Range* HValue::InferRange() {
- if (representation().IsTagged()) {
- // Tagged values are always in int32 range when converted to integer,
- // but they can contain -0.
- Range* result = new Range();
- result->set_can_be_minus_zero(true);
- return result;
- } else if (representation().IsNone()) {
- return NULL;
- } else {
- // Untagged integer32 cannot be -0 and we don't compute ranges for
- // untagged doubles.
- return new Range();
- }
-}
-
-
-Range* HConstant::InferRange() {
- if (has_int32_value_) {
- Range* result = new Range(int32_value_, int32_value_);
- result->set_can_be_minus_zero(false);
- return result;
- }
- return HValue::InferRange();
-}
-
-
-Range* HPhi::InferRange() {
- if (representation().IsInteger32()) {
- if (block()->IsLoopHeader()) {
- Range* range = new Range(kMinInt, kMaxInt);
- return range;
- } else {
- Range* range = OperandAt(0)->range()->Copy();
- for (int i = 1; i < OperandCount(); ++i) {
- range->Union(OperandAt(i)->range());
- }
- return range;
- }
- } else {
- return HValue::InferRange();
- }
-}
-
-
-Range* HAdd::InferRange() {
- if (representation().IsInteger32()) {
- Range* a = left()->range();
- Range* b = right()->range();
- Range* res = a->Copy();
- if (!res->AddAndCheckOverflow(b)) {
- ClearFlag(kCanOverflow);
- }
- bool m0 = a->CanBeMinusZero() && b->CanBeMinusZero();
- res->set_can_be_minus_zero(m0);
- return res;
- } else {
- return HValue::InferRange();
- }
-}
-
-
-Range* HSub::InferRange() {
- if (representation().IsInteger32()) {
- Range* a = left()->range();
- Range* b = right()->range();
- Range* res = a->Copy();
- if (!res->SubAndCheckOverflow(b)) {
- ClearFlag(kCanOverflow);
- }
- res->set_can_be_minus_zero(a->CanBeMinusZero() && b->CanBeZero());
- return res;
- } else {
- return HValue::InferRange();
- }
-}
-
-
-Range* HMul::InferRange() {
- if (representation().IsInteger32()) {
- Range* a = left()->range();
- Range* b = right()->range();
- Range* res = a->Copy();
- if (!res->MulAndCheckOverflow(b)) {
- ClearFlag(kCanOverflow);
- }
- bool m0 = (a->CanBeZero() && b->CanBeNegative()) ||
- (a->CanBeNegative() && b->CanBeZero());
- res->set_can_be_minus_zero(m0);
- return res;
- } else {
- return HValue::InferRange();
- }
-}
-
-
-Range* HDiv::InferRange() {
- if (representation().IsInteger32()) {
- Range* result = new Range();
- if (left()->range()->CanBeMinusZero()) {
- result->set_can_be_minus_zero(true);
- }
-
- if (left()->range()->CanBeZero() && right()->range()->CanBeNegative()) {
- result->set_can_be_minus_zero(true);
- }
-
- if (right()->range()->Includes(-1) && left()->range()->Includes(kMinInt)) {
- SetFlag(HValue::kCanOverflow);
- }
-
- if (!right()->range()->CanBeZero()) {
- ClearFlag(HValue::kCanBeDivByZero);
- }
- return result;
- } else {
- return HValue::InferRange();
- }
-}
-
-
-Range* HMod::InferRange() {
- if (representation().IsInteger32()) {
- Range* a = left()->range();
- Range* result = new Range();
- if (a->CanBeMinusZero() || a->CanBeNegative()) {
- result->set_can_be_minus_zero(true);
- }
- if (!right()->range()->CanBeZero()) {
- ClearFlag(HValue::kCanBeDivByZero);
- }
- return result;
- } else {
- return HValue::InferRange();
- }
-}
-
-
-void HPhi::PrintTo(StringStream* stream) {
- stream->Add("[");
- for (int i = 0; i < OperandCount(); ++i) {
- HValue* value = OperandAt(i);
- stream->Add(" ");
- value->PrintNameTo(stream);
- stream->Add(" ");
- }
- stream->Add(" uses%d_%di_%dd_%dt]",
- uses()->length(),
- int32_non_phi_uses() + int32_indirect_uses(),
- double_non_phi_uses() + double_indirect_uses(),
- tagged_non_phi_uses() + tagged_indirect_uses());
-}
-
-
-void HPhi::AddInput(HValue* value) {
- inputs_.Add(NULL);
- SetOperandAt(OperandCount() - 1, value);
- // Mark phis that may have 'arguments' directly or indirectly as an operand.
- if (!CheckFlag(kIsArguments) && value->CheckFlag(kIsArguments)) {
- SetFlag(kIsArguments);
- }
-}
-
-
-bool HPhi::HasRealUses() {
- for (int i = 0; i < uses()->length(); i++) {
- if (!uses()->at(i)->IsPhi()) return true;
- }
- return false;
-}
-
-
-HValue* HPhi::GetRedundantReplacement() {
- HValue* candidate = NULL;
- int count = OperandCount();
- int position = 0;
- while (position < count && candidate == NULL) {
- HValue* current = OperandAt(position++);
- if (current != this) candidate = current;
- }
- while (position < count) {
- HValue* current = OperandAt(position++);
- if (current != this && current != candidate) return NULL;
- }
- ASSERT(candidate != this);
- return candidate;
-}
-
-
-void HPhi::DeleteFromGraph() {
- ASSERT(block() != NULL);
- block()->RemovePhi(this);
- ASSERT(block() == NULL);
-}
-
-
-void HPhi::InitRealUses(int phi_id) {
- // Initialize real uses.
- phi_id_ = phi_id;
- for (int j = 0; j < uses()->length(); j++) {
- HValue* use = uses()->at(j);
- if (!use->IsPhi()) {
- int index = use->LookupOperandIndex(0, this);
- Representation req_rep = use->RequiredInputRepresentation(index);
- non_phi_uses_[req_rep.kind()]++;
- }
- }
-}
-
-
-void HPhi::AddNonPhiUsesFrom(HPhi* other) {
- for (int i = 0; i < Representation::kNumRepresentations; i++) {
- indirect_uses_[i] += other->non_phi_uses_[i];
- }
-}
-
-
-void HPhi::AddIndirectUsesTo(int* dest) {
- for (int i = 0; i < Representation::kNumRepresentations; i++) {
- dest[i] += indirect_uses_[i];
- }
-}
-
-
-void HSimulate::PrintDataTo(StringStream* stream) {
- stream->Add("id=%d ", ast_id());
- if (pop_count_ > 0) stream->Add("pop %d", pop_count_);
- if (values_.length() > 0) {
- if (pop_count_ > 0) stream->Add(" /");
- for (int i = 0; i < values_.length(); ++i) {
- if (!HasAssignedIndexAt(i)) {
- stream->Add(" push ");
- } else {
- stream->Add(" var[%d] = ", GetAssignedIndexAt(i));
- }
- values_[i]->PrintNameTo(stream);
- }
- }
-}
-
-
-void HEnterInlined::PrintDataTo(StringStream* stream) {
- SmartPointer<char> name = function()->debug_name()->ToCString();
- stream->Add("%s, id=%d", *name, function()->id());
-}
-
-
-HConstant::HConstant(Handle<Object> handle, Representation r)
- : handle_(handle),
- constant_type_(HType::TypeFromValue(handle)),
- has_int32_value_(false),
- int32_value_(0),
- has_double_value_(false),
- double_value_(0) {
- set_representation(r);
- SetFlag(kUseGVN);
- if (handle_->IsNumber()) {
- double n = handle_->Number();
- double roundtrip_value = static_cast<double>(static_cast<int32_t>(n));
- has_int32_value_ = BitCast<int64_t>(roundtrip_value) == BitCast<int64_t>(n);
- if (has_int32_value_) int32_value_ = static_cast<int32_t>(n);
- double_value_ = n;
- has_double_value_ = true;
- }
-}
-
-
-HConstant* HConstant::CopyToRepresentation(Representation r) const {
- if (r.IsInteger32() && !has_int32_value_) return NULL;
- if (r.IsDouble() && !has_double_value_) return NULL;
- return new HConstant(handle_, r);
-}
-
-
-HConstant* HConstant::CopyToTruncatedInt32() const {
- if (!has_double_value_) return NULL;
- int32_t truncated = NumberToInt32(*handle_);
- return new HConstant(FACTORY->NewNumberFromInt(truncated),
- Representation::Integer32());
-}
-
-
-void HConstant::PrintDataTo(StringStream* stream) {
- handle()->ShortPrint(stream);
-}
-
-
-bool HArrayLiteral::IsCopyOnWrite() const {
- return constant_elements()->map() == HEAP->fixed_cow_array_map();
-}
-
-
-void HBinaryOperation::PrintDataTo(StringStream* stream) {
- left()->PrintNameTo(stream);
- stream->Add(" ");
- right()->PrintNameTo(stream);
- if (CheckFlag(kCanOverflow)) stream->Add(" !");
- if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
-}
-
-
-Range* HBitAnd::InferRange() {
- int32_t left_mask = (left()->range() != NULL)
- ? left()->range()->Mask()
- : 0xffffffff;
- int32_t right_mask = (right()->range() != NULL)
- ? right()->range()->Mask()
- : 0xffffffff;
- int32_t result_mask = left_mask & right_mask;
- return (result_mask >= 0)
- ? new Range(0, result_mask)
- : HValue::InferRange();
-}
-
-
-Range* HBitOr::InferRange() {
- int32_t left_mask = (left()->range() != NULL)
- ? left()->range()->Mask()
- : 0xffffffff;
- int32_t right_mask = (right()->range() != NULL)
- ? right()->range()->Mask()
- : 0xffffffff;
- int32_t result_mask = left_mask | right_mask;
- return (result_mask >= 0)
- ? new Range(0, result_mask)
- : HValue::InferRange();
-}
-
-
-Range* HSar::InferRange() {
- if (right()->IsConstant()) {
- HConstant* c = HConstant::cast(right());
- if (c->HasInteger32Value()) {
- Range* result = (left()->range() != NULL)
- ? left()->range()->Copy()
- : new Range();
- result->Sar(c->Integer32Value());
- return result;
- }
- }
- return HValue::InferRange();
-}
-
-
-Range* HShl::InferRange() {
- if (right()->IsConstant()) {
- HConstant* c = HConstant::cast(right());
- if (c->HasInteger32Value()) {
- Range* result = (left()->range() != NULL)
- ? left()->range()->Copy()
- : new Range();
- result->Shl(c->Integer32Value());
- return result;
- }
- }
- return HValue::InferRange();
-}
-
-
-
-void HCompare::PrintDataTo(StringStream* stream) {
- stream->Add(Token::Name(token()));
- stream->Add(" ");
- HBinaryOperation::PrintDataTo(stream);
-}
-
-
-void HCompare::SetInputRepresentation(Representation r) {
- input_representation_ = r;
- if (r.IsTagged()) {
- SetAllSideEffects();
- ClearFlag(kUseGVN);
- } else {
- ClearAllSideEffects();
- SetFlag(kUseGVN);
- }
-}
-
-
-void HParameter::PrintDataTo(StringStream* stream) {
- stream->Add("%u", index());
-}
-
-
-void HLoadNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
-}
-
-
-HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* object,
- ZoneMapList* types,
- Handle<String> name)
- : HUnaryOperation(object),
- types_(Min(types->length(), kMaxLoadPolymorphism)),
- name_(name),
- need_generic_(false) {
- set_representation(Representation::Tagged());
- SetFlag(kDependsOnMaps);
- for (int i = 0;
- i < types->length() && types_.length() < kMaxLoadPolymorphism;
- ++i) {
- Handle<Map> map = types->at(i);
- LookupResult lookup;
- map->LookupInDescriptors(NULL, *name, &lookup);
- if (lookup.IsProperty() && lookup.type() == FIELD) {
- types_.Add(types->at(i));
- int index = lookup.GetLocalFieldIndexFromMap(*map);
- if (index < 0) {
- SetFlag(kDependsOnInobjectFields);
- } else {
- SetFlag(kDependsOnBackingStoreFields);
- }
- }
- }
-
- if (types_.length() == types->length() && FLAG_deoptimize_uncommon_cases) {
- SetFlag(kUseGVN);
- } else {
- SetAllSideEffects();
- need_generic_ = true;
- }
-}
-
-
-bool HLoadNamedFieldPolymorphic::DataEquals(HValue* value) {
- HLoadNamedFieldPolymorphic* other = HLoadNamedFieldPolymorphic::cast(value);
- if (types_.length() != other->types()->length()) return false;
- if (!name_.is_identical_to(other->name())) return false;
- if (need_generic_ != other->need_generic_) return false;
- for (int i = 0; i < types_.length(); i++) {
- bool found = false;
- for (int j = 0; j < types_.length(); j++) {
- if (types_.at(j).is_identical_to(other->types()->at(i))) {
- found = true;
- break;
- }
- }
- if (!found) return false;
- }
- return true;
-}
-
-
-void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- stream->Add("[");
- key()->PrintNameTo(stream);
- stream->Add("]");
-}
-
-
-void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- stream->Add("[");
- key()->PrintNameTo(stream);
- stream->Add("]");
-}
-
-
-void HLoadKeyedSpecializedArrayElement::PrintDataTo(
- StringStream* stream) {
- external_pointer()->PrintNameTo(stream);
- stream->Add(".");
- switch (array_type()) {
- case kExternalByteArray:
- stream->Add("byte");
- break;
- case kExternalUnsignedByteArray:
- stream->Add("u_byte");
- break;
- case kExternalShortArray:
- stream->Add("short");
- break;
- case kExternalUnsignedShortArray:
- stream->Add("u_short");
- break;
- case kExternalIntArray:
- stream->Add("int");
- break;
- case kExternalUnsignedIntArray:
- stream->Add("u_int");
- break;
- case kExternalFloatArray:
- stream->Add("float");
- break;
- case kExternalPixelArray:
- stream->Add("pixel");
- break;
- }
- stream->Add("[");
- key()->PrintNameTo(stream);
- stream->Add("]");
-}
-
-
-void HStoreNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- stream->Add(".");
- ASSERT(name()->IsString());
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" = ");
- value()->PrintNameTo(stream);
-}
-
-
-void HStoreNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- stream->Add(".");
- ASSERT(name()->IsString());
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" = ");
- value()->PrintNameTo(stream);
- if (!transition().is_null()) {
- stream->Add(" (transition map %p)", *transition());
- }
-}
-
-
-void HStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- stream->Add("[");
- key()->PrintNameTo(stream);
- stream->Add("] = ");
- value()->PrintNameTo(stream);
-}
-
-
-void HStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- stream->Add("[");
- key()->PrintNameTo(stream);
- stream->Add("] = ");
- value()->PrintNameTo(stream);
-}
-
-
-void HStoreKeyedSpecializedArrayElement::PrintDataTo(
- StringStream* stream) {
- external_pointer()->PrintNameTo(stream);
- stream->Add(".");
- switch (array_type()) {
- case kExternalByteArray:
- stream->Add("byte");
- break;
- case kExternalUnsignedByteArray:
- stream->Add("u_byte");
- break;
- case kExternalShortArray:
- stream->Add("short");
- break;
- case kExternalUnsignedShortArray:
- stream->Add("u_short");
- break;
- case kExternalIntArray:
- stream->Add("int");
- break;
- case kExternalUnsignedIntArray:
- stream->Add("u_int");
- break;
- case kExternalFloatArray:
- stream->Add("float");
- break;
- case kExternalPixelArray:
- stream->Add("pixel");
- break;
- }
- stream->Add("[");
- key()->PrintNameTo(stream);
- stream->Add("] = ");
- value()->PrintNameTo(stream);
-}
-
-
-void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
- stream->Add("[%p]", *cell());
- if (check_hole_value()) stream->Add(" (deleteable/read-only)");
-}
-
-
-void HLoadGlobalGeneric::PrintDataTo(StringStream* stream) {
- stream->Add("%o ", *name());
-}
-
-
-void HStoreGlobalCell::PrintDataTo(StringStream* stream) {
- stream->Add("[%p] = ", *cell());
- value()->PrintNameTo(stream);
-}
-
-
-void HStoreGlobalGeneric::PrintDataTo(StringStream* stream) {
- stream->Add("%o = ", *name());
- value()->PrintNameTo(stream);
-}
-
-
-void HLoadContextSlot::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- stream->Add("[%d]", slot_index());
-}
-
-
-void HStoreContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintNameTo(stream);
- stream->Add("[%d] = ", slot_index());
- value()->PrintNameTo(stream);
-}
-
-
-// Implementation of type inference and type conversions. Calculates
-// the inferred type of this instruction based on the input operands.
-
-HType HValue::CalculateInferredType() {
- return type_;
-}
-
-
-HType HCheckMap::CalculateInferredType() {
- return value()->type();
-}
-
-
-HType HCheckFunction::CalculateInferredType() {
- return value()->type();
-}
-
-
-HType HCheckNonSmi::CalculateInferredType() {
- // TODO(kasperl): Is there any way to signal that this isn't a smi?
- return HType::Tagged();
-}
-
-
-HType HCheckSmi::CalculateInferredType() {
- return HType::Smi();
-}
-
-
-HType HPhi::CalculateInferredType() {
- HType result = HType::Uninitialized();
- for (int i = 0; i < OperandCount(); ++i) {
- HType current = OperandAt(i)->type();
- result = result.Combine(current);
- }
- return result;
-}
-
-
-HType HConstant::CalculateInferredType() {
- return constant_type_;
-}
-
-
-HType HCompare::CalculateInferredType() {
- return HType::Boolean();
-}
-
-
-HType HCompareJSObjectEq::CalculateInferredType() {
- return HType::Boolean();
-}
-
-
-HType HUnaryPredicate::CalculateInferredType() {
- return HType::Boolean();
-}
-
-
-HType HBitwiseBinaryOperation::CalculateInferredType() {
- return HType::TaggedNumber();
-}
-
-
-HType HArithmeticBinaryOperation::CalculateInferredType() {
- return HType::TaggedNumber();
-}
-
-
-HType HAdd::CalculateInferredType() {
- return HType::Tagged();
-}
-
-
-HType HBitAnd::CalculateInferredType() {
- return HType::TaggedNumber();
-}
-
-
-HType HBitXor::CalculateInferredType() {
- return HType::TaggedNumber();
-}
-
-
-HType HBitOr::CalculateInferredType() {
- return HType::TaggedNumber();
-}
-
-
-HType HBitNot::CalculateInferredType() {
- return HType::TaggedNumber();
-}
-
-
-HType HUnaryMathOperation::CalculateInferredType() {
- return HType::TaggedNumber();
-}
-
-
-HType HShl::CalculateInferredType() {
- return HType::TaggedNumber();
-}
-
-
-HType HShr::CalculateInferredType() {
- return HType::TaggedNumber();
-}
-
-
-HType HSar::CalculateInferredType() {
- return HType::TaggedNumber();
-}
-
-
-HValue* HUnaryMathOperation::EnsureAndPropagateNotMinusZero(
- BitVector* visited) {
- visited->Add(id());
- if (representation().IsInteger32() &&
- !value()->representation().IsInteger32()) {
- if (value()->range() == NULL || value()->range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- }
- }
- if (RequiredInputRepresentation(0).IsInteger32() &&
- representation().IsInteger32()) {
- return value();
- }
- return NULL;
-}
-
-
-
-HValue* HChange::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- if (from().IsInteger32()) return NULL;
- if (CanTruncateToInt32()) return NULL;
- if (value()->range() == NULL || value()->range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- }
- ASSERT(!from().IsInteger32() || !to().IsInteger32());
- return NULL;
-}
-
-
-HValue* HMod::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- if (range() == NULL || range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- return left();
- }
- return NULL;
-}
-
-
-HValue* HDiv::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- if (range() == NULL || range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- }
- return NULL;
-}
-
-
-HValue* HMul::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- if (range() == NULL || range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- }
- return NULL;
-}
-
-
-HValue* HSub::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- // Propagate to the left argument. If the left argument cannot be -0, then
- // the result of the add operation cannot be either.
- if (range() == NULL || range()->CanBeMinusZero()) {
- return left();
- }
- return NULL;
-}
-
-
-HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- // Propagate to the left argument. If the left argument cannot be -0, then
- // the result of the sub operation cannot be either.
- if (range() == NULL || range()->CanBeMinusZero()) {
- return left();
- }
- return NULL;
-}
-
-
-// Node-specific verification code is only included in debug mode.
-#ifdef DEBUG
-
-void HPhi::Verify() {
- ASSERT(OperandCount() == block()->predecessors()->length());
- for (int i = 0; i < OperandCount(); ++i) {
- HValue* value = OperandAt(i);
- HBasicBlock* defining_block = value->block();
- HBasicBlock* predecessor_block = block()->predecessors()->at(i);
- ASSERT(defining_block == predecessor_block ||
- defining_block->Dominates(predecessor_block));
- }
-}
-
-
-void HSimulate::Verify() {
- HInstruction::Verify();
- ASSERT(HasAstId());
-}
-
-
-void HBoundsCheck::Verify() {
- HInstruction::Verify();
- ASSERT(HasNoUses());
-}
-
-
-void HCheckSmi::Verify() {
- HInstruction::Verify();
- ASSERT(HasNoUses());
-}
-
-
-void HCheckNonSmi::Verify() {
- HInstruction::Verify();
- ASSERT(HasNoUses());
-}
-
-
-void HCheckInstanceType::Verify() {
- HInstruction::Verify();
- ASSERT(HasNoUses());
-}
-
-
-void HCheckMap::Verify() {
- HInstruction::Verify();
- ASSERT(HasNoUses());
-}
-
-
-void HCheckFunction::Verify() {
- HInstruction::Verify();
- ASSERT(HasNoUses());
-}
-
-
-void HCheckPrototypeMaps::Verify() {
- HInstruction::Verify();
- ASSERT(HasNoUses());
-}
-
-#endif
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/hydrogen-instructions.h b/src/3rdparty/v8/src/hydrogen-instructions.h
deleted file mode 100644
index 053ae9e..0000000
--- a/src/3rdparty/v8/src/hydrogen-instructions.h
+++ /dev/null
@@ -1,3657 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_HYDROGEN_INSTRUCTIONS_H_
-#define V8_HYDROGEN_INSTRUCTIONS_H_
-
-#include "v8.h"
-
-#include "code-stubs.h"
-#include "small-pointer-list.h"
-#include "string-stream.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class HBasicBlock;
-class HEnvironment;
-class HInstruction;
-class HLoopInformation;
-class HValue;
-class LInstruction;
-class LChunkBuilder;
-
-
-#define HYDROGEN_ALL_INSTRUCTION_LIST(V) \
- V(ArithmeticBinaryOperation) \
- V(BinaryCall) \
- V(BinaryOperation) \
- V(BitwiseBinaryOperation) \
- V(ControlInstruction) \
- V(Instruction) \
- V(Phi) \
- V(UnaryCall) \
- V(UnaryControlInstruction) \
- V(UnaryOperation) \
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(V)
-
-
-#define HYDROGEN_CONCRETE_INSTRUCTION_LIST(V) \
- V(AbnormalExit) \
- V(AccessArgumentsAt) \
- V(Add) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArgumentsObject) \
- V(ArrayLiteral) \
- V(BitAnd) \
- V(BitNot) \
- V(BitOr) \
- V(BitXor) \
- V(BlockEntry) \
- V(BoundsCheck) \
- V(CallConstantFunction) \
- V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
- V(CallNew) \
- V(CallRuntime) \
- V(CallStub) \
- V(Change) \
- V(CheckFunction) \
- V(CheckInstanceType) \
- V(CheckMap) \
- V(CheckNonSmi) \
- V(CheckPrototypeMaps) \
- V(CheckSmi) \
- V(ClassOfTest) \
- V(Compare) \
- V(CompareJSObjectEq) \
- V(CompareMap) \
- V(Constant) \
- V(Context) \
- V(DeleteProperty) \
- V(Deoptimize) \
- V(Div) \
- V(EnterInlined) \
- V(ExternalArrayLength) \
- V(FixedArrayLength) \
- V(FunctionLiteral) \
- V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
- V(Goto) \
- V(HasInstanceType) \
- V(HasCachedArrayIndex) \
- V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
- V(IsNull) \
- V(IsObject) \
- V(IsSmi) \
- V(IsConstructCall) \
- V(JSArrayLength) \
- V(LeaveInlined) \
- V(LoadContextSlot) \
- V(LoadElements) \
- V(LoadExternalArrayPointer) \
- V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
- V(LoadGlobalGeneric) \
- V(LoadKeyedFastElement) \
- V(LoadKeyedGeneric) \
- V(LoadKeyedSpecializedArrayElement) \
- V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
- V(LoadNamedGeneric) \
- V(Mod) \
- V(Mul) \
- V(ObjectLiteral) \
- V(OsrEntry) \
- V(OuterContext) \
- V(Parameter) \
- V(Power) \
- V(PushArgument) \
- V(RegExpLiteral) \
- V(Return) \
- V(Sar) \
- V(Shl) \
- V(Shr) \
- V(Simulate) \
- V(StackCheck) \
- V(StoreContextSlot) \
- V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
- V(StoreKeyedFastElement) \
- V(StoreKeyedSpecializedArrayElement) \
- V(StoreKeyedGeneric) \
- V(StoreNamedField) \
- V(StoreNamedGeneric) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringLength) \
- V(Sub) \
- V(Test) \
- V(Throw) \
- V(ToFastProperties) \
- V(Typeof) \
- V(TypeofIs) \
- V(UnaryMathOperation) \
- V(UnknownOSRValue) \
- V(ValueOf)
-
-#define GVN_FLAG_LIST(V) \
- V(Calls) \
- V(InobjectFields) \
- V(BackingStoreFields) \
- V(ArrayElements) \
- V(SpecializedArrayElements) \
- V(GlobalVars) \
- V(Maps) \
- V(ArrayLengths) \
- V(ContextSlots) \
- V(OsrEntries)
-
-#define DECLARE_INSTRUCTION(type) \
- virtual bool Is##type() const { return true; } \
- static H##type* cast(HValue* value) { \
- ASSERT(value->Is##type()); \
- return reinterpret_cast<H##type*>(value); \
- } \
- Opcode opcode() const { return HValue::k##type; }
-
-
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual LInstruction* CompileToLithium(LChunkBuilder* builder); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- DECLARE_INSTRUCTION(type)
-
-
-class Range: public ZoneObject {
- public:
- Range()
- : lower_(kMinInt),
- upper_(kMaxInt),
- next_(NULL),
- can_be_minus_zero_(false) { }
-
- Range(int32_t lower, int32_t upper)
- : lower_(lower),
- upper_(upper),
- next_(NULL),
- can_be_minus_zero_(false) { }
-
- int32_t upper() const { return upper_; }
- int32_t lower() const { return lower_; }
- Range* next() const { return next_; }
- Range* CopyClearLower() const { return new Range(kMinInt, upper_); }
- Range* CopyClearUpper() const { return new Range(lower_, kMaxInt); }
- Range* Copy() const { return new Range(lower_, upper_); }
- int32_t Mask() const;
- void set_can_be_minus_zero(bool b) { can_be_minus_zero_ = b; }
- bool CanBeMinusZero() const { return CanBeZero() && can_be_minus_zero_; }
- bool CanBeZero() const { return upper_ >= 0 && lower_ <= 0; }
- bool CanBeNegative() const { return lower_ < 0; }
- bool Includes(int value) const { return lower_ <= value && upper_ >= value; }
- bool IsMostGeneric() const { return lower_ == kMinInt && upper_ == kMaxInt; }
- bool IsInSmiRange() const {
- return lower_ >= Smi::kMinValue && upper_ <= Smi::kMaxValue;
- }
- void KeepOrder();
- void Verify() const;
-
- void StackUpon(Range* other) {
- Intersect(other);
- next_ = other;
- }
-
- void Intersect(Range* other);
- void Union(Range* other);
-
- void AddConstant(int32_t value);
- void Sar(int32_t value);
- void Shl(int32_t value);
- bool AddAndCheckOverflow(Range* other);
- bool SubAndCheckOverflow(Range* other);
- bool MulAndCheckOverflow(Range* other);
-
- private:
- int32_t lower_;
- int32_t upper_;
- Range* next_;
- bool can_be_minus_zero_;
-};
-
-
-class Representation {
- public:
- enum Kind {
- kNone,
- kTagged,
- kDouble,
- kInteger32,
- kExternal,
- kNumRepresentations
- };
-
- Representation() : kind_(kNone) { }
-
- static Representation None() { return Representation(kNone); }
- static Representation Tagged() { return Representation(kTagged); }
- static Representation Integer32() { return Representation(kInteger32); }
- static Representation Double() { return Representation(kDouble); }
- static Representation External() { return Representation(kExternal); }
-
- bool Equals(const Representation& other) {
- return kind_ == other.kind_;
- }
-
- Kind kind() const { return kind_; }
- bool IsNone() const { return kind_ == kNone; }
- bool IsTagged() const { return kind_ == kTagged; }
- bool IsInteger32() const { return kind_ == kInteger32; }
- bool IsDouble() const { return kind_ == kDouble; }
- bool IsExternal() const { return kind_ == kExternal; }
- bool IsSpecialization() const {
- return kind_ == kInteger32 || kind_ == kDouble;
- }
- const char* Mnemonic() const;
-
- private:
- explicit Representation(Kind k) : kind_(k) { }
-
- Kind kind_;
-};
-
-
-class HType {
- public:
- HType() : type_(kUninitialized) { }
-
- static HType Tagged() { return HType(kTagged); }
- static HType TaggedPrimitive() { return HType(kTaggedPrimitive); }
- static HType TaggedNumber() { return HType(kTaggedNumber); }
- static HType Smi() { return HType(kSmi); }
- static HType HeapNumber() { return HType(kHeapNumber); }
- static HType String() { return HType(kString); }
- static HType Boolean() { return HType(kBoolean); }
- static HType NonPrimitive() { return HType(kNonPrimitive); }
- static HType JSArray() { return HType(kJSArray); }
- static HType JSObject() { return HType(kJSObject); }
- static HType Uninitialized() { return HType(kUninitialized); }
-
- // Return the weakest (least precise) common type.
- HType Combine(HType other) {
- return HType(static_cast<Type>(type_ & other.type_));
- }
-
- bool Equals(const HType& other) {
- return type_ == other.type_;
- }
-
- bool IsSubtypeOf(const HType& other) {
- return Combine(other).Equals(other);
- }
-
- bool IsTagged() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kTagged) == kTagged);
- }
-
- bool IsTaggedPrimitive() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kTaggedPrimitive) == kTaggedPrimitive);
- }
-
- bool IsTaggedNumber() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kTaggedNumber) == kTaggedNumber);
- }
-
- bool IsSmi() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kSmi) == kSmi);
- }
-
- bool IsHeapNumber() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kHeapNumber) == kHeapNumber);
- }
-
- bool IsString() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kString) == kString);
- }
-
- bool IsBoolean() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kBoolean) == kBoolean);
- }
-
- bool IsNonPrimitive() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kNonPrimitive) == kNonPrimitive);
- }
-
- bool IsJSArray() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kJSArray) == kJSArray);
- }
-
- bool IsJSObject() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kJSObject) == kJSObject);
- }
-
- bool IsUninitialized() {
- return type_ == kUninitialized;
- }
-
- static HType TypeFromValue(Handle<Object> value);
-
- const char* ToString();
- const char* ToShortString();
-
- private:
- enum Type {
- kTagged = 0x1, // 0000 0000 0000 0001
- kTaggedPrimitive = 0x5, // 0000 0000 0000 0101
- kTaggedNumber = 0xd, // 0000 0000 0000 1101
- kSmi = 0x1d, // 0000 0000 0001 1101
- kHeapNumber = 0x2d, // 0000 0000 0010 1101
- kString = 0x45, // 0000 0000 0100 0101
- kBoolean = 0x85, // 0000 0000 1000 0101
- kNonPrimitive = 0x101, // 0000 0001 0000 0001
- kJSObject = 0x301, // 0000 0011 0000 0001
- kJSArray = 0x701, // 0000 0111 1000 0001
- kUninitialized = 0x1fff // 0001 1111 1111 1111
- };
-
- explicit HType(Type t) : type_(t) { }
-
- Type type_;
-};
-
-
-class HValue: public ZoneObject {
- public:
- static const int kNoNumber = -1;
-
- // There must be one corresponding kDepends flag for every kChanges flag and
- // the order of the kChanges flags must be exactly the same as of the kDepends
- // flags.
- enum Flag {
- // Declare global value numbering flags.
- #define DECLARE_DO(type) kChanges##type, kDependsOn##type,
- GVN_FLAG_LIST(DECLARE_DO)
- #undef DECLARE_DO
- kFlexibleRepresentation,
- kUseGVN,
- kCanOverflow,
- kBailoutOnMinusZero,
- kCanBeDivByZero,
- kIsArguments,
- kTruncatingToInt32,
- kLastFlag = kTruncatingToInt32
- };
-
- STATIC_ASSERT(kLastFlag < kBitsPerInt);
-
- static const int kChangesToDependsFlagsLeftShift = 1;
-
- static int ChangesFlagsMask() {
- int result = 0;
- // Create changes mask.
-#define DECLARE_DO(type) result |= (1 << kChanges##type);
- GVN_FLAG_LIST(DECLARE_DO)
-#undef DECLARE_DO
- return result;
- }
-
- static int DependsFlagsMask() {
- return ConvertChangesToDependsFlags(ChangesFlagsMask());
- }
-
- static int ConvertChangesToDependsFlags(int flags) {
- return flags << kChangesToDependsFlagsLeftShift;
- }
-
- static HValue* cast(HValue* value) { return value; }
-
- enum Opcode {
- // Declare a unique enum value for each hydrogen instruction.
- #define DECLARE_DO(type) k##type,
- HYDROGEN_ALL_INSTRUCTION_LIST(DECLARE_DO)
- #undef DECLARE_DO
- kMaxInstructionClass
- };
-
- HValue() : block_(NULL),
- id_(kNoNumber),
- type_(HType::Tagged()),
- range_(NULL),
- flags_(0) {}
- virtual ~HValue() {}
-
- HBasicBlock* block() const { return block_; }
- void SetBlock(HBasicBlock* block);
-
- int id() const { return id_; }
- void set_id(int id) { id_ = id; }
-
- SmallPointerList<HValue>* uses() { return &uses_; }
-
- virtual bool EmitAtUses() { return false; }
- Representation representation() const { return representation_; }
- void ChangeRepresentation(Representation r) {
- // Representation was already set and is allowed to be changed.
- ASSERT(!representation_.IsNone());
- ASSERT(!r.IsNone());
- ASSERT(CheckFlag(kFlexibleRepresentation));
- RepresentationChanged(r);
- representation_ = r;
- }
-
- HType type() const { return type_; }
- void set_type(HType type) {
- ASSERT(uses_.length() == 0);
- type_ = type;
- }
-
- // An operation needs to override this function iff:
- // 1) it can produce an int32 output.
- // 2) the true value of its output can potentially be minus zero.
- // The implementation must set a flag so that it bails out in the case where
- // it would otherwise output what should be a minus zero as an int32 zero.
- // If the operation also exists in a form that takes int32 and outputs int32
- // then the operation should return its input value so that we can propagate
- // back. There are two operations that need to propagate back to more than
- // one input. They are phi and binary add. They always return NULL and
- // expect the caller to take care of things.
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- return NULL;
- }
-
- bool IsDefinedAfter(HBasicBlock* other) const;
-
- // Operands.
- virtual int OperandCount() = 0;
- virtual HValue* OperandAt(int index) = 0;
- void SetOperandAt(int index, HValue* value);
-
- int LookupOperandIndex(int occurrence_index, HValue* op);
- bool UsesMultipleTimes(HValue* op);
-
- void ReplaceAndDelete(HValue* other);
- void ReplaceValue(HValue* other);
- void ReplaceAtUse(HValue* use, HValue* other);
- void ReplaceFirstAtUse(HValue* use, HValue* other, Representation r);
- bool HasNoUses() const { return uses_.is_empty(); }
- void ClearOperands();
- void Delete();
-
- int flags() const { return flags_; }
- void SetFlag(Flag f) { flags_ |= (1 << f); }
- void ClearFlag(Flag f) { flags_ &= ~(1 << f); }
- bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; }
-
- void SetAllSideEffects() { flags_ |= AllSideEffects(); }
- void ClearAllSideEffects() { flags_ &= ~AllSideEffects(); }
- bool HasSideEffects() const { return (flags_ & AllSideEffects()) != 0; }
-
- Range* range() const { return range_; }
- bool HasRange() const { return range_ != NULL; }
- void AddNewRange(Range* r);
- void RemoveLastAddedRange();
- void ComputeInitialRange();
-
- // Representation helpers.
- virtual Representation RequiredInputRepresentation(int index) const = 0;
-
- virtual Representation InferredRepresentation() {
- return representation();
- }
-
- // This gives the instruction an opportunity to replace itself with an
- // instruction that does the same in some better way. To replace an
- // instruction with a new one, first add the new instruction to the graph,
- // then return it. Return NULL to have the instruction deleted.
- virtual HValue* Canonicalize() { return this; }
-
- // Declare virtual type testers.
-#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
- HYDROGEN_ALL_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- bool Equals(HValue* other);
- virtual intptr_t Hashcode();
-
- // Printing support.
- virtual void PrintTo(StringStream* stream) = 0;
- void PrintNameTo(StringStream* stream);
- static void PrintTypeTo(HType type, StringStream* stream);
-
- virtual const char* Mnemonic() const = 0;
- virtual Opcode opcode() const = 0;
-
- // Updated the inferred type of this instruction and returns true if
- // it has changed.
- bool UpdateInferredType();
-
- virtual HType CalculateInferredType();
-
-#ifdef DEBUG
- virtual void Verify() = 0;
-#endif
-
- protected:
- // This function must be overridden for instructions with flag kUseGVN, to
- // compare the non-Operand parts of the instruction.
- virtual bool DataEquals(HValue* other) {
- UNREACHABLE();
- return false;
- }
- virtual void RepresentationChanged(Representation to) { }
- virtual Range* InferRange();
- virtual void DeleteFromGraph() = 0;
- virtual void InternalSetOperandAt(int index, HValue* value) = 0;
- void clear_block() {
- ASSERT(block_ != NULL);
- block_ = NULL;
- }
-
- void set_representation(Representation r) {
- // Representation is set-once.
- ASSERT(representation_.IsNone() && !r.IsNone());
- representation_ = r;
- }
-
- private:
- // A flag mask to mark an instruction as having arbitrary side effects.
- static int AllSideEffects() {
- return ChangesFlagsMask() & ~(1 << kChangesOsrEntries);
- }
-
- void InternalReplaceAtUse(HValue* use, HValue* other);
- void RegisterUse(int index, HValue* new_value);
-
- HBasicBlock* block_;
-
- // The id of this instruction in the hydrogen graph, assigned when first
- // added to the graph. Reflects creation order.
- int id_;
-
- Representation representation_;
- SmallPointerList<HValue> uses_;
- HType type_;
- Range* range_;
- int flags_;
-
- DISALLOW_COPY_AND_ASSIGN(HValue);
-};
-
-
-class HInstruction: public HValue {
- public:
- HInstruction* next() const { return next_; }
- HInstruction* previous() const { return previous_; }
-
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream) { }
-
- bool IsLinked() const { return block() != NULL; }
- void Unlink();
- void InsertBefore(HInstruction* next);
- void InsertAfter(HInstruction* previous);
-
- int position() const { return position_; }
- bool has_position() const { return position_ != RelocInfo::kNoPosition; }
- void set_position(int position) { position_ = position; }
-
- virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
-
-#ifdef DEBUG
- virtual void Verify();
-#endif
-
- // Returns whether this is some kind of deoptimizing check
- // instruction.
- virtual bool IsCheckInstruction() const { return false; }
-
- virtual bool IsCall() { return false; }
-
- DECLARE_INSTRUCTION(Instruction)
-
- protected:
- HInstruction()
- : next_(NULL),
- previous_(NULL),
- position_(RelocInfo::kNoPosition) {
- SetFlag(kDependsOnOsrEntries);
- }
-
- virtual void DeleteFromGraph() { Unlink(); }
-
- private:
- void InitializeAsFirst(HBasicBlock* block) {
- ASSERT(!IsLinked());
- SetBlock(block);
- }
-
- HInstruction* next_;
- HInstruction* previous_;
- int position_;
-
- friend class HBasicBlock;
-};
-
-
-class HControlInstruction: public HInstruction {
- public:
- HControlInstruction(HBasicBlock* first, HBasicBlock* second)
- : first_successor_(first), second_successor_(second) {
- }
-
- HBasicBlock* FirstSuccessor() const { return first_successor_; }
- HBasicBlock* SecondSuccessor() const { return second_successor_; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_INSTRUCTION(ControlInstruction)
-
- private:
- HBasicBlock* first_successor_;
- HBasicBlock* second_successor_;
-};
-
-
-template<int NumElements>
-class HOperandContainer {
- public:
- HOperandContainer() : elems_() { }
-
- int length() { return NumElements; }
- HValue*& operator[](int i) {
- ASSERT(i < length());
- return elems_[i];
- }
-
- private:
- HValue* elems_[NumElements];
-};
-
-
-template<>
-class HOperandContainer<0> {
- public:
- int length() { return 0; }
- HValue*& operator[](int i) {
- UNREACHABLE();
- static HValue* t = 0;
- return t;
- }
-};
-
-
-template<int V>
-class HTemplateInstruction : public HInstruction {
- public:
- int OperandCount() { return V; }
- HValue* OperandAt(int i) { return inputs_[i]; }
-
- protected:
- void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; }
-
- private:
- HOperandContainer<V> inputs_;
-};
-
-
-template<int V>
-class HTemplateControlInstruction : public HControlInstruction {
- public:
- HTemplateControlInstruction<V>(HBasicBlock* first, HBasicBlock* second)
- : HControlInstruction(first, second) { }
- int OperandCount() { return V; }
- HValue* OperandAt(int i) { return inputs_[i]; }
-
- protected:
- void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; }
-
- private:
- HOperandContainer<V> inputs_;
-};
-
-
-class HBlockEntry: public HTemplateInstruction<0> {
- public:
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(BlockEntry, "block_entry")
-};
-
-
-class HDeoptimize: public HControlInstruction {
- public:
- explicit HDeoptimize(int environment_length)
- : HControlInstruction(NULL, NULL),
- values_(environment_length) { }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- virtual int OperandCount() { return values_.length(); }
- virtual HValue* OperandAt(int index) { return values_[index]; }
-
- void AddEnvironmentValue(HValue* value) {
- values_.Add(NULL);
- SetOperandAt(values_.length() - 1, value);
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
-
- protected:
- virtual void InternalSetOperandAt(int index, HValue* value) {
- values_[index] = value;
- }
-
- private:
- ZoneList<HValue*> values_;
-};
-
-
-class HGoto: public HTemplateControlInstruction<0> {
- public:
- explicit HGoto(HBasicBlock* target)
- : HTemplateControlInstruction<0>(target, NULL),
- include_stack_check_(false) { }
-
- void set_include_stack_check(bool include_stack_check) {
- include_stack_check_ = include_stack_check;
- }
- bool include_stack_check() const { return include_stack_check_; }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
-
- private:
- bool include_stack_check_;
-};
-
-
-class HUnaryControlInstruction: public HTemplateControlInstruction<1> {
- public:
- explicit HUnaryControlInstruction(HValue* value,
- HBasicBlock* true_target,
- HBasicBlock* false_target)
- : HTemplateControlInstruction<1>(true_target, false_target) {
- SetOperandAt(0, value);
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- HValue* value() { return OperandAt(0); }
-
- DECLARE_INSTRUCTION(UnaryControlInstruction)
-};
-
-
-class HTest: public HUnaryControlInstruction {
- public:
- HTest(HValue* value, HBasicBlock* true_target, HBasicBlock* false_target)
- : HUnaryControlInstruction(value, true_target, false_target) {
- ASSERT(true_target != NULL && false_target != NULL);
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Test, "test")
-};
-
-
-class HCompareMap: public HUnaryControlInstruction {
- public:
- HCompareMap(HValue* value,
- Handle<Map> map,
- HBasicBlock* true_target,
- HBasicBlock* false_target)
- : HUnaryControlInstruction(value, true_target, false_target),
- map_(map) {
- ASSERT(true_target != NULL);
- ASSERT(false_target != NULL);
- ASSERT(!map.is_null());
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<Map> map() const { return map_; }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareMap, "compare_map")
-
- private:
- Handle<Map> map_;
-};
-
-
-class HReturn: public HUnaryControlInstruction {
- public:
- explicit HReturn(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) {
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Return, "return")
-};
-
-
-class HAbnormalExit: public HTemplateControlInstruction<0> {
- public:
- HAbnormalExit() : HTemplateControlInstruction<0>(NULL, NULL) { }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AbnormalExit, "abnormal_exit")
-};
-
-
-class HUnaryOperation: public HTemplateInstruction<1> {
- public:
- explicit HUnaryOperation(HValue* value) {
- SetOperandAt(0, value);
- }
-
- HValue* value() { return OperandAt(0); }
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_INSTRUCTION(UnaryOperation)
-};
-
-
-class HThrow: public HUnaryOperation {
- public:
- explicit HThrow(HValue* value) : HUnaryOperation(value) {
- SetAllSideEffects();
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
-};
-
-
-class HChange: public HUnaryOperation {
- public:
- HChange(HValue* value,
- Representation from,
- Representation to,
- bool is_truncating)
- : HUnaryOperation(value), from_(from) {
- ASSERT(!from.IsNone() && !to.IsNone());
- ASSERT(!from.Equals(to));
- set_representation(to);
- SetFlag(kUseGVN);
- if (is_truncating) SetFlag(kTruncatingToInt32);
- if (from.IsInteger32() && to.IsTagged() && value->range() != NULL &&
- value->range()->IsInSmiRange()) {
- set_type(HType::Smi());
- }
- }
-
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
-
- Representation from() const { return from_; }
- Representation to() const { return representation(); }
- virtual Representation RequiredInputRepresentation(int index) const {
- return from_;
- }
-
- bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(Change,
- CanTruncateToInt32() ? "truncate" : "change")
-
- protected:
- virtual bool DataEquals(HValue* other) {
- if (!other->IsChange()) return false;
- HChange* change = HChange::cast(other);
- return value() == change->value()
- && to().Equals(change->to());
- }
-
- private:
- Representation from_;
-};
-
-
-class HSimulate: public HInstruction {
- public:
- HSimulate(int ast_id, int pop_count)
- : ast_id_(ast_id),
- pop_count_(pop_count),
- values_(2),
- assigned_indexes_(2) {}
- virtual ~HSimulate() {}
-
- virtual void PrintDataTo(StringStream* stream);
-
- bool HasAstId() const { return ast_id_ != AstNode::kNoNumber; }
- int ast_id() const { return ast_id_; }
- void set_ast_id(int id) {
- ASSERT(!HasAstId());
- ast_id_ = id;
- }
-
- int pop_count() const { return pop_count_; }
- const ZoneList<HValue*>* values() const { return &values_; }
- int GetAssignedIndexAt(int index) const {
- ASSERT(HasAssignedIndexAt(index));
- return assigned_indexes_[index];
- }
- bool HasAssignedIndexAt(int index) const {
- return assigned_indexes_[index] != kNoIndex;
- }
- void AddAssignedValue(int index, HValue* value) {
- AddValue(index, value);
- }
- void AddPushedValue(HValue* value) {
- AddValue(kNoIndex, value);
- }
- virtual int OperandCount() { return values_.length(); }
- virtual HValue* OperandAt(int index) { return values_[index]; }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Simulate, "simulate")
-
-#ifdef DEBUG
- virtual void Verify();
-#endif
-
- protected:
- virtual void InternalSetOperandAt(int index, HValue* value) {
- values_[index] = value;
- }
-
- private:
- static const int kNoIndex = -1;
- void AddValue(int index, HValue* value) {
- assigned_indexes_.Add(index);
- // Resize the list of pushed values.
- values_.Add(NULL);
- // Set the operand through the base method in HValue to make sure that the
- // use lists are correctly updated.
- SetOperandAt(values_.length() - 1, value);
- }
- int ast_id_;
- int pop_count_;
- ZoneList<HValue*> values_;
- ZoneList<int> assigned_indexes_;
-};
-
-
-class HStackCheck: public HTemplateInstruction<0> {
- public:
- HStackCheck() { }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack_check")
-};
-
-
-class HEnterInlined: public HTemplateInstruction<0> {
- public:
- HEnterInlined(Handle<JSFunction> closure, FunctionLiteral* function)
- : closure_(closure), function_(function) {
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> closure() const { return closure_; }
- FunctionLiteral* function() const { return function_; }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(EnterInlined, "enter_inlined")
-
- private:
- Handle<JSFunction> closure_;
- FunctionLiteral* function_;
-};
-
-
-class HLeaveInlined: public HTemplateInstruction<0> {
- public:
- HLeaveInlined() {}
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LeaveInlined, "leave_inlined")
-};
-
-
-class HPushArgument: public HUnaryOperation {
- public:
- explicit HPushArgument(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- HValue* argument() { return OperandAt(0); }
-
- DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push_argument")
-};
-
-
-class HContext: public HTemplateInstruction<0> {
- public:
- HContext() {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Context, "context");
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HOuterContext: public HUnaryOperation {
- public:
- explicit HOuterContext(HValue* inner) : HUnaryOperation(inner) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer_context");
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HGlobalObject: public HUnaryOperation {
- public:
- explicit HGlobalObject(HValue* context) : HUnaryOperation(context) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global_object")
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HGlobalReceiver: public HUnaryOperation {
- public:
- explicit HGlobalReceiver(HValue* global_object)
- : HUnaryOperation(global_object) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global_receiver")
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-template <int V>
-class HCall: public HTemplateInstruction<V> {
- public:
- // The argument count includes the receiver.
- explicit HCall<V>(int argument_count) : argument_count_(argument_count) {
- this->set_representation(Representation::Tagged());
- this->SetAllSideEffects();
- }
-
- virtual HType CalculateInferredType() { return HType::Tagged(); }
-
- virtual int argument_count() const { return argument_count_; }
-
- virtual bool IsCall() { return true; }
-
- private:
- int argument_count_;
-};
-
-
-class HUnaryCall: public HCall<1> {
- public:
- HUnaryCall(HValue* value, int argument_count)
- : HCall<1>(argument_count) {
- SetOperandAt(0, value);
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- HValue* value() { return OperandAt(0); }
-
- DECLARE_INSTRUCTION(UnaryCall)
-};
-
-
-class HBinaryCall: public HCall<2> {
- public:
- HBinaryCall(HValue* first, HValue* second, int argument_count)
- : HCall<2>(argument_count) {
- SetOperandAt(0, first);
- SetOperandAt(1, second);
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- HValue* first() { return OperandAt(0); }
- HValue* second() { return OperandAt(1); }
-
- DECLARE_INSTRUCTION(BinaryCall)
-};
-
-
-class HCallConstantFunction: public HCall<0> {
- public:
- HCallConstantFunction(Handle<JSFunction> function, int argument_count)
- : HCall<0>(argument_count), function_(function) { }
-
- Handle<JSFunction> function() const { return function_; }
-
- bool IsApplyFunction() const {
- return function_->code() ==
- Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply);
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call_constant_function")
-
- private:
- Handle<JSFunction> function_;
-};
-
-
-class HCallKeyed: public HBinaryCall {
- public:
- HCallKeyed(HValue* context, HValue* key, int argument_count)
- : HBinaryCall(context, key, argument_count) {
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- HValue* context() { return first(); }
- HValue* key() { return second(); }
-
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call_keyed")
-};
-
-
-class HCallNamed: public HUnaryCall {
- public:
- HCallNamed(HValue* context, Handle<String> name, int argument_count)
- : HUnaryCall(context, argument_count), name_(name) {
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- HValue* context() { return value(); }
- Handle<String> name() const { return name_; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call_named")
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- private:
- Handle<String> name_;
-};
-
-
-class HCallFunction: public HUnaryCall {
- public:
- HCallFunction(HValue* context, int argument_count)
- : HUnaryCall(context, argument_count) {
- }
-
- HValue* context() { return value(); }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call_function")
-};
-
-
-class HCallGlobal: public HUnaryCall {
- public:
- HCallGlobal(HValue* context, Handle<String> name, int argument_count)
- : HUnaryCall(context, argument_count), name_(name) {
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- HValue* context() { return value(); }
- Handle<String> name() const { return name_; }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call_global")
-
- private:
- Handle<String> name_;
-};
-
-
-class HCallKnownGlobal: public HCall<0> {
- public:
- HCallKnownGlobal(Handle<JSFunction> target, int argument_count)
- : HCall<0>(argument_count), target_(target) { }
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> target() const { return target_; }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call_known_global")
-
- private:
- Handle<JSFunction> target_;
-};
-
-
-class HCallNew: public HBinaryCall {
- public:
- HCallNew(HValue* context, HValue* constructor, int argument_count)
- : HBinaryCall(context, constructor, argument_count) {
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- HValue* context() { return first(); }
- HValue* constructor() { return second(); }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call_new")
-};
-
-
-class HCallRuntime: public HCall<0> {
- public:
- HCallRuntime(Handle<String> name,
- const Runtime::Function* c_function,
- int argument_count)
- : HCall<0>(argument_count), c_function_(c_function), name_(name) { }
- virtual void PrintDataTo(StringStream* stream);
-
- const Runtime::Function* function() const { return c_function_; }
- Handle<String> name() const { return name_; }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call_runtime")
-
- private:
- const Runtime::Function* c_function_;
- Handle<String> name_;
-};
-
-
-class HJSArrayLength: public HUnaryOperation {
- public:
- explicit HJSArrayLength(HValue* value) : HUnaryOperation(value) {
- // The length of an array is stored as a tagged value in the array
- // object. It is guaranteed to be 32 bit integer, but it can be
- // represented as either a smi or heap number.
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetFlag(kDependsOnArrayLengths);
- SetFlag(kDependsOnMaps);
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js_array_length")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HFixedArrayLength: public HUnaryOperation {
- public:
- explicit HFixedArrayLength(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetFlag(kDependsOnArrayLengths);
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed_array_length")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HExternalArrayLength: public HUnaryOperation {
- public:
- explicit HExternalArrayLength(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Integer32());
- // The result of this instruction is idempotent as long as its inputs don't
- // change. The length of a pixel array cannot change once set, so it's not
- // necessary to introduce a kDependsOnArrayLengths or any other dependency.
- SetFlag(kUseGVN);
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ExternalArrayLength, "external_array_length")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HBitNot: public HUnaryOperation {
- public:
- explicit HBitNot(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- SetFlag(kTruncatingToInt32);
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Integer32();
- }
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(BitNot, "bit_not")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HUnaryMathOperation: public HUnaryOperation {
- public:
- HUnaryMathOperation(HValue* value, BuiltinFunctionId op)
- : HUnaryOperation(value), op_(op) {
- switch (op) {
- case kMathFloor:
- case kMathRound:
- case kMathCeil:
- set_representation(Representation::Integer32());
- break;
- case kMathAbs:
- set_representation(Representation::Tagged());
- SetFlag(kFlexibleRepresentation);
- break;
- case kMathSqrt:
- case kMathPowHalf:
- case kMathLog:
- case kMathSin:
- case kMathCos:
- set_representation(Representation::Double());
- break;
- default:
- UNREACHABLE();
- }
- SetFlag(kUseGVN);
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual HType CalculateInferredType();
-
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
-
- virtual Representation RequiredInputRepresentation(int index) const {
- switch (op_) {
- case kMathFloor:
- case kMathRound:
- case kMathCeil:
- case kMathSqrt:
- case kMathPowHalf:
- case kMathLog:
- case kMathSin:
- case kMathCos:
- return Representation::Double();
- case kMathAbs:
- return representation();
- default:
- UNREACHABLE();
- return Representation::None();
- }
- }
-
- virtual HValue* Canonicalize() {
- // If the input is integer32 then we replace the floor instruction
- // with its inputs. This happens before the representation changes are
- // introduced.
- if (op() == kMathFloor) {
- if (value()->representation().IsInteger32()) return value();
- }
- return this;
- }
-
- BuiltinFunctionId op() const { return op_; }
- const char* OpName() const;
-
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary_math_operation")
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HUnaryMathOperation* b = HUnaryMathOperation::cast(other);
- return op_ == b->op();
- }
-
- private:
- BuiltinFunctionId op_;
-};
-
-
-class HLoadElements: public HUnaryOperation {
- public:
- explicit HLoadElements(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetFlag(kDependsOnMaps);
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HLoadExternalArrayPointer: public HUnaryOperation {
- public:
- explicit HLoadExternalArrayPointer(HValue* value)
- : HUnaryOperation(value) {
- set_representation(Representation::External());
- // The result of this instruction is idempotent as long as its inputs don't
- // change. The external array of a specialized array elements object cannot
- // change once set, so it's no necessary to introduce any additional
- // dependencies on top of the inputs.
- SetFlag(kUseGVN);
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
- "load-external-array-pointer")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HCheckMap: public HUnaryOperation {
- public:
- HCheckMap(HValue* value, Handle<Map> map)
- : HUnaryOperation(value), map_(map) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetFlag(kDependsOnMaps);
- }
-
- virtual bool IsCheckInstruction() const { return true; }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
- virtual void PrintDataTo(StringStream* stream);
- virtual HType CalculateInferredType();
-
-#ifdef DEBUG
- virtual void Verify();
-#endif
-
- Handle<Map> map() const { return map_; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check_map")
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HCheckMap* b = HCheckMap::cast(other);
- return map_.is_identical_to(b->map());
- }
-
- private:
- Handle<Map> map_;
-};
-
-
-class HCheckFunction: public HUnaryOperation {
- public:
- HCheckFunction(HValue* value, Handle<JSFunction> function)
- : HUnaryOperation(value), target_(function) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- virtual bool IsCheckInstruction() const { return true; }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
- virtual void PrintDataTo(StringStream* stream);
- virtual HType CalculateInferredType();
-
-#ifdef DEBUG
- virtual void Verify();
-#endif
-
- Handle<JSFunction> target() const { return target_; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check_function")
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HCheckFunction* b = HCheckFunction::cast(other);
- return target_.is_identical_to(b->target());
- }
-
- private:
- Handle<JSFunction> target_;
-};
-
-
-class HCheckInstanceType: public HUnaryOperation {
- public:
- // Check that the instance type is in the range [first, last] where
- // both first and last are included.
- HCheckInstanceType(HValue* value, InstanceType first, InstanceType last)
- : HUnaryOperation(value), first_(first), last_(last) {
- ASSERT(first <= last);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- if ((FIRST_STRING_TYPE < first && last <= LAST_STRING_TYPE) ||
- (FIRST_STRING_TYPE <= first && last < LAST_STRING_TYPE)) {
- // A particular string instance type can change because of GC or
- // externalization, but the value still remains a string.
- SetFlag(kDependsOnMaps);
- }
- }
-
- virtual bool IsCheckInstruction() const { return true; }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
-#ifdef DEBUG
- virtual void Verify();
-#endif
-
- static HCheckInstanceType* NewIsJSObjectOrJSFunction(HValue* value);
-
- InstanceType first() const { return first_; }
- InstanceType last() const { return last_; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check_instance_type")
-
- protected:
- // TODO(ager): It could be nice to allow the ommision of instance
- // type checks if we have already performed an instance type check
- // with a larger range.
- virtual bool DataEquals(HValue* other) {
- HCheckInstanceType* b = HCheckInstanceType::cast(other);
- return (first_ == b->first()) && (last_ == b->last());
- }
-
- private:
- InstanceType first_;
- InstanceType last_;
-};
-
-
-class HCheckNonSmi: public HUnaryOperation {
- public:
- explicit HCheckNonSmi(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- virtual bool IsCheckInstruction() const { return true; }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- virtual HType CalculateInferredType();
-
-#ifdef DEBUG
- virtual void Verify();
-#endif
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check_non_smi")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HCheckPrototypeMaps: public HTemplateInstruction<0> {
- public:
- HCheckPrototypeMaps(Handle<JSObject> prototype, Handle<JSObject> holder)
- : prototype_(prototype), holder_(holder) {
- SetFlag(kUseGVN);
- SetFlag(kDependsOnMaps);
- }
-
- virtual bool IsCheckInstruction() const { return true; }
-
-#ifdef DEBUG
- virtual void Verify();
-#endif
-
- Handle<JSObject> prototype() const { return prototype_; }
- Handle<JSObject> holder() const { return holder_; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check_prototype_maps")
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- virtual intptr_t Hashcode() {
- ASSERT(!HEAP->IsAllocationAllowed());
- intptr_t hash = reinterpret_cast<intptr_t>(*prototype());
- hash = 17 * hash + reinterpret_cast<intptr_t>(*holder());
- return hash;
- }
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HCheckPrototypeMaps* b = HCheckPrototypeMaps::cast(other);
- return prototype_.is_identical_to(b->prototype()) &&
- holder_.is_identical_to(b->holder());
- }
-
- private:
- Handle<JSObject> prototype_;
- Handle<JSObject> holder_;
-};
-
-
-class HCheckSmi: public HUnaryOperation {
- public:
- explicit HCheckSmi(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- virtual bool IsCheckInstruction() const { return true; }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
- virtual HType CalculateInferredType();
-
-#ifdef DEBUG
- virtual void Verify();
-#endif
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check_smi")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HPhi: public HValue {
- public:
- explicit HPhi(int merged_index)
- : inputs_(2),
- merged_index_(merged_index),
- phi_id_(-1),
- is_live_(false) {
- for (int i = 0; i < Representation::kNumRepresentations; i++) {
- non_phi_uses_[i] = 0;
- indirect_uses_[i] = 0;
- }
- ASSERT(merged_index >= 0);
- set_representation(Representation::Tagged());
- SetFlag(kFlexibleRepresentation);
- }
-
- virtual Representation InferredRepresentation() {
- bool double_occurred = false;
- bool int32_occurred = false;
- for (int i = 0; i < OperandCount(); ++i) {
- HValue* value = OperandAt(i);
- if (value->representation().IsDouble()) double_occurred = true;
- if (value->representation().IsInteger32()) int32_occurred = true;
- if (value->representation().IsTagged()) return Representation::Tagged();
- }
-
- if (double_occurred) return Representation::Double();
- if (int32_occurred) return Representation::Integer32();
- return Representation::None();
- }
-
- virtual Range* InferRange();
- virtual Representation RequiredInputRepresentation(int index) const {
- return representation();
- }
- virtual HType CalculateInferredType();
- virtual int OperandCount() { return inputs_.length(); }
- virtual HValue* OperandAt(int index) { return inputs_[index]; }
- HValue* GetRedundantReplacement();
- void AddInput(HValue* value);
- bool HasRealUses();
-
- bool IsReceiver() { return merged_index_ == 0; }
-
- int merged_index() const { return merged_index_; }
-
- virtual const char* Mnemonic() const { return "phi"; }
-
- virtual void PrintTo(StringStream* stream);
-
-#ifdef DEBUG
- virtual void Verify();
-#endif
-
- DECLARE_INSTRUCTION(Phi)
-
- void InitRealUses(int id);
- void AddNonPhiUsesFrom(HPhi* other);
- void AddIndirectUsesTo(int* use_count);
-
- int tagged_non_phi_uses() const {
- return non_phi_uses_[Representation::kTagged];
- }
- int int32_non_phi_uses() const {
- return non_phi_uses_[Representation::kInteger32];
- }
- int double_non_phi_uses() const {
- return non_phi_uses_[Representation::kDouble];
- }
- int tagged_indirect_uses() const {
- return indirect_uses_[Representation::kTagged];
- }
- int int32_indirect_uses() const {
- return indirect_uses_[Representation::kInteger32];
- }
- int double_indirect_uses() const {
- return indirect_uses_[Representation::kDouble];
- }
- int phi_id() { return phi_id_; }
- bool is_live() { return is_live_; }
- void set_is_live(bool b) { is_live_ = b; }
-
- protected:
- virtual void DeleteFromGraph();
- virtual void InternalSetOperandAt(int index, HValue* value) {
- inputs_[index] = value;
- }
-
- private:
- ZoneList<HValue*> inputs_;
- int merged_index_;
-
- int non_phi_uses_[Representation::kNumRepresentations];
- int indirect_uses_[Representation::kNumRepresentations];
- int phi_id_;
- bool is_live_;
-};
-
-
-class HArgumentsObject: public HTemplateInstruction<0> {
- public:
- HArgumentsObject() {
- set_representation(Representation::Tagged());
- SetFlag(kIsArguments);
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject, "arguments-object")
-};
-
-
-class HConstant: public HTemplateInstruction<0> {
- public:
- HConstant(Handle<Object> handle, Representation r);
-
- Handle<Object> handle() const { return handle_; }
-
- bool InOldSpace() const { return !HEAP->InNewSpace(*handle_); }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- virtual bool EmitAtUses() { return !representation().IsDouble(); }
- virtual void PrintDataTo(StringStream* stream);
- virtual HType CalculateInferredType();
- bool IsInteger() const { return handle_->IsSmi(); }
- HConstant* CopyToRepresentation(Representation r) const;
- HConstant* CopyToTruncatedInt32() const;
- bool HasInteger32Value() const { return has_int32_value_; }
- int32_t Integer32Value() const {
- ASSERT(HasInteger32Value());
- return int32_value_;
- }
- bool HasDoubleValue() const { return has_double_value_; }
- double DoubleValue() const {
- ASSERT(HasDoubleValue());
- return double_value_;
- }
- bool HasStringValue() const { return handle_->IsString(); }
-
- virtual intptr_t Hashcode() {
- ASSERT(!HEAP->allow_allocation(false));
- return reinterpret_cast<intptr_t>(*handle());
- }
-
-#ifdef DEBUG
- virtual void Verify() { }
-#endif
-
- DECLARE_CONCRETE_INSTRUCTION(Constant, "constant")
-
- protected:
- virtual Range* InferRange();
-
- virtual bool DataEquals(HValue* other) {
- HConstant* other_constant = HConstant::cast(other);
- return handle().is_identical_to(other_constant->handle());
- }
-
- private:
- Handle<Object> handle_;
- HType constant_type_;
-
- // The following two values represent the int32 and the double value of the
- // given constant if there is a lossless conversion between the constant
- // and the specific representation.
- bool has_int32_value_;
- int32_t int32_value_;
- bool has_double_value_;
- double double_value_;
-};
-
-
-class HBinaryOperation: public HTemplateInstruction<2> {
- public:
- HBinaryOperation(HValue* left, HValue* right) {
- ASSERT(left != NULL && right != NULL);
- SetOperandAt(0, left);
- SetOperandAt(1, right);
- }
-
- HValue* left() { return OperandAt(0); }
- HValue* right() { return OperandAt(1); }
-
- // TODO(kasperl): Move these helpers to the IA-32 Lithium
- // instruction sequence builder.
- HValue* LeastConstantOperand() {
- if (IsCommutative() && left()->IsConstant()) return right();
- return left();
- }
- HValue* MostConstantOperand() {
- if (IsCommutative() && left()->IsConstant()) return left();
- return right();
- }
-
- virtual bool IsCommutative() const { return false; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_INSTRUCTION(BinaryOperation)
-};
-
-
-class HApplyArguments: public HTemplateInstruction<4> {
- public:
- HApplyArguments(HValue* function,
- HValue* receiver,
- HValue* length,
- HValue* elements) {
- set_representation(Representation::Tagged());
- SetOperandAt(0, function);
- SetOperandAt(1, receiver);
- SetOperandAt(2, length);
- SetOperandAt(3, elements);
- SetAllSideEffects();
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- // The length is untagged, all other inputs are tagged.
- return (index == 2)
- ? Representation::Integer32()
- : Representation::Tagged();
- }
-
- HValue* function() { return OperandAt(0); }
- HValue* receiver() { return OperandAt(1); }
- HValue* length() { return OperandAt(2); }
- HValue* elements() { return OperandAt(3); }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply_arguments")
-};
-
-
-class HArgumentsElements: public HTemplateInstruction<0> {
- public:
- HArgumentsElements() {
- // The value produced by this instruction is a pointer into the stack
- // that looks as if it was a smi because of alignment.
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments_elements")
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HArgumentsLength: public HUnaryOperation {
- public:
- explicit HArgumentsLength(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments_length")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HAccessArgumentsAt: public HTemplateInstruction<3> {
- public:
- HAccessArgumentsAt(HValue* arguments, HValue* length, HValue* index) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetOperandAt(0, arguments);
- SetOperandAt(1, length);
- SetOperandAt(2, index);
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) const {
- // The arguments elements is considered tagged.
- return index == 0
- ? Representation::Tagged()
- : Representation::Integer32();
- }
-
- HValue* arguments() { return OperandAt(0); }
- HValue* length() { return OperandAt(1); }
- HValue* index() { return OperandAt(2); }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access_arguments_at")
-
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HBoundsCheck: public HBinaryOperation {
- public:
- HBoundsCheck(HValue* index, HValue* length)
- : HBinaryOperation(index, length) {
- SetFlag(kUseGVN);
- }
-
- virtual bool IsCheckInstruction() const { return true; }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Integer32();
- }
-
-#ifdef DEBUG
- virtual void Verify();
-#endif
-
- HValue* index() { return left(); }
- HValue* length() { return right(); }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds_check")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HBitwiseBinaryOperation: public HBinaryOperation {
- public:
- HBitwiseBinaryOperation(HValue* left, HValue* right)
- : HBinaryOperation(left, right) {
- set_representation(Representation::Tagged());
- SetFlag(kFlexibleRepresentation);
- SetAllSideEffects();
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return representation();
- }
-
- virtual void RepresentationChanged(Representation to) {
- if (!to.IsTagged()) {
- ASSERT(to.IsInteger32());
- ClearAllSideEffects();
- SetFlag(kTruncatingToInt32);
- SetFlag(kUseGVN);
- }
- }
-
- virtual HType CalculateInferredType();
-
- DECLARE_INSTRUCTION(BitwiseBinaryOperation)
-};
-
-
-class HArithmeticBinaryOperation: public HBinaryOperation {
- public:
- HArithmeticBinaryOperation(HValue* left, HValue* right)
- : HBinaryOperation(left, right) {
- set_representation(Representation::Tagged());
- SetFlag(kFlexibleRepresentation);
- SetAllSideEffects();
- }
-
- virtual void RepresentationChanged(Representation to) {
- if (!to.IsTagged()) {
- ClearAllSideEffects();
- SetFlag(kUseGVN);
- }
- }
-
- virtual HType CalculateInferredType();
- virtual Representation RequiredInputRepresentation(int index) const {
- return representation();
- }
- virtual Representation InferredRepresentation() {
- if (left()->representation().Equals(right()->representation())) {
- return left()->representation();
- }
- return HValue::InferredRepresentation();
- }
-
- DECLARE_INSTRUCTION(ArithmeticBinaryOperation)
-};
-
-
-class HCompare: public HBinaryOperation {
- public:
- HCompare(HValue* left, HValue* right, Token::Value token)
- : HBinaryOperation(left, right), token_(token) {
- ASSERT(Token::IsCompareOp(token));
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- void SetInputRepresentation(Representation r);
-
- virtual bool EmitAtUses() {
- return !HasSideEffects() && (uses()->length() <= 1);
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return input_representation_;
- }
- Representation GetInputRepresentation() const {
- return input_representation_;
- }
- Token::Value token() const { return token_; }
- virtual void PrintDataTo(StringStream* stream);
-
- virtual HType CalculateInferredType();
-
- virtual intptr_t Hashcode() {
- return HValue::Hashcode() * 7 + token_;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Compare, "compare")
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HCompare* comp = HCompare::cast(other);
- return token_ == comp->token();
- }
-
- private:
- Representation input_representation_;
- Token::Value token_;
-};
-
-
-class HCompareJSObjectEq: public HBinaryOperation {
- public:
- HCompareJSObjectEq(HValue* left, HValue* right)
- : HBinaryOperation(left, right) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetFlag(kDependsOnMaps);
- }
-
- virtual bool EmitAtUses() {
- return !HasSideEffects() && (uses()->length() <= 1);
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(CompareJSObjectEq, "compare-js-object-eq")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HUnaryPredicate: public HUnaryOperation {
- public:
- explicit HUnaryPredicate(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- virtual bool EmitAtUses() {
- return !HasSideEffects() && (uses()->length() <= 1);
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
- virtual HType CalculateInferredType();
-};
-
-
-class HIsNull: public HUnaryPredicate {
- public:
- HIsNull(HValue* value, bool is_strict)
- : HUnaryPredicate(value), is_strict_(is_strict) { }
-
- bool is_strict() const { return is_strict_; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNull, "is_null")
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HIsNull* b = HIsNull::cast(other);
- return is_strict_ == b->is_strict();
- }
-
- private:
- bool is_strict_;
-};
-
-
-class HIsObject: public HUnaryPredicate {
- public:
- explicit HIsObject(HValue* value) : HUnaryPredicate(value) { }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObject, "is_object")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HIsSmi: public HUnaryPredicate {
- public:
- explicit HIsSmi(HValue* value) : HUnaryPredicate(value) { }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is_smi")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HIsConstructCall: public HTemplateInstruction<0> {
- public:
- HIsConstructCall() {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- virtual bool EmitAtUses() {
- return !HasSideEffects() && (uses()->length() <= 1);
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is_construct_call")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HHasInstanceType: public HUnaryPredicate {
- public:
- HHasInstanceType(HValue* value, InstanceType type)
- : HUnaryPredicate(value), from_(type), to_(type) { }
- HHasInstanceType(HValue* value, InstanceType from, InstanceType to)
- : HUnaryPredicate(value), from_(from), to_(to) {
- ASSERT(to == LAST_TYPE); // Others not implemented yet in backend.
- }
-
- InstanceType from() { return from_; }
- InstanceType to() { return to_; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has_instance_type")
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HHasInstanceType* b = HHasInstanceType::cast(other);
- return (from_ == b->from()) && (to_ == b->to());
- }
-
- private:
- InstanceType from_;
- InstanceType to_; // Inclusive range, not all combinations work.
-};
-
-
-class HHasCachedArrayIndex: public HUnaryPredicate {
- public:
- explicit HHasCachedArrayIndex(HValue* value) : HUnaryPredicate(value) { }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has_cached_array_index")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HGetCachedArrayIndex: public HUnaryPredicate {
- public:
- explicit HGetCachedArrayIndex(HValue* value) : HUnaryPredicate(value) { }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get_cached_array_index")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HClassOfTest: public HUnaryPredicate {
- public:
- HClassOfTest(HValue* value, Handle<String> class_name)
- : HUnaryPredicate(value), class_name_(class_name) { }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class_of_test")
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> class_name() const { return class_name_; }
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HClassOfTest* b = HClassOfTest::cast(other);
- return class_name_.is_identical_to(b->class_name_);
- }
-
- private:
- Handle<String> class_name_;
-};
-
-
-class HTypeofIs: public HUnaryPredicate {
- public:
- HTypeofIs(HValue* value, Handle<String> type_literal)
- : HUnaryPredicate(value), type_literal_(type_literal) { }
-
- Handle<String> type_literal() { return type_literal_; }
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof_is")
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HTypeofIs* b = HTypeofIs::cast(other);
- return type_literal_.is_identical_to(b->type_literal_);
- }
-
- private:
- Handle<String> type_literal_;
-};
-
-
-class HInstanceOf: public HTemplateInstruction<3> {
- public:
- HInstanceOf(HValue* context, HValue* left, HValue* right) {
- SetOperandAt(0, context);
- SetOperandAt(1, left);
- SetOperandAt(2, right);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* left() { return OperandAt(1); }
- HValue* right() { return OperandAt(2); }
-
- virtual bool EmitAtUses() {
- return !HasSideEffects() && (uses()->length() <= 1);
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance_of")
-};
-
-
-class HInstanceOfKnownGlobal: public HUnaryOperation {
- public:
- HInstanceOfKnownGlobal(HValue* left, Handle<JSFunction> right)
- : HUnaryOperation(left), function_(right) {
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- Handle<JSFunction> function() { return function_; }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
- "instance_of_known_global")
-
- private:
- Handle<JSFunction> function_;
-};
-
-
-class HPower: public HBinaryOperation {
- public:
- HPower(HValue* left, HValue* right)
- : HBinaryOperation(left, right) {
- set_representation(Representation::Double());
- SetFlag(kUseGVN);
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return (index == 1) ? Representation::None() : Representation::Double();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Power, "power")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HAdd: public HArithmeticBinaryOperation {
- public:
- HAdd(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
- SetFlag(kCanOverflow);
- }
-
- // Add is only commutative if two integer values are added and not if two
- // tagged values are added (because it might be a String concatenation).
- virtual bool IsCommutative() const {
- return !representation().IsTagged();
- }
-
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
-
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(Add, "add")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- virtual Range* InferRange();
-};
-
-
-class HSub: public HArithmeticBinaryOperation {
- public:
- HSub(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
- SetFlag(kCanOverflow);
- }
-
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
-
- DECLARE_CONCRETE_INSTRUCTION(Sub, "sub")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- virtual Range* InferRange();
-};
-
-
-class HMul: public HArithmeticBinaryOperation {
- public:
- HMul(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
- SetFlag(kCanOverflow);
- }
-
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
-
- // Only commutative if it is certain that not two objects are multiplicated.
- virtual bool IsCommutative() const {
- return !representation().IsTagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Mul, "mul")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- virtual Range* InferRange();
-};
-
-
-class HMod: public HArithmeticBinaryOperation {
- public:
- HMod(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
- SetFlag(kCanBeDivByZero);
- }
-
- bool HasPowerOf2Divisor() {
- if (right()->IsConstant() &&
- HConstant::cast(right())->HasInteger32Value()) {
- int32_t value = HConstant::cast(right())->Integer32Value();
- return value != 0 && (IsPowerOf2(value) || IsPowerOf2(-value));
- }
-
- return false;
- }
-
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
-
- DECLARE_CONCRETE_INSTRUCTION(Mod, "mod")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- virtual Range* InferRange();
-};
-
-
-class HDiv: public HArithmeticBinaryOperation {
- public:
- HDiv(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
- SetFlag(kCanBeDivByZero);
- SetFlag(kCanOverflow);
- }
-
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
-
- DECLARE_CONCRETE_INSTRUCTION(Div, "div")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- virtual Range* InferRange();
-};
-
-
-class HBitAnd: public HBitwiseBinaryOperation {
- public:
- HBitAnd(HValue* left, HValue* right)
- : HBitwiseBinaryOperation(left, right) { }
-
- virtual bool IsCommutative() const { return true; }
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(BitAnd, "bit_and")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- virtual Range* InferRange();
-};
-
-
-class HBitXor: public HBitwiseBinaryOperation {
- public:
- HBitXor(HValue* left, HValue* right)
- : HBitwiseBinaryOperation(left, right) { }
-
- virtual bool IsCommutative() const { return true; }
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(BitXor, "bit_xor")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HBitOr: public HBitwiseBinaryOperation {
- public:
- HBitOr(HValue* left, HValue* right)
- : HBitwiseBinaryOperation(left, right) { }
-
- virtual bool IsCommutative() const { return true; }
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(BitOr, "bit_or")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- virtual Range* InferRange();
-};
-
-
-class HShl: public HBitwiseBinaryOperation {
- public:
- HShl(HValue* left, HValue* right)
- : HBitwiseBinaryOperation(left, right) { }
-
- virtual Range* InferRange();
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(Shl, "shl")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HShr: public HBitwiseBinaryOperation {
- public:
- HShr(HValue* left, HValue* right)
- : HBitwiseBinaryOperation(left, right) { }
-
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(Shr, "shr")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HSar: public HBitwiseBinaryOperation {
- public:
- HSar(HValue* left, HValue* right)
- : HBitwiseBinaryOperation(left, right) { }
-
- virtual Range* InferRange();
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(Sar, "sar")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HOsrEntry: public HTemplateInstruction<0> {
- public:
- explicit HOsrEntry(int ast_id) : ast_id_(ast_id) {
- SetFlag(kChangesOsrEntries);
- }
-
- int ast_id() const { return ast_id_; }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr_entry")
-
- private:
- int ast_id_;
-};
-
-
-class HParameter: public HTemplateInstruction<0> {
- public:
- explicit HParameter(unsigned index) : index_(index) {
- set_representation(Representation::Tagged());
- }
-
- unsigned index() const { return index_; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-
- private:
- unsigned index_;
-};
-
-
-class HCallStub: public HUnaryCall {
- public:
- HCallStub(HValue* context, CodeStub::Major major_key, int argument_count)
- : HUnaryCall(context, argument_count),
- major_key_(major_key),
- transcendental_type_(TranscendentalCache::kNumberOfCaches) {
- }
-
- CodeStub::Major major_key() { return major_key_; }
-
- HValue* context() { return value(); }
-
- void set_transcendental_type(TranscendentalCache::Type transcendental_type) {
- transcendental_type_ = transcendental_type;
- }
- TranscendentalCache::Type transcendental_type() {
- return transcendental_type_;
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallStub, "call_stub")
-
- private:
- CodeStub::Major major_key_;
- TranscendentalCache::Type transcendental_type_;
-};
-
-
-class HUnknownOSRValue: public HTemplateInstruction<0> {
- public:
- HUnknownOSRValue() { set_representation(Representation::Tagged()); }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown_osr_value")
-};
-
-
-class HLoadGlobalCell: public HTemplateInstruction<0> {
- public:
- HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, bool check_hole_value)
- : cell_(cell), check_hole_value_(check_hole_value) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetFlag(kDependsOnGlobalVars);
- }
-
- Handle<JSGlobalPropertyCell> cell() const { return cell_; }
- bool check_hole_value() const { return check_hole_value_; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual intptr_t Hashcode() {
- ASSERT(!HEAP->allow_allocation(false));
- return reinterpret_cast<intptr_t>(*cell_);
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load_global_cell")
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HLoadGlobalCell* b = HLoadGlobalCell::cast(other);
- return cell_.is_identical_to(b->cell());
- }
-
- private:
- Handle<JSGlobalPropertyCell> cell_;
- bool check_hole_value_;
-};
-
-
-class HLoadGlobalGeneric: public HBinaryOperation {
- public:
- HLoadGlobalGeneric(HValue* context,
- HValue* global_object,
- Handle<Object> name,
- bool for_typeof)
- : HBinaryOperation(context, global_object),
- name_(name),
- for_typeof_(for_typeof) {
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* global_object() { return OperandAt(1); }
- Handle<Object> name() const { return name_; }
- bool for_typeof() const { return for_typeof_; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load_global_generic")
-
- private:
- Handle<Object> name_;
- bool for_typeof_;
-};
-
-
-class HStoreGlobalCell: public HUnaryOperation {
- public:
- HStoreGlobalCell(HValue* value,
- Handle<JSGlobalPropertyCell> cell,
- bool check_hole_value)
- : HUnaryOperation(value),
- cell_(cell),
- check_hole_value_(check_hole_value) {
- SetFlag(kChangesGlobalVars);
- }
-
- Handle<JSGlobalPropertyCell> cell() const { return cell_; }
- bool check_hole_value() const { return check_hole_value_; }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store_global_cell")
-
- private:
- Handle<JSGlobalPropertyCell> cell_;
- bool check_hole_value_;
-};
-
-
-class HStoreGlobalGeneric: public HTemplateInstruction<3> {
- public:
- HStoreGlobalGeneric(HValue* context,
- HValue* global_object,
- Handle<Object> name,
- HValue* value)
- : name_(name) {
- SetOperandAt(0, context);
- SetOperandAt(1, global_object);
- SetOperandAt(2, value);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* global_object() { return OperandAt(1); }
- Handle<Object> name() const { return name_; }
- HValue* value() { return OperandAt(2); }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store_global_generic")
-
- private:
- Handle<Object> name_;
-};
-
-
-class HLoadContextSlot: public HUnaryOperation {
- public:
- HLoadContextSlot(HValue* context , int slot_index)
- : HUnaryOperation(context), slot_index_(slot_index) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetFlag(kDependsOnContextSlots);
- }
-
- int slot_index() const { return slot_index_; }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load_context_slot")
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HLoadContextSlot* b = HLoadContextSlot::cast(other);
- return (slot_index() == b->slot_index());
- }
-
- private:
- int slot_index_;
-};
-
-
-static inline bool StoringValueNeedsWriteBarrier(HValue* value) {
- return !value->type().IsSmi() &&
- !(value->IsConstant() && HConstant::cast(value)->InOldSpace());
-}
-
-
-class HStoreContextSlot: public HBinaryOperation {
- public:
- HStoreContextSlot(HValue* context, int slot_index, HValue* value)
- : HBinaryOperation(context, value), slot_index_(slot_index) {
- SetFlag(kChangesContextSlots);
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* value() { return OperandAt(1); }
- int slot_index() const { return slot_index_; }
-
- bool NeedsWriteBarrier() {
- return StoringValueNeedsWriteBarrier(value());
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store_context_slot")
-
- private:
- int slot_index_;
-};
-
-
-class HLoadNamedField: public HUnaryOperation {
- public:
- HLoadNamedField(HValue* object, bool is_in_object, int offset)
- : HUnaryOperation(object),
- is_in_object_(is_in_object),
- offset_(offset) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetFlag(kDependsOnMaps);
- if (is_in_object) {
- SetFlag(kDependsOnInobjectFields);
- } else {
- SetFlag(kDependsOnBackingStoreFields);
- }
- }
-
- HValue* object() { return OperandAt(0); }
- bool is_in_object() const { return is_in_object_; }
- int offset() const { return offset_; }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load_named_field")
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HLoadNamedField* b = HLoadNamedField::cast(other);
- return is_in_object_ == b->is_in_object_ && offset_ == b->offset_;
- }
-
- private:
- bool is_in_object_;
- int offset_;
-};
-
-
-class HLoadNamedFieldPolymorphic: public HUnaryOperation {
- public:
- HLoadNamedFieldPolymorphic(HValue* object,
- ZoneMapList* types,
- Handle<String> name);
-
- HValue* object() { return OperandAt(0); }
- ZoneMapList* types() { return &types_; }
- Handle<String> name() { return name_; }
- bool need_generic() { return need_generic_; }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic,
- "load_named_field_polymorphic")
-
- static const int kMaxLoadPolymorphism = 4;
-
- protected:
- virtual bool DataEquals(HValue* value);
-
- private:
- ZoneMapList types_;
- Handle<String> name_;
- bool need_generic_;
-};
-
-
-
-class HLoadNamedGeneric: public HBinaryOperation {
- public:
- HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name)
- : HBinaryOperation(context, object), name_(name) {
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* object() { return OperandAt(1); }
- Handle<Object> name() const { return name_; }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load_named_generic")
-
- private:
- Handle<Object> name_;
-};
-
-
-class HLoadFunctionPrototype: public HUnaryOperation {
- public:
- explicit HLoadFunctionPrototype(HValue* function)
- : HUnaryOperation(function) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetFlag(kDependsOnCalls);
- }
-
- HValue* function() { return OperandAt(0); }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load_function_prototype")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HLoadKeyedFastElement: public HBinaryOperation {
- public:
- HLoadKeyedFastElement(HValue* obj, HValue* key) : HBinaryOperation(obj, key) {
- set_representation(Representation::Tagged());
- SetFlag(kDependsOnArrayElements);
- SetFlag(kUseGVN);
- }
-
- HValue* object() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- // The key is supposed to be Integer32.
- return (index == 1) ? Representation::Integer32()
- : Representation::Tagged();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement,
- "load_keyed_fast_element")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HLoadKeyedSpecializedArrayElement: public HBinaryOperation {
- public:
- HLoadKeyedSpecializedArrayElement(HValue* external_elements,
- HValue* key,
- ExternalArrayType array_type)
- : HBinaryOperation(external_elements, key),
- array_type_(array_type) {
- if (array_type == kExternalFloatArray) {
- set_representation(Representation::Double());
- } else {
- set_representation(Representation::Integer32());
- }
- SetFlag(kDependsOnSpecializedArrayElements);
- // Native code could change the specialized array.
- SetFlag(kDependsOnCalls);
- SetFlag(kUseGVN);
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) const {
- // The key is supposed to be Integer32, but the base pointer
- // for the element load is a naked pointer.
- return (index == 1) ? Representation::Integer32()
- : Representation::External();
- }
-
- HValue* external_pointer() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
- ExternalArrayType array_type() const { return array_type_; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
- "load_keyed_specialized_array_element")
-
- protected:
- virtual bool DataEquals(HValue* other) {
- if (!other->IsLoadKeyedSpecializedArrayElement()) return false;
- HLoadKeyedSpecializedArrayElement* cast_other =
- HLoadKeyedSpecializedArrayElement::cast(other);
- return array_type_ == cast_other->array_type();
- }
-
- private:
- ExternalArrayType array_type_;
-};
-
-
-class HLoadKeyedGeneric: public HTemplateInstruction<3> {
- public:
- HLoadKeyedGeneric(HContext* context, HValue* obj, HValue* key) {
- set_representation(Representation::Tagged());
- SetOperandAt(0, obj);
- SetOperandAt(1, key);
- SetOperandAt(2, context);
- SetAllSideEffects();
- }
-
- HValue* object() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
- HValue* context() { return OperandAt(2); }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load_keyed_generic")
-};
-
-
-class HStoreNamedField: public HBinaryOperation {
- public:
- HStoreNamedField(HValue* obj,
- Handle<String> name,
- HValue* val,
- bool in_object,
- int offset)
- : HBinaryOperation(obj, val),
- name_(name),
- is_in_object_(in_object),
- offset_(offset) {
- if (is_in_object_) {
- SetFlag(kChangesInobjectFields);
- } else {
- SetFlag(kChangesBackingStoreFields);
- }
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store_named_field")
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
- virtual void PrintDataTo(StringStream* stream);
-
- HValue* object() { return OperandAt(0); }
- HValue* value() { return OperandAt(1); }
-
- Handle<String> name() const { return name_; }
- bool is_in_object() const { return is_in_object_; }
- int offset() const { return offset_; }
- Handle<Map> transition() const { return transition_; }
- void set_transition(Handle<Map> map) { transition_ = map; }
-
- bool NeedsWriteBarrier() {
- return StoringValueNeedsWriteBarrier(value());
- }
-
- private:
- Handle<String> name_;
- bool is_in_object_;
- int offset_;
- Handle<Map> transition_;
-};
-
-
-class HStoreNamedGeneric: public HTemplateInstruction<3> {
- public:
- HStoreNamedGeneric(HValue* context,
- HValue* object,
- Handle<String> name,
- HValue* value)
- : name_(name) {
- SetOperandAt(0, object);
- SetOperandAt(1, value);
- SetOperandAt(2, context);
- SetAllSideEffects();
- }
-
- HValue* object() { return OperandAt(0); }
- HValue* value() { return OperandAt(1); }
- HValue* context() { return OperandAt(2); }
- Handle<String> name() { return name_; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store_named_generic")
-
- private:
- Handle<String> name_;
-};
-
-
-class HStoreKeyedFastElement: public HTemplateInstruction<3> {
- public:
- HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val) {
- SetOperandAt(0, obj);
- SetOperandAt(1, key);
- SetOperandAt(2, val);
- SetFlag(kChangesArrayElements);
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- // The key is supposed to be Integer32.
- return (index == 1) ? Representation::Integer32()
- : Representation::Tagged();
- }
-
- HValue* object() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
- HValue* value() { return OperandAt(2); }
-
- bool NeedsWriteBarrier() {
- return StoringValueNeedsWriteBarrier(value());
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store_keyed_fast_element")
-};
-
-
-class HStoreKeyedSpecializedArrayElement: public HTemplateInstruction<3> {
- public:
- HStoreKeyedSpecializedArrayElement(HValue* external_elements,
- HValue* key,
- HValue* val,
- ExternalArrayType array_type)
- : array_type_(array_type) {
- SetFlag(kChangesSpecializedArrayElements);
- SetOperandAt(0, external_elements);
- SetOperandAt(1, key);
- SetOperandAt(2, val);
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) const {
- if (index == 0) {
- return Representation::External();
- } else {
- if (index == 2 && array_type() == kExternalFloatArray) {
- return Representation::Double();
- } else {
- return Representation::Integer32();
- }
- }
- }
-
- HValue* external_pointer() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
- HValue* value() { return OperandAt(2); }
- ExternalArrayType array_type() const { return array_type_; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
- "store_keyed_specialized_array_element")
- private:
- ExternalArrayType array_type_;
-};
-
-
-class HStoreKeyedGeneric: public HTemplateInstruction<4> {
- public:
- HStoreKeyedGeneric(HValue* context,
- HValue* object,
- HValue* key,
- HValue* value) {
- SetOperandAt(0, object);
- SetOperandAt(1, key);
- SetOperandAt(2, value);
- SetOperandAt(3, context);
- SetAllSideEffects();
- }
-
- HValue* object() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
- HValue* value() { return OperandAt(2); }
- HValue* context() { return OperandAt(3); }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store_keyed_generic")
-};
-
-
-class HStringCharCodeAt: public HBinaryOperation {
- public:
- HStringCharCodeAt(HValue* string, HValue* index)
- : HBinaryOperation(string, index) {
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- SetFlag(kDependsOnMaps);
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- // The index is supposed to be Integer32.
- return (index == 1) ? Representation::Integer32()
- : Representation::Tagged();
- }
-
- HValue* string() { return OperandAt(0); }
- HValue* index() { return OperandAt(1); }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string_char_code_at")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- virtual Range* InferRange() {
- return new Range(0, String::kMaxUC16CharCode);
- }
-};
-
-
-class HStringCharFromCode: public HUnaryOperation {
- public:
- explicit HStringCharFromCode(HValue* char_code) : HUnaryOperation(char_code) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Integer32();
- }
-
- virtual bool DataEquals(HValue* other) { return true; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string_char_from_code")
-};
-
-
-class HStringLength: public HUnaryOperation {
- public:
- explicit HStringLength(HValue* string) : HUnaryOperation(string) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetFlag(kDependsOnMaps);
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- virtual HType CalculateInferredType() {
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
- return HType::Smi();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StringLength, "string_length")
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- virtual Range* InferRange() {
- return new Range(0, String::kMaxLength);
- }
-};
-
-
-template <int V>
-class HMaterializedLiteral: public HTemplateInstruction<V> {
- public:
- HMaterializedLiteral<V>(int index, int depth)
- : literal_index_(index), depth_(depth) {
- this->set_representation(Representation::Tagged());
- }
-
- int literal_index() const { return literal_index_; }
- int depth() const { return depth_; }
-
- private:
- int literal_index_;
- int depth_;
-};
-
-
-class HArrayLiteral: public HMaterializedLiteral<0> {
- public:
- HArrayLiteral(Handle<FixedArray> constant_elements,
- int length,
- int literal_index,
- int depth)
- : HMaterializedLiteral<0>(literal_index, depth),
- length_(length),
- constant_elements_(constant_elements) {}
-
- Handle<FixedArray> constant_elements() const { return constant_elements_; }
- int length() const { return length_; }
-
- bool IsCopyOnWrite() const;
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array_literal")
-
- private:
- int length_;
- Handle<FixedArray> constant_elements_;
-};
-
-
-class HObjectLiteral: public HMaterializedLiteral<1> {
- public:
- HObjectLiteral(HValue* context,
- Handle<FixedArray> constant_properties,
- bool fast_elements,
- int literal_index,
- int depth,
- bool has_function)
- : HMaterializedLiteral<1>(literal_index, depth),
- constant_properties_(constant_properties),
- fast_elements_(fast_elements),
- has_function_(has_function) {
- SetOperandAt(0, context);
- }
-
- HValue* context() { return OperandAt(0); }
- Handle<FixedArray> constant_properties() const {
- return constant_properties_;
- }
- bool fast_elements() const { return fast_elements_; }
- bool has_function() const { return has_function_; }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object_literal")
-
- private:
- Handle<FixedArray> constant_properties_;
- bool fast_elements_;
- bool has_function_;
-};
-
-
-class HRegExpLiteral: public HMaterializedLiteral<0> {
- public:
- HRegExpLiteral(Handle<String> pattern,
- Handle<String> flags,
- int literal_index)
- : HMaterializedLiteral<0>(literal_index, 0),
- pattern_(pattern),
- flags_(flags) { }
-
- Handle<String> pattern() { return pattern_; }
- Handle<String> flags() { return flags_; }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp_literal")
-
- private:
- Handle<String> pattern_;
- Handle<String> flags_;
-};
-
-
-class HFunctionLiteral: public HTemplateInstruction<0> {
- public:
- HFunctionLiteral(Handle<SharedFunctionInfo> shared, bool pretenure)
- : shared_info_(shared), pretenure_(pretenure) {
- set_representation(Representation::Tagged());
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function_literal")
-
- Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
- bool pretenure() const { return pretenure_; }
-
- private:
- Handle<SharedFunctionInfo> shared_info_;
- bool pretenure_;
-};
-
-
-class HTypeof: public HUnaryOperation {
- public:
- explicit HTypeof(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class HToFastProperties: public HUnaryOperation {
- public:
- explicit HToFastProperties(HValue* value) : HUnaryOperation(value) {
- // This instruction is not marked as having side effects, but
- // changes the map of the input operand. Use it only when creating
- // object literals.
- ASSERT(value->IsObjectLiteral());
- set_representation(Representation::Tagged());
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to_fast_properties")
-};
-
-
-class HValueOf: public HUnaryOperation {
- public:
- explicit HValueOf(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value_of")
-};
-
-
-class HDeleteProperty: public HBinaryOperation {
- public:
- HDeleteProperty(HValue* obj, HValue* key)
- : HBinaryOperation(obj, key) {
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete_property")
-
- HValue* object() { return left(); }
- HValue* key() { return right(); }
-};
-
-#undef DECLARE_INSTRUCTION
-#undef DECLARE_CONCRETE_INSTRUCTION
-
-} } // namespace v8::internal
-
-#endif // V8_HYDROGEN_INSTRUCTIONS_H_
diff --git a/src/3rdparty/v8/src/hydrogen.cc b/src/3rdparty/v8/src/hydrogen.cc
deleted file mode 100644
index 99e206b..0000000
--- a/src/3rdparty/v8/src/hydrogen.cc
+++ /dev/null
@@ -1,5976 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "hydrogen.h"
-
-#include "codegen.h"
-#include "data-flow.h"
-#include "full-codegen.h"
-#include "hashmap.h"
-#include "lithium-allocator.h"
-#include "parser.h"
-#include "scopes.h"
-#include "stub-cache.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/lithium-codegen-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/lithium-codegen-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/lithium-codegen-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/lithium-codegen-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-HBasicBlock::HBasicBlock(HGraph* graph)
- : block_id_(graph->GetNextBlockID()),
- graph_(graph),
- phis_(4),
- first_(NULL),
- last_(NULL),
- end_(NULL),
- loop_information_(NULL),
- predecessors_(2),
- dominator_(NULL),
- dominated_blocks_(4),
- last_environment_(NULL),
- argument_count_(-1),
- first_instruction_index_(-1),
- last_instruction_index_(-1),
- deleted_phis_(4),
- parent_loop_header_(NULL),
- is_inline_return_target_(false) {
-}
-
-
-void HBasicBlock::AttachLoopInformation() {
- ASSERT(!IsLoopHeader());
- loop_information_ = new HLoopInformation(this);
-}
-
-
-void HBasicBlock::DetachLoopInformation() {
- ASSERT(IsLoopHeader());
- loop_information_ = NULL;
-}
-
-
-void HBasicBlock::AddPhi(HPhi* phi) {
- ASSERT(!IsStartBlock());
- phis_.Add(phi);
- phi->SetBlock(this);
-}
-
-
-void HBasicBlock::RemovePhi(HPhi* phi) {
- ASSERT(phi->block() == this);
- ASSERT(phis_.Contains(phi));
- ASSERT(phi->HasNoUses() || !phi->is_live());
- phi->ClearOperands();
- phis_.RemoveElement(phi);
- phi->SetBlock(NULL);
-}
-
-
-void HBasicBlock::AddInstruction(HInstruction* instr) {
- ASSERT(!IsStartBlock() || !IsFinished());
- ASSERT(!instr->IsLinked());
- ASSERT(!IsFinished());
- if (first_ == NULL) {
- HBlockEntry* entry = new HBlockEntry();
- entry->InitializeAsFirst(this);
- first_ = last_ = entry;
- }
- instr->InsertAfter(last_);
- last_ = instr;
-}
-
-
-HDeoptimize* HBasicBlock::CreateDeoptimize() {
- ASSERT(HasEnvironment());
- HEnvironment* environment = last_environment();
-
- HDeoptimize* instr = new HDeoptimize(environment->length());
-
- for (int i = 0; i < environment->length(); i++) {
- HValue* val = environment->values()->at(i);
- instr->AddEnvironmentValue(val);
- }
-
- return instr;
-}
-
-
-HSimulate* HBasicBlock::CreateSimulate(int id) {
- ASSERT(HasEnvironment());
- HEnvironment* environment = last_environment();
- ASSERT(id == AstNode::kNoNumber ||
- environment->closure()->shared()->VerifyBailoutId(id));
-
- int push_count = environment->push_count();
- int pop_count = environment->pop_count();
-
- HSimulate* instr = new HSimulate(id, pop_count);
- for (int i = push_count - 1; i >= 0; --i) {
- instr->AddPushedValue(environment->ExpressionStackAt(i));
- }
- for (int i = 0; i < environment->assigned_variables()->length(); ++i) {
- int index = environment->assigned_variables()->at(i);
- instr->AddAssignedValue(index, environment->Lookup(index));
- }
- environment->ClearHistory();
- return instr;
-}
-
-
-void HBasicBlock::Finish(HControlInstruction* end) {
- ASSERT(!IsFinished());
- AddInstruction(end);
- end_ = end;
- if (end->FirstSuccessor() != NULL) {
- end->FirstSuccessor()->RegisterPredecessor(this);
- if (end->SecondSuccessor() != NULL) {
- end->SecondSuccessor()->RegisterPredecessor(this);
- }
- }
-}
-
-
-void HBasicBlock::Goto(HBasicBlock* block, bool include_stack_check) {
- if (block->IsInlineReturnTarget()) {
- AddInstruction(new HLeaveInlined);
- last_environment_ = last_environment()->outer();
- }
- AddSimulate(AstNode::kNoNumber);
- HGoto* instr = new HGoto(block);
- instr->set_include_stack_check(include_stack_check);
- Finish(instr);
-}
-
-
-void HBasicBlock::AddLeaveInlined(HValue* return_value, HBasicBlock* target) {
- ASSERT(target->IsInlineReturnTarget());
- ASSERT(return_value != NULL);
- AddInstruction(new HLeaveInlined);
- last_environment_ = last_environment()->outer();
- last_environment()->Push(return_value);
- AddSimulate(AstNode::kNoNumber);
- HGoto* instr = new HGoto(target);
- Finish(instr);
-}
-
-
-void HBasicBlock::SetInitialEnvironment(HEnvironment* env) {
- ASSERT(!HasEnvironment());
- ASSERT(first() == NULL);
- UpdateEnvironment(env);
-}
-
-
-void HBasicBlock::SetJoinId(int id) {
- int length = predecessors_.length();
- ASSERT(length > 0);
- for (int i = 0; i < length; i++) {
- HBasicBlock* predecessor = predecessors_[i];
- ASSERT(predecessor->end()->IsGoto());
- HSimulate* simulate = HSimulate::cast(predecessor->end()->previous());
- // We only need to verify the ID once.
- ASSERT(i != 0 ||
- predecessor->last_environment()->closure()->shared()
- ->VerifyBailoutId(id));
- simulate->set_ast_id(id);
- }
-}
-
-
-bool HBasicBlock::Dominates(HBasicBlock* other) const {
- HBasicBlock* current = other->dominator();
- while (current != NULL) {
- if (current == this) return true;
- current = current->dominator();
- }
- return false;
-}
-
-
-void HBasicBlock::PostProcessLoopHeader(IterationStatement* stmt) {
- ASSERT(IsLoopHeader());
-
- SetJoinId(stmt->EntryId());
- if (predecessors()->length() == 1) {
- // This is a degenerated loop.
- DetachLoopInformation();
- return;
- }
-
- // Only the first entry into the loop is from outside the loop. All other
- // entries must be back edges.
- for (int i = 1; i < predecessors()->length(); ++i) {
- loop_information()->RegisterBackEdge(predecessors()->at(i));
- }
-}
-
-
-void HBasicBlock::RegisterPredecessor(HBasicBlock* pred) {
- if (!predecessors_.is_empty()) {
- // Only loop header blocks can have a predecessor added after
- // instructions have been added to the block (they have phis for all
- // values in the environment, these phis may be eliminated later).
- ASSERT(IsLoopHeader() || first_ == NULL);
- HEnvironment* incoming_env = pred->last_environment();
- if (IsLoopHeader()) {
- ASSERT(phis()->length() == incoming_env->length());
- for (int i = 0; i < phis_.length(); ++i) {
- phis_[i]->AddInput(incoming_env->values()->at(i));
- }
- } else {
- last_environment()->AddIncomingEdge(this, pred->last_environment());
- }
- } else if (!HasEnvironment() && !IsFinished()) {
- ASSERT(!IsLoopHeader());
- SetInitialEnvironment(pred->last_environment()->Copy());
- }
-
- predecessors_.Add(pred);
-}
-
-
-void HBasicBlock::AddDominatedBlock(HBasicBlock* block) {
- ASSERT(!dominated_blocks_.Contains(block));
- // Keep the list of dominated blocks sorted such that if there is two
- // succeeding block in this list, the predecessor is before the successor.
- int index = 0;
- while (index < dominated_blocks_.length() &&
- dominated_blocks_[index]->block_id() < block->block_id()) {
- ++index;
- }
- dominated_blocks_.InsertAt(index, block);
-}
-
-
-void HBasicBlock::AssignCommonDominator(HBasicBlock* other) {
- if (dominator_ == NULL) {
- dominator_ = other;
- other->AddDominatedBlock(this);
- } else if (other->dominator() != NULL) {
- HBasicBlock* first = dominator_;
- HBasicBlock* second = other;
-
- while (first != second) {
- if (first->block_id() > second->block_id()) {
- first = first->dominator();
- } else {
- second = second->dominator();
- }
- ASSERT(first != NULL && second != NULL);
- }
-
- if (dominator_ != first) {
- ASSERT(dominator_->dominated_blocks_.Contains(this));
- dominator_->dominated_blocks_.RemoveElement(this);
- dominator_ = first;
- first->AddDominatedBlock(this);
- }
- }
-}
-
-
-int HBasicBlock::PredecessorIndexOf(HBasicBlock* predecessor) const {
- for (int i = 0; i < predecessors_.length(); ++i) {
- if (predecessors_[i] == predecessor) return i;
- }
- UNREACHABLE();
- return -1;
-}
-
-
-#ifdef DEBUG
-void HBasicBlock::Verify() {
- // Check that every block is finished.
- ASSERT(IsFinished());
- ASSERT(block_id() >= 0);
-
- // Check that the incoming edges are in edge split form.
- if (predecessors_.length() > 1) {
- for (int i = 0; i < predecessors_.length(); ++i) {
- ASSERT(predecessors_[i]->end()->SecondSuccessor() == NULL);
- }
- }
-}
-#endif
-
-
-void HLoopInformation::RegisterBackEdge(HBasicBlock* block) {
- this->back_edges_.Add(block);
- AddBlock(block);
-}
-
-
-HBasicBlock* HLoopInformation::GetLastBackEdge() const {
- int max_id = -1;
- HBasicBlock* result = NULL;
- for (int i = 0; i < back_edges_.length(); ++i) {
- HBasicBlock* cur = back_edges_[i];
- if (cur->block_id() > max_id) {
- max_id = cur->block_id();
- result = cur;
- }
- }
- return result;
-}
-
-
-void HLoopInformation::AddBlock(HBasicBlock* block) {
- if (block == loop_header()) return;
- if (block->parent_loop_header() == loop_header()) return;
- if (block->parent_loop_header() != NULL) {
- AddBlock(block->parent_loop_header());
- } else {
- block->set_parent_loop_header(loop_header());
- blocks_.Add(block);
- for (int i = 0; i < block->predecessors()->length(); ++i) {
- AddBlock(block->predecessors()->at(i));
- }
- }
-}
-
-
-#ifdef DEBUG
-
-// Checks reachability of the blocks in this graph and stores a bit in
-// the BitVector "reachable()" for every block that can be reached
-// from the start block of the graph. If "dont_visit" is non-null, the given
-// block is treated as if it would not be part of the graph. "visited_count()"
-// returns the number of reachable blocks.
-class ReachabilityAnalyzer BASE_EMBEDDED {
- public:
- ReachabilityAnalyzer(HBasicBlock* entry_block,
- int block_count,
- HBasicBlock* dont_visit)
- : visited_count_(0),
- stack_(16),
- reachable_(block_count),
- dont_visit_(dont_visit) {
- PushBlock(entry_block);
- Analyze();
- }
-
- int visited_count() const { return visited_count_; }
- const BitVector* reachable() const { return &reachable_; }
-
- private:
- void PushBlock(HBasicBlock* block) {
- if (block != NULL && block != dont_visit_ &&
- !reachable_.Contains(block->block_id())) {
- reachable_.Add(block->block_id());
- stack_.Add(block);
- visited_count_++;
- }
- }
-
- void Analyze() {
- while (!stack_.is_empty()) {
- HControlInstruction* end = stack_.RemoveLast()->end();
- PushBlock(end->FirstSuccessor());
- PushBlock(end->SecondSuccessor());
- }
- }
-
- int visited_count_;
- ZoneList<HBasicBlock*> stack_;
- BitVector reachable_;
- HBasicBlock* dont_visit_;
-};
-
-
-void HGraph::Verify() const {
- for (int i = 0; i < blocks_.length(); i++) {
- HBasicBlock* block = blocks_.at(i);
-
- block->Verify();
-
- // Check that every block contains at least one node and that only the last
- // node is a control instruction.
- HInstruction* current = block->first();
- ASSERT(current != NULL && current->IsBlockEntry());
- while (current != NULL) {
- ASSERT((current->next() == NULL) == current->IsControlInstruction());
- ASSERT(current->block() == block);
- current->Verify();
- current = current->next();
- }
-
- // Check that successors are correctly set.
- HBasicBlock* first = block->end()->FirstSuccessor();
- HBasicBlock* second = block->end()->SecondSuccessor();
- ASSERT(second == NULL || first != NULL);
-
- // Check that the predecessor array is correct.
- if (first != NULL) {
- ASSERT(first->predecessors()->Contains(block));
- if (second != NULL) {
- ASSERT(second->predecessors()->Contains(block));
- }
- }
-
- // Check that phis have correct arguments.
- for (int j = 0; j < block->phis()->length(); j++) {
- HPhi* phi = block->phis()->at(j);
- phi->Verify();
- }
-
- // Check that all join blocks have predecessors that end with an
- // unconditional goto and agree on their environment node id.
- if (block->predecessors()->length() >= 2) {
- int id = block->predecessors()->first()->last_environment()->ast_id();
- for (int k = 0; k < block->predecessors()->length(); k++) {
- HBasicBlock* predecessor = block->predecessors()->at(k);
- ASSERT(predecessor->end()->IsGoto());
- ASSERT(predecessor->last_environment()->ast_id() == id);
- }
- }
- }
-
- // Check special property of first block to have no predecessors.
- ASSERT(blocks_.at(0)->predecessors()->is_empty());
-
- // Check that the graph is fully connected.
- ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
- ASSERT(analyzer.visited_count() == blocks_.length());
-
- // Check that entry block dominator is NULL.
- ASSERT(entry_block_->dominator() == NULL);
-
- // Check dominators.
- for (int i = 0; i < blocks_.length(); ++i) {
- HBasicBlock* block = blocks_.at(i);
- if (block->dominator() == NULL) {
- // Only start block may have no dominator assigned to.
- ASSERT(i == 0);
- } else {
- // Assert that block is unreachable if dominator must not be visited.
- ReachabilityAnalyzer dominator_analyzer(entry_block_,
- blocks_.length(),
- block->dominator());
- ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id()));
- }
- }
-}
-
-#endif
-
-
-HConstant* HGraph::GetConstant(SetOncePointer<HConstant>* pointer,
- Object* value) {
- if (!pointer->is_set()) {
- HConstant* constant = new HConstant(Handle<Object>(value),
- Representation::Tagged());
- constant->InsertAfter(GetConstantUndefined());
- pointer->set(constant);
- }
- return pointer->get();
-}
-
-
-HConstant* HGraph::GetConstant1() {
- return GetConstant(&constant_1_, Smi::FromInt(1));
-}
-
-
-HConstant* HGraph::GetConstantMinus1() {
- return GetConstant(&constant_minus1_, Smi::FromInt(-1));
-}
-
-
-HConstant* HGraph::GetConstantTrue() {
- return GetConstant(&constant_true_, isolate()->heap()->true_value());
-}
-
-
-HConstant* HGraph::GetConstantFalse() {
- return GetConstant(&constant_false_, isolate()->heap()->false_value());
-}
-
-
-HBasicBlock* HGraphBuilder::CreateJoin(HBasicBlock* first,
- HBasicBlock* second,
- int join_id) {
- if (first == NULL) {
- return second;
- } else if (second == NULL) {
- return first;
- } else {
- HBasicBlock* join_block = graph_->CreateBasicBlock();
- first->Goto(join_block);
- second->Goto(join_block);
- join_block->SetJoinId(join_id);
- return join_block;
- }
-}
-
-
-HBasicBlock* HGraphBuilder::JoinContinue(IterationStatement* statement,
- HBasicBlock* exit_block,
- HBasicBlock* continue_block) {
- if (continue_block != NULL) {
- if (exit_block != NULL) exit_block->Goto(continue_block);
- continue_block->SetJoinId(statement->ContinueId());
- return continue_block;
- }
- return exit_block;
-}
-
-
-HBasicBlock* HGraphBuilder::CreateLoop(IterationStatement* statement,
- HBasicBlock* loop_entry,
- HBasicBlock* body_exit,
- HBasicBlock* loop_successor,
- HBasicBlock* break_block) {
- if (body_exit != NULL) body_exit->Goto(loop_entry, true);
- loop_entry->PostProcessLoopHeader(statement);
- if (break_block != NULL) {
- if (loop_successor != NULL) loop_successor->Goto(break_block);
- break_block->SetJoinId(statement->ExitId());
- return break_block;
- }
- return loop_successor;
-}
-
-
-void HBasicBlock::FinishExit(HControlInstruction* instruction) {
- Finish(instruction);
- ClearEnvironment();
-}
-
-
-HGraph::HGraph(CompilationInfo* info)
- : isolate_(info->isolate()),
- next_block_id_(0),
- entry_block_(NULL),
- blocks_(8),
- values_(16),
- phi_list_(NULL) {
- start_environment_ = new HEnvironment(NULL, info->scope(), info->closure());
- start_environment_->set_ast_id(info->function()->id());
- entry_block_ = CreateBasicBlock();
- entry_block_->SetInitialEnvironment(start_environment_);
-}
-
-
-Handle<Code> HGraph::Compile(CompilationInfo* info) {
- int values = GetMaximumValueID();
- if (values > LAllocator::max_initial_value_ids()) {
- if (FLAG_trace_bailout) PrintF("Function is too big\n");
- return Handle<Code>::null();
- }
-
- LAllocator allocator(values, this);
- LChunkBuilder builder(info, this, &allocator);
- LChunk* chunk = builder.Build();
- if (chunk == NULL) return Handle<Code>::null();
-
- if (!FLAG_alloc_lithium) return Handle<Code>::null();
-
- allocator.Allocate(chunk);
-
- if (!FLAG_use_lithium) return Handle<Code>::null();
-
- MacroAssembler assembler(info->isolate(), NULL, 0);
- LCodeGen generator(chunk, &assembler, info);
-
- if (FLAG_eliminate_empty_blocks) {
- chunk->MarkEmptyBlocks();
- }
-
- if (generator.GenerateCode()) {
- if (FLAG_trace_codegen) {
- PrintF("Crankshaft Compiler - ");
- }
- CodeGenerator::MakeCodePrologue(info);
- Code::Flags flags =
- Code::ComputeFlags(Code::OPTIMIZED_FUNCTION, NOT_IN_LOOP);
- Handle<Code> code =
- CodeGenerator::MakeCodeEpilogue(&assembler, flags, info);
- generator.FinishCode(code);
- CodeGenerator::PrintCode(code, info);
- return code;
- }
- return Handle<Code>::null();
-}
-
-
-HBasicBlock* HGraph::CreateBasicBlock() {
- HBasicBlock* result = new HBasicBlock(this);
- blocks_.Add(result);
- return result;
-}
-
-
-void HGraph::Canonicalize() {
- if (!FLAG_use_canonicalizing) return;
- HPhase phase("Canonicalize", this);
- for (int i = 0; i < blocks()->length(); ++i) {
- HInstruction* instr = blocks()->at(i)->first();
- while (instr != NULL) {
- HValue* value = instr->Canonicalize();
- if (value != instr) instr->ReplaceAndDelete(value);
- instr = instr->next();
- }
- }
-}
-
-
-void HGraph::OrderBlocks() {
- HPhase phase("Block ordering");
- BitVector visited(blocks_.length());
-
- ZoneList<HBasicBlock*> reverse_result(8);
- HBasicBlock* start = blocks_[0];
- Postorder(start, &visited, &reverse_result, NULL);
-
- blocks_.Rewind(0);
- int index = 0;
- for (int i = reverse_result.length() - 1; i >= 0; --i) {
- HBasicBlock* b = reverse_result[i];
- blocks_.Add(b);
- b->set_block_id(index++);
- }
-}
-
-
-void HGraph::PostorderLoopBlocks(HLoopInformation* loop,
- BitVector* visited,
- ZoneList<HBasicBlock*>* order,
- HBasicBlock* loop_header) {
- for (int i = 0; i < loop->blocks()->length(); ++i) {
- HBasicBlock* b = loop->blocks()->at(i);
- Postorder(b->end()->SecondSuccessor(), visited, order, loop_header);
- Postorder(b->end()->FirstSuccessor(), visited, order, loop_header);
- if (b->IsLoopHeader() && b != loop->loop_header()) {
- PostorderLoopBlocks(b->loop_information(), visited, order, loop_header);
- }
- }
-}
-
-
-void HGraph::Postorder(HBasicBlock* block,
- BitVector* visited,
- ZoneList<HBasicBlock*>* order,
- HBasicBlock* loop_header) {
- if (block == NULL || visited->Contains(block->block_id())) return;
- if (block->parent_loop_header() != loop_header) return;
- visited->Add(block->block_id());
- if (block->IsLoopHeader()) {
- PostorderLoopBlocks(block->loop_information(), visited, order, loop_header);
- Postorder(block->end()->SecondSuccessor(), visited, order, block);
- Postorder(block->end()->FirstSuccessor(), visited, order, block);
- } else {
- Postorder(block->end()->SecondSuccessor(), visited, order, loop_header);
- Postorder(block->end()->FirstSuccessor(), visited, order, loop_header);
- }
- ASSERT(block->end()->FirstSuccessor() == NULL ||
- order->Contains(block->end()->FirstSuccessor()) ||
- block->end()->FirstSuccessor()->IsLoopHeader());
- ASSERT(block->end()->SecondSuccessor() == NULL ||
- order->Contains(block->end()->SecondSuccessor()) ||
- block->end()->SecondSuccessor()->IsLoopHeader());
- order->Add(block);
-}
-
-
-void HGraph::AssignDominators() {
- HPhase phase("Assign dominators", this);
- for (int i = 0; i < blocks_.length(); ++i) {
- if (blocks_[i]->IsLoopHeader()) {
- blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->first());
- } else {
- for (int j = 0; j < blocks_[i]->predecessors()->length(); ++j) {
- blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->at(j));
- }
- }
- }
-}
-
-
-void HGraph::EliminateRedundantPhis() {
- HPhase phase("Redundant phi elimination", this);
-
- // Worklist of phis that can potentially be eliminated. Initialized
- // with all phi nodes. When elimination of a phi node modifies
- // another phi node the modified phi node is added to the worklist.
- ZoneList<HPhi*> worklist(blocks_.length());
- for (int i = 0; i < blocks_.length(); ++i) {
- worklist.AddAll(*blocks_[i]->phis());
- }
-
- while (!worklist.is_empty()) {
- HPhi* phi = worklist.RemoveLast();
- HBasicBlock* block = phi->block();
-
- // Skip phi node if it was already replaced.
- if (block == NULL) continue;
-
- // Get replacement value if phi is redundant.
- HValue* value = phi->GetRedundantReplacement();
-
- if (value != NULL) {
- // Iterate through uses finding the ones that should be
- // replaced.
- SmallPointerList<HValue>* uses = phi->uses();
- while (!uses->is_empty()) {
- HValue* use = uses->RemoveLast();
- if (use != NULL) {
- phi->ReplaceAtUse(use, value);
- if (use->IsPhi()) worklist.Add(HPhi::cast(use));
- }
- }
- block->RemovePhi(phi);
- }
- }
-}
-
-
-void HGraph::EliminateUnreachablePhis() {
- HPhase phase("Unreachable phi elimination", this);
-
- // Initialize worklist.
- ZoneList<HPhi*> phi_list(blocks_.length());
- ZoneList<HPhi*> worklist(blocks_.length());
- for (int i = 0; i < blocks_.length(); ++i) {
- for (int j = 0; j < blocks_[i]->phis()->length(); j++) {
- HPhi* phi = blocks_[i]->phis()->at(j);
- phi_list.Add(phi);
- // We can't eliminate phis in the receiver position in the environment
- // because in case of throwing an error we need this value to
- // construct a stack trace.
- if (phi->HasRealUses() || phi->IsReceiver()) {
- phi->set_is_live(true);
- worklist.Add(phi);
- }
- }
- }
-
- // Iteratively mark live phis.
- while (!worklist.is_empty()) {
- HPhi* phi = worklist.RemoveLast();
- for (int i = 0; i < phi->OperandCount(); i++) {
- HValue* operand = phi->OperandAt(i);
- if (operand->IsPhi() && !HPhi::cast(operand)->is_live()) {
- HPhi::cast(operand)->set_is_live(true);
- worklist.Add(HPhi::cast(operand));
- }
- }
- }
-
- // Remove unreachable phis.
- for (int i = 0; i < phi_list.length(); i++) {
- HPhi* phi = phi_list[i];
- if (!phi->is_live()) {
- HBasicBlock* block = phi->block();
- block->RemovePhi(phi);
- block->RecordDeletedPhi(phi->merged_index());
- }
- }
-}
-
-
-bool HGraph::CollectPhis() {
- int block_count = blocks_.length();
- phi_list_ = new ZoneList<HPhi*>(block_count);
- for (int i = 0; i < block_count; ++i) {
- for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
- HPhi* phi = blocks_[i]->phis()->at(j);
- phi_list_->Add(phi);
- // We don't support phi uses of arguments for now.
- if (phi->CheckFlag(HValue::kIsArguments)) return false;
- }
- }
- return true;
-}
-
-
-void HGraph::InferTypes(ZoneList<HValue*>* worklist) {
- BitVector in_worklist(GetMaximumValueID());
- for (int i = 0; i < worklist->length(); ++i) {
- ASSERT(!in_worklist.Contains(worklist->at(i)->id()));
- in_worklist.Add(worklist->at(i)->id());
- }
-
- while (!worklist->is_empty()) {
- HValue* current = worklist->RemoveLast();
- in_worklist.Remove(current->id());
- if (current->UpdateInferredType()) {
- for (int j = 0; j < current->uses()->length(); j++) {
- HValue* use = current->uses()->at(j);
- if (!in_worklist.Contains(use->id())) {
- in_worklist.Add(use->id());
- worklist->Add(use);
- }
- }
- }
- }
-}
-
-
-class HRangeAnalysis BASE_EMBEDDED {
- public:
- explicit HRangeAnalysis(HGraph* graph) : graph_(graph), changed_ranges_(16) {}
-
- void Analyze();
-
- private:
- void TraceRange(const char* msg, ...);
- void Analyze(HBasicBlock* block);
- void InferControlFlowRange(HTest* test, HBasicBlock* dest);
- void InferControlFlowRange(Token::Value op, HValue* value, HValue* other);
- void InferPhiRange(HPhi* phi);
- void InferRange(HValue* value);
- void RollBackTo(int index);
- void AddRange(HValue* value, Range* range);
-
- HGraph* graph_;
- ZoneList<HValue*> changed_ranges_;
-};
-
-
-void HRangeAnalysis::TraceRange(const char* msg, ...) {
- if (FLAG_trace_range) {
- va_list arguments;
- va_start(arguments, msg);
- OS::VPrint(msg, arguments);
- va_end(arguments);
- }
-}
-
-
-void HRangeAnalysis::Analyze() {
- HPhase phase("Range analysis", graph_);
- Analyze(graph_->blocks()->at(0));
-}
-
-
-void HRangeAnalysis::Analyze(HBasicBlock* block) {
- TraceRange("Analyzing block B%d\n", block->block_id());
-
- int last_changed_range = changed_ranges_.length() - 1;
-
- // Infer range based on control flow.
- if (block->predecessors()->length() == 1) {
- HBasicBlock* pred = block->predecessors()->first();
- if (pred->end()->IsTest()) {
- InferControlFlowRange(HTest::cast(pred->end()), block);
- }
- }
-
- // Process phi instructions.
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- InferPhiRange(phi);
- }
-
- // Go through all instructions of the current block.
- HInstruction* instr = block->first();
- while (instr != block->end()) {
- InferRange(instr);
- instr = instr->next();
- }
-
- // Continue analysis in all dominated blocks.
- for (int i = 0; i < block->dominated_blocks()->length(); ++i) {
- Analyze(block->dominated_blocks()->at(i));
- }
-
- RollBackTo(last_changed_range);
-}
-
-
-void HRangeAnalysis::InferControlFlowRange(HTest* test, HBasicBlock* dest) {
- ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
- if (test->value()->IsCompare()) {
- HCompare* compare = HCompare::cast(test->value());
- if (compare->GetInputRepresentation().IsInteger32()) {
- Token::Value op = compare->token();
- if (test->SecondSuccessor() == dest) {
- op = Token::NegateCompareOp(op);
- }
- Token::Value inverted_op = Token::InvertCompareOp(op);
- InferControlFlowRange(op, compare->left(), compare->right());
- InferControlFlowRange(inverted_op, compare->right(), compare->left());
- }
- }
-}
-
-
-// We know that value [op] other. Use this information to update the range on
-// value.
-void HRangeAnalysis::InferControlFlowRange(Token::Value op,
- HValue* value,
- HValue* other) {
- Range temp_range;
- Range* range = other->range() != NULL ? other->range() : &temp_range;
- Range* new_range = NULL;
-
- TraceRange("Control flow range infer %d %s %d\n",
- value->id(),
- Token::Name(op),
- other->id());
-
- if (op == Token::EQ || op == Token::EQ_STRICT) {
- // The same range has to apply for value.
- new_range = range->Copy();
- } else if (op == Token::LT || op == Token::LTE) {
- new_range = range->CopyClearLower();
- if (op == Token::LT) {
- new_range->AddConstant(-1);
- }
- } else if (op == Token::GT || op == Token::GTE) {
- new_range = range->CopyClearUpper();
- if (op == Token::GT) {
- new_range->AddConstant(1);
- }
- }
-
- if (new_range != NULL && !new_range->IsMostGeneric()) {
- AddRange(value, new_range);
- }
-}
-
-
-void HRangeAnalysis::InferPhiRange(HPhi* phi) {
- // TODO(twuerthinger): Infer loop phi ranges.
- InferRange(phi);
-}
-
-
-void HRangeAnalysis::InferRange(HValue* value) {
- ASSERT(!value->HasRange());
- if (!value->representation().IsNone()) {
- value->ComputeInitialRange();
- Range* range = value->range();
- TraceRange("Initial inferred range of %d (%s) set to [%d,%d]\n",
- value->id(),
- value->Mnemonic(),
- range->lower(),
- range->upper());
- }
-}
-
-
-void HRangeAnalysis::RollBackTo(int index) {
- for (int i = index + 1; i < changed_ranges_.length(); ++i) {
- changed_ranges_[i]->RemoveLastAddedRange();
- }
- changed_ranges_.Rewind(index + 1);
-}
-
-
-void HRangeAnalysis::AddRange(HValue* value, Range* range) {
- Range* original_range = value->range();
- value->AddNewRange(range);
- changed_ranges_.Add(value);
- Range* new_range = value->range();
- TraceRange("Updated range of %d set to [%d,%d]\n",
- value->id(),
- new_range->lower(),
- new_range->upper());
- if (original_range != NULL) {
- TraceRange("Original range was [%d,%d]\n",
- original_range->lower(),
- original_range->upper());
- }
- TraceRange("New information was [%d,%d]\n",
- range->lower(),
- range->upper());
-}
-
-
-void TraceGVN(const char* msg, ...) {
- if (FLAG_trace_gvn) {
- va_list arguments;
- va_start(arguments, msg);
- OS::VPrint(msg, arguments);
- va_end(arguments);
- }
-}
-
-
-HValueMap::HValueMap(const HValueMap* other)
- : array_size_(other->array_size_),
- lists_size_(other->lists_size_),
- count_(other->count_),
- present_flags_(other->present_flags_),
- array_(ZONE->NewArray<HValueMapListElement>(other->array_size_)),
- lists_(ZONE->NewArray<HValueMapListElement>(other->lists_size_)),
- free_list_head_(other->free_list_head_) {
- memcpy(array_, other->array_, array_size_ * sizeof(HValueMapListElement));
- memcpy(lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
-}
-
-
-void HValueMap::Kill(int flags) {
- int depends_flags = HValue::ConvertChangesToDependsFlags(flags);
- if ((present_flags_ & depends_flags) == 0) return;
- present_flags_ = 0;
- for (int i = 0; i < array_size_; ++i) {
- HValue* value = array_[i].value;
- if (value != NULL) {
- // Clear list of collisions first, so we know if it becomes empty.
- int kept = kNil; // List of kept elements.
- int next;
- for (int current = array_[i].next; current != kNil; current = next) {
- next = lists_[current].next;
- if ((lists_[current].value->flags() & depends_flags) != 0) {
- // Drop it.
- count_--;
- lists_[current].next = free_list_head_;
- free_list_head_ = current;
- } else {
- // Keep it.
- lists_[current].next = kept;
- kept = current;
- present_flags_ |= lists_[current].value->flags();
- }
- }
- array_[i].next = kept;
-
- // Now possibly drop directly indexed element.
- if ((array_[i].value->flags() & depends_flags) != 0) { // Drop it.
- count_--;
- int head = array_[i].next;
- if (head == kNil) {
- array_[i].value = NULL;
- } else {
- array_[i].value = lists_[head].value;
- array_[i].next = lists_[head].next;
- lists_[head].next = free_list_head_;
- free_list_head_ = head;
- }
- } else {
- present_flags_ |= array_[i].value->flags(); // Keep it.
- }
- }
- }
-}
-
-
-HValue* HValueMap::Lookup(HValue* value) const {
- uint32_t hash = static_cast<uint32_t>(value->Hashcode());
- uint32_t pos = Bound(hash);
- if (array_[pos].value != NULL) {
- if (array_[pos].value->Equals(value)) return array_[pos].value;
- int next = array_[pos].next;
- while (next != kNil) {
- if (lists_[next].value->Equals(value)) return lists_[next].value;
- next = lists_[next].next;
- }
- }
- return NULL;
-}
-
-
-void HValueMap::Resize(int new_size) {
- ASSERT(new_size > count_);
- // Hashing the values into the new array has no more collisions than in the
- // old hash map, so we can use the existing lists_ array, if we are careful.
-
- // Make sure we have at least one free element.
- if (free_list_head_ == kNil) {
- ResizeLists(lists_size_ << 1);
- }
-
- HValueMapListElement* new_array =
- ZONE->NewArray<HValueMapListElement>(new_size);
- memset(new_array, 0, sizeof(HValueMapListElement) * new_size);
-
- HValueMapListElement* old_array = array_;
- int old_size = array_size_;
-
- int old_count = count_;
- count_ = 0;
- // Do not modify present_flags_. It is currently correct.
- array_size_ = new_size;
- array_ = new_array;
-
- if (old_array != NULL) {
- // Iterate over all the elements in lists, rehashing them.
- for (int i = 0; i < old_size; ++i) {
- if (old_array[i].value != NULL) {
- int current = old_array[i].next;
- while (current != kNil) {
- Insert(lists_[current].value);
- int next = lists_[current].next;
- lists_[current].next = free_list_head_;
- free_list_head_ = current;
- current = next;
- }
- // Rehash the directly stored value.
- Insert(old_array[i].value);
- }
- }
- }
- USE(old_count);
- ASSERT(count_ == old_count);
-}
-
-
-void HValueMap::ResizeLists(int new_size) {
- ASSERT(new_size > lists_size_);
-
- HValueMapListElement* new_lists =
- ZONE->NewArray<HValueMapListElement>(new_size);
- memset(new_lists, 0, sizeof(HValueMapListElement) * new_size);
-
- HValueMapListElement* old_lists = lists_;
- int old_size = lists_size_;
-
- lists_size_ = new_size;
- lists_ = new_lists;
-
- if (old_lists != NULL) {
- memcpy(lists_, old_lists, old_size * sizeof(HValueMapListElement));
- }
- for (int i = old_size; i < lists_size_; ++i) {
- lists_[i].next = free_list_head_;
- free_list_head_ = i;
- }
-}
-
-
-void HValueMap::Insert(HValue* value) {
- ASSERT(value != NULL);
- // Resizing when half of the hashtable is filled up.
- if (count_ >= array_size_ >> 1) Resize(array_size_ << 1);
- ASSERT(count_ < array_size_);
- count_++;
- uint32_t pos = Bound(static_cast<uint32_t>(value->Hashcode()));
- if (array_[pos].value == NULL) {
- array_[pos].value = value;
- array_[pos].next = kNil;
- } else {
- if (free_list_head_ == kNil) {
- ResizeLists(lists_size_ << 1);
- }
- int new_element_pos = free_list_head_;
- ASSERT(new_element_pos != kNil);
- free_list_head_ = lists_[free_list_head_].next;
- lists_[new_element_pos].value = value;
- lists_[new_element_pos].next = array_[pos].next;
- ASSERT(array_[pos].next == kNil || lists_[array_[pos].next].value != NULL);
- array_[pos].next = new_element_pos;
- }
-}
-
-
-class HStackCheckEliminator BASE_EMBEDDED {
- public:
- explicit HStackCheckEliminator(HGraph* graph) : graph_(graph) { }
-
- void Process();
-
- private:
- void RemoveStackCheck(HBasicBlock* block);
-
- HGraph* graph_;
-};
-
-
-void HStackCheckEliminator::Process() {
- // For each loop block walk the dominator tree from the backwards branch to
- // the loop header. If a call instruction is encountered the backwards branch
- // is dominated by a call and the stack check in the backwards branch can be
- // removed.
- for (int i = 0; i < graph_->blocks()->length(); i++) {
- HBasicBlock* block = graph_->blocks()->at(i);
- if (block->IsLoopHeader()) {
- HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
- HBasicBlock* dominator = back_edge;
- bool back_edge_dominated_by_call = false;
- while (dominator != block && !back_edge_dominated_by_call) {
- HInstruction* instr = dominator->first();
- while (instr != NULL && !back_edge_dominated_by_call) {
- if (instr->IsCall()) {
- RemoveStackCheck(back_edge);
- back_edge_dominated_by_call = true;
- }
- instr = instr->next();
- }
- dominator = dominator->dominator();
- }
- }
- }
-}
-
-
-void HStackCheckEliminator::RemoveStackCheck(HBasicBlock* block) {
- HInstruction* instr = block->first();
- while (instr != NULL) {
- if (instr->IsGoto()) {
- HGoto::cast(instr)->set_include_stack_check(false);
- return;
- }
- instr = instr->next();
- }
-}
-
-
-class HGlobalValueNumberer BASE_EMBEDDED {
- public:
- explicit HGlobalValueNumberer(HGraph* graph, CompilationInfo* info)
- : graph_(graph),
- info_(info),
- block_side_effects_(graph_->blocks()->length()),
- loop_side_effects_(graph_->blocks()->length()) {
- ASSERT(info->isolate()->heap()->allow_allocation(false));
- block_side_effects_.AddBlock(0, graph_->blocks()->length());
- loop_side_effects_.AddBlock(0, graph_->blocks()->length());
- }
- ~HGlobalValueNumberer() {
- ASSERT(!info_->isolate()->heap()->allow_allocation(true));
- }
-
- void Analyze();
-
- private:
- void AnalyzeBlock(HBasicBlock* block, HValueMap* map);
- void ComputeBlockSideEffects();
- void LoopInvariantCodeMotion();
- void ProcessLoopBlock(HBasicBlock* block,
- HBasicBlock* before_loop,
- int loop_kills);
- bool AllowCodeMotion();
- bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
-
- HGraph* graph() { return graph_; }
- CompilationInfo* info() { return info_; }
-
- HGraph* graph_;
- CompilationInfo* info_;
-
- // A map of block IDs to their side effects.
- ZoneList<int> block_side_effects_;
-
- // A map of loop header block IDs to their loop's side effects.
- ZoneList<int> loop_side_effects_;
-};
-
-
-void HGlobalValueNumberer::Analyze() {
- ComputeBlockSideEffects();
- if (FLAG_loop_invariant_code_motion) {
- LoopInvariantCodeMotion();
- }
- HValueMap* map = new HValueMap();
- AnalyzeBlock(graph_->blocks()->at(0), map);
-}
-
-
-void HGlobalValueNumberer::ComputeBlockSideEffects() {
- for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
- // Compute side effects for the block.
- HBasicBlock* block = graph_->blocks()->at(i);
- HInstruction* instr = block->first();
- int id = block->block_id();
- int side_effects = 0;
- while (instr != NULL) {
- side_effects |= (instr->flags() & HValue::ChangesFlagsMask());
- instr = instr->next();
- }
- block_side_effects_[id] |= side_effects;
-
- // Loop headers are part of their loop.
- if (block->IsLoopHeader()) {
- loop_side_effects_[id] |= side_effects;
- }
-
- // Propagate loop side effects upwards.
- if (block->HasParentLoopHeader()) {
- int header_id = block->parent_loop_header()->block_id();
- loop_side_effects_[header_id] |=
- block->IsLoopHeader() ? loop_side_effects_[id] : side_effects;
- }
- }
-}
-
-
-void HGlobalValueNumberer::LoopInvariantCodeMotion() {
- for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
- HBasicBlock* block = graph_->blocks()->at(i);
- if (block->IsLoopHeader()) {
- int side_effects = loop_side_effects_[block->block_id()];
- TraceGVN("Try loop invariant motion for block B%d effects=0x%x\n",
- block->block_id(),
- side_effects);
-
- HBasicBlock* last = block->loop_information()->GetLastBackEdge();
- for (int j = block->block_id(); j <= last->block_id(); ++j) {
- ProcessLoopBlock(graph_->blocks()->at(j), block, side_effects);
- }
- }
- }
-}
-
-
-void HGlobalValueNumberer::ProcessLoopBlock(HBasicBlock* block,
- HBasicBlock* loop_header,
- int loop_kills) {
- HBasicBlock* pre_header = loop_header->predecessors()->at(0);
- int depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
- TraceGVN("Loop invariant motion for B%d depends_flags=0x%x\n",
- block->block_id(),
- depends_flags);
- HInstruction* instr = block->first();
- while (instr != NULL) {
- HInstruction* next = instr->next();
- if (instr->CheckFlag(HValue::kUseGVN) &&
- (instr->flags() & depends_flags) == 0) {
- TraceGVN("Checking instruction %d (%s)\n",
- instr->id(),
- instr->Mnemonic());
- bool inputs_loop_invariant = true;
- for (int i = 0; i < instr->OperandCount(); ++i) {
- if (instr->OperandAt(i)->IsDefinedAfter(pre_header)) {
- inputs_loop_invariant = false;
- }
- }
-
- if (inputs_loop_invariant && ShouldMove(instr, loop_header)) {
- TraceGVN("Found loop invariant instruction %d\n", instr->id());
- // Move the instruction out of the loop.
- instr->Unlink();
- instr->InsertBefore(pre_header->end());
- }
- }
- instr = next;
- }
-}
-
-
-bool HGlobalValueNumberer::AllowCodeMotion() {
- return info()->shared_info()->opt_count() + 1 < Compiler::kDefaultMaxOptCount;
-}
-
-
-bool HGlobalValueNumberer::ShouldMove(HInstruction* instr,
- HBasicBlock* loop_header) {
- // If we've disabled code motion, don't move any instructions.
- if (!AllowCodeMotion()) return false;
-
- // If --aggressive-loop-invariant-motion, move everything except change
- // instructions.
- if (FLAG_aggressive_loop_invariant_motion && !instr->IsChange()) {
- return true;
- }
-
- // Otherwise only move instructions that postdominate the loop header
- // (i.e. are always executed inside the loop). This is to avoid
- // unnecessary deoptimizations assuming the loop is executed at least
- // once. TODO(fschneider): Better type feedback should give us
- // information about code that was never executed.
- HBasicBlock* block = instr->block();
- bool result = true;
- if (block != loop_header) {
- for (int i = 1; i < loop_header->predecessors()->length(); ++i) {
- bool found = false;
- HBasicBlock* pred = loop_header->predecessors()->at(i);
- while (pred != loop_header) {
- if (pred == block) found = true;
- pred = pred->dominator();
- }
- if (!found) {
- result = false;
- break;
- }
- }
- }
- return result;
-}
-
-
-void HGlobalValueNumberer::AnalyzeBlock(HBasicBlock* block, HValueMap* map) {
- TraceGVN("Analyzing block B%d\n", block->block_id());
-
- // If this is a loop header kill everything killed by the loop.
- if (block->IsLoopHeader()) {
- map->Kill(loop_side_effects_[block->block_id()]);
- }
-
- // Go through all instructions of the current block.
- HInstruction* instr = block->first();
- while (instr != NULL) {
- HInstruction* next = instr->next();
- int flags = (instr->flags() & HValue::ChangesFlagsMask());
- if (flags != 0) {
- ASSERT(!instr->CheckFlag(HValue::kUseGVN));
- // Clear all instructions in the map that are affected by side effects.
- map->Kill(flags);
- TraceGVN("Instruction %d kills\n", instr->id());
- } else if (instr->CheckFlag(HValue::kUseGVN)) {
- HValue* other = map->Lookup(instr);
- if (other != NULL) {
- ASSERT(instr->Equals(other) && other->Equals(instr));
- TraceGVN("Replacing value %d (%s) with value %d (%s)\n",
- instr->id(),
- instr->Mnemonic(),
- other->id(),
- other->Mnemonic());
- instr->ReplaceAndDelete(other);
- } else {
- map->Add(instr);
- }
- }
- instr = next;
- }
-
- // Recursively continue analysis for all immediately dominated blocks.
- int length = block->dominated_blocks()->length();
- for (int i = 0; i < length; ++i) {
- HBasicBlock* dominated = block->dominated_blocks()->at(i);
- // No need to copy the map for the last child in the dominator tree.
- HValueMap* successor_map = (i == length - 1) ? map : map->Copy();
-
- // If the dominated block is not a successor to this block we have to
- // kill everything killed on any path between this block and the
- // dominated block. Note we rely on the block ordering.
- bool is_successor = false;
- int predecessor_count = dominated->predecessors()->length();
- for (int j = 0; !is_successor && j < predecessor_count; ++j) {
- is_successor = (dominated->predecessors()->at(j) == block);
- }
-
- if (!is_successor) {
- int side_effects = 0;
- for (int j = block->block_id() + 1; j < dominated->block_id(); ++j) {
- side_effects |= block_side_effects_[j];
- }
- successor_map->Kill(side_effects);
- }
-
- AnalyzeBlock(dominated, successor_map);
- }
-}
-
-
-class HInferRepresentation BASE_EMBEDDED {
- public:
- explicit HInferRepresentation(HGraph* graph)
- : graph_(graph), worklist_(8), in_worklist_(graph->GetMaximumValueID()) {}
-
- void Analyze();
-
- private:
- Representation TryChange(HValue* current);
- void AddToWorklist(HValue* current);
- void InferBasedOnInputs(HValue* current);
- void AddDependantsToWorklist(HValue* current);
- void InferBasedOnUses(HValue* current);
-
- HGraph* graph_;
- ZoneList<HValue*> worklist_;
- BitVector in_worklist_;
-};
-
-
-void HInferRepresentation::AddToWorklist(HValue* current) {
- if (current->representation().IsSpecialization()) return;
- if (!current->CheckFlag(HValue::kFlexibleRepresentation)) return;
- if (in_worklist_.Contains(current->id())) return;
- worklist_.Add(current);
- in_worklist_.Add(current->id());
-}
-
-
-// This method tries to specialize the representation type of the value
-// given as a parameter. The value is asked to infer its representation type
-// based on its inputs. If the inferred type is more specialized, then this
-// becomes the new representation type of the node.
-void HInferRepresentation::InferBasedOnInputs(HValue* current) {
- Representation r = current->representation();
- if (r.IsSpecialization()) return;
- ASSERT(current->CheckFlag(HValue::kFlexibleRepresentation));
- Representation inferred = current->InferredRepresentation();
- if (inferred.IsSpecialization()) {
- current->ChangeRepresentation(inferred);
- AddDependantsToWorklist(current);
- }
-}
-
-
-void HInferRepresentation::AddDependantsToWorklist(HValue* current) {
- for (int i = 0; i < current->uses()->length(); ++i) {
- AddToWorklist(current->uses()->at(i));
- }
- for (int i = 0; i < current->OperandCount(); ++i) {
- AddToWorklist(current->OperandAt(i));
- }
-}
-
-
-// This method calculates whether specializing the representation of the value
-// given as the parameter has a benefit in terms of less necessary type
-// conversions. If there is a benefit, then the representation of the value is
-// specialized.
-void HInferRepresentation::InferBasedOnUses(HValue* current) {
- Representation r = current->representation();
- if (r.IsSpecialization() || current->HasNoUses()) return;
- ASSERT(current->CheckFlag(HValue::kFlexibleRepresentation));
- Representation new_rep = TryChange(current);
- if (!new_rep.IsNone()) {
- if (!current->representation().Equals(new_rep)) {
- current->ChangeRepresentation(new_rep);
- AddDependantsToWorklist(current);
- }
- }
-}
-
-
-Representation HInferRepresentation::TryChange(HValue* current) {
- // Array of use counts for each representation.
- int use_count[Representation::kNumRepresentations];
- for (int i = 0; i < Representation::kNumRepresentations; i++) {
- use_count[i] = 0;
- }
-
- for (int i = 0; i < current->uses()->length(); ++i) {
- HValue* use = current->uses()->at(i);
- int index = use->LookupOperandIndex(0, current);
- Representation req_rep = use->RequiredInputRepresentation(index);
- if (req_rep.IsNone()) continue;
- if (use->IsPhi()) {
- HPhi* phi = HPhi::cast(use);
- phi->AddIndirectUsesTo(&use_count[0]);
- }
- use_count[req_rep.kind()]++;
- }
- int tagged_count = use_count[Representation::kTagged];
- int double_count = use_count[Representation::kDouble];
- int int32_count = use_count[Representation::kInteger32];
- int non_tagged_count = double_count + int32_count;
-
- // If a non-loop phi has tagged uses, don't convert it to untagged.
- if (current->IsPhi() && !current->block()->IsLoopHeader()) {
- if (tagged_count > 0) return Representation::None();
- }
-
- if (non_tagged_count >= tagged_count) {
- // More untagged than tagged.
- if (double_count > 0) {
- // There is at least one usage that is a double => guess that the
- // correct representation is double.
- return Representation::Double();
- } else if (int32_count > 0) {
- return Representation::Integer32();
- }
- }
- return Representation::None();
-}
-
-
-void HInferRepresentation::Analyze() {
- HPhase phase("Infer representations", graph_);
-
- // (1) Initialize bit vectors and count real uses. Each phi
- // gets a bit-vector of length <number of phis>.
- const ZoneList<HPhi*>* phi_list = graph_->phi_list();
- int num_phis = phi_list->length();
- ScopedVector<BitVector*> connected_phis(num_phis);
- for (int i = 0; i < num_phis; i++) {
- phi_list->at(i)->InitRealUses(i);
- connected_phis[i] = new BitVector(num_phis);
- connected_phis[i]->Add(i);
- }
-
- // (2) Do a fixed point iteration to find the set of connected phis.
- // A phi is connected to another phi if its value is used either
- // directly or indirectly through a transitive closure of the def-use
- // relation.
- bool change = true;
- while (change) {
- change = false;
- for (int i = 0; i < num_phis; i++) {
- HPhi* phi = phi_list->at(i);
- for (int j = 0; j < phi->uses()->length(); j++) {
- HValue* use = phi->uses()->at(j);
- if (use->IsPhi()) {
- int phi_use = HPhi::cast(use)->phi_id();
- if (connected_phis[i]->UnionIsChanged(*connected_phis[phi_use])) {
- change = true;
- }
- }
- }
- }
- }
-
- // (3) Sum up the non-phi use counts of all connected phis.
- // Don't include the non-phi uses of the phi itself.
- for (int i = 0; i < num_phis; i++) {
- HPhi* phi = phi_list->at(i);
- for (BitVector::Iterator it(connected_phis.at(i));
- !it.Done();
- it.Advance()) {
- int index = it.Current();
- if (index != i) {
- HPhi* it_use = phi_list->at(it.Current());
- phi->AddNonPhiUsesFrom(it_use);
- }
- }
- }
-
- for (int i = 0; i < graph_->blocks()->length(); ++i) {
- HBasicBlock* block = graph_->blocks()->at(i);
- const ZoneList<HPhi*>* phis = block->phis();
- for (int j = 0; j < phis->length(); ++j) {
- AddToWorklist(phis->at(j));
- }
-
- HInstruction* current = block->first();
- while (current != NULL) {
- AddToWorklist(current);
- current = current->next();
- }
- }
-
- while (!worklist_.is_empty()) {
- HValue* current = worklist_.RemoveLast();
- in_worklist_.Remove(current->id());
- InferBasedOnInputs(current);
- InferBasedOnUses(current);
- }
-}
-
-
-void HGraph::InitializeInferredTypes() {
- HPhase phase("Inferring types", this);
- InitializeInferredTypes(0, this->blocks_.length() - 1);
-}
-
-
-void HGraph::InitializeInferredTypes(int from_inclusive, int to_inclusive) {
- for (int i = from_inclusive; i <= to_inclusive; ++i) {
- HBasicBlock* block = blocks_[i];
-
- const ZoneList<HPhi*>* phis = block->phis();
- for (int j = 0; j < phis->length(); j++) {
- phis->at(j)->UpdateInferredType();
- }
-
- HInstruction* current = block->first();
- while (current != NULL) {
- current->UpdateInferredType();
- current = current->next();
- }
-
- if (block->IsLoopHeader()) {
- HBasicBlock* last_back_edge =
- block->loop_information()->GetLastBackEdge();
- InitializeInferredTypes(i + 1, last_back_edge->block_id());
- // Skip all blocks already processed by the recursive call.
- i = last_back_edge->block_id();
- // Update phis of the loop header now after the whole loop body is
- // guaranteed to be processed.
- ZoneList<HValue*> worklist(block->phis()->length());
- for (int j = 0; j < block->phis()->length(); ++j) {
- worklist.Add(block->phis()->at(j));
- }
- InferTypes(&worklist);
- }
- }
-}
-
-
-void HGraph::PropagateMinusZeroChecks(HValue* value, BitVector* visited) {
- HValue* current = value;
- while (current != NULL) {
- if (visited->Contains(current->id())) return;
-
- // For phis, we must propagate the check to all of its inputs.
- if (current->IsPhi()) {
- visited->Add(current->id());
- HPhi* phi = HPhi::cast(current);
- for (int i = 0; i < phi->OperandCount(); ++i) {
- PropagateMinusZeroChecks(phi->OperandAt(i), visited);
- }
- break;
- }
-
- // For multiplication and division, we must propagate to the left and
- // the right side.
- if (current->IsMul()) {
- HMul* mul = HMul::cast(current);
- mul->EnsureAndPropagateNotMinusZero(visited);
- PropagateMinusZeroChecks(mul->left(), visited);
- PropagateMinusZeroChecks(mul->right(), visited);
- } else if (current->IsDiv()) {
- HDiv* div = HDiv::cast(current);
- div->EnsureAndPropagateNotMinusZero(visited);
- PropagateMinusZeroChecks(div->left(), visited);
- PropagateMinusZeroChecks(div->right(), visited);
- }
-
- current = current->EnsureAndPropagateNotMinusZero(visited);
- }
-}
-
-
-void HGraph::InsertRepresentationChangeForUse(HValue* value,
- HValue* use,
- Representation to) {
- // Insert the representation change right before its use. For phi-uses we
- // insert at the end of the corresponding predecessor.
- HInstruction* next = NULL;
- if (use->IsPhi()) {
- int index = 0;
- while (use->OperandAt(index) != value) ++index;
- next = use->block()->predecessors()->at(index)->end();
- } else {
- next = HInstruction::cast(use);
- }
-
- // For constants we try to make the representation change at compile
- // time. When a representation change is not possible without loss of
- // information we treat constants like normal instructions and insert the
- // change instructions for them.
- HInstruction* new_value = NULL;
- bool is_truncating = use->CheckFlag(HValue::kTruncatingToInt32);
- if (value->IsConstant()) {
- HConstant* constant = HConstant::cast(value);
- // Try to create a new copy of the constant with the new representation.
- new_value = is_truncating
- ? constant->CopyToTruncatedInt32()
- : constant->CopyToRepresentation(to);
- }
-
- if (new_value == NULL) {
- new_value = new HChange(value, value->representation(), to, is_truncating);
- }
-
- new_value->InsertBefore(next);
- value->ReplaceFirstAtUse(use, new_value, to);
-}
-
-
-int CompareConversionUses(HValue* a,
- HValue* b,
- Representation a_rep,
- Representation b_rep) {
- if (a_rep.kind() > b_rep.kind()) {
- // Make sure specializations are separated in the result array.
- return 1;
- }
- // Put truncating conversions before non-truncating conversions.
- bool a_truncate = a->CheckFlag(HValue::kTruncatingToInt32);
- bool b_truncate = b->CheckFlag(HValue::kTruncatingToInt32);
- if (a_truncate != b_truncate) {
- return a_truncate ? -1 : 1;
- }
- // Sort by increasing block ID.
- return a->block()->block_id() - b->block()->block_id();
-}
-
-
-void HGraph::InsertRepresentationChangesForValue(
- HValue* current,
- ZoneList<HValue*>* to_convert,
- ZoneList<Representation>* to_convert_reps) {
- Representation r = current->representation();
- if (r.IsNone()) return;
- if (current->uses()->length() == 0) return;
-
- // Collect the representation changes in a sorted list. This allows
- // us to avoid duplicate changes without searching the list.
- ASSERT(to_convert->is_empty());
- ASSERT(to_convert_reps->is_empty());
- for (int i = 0; i < current->uses()->length(); ++i) {
- HValue* use = current->uses()->at(i);
- // The occurrences index means the index within the operand array of "use"
- // at which "current" is used. While iterating through the use array we
- // also have to iterate over the different occurrence indices.
- int occurrence_index = 0;
- if (use->UsesMultipleTimes(current)) {
- occurrence_index = current->uses()->CountOccurrences(use, 0, i - 1);
- if (FLAG_trace_representation) {
- PrintF("Instruction %d is used multiple times at %d; occurrence=%d\n",
- current->id(),
- use->id(),
- occurrence_index);
- }
- }
- int operand_index = use->LookupOperandIndex(occurrence_index, current);
- Representation req = use->RequiredInputRepresentation(operand_index);
- if (req.IsNone() || req.Equals(r)) continue;
- int index = 0;
- while (index < to_convert->length() &&
- CompareConversionUses(to_convert->at(index),
- use,
- to_convert_reps->at(index),
- req) < 0) {
- ++index;
- }
- if (FLAG_trace_representation) {
- PrintF("Inserting a representation change to %s of %d for use at %d\n",
- req.Mnemonic(),
- current->id(),
- use->id());
- }
- to_convert->InsertAt(index, use);
- to_convert_reps->InsertAt(index, req);
- }
-
- for (int i = 0; i < to_convert->length(); ++i) {
- HValue* use = to_convert->at(i);
- Representation r_to = to_convert_reps->at(i);
- InsertRepresentationChangeForUse(current, use, r_to);
- }
-
- if (current->uses()->is_empty()) {
- ASSERT(current->IsConstant());
- current->Delete();
- }
- to_convert->Rewind(0);
- to_convert_reps->Rewind(0);
-}
-
-
-void HGraph::InsertRepresentationChanges() {
- HPhase phase("Insert representation changes", this);
-
-
- // Compute truncation flag for phis: Initially assume that all
- // int32-phis allow truncation and iteratively remove the ones that
- // are used in an operation that does not allow a truncating
- // conversion.
- // TODO(fschneider): Replace this with a worklist-based iteration.
- for (int i = 0; i < phi_list()->length(); i++) {
- HPhi* phi = phi_list()->at(i);
- if (phi->representation().IsInteger32()) {
- phi->SetFlag(HValue::kTruncatingToInt32);
- }
- }
- bool change = true;
- while (change) {
- change = false;
- for (int i = 0; i < phi_list()->length(); i++) {
- HPhi* phi = phi_list()->at(i);
- if (!phi->CheckFlag(HValue::kTruncatingToInt32)) continue;
- for (int j = 0; j < phi->uses()->length(); j++) {
- HValue* use = phi->uses()->at(j);
- if (!use->CheckFlag(HValue::kTruncatingToInt32)) {
- phi->ClearFlag(HValue::kTruncatingToInt32);
- change = true;
- break;
- }
- }
- }
- }
-
- ZoneList<HValue*> value_list(4);
- ZoneList<Representation> rep_list(4);
- for (int i = 0; i < blocks_.length(); ++i) {
- // Process phi instructions first.
- for (int j = 0; j < blocks_[i]->phis()->length(); j++) {
- HPhi* phi = blocks_[i]->phis()->at(j);
- InsertRepresentationChangesForValue(phi, &value_list, &rep_list);
- }
-
- // Process normal instructions.
- HInstruction* current = blocks_[i]->first();
- while (current != NULL) {
- InsertRepresentationChangesForValue(current, &value_list, &rep_list);
- current = current->next();
- }
- }
-}
-
-
-void HGraph::ComputeMinusZeroChecks() {
- BitVector visited(GetMaximumValueID());
- for (int i = 0; i < blocks_.length(); ++i) {
- for (HInstruction* current = blocks_[i]->first();
- current != NULL;
- current = current->next()) {
- if (current->IsChange()) {
- HChange* change = HChange::cast(current);
- // Propagate flags for negative zero checks upwards from conversions
- // int32-to-tagged and int32-to-double.
- Representation from = change->value()->representation();
- ASSERT(from.Equals(change->from()));
- if (from.IsInteger32()) {
- ASSERT(change->to().IsTagged() || change->to().IsDouble());
- ASSERT(visited.IsEmpty());
- PropagateMinusZeroChecks(change->value(), &visited);
- visited.Clear();
- }
- }
- }
- }
-}
-
-
-// Implementation of utility class to encapsulate the translation state for
-// a (possibly inlined) function.
-FunctionState::FunctionState(HGraphBuilder* owner,
- CompilationInfo* info,
- TypeFeedbackOracle* oracle)
- : owner_(owner),
- compilation_info_(info),
- oracle_(oracle),
- call_context_(NULL),
- function_return_(NULL),
- test_context_(NULL),
- outer_(owner->function_state()) {
- if (outer_ != NULL) {
- // State for an inline function.
- if (owner->ast_context()->IsTest()) {
- HBasicBlock* if_true = owner->graph()->CreateBasicBlock();
- HBasicBlock* if_false = owner->graph()->CreateBasicBlock();
- if_true->MarkAsInlineReturnTarget();
- if_false->MarkAsInlineReturnTarget();
- // The AstContext constructor pushed on the context stack. This newed
- // instance is the reason that AstContext can't be BASE_EMBEDDED.
- test_context_ = new TestContext(owner, if_true, if_false);
- } else {
- function_return_ = owner->graph()->CreateBasicBlock();
- function_return()->MarkAsInlineReturnTarget();
- }
- // Set this after possibly allocating a new TestContext above.
- call_context_ = owner->ast_context();
- }
-
- // Push on the state stack.
- owner->set_function_state(this);
-}
-
-
-FunctionState::~FunctionState() {
- delete test_context_;
- owner_->set_function_state(outer_);
-}
-
-
-// Implementation of utility classes to represent an expression's context in
-// the AST.
-AstContext::AstContext(HGraphBuilder* owner, Expression::Context kind)
- : owner_(owner),
- kind_(kind),
- outer_(owner->ast_context()),
- for_typeof_(false) {
- owner->set_ast_context(this); // Push.
-#ifdef DEBUG
- original_length_ = owner->environment()->length();
-#endif
-}
-
-
-AstContext::~AstContext() {
- owner_->set_ast_context(outer_); // Pop.
-}
-
-
-EffectContext::~EffectContext() {
- ASSERT(owner()->HasStackOverflow() ||
- owner()->current_block() == NULL ||
- owner()->environment()->length() == original_length_);
-}
-
-
-ValueContext::~ValueContext() {
- ASSERT(owner()->HasStackOverflow() ||
- owner()->current_block() == NULL ||
- owner()->environment()->length() == original_length_ + 1);
-}
-
-
-void EffectContext::ReturnValue(HValue* value) {
- // The value is simply ignored.
-}
-
-
-void ValueContext::ReturnValue(HValue* value) {
- // The value is tracked in the bailout environment, and communicated
- // through the environment as the result of the expression.
- owner()->Push(value);
-}
-
-
-void TestContext::ReturnValue(HValue* value) {
- BuildBranch(value);
-}
-
-
-void EffectContext::ReturnInstruction(HInstruction* instr, int ast_id) {
- owner()->AddInstruction(instr);
- if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
-}
-
-
-void ValueContext::ReturnInstruction(HInstruction* instr, int ast_id) {
- owner()->AddInstruction(instr);
- owner()->Push(instr);
- if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
-}
-
-
-void TestContext::ReturnInstruction(HInstruction* instr, int ast_id) {
- HGraphBuilder* builder = owner();
- builder->AddInstruction(instr);
- // We expect a simulate after every expression with side effects, though
- // this one isn't actually needed (and wouldn't work if it were targeted).
- if (instr->HasSideEffects()) {
- builder->Push(instr);
- builder->AddSimulate(ast_id);
- builder->Pop();
- }
- BuildBranch(instr);
-}
-
-
-void TestContext::BuildBranch(HValue* value) {
- // We expect the graph to be in edge-split form: there is no edge that
- // connects a branch node to a join node. We conservatively ensure that
- // property by always adding an empty block on the outgoing edges of this
- // branch.
- HGraphBuilder* builder = owner();
- HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
- HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
- HTest* test = new HTest(value, empty_true, empty_false);
- builder->current_block()->Finish(test);
-
- empty_true->Goto(if_true(), false);
- empty_false->Goto(if_false(), false);
- builder->set_current_block(NULL);
-}
-
-
-// HGraphBuilder infrastructure for bailing out and checking bailouts.
-#define BAILOUT(reason) \
- do { \
- Bailout(reason); \
- return; \
- } while (false)
-
-
-#define CHECK_BAILOUT \
- do { \
- if (HasStackOverflow()) return; \
- } while (false)
-
-
-#define VISIT_FOR_EFFECT(expr) \
- do { \
- VisitForEffect(expr); \
- if (HasStackOverflow()) return; \
- } while (false)
-
-
-#define VISIT_FOR_VALUE(expr) \
- do { \
- VisitForValue(expr); \
- if (HasStackOverflow()) return; \
- } while (false)
-
-
-#define VISIT_FOR_CONTROL(expr, true_block, false_block) \
- do { \
- VisitForControl(expr, true_block, false_block); \
- if (HasStackOverflow()) return; \
- } while (false)
-
-
-void HGraphBuilder::Bailout(const char* reason) {
- if (FLAG_trace_bailout) {
- SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
- PrintF("Bailout in HGraphBuilder: @\"%s\": %s\n", *name, reason);
- }
- SetStackOverflow();
-}
-
-
-void HGraphBuilder::VisitForEffect(Expression* expr) {
- EffectContext for_effect(this);
- Visit(expr);
-}
-
-
-void HGraphBuilder::VisitForValue(Expression* expr) {
- ValueContext for_value(this);
- Visit(expr);
-}
-
-
-void HGraphBuilder::VisitForTypeOf(Expression* expr) {
- ValueContext for_value(this);
- for_value.set_for_typeof(true);
- Visit(expr);
-}
-
-
-
-void HGraphBuilder::VisitForControl(Expression* expr,
- HBasicBlock* true_block,
- HBasicBlock* false_block) {
- TestContext for_test(this, true_block, false_block);
- Visit(expr);
-}
-
-
-void HGraphBuilder::VisitArgument(Expression* expr) {
- VISIT_FOR_VALUE(expr);
- Push(AddInstruction(new HPushArgument(Pop())));
-}
-
-
-void HGraphBuilder::VisitArgumentList(ZoneList<Expression*>* arguments) {
- for (int i = 0; i < arguments->length(); i++) {
- VisitArgument(arguments->at(i));
- if (HasStackOverflow() || current_block() == NULL) return;
- }
-}
-
-
-void HGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs) {
- for (int i = 0; i < exprs->length(); ++i) {
- VISIT_FOR_VALUE(exprs->at(i));
- }
-}
-
-
-HGraph* HGraphBuilder::CreateGraph() {
- graph_ = new HGraph(info());
- if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info());
-
- {
- HPhase phase("Block building");
- current_block_ = graph()->entry_block();
-
- Scope* scope = info()->scope();
- if (scope->HasIllegalRedeclaration()) {
- Bailout("function with illegal redeclaration");
- return NULL;
- }
- SetupScope(scope);
- VisitDeclarations(scope->declarations());
- AddInstruction(new HStackCheck());
-
- // Add an edge to the body entry. This is warty: the graph's start
- // environment will be used by the Lithium translation as the initial
- // environment on graph entry, but it has now been mutated by the
- // Hydrogen translation of the instructions in the start block. This
- // environment uses values which have not been defined yet. These
- // Hydrogen instructions will then be replayed by the Lithium
- // translation, so they cannot have an environment effect. The edge to
- // the body's entry block (along with some special logic for the start
- // block in HInstruction::InsertAfter) seals the start block from
- // getting unwanted instructions inserted.
- //
- // TODO(kmillikin): Fix this. Stop mutating the initial environment.
- // Make the Hydrogen instructions in the initial block into Hydrogen
- // values (but not instructions), present in the initial environment and
- // not replayed by the Lithium translation.
- HEnvironment* initial_env = environment()->CopyWithoutHistory();
- HBasicBlock* body_entry = CreateBasicBlock(initial_env);
- current_block()->Goto(body_entry);
- body_entry->SetJoinId(info()->function()->id());
- set_current_block(body_entry);
- VisitStatements(info()->function()->body());
- if (HasStackOverflow()) return NULL;
-
- if (current_block() != NULL) {
- HReturn* instr = new HReturn(graph()->GetConstantUndefined());
- current_block()->FinishExit(instr);
- set_current_block(NULL);
- }
- }
-
- graph()->OrderBlocks();
- graph()->AssignDominators();
- graph()->EliminateRedundantPhis();
- if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis();
- if (!graph()->CollectPhis()) {
- Bailout("Phi-use of arguments object");
- return NULL;
- }
-
- HInferRepresentation rep(graph());
- rep.Analyze();
-
- if (FLAG_use_range) {
- HRangeAnalysis rangeAnalysis(graph());
- rangeAnalysis.Analyze();
- }
-
- graph()->InitializeInferredTypes();
- graph()->Canonicalize();
- graph()->InsertRepresentationChanges();
- graph()->ComputeMinusZeroChecks();
-
- // Eliminate redundant stack checks on backwards branches.
- HStackCheckEliminator sce(graph());
- sce.Process();
-
- // Perform common subexpression elimination and loop-invariant code motion.
- if (FLAG_use_gvn) {
- HPhase phase("Global value numbering", graph());
- HGlobalValueNumberer gvn(graph(), info());
- gvn.Analyze();
- }
-
- return graph();
-}
-
-
-HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
- ASSERT(current_block() != NULL);
- current_block()->AddInstruction(instr);
- return instr;
-}
-
-
-void HGraphBuilder::AddSimulate(int id) {
- ASSERT(current_block() != NULL);
- current_block()->AddSimulate(id);
-}
-
-
-void HGraphBuilder::AddPhi(HPhi* instr) {
- ASSERT(current_block() != NULL);
- current_block()->AddPhi(instr);
-}
-
-
-void HGraphBuilder::PushAndAdd(HInstruction* instr) {
- Push(instr);
- AddInstruction(instr);
-}
-
-
-template <int V>
-HInstruction* HGraphBuilder::PreProcessCall(HCall<V>* call) {
- int count = call->argument_count();
- ZoneList<HValue*> arguments(count);
- for (int i = 0; i < count; ++i) {
- arguments.Add(Pop());
- }
-
- while (!arguments.is_empty()) {
- AddInstruction(new HPushArgument(arguments.RemoveLast()));
- }
- return call;
-}
-
-
-void HGraphBuilder::SetupScope(Scope* scope) {
- // We don't yet handle the function name for named function expressions.
- if (scope->function() != NULL) BAILOUT("named function expression");
-
- HConstant* undefined_constant = new HConstant(
- isolate()->factory()->undefined_value(), Representation::Tagged());
- AddInstruction(undefined_constant);
- graph_->set_undefined_constant(undefined_constant);
-
- // Set the initial values of parameters including "this". "This" has
- // parameter index 0.
- int count = scope->num_parameters() + 1;
- for (int i = 0; i < count; ++i) {
- HInstruction* parameter = AddInstruction(new HParameter(i));
- environment()->Bind(i, parameter);
- }
-
- // Set the initial values of stack-allocated locals.
- for (int i = count; i < environment()->length(); ++i) {
- environment()->Bind(i, undefined_constant);
- }
-
- // Handle the arguments and arguments shadow variables specially (they do
- // not have declarations).
- if (scope->arguments() != NULL) {
- if (!scope->arguments()->IsStackAllocated() ||
- (scope->arguments_shadow() != NULL &&
- !scope->arguments_shadow()->IsStackAllocated())) {
- BAILOUT("context-allocated arguments");
- }
- HArgumentsObject* object = new HArgumentsObject;
- AddInstruction(object);
- graph()->SetArgumentsObject(object);
- environment()->Bind(scope->arguments(), object);
- if (scope->arguments_shadow() != NULL) {
- environment()->Bind(scope->arguments_shadow(), object);
- }
- }
-}
-
-
-void HGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
- for (int i = 0; i < statements->length(); i++) {
- Visit(statements->at(i));
- if (HasStackOverflow() || current_block() == NULL) break;
- }
-}
-
-
-HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) {
- HBasicBlock* b = graph()->CreateBasicBlock();
- b->SetInitialEnvironment(env);
- return b;
-}
-
-
-HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() {
- HBasicBlock* header = graph()->CreateBasicBlock();
- HEnvironment* entry_env = environment()->CopyAsLoopHeader(header);
- header->SetInitialEnvironment(entry_env);
- header->AttachLoopInformation();
- return header;
-}
-
-
-void HGraphBuilder::VisitBlock(Block* stmt) {
- BreakAndContinueInfo break_info(stmt);
- { BreakAndContinueScope push(&break_info, this);
- VisitStatements(stmt->statements());
- CHECK_BAILOUT;
- }
- HBasicBlock* break_block = break_info.break_block();
- if (break_block != NULL) {
- if (current_block() != NULL) current_block()->Goto(break_block);
- break_block->SetJoinId(stmt->ExitId());
- set_current_block(break_block);
- }
-}
-
-
-void HGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
- VisitForEffect(stmt->expression());
-}
-
-
-void HGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
-}
-
-
-void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
- if (stmt->condition()->ToBooleanIsTrue()) {
- AddSimulate(stmt->ThenId());
- Visit(stmt->then_statement());
- } else if (stmt->condition()->ToBooleanIsFalse()) {
- AddSimulate(stmt->ElseId());
- Visit(stmt->else_statement());
- } else {
- HBasicBlock* cond_true = graph()->CreateBasicBlock();
- HBasicBlock* cond_false = graph()->CreateBasicBlock();
- VISIT_FOR_CONTROL(stmt->condition(), cond_true, cond_false);
- cond_true->SetJoinId(stmt->ThenId());
- cond_false->SetJoinId(stmt->ElseId());
-
- set_current_block(cond_true);
- Visit(stmt->then_statement());
- CHECK_BAILOUT;
- HBasicBlock* other = current_block();
-
- set_current_block(cond_false);
- Visit(stmt->else_statement());
- CHECK_BAILOUT;
-
- HBasicBlock* join = CreateJoin(other, current_block(), stmt->id());
- set_current_block(join);
- }
-}
-
-
-HBasicBlock* HGraphBuilder::BreakAndContinueScope::Get(
- BreakableStatement* stmt,
- BreakType type) {
- BreakAndContinueScope* current = this;
- while (current != NULL && current->info()->target() != stmt) {
- current = current->next();
- }
- ASSERT(current != NULL); // Always found (unless stack is malformed).
- HBasicBlock* block = NULL;
- switch (type) {
- case BREAK:
- block = current->info()->break_block();
- if (block == NULL) {
- block = current->owner()->graph()->CreateBasicBlock();
- current->info()->set_break_block(block);
- }
- break;
-
- case CONTINUE:
- block = current->info()->continue_block();
- if (block == NULL) {
- block = current->owner()->graph()->CreateBasicBlock();
- current->info()->set_continue_block(block);
- }
- break;
- }
-
- return block;
-}
-
-
-void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
- HBasicBlock* continue_block = break_scope()->Get(stmt->target(), CONTINUE);
- current_block()->Goto(continue_block);
- set_current_block(NULL);
-}
-
-
-void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
- HBasicBlock* break_block = break_scope()->Get(stmt->target(), BREAK);
- current_block()->Goto(break_block);
- set_current_block(NULL);
-}
-
-
-void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
- AstContext* context = call_context();
- if (context == NULL) {
- // Not an inlined return, so an actual one.
- VISIT_FOR_VALUE(stmt->expression());
- HValue* result = environment()->Pop();
- current_block()->FinishExit(new HReturn(result));
- set_current_block(NULL);
- } else {
- // Return from an inlined function, visit the subexpression in the
- // expression context of the call.
- if (context->IsTest()) {
- TestContext* test = TestContext::cast(context);
- VisitForControl(stmt->expression(),
- test->if_true(),
- test->if_false());
- } else if (context->IsEffect()) {
- VISIT_FOR_EFFECT(stmt->expression());
- current_block()->Goto(function_return(), false);
- } else {
- ASSERT(context->IsValue());
- VISIT_FOR_VALUE(stmt->expression());
- HValue* return_value = environment()->Pop();
- current_block()->AddLeaveInlined(return_value, function_return());
- }
- set_current_block(NULL);
- }
-}
-
-
-void HGraphBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
- BAILOUT("WithEnterStatement");
-}
-
-
-void HGraphBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
- BAILOUT("WithExitStatement");
-}
-
-
-void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
- // We only optimize switch statements with smi-literal smi comparisons,
- // with a bounded number of clauses.
- const int kCaseClauseLimit = 128;
- ZoneList<CaseClause*>* clauses = stmt->cases();
- int clause_count = clauses->length();
- if (clause_count > kCaseClauseLimit) {
- BAILOUT("SwitchStatement: too many clauses");
- }
-
- VISIT_FOR_VALUE(stmt->tag());
- AddSimulate(stmt->EntryId());
- HValue* tag_value = Pop();
- HBasicBlock* first_test_block = current_block();
-
- // 1. Build all the tests, with dangling true branches. Unconditionally
- // deoptimize if we encounter a non-smi comparison.
- for (int i = 0; i < clause_count; ++i) {
- CaseClause* clause = clauses->at(i);
- if (clause->is_default()) continue;
- if (!clause->label()->IsSmiLiteral()) {
- BAILOUT("SwitchStatement: non-literal switch label");
- }
-
- // Unconditionally deoptimize on the first non-smi compare.
- clause->RecordTypeFeedback(oracle());
- if (!clause->IsSmiCompare()) {
- current_block()->FinishExitWithDeoptimization();
- set_current_block(NULL);
- break;
- }
-
- // Otherwise generate a compare and branch.
- VISIT_FOR_VALUE(clause->label());
- HValue* label_value = Pop();
- HCompare* compare = new HCompare(tag_value, label_value, Token::EQ_STRICT);
- compare->SetInputRepresentation(Representation::Integer32());
- ASSERT(!compare->HasSideEffects());
- AddInstruction(compare);
- HBasicBlock* body_block = graph()->CreateBasicBlock();
- HBasicBlock* next_test_block = graph()->CreateBasicBlock();
- HTest* branch = new HTest(compare, body_block, next_test_block);
- current_block()->Finish(branch);
- set_current_block(next_test_block);
- }
-
- // Save the current block to use for the default or to join with the
- // exit. This block is NULL if we deoptimized.
- HBasicBlock* last_block = current_block();
-
- // 2. Loop over the clauses and the linked list of tests in lockstep,
- // translating the clause bodies.
- HBasicBlock* curr_test_block = first_test_block;
- HBasicBlock* fall_through_block = NULL;
- BreakAndContinueInfo break_info(stmt);
- { BreakAndContinueScope push(&break_info, this);
- for (int i = 0; i < clause_count; ++i) {
- CaseClause* clause = clauses->at(i);
-
- // Identify the block where normal (non-fall-through) control flow
- // goes to.
- HBasicBlock* normal_block = NULL;
- if (clause->is_default() && last_block != NULL) {
- normal_block = last_block;
- last_block = NULL; // Cleared to indicate we've handled it.
- } else if (!curr_test_block->end()->IsDeoptimize()) {
- normal_block = curr_test_block->end()->FirstSuccessor();
- curr_test_block = curr_test_block->end()->SecondSuccessor();
- }
-
- // Identify a block to emit the body into.
- if (normal_block == NULL) {
- if (fall_through_block == NULL) {
- // (a) Unreachable.
- if (clause->is_default()) {
- continue; // Might still be reachable clause bodies.
- } else {
- break;
- }
- } else {
- // (b) Reachable only as fall through.
- set_current_block(fall_through_block);
- }
- } else if (fall_through_block == NULL) {
- // (c) Reachable only normally.
- set_current_block(normal_block);
- } else {
- // (d) Reachable both ways.
- HBasicBlock* join = CreateJoin(fall_through_block,
- normal_block,
- clause->EntryId());
- set_current_block(join);
- }
-
- VisitStatements(clause->statements());
- CHECK_BAILOUT;
- fall_through_block = current_block();
- }
- }
-
- // Create an up-to-3-way join. Use the break block if it exists since
- // it's already a join block.
- HBasicBlock* break_block = break_info.break_block();
- if (break_block == NULL) {
- set_current_block(CreateJoin(fall_through_block,
- last_block,
- stmt->ExitId()));
- } else {
- if (fall_through_block != NULL) fall_through_block->Goto(break_block);
- if (last_block != NULL) last_block->Goto(break_block);
- break_block->SetJoinId(stmt->ExitId());
- set_current_block(break_block);
- }
-}
-
-
-bool HGraphBuilder::HasOsrEntryAt(IterationStatement* statement) {
- return statement->OsrEntryId() == info()->osr_ast_id();
-}
-
-
-void HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
- if (!HasOsrEntryAt(statement)) return;
-
- HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
- HBasicBlock* osr_entry = graph()->CreateBasicBlock();
- HValue* true_value = graph()->GetConstantTrue();
- HTest* test = new HTest(true_value, non_osr_entry, osr_entry);
- current_block()->Finish(test);
-
- HBasicBlock* loop_predecessor = graph()->CreateBasicBlock();
- non_osr_entry->Goto(loop_predecessor);
-
- set_current_block(osr_entry);
- int osr_entry_id = statement->OsrEntryId();
- // We want the correct environment at the OsrEntry instruction. Build
- // it explicitly. The expression stack should be empty.
- int count = environment()->length();
- ASSERT(count ==
- (environment()->parameter_count() + environment()->local_count()));
- for (int i = 0; i < count; ++i) {
- HUnknownOSRValue* unknown = new HUnknownOSRValue;
- AddInstruction(unknown);
- environment()->Bind(i, unknown);
- }
-
- AddSimulate(osr_entry_id);
- AddInstruction(new HOsrEntry(osr_entry_id));
- current_block()->Goto(loop_predecessor);
- loop_predecessor->SetJoinId(statement->EntryId());
- set_current_block(loop_predecessor);
-}
-
-
-void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
- ASSERT(current_block() != NULL);
- PreProcessOsrEntry(stmt);
- HBasicBlock* loop_entry = CreateLoopHeaderBlock();
- current_block()->Goto(loop_entry, false);
- set_current_block(loop_entry);
-
- BreakAndContinueInfo break_info(stmt);
- { BreakAndContinueScope push(&break_info, this);
- Visit(stmt->body());
- CHECK_BAILOUT;
- }
- HBasicBlock* body_exit =
- JoinContinue(stmt, current_block(), break_info.continue_block());
- HBasicBlock* loop_successor = NULL;
- if (body_exit != NULL && !stmt->cond()->ToBooleanIsTrue()) {
- set_current_block(body_exit);
- // The block for a true condition, the actual predecessor block of the
- // back edge.
- body_exit = graph()->CreateBasicBlock();
- loop_successor = graph()->CreateBasicBlock();
- VISIT_FOR_CONTROL(stmt->cond(), body_exit, loop_successor);
- body_exit->SetJoinId(stmt->BackEdgeId());
- loop_successor->SetJoinId(stmt->ExitId());
- }
- HBasicBlock* loop_exit = CreateLoop(stmt,
- loop_entry,
- body_exit,
- loop_successor,
- break_info.break_block());
- set_current_block(loop_exit);
-}
-
-
-void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
- ASSERT(current_block() != NULL);
- PreProcessOsrEntry(stmt);
- HBasicBlock* loop_entry = CreateLoopHeaderBlock();
- current_block()->Goto(loop_entry, false);
- set_current_block(loop_entry);
-
- // If the condition is constant true, do not generate a branch.
- HBasicBlock* loop_successor = NULL;
- if (!stmt->cond()->ToBooleanIsTrue()) {
- HBasicBlock* body_entry = graph()->CreateBasicBlock();
- loop_successor = graph()->CreateBasicBlock();
- VISIT_FOR_CONTROL(stmt->cond(), body_entry, loop_successor);
- body_entry->SetJoinId(stmt->BodyId());
- loop_successor->SetJoinId(stmt->ExitId());
- set_current_block(body_entry);
- }
-
- BreakAndContinueInfo break_info(stmt);
- { BreakAndContinueScope push(&break_info, this);
- Visit(stmt->body());
- CHECK_BAILOUT;
- }
- HBasicBlock* body_exit =
- JoinContinue(stmt, current_block(), break_info.continue_block());
- HBasicBlock* loop_exit = CreateLoop(stmt,
- loop_entry,
- body_exit,
- loop_successor,
- break_info.break_block());
- set_current_block(loop_exit);
-}
-
-
-void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
- if (stmt->init() != NULL) {
- Visit(stmt->init());
- CHECK_BAILOUT;
- }
- ASSERT(current_block() != NULL);
- PreProcessOsrEntry(stmt);
- HBasicBlock* loop_entry = CreateLoopHeaderBlock();
- current_block()->Goto(loop_entry, false);
- set_current_block(loop_entry);
-
- HBasicBlock* loop_successor = NULL;
- if (stmt->cond() != NULL) {
- HBasicBlock* body_entry = graph()->CreateBasicBlock();
- loop_successor = graph()->CreateBasicBlock();
- VISIT_FOR_CONTROL(stmt->cond(), body_entry, loop_successor);
- body_entry->SetJoinId(stmt->BodyId());
- loop_successor->SetJoinId(stmt->ExitId());
- set_current_block(body_entry);
- }
-
- BreakAndContinueInfo break_info(stmt);
- { BreakAndContinueScope push(&break_info, this);
- Visit(stmt->body());
- CHECK_BAILOUT;
- }
- HBasicBlock* body_exit =
- JoinContinue(stmt, current_block(), break_info.continue_block());
-
- if (stmt->next() != NULL && body_exit != NULL) {
- set_current_block(body_exit);
- Visit(stmt->next());
- CHECK_BAILOUT;
- body_exit = current_block();
- }
-
- HBasicBlock* loop_exit = CreateLoop(stmt,
- loop_entry,
- body_exit,
- loop_successor,
- break_info.break_block());
- set_current_block(loop_exit);
-}
-
-
-void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
- BAILOUT("ForInStatement");
-}
-
-
-void HGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
- BAILOUT("TryCatchStatement");
-}
-
-
-void HGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- BAILOUT("TryFinallyStatement");
-}
-
-
-void HGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
- BAILOUT("DebuggerStatement");
-}
-
-
-static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
- Code* unoptimized_code, FunctionLiteral* expr) {
- int start_position = expr->start_position();
- RelocIterator it(unoptimized_code);
- for (;!it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- if (rinfo->rmode() != RelocInfo::EMBEDDED_OBJECT) continue;
- Object* obj = rinfo->target_object();
- if (obj->IsSharedFunctionInfo()) {
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
- if (shared->start_position() == start_position) {
- return Handle<SharedFunctionInfo>(shared);
- }
- }
- }
-
- return Handle<SharedFunctionInfo>();
-}
-
-
-void HGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
- Handle<SharedFunctionInfo> shared_info =
- SearchSharedFunctionInfo(info()->shared_info()->code(),
- expr);
- if (shared_info.is_null()) {
- shared_info = Compiler::BuildFunctionInfo(expr, info()->script());
- }
- CHECK_BAILOUT;
- HFunctionLiteral* instr =
- new HFunctionLiteral(shared_info, expr->pretenure());
- ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-void HGraphBuilder::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- BAILOUT("SharedFunctionInfoLiteral");
-}
-
-
-void HGraphBuilder::VisitConditional(Conditional* expr) {
- HBasicBlock* cond_true = graph()->CreateBasicBlock();
- HBasicBlock* cond_false = graph()->CreateBasicBlock();
- VISIT_FOR_CONTROL(expr->condition(), cond_true, cond_false);
- cond_true->SetJoinId(expr->ThenId());
- cond_false->SetJoinId(expr->ElseId());
-
- // Visit the true and false subexpressions in the same AST context as the
- // whole expression.
- set_current_block(cond_true);
- Visit(expr->then_expression());
- CHECK_BAILOUT;
- HBasicBlock* other = current_block();
-
- set_current_block(cond_false);
- Visit(expr->else_expression());
- CHECK_BAILOUT;
-
- if (!ast_context()->IsTest()) {
- HBasicBlock* join = CreateJoin(other, current_block(), expr->id());
- set_current_block(join);
- if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
- }
-}
-
-
-HGraphBuilder::GlobalPropertyAccess HGraphBuilder::LookupGlobalProperty(
- Variable* var, LookupResult* lookup, bool is_store) {
- if (var->is_this() || !info()->has_global_object()) {
- return kUseGeneric;
- }
- Handle<GlobalObject> global(info()->global_object());
- global->Lookup(*var->name(), lookup);
- if (!lookup->IsProperty() ||
- lookup->type() != NORMAL ||
- (is_store && lookup->IsReadOnly()) ||
- lookup->holder() != *global) {
- return kUseGeneric;
- }
-
- return kUseCell;
-}
-
-
-HValue* HGraphBuilder::BuildContextChainWalk(Variable* var) {
- ASSERT(var->IsContextSlot());
- HInstruction* context = new HContext;
- AddInstruction(context);
- int length = info()->scope()->ContextChainLength(var->scope());
- while (length-- > 0) {
- context = new HOuterContext(context);
- AddInstruction(context);
- }
- return context;
-}
-
-
-void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
- Variable* variable = expr->AsVariable();
- if (variable == NULL) {
- BAILOUT("reference to rewritten variable");
- } else if (variable->IsStackAllocated()) {
- if (environment()->Lookup(variable)->CheckFlag(HValue::kIsArguments)) {
- BAILOUT("unsupported context for arguments object");
- }
- ast_context()->ReturnValue(environment()->Lookup(variable));
- } else if (variable->IsContextSlot()) {
- if (variable->mode() == Variable::CONST) {
- BAILOUT("reference to const context slot");
- }
- HValue* context = BuildContextChainWalk(variable);
- int index = variable->AsSlot()->index();
- HLoadContextSlot* instr = new HLoadContextSlot(context, index);
- ast_context()->ReturnInstruction(instr, expr->id());
- } else if (variable->is_global()) {
- LookupResult lookup;
- GlobalPropertyAccess type = LookupGlobalProperty(variable, &lookup, false);
-
- if (type == kUseCell &&
- info()->global_object()->IsAccessCheckNeeded()) {
- type = kUseGeneric;
- }
-
- if (type == kUseCell) {
- Handle<GlobalObject> global(info()->global_object());
- Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
- bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
- HLoadGlobalCell* instr = new HLoadGlobalCell(cell, check_hole);
- ast_context()->ReturnInstruction(instr, expr->id());
- } else {
- HContext* context = new HContext;
- AddInstruction(context);
- HGlobalObject* global_object = new HGlobalObject(context);
- AddInstruction(global_object);
- HLoadGlobalGeneric* instr =
- new HLoadGlobalGeneric(context,
- global_object,
- variable->name(),
- ast_context()->is_for_typeof());
- instr->set_position(expr->position());
- ASSERT(instr->HasSideEffects());
- ast_context()->ReturnInstruction(instr, expr->id());
- }
- } else {
- BAILOUT("reference to a variable which requires dynamic lookup");
- }
-}
-
-
-void HGraphBuilder::VisitLiteral(Literal* expr) {
- HConstant* instr = new HConstant(expr->handle(), Representation::Tagged());
- ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
- HRegExpLiteral* instr = new HRegExpLiteral(expr->pattern(),
- expr->flags(),
- expr->literal_index());
- ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
- HContext* context = new HContext;
- AddInstruction(context);
- HObjectLiteral* literal = (new HObjectLiteral(context,
- expr->constant_properties(),
- expr->fast_elements(),
- expr->literal_index(),
- expr->depth(),
- expr->has_function()));
- // The object is expected in the bailout environment during computation
- // of the property values and is the value of the entire expression.
- PushAndAdd(literal);
-
- expr->CalculateEmitStore();
-
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
- if (property->IsCompileTimeValue()) continue;
-
- Literal* key = property->key();
- Expression* value = property->value();
-
- switch (property->kind()) {
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
- // Fall through.
- case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
- if (property->emit_store()) {
- VISIT_FOR_VALUE(value);
- HValue* value = Pop();
- Handle<String> name = Handle<String>::cast(key->handle());
- HStoreNamedGeneric* store =
- new HStoreNamedGeneric(context, literal, name, value);
- AddInstruction(store);
- AddSimulate(key->id());
- } else {
- VISIT_FOR_EFFECT(value);
- }
- break;
- }
- // Fall through.
- case ObjectLiteral::Property::PROTOTYPE:
- case ObjectLiteral::Property::SETTER:
- case ObjectLiteral::Property::GETTER:
- BAILOUT("Object literal with complex property");
- default: UNREACHABLE();
- }
- }
-
- if (expr->has_function()) {
- // Return the result of the transformation to fast properties
- // instead of the original since this operation changes the map
- // of the object. This makes sure that the original object won't
- // be used by other optimized code before it is transformed
- // (e.g. because of code motion).
- HToFastProperties* result = new HToFastProperties(Pop());
- AddInstruction(result);
- ast_context()->ReturnValue(result);
- } else {
- ast_context()->ReturnValue(Pop());
- }
-}
-
-
-void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
- ZoneList<Expression*>* subexprs = expr->values();
- int length = subexprs->length();
-
- HArrayLiteral* literal = new HArrayLiteral(expr->constant_elements(),
- length,
- expr->literal_index(),
- expr->depth());
- // The array is expected in the bailout environment during computation
- // of the property values and is the value of the entire expression.
- PushAndAdd(literal);
-
- HLoadElements* elements = NULL;
-
- for (int i = 0; i < length; i++) {
- Expression* subexpr = subexprs->at(i);
- // If the subexpression is a literal or a simple materialized literal it
- // is already set in the cloned array.
- if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
-
- VISIT_FOR_VALUE(subexpr);
- HValue* value = Pop();
- if (!Smi::IsValid(i)) BAILOUT("Non-smi key in array literal");
-
- // Load the elements array before the first store.
- if (elements == NULL) {
- elements = new HLoadElements(literal);
- AddInstruction(elements);
- }
-
- HValue* key = AddInstruction(new HConstant(Handle<Object>(Smi::FromInt(i)),
- Representation::Integer32()));
- AddInstruction(new HStoreKeyedFastElement(elements, key, value));
- AddSimulate(expr->GetIdForElement(i));
- }
- ast_context()->ReturnValue(Pop());
-}
-
-
-void HGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- BAILOUT("CatchExtensionObject");
-}
-
-
-// Sets the lookup result and returns true if the store can be inlined.
-static bool ComputeStoredField(Handle<Map> type,
- Handle<String> name,
- LookupResult* lookup) {
- type->LookupInDescriptors(NULL, *name, lookup);
- if (!lookup->IsPropertyOrTransition()) return false;
- if (lookup->type() == FIELD) return true;
- return (lookup->type() == MAP_TRANSITION) &&
- (type->unused_property_fields() > 0);
-}
-
-
-static int ComputeStoredFieldIndex(Handle<Map> type,
- Handle<String> name,
- LookupResult* lookup) {
- ASSERT(lookup->type() == FIELD || lookup->type() == MAP_TRANSITION);
- if (lookup->type() == FIELD) {
- return lookup->GetLocalFieldIndexFromMap(*type);
- } else {
- Map* transition = lookup->GetTransitionMapFromMap(*type);
- return transition->PropertyIndexFor(*name) - type->inobject_properties();
- }
-}
-
-
-HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
- Handle<String> name,
- HValue* value,
- Handle<Map> type,
- LookupResult* lookup,
- bool smi_and_map_check) {
- if (smi_and_map_check) {
- AddInstruction(new HCheckNonSmi(object));
- AddInstruction(new HCheckMap(object, type));
- }
-
- int index = ComputeStoredFieldIndex(type, name, lookup);
- bool is_in_object = index < 0;
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- offset += type->instance_size();
- } else {
- offset += FixedArray::kHeaderSize;
- }
- HStoreNamedField* instr =
- new HStoreNamedField(object, name, value, is_in_object, offset);
- if (lookup->type() == MAP_TRANSITION) {
- Handle<Map> transition(lookup->GetTransitionMapFromMap(*type));
- instr->set_transition(transition);
- // TODO(fschneider): Record the new map type of the object in the IR to
- // enable elimination of redundant checks after the transition store.
- instr->SetFlag(HValue::kChangesMaps);
- }
- return instr;
-}
-
-
-HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
- Handle<String> name,
- HValue* value) {
- HContext* context = new HContext;
- AddInstruction(context);
- return new HStoreNamedGeneric(context, object, name, value);
-}
-
-
-HInstruction* HGraphBuilder::BuildStoreNamed(HValue* object,
- HValue* value,
- Expression* expr) {
- Property* prop = (expr->AsProperty() != NULL)
- ? expr->AsProperty()
- : expr->AsAssignment()->target()->AsProperty();
- Literal* key = prop->key()->AsLiteral();
- Handle<String> name = Handle<String>::cast(key->handle());
- ASSERT(!name.is_null());
-
- LookupResult lookup;
- ZoneMapList* types = expr->GetReceiverTypes();
- bool is_monomorphic = expr->IsMonomorphic() &&
- ComputeStoredField(types->first(), name, &lookup);
-
- return is_monomorphic
- ? BuildStoreNamedField(object, name, value, types->first(), &lookup,
- true) // Needs smi and map check.
- : BuildStoreNamedGeneric(object, name, value);
-}
-
-
-void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
- HValue* object,
- HValue* value,
- ZoneMapList* types,
- Handle<String> name) {
- // TODO(ager): We should recognize when the prototype chains for different
- // maps are identical. In that case we can avoid repeatedly generating the
- // same prototype map checks.
- int count = 0;
- HBasicBlock* join = NULL;
- for (int i = 0; i < types->length() && count < kMaxStorePolymorphism; ++i) {
- Handle<Map> map = types->at(i);
- LookupResult lookup;
- if (ComputeStoredField(map, name, &lookup)) {
- if (count == 0) {
- AddInstruction(new HCheckNonSmi(object)); // Only needed once.
- join = graph()->CreateBasicBlock();
- }
- ++count;
- HBasicBlock* if_true = graph()->CreateBasicBlock();
- HBasicBlock* if_false = graph()->CreateBasicBlock();
- HCompareMap* compare = new HCompareMap(object, map, if_true, if_false);
- current_block()->Finish(compare);
-
- set_current_block(if_true);
- HInstruction* instr =
- BuildStoreNamedField(object, name, value, map, &lookup, false);
- instr->set_position(expr->position());
- // Goto will add the HSimulate for the store.
- AddInstruction(instr);
- if (!ast_context()->IsEffect()) Push(value);
- current_block()->Goto(join);
-
- set_current_block(if_false);
- }
- }
-
- // Finish up. Unconditionally deoptimize if we've handled all the maps we
- // know about and do not want to handle ones we've never seen. Otherwise
- // use a generic IC.
- if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
- current_block()->FinishExitWithDeoptimization();
- } else {
- HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
- instr->set_position(expr->position());
- AddInstruction(instr);
-
- if (join != NULL) {
- if (!ast_context()->IsEffect()) Push(value);
- current_block()->Goto(join);
- } else {
- // The HSimulate for the store should not see the stored value in
- // effect contexts (it is not materialized at expr->id() in the
- // unoptimized code).
- if (instr->HasSideEffects()) {
- if (ast_context()->IsEffect()) {
- AddSimulate(expr->id());
- } else {
- Push(value);
- AddSimulate(expr->id());
- Drop(1);
- }
- }
- ast_context()->ReturnValue(value);
- return;
- }
- }
-
- ASSERT(join != NULL);
- join->SetJoinId(expr->id());
- set_current_block(join);
- if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
-}
-
-
-void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
- Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- expr->RecordTypeFeedback(oracle());
- VISIT_FOR_VALUE(prop->obj());
-
- HValue* value = NULL;
- HInstruction* instr = NULL;
-
- if (prop->key()->IsPropertyName()) {
- // Named store.
- VISIT_FOR_VALUE(expr->value());
- value = Pop();
- HValue* object = Pop();
-
- Literal* key = prop->key()->AsLiteral();
- Handle<String> name = Handle<String>::cast(key->handle());
- ASSERT(!name.is_null());
-
- ZoneMapList* types = expr->GetReceiverTypes();
- LookupResult lookup;
-
- if (expr->IsMonomorphic()) {
- instr = BuildStoreNamed(object, value, expr);
-
- } else if (types != NULL && types->length() > 1) {
- HandlePolymorphicStoreNamedField(expr, object, value, types, name);
- return;
-
- } else {
- instr = BuildStoreNamedGeneric(object, name, value);
- }
-
- } else {
- // Keyed store.
- VISIT_FOR_VALUE(prop->key());
- VISIT_FOR_VALUE(expr->value());
- value = Pop();
- HValue* key = Pop();
- HValue* object = Pop();
-
- if (expr->IsMonomorphic()) {
- Handle<Map> receiver_type(expr->GetMonomorphicReceiverType());
- // An object has either fast elements or external array elements, but
- // never both. Pixel array maps that are assigned to pixel array elements
- // are always created with the fast elements flag cleared.
- if (receiver_type->has_external_array_elements()) {
- instr = BuildStoreKeyedSpecializedArrayElement(object,
- key,
- value,
- expr);
- } else if (receiver_type->has_fast_elements()) {
- instr = BuildStoreKeyedFastElement(object, key, value, expr);
- }
- }
- if (instr == NULL) {
- instr = BuildStoreKeyedGeneric(object, key, value);
- }
- }
-
- Push(value);
- instr->set_position(expr->position());
- AddInstruction(instr);
- if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
- ast_context()->ReturnValue(Pop());
-}
-
-
-// Because not every expression has a position and there is not common
-// superclass of Assignment and CountOperation, we cannot just pass the
-// owning expression instead of position and ast_id separately.
-void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
- HValue* value,
- int position,
- int ast_id) {
- LookupResult lookup;
- GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
- if (type == kUseCell) {
- bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
- Handle<GlobalObject> global(info()->global_object());
- Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
- HInstruction* instr = new HStoreGlobalCell(value, cell, check_hole);
- instr->set_position(position);
- AddInstruction(instr);
- if (instr->HasSideEffects()) AddSimulate(ast_id);
- } else {
- HContext* context = new HContext;
- AddInstruction(context);
- HGlobalObject* global_object = new HGlobalObject(context);
- AddInstruction(global_object);
- HStoreGlobalGeneric* instr =
- new HStoreGlobalGeneric(context,
- global_object,
- var->name(),
- value);
- instr->set_position(position);
- AddInstruction(instr);
- ASSERT(instr->HasSideEffects());
- if (instr->HasSideEffects()) AddSimulate(ast_id);
- }
-}
-
-
-void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
- Expression* target = expr->target();
- VariableProxy* proxy = target->AsVariableProxy();
- Variable* var = proxy->AsVariable();
- Property* prop = target->AsProperty();
- ASSERT(var == NULL || prop == NULL);
-
- // We have a second position recorded in the FullCodeGenerator to have
- // type feedback for the binary operation.
- BinaryOperation* operation = expr->binary_operation();
-
- if (var != NULL) {
- VISIT_FOR_VALUE(operation);
-
- if (var->is_global()) {
- HandleGlobalVariableAssignment(var,
- Top(),
- expr->position(),
- expr->AssignmentId());
- } else if (var->IsStackAllocated()) {
- Bind(var, Top());
- } else if (var->IsContextSlot()) {
- HValue* context = BuildContextChainWalk(var);
- int index = var->AsSlot()->index();
- HStoreContextSlot* instr = new HStoreContextSlot(context, index, Top());
- AddInstruction(instr);
- if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
- } else {
- BAILOUT("compound assignment to lookup slot");
- }
- ast_context()->ReturnValue(Pop());
-
- } else if (prop != NULL) {
- prop->RecordTypeFeedback(oracle());
-
- if (prop->key()->IsPropertyName()) {
- // Named property.
- VISIT_FOR_VALUE(prop->obj());
- HValue* obj = Top();
-
- HInstruction* load = NULL;
- if (prop->IsMonomorphic()) {
- Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- Handle<Map> map = prop->GetReceiverTypes()->first();
- load = BuildLoadNamed(obj, prop, map, name);
- } else {
- load = BuildLoadNamedGeneric(obj, prop);
- }
- PushAndAdd(load);
- if (load->HasSideEffects()) AddSimulate(expr->CompoundLoadId());
-
- VISIT_FOR_VALUE(expr->value());
- HValue* right = Pop();
- HValue* left = Pop();
-
- HInstruction* instr = BuildBinaryOperation(operation, left, right);
- PushAndAdd(instr);
- if (instr->HasSideEffects()) AddSimulate(operation->id());
-
- HInstruction* store = BuildStoreNamed(obj, instr, prop);
- AddInstruction(store);
- // Drop the simulated receiver and value. Return the value.
- Drop(2);
- Push(instr);
- if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
- ast_context()->ReturnValue(Pop());
-
- } else {
- // Keyed property.
- VISIT_FOR_VALUE(prop->obj());
- VISIT_FOR_VALUE(prop->key());
- HValue* obj = environment()->ExpressionStackAt(1);
- HValue* key = environment()->ExpressionStackAt(0);
-
- bool is_fast_elements = prop->IsMonomorphic() &&
- prop->GetMonomorphicReceiverType()->has_fast_elements();
- HInstruction* load = is_fast_elements
- ? BuildLoadKeyedFastElement(obj, key, prop)
- : BuildLoadKeyedGeneric(obj, key);
- PushAndAdd(load);
- if (load->HasSideEffects()) AddSimulate(expr->CompoundLoadId());
-
- VISIT_FOR_VALUE(expr->value());
- HValue* right = Pop();
- HValue* left = Pop();
-
- HInstruction* instr = BuildBinaryOperation(operation, left, right);
- PushAndAdd(instr);
- if (instr->HasSideEffects()) AddSimulate(operation->id());
-
- HInstruction* store = is_fast_elements
- ? BuildStoreKeyedFastElement(obj, key, instr, prop)
- : BuildStoreKeyedGeneric(obj, key, instr);
- AddInstruction(store);
- // Drop the simulated receiver, key, and value. Return the value.
- Drop(3);
- Push(instr);
- if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
- ast_context()->ReturnValue(Pop());
- }
-
- } else {
- BAILOUT("invalid lhs in compound assignment");
- }
-}
-
-
-void HGraphBuilder::VisitAssignment(Assignment* expr) {
- VariableProxy* proxy = expr->target()->AsVariableProxy();
- Variable* var = proxy->AsVariable();
- Property* prop = expr->target()->AsProperty();
- ASSERT(var == NULL || prop == NULL);
-
- if (expr->is_compound()) {
- HandleCompoundAssignment(expr);
- return;
- }
-
- if (var != NULL) {
- if (proxy->IsArguments()) BAILOUT("assignment to arguments");
-
- // Handle the assignment.
- if (var->IsStackAllocated()) {
- HValue* value = NULL;
- // Handle stack-allocated variables on the right-hand side directly.
- // We do not allow the arguments object to occur in a context where it
- // may escape, but assignments to stack-allocated locals are
- // permitted. Handling such assignments here bypasses the check for
- // the arguments object in VisitVariableProxy.
- Variable* rhs_var = expr->value()->AsVariableProxy()->AsVariable();
- if (rhs_var != NULL && rhs_var->IsStackAllocated()) {
- value = environment()->Lookup(rhs_var);
- } else {
- VISIT_FOR_VALUE(expr->value());
- value = Pop();
- }
- Bind(var, value);
- ast_context()->ReturnValue(value);
-
- } else if (var->IsContextSlot() && var->mode() != Variable::CONST) {
- VISIT_FOR_VALUE(expr->value());
- HValue* context = BuildContextChainWalk(var);
- int index = var->AsSlot()->index();
- HStoreContextSlot* instr = new HStoreContextSlot(context, index, Top());
- AddInstruction(instr);
- if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
- ast_context()->ReturnValue(Pop());
-
- } else if (var->is_global()) {
- VISIT_FOR_VALUE(expr->value());
- HandleGlobalVariableAssignment(var,
- Top(),
- expr->position(),
- expr->AssignmentId());
- ast_context()->ReturnValue(Pop());
-
- } else {
- BAILOUT("assignment to LOOKUP or const CONTEXT variable");
- }
-
- } else if (prop != NULL) {
- HandlePropertyAssignment(expr);
- } else {
- BAILOUT("invalid left-hand side in assignment");
- }
-}
-
-
-void HGraphBuilder::VisitThrow(Throw* expr) {
- // We don't optimize functions with invalid left-hand sides in
- // assignments, count operations, or for-in. Consequently throw can
- // currently only occur in an effect context.
- ASSERT(ast_context()->IsEffect());
- VISIT_FOR_VALUE(expr->exception());
-
- HValue* value = environment()->Pop();
- HThrow* instr = new HThrow(value);
- instr->set_position(expr->position());
- AddInstruction(instr);
- AddSimulate(expr->id());
- current_block()->FinishExit(new HAbnormalExit);
- set_current_block(NULL);
-}
-
-
-HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
- Property* expr,
- Handle<Map> type,
- LookupResult* lookup,
- bool smi_and_map_check) {
- if (smi_and_map_check) {
- AddInstruction(new HCheckNonSmi(object));
- AddInstruction(new HCheckMap(object, type));
- }
-
- int index = lookup->GetLocalFieldIndexFromMap(*type);
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- int offset = (index * kPointerSize) + type->instance_size();
- return new HLoadNamedField(object, true, offset);
- } else {
- // Non-negative property indices are in the properties array.
- int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
- return new HLoadNamedField(object, false, offset);
- }
-}
-
-
-HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* obj,
- Property* expr) {
- ASSERT(expr->key()->IsPropertyName());
- Handle<Object> name = expr->key()->AsLiteral()->handle();
- HContext* context = new HContext;
- AddInstruction(context);
- return new HLoadNamedGeneric(context, obj, name);
-}
-
-
-HInstruction* HGraphBuilder::BuildLoadNamed(HValue* obj,
- Property* expr,
- Handle<Map> map,
- Handle<String> name) {
- LookupResult lookup;
- map->LookupInDescriptors(NULL, *name, &lookup);
- if (lookup.IsProperty() && lookup.type() == FIELD) {
- return BuildLoadNamedField(obj,
- expr,
- map,
- &lookup,
- true);
- } else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) {
- AddInstruction(new HCheckNonSmi(obj));
- AddInstruction(new HCheckMap(obj, map));
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*map));
- return new HConstant(function, Representation::Tagged());
- } else {
- return BuildLoadNamedGeneric(obj, expr);
- }
-}
-
-
-HInstruction* HGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
- HValue* key) {
- HContext* context = new HContext;
- AddInstruction(context);
- return new HLoadKeyedGeneric(context, object, key);
-}
-
-
-HInstruction* HGraphBuilder::BuildLoadKeyedFastElement(HValue* object,
- HValue* key,
- Property* expr) {
- ASSERT(!expr->key()->IsPropertyName() && expr->IsMonomorphic());
- AddInstruction(new HCheckNonSmi(object));
- Handle<Map> map = expr->GetMonomorphicReceiverType();
- ASSERT(map->has_fast_elements());
- AddInstruction(new HCheckMap(object, map));
- bool is_array = (map->instance_type() == JS_ARRAY_TYPE);
- HLoadElements* elements = new HLoadElements(object);
- HInstruction* length = NULL;
- if (is_array) {
- length = AddInstruction(new HJSArrayLength(object));
- AddInstruction(new HBoundsCheck(key, length));
- AddInstruction(elements);
- } else {
- AddInstruction(elements);
- length = AddInstruction(new HFixedArrayLength(elements));
- AddInstruction(new HBoundsCheck(key, length));
- }
- return new HLoadKeyedFastElement(elements, key);
-}
-
-
-HInstruction* HGraphBuilder::BuildLoadKeyedSpecializedArrayElement(
- HValue* object,
- HValue* key,
- Property* expr) {
- ASSERT(!expr->key()->IsPropertyName() && expr->IsMonomorphic());
- AddInstruction(new HCheckNonSmi(object));
- Handle<Map> map = expr->GetMonomorphicReceiverType();
- ASSERT(!map->has_fast_elements());
- ASSERT(map->has_external_array_elements());
- AddInstruction(new HCheckMap(object, map));
- HLoadElements* elements = new HLoadElements(object);
- AddInstruction(elements);
- HInstruction* length = new HExternalArrayLength(elements);
- AddInstruction(length);
- AddInstruction(new HBoundsCheck(key, length));
- HLoadExternalArrayPointer* external_elements =
- new HLoadExternalArrayPointer(elements);
- AddInstruction(external_elements);
- HLoadKeyedSpecializedArrayElement* pixel_array_value =
- new HLoadKeyedSpecializedArrayElement(external_elements,
- key,
- expr->GetExternalArrayType());
- return pixel_array_value;
-}
-
-
-HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
- HValue* key,
- HValue* value) {
- HContext* context = new HContext;
- AddInstruction(context);
- return new HStoreKeyedGeneric(context, object, key, value);
-}
-
-
-HInstruction* HGraphBuilder::BuildStoreKeyedFastElement(HValue* object,
- HValue* key,
- HValue* val,
- Expression* expr) {
- ASSERT(expr->IsMonomorphic());
- AddInstruction(new HCheckNonSmi(object));
- Handle<Map> map = expr->GetMonomorphicReceiverType();
- ASSERT(map->has_fast_elements());
- AddInstruction(new HCheckMap(object, map));
- HInstruction* elements = AddInstruction(new HLoadElements(object));
- AddInstruction(new HCheckMap(elements,
- isolate()->factory()->fixed_array_map()));
- bool is_array = (map->instance_type() == JS_ARRAY_TYPE);
- HInstruction* length = NULL;
- if (is_array) {
- length = AddInstruction(new HJSArrayLength(object));
- } else {
- length = AddInstruction(new HFixedArrayLength(elements));
- }
- AddInstruction(new HBoundsCheck(key, length));
- return new HStoreKeyedFastElement(elements, key, val);
-}
-
-
-HInstruction* HGraphBuilder::BuildStoreKeyedSpecializedArrayElement(
- HValue* object,
- HValue* key,
- HValue* val,
- Assignment* expr) {
- ASSERT(expr->IsMonomorphic());
- AddInstruction(new HCheckNonSmi(object));
- Handle<Map> map = expr->GetMonomorphicReceiverType();
- ASSERT(!map->has_fast_elements());
- ASSERT(map->has_external_array_elements());
- AddInstruction(new HCheckMap(object, map));
- HLoadElements* elements = new HLoadElements(object);
- AddInstruction(elements);
- HInstruction* length = AddInstruction(new HExternalArrayLength(elements));
- AddInstruction(new HBoundsCheck(key, length));
- HLoadExternalArrayPointer* external_elements =
- new HLoadExternalArrayPointer(elements);
- AddInstruction(external_elements);
- return new HStoreKeyedSpecializedArrayElement(
- external_elements,
- key,
- val,
- expr->GetExternalArrayType());
-}
-
-
-bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
- VariableProxy* proxy = expr->obj()->AsVariableProxy();
- if (proxy == NULL) return false;
- if (!proxy->var()->IsStackAllocated()) return false;
- if (!environment()->Lookup(proxy->var())->CheckFlag(HValue::kIsArguments)) {
- return false;
- }
-
- HInstruction* result = NULL;
- if (expr->key()->IsPropertyName()) {
- Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
- if (!name->IsEqualTo(CStrVector("length"))) return false;
- HInstruction* elements = AddInstruction(new HArgumentsElements);
- result = new HArgumentsLength(elements);
- } else {
- Push(graph()->GetArgumentsObject());
- VisitForValue(expr->key());
- if (HasStackOverflow()) return false;
- HValue* key = Pop();
- Drop(1); // Arguments object.
- HInstruction* elements = AddInstruction(new HArgumentsElements);
- HInstruction* length = AddInstruction(new HArgumentsLength(elements));
- AddInstruction(new HBoundsCheck(key, length));
- result = new HAccessArgumentsAt(elements, length, key);
- }
- ast_context()->ReturnInstruction(result, expr->id());
- return true;
-}
-
-
-void HGraphBuilder::VisitProperty(Property* expr) {
- expr->RecordTypeFeedback(oracle());
-
- if (TryArgumentsAccess(expr)) return;
- CHECK_BAILOUT;
-
- VISIT_FOR_VALUE(expr->obj());
-
- HInstruction* instr = NULL;
- if (expr->IsArrayLength()) {
- HValue* array = Pop();
- AddInstruction(new HCheckNonSmi(array));
- AddInstruction(new HCheckInstanceType(array, JS_ARRAY_TYPE, JS_ARRAY_TYPE));
- instr = new HJSArrayLength(array);
-
- } else if (expr->IsStringLength()) {
- HValue* string = Pop();
- AddInstruction(new HCheckNonSmi(string));
- AddInstruction(new HCheckInstanceType(string,
- FIRST_STRING_TYPE,
- LAST_STRING_TYPE));
- instr = new HStringLength(string);
- } else if (expr->IsStringAccess()) {
- VISIT_FOR_VALUE(expr->key());
- HValue* index = Pop();
- HValue* string = Pop();
- HStringCharCodeAt* char_code = BuildStringCharCodeAt(string, index);
- AddInstruction(char_code);
- instr = new HStringCharFromCode(char_code);
-
- } else if (expr->IsFunctionPrototype()) {
- HValue* function = Pop();
- AddInstruction(new HCheckNonSmi(function));
- instr = new HLoadFunctionPrototype(function);
-
- } else if (expr->key()->IsPropertyName()) {
- Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
- ZoneMapList* types = expr->GetReceiverTypes();
-
- HValue* obj = Pop();
- if (expr->IsMonomorphic()) {
- instr = BuildLoadNamed(obj, expr, types->first(), name);
- } else if (types != NULL && types->length() > 1) {
- AddInstruction(new HCheckNonSmi(obj));
- instr = new HLoadNamedFieldPolymorphic(obj, types, name);
- } else {
- instr = BuildLoadNamedGeneric(obj, expr);
- }
-
- } else {
- VISIT_FOR_VALUE(expr->key());
-
- HValue* key = Pop();
- HValue* obj = Pop();
-
- if (expr->IsMonomorphic()) {
- Handle<Map> receiver_type(expr->GetMonomorphicReceiverType());
- // An object has either fast elements or pixel array elements, but never
- // both. Pixel array maps that are assigned to pixel array elements are
- // always created with the fast elements flag cleared.
- if (receiver_type->has_external_array_elements()) {
- instr = BuildLoadKeyedSpecializedArrayElement(obj, key, expr);
- } else if (receiver_type->has_fast_elements()) {
- instr = BuildLoadKeyedFastElement(obj, key, expr);
- }
- }
- if (instr == NULL) {
- instr = BuildLoadKeyedGeneric(obj, key);
- }
- }
- instr->set_position(expr->position());
- ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-void HGraphBuilder::AddCheckConstantFunction(Call* expr,
- HValue* receiver,
- Handle<Map> receiver_map,
- bool smi_and_map_check) {
- // Constant functions have the nice property that the map will change if they
- // are overwritten. Therefore it is enough to check the map of the holder and
- // its prototypes.
- if (smi_and_map_check) {
- AddInstruction(new HCheckNonSmi(receiver));
- AddInstruction(new HCheckMap(receiver, receiver_map));
- }
- if (!expr->holder().is_null()) {
- AddInstruction(new HCheckPrototypeMaps(
- Handle<JSObject>(JSObject::cast(receiver_map->prototype())),
- expr->holder()));
- }
-}
-
-
-void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
- HValue* receiver,
- ZoneMapList* types,
- Handle<String> name) {
- // TODO(ager): We should recognize when the prototype chains for different
- // maps are identical. In that case we can avoid repeatedly generating the
- // same prototype map checks.
- int argument_count = expr->arguments()->length() + 1; // Includes receiver.
- int count = 0;
- HBasicBlock* join = NULL;
- for (int i = 0; i < types->length() && count < kMaxCallPolymorphism; ++i) {
- Handle<Map> map = types->at(i);
- if (expr->ComputeTarget(map, name)) {
- if (count == 0) {
- AddInstruction(new HCheckNonSmi(receiver)); // Only needed once.
- join = graph()->CreateBasicBlock();
- }
- ++count;
- HBasicBlock* if_true = graph()->CreateBasicBlock();
- HBasicBlock* if_false = graph()->CreateBasicBlock();
- HCompareMap* compare = new HCompareMap(receiver, map, if_true, if_false);
- current_block()->Finish(compare);
-
- set_current_block(if_true);
- AddCheckConstantFunction(expr, receiver, map, false);
- if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
- PrintF("Trying to inline the polymorphic call to %s\n",
- *name->ToCString());
- }
- if (!FLAG_polymorphic_inlining || !TryInline(expr)) {
- // Check for bailout, as trying to inline might fail due to bailout
- // during hydrogen processing.
- CHECK_BAILOUT;
- HCallConstantFunction* call =
- new HCallConstantFunction(expr->target(), argument_count);
- call->set_position(expr->position());
- PreProcessCall(call);
- AddInstruction(call);
- if (!ast_context()->IsEffect()) Push(call);
- }
-
- if (current_block() != NULL) current_block()->Goto(join);
- set_current_block(if_false);
- }
- }
-
- // Finish up. Unconditionally deoptimize if we've handled all the maps we
- // know about and do not want to handle ones we've never seen. Otherwise
- // use a generic IC.
- if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
- current_block()->FinishExitWithDeoptimization();
- } else {
- HContext* context = new HContext;
- AddInstruction(context);
- HCallNamed* call = new HCallNamed(context, name, argument_count);
- call->set_position(expr->position());
- PreProcessCall(call);
-
- if (join != NULL) {
- AddInstruction(call);
- if (!ast_context()->IsEffect()) Push(call);
- current_block()->Goto(join);
- } else {
- ast_context()->ReturnInstruction(call, expr->id());
- return;
- }
- }
-
- // We assume that control flow is always live after an expression. So
- // even without predecessors to the join block, we set it as the exit
- // block and continue by adding instructions there.
- ASSERT(join != NULL);
- set_current_block(join);
- if (join->HasPredecessor()) {
- join->SetJoinId(expr->id());
- if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
- }
-}
-
-
-void HGraphBuilder::TraceInline(Handle<JSFunction> target, const char* reason) {
- if (FLAG_trace_inlining) {
- if (reason == NULL) {
- // We are currently in the context of inlined function thus we have
- // to go to an outer FunctionState to get caller.
- SmartPointer<char> callee = target->shared()->DebugName()->ToCString();
- SmartPointer<char> caller =
- function_state()->outer()->compilation_info()->function()->
- debug_name()->ToCString();
- PrintF("Inlined %s called from %s.\n", *callee, *caller);
- } else {
- SmartPointer<char> callee = target->shared()->DebugName()->ToCString();
- SmartPointer<char> caller =
- info()->function()->debug_name()->ToCString();
- PrintF("Did not inline %s called from %s (%s).\n",
- *callee, *caller, reason);
- }
- }
-}
-
-
-bool HGraphBuilder::TryInline(Call* expr) {
- if (!FLAG_use_inlining) return false;
-
- // Precondition: call is monomorphic and we have found a target with the
- // appropriate arity.
- Handle<JSFunction> target = expr->target();
-
- // Do a quick check on source code length to avoid parsing large
- // inlining candidates.
- if (FLAG_limit_inlining && target->shared()->SourceSize() > kMaxSourceSize) {
- TraceInline(target, "target text too big");
- return false;
- }
-
- // Target must be inlineable.
- if (!target->IsInlineable()) {
- TraceInline(target, "target not inlineable");
- return false;
- }
-
- // No context change required.
- CompilationInfo* outer_info = info();
- if (target->context() != outer_info->closure()->context() ||
- outer_info->scope()->contains_with() ||
- outer_info->scope()->num_heap_slots() > 0) {
- TraceInline(target, "target requires context change");
- return false;
- }
-
- // Don't inline deeper than kMaxInliningLevels calls.
- HEnvironment* env = environment();
- int current_level = 1;
- while (env->outer() != NULL) {
- if (current_level == Compiler::kMaxInliningLevels) {
- TraceInline(target, "inline depth limit reached");
- return false;
- }
- current_level++;
- env = env->outer();
- }
-
- // Don't inline recursive functions.
- if (target->shared() == outer_info->closure()->shared()) {
- TraceInline(target, "target is recursive");
- return false;
- }
-
- // We don't want to add more than a certain number of nodes from inlining.
- if (FLAG_limit_inlining && inlined_count_ > kMaxInlinedNodes) {
- TraceInline(target, "cumulative AST node limit reached");
- return false;
- }
-
- int count_before = AstNode::Count();
-
- // Parse and allocate variables.
- CompilationInfo target_info(target);
- if (!ParserApi::Parse(&target_info) ||
- !Scope::Analyze(&target_info)) {
- if (target_info.isolate()->has_pending_exception()) {
- // Parse or scope error, never optimize this function.
- SetStackOverflow();
- target->shared()->set_optimization_disabled(true);
- }
- TraceInline(target, "parse failure");
- return false;
- }
-
- if (target_info.scope()->num_heap_slots() > 0) {
- TraceInline(target, "target has context-allocated variables");
- return false;
- }
- FunctionLiteral* function = target_info.function();
-
- // Count the number of AST nodes added by inlining this call.
- int nodes_added = AstNode::Count() - count_before;
- if (FLAG_limit_inlining && nodes_added > kMaxInlinedSize) {
- TraceInline(target, "target AST is too large");
- return false;
- }
-
- // Check if we can handle all declarations in the inlined functions.
- VisitDeclarations(target_info.scope()->declarations());
- if (HasStackOverflow()) {
- TraceInline(target, "target has non-trivial declaration");
- ClearStackOverflow();
- return false;
- }
-
- // Don't inline functions that uses the arguments object or that
- // have a mismatching number of parameters.
- Handle<SharedFunctionInfo> target_shared(target->shared());
- int arity = expr->arguments()->length();
- if (function->scope()->arguments() != NULL ||
- arity != target_shared->formal_parameter_count()) {
- TraceInline(target, "target requires special argument handling");
- return false;
- }
-
- // All statements in the body must be inlineable.
- for (int i = 0, count = function->body()->length(); i < count; ++i) {
- if (!function->body()->at(i)->IsInlineable()) {
- TraceInline(target, "target contains unsupported syntax");
- return false;
- }
- }
-
- // Generate the deoptimization data for the unoptimized version of
- // the target function if we don't already have it.
- if (!target_shared->has_deoptimization_support()) {
- // Note that we compile here using the same AST that we will use for
- // generating the optimized inline code.
- target_info.EnableDeoptimizationSupport();
- if (!FullCodeGenerator::MakeCode(&target_info)) {
- TraceInline(target, "could not generate deoptimization info");
- return false;
- }
- target_shared->EnableDeoptimizationSupport(*target_info.code());
- Compiler::RecordFunctionCompilation(Logger::FUNCTION_TAG,
- &target_info,
- target_shared);
- }
-
- // ----------------------------------------------------------------
- // Save the pending call context and type feedback oracle. Set up new ones
- // for the inlined function.
- ASSERT(target_shared->has_deoptimization_support());
- TypeFeedbackOracle target_oracle(
- Handle<Code>(target_shared->code()),
- Handle<Context>(target->context()->global_context()));
- FunctionState target_state(this, &target_info, &target_oracle);
-
- HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner_env =
- environment()->CopyForInlining(target, function, true, undefined);
- HBasicBlock* body_entry = CreateBasicBlock(inner_env);
- current_block()->Goto(body_entry);
-
- body_entry->SetJoinId(expr->ReturnId());
- set_current_block(body_entry);
- AddInstruction(new HEnterInlined(target, function));
- VisitStatements(function->body());
- if (HasStackOverflow()) {
- // Bail out if the inline function did, as we cannot residualize a call
- // instead.
- TraceInline(target, "inline graph construction failed");
- return false;
- }
-
- // Update inlined nodes count.
- inlined_count_ += nodes_added;
-
- TraceInline(target, NULL);
-
- if (current_block() != NULL) {
- // Add a return of undefined if control can fall off the body. In a
- // test context, undefined is false.
- if (inlined_test_context() == NULL) {
- ASSERT(function_return() != NULL);
- ASSERT(call_context()->IsEffect() || call_context()->IsValue());
- if (call_context()->IsEffect()) {
- current_block()->Goto(function_return(), false);
- } else {
- current_block()->AddLeaveInlined(undefined, function_return());
- }
- } else {
- // The graph builder assumes control can reach both branches of a
- // test, so we materialize the undefined value and test it rather than
- // simply jumping to the false target.
- //
- // TODO(3168478): refactor to avoid this.
- HBasicBlock* empty_true = graph()->CreateBasicBlock();
- HBasicBlock* empty_false = graph()->CreateBasicBlock();
- HTest* test = new HTest(undefined, empty_true, empty_false);
- current_block()->Finish(test);
-
- empty_true->Goto(inlined_test_context()->if_true(), false);
- empty_false->Goto(inlined_test_context()->if_false(), false);
- }
- }
-
- // Fix up the function exits.
- if (inlined_test_context() != NULL) {
- HBasicBlock* if_true = inlined_test_context()->if_true();
- HBasicBlock* if_false = inlined_test_context()->if_false();
- if_true->SetJoinId(expr->id());
- if_false->SetJoinId(expr->id());
- ASSERT(ast_context() == inlined_test_context());
- // Pop the return test context from the expression context stack.
- ClearInlinedTestContext();
-
- // Forward to the real test context.
- HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
- HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
- if_true->Goto(true_target, false);
- if_false->Goto(false_target, false);
-
- // TODO(kmillikin): Come up with a better way to handle this. It is too
- // subtle. NULL here indicates that the enclosing context has no control
- // flow to handle.
- set_current_block(NULL);
-
- } else {
- function_return()->SetJoinId(expr->id());
- set_current_block(function_return());
- }
-
- return true;
-}
-
-
-bool HGraphBuilder::TryInlineBuiltinFunction(Call* expr,
- HValue* receiver,
- Handle<Map> receiver_map,
- CheckType check_type) {
- ASSERT(check_type != RECEIVER_MAP_CHECK || !receiver_map.is_null());
- // Try to inline calls like Math.* as operations in the calling function.
- if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
- BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
- int argument_count = expr->arguments()->length() + 1; // Plus receiver.
- switch (id) {
- case kStringCharCodeAt:
- case kStringCharAt:
- if (argument_count == 2 && check_type == STRING_CHECK) {
- HValue* index = Pop();
- HValue* string = Pop();
- ASSERT(!expr->holder().is_null());
- AddInstruction(new HCheckPrototypeMaps(
- oracle()->GetPrototypeForPrimitiveCheck(STRING_CHECK),
- expr->holder()));
- HStringCharCodeAt* char_code = BuildStringCharCodeAt(string, index);
- if (id == kStringCharCodeAt) {
- ast_context()->ReturnInstruction(char_code, expr->id());
- return true;
- }
- AddInstruction(char_code);
- HStringCharFromCode* result = new HStringCharFromCode(char_code);
- ast_context()->ReturnInstruction(result, expr->id());
- return true;
- }
- break;
- case kMathRound:
- case kMathFloor:
- case kMathAbs:
- case kMathSqrt:
- case kMathLog:
- case kMathSin:
- case kMathCos:
- if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr, receiver, receiver_map, true);
- HValue* argument = Pop();
- Drop(1); // Receiver.
- HUnaryMathOperation* op = new HUnaryMathOperation(argument, id);
- op->set_position(expr->position());
- ast_context()->ReturnInstruction(op, expr->id());
- return true;
- }
- break;
- case kMathPow:
- if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr, receiver, receiver_map, true);
- HValue* right = Pop();
- HValue* left = Pop();
- Pop(); // Pop receiver.
- HInstruction* result = NULL;
- // Use sqrt() if exponent is 0.5 or -0.5.
- if (right->IsConstant() && HConstant::cast(right)->HasDoubleValue()) {
- double exponent = HConstant::cast(right)->DoubleValue();
- if (exponent == 0.5) {
- result = new HUnaryMathOperation(left, kMathPowHalf);
- } else if (exponent == -0.5) {
- HConstant* double_one =
- new HConstant(Handle<Object>(Smi::FromInt(1)),
- Representation::Double());
- AddInstruction(double_one);
- HUnaryMathOperation* square_root =
- new HUnaryMathOperation(left, kMathPowHalf);
- AddInstruction(square_root);
- // MathPowHalf doesn't have side effects so there's no need for
- // an environment simulation here.
- ASSERT(!square_root->HasSideEffects());
- result = new HDiv(double_one, square_root);
- } else if (exponent == 2.0) {
- result = new HMul(left, left);
- }
- } else if (right->IsConstant() &&
- HConstant::cast(right)->HasInteger32Value() &&
- HConstant::cast(right)->Integer32Value() == 2) {
- result = new HMul(left, left);
- }
-
- if (result == NULL) {
- result = new HPower(left, right);
- }
- ast_context()->ReturnInstruction(result, expr->id());
- return true;
- }
- break;
- default:
- // Not yet supported for inlining.
- break;
- }
- return false;
-}
-
-
-bool HGraphBuilder::TryCallApply(Call* expr) {
- Expression* callee = expr->expression();
- Property* prop = callee->AsProperty();
- ASSERT(prop != NULL);
-
- if (info()->scope()->arguments() == NULL) return false;
-
- Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- if (!name->IsEqualTo(CStrVector("apply"))) return false;
-
- ZoneList<Expression*>* args = expr->arguments();
- if (args->length() != 2) return false;
-
- VariableProxy* arg_two = args->at(1)->AsVariableProxy();
- if (arg_two == NULL || !arg_two->var()->IsStackAllocated()) return false;
- HValue* arg_two_value = environment()->Lookup(arg_two->var());
- if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
-
- if (!expr->IsMonomorphic() ||
- expr->check_type() != RECEIVER_MAP_CHECK) return false;
-
- // Found pattern f.apply(receiver, arguments).
- VisitForValue(prop->obj());
- if (HasStackOverflow()) return false;
- HValue* function = Pop();
- VisitForValue(args->at(0));
- if (HasStackOverflow()) return false;
- HValue* receiver = Pop();
- HInstruction* elements = AddInstruction(new HArgumentsElements);
- HInstruction* length = AddInstruction(new HArgumentsLength(elements));
- AddCheckConstantFunction(expr,
- function,
- expr->GetReceiverTypes()->first(),
- true);
- HInstruction* result =
- new HApplyArguments(function, receiver, length, elements);
- result->set_position(expr->position());
- ast_context()->ReturnInstruction(result, expr->id());
- return true;
-}
-
-
-void HGraphBuilder::VisitCall(Call* expr) {
- Expression* callee = expr->expression();
- int argument_count = expr->arguments()->length() + 1; // Plus receiver.
- HInstruction* call = NULL;
-
- Property* prop = callee->AsProperty();
- if (prop != NULL) {
- if (!prop->key()->IsPropertyName()) {
- // Keyed function call.
- VISIT_FOR_VALUE(prop->obj());
-
- VISIT_FOR_VALUE(prop->key());
- // Push receiver and key like the non-optimized code generator expects it.
- HValue* key = Pop();
- HValue* receiver = Pop();
- Push(key);
- Push(receiver);
-
- VisitExpressions(expr->arguments());
- CHECK_BAILOUT;
-
- HContext* context = new HContext;
- AddInstruction(context);
- call = PreProcessCall(new HCallKeyed(context, key, argument_count));
- call->set_position(expr->position());
- Drop(1); // Key.
- ast_context()->ReturnInstruction(call, expr->id());
- return;
- }
-
- // Named function call.
- expr->RecordTypeFeedback(oracle());
-
- if (TryCallApply(expr)) return;
- CHECK_BAILOUT;
-
- VISIT_FOR_VALUE(prop->obj());
- VisitExpressions(expr->arguments());
- CHECK_BAILOUT;
-
- Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
-
- expr->RecordTypeFeedback(oracle());
- ZoneMapList* types = expr->GetReceiverTypes();
-
- HValue* receiver =
- environment()->ExpressionStackAt(expr->arguments()->length());
- if (expr->IsMonomorphic()) {
- Handle<Map> receiver_map =
- (types == NULL) ? Handle<Map>::null() : types->first();
- if (TryInlineBuiltinFunction(expr,
- receiver,
- receiver_map,
- expr->check_type())) {
- return;
- }
-
- if (CallStubCompiler::HasCustomCallGenerator(*expr->target()) ||
- expr->check_type() != RECEIVER_MAP_CHECK) {
- // When the target has a custom call IC generator, use the IC,
- // because it is likely to generate better code. Also use the IC
- // when a primitive receiver check is required.
- HContext* context = new HContext;
- AddInstruction(context);
- call = PreProcessCall(new HCallNamed(context, name, argument_count));
- } else {
- AddCheckConstantFunction(expr, receiver, receiver_map, true);
-
- if (TryInline(expr)) {
- return;
- } else {
- // Check for bailout, as the TryInline call in the if condition above
- // might return false due to bailout during hydrogen processing.
- CHECK_BAILOUT;
- call = PreProcessCall(new HCallConstantFunction(expr->target(),
- argument_count));
- }
- }
- } else if (types != NULL && types->length() > 1) {
- ASSERT(expr->check_type() == RECEIVER_MAP_CHECK);
- HandlePolymorphicCallNamed(expr, receiver, types, name);
- return;
-
- } else {
- HContext* context = new HContext;
- AddInstruction(context);
- call = PreProcessCall(new HCallNamed(context, name, argument_count));
- }
-
- } else {
- Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
- bool global_call = (var != NULL) && var->is_global() && !var->is_this();
-
- if (!global_call) {
- ++argument_count;
- VISIT_FOR_VALUE(expr->expression());
- }
-
- if (global_call) {
- bool known_global_function = false;
- // If there is a global property cell for the name at compile time and
- // access check is not enabled we assume that the function will not change
- // and generate optimized code for calling the function.
- LookupResult lookup;
- GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, false);
- if (type == kUseCell &&
- !info()->global_object()->IsAccessCheckNeeded()) {
- Handle<GlobalObject> global(info()->global_object());
- known_global_function = expr->ComputeGlobalTarget(global, &lookup);
- }
- if (known_global_function) {
- // Push the global object instead of the global receiver because
- // code generated by the full code generator expects it.
- HContext* context = new HContext;
- HGlobalObject* global_object = new HGlobalObject(context);
- AddInstruction(context);
- PushAndAdd(global_object);
- VisitExpressions(expr->arguments());
- CHECK_BAILOUT;
-
- VISIT_FOR_VALUE(expr->expression());
- HValue* function = Pop();
- AddInstruction(new HCheckFunction(function, expr->target()));
-
- // Replace the global object with the global receiver.
- HGlobalReceiver* global_receiver = new HGlobalReceiver(global_object);
- // Index of the receiver from the top of the expression stack.
- const int receiver_index = argument_count - 1;
- AddInstruction(global_receiver);
- ASSERT(environment()->ExpressionStackAt(receiver_index)->
- IsGlobalObject());
- environment()->SetExpressionStackAt(receiver_index, global_receiver);
-
- if (TryInline(expr)) {
- return;
- }
- // Check for bailout, as trying to inline might fail due to bailout
- // during hydrogen processing.
- CHECK_BAILOUT;
-
- call = PreProcessCall(new HCallKnownGlobal(expr->target(),
- argument_count));
- } else {
- HContext* context = new HContext;
- AddInstruction(context);
- PushAndAdd(new HGlobalObject(context));
- VisitExpressions(expr->arguments());
- CHECK_BAILOUT;
-
- call = PreProcessCall(new HCallGlobal(context,
- var->name(),
- argument_count));
- }
-
- } else {
- HContext* context = new HContext;
- HGlobalObject* global_object = new HGlobalObject(context);
- AddInstruction(context);
- AddInstruction(global_object);
- PushAndAdd(new HGlobalReceiver(global_object));
- VisitExpressions(expr->arguments());
- CHECK_BAILOUT;
-
- call = PreProcessCall(new HCallFunction(context, argument_count));
- }
- }
-
- call->set_position(expr->position());
- ast_context()->ReturnInstruction(call, expr->id());
-}
-
-
-void HGraphBuilder::VisitCallNew(CallNew* expr) {
- // The constructor function is also used as the receiver argument to the
- // JS construct call builtin.
- VISIT_FOR_VALUE(expr->expression());
- VisitExpressions(expr->arguments());
- CHECK_BAILOUT;
-
- HContext* context = new HContext;
- AddInstruction(context);
-
- // The constructor is both an operand to the instruction and an argument
- // to the construct call.
- int arg_count = expr->arguments()->length() + 1; // Plus constructor.
- HValue* constructor = environment()->ExpressionStackAt(arg_count - 1);
- HCallNew* call = new HCallNew(context, constructor, arg_count);
- call->set_position(expr->position());
- PreProcessCall(call);
- ast_context()->ReturnInstruction(call, expr->id());
-}
-
-
-// Support for generating inlined runtime functions.
-
-// Lookup table for generators for runtime calls that are generated inline.
-// Elements of the table are member pointers to functions of HGraphBuilder.
-#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \
- &HGraphBuilder::Generate##Name,
-
-const HGraphBuilder::InlineFunctionGenerator
- HGraphBuilder::kInlineFunctionGenerators[] = {
- INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
- INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
-};
-#undef INLINE_FUNCTION_GENERATOR_ADDRESS
-
-
-void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
- if (expr->is_jsruntime()) {
- BAILOUT("call to a JavaScript runtime function");
- }
-
- const Runtime::Function* function = expr->function();
- ASSERT(function != NULL);
- if (function->intrinsic_type == Runtime::INLINE) {
- ASSERT(expr->name()->length() > 0);
- ASSERT(expr->name()->Get(0) == '_');
- // Call to an inline function.
- int lookup_index = static_cast<int>(function->function_id) -
- static_cast<int>(Runtime::kFirstInlineFunction);
- ASSERT(lookup_index >= 0);
- ASSERT(static_cast<size_t>(lookup_index) <
- ARRAY_SIZE(kInlineFunctionGenerators));
- InlineFunctionGenerator generator = kInlineFunctionGenerators[lookup_index];
-
- // Call the inline code generator using the pointer-to-member.
- (this->*generator)(expr);
- } else {
- ASSERT(function->intrinsic_type == Runtime::RUNTIME);
- VisitArgumentList(expr->arguments());
- CHECK_BAILOUT;
-
- Handle<String> name = expr->name();
- int argument_count = expr->arguments()->length();
- HCallRuntime* call = new HCallRuntime(name, function, argument_count);
- call->set_position(RelocInfo::kNoPosition);
- Drop(argument_count);
- ast_context()->ReturnInstruction(call, expr->id());
- }
-}
-
-
-void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
- Token::Value op = expr->op();
- if (op == Token::VOID) {
- VISIT_FOR_EFFECT(expr->expression());
- ast_context()->ReturnValue(graph()->GetConstantUndefined());
- } else if (op == Token::DELETE) {
- Property* prop = expr->expression()->AsProperty();
- Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
- if (prop == NULL && var == NULL) {
- // Result of deleting non-property, non-variable reference is true.
- // Evaluate the subexpression for side effects.
- VISIT_FOR_EFFECT(expr->expression());
- ast_context()->ReturnValue(graph()->GetConstantTrue());
- } else if (var != NULL &&
- !var->is_global() &&
- var->AsSlot() != NULL &&
- var->AsSlot()->type() != Slot::LOOKUP) {
- // Result of deleting non-global, non-dynamic variables is false.
- // The subexpression does not have side effects.
- ast_context()->ReturnValue(graph()->GetConstantFalse());
- } else if (prop != NULL) {
- if (prop->is_synthetic()) {
- // Result of deleting parameters is false, even when they rewrite
- // to accesses on the arguments object.
- ast_context()->ReturnValue(graph()->GetConstantFalse());
- } else {
- VISIT_FOR_VALUE(prop->obj());
- VISIT_FOR_VALUE(prop->key());
- HValue* key = Pop();
- HValue* obj = Pop();
- HDeleteProperty* instr = new HDeleteProperty(obj, key);
- ast_context()->ReturnInstruction(instr, expr->id());
- }
- } else if (var->is_global()) {
- BAILOUT("delete with global variable");
- } else {
- BAILOUT("delete with non-global variable");
- }
- } else if (op == Token::NOT) {
- if (ast_context()->IsTest()) {
- TestContext* context = TestContext::cast(ast_context());
- VisitForControl(expr->expression(),
- context->if_false(),
- context->if_true());
- } else if (ast_context()->IsValue()) {
- HBasicBlock* materialize_false = graph()->CreateBasicBlock();
- HBasicBlock* materialize_true = graph()->CreateBasicBlock();
- VISIT_FOR_CONTROL(expr->expression(),
- materialize_false,
- materialize_true);
- materialize_false->SetJoinId(expr->expression()->id());
- materialize_true->SetJoinId(expr->expression()->id());
-
- set_current_block(materialize_false);
- Push(graph()->GetConstantFalse());
- set_current_block(materialize_true);
- Push(graph()->GetConstantTrue());
-
- HBasicBlock* join =
- CreateJoin(materialize_false, materialize_true, expr->id());
- set_current_block(join);
- ast_context()->ReturnValue(Pop());
- } else {
- ASSERT(ast_context()->IsEffect());
- VisitForEffect(expr->expression());
- }
-
- } else if (op == Token::TYPEOF) {
- VisitForTypeOf(expr->expression());
- if (HasStackOverflow()) return;
- HValue* value = Pop();
- ast_context()->ReturnInstruction(new HTypeof(value), expr->id());
-
- } else {
- VISIT_FOR_VALUE(expr->expression());
- HValue* value = Pop();
- HInstruction* instr = NULL;
- switch (op) {
- case Token::BIT_NOT:
- instr = new HBitNot(value);
- break;
- case Token::SUB:
- instr = new HMul(value, graph_->GetConstantMinus1());
- break;
- case Token::ADD:
- instr = new HMul(value, graph_->GetConstant1());
- break;
- default:
- BAILOUT("Value: unsupported unary operation");
- break;
- }
- ast_context()->ReturnInstruction(instr, expr->id());
- }
-}
-
-
-void HGraphBuilder::VisitIncrementOperation(IncrementOperation* expr) {
- // IncrementOperation is never visited by the visitor. It only
- // occurs as a subexpression of CountOperation.
- UNREACHABLE();
-}
-
-
-HInstruction* HGraphBuilder::BuildIncrement(HValue* value, bool increment) {
- HConstant* delta = increment
- ? graph_->GetConstant1()
- : graph_->GetConstantMinus1();
- HInstruction* instr = new HAdd(value, delta);
- AssumeRepresentation(instr, Representation::Integer32());
- return instr;
-}
-
-
-void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
- IncrementOperation* increment = expr->increment();
- Expression* target = increment->expression();
- VariableProxy* proxy = target->AsVariableProxy();
- Variable* var = proxy->AsVariable();
- Property* prop = target->AsProperty();
- ASSERT(var == NULL || prop == NULL);
- bool inc = expr->op() == Token::INC;
-
- if (var != NULL) {
- VISIT_FOR_VALUE(target);
-
- // Match the full code generator stack by simulating an extra stack
- // element for postfix operations in a non-effect context.
- bool has_extra = expr->is_postfix() && !ast_context()->IsEffect();
- HValue* before = has_extra ? Top() : Pop();
- HInstruction* after = BuildIncrement(before, inc);
- AddInstruction(after);
- Push(after);
-
- if (var->is_global()) {
- HandleGlobalVariableAssignment(var,
- after,
- expr->position(),
- expr->AssignmentId());
- } else if (var->IsStackAllocated()) {
- Bind(var, after);
- } else if (var->IsContextSlot()) {
- HValue* context = BuildContextChainWalk(var);
- int index = var->AsSlot()->index();
- HStoreContextSlot* instr = new HStoreContextSlot(context, index, after);
- AddInstruction(instr);
- if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
- } else {
- BAILOUT("lookup variable in count operation");
- }
- Drop(has_extra ? 2 : 1);
- ast_context()->ReturnValue(expr->is_postfix() ? before : after);
-
- } else if (prop != NULL) {
- prop->RecordTypeFeedback(oracle());
-
- if (prop->key()->IsPropertyName()) {
- // Named property.
-
- // Match the full code generator stack by simulating an extra stack
- // element for postfix operations in a non-effect context.
- bool has_extra = expr->is_postfix() && !ast_context()->IsEffect();
- if (has_extra) Push(graph_->GetConstantUndefined());
-
- VISIT_FOR_VALUE(prop->obj());
- HValue* obj = Top();
-
- HInstruction* load = NULL;
- if (prop->IsMonomorphic()) {
- Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- Handle<Map> map = prop->GetReceiverTypes()->first();
- load = BuildLoadNamed(obj, prop, map, name);
- } else {
- load = BuildLoadNamedGeneric(obj, prop);
- }
- PushAndAdd(load);
- if (load->HasSideEffects()) AddSimulate(increment->id());
-
- HValue* before = Pop();
- // There is no deoptimization to after the increment, so we don't need
- // to simulate the expression stack after this instruction.
- HInstruction* after = BuildIncrement(before, inc);
- AddInstruction(after);
-
- HInstruction* store = BuildStoreNamed(obj, after, prop);
- AddInstruction(store);
-
- // Overwrite the receiver in the bailout environment with the result
- // of the operation, and the placeholder with the original value if
- // necessary.
- environment()->SetExpressionStackAt(0, after);
- if (has_extra) environment()->SetExpressionStackAt(1, before);
- if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
- Drop(has_extra ? 2 : 1);
-
- ast_context()->ReturnValue(expr->is_postfix() ? before : after);
-
- } else {
- // Keyed property.
-
- // Match the full code generator stack by simulate an extra stack element
- // for postfix operations in a non-effect context.
- bool has_extra = expr->is_postfix() && !ast_context()->IsEffect();
- if (has_extra) Push(graph_->GetConstantUndefined());
-
- VISIT_FOR_VALUE(prop->obj());
- VISIT_FOR_VALUE(prop->key());
- HValue* obj = environment()->ExpressionStackAt(1);
- HValue* key = environment()->ExpressionStackAt(0);
-
- bool is_fast_elements = prop->IsMonomorphic() &&
- prop->GetMonomorphicReceiverType()->has_fast_elements();
-
- HInstruction* load = is_fast_elements
- ? BuildLoadKeyedFastElement(obj, key, prop)
- : BuildLoadKeyedGeneric(obj, key);
- PushAndAdd(load);
- if (load->HasSideEffects()) AddSimulate(increment->id());
-
- HValue* before = Pop();
- // There is no deoptimization to after the increment, so we don't need
- // to simulate the expression stack after this instruction.
- HInstruction* after = BuildIncrement(before, inc);
- AddInstruction(after);
-
- HInstruction* store = is_fast_elements
- ? BuildStoreKeyedFastElement(obj, key, after, prop)
- : BuildStoreKeyedGeneric(obj, key, after);
- AddInstruction(store);
-
- // Drop the key from the bailout environment. Overwrite the receiver
- // with the result of the operation, and the placeholder with the
- // original value if necessary.
- Drop(1);
- environment()->SetExpressionStackAt(0, after);
- if (has_extra) environment()->SetExpressionStackAt(1, before);
- if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
- Drop(has_extra ? 2 : 1);
-
- ast_context()->ReturnValue(expr->is_postfix() ? before : after);
- }
-
- } else {
- BAILOUT("invalid lhs in count operation");
- }
-}
-
-
-HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* string,
- HValue* index) {
- AddInstruction(new HCheckNonSmi(string));
- AddInstruction(new HCheckInstanceType(
- string, FIRST_STRING_TYPE, LAST_STRING_TYPE));
- HStringLength* length = new HStringLength(string);
- AddInstruction(length);
- AddInstruction(new HBoundsCheck(index, length));
- return new HStringCharCodeAt(string, index);
-}
-
-
-HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
- HValue* left,
- HValue* right) {
- HInstruction* instr = NULL;
- switch (expr->op()) {
- case Token::ADD:
- instr = new HAdd(left, right);
- break;
- case Token::SUB:
- instr = new HSub(left, right);
- break;
- case Token::MUL:
- instr = new HMul(left, right);
- break;
- case Token::MOD:
- instr = new HMod(left, right);
- break;
- case Token::DIV:
- instr = new HDiv(left, right);
- break;
- case Token::BIT_XOR:
- instr = new HBitXor(left, right);
- break;
- case Token::BIT_AND:
- instr = new HBitAnd(left, right);
- break;
- case Token::BIT_OR:
- instr = new HBitOr(left, right);
- break;
- case Token::SAR:
- instr = new HSar(left, right);
- break;
- case Token::SHR:
- instr = new HShr(left, right);
- break;
- case Token::SHL:
- instr = new HShl(left, right);
- break;
- default:
- UNREACHABLE();
- }
- TypeInfo info = oracle()->BinaryType(expr);
- // If we hit an uninitialized binary op stub we will get type info
- // for a smi operation. If one of the operands is a constant string
- // do not generate code assuming it is a smi operation.
- if (info.IsSmi() &&
- ((left->IsConstant() && HConstant::cast(left)->HasStringValue()) ||
- (right->IsConstant() && HConstant::cast(right)->HasStringValue()))) {
- return instr;
- }
- if (FLAG_trace_representation) {
- PrintF("Info: %s/%s\n", info.ToString(), ToRepresentation(info).Mnemonic());
- }
- Representation rep = ToRepresentation(info);
- // We only generate either int32 or generic tagged bitwise operations.
- if (instr->IsBitwiseBinaryOperation() && rep.IsDouble()) {
- rep = Representation::Integer32();
- }
- AssumeRepresentation(instr, rep);
- return instr;
-}
-
-
-// Check for the form (%_ClassOf(foo) === 'BarClass').
-static bool IsClassOfTest(CompareOperation* expr) {
- if (expr->op() != Token::EQ_STRICT) return false;
- CallRuntime* call = expr->left()->AsCallRuntime();
- if (call == NULL) return false;
- Literal* literal = expr->right()->AsLiteral();
- if (literal == NULL) return false;
- if (!literal->handle()->IsString()) return false;
- if (!call->name()->IsEqualTo(CStrVector("_ClassOf"))) return false;
- ASSERT(call->arguments()->length() == 1);
- return true;
-}
-
-
-void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
- if (expr->op() == Token::COMMA) {
- VISIT_FOR_EFFECT(expr->left());
- // Visit the right subexpression in the same AST context as the entire
- // expression.
- Visit(expr->right());
-
- } else if (expr->op() == Token::AND || expr->op() == Token::OR) {
- bool is_logical_and = (expr->op() == Token::AND);
- if (ast_context()->IsTest()) {
- TestContext* context = TestContext::cast(ast_context());
- // Translate left subexpression.
- HBasicBlock* eval_right = graph()->CreateBasicBlock();
- if (is_logical_and) {
- VISIT_FOR_CONTROL(expr->left(), eval_right, context->if_false());
- } else {
- VISIT_FOR_CONTROL(expr->left(), context->if_true(), eval_right);
- }
- eval_right->SetJoinId(expr->RightId());
-
- // Translate right subexpression by visiting it in the same AST
- // context as the entire expression.
- set_current_block(eval_right);
- Visit(expr->right());
-
- } else if (ast_context()->IsValue()) {
- VISIT_FOR_VALUE(expr->left());
- ASSERT(current_block() != NULL);
-
- // We need an extra block to maintain edge-split form.
- HBasicBlock* empty_block = graph()->CreateBasicBlock();
- HBasicBlock* eval_right = graph()->CreateBasicBlock();
- HTest* test = is_logical_and
- ? new HTest(Top(), eval_right, empty_block)
- : new HTest(Top(), empty_block, eval_right);
- current_block()->Finish(test);
-
- set_current_block(eval_right);
- Drop(1); // Value of the left subexpression.
- VISIT_FOR_VALUE(expr->right());
-
- HBasicBlock* join_block =
- CreateJoin(empty_block, current_block(), expr->id());
- set_current_block(join_block);
- ast_context()->ReturnValue(Pop());
-
- } else {
- ASSERT(ast_context()->IsEffect());
- // In an effect context, we don't need the value of the left
- // subexpression, only its control flow and side effects. We need an
- // extra block to maintain edge-split form.
- HBasicBlock* empty_block = graph()->CreateBasicBlock();
- HBasicBlock* right_block = graph()->CreateBasicBlock();
- HBasicBlock* join_block = graph()->CreateBasicBlock();
- if (is_logical_and) {
- VISIT_FOR_CONTROL(expr->left(), right_block, empty_block);
- } else {
- VISIT_FOR_CONTROL(expr->left(), empty_block, right_block);
- }
- // TODO(kmillikin): Find a way to fix this. It's ugly that there are
- // actually two empty blocks (one here and one inserted by
- // TestContext::BuildBranch, and that they both have an HSimulate
- // though the second one is not a merge node, and that we really have
- // no good AST ID to put on that first HSimulate.
- empty_block->SetJoinId(expr->id());
- right_block->SetJoinId(expr->RightId());
- set_current_block(right_block);
- VISIT_FOR_EFFECT(expr->right());
-
- empty_block->Goto(join_block);
- current_block()->Goto(join_block);
- join_block->SetJoinId(expr->id());
- set_current_block(join_block);
- // We did not materialize any value in the predecessor environments,
- // so there is no need to handle it here.
- }
-
- } else {
- VISIT_FOR_VALUE(expr->left());
- VISIT_FOR_VALUE(expr->right());
-
- HValue* right = Pop();
- HValue* left = Pop();
- HInstruction* instr = BuildBinaryOperation(expr, left, right);
- instr->set_position(expr->position());
- ast_context()->ReturnInstruction(instr, expr->id());
- }
-}
-
-
-void HGraphBuilder::AssumeRepresentation(HValue* value, Representation r) {
- if (value->CheckFlag(HValue::kFlexibleRepresentation)) {
- if (FLAG_trace_representation) {
- PrintF("Assume representation for %s to be %s (%d)\n",
- value->Mnemonic(),
- r.Mnemonic(),
- graph_->GetMaximumValueID());
- }
- value->ChangeRepresentation(r);
- // The representation of the value is dictated by type feedback and
- // will not be changed later.
- value->ClearFlag(HValue::kFlexibleRepresentation);
- } else if (FLAG_trace_representation) {
- PrintF("No representation assumed\n");
- }
-}
-
-
-Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
- if (info.IsSmi()) return Representation::Integer32();
- if (info.IsInteger32()) return Representation::Integer32();
- if (info.IsDouble()) return Representation::Double();
- if (info.IsNumber()) return Representation::Double();
- return Representation::Tagged();
-}
-
-
-void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
- if (IsClassOfTest(expr)) {
- CallRuntime* call = expr->left()->AsCallRuntime();
- VISIT_FOR_VALUE(call->arguments()->at(0));
- HValue* value = Pop();
- Literal* literal = expr->right()->AsLiteral();
- Handle<String> rhs = Handle<String>::cast(literal->handle());
- HInstruction* instr = new HClassOfTest(value, rhs);
- instr->set_position(expr->position());
- ast_context()->ReturnInstruction(instr, expr->id());
- return;
- }
-
- // Check for the pattern: typeof <expression> == <string literal>.
- UnaryOperation* left_unary = expr->left()->AsUnaryOperation();
- Literal* right_literal = expr->right()->AsLiteral();
- if ((expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT) &&
- left_unary != NULL && left_unary->op() == Token::TYPEOF &&
- right_literal != NULL && right_literal->handle()->IsString()) {
- VisitForTypeOf(left_unary->expression());
- if (HasStackOverflow()) return;
- HValue* left = Pop();
- HInstruction* instr = new HTypeofIs(left,
- Handle<String>::cast(right_literal->handle()));
- instr->set_position(expr->position());
- ast_context()->ReturnInstruction(instr, expr->id());
- return;
- }
-
- VISIT_FOR_VALUE(expr->left());
- VISIT_FOR_VALUE(expr->right());
-
- HValue* right = Pop();
- HValue* left = Pop();
- Token::Value op = expr->op();
-
- TypeInfo type_info = oracle()->CompareType(expr);
- HInstruction* instr = NULL;
- if (op == Token::INSTANCEOF) {
- // Check to see if the rhs of the instanceof is a global function not
- // residing in new space. If it is we assume that the function will stay the
- // same.
- Handle<JSFunction> target = Handle<JSFunction>::null();
- Variable* var = expr->right()->AsVariableProxy()->AsVariable();
- bool global_function = (var != NULL) && var->is_global() && !var->is_this();
- if (global_function &&
- info()->has_global_object() &&
- !info()->global_object()->IsAccessCheckNeeded()) {
- Handle<String> name = var->name();
- Handle<GlobalObject> global(info()->global_object());
- LookupResult lookup;
- global->Lookup(*name, &lookup);
- if (lookup.IsProperty() &&
- lookup.type() == NORMAL &&
- lookup.GetValue()->IsJSFunction()) {
- Handle<JSFunction> candidate(JSFunction::cast(lookup.GetValue()));
- // If the function is in new space we assume it's more likely to
- // change and thus prefer the general IC code.
- if (!isolate()->heap()->InNewSpace(*candidate)) {
- target = candidate;
- }
- }
- }
-
- // If the target is not null we have found a known global function that is
- // assumed to stay the same for this instanceof.
- if (target.is_null()) {
- HContext* context = new HContext;
- AddInstruction(context);
- instr = new HInstanceOf(context, left, right);
- } else {
- AddInstruction(new HCheckFunction(right, target));
- instr = new HInstanceOfKnownGlobal(left, target);
- }
- } else if (op == Token::IN) {
- BAILOUT("Unsupported comparison: in");
- } else if (type_info.IsNonPrimitive()) {
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT: {
- AddInstruction(new HCheckNonSmi(left));
- AddInstruction(HCheckInstanceType::NewIsJSObjectOrJSFunction(left));
- AddInstruction(new HCheckNonSmi(right));
- AddInstruction(HCheckInstanceType::NewIsJSObjectOrJSFunction(right));
- instr = new HCompareJSObjectEq(left, right);
- break;
- }
- default:
- BAILOUT("Unsupported non-primitive compare");
- break;
- }
- } else {
- HCompare* compare = new HCompare(left, right, op);
- Representation r = ToRepresentation(type_info);
- compare->SetInputRepresentation(r);
- instr = compare;
- }
- instr->set_position(expr->position());
- ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-void HGraphBuilder::VisitCompareToNull(CompareToNull* expr) {
- VISIT_FOR_VALUE(expr->expression());
-
- HValue* value = Pop();
- HIsNull* compare = new HIsNull(value, expr->is_strict());
- ast_context()->ReturnInstruction(compare, expr->id());
-}
-
-
-void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
- BAILOUT("ThisFunction");
-}
-
-
-void HGraphBuilder::VisitDeclaration(Declaration* decl) {
- // We allow only declarations that do not require code generation.
- // The following all require code generation: global variables and
- // functions, variables with slot type LOOKUP, declarations with
- // mode CONST, and functions.
- Variable* var = decl->proxy()->var();
- Slot* slot = var->AsSlot();
- if (var->is_global() ||
- (slot != NULL && slot->type() == Slot::LOOKUP) ||
- decl->mode() == Variable::CONST ||
- decl->fun() != NULL) {
- BAILOUT("unsupported declaration");
- }
-}
-
-
-// Generators for inline runtime functions.
-// Support for types.
-void HGraphBuilder::GenerateIsSmi(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- VISIT_FOR_VALUE(call->arguments()->at(0));
- HValue* value = Pop();
- HIsSmi* result = new HIsSmi(value);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- VISIT_FOR_VALUE(call->arguments()->at(0));
- HValue* value = Pop();
- HHasInstanceType* result =
- new HHasInstanceType(value, FIRST_JS_OBJECT_TYPE, LAST_TYPE);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HGraphBuilder::GenerateIsFunction(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- VISIT_FOR_VALUE(call->arguments()->at(0));
- HValue* value = Pop();
- HHasInstanceType* result = new HHasInstanceType(value, JS_FUNCTION_TYPE);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- VISIT_FOR_VALUE(call->arguments()->at(0));
- HValue* value = Pop();
- HHasCachedArrayIndex* result = new HHasCachedArrayIndex(value);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HGraphBuilder::GenerateIsArray(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- VISIT_FOR_VALUE(call->arguments()->at(0));
- HValue* value = Pop();
- HHasInstanceType* result = new HHasInstanceType(value, JS_ARRAY_TYPE);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- VISIT_FOR_VALUE(call->arguments()->at(0));
- HValue* value = Pop();
- HHasInstanceType* result = new HHasInstanceType(value, JS_REGEXP_TYPE);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HGraphBuilder::GenerateIsObject(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- VISIT_FOR_VALUE(call->arguments()->at(0));
- HValue* value = Pop();
- HIsObject* test = new HIsObject(value);
- ast_context()->ReturnInstruction(test, call->id());
-}
-
-
-void HGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
- BAILOUT("inlined runtime function: IsNonNegativeSmi");
-}
-
-
-void HGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
- BAILOUT("inlined runtime function: IsUndetectableObject");
-}
-
-
-void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
- CallRuntime* call) {
- BAILOUT("inlined runtime function: IsStringWrapperSafeForDefaultValueOf");
-}
-
-
-// Support for construct call checks.
-void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 0);
- if (function_state()->outer() != NULL) {
- // We are generating graph for inlined function. Currently
- // constructor inlining is not supported and we can just return
- // false from %_IsConstructCall().
- ast_context()->ReturnValue(graph()->GetConstantFalse());
- } else {
- ast_context()->ReturnInstruction(new HIsConstructCall, call->id());
- }
-}
-
-
-// Support for arguments.length and arguments[?].
-void HGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 0);
- HInstruction* elements = AddInstruction(new HArgumentsElements);
- HArgumentsLength* result = new HArgumentsLength(elements);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HGraphBuilder::GenerateArguments(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- VISIT_FOR_VALUE(call->arguments()->at(0));
- HValue* index = Pop();
- HInstruction* elements = AddInstruction(new HArgumentsElements);
- HInstruction* length = AddInstruction(new HArgumentsLength(elements));
- HAccessArgumentsAt* result = new HAccessArgumentsAt(elements, length, index);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Support for accessing the class and value fields of an object.
-void HGraphBuilder::GenerateClassOf(CallRuntime* call) {
- // The special form detected by IsClassOfTest is detected before we get here
- // and does not cause a bailout.
- BAILOUT("inlined runtime function: ClassOf");
-}
-
-
-void HGraphBuilder::GenerateValueOf(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- VISIT_FOR_VALUE(call->arguments()->at(0));
- HValue* value = Pop();
- HValueOf* result = new HValueOf(value);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
- BAILOUT("inlined runtime function: SetValueOf");
-}
-
-
-// Fast support for charCodeAt(n).
-void HGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 2);
- VISIT_FOR_VALUE(call->arguments()->at(0));
- VISIT_FOR_VALUE(call->arguments()->at(1));
- HValue* index = Pop();
- HValue* string = Pop();
- HStringCharCodeAt* result = BuildStringCharCodeAt(string, index);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Fast support for string.charAt(n) and string[n].
-void HGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- VISIT_FOR_VALUE(call->arguments()->at(0));
- HValue* char_code = Pop();
- HStringCharFromCode* result = new HStringCharFromCode(char_code);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Fast support for string.charAt(n) and string[n].
-void HGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 2);
- VISIT_FOR_VALUE(call->arguments()->at(0));
- VISIT_FOR_VALUE(call->arguments()->at(1));
- HValue* index = Pop();
- HValue* string = Pop();
- HStringCharCodeAt* char_code = BuildStringCharCodeAt(string, index);
- AddInstruction(char_code);
- HStringCharFromCode* result = new HStringCharFromCode(char_code);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Fast support for object equality testing.
-void HGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 2);
- VISIT_FOR_VALUE(call->arguments()->at(0));
- VISIT_FOR_VALUE(call->arguments()->at(1));
- HValue* right = Pop();
- HValue* left = Pop();
- HCompareJSObjectEq* result = new HCompareJSObjectEq(left, right);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HGraphBuilder::GenerateLog(CallRuntime* call) {
- // %_Log is ignored in optimized code.
- ast_context()->ReturnValue(graph()->GetConstantUndefined());
-}
-
-
-// Fast support for Math.random().
-void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
- BAILOUT("inlined runtime function: RandomHeapNumber");
-}
-
-
-// Fast support for StringAdd.
-void HGraphBuilder::GenerateStringAdd(CallRuntime* call) {
- ASSERT_EQ(2, call->arguments()->length());
- VisitArgumentList(call->arguments());
- CHECK_BAILOUT;
- HContext* context = new HContext;
- AddInstruction(context);
- HCallStub* result = new HCallStub(context, CodeStub::StringAdd, 2);
- Drop(2);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Fast support for SubString.
-void HGraphBuilder::GenerateSubString(CallRuntime* call) {
- ASSERT_EQ(3, call->arguments()->length());
- VisitArgumentList(call->arguments());
- CHECK_BAILOUT;
- HContext* context = new HContext;
- AddInstruction(context);
- HCallStub* result = new HCallStub(context, CodeStub::SubString, 3);
- Drop(3);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Fast support for StringCompare.
-void HGraphBuilder::GenerateStringCompare(CallRuntime* call) {
- ASSERT_EQ(2, call->arguments()->length());
- VisitArgumentList(call->arguments());
- CHECK_BAILOUT;
- HContext* context = new HContext;
- AddInstruction(context);
- HCallStub* result = new HCallStub(context, CodeStub::StringCompare, 2);
- Drop(2);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Support for direct calls from JavaScript to native RegExp code.
-void HGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
- ASSERT_EQ(4, call->arguments()->length());
- VisitArgumentList(call->arguments());
- CHECK_BAILOUT;
- HContext* context = new HContext;
- AddInstruction(context);
- HCallStub* result = new HCallStub(context, CodeStub::RegExpExec, 4);
- Drop(4);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Construct a RegExp exec result with two in-object properties.
-void HGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
- ASSERT_EQ(3, call->arguments()->length());
- VisitArgumentList(call->arguments());
- CHECK_BAILOUT;
- HContext* context = new HContext;
- AddInstruction(context);
- HCallStub* result =
- new HCallStub(context, CodeStub::RegExpConstructResult, 3);
- Drop(3);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Support for fast native caches.
-void HGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
- BAILOUT("inlined runtime function: GetFromCache");
-}
-
-
-// Fast support for number to string.
-void HGraphBuilder::GenerateNumberToString(CallRuntime* call) {
- ASSERT_EQ(1, call->arguments()->length());
- VisitArgumentList(call->arguments());
- CHECK_BAILOUT;
- HContext* context = new HContext;
- AddInstruction(context);
- HCallStub* result = new HCallStub(context, CodeStub::NumberToString, 1);
- Drop(1);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Fast swapping of elements. Takes three expressions, the object and two
-// indices. This should only be used if the indices are known to be
-// non-negative and within bounds of the elements array at the call site.
-void HGraphBuilder::GenerateSwapElements(CallRuntime* call) {
- BAILOUT("inlined runtime function: SwapElements");
-}
-
-
-// Fast call for custom callbacks.
-void HGraphBuilder::GenerateCallFunction(CallRuntime* call) {
- BAILOUT("inlined runtime function: CallFunction");
-}
-
-
-// Fast call to math functions.
-void HGraphBuilder::GenerateMathPow(CallRuntime* call) {
- ASSERT_EQ(2, call->arguments()->length());
- VISIT_FOR_VALUE(call->arguments()->at(0));
- VISIT_FOR_VALUE(call->arguments()->at(1));
- HValue* right = Pop();
- HValue* left = Pop();
- HPower* result = new HPower(left, right);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HGraphBuilder::GenerateMathSin(CallRuntime* call) {
- ASSERT_EQ(1, call->arguments()->length());
- VisitArgumentList(call->arguments());
- CHECK_BAILOUT;
- HContext* context = new HContext;
- AddInstruction(context);
- HCallStub* result = new HCallStub(context, CodeStub::TranscendentalCache, 1);
- result->set_transcendental_type(TranscendentalCache::SIN);
- Drop(1);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HGraphBuilder::GenerateMathCos(CallRuntime* call) {
- ASSERT_EQ(1, call->arguments()->length());
- VisitArgumentList(call->arguments());
- CHECK_BAILOUT;
- HContext* context = new HContext;
- AddInstruction(context);
- HCallStub* result = new HCallStub(context, CodeStub::TranscendentalCache, 1);
- result->set_transcendental_type(TranscendentalCache::COS);
- Drop(1);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HGraphBuilder::GenerateMathLog(CallRuntime* call) {
- ASSERT_EQ(1, call->arguments()->length());
- VisitArgumentList(call->arguments());
- CHECK_BAILOUT;
- HContext* context = new HContext;
- AddInstruction(context);
- HCallStub* result = new HCallStub(context, CodeStub::TranscendentalCache, 1);
- result->set_transcendental_type(TranscendentalCache::LOG);
- Drop(1);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
- BAILOUT("inlined runtime function: MathSqrt");
-}
-
-
-// Check whether two RegExps are equivalent
-void HGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
- BAILOUT("inlined runtime function: IsRegExpEquivalent");
-}
-
-
-void HGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- VISIT_FOR_VALUE(call->arguments()->at(0));
- HValue* value = Pop();
- HGetCachedArrayIndex* result = new HGetCachedArrayIndex(value);
- ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
- BAILOUT("inlined runtime function: FastAsciiArrayJoin");
-}
-
-
-#undef BAILOUT
-#undef CHECK_BAILOUT
-#undef VISIT_FOR_EFFECT
-#undef VISIT_FOR_VALUE
-#undef ADD_TO_SUBGRAPH
-
-
-HEnvironment::HEnvironment(HEnvironment* outer,
- Scope* scope,
- Handle<JSFunction> closure)
- : closure_(closure),
- values_(0),
- assigned_variables_(4),
- parameter_count_(0),
- local_count_(0),
- outer_(outer),
- pop_count_(0),
- push_count_(0),
- ast_id_(AstNode::kNoNumber) {
- Initialize(scope->num_parameters() + 1, scope->num_stack_slots(), 0);
-}
-
-
-HEnvironment::HEnvironment(const HEnvironment* other)
- : values_(0),
- assigned_variables_(0),
- parameter_count_(0),
- local_count_(0),
- outer_(NULL),
- pop_count_(0),
- push_count_(0),
- ast_id_(other->ast_id()) {
- Initialize(other);
-}
-
-
-void HEnvironment::Initialize(int parameter_count,
- int local_count,
- int stack_height) {
- parameter_count_ = parameter_count;
- local_count_ = local_count;
-
- // Avoid reallocating the temporaries' backing store on the first Push.
- int total = parameter_count + local_count + stack_height;
- values_.Initialize(total + 4);
- for (int i = 0; i < total; ++i) values_.Add(NULL);
-}
-
-
-void HEnvironment::Initialize(const HEnvironment* other) {
- closure_ = other->closure();
- values_.AddAll(other->values_);
- assigned_variables_.AddAll(other->assigned_variables_);
- parameter_count_ = other->parameter_count_;
- local_count_ = other->local_count_;
- if (other->outer_ != NULL) outer_ = other->outer_->Copy(); // Deep copy.
- pop_count_ = other->pop_count_;
- push_count_ = other->push_count_;
- ast_id_ = other->ast_id_;
-}
-
-
-void HEnvironment::AddIncomingEdge(HBasicBlock* block, HEnvironment* other) {
- ASSERT(!block->IsLoopHeader());
- ASSERT(values_.length() == other->values_.length());
-
- int length = values_.length();
- for (int i = 0; i < length; ++i) {
- HValue* value = values_[i];
- if (value != NULL && value->IsPhi() && value->block() == block) {
- // There is already a phi for the i'th value.
- HPhi* phi = HPhi::cast(value);
- // Assert index is correct and that we haven't missed an incoming edge.
- ASSERT(phi->merged_index() == i);
- ASSERT(phi->OperandCount() == block->predecessors()->length());
- phi->AddInput(other->values_[i]);
- } else if (values_[i] != other->values_[i]) {
- // There is a fresh value on the incoming edge, a phi is needed.
- ASSERT(values_[i] != NULL && other->values_[i] != NULL);
- HPhi* phi = new HPhi(i);
- HValue* old_value = values_[i];
- for (int j = 0; j < block->predecessors()->length(); j++) {
- phi->AddInput(old_value);
- }
- phi->AddInput(other->values_[i]);
- this->values_[i] = phi;
- block->AddPhi(phi);
- }
- }
-}
-
-
-void HEnvironment::Bind(int index, HValue* value) {
- ASSERT(value != NULL);
- if (!assigned_variables_.Contains(index)) {
- assigned_variables_.Add(index);
- }
- values_[index] = value;
-}
-
-
-bool HEnvironment::HasExpressionAt(int index) const {
- return index >= parameter_count_ + local_count_;
-}
-
-
-bool HEnvironment::ExpressionStackIsEmpty() const {
- int first_expression = parameter_count() + local_count();
- ASSERT(length() >= first_expression);
- return length() == first_expression;
-}
-
-
-void HEnvironment::SetExpressionStackAt(int index_from_top, HValue* value) {
- int count = index_from_top + 1;
- int index = values_.length() - count;
- ASSERT(HasExpressionAt(index));
- // The push count must include at least the element in question or else
- // the new value will not be included in this environment's history.
- if (push_count_ < count) {
- // This is the same effect as popping then re-pushing 'count' elements.
- pop_count_ += (count - push_count_);
- push_count_ = count;
- }
- values_[index] = value;
-}
-
-
-void HEnvironment::Drop(int count) {
- for (int i = 0; i < count; ++i) {
- Pop();
- }
-}
-
-
-HEnvironment* HEnvironment::Copy() const {
- return new HEnvironment(this);
-}
-
-
-HEnvironment* HEnvironment::CopyWithoutHistory() const {
- HEnvironment* result = Copy();
- result->ClearHistory();
- return result;
-}
-
-
-HEnvironment* HEnvironment::CopyAsLoopHeader(HBasicBlock* loop_header) const {
- HEnvironment* new_env = Copy();
- for (int i = 0; i < values_.length(); ++i) {
- HPhi* phi = new HPhi(i);
- phi->AddInput(values_[i]);
- new_env->values_[i] = phi;
- loop_header->AddPhi(phi);
- }
- new_env->ClearHistory();
- return new_env;
-}
-
-
-HEnvironment* HEnvironment::CopyForInlining(Handle<JSFunction> target,
- FunctionLiteral* function,
- bool is_speculative,
- HConstant* undefined) const {
- // Outer environment is a copy of this one without the arguments.
- int arity = function->scope()->num_parameters();
- HEnvironment* outer = Copy();
- outer->Drop(arity + 1); // Including receiver.
- outer->ClearHistory();
- HEnvironment* inner = new HEnvironment(outer, function->scope(), target);
- // Get the argument values from the original environment.
- if (is_speculative) {
- for (int i = 0; i <= arity; ++i) { // Include receiver.
- HValue* push = ExpressionStackAt(arity - i);
- inner->SetValueAt(i, push);
- }
- } else {
- for (int i = 0; i <= arity; ++i) { // Include receiver.
- inner->SetValueAt(i, ExpressionStackAt(arity - i));
- }
- }
-
- // Initialize the stack-allocated locals to undefined.
- int local_base = arity + 1;
- int local_count = function->scope()->num_stack_slots();
- for (int i = 0; i < local_count; ++i) {
- inner->SetValueAt(local_base + i, undefined);
- }
-
- inner->set_ast_id(function->id());
- return inner;
-}
-
-
-void HEnvironment::PrintTo(StringStream* stream) {
- for (int i = 0; i < length(); i++) {
- if (i == 0) stream->Add("parameters\n");
- if (i == parameter_count()) stream->Add("locals\n");
- if (i == parameter_count() + local_count()) stream->Add("expressions");
- HValue* val = values_.at(i);
- stream->Add("%d: ", i);
- if (val != NULL) {
- val->PrintNameTo(stream);
- } else {
- stream->Add("NULL");
- }
- stream->Add("\n");
- }
-}
-
-
-void HEnvironment::PrintToStd() {
- HeapStringAllocator string_allocator;
- StringStream trace(&string_allocator);
- PrintTo(&trace);
- PrintF("%s", *trace.ToCString());
-}
-
-
-void HTracer::TraceCompilation(FunctionLiteral* function) {
- Tag tag(this, "compilation");
- Handle<String> name = function->debug_name();
- PrintStringProperty("name", *name->ToCString());
- PrintStringProperty("method", *name->ToCString());
- PrintLongProperty("date", static_cast<int64_t>(OS::TimeCurrentMillis()));
-}
-
-
-void HTracer::TraceLithium(const char* name, LChunk* chunk) {
- Trace(name, chunk->graph(), chunk);
-}
-
-
-void HTracer::TraceHydrogen(const char* name, HGraph* graph) {
- Trace(name, graph, NULL);
-}
-
-
-void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
- Tag tag(this, "cfg");
- PrintStringProperty("name", name);
- const ZoneList<HBasicBlock*>* blocks = graph->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- HBasicBlock* current = blocks->at(i);
- Tag block_tag(this, "block");
- PrintBlockProperty("name", current->block_id());
- PrintIntProperty("from_bci", -1);
- PrintIntProperty("to_bci", -1);
-
- if (!current->predecessors()->is_empty()) {
- PrintIndent();
- trace_.Add("predecessors");
- for (int j = 0; j < current->predecessors()->length(); ++j) {
- trace_.Add(" \"B%d\"", current->predecessors()->at(j)->block_id());
- }
- trace_.Add("\n");
- } else {
- PrintEmptyProperty("predecessors");
- }
-
- if (current->end() == NULL || current->end()->FirstSuccessor() == NULL) {
- PrintEmptyProperty("successors");
- } else if (current->end()->SecondSuccessor() == NULL) {
- PrintBlockProperty("successors",
- current->end()->FirstSuccessor()->block_id());
- } else {
- PrintBlockProperty("successors",
- current->end()->FirstSuccessor()->block_id(),
- current->end()->SecondSuccessor()->block_id());
- }
-
- PrintEmptyProperty("xhandlers");
- PrintEmptyProperty("flags");
-
- if (current->dominator() != NULL) {
- PrintBlockProperty("dominator", current->dominator()->block_id());
- }
-
- if (chunk != NULL) {
- int first_index = current->first_instruction_index();
- int last_index = current->last_instruction_index();
- PrintIntProperty(
- "first_lir_id",
- LifetimePosition::FromInstructionIndex(first_index).Value());
- PrintIntProperty(
- "last_lir_id",
- LifetimePosition::FromInstructionIndex(last_index).Value());
- }
-
- {
- Tag states_tag(this, "states");
- Tag locals_tag(this, "locals");
- int total = current->phis()->length();
- trace_.Add("size %d\n", total);
- trace_.Add("method \"None\"");
- for (int j = 0; j < total; ++j) {
- HPhi* phi = current->phis()->at(j);
- trace_.Add("%d ", phi->merged_index());
- phi->PrintNameTo(&trace_);
- trace_.Add(" ");
- phi->PrintTo(&trace_);
- trace_.Add("\n");
- }
- }
-
- {
- Tag HIR_tag(this, "HIR");
- HInstruction* instruction = current->first();
- while (instruction != NULL) {
- int bci = 0;
- int uses = instruction->uses()->length();
- trace_.Add("%d %d ", bci, uses);
- instruction->PrintNameTo(&trace_);
- trace_.Add(" ");
- instruction->PrintTo(&trace_);
- trace_.Add(" <|@\n");
- instruction = instruction->next();
- }
- }
-
-
- if (chunk != NULL) {
- Tag LIR_tag(this, "LIR");
- int first_index = current->first_instruction_index();
- int last_index = current->last_instruction_index();
- if (first_index != -1 && last_index != -1) {
- const ZoneList<LInstruction*>* instructions = chunk->instructions();
- for (int i = first_index; i <= last_index; ++i) {
- LInstruction* linstr = instructions->at(i);
- if (linstr != NULL) {
- trace_.Add("%d ",
- LifetimePosition::FromInstructionIndex(i).Value());
- linstr->PrintTo(&trace_);
- trace_.Add(" <|@\n");
- }
- }
- }
- }
- }
-}
-
-
-void HTracer::TraceLiveRanges(const char* name, LAllocator* allocator) {
- Tag tag(this, "intervals");
- PrintStringProperty("name", name);
-
- const Vector<LiveRange*>* fixed_d = allocator->fixed_double_live_ranges();
- for (int i = 0; i < fixed_d->length(); ++i) {
- TraceLiveRange(fixed_d->at(i), "fixed");
- }
-
- const Vector<LiveRange*>* fixed = allocator->fixed_live_ranges();
- for (int i = 0; i < fixed->length(); ++i) {
- TraceLiveRange(fixed->at(i), "fixed");
- }
-
- const ZoneList<LiveRange*>* live_ranges = allocator->live_ranges();
- for (int i = 0; i < live_ranges->length(); ++i) {
- TraceLiveRange(live_ranges->at(i), "object");
- }
-}
-
-
-void HTracer::TraceLiveRange(LiveRange* range, const char* type) {
- if (range != NULL && !range->IsEmpty()) {
- trace_.Add("%d %s", range->id(), type);
- if (range->HasRegisterAssigned()) {
- LOperand* op = range->CreateAssignedOperand();
- int assigned_reg = op->index();
- if (op->IsDoubleRegister()) {
- trace_.Add(" \"%s\"",
- DoubleRegister::AllocationIndexToString(assigned_reg));
- } else {
- ASSERT(op->IsRegister());
- trace_.Add(" \"%s\"", Register::AllocationIndexToString(assigned_reg));
- }
- } else if (range->IsSpilled()) {
- LOperand* op = range->TopLevel()->GetSpillOperand();
- if (op->IsDoubleStackSlot()) {
- trace_.Add(" \"double_stack:%d\"", op->index());
- } else {
- ASSERT(op->IsStackSlot());
- trace_.Add(" \"stack:%d\"", op->index());
- }
- }
- int parent_index = -1;
- if (range->IsChild()) {
- parent_index = range->parent()->id();
- } else {
- parent_index = range->id();
- }
- LOperand* op = range->FirstHint();
- int hint_index = -1;
- if (op != NULL && op->IsUnallocated()) hint_index = op->VirtualRegister();
- trace_.Add(" %d %d", parent_index, hint_index);
- UseInterval* cur_interval = range->first_interval();
- while (cur_interval != NULL && range->Covers(cur_interval->start())) {
- trace_.Add(" [%d, %d[",
- cur_interval->start().Value(),
- cur_interval->end().Value());
- cur_interval = cur_interval->next();
- }
-
- UsePosition* current_pos = range->first_pos();
- while (current_pos != NULL) {
- if (current_pos->RegisterIsBeneficial() || FLAG_trace_all_uses) {
- trace_.Add(" %d M", current_pos->pos().Value());
- }
- current_pos = current_pos->next();
- }
-
- trace_.Add(" \"\"\n");
- }
-}
-
-
-void HTracer::FlushToFile() {
- AppendChars(filename_, *trace_.ToCString(), trace_.length(), false);
- trace_.Reset();
-}
-
-
-void HStatistics::Initialize(CompilationInfo* info) {
- source_size_ += info->shared_info()->SourceSize();
-}
-
-
-void HStatistics::Print() {
- PrintF("Timing results:\n");
- int64_t sum = 0;
- for (int i = 0; i < timing_.length(); ++i) {
- sum += timing_[i];
- }
-
- for (int i = 0; i < names_.length(); ++i) {
- PrintF("%30s", names_[i]);
- double ms = static_cast<double>(timing_[i]) / 1000;
- double percent = static_cast<double>(timing_[i]) * 100 / sum;
- PrintF(" - %7.3f ms / %4.1f %% ", ms, percent);
-
- unsigned size = sizes_[i];
- double size_percent = static_cast<double>(size) * 100 / total_size_;
- PrintF(" %8u bytes / %4.1f %%\n", size, size_percent);
- }
- double source_size_in_kb = static_cast<double>(source_size_) / 1024;
- double normalized_time = source_size_in_kb > 0
- ? (static_cast<double>(sum) / 1000) / source_size_in_kb
- : 0;
- double normalized_bytes = source_size_in_kb > 0
- ? total_size_ / source_size_in_kb
- : 0;
- PrintF("%30s - %7.3f ms %7.3f bytes\n", "Sum",
- normalized_time, normalized_bytes);
- PrintF("---------------------------------------------------------------\n");
- PrintF("%30s - %7.3f ms (%.1f times slower than full code gen)\n",
- "Total",
- static_cast<double>(total_) / 1000,
- static_cast<double>(total_) / full_code_gen_);
-}
-
-
-void HStatistics::SaveTiming(const char* name, int64_t ticks, unsigned size) {
- if (name == HPhase::kFullCodeGen) {
- full_code_gen_ += ticks;
- } else if (name == HPhase::kTotal) {
- total_ += ticks;
- } else {
- total_size_ += size;
- for (int i = 0; i < names_.length(); ++i) {
- if (names_[i] == name) {
- timing_[i] += ticks;
- sizes_[i] += size;
- return;
- }
- }
- names_.Add(name);
- timing_.Add(ticks);
- sizes_.Add(size);
- }
-}
-
-
-const char* const HPhase::kFullCodeGen = "Full code generator";
-const char* const HPhase::kTotal = "Total";
-
-
-void HPhase::Begin(const char* name,
- HGraph* graph,
- LChunk* chunk,
- LAllocator* allocator) {
- name_ = name;
- graph_ = graph;
- chunk_ = chunk;
- allocator_ = allocator;
- if (allocator != NULL && chunk_ == NULL) {
- chunk_ = allocator->chunk();
- }
- if (FLAG_hydrogen_stats) start_ = OS::Ticks();
- start_allocation_size_ = Zone::allocation_size_;
-}
-
-
-void HPhase::End() const {
- if (FLAG_hydrogen_stats) {
- int64_t end = OS::Ticks();
- unsigned size = Zone::allocation_size_ - start_allocation_size_;
- HStatistics::Instance()->SaveTiming(name_, end - start_, size);
- }
-
- if (FLAG_trace_hydrogen) {
- if (graph_ != NULL) HTracer::Instance()->TraceHydrogen(name_, graph_);
- if (chunk_ != NULL) HTracer::Instance()->TraceLithium(name_, chunk_);
- if (allocator_ != NULL) {
- HTracer::Instance()->TraceLiveRanges(name_, allocator_);
- }
- }
-
-#ifdef DEBUG
- if (graph_ != NULL) graph_->Verify();
- if (allocator_ != NULL) allocator_->Verify();
-#endif
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/hydrogen.h b/src/3rdparty/v8/src/hydrogen.h
deleted file mode 100644
index 93664e9..0000000
--- a/src/3rdparty/v8/src/hydrogen.h
+++ /dev/null
@@ -1,1119 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_HYDROGEN_H_
-#define V8_HYDROGEN_H_
-
-#include "v8.h"
-
-#include "ast.h"
-#include "compiler.h"
-#include "data-flow.h"
-#include "hydrogen-instructions.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class HEnvironment;
-class HGraph;
-class HLoopInformation;
-class HTracer;
-class LAllocator;
-class LChunk;
-class LiveRange;
-
-
-class HBasicBlock: public ZoneObject {
- public:
- explicit HBasicBlock(HGraph* graph);
- virtual ~HBasicBlock() { }
-
- // Simple accessors.
- int block_id() const { return block_id_; }
- void set_block_id(int id) { block_id_ = id; }
- HGraph* graph() const { return graph_; }
- const ZoneList<HPhi*>* phis() const { return &phis_; }
- HInstruction* first() const { return first_; }
- HInstruction* last() const { return last_; }
- void set_last(HInstruction* instr) { last_ = instr; }
- HInstruction* GetLastInstruction();
- HControlInstruction* end() const { return end_; }
- HLoopInformation* loop_information() const { return loop_information_; }
- const ZoneList<HBasicBlock*>* predecessors() const { return &predecessors_; }
- bool HasPredecessor() const { return predecessors_.length() > 0; }
- const ZoneList<HBasicBlock*>* dominated_blocks() const {
- return &dominated_blocks_;
- }
- const ZoneList<int>* deleted_phis() const {
- return &deleted_phis_;
- }
- void RecordDeletedPhi(int merge_index) {
- deleted_phis_.Add(merge_index);
- }
- HBasicBlock* dominator() const { return dominator_; }
- HEnvironment* last_environment() const { return last_environment_; }
- int argument_count() const { return argument_count_; }
- void set_argument_count(int count) { argument_count_ = count; }
- int first_instruction_index() const { return first_instruction_index_; }
- void set_first_instruction_index(int index) {
- first_instruction_index_ = index;
- }
- int last_instruction_index() const { return last_instruction_index_; }
- void set_last_instruction_index(int index) {
- last_instruction_index_ = index;
- }
-
- void AttachLoopInformation();
- void DetachLoopInformation();
- bool IsLoopHeader() const { return loop_information() != NULL; }
- bool IsStartBlock() const { return block_id() == 0; }
- void PostProcessLoopHeader(IterationStatement* stmt);
-
- bool IsFinished() const { return end_ != NULL; }
- void AddPhi(HPhi* phi);
- void RemovePhi(HPhi* phi);
- void AddInstruction(HInstruction* instr);
- bool Dominates(HBasicBlock* other) const;
-
- void SetInitialEnvironment(HEnvironment* env);
- void ClearEnvironment() { last_environment_ = NULL; }
- bool HasEnvironment() const { return last_environment_ != NULL; }
- void UpdateEnvironment(HEnvironment* env) { last_environment_ = env; }
- HBasicBlock* parent_loop_header() const { return parent_loop_header_; }
-
- void set_parent_loop_header(HBasicBlock* block) {
- ASSERT(parent_loop_header_ == NULL);
- parent_loop_header_ = block;
- }
-
- bool HasParentLoopHeader() const { return parent_loop_header_ != NULL; }
-
- void SetJoinId(int id);
-
- void Finish(HControlInstruction* last);
- void FinishExit(HControlInstruction* instruction);
- void Goto(HBasicBlock* block, bool include_stack_check = false);
-
- int PredecessorIndexOf(HBasicBlock* predecessor) const;
- void AddSimulate(int id) { AddInstruction(CreateSimulate(id)); }
- void AssignCommonDominator(HBasicBlock* other);
-
- void FinishExitWithDeoptimization() {
- FinishExit(CreateDeoptimize());
- }
-
- // Add the inlined function exit sequence, adding an HLeaveInlined
- // instruction and updating the bailout environment.
- void AddLeaveInlined(HValue* return_value, HBasicBlock* target);
-
- // If a target block is tagged as an inline function return, all
- // predecessors should contain the inlined exit sequence:
- //
- // LeaveInlined
- // Simulate (caller's environment)
- // Goto (target block)
- bool IsInlineReturnTarget() const { return is_inline_return_target_; }
- void MarkAsInlineReturnTarget() { is_inline_return_target_ = true; }
-
-#ifdef DEBUG
- void Verify();
-#endif
-
- private:
- void RegisterPredecessor(HBasicBlock* pred);
- void AddDominatedBlock(HBasicBlock* block);
-
- HSimulate* CreateSimulate(int id);
- HDeoptimize* CreateDeoptimize();
-
- int block_id_;
- HGraph* graph_;
- ZoneList<HPhi*> phis_;
- HInstruction* first_;
- HInstruction* last_;
- HControlInstruction* end_;
- HLoopInformation* loop_information_;
- ZoneList<HBasicBlock*> predecessors_;
- HBasicBlock* dominator_;
- ZoneList<HBasicBlock*> dominated_blocks_;
- HEnvironment* last_environment_;
- // Outgoing parameter count at block exit, set during lithium translation.
- int argument_count_;
- // Instruction indices into the lithium code stream.
- int first_instruction_index_;
- int last_instruction_index_;
- ZoneList<int> deleted_phis_;
- HBasicBlock* parent_loop_header_;
- bool is_inline_return_target_;
-};
-
-
-class HLoopInformation: public ZoneObject {
- public:
- explicit HLoopInformation(HBasicBlock* loop_header)
- : back_edges_(4), loop_header_(loop_header), blocks_(8) {
- blocks_.Add(loop_header);
- }
- virtual ~HLoopInformation() {}
-
- const ZoneList<HBasicBlock*>* back_edges() const { return &back_edges_; }
- const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
- HBasicBlock* loop_header() const { return loop_header_; }
- HBasicBlock* GetLastBackEdge() const;
- void RegisterBackEdge(HBasicBlock* block);
-
- private:
- void AddBlock(HBasicBlock* block);
-
- ZoneList<HBasicBlock*> back_edges_;
- HBasicBlock* loop_header_;
- ZoneList<HBasicBlock*> blocks_;
-};
-
-
-class HGraph: public ZoneObject {
- public:
- explicit HGraph(CompilationInfo* info);
-
- const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
- const ZoneList<HPhi*>* phi_list() const { return phi_list_; }
- HBasicBlock* entry_block() const { return entry_block_; }
- HEnvironment* start_environment() const { return start_environment_; }
-
- void InitializeInferredTypes();
- void InsertTypeConversions();
- void InsertRepresentationChanges();
- void ComputeMinusZeroChecks();
- bool ProcessArgumentsObject();
- void EliminateRedundantPhis();
- void EliminateUnreachablePhis();
- void Canonicalize();
- void OrderBlocks();
- void AssignDominators();
-
- // Returns false if there are phi-uses of the arguments-object
- // which are not supported by the optimizing compiler.
- bool CollectPhis();
-
- Handle<Code> Compile(CompilationInfo* info);
-
- void set_undefined_constant(HConstant* constant) {
- undefined_constant_.set(constant);
- }
- HConstant* GetConstantUndefined() const { return undefined_constant_.get(); }
- HConstant* GetConstant1();
- HConstant* GetConstantMinus1();
- HConstant* GetConstantTrue();
- HConstant* GetConstantFalse();
-
- HBasicBlock* CreateBasicBlock();
- HArgumentsObject* GetArgumentsObject() const {
- return arguments_object_.get();
- }
- bool HasArgumentsObject() const { return arguments_object_.is_set(); }
-
- void SetArgumentsObject(HArgumentsObject* object) {
- arguments_object_.set(object);
- }
-
- int GetMaximumValueID() const { return values_.length(); }
- int GetNextBlockID() { return next_block_id_++; }
- int GetNextValueID(HValue* value) {
- values_.Add(value);
- return values_.length() - 1;
- }
- HValue* LookupValue(int id) const {
- if (id >= 0 && id < values_.length()) return values_[id];
- return NULL;
- }
-
-#ifdef DEBUG
- void Verify() const;
-#endif
-
- private:
- void Postorder(HBasicBlock* block,
- BitVector* visited,
- ZoneList<HBasicBlock*>* order,
- HBasicBlock* loop_header);
- void PostorderLoopBlocks(HLoopInformation* loop,
- BitVector* visited,
- ZoneList<HBasicBlock*>* order,
- HBasicBlock* loop_header);
- HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
- Object* value);
-
- void InsertTypeConversions(HInstruction* instr);
- void PropagateMinusZeroChecks(HValue* value, BitVector* visited);
- void InsertRepresentationChangeForUse(HValue* value,
- HValue* use,
- Representation to);
- void InsertRepresentationChangesForValue(HValue* current,
- ZoneList<HValue*>* value_list,
- ZoneList<Representation>* rep_list);
- void InferTypes(ZoneList<HValue*>* worklist);
- void InitializeInferredTypes(int from_inclusive, int to_inclusive);
- void CheckForBackEdge(HBasicBlock* block, HBasicBlock* successor);
-
- Isolate* isolate() { return isolate_; }
-
- Isolate* isolate_;
- int next_block_id_;
- HBasicBlock* entry_block_;
- HEnvironment* start_environment_;
- ZoneList<HBasicBlock*> blocks_;
- ZoneList<HValue*> values_;
- ZoneList<HPhi*>* phi_list_;
- SetOncePointer<HConstant> undefined_constant_;
- SetOncePointer<HConstant> constant_1_;
- SetOncePointer<HConstant> constant_minus1_;
- SetOncePointer<HConstant> constant_true_;
- SetOncePointer<HConstant> constant_false_;
- SetOncePointer<HArgumentsObject> arguments_object_;
-
- DISALLOW_COPY_AND_ASSIGN(HGraph);
-};
-
-
-class HEnvironment: public ZoneObject {
- public:
- HEnvironment(HEnvironment* outer,
- Scope* scope,
- Handle<JSFunction> closure);
-
- // Simple accessors.
- Handle<JSFunction> closure() const { return closure_; }
- const ZoneList<HValue*>* values() const { return &values_; }
- const ZoneList<int>* assigned_variables() const {
- return &assigned_variables_;
- }
- int parameter_count() const { return parameter_count_; }
- int local_count() const { return local_count_; }
- HEnvironment* outer() const { return outer_; }
- int pop_count() const { return pop_count_; }
- int push_count() const { return push_count_; }
-
- int ast_id() const { return ast_id_; }
- void set_ast_id(int id) { ast_id_ = id; }
-
- int length() const { return values_.length(); }
-
- void Bind(Variable* variable, HValue* value) {
- Bind(IndexFor(variable), value);
- }
-
- void Bind(int index, HValue* value);
-
- HValue* Lookup(Variable* variable) const {
- return Lookup(IndexFor(variable));
- }
-
- HValue* Lookup(int index) const {
- HValue* result = values_[index];
- ASSERT(result != NULL);
- return result;
- }
-
- void Push(HValue* value) {
- ASSERT(value != NULL);
- ++push_count_;
- values_.Add(value);
- }
-
- HValue* Pop() {
- ASSERT(!ExpressionStackIsEmpty());
- if (push_count_ > 0) {
- --push_count_;
- } else {
- ++pop_count_;
- }
- return values_.RemoveLast();
- }
-
- void Drop(int count);
-
- HValue* Top() const { return ExpressionStackAt(0); }
-
- HValue* ExpressionStackAt(int index_from_top) const {
- int index = length() - index_from_top - 1;
- ASSERT(HasExpressionAt(index));
- return values_[index];
- }
-
- void SetExpressionStackAt(int index_from_top, HValue* value);
-
- HEnvironment* Copy() const;
- HEnvironment* CopyWithoutHistory() const;
- HEnvironment* CopyAsLoopHeader(HBasicBlock* block) const;
-
- // Create an "inlined version" of this environment, where the original
- // environment is the outer environment but the top expression stack
- // elements are moved to an inner environment as parameters. If
- // is_speculative, the argument values are expected to be PushArgument
- // instructions, otherwise they are the actual values.
- HEnvironment* CopyForInlining(Handle<JSFunction> target,
- FunctionLiteral* function,
- bool is_speculative,
- HConstant* undefined) const;
-
- void AddIncomingEdge(HBasicBlock* block, HEnvironment* other);
-
- void ClearHistory() {
- pop_count_ = 0;
- push_count_ = 0;
- assigned_variables_.Rewind(0);
- }
-
- void SetValueAt(int index, HValue* value) {
- ASSERT(index < length());
- values_[index] = value;
- }
-
- void PrintTo(StringStream* stream);
- void PrintToStd();
-
- private:
- explicit HEnvironment(const HEnvironment* other);
-
- // True if index is included in the expression stack part of the environment.
- bool HasExpressionAt(int index) const;
-
- bool ExpressionStackIsEmpty() const;
-
- void Initialize(int parameter_count, int local_count, int stack_height);
- void Initialize(const HEnvironment* other);
-
- // Map a variable to an environment index. Parameter indices are shifted
- // by 1 (receiver is parameter index -1 but environment index 0).
- // Stack-allocated local indices are shifted by the number of parameters.
- int IndexFor(Variable* variable) const {
- Slot* slot = variable->AsSlot();
- ASSERT(slot != NULL && slot->IsStackAllocated());
- int shift = (slot->type() == Slot::PARAMETER) ? 1 : parameter_count_;
- return slot->index() + shift;
- }
-
- Handle<JSFunction> closure_;
- // Value array [parameters] [locals] [temporaries].
- ZoneList<HValue*> values_;
- ZoneList<int> assigned_variables_;
- int parameter_count_;
- int local_count_;
- HEnvironment* outer_;
- int pop_count_;
- int push_count_;
- int ast_id_;
-};
-
-
-class HGraphBuilder;
-
-// This class is not BASE_EMBEDDED because our inlining implementation uses
-// new and delete.
-class AstContext {
- public:
- bool IsEffect() const { return kind_ == Expression::kEffect; }
- bool IsValue() const { return kind_ == Expression::kValue; }
- bool IsTest() const { return kind_ == Expression::kTest; }
-
- // 'Fill' this context with a hydrogen value. The value is assumed to
- // have already been inserted in the instruction stream (or not need to
- // be, e.g., HPhi). Call this function in tail position in the Visit
- // functions for expressions.
- virtual void ReturnValue(HValue* value) = 0;
-
- // Add a hydrogen instruction to the instruction stream (recording an
- // environment simulation if necessary) and then fill this context with
- // the instruction as value.
- virtual void ReturnInstruction(HInstruction* instr, int ast_id) = 0;
-
- void set_for_typeof(bool for_typeof) { for_typeof_ = for_typeof; }
- bool is_for_typeof() { return for_typeof_; }
-
- protected:
- AstContext(HGraphBuilder* owner, Expression::Context kind);
- virtual ~AstContext();
-
- HGraphBuilder* owner() const { return owner_; }
-
- // We want to be able to assert, in a context-specific way, that the stack
- // height makes sense when the context is filled.
-#ifdef DEBUG
- int original_length_;
-#endif
-
- private:
- HGraphBuilder* owner_;
- Expression::Context kind_;
- AstContext* outer_;
- bool for_typeof_;
-};
-
-
-class EffectContext: public AstContext {
- public:
- explicit EffectContext(HGraphBuilder* owner)
- : AstContext(owner, Expression::kEffect) {
- }
- virtual ~EffectContext();
-
- virtual void ReturnValue(HValue* value);
- virtual void ReturnInstruction(HInstruction* instr, int ast_id);
-};
-
-
-class ValueContext: public AstContext {
- public:
- explicit ValueContext(HGraphBuilder* owner)
- : AstContext(owner, Expression::kValue) {
- }
- virtual ~ValueContext();
-
- virtual void ReturnValue(HValue* value);
- virtual void ReturnInstruction(HInstruction* instr, int ast_id);
-};
-
-
-class TestContext: public AstContext {
- public:
- TestContext(HGraphBuilder* owner,
- HBasicBlock* if_true,
- HBasicBlock* if_false)
- : AstContext(owner, Expression::kTest),
- if_true_(if_true),
- if_false_(if_false) {
- }
-
- virtual void ReturnValue(HValue* value);
- virtual void ReturnInstruction(HInstruction* instr, int ast_id);
-
- static TestContext* cast(AstContext* context) {
- ASSERT(context->IsTest());
- return reinterpret_cast<TestContext*>(context);
- }
-
- HBasicBlock* if_true() const { return if_true_; }
- HBasicBlock* if_false() const { return if_false_; }
-
- private:
- // Build the shared core part of the translation unpacking a value into
- // control flow.
- void BuildBranch(HValue* value);
-
- HBasicBlock* if_true_;
- HBasicBlock* if_false_;
-};
-
-
-class FunctionState BASE_EMBEDDED {
- public:
- FunctionState(HGraphBuilder* owner,
- CompilationInfo* info,
- TypeFeedbackOracle* oracle);
- ~FunctionState();
-
- CompilationInfo* compilation_info() { return compilation_info_; }
- TypeFeedbackOracle* oracle() { return oracle_; }
- AstContext* call_context() { return call_context_; }
- HBasicBlock* function_return() { return function_return_; }
- TestContext* test_context() { return test_context_; }
- void ClearInlinedTestContext() {
- delete test_context_;
- test_context_ = NULL;
- }
-
- FunctionState* outer() { return outer_; }
-
- private:
- HGraphBuilder* owner_;
-
- CompilationInfo* compilation_info_;
- TypeFeedbackOracle* oracle_;
-
- // During function inlining, expression context of the call being
- // inlined. NULL when not inlining.
- AstContext* call_context_;
-
- // When inlining in an effect of value context, this is the return block.
- // It is NULL otherwise. When inlining in a test context, there are a
- // pair of return blocks in the context. When not inlining, there is no
- // local return point.
- HBasicBlock* function_return_;
-
- // When inlining a call in a test context, a context containing a pair of
- // return blocks. NULL in all other cases.
- TestContext* test_context_;
-
- FunctionState* outer_;
-};
-
-
-class HGraphBuilder: public AstVisitor {
- public:
- enum BreakType { BREAK, CONTINUE };
-
- // A class encapsulating (lazily-allocated) break and continue blocks for
- // a breakable statement. Separated from BreakAndContinueScope so that it
- // can have a separate lifetime.
- class BreakAndContinueInfo BASE_EMBEDDED {
- public:
- explicit BreakAndContinueInfo(BreakableStatement* target)
- : target_(target), break_block_(NULL), continue_block_(NULL) {
- }
-
- BreakableStatement* target() { return target_; }
- HBasicBlock* break_block() { return break_block_; }
- void set_break_block(HBasicBlock* block) { break_block_ = block; }
- HBasicBlock* continue_block() { return continue_block_; }
- void set_continue_block(HBasicBlock* block) { continue_block_ = block; }
-
- private:
- BreakableStatement* target_;
- HBasicBlock* break_block_;
- HBasicBlock* continue_block_;
- };
-
- // A helper class to maintain a stack of current BreakAndContinueInfo
- // structures mirroring BreakableStatement nesting.
- class BreakAndContinueScope BASE_EMBEDDED {
- public:
- BreakAndContinueScope(BreakAndContinueInfo* info, HGraphBuilder* owner)
- : info_(info), owner_(owner), next_(owner->break_scope()) {
- owner->set_break_scope(this);
- }
-
- ~BreakAndContinueScope() { owner_->set_break_scope(next_); }
-
- BreakAndContinueInfo* info() { return info_; }
- HGraphBuilder* owner() { return owner_; }
- BreakAndContinueScope* next() { return next_; }
-
- // Search the break stack for a break or continue target.
- HBasicBlock* Get(BreakableStatement* stmt, BreakType type);
-
- private:
- BreakAndContinueInfo* info_;
- HGraphBuilder* owner_;
- BreakAndContinueScope* next_;
- };
-
- HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle)
- : function_state_(NULL),
- initial_function_state_(this, info, oracle),
- ast_context_(NULL),
- break_scope_(NULL),
- graph_(NULL),
- current_block_(NULL),
- inlined_count_(0) {
- // This is not initialized in the initializer list because the
- // constructor for the initial state relies on function_state_ == NULL
- // to know it's the initial state.
- function_state_= &initial_function_state_;
- }
-
- HGraph* CreateGraph();
-
- // Simple accessors.
- HGraph* graph() const { return graph_; }
- BreakAndContinueScope* break_scope() const { return break_scope_; }
- void set_break_scope(BreakAndContinueScope* head) { break_scope_ = head; }
-
- HBasicBlock* current_block() const { return current_block_; }
- void set_current_block(HBasicBlock* block) { current_block_ = block; }
- HEnvironment* environment() const {
- return current_block()->last_environment();
- }
-
- // Adding instructions.
- HInstruction* AddInstruction(HInstruction* instr);
- void AddSimulate(int id);
-
- // Bailout environment manipulation.
- void Push(HValue* value) { environment()->Push(value); }
- HValue* Pop() { return environment()->Pop(); }
-
- private:
- // Type of a member function that generates inline code for a native function.
- typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
-
- // Forward declarations for inner scope classes.
- class SubgraphScope;
-
- static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
- static const int kMaxCallPolymorphism = 4;
- static const int kMaxLoadPolymorphism = 4;
- static const int kMaxStorePolymorphism = 4;
-
- static const int kMaxInlinedNodes = 196;
- static const int kMaxInlinedSize = 196;
- static const int kMaxSourceSize = 600;
-
- // Simple accessors.
- FunctionState* function_state() const { return function_state_; }
- void set_function_state(FunctionState* state) { function_state_ = state; }
-
- AstContext* ast_context() const { return ast_context_; }
- void set_ast_context(AstContext* context) { ast_context_ = context; }
-
- // Accessors forwarded to the function state.
- CompilationInfo* info() const {
- return function_state()->compilation_info();
- }
- TypeFeedbackOracle* oracle() const { return function_state()->oracle(); }
-
- AstContext* call_context() const {
- return function_state()->call_context();
- }
- HBasicBlock* function_return() const {
- return function_state()->function_return();
- }
- TestContext* inlined_test_context() const {
- return function_state()->test_context();
- }
- void ClearInlinedTestContext() {
- function_state()->ClearInlinedTestContext();
- }
-
- // Generators for inline runtime functions.
-#define INLINE_FUNCTION_GENERATOR_DECLARATION(Name, argc, ressize) \
- void Generate##Name(CallRuntime* call);
-
- INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
- INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
-#undef INLINE_FUNCTION_GENERATOR_DECLARATION
-
- void Bailout(const char* reason);
-
- void PreProcessOsrEntry(IterationStatement* statement);
- // True iff. we are compiling for OSR and the statement is the entry.
- bool HasOsrEntryAt(IterationStatement* statement);
-
- HBasicBlock* CreateJoin(HBasicBlock* first,
- HBasicBlock* second,
- int join_id);
-
- // Create a back edge in the flow graph. body_exit is the predecessor
- // block and loop_entry is the successor block. loop_successor is the
- // block where control flow exits the loop normally (e.g., via failure of
- // the condition) and break_block is the block where control flow breaks
- // from the loop. All blocks except loop_entry can be NULL. The return
- // value is the new successor block which is the join of loop_successor
- // and break_block, or NULL.
- HBasicBlock* CreateLoop(IterationStatement* statement,
- HBasicBlock* loop_entry,
- HBasicBlock* body_exit,
- HBasicBlock* loop_successor,
- HBasicBlock* break_block);
-
- HBasicBlock* JoinContinue(IterationStatement* statement,
- HBasicBlock* exit_block,
- HBasicBlock* continue_block);
-
- HValue* Top() const { return environment()->Top(); }
- void Drop(int n) { environment()->Drop(n); }
- void Bind(Variable* var, HValue* value) { environment()->Bind(var, value); }
-
- void VisitForValue(Expression* expr);
- void VisitForTypeOf(Expression* expr);
- void VisitForEffect(Expression* expr);
- void VisitForControl(Expression* expr,
- HBasicBlock* true_block,
- HBasicBlock* false_block);
-
- // Visit an argument subexpression and emit a push to the outgoing
- // arguments.
- void VisitArgument(Expression* expr);
- void VisitArgumentList(ZoneList<Expression*>* arguments);
-
- // Visit a list of expressions from left to right, each in a value context.
- void VisitExpressions(ZoneList<Expression*>* exprs);
-
- void AddPhi(HPhi* phi);
-
- void PushAndAdd(HInstruction* instr);
-
- // Remove the arguments from the bailout environment and emit instructions
- // to push them as outgoing parameters.
- template <int V> HInstruction* PreProcessCall(HCall<V>* call);
-
- void AssumeRepresentation(HValue* value, Representation r);
- static Representation ToRepresentation(TypeInfo info);
-
- void SetupScope(Scope* scope);
- virtual void VisitStatements(ZoneList<Statement*>* statements);
-
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- HBasicBlock* CreateBasicBlock(HEnvironment* env);
- HBasicBlock* CreateLoopHeaderBlock();
-
- // Helpers for flow graph construction.
- enum GlobalPropertyAccess {
- kUseCell,
- kUseGeneric
- };
- GlobalPropertyAccess LookupGlobalProperty(Variable* var,
- LookupResult* lookup,
- bool is_store);
-
- bool TryArgumentsAccess(Property* expr);
- bool TryCallApply(Call* expr);
- bool TryInline(Call* expr);
- bool TryInlineBuiltinFunction(Call* expr,
- HValue* receiver,
- Handle<Map> receiver_map,
- CheckType check_type);
-
- // If --trace-inlining, print a line of the inlining trace. Inlining
- // succeeded if the reason string is NULL and failed if there is a
- // non-NULL reason string.
- void TraceInline(Handle<JSFunction> target, const char* failure_reason);
-
- void HandleGlobalVariableAssignment(Variable* var,
- HValue* value,
- int position,
- int ast_id);
-
- void HandlePropertyAssignment(Assignment* expr);
- void HandleCompoundAssignment(Assignment* expr);
- void HandlePolymorphicStoreNamedField(Assignment* expr,
- HValue* object,
- HValue* value,
- ZoneMapList* types,
- Handle<String> name);
- void HandlePolymorphicCallNamed(Call* expr,
- HValue* receiver,
- ZoneMapList* types,
- Handle<String> name);
-
- HStringCharCodeAt* BuildStringCharCodeAt(HValue* string,
- HValue* index);
- HInstruction* BuildBinaryOperation(BinaryOperation* expr,
- HValue* left,
- HValue* right);
- HInstruction* BuildIncrement(HValue* value, bool increment);
- HLoadNamedField* BuildLoadNamedField(HValue* object,
- Property* expr,
- Handle<Map> type,
- LookupResult* result,
- bool smi_and_map_check);
- HInstruction* BuildLoadNamedGeneric(HValue* object, Property* expr);
- HInstruction* BuildLoadKeyedFastElement(HValue* object,
- HValue* key,
- Property* expr);
- HInstruction* BuildLoadKeyedSpecializedArrayElement(HValue* object,
- HValue* key,
- Property* expr);
- HInstruction* BuildLoadKeyedGeneric(HValue* object,
- HValue* key);
-
- HInstruction* BuildLoadNamed(HValue* object,
- Property* prop,
- Handle<Map> map,
- Handle<String> name);
- HInstruction* BuildStoreNamed(HValue* object,
- HValue* value,
- Expression* expr);
- HInstruction* BuildStoreNamedField(HValue* object,
- Handle<String> name,
- HValue* value,
- Handle<Map> type,
- LookupResult* lookup,
- bool smi_and_map_check);
- HInstruction* BuildStoreNamedGeneric(HValue* object,
- Handle<String> name,
- HValue* value);
- HInstruction* BuildStoreKeyedGeneric(HValue* object,
- HValue* key,
- HValue* value);
-
- HInstruction* BuildStoreKeyedFastElement(HValue* object,
- HValue* key,
- HValue* val,
- Expression* expr);
-
- HInstruction* BuildStoreKeyedSpecializedArrayElement(
- HValue* object,
- HValue* key,
- HValue* val,
- Assignment* expr);
-
- HValue* BuildContextChainWalk(Variable* var);
-
- void AddCheckConstantFunction(Call* expr,
- HValue* receiver,
- Handle<Map> receiver_map,
- bool smi_and_map_check);
-
-
- // The translation state of the currently-being-translated function.
- FunctionState* function_state_;
-
- // The base of the function state stack.
- FunctionState initial_function_state_;
-
- // Expression context of the currently visited subexpression. NULL when
- // visiting statements.
- AstContext* ast_context_;
-
- // A stack of breakable statements entered.
- BreakAndContinueScope* break_scope_;
-
- HGraph* graph_;
- HBasicBlock* current_block_;
-
- int inlined_count_;
-
- friend class FunctionState; // Pushes and pops the state stack.
- friend class AstContext; // Pushes and pops the AST context stack.
-
- DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
-};
-
-
-class HValueMap: public ZoneObject {
- public:
- HValueMap()
- : array_size_(0),
- lists_size_(0),
- count_(0),
- present_flags_(0),
- array_(NULL),
- lists_(NULL),
- free_list_head_(kNil) {
- ResizeLists(kInitialSize);
- Resize(kInitialSize);
- }
-
- void Kill(int flags);
-
- void Add(HValue* value) {
- present_flags_ |= value->flags();
- Insert(value);
- }
-
- HValue* Lookup(HValue* value) const;
- HValueMap* Copy() const { return new HValueMap(this); }
-
- private:
- // A linked list of HValue* values. Stored in arrays.
- struct HValueMapListElement {
- HValue* value;
- int next; // Index in the array of the next list element.
- };
- static const int kNil = -1; // The end of a linked list
-
- // Must be a power of 2.
- static const int kInitialSize = 16;
-
- explicit HValueMap(const HValueMap* other);
-
- void Resize(int new_size);
- void ResizeLists(int new_size);
- void Insert(HValue* value);
- uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); }
-
- int array_size_;
- int lists_size_;
- int count_; // The number of values stored in the HValueMap.
- int present_flags_; // All flags that are in any value in the HValueMap.
- HValueMapListElement* array_; // Primary store - contains the first value
- // with a given hash. Colliding elements are stored in linked lists.
- HValueMapListElement* lists_; // The linked lists containing hash collisions.
- int free_list_head_; // Unused elements in lists_ are on the free list.
-};
-
-
-class HStatistics: public Malloced {
- public:
- void Initialize(CompilationInfo* info);
- void Print();
- void SaveTiming(const char* name, int64_t ticks, unsigned size);
- static HStatistics* Instance() {
- static SetOncePointer<HStatistics> instance;
- if (!instance.is_set()) {
- instance.set(new HStatistics());
- }
- return instance.get();
- }
-
- private:
-
- HStatistics()
- : timing_(5),
- names_(5),
- sizes_(5),
- total_(0),
- total_size_(0),
- full_code_gen_(0),
- source_size_(0) { }
-
- List<int64_t> timing_;
- List<const char*> names_;
- List<unsigned> sizes_;
- int64_t total_;
- unsigned total_size_;
- int64_t full_code_gen_;
- double source_size_;
-};
-
-
-class HPhase BASE_EMBEDDED {
- public:
- static const char* const kFullCodeGen;
- static const char* const kTotal;
-
- explicit HPhase(const char* name) { Begin(name, NULL, NULL, NULL); }
- HPhase(const char* name, HGraph* graph) {
- Begin(name, graph, NULL, NULL);
- }
- HPhase(const char* name, LChunk* chunk) {
- Begin(name, NULL, chunk, NULL);
- }
- HPhase(const char* name, LAllocator* allocator) {
- Begin(name, NULL, NULL, allocator);
- }
-
- ~HPhase() {
- End();
- }
-
- private:
- void Begin(const char* name,
- HGraph* graph,
- LChunk* chunk,
- LAllocator* allocator);
- void End() const;
-
- int64_t start_;
- const char* name_;
- HGraph* graph_;
- LChunk* chunk_;
- LAllocator* allocator_;
- unsigned start_allocation_size_;
-};
-
-
-class HTracer: public Malloced {
- public:
- void TraceCompilation(FunctionLiteral* function);
- void TraceHydrogen(const char* name, HGraph* graph);
- void TraceLithium(const char* name, LChunk* chunk);
- void TraceLiveRanges(const char* name, LAllocator* allocator);
-
- static HTracer* Instance() {
- static SetOncePointer<HTracer> instance;
- if (!instance.is_set()) {
- instance.set(new HTracer("hydrogen.cfg"));
- }
- return instance.get();
- }
-
- private:
- class Tag BASE_EMBEDDED {
- public:
- Tag(HTracer* tracer, const char* name) {
- name_ = name;
- tracer_ = tracer;
- tracer->PrintIndent();
- tracer->trace_.Add("begin_%s\n", name);
- tracer->indent_++;
- }
-
- ~Tag() {
- tracer_->indent_--;
- tracer_->PrintIndent();
- tracer_->trace_.Add("end_%s\n", name_);
- ASSERT(tracer_->indent_ >= 0);
- tracer_->FlushToFile();
- }
-
- private:
- HTracer* tracer_;
- const char* name_;
- };
-
- explicit HTracer(const char* filename)
- : filename_(filename), trace_(&string_allocator_), indent_(0) {
- WriteChars(filename, "", 0, false);
- }
-
- void TraceLiveRange(LiveRange* range, const char* type);
- void Trace(const char* name, HGraph* graph, LChunk* chunk);
- void FlushToFile();
-
- void PrintEmptyProperty(const char* name) {
- PrintIndent();
- trace_.Add("%s\n", name);
- }
-
- void PrintStringProperty(const char* name, const char* value) {
- PrintIndent();
- trace_.Add("%s \"%s\"\n", name, value);
- }
-
- void PrintLongProperty(const char* name, int64_t value) {
- PrintIndent();
- trace_.Add("%s %d000\n", name, static_cast<int>(value / 1000));
- }
-
- void PrintBlockProperty(const char* name, int block_id) {
- PrintIndent();
- trace_.Add("%s \"B%d\"\n", name, block_id);
- }
-
- void PrintBlockProperty(const char* name, int block_id1, int block_id2) {
- PrintIndent();
- trace_.Add("%s \"B%d\" \"B%d\"\n", name, block_id1, block_id2);
- }
-
- void PrintIntProperty(const char* name, int value) {
- PrintIndent();
- trace_.Add("%s %d\n", name, value);
- }
-
- void PrintIndent() {
- for (int i = 0; i < indent_; i++) {
- trace_.Add(" ");
- }
- }
-
- const char* filename_;
- HeapStringAllocator string_allocator_;
- StringStream trace_;
- int indent_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_HYDROGEN_H_
diff --git a/src/3rdparty/v8/src/ia32/assembler-ia32-inl.h b/src/3rdparty/v8/src/ia32/assembler-ia32-inl.h
deleted file mode 100644
index a9247f4..0000000
--- a/src/3rdparty/v8/src/ia32/assembler-ia32-inl.h
+++ /dev/null
@@ -1,430 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-
-// A light-weight IA32 Assembler.
-
-#ifndef V8_IA32_ASSEMBLER_IA32_INL_H_
-#define V8_IA32_ASSEMBLER_IA32_INL_H_
-
-#include "cpu.h"
-#include "debug.h"
-
-namespace v8 {
-namespace internal {
-
-
-// The modes possibly affected by apply must be in kApplyMask.
-void RelocInfo::apply(intptr_t delta) {
- if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
- int32_t* p = reinterpret_cast<int32_t*>(pc_);
- *p -= delta; // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
- } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
- // Special handling of js_return when a break point is set (call
- // instruction has been inserted).
- int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
- *p -= delta; // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
- } else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) {
- // Special handling of a debug break slot when a break point is set (call
- // instruction has been inserted).
- int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
- *p -= delta; // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
- } else if (IsInternalReference(rmode_)) {
- // absolute code pointer inside code object moves with the code object.
- int32_t* p = reinterpret_cast<int32_t*>(pc_);
- *p += delta; // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
- }
-}
-
-
-Address RelocInfo::target_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- return Assembler::target_address_at(pc_);
-}
-
-
-Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- return reinterpret_cast<Address>(pc_);
-}
-
-
-int RelocInfo::target_address_size() {
- return Assembler::kExternalTargetSize;
-}
-
-
-void RelocInfo::set_target_address(Address target) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- Assembler::set_target_address_at(pc_, target);
-}
-
-
-Object* RelocInfo::target_object() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Memory::Object_at(pc_);
-}
-
-
-Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Memory::Object_Handle_at(pc_);
-}
-
-
-Object** RelocInfo::target_object_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return &Memory::Object_at(pc_);
-}
-
-
-void RelocInfo::set_target_object(Object* target) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Memory::Object_at(pc_) = target;
- CPU::FlushICache(pc_, sizeof(Address));
-}
-
-
-Address* RelocInfo::target_reference_address() {
- ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
- return reinterpret_cast<Address*>(pc_);
-}
-
-
-Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- return Handle<JSGlobalPropertyCell>(
- reinterpret_cast<JSGlobalPropertyCell**>(address));
-}
-
-
-JSGlobalPropertyCell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- Object* object = HeapObject::FromAddress(
- address - JSGlobalPropertyCell::kValueOffset);
- return reinterpret_cast<JSGlobalPropertyCell*>(object);
-}
-
-
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
- Memory::Address_at(pc_) = address;
- CPU::FlushICache(pc_, sizeof(Address));
-}
-
-
-Address RelocInfo::call_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return Assembler::target_address_at(pc_ + 1);
-}
-
-
-void RelocInfo::set_call_address(Address target) {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- Assembler::set_target_address_at(pc_ + 1, target);
-}
-
-
-Object* RelocInfo::call_object() {
- return *call_object_address();
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
- *call_object_address() = target;
-}
-
-
-Object** RelocInfo::call_object_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return reinterpret_cast<Object**>(pc_ + 1);
-}
-
-
-bool RelocInfo::IsPatchedReturnSequence() {
- return *pc_ == 0xE8;
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- return !Assembler::IsNop(pc());
-}
-
-
-void RelocInfo::Visit(ObjectVisitor* visitor) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitPointer(target_object_address());
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- visitor->VisitGlobalPropertyCell(this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(target_reference_address());
- CPU::FlushICache(pc_, sizeof(Address));
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
- } else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
- visitor->VisitDebugTarget(this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
- visitor->VisitRuntimeEntry(this);
- }
-}
-
-
-template<typename StaticVisitor>
-void RelocInfo::Visit(Heap* heap) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitPointer(heap, target_object_address());
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeTarget(mode)) {
- StaticVisitor::VisitCodeTarget(heap, this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(heap, this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- StaticVisitor::VisitExternalReference(target_reference_address());
- CPU::FlushICache(pc_, sizeof(Address));
-#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (heap->isolate()->debug()->has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
- StaticVisitor::VisitDebugTarget(heap, this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
- StaticVisitor::VisitRuntimeEntry(this);
- }
-}
-
-
-
-Immediate::Immediate(int x) {
- x_ = x;
- rmode_ = RelocInfo::NONE;
-}
-
-
-Immediate::Immediate(const ExternalReference& ext) {
- x_ = reinterpret_cast<int32_t>(ext.address());
- rmode_ = RelocInfo::EXTERNAL_REFERENCE;
-}
-
-
-Immediate::Immediate(Label* internal_offset) {
- x_ = reinterpret_cast<int32_t>(internal_offset);
- rmode_ = RelocInfo::INTERNAL_REFERENCE;
-}
-
-
-Immediate::Immediate(Handle<Object> handle) {
- // Verify all Objects referred by code are NOT in new space.
- Object* obj = *handle;
- ASSERT(!HEAP->InNewSpace(obj));
- if (obj->IsHeapObject()) {
- x_ = reinterpret_cast<intptr_t>(handle.location());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
- } else {
- // no relocation needed
- x_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE;
- }
-}
-
-
-Immediate::Immediate(Smi* value) {
- x_ = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE;
-}
-
-
-Immediate::Immediate(Address addr) {
- x_ = reinterpret_cast<int32_t>(addr);
- rmode_ = RelocInfo::NONE;
-}
-
-
-void Assembler::emit(uint32_t x) {
- *reinterpret_cast<uint32_t*>(pc_) = x;
- pc_ += sizeof(uint32_t);
-}
-
-
-void Assembler::emit(Handle<Object> handle) {
- // Verify all Objects referred by code are NOT in new space.
- Object* obj = *handle;
- ASSERT(!isolate()->heap()->InNewSpace(obj));
- if (obj->IsHeapObject()) {
- emit(reinterpret_cast<intptr_t>(handle.location()),
- RelocInfo::EMBEDDED_OBJECT);
- } else {
- // no relocation needed
- emit(reinterpret_cast<intptr_t>(obj));
- }
-}
-
-
-void Assembler::emit(uint32_t x, RelocInfo::Mode rmode) {
- if (rmode != RelocInfo::NONE) RecordRelocInfo(rmode);
- emit(x);
-}
-
-
-void Assembler::emit(const Immediate& x) {
- if (x.rmode_ == RelocInfo::INTERNAL_REFERENCE) {
- Label* label = reinterpret_cast<Label*>(x.x_);
- emit_code_relative_offset(label);
- return;
- }
- if (x.rmode_ != RelocInfo::NONE) RecordRelocInfo(x.rmode_);
- emit(x.x_);
-}
-
-
-void Assembler::emit_code_relative_offset(Label* label) {
- if (label->is_bound()) {
- int32_t pos;
- pos = label->pos() + Code::kHeaderSize - kHeapObjectTag;
- emit(pos);
- } else {
- emit_disp(label, Displacement::CODE_RELATIVE);
- }
-}
-
-
-void Assembler::emit_w(const Immediate& x) {
- ASSERT(x.rmode_ == RelocInfo::NONE);
- uint16_t value = static_cast<uint16_t>(x.x_);
- reinterpret_cast<uint16_t*>(pc_)[0] = value;
- pc_ += sizeof(uint16_t);
-}
-
-
-Address Assembler::target_address_at(Address pc) {
- return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
-}
-
-
-void Assembler::set_target_address_at(Address pc, Address target) {
- int32_t* p = reinterpret_cast<int32_t*>(pc);
- *p = target - (pc + sizeof(int32_t));
- CPU::FlushICache(p, sizeof(int32_t));
-}
-
-
-Displacement Assembler::disp_at(Label* L) {
- return Displacement(long_at(L->pos()));
-}
-
-
-void Assembler::disp_at_put(Label* L, Displacement disp) {
- long_at_put(L->pos(), disp.data());
-}
-
-
-void Assembler::emit_disp(Label* L, Displacement::Type type) {
- Displacement disp(L, type);
- L->link_to(pc_offset());
- emit(static_cast<int>(disp.data()));
-}
-
-
-void Operand::set_modrm(int mod, Register rm) {
- ASSERT((mod & -4) == 0);
- buf_[0] = mod << 6 | rm.code();
- len_ = 1;
-}
-
-
-void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
- ASSERT(len_ == 1);
- ASSERT((scale & -4) == 0);
- // Use SIB with no index register only for base esp.
- ASSERT(!index.is(esp) || base.is(esp));
- buf_[1] = scale << 6 | index.code() << 3 | base.code();
- len_ = 2;
-}
-
-
-void Operand::set_disp8(int8_t disp) {
- ASSERT(len_ == 1 || len_ == 2);
- *reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
-}
-
-
-void Operand::set_dispr(int32_t disp, RelocInfo::Mode rmode) {
- ASSERT(len_ == 1 || len_ == 2);
- int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
- *p = disp;
- len_ += sizeof(int32_t);
- rmode_ = rmode;
-}
-
-Operand::Operand(Register reg) {
- // reg
- set_modrm(3, reg);
-}
-
-
-Operand::Operand(XMMRegister xmm_reg) {
- Register reg = { xmm_reg.code() };
- set_modrm(3, reg);
-}
-
-
-Operand::Operand(int32_t disp, RelocInfo::Mode rmode) {
- // [disp/r]
- set_modrm(0, ebp);
- set_dispr(disp, rmode);
-}
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_ASSEMBLER_IA32_INL_H_
diff --git a/src/3rdparty/v8/src/ia32/assembler-ia32.cc b/src/3rdparty/v8/src/ia32/assembler-ia32.cc
deleted file mode 100644
index 9273037..0000000
--- a/src/3rdparty/v8/src/ia32/assembler-ia32.cc
+++ /dev/null
@@ -1,2846 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "disassembler.h"
-#include "macro-assembler.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------------
-// Implementation of CpuFeatures
-
-#ifdef DEBUG
-bool CpuFeatures::initialized_ = false;
-#endif
-uint64_t CpuFeatures::supported_ = 0;
-uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
-
-
-void CpuFeatures::Probe() {
- ASSERT(!initialized_);
- ASSERT(supported_ == 0);
-#ifdef DEBUG
- initialized_ = true;
-#endif
- if (Serializer::enabled()) {
- supported_ |= OS::CpuFeaturesImpliedByPlatform();
- return; // No features if we might serialize.
- }
-
- const int kBufferSize = 4 * KB;
- VirtualMemory* memory = new VirtualMemory(kBufferSize);
- if (!memory->IsReserved()) {
- delete memory;
- return;
- }
- ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
- if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
- delete memory;
- return;
- }
-
- Assembler assm(NULL, memory->address(), kBufferSize);
- Label cpuid, done;
-#define __ assm.
- // Save old esp, since we are going to modify the stack.
- __ push(ebp);
- __ pushfd();
- __ push(ecx);
- __ push(ebx);
- __ mov(ebp, Operand(esp));
-
- // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
- __ pushfd();
- __ pop(eax);
- __ mov(edx, Operand(eax));
- __ xor_(eax, 0x200000); // Flip bit 21.
- __ push(eax);
- __ popfd();
- __ pushfd();
- __ pop(eax);
- __ xor_(eax, Operand(edx)); // Different if CPUID is supported.
- __ j(not_zero, &cpuid);
-
- // CPUID not supported. Clear the supported features in edx:eax.
- __ xor_(eax, Operand(eax));
- __ xor_(edx, Operand(edx));
- __ jmp(&done);
-
- // Invoke CPUID with 1 in eax to get feature information in
- // ecx:edx. Temporarily enable CPUID support because we know it's
- // safe here.
- __ bind(&cpuid);
- __ mov(eax, 1);
- supported_ = (1 << CPUID);
- { Scope fscope(CPUID);
- __ cpuid();
- }
- supported_ = 0;
-
- // Move the result from ecx:edx to edx:eax and make sure to mark the
- // CPUID feature as supported.
- __ mov(eax, Operand(edx));
- __ or_(eax, 1 << CPUID);
- __ mov(edx, Operand(ecx));
-
- // Done.
- __ bind(&done);
- __ mov(esp, Operand(ebp));
- __ pop(ebx);
- __ pop(ecx);
- __ popfd();
- __ pop(ebp);
- __ ret(0);
-#undef __
-
- typedef uint64_t (*F0)();
- F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
- supported_ = probe();
- found_by_runtime_probing_ = supported_;
- uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
- supported_ |= os_guarantees;
- found_by_runtime_probing_ &= ~os_guarantees;
-
- delete memory;
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Displacement
-
-void Displacement::init(Label* L, Type type) {
- ASSERT(!L->is_bound());
- int next = 0;
- if (L->is_linked()) {
- next = L->pos();
- ASSERT(next > 0); // Displacements must be at positions > 0
- }
- // Ensure that we _never_ overflow the next field.
- ASSERT(NextField::is_valid(Assembler::kMaximalBufferSize));
- data_ = NextField::encode(next) | TypeField::encode(type);
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo
-
-
-const int RelocInfo::kApplyMask =
- RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
- 1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE |
- 1 << RelocInfo::DEBUG_BREAK_SLOT;
-
-
-bool RelocInfo::IsCodedSpecially() {
- // The deserializer needs to know whether a pointer is specially coded. Being
- // specially coded on IA32 means that it is a relative address, as used by
- // branch instructions. These are also the ones that need changing when a
- // code object moves.
- return (1 << rmode_) & kApplyMask;
-}
-
-
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- // Patch the code at the current address with the supplied instructions.
- for (int i = 0; i < instruction_count; i++) {
- *(pc_ + i) = *(instructions + i);
- }
-
- // Indicate that code has changed.
- CPU::FlushICache(pc_, instruction_count);
-}
-
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard int3 instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Call instruction takes up 5 bytes and int3 takes up one byte.
- static const int kCallCodeSize = 5;
- int code_size = kCallCodeSize + guard_bytes;
-
- // Create a code patcher.
- CodePatcher patcher(pc_, code_size);
-
- // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
- Label check_codesize;
- patcher.masm()->bind(&check_codesize);
-#endif
-
- // Patch the code.
- patcher.masm()->call(target, RelocInfo::NONE);
-
- // Check that the size of the code generated is as expected.
- ASSERT_EQ(kCallCodeSize,
- patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
-
- // Add the requested number of int3 instructions after the call.
- ASSERT_GE(guard_bytes, 0);
- for (int i = 0; i < guard_bytes; i++) {
- patcher.masm()->int3();
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Operand
-
-Operand::Operand(Register base, int32_t disp, RelocInfo::Mode rmode) {
- // [base + disp/r]
- if (disp == 0 && rmode == RelocInfo::NONE && !base.is(ebp)) {
- // [base]
- set_modrm(0, base);
- if (base.is(esp)) set_sib(times_1, esp, base);
- } else if (is_int8(disp) && rmode == RelocInfo::NONE) {
- // [base + disp8]
- set_modrm(1, base);
- if (base.is(esp)) set_sib(times_1, esp, base);
- set_disp8(disp);
- } else {
- // [base + disp/r]
- set_modrm(2, base);
- if (base.is(esp)) set_sib(times_1, esp, base);
- set_dispr(disp, rmode);
- }
-}
-
-
-Operand::Operand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode) {
- ASSERT(!index.is(esp)); // illegal addressing mode
- // [base + index*scale + disp/r]
- if (disp == 0 && rmode == RelocInfo::NONE && !base.is(ebp)) {
- // [base + index*scale]
- set_modrm(0, esp);
- set_sib(scale, index, base);
- } else if (is_int8(disp) && rmode == RelocInfo::NONE) {
- // [base + index*scale + disp8]
- set_modrm(1, esp);
- set_sib(scale, index, base);
- set_disp8(disp);
- } else {
- // [base + index*scale + disp/r]
- set_modrm(2, esp);
- set_sib(scale, index, base);
- set_dispr(disp, rmode);
- }
-}
-
-
-Operand::Operand(Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode) {
- ASSERT(!index.is(esp)); // illegal addressing mode
- // [index*scale + disp/r]
- set_modrm(0, esp);
- set_sib(scale, index, ebp);
- set_dispr(disp, rmode);
-}
-
-
-bool Operand::is_reg(Register reg) const {
- return ((buf_[0] & 0xF8) == 0xC0) // addressing mode is register only.
- && ((buf_[0] & 0x07) == reg.code()); // register codes match.
-}
-
-// -----------------------------------------------------------------------------
-// Implementation of Assembler.
-
-// Emit a single byte. Must always be inlined.
-#define EMIT(x) \
- *pc_++ = (x)
-
-
-#ifdef GENERATED_CODE_COVERAGE
-static void InitCoverageLog();
-#endif
-
-Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
- : AssemblerBase(arg_isolate),
- positions_recorder_(this),
- emit_debug_code_(FLAG_debug_code) {
- if (buffer == NULL) {
- // Do our own buffer management.
- if (buffer_size <= kMinimalBufferSize) {
- buffer_size = kMinimalBufferSize;
-
- if (isolate()->assembler_spare_buffer() != NULL) {
- buffer = isolate()->assembler_spare_buffer();
- isolate()->set_assembler_spare_buffer(NULL);
- }
- }
- if (buffer == NULL) {
- buffer_ = NewArray<byte>(buffer_size);
- } else {
- buffer_ = static_cast<byte*>(buffer);
- }
- buffer_size_ = buffer_size;
- own_buffer_ = true;
- } else {
- // Use externally provided buffer instead.
- ASSERT(buffer_size > 0);
- buffer_ = static_cast<byte*>(buffer);
- buffer_size_ = buffer_size;
- own_buffer_ = false;
- }
-
- // Clear the buffer in debug mode unless it was provided by the
- // caller in which case we can't be sure it's okay to overwrite
- // existing code in it; see CodePatcher::CodePatcher(...).
-#ifdef DEBUG
- if (own_buffer_) {
- memset(buffer_, 0xCC, buffer_size); // int3
- }
-#endif
-
- // Setup buffer pointers.
- ASSERT(buffer_ != NULL);
- pc_ = buffer_;
- reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
-
- last_pc_ = NULL;
-#ifdef GENERATED_CODE_COVERAGE
- InitCoverageLog();
-#endif
-}
-
-
-Assembler::~Assembler() {
- if (own_buffer_) {
- if (isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- }
-}
-
-
-void Assembler::GetCode(CodeDesc* desc) {
- // Finalize code (at this point overflow() may be true, but the gap ensures
- // that we are still not overlapping instructions and relocation info).
- ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
- // Setup code descriptor.
- desc->buffer = buffer_;
- desc->buffer_size = buffer_size_;
- desc->instr_size = pc_offset();
- desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
- desc->origin = this;
-}
-
-
-void Assembler::Align(int m) {
- ASSERT(IsPowerOf2(m));
- while ((pc_offset() & (m - 1)) != 0) {
- nop();
- }
-}
-
-
-void Assembler::CodeTargetAlign() {
- Align(16); // Preferred alignment of jump targets on ia32.
-}
-
-
-void Assembler::cpuid() {
- ASSERT(CpuFeatures::IsEnabled(CPUID));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x0F);
- EMIT(0xA2);
-}
-
-
-void Assembler::pushad() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x60);
-}
-
-
-void Assembler::popad() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x61);
-}
-
-
-void Assembler::pushfd() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x9C);
-}
-
-
-void Assembler::popfd() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x9D);
-}
-
-
-void Assembler::push(const Immediate& x) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (x.is_int8()) {
- EMIT(0x6a);
- EMIT(x.x_);
- } else {
- EMIT(0x68);
- emit(x);
- }
-}
-
-
-void Assembler::push_imm32(int32_t imm32) {
- EnsureSpace ensure_space(this);
- EMIT(0x68);
- emit(imm32);
-}
-
-
-void Assembler::push(Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x50 | src.code());
-}
-
-
-void Assembler::push(const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xFF);
- emit_operand(esi, src);
-}
-
-
-void Assembler::pop(Register dst) {
- ASSERT(reloc_info_writer.last_pc() != NULL);
- if (FLAG_peephole_optimization && (reloc_info_writer.last_pc() <= last_pc_)) {
- // (last_pc_ != NULL) is rolled into the above check.
- // If a last_pc_ is set, we need to make sure that there has not been any
- // relocation information generated between the last instruction and this
- // pop instruction.
- byte instr = last_pc_[0];
- if ((instr & ~0x7) == 0x50) {
- int push_reg_code = instr & 0x7;
- if (push_reg_code == dst.code()) {
- pc_ = last_pc_;
- if (FLAG_print_peephole_optimization) {
- PrintF("%d push/pop (same reg) eliminated\n", pc_offset());
- }
- } else {
- // Convert 'push src; pop dst' to 'mov dst, src'.
- last_pc_[0] = 0x8b;
- Register src = { push_reg_code };
- EnsureSpace ensure_space(this);
- emit_operand(dst, Operand(src));
- if (FLAG_print_peephole_optimization) {
- PrintF("%d push/pop (reg->reg) eliminated\n", pc_offset());
- }
- }
- last_pc_ = NULL;
- return;
- } else if (instr == 0xff) { // push of an operand, convert to a move
- byte op1 = last_pc_[1];
- // Check if the operation is really a push.
- if ((op1 & 0x38) == (6 << 3)) {
- op1 = (op1 & ~0x38) | static_cast<byte>(dst.code() << 3);
- last_pc_[0] = 0x8b;
- last_pc_[1] = op1;
- last_pc_ = NULL;
- if (FLAG_print_peephole_optimization) {
- PrintF("%d push/pop (op->reg) eliminated\n", pc_offset());
- }
- return;
- }
- } else if ((instr == 0x89) &&
- (last_pc_[1] == 0x04) &&
- (last_pc_[2] == 0x24)) {
- // 0x71283c 396 890424 mov [esp],eax
- // 0x71283f 399 58 pop eax
- if (dst.is(eax)) {
- // change to
- // 0x710fac 216 83c404 add esp,0x4
- last_pc_[0] = 0x83;
- last_pc_[1] = 0xc4;
- last_pc_[2] = 0x04;
- last_pc_ = NULL;
- if (FLAG_print_peephole_optimization) {
- PrintF("%d push/pop (mov-pop) eliminated\n", pc_offset());
- }
- return;
- }
- } else if (instr == 0x6a && dst.is(eax)) { // push of immediate 8 bit
- byte imm8 = last_pc_[1];
- if (imm8 == 0) {
- // 6a00 push 0x0
- // 58 pop eax
- last_pc_[0] = 0x31;
- last_pc_[1] = 0xc0;
- // change to
- // 31c0 xor eax,eax
- last_pc_ = NULL;
- if (FLAG_print_peephole_optimization) {
- PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
- }
- return;
- } else {
- // 6a00 push 0xXX
- // 58 pop eax
- last_pc_[0] = 0xb8;
- EnsureSpace ensure_space(this);
- if ((imm8 & 0x80) != 0) {
- EMIT(0xff);
- EMIT(0xff);
- EMIT(0xff);
- // change to
- // b8XXffffff mov eax,0xffffffXX
- } else {
- EMIT(0x00);
- EMIT(0x00);
- EMIT(0x00);
- // change to
- // b8XX000000 mov eax,0x000000XX
- }
- last_pc_ = NULL;
- if (FLAG_print_peephole_optimization) {
- PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
- }
- return;
- }
- } else if (instr == 0x68 && dst.is(eax)) { // push of immediate 32 bit
- // 68XXXXXXXX push 0xXXXXXXXX
- // 58 pop eax
- last_pc_[0] = 0xb8;
- last_pc_ = NULL;
- // change to
- // b8XXXXXXXX mov eax,0xXXXXXXXX
- if (FLAG_print_peephole_optimization) {
- PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
- }
- return;
- }
-
- // Other potential patterns for peephole:
- // 0x712716 102 890424 mov [esp], eax
- // 0x712719 105 8b1424 mov edx, [esp]
- }
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x58 | dst.code());
-}
-
-
-void Assembler::pop(const Operand& dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x8F);
- emit_operand(eax, dst);
-}
-
-
-void Assembler::enter(const Immediate& size) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xC8);
- emit_w(size);
- EMIT(0);
-}
-
-
-void Assembler::leave() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xC9);
-}
-
-
-void Assembler::mov_b(Register dst, const Operand& src) {
- ASSERT(dst.code() < 4);
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x8A);
- emit_operand(dst, src);
-}
-
-
-void Assembler::mov_b(const Operand& dst, int8_t imm8) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xC6);
- emit_operand(eax, dst);
- EMIT(imm8);
-}
-
-
-void Assembler::mov_b(const Operand& dst, Register src) {
- ASSERT(src.code() < 4);
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x88);
- emit_operand(src, dst);
-}
-
-
-void Assembler::mov_w(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x8B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::mov_w(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x89);
- emit_operand(src, dst);
-}
-
-
-void Assembler::mov(Register dst, int32_t imm32) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xB8 | dst.code());
- emit(imm32);
-}
-
-
-void Assembler::mov(Register dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xB8 | dst.code());
- emit(x);
-}
-
-
-void Assembler::mov(Register dst, Handle<Object> handle) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xB8 | dst.code());
- emit(handle);
-}
-
-
-void Assembler::mov(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x8B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::mov(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x89);
- EMIT(0xC0 | src.code() << 3 | dst.code());
-}
-
-
-void Assembler::mov(const Operand& dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xC7);
- emit_operand(eax, dst);
- emit(x);
-}
-
-
-void Assembler::mov(const Operand& dst, Handle<Object> handle) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xC7);
- emit_operand(eax, dst);
- emit(handle);
-}
-
-
-void Assembler::mov(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x89);
- emit_operand(src, dst);
-}
-
-
-void Assembler::movsx_b(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x0F);
- EMIT(0xBE);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movsx_w(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x0F);
- EMIT(0xBF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzx_b(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x0F);
- EMIT(0xB6);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzx_w(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x0F);
- EMIT(0xB7);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
- ASSERT(CpuFeatures::IsEnabled(CMOV));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- UNIMPLEMENTED();
- USE(cc);
- USE(dst);
- USE(imm32);
-}
-
-
-void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
- ASSERT(CpuFeatures::IsEnabled(CMOV));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- UNIMPLEMENTED();
- USE(cc);
- USE(dst);
- USE(handle);
-}
-
-
-void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(CMOV));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- // Opcode: 0f 40 + cc /r.
- EMIT(0x0F);
- EMIT(0x40 + cc);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cld() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xFC);
-}
-
-
-void Assembler::rep_movs() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF3);
- EMIT(0xA5);
-}
-
-
-void Assembler::rep_stos() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF3);
- EMIT(0xAB);
-}
-
-
-void Assembler::stos() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xAB);
-}
-
-
-void Assembler::xchg(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (src.is(eax) || dst.is(eax)) { // Single-byte encoding.
- EMIT(0x90 | (src.is(eax) ? dst.code() : src.code()));
- } else {
- EMIT(0x87);
- EMIT(0xC0 | src.code() << 3 | dst.code());
- }
-}
-
-
-void Assembler::adc(Register dst, int32_t imm32) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_arith(2, Operand(dst), Immediate(imm32));
-}
-
-
-void Assembler::adc(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x13);
- emit_operand(dst, src);
-}
-
-
-void Assembler::add(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x03);
- emit_operand(dst, src);
-}
-
-
-void Assembler::add(const Operand& dst, const Immediate& x) {
- ASSERT(reloc_info_writer.last_pc() != NULL);
- if (FLAG_peephole_optimization && (reloc_info_writer.last_pc() <= last_pc_)) {
- byte instr = last_pc_[0];
- if ((instr & 0xf8) == 0x50) {
- // Last instruction was a push. Check whether this is a pop without a
- // result.
- if ((dst.is_reg(esp)) &&
- (x.x_ == kPointerSize) && (x.rmode_ == RelocInfo::NONE)) {
- pc_ = last_pc_;
- last_pc_ = NULL;
- if (FLAG_print_peephole_optimization) {
- PrintF("%d push/pop(noreg) eliminated\n", pc_offset());
- }
- return;
- }
- }
- }
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_arith(0, dst, x);
-}
-
-
-void Assembler::and_(Register dst, int32_t imm32) {
- and_(dst, Immediate(imm32));
-}
-
-
-void Assembler::and_(Register dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_arith(4, Operand(dst), x);
-}
-
-
-void Assembler::and_(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x23);
- emit_operand(dst, src);
-}
-
-
-void Assembler::and_(const Operand& dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_arith(4, dst, x);
-}
-
-
-void Assembler::and_(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x21);
- emit_operand(src, dst);
-}
-
-
-void Assembler::cmpb(const Operand& op, int8_t imm8) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x80);
- emit_operand(edi, op); // edi == 7
- EMIT(imm8);
-}
-
-
-void Assembler::cmpb(const Operand& dst, Register src) {
- ASSERT(src.is_byte_register());
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x38);
- emit_operand(src, dst);
-}
-
-
-void Assembler::cmpb(Register dst, const Operand& src) {
- ASSERT(dst.is_byte_register());
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x3A);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cmpw(const Operand& op, Immediate imm16) {
- ASSERT(imm16.is_int16());
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x81);
- emit_operand(edi, op);
- emit_w(imm16);
-}
-
-
-void Assembler::cmp(Register reg, int32_t imm32) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_arith(7, Operand(reg), Immediate(imm32));
-}
-
-
-void Assembler::cmp(Register reg, Handle<Object> handle) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_arith(7, Operand(reg), Immediate(handle));
-}
-
-
-void Assembler::cmp(Register reg, const Operand& op) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x3B);
- emit_operand(reg, op);
-}
-
-
-void Assembler::cmp(const Operand& op, const Immediate& imm) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_arith(7, op, imm);
-}
-
-
-void Assembler::cmp(const Operand& op, Handle<Object> handle) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_arith(7, op, Immediate(handle));
-}
-
-
-void Assembler::cmpb_al(const Operand& op) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x38); // CMP r/m8, r8
- emit_operand(eax, op); // eax has same code as register al.
-}
-
-
-void Assembler::cmpw_ax(const Operand& op) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x39); // CMP r/m16, r16
- emit_operand(eax, op); // eax has same code as register ax.
-}
-
-
-void Assembler::dec_b(Register dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xFE);
- EMIT(0xC8 | dst.code());
-}
-
-
-void Assembler::dec_b(const Operand& dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xFE);
- emit_operand(ecx, dst);
-}
-
-
-void Assembler::dec(Register dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x48 | dst.code());
-}
-
-
-void Assembler::dec(const Operand& dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xFF);
- emit_operand(ecx, dst);
-}
-
-
-void Assembler::cdq() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x99);
-}
-
-
-void Assembler::idiv(Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF7);
- EMIT(0xF8 | src.code());
-}
-
-
-void Assembler::imul(Register reg) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF7);
- EMIT(0xE8 | reg.code());
-}
-
-
-void Assembler::imul(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x0F);
- EMIT(0xAF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::imul(Register dst, Register src, int32_t imm32) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (is_int8(imm32)) {
- EMIT(0x6B);
- EMIT(0xC0 | dst.code() << 3 | src.code());
- EMIT(imm32);
- } else {
- EMIT(0x69);
- EMIT(0xC0 | dst.code() << 3 | src.code());
- emit(imm32);
- }
-}
-
-
-void Assembler::inc(Register dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x40 | dst.code());
-}
-
-
-void Assembler::inc(const Operand& dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xFF);
- emit_operand(eax, dst);
-}
-
-
-void Assembler::lea(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x8D);
- emit_operand(dst, src);
-}
-
-
-void Assembler::mul(Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF7);
- EMIT(0xE0 | src.code());
-}
-
-
-void Assembler::neg(Register dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF7);
- EMIT(0xD8 | dst.code());
-}
-
-
-void Assembler::not_(Register dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF7);
- EMIT(0xD0 | dst.code());
-}
-
-
-void Assembler::or_(Register dst, int32_t imm32) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_arith(1, Operand(dst), Immediate(imm32));
-}
-
-
-void Assembler::or_(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x0B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::or_(const Operand& dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_arith(1, dst, x);
-}
-
-
-void Assembler::or_(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x09);
- emit_operand(src, dst);
-}
-
-
-void Assembler::rcl(Register dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- EMIT(0xD0 | dst.code());
- } else {
- EMIT(0xC1);
- EMIT(0xD0 | dst.code());
- EMIT(imm8);
- }
-}
-
-
-void Assembler::rcr(Register dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- EMIT(0xD8 | dst.code());
- } else {
- EMIT(0xC1);
- EMIT(0xD8 | dst.code());
- EMIT(imm8);
- }
-}
-
-
-void Assembler::sar(Register dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- EMIT(0xF8 | dst.code());
- } else {
- EMIT(0xC1);
- EMIT(0xF8 | dst.code());
- EMIT(imm8);
- }
-}
-
-
-void Assembler::sar_cl(Register dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xD3);
- EMIT(0xF8 | dst.code());
-}
-
-
-void Assembler::sbb(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x1B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::shld(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x0F);
- EMIT(0xA5);
- emit_operand(dst, src);
-}
-
-
-void Assembler::shl(Register dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- EMIT(0xE0 | dst.code());
- } else {
- EMIT(0xC1);
- EMIT(0xE0 | dst.code());
- EMIT(imm8);
- }
-}
-
-
-void Assembler::shl_cl(Register dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xD3);
- EMIT(0xE0 | dst.code());
-}
-
-
-void Assembler::shrd(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x0F);
- EMIT(0xAD);
- emit_operand(dst, src);
-}
-
-
-void Assembler::shr(Register dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- EMIT(0xE8 | dst.code());
- } else {
- EMIT(0xC1);
- EMIT(0xE8 | dst.code());
- EMIT(imm8);
- }
-}
-
-
-void Assembler::shr_cl(Register dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xD3);
- EMIT(0xE8 | dst.code());
-}
-
-
-void Assembler::subb(const Operand& op, int8_t imm8) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (op.is_reg(eax)) {
- EMIT(0x2c);
- } else {
- EMIT(0x80);
- emit_operand(ebp, op); // ebp == 5
- }
- EMIT(imm8);
-}
-
-
-void Assembler::sub(const Operand& dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_arith(5, dst, x);
-}
-
-
-void Assembler::sub(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x2B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::subb(Register dst, const Operand& src) {
- ASSERT(dst.code() < 4);
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x2A);
- emit_operand(dst, src);
-}
-
-
-void Assembler::sub(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x29);
- emit_operand(src, dst);
-}
-
-
-void Assembler::test(Register reg, const Immediate& imm) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- // Only use test against byte for registers that have a byte
- // variant: eax, ebx, ecx, and edx.
- if (imm.rmode_ == RelocInfo::NONE && is_uint8(imm.x_) && reg.code() < 4) {
- uint8_t imm8 = imm.x_;
- if (reg.is(eax)) {
- EMIT(0xA8);
- EMIT(imm8);
- } else {
- emit_arith_b(0xF6, 0xC0, reg, imm8);
- }
- } else {
- // This is not using emit_arith because test doesn't support
- // sign-extension of 8-bit operands.
- if (reg.is(eax)) {
- EMIT(0xA9);
- } else {
- EMIT(0xF7);
- EMIT(0xC0 | reg.code());
- }
- emit(imm);
- }
-}
-
-
-void Assembler::test(Register reg, const Operand& op) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x85);
- emit_operand(reg, op);
-}
-
-
-void Assembler::test_b(Register reg, const Operand& op) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x84);
- emit_operand(reg, op);
-}
-
-
-void Assembler::test(const Operand& op, const Immediate& imm) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF7);
- emit_operand(eax, op);
- emit(imm);
-}
-
-
-void Assembler::test_b(const Operand& op, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF6);
- emit_operand(eax, op);
- EMIT(imm8);
-}
-
-
-void Assembler::xor_(Register dst, int32_t imm32) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_arith(6, Operand(dst), Immediate(imm32));
-}
-
-
-void Assembler::xor_(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x33);
- emit_operand(dst, src);
-}
-
-
-void Assembler::xor_(const Operand& src, Register dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x31);
- emit_operand(dst, src);
-}
-
-
-void Assembler::xor_(const Operand& dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_arith(6, dst, x);
-}
-
-
-void Assembler::bt(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x0F);
- EMIT(0xA3);
- emit_operand(src, dst);
-}
-
-
-void Assembler::bts(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x0F);
- EMIT(0xAB);
- emit_operand(src, dst);
-}
-
-
-void Assembler::hlt() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF4);
-}
-
-
-void Assembler::int3() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xCC);
-}
-
-
-void Assembler::nop() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x90);
-}
-
-
-void Assembler::rdtsc() {
- ASSERT(CpuFeatures::IsEnabled(RDTSC));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x0F);
- EMIT(0x31);
-}
-
-
-void Assembler::ret(int imm16) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(is_uint16(imm16));
- if (imm16 == 0) {
- EMIT(0xC3);
- } else {
- EMIT(0xC2);
- EMIT(imm16 & 0xFF);
- EMIT((imm16 >> 8) & 0xFF);
- }
-}
-
-
-// Labels refer to positions in the (to be) generated code.
-// There are bound, linked, and unused labels.
-//
-// Bound labels refer to known positions in the already
-// generated code. pos() is the position the label refers to.
-//
-// Linked labels refer to unknown positions in the code
-// to be generated; pos() is the position of the 32bit
-// Displacement of the last instruction using the label.
-
-
-void Assembler::print(Label* L) {
- if (L->is_unused()) {
- PrintF("unused label\n");
- } else if (L->is_bound()) {
- PrintF("bound label to %d\n", L->pos());
- } else if (L->is_linked()) {
- Label l = *L;
- PrintF("unbound label");
- while (l.is_linked()) {
- Displacement disp = disp_at(&l);
- PrintF("@ %d ", l.pos());
- disp.print();
- PrintF("\n");
- disp.next(&l);
- }
- } else {
- PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
- }
-}
-
-
-void Assembler::bind_to(Label* L, int pos) {
- EnsureSpace ensure_space(this);
- last_pc_ = NULL;
- ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
- while (L->is_linked()) {
- Displacement disp = disp_at(L);
- int fixup_pos = L->pos();
- if (disp.type() == Displacement::CODE_RELATIVE) {
- // Relative to Code* heap object pointer.
- long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag);
- } else {
- if (disp.type() == Displacement::UNCONDITIONAL_JUMP) {
- ASSERT(byte_at(fixup_pos - 1) == 0xE9); // jmp expected
- }
- // Relative address, relative to point after address.
- int imm32 = pos - (fixup_pos + sizeof(int32_t));
- long_at_put(fixup_pos, imm32);
- }
- disp.next(L);
- }
- L->bind_to(pos);
-}
-
-
-void Assembler::bind(Label* L) {
- EnsureSpace ensure_space(this);
- last_pc_ = NULL;
- ASSERT(!L->is_bound()); // label can only be bound once
- bind_to(L, pc_offset());
-}
-
-
-void Assembler::bind(NearLabel* L) {
- ASSERT(!L->is_bound());
- last_pc_ = NULL;
- while (L->unresolved_branches_ > 0) {
- int branch_pos = L->unresolved_positions_[L->unresolved_branches_ - 1];
- int disp = pc_offset() - branch_pos;
- ASSERT(is_int8(disp));
- set_byte_at(branch_pos - sizeof(int8_t), disp);
- L->unresolved_branches_--;
- }
- L->bind_to(pc_offset());
-}
-
-
-void Assembler::call(Label* L) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (L->is_bound()) {
- const int long_size = 5;
- int offs = L->pos() - pc_offset();
- ASSERT(offs <= 0);
- // 1110 1000 #32-bit disp.
- EMIT(0xE8);
- emit(offs - long_size);
- } else {
- // 1110 1000 #32-bit disp.
- EMIT(0xE8);
- emit_disp(L, Displacement::OTHER);
- }
-}
-
-
-void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- EMIT(0xE8);
- emit(entry - (pc_ + sizeof(int32_t)), rmode);
-}
-
-
-void Assembler::call(const Operand& adr) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xFF);
- emit_operand(edx, adr);
-}
-
-
-void Assembler::call(Handle<Code> code, RelocInfo::Mode rmode) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- EMIT(0xE8);
- emit(reinterpret_cast<intptr_t>(code.location()), rmode);
-}
-
-
-void Assembler::jmp(Label* L) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (L->is_bound()) {
- const int short_size = 2;
- const int long_size = 5;
- int offs = L->pos() - pc_offset();
- ASSERT(offs <= 0);
- if (is_int8(offs - short_size)) {
- // 1110 1011 #8-bit disp.
- EMIT(0xEB);
- EMIT((offs - short_size) & 0xFF);
- } else {
- // 1110 1001 #32-bit disp.
- EMIT(0xE9);
- emit(offs - long_size);
- }
- } else {
- // 1110 1001 #32-bit disp.
- EMIT(0xE9);
- emit_disp(L, Displacement::UNCONDITIONAL_JUMP);
- }
-}
-
-
-void Assembler::jmp(byte* entry, RelocInfo::Mode rmode) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- EMIT(0xE9);
- emit(entry - (pc_ + sizeof(int32_t)), rmode);
-}
-
-
-void Assembler::jmp(const Operand& adr) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xFF);
- emit_operand(esp, adr);
-}
-
-
-void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- EMIT(0xE9);
- emit(reinterpret_cast<intptr_t>(code.location()), rmode);
-}
-
-
-void Assembler::jmp(NearLabel* L) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (L->is_bound()) {
- const int short_size = 2;
- int offs = L->pos() - pc_offset();
- ASSERT(offs <= 0);
- ASSERT(is_int8(offs - short_size));
- // 1110 1011 #8-bit disp.
- EMIT(0xEB);
- EMIT((offs - short_size) & 0xFF);
- } else {
- EMIT(0xEB);
- EMIT(0x00); // The displacement will be resolved later.
- L->link_to(pc_offset());
- }
-}
-
-
-void Assembler::j(Condition cc, Label* L, Hint hint) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(0 <= cc && cc < 16);
- if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
- if (L->is_bound()) {
- const int short_size = 2;
- const int long_size = 6;
- int offs = L->pos() - pc_offset();
- ASSERT(offs <= 0);
- if (is_int8(offs - short_size)) {
- // 0111 tttn #8-bit disp
- EMIT(0x70 | cc);
- EMIT((offs - short_size) & 0xFF);
- } else {
- // 0000 1111 1000 tttn #32-bit disp
- EMIT(0x0F);
- EMIT(0x80 | cc);
- emit(offs - long_size);
- }
- } else {
- // 0000 1111 1000 tttn #32-bit disp
- // Note: could eliminate cond. jumps to this jump if condition
- // is the same however, seems to be rather unlikely case.
- EMIT(0x0F);
- EMIT(0x80 | cc);
- emit_disp(L, Displacement::OTHER);
- }
-}
-
-
-void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT((0 <= cc) && (cc < 16));
- if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
- // 0000 1111 1000 tttn #32-bit disp.
- EMIT(0x0F);
- EMIT(0x80 | cc);
- emit(entry - (pc_ + sizeof(int32_t)), rmode);
-}
-
-
-void Assembler::j(Condition cc, Handle<Code> code, Hint hint) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
- // 0000 1111 1000 tttn #32-bit disp
- EMIT(0x0F);
- EMIT(0x80 | cc);
- emit(reinterpret_cast<intptr_t>(code.location()), RelocInfo::CODE_TARGET);
-}
-
-
-void Assembler::j(Condition cc, NearLabel* L, Hint hint) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(0 <= cc && cc < 16);
- if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
- if (L->is_bound()) {
- const int short_size = 2;
- int offs = L->pos() - pc_offset();
- ASSERT(offs <= 0);
- ASSERT(is_int8(offs - short_size));
- // 0111 tttn #8-bit disp
- EMIT(0x70 | cc);
- EMIT((offs - short_size) & 0xFF);
- } else {
- EMIT(0x70 | cc);
- EMIT(0x00); // The displacement will be resolved later.
- L->link_to(pc_offset());
- }
-}
-
-
-// FPU instructions.
-
-void Assembler::fld(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xD9, 0xC0, i);
-}
-
-
-void Assembler::fstp(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDD, 0xD8, i);
-}
-
-
-void Assembler::fld1() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xD9);
- EMIT(0xE8);
-}
-
-
-void Assembler::fldpi() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xD9);
- EMIT(0xEB);
-}
-
-
-void Assembler::fldz() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xD9);
- EMIT(0xEE);
-}
-
-
-void Assembler::fldln2() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xD9);
- EMIT(0xED);
-}
-
-
-void Assembler::fld_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xD9);
- emit_operand(eax, adr);
-}
-
-
-void Assembler::fld_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xDD);
- emit_operand(eax, adr);
-}
-
-
-void Assembler::fstp_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xD9);
- emit_operand(ebx, adr);
-}
-
-
-void Assembler::fstp_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xDD);
- emit_operand(ebx, adr);
-}
-
-
-void Assembler::fst_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xDD);
- emit_operand(edx, adr);
-}
-
-
-void Assembler::fild_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xDB);
- emit_operand(eax, adr);
-}
-
-
-void Assembler::fild_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xDF);
- emit_operand(ebp, adr);
-}
-
-
-void Assembler::fistp_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xDB);
- emit_operand(ebx, adr);
-}
-
-
-void Assembler::fisttp_s(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(SSE3));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xDB);
- emit_operand(ecx, adr);
-}
-
-
-void Assembler::fisttp_d(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(SSE3));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xDD);
- emit_operand(ecx, adr);
-}
-
-
-void Assembler::fist_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xDB);
- emit_operand(edx, adr);
-}
-
-
-void Assembler::fistp_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xDF);
- emit_operand(edi, adr);
-}
-
-
-void Assembler::fabs() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xD9);
- EMIT(0xE1);
-}
-
-
-void Assembler::fchs() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xD9);
- EMIT(0xE0);
-}
-
-
-void Assembler::fcos() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xD9);
- EMIT(0xFF);
-}
-
-
-void Assembler::fsin() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xD9);
- EMIT(0xFE);
-}
-
-
-void Assembler::fyl2x() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xD9);
- EMIT(0xF1);
-}
-
-
-void Assembler::fadd(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDC, 0xC0, i);
-}
-
-
-void Assembler::fsub(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDC, 0xE8, i);
-}
-
-
-void Assembler::fisub_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xDA);
- emit_operand(esp, adr);
-}
-
-
-void Assembler::fmul(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDC, 0xC8, i);
-}
-
-
-void Assembler::fdiv(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDC, 0xF8, i);
-}
-
-
-void Assembler::faddp(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDE, 0xC0, i);
-}
-
-
-void Assembler::fsubp(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDE, 0xE8, i);
-}
-
-
-void Assembler::fsubrp(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDE, 0xE0, i);
-}
-
-
-void Assembler::fmulp(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDE, 0xC8, i);
-}
-
-
-void Assembler::fdivp(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDE, 0xF8, i);
-}
-
-
-void Assembler::fprem() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xD9);
- EMIT(0xF8);
-}
-
-
-void Assembler::fprem1() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xD9);
- EMIT(0xF5);
-}
-
-
-void Assembler::fxch(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xD9, 0xC8, i);
-}
-
-
-void Assembler::fincstp() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xD9);
- EMIT(0xF7);
-}
-
-
-void Assembler::ffree(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDD, 0xC0, i);
-}
-
-
-void Assembler::ftst() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xD9);
- EMIT(0xE4);
-}
-
-
-void Assembler::fucomp(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDD, 0xE8, i);
-}
-
-
-void Assembler::fucompp() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xDA);
- EMIT(0xE9);
-}
-
-
-void Assembler::fucomi(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xDB);
- EMIT(0xE8 + i);
-}
-
-
-void Assembler::fucomip() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xDF);
- EMIT(0xE9);
-}
-
-
-void Assembler::fcompp() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xDE);
- EMIT(0xD9);
-}
-
-
-void Assembler::fnstsw_ax() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xDF);
- EMIT(0xE0);
-}
-
-
-void Assembler::fwait() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x9B);
-}
-
-
-void Assembler::frndint() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xD9);
- EMIT(0xFC);
-}
-
-
-void Assembler::fnclex() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xDB);
- EMIT(0xE2);
-}
-
-
-void Assembler::sahf() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x9E);
-}
-
-
-void Assembler::setcc(Condition cc, Register reg) {
- ASSERT(reg.is_byte_register());
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x0F);
- EMIT(0x90 | cc);
- EMIT(0xC0 | reg.code());
-}
-
-
-void Assembler::cvttss2si(Register dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF3);
- EMIT(0x0F);
- EMIT(0x2C);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cvttsd2si(Register dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x2C);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x2A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF3);
- EMIT(0x0F);
- EMIT(0x5A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x5A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::addsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x58);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x59);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::subsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x5C);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::divsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x5E);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x57);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x51);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::andpd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x54);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x2E);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movmskpd(Register dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x50);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0xC2);
- emit_sse_operand(dst, src);
- EMIT(1); // LT == 1
-}
-
-
-void Assembler::movaps(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x0F);
- EMIT(0x28);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movdqa(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x7F);
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movdqa(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x6F);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movdqu(const Operand& dst, XMMRegister src ) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF3);
- EMIT(0x0F);
- EMIT(0x7F);
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movdqu(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF3);
- EMIT(0x0F);
- EMIT(0x6F);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x38);
- EMIT(0x2A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movntdq(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0xE7);
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::prefetch(const Operand& src, int level) {
- ASSERT(is_uint2(level));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x0F);
- EMIT(0x18);
- XMMRegister code = { level }; // Emit hint number in Reg position of RegR/M.
- emit_sse_operand(code, src);
-}
-
-
-void Assembler::movdbl(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- movsd(dst, src);
-}
-
-
-void Assembler::movdbl(const Operand& dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- movsd(dst, src);
-}
-
-
-void Assembler::movsd(const Operand& dst, XMMRegister src ) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF2); // double
- EMIT(0x0F);
- EMIT(0x11); // store
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movsd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF2); // double
- EMIT(0x0F);
- EMIT(0x10); // load
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x10);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movss(const Operand& dst, XMMRegister src ) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF3); // float
- EMIT(0x0F);
- EMIT(0x11); // store
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movss(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF3); // float
- EMIT(0x0F);
- EMIT(0x10); // load
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movss(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF3);
- EMIT(0x0F);
- EMIT(0x10);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x6E);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movd(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x7E);
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::pand(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0xDB);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::pxor(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0xEF);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::por(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0xEB);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::ptest(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x38);
- EMIT(0x17);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::psllq(XMMRegister reg, int8_t shift) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x73);
- emit_sse_operand(esi, reg); // esi == 6
- EMIT(shift);
-}
-
-
-void Assembler::psllq(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0xF3);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::psrlq(XMMRegister reg, int8_t shift) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x73);
- emit_sse_operand(edx, reg); // edx == 2
- EMIT(shift);
-}
-
-
-void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0xD3);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x70);
- emit_sse_operand(dst, src);
- EMIT(shuffle);
-}
-
-
-void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x3A);
- EMIT(0x16);
- emit_sse_operand(src, dst);
- EMIT(offset);
-}
-
-
-void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x3A);
- EMIT(0x22);
- emit_sse_operand(dst, src);
- EMIT(offset);
-}
-
-
-void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
- Register ireg = { reg.code() };
- emit_operand(ireg, adr);
-}
-
-
-void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
- EMIT(0xC0 | dst.code() << 3 | src.code());
-}
-
-
-void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
- EMIT(0xC0 | dst.code() << 3 | src.code());
-}
-
-
-void Assembler::Print() {
- Disassembler::Decode(stdout, buffer_, pc_);
-}
-
-
-void Assembler::RecordJSReturn() {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordDebugBreakSlot() {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
-}
-
-
-void Assembler::RecordComment(const char* msg, bool force) {
- if (FLAG_code_comments || force) {
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
-
-void Assembler::GrowBuffer() {
- ASSERT(overflow());
- if (!own_buffer_) FATAL("external code buffer is too small");
-
- // Compute new buffer size.
- CodeDesc desc; // the new buffer
- if (buffer_size_ < 4*KB) {
- desc.buffer_size = 4*KB;
- } else {
- desc.buffer_size = 2*buffer_size_;
- }
- // Some internal data structures overflow for very large buffers,
- // they must ensure that kMaximalBufferSize is not too large.
- if ((desc.buffer_size > kMaximalBufferSize) ||
- (desc.buffer_size > isolate()->heap()->MaxOldGenerationSize())) {
- V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
- }
-
- // Setup new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
- desc.instr_size = pc_offset();
- desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
-
- // Clear the buffer in debug mode. Use 'int3' instructions to make
- // sure to get into problems if we ever run uninitialized code.
-#ifdef DEBUG
- memset(desc.buffer, 0xCC, desc.buffer_size);
-#endif
-
- // Copy the data.
- int pc_delta = desc.buffer - buffer_;
- int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(rc_delta + reloc_info_writer.pos(),
- reloc_info_writer.pos(), desc.reloc_size);
-
- // Switch buffers.
- if (isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
- pc_ += pc_delta;
- if (last_pc_ != NULL) {
- last_pc_ += pc_delta;
- }
- reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.last_pc() + pc_delta);
-
- // Relocate runtime entries.
- for (RelocIterator it(desc); !it.done(); it.next()) {
- RelocInfo::Mode rmode = it.rinfo()->rmode();
- if (rmode == RelocInfo::RUNTIME_ENTRY) {
- int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
- *p -= pc_delta; // relocate entry
- } else if (rmode == RelocInfo::INTERNAL_REFERENCE) {
- int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
- if (*p != 0) { // 0 means uninitialized.
- *p += pc_delta;
- }
- }
- }
-
- ASSERT(!overflow());
-}
-
-
-void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
- ASSERT(is_uint8(op1) && is_uint8(op2)); // wrong opcode
- ASSERT(is_uint8(imm8));
- ASSERT((op1 & 0x01) == 0); // should be 8bit operation
- EMIT(op1);
- EMIT(op2 | dst.code());
- EMIT(imm8);
-}
-
-
-void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
- ASSERT((0 <= sel) && (sel <= 7));
- Register ireg = { sel };
- if (x.is_int8()) {
- EMIT(0x83); // using a sign-extended 8-bit immediate.
- emit_operand(ireg, dst);
- EMIT(x.x_ & 0xFF);
- } else if (dst.is_reg(eax)) {
- EMIT((sel << 3) | 0x05); // short form if the destination is eax.
- emit(x);
- } else {
- EMIT(0x81); // using a literal 32-bit immediate.
- emit_operand(ireg, dst);
- emit(x);
- }
-}
-
-
-void Assembler::emit_operand(Register reg, const Operand& adr) {
- const unsigned length = adr.len_;
- ASSERT(length > 0);
-
- // Emit updated ModRM byte containing the given register.
- pc_[0] = (adr.buf_[0] & ~0x38) | (reg.code() << 3);
-
- // Emit the rest of the encoded operand.
- for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
- pc_ += length;
-
- // Emit relocation information if necessary.
- if (length >= sizeof(int32_t) && adr.rmode_ != RelocInfo::NONE) {
- pc_ -= sizeof(int32_t); // pc_ must be *at* disp32
- RecordRelocInfo(adr.rmode_);
- pc_ += sizeof(int32_t);
- }
-}
-
-
-void Assembler::emit_farith(int b1, int b2, int i) {
- ASSERT(is_uint8(b1) && is_uint8(b2)); // wrong opcode
- ASSERT(0 <= i && i < 8); // illegal stack offset
- EMIT(b1);
- EMIT(b2 + i);
-}
-
-
-void Assembler::db(uint8_t data) {
- EnsureSpace ensure_space(this);
- EMIT(data);
-}
-
-
-void Assembler::dd(uint32_t data) {
- EnsureSpace ensure_space(this);
- emit(data);
-}
-
-
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- ASSERT(rmode != RelocInfo::NONE);
- // Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- if (!Serializer::enabled() && !emit_debug_code()) {
- return;
- }
- }
- RelocInfo rinfo(pc_, rmode, data);
- reloc_info_writer.Write(&rinfo);
-}
-
-
-#ifdef GENERATED_CODE_COVERAGE
-static FILE* coverage_log = NULL;
-
-
-static void InitCoverageLog() {
- char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
- if (file_name != NULL) {
- coverage_log = fopen(file_name, "aw+");
- }
-}
-
-
-void LogGeneratedCodeCoverage(const char* file_line) {
- const char* return_address = (&file_line)[-1];
- char* push_insn = const_cast<char*>(return_address - 12);
- push_insn[0] = 0xeb; // Relative branch insn.
- push_insn[1] = 13; // Skip over coverage insns.
- if (coverage_log != NULL) {
- fprintf(coverage_log, "%s\n", file_line);
- fflush(coverage_log);
- }
-}
-
-#endif
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/assembler-ia32.h b/src/3rdparty/v8/src/ia32/assembler-ia32.h
deleted file mode 100644
index 079dca7..0000000
--- a/src/3rdparty/v8/src/ia32/assembler-ia32.h
+++ /dev/null
@@ -1,1159 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
-
-// A light-weight IA32 Assembler.
-
-#ifndef V8_IA32_ASSEMBLER_IA32_H_
-#define V8_IA32_ASSEMBLER_IA32_H_
-
-#include "isolate.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-//
-struct Register {
- static const int kNumAllocatableRegisters = 6;
- static const int kNumRegisters = 8;
-
- static inline const char* AllocationIndexToString(int index);
-
- static inline int ToAllocationIndex(Register reg);
-
- static inline Register FromAllocationIndex(int index);
-
- static Register from_code(int code) {
- Register r = { code };
- return r;
- }
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
- // eax, ebx, ecx and edx are byte registers, the rest are not.
- bool is_byte_register() const { return code_ <= 3; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
-
- // Unfortunately we can't make this private in a struct.
- int code_;
-};
-
-
-const Register eax = { 0 };
-const Register ecx = { 1 };
-const Register edx = { 2 };
-const Register ebx = { 3 };
-const Register esp = { 4 };
-const Register ebp = { 5 };
-const Register esi = { 6 };
-const Register edi = { 7 };
-const Register no_reg = { -1 };
-
-
-inline const char* Register::AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- // This is the mapping of allocation indices to registers.
- const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
- return kNames[index];
-}
-
-
-inline int Register::ToAllocationIndex(Register reg) {
- ASSERT(reg.is_valid() && !reg.is(esp) && !reg.is(ebp));
- return (reg.code() >= 6) ? reg.code() - 2 : reg.code();
-}
-
-
-inline Register Register::FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- return (index >= 4) ? from_code(index + 2) : from_code(index);
-}
-
-
-struct XMMRegister {
- static const int kNumAllocatableRegisters = 7;
- static const int kNumRegisters = 8;
-
- static int ToAllocationIndex(XMMRegister reg) {
- ASSERT(reg.code() != 0);
- return reg.code() - 1;
- }
-
- static XMMRegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- return from_code(index + 1);
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- const char* const names[] = {
- "xmm1",
- "xmm2",
- "xmm3",
- "xmm4",
- "xmm5",
- "xmm6",
- "xmm7"
- };
- return names[index];
- }
-
- static XMMRegister from_code(int code) {
- XMMRegister r = { code };
- return r;
- }
-
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(XMMRegister reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
-
- int code_;
-};
-
-
-const XMMRegister xmm0 = { 0 };
-const XMMRegister xmm1 = { 1 };
-const XMMRegister xmm2 = { 2 };
-const XMMRegister xmm3 = { 3 };
-const XMMRegister xmm4 = { 4 };
-const XMMRegister xmm5 = { 5 };
-const XMMRegister xmm6 = { 6 };
-const XMMRegister xmm7 = { 7 };
-
-
-typedef XMMRegister DoubleRegister;
-
-
-enum Condition {
- // any value < 0 is considered no_condition
- no_condition = -1,
-
- overflow = 0,
- no_overflow = 1,
- below = 2,
- above_equal = 3,
- equal = 4,
- not_equal = 5,
- below_equal = 6,
- above = 7,
- negative = 8,
- positive = 9,
- parity_even = 10,
- parity_odd = 11,
- less = 12,
- greater_equal = 13,
- less_equal = 14,
- greater = 15,
-
- // aliases
- carry = below,
- not_carry = above_equal,
- zero = equal,
- not_zero = not_equal,
- sign = negative,
- not_sign = positive
-};
-
-
-// Returns the equivalent of !cc.
-// Negation of the default no_condition (-1) results in a non-default
-// no_condition value (-2). As long as tests for no_condition check
-// for condition < 0, this will work as expected.
-inline Condition NegateCondition(Condition cc) {
- return static_cast<Condition>(cc ^ 1);
-}
-
-
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cc) {
- switch (cc) {
- case below:
- return above;
- case above:
- return below;
- case above_equal:
- return below_equal;
- case below_equal:
- return above_equal;
- case less:
- return greater;
- case greater:
- return less;
- case greater_equal:
- return less_equal;
- case less_equal:
- return greater_equal;
- default:
- return cc;
- };
-}
-
-
-enum Hint {
- no_hint = 0,
- not_taken = 0x2e,
- taken = 0x3e
-};
-
-
-// The result of negating a hint is as if the corresponding condition
-// were negated by NegateCondition. That is, no_hint is mapped to
-// itself and not_taken and taken are mapped to each other.
-inline Hint NegateHint(Hint hint) {
- return (hint == no_hint)
- ? no_hint
- : ((hint == not_taken) ? taken : not_taken);
-}
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Immediates
-
-class Immediate BASE_EMBEDDED {
- public:
- inline explicit Immediate(int x);
- inline explicit Immediate(const ExternalReference& ext);
- inline explicit Immediate(Handle<Object> handle);
- inline explicit Immediate(Smi* value);
- inline explicit Immediate(Address addr);
-
- static Immediate CodeRelativeOffset(Label* label) {
- return Immediate(label);
- }
-
- bool is_zero() const { return x_ == 0 && rmode_ == RelocInfo::NONE; }
- bool is_int8() const {
- return -128 <= x_ && x_ < 128 && rmode_ == RelocInfo::NONE;
- }
- bool is_int16() const {
- return -32768 <= x_ && x_ < 32768 && rmode_ == RelocInfo::NONE;
- }
-
- private:
- inline explicit Immediate(Label* value);
-
- int x_;
- RelocInfo::Mode rmode_;
-
- friend class Assembler;
-};
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Operands
-
-enum ScaleFactor {
- times_1 = 0,
- times_2 = 1,
- times_4 = 2,
- times_8 = 3,
- times_int_size = times_4,
- times_half_pointer_size = times_2,
- times_pointer_size = times_4,
- times_twice_pointer_size = times_8
-};
-
-
-class Operand BASE_EMBEDDED {
- public:
- // reg
- INLINE(explicit Operand(Register reg));
-
- // XMM reg
- INLINE(explicit Operand(XMMRegister xmm_reg));
-
- // [disp/r]
- INLINE(explicit Operand(int32_t disp, RelocInfo::Mode rmode));
- // disp only must always be relocated
-
- // [base + disp/r]
- explicit Operand(Register base, int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
-
- // [base + index*scale + disp/r]
- explicit Operand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
-
- // [index*scale + disp/r]
- explicit Operand(Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
-
- static Operand StaticVariable(const ExternalReference& ext) {
- return Operand(reinterpret_cast<int32_t>(ext.address()),
- RelocInfo::EXTERNAL_REFERENCE);
- }
-
- static Operand StaticArray(Register index,
- ScaleFactor scale,
- const ExternalReference& arr) {
- return Operand(index, scale, reinterpret_cast<int32_t>(arr.address()),
- RelocInfo::EXTERNAL_REFERENCE);
- }
-
- static Operand Cell(Handle<JSGlobalPropertyCell> cell) {
- return Operand(reinterpret_cast<int32_t>(cell.location()),
- RelocInfo::GLOBAL_PROPERTY_CELL);
- }
-
- // Returns true if this Operand is a wrapper for the specified register.
- bool is_reg(Register reg) const;
-
- private:
- byte buf_[6];
- // The number of bytes in buf_.
- unsigned int len_;
- // Only valid if len_ > 4.
- RelocInfo::Mode rmode_;
-
- // Set the ModRM byte without an encoded 'reg' register. The
- // register is encoded later as part of the emit_operand operation.
- inline void set_modrm(int mod, Register rm);
-
- inline void set_sib(ScaleFactor scale, Register index, Register base);
- inline void set_disp8(int8_t disp);
- inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);
-
- friend class Assembler;
-};
-
-
-// -----------------------------------------------------------------------------
-// A Displacement describes the 32bit immediate field of an instruction which
-// may be used together with a Label in order to refer to a yet unknown code
-// position. Displacements stored in the instruction stream are used to describe
-// the instruction and to chain a list of instructions using the same Label.
-// A Displacement contains 2 different fields:
-//
-// next field: position of next displacement in the chain (0 = end of list)
-// type field: instruction type
-//
-// A next value of null (0) indicates the end of a chain (note that there can
-// be no displacement at position zero, because there is always at least one
-// instruction byte before the displacement).
-//
-// Displacement _data field layout
-//
-// |31.....2|1......0|
-// [ next | type |
-
-class Displacement BASE_EMBEDDED {
- public:
- enum Type {
- UNCONDITIONAL_JUMP,
- CODE_RELATIVE,
- OTHER
- };
-
- int data() const { return data_; }
- Type type() const { return TypeField::decode(data_); }
- void next(Label* L) const {
- int n = NextField::decode(data_);
- n > 0 ? L->link_to(n) : L->Unuse();
- }
- void link_to(Label* L) { init(L, type()); }
-
- explicit Displacement(int data) { data_ = data; }
-
- Displacement(Label* L, Type type) { init(L, type); }
-
- void print() {
- PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
- NextField::decode(data_));
- }
-
- private:
- int data_;
-
- class TypeField: public BitField<Type, 0, 2> {};
- class NextField: public BitField<int, 2, 32-2> {};
-
- void init(Label* L, Type type);
-};
-
-
-
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
-// Example:
-// if (CpuFeatures::IsSupported(SSE2)) {
-// CpuFeatures::Scope fscope(SSE2);
-// // Generate SSE2 floating point code.
-// } else {
-// // Generate standard x87 floating point code.
-// }
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
-
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- ASSERT(initialized_);
- if (f == SSE2 && !FLAG_enable_sse2) return false;
- if (f == SSE3 && !FLAG_enable_sse3) return false;
- if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
- if (f == CMOV && !FLAG_enable_cmov) return false;
- if (f == RDTSC && !FLAG_enable_rdtsc) return false;
- return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
- }
-
-#ifdef DEBUG
- // Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
- ASSERT(initialized_);
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL) {
- // When no isolate is available, work as if we're running in
- // release mode.
- return IsSupported(f);
- }
- uint64_t enabled = isolate->enabled_cpu_features();
- return (enabled & (static_cast<uint64_t>(1) << f)) != 0;
- }
-#endif
-
- // Enable a specified feature within a scope.
- class Scope BASE_EMBEDDED {
-#ifdef DEBUG
- public:
- explicit Scope(CpuFeature f) {
- uint64_t mask = static_cast<uint64_t>(1) << f;
- ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() ||
- (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
- isolate_ = Isolate::UncheckedCurrent();
- old_enabled_ = 0;
- if (isolate_ != NULL) {
- old_enabled_ = isolate_->enabled_cpu_features();
- isolate_->set_enabled_cpu_features(old_enabled_ | mask);
- }
- }
- ~Scope() {
- ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
- if (isolate_ != NULL) {
- isolate_->set_enabled_cpu_features(old_enabled_);
- }
- }
- private:
- Isolate* isolate_;
- uint64_t old_enabled_;
-#else
- public:
- explicit Scope(CpuFeature f) {}
-#endif
- };
-
- class TryForceFeatureScope BASE_EMBEDDED {
- public:
- explicit TryForceFeatureScope(CpuFeature f)
- : old_supported_(CpuFeatures::supported_) {
- if (CanForce()) {
- CpuFeatures::supported_ |= (static_cast<uint64_t>(1) << f);
- }
- }
-
- ~TryForceFeatureScope() {
- if (CanForce()) {
- CpuFeatures::supported_ = old_supported_;
- }
- }
-
- private:
- static bool CanForce() {
- // It's only safe to temporarily force support of CPU features
- // when there's only a single isolate, which is guaranteed when
- // the serializer is enabled.
- return Serializer::enabled();
- }
-
- const uint64_t old_supported_;
- };
-
- private:
-#ifdef DEBUG
- static bool initialized_;
-#endif
- static uint64_t supported_;
- static uint64_t found_by_runtime_probing_;
-
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
-};
-
-
-class Assembler : public AssemblerBase {
- private:
- // We check before assembling an instruction that there is sufficient
- // space to write an instruction and its relocation information.
- // The relocation writer's position must be kGap bytes above the end of
- // the generated instructions. This leaves enough space for the
- // longest possible ia32 instruction, 15 bytes, and the longest possible
- // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
- // (There is a 15 byte limit on ia32 instruction length that rules out some
- // otherwise valid instructions.)
- // This allows for a single, fast space check per instruction.
- static const int kGap = 32;
-
- public:
- // Create an assembler. Instructions and relocation information are emitted
- // into a buffer, with the instructions starting from the beginning and the
- // relocation information starting from the end of the buffer. See CodeDesc
- // for a detailed comment on the layout (globals.h).
- //
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
- //
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
- // TODO(vitalyr): the assembler does not need an isolate.
- Assembler(Isolate* isolate, void* buffer, int buffer_size);
- ~Assembler();
-
- // Overrides the default provided by FLAG_debug_code.
- void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
-
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- void GetCode(CodeDesc* desc);
-
- // Read/Modify the code target in the branch/call instruction at pc.
- inline static Address target_address_at(Address pc);
- inline static void set_target_address_at(Address pc, Address target);
-
- // This sets the branch destination (which is in the instruction on x86).
- // This is for calls and branches within generated code.
- inline static void set_target_at(Address instruction_payload,
- Address target) {
- set_target_address_at(instruction_payload, target);
- }
-
- // This sets the branch destination (which is in the instruction on x86).
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address instruction_payload,
- Address target) {
- set_target_address_at(instruction_payload, target);
- }
-
- static const int kCallTargetSize = kPointerSize;
- static const int kExternalTargetSize = kPointerSize;
-
- // Distance between the address of the code target in the call instruction
- // and the return address
- static const int kCallTargetAddressOffset = kPointerSize;
- // Distance between start of patched return sequence and the emitted address
- // to jump to.
- static const int kPatchReturnSequenceAddressOffset = 1; // JMP imm32.
-
- // Distance between start of patched debug break slot and the emitted address
- // to jump to.
- static const int kPatchDebugBreakSlotAddressOffset = 1; // JMP imm32.
-
- static const int kCallInstructionLength = 5;
- static const int kJSReturnSequenceLength = 6;
-
- // The debug break slot must be able to contain a call instruction.
- static const int kDebugBreakSlotLength = kCallInstructionLength;
-
- // One byte opcode for test eax,0xXXXXXXXX.
- static const byte kTestEaxByte = 0xA9;
- // One byte opcode for test al, 0xXX.
- static const byte kTestAlByte = 0xA8;
- // One byte opcode for nop.
- static const byte kNopByte = 0x90;
-
- // One byte opcode for a short unconditional jump.
- static const byte kJmpShortOpcode = 0xEB;
- // One byte prefix for a short conditional jump.
- static const byte kJccShortPrefix = 0x70;
- static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
- static const byte kJcShortOpcode = kJccShortPrefix | carry;
-
- // ---------------------------------------------------------------------------
- // Code generation
- //
- // - function names correspond one-to-one to ia32 instruction mnemonics
- // - unless specified otherwise, instructions operate on 32bit operands
- // - instructions on 8bit (byte) operands/registers have a trailing '_b'
- // - instructions on 16bit (word) operands/registers have a trailing '_w'
- // - naming conflicts with C++ keywords are resolved via a trailing '_'
-
- // NOTE ON INTERFACE: Currently, the interface is not very consistent
- // in the sense that some operations (e.g. mov()) can be called in more
- // the one way to generate the same instruction: The Register argument
- // can in some cases be replaced with an Operand(Register) argument.
- // This should be cleaned up and made more orthogonal. The questions
- // is: should we always use Operands instead of Registers where an
- // Operand is possible, or should we have a Register (overloaded) form
- // instead? We must be careful to make sure that the selected instruction
- // is obvious from the parameters to avoid hard-to-find code generation
- // bugs.
-
- // Insert the smallest number of nop instructions
- // possible to align the pc offset to a multiple
- // of m. m must be a power of 2.
- void Align(int m);
- // Aligns code to something that's optimal for a jump target for the platform.
- void CodeTargetAlign();
-
- // Stack
- void pushad();
- void popad();
-
- void pushfd();
- void popfd();
-
- void push(const Immediate& x);
- void push_imm32(int32_t imm32);
- void push(Register src);
- void push(const Operand& src);
-
- void pop(Register dst);
- void pop(const Operand& dst);
-
- void enter(const Immediate& size);
- void leave();
-
- // Moves
- void mov_b(Register dst, const Operand& src);
- void mov_b(const Operand& dst, int8_t imm8);
- void mov_b(const Operand& dst, Register src);
-
- void mov_w(Register dst, const Operand& src);
- void mov_w(const Operand& dst, Register src);
-
- void mov(Register dst, int32_t imm32);
- void mov(Register dst, const Immediate& x);
- void mov(Register dst, Handle<Object> handle);
- void mov(Register dst, const Operand& src);
- void mov(Register dst, Register src);
- void mov(const Operand& dst, const Immediate& x);
- void mov(const Operand& dst, Handle<Object> handle);
- void mov(const Operand& dst, Register src);
-
- void movsx_b(Register dst, const Operand& src);
-
- void movsx_w(Register dst, const Operand& src);
-
- void movzx_b(Register dst, const Operand& src);
-
- void movzx_w(Register dst, const Operand& src);
-
- // Conditional moves
- void cmov(Condition cc, Register dst, int32_t imm32);
- void cmov(Condition cc, Register dst, Handle<Object> handle);
- void cmov(Condition cc, Register dst, const Operand& src);
-
- // Flag management.
- void cld();
-
- // Repetitive string instructions.
- void rep_movs();
- void rep_stos();
- void stos();
-
- // Exchange two registers
- void xchg(Register dst, Register src);
-
- // Arithmetics
- void adc(Register dst, int32_t imm32);
- void adc(Register dst, const Operand& src);
-
- void add(Register dst, const Operand& src);
- void add(const Operand& dst, const Immediate& x);
-
- void and_(Register dst, int32_t imm32);
- void and_(Register dst, const Immediate& x);
- void and_(Register dst, const Operand& src);
- void and_(const Operand& src, Register dst);
- void and_(const Operand& dst, const Immediate& x);
-
- void cmpb(const Operand& op, int8_t imm8);
- void cmpb(Register src, const Operand& dst);
- void cmpb(const Operand& dst, Register src);
- void cmpb_al(const Operand& op);
- void cmpw_ax(const Operand& op);
- void cmpw(const Operand& op, Immediate imm16);
- void cmp(Register reg, int32_t imm32);
- void cmp(Register reg, Handle<Object> handle);
- void cmp(Register reg, const Operand& op);
- void cmp(const Operand& op, const Immediate& imm);
- void cmp(const Operand& op, Handle<Object> handle);
-
- void dec_b(Register dst);
- void dec_b(const Operand& dst);
-
- void dec(Register dst);
- void dec(const Operand& dst);
-
- void cdq();
-
- void idiv(Register src);
-
- // Signed multiply instructions.
- void imul(Register src); // edx:eax = eax * src.
- void imul(Register dst, const Operand& src); // dst = dst * src.
- void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32.
-
- void inc(Register dst);
- void inc(const Operand& dst);
-
- void lea(Register dst, const Operand& src);
-
- // Unsigned multiply instruction.
- void mul(Register src); // edx:eax = eax * reg.
-
- void neg(Register dst);
-
- void not_(Register dst);
-
- void or_(Register dst, int32_t imm32);
- void or_(Register dst, const Operand& src);
- void or_(const Operand& dst, Register src);
- void or_(const Operand& dst, const Immediate& x);
-
- void rcl(Register dst, uint8_t imm8);
- void rcr(Register dst, uint8_t imm8);
-
- void sar(Register dst, uint8_t imm8);
- void sar_cl(Register dst);
-
- void sbb(Register dst, const Operand& src);
-
- void shld(Register dst, const Operand& src);
-
- void shl(Register dst, uint8_t imm8);
- void shl_cl(Register dst);
-
- void shrd(Register dst, const Operand& src);
-
- void shr(Register dst, uint8_t imm8);
- void shr_cl(Register dst);
-
- void subb(const Operand& dst, int8_t imm8);
- void subb(Register dst, const Operand& src);
- void sub(const Operand& dst, const Immediate& x);
- void sub(Register dst, const Operand& src);
- void sub(const Operand& dst, Register src);
-
- void test(Register reg, const Immediate& imm);
- void test(Register reg, const Operand& op);
- void test_b(Register reg, const Operand& op);
- void test(const Operand& op, const Immediate& imm);
- void test_b(const Operand& op, uint8_t imm8);
-
- void xor_(Register dst, int32_t imm32);
- void xor_(Register dst, const Operand& src);
- void xor_(const Operand& src, Register dst);
- void xor_(const Operand& dst, const Immediate& x);
-
- // Bit operations.
- void bt(const Operand& dst, Register src);
- void bts(const Operand& dst, Register src);
-
- // Miscellaneous
- void hlt();
- void int3();
- void nop();
- void rdtsc();
- void ret(int imm16);
-
- // Label operations & relative jumps (PPUM Appendix D)
- //
- // Takes a branch opcode (cc) and a label (L) and generates
- // either a backward branch or a forward branch and links it
- // to the label fixup chain. Usage:
- //
- // Label L; // unbound label
- // j(cc, &L); // forward branch to unbound label
- // bind(&L); // bind label to the current pc
- // j(cc, &L); // backward branch to bound label
- // bind(&L); // illegal: a label may be bound only once
- //
- // Note: The same Label can be used for forward and backward branches
- // but it may be bound only once.
-
- void bind(Label* L); // binds an unbound label L to the current code position
- void bind(NearLabel* L);
-
- // Calls
- void call(Label* L);
- void call(byte* entry, RelocInfo::Mode rmode);
- void call(const Operand& adr);
- void call(Handle<Code> code, RelocInfo::Mode rmode);
-
- // Jumps
- void jmp(Label* L); // unconditional jump to L
- void jmp(byte* entry, RelocInfo::Mode rmode);
- void jmp(const Operand& adr);
- void jmp(Handle<Code> code, RelocInfo::Mode rmode);
-
- // Short jump
- void jmp(NearLabel* L);
-
- // Conditional jumps
- void j(Condition cc, Label* L, Hint hint = no_hint);
- void j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint = no_hint);
- void j(Condition cc, Handle<Code> code, Hint hint = no_hint);
-
- // Conditional short jump
- void j(Condition cc, NearLabel* L, Hint hint = no_hint);
-
- // Floating-point operations
- void fld(int i);
- void fstp(int i);
-
- void fld1();
- void fldz();
- void fldpi();
- void fldln2();
-
- void fld_s(const Operand& adr);
- void fld_d(const Operand& adr);
-
- void fstp_s(const Operand& adr);
- void fstp_d(const Operand& adr);
- void fst_d(const Operand& adr);
-
- void fild_s(const Operand& adr);
- void fild_d(const Operand& adr);
-
- void fist_s(const Operand& adr);
-
- void fistp_s(const Operand& adr);
- void fistp_d(const Operand& adr);
-
- // The fisttp instructions require SSE3.
- void fisttp_s(const Operand& adr);
- void fisttp_d(const Operand& adr);
-
- void fabs();
- void fchs();
- void fcos();
- void fsin();
- void fyl2x();
-
- void fadd(int i);
- void fsub(int i);
- void fmul(int i);
- void fdiv(int i);
-
- void fisub_s(const Operand& adr);
-
- void faddp(int i = 1);
- void fsubp(int i = 1);
- void fsubrp(int i = 1);
- void fmulp(int i = 1);
- void fdivp(int i = 1);
- void fprem();
- void fprem1();
-
- void fxch(int i = 1);
- void fincstp();
- void ffree(int i = 0);
-
- void ftst();
- void fucomp(int i);
- void fucompp();
- void fucomi(int i);
- void fucomip();
- void fcompp();
- void fnstsw_ax();
- void fwait();
- void fnclex();
-
- void frndint();
-
- void sahf();
- void setcc(Condition cc, Register reg);
-
- void cpuid();
-
- // SSE2 instructions
- void cvttss2si(Register dst, const Operand& src);
- void cvttsd2si(Register dst, const Operand& src);
-
- void cvtsi2sd(XMMRegister dst, const Operand& src);
- void cvtss2sd(XMMRegister dst, XMMRegister src);
- void cvtsd2ss(XMMRegister dst, XMMRegister src);
-
- void addsd(XMMRegister dst, XMMRegister src);
- void subsd(XMMRegister dst, XMMRegister src);
- void mulsd(XMMRegister dst, XMMRegister src);
- void divsd(XMMRegister dst, XMMRegister src);
- void xorpd(XMMRegister dst, XMMRegister src);
- void sqrtsd(XMMRegister dst, XMMRegister src);
-
- void andpd(XMMRegister dst, XMMRegister src);
-
- void ucomisd(XMMRegister dst, XMMRegister src);
- void movmskpd(Register dst, XMMRegister src);
-
- void cmpltsd(XMMRegister dst, XMMRegister src);
-
- void movaps(XMMRegister dst, XMMRegister src);
-
- void movdqa(XMMRegister dst, const Operand& src);
- void movdqa(const Operand& dst, XMMRegister src);
- void movdqu(XMMRegister dst, const Operand& src);
- void movdqu(const Operand& dst, XMMRegister src);
-
- // Use either movsd or movlpd.
- void movdbl(XMMRegister dst, const Operand& src);
- void movdbl(const Operand& dst, XMMRegister src);
-
- void movd(XMMRegister dst, const Operand& src);
- void movd(const Operand& src, XMMRegister dst);
- void movsd(XMMRegister dst, XMMRegister src);
-
- void movss(XMMRegister dst, const Operand& src);
- void movss(const Operand& src, XMMRegister dst);
- void movss(XMMRegister dst, XMMRegister src);
-
- void pand(XMMRegister dst, XMMRegister src);
- void pxor(XMMRegister dst, XMMRegister src);
- void por(XMMRegister dst, XMMRegister src);
- void ptest(XMMRegister dst, XMMRegister src);
-
- void psllq(XMMRegister reg, int8_t shift);
- void psllq(XMMRegister dst, XMMRegister src);
- void psrlq(XMMRegister reg, int8_t shift);
- void psrlq(XMMRegister dst, XMMRegister src);
- void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
- void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
- void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
-
- // Parallel XMM operations.
- void movntdqa(XMMRegister src, const Operand& dst);
- void movntdq(const Operand& dst, XMMRegister src);
- // Prefetch src position into cache level.
- // Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
- // non-temporal
- void prefetch(const Operand& src, int level);
- // TODO(lrn): Need SFENCE for movnt?
-
- // Debugging
- void Print();
-
- // Check the code size generated from label to here.
- int SizeOfCodeGeneratedSince(Label* l) { return pc_offset() - l->pos(); }
-
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
-
- // Mark address of a debug break slot.
- void RecordDebugBreakSlot();
-
- // Record a comment relocation entry that can be used by a disassembler.
- // Use --code-comments to enable, or provide "force = true" flag to always
- // write a comment.
- void RecordComment(const char* msg, bool force = false);
-
- // Writes a single byte or word of data in the code stream. Used for
- // inline tables, e.g., jump-tables.
- void db(uint8_t data);
- void dd(uint32_t data);
-
- int pc_offset() const { return pc_ - buffer_; }
-
- // Check if there is less than kGap bytes available in the buffer.
- // If this is the case, we need to grow the buffer before emitting
- // an instruction or relocation information.
- inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
-
- // Get the number of bytes available in the buffer.
- inline int available_space() const { return reloc_info_writer.pos() - pc_; }
-
- static bool IsNop(Address addr) { return *addr == 0x90; }
-
- PositionsRecorder* positions_recorder() { return &positions_recorder_; }
-
- int relocation_writer_size() {
- return (buffer_ + buffer_size_) - reloc_info_writer.pos();
- }
-
- // Avoid overflows for displacements etc.
- static const int kMaximalBufferSize = 512*MB;
- static const int kMinimalBufferSize = 4*KB;
-
- protected:
- bool emit_debug_code() const { return emit_debug_code_; }
-
- void movsd(XMMRegister dst, const Operand& src);
- void movsd(const Operand& dst, XMMRegister src);
-
- void emit_sse_operand(XMMRegister reg, const Operand& adr);
- void emit_sse_operand(XMMRegister dst, XMMRegister src);
- void emit_sse_operand(Register dst, XMMRegister src);
-
- byte* addr_at(int pos) { return buffer_ + pos; }
-
- private:
- byte byte_at(int pos) { return buffer_[pos]; }
- void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
- uint32_t long_at(int pos) {
- return *reinterpret_cast<uint32_t*>(addr_at(pos));
- }
- void long_at_put(int pos, uint32_t x) {
- *reinterpret_cast<uint32_t*>(addr_at(pos)) = x;
- }
-
- // code emission
- void GrowBuffer();
- inline void emit(uint32_t x);
- inline void emit(Handle<Object> handle);
- inline void emit(uint32_t x, RelocInfo::Mode rmode);
- inline void emit(const Immediate& x);
- inline void emit_w(const Immediate& x);
-
- // Emit the code-object-relative offset of the label's position
- inline void emit_code_relative_offset(Label* label);
-
- // instruction generation
- void emit_arith_b(int op1, int op2, Register dst, int imm8);
-
- // Emit a basic arithmetic instruction (i.e. first byte of the family is 0x81)
- // with a given destination expression and an immediate operand. It attempts
- // to use the shortest encoding possible.
- // sel specifies the /n in the modrm byte (see the Intel PRM).
- void emit_arith(int sel, Operand dst, const Immediate& x);
-
- void emit_operand(Register reg, const Operand& adr);
-
- void emit_farith(int b1, int b2, int i);
-
- // labels
- void print(Label* L);
- void bind_to(Label* L, int pos);
-
- // displacements
- inline Displacement disp_at(Label* L);
- inline void disp_at_put(Label* L, Displacement disp);
- inline void emit_disp(Label* L, Displacement::Type type);
-
- // record reloc info for current pc_
- void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
-
- friend class CodePatcher;
- friend class EnsureSpace;
-
- // Code buffer:
- // The buffer into which code and relocation info are generated.
- byte* buffer_;
- int buffer_size_;
- // True if the assembler owns the buffer, false if buffer is external.
- bool own_buffer_;
-
- // code generation
- byte* pc_; // the program counter; moves forward
- RelocInfoWriter reloc_info_writer;
-
- // push-pop elimination
- byte* last_pc_;
-
- PositionsRecorder positions_recorder_;
-
- bool emit_debug_code_;
-
- friend class PositionsRecorder;
-};
-
-
-// Helper class that ensures that there is enough space for generating
-// instructions and relocation information. The constructor makes
-// sure that there is enough space and (in debug mode) the destructor
-// checks that we did not generate too much.
-class EnsureSpace BASE_EMBEDDED {
- public:
- explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
- if (assembler_->overflow()) assembler_->GrowBuffer();
-#ifdef DEBUG
- space_before_ = assembler_->available_space();
-#endif
- }
-
-#ifdef DEBUG
- ~EnsureSpace() {
- int bytes_generated = space_before_ - assembler_->available_space();
- ASSERT(bytes_generated < assembler_->kGap);
- }
-#endif
-
- private:
- Assembler* assembler_;
-#ifdef DEBUG
- int space_before_;
-#endif
-};
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_ASSEMBLER_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/builtins-ia32.cc b/src/3rdparty/v8/src/ia32/builtins-ia32.cc
deleted file mode 100644
index 97d2b03..0000000
--- a/src/3rdparty/v8/src/ia32/builtins-ia32.cc
+++ /dev/null
@@ -1,1596 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen-inl.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
- CFunctionId id,
- BuiltinExtraArguments extra_args) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments excluding receiver
- // -- edi : called function (only guaranteed when
- // extra_args requires it)
- // -- esi : context
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -- ...
- // -- esp[4 * argc] : first argument (argc == eax)
- // -- esp[4 * (argc +1)] : receiver
- // -----------------------------------
-
- // Insert extra arguments.
- int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- Register scratch = ebx;
- __ pop(scratch); // Save return address.
- __ push(edi);
- __ push(scratch); // Restore return address.
- } else {
- ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
- }
-
- // JumpToExternalReference expects eax to contain the number of arguments
- // including the receiver and the extra arguments.
- __ add(Operand(eax), Immediate(num_extra_args + 1));
- __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
-}
-
-
-void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax: number of arguments
- // -- edi: constructor function
- // -----------------------------------
-
- Label non_function_call;
- // Check that function is not a smi.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &non_function_call);
- // Check that function is a JSFunction.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &non_function_call);
-
- // Jump to the function-specific construct stub.
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
- __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
- __ jmp(Operand(ebx));
-
- // edi: called object
- // eax: number of arguments
- __ bind(&non_function_call);
- // Set expected number of arguments to zero (not changing eax).
- __ Set(ebx, Immediate(0));
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- Handle<Code> arguments_adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
-}
-
-
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool count_constructions) {
- // Should never count constructions for api objects.
- ASSERT(!is_api_function || !count_constructions);
-
- // Enter a construct frame.
- __ EnterConstructFrame();
-
- // Store a smi-tagged arguments count on the stack.
- __ SmiTag(eax);
- __ push(eax);
-
- // Push the function to invoke on the stack.
- __ push(edi);
-
- // Try to allocate the object without transitioning into C code. If any of the
- // preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- Label undo_allocation;
-#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
- __ j(not_equal, &rt_call);
-#endif
-
- // Verified that the constructor is a JSFunction.
- // Load the initial map and verify that it is in fact a map.
- // edi: constructor
- __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &rt_call);
- // edi: constructor
- // eax: initial map (if proven valid below)
- __ CmpObjectType(eax, MAP_TYPE, ebx);
- __ j(not_equal, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see comments
- // in Runtime_NewObject in runtime.cc). In which case the initial map's
- // instance type would be JS_FUNCTION_TYPE.
- // edi: constructor
- // eax: initial map
- __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
- __ j(equal, &rt_call);
-
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ dec_b(FieldOperand(ecx, SharedFunctionInfo::kConstructionCountOffset));
- __ j(not_zero, &allocate);
-
- __ push(eax);
- __ push(edi);
-
- __ push(edi); // constructor
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(edi);
- __ pop(eax);
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- // edi: constructor
- // eax: initial map
- __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
- __ shl(edi, kPointerSizeLog2);
- __ AllocateInNewSpace(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
- // Allocated the JSObject, now initialize the fields.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- __ mov(Operand(ebx, JSObject::kMapOffset), eax);
- Factory* factory = masm->isolate()->factory();
- __ mov(ecx, factory->empty_fixed_array());
- __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
- __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
- // Set extra fields in the newly allocated object.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- { Label loop, entry;
- // To allow for truncation.
- if (count_constructions) {
- __ mov(edx, factory->one_pointer_filler_map());
- } else {
- __ mov(edx, factory->undefined_value());
- }
- __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(Operand(ecx, 0), edx);
- __ add(Operand(ecx), Immediate(kPointerSize));
- __ bind(&entry);
- __ cmp(ecx, Operand(edi));
- __ j(less, &loop);
- }
-
- // Add the object tag to make the JSObject real, so that we can continue and
- // jump into the continuation code at any time from now on. Any failures
- // need to undo the allocation, so that the heap is in a consistent state
- // and verifiable.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- __ or_(Operand(ebx), Immediate(kHeapObjectTag));
-
- // Check if a non-empty properties array is needed.
- // Allocate and initialize a FixedArray if it is.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- // Calculate the total number of properties described by the map.
- __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
- __ movzx_b(ecx, FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
- __ add(edx, Operand(ecx));
- // Calculate unused properties past the end of the in-object properties.
- __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
- __ sub(edx, Operand(ecx));
- // Done if no extra properties are to be allocated.
- __ j(zero, &allocated);
- __ Assert(positive, "Property allocation count failed.");
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // ebx: JSObject
- // edi: start of next object (will be start of FixedArray)
- // edx: number of elements in properties array
- __ AllocateInNewSpace(FixedArray::kHeaderSize,
- times_pointer_size,
- edx,
- edi,
- ecx,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
-
- // Initialize the FixedArray.
- // ebx: JSObject
- // edi: FixedArray
- // edx: number of elements
- // ecx: start of next object
- __ mov(eax, factory->fixed_array_map());
- __ mov(Operand(edi, FixedArray::kMapOffset), eax); // setup the map
- __ SmiTag(edx);
- __ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length
-
- // Initialize the fields to undefined.
- // ebx: JSObject
- // edi: FixedArray
- // ecx: start of next object
- { Label loop, entry;
- __ mov(edx, factory->undefined_value());
- __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(Operand(eax, 0), edx);
- __ add(Operand(eax), Immediate(kPointerSize));
- __ bind(&entry);
- __ cmp(eax, Operand(ecx));
- __ j(below, &loop);
- }
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject
- // ebx: JSObject
- // edi: FixedArray
- __ or_(Operand(edi), Immediate(kHeapObjectTag)); // add the heap tag
- __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
-
-
- // Continue with JSObject being successfully allocated
- // ebx: JSObject
- __ jmp(&allocated);
-
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // ebx: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(ebx);
- }
-
- // Allocate the new receiver object using the runtime call.
- __ bind(&rt_call);
- // Must restore edi (constructor) before calling runtime.
- __ mov(edi, Operand(esp, 0));
- // edi: function (constructor)
- __ push(edi);
- __ CallRuntime(Runtime::kNewObject, 1);
- __ mov(ebx, Operand(eax)); // store result in ebx
-
- // New object allocated.
- // ebx: newly allocated object
- __ bind(&allocated);
- // Retrieve the function from the stack.
- __ pop(edi);
-
- // Retrieve smi-tagged arguments count from the stack.
- __ mov(eax, Operand(esp, 0));
- __ SmiUntag(eax);
-
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ push(ebx);
- __ push(ebx);
-
- // Setup pointer to last argument.
- __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ mov(ecx, Operand(eax));
- __ jmp(&entry);
- __ bind(&loop);
- __ push(Operand(ebx, ecx, times_4, 0));
- __ bind(&entry);
- __ dec(ecx);
- __ j(greater_equal, &loop);
-
- // Call the function.
- if (is_api_function) {
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION);
- } else {
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION);
- }
-
- // Restore context from the frame.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &use_receiver, not_taken);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(above_equal, &exit, not_taken);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ mov(eax, Operand(esp, 0));
-
- // Restore the arguments count and leave the construct frame.
- __ bind(&exit);
- __ mov(ebx, Operand(esp, kPointerSize)); // get arguments count
- __ LeaveConstructFrame();
-
- // Remove caller arguments from the stack and return.
- ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
- __ push(ecx);
- __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
- __ ret(0);
-}
-
-
-void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
-}
-
-
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
-}
-
-
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
-}
-
-
-static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
- bool is_construct) {
- // Clear the context before we push it when entering the JS frame.
- __ Set(esi, Immediate(0));
-
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Load the previous frame pointer (ebx) to access C arguments
- __ mov(ebx, Operand(ebp, 0));
-
- // Get the function from the frame and setup the context.
- __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
- __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
-
- // Push the function and the receiver onto the stack.
- __ push(ecx);
- __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
-
- // Load the number of arguments and setup pointer to the arguments.
- __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
- __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
-
- // Copy arguments to the stack in a loop.
- Label loop, entry;
- __ Set(ecx, Immediate(0));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
- __ push(Operand(edx, 0)); // dereference handle
- __ inc(Operand(ecx));
- __ bind(&entry);
- __ cmp(ecx, Operand(eax));
- __ j(not_equal, &loop);
-
- // Get the function from the stack and call it.
- __ mov(edi, Operand(esp, eax, times_4, +1 * kPointerSize)); // +1 ~ receiver
-
- // Invoke the code.
- if (is_construct) {
- __ call(masm->isolate()->builtins()->JSConstructCall(),
- RelocInfo::CODE_TARGET);
- } else {
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION);
- }
-
- // Exit the JS frame. Notice that this also removes the empty
- // context and the function left on the stack by the code
- // invocation.
- __ LeaveInternalFrame();
- __ ret(1 * kPointerSize); // remove receiver
-}
-
-
-void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, false);
-}
-
-
-void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, true);
-}
-
-
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Push a copy of the function onto the stack.
- __ push(edi);
-
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyCompile, 1);
- __ pop(edi);
-
- // Tear down temporary frame.
- __ LeaveInternalFrame();
-
- // Do a tail-call of the compiled function.
- __ lea(ecx, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(Operand(ecx));
-}
-
-
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Push a copy of the function onto the stack.
- __ push(edi);
-
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyRecompile, 1);
-
- // Restore function and tear down temporary frame.
- __ pop(edi);
- __ LeaveInternalFrame();
-
- // Do a tail-call of the compiled function.
- __ lea(ecx, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(Operand(ecx));
-}
-
-
-static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
- Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Pass the function and deoptimization type to the runtime system.
- __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
-
- // Tear down temporary frame.
- __ LeaveInternalFrame();
-
- // Get the full codegen state from the stack and untag it.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ SmiUntag(ecx);
-
- // Switch on the state.
- NearLabel not_no_registers, not_tos_eax;
- __ cmp(ecx, FullCodeGenerator::NO_REGISTERS);
- __ j(not_equal, &not_no_registers);
- __ ret(1 * kPointerSize); // Remove state.
-
- __ bind(&not_no_registers);
- __ mov(eax, Operand(esp, 2 * kPointerSize));
- __ cmp(ecx, FullCodeGenerator::TOS_REG);
- __ j(not_equal, &not_tos_eax);
- __ ret(2 * kPointerSize); // Remove state, eax.
-
- __ bind(&not_tos_eax);
- __ Abort("no cases left");
-}
-
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // TODO(kasperl): Do we need to save/restore the XMM registers too?
-
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ pushad();
- __ EnterInternalFrame();
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- __ LeaveInternalFrame();
- __ popad();
- __ ret(0);
-}
-
-
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
- Factory* factory = masm->isolate()->factory();
-
- // 1. Make sure we have at least one argument.
- { Label done;
- __ test(eax, Operand(eax));
- __ j(not_zero, &done, taken);
- __ pop(ebx);
- __ push(Immediate(factory->undefined_value()));
- __ push(ebx);
- __ inc(eax);
- __ bind(&done);
- }
-
- // 2. Get the function to call (passed as receiver) from the stack, check
- // if it is a function.
- Label non_function;
- // 1 ~ return address.
- __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &non_function, not_taken);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &non_function, not_taken);
-
-
- // 3a. Patch the first argument if necessary when calling a function.
- Label shift_arguments;
- { Label convert_to_object, use_global_receiver, patch_receiver;
- // Change context eagerly in case we need the global receiver.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(ebx, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, &shift_arguments);
-
- // Compute the receiver in non-strict mode.
- __ mov(ebx, Operand(esp, eax, times_4, 0)); // First argument.
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &convert_to_object);
-
- __ cmp(ebx, factory->null_value());
- __ j(equal, &use_global_receiver);
- __ cmp(ebx, factory->undefined_value());
- __ j(equal, &use_global_receiver);
-
- // We don't use IsObjectJSObjectType here because we jump on success.
- __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ sub(Operand(ecx), Immediate(FIRST_JS_OBJECT_TYPE));
- __ cmp(ecx, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
- __ j(below_equal, &shift_arguments);
-
- __ bind(&convert_to_object);
- __ EnterInternalFrame(); // In order to preserve argument count.
- __ SmiTag(eax);
- __ push(eax);
-
- __ push(ebx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(ebx, eax);
-
- __ pop(eax);
- __ SmiUntag(eax);
- __ LeaveInternalFrame();
- // Restore the function to edi.
- __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
- __ jmp(&patch_receiver);
-
- // Use the global receiver object from the called function as the
- // receiver.
- __ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ mov(ebx, FieldOperand(esi, kGlobalIndex));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
- __ mov(ebx, FieldOperand(ebx, kGlobalIndex));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
-
- __ bind(&patch_receiver);
- __ mov(Operand(esp, eax, times_4, 0), ebx);
-
- __ jmp(&shift_arguments);
- }
-
- // 3b. Patch the first argument when calling a non-function. The
- // CALL_NON_FUNCTION builtin expects the non-function callee as
- // receiver, so overwrite the first argument which will ultimately
- // become the receiver.
- __ bind(&non_function);
- __ mov(Operand(esp, eax, times_4, 0), edi);
- // Clear edi to indicate a non-function being called.
- __ Set(edi, Immediate(0));
-
- // 4. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- __ bind(&shift_arguments);
- { Label loop;
- __ mov(ecx, eax);
- __ bind(&loop);
- __ mov(ebx, Operand(esp, ecx, times_4, 0));
- __ mov(Operand(esp, ecx, times_4, kPointerSize), ebx);
- __ dec(ecx);
- __ j(not_sign, &loop); // While non-negative (to copy return address).
- __ pop(ebx); // Discard copy of return address.
- __ dec(eax); // One fewer argument (first argument is new receiver).
- }
-
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
- { Label function;
- __ test(edi, Operand(edi));
- __ j(not_zero, &function, taken);
- __ Set(ebx, Immediate(0));
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- __ bind(&function);
- }
-
- // 5b. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing. If so, jump
- // (tail-call) to the code in register edx without checking arguments.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx,
- FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
- __ SmiUntag(ebx);
- __ cmp(eax, Operand(ebx));
- __ j(not_equal,
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline());
-
- ParameterCount expected(0);
- __ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION);
-}
-
-
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- __ EnterInternalFrame();
-
- __ push(Operand(ebp, 4 * kPointerSize)); // push this
- __ push(Operand(ebp, 2 * kPointerSize)); // push arguments
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying need to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- ExternalReference real_stack_limit =
- ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(edi, Operand::StaticVariable(real_stack_limit));
- // Make ecx the space we have left. The stack might already be overflowed
- // here which will cause ecx to become negative.
- __ mov(ecx, Operand(esp));
- __ sub(ecx, Operand(edi));
- // Make edx the space we need for the array when it is unrolled onto the
- // stack.
- __ mov(edx, Operand(eax));
- __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
- // Check if the arguments will overflow the stack.
- __ cmp(ecx, Operand(edx));
- __ j(greater, &okay, taken); // Signed comparison.
-
- // Out of stack space.
- __ push(Operand(ebp, 4 * kPointerSize)); // push this
- __ push(eax);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- __ bind(&okay);
- // End of stack check.
-
- // Push current index and limit.
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ push(eax); // limit
- __ push(Immediate(0)); // index
-
- // Change context eagerly to get the right global object if
- // necessary.
- __ mov(edi, Operand(ebp, 4 * kPointerSize));
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Compute the receiver.
- Label call_to_object, use_global_receiver, push_receiver;
- __ mov(ebx, Operand(ebp, 3 * kPointerSize));
-
- // Do not transform the receiver for strict mode functions.
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, &push_receiver);
-
- // Compute the receiver in non-strict mode.
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &call_to_object);
- Factory* factory = masm->isolate()->factory();
- __ cmp(ebx, factory->null_value());
- __ j(equal, &use_global_receiver);
- __ cmp(ebx, factory->undefined_value());
- __ j(equal, &use_global_receiver);
-
- // If given receiver is already a JavaScript object then there's no
- // reason for converting it.
- // We don't use IsObjectJSObjectType here because we jump on success.
- __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ sub(Operand(ecx), Immediate(FIRST_JS_OBJECT_TYPE));
- __ cmp(ecx, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
- __ j(below_equal, &push_receiver);
-
- // Convert the receiver to an object.
- __ bind(&call_to_object);
- __ push(ebx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(ebx, Operand(eax));
- __ jmp(&push_receiver);
-
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ mov(ebx, FieldOperand(esi, kGlobalOffset));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
- __ mov(ebx, FieldOperand(ebx, kGlobalOffset));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
-
- // Push the receiver.
- __ bind(&push_receiver);
- __ push(ebx);
-
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ mov(eax, Operand(ebp, kIndexOffset));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(edx, Operand(ebp, 2 * kPointerSize)); // load arguments
-
- // Use inline caching to speed up access to arguments.
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
- __ call(ic, RelocInfo::CODE_TARGET);
- // It is important that we do not have a test instruction after the
- // call. A test instruction after the call is used to indicate that
- // we have generated an inline version of the keyed load. In this
- // case, we know that we are not generating a test instruction next.
-
- // Push the nth argument.
- __ push(eax);
-
- // Update the index on the stack and in register eax.
- __ mov(eax, Operand(ebp, kIndexOffset));
- __ add(Operand(eax), Immediate(1 << kSmiTagSize));
- __ mov(Operand(ebp, kIndexOffset), eax);
-
- __ bind(&entry);
- __ cmp(eax, Operand(ebp, kLimitOffset));
- __ j(not_equal, &loop);
-
- // Invoke the function.
- ParameterCount actual(eax);
- __ SmiUntag(eax);
- __ mov(edi, Operand(ebp, 4 * kPointerSize));
- __ InvokeFunction(edi, actual, CALL_FUNCTION);
-
- __ LeaveInternalFrame();
- __ ret(3 * kPointerSize); // remove this, receiver, and arguments
-}
-
-
-// Number of empty elements to allocate for an empty array.
-static const int kPreallocatedArrayElements = 4;
-
-
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. If the parameter initial_capacity is larger than zero an elements
-// backing store is allocated with this size and filled with the hole values.
-// Otherwise the elements backing store is set to the empty FixedArray.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
- Register array_function,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int initial_capacity,
- Label* gc_required) {
- ASSERT(initial_capacity >= 0);
-
- // Load the initial map from the array function.
- __ mov(scratch1, FieldOperand(array_function,
- JSFunction::kPrototypeOrInitialMapOffset));
-
- // Allocate the JSArray object together with space for a fixed array with the
- // requested elements.
- int size = JSArray::kSize;
- if (initial_capacity > 0) {
- size += FixedArray::SizeFor(initial_capacity);
- }
- __ AllocateInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // scratch1: initial map
- // scratch2: start of next object
- __ mov(FieldOperand(result, JSObject::kMapOffset), scratch1);
- Factory* factory = masm->isolate()->factory();
- __ mov(FieldOperand(result, JSArray::kPropertiesOffset),
- factory->empty_fixed_array());
- // Field JSArray::kElementsOffset is initialized later.
- __ mov(FieldOperand(result, JSArray::kLengthOffset), Immediate(0));
-
- // If no storage is requested for the elements array just set the empty
- // fixed array.
- if (initial_capacity == 0) {
- __ mov(FieldOperand(result, JSArray::kElementsOffset),
- factory->empty_fixed_array());
- return;
- }
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // scratch2: start of next object
- __ lea(scratch1, Operand(result, JSArray::kSize));
- __ mov(FieldOperand(result, JSArray::kElementsOffset), scratch1);
-
- // Initialize the FixedArray and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // scratch1: elements array
- // scratch2: start of next object
- __ mov(FieldOperand(scratch1, FixedArray::kMapOffset),
- factory->fixed_array_map());
- __ mov(FieldOperand(scratch1, FixedArray::kLengthOffset),
- Immediate(Smi::FromInt(initial_capacity)));
-
- // Fill the FixedArray with the hole value. Inline the code if short.
- // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
- static const int kLoopUnfoldLimit = 4;
- ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
- if (initial_capacity <= kLoopUnfoldLimit) {
- // Use a scratch register here to have only one reloc info when unfolding
- // the loop.
- __ mov(scratch3, factory->the_hole_value());
- for (int i = 0; i < initial_capacity; i++) {
- __ mov(FieldOperand(scratch1,
- FixedArray::kHeaderSize + i * kPointerSize),
- scratch3);
- }
- } else {
- Label loop, entry;
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(Operand(scratch1, 0), factory->the_hole_value());
- __ add(Operand(scratch1), Immediate(kPointerSize));
- __ bind(&entry);
- __ cmp(scratch1, Operand(scratch2));
- __ j(below, &loop);
- }
-}
-
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array and elements_array_end (see
-// below for when that is not the case). If the parameter fill_with_holes is
-// true the allocated elements backing store is filled with the hole values
-// otherwise it is left uninitialized. When the backing store is filled the
-// register elements_array is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
- Register array_function, // Array function.
- Register array_size, // As a smi, cannot be 0.
- Register result,
- Register elements_array,
- Register elements_array_end,
- Register scratch,
- bool fill_with_hole,
- Label* gc_required) {
- ASSERT(scratch.is(edi)); // rep stos destination
- ASSERT(!fill_with_hole || array_size.is(ecx)); // rep stos count
- ASSERT(!fill_with_hole || !result.is(eax)); // result is never eax
-
- // Load the initial map from the array function.
- __ mov(elements_array,
- FieldOperand(array_function,
- JSFunction::kPrototypeOrInitialMapOffset));
-
- // Allocate the JSArray object together with space for a FixedArray with the
- // requested elements.
- ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
- times_half_pointer_size, // array_size is a smi.
- array_size,
- result,
- elements_array_end,
- scratch,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // elements_array: initial map
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ mov(FieldOperand(result, JSObject::kMapOffset), elements_array);
- Factory* factory = masm->isolate()->factory();
- __ mov(elements_array, factory->empty_fixed_array());
- __ mov(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
- // Field JSArray::kElementsOffset is initialized later.
- __ mov(FieldOperand(result, JSArray::kLengthOffset), array_size);
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ lea(elements_array, Operand(result, JSArray::kSize));
- __ mov(FieldOperand(result, JSArray::kElementsOffset), elements_array);
-
- // Initialize the fixed array. FixedArray length is stored as a smi.
- // result: JSObject
- // elements_array: elements array
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ mov(FieldOperand(elements_array, FixedArray::kMapOffset),
- factory->fixed_array_map());
- // For non-empty JSArrays the length of the FixedArray and the JSArray is the
- // same.
- __ mov(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
-
- // Fill the allocated FixedArray with the hole value if requested.
- // result: JSObject
- // elements_array: elements array
- if (fill_with_hole) {
- __ SmiUntag(array_size);
- __ lea(edi, Operand(elements_array,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ mov(eax, factory->the_hole_value());
- __ cld();
- // Do not use rep stos when filling less than kRepStosThreshold
- // words.
- const int kRepStosThreshold = 16;
- Label loop, entry, done;
- __ cmp(ecx, kRepStosThreshold);
- __ j(below, &loop); // Note: ecx > 0.
- __ rep_stos();
- __ jmp(&done);
- __ bind(&loop);
- __ stos();
- __ bind(&entry);
- __ cmp(edi, Operand(elements_array_end));
- __ j(below, &loop);
- __ bind(&done);
- }
-}
-
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-// edi: constructor (built-in Array function)
-// eax: argc
-// esp[0]: return address
-// esp[4]: last argument
-// This function is used for both construct and normal calls of Array. Whether
-// it is a construct call or not is indicated by the construct_call parameter.
-// The only difference between handling a construct call and a normal call is
-// that for a construct call the constructor function in edi needs to be
-// preserved for entering the generic code. In both cases argc in eax needs to
-// be preserved.
-static void ArrayNativeCode(MacroAssembler* masm,
- bool construct_call,
- Label* call_generic_code) {
- Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call,
- empty_array, not_empty_array;
-
- // Push the constructor and argc. No need to tag argc as a smi, as there will
- // be no garbage collection with this on the stack.
- int push_count = 0;
- if (construct_call) {
- push_count++;
- __ push(edi);
- }
- push_count++;
- __ push(eax);
-
- // Check for array construction with zero arguments.
- __ test(eax, Operand(eax));
- __ j(not_zero, &argc_one_or_more);
-
- __ bind(&empty_array);
- // Handle construction of an empty array.
- AllocateEmptyJSArray(masm,
- edi,
- eax,
- ebx,
- ecx,
- edi,
- kPreallocatedArrayElements,
- &prepare_generic_code_call);
- __ IncrementCounter(masm->isolate()->counters()->array_function_native(), 1);
- __ pop(ebx);
- if (construct_call) {
- __ pop(edi);
- }
- __ ret(kPointerSize);
-
- // Check for one argument. Bail out if argument is not smi or if it is
- // negative.
- __ bind(&argc_one_or_more);
- __ cmp(eax, 1);
- __ j(not_equal, &argc_two_or_more);
- ASSERT(kSmiTag == 0);
- __ mov(ecx, Operand(esp, (push_count + 1) * kPointerSize));
- __ test(ecx, Operand(ecx));
- __ j(not_zero, &not_empty_array);
-
- // The single argument passed is zero, so we jump to the code above used to
- // handle the case of no arguments passed. To adapt the stack for that we move
- // the return address and the pushed constructor (if pushed) one stack slot up
- // thereby removing the passed argument. Argc is also on the stack - at the
- // bottom - and it needs to be changed from 1 to 0 to have the call into the
- // runtime system work in case a GC is required.
- for (int i = push_count; i > 0; i--) {
- __ mov(eax, Operand(esp, i * kPointerSize));
- __ mov(Operand(esp, (i + 1) * kPointerSize), eax);
- }
- __ add(Operand(esp), Immediate(2 * kPointerSize)); // Drop two stack slots.
- __ push(Immediate(0)); // Treat this as a call with argc of zero.
- __ jmp(&empty_array);
-
- __ bind(&not_empty_array);
- __ test(ecx, Immediate(kIntptrSignBit | kSmiTagMask));
- __ j(not_zero, &prepare_generic_code_call);
-
- // Handle construction of an empty array of a certain size. Get the size from
- // the stack and bail out if size is to large to actually allocate an elements
- // array.
- __ cmp(ecx, JSObject::kInitialMaxFastElementArray << kSmiTagSize);
- __ j(greater_equal, &prepare_generic_code_call);
-
- // edx: array_size (smi)
- // edi: constructor
- // esp[0]: argc (cannot be 0 here)
- // esp[4]: constructor (only if construct_call)
- // esp[8]: return address
- // esp[C]: argument
- AllocateJSArray(masm,
- edi,
- ecx,
- ebx,
- eax,
- edx,
- edi,
- true,
- &prepare_generic_code_call);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->array_function_native(), 1);
- __ mov(eax, ebx);
- __ pop(ebx);
- if (construct_call) {
- __ pop(edi);
- }
- __ ret(2 * kPointerSize);
-
- // Handle construction of an array from a list of arguments.
- __ bind(&argc_two_or_more);
- ASSERT(kSmiTag == 0);
- __ SmiTag(eax); // Convet argc to a smi.
- // eax: array_size (smi)
- // edi: constructor
- // esp[0] : argc
- // esp[4]: constructor (only if construct_call)
- // esp[8] : return address
- // esp[C] : last argument
- AllocateJSArray(masm,
- edi,
- eax,
- ebx,
- ecx,
- edx,
- edi,
- false,
- &prepare_generic_code_call);
- __ IncrementCounter(counters->array_function_native(), 1);
- __ mov(eax, ebx);
- __ pop(ebx);
- if (construct_call) {
- __ pop(edi);
- }
- __ push(eax);
- // eax: JSArray
- // ebx: argc
- // edx: elements_array_end (untagged)
- // esp[0]: JSArray
- // esp[4]: return address
- // esp[8]: last argument
-
- // Location of the last argument
- __ lea(edi, Operand(esp, 2 * kPointerSize));
-
- // Location of the first array element (Parameter fill_with_holes to
- // AllocateJSArrayis false, so the FixedArray is returned in ecx).
- __ lea(edx, Operand(ecx, FixedArray::kHeaderSize - kHeapObjectTag));
-
- // ebx: argc
- // edx: location of the first array element
- // edi: location of the last argument
- // esp[0]: JSArray
- // esp[4]: return address
- // esp[8]: last argument
- Label loop, entry;
- __ mov(ecx, ebx);
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
- __ mov(Operand(edx, 0), eax);
- __ add(Operand(edx), Immediate(kPointerSize));
- __ bind(&entry);
- __ dec(ecx);
- __ j(greater_equal, &loop);
-
- // Remove caller arguments from the stack and return.
- // ebx: argc
- // esp[0]: JSArray
- // esp[4]: return address
- // esp[8]: last argument
- __ pop(eax);
- __ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
- __ push(ecx);
- __ ret(0);
-
- // Restore argc and constructor before running the generic code.
- __ bind(&prepare_generic_code_call);
- __ pop(eax);
- if (construct_call) {
- __ pop(edi);
- }
- __ jmp(call_generic_code);
-}
-
-
-void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
- Label generic_array_code;
-
- // Get the Array function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin Array function shoud be a map.
- __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
- __ test(ebx, Immediate(kSmiTagMask));
- __ Assert(not_zero, "Unexpected initial map for Array function");
- __ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ Assert(equal, "Unexpected initial map for Array function");
- }
-
- // Run the native code for the Array function called as a normal function.
- ArrayNativeCode(masm, false, &generic_array_code);
-
- // Jump to the generic array code in case the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ jmp(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- edi : constructor
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
- Label generic_constructor;
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- // Initial map for the builtin Array function should be a map.
- __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
- __ test(ebx, Immediate(kSmiTagMask));
- __ Assert(not_zero, "Unexpected initial map for Array function");
- __ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ Assert(equal, "Unexpected initial map for Array function");
- }
-
- // Run the native code for the Array function called as constructor.
- ArrayNativeCode(masm, true, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments
- // -- edi : constructor function
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_ctor_calls(), 1);
-
- if (FLAG_debug_code) {
- __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
- __ cmp(edi, Operand(ecx));
- __ Assert(equal, "Unexpected String function");
- }
-
- // Load the first argument into eax and get rid of the rest
- // (including the receiver).
- Label no_arguments;
- __ test(eax, Operand(eax));
- __ j(zero, &no_arguments);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
- __ pop(ecx);
- __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ push(ecx);
- __ mov(eax, ebx);
-
- // Lookup the argument in the number to string cache.
- Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- eax, // Input.
- ebx, // Result.
- ecx, // Scratch 1.
- edx, // Scratch 2.
- false, // Input is known to be smi?
- &not_cached);
- __ IncrementCounter(counters->string_ctor_cached_number(), 1);
- __ bind(&argument_is_string);
- // ----------- S t a t e -------------
- // -- ebx : argument converted to string
- // -- edi : constructor function
- // -- esp[0] : return address
- // -----------------------------------
-
- // Allocate a JSValue and put the tagged pointer into eax.
- Label gc_required;
- __ AllocateInNewSpace(JSValue::kSize,
- eax, // Result.
- ecx, // New allocation top (we ignore it).
- no_reg,
- &gc_required,
- TAG_OBJECT);
-
- // Set the map.
- __ LoadGlobalFunctionInitialMap(edi, ecx);
- if (FLAG_debug_code) {
- __ cmpb(FieldOperand(ecx, Map::kInstanceSizeOffset),
- JSValue::kSize >> kPointerSizeLog2);
- __ Assert(equal, "Unexpected string wrapper instance size");
- __ cmpb(FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset), 0);
- __ Assert(equal, "Unexpected unused properties of string wrapper");
- }
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
-
- // Set properties and elements.
- Factory* factory = masm->isolate()->factory();
- __ Set(ecx, Immediate(factory->empty_fixed_array()));
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), ecx);
-
- // Set the value.
- __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
-
- // Ensure the object is fully initialized.
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
-
- // We're done. Return.
- __ ret(0);
-
- // The argument was not found in the number to string cache. Check
- // if it's a string already before calling the conversion builtin.
- Label convert_argument;
- __ bind(&not_cached);
- STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &convert_argument);
- Condition is_string = masm->IsObjectStringType(eax, ebx, ecx);
- __ j(NegateCondition(is_string), &convert_argument);
- __ mov(ebx, eax);
- __ IncrementCounter(counters->string_ctor_string_value(), 1);
- __ jmp(&argument_is_string);
-
- // Invoke the conversion builtin and put the result into ebx.
- __ bind(&convert_argument);
- __ IncrementCounter(counters->string_ctor_conversions(), 1);
- __ EnterInternalFrame();
- __ push(edi); // Preserve the function.
- __ push(eax);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- __ pop(edi);
- __ LeaveInternalFrame();
- __ mov(ebx, eax);
- __ jmp(&argument_is_string);
-
- // Load the empty string into ebx, remove the receiver from the
- // stack, and jump back to the case where the argument is a string.
- __ bind(&no_arguments);
- __ Set(ebx, Immediate(factory->empty_string()));
- __ pop(ecx);
- __ lea(esp, Operand(esp, kPointerSize));
- __ push(ecx);
- __ jmp(&argument_is_string);
-
- // At this point the argument is already a string. Call runtime to
- // create a string wrapper.
- __ bind(&gc_required);
- __ IncrementCounter(counters->string_ctor_gc_required(), 1);
- __ EnterInternalFrame();
- __ push(ebx);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
- __ LeaveInternalFrame();
- __ ret(0);
-}
-
-
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ push(ebp);
- __ mov(ebp, Operand(esp));
-
- // Store the arguments adaptor context sentinel.
- __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Push the function on the stack.
- __ push(edi);
-
- // Preserve the number of arguments on the stack. Must preserve both
- // eax and ebx because these registers are used when copying the
- // arguments and the receiver.
- ASSERT(kSmiTagSize == 1);
- __ lea(ecx, Operand(eax, eax, times_1, kSmiTag));
- __ push(ecx);
-}
-
-
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
- // Retrieve the number of arguments from the stack.
- __ mov(ebx, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- // Leave the frame.
- __ leave();
-
- // Remove caller arguments from the stack.
- ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
- __ push(ecx);
-}
-
-
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : actual number of arguments
- // -- ebx : expected number of arguments
- // -- edx : code entry to call
- // -----------------------------------
-
- Label invoke, dont_adapt_arguments;
- __ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
-
- Label enough, too_few;
- __ cmp(eax, Operand(ebx));
- __ j(less, &too_few);
- __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
- __ j(equal, &dont_adapt_arguments);
-
- { // Enough parameters: Actual >= expected.
- __ bind(&enough);
- EnterArgumentsAdaptorFrame(masm);
-
- // Copy receiver and all expected arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(eax, Operand(ebp, eax, times_4, offset));
- __ mov(ecx, -1); // account for receiver
-
- Label copy;
- __ bind(&copy);
- __ inc(ecx);
- __ push(Operand(eax, 0));
- __ sub(Operand(eax), Immediate(kPointerSize));
- __ cmp(ecx, Operand(ebx));
- __ j(less, &copy);
- __ jmp(&invoke);
- }
-
- { // Too few parameters: Actual < expected.
- __ bind(&too_few);
- EnterArgumentsAdaptorFrame(masm);
-
- // Copy receiver and all actual arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(edi, Operand(ebp, eax, times_4, offset));
- __ mov(ecx, -1); // account for receiver
-
- Label copy;
- __ bind(&copy);
- __ inc(ecx);
- __ push(Operand(edi, 0));
- __ sub(Operand(edi), Immediate(kPointerSize));
- __ cmp(ecx, Operand(eax));
- __ j(less, &copy);
-
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ bind(&fill);
- __ inc(ecx);
- __ push(Immediate(masm->isolate()->factory()->undefined_value()));
- __ cmp(ecx, Operand(ebx));
- __ j(less, &fill);
-
- // Restore function pointer.
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- }
-
- // Call the entry point.
- __ bind(&invoke);
- __ call(Operand(edx));
-
- // Leave frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ ret(0);
-
- // -------------------------------------------
- // Dont adapt arguments.
- // -------------------------------------------
- __ bind(&dont_adapt_arguments);
- __ jmp(Operand(edx));
-}
-
-
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- CpuFeatures::TryForceFeatureScope scope(SSE2);
- if (!CpuFeatures::IsSupported(SSE2)) {
- __ Abort("Unreachable code: Cannot optimize without SSE2 support.");
- return;
- }
-
- // Get the loop depth of the stack guard check. This is recorded in
- // a test(eax, depth) instruction right after the call.
- Label stack_check;
- __ mov(ebx, Operand(esp, 0)); // return address
- if (FLAG_debug_code) {
- __ cmpb(Operand(ebx, 0), Assembler::kTestAlByte);
- __ Assert(equal, "test eax instruction not found after loop stack check");
- }
- __ movzx_b(ebx, Operand(ebx, 1)); // depth
-
- // Get the loop nesting level at which we allow OSR from the
- // unoptimized code and check if we want to do OSR yet. If not we
- // should perform a stack guard check so we can get interrupts while
- // waiting for on-stack replacement.
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
- __ cmpb(ebx, FieldOperand(ecx, Code::kAllowOSRAtLoopNestingLevelOffset));
- __ j(greater, &stack_check);
-
- // Pass the function to optimize as the argument to the on-stack
- // replacement runtime function.
- __ EnterInternalFrame();
- __ push(eax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
- __ LeaveInternalFrame();
-
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
- NearLabel skip;
- __ cmp(Operand(eax), Immediate(Smi::FromInt(-1)));
- __ j(not_equal, &skip);
- __ ret(0);
-
- // If we decide not to perform on-stack replacement we perform a
- // stack guard check to enable interrupts.
- __ bind(&stack_check);
- NearLabel ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm->isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, taken);
- StackCheckStub stub;
- __ TailCallStub(&stub);
- __ Abort("Unreachable code: returned from tail call.");
- __ bind(&ok);
- __ ret(0);
-
- __ bind(&skip);
- // Untag the AST id and push it on the stack.
- __ SmiUntag(eax);
- __ push(eax);
-
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
-}
-
-
-#undef __
-}
-} // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/code-stubs-ia32.cc b/src/3rdparty/v8/src/ia32/code-stubs-ia32.cc
deleted file mode 100644
index 78daf7c..0000000
--- a/src/3rdparty/v8/src/ia32/code-stubs-ia32.cc
+++ /dev/null
@@ -1,6549 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "code-stubs.h"
-#include "bootstrapper.h"
-#include "jsregexp.h"
-#include "isolate.h"
-#include "regexp-macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in eax.
- NearLabel check_heap_number, call_builtin;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &check_heap_number);
- __ ret(0);
-
- __ bind(&check_heap_number);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- Factory* factory = masm->isolate()->factory();
- __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
- __ j(not_equal, &call_builtin);
- __ ret(0);
-
- __ bind(&call_builtin);
- __ pop(ecx); // Pop return address.
- __ push(eax);
- __ push(ecx); // Push return address.
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
-}
-
-
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in esi.
- Label gc;
- __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
-
- // Get the function info from the stack.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
-
- int map_index = strict_mode_ == kStrictMode
- ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
- : Context::FUNCTION_MAP_INDEX;
-
- // Compute the function map in the current global context and set that
- // as the map of the allocated object.
- __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
- __ mov(ecx, Operand(ecx, Context::SlotOffset(map_index)));
- __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- Factory* factory = masm->isolate()->factory();
- __ mov(ebx, Immediate(factory->empty_fixed_array()));
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
- __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
- Immediate(factory->the_hole_value()));
- __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
- __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
- __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
- __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
- Immediate(factory->undefined_value()));
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ pop(ecx); // Temporarily remove return address.
- __ pop(edx);
- __ push(esi);
- __ push(edx);
- __ push(Immediate(factory->false_value()));
- __ push(ecx); // Restore return address.
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
-}
-
-
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
- eax, ebx, ecx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
-
- // Setup the object header.
- Factory* factory = masm->isolate()->factory();
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), factory->context_map());
- __ mov(FieldOperand(eax, Context::kLengthOffset),
- Immediate(Smi::FromInt(length)));
-
- // Setup the fixed slots.
- __ Set(ebx, Immediate(0)); // Set to NULL.
- __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
- __ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax);
- __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx);
- __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
-
- // Copy the global object from the surrounding context. We go through the
- // context in the function (ecx) to match the allocation behavior we have
- // in the runtime system (see Heap::AllocateFunctionContext).
- __ mov(ebx, FieldOperand(ecx, JSFunction::kContextOffset));
- __ mov(ebx, Operand(ebx, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
-
- // Initialize the rest of the slots to undefined.
- __ mov(ebx, factory->undefined_value());
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
- }
-
- // Return and remove the on-stack parameter.
- __ mov(esi, Operand(eax));
- __ ret(1 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewContext, 1, 1);
-}
-
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [esp + kPointerSize]: constant elements.
- // [esp + (2 * kPointerSize)]: literal index.
- // [esp + (3 * kPointerSize)]: literals array.
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
- int size = JSArray::kSize + elements_size;
-
- // Load boilerplate object into ecx and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ mov(ecx, Operand(esp, 3 * kPointerSize));
- __ mov(eax, Operand(esp, 2 * kPointerSize));
- STATIC_ASSERT(kPointerSize == 4);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
- FixedArray::kHeaderSize));
- Factory* factory = masm->isolate()->factory();
- __ cmp(ecx, factory->undefined_value());
- __ j(equal, &slow_case);
-
- if (FLAG_debug_code) {
- const char* message;
- Handle<Map> expected_map;
- if (mode_ == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map = factory->fixed_array_map();
- } else {
- ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map = factory->fixed_cow_array_map();
- }
- __ push(ecx);
- __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map);
- __ Assert(equal, message);
- __ pop(ecx);
- }
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(eax, i), ebx);
- }
- }
-
- if (length_ > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
- __ lea(edx, Operand(eax, JSArray::kSize));
- __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
-
- // Copy the elements array.
- for (int i = 0; i < elements_size; i += kPointerSize) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(edx, i), ebx);
- }
- }
-
- // Return and remove the on-stack parameters.
- __ ret(3 * kPointerSize);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
-// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- NearLabel false_result, true_result, not_string;
- __ mov(eax, Operand(esp, 1 * kPointerSize));
-
- // 'null' => false.
- Factory* factory = masm->isolate()->factory();
- __ cmp(eax, factory->null_value());
- __ j(equal, &false_result);
-
- // Get the map and type of the heap object.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
-
- // Undetectable => false.
- __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(not_zero, &false_result);
-
- // JavaScript object => true.
- __ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE);
- __ j(above_equal, &true_result);
-
- // String value => false iff empty.
- __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string);
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
- __ j(zero, &false_result);
- __ jmp(&true_result);
-
- __ bind(&not_string);
- // HeapNumber => false iff +0, -0, or NaN.
- __ cmp(edx, factory->heap_number_map());
- __ j(not_equal, &true_result);
- __ fldz();
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ FCmp();
- __ j(zero, &false_result);
- // Fall through to |true_result|.
-
- // Return 1/0 for true/false in eax.
- __ bind(&true_result);
- __ mov(eax, 1);
- __ ret(1 * kPointerSize);
- __ bind(&false_result);
- __ mov(eax, 0);
- __ ret(1 * kPointerSize);
-}
-
-
-const char* GenericBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
- op_name,
- overwrite_name,
- (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
- args_in_registers_ ? "RegArgs" : "StackArgs",
- args_reversed_ ? "_R" : "",
- static_operands_type_.ToString(),
- BinaryOpIC::GetName(runtime_operands_type_));
- return name_;
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ push(right);
- } else {
- // The calling convention with registers is left in edx and right in eax.
- Register left_arg = edx;
- Register right_arg = eax;
- if (!(left.is(left_arg) && right.is(right_arg))) {
- if (left.is(right_arg) && right.is(left_arg)) {
- if (IsOperationCommutative()) {
- SetArgsReversed();
- } else {
- __ xchg(left, right);
- }
- } else if (left.is(left_arg)) {
- __ mov(right_arg, right);
- } else if (right.is(right_arg)) {
- __ mov(left_arg, left);
- } else if (left.is(right_arg)) {
- if (IsOperationCommutative()) {
- __ mov(left_arg, right);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying left argument.
- __ mov(left_arg, left);
- __ mov(right_arg, right);
- }
- } else if (right.is(left_arg)) {
- if (IsOperationCommutative()) {
- __ mov(right_arg, left);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying right argument.
- __ mov(right_arg, right);
- __ mov(left_arg, left);
- }
- } else {
- // Order of moves is not important.
- __ mov(left_arg, left);
- __ mov(right_arg, right);
- }
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(
- masm->isolate()->counters()->generic_binary_stub_calls_regs(), 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Smi* right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ push(Immediate(right));
- } else {
- // The calling convention with registers is left in edx and right in eax.
- Register left_arg = edx;
- Register right_arg = eax;
- if (left.is(left_arg)) {
- __ mov(right_arg, Immediate(right));
- } else if (left.is(right_arg) && IsOperationCommutative()) {
- __ mov(left_arg, Immediate(right));
- SetArgsReversed();
- } else {
- // For non-commutative operations, left and right_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite left before moving
- // it to left_arg.
- __ mov(left_arg, left);
- __ mov(right_arg, Immediate(right));
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(
- masm->isolate()->counters()->generic_binary_stub_calls_regs(), 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Smi* left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(Immediate(left));
- __ push(right);
- } else {
- // The calling convention with registers is left in edx and right in eax.
- Register left_arg = edx;
- Register right_arg = eax;
- if (right.is(right_arg)) {
- __ mov(left_arg, Immediate(left));
- } else if (right.is(left_arg) && IsOperationCommutative()) {
- __ mov(right_arg, Immediate(left));
- SetArgsReversed();
- } else {
- // For non-commutative operations, right and left_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite right before moving
- // it to right_arg.
- __ mov(right_arg, right);
- __ mov(left_arg, Immediate(left));
- }
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->generic_binary_stub_calls_regs(), 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-class FloatingPointHelper : public AllStatic {
- public:
-
- enum ArgLocation {
- ARGS_ON_STACK,
- ARGS_IN_REGISTERS
- };
-
- // Code pattern for loading a floating point value. Input value must
- // be either a smi or a heap number object (fp value). Requirements:
- // operand in register number. Returns operand as floating point number
- // on FPU stack.
- static void LoadFloatOperand(MacroAssembler* masm, Register number);
-
- // Code pattern for loading floating point values. Input values must
- // be either smi or heap number objects (fp values). Requirements:
- // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
- // Returns operands as floating point numbers on FPU stack.
- static void LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location = ARGS_ON_STACK);
-
- // Similar to LoadFloatOperand but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
-
- // Test if operands are smi or number objects (fp). Requirements:
- // operand_1 in eax, operand_2 in edx; falls through on float
- // operands, jumps to the non_float label otherwise.
- static void CheckFloatOperands(MacroAssembler* masm,
- Label* non_float,
- Register scratch);
-
- // Checks that the two floating point numbers on top of the FPU stack
- // have int32 values.
- static void CheckFloatOperandsAreInt32(MacroAssembler* masm,
- Label* non_int32);
-
- // Takes the operands in edx and eax and loads them as integers in eax
- // and ecx.
- static void LoadAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* operand_conversion_failure);
- static void LoadNumbersAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* operand_conversion_failure);
- static void LoadUnknownsAsIntegers(MacroAssembler* masm,
- bool use_sse3,
- Label* operand_conversion_failure);
-
- // Must only be called after LoadUnknownsAsIntegers. Assumes that the
- // operands are pushed on the stack, and that their conversions to int32
- // are in eax and ecx. Checks that the original numbers were in the int32
- // range.
- static void CheckLoadedIntegersWereInt32(MacroAssembler* masm,
- bool use_sse3,
- Label* not_int32);
-
- // Assumes that operands are smis or heap numbers and loads them
- // into xmm0 and xmm1. Operands are in edx and eax.
- // Leaves operands unchanged.
- static void LoadSSE2Operands(MacroAssembler* masm);
-
- // Test if operands are numbers (smi or HeapNumber objects), and load
- // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
- // either operand is not a number. Operands are in edx and eax.
- // Leaves operands unchanged.
- static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
-
- // Similar to LoadSSE2Operands but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
-
- // Checks that the two floating point numbers loaded into xmm0 and xmm1
- // have int32 values.
- static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
- Label* non_int32,
- Register scratch);
-};
-
-
-void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
- // 1. Move arguments into edx, eax except for DIV and MOD, which need the
- // dividend in eax and edx free for the division. Use eax, ebx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = edx;
- Register right = eax;
- if (op_ == Token::DIV || op_ == Token::MOD) {
- left = eax;
- right = ebx;
- if (HasArgsInRegisters()) {
- __ mov(ebx, eax);
- __ mov(eax, edx);
- }
- }
- if (!HasArgsInRegisters()) {
- __ mov(right, Operand(esp, 1 * kPointerSize));
- __ mov(left, Operand(esp, 2 * kPointerSize));
- }
-
- if (static_operands_type_.IsSmi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
- }
- if (op_ == Token::BIT_OR) {
- __ or_(right, Operand(left));
- GenerateReturn(masm);
- return;
- } else if (op_ == Token::BIT_AND) {
- __ and_(right, Operand(left));
- GenerateReturn(masm);
- return;
- } else if (op_ == Token::BIT_XOR) {
- __ xor_(right, Operand(left));
- GenerateReturn(masm);
- return;
- }
- }
-
- // 2. Prepare the smi check of both operands by oring them together.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- Label not_smis;
- Register combined = ecx;
- ASSERT(!left.is(combined) && !right.is(combined));
- switch (op_) {
- case Token::BIT_OR:
- // Perform the operation into eax and smi check the result. Preserve
- // eax in case the result is not a smi.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, Operand(left)); // Bitwise or is commutative.
- combined = right;
- break;
-
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- __ mov(combined, right);
- __ or_(combined, Operand(left));
- break;
-
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Move the right operand into ecx for the shift operation, use eax
- // for the smi check register.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, Operand(left));
- combined = right;
- break;
-
- default:
- break;
- }
-
- // 3. Perform the smi check of the operands.
- STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
- __ test(combined, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smis, not_taken);
-
- // 4. Operands are both smis, perform the operation leaving the result in
- // eax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
- switch (op_) {
- case Token::BIT_OR:
- // Nothing to do.
- break;
-
- case Token::BIT_XOR:
- ASSERT(right.is(eax));
- __ xor_(right, Operand(left)); // Bitwise xor is commutative.
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(eax));
- __ and_(right, Operand(left)); // Bitwise and is commutative.
- break;
-
- case Token::SHL:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shl_cl(left);
- // Check that the *signed* result fits in a smi.
- __ cmp(left, 0xc0000000);
- __ j(sign, &use_fp_on_smis, not_taken);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SAR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ sar_cl(left);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SHR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shr_cl(left);
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging.
- // - 0x40000000: this number would convert to negative when
- // Smi tagging these two cases can only happen with shifts
- // by 0 or 1 when handed a valid smi.
- __ test(left, Immediate(0xc0000000));
- __ j(not_zero, slow, not_taken);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::ADD:
- ASSERT(right.is(eax));
- __ add(right, Operand(left)); // Addition is commutative.
- __ j(overflow, &use_fp_on_smis, not_taken);
- break;
-
- case Token::SUB:
- __ sub(left, Operand(right));
- __ j(overflow, &use_fp_on_smis, not_taken);
- __ mov(eax, left);
- break;
-
- case Token::MUL:
- // If the smi tag is 0 we can just leave the tag on one operand.
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
- // We can't revert the multiplication if the result is not a smi
- // so save the right operand.
- __ mov(ebx, right);
- // Remove tag from one of the operands (but keep sign).
- __ SmiUntag(right);
- // Do multiplication.
- __ imul(right, Operand(left)); // Multiplication is commutative.
- __ j(overflow, &use_fp_on_smis, not_taken);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(right, combined, &use_fp_on_smis);
- break;
-
- case Token::DIV:
- // We can't revert the division if the result is not a smi so
- // save the left operand.
- __ mov(edi, left);
- // Check for 0 divisor.
- __ test(right, Operand(right));
- __ j(zero, &use_fp_on_smis, not_taken);
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for the corner case of dividing the most negative smi by
- // -1. We cannot use the overflow flag, since it is not set by idiv
- // instruction.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmp(eax, 0x40000000);
- __ j(equal, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
- // Check that the remainder is zero.
- __ test(edx, Operand(edx));
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(eax);
- break;
-
- case Token::MOD:
- // Check for 0 divisor.
- __ test(right, Operand(right));
- __ j(zero, &not_smis, not_taken);
-
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(edx, combined, slow);
- // Move remainder to register eax.
- __ mov(eax, edx);
- break;
-
- default:
- UNREACHABLE();
- }
-
- // 5. Emit return of result in eax.
- GenerateReturn(masm);
-
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- switch (op_) {
- case Token::SHL: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
- // Result we want is in left == edx, so we can put the allocated heap
- // number in eax.
- __ AllocateHeapNumber(eax, ecx, ebx, slow);
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(left));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- // It's OK to overwrite the right argument on the stack because we
- // are about to return.
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- GenerateReturn(masm);
- } else {
- ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
- __ jmp(slow);
- }
- break;
- }
-
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Restore arguments to edx, eax.
- switch (op_) {
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, Operand(left));
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, Operand(right));
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division.
- __ mov(edx, edi);
- __ mov(eax, right);
- break;
- default: UNREACHABLE();
- break;
- }
- if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
- __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSSE2Smis(masm, ebx);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::LoadFloatSmis(masm, ebx);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
- }
- __ mov(eax, ecx);
- GenerateReturn(masm);
- } else {
- ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
- __ jmp(slow);
- }
- break;
- }
-
- default:
- break;
- }
-
- // 7. Non-smi operands, fall out to the non-smi code with the operands in
- // edx and eax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(&not_smis);
- switch (op_) {
- case Token::BIT_OR:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Right operand is saved in ecx and eax was destroyed by the smi
- // check.
- __ mov(eax, ecx);
- break;
-
- case Token::DIV:
- case Token::MOD:
- // Operands are in eax, ebx at this point.
- __ mov(edx, eax);
- __ mov(eax, ebx);
- break;
-
- default:
- break;
- }
-}
-
-
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
- Label call_runtime;
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
-
- if (runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI) {
- Label slow;
- if (ShouldGenerateSmiCode()) GenerateSmiCode(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
- }
-
- // Generate fast case smi code if requested. This flag is set when the fast
- // case smi code is not generated by the caller. Generating it here will speed
- // up common operations.
- if (ShouldGenerateSmiCode()) {
- GenerateSmiCode(masm, &call_runtime);
- } else if (op_ != Token::MOD) { // MOD goes straight to runtime.
- if (!HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
- }
-
- // Floating point case.
- if (ShouldGenerateFPCode()) {
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-smi argument occurs
- // (and only if smi code is generated). This is the right moment to
- // patch to HEAP_NUMBERS state. The transition is attempted only for
- // the four basic operations. The stub stays in the DEFAULT state
- // forever for all other operations (also if smi code is skipped).
- GenerateTypeTransition(masm);
- break;
- }
-
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(edx);
- __ AbortIfNotNumber(eax);
- }
- if (static_operands_type_.IsSmi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(edx);
- __ AbortIfNotSmi(eax);
- }
- FloatingPointHelper::LoadSSE2Smis(masm, ecx);
- } else {
- FloatingPointHelper::LoadSSE2Operands(masm);
- }
- } else {
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
- }
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- GenerateHeapResultAllocation(masm, &call_runtime);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- GenerateReturn(masm);
- } else { // SSE2 not available, use FPU.
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(edx);
- __ AbortIfNotNumber(eax);
- }
- } else {
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- }
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- GenerateReturn(masm);
- __ bind(&after_alloc_failure);
- __ ffree();
- __ jmp(&call_runtime);
- }
- __ bind(&not_floats);
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- !HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-number argument
- // occurs (and only if smi code is skipped from the stub, otherwise
- // the patching has already been done earlier in this case branch).
- // Try patching to STRINGS for ADD operation.
- if (op_ == Token::ADD) {
- GenerateTypeTransition(masm);
- }
- }
- break;
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_result;
- FloatingPointHelper::LoadAsIntegers(masm,
- static_operands_type_,
- use_sse3_,
- &call_runtime);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
- case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
- case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- GenerateReturn(masm);
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, Operand(eax)); // ebx: result
- NearLabel skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ebx));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- GenerateReturn(masm);
- }
- break;
- }
- default: UNREACHABLE(); break;
- }
- }
-
- // If all else fails, use the runtime system to get the correct
- // result. If arguments was passed in registers now place them on the
- // stack in the correct order below the return address.
-
- // Avoid hitting the string ADD code below when allocation fails in
- // the floating point code above.
- if (op_ != Token::ADD) {
- __ bind(&call_runtime);
- }
-
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
-
- switch (op_) {
- case Token::ADD: {
- // Test for string arguments before calling runtime.
-
- // If this stub has already generated FP-specific code then the arguments
- // are already in edx, eax
- if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
-
- // Registers containing left and right operands respectively.
- Register lhs, rhs;
- if (HasArgsReversed()) {
- lhs = eax;
- rhs = edx;
- } else {
- lhs = edx;
- rhs = eax;
- }
-
- // Test if left operand is a string.
- NearLabel lhs_not_string;
- __ test(lhs, Immediate(kSmiTagMask));
- __ j(zero, &lhs_not_string);
- __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &lhs_not_string);
-
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- __ TailCallStub(&string_add_left_stub);
-
- NearLabel call_runtime_with_args;
- // Left operand is not a string, test right.
- __ bind(&lhs_not_string);
- __ test(rhs, Immediate(kSmiTagMask));
- __ j(zero, &call_runtime_with_args);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime_with_args);
-
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_runtime);
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
- __ bind(&call_runtime_with_args);
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- }
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure) {
- Label skip_allocation;
- OverwriteMode mode = mode_;
- if (HasArgsReversed()) {
- if (mode == OVERWRITE_RIGHT) {
- mode = OVERWRITE_LEFT;
- } else if (mode == OVERWRITE_LEFT) {
- mode = OVERWRITE_RIGHT;
- }
- }
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in edx is already an object, we skip the
- // allocation of a heap number.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now edx can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(edx, Operand(ebx));
- __ bind(&skip_allocation);
- // Use object in edx as a result holder
- __ mov(eax, Operand(edx));
- break;
- }
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now eax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(eax, ebx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
-}
-
-
-void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
- // If arguments are not passed in registers read them from the stack.
- ASSERT(!HasArgsInRegisters());
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-}
-
-
-void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
- // If arguments are not passed in registers remove them from the stack before
- // returning.
- if (!HasArgsInRegisters()) {
- __ ret(2 * kPointerSize); // Remove both operands
- } else {
- __ ret(0);
- }
-}
-
-
-void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- ASSERT(HasArgsInRegisters());
- __ pop(ecx);
- if (HasArgsReversed()) {
- __ push(eax);
- __ push(edx);
- } else {
- __ push(edx);
- __ push(eax);
- }
- __ push(ecx);
-}
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- // Ensure the operands are on the stack.
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
-
- __ pop(ecx); // Save return address.
-
- // Left and right arguments are now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ push(Immediate(Smi::FromInt(MinorKey())));
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()),
- 5,
- 1);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- GenericBinaryOpStub stub(key, type_info);
- return stub.GetCode();
-}
-
-
-Handle<Code> GetTypeRecordingBinaryOpStub(int key,
- TRBinaryOpIC::TypeInfo type_info,
- TRBinaryOpIC::TypeInfo result_type_info) {
- TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
- return stub.GetCode();
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
- __ push(edx);
- __ push(eax);
- // Left and right arguments are now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ push(Immediate(Smi::FromInt(MinorKey())));
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(operands_type_)));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch),
- masm->isolate()),
- 5,
- 1);
-}
-
-
-// Prepare for a type transition runtime call when the args are already on
-// the stack, under the return address.
-void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
- MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
- // Left and right arguments are already on top of the stack.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ push(Immediate(Smi::FromInt(MinorKey())));
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(operands_type_)));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch),
- masm->isolate()),
- 5,
- 1);
-}
-
-
-void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
- switch (operands_type_) {
- case TRBinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case TRBinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case TRBinaryOpIC::INT32:
- GenerateInt32Stub(masm);
- break;
- case TRBinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case TRBinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case TRBinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case TRBinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-const char* TypeRecordingBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "TypeRecordingBinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- TRBinaryOpIC::GetName(operands_type_));
- return name_;
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
- Label* slow,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
- // 1. Move arguments into edx, eax except for DIV and MOD, which need the
- // dividend in eax and edx free for the division. Use eax, ebx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = edx;
- Register right = eax;
- if (op_ == Token::DIV || op_ == Token::MOD) {
- left = eax;
- right = ebx;
- __ mov(ebx, eax);
- __ mov(eax, edx);
- }
-
-
- // 2. Prepare the smi check of both operands by oring them together.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- Label not_smis;
- Register combined = ecx;
- ASSERT(!left.is(combined) && !right.is(combined));
- switch (op_) {
- case Token::BIT_OR:
- // Perform the operation into eax and smi check the result. Preserve
- // eax in case the result is not a smi.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, Operand(left)); // Bitwise or is commutative.
- combined = right;
- break;
-
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- __ mov(combined, right);
- __ or_(combined, Operand(left));
- break;
-
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Move the right operand into ecx for the shift operation, use eax
- // for the smi check register.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, Operand(left));
- combined = right;
- break;
-
- default:
- break;
- }
-
- // 3. Perform the smi check of the operands.
- STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
- __ test(combined, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smis, not_taken);
-
- // 4. Operands are both smis, perform the operation leaving the result in
- // eax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
- switch (op_) {
- case Token::BIT_OR:
- // Nothing to do.
- break;
-
- case Token::BIT_XOR:
- ASSERT(right.is(eax));
- __ xor_(right, Operand(left)); // Bitwise xor is commutative.
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(eax));
- __ and_(right, Operand(left)); // Bitwise and is commutative.
- break;
-
- case Token::SHL:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shl_cl(left);
- // Check that the *signed* result fits in a smi.
- __ cmp(left, 0xc0000000);
- __ j(sign, &use_fp_on_smis, not_taken);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SAR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ sar_cl(left);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SHR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shr_cl(left);
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging.
- // - 0x40000000: this number would convert to negative when
- // Smi tagging these two cases can only happen with shifts
- // by 0 or 1 when handed a valid smi.
- __ test(left, Immediate(0xc0000000));
- __ j(not_zero, slow, not_taken);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::ADD:
- ASSERT(right.is(eax));
- __ add(right, Operand(left)); // Addition is commutative.
- __ j(overflow, &use_fp_on_smis, not_taken);
- break;
-
- case Token::SUB:
- __ sub(left, Operand(right));
- __ j(overflow, &use_fp_on_smis, not_taken);
- __ mov(eax, left);
- break;
-
- case Token::MUL:
- // If the smi tag is 0 we can just leave the tag on one operand.
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
- // We can't revert the multiplication if the result is not a smi
- // so save the right operand.
- __ mov(ebx, right);
- // Remove tag from one of the operands (but keep sign).
- __ SmiUntag(right);
- // Do multiplication.
- __ imul(right, Operand(left)); // Multiplication is commutative.
- __ j(overflow, &use_fp_on_smis, not_taken);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(right, combined, &use_fp_on_smis);
- break;
-
- case Token::DIV:
- // We can't revert the division if the result is not a smi so
- // save the left operand.
- __ mov(edi, left);
- // Check for 0 divisor.
- __ test(right, Operand(right));
- __ j(zero, &use_fp_on_smis, not_taken);
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for the corner case of dividing the most negative smi by
- // -1. We cannot use the overflow flag, since it is not set by idiv
- // instruction.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmp(eax, 0x40000000);
- __ j(equal, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
- // Check that the remainder is zero.
- __ test(edx, Operand(edx));
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(eax);
- break;
-
- case Token::MOD:
- // Check for 0 divisor.
- __ test(right, Operand(right));
- __ j(zero, &not_smis, not_taken);
-
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(edx, combined, slow);
- // Move remainder to register eax.
- __ mov(eax, edx);
- break;
-
- default:
- UNREACHABLE();
- }
-
- // 5. Emit return of result in eax. Some operations have registers pushed.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- __ ret(0);
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- __ ret(2 * kPointerSize);
- break;
- default:
- UNREACHABLE();
- }
-
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) {
- __ bind(&use_fp_on_smis);
- switch (op_) {
- // Undo the effects of some operations, and some register moves.
- case Token::SHL:
- // The arguments are saved on the stack, and only used from there.
- break;
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, Operand(left));
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, Operand(right));
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division. They should be in eax, ebx for jump to not_smi.
- __ mov(eax, edi);
- break;
- default:
- // No other operators jump to use_fp_on_smis.
- break;
- }
- __ jmp(&not_smis);
- } else {
- ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
- switch (op_) {
- case Token::SHL: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Result we want is in left == edx, so we can put the allocated heap
- // number in eax.
- __ AllocateHeapNumber(eax, ecx, ebx, slow);
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(left));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- // It's OK to overwrite the right argument on the stack because we
- // are about to return.
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize);
- break;
- }
-
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Restore arguments to edx, eax.
- switch (op_) {
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, Operand(left));
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, Operand(right));
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division.
- __ mov(edx, edi);
- __ mov(eax, right);
- break;
- default: UNREACHABLE();
- break;
- }
- __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSSE2Smis(masm, ebx);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::LoadFloatSmis(masm, ebx);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
- }
- __ mov(eax, ecx);
- __ ret(0);
- break;
- }
-
- default:
- break;
- }
- }
-
- // 7. Non-smi operands, fall out to the non-smi code with the operands in
- // edx and eax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(&not_smis);
- switch (op_) {
- case Token::BIT_OR:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Right operand is saved in ecx and eax was destroyed by the smi
- // check.
- __ mov(eax, ecx);
- break;
-
- case Token::DIV:
- case Token::MOD:
- // Operands are in eax, ebx at this point.
- __ mov(edx, eax);
- __ mov(eax, ebx);
- break;
-
- default:
- break;
- }
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label call_runtime;
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateRegisterArgsPush(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
- result_type_ == TRBinaryOpIC::SMI) {
- GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
- } else {
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
- }
- __ bind(&call_runtime);
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- GenerateTypeTransition(masm);
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == TRBinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // Try to add arguments as strings, otherwise, transition to the generic
- // TRBinaryOpIC type.
- GenerateAddStrings(masm);
- GenerateTypeTransition(masm);
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(operands_type_ == TRBinaryOpIC::INT32);
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Label not_floats;
- Label not_int32;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
- FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- // Check result type if it is currently Int32.
- if (result_type_ <= TRBinaryOpIC::INT32) {
- __ cvttsd2si(ecx, Operand(xmm0));
- __ cvtsi2sd(xmm2, Operand(ecx));
- __ ucomisd(xmm0, xmm2);
- __ j(not_zero, &not_int32);
- __ j(carry, &not_int32);
- }
- GenerateHeapResultAllocation(masm, &call_runtime);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- FloatingPointHelper::CheckFloatOperandsAreInt32(masm, &not_int32);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ ffree();
- __ jmp(&call_runtime);
- }
-
- __ bind(&not_floats);
- __ bind(&not_int32);
- GenerateTypeTransition(masm);
- break;
- }
-
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- GenerateRegisterArgsPush(masm);
- Label not_floats;
- Label not_int32;
- Label non_smi_result;
- /* {
- CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
- FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
- }*/
- FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3_,
- &not_floats);
- FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
- &not_int32);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
- case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
- case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, Operand(eax)); // ebx: result
- NearLabel skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ebx));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
- }
-
- __ bind(&not_floats);
- __ bind(&not_int32);
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If an allocation fails, or SHR or MOD hit a hard case,
- // use the runtime system to get the correct result.
- __ bind(&call_runtime);
-
- switch (op_) {
- case Token::ADD:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- Label call_runtime;
-
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- // Convert odd ball arguments to numbers.
- NearLabel check, done;
- __ cmp(edx, FACTORY->undefined_value());
- __ j(not_equal, &check);
- if (Token::IsBitOp(op_)) {
- __ xor_(edx, Operand(edx));
- } else {
- __ mov(edx, Immediate(FACTORY->nan_value()));
- }
- __ jmp(&done);
- __ bind(&check);
- __ cmp(eax, FACTORY->undefined_value());
- __ j(not_equal, &done);
- if (Token::IsBitOp(op_)) {
- __ xor_(eax, Operand(eax));
- } else {
- __ mov(eax, Immediate(FACTORY->nan_value()));
- }
- __ bind(&done);
-
- GenerateHeapNumberStub(masm);
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- Label call_runtime;
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- GenerateHeapResultAllocation(masm, &call_runtime);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ ffree();
- __ jmp(&call_runtime);
- }
-
- __ bind(&not_floats);
- GenerateTypeTransition(masm);
- break;
- }
-
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- GenerateRegisterArgsPush(masm);
- Label not_floats;
- Label non_smi_result;
- FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3_,
- &not_floats);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
- case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
- case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, Operand(eax)); // ebx: result
- NearLabel skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ebx));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
- }
-
- __ bind(&not_floats);
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If an allocation fails, or SHR or MOD hit a hard case,
- // use the runtime system to get the correct result.
- __ bind(&call_runtime);
-
- switch (op_) {
- case Token::ADD:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime;
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateRegisterArgsPush(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- GenerateHeapResultAllocation(masm, &call_runtime);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ ffree();
- __ jmp(&call_runtime);
- }
- __ bind(&not_floats);
- break;
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_result;
- FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3_,
- &call_runtime);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
- case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
- case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop the arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, Operand(eax)); // ebx: result
- NearLabel skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ebx));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize);
- }
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If all else fails, use the runtime system to get the correct
- // result.
- __ bind(&call_runtime);
- switch (op_) {
- case Token::ADD: {
- GenerateAddStrings(masm);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- }
- case Token::SUB:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- NearLabel left_not_string, call_runtime;
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
-
- // Test if left operand is a string.
- __ test(left, Immediate(kSmiTagMask));
- __ j(zero, &left_not_string);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &left_not_string);
-
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ test(right, Immediate(kSmiTagMask));
- __ j(zero, &call_runtime);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime);
-
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_runtime);
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
- MacroAssembler* masm,
- Label* alloc_failure) {
- Label skip_allocation;
- OverwriteMode mode = mode_;
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in edx is already an object, we skip the
- // allocation of a heap number.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now edx can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(edx, Operand(ebx));
- __ bind(&skip_allocation);
- // Use object in edx as a result holder
- __ mov(eax, Operand(edx));
- break;
- }
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now eax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(eax, ebx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ pop(ecx);
- __ push(edx);
- __ push(eax);
- __ push(ecx);
-}
-
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // TAGGED case:
- // Input:
- // esp[4]: tagged number input argument (should be number).
- // esp[0]: return address.
- // Output:
- // eax: tagged double result.
- // UNTAGGED case:
- // Input::
- // esp[0]: return address.
- // xmm1: untagged double input argument
- // Output:
- // xmm1: untagged double result.
-
- Label runtime_call;
- Label runtime_call_clear_stack;
- Label skip_cache;
- const bool tagged = (argument_type_ == TAGGED);
- if (tagged) {
- // Test that eax is a number.
- NearLabel input_not_smi;
- NearLabel loaded;
- __ mov(eax, Operand(esp, kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &input_not_smi);
- // Input is a smi. Untag and load it onto the FPU stack.
- // Then load the low and high words of the double into ebx, edx.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ sar(eax, 1);
- __ sub(Operand(esp), Immediate(2 * kPointerSize));
- __ mov(Operand(esp, 0), eax);
- __ fild_s(Operand(esp, 0));
- __ fst_d(Operand(esp, 0));
- __ pop(edx);
- __ pop(ebx);
- __ jmp(&loaded);
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- Factory* factory = masm->isolate()->factory();
- __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
- __ j(not_equal, &runtime_call);
- // Input is a HeapNumber. Push it on the FPU stack and load its
- // low and high words into ebx, edx.
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
- __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
-
- __ bind(&loaded);
- } else { // UNTAGGED.
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatures::Scope sse4_scope(SSE4_1);
- __ pextrd(Operand(edx), xmm1, 0x1); // copy xmm1[63..32] to edx.
- } else {
- __ pshufd(xmm0, xmm1, 0x1);
- __ movd(Operand(edx), xmm0);
- }
- __ movd(Operand(ebx), xmm1);
- }
-
- // ST[0] or xmm1 == double value
- // ebx = low 32 bits of double value
- // edx = high 32 bits of double value
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ mov(ecx, ebx);
- __ xor_(ecx, Operand(edx));
- __ mov(eax, ecx);
- __ sar(eax, 16);
- __ xor_(ecx, Operand(eax));
- __ mov(eax, ecx);
- __ sar(eax, 8);
- __ xor_(ecx, Operand(eax));
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ and_(Operand(ecx),
- Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // ST[0] or xmm1 == double value.
- // ebx = low 32 bits of double value.
- // edx = high 32 bits of double value.
- // ecx = TranscendentalCache::hash(double value).
- ExternalReference cache_array =
- ExternalReference::transcendental_cache_array_address(masm->isolate());
- __ mov(eax, Immediate(cache_array));
- int cache_array_index =
- type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
- __ mov(eax, Operand(eax, cache_array_index));
- // Eax points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ test(eax, Operand(eax));
- __ j(zero, &runtime_call_clear_stack);
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
-#endif
- // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
- __ lea(ecx, Operand(ecx, ecx, times_2, 0));
- __ lea(ecx, Operand(eax, ecx, times_4, 0));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- NearLabel cache_miss;
- __ cmp(ebx, Operand(ecx, 0));
- __ j(not_equal, &cache_miss);
- __ cmp(edx, Operand(ecx, kIntSize));
- __ j(not_equal, &cache_miss);
- // Cache hit!
- __ mov(eax, Operand(ecx, 2 * kIntSize));
- if (tagged) {
- __ fstp(0);
- __ ret(kPointerSize);
- } else { // UNTAGGED.
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ Ret();
- }
-
- __ bind(&cache_miss);
- // Update cache with new value.
- // We are short on registers, so use no_reg as scratch.
- // This gives slightly larger code.
- if (tagged) {
- __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
- } else { // UNTAGGED.
- __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), xmm1);
- __ fld_d(Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
- }
- GenerateOperation(masm);
- __ mov(Operand(ecx, 0), ebx);
- __ mov(Operand(ecx, kIntSize), edx);
- __ mov(Operand(ecx, 2 * kIntSize), eax);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- if (tagged) {
- __ ret(kPointerSize);
- } else { // UNTAGGED.
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ Ret();
-
- // Skip cache and return answer directly, only in untagged case.
- __ bind(&skip_cache);
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), xmm1);
- __ fld_d(Operand(esp, 0));
- GenerateOperation(masm);
- __ fstp_d(Operand(esp, 0));
- __ movdbl(xmm1, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
- // We return the value in xmm1 without adding it to the cache, but
- // we cause a scavenging GC so that future allocations will succeed.
- __ EnterInternalFrame();
- // Allocate an unused object bigger than a HeapNumber.
- __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- __ LeaveInternalFrame();
- __ Ret();
- }
-
- // Call runtime, doing whatever allocation and cleanup is necessary.
- if (tagged) {
- __ bind(&runtime_call_clear_stack);
- __ fstp(0);
- __ bind(&runtime_call);
- ExternalReference runtime =
- ExternalReference(RuntimeFunction(), masm->isolate());
- __ TailCallExternalReference(runtime, 1, 1);
- } else { // UNTAGGED.
- __ bind(&runtime_call_clear_stack);
- __ bind(&runtime_call);
- __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
- __ EnterInternalFrame();
- __ push(eax);
- __ CallRuntime(RuntimeFunction(), 1);
- __ LeaveInternalFrame();
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ Ret();
- }
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::LOG: return Runtime::kMath_log;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
- // Only free register is edi.
- // Input value is on FP stack, and also in ebx/edx.
- // Input value is possibly in xmm1.
- // Address of result (a newly allocated HeapNumber) may be in eax.
- if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) {
- // Both fsin and fcos require arguments in the range +/-2^63 and
- // return NaN for infinities and NaN. They can share all code except
- // the actual fsin/fcos operation.
- NearLabel in_range, done;
- // If argument is outside the range -2^63..2^63, fsin/cos doesn't
- // work. We must reduce it to the appropriate range.
- __ mov(edi, edx);
- __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only.
- int supported_exponent_limit =
- (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
- __ cmp(Operand(edi), Immediate(supported_exponent_limit));
- __ j(below, &in_range, taken);
- // Check for infinity and NaN. Both return NaN for sin.
- __ cmp(Operand(edi), Immediate(0x7ff00000));
- NearLabel non_nan_result;
- __ j(not_equal, &non_nan_result, taken);
- // Input is +/-Infinity or NaN. Result is NaN.
- __ fstp(0);
- // NaN is represented by 0x7ff8000000000000.
- __ push(Immediate(0x7ff80000));
- __ push(Immediate(0));
- __ fld_d(Operand(esp, 0));
- __ add(Operand(esp), Immediate(2 * kPointerSize));
- __ jmp(&done);
-
- __ bind(&non_nan_result);
-
- // Use fpmod to restrict argument to the range +/-2*PI.
- __ mov(edi, eax); // Save eax before using fnstsw_ax.
- __ fldpi();
- __ fadd(0);
- __ fld(1);
- // FPU Stack: input, 2*pi, input.
- {
- NearLabel no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear if Illegal Operand or Zero Division exceptions are set.
- __ test(Operand(eax), Immediate(5));
- __ j(zero, &no_exceptions);
- __ fnclex();
- __ bind(&no_exceptions);
- }
-
- // Compute st(0) % st(1)
- {
- NearLabel partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem1();
- __ fwait();
- __ fnstsw_ax();
- __ test(Operand(eax), Immediate(0x400 /* C2 */));
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
- }
- // FPU Stack: input, 2*pi, input % 2*pi
- __ fstp(2);
- __ fstp(0);
- __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer).
-
- // FPU Stack: input % 2*pi
- __ bind(&in_range);
- switch (type_) {
- case TranscendentalCache::SIN:
- __ fsin();
- break;
- case TranscendentalCache::COS:
- __ fcos();
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&done);
- } else {
- ASSERT(type_ == TranscendentalCache::LOG);
- __ fldln2();
- __ fxch();
- __ fyl2x();
- }
-}
-
-
-// Get the integer part of a heap number. Surprisingly, all this bit twiddling
-// is faster than using the built-in instructions on floating point registers.
-// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
-// trashed registers.
-void IntegerConvert(MacroAssembler* masm,
- Register source,
- TypeInfo type_info,
- bool use_sse3,
- Label* conversion_failure) {
- ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
- Label done, right_exponent, normal_exponent;
- Register scratch = ebx;
- Register scratch2 = edi;
- if (type_info.IsInteger32() && CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
- return;
- }
- if (!type_info.IsInteger32() || !use_sse3) {
- // Get exponent word.
- __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kExponentMask);
- }
- if (use_sse3) {
- CpuFeatures::Scope scope(SSE3);
- if (!type_info.IsInteger32()) {
- // Check whether the exponent is too big for a 64 bit signed integer.
- static const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
- __ j(greater_equal, conversion_failure);
- }
- // Load x87 register with heap number.
- __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
- // Reserve space for 64 bit answer.
- __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
- // Do conversion, which cannot fail because we checked the exponent.
- __ fisttp_d(Operand(esp, 0));
- __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
- __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
- } else {
- // Load ecx with zero. We use this either for the final shift or
- // for the answer.
- __ xor_(ecx, Operand(ecx));
- // Check whether the exponent matches a 32 bit signed int that cannot be
- // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
- // exponent is 30 (biased). This is the exponent that we are fastest at and
- // also the highest exponent we can handle here.
- const uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
- // If we have a match of the int32-but-not-Smi exponent then skip some
- // logic.
- __ j(equal, &right_exponent);
- // If the exponent is higher than that then go to slow case. This catches
- // numbers that don't fit in a signed int32, infinities and NaNs.
- __ j(less, &normal_exponent);
-
- {
- // Handle a big exponent. The only reason we have this code is that the
- // >>> operator has a tendency to generate numbers with an exponent of 31.
- const uint32_t big_non_smi_exponent =
- (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
- __ j(not_equal, conversion_failure);
- // We have the big exponent, typically from >>>. This means the number is
- // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch2, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just orred in the implicit bit so that took care of one and
- // we want to use the full unsigned range so we subtract 1 bit from the
- // shift distance.
- const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
- __ shl(scratch2, big_shift_distance);
- // Get the second half of the double.
- __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 21 bits to get the most significant 11 bits or the low
- // mantissa word.
- __ shr(ecx, 32 - big_shift_distance);
- __ or_(ecx, Operand(scratch2));
- // We have the answer in ecx, but we may need to negate it.
- __ test(scratch, Operand(scratch));
- __ j(positive, &done);
- __ neg(ecx);
- __ jmp(&done);
- }
-
- __ bind(&normal_exponent);
- // Exponent word in scratch, exponent part of exponent word in scratch2.
- // Zero in ecx.
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
- // it rounds to zero.
- const uint32_t zero_exponent =
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- __ sub(Operand(scratch2), Immediate(zero_exponent));
- // ecx already has a Smi zero.
- __ j(less, &done);
-
- // We have a shifted exponent between 0 and 30 in scratch2.
- __ shr(scratch2, HeapNumber::kExponentShift);
- __ mov(ecx, Immediate(30));
- __ sub(ecx, Operand(scratch2));
-
- __ bind(&right_exponent);
- // Here ecx is the shift, scratch is the exponent word.
- // Get the top bits of the mantissa.
- __ and_(scratch, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We have kExponentShift + 1 significant bits int he low end of the
- // word. Shift them to the top bits.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ shl(scratch, shift_distance);
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the most significant 10 bits or the low
- // mantissa word.
- __ shr(scratch2, 32 - shift_distance);
- __ or_(scratch2, Operand(scratch));
- // Move down according to the exponent.
- __ shr_cl(scratch2);
- // Now the unsigned answer is in scratch2. We need to move it to ecx and
- // we may need to fix the sign.
- NearLabel negative;
- __ xor_(ecx, Operand(ecx));
- __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
- __ j(greater, &negative);
- __ mov(ecx, scratch2);
- __ jmp(&done);
- __ bind(&negative);
- __ sub(ecx, Operand(scratch2));
- __ bind(&done);
- }
-}
-
-
-// Input: edx, eax are the left and right objects of a bit op.
-// Output: eax, ecx are left and right integers for a bit op.
-void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* conversion_failure) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- if (!type_info.IsDouble()) {
- if (!type_info.IsSmi()) {
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &arg1_is_object);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(edx);
- }
- __ SmiUntag(edx);
- __ jmp(&load_arg2);
- }
-
- __ bind(&arg1_is_object);
-
- // Get the untagged integer version of the edx heap number in ecx.
- IntegerConvert(masm, edx, type_info, use_sse3, conversion_failure);
- __ mov(edx, ecx);
-
- // Here edx has the untagged integer, eax has a Smi or a heap number.
- __ bind(&load_arg2);
- if (!type_info.IsDouble()) {
- // Test if arg2 is a Smi.
- if (!type_info.IsSmi()) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &arg2_is_object);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(eax);
- }
- __ SmiUntag(eax);
- __ mov(ecx, eax);
- __ jmp(&done);
- }
-
- __ bind(&arg2_is_object);
-
- // Get the untagged integer version of the eax heap number in ecx.
- IntegerConvert(masm, eax, type_info, use_sse3, conversion_failure);
- __ bind(&done);
- __ mov(eax, edx);
-}
-
-
-// Input: edx, eax are the left and right objects of a bit op.
-// Output: eax, ecx are left and right integers for a bit op.
-void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
- bool use_sse3,
- Label* conversion_failure) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- // Test if arg1 is a Smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &arg1_is_object);
-
- __ SmiUntag(edx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- Factory* factory = masm->isolate()->factory();
- __ cmp(edx, factory->undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(edx, Immediate(0));
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ebx, factory->heap_number_map());
- __ j(not_equal, &check_undefined_arg1);
-
- // Get the untagged integer version of the edx heap number in ecx.
- IntegerConvert(masm,
- edx,
- TypeInfo::Unknown(),
- use_sse3,
- conversion_failure);
- __ mov(edx, ecx);
-
- // Here edx has the untagged integer, eax has a Smi or a heap number.
- __ bind(&load_arg2);
-
- // Test if arg2 is a Smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &arg2_is_object);
-
- __ SmiUntag(eax);
- __ mov(ecx, eax);
- __ jmp(&done);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ cmp(eax, factory->undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(ecx, Immediate(0));
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(ebx, factory->heap_number_map());
- __ j(not_equal, &check_undefined_arg2);
-
- // Get the untagged integer version of the eax heap number in ecx.
- IntegerConvert(masm,
- eax,
- TypeInfo::Unknown(),
- use_sse3,
- conversion_failure);
- __ bind(&done);
- __ mov(eax, edx);
-}
-
-
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* conversion_failure) {
- if (type_info.IsNumber()) {
- LoadNumbersAsIntegers(masm, type_info, use_sse3, conversion_failure);
- } else {
- LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure);
- }
-}
-
-
-void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm,
- bool use_sse3,
- Label* not_int32) {
- return;
-}
-
-
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
- Register number) {
- NearLabel load_smi, done;
-
- __ test(number, Immediate(kSmiTagMask));
- __ j(zero, &load_smi, not_taken);
- __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi);
- __ SmiUntag(number);
- __ push(number);
- __ fild_s(Operand(esp, 0));
- __ pop(number);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
- NearLabel load_smi_edx, load_eax, load_smi_eax, done;
- // Load operand in edx into xmm0.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
-
- __ bind(&load_eax);
- // Load operand in eax into xmm1.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_edx);
- __ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, Operand(edx));
- __ SmiTag(edx); // Retag smi for heap number overwriting test.
- __ jmp(&load_eax);
-
- __ bind(&load_smi_eax);
- __ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, Operand(eax));
- __ SmiTag(eax); // Retag smi for heap number overwriting test.
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
- Label* not_numbers) {
- NearLabel load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
- // Load operand in edx into xmm0, or branch to not_numbers.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
- Factory* factory = masm->isolate()->factory();
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
- __ j(not_equal, not_numbers); // Argument in edx is not a number.
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
- __ bind(&load_eax);
- // Load operand in eax into xmm1, or branch to not_numbers.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset), factory->heap_number_map());
- __ j(equal, &load_float_eax);
- __ jmp(not_numbers); // Argument in eax is not a number.
- __ bind(&load_smi_edx);
- __ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, Operand(edx));
- __ SmiTag(edx); // Retag smi for heap number overwriting test.
- __ jmp(&load_eax);
- __ bind(&load_smi_eax);
- __ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, Operand(eax));
- __ SmiTag(eax); // Retag smi for heap number overwriting test.
- __ jmp(&done);
- __ bind(&load_float_eax);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm0, Operand(scratch));
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm1, Operand(scratch));
-}
-
-
-void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
- Label* non_int32,
- Register scratch) {
- __ cvttsd2si(scratch, Operand(xmm0));
- __ cvtsi2sd(xmm2, Operand(scratch));
- __ ucomisd(xmm0, xmm2);
- __ j(not_zero, non_int32);
- __ j(carry, non_int32);
- __ cvttsd2si(scratch, Operand(xmm1));
- __ cvtsi2sd(xmm2, Operand(scratch));
- __ ucomisd(xmm1, xmm2);
- __ j(not_zero, non_int32);
- __ j(carry, non_int32);
-}
-
-
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location) {
- NearLabel load_smi_1, load_smi_2, done_load_1, done;
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, edx);
- } else {
- __ mov(scratch, Operand(esp, 2 * kPointerSize));
- }
- __ test(scratch, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_1, not_taken);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ bind(&done_load_1);
-
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, eax);
- } else {
- __ mov(scratch, Operand(esp, 1 * kPointerSize));
- }
- __ test(scratch, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_2, not_taken);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_1);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
- __ jmp(&done_load_1);
-
- __ bind(&load_smi_2);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ mov(Operand(esp, 0), scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-}
-
-
-void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
- Label* non_float,
- Register scratch) {
- NearLabel test_other, done;
- // Test if both operands are floats or smi -> scratch=k_is_float;
- // Otherwise scratch = k_not_float.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &test_other, not_taken); // argument in edx is OK
- __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
- Factory* factory = masm->isolate()->factory();
- __ cmp(scratch, factory->heap_number_map());
- __ j(not_equal, non_float); // argument in edx is not a number -> NaN
-
- __ bind(&test_other);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done); // argument in eax is OK
- __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(scratch, factory->heap_number_map());
- __ j(not_equal, non_float); // argument in eax is not a number -> NaN
-
- // Fall-through: Both operands are numbers.
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::CheckFloatOperandsAreInt32(MacroAssembler* masm,
- Label* non_int32) {
- return;
-}
-
-
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- Label slow, done, undo;
-
- if (op_ == Token::SUB) {
- if (include_smi_code_) {
- // Check whether the value is a smi.
- NearLabel try_float;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &try_float, not_taken);
-
- if (negative_zero_ == kStrictNegativeZero) {
- // Go slow case if the value of the expression is zero
- // to make sure that we switch between 0 and -0.
- __ test(eax, Operand(eax));
- __ j(zero, &slow, not_taken);
- }
-
- // The value of the expression is a smi that is not zero. Try
- // optimistic subtraction '0 - value'.
- __ mov(edx, Operand(eax));
- __ Set(eax, Immediate(0));
- __ sub(eax, Operand(edx));
- __ j(overflow, &undo, not_taken);
- __ StubReturn(1);
-
- // Try floating point case.
- __ bind(&try_float);
- } else if (FLAG_debug_code) {
- __ AbortIfSmi(eax);
- }
-
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &slow);
- if (overwrite_ == UNARY_OVERWRITE) {
- __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
- __ xor_(edx, HeapNumber::kSignMask); // Flip sign.
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
- } else {
- __ mov(edx, Operand(eax));
- // edx: operand
- __ AllocateHeapNumber(eax, ebx, ecx, &undo);
- // eax: allocated 'empty' number
- __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
- __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
- __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
- }
- } else if (op_ == Token::BIT_NOT) {
- if (include_smi_code_) {
- Label non_smi;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &non_smi);
- __ not_(eax);
- __ and_(eax, ~kSmiTagMask); // Remove inverted smi-tag.
- __ ret(0);
- __ bind(&non_smi);
- } else if (FLAG_debug_code) {
- __ AbortIfSmi(eax);
- }
-
- // Check if the operand is a heap number.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &slow, not_taken);
-
- // Convert the heap number in eax to an untagged integer in ecx.
- IntegerConvert(masm,
- eax,
- TypeInfo::Unknown(),
- CpuFeatures::IsSupported(SSE3),
- &slow);
-
- // Do the bitwise operation and check if the result fits in a smi.
- NearLabel try_float;
- __ not_(ecx);
- __ cmp(ecx, 0xc0000000);
- __ j(sign, &try_float, not_taken);
-
- // Tag the result as a smi and we're done.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ lea(eax, Operand(ecx, times_2, kSmiTag));
- __ jmp(&done);
-
- // Try to store the result in a heap number.
- __ bind(&try_float);
- if (overwrite_ == UNARY_NO_OVERWRITE) {
- // Allocate a fresh heap number, but don't overwrite eax until
- // we're sure we can do it without going through the slow case
- // that needs the value in eax.
- __ AllocateHeapNumber(ebx, edx, edi, &slow);
- __ mov(eax, Operand(ebx));
- }
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ecx));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ push(ecx);
- __ fild_s(Operand(esp, 0));
- __ pop(ecx);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- } else {
- UNIMPLEMENTED();
- }
-
- // Return from the stub.
- __ bind(&done);
- __ StubReturn(1);
-
- // Restore eax and go slow case.
- __ bind(&undo);
- __ mov(eax, Operand(edx));
-
- // Handle the slow case by jumping to the JavaScript builtin.
- __ bind(&slow);
- __ pop(ecx); // pop return address.
- __ push(eax);
- __ push(ecx); // push return address
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void MathPowStub::Generate(MacroAssembler* masm) {
- // Registers are used as follows:
- // edx = base
- // eax = exponent
- // ecx = temporary, result
-
- CpuFeatures::Scope use_sse2(SSE2);
- Label allocate_return, call_runtime;
-
- // Load input parameters.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- __ mov(eax, Operand(esp, 1 * kPointerSize));
-
- // Save 1 in xmm3 - we need this several times later on.
- __ mov(ecx, Immediate(1));
- __ cvtsi2sd(xmm3, Operand(ecx));
-
- Label exponent_nonsmi;
- Label base_nonsmi;
- // If the exponent is a heap number go to that specific case.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &exponent_nonsmi);
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &base_nonsmi);
-
- // Optimized version when both exponent and base are smis.
- Label powi;
- __ SmiUntag(edx);
- __ cvtsi2sd(xmm0, Operand(edx));
- __ jmp(&powi);
- // exponent is smi and base is a heapnumber.
- __ bind(&base_nonsmi);
- Factory* factory = masm->isolate()->factory();
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(not_equal, &call_runtime);
-
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
-
- // Optimized version of pow if exponent is a smi.
- // xmm0 contains the base.
- __ bind(&powi);
- __ SmiUntag(eax);
-
- // Save exponent in base as we need to check if exponent is negative later.
- // We know that base and exponent are in different registers.
- __ mov(edx, eax);
-
- // Get absolute value of exponent.
- NearLabel no_neg;
- __ cmp(eax, 0);
- __ j(greater_equal, &no_neg);
- __ neg(eax);
- __ bind(&no_neg);
-
- // Load xmm1 with 1.
- __ movsd(xmm1, xmm3);
- NearLabel while_true;
- NearLabel no_multiply;
-
- __ bind(&while_true);
- __ shr(eax, 1);
- __ j(not_carry, &no_multiply);
- __ mulsd(xmm1, xmm0);
- __ bind(&no_multiply);
- __ mulsd(xmm0, xmm0);
- __ j(not_zero, &while_true);
-
- // base has the original value of the exponent - if the exponent is
- // negative return 1/result.
- __ test(edx, Operand(edx));
- __ j(positive, &allocate_return);
- // Special case if xmm1 has reached infinity.
- __ mov(ecx, Immediate(0x7FB00000));
- __ movd(xmm0, Operand(ecx));
- __ cvtss2sd(xmm0, xmm0);
- __ ucomisd(xmm0, xmm1);
- __ j(equal, &call_runtime);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
-
- // exponent (or both) is a heapnumber - no matter what we should now work
- // on doubles.
- __ bind(&exponent_nonsmi);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(not_equal, &call_runtime);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- // Test if exponent is nan.
- __ ucomisd(xmm1, xmm1);
- __ j(parity_even, &call_runtime);
-
- NearLabel base_not_smi;
- NearLabel handle_special_cases;
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &base_not_smi);
- __ SmiUntag(edx);
- __ cvtsi2sd(xmm0, Operand(edx));
- __ jmp(&handle_special_cases);
-
- __ bind(&base_not_smi);
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(not_equal, &call_runtime);
- __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ and_(ecx, HeapNumber::kExponentMask);
- __ cmp(Operand(ecx), Immediate(HeapNumber::kExponentMask));
- // base is NaN or +/-Infinity
- __ j(greater_equal, &call_runtime);
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
-
- // base is in xmm0 and exponent is in xmm1.
- __ bind(&handle_special_cases);
- NearLabel not_minus_half;
- // Test for -0.5.
- // Load xmm2 with -0.5.
- __ mov(ecx, Immediate(0xBF000000));
- __ movd(xmm2, Operand(ecx));
- __ cvtss2sd(xmm2, xmm2);
- // xmm2 now has -0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &not_minus_half);
-
- // Calculates reciprocal of square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorpd(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
-
- // Test for 0.5.
- __ bind(&not_minus_half);
- // Load xmm2 with 0.5.
- // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
- __ addsd(xmm2, xmm3);
- // xmm2 now has 0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &call_runtime);
- // Calculates square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorpd(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
-
- __ bind(&allocate_return);
- __ AllocateHeapNumber(ecx, eax, edx, &call_runtime);
- __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm1);
- __ mov(eax, ecx);
- __ ret(2 * kPointerSize);
-
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The key is in edx and the parameter count is in eax.
-
- // The displacement is used for skipping the frame pointer on the
- // stack. It is the offset of the last parameter (if any) relative
- // to the frame pointer.
- static const int kDisplacement = 1 * kPointerSize;
-
- // Check that the key is a smi.
- Label slow;
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow, not_taken);
-
- // Check if the calling frame is an arguments adaptor frame.
- NearLabel adaptor;
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor);
-
- // Check index against formal parameters count limit passed in
- // through register eax. Use unsigned comparison to get negative
- // check for free.
- __ cmp(edx, Operand(eax));
- __ j(above_equal, &slow, not_taken);
-
- // Read the argument from the stack and return it.
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
- __ lea(ebx, Operand(ebp, eax, times_2, 0));
- __ neg(edx);
- __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
- __ ret(0);
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmp(edx, Operand(ecx));
- __ j(above_equal, &slow, not_taken);
-
- // Read the argument from the stack and return it.
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
- __ lea(ebx, Operand(ebx, ecx, times_2, 0));
- __ neg(edx);
- __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
- __ ret(0);
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ pop(ebx); // Return address.
- __ push(edx);
- __ push(ebx);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
- // esp[0] : return address
- // esp[4] : number of parameters
- // esp[8] : receiver displacement
- // esp[16] : function
-
- // The displacement is used for skipping the return address and the
- // frame pointer on the stack. It is the offset of the last
- // parameter (if any) relative to the frame pointer.
- static const int kDisplacement = 2 * kPointerSize;
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor_frame);
-
- // Get the length from the frame.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ jmp(&try_allocate);
-
- // Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ mov(Operand(esp, 1 * kPointerSize), ecx);
- __ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
- __ mov(Operand(esp, 2 * kPointerSize), edx);
-
- // Try the new space allocation. Start out with computing the size of
- // the arguments object and the elements array.
- NearLabel add_arguments_object;
- __ bind(&try_allocate);
- __ test(ecx, Operand(ecx));
- __ j(zero, &add_arguments_object);
- __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
- __ bind(&add_arguments_object);
- __ add(Operand(ecx), Immediate(GetArgumentsObjectSize()));
-
- // Do the allocation of both objects in one go.
- __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
-
- // Get the arguments boilerplate from the current (global) context.
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
- __ mov(edi, Operand(edi,
- Context::SlotOffset(GetArgumentsBoilerplateIndex())));
-
- // Copy the JS object part.
- for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ mov(ebx, FieldOperand(edi, i));
- __ mov(FieldOperand(eax, i), ebx);
- }
-
- if (type_ == NEW_NON_STRICT) {
- // Setup the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ mov(ebx, Operand(esp, 3 * kPointerSize));
- __ mov(FieldOperand(eax, JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize),
- ebx);
- }
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ mov(FieldOperand(eax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize),
- ecx);
-
- // If there are no actual arguments, we're done.
- Label done;
- __ test(ecx, Operand(ecx));
- __ j(zero, &done);
-
- // Get the parameters pointer from the stack.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-
- // Setup the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ lea(edi, Operand(eax, GetArgumentsObjectSize()));
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
- __ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_array_map()));
-
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
- // Untag the length for the loop below.
- __ SmiUntag(ecx);
-
- // Copy the fixed array slots.
- NearLabel loop;
- __ bind(&loop);
- __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
- __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
- __ add(Operand(edi), Immediate(kPointerSize));
- __ sub(Operand(edx), Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &loop);
-
- // Return and remove the on-stack parameters.
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void RegExpExecStub::Generate(MacroAssembler* masm) {
- // Just jump directly to runtime if native RegExp is not selected at compile
- // time or if regexp entry in generated code is turned off runtime switch or
- // at compilation.
-#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else // V8_INTERPRETED_REGEXP
- if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
- return;
- }
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: last_match_info (expected JSArray)
- // esp[8]: previous index
- // esp[12]: subject string
- // esp[16]: JSRegExp object
-
- static const int kLastMatchInfoOffset = 1 * kPointerSize;
- static const int kPreviousIndexOffset = 2 * kPointerSize;
- static const int kSubjectOffset = 3 * kPointerSize;
- static const int kJSRegExpOffset = 4 * kPointerSize;
-
- Label runtime, invoke_regexp;
-
- // Ensure that a RegExp stack is allocated.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(
- masm->isolate());
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
- __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ test(ebx, Operand(ebx));
- __ j(zero, &runtime, not_taken);
-
- // Check that the first argument is a JSRegExp object.
- __ mov(eax, Operand(esp, kJSRegExpOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
- __ j(not_equal, &runtime);
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- __ test(ecx, Immediate(kSmiTagMask));
- __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
- __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
- __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
- }
-
- // ecx: RegExp data (FixedArray)
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
- __ j(not_equal, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // Check that the number of captures fit in the static offsets vector buffer.
- __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2. This
- // uses the asumption that smis are 2 * their untagged value.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(Operand(edx), Immediate(2)); // edx was a smi.
- // Check that the static offsets vector buffer is large enough.
- __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
- __ j(above, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // edx: Number of capture registers
- // Check that the second argument is a string.
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
- __ j(NegateCondition(is_string), &runtime);
- // Get the length of the string to ebx.
- __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
-
- // ebx: Length of subject string as a smi
- // ecx: RegExp data (FixedArray)
- // edx: Number of capture registers
- // Check that the third argument is a positive smi less than the subject
- // string length. A negative value will be greater (unsigned comparison).
- __ mov(eax, Operand(esp, kPreviousIndexOffset));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime);
- __ cmp(eax, Operand(ebx));
- __ j(above_equal, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // edx: Number of capture registers
- // Check that the fourth object is a JSArray object.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
- __ j(not_equal, &runtime);
- // Check that the JSArray is in fast case.
- __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
- __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
- Factory* factory = masm->isolate()->factory();
- __ cmp(eax, factory->fixed_array_map());
- __ j(not_equal, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ SmiUntag(eax);
- __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmp(edx, Operand(eax));
- __ j(greater, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_ascii_string, seq_two_byte_string, check_code;
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- // First check for flat two byte string.
- __ and_(ebx,
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
- STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string);
- // Any other flat string must be a flat ascii string.
- __ test(Operand(ebx),
- Immediate(kIsNotStringMask | kStringRepresentationMask));
- __ j(zero, &seq_ascii_string);
-
- // Check for flat cons string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
- __ test(Operand(ebx),
- Immediate(kIsNotStringMask | kExternalStringTag));
- __ j(not_zero, &runtime);
- // String is a cons string.
- __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
- __ cmp(Operand(edx), factory->empty_string());
- __ j(not_equal, &runtime);
- __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- // String is a cons string with empty second part.
- // eax: first part of cons string.
- // ebx: map of first part of cons string.
- // Is first part a flat two byte string?
- __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
- kStringRepresentationMask | kStringEncodingMask);
- STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string);
- // Any other flat string must be ascii.
- __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
- kStringRepresentationMask);
- __ j(not_zero, &runtime);
-
- __ bind(&seq_ascii_string);
- // eax: subject string (flat ascii)
- // ecx: RegExp data (FixedArray)
- __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
- __ Set(edi, Immediate(1)); // Type is ascii.
- __ jmp(&check_code);
-
- __ bind(&seq_two_byte_string);
- // eax: subject string (flat two byte)
- // ecx: RegExp data (FixedArray)
- __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
- __ Set(edi, Immediate(0)); // Type is two byte.
-
- __ bind(&check_code);
- // Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object otherwise it contains
- // the hole.
- __ CmpObjectType(edx, CODE_TYPE, ebx);
- __ j(not_equal, &runtime);
-
- // eax: subject string
- // edx: code
- // edi: encoding of subject string (1 if ascii, 0 if two_byte);
- // Load used arguments before starting to push arguments for call to native
- // RegExp code to avoid handling changing stack height.
- __ mov(ebx, Operand(esp, kPreviousIndexOffset));
- __ SmiUntag(ebx); // Previous index from smi.
-
- // eax: subject string
- // ebx: previous index
- // edx: code
- // edi: encoding of subject string (1 if ascii 0 if two_byte);
- // All checks done. Now push arguments for native regexp code.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->regexp_entry_native(), 1);
-
- // Isolates: note we add an additional parameter here (isolate pointer).
- static const int kRegExpExecuteArguments = 8;
- __ EnterApiExitFrame(kRegExpExecuteArguments);
-
- // Argument 8: Pass current isolate address.
- __ mov(Operand(esp, 7 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
-
- // Argument 7: Indicate that this is a direct call from JavaScript.
- __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
-
- // Argument 6: Start (high end) of backtracking stack memory area.
- __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
- __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ mov(Operand(esp, 5 * kPointerSize), ecx);
-
- // Argument 5: static offsets vector buffer.
- __ mov(Operand(esp, 4 * kPointerSize),
- Immediate(ExternalReference::address_of_static_offsets_vector(
- masm->isolate())));
-
- // Argument 4: End of string data
- // Argument 3: Start of string data
- NearLabel setup_two_byte, setup_rest;
- __ test(edi, Operand(edi));
- __ mov(edi, FieldOperand(eax, String::kLengthOffset));
- __ j(zero, &setup_two_byte);
- __ SmiUntag(edi);
- __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
- __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
- __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
- __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
- __ jmp(&setup_rest);
-
- __ bind(&setup_two_byte);
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1); // edi is smi (powered by 2).
- __ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize));
- __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
- __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
- __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
-
- __ bind(&setup_rest);
-
- // Argument 2: Previous index.
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
-
- // Argument 1: Subject string.
- __ mov(Operand(esp, 0 * kPointerSize), eax);
-
- // Locate the code entry and call it.
- __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(Operand(edx));
-
- // Drop arguments and come back to JS mode.
- __ LeaveApiExitFrame();
-
- // Check the result.
- Label success;
- __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
- __ j(equal, &success, taken);
- Label failure;
- __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
- __ j(equal, &failure, taken);
- __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
- // If not exception it can only be retry. Handle that in the runtime system.
- __ j(not_equal, &runtime);
- // Result must now be exception. If there is no pending exception already a
- // stack overflow (on the backtrack stack) was detected in RegExp code but
- // haven't created the exception yet. Handle that in the runtime system.
- // TODO(592): Rerunning the RegExp to get the stack overflow exception.
- ExternalReference pending_exception(Isolate::k_pending_exception_address,
- masm->isolate());
- __ mov(edx,
- Operand::StaticVariable(ExternalReference::the_hole_value_location(
- masm->isolate())));
- __ mov(eax, Operand::StaticVariable(pending_exception));
- __ cmp(edx, Operand(eax));
- __ j(equal, &runtime);
- // For exception, throw the exception again.
-
- // Clear the pending exception variable.
- __ mov(Operand::StaticVariable(pending_exception), edx);
-
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- __ cmp(eax, factory->termination_exception());
- Label throw_termination_exception;
- __ j(equal, &throw_termination_exception);
-
- // Handle normal exception by following handler chain.
- __ Throw(eax);
-
- __ bind(&throw_termination_exception);
- __ ThrowUncatchable(TERMINATION, eax);
-
- __ bind(&failure);
- // For failure to match, return null.
- __ mov(Operand(eax), factory->null_value());
- __ ret(4 * kPointerSize);
-
- // Load RegExp data.
- __ bind(&success);
- __ mov(eax, Operand(esp, kJSRegExpOffset));
- __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
- __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(Operand(edx), Immediate(2)); // edx was a smi.
-
- // edx: Number of capture registers
- // Load last_match_info which is still known to be a fast case JSArray.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
-
- // ebx: last_match_info backing store (FixedArray)
- // edx: number of capture registers
- // Store the capture count.
- __ SmiTag(edx); // Number of capture registers to smi.
- __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
- __ SmiUntag(edx); // Number of capture registers back from smi.
- // Store last subject and last input.
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
- __ mov(ecx, ebx);
- __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
- __ mov(ecx, ebx);
- __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
-
- // Get the static offsets vector filled by the native regexp code.
- ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector(masm->isolate());
- __ mov(ecx, Immediate(address_of_static_offsets_vector));
-
- // ebx: last_match_info backing store (FixedArray)
- // ecx: offsets vector
- // edx: number of capture registers
- NearLabel next_capture, done;
- // Capture register counter starts from number of capture registers and
- // counts down until wraping after zero.
- __ bind(&next_capture);
- __ sub(Operand(edx), Immediate(1));
- __ j(negative, &done);
- // Read the value from the static offsets vector buffer.
- __ mov(edi, Operand(ecx, edx, times_int_size, 0));
- __ SmiTag(edi);
- // Store the smi value in the last match info.
- __ mov(FieldOperand(ebx,
- edx,
- times_pointer_size,
- RegExpImpl::kFirstCaptureOffset),
- edi);
- __ jmp(&next_capture);
- __ bind(&done);
-
- // Return last match info.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ ret(4 * kPointerSize);
-
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#endif // V8_INTERPRETED_REGEXP
-}
-
-
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- const int kMaxInlineLength = 100;
- Label slowcase;
- NearLabel done;
- __ mov(ebx, Operand(esp, kPointerSize * 3));
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(not_zero, &slowcase);
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
- __ j(above, &slowcase);
- // Smi-tagging is equivalent to multiplying by 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- // Allocate RegExpResult followed by FixedArray with size in ebx.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
- times_half_pointer_size,
- ebx, // In: Number of elements (times 2, being a smi)
- eax, // Out: Start of allocation (tagged).
- ecx, // Out: End of allocation.
- edx, // Scratch register
- &slowcase,
- TAG_OBJECT);
- // eax: Start of allocated area, object-tagged.
-
- // Set JSArray map to global.regexp_result_map().
- // Set empty properties FixedArray.
- // Set elements to point to FixedArray allocated right after the JSArray.
- // Interleave operations for better latency.
- __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
- Factory* factory = masm->isolate()->factory();
- __ mov(ecx, Immediate(factory->empty_fixed_array()));
- __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
- __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);
-
- // Set input, index and length fields from arguments.
- __ mov(ecx, Operand(esp, kPointerSize * 1));
- __ mov(FieldOperand(eax, JSRegExpResult::kInputOffset), ecx);
- __ mov(ecx, Operand(esp, kPointerSize * 2));
- __ mov(FieldOperand(eax, JSRegExpResult::kIndexOffset), ecx);
- __ mov(ecx, Operand(esp, kPointerSize * 3));
- __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);
-
- // Fill out the elements FixedArray.
- // eax: JSArray.
- // ebx: FixedArray.
- // ecx: Number of elements in array, as smi.
-
- // Set map.
- __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(factory->fixed_array_map()));
- // Set length.
- __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
- // Fill contents of fixed-array with the-hole.
- __ SmiUntag(ecx);
- __ mov(edx, Immediate(factory->the_hole_value()));
- __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
- // Fill fixed array elements with hole.
- // eax: JSArray.
- // ecx: Number of elements to fill.
- // ebx: Start of elements in FixedArray.
- // edx: the hole.
- Label loop;
- __ test(ecx, Operand(ecx));
- __ bind(&loop);
- __ j(less_equal, &done); // Jump if ecx is negative or zero.
- __ sub(Operand(ecx), Immediate(1));
- __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
- __ jmp(&loop);
-
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- __ bind(&slowcase);
- __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
-}
-
-
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- ExternalReference roots_address =
- ExternalReference::roots_address(masm->isolate());
- __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
- __ mov(number_string_cache,
- Operand::StaticArray(scratch, times_pointer_size, roots_address));
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
- __ sub(Operand(mask), Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- NearLabel smi_hash_calculated;
- NearLabel load_result_from_cache;
- if (object_is_smi) {
- __ mov(scratch, object);
- __ SmiUntag(scratch);
- } else {
- NearLabel not_smi, hash_calculated;
- STATIC_ASSERT(kSmiTag == 0);
- __ test(object, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smi);
- __ mov(scratch, object);
- __ SmiUntag(scratch);
- __ jmp(&smi_hash_calculated);
- __ bind(&not_smi);
- __ cmp(FieldOperand(object, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, not_found);
- STATIC_ASSERT(8 == kDoubleSize);
- __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- // Object is heap number and hash is now in scratch. Calculate cache index.
- __ and_(scratch, Operand(mask));
- Register index = scratch;
- Register probe = mask;
- __ mov(probe,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ test(probe, Immediate(kSmiTagMask));
- __ j(zero, not_found);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- } else {
- __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
- __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
- __ FCmp();
- }
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache);
- }
-
- __ bind(&smi_hash_calculated);
- // Object is smi and hash is now in scratch. Calculate cache index.
- __ and_(scratch, Operand(mask));
- Register index = scratch;
- // Check if the entry is the smi we are looking for.
- __ cmp(object,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ mov(result,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->number_to_string_native(), 1);
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ mov(ebx, Operand(esp, kPointerSize));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
-static int NegativeComparisonResult(Condition cc) {
- ASSERT(cc != equal);
- ASSERT((cc == less) || (cc == less_equal)
- || (cc == greater) || (cc == greater_equal));
- return (cc == greater || cc == greater_equal) ? LESS : GREATER;
-}
-
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-
- Label check_unequal_objects, done;
-
- // Compare two smis if required.
- if (include_smi_compare_) {
- Label non_smi, smi_done;
- __ mov(ecx, Operand(edx));
- __ or_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &non_smi, not_taken);
- __ sub(edx, Operand(eax)); // Return on the result of the subtraction.
- __ j(no_overflow, &smi_done);
- __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
- __ bind(&smi_done);
- __ mov(eax, edx);
- __ ret(0);
- __ bind(&non_smi);
- } else if (FLAG_debug_code) {
- __ mov(ecx, Operand(edx));
- __ or_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_zero, "Unexpected smi operands.");
- }
-
- // NOTICE! This code is only reached after a smi-fast-case check, so
- // it is certain that at least one operand isn't a smi.
-
- // Identical objects can be compared fast, but there are some tricky cases
- // for NaN and undefined.
- {
- Label not_identical;
- __ cmp(eax, Operand(edx));
- __ j(not_equal, &not_identical);
-
- if (cc_ != equal) {
- // Check for undefined. undefined OP undefined is false even though
- // undefined == undefined.
- NearLabel check_for_nan;
- __ cmp(edx, masm->isolate()->factory()->undefined_value());
- __ j(not_equal, &check_for_nan);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
- __ ret(0);
- __ bind(&check_for_nan);
- }
-
- // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
- // so we do the second best thing - test it ourselves.
- // Note: if cc_ != equal, never_nan_nan_ is not used.
- if (never_nan_nan_ && (cc_ == equal)) {
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
- } else {
- NearLabel heap_number;
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
- __ j(equal, &heap_number);
- if (cc_ != equal) {
- // Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(above_equal, &not_identical);
- }
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if
- // it's not NaN.
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // We only accept QNaNs, which have bit 51 set.
- // Read top bits of double representation (second word of value).
-
- // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
- // all bits in the mask are set. We only need to check the word
- // that contains the exponent and high bit of the mantissa.
- STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
- __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ Set(eax, Immediate(0));
- // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
- // bits.
- __ add(edx, Operand(edx));
- __ cmp(edx, kQuietNaNHighBitsMask << 1);
- if (cc_ == equal) {
- STATIC_ASSERT(EQUAL != 1);
- __ setcc(above_equal, eax);
- __ ret(0);
- } else {
- NearLabel nan;
- __ j(above_equal, &nan);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
- __ bind(&nan);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
- __ ret(0);
- }
- }
-
- __ bind(&not_identical);
- }
-
- // Strict equality can quickly decide whether objects are equal.
- // Non-strict object equality is slower, so it is handled later in the stub.
- if (cc_ == equal && strict_) {
- Label slow; // Fallthrough label.
- NearLabel not_smis;
- // If we're doing a strict equality comparison, we don't have to do
- // type conversion, so we generate code to do fast comparison for objects
- // and oddballs. Non-smi numbers and strings still go through the usual
- // slow-case code.
- // If either is a Smi (we know that not both are), then they can only
- // be equal if the other is a HeapNumber. If so, use the slow case.
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(0, Smi::FromInt(0));
- __ mov(ecx, Immediate(kSmiTagMask));
- __ and_(ecx, Operand(eax));
- __ test(ecx, Operand(edx));
- __ j(not_zero, &not_smis);
- // One operand is a smi.
-
- // Check whether the non-smi is a heap number.
- STATIC_ASSERT(kSmiTagMask == 1);
- // ecx still holds eax & kSmiTag, which is either zero or one.
- __ sub(Operand(ecx), Immediate(0x01));
- __ mov(ebx, edx);
- __ xor_(ebx, Operand(eax));
- __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
- __ xor_(ebx, Operand(eax));
- // if eax was smi, ebx is now edx, else eax.
-
- // Check if the non-smi operand is a heap number.
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
- // If heap number, handle it in the slow case.
- __ j(equal, &slow);
- // Return non-equal (ebx is not zero)
- __ mov(eax, ebx);
- __ ret(0);
-
- __ bind(&not_smis);
- // If either operand is a JSObject or an oddball value, then they are not
- // equal since their pointers are different
- // There is no test for undetectability in strict equality.
-
- // Get the type of the first operand.
- // If the first object is a JS object, we have done pointer comparison.
- NearLabel first_non_object;
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(below, &first_non_object);
-
- // Return non-zero (eax is not zero)
- NearLabel return_not_equal;
- STATIC_ASSERT(kHeapObjectTag != 0);
- __ bind(&return_not_equal);
- __ ret(0);
-
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(ecx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(above_equal, &return_not_equal);
-
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(ecx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- // Fall through to the general case.
- __ bind(&slow);
- }
-
- // Generate the number comparison code.
- if (include_number_compare_) {
- Label non_number_comparison;
- Label unordered;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- CpuFeatures::Scope use_cmov(CMOV);
-
- FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, not_taken);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ mov(eax, 0); // equal
- __ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, Operand(ecx));
- __ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, Operand(ecx));
- __ ret(0);
- } else {
- FloatingPointHelper::CheckFloatOperands(
- masm, &non_number_comparison, ebx);
- FloatingPointHelper::LoadFloatOperand(masm, eax);
- FloatingPointHelper::LoadFloatOperand(masm, edx);
- __ FCmp();
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, not_taken);
-
- NearLabel below_label, above_label;
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ j(below, &below_label, not_taken);
- __ j(above, &above_label, not_taken);
-
- __ Set(eax, Immediate(0));
- __ ret(0);
-
- __ bind(&below_label);
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- __ ret(0);
-
- __ bind(&above_label);
- __ mov(eax, Immediate(Smi::FromInt(1)));
- __ ret(0);
- }
-
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc_ != not_equal);
- if (cc_ == less || cc_ == less_equal) {
- __ mov(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- }
- __ ret(0);
-
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
- }
-
- // Fast negative check for symbol-to-symbol equality.
- Label check_for_strings;
- if (cc_ == equal) {
- BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
- BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
-
- // We've already checked for object identity, so if both operands
- // are symbols they aren't equal. Register eax already holds a
- // non-zero value, which indicates not equal, so just return.
- __ ret(0);
- }
-
- __ bind(&check_for_strings);
-
- __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
- &check_unequal_objects);
-
- // Inline comparison of ascii strings.
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- edx,
- eax,
- ecx,
- ebx,
- edi);
-#ifdef DEBUG
- __ Abort("Unexpected fall-through from string comparison");
-#endif
-
- __ bind(&check_unequal_objects);
- if (cc_ == equal && !strict_) {
- // Non-strict equality. Objects are unequal if
- // they are both JSObjects and not undetectable,
- // and their pointers are different.
- NearLabel not_both_objects;
- NearLabel return_unequal;
- // At most one is a smi, so we can test for smi by adding the two.
- // A smi plus a heap object has the low bit set, a heap object plus
- // a heap object has the low bit clear.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagMask == 1);
- __ lea(ecx, Operand(eax, edx, times_1, 0));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &not_both_objects);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(below, &not_both_objects);
- __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx);
- __ j(below, &not_both_objects);
- // We do not bail out after this point. Both are JSObjects, and
- // they are equal if and only if both are undetectable.
- // The and of the undetectable flags is 1 if and only if they are equal.
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(zero, &return_unequal);
- __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(zero, &return_unequal);
- // The objects are both undetectable, so they both compare as the value
- // undefined, and are equal.
- __ Set(eax, Immediate(EQUAL));
- __ bind(&return_unequal);
- // Return non-equal by returning the non-zero object pointer in eax,
- // or return equal if we fell through to here.
- __ ret(0); // rax, rdx were pushed
- __ bind(&not_both_objects);
- }
-
- // Push arguments below the return address.
- __ pop(ecx);
- __ push(edx);
- __ push(eax);
-
- // Figure out which native to call and setup the arguments.
- Builtins::JavaScript builtin;
- if (cc_ == equal) {
- builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
- } else {
- builtin = Builtins::COMPARE;
- __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
- }
-
- // Restore return address on the stack.
- __ push(ecx);
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(builtin, JUMP_FUNCTION);
-}
-
-
-void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ test(object, Immediate(kSmiTagMask));
- __ j(zero, label);
- __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
- __ cmp(scratch, kSymbolTag | kStringTag);
- __ j(not_equal, label);
-}
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- Label slow;
-
- // If the receiver might be a value (string, number or boolean) check for this
- // and box it if it is.
- if (ReceiverMightBeValue()) {
- // Get the receiver from the stack.
- // +1 ~ return address
- Label receiver_is_value, receiver_is_js_object;
- __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
-
- // Check if receiver is a smi (which is a number value).
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &receiver_is_value, not_taken);
-
- // Check if the receiver is a valid JS object.
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edi);
- __ j(above_equal, &receiver_is_js_object);
-
- // Call the runtime to box the value.
- __ bind(&receiver_is_value);
- __ EnterInternalFrame();
- __ push(eax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ LeaveInternalFrame();
- __ mov(Operand(esp, (argc_ + 1) * kPointerSize), eax);
-
- __ bind(&receiver_is_js_object);
- }
-
- // Get the function to call from the stack.
- // +2 ~ receiver, return address
- __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
-
- // Check that the function really is a JavaScript function.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
- // Goto slow case if we do not have a function.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow, not_taken);
-
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc_);
- __ InvokeFunction(edi, actual, JUMP_FUNCTION);
-
- // Slow-case: Non-function called.
- __ bind(&slow);
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
- __ Set(eax, Immediate(argc_));
- __ Set(ebx, Immediate(0));
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ jmp(adaptor, RelocInfo::CODE_TARGET);
-}
-
-
-bool CEntryStub::NeedsImmovableCode() {
- return false;
-}
-
-
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- __ Throw(eax);
-}
-
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate_scope) {
- // eax: result parameter for PerformGC, if any
- // ebx: pointer to C function (C callee-saved)
- // ebp: frame pointer (restored after C call)
- // esp: stack pointer (restored after C call)
- // edi: number of arguments including receiver (C callee-saved)
- // esi: pointer to the first argument (C callee-saved)
-
- // Result returned in eax, or eax+edx if result_size_ is 2.
-
- // Check stack alignment.
- if (FLAG_debug_code) {
- __ CheckStackAlignment();
- }
-
- if (do_gc) {
- // Pass failure code returned from last attempt as first argument to
- // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
- // stack alignment is known to be correct. This function takes one argument
- // which is passed on the stack, and we know that the stack has been
- // prepared to pass at least one argument.
- __ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
- __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
- if (always_allocate_scope) {
- __ inc(Operand::StaticVariable(scope_depth));
- }
-
- // Call C function.
- __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
- __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
- __ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
- __ call(Operand(ebx));
- // Result is in eax or edx:eax - do not destroy these registers!
-
- if (always_allocate_scope) {
- __ dec(Operand::StaticVariable(scope_depth));
- }
-
- // Make sure we're not trying to return 'the hole' from the runtime
- // call as this may lead to crashes in the IC code later.
- if (FLAG_debug_code) {
- NearLabel okay;
- __ cmp(eax, masm->isolate()->factory()->the_hole_value());
- __ j(not_equal, &okay);
- __ int3();
- __ bind(&okay);
- }
-
- // Check for failure result.
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
- __ lea(ecx, Operand(eax, 1));
- // Lower 2 bits of ecx are 0 iff eax has failure tag.
- __ test(ecx, Immediate(kFailureTagMask));
- __ j(zero, &failure_returned, not_taken);
-
- ExternalReference pending_exception_address(
- Isolate::k_pending_exception_address, masm->isolate());
-
- // Check that there is no pending exception, otherwise we
- // should have returned some failure value.
- if (FLAG_debug_code) {
- __ push(edx);
- __ mov(edx, Operand::StaticVariable(
- ExternalReference::the_hole_value_location(masm->isolate())));
- NearLabel okay;
- __ cmp(edx, Operand::StaticVariable(pending_exception_address));
- // Cannot use check here as it attempts to generate call into runtime.
- __ j(equal, &okay);
- __ int3();
- __ bind(&okay);
- __ pop(edx);
- }
-
- // Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles_);
- __ ret(0);
-
- // Handling of failure.
- __ bind(&failure_returned);
-
- Label retry;
- // If the returned exception is RETRY_AFTER_GC continue at retry label
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ j(zero, &retry, taken);
-
- // Special handling of out of memory exceptions.
- __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
- __ j(equal, throw_out_of_memory_exception);
-
- // Retrieve the pending exception and clear the variable.
- ExternalReference the_hole_location =
- ExternalReference::the_hole_value_location(masm->isolate());
- __ mov(eax, Operand::StaticVariable(pending_exception_address));
- __ mov(edx, Operand::StaticVariable(the_hole_location));
- __ mov(Operand::StaticVariable(pending_exception_address), edx);
-
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- __ cmp(eax, masm->isolate()->factory()->termination_exception());
- __ j(equal, throw_termination_exception);
-
- // Handle normal exception.
- __ jmp(throw_normal_exception);
-
- // Retry.
- __ bind(&retry);
-}
-
-
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- __ ThrowUncatchable(type, eax);
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // eax: number of arguments including receiver
- // ebx: pointer to C function (C callee-saved)
- // ebp: frame pointer (restored after C call)
- // esp: stack pointer (restored after C call)
- // esi: current context (C callee-saved)
- // edi: JS function of the caller (C callee-saved)
-
- // NOTE: Invocations of builtins may return failure objects instead
- // of a proper result. The builtin entry handles this by performing
- // a garbage collection and retrying the builtin (twice).
-
- // Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(save_doubles_);
-
- // eax: result parameter for PerformGC, if any (setup below)
- // ebx: pointer to builtin function (C callee-saved)
- // ebp: frame pointer (restored after C call)
- // esp: stack pointer (restored after C call)
- // edi: number of arguments including receiver (C callee-saved)
- // esi: argv pointer (C callee-saved)
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
-
- __ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
-
- __ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
-}
-
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- Label invoke, exit;
-#ifdef ENABLE_LOGGING_AND_PROFILING
- Label not_outermost_js, not_outermost_js_2;
-#endif
-
- // Setup frame.
- __ push(ebp);
- __ mov(ebp, Operand(esp));
-
- // Push marker in two places.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ push(Immediate(Smi::FromInt(marker))); // context slot
- __ push(Immediate(Smi::FromInt(marker))); // function slot
- // Save callee-saved registers (C calling conventions).
- __ push(edi);
- __ push(esi);
- __ push(ebx);
-
- // Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Isolate::k_c_entry_fp_address, masm->isolate());
- __ push(Operand::StaticVariable(c_entry_fp));
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address,
- masm->isolate());
- __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
- __ j(not_equal, &not_outermost_js);
- __ mov(Operand::StaticVariable(js_entry_sp), ebp);
- __ bind(&not_outermost_js);
-#endif
-
- // Call a faked try-block that does the invoke.
- __ call(&invoke);
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception(Isolate::k_pending_exception_address,
- masm->isolate());
- __ mov(Operand::StaticVariable(pending_exception), eax);
- __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
- __ jmp(&exit);
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
-
- // Clear any pending exceptions.
- ExternalReference the_hole_location =
- ExternalReference::the_hole_value_location(masm->isolate());
- __ mov(edx, Operand::StaticVariable(the_hole_location));
- __ mov(Operand::StaticVariable(pending_exception), edx);
-
- // Fake a receiver (NULL).
- __ push(Immediate(0)); // receiver
-
- // Invoke the function by calling through JS entry trampoline
- // builtin and pop the faked function when we return. Notice that we
- // cannot store a reference to the trampoline code directly in this
- // stub, because the builtin stubs may not have been generated yet.
- if (is_construct) {
- ExternalReference construct_entry(
- Builtins::kJSConstructEntryTrampoline,
- masm->isolate());
- __ mov(edx, Immediate(construct_entry));
- } else {
- ExternalReference entry(Builtins::kJSEntryTrampoline,
- masm->isolate());
- __ mov(edx, Immediate(entry));
- }
- __ mov(edx, Operand(edx, 0)); // deref address
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ call(Operand(edx));
-
- // Unlink this frame from the handler chain.
- __ pop(Operand::StaticVariable(ExternalReference(
- Isolate::k_handler_address,
- masm->isolate())));
- // Pop next_sp.
- __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If current EBP value is the same as js_entry_sp value, it means that
- // the current function is the outermost.
- __ cmp(ebp, Operand::StaticVariable(js_entry_sp));
- __ j(not_equal, &not_outermost_js_2);
- __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
- __ bind(&not_outermost_js_2);
-#endif
-
- // Restore the top frame descriptor from the stack.
- __ bind(&exit);
- __ pop(Operand::StaticVariable(ExternalReference(
- Isolate::k_c_entry_fp_address,
- masm->isolate())));
-
- // Restore callee-saved registers (C calling conventions).
- __ pop(ebx);
- __ pop(esi);
- __ pop(edi);
- __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers
-
- // Restore frame pointer and return.
- __ pop(ebp);
- __ ret(0);
-}
-
-
-// Generate stub code for instanceof.
-// This code can patch a call site inlined cache of the instance of check,
-// which looks like this.
-//
-// 81 ff XX XX XX XX cmp edi, <the hole, patched to a map>
-// 75 0a jne <some near label>
-// b8 XX XX XX XX mov eax, <the hole, patched to either true or false>
-//
-// If call site patching is requested the stack will have the delta from the
-// return address to the cmp instruction just below the return address. This
-// also means that call site patching can only take place with arguments in
-// registers. TOS looks like this when call site patching is requested
-//
-// esp[0] : return address
-// esp[4] : delta from return address to cmp instruction
-//
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Call site inlining and patching implies arguments in registers.
- ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
-
- // Fixed register usage throughout the stub.
- Register object = eax; // Object (lhs).
- Register map = ebx; // Map of the object.
- Register function = edx; // Function (rhs).
- Register prototype = edi; // Prototype of the function.
- Register scratch = ecx;
-
- // Constants describing the call site code to patch.
- static const int kDeltaToCmpImmediate = 2;
- static const int kDeltaToMov = 8;
- static const int kDeltaToMovImmediate = 9;
- static const int8_t kCmpEdiImmediateByte1 = BitCast<int8_t, uint8_t>(0x81);
- static const int8_t kCmpEdiImmediateByte2 = BitCast<int8_t, uint8_t>(0xff);
- static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
-
- ExternalReference roots_address =
- ExternalReference::roots_address(masm->isolate());
-
- ASSERT_EQ(object.code(), InstanceofStub::left().code());
- ASSERT_EQ(function.code(), InstanceofStub::right().code());
-
- // Get the object and function - they are always both needed.
- Label slow, not_js_object;
- if (!HasArgsInRegisters()) {
- __ mov(object, Operand(esp, 2 * kPointerSize));
- __ mov(function, Operand(esp, 1 * kPointerSize));
- }
-
- // Check that the left hand is a JS object.
- __ test(object, Immediate(kSmiTagMask));
- __ j(zero, &not_js_object, not_taken);
- __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
-
- // If there is a call site cache don't look in the global cache, but do the
- // real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck()) {
- // Look up the function and the map in the instanceof cache.
- NearLabel miss;
- __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
- __ cmp(function,
- Operand::StaticArray(scratch, times_pointer_size, roots_address));
- __ j(not_equal, &miss);
- __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
- __ cmp(map, Operand::StaticArray(
- scratch, times_pointer_size, roots_address));
- __ j(not_equal, &miss);
- __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(eax, Operand::StaticArray(
- scratch, times_pointer_size, roots_address));
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
- __ bind(&miss);
- }
-
- // Get the prototype of the function.
- __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
-
- // Check that the function prototype is a JS object.
- __ test(prototype, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
- __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
-
- // Update the global instanceof or call site inlined cache with the current
- // map and function. The cached answer will be set when it is known below.
- if (!HasCallSiteInlineCheck()) {
- __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
- __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), map);
- __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
- __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address),
- function);
- } else {
- // The constants for the code patching are based on no push instructions
- // at the call site.
- ASSERT(HasArgsInRegisters());
- // Get return address and delta to inlined map check.
- __ mov(scratch, Operand(esp, 0 * kPointerSize));
- __ sub(scratch, Operand(esp, 1 * kPointerSize));
- if (FLAG_debug_code) {
- __ cmpb(Operand(scratch, 0), kCmpEdiImmediateByte1);
- __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 1)");
- __ cmpb(Operand(scratch, 1), kCmpEdiImmediateByte2);
- __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 2)");
- }
- __ mov(Operand(scratch, kDeltaToCmpImmediate), map);
- }
-
- // Loop through the prototype chain of the object looking for the function
- // prototype.
- __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
- NearLabel loop, is_instance, is_not_instance;
- __ bind(&loop);
- __ cmp(scratch, Operand(prototype));
- __ j(equal, &is_instance);
- Factory* factory = masm->isolate()->factory();
- __ cmp(Operand(scratch), Immediate(factory->null_value()));
- __ j(equal, &is_not_instance);
- __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
- __ jmp(&loop);
-
- __ bind(&is_instance);
- if (!HasCallSiteInlineCheck()) {
- __ Set(eax, Immediate(0));
- __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(Operand::StaticArray(scratch,
- times_pointer_size, roots_address), eax);
- } else {
- // Get return address and delta to inlined map check.
- __ mov(eax, factory->true_value());
- __ mov(scratch, Operand(esp, 0 * kPointerSize));
- __ sub(scratch, Operand(esp, 1 * kPointerSize));
- if (FLAG_debug_code) {
- __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
- __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
- }
- __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
- if (!ReturnTrueFalseObject()) {
- __ Set(eax, Immediate(0));
- }
- }
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- __ bind(&is_not_instance);
- if (!HasCallSiteInlineCheck()) {
- __ Set(eax, Immediate(Smi::FromInt(1)));
- __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(Operand::StaticArray(
- scratch, times_pointer_size, roots_address), eax);
- } else {
- // Get return address and delta to inlined map check.
- __ mov(eax, factory->false_value());
- __ mov(scratch, Operand(esp, 0 * kPointerSize));
- __ sub(scratch, Operand(esp, 1 * kPointerSize));
- if (FLAG_debug_code) {
- __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
- __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
- }
- __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
- if (!ReturnTrueFalseObject()) {
- __ Set(eax, Immediate(Smi::FromInt(1)));
- }
- }
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- Label object_not_null, object_not_null_or_smi;
- __ bind(&not_js_object);
- // Before null, smi and string value checks, check that the rhs is a function
- // as for a non-function rhs an exception needs to be thrown.
- __ test(function, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
- __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
- __ j(not_equal, &slow, not_taken);
-
- // Null is not instance of anything.
- __ cmp(object, factory->null_value());
- __ j(not_equal, &object_not_null);
- __ Set(eax, Immediate(Smi::FromInt(1)));
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- __ bind(&object_not_null);
- // Smi values is not instance of anything.
- __ test(object, Immediate(kSmiTagMask));
- __ j(not_zero, &object_not_null_or_smi, not_taken);
- __ Set(eax, Immediate(Smi::FromInt(1)));
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- __ bind(&object_not_null_or_smi);
- // String values is not instance of anything.
- Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
- __ j(NegateCondition(is_string), &slow);
- __ Set(eax, Immediate(Smi::FromInt(1)));
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- // Slow-case: Go through the JavaScript implementation.
- __ bind(&slow);
- if (!ReturnTrueFalseObject()) {
- // Tail call the builtin which returns 0 or 1.
- if (HasArgsInRegisters()) {
- // Push arguments below return address.
- __ pop(scratch);
- __ push(object);
- __ push(function);
- __ push(scratch);
- }
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
- } else {
- // Call the builtin and convert 0/1 to true/false.
- __ EnterInternalFrame();
- __ push(object);
- __ push(function);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- __ LeaveInternalFrame();
- NearLabel true_value, done;
- __ test(eax, Operand(eax));
- __ j(zero, &true_value);
- __ mov(eax, factory->false_value());
- __ jmp(&done);
- __ bind(&true_value);
- __ mov(eax, factory->true_value());
- __ bind(&done);
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
- }
-}
-
-
-Register InstanceofStub::left() { return eax; }
-
-
-Register InstanceofStub::right() { return edx; }
-
-
-int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate
- // stubs the never NaN NaN condition is only taken into account if the
- // condition is equals.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- return ConditionField::encode(static_cast<unsigned>(cc_))
- | RegisterField::encode(false) // lhs_ and rhs_ are not used
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_)
- | IncludeSmiCompareField::encode(include_smi_compare_);
-}
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
-
- const char* cc_name;
- switch (cc_) {
- case less: cc_name = "LT"; break;
- case greater: cc_name = "GT"; break;
- case less_equal: cc_name = "LE"; break;
- case greater_equal: cc_name = "GE"; break;
- case equal: cc_name = "EQ"; break;
- case not_equal: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
-
- const char* strict_name = "";
- if (strict_ && (cc_ == equal || cc_ == not_equal)) {
- strict_name = "_STRICT";
- }
-
- const char* never_nan_nan_name = "";
- if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
- never_nan_nan_name = "_NO_NAN";
- }
-
- const char* include_number_compare_name = "";
- if (!include_number_compare_) {
- include_number_compare_name = "_NO_NUMBER";
- }
-
- const char* include_smi_compare_name = "";
- if (!include_smi_compare_) {
- include_smi_compare_name = "_NO_SMI";
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "CompareStub_%s%s%s%s%s",
- cc_name,
- strict_name,
- never_nan_nan_name,
- include_number_compare_name,
- include_smi_compare_name);
- return name_;
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharCodeAtGenerator
-
-void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- Label flat_string;
- Label ascii_string;
- Label got_char_code;
-
- // If the receiver is a smi trigger the non-string case.
- STATIC_ASSERT(kSmiTag == 0);
- __ test(object_, Immediate(kSmiTagMask));
- __ j(zero, receiver_not_string_);
-
- // Fetch the instance type of the receiver into result register.
- __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the receiver is not a string trigger the non-string case.
- __ test(result_, Immediate(kIsNotStringMask));
- __ j(not_zero, receiver_not_string_);
-
- // If the index is non-smi trigger the non-smi case.
- STATIC_ASSERT(kSmiTag == 0);
- __ test(index_, Immediate(kSmiTagMask));
- __ j(not_zero, &index_not_smi_);
-
- // Put smi-tagged index into scratch register.
- __ mov(scratch_, index_);
- __ bind(&got_smi_index_);
-
- // Check for index out of range.
- __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset));
- __ j(above_equal, index_out_of_range_);
-
- // We need special handling for non-flat strings.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test(result_, Immediate(kStringRepresentationMask));
- __ j(zero, &flat_string);
-
- // Handle non-flat strings.
- __ test(result_, Immediate(kIsConsStringMask));
- __ j(zero, &call_runtime_);
-
- // ConsString.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ cmp(FieldOperand(object_, ConsString::kSecondOffset),
- Immediate(masm->isolate()->factory()->empty_string()));
- __ j(not_equal, &call_runtime_);
- // Get the first of the two strings and load its instance type.
- __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
- __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the first cons component is also non-flat, then go to runtime.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test(result_, Immediate(kStringRepresentationMask));
- __ j(not_zero, &call_runtime_);
-
- // Check for 1-byte or 2-byte string.
- __ bind(&flat_string);
- STATIC_ASSERT(kAsciiStringTag != 0);
- __ test(result_, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string);
-
- // 2-byte string.
- // Load the 2-byte character code into the result register.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ movzx_w(result_, FieldOperand(object_,
- scratch_, times_1, // Scratch is smi-tagged.
- SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ SmiUntag(scratch_);
- __ movzx_b(result_, FieldOperand(object_,
- scratch_, times_1,
- SeqAsciiString::kHeaderSize));
- __ bind(&got_char_code);
- __ SmiTag(result_);
- __ bind(&exit_);
-}
-
-
-void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
-
- // Index is not a smi.
- __ bind(&index_not_smi_);
- // If index is a heap number, try converting it to an integer.
- __ CheckMap(index_,
- masm->isolate()->factory()->heap_number_map(),
- index_not_number_,
- true);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_);
- __ push(index_); // Consumed by runtime conversion function.
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
- } else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
- }
- if (!scratch_.is(eax)) {
- // Save the conversion result before the pop instructions below
- // have a chance to overwrite it.
- __ mov(scratch_, eax);
- }
- __ pop(index_);
- __ pop(object_);
- // Reload the instance type.
- __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- call_helper.AfterCall(masm);
- // If index is still not a smi, it must be out of range.
- STATIC_ASSERT(kSmiTag == 0);
- __ test(scratch_, Immediate(kSmiTagMask));
- __ j(not_zero, index_out_of_range_);
- // Otherwise, return to the fast path.
- __ jmp(&got_smi_index_);
-
- // Call runtime. We get here when the receiver is a string and the
- // index is a number, but the code of getting the actual character
- // is too complex (e.g., when the string needs to be flattened).
- __ bind(&call_runtime_);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- if (!result_.is(eax)) {
- __ mov(result_, eax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
- __ test(code_,
- Immediate(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
- __ j(not_zero, &slow_case_, not_taken);
-
- Factory* factory = masm->isolate()->factory();
- __ Set(result_, Immediate(factory->single_character_string_cache()));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiShiftSize == 0);
- // At this point code register contains smi tagged ascii char code.
- __ mov(result_, FieldOperand(result_,
- code_, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(result_, factory->undefined_value());
- __ j(equal, &slow_case_, not_taken);
- __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- if (!result_.is(eax)) {
- __ mov(result_, eax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharAtGenerator
-
-void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
- char_code_at_generator_.GenerateFast(masm);
- char_from_code_generator_.GenerateFast(masm);
-}
-
-
-void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, call_helper);
- char_from_code_generator_.GenerateSlow(masm, call_helper);
-}
-
-
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label string_add_runtime, call_builtin;
- Builtins::JavaScript builtin_id = Builtins::ADD;
-
- // Load the two arguments.
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- if (flags_ == NO_STRING_ADD_FLAGS) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &string_add_runtime);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
- __ j(above_equal, &string_add_runtime);
-
- // First argument is a a string, test second.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &string_add_runtime);
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
- __ j(above_equal, &string_add_runtime);
- } else {
- // Here at least one of the arguments is definitely a string.
- // We convert the one that is not known to be a string.
- if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
- GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
- GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
- }
-
- // Both arguments are strings.
- // eax: first string
- // edx: second string
- // Check if either of the strings are empty. In that case return the other.
- NearLabel second_not_zero_length, both_not_zero_length;
- __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(ecx, Operand(ecx));
- __ j(not_zero, &second_not_zero_length);
- // Second string is empty, result is first string which is already in eax.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
- __ bind(&second_not_zero_length);
- __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(ebx, Operand(ebx));
- __ j(not_zero, &both_not_zero_length);
- // First string is empty, result is second string which is in edx.
- __ mov(eax, edx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Both strings are non-empty.
- // eax: first string
- // ebx: length of first string as a smi
- // ecx: length of second string as a smi
- // edx: second string
- // Look at the length of the result of adding the two strings.
- Label string_add_flat_result, longer_than_two;
- __ bind(&both_not_zero_length);
- __ add(ebx, Operand(ecx));
- STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
- // Handle exceptionally long strings in the runtime system.
- __ j(overflow, &string_add_runtime);
- // Use the symbol table when adding two one character strings, as it
- // helps later optimizations to return a symbol here.
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
- __ j(not_equal, &longer_than_two);
-
- // Check that both strings are non-external ascii strings.
- __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx,
- &string_add_runtime);
-
- // Get the two characters forming the new string.
- __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
- __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
-
- // Try to lookup two character string in symbol table. If it is not found
- // just allocate a new one.
- Label make_two_character_string, make_two_character_string_no_reload;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, ebx, ecx, eax, edx, edi,
- &make_two_character_string_no_reload, &make_two_character_string);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Allocate a two character string.
- __ bind(&make_two_character_string);
- // Reload the arguments.
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
- // Get the two characters forming the new string.
- __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
- __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
- __ bind(&make_two_character_string_no_reload);
- __ IncrementCounter(counters->string_add_make_two_char(), 1);
- __ AllocateAsciiString(eax, // Result.
- 2, // Length.
- edi, // Scratch 1.
- edx, // Scratch 2.
- &string_add_runtime);
- // Pack both characters in ebx.
- __ shl(ecx, kBitsPerByte);
- __ or_(ebx, Operand(ecx));
- // Set the characters in the new string.
- __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength)));
- __ j(below, &string_add_flat_result);
-
- // If result is not supposed to be flat allocate a cons string object. If both
- // strings are ascii the result is an ascii cons string.
- Label non_ascii, allocated, ascii_data;
- __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
- __ and_(ecx, Operand(edi));
- STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
- __ test(ecx, Immediate(kAsciiStringTag));
- __ j(zero, &non_ascii);
- __ bind(&ascii_data);
- // Allocate an acsii cons string.
- __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
- __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
- __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
- __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
- __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
- __ mov(eax, ecx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only ascii characters.
- // ecx: first instance type AND second instance type.
- // edi: second instance type.
- __ test(ecx, Immediate(kAsciiDataHintMask));
- __ j(not_zero, &ascii_data);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ xor_(edi, Operand(ecx));
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
- __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
- __ j(equal, &ascii_data);
- // Allocate a two byte cons string.
- __ AllocateConsString(ecx, edi, no_reg, &string_add_runtime);
- __ jmp(&allocated);
-
- // Handle creating a flat result. First check that both strings are not
- // external strings.
- // eax: first string
- // ebx: length of resulting flat string as a smi
- // edx: second string
- __ bind(&string_add_flat_result);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ and_(ecx, kStringRepresentationMask);
- __ cmp(ecx, kExternalStringTag);
- __ j(equal, &string_add_runtime);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ and_(ecx, kStringRepresentationMask);
- __ cmp(ecx, kExternalStringTag);
- __ j(equal, &string_add_runtime);
- // Now check if both strings are ascii strings.
- // eax: first string
- // ebx: length of resulting flat string as a smi
- // edx: second string
- Label non_ascii_string_add_flat_result;
- STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
- __ j(zero, &non_ascii_string_add_flat_result);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
- __ j(zero, &string_add_runtime);
-
- // Both strings are ascii strings. As they are short they are both flat.
- // ebx: length of resulting flat string as a smi
- __ SmiUntag(ebx);
- __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
- // eax: result string
- __ mov(ecx, eax);
- // Locate first character of result.
- __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Load first argument and locate first character.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // eax: result string
- // ecx: first character of result
- // edx: first char of first argument
- // edi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
- // Load second argument and locate first character.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // eax: result string
- // ecx: next character of result
- // edx: first char of second argument
- // edi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Handle creating a flat two byte result.
- // eax: first string - known to be two byte
- // ebx: length of resulting flat string as a smi
- // edx: second string
- __ bind(&non_ascii_string_add_flat_result);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
- __ j(not_zero, &string_add_runtime);
- // Both strings are two byte strings. As they are short they are both
- // flat.
- __ SmiUntag(ebx);
- __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime);
- // eax: result string
- __ mov(ecx, eax);
- // Locate first character of result.
- __ add(Operand(ecx),
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Load first argument and locate first character.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ add(Operand(edx),
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // eax: result string
- // ecx: first character of result
- // edx: first char of first argument
- // edi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
- // Load second argument and locate first character.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // eax: result string
- // ecx: next character of result
- // edx: first char of second argument
- // edi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Just jump to runtime to add the two strings.
- __ bind(&string_add_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-
- if (call_builtin.is_linked()) {
- __ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
-}
-
-
-void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* slow) {
- // First check if the argument is already a string.
- Label not_string, done;
- __ test(arg, Immediate(kSmiTagMask));
- __ j(zero, &not_string);
- __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
- __ j(below, &done);
-
- // Check the number to string cache.
- Label not_cached;
- __ bind(&not_string);
- // Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- false,
- &not_cached);
- __ mov(arg, scratch1);
- __ mov(Operand(esp, stack_offset), arg);
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ test(arg, Immediate(kSmiTagMask));
- __ j(zero, slow);
- __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
- __ j(not_equal, slow);
- __ test_b(FieldOperand(scratch1, Map::kBitField2Offset),
- 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ j(zero, slow);
- __ mov(arg, FieldOperand(arg, JSValue::kValueOffset));
- __ mov(Operand(esp, stack_offset), arg);
-
- __ bind(&done);
-}
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- NearLabel loop;
- __ bind(&loop);
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (ascii) {
- __ mov_b(scratch, Operand(src, 0));
- __ mov_b(Operand(dest, 0), scratch);
- __ add(Operand(src), Immediate(1));
- __ add(Operand(dest), Immediate(1));
- } else {
- __ mov_w(scratch, Operand(src, 0));
- __ mov_w(Operand(dest, 0), scratch);
- __ add(Operand(src), Immediate(2));
- __ add(Operand(dest), Immediate(2));
- }
- __ sub(Operand(count), Immediate(1));
- __ j(not_zero, &loop);
-}
-
-
-void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- // Copy characters using rep movs of doublewords.
- // The destination is aligned on a 4 byte boundary because we are
- // copying to the beginning of a newly allocated string.
- ASSERT(dest.is(edi)); // rep movs destination
- ASSERT(src.is(esi)); // rep movs source
- ASSERT(count.is(ecx)); // rep movs count
- ASSERT(!scratch.is(dest));
- ASSERT(!scratch.is(src));
- ASSERT(!scratch.is(count));
-
- // Nothing to do for zero characters.
- Label done;
- __ test(count, Operand(count));
- __ j(zero, &done);
-
- // Make count the number of bytes to copy.
- if (!ascii) {
- __ shl(count, 1);
- }
-
- // Don't enter the rep movs if there are less than 4 bytes to copy.
- NearLabel last_bytes;
- __ test(count, Immediate(~3));
- __ j(zero, &last_bytes);
-
- // Copy from edi to esi using rep movs instruction.
- __ mov(scratch, count);
- __ sar(count, 2); // Number of doublewords to copy.
- __ cld();
- __ rep_movs();
-
- // Find number of bytes left.
- __ mov(count, scratch);
- __ and_(count, 3);
-
- // Check if there are more bytes to copy.
- __ bind(&last_bytes);
- __ test(count, Operand(count));
- __ j(zero, &done);
-
- // Copy remaining characters.
- NearLabel loop;
- __ bind(&loop);
- __ mov_b(scratch, Operand(src, 0));
- __ mov_b(Operand(dest, 0), scratch);
- __ add(Operand(src), Immediate(1));
- __ add(Operand(dest), Immediate(1));
- __ sub(Operand(count), Immediate(1));
- __ j(not_zero, &loop);
-
- __ bind(&done);
-}
-
-
-void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_probed,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits as such strings has a
- // different hash algorithm. Don't try to look for these in the symbol table.
- NearLabel not_array_index;
- __ mov(scratch, c1);
- __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
- __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
- __ j(above, &not_array_index);
- __ mov(scratch, c2);
- __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
- __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
- __ j(below_equal, not_probed);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- GenerateHashInit(masm, hash, c1, scratch);
- GenerateHashAddCharacter(masm, hash, c2, scratch);
- GenerateHashGetHash(masm, hash, scratch);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ shl(c2, kBitsPerByte);
- __ or_(chars, Operand(c2));
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load the symbol table.
- Register symbol_table = c2;
- ExternalReference roots_address =
- ExternalReference::roots_address(masm->isolate());
- __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
- __ mov(symbol_table,
- Operand::StaticArray(scratch, times_pointer_size, roots_address));
-
- // Calculate capacity mask from the symbol table capacity.
- Register mask = scratch2;
- __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
- __ SmiUntag(mask);
- __ sub(Operand(mask), Immediate(1));
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string
- // symbol_table: symbol table
- // mask: capacity mask
- // scratch: -
-
- // Perform a number of probes in the symbol table.
- static const int kProbes = 4;
- Label found_in_symbol_table;
- Label next_probe[kProbes], next_probe_pop_mask[kProbes];
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in symbol table.
- __ mov(scratch, hash);
- if (i > 0) {
- __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
- }
- __ and_(scratch, Operand(mask));
-
- // Load the entry from the symbol table.
- Register candidate = scratch; // Scratch register contains candidate.
- STATIC_ASSERT(SymbolTable::kEntrySize == 1);
- __ mov(candidate,
- FieldOperand(symbol_table,
- scratch,
- times_pointer_size,
- SymbolTable::kElementsStartOffset));
-
- // If entry is undefined no string with this hash can be found.
- Factory* factory = masm->isolate()->factory();
- __ cmp(candidate, factory->undefined_value());
- __ j(equal, not_found);
- __ cmp(candidate, factory->null_value());
- __ j(equal, &next_probe[i]);
-
- // If length is not 2 the string is not a candidate.
- __ cmp(FieldOperand(candidate, String::kLengthOffset),
- Immediate(Smi::FromInt(2)));
- __ j(not_equal, &next_probe[i]);
-
- // As we are out of registers save the mask on the stack and use that
- // register as a temporary.
- __ push(mask);
- Register temp = mask;
-
- // Check that the candidate is a non-external ascii string.
- __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
- __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(
- temp, temp, &next_probe_pop_mask[i]);
-
- // Check if the two characters match.
- __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
- __ and_(temp, 0x0000ffff);
- __ cmp(chars, Operand(temp));
- __ j(equal, &found_in_symbol_table);
- __ bind(&next_probe_pop_mask[i]);
- __ pop(mask);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = scratch;
- __ bind(&found_in_symbol_table);
- __ pop(mask); // Pop saved mask from the stack.
- if (!result.is(eax)) {
- __ mov(eax, result);
- }
-}
-
-
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash = character + (character << 10);
- __ mov(hash, character);
- __ shl(hash, 10);
- __ add(hash, Operand(character));
- // hash ^= hash >> 6;
- __ mov(scratch, hash);
- __ sar(scratch, 6);
- __ xor_(hash, Operand(scratch));
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash += character;
- __ add(hash, Operand(character));
- // hash += hash << 10;
- __ mov(scratch, hash);
- __ shl(scratch, 10);
- __ add(hash, Operand(scratch));
- // hash ^= hash >> 6;
- __ mov(scratch, hash);
- __ sar(scratch, 6);
- __ xor_(hash, Operand(scratch));
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch) {
- // hash += hash << 3;
- __ mov(scratch, hash);
- __ shl(scratch, 3);
- __ add(hash, Operand(scratch));
- // hash ^= hash >> 11;
- __ mov(scratch, hash);
- __ sar(scratch, 11);
- __ xor_(hash, Operand(scratch));
- // hash += hash << 15;
- __ mov(scratch, hash);
- __ shl(scratch, 15);
- __ add(hash, Operand(scratch));
-
- // if (hash == 0) hash = 27;
- NearLabel hash_not_zero;
- __ test(hash, Operand(hash));
- __ j(not_zero, &hash_not_zero);
- __ mov(hash, Immediate(27));
- __ bind(&hash_not_zero);
-}
-
-
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: to
- // esp[8]: from
- // esp[12]: string
-
- // Make sure first argument is a string.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
- __ j(NegateCondition(is_string), &runtime);
-
- // eax: string
- // ebx: instance type
-
- // Calculate length of sub string using the smi values.
- Label result_longer_than_two;
- __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime);
- __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime);
- __ sub(ecx, Operand(edx));
- __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
- Label return_eax;
- __ j(equal, &return_eax);
- // Special handling of sub-strings of length 1 and 2. One character strings
- // are handled in the runtime system (looked up in the single character
- // cache). Two character strings are looked for in the symbol cache.
- __ SmiUntag(ecx); // Result length is no longer smi.
- __ cmp(ecx, 2);
- __ j(greater, &result_longer_than_two);
- __ j(less, &runtime);
-
- // Sub string of length 2 requested.
- // eax: string
- // ebx: instance type
- // ecx: sub string length (value is 2)
- // edx: from index (smi)
- __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);
-
- // Get the two characters forming the sub string.
- __ SmiUntag(edx); // From index is no longer smi.
- __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize));
- __ movzx_b(ecx,
- FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1));
-
- // Try to lookup two character string in symbol table.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, ebx, ecx, eax, edx, edi,
- &make_two_character_string, &make_two_character_string);
- __ ret(3 * kPointerSize);
-
- __ bind(&make_two_character_string);
- // Setup registers for allocating the two character string.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ Set(ecx, Immediate(2));
-
- __ bind(&result_longer_than_two);
- // eax: string
- // ebx: instance type
- // ecx: result string length
- // Check for flat ascii string
- Label non_ascii_flat;
- __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat);
-
- // Allocate the result.
- __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);
-
- // eax: result string
- // ecx: result string length
- __ mov(edx, esi); // esi used by following code.
- // Locate first character of result.
- __ mov(edi, eax);
- __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Load string argument and locate character of sub string start.
- __ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
- __ SmiUntag(ebx);
- __ add(esi, Operand(ebx));
-
- // eax: result string
- // ecx: result length
- // edx: original value of esi
- // edi: first character of result
- // esi: character of sub string start
- StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
- __ mov(esi, edx); // Restore esi.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(3 * kPointerSize);
-
- __ bind(&non_ascii_flat);
- // eax: string
- // ebx: instance type & kStringRepresentationMask | kStringEncodingMask
- // ecx: result string length
- // Check for flat two byte string
- __ cmp(ebx, kSeqStringTag | kTwoByteStringTag);
- __ j(not_equal, &runtime);
-
- // Allocate the result.
- __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime);
-
- // eax: result string
- // ecx: result string length
- __ mov(edx, esi); // esi used by following code.
- // Locate first character of result.
- __ mov(edi, eax);
- __ add(Operand(edi),
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Load string argument and locate character of sub string start.
- __ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(Operand(esi),
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
- // As from is a smi it is 2 times the value which matches the size of a two
- // byte character.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(esi, Operand(ebx));
-
- // eax: result string
- // ecx: result length
- // edx: original value of esi
- // edi: first character of result
- // esi: character of sub string start
- StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
- __ mov(esi, edx); // Restore esi.
-
- __ bind(&return_eax);
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(3 * kPointerSize);
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
-}
-
-
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- Label result_not_equal;
- Label result_greater;
- Label compare_lengths;
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_compare_native(), 1);
-
- // Find minimum length.
- NearLabel left_shorter;
- __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
- __ mov(scratch3, scratch1);
- __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
-
- Register length_delta = scratch3;
-
- __ j(less_equal, &left_shorter);
- // Right string is shorter. Change scratch1 to be length of right string.
- __ sub(scratch1, Operand(length_delta));
- __ bind(&left_shorter);
-
- Register min_length = scratch1;
-
- // If either length is zero, just compare lengths.
- __ test(min_length, Operand(min_length));
- __ j(zero, &compare_lengths);
-
- // Change index to run from -min_length to -1 by adding min_length
- // to string start. This means that loop ends when index reaches zero,
- // which doesn't need an additional compare.
- __ SmiUntag(min_length);
- __ lea(left,
- FieldOperand(left,
- min_length, times_1,
- SeqAsciiString::kHeaderSize));
- __ lea(right,
- FieldOperand(right,
- min_length, times_1,
- SeqAsciiString::kHeaderSize));
- __ neg(min_length);
-
- Register index = min_length; // index = -min_length;
-
- {
- // Compare loop.
- NearLabel loop;
- __ bind(&loop);
- // Compare characters.
- __ mov_b(scratch2, Operand(left, index, times_1, 0));
- __ cmpb(scratch2, Operand(right, index, times_1, 0));
- __ j(not_equal, &result_not_equal);
- __ add(Operand(index), Immediate(1));
- __ j(not_zero, &loop);
- }
-
- // Compare lengths - strings up to min-length are equal.
- __ bind(&compare_lengths);
- __ test(length_delta, Operand(length_delta));
- __ j(not_zero, &result_not_equal);
-
- // Result is EQUAL.
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- __ bind(&result_not_equal);
- __ j(greater, &result_greater);
-
- // Result is LESS.
- __ Set(eax, Immediate(Smi::FromInt(LESS)));
- __ ret(0);
-
- // Result is GREATER.
- __ bind(&result_greater);
- __ Set(eax, Immediate(Smi::FromInt(GREATER)));
- __ ret(0);
-}
-
-
-void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: right string
- // esp[8]: left string
-
- __ mov(edx, Operand(esp, 2 * kPointerSize)); // left
- __ mov(eax, Operand(esp, 1 * kPointerSize)); // right
-
- NearLabel not_same;
- __ cmp(edx, Operand(eax));
- __ j(not_equal, &not_same);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ IncrementCounter(masm->isolate()->counters()->string_compare_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&not_same);
-
- // Check that both objects are sequential ascii strings.
- __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
-
- // Compare flat ascii strings.
- // Drop arguments from the stack.
- __ pop(ecx);
- __ add(Operand(esp), Immediate(2 * kPointerSize));
- __ push(ecx);
- GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
-}
-
-
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMIS);
- NearLabel miss;
- __ mov(ecx, Operand(edx));
- __ or_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &miss, not_taken);
-
- if (GetCondition() == equal) {
- // For equality we do not care about the sign of the result.
- __ sub(eax, Operand(edx));
- } else {
- NearLabel done;
- __ sub(edx, Operand(eax));
- __ j(no_overflow, &done);
- // Correct sign of result in case of overflow.
- __ not_(edx);
- __ bind(&done);
- __ mov(eax, edx);
- }
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBERS);
-
- NearLabel generic_stub;
- NearLabel unordered;
- NearLabel miss;
- __ mov(ecx, Operand(edx));
- __ and_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(zero, &generic_stub, not_taken);
-
- __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
- __ j(not_equal, &miss, not_taken);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
- __ j(not_equal, &miss, not_taken);
-
- // Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or SS2 or CMOV is unsupported.
- if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
- CpuFeatures::Scope scope1(SSE2);
- CpuFeatures::Scope scope2(CMOV);
-
- // Load left and right operand
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
-
- // Compare operands
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, not_taken);
-
- // Return a result of -1, 0, or 1, based on EFLAGS.
- // Performing mov, because xor would destroy the flag register.
- __ mov(eax, 0); // equal
- __ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, Operand(ecx));
- __ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, Operand(ecx));
- __ ret(0);
-
- __ bind(&unordered);
- }
-
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
- __ bind(&generic_stub);
- __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECTS);
- NearLabel miss;
- __ mov(ecx, Operand(edx));
- __ and_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
-
- __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
- __ j(not_equal, &miss, not_taken);
- __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
- __ j(not_equal, &miss, not_taken);
-
- ASSERT(GetCondition() == equal);
- __ sub(eax, Operand(edx));
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- // Save the registers.
- __ pop(ecx);
- __ push(edx);
- __ push(eax);
- __ push(ecx);
-
- // Call the runtime system in a fresh internal frame.
- ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
- masm->isolate());
- __ EnterInternalFrame();
- __ push(edx);
- __ push(eax);
- __ push(Immediate(Smi::FromInt(op_)));
- __ CallExternalReference(miss, 3);
- __ LeaveInternalFrame();
-
- // Compute the entry point of the rewritten stub.
- __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
-
- // Restore registers.
- __ pop(ecx);
- __ pop(eax);
- __ pop(edx);
- __ push(ecx);
-
- // Do a tail call to the rewritten stub.
- __ jmp(Operand(edi));
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/code-stubs-ia32.h b/src/3rdparty/v8/src/ia32/code-stubs-ia32.h
deleted file mode 100644
index d116bf7..0000000
--- a/src/3rdparty/v8/src/ia32/code-stubs-ia32.h
+++ /dev/null
@@ -1,495 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_CODE_STUBS_IA32_H_
-#define V8_IA32_CODE_STUBS_IA32_H_
-
-#include "macro-assembler.h"
-#include "code-stubs.h"
-#include "ic-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
- public:
- enum ArgumentType {
- TAGGED = 0,
- UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
- };
-
- TranscendentalCacheStub(TranscendentalCache::Type type,
- ArgumentType argument_type)
- : type_(type), argument_type_(argument_type) {}
- void Generate(MacroAssembler* masm);
- private:
- TranscendentalCache::Type type_;
- ArgumentType argument_type_;
-
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_ | argument_type_; }
- Runtime::FunctionId RuntimeFunction();
- void GenerateOperation(MacroAssembler* masm);
-};
-
-
-class ToBooleanStub: public CodeStub {
- public:
- ToBooleanStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return 0; }
-};
-
-
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
- NO_GENERIC_BINARY_FLAGS = 0,
- NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
-};
-
-
-class GenericBinaryOpStub: public CodeStub {
- public:
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- GenericBinaryFlags flags,
- TypeInfo operands_type)
- : op_(op),
- mode_(mode),
- flags_(flags),
- args_in_registers_(false),
- args_reversed_(false),
- static_operands_type_(operands_type),
- runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
- name_(NULL) {
- if (static_operands_type_.IsSmi()) {
- mode_ = NO_OVERWRITE;
- }
- use_sse3_ = CpuFeatures::IsSupported(SSE3);
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- flags_(FlagBits::decode(key)),
- args_in_registers_(ArgsInRegistersBits::decode(key)),
- args_reversed_(ArgsReversedBits::decode(key)),
- use_sse3_(SSE3Bits::decode(key)),
- static_operands_type_(TypeInfo::ExpandedRepresentation(
- StaticTypeInfoBits::decode(key))),
- runtime_operands_type_(runtime_operands_type),
- name_(NULL) {
- }
-
- // Generate code to call the stub with the supplied arguments. This will add
- // code at the call site to prepare arguments either in registers or on the
- // stack together with the actual call.
- void GenerateCall(MacroAssembler* masm, Register left, Register right);
- void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
- void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
-
- bool ArgsInRegistersSupported() {
- return op_ == Token::ADD || op_ == Token::SUB
- || op_ == Token::MUL || op_ == Token::DIV;
- }
-
- void SetArgsInRegisters() {
- ASSERT(ArgsInRegistersSupported());
- args_in_registers_ = true;
- }
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- GenericBinaryFlags flags_;
- bool args_in_registers_; // Arguments passed in registers not on the stack.
- bool args_reversed_; // Left and right argument are swapped.
- bool use_sse3_;
-
- // Number type information of operands, determined by code generator.
- TypeInfo static_operands_type_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo runtime_operands_type_;
-
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("GenericBinaryOpStub %d (op %s), "
- "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- static_cast<int>(flags_),
- static_cast<int>(args_in_registers_),
- static_cast<int>(args_reversed_),
- static_operands_type_.ToString());
- }
-#endif
-
- // Minor key encoding in 18 bits RRNNNFRASOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class SSE3Bits: public BitField<bool, 9, 1> {};
- class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
- class ArgsReversedBits: public BitField<bool, 11, 1> {};
- class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
- class StaticTypeInfoBits: public BitField<int, 13, 3> {};
- class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
-
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- // Encode the parameters in a unique 18 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FlagBits::encode(flags_)
- | SSE3Bits::encode(use_sse3_)
- | ArgsInRegistersBits::encode(args_in_registers_)
- | ArgsReversedBits::encode(args_reversed_)
- | StaticTypeInfoBits::encode(
- static_operands_type_.ThreeBitRepresentation())
- | RuntimeTypeInfoBits::encode(runtime_operands_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateSmiCode(MacroAssembler* masm, Label* slow);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
-
- bool IsOperationCommutative() {
- return (op_ == Token::ADD) || (op_ == Token::MUL);
- }
-
- void SetArgsReversed() { args_reversed_ = true; }
- bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
- bool HasArgsInRegisters() { return args_in_registers_; }
- bool HasArgsReversed() { return args_reversed_; }
-
- bool ShouldGenerateSmiCode() {
- return HasSmiCodeInStub() &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- bool ShouldGenerateFPCode() {
- return runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(runtime_operands_type_);
- }
-
- virtual void FinishCode(Code* code) {
- code->set_binary_op_type(runtime_operands_type_);
- }
-
- friend class CodeGenerator;
-};
-
-
-class TypeRecordingBinaryOpStub: public CodeStub {
- public:
- TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- operands_type_(TRBinaryOpIC::UNINITIALIZED),
- result_type_(TRBinaryOpIC::UNINITIALIZED),
- name_(NULL) {
- use_sse3_ = CpuFeatures::IsSupported(SSE3);
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- TypeRecordingBinaryOpStub(
- int key,
- TRBinaryOpIC::TypeInfo operands_type,
- TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- use_sse3_(SSE3Bits::decode(key)),
- operands_type_(operands_type),
- result_type_(result_type),
- name_(NULL) { }
-
- private:
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- Token::Value op_;
- OverwriteMode mode_;
- bool use_sse3_;
-
- // Operand type information determined at runtime.
- TRBinaryOpIC::TypeInfo operands_type_;
- TRBinaryOpIC::TypeInfo result_type_;
-
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("TypeRecordingBinaryOpStub %d (op %s), "
- "(mode %d, runtime_type_info %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- TRBinaryOpIC::GetName(operands_type_));
- }
-#endif
-
- // Minor key encoding in 16 bits RRRTTTSOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class SSE3Bits: public BitField<bool, 9, 1> {};
- class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
- class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
-
- Major MajorKey() { return TypeRecordingBinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | SSE3Bits::encode(use_sse3_)
- | OperandTypeInfoBits::encode(operands_type_)
- | ResultTypeInfoBits::encode(result_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateSmiCode(MacroAssembler* masm,
- Label* slow,
- SmiCodeGenerateHeapNumberResults heapnumber_results);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateAddStrings(MacroAssembler* masm);
-
- void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return TRBinaryOpIC::ToState(operands_type_);
- }
-
- virtual void FinishCode(Code* code) {
- code->set_type_recording_binary_op_type(operands_type_);
- code->set_type_recording_binary_op_result_type(result_type_);
- }
-
- friend class CodeGenerator;
-};
-
-
-class StringHelper : public AllStatic {
- public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersREP adds too much
- // overhead. Copying of overlapping regions is not supported.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
-
- // Generate code for copying characters using the rep movs instruction.
- // Copies ecx characters from esi to edi. Copying of overlapping regions is
- // not supported.
- static void GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest, // Must be edi.
- Register src, // Must be esi.
- Register count, // Must be ecx.
- Register scratch, // Neither of above.
- bool ascii);
-
- // Probe the symbol table for a two character string. If the string
- // requires non-standard hashing a jump to the label not_probed is
- // performed and registers c1 and c2 are preserved. In all other
- // cases they are clobbered. If the string is not found by probing a
- // jump to the label not_found is performed. This jump does not
- // guarantee that the string is not in the symbol table. If the
- // string is found the code falls through with the string in
- // register eax.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_probed,
- Label* not_found);
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- // Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
- // Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
- // Omit both string checks in stub.
- NO_STRING_CHECK_IN_STUB =
- NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
-};
-
-
-class StringAddStub: public CodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return flags_; }
-
- void Generate(MacroAssembler* masm);
-
- void GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* slow);
-
- const StringAddFlags flags_;
-};
-
-
-class SubStringStub: public CodeStub {
- public:
- SubStringStub() {}
-
- private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public CodeStub {
- public:
- explicit StringCompareStub() {
- }
-
- // Compare two flat ascii strings and returns result in eax after popping two
- // arguments from the stack.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- private:
- Major MajorKey() { return StringCompare; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class NumberToStringStub: public CodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "NumberToStringStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("NumberToStringStub\n");
- }
-#endif
-};
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_CODE_STUBS_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/codegen-ia32-inl.h b/src/3rdparty/v8/src/ia32/codegen-ia32-inl.h
deleted file mode 100644
index 49c706d..0000000
--- a/src/3rdparty/v8/src/ia32/codegen-ia32-inl.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_IA32_CODEGEN_IA32_INL_H_
-#define V8_IA32_CODEGEN_IA32_INL_H_
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// Platform-specific inline functions.
-
-void DeferredCode::Jump() { __ jmp(&entry_label_); }
-void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_CODEGEN_IA32_INL_H_
diff --git a/src/3rdparty/v8/src/ia32/codegen-ia32.cc b/src/3rdparty/v8/src/ia32/codegen-ia32.cc
deleted file mode 100644
index 8a47e72..0000000
--- a/src/3rdparty/v8/src/ia32/codegen-ia32.cc
+++ /dev/null
@@ -1,10385 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen-inl.h"
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-// -------------------------------------------------------------------------
-// Platform-specific FrameRegisterState functions.
-
-void FrameRegisterState::Save(MacroAssembler* masm) const {
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- int action = registers_[i];
- if (action == kPush) {
- __ push(RegisterAllocator::ToRegister(i));
- } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
- __ mov(Operand(ebp, action), RegisterAllocator::ToRegister(i));
- }
- }
-}
-
-
-void FrameRegisterState::Restore(MacroAssembler* masm) const {
- // Restore registers in reverse order due to the stack.
- for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
- int action = registers_[i];
- if (action == kPush) {
- __ pop(RegisterAllocator::ToRegister(i));
- } else if (action != kIgnore) {
- action &= ~kSyncedFlag;
- __ mov(RegisterAllocator::ToRegister(i), Operand(ebp, action));
- }
- }
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm_)
-
-// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-void DeferredCode::SaveRegisters() {
- frame_state_.Save(masm_);
-}
-
-
-void DeferredCode::RestoreRegisters() {
- frame_state_.Restore(masm_);
-}
-
-
-// -------------------------------------------------------------------------
-// Platform-specific RuntimeCallHelper functions.
-
-void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- frame_state_->Save(masm);
-}
-
-
-void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- frame_state_->Restore(masm);
-}
-
-
-void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterInternalFrame();
-}
-
-
-void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveInternalFrame();
-}
-
-
-// -------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
- : owner_(owner),
- destination_(NULL),
- previous_(NULL) {
- owner_->set_state(this);
-}
-
-
-CodeGenState::CodeGenState(CodeGenerator* owner,
- ControlDestination* destination)
- : owner_(owner),
- destination_(destination),
- previous_(owner->state()) {
- owner_->set_state(this);
-}
-
-
-CodeGenState::~CodeGenState() {
- ASSERT(owner_->state() == this);
- owner_->set_state(previous_);
-}
-
-// -------------------------------------------------------------------------
-// CodeGenerator implementation.
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
- : deferred_(8),
- masm_(masm),
- info_(NULL),
- frame_(NULL),
- allocator_(NULL),
- state_(NULL),
- loop_nesting_(0),
- in_safe_int32_mode_(false),
- safe_int32_mode_enabled_(true),
- function_return_is_shadowed_(false),
- in_spilled_code_(false),
- jit_cookie_((FLAG_mask_constants_with_cookie) ?
- V8::RandomPrivate(Isolate::Current()) : 0) {
-}
-
-
-// Calling conventions:
-// ebp: caller's frame pointer
-// esp: stack pointer
-// edi: called JS function
-// esi: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
- // Record the position for debugging purposes.
- CodeForFunctionPosition(info->function());
- Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
-
- // Initialize state.
- info_ = info;
- ASSERT(allocator_ == NULL);
- RegisterAllocator register_allocator(this);
- allocator_ = &register_allocator;
- ASSERT(frame_ == NULL);
- frame_ = new VirtualFrame();
- set_in_spilled_code(false);
-
- // Adjust for function-level loop nesting.
- ASSERT_EQ(0, loop_nesting_);
- loop_nesting_ = info->is_in_loop() ? 1 : 0;
-
- masm()->isolate()->set_jump_target_compiling_deferred_code(false);
-
- {
- CodeGenState state(this);
-
- // Entry:
- // Stack: receiver, arguments, return address.
- // ebp: caller's frame pointer
- // esp: stack pointer
- // edi: called JS function
- // esi: callee's context
- allocator_->Initialize();
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- frame_->SpillAll();
- __ int3();
- }
-#endif
-
- frame_->Enter();
-
- // Allocate space for locals and initialize them.
- frame_->AllocateStackSlots();
-
- // Allocate the local context if needed.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment cmnt(masm_, "[ allocate local context");
- // Allocate local context.
- // Get outer context and create a new context based on it.
- frame_->PushFunction();
- Result context;
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- context = frame_->CallStub(&stub, 1);
- } else {
- context = frame_->CallRuntime(Runtime::kNewContext, 1);
- }
-
- // Update context local.
- frame_->SaveContextRegister();
-
- // Verify that the runtime call result and esi agree.
- if (FLAG_debug_code) {
- __ cmp(context.reg(), Operand(esi));
- __ Assert(equal, "Runtime::NewContext should end up in esi");
- }
- }
-
- // TODO(1241774): Improve this code:
- // 1) only needed if we have a context
- // 2) no need to recompute context ptr every single time
- // 3) don't copy parameter operand code from SlotOperand!
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- for (int i = 0; i < scope()->num_parameters(); i++) {
- Variable* par = scope()->parameter(i);
- Slot* slot = par->AsSlot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- // The use of SlotOperand below is safe in unspilled code
- // because the slot is guaranteed to be a context slot.
- //
- // There are no parameters in the global scope.
- ASSERT(!scope()->is_global_scope());
- frame_->PushParameterAt(i);
- Result value = frame_->Pop();
- value.ToRegister();
-
- // SlotOperand loads context.reg() with the context object
- // stored to, used below in RecordWrite.
- Result context = allocator_->Allocate();
- ASSERT(context.is_valid());
- __ mov(SlotOperand(slot, context.reg()), value.reg());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- frame_->Spill(context.reg());
- frame_->Spill(value.reg());
- __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
- }
- }
- }
-
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in
- // the context.
- if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
- StoreArgumentsObject(true);
- }
-
- // Initialize ThisFunction reference if present.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- frame_->Push(FACTORY->the_hole_value());
- StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
- }
-
-
- // Initialize the function return target after the locals are set
- // up, because it needs the expected frame height from the frame.
- function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
- function_return_is_shadowed_ = false;
-
- // Generate code to 'execute' declarations and initialize functions
- // (source elements). In case of an illegal redeclaration we need to
- // handle that instead of processing the declarations.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ illegal redeclarations");
- scope()->VisitIllegalRedeclaration(this);
- } else {
- Comment cmnt(masm_, "[ declarations");
- ProcessDeclarations(scope()->declarations());
- // Bail out if a stack-overflow exception occurred when processing
- // declarations.
- if (HasStackOverflow()) return;
- }
-
- if (FLAG_trace) {
- frame_->CallRuntime(Runtime::kTraceEnter, 0);
- // Ignore the return value.
- }
- CheckStack();
-
- // Compile the body of the function in a vanilla state. Don't
- // bother compiling all the code if the scope has an illegal
- // redeclaration.
- if (!scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ function body");
-#ifdef DEBUG
- bool is_builtin = info->isolate()->bootstrapper()->IsActive();
- bool should_trace =
- is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
- if (should_trace) {
- frame_->CallRuntime(Runtime::kDebugTrace, 0);
- // Ignore the return value.
- }
-#endif
- VisitStatements(info->function()->body());
-
- // Handle the return from the function.
- if (has_valid_frame()) {
- // If there is a valid frame, control flow can fall off the end of
- // the body. In that case there is an implicit return statement.
- ASSERT(!function_return_is_shadowed_);
- CodeForReturnPosition(info->function());
- frame_->PrepareForReturn();
- Result undefined(FACTORY->undefined_value());
- if (function_return_.is_bound()) {
- function_return_.Jump(&undefined);
- } else {
- function_return_.Bind(&undefined);
- GenerateReturnSequence(&undefined);
- }
- } else if (function_return_.is_linked()) {
- // If the return target has dangling jumps to it, then we have not
- // yet generated the return sequence. This can happen when (a)
- // control does not flow off the end of the body so we did not
- // compile an artificial return statement just above, and (b) there
- // are return statements in the body but (c) they are all shadowed.
- Result return_value;
- function_return_.Bind(&return_value);
- GenerateReturnSequence(&return_value);
- }
- }
- }
-
- // Adjust for function-level loop nesting.
- ASSERT_EQ(loop_nesting_, info->is_in_loop() ? 1 : 0);
- loop_nesting_ = 0;
-
- // Code generation state must be reset.
- ASSERT(state_ == NULL);
- ASSERT(!function_return_is_shadowed_);
- function_return_.Unuse();
- DeleteFrame();
-
- // Process any deferred code using the register allocator.
- if (!HasStackOverflow()) {
- info->isolate()->set_jump_target_compiling_deferred_code(true);
- ProcessDeferred();
- info->isolate()->set_jump_target_compiling_deferred_code(false);
- }
-
- // There is no need to delete the register allocator, it is a
- // stack-allocated local.
- allocator_ = NULL;
-}
-
-
-Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
- // Currently, this assertion will fail if we try to assign to
- // a constant variable that is constant because it is read-only
- // (such as the variable referring to a named function expression).
- // We need to implement assignments to read-only variables.
- // Ideally, we should do this during AST generation (by converting
- // such assignments into expression statements); however, in general
- // we may not be able to make the decision until past AST generation,
- // that is when the entire program is known.
- ASSERT(slot != NULL);
- int index = slot->index();
- switch (slot->type()) {
- case Slot::PARAMETER:
- return frame_->ParameterAt(index);
-
- case Slot::LOCAL:
- return frame_->LocalAt(index);
-
- case Slot::CONTEXT: {
- // Follow the context chain if necessary.
- ASSERT(!tmp.is(esi)); // do not overwrite context register
- Register context = esi;
- int chain_length = scope()->ContextChainLength(slot->var()->scope());
- for (int i = 0; i < chain_length; i++) {
- // Load the closure.
- // (All contexts, even 'with' contexts, have a closure,
- // and it is the same for all contexts inside a function.
- // There is no need to go to the function context first.)
- __ mov(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- // Load the function context (which is the incoming, outer context).
- __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
- context = tmp;
- }
- // We may have a 'with' context now. Get the function context.
- // (In fact this mov may never be the needed, since the scope analysis
- // may not permit a direct context access in this case and thus we are
- // always at a function context. However it is safe to dereference be-
- // cause the function context of a function context is itself. Before
- // deleting this mov we should try to create a counter-example first,
- // though...)
- __ mov(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp, index);
- }
-
- default:
- UNREACHABLE();
- return Operand(eax);
- }
-}
-
-
-Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
- Result tmp,
- JumpTarget* slow) {
- ASSERT(slot->type() == Slot::CONTEXT);
- ASSERT(tmp.is_register());
- Register context = esi;
-
- for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- slow->Branch(not_equal, not_taken);
- }
- __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
- __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- context = tmp.reg();
- }
- }
- // Check that last extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- slow->Branch(not_equal, not_taken);
- __ mov(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp.reg(), slot->index());
-}
-
-
-// Emit code to load the value of an expression to the top of the
-// frame. If the expression is boolean-valued it may be compiled (or
-// partially compiled) into control flow to the control destination.
-// If force_control is true, control flow is forced.
-void CodeGenerator::LoadCondition(Expression* expr,
- ControlDestination* dest,
- bool force_control) {
- ASSERT(!in_spilled_code());
- int original_height = frame_->height();
-
- { CodeGenState new_state(this, dest);
- Visit(expr);
-
- // If we hit a stack overflow, we may not have actually visited
- // the expression. In that case, we ensure that we have a
- // valid-looking frame state because we will continue to generate
- // code as we unwind the C++ stack.
- //
- // It's possible to have both a stack overflow and a valid frame
- // state (eg, a subexpression overflowed, visiting it returned
- // with a dummied frame state, and visiting this expression
- // returned with a normal-looking state).
- if (HasStackOverflow() &&
- !dest->is_used() &&
- frame_->height() == original_height) {
- dest->Goto(true);
- }
- }
-
- if (force_control && !dest->is_used()) {
- // Convert the TOS value into flow to the control destination.
- ToBoolean(dest);
- }
-
- ASSERT(!(force_control && !dest->is_used()));
- ASSERT(dest->is_used() || frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadAndSpill(Expression* expression) {
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- Load(expression);
- frame_->SpillAll();
- set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::LoadInSafeInt32Mode(Expression* expr,
- BreakTarget* unsafe_bailout) {
- set_unsafe_bailout(unsafe_bailout);
- set_in_safe_int32_mode(true);
- Load(expr);
- Result value = frame_->Pop();
- ASSERT(frame_->HasNoUntaggedInt32Elements());
- if (expr->GuaranteedSmiResult()) {
- ConvertInt32ResultToSmi(&value);
- } else {
- ConvertInt32ResultToNumber(&value);
- }
- set_in_safe_int32_mode(false);
- set_unsafe_bailout(NULL);
- frame_->Push(&value);
-}
-
-
-void CodeGenerator::LoadWithSafeInt32ModeDisabled(Expression* expr) {
- set_safe_int32_mode_enabled(false);
- Load(expr);
- set_safe_int32_mode_enabled(true);
-}
-
-
-void CodeGenerator::ConvertInt32ResultToSmi(Result* value) {
- ASSERT(value->is_untagged_int32());
- if (value->is_register()) {
- __ add(value->reg(), Operand(value->reg()));
- } else {
- ASSERT(value->is_constant());
- ASSERT(value->handle()->IsSmi());
- }
- value->set_untagged_int32(false);
- value->set_type_info(TypeInfo::Smi());
-}
-
-
-void CodeGenerator::ConvertInt32ResultToNumber(Result* value) {
- ASSERT(value->is_untagged_int32());
- if (value->is_register()) {
- Register val = value->reg();
- JumpTarget done;
- __ add(val, Operand(val));
- done.Branch(no_overflow, value);
- __ sar(val, 1);
- // If there was an overflow, bits 30 and 31 of the original number disagree.
- __ xor_(val, 0x80000000u);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ cvtsi2sd(xmm0, Operand(val));
- } else {
- // Move val to ST[0] in the FPU
- // Push and pop are safe with respect to the virtual frame because
- // all synced elements are below the actual stack pointer.
- __ push(val);
- __ fild_s(Operand(esp, 0));
- __ pop(val);
- }
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_register());
- Label allocation_failed;
- __ AllocateHeapNumber(val, scratch.reg(),
- no_reg, &allocation_failed);
- VirtualFrame* clone = new VirtualFrame(frame_);
- scratch.Unuse();
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ movdbl(FieldOperand(val, HeapNumber::kValueOffset), xmm0);
- } else {
- __ fstp_d(FieldOperand(val, HeapNumber::kValueOffset));
- }
- done.Jump(value);
-
- // Establish the virtual frame, cloned from where AllocateHeapNumber
- // jumped to allocation_failed.
- RegisterFile empty_regs;
- SetFrame(clone, &empty_regs);
- __ bind(&allocation_failed);
- if (!CpuFeatures::IsSupported(SSE2)) {
- // Pop the value from the floating point stack.
- __ fstp(0);
- }
- unsafe_bailout_->Jump();
-
- done.Bind(value);
- } else {
- ASSERT(value->is_constant());
- }
- value->set_untagged_int32(false);
- value->set_type_info(TypeInfo::Integer32());
-}
-
-
-void CodeGenerator::Load(Expression* expr) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- ASSERT(!in_spilled_code());
-
- // If the expression should be a side-effect-free 32-bit int computation,
- // compile that SafeInt32 path, and a bailout path.
- if (!in_safe_int32_mode() &&
- safe_int32_mode_enabled() &&
- expr->side_effect_free() &&
- expr->num_bit_ops() > 2 &&
- CpuFeatures::IsSupported(SSE2)) {
- BreakTarget unsafe_bailout;
- JumpTarget done;
- unsafe_bailout.set_expected_height(frame_->height());
- LoadInSafeInt32Mode(expr, &unsafe_bailout);
- done.Jump();
-
- if (unsafe_bailout.is_linked()) {
- unsafe_bailout.Bind();
- LoadWithSafeInt32ModeDisabled(expr);
- }
- done.Bind();
- } else {
- JumpTarget true_target;
- JumpTarget false_target;
- ControlDestination dest(&true_target, &false_target, true);
- LoadCondition(expr, &dest, false);
-
- if (dest.false_was_fall_through()) {
- // The false target was just bound.
- JumpTarget loaded;
- frame_->Push(FACTORY->false_value());
- // There may be dangling jumps to the true target.
- if (true_target.is_linked()) {
- loaded.Jump();
- true_target.Bind();
- frame_->Push(FACTORY->true_value());
- loaded.Bind();
- }
-
- } else if (dest.is_used()) {
- // There is true, and possibly false, control flow (with true as
- // the fall through).
- JumpTarget loaded;
- frame_->Push(FACTORY->true_value());
- if (false_target.is_linked()) {
- loaded.Jump();
- false_target.Bind();
- frame_->Push(FACTORY->false_value());
- loaded.Bind();
- }
-
- } else {
- // We have a valid value on top of the frame, but we still may
- // have dangling jumps to the true and false targets from nested
- // subexpressions (eg, the left subexpressions of the
- // short-circuited boolean operators).
- ASSERT(has_valid_frame());
- if (true_target.is_linked() || false_target.is_linked()) {
- JumpTarget loaded;
- loaded.Jump(); // Don't lose the current TOS.
- if (true_target.is_linked()) {
- true_target.Bind();
- frame_->Push(FACTORY->true_value());
- if (false_target.is_linked()) {
- loaded.Jump();
- }
- }
- if (false_target.is_linked()) {
- false_target.Bind();
- frame_->Push(FACTORY->false_value());
- }
- loaded.Bind();
- }
- }
- }
- ASSERT(has_valid_frame());
- ASSERT(frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadGlobal() {
- if (in_spilled_code()) {
- frame_->EmitPush(GlobalObjectOperand());
- } else {
- Result temp = allocator_->Allocate();
- __ mov(temp.reg(), GlobalObjectOperand());
- frame_->Push(&temp);
- }
-}
-
-
-void CodeGenerator::LoadGlobalReceiver() {
- Result temp = allocator_->Allocate();
- Register reg = temp.reg();
- __ mov(reg, GlobalObjectOperand());
- __ mov(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
- frame_->Push(&temp);
-}
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* expr) {
- // Special handling of identifiers as subexpressions of typeof.
- Variable* variable = expr->AsVariableProxy()->AsVariable();
- if (variable != NULL && !variable->is_this() && variable->is_global()) {
- // For a global variable we build the property reference
- // <global>.<variable> and perform a (regular non-contextual) property
- // load to make sure we do not get reference errors.
- Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
- Literal key(variable->name());
- Property property(&global, &key, RelocInfo::kNoPosition);
- Reference ref(this, &property);
- ref.GetValue();
- } else if (variable != NULL && variable->AsSlot() != NULL) {
- // For a variable that rewrites to a slot, we signal it is the immediate
- // subexpression of a typeof.
- LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
- } else {
- // Anything else can be handled normally.
- Load(expr);
- }
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
- if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
-
- // In strict mode there is no need for shadow arguments.
- ASSERT(scope()->arguments_shadow() != NULL || scope()->is_strict_mode());
-
- // We don't want to do lazy arguments allocation for functions that
- // have heap-allocated contexts, because it interfers with the
- // uninitialized const tracking in the context objects.
- return (scope()->num_heap_slots() > 0 || scope()->is_strict_mode())
- ? EAGER_ARGUMENTS_ALLOCATION
- : LAZY_ARGUMENTS_ALLOCATION;
-}
-
-
-Result CodeGenerator::StoreArgumentsObject(bool initial) {
- ArgumentsAllocationMode mode = ArgumentsMode();
- ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
-
- Comment cmnt(masm_, "[ store arguments object");
- if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
- // When using lazy arguments allocation, we store the arguments marker value
- // as a sentinel indicating that the arguments object hasn't been
- // allocated yet.
- frame_->Push(FACTORY->arguments_marker());
- } else {
- ArgumentsAccessStub stub(is_strict_mode()
- ? ArgumentsAccessStub::NEW_STRICT
- : ArgumentsAccessStub::NEW_NON_STRICT);
- frame_->PushFunction();
- frame_->PushReceiverSlotAddress();
- frame_->Push(Smi::FromInt(scope()->num_parameters()));
- Result result = frame_->CallStub(&stub, 3);
- frame_->Push(&result);
- }
-
- Variable* arguments = scope()->arguments();
- Variable* shadow = scope()->arguments_shadow();
-
- ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
- ASSERT((shadow != NULL && shadow->AsSlot() != NULL) ||
- scope()->is_strict_mode());
-
- JumpTarget done;
- bool skip_arguments = false;
- if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
- // We have to skip storing into the arguments slot if it has
- // already been written to. This can happen if the a function
- // has a local variable named 'arguments'.
- LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF);
- Result probe = frame_->Pop();
- if (probe.is_constant()) {
- // We have to skip updating the arguments object if it has
- // been assigned a proper value.
- skip_arguments = !probe.handle()->IsArgumentsMarker();
- } else {
- __ cmp(Operand(probe.reg()), Immediate(FACTORY->arguments_marker()));
- probe.Unuse();
- done.Branch(not_equal);
- }
- }
- if (!skip_arguments) {
- StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
- if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
- }
- if (shadow != NULL) {
- StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
- }
- return frame_->Pop();
-}
-
-//------------------------------------------------------------------------------
-// CodeGenerator implementation of variables, lookups, and stores.
-
-Reference::Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get)
- : cgen_(cgen),
- expression_(expression),
- type_(ILLEGAL),
- persist_after_get_(persist_after_get) {
- cgen->LoadReference(this);
-}
-
-
-Reference::~Reference() {
- ASSERT(is_unloaded() || is_illegal());
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
- // References are loaded from both spilled and unspilled code. Set the
- // state to unspilled to allow that (and explicitly spill after
- // construction at the construction sites).
- bool was_in_spilled_code = in_spilled_code_;
- in_spilled_code_ = false;
-
- Comment cmnt(masm_, "[ LoadReference");
- Expression* e = ref->expression();
- Property* property = e->AsProperty();
- Variable* var = e->AsVariableProxy()->AsVariable();
-
- if (property != NULL) {
- // The expression is either a property or a variable proxy that rewrites
- // to a property.
- Load(property->obj());
- if (property->key()->IsPropertyName()) {
- ref->set_type(Reference::NAMED);
- } else {
- Load(property->key());
- ref->set_type(Reference::KEYED);
- }
- } else if (var != NULL) {
- // The expression is a variable proxy that does not rewrite to a
- // property. Global variables are treated as named property references.
- if (var->is_global()) {
- // If eax is free, the register allocator prefers it. Thus the code
- // generator will load the global object into eax, which is where
- // LoadIC wants it. Most uses of Reference call LoadIC directly
- // after the reference is created.
- frame_->Spill(eax);
- LoadGlobal();
- ref->set_type(Reference::NAMED);
- } else {
- ASSERT(var->AsSlot() != NULL);
- ref->set_type(Reference::SLOT);
- }
- } else {
- // Anything else is a runtime error.
- Load(e);
- frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
- }
-
- in_spilled_code_ = was_in_spilled_code;
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
-// convert it to a boolean in the condition code register or jump to
-// 'false_target'/'true_target' as appropriate.
-void CodeGenerator::ToBoolean(ControlDestination* dest) {
- Comment cmnt(masm_, "[ ToBoolean");
-
- // The value to convert should be popped from the frame.
- Result value = frame_->Pop();
- value.ToRegister();
-
- if (value.is_integer32()) { // Also takes Smi case.
- Comment cmnt(masm_, "ONLY_INTEGER_32");
- if (FLAG_debug_code) {
- Label ok;
- __ AbortIfNotNumber(value.reg());
- __ test(value.reg(), Immediate(kSmiTagMask));
- __ j(zero, &ok);
- __ fldz();
- __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
- __ FCmp();
- __ j(not_zero, &ok);
- __ Abort("Smi was wrapped in HeapNumber in output from bitop");
- __ bind(&ok);
- }
- // In the integer32 case there are no Smis hidden in heap numbers, so we
- // need only test for Smi zero.
- __ test(value.reg(), Operand(value.reg()));
- dest->false_target()->Branch(zero);
- value.Unuse();
- dest->Split(not_zero);
- } else if (value.is_number()) {
- Comment cmnt(masm_, "ONLY_NUMBER");
- // Fast case if TypeInfo indicates only numbers.
- if (FLAG_debug_code) {
- __ AbortIfNotNumber(value.reg());
- }
- // Smi => false iff zero.
- STATIC_ASSERT(kSmiTag == 0);
- __ test(value.reg(), Operand(value.reg()));
- dest->false_target()->Branch(zero);
- __ test(value.reg(), Immediate(kSmiTagMask));
- dest->true_target()->Branch(zero);
- __ fldz();
- __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
- __ FCmp();
- value.Unuse();
- dest->Split(not_zero);
- } else {
- // Fast case checks.
- // 'false' => false.
- __ cmp(value.reg(), FACTORY->false_value());
- dest->false_target()->Branch(equal);
-
- // 'true' => true.
- __ cmp(value.reg(), FACTORY->true_value());
- dest->true_target()->Branch(equal);
-
- // 'undefined' => false.
- __ cmp(value.reg(), FACTORY->undefined_value());
- dest->false_target()->Branch(equal);
-
- // Smi => false iff zero.
- STATIC_ASSERT(kSmiTag == 0);
- __ test(value.reg(), Operand(value.reg()));
- dest->false_target()->Branch(zero);
- __ test(value.reg(), Immediate(kSmiTagMask));
- dest->true_target()->Branch(zero);
-
- // Call the stub for all other cases.
- frame_->Push(&value); // Undo the Pop() from above.
- ToBooleanStub stub;
- Result temp = frame_->CallStub(&stub, 1);
- // Convert the result to a condition code.
- __ test(temp.reg(), Operand(temp.reg()));
- temp.Unuse();
- dest->Split(not_equal);
- }
-}
-
-
-// Perform or call the specialized stub for a binary operation. Requires the
-// three registers left, right and dst to be distinct and spilled. This
-// deferred operation has up to three entry points: The main one calls the
-// runtime system. The second is for when the result is a non-Smi. The
-// third is for when at least one of the inputs is non-Smi and we have SSE2.
-class DeferredInlineBinaryOperation: public DeferredCode {
- public:
- DeferredInlineBinaryOperation(Token::Value op,
- Register dst,
- Register left,
- Register right,
- TypeInfo left_info,
- TypeInfo right_info,
- OverwriteMode mode)
- : op_(op), dst_(dst), left_(left), right_(right),
- left_info_(left_info), right_info_(right_info), mode_(mode) {
- set_comment("[ DeferredInlineBinaryOperation");
- ASSERT(!left.is(right));
- }
-
- virtual void Generate();
-
- // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
- // Exit().
- virtual bool AutoSaveAndRestore() { return false; }
-
- void JumpToAnswerOutOfRange(Condition cond);
- void JumpToConstantRhs(Condition cond, Smi* smi_value);
- Label* NonSmiInputLabel();
-
- private:
- void GenerateAnswerOutOfRange();
- void GenerateNonSmiInput();
-
- Token::Value op_;
- Register dst_;
- Register left_;
- Register right_;
- TypeInfo left_info_;
- TypeInfo right_info_;
- OverwriteMode mode_;
- Label answer_out_of_range_;
- Label non_smi_input_;
- Label constant_rhs_;
- Smi* smi_value_;
-};
-
-
-Label* DeferredInlineBinaryOperation::NonSmiInputLabel() {
- if (Token::IsBitOp(op_) &&
- CpuFeatures::IsSupported(SSE2)) {
- return &non_smi_input_;
- } else {
- return entry_label();
- }
-}
-
-
-void DeferredInlineBinaryOperation::JumpToAnswerOutOfRange(Condition cond) {
- __ j(cond, &answer_out_of_range_);
-}
-
-
-void DeferredInlineBinaryOperation::JumpToConstantRhs(Condition cond,
- Smi* smi_value) {
- smi_value_ = smi_value;
- __ j(cond, &constant_rhs_);
-}
-
-
-void DeferredInlineBinaryOperation::Generate() {
- // Registers are not saved implicitly for this stub, so we should not
- // tread on the registers that were not passed to us.
- if (CpuFeatures::IsSupported(SSE2) &&
- ((op_ == Token::ADD) ||
- (op_ == Token::SUB) ||
- (op_ == Token::MUL) ||
- (op_ == Token::DIV))) {
- CpuFeatures::Scope use_sse2(SSE2);
- Label call_runtime, after_alloc_failure;
- Label left_smi, right_smi, load_right, do_op;
- if (!left_info_.IsSmi()) {
- __ test(left_, Immediate(kSmiTagMask));
- __ j(zero, &left_smi);
- if (!left_info_.IsNumber()) {
- __ cmp(FieldOperand(left_, HeapObject::kMapOffset),
- FACTORY->heap_number_map());
- __ j(not_equal, &call_runtime);
- }
- __ movdbl(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
- if (mode_ == OVERWRITE_LEFT) {
- __ mov(dst_, left_);
- }
- __ jmp(&load_right);
-
- __ bind(&left_smi);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left_);
- }
- __ SmiUntag(left_);
- __ cvtsi2sd(xmm0, Operand(left_));
- __ SmiTag(left_);
- if (mode_ == OVERWRITE_LEFT) {
- Label alloc_failure;
- __ push(left_);
- __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
- __ pop(left_);
- }
-
- __ bind(&load_right);
- if (!right_info_.IsSmi()) {
- __ test(right_, Immediate(kSmiTagMask));
- __ j(zero, &right_smi);
- if (!right_info_.IsNumber()) {
- __ cmp(FieldOperand(right_, HeapObject::kMapOffset),
- FACTORY->heap_number_map());
- __ j(not_equal, &call_runtime);
- }
- __ movdbl(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
- if (mode_ == OVERWRITE_RIGHT) {
- __ mov(dst_, right_);
- } else if (mode_ == NO_OVERWRITE) {
- Label alloc_failure;
- __ push(left_);
- __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
- __ pop(left_);
- }
- __ jmp(&do_op);
-
- __ bind(&right_smi);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(right_);
- }
- __ SmiUntag(right_);
- __ cvtsi2sd(xmm1, Operand(right_));
- __ SmiTag(right_);
- if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
- __ push(left_);
- __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
- __ pop(left_);
- }
-
- __ bind(&do_op);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movdbl(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
- Exit();
-
-
- __ bind(&after_alloc_failure);
- __ pop(left_);
- __ bind(&call_runtime);
- }
- // Register spilling is not done implicitly for this stub.
- // We can't postpone it any more now though.
- SaveRegisters();
-
- GenericBinaryOpStub stub(op_,
- mode_,
- NO_SMI_CODE_IN_STUB,
- TypeInfo::Combine(left_info_, right_info_));
- stub.GenerateCall(masm_, left_, right_);
- if (!dst_.is(eax)) __ mov(dst_, eax);
- RestoreRegisters();
- Exit();
-
- if (non_smi_input_.is_linked() || constant_rhs_.is_linked()) {
- GenerateNonSmiInput();
- }
- if (answer_out_of_range_.is_linked()) {
- GenerateAnswerOutOfRange();
- }
-}
-
-
-void DeferredInlineBinaryOperation::GenerateNonSmiInput() {
- // We know at least one of the inputs was not a Smi.
- // This is a third entry point into the deferred code.
- // We may not overwrite left_ because we want to be able
- // to call the handling code for non-smi answer and it
- // might want to overwrite the heap number in left_.
- ASSERT(!right_.is(dst_));
- ASSERT(!left_.is(dst_));
- ASSERT(!left_.is(right_));
- // This entry point is used for bit ops where the right hand side
- // is a constant Smi and the left hand side is a heap object. It
- // is also used for bit ops where both sides are unknown, but where
- // at least one of them is a heap object.
- bool rhs_is_constant = constant_rhs_.is_linked();
- // We can't generate code for both cases.
- ASSERT(!non_smi_input_.is_linked() || !constant_rhs_.is_linked());
-
- if (FLAG_debug_code) {
- __ int3(); // We don't fall through into this code.
- }
-
- __ bind(&non_smi_input_);
-
- if (rhs_is_constant) {
- __ bind(&constant_rhs_);
- // In this case the input is a heap object and it is in the dst_ register.
- // The left_ and right_ registers have not been initialized yet.
- __ mov(right_, Immediate(smi_value_));
- __ mov(left_, Operand(dst_));
- if (!CpuFeatures::IsSupported(SSE2)) {
- __ jmp(entry_label());
- return;
- } else {
- CpuFeatures::Scope use_sse2(SSE2);
- __ JumpIfNotNumber(dst_, left_info_, entry_label());
- __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
- __ SmiUntag(right_);
- }
- } else {
- // We know we have SSE2 here because otherwise the label is not linked (see
- // NonSmiInputLabel).
- CpuFeatures::Scope use_sse2(SSE2);
- // Handle the non-constant right hand side situation:
- if (left_info_.IsSmi()) {
- // Right is a heap object.
- __ JumpIfNotNumber(right_, right_info_, entry_label());
- __ ConvertToInt32(right_, right_, dst_, right_info_, entry_label());
- __ mov(dst_, Operand(left_));
- __ SmiUntag(dst_);
- } else if (right_info_.IsSmi()) {
- // Left is a heap object.
- __ JumpIfNotNumber(left_, left_info_, entry_label());
- __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
- __ SmiUntag(right_);
- } else {
- // Here we don't know if it's one or both that is a heap object.
- Label only_right_is_heap_object, got_both;
- __ mov(dst_, Operand(left_));
- __ SmiUntag(dst_, &only_right_is_heap_object);
- // Left was a heap object.
- __ JumpIfNotNumber(left_, left_info_, entry_label());
- __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
- __ SmiUntag(right_, &got_both);
- // Both were heap objects.
- __ rcl(right_, 1); // Put tag back.
- __ JumpIfNotNumber(right_, right_info_, entry_label());
- __ ConvertToInt32(right_, right_, no_reg, right_info_, entry_label());
- __ jmp(&got_both);
- __ bind(&only_right_is_heap_object);
- __ JumpIfNotNumber(right_, right_info_, entry_label());
- __ ConvertToInt32(right_, right_, no_reg, right_info_, entry_label());
- __ bind(&got_both);
- }
- }
- ASSERT(op_ == Token::BIT_AND ||
- op_ == Token::BIT_OR ||
- op_ == Token::BIT_XOR ||
- right_.is(ecx));
- switch (op_) {
- case Token::BIT_AND: __ and_(dst_, Operand(right_)); break;
- case Token::BIT_OR: __ or_(dst_, Operand(right_)); break;
- case Token::BIT_XOR: __ xor_(dst_, Operand(right_)); break;
- case Token::SHR: __ shr_cl(dst_); break;
- case Token::SAR: __ sar_cl(dst_); break;
- case Token::SHL: __ shl_cl(dst_); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check that the *unsigned* result fits in a smi. Neither of
- // the two high-order bits can be set:
- // * 0x80000000: high bit would be lost when smi tagging.
- // * 0x40000000: this number would convert to negative when smi
- // tagging.
- __ test(dst_, Immediate(0xc0000000));
- __ j(not_zero, &answer_out_of_range_);
- } else {
- // Check that the *signed* result fits in a smi.
- __ cmp(dst_, 0xc0000000);
- __ j(negative, &answer_out_of_range_);
- }
- __ SmiTag(dst_);
- Exit();
-}
-
-
-void DeferredInlineBinaryOperation::GenerateAnswerOutOfRange() {
- Label after_alloc_failure2;
- Label allocation_ok;
- __ bind(&after_alloc_failure2);
- // We have to allocate a number, causing a GC, while keeping hold of
- // the answer in dst_. The answer is not a Smi. We can't just call the
- // runtime shift function here because we already threw away the inputs.
- __ xor_(left_, Operand(left_));
- __ shl(dst_, 1); // Put top bit in carry flag and Smi tag the low bits.
- __ rcr(left_, 1); // Rotate with carry.
- __ push(dst_); // Smi tagged low 31 bits.
- __ push(left_); // 0 or 0x80000000, which is Smi tagged in both cases.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- if (!left_.is(eax)) {
- __ mov(left_, eax);
- }
- __ pop(right_); // High bit.
- __ pop(dst_); // Low 31 bits.
- __ shr(dst_, 1); // Put 0 in top bit.
- __ or_(dst_, Operand(right_));
- __ jmp(&allocation_ok);
-
- // This is the second entry point to the deferred code. It is used only by
- // the bit operations.
- // The dst_ register has the answer. It is not Smi tagged. If mode_ is
- // OVERWRITE_LEFT then left_ must contain either an overwritable heap number
- // or a Smi.
- // Put a heap number pointer in left_.
- __ bind(&answer_out_of_range_);
- SaveRegisters();
- if (mode_ == OVERWRITE_LEFT) {
- __ test(left_, Immediate(kSmiTagMask));
- __ j(not_zero, &allocation_ok);
- }
- // This trashes right_.
- __ AllocateHeapNumber(left_, right_, no_reg, &after_alloc_failure2);
- __ bind(&allocation_ok);
- if (CpuFeatures::IsSupported(SSE2) &&
- op_ != Token::SHR) {
- CpuFeatures::Scope use_sse2(SSE2);
- ASSERT(Token::IsBitOp(op_));
- // Signed conversion.
- __ cvtsi2sd(xmm0, Operand(dst_));
- __ movdbl(FieldOperand(left_, HeapNumber::kValueOffset), xmm0);
- } else {
- if (op_ == Token::SHR) {
- __ push(Immediate(0)); // High word of unsigned value.
- __ push(dst_);
- __ fild_d(Operand(esp, 0));
- __ Drop(2);
- } else {
- ASSERT(Token::IsBitOp(op_));
- __ push(dst_);
- __ fild_s(Operand(esp, 0)); // Signed conversion.
- __ pop(dst_);
- }
- __ fstp_d(FieldOperand(left_, HeapNumber::kValueOffset));
- }
- __ mov(dst_, left_);
- RestoreRegisters();
- Exit();
-}
-
-
-static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
- Token::Value op,
- const Result& right,
- const Result& left) {
- // Set TypeInfo of result according to the operation performed.
- // Rely on the fact that smis have a 31 bit payload on ia32.
- STATIC_ASSERT(kSmiValueSize == 31);
- switch (op) {
- case Token::COMMA:
- return right.type_info();
- case Token::OR:
- case Token::AND:
- // Result type can be either of the two input types.
- return operands_type;
- case Token::BIT_AND: {
- // Anding with positive Smis will give you a Smi.
- if (right.is_constant() && right.handle()->IsSmi() &&
- Smi::cast(*right.handle())->value() >= 0) {
- return TypeInfo::Smi();
- } else if (left.is_constant() && left.handle()->IsSmi() &&
- Smi::cast(*left.handle())->value() >= 0) {
- return TypeInfo::Smi();
- }
- return (operands_type.IsSmi())
- ? TypeInfo::Smi()
- : TypeInfo::Integer32();
- }
- case Token::BIT_OR: {
- // Oring with negative Smis will give you a Smi.
- if (right.is_constant() && right.handle()->IsSmi() &&
- Smi::cast(*right.handle())->value() < 0) {
- return TypeInfo::Smi();
- } else if (left.is_constant() && left.handle()->IsSmi() &&
- Smi::cast(*left.handle())->value() < 0) {
- return TypeInfo::Smi();
- }
- return (operands_type.IsSmi())
- ? TypeInfo::Smi()
- : TypeInfo::Integer32();
- }
- case Token::BIT_XOR:
- // Result is always a 32 bit integer. Smi property of inputs is preserved.
- return (operands_type.IsSmi())
- ? TypeInfo::Smi()
- : TypeInfo::Integer32();
- case Token::SAR:
- if (left.is_smi()) return TypeInfo::Smi();
- // Result is a smi if we shift by a constant >= 1, otherwise an integer32.
- // Shift amount is masked with 0x1F (ECMA standard 11.7.2).
- return (right.is_constant() && right.handle()->IsSmi()
- && (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
- ? TypeInfo::Smi()
- : TypeInfo::Integer32();
- case Token::SHR:
- // Result is a smi if we shift by a constant >= 2, an integer32 if
- // we shift by 1, and an unsigned 32-bit integer if we shift by 0.
- if (right.is_constant() && right.handle()->IsSmi()) {
- int shift_amount = Smi::cast(*right.handle())->value() & 0x1F;
- if (shift_amount > 1) {
- return TypeInfo::Smi();
- } else if (shift_amount > 0) {
- return TypeInfo::Integer32();
- }
- }
- return TypeInfo::Number();
- case Token::ADD:
- if (operands_type.IsSmi()) {
- // The Integer32 range is big enough to take the sum of any two Smis.
- return TypeInfo::Integer32();
- } else if (operands_type.IsNumber()) {
- return TypeInfo::Number();
- } else if (left.type_info().IsString() || right.type_info().IsString()) {
- return TypeInfo::String();
- } else {
- return TypeInfo::Unknown();
- }
- case Token::SHL:
- return TypeInfo::Integer32();
- case Token::SUB:
- // The Integer32 range is big enough to take the difference of any two
- // Smis.
- return (operands_type.IsSmi()) ?
- TypeInfo::Integer32() :
- TypeInfo::Number();
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- // Result is always a number.
- return TypeInfo::Number();
- default:
- UNREACHABLE();
- }
- UNREACHABLE();
- return TypeInfo::Unknown();
-}
-
-
-void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
- OverwriteMode overwrite_mode) {
- Comment cmnt(masm_, "[ BinaryOperation");
- Token::Value op = expr->op();
- Comment cmnt_token(masm_, Token::String(op));
-
- if (op == Token::COMMA) {
- // Simply discard left value.
- frame_->Nip(1);
- return;
- }
-
- Result right = frame_->Pop();
- Result left = frame_->Pop();
-
- if (op == Token::ADD) {
- const bool left_is_string = left.type_info().IsString();
- const bool right_is_string = right.type_info().IsString();
- // Make sure constant strings have string type info.
- ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
- left_is_string);
- ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
- right_is_string);
- if (left_is_string || right_is_string) {
- frame_->Push(&left);
- frame_->Push(&right);
- Result answer;
- if (left_is_string) {
- if (right_is_string) {
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- answer = frame_->CallStub(&stub, 2);
- } else {
- StringAddStub stub(NO_STRING_CHECK_LEFT_IN_STUB);
- answer = frame_->CallStub(&stub, 2);
- }
- } else if (right_is_string) {
- StringAddStub stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- answer = frame_->CallStub(&stub, 2);
- }
- answer.set_type_info(TypeInfo::String());
- frame_->Push(&answer);
- return;
- }
- // Neither operand is known to be a string.
- }
-
- bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
- bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
- bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
- bool right_is_non_smi_constant =
- right.is_constant() && !right.handle()->IsSmi();
-
- if (left_is_smi_constant && right_is_smi_constant) {
- // Compute the constant result at compile time, and leave it on the frame.
- int left_int = Smi::cast(*left.handle())->value();
- int right_int = Smi::cast(*right.handle())->value();
- if (FoldConstantSmis(op, left_int, right_int)) return;
- }
-
- // Get number type of left and right sub-expressions.
- TypeInfo operands_type =
- TypeInfo::Combine(left.type_info(), right.type_info());
-
- TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
-
- Result answer;
- if (left_is_non_smi_constant || right_is_non_smi_constant) {
- // Go straight to the slow case, with no smi code.
- GenericBinaryOpStub stub(op,
- overwrite_mode,
- NO_SMI_CODE_IN_STUB,
- operands_type);
- answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
- } else if (right_is_smi_constant) {
- answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
- false, overwrite_mode);
- } else if (left_is_smi_constant) {
- answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
- true, overwrite_mode);
- } else {
- // Set the flags based on the operation, type and loop nesting level.
- // Bit operations always assume they likely operate on Smis. Still only
- // generate the inline Smi check code if this operation is part of a loop.
- // For all other operations only inline the Smi check code for likely smis
- // if the operation is part of a loop.
- if (loop_nesting() > 0 &&
- (Token::IsBitOp(op) ||
- operands_type.IsInteger32() ||
- expr->type()->IsLikelySmi())) {
- answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
- } else {
- GenericBinaryOpStub stub(op,
- overwrite_mode,
- NO_GENERIC_BINARY_FLAGS,
- operands_type);
- answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
- }
- }
-
- answer.set_type_info(result_type);
- frame_->Push(&answer);
-}
-
-
-Result CodeGenerator::GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
- Result* left,
- Result* right) {
- if (stub->ArgsInRegistersSupported()) {
- stub->SetArgsInRegisters();
- return frame_->CallStub(stub, left, right);
- } else {
- frame_->Push(left);
- frame_->Push(right);
- return frame_->CallStub(stub, 2);
- }
-}
-
-
-bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
- Object* answer_object = HEAP->undefined_value();
- switch (op) {
- case Token::ADD:
- if (Smi::IsValid(left + right)) {
- answer_object = Smi::FromInt(left + right);
- }
- break;
- case Token::SUB:
- if (Smi::IsValid(left - right)) {
- answer_object = Smi::FromInt(left - right);
- }
- break;
- case Token::MUL: {
- double answer = static_cast<double>(left) * right;
- if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
- // If the product is zero and the non-zero factor is negative,
- // the spec requires us to return floating point negative zero.
- if (answer != 0 || (left >= 0 && right >= 0)) {
- answer_object = Smi::FromInt(static_cast<int>(answer));
- }
- }
- }
- break;
- case Token::DIV:
- case Token::MOD:
- break;
- case Token::BIT_OR:
- answer_object = Smi::FromInt(left | right);
- break;
- case Token::BIT_AND:
- answer_object = Smi::FromInt(left & right);
- break;
- case Token::BIT_XOR:
- answer_object = Smi::FromInt(left ^ right);
- break;
-
- case Token::SHL: {
- int shift_amount = right & 0x1F;
- if (Smi::IsValid(left << shift_amount)) {
- answer_object = Smi::FromInt(left << shift_amount);
- }
- break;
- }
- case Token::SHR: {
- int shift_amount = right & 0x1F;
- unsigned int unsigned_left = left;
- unsigned_left >>= shift_amount;
- if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
- answer_object = Smi::FromInt(unsigned_left);
- }
- break;
- }
- case Token::SAR: {
- int shift_amount = right & 0x1F;
- unsigned int unsigned_left = left;
- if (left < 0) {
- // Perform arithmetic shift of a negative number by
- // complementing number, logical shifting, complementing again.
- unsigned_left = ~unsigned_left;
- unsigned_left >>= shift_amount;
- unsigned_left = ~unsigned_left;
- } else {
- unsigned_left >>= shift_amount;
- }
- ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
- answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- if (answer_object->IsUndefined()) {
- return false;
- }
- frame_->Push(Handle<Object>(answer_object));
- return true;
-}
-
-
-void CodeGenerator::JumpIfBothSmiUsingTypeInfo(Result* left,
- Result* right,
- JumpTarget* both_smi) {
- TypeInfo left_info = left->type_info();
- TypeInfo right_info = right->type_info();
- if (left_info.IsDouble() || left_info.IsString() ||
- right_info.IsDouble() || right_info.IsString()) {
- // We know that left and right are not both smi. Don't do any tests.
- return;
- }
-
- if (left->reg().is(right->reg())) {
- if (!left_info.IsSmi()) {
- __ test(left->reg(), Immediate(kSmiTagMask));
- both_smi->Branch(zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
- left->Unuse();
- right->Unuse();
- both_smi->Jump();
- }
- } else if (!left_info.IsSmi()) {
- if (!right_info.IsSmi()) {
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(), left->reg());
- __ or_(temp.reg(), Operand(right->reg()));
- __ test(temp.reg(), Immediate(kSmiTagMask));
- temp.Unuse();
- both_smi->Branch(zero);
- } else {
- __ test(left->reg(), Immediate(kSmiTagMask));
- both_smi->Branch(zero);
- }
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
- if (!right_info.IsSmi()) {
- __ test(right->reg(), Immediate(kSmiTagMask));
- both_smi->Branch(zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
- left->Unuse();
- right->Unuse();
- both_smi->Jump();
- }
- }
-}
-
-
-void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
- Register right,
- Register scratch,
- TypeInfo left_info,
- TypeInfo right_info,
- DeferredCode* deferred) {
- JumpIfNotBothSmiUsingTypeInfo(left,
- right,
- scratch,
- left_info,
- right_info,
- deferred->entry_label());
-}
-
-
-void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
- Register right,
- Register scratch,
- TypeInfo left_info,
- TypeInfo right_info,
- Label* on_not_smi) {
- if (left.is(right)) {
- if (!left_info.IsSmi()) {
- __ test(left, Immediate(kSmiTagMask));
- __ j(not_zero, on_not_smi);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left);
- }
- } else if (!left_info.IsSmi()) {
- if (!right_info.IsSmi()) {
- __ mov(scratch, left);
- __ or_(scratch, Operand(right));
- __ test(scratch, Immediate(kSmiTagMask));
- __ j(not_zero, on_not_smi);
- } else {
- __ test(left, Immediate(kSmiTagMask));
- __ j(not_zero, on_not_smi);
- if (FLAG_debug_code) __ AbortIfNotSmi(right);
- }
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left);
- if (!right_info.IsSmi()) {
- __ test(right, Immediate(kSmiTagMask));
- __ j(not_zero, on_not_smi);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(right);
- }
- }
-}
-
-
-// Implements a binary operation using a deferred code object and some
-// inline code to operate on smis quickly.
-Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode) {
- // Copy the type info because left and right may be overwritten.
- TypeInfo left_type_info = left->type_info();
- TypeInfo right_type_info = right->type_info();
- Token::Value op = expr->op();
- Result answer;
- // Special handling of div and mod because they use fixed registers.
- if (op == Token::DIV || op == Token::MOD) {
- // We need eax as the quotient register, edx as the remainder
- // register, neither left nor right in eax or edx, and left copied
- // to eax.
- Result quotient;
- Result remainder;
- bool left_is_in_eax = false;
- // Step 1: get eax for quotient.
- if ((left->is_register() && left->reg().is(eax)) ||
- (right->is_register() && right->reg().is(eax))) {
- // One or both is in eax. Use a fresh non-edx register for
- // them.
- Result fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- if (fresh.reg().is(edx)) {
- remainder = fresh;
- fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- }
- if (left->is_register() && left->reg().is(eax)) {
- quotient = *left;
- *left = fresh;
- left_is_in_eax = true;
- }
- if (right->is_register() && right->reg().is(eax)) {
- quotient = *right;
- *right = fresh;
- }
- __ mov(fresh.reg(), eax);
- } else {
- // Neither left nor right is in eax.
- quotient = allocator_->Allocate(eax);
- }
- ASSERT(quotient.is_register() && quotient.reg().is(eax));
- ASSERT(!(left->is_register() && left->reg().is(eax)));
- ASSERT(!(right->is_register() && right->reg().is(eax)));
-
- // Step 2: get edx for remainder if necessary.
- if (!remainder.is_valid()) {
- if ((left->is_register() && left->reg().is(edx)) ||
- (right->is_register() && right->reg().is(edx))) {
- Result fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- if (left->is_register() && left->reg().is(edx)) {
- remainder = *left;
- *left = fresh;
- }
- if (right->is_register() && right->reg().is(edx)) {
- remainder = *right;
- *right = fresh;
- }
- __ mov(fresh.reg(), edx);
- } else {
- // Neither left nor right is in edx.
- remainder = allocator_->Allocate(edx);
- }
- }
- ASSERT(remainder.is_register() && remainder.reg().is(edx));
- ASSERT(!(left->is_register() && left->reg().is(edx)));
- ASSERT(!(right->is_register() && right->reg().is(edx)));
-
- left->ToRegister();
- right->ToRegister();
- frame_->Spill(eax);
- frame_->Spill(edx);
- // DeferredInlineBinaryOperation requires all the registers that it is
- // told about to be spilled and distinct.
- Result distinct_right = frame_->MakeDistinctAndSpilled(left, right);
-
- // Check that left and right are smi tagged.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- (op == Token::DIV) ? eax : edx,
- left->reg(),
- distinct_right.reg(),
- left_type_info,
- right_type_info,
- overwrite_mode);
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), edx,
- left_type_info, right_type_info, deferred);
- if (!left_is_in_eax) {
- __ mov(eax, left->reg());
- }
- // Sign extend eax into edx:eax.
- __ cdq();
- // Check for 0 divisor.
- __ test(right->reg(), Operand(right->reg()));
- deferred->Branch(zero);
- // Divide edx:eax by the right operand.
- __ idiv(right->reg());
-
- // Complete the operation.
- if (op == Token::DIV) {
- // Check for negative zero result. If result is zero, and divisor
- // is negative, return a floating point negative zero. The
- // virtual frame is unchanged in this block, so local control flow
- // can use a Label rather than a JumpTarget. If the context of this
- // expression will treat -0 like 0, do not do this test.
- if (!expr->no_negative_zero()) {
- Label non_zero_result;
- __ test(left->reg(), Operand(left->reg()));
- __ j(not_zero, &non_zero_result);
- __ test(right->reg(), Operand(right->reg()));
- deferred->Branch(negative);
- __ bind(&non_zero_result);
- }
- // Check for the corner case of dividing the most negative smi by
- // -1. We cannot use the overflow flag, since it is not set by
- // idiv instruction.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmp(eax, 0x40000000);
- deferred->Branch(equal);
- // Check that the remainder is zero.
- __ test(edx, Operand(edx));
- deferred->Branch(not_zero);
- // Tag the result and store it in the quotient register.
- __ SmiTag(eax);
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- answer = quotient;
- } else {
- ASSERT(op == Token::MOD);
- // Check for a negative zero result. If the result is zero, and
- // the dividend is negative, return a floating point negative
- // zero. The frame is unchanged in this block, so local control
- // flow can use a Label rather than a JumpTarget.
- if (!expr->no_negative_zero()) {
- Label non_zero_result;
- __ test(edx, Operand(edx));
- __ j(not_zero, &non_zero_result, taken);
- __ test(left->reg(), Operand(left->reg()));
- deferred->Branch(negative);
- __ bind(&non_zero_result);
- }
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- answer = remainder;
- }
- ASSERT(answer.is_valid());
- return answer;
- }
-
- // Special handling of shift operations because they use fixed
- // registers.
- if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
- // Move left out of ecx if necessary.
- if (left->is_register() && left->reg().is(ecx)) {
- *left = allocator_->Allocate();
- ASSERT(left->is_valid());
- __ mov(left->reg(), ecx);
- }
- right->ToRegister(ecx);
- left->ToRegister();
- ASSERT(left->is_register() && !left->reg().is(ecx));
- ASSERT(right->is_register() && right->reg().is(ecx));
- if (left_type_info.IsSmi()) {
- if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
- }
- if (right_type_info.IsSmi()) {
- if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
- }
-
- // We will modify right, it must be spilled.
- frame_->Spill(ecx);
- // DeferredInlineBinaryOperation requires all the registers that it is told
- // about to be spilled and distinct. We know that right is ecx and left is
- // not ecx.
- frame_->Spill(left->reg());
-
- // Use a fresh answer register to avoid spilling the left operand.
- answer = allocator_->Allocate();
- ASSERT(answer.is_valid());
-
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- answer.reg(),
- left->reg(),
- ecx,
- left_type_info,
- right_type_info,
- overwrite_mode);
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
- left_type_info, right_type_info,
- deferred->NonSmiInputLabel());
-
- // Untag both operands.
- __ mov(answer.reg(), left->reg());
- __ SmiUntag(answer.reg());
- __ SmiUntag(right->reg()); // Right is ecx.
-
- // Perform the operation.
- ASSERT(right->reg().is(ecx));
- switch (op) {
- case Token::SAR: {
- __ sar_cl(answer.reg());
- if (!left_type_info.IsSmi()) {
- // Check that the *signed* result fits in a smi.
- __ cmp(answer.reg(), 0xc0000000);
- deferred->JumpToAnswerOutOfRange(negative);
- }
- break;
- }
- case Token::SHR: {
- __ shr_cl(answer.reg());
- // Check that the *unsigned* result fits in a smi. Neither of
- // the two high-order bits can be set:
- // * 0x80000000: high bit would be lost when smi tagging.
- // * 0x40000000: this number would convert to negative when smi
- // tagging.
- // These two cases can only happen with shifts by 0 or 1 when
- // handed a valid smi. If the answer cannot be represented by a
- // smi, restore the left and right arguments, and jump to slow
- // case. The low bit of the left argument may be lost, but only
- // in a case where it is dropped anyway.
- __ test(answer.reg(), Immediate(0xc0000000));
- deferred->JumpToAnswerOutOfRange(not_zero);
- break;
- }
- case Token::SHL: {
- __ shl_cl(answer.reg());
- // Check that the *signed* result fits in a smi.
- __ cmp(answer.reg(), 0xc0000000);
- deferred->JumpToAnswerOutOfRange(negative);
- break;
- }
- default:
- UNREACHABLE();
- }
- // Smi-tag the result in answer.
- __ SmiTag(answer.reg());
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- ASSERT(answer.is_valid());
- return answer;
- }
-
- // Handle the other binary operations.
- left->ToRegister();
- right->ToRegister();
- // DeferredInlineBinaryOperation requires all the registers that it is told
- // about to be spilled.
- Result distinct_right = frame_->MakeDistinctAndSpilled(left, right);
- // A newly allocated register answer is used to hold the answer. The
- // registers containing left and right are not modified so they don't
- // need to be spilled in the fast case.
- answer = allocator_->Allocate();
- ASSERT(answer.is_valid());
-
- // Perform the smi tag check.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- answer.reg(),
- left->reg(),
- distinct_right.reg(),
- left_type_info,
- right_type_info,
- overwrite_mode);
- Label non_smi_bit_op;
- if (op != Token::BIT_OR) {
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
- left_type_info, right_type_info,
- deferred->NonSmiInputLabel());
- }
-
- __ mov(answer.reg(), left->reg());
- switch (op) {
- case Token::ADD:
- __ add(answer.reg(), Operand(right->reg()));
- deferred->Branch(overflow);
- break;
-
- case Token::SUB:
- __ sub(answer.reg(), Operand(right->reg()));
- deferred->Branch(overflow);
- break;
-
- case Token::MUL: {
- // If the smi tag is 0 we can just leave the tag on one operand.
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
- // Remove smi tag from the left operand (but keep sign).
- // Left-hand operand has been copied into answer.
- __ SmiUntag(answer.reg());
- // Do multiplication of smis, leaving result in answer.
- __ imul(answer.reg(), Operand(right->reg()));
- // Go slow on overflows.
- deferred->Branch(overflow);
- // Check for negative zero result. If product is zero, and one
- // argument is negative, go to slow case. The frame is unchanged
- // in this block, so local control flow can use a Label rather
- // than a JumpTarget.
- if (!expr->no_negative_zero()) {
- Label non_zero_result;
- __ test(answer.reg(), Operand(answer.reg()));
- __ j(not_zero, &non_zero_result, taken);
- __ mov(answer.reg(), left->reg());
- __ or_(answer.reg(), Operand(right->reg()));
- deferred->Branch(negative);
- __ xor_(answer.reg(), Operand(answer.reg())); // Positive 0 is correct.
- __ bind(&non_zero_result);
- }
- break;
- }
-
- case Token::BIT_OR:
- __ or_(answer.reg(), Operand(right->reg()));
- __ test(answer.reg(), Immediate(kSmiTagMask));
- __ j(not_zero, deferred->NonSmiInputLabel());
- break;
-
- case Token::BIT_AND:
- __ and_(answer.reg(), Operand(right->reg()));
- break;
-
- case Token::BIT_XOR:
- __ xor_(answer.reg(), Operand(right->reg()));
- break;
-
- default:
- UNREACHABLE();
- break;
- }
-
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- ASSERT(answer.is_valid());
- return answer;
-}
-
-
-// Call the appropriate binary operation stub to compute src op value
-// and leave the result in dst.
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
- DeferredInlineSmiOperation(Token::Value op,
- Register dst,
- Register src,
- TypeInfo type_info,
- Smi* value,
- OverwriteMode overwrite_mode)
- : op_(op),
- dst_(dst),
- src_(src),
- type_info_(type_info),
- value_(value),
- overwrite_mode_(overwrite_mode) {
- if (type_info.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
- set_comment("[ DeferredInlineSmiOperation");
- }
-
- virtual void Generate();
-
- private:
- Token::Value op_;
- Register dst_;
- Register src_;
- TypeInfo type_info_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiOperation::Generate() {
- // For mod we don't generate all the Smi code inline.
- GenericBinaryOpStub stub(
- op_,
- overwrite_mode_,
- (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB,
- TypeInfo::Combine(TypeInfo::Smi(), type_info_));
- stub.GenerateCall(masm_, src_, value_);
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// Call the appropriate binary operation stub to compute value op src
-// and leave the result in dst.
-class DeferredInlineSmiOperationReversed: public DeferredCode {
- public:
- DeferredInlineSmiOperationReversed(Token::Value op,
- Register dst,
- Smi* value,
- Register src,
- TypeInfo type_info,
- OverwriteMode overwrite_mode)
- : op_(op),
- dst_(dst),
- type_info_(type_info),
- value_(value),
- src_(src),
- overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiOperationReversed");
- }
-
- virtual void Generate();
-
- private:
- Token::Value op_;
- Register dst_;
- TypeInfo type_info_;
- Smi* value_;
- Register src_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiOperationReversed::Generate() {
- GenericBinaryOpStub stub(
- op_,
- overwrite_mode_,
- NO_SMI_CODE_IN_STUB,
- TypeInfo::Combine(TypeInfo::Smi(), type_info_));
- stub.GenerateCall(masm_, value_, src_);
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// The result of src + value is in dst. It either overflowed or was not
-// smi tagged. Undo the speculative addition and call the appropriate
-// specialized stub for add. The result is left in dst.
-class DeferredInlineSmiAdd: public DeferredCode {
- public:
- DeferredInlineSmiAdd(Register dst,
- TypeInfo type_info,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst),
- type_info_(type_info),
- value_(value),
- overwrite_mode_(overwrite_mode) {
- if (type_info_.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
- set_comment("[ DeferredInlineSmiAdd");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- TypeInfo type_info_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiAdd::Generate() {
- // Undo the optimistic add operation and call the shared stub.
- __ sub(Operand(dst_), Immediate(value_));
- GenericBinaryOpStub igostub(
- Token::ADD,
- overwrite_mode_,
- NO_SMI_CODE_IN_STUB,
- TypeInfo::Combine(TypeInfo::Smi(), type_info_));
- igostub.GenerateCall(masm_, dst_, value_);
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// The result of value + src is in dst. It either overflowed or was not
-// smi tagged. Undo the speculative addition and call the appropriate
-// specialized stub for add. The result is left in dst.
-class DeferredInlineSmiAddReversed: public DeferredCode {
- public:
- DeferredInlineSmiAddReversed(Register dst,
- TypeInfo type_info,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst),
- type_info_(type_info),
- value_(value),
- overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiAddReversed");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- TypeInfo type_info_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiAddReversed::Generate() {
- // Undo the optimistic add operation and call the shared stub.
- __ sub(Operand(dst_), Immediate(value_));
- GenericBinaryOpStub igostub(
- Token::ADD,
- overwrite_mode_,
- NO_SMI_CODE_IN_STUB,
- TypeInfo::Combine(TypeInfo::Smi(), type_info_));
- igostub.GenerateCall(masm_, value_, dst_);
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// The result of src - value is in dst. It either overflowed or was not
-// smi tagged. Undo the speculative subtraction and call the
-// appropriate specialized stub for subtract. The result is left in
-// dst.
-class DeferredInlineSmiSub: public DeferredCode {
- public:
- DeferredInlineSmiSub(Register dst,
- TypeInfo type_info,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst),
- type_info_(type_info),
- value_(value),
- overwrite_mode_(overwrite_mode) {
- if (type_info.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
- set_comment("[ DeferredInlineSmiSub");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- TypeInfo type_info_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiSub::Generate() {
- // Undo the optimistic sub operation and call the shared stub.
- __ add(Operand(dst_), Immediate(value_));
- GenericBinaryOpStub igostub(
- Token::SUB,
- overwrite_mode_,
- NO_SMI_CODE_IN_STUB,
- TypeInfo::Combine(TypeInfo::Smi(), type_info_));
- igostub.GenerateCall(masm_, dst_, value_);
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
- Result* operand,
- Handle<Object> value,
- bool reversed,
- OverwriteMode overwrite_mode) {
- // Generate inline code for a binary operation when one of the
- // operands is a constant smi. Consumes the argument "operand".
- if (IsUnsafeSmi(value)) {
- Result unsafe_operand(value);
- if (reversed) {
- return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
- overwrite_mode);
- } else {
- return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
- overwrite_mode);
- }
- }
-
- // Get the literal value.
- Smi* smi_value = Smi::cast(*value);
- int int_value = smi_value->value();
-
- Token::Value op = expr->op();
- Result answer;
- switch (op) {
- case Token::ADD: {
- operand->ToRegister();
- frame_->Spill(operand->reg());
-
- // Optimistically add. Call the specialized add stub if the
- // result is not a smi or overflows.
- DeferredCode* deferred = NULL;
- if (reversed) {
- deferred = new DeferredInlineSmiAddReversed(operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- } else {
- deferred = new DeferredInlineSmiAdd(operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- }
- __ add(Operand(operand->reg()), Immediate(value));
- deferred->Branch(overflow);
- if (!operand->type_info().IsSmi()) {
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(operand->reg());
- }
- deferred->BindExit();
- answer = *operand;
- break;
- }
-
- case Token::SUB: {
- DeferredCode* deferred = NULL;
- if (reversed) {
- // The reversed case is only hit when the right operand is not a
- // constant.
- ASSERT(operand->is_register());
- answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- __ Set(answer.reg(), Immediate(value));
- deferred =
- new DeferredInlineSmiOperationReversed(op,
- answer.reg(),
- smi_value,
- operand->reg(),
- operand->type_info(),
- overwrite_mode);
- __ sub(answer.reg(), Operand(operand->reg()));
- } else {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- answer = *operand;
- deferred = new DeferredInlineSmiSub(operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- __ sub(Operand(operand->reg()), Immediate(value));
- }
- deferred->Branch(overflow);
- if (!operand->type_info().IsSmi()) {
- __ test(answer.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(operand->reg());
- }
- deferred->BindExit();
- operand->Unuse();
- break;
- }
-
- case Token::SAR:
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- frame_->Spill(operand->reg());
- if (!operand->type_info().IsSmi()) {
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- if (shift_value > 0) {
- __ sar(operand->reg(), shift_value);
- __ and_(operand->reg(), ~kSmiTagMask);
- }
- deferred->BindExit();
- } else {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(operand->reg());
- }
- if (shift_value > 0) {
- __ sar(operand->reg(), shift_value);
- __ and_(operand->reg(), ~kSmiTagMask);
- }
- }
- answer = *operand;
- }
- break;
-
- case Token::SHR:
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- answer.reg(),
- operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- if (!operand->type_info().IsSmi()) {
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(operand->reg());
- }
- __ mov(answer.reg(), operand->reg());
- __ SmiUntag(answer.reg());
- __ shr(answer.reg(), shift_value);
- // A negative Smi shifted right two is in the positive Smi range.
- if (shift_value < 2) {
- __ test(answer.reg(), Immediate(0xc0000000));
- deferred->Branch(not_zero);
- }
- operand->Unuse();
- __ SmiTag(answer.reg());
- deferred->BindExit();
- }
- break;
-
- case Token::SHL:
- if (reversed) {
- // Move operand into ecx and also into a second register.
- // If operand is already in a register, take advantage of that.
- // This lets us modify ecx, but still bail out to deferred code.
- Result right;
- Result right_copy_in_ecx;
- TypeInfo right_type_info = operand->type_info();
- operand->ToRegister();
- if (operand->reg().is(ecx)) {
- right = allocator()->Allocate();
- __ mov(right.reg(), ecx);
- frame_->Spill(ecx);
- right_copy_in_ecx = *operand;
- } else {
- right_copy_in_ecx = allocator()->Allocate(ecx);
- __ mov(ecx, operand->reg());
- right = *operand;
- }
- operand->Unuse();
-
- answer = allocator()->Allocate();
- DeferredInlineSmiOperationReversed* deferred =
- new DeferredInlineSmiOperationReversed(op,
- answer.reg(),
- smi_value,
- right.reg(),
- right_type_info,
- overwrite_mode);
- __ mov(answer.reg(), Immediate(int_value));
- __ sar(ecx, kSmiTagSize);
- if (!right_type_info.IsSmi()) {
- deferred->Branch(carry);
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(right.reg());
- }
- __ shl_cl(answer.reg());
- __ cmp(answer.reg(), 0xc0000000);
- deferred->Branch(sign);
- __ SmiTag(answer.reg());
-
- deferred->BindExit();
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- if (shift_value == 0) {
- // Spill operand so it can be overwritten in the slow case.
- frame_->Spill(operand->reg());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- deferred->BindExit();
- answer = *operand;
- } else {
- // Use a fresh temporary for nonzero shift values.
- answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- answer.reg(),
- operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- if (!operand->type_info().IsSmi()) {
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(operand->reg());
- }
- __ mov(answer.reg(), operand->reg());
- STATIC_ASSERT(kSmiTag == 0); // adjust code if not the case
- // We do no shifts, only the Smi conversion, if shift_value is 1.
- if (shift_value > 1) {
- __ shl(answer.reg(), shift_value - 1);
- }
- // Convert int result to Smi, checking that it is in int range.
- STATIC_ASSERT(kSmiTagSize == 1); // adjust code if not the case
- __ add(answer.reg(), Operand(answer.reg()));
- deferred->Branch(overflow);
- deferred->BindExit();
- operand->Unuse();
- }
- }
- break;
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND: {
- operand->ToRegister();
- // DeferredInlineBinaryOperation requires all the registers that it is
- // told about to be spilled.
- frame_->Spill(operand->reg());
- DeferredInlineBinaryOperation* deferred = NULL;
- if (!operand->type_info().IsSmi()) {
- Result left = allocator()->Allocate();
- ASSERT(left.is_valid());
- Result right = allocator()->Allocate();
- ASSERT(right.is_valid());
- deferred = new DeferredInlineBinaryOperation(
- op,
- operand->reg(),
- left.reg(),
- right.reg(),
- operand->type_info(),
- TypeInfo::Smi(),
- overwrite_mode == NO_OVERWRITE ? NO_OVERWRITE : OVERWRITE_LEFT);
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->JumpToConstantRhs(not_zero, smi_value);
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(operand->reg());
- }
- if (op == Token::BIT_AND) {
- __ and_(Operand(operand->reg()), Immediate(value));
- } else if (op == Token::BIT_XOR) {
- if (int_value != 0) {
- __ xor_(Operand(operand->reg()), Immediate(value));
- }
- } else {
- ASSERT(op == Token::BIT_OR);
- if (int_value != 0) {
- __ or_(Operand(operand->reg()), Immediate(value));
- }
- }
- if (deferred != NULL) deferred->BindExit();
- answer = *operand;
- break;
- }
-
- case Token::DIV:
- if (!reversed && int_value == 2) {
- operand->ToRegister();
- frame_->Spill(operand->reg());
-
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- // Check that lowest log2(value) bits of operand are zero, and test
- // smi tag at the same time.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- __ test(operand->reg(), Immediate(3));
- deferred->Branch(not_zero); // Branch if non-smi or odd smi.
- __ sar(operand->reg(), 1);
- deferred->BindExit();
- answer = *operand;
- } else {
- // Cannot fall through MOD to default case, so we duplicate the
- // default case here.
- Result constant_operand(value);
- if (reversed) {
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
- overwrite_mode);
- }
- }
- break;
-
- // Generate inline code for mod of powers of 2 and negative powers of 2.
- case Token::MOD:
- if (!reversed &&
- int_value != 0 &&
- (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- // Check for negative or non-Smi left hand side.
- __ test(operand->reg(), Immediate(kSmiTagMask | kSmiSignMask));
- deferred->Branch(not_zero);
- if (int_value < 0) int_value = -int_value;
- if (int_value == 1) {
- __ mov(operand->reg(), Immediate(Smi::FromInt(0)));
- } else {
- __ and_(operand->reg(), (int_value << kSmiTagSize) - 1);
- }
- deferred->BindExit();
- answer = *operand;
- break;
- }
- // Fall through if we did not find a power of 2 on the right hand side!
- // The next case must be the default.
-
- default: {
- Result constant_operand(value);
- if (reversed) {
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
- overwrite_mode);
- }
- break;
- }
- }
- ASSERT(answer.is_valid());
- return answer;
-}
-
-
-static bool CouldBeNaN(const Result& result) {
- if (result.type_info().IsSmi()) return false;
- if (result.type_info().IsInteger32()) return false;
- if (!result.is_constant()) return true;
- if (!result.handle()->IsHeapNumber()) return false;
- return isnan(HeapNumber::cast(*result.handle())->value());
-}
-
-
-// Convert from signed to unsigned comparison to match the way EFLAGS are set
-// by FPU and XMM compare instructions.
-static Condition DoubleCondition(Condition cc) {
- switch (cc) {
- case less: return below;
- case equal: return equal;
- case less_equal: return below_equal;
- case greater: return above;
- case greater_equal: return above_equal;
- default: UNREACHABLE();
- }
- UNREACHABLE();
- return equal;
-}
-
-
-static CompareFlags ComputeCompareFlags(NaNInformation nan_info,
- bool inline_number_compare) {
- CompareFlags flags = NO_SMI_COMPARE_IN_STUB;
- if (nan_info == kCantBothBeNaN) {
- flags = static_cast<CompareFlags>(flags | CANT_BOTH_BE_NAN);
- }
- if (inline_number_compare) {
- flags = static_cast<CompareFlags>(flags | NO_NUMBER_COMPARE_IN_STUB);
- }
- return flags;
-}
-
-
-void CodeGenerator::Comparison(AstNode* node,
- Condition cc,
- bool strict,
- ControlDestination* dest) {
- // Strict only makes sense for equality comparisons.
- ASSERT(!strict || cc == equal);
-
- Result left_side;
- Result right_side;
- // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
- if (cc == greater || cc == less_equal) {
- cc = ReverseCondition(cc);
- left_side = frame_->Pop();
- right_side = frame_->Pop();
- } else {
- right_side = frame_->Pop();
- left_side = frame_->Pop();
- }
- ASSERT(cc == less || cc == equal || cc == greater_equal);
-
- // If either side is a constant smi, optimize the comparison.
- bool left_side_constant_smi = false;
- bool left_side_constant_null = false;
- bool left_side_constant_1_char_string = false;
- if (left_side.is_constant()) {
- left_side_constant_smi = left_side.handle()->IsSmi();
- left_side_constant_null = left_side.handle()->IsNull();
- left_side_constant_1_char_string =
- (left_side.handle()->IsString() &&
- String::cast(*left_side.handle())->length() == 1 &&
- String::cast(*left_side.handle())->IsAsciiRepresentation());
- }
- bool right_side_constant_smi = false;
- bool right_side_constant_null = false;
- bool right_side_constant_1_char_string = false;
- if (right_side.is_constant()) {
- right_side_constant_smi = right_side.handle()->IsSmi();
- right_side_constant_null = right_side.handle()->IsNull();
- right_side_constant_1_char_string =
- (right_side.handle()->IsString() &&
- String::cast(*right_side.handle())->length() == 1 &&
- String::cast(*right_side.handle())->IsAsciiRepresentation());
- }
-
- if (left_side_constant_smi || right_side_constant_smi) {
- bool is_loop_condition = (node->AsExpression() != NULL) &&
- node->AsExpression()->is_loop_condition();
- ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
- left_side_constant_smi, right_side_constant_smi,
- is_loop_condition);
- } else if (left_side_constant_1_char_string ||
- right_side_constant_1_char_string) {
- if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
- // Trivial case, comparing two constants.
- int left_value = String::cast(*left_side.handle())->Get(0);
- int right_value = String::cast(*right_side.handle())->Get(0);
- switch (cc) {
- case less:
- dest->Goto(left_value < right_value);
- break;
- case equal:
- dest->Goto(left_value == right_value);
- break;
- case greater_equal:
- dest->Goto(left_value >= right_value);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // Only one side is a constant 1 character string.
- // If left side is a constant 1-character string, reverse the operands.
- // Since one side is a constant string, conversion order does not matter.
- if (left_side_constant_1_char_string) {
- Result temp = left_side;
- left_side = right_side;
- right_side = temp;
- cc = ReverseCondition(cc);
- // This may reintroduce greater or less_equal as the value of cc.
- // CompareStub and the inline code both support all values of cc.
- }
- // Implement comparison against a constant string, inlining the case
- // where both sides are strings.
- left_side.ToRegister();
-
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
- JumpTarget is_not_string, is_string;
- Register left_reg = left_side.reg();
- Handle<Object> right_val = right_side.handle();
- ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
- __ test(left_side.reg(), Immediate(kSmiTagMask));
- is_not_string.Branch(zero, &left_side);
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(),
- FieldOperand(left_side.reg(), HeapObject::kMapOffset));
- __ movzx_b(temp.reg(),
- FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
- // If we are testing for equality then make use of the symbol shortcut.
- // Check if the right left hand side has the same type as the left hand
- // side (which is always a symbol).
- if (cc == equal) {
- Label not_a_symbol;
- STATIC_ASSERT(kSymbolTag != 0);
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- __ test(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
- __ j(zero, &not_a_symbol);
- // They are symbols, so do identity compare.
- __ cmp(left_side.reg(), right_side.handle());
- dest->true_target()->Branch(equal);
- dest->false_target()->Branch(not_equal);
- __ bind(&not_a_symbol);
- }
- // Call the compare stub if the left side is not a flat ascii string.
- __ and_(temp.reg(),
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
- __ cmp(temp.reg(), kStringTag | kSeqStringTag | kAsciiStringTag);
- temp.Unuse();
- is_string.Branch(equal, &left_side);
-
- // Setup and call the compare stub.
- is_not_string.Bind(&left_side);
- CompareFlags flags =
- static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_COMPARE_IN_STUB);
- CompareStub stub(cc, strict, flags);
- Result result = frame_->CallStub(&stub, &left_side, &right_side);
- result.ToRegister();
- __ cmp(result.reg(), 0);
- result.Unuse();
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
-
- is_string.Bind(&left_side);
- // left_side is a sequential ASCII string.
- left_side = Result(left_reg);
- right_side = Result(right_val);
- // Test string equality and comparison.
- Label comparison_done;
- if (cc == equal) {
- __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ j(not_equal, &comparison_done);
- uint8_t char_value =
- static_cast<uint8_t>(String::cast(*right_val)->Get(0));
- __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
- char_value);
- } else {
- __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- // If the length is 0 then the jump is taken and the flags
- // correctly represent being less than the one-character string.
- __ j(below, &comparison_done);
- // Compare the first character of the string with the
- // constant 1-character string.
- uint8_t char_value =
- static_cast<uint8_t>(String::cast(*right_val)->Get(0));
- __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
- char_value);
- __ j(not_equal, &comparison_done);
- // If the first character is the same then the long string sorts after
- // the short one.
- __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- }
- __ bind(&comparison_done);
- left_side.Unuse();
- right_side.Unuse();
- dest->Split(cc);
- }
- } else {
- // Neither side is a constant Smi, constant 1-char string or constant null.
- // If either side is a non-smi constant, or known to be a heap number,
- // skip the smi check.
- bool known_non_smi =
- (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
- (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
- left_side.type_info().IsDouble() ||
- right_side.type_info().IsDouble();
-
- NaNInformation nan_info =
- (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
- kBothCouldBeNaN :
- kCantBothBeNaN;
-
- // Inline number comparison handling any combination of smi's and heap
- // numbers if:
- // code is in a loop
- // the compare operation is different from equal
- // compare is not a for-loop comparison
- // The reason for excluding equal is that it will most likely be done
- // with smi's (not heap numbers) and the code to comparing smi's is inlined
- // separately. The same reason applies for for-loop comparison which will
- // also most likely be smi comparisons.
- bool is_loop_condition = (node->AsExpression() != NULL)
- && node->AsExpression()->is_loop_condition();
- bool inline_number_compare =
- loop_nesting() > 0 && cc != equal && !is_loop_condition;
-
- // Left and right needed in registers for the following code.
- left_side.ToRegister();
- right_side.ToRegister();
-
- if (known_non_smi) {
- // Inlined equality check:
- // If at least one of the objects is not NaN, then if the objects
- // are identical, they are equal.
- if (nan_info == kCantBothBeNaN && cc == equal) {
- __ cmp(left_side.reg(), Operand(right_side.reg()));
- dest->true_target()->Branch(equal);
- }
-
- // Inlined number comparison:
- if (inline_number_compare) {
- GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
- }
-
- // End of in-line compare, call out to the compare stub. Don't include
- // number comparison in the stub if it was inlined.
- CompareFlags flags = ComputeCompareFlags(nan_info, inline_number_compare);
- CompareStub stub(cc, strict, flags);
- Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- __ test(answer.reg(), Operand(answer.reg()));
- answer.Unuse();
- dest->Split(cc);
- } else {
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
- JumpTarget is_smi;
- Register left_reg = left_side.reg();
- Register right_reg = right_side.reg();
-
- // In-line check for comparing two smis.
- JumpIfBothSmiUsingTypeInfo(&left_side, &right_side, &is_smi);
-
- if (has_valid_frame()) {
- // Inline the equality check if both operands can't be a NaN. If both
- // objects are the same they are equal.
- if (nan_info == kCantBothBeNaN && cc == equal) {
- __ cmp(left_side.reg(), Operand(right_side.reg()));
- dest->true_target()->Branch(equal);
- }
-
- // Inlined number comparison:
- if (inline_number_compare) {
- GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
- }
-
- // End of in-line compare, call out to the compare stub. Don't include
- // number comparison in the stub if it was inlined.
- CompareFlags flags =
- ComputeCompareFlags(nan_info, inline_number_compare);
- CompareStub stub(cc, strict, flags);
- Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- __ test(answer.reg(), Operand(answer.reg()));
- answer.Unuse();
- if (is_smi.is_linked()) {
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
- } else {
- dest->Split(cc);
- }
- }
-
- if (is_smi.is_linked()) {
- is_smi.Bind();
- left_side = Result(left_reg);
- right_side = Result(right_reg);
- __ cmp(left_side.reg(), Operand(right_side.reg()));
- right_side.Unuse();
- left_side.Unuse();
- dest->Split(cc);
- }
- }
- }
-}
-
-
-void CodeGenerator::ConstantSmiComparison(Condition cc,
- bool strict,
- ControlDestination* dest,
- Result* left_side,
- Result* right_side,
- bool left_side_constant_smi,
- bool right_side_constant_smi,
- bool is_loop_condition) {
- if (left_side_constant_smi && right_side_constant_smi) {
- // Trivial case, comparing two constants.
- int left_value = Smi::cast(*left_side->handle())->value();
- int right_value = Smi::cast(*right_side->handle())->value();
- switch (cc) {
- case less:
- dest->Goto(left_value < right_value);
- break;
- case equal:
- dest->Goto(left_value == right_value);
- break;
- case greater_equal:
- dest->Goto(left_value >= right_value);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // Only one side is a constant Smi.
- // If left side is a constant Smi, reverse the operands.
- // Since one side is a constant Smi, conversion order does not matter.
- if (left_side_constant_smi) {
- Result* temp = left_side;
- left_side = right_side;
- right_side = temp;
- cc = ReverseCondition(cc);
- // This may re-introduce greater or less_equal as the value of cc.
- // CompareStub and the inline code both support all values of cc.
- }
- // Implement comparison against a constant Smi, inlining the case
- // where both sides are Smis.
- left_side->ToRegister();
- Register left_reg = left_side->reg();
- Handle<Object> right_val = right_side->handle();
-
- if (left_side->is_smi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left_reg);
- }
- // Test smi equality and comparison by signed int comparison.
- if (IsUnsafeSmi(right_side->handle())) {
- right_side->ToRegister();
- __ cmp(left_reg, Operand(right_side->reg()));
- } else {
- __ cmp(Operand(left_reg), Immediate(right_side->handle()));
- }
- left_side->Unuse();
- right_side->Unuse();
- dest->Split(cc);
- } else {
- // Only the case where the left side could possibly be a non-smi is left.
- JumpTarget is_smi;
- if (cc == equal) {
- // We can do the equality comparison before the smi check.
- __ cmp(Operand(left_reg), Immediate(right_side->handle()));
- dest->true_target()->Branch(equal);
- __ test(left_reg, Immediate(kSmiTagMask));
- dest->false_target()->Branch(zero);
- } else {
- // Do the smi check, then the comparison.
- __ test(left_reg, Immediate(kSmiTagMask));
- is_smi.Branch(zero, left_side, right_side);
- }
-
- // Jump or fall through to here if we are comparing a non-smi to a
- // constant smi. If the non-smi is a heap number and this is not
- // a loop condition, inline the floating point code.
- if (!is_loop_condition &&
- CpuFeatures::IsSupported(SSE2)) {
- // Right side is a constant smi and left side has been checked
- // not to be a smi.
- CpuFeatures::Scope use_sse2(SSE2);
- JumpTarget not_number;
- __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
- Immediate(FACTORY->heap_number_map()));
- not_number.Branch(not_equal, left_side);
- __ movdbl(xmm1,
- FieldOperand(left_reg, HeapNumber::kValueOffset));
- int value = Smi::cast(*right_val)->value();
- if (value == 0) {
- __ xorpd(xmm0, xmm0);
- } else {
- Result temp = allocator()->Allocate();
- __ mov(temp.reg(), Immediate(value));
- __ cvtsi2sd(xmm0, Operand(temp.reg()));
- temp.Unuse();
- }
- __ ucomisd(xmm1, xmm0);
- // Jump to builtin for NaN.
- not_number.Branch(parity_even, left_side);
- left_side->Unuse();
- dest->true_target()->Branch(DoubleCondition(cc));
- dest->false_target()->Jump();
- not_number.Bind(left_side);
- }
-
- // Setup and call the compare stub.
- CompareFlags flags =
- static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
- CompareStub stub(cc, strict, flags);
- Result result = frame_->CallStub(&stub, left_side, right_side);
- result.ToRegister();
- __ test(result.reg(), Operand(result.reg()));
- result.Unuse();
- if (cc == equal) {
- dest->Split(cc);
- } else {
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
-
- // It is important for performance for this case to be at the end.
- is_smi.Bind(left_side, right_side);
- if (IsUnsafeSmi(right_side->handle())) {
- right_side->ToRegister();
- __ cmp(left_reg, Operand(right_side->reg()));
- } else {
- __ cmp(Operand(left_reg), Immediate(right_side->handle()));
- }
- left_side->Unuse();
- right_side->Unuse();
- dest->Split(cc);
- }
- }
- }
-}
-
-
-// Check that the comparison operand is a number. Jump to not_numbers jump
-// target passing the left and right result if the operand is not a number.
-static void CheckComparisonOperand(MacroAssembler* masm_,
- Result* operand,
- Result* left_side,
- Result* right_side,
- JumpTarget* not_numbers) {
- // Perform check if operand is not known to be a number.
- if (!operand->type_info().IsNumber()) {
- Label done;
- __ test(operand->reg(), Immediate(kSmiTagMask));
- __ j(zero, &done);
- __ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset),
- Immediate(FACTORY->heap_number_map()));
- not_numbers->Branch(not_equal, left_side, right_side, not_taken);
- __ bind(&done);
- }
-}
-
-
-// Load a comparison operand to the FPU stack. This assumes that the operand has
-// already been checked and is a number.
-static void LoadComparisonOperand(MacroAssembler* masm_,
- Result* operand) {
- Label done;
- if (operand->type_info().IsDouble()) {
- // Operand is known to be a heap number, just load it.
- __ fld_d(FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- } else if (operand->type_info().IsSmi()) {
- // Operand is known to be a smi. Convert it to double and keep the original
- // smi.
- __ SmiUntag(operand->reg());
- __ push(operand->reg());
- __ fild_s(Operand(esp, 0));
- __ pop(operand->reg());
- __ SmiTag(operand->reg());
- } else {
- // Operand type not known, check for smi otherwise assume heap number.
- Label smi;
- __ test(operand->reg(), Immediate(kSmiTagMask));
- __ j(zero, &smi);
- __ fld_d(FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- __ jmp(&done);
- __ bind(&smi);
- __ SmiUntag(operand->reg());
- __ push(operand->reg());
- __ fild_s(Operand(esp, 0));
- __ pop(operand->reg());
- __ SmiTag(operand->reg());
- __ jmp(&done);
- }
- __ bind(&done);
-}
-
-
-// Load a comparison operand into into a XMM register. Jump to not_numbers jump
-// target passing the left and right result if the operand is not a number.
-static void LoadComparisonOperandSSE2(MacroAssembler* masm_,
- Result* operand,
- XMMRegister xmm_reg,
- Result* left_side,
- Result* right_side,
- JumpTarget* not_numbers) {
- Label done;
- if (operand->type_info().IsDouble()) {
- // Operand is known to be a heap number, just load it.
- __ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- } else if (operand->type_info().IsSmi()) {
- // Operand is known to be a smi. Convert it to double and keep the original
- // smi.
- __ SmiUntag(operand->reg());
- __ cvtsi2sd(xmm_reg, Operand(operand->reg()));
- __ SmiTag(operand->reg());
- } else {
- // Operand type not known, check for smi or heap number.
- Label smi;
- __ test(operand->reg(), Immediate(kSmiTagMask));
- __ j(zero, &smi);
- if (!operand->type_info().IsNumber()) {
- __ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset),
- Immediate(FACTORY->heap_number_map()));
- not_numbers->Branch(not_equal, left_side, right_side, taken);
- }
- __ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&smi);
- // Comvert smi to float and keep the original smi.
- __ SmiUntag(operand->reg());
- __ cvtsi2sd(xmm_reg, Operand(operand->reg()));
- __ SmiTag(operand->reg());
- __ jmp(&done);
- }
- __ bind(&done);
-}
-
-
-void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
- Result* right_side,
- Condition cc,
- ControlDestination* dest) {
- ASSERT(left_side->is_register());
- ASSERT(right_side->is_register());
-
- JumpTarget not_numbers;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
-
- // Load left and right operand into registers xmm0 and xmm1 and compare.
- LoadComparisonOperandSSE2(masm_, left_side, xmm0, left_side, right_side,
- &not_numbers);
- LoadComparisonOperandSSE2(masm_, right_side, xmm1, left_side, right_side,
- &not_numbers);
- __ ucomisd(xmm0, xmm1);
- } else {
- Label check_right, compare;
-
- // Make sure that both comparison operands are numbers.
- CheckComparisonOperand(masm_, left_side, left_side, right_side,
- &not_numbers);
- CheckComparisonOperand(masm_, right_side, left_side, right_side,
- &not_numbers);
-
- // Load right and left operand to FPU stack and compare.
- LoadComparisonOperand(masm_, right_side);
- LoadComparisonOperand(masm_, left_side);
- __ FCmp();
- }
-
- // Bail out if a NaN is involved.
- not_numbers.Branch(parity_even, left_side, right_side, not_taken);
-
- // Split to destination targets based on comparison.
- left_side->Unuse();
- right_side->Unuse();
- dest->true_target()->Branch(DoubleCondition(cc));
- dest->false_target()->Jump();
-
- not_numbers.Bind(left_side, right_side);
-}
-
-
-// Call the function just below TOS on the stack with the given
-// arguments. The receiver is the TOS.
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
- CallFunctionFlags flags,
- int position) {
- // Push the arguments ("left-to-right") on the stack.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Record the position for debugging purposes.
- CodeForSourcePosition(position);
-
- // Use the shared code stub to call the function.
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop, flags);
- Result answer = frame_->CallStub(&call_function, arg_count + 1);
- // Restore context and replace function on the stack with the
- // result of the stub invocation.
- frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &answer);
-}
-
-
-void CodeGenerator::CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position) {
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments).
- // If the arguments object of the scope has not been allocated,
- // and x.apply is Function.prototype.apply, this optimization
- // just copies y and the arguments of the current function on the
- // stack, as receiver and arguments, and calls x.
- // In the implementation comments, we call x the applicand
- // and y the receiver.
- ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
- ASSERT(arguments->IsArguments());
-
- // Load applicand.apply onto the stack. This will usually
- // give us a megamorphic load site. Not super, but it works.
- Load(applicand);
- frame()->Dup();
- Handle<String> name = FACTORY->LookupAsciiSymbol("apply");
- frame()->Push(name);
- Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
- __ nop();
- frame()->Push(&answer);
-
- // Load the receiver and the existing arguments object onto the
- // expression stack. Avoid allocating the arguments object here.
- Load(receiver);
- LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
-
- // Emit the source position information after having loaded the
- // receiver and the arguments.
- CodeForSourcePosition(position);
- // Contents of frame at this point:
- // Frame[0]: arguments object of the current function or the hole.
- // Frame[1]: receiver
- // Frame[2]: applicand.apply
- // Frame[3]: applicand.
-
- // Check if the arguments object has been lazily allocated
- // already. If so, just use that instead of copying the arguments
- // from the stack. This also deals with cases where a local variable
- // named 'arguments' has been introduced.
- frame_->Dup();
- Result probe = frame_->Pop();
- { VirtualFrame::SpilledScope spilled_scope;
- Label slow, done;
- bool try_lazy = true;
- if (probe.is_constant()) {
- try_lazy = probe.handle()->IsArgumentsMarker();
- } else {
- __ cmp(Operand(probe.reg()), Immediate(FACTORY->arguments_marker()));
- probe.Unuse();
- __ j(not_equal, &slow);
- }
-
- if (try_lazy) {
- Label build_args;
- // Get rid of the arguments object probe.
- frame_->Drop(); // Can be called on a spilled frame.
- // Stack now has 3 elements on it.
- // Contents of stack at this point:
- // esp[0]: receiver
- // esp[1]: applicand.apply
- // esp[2]: applicand.
-
- // Check that the receiver really is a JavaScript object.
- __ mov(eax, Operand(esp, 0));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &build_args);
- // We allow all JSObjects including JSFunctions. As long as
- // JS_FUNCTION_TYPE is the last instance type and it is right
- // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
- // bound.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(below, &build_args);
-
- // Check that applicand.apply is Function.prototype.apply.
- __ mov(eax, Operand(esp, kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &build_args);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &build_args);
- __ mov(ecx, FieldOperand(eax, JSFunction::kCodeEntryOffset));
- __ sub(Operand(ecx), Immediate(Code::kHeaderSize - kHeapObjectTag));
- Handle<Code> apply_code(masm()->isolate()->builtins()->builtin(
- Builtins::kFunctionApply));
- __ cmp(Operand(ecx), Immediate(apply_code));
- __ j(not_equal, &build_args);
-
- // Check that applicand is a function.
- __ mov(edi, Operand(esp, 2 * kPointerSize));
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &build_args);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &build_args);
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- Label invoke, adapted;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adapted);
-
- // No arguments adaptor frame. Copy fixed number of arguments.
- __ mov(eax, Immediate(scope()->num_parameters()));
- for (int i = 0; i < scope()->num_parameters(); i++) {
- __ push(frame_->ParameterAt(i));
- }
- __ jmp(&invoke);
-
- // Arguments adaptor frame present. Copy arguments from there, but
- // avoid copying too many arguments to avoid stack overflows.
- __ bind(&adapted);
- static const uint32_t kArgumentsLimit = 1 * KB;
- __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(eax);
- __ mov(ecx, Operand(eax));
- __ cmp(eax, kArgumentsLimit);
- __ j(above, &build_args);
-
- // Loop through the arguments pushing them onto the execution
- // stack. We don't inform the virtual frame of the push, so we don't
- // have to worry about getting rid of the elements from the virtual
- // frame.
- Label loop;
- // ecx is a small non-negative integer, due to the test above.
- __ test(ecx, Operand(ecx));
- __ j(zero, &invoke);
- __ bind(&loop);
- __ push(Operand(edx, ecx, times_pointer_size, 1 * kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &loop);
-
- // Invoke the function.
- __ bind(&invoke);
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION);
- // Drop applicand.apply and applicand from the stack, and push
- // the result of the function call, but leave the spilled frame
- // unchanged, with 3 elements, so it is correct when we compile the
- // slow-case code.
- __ add(Operand(esp), Immediate(2 * kPointerSize));
- __ push(eax);
- // Stack now has 1 element:
- // esp[0]: result
- __ jmp(&done);
-
- // Slow-case: Allocate the arguments object since we know it isn't
- // there, and fall-through to the slow-case where we call
- // applicand.apply.
- __ bind(&build_args);
- // Stack now has 3 elements, because we have jumped from where:
- // esp[0]: receiver
- // esp[1]: applicand.apply
- // esp[2]: applicand.
-
- // StoreArgumentsObject requires a correct frame, and may modify it.
- Result arguments_object = StoreArgumentsObject(false);
- frame_->SpillAll();
- arguments_object.ToRegister();
- frame_->EmitPush(arguments_object.reg());
- arguments_object.Unuse();
- // Stack and frame now have 4 elements.
- __ bind(&slow);
- }
-
- // Generic computation of x.apply(y, args) with no special optimization.
- // Flip applicand.apply and applicand on the stack, so
- // applicand looks like the receiver of the applicand.apply call.
- // Then process it as a normal function call.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
- __ mov(Operand(esp, 2 * kPointerSize), eax);
- __ mov(Operand(esp, 3 * kPointerSize), ebx);
-
- CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
- Result res = frame_->CallStub(&call_function, 3);
- // The function and its two arguments have been dropped.
- frame_->Drop(1); // Drop the receiver as well.
- res.ToRegister();
- frame_->EmitPush(res.reg());
- // Stack now has 1 element:
- // esp[0]: result
- if (try_lazy) __ bind(&done);
- } // End of spilled scope.
- // Restore the context register after a call.
- frame_->RestoreContextRegister();
-}
-
-
-class DeferredStackCheck: public DeferredCode {
- public:
- DeferredStackCheck() {
- set_comment("[ DeferredStackCheck");
- }
-
- virtual void Generate();
-};
-
-
-void DeferredStackCheck::Generate() {
- StackCheckStub stub;
- __ CallStub(&stub);
-}
-
-
-void CodeGenerator::CheckStack() {
- DeferredStackCheck* deferred = new DeferredStackCheck;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm()->isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- deferred->Branch(below);
- deferred->BindExit();
-}
-
-
-void CodeGenerator::VisitAndSpill(Statement* statement) {
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- Visit(statement);
- if (frame_ != NULL) {
- frame_->SpillAll();
- }
- set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- VisitStatements(statements);
- if (frame_ != NULL) {
- frame_->SpillAll();
- }
- set_in_spilled_code(true);
-
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- ASSERT(!in_spilled_code());
- for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
- Visit(statements->at(i));
- }
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ Block");
- CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- VisitStatements(node->statements());
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals. The inevitable call
- // will sync frame elements to memory anyway, so we do it eagerly to
- // allow us to push the arguments directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- frame_->EmitPush(esi); // The context is the first argument.
- frame_->EmitPush(Immediate(pairs));
- frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
- frame_->EmitPush(Immediate(Smi::FromInt(strict_mode_flag())));
- Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
- // Return value is ignored.
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
- Comment cmnt(masm_, "[ Declaration");
- Variable* var = node->proxy()->var();
- ASSERT(var != NULL); // must have been resolved
- Slot* slot = var->AsSlot();
-
- // If it was not possible to allocate the variable at compile time,
- // we need to "declare" it at runtime to make sure it actually
- // exists in the local context.
- if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Variables with a "LOOKUP" slot were introduced as non-locals
- // during variable resolution and must have mode DYNAMIC.
- ASSERT(var->is_dynamic());
- // For now, just do a runtime call. Sync the virtual frame eagerly
- // so we can simply push the arguments into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(esi);
- frame_->EmitPush(Immediate(var->name()));
- // Declaration nodes are always introduced in one of two modes.
- ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
- PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
- frame_->EmitPush(Immediate(Smi::FromInt(attr)));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (node->mode() == Variable::CONST) {
- frame_->EmitPush(Immediate(FACTORY->the_hole_value()));
- } else if (node->fun() != NULL) {
- Load(node->fun());
- } else {
- frame_->EmitPush(Immediate(Smi::FromInt(0))); // no initial value!
- }
- Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
- // Ignore the return value (declarations are statements).
- return;
- }
-
- ASSERT(!var->is_global());
-
- // If we have a function or a constant, we need to initialize the variable.
- Expression* val = NULL;
- if (node->mode() == Variable::CONST) {
- val = new Literal(FACTORY->the_hole_value());
- } else {
- val = node->fun(); // NULL if we don't have a function
- }
-
- if (val != NULL) {
- {
- // Set the initial value.
- Reference target(this, node->proxy());
- Load(val);
- target.SetValue(NOT_CONST_INIT);
- // The reference is removed from the stack (preserving TOS) when
- // it goes out of scope.
- }
- // Get rid of the assigned value (declarations are statements).
- frame_->Drop();
- }
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ExpressionStatement");
- CodeForStatementPosition(node);
- Expression* expression = node->expression();
- expression->MarkAsStatement();
- Load(expression);
- // Remove the lingering expression result from the top of stack.
- frame_->Drop();
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "// EmptyStatement");
- CodeForStatementPosition(node);
- // nothing to do
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ IfStatement");
- // Generate different code depending on which parts of the if statement
- // are present or not.
- bool has_then_stm = node->HasThenStatement();
- bool has_else_stm = node->HasElseStatement();
-
- CodeForStatementPosition(node);
- JumpTarget exit;
- if (has_then_stm && has_else_stm) {
- JumpTarget then;
- JumpTarget else_;
- ControlDestination dest(&then, &else_, true);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // The else target was bound, so we compile the else part first.
- Visit(node->else_statement());
-
- // We may have dangling jumps to the then part.
- if (then.is_linked()) {
- if (has_valid_frame()) exit.Jump();
- then.Bind();
- Visit(node->then_statement());
- }
- } else {
- // The then target was bound, so we compile the then part first.
- Visit(node->then_statement());
-
- if (else_.is_linked()) {
- if (has_valid_frame()) exit.Jump();
- else_.Bind();
- Visit(node->else_statement());
- }
- }
-
- } else if (has_then_stm) {
- ASSERT(!has_else_stm);
- JumpTarget then;
- ControlDestination dest(&then, &exit, true);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // The exit label was bound. We may have dangling jumps to the
- // then part.
- if (then.is_linked()) {
- exit.Unuse();
- exit.Jump();
- then.Bind();
- Visit(node->then_statement());
- }
- } else {
- // The then label was bound.
- Visit(node->then_statement());
- }
-
- } else if (has_else_stm) {
- ASSERT(!has_then_stm);
- JumpTarget else_;
- ControlDestination dest(&exit, &else_, false);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.true_was_fall_through()) {
- // The exit label was bound. We may have dangling jumps to the
- // else part.
- if (else_.is_linked()) {
- exit.Unuse();
- exit.Jump();
- else_.Bind();
- Visit(node->else_statement());
- }
- } else {
- // The else label was bound.
- Visit(node->else_statement());
- }
-
- } else {
- ASSERT(!has_then_stm && !has_else_stm);
- // We only care about the condition's side effects (not its value
- // or control flow effect). LoadCondition is called without
- // forcing control flow.
- ControlDestination dest(&exit, &exit, true);
- LoadCondition(node->condition(), &dest, false);
- if (!dest.is_used()) {
- // We got a value on the frame rather than (or in addition to)
- // control flow.
- frame_->Drop();
- }
- }
-
- if (exit.is_linked()) {
- exit.Bind();
- }
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ContinueStatement");
- CodeForStatementPosition(node);
- node->target()->continue_target()->Jump();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ BreakStatement");
- CodeForStatementPosition(node);
- node->target()->break_target()->Jump();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ReturnStatement");
-
- CodeForStatementPosition(node);
- Load(node->expression());
- Result return_value = frame_->Pop();
- masm()->positions_recorder()->WriteRecordedPositions();
- if (function_return_is_shadowed_) {
- function_return_.Jump(&return_value);
- } else {
- frame_->PrepareForReturn();
- if (function_return_.is_bound()) {
- // If the function return label is already bound we reuse the
- // code by jumping to the return site.
- function_return_.Jump(&return_value);
- } else {
- function_return_.Bind(&return_value);
- GenerateReturnSequence(&return_value);
- }
- }
-}
-
-
-void CodeGenerator::GenerateReturnSequence(Result* return_value) {
- // The return value is a live (but not currently reference counted)
- // reference to eax. This is safe because the current frame does not
- // contain a reference to eax (it is prepared for the return by spilling
- // all registers).
- if (FLAG_trace) {
- frame_->Push(return_value);
- *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
- }
- return_value->ToRegister(eax);
-
- // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
-
- // Leave the frame and return popping the arguments and the
- // receiver.
- frame_->Exit();
- int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
- __ Ret(arguments_bytes, ecx);
- DeleteFrame();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- ASSERT(Assembler::kJSReturnSequenceLength <=
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
-}
-
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ WithEnterStatement");
- CodeForStatementPosition(node);
- Load(node->expression());
- Result context;
- if (node->is_catch_block()) {
- context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
- } else {
- context = frame_->CallRuntime(Runtime::kPushContext, 1);
- }
-
- // Update context local.
- frame_->SaveContextRegister();
-
- // Verify that the runtime call result and esi agree.
- if (FLAG_debug_code) {
- __ cmp(context.reg(), Operand(esi));
- __ Assert(equal, "Runtime::NewContext should end up in esi");
- }
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ WithExitStatement");
- CodeForStatementPosition(node);
- // Pop context.
- __ mov(esi, ContextOperand(esi, Context::PREVIOUS_INDEX));
- // Update context local.
- frame_->SaveContextRegister();
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ SwitchStatement");
- CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
- // Compile the switch value.
- Load(node->tag());
-
- ZoneList<CaseClause*>* cases = node->cases();
- int length = cases->length();
- CaseClause* default_clause = NULL;
-
- JumpTarget next_test;
- // Compile the case label expressions and comparisons. Exit early
- // if a comparison is unconditionally true. The target next_test is
- // bound before the loop in order to indicate control flow to the
- // first comparison.
- next_test.Bind();
- for (int i = 0; i < length && !next_test.is_unused(); i++) {
- CaseClause* clause = cases->at(i);
- // The default is not a test, but remember it for later.
- if (clause->is_default()) {
- default_clause = clause;
- continue;
- }
-
- Comment cmnt(masm_, "[ Case comparison");
- // We recycle the same target next_test for each test. Bind it if
- // the previous test has not done so and then unuse it for the
- // loop.
- if (next_test.is_linked()) {
- next_test.Bind();
- }
- next_test.Unuse();
-
- // Duplicate the switch value.
- frame_->Dup();
-
- // Compile the label expression.
- Load(clause->label());
-
- // Compare and branch to the body if true or the next test if
- // false. Prefer the next test as a fall through.
- ControlDestination dest(clause->body_target(), &next_test, false);
- Comparison(node, equal, true, &dest);
-
- // If the comparison fell through to the true target, jump to the
- // actual body.
- if (dest.true_was_fall_through()) {
- clause->body_target()->Unuse();
- clause->body_target()->Jump();
- }
- }
-
- // If there was control flow to a next test from the last one
- // compiled, compile a jump to the default or break target.
- if (!next_test.is_unused()) {
- if (next_test.is_linked()) {
- next_test.Bind();
- }
- // Drop the switch value.
- frame_->Drop();
- if (default_clause != NULL) {
- default_clause->body_target()->Jump();
- } else {
- node->break_target()->Jump();
- }
- }
-
- // The last instruction emitted was a jump, either to the default
- // clause or the break target, or else to a case body from the loop
- // that compiles the tests.
- ASSERT(!has_valid_frame());
- // Compile case bodies as needed.
- for (int i = 0; i < length; i++) {
- CaseClause* clause = cases->at(i);
-
- // There are two ways to reach the body: from the corresponding
- // test or as the fall through of the previous body.
- if (clause->body_target()->is_linked() || has_valid_frame()) {
- if (clause->body_target()->is_linked()) {
- if (has_valid_frame()) {
- // If we have both a jump to the test and a fall through, put
- // a jump on the fall through path to avoid the dropping of
- // the switch value on the test path. The exception is the
- // default which has already had the switch value dropped.
- if (clause->is_default()) {
- clause->body_target()->Bind();
- } else {
- JumpTarget body;
- body.Jump();
- clause->body_target()->Bind();
- frame_->Drop();
- body.Bind();
- }
- } else {
- // No fall through to worry about.
- clause->body_target()->Bind();
- if (!clause->is_default()) {
- frame_->Drop();
- }
- }
- } else {
- // Otherwise, we have only fall through.
- ASSERT(has_valid_frame());
- }
-
- // We are now prepared to compile the body.
- Comment cmnt(masm_, "[ Case body");
- VisitStatements(clause->statements());
- }
- clause->body_target()->Unuse();
- }
-
- // We may not have a valid frame here so bind the break target only
- // if needed.
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ DoWhileStatement");
- CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- JumpTarget body(JumpTarget::BIDIRECTIONAL);
- IncrementLoopNesting();
-
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- // Label the top of the loop for the backward jump if necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // Use the continue target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- break;
- case ALWAYS_FALSE:
- // No need to label it.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- break;
- case DONT_KNOW:
- // Continue is the test, so use the backward body target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- body.Bind();
- break;
- }
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- // Compile the test.
- switch (info) {
- case ALWAYS_TRUE:
- // If control flow can fall off the end of the body, jump back
- // to the top and bind the break target at the exit.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- break;
- case ALWAYS_FALSE:
- // We may have had continues or breaks in the body.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- break;
- case DONT_KNOW:
- // We have to compile the test expression if it can be reached by
- // control flow falling out of the body or via continue.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (has_valid_frame()) {
- Comment cmnt(masm_, "[ DoWhileCondition");
- CodeForDoWhileConditionPosition(node);
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), &dest, true);
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- break;
- }
-
- DecrementLoopNesting();
- node->continue_target()->Unuse();
- node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ WhileStatement");
- CodeForStatementPosition(node);
-
- // If the condition is always false and has no side effects, we do not
- // need to compile anything.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- if (info == ALWAYS_FALSE) return;
-
- // Do not duplicate conditions that may have function literal
- // subexpressions. This can cause us to compile the function literal
- // twice.
- bool test_at_bottom = !node->may_have_function_literal();
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- IncrementLoopNesting();
- JumpTarget body;
- if (test_at_bottom) {
- body.set_direction(JumpTarget::BIDIRECTIONAL);
- }
-
- // Based on the condition analysis, compile the test as necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // We will not compile the test expression. Label the top of the
- // loop with the continue target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- break;
- case DONT_KNOW: {
- if (test_at_bottom) {
- // Continue is the test at the bottom, no need to label the test
- // at the top. The body is a backward target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- } else {
- // Label the test at the top as the continue target. The body
- // is a forward-only target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- }
- // Compile the test with the body as the true target and preferred
- // fall-through and with the break target as the false target.
- ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // If we got the break target as fall-through, the test may have
- // been unconditionally false (if there are no jumps to the
- // body).
- if (!body.is_linked()) {
- DecrementLoopNesting();
- return;
- }
-
- // Otherwise, jump around the body on the fall through and then
- // bind the body target.
- node->break_target()->Unuse();
- node->break_target()->Jump();
- body.Bind();
- }
- break;
- }
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- // Based on the condition analysis, compile the backward jump as
- // necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // The loop body has been labeled with the continue target.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- break;
- case DONT_KNOW:
- if (test_at_bottom) {
- // If we have chosen to recompile the test at the bottom,
- // then it is the continue target.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (has_valid_frame()) {
- // The break target is the fall-through (body is a backward
- // jump from here and thus an invalid fall-through).
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), &dest, true);
- }
- } else {
- // If we have chosen not to recompile the test at the bottom,
- // jump back to the one at the top.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- }
- break;
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- // The break target may be already bound (by the condition), or there
- // may not be a valid frame. Bind it only if needed.
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
-}
-
-
-void CodeGenerator::SetTypeForStackSlot(Slot* slot, TypeInfo info) {
- ASSERT(slot->type() == Slot::LOCAL || slot->type() == Slot::PARAMETER);
- if (slot->type() == Slot::LOCAL) {
- frame_->SetTypeForLocalAt(slot->index(), info);
- } else {
- frame_->SetTypeForParamAt(slot->index(), info);
- }
- if (FLAG_debug_code && info.IsSmi()) {
- if (slot->type() == Slot::LOCAL) {
- frame_->PushLocalAt(slot->index());
- } else {
- frame_->PushParameterAt(slot->index());
- }
- Result var = frame_->Pop();
- var.ToRegister();
- __ AbortIfNotSmi(var.reg());
- }
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ForStatement");
- CodeForStatementPosition(node);
-
- // Compile the init expression if present.
- if (node->init() != NULL) {
- Visit(node->init());
- }
-
- // If the condition is always false and has no side effects, we do not
- // need to compile anything else.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- if (info == ALWAYS_FALSE) return;
-
- // Do not duplicate conditions that may have function literal
- // subexpressions. This can cause us to compile the function literal
- // twice.
- bool test_at_bottom = !node->may_have_function_literal();
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- IncrementLoopNesting();
-
- // Target for backward edge if no test at the bottom, otherwise
- // unused.
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-
- // Target for backward edge if there is a test at the bottom,
- // otherwise used as target for test at the top.
- JumpTarget body;
- if (test_at_bottom) {
- body.set_direction(JumpTarget::BIDIRECTIONAL);
- }
-
- // Based on the condition analysis, compile the test as necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // We will not compile the test expression. Label the top of the
- // loop.
- if (node->next() == NULL) {
- // Use the continue target if there is no update expression.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- } else {
- // Otherwise use the backward loop target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- loop.Bind();
- }
- break;
- case DONT_KNOW: {
- if (test_at_bottom) {
- // Continue is either the update expression or the test at the
- // bottom, no need to label the test at the top.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- } else if (node->next() == NULL) {
- // We are not recompiling the test at the bottom and there is no
- // update expression.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- } else {
- // We are not recompiling the test at the bottom and there is an
- // update expression.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- loop.Bind();
- }
-
- // Compile the test with the body as the true target and preferred
- // fall-through and with the break target as the false target.
- ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // If we got the break target as fall-through, the test may have
- // been unconditionally false (if there are no jumps to the
- // body).
- if (!body.is_linked()) {
- DecrementLoopNesting();
- return;
- }
-
- // Otherwise, jump around the body on the fall through and then
- // bind the body target.
- node->break_target()->Unuse();
- node->break_target()->Jump();
- body.Bind();
- }
- break;
- }
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
-
- // We know that the loop index is a smi if it is not modified in the
- // loop body and it is checked against a constant limit in the loop
- // condition. In this case, we reset the static type information of the
- // loop index to smi before compiling the body, the update expression, and
- // the bottom check of the loop condition.
- if (node->is_fast_smi_loop()) {
- // Set number type of the loop variable to smi.
- SetTypeForStackSlot(node->loop_variable()->AsSlot(), TypeInfo::Smi());
- }
-
- Visit(node->body());
-
- // If there is an update expression, compile it if necessary.
- if (node->next() != NULL) {
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
-
- // Control can reach the update by falling out of the body or by a
- // continue.
- if (has_valid_frame()) {
- // Record the source position of the statement as this code which
- // is after the code for the body actually belongs to the loop
- // statement and not the body.
- CodeForStatementPosition(node);
- Visit(node->next());
- }
- }
-
- // Set the type of the loop variable to smi before compiling the test
- // expression if we are in a fast smi loop condition.
- if (node->is_fast_smi_loop() && has_valid_frame()) {
- // Set number type of the loop variable to smi.
- SetTypeForStackSlot(node->loop_variable()->AsSlot(), TypeInfo::Smi());
- }
-
- // Based on the condition analysis, compile the backward jump as
- // necessary.
- switch (info) {
- case ALWAYS_TRUE:
- if (has_valid_frame()) {
- if (node->next() == NULL) {
- node->continue_target()->Jump();
- } else {
- loop.Jump();
- }
- }
- break;
- case DONT_KNOW:
- if (test_at_bottom) {
- if (node->continue_target()->is_linked()) {
- // We can have dangling jumps to the continue target if there
- // was no update expression.
- node->continue_target()->Bind();
- }
- // Control can reach the test at the bottom by falling out of
- // the body, by a continue in the body, or from the update
- // expression.
- if (has_valid_frame()) {
- // The break target is the fall-through (body is a backward
- // jump from here).
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), &dest, true);
- }
- } else {
- // Otherwise, jump back to the test at the top.
- if (has_valid_frame()) {
- if (node->next() == NULL) {
- node->continue_target()->Jump();
- } else {
- loop.Jump();
- }
- }
- }
- break;
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- // The break target may be already bound (by the condition), or there
- // may not be a valid frame. Bind it only if needed.
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
- ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ ForInStatement");
- CodeForStatementPosition(node);
-
- JumpTarget primitive;
- JumpTarget jsobject;
- JumpTarget fixed_array;
- JumpTarget entry(JumpTarget::BIDIRECTIONAL);
- JumpTarget end_del_check;
- JumpTarget exit;
-
- // Get the object to enumerate over (converted to JSObject).
- LoadAndSpill(node->enumerable());
-
- // Both SpiderMonkey and kjs ignore null and undefined in contrast
- // to the specification. 12.6.4 mandates a call to ToObject.
- frame_->EmitPop(eax);
-
- // eax: value to be iterated over
- __ cmp(eax, FACTORY->undefined_value());
- exit.Branch(equal);
- __ cmp(eax, FACTORY->null_value());
- exit.Branch(equal);
-
- // Stack layout in body:
- // [iteration counter (smi)] <- slot 0
- // [length of array] <- slot 1
- // [FixedArray] <- slot 2
- // [Map or 0] <- slot 3
- // [Object] <- slot 4
-
- // Check if enumerable is already a JSObject
- // eax: value to be iterated over
- __ test(eax, Immediate(kSmiTagMask));
- primitive.Branch(zero);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- jsobject.Branch(above_equal);
-
- primitive.Bind();
- frame_->EmitPush(eax);
- frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
- // function call returns the value in eax, which is where we want it below
-
- jsobject.Bind();
- // Get the set of properties (as a FixedArray or Map).
- // eax: value to be iterated over
- frame_->EmitPush(eax); // Push the object being iterated over.
-
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- JumpTarget call_runtime;
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
- JumpTarget check_prototype;
- JumpTarget use_cache;
- __ mov(ecx, eax);
- loop.Bind();
- // Check that there are no elements.
- __ mov(edx, FieldOperand(ecx, JSObject::kElementsOffset));
- __ cmp(Operand(edx), Immediate(FACTORY->empty_fixed_array()));
- call_runtime.Branch(not_equal);
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in ebx for the subsequent
- // prototype load.
- __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
- __ cmp(Operand(edx), Immediate(FACTORY->empty_descriptor_array()));
- call_runtime.Branch(equal);
- // Check that there in an enum cache in the non-empty instance
- // descriptors. This is the case if the next enumeration index
- // field does not contain a smi.
- __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
- __ test(edx, Immediate(kSmiTagMask));
- call_runtime.Branch(zero);
- // For all objects but the receiver, check that the cache is empty.
- __ cmp(ecx, Operand(eax));
- check_prototype.Branch(equal);
- __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- __ cmp(Operand(edx), Immediate(FACTORY->empty_fixed_array()));
- call_runtime.Branch(not_equal);
- check_prototype.Bind();
- // Load the prototype from the map and loop if non-null.
- __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
- __ cmp(Operand(ecx), Immediate(FACTORY->null_value()));
- loop.Branch(not_equal);
- // The enum cache is valid. Load the map of the object being
- // iterated over and use the cache for the iteration.
- __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
- use_cache.Jump();
-
- call_runtime.Bind();
- // Call the runtime to get the property names for the object.
- frame_->EmitPush(eax); // push the Object (slot 4) for the runtime call
- frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
- // If we got a map from the runtime call, we can do a fast
- // modification check. Otherwise, we got a fixed array, and we have
- // to do a slow check.
- // eax: map or fixed array (result from call to
- // Runtime::kGetPropertyNamesFast)
- __ mov(edx, Operand(eax));
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ecx, FACTORY->meta_map());
- fixed_array.Branch(not_equal);
-
- use_cache.Bind();
- // Get enum cache
- // eax: map (either the result from a call to
- // Runtime::kGetPropertyNamesFast or has been fetched directly from
- // the object)
- __ mov(ecx, Operand(eax));
-
- __ mov(ecx, FieldOperand(ecx, Map::kInstanceDescriptorsOffset));
- // Get the bridge array held in the enumeration index field.
- __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
- // Get the cache from the bridge array.
- __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
- frame_->EmitPush(eax); // <- slot 3
- frame_->EmitPush(edx); // <- slot 2
- __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
- frame_->EmitPush(eax); // <- slot 1
- frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
- entry.Jump();
-
- fixed_array.Bind();
- // eax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
- frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 3
- frame_->EmitPush(eax); // <- slot 2
-
- // Push the length of the array and the initial index onto the stack.
- __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
- frame_->EmitPush(eax); // <- slot 1
- frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
-
- // Condition.
- entry.Bind();
- // Grab the current frame's height for the break and continue
- // targets only after all the state is pushed on the frame.
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
- __ mov(eax, frame_->ElementAt(0)); // load the current count
- __ cmp(eax, frame_->ElementAt(1)); // compare to the array length
- node->break_target()->Branch(above_equal);
-
- // Get the i'th entry of the array.
- __ mov(edx, frame_->ElementAt(2));
- __ mov(ebx, FixedArrayElementOperand(edx, eax));
-
- // Get the expected map from the stack or a zero map in the
- // permanent slow case eax: current iteration count ebx: i'th entry
- // of the enum cache
- __ mov(edx, frame_->ElementAt(3));
- // Check if the expected map still matches that of the enumerable.
- // If not, we have to filter the key.
- // eax: current iteration count
- // ebx: i'th entry of the enum cache
- // edx: expected map value
- __ mov(ecx, frame_->ElementAt(4));
- __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ cmp(ecx, Operand(edx));
- end_del_check.Branch(equal);
-
- // Convert the entry to a string (or null if it isn't a property anymore).
- frame_->EmitPush(frame_->ElementAt(4)); // push enumerable
- frame_->EmitPush(ebx); // push entry
- frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
- __ mov(ebx, Operand(eax));
-
- // If the property has been removed while iterating, we just skip it.
- __ test(ebx, Operand(ebx));
- node->continue_target()->Branch(equal);
-
- end_del_check.Bind();
- // Store the entry in the 'each' expression and take another spin in the
- // loop. edx: i'th entry of the enum cache (or string there of)
- frame_->EmitPush(ebx);
- { Reference each(this, node->each());
- if (!each.is_illegal()) {
- if (each.size() > 0) {
- // Loading a reference may leave the frame in an unspilled state.
- frame_->SpillAll();
- // Get the value (under the reference on the stack) from memory.
- frame_->EmitPush(frame_->ElementAt(each.size()));
- each.SetValue(NOT_CONST_INIT);
- frame_->Drop(2);
- } else {
- // If the reference was to a slot we rely on the convenient property
- // that it doesn't matter whether a value (eg, ebx pushed above) is
- // right on top of or right underneath a zero-sized reference.
- each.SetValue(NOT_CONST_INIT);
- frame_->Drop();
- }
- }
- }
- // Unloading a reference may leave the frame in an unspilled state.
- frame_->SpillAll();
-
- // Body.
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- VisitAndSpill(node->body());
-
- // Next. Reestablish a spilled frame in case we are coming here via
- // a continue in the body.
- node->continue_target()->Bind();
- frame_->SpillAll();
- frame_->EmitPop(eax);
- __ add(Operand(eax), Immediate(Smi::FromInt(1)));
- frame_->EmitPush(eax);
- entry.Jump();
-
- // Cleanup. No need to spill because VirtualFrame::Drop is safe for
- // any frame.
- node->break_target()->Bind();
- frame_->Drop(5);
-
- // Exit.
- exit.Bind();
-
- node->continue_target()->Unuse();
- node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
- ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ TryCatchStatement");
- CodeForStatementPosition(node);
-
- JumpTarget try_block;
- JumpTarget exit;
-
- try_block.Call();
- // --- Catch block ---
- frame_->EmitPush(eax);
-
- // Store the caught exception in the catch variable.
- Variable* catch_var = node->catch_var()->var();
- ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
- StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
-
- // Remove the exception from the stack.
- frame_->Drop();
-
- VisitStatementsAndSpill(node->catch_block()->statements());
- if (has_valid_frame()) {
- exit.Jump();
- }
-
-
- // --- Try block ---
- try_block.Bind();
-
- frame_->PushTryHandler(TRY_CATCH_HANDLER);
- int handler_height = frame_->height();
-
- // Shadow the jump targets for all escapes from the try block, including
- // returns. During shadowing, the original target is hidden as the
- // ShadowTarget and operations on the original actually affect the
- // shadowing target.
- //
- // We should probably try to unify the escaping targets and the return
- // target.
- int nof_escapes = node->escaping_targets()->length();
- List<ShadowTarget*> shadows(1 + nof_escapes);
-
- // Add the shadow target for the function return.
- static const int kReturnShadowIndex = 0;
- shadows.Add(new ShadowTarget(&function_return_));
- bool function_return_was_shadowed = function_return_is_shadowed_;
- function_return_is_shadowed_ = true;
- ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
- // Add the remaining shadow targets.
- for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
- }
-
- // Generate code for the statements in the try block.
- VisitStatementsAndSpill(node->try_block()->statements());
-
- // Stop the introduced shadowing and count the number of required unlinks.
- // After shadowing stops, the original targets are unshadowed and the
- // ShadowTargets represent the formerly shadowing targets.
- bool has_unlinks = false;
- for (int i = 0; i < shadows.length(); i++) {
- shadows[i]->StopShadowing();
- has_unlinks = has_unlinks || shadows[i]->is_linked();
- }
- function_return_is_shadowed_ = function_return_was_shadowed;
-
- // Get an external reference to the handler address.
- ExternalReference handler_address(Isolate::k_handler_address,
- masm()->isolate());
-
- // Make sure that there's nothing left on the stack above the
- // handler structure.
- if (FLAG_debug_code) {
- __ mov(eax, Operand::StaticVariable(handler_address));
- __ cmp(esp, Operand(eax));
- __ Assert(equal, "stack pointer should point to top handler");
- }
-
- // If we can fall off the end of the try block, unlink from try chain.
- if (has_valid_frame()) {
- // The next handler address is on top of the frame. Unlink from
- // the handler list and drop the rest of this handler from the
- // frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(Operand::StaticVariable(handler_address));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
- if (has_unlinks) {
- exit.Jump();
- }
- }
-
- // Generate unlink code for the (formerly) shadowing targets that
- // have been jumped to. Deallocate each shadow target.
- Result return_value;
- for (int i = 0; i < shadows.length(); i++) {
- if (shadows[i]->is_linked()) {
- // Unlink from try chain; be careful not to destroy the TOS if
- // there is one.
- if (i == kReturnShadowIndex) {
- shadows[i]->Bind(&return_value);
- return_value.ToRegister(eax);
- } else {
- shadows[i]->Bind();
- }
- // Because we can be jumping here (to spilled code) from
- // unspilled code, we need to reestablish a spilled frame at
- // this block.
- frame_->SpillAll();
-
- // Reload sp from the top handler, because some statements that we
- // break from (eg, for...in) may have left stuff on the stack.
- __ mov(esp, Operand::StaticVariable(handler_address));
- frame_->Forget(frame_->height() - handler_height);
-
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(Operand::StaticVariable(handler_address));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- if (i == kReturnShadowIndex) {
- if (!function_return_is_shadowed_) frame_->PrepareForReturn();
- shadows[i]->other_target()->Jump(&return_value);
- } else {
- shadows[i]->other_target()->Jump();
- }
- }
- }
-
- exit.Bind();
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
- ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ TryFinallyStatement");
- CodeForStatementPosition(node);
-
- // State: Used to keep track of reason for entering the finally
- // block. Should probably be extended to hold information for
- // break/continue from within the try block.
- enum { FALLING, THROWING, JUMPING };
-
- JumpTarget try_block;
- JumpTarget finally_block;
-
- try_block.Call();
-
- frame_->EmitPush(eax);
- // In case of thrown exceptions, this is where we continue.
- __ Set(ecx, Immediate(Smi::FromInt(THROWING)));
- finally_block.Jump();
-
- // --- Try block ---
- try_block.Bind();
-
- frame_->PushTryHandler(TRY_FINALLY_HANDLER);
- int handler_height = frame_->height();
-
- // Shadow the jump targets for all escapes from the try block, including
- // returns. During shadowing, the original target is hidden as the
- // ShadowTarget and operations on the original actually affect the
- // shadowing target.
- //
- // We should probably try to unify the escaping targets and the return
- // target.
- int nof_escapes = node->escaping_targets()->length();
- List<ShadowTarget*> shadows(1 + nof_escapes);
-
- // Add the shadow target for the function return.
- static const int kReturnShadowIndex = 0;
- shadows.Add(new ShadowTarget(&function_return_));
- bool function_return_was_shadowed = function_return_is_shadowed_;
- function_return_is_shadowed_ = true;
- ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
- // Add the remaining shadow targets.
- for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
- }
-
- // Generate code for the statements in the try block.
- VisitStatementsAndSpill(node->try_block()->statements());
-
- // Stop the introduced shadowing and count the number of required unlinks.
- // After shadowing stops, the original targets are unshadowed and the
- // ShadowTargets represent the formerly shadowing targets.
- int nof_unlinks = 0;
- for (int i = 0; i < shadows.length(); i++) {
- shadows[i]->StopShadowing();
- if (shadows[i]->is_linked()) nof_unlinks++;
- }
- function_return_is_shadowed_ = function_return_was_shadowed;
-
- // Get an external reference to the handler address.
- ExternalReference handler_address(Isolate::k_handler_address,
- masm()->isolate());
-
- // If we can fall off the end of the try block, unlink from the try
- // chain and set the state on the frame to FALLING.
- if (has_valid_frame()) {
- // The next handler address is on top of the frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(Operand::StaticVariable(handler_address));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- // Fake a top of stack value (unneeded when FALLING) and set the
- // state in ecx, then jump around the unlink blocks if any.
- frame_->EmitPush(Immediate(FACTORY->undefined_value()));
- __ Set(ecx, Immediate(Smi::FromInt(FALLING)));
- if (nof_unlinks > 0) {
- finally_block.Jump();
- }
- }
-
- // Generate code to unlink and set the state for the (formerly)
- // shadowing targets that have been jumped to.
- for (int i = 0; i < shadows.length(); i++) {
- if (shadows[i]->is_linked()) {
- // If we have come from the shadowed return, the return value is
- // on the virtual frame. We must preserve it until it is
- // pushed.
- if (i == kReturnShadowIndex) {
- Result return_value;
- shadows[i]->Bind(&return_value);
- return_value.ToRegister(eax);
- } else {
- shadows[i]->Bind();
- }
- // Because we can be jumping here (to spilled code) from
- // unspilled code, we need to reestablish a spilled frame at
- // this block.
- frame_->SpillAll();
-
- // Reload sp from the top handler, because some statements that
- // we break from (eg, for...in) may have left stuff on the
- // stack.
- __ mov(esp, Operand::StaticVariable(handler_address));
- frame_->Forget(frame_->height() - handler_height);
-
- // Unlink this handler and drop it from the frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(Operand::StaticVariable(handler_address));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- if (i == kReturnShadowIndex) {
- // If this target shadowed the function return, materialize
- // the return value on the stack.
- frame_->EmitPush(eax);
- } else {
- // Fake TOS for targets that shadowed breaks and continues.
- frame_->EmitPush(Immediate(FACTORY->undefined_value()));
- }
- __ Set(ecx, Immediate(Smi::FromInt(JUMPING + i)));
- if (--nof_unlinks > 0) {
- // If this is not the last unlink block, jump around the next.
- finally_block.Jump();
- }
- }
- }
-
- // --- Finally block ---
- finally_block.Bind();
-
- // Push the state on the stack.
- frame_->EmitPush(ecx);
-
- // We keep two elements on the stack - the (possibly faked) result
- // and the state - while evaluating the finally block.
- //
- // Generate code for the statements in the finally block.
- VisitStatementsAndSpill(node->finally_block()->statements());
-
- if (has_valid_frame()) {
- // Restore state and return value or faked TOS.
- frame_->EmitPop(ecx);
- frame_->EmitPop(eax);
- }
-
- // Generate code to jump to the right destination for all used
- // formerly shadowing targets. Deallocate each shadow target.
- for (int i = 0; i < shadows.length(); i++) {
- if (has_valid_frame() && shadows[i]->is_bound()) {
- BreakTarget* original = shadows[i]->other_target();
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(JUMPING + i)));
- if (i == kReturnShadowIndex) {
- // The return value is (already) in eax.
- Result return_value = allocator_->Allocate(eax);
- ASSERT(return_value.is_valid());
- if (function_return_is_shadowed_) {
- original->Branch(equal, &return_value);
- } else {
- // Branch around the preparation for return which may emit
- // code.
- JumpTarget skip;
- skip.Branch(not_equal);
- frame_->PrepareForReturn();
- original->Jump(&return_value);
- skip.Bind();
- }
- } else {
- original->Branch(equal);
- }
- }
- }
-
- if (has_valid_frame()) {
- // Check if we need to rethrow the exception.
- JumpTarget exit;
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(THROWING)));
- exit.Branch(not_equal);
-
- // Rethrow exception.
- frame_->EmitPush(eax); // undo pop from above
- frame_->CallRuntime(Runtime::kReThrow, 1);
-
- // Done.
- exit.Bind();
- }
-}
-
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ DebuggerStatement");
- CodeForStatementPosition(node);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Spill everything, even constants, to the frame.
- frame_->SpillAll();
-
- frame_->DebugBreak();
- // Ignore the return value.
-#endif
-}
-
-
-Result CodeGenerator::InstantiateFunction(
- Handle<SharedFunctionInfo> function_info,
- bool pretenure) {
- // The inevitable call will sync frame elements to memory anyway, so
- // we do it eagerly to allow us to push the arguments directly into
- // place.
- frame()->SyncRange(0, frame()->element_count() - 1);
-
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- if (!pretenure &&
- scope()->is_function_scope() &&
- function_info->num_literals() == 0) {
- FastNewClosureStub stub(
- function_info->strict_mode() ? kStrictMode : kNonStrictMode);
- frame()->EmitPush(Immediate(function_info));
- return frame()->CallStub(&stub, 1);
- } else {
- // Call the runtime to instantiate the function based on the
- // shared function info.
- frame()->EmitPush(esi);
- frame()->EmitPush(Immediate(function_info));
- frame()->EmitPush(Immediate(pretenure
- ? FACTORY->true_value()
- : FACTORY->false_value()));
- return frame()->CallRuntime(Runtime::kNewClosure, 3);
- }
-}
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
- Comment cmnt(masm_, "[ FunctionLiteral");
- ASSERT(!in_safe_int32_mode());
- // Build the function info and instantiate it.
- Handle<SharedFunctionInfo> function_info =
- Compiler::BuildFunctionInfo(node, script());
- // Check for stack-overflow exception.
- if (function_info.is_null()) {
- SetStackOverflow();
- return;
- }
- Result result = InstantiateFunction(function_info, node->pretenure());
- frame()->Push(&result);
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
- Result result = InstantiateFunction(node->shared_function_info(), false);
- frame()->Push(&result);
-}
-
-
-void CodeGenerator::VisitConditional(Conditional* node) {
- Comment cmnt(masm_, "[ Conditional");
- ASSERT(!in_safe_int32_mode());
- JumpTarget then;
- JumpTarget else_;
- JumpTarget exit;
- ControlDestination dest(&then, &else_, true);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // The else target was bound, so we compile the else part first.
- Load(node->else_expression());
-
- if (then.is_linked()) {
- exit.Jump();
- then.Bind();
- Load(node->then_expression());
- }
- } else {
- // The then target was bound, so we compile the then part first.
- Load(node->then_expression());
-
- if (else_.is_linked()) {
- exit.Jump();
- else_.Bind();
- Load(node->else_expression());
- }
- }
-
- exit.Bind();
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
- JumpTarget slow;
- JumpTarget done;
- Result value;
-
- // Generate fast case for loading from slots that correspond to
- // local/global variables or arguments unless they are shadowed by
- // eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(slot,
- typeof_state,
- &value,
- &slow,
- &done);
-
- slow.Bind();
- // A runtime call is inevitable. We eagerly sync frame elements
- // to memory so that we can push the arguments directly into place
- // on top of the frame.
- frame()->SyncRange(0, frame()->element_count() - 1);
- frame()->EmitPush(esi);
- frame()->EmitPush(Immediate(slot->var()->name()));
- if (typeof_state == INSIDE_TYPEOF) {
- value =
- frame()->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- } else {
- value = frame()->CallRuntime(Runtime::kLoadContextSlot, 2);
- }
-
- done.Bind(&value);
- frame_->Push(&value);
-
- } else if (slot->var()->mode() == Variable::CONST) {
- // Const slots may contain 'the hole' value (the constant hasn't been
- // initialized yet) which needs to be converted into the 'undefined'
- // value.
- //
- // We currently spill the virtual frame because constants use the
- // potentially unsafe direct-frame access of SlotOperand.
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Load const");
- Label exit;
- __ mov(ecx, SlotOperand(slot, ecx));
- __ cmp(ecx, FACTORY->the_hole_value());
- __ j(not_equal, &exit);
- __ mov(ecx, FACTORY->undefined_value());
- __ bind(&exit);
- frame()->EmitPush(ecx);
-
- } else if (slot->type() == Slot::PARAMETER) {
- frame()->PushParameterAt(slot->index());
-
- } else if (slot->type() == Slot::LOCAL) {
- frame()->PushLocalAt(slot->index());
-
- } else {
- // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
- // here.
- //
- // The use of SlotOperand below is safe for an unspilled frame
- // because it will always be a context slot.
- ASSERT(slot->type() == Slot::CONTEXT);
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(), SlotOperand(slot, temp.reg()));
- frame()->Push(&temp);
- }
-}
-
-
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
- TypeofState state) {
- LoadFromSlot(slot, state);
-
- // Bail out quickly if we're not using lazy arguments allocation.
- if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
-
- // ... or if the slot isn't a non-parameter arguments slot.
- if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
-
- // If the loaded value is a constant, we know if the arguments
- // object has been lazily loaded yet.
- Result result = frame()->Pop();
- if (result.is_constant()) {
- if (result.handle()->IsArgumentsMarker()) {
- result = StoreArgumentsObject(false);
- }
- frame()->Push(&result);
- return;
- }
- ASSERT(result.is_register());
- // The loaded value is in a register. If it is the sentinel that
- // indicates that we haven't loaded the arguments object yet, we
- // need to do it now.
- JumpTarget exit;
- __ cmp(Operand(result.reg()), Immediate(FACTORY->arguments_marker()));
- frame()->Push(&result);
- exit.Branch(not_equal);
-
- result = StoreArgumentsObject(false);
- frame()->SetElementAt(0, &result);
- result.Unuse();
- exit.Bind();
- return;
-}
-
-
-Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
- Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow) {
- ASSERT(!in_safe_int32_mode());
- // Check that no extension objects have been created by calls to
- // eval from the current scope to the global scope.
- Register context = esi;
- Result tmp = allocator_->Allocate();
- ASSERT(tmp.is_valid()); // All non-reserved registers were available.
-
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- slow->Branch(not_equal, not_taken);
- }
- // Load next context in chain.
- __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
- __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- context = tmp.reg();
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions. If we have reached an eval scope, we check
- // all extensions from this point.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s != NULL && s->is_eval_scope()) {
- // Loop up the context chain. There is no frame effect so it is
- // safe to use raw labels here.
- Label next, fast;
- if (!context.is(tmp.reg())) {
- __ mov(tmp.reg(), context);
- }
- __ bind(&next);
- // Terminate at global context.
- __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
- Immediate(FACTORY->global_context_map()));
- __ j(equal, &fast);
- // Check that extension is NULL.
- __ cmp(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
- slow->Branch(not_equal, not_taken);
- // Load next context in chain.
- __ mov(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
- __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- __ jmp(&next);
- __ bind(&fast);
- }
- tmp.Unuse();
-
- // All extension objects were empty and it is safe to use a global
- // load IC call.
- // The register allocator prefers eax if it is free, so the code generator
- // will load the global object directly into eax, which is where the LoadIC
- // expects it.
- frame_->Spill(eax);
- LoadGlobal();
- frame_->Push(slot->var()->name());
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Result answer = frame_->CallLoadIC(mode);
- // A test eax instruction following the call signals that the inobject
- // property case was inlined. Ensure that there is not a test eax
- // instruction here.
- __ nop();
- return answer;
-}
-
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- Result* result,
- JumpTarget* slow,
- JumpTarget* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
- *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
- done->Jump(result);
-
- } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
- Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
- if (potential_slot != NULL) {
- // Generate fast case for locals that rewrite to slots.
- // Allocate a fresh register to use as a temp in
- // ContextSlotOperandCheckExtensions and to hold the result
- // value.
- *result = allocator()->Allocate();
- ASSERT(result->is_valid());
- __ mov(result->reg(),
- ContextSlotOperandCheckExtensions(potential_slot, *result, slow));
- if (potential_slot->var()->mode() == Variable::CONST) {
- __ cmp(result->reg(), FACTORY->the_hole_value());
- done->Branch(not_equal, result);
- __ mov(result->reg(), FACTORY->undefined_value());
- }
- done->Jump(result);
- } else if (rewrite != NULL) {
- // Generate fast case for calls of an argument function.
- Property* property = rewrite->AsProperty();
- if (property != NULL) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- Literal* key_literal = property->key()->AsLiteral();
- if (obj_proxy != NULL &&
- key_literal != NULL &&
- obj_proxy->IsArguments() &&
- key_literal->handle()->IsSmi()) {
- // Load arguments object if there are no eval-introduced
- // variables. Then load the argument from the arguments
- // object using keyed load.
- Result arguments = allocator()->Allocate();
- ASSERT(arguments.is_valid());
- __ mov(arguments.reg(),
- ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
- arguments,
- slow));
- frame_->Push(&arguments);
- frame_->Push(key_literal->handle());
- *result = EmitKeyedLoad();
- done->Jump(result);
- }
- }
- }
- }
-}
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
-
- // For now, just do a runtime call. Since the call is inevitable,
- // we eagerly sync the virtual frame so we can directly push the
- // arguments into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- frame_->EmitPush(esi);
- frame_->EmitPush(Immediate(slot->var()->name()));
-
- Result value;
- if (init_state == CONST_INIT) {
- // Same as the case for a normal store, but ignores attribute
- // (e.g. READ_ONLY) of context slot so that we can initialize const
- // properties (introduced via eval("const foo = (some expr);")). Also,
- // uses the current function context instead of the top context.
- //
- // Note that we must declare the foo upon entry of eval(), via a
- // context slot declaration, but we cannot initialize it at the same
- // time, because the const declaration may be at the end of the eval
- // code (sigh...) and the const variable may have been used before
- // (where its value is 'undefined'). Thus, we can only do the
- // initialization when we actually encounter the expression and when
- // the expression operands are defined and valid, and thus we need the
- // split into 2 operations: declaration of the context slot followed
- // by initialization.
- value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- frame_->Push(Smi::FromInt(strict_mode_flag()));
- value = frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
- }
- // Storing a variable must keep the (new) value on the expression
- // stack. This is necessary for compiling chained assignment
- // expressions.
- frame_->Push(&value);
-
- } else {
- ASSERT(!slot->var()->is_dynamic());
-
- JumpTarget exit;
- if (init_state == CONST_INIT) {
- ASSERT(slot->var()->mode() == Variable::CONST);
- // Only the first const initialization must be executed (the slot
- // still contains 'the hole' value). When the assignment is executed,
- // the code is identical to a normal store (see below).
- //
- // We spill the frame in the code below because the direct-frame
- // access of SlotOperand is potentially unsafe with an unspilled
- // frame.
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Init const");
- __ mov(ecx, SlotOperand(slot, ecx));
- __ cmp(ecx, FACTORY->the_hole_value());
- exit.Branch(not_equal);
- }
-
- // We must execute the store. Storing a variable must keep the (new)
- // value on the stack. This is necessary for compiling assignment
- // expressions.
- //
- // Note: We will reach here even with slot->var()->mode() ==
- // Variable::CONST because of const declarations which will initialize
- // consts to 'the hole' value and by doing so, end up calling this code.
- if (slot->type() == Slot::PARAMETER) {
- frame_->StoreToParameterAt(slot->index());
- } else if (slot->type() == Slot::LOCAL) {
- frame_->StoreToLocalAt(slot->index());
- } else {
- // The other slot types (LOOKUP and GLOBAL) cannot reach here.
- //
- // The use of SlotOperand below is safe for an unspilled frame
- // because the slot is a context slot.
- ASSERT(slot->type() == Slot::CONTEXT);
- frame_->Dup();
- Result value = frame_->Pop();
- value.ToRegister();
- Result start = allocator_->Allocate();
- ASSERT(start.is_valid());
- __ mov(SlotOperand(slot, start.reg()), value.reg());
- // RecordWrite may destroy the value registers.
- //
- // TODO(204): Avoid actually spilling when the value is not
- // needed (probably the common case).
- frame_->Spill(value.reg());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
- // The results start, value, and temp are unused by going out of
- // scope.
- }
-
- exit.Bind();
- }
-}
-
-
-void CodeGenerator::VisitSlot(Slot* slot) {
- Comment cmnt(masm_, "[ Slot");
- if (in_safe_int32_mode()) {
- if ((slot->type() == Slot::LOCAL && !slot->is_arguments())) {
- frame()->UntaggedPushLocalAt(slot->index());
- } else if (slot->type() == Slot::PARAMETER) {
- frame()->UntaggedPushParameterAt(slot->index());
- } else {
- UNREACHABLE();
- }
- } else {
- LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- }
-}
-
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
- Comment cmnt(masm_, "[ VariableProxy");
- Variable* var = node->var();
- Expression* expr = var->rewrite();
- if (expr != NULL) {
- Visit(expr);
- } else {
- ASSERT(var->is_global());
- ASSERT(!in_safe_int32_mode());
- Reference ref(this, node);
- ref.GetValue();
- }
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
- Comment cmnt(masm_, "[ Literal");
- if (frame_->ConstantPoolOverflowed()) {
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- if (in_safe_int32_mode()) {
- temp.set_untagged_int32(true);
- }
- __ Set(temp.reg(), Immediate(node->handle()));
- frame_->Push(&temp);
- } else {
- if (in_safe_int32_mode()) {
- frame_->PushUntaggedElement(node->handle());
- } else {
- frame_->Push(node->handle());
- }
- }
-}
-
-
-void CodeGenerator::PushUnsafeSmi(Handle<Object> value) {
- ASSERT(value->IsSmi());
- int bits = reinterpret_cast<int>(*value);
- __ push(Immediate(bits ^ jit_cookie_));
- __ xor_(Operand(esp, 0), Immediate(jit_cookie_));
-}
-
-
-void CodeGenerator::StoreUnsafeSmiToLocal(int offset, Handle<Object> value) {
- ASSERT(value->IsSmi());
- int bits = reinterpret_cast<int>(*value);
- __ mov(Operand(ebp, offset), Immediate(bits ^ jit_cookie_));
- __ xor_(Operand(ebp, offset), Immediate(jit_cookie_));
-}
-
-
-void CodeGenerator::MoveUnsafeSmi(Register target, Handle<Object> value) {
- ASSERT(target.is_valid());
- ASSERT(value->IsSmi());
- int bits = reinterpret_cast<int>(*value);
- __ Set(target, Immediate(bits ^ jit_cookie_));
- __ xor_(target, jit_cookie_);
-}
-
-
-bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
- if (!value->IsSmi()) return false;
- int int_value = Smi::cast(*value)->value();
- return !is_intn(int_value, kMaxSmiInlinedBits);
-}
-
-
-// Materialize the regexp literal 'node' in the literals array
-// 'literals' of the function. Leave the regexp boilerplate in
-// 'boilerplate'.
-class DeferredRegExpLiteral: public DeferredCode {
- public:
- DeferredRegExpLiteral(Register boilerplate,
- Register literals,
- RegExpLiteral* node)
- : boilerplate_(boilerplate), literals_(literals), node_(node) {
- set_comment("[ DeferredRegExpLiteral");
- }
-
- void Generate();
-
- private:
- Register boilerplate_;
- Register literals_;
- RegExpLiteral* node_;
-};
-
-
-void DeferredRegExpLiteral::Generate() {
- // Since the entry is undefined we call the runtime system to
- // compute the literal.
- // Literal array (0).
- __ push(literals_);
- // Literal index (1).
- __ push(Immediate(Smi::FromInt(node_->literal_index())));
- // RegExp pattern (2).
- __ push(Immediate(node_->pattern()));
- // RegExp flags (3).
- __ push(Immediate(node_->flags()));
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
-}
-
-
-class DeferredAllocateInNewSpace: public DeferredCode {
- public:
- DeferredAllocateInNewSpace(int size,
- Register target,
- int registers_to_save = 0)
- : size_(size), target_(target), registers_to_save_(registers_to_save) {
- ASSERT(size >= kPointerSize && size <= HEAP->MaxObjectSizeInNewSpace());
- ASSERT_EQ(0, registers_to_save & target.bit());
- set_comment("[ DeferredAllocateInNewSpace");
- }
- void Generate();
-
- private:
- int size_;
- Register target_;
- int registers_to_save_;
-};
-
-
-void DeferredAllocateInNewSpace::Generate() {
- for (int i = 0; i < kNumRegs; i++) {
- if (registers_to_save_ & (1 << i)) {
- Register save_register = { i };
- __ push(save_register);
- }
- }
- __ push(Immediate(Smi::FromInt(size_)));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- if (!target_.is(eax)) {
- __ mov(target_, eax);
- }
- for (int i = kNumRegs - 1; i >= 0; i--) {
- if (registers_to_save_ & (1 << i)) {
- Register save_register = { i };
- __ pop(save_register);
- }
- }
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ RegExp Literal");
-
- // Retrieve the literals array and check the allocated entry. Begin
- // with a writable copy of the function of this activation in a
- // register.
- frame_->PushFunction();
- Result literals = frame_->Pop();
- literals.ToRegister();
- frame_->Spill(literals.reg());
-
- // Load the literals array of the function.
- __ mov(literals.reg(),
- FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
- // Load the literal at the ast saved index.
- Result boilerplate = allocator_->Allocate();
- ASSERT(boilerplate.is_valid());
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
-
- // Check whether we need to materialize the RegExp object. If so,
- // jump to the deferred code passing the literals array.
- DeferredRegExpLiteral* deferred =
- new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
- __ cmp(boilerplate.reg(), FACTORY->undefined_value());
- deferred->Branch(equal);
- deferred->BindExit();
-
- // Register of boilerplate contains RegExp object.
-
- Result tmp = allocator()->Allocate();
- ASSERT(tmp.is_valid());
-
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
-
- DeferredAllocateInNewSpace* allocate_fallback =
- new DeferredAllocateInNewSpace(size, literals.reg());
- frame_->Push(&boilerplate);
- frame_->SpillTop();
- __ AllocateInNewSpace(size,
- literals.reg(),
- tmp.reg(),
- no_reg,
- allocate_fallback->entry_label(),
- TAG_OBJECT);
- allocate_fallback->BindExit();
- boilerplate = frame_->Pop();
- // Copy from boilerplate to clone and return clone.
-
- for (int i = 0; i < size; i += kPointerSize) {
- __ mov(tmp.reg(), FieldOperand(boilerplate.reg(), i));
- __ mov(FieldOperand(literals.reg(), i), tmp.reg());
- }
- frame_->Push(&literals);
-}
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ ObjectLiteral");
-
- // Load a writable copy of the function of this activation in a
- // register.
- frame_->PushFunction();
- Result literals = frame_->Pop();
- literals.ToRegister();
- frame_->Spill(literals.reg());
-
- // Load the literals array of the function.
- __ mov(literals.reg(),
- FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
- // Literal array.
- frame_->Push(&literals);
- // Literal index.
- frame_->Push(Smi::FromInt(node->literal_index()));
- // Constant properties.
- frame_->Push(node->constant_properties());
- // Should the object literal have fast elements?
- frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0));
- Result clone;
- if (node->depth() > 1) {
- clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
- clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
- }
- frame_->Push(&clone);
-
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- node->CalculateEmitStore();
-
- for (int i = 0; i < node->properties()->length(); i++) {
- ObjectLiteral::Property* property = node->properties()->at(i);
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- break;
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
- // else fall through.
- case ObjectLiteral::Property::COMPUTED: {
- Handle<Object> key(property->key()->handle());
- if (key->IsSymbol()) {
- // Duplicate the object as the IC receiver.
- frame_->Dup();
- Load(property->value());
- if (property->emit_store()) {
- Result ignored =
- frame_->CallStoreIC(Handle<String>::cast(key), false,
- strict_mode_flag());
- // A test eax instruction following the store IC call would
- // indicate the presence of an inlined version of the
- // store. Add a nop to indicate that there is no such
- // inlined version.
- __ nop();
- } else {
- frame_->Drop(2);
- }
- break;
- }
- // Fall through
- }
- case ObjectLiteral::Property::PROTOTYPE: {
- // Duplicate the object as an argument to the runtime call.
- frame_->Dup();
- Load(property->key());
- Load(property->value());
- if (property->emit_store()) {
- frame_->Push(Smi::FromInt(NONE)); // PropertyAttributes
- // Ignore the result.
- Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 4);
- } else {
- frame_->Drop(3);
- }
- break;
- }
- case ObjectLiteral::Property::SETTER: {
- // Duplicate the object as an argument to the runtime call.
- frame_->Dup();
- Load(property->key());
- frame_->Push(Smi::FromInt(1));
- Load(property->value());
- Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
- // Ignore the result.
- break;
- }
- case ObjectLiteral::Property::GETTER: {
- // Duplicate the object as an argument to the runtime call.
- frame_->Dup();
- Load(property->key());
- frame_->Push(Smi::FromInt(0));
- Load(property->value());
- Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
- // Ignore the result.
- break;
- }
- default: UNREACHABLE();
- }
- }
-}
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- // Load a writable copy of the function of this activation in a
- // register.
- frame_->PushFunction();
- Result literals = frame_->Pop();
- literals.ToRegister();
- frame_->Spill(literals.reg());
-
- // Load the literals array of the function.
- __ mov(literals.reg(),
- FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
- frame_->Push(&literals);
- frame_->Push(Smi::FromInt(node->literal_index()));
- frame_->Push(node->constant_elements());
- int length = node->values()->length();
- Result clone;
- if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
- clone = frame_->CallStub(&stub, 3);
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->cow_arrays_created_stub(), 1);
- } else if (node->depth() > 1) {
- clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- } else {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
- clone = frame_->CallStub(&stub, 3);
- }
- frame_->Push(&clone);
-
- // Generate code to set the elements in the array that are not
- // literals.
- for (int i = 0; i < length; i++) {
- Expression* value = node->values()->at(i);
-
- if (!CompileTimeValue::ArrayLiteralElementNeedsInitialization(value)) {
- continue;
- }
-
- // The property must be set by generated code.
- Load(value);
-
- // Get the property value off the stack.
- Result prop_value = frame_->Pop();
- prop_value.ToRegister();
-
- // Fetch the array literal while leaving a copy on the stack and
- // use it to get the elements array.
- frame_->Dup();
- Result elements = frame_->Pop();
- elements.ToRegister();
- frame_->Spill(elements.reg());
- // Get the elements array.
- __ mov(elements.reg(),
- FieldOperand(elements.reg(), JSObject::kElementsOffset));
-
- // Write to the indexed properties array.
- int offset = i * kPointerSize + FixedArray::kHeaderSize;
- __ mov(FieldOperand(elements.reg(), offset), prop_value.reg());
-
- // Update the write barrier for the array address.
- frame_->Spill(prop_value.reg()); // Overwritten by the write barrier.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
- }
-}
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
- ASSERT(!in_safe_int32_mode());
- ASSERT(!in_spilled_code());
- // Call runtime routine to allocate the catch extension object and
- // assign the exception value to the catch variable.
- Comment cmnt(masm_, "[ CatchExtensionObject");
- Load(node->key());
- Load(node->value());
- Result result =
- frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::EmitSlotAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Comment cmnt(masm(), "[ Variable Assignment");
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- ASSERT(var != NULL);
- Slot* slot = var->AsSlot();
- ASSERT(slot != NULL);
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- Load(node->value());
-
- // Perform the binary operation.
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- // Construct the implicit binary operation.
- BinaryOperation expr(node);
- GenericBinaryOperation(&expr,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- }
-
- // Perform the assignment.
- if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
- CodeForSourcePosition(node->position());
- StoreToSlot(slot,
- node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
- }
- ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Comment cmnt(masm(), "[ Named Property Assignment");
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- Property* prop = node->target()->AsProperty();
- ASSERT(var == NULL || (prop == NULL && var->is_global()));
-
- // Initialize name and evaluate the receiver sub-expression if necessary. If
- // the receiver is trivial it is not placed on the stack at this point, but
- // loaded whenever actually needed.
- Handle<String> name;
- bool is_trivial_receiver = false;
- if (var != NULL) {
- name = var->name();
- } else {
- Literal* lit = prop->key()->AsLiteral();
- ASSERT_NOT_NULL(lit);
- name = Handle<String>::cast(lit->handle());
- // Do not materialize the receiver on the frame if it is trivial.
- is_trivial_receiver = prop->obj()->IsTrivial();
- if (!is_trivial_receiver) Load(prop->obj());
- }
-
- // Change to slow case in the beginning of an initialization block to
- // avoid the quadratic behavior of repeatedly adding fast properties.
- if (node->starts_initialization_block()) {
- // Initialization block consists of assignments of the form expr.x = ..., so
- // this will never be an assignment to a variable, so there must be a
- // receiver object.
- ASSERT_EQ(NULL, var);
- if (is_trivial_receiver) {
- frame()->Push(prop->obj());
- } else {
- frame()->Dup();
- }
- Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- // Change to fast case at the end of an initialization block. To prepare for
- // that add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- if (node->ends_initialization_block() && !is_trivial_receiver) {
- frame()->Dup();
- }
-
- // Stack layout:
- // [tos] : receiver (only materialized if non-trivial)
- // [tos+1] : receiver if at the end of an initialization block
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- if (is_trivial_receiver) {
- frame()->Push(prop->obj());
- } else if (var != NULL) {
- // The LoadIC stub expects the object in eax.
- // Freeing eax causes the code generator to load the global into it.
- frame_->Spill(eax);
- LoadGlobal();
- } else {
- frame()->Dup();
- }
- Result value = EmitNamedLoad(name, var != NULL);
- frame()->Push(&value);
- Load(node->value());
-
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- // Construct the implicit binary operation.
- BinaryOperation expr(node);
- GenericBinaryOperation(&expr,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- }
-
- // Stack layout:
- // [tos] : value
- // [tos+1] : receiver (only materialized if non-trivial)
- // [tos+2] : receiver if at the end of an initialization block
-
- // Perform the assignment. It is safe to ignore constants here.
- ASSERT(var == NULL || var->mode() != Variable::CONST);
- ASSERT_NE(Token::INIT_CONST, node->op());
- if (is_trivial_receiver) {
- Result value = frame()->Pop();
- frame()->Push(prop->obj());
- frame()->Push(&value);
- }
- CodeForSourcePosition(node->position());
- bool is_contextual = (var != NULL);
- Result answer = EmitNamedStore(name, is_contextual);
- frame()->Push(&answer);
-
- // Stack layout:
- // [tos] : result
- // [tos+1] : receiver if at the end of an initialization block
-
- if (node->ends_initialization_block()) {
- ASSERT_EQ(NULL, var);
- // The argument to the runtime call is the receiver.
- if (is_trivial_receiver) {
- frame()->Push(prop->obj());
- } else {
- // A copy of the receiver is below the value of the assignment. Swap
- // the receiver and the value of the assignment expression.
- Result result = frame()->Pop();
- Result receiver = frame()->Pop();
- frame()->Push(&result);
- frame()->Push(&receiver);
- }
- Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- // Stack layout:
- // [tos] : result
-
- ASSERT_EQ(frame()->height(), original_height + 1);
-}
-
-
-void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Comment cmnt(masm_, "[ Keyed Property Assignment");
- Property* prop = node->target()->AsProperty();
- ASSERT_NOT_NULL(prop);
-
- // Evaluate the receiver subexpression.
- Load(prop->obj());
-
- // Change to slow case in the beginning of an initialization block to
- // avoid the quadratic behavior of repeatedly adding fast properties.
- if (node->starts_initialization_block()) {
- frame_->Dup();
- Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- // Change to fast case at the end of an initialization block. To prepare for
- // that add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- if (node->ends_initialization_block()) {
- frame_->Dup();
- }
-
- // Evaluate the key subexpression.
- Load(prop->key());
-
- // Stack layout:
- // [tos] : key
- // [tos+1] : receiver
- // [tos+2] : receiver if at the end of an initialization block
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- // Duplicate receiver and key for loading the current property value.
- frame()->PushElementAt(1);
- frame()->PushElementAt(1);
- Result value = EmitKeyedLoad();
- frame()->Push(&value);
- Load(node->value());
-
- // Perform the binary operation.
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- BinaryOperation expr(node);
- GenericBinaryOperation(&expr,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- }
-
- // Stack layout:
- // [tos] : value
- // [tos+1] : key
- // [tos+2] : receiver
- // [tos+3] : receiver if at the end of an initialization block
-
- // Perform the assignment. It is safe to ignore constants here.
- ASSERT(node->op() != Token::INIT_CONST);
- CodeForSourcePosition(node->position());
- Result answer = EmitKeyedStore(prop->key()->type());
- frame()->Push(&answer);
-
- // Stack layout:
- // [tos] : result
- // [tos+1] : receiver if at the end of an initialization block
-
- // Change to fast case at the end of an initialization block.
- if (node->ends_initialization_block()) {
- // The argument to the runtime call is the extra copy of the receiver,
- // which is below the value of the assignment. Swap the receiver and
- // the value of the assignment expression.
- Result result = frame()->Pop();
- Result receiver = frame()->Pop();
- frame()->Push(&result);
- frame()->Push(&receiver);
- Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- // Stack layout:
- // [tos] : result
-
- ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
- ASSERT(!in_safe_int32_mode());
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- Property* prop = node->target()->AsProperty();
-
- if (var != NULL && !var->is_global()) {
- EmitSlotAssignment(node);
-
- } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
- (var != NULL && var->is_global())) {
- // Properties whose keys are property names and global variables are
- // treated as named property references. We do not need to consider
- // global 'this' because it is not a valid left-hand side.
- EmitNamedPropertyAssignment(node);
-
- } else if (prop != NULL) {
- // Other properties (including rewritten parameters for a function that
- // uses arguments) are keyed property assignments.
- EmitKeyedPropertyAssignment(node);
-
- } else {
- // Invalid left-hand side.
- Load(node->target());
- Result result = frame()->CallRuntime(Runtime::kThrowReferenceError, 1);
- // The runtime call doesn't actually return but the code generator will
- // still generate code and expects a certain frame height.
- frame()->Push(&result);
- }
-
- ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ Throw");
- Load(node->exception());
- Result result = frame_->CallRuntime(Runtime::kThrow, 1);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ Property");
- Reference property(this, node);
- property.GetValue();
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ Call");
-
- Expression* function = node->expression();
- ZoneList<Expression*>* args = node->arguments();
-
- // Check if the function is a variable or a property.
- Variable* var = function->AsVariableProxy()->AsVariable();
- Property* property = function->AsProperty();
-
- // ------------------------------------------------------------------------
- // Fast-case: Use inline caching.
- // ---
- // According to ECMA-262, section 11.2.3, page 44, the function to call
- // must be resolved after the arguments have been evaluated. The IC code
- // automatically handles this by loading the arguments before the function
- // is resolved in cache misses (this also holds for megamorphic calls).
- // ------------------------------------------------------------------------
-
- if (var != NULL && var->is_possibly_eval()) {
- // ----------------------------------
- // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
- // ----------------------------------
-
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
- // call. Then we call the resolved function using the given
- // arguments.
-
- // Prepare the stack for the call to the resolved function.
- Load(function);
-
- // Allocate a frame slot for the receiver.
- frame_->Push(FACTORY->undefined_value());
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Result to hold the result of the function resolution and the
- // final result of the eval call.
- Result result;
-
- // If we know that eval can only be shadowed by eval-introduced
- // variables we attempt to load the global eval function directly
- // in generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- JumpTarget done;
- if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
- ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
- JumpTarget slow;
- // Prepare the stack for the call to
- // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
- // function, the first argument to the eval call and the
- // receiver.
- Result fun = LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &slow);
- frame_->Push(&fun);
- if (arg_count > 0) {
- frame_->PushElementAt(arg_count);
- } else {
- frame_->Push(FACTORY->undefined_value());
- }
- frame_->PushParameterAt(-1);
-
- // Push the strict mode flag.
- frame_->Push(Smi::FromInt(strict_mode_flag()));
-
- // Resolve the call.
- result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
-
- done.Jump(&result);
- slow.Bind();
- }
-
- // Prepare the stack for the call to ResolvePossiblyDirectEval by
- // pushing the loaded function, the first argument to the eval
- // call and the receiver.
- frame_->PushElementAt(arg_count + 1);
- if (arg_count > 0) {
- frame_->PushElementAt(arg_count);
- } else {
- frame_->Push(FACTORY->undefined_value());
- }
- frame_->PushParameterAt(-1);
-
- // Push the strict mode flag.
- frame_->Push(Smi::FromInt(strict_mode_flag()));
-
- // Resolve the call.
- result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
-
- // If we generated fast-case code bind the jump-target where fast
- // and slow case merge.
- if (done.is_linked()) done.Bind(&result);
-
- // The runtime call returns a pair of values in eax (function) and
- // edx (receiver). Touch up the stack with the right values.
- Result receiver = allocator_->Allocate(edx);
- frame_->SetElementAt(arg_count + 1, &result);
- frame_->SetElementAt(arg_count, &receiver);
- receiver.Unuse();
-
- // Call the function.
- CodeForSourcePosition(node->position());
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
- result = frame_->CallStub(&call_function, arg_count + 1);
-
- // Restore the context and overwrite the function on the stack with
- // the result.
- frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &result);
-
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is global
- // ----------------------------------
-
- // Pass the global object as the receiver and let the IC stub
- // patch the stack to use the global proxy as 'this' in the
- // invoked function.
- LoadGlobal();
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Push the name of the function onto the frame.
- frame_->Push(var->name());
-
- // Call the IC initialization code.
- CodeForSourcePosition(node->position());
- Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
- arg_count,
- loop_nesting());
- frame_->RestoreContextRegister();
- frame_->Push(&result);
-
- } else if (var != NULL && var->AsSlot() != NULL &&
- var->AsSlot()->type() == Slot::LOOKUP) {
- // ----------------------------------
- // JavaScript examples:
- //
- // with (obj) foo(1, 2, 3) // foo may be in obj.
- //
- // function f() {};
- // function g() {
- // eval(...);
- // f(); // f could be in extension object.
- // }
- // ----------------------------------
-
- JumpTarget slow, done;
- Result function;
-
- // Generate fast case for loading functions from slots that
- // correspond to local/global variables or arguments unless they
- // are shadowed by eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &function,
- &slow,
- &done);
-
- slow.Bind();
- // Enter the runtime system to load the function from the context.
- // Sync the frame so we can push the arguments directly into
- // place.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(esi);
- frame_->EmitPush(Immediate(var->name()));
- frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
- // The runtime call returns a pair of values in eax and edx. The
- // looked-up function is in eax and the receiver is in edx. These
- // register references are not ref counted here. We spill them
- // eagerly since they are arguments to an inevitable call (and are
- // not sharable by the arguments).
- ASSERT(!allocator()->is_used(eax));
- frame_->EmitPush(eax);
-
- // Load the receiver.
- ASSERT(!allocator()->is_used(edx));
- frame_->EmitPush(edx);
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- JumpTarget call;
- call.Jump();
- done.Bind(&function);
- frame_->Push(&function);
- LoadGlobalReceiver();
- call.Bind();
- }
-
- // Call the function.
- CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
-
- } else if (property != NULL) {
- // Check if the key is a literal string.
- Literal* literal = property->key()->AsLiteral();
-
- if (literal != NULL && literal->handle()->IsSymbol()) {
- // ------------------------------------------------------------------
- // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
- // ------------------------------------------------------------------
-
- Handle<String> name = Handle<String>::cast(literal->handle());
-
- if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
- name->IsEqualTo(CStrVector("apply")) &&
- args->length() == 2 &&
- args->at(1)->AsVariableProxy() != NULL &&
- args->at(1)->AsVariableProxy()->IsArguments()) {
- // Use the optimized Function.prototype.apply that avoids
- // allocating lazily allocated arguments objects.
- CallApplyLazy(property->obj(),
- args->at(0),
- args->at(1)->AsVariableProxy(),
- node->position());
-
- } else {
- // Push the receiver onto the frame.
- Load(property->obj());
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Push the name of the function onto the frame.
- frame_->Push(name);
-
- // Call the IC initialization code.
- CodeForSourcePosition(node->position());
- Result result =
- frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count,
- loop_nesting());
- frame_->RestoreContextRegister();
- frame_->Push(&result);
- }
-
- } else {
- // -------------------------------------------
- // JavaScript example: 'array[index](1, 2, 3)'
- // -------------------------------------------
-
- // Load the function to call from the property through a reference.
-
- // Pass receiver to called function.
- if (property->is_synthetic()) {
- Reference ref(this, property);
- ref.GetValue();
- // Use global object as receiver.
- LoadGlobalReceiver();
- // Call the function.
- CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
- } else {
- // Push the receiver onto the frame.
- Load(property->obj());
-
- // Load the name of the function.
- Load(property->key());
-
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- Result key = frame_->Pop();
- Result receiver = frame_->Pop();
- frame_->Push(&key);
- frame_->Push(&receiver);
- key.Unuse();
- receiver.Unuse();
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Place the key on top of stack and call the IC initialization code.
- frame_->PushElementAt(arg_count + 1);
- CodeForSourcePosition(node->position());
- Result result =
- frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET,
- arg_count,
- loop_nesting());
- frame_->Drop(); // Drop the key still on the stack.
- frame_->RestoreContextRegister();
- frame_->Push(&result);
- }
- }
-
- } else {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is not global
- // ----------------------------------
-
- // Load the function.
- Load(function);
-
- // Pass the global proxy as the receiver.
- LoadGlobalReceiver();
-
- // Call the function.
- CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
- }
-}
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ CallNew");
-
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments. This is different from ordinary calls, where the
- // actual function to call is resolved after the arguments have been
- // evaluated.
-
- // Push constructor on the stack. If it's not a function it's used as
- // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
- // ignored.
- Load(node->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = node->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- CodeForSourcePosition(node->position());
- Result result = frame_->CallConstructor(arg_count);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- __ test(value.reg(), Immediate(kSmiTagMask));
- value.Unuse();
- destination()->Split(zero);
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (ShouldGenerateLog(args->at(0))) {
- Load(args->at(1));
- Load(args->at(2));
- frame_->CallRuntime(Runtime::kLog, 2);
- }
-#endif
- // Finally, we're expected to leave a value on the top of the stack.
- frame_->Push(FACTORY->undefined_value());
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- __ test(value.reg(), Immediate(kSmiTagMask | kSmiSignMask));
- value.Unuse();
- destination()->Split(zero);
-}
-
-
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
- DeferredStringCharCodeAt(Register object,
- Register index,
- Register scratch,
- Register result)
- : result_(result),
- char_code_at_generator_(object,
- index,
- scratch,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharCodeAtGenerator* fast_case_generator() {
- return &char_code_at_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_code_at_generator_.GenerateSlow(masm(), call_helper);
-
- __ bind(&need_conversion_);
- // Move the undefined value into the result register, which will
- // trigger conversion.
- __ Set(result_, Immediate(FACTORY->undefined_value()));
- __ jmp(exit_label());
-
- __ bind(&index_out_of_range_);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ Set(result_, Immediate(FACTORY->nan_value()));
- __ jmp(exit_label());
- }
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharCodeAtGenerator char_code_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charCodeAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharCodeAt");
- ASSERT(args->length() == 2);
-
- Load(args->at(0));
- Load(args->at(1));
- Result index = frame_->Pop();
- Result object = frame_->Pop();
- object.ToRegister();
- index.ToRegister();
- // We might mutate the object register.
- frame_->Spill(object.reg());
-
- // We need two extra registers.
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
-
- DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(object.reg(),
- index.reg(),
- scratch.reg(),
- result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
-
-
-class DeferredStringCharFromCode : public DeferredCode {
- public:
- DeferredStringCharFromCode(Register code,
- Register result)
- : char_from_code_generator_(code, result) {}
-
- StringCharFromCodeGenerator* fast_case_generator() {
- return &char_from_code_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_from_code_generator_.GenerateSlow(masm(), call_helper);
- }
-
- private:
- StringCharFromCodeGenerator char_from_code_generator_;
-};
-
-
-// Generates code for creating a one-char string from a char code.
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharFromCode");
- ASSERT(args->length() == 1);
-
- Load(args->at(0));
-
- Result code = frame_->Pop();
- code.ToRegister();
- ASSERT(code.is_valid());
-
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
-
- DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
- code.reg(), result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
-
-
-class DeferredStringCharAt : public DeferredCode {
- public:
- DeferredStringCharAt(Register object,
- Register index,
- Register scratch1,
- Register scratch2,
- Register result)
- : result_(result),
- char_at_generator_(object,
- index,
- scratch1,
- scratch2,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharAtGenerator* fast_case_generator() {
- return &char_at_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_at_generator_.GenerateSlow(masm(), call_helper);
-
- __ bind(&need_conversion_);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ Set(result_, Immediate(Smi::FromInt(0)));
- __ jmp(exit_label());
-
- __ bind(&index_out_of_range_);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ Set(result_, Immediate(FACTORY->empty_string()));
- __ jmp(exit_label());
- }
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharAtGenerator char_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharAt");
- ASSERT(args->length() == 2);
-
- Load(args->at(0));
- Load(args->at(1));
- Result index = frame_->Pop();
- Result object = frame_->Pop();
- object.ToRegister();
- index.ToRegister();
- // We might mutate the object register.
- frame_->Spill(object.reg());
-
- // We need three extra registers.
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
- Result scratch1 = allocator()->Allocate();
- ASSERT(scratch1.is_valid());
- Result scratch2 = allocator()->Allocate();
- ASSERT(scratch2.is_valid());
-
- DeferredStringCharAt* deferred =
- new DeferredStringCharAt(object.reg(),
- index.reg(),
- scratch1.reg(),
- scratch2.reg(),
- result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- __ test(value.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(equal);
- // It is a heap object - get map.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- // Check if the object is a JS array or not.
- __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, temp.reg());
- value.Unuse();
- temp.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
- Label bailout, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop, loop_condition,
- loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
-
- ASSERT(args->length() == 2);
- // We will leave the separator on the stack until the end of the function.
- Load(args->at(1));
- // Load this to eax (= array)
- Load(args->at(0));
- Result array_result = frame_->Pop();
- array_result.ToRegister(eax);
- frame_->SpillAll();
-
- // All aliases of the same register have disjoint lifetimes.
- Register array = eax;
- Register elements = no_reg; // Will be eax.
-
- Register index = edx;
-
- Register string_length = ecx;
-
- Register string = esi;
-
- Register scratch = ebx;
-
- Register array_length = edi;
- Register result_pos = no_reg; // Will be edi.
-
- // Separator operand is already pushed.
- Operand separator_operand = Operand(esp, 2 * kPointerSize);
- Operand result_operand = Operand(esp, 1 * kPointerSize);
- Operand array_length_operand = Operand(esp, 0);
- __ sub(Operand(esp), Immediate(2 * kPointerSize));
- __ cld();
- // Check that the array is a JSArray
- __ test(array, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
- __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &bailout);
-
- // Check that the array has fast elements.
- __ test_b(FieldOperand(scratch, Map::kBitField2Offset),
- 1 << Map::kHasFastElements);
- __ j(zero, &bailout);
-
- // If the array has length zero, return the empty string.
- __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
- __ sar(array_length, 1);
- __ j(not_zero, &non_trivial_array);
- __ mov(result_operand, FACTORY->empty_string());
- __ jmp(&done);
-
- // Save the array length.
- __ bind(&non_trivial_array);
- __ mov(array_length_operand, array_length);
-
- // Save the FixedArray containing array's elements.
- // End of array's live range.
- elements = array;
- __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
- array = no_reg;
-
-
- // Check that all array elements are sequential ASCII strings, and
- // accumulate the sum of their lengths, as a smi-encoded value.
- __ Set(index, Immediate(0));
- __ Set(string_length, Immediate(0));
- // Loop condition: while (index < length).
- // Live loop registers: index, array_length, string,
- // scratch, string_length, elements.
- __ jmp(&loop_condition);
- __ bind(&loop);
- __ cmp(index, Operand(array_length));
- __ j(greater_equal, &done);
-
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ test(string, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
- __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
- __ j(not_equal, &bailout);
- __ add(string_length,
- FieldOperand(string, SeqAsciiString::kLengthOffset));
- __ j(overflow, &bailout);
- __ add(Operand(index), Immediate(1));
- __ bind(&loop_condition);
- __ cmp(index, Operand(array_length));
- __ j(less, &loop);
-
- // If array_length is 1, return elements[0], a string.
- __ cmp(array_length, 1);
- __ j(not_equal, &not_size_one_array);
- __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
- __ mov(result_operand, scratch);
- __ jmp(&done);
-
- __ bind(&not_size_one_array);
-
- // End of array_length live range.
- result_pos = array_length;
- array_length = no_reg;
-
- // Live registers:
- // string_length: Sum of string lengths, as a smi.
- // elements: FixedArray of strings.
-
- // Check that the separator is a flat ASCII string.
- __ mov(string, separator_operand);
- __ test(string, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
- __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
- __ j(not_equal, &bailout);
-
- // Add (separator length times array_length) - separator length
- // to string_length.
- __ mov(scratch, separator_operand);
- __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
- __ sub(string_length, Operand(scratch)); // May be negative, temporarily.
- __ imul(scratch, array_length_operand);
- __ j(overflow, &bailout);
- __ add(string_length, Operand(scratch));
- __ j(overflow, &bailout);
-
- __ shr(string_length, 1);
- // Live registers and stack values:
- // string_length
- // elements
- __ AllocateAsciiString(result_pos, string_length, scratch,
- index, string, &bailout);
- __ mov(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
-
-
- __ mov(string, separator_operand);
- __ cmp(FieldOperand(string, SeqAsciiString::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ j(equal, &one_char_separator);
- __ j(greater, &long_separator);
-
-
- // Empty separator case
- __ mov(index, Immediate(0));
- __ jmp(&loop_1_condition);
- // Loop condition: while (index < length).
- __ bind(&loop_1);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
- // elements: the FixedArray of strings we are joining.
-
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(Operand(index), Immediate(1));
- __ bind(&loop_1_condition);
- __ cmp(index, array_length_operand);
- __ j(less, &loop_1); // End while (index < length).
- __ jmp(&done);
-
-
-
- // One-character separator case
- __ bind(&one_char_separator);
- // Replace separator with its ascii character value.
- __ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ mov_b(separator_operand, scratch);
-
- __ Set(index, Immediate(0));
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator
- __ jmp(&loop_2_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_2);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
-
- // Copy the separator character to the result.
- __ mov_b(scratch, separator_operand);
- __ mov_b(Operand(result_pos, 0), scratch);
- __ inc(result_pos);
-
- __ bind(&loop_2_entry);
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(Operand(index), Immediate(1));
-
- __ cmp(index, array_length_operand);
- __ j(less, &loop_2); // End while (index < length).
- __ jmp(&done);
-
-
- // Long separator case (separator is more than one character).
- __ bind(&long_separator);
-
- __ Set(index, Immediate(0));
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator
- __ jmp(&loop_3_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_3);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
-
- // Copy the separator to the result.
- __ mov(string, separator_operand);
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
-
- __ bind(&loop_3_entry);
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(Operand(index), Immediate(1));
-
- __ cmp(index, array_length_operand);
- __ j(less, &loop_3); // End while (index < length).
- __ jmp(&done);
-
-
- __ bind(&bailout);
- __ mov(result_operand, FACTORY->undefined_value());
- __ bind(&done);
- __ mov(eax, result_operand);
- // Drop temp values from the stack, and restore context register.
- __ add(Operand(esp), Immediate(2 * kPointerSize));
-
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- frame_->Drop(1);
- frame_->Push(&array_result);
-}
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- __ test(value.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(equal);
- // It is a heap object - get map.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- // Check if the object is a regexp.
- __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, temp.reg());
- value.Unuse();
- temp.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
-
- __ test(obj.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
- __ cmp(obj.reg(), FACTORY->null_value());
- destination()->true_target()->Branch(equal);
-
- Result map = allocator()->Allocate();
- ASSERT(map.is_valid());
- __ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- destination()->false_target()->Branch(not_zero);
- // Do a range test for JSObject type. We can't use
- // MacroAssembler::IsInstanceJSObjectType, because we are using a
- // ControlDestination, so we copy its implementation here.
- __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
- __ sub(Operand(map.reg()), Immediate(FIRST_JS_OBJECT_TYPE));
- __ cmp(map.reg(), LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
- obj.Unuse();
- map.Unuse();
- destination()->Split(below_equal);
-}
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
- // typeof(arg) == function).
- // It includes undetectable objects (as opposed to IsObject).
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- __ test(value.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(equal);
-
- // Check that this is an object.
- frame_->Spill(value.reg());
- __ CmpObjectType(value.reg(), FIRST_JS_OBJECT_TYPE, value.reg());
- value.Unuse();
- destination()->Split(above_equal);
-}
-
-
-// Deferred code to check whether the String JavaScript object is safe for using
-// default value of. This code is called after the bit caching this information
-// in the map has been checked with the map for the object in the map_result_
-// register. On return the register map_result_ contains 1 for true and 0 for
-// false.
-class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
- public:
- DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
- Register map_result,
- Register scratch1,
- Register scratch2)
- : object_(object),
- map_result_(map_result),
- scratch1_(scratch1),
- scratch2_(scratch2) { }
-
- virtual void Generate() {
- Label false_result;
-
- // Check that map is loaded as expected.
- if (FLAG_debug_code) {
- __ cmp(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ Assert(equal, "Map not in expected register");
- }
-
- // Check for fast case object. Generate false result for slow case object.
- __ mov(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset));
- __ mov(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
- __ cmp(scratch1_, FACTORY->hash_table_map());
- __ j(equal, &false_result);
-
- // Look for valueOf symbol in the descriptor array, and indicate false if
- // found. The type is not checked, so if it is a transition it is a false
- // negative.
- __ mov(map_result_,
- FieldOperand(map_result_, Map::kInstanceDescriptorsOffset));
- __ mov(scratch1_, FieldOperand(map_result_, FixedArray::kLengthOffset));
- // map_result_: descriptor array
- // scratch1_: length of descriptor array
- // Calculate the end of the descriptor array.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kPointerSize == 4);
- __ lea(scratch1_,
- Operand(map_result_, scratch1_, times_2, FixedArray::kHeaderSize));
- // Calculate location of the first key name.
- __ add(Operand(map_result_),
- Immediate(FixedArray::kHeaderSize +
- DescriptorArray::kFirstIndex * kPointerSize));
- // Loop through all the keys in the descriptor array. If one of these is the
- // symbol valueOf the result is false.
- Label entry, loop;
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(scratch2_, FieldOperand(map_result_, 0));
- __ cmp(scratch2_, FACTORY->value_of_symbol());
- __ j(equal, &false_result);
- __ add(Operand(map_result_), Immediate(kPointerSize));
- __ bind(&entry);
- __ cmp(map_result_, Operand(scratch1_));
- __ j(not_equal, &loop);
-
- // Reload map as register map_result_ was used as temporary above.
- __ mov(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
-
- // If a valueOf property is not found on the object check that it's
- // prototype is the un-modified String prototype. If not result is false.
- __ mov(scratch1_, FieldOperand(map_result_, Map::kPrototypeOffset));
- __ test(scratch1_, Immediate(kSmiTagMask));
- __ j(zero, &false_result);
- __ mov(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
- __ mov(scratch2_, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(scratch2_,
- FieldOperand(scratch2_, GlobalObject::kGlobalContextOffset));
- __ cmp(scratch1_,
- ContextOperand(scratch2_,
- Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, &false_result);
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ or_(FieldOperand(map_result_, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ Set(map_result_, Immediate(1));
- __ jmp(exit_label());
- __ bind(&false_result);
- // Set false result.
- __ Set(map_result_, Immediate(0));
- }
-
- private:
- Register object_;
- Register map_result_;
- Register scratch1_;
- Register scratch2_;
-};
-
-
-void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop(); // Pop the string wrapper.
- obj.ToRegister();
- ASSERT(obj.is_valid());
- if (FLAG_debug_code) {
- __ AbortIfSmi(obj.reg());
- }
-
- // Check whether this map has already been checked to be safe for default
- // valueOf.
- Result map_result = allocator()->Allocate();
- ASSERT(map_result.is_valid());
- __ mov(map_result.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ test_b(FieldOperand(map_result.reg(), Map::kBitField2Offset),
- 1 << Map::kStringWrapperSafeForDefaultValueOf);
- destination()->true_target()->Branch(not_zero);
-
- // We need an additional two scratch registers for the deferred code.
- Result temp1 = allocator()->Allocate();
- ASSERT(temp1.is_valid());
- Result temp2 = allocator()->Allocate();
- ASSERT(temp2.is_valid());
-
- DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
- new DeferredIsStringWrapperSafeForDefaultValueOf(
- obj.reg(), map_result.reg(), temp1.reg(), temp2.reg());
- deferred->Branch(zero);
- deferred->BindExit();
- __ test(map_result.reg(), Operand(map_result.reg()));
- obj.Unuse();
- map_result.Unuse();
- temp1.Unuse();
- temp2.Unuse();
- destination()->Split(not_equal);
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (%_ClassOf(arg) === 'Function')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- __ test(obj.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, temp.reg());
- obj.Unuse();
- temp.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- __ test(obj.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(),
- FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- obj.Unuse();
- temp.Unuse();
- destination()->Split(not_zero);
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- // Get the frame pointer for the calling frame.
- Result fp = allocator()->Allocate();
- __ mov(fp.reg(), Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &check_frame_marker);
- __ mov(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ cmp(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
- fp.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- Result fp = allocator_->Allocate();
- Result result = allocator_->Allocate();
- ASSERT(fp.is_valid() && result.is_valid());
-
- Label exit;
-
- // Get the number of formal parameters.
- __ Set(result.reg(), Immediate(Smi::FromInt(scope()->num_parameters())));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ mov(fp.reg(), Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &exit);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ mov(result.reg(),
- Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- result.set_type_info(TypeInfo::Smi());
- if (FLAG_debug_code) __ AbortIfNotSmi(result.reg());
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- JumpTarget leave, null, function, non_function_constructor;
- Load(args->at(0)); // Load the object.
- Result obj = frame_->Pop();
- obj.ToRegister();
- frame_->Spill(obj.reg());
-
- // If the object is a smi, we return null.
- __ test(obj.reg(), Immediate(kSmiTagMask));
- null.Branch(zero);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
- null.Branch(below);
-
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
- function.Branch(equal);
-
- // Check if the constructor in the map is a function.
- { Result tmp = allocator()->Allocate();
- __ mov(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
- __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, tmp.reg());
- non_function_constructor.Branch(not_equal);
- }
-
- // The map register now contains the constructor function. Grab the
- // instance class name from there.
- __ mov(obj.reg(),
- FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
- __ mov(obj.reg(),
- FieldOperand(obj.reg(), SharedFunctionInfo::kInstanceClassNameOffset));
- frame_->Push(&obj);
- leave.Jump();
-
- // Functions have class 'Function'.
- function.Bind();
- frame_->Push(FACTORY->function_class_symbol());
- leave.Jump();
-
- // Objects with a non-function constructor have class 'Object'.
- non_function_constructor.Bind();
- frame_->Push(FACTORY->Object_symbol());
- leave.Jump();
-
- // Non-JS objects have class null.
- null.Bind();
- frame_->Push(FACTORY->null_value());
-
- // All done.
- leave.Bind();
-}
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- JumpTarget leave;
- Load(args->at(0)); // Load the object.
- frame_->Dup();
- Result object = frame_->Pop();
- object.ToRegister();
- ASSERT(object.is_valid());
- // if (object->IsSmi()) return object.
- __ test(object.reg(), Immediate(kSmiTagMask));
- leave.Branch(zero, taken);
- // It is a heap object - get map.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- // if (!object->IsJSValue()) return object.
- __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
- leave.Branch(not_equal, not_taken);
- __ mov(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
- object.Unuse();
- frame_->SetElementAt(0, &temp);
- leave.Bind();
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
- JumpTarget leave;
- Load(args->at(0)); // Load the object.
- Load(args->at(1)); // Load the value.
- Result value = frame_->Pop();
- Result object = frame_->Pop();
- value.ToRegister();
- object.ToRegister();
-
- // if (object->IsSmi()) return value.
- __ test(object.reg(), Immediate(kSmiTagMask));
- leave.Branch(zero, &value, taken);
-
- // It is a heap object - get its map.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- // if (!object->IsJSValue()) return value.
- __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
- leave.Branch(not_equal, &value, not_taken);
-
- // Store the value.
- __ mov(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- Result duplicate_value = allocator_->Allocate();
- ASSERT(duplicate_value.is_valid());
- __ mov(duplicate_value.reg(), value.reg());
- // The object register is also overwritten by the write barrier and
- // possibly aliased in the frame.
- frame_->Spill(object.reg());
- __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
- scratch.reg());
- object.Unuse();
- scratch.Unuse();
- duplicate_value.Unuse();
-
- // Leave.
- leave.Bind(&value);
- frame_->Push(&value);
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in edx and the formal
- // parameter count in eax.
- Load(args->at(0));
- Result key = frame_->Pop();
- // Explicitly create a constant result.
- Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
- // Call the shared stub to get to arguments[key].
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- Result result = frame_->CallStub(&stub, &key, &count);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- Load(args->at(0));
- Load(args->at(1));
- Result right = frame_->Pop();
- Result left = frame_->Pop();
- right.ToRegister();
- left.ToRegister();
- __ cmp(right.reg(), Operand(left.reg()));
- right.Unuse();
- left.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
- STATIC_ASSERT(kSmiTag == 0); // EBP value is aligned, so it looks like a Smi.
- Result ebp_as_smi = allocator_->Allocate();
- ASSERT(ebp_as_smi.is_valid());
- __ mov(ebp_as_smi.reg(), Operand(ebp));
- frame_->Push(&ebp_as_smi);
-}
-
-
-void CodeGenerator::GenerateRandomHeapNumber(
- ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
- frame_->SpillAll();
-
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ AllocateHeapNumber(edi, ebx, ecx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(edi, eax);
-
- __ bind(&heapnumber_allocated);
-
- __ PrepareCallCFunction(1, ebx);
- __ mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
- __ CallCFunction(ExternalReference::random_uint32_function(masm()->isolate()),
- 1);
-
- // Convert 32 random bits in eax to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- // This is implemented on both SSE2 and FPU.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm1, Operand(ebx));
- __ movd(xmm0, Operand(eax));
- __ cvtss2sd(xmm1, xmm1);
- __ pxor(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
- __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
- } else {
- // 0x4130000000000000 is 1.0 x 2^20 as a double.
- __ mov(FieldOperand(edi, HeapNumber::kExponentOffset),
- Immediate(0x41300000));
- __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), eax);
- __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
- __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), Immediate(0));
- __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
- __ fsubp(1);
- __ fstp_d(FieldOperand(edi, HeapNumber::kValueOffset));
- }
- __ mov(eax, edi);
-
- Result result = allocator_->Allocate(eax);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
-
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
-
- SubStringStub stub;
- Result answer = frame_->CallStub(&stub, 3);
- frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
-
- StringCompareStub stub;
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
- ASSERT_EQ(4, args->length());
-
- // Load the arguments on the stack and call the stub.
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
- Load(args->at(3));
-
- RegExpExecStub stub;
- Result result = frame_->CallStub(&stub, 4);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0)); // Size of array, smi.
- Load(args->at(1)); // "index" property value.
- Load(args->at(2)); // "input" property value.
-
- RegExpConstructResultStub stub;
- Result result = frame_->CallStub(&stub, 3);
- frame_->Push(&result);
-}
-
-
-class DeferredSearchCache: public DeferredCode {
- public:
- DeferredSearchCache(Register dst, Register cache, Register key)
- : dst_(dst), cache_(cache), key_(key) {
- set_comment("[ DeferredSearchCache");
- }
-
- virtual void Generate();
-
- private:
- Register dst_; // on invocation Smi index of finger, on exit
- // holds value being looked up.
- Register cache_; // instance of JSFunctionResultCache.
- Register key_; // key being looked up.
-};
-
-
-void DeferredSearchCache::Generate() {
- Label first_loop, search_further, second_loop, cache_miss;
-
- // Smi-tagging is equivalent to multiplying by 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- Smi* kEntrySizeSmi = Smi::FromInt(JSFunctionResultCache::kEntrySize);
- Smi* kEntriesIndexSmi = Smi::FromInt(JSFunctionResultCache::kEntriesIndex);
-
- // Check the cache from finger to start of the cache.
- __ bind(&first_loop);
- __ sub(Operand(dst_), Immediate(kEntrySizeSmi));
- __ cmp(Operand(dst_), Immediate(kEntriesIndexSmi));
- __ j(less, &search_further);
-
- __ cmp(key_, CodeGenerator::FixedArrayElementOperand(cache_, dst_));
- __ j(not_equal, &first_loop);
-
- __ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
- __ mov(dst_, CodeGenerator::FixedArrayElementOperand(cache_, dst_, 1));
- __ jmp(exit_label());
-
- __ bind(&search_further);
-
- // Check the cache from end of cache up to finger.
- __ mov(dst_, FieldOperand(cache_, JSFunctionResultCache::kCacheSizeOffset));
-
- __ bind(&second_loop);
- __ sub(Operand(dst_), Immediate(kEntrySizeSmi));
- // Consider prefetching into some reg.
- __ cmp(dst_, FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
- __ j(less_equal, &cache_miss);
-
- __ cmp(key_, CodeGenerator::FixedArrayElementOperand(cache_, dst_));
- __ j(not_equal, &second_loop);
-
- __ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
- __ mov(dst_, CodeGenerator::FixedArrayElementOperand(cache_, dst_, 1));
- __ jmp(exit_label());
-
- __ bind(&cache_miss);
- __ push(cache_); // store a reference to cache
- __ push(key_); // store a key
- __ push(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ push(key_);
- // On ia32 function must be in edi.
- __ mov(edi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
- ParameterCount expected(1);
- __ InvokeFunction(edi, expected, CALL_FUNCTION);
-
- // Find a place to put new cached value into.
- Label add_new_entry, update_cache;
- __ mov(ecx, Operand(esp, kPointerSize)); // restore the cache
- // Possible optimization: cache size is constant for the given cache
- // so technically we could use a constant here. However, if we have
- // cache miss this optimization would hardly matter much.
-
- // Check if we could add new entry to cache.
- __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ cmp(ebx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset));
- __ j(greater, &add_new_entry);
-
- // Check if we could evict entry after finger.
- __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kFingerOffset));
- __ add(Operand(edx), Immediate(kEntrySizeSmi));
- __ cmp(ebx, Operand(edx));
- __ j(greater, &update_cache);
-
- // Need to wrap over the cache.
- __ mov(edx, Immediate(kEntriesIndexSmi));
- __ jmp(&update_cache);
-
- __ bind(&add_new_entry);
- __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset));
- __ lea(ebx, Operand(edx, JSFunctionResultCache::kEntrySize << 1));
- __ mov(FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset), ebx);
-
- // Update the cache itself.
- // edx holds the index.
- __ bind(&update_cache);
- __ pop(ebx); // restore the key
- __ mov(FieldOperand(ecx, JSFunctionResultCache::kFingerOffset), edx);
- // Store key.
- __ mov(CodeGenerator::FixedArrayElementOperand(ecx, edx), ebx);
- __ RecordWrite(ecx, 0, ebx, edx);
-
- // Store value.
- __ pop(ecx); // restore the cache.
- __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kFingerOffset));
- __ add(Operand(edx), Immediate(Smi::FromInt(1)));
- __ mov(ebx, eax);
- __ mov(CodeGenerator::FixedArrayElementOperand(ecx, edx), ebx);
- __ RecordWrite(ecx, 0, ebx, edx);
-
- if (!dst_.is(eax)) {
- __ mov(dst_, eax);
- }
-}
-
-
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- masm()->isolate()->global_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
- frame_->Push(FACTORY->undefined_value());
- return;
- }
-
- Load(args->at(1));
- Result key = frame_->Pop();
- key.ToRegister();
-
- Result cache = allocator()->Allocate();
- ASSERT(cache.is_valid());
- __ mov(cache.reg(), ContextOperand(esi, Context::GLOBAL_INDEX));
- __ mov(cache.reg(),
- FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset));
- __ mov(cache.reg(),
- ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ mov(cache.reg(),
- FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id)));
-
- Result tmp = allocator()->Allocate();
- ASSERT(tmp.is_valid());
-
- DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
- cache.reg(),
- key.reg());
-
- // tmp.reg() now holds finger offset as a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ mov(tmp.reg(), FieldOperand(cache.reg(),
- JSFunctionResultCache::kFingerOffset));
- __ cmp(key.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg()));
- deferred->Branch(not_equal);
-
- __ mov(tmp.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg(), 1));
-
- deferred->BindExit();
- frame_->Push(&tmp);
-}
-
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
-
- // Load the argument on the stack and call the stub.
- Load(args->at(0));
- NumberToStringStub stub;
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-class DeferredSwapElements: public DeferredCode {
- public:
- DeferredSwapElements(Register object, Register index1, Register index2)
- : object_(object), index1_(index1), index2_(index2) {
- set_comment("[ DeferredSwapElements");
- }
-
- virtual void Generate();
-
- private:
- Register object_, index1_, index2_;
-};
-
-
-void DeferredSwapElements::Generate() {
- __ push(object_);
- __ push(index1_);
- __ push(index2_);
- __ CallRuntime(Runtime::kSwapElements, 3);
-}
-
-
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
- // Note: this code assumes that indices are passed are within
- // elements' bounds and refer to valid (not holes) values.
- Comment cmnt(masm_, "[ GenerateSwapElements");
-
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
-
- Result index2 = frame_->Pop();
- index2.ToRegister();
-
- Result index1 = frame_->Pop();
- index1.ToRegister();
-
- Result object = frame_->Pop();
- object.ToRegister();
-
- Result tmp1 = allocator()->Allocate();
- tmp1.ToRegister();
- Result tmp2 = allocator()->Allocate();
- tmp2.ToRegister();
-
- frame_->Spill(object.reg());
- frame_->Spill(index1.reg());
- frame_->Spill(index2.reg());
-
- DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(),
- index1.reg(),
- index2.reg());
-
- // Fetch the map and check if array is in fast case.
- // Check that object doesn't require security checks and
- // has no indexed interceptor.
- __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
- deferred->Branch(below);
- __ test_b(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
- KeyedLoadIC::kSlowCaseBitFieldMask);
- deferred->Branch(not_zero);
-
- // Check the object's elements are in fast case and writable.
- __ mov(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
- __ cmp(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
- Immediate(FACTORY->fixed_array_map()));
- deferred->Branch(not_equal);
-
- // Smi-tagging is equivalent to multiplying by 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- // Check that both indices are smis.
- __ mov(tmp2.reg(), index1.reg());
- __ or_(tmp2.reg(), Operand(index2.reg()));
- __ test(tmp2.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
-
- // Check that both indices are valid.
- __ mov(tmp2.reg(), FieldOperand(object.reg(), JSArray::kLengthOffset));
- __ cmp(tmp2.reg(), Operand(index1.reg()));
- deferred->Branch(below_equal);
- __ cmp(tmp2.reg(), Operand(index2.reg()));
- deferred->Branch(below_equal);
-
- // Bring addresses into index1 and index2.
- __ lea(index1.reg(), FixedArrayElementOperand(tmp1.reg(), index1.reg()));
- __ lea(index2.reg(), FixedArrayElementOperand(tmp1.reg(), index2.reg()));
-
- // Swap elements.
- __ mov(object.reg(), Operand(index1.reg(), 0));
- __ mov(tmp2.reg(), Operand(index2.reg(), 0));
- __ mov(Operand(index2.reg(), 0), object.reg());
- __ mov(Operand(index1.reg(), 0), tmp2.reg());
-
- Label done;
- __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
- // Possible optimization: do a check that both values are Smis
- // (or them and test against Smi mask.)
-
- __ mov(tmp2.reg(), tmp1.reg());
- __ RecordWriteHelper(tmp2.reg(), index1.reg(), object.reg());
- __ RecordWriteHelper(tmp1.reg(), index2.reg(), object.reg());
- __ bind(&done);
-
- deferred->BindExit();
- frame_->Push(FACTORY->undefined_value());
-}
-
-
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
- Comment cmnt(masm_, "[ GenerateCallFunction");
-
- ASSERT(args->length() >= 2);
-
- int n_args = args->length() - 2; // for receiver and function.
- Load(args->at(0)); // receiver
- for (int i = 0; i < n_args; i++) {
- Load(args->at(i + 1));
- }
- Load(args->at(n_args + 1)); // function
- Result result = frame_->CallJSFunction(n_args);
- frame_->Push(&result);
-}
-
-
-// Generates the Math.pow method. Only handles special cases and
-// branches to the runtime system for everything else. Please note
-// that this function assumes that the callsite has executed ToNumber
-// on both arguments.
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
- Load(args->at(0));
- Load(args->at(1));
- if (!CpuFeatures::IsSupported(SSE2)) {
- Result res = frame_->CallRuntime(Runtime::kMath_pow, 2);
- frame_->Push(&res);
- } else {
- CpuFeatures::Scope use_sse2(SSE2);
- Label allocate_return;
- // Load the two operands while leaving the values on the frame.
- frame()->Dup();
- Result exponent = frame()->Pop();
- exponent.ToRegister();
- frame()->Spill(exponent.reg());
- frame()->PushElementAt(1);
- Result base = frame()->Pop();
- base.ToRegister();
- frame()->Spill(base.reg());
-
- Result answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- ASSERT(!exponent.reg().is(base.reg()));
- JumpTarget call_runtime;
-
- // Save 1 in xmm3 - we need this several times later on.
- __ mov(answer.reg(), Immediate(1));
- __ cvtsi2sd(xmm3, Operand(answer.reg()));
-
- Label exponent_nonsmi;
- Label base_nonsmi;
- // If the exponent is a heap number go to that specific case.
- __ test(exponent.reg(), Immediate(kSmiTagMask));
- __ j(not_zero, &exponent_nonsmi);
- __ test(base.reg(), Immediate(kSmiTagMask));
- __ j(not_zero, &base_nonsmi);
-
- // Optimized version when y is an integer.
- Label powi;
- __ SmiUntag(base.reg());
- __ cvtsi2sd(xmm0, Operand(base.reg()));
- __ jmp(&powi);
- // exponent is smi and base is a heapnumber.
- __ bind(&base_nonsmi);
- __ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
- FACTORY->heap_number_map());
- call_runtime.Branch(not_equal);
-
- __ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
- // Optimized version of pow if y is an integer.
- __ bind(&powi);
- __ SmiUntag(exponent.reg());
-
- // Save exponent in base as we need to check if exponent is negative later.
- // We know that base and exponent are in different registers.
- __ mov(base.reg(), exponent.reg());
-
- // Get absolute value of exponent.
- Label no_neg;
- __ cmp(exponent.reg(), 0);
- __ j(greater_equal, &no_neg);
- __ neg(exponent.reg());
- __ bind(&no_neg);
-
- // Load xmm1 with 1.
- __ movsd(xmm1, xmm3);
- Label while_true;
- Label no_multiply;
-
- __ bind(&while_true);
- __ shr(exponent.reg(), 1);
- __ j(not_carry, &no_multiply);
- __ mulsd(xmm1, xmm0);
- __ bind(&no_multiply);
- __ test(exponent.reg(), Operand(exponent.reg()));
- __ mulsd(xmm0, xmm0);
- __ j(not_zero, &while_true);
-
- // x has the original value of y - if y is negative return 1/result.
- __ test(base.reg(), Operand(base.reg()));
- __ j(positive, &allocate_return);
- // Special case if xmm1 has reached infinity.
- __ mov(answer.reg(), Immediate(0x7FB00000));
- __ movd(xmm0, Operand(answer.reg()));
- __ cvtss2sd(xmm0, xmm0);
- __ ucomisd(xmm0, xmm1);
- call_runtime.Branch(equal);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
-
- // exponent (or both) is a heapnumber - no matter what we should now work
- // on doubles.
- __ bind(&exponent_nonsmi);
- __ cmp(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
- FACTORY->heap_number_map());
- call_runtime.Branch(not_equal);
- __ movdbl(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
- // Test if exponent is nan.
- __ ucomisd(xmm1, xmm1);
- call_runtime.Branch(parity_even);
-
- Label base_not_smi;
- Label handle_special_cases;
- __ test(base.reg(), Immediate(kSmiTagMask));
- __ j(not_zero, &base_not_smi);
- __ SmiUntag(base.reg());
- __ cvtsi2sd(xmm0, Operand(base.reg()));
- __ jmp(&handle_special_cases);
- __ bind(&base_not_smi);
- __ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
- FACTORY->heap_number_map());
- call_runtime.Branch(not_equal);
- __ mov(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
- __ and_(answer.reg(), HeapNumber::kExponentMask);
- __ cmp(Operand(answer.reg()), Immediate(HeapNumber::kExponentMask));
- // base is NaN or +/-Infinity
- call_runtime.Branch(greater_equal);
- __ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
- // base is in xmm0 and exponent is in xmm1.
- __ bind(&handle_special_cases);
- Label not_minus_half;
- // Test for -0.5.
- // Load xmm2 with -0.5.
- __ mov(answer.reg(), Immediate(0xBF000000));
- __ movd(xmm2, Operand(answer.reg()));
- __ cvtss2sd(xmm2, xmm2);
- // xmm2 now has -0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &not_minus_half);
-
- // Calculates reciprocal of square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorpd(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
-
- // Test for 0.5.
- __ bind(&not_minus_half);
- // Load xmm2 with 0.5.
- // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
- __ addsd(xmm2, xmm3);
- // xmm2 now has 0.5.
- __ ucomisd(xmm2, xmm1);
- call_runtime.Branch(not_equal);
- // Calculates square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorpd(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
-
- JumpTarget done;
- Label failure, success;
- __ bind(&allocate_return);
- // Make a copy of the frame to enable us to handle allocation
- // failure after the JumpTarget jump.
- VirtualFrame* clone = new VirtualFrame(frame());
- __ AllocateHeapNumber(answer.reg(), exponent.reg(),
- base.reg(), &failure);
- __ movdbl(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
- // Remove the two original values from the frame - we only need those
- // in the case where we branch to runtime.
- frame()->Drop(2);
- exponent.Unuse();
- base.Unuse();
- done.Jump(&answer);
- // Use the copy of the original frame as our current frame.
- RegisterFile empty_regs;
- SetFrame(clone, &empty_regs);
- // If we experience an allocation failure we branch to runtime.
- __ bind(&failure);
- call_runtime.Bind();
- answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
-
- done.Bind(&answer);
- frame()->Push(&answer);
- }
-}
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-// Generates the Math.sqrt method. Please note - this function assumes that
-// the callsite has executed ToNumber on the argument.
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
-
- if (!CpuFeatures::IsSupported(SSE2)) {
- Result result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
- frame()->Push(&result);
- } else {
- CpuFeatures::Scope use_sse2(SSE2);
- // Leave original value on the frame if we need to call runtime.
- frame()->Dup();
- Result result = frame()->Pop();
- result.ToRegister();
- frame()->Spill(result.reg());
- Label runtime;
- Label non_smi;
- Label load_done;
- JumpTarget end;
-
- __ test(result.reg(), Immediate(kSmiTagMask));
- __ j(not_zero, &non_smi);
- __ SmiUntag(result.reg());
- __ cvtsi2sd(xmm0, Operand(result.reg()));
- __ jmp(&load_done);
- __ bind(&non_smi);
- __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
- FACTORY->heap_number_map());
- __ j(not_equal, &runtime);
- __ movdbl(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
-
- __ bind(&load_done);
- __ sqrtsd(xmm0, xmm0);
- // A copy of the virtual frame to allow us to go to runtime after the
- // JumpTarget jump.
- Result scratch = allocator()->Allocate();
- VirtualFrame* clone = new VirtualFrame(frame());
- __ AllocateHeapNumber(result.reg(), scratch.reg(), no_reg, &runtime);
-
- __ movdbl(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
- frame()->Drop(1);
- scratch.Unuse();
- end.Jump(&result);
- // We only branch to runtime if we have an allocation error.
- // Use the copy of the original frame as our current frame.
- RegisterFile empty_regs;
- SetFrame(clone, &empty_regs);
- __ bind(&runtime);
- result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
-
- end.Bind(&result);
- frame()->Push(&result);
- }
-}
-
-
-void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
- Load(args->at(0));
- Load(args->at(1));
- Result right_res = frame_->Pop();
- Result left_res = frame_->Pop();
- right_res.ToRegister();
- left_res.ToRegister();
- Result tmp_res = allocator()->Allocate();
- ASSERT(tmp_res.is_valid());
- Register right = right_res.reg();
- Register left = left_res.reg();
- Register tmp = tmp_res.reg();
- right_res.Unuse();
- left_res.Unuse();
- tmp_res.Unuse();
- __ cmp(left, Operand(right));
- destination()->true_target()->Branch(equal);
- // Fail if either is a non-HeapObject.
- __ mov(tmp, left);
- __ and_(Operand(tmp), right);
- __ test(Operand(tmp), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(equal);
- __ CmpObjectType(left, JS_REGEXP_TYPE, tmp);
- destination()->false_target()->Branch(not_equal);
- __ cmp(tmp, FieldOperand(right, HeapObject::kMapOffset));
- destination()->false_target()->Branch(not_equal);
- __ mov(tmp, FieldOperand(left, JSRegExp::kDataOffset));
- __ cmp(tmp, FieldOperand(right, JSRegExp::kDataOffset));
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- if (FLAG_debug_code) {
- __ AbortIfNotString(value.reg());
- }
-
- __ test(FieldOperand(value.reg(), String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
-
- value.Unuse();
- destination()->Split(zero);
-}
-
-
-void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result string = frame_->Pop();
- string.ToRegister();
- if (FLAG_debug_code) {
- __ AbortIfNotString(string.reg());
- }
-
- Result number = allocator()->Allocate();
- ASSERT(number.is_valid());
- __ mov(number.reg(), FieldOperand(string.reg(), String::kHashFieldOffset));
- __ IndexFromHash(number.reg(), number.reg());
- string.Unuse();
- frame_->Push(&number);
-}
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
- ASSERT(!in_safe_int32_mode());
- if (CheckForInlineRuntimeCall(node)) {
- return;
- }
-
- ZoneList<Expression*>* args = node->arguments();
- Comment cmnt(masm_, "[ CallRuntime");
- const Runtime::Function* function = node->function();
-
- if (function == NULL) {
- // Push the builtins object found in the current global object.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(), GlobalObjectOperand());
- __ mov(temp.reg(), FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
- frame_->Push(&temp);
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- if (function == NULL) {
- // Call the JS runtime function.
- frame_->Push(node->name());
- Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
- arg_count,
- loop_nesting_);
- frame_->RestoreContextRegister();
- frame_->Push(&answer);
- } else {
- // Call the C runtime function.
- Result answer = frame_->CallRuntime(function, arg_count);
- frame_->Push(&answer);
- }
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
- Comment cmnt(masm_, "[ UnaryOperation");
-
- Token::Value op = node->op();
-
- if (op == Token::NOT) {
- // Swap the true and false targets but keep the same actual label
- // as the fall through.
- destination()->Invert();
- LoadCondition(node->expression(), destination(), true);
- // Swap the labels back.
- destination()->Invert();
-
- } else if (op == Token::DELETE) {
- Property* property = node->expression()->AsProperty();
- if (property != NULL) {
- Load(property->obj());
- Load(property->key());
- frame_->Push(Smi::FromInt(strict_mode_flag()));
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3);
- frame_->Push(&answer);
- return;
- }
-
- Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
- if (variable != NULL) {
- // Delete of an unqualified identifier is disallowed in strict mode
- // but "delete this" is.
- ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
- Slot* slot = variable->AsSlot();
- if (variable->is_global()) {
- LoadGlobal();
- frame_->Push(variable->name());
- frame_->Push(Smi::FromInt(kNonStrictMode));
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
- CALL_FUNCTION, 3);
- frame_->Push(&answer);
-
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Call the runtime to delete from the context holding the named
- // variable. Sync the virtual frame eagerly so we can push the
- // arguments directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(esi);
- frame_->EmitPush(Immediate(variable->name()));
- Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
- frame_->Push(&answer);
- } else {
- // Default: Result of deleting non-global, not dynamically
- // introduced variables is false.
- frame_->Push(FACTORY->false_value());
- }
- } else {
- // Default: Result of deleting expressions is true.
- Load(node->expression()); // may have side-effects
- frame_->SetElementAt(0, FACTORY->true_value());
- }
-
- } else if (op == Token::TYPEOF) {
- // Special case for loading the typeof expression; see comment on
- // LoadTypeofExpression().
- LoadTypeofExpression(node->expression());
- Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
- frame_->Push(&answer);
-
- } else if (op == Token::VOID) {
- Expression* expression = node->expression();
- if (expression && expression->AsLiteral() && (
- expression->AsLiteral()->IsTrue() ||
- expression->AsLiteral()->IsFalse() ||
- expression->AsLiteral()->handle()->IsNumber() ||
- expression->AsLiteral()->handle()->IsString() ||
- expression->AsLiteral()->handle()->IsJSRegExp() ||
- expression->AsLiteral()->IsNull())) {
- // Omit evaluating the value of the primitive literal.
- // It will be discarded anyway, and can have no side effect.
- frame_->Push(FACTORY->undefined_value());
- } else {
- Load(node->expression());
- frame_->SetElementAt(0, FACTORY->undefined_value());
- }
-
- } else {
- if (in_safe_int32_mode()) {
- Visit(node->expression());
- Result value = frame_->Pop();
- ASSERT(value.is_untagged_int32());
- // Registers containing an int32 value are not multiply used.
- ASSERT(!value.is_register() || !frame_->is_used(value.reg()));
- value.ToRegister();
- switch (op) {
- case Token::SUB: {
- __ neg(value.reg());
- frame_->Push(&value);
- if (node->no_negative_zero()) {
- // -MIN_INT is MIN_INT with the overflow flag set.
- unsafe_bailout_->Branch(overflow);
- } else {
- // MIN_INT and 0 both have bad negations. They both have 31 zeros.
- __ test(value.reg(), Immediate(0x7FFFFFFF));
- unsafe_bailout_->Branch(zero);
- }
- break;
- }
- case Token::BIT_NOT: {
- __ not_(value.reg());
- frame_->Push(&value);
- break;
- }
- case Token::ADD: {
- // Unary plus has no effect on int32 values.
- frame_->Push(&value);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- } else {
- Load(node->expression());
- bool can_overwrite = node->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- bool no_negative_zero = node->expression()->no_negative_zero();
- switch (op) {
- case Token::NOT:
- case Token::DELETE:
- case Token::TYPEOF:
- UNREACHABLE(); // handled above
- break;
-
- case Token::SUB: {
- GenericUnaryOpStub stub(
- Token::SUB,
- overwrite,
- NO_UNARY_FLAGS,
- no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
- Result operand = frame_->Pop();
- Result answer = frame_->CallStub(&stub, &operand);
- answer.set_type_info(TypeInfo::Number());
- frame_->Push(&answer);
- break;
- }
- case Token::BIT_NOT: {
- // Smi check.
- JumpTarget smi_label;
- JumpTarget continue_label;
- Result operand = frame_->Pop();
- TypeInfo operand_info = operand.type_info();
- operand.ToRegister();
- if (operand_info.IsSmi()) {
- if (FLAG_debug_code) __ AbortIfNotSmi(operand.reg());
- frame_->Spill(operand.reg());
- // Set smi tag bit. It will be reset by the not operation.
- __ lea(operand.reg(), Operand(operand.reg(), kSmiTagMask));
- __ not_(operand.reg());
- Result answer = operand;
- answer.set_type_info(TypeInfo::Smi());
- frame_->Push(&answer);
- } else {
- __ test(operand.reg(), Immediate(kSmiTagMask));
- smi_label.Branch(zero, &operand, taken);
-
- GenericUnaryOpStub stub(Token::BIT_NOT,
- overwrite,
- NO_UNARY_SMI_CODE_IN_STUB);
- Result answer = frame_->CallStub(&stub, &operand);
- continue_label.Jump(&answer);
-
- smi_label.Bind(&answer);
- answer.ToRegister();
- frame_->Spill(answer.reg());
- // Set smi tag bit. It will be reset by the not operation.
- __ lea(answer.reg(), Operand(answer.reg(), kSmiTagMask));
- __ not_(answer.reg());
-
- continue_label.Bind(&answer);
- answer.set_type_info(TypeInfo::Integer32());
- frame_->Push(&answer);
- }
- break;
- }
- case Token::ADD: {
- // Smi check.
- JumpTarget continue_label;
- Result operand = frame_->Pop();
- TypeInfo operand_info = operand.type_info();
- operand.ToRegister();
- __ test(operand.reg(), Immediate(kSmiTagMask));
- continue_label.Branch(zero, &operand, taken);
-
- frame_->Push(&operand);
- Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
- CALL_FUNCTION, 1);
-
- continue_label.Bind(&answer);
- if (operand_info.IsSmi()) {
- answer.set_type_info(TypeInfo::Smi());
- } else if (operand_info.IsInteger32()) {
- answer.set_type_info(TypeInfo::Integer32());
- } else {
- answer.set_type_info(TypeInfo::Number());
- }
- frame_->Push(&answer);
- break;
- }
- default:
- UNREACHABLE();
- }
- }
- }
-}
-
-
-// The value in dst was optimistically incremented or decremented. The
-// result overflowed or was not smi tagged. Undo the operation, call
-// into the runtime to convert the argument to a number, and call the
-// specialized add or subtract stub. The result is left in dst.
-class DeferredPrefixCountOperation: public DeferredCode {
- public:
- DeferredPrefixCountOperation(Register dst,
- bool is_increment,
- TypeInfo input_type)
- : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
- set_comment("[ DeferredCountOperation");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- bool is_increment_;
- TypeInfo input_type_;
-};
-
-
-void DeferredPrefixCountOperation::Generate() {
- // Undo the optimistic smi operation.
- if (is_increment_) {
- __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
- } else {
- __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
- }
- Register left;
- if (input_type_.IsNumber()) {
- left = dst_;
- } else {
- __ push(dst_);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- left = eax;
- }
-
- GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS,
- TypeInfo::Number());
- stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// The value in dst was optimistically incremented or decremented. The
-// result overflowed or was not smi tagged. Undo the operation and call
-// into the runtime to convert the argument to a number. Update the
-// original value in old. Call the specialized add or subtract stub.
-// The result is left in dst.
-class DeferredPostfixCountOperation: public DeferredCode {
- public:
- DeferredPostfixCountOperation(Register dst,
- Register old,
- bool is_increment,
- TypeInfo input_type)
- : dst_(dst),
- old_(old),
- is_increment_(is_increment),
- input_type_(input_type) {
- set_comment("[ DeferredCountOperation");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Register old_;
- bool is_increment_;
- TypeInfo input_type_;
-};
-
-
-void DeferredPostfixCountOperation::Generate() {
- // Undo the optimistic smi operation.
- if (is_increment_) {
- __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
- } else {
- __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
- }
- Register left;
- if (input_type_.IsNumber()) {
- __ push(dst_); // Save the input to use as the old value.
- left = dst_;
- } else {
- __ push(dst_);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- __ push(eax); // Save the result of ToNumber to use as the old value.
- left = eax;
- }
-
- GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS,
- TypeInfo::Number());
- stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
- if (!dst_.is(eax)) __ mov(dst_, eax);
- __ pop(old_);
-}
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ CountOperation");
-
- bool is_postfix = node->is_postfix();
- bool is_increment = node->op() == Token::INC;
-
- Variable* var = node->expression()->AsVariableProxy()->AsVariable();
- bool is_const = (var != NULL && var->mode() == Variable::CONST);
-
- // Postfix operations need a stack slot under the reference to hold
- // the old value while the new value is being stored. This is so that
- // in the case that storing the new value requires a call, the old
- // value will be in the frame to be spilled.
- if (is_postfix) frame_->Push(Smi::FromInt(0));
-
- // A constant reference is not saved to, so a constant reference is not a
- // compound assignment reference.
- { Reference target(this, node->expression(), !is_const);
- if (target.is_illegal()) {
- // Spoof the virtual frame to have the expected height (one higher
- // than on entry).
- if (!is_postfix) frame_->Push(Smi::FromInt(0));
- return;
- }
- target.TakeValue();
-
- Result new_value = frame_->Pop();
- new_value.ToRegister();
-
- Result old_value; // Only allocated in the postfix case.
- if (is_postfix) {
- // Allocate a temporary to preserve the old value.
- old_value = allocator_->Allocate();
- ASSERT(old_value.is_valid());
- __ mov(old_value.reg(), new_value.reg());
-
- // The return value for postfix operations is ToNumber(input).
- // Keep more precise type info if the input is some kind of
- // number already. If the input is not a number we have to wait
- // for the deferred code to convert it.
- if (new_value.type_info().IsNumber()) {
- old_value.set_type_info(new_value.type_info());
- }
- }
-
- // Ensure the new value is writable.
- frame_->Spill(new_value.reg());
-
- Result tmp;
- if (new_value.is_smi()) {
- if (FLAG_debug_code) __ AbortIfNotSmi(new_value.reg());
- } else {
- // We don't know statically if the input is a smi.
- // In order to combine the overflow and the smi tag check, we need
- // to be able to allocate a byte register. We attempt to do so
- // without spilling. If we fail, we will generate separate overflow
- // and smi tag checks.
- // We allocate and clear a temporary byte register before performing
- // the count operation since clearing the register using xor will clear
- // the overflow flag.
- tmp = allocator_->AllocateByteRegisterWithoutSpilling();
- if (tmp.is_valid()) {
- __ Set(tmp.reg(), Immediate(0));
- }
- }
-
- if (is_increment) {
- __ add(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
- } else {
- __ sub(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
- }
-
- DeferredCode* deferred = NULL;
- if (is_postfix) {
- deferred = new DeferredPostfixCountOperation(new_value.reg(),
- old_value.reg(),
- is_increment,
- new_value.type_info());
- } else {
- deferred = new DeferredPrefixCountOperation(new_value.reg(),
- is_increment,
- new_value.type_info());
- }
-
- if (new_value.is_smi()) {
- // In case we have a smi as input just check for overflow.
- deferred->Branch(overflow);
- } else {
- // If the count operation didn't overflow and the result is a valid
- // smi, we're done. Otherwise, we jump to the deferred slow-case
- // code.
- // We combine the overflow and the smi tag check if we could
- // successfully allocate a temporary byte register.
- if (tmp.is_valid()) {
- __ setcc(overflow, tmp.reg());
- __ or_(Operand(tmp.reg()), new_value.reg());
- __ test(tmp.reg(), Immediate(kSmiTagMask));
- tmp.Unuse();
- deferred->Branch(not_zero);
- } else {
- // Otherwise we test separately for overflow and smi tag.
- deferred->Branch(overflow);
- __ test(new_value.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- }
- }
- deferred->BindExit();
-
- // Postfix count operations return their input converted to
- // number. The case when the input is already a number is covered
- // above in the allocation code for old_value.
- if (is_postfix && !new_value.type_info().IsNumber()) {
- old_value.set_type_info(TypeInfo::Number());
- }
-
- // The result of ++ or -- is an Integer32 if the
- // input is a smi. Otherwise it is a number.
- if (new_value.is_smi()) {
- new_value.set_type_info(TypeInfo::Integer32());
- } else {
- new_value.set_type_info(TypeInfo::Number());
- }
-
- // Postfix: store the old value in the allocated slot under the
- // reference.
- if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
-
- frame_->Push(&new_value);
- // Non-constant: update the reference.
- if (!is_const) target.SetValue(NOT_CONST_INIT);
- }
-
- // Postfix: drop the new value and use the old.
- if (is_postfix) frame_->Drop();
-}
-
-
-void CodeGenerator::Int32BinaryOperation(BinaryOperation* node) {
- Token::Value op = node->op();
- Comment cmnt(masm_, "[ Int32BinaryOperation");
- ASSERT(in_safe_int32_mode());
- ASSERT(safe_int32_mode_enabled());
- ASSERT(FLAG_safe_int32_compiler);
-
- if (op == Token::COMMA) {
- // Discard left value.
- frame_->Nip(1);
- return;
- }
-
- Result right = frame_->Pop();
- Result left = frame_->Pop();
-
- ASSERT(right.is_untagged_int32());
- ASSERT(left.is_untagged_int32());
- // Registers containing an int32 value are not multiply used.
- ASSERT(!left.is_register() || !frame_->is_used(left.reg()));
- ASSERT(!right.is_register() || !frame_->is_used(right.reg()));
-
- switch (op) {
- case Token::COMMA:
- case Token::OR:
- case Token::AND:
- UNREACHABLE();
- break;
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- if (left.is_constant() || right.is_constant()) {
- int32_t value; // Put constant in value, non-constant in left.
- // Constants are known to be int32 values, from static analysis,
- // or else will be converted to int32 by implicit ECMA [[ToInt32]].
- if (left.is_constant()) {
- ASSERT(left.handle()->IsSmi() || left.handle()->IsHeapNumber());
- value = NumberToInt32(*left.handle());
- left = right;
- } else {
- ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
- value = NumberToInt32(*right.handle());
- }
-
- left.ToRegister();
- if (op == Token::BIT_OR) {
- __ or_(Operand(left.reg()), Immediate(value));
- } else if (op == Token::BIT_XOR) {
- __ xor_(Operand(left.reg()), Immediate(value));
- } else {
- ASSERT(op == Token::BIT_AND);
- __ and_(Operand(left.reg()), Immediate(value));
- }
- } else {
- ASSERT(left.is_register());
- ASSERT(right.is_register());
- if (op == Token::BIT_OR) {
- __ or_(left.reg(), Operand(right.reg()));
- } else if (op == Token::BIT_XOR) {
- __ xor_(left.reg(), Operand(right.reg()));
- } else {
- ASSERT(op == Token::BIT_AND);
- __ and_(left.reg(), Operand(right.reg()));
- }
- }
- frame_->Push(&left);
- right.Unuse();
- break;
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- bool test_shr_overflow = false;
- left.ToRegister();
- if (right.is_constant()) {
- ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
- int shift_amount = NumberToInt32(*right.handle()) & 0x1F;
- if (op == Token::SAR) {
- __ sar(left.reg(), shift_amount);
- } else if (op == Token::SHL) {
- __ shl(left.reg(), shift_amount);
- } else {
- ASSERT(op == Token::SHR);
- __ shr(left.reg(), shift_amount);
- if (shift_amount == 0) test_shr_overflow = true;
- }
- } else {
- // Move right to ecx
- if (left.is_register() && left.reg().is(ecx)) {
- right.ToRegister();
- __ xchg(left.reg(), right.reg());
- left = right; // Left is unused here, copy of right unused by Push.
- } else {
- right.ToRegister(ecx);
- left.ToRegister();
- }
- if (op == Token::SAR) {
- __ sar_cl(left.reg());
- } else if (op == Token::SHL) {
- __ shl_cl(left.reg());
- } else {
- ASSERT(op == Token::SHR);
- __ shr_cl(left.reg());
- test_shr_overflow = true;
- }
- }
- {
- Register left_reg = left.reg();
- frame_->Push(&left);
- right.Unuse();
- if (test_shr_overflow && !node->to_int32()) {
- // Uint32 results with top bit set are not Int32 values.
- // If they will be forced to Int32, skip the test.
- // Test is needed because shr with shift amount 0 does not set flags.
- __ test(left_reg, Operand(left_reg));
- unsafe_bailout_->Branch(sign);
- }
- }
- break;
- }
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- if ((left.is_constant() && op != Token::SUB) || right.is_constant()) {
- int32_t value; // Put constant in value, non-constant in left.
- if (right.is_constant()) {
- ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
- value = NumberToInt32(*right.handle());
- } else {
- ASSERT(left.handle()->IsSmi() || left.handle()->IsHeapNumber());
- value = NumberToInt32(*left.handle());
- left = right;
- }
-
- left.ToRegister();
- if (op == Token::ADD) {
- __ add(Operand(left.reg()), Immediate(value));
- } else if (op == Token::SUB) {
- __ sub(Operand(left.reg()), Immediate(value));
- } else {
- ASSERT(op == Token::MUL);
- __ imul(left.reg(), left.reg(), value);
- }
- } else {
- left.ToRegister();
- ASSERT(left.is_register());
- ASSERT(right.is_register());
- if (op == Token::ADD) {
- __ add(left.reg(), Operand(right.reg()));
- } else if (op == Token::SUB) {
- __ sub(left.reg(), Operand(right.reg()));
- } else {
- ASSERT(op == Token::MUL);
- // We have statically verified that a negative zero can be ignored.
- __ imul(left.reg(), Operand(right.reg()));
- }
- }
- right.Unuse();
- frame_->Push(&left);
- if (!node->to_int32() || op == Token::MUL) {
- // If ToInt32 is called on the result of ADD, SUB, we don't
- // care about overflows.
- // Result of MUL can be non-representable precisely in double so
- // we have to check for overflow.
- unsafe_bailout_->Branch(overflow);
- }
- break;
- case Token::DIV:
- case Token::MOD: {
- if (right.is_register() && (right.reg().is(eax) || right.reg().is(edx))) {
- if (left.is_register() && left.reg().is(edi)) {
- right.ToRegister(ebx);
- } else {
- right.ToRegister(edi);
- }
- }
- left.ToRegister(eax);
- Result edx_reg = allocator_->Allocate(edx);
- right.ToRegister();
- // The results are unused here because BreakTarget::Branch cannot handle
- // live results.
- Register right_reg = right.reg();
- left.Unuse();
- right.Unuse();
- edx_reg.Unuse();
- __ cmp(right_reg, 0);
- // Ensure divisor is positive: no chance of non-int32 or -0 result.
- unsafe_bailout_->Branch(less_equal);
- __ cdq(); // Sign-extend eax into edx:eax
- __ idiv(right_reg);
- if (op == Token::MOD) {
- // Negative zero can arise as a negative divident with a zero result.
- if (!node->no_negative_zero()) {
- Label not_negative_zero;
- __ test(edx, Operand(edx));
- __ j(not_zero, &not_negative_zero);
- __ test(eax, Operand(eax));
- unsafe_bailout_->Branch(negative);
- __ bind(&not_negative_zero);
- }
- Result edx_result(edx, TypeInfo::Integer32());
- edx_result.set_untagged_int32(true);
- frame_->Push(&edx_result);
- } else {
- ASSERT(op == Token::DIV);
- __ test(edx, Operand(edx));
- unsafe_bailout_->Branch(not_equal);
- Result eax_result(eax, TypeInfo::Integer32());
- eax_result.set_untagged_int32(true);
- frame_->Push(&eax_result);
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
- // According to ECMA-262 section 11.11, page 58, the binary logical
- // operators must yield the result of one of the two expressions
- // before any ToBoolean() conversions. This means that the value
- // produced by a && or || operator is not necessarily a boolean.
-
- // NOTE: If the left hand side produces a materialized value (not
- // control flow), we force the right hand side to do the same. This
- // is necessary because we assume that if we get control flow on the
- // last path out of an expression we got it on all paths.
- if (node->op() == Token::AND) {
- ASSERT(!in_safe_int32_mode());
- JumpTarget is_true;
- ControlDestination dest(&is_true, destination()->false_target(), true);
- LoadCondition(node->left(), &dest, false);
-
- if (dest.false_was_fall_through()) {
- // The current false target was used as the fall-through. If
- // there are no dangling jumps to is_true then the left
- // subexpression was unconditionally false. Otherwise we have
- // paths where we do have to evaluate the right subexpression.
- if (is_true.is_linked()) {
- // We need to compile the right subexpression. If the jump to
- // the current false target was a forward jump then we have a
- // valid frame, we have just bound the false target, and we
- // have to jump around the code for the right subexpression.
- if (has_valid_frame()) {
- destination()->false_target()->Unuse();
- destination()->false_target()->Jump();
- }
- is_true.Bind();
- // The left subexpression compiled to control flow, so the
- // right one is free to do so as well.
- LoadCondition(node->right(), destination(), false);
- } else {
- // We have actually just jumped to or bound the current false
- // target but the current control destination is not marked as
- // used.
- destination()->Use(false);
- }
-
- } else if (dest.is_used()) {
- // The left subexpression compiled to control flow (and is_true
- // was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), destination(), false);
-
- } else {
- // We have a materialized value on the frame, so we exit with
- // one on all paths. There are possibly also jumps to is_true
- // from nested subexpressions.
- JumpTarget pop_and_continue;
- JumpTarget exit;
-
- // Avoid popping the result if it converts to 'false' using the
- // standard ToBoolean() conversion as described in ECMA-262,
- // section 9.2, page 30.
- //
- // Duplicate the TOS value. The duplicate will be popped by
- // ToBoolean.
- frame_->Dup();
- ControlDestination dest(&pop_and_continue, &exit, true);
- ToBoolean(&dest);
-
- // Pop the result of evaluating the first part.
- frame_->Drop();
-
- // Compile right side expression.
- is_true.Bind();
- Load(node->right());
-
- // Exit (always with a materialized value).
- exit.Bind();
- }
-
- } else {
- ASSERT(node->op() == Token::OR);
- ASSERT(!in_safe_int32_mode());
- JumpTarget is_false;
- ControlDestination dest(destination()->true_target(), &is_false, false);
- LoadCondition(node->left(), &dest, false);
-
- if (dest.true_was_fall_through()) {
- // The current true target was used as the fall-through. If
- // there are no dangling jumps to is_false then the left
- // subexpression was unconditionally true. Otherwise we have
- // paths where we do have to evaluate the right subexpression.
- if (is_false.is_linked()) {
- // We need to compile the right subexpression. If the jump to
- // the current true target was a forward jump then we have a
- // valid frame, we have just bound the true target, and we
- // have to jump around the code for the right subexpression.
- if (has_valid_frame()) {
- destination()->true_target()->Unuse();
- destination()->true_target()->Jump();
- }
- is_false.Bind();
- // The left subexpression compiled to control flow, so the
- // right one is free to do so as well.
- LoadCondition(node->right(), destination(), false);
- } else {
- // We have just jumped to or bound the current true target but
- // the current control destination is not marked as used.
- destination()->Use(true);
- }
-
- } else if (dest.is_used()) {
- // The left subexpression compiled to control flow (and is_false
- // was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), destination(), false);
-
- } else {
- // We have a materialized value on the frame, so we exit with
- // one on all paths. There are possibly also jumps to is_false
- // from nested subexpressions.
- JumpTarget pop_and_continue;
- JumpTarget exit;
-
- // Avoid popping the result if it converts to 'true' using the
- // standard ToBoolean() conversion as described in ECMA-262,
- // section 9.2, page 30.
- //
- // Duplicate the TOS value. The duplicate will be popped by
- // ToBoolean.
- frame_->Dup();
- ControlDestination dest(&exit, &pop_and_continue, false);
- ToBoolean(&dest);
-
- // Pop the result of evaluating the first part.
- frame_->Drop();
-
- // Compile right side expression.
- is_false.Bind();
- Load(node->right());
-
- // Exit (always with a materialized value).
- exit.Bind();
- }
- }
-}
-
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
- Comment cmnt(masm_, "[ BinaryOperation");
-
- if (node->op() == Token::AND || node->op() == Token::OR) {
- GenerateLogicalBooleanOperation(node);
- } else if (in_safe_int32_mode()) {
- Visit(node->left());
- Visit(node->right());
- Int32BinaryOperation(node);
- } else {
- // NOTE: The code below assumes that the slow cases (calls to runtime)
- // never return a constant/immutable object.
- OverwriteMode overwrite_mode = NO_OVERWRITE;
- if (node->left()->ResultOverwriteAllowed()) {
- overwrite_mode = OVERWRITE_LEFT;
- } else if (node->right()->ResultOverwriteAllowed()) {
- overwrite_mode = OVERWRITE_RIGHT;
- }
-
- if (node->left()->IsTrivial()) {
- Load(node->right());
- Result right = frame_->Pop();
- frame_->Push(node->left());
- frame_->Push(&right);
- } else {
- Load(node->left());
- Load(node->right());
- }
- GenericBinaryOperation(node, overwrite_mode);
- }
-}
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
- ASSERT(!in_safe_int32_mode());
- frame_->PushFunction();
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ CompareOperation");
-
- bool left_already_loaded = false;
-
- // Get the expressions from the node.
- Expression* left = node->left();
- Expression* right = node->right();
- Token::Value op = node->op();
- // To make typeof testing for natives implemented in JavaScript really
- // efficient, we generate special code for expressions of the form:
- // 'typeof <expression> == <string>'.
- UnaryOperation* operation = left->AsUnaryOperation();
- if ((op == Token::EQ || op == Token::EQ_STRICT) &&
- (operation != NULL && operation->op() == Token::TYPEOF) &&
- (right->AsLiteral() != NULL &&
- right->AsLiteral()->handle()->IsString())) {
- Handle<String> check(String::cast(*right->AsLiteral()->handle()));
-
- // Load the operand and move it to a register.
- LoadTypeofExpression(operation->expression());
- Result answer = frame_->Pop();
- answer.ToRegister();
-
- if (check->Equals(HEAP->number_symbol())) {
- __ test(answer.reg(), Immediate(kSmiTagMask));
- destination()->true_target()->Branch(zero);
- frame_->Spill(answer.reg());
- __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ cmp(answer.reg(), FACTORY->heap_number_map());
- answer.Unuse();
- destination()->Split(equal);
-
- } else if (check->Equals(HEAP->string_symbol())) {
- __ test(answer.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
-
- // It can be an undetectable string object.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- destination()->false_target()->Branch(not_zero);
- __ CmpInstanceType(temp.reg(), FIRST_NONSTRING_TYPE);
- temp.Unuse();
- answer.Unuse();
- destination()->Split(below);
-
- } else if (check->Equals(HEAP->boolean_symbol())) {
- __ cmp(answer.reg(), FACTORY->true_value());
- destination()->true_target()->Branch(equal);
- __ cmp(answer.reg(), FACTORY->false_value());
- answer.Unuse();
- destination()->Split(equal);
-
- } else if (check->Equals(HEAP->undefined_symbol())) {
- __ cmp(answer.reg(), FACTORY->undefined_value());
- destination()->true_target()->Branch(equal);
-
- __ test(answer.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
-
- // It can be an undetectable object.
- frame_->Spill(answer.reg());
- __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ test_b(FieldOperand(answer.reg(), Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- answer.Unuse();
- destination()->Split(not_zero);
-
- } else if (check->Equals(HEAP->function_symbol())) {
- __ test(answer.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
- frame_->Spill(answer.reg());
- __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
- destination()->true_target()->Branch(equal);
- // Regular expressions are callable so typeof == 'function'.
- __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
- answer.Unuse();
- destination()->Split(equal);
- } else if (check->Equals(HEAP->object_symbol())) {
- __ test(answer.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
- __ cmp(answer.reg(), FACTORY->null_value());
- destination()->true_target()->Branch(equal);
-
- Result map = allocator()->Allocate();
- ASSERT(map.is_valid());
- // Regular expressions are typeof == 'function', not 'object'.
- __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, map.reg());
- destination()->false_target()->Branch(equal);
-
- // It can be an undetectable object.
- __ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- destination()->false_target()->Branch(not_zero);
- // Do a range test for JSObject type. We can't use
- // MacroAssembler::IsInstanceJSObjectType, because we are using a
- // ControlDestination, so we copy its implementation here.
- __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
- __ sub(Operand(map.reg()), Immediate(FIRST_JS_OBJECT_TYPE));
- __ cmp(map.reg(), LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
- answer.Unuse();
- map.Unuse();
- destination()->Split(below_equal);
- } else {
- // Uncommon case: typeof testing against a string literal that is
- // never returned from the typeof operator.
- answer.Unuse();
- destination()->Goto(false);
- }
- return;
- } else if (op == Token::LT &&
- right->AsLiteral() != NULL &&
- right->AsLiteral()->handle()->IsHeapNumber()) {
- Handle<HeapNumber> check(HeapNumber::cast(*right->AsLiteral()->handle()));
- if (check->value() == 2147483648.0) { // 0x80000000.
- Load(left);
- left_already_loaded = true;
- Result lhs = frame_->Pop();
- lhs.ToRegister();
- __ test(lhs.reg(), Immediate(kSmiTagMask));
- destination()->true_target()->Branch(zero); // All Smis are less.
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
- __ mov(scratch.reg(), FieldOperand(lhs.reg(), HeapObject::kMapOffset));
- __ cmp(scratch.reg(), FACTORY->heap_number_map());
- JumpTarget not_a_number;
- not_a_number.Branch(not_equal, &lhs);
- __ mov(scratch.reg(),
- FieldOperand(lhs.reg(), HeapNumber::kExponentOffset));
- __ cmp(Operand(scratch.reg()), Immediate(0xfff00000));
- not_a_number.Branch(above_equal, &lhs); // It's a negative NaN or -Inf.
- const uint32_t borderline_exponent =
- (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch.reg()), Immediate(borderline_exponent));
- scratch.Unuse();
- lhs.Unuse();
- destination()->true_target()->Branch(less);
- destination()->false_target()->Jump();
-
- not_a_number.Bind(&lhs);
- frame_->Push(&lhs);
- }
- }
-
- Condition cc = no_condition;
- bool strict = false;
- switch (op) {
- case Token::EQ_STRICT:
- strict = true;
- // Fall through
- case Token::EQ:
- cc = equal;
- break;
- case Token::LT:
- cc = less;
- break;
- case Token::GT:
- cc = greater;
- break;
- case Token::LTE:
- cc = less_equal;
- break;
- case Token::GTE:
- cc = greater_equal;
- break;
- case Token::IN: {
- if (!left_already_loaded) Load(left);
- Load(right);
- Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
- frame_->Push(&answer); // push the result
- return;
- }
- case Token::INSTANCEOF: {
- if (!left_already_loaded) Load(left);
- Load(right);
- InstanceofStub stub(InstanceofStub::kNoFlags);
- Result answer = frame_->CallStub(&stub, 2);
- answer.ToRegister();
- __ test(answer.reg(), Operand(answer.reg()));
- answer.Unuse();
- destination()->Split(zero);
- return;
- }
- default:
- UNREACHABLE();
- }
-
- if (left->IsTrivial()) {
- if (!left_already_loaded) {
- Load(right);
- Result right_result = frame_->Pop();
- frame_->Push(left);
- frame_->Push(&right_result);
- } else {
- Load(right);
- }
- } else {
- if (!left_already_loaded) Load(left);
- Load(right);
- }
- Comparison(node, cc, strict, destination());
-}
-
-
-void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ CompareToNull");
-
- Load(node->expression());
- Result operand = frame_->Pop();
- operand.ToRegister();
- __ cmp(operand.reg(), FACTORY->null_value());
- if (node->is_strict()) {
- operand.Unuse();
- destination()->Split(equal);
- } else {
- // The 'null' value is only equal to 'undefined' if using non-strict
- // comparisons.
- destination()->true_target()->Branch(equal);
- __ cmp(operand.reg(), FACTORY->undefined_value());
- destination()->true_target()->Branch(equal);
- __ test(operand.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(equal);
-
- // It can be an undetectable object.
- // Use a scratch register in preference to spilling operand.reg().
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(),
- FieldOperand(operand.reg(), HeapObject::kMapOffset));
- __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- temp.Unuse();
- operand.Unuse();
- destination()->Split(not_zero);
- }
-}
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() {
- return (allocator()->count(eax) == (frame()->is_used(eax) ? 1 : 0))
- && (allocator()->count(ebx) == (frame()->is_used(ebx) ? 1 : 0))
- && (allocator()->count(ecx) == (frame()->is_used(ecx) ? 1 : 0))
- && (allocator()->count(edx) == (frame()->is_used(edx) ? 1 : 0))
- && (allocator()->count(edi) == (frame()->is_used(edi) ? 1 : 0));
-}
-#endif
-
-
-// Emit a LoadIC call to get the value from receiver and leave it in
-// dst.
-class DeferredReferenceGetNamedValue: public DeferredCode {
- public:
- DeferredReferenceGetNamedValue(Register dst,
- Register receiver,
- Handle<String> name,
- bool is_contextual)
- : dst_(dst),
- receiver_(receiver),
- name_(name),
- is_contextual_(is_contextual),
- is_dont_delete_(false) {
- set_comment(is_contextual
- ? "[ DeferredReferenceGetNamedValue (contextual)"
- : "[ DeferredReferenceGetNamedValue");
- }
-
- virtual void Generate();
-
- Label* patch_site() { return &patch_site_; }
-
- void set_is_dont_delete(bool value) {
- ASSERT(is_contextual_);
- is_dont_delete_ = value;
- }
-
- private:
- Label patch_site_;
- Register dst_;
- Register receiver_;
- Handle<String> name_;
- bool is_contextual_;
- bool is_dont_delete_;
-};
-
-
-void DeferredReferenceGetNamedValue::Generate() {
- if (!receiver_.is(eax)) {
- __ mov(eax, receiver_);
- }
- __ Set(ecx, Immediate(name_));
- Handle<Code> ic(masm()->isolate()->builtins()->builtin(
- Builtins::kLoadIC_Initialize));
- RelocInfo::Mode mode = is_contextual_
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- __ call(ic, mode);
- // The call must be followed by:
- // - a test eax instruction to indicate that the inobject property
- // case was inlined.
- // - a mov ecx or mov edx instruction to indicate that the
- // contextual property load was inlined.
- //
- // Store the delta to the map check instruction here in the test
- // instruction. Use masm_-> instead of the __ macro since the
- // latter can't return a value.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- Counters* counters = masm()->isolate()->counters();
- if (is_contextual_) {
- masm_->mov(is_dont_delete_ ? edx : ecx, -delta_to_patch_site);
- __ IncrementCounter(counters->named_load_global_inline_miss(), 1);
- if (is_dont_delete_) {
- __ IncrementCounter(counters->dont_delete_hint_miss(), 1);
- }
- } else {
- masm_->test(eax, Immediate(-delta_to_patch_site));
- __ IncrementCounter(counters->named_load_inline_miss(), 1);
- }
-
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-class DeferredReferenceGetKeyedValue: public DeferredCode {
- public:
- explicit DeferredReferenceGetKeyedValue(Register dst,
- Register receiver,
- Register key)
- : dst_(dst), receiver_(receiver), key_(key) {
- set_comment("[ DeferredReferenceGetKeyedValue");
- }
-
- virtual void Generate();
-
- Label* patch_site() { return &patch_site_; }
-
- private:
- Label patch_site_;
- Register dst_;
- Register receiver_;
- Register key_;
-};
-
-
-void DeferredReferenceGetKeyedValue::Generate() {
- if (!receiver_.is(eax)) {
- // Register eax is available for key.
- if (!key_.is(eax)) {
- __ mov(eax, key_);
- }
- if (!receiver_.is(edx)) {
- __ mov(edx, receiver_);
- }
- } else if (!key_.is(edx)) {
- // Register edx is available for receiver.
- if (!receiver_.is(edx)) {
- __ mov(edx, receiver_);
- }
- if (!key_.is(eax)) {
- __ mov(eax, key_);
- }
- } else {
- __ xchg(edx, eax);
- }
- // Calculate the delta from the IC call instruction to the map check
- // cmp instruction in the inlined version. This delta is stored in
- // a test(eax, delta) instruction after the call so that we can find
- // it in the IC initialization code and patch the cmp instruction.
- // This means that we cannot allow test instructions after calls to
- // KeyedLoadIC stubs in other places.
- Handle<Code> ic(masm()->isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // The delta from the start of the map-compare instruction to the
- // test instruction. We use masm_-> directly here instead of the __
- // macro because the macro sometimes uses macro expansion to turn
- // into something that can't return a value. This is encountered
- // when doing generated code coverage tests.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- masm_->test(eax, Immediate(-delta_to_patch_site));
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_inline_miss(), 1);
-
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-class DeferredReferenceSetKeyedValue: public DeferredCode {
- public:
- DeferredReferenceSetKeyedValue(Register value,
- Register key,
- Register receiver,
- Register scratch,
- StrictModeFlag strict_mode)
- : value_(value),
- key_(key),
- receiver_(receiver),
- scratch_(scratch),
- strict_mode_(strict_mode) {
- set_comment("[ DeferredReferenceSetKeyedValue");
- }
-
- virtual void Generate();
-
- Label* patch_site() { return &patch_site_; }
-
- private:
- Register value_;
- Register key_;
- Register receiver_;
- Register scratch_;
- Label patch_site_;
- StrictModeFlag strict_mode_;
-};
-
-
-void DeferredReferenceSetKeyedValue::Generate() {
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_store_inline_miss(), 1);
- // Move value_ to eax, key_ to ecx, and receiver_ to edx.
- Register old_value = value_;
-
- // First, move value to eax.
- if (!value_.is(eax)) {
- if (key_.is(eax)) {
- // Move key_ out of eax, preferably to ecx.
- if (!value_.is(ecx) && !receiver_.is(ecx)) {
- __ mov(ecx, key_);
- key_ = ecx;
- } else {
- __ mov(scratch_, key_);
- key_ = scratch_;
- }
- }
- if (receiver_.is(eax)) {
- // Move receiver_ out of eax, preferably to edx.
- if (!value_.is(edx) && !key_.is(edx)) {
- __ mov(edx, receiver_);
- receiver_ = edx;
- } else {
- // Both moves to scratch are from eax, also, no valid path hits both.
- __ mov(scratch_, receiver_);
- receiver_ = scratch_;
- }
- }
- __ mov(eax, value_);
- value_ = eax;
- }
-
- // Now value_ is in eax. Move the other two to the right positions.
- // We do not update the variables key_ and receiver_ to ecx and edx.
- if (key_.is(ecx)) {
- if (!receiver_.is(edx)) {
- __ mov(edx, receiver_);
- }
- } else if (key_.is(edx)) {
- if (receiver_.is(ecx)) {
- __ xchg(edx, ecx);
- } else {
- __ mov(ecx, key_);
- if (!receiver_.is(edx)) {
- __ mov(edx, receiver_);
- }
- }
- } else { // Key is not in edx or ecx.
- if (!receiver_.is(edx)) {
- __ mov(edx, receiver_);
- }
- __ mov(ecx, key_);
- }
-
- // Call the IC stub.
- Handle<Code> ic(masm()->isolate()->builtins()->builtin(
- (strict_mode_ == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict
- : Builtins::kKeyedStoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // The delta from the start of the map-compare instruction to the
- // test instruction. We use masm_-> directly here instead of the
- // __ macro because the macro sometimes uses macro expansion to turn
- // into something that can't return a value. This is encountered
- // when doing generated code coverage tests.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- masm_->test(eax, Immediate(-delta_to_patch_site));
- // Restore value (returned from store IC) register.
- if (!old_value.is(eax)) __ mov(old_value, eax);
-}
-
-
-Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
-
- Isolate* isolate = masm()->isolate();
- Factory* factory = isolate->factory();
- Counters* counters = isolate->counters();
-
- bool contextual_load_in_builtin =
- is_contextual &&
- (isolate->bootstrapper()->IsActive() ||
- (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
-
- Result result;
- // Do not inline in the global code or when not in loop.
- if (scope()->is_global_scope() ||
- loop_nesting() == 0 ||
- contextual_load_in_builtin) {
- Comment cmnt(masm(), "[ Load from named Property");
- frame()->Push(name);
-
- RelocInfo::Mode mode = is_contextual
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- result = frame()->CallLoadIC(mode);
- // A test eax instruction following the call signals that the inobject
- // property case was inlined. Ensure that there is not a test eax
- // instruction here.
- __ nop();
- } else {
- // Inline the property load.
- Comment cmnt(masm(), is_contextual
- ? "[ Inlined contextual property load"
- : "[ Inlined named property load");
- Result receiver = frame()->Pop();
- receiver.ToRegister();
-
- result = allocator()->Allocate();
- ASSERT(result.is_valid());
- DeferredReferenceGetNamedValue* deferred =
- new DeferredReferenceGetNamedValue(result.reg(),
- receiver.reg(),
- name,
- is_contextual);
-
- if (!is_contextual) {
- // Check that the receiver is a heap object.
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- deferred->Branch(zero);
- }
-
- __ bind(deferred->patch_site());
- // This is the map check instruction that will be patched (so we can't
- // use the double underscore macro that may insert instructions).
- // Initially use an invalid map to force a failure.
- masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(factory->null_value()));
- // This branch is always a forwards branch so it's always a fixed size
- // which allows the assert below to succeed and patching to work.
- deferred->Branch(not_equal);
-
- // The delta from the patch label to the actual load must be
- // statically known.
- ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
- LoadIC::kOffsetToLoadInstruction);
-
- if (is_contextual) {
- // Load the (initialy invalid) cell and get its value.
- masm()->mov(result.reg(), factory->null_value());
- if (FLAG_debug_code) {
- __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
- factory->global_property_cell_map());
- __ Assert(equal, "Uninitialized inlined contextual load");
- }
- __ mov(result.reg(),
- FieldOperand(result.reg(), JSGlobalPropertyCell::kValueOffset));
- __ cmp(result.reg(), factory->the_hole_value());
- deferred->Branch(equal);
- bool is_dont_delete = false;
- if (!info_->closure().is_null()) {
- // When doing lazy compilation we can check if the global cell
- // already exists and use its "don't delete" status as a hint.
- AssertNoAllocation no_gc;
- v8::internal::GlobalObject* global_object =
- info_->closure()->context()->global();
- LookupResult lookup;
- global_object->LocalLookupRealNamedProperty(*name, &lookup);
- if (lookup.IsProperty() && lookup.type() == NORMAL) {
- ASSERT(lookup.holder() == global_object);
- ASSERT(global_object->property_dictionary()->ValueAt(
- lookup.GetDictionaryEntry())->IsJSGlobalPropertyCell());
- is_dont_delete = lookup.IsDontDelete();
- }
- }
- deferred->set_is_dont_delete(is_dont_delete);
- if (!is_dont_delete) {
- __ cmp(result.reg(), factory->the_hole_value());
- deferred->Branch(equal);
- } else if (FLAG_debug_code) {
- __ cmp(result.reg(), factory->the_hole_value());
- __ Check(not_equal, "DontDelete cells can't contain the hole");
- }
- __ IncrementCounter(counters->named_load_global_inline(), 1);
- if (is_dont_delete) {
- __ IncrementCounter(counters->dont_delete_hint_hit(), 1);
- }
- } else {
- // The initial (invalid) offset has to be large enough to force a 32-bit
- // instruction encoding to allow patching with an arbitrary offset. Use
- // kMaxInt (minus kHeapObjectTag).
- int offset = kMaxInt;
- masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
- __ IncrementCounter(counters->named_load_inline(), 1);
- }
-
- deferred->BindExit();
- }
- ASSERT(frame()->height() == original_height - 1);
- return result;
-}
-
-
-Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
- int expected_height = frame()->height() - (is_contextual ? 1 : 2);
-#endif
-
- Result result;
- if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
- result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
- // A test eax instruction following the call signals that the inobject
- // property case was inlined. Ensure that there is not a test eax
- // instruction here.
- __ nop();
- } else {
- // Inline the in-object property case.
- JumpTarget slow, done;
- Label patch_site;
-
- // Get the value and receiver from the stack.
- Result value = frame()->Pop();
- value.ToRegister();
- Result receiver = frame()->Pop();
- receiver.ToRegister();
-
- // Allocate result register.
- result = allocator()->Allocate();
- ASSERT(result.is_valid() && receiver.is_valid() && value.is_valid());
-
- // Check that the receiver is a heap object.
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- slow.Branch(zero, &value, &receiver);
-
- // This is the map check instruction that will be patched (so we can't
- // use the double underscore macro that may insert instructions).
- // Initially use an invalid map to force a failure.
- __ bind(&patch_site);
- masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(FACTORY->null_value()));
- // This branch is always a forwards branch so it's always a fixed size
- // which allows the assert below to succeed and patching to work.
- slow.Branch(not_equal, &value, &receiver);
-
- // The delta from the patch label to the store offset must be
- // statically known.
- ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) ==
- StoreIC::kOffsetToStoreInstruction);
-
- // The initial (invalid) offset has to be large enough to force a 32-bit
- // instruction encoding to allow patching with an arbitrary offset. Use
- // kMaxInt (minus kHeapObjectTag).
- int offset = kMaxInt;
- __ mov(FieldOperand(receiver.reg(), offset), value.reg());
- __ mov(result.reg(), Operand(value.reg()));
-
- // Allocate scratch register for write barrier.
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
-
- // The write barrier clobbers all input registers, so spill the
- // receiver and the value.
- frame_->Spill(receiver.reg());
- frame_->Spill(value.reg());
-
- // If the receiver and the value share a register allocate a new
- // register for the receiver.
- if (receiver.reg().is(value.reg())) {
- receiver = allocator()->Allocate();
- ASSERT(receiver.is_valid());
- __ mov(receiver.reg(), Operand(value.reg()));
- }
-
- // Update the write barrier. To save instructions in the inlined
- // version we do not filter smis.
- Label skip_write_barrier;
- __ InNewSpace(receiver.reg(), value.reg(), equal, &skip_write_barrier);
- int delta_to_record_write = masm_->SizeOfCodeGeneratedSince(&patch_site);
- __ lea(scratch.reg(), Operand(receiver.reg(), offset));
- __ RecordWriteHelper(receiver.reg(), scratch.reg(), value.reg());
- if (FLAG_debug_code) {
- __ mov(receiver.reg(), Immediate(BitCast<int32_t>(kZapValue)));
- __ mov(value.reg(), Immediate(BitCast<int32_t>(kZapValue)));
- __ mov(scratch.reg(), Immediate(BitCast<int32_t>(kZapValue)));
- }
- __ bind(&skip_write_barrier);
- value.Unuse();
- scratch.Unuse();
- receiver.Unuse();
- done.Jump(&result);
-
- slow.Bind(&value, &receiver);
- frame()->Push(&receiver);
- frame()->Push(&value);
- result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
- // Encode the offset to the map check instruction and the offset
- // to the write barrier store address computation in a test eax
- // instruction.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site);
- __ test(eax,
- Immediate((delta_to_record_write << 16) | delta_to_patch_site));
- done.Bind(&result);
- }
-
- ASSERT_EQ(expected_height, frame()->height());
- return result;
-}
-
-
-Result CodeGenerator::EmitKeyedLoad() {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Result result;
- // Inline array load code if inside of a loop. We do not know the
- // receiver map yet, so we initially generate the code with a check
- // against an invalid map. In the inline cache code, we patch the map
- // check if appropriate.
- if (loop_nesting() > 0) {
- Comment cmnt(masm_, "[ Inlined load from keyed Property");
-
- // Use a fresh temporary to load the elements without destroying
- // the receiver which is needed for the deferred slow case.
- Result elements = allocator()->Allocate();
- ASSERT(elements.is_valid());
-
- Result key = frame_->Pop();
- Result receiver = frame_->Pop();
- key.ToRegister();
- receiver.ToRegister();
-
- // If key and receiver are shared registers on the frame, their values will
- // be automatically saved and restored when going to deferred code.
- // The result is in elements, which is guaranteed non-shared.
- DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(elements.reg(),
- receiver.reg(),
- key.reg());
-
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- deferred->Branch(zero);
-
- // Check that the receiver has the expected map.
- // Initially, use an invalid map. The map is patched in the IC
- // initialization code.
- __ bind(deferred->patch_site());
- // Use masm-> here instead of the double underscore macro since extra
- // coverage code can interfere with the patching.
- masm_->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(FACTORY->null_value()));
- deferred->Branch(not_equal);
-
- // Check that the key is a smi.
- if (!key.is_smi()) {
- __ test(key.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(key.reg());
- }
-
- // Get the elements array from the receiver.
- __ mov(elements.reg(),
- FieldOperand(receiver.reg(), JSObject::kElementsOffset));
- __ AssertFastElements(elements.reg());
-
- // Check that the key is within bounds.
- __ cmp(key.reg(),
- FieldOperand(elements.reg(), FixedArray::kLengthOffset));
- deferred->Branch(above_equal);
-
- // Load and check that the result is not the hole.
- // Key holds a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ mov(elements.reg(),
- FieldOperand(elements.reg(),
- key.reg(),
- times_2,
- FixedArray::kHeaderSize));
- result = elements;
- __ cmp(Operand(result.reg()), Immediate(FACTORY->the_hole_value()));
- deferred->Branch(equal);
- __ IncrementCounter(masm_->isolate()->counters()->keyed_load_inline(), 1);
-
- deferred->BindExit();
- } else {
- Comment cmnt(masm_, "[ Load from keyed Property");
- result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed load. The explicit nop instruction is here because
- // the push that follows might be peep-hole optimized away.
- __ nop();
- }
- ASSERT(frame()->height() == original_height - 2);
- return result;
-}
-
-
-Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Result result;
- // Generate inlined version of the keyed store if the code is in a loop
- // and the key is likely to be a smi.
- if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
- Comment cmnt(masm(), "[ Inlined store to keyed Property");
-
- // Get the receiver, key and value into registers.
- result = frame()->Pop();
- Result key = frame()->Pop();
- Result receiver = frame()->Pop();
-
- Result tmp = allocator_->Allocate();
- ASSERT(tmp.is_valid());
- Result tmp2 = allocator_->Allocate();
- ASSERT(tmp2.is_valid());
-
- // Determine whether the value is a constant before putting it in a
- // register.
- bool value_is_constant = result.is_constant();
-
- // Make sure that value, key and receiver are in registers.
- result.ToRegister();
- key.ToRegister();
- receiver.ToRegister();
-
- DeferredReferenceSetKeyedValue* deferred =
- new DeferredReferenceSetKeyedValue(result.reg(),
- key.reg(),
- receiver.reg(),
- tmp.reg(),
- strict_mode_flag());
-
- // Check that the receiver is not a smi.
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- deferred->Branch(zero);
-
- // Check that the key is a smi.
- if (!key.is_smi()) {
- __ test(key.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(key.reg());
- }
-
- // Check that the receiver is a JSArray.
- __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, tmp.reg());
- deferred->Branch(not_equal);
-
- // Get the elements array from the receiver and check that it is not a
- // dictionary.
- __ mov(tmp.reg(),
- FieldOperand(receiver.reg(), JSArray::kElementsOffset));
-
- // Check whether it is possible to omit the write barrier. If the elements
- // array is in new space or the value written is a smi we can safely update
- // the elements array without write barrier.
- Label in_new_space;
- __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
- if (!value_is_constant) {
- __ test(result.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- }
-
- __ bind(&in_new_space);
- // Bind the deferred code patch site to be able to locate the fixed
- // array map comparison. When debugging, we patch this comparison to
- // always fail so that we will hit the IC call in the deferred code
- // which will allow the debugger to break for fast case stores.
- __ bind(deferred->patch_site());
- __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
- Immediate(FACTORY->fixed_array_map()));
- deferred->Branch(not_equal);
-
- // Check that the key is within bounds. Both the key and the length of
- // the JSArray are smis (because the fixed array check above ensures the
- // elements are in fast case). Use unsigned comparison to handle negative
- // keys.
- __ cmp(key.reg(),
- FieldOperand(receiver.reg(), JSArray::kLengthOffset));
- deferred->Branch(above_equal);
-
- // Store the value.
- __ mov(FixedArrayElementOperand(tmp.reg(), key.reg()), result.reg());
- __ IncrementCounter(masm_->isolate()->counters()->keyed_store_inline(), 1);
-
- deferred->BindExit();
- } else {
- result = frame()->CallKeyedStoreIC(strict_mode_flag());
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed store.
- __ nop();
- }
- ASSERT(frame()->height() == original_height - 3);
- return result;
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-Handle<String> Reference::GetName() {
- ASSERT(type_ == NAMED);
- Property* property = expression_->AsProperty();
- if (property == NULL) {
- // Global variable reference treated as a named property reference.
- VariableProxy* proxy = expression_->AsVariableProxy();
- ASSERT(proxy->AsVariable() != NULL);
- ASSERT(proxy->AsVariable()->is_global());
- return proxy->name();
- } else {
- Literal* raw_name = property->key()->AsLiteral();
- ASSERT(raw_name != NULL);
- return Handle<String>::cast(raw_name->handle());
- }
-}
-
-
-void Reference::GetValue() {
- ASSERT(!cgen_->in_spilled_code());
- ASSERT(cgen_->HasValidEntryRegisters());
- ASSERT(!is_illegal());
- MacroAssembler* masm = cgen_->masm();
-
- // Record the source position for the property load.
- Property* property = expression_->AsProperty();
- if (property != NULL) {
- cgen_->CodeForSourcePosition(property->position());
- }
-
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Load from Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- ASSERT(slot != NULL);
- cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- if (!persist_after_get_) set_unloaded();
- break;
- }
-
- case NAMED: {
- Variable* var = expression_->AsVariableProxy()->AsVariable();
- bool is_global = var != NULL;
- ASSERT(!is_global || var->is_global());
- if (persist_after_get_) cgen_->frame()->Dup();
- Result result = cgen_->EmitNamedLoad(GetName(), is_global);
- if (!persist_after_get_) set_unloaded();
- cgen_->frame()->Push(&result);
- break;
- }
-
- case KEYED: {
- if (persist_after_get_) {
- cgen_->frame()->PushElementAt(1);
- cgen_->frame()->PushElementAt(1);
- }
- Result value = cgen_->EmitKeyedLoad();
- cgen_->frame()->Push(&value);
- if (!persist_after_get_) set_unloaded();
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void Reference::TakeValue() {
- // For non-constant frame-allocated slots, we invalidate the value in the
- // slot. For all others, we fall back on GetValue.
- ASSERT(!cgen_->in_spilled_code());
- ASSERT(!is_illegal());
- if (type_ != SLOT) {
- GetValue();
- return;
- }
-
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- ASSERT(slot != NULL);
- if (slot->type() == Slot::LOOKUP ||
- slot->type() == Slot::CONTEXT ||
- slot->var()->mode() == Variable::CONST ||
- slot->is_arguments()) {
- GetValue();
- return;
- }
-
- // Only non-constant, frame-allocated parameters and locals can
- // reach here. Be careful not to use the optimizations for arguments
- // object access since it may not have been initialized yet.
- ASSERT(!slot->is_arguments());
- if (slot->type() == Slot::PARAMETER) {
- cgen_->frame()->TakeParameterAt(slot->index());
- } else {
- ASSERT(slot->type() == Slot::LOCAL);
- cgen_->frame()->TakeLocalAt(slot->index());
- }
-
- ASSERT(persist_after_get_);
- // Do not unload the reference, because it is used in SetValue.
-}
-
-
-void Reference::SetValue(InitState init_state) {
- ASSERT(cgen_->HasValidEntryRegisters());
- ASSERT(!is_illegal());
- MacroAssembler* masm = cgen_->masm();
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Store to Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- ASSERT(slot != NULL);
- cgen_->StoreToSlot(slot, init_state);
- set_unloaded();
- break;
- }
-
- case NAMED: {
- Comment cmnt(masm, "[ Store to named Property");
- Result answer = cgen_->EmitNamedStore(GetName(), false);
- cgen_->frame()->Push(&answer);
- set_unloaded();
- break;
- }
-
- case KEYED: {
- Comment cmnt(masm, "[ Store to keyed Property");
- Property* property = expression()->AsProperty();
- ASSERT(property != NULL);
-
- Result answer = cgen_->EmitKeyedStore(property->key()->type());
- cgen_->frame()->Push(&answer);
- set_unloaded();
- break;
- }
-
- case UNLOADED:
- case ILLEGAL:
- UNREACHABLE();
- }
-}
-
-
-#undef __
-
-#define __ masm.
-
-
-static void MemCopyWrapper(void* dest, const void* src, size_t size) {
- memcpy(dest, src, size);
-}
-
-
-OS::MemCopyFunction CreateMemCopyFunction() {
- size_t actual_size;
- // Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
- &actual_size,
- true));
- if (buffer == NULL) return &MemCopyWrapper;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
-
- // Generated code is put into a fixed, unmovable, buffer, and not into
- // the V8 heap. We can't, and don't, refer to any relocatable addresses
- // (e.g. the JavaScript nan-object).
-
- // 32-bit C declaration function calls pass arguments on stack.
-
- // Stack layout:
- // esp[12]: Third argument, size.
- // esp[8]: Second argument, source pointer.
- // esp[4]: First argument, destination pointer.
- // esp[0]: return address
-
- const int kDestinationOffset = 1 * kPointerSize;
- const int kSourceOffset = 2 * kPointerSize;
- const int kSizeOffset = 3 * kPointerSize;
-
- int stack_offset = 0; // Update if we change the stack height.
-
- if (FLAG_debug_code) {
- __ cmp(Operand(esp, kSizeOffset + stack_offset),
- Immediate(OS::kMinComplexMemCopy));
- Label ok;
- __ j(greater_equal, &ok);
- __ int3();
- __ bind(&ok);
- }
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope enable(SSE2);
- __ push(edi);
- __ push(esi);
- stack_offset += 2 * kPointerSize;
- Register dst = edi;
- Register src = esi;
- Register count = ecx;
- __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
- __ mov(src, Operand(esp, stack_offset + kSourceOffset));
- __ mov(count, Operand(esp, stack_offset + kSizeOffset));
-
-
- __ movdqu(xmm0, Operand(src, 0));
- __ movdqu(Operand(dst, 0), xmm0);
- __ mov(edx, dst);
- __ and_(edx, 0xF);
- __ neg(edx);
- __ add(Operand(edx), Immediate(16));
- __ add(dst, Operand(edx));
- __ add(src, Operand(edx));
- __ sub(Operand(count), edx);
-
- // edi is now aligned. Check if esi is also aligned.
- Label unaligned_source;
- __ test(Operand(src), Immediate(0x0F));
- __ j(not_zero, &unaligned_source);
- {
- // Copy loop for aligned source and destination.
- __ mov(edx, count);
- Register loop_count = ecx;
- Register count = edx;
- __ shr(loop_count, 5);
- {
- // Main copy loop.
- Label loop;
- __ bind(&loop);
- __ prefetch(Operand(src, 0x20), 1);
- __ movdqa(xmm0, Operand(src, 0x00));
- __ movdqa(xmm1, Operand(src, 0x10));
- __ add(Operand(src), Immediate(0x20));
-
- __ movdqa(Operand(dst, 0x00), xmm0);
- __ movdqa(Operand(dst, 0x10), xmm1);
- __ add(Operand(dst), Immediate(0x20));
-
- __ dec(loop_count);
- __ j(not_zero, &loop);
- }
-
- // At most 31 bytes to copy.
- Label move_less_16;
- __ test(Operand(count), Immediate(0x10));
- __ j(zero, &move_less_16);
- __ movdqa(xmm0, Operand(src, 0));
- __ add(Operand(src), Immediate(0x10));
- __ movdqa(Operand(dst, 0), xmm0);
- __ add(Operand(dst), Immediate(0x10));
- __ bind(&move_less_16);
-
- // At most 15 bytes to copy. Copy 16 bytes at end of string.
- __ and_(count, 0xF);
- __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
- __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
-
- __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
- __ pop(esi);
- __ pop(edi);
- __ ret(0);
- }
- __ Align(16);
- {
- // Copy loop for unaligned source and aligned destination.
- // If source is not aligned, we can't read it as efficiently.
- __ bind(&unaligned_source);
- __ mov(edx, ecx);
- Register loop_count = ecx;
- Register count = edx;
- __ shr(loop_count, 5);
- {
- // Main copy loop
- Label loop;
- __ bind(&loop);
- __ prefetch(Operand(src, 0x20), 1);
- __ movdqu(xmm0, Operand(src, 0x00));
- __ movdqu(xmm1, Operand(src, 0x10));
- __ add(Operand(src), Immediate(0x20));
-
- __ movdqa(Operand(dst, 0x00), xmm0);
- __ movdqa(Operand(dst, 0x10), xmm1);
- __ add(Operand(dst), Immediate(0x20));
-
- __ dec(loop_count);
- __ j(not_zero, &loop);
- }
-
- // At most 31 bytes to copy.
- Label move_less_16;
- __ test(Operand(count), Immediate(0x10));
- __ j(zero, &move_less_16);
- __ movdqu(xmm0, Operand(src, 0));
- __ add(Operand(src), Immediate(0x10));
- __ movdqa(Operand(dst, 0), xmm0);
- __ add(Operand(dst), Immediate(0x10));
- __ bind(&move_less_16);
-
- // At most 15 bytes to copy. Copy 16 bytes at end of string.
- __ and_(count, 0x0F);
- __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
- __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
-
- __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
- __ pop(esi);
- __ pop(edi);
- __ ret(0);
- }
-
- } else {
- // SSE2 not supported. Unlikely to happen in practice.
- __ push(edi);
- __ push(esi);
- stack_offset += 2 * kPointerSize;
- __ cld();
- Register dst = edi;
- Register src = esi;
- Register count = ecx;
- __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
- __ mov(src, Operand(esp, stack_offset + kSourceOffset));
- __ mov(count, Operand(esp, stack_offset + kSizeOffset));
-
- // Copy the first word.
- __ mov(eax, Operand(src, 0));
- __ mov(Operand(dst, 0), eax);
-
- // Increment src,dstso that dst is aligned.
- __ mov(edx, dst);
- __ and_(edx, 0x03);
- __ neg(edx);
- __ add(Operand(edx), Immediate(4)); // edx = 4 - (dst & 3)
- __ add(dst, Operand(edx));
- __ add(src, Operand(edx));
- __ sub(Operand(count), edx);
- // edi is now aligned, ecx holds number of remaning bytes to copy.
-
- __ mov(edx, count);
- count = edx;
- __ shr(ecx, 2); // Make word count instead of byte count.
- __ rep_movs();
-
- // At most 3 bytes left to copy. Copy 4 bytes at end of string.
- __ and_(count, 3);
- __ mov(eax, Operand(src, count, times_1, -4));
- __ mov(Operand(dst, count, times_1, -4), eax);
-
- __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
- __ pop(esi);
- __ pop(edi);
- __ ret(0);
- }
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(desc.reloc_size == 0);
-
- CPU::FlushICache(buffer, actual_size);
- return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/codegen-ia32.h b/src/3rdparty/v8/src/ia32/codegen-ia32.h
deleted file mode 100644
index acd651b..0000000
--- a/src/3rdparty/v8/src/ia32/codegen-ia32.h
+++ /dev/null
@@ -1,801 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_CODEGEN_IA32_H_
-#define V8_IA32_CODEGEN_IA32_H_
-
-#include "ast.h"
-#include "ic-inl.h"
-#include "jump-target-heavy.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations
-class CompilationInfo;
-class DeferredCode;
-class FrameRegisterState;
-class RegisterAllocator;
-class RegisterFile;
-class RuntimeCallHelper;
-
-
-// -------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that puts a
-// reference on the virtual frame. The reference may be consumed
-// by GetValue, TakeValue and SetValue.
-// When the lifetime (scope) of a valid reference ends, it must have
-// been consumed, and be in state UNLOADED.
-class Reference BASE_EMBEDDED {
- public:
- // The values of the types is important, see size().
- enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
- Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get = false);
- ~Reference();
-
- Expression* expression() const { return expression_; }
- Type type() const { return type_; }
- void set_type(Type value) {
- ASSERT_EQ(ILLEGAL, type_);
- type_ = value;
- }
-
- void set_unloaded() {
- ASSERT_NE(ILLEGAL, type_);
- ASSERT_NE(UNLOADED, type_);
- type_ = UNLOADED;
- }
- // The size the reference takes up on the stack.
- int size() const {
- return (type_ < SLOT) ? 0 : type_;
- }
-
- bool is_illegal() const { return type_ == ILLEGAL; }
- bool is_slot() const { return type_ == SLOT; }
- bool is_property() const { return type_ == NAMED || type_ == KEYED; }
- bool is_unloaded() const { return type_ == UNLOADED; }
-
- // Return the name. Only valid for named property references.
- Handle<String> GetName();
-
- // Generate code to push the value of the reference on top of the
- // expression stack. The reference is expected to be already on top of
- // the expression stack, and it is consumed by the call unless the
- // reference is for a compound assignment.
- // If the reference is not consumed, it is left in place under its value.
- void GetValue();
-
- // Like GetValue except that the slot is expected to be written to before
- // being read from again. The value of the reference may be invalidated,
- // causing subsequent attempts to read it to fail.
- void TakeValue();
-
- // Generate code to store the value on top of the expression stack in the
- // reference. The reference is expected to be immediately below the value
- // on the expression stack. The value is stored in the location specified
- // by the reference, and is left on top of the stack, after the reference
- // is popped from beneath it (unloaded).
- void SetValue(InitState init_state);
-
- private:
- CodeGenerator* cgen_;
- Expression* expression_;
- Type type_;
- // Keep the reference on the stack after get, so it can be used by set later.
- bool persist_after_get_;
-};
-
-
-// -------------------------------------------------------------------------
-// Control destinations.
-
-// A control destination encapsulates a pair of jump targets and a
-// flag indicating which one is the preferred fall-through. The
-// preferred fall-through must be unbound, the other may be already
-// bound (ie, a backward target).
-//
-// The true and false targets may be jumped to unconditionally or
-// control may split conditionally. Unconditional jumping and
-// splitting should be emitted in tail position (as the last thing
-// when compiling an expression) because they can cause either label
-// to be bound or the non-fall through to be jumped to leaving an
-// invalid virtual frame.
-//
-// The labels in the control destination can be extracted and
-// manipulated normally without affecting the state of the
-// destination.
-
-class ControlDestination BASE_EMBEDDED {
- public:
- ControlDestination(JumpTarget* true_target,
- JumpTarget* false_target,
- bool true_is_fall_through)
- : true_target_(true_target),
- false_target_(false_target),
- true_is_fall_through_(true_is_fall_through),
- is_used_(false) {
- ASSERT(true_is_fall_through ? !true_target->is_bound()
- : !false_target->is_bound());
- }
-
- // Accessors for the jump targets. Directly jumping or branching to
- // or binding the targets will not update the destination's state.
- JumpTarget* true_target() const { return true_target_; }
- JumpTarget* false_target() const { return false_target_; }
-
- // True if the the destination has been jumped to unconditionally or
- // control has been split to both targets. This predicate does not
- // test whether the targets have been extracted and manipulated as
- // raw jump targets.
- bool is_used() const { return is_used_; }
-
- // True if the destination is used and the true target (respectively
- // false target) was the fall through. If the target is backward,
- // "fall through" included jumping unconditionally to it.
- bool true_was_fall_through() const {
- return is_used_ && true_is_fall_through_;
- }
-
- bool false_was_fall_through() const {
- return is_used_ && !true_is_fall_through_;
- }
-
- // Emit a branch to one of the true or false targets, and bind the
- // other target. Because this binds the fall-through target, it
- // should be emitted in tail position (as the last thing when
- // compiling an expression).
- void Split(Condition cc) {
- ASSERT(!is_used_);
- if (true_is_fall_through_) {
- false_target_->Branch(NegateCondition(cc));
- true_target_->Bind();
- } else {
- true_target_->Branch(cc);
- false_target_->Bind();
- }
- is_used_ = true;
- }
-
- // Emit an unconditional jump in tail position, to the true target
- // (if the argument is true) or the false target. The "jump" will
- // actually bind the jump target if it is forward, jump to it if it
- // is backward.
- void Goto(bool where) {
- ASSERT(!is_used_);
- JumpTarget* target = where ? true_target_ : false_target_;
- if (target->is_bound()) {
- target->Jump();
- } else {
- target->Bind();
- }
- is_used_ = true;
- true_is_fall_through_ = where;
- }
-
- // Mark this jump target as used as if Goto had been called, but
- // without generating a jump or binding a label (the control effect
- // should have already happened). This is used when the left
- // subexpression of the short-circuit boolean operators are
- // compiled.
- void Use(bool where) {
- ASSERT(!is_used_);
- ASSERT((where ? true_target_ : false_target_)->is_bound());
- is_used_ = true;
- true_is_fall_through_ = where;
- }
-
- // Swap the true and false targets but keep the same actual label as
- // the fall through. This is used when compiling negated
- // expressions, where we want to swap the targets but preserve the
- // state.
- void Invert() {
- JumpTarget* temp_target = true_target_;
- true_target_ = false_target_;
- false_target_ = temp_target;
-
- true_is_fall_through_ = !true_is_fall_through_;
- }
-
- private:
- // True and false jump targets.
- JumpTarget* true_target_;
- JumpTarget* false_target_;
-
- // Before using the destination: true if the true target is the
- // preferred fall through, false if the false target is. After
- // using the destination: true if the true target was actually used
- // as the fall through, false if the false target was.
- bool true_is_fall_through_;
-
- // True if the Split or Goto functions have been called.
- bool is_used_;
-};
-
-
-// -------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the jump target pair). It is threaded through
-// the call stack. Constructing a state implicitly pushes it on the owning
-// code generator's stack of states, and destroying one implicitly pops it.
-//
-// The code generator state is only used for expressions, so statements have
-// the initial state.
-
-class CodeGenState BASE_EMBEDDED {
- public:
- // Create an initial code generator state. Destroying the initial state
- // leaves the code generator with a NULL state.
- explicit CodeGenState(CodeGenerator* owner);
-
- // Create a code generator state based on a code generator's current
- // state. The new state has its own control destination.
- CodeGenState(CodeGenerator* owner, ControlDestination* destination);
-
- // Destroy a code generator state and restore the owning code generator's
- // previous state.
- ~CodeGenState();
-
- // Accessors for the state.
- ControlDestination* destination() const { return destination_; }
-
- private:
- // The owning code generator.
- CodeGenerator* owner_;
-
- // A control destination in case the expression has a control-flow
- // effect.
- ControlDestination* destination_;
-
- // The previous state of the owning code generator, restored when
- // this state is destroyed.
- CodeGenState* previous_;
-};
-
-
-// -------------------------------------------------------------------------
-// Arguments allocation mode.
-
-enum ArgumentsAllocationMode {
- NO_ARGUMENTS_ALLOCATION,
- EAGER_ARGUMENTS_ALLOCATION,
- LAZY_ARGUMENTS_ALLOCATION
-};
-
-
-// -------------------------------------------------------------------------
-// CodeGenerator
-
-class CodeGenerator: public AstVisitor {
- public:
- static bool MakeCode(CompilationInfo* info);
-
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info);
-
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- static bool ShouldGenerateLog(Expression* type);
-#endif
-
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
- // Accessors
- MacroAssembler* masm() { return masm_; }
- VirtualFrame* frame() const { return frame_; }
- inline Handle<Script> script();
-
- bool has_valid_frame() const { return frame_ != NULL; }
-
- // Set the virtual frame to be new_frame, with non-frame register
- // reference counts given by non_frame_registers. The non-frame
- // register reference counts of the old frame are returned in
- // non_frame_registers.
- void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
- void DeleteFrame();
-
- RegisterAllocator* allocator() const { return allocator_; }
-
- CodeGenState* state() { return state_; }
- void set_state(CodeGenState* state) { state_ = state; }
-
- void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
- bool in_spilled_code() const { return in_spilled_code_; }
- void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
-
- // Return a position of the element at |index_as_smi| + |additional_offset|
- // in FixedArray pointer to which is held in |array|. |index_as_smi| is Smi.
- static Operand FixedArrayElementOperand(Register array,
- Register index_as_smi,
- int additional_offset = 0) {
- int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
- return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
- }
-
- private:
- // Type of a member function that generates inline code for a native function.
- typedef void (CodeGenerator::*InlineFunctionGenerator)
- (ZoneList<Expression*>*);
-
- static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
- // Construction/Destruction
- explicit CodeGenerator(MacroAssembler* masm);
-
- // Accessors
- inline bool is_eval();
- inline Scope* scope();
- inline bool is_strict_mode();
- inline StrictModeFlag strict_mode_flag();
-
- // Generating deferred code.
- void ProcessDeferred();
-
- // State
- ControlDestination* destination() const { return state_->destination(); }
-
- // Control of side-effect-free int32 expression compilation.
- bool in_safe_int32_mode() { return in_safe_int32_mode_; }
- void set_in_safe_int32_mode(bool value) { in_safe_int32_mode_ = value; }
- bool safe_int32_mode_enabled() {
- return FLAG_safe_int32_compiler && safe_int32_mode_enabled_;
- }
- void set_safe_int32_mode_enabled(bool value) {
- safe_int32_mode_enabled_ = value;
- }
- void set_unsafe_bailout(BreakTarget* unsafe_bailout) {
- unsafe_bailout_ = unsafe_bailout;
- }
-
- // Take the Result that is an untagged int32, and convert it to a tagged
- // Smi or HeapNumber. Remove the untagged_int32 flag from the result.
- void ConvertInt32ResultToNumber(Result* value);
- void ConvertInt32ResultToSmi(Result* value);
-
- // Track loop nesting level.
- int loop_nesting() const { return loop_nesting_; }
- void IncrementLoopNesting() { loop_nesting_++; }
- void DecrementLoopNesting() { loop_nesting_--; }
-
- // Node visitors.
- void VisitStatements(ZoneList<Statement*>* statements);
-
- virtual void VisitSlot(Slot* node);
-#define DEF_VISIT(type) \
- virtual void Visit##type(type* node);
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- // Visit a statement and then spill the virtual frame if control flow can
- // reach the end of the statement (ie, it does not exit via break,
- // continue, return, or throw). This function is used temporarily while
- // the code generator is being transformed.
- void VisitAndSpill(Statement* statement);
-
- // Visit a list of statements and then spill the virtual frame if control
- // flow can reach the end of the list.
- void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
-
- // Main code generation function
- void Generate(CompilationInfo* info);
-
- // Generate the return sequence code. Should be called no more than
- // once per compiled function, immediately after binding the return
- // target (which can not be done more than once).
- void GenerateReturnSequence(Result* return_value);
-
- // Returns the arguments allocation mode.
- ArgumentsAllocationMode ArgumentsMode();
-
- // Store the arguments object and allocate it if necessary.
- Result StoreArgumentsObject(bool initial);
-
- // The following are used by class Reference.
- void LoadReference(Reference* ref);
-
- Operand SlotOperand(Slot* slot, Register tmp);
-
- Operand ContextSlotOperandCheckExtensions(Slot* slot,
- Result tmp,
- JumpTarget* slow);
-
- // Expressions
- void LoadCondition(Expression* expr,
- ControlDestination* destination,
- bool force_control);
- void Load(Expression* expr);
- void LoadGlobal();
- void LoadGlobalReceiver();
-
- // Generate code to push the value of an expression on top of the frame
- // and then spill the frame fully to memory. This function is used
- // temporarily while the code generator is being transformed.
- void LoadAndSpill(Expression* expression);
-
- // Evaluate an expression and place its value on top of the frame,
- // using, or not using, the side-effect-free expression compiler.
- void LoadInSafeInt32Mode(Expression* expr, BreakTarget* unsafe_bailout);
- void LoadWithSafeInt32ModeDisabled(Expression* expr);
-
- // Read a value from a slot and leave it on top of the expression stack.
- void LoadFromSlot(Slot* slot, TypeofState typeof_state);
- void LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
- Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow);
-
- // Support for loading from local/global variables and arguments
- // whose location is known unless they are shadowed by
- // eval-introduced bindings. Generates no code for unsupported slot
- // types and therefore expects to fall through to the slow jump target.
- void EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- Result* result,
- JumpTarget* slow,
- JumpTarget* done);
-
- // Store the value on top of the expression stack into a slot, leaving the
- // value in place.
- void StoreToSlot(Slot* slot, InitState init_state);
-
- // Support for compiling assignment expressions.
- void EmitSlotAssignment(Assignment* node);
- void EmitNamedPropertyAssignment(Assignment* node);
- void EmitKeyedPropertyAssignment(Assignment* node);
-
- // Receiver is passed on the frame and consumed.
- Result EmitNamedLoad(Handle<String> name, bool is_contextual);
-
- // If the store is contextual, value is passed on the frame and consumed.
- // Otherwise, receiver and value are passed on the frame and consumed.
- Result EmitNamedStore(Handle<String> name, bool is_contextual);
-
- // Receiver and key are passed on the frame and consumed.
- Result EmitKeyedLoad();
-
- // Receiver, key, and value are passed on the frame and consumed.
- Result EmitKeyedStore(StaticType* key_type);
-
- // Special code for typeof expressions: Unfortunately, we must
- // be careful when loading the expression in 'typeof'
- // expressions. We are not allowed to throw reference errors for
- // non-existing properties of the global object, so we must make it
- // look like an explicit property access, instead of an access
- // through the context chain.
- void LoadTypeofExpression(Expression* x);
-
- // Translate the value on top of the frame into control flow to the
- // control destination.
- void ToBoolean(ControlDestination* destination);
-
- // Generate code that computes a shortcutting logical operation.
- void GenerateLogicalBooleanOperation(BinaryOperation* node);
-
- void GenericBinaryOperation(BinaryOperation* expr,
- OverwriteMode overwrite_mode);
-
- // Emits code sequence that jumps to a JumpTarget if the inputs
- // are both smis. Cannot be in MacroAssembler because it takes
- // advantage of TypeInfo to skip unneeded checks.
- // Allocates a temporary register, possibly spilling from the frame,
- // if it needs to check both left and right.
- void JumpIfBothSmiUsingTypeInfo(Result* left,
- Result* right,
- JumpTarget* both_smi);
-
- // Emits code sequence that jumps to deferred code if the inputs
- // are not both smis. Cannot be in MacroAssembler because it takes
- // a deferred code object.
- void JumpIfNotBothSmiUsingTypeInfo(Register left,
- Register right,
- Register scratch,
- TypeInfo left_info,
- TypeInfo right_info,
- DeferredCode* deferred);
-
- // Emits code sequence that jumps to the label if the inputs
- // are not both smis.
- void JumpIfNotBothSmiUsingTypeInfo(Register left,
- Register right,
- Register scratch,
- TypeInfo left_info,
- TypeInfo right_info,
- Label* on_non_smi);
-
- // If possible, combine two constant smi values using op to produce
- // a smi result, and push it on the virtual frame, all at compile time.
- // Returns true if it succeeds. Otherwise it has no effect.
- bool FoldConstantSmis(Token::Value op, int left, int right);
-
- // Emit code to perform a binary operation on a constant
- // smi and a likely smi. Consumes the Result operand.
- Result ConstantSmiBinaryOperation(BinaryOperation* expr,
- Result* operand,
- Handle<Object> constant_operand,
- bool reversed,
- OverwriteMode overwrite_mode);
-
- // Emit code to perform a binary operation on two likely smis.
- // The code to handle smi arguments is produced inline.
- // Consumes the Results left and right.
- Result LikelySmiBinaryOperation(BinaryOperation* expr,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode);
-
-
- // Emit code to perform a binary operation on two untagged int32 values.
- // The values are on top of the frame, and the result is pushed on the frame.
- void Int32BinaryOperation(BinaryOperation* node);
-
-
- // Generate a stub call from the virtual frame.
- Result GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
- Result* left,
- Result* right);
-
- void Comparison(AstNode* node,
- Condition cc,
- bool strict,
- ControlDestination* destination);
-
- // If at least one of the sides is a constant smi, generate optimized code.
- void ConstantSmiComparison(Condition cc,
- bool strict,
- ControlDestination* destination,
- Result* left_side,
- Result* right_side,
- bool left_side_constant_smi,
- bool right_side_constant_smi,
- bool is_loop_condition);
-
- void GenerateInlineNumberComparison(Result* left_side,
- Result* right_side,
- Condition cc,
- ControlDestination* dest);
-
- // To prevent long attacker-controlled byte sequences, integer constants
- // from the JavaScript source are loaded in two parts if they are larger
- // than 17 bits.
- static const int kMaxSmiInlinedBits = 17;
- bool IsUnsafeSmi(Handle<Object> value);
- // Load an integer constant x into a register target or into the stack using
- // at most 16 bits of user-controlled data per assembly operation.
- void MoveUnsafeSmi(Register target, Handle<Object> value);
- void StoreUnsafeSmiToLocal(int offset, Handle<Object> value);
- void PushUnsafeSmi(Handle<Object> value);
-
- void CallWithArguments(ZoneList<Expression*>* arguments,
- CallFunctionFlags flags,
- int position);
-
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments). We call x the applicand and y the receiver.
- // The optimization avoids allocating an arguments object if possible.
- void CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position);
-
- void CheckStack();
-
- bool CheckForInlineRuntimeCall(CallRuntime* node);
-
- void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
- // Declare global variables and functions in the given array of
- // name/value pairs.
- void DeclareGlobals(Handle<FixedArray> pairs);
-
- // Instantiate the function based on the shared function info.
- Result InstantiateFunction(Handle<SharedFunctionInfo> function_info,
- bool pretenure);
-
- // Support for types.
- void GenerateIsSmi(ZoneList<Expression*>* args);
- void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
- void GenerateIsArray(ZoneList<Expression*>* args);
- void GenerateIsRegExp(ZoneList<Expression*>* args);
- void GenerateIsObject(ZoneList<Expression*>* args);
- void GenerateIsSpecObject(ZoneList<Expression*>* args);
- void GenerateIsFunction(ZoneList<Expression*>* args);
- void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
- void GenerateIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args);
-
- // Support for construct call checks.
- void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
- // Support for arguments.length and arguments[?].
- void GenerateArgumentsLength(ZoneList<Expression*>* args);
- void GenerateArguments(ZoneList<Expression*>* args);
-
- // Support for accessing the class and value fields of an object.
- void GenerateClassOf(ZoneList<Expression*>* args);
- void GenerateValueOf(ZoneList<Expression*>* args);
- void GenerateSetValueOf(ZoneList<Expression*>* args);
-
- // Fast support for charCodeAt(n).
- void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateStringCharFromCode(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateStringCharAt(ZoneList<Expression*>* args);
-
- // Fast support for object equality testing.
- void GenerateObjectEquals(ZoneList<Expression*>* args);
-
- void GenerateLog(ZoneList<Expression*>* args);
-
- void GenerateGetFramePointer(ZoneList<Expression*>* args);
-
- // Fast support for Math.random().
- void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
- // Fast support for StringAdd.
- void GenerateStringAdd(ZoneList<Expression*>* args);
-
- // Fast support for SubString.
- void GenerateSubString(ZoneList<Expression*>* args);
-
- // Fast support for StringCompare.
- void GenerateStringCompare(ZoneList<Expression*>* args);
-
- // Support for direct calls from JavaScript to native RegExp code.
- void GenerateRegExpExec(ZoneList<Expression*>* args);
-
- // Construct a RegExp exec result with two in-object properties.
- void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
-
- // Support for fast native caches.
- void GenerateGetFromCache(ZoneList<Expression*>* args);
-
- // Fast support for number to string.
- void GenerateNumberToString(ZoneList<Expression*>* args);
-
- // Fast swapping of elements. Takes three expressions, the object and two
- // indices. This should only be used if the indices are known to be
- // non-negative and within bounds of the elements array at the call site.
- void GenerateSwapElements(ZoneList<Expression*>* args);
-
- // Fast call for custom callbacks.
- void GenerateCallFunction(ZoneList<Expression*>* args);
-
- // Fast call to math functions.
- void GenerateMathPow(ZoneList<Expression*>* args);
- void GenerateMathSin(ZoneList<Expression*>* args);
- void GenerateMathCos(ZoneList<Expression*>* args);
- void GenerateMathSqrt(ZoneList<Expression*>* args);
- void GenerateMathLog(ZoneList<Expression*>* args);
-
- // Check whether two RegExps are equivalent.
- void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
-
- void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
- void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
- void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
-
- // Simple condition analysis.
- enum ConditionAnalysis {
- ALWAYS_TRUE,
- ALWAYS_FALSE,
- DONT_KNOW
- };
- ConditionAnalysis AnalyzeCondition(Expression* cond);
-
- // Methods used to indicate which source code is generated for. Source
- // positions are collected by the assembler and emitted with the relocation
- // information.
- void CodeForFunctionPosition(FunctionLiteral* fun);
- void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(Statement* stmt);
- void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
- void CodeForSourcePosition(int pos);
-
- void SetTypeForStackSlot(Slot* slot, TypeInfo info);
-
-#ifdef DEBUG
- // True if the registers are valid for entry to a block. There should
- // be no frame-external references to (non-reserved) registers.
- bool HasValidEntryRegisters();
-#endif
-
- ZoneList<DeferredCode*> deferred_;
-
- // Assembler
- MacroAssembler* masm_; // to generate code
-
- CompilationInfo* info_;
-
- // Code generation state
- VirtualFrame* frame_;
- RegisterAllocator* allocator_;
- CodeGenState* state_;
- int loop_nesting_;
- bool in_safe_int32_mode_;
- bool safe_int32_mode_enabled_;
-
- // Jump targets.
- // The target of the return from the function.
- BreakTarget function_return_;
- // The target of the bailout from a side-effect-free int32 subexpression.
- BreakTarget* unsafe_bailout_;
-
- // True if the function return is shadowed (ie, jumping to the target
- // function_return_ does not jump to the true function return, but rather
- // to some unlinking code).
- bool function_return_is_shadowed_;
-
- // True when we are in code that expects the virtual frame to be fully
- // spilled. Some virtual frame function are disabled in DEBUG builds when
- // called from spilled code, because they do not leave the virtual frame
- // in a spilled state.
- bool in_spilled_code_;
-
- // A cookie that is used for JIT IMM32 Encoding. Initialized to a
- // random number when the command-line
- // FLAG_mask_constants_with_cookie is true, zero otherwise.
- int jit_cookie_;
-
- friend class VirtualFrame;
- friend class Isolate;
- friend class JumpTarget;
- friend class Reference;
- friend class Result;
- friend class FastCodeGenerator;
- friend class FullCodeGenerator;
- friend class FullCodeGenSyntaxChecker;
- friend class LCodeGen;
-
- friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
- friend class InlineRuntimeFunctionsTable;
-
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_CODEGEN_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/cpu-ia32.cc b/src/3rdparty/v8/src/ia32/cpu-ia32.cc
deleted file mode 100644
index 615dbfe..0000000
--- a/src/3rdparty/v8/src/ia32/cpu-ia32.cc
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// CPU specific code for ia32 independent of OS goes here.
-
-#ifdef __GNUC__
-#include "third_party/valgrind/valgrind.h"
-#endif
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "cpu.h"
-#include "macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-void CPU::Setup() {
- CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
- return CpuFeatures::IsSupported(SSE2);
-}
-
-
-void CPU::FlushICache(void* start, size_t size) {
- // No need to flush the instruction cache on Intel. On Intel instruction
- // cache flushing is only necessary when multiple cores running the same
- // code simultaneously. V8 (and JavaScript) is single threaded and when code
- // is patched on an intel CPU the core performing the patching will have its
- // own instruction cache updated automatically.
-
- // If flushing of the instruction cache becomes necessary Windows has the
- // API function FlushInstructionCache.
-
- // By default, valgrind only checks the stack for writes that might need to
- // invalidate already cached translated code. This leads to random
- // instability when code patches or moves are sometimes unnoticed. One
- // solution is to run valgrind with --smc-check=all, but this comes at a big
- // performance cost. We can notify valgrind to invalidate its cache.
-#ifdef VALGRIND_DISCARD_TRANSLATIONS
- VALGRIND_DISCARD_TRANSLATIONS(start, size);
-#endif
-}
-
-
-void CPU::DebugBreak() {
-#ifdef _MSC_VER
- // To avoid Visual Studio runtime support the following code can be used
- // instead
- // __asm { int 3 }
- __debugbreak();
-#else
- asm("int $3");
-#endif
-}
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/debug-ia32.cc b/src/3rdparty/v8/src/ia32/debug-ia32.cc
deleted file mode 100644
index 33c5251..0000000
--- a/src/3rdparty/v8/src/ia32/debug-ia32.cc
+++ /dev/null
@@ -1,312 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen-inl.h"
-#include "debug.h"
-
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-bool BreakLocationIterator::IsDebugBreakAtReturn() {
- return Debug::IsDebugBreakAtReturn(rinfo());
-}
-
-
-// Patch the JS frame exit code with a debug break call. See
-// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-ia32.cc
-// for the precise return instructions sequence.
-void BreakLocationIterator::SetDebugBreakAtReturn() {
- ASSERT(Assembler::kJSReturnSequenceLength >=
- Assembler::kCallInstructionLength);
- Isolate* isolate = Isolate::Current();
- rinfo()->PatchCodeWithCall(isolate->debug()->debug_break_return()->entry(),
- Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
-}
-
-
-// Restore the JS frame exit code.
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kJSReturnSequenceLength);
-}
-
-
-// A debug break in the frame exit code is identified by the JS frame exit code
-// having been patched with a call instruction.
-bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
- return rinfo->IsPatchedReturnSequence();
-}
-
-
-bool BreakLocationIterator::IsDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- // Check whether the debug break slot instructions have been patched.
- return rinfo()->IsPatchedDebugBreakSlotSequence();
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- Isolate* isolate = Isolate::Current();
- rinfo()->PatchCodeWithCall(
- isolate->debug()->debug_break_slot()->entry(),
- Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
-}
-
-
-void BreakLocationIterator::ClearDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-
-static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList object_regs,
- RegList non_object_regs,
- bool convert_call_to_jmp) {
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as a smi causing it to be untouched by GC.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((object_regs & (1 << r)) != 0) {
- __ push(reg);
- }
- if ((non_object_regs & (1 << r)) != 0) {
- if (FLAG_debug_code) {
- __ test(reg, Immediate(0xc0000000));
- __ Assert(zero, "Unable to encode value as smi");
- }
- __ SmiTag(reg);
- __ push(reg);
- }
- }
-
-#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
-#endif
- __ Set(eax, Immediate(0)); // No arguments.
- __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
-
- CEntryStub ceb(1);
- __ CallStub(&ceb);
-
- // Restore the register values containing object pointers from the expression
- // stack.
- for (int i = kNumJSCallerSaved; --i >= 0;) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if (FLAG_debug_code) {
- __ Set(reg, Immediate(kDebugZapValue));
- }
- if ((object_regs & (1 << r)) != 0) {
- __ pop(reg);
- }
- if ((non_object_regs & (1 << r)) != 0) {
- __ pop(reg);
- __ SmiUntag(reg);
- }
- }
-
- // Get rid of the internal frame.
- __ LeaveInternalFrame();
-
- // If this call did not replace a call but patched other code then there will
- // be an unwanted return address left on the stack. Here we get rid of that.
- if (convert_call_to_jmp) {
- __ add(Operand(esp), Immediate(kPointerSize));
- }
-
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
- __ jmp(Operand::StaticVariable(after_break_target));
-}
-
-
-void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- // Register state for IC load call (from ic-ia32.cc).
- // ----------- S t a t e -------------
- // -- eax : receiver
- // -- ecx : name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), 0, false);
-}
-
-
-void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // Register state for IC store call (from ic-ia32.cc).
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -----------------------------------
- Generate_DebugBreakCallHelper(
- masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
-}
-
-
-void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC load call (from ic-ia32.cc).
- // ----------- S t a t e -------------
- // -- edx : receiver
- // -- eax : key
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit() | edx.bit(), 0, false);
-}
-
-
-void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC load call (from ic-ia32.cc).
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -----------------------------------
- Generate_DebugBreakCallHelper(
- masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
-}
-
-
-void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC call call (from ic-ia32.cc)
- // ----------- S t a t e -------------
- // -- ecx: name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, ecx.bit(), 0, false);
-}
-
-
-void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
- // Register state just before return from JS function (from codegen-ia32.cc).
- // eax is the actual number of arguments not encoded as a smi see comment
- // above IC call.
- // ----------- S t a t e -------------
- // -- eax: number of arguments (not smi)
- // -- edi: constructor function
- // -----------------------------------
- // The number of arguments in eax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, edi.bit(), eax.bit(), false);
-}
-
-
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
- // Register state just before return from JS function (from codegen-ia32.cc).
- // ----------- S t a t e -------------
- // -- eax: return value
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit(), 0, true);
-}
-
-
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
- // Register state for stub CallFunction (from CallFunctionStub in ic-ia32.cc).
- // ----------- S t a t e -------------
- // No registers used on entry.
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0, 0, false);
-}
-
-
-void Debug::GenerateSlot(MacroAssembler* masm) {
- // Generate enough nop's to make space for a call instruction.
- Label check_codesize;
- __ bind(&check_codesize);
- __ RecordDebugBreakSlot();
- for (int i = 0; i < Assembler::kDebugBreakSlotLength; i++) {
- __ nop();
- }
- ASSERT_EQ(Assembler::kDebugBreakSlotLength,
- masm->SizeOfCodeGeneratedSince(&check_codesize));
-}
-
-
-void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
- // In the places where a debug break slot is inserted no registers can contain
- // object pointers.
- Generate_DebugBreakCallHelper(masm, 0, 0, true);
-}
-
-
-void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->ret(0);
-}
-
-
-void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference(Debug_Address::RestarterFrameFunctionPointer(),
- masm->isolate());
- __ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0));
-
- // We do not know our frame height, but set esp based on ebp.
- __ lea(esp, Operand(ebp, -1 * kPointerSize));
-
- __ pop(edi); // Function.
- __ pop(ebp);
-
- // Load context from the function.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Get function code.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
-
- // Re-run JSFunction, edi is function, esi is context.
- __ jmp(Operand(edx));
-}
-
-const bool Debug::kFrameDropperSupported = true;
-
-#undef __
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc b/src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc
deleted file mode 100644
index 72fdac8..0000000
--- a/src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc
+++ /dev/null
@@ -1,774 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "safepoint-table.h"
-
-namespace v8 {
-namespace internal {
-
-int Deoptimizer::table_entry_size_ = 10;
-
-
-int Deoptimizer::patch_size() {
- return Assembler::kCallInstructionLength;
-}
-
-
-static void ZapCodeRange(Address start, Address end) {
-#ifdef DEBUG
- ASSERT(start <= end);
- int size = end - start;
- CodePatcher destroyer(start, size);
- while (size-- > 0) destroyer.masm()->int3();
-#endif
-}
-
-
-void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
- Isolate* isolate = code->GetIsolate();
- HandleScope scope(isolate);
-
- // Compute the size of relocation information needed for the code
- // patching in Deoptimizer::DeoptimizeFunction.
- int min_reloc_size = 0;
- Address prev_reloc_address = code->instruction_start();
- Address code_start_address = code->instruction_start();
- SafepointTable table(*code);
- for (unsigned i = 0; i < table.length(); ++i) {
- Address curr_reloc_address = code_start_address + table.GetPcOffset(i);
- ASSERT_GE(curr_reloc_address, prev_reloc_address);
- SafepointEntry safepoint_entry = table.GetEntry(i);
- int deoptimization_index = safepoint_entry.deoptimization_index();
- if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
- // The gap code is needed to get to the state expected at the
- // bailout and we need to skip the call opcode to get to the
- // address that needs reloc.
- curr_reloc_address += safepoint_entry.gap_code_size() + 1;
- int pc_delta = curr_reloc_address - prev_reloc_address;
- // We use RUNTIME_ENTRY reloc info which has a size of 2 bytes
- // if encodable with small pc delta encoding and up to 6 bytes
- // otherwise.
- if (pc_delta <= RelocInfo::kMaxSmallPCDelta) {
- min_reloc_size += 2;
- } else {
- min_reloc_size += 6;
- }
- prev_reloc_address = curr_reloc_address;
- }
- }
-
- // If the relocation information is not big enough we create a new
- // relocation info object that is padded with comments to make it
- // big enough for lazy doptimization.
- int reloc_length = code->relocation_info()->length();
- if (min_reloc_size > reloc_length) {
- int comment_reloc_size = RelocInfo::kMinRelocCommentSize;
- // Padding needed.
- int min_padding = min_reloc_size - reloc_length;
- // Number of comments needed to take up at least that much space.
- int additional_comments =
- (min_padding + comment_reloc_size - 1) / comment_reloc_size;
- // Actual padding size.
- int padding = additional_comments * comment_reloc_size;
- // Allocate new relocation info and copy old relocation to the end
- // of the new relocation info array because relocation info is
- // written and read backwards.
- Factory* factory = isolate->factory();
- Handle<ByteArray> new_reloc =
- factory->NewByteArray(reloc_length + padding, TENURED);
- memcpy(new_reloc->GetDataStartAddress() + padding,
- code->relocation_info()->GetDataStartAddress(),
- reloc_length);
- // Create a relocation writer to write the comments in the padding
- // space. Use position 0 for everything to ensure short encoding.
- RelocInfoWriter reloc_info_writer(
- new_reloc->GetDataStartAddress() + padding, 0);
- intptr_t comment_string
- = reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
- RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string);
- for (int i = 0; i < additional_comments; ++i) {
-#ifdef DEBUG
- byte* pos_before = reloc_info_writer.pos();
-#endif
- reloc_info_writer.Write(&rinfo);
- ASSERT(RelocInfo::kMinRelocCommentSize ==
- pos_before - reloc_info_writer.pos());
- }
- // Replace relocation information on the code object.
- code->set_relocation_info(*new_reloc);
- }
-}
-
-
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- if (!function->IsOptimized()) return;
-
- Isolate* isolate = function->GetIsolate();
- HandleScope scope(isolate);
- AssertNoAllocation no_allocation;
-
- // Get the optimized code.
- Code* code = function->code();
- Address code_start_address = code->instruction_start();
-
- // We will overwrite the code's relocation info in-place. Relocation info
- // is written backward. The relocation info is the payload of a byte
- // array. Later on we will slide this to the start of the byte array and
- // create a filler object in the remaining space.
- ByteArray* reloc_info = code->relocation_info();
- Address reloc_end_address = reloc_info->address() + reloc_info->Size();
- RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);
-
- // For each return after a safepoint insert a call to the corresponding
- // deoptimization entry. Since the call is a relative encoding, write new
- // reloc info. We do not need any of the existing reloc info because the
- // existing code will not be used again (we zap it in debug builds).
- SafepointTable table(code);
- Address prev_address = code_start_address;
- for (unsigned i = 0; i < table.length(); ++i) {
- Address curr_address = code_start_address + table.GetPcOffset(i);
- ASSERT_GE(curr_address, prev_address);
- ZapCodeRange(prev_address, curr_address);
-
- SafepointEntry safepoint_entry = table.GetEntry(i);
- int deoptimization_index = safepoint_entry.deoptimization_index();
- if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
- // The gap code is needed to get to the state expected at the bailout.
- curr_address += safepoint_entry.gap_code_size();
-
- CodePatcher patcher(curr_address, patch_size());
- Address deopt_entry = GetDeoptimizationEntry(deoptimization_index, LAZY);
- patcher.masm()->call(deopt_entry, RelocInfo::NONE);
-
- // We use RUNTIME_ENTRY for deoptimization bailouts.
- RelocInfo rinfo(curr_address + 1, // 1 after the call opcode.
- RelocInfo::RUNTIME_ENTRY,
- reinterpret_cast<intptr_t>(deopt_entry));
- reloc_info_writer.Write(&rinfo);
- ASSERT_GE(reloc_info_writer.pos(),
- reloc_info->address() + ByteArray::kHeaderSize);
- curr_address += patch_size();
- }
- prev_address = curr_address;
- }
- ZapCodeRange(prev_address,
- code_start_address + code->safepoint_table_offset());
-
- // Move the relocation info to the beginning of the byte array.
- int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
- memmove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
-
- // The relocation info is in place, update the size.
- reloc_info->set_length(new_reloc_size);
-
- // Handle the junk part after the new relocation info. We will create
- // a non-live object in the extra space at the end of the former reloc info.
- Address junk_address = reloc_info->address() + reloc_info->Size();
- ASSERT(junk_address <= reloc_end_address);
- isolate->heap()->CreateFillerObjectAt(junk_address,
- reloc_end_address - junk_address);
-
- // Add the deoptimizing code to the list.
- DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = isolate->deoptimizer_data();
- node->set_next(data->deoptimizing_code_list_);
- data->deoptimizing_code_list_ = node;
-
- // Set the code for the function to non-optimized version.
- function->ReplaceCode(function->shared()->code());
-
- if (FLAG_trace_deopt) {
- PrintF("[forced deoptimization: ");
- function->PrintName();
- PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
-#ifdef DEBUG
- if (FLAG_print_code) {
- code->PrintLn();
- }
-#endif
- }
-}
-
-
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT(check_code->entry() ==
- Assembler::target_address_at(call_target_address));
- // The stack check code matches the pattern:
- //
- // cmp esp, <limit>
- // jae ok
- // call <stack guard>
- // test eax, <loop nesting depth>
- // ok: ...
- //
- // We will patch away the branch so the code is:
- //
- // cmp esp, <limit> ;; Not changed
- // nop
- // nop
- // call <on-stack replacment>
- // test eax, <loop nesting depth>
- // ok:
- ASSERT(*(call_target_address - 3) == 0x73 && // jae
- *(call_target_address - 2) == 0x07 && // offset
- *(call_target_address - 1) == 0xe8); // call
- *(call_target_address - 3) = 0x90; // nop
- *(call_target_address - 2) = 0x90; // nop
- Assembler::set_target_address_at(call_target_address,
- replacement_code->entry());
-}
-
-
-void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT(replacement_code->entry() ==
- Assembler::target_address_at(call_target_address));
- // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
- // restore the conditional branch.
- ASSERT(*(call_target_address - 3) == 0x90 && // nop
- *(call_target_address - 2) == 0x90 && // nop
- *(call_target_address - 1) == 0xe8); // call
- *(call_target_address - 3) = 0x73; // jae
- *(call_target_address - 2) = 0x07; // offset
- Assembler::set_target_address_at(call_target_address,
- check_code->entry());
-}
-
-
-static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
- // TODO(kasperl): This should not be the bailout_id_. It should be
- // the ast id. Confusing.
- ASSERT(bailout_id_ == ast_id);
-
- int bailout_id = LookupBailoutId(data, ast_id);
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
- USE(function);
- ASSERT(function == function_);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => node=%u, frame=%d->%d]\n",
- ast_id,
- input_frame_size,
- output_frame_size);
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- uint32_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [esp + %d] <- 0x%08x ; [esp + %d] (fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
- } else {
- // Setup the frame pointer and the context pointer.
- output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
- output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- uint32_t pc = reinterpret_cast<uint32_t>(
- optimized_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation =
- function->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function));
- function->PrintName();
- PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
- }
-}
-
-
-void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
- int frame_index) {
- // Read the ast node id, function, and frame height for this output frame.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- USE(opcode);
- ASSERT(Translation::FRAME == opcode);
- int node_id = iterator->Next();
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating ");
- function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
- }
-
- // The 'fixed' part of the frame consists of the incoming parameters and
- // the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeFixedSize(function);
- unsigned input_frame_size = input_->GetFrameSize();
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
-
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
- ASSERT(frame_index >= 0 && frame_index < output_count_);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address for the bottommost output frame can be computed from
- // the input frame pointer and the output frame's height. For all
- // subsequent output frames, it can be computed from the previous one's
- // top address and the current frame's size.
- uint32_t top_address;
- if (is_bottommost) {
- // 2 = context and function in the frame.
- top_address =
- input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes;
- } else {
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- }
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = function->shared()->formal_parameter_count() + 1;
- unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- input_offset -= (parameter_count * kPointerSize);
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Synthesize their values and set them up
- // explicitly.
- //
- // The caller's pc for the bottommost output frame is the same as in the
- // input frame. For all subsequent output frames, it can be read from the
- // previous one. This frame's pc can be computed from the non-optimized
- // function code and AST id of the bailout.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- intptr_t value;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetPc();
- }
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The caller's frame pointer for the bottommost output frame is the same
- // as in the input frame. For all subsequent output frames, it can be
- // read from the previous one. Also compute and set this frame's frame
- // pointer.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetFp();
- }
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value);
- output_frame->SetFp(fp_value);
- if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // For the bottommost output frame the context can be gotten from the input
- // frame. For all subsequent output frames it can be gotten from the function
- // so long as we don't inline functions that need local contexts.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = reinterpret_cast<uint32_t>(function->context());
- }
- output_frame->SetFrameSlot(output_offset, value);
- if (is_topmost) output_frame->SetRegister(esi.code(), value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The function was mentioned explicitly in the BEGIN_FRAME.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(function);
- // The function for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Translate the rest of the frame.
- for (unsigned i = 0; i < height; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- ASSERT(0 == output_offset);
-
- // Compute this frame's PC, state, and continuation.
- Code* non_optimized_code = function->shared()->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- Address start = non_optimized_code->instruction_start();
- unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
- uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
- output_frame->SetPc(pc_value);
-
- FullCodeGenerator::State state =
- FullCodeGenerator::StateField::decode(pc_and_state);
- output_frame->SetState(Smi::FromInt(state));
-
- // Set the continuation for the topmost frame.
- if (is_topmost) {
- Builtins* builtins = isolate_->builtins();
- Code* continuation = (bailout_type_ == EAGER)
- ? builtins->builtin(Builtins::kNotifyDeoptimized)
- : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
- output_frame->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
- }
-
- if (output_count_ - 1 == frame_index) iterator->Done();
-}
-
-
-#define __ masm()->
-
-void Deoptimizer::EntryGenerator::Generate() {
- GeneratePrologue();
- CpuFeatures::Scope scope(SSE2);
-
- Isolate* isolate = masm()->isolate();
-
- // Save all general purpose registers before messing with them.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- const int kDoubleRegsSize = kDoubleSize *
- XMMRegister::kNumAllocatableRegisters;
- __ sub(Operand(esp), Immediate(kDoubleRegsSize));
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ movdbl(Operand(esp, offset), xmm_reg);
- }
-
- __ pushad();
-
- const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
- kDoubleRegsSize;
-
- // Get the bailout id from the stack.
- __ mov(ebx, Operand(esp, kSavedRegistersAreaSize));
-
- // Get the address of the location in the code object if possible
- // and compute the fp-to-sp delta in register edx.
- if (type() == EAGER) {
- __ Set(ecx, Immediate(0));
- __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
- } else {
- __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
- __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
- }
- __ sub(edx, Operand(ebp));
- __ neg(edx);
-
- // Allocate a new deoptimizer object.
- __ PrepareCallCFunction(6, eax);
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(Operand(esp, 0 * kPointerSize), eax); // Function.
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(type())); // Bailout type.
- __ mov(Operand(esp, 2 * kPointerSize), ebx); // Bailout id.
- __ mov(Operand(esp, 3 * kPointerSize), ecx); // Code address or 0.
- __ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta.
- __ mov(Operand(esp, 5 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
-
- // Preserve deoptimizer object in register eax and get the input
- // frame descriptor pointer.
- __ mov(ebx, Operand(eax, Deoptimizer::input_offset()));
-
- // Fill in the input registers.
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ pop(Operand(ebx, offset));
- }
-
- // Fill in the double input registers.
- int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize;
- __ movdbl(xmm0, Operand(esp, src_offset));
- __ movdbl(Operand(ebx, dst_offset), xmm0);
- }
-
- // Remove the bailout id and the double registers from the stack.
- if (type() == EAGER) {
- __ add(Operand(esp), Immediate(kDoubleRegsSize + kPointerSize));
- } else {
- __ add(Operand(esp), Immediate(kDoubleRegsSize + 2 * kPointerSize));
- }
-
- // Compute a pointer to the unwinding limit in register ecx; that is
- // the first stack slot not part of the input frame.
- __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
- __ add(ecx, Operand(esp));
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
- Label pop_loop;
- __ bind(&pop_loop);
- __ pop(Operand(edx, 0));
- __ add(Operand(edx), Immediate(sizeof(uint32_t)));
- __ cmp(ecx, Operand(esp));
- __ j(not_equal, &pop_loop);
-
- // Compute the output frame in the deoptimizer.
- __ push(eax);
- __ PrepareCallCFunction(1, ebx);
- __ mov(Operand(esp, 0 * kPointerSize), eax);
- __ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 1);
- __ pop(eax);
-
- // Replace the current frame with the output frames.
- Label outer_push_loop, inner_push_loop;
- // Outer loop state: eax = current FrameDescription**, edx = one past the
- // last FrameDescription**.
- __ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
- __ mov(eax, Operand(eax, Deoptimizer::output_offset()));
- __ lea(edx, Operand(eax, edx, times_4, 0));
- __ bind(&outer_push_loop);
- // Inner loop state: ebx = current FrameDescription*, ecx = loop index.
- __ mov(ebx, Operand(eax, 0));
- __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
- __ bind(&inner_push_loop);
- __ sub(Operand(ecx), Immediate(sizeof(uint32_t)));
- __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
- __ test(ecx, Operand(ecx));
- __ j(not_zero, &inner_push_loop);
- __ add(Operand(eax), Immediate(kPointerSize));
- __ cmp(eax, Operand(edx));
- __ j(below, &outer_push_loop);
-
- // In case of OSR, we have to restore the XMM registers.
- if (type() == OSR) {
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
- __ movdbl(xmm_reg, Operand(ebx, src_offset));
- }
- }
-
- // Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ push(Operand(ebx, FrameDescription::state_offset()));
- }
- __ push(Operand(ebx, FrameDescription::pc_offset()));
- __ push(Operand(ebx, FrameDescription::continuation_offset()));
-
-
- // Push the registers from the last output frame.
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ push(Operand(ebx, offset));
- }
-
- // Restore the registers from the stack.
- __ popad();
-
- // Return to the continuation point.
- __ ret(0);
-}
-
-
-void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- // Create a sequence of deoptimization entries.
- Label done;
- for (int i = 0; i < count(); i++) {
- int start = masm()->pc_offset();
- USE(start);
- __ push_imm32(i);
- __ jmp(&done);
- ASSERT(masm()->pc_offset() - start == table_entry_size_);
- }
- __ bind(&done);
-}
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/disasm-ia32.cc b/src/3rdparty/v8/src/ia32/disasm-ia32.cc
deleted file mode 100644
index d1c869a..0000000
--- a/src/3rdparty/v8/src/ia32/disasm-ia32.cc
+++ /dev/null
@@ -1,1620 +0,0 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <assert.h>
-#include <stdio.h>
-#include <stdarg.h>
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "disasm.h"
-
-namespace disasm {
-
-enum OperandOrder {
- UNSET_OP_ORDER = 0,
- REG_OPER_OP_ORDER,
- OPER_REG_OP_ORDER
-};
-
-
-//------------------------------------------------------------------
-// Tables
-//------------------------------------------------------------------
-struct ByteMnemonic {
- int b; // -1 terminates, otherwise must be in range (0..255)
- const char* mnem;
- OperandOrder op_order_;
-};
-
-
-static ByteMnemonic two_operands_instr[] = {
- {0x03, "add", REG_OPER_OP_ORDER},
- {0x09, "or", OPER_REG_OP_ORDER},
- {0x0B, "or", REG_OPER_OP_ORDER},
- {0x1B, "sbb", REG_OPER_OP_ORDER},
- {0x21, "and", OPER_REG_OP_ORDER},
- {0x23, "and", REG_OPER_OP_ORDER},
- {0x29, "sub", OPER_REG_OP_ORDER},
- {0x2A, "subb", REG_OPER_OP_ORDER},
- {0x2B, "sub", REG_OPER_OP_ORDER},
- {0x31, "xor", OPER_REG_OP_ORDER},
- {0x33, "xor", REG_OPER_OP_ORDER},
- {0x38, "cmpb", OPER_REG_OP_ORDER},
- {0x3A, "cmpb", REG_OPER_OP_ORDER},
- {0x3B, "cmp", REG_OPER_OP_ORDER},
- {0x84, "test_b", REG_OPER_OP_ORDER},
- {0x85, "test", REG_OPER_OP_ORDER},
- {0x87, "xchg", REG_OPER_OP_ORDER},
- {0x8A, "mov_b", REG_OPER_OP_ORDER},
- {0x8B, "mov", REG_OPER_OP_ORDER},
- {0x8D, "lea", REG_OPER_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
-
-static ByteMnemonic zero_operands_instr[] = {
- {0xC3, "ret", UNSET_OP_ORDER},
- {0xC9, "leave", UNSET_OP_ORDER},
- {0x90, "nop", UNSET_OP_ORDER},
- {0xF4, "hlt", UNSET_OP_ORDER},
- {0xCC, "int3", UNSET_OP_ORDER},
- {0x60, "pushad", UNSET_OP_ORDER},
- {0x61, "popad", UNSET_OP_ORDER},
- {0x9C, "pushfd", UNSET_OP_ORDER},
- {0x9D, "popfd", UNSET_OP_ORDER},
- {0x9E, "sahf", UNSET_OP_ORDER},
- {0x99, "cdq", UNSET_OP_ORDER},
- {0x9B, "fwait", UNSET_OP_ORDER},
- {0xFC, "cld", UNSET_OP_ORDER},
- {0xAB, "stos", UNSET_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
-
-static ByteMnemonic call_jump_instr[] = {
- {0xE8, "call", UNSET_OP_ORDER},
- {0xE9, "jmp", UNSET_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
-
-static ByteMnemonic short_immediate_instr[] = {
- {0x05, "add", UNSET_OP_ORDER},
- {0x0D, "or", UNSET_OP_ORDER},
- {0x15, "adc", UNSET_OP_ORDER},
- {0x25, "and", UNSET_OP_ORDER},
- {0x2D, "sub", UNSET_OP_ORDER},
- {0x35, "xor", UNSET_OP_ORDER},
- {0x3D, "cmp", UNSET_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
-
-static const char* jump_conditional_mnem[] = {
- /*0*/ "jo", "jno", "jc", "jnc",
- /*4*/ "jz", "jnz", "jna", "ja",
- /*8*/ "js", "jns", "jpe", "jpo",
- /*12*/ "jl", "jnl", "jng", "jg"
-};
-
-
-static const char* set_conditional_mnem[] = {
- /*0*/ "seto", "setno", "setc", "setnc",
- /*4*/ "setz", "setnz", "setna", "seta",
- /*8*/ "sets", "setns", "setpe", "setpo",
- /*12*/ "setl", "setnl", "setng", "setg"
-};
-
-
-static const char* conditional_move_mnem[] = {
- /*0*/ "cmovo", "cmovno", "cmovc", "cmovnc",
- /*4*/ "cmovz", "cmovnz", "cmovna", "cmova",
- /*8*/ "cmovs", "cmovns", "cmovpe", "cmovpo",
- /*12*/ "cmovl", "cmovnl", "cmovng", "cmovg"
-};
-
-
-enum InstructionType {
- NO_INSTR,
- ZERO_OPERANDS_INSTR,
- TWO_OPERANDS_INSTR,
- JUMP_CONDITIONAL_SHORT_INSTR,
- REGISTER_INSTR,
- MOVE_REG_INSTR,
- CALL_JUMP_INSTR,
- SHORT_IMMEDIATE_INSTR
-};
-
-
-struct InstructionDesc {
- const char* mnem;
- InstructionType type;
- OperandOrder op_order_;
-};
-
-
-class InstructionTable {
- public:
- InstructionTable();
- const InstructionDesc& Get(byte x) const { return instructions_[x]; }
-
- private:
- InstructionDesc instructions_[256];
- void Clear();
- void Init();
- void CopyTable(ByteMnemonic bm[], InstructionType type);
- void SetTableRange(InstructionType type,
- byte start,
- byte end,
- const char* mnem);
- void AddJumpConditionalShort();
-};
-
-
-InstructionTable::InstructionTable() {
- Clear();
- Init();
-}
-
-
-void InstructionTable::Clear() {
- for (int i = 0; i < 256; i++) {
- instructions_[i].mnem = "";
- instructions_[i].type = NO_INSTR;
- instructions_[i].op_order_ = UNSET_OP_ORDER;
- }
-}
-
-
-void InstructionTable::Init() {
- CopyTable(two_operands_instr, TWO_OPERANDS_INSTR);
- CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
- CopyTable(call_jump_instr, CALL_JUMP_INSTR);
- CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
- AddJumpConditionalShort();
- SetTableRange(REGISTER_INSTR, 0x40, 0x47, "inc");
- SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec");
- SetTableRange(REGISTER_INSTR, 0x50, 0x57, "push");
- SetTableRange(REGISTER_INSTR, 0x58, 0x5F, "pop");
- SetTableRange(REGISTER_INSTR, 0x91, 0x97, "xchg eax,"); // 0x90 is nop.
- SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, "mov");
-}
-
-
-void InstructionTable::CopyTable(ByteMnemonic bm[], InstructionType type) {
- for (int i = 0; bm[i].b >= 0; i++) {
- InstructionDesc* id = &instructions_[bm[i].b];
- id->mnem = bm[i].mnem;
- id->op_order_ = bm[i].op_order_;
- ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
- id->type = type;
- }
-}
-
-
-void InstructionTable::SetTableRange(InstructionType type,
- byte start,
- byte end,
- const char* mnem) {
- for (byte b = start; b <= end; b++) {
- InstructionDesc* id = &instructions_[b];
- ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
- id->mnem = mnem;
- id->type = type;
- }
-}
-
-
-void InstructionTable::AddJumpConditionalShort() {
- for (byte b = 0x70; b <= 0x7F; b++) {
- InstructionDesc* id = &instructions_[b];
- ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
- id->mnem = jump_conditional_mnem[b & 0x0F];
- id->type = JUMP_CONDITIONAL_SHORT_INSTR;
- }
-}
-
-
-static InstructionTable instruction_table;
-
-
-// The IA32 disassembler implementation.
-class DisassemblerIA32 {
- public:
- DisassemblerIA32(const NameConverter& converter,
- bool abort_on_unimplemented = true)
- : converter_(converter),
- tmp_buffer_pos_(0),
- abort_on_unimplemented_(abort_on_unimplemented) {
- tmp_buffer_[0] = '\0';
- }
-
- virtual ~DisassemblerIA32() {}
-
- // Writes one disassembled instruction into 'buffer' (0-terminated).
- // Returns the length of the disassembled machine instruction in bytes.
- int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction);
-
- private:
- const NameConverter& converter_;
- v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
- unsigned int tmp_buffer_pos_;
- bool abort_on_unimplemented_;
-
-
- enum {
- eax = 0,
- ecx = 1,
- edx = 2,
- ebx = 3,
- esp = 4,
- ebp = 5,
- esi = 6,
- edi = 7
- };
-
-
- enum ShiftOpcodeExtension {
- kROL = 0,
- kROR = 1,
- kRCL = 2,
- kRCR = 3,
- kSHL = 4,
- KSHR = 5,
- kSAR = 7
- };
-
-
- const char* NameOfCPURegister(int reg) const {
- return converter_.NameOfCPURegister(reg);
- }
-
-
- const char* NameOfByteCPURegister(int reg) const {
- return converter_.NameOfByteCPURegister(reg);
- }
-
-
- const char* NameOfXMMRegister(int reg) const {
- return converter_.NameOfXMMRegister(reg);
- }
-
-
- const char* NameOfAddress(byte* addr) const {
- return converter_.NameOfAddress(addr);
- }
-
-
- // Disassembler helper functions.
- static void get_modrm(byte data, int* mod, int* regop, int* rm) {
- *mod = (data >> 6) & 3;
- *regop = (data & 0x38) >> 3;
- *rm = data & 7;
- }
-
-
- static void get_sib(byte data, int* scale, int* index, int* base) {
- *scale = (data >> 6) & 3;
- *index = (data >> 3) & 7;
- *base = data & 7;
- }
-
- typedef const char* (DisassemblerIA32::*RegisterNameMapping)(int reg) const;
-
- int PrintRightOperandHelper(byte* modrmp, RegisterNameMapping register_name);
- int PrintRightOperand(byte* modrmp);
- int PrintRightByteOperand(byte* modrmp);
- int PrintRightXMMOperand(byte* modrmp);
- int PrintOperands(const char* mnem, OperandOrder op_order, byte* data);
- int PrintImmediateOp(byte* data);
- int F7Instruction(byte* data);
- int D1D3C1Instruction(byte* data);
- int JumpShort(byte* data);
- int JumpConditional(byte* data, const char* comment);
- int JumpConditionalShort(byte* data, const char* comment);
- int SetCC(byte* data);
- int CMov(byte* data);
- int FPUInstruction(byte* data);
- int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
- int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
- void AppendToBuffer(const char* format, ...);
-
-
- void UnimplementedInstruction() {
- if (abort_on_unimplemented_) {
- UNIMPLEMENTED();
- } else {
- AppendToBuffer("'Unimplemented Instruction'");
- }
- }
-};
-
-
-void DisassemblerIA32::AppendToBuffer(const char* format, ...) {
- v8::internal::Vector<char> buf = tmp_buffer_ + tmp_buffer_pos_;
- va_list args;
- va_start(args, format);
- int result = v8::internal::OS::VSNPrintF(buf, format, args);
- va_end(args);
- tmp_buffer_pos_ += result;
-}
-
-int DisassemblerIA32::PrintRightOperandHelper(
- byte* modrmp,
- RegisterNameMapping direct_register_name) {
- int mod, regop, rm;
- get_modrm(*modrmp, &mod, &regop, &rm);
- RegisterNameMapping register_name = (mod == 3) ? direct_register_name :
- &DisassemblerIA32::NameOfCPURegister;
- switch (mod) {
- case 0:
- if (rm == ebp) {
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp+1);
- AppendToBuffer("[0x%x]", disp);
- return 5;
- } else if (rm == esp) {
- byte sib = *(modrmp + 1);
- int scale, index, base;
- get_sib(sib, &scale, &index, &base);
- if (index == esp && base == esp && scale == 0 /*times_1*/) {
- AppendToBuffer("[%s]", (this->*register_name)(rm));
- return 2;
- } else if (base == ebp) {
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
- AppendToBuffer("[%s*%d+0x%x]",
- (this->*register_name)(index),
- 1 << scale,
- disp);
- return 6;
- } else if (index != esp && base != ebp) {
- // [base+index*scale]
- AppendToBuffer("[%s+%s*%d]",
- (this->*register_name)(base),
- (this->*register_name)(index),
- 1 << scale);
- return 2;
- } else {
- UnimplementedInstruction();
- return 1;
- }
- } else {
- AppendToBuffer("[%s]", (this->*register_name)(rm));
- return 1;
- }
- break;
- case 1: // fall through
- case 2:
- if (rm == esp) {
- byte sib = *(modrmp + 1);
- int scale, index, base;
- get_sib(sib, &scale, &index, &base);
- int disp =
- mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2) : *(modrmp + 2);
- if (index == base && index == rm /*esp*/ && scale == 0 /*times_1*/) {
- AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
- } else {
- AppendToBuffer("[%s+%s*%d+0x%x]",
- (this->*register_name)(base),
- (this->*register_name)(index),
- 1 << scale,
- disp);
- }
- return mod == 2 ? 6 : 3;
- } else {
- // No sib.
- int disp =
- mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1) : *(modrmp + 1);
- AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
- return mod == 2 ? 5 : 2;
- }
- break;
- case 3:
- AppendToBuffer("%s", (this->*register_name)(rm));
- return 1;
- default:
- UnimplementedInstruction();
- return 1;
- }
- UNREACHABLE();
-}
-
-
-int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp, &DisassemblerIA32::NameOfCPURegister);
-}
-
-
-int DisassemblerIA32::PrintRightByteOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp,
- &DisassemblerIA32::NameOfByteCPURegister);
-}
-
-
-int DisassemblerIA32::PrintRightXMMOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp,
- &DisassemblerIA32::NameOfXMMRegister);
-}
-
-
-// Returns number of bytes used including the current *data.
-// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
-int DisassemblerIA32::PrintOperands(const char* mnem,
- OperandOrder op_order,
- byte* data) {
- byte modrm = *data;
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- int advance = 0;
- switch (op_order) {
- case REG_OPER_OP_ORDER: {
- AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
- advance = PrintRightOperand(data);
- break;
- }
- case OPER_REG_OP_ORDER: {
- AppendToBuffer("%s ", mnem);
- advance = PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- return advance;
-}
-
-
-// Returns number of bytes used by machine instruction, including *data byte.
-// Writes immediate instructions to 'tmp_buffer_'.
-int DisassemblerIA32::PrintImmediateOp(byte* data) {
- bool sign_extension_bit = (*data & 0x02) != 0;
- byte modrm = *(data+1);
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- const char* mnem = "Imm???";
- switch (regop) {
- case 0: mnem = "add"; break;
- case 1: mnem = "or"; break;
- case 2: mnem = "adc"; break;
- case 4: mnem = "and"; break;
- case 5: mnem = "sub"; break;
- case 6: mnem = "xor"; break;
- case 7: mnem = "cmp"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data+1);
- if (sign_extension_bit) {
- AppendToBuffer(",0x%x", *(data + 1 + count));
- return 1 + count + 1 /*int8*/;
- } else {
- AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + 1 + count));
- return 1 + count + 4 /*int32_t*/;
- }
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerIA32::F7Instruction(byte* data) {
- ASSERT_EQ(0xF7, *data);
- byte modrm = *(data+1);
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- if (mod == 3 && regop != 0) {
- const char* mnem = NULL;
- switch (regop) {
- case 2: mnem = "not"; break;
- case 3: mnem = "neg"; break;
- case 4: mnem = "mul"; break;
- case 7: mnem = "idiv"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s %s", mnem, NameOfCPURegister(rm));
- return 2;
- } else if (mod == 3 && regop == eax) {
- int32_t imm = *reinterpret_cast<int32_t*>(data+2);
- AppendToBuffer("test %s,0x%x", NameOfCPURegister(rm), imm);
- return 6;
- } else if (regop == eax) {
- AppendToBuffer("test ");
- int count = PrintRightOperand(data+1);
- int32_t imm = *reinterpret_cast<int32_t*>(data+1+count);
- AppendToBuffer(",0x%x", imm);
- return 1+count+4 /*int32_t*/;
- } else {
- UnimplementedInstruction();
- return 2;
- }
-}
-
-int DisassemblerIA32::D1D3C1Instruction(byte* data) {
- byte op = *data;
- ASSERT(op == 0xD1 || op == 0xD3 || op == 0xC1);
- byte modrm = *(data+1);
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- int imm8 = -1;
- int num_bytes = 2;
- if (mod == 3) {
- const char* mnem = NULL;
- switch (regop) {
- case kROL: mnem = "rol"; break;
- case kROR: mnem = "ror"; break;
- case kRCL: mnem = "rcl"; break;
- case kRCR: mnem = "rcr"; break;
- case kSHL: mnem = "shl"; break;
- case KSHR: mnem = "shr"; break;
- case kSAR: mnem = "sar"; break;
- default: UnimplementedInstruction();
- }
- if (op == 0xD1) {
- imm8 = 1;
- } else if (op == 0xC1) {
- imm8 = *(data+2);
- num_bytes = 3;
- } else if (op == 0xD3) {
- // Shift/rotate by cl.
- }
- ASSERT_NE(NULL, mnem);
- AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm));
- if (imm8 > 0) {
- AppendToBuffer("%d", imm8);
- } else {
- AppendToBuffer("cl");
- }
- } else {
- UnimplementedInstruction();
- }
- return num_bytes;
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerIA32::JumpShort(byte* data) {
- ASSERT_EQ(0xEB, *data);
- byte b = *(data+1);
- byte* dest = data + static_cast<int8_t>(b) + 2;
- AppendToBuffer("jmp %s", NameOfAddress(dest));
- return 2;
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerIA32::JumpConditional(byte* data, const char* comment) {
- ASSERT_EQ(0x0F, *data);
- byte cond = *(data+1) & 0x0F;
- byte* dest = data + *reinterpret_cast<int32_t*>(data+2) + 6;
- const char* mnem = jump_conditional_mnem[cond];
- AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
- if (comment != NULL) {
- AppendToBuffer(", %s", comment);
- }
- return 6; // includes 0x0F
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerIA32::JumpConditionalShort(byte* data, const char* comment) {
- byte cond = *data & 0x0F;
- byte b = *(data+1);
- byte* dest = data + static_cast<int8_t>(b) + 2;
- const char* mnem = jump_conditional_mnem[cond];
- AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
- if (comment != NULL) {
- AppendToBuffer(", %s", comment);
- }
- return 2;
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerIA32::SetCC(byte* data) {
- ASSERT_EQ(0x0F, *data);
- byte cond = *(data+1) & 0x0F;
- const char* mnem = set_conditional_mnem[cond];
- AppendToBuffer("%s ", mnem);
- PrintRightByteOperand(data+2);
- return 3; // Includes 0x0F.
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerIA32::CMov(byte* data) {
- ASSERT_EQ(0x0F, *data);
- byte cond = *(data + 1) & 0x0F;
- const char* mnem = conditional_move_mnem[cond];
- int op_size = PrintOperands(mnem, REG_OPER_OP_ORDER, data + 2);
- return 2 + op_size; // includes 0x0F
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerIA32::FPUInstruction(byte* data) {
- byte escape_opcode = *data;
- ASSERT_EQ(0xD8, escape_opcode & 0xF8);
- byte modrm_byte = *(data+1);
-
- if (modrm_byte >= 0xC0) {
- return RegisterFPUInstruction(escape_opcode, modrm_byte);
- } else {
- return MemoryFPUInstruction(escape_opcode, modrm_byte, data+1);
- }
-}
-
-int DisassemblerIA32::MemoryFPUInstruction(int escape_opcode,
- int modrm_byte,
- byte* modrm_start) {
- const char* mnem = "?";
- int regop = (modrm_byte >> 3) & 0x7; // reg/op field of modrm byte.
- switch (escape_opcode) {
- case 0xD9: switch (regop) {
- case 0: mnem = "fld_s"; break;
- case 3: mnem = "fstp_s"; break;
- case 7: mnem = "fstcw"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDB: switch (regop) {
- case 0: mnem = "fild_s"; break;
- case 1: mnem = "fisttp_s"; break;
- case 2: mnem = "fist_s"; break;
- case 3: mnem = "fistp_s"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDD: switch (regop) {
- case 0: mnem = "fld_d"; break;
- case 1: mnem = "fisttp_d"; break;
- case 2: mnem = "fst_d"; break;
- case 3: mnem = "fstp_d"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDF: switch (regop) {
- case 5: mnem = "fild_d"; break;
- case 7: mnem = "fistp_d"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(modrm_start);
- return count + 1;
-}
-
-int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
- byte modrm_byte) {
- bool has_register = false; // Is the FPU register encoded in modrm_byte?
- const char* mnem = "?";
-
- switch (escape_opcode) {
- case 0xD8:
- UnimplementedInstruction();
- break;
-
- case 0xD9:
- switch (modrm_byte & 0xF8) {
- case 0xC0:
- mnem = "fld";
- has_register = true;
- break;
- case 0xC8:
- mnem = "fxch";
- has_register = true;
- break;
- default:
- switch (modrm_byte) {
- case 0xE0: mnem = "fchs"; break;
- case 0xE1: mnem = "fabs"; break;
- case 0xE4: mnem = "ftst"; break;
- case 0xE8: mnem = "fld1"; break;
- case 0xEB: mnem = "fldpi"; break;
- case 0xED: mnem = "fldln2"; break;
- case 0xEE: mnem = "fldz"; break;
- case 0xF1: mnem = "fyl2x"; break;
- case 0xF5: mnem = "fprem1"; break;
- case 0xF7: mnem = "fincstp"; break;
- case 0xF8: mnem = "fprem"; break;
- case 0xFE: mnem = "fsin"; break;
- case 0xFF: mnem = "fcos"; break;
- default: UnimplementedInstruction();
- }
- }
- break;
-
- case 0xDA:
- if (modrm_byte == 0xE9) {
- mnem = "fucompp";
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xDB:
- if ((modrm_byte & 0xF8) == 0xE8) {
- mnem = "fucomi";
- has_register = true;
- } else if (modrm_byte == 0xE2) {
- mnem = "fclex";
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xDC:
- has_register = true;
- switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "fadd"; break;
- case 0xE8: mnem = "fsub"; break;
- case 0xC8: mnem = "fmul"; break;
- case 0xF8: mnem = "fdiv"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDD:
- has_register = true;
- switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "ffree"; break;
- case 0xD8: mnem = "fstp"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDE:
- if (modrm_byte == 0xD9) {
- mnem = "fcompp";
- } else {
- has_register = true;
- switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "faddp"; break;
- case 0xE8: mnem = "fsubp"; break;
- case 0xC8: mnem = "fmulp"; break;
- case 0xF8: mnem = "fdivp"; break;
- default: UnimplementedInstruction();
- }
- }
- break;
-
- case 0xDF:
- if (modrm_byte == 0xE0) {
- mnem = "fnstsw_ax";
- } else if ((modrm_byte & 0xF8) == 0xE8) {
- mnem = "fucomip";
- has_register = true;
- }
- break;
-
- default: UnimplementedInstruction();
- }
-
- if (has_register) {
- AppendToBuffer("%s st%d", mnem, modrm_byte & 0x7);
- } else {
- AppendToBuffer("%s", mnem);
- }
- return 2;
-}
-
-
-// Mnemonics for instructions 0xF0 byte.
-// Returns NULL if the instruction is not handled here.
-static const char* F0Mnem(byte f0byte) {
- switch (f0byte) {
- case 0x18: return "prefetch";
- case 0xA2: return "cpuid";
- case 0x31: return "rdtsc";
- case 0xBE: return "movsx_b";
- case 0xBF: return "movsx_w";
- case 0xB6: return "movzx_b";
- case 0xB7: return "movzx_w";
- case 0xAF: return "imul";
- case 0xA5: return "shld";
- case 0xAD: return "shrd";
- case 0xAB: return "bts";
- default: return NULL;
- }
-}
-
-
-// Disassembled instruction '*instr' and writes it into 'out_buffer'.
-int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
- byte* instr) {
- tmp_buffer_pos_ = 0; // starting to write as position 0
- byte* data = instr;
- // Check for hints.
- const char* branch_hint = NULL;
- // We use these two prefixes only with branch prediction
- if (*data == 0x3E /*ds*/) {
- branch_hint = "predicted taken";
- data++;
- } else if (*data == 0x2E /*cs*/) {
- branch_hint = "predicted not taken";
- data++;
- }
- bool processed = true; // Will be set to false if the current instruction
- // is not in 'instructions' table.
- const InstructionDesc& idesc = instruction_table.Get(*data);
- switch (idesc.type) {
- case ZERO_OPERANDS_INSTR:
- AppendToBuffer(idesc.mnem);
- data++;
- break;
-
- case TWO_OPERANDS_INSTR:
- data++;
- data += PrintOperands(idesc.mnem, idesc.op_order_, data);
- break;
-
- case JUMP_CONDITIONAL_SHORT_INSTR:
- data += JumpConditionalShort(data, branch_hint);
- break;
-
- case REGISTER_INSTR:
- AppendToBuffer("%s %s", idesc.mnem, NameOfCPURegister(*data & 0x07));
- data++;
- break;
-
- case MOVE_REG_INSTR: {
- byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
- AppendToBuffer("mov %s,%s",
- NameOfCPURegister(*data & 0x07),
- NameOfAddress(addr));
- data += 5;
- break;
- }
-
- case CALL_JUMP_INSTR: {
- byte* addr = data + *reinterpret_cast<int32_t*>(data+1) + 5;
- AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
- data += 5;
- break;
- }
-
- case SHORT_IMMEDIATE_INSTR: {
- byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
- AppendToBuffer("%s eax, %s", idesc.mnem, NameOfAddress(addr));
- data += 5;
- break;
- }
-
- case NO_INSTR:
- processed = false;
- break;
-
- default:
- UNIMPLEMENTED(); // This type is not implemented.
- }
- //----------------------------
- if (!processed) {
- switch (*data) {
- case 0xC2:
- AppendToBuffer("ret 0x%x", *reinterpret_cast<uint16_t*>(data+1));
- data += 3;
- break;
-
- case 0x69: // fall through
- case 0x6B:
- { int mod, regop, rm;
- get_modrm(*(data+1), &mod, &regop, &rm);
- int32_t imm =
- *data == 0x6B ? *(data+2) : *reinterpret_cast<int32_t*>(data+2);
- AppendToBuffer("imul %s,%s,0x%x",
- NameOfCPURegister(regop),
- NameOfCPURegister(rm),
- imm);
- data += 2 + (*data == 0x6B ? 1 : 4);
- }
- break;
-
- case 0xF6:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (regop == eax) {
- AppendToBuffer("test_b ");
- data += PrintRightByteOperand(data);
- int32_t imm = *data;
- AppendToBuffer(",0x%x", imm);
- data++;
- } else {
- UnimplementedInstruction();
- }
- }
- break;
-
- case 0x81: // fall through
- case 0x83: // 0x81 with sign extension bit set
- data += PrintImmediateOp(data);
- break;
-
- case 0x0F:
- { byte f0byte = *(data+1);
- const char* f0mnem = F0Mnem(f0byte);
- if (f0byte == 0x18) {
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- const char* suffix[] = {"nta", "1", "2", "3"};
- AppendToBuffer("%s%s ", f0mnem, suffix[regop & 0x03]);
- data += PrintRightOperand(data);
- } else if (f0byte == 0xA2 || f0byte == 0x31) {
- AppendToBuffer("%s", f0mnem);
- data += 2;
- } else if (f0byte == 0x28) {
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movaps %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if ((f0byte & 0xF0) == 0x80) {
- data += JumpConditional(data, branch_hint);
- } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
- f0byte == 0xB7 || f0byte == 0xAF) {
- data += 2;
- data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data);
- } else if ((f0byte & 0xF0) == 0x90) {
- data += SetCC(data);
- } else if ((f0byte & 0xF0) == 0x40) {
- data += CMov(data);
- } else {
- data += 2;
- if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
- // shrd, shld, bts
- AppendToBuffer("%s ", f0mnem);
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightOperand(data);
- if (f0byte == 0xAB) {
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- } else {
- AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
- }
- } else {
- UnimplementedInstruction();
- }
- }
- }
- break;
-
- case 0x8F:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (regop == eax) {
- AppendToBuffer("pop ");
- data += PrintRightOperand(data);
- }
- }
- break;
-
- case 0xFF:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- const char* mnem = NULL;
- switch (regop) {
- case esi: mnem = "push"; break;
- case eax: mnem = "inc"; break;
- case ecx: mnem = "dec"; break;
- case edx: mnem = "call"; break;
- case esp: mnem = "jmp"; break;
- default: mnem = "???";
- }
- AppendToBuffer("%s ", mnem);
- data += PrintRightOperand(data);
- }
- break;
-
- case 0xC7: // imm32, fall through
- case 0xC6: // imm8
- { bool is_byte = *data == 0xC6;
- data++;
- if (is_byte) {
- AppendToBuffer("%s ", "mov_b");
- data += PrintRightByteOperand(data);
- int32_t imm = *data;
- AppendToBuffer(",0x%x", imm);
- data++;
- } else {
- AppendToBuffer("%s ", "mov");
- data += PrintRightOperand(data);
- int32_t imm = *reinterpret_cast<int32_t*>(data);
- AppendToBuffer(",0x%x", imm);
- data += 4;
- }
- }
- break;
-
- case 0x80:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- const char* mnem = NULL;
- switch (regop) {
- case 5: mnem = "subb"; break;
- case 7: mnem = "cmpb"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- data += PrintRightByteOperand(data);
- int32_t imm = *data;
- AppendToBuffer(",0x%x", imm);
- data++;
- }
- break;
-
- case 0x88: // 8bit, fall through
- case 0x89: // 32bit
- { bool is_byte = *data == 0x88;
- int mod, regop, rm;
- data++;
- get_modrm(*data, &mod, &regop, &rm);
- if (is_byte) {
- AppendToBuffer("%s ", "mov_b");
- data += PrintRightByteOperand(data);
- AppendToBuffer(",%s", NameOfByteCPURegister(regop));
- } else {
- AppendToBuffer("%s ", "mov");
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- }
- }
- break;
-
- case 0x66: // prefix
- data++;
- if (*data == 0x8B) {
- data++;
- data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data);
- } else if (*data == 0x89) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("mov_w ");
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- } else if (*data == 0x0F) {
- data++;
- if (*data == 0x38) {
- data++;
- if (*data == 0x17) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("ptest %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x2A) {
- // movntdqa
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movntdqa %s,", NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
- } else {
- UnimplementedInstruction();
- }
- } else if (*data == 0x3A) {
- data++;
- if (*data == 0x16) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("pextrd %s,%s,%d",
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0x22) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("pinsrd %s,%s,%d",
- NameOfXMMRegister(regop),
- NameOfCPURegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else {
- UnimplementedInstruction();
- }
- } else if (*data == 0x2E || *data == 0x2F) {
- const char* mnem = (*data == 0x2E) ? "ucomisd" : "comisd";
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (mod == 0x3) {
- AppendToBuffer("%s %s,%s", mnem,
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else {
- AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
- }
- } else if (*data == 0x50) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movmskpd %s,%s",
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x54) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("andpd %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x57) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("xorpd %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x6E) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movd %s,", NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
- } else if (*data == 0x6F) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movdqa %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (*data == 0x70) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("pshufd %s,%s,%d",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0xF3) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("psllq %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x73) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- ASSERT(regop == esi || regop == edx);
- AppendToBuffer("%s %s,%d",
- (regop == esi) ? "psllq" : "psrlq",
- NameOfXMMRegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0xD3) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("psrlq %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x7F) {
- AppendToBuffer("movdqa ");
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightXMMOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (*data == 0x7E) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movd ");
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (*data == 0xDB) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("pand %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0xE7) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (mod == 3) {
- AppendToBuffer("movntdq ");
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else {
- UnimplementedInstruction();
- }
- } else if (*data == 0xEF) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("pxor %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0xEB) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("por %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else {
- UnimplementedInstruction();
- }
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xFE:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (regop == ecx) {
- AppendToBuffer("dec_b ");
- data += PrintRightOperand(data);
- } else {
- UnimplementedInstruction();
- }
- }
- break;
-
- case 0x68:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data+1));
- data += 5;
- break;
-
- case 0x6A:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int8_t*>(data + 1));
- data += 2;
- break;
-
- case 0xA8:
- AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data+1));
- data += 2;
- break;
-
- case 0x2C:
- AppendToBuffer("subb eax,0x%x", *reinterpret_cast<uint8_t*>(data+1));
- data += 2;
- break;
-
- case 0xA9:
- AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1));
- data += 5;
- break;
-
- case 0xD1: // fall through
- case 0xD3: // fall through
- case 0xC1:
- data += D1D3C1Instruction(data);
- break;
-
- case 0xD9: // fall through
- case 0xDA: // fall through
- case 0xDB: // fall through
- case 0xDC: // fall through
- case 0xDD: // fall through
- case 0xDE: // fall through
- case 0xDF:
- data += FPUInstruction(data);
- break;
-
- case 0xEB:
- data += JumpShort(data);
- break;
-
- case 0xF2:
- if (*(data+1) == 0x0F) {
- byte b2 = *(data+2);
- if (b2 == 0x11) {
- AppendToBuffer("movsd ");
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightXMMOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (b2 == 0x10) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movsd %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0x5A) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("cvtsd2ss %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else {
- const char* mnem = "?";
- switch (b2) {
- case 0x2A: mnem = "cvtsi2sd"; break;
- case 0x2C: mnem = "cvttsd2si"; break;
- case 0x51: mnem = "sqrtsd"; break;
- case 0x58: mnem = "addsd"; break;
- case 0x59: mnem = "mulsd"; break;
- case 0x5C: mnem = "subsd"; break;
- case 0x5E: mnem = "divsd"; break;
- }
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (b2 == 0x2A) {
- AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
- } else if (b2 == 0x2C) {
- AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0xC2) {
- // Intel manual 2A, Table 3-18.
- const char* const pseudo_op[] = {
- "cmpeqsd",
- "cmpltsd",
- "cmplesd",
- "cmpunordsd",
- "cmpneqsd",
- "cmpnltsd",
- "cmpnlesd",
- "cmpordsd"
- };
- AppendToBuffer("%s %s,%s",
- pseudo_op[data[1]],
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data += 2;
- } else {
- AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- }
- }
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xF3:
- if (*(data+1) == 0x0F) {
- byte b2 = *(data+2);
- if (b2 == 0x11) {
- AppendToBuffer("movss ");
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightXMMOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (b2 == 0x10) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movss %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0x2C) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("cvttss2si %s,", NameOfCPURegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0x5A) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0x6F) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movdqu %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0x7F) {
- AppendToBuffer("movdqu ");
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightXMMOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else {
- UnimplementedInstruction();
- }
- } else if (*(data+1) == 0xA5) {
- data += 2;
- AppendToBuffer("rep_movs");
- } else if (*(data+1) == 0xAB) {
- data += 2;
- AppendToBuffer("rep_stos");
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xF7:
- data += F7Instruction(data);
- break;
-
- default:
- UnimplementedInstruction();
- }
- }
-
- if (tmp_buffer_pos_ < sizeof tmp_buffer_) {
- tmp_buffer_[tmp_buffer_pos_] = '\0';
- }
-
- int instr_len = data - instr;
- if (instr_len == 0) {
- printf("%02x", *data);
- }
- ASSERT(instr_len > 0); // Ensure progress.
-
- int outp = 0;
- // Instruction bytes.
- for (byte* bp = instr; bp < data; bp++) {
- outp += v8::internal::OS::SNPrintF(out_buffer + outp,
- "%02x",
- *bp);
- }
- for (int i = 6 - instr_len; i >= 0; i--) {
- outp += v8::internal::OS::SNPrintF(out_buffer + outp,
- " ");
- }
-
- outp += v8::internal::OS::SNPrintF(out_buffer + outp,
- " %s",
- tmp_buffer_.start());
- return instr_len;
-} // NOLINT (function is too long)
-
-
-//------------------------------------------------------------------------------
-
-
-static const char* cpu_regs[8] = {
- "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
-};
-
-
-static const char* byte_cpu_regs[8] = {
- "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"
-};
-
-
-static const char* xmm_regs[8] = {
- "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
-};
-
-
-const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
- return tmp_buffer_.start();
-}
-
-
-const char* NameConverter::NameOfConstant(byte* addr) const {
- return NameOfAddress(addr);
-}
-
-
-const char* NameConverter::NameOfCPURegister(int reg) const {
- if (0 <= reg && reg < 8) return cpu_regs[reg];
- return "noreg";
-}
-
-
-const char* NameConverter::NameOfByteCPURegister(int reg) const {
- if (0 <= reg && reg < 8) return byte_cpu_regs[reg];
- return "noreg";
-}
-
-
-const char* NameConverter::NameOfXMMRegister(int reg) const {
- if (0 <= reg && reg < 8) return xmm_regs[reg];
- return "noxmmreg";
-}
-
-
-const char* NameConverter::NameInCode(byte* addr) const {
- // IA32 does not embed debug strings at the moment.
- UNREACHABLE();
- return "";
-}
-
-
-//------------------------------------------------------------------------------
-
-Disassembler::Disassembler(const NameConverter& converter)
- : converter_(converter) {}
-
-
-Disassembler::~Disassembler() {}
-
-
-int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
- byte* instruction) {
- DisassemblerIA32 d(converter_, false /*do not crash if unimplemented*/);
- return d.InstructionDecode(buffer, instruction);
-}
-
-
-// The IA-32 assembler does not currently use constant pools.
-int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
-
-
-/*static*/ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
- NameConverter converter;
- Disassembler d(converter);
- for (byte* pc = begin; pc < end;) {
- v8::internal::EmbeddedVector<char, 128> buffer;
- buffer[0] = '\0';
- byte* prev_pc = pc;
- pc += d.InstructionDecode(buffer, pc);
- fprintf(f, "%p", prev_pc);
- fprintf(f, " ");
-
- for (byte* bp = prev_pc; bp < pc; bp++) {
- fprintf(f, "%02x", *bp);
- }
- for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
- fprintf(f, " ");
- }
- fprintf(f, " %s\n", buffer.start());
- }
-}
-
-
-} // namespace disasm
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/frames-ia32.cc b/src/3rdparty/v8/src/ia32/frames-ia32.cc
deleted file mode 100644
index dd44f0e..0000000
--- a/src/3rdparty/v8/src/ia32/frames-ia32.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "frames-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-Address ExitFrame::ComputeStackPointer(Address fp) {
- return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/frames-ia32.h b/src/3rdparty/v8/src/ia32/frames-ia32.h
deleted file mode 100644
index 0f95abd..0000000
--- a/src/3rdparty/v8/src/ia32/frames-ia32.h
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_FRAMES_IA32_H_
-#define V8_IA32_FRAMES_IA32_H_
-
-namespace v8 {
-namespace internal {
-
-
-// Register lists
-// Note that the bit values must match those used in actual instruction encoding
-static const int kNumRegs = 8;
-
-
-// Caller-saved registers
-static const RegList kJSCallerSaved =
- 1 << 0 | // eax
- 1 << 1 | // ecx
- 1 << 2 | // edx
- 1 << 3 | // ebx - used as a caller-saved register in JavaScript code
- 1 << 7; // edi - callee function
-
-static const int kNumJSCallerSaved = 5;
-
-typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
-
-
-// Number of registers for which space is reserved in safepoints.
-static const int kNumSafepointRegisters = 8;
-
-// ----------------------------------------------------
-
-
-class StackHandlerConstants : public AllStatic {
- public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kFPOffset = 1 * kPointerSize;
- static const int kStateOffset = 2 * kPointerSize;
- static const int kPCOffset = 3 * kPointerSize;
-
- static const int kSize = kPCOffset + kPointerSize;
-};
-
-
-class EntryFrameConstants : public AllStatic {
- public:
- static const int kCallerFPOffset = -6 * kPointerSize;
-
- static const int kFunctionArgOffset = +3 * kPointerSize;
- static const int kReceiverArgOffset = +4 * kPointerSize;
- static const int kArgcOffset = +5 * kPointerSize;
- static const int kArgvOffset = +6 * kPointerSize;
-};
-
-
-class ExitFrameConstants : public AllStatic {
- public:
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
-
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
-
- // FP-relative displacement of the caller's SP. It points just
- // below the saved PC.
- static const int kCallerSPDisplacement = +2 * kPointerSize;
-};
-
-
-class StandardFrameConstants : public AllStatic {
- public:
- static const int kFixedFrameSize = 4;
- static const int kExpressionsOffset = -3 * kPointerSize;
- static const int kMarkerOffset = -2 * kPointerSize;
- static const int kContextOffset = -1 * kPointerSize;
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
- static const int kCallerSPOffset = +2 * kPointerSize;
-};
-
-
-class JavaScriptFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
-
- // Caller SP-relative.
- static const int kParam0Offset = -2 * kPointerSize;
- static const int kReceiverOffset = -1 * kPointerSize;
-};
-
-
-class ArgumentsAdaptorFrameConstants : public AllStatic {
- public:
- static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
-class InternalFrameConstants : public AllStatic {
- public:
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
-inline Object* JavaScriptFrame::function_slot_object() const {
- const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_FRAMES_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/full-codegen-ia32.cc b/src/3rdparty/v8/src/ia32/full-codegen-ia32.cc
deleted file mode 100644
index 3f72def..0000000
--- a/src/3rdparty/v8/src/ia32/full-codegen-ia32.cc
+++ /dev/null
@@ -1,4357 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "parser.h"
-#include "scopes.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm_)
-
-
-class JumpPatchSite BASE_EMBEDDED {
- public:
- explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
-#ifdef DEBUG
- info_emitted_ = false;
-#endif
- }
-
- ~JumpPatchSite() {
- ASSERT(patch_site_.is_bound() == info_emitted_);
- }
-
- void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
- __ test(reg, Immediate(kSmiTagMask));
- EmitJump(not_carry, target); // Always taken before patched.
- }
-
- void EmitJumpIfSmi(Register reg, NearLabel* target) {
- __ test(reg, Immediate(kSmiTagMask));
- EmitJump(carry, target); // Never taken before patched.
- }
-
- void EmitPatchInfo() {
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
- ASSERT(is_int8(delta_to_patch_site));
- __ test(eax, Immediate(delta_to_patch_site));
-#ifdef DEBUG
- info_emitted_ = true;
-#endif
- }
-
- bool is_bound() const { return patch_site_.is_bound(); }
-
- private:
- // jc will be patched with jz, jnc will become jnz.
- void EmitJump(Condition cc, NearLabel* target) {
- ASSERT(!patch_site_.is_bound() && !info_emitted_);
- ASSERT(cc == carry || cc == not_carry);
- __ bind(&patch_site_);
- __ j(cc, target);
- }
-
- MacroAssembler* masm_;
- Label patch_site_;
-#ifdef DEBUG
- bool info_emitted_;
-#endif
-};
-
-
-// Generate code for a JS function. On entry to the function the receiver
-// and arguments have been pushed on the stack left to right, with the
-// return address on top of them. The actual argument count matches the
-// formal parameter count expected by the function.
-//
-// The live registers are:
-// o edi: the JS function object being called (ie, ourselves)
-// o esi: our context
-// o ebp: our caller's frame pointer
-// o esp: stack pointer (pointing to return address)
-//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-ia32.h for its layout.
-void FullCodeGenerator::Generate(CompilationInfo* info) {
- ASSERT(info_ == NULL);
- info_ = info;
- SetFunctionPosition(function());
- Comment cmnt(masm_, "[ function compiled by full code generator");
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
-#endif
-
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- __ push(edi); // Callee's JS Function.
-
- { Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = scope()->num_stack_slots();
- if (locals_count == 1) {
- __ push(Immediate(isolate()->factory()->undefined_value()));
- } else if (locals_count > 1) {
- __ mov(eax, Immediate(isolate()->factory()->undefined_value()));
- for (int i = 0; i < locals_count; i++) {
- __ push(eax);
- }
- }
- }
-
- bool function_in_register = true;
-
- // Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment cmnt(masm_, "[ Allocate local context");
- // Argument to NewContext is the function, which is still in edi.
- __ push(edi);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewContext, 1);
- }
- function_in_register = false;
- // Context is returned in both eax and esi. It replaces the context
- // passed to us. It's saved in the stack and kept live in esi.
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
-
- // Copy parameters into context if necessary.
- int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Slot* slot = scope()->parameter(i)->AsSlot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ mov(eax, Operand(ebp, parameter_offset));
- // Store it in the context.
- int context_offset = Context::SlotOffset(slot->index());
- __ mov(Operand(esi, context_offset), eax);
- // Update the write barrier. This clobbers all involved
- // registers, so we have use a third register to avoid
- // clobbering esi.
- __ mov(ecx, esi);
- __ RecordWrite(ecx, context_offset, eax, ebx);
- }
- }
- }
-
- Variable* arguments = scope()->arguments();
- if (arguments != NULL) {
- // Function uses arguments object.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (function_in_register) {
- __ push(edi);
- } else {
- __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- }
- // Receiver is just before the parameters on the caller's stack.
- int offset = scope()->num_parameters() * kPointerSize;
- __ lea(edx,
- Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
- __ push(edx);
- __ push(Immediate(Smi::FromInt(scope()->num_parameters())));
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(
- is_strict_mode() ? ArgumentsAccessStub::NEW_STRICT
- : ArgumentsAccessStub::NEW_NON_STRICT);
- __ CallStub(&stub);
-
- Variable* arguments_shadow = scope()->arguments_shadow();
- if (arguments_shadow != NULL) {
- __ mov(ecx, eax); // Duplicate result.
- Move(arguments_shadow->AsSlot(), ecx, ebx, edx);
- }
- Move(arguments->AsSlot(), eax, ebx, edx);
- }
-
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
-
- // Visit the declarations and body unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ Declarations");
- scope()->VisitIllegalRedeclaration(this);
-
- } else {
- { Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- EmitDeclaration(scope()->function(), Variable::CONST, NULL);
- }
- VisitDeclarations(scope()->declarations());
- }
-
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailout(info->function(), NO_REGISTERS);
- NearLabel ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, taken);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ bind(&ok);
- }
-
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(function()->body());
- ASSERT(loop_depth() == 0);
- }
- }
-
- // Always emit a 'return undefined' in case control fell off the end of
- // the body.
- { Comment cmnt(masm_, "[ return <undefined>;");
- __ mov(eax, isolate()->factory()->undefined_value());
- EmitReturnSequence();
- }
-}
-
-
-void FullCodeGenerator::ClearAccumulator() {
- __ Set(eax, Immediate(Smi::FromInt(0)));
-}
-
-
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
- Comment cmnt(masm_, "[ Stack check");
- NearLabel ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, taken);
- StackCheckStub stub;
- __ CallStub(&stub);
- // Record a mapping of this PC offset to the OSR id. This is used to find
- // the AST id from the unoptimized code in order to use it as a key into
- // the deoptimization input data found in the optimized code.
- RecordStackCheck(stmt->OsrEntryId());
-
- // Loop stack checks can be patched to perform on-stack replacement. In
- // order to decide whether or not to perform OSR we embed the loop depth
- // in a test instruction after the call so we can extract it from the OSR
- // builtin.
- ASSERT(loop_depth() > 0);
- __ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
-
- __ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
- // Record a mapping of the OSR id to this PC. This is used if the OSR
- // entry becomes the target of a bailout. We don't expect it to be, but
- // we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::EmitReturnSequence() {
- Comment cmnt(masm_, "[ Return sequence");
- if (return_label_.is_bound()) {
- __ jmp(&return_label_);
- } else {
- // Common return label
- __ bind(&return_label_);
- if (FLAG_trace) {
- __ push(eax);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
- SetSourcePosition(function()->end_position() - 1);
- __ RecordJSReturn();
- // Do not use the leave instruction here because it is too short to
- // patch with the code required by the debugger.
- __ mov(esp, ebp);
- __ pop(ebp);
-
- int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
- __ Ret(arguments_bytes, ecx);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- ASSERT(Assembler::kJSReturnSequenceLength <=
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
- }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
- MemOperand slot_operand = codegen()->EmitSlotSearch(slot, result_register());
- __ mov(result_register(), slot_operand);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
- MemOperand slot_operand = codegen()->EmitSlotSearch(slot, result_register());
- // Memory operands can be pushed directly.
- __ push(slot_operand);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
- // For simplicity we always test the accumulator register.
- codegen()->Move(result_register(), slot);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
- UNREACHABLE(); // Not used on IA32.
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Heap::RootListIndex index) const {
- UNREACHABLE(); // Not used on IA32.
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Heap::RootListIndex index) const {
- UNREACHABLE(); // Not used on IA32.
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- UNREACHABLE(); // Not used on IA32.
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Handle<Object> lit) const {
- __ Set(result_register(), Immediate(lit));
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
- // Immediates can be pushed directly.
- __ push(Immediate(lit));
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
- true,
- true_label_,
- false_label_);
- ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
- if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else if (lit->IsTrue() || lit->IsJSObject()) {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- } else if (lit->IsString()) {
- if (String::cast(*lit)->length() == 0) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- }
- } else if (lit->IsSmi()) {
- if (Smi::cast(*lit)->value() == 0) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- }
- } else {
- // For simplicity we always test the accumulator register.
- __ mov(result_register(), lit);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count,
- Register reg) const {
- ASSERT(count > 0);
- __ Drop(count);
- __ Move(result_register(), reg);
-}
-
-
-void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- if (count > 1) __ Drop(count - 1);
- __ mov(Operand(esp, 0), reg);
-}
-
-
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- // For simplicity we always test the accumulator register.
- __ Drop(count);
- __ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- ASSERT(materialize_true == materialize_false);
- __ bind(materialize_true);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- NearLabel done;
- __ bind(materialize_true);
- __ mov(result_register(), isolate()->factory()->true_value());
- __ jmp(&done);
- __ bind(materialize_false);
- __ mov(result_register(), isolate()->factory()->false_value());
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- NearLabel done;
- __ bind(materialize_true);
- __ push(Immediate(isolate()->factory()->true_value()));
- __ jmp(&done);
- __ bind(materialize_false);
- __ push(Immediate(isolate()->factory()->false_value()));
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- ASSERT(materialize_true == true_label_);
- ASSERT(materialize_false == false_label_);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
- Handle<Object> value = flag
- ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value();
- __ mov(result_register(), value);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
- Handle<Object> value = flag
- ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value();
- __ push(Immediate(value));
-}
-
-
-void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
- true,
- true_label_,
- false_label_);
- if (flag) {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- } else {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- }
-}
-
-
-void FullCodeGenerator::DoTest(Label* if_true,
- Label* if_false,
- Label* fall_through) {
- // Emit the inlined tests assumed by the stub.
- __ cmp(result_register(), isolate()->factory()->undefined_value());
- __ j(equal, if_false);
- __ cmp(result_register(), isolate()->factory()->true_value());
- __ j(equal, if_true);
- __ cmp(result_register(), isolate()->factory()->false_value());
- __ j(equal, if_false);
- STATIC_ASSERT(kSmiTag == 0);
- __ test(result_register(), Operand(result_register()));
- __ j(zero, if_false);
- __ test(result_register(), Immediate(kSmiTagMask));
- __ j(zero, if_true);
-
- // Call the ToBoolean stub for all other cases.
- ToBooleanStub stub;
- __ push(result_register());
- __ CallStub(&stub);
- __ test(eax, Operand(eax));
-
- // The stub returns nonzero for true.
- Split(not_zero, if_true, if_false, fall_through);
-}
-
-
-void FullCodeGenerator::Split(Condition cc,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (if_false == fall_through) {
- __ j(cc, if_true);
- } else if (if_true == fall_through) {
- __ j(NegateCondition(cc), if_false);
- } else {
- __ j(cc, if_true);
- __ jmp(if_false);
- }
-}
-
-
-MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- return Operand(ebp, SlotOffset(slot));
- case Slot::CONTEXT: {
- int context_chain_length =
- scope()->ContextChainLength(slot->var()->scope());
- __ LoadContext(scratch, context_chain_length);
- return ContextOperand(scratch, slot->index());
- }
- case Slot::LOOKUP:
- UNREACHABLE();
- }
- UNREACHABLE();
- return Operand(eax, 0);
-}
-
-
-void FullCodeGenerator::Move(Register destination, Slot* source) {
- MemOperand location = EmitSlotSearch(source, destination);
- __ mov(destination, location);
-}
-
-
-void FullCodeGenerator::Move(Slot* dst,
- Register src,
- Register scratch1,
- Register scratch2) {
- ASSERT(dst->type() != Slot::LOOKUP); // Not yet implemented.
- ASSERT(!scratch1.is(src) && !scratch2.is(src));
- MemOperand location = EmitSlotSearch(dst, scratch1);
- __ mov(location, src);
- // Emit the write barrier code if the location is in the heap.
- if (dst->type() == Slot::CONTEXT) {
- int offset = Context::SlotOffset(dst->index());
- ASSERT(!scratch1.is(esi) && !src.is(esi) && !scratch2.is(esi));
- __ RecordWrite(scratch1, offset, src, scratch2);
- }
-}
-
-
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
- bool should_normalize,
- Label* if_true,
- Label* if_false) {
- // Only prepare for bailouts before splits if we're in a test
- // context. Otherwise, we let the Visit function deal with the
- // preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest() || !info_->IsOptimizable()) return;
-
- NearLabel skip;
- if (should_normalize) __ jmp(&skip);
-
- ForwardBailoutStack* current = forward_bailout_stack_;
- while (current != NULL) {
- PrepareForBailout(current->expr(), state);
- current = current->parent();
- }
-
- if (should_normalize) {
- __ cmp(eax, isolate()->factory()->true_value());
- Split(equal, if_true, if_false, NULL);
- __ bind(&skip);
- }
-}
-
-
-void FullCodeGenerator::EmitDeclaration(Variable* variable,
- Variable::Mode mode,
- FunctionLiteral* function) {
- Comment cmnt(masm_, "[ Declaration");
- ASSERT(variable != NULL); // Must have been resolved.
- Slot* slot = variable->AsSlot();
- Property* prop = variable->AsProperty();
-
- if (slot != NULL) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- if (mode == Variable::CONST) {
- __ mov(Operand(ebp, SlotOffset(slot)),
- Immediate(isolate()->factory()->the_hole_value()));
- } else if (function != NULL) {
- VisitForAccumulatorValue(function);
- __ mov(Operand(ebp, SlotOffset(slot)), result_register());
- }
- break;
-
- case Slot::CONTEXT:
- // We bypass the general EmitSlotSearch because we know more about
- // this specific context.
-
- // The variable in the decl always resides in the current function
- // context.
- ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (FLAG_debug_code) {
- // Check that we're not inside a 'with'.
- __ mov(ebx, ContextOperand(esi, Context::FCONTEXT_INDEX));
- __ cmp(ebx, Operand(esi));
- __ Check(equal, "Unexpected declaration in current context.");
- }
- if (mode == Variable::CONST) {
- __ mov(ContextOperand(esi, slot->index()),
- Immediate(isolate()->factory()->the_hole_value()));
- // No write barrier since the hole value is in old space.
- } else if (function != NULL) {
- VisitForAccumulatorValue(function);
- __ mov(ContextOperand(esi, slot->index()), result_register());
- int offset = Context::SlotOffset(slot->index());
- __ mov(ebx, esi);
- __ RecordWrite(ebx, offset, result_register(), ecx);
- }
- break;
-
- case Slot::LOOKUP: {
- __ push(esi);
- __ push(Immediate(variable->name()));
- // Declaration nodes are always introduced in one of two modes.
- ASSERT(mode == Variable::VAR || mode == Variable::CONST);
- PropertyAttributes attr = (mode == Variable::VAR) ? NONE : READ_ONLY;
- __ push(Immediate(Smi::FromInt(attr)));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (mode == Variable::CONST) {
- __ push(Immediate(isolate()->factory()->the_hole_value()));
- } else if (function != NULL) {
- VisitForStackValue(function);
- } else {
- __ push(Immediate(Smi::FromInt(0))); // No initial value!
- }
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-
- } else if (prop != NULL) {
- if (function != NULL || mode == Variable::CONST) {
- // We are declaring a function or constant that rewrites to a
- // property. Use (keyed) IC to set the initial value. We cannot
- // visit the rewrite because it's shared and we risk recording
- // duplicate AST IDs for bailouts from optimized code.
- ASSERT(prop->obj()->AsVariableProxy() != NULL);
- { AccumulatorValueContext for_object(this);
- EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
- }
-
- if (function != NULL) {
- __ push(eax);
- VisitForAccumulatorValue(function);
- __ pop(edx);
- } else {
- __ mov(edx, eax);
- __ mov(eax, isolate()->factory()->the_hole_value());
- }
- ASSERT(prop->key()->AsLiteral() != NULL &&
- prop->key()->AsLiteral()->handle()->IsSmi());
- __ Set(ecx, Immediate(prop->key()->AsLiteral()->handle()));
-
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- }
- }
-}
-
-
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
- EmitDeclaration(decl->proxy()->var(), decl->mode(), decl->fun());
-}
-
-
-void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals.
- __ push(esi); // The context is the first argument.
- __ push(Immediate(pairs));
- __ push(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
- __ push(Immediate(Smi::FromInt(strict_mode_flag())));
- __ CallRuntime(Runtime::kDeclareGlobals, 4);
- // Return value is ignored.
-}
-
-
-void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
- Comment cmnt(masm_, "[ SwitchStatement");
- Breakable nested_statement(this, stmt);
- SetStatementPosition(stmt);
-
- // Keep the switch value on the stack until a case matches.
- VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
-
- ZoneList<CaseClause*>* clauses = stmt->cases();
- CaseClause* default_clause = NULL; // Can occur anywhere in the list.
-
- Label next_test; // Recycled for each test.
- // Compile all the tests with branches to their bodies.
- for (int i = 0; i < clauses->length(); i++) {
- CaseClause* clause = clauses->at(i);
- clause->body_target()->entry_label()->Unuse();
-
- // The default is not a test, but remember it as final fall through.
- if (clause->is_default()) {
- default_clause = clause;
- continue;
- }
-
- Comment cmnt(masm_, "[ Case comparison");
- __ bind(&next_test);
- next_test.Unuse();
-
- // Compile the label expression.
- VisitForAccumulatorValue(clause->label());
-
- // Perform the comparison as if via '==='.
- __ mov(edx, Operand(esp, 0)); // Switch value.
- bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- NearLabel slow_case;
- __ mov(ecx, edx);
- __ or_(ecx, Operand(eax));
- patch_site.EmitJumpIfNotSmi(ecx, &slow_case);
-
- __ cmp(edx, Operand(eax));
- __ j(not_equal, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target()->entry_label());
- __ bind(&slow_case);
- }
-
- // Record position before stub call for type feedback.
- SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- EmitCallIC(ic, &patch_site);
- __ test(eax, Operand(eax));
- __ j(not_equal, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target()->entry_label());
- }
-
- // Discard the test value and jump to the default if present, otherwise to
- // the end of the statement.
- __ bind(&next_test);
- __ Drop(1); // Switch value is no longer needed.
- if (default_clause == NULL) {
- __ jmp(nested_statement.break_target());
- } else {
- __ jmp(default_clause->body_target()->entry_label());
- }
-
- // Compile all the case bodies.
- for (int i = 0; i < clauses->length(); i++) {
- Comment cmnt(masm_, "[ Case body");
- CaseClause* clause = clauses->at(i);
- __ bind(clause->body_target()->entry_label());
- PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
- VisitStatements(clause->statements());
- }
-
- __ bind(nested_statement.break_target());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
- Comment cmnt(masm_, "[ ForInStatement");
- SetStatementPosition(stmt);
-
- Label loop, exit;
- ForIn loop_statement(this, stmt);
- increment_loop_depth();
-
- // Get the object to enumerate over. Both SpiderMonkey and JSC
- // ignore null and undefined in contrast to the specification; see
- // ECMA-262 section 12.6.4.
- VisitForAccumulatorValue(stmt->enumerable());
- __ cmp(eax, isolate()->factory()->undefined_value());
- __ j(equal, &exit);
- __ cmp(eax, isolate()->factory()->null_value());
- __ j(equal, &exit);
-
- // Convert the object to a JS object.
- NearLabel convert, done_convert;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &convert);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(above_equal, &done_convert);
- __ bind(&convert);
- __ push(eax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ bind(&done_convert);
- __ push(eax);
-
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- Label next, call_runtime;
- __ mov(ecx, eax);
- __ bind(&next);
-
- // Check that there are no elements. Register ecx contains the
- // current JS object we've reached through the prototype chain.
- __ cmp(FieldOperand(ecx, JSObject::kElementsOffset),
- isolate()->factory()->empty_fixed_array());
- __ j(not_equal, &call_runtime);
-
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in ebx for the subsequent
- // prototype load.
- __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
- __ cmp(edx, isolate()->factory()->empty_descriptor_array());
- __ j(equal, &call_runtime);
-
- // Check that there is an enum cache in the non-empty instance
- // descriptors (edx). This is the case if the next enumeration
- // index field does not contain a smi.
- __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &call_runtime);
-
- // For all objects but the receiver, check that the cache is empty.
- NearLabel check_prototype;
- __ cmp(ecx, Operand(eax));
- __ j(equal, &check_prototype);
- __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- __ cmp(edx, isolate()->factory()->empty_fixed_array());
- __ j(not_equal, &call_runtime);
-
- // Load the prototype from the map and loop if non-null.
- __ bind(&check_prototype);
- __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
- __ cmp(ecx, isolate()->factory()->null_value());
- __ j(not_equal, &next);
-
- // The enum cache is valid. Load the map of the object being
- // iterated over and use the cache for the iteration.
- NearLabel use_cache;
- __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
- __ jmp(&use_cache);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(eax); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
- // If we got a map from the runtime call, we can do a fast
- // modification check. Otherwise, we got a fixed array, and we have
- // to do a slow check.
- NearLabel fixed_array;
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- isolate()->factory()->meta_map());
- __ j(not_equal, &fixed_array);
-
- // We got a map in register eax. Get the enumeration cache from it.
- __ bind(&use_cache);
- __ mov(ecx, FieldOperand(eax, Map::kInstanceDescriptorsOffset));
- __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
- __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
- // Setup the four remaining stack slots.
- __ push(eax); // Map.
- __ push(edx); // Enumeration cache.
- __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
- __ push(eax); // Enumeration cache length (as smi).
- __ push(Immediate(Smi::FromInt(0))); // Initial index.
- __ jmp(&loop);
-
- // We got a fixed array in register eax. Iterate through that.
- __ bind(&fixed_array);
- __ push(Immediate(Smi::FromInt(0))); // Map (0) - force slow check.
- __ push(eax);
- __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
- __ push(eax); // Fixed array length (as smi).
- __ push(Immediate(Smi::FromInt(0))); // Initial index.
-
- // Generate code for doing the condition check.
- __ bind(&loop);
- __ mov(eax, Operand(esp, 0 * kPointerSize)); // Get the current index.
- __ cmp(eax, Operand(esp, 1 * kPointerSize)); // Compare to the array length.
- __ j(above_equal, loop_statement.break_target());
-
- // Get the current entry of the array into register ebx.
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
- __ mov(ebx, FieldOperand(ebx, eax, times_2, FixedArray::kHeaderSize));
-
- // Get the expected map from the stack or a zero map in the
- // permanent slow case into register edx.
- __ mov(edx, Operand(esp, 3 * kPointerSize));
-
- // Check if the expected map still matches that of the enumerable.
- // If not, we have to filter the key.
- NearLabel update_each;
- __ mov(ecx, Operand(esp, 4 * kPointerSize));
- __ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ j(equal, &update_each);
-
- // Convert the entry to a string or null if it isn't a property
- // anymore. If the property has been removed while iterating, we
- // just skip it.
- __ push(ecx); // Enumerable.
- __ push(ebx); // Current entry.
- __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
- __ test(eax, Operand(eax));
- __ j(equal, loop_statement.continue_target());
- __ mov(ebx, Operand(eax));
-
- // Update the 'each' property or variable from the possibly filtered
- // entry in register ebx.
- __ bind(&update_each);
- __ mov(result_register(), ebx);
- // Perform the assignment as if via '='.
- { EffectContext context(this);
- EmitAssignment(stmt->each(), stmt->AssignmentId());
- }
-
- // Generate code for the body of the loop.
- Visit(stmt->body());
-
- // Generate code for going to the next element by incrementing the
- // index (smi) stored on top of the stack.
- __ bind(loop_statement.continue_target());
- __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
-
- EmitStackCheck(stmt);
- __ jmp(&loop);
-
- // Remove the pointers stored on the stack.
- __ bind(loop_statement.break_target());
- __ add(Operand(esp), Immediate(5 * kPointerSize));
-
- // Exit and decrement the loop depth.
- __ bind(&exit);
- decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
- bool pretenure) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning. If
- // we're running with the --always-opt or the --prepare-always-opt
- // flag, we need to use the runtime function so that the new function
- // we are creating here gets a chance to have its code optimized and
- // doesn't just get a copy of the existing unoptimized code.
- if (!FLAG_always_opt &&
- !FLAG_prepare_always_opt &&
- !pretenure &&
- scope()->is_function_scope() &&
- info->num_literals() == 0) {
- FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
- __ push(Immediate(info));
- __ CallStub(&stub);
- } else {
- __ push(esi);
- __ push(Immediate(info));
- __ push(Immediate(pretenure
- ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value()));
- __ CallRuntime(Runtime::kNewClosure, 3);
- }
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr->var());
-}
-
-
-void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
- Slot* slot,
- TypeofState typeof_state,
- Label* slow) {
- Register context = esi;
- Register temp = edx;
-
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
- }
- // Load next context in chain.
- __ mov(temp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ mov(temp, FieldOperand(temp, JSFunction::kContextOffset));
- // Walk the rest of the chain without clobbering esi.
- context = temp;
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions. If we have reached an eval scope, we check
- // all extensions from this point.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s != NULL && s->is_eval_scope()) {
- // Loop up the context chain. There is no frame effect so it is
- // safe to use raw labels here.
- NearLabel next, fast;
- if (!context.is(temp)) {
- __ mov(temp, context);
- }
- __ bind(&next);
- // Terminate at global context.
- __ cmp(FieldOperand(temp, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->global_context_map()));
- __ j(equal, &fast);
- // Check that extension is NULL.
- __ cmp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
- // Load next context in chain.
- __ mov(temp, ContextOperand(temp, Context::CLOSURE_INDEX));
- __ mov(temp, FieldOperand(temp, JSFunction::kContextOffset));
- __ jmp(&next);
- __ bind(&fast);
- }
-
- // All extension objects were empty and it is safe to use a global
- // load IC call.
- __ mov(eax, GlobalObjectOperand());
- __ mov(ecx, slot->var()->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- EmitCallIC(ic, mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
- Slot* slot,
- Label* slow) {
- ASSERT(slot->type() == Slot::CONTEXT);
- Register context = esi;
- Register temp = ebx;
-
- for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
- }
- __ mov(temp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ mov(temp, FieldOperand(temp, JSFunction::kContextOffset));
- // Walk the rest of the chain without clobbering esi.
- context = temp;
- }
- }
- // Check that last extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
-
- // This function is used only for loads, not stores, so it's safe to
- // return an esi-based operand (the write barrier cannot be allowed to
- // destroy the esi register).
- return ContextOperand(context, slot->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
- Slot* slot,
- TypeofState typeof_state,
- Label* slow,
- Label* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
- EmitLoadGlobalSlotCheckExtensions(slot, typeof_state, slow);
- __ jmp(done);
- } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
- Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
- if (potential_slot != NULL) {
- // Generate fast case for locals that rewrite to slots.
- __ mov(eax,
- ContextSlotOperandCheckExtensions(potential_slot, slow));
- if (potential_slot->var()->mode() == Variable::CONST) {
- __ cmp(eax, isolate()->factory()->the_hole_value());
- __ j(not_equal, done);
- __ mov(eax, isolate()->factory()->undefined_value());
- }
- __ jmp(done);
- } else if (rewrite != NULL) {
- // Generate fast case for calls of an argument function.
- Property* property = rewrite->AsProperty();
- if (property != NULL) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- Literal* key_literal = property->key()->AsLiteral();
- if (obj_proxy != NULL &&
- key_literal != NULL &&
- obj_proxy->IsArguments() &&
- key_literal->handle()->IsSmi()) {
- // Load arguments object if there are no eval-introduced
- // variables. Then load the argument from the arguments
- // object using keyed load.
- __ mov(edx,
- ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
- slow));
- __ mov(eax, Immediate(key_literal->handle()));
- Handle<Code> ic =
- isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- __ jmp(done);
- }
- }
- }
- }
-}
-
-
-void FullCodeGenerator::EmitVariableLoad(Variable* var) {
- // Four cases: non-this global variables, lookup slots, all other
- // types of slots, and parameters that rewrite to explicit property
- // accesses on the arguments object.
- Slot* slot = var->AsSlot();
- Property* property = var->AsProperty();
-
- if (var->is_global() && !var->is_this()) {
- Comment cmnt(masm_, "Global variable");
- // Use inline caching. Variable name is passed in ecx and the global
- // object on the stack.
- __ mov(eax, GlobalObjectOperand());
- __ mov(ecx, var->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
- context()->Plug(eax);
-
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- Label done, slow;
-
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLoadFromSlotFastCase(slot, NOT_INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- Comment cmnt(masm_, "Lookup slot");
- __ push(esi); // Context.
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ bind(&done);
-
- context()->Plug(eax);
-
- } else if (slot != NULL) {
- Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
- ? "Context slot"
- : "Stack slot");
- if (var->mode() == Variable::CONST) {
- // Constants may be the hole value if they have not been initialized.
- // Unhole them.
- NearLabel done;
- MemOperand slot_operand = EmitSlotSearch(slot, eax);
- __ mov(eax, slot_operand);
- __ cmp(eax, isolate()->factory()->the_hole_value());
- __ j(not_equal, &done);
- __ mov(eax, isolate()->factory()->undefined_value());
- __ bind(&done);
- context()->Plug(eax);
- } else {
- context()->Plug(slot);
- }
-
- } else {
- Comment cmnt(masm_, "Rewritten parameter");
- ASSERT_NOT_NULL(property);
- // Rewritten parameter accesses are of the form "slot[literal]".
-
- // Assert that the object is in a slot.
- Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(object_var);
- Slot* object_slot = object_var->AsSlot();
- ASSERT_NOT_NULL(object_slot);
-
- // Load the object.
- MemOperand object_loc = EmitSlotSearch(object_slot, eax);
- __ mov(edx, object_loc);
-
- // Assert that the key is a smi.
- Literal* key_literal = property->key()->AsLiteral();
- ASSERT_NOT_NULL(key_literal);
- ASSERT(key_literal->handle()->IsSmi());
-
- // Load the key.
- __ mov(eax, Immediate(key_literal->handle()));
-
- // Do a keyed property load.
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
-
- // Drop key and object left on the stack by IC.
- context()->Plug(eax);
- }
-}
-
-
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- NearLabel materialized;
- // Registers will be used as follows:
- // edi = JS function.
- // ecx = literals array.
- // ebx = regexp literal.
- // eax = regexp literal clone.
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ mov(ebx, FieldOperand(ecx, literal_offset));
- __ cmp(ebx, isolate()->factory()->undefined_value());
- __ j(not_equal, &materialized);
-
- // Create regexp literal using runtime function
- // Result will be in eax.
- __ push(ecx);
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(expr->pattern()));
- __ push(Immediate(expr->flags()));
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mov(ebx, eax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(ebx);
- __ push(Immediate(Smi::FromInt(size)));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(ebx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ mov(edx, FieldOperand(ebx, i));
- __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
- __ mov(FieldOperand(eax, i), edx);
- __ mov(FieldOperand(eax, i + kPointerSize), ecx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ mov(edx, FieldOperand(ebx, size - kPointerSize));
- __ mov(FieldOperand(eax, size - kPointerSize), edx);
- }
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- Comment cmnt(masm_, "[ ObjectLiteral");
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(expr->constant_properties()));
- int flags = expr->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- flags |= expr->has_function()
- ? ObjectLiteral::kHasFunction
- : ObjectLiteral::kNoFlags;
- __ push(Immediate(Smi::FromInt(flags)));
- if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
- }
-
- // If result_saved is true the result is on top of the stack. If
- // result_saved is false the result is in eax.
- bool result_saved = false;
-
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- expr->CalculateEmitStore();
-
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
- if (property->IsCompileTimeValue()) continue;
-
- Literal* key = property->key();
- Expression* value = property->value();
- if (!result_saved) {
- __ push(eax); // Save result on the stack
- result_saved = true;
- }
- switch (property->kind()) {
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
- // Fall through.
- case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
- if (property->emit_store()) {
- VisitForAccumulatorValue(value);
- __ mov(ecx, Immediate(key->handle()));
- __ mov(edx, Operand(esp, 0));
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- PrepareForBailoutForId(key->id(), NO_REGISTERS);
- } else {
- VisitForEffect(value);
- }
- break;
- }
- // Fall through.
- case ObjectLiteral::Property::PROTOTYPE:
- __ push(Operand(esp, 0)); // Duplicate receiver.
- VisitForStackValue(key);
- VisitForStackValue(value);
- if (property->emit_store()) {
- __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
- __ CallRuntime(Runtime::kSetProperty, 4);
- } else {
- __ Drop(3);
- }
- break;
- case ObjectLiteral::Property::SETTER:
- case ObjectLiteral::Property::GETTER:
- __ push(Operand(esp, 0)); // Duplicate receiver.
- VisitForStackValue(key);
- __ push(Immediate(property->kind() == ObjectLiteral::Property::SETTER ?
- Smi::FromInt(1) :
- Smi::FromInt(0)));
- VisitForStackValue(value);
- __ CallRuntime(Runtime::kDefineAccessor, 4);
- break;
- default: UNREACHABLE();
- }
- }
-
- if (expr->has_function()) {
- ASSERT(result_saved);
- __ push(Operand(esp, 0));
- __ CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(eax);
- }
-}
-
-
-void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- ZoneList<Expression*>* subexprs = expr->values();
- int length = subexprs->length();
-
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(expr->constant_elements()));
- if (expr->constant_elements()->map() ==
- isolate()->heap()->fixed_cow_array_map()) {
- ASSERT(expr->depth() == 1);
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
- __ CallStub(&stub);
- __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
- } else if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- } else {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
- __ CallStub(&stub);
- }
-
- bool result_saved = false; // Is the result saved to the stack?
-
- // Emit code to evaluate all the non-constant subexpressions and to store
- // them into the newly cloned array.
- for (int i = 0; i < length; i++) {
- Expression* subexpr = subexprs->at(i);
- // If the subexpression is a literal or a simple materialized literal it
- // is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
-
- if (!result_saved) {
- __ push(eax);
- result_saved = true;
- }
- VisitForAccumulatorValue(subexpr);
-
- // Store the subexpression value in the array's elements.
- __ mov(ebx, Operand(esp, 0)); // Copy of array literal.
- __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ mov(FieldOperand(ebx, offset), result_register());
-
- // Update the write barrier for the array store.
- __ RecordWrite(ebx, offset, result_register(), ecx);
-
- PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(eax);
- }
-}
-
-
-void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* property = expr->target()->AsProperty();
- if (property != NULL) {
- assign_type = (property->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- // Evaluate LHS expression.
- switch (assign_type) {
- case VARIABLE:
- // Nothing to do here.
- break;
- case NAMED_PROPERTY:
- if (expr->is_compound()) {
- // We need the receiver both on the stack and in the accumulator.
- VisitForAccumulatorValue(property->obj());
- __ push(result_register());
- } else {
- VisitForStackValue(property->obj());
- }
- break;
- case KEYED_PROPERTY: {
- if (expr->is_compound()) {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
- __ push(slot_operand);
- __ mov(eax, Immediate(property->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
- }
- __ mov(edx, Operand(esp, 0));
- __ push(eax);
- } else {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
- __ push(slot_operand);
- __ push(Immediate(property->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
- }
- break;
- }
- }
-
- // For compound assignments we need another deoptimization point after the
- // variable/property load.
- if (expr->is_compound()) {
- { AccumulatorValueContext context(this);
- switch (assign_type) {
- case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy()->var());
- PrepareForBailout(expr->target(), TOS_REG);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
- break;
- }
- }
-
- Token::Value op = expr->binary_op();
- __ push(eax); // Left operand goes on the stack.
- VisitForAccumulatorValue(expr->value());
-
- OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
- ? OVERWRITE_RIGHT
- : NO_OVERWRITE;
- SetSourcePosition(expr->position() + 1);
- AccumulatorValueContext context(this);
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr,
- op,
- mode,
- expr->target(),
- expr->value());
- } else {
- EmitBinaryOp(op, mode);
- }
-
- // Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), TOS_REG);
- } else {
- VisitForAccumulatorValue(expr->value());
- }
-
- // Record source position before possible IC call.
- SetSourcePosition(expr->position());
-
- // Store the value.
- switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(eax);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyAssignment(expr);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyAssignment(expr);
- break;
- }
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Literal* key = prop->key()->AsLiteral();
- __ mov(ecx, Immediate(key->handle()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
-}
-
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
- Token::Value op,
- OverwriteMode mode,
- Expression* left,
- Expression* right) {
- // Do combined smi check of the operands. Left operand is on the
- // stack. Right operand is in eax.
- NearLabel done, smi_case, stub_call;
- __ pop(edx);
- __ mov(ecx, eax);
- __ or_(eax, Operand(edx));
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(eax, &smi_case);
-
- __ bind(&stub_call);
- __ mov(eax, ecx);
- TypeRecordingBinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), &patch_site);
- __ jmp(&done);
-
- // Smi case.
- __ bind(&smi_case);
- __ mov(eax, edx); // Copy left operand in case of a stub call.
-
- switch (op) {
- case Token::SAR:
- __ SmiUntag(eax);
- __ SmiUntag(ecx);
- __ sar_cl(eax); // No checks of result necessary
- __ SmiTag(eax);
- break;
- case Token::SHL: {
- Label result_ok;
- __ SmiUntag(eax);
- __ SmiUntag(ecx);
- __ shl_cl(eax);
- // Check that the *signed* result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(positive, &result_ok);
- __ SmiTag(ecx);
- __ jmp(&stub_call);
- __ bind(&result_ok);
- __ SmiTag(eax);
- break;
- }
- case Token::SHR: {
- Label result_ok;
- __ SmiUntag(eax);
- __ SmiUntag(ecx);
- __ shr_cl(eax);
- __ test(eax, Immediate(0xc0000000));
- __ j(zero, &result_ok);
- __ SmiTag(ecx);
- __ jmp(&stub_call);
- __ bind(&result_ok);
- __ SmiTag(eax);
- break;
- }
- case Token::ADD:
- __ add(eax, Operand(ecx));
- __ j(overflow, &stub_call);
- break;
- case Token::SUB:
- __ sub(eax, Operand(ecx));
- __ j(overflow, &stub_call);
- break;
- case Token::MUL: {
- __ SmiUntag(eax);
- __ imul(eax, Operand(ecx));
- __ j(overflow, &stub_call);
- __ test(eax, Operand(eax));
- __ j(not_zero, &done, taken);
- __ mov(ebx, edx);
- __ or_(ebx, Operand(ecx));
- __ j(negative, &stub_call);
- break;
- }
- case Token::BIT_OR:
- __ or_(eax, Operand(ecx));
- break;
- case Token::BIT_AND:
- __ and_(eax, Operand(ecx));
- break;
- case Token::BIT_XOR:
- __ xor_(eax, Operand(ecx));
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
- OverwriteMode mode) {
- __ pop(edx);
- TypeRecordingBinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), NULL); // NULL signals no inlined smi code.
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
- // Invalid left-hand sides are rewritten to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->AsProperty();
- if (prop != NULL) {
- assign_type = (prop->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- switch (assign_type) {
- case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
- EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN);
- break;
- }
- case NAMED_PROPERTY: {
- __ push(eax); // Preserve value.
- VisitForAccumulatorValue(prop->obj());
- __ mov(edx, eax);
- __ pop(eax); // Restore value.
- __ mov(ecx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- break;
- }
- case KEYED_PROPERTY: {
- __ push(eax); // Preserve value.
- if (prop->is_synthetic()) {
- ASSERT(prop->obj()->AsVariableProxy() != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
- { AccumulatorValueContext for_object(this);
- EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
- }
- __ mov(edx, eax);
- __ Set(ecx, Immediate(prop->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ mov(ecx, eax);
- __ pop(edx);
- }
- __ pop(eax); // Restore value.
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- break;
- }
- }
- PrepareForBailoutForId(bailout_ast_id, TOS_REG);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
- // Left-hand sides that rewrite to explicit property accesses do not reach
- // here.
- ASSERT(var != NULL);
- ASSERT(var->is_global() || var->AsSlot() != NULL);
-
- if (var->is_global()) {
- ASSERT(!var->is_this());
- // Assignment to a global variable. Use inline caching for the
- // assignment. Right-hand-side value is passed in eax, variable name in
- // ecx, and the global object on the stack.
- __ mov(ecx, var->name());
- __ mov(edx, GlobalObjectOperand());
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
-
- } else if (op == Token::INIT_CONST) {
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are able
- // to drill a hole to that function context, even from inside a 'with'
- // context. We thus bypass the normal static scope lookup.
- Slot* slot = var->AsSlot();
- Label skip;
- switch (slot->type()) {
- case Slot::PARAMETER:
- // No const parameters.
- UNREACHABLE();
- break;
- case Slot::LOCAL:
- __ mov(edx, Operand(ebp, SlotOffset(slot)));
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(not_equal, &skip);
- __ mov(Operand(ebp, SlotOffset(slot)), eax);
- break;
- case Slot::CONTEXT: {
- __ mov(ecx, ContextOperand(esi, Context::FCONTEXT_INDEX));
- __ mov(edx, ContextOperand(ecx, slot->index()));
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(not_equal, &skip);
- __ mov(ContextOperand(ecx, slot->index()), eax);
- int offset = Context::SlotOffset(slot->index());
- __ mov(edx, eax); // Preserve the stored value in eax.
- __ RecordWrite(ecx, offset, edx, ebx);
- break;
- }
- case Slot::LOOKUP:
- __ push(eax);
- __ push(esi);
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- break;
- }
- __ bind(&skip);
-
- } else if (var->mode() != Variable::CONST) {
- // Perform the assignment for non-const variables. Const assignments
- // are simply skipped.
- Slot* slot = var->AsSlot();
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- // Perform the assignment.
- __ mov(Operand(ebp, SlotOffset(slot)), eax);
- break;
-
- case Slot::CONTEXT: {
- MemOperand target = EmitSlotSearch(slot, ecx);
- // Perform the assignment and issue the write barrier.
- __ mov(target, eax);
- // The value of the assignment is in eax. RecordWrite clobbers its
- // register arguments.
- __ mov(edx, eax);
- int offset = Context::SlotOffset(slot->index());
- __ RecordWrite(ecx, offset, edx, ebx);
- break;
- }
-
- case Slot::LOOKUP:
- // Call the runtime for the assignment.
- __ push(eax); // Value.
- __ push(esi); // Context.
- __ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(strict_mode_flag())));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a named store IC.
- Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
-
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- __ push(result_register());
- __ push(Operand(esp, kPointerSize)); // Receiver is now under value.
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- __ pop(result_register());
- }
-
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
- __ mov(ecx, prop->key()->AsLiteral()->handle());
- if (expr->ends_initialization_block()) {
- __ mov(edx, Operand(esp, 0));
- } else {
- __ pop(edx);
- }
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
-
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ push(eax); // Result of assignment, saved even if not needed.
- __ push(Operand(esp, kPointerSize)); // Receiver is under value.
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(eax);
- __ Drop(1);
- }
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a keyed store IC.
-
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- __ push(result_register());
- // Receiver is now under the key and value.
- __ push(Operand(esp, 2 * kPointerSize));
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- __ pop(result_register());
- }
-
- __ pop(ecx);
- if (expr->ends_initialization_block()) {
- __ mov(edx, Operand(esp, 0)); // Leave receiver on the stack for later.
- } else {
- __ pop(edx);
- }
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
-
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ pop(edx);
- __ push(eax); // Result of assignment, saved even if not needed.
- __ push(edx);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(eax);
- }
-
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- Expression* key = expr->key();
-
- if (key->IsPropertyName()) {
- VisitForAccumulatorValue(expr->obj());
- EmitNamedPropertyLoad(expr);
- context()->Plug(eax);
- } else {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ pop(edx);
- EmitKeyedPropertyLoad(expr);
- context()->Plug(eax);
- }
-}
-
-
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- __ Set(ecx, Immediate(name));
- }
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
- InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
- arg_count, in_loop);
- EmitCallIC(ic, mode);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key,
- RelocInfo::Mode mode) {
- // Load the key.
- VisitForAccumulatorValue(key);
-
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- __ pop(ecx);
- __ push(eax);
- __ push(ecx);
-
- // Load the arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
- InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(
- arg_count, in_loop);
- __ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize)); // Key.
- EmitCallIC(ic, mode);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, eax); // Drop the key still on the stack.
-}
-
-
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
- // Code common for calls using the call stub.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, eax);
-}
-
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
- int arg_count) {
- // Push copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ push(Operand(esp, arg_count * kPointerSize));
- } else {
- __ push(Immediate(isolate()->factory()->undefined_value()));
- }
-
- // Push the receiver of the enclosing function.
- __ push(Operand(ebp, (2 + scope()->num_parameters()) * kPointerSize));
-
- // Push the strict mode flag.
- __ push(Immediate(Smi::FromInt(strict_mode_flag())));
-
- __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
- ? Runtime::kResolvePossiblyDirectEvalNoLookup
- : Runtime::kResolvePossiblyDirectEval, 4);
-}
-
-
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* fun = expr->expression();
- Variable* var = fun->AsVariableProxy()->AsVariable();
-
- if (var != NULL && var->is_possibly_eval()) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
- // call. Then we call the resolved function using the given
- // arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope pos_scope(masm()->positions_recorder());
- VisitForStackValue(fun);
- // Reserved receiver slot.
- __ push(Immediate(isolate()->factory()->undefined_value()));
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // If we know that eval can only be shadowed by eval-introduced
- // variables we attempt to load the global eval function directly
- // in generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- Label done;
- if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
- Label slow;
- EmitLoadGlobalSlotCheckExtensions(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &slow);
- // Push the function and resolve eval.
- __ push(eax);
- EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
- __ jmp(&done);
- __ bind(&slow);
- }
-
- // Push copy of the function (found below the arguments) and
- // resolve eval.
- __ push(Operand(esp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
- if (done.is_linked()) {
- __ bind(&done);
- }
-
- // The runtime call returns a pair of values in eax (function) and
- // edx (receiver). Touch up the stack with the right values.
- __ mov(Operand(esp, (arg_count + 0) * kPointerSize), edx);
- __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, eax);
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // Push global object as receiver for the call IC.
- __ push(GlobalObjectOperand());
- EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (var != NULL && var->AsSlot() != NULL &&
- var->AsSlot()->type() == Slot::LOOKUP) {
- // Call to a lookup slot (dynamically introduced variable).
- Label slow, done;
-
- { PreservePositionScope scope(masm()->positions_recorder());
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &slow,
- &done);
- }
-
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in eax)
- // and the object holding it (returned in edx).
- __ push(context_register());
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ push(eax); // Function.
- __ push(edx); // Receiver.
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- Label call;
- __ jmp(&call);
- __ bind(&done);
- // Push function.
- __ push(eax);
- // Push global receiver.
- __ mov(ebx, GlobalObjectOperand());
- __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
- __ bind(&call);
- }
-
- EmitCallWithStub(expr);
- } else if (fun->AsProperty() != NULL) {
- // Call to an object property.
- Property* prop = fun->AsProperty();
- Literal* key = prop->key()->AsLiteral();
- if (key != NULL && key->handle()->IsSymbol()) {
- // Call to a named property, use call IC.
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(prop->obj());
- }
- EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
- } else {
- // Call to a keyed property.
- // For a synthetic property use keyed load IC followed by function call,
- // for a regular property use keyed EmitCallIC.
- if (prop->is_synthetic()) {
- // Do not visit the object and key subexpressions (they are shared
- // by all occurrences of the same rewritten parameter).
- ASSERT(prop->obj()->AsVariableProxy() != NULL);
- ASSERT(prop->obj()->AsVariableProxy()->var()->AsSlot() != NULL);
- Slot* slot = prop->obj()->AsVariableProxy()->var()->AsSlot();
- MemOperand operand = EmitSlotSearch(slot, edx);
- __ mov(edx, operand);
-
- ASSERT(prop->key()->AsLiteral() != NULL);
- ASSERT(prop->key()->AsLiteral()->handle()->IsSmi());
- __ mov(eax, prop->key()->AsLiteral()->handle());
-
- // Record source code position for IC call.
- SetSourcePosition(prop->position());
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- // Push result (function).
- __ push(eax);
- // Push Global receiver.
- __ mov(ecx, GlobalObjectOperand());
- __ push(FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
- EmitCallWithStub(expr);
- } else {
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(prop->obj());
- }
- EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
- }
- }
- } else {
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(fun);
- }
- // Load global receiver object.
- __ mov(ebx, GlobalObjectOperand());
- __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
- // Emit function call.
- EmitCallWithStub(expr);
- }
-
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- ASSERT(expr->return_is_recorded_);
-#endif
-}
-
-
-void FullCodeGenerator::VisitCallNew(CallNew* expr) {
- Comment cmnt(masm_, "[ CallNew");
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments.
-
- // Push constructor on the stack. If it's not a function it's used as
- // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
- // ignored.
- VisitForStackValue(expr->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetSourcePosition(expr->position());
-
- // Load function and argument count into edi and eax.
- __ Set(eax, Immediate(arg_count));
- __ mov(edi, Operand(esp, arg_count * kPointerSize));
-
- Handle<Code> construct_builtin =
- isolate()->builtins()->JSConstructCall();
- __ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ test(eax, Immediate(kSmiTagMask));
- Split(zero, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ test(eax, Immediate(kSmiTagMask | 0x80000000));
- Split(zero, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_false);
- __ cmp(eax, isolate()->factory()->null_value());
- __ j(equal, if_true);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ movzx_b(ecx, FieldOperand(ebx, Map::kBitFieldOffset));
- __ test(ecx, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_false);
- __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(below, if_false);
- __ cmp(ecx, LAST_JS_OBJECT_TYPE);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(below_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ test(eax, Immediate(kSmiTagMask));
- __ j(equal, if_false);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ebx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(above_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_false);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
- __ test(ebx, Immediate(1 << Map::kIsUndetectable));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(not_zero, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- if (FLAG_debug_code) __ AbortIfSmi(eax);
-
- // Check whether this map has already been checked to be safe for default
- // valueOf.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ebx, Map::kBitField2Offset),
- 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ j(not_zero, if_true);
-
- // Check for fast case object. Return false for slow case objects.
- __ mov(ecx, FieldOperand(eax, JSObject::kPropertiesOffset));
- __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ cmp(ecx, FACTORY->hash_table_map());
- __ j(equal, if_false);
-
- // Look for valueOf symbol in the descriptor array, and indicate false if
- // found. The type is not checked, so if it is a transition it is a false
- // negative.
- __ mov(ebx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
- __ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
- // ebx: descriptor array
- // ecx: length of descriptor array
- // Calculate the end of the descriptor array.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kPointerSize == 4);
- __ lea(ecx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
- // Calculate location of the first key name.
- __ add(Operand(ebx),
- Immediate(FixedArray::kHeaderSize +
- DescriptorArray::kFirstIndex * kPointerSize));
- // Loop through all the keys in the descriptor array. If one of these is the
- // symbol valueOf the result is false.
- Label entry, loop;
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(edx, FieldOperand(ebx, 0));
- __ cmp(edx, FACTORY->value_of_symbol());
- __ j(equal, if_false);
- __ add(Operand(ebx), Immediate(kPointerSize));
- __ bind(&entry);
- __ cmp(ebx, Operand(ecx));
- __ j(not_equal, &loop);
-
- // Reload map as register ebx was used as temporary above.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-
- // If a valueOf property is not found on the object check that it's
- // prototype is the un-modified String prototype. If not result is false.
- __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(zero, if_false);
- __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(edx,
- FieldOperand(edx, GlobalObject::kGlobalContextOffset));
- __ cmp(ecx,
- ContextOperand(edx,
- Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, if_false);
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ or_(FieldOperand(ebx, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ jmp(if_true);
-
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_false);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ test(eax, Immediate(kSmiTagMask));
- __ j(equal, if_false);
- __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ test(eax, Immediate(kSmiTagMask));
- __ j(equal, if_false);
- __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ cmp(Operand(eax, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &check_frame_marker);
- __ mov(eax, Operand(eax, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ pop(ebx);
- __ cmp(eax, Operand(ebx));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in edx and the formal
- // parameter count in eax.
- VisitForAccumulatorValue(args->at(0));
- __ mov(edx, eax);
- __ mov(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- Label exit;
- // Get the number of formal parameters.
- __ Set(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &exit);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- if (FLAG_debug_code) __ AbortIfNotSmi(eax);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Label done, null, function, non_function_constructor;
-
- VisitForAccumulatorValue(args->at(0));
-
- // If the object is a smi, we return null.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, eax); // Map is now in eax.
- __ j(below, &null);
-
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
- __ j(equal, &function);
-
- // Check if the constructor in the map is a function.
- __ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &non_function_constructor);
-
- // eax now contains the constructor function. Grab the
- // instance class name from there.
- __ mov(eax, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
- __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kInstanceClassNameOffset));
- __ jmp(&done);
-
- // Functions have class 'Function'.
- __ bind(&function);
- __ mov(eax, isolate()->factory()->function_class_symbol());
- __ jmp(&done);
-
- // Objects with a non-function constructor have class 'Object'.
- __ bind(&non_function_constructor);
- __ mov(eax, isolate()->factory()->Object_symbol());
- __ jmp(&done);
-
- // Non-JS objects have class null.
- __ bind(&null);
- __ mov(eax, isolate()->factory()->null_value());
-
- // All done.
- __ bind(&done);
-
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
- }
-#endif
- // Finally, we're expected to leave a value on the top of the stack.
- __ mov(eax, isolate()->factory()->undefined_value());
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ AllocateHeapNumber(edi, ebx, ecx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(edi, eax);
-
- __ bind(&heapnumber_allocated);
-
- __ PrepareCallCFunction(1, ebx);
- __ mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()),
- 1);
-
- // Convert 32 random bits in eax to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- // This is implemented on both SSE2 and FPU.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm1, Operand(ebx));
- __ movd(xmm0, Operand(eax));
- __ cvtss2sd(xmm1, xmm1);
- __ pxor(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
- __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
- } else {
- // 0x4130000000000000 is 1.0 x 2^20 as a double.
- __ mov(FieldOperand(edi, HeapNumber::kExponentOffset),
- Immediate(0x41300000));
- __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), eax);
- __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
- __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), Immediate(0));
- __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
- __ fsubp(1);
- __ fstp_d(FieldOperand(edi, HeapNumber::kValueOffset));
- }
- __ mov(eax, edi);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
- // Load the arguments on the stack and call the stub.
- SubStringStub stub;
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
- // Load the arguments on the stack and call the stub.
- RegExpExecStub stub;
- ASSERT(args->length() == 4);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- VisitForStackValue(args->at(3));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- NearLabel done;
- // If the object is a smi return the object.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done);
- // If the object is not a value type, return the object.
- __ CmpObjectType(eax, JS_VALUE_TYPE, ebx);
- __ j(not_equal, &done);
- __ mov(eax, FieldOperand(eax, JSValue::kValueOffset));
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
- // Load the arguments on the stack and call the runtime function.
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- if (CpuFeatures::IsSupported(SSE2)) {
- MathPowStub stub;
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kMath_pow, 2);
- }
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0)); // Load the object.
- VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(ebx); // eax = value. ebx = object.
-
- NearLabel done;
- // If the object is a smi, return the value.
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &done);
-
- // If the object is not a value type, return the value.
- __ CmpObjectType(ebx, JS_VALUE_TYPE, ecx);
- __ j(not_equal, &done);
-
- // Store the value.
- __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- __ mov(edx, eax);
- __ RecordWrite(ebx, JSValue::kValueOffset, edx, ecx);
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
-
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
-
- NumberToStringStub stub;
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label done;
- StringCharFromCodeGenerator generator(eax, ebx);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(ebx);
-}
-
-
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = ebx;
- Register index = eax;
- Register scratch = ecx;
- Register result = edx;
-
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharCodeAtGenerator generator(object,
- index,
- scratch,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ Set(result, Immediate(isolate()->factory()->nan_value()));
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move the undefined value into the result register, which will
- // trigger conversion.
- __ Set(result, Immediate(isolate()->factory()->undefined_value()));
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = ebx;
- Register index = eax;
- Register scratch1 = ecx;
- Register scratch2 = edx;
- Register result = eax;
-
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch1,
- scratch2,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ Set(result, Immediate(isolate()->factory()->empty_string()));
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ Set(result, Immediate(Smi::FromInt(0)));
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringCompareStub stub;
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
- // Load the argument on the stack and call the runtime function.
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sqrt, 1);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
- ASSERT(args->length() >= 2);
-
- int arg_count = args->length() - 2; // For receiver and function.
- VisitForStackValue(args->at(0)); // Receiver.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i + 1));
- }
- VisitForAccumulatorValue(args->at(arg_count + 1)); // Function.
-
- // InvokeFunction requires function in edi. Move it in there.
- if (!result_register().is(edi)) __ mov(edi, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(edi, count, CALL_FUNCTION);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
- // Load the arguments on the stack and call the stub.
- RegExpConstructResultStub stub;
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- Label done;
- Label slow_case;
- Register object = eax;
- Register index_1 = ebx;
- Register index_2 = ecx;
- Register elements = edi;
- Register temp = edx;
- __ mov(object, Operand(esp, 2 * kPointerSize));
- // Fetch the map and check if array is in fast case.
- // Check that object doesn't require security checks and
- // has no indexed interceptor.
- __ CmpObjectType(object, JS_ARRAY_TYPE, temp);
- __ j(not_equal, &slow_case);
- __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
- KeyedLoadIC::kSlowCaseBitFieldMask);
- __ j(not_zero, &slow_case);
-
- // Check the object's elements are in fast case and writable.
- __ mov(elements, FieldOperand(object, JSObject::kElementsOffset));
- __ cmp(FieldOperand(elements, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->fixed_array_map()));
- __ j(not_equal, &slow_case);
-
- // Check that both indices are smis.
- __ mov(index_1, Operand(esp, 1 * kPointerSize));
- __ mov(index_2, Operand(esp, 0));
- __ mov(temp, index_1);
- __ or_(temp, Operand(index_2));
- __ test(temp, Immediate(kSmiTagMask));
- __ j(not_zero, &slow_case);
-
- // Check that both indices are valid.
- __ mov(temp, FieldOperand(object, JSArray::kLengthOffset));
- __ cmp(temp, Operand(index_1));
- __ j(below_equal, &slow_case);
- __ cmp(temp, Operand(index_2));
- __ j(below_equal, &slow_case);
-
- // Bring addresses into index1 and index2.
- __ lea(index_1, CodeGenerator::FixedArrayElementOperand(elements, index_1));
- __ lea(index_2, CodeGenerator::FixedArrayElementOperand(elements, index_2));
-
- // Swap elements. Use object and temp as scratch registers.
- __ mov(object, Operand(index_1, 0));
- __ mov(temp, Operand(index_2, 0));
- __ mov(Operand(index_2, 0), object);
- __ mov(Operand(index_1, 0), temp);
-
- Label new_space;
- __ InNewSpace(elements, temp, equal, &new_space);
-
- __ mov(object, elements);
- __ RecordWriteHelper(object, index_1, temp);
- __ RecordWriteHelper(elements, index_2, temp);
-
- __ bind(&new_space);
- // We are done. Drop elements from the stack, and return undefined.
- __ add(Operand(esp), Immediate(3 * kPointerSize));
- __ mov(eax, isolate()->factory()->undefined_value());
- __ jmp(&done);
-
- __ bind(&slow_case);
- __ CallRuntime(Runtime::kSwapElements, 3);
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- isolate()->global_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
- __ mov(eax, isolate()->factory()->undefined_value());
- context()->Plug(eax);
- return;
- }
-
- VisitForAccumulatorValue(args->at(1));
-
- Register key = eax;
- Register cache = ebx;
- Register tmp = ecx;
- __ mov(cache, ContextOperand(esi, Context::GLOBAL_INDEX));
- __ mov(cache,
- FieldOperand(cache, GlobalObject::kGlobalContextOffset));
- __ mov(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ mov(cache,
- FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
-
- Label done, not_found;
- // tmp now holds finger offset as a smi.
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ mov(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
- __ cmp(key, CodeGenerator::FixedArrayElementOperand(cache, tmp));
- __ j(not_equal, &not_found);
-
- __ mov(eax, CodeGenerator::FixedArrayElementOperand(cache, tmp, 1));
- __ jmp(&done);
-
- __ bind(&not_found);
- // Call runtime to perform the lookup.
- __ push(cache);
- __ push(key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Register right = eax;
- Register left = ebx;
- Register tmp = ecx;
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
- __ pop(left);
-
- Label done, fail, ok;
- __ cmp(left, Operand(right));
- __ j(equal, &ok);
- // Fail if either is a non-HeapObject.
- __ mov(tmp, left);
- __ and_(Operand(tmp), right);
- __ test(Operand(tmp), Immediate(kSmiTagMask));
- __ j(zero, &fail);
- __ mov(tmp, FieldOperand(left, HeapObject::kMapOffset));
- __ CmpInstanceType(tmp, JS_REGEXP_TYPE);
- __ j(not_equal, &fail);
- __ cmp(tmp, FieldOperand(right, HeapObject::kMapOffset));
- __ j(not_equal, &fail);
- __ mov(tmp, FieldOperand(left, JSRegExp::kDataOffset));
- __ cmp(tmp, FieldOperand(right, JSRegExp::kDataOffset));
- __ j(equal, &ok);
- __ bind(&fail);
- __ mov(eax, Immediate(isolate()->factory()->false_value()));
- __ jmp(&done);
- __ bind(&ok);
- __ mov(eax, Immediate(isolate()->factory()->true_value()));
- __ bind(&done);
-
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- if (FLAG_debug_code) {
- __ AbortIfNotString(eax);
- }
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ test(FieldOperand(eax, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(zero, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- if (FLAG_debug_code) {
- __ AbortIfNotString(eax);
- }
-
- __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
- __ IndexFromHash(eax, eax);
-
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
- Label bailout, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
-
- ASSERT(args->length() == 2);
- // We will leave the separator on the stack until the end of the function.
- VisitForStackValue(args->at(1));
- // Load this to eax (= array)
- VisitForAccumulatorValue(args->at(0));
- // All aliases of the same register have disjoint lifetimes.
- Register array = eax;
- Register elements = no_reg; // Will be eax.
-
- Register index = edx;
-
- Register string_length = ecx;
-
- Register string = esi;
-
- Register scratch = ebx;
-
- Register array_length = edi;
- Register result_pos = no_reg; // Will be edi.
-
- // Separator operand is already pushed.
- Operand separator_operand = Operand(esp, 2 * kPointerSize);
- Operand result_operand = Operand(esp, 1 * kPointerSize);
- Operand array_length_operand = Operand(esp, 0);
- __ sub(Operand(esp), Immediate(2 * kPointerSize));
- __ cld();
- // Check that the array is a JSArray
- __ test(array, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
- __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &bailout);
-
- // Check that the array has fast elements.
- __ test_b(FieldOperand(scratch, Map::kBitField2Offset),
- 1 << Map::kHasFastElements);
- __ j(zero, &bailout);
-
- // If the array has length zero, return the empty string.
- __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
- __ SmiUntag(array_length);
- __ j(not_zero, &non_trivial_array);
- __ mov(result_operand, isolate()->factory()->empty_string());
- __ jmp(&done);
-
- // Save the array length.
- __ bind(&non_trivial_array);
- __ mov(array_length_operand, array_length);
-
- // Save the FixedArray containing array's elements.
- // End of array's live range.
- elements = array;
- __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
- array = no_reg;
-
-
- // Check that all array elements are sequential ASCII strings, and
- // accumulate the sum of their lengths, as a smi-encoded value.
- __ Set(index, Immediate(0));
- __ Set(string_length, Immediate(0));
- // Loop condition: while (index < length).
- // Live loop registers: index, array_length, string,
- // scratch, string_length, elements.
- if (FLAG_debug_code) {
- __ cmp(index, Operand(array_length));
- __ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin");
- }
- __ bind(&loop);
- __ mov(string, FieldOperand(elements,
- index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ test(string, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
- __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
- __ j(not_equal, &bailout);
- __ add(string_length,
- FieldOperand(string, SeqAsciiString::kLengthOffset));
- __ j(overflow, &bailout);
- __ add(Operand(index), Immediate(1));
- __ cmp(index, Operand(array_length));
- __ j(less, &loop);
-
- // If array_length is 1, return elements[0], a string.
- __ cmp(array_length, 1);
- __ j(not_equal, &not_size_one_array);
- __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
- __ mov(result_operand, scratch);
- __ jmp(&done);
-
- __ bind(&not_size_one_array);
-
- // End of array_length live range.
- result_pos = array_length;
- array_length = no_reg;
-
- // Live registers:
- // string_length: Sum of string lengths, as a smi.
- // elements: FixedArray of strings.
-
- // Check that the separator is a flat ASCII string.
- __ mov(string, separator_operand);
- __ test(string, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
- __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, ASCII_STRING_TYPE);
- __ j(not_equal, &bailout);
-
- // Add (separator length times array_length) - separator length
- // to string_length.
- __ mov(scratch, separator_operand);
- __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
- __ sub(string_length, Operand(scratch)); // May be negative, temporarily.
- __ imul(scratch, array_length_operand);
- __ j(overflow, &bailout);
- __ add(string_length, Operand(scratch));
- __ j(overflow, &bailout);
-
- __ shr(string_length, 1);
- // Live registers and stack values:
- // string_length
- // elements
- __ AllocateAsciiString(result_pos, string_length, scratch,
- index, string, &bailout);
- __ mov(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
-
-
- __ mov(string, separator_operand);
- __ cmp(FieldOperand(string, SeqAsciiString::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ j(equal, &one_char_separator);
- __ j(greater, &long_separator);
-
-
- // Empty separator case
- __ mov(index, Immediate(0));
- __ jmp(&loop_1_condition);
- // Loop condition: while (index < length).
- __ bind(&loop_1);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
- // elements: the FixedArray of strings we are joining.
-
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(Operand(index), Immediate(1));
- __ bind(&loop_1_condition);
- __ cmp(index, array_length_operand);
- __ j(less, &loop_1); // End while (index < length).
- __ jmp(&done);
-
-
-
- // One-character separator case
- __ bind(&one_char_separator);
- // Replace separator with its ascii character value.
- __ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ mov_b(separator_operand, scratch);
-
- __ Set(index, Immediate(0));
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator
- __ jmp(&loop_2_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_2);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
-
- // Copy the separator character to the result.
- __ mov_b(scratch, separator_operand);
- __ mov_b(Operand(result_pos, 0), scratch);
- __ inc(result_pos);
-
- __ bind(&loop_2_entry);
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(Operand(index), Immediate(1));
-
- __ cmp(index, array_length_operand);
- __ j(less, &loop_2); // End while (index < length).
- __ jmp(&done);
-
-
- // Long separator case (separator is more than one character).
- __ bind(&long_separator);
-
- __ Set(index, Immediate(0));
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator
- __ jmp(&loop_3_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_3);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
-
- // Copy the separator to the result.
- __ mov(string, separator_operand);
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
-
- __ bind(&loop_3_entry);
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(Operand(index), Immediate(1));
-
- __ cmp(index, array_length_operand);
- __ j(less, &loop_3); // End while (index < length).
- __ jmp(&done);
-
-
- __ bind(&bailout);
- __ mov(result_operand, isolate()->factory()->undefined_value());
- __ bind(&done);
- __ mov(eax, result_operand);
- // Drop temp values from the stack, and restore context register.
- __ add(Operand(esp), Immediate(3 * kPointerSize));
-
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
- Comment cmnt(masm_, "[ InlineRuntimeCall");
- EmitInlineRuntimeCall(expr);
- return;
- }
-
- Comment cmnt(masm_, "[ CallRuntime");
- ZoneList<Expression*>* args = expr->arguments();
-
- if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
- __ mov(eax, GlobalObjectOperand());
- __ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- if (expr->is_jsruntime()) {
- // Call the JS runtime function via a call IC.
- __ Set(ecx, Immediate(expr->name()));
- InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
- arg_count, in_loop);
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- } else {
- // Call the C runtime function.
- __ CallRuntime(expr->function(), arg_count);
- }
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::DELETE: {
- Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
- Property* prop = expr->expression()->AsProperty();
- Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
-
- if (prop != NULL) {
- if (prop->is_synthetic()) {
- // Result of deleting parameters is false, even when they rewrite
- // to accesses on the arguments object.
- context()->Plug(false);
- } else {
- VisitForStackValue(prop->obj());
- VisitForStackValue(prop->key());
- __ push(Immediate(Smi::FromInt(strict_mode_flag())));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(eax);
- }
- } else if (var != NULL) {
- // Delete of an unqualified identifier is disallowed in strict mode
- // but "delete this" is.
- ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
- if (var->is_global()) {
- __ push(GlobalObjectOperand());
- __ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(kNonStrictMode)));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(eax);
- } else if (var->AsSlot() != NULL &&
- var->AsSlot()->type() != Slot::LOOKUP) {
- // Result of deleting non-global, non-dynamic variables is false.
- // The subexpression does not have side effects.
- context()->Plug(false);
- } else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ push(context_register());
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
- context()->Plug(eax);
- }
- } else {
- // Result of deleting non-property, non-variable reference is true.
- // The subexpression may have side effects.
- VisitForEffect(expr->expression());
- context()->Plug(true);
- }
- break;
- }
-
- case Token::VOID: {
- Comment cmnt(masm_, "[ UnaryOperation (VOID)");
- VisitForEffect(expr->expression());
- context()->Plug(isolate()->factory()->undefined_value());
- break;
- }
-
- case Token::NOT: {
- Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- if (context()->IsEffect()) {
- // Unary NOT has no side effects so it's only necessary to visit the
- // subexpression. Match the optimizing compiler by not branching.
- VisitForEffect(expr->expression());
- } else {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
-
- // Notice that the labels are swapped.
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- VisitForControl(expr->expression(), if_true, if_false, fall_through);
- context()->Plug(if_false, if_true); // Labels swapped.
- }
- break;
- }
-
- case Token::TYPEOF: {
- Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- { StackValueContext context(this);
- VisitForTypeofValue(expr->expression());
- }
- __ CallRuntime(Runtime::kTypeof, 1);
- context()->Plug(eax);
- break;
- }
-
- case Token::ADD: {
- Comment cmt(masm_, "[ UnaryOperation (ADD)");
- VisitForAccumulatorValue(expr->expression());
- Label no_conversion;
- __ test(result_register(), Immediate(kSmiTagMask));
- __ j(zero, &no_conversion);
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
- context()->Plug(result_register());
- break;
- }
-
- case Token::SUB: {
- Comment cmt(masm_, "[ UnaryOperation (SUB)");
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
- // GenericUnaryOpStub expects the argument to be in the
- // accumulator register eax.
- VisitForAccumulatorValue(expr->expression());
- __ CallStub(&stub);
- context()->Plug(eax);
- break;
- }
-
- case Token::BIT_NOT: {
- Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
- // The generic unary operation stub expects the argument to be
- // in the accumulator register eax.
- VisitForAccumulatorValue(expr->expression());
- Label done;
- bool inline_smi_case = ShouldInlineSmiCase(expr->op());
- if (inline_smi_case) {
- NearLabel call_stub;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &call_stub);
- __ lea(eax, Operand(eax, kSmiTagMask));
- __ not_(eax);
- __ jmp(&done);
- __ bind(&call_stub);
- }
- bool overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode mode =
- overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpFlags flags = inline_smi_case
- ? NO_UNARY_SMI_CODE_IN_STUB
- : NO_UNARY_FLAGS;
- GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
- __ CallStub(&stub);
- __ bind(&done);
- context()->Plug(eax);
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- Comment cmnt(masm_, "[ CountOperation");
- SetSourcePosition(expr->position());
-
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
- // Expression can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->expression()->AsProperty();
- // In case of a property we use the uninitialized expression context
- // of the key to detect a named property.
- if (prop != NULL) {
- assign_type =
- (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
- }
-
- // Evaluate expression and get value.
- if (assign_type == VARIABLE) {
- ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
- AccumulatorValueContext context(this);
- EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
- } else {
- // Reserve space for result of postfix operation.
- if (expr->is_postfix() && !context()->IsEffect()) {
- __ push(Immediate(Smi::FromInt(0)));
- }
- if (assign_type == NAMED_PROPERTY) {
- // Put the object both on the stack and in the accumulator.
- VisitForAccumulatorValue(prop->obj());
- __ push(eax);
- EmitNamedPropertyLoad(prop);
- } else {
- if (prop->is_arguments_access()) {
- VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
- __ push(slot_operand);
- __ mov(eax, Immediate(prop->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- }
- __ mov(edx, Operand(esp, 0));
- __ push(eax);
- EmitKeyedPropertyLoad(prop);
- }
- }
-
- // We need a second deoptimization point after loading the value
- // in case evaluating the property load my have a side effect.
- if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), TOS_REG);
- } else {
- PrepareForBailout(expr->increment(), TOS_REG);
- }
-
- // Call ToNumber only if operand is not a smi.
- NearLabel no_conversion;
- if (ShouldInlineSmiCase(expr->op())) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &no_conversion);
- }
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(eax);
- break;
- case NAMED_PROPERTY:
- __ mov(Operand(esp, kPointerSize), eax);
- break;
- case KEYED_PROPERTY:
- __ mov(Operand(esp, 2 * kPointerSize), eax);
- break;
- }
- }
- }
-
- // Inline smi case if we are in a loop.
- NearLabel stub_call, done;
- JumpPatchSite patch_site(masm_);
-
- if (ShouldInlineSmiCase(expr->op())) {
- if (expr->op() == Token::INC) {
- __ add(Operand(eax), Immediate(Smi::FromInt(1)));
- } else {
- __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
- }
- __ j(overflow, &stub_call);
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(eax, &done);
-
- __ bind(&stub_call);
- // Call stub. Undo operation first.
- if (expr->op() == Token::INC) {
- __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
- } else {
- __ add(Operand(eax), Immediate(Smi::FromInt(1)));
- }
- }
-
- // Record position before stub call.
- SetSourcePosition(expr->position());
-
- // Call stub for +1/-1.
- __ mov(edx, eax);
- __ mov(eax, Immediate(Smi::FromInt(1)));
- TypeRecordingBinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
- EmitCallIC(stub.GetCode(), &patch_site);
- __ bind(&done);
-
- // Store the value returned in eax.
- switch (assign_type) {
- case VARIABLE:
- if (expr->is_postfix()) {
- // Perform the assignment as if via '='.
- { EffectContext context(this);
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context.Plug(eax);
- }
- // For all contexts except EffectContext We have the result on
- // top of the stack.
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- // Perform the assignment as if via '='.
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(eax);
- }
- break;
- case NAMED_PROPERTY: {
- __ mov(ecx, prop->key()->AsLiteral()->handle());
- __ pop(edx);
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(eax);
- }
- break;
- }
- case KEYED_PROPERTY: {
- __ pop(ecx);
- __ pop(edx);
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- // Result is on the stack
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(eax);
- }
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
- VariableProxy* proxy = expr->AsVariableProxy();
- ASSERT(!context()->IsEffect());
- ASSERT(!context()->IsTest());
-
- if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
- Comment cmnt(masm_, "Global variable");
- __ mov(eax, GlobalObjectOperand());
- __ mov(ecx, Immediate(proxy->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- PrepareForBailout(expr, TOS_REG);
- context()->Plug(eax);
- } else if (proxy != NULL &&
- proxy->var()->AsSlot() != NULL &&
- proxy->var()->AsSlot()->type() == Slot::LOOKUP) {
- Label done, slow;
-
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- Slot* slot = proxy->var()->AsSlot();
- EmitDynamicLoadFromSlotFastCase(slot, INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- __ push(esi);
- __ push(Immediate(proxy->name()));
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- PrepareForBailout(expr, TOS_REG);
- __ bind(&done);
-
- context()->Plug(eax);
- } else {
- // This expression cannot throw a reference error at the top level.
- context()->HandleExpression(expr);
- }
-}
-
-
-bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
- Expression* left,
- Expression* right,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (op != Token::EQ && op != Token::EQ_STRICT) return false;
-
- // Check for the pattern: typeof <expression> == <string literal>.
- Literal* right_literal = right->AsLiteral();
- if (right_literal == NULL) return false;
- Handle<Object> right_literal_value = right_literal->handle();
- if (!right_literal_value->IsString()) return false;
- UnaryOperation* left_unary = left->AsUnaryOperation();
- if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
- Handle<String> check = Handle<String>::cast(right_literal_value);
-
- { AccumulatorValueContext context(this);
- VisitForTypeofValue(left_unary->expression());
- }
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
- if (check->Equals(isolate()->heap()->number_symbol())) {
- __ JumpIfSmi(eax, if_true);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_symbol())) {
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
- __ j(above_equal, if_false);
- // Check for undetectable objects => false.
- __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- Split(zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
- __ cmp(eax, isolate()->factory()->true_value());
- __ j(equal, if_true);
- __ cmp(eax, isolate()->factory()->false_value());
- Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
- __ cmp(eax, isolate()->factory()->undefined_value());
- __ j(equal, if_true);
- __ JumpIfSmi(eax, if_false);
- // Check for undetectable objects => true.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
- __ test(ecx, Immediate(1 << Map::kIsUndetectable));
- Split(not_zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_symbol())) {
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, FIRST_FUNCTION_CLASS_TYPE, edx);
- Split(above_equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_symbol())) {
- __ JumpIfSmi(eax, if_false);
- __ cmp(eax, isolate()->factory()->null_value());
- __ j(equal, if_true);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edx);
- __ j(below, if_false);
- __ CmpInstanceType(edx, FIRST_FUNCTION_CLASS_TYPE);
- __ j(above_equal, if_false);
- // Check for undetectable objects => false.
- __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- Split(zero, if_true, if_false, fall_through);
- } else {
- if (if_false != fall_through) __ jmp(if_false);
- }
-
- return true;
-}
-
-
-void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- Comment cmnt(masm_, "[ CompareOperation");
- SetSourcePosition(expr->position());
-
- // Always perform the comparison for its control flow. Pack the result
- // into the expression's context after the comparison is performed.
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- Token::Value op = expr->op();
- Expression* left = expr->left();
- Expression* right = expr->right();
- if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
- context()->Plug(if_true, if_false);
- return;
- }
-
- VisitForStackValue(expr->left());
- switch (expr->op()) {
- case Token::IN:
- VisitForStackValue(expr->right());
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- __ cmp(eax, isolate()->factory()->true_value());
- Split(equal, if_true, if_false, fall_through);
- break;
-
- case Token::INSTANCEOF: {
- VisitForStackValue(expr->right());
- InstanceofStub stub(InstanceofStub::kNoFlags);
- __ CallStub(&stub);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ test(eax, Operand(eax));
- // The stub returns 0 for true.
- Split(zero, if_true, if_false, fall_through);
- break;
- }
-
- default: {
- VisitForAccumulatorValue(expr->right());
- Condition cc = no_condition;
- bool strict = false;
- switch (op) {
- case Token::EQ_STRICT:
- strict = true;
- // Fall through
- case Token::EQ:
- cc = equal;
- __ pop(edx);
- break;
- case Token::LT:
- cc = less;
- __ pop(edx);
- break;
- case Token::GT:
- // Reverse left and right sizes to obtain ECMA-262 conversion order.
- cc = less;
- __ mov(edx, result_register());
- __ pop(eax);
- break;
- case Token::LTE:
- // Reverse left and right sizes to obtain ECMA-262 conversion order.
- cc = greater_equal;
- __ mov(edx, result_register());
- __ pop(eax);
- break;
- case Token::GTE:
- cc = greater_equal;
- __ pop(edx);
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
-
- bool inline_smi_code = ShouldInlineSmiCase(op);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- NearLabel slow_case;
- __ mov(ecx, Operand(edx));
- __ or_(ecx, Operand(eax));
- patch_site.EmitJumpIfNotSmi(ecx, &slow_case);
- __ cmp(edx, Operand(eax));
- Split(cc, if_true, if_false, NULL);
- __ bind(&slow_case);
- }
-
- // Record position and call the compare IC.
- SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
- EmitCallIC(ic, &patch_site);
-
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ test(eax, Operand(eax));
- Split(cc, if_true, if_false, fall_through);
- }
- }
-
- // Convert the result of the comparison into one expected for this
- // expression's context.
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- VisitForAccumulatorValue(expr->expression());
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
- __ cmp(eax, isolate()->factory()->null_value());
- if (expr->is_strict()) {
- Split(equal, if_true, if_false, fall_through);
- } else {
- __ j(equal, if_true);
- __ cmp(eax, isolate()->factory()->undefined_value());
- __ j(equal, if_true);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_false);
- // It can be an undetectable object.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(edx, FieldOperand(edx, Map::kBitFieldOffset));
- __ test(edx, Immediate(1 << Map::kIsUndetectable));
- Split(not_zero, if_true, if_false, fall_through);
- }
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(eax);
-}
-
-
-Register FullCodeGenerator::result_register() {
- return eax;
-}
-
-
-Register FullCodeGenerator::context_register() {
- return esi;
-}
-
-
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
- ASSERT(mode == RelocInfo::CODE_TARGET ||
- mode == RelocInfo::CODE_TARGET_CONTEXT);
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(isolate()->counters()->named_load_full(), 1);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(isolate()->counters()->keyed_load_full(), 1);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(isolate()->counters()->named_store_full(), 1);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(isolate()->counters()->keyed_store_full(), 1);
- default:
- break;
- }
-
- __ call(ic, mode);
-
- // Crankshaft doesn't need patching of inlined loads and stores.
- // When compiling the snapshot we need to produce code that works
- // with and without Crankshaft.
- if (V8::UseCrankshaft() && !Serializer::enabled()) {
- return;
- }
-
- // If we're calling a (keyed) load or store stub, we have to mark
- // the call as containing no inlined code so we will not attempt to
- // patch it.
- switch (ic->kind()) {
- case Code::LOAD_IC:
- case Code::KEYED_LOAD_IC:
- case Code::STORE_IC:
- case Code::KEYED_STORE_IC:
- __ nop(); // Signals no inlined code.
- break;
- default:
- // Do nothing.
- break;
- }
-}
-
-
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
- Counters* counters = isolate()->counters();
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(counters->named_load_full(), 1);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(counters->keyed_load_full(), 1);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(counters->named_store_full(), 1);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(counters->keyed_store_full(), 1);
- default:
- break;
- }
-
- __ call(ic, RelocInfo::CODE_TARGET);
- if (patch_site != NULL && patch_site->is_bound()) {
- patch_site->EmitPatchInfo();
- } else {
- __ nop(); // Signals no inlined code.
- }
-}
-
-
-void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
- __ mov(Operand(ebp, frame_offset), value);
-}
-
-
-void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ mov(dst, ContextOperand(esi, context_index));
-}
-
-
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
- // Cook return address on top of stack (smi encoded Code* delta)
- ASSERT(!result_register().is(edx));
- __ mov(edx, Operand(esp, 0));
- __ sub(Operand(edx), Immediate(masm_->CodeObject()));
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- ASSERT_EQ(0, kSmiTag);
- __ add(edx, Operand(edx)); // Convert to smi.
- __ mov(Operand(esp, 0), edx);
- // Store result register while executing finally block.
- __ push(result_register());
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
- ASSERT(!result_register().is(edx));
- // Restore result register from stack.
- __ pop(result_register());
- // Uncook return address.
- __ mov(edx, Operand(esp, 0));
- __ sar(edx, 1); // Convert smi to int.
- __ add(Operand(edx), Immediate(masm_->CodeObject()));
- __ mov(Operand(esp, 0), edx);
- // And return.
- __ ret(0);
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/ic-ia32.cc b/src/3rdparty/v8/src/ia32/ic-ia32.cc
deleted file mode 100644
index 48ffc73..0000000
--- a/src/3rdparty/v8/src/ia32/ic-ia32.cc
+++ /dev/null
@@ -1,1779 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen-inl.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
- __ j(equal, global_object, not_taken);
- __ cmp(type, JS_BUILTINS_OBJECT_TYPE);
- __ j(equal, global_object, not_taken);
- __ cmp(type, JS_GLOBAL_PROXY_TYPE);
- __ j(equal, global_object, not_taken);
-}
-
-
-// Generated code falls through if the receiver is a regular non-global
-// JS object with slow properties and no interceptors.
-static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register r0,
- Register r1,
- Label* miss) {
- // Register usage:
- // receiver: holds the receiver on entry and is unchanged.
- // r0: used to hold receiver instance type.
- // Holds the property dictionary on fall through.
- // r1: used to hold receivers map.
-
- // Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
-
- // Check that the receiver is a valid JS object.
- __ mov(r1, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movzx_b(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
- __ cmp(r0, FIRST_JS_OBJECT_TYPE);
- __ j(below, miss, not_taken);
-
- // If this assert fails, we have to check upper bound too.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-
- GenerateGlobalInstanceTypeCheck(masm, r0, miss);
-
- // Check for non-global object that requires access check.
- __ test_b(FieldOperand(r1, Map::kBitFieldOffset),
- (1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasNamedInterceptor));
- __ j(not_zero, miss, not_taken);
-
- __ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ CheckMap(r0, FACTORY->hash_table_map(), miss, true);
-}
-
-
-// Probe the string dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found leaving the
-// index into the dictionary in |r0|. Jump to the |miss| label
-// otherwise.
-static void GenerateStringDictionaryProbes(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1) {
- // Assert that name contains a string.
- if (FLAG_debug_code) __ AbortIfNotString(name);
-
- // Compute the capacity mask.
- const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
- __ mov(r1, FieldOperand(elements, kCapacityOffset));
- __ shr(r1, kSmiTagSize); // convert smi to int
- __ dec(r1);
-
- // Generate an unrolled loop that performs a few probes before
- // giving up. Measurements done on Gmail indicate that 2 probes
- // cover ~93% of loads from dictionaries.
- static const int kProbes = 4;
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- for (int i = 0; i < kProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
- __ shr(r0, String::kHashShift);
- if (i > 0) {
- __ add(Operand(r0), Immediate(StringDictionary::GetProbeOffset(i)));
- }
- __ and_(r0, Operand(r1));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(r0, Operand(r0, r0, times_2, 0)); // r0 = r0 * 3
-
- // Check if the key is identical to the name.
- __ cmp(name, Operand(elements, r0, times_4,
- kElementsStartOffset - kHeapObjectTag));
- if (i != kProbes - 1) {
- __ j(equal, done, taken);
- } else {
- __ j(not_equal, miss, not_taken);
- }
- }
-}
-
-
-
-// Helper function used to load a property from a dictionary backing
-// storage. This function may fail to load a property even though it is
-// in the dictionary, so code at miss_label must always call a backup
-// property load that is complete. This function is safe to call if
-// name is not a symbol, and will jump to the miss_label in that
-// case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register r0,
- Register r1,
- Register result) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is unchanged.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // Scratch registers:
- //
- // r0 - used for the index into the property dictionary
- //
- // r1 - used to hold the capacity of the property dictionary.
- //
- // result - holds the result on exit.
-
- Label done;
-
- // Probe the dictionary.
- GenerateStringDictionaryProbes(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
-
- // If probing finds an entry in the dictionary, r0 contains the
- // index into the dictionary. Check that the value is a normal
- // property.
- __ bind(&done);
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
- Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
- __ j(not_zero, miss_label, not_taken);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
-}
-
-
-// Helper function used to store a property to a dictionary backing
-// storage. This function may fail to store a property eventhough it
-// is in the dictionary, so code at miss_label must always call a
-// backup property store that is complete. This function is safe to
-// call if name is not a symbol, and will jump to the miss_label in
-// that case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register value,
- Register r0,
- Register r1) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is clobbered.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // value - holds the value to store and is unchanged.
- //
- // r0 - used for index into the property dictionary and is clobbered.
- //
- // r1 - used to hold the capacity of the property dictionary and is clobbered.
- Label done;
-
-
- // Probe the dictionary.
- GenerateStringDictionaryProbes(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
-
- // If probing finds an entry in the dictionary, r0 contains the
- // index into the dictionary. Check that the value is a normal
- // property that is not read only.
- __ bind(&done);
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- const int kTypeAndReadOnlyMask
- = (PropertyDetails::TypeField::mask() |
- PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
- __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
- Immediate(kTypeAndReadOnlyMask));
- __ j(not_zero, miss_label, not_taken);
-
- // Store the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
- __ mov(Operand(r0, 0), value);
-
- // Update write barrier. Make sure not to clobber the value.
- __ mov(r1, value);
- __ RecordWrite(elements, r0, r1);
-}
-
-
-static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
- Register result) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver and is unchanged.
- //
- // key - holds the smi key on entry and is unchanged.
- //
- // Scratch registers:
- //
- // r0 - holds the untagged key on entry and holds the hash once computed.
- //
- // r1 - used to hold the capacity mask of the dictionary
- //
- // r2 - used for the index into the dictionary.
- //
- // result - holds the result on exit if the load succeeds and we fall through.
-
- Label done;
-
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- __ mov(r1, r0);
- __ not_(r0);
- __ shl(r1, 15);
- __ add(r0, Operand(r1));
- // hash = hash ^ (hash >> 12);
- __ mov(r1, r0);
- __ shr(r1, 12);
- __ xor_(r0, Operand(r1));
- // hash = hash + (hash << 2);
- __ lea(r0, Operand(r0, r0, times_4, 0));
- // hash = hash ^ (hash >> 4);
- __ mov(r1, r0);
- __ shr(r1, 4);
- __ xor_(r0, Operand(r1));
- // hash = hash * 2057;
- __ imul(r0, r0, 2057);
- // hash = hash ^ (hash >> 16);
- __ mov(r1, r0);
- __ shr(r1, 16);
- __ xor_(r0, Operand(r1));
-
- // Compute capacity mask.
- __ mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
- __ shr(r1, kSmiTagSize); // convert smi to int
- __ dec(r1);
-
- // Generate an unrolled loop that performs a few probes before giving up.
- const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
- // Use r2 for index calculations and keep the hash intact in r0.
- __ mov(r2, r0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- __ add(Operand(r2), Immediate(NumberDictionary::GetProbeOffset(i)));
- }
- __ and_(r2, Operand(r1));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(NumberDictionary::kEntrySize == 3);
- __ lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
-
- // Check if the key matches.
- __ cmp(key, FieldOperand(elements,
- r2,
- times_pointer_size,
- NumberDictionary::kElementsStartOffset));
- if (i != (kProbes - 1)) {
- __ j(equal, &done, taken);
- } else {
- __ j(not_equal, miss, not_taken);
- }
- }
-
- __ bind(&done);
- // Check that the value is a normal propety.
- const int kDetailsOffset =
- NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- ASSERT_EQ(NORMAL, 0);
- __ test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
- Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
- __ j(not_zero, miss);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset =
- NumberDictionary::kElementsStartOffset + kPointerSize;
- __ mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
-}
-
-
-// The offset from the inlined patch site to the start of the
-// inlined load instruction. It is 7 bytes (test eax, imm) plus
-// 6 bytes (jne slow_label).
-const int LoadIC::kOffsetToLoadInstruction = 13;
-
-
-void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : receiver
- // -- ecx : name
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadArrayLength(masm, eax, edx, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateStringLength(MacroAssembler* masm,
- bool support_wrappers) {
- // ----------- S t a t e -------------
- // -- eax : receiver
- // -- ecx : name
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadStringLength(masm, eax, edx, ebx, &miss,
- support_wrappers);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : receiver
- // -- ecx : name
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadFunctionPrototype(masm, eax, edx, ebx, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map,
- int interceptor_bit,
- Label* slow) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // Scratch registers:
- // map - used to hold the map of the receiver.
-
- // Check that the object isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, slow, not_taken);
-
- // Get the map of the receiver.
- __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
-
- // Check bit field.
- __ test_b(FieldOperand(map, Map::kBitFieldOffset),
- (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
- __ j(not_zero, slow, not_taken);
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing
- // into string objects works as intended.
- ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-
- __ CmpInstanceType(map, JS_OBJECT_TYPE);
- __ j(below, slow, not_taken);
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register scratch,
- Register result,
- Label* not_fast_array,
- Label* out_of_range) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // key - holds the key and is unchanged (must be a smi).
- // Scratch registers:
- // scratch - used to hold elements of the receiver and the loaded value.
- // result - holds the result on exit if the load succeeds and
- // we fall through.
-
- __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ CheckMap(scratch, FACTORY->fixed_array_map(), not_fast_array, true);
- } else {
- __ AssertFastElements(scratch);
- }
- // Check that the key (index) is within bounds.
- __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
- __ j(above_equal, out_of_range);
- // Fast case: Do the load.
- ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
- __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
- __ cmp(Operand(scratch), Immediate(FACTORY->the_hole_value()));
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ j(equal, out_of_range);
- if (!result.is(scratch)) {
- __ mov(result, scratch);
- }
-}
-
-
-// Checks whether a key is an array index string or a symbol string.
-// Falls through if the key is a symbol.
-static void GenerateKeyStringCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_symbol) {
- // Register use:
- // key - holds the key and is unchanged. Assumed to be non-smi.
- // Scratch registers:
- // map - used to hold the map of the key.
- // hash - used to hold the hash of the key.
- __ CmpObjectType(key, FIRST_NONSTRING_TYPE, map);
- __ j(above_equal, not_symbol);
-
- // Is the string an array index, with cached numeric value?
- __ mov(hash, FieldOperand(key, String::kHashFieldOffset));
- __ test(hash, Immediate(String::kContainsCachedArrayIndexMask));
- __ j(zero, index_string, not_taken);
-
- // Is the string a symbol?
- ASSERT(kSymbolTag != 0);
- __ test_b(FieldOperand(map, Map::kInstanceTypeOffset), kIsSymbolMask);
- __ j(zero, not_symbol, not_taken);
-}
-
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow, check_string, index_smi, index_string, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- // Check that the key is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &check_string, not_taken);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, edx, ecx, Map::kHasIndexedInterceptor, &slow);
-
- // Check the "has fast elements" bit in the receiver's map which is
- // now in ecx.
- __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
- 1 << Map::kHasFastElements);
- __ j(zero, &check_number_dictionary, not_taken);
-
- GenerateFastArrayLoad(masm,
- edx,
- eax,
- ecx,
- eax,
- NULL,
- &slow);
- Isolate* isolate = masm->isolate();
- Counters* counters = isolate->counters();
- __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
- __ ret(0);
-
- __ bind(&check_number_dictionary);
- __ mov(ebx, eax);
- __ SmiUntag(ebx);
- __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
-
- // Check whether the elements is a number dictionary.
- // edx: receiver
- // ebx: untagged index
- // eax: key
- // ecx: elements
- __ CheckMap(ecx, isolate->factory()->hash_table_map(), &slow, true);
- Label slow_pop_receiver;
- // Push receiver on the stack to free up a register for the dictionary
- // probing.
- __ push(edx);
- GenerateNumberDictionaryLoad(masm,
- &slow_pop_receiver,
- ecx,
- eax,
- ebx,
- edx,
- edi,
- eax);
- // Pop receiver before returning.
- __ pop(edx);
- __ ret(0);
-
- __ bind(&slow_pop_receiver);
- // Pop the receiver from the stack and jump to runtime.
- __ pop(edx);
-
- __ bind(&slow);
- // Slow case: jump to runtime.
- // edx: receiver
- // eax: key
- __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, eax, ecx, ebx, &index_string, &slow);
-
- GenerateKeyedLoadReceiverCheck(
- masm, edx, ecx, Map::kHasNamedInterceptor, &slow);
-
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary.
- __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(isolate->factory()->hash_table_map()));
- __ j(equal, &probe_dictionary);
-
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the string hash.
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ mov(ecx, ebx);
- __ shr(ecx, KeyedLookupCache::kMapHashShift);
- __ mov(edi, FieldOperand(eax, String::kHashFieldOffset));
- __ shr(edi, String::kHashShift);
- __ xor_(ecx, Operand(edi));
- __ and_(ecx, KeyedLookupCache::kCapacityMask);
-
- // Load the key (consisting of map and symbol) from the cache and
- // check for match.
- ExternalReference cache_keys =
- ExternalReference::keyed_lookup_cache_keys(masm->isolate());
- __ mov(edi, ecx);
- __ shl(edi, kPointerSizeLog2 + 1);
- __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(not_equal, &slow);
- __ add(Operand(edi), Immediate(kPointerSize));
- __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(not_equal, &slow);
-
- // Get field offset.
- // edx : receiver
- // ebx : receiver's map
- // eax : key
- // ecx : lookup cache index
- ExternalReference cache_field_offsets =
- ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
- __ mov(edi,
- Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
- __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
- __ sub(edi, Operand(ecx));
- __ j(above_equal, &property_array_property);
-
- // Load in-object property.
- __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
- __ add(ecx, Operand(edi));
- __ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
- __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
- __ ret(0);
-
- // Load property array property.
- __ bind(&property_array_property);
- __ mov(eax, FieldOperand(edx, JSObject::kPropertiesOffset));
- __ mov(eax, FieldOperand(eax, edi, times_pointer_size,
- FixedArray::kHeaderSize));
- __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
- __ ret(0);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
-
- __ mov(ecx, FieldOperand(edx, JSObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, ecx, &slow);
-
- GenerateDictionaryLoad(masm, &slow, ebx, eax, ecx, edi, eax);
- __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
- __ ret(0);
-
- __ bind(&index_string);
- __ IndexFromHash(ebx, eax);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : key (index)
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Register receiver = edx;
- Register index = eax;
- Register scratch1 = ebx;
- Register scratch2 = ecx;
- Register result = eax;
-
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch1,
- scratch2,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX);
- char_at_generator.GenerateFast(masm);
- __ ret(0);
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow;
-
- // Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
-
- // Check that the key is an array index, that is Uint32.
- __ test(eax, Immediate(kSmiTagMask | kSmiSignMask));
- __ j(not_zero, &slow, not_taken);
-
- // Get the map of the receiver.
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
- __ and_(Operand(ecx), Immediate(kSlowCaseBitFieldMask));
- __ cmp(Operand(ecx), Immediate(1 << Map::kHasIndexedInterceptor));
- __ j(not_zero, &slow, not_taken);
-
- // Everything is fine, call runtime.
- __ pop(ecx);
- __ push(edx); // receiver
- __ push(eax); // key
- __ push(ecx); // return address
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
- masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow, fast, array, extra;
-
- // Check that the object isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
- // Get the map from the receiver.
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded);
- __ j(not_zero, &slow, not_taken);
- // Check that the key is a smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow, not_taken);
- __ CmpInstanceType(edi, JS_ARRAY_TYPE);
- __ j(equal, &array);
- // Check that the object is some kind of JS object.
- __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
- __ j(below, &slow, not_taken);
-
- // Object case: Check key against length in the elements array.
- // eax: value
- // edx: JSObject
- // ecx: key (a smi)
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- // Check that the object is in fast mode and writable.
- __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, true);
- __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
- __ j(below, &fast, taken);
-
- // Slow case: call runtime.
- __ bind(&slow);
- GenerateRuntimeSetProperty(masm, strict_mode);
-
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
- __ bind(&extra);
- // eax: value
- // edx: receiver, a JSArray
- // ecx: key, a smi.
- // edi: receiver->elements, a FixedArray
- // flags: compare (ecx, edx.length())
- __ j(not_equal, &slow, not_taken); // do not leave holes in the array
- __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
- __ j(above_equal, &slow, not_taken);
- // Add 1 to receiver->length, and go to fast array write.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ jmp(&fast);
-
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
- // is the length is always a smi.
- __ bind(&array);
- // eax: value
- // edx: receiver, a JSArray
- // ecx: key, a smi.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, true);
-
- // Check the key against the length in the array, compute the
- // address to store into and fall through to fast case.
- __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // Compare smis.
- __ j(above_equal, &extra, not_taken);
-
- // Fast case: Do the store.
- __ bind(&fast);
- // eax: value
- // ecx: key (a smi)
- // edx: receiver
- // edi: FixedArray receiver->elements
- __ mov(CodeGenerator::FixedArrayElementOperand(edi, ecx), eax);
- // Update write barrier for the elements array address.
- __ mov(edx, Operand(eax));
- __ RecordWrite(edi, 0, edx, ecx);
- __ ret(0);
-}
-
-
-// The generated code does not accept smi keys.
-// The generated code falls through if both probes miss.
-static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- NOT_IN_LOOP,
- MONOMORPHIC,
- Code::kNoExtraICState,
- NORMAL,
- argc);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
- eax);
-
- // If the stub cache probing failed, the receiver might be a value.
- // For value objects, we use the map of the prototype objects for
- // the corresponding JSValue for the cache and that is what we need
- // to probe.
- //
- // Check for number.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &number, not_taken);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ebx);
- __ j(not_equal, &non_number, taken);
- __ bind(&number);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::NUMBER_FUNCTION_INDEX, edx);
- __ jmp(&probe);
-
- // Check for string.
- __ bind(&non_number);
- __ CmpInstanceType(ebx, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &non_string, taken);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::STRING_FUNCTION_INDEX, edx);
- __ jmp(&probe);
-
- // Check for boolean.
- __ bind(&non_string);
- __ cmp(edx, FACTORY->true_value());
- __ j(equal, &boolean, not_taken);
- __ cmp(edx, FACTORY->false_value());
- __ j(not_equal, &miss, taken);
- __ bind(&boolean);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::BOOLEAN_FUNCTION_INDEX, edx);
-
- // Probe the stub cache for the value object.
- __ bind(&probe);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
- no_reg);
- __ bind(&miss);
-}
-
-
-static void GenerateFunctionTailCall(MacroAssembler* masm,
- int argc,
- Label* miss) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edi : function
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // Check that the result is not a smi.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
-
- // Check that the value is a JavaScript function, fetching its map into eax.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
- __ j(not_equal, miss, not_taken);
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(edi, actual, JUMP_FUNCTION);
-}
-
-// The generated code falls through if the call should be handled by runtime.
-static void GenerateCallNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Label miss;
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- GenerateStringDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
-
- // eax: elements
- // Search the dictionary placing the result in edi.
- GenerateDictionaryLoad(masm, &miss, eax, ecx, edi, ebx, edi);
- GenerateFunctionTailCall(masm, argc, &miss);
-
- __ bind(&miss);
-}
-
-
-static void GenerateCallMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- Counters* counters = masm->isolate()->counters();
- if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(counters->call_miss(), 1);
- } else {
- __ IncrementCounter(counters->keyed_call_miss(), 1);
- }
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Push the receiver and the name of the function.
- __ push(edx);
- __ push(ecx);
-
- // Call the entry.
- CEntryStub stub(1);
- __ mov(eax, Immediate(2));
- __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate())));
- __ CallStub(&stub);
-
- // Move result to edi and exit the internal frame.
- __ mov(edi, eax);
- __ LeaveInternalFrame();
-
- // Check if the receiver is a global object of some sort.
- // This can happen only for regular CallIC but not KeyedCallIC.
- if (id == IC::kCallIC_Miss) {
- Label invoke, global;
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); // receiver
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &invoke, not_taken);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ cmp(ebx, JS_GLOBAL_OBJECT_TYPE);
- __ j(equal, &global);
- __ cmp(ebx, JS_BUILTINS_OBJECT_TYPE);
- __ j(not_equal, &invoke);
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- __ bind(&invoke);
- }
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(edi, actual, JUMP_FUNCTION);
-}
-
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
- GenerateMiss(masm, argc);
-}
-
-
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- GenerateCallNormal(masm, argc);
- GenerateMiss(masm, argc);
-}
-
-
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
-}
-
-
-void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- Label do_call, slow_call, slow_load, slow_reload_receiver;
- Label check_number_dictionary, check_string, lookup_monomorphic_cache;
- Label index_smi, index_string;
-
- // Check that the key is a smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &check_string, not_taken);
-
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, edx, eax, Map::kHasIndexedInterceptor, &slow_call);
-
- GenerateFastArrayLoad(
- masm, edx, ecx, eax, edi, &check_number_dictionary, &slow_load);
- Isolate* isolate = masm->isolate();
- Counters* counters = isolate->counters();
- __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1);
-
- __ bind(&do_call);
- // receiver in edx is not used after this point.
- // ecx: key
- // edi: function
- GenerateFunctionTailCall(masm, argc, &slow_call);
-
- __ bind(&check_number_dictionary);
- // eax: elements
- // ecx: smi key
- // Check whether the elements is a number dictionary.
- __ CheckMap(eax, isolate->factory()->hash_table_map(), &slow_load, true);
- __ mov(ebx, ecx);
- __ SmiUntag(ebx);
- // ebx: untagged index
- // Receiver in edx will be clobbered, need to reload it on miss.
- GenerateNumberDictionaryLoad(
- masm, &slow_reload_receiver, eax, ecx, ebx, edx, edi, edi);
- __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1);
- __ jmp(&do_call);
-
- __ bind(&slow_reload_receiver);
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- __ bind(&slow_load);
- // This branch is taken when calling KeyedCallIC_Miss is neither required
- // nor beneficial.
- __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
- __ EnterInternalFrame();
- __ push(ecx); // save the key
- __ push(edx); // pass the receiver
- __ push(ecx); // pass the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(ecx); // restore the key
- __ LeaveInternalFrame();
- __ mov(edi, eax);
- __ jmp(&do_call);
-
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, ecx, eax, ebx, &index_string, &slow_call);
-
- // The key is known to be a symbol.
- // If the receiver is a regular JS object with slow properties then do
- // a quick inline probe of the receiver's dictionary.
- // Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(
- masm, edx, eax, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
-
- __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
- __ CheckMap(ebx,
- isolate->factory()->hash_table_map(),
- &lookup_monomorphic_cache,
- true);
-
- GenerateDictionaryLoad(masm, &slow_load, ebx, ecx, eax, edi, edi);
- __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1);
- __ jmp(&do_call);
-
- __ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
- GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
- // Fall through on miss.
-
- __ bind(&slow_call);
- // This branch is taken if:
- // - the receiver requires boxing or access check,
- // - the key is neither smi nor symbol,
- // - the value loaded is not a function,
- // - there is hope that the runtime will create a monomorphic call stub
- // that will get fetched next time.
- __ IncrementCounter(counters->keyed_call_generic_slow(), 1);
- GenerateMiss(masm, argc);
-
- __ bind(&index_string);
- __ IndexFromHash(ebx, ecx);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // Check if the name is a string.
- Label miss;
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
- Condition cond = masm->IsObjectStringType(ecx, eax, eax);
- __ j(NegateCondition(cond), &miss);
- GenerateCallNormal(masm, argc);
- __ bind(&miss);
- GenerateMiss(masm, argc);
-}
-
-
-void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
-}
-
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : receiver
- // -- ecx : name
- // -- esp[0] : return address
- // -----------------------------------
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
- NOT_IN_LOOP,
- MONOMORPHIC);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, eax, ecx, ebx,
- edx);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : receiver
- // -- ecx : name
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- GenerateStringDictionaryReceiverCheck(masm, eax, edx, ebx, &miss);
-
- // edx: elements
- // Search the dictionary placing the result in eax.
- GenerateDictionaryLoad(masm, &miss, edx, ecx, edi, ebx, eax);
- __ ret(0);
-
- // Cache miss: Jump to runtime.
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : receiver
- // -- ecx : name
- // -- esp[0] : return address
- // -----------------------------------
-
- __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
-
- __ pop(ebx);
- __ push(eax); // receiver
- __ push(ecx); // name
- __ push(ebx); // return address
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
- if (V8::UseCrankshaft()) return false;
-
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
- // If the instruction following the call is not a test eax, nothing
- // was inlined.
- if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
- Address delta_address = test_instruction_address + 1;
- // The delta to the start of the map check instruction.
- int delta = *reinterpret_cast<int*>(delta_address);
-
- // The map address is the last 4 bytes of the 7-byte
- // operand-immediate compare instruction, so we add 3 to get the
- // offset to the last 4 bytes.
- Address map_address = test_instruction_address + delta + 3;
- *(reinterpret_cast<Object**>(map_address)) = map;
-
- // The offset is in the last 4 bytes of a six byte
- // memory-to-register move instruction, so we add 2 to get the
- // offset to the last 4 bytes.
- Address offset_address =
- test_instruction_address + delta + kOffsetToLoadInstruction + 2;
- *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
- return true;
-}
-
-
-// One byte opcode for mov ecx,0xXXXXXXXX.
-// Marks inlined contextual loads using all kinds of cells. Generated
-// code has the hole check:
-// mov reg, <cell>
-// mov reg, (<cell>, value offset)
-// cmp reg, <the hole>
-// je slow
-// ;; use reg
-static const byte kMovEcxByte = 0xB9;
-
-// One byte opcode for mov edx,0xXXXXXXXX.
-// Marks inlined contextual loads using only "don't delete"
-// cells. Generated code doesn't have the hole check:
-// mov reg, <cell>
-// mov reg, (<cell>, value offset)
-// ;; use reg
-static const byte kMovEdxByte = 0xBA;
-
-bool LoadIC::PatchInlinedContextualLoad(Address address,
- Object* map,
- Object* cell,
- bool is_dont_delete) {
- if (V8::UseCrankshaft()) return false;
-
- // The address of the instruction following the call.
- Address mov_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
- // If the instruction following the call is not a mov ecx/edx,
- // nothing was inlined.
- byte b = *mov_instruction_address;
- if (b != kMovEcxByte && b != kMovEdxByte) return false;
- // If we don't have the hole check generated, we can only support
- // "don't delete" cells.
- if (b == kMovEdxByte && !is_dont_delete) return false;
-
- Address delta_address = mov_instruction_address + 1;
- // The delta to the start of the map check instruction.
- int delta = *reinterpret_cast<int*>(delta_address);
-
- // The map address is the last 4 bytes of the 7-byte
- // operand-immediate compare instruction, so we add 3 to get the
- // offset to the last 4 bytes.
- Address map_address = mov_instruction_address + delta + 3;
- *(reinterpret_cast<Object**>(map_address)) = map;
-
- // The cell is in the last 4 bytes of a five byte mov reg, imm32
- // instruction, so we add 1 to get the offset to the last 4 bytes.
- Address offset_address =
- mov_instruction_address + delta + kOffsetToLoadInstruction + 1;
- *reinterpret_cast<Object**>(offset_address) = cell;
- return true;
-}
-
-
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
- if (V8::UseCrankshaft()) return false;
-
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test eax, nothing
- // was inlined.
- if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
- // Extract the encoded deltas from the test eax instruction.
- Address encoded_offsets_address = test_instruction_address + 1;
- int encoded_offsets = *reinterpret_cast<int*>(encoded_offsets_address);
- int delta_to_map_check = -(encoded_offsets & 0xFFFF);
- int delta_to_record_write = encoded_offsets >> 16;
-
- // Patch the map to check. The map address is the last 4 bytes of
- // the 7-byte operand-immediate compare instruction.
- Address map_check_address = test_instruction_address + delta_to_map_check;
- Address map_address = map_check_address + 3;
- *(reinterpret_cast<Object**>(map_address)) = map;
-
- // Patch the offset in the store instruction. The offset is in the
- // last 4 bytes of a six byte register-to-memory move instruction.
- Address offset_address =
- map_check_address + StoreIC::kOffsetToStoreInstruction + 2;
- // The offset should have initial value (kMaxInt - 1), cleared value
- // (-1) or we should be clearing the inlined version.
- ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt - 1 ||
- *reinterpret_cast<int*>(offset_address) == -1 ||
- (offset == 0 && map == HEAP->null_value()));
- *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
- // Patch the offset in the write-barrier code. The offset is the
- // last 4 bytes of a six byte lea instruction.
- offset_address = map_check_address + delta_to_record_write + 2;
- // The offset should have initial value (kMaxInt), cleared value
- // (-1) or we should be clearing the inlined version.
- ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt ||
- *reinterpret_cast<int*>(offset_address) == -1 ||
- (offset == 0 && map == HEAP->null_value()));
- *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
- return true;
-}
-
-
-static bool PatchInlinedMapCheck(Address address, Object* map) {
- if (V8::UseCrankshaft()) return false;
-
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
- // The keyed load has a fast inlined case if the IC call instruction
- // is immediately followed by a test instruction.
- if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
- // Fetch the offset from the test instruction to the map cmp
- // instruction. This offset is stored in the last 4 bytes of the 5
- // byte test instruction.
- Address delta_address = test_instruction_address + 1;
- int delta = *reinterpret_cast<int*>(delta_address);
- // Compute the map address. The map address is in the last 4 bytes
- // of the 7-byte operand-immediate compare instruction, so we add 3
- // to the offset to get the map address.
- Address map_address = test_instruction_address + delta + 3;
- // Patch the map check.
- *(reinterpret_cast<Object**>(map_address)) = map;
- return true;
-}
-
-
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
- return PatchInlinedMapCheck(address, map);
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
- return PatchInlinedMapCheck(address, map);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ IncrementCounter(masm->isolate()->counters()->keyed_load_miss(), 1);
-
- __ pop(ebx);
- __ push(edx); // receiver
- __ push(eax); // name
- __ push(ebx); // return address
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ pop(ebx);
- __ push(edx); // receiver
- __ push(eax); // name
- __ push(ebx); // return address
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
- NOT_IN_LOOP,
- MONOMORPHIC,
- strict_mode);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
- no_reg);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ pop(ebx);
- __ push(edx);
- __ push(ecx);
- __ push(eax);
- __ push(ebx);
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-// The offset from the inlined patch site to the start of the inlined
-// store instruction. It is 7 bytes (test reg, imm) plus 6 bytes (jne
-// slow_label).
-const int StoreIC::kOffsetToStoreInstruction = 13;
-
-
-void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSObject::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type.), but currently is restricted to JSArray.
- // Value must be a number, but only smis are accepted as the most common case.
-
- Label miss;
-
- Register receiver = edx;
- Register value = eax;
- Register scratch = ebx;
-
- // Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss, not_taken);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
- __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss, not_taken);
-
- // Check that value is a smi.
- __ test(value, Immediate(kSmiTagMask));
- __ j(not_zero, &miss, not_taken);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ pop(scratch);
- __ push(receiver);
- __ push(value);
- __ push(scratch); // return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- Label miss, restore_miss;
-
- GenerateStringDictionaryReceiverCheck(masm, edx, ebx, edi, &miss);
-
- // A lot of registers are needed for storing to slow case
- // objects. Push and restore receiver but rely on
- // GenerateDictionaryStore preserving the value and name.
- __ push(edx);
- GenerateDictionaryStore(masm, &restore_miss, ebx, ecx, eax, edx, edi);
- __ Drop(1);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1);
- __ ret(0);
-
- __ bind(&restore_miss);
- __ pop(edx);
-
- __ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1);
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- __ pop(ebx);
- __ push(edx);
- __ push(ecx);
- __ push(eax);
- __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
- __ push(Immediate(Smi::FromInt(strict_mode)));
- __ push(ebx); // return address
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ pop(ebx);
- __ push(edx);
- __ push(ecx);
- __ push(eax);
- __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
- __ push(Immediate(Smi::FromInt(strict_mode))); // Strict mode.
- __ push(ebx); // return address
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ pop(ebx);
- __ push(edx);
- __ push(ecx);
- __ push(eax);
- __ push(ebx);
-
- // Do tail-call to runtime routine.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-#undef __
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return equal;
- case Token::LT:
- return less;
- case Token::GT:
- // Reverse left and right operands to obtain ECMA-262 conversion order.
- return less;
- case Token::LTE:
- // Reverse left and right operands to obtain ECMA-262 conversion order.
- return greater_equal;
- case Token::GTE:
- return greater_equal;
- default:
- UNREACHABLE();
- return no_condition;
- }
-}
-
-
-static bool HasInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test al, nothing
- // was inlined.
- return *test_instruction_address == Assembler::kTestAlByte;
-}
-
-
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope;
- Handle<Code> rewritten;
- State previous_state = GetState();
-
- State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
- if (state == GENERIC) {
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
- rewritten = stub.GetCode();
- } else {
- ICCompareStub stub(op_, state);
- rewritten = stub.GetCode();
- }
- set_target(*rewritten);
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[CompareIC (%s->%s)#%s]\n",
- GetStateName(previous_state),
- GetStateName(state),
- Token::Name(op_));
- }
-#endif
-
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address());
- }
-}
-
-
-void PatchInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test al, nothing
- // was inlined.
- if (*test_instruction_address != Assembler::kTestAlByte) {
- ASSERT(*test_instruction_address == Assembler::kNopByte);
- return;
- }
-
- Address delta_address = test_instruction_address + 1;
- // The delta to the start of the map check instruction and the
- // condition code uses at the patched jump.
- int8_t delta = *reinterpret_cast<int8_t*>(delta_address);
- if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, test=%p, delta=%d\n",
- address, test_instruction_address, delta);
- }
-
- // Patch with a short conditional jump. There must be a
- // short jump-if-carry/not-carry at this position.
- Address jmp_address = test_instruction_address - delta;
- ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
- *jmp_address == Assembler::kJcShortOpcode);
- Condition cc = *jmp_address == Assembler::kJncShortOpcode
- ? not_zero
- : zero;
- *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/jump-target-ia32.cc b/src/3rdparty/v8/src/ia32/jump-target-ia32.cc
deleted file mode 100644
index 76c0d02..0000000
--- a/src/3rdparty/v8/src/ia32/jump-target-ia32.cc
+++ /dev/null
@@ -1,437 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-void JumpTarget::DoJump() {
- ASSERT(cgen()->has_valid_frame());
- // Live non-frame registers are not allowed at unconditional jumps
- // because we have no way of invalidating the corresponding results
- // which are still live in the C++ code.
- ASSERT(cgen()->HasValidEntryRegisters());
-
- if (is_bound()) {
- // Backward jump. There is an expected frame to merge to.
- ASSERT(direction_ == BIDIRECTIONAL);
- cgen()->frame()->PrepareMergeTo(entry_frame_);
- cgen()->frame()->MergeTo(entry_frame_);
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- } else if (entry_frame_ != NULL) {
- // Forward jump with a preconfigured entry frame. Assert the
- // current frame matches the expected one and jump to the block.
- ASSERT(cgen()->frame()->Equals(entry_frame_));
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- } else {
- // Forward jump. Remember the current frame and emit a jump to
- // its merge code.
- AddReachingFrame(cgen()->frame());
- RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
- __ jmp(&merge_labels_.last());
- }
-}
-
-
-void JumpTarget::DoBranch(Condition cc, Hint hint) {
- ASSERT(cgen() != NULL);
- ASSERT(cgen()->has_valid_frame());
-
- if (is_bound()) {
- ASSERT(direction_ == BIDIRECTIONAL);
- // Backward branch. We have an expected frame to merge to on the
- // backward edge.
-
- // Swap the current frame for a copy (we do the swapping to get
- // the off-frame registers off the fall through) to use for the
- // branch.
- VirtualFrame* fall_through_frame = cgen()->frame();
- VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
- RegisterFile non_frame_registers;
- cgen()->SetFrame(branch_frame, &non_frame_registers);
-
- // Check if we can avoid merge code.
- cgen()->frame()->PrepareMergeTo(entry_frame_);
- if (cgen()->frame()->Equals(entry_frame_)) {
- // Branch right in to the block.
- cgen()->DeleteFrame();
- __ j(cc, &entry_label_, hint);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- return;
- }
-
- // Check if we can reuse existing merge code.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (reaching_frames_[i] != NULL &&
- cgen()->frame()->Equals(reaching_frames_[i])) {
- // Branch to the merge code.
- cgen()->DeleteFrame();
- __ j(cc, &merge_labels_[i], hint);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- return;
- }
- }
-
- // To emit the merge code here, we negate the condition and branch
- // around the merge code on the fall through path.
- Label original_fall_through;
- __ j(NegateCondition(cc), &original_fall_through, NegateHint(hint));
- cgen()->frame()->MergeTo(entry_frame_);
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- __ bind(&original_fall_through);
-
- } else if (entry_frame_ != NULL) {
- // Forward branch with a preconfigured entry frame. Assert the
- // current frame matches the expected one and branch to the block.
- ASSERT(cgen()->frame()->Equals(entry_frame_));
- // Explicitly use the macro assembler instead of __ as forward
- // branches are expected to be a fixed size (no inserted
- // coverage-checking instructions please). This is used in
- // Reference::GetValue.
- cgen()->masm()->j(cc, &entry_label_, hint);
-
- } else {
- // Forward branch. A copy of the current frame is remembered and
- // a branch to the merge code is emitted. Explicitly use the
- // macro assembler instead of __ as forward branches are expected
- // to be a fixed size (no inserted coverage-checking instructions
- // please). This is used in Reference::GetValue.
- AddReachingFrame(new VirtualFrame(cgen()->frame()));
- cgen()->masm()->j(cc, &merge_labels_.last(), hint);
- }
-}
-
-
-void JumpTarget::Call() {
- // Call is used to push the address of the catch block on the stack as
- // a return address when compiling try/catch and try/finally. We
- // fully spill the frame before making the call. The expected frame
- // at the label (which should be the only one) is the spilled current
- // frame plus an in-memory return address. The "fall-through" frame
- // at the return site is the spilled current frame.
- ASSERT(cgen() != NULL);
- ASSERT(cgen()->has_valid_frame());
- // There are no non-frame references across the call.
- ASSERT(cgen()->HasValidEntryRegisters());
- ASSERT(!is_linked());
-
- cgen()->frame()->SpillAll();
- VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
- target_frame->Adjust(1);
- // We do not expect a call with a preconfigured entry frame.
- ASSERT(entry_frame_ == NULL);
- AddReachingFrame(target_frame);
- __ call(&merge_labels_.last());
-}
-
-
-void JumpTarget::DoBind() {
- ASSERT(cgen() != NULL);
- ASSERT(!is_bound());
-
- // Live non-frame registers are not allowed at the start of a basic
- // block.
- ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
-
- // Fast case: the jump target was manually configured with an entry
- // frame to use.
- if (entry_frame_ != NULL) {
- // Assert no reaching frames to deal with.
- ASSERT(reaching_frames_.is_empty());
- ASSERT(!cgen()->has_valid_frame());
-
- RegisterFile empty;
- if (direction_ == BIDIRECTIONAL) {
- // Copy the entry frame so the original can be used for a
- // possible backward jump.
- cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
- } else {
- // Take ownership of the entry frame.
- cgen()->SetFrame(entry_frame_, &empty);
- entry_frame_ = NULL;
- }
- __ bind(&entry_label_);
- return;
- }
-
- if (!is_linked()) {
- ASSERT(cgen()->has_valid_frame());
- if (direction_ == FORWARD_ONLY) {
- // Fast case: no forward jumps and no possible backward jumps.
- // The stack pointer can be floating above the top of the
- // virtual frame before the bind. Afterward, it should not.
- VirtualFrame* frame = cgen()->frame();
- int difference = frame->stack_pointer_ - (frame->element_count() - 1);
- if (difference > 0) {
- frame->stack_pointer_ -= difference;
- __ add(Operand(esp), Immediate(difference * kPointerSize));
- }
- } else {
- ASSERT(direction_ == BIDIRECTIONAL);
- // Fast case: no forward jumps, possible backward ones. Remove
- // constants and copies above the watermark on the fall-through
- // frame and use it as the entry frame.
- cgen()->frame()->MakeMergable();
- entry_frame_ = new VirtualFrame(cgen()->frame());
- }
- __ bind(&entry_label_);
- return;
- }
-
- if (direction_ == FORWARD_ONLY &&
- !cgen()->has_valid_frame() &&
- reaching_frames_.length() == 1) {
- // Fast case: no fall-through, a single forward jump, and no
- // possible backward jumps. Pick up the only reaching frame, take
- // ownership of it, and use it for the block about to be emitted.
- VirtualFrame* frame = reaching_frames_[0];
- RegisterFile empty;
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[0] = NULL;
- __ bind(&merge_labels_[0]);
-
- // The stack pointer can be floating above the top of the
- // virtual frame before the bind. Afterward, it should not.
- int difference = frame->stack_pointer_ - (frame->element_count() - 1);
- if (difference > 0) {
- frame->stack_pointer_ -= difference;
- __ add(Operand(esp), Immediate(difference * kPointerSize));
- }
-
- __ bind(&entry_label_);
- return;
- }
-
- // If there is a current frame, record it as the fall-through. It
- // is owned by the reaching frames for now.
- bool had_fall_through = false;
- if (cgen()->has_valid_frame()) {
- had_fall_through = true;
- AddReachingFrame(cgen()->frame()); // Return value ignored.
- RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
- }
-
- // Compute the frame to use for entry to the block.
- ComputeEntryFrame();
-
- // Some moves required to merge to an expected frame require purely
- // frame state changes, and do not require any code generation.
- // Perform those first to increase the possibility of finding equal
- // frames below.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (reaching_frames_[i] != NULL) {
- reaching_frames_[i]->PrepareMergeTo(entry_frame_);
- }
- }
-
- if (is_linked()) {
- // There were forward jumps. Handle merging the reaching frames
- // to the entry frame.
-
- // Loop over the (non-null) reaching frames and process any that
- // need merge code. Iterate backwards through the list to handle
- // the fall-through frame first. Set frames that will be
- // processed after 'i' to NULL if we want to avoid processing
- // them.
- for (int i = reaching_frames_.length() - 1; i >= 0; i--) {
- VirtualFrame* frame = reaching_frames_[i];
-
- if (frame != NULL) {
- // Does the frame (probably) need merge code?
- if (!frame->Equals(entry_frame_)) {
- // We could have a valid frame as the fall through to the
- // binding site or as the fall through from a previous merge
- // code block. Jump around the code we are about to
- // generate.
- if (cgen()->has_valid_frame()) {
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- }
- // Pick up the frame for this block. Assume ownership if
- // there cannot be backward jumps.
- RegisterFile empty;
- if (direction_ == BIDIRECTIONAL) {
- cgen()->SetFrame(new VirtualFrame(frame), &empty);
- } else {
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[i] = NULL;
- }
- __ bind(&merge_labels_[i]);
-
- // Loop over the remaining (non-null) reaching frames,
- // looking for any that can share merge code with this one.
- for (int j = 0; j < i; j++) {
- VirtualFrame* other = reaching_frames_[j];
- if (other != NULL && other->Equals(cgen()->frame())) {
- // Set the reaching frame element to null to avoid
- // processing it later, and then bind its entry label.
- reaching_frames_[j] = NULL;
- __ bind(&merge_labels_[j]);
- }
- }
-
- // Emit the merge code.
- cgen()->frame()->MergeTo(entry_frame_);
- } else if (i == reaching_frames_.length() - 1 && had_fall_through) {
- // If this is the fall through frame, and it didn't need
- // merge code, we need to pick up the frame so we can jump
- // around subsequent merge blocks if necessary.
- RegisterFile empty;
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[i] = NULL;
- }
- }
- }
-
- // The code generator may not have a current frame if there was no
- // fall through and none of the reaching frames needed merging.
- // In that case, clone the entry frame as the current frame.
- if (!cgen()->has_valid_frame()) {
- RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
- }
-
- // There may be unprocessed reaching frames that did not need
- // merge code. They will have unbound merge labels. Bind their
- // merge labels to be the same as the entry label and deallocate
- // them.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (!merge_labels_[i].is_bound()) {
- reaching_frames_[i] = NULL;
- __ bind(&merge_labels_[i]);
- }
- }
-
- // There are non-NULL reaching frames with bound labels for each
- // merge block, but only on backward targets.
- } else {
- // There were no forward jumps. There must be a current frame and
- // this must be a bidirectional target.
- ASSERT(reaching_frames_.length() == 1);
- ASSERT(reaching_frames_[0] != NULL);
- ASSERT(direction_ == BIDIRECTIONAL);
-
- // Use a copy of the reaching frame so the original can be saved
- // for possible reuse as a backward merge block.
- RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
- __ bind(&merge_labels_[0]);
- cgen()->frame()->MergeTo(entry_frame_);
- }
-
- __ bind(&entry_label_);
-}
-
-
-void BreakTarget::Jump() {
- // Drop leftover statement state from the frame before merging, without
- // emitting code.
- ASSERT(cgen()->has_valid_frame());
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- DoJump();
-}
-
-
-void BreakTarget::Jump(Result* arg) {
- // Drop leftover statement state from the frame before merging, without
- // emitting code.
- ASSERT(cgen()->has_valid_frame());
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- cgen()->frame()->Push(arg);
- DoJump();
-}
-
-
-void BreakTarget::Bind() {
-#ifdef DEBUG
- // All the forward-reaching frames should have been adjusted at the
- // jumps to this target.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- ASSERT(reaching_frames_[i] == NULL ||
- reaching_frames_[i]->height() == expected_height_);
- }
-#endif
- // Drop leftover statement state from the frame before merging, even on
- // the fall through. This is so we can bind the return target with state
- // on the frame.
- if (cgen()->has_valid_frame()) {
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- }
- DoBind();
-}
-
-
-void BreakTarget::Bind(Result* arg) {
-#ifdef DEBUG
- // All the forward-reaching frames should have been adjusted at the
- // jumps to this target.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- ASSERT(reaching_frames_[i] == NULL ||
- reaching_frames_[i]->height() == expected_height_ + 1);
- }
-#endif
- // Drop leftover statement state from the frame before merging, even on
- // the fall through. This is so we can bind the return target with state
- // on the frame.
- if (cgen()->has_valid_frame()) {
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- cgen()->frame()->Push(arg);
- }
- DoBind();
- *arg = cgen()->frame()->Pop();
-}
-
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc b/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc
deleted file mode 100644
index 2c5541b..0000000
--- a/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc
+++ /dev/null
@@ -1,4158 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "ia32/lithium-codegen-ia32.h"
-#include "code-stubs.h"
-#include "deoptimizer.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-// When invoking builtins, we need to record the safepoint in the middle of
-// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator : public PostCallGenerator {
- public:
- SafepointGenerator(LCodeGen* codegen,
- LPointerMap* pointers,
- int deoptimization_index)
- : codegen_(codegen),
- pointers_(pointers),
- deoptimization_index_(deoptimization_index) {}
- virtual ~SafepointGenerator() { }
-
- virtual void Generate() {
- codegen_->RecordSafepoint(pointers_, deoptimization_index_);
- }
-
- private:
- LCodeGen* codegen_;
- LPointerMap* pointers_;
- int deoptimization_index_;
-};
-
-
-#define __ masm()->
-
-bool LCodeGen::GenerateCode() {
- HPhase phase("Code generation", chunk());
- ASSERT(is_unused());
- status_ = GENERATING;
- CpuFeatures::Scope scope(SSE2);
- return GeneratePrologue() &&
- GenerateBody() &&
- GenerateDeferredCode() &&
- GenerateSafepointTable();
-}
-
-
-void LCodeGen::FinishCode(Handle<Code> code) {
- ASSERT(is_done());
- code->set_stack_slots(StackSlotCount());
- code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- PopulateDeoptimizationData(code);
- Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
-}
-
-
-void LCodeGen::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LCodeGen in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
- status_ = ABORTED;
-}
-
-
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- size_t length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
-}
-
-
-bool LCodeGen::GeneratePrologue() {
- ASSERT(is_generating());
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
-#endif
-
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- __ push(edi); // Callee's JS function.
-
- // Reserve space for the stack slots needed by the code.
- int slots = StackSlotCount();
- if (slots > 0) {
- if (FLAG_debug_code) {
- __ mov(Operand(eax), Immediate(slots));
- Label loop;
- __ bind(&loop);
- __ push(Immediate(kSlotsZapValue));
- __ dec(eax);
- __ j(not_zero, &loop);
- } else {
- __ sub(Operand(esp), Immediate(slots * kPointerSize));
-#ifdef _MSC_VER
- // On windows, you may not access the stack more than one page below
- // the most recently mapped page. To make the allocated area randomly
- // accessible, we write to each page in turn (the value is irrelevant).
- const int kPageSize = 4 * KB;
- for (int offset = slots * kPointerSize - kPageSize;
- offset > 0;
- offset -= kPageSize) {
- __ mov(Operand(esp, offset), eax);
- }
-#endif
- }
- }
-
- // Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment(";;; Allocate local context");
- // Argument to NewContext is the function, which is still in edi.
- __ push(edi);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewContext, 1);
- }
- RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
- // Context is returned in both eax and esi. It replaces the context
- // passed to us. It's saved in the stack and kept live in esi.
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
-
- // Copy parameters into context if necessary.
- int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Slot* slot = scope()->parameter(i)->AsSlot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ mov(eax, Operand(ebp, parameter_offset));
- // Store it in the context.
- int context_offset = Context::SlotOffset(slot->index());
- __ mov(Operand(esi, context_offset), eax);
- // Update the write barrier. This clobbers all involved
- // registers, so we have to use a third register to avoid
- // clobbering esi.
- __ mov(ecx, esi);
- __ RecordWrite(ecx, context_offset, eax, ebx);
- }
- }
- Comment(";;; End allocate local context");
- }
-
- // Trace the call.
- if (FLAG_trace) {
- // We have not executed any compiled code yet, so esi still holds the
- // incoming context.
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
- if (instr->IsLabel()) {
- LLabel* label = LLabel::cast(instr);
- emit_instructions = !label->HasReplacement();
- }
-
- if (emit_instructions) {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
- instr->CompileToNative(this);
- }
- }
- return !is_aborted();
-}
-
-
-LInstruction* LCodeGen::GetNextInstruction() {
- if (current_instruction_ < instructions_->length() - 1) {
- return instructions_->at(current_instruction_ + 1);
- } else {
- return NULL;
- }
-}
-
-
-bool LCodeGen::GenerateDeferredCode() {
- ASSERT(is_generating());
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
- __ bind(code->entry());
- code->Generate();
- __ jmp(code->exit());
- }
-
- // Deferred code is the last part of the instruction sequence. Mark
- // the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateSafepointTable() {
- ASSERT(is_done());
- safepoints_.Emit(masm(), StackSlotCount());
- return !is_aborted();
-}
-
-
-Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
-}
-
-
-XMMRegister LCodeGen::ToDoubleRegister(int index) const {
- return XMMRegister::FromAllocationIndex(index);
-}
-
-
-Register LCodeGen::ToRegister(LOperand* op) const {
- ASSERT(op->IsRegister());
- return ToRegister(op->index());
-}
-
-
-XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
- ASSERT(op->IsDoubleRegister());
- return ToDoubleRegister(op->index());
-}
-
-
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
- Handle<Object> value = chunk_->LookupLiteral(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
- ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
- value->Number());
- return static_cast<int32_t>(value->Number());
-}
-
-
-Immediate LCodeGen::ToImmediate(LOperand* op) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
- return Immediate(static_cast<int32_t>(literal->Number()));
- } else if (r.IsDouble()) {
- Abort("unsupported double immediate");
- }
- ASSERT(r.IsTagged());
- return Immediate(literal);
-}
-
-
-Operand LCodeGen::ToOperand(LOperand* op) const {
- if (op->IsRegister()) return Operand(ToRegister(op));
- if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
- ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- int index = op->index();
- if (index >= 0) {
- // Local or spill slot. Skip the frame pointer, function, and
- // context in the fixed part of the frame.
- return Operand(ebp, -(index + 3) * kPointerSize);
- } else {
- // Incoming parameter. Skip the return address.
- return Operand(ebp, -(index - 1) * kPointerSize);
- }
-}
-
-
-Operand LCodeGen::HighOperand(LOperand* op) {
- ASSERT(op->IsDoubleStackSlot());
- int index = op->index();
- int offset = (index >= 0) ? index + 3 : index - 1;
- return Operand(ebp, -offset * kPointerSize);
-}
-
-
-void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation) {
- if (environment == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = environment->values()->length();
- // The output frame height does not include the parameters.
- int height = translation_size - environment->parameter_count();
-
- WriteTranslation(environment->outer(), translation);
- int closure_id = DefineDeoptimizationLiteral(environment->closure());
- translation->BeginFrame(environment->ast_id(), closure_id, height);
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = environment->values()->at(i);
- // spilled_registers_ and spilled_double_registers_ are either
- // both NULL or both set.
- if (environment->spilled_registers() != NULL && value != NULL) {
- if (value->IsRegister() &&
- environment->spilled_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(translation,
- environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i));
- } else if (
- value->IsDoubleRegister() &&
- environment->spilled_double_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(
- translation,
- environment->spilled_double_registers()[value->index()],
- false);
- }
- }
-
- AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
- }
-}
-
-
-void LCodeGen::AddToTranslation(Translation* translation,
- LOperand* op,
- bool is_tagged) {
- if (op == NULL) {
- // TODO(twuerthinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed from the deoptimizer. Currently
- // this is only used for the arguments object.
- translation->StoreArgumentsObject();
- } else if (op->IsStackSlot()) {
- if (is_tagged) {
- translation->StoreStackSlot(op->index());
- } else {
- translation->StoreInt32StackSlot(op->index());
- }
- } else if (op->IsDoubleStackSlot()) {
- translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = StackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
- } else if (op->IsRegister()) {
- Register reg = ToRegister(op);
- if (is_tagged) {
- translation->StoreRegister(reg);
- } else {
- translation->StoreInt32Register(reg);
- }
- } else if (op->IsDoubleRegister()) {
- XMMRegister reg = ToDoubleRegister(op);
- translation->StoreDoubleRegister(reg);
- } else if (op->IsConstantOperand()) {
- Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(literal);
- translation->StoreLiteral(src_index);
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- bool adjusted) {
- ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
-
- if (!adjusted) {
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- }
- __ call(code, mode);
-
- RegisterLazyDeoptimization(instr);
-
- // Signal that we don't inline smi code before these stubs in the
- // optimizing code generator.
- if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
- code->kind() == Code::COMPARE_IC) {
- __ nop();
- }
-}
-
-
-void LCodeGen::CallRuntime(const Runtime::Function* fun,
- int argc,
- LInstruction* instr,
- bool adjusted) {
- ASSERT(instr != NULL);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
-
- if (!adjusted) {
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- }
- __ CallRuntime(fun, argc);
-
- RegisterLazyDeoptimization(instr);
-}
-
-
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
- // Create the environment to bailout to. If the call has side effects
- // execution has to continue after the call otherwise execution can continue
- // from a previous bailout point repeating the call.
- LEnvironment* deoptimization_environment;
- if (instr->HasDeoptimizationEnvironment()) {
- deoptimization_environment = instr->deoptimization_environment();
- } else {
- deoptimization_environment = instr->environment();
- }
-
- RegisterEnvironmentForDeoptimization(deoptimization_environment);
- RecordSafepoint(instr->pointer_map(),
- deoptimization_environment->deoptimization_index());
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
- if (!environment->HasBeenRegistered()) {
- // Physical stack frame layout:
- // -x ............. -4 0 ..................................... y
- // [incoming arguments] [spill slots] [pushed outgoing arguments]
-
- // Layout of the environment:
- // 0 ..................................................... size-1
- // [parameters] [locals] [expression stack including arguments]
-
- // Layout of the translation:
- // 0 ........................................................ size - 1 + 4
- // [expression stack including arguments] [locals] [4 words] [parameters]
- // |>------------ translation_size ------------<|
-
- int frame_count = 0;
- for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
- ++frame_count;
- }
- Translation translation(&translations_, frame_count);
- WriteTranslation(environment, &translation);
- int deoptimization_index = deoptimizations_.length();
- environment->Register(deoptimization_index, translation.index());
- deoptimizations_.Add(environment);
- }
-}
-
-
-void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
- RegisterEnvironmentForDeoptimization(environment);
- ASSERT(environment->HasBeenRegistered());
- int id = environment->deoptimization_index();
- Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
- ASSERT(entry != NULL);
- if (entry == NULL) {
- Abort("bailout was not prepared");
- return;
- }
-
- if (FLAG_deopt_every_n_times != 0) {
- Handle<SharedFunctionInfo> shared(info_->shared_info());
- Label no_deopt;
- __ pushfd();
- __ push(eax);
- __ push(ebx);
- __ mov(ebx, shared);
- __ mov(eax, FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset));
- __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
- __ j(not_zero, &no_deopt);
- if (FLAG_trap_on_deopt) __ int3();
- __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
- __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
- __ pop(ebx);
- __ pop(eax);
- __ popfd();
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
-
- __ bind(&no_deopt);
- __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
- __ pop(ebx);
- __ pop(eax);
- __ popfd();
- }
-
- if (cc == no_condition) {
- if (FLAG_trap_on_deopt) __ int3();
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- if (FLAG_trap_on_deopt) {
- NearLabel done;
- __ j(NegateCondition(cc), &done);
- __ int3();
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
- __ bind(&done);
- } else {
- __ j(cc, entry, RelocInfo::RUNTIME_ENTRY, not_taken);
- }
- }
-}
-
-
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- ASSERT(FLAG_deopt);
- Handle<DeoptimizationInputData> data =
- factory()->NewDeoptimizationInputData(length, TENURED);
-
- Handle<ByteArray> translations = translations_.CreateByteArray();
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, Smi::FromInt(env->ast_id()));
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
- int result = deoptimization_literals_.length();
- for (int i = 0; i < deoptimization_literals_.length(); ++i) {
- if (deoptimization_literals_[i].is_identical_to(literal)) return i;
- }
- deoptimization_literals_.Add(literal);
- return result;
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- ASSERT(deoptimization_literals_.length() == 0);
-
- const ZoneList<Handle<JSFunction> >* inlined_closures =
- chunk()->inlined_closures();
-
- for (int i = 0, length = inlined_closures->length();
- i < length;
- i++) {
- DefineDeoptimizationLiteral(inlined_closures->at(i));
- }
-
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
-void LCodeGen::RecordSafepoint(
- LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- int deoptimization_index) {
- const ZoneList<LOperand*>* operands = pointers->operands();
- Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
- kind, arguments, deoptimization_index);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index());
- } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer));
- }
- }
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- int deoptimization_index) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
-}
-
-
-void LCodeGen::RecordSafepoint(int deoptimization_index) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition);
- RecordSafepoint(&empty_pointers, deoptimization_index);
-}
-
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- int deoptimization_index) {
- RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
- deoptimization_index);
-}
-
-
-void LCodeGen::RecordPosition(int position) {
- if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
- masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::DoLabel(LLabel* label) {
- if (label->is_loop_header()) {
- Comment(";;; B%d - LOOP entry", label->block_id());
- } else {
- Comment(";;; B%d", label->block_id());
- }
- __ bind(label->label());
- current_block_ = label->block_id();
- LCodeGen::DoGap(label);
-}
-
-
-void LCodeGen::DoParallelMove(LParallelMove* move) {
- resolver_.Resolve(move);
-}
-
-
-void LCodeGen::DoGap(LGap* gap) {
- for (int i = LGap::FIRST_INNER_POSITION;
- i <= LGap::LAST_INNER_POSITION;
- i++) {
- LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
- LParallelMove* move = gap->GetParallelMove(inner_pos);
- if (move != NULL) DoParallelMove(move);
- }
-
- LInstruction* next = GetNextInstruction();
- if (next != NULL && next->IsLazyBailout()) {
- int pc = masm()->pc_offset();
- safepoints_.SetPcAfterGap(pc);
- }
-}
-
-
-void LCodeGen::DoParameter(LParameter* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoCallStub(LCallStub* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->result()).is(eax));
- switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpConstructResult: {
- RegExpConstructResultStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::RegExpExec: {
- RegExpExecStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::SubString: {
- SubStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringAdd: {
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringCompare: {
- StringCompareStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::TranscendentalCache: {
- TranscendentalCacheStub stub(instr->transcendental_type(),
- TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->InputAt(0));
-
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
-
- if (divisor < 0) divisor = -divisor;
-
- NearLabel positive_dividend, done;
- __ test(dividend, Operand(dividend));
- __ j(not_sign, &positive_dividend);
- __ neg(dividend);
- __ and_(dividend, divisor - 1);
- __ neg(dividend);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ j(not_zero, &done);
- DeoptimizeIf(no_condition, instr->environment());
- }
- __ bind(&positive_dividend);
- __ and_(dividend, divisor - 1);
- __ bind(&done);
- } else {
- LOperand* right = instr->InputAt(1);
- ASSERT(ToRegister(instr->InputAt(0)).is(eax));
- ASSERT(ToRegister(instr->result()).is(edx));
-
- Register right_reg = ToRegister(right);
- ASSERT(!right_reg.is(eax));
- ASSERT(!right_reg.is(edx));
-
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ test(right_reg, ToOperand(right));
- DeoptimizeIf(zero, instr->environment());
- }
-
- // Sign extend to edx.
- __ cdq();
-
- // Check for (0 % -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- NearLabel positive_left;
- NearLabel done;
- __ test(eax, Operand(eax));
- __ j(not_sign, &positive_left);
- __ idiv(right_reg);
-
- // Test the remainder for 0, because then the result would be -0.
- __ test(edx, Operand(edx));
- __ j(not_zero, &done);
-
- DeoptimizeIf(no_condition, instr->environment());
- __ bind(&positive_left);
- __ idiv(right_reg);
- __ bind(&done);
- } else {
- __ idiv(right_reg);
- }
- }
-}
-
-
-void LCodeGen::DoDivI(LDivI* instr) {
- LOperand* right = instr->InputAt(1);
- ASSERT(ToRegister(instr->result()).is(eax));
- ASSERT(ToRegister(instr->InputAt(0)).is(eax));
- ASSERT(!ToRegister(instr->InputAt(1)).is(eax));
- ASSERT(!ToRegister(instr->InputAt(1)).is(edx));
-
- Register left_reg = eax;
-
- // Check for x / 0.
- Register right_reg = ToRegister(right);
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ test(right_reg, ToOperand(right));
- DeoptimizeIf(zero, instr->environment());
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- NearLabel left_not_zero;
- __ test(left_reg, Operand(left_reg));
- __ j(not_zero, &left_not_zero);
- __ test(right_reg, ToOperand(right));
- DeoptimizeIf(sign, instr->environment());
- __ bind(&left_not_zero);
- }
-
- // Check for (-kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- NearLabel left_not_min_int;
- __ cmp(left_reg, kMinInt);
- __ j(not_zero, &left_not_min_int);
- __ cmp(right_reg, -1);
- DeoptimizeIf(zero, instr->environment());
- __ bind(&left_not_min_int);
- }
-
- // Sign extend to edx.
- __ cdq();
- __ idiv(right_reg);
-
- // Deoptimize if remainder is not 0.
- __ test(edx, Operand(edx));
- DeoptimizeIf(not_zero, instr->environment());
-}
-
-
-void LCodeGen::DoMulI(LMulI* instr) {
- Register left = ToRegister(instr->InputAt(0));
- LOperand* right = instr->InputAt(1);
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ mov(ToRegister(instr->TempAt(0)), left);
- }
-
- if (right->IsConstantOperand()) {
- // Try strength reductions on the multiplication.
- // All replacement instructions are at most as long as the imul
- // and have better latency.
- int constant = ToInteger32(LConstantOperand::cast(right));
- if (constant == -1) {
- __ neg(left);
- } else if (constant == 0) {
- __ xor_(left, Operand(left));
- } else if (constant == 2) {
- __ add(left, Operand(left));
- } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- // If we know that the multiplication can't overflow, it's safe to
- // use instructions that don't set the overflow flag for the
- // multiplication.
- switch (constant) {
- case 1:
- // Do nothing.
- break;
- case 3:
- __ lea(left, Operand(left, left, times_2, 0));
- break;
- case 4:
- __ shl(left, 2);
- break;
- case 5:
- __ lea(left, Operand(left, left, times_4, 0));
- break;
- case 8:
- __ shl(left, 3);
- break;
- case 9:
- __ lea(left, Operand(left, left, times_8, 0));
- break;
- case 16:
- __ shl(left, 4);
- break;
- default:
- __ imul(left, left, constant);
- break;
- }
- } else {
- __ imul(left, left, constant);
- }
- } else {
- __ imul(left, ToOperand(right));
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Bail out if the result is supposed to be negative zero.
- NearLabel done;
- __ test(left, Operand(left));
- __ j(not_zero, &done);
- if (right->IsConstantOperand()) {
- if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
- DeoptimizeIf(no_condition, instr->environment());
- }
- } else {
- // Test the non-zero operand for negative sign.
- __ or_(ToRegister(instr->TempAt(0)), ToOperand(right));
- DeoptimizeIf(sign, instr->environment());
- }
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
- ASSERT(left->IsRegister());
-
- if (right->IsConstantOperand()) {
- int right_operand = ToInteger32(LConstantOperand::cast(right));
- switch (instr->op()) {
- case Token::BIT_AND:
- __ and_(ToRegister(left), right_operand);
- break;
- case Token::BIT_OR:
- __ or_(ToRegister(left), right_operand);
- break;
- case Token::BIT_XOR:
- __ xor_(ToRegister(left), right_operand);
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- switch (instr->op()) {
- case Token::BIT_AND:
- __ and_(ToRegister(left), ToOperand(right));
- break;
- case Token::BIT_OR:
- __ or_(ToRegister(left), ToOperand(right));
- break;
- case Token::BIT_XOR:
- __ xor_(ToRegister(left), ToOperand(right));
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
- ASSERT(left->IsRegister());
- if (right->IsRegister()) {
- ASSERT(ToRegister(right).is(ecx));
-
- switch (instr->op()) {
- case Token::SAR:
- __ sar_cl(ToRegister(left));
- break;
- case Token::SHR:
- __ shr_cl(ToRegister(left));
- if (instr->can_deopt()) {
- __ test(ToRegister(left), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
- }
- break;
- case Token::SHL:
- __ shl_cl(ToRegister(left));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- int value = ToInteger32(LConstantOperand::cast(right));
- uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
- switch (instr->op()) {
- case Token::SAR:
- if (shift_count != 0) {
- __ sar(ToRegister(left), shift_count);
- }
- break;
- case Token::SHR:
- if (shift_count == 0 && instr->can_deopt()) {
- __ test(ToRegister(left), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
- } else {
- __ shr(ToRegister(left), shift_count);
- }
- break;
- case Token::SHL:
- if (shift_count != 0) {
- __ shl(ToRegister(left), shift_count);
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
-
- if (right->IsConstantOperand()) {
- __ sub(ToOperand(left), ToImmediate(right));
- } else {
- __ sub(ToRegister(left), ToOperand(right));
- }
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
- }
-}
-
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
- ASSERT(instr->result()->IsRegister());
- __ Set(ToRegister(instr->result()), Immediate(instr->value()));
-}
-
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
- ASSERT(instr->result()->IsDoubleRegister());
- XMMRegister res = ToDoubleRegister(instr->result());
- double v = instr->value();
- // Use xor to produce +0.0 in a fast and compact way, but avoid to
- // do so if the constant is -0.0.
- if (BitCast<uint64_t, double>(v) == 0) {
- __ xorpd(res, res);
- } else {
- Register temp = ToRegister(instr->TempAt(0));
- uint64_t int_val = BitCast<uint64_t, double>(v);
- int32_t lower = static_cast<int32_t>(int_val);
- int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatures::Scope scope(SSE4_1);
- if (lower != 0) {
- __ Set(temp, Immediate(lower));
- __ movd(res, Operand(temp));
- __ Set(temp, Immediate(upper));
- __ pinsrd(res, Operand(temp), 1);
- } else {
- __ xorpd(res, res);
- __ Set(temp, Immediate(upper));
- __ pinsrd(res, Operand(temp), 1);
- }
- } else {
- __ Set(temp, Immediate(upper));
- __ movd(res, Operand(temp));
- __ psllq(res, 32);
- if (lower != 0) {
- __ Set(temp, Immediate(lower));
- __ movd(xmm0, Operand(temp));
- __ por(res, xmm0);
- }
- }
- }
-}
-
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
- ASSERT(instr->result()->IsRegister());
- __ Set(ToRegister(instr->result()), Immediate(instr->value()));
-}
-
-
-void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
- __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
-}
-
-
-void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
- __ mov(result, FieldOperand(array, FixedArray::kLengthOffset));
-}
-
-
-void LCodeGen::DoExternalArrayLength(LExternalArrayLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
- __ mov(result, FieldOperand(array, ExternalArray::kLengthOffset));
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->TempAt(0));
- ASSERT(input.is(result));
- NearLabel done;
- // If the object is a smi return the object.
- __ test(input, Immediate(kSmiTagMask));
- __ j(zero, &done);
-
- // If the object is not a value type, return the object.
- __ CmpObjectType(input, JS_VALUE_TYPE, map);
- __ j(not_equal, &done);
- __ mov(result, FieldOperand(input, JSValue::kValueOffset));
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoBitNotI(LBitNotI* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->Equals(instr->result()));
- __ not_(ToRegister(input));
-}
-
-
-void LCodeGen::DoThrow(LThrow* instr) {
- __ push(ToOperand(instr->InputAt(0)));
- CallRuntime(Runtime::kThrow, 1, instr, false);
-
- if (FLAG_debug_code) {
- Comment("Unreachable code.");
- __ int3();
- }
-}
-
-
-void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
-
- if (right->IsConstantOperand()) {
- __ add(ToOperand(left), ToImmediate(right));
- } else {
- __ add(ToRegister(left), ToOperand(right));
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
- }
-}
-
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- XMMRegister left = ToDoubleRegister(instr->InputAt(0));
- XMMRegister right = ToDoubleRegister(instr->InputAt(1));
- XMMRegister result = ToDoubleRegister(instr->result());
- // Modulo uses a fixed result register.
- ASSERT(instr->op() == Token::MOD || left.is(result));
- switch (instr->op()) {
- case Token::ADD:
- __ addsd(left, right);
- break;
- case Token::SUB:
- __ subsd(left, right);
- break;
- case Token::MUL:
- __ mulsd(left, right);
- break;
- case Token::DIV:
- __ divsd(left, right);
- break;
- case Token::MOD: {
- // Pass two doubles as arguments on the stack.
- __ PrepareCallCFunction(4, eax);
- __ movdbl(Operand(esp, 0 * kDoubleSize), left);
- __ movdbl(Operand(esp, 1 * kDoubleSize), right);
- __ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()),
- 4);
-
- // Return value is in st(0) on ia32.
- // Store it into the (fixed) result register.
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ movdbl(result, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(edx));
- ASSERT(ToRegister(instr->InputAt(1)).is(eax));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
-}
-
-
-int LCodeGen::GetNextEmittedBlock(int block) {
- for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
- LLabel* label = chunk_->GetLabel(i);
- if (!label->HasReplacement()) return i;
- }
- return -1;
-}
-
-
-void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
- int next_block = GetNextEmittedBlock(current_block_);
- right_block = chunk_->LookupDestination(right_block);
- left_block = chunk_->LookupDestination(left_block);
-
- if (right_block == left_block) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
- } else if (right_block == next_block) {
- __ j(cc, chunk_->GetAssemblyLabel(left_block));
- } else {
- __ j(cc, chunk_->GetAssemblyLabel(left_block));
- __ jmp(chunk_->GetAssemblyLabel(right_block));
- }
-}
-
-
-void LCodeGen::DoBranch(LBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Representation r = instr->hydrogen()->representation();
- if (r.IsInteger32()) {
- Register reg = ToRegister(instr->InputAt(0));
- __ test(reg, Operand(reg));
- EmitBranch(true_block, false_block, not_zero);
- } else if (r.IsDouble()) {
- XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
- __ xorpd(xmm0, xmm0);
- __ ucomisd(reg, xmm0);
- EmitBranch(true_block, false_block, not_equal);
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->InputAt(0));
- if (instr->hydrogen()->type().IsBoolean()) {
- __ cmp(reg, factory()->true_value());
- EmitBranch(true_block, false_block, equal);
- } else {
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- __ cmp(reg, factory()->undefined_value());
- __ j(equal, false_label);
- __ cmp(reg, factory()->true_value());
- __ j(equal, true_label);
- __ cmp(reg, factory()->false_value());
- __ j(equal, false_label);
- __ test(reg, Operand(reg));
- __ j(equal, false_label);
- __ test(reg, Immediate(kSmiTagMask));
- __ j(zero, true_label);
-
- // Test for double values. Zero is false.
- NearLabel call_stub;
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(not_equal, &call_stub);
- __ fldz();
- __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
- __ FCmp();
- __ j(zero, false_label);
- __ jmp(true_label);
-
- // The conversion stub doesn't cause garbage collections so it's
- // safe to not record a safepoint after the call.
- __ bind(&call_stub);
- ToBooleanStub stub;
- __ pushad();
- __ push(reg);
- __ CallStub(&stub);
- __ test(eax, Operand(eax));
- __ popad();
- EmitBranch(true_block, false_block, not_zero);
- }
- }
-}
-
-
-void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
- block = chunk_->LookupDestination(block);
- int next_block = GetNextEmittedBlock(current_block_);
- if (block != next_block) {
- // Perform stack overflow check if this goto needs it before jumping.
- if (deferred_stack_check != NULL) {
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, chunk_->GetAssemblyLabel(block));
- __ jmp(deferred_stack_check->entry());
- deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
- } else {
- __ jmp(chunk_->GetAssemblyLabel(block));
- }
- }
-}
-
-
-void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
- __ pushad();
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
- __ popad();
-}
-
-void LCodeGen::DoGoto(LGoto* instr) {
- class DeferredStackCheck: public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- private:
- LGoto* instr_;
- };
-
- DeferredStackCheck* deferred = NULL;
- if (instr->include_stack_check()) {
- deferred = new DeferredStackCheck(this, instr);
- }
- EmitGoto(instr->block_id(), deferred);
-}
-
-
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
- Condition cond = no_condition;
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT:
- cond = equal;
- break;
- case Token::LT:
- cond = is_unsigned ? below : less;
- break;
- case Token::GT:
- cond = is_unsigned ? above : greater;
- break;
- case Token::LTE:
- cond = is_unsigned ? below_equal : less_equal;
- break;
- case Token::GTE:
- cond = is_unsigned ? above_equal : greater_equal;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
- return cond;
-}
-
-
-void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
- if (right->IsConstantOperand()) {
- __ cmp(ToOperand(left), ToImmediate(right));
- } else {
- __ cmp(ToRegister(left), ToOperand(right));
- }
-}
-
-
-void LCodeGen::DoCmpID(LCmpID* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- LOperand* result = instr->result();
-
- NearLabel unordered;
- if (instr->is_double()) {
- // Don't base result on EFLAGS when a NaN is involved. Instead
- // jump to the unordered case, which produces a false value.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, &unordered, not_taken);
- } else {
- EmitCmpI(left, right);
- }
-
- NearLabel done;
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
- __ mov(ToRegister(result), factory()->true_value());
- __ j(cc, &done);
-
- __ bind(&unordered);
- __ mov(ToRegister(result), factory()->false_value());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- if (instr->is_double()) {
- // Don't base result on EFLAGS when a NaN is involved. Instead
- // jump to the false block.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
- } else {
- EmitCmpI(left, right);
- }
-
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
- EmitBranch(true_block, false_block, cc);
-}
-
-
-void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- Register result = ToRegister(instr->result());
-
- __ cmp(left, Operand(right));
- __ mov(result, factory()->true_value());
- NearLabel done;
- __ j(equal, &done);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- __ cmp(left, Operand(right));
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoIsNull(LIsNull* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- // TODO(fsc): If the expression is known to be a smi, then it's
- // definitely not null. Materialize false.
-
- __ cmp(reg, factory()->null_value());
- if (instr->is_strict()) {
- __ mov(result, factory()->true_value());
- NearLabel done;
- __ j(equal, &done);
- __ mov(result, factory()->false_value());
- __ bind(&done);
- } else {
- NearLabel true_value, false_value, done;
- __ j(equal, &true_value);
- __ cmp(reg, factory()->undefined_value());
- __ j(equal, &true_value);
- __ test(reg, Immediate(kSmiTagMask));
- __ j(zero, &false_value);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- Register scratch = result;
- __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
- __ test(scratch, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, &true_value);
- __ bind(&false_value);
- __ mov(result, factory()->false_value());
- __ jmp(&done);
- __ bind(&true_value);
- __ mov(result, factory()->true_value());
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
-
- // TODO(fsc): If the expression is known to be a smi, then it's
- // definitely not null. Jump to the false block.
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ cmp(reg, factory()->null_value());
- if (instr->is_strict()) {
- EmitBranch(true_block, false_block, equal);
- } else {
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ j(equal, true_label);
- __ cmp(reg, factory()->undefined_value());
- __ j(equal, true_label);
- __ test(reg, Immediate(kSmiTagMask));
- __ j(zero, false_label);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- Register scratch = ToRegister(instr->TempAt(0));
- __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
- __ test(scratch, Immediate(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, not_zero);
- }
-}
-
-
-Condition LCodeGen::EmitIsObject(Register input,
- Register temp1,
- Register temp2,
- Label* is_not_object,
- Label* is_object) {
- ASSERT(!input.is(temp1));
- ASSERT(!input.is(temp2));
- ASSERT(!temp1.is(temp2));
-
- __ test(input, Immediate(kSmiTagMask));
- __ j(equal, is_not_object);
-
- __ cmp(input, isolate()->factory()->null_value());
- __ j(equal, is_object);
-
- __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined.
- __ movzx_b(temp2, FieldOperand(temp1, Map::kBitFieldOffset));
- __ test(temp2, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, is_not_object);
-
- __ movzx_b(temp2, FieldOperand(temp1, Map::kInstanceTypeOffset));
- __ cmp(temp2, FIRST_JS_OBJECT_TYPE);
- __ j(below, is_not_object);
- __ cmp(temp2, LAST_JS_OBJECT_TYPE);
- return below_equal;
-}
-
-
-void LCodeGen::DoIsObject(LIsObject* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Register temp = ToRegister(instr->TempAt(0));
- Label is_false, is_true, done;
-
- Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
- __ j(true_cond, &is_true);
-
- __ bind(&is_false);
- __ mov(result, factory()->false_value());
- __ jmp(&done);
-
- __ bind(&is_true);
- __ mov(result, factory()->true_value());
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
- Register temp2 = ToRegister(instr->TempAt(1));
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition true_cond = EmitIsObject(reg, temp, temp2, false_label, true_label);
-
- EmitBranch(true_block, false_block, true_cond);
-}
-
-
-void LCodeGen::DoIsSmi(LIsSmi* instr) {
- Operand input = ToOperand(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ test(input, Immediate(kSmiTagMask));
- __ mov(result, factory()->true_value());
- NearLabel done;
- __ j(zero, &done);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- Operand input = ToOperand(instr->InputAt(0));
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ test(input, Immediate(kSmiTagMask));
- EmitBranch(true_block, false_block, zero);
-}
-
-
-static InstanceType TestType(HHasInstanceType* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == FIRST_TYPE) return to;
- ASSERT(from == to || to == LAST_TYPE);
- return from;
-}
-
-
-static Condition BranchCondition(HHasInstanceType* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == to) return equal;
- if (to == LAST_TYPE) return above_equal;
- if (from == FIRST_TYPE) return below_equal;
- UNREACHABLE();
- return equal;
-}
-
-
-void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ test(input, Immediate(kSmiTagMask));
- NearLabel done, is_false;
- __ j(zero, &is_false);
- __ CmpObjectType(input, TestType(instr->hydrogen()), result);
- __ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
- __ mov(result, factory()->true_value());
- __ jmp(&done);
- __ bind(&is_false);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- __ test(input, Immediate(kSmiTagMask));
- __ j(zero, false_label);
-
- __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
- EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
-}
-
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- if (FLAG_debug_code) {
- __ AbortIfNotString(input);
- }
-
- __ mov(result, FieldOperand(input, String::kHashFieldOffset));
- __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ mov(result, factory()->true_value());
- __ test(FieldOperand(input, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- NearLabel done;
- __ j(zero, &done);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
- LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ test(FieldOperand(input, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, equal);
-}
-
-
-// Branches to a label or falls through with the answer in the z flag. Trashes
-// the temp registers, but not the input. Only input and temp2 may alias.
-void LCodeGen::EmitClassOfTest(Label* is_true,
- Label* is_false,
- Handle<String>class_name,
- Register input,
- Register temp,
- Register temp2) {
- ASSERT(!input.is(temp));
- ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
- __ test(input, Immediate(kSmiTagMask));
- __ j(zero, is_false);
- __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
- __ j(below, is_false);
-
- // Map is now in temp.
- // Functions have class 'Function'.
- __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
- if (class_name->IsEqualTo(CStrVector("Function"))) {
- __ j(equal, is_true);
- } else {
- __ j(equal, is_false);
- }
-
- // Check if the constructor in the map is a function.
- __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
-
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-
- // Objects with a non-function constructor have class 'Object'.
- __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
- if (class_name->IsEqualTo(CStrVector("Object"))) {
- __ j(not_equal, is_true);
- } else {
- __ j(not_equal, is_false);
- }
-
- // temp now contains the constructor function. Grab the
- // instance class name from there.
- __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ mov(temp, FieldOperand(temp,
- SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is a symbol because it's a literal.
- // The name in the constructor is a symbol because of the way the context is
- // booted. This routine isn't expected to work for random API-created
- // classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are symbols it is sufficient to use an identity
- // comparison.
- __ cmp(temp, class_name);
- // End with the answer in the z flag.
-}
-
-
-void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- ASSERT(input.is(result));
- Register temp = ToRegister(instr->TempAt(0));
- Handle<String> class_name = instr->hydrogen()->class_name();
- NearLabel done;
- Label is_true, is_false;
-
- EmitClassOfTest(&is_true, &is_false, class_name, input, temp, input);
-
- __ j(not_equal, &is_false);
-
- __ bind(&is_true);
- __ mov(result, factory()->true_value());
- __ jmp(&done);
-
- __ bind(&is_false);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
- Register temp2 = ToRegister(instr->TempAt(1));
- if (input.is(temp)) {
- // Swap.
- Register swapper = temp;
- temp = temp2;
- temp2 = swapper;
- }
- Handle<String> class_name = instr->hydrogen()->class_name();
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
-
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- int true_block = instr->true_block_id();
- int false_block = instr->false_block_id();
-
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- // Object and function are in fixed registers defined by the stub.
- ASSERT(ToRegister(instr->context()).is(esi));
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-
- NearLabel true_value, done;
- __ test(eax, Operand(eax));
- __ j(zero, &true_value);
- __ mov(ToRegister(instr->result()), factory()->false_value());
- __ jmp(&done);
- __ bind(&true_value);
- __ mov(ToRegister(instr->result()), factory()->true_value());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ test(eax, Operand(eax));
- EmitBranch(true_block, false_block, zero);
-}
-
-
-void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
- public:
- DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
- }
-
- Label* map_check() { return &map_check_; }
-
- private:
- LInstanceOfKnownGlobal* instr_;
- Label map_check_;
- };
-
- DeferredInstanceOfKnownGlobal* deferred;
- deferred = new DeferredInstanceOfKnownGlobal(this, instr);
-
- Label done, false_result;
- Register object = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
-
- // A Smi is not an instance of anything.
- __ test(object, Immediate(kSmiTagMask));
- __ j(zero, &false_result, not_taken);
-
- // This is the inlined call site instanceof cache. The two occurences of the
- // hole value will be patched to the last map/result pair generated by the
- // instanceof stub.
- NearLabel cache_miss;
- Register map = ToRegister(instr->TempAt(0));
- __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
- __ bind(deferred->map_check()); // Label for calculating code patching.
- __ cmp(map, factory()->the_hole_value()); // Patched to cached map.
- __ j(not_equal, &cache_miss, not_taken);
- __ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
- __ jmp(&done);
-
- // The inlined call site cache did not match. Check for null and string
- // before calling the deferred code.
- __ bind(&cache_miss);
- // Null is not an instance of anything.
- __ cmp(object, factory()->null_value());
- __ j(equal, &false_result);
-
- // String values are not instances of anything.
- Condition is_string = masm_->IsObjectStringType(object, temp, temp);
- __ j(is_string, &false_result);
-
- // Go to the deferred code.
- __ jmp(deferred->entry());
-
- __ bind(&false_result);
- __ mov(ToRegister(instr->result()), factory()->false_value());
-
- // Here result has either true or false. Deferred code also produces true or
- // false object.
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
- __ PushSafepointRegisters();
-
- InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kArgsInRegisters);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kCallSiteInlineCheck);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kReturnTrueFalseObject);
- InstanceofStub stub(flags);
-
- // Get the temp register reserved by the instruction. This needs to be edi as
- // its slot of the pushing of safepoint registers is used to communicate the
- // offset to the location of the map check.
- Register temp = ToRegister(instr->TempAt(0));
- ASSERT(temp.is(edi));
- __ mov(InstanceofStub::right(), Immediate(instr->function()));
- static const int kAdditionalDelta = 16;
- int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
- __ mov(temp, Immediate(delta));
- __ StoreToSafepointRegisterSlot(temp, temp);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
- // Put the result value into the eax slot and restore all registers.
- __ StoreToSafepointRegisterSlot(eax, eax);
- __ PopSafepointRegisters();
-}
-
-
-static Condition ComputeCompareCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return equal;
- case Token::LT:
- return less;
- case Token::GT:
- return greater;
- case Token::LTE:
- return less_equal;
- case Token::GTE:
- return greater_equal;
- default:
- UNREACHABLE();
- return no_condition;
- }
-}
-
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
- Token::Value op = instr->op();
-
- Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
-
- Condition condition = ComputeCompareCondition(op);
- if (op == Token::GT || op == Token::LTE) {
- condition = ReverseCondition(condition);
- }
- NearLabel true_value, done;
- __ test(eax, Operand(eax));
- __ j(condition, &true_value);
- __ mov(ToRegister(instr->result()), factory()->false_value());
- __ jmp(&done);
- __ bind(&true_value);
- __ mov(ToRegister(instr->result()), factory()->true_value());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
- Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
-
- // The compare stub expects compare condition and the input operands
- // reversed for GT and LTE.
- Condition condition = ComputeCompareCondition(op);
- if (op == Token::GT || op == Token::LTE) {
- condition = ReverseCondition(condition);
- }
- __ test(eax, Operand(eax));
- EmitBranch(true_block, false_block, condition);
-}
-
-
-void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace) {
- // Preserve the return value on the stack and rely on the runtime call
- // to return the value in the same register. We're leaving the code
- // managed by the register allocator and tearing down the frame, it's
- // safe to write to the context register.
- __ push(eax);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- __ mov(esp, ebp);
- __ pop(ebp);
- __ Ret((ParameterCount() + 1) * kPointerSize, ecx);
-}
-
-
-void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
- Register result = ToRegister(instr->result());
- __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
- if (instr->hydrogen()->check_hole_value()) {
- __ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
- }
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->global_object()).is(eax));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- __ mov(ecx, instr->name());
- RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
- RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr);
-}
-
-
-void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register value = ToRegister(instr->InputAt(0));
- Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());
-
- // If the cell we are storing to contains the hole it could have
- // been deleted from the property dictionary. In that case, we need
- // to update the property details in the property dictionary to mark
- // it as no longer deleted. We deoptimize in that case.
- if (instr->hydrogen()->check_hole_value()) {
- __ cmp(cell_operand, factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
- }
-
- // Store the value.
- __ mov(cell_operand, value);
-}
-
-
-void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->global_object()).is(edx));
- ASSERT(ToRegister(instr->value()).is(eax));
-
- __ mov(ecx, instr->name());
- Handle<Code> ic = isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
-
-void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ mov(result, ContextOperand(context, instr->slot_index()));
-}
-
-
-void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register value = ToRegister(instr->value());
- __ mov(ContextOperand(context, instr->slot_index()), value);
- if (instr->needs_write_barrier()) {
- Register temp = ToRegister(instr->TempAt(0));
- int offset = Context::SlotOffset(instr->slot_index());
- __ RecordWrite(context, offset, value, temp);
- }
-}
-
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
- __ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
- } else {
- __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
- }
-}
-
-
-void LCodeGen::EmitLoadField(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name) {
- LookupResult lookup;
- type->LookupInDescriptors(NULL, *name, &lookup);
- ASSERT(lookup.IsProperty() && lookup.type() == FIELD);
- int index = lookup.GetLocalFieldIndexFromMap(*type);
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- __ mov(result, FieldOperand(object, offset + type->instance_size()));
- } else {
- // Non-negative property indices are in the properties array.
- __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
- }
-}
-
-
-void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
-
- int map_count = instr->hydrogen()->types()->length();
- Handle<String> name = instr->hydrogen()->name();
- if (map_count == 0) {
- ASSERT(instr->hydrogen()->need_generic());
- __ mov(ecx, name);
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
- } else {
- NearLabel done;
- for (int i = 0; i < map_count - 1; ++i) {
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- NearLabel next;
- __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
- __ j(not_equal, &next);
- EmitLoadField(result, object, map, name);
- __ jmp(&done);
- __ bind(&next);
- }
- Handle<Map> map = instr->hydrogen()->types()->last();
- __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
- if (instr->hydrogen()->need_generic()) {
- NearLabel generic;
- __ j(not_equal, &generic);
- EmitLoadField(result, object, map, name);
- __ jmp(&done);
- __ bind(&generic);
- __ mov(ecx, name);
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
- } else {
- DeoptimizeIf(not_equal, instr->environment());
- EmitLoadField(result, object, map, name);
- }
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->object()).is(eax));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- __ mov(ecx, instr->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Register function = ToRegister(instr->function());
- Register temp = ToRegister(instr->TempAt(0));
- Register result = ToRegister(instr->result());
-
- // Check that the function really is a function.
- __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
- DeoptimizeIf(not_equal, instr->environment());
-
- // Check whether the function has an instance prototype.
- NearLabel non_instance;
- __ test_b(FieldOperand(result, Map::kBitFieldOffset),
- 1 << Map::kHasNonInstancePrototype);
- __ j(not_zero, &non_instance);
-
- // Get the prototype or initial map from the function.
- __ mov(result,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check that the function has a prototype or an initial map.
- __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
- DeoptimizeIf(equal, instr->environment());
-
- // If the function does not have an initial map, we're done.
- NearLabel done;
- __ CmpObjectType(result, MAP_TYPE, temp);
- __ j(not_equal, &done);
-
- // Get the prototype from the initial map.
- __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
- __ jmp(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in the function's map.
- __ bind(&non_instance);
- __ mov(result, FieldOperand(result, Map::kConstructorOffset));
-
- // All done.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->InputAt(0));
- __ mov(result, FieldOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- NearLabel done;
- __ cmp(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(factory()->fixed_array_map()));
- __ j(equal, &done);
- __ cmp(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(factory()->fixed_cow_array_map()));
- __ j(equal, &done);
- Register temp((result.is(eax)) ? ebx : eax);
- __ push(temp);
- __ mov(temp, FieldOperand(result, HeapObject::kMapOffset));
- __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ sub(Operand(temp), Immediate(FIRST_EXTERNAL_ARRAY_TYPE));
- __ cmp(Operand(temp), Immediate(kExternalArrayTypeCount));
- __ pop(temp);
- __ Check(below, "Check for fast elements or pixel array failed.");
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadExternalArrayPointer(
- LLoadExternalArrayPointer* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->InputAt(0));
- __ mov(result, FieldOperand(input,
- ExternalArray::kExternalPointerOffset));
-}
-
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- Register arguments = ToRegister(instr->arguments());
- Register length = ToRegister(instr->length());
- Operand index = ToOperand(instr->index());
- Register result = ToRegister(instr->result());
-
- __ sub(length, index);
- DeoptimizeIf(below_equal, instr->environment());
-
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them add one more.
- __ mov(result, Operand(arguments, length, times_4, kPointerSize));
-}
-
-
-void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
- Register elements = ToRegister(instr->elements());
- Register key = ToRegister(instr->key());
- Register result = ToRegister(instr->result());
- ASSERT(result.is(elements));
-
- // Load the result.
- __ mov(result, FieldOperand(elements,
- key,
- times_pointer_size,
- FixedArray::kHeaderSize));
-
- // Check for the hole value.
- __ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
-}
-
-
-void LCodeGen::DoLoadKeyedSpecializedArrayElement(
- LLoadKeyedSpecializedArrayElement* instr) {
- Register external_pointer = ToRegister(instr->external_pointer());
- Register key = ToRegister(instr->key());
- ExternalArrayType array_type = instr->array_type();
- if (array_type == kExternalFloatArray) {
- XMMRegister result(ToDoubleRegister(instr->result()));
- __ movss(result, Operand(external_pointer, key, times_4, 0));
- __ cvtss2sd(result, result);
- } else {
- Register result(ToRegister(instr->result()));
- switch (array_type) {
- case kExternalByteArray:
- __ movsx_b(result, Operand(external_pointer, key, times_1, 0));
- break;
- case kExternalUnsignedByteArray:
- case kExternalPixelArray:
- __ movzx_b(result, Operand(external_pointer, key, times_1, 0));
- break;
- case kExternalShortArray:
- __ movsx_w(result, Operand(external_pointer, key, times_2, 0));
- break;
- case kExternalUnsignedShortArray:
- __ movzx_w(result, Operand(external_pointer, key, times_2, 0));
- break;
- case kExternalIntArray:
- __ mov(result, Operand(external_pointer, key, times_4, 0));
- break;
- case kExternalUnsignedIntArray:
- __ mov(result, Operand(external_pointer, key, times_4, 0));
- __ test(result, Operand(result));
- // TODO(danno): we could be more clever here, perhaps having a special
- // version of the stub that detects if the overflow case actually
- // happens, and generate code that returns a double rather than int.
- DeoptimizeIf(negative, instr->environment());
- break;
- case kExternalFloatArray:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->object()).is(edx));
- ASSERT(ToRegister(instr->key()).is(eax));
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- Register result = ToRegister(instr->result());
-
- // Check for arguments adapter frame.
- NearLabel done, adapted;
- __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(result),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adapted);
-
- // No arguments adaptor frame.
- __ mov(result, Operand(ebp));
- __ jmp(&done);
-
- // Arguments adaptor frame present.
- __ bind(&adapted);
- __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- // Result is the frame pointer for the frame if not adapted and for the real
- // frame below the adaptor frame if adapted.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Operand elem = ToOperand(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- NearLabel done;
-
- // If no arguments adaptor frame the number of arguments is fixed.
- __ cmp(ebp, elem);
- __ mov(result, Immediate(scope()->num_parameters()));
- __ j(equal, &done);
-
- // Arguments adaptor frame present. Get argument length from there.
- __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(result, Operand(result,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(result);
-
- // Argument length is in result register.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
- Register scratch = ToRegister(instr->TempAt(0));
- ASSERT(receiver.is(eax)); // Used for parameter count.
- ASSERT(function.is(edi)); // Required by InvokeFunction.
- ASSERT(ToRegister(instr->result()).is(eax));
-
- // If the receiver is null or undefined, we have to pass the global object
- // as a receiver.
- NearLabel global_object, receiver_ok;
- __ cmp(receiver, factory()->null_value());
- __ j(equal, &global_object);
- __ cmp(receiver, factory()->undefined_value());
- __ j(equal, &global_object);
-
- // The receiver should be a JS object.
- __ test(receiver, Immediate(kSmiTagMask));
- DeoptimizeIf(equal, instr->environment());
- __ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, scratch);
- DeoptimizeIf(below, instr->environment());
- __ jmp(&receiver_ok);
-
- __ bind(&global_object);
- // TODO(kmillikin): We have a hydrogen value for the global object. See
- // if it's better to use it than to explicitly fetch it from the context
- // here.
- __ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX));
- __ bind(&receiver_ok);
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- const uint32_t kArgumentsLimit = 1 * KB;
- __ cmp(length, kArgumentsLimit);
- DeoptimizeIf(above, instr->environment());
-
- __ push(receiver);
- __ mov(receiver, length);
-
- // Loop through the arguments pushing them onto the execution
- // stack.
- NearLabel invoke, loop;
- // length is a small non-negative integer, due to the test above.
- __ test(length, Operand(length));
- __ j(zero, &invoke);
- __ bind(&loop);
- __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
- __ dec(length);
- __ j(not_zero, &loop);
-
- // Invoke the function.
- __ bind(&invoke);
- ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
- LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
- RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
- SafepointGenerator safepoint_generator(this,
- pointers,
- env->deoptimization_index());
- v8::internal::ParameterCount actual(eax);
- __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
-}
-
-
-void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->InputAt(0);
- if (argument->IsConstantOperand()) {
- __ push(ToImmediate(argument));
- } else {
- __ push(ToOperand(argument));
- }
-}
-
-
-void LCodeGen::DoContext(LContext* instr) {
- Register result = ToRegister(instr->result());
- __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoOuterContext(LOuterContext* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ mov(result, Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ mov(result, FieldOperand(result, JSFunction::kContextOffset));
-}
-
-
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ mov(result, Operand(context, Context::SlotOffset(Context::GLOBAL_INDEX)));
-}
-
-
-void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global());
- Register result = ToRegister(instr->result());
- __ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
-}
-
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int arity,
- LInstruction* instr) {
- // Change context if needed.
- bool change_context =
- (info()->closure()->context() != function->context()) ||
- scope()->contains_with() ||
- (scope()->num_heap_slots() > 0);
- if (change_context) {
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- } else {
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- }
-
- // Set eax to arguments count if adaption is not needed. Assumes that eax
- // is available to write to at this point.
- if (!function->NeedsArgumentsAdaption()) {
- __ mov(eax, arity);
- }
-
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
-
- // Invoke function.
- if (*function == *info()->closure()) {
- __ CallSelf();
- } else {
- __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
- }
-
- // Setup deoptimization.
- RegisterLazyDeoptimization(instr);
-}
-
-
-void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
- ASSERT(ToRegister(instr->result()).is(eax));
- __ mov(edi, instr->function());
- CallKnownFunction(instr->function(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- Register input_reg = ToRegister(instr->InputAt(0));
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr->environment());
-
- Label done;
- Register tmp = input_reg.is(eax) ? ecx : eax;
- Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
-
- // Preserve the value of all registers.
- __ PushSafepointRegisters();
-
- Label negative;
- __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- // Check the sign of the argument. If the argument is positive, just
- // return it. We do not need to patch the stack since |input| and
- // |result| are the same register and |input| will be restored
- // unchanged by popping safepoint registers.
- __ test(tmp, Immediate(HeapNumber::kSignMask));
- __ j(not_zero, &negative);
- __ jmp(&done);
-
- __ bind(&negative);
-
- Label allocated, slow;
- __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
- __ jmp(&allocated);
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
- // Set the pointer to the new heap number in tmp.
- if (!tmp.is(eax)) __ mov(tmp, eax);
-
- // Restore input_reg after call to runtime.
- __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
-
- __ bind(&allocated);
- __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- __ and_(tmp2, ~HeapNumber::kSignMask);
- __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
- __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
- __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
- __ StoreToSafepointRegisterSlot(input_reg, tmp);
-
- __ bind(&done);
- __ PopSafepointRegisters();
-}
-
-
-void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
- Register input_reg = ToRegister(instr->InputAt(0));
- __ test(input_reg, Operand(input_reg));
- Label is_positive;
- __ j(not_sign, &is_positive);
- __ neg(input_reg);
- __ test(input_reg, Operand(input_reg));
- DeoptimizeIf(negative, instr->environment());
- __ bind(&is_positive);
-}
-
-
-void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
- // Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
- public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
- LUnaryMathOperation* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
- }
- private:
- LUnaryMathOperation* instr_;
- };
-
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
- Representation r = instr->hydrogen()->value()->representation();
-
- if (r.IsDouble()) {
- XMMRegister scratch = xmm0;
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
- __ pxor(scratch, scratch);
- __ subsd(scratch, input_reg);
- __ pand(input_reg, scratch);
- } else if (r.IsInteger32()) {
- EmitIntegerMathAbs(instr);
- } else { // Tagged case.
- DeferredMathAbsTaggedHeapNumber* deferred =
- new DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input_reg = ToRegister(instr->InputAt(0));
- // Smi check.
- __ test(input_reg, Immediate(kSmiTagMask));
- __ j(not_zero, deferred->entry());
- EmitIntegerMathAbs(instr);
- __ bind(deferred->exit());
- }
-}
-
-
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- XMMRegister xmm_scratch = xmm0;
- Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
- __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
- __ ucomisd(input_reg, xmm_scratch);
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(below_equal, instr->environment());
- } else {
- DeoptimizeIf(below, instr->environment());
- }
-
- // Use truncating instruction (OK because input is positive).
- __ cvttsd2si(output_reg, Operand(input_reg));
-
- // Overflow is signalled with minint.
- __ cmp(output_reg, 0x80000000u);
- DeoptimizeIf(equal, instr->environment());
-}
-
-
-void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- XMMRegister xmm_scratch = xmm0;
- Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
-
- // xmm_scratch = 0.5
- ExternalReference one_half = ExternalReference::address_of_one_half();
- __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
-
- // input = input + 0.5
- __ addsd(input_reg, xmm_scratch);
-
- // We need to return -0 for the input range [-0.5, 0[, otherwise
- // compute Math.floor(value + 0.5).
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(below_equal, instr->environment());
- } else {
- // If we don't need to bailout on -0, we check only bailout
- // on negative inputs.
- __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
- __ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(below, instr->environment());
- }
-
- // Compute Math.floor(value + 0.5).
- // Use truncating instruction (OK because input is positive).
- __ cvttsd2si(output_reg, Operand(input_reg));
-
- // Overflow is signalled with minint.
- __ cmp(output_reg, 0x80000000u);
- DeoptimizeIf(equal, instr->environment());
-}
-
-
-void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
- ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
- __ sqrtsd(input_reg, input_reg);
-}
-
-
-void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
- XMMRegister xmm_scratch = xmm0;
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
- ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
- __ xorpd(xmm_scratch, xmm_scratch);
- __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
- __ sqrtsd(input_reg, input_reg);
-}
-
-
-void LCodeGen::DoPower(LPower* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- DoubleRegister result_reg = ToDoubleRegister(instr->result());
- Representation exponent_type = instr->hydrogen()->right()->representation();
-
- if (exponent_type.IsDouble()) {
- // It is safe to use ebx directly since the instruction is marked
- // as a call.
- __ PrepareCallCFunction(4, ebx);
- __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
- __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
- __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
- 4);
- } else if (exponent_type.IsInteger32()) {
- // It is safe to use ebx directly since the instruction is marked
- // as a call.
- ASSERT(!ToRegister(right).is(ebx));
- __ PrepareCallCFunction(4, ebx);
- __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
- __ mov(Operand(esp, 1 * kDoubleSize), ToRegister(right));
- __ CallCFunction(ExternalReference::power_double_int_function(isolate()),
- 4);
- } else {
- ASSERT(exponent_type.IsTagged());
- CpuFeatures::Scope scope(SSE2);
- Register right_reg = ToRegister(right);
-
- Label non_smi, call;
- __ test(right_reg, Immediate(kSmiTagMask));
- __ j(not_zero, &non_smi);
- __ SmiUntag(right_reg);
- __ cvtsi2sd(result_reg, Operand(right_reg));
- __ jmp(&call);
-
- __ bind(&non_smi);
- // It is safe to use ebx directly since the instruction is marked
- // as a call.
- ASSERT(!right_reg.is(ebx));
- __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE , ebx);
- DeoptimizeIf(not_equal, instr->environment());
- __ movdbl(result_reg, FieldOperand(right_reg, HeapNumber::kValueOffset));
-
- __ bind(&call);
- __ PrepareCallCFunction(4, ebx);
- __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
- __ movdbl(Operand(esp, 1 * kDoubleSize), result_reg);
- __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
- 4);
- }
-
- // Return value is in st(0) on ia32.
- // Store it into the (fixed) result register.
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ movdbl(result_reg, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
-}
-
-
-void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
- NearLabel positive, done, zero, negative;
- __ xorpd(xmm0, xmm0);
- __ ucomisd(input_reg, xmm0);
- __ j(above, &positive);
- __ j(equal, &zero);
- ExternalReference nan = ExternalReference::address_of_nan();
- __ movdbl(input_reg, Operand::StaticVariable(nan));
- __ jmp(&done);
- __ bind(&zero);
- __ push(Immediate(0xFFF00000));
- __ push(Immediate(0));
- __ movdbl(input_reg, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
- __ jmp(&done);
- __ bind(&positive);
- __ fldln2();
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), input_reg);
- __ fld_d(Operand(esp, 0));
- __ fyl2x();
- __ fstp_d(Operand(esp, 0));
- __ movdbl(input_reg, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
- __ bind(&done);
-}
-
-
-void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
-}
-
-
-void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
-}
-
-
-void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathAbs:
- DoMathAbs(instr);
- break;
- case kMathFloor:
- DoMathFloor(instr);
- break;
- case kMathRound:
- DoMathRound(instr);
- break;
- case kMathSqrt:
- DoMathSqrt(instr);
- break;
- case kMathPowHalf:
- DoMathPowHalf(instr);
- break;
- case kMathCos:
- DoMathCos(instr);
- break;
- case kMathSin:
- DoMathSin(instr);
- break;
- case kMathLog:
- DoMathLog(instr);
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->key()).is(ecx));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- int arity = instr->arity();
- Handle<Code> ic = isolate()->stub_cache()->
- ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoCallNamed(LCallNamed* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- int arity = instr->arity();
- Handle<Code> ic = isolate()->stub_cache()->
- ComputeCallInitialize(arity, NOT_IN_LOOP);
- __ mov(ecx, instr->name());
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- int arity = instr->arity();
- CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ Drop(1);
-}
-
-
-void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- int arity = instr->arity();
- Handle<Code> ic = isolate()->stub_cache()->
- ComputeCallInitialize(arity, NOT_IN_LOOP);
- __ mov(ecx, instr->name());
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
-
-void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(eax));
- __ mov(edi, instr->target());
- CallKnownFunction(instr->target(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoCallNew(LCallNew* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->constructor()).is(edi));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
- __ Set(eax, Immediate(instr->arity()));
- CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr, false);
-}
-
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
- Register object = ToRegister(instr->object());
- Register value = ToRegister(instr->value());
- int offset = instr->offset();
-
- if (!instr->transition().is_null()) {
- __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
- }
-
- // Do the store.
- if (instr->is_in_object()) {
- __ mov(FieldOperand(object, offset), value);
- if (instr->needs_write_barrier()) {
- Register temp = ToRegister(instr->TempAt(0));
- // Update the write barrier for the object for in-object properties.
- __ RecordWrite(object, offset, value, temp);
- }
- } else {
- Register temp = ToRegister(instr->TempAt(0));
- __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
- __ mov(FieldOperand(temp, offset), value);
- if (instr->needs_write_barrier()) {
- // Update the write barrier for the properties array.
- // object is used as a scratch register.
- __ RecordWrite(temp, offset, value, object);
- }
- }
-}
-
-
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->object()).is(edx));
- ASSERT(ToRegister(instr->value()).is(eax));
-
- __ mov(ecx, instr->name());
- Handle<Code> ic = info_->is_strict()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
- DeoptimizeIf(above_equal, instr->environment());
-}
-
-
-void LCodeGen::DoStoreKeyedSpecializedArrayElement(
- LStoreKeyedSpecializedArrayElement* instr) {
- Register external_pointer = ToRegister(instr->external_pointer());
- Register key = ToRegister(instr->key());
- ExternalArrayType array_type = instr->array_type();
- if (array_type == kExternalFloatArray) {
- __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
- __ movss(Operand(external_pointer, key, times_4, 0), xmm0);
- } else {
- Register value = ToRegister(instr->value());
- switch (array_type) {
- case kExternalPixelArray: {
- // Clamp the value to [0..255].
- Register temp = ToRegister(instr->TempAt(0));
- // The dec_b below requires that the clamped value is in a byte
- // register. eax is an arbitrary choice to satisfy this requirement, we
- // hinted the register allocator to give us eax when building the
- // instruction.
- ASSERT(temp.is(eax));
- __ mov(temp, ToRegister(instr->value()));
- NearLabel done;
- __ test(temp, Immediate(0xFFFFFF00));
- __ j(zero, &done);
- __ setcc(negative, temp); // 1 if negative, 0 if positive.
- __ dec_b(temp); // 0 if negative, 255 if positive.
- __ bind(&done);
- __ mov_b(Operand(external_pointer, key, times_1, 0), temp);
- break;
- }
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ mov_b(Operand(external_pointer, key, times_1, 0), value);
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ mov_w(Operand(external_pointer, key, times_2, 0), value);
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ mov(Operand(external_pointer, key, times_4, 0), value);
- break;
- case kExternalFloatArray:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->object());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
-
- // Do the store.
- if (instr->key()->IsConstantOperand()) {
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- int offset =
- ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
- __ mov(FieldOperand(elements, offset), value);
- } else {
- __ mov(FieldOperand(elements,
- key,
- times_pointer_size,
- FixedArray::kHeaderSize),
- value);
- }
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- // Compute address of modified element and store it into key register.
- __ lea(key,
- FieldOperand(elements,
- key,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ RecordWrite(elements, key, value);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->object()).is(edx));
- ASSERT(ToRegister(instr->key()).is(ecx));
- ASSERT(ToRegister(instr->value()).is(eax));
-
- Handle<Code> ic = info_->is_strict()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
- public:
- DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- private:
- LStringCharCodeAt* instr_;
- };
-
- Register string = ToRegister(instr->string());
- Register index = no_reg;
- int const_index = -1;
- if (instr->index()->IsConstantOperand()) {
- const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
- if (!Smi::IsValid(const_index)) {
- // Guaranteed to be out of bounds because of the assert above.
- // So the bounds check that must dominate this instruction must
- // have deoptimized already.
- if (FLAG_debug_code) {
- __ Abort("StringCharCodeAt: out of bounds index.");
- }
- // No code needs to be generated.
- return;
- }
- } else {
- index = ToRegister(instr->index());
- }
- Register result = ToRegister(instr->result());
-
- DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(this, instr);
-
- NearLabel flat_string, ascii_string, done;
-
- // Fetch the instance type of the receiver into result register.
- __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for non-flat strings.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test(result, Immediate(kStringRepresentationMask));
- __ j(zero, &flat_string);
-
- // Handle non-flat strings.
- __ test(result, Immediate(kIsConsStringMask));
- __ j(zero, deferred->entry());
-
- // ConsString.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ cmp(FieldOperand(string, ConsString::kSecondOffset),
- Immediate(factory()->empty_string()));
- __ j(not_equal, deferred->entry());
- // Get the first of the two strings and load its instance type.
- __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
- __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
- // If the first cons component is also non-flat, then go to runtime.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test(result, Immediate(kStringRepresentationMask));
- __ j(not_zero, deferred->entry());
-
- // Check for ASCII or two-byte string.
- __ bind(&flat_string);
- STATIC_ASSERT(kAsciiStringTag != 0);
- __ test(result, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string);
-
- // Two-byte string.
- // Load the two-byte character code into the result register.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- if (instr->index()->IsConstantOperand()) {
- __ movzx_w(result,
- FieldOperand(string,
- SeqTwoByteString::kHeaderSize +
- (kUC16Size * const_index)));
- } else {
- __ movzx_w(result, FieldOperand(string,
- index,
- times_2,
- SeqTwoByteString::kHeaderSize));
- }
- __ jmp(&done);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- if (instr->index()->IsConstantOperand()) {
- __ movzx_b(result, FieldOperand(string,
- SeqAsciiString::kHeaderSize + const_index));
- } else {
- __ movzx_b(result, FieldOperand(string,
- index,
- times_1,
- SeqAsciiString::kHeaderSize));
- }
- __ bind(&done);
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, Immediate(0));
-
- __ PushSafepointRegisters();
- __ push(string);
- // Push the index as a smi. This is safe because of the checks in
- // DoStringCharCodeAt above.
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
- if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- __ push(Immediate(Smi::FromInt(const_index)));
- } else {
- Register index = ToRegister(instr->index());
- __ SmiTag(index);
- __ push(index);
- }
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(eax);
- }
- __ SmiUntag(eax);
- __ StoreToSafepointRegisterSlot(result, eax);
- __ PopSafepointRegisters();
-}
-
-
-void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
- public:
- DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- private:
- LStringCharFromCode* instr_;
- };
-
- DeferredStringCharFromCode* deferred =
- new DeferredStringCharFromCode(this, instr);
-
- ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
- ASSERT(!char_code.is(result));
-
- __ cmp(char_code, String::kMaxAsciiCharCode);
- __ j(above, deferred->entry());
- __ Set(result, Immediate(factory()->single_character_string_cache()));
- __ mov(result, FieldOperand(result,
- char_code, times_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(result, factory()->undefined_value());
- __ j(equal, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, Immediate(0));
-
- __ PushSafepointRegisters();
- __ SmiTag(char_code);
- __ push(char_code);
- __ CallRuntimeSaveDoubles(Runtime::kCharFromCode);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 1, Safepoint::kNoDeoptimizationIndex);
- __ StoreToSafepointRegisterSlot(result, eax);
- __ PopSafepointRegisters();
-}
-
-
-void LCodeGen::DoStringLength(LStringLength* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- __ mov(result, FieldOperand(string, String::kLengthOffset));
-}
-
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister() || input->IsStackSlot());
- LOperand* output = instr->result();
- ASSERT(output->IsDoubleRegister());
- __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
-}
-
-
-void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI: public LDeferredCode {
- public:
- DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
- private:
- LNumberTagI* instr_;
- };
-
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
-
- DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
- __ SmiTag(reg);
- __ j(overflow, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
- Label slow;
- Register reg = ToRegister(instr->InputAt(0));
- Register tmp = reg.is(eax) ? ecx : eax;
-
- // Preserve the value of all registers.
- __ PushSafepointRegisters();
-
- // There was overflow, so bits 30 and 31 of the original integer
- // disagree. Try to allocate a heap number in new space and store
- // the value in there. If that fails, call the runtime system.
- NearLabel done;
- __ SmiUntag(reg);
- __ xor_(reg, 0x80000000);
- __ cvtsi2sd(xmm0, Operand(reg));
- if (FLAG_inline_new) {
- __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
- __ jmp(&done);
- }
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- // TODO(3095996): Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ StoreToSafepointRegisterSlot(reg, Immediate(0));
-
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
- if (!reg.is(eax)) __ mov(reg, eax);
-
- // Done. Put the value in xmm0 into the value of the allocated heap
- // number.
- __ bind(&done);
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
- __ StoreToSafepointRegisterSlot(reg, reg);
- __ PopSafepointRegisters();
-}
-
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
- public:
- DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- private:
- LNumberTagD* instr_;
- };
-
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
- Register reg = ToRegister(instr->result());
- Register tmp = ToRegister(instr->TempAt(0));
-
- DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
- if (FLAG_inline_new) {
- __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
- } else {
- __ jmp(deferred->entry());
- }
- __ bind(deferred->exit());
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
-}
-
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register reg = ToRegister(instr->result());
- __ Set(reg, Immediate(0));
-
- __ PushSafepointRegisters();
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
- __ StoreToSafepointRegisterSlot(reg, eax);
- __ PopSafepointRegisters();
-}
-
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(input));
-}
-
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- if (instr->needs_check()) {
- __ test(ToRegister(input), Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr->environment());
- }
- __ SmiUntag(ToRegister(input));
-}
-
-
-void LCodeGen::EmitNumberUntagD(Register input_reg,
- XMMRegister result_reg,
- LEnvironment* env) {
- NearLabel load_smi, heap_number, done;
-
- // Smi check.
- __ test(input_reg, Immediate(kSmiTagMask));
- __ j(zero, &load_smi, not_taken);
-
- // Heap number map check.
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(equal, &heap_number);
-
- __ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, env);
-
- // Convert undefined to NaN.
- ExternalReference nan = ExternalReference::address_of_nan();
- __ movdbl(result_reg, Operand::StaticVariable(nan));
- __ jmp(&done);
-
- // Heap number to XMM conversion.
- __ bind(&heap_number);
- __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- // Smi to XMM conversion
- __ bind(&load_smi);
- __ SmiUntag(input_reg); // Untag smi before converting to float.
- __ cvtsi2sd(result_reg, Operand(input_reg));
- __ SmiTag(input_reg); // Retag smi.
- __ bind(&done);
-}
-
-
-class DeferredTaggedToI: public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- private:
- LTaggedToI* instr_;
-};
-
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- NearLabel done, heap_number;
- Register input_reg = ToRegister(instr->InputAt(0));
-
- // Heap number map check.
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
-
- if (instr->truncating()) {
- __ j(equal, &heap_number);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
- __ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr->environment());
- __ mov(input_reg, 0);
- __ jmp(&done);
-
- __ bind(&heap_number);
- if (CpuFeatures::IsSupported(SSE3)) {
- CpuFeatures::Scope scope(SSE3);
- NearLabel convert;
- // Use more powerful conversion when sse3 is available.
- // Load x87 register with heap number.
- __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
- // Get exponent alone and check for too-big exponent.
- __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- __ and_(input_reg, HeapNumber::kExponentMask);
- const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
- __ j(less, &convert);
- // Pop FPU stack before deoptimizing.
- __ ffree(0);
- __ fincstp();
- DeoptimizeIf(no_condition, instr->environment());
-
- // Reserve space for 64 bit answer.
- __ bind(&convert);
- __ sub(Operand(esp), Immediate(kDoubleSize));
- // Do conversion, which cannot fail because we checked the exponent.
- __ fisttp_d(Operand(esp, 0));
- __ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result.
- __ add(Operand(esp), Immediate(kDoubleSize));
- } else {
- NearLabel deopt;
- XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
- __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(input_reg, Operand(xmm0));
- __ cmp(input_reg, 0x80000000u);
- __ j(not_equal, &done);
- // Check if the input was 0x8000000 (kMinInt).
- // If no, then we got an overflow and we deoptimize.
- ExternalReference min_int = ExternalReference::address_of_min_int();
- __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
- __ ucomisd(xmm_temp, xmm0);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- }
- } else {
- // Deoptimize if we don't have a heap number.
- DeoptimizeIf(not_equal, instr->environment());
-
- XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
- __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(input_reg, Operand(xmm0));
- __ cvtsi2sd(xmm_temp, Operand(input_reg));
- __ ucomisd(xmm0, xmm_temp);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ test(input_reg, Operand(input_reg));
- __ j(not_zero, &done);
- __ movmskpd(input_reg, xmm0);
- __ and_(input_reg, 1);
- DeoptimizeIf(not_zero, instr->environment());
- }
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister());
- ASSERT(input->Equals(instr->result()));
-
- Register input_reg = ToRegister(input);
-
- DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
-
- // Smi check.
- __ test(input_reg, Immediate(kSmiTagMask));
- __ j(not_zero, deferred->entry());
-
- // Smi to int32 conversion
- __ SmiUntag(input_reg); // Untag smi.
-
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister());
- LOperand* result = instr->result();
- ASSERT(result->IsDoubleRegister());
-
- Register input_reg = ToRegister(input);
- XMMRegister result_reg = ToDoubleRegister(result);
-
- EmitNumberUntagD(input_reg, result_reg, instr->environment());
-}
-
-
-void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsDoubleRegister());
- LOperand* result = instr->result();
- ASSERT(result->IsRegister());
-
- XMMRegister input_reg = ToDoubleRegister(input);
- Register result_reg = ToRegister(result);
-
- if (instr->truncating()) {
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations.
- __ cvttsd2si(result_reg, Operand(input_reg));
- __ cmp(result_reg, 0x80000000u);
- if (CpuFeatures::IsSupported(SSE3)) {
- // This will deoptimize if the exponent of the input in out of range.
- CpuFeatures::Scope scope(SSE3);
- NearLabel convert, done;
- __ j(not_equal, &done);
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), input_reg);
- // Get exponent alone and check for too-big exponent.
- __ mov(result_reg, Operand(esp, sizeof(int32_t)));
- __ and_(result_reg, HeapNumber::kExponentMask);
- const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
- __ j(less, &convert);
- __ add(Operand(esp), Immediate(kDoubleSize));
- DeoptimizeIf(no_condition, instr->environment());
- __ bind(&convert);
- // Do conversion, which cannot fail because we checked the exponent.
- __ fld_d(Operand(esp, 0));
- __ fisttp_d(Operand(esp, 0));
- __ mov(result_reg, Operand(esp, 0)); // Low word of answer is the result.
- __ add(Operand(esp), Immediate(kDoubleSize));
- __ bind(&done);
- } else {
- NearLabel done;
- Register temp_reg = ToRegister(instr->TempAt(0));
- XMMRegister xmm_scratch = xmm0;
-
- // If cvttsd2si succeeded, we're done. Otherwise, we attempt
- // manual conversion.
- __ j(not_equal, &done);
-
- // Get high 32 bits of the input in result_reg and temp_reg.
- __ pshufd(xmm_scratch, input_reg, 1);
- __ movd(Operand(temp_reg), xmm_scratch);
- __ mov(result_reg, temp_reg);
-
- // Prepare negation mask in temp_reg.
- __ sar(temp_reg, kBitsPerInt - 1);
-
- // Extract the exponent from result_reg and subtract adjusted
- // bias from it. The adjustment is selected in a way such that
- // when the difference is zero, the answer is in the low 32 bits
- // of the input, otherwise a shift has to be performed.
- __ shr(result_reg, HeapNumber::kExponentShift);
- __ and_(result_reg,
- HeapNumber::kExponentMask >> HeapNumber::kExponentShift);
- __ sub(Operand(result_reg),
- Immediate(HeapNumber::kExponentBias +
- HeapNumber::kExponentBits +
- HeapNumber::kMantissaBits));
- // Don't handle big (> kMantissaBits + kExponentBits == 63) or
- // special exponents.
- DeoptimizeIf(greater, instr->environment());
-
- // Zero out the sign and the exponent in the input (by shifting
- // it to the left) and restore the implicit mantissa bit,
- // i.e. convert the input to unsigned int64 shifted left by
- // kExponentBits.
- ExternalReference minus_zero = ExternalReference::address_of_minus_zero();
- // Minus zero has the most significant bit set and the other
- // bits cleared.
- __ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
- __ psllq(input_reg, HeapNumber::kExponentBits);
- __ por(input_reg, xmm_scratch);
-
- // Get the amount to shift the input right in xmm_scratch.
- __ neg(result_reg);
- __ movd(xmm_scratch, Operand(result_reg));
-
- // Shift the input right and extract low 32 bits.
- __ psrlq(input_reg, xmm_scratch);
- __ movd(Operand(result_reg), input_reg);
-
- // Use the prepared mask in temp_reg to negate the result if necessary.
- __ xor_(result_reg, Operand(temp_reg));
- __ sub(result_reg, Operand(temp_reg));
- __ bind(&done);
- }
- } else {
- NearLabel done;
- __ cvttsd2si(result_reg, Operand(input_reg));
- __ cvtsi2sd(xmm0, Operand(result_reg));
- __ ucomisd(xmm0, input_reg);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // The integer converted back is equal to the original. We
- // only have to test if we got -0 as an input.
- __ test(result_reg, Operand(result_reg));
- __ j(not_zero, &done);
- __ movmskpd(result_reg, input_reg);
- // Bit 0 contains the sign of the double in input_reg.
- // If input was positive, we are ok and return 0, otherwise
- // deoptimize.
- __ and_(result_reg, 1);
- DeoptimizeIf(not_zero, instr->environment());
- }
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->InputAt(0);
- __ test(ToRegister(input), Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr->environment());
-}
-
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->InputAt(0);
- __ test(ToRegister(input), Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
- InstanceType first = instr->hydrogen()->first();
- InstanceType last = instr->hydrogen()->last();
-
- __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
-
- // If there is only one type in the interval check for equality.
- if (first == last) {
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
- static_cast<int8_t>(first));
- DeoptimizeIf(not_equal, instr->environment());
- } else if (first == FIRST_STRING_TYPE && last == LAST_STRING_TYPE) {
- // String has a dedicated bit in instance type.
- __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), kIsNotStringMask);
- DeoptimizeIf(not_zero, instr->environment());
- } else {
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
- static_cast<int8_t>(first));
- DeoptimizeIf(below, instr->environment());
- // Omit check for the last type.
- if (last != LAST_TYPE) {
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
- static_cast<int8_t>(last));
- DeoptimizeIf(above, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- ASSERT(instr->InputAt(0)->IsRegister());
- Register reg = ToRegister(instr->InputAt(0));
- __ cmp(reg, instr->hydrogen()->target());
- DeoptimizeIf(not_equal, instr->environment());
-}
-
-
-void LCodeGen::DoCheckMap(LCheckMap* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister());
- Register reg = ToRegister(input);
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- instr->hydrogen()->map());
- DeoptimizeIf(not_equal, instr->environment());
-}
-
-
-void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- __ mov(result, Operand::Cell(cell));
- } else {
- __ mov(result, object);
- }
-}
-
-
-void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- Register reg = ToRegister(instr->TempAt(0));
-
- Handle<JSObject> holder = instr->holder();
- Handle<JSObject> current_prototype = instr->prototype();
-
- // Load prototype object.
- LoadHeapObject(reg, current_prototype);
-
- // Check prototype maps up to the holder.
- while (!current_prototype.is_identical_to(holder)) {
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Handle<Map>(current_prototype->map()));
- DeoptimizeIf(not_equal, instr->environment());
- current_prototype =
- Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
- // Load next prototype object.
- LoadHeapObject(reg, current_prototype);
- }
-
- // Check the holder map.
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Handle<Map>(current_prototype->map()));
- DeoptimizeIf(not_equal, instr->environment());
-}
-
-
-void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- // Setup the parameters to the stub/runtime call.
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(instr->hydrogen()->constant_elements()));
-
- // Pick the right runtime function or stub to call.
- int length = instr->hydrogen()->length();
- if (instr->hydrogen()->IsCopyOnWrite()) {
- ASSERT(instr->hydrogen()->depth() == 1);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
- } else if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr, false);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr, false);
- } else {
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
- }
-}
-
-
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- // Setup the parameters to the stub/runtime call.
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(instr->hydrogen()->constant_properties()));
- int flags = instr->hydrogen()->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- flags |= instr->hydrogen()->has_function()
- ? ObjectLiteral::kHasFunction
- : ObjectLiteral::kNoFlags;
- __ push(Immediate(Smi::FromInt(flags)));
-
- // Pick the right runtime function to call.
- if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else {
- CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
- }
-}
-
-
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(eax));
- __ push(eax);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- NearLabel materialized;
- // Registers will be used as follows:
- // edi = JS function.
- // ecx = literals array.
- // ebx = regexp literal.
- // eax = regexp literal clone.
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
- int literal_offset = FixedArray::kHeaderSize +
- instr->hydrogen()->literal_index() * kPointerSize;
- __ mov(ebx, FieldOperand(ecx, literal_offset));
- __ cmp(ebx, factory()->undefined_value());
- __ j(not_equal, &materialized);
-
- // Create regexp literal using runtime function
- // Result will be in eax.
- __ push(ecx);
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(instr->hydrogen()->pattern()));
- __ push(Immediate(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr, false);
- __ mov(ebx, eax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(ebx);
- __ push(Immediate(Smi::FromInt(size)));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr, false);
- __ pop(ebx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ mov(edx, FieldOperand(ebx, i));
- __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
- __ mov(FieldOperand(eax, i), edx);
- __ mov(FieldOperand(eax, i + kPointerSize), ecx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ mov(edx, FieldOperand(ebx, size - kPointerSize));
- __ mov(FieldOperand(eax, size - kPointerSize), edx);
- }
-}
-
-
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- Handle<SharedFunctionInfo> shared_info = instr->shared_info();
- bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(
- shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
- __ push(Immediate(shared_info));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
- } else {
- __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
- __ push(Immediate(shared_info));
- __ push(Immediate(pretenure
- ? factory()->true_value()
- : factory()->false_value()));
- CallRuntime(Runtime::kNewClosure, 3, instr, false);
- }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
- LOperand* input = instr->InputAt(0);
- if (input->IsConstantOperand()) {
- __ push(ToImmediate(input));
- } else {
- __ push(ToOperand(input));
- }
- CallRuntime(Runtime::kTypeof, 1, instr, false);
-}
-
-
-void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Label true_label;
- Label false_label;
- NearLabel done;
-
- Condition final_branch_condition = EmitTypeofIs(&true_label,
- &false_label,
- input,
- instr->type_literal());
- __ j(final_branch_condition, &true_label);
- __ bind(&false_label);
- __ mov(result, factory()->false_value());
- __ jmp(&done);
-
- __ bind(&true_label);
- __ mov(result, factory()->true_value());
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition final_branch_condition = EmitTypeofIs(true_label,
- false_label,
- input,
- instr->type_literal());
-
- EmitBranch(true_block, false_block, final_branch_condition);
-}
-
-
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name) {
- Condition final_branch_condition = no_condition;
- if (type_name->Equals(heap()->number_symbol())) {
- __ JumpIfSmi(input, true_label);
- __ cmp(FieldOperand(input, HeapObject::kMapOffset),
- factory()->heap_number_map());
- final_branch_condition = equal;
-
- } else if (type_name->Equals(heap()->string_symbol())) {
- __ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
- __ j(above_equal, false_label);
- __ test_b(FieldOperand(input, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- final_branch_condition = zero;
-
- } else if (type_name->Equals(heap()->boolean_symbol())) {
- __ cmp(input, factory()->true_value());
- __ j(equal, true_label);
- __ cmp(input, factory()->false_value());
- final_branch_condition = equal;
-
- } else if (type_name->Equals(heap()->undefined_symbol())) {
- __ cmp(input, factory()->undefined_value());
- __ j(equal, true_label);
- __ JumpIfSmi(input, false_label);
- // Check for undetectable objects => true.
- __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
- __ test_b(FieldOperand(input, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- final_branch_condition = not_zero;
-
- } else if (type_name->Equals(heap()->function_symbol())) {
- __ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
- __ j(equal, true_label);
- // Regular expressions => 'function' (they are callable).
- __ CmpInstanceType(input, JS_REGEXP_TYPE);
- final_branch_condition = equal;
-
- } else if (type_name->Equals(heap()->object_symbol())) {
- __ JumpIfSmi(input, false_label);
- __ cmp(input, factory()->null_value());
- __ j(equal, true_label);
- // Regular expressions => 'function', not 'object'.
- __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, input);
- __ j(below, false_label);
- __ CmpInstanceType(input, FIRST_FUNCTION_CLASS_TYPE);
- __ j(above_equal, false_label);
- // Check for undetectable objects => false.
- __ test_b(FieldOperand(input, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- final_branch_condition = zero;
-
- } else {
- final_branch_condition = not_equal;
- __ jmp(false_label);
- // A dead branch instruction will be generated after this point.
- }
-
- return final_branch_condition;
-}
-
-
-void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
- Register result = ToRegister(instr->result());
- NearLabel true_label;
- NearLabel false_label;
- NearLabel done;
-
- EmitIsConstructCall(result);
- __ j(equal, &true_label);
-
- __ mov(result, factory()->false_value());
- __ jmp(&done);
-
- __ bind(&true_label);
- __ mov(result, factory()->true_value());
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp = ToRegister(instr->TempAt(0));
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- EmitIsConstructCall(temp);
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp) {
- // Get the frame pointer for the calling frame.
- __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- NearLabel check_frame_marker;
- __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &check_frame_marker);
- __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
-
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- // No code for lazy bailout instruction. Used to capture environment after a
- // call for populating the safepoint data with deoptimization data.
-}
-
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- DeoptimizeIf(no_condition, instr->environment());
-}
-
-
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- LOperand* obj = instr->object();
- LOperand* key = instr->key();
- __ push(ToOperand(obj));
- if (key->IsConstantOperand()) {
- __ push(ToImmediate(key));
- } else {
- __ push(ToOperand(key));
- }
- ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
- LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
- RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
- // Create safepoint generator that will also ensure enough space in the
- // reloc info for patching in deoptimization (since this is invoking a
- // builtin)
- SafepointGenerator safepoint_generator(this,
- pointers,
- env->deoptimization_index());
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ push(Immediate(Smi::FromInt(strict_mode_flag())));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
-}
-
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- // Perform stack overflow check.
- NearLabel done;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &done);
-
- StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- // This is a pseudo-instruction that ensures that the environment here is
- // properly registered for deoptimization and records the assembler's PC
- // offset.
- LEnvironment* environment = instr->environment();
- environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
- instr->SpilledDoubleRegisterArray());
-
- // If the environment were already registered, we would have no way of
- // backpatching it with the spill slot operands.
- ASSERT(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment);
- ASSERT(osr_pc_offset_ == -1);
- osr_pc_offset_ = masm()->pc_offset();
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h b/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h
deleted file mode 100644
index 4414e6a..0000000
--- a/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_LITHIUM_CODEGEN_IA32_H_
-#define V8_IA32_LITHIUM_CODEGEN_IA32_H_
-
-#include "ia32/lithium-ia32.h"
-
-#include "checks.h"
-#include "deoptimizer.h"
-#include "safepoint-table.h"
-#include "scopes.h"
-#include "ia32/lithium-gap-resolver-ia32.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-class LGapNode;
-class SafepointGenerator;
-
-class LCodeGen BASE_EMBEDDED {
- public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : chunk_(chunk),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
- deoptimizations_(4),
- deoptimization_literals_(8),
- inlined_function_count_(0),
- scope_(info->scope()),
- status_(UNUSED),
- deferred_(8),
- osr_pc_offset_(-1),
- deoptimization_reloc_size(),
- resolver_(this) {
- PopulateDeoptimizationLiteralsWithInlinedFunctions();
- }
-
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
-
- // Support for converting LOperands to assembler types.
- Operand ToOperand(LOperand* op) const;
- Register ToRegister(LOperand* op) const;
- XMMRegister ToDoubleRegister(LOperand* op) const;
- Immediate ToImmediate(LOperand* op);
-
- // The operand denoting the second word (the one with a higher address) of
- // a double stack slot.
- Operand HighOperand(LOperand* op);
-
- // Try to generate code for the entire chunk, but it may fail if the
- // chunk contains constructs we cannot handle. Returns true if the
- // code generation attempt succeeded.
- bool GenerateCode();
-
- // Finish the code by setting stack height, safepoint, and bailout
- // information on it.
- void FinishCode(Handle<Code> code);
-
- // Deferred code support.
- void DoDeferredNumberTagD(LNumberTagD* instr);
- void DoDeferredNumberTagI(LNumberTagI* instr);
- void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
- void DoDeferredStackCheck(LGoto* instr);
- void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
- void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
-
- // Parallel move support.
- void DoParallelMove(LParallelMove* move);
-
- // Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment, Translation* translation);
-
- void EnsureRelocSpaceForDeoptimization();
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- int strict_mode_flag() const {
- return info()->is_strict() ? kStrictMode : kNonStrictMode;
- }
-
- LChunk* chunk() const { return chunk_; }
- Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk_->graph(); }
-
- int GetNextEmittedBlock(int block);
- LInstruction* GetNextInstruction();
-
- void EmitClassOfTest(Label* if_true,
- Label* if_false,
- Handle<String> class_name,
- Register input,
- Register temporary,
- Register temporary2);
-
- int StackSlotCount() const { return chunk()->spill_slot_count(); }
- int ParameterCount() const { return scope()->num_parameters(); }
-
- void Abort(const char* format, ...);
- void Comment(const char* format, ...);
-
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
-
- // Code generation passes. Returns true if code generation should
- // continue.
- bool GeneratePrologue();
- bool GenerateBody();
- bool GenerateDeferredCode();
- // Pad the reloc info to ensure that we have enough space to patch during
- // deoptimization.
- bool GenerateRelocPadding();
- bool GenerateSafepointTable();
-
- void CallCode(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr,
- bool adjusted = true);
- void CallRuntime(const Runtime::Function* fun, int argc, LInstruction* instr,
- bool adjusted = true);
- void CallRuntime(Runtime::FunctionId id, int argc, LInstruction* instr,
- bool adjusted = true) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, argc, instr, adjusted);
- }
-
- // Generate a direct call to a known function. Expects the function
- // to be in edi.
- void CallKnownFunction(Handle<JSFunction> function,
- int arity,
- LInstruction* instr);
-
- void LoadHeapObject(Register result, Handle<HeapObject> object);
-
- void RegisterLazyDeoptimization(LInstruction* instr);
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
- void DeoptimizeIf(Condition cc, LEnvironment* environment);
-
- void AddToTranslation(Translation* translation,
- LOperand* op,
- bool is_tagged);
- void PopulateDeoptimizationData(Handle<Code> code);
- int DefineDeoptimizationLiteral(Handle<Object> literal);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
-
- Register ToRegister(int index) const;
- XMMRegister ToDoubleRegister(int index) const;
- int ToInteger32(LConstantOperand* op) const;
-
- // Specific math operations - used from DoUnaryMathOperation.
- void EmitIntegerMathAbs(LUnaryMathOperation* instr);
- void DoMathAbs(LUnaryMathOperation* instr);
- void DoMathFloor(LUnaryMathOperation* instr);
- void DoMathRound(LUnaryMathOperation* instr);
- void DoMathSqrt(LUnaryMathOperation* instr);
- void DoMathPowHalf(LUnaryMathOperation* instr);
- void DoMathLog(LUnaryMathOperation* instr);
- void DoMathCos(LUnaryMathOperation* instr);
- void DoMathSin(LUnaryMathOperation* instr);
-
- // Support for recording safepoint and position information.
- void RecordSafepoint(LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- int deoptimization_index);
- void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
- void RecordSafepoint(int deoptimization_index);
- void RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- int deoptimization_index);
- void RecordPosition(int position);
-
- static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
- void EmitBranch(int left_block, int right_block, Condition cc);
- void EmitCmpI(LOperand* left, LOperand* right);
- void EmitNumberUntagD(Register input, XMMRegister result, LEnvironment* env);
-
- // Emits optimized code for typeof x == "y". Modifies input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label, Label* false_label,
- Register input, Handle<String> type_name);
-
- // Emits optimized code for %_IsObject(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsObject(Register input,
- Register temp1,
- Register temp2,
- Label* is_not_object,
- Label* is_object);
-
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp);
-
- void EmitLoadField(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name);
-
- LChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
-
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
- ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<Handle<Object> > deoptimization_literals_;
- int inlined_function_count_;
- Scope* const scope_;
- Status status_;
- TranslationBuffer translations_;
- ZoneList<LDeferredCode*> deferred_;
- int osr_pc_offset_;
-
- struct DeoptimizationRelocSize {
- int min_size;
- int last_pc_offset;
- };
-
- DeoptimizationRelocSize deoptimization_reloc_size;
-
- // Builder that keeps track of safepoints in the code. The table
- // itself is emitted at the end of the generated code.
- SafepointTableBuilder safepoints_;
-
- // Compiler from a set of parallel moves to a sequential list of moves.
- LGapResolver resolver_;
-
- friend class LDeferredCode;
- friend class LEnvironment;
- friend class SafepointGenerator;
- DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-
-class LDeferredCode: public ZoneObject {
- public:
- explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen), external_exit_(NULL) {
- codegen->AddDeferredCode(this);
- }
-
- virtual ~LDeferredCode() { }
- virtual void Generate() = 0;
-
- void SetExit(Label *exit) { external_exit_ = exit; }
- Label* entry() { return &entry_; }
- Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
-
- protected:
- LCodeGen* codegen() const { return codegen_; }
- MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
- LCodeGen* codegen_;
- Label entry_;
- Label exit_;
- Label* external_exit_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_LITHIUM_CODEGEN_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.cc b/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.cc
deleted file mode 100644
index 3d1da40..0000000
--- a/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ /dev/null
@@ -1,466 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "ia32/lithium-gap-resolver-ia32.h"
-#include "ia32/lithium-codegen-ia32.h"
-
-namespace v8 {
-namespace internal {
-
-LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner),
- moves_(32),
- source_uses_(),
- destination_uses_(),
- spilled_register_(-1) {}
-
-
-void LGapResolver::Resolve(LParallelMove* parallel_move) {
- ASSERT(HasBeenReset());
- // Build up a worklist of moves.
- BuildInitialMoveList(parallel_move);
-
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands move = moves_[i];
- // Skip constants to perform them last. They don't block other moves
- // and skipping such moves with register destinations keeps those
- // registers free for the whole algorithm.
- if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
- PerformMove(i);
- }
- }
-
- // Perform the moves with constant sources.
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated()) {
- ASSERT(moves_[i].source()->IsConstantOperand());
- EmitMove(i);
- }
- }
-
- Finish();
- ASSERT(HasBeenReset());
-}
-
-
-void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
- // Perform a linear sweep of the moves to add them to the initial list of
- // moves to perform, ignoring any move that is redundant (the source is
- // the same as the destination, the destination is ignored and
- // unallocated, or the move was already eliminated).
- const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) AddMove(move);
- }
- Verify();
-}
-
-
-void LGapResolver::PerformMove(int index) {
- // Each call to this function performs a move and deletes it from the move
- // graph. We first recursively perform any move blocking this one. We
- // mark a move as "pending" on entry to PerformMove in order to detect
- // cycles in the move graph. We use operand swaps to resolve cycles,
- // which means that a call to PerformMove could change any source operand
- // in the move graph.
-
- ASSERT(!moves_[index].IsPending());
- ASSERT(!moves_[index].IsRedundant());
-
- // Clear this move's destination to indicate a pending move. The actual
- // destination is saved on the side.
- ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
- LOperand* destination = moves_[index].destination();
- moves_[index].set_destination(NULL);
-
- // Perform a depth-first traversal of the move graph to resolve
- // dependencies. Any unperformed, unpending move with a source the same
- // as this one's destination blocks this one so recursively perform all
- // such moves.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination) && !other_move.IsPending()) {
- // Though PerformMove can change any source operand in the move graph,
- // this call cannot create a blocking move via a swap (this loop does
- // not miss any). Assume there is a non-blocking move with source A
- // and this move is blocked on source B and there is a swap of A and
- // B. Then A and B must be involved in the same cycle (or they would
- // not be swapped). Since this move's destination is B and there is
- // only a single incoming edge to an operand, this move must also be
- // involved in the same cycle. In that case, the blocking move will
- // be created but will be "pending" when we return from PerformMove.
- PerformMove(i);
- }
- }
-
- // We are about to resolve this move and don't need it marked as
- // pending, so restore its destination.
- moves_[index].set_destination(destination);
-
- // This move's source may have changed due to swaps to resolve cycles and
- // so it may now be the last move in the cycle. If so remove it.
- if (moves_[index].source()->Equals(destination)) {
- RemoveMove(index);
- return;
- }
-
- // The move may be blocked on a (at most one) pending move, in which case
- // we have a cycle. Search for such a blocking move and perform a swap to
- // resolve it.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination)) {
- ASSERT(other_move.IsPending());
- EmitSwap(index);
- return;
- }
- }
-
- // This move is not blocked.
- EmitMove(index);
-}
-
-
-void LGapResolver::AddMove(LMoveOperands move) {
- LOperand* source = move.source();
- if (source->IsRegister()) ++source_uses_[source->index()];
-
- LOperand* destination = move.destination();
- if (destination->IsRegister()) ++destination_uses_[destination->index()];
-
- moves_.Add(move);
-}
-
-
-void LGapResolver::RemoveMove(int index) {
- LOperand* source = moves_[index].source();
- if (source->IsRegister()) {
- --source_uses_[source->index()];
- ASSERT(source_uses_[source->index()] >= 0);
- }
-
- LOperand* destination = moves_[index].destination();
- if (destination->IsRegister()) {
- --destination_uses_[destination->index()];
- ASSERT(destination_uses_[destination->index()] >= 0);
- }
-
- moves_[index].Eliminate();
-}
-
-
-int LGapResolver::CountSourceUses(LOperand* operand) {
- int count = 0;
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
- ++count;
- }
- }
- return count;
-}
-
-
-Register LGapResolver::GetFreeRegisterNot(Register reg) {
- int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
- if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
- return Register::FromAllocationIndex(i);
- }
- }
- return no_reg;
-}
-
-
-bool LGapResolver::HasBeenReset() {
- if (!moves_.is_empty()) return false;
- if (spilled_register_ >= 0) return false;
-
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
- if (source_uses_[i] != 0) return false;
- if (destination_uses_[i] != 0) return false;
- }
- return true;
-}
-
-
-void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_ASSERTS
- // No operand should be the destination for more than one move.
- for (int i = 0; i < moves_.length(); ++i) {
- LOperand* destination = moves_[i].destination();
- for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
- }
- }
-#endif
-}
-
-
-#define __ ACCESS_MASM(cgen_->masm())
-
-void LGapResolver::Finish() {
- if (spilled_register_ >= 0) {
- __ pop(Register::FromAllocationIndex(spilled_register_));
- spilled_register_ = -1;
- }
- moves_.Rewind(0);
-}
-
-
-void LGapResolver::EnsureRestored(LOperand* operand) {
- if (operand->IsRegister() && operand->index() == spilled_register_) {
- __ pop(Register::FromAllocationIndex(spilled_register_));
- spilled_register_ = -1;
- }
-}
-
-
-Register LGapResolver::EnsureTempRegister() {
- // 1. We may have already spilled to create a temp register.
- if (spilled_register_ >= 0) {
- return Register::FromAllocationIndex(spilled_register_);
- }
-
- // 2. We may have a free register that we can use without spilling.
- Register free = GetFreeRegisterNot(no_reg);
- if (!free.is(no_reg)) return free;
-
- // 3. Prefer to spill a register that is not used in any remaining move
- // because it will not need to be restored until the end.
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
- if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
- Register scratch = Register::FromAllocationIndex(i);
- __ push(scratch);
- spilled_register_ = i;
- return scratch;
- }
- }
-
- // 4. Use an arbitrary register. Register 0 is as arbitrary as any other.
- Register scratch = Register::FromAllocationIndex(0);
- __ push(scratch);
- spilled_register_ = 0;
- return scratch;
-}
-
-
-void LGapResolver::EmitMove(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
- EnsureRestored(source);
- EnsureRestored(destination);
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- ASSERT(destination->IsRegister() || destination->IsStackSlot());
- Register src = cgen_->ToRegister(source);
- Operand dst = cgen_->ToOperand(destination);
- __ mov(dst, src);
-
- } else if (source->IsStackSlot()) {
- ASSERT(destination->IsRegister() || destination->IsStackSlot());
- Operand src = cgen_->ToOperand(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- __ mov(dst, src);
- } else {
- // Spill on demand to use a temporary register for memory-to-memory
- // moves.
- Register tmp = EnsureTempRegister();
- Operand dst = cgen_->ToOperand(destination);
- __ mov(tmp, src);
- __ mov(dst, tmp);
- }
-
- } else if (source->IsConstantOperand()) {
- ASSERT(destination->IsRegister() || destination->IsStackSlot());
- Immediate src = cgen_->ToImmediate(source);
- Operand dst = cgen_->ToOperand(destination);
- __ mov(dst, src);
-
- } else if (source->IsDoubleRegister()) {
- ASSERT(destination->IsDoubleRegister() ||
- destination->IsDoubleStackSlot());
- XMMRegister src = cgen_->ToDoubleRegister(source);
- Operand dst = cgen_->ToOperand(destination);
- __ movdbl(dst, src);
-
- } else if (source->IsDoubleStackSlot()) {
- ASSERT(destination->IsDoubleRegister() ||
- destination->IsDoubleStackSlot());
- Operand src = cgen_->ToOperand(source);
- if (destination->IsDoubleRegister()) {
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movdbl(dst, src);
- } else {
- // We rely on having xmm0 available as a fixed scratch register.
- Operand dst = cgen_->ToOperand(destination);
- __ movdbl(xmm0, src);
- __ movdbl(dst, xmm0);
- }
-
- } else {
- UNREACHABLE();
- }
-
- RemoveMove(index);
-}
-
-
-void LGapResolver::EmitSwap(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
- EnsureRestored(source);
- EnsureRestored(destination);
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister() && destination->IsRegister()) {
- // Register-register.
- Register src = cgen_->ToRegister(source);
- Register dst = cgen_->ToRegister(destination);
- __ xchg(dst, src);
-
- } else if ((source->IsRegister() && destination->IsStackSlot()) ||
- (source->IsStackSlot() && destination->IsRegister())) {
- // Register-memory. Use a free register as a temp if possible. Do not
- // spill on demand because the simple spill implementation cannot avoid
- // spilling src at this point.
- Register tmp = GetFreeRegisterNot(no_reg);
- Register reg =
- cgen_->ToRegister(source->IsRegister() ? source : destination);
- Operand mem =
- cgen_->ToOperand(source->IsRegister() ? destination : source);
- if (tmp.is(no_reg)) {
- __ xor_(reg, mem);
- __ xor_(mem, reg);
- __ xor_(reg, mem);
- } else {
- __ mov(tmp, mem);
- __ mov(mem, reg);
- __ mov(reg, tmp);
- }
-
- } else if (source->IsStackSlot() && destination->IsStackSlot()) {
- // Memory-memory. Spill on demand to use a temporary. If there is a
- // free register after that, use it as a second temporary.
- Register tmp0 = EnsureTempRegister();
- Register tmp1 = GetFreeRegisterNot(tmp0);
- Operand src = cgen_->ToOperand(source);
- Operand dst = cgen_->ToOperand(destination);
- if (tmp1.is(no_reg)) {
- // Only one temp register available to us.
- __ mov(tmp0, dst);
- __ xor_(tmp0, src);
- __ xor_(src, tmp0);
- __ xor_(tmp0, src);
- __ mov(dst, tmp0);
- } else {
- __ mov(tmp0, dst);
- __ mov(tmp1, src);
- __ mov(dst, tmp1);
- __ mov(src, tmp0);
- }
-
- } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
- // XMM register-register or register-memory. We rely on having xmm0
- // available as a fixed scratch register.
- ASSERT(source->IsDoubleRegister() || source->IsDoubleStackSlot());
- ASSERT(destination->IsDoubleRegister() ||
- destination->IsDoubleStackSlot());
- XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
- ? source
- : destination);
- Operand other =
- cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
- __ movdbl(xmm0, other);
- __ movdbl(other, reg);
- __ movdbl(reg, Operand(xmm0));
-
- } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
- // Double-width memory-to-memory. Spill on demand to use a general
- // purpose temporary register and also rely on having xmm0 available as
- // a fixed scratch register.
- Register tmp = EnsureTempRegister();
- Operand src0 = cgen_->ToOperand(source);
- Operand src1 = cgen_->HighOperand(source);
- Operand dst0 = cgen_->ToOperand(destination);
- Operand dst1 = cgen_->HighOperand(destination);
- __ movdbl(xmm0, dst0); // Save destination in xmm0.
- __ mov(tmp, src0); // Then use tmp to copy source to destination.
- __ mov(dst0, tmp);
- __ mov(tmp, src1);
- __ mov(dst1, tmp);
- __ movdbl(src0, xmm0);
-
- } else {
- // No other combinations are possible.
- UNREACHABLE();
- }
-
- // The swap of source and destination has executed a move from source to
- // destination.
- RemoveMove(index);
-
- // Any unperformed (including pending) move with a source of either
- // this move's source or destination needs to have their source
- // changed to reflect the state of affairs after the swap.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(source)) {
- moves_[i].set_source(destination);
- } else if (other_move.Blocks(destination)) {
- moves_[i].set_source(source);
- }
- }
-
- // In addition to swapping the actual uses as sources, we need to update
- // the use counts.
- if (source->IsRegister() && destination->IsRegister()) {
- int temp = source_uses_[source->index()];
- source_uses_[source->index()] = source_uses_[destination->index()];
- source_uses_[destination->index()] = temp;
- } else if (source->IsRegister()) {
- // We don't have use counts for non-register operands like destination.
- // Compute those counts now.
- source_uses_[source->index()] = CountSourceUses(source);
- } else if (destination->IsRegister()) {
- source_uses_[destination->index()] = CountSourceUses(destination);
- }
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.h b/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.h
deleted file mode 100644
index 0c81d72..0000000
--- a/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.h
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
-#define V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
-
-#include "v8.h"
-
-#include "lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-class LGapResolver;
-
-class LGapResolver BASE_EMBEDDED {
- public:
- explicit LGapResolver(LCodeGen* owner);
-
- // Resolve a set of parallel moves, emitting assembler instructions.
- void Resolve(LParallelMove* parallel_move);
-
- private:
- // Build the initial list of moves.
- void BuildInitialMoveList(LParallelMove* parallel_move);
-
- // Perform the move at the moves_ index in question (possibly requiring
- // other moves to satisfy dependencies).
- void PerformMove(int index);
-
- // Emit any code necessary at the end of a gap move.
- void Finish();
-
- // Add or delete a move from the move graph without emitting any code.
- // Used to build up the graph and remove trivial moves.
- void AddMove(LMoveOperands move);
- void RemoveMove(int index);
-
- // Report the count of uses of operand as a source in a not-yet-performed
- // move. Used to rebuild use counts.
- int CountSourceUses(LOperand* operand);
-
- // Emit a move and remove it from the move graph.
- void EmitMove(int index);
-
- // Execute a move by emitting a swap of two operands. The move from
- // source to destination is removed from the move graph.
- void EmitSwap(int index);
-
- // Ensure that the given operand is not spilled.
- void EnsureRestored(LOperand* operand);
-
- // Return a register that can be used as a temp register, spilling
- // something if necessary.
- Register EnsureTempRegister();
-
- // Return a known free register different from the given one (which could
- // be no_reg---returning any free register), or no_reg if there is no such
- // register.
- Register GetFreeRegisterNot(Register reg);
-
- // Verify that the state is the initial one, ready to resolve a single
- // parallel move.
- bool HasBeenReset();
-
- // Verify the move list before performing moves.
- void Verify();
-
- LCodeGen* cgen_;
-
- // List of moves not yet resolved.
- ZoneList<LMoveOperands> moves_;
-
- // Source and destination use counts for the general purpose registers.
- int source_uses_[Register::kNumAllocatableRegisters];
- int destination_uses_[Register::kNumAllocatableRegisters];
-
- // If we had to spill on demand, the currently spilled register's
- // allocation index.
- int spilled_register_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/lithium-ia32.cc b/src/3rdparty/v8/src/ia32/lithium-ia32.cc
deleted file mode 100644
index 29e1424..0000000
--- a/src/3rdparty/v8/src/ia32/lithium-ia32.cc
+++ /dev/null
@@ -1,2181 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "lithium-allocator-inl.h"
-#include "ia32/lithium-ia32.h"
-#include "ia32/lithium-codegen-ia32.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- void L##type::CompileToNative(LCodeGen* generator) { \
- generator->Do##type(this); \
- }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
- register_spills_[i] = NULL;
- }
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
- double_register_spills_[i] = NULL;
- }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsStackSlot());
- ASSERT(register_spills_[allocation_index] == NULL);
- register_spills_[allocation_index] = spill_operand;
-}
-
-
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsDoubleStackSlot());
- ASSERT(double_register_spills_[allocation_index] == NULL);
- double_register_spills_[allocation_index] = spill_operand;
-}
-
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as
- // temporaries and outputs because all registers
- // are blocked by the calling convention.
- // Inputs must use a fixed register.
- ASSERT(Output() == NULL ||
- LUnallocated::cast(Output())->HasFixedPolicy() ||
- !LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); it.HasNext(); it.Advance()) {
- LOperand* operand = it.Next();
- ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
- !LUnallocated::cast(operand)->HasRegisterPolicy());
- }
- for (TempIterator it(this); it.HasNext(); it.Advance()) {
- LOperand* operand = it.Next();
- ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
- !LUnallocated::cast(operand)->HasRegisterPolicy());
- }
-}
-#endif
-
-
-void LInstruction::PrintTo(StringStream* stream) {
- stream->Add("%s ", this->Mnemonic());
-
- PrintOutputOperandTo(stream);
-
- PrintDataTo(stream);
-
- if (HasEnvironment()) {
- stream->Add(" ");
- environment()->PrintTo(stream);
- }
-
- if (HasPointerMap()) {
- stream->Add(" ");
- pointer_map()->PrintTo(stream);
- }
-}
-
-
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- inputs_.PrintOperandsTo(stream);
-}
-
-
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
- results_.PrintOperandsTo(stream);
-}
-
-
-template<typename T, int N>
-void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) {
- for (int i = 0; i < N; i++) {
- if (i > 0) stream->Add(" ");
- elems_[i]->PrintTo(stream);
- }
-}
-
-
-void LLabel::PrintDataTo(StringStream* stream) {
- LGap::PrintDataTo(stream);
- LLabel* rep = replacement();
- if (rep != NULL) {
- stream->Add(" Dead block replaced with B%d", rep->block_id());
- }
-}
-
-
-bool LGap::IsRedundant() const {
- for (int i = 0; i < 4; i++) {
- if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
- return false;
- }
- }
-
- return true;
-}
-
-
-void LGap::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < 4; i++) {
- stream->Add("(");
- if (parallel_moves_[i] != NULL) {
- parallel_moves_[i]->PrintDataTo(stream);
- }
- stream->Add(") ");
- }
-}
-
-
-const char* LArithmeticD::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-d";
- case Token::SUB: return "sub-d";
- case Token::MUL: return "mul-d";
- case Token::DIV: return "div-d";
- case Token::MOD: return "mod-d";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-const char* LArithmeticT::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-t";
- case Token::SUB: return "sub-t";
- case Token::MUL: return "mul-t";
- case Token::MOD: return "mod-t";
- case Token::DIV: return "div-t";
- case Token::BIT_AND: return "bit-and-t";
- case Token::BIT_OR: return "bit-or-t";
- case Token::BIT_XOR: return "bit-xor-t";
- case Token::SHL: return "sal-t";
- case Token::SAR: return "sar-t";
- case Token::SHR: return "shr-t";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) {
- stream->Add("B%d", block_id());
-}
-
-
-void LBranch::PrintDataTo(StringStream* stream) {
- stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- InputAt(0)->PrintTo(stream);
-}
-
-
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- InputAt(0)->PrintTo(stream);
- stream->Add(" %s ", Token::String(op()));
- InputAt(1)->PrintTo(stream);
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- InputAt(0)->PrintTo(stream);
- stream->Add(is_strict() ? " === null" : " == null");
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_object(");
- InputAt(0)->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_smi(");
- InputAt(0)->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_instance_type(");
- InputAt(0)->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_cached_array_index(");
- InputAt(0)->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if class_of_test(");
- InputAt(0)->PrintTo(stream);
- stream->Add(", \"%o\") then B%d else B%d",
- *hydrogen()->class_name(),
- true_block_id(),
- false_block_id());
-}
-
-
-void LTypeofIs::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
- stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
-}
-
-
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if typeof ");
- InputAt(0)->PrintTo(stream);
- stream->Add(" == \"%s\" then B%d else B%d",
- *hydrogen()->type_literal()->ToCString(),
- true_block_id(), false_block_id());
-}
-
-
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
- stream->Add("/%s ", hydrogen()->OpName());
- InputAt(0)->PrintTo(stream);
-}
-
-
-void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
- stream->Add("[%d]", slot_index());
-}
-
-
-void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
- stream->Add("[%d] <- ", slot_index());
- InputAt(1)->PrintTo(stream);
-}
-
-
-void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[ecx] #%d / ", arity());
-}
-
-
-void LCallNamed::PrintDataTo(StringStream* stream) {
- SmartPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallGlobal::PrintDataTo(StringStream* stream) {
- SmartPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- InputAt(0)->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LClassOfTest::PrintDataTo(StringStream* stream) {
- stream->Add("= class_of_test(");
- InputAt(0)->PrintTo(stream);
- stream->Add(", \"%o\")", *hydrogen()->class_name());
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
- arguments()->PrintTo(stream);
-
- stream->Add(" length ");
- length()->PrintTo(stream);
-
- stream->Add(" index ");
- index()->PrintTo(stream);
-}
-
-
-int LChunk::GetNextSpillIndex(bool is_double) {
- // Skip a slot if for a double-width slot.
- if (is_double) spill_slot_count_++;
- return spill_slot_count_++;
-}
-
-
-LOperand* LChunk::GetNextSpillSlot(bool is_double) {
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
- return LDoubleStackSlot::Create(index);
- } else {
- return LStackSlot::Create(index);
- }
-}
-
-
-void LChunk::MarkEmptyBlocks() {
- HPhase phase("Mark empty blocks", this);
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- int first = block->first_instruction_index();
- int last = block->last_instruction_index();
- LInstruction* first_instr = instructions()->at(first);
- LInstruction* last_instr = instructions()->at(last);
-
- LLabel* label = LLabel::cast(first_instr);
- if (last_instr->IsGoto()) {
- LGoto* goto_instr = LGoto::cast(last_instr);
- if (!goto_instr->include_stack_check() &&
- label->IsRedundant() &&
- !label->is_loop_header()) {
- bool can_eliminate = true;
- for (int i = first + 1; i < last && can_eliminate; ++i) {
- LInstruction* cur = instructions()->at(i);
- if (cur->IsGap()) {
- LGap* gap = LGap::cast(cur);
- if (!gap->IsRedundant()) {
- can_eliminate = false;
- }
- } else {
- can_eliminate = false;
- }
- }
-
- if (can_eliminate) {
- label->set_replacement(GetLabel(goto_instr->block_id()));
- }
- }
- }
- }
-}
-
-
-void LStoreNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
-void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
- LGap* gap = new LGap(block);
- int index = -1;
- if (instr->IsControl()) {
- instructions_.Add(gap);
- index = instructions_.length();
- instructions_.Add(instr);
- } else {
- index = instructions_.length();
- instructions_.Add(instr);
- instructions_.Add(gap);
- }
- if (instr->HasPointerMap()) {
- pointer_maps_.Add(instr->pointer_map());
- instr->pointer_map()->set_lithium_position(index);
- }
-}
-
-
-LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
- return LConstantOperand::Create(constant->id());
-}
-
-
-int LChunk::GetParameterStackSlot(int index) const {
- // The receiver is at index 0, the first parameter at index 1, so we
- // shift all parameter indexes down by the number of parameters, and
- // make sure they end up negative so they are distinguishable from
- // spill slots.
- int result = index - info()->scope()->num_parameters() - 1;
- ASSERT(result < 0);
- return result;
-}
-
-// A parameter relative to ebp in the arguments stub.
-int LChunk::ParameterAt(int index) {
- ASSERT(-1 <= index); // -1 is the receiver.
- return (1 + info()->scope()->num_parameters() - index) *
- kPointerSize;
-}
-
-
-LGap* LChunk::GetGapAt(int index) const {
- return LGap::cast(instructions_[index]);
-}
-
-
-bool LChunk::IsGapAt(int index) const {
- return instructions_[index]->IsGap();
-}
-
-
-int LChunk::NearestGapPos(int index) const {
- while (!IsGapAt(index)) index--;
- return index;
-}
-
-
-void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
- GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
-}
-
-
-Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
- return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
-}
-
-
-Representation LChunk::LookupLiteralRepresentation(
- LConstantOperand* operand) const {
- return graph_->LookupValue(operand->index())->representation();
-}
-
-
-LChunk* LChunkBuilder::Build() {
- ASSERT(is_unused());
- chunk_ = new LChunk(info(), graph());
- HPhase phase("Building chunk", chunk_);
- status_ = BUILDING;
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- HBasicBlock* next = NULL;
- if (i < blocks->length() - 1) next = blocks->at(i + 1);
- DoBasicBlock(blocks->at(i), next);
- if (is_aborted()) return NULL;
- }
- status_ = DONE;
- return chunk_;
-}
-
-
-void LChunkBuilder::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LChunk building in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
- status_ = ABORTED;
-}
-
-
-LRegister* LChunkBuilder::ToOperand(Register reg) {
- return LRegister::Create(Register::ToAllocationIndex(reg));
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
- return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- XMMRegister::ToAllocationIndex(reg));
-}
-
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
- return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) {
- return Use(value, ToUnallocated(reg));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
- return Use(value,
- new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::NONE));
-}
-
-
-LOperand* LChunkBuilder::UseAtStart(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::NONE,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value);
-}
-
-
-LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegister(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegisterAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseAny(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value, new LUnallocated(LUnallocated::ANY));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
- if (value->EmitAtUses()) {
- HInstruction* instr = HInstruction::cast(value);
- VisitInstruction(instr);
- }
- allocator_->RecordUse(value, operand);
- return operand;
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result) {
- allocator_->RecordDefinition(current_instruction_, result);
- instr->set_result(result);
- return instr;
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::NONE));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateInstruction<1, I, T>* instr,
- int index) {
- return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateInstruction<1, I, T>* instr,
- XMMRegister reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
- HEnvironment* hydrogen_env = current_block_->last_environment();
- instr->set_environment(CreateEnvironment(hydrogen_env));
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
- LInstruction* instr, int ast_id) {
- ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
- instruction_pending_deoptimization_environment_ = instr;
- pending_deoptimization_ast_id_ = ast_id;
- return instr;
-}
-
-
-void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
- instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = AstNode::kNoNumber;
-}
-
-
-LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize) {
-#ifdef DEBUG
- instr->VerifyCall();
-#endif
- instr->MarkAsCall();
- instr = AssignPointerMap(instr);
-
- if (hinstr->HasSideEffects()) {
- ASSERT(hinstr->next()->IsSimulate());
- HSimulate* sim = HSimulate::cast(hinstr->next());
- instr = SetInstructionPendingDeoptimizationEnvironment(
- instr, sim->ast_id());
- }
-
- // If instruction does not have side-effects lazy deoptimization
- // after the call will try to deoptimize to the point before the call.
- // Thus we still need to attach environment to this call even if
- // call sequence can not deoptimize eagerly.
- bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
- if (needs_environment && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
-
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
- instr->MarkAsSaveDoubles();
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new LPointerMap(position_));
- return instr;
-}
-
-
-LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- allocator_->RecordTemporary(operand);
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(Register reg) {
- LUnallocated* operand = ToUnallocated(reg);
- allocator_->RecordTemporary(operand);
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
- LUnallocated* operand = ToUnallocated(reg);
- allocator_->RecordTemporary(operand);
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- return new LLabel(instr->block());
-}
-
-
-LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoBit(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineSameAsFirst(new LBitI(op, left, right));
- } else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result = new LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoShift(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result = new LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
- }
-
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->OperandAt(0)->representation().IsInteger32());
- ASSERT(instr->OperandAt(1)->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->OperandAt(0));
-
- HValue* right_value = instr->OperandAt(1);
- LOperand* right = NULL;
- int constant_value = 0;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- } else {
- right = UseFixed(right_value, ecx);
- }
-
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- bool can_deopt = (op == Token::SHR && constant_value == 0);
- if (can_deopt) {
- bool can_truncate = true;
- for (int i = 0; i < instr->uses()->length(); i++) {
- if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) {
- can_truncate = false;
- break;
- }
- }
- can_deopt = !can_truncate;
- }
-
- LShiftI* result = new LShiftI(op, left, right, can_deopt);
- return can_deopt
- ? AssignEnvironment(DefineSameAsFirst(result))
- : DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
- HValue* left = instr->left();
- HValue* right = instr->right();
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
- LOperand* left_operand = UseFixed(left, edx);
- LOperand* right_operand = UseFixed(right, eax);
- LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
- ASSERT(is_building());
- current_block_ = block;
- next_block_ = next_block;
- if (block->IsStartBlock()) {
- block->UpdateEnvironment(graph_->start_environment());
- argument_count_ = 0;
- } else if (block->predecessors()->length() == 1) {
- // We have a single predecessor => copy environment and outgoing
- // argument count from the predecessor.
- ASSERT(block->phis()->length() == 0);
- HBasicBlock* pred = block->predecessors()->at(0);
- HEnvironment* last_environment = pred->last_environment();
- ASSERT(last_environment != NULL);
- // Only copy the environment, if it is later used again.
- if (pred->end()->SecondSuccessor() == NULL) {
- ASSERT(pred->end()->FirstSuccessor() == block);
- } else {
- if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
- pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
- last_environment = last_environment->Copy();
- }
- }
- block->UpdateEnvironment(last_environment);
- ASSERT(pred->argument_count() >= 0);
- argument_count_ = pred->argument_count();
- } else {
- // We are at a state join => process phis.
- HBasicBlock* pred = block->predecessors()->at(0);
- // No need to copy the environment, it cannot be used later.
- HEnvironment* last_environment = pred->last_environment();
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- last_environment->SetValueAt(phi->merged_index(), phi);
- }
- for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
- }
- block->UpdateEnvironment(last_environment);
- // Pick up the outgoing argument count of one of the predecessors.
- argument_count_ = pred->argument_count();
- }
- HInstruction* current = block->first();
- int start = chunk_->instructions()->length();
- while (current != NULL && !is_aborted()) {
- // Code for constants in registers is generated lazily.
- if (!current->EmitAtUses()) {
- VisitInstruction(current);
- }
- current = current->next();
- }
- int end = chunk_->instructions()->length() - 1;
- if (end >= start) {
- block->set_first_instruction_index(start);
- block->set_last_instruction_index(end);
- }
- block->set_argument_count(argument_count_);
- next_block_ = NULL;
- current_block_ = NULL;
-}
-
-
-void LChunkBuilder::VisitInstruction(HInstruction* current) {
- HInstruction* old_current = current_instruction_;
- current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
-
- if (instr != NULL) {
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- if (current->IsTest() && !instr->IsGoto()) {
- ASSERT(instr->IsControl());
- HTest* test = HTest::cast(current);
- instr->set_hydrogen_value(test->value());
- HBasicBlock* first = test->FirstSuccessor();
- HBasicBlock* second = test->SecondSuccessor();
- ASSERT(first != NULL && second != NULL);
- instr->SetBranchTargets(first->block_id(), second->block_id());
- } else {
- instr->set_hydrogen_value(current);
- }
-
- chunk_->AddInstruction(instr, current_block_);
- }
- current_instruction_ = old_current;
-}
-
-
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
- if (hydrogen_env == NULL) return NULL;
-
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
- int ast_id = hydrogen_env->ast_id();
- ASSERT(ast_id != AstNode::kNoNumber);
- int value_count = hydrogen_env->length();
- LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer);
- int argument_index = 0;
- for (int i = 0; i < value_count; ++i) {
- HValue* value = hydrogen_env->values()->at(i);
- LOperand* op = NULL;
- if (value->IsArgumentsObject()) {
- op = NULL;
- } else if (value->IsPushArgument()) {
- op = new LArgument(argument_index++);
- } else {
- op = UseAny(value);
- }
- result->AddValue(op, value->representation());
- }
-
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- LGoto* result = new LGoto(instr->FirstSuccessor()->block_id(),
- instr->include_stack_check());
- return (instr->include_stack_check())
- ? AssignPointerMap(result)
- : result;
-}
-
-
-LInstruction* LChunkBuilder::DoTest(HTest* instr) {
- HValue* v = instr->value();
- if (v->EmitAtUses()) {
- if (v->IsClassOfTest()) {
- HClassOfTest* compare = HClassOfTest::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
- TempRegister(),
- TempRegister());
- } else if (v->IsCompare()) {
- HCompare* compare = HCompare::cast(v);
- Token::Value op = compare->token();
- HValue* left = compare->left();
- HValue* right = compare->right();
- Representation r = compare->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(left->representation().IsInteger32());
- ASSERT(right->representation().IsInteger32());
-
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseOrConstantAtStart(right));
- } else if (r.IsDouble()) {
- ASSERT(left->representation().IsDouble());
- ASSERT(right->representation().IsDouble());
-
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- } else {
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
- bool reversed = op == Token::GT || op == Token::LTE;
- LOperand* left_operand = UseFixed(left, reversed ? eax : edx);
- LOperand* right_operand = UseFixed(right, reversed ? edx : eax);
- LCmpTAndBranch* result = new LCmpTAndBranch(left_operand,
- right_operand);
- return MarkAsCall(result, instr);
- }
- } else if (v->IsIsSmi()) {
- HIsSmi* compare = HIsSmi::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LIsSmiAndBranch(Use(compare->value()));
- } else if (v->IsHasInstanceType()) {
- HHasInstanceType* compare = HHasInstanceType::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()),
- TempRegister());
- } else if (v->IsHasCachedArrayIndex()) {
- HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(compare->value()));
- } else if (v->IsIsNull()) {
- HIsNull* compare = HIsNull::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- // We only need a temp register for non-strict compare.
- LOperand* temp = compare->is_strict() ? NULL : TempRegister();
- return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
- temp);
- } else if (v->IsIsObject()) {
- HIsObject* compare = HIsObject::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
- temp1,
- temp2);
- } else if (v->IsCompareJSObjectEq()) {
- HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
- return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsInstanceOf()) {
- HInstanceOf* instance_of = HInstanceOf::cast(v);
- LOperand* left = UseFixed(instance_of->left(), InstanceofStub::left());
- LOperand* right = UseFixed(instance_of->right(), InstanceofStub::right());
- LOperand* context = UseFixed(instance_of->context(), esi);
- LInstanceOfAndBranch* result =
- new LInstanceOfAndBranch(context, left, right);
- return MarkAsCall(result, instr);
- } else if (v->IsTypeofIs()) {
- HTypeofIs* typeof_is = HTypeofIs::cast(v);
- return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
- } else if (v->IsIsConstructCall()) {
- return new LIsConstructCallAndBranch(TempRegister());
- } else {
- if (v->IsConstant()) {
- if (HConstant::cast(v)->handle()->IsTrue()) {
- return new LGoto(instr->FirstSuccessor()->block_id());
- } else if (HConstant::cast(v)->handle()->IsFalse()) {
- return new LGoto(instr->SecondSuccessor()->block_id());
- }
- }
- Abort("Undefined compare before branch");
- return NULL;
- }
- }
- return new LBranch(UseRegisterAtStart(v));
-}
-
-
-LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new LCmpMapAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
- return DefineAsRegister(new LArgumentsLength(Use(length->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- return DefineAsRegister(new LArgumentsElements);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LOperand* left = UseFixed(instr->left(), InstanceofStub::left());
- LOperand* right = UseFixed(instr->right(), InstanceofStub::right());
- LOperand* context = UseFixed(instr->context(), esi);
- LInstanceOf* result = new LInstanceOf(context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
- HInstanceOfKnownGlobal* instr) {
- LInstanceOfKnownGlobal* result =
- new LInstanceOfKnownGlobal(
- UseFixed(instr->value(), InstanceofStub::left()),
- FixedTemp(edi));
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
- LOperand* function = UseFixed(instr->function(), edi);
- LOperand* receiver = UseFixed(instr->receiver(), eax);
- LOperand* length = UseFixed(instr->length(), ebx);
- LOperand* elements = UseFixed(instr->elements(), ecx);
- LOperand* temp = FixedTemp(edx);
- LApplyArguments* result = new LApplyArguments(function,
- receiver,
- length,
- elements,
- temp);
- return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
- LOperand* argument = UseAny(instr->argument());
- return new LPushArgument(argument);
-}
-
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return DefineAsRegister(new LContext);
-}
-
-
-LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LOuterContext(context));
-}
-
-
-LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LGlobalObject(context));
-}
-
-
-LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LGlobalReceiver(global_object));
-}
-
-
-LInstruction* LChunkBuilder::DoCallConstantFunction(
- HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallConstantFunction, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- BuiltinFunctionId op = instr->op();
- if (op == kMathLog) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* input = UseRegisterAtStart(instr->value());
- LUnaryMathOperation* result = new LUnaryMathOperation(input);
- return DefineSameAsFirst(result);
- } else if (op == kMathSin || op == kMathCos) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LUnaryMathOperation* result = new LUnaryMathOperation(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
- } else {
- LOperand* input = UseRegisterAtStart(instr->value());
- LUnaryMathOperation* result = new LUnaryMathOperation(input);
- switch (op) {
- case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- case kMathFloor:
- return AssignEnvironment(DefineAsRegister(result));
- case kMathRound:
- return AssignEnvironment(DefineAsRegister(result));
- case kMathSqrt:
- return DefineSameAsFirst(result);
- case kMathPowHalf:
- return DefineSameAsFirst(result);
- default:
- UNREACHABLE();
- return NULL;
- }
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- ASSERT(instr->key()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* key = UseFixed(instr->key(), ecx);
- argument_count_ -= instr->argument_count();
- LCallKeyed* result = new LCallKeyed(context, key);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
- LCallNamed* result = new LCallNamed(context);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
- LCallGlobal* result = new LCallGlobal(context);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallKnownGlobal, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* constructor = UseFixed(instr->constructor(), edi);
- argument_count_ -= instr->argument_count();
- LCallNew* result = new LCallNew(context, constructor);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
- LCallFunction* result = new LCallFunction(context);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallRuntime, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- return DoShift(Token::SHR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- return DoShift(Token::SAR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- return DoShift(Token::SHL, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
- return DoBit(Token::BIT_AND, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
- ASSERT(instr->value()->representation().IsInteger32());
- ASSERT(instr->representation().IsInteger32());
- LOperand* input = UseRegisterAtStart(instr->value());
- LBitNotI* result = new LBitNotI(input);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
- return DoBit(Token::BIT_OR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
- return DoBit(Token::BIT_XOR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsInteger32()) {
- // The temporary operand is necessary to ensure that right is not allocated
- // into edx.
- LOperand* temp = FixedTemp(edx);
- LOperand* dividend = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* result = new LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(result, eax));
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::DIV, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LInstruction* result;
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LModI* mod = new LModI(value, UseOrConstant(instr->right()), NULL);
- result = DefineSameAsFirst(mod);
- } else {
- // The temporary operand is necessary to ensure that right is
- // not allocated into edx.
- LOperand* temp = FixedTemp(edx);
- LOperand* value = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LModI* mod = new LModI(value, divisor, temp);
- result = DefineFixed(mod, edx);
- }
-
- return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero))
- ? AssignEnvironment(result)
- : result;
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
- } else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC.
- // We need to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LOperand* left = UseFixedDouble(instr->left(), xmm2);
- LOperand* right = UseFixedDouble(instr->right(), xmm1);
- LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
- LOperand* temp = NULL;
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- temp = TempRegister();
- }
- LMulI* mul = new LMulI(left, right, temp);
- return AssignEnvironment(DefineSameAsFirst(mul));
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MUL, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::MUL, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LSubI* sub = new LSubI(left, right);
- LInstruction* result = DefineSameAsFirst(sub);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::SUB, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::SUB, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- LAddI* add = new LAddI(left, right);
- LInstruction* result = DefineSameAsFirst(add);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::ADD, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::ADD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double power. It can't trigger a GC.
- // We need to use fixed result register for the call.
- Representation exponent_type = instr->right()->representation();
- ASSERT(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), xmm1);
- LOperand* right = exponent_type.IsDouble() ?
- UseFixedDouble(instr->right(), xmm2) :
- UseFixed(instr->right(), eax);
- LPower* result = new LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
- CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
- Token::Value op = instr->token();
- Representation r = instr->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- return DefineAsRegister(new LCmpID(left, right));
- } else if (r.IsDouble()) {
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return DefineAsRegister(new LCmpID(left, right));
- } else {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- bool reversed = (op == Token::GT || op == Token::LTE);
- LOperand* left = UseFixed(instr->left(), reversed ? eax : edx);
- LOperand* right = UseFixed(instr->right(), reversed ? edx : eax);
- LCmpT* result = new LCmpT(left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareJSObjectEq(
- HCompareJSObjectEq* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsNull(value));
-}
-
-
-LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
-
- return DefineAsRegister(new LIsObject(value, TempRegister()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseAtStart(instr->value());
-
- return DefineAsRegister(new LIsSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LHasInstanceType(value));
-}
-
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
- HGetCachedArrayIndex* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
- HHasCachedArrayIndex* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
-
- return DefineAsRegister(new LHasCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseTempRegister(instr->value());
-
- return DefineSameAsFirst(new LClassOfTest(value, TempRegister()));
-}
-
-
-LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LJSArrayLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LFixedArrayLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoExternalArrayLength(
- HExternalArrayLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LExternalArrayLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- LOperand* object = UseRegister(instr->value());
- LValueOf* result = new LValueOf(object, TempRegister());
- return AssignEnvironment(DefineSameAsFirst(result));
-}
-
-
-LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
- Use(instr->length())));
-}
-
-
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- LOperand* value = UseFixed(instr->value(), eax);
- return MarkAsCall(new LThrow(value), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Representation from = instr->from();
- Representation to = instr->to();
- if (from.IsTagged()) {
- if (to.IsDouble()) {
- LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new LNumberUntagD(value);
- return AssignEnvironment(DefineAsRegister(res));
- } else {
- ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- bool needs_check = !instr->value()->type().IsSmi();
- if (needs_check) {
- LOperand* xmm_temp =
- (instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
- ? NULL
- : FixedTemp(xmm1);
- LTaggedToI* res = new LTaggedToI(value, xmm_temp);
- return AssignEnvironment(DefineSameAsFirst(res));
- } else {
- return DefineSameAsFirst(new LSmiUntag(value, needs_check));
- }
- }
- } else if (from.IsDouble()) {
- if (to.IsTagged()) {
- LOperand* value = UseRegister(instr->value());
- LOperand* temp = TempRegister();
-
- // Make sure that temp and result_temp are different registers.
- LUnallocated* result_temp = TempRegister();
- LNumberTagD* result = new LNumberTagD(value, temp);
- return AssignPointerMap(Define(result, result_temp));
- } else {
- ASSERT(to.IsInteger32());
- bool needs_temp = instr->CanTruncateToInt32() &&
- !CpuFeatures::IsSupported(SSE3);
- LOperand* value = needs_temp ?
- UseTempRegister(instr->value()) : UseRegister(instr->value());
- LOperand* temp = needs_temp ? TempRegister() : NULL;
- return AssignEnvironment(DefineAsRegister(new LDoubleToI(value, temp)));
- }
- } else if (from.IsInteger32()) {
- if (to.IsTagged()) {
- HValue* val = instr->value();
- LOperand* value = UseRegister(val);
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return DefineSameAsFirst(new LSmiTag(value));
- } else {
- LNumberTagI* result = new LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- }
- } else {
- ASSERT(to.IsDouble());
- return DefineAsRegister(new LInteger32ToDouble(Use(instr->value())));
- }
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckNonSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- LCheckInstanceType* result = new LCheckInstanceType(value, temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LOperand* temp = TempRegister();
- LCheckPrototypeMaps* result = new LCheckPrototypeMaps(temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckFunction(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LCheckMap* result = new LCheckMap(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- return new LReturn(UseFixed(instr->value(), eax));
-}
-
-
-LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
- Representation r = instr->representation();
- if (r.IsInteger32()) {
- return DefineAsRegister(new LConstantI);
- } else if (r.IsDouble()) {
- double value = instr->DoubleValue();
- LOperand* temp = (BitCast<uint64_t, double>(value) != 0)
- ? TempRegister()
- : NULL;
- return DefineAsRegister(new LConstantD(temp));
- } else if (r.IsTagged()) {
- return DefineAsRegister(new LConstantT);
- } else {
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new LLoadGlobalCell;
- return instr->check_hole_value()
- ? AssignEnvironment(DefineAsRegister(result))
- : DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* global_object = UseFixed(instr->global_object(), eax);
- LLoadGlobalGeneric* result = new LLoadGlobalGeneric(context, global_object);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LStoreGlobalCell* result =
- new LStoreGlobalCell(UseRegisterAtStart(instr->value()));
- return instr->check_hole_value() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* global_object = UseFixed(instr->global_object(), edx);
- LOperand* value = UseFixed(instr->value(), eax);
- LStoreGlobalGeneric* result =
- new LStoreGlobalGeneric(context, global_object, value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadContextSlot(context));
-}
-
-
-LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* context;
- LOperand* value;
- LOperand* temp;
- if (instr->NeedsWriteBarrier()) {
- context = UseTempRegister(instr->context());
- value = UseTempRegister(instr->value());
- temp = TempRegister();
- } else {
- context = UseRegister(instr->context());
- value = UseRegister(instr->value());
- temp = NULL;
- }
- return new LStoreContextSlot(context, value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- ASSERT(instr->representation().IsTagged());
- LOperand* obj = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new LLoadNamedField(obj));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
- HLoadNamedFieldPolymorphic* instr) {
- ASSERT(instr->representation().IsTagged());
- if (instr->need_generic()) {
- LOperand* obj = UseFixed(instr->object(), eax);
- LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
- return MarkAsCall(DefineFixed(result, eax), instr);
- } else {
- LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->object(), eax);
- LLoadNamedGeneric* result = new LLoadNamedGeneric(context, object);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
- HLoadFunctionPrototype* instr) {
- return AssignEnvironment(DefineAsRegister(
- new LLoadFunctionPrototype(UseRegister(instr->function()),
- TempRegister())));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadElements(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
- HLoadExternalArrayPointer* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadExternalArrayPointer(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
- HLoadKeyedFastElement* instr) {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
- LOperand* obj = UseRegisterAtStart(instr->object());
- LOperand* key = UseRegisterAtStart(instr->key());
- LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
- return AssignEnvironment(DefineSameAsFirst(result));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
- HLoadKeyedSpecializedArrayElement* instr) {
- ExternalArrayType array_type = instr->array_type();
- Representation representation(instr->representation());
- ASSERT((representation.IsInteger32() && array_type != kExternalFloatArray) ||
- (representation.IsDouble() && array_type == kExternalFloatArray));
- ASSERT(instr->key()->representation().IsInteger32());
- LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* key = UseRegister(instr->key());
- LLoadKeyedSpecializedArrayElement* result =
- new LLoadKeyedSpecializedArrayElement(external_pointer,
- key);
- LInstruction* load_instr = DefineAsRegister(result);
- // An unsigned int array load might overflow and cause a deopt, make sure it
- // has an environment.
- return (array_type == kExternalUnsignedIntArray)
- ? AssignEnvironment(load_instr)
- : load_instr;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->object(), edx);
- LOperand* key = UseFixed(instr->key(), eax);
-
- LLoadKeyedGeneric* result = new LLoadKeyedGeneric(context, object, key);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
- HStoreKeyedFastElement* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- ASSERT(instr->value()->representation().IsTagged());
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
-
- LOperand* obj = UseTempRegister(instr->object());
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- LOperand* key = needs_write_barrier
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
-
- return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
- HStoreKeyedSpecializedArrayElement* instr) {
- Representation representation(instr->value()->representation());
- ExternalArrayType array_type = instr->array_type();
- ASSERT((representation.IsInteger32() && array_type != kExternalFloatArray) ||
- (representation.IsDouble() && array_type == kExternalFloatArray));
- ASSERT(instr->external_pointer()->representation().IsExternal());
- ASSERT(instr->key()->representation().IsInteger32());
-
- LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* key = UseRegister(instr->key());
- LOperand* temp = NULL;
-
- if (array_type == kExternalPixelArray) {
- // The generated code for pixel array stores requires that the clamped value
- // is in a byte register. eax is an arbitrary choice to satisfy this
- // requirement.
- temp = FixedTemp(eax);
- }
-
- LOperand* val = NULL;
- if (array_type == kExternalByteArray ||
- array_type == kExternalUnsignedByteArray) {
- // We need a byte register in this case for the value.
- val = UseFixed(instr->value(), eax);
- } else {
- val = UseRegister(instr->value());
- }
-
- return new LStoreKeyedSpecializedArrayElement(external_pointer,
- key,
- val,
- temp);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->object(), edx);
- LOperand* key = UseFixed(instr->key(), ecx);
- LOperand* value = UseFixed(instr->value(), eax);
-
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsTagged());
- ASSERT(instr->value()->representation().IsTagged());
-
- LStoreKeyedGeneric* result =
- new LStoreKeyedGeneric(context, object, key, value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
-
- LOperand* obj = needs_write_barrier
- ? UseTempRegister(instr->object())
- : UseRegisterAtStart(instr->object());
-
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
-
- // We only need a scratch register if we have a write barrier or we
- // have a store into the properties array (not in-object-property).
- LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
- ? TempRegister()
- : NULL;
-
- return new LStoreNamedField(obj, val, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->object(), edx);
- LOperand* value = UseFixed(instr->value(), eax);
-
- LStoreNamedGeneric* result = new LStoreNamedGeneric(context, object, value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegisterOrConstant(instr->index());
- LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
- LOperand* char_code = UseRegister(instr->value());
- LStringCharFromCode* result = new LStringCharFromCode(char_code);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
- LOperand* string = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LStringLength(string));
-}
-
-
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- return MarkAsCall(DefineFixed(new LArrayLiteral, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(DefineFixed(new LObjectLiteral(context), eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- return MarkAsCall(DefineFixed(new LRegExpLiteral, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- return MarkAsCall(DefineFixed(new LFunctionLiteral, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LDeleteProperty* result =
- new LDeleteProperty(Use(instr->object()), UseOrConstant(instr->key()));
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- allocator_->MarkAsOsrEntry();
- current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new LOsrEntry);
-}
-
-
-LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new LParameter, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- return DefineAsSpilled(new LUnknownOSRValue, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
- LCallStub* result = new LCallStub(context);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object.
- // arguments.length and element access are supported directly on
- // stack arguments, and any real arguments object use causes a bailout.
- // So this value is never used.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- LOperand* arguments = UseRegister(instr->arguments());
- LOperand* length = UseTempRegister(instr->length());
- LOperand* index = Use(instr->index());
- LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
- return AssignEnvironment(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), eax);
- LToFastProperties* result = new LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LTypeof* result = new LTypeof(UseAtStart(instr->value()));
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
- return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
- return DefineAsRegister(new LIsConstructCall);
-}
-
-
-LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = 0; i < instr->values()->length(); ++i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
-
- // If there is an instruction pending deoptimization environment create a
- // lazy bailout instruction to capture the environment.
- if (pending_deoptimization_ast_id_ != AstNode::kNoNumber) {
- ASSERT(pending_deoptimization_ast_id_ == instr->ast_id());
- LLazyBailout* lazy_bailout = new LLazyBailout;
- LInstruction* result = AssignEnvironment(lazy_bailout);
- instruction_pending_deoptimization_environment_->
- set_deoptimization_environment(result->environment());
- ClearInstructionPendingDeoptimizationEnvironment();
- return result;
- }
-
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- return MarkAsCall(new LStackCheck, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
- HEnvironment* outer = current_block_->last_environment();
- HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->function(),
- false,
- undefined);
- current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedClosure(instr->closure());
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- HEnvironment* outer = current_block_->last_environment()->outer();
- current_block_->UpdateEnvironment(outer);
- return NULL;
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/lithium-ia32.h b/src/3rdparty/v8/src/ia32/lithium-ia32.h
deleted file mode 100644
index fe7681b..0000000
--- a/src/3rdparty/v8/src/ia32/lithium-ia32.h
+++ /dev/null
@@ -1,2235 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_LITHIUM_IA32_H_
-#define V8_IA32_LITHIUM_IA32_H_
-
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "lithium.h"
-#include "safepoint-table.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LCodeGen;
-
-#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
- V(ControlInstruction) \
- V(Call) \
- LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddI) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(ArrayLiteral) \
- V(BitI) \
- V(BitNotI) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallConstantFunction) \
- V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
- V(CallNew) \
- V(CallRuntime) \
- V(CallStub) \
- V(CheckFunction) \
- V(CheckInstanceType) \
- V(CheckMap) \
- V(CheckNonSmi) \
- V(CheckPrototypeMaps) \
- V(CheckSmi) \
- V(ClassOfTest) \
- V(ClassOfTestAndBranch) \
- V(CmpID) \
- V(CmpIDAndBranch) \
- V(CmpJSObjectEq) \
- V(CmpJSObjectEqAndBranch) \
- V(CmpMapAndBranch) \
- V(CmpT) \
- V(CmpTAndBranch) \
- V(ConstantD) \
- V(ConstantI) \
- V(ConstantT) \
- V(Context) \
- V(DeleteProperty) \
- V(Deoptimize) \
- V(DivI) \
- V(DoubleToI) \
- V(ExternalArrayLength) \
- V(FixedArrayLength) \
- V(FunctionLiteral) \
- V(Gap) \
- V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
- V(Goto) \
- V(HasCachedArrayIndex) \
- V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceType) \
- V(HasInstanceTypeAndBranch) \
- V(InstanceOf) \
- V(InstanceOfAndBranch) \
- V(InstanceOfKnownGlobal) \
- V(Integer32ToDouble) \
- V(IsNull) \
- V(IsNullAndBranch) \
- V(IsObject) \
- V(IsObjectAndBranch) \
- V(IsSmi) \
- V(IsSmiAndBranch) \
- V(IsConstructCall) \
- V(IsConstructCallAndBranch) \
- V(JSArrayLength) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadElements) \
- V(LoadExternalArrayPointer) \
- V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
- V(LoadGlobalGeneric) \
- V(LoadKeyedFastElement) \
- V(LoadKeyedGeneric) \
- V(LoadKeyedSpecializedArrayElement) \
- V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
- V(LoadNamedGeneric) \
- V(ModI) \
- V(MulI) \
- V(NumberTagD) \
- V(NumberTagI) \
- V(NumberUntagD) \
- V(ObjectLiteral) \
- V(OsrEntry) \
- V(OuterContext) \
- V(Parameter) \
- V(Power) \
- V(PushArgument) \
- V(RegExpLiteral) \
- V(Return) \
- V(ShiftI) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreContextSlot) \
- V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
- V(StoreKeyedFastElement) \
- V(StoreKeyedGeneric) \
- V(StoreKeyedSpecializedArrayElement) \
- V(StoreNamedField) \
- V(StoreNamedGeneric) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringLength) \
- V(SubI) \
- V(TaggedToI) \
- V(Throw) \
- V(ToFastProperties) \
- V(Typeof) \
- V(TypeofIs) \
- V(TypeofIsAndBranch) \
- V(UnaryMathOperation) \
- V(UnknownOSRValue) \
- V(ValueOf)
-
-
-#define DECLARE_INSTRUCTION(type) \
- virtual bool Is##type() const { return true; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
- }
-
-
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- DECLARE_INSTRUCTION(type)
-
-
-#define DECLARE_HYDROGEN_ACCESSOR(type) \
- H##type* hydrogen() const { \
- return H##type::cast(hydrogen_value()); \
- }
-
-
-class LInstruction: public ZoneObject {
- public:
- LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- is_call_(false),
- is_save_doubles_(false) { }
- virtual ~LInstruction() { }
-
- virtual void CompileToNative(LCodeGen* generator) = 0;
- virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream) = 0;
- virtual void PrintOutputOperandTo(StringStream* stream) = 0;
-
- // Declare virtual type testers.
-#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
- LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- virtual bool IsControl() const { return false; }
- virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
-
- void set_environment(LEnvironment* env) { environment_ = env; }
- LEnvironment* environment() const { return environment_; }
- bool HasEnvironment() const { return environment_ != NULL; }
-
- void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
- LPointerMap* pointer_map() const { return pointer_map_.get(); }
- bool HasPointerMap() const { return pointer_map_.is_set(); }
-
-
- void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
- HValue* hydrogen_value() const { return hydrogen_value_; }
-
- void set_deoptimization_environment(LEnvironment* env) {
- deoptimization_environment_.set(env);
- }
- LEnvironment* deoptimization_environment() const {
- return deoptimization_environment_.get();
- }
- bool HasDeoptimizationEnvironment() const {
- return deoptimization_environment_.is_set();
- }
-
- void MarkAsCall() { is_call_ = true; }
- void MarkAsSaveDoubles() { is_save_doubles_ = true; }
-
- // Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return is_call_; }
- bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
-
- virtual bool HasResult() const = 0;
- virtual LOperand* result() = 0;
-
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
- LOperand* FirstInput() { return InputAt(0); }
- LOperand* Output() { return HasResult() ? result() : NULL; }
-
-#ifdef DEBUG
- void VerifyCall();
-#endif
-
- private:
- LEnvironment* environment_;
- SetOncePointer<LPointerMap> pointer_map_;
- HValue* hydrogen_value_;
- SetOncePointer<LEnvironment> deoptimization_environment_;
- bool is_call_;
- bool is_save_doubles_;
-};
-
-
-template<typename ElementType, int NumElements>
-class OperandContainer {
- public:
- OperandContainer() {
- for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
- }
- int length() { return NumElements; }
- ElementType& operator[](int i) {
- ASSERT(i < length());
- return elems_[i];
- }
- void PrintOperandsTo(StringStream* stream);
-
- private:
- ElementType elems_[NumElements];
-};
-
-
-template<typename ElementType>
-class OperandContainer<ElementType, 0> {
- public:
- int length() { return 0; }
- void PrintOperandsTo(StringStream* stream) { }
- ElementType& operator[](int i) {
- UNREACHABLE();
- static ElementType t = 0;
- return t;
- }
-};
-
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
- public:
- // Allow 0 or 1 output operands.
- STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0; }
- void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() { return results_[0]; }
-
- int InputCount() { return I; }
- LOperand* InputAt(int i) { return inputs_[i]; }
-
- int TempCount() { return T; }
- LOperand* TempAt(int i) { return temps_[i]; }
-
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
- protected:
- OperandContainer<LOperand*, R> results_;
- OperandContainer<LOperand*, I> inputs_;
- OperandContainer<LOperand*, T> temps_;
-};
-
-
-class LGap: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGap(HBasicBlock* block)
- : block_(block) {
- parallel_moves_[BEFORE] = NULL;
- parallel_moves_[START] = NULL;
- parallel_moves_[END] = NULL;
- parallel_moves_[AFTER] = NULL;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
- virtual void PrintDataTo(StringStream* stream);
-
- bool IsRedundant() const;
-
- HBasicBlock* block() const { return block_; }
-
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
-
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
- if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
- return parallel_moves_[pos];
- }
-
- LParallelMove* GetParallelMove(InnerPosition pos) {
- return parallel_moves_[pos];
- }
-
- private:
- LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
- HBasicBlock* block_;
-};
-
-
-class LGoto: public LTemplateInstruction<0, 0, 0> {
- public:
- LGoto(int block_id, bool include_stack_check = false)
- : block_id_(block_id), include_stack_check_(include_stack_check) { }
-
- DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
-
- int block_id() const { return block_id_; }
- bool include_stack_check() const { return include_stack_check_; }
-
- private:
- int block_id_;
- bool include_stack_check_;
-};
-
-
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
- public:
- LLazyBailout() : gap_instructions_size_(0) { }
-
- DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-
- void set_gap_instructions_size(int gap_instructions_size) {
- gap_instructions_size_ = gap_instructions_size;
- }
- int gap_instructions_size() { return gap_instructions_size_; }
-
- private:
- int gap_instructions_size_;
-};
-
-
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
-};
-
-
-class LLabel: public LGap {
- public:
- explicit LLabel(HBasicBlock* block)
- : LGap(block), replacement_(NULL) { }
-
- DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
- virtual void PrintDataTo(StringStream* stream);
-
- int block_id() const { return block()->block_id(); }
- bool is_loop_header() const { return block()->IsLoopHeader(); }
- Label* label() { return &label_; }
- LLabel* replacement() const { return replacement_; }
- void set_replacement(LLabel* label) { replacement_ = label; }
- bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
- Label label_;
- LLabel* replacement_;
-};
-
-
-class LParameter: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-};
-
-
-class LCallStub: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallStub(LOperand* context) {
- inputs_[0] = context;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
- DECLARE_HYDROGEN_ACCESSOR(CallStub)
-
- LOperand* context() { return inputs_[0]; }
-
- TranscendentalCache::Type transcendental_type() {
- return hydrogen()->transcendental_type();
- }
-};
-
-
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-
-template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
- public:
- DECLARE_INSTRUCTION(ControlInstruction)
- virtual bool IsControl() const { return true; }
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
- void SetBranchTargets(int true_block_id, int false_block_id) {
- true_block_id_ = true_block_id;
- false_block_id_ = false_block_id;
- }
-
- private:
- int true_block_id_;
- int false_block_id_;
-};
-
-
-class LApplyArguments: public LTemplateInstruction<1, 4, 1> {
- public:
- LApplyArguments(LOperand* function,
- LOperand* receiver,
- LOperand* length,
- LOperand* elements,
- LOperand* temp) {
- inputs_[0] = function;
- inputs_[1] = receiver;
- inputs_[2] = length;
- inputs_[3] = elements;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
-
- LOperand* function() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* length() { return inputs_[2]; }
- LOperand* elements() { return inputs_[3]; }
-};
-
-
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
- public:
- LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
- inputs_[0] = arguments;
- inputs_[1] = length;
- inputs_[2] = index;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
- LOperand* arguments() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArgumentsLength(LOperand* elements) {
- inputs_[0] = elements;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
-};
-
-
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
- public:
- LArgumentsElements() { }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
-};
-
-
-class LModI: public LTemplateInstruction<1, 2, 1> {
- public:
- LModI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-};
-
-
-class LDivI: public LTemplateInstruction<1, 2, 1> {
- public:
- LDivI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-};
-
-
-class LMulI: public LTemplateInstruction<1, 2, 1> {
- public:
- LMulI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-class LCmpID: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpID(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
- }
-};
-
-
-class LCmpIDAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpIDAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUnaryMathOperation(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-
- virtual void PrintDataTo(StringStream* stream);
- BuiltinFunctionId op() const { return hydrogen()->op(); }
-};
-
-
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpJSObjectEq(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
-};
-
-
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
- "cmp-jsobject-eq-and-branch")
-};
-
-
-class LIsNull: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsNull(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
- DECLARE_HYDROGEN_ACCESSOR(IsNull)
-
- bool is_strict() const { return hydrogen()->is_strict(); }
-};
-
-
-class LIsNullAndBranch: public LControlInstruction<1, 1> {
- public:
- LIsNullAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNull)
-
- bool is_strict() const { return hydrogen()->is_strict(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsObject: public LTemplateInstruction<1, 1, 1> {
- public:
- LIsObject(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
-};
-
-
-class LIsObjectAndBranch: public LControlInstruction<1, 2> {
- public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsSmi: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
- DECLARE_HYDROGEN_ACCESSOR(IsSmi)
-};
-
-
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LIsSmiAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LHasInstanceType(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
-};
-
-
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
- public:
- LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
- "has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGetCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LHasCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
- "has-cached-array-index-and-branch")
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
- DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
-};
-
-
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) {
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
-class LClassOfTest: public LTemplateInstruction<1, 1, 1> {
- public:
- LClassOfTest(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
- public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
- "class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpT(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LCmpTAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpTAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LInstanceOf: public LTemplateInstruction<1, 3, 0> {
- public:
- LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-
- LOperand* context() { return inputs_[0]; }
-};
-
-
-class LInstanceOfAndBranch: public LControlInstruction<3, 0> {
- public:
- LInstanceOfAndBranch(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
-
- LOperand* context() { return inputs_[0]; }
-};
-
-
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
- public:
- LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
- "instance-of-known-global")
- DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
-
- Handle<JSFunction> function() const { return hydrogen()->function(); }
-};
-
-
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
- public:
- LBoundsCheck(LOperand* index, LOperand* length) {
- inputs_[0] = index;
- inputs_[1] = length;
- }
-
- LOperand* index() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
-};
-
-
-class LBitI: public LTemplateInstruction<1, 2, 0> {
- public:
- LBitI(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
-
- DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-
- private:
- Token::Value op_;
-};
-
-
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
- public:
- LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : op_(op), can_deopt_(can_deopt) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
-
- bool can_deopt() const { return can_deopt_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
-
- private:
- Token::Value op_;
- bool can_deopt_;
-};
-
-
-class LSubI: public LTemplateInstruction<1, 2, 0> {
- public:
- LSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- int32_t value() const { return hydrogen()->Integer32Value(); }
-};
-
-
-class LConstantD: public LTemplateInstruction<1, 0, 1> {
- public:
- explicit LConstantD(LOperand* temp) {
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- double value() const { return hydrogen()->DoubleValue(); }
-};
-
-
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Handle<Object> value() const { return hydrogen()->handle(); }
-};
-
-
-class LBranch: public LControlInstruction<1, 0> {
- public:
- explicit LBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Value)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCmpMapAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
- virtual bool IsControl() const { return true; }
-
- Handle<Map> map() const { return hydrogen()->map(); }
- int true_block_id() const {
- return hydrogen()->FirstSuccessor()->block_id();
- }
- int false_block_id() const {
- return hydrogen()->SecondSuccessor()->block_id();
- }
-};
-
-
-class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LJSArrayLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
- DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
-};
-
-
-class LExternalArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LExternalArrayLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ExternalArrayLength, "external-array-length")
- DECLARE_HYDROGEN_ACCESSOR(ExternalArrayLength)
-};
-
-
-class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFixedArrayLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed-array-length")
- DECLARE_HYDROGEN_ACCESSOR(FixedArrayLength)
-};
-
-
-class LValueOf: public LTemplateInstruction<1, 1, 1> {
- public:
- LValueOf(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-};
-
-
-class LThrow: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LThrow(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
-};
-
-
-class LBitNotI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LBitNotI(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
-};
-
-
-class LAddI: public LTemplateInstruction<1, 2, 0> {
- public:
- LAddI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LPower: public LTemplateInstruction<1, 2, 0> {
- public:
- LPower(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Power, "power")
- DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
-
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
-
- private:
- Token::Value op_;
-};
-
-
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
-
- Token::Value op() const { return op_; }
-
- private:
- Token::Value op_;
-};
-
-
-class LReturn: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LReturn(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Return, "return")
-};
-
-
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedField(LOperand* object) {
- inputs_[0] = object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-
- LOperand* object() { return inputs_[0]; }
-};
-
-
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedFieldPolymorphic(LOperand* object) {
- inputs_[0] = object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-
- LOperand* object() { return inputs_[0]; }
-};
-
-
-class LLoadNamedGeneric: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadNamedGeneric(LOperand* context, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 1> {
- public:
- LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
- inputs_[0] = function;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
- DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-
- LOperand* function() { return inputs_[0]; }
-};
-
-
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
-};
-
-
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadExternalArrayPointer(LOperand* object) {
- inputs_[0] = object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
- "load-external-array-pointer")
-};
-
-
-class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
- inputs_[0] = elements;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-};
-
-
-class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
- LOperand* key) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
- "load-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
-
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- ExternalArrayType array_type() const {
- return hydrogen()->array_type();
- }
-};
-
-
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 3, 0> {
- public:
- LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key) {
- inputs_[0] = context;
- inputs_[1] = obj;
- inputs_[2] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
-};
-
-
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
-};
-
-
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
- inputs_[0] = context;
- inputs_[1] = global_object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
- LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
- Handle<Object> name() const { return hydrogen()->name(); }
- bool for_typeof() const { return hydrogen()->for_typeof(); }
-};
-
-
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStoreGlobalCell(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-};
-
-
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 3, 0> {
- public:
- explicit LStoreGlobalGeneric(LOperand* context,
- LOperand* global_object,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = global_object;
- inputs_[2] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
-
- LOperand* context() { return InputAt(0); }
- LOperand* global_object() { return InputAt(1); }
- Handle<Object> name() const { return hydrogen()->name(); }
- LOperand* value() { return InputAt(2); }
-};
-
-
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadContextSlot(LOperand* context) {
- inputs_[0] = context;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-
- LOperand* context() { return InputAt(0); }
- int slot_index() { return hydrogen()->slot_index(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
-
- LOperand* context() { return InputAt(0); }
- LOperand* value() { return InputAt(1); }
- int slot_index() { return hydrogen()->slot_index(); }
- int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LPushArgument(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
-};
-
-
-class LContext: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Context, "context")
-};
-
-
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LOuterContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
-
- LOperand* context() { return InputAt(0); }
-};
-
-
-class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalObject(LOperand* context) {
- inputs_[0] = context;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-
- LOperand* context() { return InputAt(0); }
-};
-
-
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalReceiver(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
-
- LOperand* global() { return InputAt(0); }
-};
-
-
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
- DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> function() { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallKeyed: public LTemplateInstruction<1, 2, 0> {
- public:
- LCallKeyed(LOperand* context, LOperand* key) {
- inputs_[0] = context;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
- DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
-
- LOperand* context() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNamed: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallNamed(LOperand* context) {
- inputs_[0] = context;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
- DECLARE_HYDROGEN_ACCESSOR(CallNamed)
-
- virtual void PrintDataTo(StringStream* stream);
-
- LOperand* context() { return inputs_[0]; }
- Handle<String> name() const { return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallFunction(LOperand* context) {
- inputs_[0] = context;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- LOperand* context() { return inputs_[0]; }
- int arity() const { return hydrogen()->argument_count() - 2; }
-};
-
-
-class LCallGlobal: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallGlobal(LOperand* context) {
- inputs_[0] = context;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
- DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
-
- virtual void PrintDataTo(StringStream* stream);
-
- LOperand* context() { return inputs_[0]; }
- Handle<String> name() const {return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
- DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> target() const { return hydrogen()->target(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNew: public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNew(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- virtual void PrintDataTo(StringStream* stream);
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
- DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
- const Runtime::Function* function() const { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count(); }
-};
-
-
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
-};
-
-
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagI(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
-};
-
-
-class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
- public:
- LNumberTagD(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
-};
-
-
-// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
- public:
- LDoubleToI(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
- public:
- LTaggedToI(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LSmiTag(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
-};
-
-
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberUntagD(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
-};
-
-
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
- public:
- LSmiUntag(LOperand* value, bool needs_check)
- : needs_check_(needs_check) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
- bool needs_check() const { return needs_check_; }
-
- private:
- bool needs_check_;
-};
-
-
-class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp) {
- inputs_[0] = obj;
- inputs_[1] = val;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
- virtual void PrintDataTo(StringStream* stream);
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- Handle<Object> name() const { return hydrogen()->name(); }
- bool is_in_object() { return hydrogen()->is_in_object(); }
- int offset() { return hydrogen()->offset(); }
- bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
- Handle<Map> transition() const { return hydrogen()->transition(); }
-};
-
-
-class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
- virtual void PrintDataTo(StringStream* stream);
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
-class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
- inputs_[0] = obj;
- inputs_[1] = key;
- inputs_[2] = val;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
-
- virtual void PrintDataTo(StringStream* stream);
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-};
-
-
-class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 1> {
- public:
- LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
- LOperand* key,
- LOperand* val,
- LOperand* temp) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- inputs_[2] = val;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
- "store-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
-
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- ExternalArrayType array_type() const {
- return hydrogen()->array_type();
- }
-};
-
-
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
- public:
- LStoreKeyedGeneric(LOperand* context,
- LOperand* object,
- LOperand* key,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = key;
- inputs_[3] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
-
- virtual void PrintDataTo(StringStream* stream);
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
-};
-
-
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
- public:
- LStringCharCodeAt(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-};
-
-
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LStringCharFromCode(LOperand* char_code) {
- inputs_[0] = char_code;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-
- LOperand* char_code() { return inputs_[0]; }
-};
-
-
-class LStringLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LStringLength(LOperand* string) {
- inputs_[0] = string;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
- DECLARE_HYDROGEN_ACCESSOR(StringLength)
-
- LOperand* string() { return inputs_[0]; }
-};
-
-
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckFunction(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
-};
-
-
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
- public:
- LCheckInstanceType(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-};
-
-
-class LCheckMap: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckMap(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
- DECLARE_HYDROGEN_ACCESSOR(CheckMap)
-};
-
-
-class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
- public:
- explicit LCheckPrototypeMaps(LOperand* temp) {
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
-
- Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
- Handle<JSObject> holder() const { return hydrogen()->holder(); }
-};
-
-
-class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
-};
-
-
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckNonSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
-};
-
-
-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
-};
-
-
-class LObjectLiteral: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LObjectLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
-
- LOperand* context() { return inputs_[0]; }
-};
-
-
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
- DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-
- Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
-};
-
-
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LTypeof(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LTypeofIs(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LTypeofIsAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
- public:
- LDeleteProperty(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-};
-
-
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
- public:
- LOsrEntry();
-
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
- LOperand** SpilledRegisterArray() { return register_spills_; }
- LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand);
-
- private:
- // Arrays of spill slot operands for registers with an assigned spill
- // slot, i.e., that must also be restored to the spill slot on OSR entry.
- // NULL if the register has no assigned spill slot. Indexed by allocation
- // index.
- LOperand* register_spills_[Register::kNumAllocatableRegisters];
- LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
-};
-
-
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
-};
-
-
-class LChunkBuilder;
-class LChunk: public ZoneObject {
- public:
- explicit LChunk(CompilationInfo* info, HGraph* graph)
- : spill_slot_count_(0),
- info_(info),
- graph_(graph),
- instructions_(32),
- pointer_maps_(8),
- inlined_closures_(1) { }
-
- void AddInstruction(LInstruction* instruction, HBasicBlock* block);
- LConstantOperand* DefineConstantOperand(HConstant* constant);
- Handle<Object> LookupLiteral(LConstantOperand* operand) const;
- Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
-
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
-
- int ParameterAt(int index);
- int GetParameterStackSlot(int index) const;
- int spill_slot_count() const { return spill_slot_count_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
- void AddGapMove(int index, LOperand* from, LOperand* to);
- LGap* GetGapAt(int index) const;
- bool IsGapAt(int index) const;
- int NearestGapPos(int index) const;
- void MarkEmptyBlocks();
- const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
- LLabel* GetLabel(int block_id) const {
- HBasicBlock* block = graph_->blocks()->at(block_id);
- int first_instruction = block->first_instruction_index();
- return LLabel::cast(instructions_[first_instruction]);
- }
- int LookupDestination(int block_id) const {
- LLabel* cur = GetLabel(block_id);
- while (cur->replacement() != NULL) {
- cur = cur->replacement();
- }
- return cur->block_id();
- }
- Label* GetAssemblyLabel(int block_id) const {
- LLabel* label = GetLabel(block_id);
- ASSERT(!label->HasReplacement());
- return label->label();
- }
-
- const ZoneList<Handle<JSFunction> >* inlined_closures() const {
- return &inlined_closures_;
- }
-
- void AddInlinedClosure(Handle<JSFunction> closure) {
- inlined_closures_.Add(closure);
- }
-
- private:
- int spill_slot_count_;
- CompilationInfo* info_;
- HGraph* const graph_;
- ZoneList<LInstruction*> instructions_;
- ZoneList<LPointerMap*> pointer_maps_;
- ZoneList<Handle<JSFunction> > inlined_closures_;
-};
-
-
-class LChunkBuilder BASE_EMBEDDED {
- public:
- LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : chunk_(NULL),
- info_(info),
- graph_(graph),
- status_(UNUSED),
- current_instruction_(NULL),
- current_block_(NULL),
- next_block_(NULL),
- argument_count_(0),
- allocator_(allocator),
- position_(RelocInfo::kNoPosition),
- instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
-
- // Build the sequence for the graph.
- LChunk* Build();
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- enum Status {
- UNUSED,
- BUILDING,
- DONE,
- ABORTED
- };
-
- LChunk* chunk() const { return chunk_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_building() const { return status_ == BUILDING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- void Abort(const char* format, ...);
-
- // Methods for getting operands for Use / Define / Temp.
- LRegister* ToOperand(Register reg);
- LUnallocated* ToUnallocated(Register reg);
- LUnallocated* ToUnallocated(XMMRegister reg);
-
- // Methods for setting up define-use relationships.
- MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
- MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
- MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
- XMMRegister fixed_register);
-
- // A value that is guaranteed to be allocated to a register.
- // Operand created by UseRegister is guaranteed to be live until the end of
- // instruction. This means that register allocator will not reuse it's
- // register for any other operand inside instruction.
- // Operand created by UseRegisterAtStart is guaranteed to be live only at
- // instruction start. Register allocator is free to assign the same register
- // to some other operand used inside instruction (i.e. temporary or
- // output).
- MUST_USE_RESULT LOperand* UseRegister(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
-
- // An input operand in a register that may be trashed.
- MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
-
- // An input operand in a register or stack slot.
- MUST_USE_RESULT LOperand* Use(HValue* value);
- MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
-
- // An input operand in a register, stack slot or a constant operand.
- MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
-
- // An input operand in a register or a constant operand.
- MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
-
- // An input operand in register, stack slot or a constant operand.
- // Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value);
-
- // Temporary operand that must be in a register.
- MUST_USE_RESULT LUnallocated* TempRegister();
- MUST_USE_RESULT LOperand* FixedTemp(Register reg);
- MUST_USE_RESULT LOperand* FixedTemp(XMMRegister reg);
-
- // Methods for setting up define-use relationships.
- // Return the same instruction that they are passed.
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result);
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
- int index);
- template<int I, int T>
- LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg);
- template<int I, int T>
- LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
- XMMRegister reg);
- LInstruction* AssignEnvironment(LInstruction* instr);
- LInstruction* AssignPointerMap(LInstruction* instr);
-
- enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
-
- // By default we assume that instruction sequences generated for calls
- // cannot deoptimize eagerly and we do not attach environment to this
- // instruction.
- LInstruction* MarkAsCall(
- LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
- LInstruction* MarkAsSaveDoubles(LInstruction* instr);
-
- LInstruction* SetInstructionPendingDeoptimizationEnvironment(
- LInstruction* instr, int ast_id);
- void ClearInstructionPendingDeoptimizationEnvironment();
-
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
-
- void VisitInstruction(HInstruction* current);
-
- void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr);
- LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
-
- LChunk* chunk_;
- CompilationInfo* info_;
- HGraph* const graph_;
- Status status_;
- HInstruction* current_instruction_;
- HBasicBlock* current_block_;
- HBasicBlock* next_block_;
- int argument_count_;
- LAllocator* allocator_;
- int position_;
- LInstruction* instruction_pending_deoptimization_environment_;
- int pending_deoptimization_ast_id_;
-
- DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
-};
-
-#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_INSTRUCTION
-#undef DECLARE_CONCRETE_INSTRUCTION
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_LITHIUM_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc b/src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc
deleted file mode 100644
index 4055498..0000000
--- a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc
+++ /dev/null
@@ -1,2056 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "bootstrapper.h"
-#include "codegen-inl.h"
-#include "debug.h"
-#include "runtime.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// MacroAssembler implementation.
-
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
- : Assembler(arg_isolate, buffer, size),
- generating_stub_(false),
- allow_stub_calls_(true) {
- if (isolate() != NULL) {
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
- }
-}
-
-
-void MacroAssembler::RecordWriteHelper(Register object,
- Register addr,
- Register scratch) {
- if (emit_debug_code()) {
- // Check that the object is not in new space.
- Label not_in_new_space;
- InNewSpace(object, scratch, not_equal, &not_in_new_space);
- Abort("new-space object passed to RecordWriteHelper");
- bind(&not_in_new_space);
- }
-
- // Compute the page start address from the heap object pointer, and reuse
- // the 'object' register for it.
- and_(object, ~Page::kPageAlignmentMask);
-
- // Compute number of region covering addr. See Page::GetRegionNumberForAddress
- // method for more details.
- and_(addr, Page::kPageAlignmentMask);
- shr(addr, Page::kRegionSizeLog2);
-
- // Set dirty mark for region.
- bts(Operand(object, Page::kDirtyFlagOffset), addr);
-}
-
-
-void MacroAssembler::RecordWrite(Register object,
- int offset,
- Register value,
- Register scratch) {
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis and stores into young gen.
- NearLabel done;
-
- // Skip barrier if writing a smi.
- ASSERT_EQ(0, kSmiTag);
- test(value, Immediate(kSmiTagMask));
- j(zero, &done);
-
- InNewSpace(object, value, equal, &done);
-
- // The offset is relative to a tagged or untagged HeapObject pointer,
- // so either offset or offset + kHeapObjectTag must be a
- // multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize) ||
- IsAligned(offset + kHeapObjectTag, kPointerSize));
-
- Register dst = scratch;
- if (offset != 0) {
- lea(dst, Operand(object, offset));
- } else {
- // Array access: calculate the destination address in the same manner as
- // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
- // into an array of words.
- ASSERT_EQ(1, kSmiTagSize);
- ASSERT_EQ(0, kSmiTag);
- lea(dst, Operand(object, dst, times_half_pointer_size,
- FixedArray::kHeaderSize - kHeapObjectTag));
- }
- RecordWriteHelper(object, dst, value);
-
- bind(&done);
-
- // Clobber all input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(object, Immediate(BitCast<int32_t>(kZapValue)));
- mov(value, Immediate(BitCast<int32_t>(kZapValue)));
- mov(scratch, Immediate(BitCast<int32_t>(kZapValue)));
- }
-}
-
-
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value) {
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis and stores into young gen.
- Label done;
-
- // Skip barrier if writing a smi.
- ASSERT_EQ(0, kSmiTag);
- test(value, Immediate(kSmiTagMask));
- j(zero, &done);
-
- InNewSpace(object, value, equal, &done);
-
- RecordWriteHelper(object, address, value);
-
- bind(&done);
-
- // Clobber all input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(object, Immediate(BitCast<int32_t>(kZapValue)));
- mov(address, Immediate(BitCast<int32_t>(kZapValue)));
- mov(value, Immediate(BitCast<int32_t>(kZapValue)));
- }
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void MacroAssembler::DebugBreak() {
- Set(eax, Immediate(0));
- mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
- CEntryStub ces(1);
- call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
-}
-#endif
-
-
-void MacroAssembler::Set(Register dst, const Immediate& x) {
- if (x.is_zero()) {
- xor_(dst, Operand(dst)); // shorter than mov
- } else {
- mov(dst, x);
- }
-}
-
-
-void MacroAssembler::Set(const Operand& dst, const Immediate& x) {
- mov(dst, x);
-}
-
-
-void MacroAssembler::CmpObjectType(Register heap_object,
- InstanceType type,
- Register map) {
- mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
- CmpInstanceType(map, type);
-}
-
-
-void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
- cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
- static_cast<int8_t>(type));
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Handle<Map> map,
- Label* fail,
- bool is_heap_object) {
- if (!is_heap_object) {
- test(obj, Immediate(kSmiTagMask));
- j(zero, fail);
- }
- cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
- j(not_equal, fail);
-}
-
-
-Condition MacroAssembler::IsObjectStringType(Register heap_object,
- Register map,
- Register instance_type) {
- mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
- movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- ASSERT(kNotStringTag != 0);
- test(instance_type, Immediate(kIsNotStringMask));
- return zero;
-}
-
-
-void MacroAssembler::IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail) {
- mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
- IsInstanceJSObjectType(map, scratch, fail);
-}
-
-
-void MacroAssembler::IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail) {
- movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
- sub(Operand(scratch), Immediate(FIRST_JS_OBJECT_TYPE));
- cmp(scratch, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
- j(above, fail);
-}
-
-
-void MacroAssembler::FCmp() {
- if (CpuFeatures::IsSupported(CMOV)) {
- fucomip();
- ffree(0);
- fincstp();
- } else {
- fucompp();
- push(eax);
- fnstsw_ax();
- sahf();
- pop(eax);
- }
-}
-
-
-void MacroAssembler::AbortIfNotNumber(Register object) {
- Label ok;
- test(object, Immediate(kSmiTagMask));
- j(zero, &ok);
- cmp(FieldOperand(object, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- Assert(equal, "Operand not a number");
- bind(&ok);
-}
-
-
-void MacroAssembler::AbortIfNotSmi(Register object) {
- test(object, Immediate(kSmiTagMask));
- Assert(equal, "Operand is not a smi");
-}
-
-
-void MacroAssembler::AbortIfNotString(Register object) {
- test(object, Immediate(kSmiTagMask));
- Assert(not_equal, "Operand is not a string");
- push(object);
- mov(object, FieldOperand(object, HeapObject::kMapOffset));
- CmpInstanceType(object, FIRST_NONSTRING_TYPE);
- pop(object);
- Assert(below, "Operand is not a string");
-}
-
-
-void MacroAssembler::AbortIfSmi(Register object) {
- test(object, Immediate(kSmiTagMask));
- Assert(not_equal, "Operand is a smi");
-}
-
-
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
- push(ebp);
- mov(ebp, Operand(esp));
- push(esi);
- push(Immediate(Smi::FromInt(type)));
- push(Immediate(CodeObject()));
- if (emit_debug_code()) {
- cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
- Check(not_equal, "code object not properly patched");
- }
-}
-
-
-void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- if (emit_debug_code()) {
- cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(type)));
- Check(equal, "stack frame types must match");
- }
- leave();
-}
-
-
-void MacroAssembler::EnterExitFramePrologue() {
- // Setup the frame structure on the stack.
- ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
- push(ebp);
- mov(ebp, Operand(esp));
-
- // Reserve room for entry stack pointer and push the code object.
- ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
- push(Immediate(0)); // Saved entry sp, patched before call.
- push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
-
- // Save the frame pointer and the context in top.
- ExternalReference c_entry_fp_address(Isolate::k_c_entry_fp_address,
- isolate());
- ExternalReference context_address(Isolate::k_context_address,
- isolate());
- mov(Operand::StaticVariable(c_entry_fp_address), ebp);
- mov(Operand::StaticVariable(context_address), esi);
-}
-
-
-void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
- // Optionally save all XMM registers.
- if (save_doubles) {
- CpuFeatures::Scope scope(SSE2);
- int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
- sub(Operand(esp), Immediate(space));
- const int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
- }
- } else {
- sub(Operand(esp), Immediate(argc * kPointerSize));
- }
-
- // Get the required frame alignment for the OS.
- const int kFrameAlignment = OS::ActivationFrameAlignment();
- if (kFrameAlignment > 0) {
- ASSERT(IsPowerOf2(kFrameAlignment));
- and_(esp, -kFrameAlignment);
- }
-
- // Patch the saved entry sp.
- mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
-}
-
-
-void MacroAssembler::EnterExitFrame(bool save_doubles) {
- EnterExitFramePrologue();
-
- // Setup argc and argv in callee-saved registers.
- int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- mov(edi, Operand(eax));
- lea(esi, Operand(ebp, eax, times_4, offset));
-
- // Reserve space for argc, argv and isolate.
- EnterExitFrameEpilogue(3, save_doubles);
-}
-
-
-void MacroAssembler::EnterApiExitFrame(int argc) {
- EnterExitFramePrologue();
- EnterExitFrameEpilogue(argc, false);
-}
-
-
-void MacroAssembler::LeaveExitFrame(bool save_doubles) {
- // Optionally restore all XMM registers.
- if (save_doubles) {
- CpuFeatures::Scope scope(SSE2);
- const int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
- }
- }
-
- // Get the return address from the stack and restore the frame pointer.
- mov(ecx, Operand(ebp, 1 * kPointerSize));
- mov(ebp, Operand(ebp, 0 * kPointerSize));
-
- // Pop the arguments and the receiver from the caller stack.
- lea(esp, Operand(esi, 1 * kPointerSize));
-
- // Push the return address to get ready to return.
- push(ecx);
-
- LeaveExitFrameEpilogue();
-}
-
-void MacroAssembler::LeaveExitFrameEpilogue() {
- // Restore current context from top and clear it in debug mode.
- ExternalReference context_address(Isolate::k_context_address, isolate());
- mov(esi, Operand::StaticVariable(context_address));
-#ifdef DEBUG
- mov(Operand::StaticVariable(context_address), Immediate(0));
-#endif
-
- // Clear the top frame.
- ExternalReference c_entry_fp_address(Isolate::k_c_entry_fp_address,
- isolate());
- mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
-}
-
-
-void MacroAssembler::LeaveApiExitFrame() {
- mov(esp, Operand(ebp));
- pop(ebp);
-
- LeaveExitFrameEpilogue();
-}
-
-
-void MacroAssembler::PushTryHandler(CodeLocation try_location,
- HandlerType type) {
- // Adjust this code if not the case.
- ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
- // The pc (return address) is already on TOS.
- if (try_location == IN_JAVASCRIPT) {
- if (type == TRY_CATCH_HANDLER) {
- push(Immediate(StackHandler::TRY_CATCH));
- } else {
- push(Immediate(StackHandler::TRY_FINALLY));
- }
- push(ebp);
- } else {
- ASSERT(try_location == IN_JS_ENTRY);
- // The frame pointer does not point to a JS frame so we save NULL
- // for ebp. We expect the code throwing an exception to check ebp
- // before dereferencing it to restore the context.
- push(Immediate(StackHandler::ENTRY));
- push(Immediate(0)); // NULL frame pointer.
- }
- // Save the current handler as the next handler.
- push(Operand::StaticVariable(ExternalReference(Isolate::k_handler_address,
- isolate())));
- // Link this handler as the new current one.
- mov(Operand::StaticVariable(ExternalReference(Isolate::k_handler_address,
- isolate())),
- esp);
-}
-
-
-void MacroAssembler::PopTryHandler() {
- ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
- pop(Operand::StaticVariable(ExternalReference(Isolate::k_handler_address,
- isolate())));
- add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
-}
-
-
-void MacroAssembler::Throw(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
- // eax must hold the exception.
- if (!value.is(eax)) {
- mov(eax, value);
- }
-
- // Drop the sp to the top of the handler.
- ExternalReference handler_address(Isolate::k_handler_address,
- isolate());
- mov(esp, Operand::StaticVariable(handler_address));
-
- // Restore next handler and frame pointer, discard handler state.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- pop(Operand::StaticVariable(handler_address));
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
- pop(ebp);
- pop(edx); // Remove state.
-
- // Before returning we restore the context from the frame pointer if
- // not NULL. The frame pointer is NULL in the exception handler of
- // a JS entry frame.
- Set(esi, Immediate(0)); // Tentatively set context pointer to NULL.
- NearLabel skip;
- cmp(ebp, 0);
- j(equal, &skip, not_taken);
- mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- bind(&skip);
-
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
- ret(0);
-}
-
-
-void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
- Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
- // eax must hold the exception.
- if (!value.is(eax)) {
- mov(eax, value);
- }
-
- // Drop sp to the top stack handler.
- ExternalReference handler_address(Isolate::k_handler_address,
- isolate());
- mov(esp, Operand::StaticVariable(handler_address));
-
- // Unwind the handlers until the ENTRY handler is found.
- NearLabel loop, done;
- bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
- j(equal, &done);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- mov(esp, Operand(esp, kNextOffset));
- jmp(&loop);
- bind(&done);
-
- // Set the top handler address to next handler past the current ENTRY handler.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- pop(Operand::StaticVariable(handler_address));
-
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(
- Isolate::k_external_caught_exception_address,
- isolate());
- mov(eax, false);
- mov(Operand::StaticVariable(external_caught), eax);
-
- // Set pending exception and eax to out of memory exception.
- ExternalReference pending_exception(Isolate::k_pending_exception_address,
- isolate());
- mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
- mov(Operand::StaticVariable(pending_exception), eax);
- }
-
- // Clear the context pointer.
- Set(esi, Immediate(0));
-
- // Restore fp from handler and discard handler state.
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
- pop(ebp);
- pop(edx); // State.
-
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
- ret(0);
-}
-
-
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss) {
- Label same_contexts;
-
- ASSERT(!holder_reg.is(scratch));
-
- // Load current lexical context from the stack frame.
- mov(scratch, Operand(ebp, StandardFrameConstants::kContextOffset));
-
- // When generating debug code, make sure the lexical context is set.
- if (emit_debug_code()) {
- cmp(Operand(scratch), Immediate(0));
- Check(not_equal, "we should not have an empty lexical context");
- }
- // Load the global context of the current context.
- int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- mov(scratch, FieldOperand(scratch, offset));
- mov(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
-
- // Check the context is a global context.
- if (emit_debug_code()) {
- push(scratch);
- // Read the first word and compare to global_context_map.
- mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- cmp(scratch, isolate()->factory()->global_context_map());
- Check(equal, "JSGlobalObject::global_context should be a global context.");
- pop(scratch);
- }
-
- // Check if both contexts are the same.
- cmp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
- j(equal, &same_contexts, taken);
-
- // Compare security tokens, save holder_reg on the stack so we can use it
- // as a temporary register.
- //
- // TODO(119): avoid push(holder_reg)/pop(holder_reg)
- push(holder_reg);
- // Check that the security token in the calling global object is
- // compatible with the security token in the receiving global
- // object.
- mov(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
-
- // Check the context is a global context.
- if (emit_debug_code()) {
- cmp(holder_reg, isolate()->factory()->null_value());
- Check(not_equal, "JSGlobalProxy::context() should not be null.");
-
- push(holder_reg);
- // Read the first word and compare to global_context_map(),
- mov(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
- cmp(holder_reg, isolate()->factory()->global_context_map());
- Check(equal, "JSGlobalObject::global_context should be a global context.");
- pop(holder_reg);
- }
-
- int token_offset = Context::kHeaderSize +
- Context::SECURITY_TOKEN_INDEX * kPointerSize;
- mov(scratch, FieldOperand(scratch, token_offset));
- cmp(scratch, FieldOperand(holder_reg, token_offset));
- pop(holder_reg);
- j(not_equal, miss, not_taken);
-
- bind(&same_contexts);
-}
-
-
-void MacroAssembler::LoadAllocationTopHelper(Register result,
- Register scratch,
- AllocationFlags flags) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Just return if allocation top is already known.
- if ((flags & RESULT_CONTAINS_TOP) != 0) {
- // No use of scratch if allocation top is provided.
- ASSERT(scratch.is(no_reg));
-#ifdef DEBUG
- // Assert that result actually contains top on entry.
- cmp(result, Operand::StaticVariable(new_space_allocation_top));
- Check(equal, "Unexpected allocation top");
-#endif
- return;
- }
-
- // Move address of new object to result. Use scratch register if available.
- if (scratch.is(no_reg)) {
- mov(result, Operand::StaticVariable(new_space_allocation_top));
- } else {
- mov(Operand(scratch), Immediate(new_space_allocation_top));
- mov(result, Operand(scratch, 0));
- }
-}
-
-
-void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
- Register scratch) {
- if (emit_debug_code()) {
- test(result_end, Immediate(kObjectAlignmentMask));
- Check(zero, "Unaligned allocation in new space");
- }
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Update new top. Use scratch if available.
- if (scratch.is(no_reg)) {
- mov(Operand::StaticVariable(new_space_allocation_top), result_end);
- } else {
- mov(Operand(scratch, 0), result_end);
- }
-}
-
-
-void MacroAssembler::AllocateInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- mov(result, Immediate(0x7091));
- if (result_end.is_valid()) {
- mov(result_end, Immediate(0x7191));
- }
- if (scratch.is_valid()) {
- mov(scratch, Immediate(0x7291));
- }
- }
- jmp(gc_required);
- return;
- }
- ASSERT(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- Register top_reg = result_end.is_valid() ? result_end : result;
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- if (!top_reg.is(result)) {
- mov(top_reg, result);
- }
- add(Operand(top_reg), Immediate(object_size));
- j(carry, gc_required, not_taken);
- cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit));
- j(above, gc_required, not_taken);
-
- // Update allocation top.
- UpdateAllocationTopHelper(top_reg, scratch);
-
- // Tag result if requested.
- if (top_reg.is(result)) {
- if ((flags & TAG_OBJECT) != 0) {
- sub(Operand(result), Immediate(object_size - kHeapObjectTag));
- } else {
- sub(Operand(result), Immediate(object_size));
- }
- } else if ((flags & TAG_OBJECT) != 0) {
- add(Operand(result), Immediate(kHeapObjectTag));
- }
-}
-
-
-void MacroAssembler::AllocateInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- mov(result, Immediate(0x7091));
- mov(result_end, Immediate(0x7191));
- if (scratch.is_valid()) {
- mov(scratch, Immediate(0x7291));
- }
- // Register element_count is not modified by the function.
- }
- jmp(gc_required);
- return;
- }
- ASSERT(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- // We assume that element_count*element_size + header_size does not
- // overflow.
- lea(result_end, Operand(element_count, element_size, header_size));
- add(result_end, Operand(result));
- j(carry, gc_required);
- cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
- j(above, gc_required);
-
- // Tag result if requested.
- if ((flags & TAG_OBJECT) != 0) {
- lea(result, Operand(result, kHeapObjectTag));
- }
-
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
-}
-
-
-void MacroAssembler::AllocateInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- mov(result, Immediate(0x7091));
- mov(result_end, Immediate(0x7191));
- if (scratch.is_valid()) {
- mov(scratch, Immediate(0x7291));
- }
- // object_size is left unchanged by this function.
- }
- jmp(gc_required);
- return;
- }
- ASSERT(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
- if (!object_size.is(result_end)) {
- mov(result_end, object_size);
- }
- add(result_end, Operand(result));
- j(carry, gc_required, not_taken);
- cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
- j(above, gc_required, not_taken);
-
- // Tag result if requested.
- if ((flags & TAG_OBJECT) != 0) {
- lea(result, Operand(result, kHeapObjectTag));
- }
-
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
-}
-
-
-void MacroAssembler::UndoAllocationInNewSpace(Register object) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Make sure the object has no tag before resetting top.
- and_(Operand(object), Immediate(~kHeapObjectTagMask));
-#ifdef DEBUG
- cmp(object, Operand::StaticVariable(new_space_allocation_top));
- Check(below, "Undo allocation of non allocated memory");
-#endif
- mov(Operand::StaticVariable(new_space_allocation_top), object);
-}
-
-
-void MacroAssembler::AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->heap_number_map()));
-}
-
-
-void MacroAssembler::AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- ASSERT(kShortSize == 2);
- // scratch1 = length * 2 + kObjectAlignmentMask.
- lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
- and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
-
- // Allocate two byte string in new space.
- AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
- times_1,
- scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->string_map()));
- mov(scratch1, length);
- SmiTag(scratch1);
- mov(FieldOperand(result, String::kLengthOffset), scratch1);
- mov(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
- mov(scratch1, length);
- ASSERT(kCharSize == 1);
- add(Operand(scratch1), Immediate(kObjectAlignmentMask));
- and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
-
- // Allocate ascii string in new space.
- AllocateInNewSpace(SeqAsciiString::kHeaderSize,
- times_1,
- scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->ascii_string_map()));
- mov(scratch1, length);
- SmiTag(scratch1);
- mov(FieldOperand(result, String::kLengthOffset), scratch1);
- mov(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateAsciiString(Register result,
- int length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- ASSERT(length > 0);
-
- // Allocate ascii string in new space.
- AllocateInNewSpace(SeqAsciiString::SizeFor(length),
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->ascii_string_map()));
- mov(FieldOperand(result, String::kLengthOffset),
- Immediate(Smi::FromInt(length)));
- mov(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map. The other fields are left uninitialized.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->cons_string_map()));
-}
-
-
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map. The other fields are left uninitialized.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->cons_ascii_string_map()));
-}
-
-
-// Copy memory, byte-by-byte, from source to destination. Not optimized for
-// long or aligned copies. The contents of scratch and length are destroyed.
-// Source and destination are incremented by length.
-// Many variants of movsb, loop unrolling, word moves, and indexed operands
-// have been tried here already, and this is fastest.
-// A simpler loop is faster on small copies, but 30% slower on large ones.
-// The cld() instruction must have been emitted, to set the direction flag(),
-// before calling this function.
-void MacroAssembler::CopyBytes(Register source,
- Register destination,
- Register length,
- Register scratch) {
- Label loop, done, short_string, short_loop;
- // Experimentation shows that the short string loop is faster if length < 10.
- cmp(Operand(length), Immediate(10));
- j(less_equal, &short_string);
-
- ASSERT(source.is(esi));
- ASSERT(destination.is(edi));
- ASSERT(length.is(ecx));
-
- // Because source is 4-byte aligned in our uses of this function,
- // we keep source aligned for the rep_movs call by copying the odd bytes
- // at the end of the ranges.
- mov(scratch, Operand(source, length, times_1, -4));
- mov(Operand(destination, length, times_1, -4), scratch);
- mov(scratch, ecx);
- shr(ecx, 2);
- rep_movs();
- and_(Operand(scratch), Immediate(0x3));
- add(destination, Operand(scratch));
- jmp(&done);
-
- bind(&short_string);
- test(length, Operand(length));
- j(zero, &done);
-
- bind(&short_loop);
- mov_b(scratch, Operand(source, 0));
- mov_b(Operand(destination, 0), scratch);
- inc(source);
- inc(destination);
- dec(length);
- j(not_zero, &short_loop);
-
- bind(&done);
-}
-
-
-void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
- Register result,
- Register op,
- JumpTarget* then_target) {
- JumpTarget ok;
- test(result, Operand(result));
- ok.Branch(not_zero, taken);
- test(op, Operand(op));
- then_target->Branch(sign, not_taken);
- ok.Bind();
-}
-
-
-void MacroAssembler::NegativeZeroTest(Register result,
- Register op,
- Label* then_label) {
- Label ok;
- test(result, Operand(result));
- j(not_zero, &ok, taken);
- test(op, Operand(op));
- j(sign, then_label, not_taken);
- bind(&ok);
-}
-
-
-void MacroAssembler::NegativeZeroTest(Register result,
- Register op1,
- Register op2,
- Register scratch,
- Label* then_label) {
- Label ok;
- test(result, Operand(result));
- j(not_zero, &ok, taken);
- mov(scratch, Operand(op1));
- or_(scratch, Operand(op2));
- j(sign, then_label, not_taken);
- bind(&ok);
-}
-
-
-void MacroAssembler::TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss) {
- // Check that the receiver isn't a smi.
- test(function, Immediate(kSmiTagMask));
- j(zero, miss, not_taken);
-
- // Check that the function really is a function.
- CmpObjectType(function, JS_FUNCTION_TYPE, result);
- j(not_equal, miss, not_taken);
-
- // Make sure that the function has an instance prototype.
- Label non_instance;
- movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
- test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
- j(not_zero, &non_instance, not_taken);
-
- // Get the prototype or initial map from the function.
- mov(result,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it and
- // simply miss the cache instead. This will allow us to allocate a
- // prototype object on-demand in the runtime system.
- cmp(Operand(result), Immediate(isolate()->factory()->the_hole_value()));
- j(equal, miss, not_taken);
-
- // If the function does not have an initial map, we're done.
- Label done;
- CmpObjectType(result, MAP_TYPE, scratch);
- j(not_equal, &done);
-
- // Get the prototype from the initial map.
- mov(result, FieldOperand(result, Map::kPrototypeOffset));
- jmp(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- mov(result, FieldOperand(result, Map::kConstructorOffset));
-
- // All done.
- bind(&done);
-}
-
-
-void MacroAssembler::CallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
- call(stub->GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
- Object* result;
- { MaybeObject* maybe_result = stub->TryGetCode();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
- return result;
-}
-
-
-void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
- jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
- Object* result;
- { MaybeObject* maybe_result = stub->TryGetCode();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- jmp(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
- return result;
-}
-
-
-void MacroAssembler::StubReturn(int argc) {
- ASSERT(argc >= 1 && generating_stub());
- ret((argc - 1) * kPointerSize);
-}
-
-
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- add(Operand(esp), Immediate(num_arguments * kPointerSize));
- }
- mov(eax, Immediate(isolate()->factory()->undefined_value()));
-}
-
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
- // The assert checks that the constants for the maximum number of digits
- // for an array index cached in the hash field and the number of bits
- // reserved for it does not conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- and_(hash, String::kArrayIndexValueMask);
- STATIC_ASSERT(String::kHashShift >= kSmiTagSize && kSmiTag == 0);
- if (String::kHashShift > kSmiTagSize) {
- shr(hash, String::kHashShift - kSmiTagSize);
- }
- if (!index.is(hash)) {
- mov(index, hash);
- }
-}
-
-
-void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- Set(eax, Immediate(function->nargs));
- mov(ebx, Immediate(ExternalReference(function, isolate())));
- CEntryStub ces(1);
- ces.SaveDoubles();
- CallStub(&ces);
-}
-
-
-MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
- int num_arguments) {
- return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
- // If the expected number of arguments of the runtime function is
- // constant, we check that the actual number of arguments match the
- // expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(eax, Immediate(num_arguments));
- mov(ebx, Immediate(ExternalReference(f, isolate())));
- CEntryStub ces(1);
- CallStub(&ces);
-}
-
-
-MaybeObject* MacroAssembler::TryCallRuntime(const Runtime::Function* f,
- int num_arguments) {
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- // Since we did not call the stub, there was no allocation failure.
- // Return some non-failure object.
- return isolate()->heap()->undefined_value();
- }
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(eax, Immediate(num_arguments));
- mov(ebx, Immediate(ExternalReference(f, isolate())));
- CEntryStub ces(1);
- return TryCallStub(&ces);
-}
-
-
-void MacroAssembler::CallExternalReference(ExternalReference ref,
- int num_arguments) {
- mov(eax, Immediate(num_arguments));
- mov(ebx, Immediate(ref));
-
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(eax, Immediate(num_arguments));
- JumpToExternalReference(ext);
-}
-
-
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(eax, Immediate(num_arguments));
- return TryJumpToExternalReference(ext);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
-}
-
-
-MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- return TryTailCallExternalReference(
- ExternalReference(fid, isolate()), num_arguments, result_size);
-}
-
-
-// If true, a Handle<T> returned by value from a function with cdecl calling
-// convention will be returned directly as a value of location_ field in a
-// register eax.
-// If false, it is returned as a pointer to a preallocated by caller memory
-// region. Pointer to this region should be passed to a function as an
-// implicit first argument.
-#if defined(USING_BSD_ABI) || defined(__MINGW32__) || defined(__CYGWIN__)
-static const bool kReturnHandlesDirectly = true;
-#else
-static const bool kReturnHandlesDirectly = false;
-#endif
-
-
-Operand ApiParameterOperand(int index) {
- return Operand(
- esp, (index + (kReturnHandlesDirectly ? 0 : 1)) * kPointerSize);
-}
-
-
-void MacroAssembler::PrepareCallApiFunction(int argc, Register scratch) {
- if (kReturnHandlesDirectly) {
- EnterApiExitFrame(argc);
- // When handles are returned directly we don't have to allocate extra
- // space for and pass an out parameter.
- } else {
- // We allocate two additional slots: return value and pointer to it.
- EnterApiExitFrame(argc + 2);
-
- // The argument slots are filled as follows:
- //
- // n + 1: output cell
- // n: arg n
- // ...
- // 1: arg1
- // 0: pointer to the output cell
- //
- // Note that this is one more "argument" than the function expects
- // so the out cell will have to be popped explicitly after returning
- // from the function. The out cell contains Handle.
-
- // pointer to out cell.
- lea(scratch, Operand(esp, (argc + 1) * kPointerSize));
- mov(Operand(esp, 0 * kPointerSize), scratch); // output.
- if (emit_debug_code()) {
- mov(Operand(esp, (argc + 1) * kPointerSize), Immediate(0)); // out cell.
- }
- }
-}
-
-
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function,
- int stack_space) {
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address();
- ExternalReference limit_address =
- ExternalReference::handle_scope_limit_address();
- ExternalReference level_address =
- ExternalReference::handle_scope_level_address();
-
- // Allocate HandleScope in callee-save registers.
- mov(ebx, Operand::StaticVariable(next_address));
- mov(edi, Operand::StaticVariable(limit_address));
- add(Operand::StaticVariable(level_address), Immediate(1));
-
- // Call the api function!
- call(function->address(), RelocInfo::RUNTIME_ENTRY);
-
- if (!kReturnHandlesDirectly) {
- // The returned value is a pointer to the handle holding the result.
- // Dereference this to get to the location.
- mov(eax, Operand(eax, 0));
- }
-
- Label empty_handle;
- Label prologue;
- Label promote_scheduled_exception;
- Label delete_allocated_handles;
- Label leave_exit_frame;
-
- // Check if the result handle holds 0.
- test(eax, Operand(eax));
- j(zero, &empty_handle, not_taken);
- // It was non-zero. Dereference to get the result value.
- mov(eax, Operand(eax, 0));
- bind(&prologue);
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- mov(Operand::StaticVariable(next_address), ebx);
- sub(Operand::StaticVariable(level_address), Immediate(1));
- Assert(above_equal, "Invalid HandleScope level");
- cmp(edi, Operand::StaticVariable(limit_address));
- j(not_equal, &delete_allocated_handles, not_taken);
- bind(&leave_exit_frame);
-
- // Check if the function scheduled an exception.
- ExternalReference scheduled_exception_address =
- ExternalReference::scheduled_exception_address(isolate());
- cmp(Operand::StaticVariable(scheduled_exception_address),
- Immediate(isolate()->factory()->the_hole_value()));
- j(not_equal, &promote_scheduled_exception, not_taken);
- LeaveApiExitFrame();
- ret(stack_space * kPointerSize);
- bind(&promote_scheduled_exception);
- MaybeObject* result =
- TryTailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
- if (result->IsFailure()) {
- return result;
- }
- bind(&empty_handle);
- // It was zero; the result is undefined.
- mov(eax, isolate()->factory()->undefined_value());
- jmp(&prologue);
-
- // HandleScope limit has changed. Delete allocated extensions.
- ExternalReference delete_extensions =
- ExternalReference::delete_handle_scope_extensions(isolate());
- bind(&delete_allocated_handles);
- mov(Operand::StaticVariable(limit_address), edi);
- mov(edi, eax);
- mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
- mov(eax, Immediate(delete_extensions));
- call(Operand(eax));
- mov(eax, edi);
- jmp(&leave_exit_frame);
-
- return result;
-}
-
-
-void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
- // Set the entry point and jump to the C entry runtime stub.
- mov(ebx, Immediate(ext));
- CEntryStub ces(1);
- jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
- const ExternalReference& ext) {
- // Set the entry point and jump to the C entry runtime stub.
- mov(ebx, Immediate(ext));
- CEntryStub ces(1);
- return TryTailCallStub(&ces);
-}
-
-
-void MacroAssembler::InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- const Operand& code_operand,
- NearLabel* done,
- InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
- bool definitely_matches = false;
- Label invoke;
- if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
- if (expected.immediate() == actual.immediate()) {
- definitely_matches = true;
- } else {
- mov(eax, actual.immediate());
- const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
- if (expected.immediate() == sentinel) {
- // Don't worry about adapting arguments for builtins that
- // don't want that done. Skip adaption code by making it look
- // like we have a match between expected and actual number of
- // arguments.
- definitely_matches = true;
- } else {
- mov(ebx, expected.immediate());
- }
- }
- } else {
- if (actual.is_immediate()) {
- // Expected is in register, actual is immediate. This is the
- // case when we invoke function values without going through the
- // IC mechanism.
- cmp(expected.reg(), actual.immediate());
- j(equal, &invoke);
- ASSERT(expected.reg().is(ebx));
- mov(eax, actual.immediate());
- } else if (!expected.reg().is(actual.reg())) {
- // Both expected and actual are in (different) registers. This
- // is the case when we invoke functions using call and apply.
- cmp(expected.reg(), Operand(actual.reg()));
- j(equal, &invoke);
- ASSERT(actual.reg().is(eax));
- ASSERT(expected.reg().is(ebx));
- }
- }
-
- if (!definitely_matches) {
- Handle<Code> adaptor =
- isolate()->builtins()->ArgumentsAdaptorTrampoline();
- if (!code_constant.is_null()) {
- mov(edx, Immediate(code_constant));
- add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
- } else if (!code_operand.is_reg(edx)) {
- mov(edx, code_operand);
- }
-
- if (flag == CALL_FUNCTION) {
- call(adaptor, RelocInfo::CODE_TARGET);
- if (post_call_generator != NULL) post_call_generator->Generate();
- jmp(done);
- } else {
- jmp(adaptor, RelocInfo::CODE_TARGET);
- }
- bind(&invoke);
- }
-}
-
-
-void MacroAssembler::InvokeCode(const Operand& code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
- NearLabel done;
- InvokePrologue(expected, actual, Handle<Code>::null(), code,
- &done, flag, post_call_generator);
- if (flag == CALL_FUNCTION) {
- call(code);
- if (post_call_generator != NULL) post_call_generator->Generate();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- jmp(code);
- }
- bind(&done);
-}
-
-
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
- NearLabel done;
- Operand dummy(eax);
- InvokePrologue(expected, actual, code, dummy, &done,
- flag, post_call_generator);
- if (flag == CALL_FUNCTION) {
- call(code, rmode);
- if (post_call_generator != NULL) post_call_generator->Generate();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- jmp(code, rmode);
- }
- bind(&done);
-}
-
-
-void MacroAssembler::InvokeFunction(Register fun,
- const ParameterCount& actual,
- InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
- ASSERT(fun.is(edi));
- mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
- SmiUntag(ebx);
-
- ParameterCount expected(ebx);
- InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, post_call_generator);
-}
-
-
-void MacroAssembler::InvokeFunction(JSFunction* function,
- const ParameterCount& actual,
- InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
- ASSERT(function->is_compiled());
- // Get the function and setup the context.
- mov(edi, Immediate(Handle<JSFunction>(function)));
- mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- ParameterCount expected(function->shared()->formal_parameter_count());
- if (V8::UseCrankshaft()) {
- // TODO(kasperl): For now, we always call indirectly through the
- // code field in the function to allow recompilation to take effect
- // without changing any of the call sites.
- InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, post_call_generator);
- } else {
- Handle<Code> code(function->code());
- InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET,
- flag, post_call_generator);
- }
-}
-
-
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
- // Calls are not allowed in some stubs.
- ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
-
- // Rely on the assertion to check that the number of provided
- // arguments match the expected number of arguments. Fake a
- // parameter count to avoid emitting code to do the check.
- ParameterCount expected(0);
- GetBuiltinFunction(edi, id);
- InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, expected, flag, post_call_generator);
-}
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- Builtins::JavaScript id) {
- // Load the JavaScript builtin function from the builtins object.
- mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
- mov(target, FieldOperand(target,
- JSBuiltinsObject::OffsetOfFunctionWithId(id)));
-}
-
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(edi));
- // Load the JavaScript builtin function from the builtins object.
- GetBuiltinFunction(edi, id);
- // Load the code entry point from the function into the target register.
- mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
-}
-
-
-void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
- if (context_chain_length > 0) {
- // Move up the chain of contexts to the context containing the slot.
- mov(dst, Operand(esi, Context::SlotOffset(Context::CLOSURE_INDEX)));
- // Load the function context (which is the incoming, outer context).
- mov(dst, FieldOperand(dst, JSFunction::kContextOffset));
- for (int i = 1; i < context_chain_length; i++) {
- mov(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
- mov(dst, FieldOperand(dst, JSFunction::kContextOffset));
- }
- } else {
- // Slot is in the current function context. Move it into the
- // destination register in case we store into it (the write barrier
- // cannot be allowed to destroy the context in esi).
- mov(dst, esi);
- }
-
- // We should not have found a 'with' context by walking the context chain
- // (i.e., the static scope chain and runtime context chain do not agree).
- // A variable occurring in such a scope should have slot type LOOKUP and
- // not CONTEXT.
- if (emit_debug_code()) {
- cmp(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- Check(equal, "Yo dawg, I heard you liked function contexts "
- "so I put function contexts in all your contexts");
- }
-}
-
-
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- mov(function, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
- mov(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
- mov(function, Operand(function, Context::SlotOffset(index)));
-}
-
-
-void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
- Register map) {
- // Load the initial map. The global functions all have initial maps.
- mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (emit_debug_code()) {
- Label ok, fail;
- CheckMap(map, isolate()->factory()->meta_map(), &fail, false);
- jmp(&ok);
- bind(&fail);
- Abort("Global functions must have initial map");
- bind(&ok);
- }
-}
-
-
-// Store the value in register src in the safepoint register stack
-// slot for register dst.
-void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
- mov(SafepointRegisterSlot(dst), src);
-}
-
-
-void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Immediate src) {
- mov(SafepointRegisterSlot(dst), src);
-}
-
-
-void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
- mov(dst, SafepointRegisterSlot(src));
-}
-
-
-Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
- return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
-}
-
-
-int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
- // The registers are pushed starting with the lowest encoding,
- // which means that lowest encodings are furthest away from
- // the stack pointer.
- ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
- return kNumSafepointRegisters - reg_code - 1;
-}
-
-
-void MacroAssembler::Ret() {
- ret(0);
-}
-
-
-void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
- if (is_uint16(bytes_dropped)) {
- ret(bytes_dropped);
- } else {
- pop(scratch);
- add(Operand(esp), Immediate(bytes_dropped));
- push(scratch);
- ret(0);
- }
-}
-
-
-
-
-void MacroAssembler::Drop(int stack_elements) {
- if (stack_elements > 0) {
- add(Operand(esp), Immediate(stack_elements * kPointerSize));
- }
-}
-
-
-void MacroAssembler::Move(Register dst, Register src) {
- if (!dst.is(src)) {
- mov(dst, src);
- }
-}
-
-
-void MacroAssembler::Move(Register dst, Handle<Object> value) {
- mov(dst, value);
-}
-
-
-void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
- if (FLAG_native_code_counters && counter->Enabled()) {
- mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
- }
-}
-
-
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Operand operand = Operand::StaticVariable(ExternalReference(counter));
- if (value == 1) {
- inc(operand);
- } else {
- add(operand, Immediate(value));
- }
- }
-}
-
-
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Operand operand = Operand::StaticVariable(ExternalReference(counter));
- if (value == 1) {
- dec(operand);
- } else {
- sub(operand, Immediate(value));
- }
- }
-}
-
-
-void MacroAssembler::IncrementCounter(Condition cc,
- StatsCounter* counter,
- int value) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Label skip;
- j(NegateCondition(cc), &skip);
- pushfd();
- IncrementCounter(counter, value);
- popfd();
- bind(&skip);
- }
-}
-
-
-void MacroAssembler::DecrementCounter(Condition cc,
- StatsCounter* counter,
- int value) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Label skip;
- j(NegateCondition(cc), &skip);
- pushfd();
- DecrementCounter(counter, value);
- popfd();
- bind(&skip);
- }
-}
-
-
-void MacroAssembler::Assert(Condition cc, const char* msg) {
- if (emit_debug_code()) Check(cc, msg);
-}
-
-
-void MacroAssembler::AssertFastElements(Register elements) {
- if (emit_debug_code()) {
- Factory* factory = isolate()->factory();
- Label ok;
- cmp(FieldOperand(elements, HeapObject::kMapOffset),
- Immediate(factory->fixed_array_map()));
- j(equal, &ok);
- cmp(FieldOperand(elements, HeapObject::kMapOffset),
- Immediate(factory->fixed_cow_array_map()));
- j(equal, &ok);
- Abort("JSObject with fast elements map has slow elements");
- bind(&ok);
- }
-}
-
-
-void MacroAssembler::Check(Condition cc, const char* msg) {
- Label L;
- j(cc, &L, taken);
- Abort(msg);
- // will not return here
- bind(&L);
-}
-
-
-void MacroAssembler::CheckStackAlignment() {
- int frame_alignment = OS::ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment > kPointerSize) {
- ASSERT(IsPowerOf2(frame_alignment));
- Label alignment_as_expected;
- test(esp, Immediate(frame_alignment_mask));
- j(zero, &alignment_as_expected);
- // Abort if stack is not aligned.
- int3();
- bind(&alignment_as_expected);
- }
-}
-
-
-void MacroAssembler::Abort(const char* msg) {
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
-#ifdef DEBUG
- if (msg != NULL) {
- RecordComment("Abort message: ");
- RecordComment(msg);
- }
-#endif
- // Disable stub call restrictions to always allow calls to abort.
- AllowStubCallsScope allow_scope(this, true);
-
- push(eax);
- push(Immediate(p0));
- push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
- CallRuntime(Runtime::kAbort, 2);
- // will not return here
- int3();
-}
-
-
-void MacroAssembler::JumpIfNotNumber(Register reg,
- TypeInfo info,
- Label* on_not_number) {
- if (emit_debug_code()) AbortIfSmi(reg);
- if (!info.IsNumber()) {
- cmp(FieldOperand(reg, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- j(not_equal, on_not_number);
- }
-}
-
-
-void MacroAssembler::ConvertToInt32(Register dst,
- Register source,
- Register scratch,
- TypeInfo info,
- Label* on_not_int32) {
- if (emit_debug_code()) {
- AbortIfSmi(source);
- AbortIfNotNumber(source);
- }
- if (info.IsInteger32()) {
- cvttsd2si(dst, FieldOperand(source, HeapNumber::kValueOffset));
- } else {
- Label done;
- bool push_pop = (scratch.is(no_reg) && dst.is(source));
- ASSERT(!scratch.is(source));
- if (push_pop) {
- push(dst);
- scratch = dst;
- }
- if (scratch.is(no_reg)) scratch = dst;
- cvttsd2si(scratch, FieldOperand(source, HeapNumber::kValueOffset));
- cmp(scratch, 0x80000000u);
- if (push_pop) {
- j(not_equal, &done);
- pop(dst);
- jmp(on_not_int32);
- } else {
- j(equal, on_not_int32);
- }
-
- bind(&done);
- if (push_pop) {
- add(Operand(esp), Immediate(kPointerSize)); // Pop.
- }
- if (!scratch.is(dst)) {
- mov(dst, scratch);
- }
- }
-}
-
-
-void MacroAssembler::LoadPowerOf2(XMMRegister dst,
- Register scratch,
- int power) {
- ASSERT(is_uintn(power + HeapNumber::kExponentBias,
- HeapNumber::kExponentBits));
- mov(scratch, Immediate(power + HeapNumber::kExponentBias));
- movd(dst, Operand(scratch));
- psllq(dst, HeapNumber::kMantissaBits);
-}
-
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
- Register instance_type,
- Register scratch,
- Label* failure) {
- if (!scratch.is(instance_type)) {
- mov(scratch, instance_type);
- }
- and_(scratch,
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
- cmp(scratch, kStringTag | kSeqStringTag | kAsciiStringTag);
- j(not_equal, failure);
-}
-
-
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- // Check that both objects are not smis.
- ASSERT_EQ(0, kSmiTag);
- mov(scratch1, Operand(object1));
- and_(scratch1, Operand(object2));
- test(scratch1, Immediate(kSmiTagMask));
- j(zero, failure);
-
- // Load instance type for both strings.
- mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
- mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
- movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
- movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
-
- // Check that both are flat ascii strings.
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- // Interleave bits from both instance types and compare them in one check.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- and_(scratch1, kFlatAsciiStringMask);
- and_(scratch2, kFlatAsciiStringMask);
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3));
- j(not_equal, failure);
-}
-
-
-void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
- int frame_alignment = OS::ActivationFrameAlignment();
- if (frame_alignment != 0) {
- // Make stack end at alignment and make room for num_arguments words
- // and the original value of esp.
- mov(scratch, esp);
- sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize));
- ASSERT(IsPowerOf2(frame_alignment));
- and_(esp, -frame_alignment);
- mov(Operand(esp, num_arguments * kPointerSize), scratch);
- } else {
- sub(Operand(esp), Immediate(num_arguments * kPointerSize));
- }
-}
-
-
-void MacroAssembler::CallCFunction(ExternalReference function,
- int num_arguments) {
- // Trashing eax is ok as it will be the return value.
- mov(Operand(eax), Immediate(function));
- CallCFunction(eax, num_arguments);
-}
-
-
-void MacroAssembler::CallCFunction(Register function,
- int num_arguments) {
- // Check stack alignment.
- if (emit_debug_code()) {
- CheckStackAlignment();
- }
-
- call(Operand(function));
- if (OS::ActivationFrameAlignment() != 0) {
- mov(esp, Operand(esp, num_arguments * kPointerSize));
- } else {
- add(Operand(esp), Immediate(num_arguments * kPointerSize));
- }
-}
-
-
-CodePatcher::CodePatcher(byte* address, int size)
- : address_(address),
- size_(size),
- masm_(Isolate::Current(), address, size + Assembler::kGap) {
- // Create a new macro assembler pointing to the address of the code to patch.
- // The size is adjusted with kGap on order for the assembler to generate size
- // bytes of instructions without failing with buffer size constraints.
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-CodePatcher::~CodePatcher() {
- // Indicate that code has changed.
- CPU::FlushICache(address_, size_);
-
- // Check that the code was patched as expected.
- ASSERT(masm_.pc_ == address_ + size_);
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.h b/src/3rdparty/v8/src/ia32/macro-assembler-ia32.h
deleted file mode 100644
index 946022a..0000000
--- a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.h
+++ /dev/null
@@ -1,807 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_MACRO_ASSEMBLER_IA32_H_
-#define V8_IA32_MACRO_ASSEMBLER_IA32_H_
-
-#include "assembler.h"
-#include "type-info.h"
-
-namespace v8 {
-namespace internal {
-
-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
- // No special flags.
- NO_ALLOCATION_FLAGS = 0,
- // Return the pointer to the allocated already tagged as a heap object.
- TAG_OBJECT = 1 << 0,
- // The content of the result register already contains the allocation top in
- // new space.
- RESULT_CONTAINS_TOP = 1 << 1
-};
-
-// Convenience for platform-independent signatures. We do not normally
-// distinguish memory operands from other operands on ia32.
-typedef Operand MemOperand;
-
-// Forward declaration.
-class JumpTarget;
-class PostCallGenerator;
-
-// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler: public Assembler {
- public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
-
- // ---------------------------------------------------------------------------
- // GC Support
-
- // For page containing |object| mark region covering |addr| dirty.
- // RecordWriteHelper only works if the object is not in new
- // space.
- void RecordWriteHelper(Register object,
- Register addr,
- Register scratch);
-
- // Check if object is in new space.
- // scratch can be object itself, but it will be clobbered.
- template <typename LabelType>
- void InNewSpace(Register object,
- Register scratch,
- Condition cc, // equal for new space, not_equal otherwise.
- LabelType* branch);
-
- // For page containing |object| mark region covering [object+offset]
- // dirty. |object| is the object being stored into, |value| is the
- // object being stored. If offset is zero, then the scratch register
- // contains the array index into the elements array represented as a
- // Smi. All registers are clobbered by the operation. RecordWrite
- // filters out smis so it does not update the write barrier if the
- // value is a smi.
- void RecordWrite(Register object,
- int offset,
- Register value,
- Register scratch);
-
- // For page containing |object| mark region covering |address|
- // dirty. |object| is the object being stored into, |value| is the
- // object being stored. All registers are clobbered by the
- // operation. RecordWrite filters out smis so it does not update the
- // write barrier if the value is a smi.
- void RecordWrite(Register object,
- Register address,
- Register value);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // ---------------------------------------------------------------------------
- // Debugger Support
-
- void DebugBreak();
-#endif
-
- // ---------------------------------------------------------------------------
- // Activation frames
-
- void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
- void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
- void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
- void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
- // Enter specific kind of exit frame. Expects the number of
- // arguments in register eax and sets up the number of arguments in
- // register edi and the pointer to the first argument in register
- // esi.
- void EnterExitFrame(bool save_doubles);
-
- void EnterApiExitFrame(int argc);
-
- // Leave the current exit frame. Expects the return value in
- // register eax:edx (untouched) and the pointer to the first
- // argument in register esi.
- void LeaveExitFrame(bool save_doubles);
-
- // Leave the current exit frame. Expects the return value in
- // register eax (untouched).
- void LeaveApiExitFrame();
-
- // Find the function context up the context chain.
- void LoadContext(Register dst, int context_chain_length);
-
- // Load the global function with the given index.
- void LoadGlobalFunction(int index, Register function);
-
- // Load the initial map from the global function. The registers
- // function and map can be the same.
- void LoadGlobalFunctionInitialMap(Register function, Register map);
-
- // Push and pop the registers that can hold pointers.
- void PushSafepointRegisters() { pushad(); }
- void PopSafepointRegisters() { popad(); }
- // Store the value in register/immediate src in the safepoint
- // register stack slot for register dst.
- void StoreToSafepointRegisterSlot(Register dst, Register src);
- void StoreToSafepointRegisterSlot(Register dst, Immediate src);
- void LoadFromSafepointRegisterSlot(Register dst, Register src);
-
- // ---------------------------------------------------------------------------
- // JavaScript invokes
-
- // Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(const Operand& code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
-
- void InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
-
- // Invoke the JavaScript function in the given register. Changes the
- // current context to the context in the function before invoking.
- void InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
-
- void InvokeFunction(JSFunction* function,
- const ParameterCount& actual,
- InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
-
- // Invoke specified builtin JavaScript function. Adds an entry to
- // the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, Builtins::JavaScript id);
-
- // Store the code object for the given builtin in the target register.
- void GetBuiltinEntry(Register target, Builtins::JavaScript id);
-
- // Expression support
- void Set(Register dst, const Immediate& x);
- void Set(const Operand& dst, const Immediate& x);
-
- // Compare object type for heap object.
- // Incoming register is heap_object and outgoing register is map.
- void CmpObjectType(Register heap_object, InstanceType type, Register map);
-
- // Compare instance type for map.
- void CmpInstanceType(Register map, InstanceType type);
-
- // Check if the map of an object is equal to a specified map and
- // branch to label if not. Skip the smi check if not required
- // (object is known to be a heap object)
- void CheckMap(Register obj,
- Handle<Map> map,
- Label* fail,
- bool is_heap_object);
-
- // Check if the object in register heap_object is a string. Afterwards the
- // register map contains the object map and the register instance_type
- // contains the instance_type. The registers map and instance_type can be the
- // same in which case it contains the instance type afterwards. Either of the
- // registers map and instance_type can be the same as heap_object.
- Condition IsObjectStringType(Register heap_object,
- Register map,
- Register instance_type);
-
- // Check if a heap object's type is in the JSObject range, not including
- // JSFunction. The object's map will be loaded in the map register.
- // Any or all of the three registers may be the same.
- // The contents of the scratch register will always be overwritten.
- void IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail);
-
- // The contents of the scratch register will be overwritten.
- void IsInstanceJSObjectType(Register map, Register scratch, Label* fail);
-
- // FCmp is similar to integer cmp, but requires unsigned
- // jcc instructions (je, ja, jae, jb, jbe, je, and jz).
- void FCmp();
-
- // Smi tagging support.
- void SmiTag(Register reg) {
- ASSERT(kSmiTag == 0);
- ASSERT(kSmiTagSize == 1);
- add(reg, Operand(reg));
- }
- void SmiUntag(Register reg) {
- sar(reg, kSmiTagSize);
- }
-
- // Modifies the register even if it does not contain a Smi!
- void SmiUntag(Register reg, TypeInfo info, Label* non_smi) {
- ASSERT(kSmiTagSize == 1);
- sar(reg, kSmiTagSize);
- if (info.IsSmi()) {
- ASSERT(kSmiTag == 0);
- j(carry, non_smi);
- }
- }
-
- // Modifies the register even if it does not contain a Smi!
- void SmiUntag(Register reg, Label* is_smi) {
- ASSERT(kSmiTagSize == 1);
- sar(reg, kSmiTagSize);
- ASSERT(kSmiTag == 0);
- j(not_carry, is_smi);
- }
-
- // Jump the register contains a smi.
- inline void JumpIfSmi(Register value, Label* smi_label) {
- test(value, Immediate(kSmiTagMask));
- j(zero, smi_label, not_taken);
- }
- // Jump if register contain a non-smi.
- inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
- test(value, Immediate(kSmiTagMask));
- j(not_zero, not_smi_label, not_taken);
- }
-
- // Assumes input is a heap object.
- void JumpIfNotNumber(Register reg, TypeInfo info, Label* on_not_number);
-
- // Assumes input is a heap number. Jumps on things out of range. Also jumps
- // on the min negative int32. Ignores frational parts.
- void ConvertToInt32(Register dst,
- Register src, // Can be the same as dst.
- Register scratch, // Can be no_reg or dst, but not src.
- TypeInfo info,
- Label* on_not_int32);
-
- void LoadPowerOf2(XMMRegister dst, Register scratch, int power);
-
- // Abort execution if argument is not a number. Used in debug code.
- void AbortIfNotNumber(Register object);
-
- // Abort execution if argument is not a smi. Used in debug code.
- void AbortIfNotSmi(Register object);
-
- // Abort execution if argument is a smi. Used in debug code.
- void AbortIfSmi(Register object);
-
- // Abort execution if argument is a string. Used in debug code.
- void AbortIfNotString(Register object);
-
- // ---------------------------------------------------------------------------
- // Exception handling
-
- // Push a new try handler and link into try handler chain. The return
- // address must be pushed before calling this helper.
- void PushTryHandler(CodeLocation try_location, HandlerType type);
-
- // Unlink the stack handler on top of the stack from the try handler chain.
- void PopTryHandler();
-
- // Activate the top handler in the try hander chain.
- void Throw(Register value);
-
- void ThrowUncatchable(UncatchableExceptionType type, Register value);
-
- // ---------------------------------------------------------------------------
- // Inline caching support
-
- // Generate code for checking access rights - used for security checks
- // on access to global objects across environments. The holder register
- // is left untouched, but the scratch register is clobbered.
- void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss);
-
-
- // ---------------------------------------------------------------------------
- // Allocation support
-
- // Allocate an object in new space. If the new space is exhausted control
- // continues at the gc_required label. The allocated object is returned in
- // result and end of the new object is returned in result_end. The register
- // scratch can be passed as no_reg in which case an additional object
- // reference will be added to the reloc info. The returned pointers in result
- // and result_end have not yet been tagged as heap objects. If
- // result_contains_top_on_entry is true the content of result is known to be
- // the allocation top on entry (could be result_end from a previous call to
- // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
- // should be no_reg as it is never used.
- void AllocateInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- // Undo allocation in new space. The object passed and objects allocated after
- // it will no longer be allocated. Make sure that no pointers are left to the
- // object(s) no longer allocated as they would be invalid when allocation is
- // un-done.
- void UndoAllocationInNewSpace(Register object);
-
- // Allocate a heap number in new space with undefined value. The
- // register scratch2 can be passed as no_reg; the others must be
- // valid registers. Returns tagged pointer in result register, or
- // jumps to gc_required if new space is full.
- void AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // Allocate a sequential string. All the header fields of the string object
- // are initialized.
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateAsciiString(Register result,
- int length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // Allocate a raw cons string object. Only the map field of the result is
- // initialized.
- void AllocateConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateAsciiConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // Copy memory, byte-by-byte, from source to destination. Not optimized for
- // long or aligned copies.
- // The contents of index and scratch are destroyed.
- void CopyBytes(Register source,
- Register destination,
- Register length,
- Register scratch);
-
- // ---------------------------------------------------------------------------
- // Support functions.
-
- // Check if result is zero and op is negative.
- void NegativeZeroTest(Register result, Register op, Label* then_label);
-
- // Check if result is zero and op is negative in code using jump targets.
- void NegativeZeroTest(CodeGenerator* cgen,
- Register result,
- Register op,
- JumpTarget* then_target);
-
- // Check if result is zero and any of op1 and op2 are negative.
- // Register scratch is destroyed, and it must be different from op2.
- void NegativeZeroTest(Register result, Register op1, Register op2,
- Register scratch, Label* then_label);
-
- // Try to get function prototype of a function and puts the value in
- // the result register. Checks that the function really is a
- // function and jumps to the miss label if the fast checks fail. The
- // function register will be untouched; the other registers may be
- // clobbered.
- void TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss);
-
- // Generates code for reporting that an illegal operation has
- // occurred.
- void IllegalOperation(int num_arguments);
-
- // Picks out an array index from the hash field.
- // Register use:
- // hash - holds the index's hash. Clobbered.
- // index - holds the overwritten index on exit.
- void IndexFromHash(Register hash, Register index);
-
- // ---------------------------------------------------------------------------
- // Runtime calls
-
- // Call a code stub. Generate the code if necessary.
- void CallStub(CodeStub* stub);
-
- // Call a code stub and return the code object called. Try to generate
- // the code if necessary. Do not perform a GC but instead return a retry
- // after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub);
-
- // Tail call a code stub (jump). Generate the code if necessary.
- void TailCallStub(CodeStub* stub);
-
- // Tail call a code stub (jump) and return the code object called. Try to
- // generate the code if necessary. Do not perform a GC but instead return
- // a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub);
-
- // Return from a code stub after popping its arguments.
- void StubReturn(int argc);
-
- // Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
-
- // Call a runtime function, returning the CodeStub object called.
- // Try to generate the stub code if necessary. Do not perform a GC
- // but instead return a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
- int num_arguments);
-
- // Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments);
-
- // Convenience function: Same as above, but takes the fid instead.
- MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::FunctionId id,
- int num_arguments);
-
- // Convenience function: call an external reference.
- void CallExternalReference(ExternalReference ref, int num_arguments);
-
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
-
- // Tail call of a runtime routine (jump). Try to generate the code if
- // necessary. Do not perform a GC but instead return a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size);
-
- // Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
-
- // Convenience function: tail call a runtime routine (jump). Try to generate
- // the code if necessary. Do not perform a GC but instead return a retry after
- // GC failure.
- MUST_USE_RESULT MaybeObject* TryTailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
-
- // Before calling a C-function from generated code, align arguments on stack.
- // After aligning the frame, arguments must be stored in esp[0], esp[4],
- // etc., not pushed. The argument count assumes all arguments are word sized.
- // Some compilers/platforms require the stack to be aligned when calling
- // C++ code.
- // Needs a scratch register to do some arithmetic. This register will be
- // trashed.
- void PrepareCallCFunction(int num_arguments, Register scratch);
-
- // Calls a C function and cleans up the space for arguments allocated
- // by PrepareCallCFunction. The called function is not allowed to trigger a
- // garbage collection, since that might move the code and invalidate the
- // return address (unless this is somehow accounted for by the called
- // function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
-
- // Prepares stack to put arguments (aligns and so on). Reserves
- // space for return value if needed (assumes the return value is a handle).
- // Uses callee-saved esi to restore stack state after call. Arguments must be
- // stored in ApiParameterOperand(0), ApiParameterOperand(1) etc. Saves
- // context (esi).
- void PrepareCallApiFunction(int argc, Register scratch);
-
- // Calls an API function. Allocates HandleScope, extracts
- // returned value from handle and propagates exceptions.
- // Clobbers ebx, edi and caller-save registers. Restores context.
- // On return removes stack_space * kPointerSize (GCed).
- MaybeObject* TryCallApiFunctionAndReturn(ApiFunction* function,
- int stack_space);
-
- // Jump to a runtime routine.
- void JumpToExternalReference(const ExternalReference& ext);
-
- MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
-
-
- // ---------------------------------------------------------------------------
- // Utilities
-
- void Ret();
-
- // Return and drop arguments from stack, where the number of arguments
- // may be bigger than 2^16 - 1. Requires a scratch register.
- void Ret(int bytes_dropped, Register scratch);
-
- // Emit code to discard a non-negative number of pointer-sized elements
- // from the stack, clobbering only the esp register.
- void Drop(int element_count);
-
- void Call(Label* target) { call(target); }
-
- // Emit call to the code we are currently generating.
- void CallSelf() {
- Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
- call(self, RelocInfo::CODE_TARGET);
- }
-
- // Move if the registers are not identical.
- void Move(Register target, Register source);
-
- void Move(Register target, Handle<Object> value);
-
- Handle<Object> CodeObject() {
- ASSERT(!code_object_.is_null());
- return code_object_;
- }
-
-
- // ---------------------------------------------------------------------------
- // StatsCounter support
-
- void SetCounter(StatsCounter* counter, int value);
- void IncrementCounter(StatsCounter* counter, int value);
- void DecrementCounter(StatsCounter* counter, int value);
- void IncrementCounter(Condition cc, StatsCounter* counter, int value);
- void DecrementCounter(Condition cc, StatsCounter* counter, int value);
-
-
- // ---------------------------------------------------------------------------
- // Debugging
-
- // Calls Abort(msg) if the condition cc is not satisfied.
- // Use --debug_code to enable.
- void Assert(Condition cc, const char* msg);
-
- void AssertFastElements(Register elements);
-
- // Like Assert(), but always enabled.
- void Check(Condition cc, const char* msg);
-
- // Print a message to stdout and abort execution.
- void Abort(const char* msg);
-
- // Check that the stack is aligned.
- void CheckStackAlignment();
-
- // Verify restrictions about code generated in stubs.
- void set_generating_stub(bool value) { generating_stub_ = value; }
- bool generating_stub() { return generating_stub_; }
- void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
- bool allow_stub_calls() { return allow_stub_calls_; }
-
- // ---------------------------------------------------------------------------
- // String utilities.
-
- // Check whether the instance type represents a flat ascii string. Jump to the
- // label if not. If the instance type can be scratched specify same register
- // for both instance type and scratch.
- void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
- Register scratch,
- Label* on_not_flat_ascii_string);
-
- // Checks if both objects are sequential ASCII strings, and jumps to label
- // if either is not.
- void JumpIfNotBothSequentialAsciiStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label* on_not_flat_ascii_strings);
-
- private:
- bool generating_stub_;
- bool allow_stub_calls_;
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
-
- // Helper functions for generating invokes.
- void InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- const Operand& code_operand,
- NearLabel* done,
- InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
-
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
- void EnterExitFramePrologue();
- void EnterExitFrameEpilogue(int argc, bool save_doubles);
-
- void LeaveExitFrameEpilogue();
-
- // Allocation support helpers.
- void LoadAllocationTopHelper(Register result,
- Register scratch,
- AllocationFlags flags);
- void UpdateAllocationTopHelper(Register result_end, Register scratch);
-
- // Helper for PopHandleScope. Allowed to perform a GC and returns
- // NULL if gc_allowed. Does not perform a GC if !gc_allowed, and
- // possibly returns a failure object indicating an allocation failure.
- MUST_USE_RESULT MaybeObject* PopHandleScopeHelper(Register saved,
- Register scratch,
- bool gc_allowed);
-
-
- // Compute memory operands for safepoint stack slots.
- Operand SafepointRegisterSlot(Register reg);
- static int SafepointRegisterStackIndex(int reg_code);
-
- // Needs access to SafepointRegisterStackIndex for optimized frame
- // traversal.
- friend class OptimizedFrame;
-};
-
-
-template <typename LabelType>
-void MacroAssembler::InNewSpace(Register object,
- Register scratch,
- Condition cc,
- LabelType* branch) {
- ASSERT(cc == equal || cc == not_equal);
- if (Serializer::enabled()) {
- // Can't do arithmetic on external references if it might get serialized.
- mov(scratch, Operand(object));
- // The mask isn't really an address. We load it as an external reference in
- // case the size of the new space is different between the snapshot maker
- // and the running system.
- and_(Operand(scratch),
- Immediate(ExternalReference::new_space_mask(isolate())));
- cmp(Operand(scratch),
- Immediate(ExternalReference::new_space_start(isolate())));
- j(cc, branch);
- } else {
- int32_t new_space_start = reinterpret_cast<int32_t>(
- ExternalReference::new_space_start(isolate()).address());
- lea(scratch, Operand(object, -new_space_start));
- and_(scratch, isolate()->heap()->NewSpaceMask());
- j(cc, branch);
- }
-}
-
-
-// The code patcher is used to patch (typically) small parts of code e.g. for
-// debugging and other types of instrumentation. When using the code patcher
-// the exact number of bytes specified must be emitted. Is not legal to emit
-// relocation information. If any of these constraints are violated it causes
-// an assertion.
-class CodePatcher {
- public:
- CodePatcher(byte* address, int size);
- virtual ~CodePatcher();
-
- // Macro assembler to emit code.
- MacroAssembler* masm() { return &masm_; }
-
- private:
- byte* address_; // The address of the code being patched.
- int size_; // Number of bytes of the expected patch size.
- MacroAssembler masm_; // Macro assembler used to generate the code.
-};
-
-
-// Helper class for generating code or data associated with the code
-// right after a call instruction. As an example this can be used to
-// generate safepoint data after calls for crankshaft.
-class PostCallGenerator {
- public:
- PostCallGenerator() { }
- virtual ~PostCallGenerator() { }
- virtual void Generate() = 0;
-};
-
-
-// -----------------------------------------------------------------------------
-// Static helper functions.
-
-// Generate an Operand for loading a field from an object.
-static inline Operand FieldOperand(Register object, int offset) {
- return Operand(object, offset - kHeapObjectTag);
-}
-
-
-// Generate an Operand for loading an indexed field from an object.
-static inline Operand FieldOperand(Register object,
- Register index,
- ScaleFactor scale,
- int offset) {
- return Operand(object, index, scale, offset - kHeapObjectTag);
-}
-
-
-static inline Operand ContextOperand(Register context, int index) {
- return Operand(context, Context::SlotOffset(index));
-}
-
-
-static inline Operand GlobalObjectOperand() {
- return ContextOperand(esi, Context::GLOBAL_INDEX);
-}
-
-
-// Generates an Operand for saving parameters after PrepareCallApiFunction.
-Operand ApiParameterOperand(int index);
-
-
-#ifdef GENERATED_CODE_COVERAGE
-extern void LogGeneratedCodeCoverage(const char* file_line);
-#define CODE_COVERAGE_STRINGIFY(x) #x
-#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
-#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) { \
- byte* ia32_coverage_function = \
- reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
- masm->pushfd(); \
- masm->pushad(); \
- masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
- masm->call(ia32_coverage_function, RelocInfo::RUNTIME_ENTRY); \
- masm->pop(eax); \
- masm->popad(); \
- masm->popfd(); \
- } \
- masm->
-#else
-#define ACCESS_MASM(masm) masm->
-#endif
-
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_MACRO_ASSEMBLER_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.cc b/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.cc
deleted file mode 100644
index 067f8c8..0000000
--- a/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ /dev/null
@@ -1,1264 +0,0 @@
-// Copyright 2008-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "unicode.h"
-#include "log.h"
-#include "regexp-stack.h"
-#include "macro-assembler.h"
-#include "regexp-macro-assembler.h"
-#include "ia32/regexp-macro-assembler-ia32.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-/*
- * This assembler uses the following register assignment convention
- * - edx : current character. Must be loaded using LoadCurrentCharacter
- * before using any of the dispatch methods.
- * - edi : current position in input, as negative offset from end of string.
- * Please notice that this is the byte offset, not the character offset!
- * - esi : end of input (points to byte after last character in input).
- * - ebp : frame pointer. Used to access arguments, local variables and
- * RegExp registers.
- * - esp : points to tip of C stack.
- * - ecx : points to tip of backtrack stack
- *
- * The registers eax and ebx are free to use for computations.
- *
- * Each call to a public method should retain this convention.
- * The stack will have the following structure:
- * - Isolate* isolate (Address of the current isolate)
- * - direct_call (if 1, direct call from JavaScript code, if 0
- * call through the runtime system)
- * - stack_area_base (High end of the memory area to use as
- * backtracking stack)
- * - int* capture_array (int[num_saved_registers_], for output).
- * - end of input (Address of end of string)
- * - start of input (Address of first character in string)
- * - start index (character index of start)
- * - String* input_string (location of a handle containing the string)
- * --- frame alignment (if applicable) ---
- * - return address
- * ebp-> - old ebp
- * - backup of caller esi
- * - backup of caller edi
- * - backup of caller ebx
- * - Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a non-position.
- * - register 0 ebp[-4] (Only positions must be stored in the first
- * - register 1 ebp[-8] num_saved_registers_ registers)
- * - ...
- *
- * The first num_saved_registers_ registers are initialized to point to
- * "character -1" in the string (i.e., char_size() bytes before the first
- * character of the string). The remaining registers starts out as garbage.
- *
- * The data up to the return address must be placed there by the calling
- * code, by calling the code entry as cast to a function with the signature:
- * int (*match)(String* input_string,
- * int start_index,
- * Address start,
- * Address end,
- * int* capture_output_array,
- * bool at_start,
- * byte* stack_area_base,
- * bool direct_call)
- */
-
-#define __ ACCESS_MASM(masm_)
-
-RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(
- Mode mode,
- int registers_to_save)
- : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
- mode_(mode),
- num_registers_(registers_to_save),
- num_saved_registers_(registers_to_save),
- entry_label_(),
- start_label_(),
- success_label_(),
- backtrack_label_(),
- exit_label_() {
- ASSERT_EQ(0, registers_to_save % 2);
- __ jmp(&entry_label_); // We'll write the entry code later.
- __ bind(&start_label_); // And then continue from here.
-}
-
-
-RegExpMacroAssemblerIA32::~RegExpMacroAssemblerIA32() {
- delete masm_;
- // Unuse labels in case we throw away the assembler without calling GetCode.
- entry_label_.Unuse();
- start_label_.Unuse();
- success_label_.Unuse();
- backtrack_label_.Unuse();
- exit_label_.Unuse();
- check_preempt_label_.Unuse();
- stack_overflow_label_.Unuse();
-}
-
-
-int RegExpMacroAssemblerIA32::stack_limit_slack() {
- return RegExpStack::kStackLimitSlack;
-}
-
-
-void RegExpMacroAssemblerIA32::AdvanceCurrentPosition(int by) {
- if (by != 0) {
- __ add(Operand(edi), Immediate(by * char_size()));
- }
-}
-
-
-void RegExpMacroAssemblerIA32::AdvanceRegister(int reg, int by) {
- ASSERT(reg >= 0);
- ASSERT(reg < num_registers_);
- if (by != 0) {
- __ add(register_location(reg), Immediate(by));
- }
-}
-
-
-void RegExpMacroAssemblerIA32::Backtrack() {
- CheckPreemption();
- // Pop Code* offset from backtrack stack, add Code* and jump to location.
- Pop(ebx);
- __ add(Operand(ebx), Immediate(masm_->CodeObject()));
- __ jmp(Operand(ebx));
-}
-
-
-void RegExpMacroAssemblerIA32::Bind(Label* label) {
- __ bind(label);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacter(uint32_t c, Label* on_equal) {
- __ cmp(current_character(), c);
- BranchOrBacktrack(equal, on_equal);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacterGT(uc16 limit, Label* on_greater) {
- __ cmp(current_character(), limit);
- BranchOrBacktrack(greater, on_greater);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, &not_at_start);
- // If we did, are we still at the start of the input?
- __ lea(eax, Operand(esi, edi, times_1, 0));
- __ cmp(eax, Operand(ebp, kInputStart));
- BranchOrBacktrack(equal, on_at_start);
- __ bind(&not_at_start);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, on_not_at_start);
- // If we did, are we still at the start of the input?
- __ lea(eax, Operand(esi, edi, times_1, 0));
- __ cmp(eax, Operand(ebp, kInputStart));
- BranchOrBacktrack(not_equal, on_not_at_start);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacterLT(uc16 limit, Label* on_less) {
- __ cmp(current_character(), limit);
- BranchOrBacktrack(less, on_less);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
-#ifdef DEBUG
- // If input is ASCII, don't even bother calling here if the string to
- // match contains a non-ascii character.
- if (mode_ == ASCII) {
- ASSERT(String::IsAscii(str.start(), str.length()));
- }
-#endif
- int byte_length = str.length() * char_size();
- int byte_offset = cp_offset * char_size();
- if (check_end_of_string) {
- // Check that there are at least str.length() characters left in the input.
- __ cmp(Operand(edi), Immediate(-(byte_offset + byte_length)));
- BranchOrBacktrack(greater, on_failure);
- }
-
- if (on_failure == NULL) {
- // Instead of inlining a backtrack, (re)use the global backtrack target.
- on_failure = &backtrack_label_;
- }
-
- // Do one character test first to minimize loading for the case that
- // we don't match at all (loading more than one character introduces that
- // chance of reading unaligned and reading across cache boundaries).
- // If the first character matches, expect a larger chance of matching the
- // string, and start loading more characters at a time.
- if (mode_ == ASCII) {
- __ cmpb(Operand(esi, edi, times_1, byte_offset),
- static_cast<int8_t>(str[0]));
- } else {
- // Don't use 16-bit immediate. The size changing prefix throws off
- // pre-decoding.
- __ movzx_w(eax,
- Operand(esi, edi, times_1, byte_offset));
- __ cmp(eax, static_cast<int32_t>(str[0]));
- }
- BranchOrBacktrack(not_equal, on_failure);
-
- __ lea(ebx, Operand(esi, edi, times_1, 0));
- for (int i = 1, n = str.length(); i < n;) {
- if (mode_ == ASCII) {
- if (i <= n - 4) {
- int combined_chars =
- (static_cast<uint32_t>(str[i + 0]) << 0) |
- (static_cast<uint32_t>(str[i + 1]) << 8) |
- (static_cast<uint32_t>(str[i + 2]) << 16) |
- (static_cast<uint32_t>(str[i + 3]) << 24);
- __ cmp(Operand(ebx, byte_offset + i), Immediate(combined_chars));
- i += 4;
- } else {
- __ cmpb(Operand(ebx, byte_offset + i),
- static_cast<int8_t>(str[i]));
- i += 1;
- }
- } else {
- ASSERT(mode_ == UC16);
- if (i <= n - 2) {
- __ cmp(Operand(ebx, byte_offset + i * sizeof(uc16)),
- Immediate(*reinterpret_cast<const int*>(&str[i])));
- i += 2;
- } else {
- // Avoid a 16-bit immediate operation. It uses the length-changing
- // 0x66 prefix which causes pre-decoder misprediction and pipeline
- // stalls. See
- // "Intel(R) 64 and IA-32 Architectures Optimization Reference Manual"
- // (248966.pdf) section 3.4.2.3 "Length-Changing Prefixes (LCP)"
- __ movzx_w(eax,
- Operand(ebx, byte_offset + i * sizeof(uc16)));
- __ cmp(eax, static_cast<int32_t>(str[i]));
- i += 1;
- }
- }
- BranchOrBacktrack(not_equal, on_failure);
- }
-}
-
-
-void RegExpMacroAssemblerIA32::CheckGreedyLoop(Label* on_equal) {
- Label fallthrough;
- __ cmp(edi, Operand(backtrack_stackpointer(), 0));
- __ j(not_equal, &fallthrough);
- __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize)); // Pop.
- BranchOrBacktrack(no_condition, on_equal);
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
- __ mov(edx, register_location(start_reg)); // Index of start of capture
- __ mov(ebx, register_location(start_reg + 1)); // Index of end of capture
- __ sub(ebx, Operand(edx)); // Length of capture.
-
- // The length of a capture should not be negative. This can only happen
- // if the end of the capture is unrecorded, or at a point earlier than
- // the start of the capture.
- BranchOrBacktrack(less, on_no_match, not_taken);
-
- // If length is zero, either the capture is empty or it is completely
- // uncaptured. In either case succeed immediately.
- __ j(equal, &fallthrough);
-
- if (mode_ == ASCII) {
- Label success;
- Label fail;
- Label loop_increment;
- // Save register contents to make the registers available below.
- __ push(edi);
- __ push(backtrack_stackpointer());
- // After this, the eax, ecx, and edi registers are available.
-
- __ add(edx, Operand(esi)); // Start of capture
- __ add(edi, Operand(esi)); // Start of text to match against capture.
- __ add(ebx, Operand(edi)); // End of text to match against capture.
-
- Label loop;
- __ bind(&loop);
- __ movzx_b(eax, Operand(edi, 0));
- __ cmpb_al(Operand(edx, 0));
- __ j(equal, &loop_increment);
-
- // Mismatch, try case-insensitive match (converting letters to lower-case).
- __ or_(eax, 0x20); // Convert match character to lower-case.
- __ lea(ecx, Operand(eax, -'a'));
- __ cmp(ecx, static_cast<int32_t>('z' - 'a')); // Is eax a lowercase letter?
- __ j(above, &fail);
- // Also convert capture character.
- __ movzx_b(ecx, Operand(edx, 0));
- __ or_(ecx, 0x20);
-
- __ cmp(eax, Operand(ecx));
- __ j(not_equal, &fail);
-
- __ bind(&loop_increment);
- // Increment pointers into match and capture strings.
- __ add(Operand(edx), Immediate(1));
- __ add(Operand(edi), Immediate(1));
- // Compare to end of match, and loop if not done.
- __ cmp(edi, Operand(ebx));
- __ j(below, &loop, taken);
- __ jmp(&success);
-
- __ bind(&fail);
- // Restore original values before failing.
- __ pop(backtrack_stackpointer());
- __ pop(edi);
- BranchOrBacktrack(no_condition, on_no_match);
-
- __ bind(&success);
- // Restore original value before continuing.
- __ pop(backtrack_stackpointer());
- // Drop original value of character position.
- __ add(Operand(esp), Immediate(kPointerSize));
- // Compute new value of character position after the matched part.
- __ sub(edi, Operand(esi));
- } else {
- ASSERT(mode_ == UC16);
- // Save registers before calling C function.
- __ push(esi);
- __ push(edi);
- __ push(backtrack_stackpointer());
- __ push(ebx);
-
- static const int argument_count = 4;
- __ PrepareCallCFunction(argument_count, ecx);
- // Put arguments into allocated stack area, last argument highest on stack.
- // Parameters are
- // Address byte_offset1 - Address captured substring's start.
- // Address byte_offset2 - Address of current character position.
- // size_t byte_length - length of capture in bytes(!)
- // Isolate* isolate
-
- // Set isolate.
- __ mov(Operand(esp, 3 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
- // Set byte_length.
- __ mov(Operand(esp, 2 * kPointerSize), ebx);
- // Set byte_offset2.
- // Found by adding negative string-end offset of current position (edi)
- // to end of string.
- __ add(edi, Operand(esi));
- __ mov(Operand(esp, 1 * kPointerSize), edi);
- // Set byte_offset1.
- // Start of capture, where edx already holds string-end negative offset.
- __ add(edx, Operand(esi));
- __ mov(Operand(esp, 0 * kPointerSize), edx);
-
- ExternalReference compare =
- ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
- __ CallCFunction(compare, argument_count);
- // Pop original values before reacting on result value.
- __ pop(ebx);
- __ pop(backtrack_stackpointer());
- __ pop(edi);
- __ pop(esi);
-
- // Check if function returned non-zero for success or zero for failure.
- __ or_(eax, Operand(eax));
- BranchOrBacktrack(zero, on_no_match);
- // On success, increment position by length of capture.
- __ add(edi, Operand(ebx));
- }
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
- Label success;
- Label fail;
-
- // Find length of back-referenced capture.
- __ mov(edx, register_location(start_reg));
- __ mov(eax, register_location(start_reg + 1));
- __ sub(eax, Operand(edx)); // Length to check.
- // Fail on partial or illegal capture (start of capture after end of capture).
- BranchOrBacktrack(less, on_no_match);
- // Succeed on empty capture (including no capture)
- __ j(equal, &fallthrough);
-
- // Check that there are sufficient characters left in the input.
- __ mov(ebx, edi);
- __ add(ebx, Operand(eax));
- BranchOrBacktrack(greater, on_no_match);
-
- // Save register to make it available below.
- __ push(backtrack_stackpointer());
-
- // Compute pointers to match string and capture string
- __ lea(ebx, Operand(esi, edi, times_1, 0)); // Start of match.
- __ add(edx, Operand(esi)); // Start of capture.
- __ lea(ecx, Operand(eax, ebx, times_1, 0)); // End of match
-
- Label loop;
- __ bind(&loop);
- if (mode_ == ASCII) {
- __ movzx_b(eax, Operand(edx, 0));
- __ cmpb_al(Operand(ebx, 0));
- } else {
- ASSERT(mode_ == UC16);
- __ movzx_w(eax, Operand(edx, 0));
- __ cmpw_ax(Operand(ebx, 0));
- }
- __ j(not_equal, &fail);
- // Increment pointers into capture and match string.
- __ add(Operand(edx), Immediate(char_size()));
- __ add(Operand(ebx), Immediate(char_size()));
- // Check if we have reached end of match area.
- __ cmp(ebx, Operand(ecx));
- __ j(below, &loop);
- __ jmp(&success);
-
- __ bind(&fail);
- // Restore backtrack stackpointer.
- __ pop(backtrack_stackpointer());
- BranchOrBacktrack(no_condition, on_no_match);
-
- __ bind(&success);
- // Move current character position to position after match.
- __ mov(edi, ecx);
- __ sub(Operand(edi), esi);
- // Restore backtrack stackpointer.
- __ pop(backtrack_stackpointer());
-
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckNotRegistersEqual(int reg1,
- int reg2,
- Label* on_not_equal) {
- __ mov(eax, register_location(reg1));
- __ cmp(eax, register_location(reg2));
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckNotCharacter(uint32_t c,
- Label* on_not_equal) {
- __ cmp(current_character(), c);
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal) {
- __ mov(eax, current_character());
- __ and_(eax, mask);
- __ cmp(eax, c);
- BranchOrBacktrack(equal, on_equal);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_not_equal) {
- __ mov(eax, current_character());
- __ and_(eax, mask);
- __ cmp(eax, c);
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckNotCharacterAfterMinusAnd(
- uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal) {
- ASSERT(minus < String::kMaxUC16CharCode);
- __ lea(eax, Operand(current_character(), -minus));
- __ and_(eax, mask);
- __ cmp(eax, c);
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match) {
- // Range checks (c in min..max) are generally implemented by an unsigned
- // (c - min) <= (max - min) check
- switch (type) {
- case 's':
- // Match space-characters
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- Label success;
- __ cmp(current_character(), ' ');
- __ j(equal, &success);
- // Check range 0x09..0x0d
- __ lea(eax, Operand(current_character(), -'\t'));
- __ cmp(eax, '\r' - '\t');
- BranchOrBacktrack(above, on_no_match);
- __ bind(&success);
- return true;
- }
- return false;
- case 'S':
- // Match non-space characters.
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- __ cmp(current_character(), ' ');
- BranchOrBacktrack(equal, on_no_match);
- __ lea(eax, Operand(current_character(), -'\t'));
- __ cmp(eax, '\r' - '\t');
- BranchOrBacktrack(below_equal, on_no_match);
- return true;
- }
- return false;
- case 'd':
- // Match ASCII digits ('0'..'9')
- __ lea(eax, Operand(current_character(), -'0'));
- __ cmp(eax, '9' - '0');
- BranchOrBacktrack(above, on_no_match);
- return true;
- case 'D':
- // Match non ASCII-digits
- __ lea(eax, Operand(current_character(), -'0'));
- __ cmp(eax, '9' - '0');
- BranchOrBacktrack(below_equal, on_no_match);
- return true;
- case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ mov(Operand(eax), current_character());
- __ xor_(Operand(eax), Immediate(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(Operand(eax), Immediate(0x0b));
- __ cmp(eax, 0x0c - 0x0b);
- BranchOrBacktrack(below_equal, on_no_match);
- if (mode_ == UC16) {
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(Operand(eax), Immediate(0x2028 - 0x0b));
- __ cmp(eax, 0x2029 - 0x2028);
- BranchOrBacktrack(below_equal, on_no_match);
- }
- return true;
- }
- case 'w': {
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
- __ cmp(Operand(current_character()), Immediate('z'));
- BranchOrBacktrack(above, on_no_match);
- }
- ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
- ExternalReference word_map = ExternalReference::re_word_character_map();
- __ test_b(current_character(),
- Operand::StaticArray(current_character(), times_1, word_map));
- BranchOrBacktrack(zero, on_no_match);
- return true;
- }
- case 'W': {
- Label done;
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
- __ cmp(Operand(current_character()), Immediate('z'));
- __ j(above, &done);
- }
- ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
- ExternalReference word_map = ExternalReference::re_word_character_map();
- __ test_b(current_character(),
- Operand::StaticArray(current_character(), times_1, word_map));
- BranchOrBacktrack(not_zero, on_no_match);
- if (mode_ != ASCII) {
- __ bind(&done);
- }
- return true;
- }
- // Non-standard classes (with no syntactic shorthand) used internally.
- case '*':
- // Match any character.
- return true;
- case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029).
- // The opposite of '.'.
- __ mov(Operand(eax), current_character());
- __ xor_(Operand(eax), Immediate(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(Operand(eax), Immediate(0x0b));
- __ cmp(eax, 0x0c - 0x0b);
- if (mode_ == ASCII) {
- BranchOrBacktrack(above, on_no_match);
- } else {
- Label done;
- BranchOrBacktrack(below_equal, &done);
- ASSERT_EQ(UC16, mode_);
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(Operand(eax), Immediate(0x2028 - 0x0b));
- __ cmp(eax, 1);
- BranchOrBacktrack(above, on_no_match);
- __ bind(&done);
- }
- return true;
- }
- // No custom implementation (yet): s(UC16), S(UC16).
- default:
- return false;
- }
-}
-
-
-void RegExpMacroAssemblerIA32::Fail() {
- ASSERT(FAILURE == 0); // Return value for failure is zero.
- __ Set(eax, Immediate(0));
- __ jmp(&exit_label_);
-}
-
-
-Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
- // Finalize code - write the entry point code now we know how many
- // registers we need.
-
- // Entry code:
- __ bind(&entry_label_);
- // Start new stack frame.
- __ push(ebp);
- __ mov(ebp, esp);
- // Save callee-save registers. Order here should correspond to order of
- // kBackup_ebx etc.
- __ push(esi);
- __ push(edi);
- __ push(ebx); // Callee-save on MacOS.
- __ push(Immediate(0)); // Make room for "input start - 1" constant.
-
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_->isolate());
- __ mov(ecx, esp);
- __ sub(ecx, Operand::StaticVariable(stack_limit));
- // Handle it if the stack pointer is already below the stack limit.
- __ j(below_equal, &stack_limit_hit, not_taken);
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ cmp(ecx, num_registers_ * kPointerSize);
- __ j(above_equal, &stack_ok, taken);
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ mov(eax, EXCEPTION);
- __ jmp(&exit_label_);
-
- __ bind(&stack_limit_hit);
- CallCheckStackGuardState(ebx);
- __ or_(eax, Operand(eax));
- // If returned value is non-zero, we exit with the returned value as result.
- __ j(not_zero, &exit_label_);
-
- __ bind(&stack_ok);
- // Load start index for later use.
- __ mov(ebx, Operand(ebp, kStartIndex));
-
- // Allocate space on stack for registers.
- __ sub(Operand(esp), Immediate(num_registers_ * kPointerSize));
- // Load string length.
- __ mov(esi, Operand(ebp, kInputEnd));
- // Load input position.
- __ mov(edi, Operand(ebp, kInputStart));
- // Set up edi to be negative offset from string end.
- __ sub(edi, Operand(esi));
-
- // Set eax to address of char before start of the string.
- // (effectively string position -1).
- __ neg(ebx);
- if (mode_ == UC16) {
- __ lea(eax, Operand(edi, ebx, times_2, -char_size()));
- } else {
- __ lea(eax, Operand(edi, ebx, times_1, -char_size()));
- }
- // Store this value in a local variable, for use when clearing
- // position registers.
- __ mov(Operand(ebp, kInputStartMinusOne), eax);
-
- if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
- // Fill saved registers with initial value = start offset - 1
- // Fill in stack push order, to avoid accessing across an unwritten
- // page (a problem on Windows).
- __ mov(ecx, kRegisterZero);
- Label init_loop;
- __ bind(&init_loop);
- __ mov(Operand(ebp, ecx, times_1, +0), eax);
- __ sub(Operand(ecx), Immediate(kPointerSize));
- __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
- __ j(greater, &init_loop);
- }
- // Ensure that we have written to each stack page, in order. Skipping a page
- // on Windows can cause segmentation faults. Assuming page size is 4k.
- const int kPageSize = 4096;
- const int kRegistersPerPage = kPageSize / kPointerSize;
- for (int i = num_saved_registers_ + kRegistersPerPage - 1;
- i < num_registers_;
- i += kRegistersPerPage) {
- __ mov(register_location(i), eax); // One write every page.
- }
-
-
- // Initialize backtrack stack pointer.
- __ mov(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
- // Load previous char as initial value of current-character.
- Label at_start;
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
- __ j(equal, &at_start);
- LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
- __ jmp(&start_label_);
- __ bind(&at_start);
- __ mov(current_character(), '\n');
- __ jmp(&start_label_);
-
-
- // Exit code:
- if (success_label_.is_linked()) {
- // Save captures when successful.
- __ bind(&success_label_);
- if (num_saved_registers_ > 0) {
- // copy captures to output
- __ mov(ebx, Operand(ebp, kRegisterOutput));
- __ mov(ecx, Operand(ebp, kInputEnd));
- __ mov(edx, Operand(ebp, kStartIndex));
- __ sub(ecx, Operand(ebp, kInputStart));
- if (mode_ == UC16) {
- __ lea(ecx, Operand(ecx, edx, times_2, 0));
- } else {
- __ add(ecx, Operand(edx));
- }
- for (int i = 0; i < num_saved_registers_; i++) {
- __ mov(eax, register_location(i));
- // Convert to index from start of string, not end.
- __ add(eax, Operand(ecx));
- if (mode_ == UC16) {
- __ sar(eax, 1); // Convert byte index to character index.
- }
- __ mov(Operand(ebx, i * kPointerSize), eax);
- }
- }
- __ mov(eax, Immediate(SUCCESS));
- }
- // Exit and return eax
- __ bind(&exit_label_);
- // Skip esp past regexp registers.
- __ lea(esp, Operand(ebp, kBackup_ebx));
- // Restore callee-save registers.
- __ pop(ebx);
- __ pop(edi);
- __ pop(esi);
- // Exit function frame, restore previous one.
- __ pop(ebp);
- __ ret(0);
-
- // Backtrack code (branch target for conditional backtracks).
- if (backtrack_label_.is_linked()) {
- __ bind(&backtrack_label_);
- Backtrack();
- }
-
- Label exit_with_exception;
-
- // Preempt-code
- if (check_preempt_label_.is_linked()) {
- SafeCallTarget(&check_preempt_label_);
-
- __ push(backtrack_stackpointer());
- __ push(edi);
-
- CallCheckStackGuardState(ebx);
- __ or_(eax, Operand(eax));
- // If returning non-zero, we should end execution with the given
- // result as return value.
- __ j(not_zero, &exit_label_);
-
- __ pop(edi);
- __ pop(backtrack_stackpointer());
- // String might have moved: Reload esi from frame.
- __ mov(esi, Operand(ebp, kInputEnd));
- SafeReturn();
- }
-
- // Backtrack stack overflow code.
- if (stack_overflow_label_.is_linked()) {
- SafeCallTarget(&stack_overflow_label_);
- // Reached if the backtrack-stack limit has been hit.
-
- Label grow_failed;
- // Save registers before calling C function
- __ push(esi);
- __ push(edi);
-
- // Call GrowStack(backtrack_stackpointer())
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, ebx);
- __ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
- __ lea(eax, Operand(ebp, kStackHighEnd));
- __ mov(Operand(esp, 1 * kPointerSize), eax);
- __ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer());
- ExternalReference grow_stack =
- ExternalReference::re_grow_stack(masm_->isolate());
- __ CallCFunction(grow_stack, num_arguments);
- // If return NULL, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
- __ or_(eax, Operand(eax));
- __ j(equal, &exit_with_exception);
- // Otherwise use return value as new stack pointer.
- __ mov(backtrack_stackpointer(), eax);
- // Restore saved registers and continue.
- __ pop(edi);
- __ pop(esi);
- SafeReturn();
- }
-
- if (exit_with_exception.is_linked()) {
- // If any of the code above needed to exit with an exception.
- __ bind(&exit_with_exception);
- // Exit with Result EXCEPTION(-1) to signal thrown exception.
- __ mov(eax, EXCEPTION);
- __ jmp(&exit_label_);
- }
-
- CodeDesc code_desc;
- masm_->GetCode(&code_desc);
- Handle<Code> code =
- masm_->isolate()->factory()->NewCode(code_desc,
- Code::ComputeFlags(Code::REGEXP),
- masm_->CodeObject());
- PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
- return Handle<Object>::cast(code);
-}
-
-
-void RegExpMacroAssemblerIA32::GoTo(Label* to) {
- BranchOrBacktrack(no_condition, to);
-}
-
-
-void RegExpMacroAssemblerIA32::IfRegisterGE(int reg,
- int comparand,
- Label* if_ge) {
- __ cmp(register_location(reg), Immediate(comparand));
- BranchOrBacktrack(greater_equal, if_ge);
-}
-
-
-void RegExpMacroAssemblerIA32::IfRegisterLT(int reg,
- int comparand,
- Label* if_lt) {
- __ cmp(register_location(reg), Immediate(comparand));
- BranchOrBacktrack(less, if_lt);
-}
-
-
-void RegExpMacroAssemblerIA32::IfRegisterEqPos(int reg,
- Label* if_eq) {
- __ cmp(edi, register_location(reg));
- BranchOrBacktrack(equal, if_eq);
-}
-
-
-RegExpMacroAssembler::IrregexpImplementation
- RegExpMacroAssemblerIA32::Implementation() {
- return kIA32Implementation;
-}
-
-
-void RegExpMacroAssemblerIA32::LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters) {
- ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
- ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
- if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
- }
- LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
-
-
-void RegExpMacroAssemblerIA32::PopCurrentPosition() {
- Pop(edi);
-}
-
-
-void RegExpMacroAssemblerIA32::PopRegister(int register_index) {
- Pop(eax);
- __ mov(register_location(register_index), eax);
-}
-
-
-void RegExpMacroAssemblerIA32::PushBacktrack(Label* label) {
- Push(Immediate::CodeRelativeOffset(label));
- CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerIA32::PushCurrentPosition() {
- Push(edi);
-}
-
-
-void RegExpMacroAssemblerIA32::PushRegister(int register_index,
- StackCheckFlag check_stack_limit) {
- __ mov(eax, register_location(register_index));
- Push(eax);
- if (check_stack_limit) CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerIA32::ReadCurrentPositionFromRegister(int reg) {
- __ mov(edi, register_location(reg));
-}
-
-
-void RegExpMacroAssemblerIA32::ReadStackPointerFromRegister(int reg) {
- __ mov(backtrack_stackpointer(), register_location(reg));
- __ add(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
-}
-
-void RegExpMacroAssemblerIA32::SetCurrentPositionFromEnd(int by) {
- NearLabel after_position;
- __ cmp(edi, -by * char_size());
- __ j(greater_equal, &after_position);
- __ mov(edi, -by * char_size());
- // On RegExp code entry (where this operation is used), the character before
- // the current position is expected to be already loaded.
- // We have advanced the position, so it's safe to read backwards.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&after_position);
-}
-
-void RegExpMacroAssemblerIA32::SetRegister(int register_index, int to) {
- ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
- __ mov(register_location(register_index), Immediate(to));
-}
-
-
-void RegExpMacroAssemblerIA32::Succeed() {
- __ jmp(&success_label_);
-}
-
-
-void RegExpMacroAssemblerIA32::WriteCurrentPositionToRegister(int reg,
- int cp_offset) {
- if (cp_offset == 0) {
- __ mov(register_location(reg), edi);
- } else {
- __ lea(eax, Operand(edi, cp_offset * char_size()));
- __ mov(register_location(reg), eax);
- }
-}
-
-
-void RegExpMacroAssemblerIA32::ClearRegisters(int reg_from, int reg_to) {
- ASSERT(reg_from <= reg_to);
- __ mov(eax, Operand(ebp, kInputStartMinusOne));
- for (int reg = reg_from; reg <= reg_to; reg++) {
- __ mov(register_location(reg), eax);
- }
-}
-
-
-void RegExpMacroAssemblerIA32::WriteStackPointerToRegister(int reg) {
- __ mov(eax, backtrack_stackpointer());
- __ sub(eax, Operand(ebp, kStackHighEnd));
- __ mov(register_location(reg), eax);
-}
-
-
-// Private methods:
-
-void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, scratch);
- // RegExp code frame pointer.
- __ mov(Operand(esp, 2 * kPointerSize), ebp);
- // Code* of self.
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(masm_->CodeObject()));
- // Next address on the stack (will be address of return address).
- __ lea(eax, Operand(esp, -kPointerSize));
- __ mov(Operand(esp, 0 * kPointerSize), eax);
- ExternalReference check_stack_guard =
- ExternalReference::re_check_stack_guard_state(masm_->isolate());
- __ CallCFunction(check_stack_guard, num_arguments);
-}
-
-
-// Helper function for reading a value out of a stack frame.
-template <typename T>
-static T& frame_entry(Address re_frame, int frame_offset) {
- return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
-}
-
-
-int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame) {
- Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
- if (isolate->stack_guard()->IsStackOverflow()) {
- isolate->StackOverflow();
- return EXCEPTION;
- }
-
- // If not real stack overflow the stack guard was used to interrupt
- // execution for another purpose.
-
- // If this is a direct call from JavaScript retry the RegExp forcing the call
- // through the runtime system. Currently the direct call cannot handle a GC.
- if (frame_entry<int>(re_frame, kDirectCall) == 1) {
- return RETRY;
- }
-
- // Prepare for possible GC.
- HandleScope handles;
- Handle<Code> code_handle(re_code);
-
- Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
- // Current string.
- bool is_ascii = subject->IsAsciiRepresentation();
-
- ASSERT(re_code->instruction_start() <= *return_address);
- ASSERT(*return_address <=
- re_code->instruction_start() + re_code->instruction_size());
-
- MaybeObject* result = Execution::HandleStackGuardInterrupt();
-
- if (*code_handle != re_code) { // Return address no longer valid
- int delta = *code_handle - re_code;
- // Overwrite the return address on the stack.
- *return_address += delta;
- }
-
- if (result->IsException()) {
- return EXCEPTION;
- }
-
- // String might have changed.
- if (subject->IsAsciiRepresentation() != is_ascii) {
- // If we changed between an ASCII and an UC16 string, the specialized
- // code cannot be used, and we need to restart regexp matching from
- // scratch (including, potentially, compiling a new version of the code).
- return RETRY;
- }
-
- // Otherwise, the content of the string might have moved. It must still
- // be a sequential or external string with the same content.
- // Update the start and end pointers in the stack frame to the current
- // location (whether it has actually moved or not).
- ASSERT(StringShape(*subject).IsSequential() ||
- StringShape(*subject).IsExternal());
-
- // The original start address of the characters to match.
- const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
-
- // Find the current start address of the same character at the current string
- // position.
- int start_index = frame_entry<int>(re_frame, kStartIndex);
- const byte* new_address = StringCharacterPosition(*subject, start_index);
-
- if (start_address != new_address) {
- // If there is a difference, update the object pointer and start and end
- // addresses in the RegExp stack frame to match the new value.
- const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
- int byte_length = end_address - start_address;
- frame_entry<const String*>(re_frame, kInputString) = *subject;
- frame_entry<const byte*>(re_frame, kInputStart) = new_address;
- frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
- }
-
- return 0;
-}
-
-
-Operand RegExpMacroAssemblerIA32::register_location(int register_index) {
- ASSERT(register_index < (1<<30));
- if (num_registers_ <= register_index) {
- num_registers_ = register_index + 1;
- }
- return Operand(ebp, kRegisterZero - register_index * kPointerSize);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckPosition(int cp_offset,
- Label* on_outside_input) {
- __ cmp(edi, -cp_offset * char_size());
- BranchOrBacktrack(greater_equal, on_outside_input);
-}
-
-
-void RegExpMacroAssemblerIA32::BranchOrBacktrack(Condition condition,
- Label* to,
- Hint hint) {
- if (condition < 0) { // No condition
- if (to == NULL) {
- Backtrack();
- return;
- }
- __ jmp(to);
- return;
- }
- if (to == NULL) {
- __ j(condition, &backtrack_label_, hint);
- return;
- }
- __ j(condition, to, hint);
-}
-
-
-void RegExpMacroAssemblerIA32::SafeCall(Label* to) {
- Label return_to;
- __ push(Immediate::CodeRelativeOffset(&return_to));
- __ jmp(to);
- __ bind(&return_to);
-}
-
-
-void RegExpMacroAssemblerIA32::SafeReturn() {
- __ pop(ebx);
- __ add(Operand(ebx), Immediate(masm_->CodeObject()));
- __ jmp(Operand(ebx));
-}
-
-
-void RegExpMacroAssemblerIA32::SafeCallTarget(Label* name) {
- __ bind(name);
-}
-
-
-void RegExpMacroAssemblerIA32::Push(Register source) {
- ASSERT(!source.is(backtrack_stackpointer()));
- // Notice: This updates flags, unlike normal Push.
- __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
- __ mov(Operand(backtrack_stackpointer(), 0), source);
-}
-
-
-void RegExpMacroAssemblerIA32::Push(Immediate value) {
- // Notice: This updates flags, unlike normal Push.
- __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
- __ mov(Operand(backtrack_stackpointer(), 0), value);
-}
-
-
-void RegExpMacroAssemblerIA32::Pop(Register target) {
- ASSERT(!target.is(backtrack_stackpointer()));
- __ mov(target, Operand(backtrack_stackpointer(), 0));
- // Notice: This updates flags, unlike normal Pop.
- __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
-}
-
-
-void RegExpMacroAssemblerIA32::CheckPreemption() {
- // Check for preemption.
- Label no_preempt;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_->isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above, &no_preempt, taken);
-
- SafeCall(&check_preempt_label_);
-
- __ bind(&no_preempt);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckStackLimit() {
- Label no_stack_overflow;
- ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
- __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
- __ j(above, &no_stack_overflow);
-
- SafeCall(&stack_overflow_label_);
-
- __ bind(&no_stack_overflow);
-}
-
-
-void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
- int characters) {
- if (mode_ == ASCII) {
- if (characters == 4) {
- __ mov(current_character(), Operand(esi, edi, times_1, cp_offset));
- } else if (characters == 2) {
- __ movzx_w(current_character(), Operand(esi, edi, times_1, cp_offset));
- } else {
- ASSERT(characters == 1);
- __ movzx_b(current_character(), Operand(esi, edi, times_1, cp_offset));
- }
- } else {
- ASSERT(mode_ == UC16);
- if (characters == 2) {
- __ mov(current_character(),
- Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
- } else {
- ASSERT(characters == 1);
- __ movzx_w(current_character(),
- Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
- }
- }
-}
-
-
-#undef __
-
-#endif // V8_INTERPRETED_REGEXP
-
-}} // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.h b/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.h
deleted file mode 100644
index 0af61f2..0000000
--- a/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright 2008-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
-#define V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
-
-namespace v8 {
-namespace internal {
-
-#ifdef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerIA32: public RegExpMacroAssembler {
- public:
- RegExpMacroAssemblerIA32() { }
- virtual ~RegExpMacroAssemblerIA32() { }
-};
-
-#else // V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
- public:
- RegExpMacroAssemblerIA32(Mode mode, int registers_to_save);
- virtual ~RegExpMacroAssemblerIA32();
- virtual int stack_limit_slack();
- virtual void AdvanceCurrentPosition(int by);
- virtual void AdvanceRegister(int reg, int by);
- virtual void Backtrack();
- virtual void Bind(Label* label);
- virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckCharacter(uint32_t c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal);
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
- virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
- // A "greedy loop" is a loop that is both greedy and with a simple
- // body. It has a particularly simple implementation.
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- Label* on_no_match);
- virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
- virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal);
- // Checks whether the given offset from the current position is before
- // the end of the string.
- virtual void CheckPosition(int cp_offset, Label* on_outside_input);
- virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
- virtual void Fail();
- virtual Handle<Object> GetCode(Handle<String> source);
- virtual void GoTo(Label* label);
- virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
- virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
- virtual void IfRegisterEqPos(int reg, Label* if_eq);
- virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1);
- virtual void PopCurrentPosition();
- virtual void PopRegister(int register_index);
- virtual void PushBacktrack(Label* label);
- virtual void PushCurrentPosition();
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit);
- virtual void ReadCurrentPositionFromRegister(int reg);
- virtual void ReadStackPointerFromRegister(int reg);
- virtual void SetCurrentPositionFromEnd(int by);
- virtual void SetRegister(int register_index, int to);
- virtual void Succeed();
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
- virtual void ClearRegisters(int reg_from, int reg_to);
- virtual void WriteStackPointerToRegister(int reg);
-
- // Called from RegExp if the stack-guard is triggered.
- // If the code object is relocated, the return address is fixed before
- // returning.
- static int CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame);
-
- private:
- // Offsets from ebp of function parameters and stored registers.
- static const int kFramePointer = 0;
- // Above the frame pointer - function parameters and return address.
- static const int kReturn_eip = kFramePointer + kPointerSize;
- static const int kFrameAlign = kReturn_eip + kPointerSize;
- // Parameters.
- static const int kInputString = kFrameAlign;
- static const int kStartIndex = kInputString + kPointerSize;
- static const int kInputStart = kStartIndex + kPointerSize;
- static const int kInputEnd = kInputStart + kPointerSize;
- static const int kRegisterOutput = kInputEnd + kPointerSize;
- static const int kStackHighEnd = kRegisterOutput + kPointerSize;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
- // Below the frame pointer - local stack variables.
- // When adding local variables remember to push space for them in
- // the frame in GetCode.
- static const int kBackup_esi = kFramePointer - kPointerSize;
- static const int kBackup_edi = kBackup_esi - kPointerSize;
- static const int kBackup_ebx = kBackup_edi - kPointerSize;
- static const int kInputStartMinusOne = kBackup_ebx - kPointerSize;
- // First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
-
- // Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
-
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
- // Check whether preemption has been requested.
- void CheckPreemption();
-
- // Check whether we are exceeding the stack limit on the backtrack stack.
- void CheckStackLimit();
-
- // Generate a call to CheckStackGuardState.
- void CallCheckStackGuardState(Register scratch);
-
- // The ebp-relative location of a regexp register.
- Operand register_location(int register_index);
-
- // The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return edx; }
-
- // The register containing the backtrack stack top. Provides a meaningful
- // name to the register.
- inline Register backtrack_stackpointer() { return ecx; }
-
- // Byte size of chars in the string to match (decided by the Mode argument)
- inline int char_size() { return static_cast<int>(mode_); }
-
- // Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
- void BranchOrBacktrack(Condition condition, Label* to, Hint hint = no_hint);
-
- // Call and return internally in the generated code in a way that
- // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
- inline void SafeCall(Label* to);
- inline void SafeReturn();
- inline void SafeCallTarget(Label* name);
-
- // Pushes the value of a register on the backtrack stack. Decrements the
- // stack pointer (ecx) by a word size and stores the register's value there.
- inline void Push(Register source);
-
- // Pushes a value on the backtrack stack. Decrements the stack pointer (ecx)
- // by a word size and stores the value there.
- inline void Push(Immediate value);
-
- // Pops a value from the backtrack stack. Reads the word at the stack pointer
- // (ecx) and increments it by a word size.
- inline void Pop(Register target);
-
- MacroAssembler* masm_;
-
- // Which mode to generate code for (ASCII or UC16).
- Mode mode_;
-
- // One greater than maximal register index actually used.
- int num_registers_;
-
- // Number of registers to output at the end (the saved registers
- // are always 0..num_saved_registers_-1)
- int num_saved_registers_;
-
- // Labels used internally.
- Label entry_label_;
- Label start_label_;
- Label success_label_;
- Label backtrack_label_;
- Label exit_label_;
- Label check_preempt_label_;
- Label stack_overflow_label_;
-};
-#endif // V8_INTERPRETED_REGEXP
-
-}} // namespace v8::internal
-
-#endif // V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/register-allocator-ia32-inl.h b/src/3rdparty/v8/src/ia32/register-allocator-ia32-inl.h
deleted file mode 100644
index 99ae6eb..0000000
--- a/src/3rdparty/v8/src/ia32/register-allocator-ia32-inl.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
-#define V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
- // The code for this test relies on the order of register codes.
- return reg.code() >= esp.code() && reg.code() <= esi.code();
-}
-
-
-// The register allocator uses small integers to represent the
-// non-reserved assembler registers. The mapping is:
-
-// eax <-> 0, ebx <-> 1, ecx <-> 2, edx <-> 3, edi <-> 4.
-
-int RegisterAllocator::ToNumber(Register reg) {
- ASSERT(reg.is_valid() && !IsReserved(reg));
- const int kNumbers[] = {
- 0, // eax
- 2, // ecx
- 3, // edx
- 1, // ebx
- -1, // esp
- -1, // ebp
- -1, // esi
- 4 // edi
- };
- return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
- ASSERT(num >= 0 && num < kNumRegisters);
- const Register kRegisters[] = { eax, ebx, ecx, edx, edi };
- return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
- Reset();
- // The non-reserved edi register is live on JS function entry.
- Use(edi); // JS function.
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
diff --git a/src/3rdparty/v8/src/ia32/register-allocator-ia32.cc b/src/3rdparty/v8/src/ia32/register-allocator-ia32.cc
deleted file mode 100644
index 6db13d4..0000000
--- a/src/3rdparty/v8/src/ia32/register-allocator-ia32.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-void Result::ToRegister() {
- ASSERT(is_valid());
- if (is_constant()) {
- CodeGenerator* code_generator =
- CodeGeneratorScope::Current(Isolate::Current());
- Result fresh = code_generator->allocator()->Allocate();
- ASSERT(fresh.is_valid());
- if (is_untagged_int32()) {
- fresh.set_untagged_int32(true);
- if (handle()->IsSmi()) {
- code_generator->masm()->Set(
- fresh.reg(),
- Immediate(Smi::cast(*handle())->value()));
- } else if (handle()->IsHeapNumber()) {
- double double_value = HeapNumber::cast(*handle())->value();
- int32_t value = DoubleToInt32(double_value);
- if (double_value == 0 && signbit(double_value)) {
- // Negative zero must not be converted to an int32 unless
- // the context allows it.
- code_generator->unsafe_bailout_->Branch(equal);
- code_generator->unsafe_bailout_->Branch(not_equal);
- } else if (double_value == value) {
- code_generator->masm()->Set(fresh.reg(), Immediate(value));
- } else {
- code_generator->unsafe_bailout_->Branch(equal);
- code_generator->unsafe_bailout_->Branch(not_equal);
- }
- } else {
- // Constant is not a number. This was not predicted by AST analysis.
- code_generator->unsafe_bailout_->Branch(equal);
- code_generator->unsafe_bailout_->Branch(not_equal);
- }
- } else if (code_generator->IsUnsafeSmi(handle())) {
- code_generator->MoveUnsafeSmi(fresh.reg(), handle());
- } else {
- code_generator->masm()->Set(fresh.reg(), Immediate(handle()));
- }
- // This result becomes a copy of the fresh one.
- fresh.set_type_info(type_info());
- *this = fresh;
- }
- ASSERT(is_register());
-}
-
-
-void Result::ToRegister(Register target) {
- CodeGenerator* code_generator =
- CodeGeneratorScope::Current(Isolate::Current());
- ASSERT(is_valid());
- if (!is_register() || !reg().is(target)) {
- Result fresh = code_generator->allocator()->Allocate(target);
- ASSERT(fresh.is_valid());
- if (is_register()) {
- code_generator->masm()->mov(fresh.reg(), reg());
- } else {
- ASSERT(is_constant());
- if (is_untagged_int32()) {
- if (handle()->IsSmi()) {
- code_generator->masm()->Set(
- fresh.reg(),
- Immediate(Smi::cast(*handle())->value()));
- } else {
- ASSERT(handle()->IsHeapNumber());
- double double_value = HeapNumber::cast(*handle())->value();
- int32_t value = DoubleToInt32(double_value);
- if (double_value == 0 && signbit(double_value)) {
- // Negative zero must not be converted to an int32 unless
- // the context allows it.
- code_generator->unsafe_bailout_->Branch(equal);
- code_generator->unsafe_bailout_->Branch(not_equal);
- } else if (double_value == value) {
- code_generator->masm()->Set(fresh.reg(), Immediate(value));
- } else {
- code_generator->unsafe_bailout_->Branch(equal);
- code_generator->unsafe_bailout_->Branch(not_equal);
- }
- }
- } else {
- if (code_generator->IsUnsafeSmi(handle())) {
- code_generator->MoveUnsafeSmi(fresh.reg(), handle());
- } else {
- code_generator->masm()->Set(fresh.reg(), Immediate(handle()));
- }
- }
- }
- fresh.set_type_info(type_info());
- fresh.set_untagged_int32(is_untagged_int32());
- *this = fresh;
- } else if (is_register() && reg().is(target)) {
- ASSERT(code_generator->has_valid_frame());
- code_generator->frame()->Spill(target);
- ASSERT(code_generator->allocator()->count(target) == 1);
- }
- ASSERT(is_register());
- ASSERT(reg().is(target));
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
- Result result = AllocateWithoutSpilling();
- // Check that the register is a byte register. If not, unuse the
- // register if valid and return an invalid result.
- if (result.is_valid() && !result.reg().is_byte_register()) {
- result.Unuse();
- return Result();
- }
- return result;
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/register-allocator-ia32.h b/src/3rdparty/v8/src/ia32/register-allocator-ia32.h
deleted file mode 100644
index e7ce91f..0000000
--- a/src/3rdparty/v8/src/ia32/register-allocator-ia32.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_REGISTER_ALLOCATOR_IA32_H_
-#define V8_IA32_REGISTER_ALLOCATOR_IA32_H_
-
-namespace v8 {
-namespace internal {
-
-class RegisterAllocatorConstants : public AllStatic {
- public:
- static const int kNumRegisters = 5;
- static const int kInvalidRegister = -1;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_REGISTER_ALLOCATOR_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/simulator-ia32.cc b/src/3rdparty/v8/src/ia32/simulator-ia32.cc
deleted file mode 100644
index ab81693..0000000
--- a/src/3rdparty/v8/src/ia32/simulator-ia32.cc
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// Since there is no simulator for the ia32 architecture this file is empty.
-
diff --git a/src/3rdparty/v8/src/ia32/simulator-ia32.h b/src/3rdparty/v8/src/ia32/simulator-ia32.h
deleted file mode 100644
index cb660cd..0000000
--- a/src/3rdparty/v8/src/ia32/simulator-ia32.h
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_SIMULATOR_IA32_H_
-#define V8_IA32_SIMULATOR_IA32_H_
-
-#include "allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// Since there is no simulator for the ia32 architecture the only thing we can
-// do is to call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-
-typedef int (*regexp_matcher)(String*, int, const byte*,
- const byte*, int*, Address, int, Isolate*);
-
-// Call the generated regexp code directly. The code at the entry address should
-// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
- (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7))
-
-
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- (reinterpret_cast<TryCatch*>(try_catch_address))
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on ia32 uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch() { }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_SIMULATOR_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/stub-cache-ia32.cc b/src/3rdparty/v8/src/ia32/stub-cache-ia32.cc
deleted file mode 100644
index 380d38f..0000000
--- a/src/3rdparty/v8/src/ia32/stub-cache-ia32.cc
+++ /dev/null
@@ -1,3711 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "ic-inl.h"
-#include "codegen-inl.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register name,
- Register offset,
- Register extra) {
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
-
- Label miss;
-
- if (extra.is_valid()) {
- // Get the code entry from the cache.
- __ mov(extra, Operand::StaticArray(offset, times_2, value_offset));
-
- // Check that the key in the entry matches the name.
- __ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
- __ j(not_equal, &miss, not_taken);
-
- // Check that the flags match what we're looking for.
- __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
- __ and_(offset, ~Code::kFlagsNotUsedInLookup);
- __ cmp(offset, flags);
- __ j(not_equal, &miss);
-
- // Jump to the first instruction in the code stub.
- __ add(Operand(extra), Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(Operand(extra));
-
- __ bind(&miss);
- } else {
- // Save the offset on the stack.
- __ push(offset);
-
- // Check that the key in the entry matches the name.
- __ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
- __ j(not_equal, &miss, not_taken);
-
- // Get the code entry from the cache.
- __ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
-
- // Check that the flags match what we're looking for.
- __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
- __ and_(offset, ~Code::kFlagsNotUsedInLookup);
- __ cmp(offset, flags);
- __ j(not_equal, &miss);
-
- // Restore offset and re-load code entry from cache.
- __ pop(offset);
- __ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
-
- // Jump to the first instruction in the code stub.
- __ add(Operand(offset), Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(Operand(offset));
-
- // Pop at miss.
- __ bind(&miss);
- __ pop(offset);
- }
-}
-
-
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be a symbol and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- String* name,
- Register r0,
- Register r1) {
- ASSERT(name->IsSymbol());
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1);
-
- Label done;
- __ mov(r0, FieldOperand(receiver, HeapObject::kMapOffset));
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- __ test_b(FieldOperand(r0, Map::kBitFieldOffset),
- kInterceptorOrAccessCheckNeededMask);
- __ j(not_zero, miss_label, not_taken);
-
- // Check that receiver is a JSObject.
- __ CmpInstanceType(r0, FIRST_JS_OBJECT_TYPE);
- __ j(below, miss_label, not_taken);
-
- // Load properties array.
- Register properties = r0;
- __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
-
- // Check that the properties array is a dictionary.
- __ cmp(FieldOperand(properties, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->hash_table_map()));
- __ j(not_equal, miss_label);
-
- // Compute the capacity mask.
- const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
-
- // Generate an unrolled loop that performs a few probes before
- // giving up.
- static const int kProbes = 4;
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
-
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the null value).
- for (int i = 0; i < kProbes; i++) {
- // r0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = r1;
- // Capacity is smi 2^n.
- __ mov(index, FieldOperand(properties, kCapacityOffset));
- __ dec(index);
- __ and_(Operand(index),
- Immediate(Smi::FromInt(name->Hash() +
- StringDictionary::GetProbeOffset(i))));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
-
- Register entity_name = r1;
- // Having undefined at this place means the name is not contained.
- ASSERT_EQ(kSmiTagSize, 1);
- __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
- if (i != kProbes - 1) {
- __ j(equal, &done, taken);
-
- // Stop if found the property.
- __ cmp(entity_name, Handle<String>(name));
- __ j(equal, miss_label, not_taken);
-
- // Check if the entry name is not a symbol.
- __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
- __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
- kIsSymbolMask);
- __ j(zero, miss_label, not_taken);
- } else {
- // Give up probing if still not found the undefined value.
- __ j(not_equal, miss_label, not_taken);
- }
- }
-
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1);
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2) {
- Isolate* isolate = Isolate::Current();
- Label miss;
- USE(extra2); // The register extra2 is not used on the ia32 platform.
-
- // Make sure that code is valid. The shifting code relies on the
- // entry size being 8.
- ASSERT(sizeof(Entry) == 8);
-
- // Make sure the flags does not name a specific type.
- ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Make sure that there are no register conflicts.
- ASSERT(!scratch.is(receiver));
- ASSERT(!scratch.is(name));
- ASSERT(!extra.is(receiver));
- ASSERT(!extra.is(name));
- ASSERT(!extra.is(scratch));
-
- // Check scratch and extra registers are valid, and extra2 is unused.
- ASSERT(!scratch.is(no_reg));
- ASSERT(extra2.is(no_reg));
-
- // Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
-
- // Get the map of the receiver and compute the hash.
- __ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
- __ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, flags);
- __ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
-
- // Probe the primary table.
- ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra);
-
- // Primary miss: Compute hash for secondary probe.
- __ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
- __ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, flags);
- __ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
- __ sub(scratch, Operand(name));
- __ add(Operand(scratch), Immediate(flags));
- __ and_(scratch, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
-
- // Probe the secondary table.
- ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
-}
-
-
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype) {
- __ LoadGlobalFunction(index, prototype);
- __ LoadGlobalFunctionInitialMap(prototype, prototype);
- // Load the prototype from the initial map.
- __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
- // Check we're still in the same context.
- __ cmp(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)),
- masm->isolate()->global());
- __ j(not_equal, miss);
- // Get the global function with the given index.
- JSFunction* function =
- JSFunction::cast(masm->isolate()->global_context()->get(index));
- // Load its initial map. The global functions all have initial maps.
- __ Set(prototype, Immediate(Handle<Map>(function->initial_map())));
- // Load the prototype from the initial map.
- __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* miss_label) {
- // Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss_label, not_taken);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, miss_label, not_taken);
-
- // Load length directly from the JS array.
- __ mov(eax, FieldOperand(receiver, JSArray::kLengthOffset));
- __ ret(0);
-}
-
-
-// Generate code to check if an object is a string. If the object is
-// a string, the map's instance type is left in the scratch register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* smi,
- Label* non_string_object) {
- // Check that the object isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, smi, not_taken);
-
- // Check that the object is a string.
- __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- ASSERT(kNotStringTag != 0);
- __ test(scratch, Immediate(kNotStringTag));
- __ j(not_zero, non_string_object, not_taken);
-}
-
-
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss,
- bool support_wrappers) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss,
- support_wrappers ? &check_wrapper : miss);
-
- // Load length from the string and convert to a smi.
- __ mov(eax, FieldOperand(receiver, String::kLengthOffset));
- __ ret(0);
-
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, JS_VALUE_TYPE);
- __ j(not_equal, miss, not_taken);
-
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
- }
-}
-
-
-void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- __ mov(eax, Operand(scratch1));
- __ ret(0);
-}
-
-
-// Load a fast property out of a holder object (src). In-object properties
-// are loaded directly otherwise the property is loaded from the properties
-// fixed array.
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst, Register src,
- JSObject* holder, int index) {
- // Adjust for the number of properties stored in the holder.
- index -= holder->map()->inobject_properties();
- if (index < 0) {
- // Get the property straight out of the holder.
- int offset = holder->map()->instance_size() + (index * kPointerSize);
- __ mov(dst, FieldOperand(src, offset));
- } else {
- // Calculate the offset into the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- __ mov(dst, FieldOperand(src, JSObject::kPropertiesOffset));
- __ mov(dst, FieldOperand(dst, offset));
- }
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- JSObject* holder_obj) {
- __ push(name);
- InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
- Register scratch = name;
- __ mov(scratch, Immediate(Handle<Object>(interceptor)));
- __ push(scratch);
- __ push(receiver);
- __ push(holder);
- __ push(FieldOperand(scratch, InterceptorInfo::kDataOffset));
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- JSObject* holder_obj) {
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
- masm->isolate()),
- 5);
-}
-
-
-// Number of pointers to be reserved on stack for fast API call.
-static const int kFastApiCallArguments = 3;
-
-
-// Reserves space for the extra arguments to API function in the
-// caller's frame.
-//
-// These arguments are set by CheckPrototypes and GenerateFastApiCall.
-static void ReserveSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address
- // -- esp[4] : last argument in the internal frame of the caller
- // -----------------------------------
- __ pop(scratch);
- for (int i = 0; i < kFastApiCallArguments; i++) {
- __ push(Immediate(Smi::FromInt(0)));
- }
- __ push(scratch);
-}
-
-
-// Undoes the effects of ReserveSpaceForFastApiCall.
-static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address.
- // -- esp[4] : last fast api call extra argument.
- // -- ...
- // -- esp[kFastApiCallArguments * 4] : first fast api call extra argument.
- // -- esp[kFastApiCallArguments * 4 + 4] : last argument in the internal
- // frame.
- // -----------------------------------
- __ pop(scratch);
- __ add(Operand(esp), Immediate(kPointerSize * kFastApiCallArguments));
- __ push(scratch);
-}
-
-
-// Generates call to API function.
-static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address
- // -- esp[4] : object passing the type check
- // (last fast api call extra argument,
- // set by CheckPrototypes)
- // -- esp[8] : api function
- // (first fast api call extra argument)
- // -- esp[12] : api call data
- // -- esp[16] : last argument
- // -- ...
- // -- esp[(argc + 3) * 4] : first argument
- // -- esp[(argc + 4) * 4] : receiver
- // -----------------------------------
- // Get the function and setup the context.
- JSFunction* function = optimization.constant_function();
- __ mov(edi, Immediate(Handle<JSFunction>(function)));
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Pass the additional arguments.
- __ mov(Operand(esp, 2 * kPointerSize), edi);
- Object* call_data = optimization.api_call_info()->data();
- Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
- if (masm->isolate()->heap()->InNewSpace(call_data)) {
- __ mov(ecx, api_call_info_handle);
- __ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
- __ mov(Operand(esp, 3 * kPointerSize), ebx);
- } else {
- __ mov(Operand(esp, 3 * kPointerSize),
- Immediate(Handle<Object>(call_data)));
- }
-
- // Prepare arguments.
- __ lea(eax, Operand(esp, 3 * kPointerSize));
-
- Object* callback = optimization.api_call_info()->callback();
- Address api_function_address = v8::ToCData<Address>(callback);
- ApiFunction fun(api_function_address);
-
- const int kApiArgc = 1; // API function gets reference to the v8::Arguments.
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 4;
-
- __ PrepareCallApiFunction(kApiArgc + kApiStackSpace, ebx);
-
- __ mov(ApiParameterOperand(1), eax); // v8::Arguments::implicit_args_.
- __ add(Operand(eax), Immediate(argc * kPointerSize));
- __ mov(ApiParameterOperand(2), eax); // v8::Arguments::values_.
- __ Set(ApiParameterOperand(3), Immediate(argc)); // v8::Arguments::length_.
- // v8::Arguments::is_construct_call_.
- __ Set(ApiParameterOperand(4), Immediate(0));
-
- // v8::InvocationCallback's argument.
- __ lea(eax, ApiParameterOperand(1));
- __ mov(ApiParameterOperand(0), eax);
-
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
- return masm->TryCallApiFunctionAndReturn(&fun,
- argc + kFastApiCallArguments + 1);
-}
-
-
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(StubCompiler* stub_compiler,
- const ParameterCount& arguments,
- Register name)
- : stub_compiler_(stub_compiler),
- arguments_(arguments),
- name_(name) {}
-
- MaybeObject* Compile(MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
-
- CallOptimization optimization(lookup);
-
- if (optimization.is_constant_call()) {
- return CompileCacheable(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- holder,
- lookup,
- name,
- optimization,
- miss);
- } else {
- CompileRegular(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- name,
- holder,
- miss);
- return masm->isolate()->heap()->undefined_value(); // Success.
- }
- }
-
- private:
- MaybeObject* CompileCacheable(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- const CallOptimization& optimization,
- Label* miss_label) {
- ASSERT(optimization.is_constant_call());
- ASSERT(!lookup->holder()->IsGlobalObject());
-
- int depth1 = kInvalidProtoDepth;
- int depth2 = kInvalidProtoDepth;
- bool can_do_fast_api_call = false;
- if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 =
- optimization.GetPrototypeDepthOfExpectedType(object,
- interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 =
- optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
- lookup->holder());
- }
- can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
- (depth2 != kInvalidProtoDepth);
- }
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->call_const_interceptor(), 1);
-
- if (can_do_fast_api_call) {
- __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1);
- ReserveSpaceForFastApiCall(masm, scratch1);
- }
-
- // Check that the maps from receiver to interceptor's holder
- // haven't changed and thus we can invoke interceptor.
- Label miss_cleanup;
- Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver,
- interceptor_holder, scratch1,
- scratch2, scratch3, name, depth1, miss);
-
- // Invoke an interceptor and if it provides a value,
- // branch to |regular_invoke|.
- Label regular_invoke;
- LoadWithInterceptor(masm, receiver, holder, interceptor_holder,
- &regular_invoke);
-
- // Interceptor returned nothing for this property. Try to use cached
- // constant function.
-
- // Check that the maps from interceptor's holder to constant function's
- // holder haven't changed and thus we can use cached constant function.
- if (interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- lookup->holder(), scratch1,
- scratch2, scratch3, name, depth2, miss);
- } else {
- // CheckPrototypes has a side effect of fetching a 'holder'
- // for API (object which is instanceof for the signature). It's
- // safe to omit it here, as if present, it should be fetched
- // by the previous CheckPrototypes.
- ASSERT(depth2 == kInvalidProtoDepth);
- }
-
- // Invoke function.
- if (can_do_fast_api_call) {
- MaybeObject* result =
- GenerateFastApiCall(masm, optimization, arguments_.immediate());
- if (result->IsFailure()) return result;
- } else {
- __ InvokeFunction(optimization.constant_function(), arguments_,
- JUMP_FUNCTION);
- }
-
- // Deferred code for fast API call case---clean preallocated space.
- if (can_do_fast_api_call) {
- __ bind(&miss_cleanup);
- FreeSpaceForFastApiCall(masm, scratch1);
- __ jmp(miss_label);
- }
-
- // Invoke a regular function.
- __ bind(&regular_invoke);
- if (can_do_fast_api_call) {
- FreeSpaceForFastApiCall(masm, scratch1);
- }
-
- return masm->isolate()->heap()->undefined_value(); // Success.
- }
-
- void CompileRegular(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- String* name,
- JSObject* interceptor_holder,
- Label* miss_label) {
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3, name,
- miss_label);
-
- __ EnterInternalFrame();
- // Save the name_ register across the call.
- __ push(name_);
-
- PushInterceptorArguments(masm,
- receiver,
- holder,
- name_,
- interceptor_holder);
-
- __ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
- masm->isolate()),
- 5);
-
- // Restore the name_ register.
- __ pop(name_);
- __ LeaveInternalFrame();
- }
-
- void LoadWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- JSObject* holder_obj,
- Label* interceptor_succeeded) {
- __ EnterInternalFrame();
- __ push(holder); // Save the holder.
- __ push(name_); // Save the name.
-
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
-
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- __ LeaveInternalFrame();
-
- __ cmp(eax, masm->isolate()->factory()->no_interceptor_result_sentinel());
- __ j(not_equal, interceptor_succeeded);
- }
-
- StubCompiler* stub_compiler_;
- const ParameterCount& arguments_;
- Register name_;
-};
-
-
-void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Code* code = NULL;
- if (kind == Code::LOAD_IC) {
- code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
- } else {
- code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
- }
-
- Handle<Code> ic(code);
- __ jmp(ic, RelocInfo::CODE_TARGET);
-}
-
-
-// Both name_reg and receiver_reg are preserved on jumps to miss_label,
-// but may be destroyed if store is successful.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- JSObject* object,
- int index,
- Map* transition,
- Register receiver_reg,
- Register name_reg,
- Register scratch,
- Label* miss_label) {
- // Check that the object isn't a smi.
- __ test(receiver_reg, Immediate(kSmiTagMask));
- __ j(zero, miss_label, not_taken);
-
- // Check that the map of the object hasn't changed.
- __ cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
- Immediate(Handle<Map>(object->map())));
- __ j(not_equal, miss_label, not_taken);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- // Perform map transition for the receiver if necessary.
- if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
- // The properties must be extended before we can store the value.
- // We jump to a runtime call that extends the properties array.
- __ pop(scratch); // Return address.
- __ push(receiver_reg);
- __ push(Immediate(Handle<Map>(transition)));
- __ push(eax);
- __ push(scratch);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
- masm->isolate()),
- 3,
- 1);
- return;
- }
-
- if (transition != NULL) {
- // Update the map of the object; no write barrier updating is
- // needed because the map is never in new space.
- __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset),
- Immediate(Handle<Map>(transition)));
- }
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties is not going to change.
- index -= object->map()->inobject_properties();
-
- if (index < 0) {
- // Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ mov(FieldOperand(receiver_reg, offset), eax);
-
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, Operand(eax));
- __ RecordWrite(receiver_reg, offset, name_reg, scratch);
- } else {
- // Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- // Get the properties array (optimistically).
- __ mov(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ mov(FieldOperand(scratch, offset), eax);
-
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, Operand(eax));
- __ RecordWrite(scratch, offset, name_reg, receiver_reg);
- }
-
- // Return the value (register eax).
- __ ret(0);
-}
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
- MacroAssembler* masm,
- GlobalObject* global,
- String* name,
- Register scratch,
- Label* miss) {
- Object* probe;
- { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
- ASSERT(cell->value()->IsTheHole());
- if (Serializer::enabled()) {
- __ mov(scratch, Immediate(Handle<Object>(cell)));
- __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
- Immediate(masm->isolate()->factory()->the_hole_value()));
- } else {
- __ cmp(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)),
- Immediate(masm->isolate()->factory()->the_hole_value()));
- }
- __ j(not_equal, miss, not_taken);
- return cell;
-}
-
-
-// Calls GenerateCheckPropertyCell for each global object in the prototype chain
-// from object to (but not including) holder.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
- MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- Register scratch,
- Label* miss) {
- JSObject* current = object;
- while (current != holder) {
- if (current->IsGlobalObject()) {
- // Returns a cell or a failure.
- MaybeObject* result = GenerateCheckPropertyCell(
- masm,
- GlobalObject::cast(current),
- name,
- scratch,
- miss);
- if (result->IsFailure()) return result;
- }
- ASSERT(current->IsJSObject());
- current = JSObject::cast(current->GetPrototype());
- }
- return NULL;
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Register StubCompiler::CheckPrototypes(JSObject* object,
- Register object_reg,
- JSObject* holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- String* name,
- int save_at_depth,
- Label* miss) {
- // Make sure there's no overlap between holder and object registers.
- ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- JSObject* current = object;
- int depth = 0;
-
- if (save_at_depth == depth) {
- __ mov(Operand(esp, kPointerSize), reg);
- }
-
- // Traverse the prototype chain and check the maps in the prototype chain for
- // fast and global objects or do negative lookup for normal objects.
- while (current != holder) {
- depth++;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
-
- ASSERT(current->GetPrototype()->IsJSObject());
- JSObject* prototype = JSObject::cast(current->GetPrototype());
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
- if (!name->IsSymbol()) {
- MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
- Object* lookup_result = NULL; // Initialization to please compiler.
- if (!maybe_lookup_result->ToObject(&lookup_result)) {
- set_failure(Failure::cast(maybe_lookup_result));
- return reg;
- }
- name = String::cast(lookup_result);
- }
- ASSERT(current->property_dictionary()->FindEntry(name) ==
- StringDictionary::kNotFound);
-
- GenerateDictionaryNegativeLookup(masm(),
- miss,
- reg,
- name,
- scratch1,
- scratch2);
- __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // from now the object is in holder_reg
- __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else if (heap()->InNewSpace(prototype)) {
- // Get the map of the current object.
- __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- __ cmp(Operand(scratch1), Immediate(Handle<Map>(current->map())));
- // Branch on the result of the map check.
- __ j(not_equal, miss, not_taken);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
-
- // Restore scratch register to be the map of the object.
- // We load the prototype from the map in the scratch register.
- __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- }
- // The prototype is in new space; we cannot store a reference
- // to it in the code. Load it from the map.
- reg = holder_reg; // from now the object is in holder_reg
- __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else {
- // Check the map of the current object.
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Immediate(Handle<Map>(current->map())));
- // Branch on the result of the map check.
- __ j(not_equal, miss, not_taken);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
- // The prototype is in old space; load it directly.
- reg = holder_reg; // from now the object is in holder_reg
- __ mov(reg, Handle<JSObject>(prototype));
- }
-
- if (save_at_depth == depth) {
- __ mov(Operand(esp, kPointerSize), reg);
- }
-
- // Go to the next object in the prototype chain.
- current = prototype;
- }
- ASSERT(current == holder);
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- // Check the holder map.
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Immediate(Handle<Map>(holder->map())));
- __ j(not_equal, miss, not_taken);
-
- // Perform security check for access to the global object.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- if (holder->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- };
-
- // If we've skipped any global objects, it's not enough to verify
- // that their maps haven't changed. We also need to check that the
- // property cell for the property is still empty.
- MaybeObject* result = GenerateCheckPropertyCells(masm(),
- object,
- holder,
- name,
- scratch1,
- miss);
- if (result->IsFailure()) set_failure(Failure::cast(result));
-
- // Return the register containing the holder.
- return reg;
-}
-
-
-void StubCompiler::GenerateLoadField(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int index,
- String* name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
-
- // Check the prototype chain.
- Register reg =
- CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, scratch3, name, miss);
-
- // Get the value from the properties.
- GenerateFastPropertyLoad(masm(), eax, reg, holder, index);
- __ ret(0);
-}
-
-
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
-
- // Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder, scratch1,
- scratch2, scratch3, name, miss);
-
- Handle<AccessorInfo> callback_handle(callback);
-
- // Insert additional parameters into the stack frame above return address.
- ASSERT(!scratch3.is(reg));
- __ pop(scratch3); // Get return address to place it below.
-
- __ push(receiver); // receiver
- __ mov(scratch2, Operand(esp));
- ASSERT(!scratch2.is(reg));
- __ push(reg); // holder
- // Push data from AccessorInfo.
- if (isolate()->heap()->InNewSpace(callback_handle->data())) {
- __ mov(scratch1, Immediate(callback_handle));
- __ push(FieldOperand(scratch1, AccessorInfo::kDataOffset));
- } else {
- __ push(Immediate(Handle<Object>(callback_handle->data())));
- }
-
- // Save a pointer to where we pushed the arguments pointer.
- // This will be passed as the const AccessorInfo& to the C++ callback.
- __ push(scratch2);
-
- __ push(name_reg); // name
- __ mov(ebx, esp); // esp points to reference to name (handler).
-
- __ push(scratch3); // Restore return address.
-
- // Do call through the api.
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
-
- // 3 elements array for v8::Agruments::values_, handler for name and pointer
- // to the values (it considered as smi in GC).
- const int kStackSpace = 5;
- const int kApiArgc = 2;
-
- __ PrepareCallApiFunction(kApiArgc, eax);
- __ mov(ApiParameterOperand(0), ebx); // name.
- __ add(Operand(ebx), Immediate(kPointerSize));
- __ mov(ApiParameterOperand(1), ebx); // arguments pointer.
-
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
- return masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
-}
-
-
-void StubCompiler::GenerateLoadConstant(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Object* value,
- String* name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
-
- // Check that the maps haven't changed.
- CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, scratch3, name, miss);
-
- // Return the constant value.
- __ mov(eax, Handle<Object>(value));
- __ ret(0);
-}
-
-
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- String* name,
- Label* miss) {
- ASSERT(interceptor_holder->HasNamedInterceptor());
- ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
-
- // So far the most popular follow ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only them, other cases may be added
- // later.
- bool compile_followup_inline = false;
- if (lookup->IsProperty() && lookup->IsCacheable()) {
- if (lookup->type() == FIELD) {
- compile_followup_inline = true;
- } else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo() &&
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
- compile_followup_inline = true;
- }
- }
-
- if (compile_followup_inline) {
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- __ EnterInternalFrame();
-
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- // CALLBACKS case needs a receiver to be passed into C++ callback.
- __ push(receiver);
- }
- __ push(holder_reg);
- __ push(name_reg);
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
- holder_reg,
- name_reg,
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ cmp(eax, factory()->no_interceptor_result_sentinel());
- __ j(equal, &interceptor_failed);
- __ LeaveInternalFrame();
- __ ret(0);
-
- __ bind(&interceptor_failed);
- __ pop(name_reg);
- __ pop(holder_reg);
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- __ pop(receiver);
- }
-
- __ LeaveInternalFrame();
-
- // Check that the maps from interceptor's holder to lookup's holder
- // haven't changed. And load lookup's holder into holder_reg.
- if (interceptor_holder != lookup->holder()) {
- holder_reg = CheckPrototypes(interceptor_holder,
- holder_reg,
- lookup->holder(),
- scratch1,
- scratch2,
- scratch3,
- name,
- miss);
- }
-
- if (lookup->type() == FIELD) {
- // We found FIELD property in prototype chain of interceptor's holder.
- // Retrieve a field from field's holder.
- GenerateFastPropertyLoad(masm(), eax, holder_reg,
- lookup->holder(), lookup->GetFieldIndex());
- __ ret(0);
- } else {
- // We found CALLBACKS property in prototype chain of interceptor's
- // holder.
- ASSERT(lookup->type() == CALLBACKS);
- ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
- ASSERT(callback != NULL);
- ASSERT(callback->getter() != NULL);
-
- // Tail call to runtime.
- // Important invariant in CALLBACKS case: the code above must be
- // structured to never clobber |receiver| register.
- __ pop(scratch2); // return address
- __ push(receiver);
- __ push(holder_reg);
- __ mov(holder_reg, Immediate(Handle<AccessorInfo>(callback)));
- __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
- __ push(holder_reg);
- __ push(name_reg);
- __ push(scratch2); // restore return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
- masm()->isolate());
- __ TailCallExternalReference(ref, 5, 1);
- }
- } else { // !compile_followup_inline
- // Call the runtime system to load the interceptor.
- // Check that the maps haven't changed.
- Register holder_reg =
- CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3, name, miss);
- __ pop(scratch2); // save old return address
- PushInterceptorArguments(masm(), receiver, holder_reg,
- name_reg, interceptor_holder);
- __ push(scratch2); // restore old return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
- isolate());
- __ TailCallExternalReference(ref, 5, 1);
- }
-}
-
-
-void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
- if (kind_ == Code::KEYED_CALL_IC) {
- __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
- __ j(not_equal, miss, not_taken);
- }
-}
-
-
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
- JSObject* holder,
- String* name,
- Label* miss) {
- ASSERT(holder->IsGlobalObject());
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- // Get the receiver from the stack.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // If the object is the holder then we know that it's a global
- // object which can only happen for contextual calls. In this case,
- // the receiver cannot be a smi.
- if (object != holder) {
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
- }
-
- // Check that the maps haven't changed.
- CheckPrototypes(object, edx, holder, ebx, eax, edi, name, miss);
-}
-
-
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
- JSFunction* function,
- Label* miss) {
- // Get the value from the cell.
- if (Serializer::enabled()) {
- __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
- __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
- } else {
- __ mov(edi, Operand::Cell(Handle<JSGlobalPropertyCell>(cell)));
- }
-
- // Check that the cell contains the same function.
- if (isolate()->heap()->InNewSpace(function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, miss, not_taken);
-
- // Check the shared function info. Make sure it hasn't changed.
- __ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
- Immediate(Handle<SharedFunctionInfo>(function->shared())));
- __ j(not_equal, miss, not_taken);
- } else {
- __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
- __ j(not_equal, miss, not_taken);
- }
-}
-
-
-MaybeObject* CallStubCompiler::GenerateMissBranch() {
- MaybeObject* maybe_obj =
- isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
- kind_);
- Object* obj;
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- __ jmp(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
- return obj;
-}
-
-
-MUST_USE_RESULT MaybeObject* CallStubCompiler::CompileCallField(
- JSObject* object,
- JSObject* holder,
- int index,
- String* name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
-
- // Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, edx, holder, ebx, eax, edi,
- name, &miss);
-
- GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
-
- // Check that the function really is a function.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &miss, not_taken);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
-
- // Invoke the function.
- __ InvokeFunction(edi, arguments(), JUMP_FUNCTION);
-
- // Handle call cache miss.
- __ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(FIELD, name);
-}
-
-
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) {
- return isolate()->heap()->undefined_value();
- }
-
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
-
- CheckPrototypes(JSObject::cast(object), edx,
- holder, ebx,
- eax, edi, name, &miss);
-
- if (argc == 0) {
- // Noop, return the length.
- __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
- __ ret((argc + 1) * kPointerSize);
- } else {
- Label call_builtin;
-
- // Get the elements array of the object.
- __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(factory()->fixed_array_map()));
- __ j(not_equal, &call_builtin);
-
- if (argc == 1) { // Otherwise fall through to call builtin.
- Label exit, with_write_barrier, attempt_to_grow_elements;
-
- // Get the array's length into eax and calculate new length.
- __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ add(Operand(eax), Immediate(Smi::FromInt(argc)));
-
- // Get the element's length into ecx.
- __ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmp(eax, Operand(ecx));
- __ j(greater, &attempt_to_grow_elements);
-
- // Save new length.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
-
- // Push the element.
- __ lea(edx, FieldOperand(ebx,
- eax, times_half_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ mov(ecx, Operand(esp, argc * kPointerSize));
- __ mov(Operand(edx, 0), ecx);
-
- // Check if value is a smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &with_write_barrier);
-
- __ bind(&exit);
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&with_write_barrier);
-
- __ InNewSpace(ebx, ecx, equal, &exit);
-
- __ RecordWriteHelper(ebx, edx, ecx);
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&attempt_to_grow_elements);
- if (!FLAG_inline_new) {
- __ jmp(&call_builtin);
- }
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- const int kAllocationDelta = 4;
- // Load top.
- __ mov(ecx, Operand::StaticVariable(new_space_allocation_top));
-
- // Check if it's the end of elements.
- __ lea(edx, FieldOperand(ebx,
- eax, times_half_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ cmp(edx, Operand(ecx));
- __ j(not_equal, &call_builtin);
- __ add(Operand(ecx), Immediate(kAllocationDelta * kPointerSize));
- __ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
- __ j(above, &call_builtin);
-
- // We fit and could grow elements.
- __ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
- __ mov(ecx, Operand(esp, argc * kPointerSize));
-
- // Push the argument...
- __ mov(Operand(edx, 0), ecx);
- // ... and fill the rest with holes.
- for (int i = 1; i < kAllocationDelta; i++) {
- __ mov(Operand(edx, i * kPointerSize),
- Immediate(factory()->the_hole_value()));
- }
-
- // Restore receiver to edx as finish sequence assumes it's here.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Increment element's and array's sizes.
- __ add(FieldOperand(ebx, FixedArray::kLengthOffset),
- Immediate(Smi::FromInt(kAllocationDelta)));
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
-
- // Elements are in new space, so write barrier is not required.
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPush, isolate()),
- argc + 1,
- 1);
- }
-
- __ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) {
- return heap()->undefined_value();
- }
-
- Label miss, return_undefined, call_builtin;
-
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
- CheckPrototypes(JSObject::cast(object), edx,
- holder, ebx,
- eax, edi, name, &miss);
-
- // Get the elements array of the object.
- __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(factory()->fixed_array_map()));
- __ j(not_equal, &call_builtin);
-
- // Get the array's length into ecx and calculate new length.
- __ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset));
- __ sub(Operand(ecx), Immediate(Smi::FromInt(1)));
- __ j(negative, &return_undefined);
-
- // Get the last element.
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(eax, FieldOperand(ebx,
- ecx, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(Operand(eax), Immediate(factory()->the_hole_value()));
- __ j(equal, &call_builtin);
-
- // Set the array's length.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), ecx);
-
- // Fill with the hole.
- __ mov(FieldOperand(ebx,
- ecx, times_half_pointer_size,
- FixedArray::kHeaderSize),
- Immediate(factory()->the_hole_value()));
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&return_undefined);
- __ mov(eax, Immediate(factory()->undefined_value()));
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPop, isolate()),
- argc + 1,
- 1);
-
- __ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- ecx : function name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) {
- return isolate()->heap()->undefined_value();
- }
-
- const int argc = arguments().immediate();
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
- index_out_of_range_label = &miss;
- }
-
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- eax,
- &miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, edi, name, &miss);
-
- Register receiver = ebx;
- Register index = edi;
- Register scratch = edx;
- Register result = eax;
- __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
- if (argc > 0) {
- __ mov(index, Operand(esp, (argc - 0) * kPointerSize));
- } else {
- __ Set(index, Immediate(factory()->undefined_value()));
- }
-
- StringCharCodeAtGenerator char_code_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_code_at_generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- char_code_at_generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ Set(eax, Immediate(factory()->nan_value()));
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&miss);
- // Restore function name in ecx.
- __ Set(ecx, Immediate(Handle<String>(name)));
- __ bind(&name_miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- ecx : function name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) {
- return heap()->undefined_value();
- }
-
- const int argc = arguments().immediate();
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
- index_out_of_range_label = &miss;
- }
-
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- eax,
- &miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, edi, name, &miss);
-
- Register receiver = eax;
- Register index = edi;
- Register scratch1 = ebx;
- Register scratch2 = edx;
- Register result = eax;
- __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
- if (argc > 0) {
- __ mov(index, Operand(esp, (argc - 0) * kPointerSize));
- } else {
- __ Set(index, Immediate(factory()->undefined_value()));
- }
-
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch1,
- scratch2,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_at_generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ Set(eax, Immediate(factory()->empty_string()));
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&miss);
- // Restore function name in ecx.
- __ Set(ecx, Immediate(Handle<String>(name)));
- __ bind(&name_miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- ecx : function name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) {
- return isolate()->heap()->undefined_value();
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell == NULL) {
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
-
- CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
- &miss);
- } else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the char code argument.
- Register code = ebx;
- __ mov(code, Operand(esp, 1 * kPointerSize));
-
- // Check the code is a smi.
- Label slow;
- STATIC_ASSERT(kSmiTag == 0);
- __ test(code, Immediate(kSmiTagMask));
- __ j(not_zero, &slow);
-
- // Convert the smi code to uint16.
- __ and_(code, Immediate(Smi::FromInt(0xffff)));
-
- StringCharFromCodeGenerator char_from_code_generator(code, eax);
- char_from_code_generator.GenerateFast(masm());
- __ ret(2 * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- char_from_code_generator.GenerateSlow(masm(), call_helper);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
-
- __ bind(&miss);
- // ecx: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
-}
-
-
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- if (!CpuFeatures::IsSupported(SSE2)) {
- return isolate()->heap()->undefined_value();
- }
-
- CpuFeatures::Scope use_sse2(SSE2);
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) {
- return isolate()->heap()->undefined_value();
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell == NULL) {
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
-
- CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
- &miss);
- } else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into eax.
- __ mov(eax, Operand(esp, 1 * kPointerSize));
-
- // Check if the argument is a smi.
- Label smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &smi);
-
- // Check if the argument is a heap number and load its value into xmm0.
- Label slow;
- __ CheckMap(eax, factory()->heap_number_map(), &slow, true);
- __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
-
- // Check if the argument is strictly positive. Note this also
- // discards NaN.
- __ xorpd(xmm1, xmm1);
- __ ucomisd(xmm0, xmm1);
- __ j(below_equal, &slow);
-
- // Do a truncating conversion.
- __ cvttsd2si(eax, Operand(xmm0));
-
- // Check if the result fits into a smi. Note this also checks for
- // 0x80000000 which signals a failed conversion.
- Label wont_fit_into_smi;
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &wont_fit_into_smi);
-
- // Smi tag and return.
- __ SmiTag(eax);
- __ bind(&smi);
- __ ret(2 * kPointerSize);
-
- // Check if the argument is < 2^kMantissaBits.
- Label already_round;
- __ bind(&wont_fit_into_smi);
- __ LoadPowerOf2(xmm1, ebx, HeapNumber::kMantissaBits);
- __ ucomisd(xmm0, xmm1);
- __ j(above_equal, &already_round);
-
- // Save a copy of the argument.
- __ movaps(xmm2, xmm0);
-
- // Compute (argument + 2^kMantissaBits) - 2^kMantissaBits.
- __ addsd(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
-
- // Compare the argument and the tentative result to get the right mask:
- // if xmm2 < xmm0:
- // xmm2 = 1...1
- // else:
- // xmm2 = 0...0
- __ cmpltsd(xmm2, xmm0);
-
- // Subtract 1 if the argument was less than the tentative result.
- __ LoadPowerOf2(xmm1, ebx, 0);
- __ andpd(xmm1, xmm2);
- __ subsd(xmm0, xmm1);
-
- // Return a new heap number.
- __ AllocateHeapNumber(eax, ebx, edx, &slow);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(2 * kPointerSize);
-
- // Return the argument (when it's an already round heap number).
- __ bind(&already_round);
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- __ ret(2 * kPointerSize);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
-
- __ bind(&miss);
- // ecx: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
-}
-
-
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) {
- return isolate()->heap()->undefined_value();
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell == NULL) {
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
-
- CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
- &miss);
- } else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into eax.
- __ mov(eax, Operand(esp, 1 * kPointerSize));
-
- // Check if the argument is a smi.
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smi);
-
- // Set ebx to 1...1 (== -1) if the argument is negative, or to 0...0
- // otherwise.
- __ mov(ebx, eax);
- __ sar(ebx, kBitsPerInt - 1);
-
- // Do bitwise not or do nothing depending on ebx.
- __ xor_(eax, Operand(ebx));
-
- // Add 1 or do nothing depending on ebx.
- __ sub(eax, Operand(ebx));
-
- // If the result is still negative, go to the slow case.
- // This only happens for the most negative smi.
- Label slow;
- __ j(negative, &slow);
-
- // Smi case done.
- __ ret(2 * kPointerSize);
-
- // Check if the argument is a heap number and load its exponent and
- // sign into ebx.
- __ bind(&not_smi);
- __ CheckMap(eax, factory()->heap_number_map(), &slow, true);
- __ mov(ebx, FieldOperand(eax, HeapNumber::kExponentOffset));
-
- // Check the sign of the argument. If the argument is positive,
- // just return it.
- Label negative_sign;
- __ test(ebx, Immediate(HeapNumber::kSignMask));
- __ j(not_zero, &negative_sign);
- __ ret(2 * kPointerSize);
-
- // If the argument is negative, clear the sign, and return a new
- // number.
- __ bind(&negative_sign);
- __ and_(ebx, ~HeapNumber::kSignMask);
- __ mov(ecx, FieldOperand(eax, HeapNumber::kMantissaOffset));
- __ AllocateHeapNumber(eax, edi, edx, &slow);
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ebx);
- __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
- __ ret(2 * kPointerSize);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
-
- __ bind(&miss);
- // ecx: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
-}
-
-
-MaybeObject* CallStubCompiler::CompileFastApiCall(
- const CallOptimization& optimization,
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- ASSERT(optimization.is_simple_api_call());
- // Bail out if object is a global object as we don't want to
- // repatch it to global receiver.
- if (object->IsGlobalObject()) return heap()->undefined_value();
- if (cell != NULL) return heap()->undefined_value();
- int depth = optimization.GetPrototypeDepthOfExpectedType(
- JSObject::cast(object), holder);
- if (depth == kInvalidProtoDepth) return heap()->undefined_value();
-
- Label miss, miss_before_stack_reserved;
-
- GenerateNameCheck(name, &miss_before_stack_reserved);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss_before_stack_reserved, not_taken);
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_const(), 1);
- __ IncrementCounter(counters->call_const_fast_api(), 1);
-
- // Allocate space for v8::Arguments implicit values. Must be initialized
- // before calling any runtime function.
- __ sub(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize));
-
- // Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(JSObject::cast(object), edx, holder,
- ebx, eax, edi, name, depth, &miss);
-
- // Move the return address on top of the stack.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- __ mov(Operand(esp, 0 * kPointerSize), eax);
-
- // esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains
- // duplicate of return address and will be overwritten.
- MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
- if (result->IsFailure()) return result;
-
- __ bind(&miss);
- __ add(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize));
-
- __ bind(&miss_before_stack_reserved);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
- CheckType check) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, NULL, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
- }
-
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- if (check != NUMBER_CHECK) {
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
- }
-
- // Make sure that it's okay not to patch the on stack receiver
- // unless we're doing a receiver map check.
- ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
- SharedFunctionInfo* function_info = function->shared();
- switch (check) {
- case RECEIVER_MAP_CHECK:
- __ IncrementCounter(isolate()->counters()->call_const(), 1);
-
- // Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), edx, holder,
- ebx, eax, edi, name, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
- break;
-
- case STRING_CHECK:
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- } else {
- // Check that the object is a string or a symbol.
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
- __ j(above_equal, &miss, not_taken);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, edi, name, &miss);
- }
- break;
-
- case NUMBER_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- } else {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &fast, taken);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
- __ j(not_equal, &miss, not_taken);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, edi, name, &miss);
- }
- break;
- }
-
- case BOOLEAN_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- } else {
- Label fast;
- // Check that the object is a boolean.
- __ cmp(edx, factory()->true_value());
- __ j(equal, &fast, taken);
- __ cmp(edx, factory()->false_value());
- __ j(not_equal, &miss, not_taken);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, edi, name, &miss);
- }
- break;
- }
-
- default:
- UNREACHABLE();
- }
-
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
-
- // Handle call cache miss.
- __ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- LookupResult lookup;
- LookupPostInterceptor(holder, name, &lookup);
-
- // Get the receiver from the stack.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- CallInterceptorCompiler compiler(this, arguments(), ecx);
- MaybeObject* result = compiler.Compile(masm(),
- object,
- holder,
- name,
- &lookup,
- edx,
- ebx,
- edi,
- eax,
- &miss);
- if (result->IsFailure()) return result;
-
- // Restore receiver.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the function really is a function.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &miss, not_taken);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
-
- // Invoke the function.
- __ mov(edi, eax);
- __ InvokeFunction(edi, arguments(), JUMP_FUNCTION);
-
- // Handle load cache miss.
- __ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(INTERCEPTOR, name);
-}
-
-
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, cell, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
- }
-
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- GenerateGlobalReceiverCheck(object, holder, name, &miss);
-
- GenerateLoadFunctionFromCell(cell, function, &miss);
-
- // Patch the receiver on the stack with the global proxy.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
-
- // Setup the context (function already in edi).
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_global_inline(), 1);
- ASSERT(function->is_compiled());
- ParameterCount expected(function->shared()->formal_parameter_count());
- if (V8::UseCrankshaft()) {
- // TODO(kasperl): For now, we always call indirectly through the
- // code field in the function to allow recompilation to take effect
- // without changing any of the call sites.
- __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, arguments(), JUMP_FUNCTION);
- } else {
- Handle<Code> code(function->code());
- __ InvokeCode(code, expected, arguments(),
- RelocInfo::CODE_TARGET, JUMP_FUNCTION);
- }
-
- // Handle call cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->call_global_inline_miss(), 1);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(NORMAL, name);
-}
-
-
-MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
- int index,
- Map* transition,
- String* name) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Generate store field code. Trashes the name register.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- edx, ecx, ebx,
- &miss);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ mov(ecx, Immediate(Handle<String>(name))); // restore name
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
-}
-
-
-MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
- AccessorInfo* callback,
- String* name) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the object isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
-
- // Check that the map of the object hasn't changed.
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(Handle<Map>(object->map())));
- __ j(not_equal, &miss, not_taken);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(edx, ebx, &miss);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- __ pop(ebx); // remove the return address
- __ push(edx); // receiver
- __ push(Immediate(Handle<AccessorInfo>(callback))); // callback info
- __ push(ecx); // name
- __ push(eax); // value
- __ push(ebx); // restore return address
-
- // Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(CALLBACKS, name);
-}
-
-
-MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
- String* name) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the object isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
-
- // Check that the map of the object hasn't changed.
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(Handle<Map>(receiver->map())));
- __ j(not_equal, &miss, not_taken);
-
- // Perform global security token check if needed.
- if (receiver->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(edx, ebx, &miss);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
-
- __ pop(ebx); // remove the return address
- __ push(edx); // receiver
- __ push(ecx); // name
- __ push(eax); // value
- __ push(Immediate(Smi::FromInt(strict_mode_)));
- __ push(ebx); // restore return address
-
- // Do tail-call to the runtime system.
- ExternalReference store_ic_property =
- ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
- __ TailCallExternalReference(store_ic_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(INTERCEPTOR, name);
-}
-
-
-MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
- JSGlobalPropertyCell* cell,
- String* name) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the global has not changed.
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(Handle<Map>(object->map())));
- __ j(not_equal, &miss, not_taken);
-
-
- // Compute the cell operand to use.
- Operand cell_operand = Operand::Cell(Handle<JSGlobalPropertyCell>(cell));
- if (Serializer::enabled()) {
- __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
- cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset);
- }
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted and reintroducing the global needs
- // to update the property details in the property dictionary of the
- // global object. We bail out to the runtime system to do that.
- __ cmp(cell_operand, factory()->the_hole_value());
- __ j(equal, &miss);
-
- // Store the value in the cell.
- __ mov(cell_operand, eax);
-
- // Return the value (register eax).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_store_global_inline(), 1);
- __ ret(0);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->named_store_global_inline_miss(), 1);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(NORMAL, name);
-}
-
-
-MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
- int index,
- Map* transition,
- String* name) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_store_field(), 1);
-
- // Check that the name has not changed.
- __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
-
- // Generate store field code. Trashes the name register.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- edx, ecx, ebx,
- &miss);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_store_field(), 1);
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
-}
-
-
-MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
- JSObject* receiver) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
-
- // Check that the map matches.
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(Handle<Map>(receiver->map())));
- __ j(not_equal, &miss, not_taken);
-
- // Check that the key is a smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &miss, not_taken);
-
- // Get the elements array and make sure it is a fast element array, not 'cow'.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(factory()->fixed_array_map()));
- __ j(not_equal, &miss, not_taken);
-
- // Check that the key is within bounds.
- if (receiver->IsJSArray()) {
- __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // Compare smis.
- __ j(above_equal, &miss, not_taken);
- } else {
- __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // Compare smis.
- __ j(above_equal, &miss, not_taken);
- }
-
- // Do the store and update the write barrier. Make sure to preserve
- // the value in register eax.
- __ mov(edx, Operand(eax));
- __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax);
- __ RecordWrite(edi, 0, edx, ecx);
-
- // Done.
- __ ret(0);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(NORMAL, NULL);
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
- JSObject* object,
- JSObject* last) {
- // ----------- S t a t e -------------
- // -- eax : receiver
- // -- ecx : name
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the receiver isn't a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
-
- ASSERT(last->IsGlobalObject() || last->HasFastProperties());
-
- // Check the maps of the full prototype chain. Also check that
- // global property cells up to (but not including) the last object
- // in the prototype chain are empty.
- CheckPrototypes(object, eax, last, ebx, edx, edi, name, &miss);
-
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (last->IsGlobalObject()) {
- MaybeObject* cell = GenerateCheckPropertyCell(masm(),
- GlobalObject::cast(last),
- name,
- edx,
- &miss);
- if (cell->IsFailure()) {
- miss.Unuse();
- return cell;
- }
- }
-
- // Return undefined if maps of the full prototype chain are still the
- // same and no global property with this name contains a value.
- __ mov(eax, isolate()->factory()->undefined_value());
- __ ret(0);
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(NONEXISTENT, isolate()->heap()->empty_string());
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
- JSObject* holder,
- int index,
- String* name) {
- // ----------- S t a t e -------------
- // -- eax : receiver
- // -- ecx : name
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- GenerateLoadField(object, holder, eax, ebx, edx, edi, index, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(FIELD, name);
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback) {
- // ----------- S t a t e -------------
- // -- eax : receiver
- // -- ecx : name
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- MaybeObject* result = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
- edi, callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(CALLBACKS, name);
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
- JSObject* holder,
- Object* value,
- String* name) {
- // ----------- S t a t e -------------
- // -- eax : receiver
- // -- ecx : name
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- GenerateLoadConstant(object, holder, eax, ebx, edx, edi, value, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
- JSObject* holder,
- String* name) {
- // ----------- S t a t e -------------
- // -- eax : receiver
- // -- ecx : name
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- LookupResult lookup;
- LookupPostInterceptor(holder, name, &lookup);
-
- // TODO(368): Compile in the whole chain: all the interceptors in
- // prototypes and ultimate answer.
- GenerateLoadInterceptor(receiver,
- holder,
- &lookup,
- eax,
- ecx,
- edx,
- ebx,
- edi,
- name,
- &miss);
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(INTERCEPTOR, name);
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- String* name,
- bool is_dont_delete) {
- // ----------- S t a t e -------------
- // -- eax : receiver
- // -- ecx : name
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // If the object is the holder then we know that it's a global
- // object which can only happen for contextual loads. In this case,
- // the receiver cannot be a smi.
- if (object != holder) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
- }
-
- // Check that the maps haven't changed.
- CheckPrototypes(object, eax, holder, ebx, edx, edi, name, &miss);
-
- // Get the value from the cell.
- if (Serializer::enabled()) {
- __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
- __ mov(ebx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
- } else {
- __ mov(ebx, Operand::Cell(Handle<JSGlobalPropertyCell>(cell)));
- }
-
- // Check for deleted property if property can actually be deleted.
- if (!is_dont_delete) {
- __ cmp(ebx, factory()->the_hole_value());
- __ j(equal, &miss, not_taken);
- } else if (FLAG_debug_code) {
- __ cmp(ebx, factory()->the_hole_value());
- __ Check(not_equal, "DontDelete cells can't contain the hole");
- }
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1);
- __ mov(eax, ebx);
- __ ret(0);
-
- __ bind(&miss);
- __ IncrementCounter(counters->named_load_global_stub_miss(), 1);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(NORMAL, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
- int index) {
- // ----------- S t a t e -------------
- // -- eax : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_field(), 1);
-
- // Check that the name has not changed.
- __ cmp(Operand(eax), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
-
- GenerateLoadField(receiver, holder, edx, ebx, ecx, edi, index, name, &miss);
-
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_field(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(FIELD, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
- String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback) {
- // ----------- S t a t e -------------
- // -- eax : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_callback(), 1);
-
- // Check that the name has not changed.
- __ cmp(Operand(eax), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
-
- MaybeObject* result = GenerateLoadCallback(receiver, holder, edx, eax, ebx,
- ecx, edi, callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
-
- __ bind(&miss);
-
- __ DecrementCounter(counters->keyed_load_callback(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(CALLBACKS, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value) {
- // ----------- S t a t e -------------
- // -- eax : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_constant_function(), 1);
-
- // Check that the name has not changed.
- __ cmp(Operand(eax), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
-
- GenerateLoadConstant(receiver, holder, edx, ebx, ecx, edi,
- value, name, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_constant_function(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
- JSObject* holder,
- String* name) {
- // ----------- S t a t e -------------
- // -- eax : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_interceptor(), 1);
-
- // Check that the name has not changed.
- __ cmp(Operand(eax), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
-
- LookupResult lookup;
- LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver,
- holder,
- &lookup,
- edx,
- eax,
- ecx,
- ebx,
- edi,
- name,
- &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_interceptor(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(INTERCEPTOR, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
- // ----------- S t a t e -------------
- // -- eax : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_array_length(), 1);
-
- // Check that the name has not changed.
- __ cmp(Operand(eax), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
-
- GenerateLoadArrayLength(masm(), edx, ecx, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_array_length(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(CALLBACKS, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
- // ----------- S t a t e -------------
- // -- eax : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_string_length(), 1);
-
- // Check that the name has not changed.
- __ cmp(Operand(eax), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
-
- GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_string_length(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(CALLBACKS, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
- // ----------- S t a t e -------------
- // -- eax : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_function_prototype(), 1);
-
- // Check that the name has not changed.
- __ cmp(Operand(eax), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
-
- GenerateLoadFunctionPrototype(masm(), edx, ecx, ebx, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_function_prototype(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(CALLBACKS, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
- // ----------- S t a t e -------------
- // -- eax : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
-
- // Check that the map matches.
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(Handle<Map>(receiver->map())));
- __ j(not_equal, &miss, not_taken);
-
- // Check that the key is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &miss, not_taken);
-
- // Get the elements array.
- __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
- __ AssertFastElements(ecx);
-
- // Check that the key is within bounds.
- __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ j(above_equal, &miss, not_taken);
-
- // Load the result and make sure it's not the hole.
- __ mov(ebx, Operand(ecx, eax, times_2,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ cmp(ebx, factory()->the_hole_value());
- __ j(equal, &miss, not_taken);
- __ mov(eax, ebx);
- __ ret(0);
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(NORMAL, NULL);
-}
-
-
-// Specialized stub for constructing objects from functions which only have only
-// simple assignments of the form this.x = ...; in their body.
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- edi : constructor
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
- Label generic_stub_call;
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check to see whether there are any break points in the function code. If
- // there are jump to the generic constructor stub which calls the actual
- // code for the function thereby hitting the break points.
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kDebugInfoOffset));
- __ cmp(ebx, factory()->undefined_value());
- __ j(not_equal, &generic_stub_call, not_taken);
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &generic_stub_call);
- __ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ j(not_equal, &generic_stub_call);
-
-#ifdef DEBUG
- // Cannot construct functions this way.
- // edi: constructor
- // ebx: initial map
- __ CmpInstanceType(ebx, JS_FUNCTION_TYPE);
- __ Assert(not_equal, "Function constructed by construct stub.");
-#endif
-
- // Now allocate the JSObject on the heap by moving the new space allocation
- // top forward.
- // edi: constructor
- // ebx: initial map
- __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
- __ shl(ecx, kPointerSizeLog2);
- __ AllocateInNewSpace(ecx,
- edx,
- ecx,
- no_reg,
- &generic_stub_call,
- NO_ALLOCATION_FLAGS);
-
- // Allocated the JSObject, now initialize the fields and add the heap tag.
- // ebx: initial map
- // edx: JSObject (untagged)
- __ mov(Operand(edx, JSObject::kMapOffset), ebx);
- __ mov(ebx, factory()->empty_fixed_array());
- __ mov(Operand(edx, JSObject::kPropertiesOffset), ebx);
- __ mov(Operand(edx, JSObject::kElementsOffset), ebx);
-
- // Push the allocated object to the stack. This is the object that will be
- // returned (after it is tagged).
- __ push(edx);
-
- // eax: argc
- // edx: JSObject (untagged)
- // Load the address of the first in-object property into edx.
- __ lea(edx, Operand(edx, JSObject::kHeaderSize));
- // Calculate the location of the first argument. The stack contains the
- // allocated object and the return address on top of the argc arguments.
- __ lea(ecx, Operand(esp, eax, times_4, 1 * kPointerSize));
-
- // Use edi for holding undefined which is used in several places below.
- __ mov(edi, factory()->undefined_value());
-
- // eax: argc
- // ecx: first argument
- // edx: first in-object property of the JSObject
- // edi: undefined
- // Fill the initialized properties with a constant value or a passed argument
- // depending on the this.x = ...; assignment in the function.
- SharedFunctionInfo* shared = function->shared();
- for (int i = 0; i < shared->this_property_assignments_count(); i++) {
- if (shared->IsThisPropertyAssignmentArgument(i)) {
- // Check if the argument assigned to the property is actually passed.
- // If argument is not passed the property is set to undefined,
- // otherwise find it on the stack.
- int arg_number = shared->GetThisPropertyAssignmentArgument(i);
- __ mov(ebx, edi);
- __ cmp(eax, arg_number);
- if (CpuFeatures::IsSupported(CMOV)) {
- CpuFeatures::Scope use_cmov(CMOV);
- __ cmov(above, ebx, Operand(ecx, arg_number * -kPointerSize));
- } else {
- Label not_passed;
- __ j(below_equal, &not_passed);
- __ mov(ebx, Operand(ecx, arg_number * -kPointerSize));
- __ bind(&not_passed);
- }
- // Store value in the property.
- __ mov(Operand(edx, i * kPointerSize), ebx);
- } else {
- // Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
- __ mov(Operand(edx, i * kPointerSize), Immediate(constant));
- }
- }
-
- // Fill the unused in-object property fields with undefined.
- ASSERT(function->has_initial_map());
- for (int i = shared->this_property_assignments_count();
- i < function->initial_map()->inobject_properties();
- i++) {
- __ mov(Operand(edx, i * kPointerSize), edi);
- }
-
- // Move argc to ebx and retrieve and tag the JSObject to return.
- __ mov(ebx, eax);
- __ pop(eax);
- __ or_(Operand(eax), Immediate(kHeapObjectTag));
-
- // Remove caller arguments and receiver from the stack and return.
- __ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
- __ push(ecx);
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1);
- __ IncrementCounter(counters->constructed_objects_stub(), 1);
- __ ret(0);
-
- // Jump to the generic stub in case the specialized code cannot handle the
- // construction.
- __ bind(&generic_stub_call);
- Handle<Code> generic_construct_stub =
- isolate()->builtins()->JSConstructStubGeneric();
- __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode();
-}
-
-
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
- JSObject*receiver, ExternalArrayType array_type, Code::Flags flags) {
- // ----------- S t a t e -------------
- // -- eax : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow, failed_allocation;
-
- // Check that the object isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
-
- // Check that the key is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &slow, not_taken);
-
- // Check that the map matches.
- __ CheckMap(edx, Handle<Map>(receiver->map()), &slow, false);
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-
- // eax: key, known to be a smi.
- // edx: receiver, known to be a JSObject.
- // ebx: elements object, known to be an external array.
- // Check that the index is in range.
- __ mov(ecx, eax);
- __ SmiUntag(ecx); // Untag the index.
- __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
- __ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
- // ebx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
- __ movsx_b(eax, Operand(ebx, ecx, times_1, 0));
- break;
- case kExternalUnsignedByteArray:
- case kExternalPixelArray:
- __ movzx_b(eax, Operand(ebx, ecx, times_1, 0));
- break;
- case kExternalShortArray:
- __ movsx_w(eax, Operand(ebx, ecx, times_2, 0));
- break;
- case kExternalUnsignedShortArray:
- __ movzx_w(eax, Operand(ebx, ecx, times_2, 0));
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ mov(ecx, Operand(ebx, ecx, times_4, 0));
- break;
- case kExternalFloatArray:
- __ fld_s(Operand(ebx, ecx, times_4, 0));
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- // For integer array types:
- // ecx: value
- // For floating-point array type:
- // FP(0): value
-
- if (array_type == kExternalIntArray ||
- array_type == kExternalUnsignedIntArray) {
- // For the Int and UnsignedInt array types, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- Label box_int;
- if (array_type == kExternalIntArray) {
- __ cmp(ecx, 0xC0000000);
- __ j(sign, &box_int);
- } else {
- ASSERT_EQ(array_type, kExternalUnsignedIntArray);
- // The test is different for unsigned int values. Since we need
- // the value to be in the range of a positive smi, we can't
- // handle either of the top two bits being set in the value.
- __ test(ecx, Immediate(0xC0000000));
- __ j(not_zero, &box_int);
- }
-
- __ mov(eax, ecx);
- __ SmiTag(eax);
- __ ret(0);
-
- __ bind(&box_int);
-
- // Allocate a HeapNumber for the int and perform int-to-double
- // conversion.
- if (array_type == kExternalIntArray) {
- __ push(ecx);
- __ fild_s(Operand(esp, 0));
- __ pop(ecx);
- } else {
- ASSERT(array_type == kExternalUnsignedIntArray);
- // Need to zero-extend the value.
- // There's no fild variant for unsigned values, so zero-extend
- // to a 64-bit int manually.
- __ push(Immediate(0));
- __ push(ecx);
- __ fild_d(Operand(esp, 0));
- __ pop(ecx);
- __ pop(ecx);
- }
- // FP(0): value
- __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
- // Set the value.
- __ mov(eax, ecx);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- } else if (array_type == kExternalFloatArray) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
- // Set the value.
- __ mov(eax, ecx);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- } else {
- __ SmiTag(eax);
- __ ret(0);
- }
-
- // If we fail allocation of the HeapNumber, we still have a value on
- // top of the FPU stack. Remove it.
- __ bind(&failed_allocation);
- __ ffree();
- __ fincstp();
- // Fall through to slow case.
-
- // Slow case: Jump to runtime.
- __ bind(&slow);
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
- // ----------- S t a t e -------------
- // -- eax : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ pop(ebx);
- __ push(edx); // receiver
- __ push(eax); // name
- __ push(ebx); // return address
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-
- // Return the generated code.
- return GetCode(flags);
-}
-
-
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
- JSObject* receiver, ExternalArrayType array_type, Code::Flags flags) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow, check_heap_number;
-
- // Check that the object isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &slow);
-
- // Check that the map matches.
- __ CheckMap(edx, Handle<Map>(receiver->map()), &slow, false);
-
- // Check that the key is a smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow);
-
- // Check that the index is in range.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ mov(ebx, ecx);
- __ SmiUntag(ebx);
- __ cmp(ebx, FieldOperand(edi, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // eax: value
- // edx: receiver
- // ecx: key
- // edi: elements array
- // ebx: untagged index
- __ test(eax, Immediate(kSmiTagMask));
- if (array_type == kExternalPixelArray)
- __ j(not_equal, &slow);
- else
- __ j(not_equal, &check_heap_number);
-
- // smi case
- __ mov(ecx, eax); // Preserve the value in eax. Key is no longer needed.
- __ SmiUntag(ecx);
- __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
- // ecx: base pointer of external storage
- switch (array_type) {
- case kExternalPixelArray:
- { // Clamp the value to [0..255].
- NearLabel done;
- __ test(ecx, Immediate(0xFFFFFF00));
- __ j(zero, &done);
- __ setcc(negative, ecx); // 1 if negative, 0 if positive.
- __ dec_b(ecx); // 0 if negative, 255 if positive.
- __ bind(&done);
- }
- __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
- break;
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ mov_w(Operand(edi, ebx, times_2, 0), ecx);
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ mov(Operand(edi, ebx, times_4, 0), ecx);
- break;
- case kExternalFloatArray:
- // Need to perform int-to-float conversion.
- __ push(ecx);
- __ fild_s(Operand(esp, 0));
- __ pop(ecx);
- __ fstp_s(Operand(edi, ebx, times_4, 0));
- break;
- default:
- UNREACHABLE();
- break;
- }
- __ ret(0); // Return the original value.
-
- // TODO(danno): handle heap number -> pixel array conversion
- if (array_type != kExternalPixelArray) {
- __ bind(&check_heap_number);
- // eax: value
- // edx: receiver
- // ecx: key
- // edi: elements array
- // ebx: untagged index
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(factory()->heap_number_map()));
- __ j(not_equal, &slow);
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
- // ebx: untagged index
- // edi: base pointer of external storage
- if (array_type == kExternalFloatArray) {
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ fstp_s(Operand(edi, ebx, times_4, 0));
- __ ret(0);
- } else {
- // Perform float-to-int conversion with truncation (round-to-zero)
- // behavior.
-
- // For the moment we make the slow call to the runtime on
- // processors that don't support SSE2. The code in IntegerConvert
- // (code-stubs-ia32.cc) is roughly what is needed here though the
- // conversion failure case does not need to be handled.
- if (CpuFeatures::IsSupported(SSE2)) {
- if (array_type != kExternalIntArray &&
- array_type != kExternalUnsignedIntArray) {
- ASSERT(CpuFeatures::IsSupported(SSE2));
- CpuFeatures::Scope scope(SSE2);
- __ cvttsd2si(ecx, FieldOperand(eax, HeapNumber::kValueOffset));
- // ecx: untagged integer value
- switch (array_type) {
- case kExternalPixelArray:
- { // Clamp the value to [0..255].
- NearLabel done;
- __ test(ecx, Immediate(0xFFFFFF00));
- __ j(zero, &done);
- __ setcc(negative, ecx); // 1 if negative, 0 if positive.
- __ dec_b(ecx); // 0 if negative, 255 if positive.
- __ bind(&done);
- }
- __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
- break;
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ mov_w(Operand(edi, ebx, times_2, 0), ecx);
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- if (CpuFeatures::IsSupported(SSE3)) {
- CpuFeatures::Scope scope(SSE3);
- // fisttp stores values as signed integers. To represent the
- // entire range of int and unsigned int arrays, store as a
- // 64-bit int and discard the high 32 bits.
- // If the value is NaN or +/-infinity, the result is 0x80000000,
- // which is automatically zero when taken mod 2^n, n < 32.
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ sub(Operand(esp), Immediate(2 * kPointerSize));
- __ fisttp_d(Operand(esp, 0));
- __ pop(ecx);
- __ add(Operand(esp), Immediate(kPointerSize));
- } else {
- ASSERT(CpuFeatures::IsSupported(SSE2));
- CpuFeatures::Scope scope(SSE2);
- // We can easily implement the correct rounding behavior for the
- // range [0, 2^31-1]. For the time being, to keep this code simple,
- // make the slow runtime call for values outside this range.
- // Note: we could do better for signed int arrays.
- __ movd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
- // We will need the key if we have to make the slow runtime call.
- __ push(ecx);
- __ LoadPowerOf2(xmm1, ecx, 31);
- __ pop(ecx);
- __ ucomisd(xmm1, xmm0);
- __ j(above_equal, &slow);
- __ cvttsd2si(ecx, Operand(xmm0));
- }
- // ecx: untagged integer value
- __ mov(Operand(edi, ebx, times_4, 0), ecx);
- }
- __ ret(0); // Return original value.
- }
- }
- }
-
- // Slow case: call runtime.
- __ bind(&slow);
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ pop(ebx);
- __ push(edx);
- __ push(ecx);
- __ push(eax);
- __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
- __ push(Immediate(Smi::FromInt(
- Code::ExtractExtraICStateFromFlags(flags) & kStrictMode)));
- __ push(ebx); // return address
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-
- return GetCode(flags);
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/virtual-frame-ia32.cc b/src/3rdparty/v8/src/ia32/virtual-frame-ia32.cc
deleted file mode 100644
index 0304c32..0000000
--- a/src/3rdparty/v8/src/ia32/virtual-frame-ia32.cc
+++ /dev/null
@@ -1,1366 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::SyncElementBelowStackPointer(int index) {
- // Emit code to write elements below the stack pointer to their
- // (already allocated) stack address.
- ASSERT(index <= stack_pointer_);
- FrameElement element = elements_[index];
- ASSERT(!element.is_synced());
- switch (element.type()) {
- case FrameElement::INVALID:
- break;
-
- case FrameElement::MEMORY:
- // This function should not be called with synced elements.
- // (memory elements are always synced).
- UNREACHABLE();
- break;
-
- case FrameElement::REGISTER:
- __ mov(Operand(ebp, fp_relative(index)), element.reg());
- break;
-
- case FrameElement::CONSTANT:
- if (cgen()->IsUnsafeSmi(element.handle())) {
- cgen()->StoreUnsafeSmiToLocal(fp_relative(index), element.handle());
- } else {
- __ Set(Operand(ebp, fp_relative(index)),
- Immediate(element.handle()));
- }
- break;
-
- case FrameElement::COPY: {
- int backing_index = element.index();
- FrameElement backing_element = elements_[backing_index];
- if (backing_element.is_memory()) {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
- __ mov(Operand(ebp, fp_relative(index)), temp.reg());
- } else {
- ASSERT(backing_element.is_register());
- __ mov(Operand(ebp, fp_relative(index)), backing_element.reg());
- }
- break;
- }
- }
- elements_[index].set_sync();
-}
-
-
-void VirtualFrame::SyncElementByPushing(int index) {
- // Sync an element of the frame that is just above the stack pointer
- // by pushing it.
- ASSERT(index == stack_pointer_ + 1);
- stack_pointer_++;
- FrameElement element = elements_[index];
-
- switch (element.type()) {
- case FrameElement::INVALID:
- __ push(Immediate(Smi::FromInt(0)));
- break;
-
- case FrameElement::MEMORY:
- // No memory elements exist above the stack pointer.
- UNREACHABLE();
- break;
-
- case FrameElement::REGISTER:
- __ push(element.reg());
- break;
-
- case FrameElement::CONSTANT:
- if (cgen()->IsUnsafeSmi(element.handle())) {
- cgen()->PushUnsafeSmi(element.handle());
- } else {
- __ push(Immediate(element.handle()));
- }
- break;
-
- case FrameElement::COPY: {
- int backing_index = element.index();
- FrameElement backing = elements_[backing_index];
- ASSERT(backing.is_memory() || backing.is_register());
- if (backing.is_memory()) {
- __ push(Operand(ebp, fp_relative(backing_index)));
- } else {
- __ push(backing.reg());
- }
- break;
- }
- }
- elements_[index].set_sync();
-}
-
-
-// Clear the dirty bits for the range of elements in
-// [min(stack_pointer_ + 1,begin), end].
-void VirtualFrame::SyncRange(int begin, int end) {
- ASSERT(begin >= 0);
- ASSERT(end < element_count());
- // Sync elements below the range if they have not been materialized
- // on the stack.
- int start = Min(begin, stack_pointer_ + 1);
-
- // Emit normal push instructions for elements above stack pointer
- // and use mov instructions if we are below stack pointer.
- for (int i = start; i <= end; i++) {
- if (!elements_[i].is_synced()) {
- if (i <= stack_pointer_) {
- SyncElementBelowStackPointer(i);
- } else {
- SyncElementByPushing(i);
- }
- }
- }
-}
-
-
-void VirtualFrame::MakeMergable() {
- for (int i = 0; i < element_count(); i++) {
- FrameElement element = elements_[i];
-
- // All number type information is reset to unknown for a mergable frame
- // because of incoming back edges.
- if (element.is_constant() || element.is_copy()) {
- if (element.is_synced()) {
- // Just spill.
- elements_[i] = FrameElement::MemoryElement(TypeInfo::Unknown());
- } else {
- // Allocate to a register.
- FrameElement backing_element; // Invalid if not a copy.
- if (element.is_copy()) {
- backing_element = elements_[element.index()];
- }
- Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid()); // A register was spilled if all were in use.
- elements_[i] =
- FrameElement::RegisterElement(fresh.reg(),
- FrameElement::NOT_SYNCED,
- TypeInfo::Unknown());
- Use(fresh.reg(), i);
-
- // Emit a move.
- if (element.is_constant()) {
- if (cgen()->IsUnsafeSmi(element.handle())) {
- cgen()->MoveUnsafeSmi(fresh.reg(), element.handle());
- } else {
- __ Set(fresh.reg(), Immediate(element.handle()));
- }
- } else {
- ASSERT(element.is_copy());
- // Copies are only backed by register or memory locations.
- if (backing_element.is_register()) {
- // The backing store may have been spilled by allocating,
- // but that's OK. If it was, the value is right where we
- // want it.
- if (!fresh.reg().is(backing_element.reg())) {
- __ mov(fresh.reg(), backing_element.reg());
- }
- } else {
- ASSERT(backing_element.is_memory());
- __ mov(fresh.reg(), Operand(ebp, fp_relative(element.index())));
- }
- }
- }
- // No need to set the copied flag --- there are no copies.
- } else {
- // Clear the copy flag of non-constant, non-copy elements.
- // They cannot be copied because copies are not allowed.
- // The copy flag is not relied on before the end of this loop,
- // including when registers are spilled.
- elements_[i].clear_copied();
- elements_[i].set_type_info(TypeInfo::Unknown());
- }
- }
-}
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected) {
- Comment cmnt(masm(), "[ Merge frame");
- // We should always be merging the code generator's current frame to an
- // expected frame.
- ASSERT(cgen()->frame() == this);
-
- // Adjust the stack pointer upward (toward the top of the virtual
- // frame) if necessary.
- if (stack_pointer_ < expected->stack_pointer_) {
- int difference = expected->stack_pointer_ - stack_pointer_;
- stack_pointer_ = expected->stack_pointer_;
- __ sub(Operand(esp), Immediate(difference * kPointerSize));
- }
-
- MergeMoveRegistersToMemory(expected);
- MergeMoveRegistersToRegisters(expected);
- MergeMoveMemoryToRegisters(expected);
-
- // Adjust the stack pointer downward if necessary.
- if (stack_pointer_ > expected->stack_pointer_) {
- int difference = stack_pointer_ - expected->stack_pointer_;
- stack_pointer_ = expected->stack_pointer_;
- __ add(Operand(esp), Immediate(difference * kPointerSize));
- }
-
- // At this point, the frames should be identical.
- ASSERT(Equals(expected));
-}
-
-
-void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
- ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
- // Move registers, constants, and copies to memory. Perform moves
- // from the top downward in the frame in order to leave the backing
- // stores of copies in registers.
- //
- // Moving memory-backed copies to memory requires a spare register
- // for the memory-to-memory moves. Since we are performing a merge,
- // we use esi (which is already saved in the frame). We keep track
- // of the index of the frame element esi is caching or kIllegalIndex
- // if esi has not been disturbed.
- int esi_caches = kIllegalIndex;
- for (int i = element_count() - 1; i >= 0; i--) {
- FrameElement target = expected->elements_[i];
- if (target.is_register()) continue; // Handle registers later.
- if (target.is_memory()) {
- FrameElement source = elements_[i];
- switch (source.type()) {
- case FrameElement::INVALID:
- // Not a legal merge move.
- UNREACHABLE();
- break;
-
- case FrameElement::MEMORY:
- // Already in place.
- break;
-
- case FrameElement::REGISTER:
- Unuse(source.reg());
- if (!source.is_synced()) {
- __ mov(Operand(ebp, fp_relative(i)), source.reg());
- }
- break;
-
- case FrameElement::CONSTANT:
- if (!source.is_synced()) {
- if (cgen()->IsUnsafeSmi(source.handle())) {
- esi_caches = i;
- cgen()->MoveUnsafeSmi(esi, source.handle());
- __ mov(Operand(ebp, fp_relative(i)), esi);
- } else {
- __ Set(Operand(ebp, fp_relative(i)), Immediate(source.handle()));
- }
- }
- break;
-
- case FrameElement::COPY:
- if (!source.is_synced()) {
- int backing_index = source.index();
- FrameElement backing_element = elements_[backing_index];
- if (backing_element.is_memory()) {
- // If we have to spill a register, we spill esi.
- if (esi_caches != backing_index) {
- esi_caches = backing_index;
- __ mov(esi, Operand(ebp, fp_relative(backing_index)));
- }
- __ mov(Operand(ebp, fp_relative(i)), esi);
- } else {
- ASSERT(backing_element.is_register());
- __ mov(Operand(ebp, fp_relative(i)), backing_element.reg());
- }
- }
- break;
- }
- }
- elements_[i] = target;
- }
-
- if (esi_caches != kIllegalIndex) {
- __ mov(esi, Operand(ebp, fp_relative(context_index())));
- }
-}
-
-
-void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
- // We have already done X-to-memory moves.
- ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- // Move the right value into register i if it is currently in a register.
- int index = expected->register_location(i);
- int use_index = register_location(i);
- // Skip if register i is unused in the target or else if source is
- // not a register (this is not a register-to-register move).
- if (index == kIllegalIndex || !elements_[index].is_register()) continue;
-
- Register target = RegisterAllocator::ToRegister(i);
- Register source = elements_[index].reg();
- if (index != use_index) {
- if (use_index == kIllegalIndex) { // Target is currently unused.
- // Copy contents of source from source to target.
- // Set frame element register to target.
- Use(target, index);
- Unuse(source);
- __ mov(target, source);
- } else {
- // Exchange contents of registers source and target.
- // Nothing except the register backing use_index has changed.
- elements_[use_index].set_reg(source);
- set_register_location(target, index);
- set_register_location(source, use_index);
- __ xchg(source, target);
- }
- }
-
- if (!elements_[index].is_synced() &&
- expected->elements_[index].is_synced()) {
- __ mov(Operand(ebp, fp_relative(index)), target);
- }
- elements_[index] = expected->elements_[index];
- }
-}
-
-
-void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
- // Move memory, constants, and copies to registers. This is the
- // final step and since it is not done from the bottom up, but in
- // register code order, we have special code to ensure that the backing
- // elements of copies are in their correct locations when we
- // encounter the copies.
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- int index = expected->register_location(i);
- if (index != kIllegalIndex) {
- FrameElement source = elements_[index];
- FrameElement target = expected->elements_[index];
- Register target_reg = RegisterAllocator::ToRegister(i);
- ASSERT(target.reg().is(target_reg));
- switch (source.type()) {
- case FrameElement::INVALID: // Fall through.
- UNREACHABLE();
- break;
- case FrameElement::REGISTER:
- ASSERT(source.Equals(target));
- // Go to next iteration. Skips Use(target_reg) and syncing
- // below. It is safe to skip syncing because a target
- // register frame element would only be synced if all source
- // elements were.
- continue;
- break;
- case FrameElement::MEMORY:
- ASSERT(index <= stack_pointer_);
- __ mov(target_reg, Operand(ebp, fp_relative(index)));
- break;
-
- case FrameElement::CONSTANT:
- if (cgen()->IsUnsafeSmi(source.handle())) {
- cgen()->MoveUnsafeSmi(target_reg, source.handle());
- } else {
- __ Set(target_reg, Immediate(source.handle()));
- }
- break;
-
- case FrameElement::COPY: {
- int backing_index = source.index();
- FrameElement backing = elements_[backing_index];
- ASSERT(backing.is_memory() || backing.is_register());
- if (backing.is_memory()) {
- ASSERT(backing_index <= stack_pointer_);
- // Code optimization if backing store should also move
- // to a register: move backing store to its register first.
- if (expected->elements_[backing_index].is_register()) {
- FrameElement new_backing = expected->elements_[backing_index];
- Register new_backing_reg = new_backing.reg();
- ASSERT(!is_used(new_backing_reg));
- elements_[backing_index] = new_backing;
- Use(new_backing_reg, backing_index);
- __ mov(new_backing_reg,
- Operand(ebp, fp_relative(backing_index)));
- __ mov(target_reg, new_backing_reg);
- } else {
- __ mov(target_reg, Operand(ebp, fp_relative(backing_index)));
- }
- } else {
- __ mov(target_reg, backing.reg());
- }
- }
- }
- // Ensure the proper sync state.
- if (target.is_synced() && !source.is_synced()) {
- __ mov(Operand(ebp, fp_relative(index)), target_reg);
- }
- Use(target_reg, index);
- elements_[index] = target;
- }
- }
-}
-
-
-void VirtualFrame::Enter() {
- // Registers live on entry: esp, ebp, esi, edi.
- Comment cmnt(masm(), "[ Enter JS frame");
-
-#ifdef DEBUG
- if (FLAG_debug_code) {
- // Verify that edi contains a JS function. The following code
- // relies on eax being available for use.
- __ test(edi, Immediate(kSmiTagMask));
- __ Check(not_zero,
- "VirtualFrame::Enter - edi is not a function (smi check).");
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
- __ Check(equal,
- "VirtualFrame::Enter - edi is not a function (map check).");
- }
-#endif
-
- EmitPush(ebp);
-
- __ mov(ebp, Operand(esp));
-
- // Store the context in the frame. The context is kept in esi and a
- // copy is stored in the frame. The external reference to esi
- // remains.
- EmitPush(esi);
-
- // Store the function in the frame. The frame owns the register
- // reference now (ie, it can keep it in edi or spill it later).
- Push(edi);
- SyncElementAt(element_count() - 1);
- cgen()->allocator()->Unuse(edi);
-}
-
-
-void VirtualFrame::Exit() {
- Comment cmnt(masm(), "[ Exit JS frame");
- // Record the location of the JS exit code for patching when setting
- // break point.
- __ RecordJSReturn();
-
- // Avoid using the leave instruction here, because it is too
- // short. We need the return sequence to be a least the size of a
- // call instruction to support patching the exit code in the
- // debugger. See VisitReturnStatement for the full return sequence.
- __ mov(esp, Operand(ebp));
- stack_pointer_ = frame_pointer();
- for (int i = element_count() - 1; i > stack_pointer_; i--) {
- FrameElement last = elements_.RemoveLast();
- if (last.is_register()) {
- Unuse(last.reg());
- }
- }
-
- EmitPop(ebp);
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
- int count = local_count();
- if (count > 0) {
- Comment cmnt(masm(), "[ Allocate space for locals");
- // The locals are initialized to a constant (the undefined value), but
- // we sync them with the actual frame to allocate space for spilling
- // them later. First sync everything above the stack pointer so we can
- // use pushes to allocate and initialize the locals.
- SyncRange(stack_pointer_ + 1, element_count() - 1);
- Handle<Object> undefined = FACTORY->undefined_value();
- FrameElement initial_value =
- FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
- if (count == 1) {
- __ push(Immediate(undefined));
- } else if (count < kLocalVarBound) {
- // For less locals the unrolled loop is more compact.
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ Set(temp.reg(), Immediate(undefined));
- for (int i = 0; i < count; i++) {
- __ push(temp.reg());
- }
- } else {
- // For more locals a loop in generated code is more compact.
- Label alloc_locals_loop;
- Result cnt = cgen()->allocator()->Allocate();
- Result tmp = cgen()->allocator()->Allocate();
- ASSERT(cnt.is_valid());
- ASSERT(tmp.is_valid());
- __ mov(cnt.reg(), Immediate(count));
- __ mov(tmp.reg(), Immediate(undefined));
- __ bind(&alloc_locals_loop);
- __ push(tmp.reg());
- __ dec(cnt.reg());
- __ j(not_zero, &alloc_locals_loop);
- }
- for (int i = 0; i < count; i++) {
- elements_.Add(initial_value);
- stack_pointer_++;
- }
- }
-}
-
-
-void VirtualFrame::SaveContextRegister() {
- ASSERT(elements_[context_index()].is_memory());
- __ mov(Operand(ebp, fp_relative(context_index())), esi);
-}
-
-
-void VirtualFrame::RestoreContextRegister() {
- ASSERT(elements_[context_index()].is_memory());
- __ mov(esi, Operand(ebp, fp_relative(context_index())));
-}
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ lea(temp.reg(), ParameterAt(-1));
- Push(&temp);
-}
-
-
-int VirtualFrame::InvalidateFrameSlotAt(int index) {
- FrameElement original = elements_[index];
-
- // Is this element the backing store of any copies?
- int new_backing_index = kIllegalIndex;
- if (original.is_copied()) {
- // Verify it is copied, and find first copy.
- for (int i = index + 1; i < element_count(); i++) {
- if (elements_[i].is_copy() && elements_[i].index() == index) {
- new_backing_index = i;
- break;
- }
- }
- }
-
- if (new_backing_index == kIllegalIndex) {
- // No copies found, return kIllegalIndex.
- if (original.is_register()) {
- Unuse(original.reg());
- }
- elements_[index] = FrameElement::InvalidElement();
- return kIllegalIndex;
- }
-
- // This is the backing store of copies.
- Register backing_reg;
- if (original.is_memory()) {
- Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid());
- Use(fresh.reg(), new_backing_index);
- backing_reg = fresh.reg();
- __ mov(backing_reg, Operand(ebp, fp_relative(index)));
- } else {
- // The original was in a register.
- backing_reg = original.reg();
- set_register_location(backing_reg, new_backing_index);
- }
- // Invalidate the element at index.
- elements_[index] = FrameElement::InvalidElement();
- // Set the new backing element.
- if (elements_[new_backing_index].is_synced()) {
- elements_[new_backing_index] =
- FrameElement::RegisterElement(backing_reg,
- FrameElement::SYNCED,
- original.type_info());
- } else {
- elements_[new_backing_index] =
- FrameElement::RegisterElement(backing_reg,
- FrameElement::NOT_SYNCED,
- original.type_info());
- }
- // Update the other copies.
- for (int i = new_backing_index + 1; i < element_count(); i++) {
- if (elements_[i].is_copy() && elements_[i].index() == index) {
- elements_[i].set_index(new_backing_index);
- elements_[new_backing_index].set_copied();
- }
- }
- return new_backing_index;
-}
-
-
-void VirtualFrame::TakeFrameSlotAt(int index) {
- ASSERT(index >= 0);
- ASSERT(index <= element_count());
- FrameElement original = elements_[index];
- int new_backing_store_index = InvalidateFrameSlotAt(index);
- if (new_backing_store_index != kIllegalIndex) {
- elements_.Add(CopyElementAt(new_backing_store_index));
- return;
- }
-
- switch (original.type()) {
- case FrameElement::MEMORY: {
- // Emit code to load the original element's data into a register.
- // Push that register as a FrameElement on top of the frame.
- Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid());
- FrameElement new_element =
- FrameElement::RegisterElement(fresh.reg(),
- FrameElement::NOT_SYNCED,
- original.type_info());
- Use(fresh.reg(), element_count());
- elements_.Add(new_element);
- __ mov(fresh.reg(), Operand(ebp, fp_relative(index)));
- break;
- }
- case FrameElement::REGISTER:
- Use(original.reg(), element_count());
- // Fall through.
- case FrameElement::CONSTANT:
- case FrameElement::COPY:
- original.clear_sync();
- elements_.Add(original);
- break;
- case FrameElement::INVALID:
- UNREACHABLE();
- break;
- }
-}
-
-
-void VirtualFrame::StoreToFrameSlotAt(int index) {
- // Store the value on top of the frame to the virtual frame slot at
- // a given index. The value on top of the frame is left in place.
- // This is a duplicating operation, so it can create copies.
- ASSERT(index >= 0);
- ASSERT(index < element_count());
-
- int top_index = element_count() - 1;
- FrameElement top = elements_[top_index];
- FrameElement original = elements_[index];
- if (top.is_copy() && top.index() == index) return;
- ASSERT(top.is_valid());
-
- InvalidateFrameSlotAt(index);
-
- // InvalidateFrameSlotAt can potentially change any frame element, due
- // to spilling registers to allocate temporaries in order to preserve
- // the copy-on-write semantics of aliased elements. Reload top from
- // the frame.
- top = elements_[top_index];
-
- if (top.is_copy()) {
- // There are two cases based on the relative positions of the
- // stored-to slot and the backing slot of the top element.
- int backing_index = top.index();
- ASSERT(backing_index != index);
- if (backing_index < index) {
- // 1. The top element is a copy of a slot below the stored-to
- // slot. The stored-to slot becomes an unsynced copy of that
- // same backing slot.
- elements_[index] = CopyElementAt(backing_index);
- } else {
- // 2. The top element is a copy of a slot above the stored-to
- // slot. The stored-to slot becomes the new (unsynced) backing
- // slot and both the top element and the element at the former
- // backing slot become copies of it. The sync state of the top
- // and former backing elements is preserved.
- FrameElement backing_element = elements_[backing_index];
- ASSERT(backing_element.is_memory() || backing_element.is_register());
- if (backing_element.is_memory()) {
- // Because sets of copies are canonicalized to be backed by
- // their lowest frame element, and because memory frame
- // elements are backed by the corresponding stack address, we
- // have to move the actual value down in the stack.
- //
- // TODO(209): considering allocating the stored-to slot to the
- // temp register. Alternatively, allow copies to appear in
- // any order in the frame and lazily move the value down to
- // the slot.
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
- __ mov(Operand(ebp, fp_relative(index)), temp.reg());
- } else {
- set_register_location(backing_element.reg(), index);
- if (backing_element.is_synced()) {
- // If the element is a register, we will not actually move
- // anything on the stack but only update the virtual frame
- // element.
- backing_element.clear_sync();
- }
- }
- elements_[index] = backing_element;
-
- // The old backing element becomes a copy of the new backing
- // element.
- FrameElement new_element = CopyElementAt(index);
- elements_[backing_index] = new_element;
- if (backing_element.is_synced()) {
- elements_[backing_index].set_sync();
- }
-
- // All the copies of the old backing element (including the top
- // element) become copies of the new backing element.
- for (int i = backing_index + 1; i < element_count(); i++) {
- if (elements_[i].is_copy() && elements_[i].index() == backing_index) {
- elements_[i].set_index(index);
- }
- }
- }
- return;
- }
-
- // Move the top element to the stored-to slot and replace it (the
- // top element) with a copy.
- elements_[index] = top;
- if (top.is_memory()) {
- // TODO(209): consider allocating the stored-to slot to the temp
- // register. Alternatively, allow copies to appear in any order
- // in the frame and lazily move the value down to the slot.
- FrameElement new_top = CopyElementAt(index);
- new_top.set_sync();
- elements_[top_index] = new_top;
-
- // The sync state of the former top element is correct (synced).
- // Emit code to move the value down in the frame.
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(), Operand(esp, 0));
- __ mov(Operand(ebp, fp_relative(index)), temp.reg());
- } else if (top.is_register()) {
- set_register_location(top.reg(), index);
- // The stored-to slot has the (unsynced) register reference and
- // the top element becomes a copy. The sync state of the top is
- // preserved.
- FrameElement new_top = CopyElementAt(index);
- if (top.is_synced()) {
- new_top.set_sync();
- elements_[index].clear_sync();
- }
- elements_[top_index] = new_top;
- } else {
- // The stored-to slot holds the same value as the top but
- // unsynced. (We do not have copies of constants yet.)
- ASSERT(top.is_constant());
- elements_[index].clear_sync();
- }
-}
-
-
-void VirtualFrame::UntaggedPushFrameSlotAt(int index) {
- ASSERT(index >= 0);
- ASSERT(index <= element_count());
- FrameElement original = elements_[index];
- if (original.is_copy()) {
- original = elements_[original.index()];
- index = original.index();
- }
-
- switch (original.type()) {
- case FrameElement::MEMORY:
- case FrameElement::REGISTER: {
- Label done;
- // Emit code to load the original element's data into a register.
- // Push that register as a FrameElement on top of the frame.
- Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid());
- Register fresh_reg = fresh.reg();
- FrameElement new_element =
- FrameElement::RegisterElement(fresh_reg,
- FrameElement::NOT_SYNCED,
- original.type_info());
- new_element.set_untagged_int32(true);
- Use(fresh_reg, element_count());
- fresh.Unuse(); // BreakTarget does not handle a live Result well.
- elements_.Add(new_element);
- if (original.is_register()) {
- __ mov(fresh_reg, original.reg());
- } else {
- ASSERT(original.is_memory());
- __ mov(fresh_reg, Operand(ebp, fp_relative(index)));
- }
- // Now convert the value to int32, or bail out.
- if (original.type_info().IsSmi()) {
- __ SmiUntag(fresh_reg);
- // Pushing the element is completely done.
- } else {
- __ test(fresh_reg, Immediate(kSmiTagMask));
- Label not_smi;
- __ j(not_zero, &not_smi);
- __ SmiUntag(fresh_reg);
- __ jmp(&done);
-
- __ bind(&not_smi);
- if (!original.type_info().IsNumber()) {
- __ cmp(FieldOperand(fresh_reg, HeapObject::kMapOffset),
- FACTORY->heap_number_map());
- cgen()->unsafe_bailout_->Branch(not_equal);
- }
-
- if (!CpuFeatures::IsSupported(SSE2)) {
- UNREACHABLE();
- } else {
- CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(xmm0, FieldOperand(fresh_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(fresh_reg, Operand(xmm0));
- __ cvtsi2sd(xmm1, Operand(fresh_reg));
- __ ucomisd(xmm0, xmm1);
- cgen()->unsafe_bailout_->Branch(not_equal);
- cgen()->unsafe_bailout_->Branch(parity_even); // NaN.
- // Test for negative zero.
- __ test(fresh_reg, Operand(fresh_reg));
- __ j(not_zero, &done);
- __ movmskpd(fresh_reg, xmm0);
- __ and_(fresh_reg, 0x1);
- cgen()->unsafe_bailout_->Branch(not_equal);
- }
- __ bind(&done);
- }
- break;
- }
- case FrameElement::CONSTANT:
- elements_.Add(CopyElementAt(index));
- elements_[element_count() - 1].set_untagged_int32(true);
- break;
- case FrameElement::COPY:
- case FrameElement::INVALID:
- UNREACHABLE();
- break;
- }
-}
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
- ASSERT(cgen()->HasValidEntryRegisters());
- // Grow the expression stack by handler size less one (the return
- // address is already pushed by a call instruction).
- Adjust(kHandlerSize - 1);
- __ PushTryHandler(IN_JAVASCRIPT, type);
-}
-
-
-Result VirtualFrame::RawCallStub(CodeStub* stub) {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallStub(stub);
- Result result = cgen()->allocator()->Allocate(eax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
- PrepareForCall(0, 0);
- arg->ToRegister(eax);
- arg->Unuse();
- return RawCallStub(stub);
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
- PrepareForCall(0, 0);
-
- if (arg0->is_register() && arg0->reg().is(eax)) {
- if (arg1->is_register() && arg1->reg().is(edx)) {
- // Wrong registers.
- __ xchg(eax, edx);
- } else {
- // Register edx is free for arg0, which frees eax for arg1.
- arg0->ToRegister(edx);
- arg1->ToRegister(eax);
- }
- } else {
- // Register eax is free for arg1, which guarantees edx is free for
- // arg0.
- arg1->ToRegister(eax);
- arg0->ToRegister(edx);
- }
-
- arg0->Unuse();
- arg1->Unuse();
- return RawCallStub(stub);
-}
-
-
-Result VirtualFrame::CallJSFunction(int arg_count) {
- Result function = Pop();
-
- // InvokeFunction requires function in edi. Move it in there.
- function.ToRegister(edi);
- function.Unuse();
-
- // +1 for receiver.
- PrepareForCall(arg_count + 1, arg_count + 1);
- ASSERT(cgen()->HasValidEntryRegisters());
- ParameterCount count(arg_count);
- __ InvokeFunction(edi, count, CALL_FUNCTION);
- RestoreContextRegister();
- Result result = cgen()->allocator()->Allocate(eax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-Result VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(f, arg_count);
- Result result = cgen()->allocator()->Allocate(eax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(id, arg_count);
- Result result = cgen()->allocator()->Allocate(eax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void VirtualFrame::DebugBreak() {
- PrepareForCall(0, 0);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ DebugBreak();
- Result result = cgen()->allocator()->Allocate(eax);
- ASSERT(result.is_valid());
-}
-#endif
-
-
-Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ InvokeBuiltin(id, flag);
- Result result = cgen()->allocator()->Allocate(eax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode) {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ call(code, rmode);
- Result result = cgen()->allocator()->Allocate(eax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-// This function assumes that the only results that could be in a_reg or b_reg
-// are a and b. Other results can be live, but must not be in a_reg or b_reg.
-void VirtualFrame::MoveResultsToRegisters(Result* a,
- Result* b,
- Register a_reg,
- Register b_reg) {
- if (a->is_register() && a->reg().is(a_reg)) {
- b->ToRegister(b_reg);
- } else if (!cgen()->allocator()->is_used(a_reg)) {
- a->ToRegister(a_reg);
- b->ToRegister(b_reg);
- } else if (cgen()->allocator()->is_used(b_reg)) {
- // a must be in b_reg, b in a_reg.
- __ xchg(a_reg, b_reg);
- // Results a and b will be invalidated, so it is ok if they are switched.
- } else {
- b->ToRegister(b_reg);
- a->ToRegister(a_reg);
- }
- a->Unuse();
- b->Unuse();
-}
-
-
-Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
- // Name and receiver are on the top of the frame. The IC expects
- // name in ecx and receiver in eax.
- Result name = Pop();
- Result receiver = Pop();
- PrepareForCall(0, 0); // No stack arguments.
- MoveResultsToRegisters(&name, &receiver, ecx, eax);
-
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- Builtins::kLoadIC_Initialize));
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
- // Key and receiver are on top of the frame. Put them in eax and edx.
- Result key = Pop();
- Result receiver = Pop();
- PrepareForCall(0, 0);
- MoveResultsToRegisters(&key, &receiver, eax, edx);
-
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- Builtins::kKeyedLoadIC_Initialize));
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallStoreIC(Handle<String> name,
- bool is_contextual,
- StrictModeFlag strict_mode) {
- // Value and (if not contextual) receiver are on top of the frame.
- // The IC expects name in ecx, value in eax, and receiver in edx.
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- (strict_mode == kStrictMode) ? Builtins::kStoreIC_Initialize_Strict
- : Builtins::kStoreIC_Initialize));
-
- Result value = Pop();
- RelocInfo::Mode mode;
- if (is_contextual) {
- PrepareForCall(0, 0);
- value.ToRegister(eax);
- __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- value.Unuse();
- mode = RelocInfo::CODE_TARGET_CONTEXT;
- } else {
- Result receiver = Pop();
- PrepareForCall(0, 0);
- MoveResultsToRegisters(&value, &receiver, eax, edx);
- mode = RelocInfo::CODE_TARGET;
- }
- __ mov(ecx, name);
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
- // Value, key, and receiver are on the top of the frame. The IC
- // expects value in eax, key in ecx, and receiver in edx.
- Result value = Pop();
- Result key = Pop();
- Result receiver = Pop();
- PrepareForCall(0, 0);
- if (!cgen()->allocator()->is_used(eax) ||
- (value.is_register() && value.reg().is(eax))) {
- if (!cgen()->allocator()->is_used(eax)) {
- value.ToRegister(eax);
- }
- MoveResultsToRegisters(&key, &receiver, ecx, edx);
- value.Unuse();
- } else if (!cgen()->allocator()->is_used(ecx) ||
- (key.is_register() && key.reg().is(ecx))) {
- if (!cgen()->allocator()->is_used(ecx)) {
- key.ToRegister(ecx);
- }
- MoveResultsToRegisters(&value, &receiver, eax, edx);
- key.Unuse();
- } else if (!cgen()->allocator()->is_used(edx) ||
- (receiver.is_register() && receiver.reg().is(edx))) {
- if (!cgen()->allocator()->is_used(edx)) {
- receiver.ToRegister(edx);
- }
- MoveResultsToRegisters(&key, &value, ecx, eax);
- receiver.Unuse();
- } else {
- // All three registers are used, and no value is in the correct place.
- // We have one of the two circular permutations of eax, ecx, edx.
- ASSERT(value.is_register());
- if (value.reg().is(ecx)) {
- __ xchg(eax, edx);
- __ xchg(eax, ecx);
- } else {
- __ xchg(eax, ecx);
- __ xchg(eax, edx);
- }
- value.Unuse();
- key.Unuse();
- receiver.Unuse();
- }
-
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- (strict_mode == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict
- : Builtins::kKeyedStoreIC_Initialize));
- return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
-}
-
-
-Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
- int arg_count,
- int loop_nesting) {
- // Function name, arguments, and receiver are on top of the frame.
- // The IC expects the name in ecx and the rest on the stack and
- // drops them all.
- InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = Isolate::Current()->stub_cache()->ComputeCallInitialize(
- arg_count, in_loop);
- // Spill args, receiver, and function. The call will drop args and
- // receiver.
- Result name = Pop();
- PrepareForCall(arg_count + 1, arg_count + 1); // Arguments + receiver.
- name.ToRegister(ecx);
- name.Unuse();
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedCallIC(RelocInfo::Mode mode,
- int arg_count,
- int loop_nesting) {
- // Function name, arguments, and receiver are on top of the frame.
- // The IC expects the name in ecx and the rest on the stack and
- // drops them all.
- InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic =
- Isolate::Current()->stub_cache()->ComputeKeyedCallInitialize(arg_count,
- in_loop);
- // Spill args, receiver, and function. The call will drop args and
- // receiver.
- Result name = Pop();
- PrepareForCall(arg_count + 1, arg_count + 1); // Arguments + receiver.
- name.ToRegister(ecx);
- name.Unuse();
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallConstructor(int arg_count) {
- // Arguments, receiver, and function are on top of the frame. The
- // IC expects arg count in eax, function in edi, and the arguments
- // and receiver on the stack.
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- Builtins::kJSConstructCall));
- // Duplicate the function before preparing the frame.
- PushElementAt(arg_count);
- Result function = Pop();
- PrepareForCall(arg_count + 1, arg_count + 1); // Spill function and args.
- function.ToRegister(edi);
-
- // Constructors are called with the number of arguments in register
- // eax for now. Another option would be to have separate construct
- // call trampolines per different arguments counts encountered.
- Result num_args = cgen()->allocator()->Allocate(eax);
- ASSERT(num_args.is_valid());
- __ Set(num_args.reg(), Immediate(arg_count));
-
- function.Unuse();
- num_args.Unuse();
- return RawCallCodeObject(ic, RelocInfo::CONSTRUCT_CALL);
-}
-
-
-void VirtualFrame::Drop(int count) {
- ASSERT(count >= 0);
- ASSERT(height() >= count);
- int num_virtual_elements = (element_count() - 1) - stack_pointer_;
-
- // Emit code to lower the stack pointer if necessary.
- if (num_virtual_elements < count) {
- int num_dropped = count - num_virtual_elements;
- stack_pointer_ -= num_dropped;
- __ add(Operand(esp), Immediate(num_dropped * kPointerSize));
- }
-
- // Discard elements from the virtual frame and free any registers.
- for (int i = 0; i < count; i++) {
- FrameElement dropped = elements_.RemoveLast();
- if (dropped.is_register()) {
- Unuse(dropped.reg());
- }
- }
-}
-
-
-Result VirtualFrame::Pop() {
- FrameElement element = elements_.RemoveLast();
- int index = element_count();
- ASSERT(element.is_valid());
- ASSERT(element.is_untagged_int32() == cgen()->in_safe_int32_mode());
-
- // Get number type information of the result.
- TypeInfo info;
- if (!element.is_copy()) {
- info = element.type_info();
- } else {
- info = elements_[element.index()].type_info();
- }
-
- bool pop_needed = (stack_pointer_ == index);
- if (pop_needed) {
- stack_pointer_--;
- if (element.is_memory()) {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ pop(temp.reg());
- temp.set_type_info(info);
- temp.set_untagged_int32(element.is_untagged_int32());
- return temp;
- }
-
- __ add(Operand(esp), Immediate(kPointerSize));
- }
- ASSERT(!element.is_memory());
-
- // The top element is a register, constant, or a copy. Unuse
- // registers and follow copies to their backing store.
- if (element.is_register()) {
- Unuse(element.reg());
- } else if (element.is_copy()) {
- ASSERT(!element.is_untagged_int32());
- ASSERT(element.index() < index);
- index = element.index();
- element = elements_[index];
- }
- ASSERT(!element.is_copy());
-
- // The element is memory, a register, or a constant.
- if (element.is_memory()) {
- // Memory elements could only be the backing store of a copy.
- // Allocate the original to a register.
- ASSERT(index <= stack_pointer_);
- ASSERT(!element.is_untagged_int32());
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- Use(temp.reg(), index);
- FrameElement new_element =
- FrameElement::RegisterElement(temp.reg(),
- FrameElement::SYNCED,
- element.type_info());
- // Preserve the copy flag on the element.
- if (element.is_copied()) new_element.set_copied();
- elements_[index] = new_element;
- __ mov(temp.reg(), Operand(ebp, fp_relative(index)));
- return Result(temp.reg(), info);
- } else if (element.is_register()) {
- Result return_value(element.reg(), info);
- return_value.set_untagged_int32(element.is_untagged_int32());
- return return_value;
- } else {
- ASSERT(element.is_constant());
- Result return_value(element.handle());
- return_value.set_untagged_int32(element.is_untagged_int32());
- return return_value;
- }
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_--;
- elements_.RemoveLast();
- __ pop(reg);
-}
-
-
-void VirtualFrame::EmitPop(Operand operand) {
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_--;
- elements_.RemoveLast();
- __ pop(operand);
-}
-
-
-void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ push(reg);
-}
-
-
-void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ push(operand);
-}
-
-
-void VirtualFrame::EmitPush(Immediate immediate, TypeInfo info) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ push(immediate);
-}
-
-
-void VirtualFrame::PushUntaggedElement(Handle<Object> value) {
- ASSERT(!ConstantPoolOverflowed());
- elements_.Add(FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED));
- elements_[element_count() - 1].set_untagged_int32(true);
-}
-
-
-void VirtualFrame::Push(Expression* expr) {
- ASSERT(expr->IsTrivial());
-
- Literal* lit = expr->AsLiteral();
- if (lit != NULL) {
- Push(lit->handle());
- return;
- }
-
- VariableProxy* proxy = expr->AsVariableProxy();
- if (proxy != NULL) {
- Slot* slot = proxy->var()->AsSlot();
- if (slot->type() == Slot::LOCAL) {
- PushLocalAt(slot->index());
- return;
- }
- if (slot->type() == Slot::PARAMETER) {
- PushParameterAt(slot->index());
- return;
- }
- }
- UNREACHABLE();
-}
-
-
-void VirtualFrame::Push(Handle<Object> value) {
- if (ConstantPoolOverflowed()) {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ Set(temp.reg(), Immediate(value));
- Push(&temp);
- } else {
- FrameElement element =
- FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED);
- elements_.Add(element);
- }
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/virtual-frame-ia32.h b/src/3rdparty/v8/src/ia32/virtual-frame-ia32.h
deleted file mode 100644
index 504a8fc..0000000
--- a/src/3rdparty/v8/src/ia32/virtual-frame-ia32.h
+++ /dev/null
@@ -1,650 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_VIRTUAL_FRAME_IA32_H_
-#define V8_IA32_VIRTUAL_FRAME_IA32_H_
-
-#include "codegen.h"
-#include "register-allocator.h"
-#include "scopes.h"
-#include "type-info.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame. It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack. It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
-
-class VirtualFrame: public ZoneObject {
- public:
- // A utility class to introduce a scope where the virtual frame is
- // expected to remain spilled. The constructor spills the code
- // generator's current frame, but no attempt is made to require it
- // to stay spilled. It is intended as documentation while the code
- // generator is being transformed.
- class SpilledScope BASE_EMBEDDED {
- public:
- SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
- ASSERT(cgen()->has_valid_frame());
- cgen()->frame()->SpillAll();
- cgen()->set_in_spilled_code(true);
- }
-
- ~SpilledScope() {
- cgen()->set_in_spilled_code(previous_state_);
- }
-
- private:
- bool previous_state_;
-
- CodeGenerator* cgen() {
- return CodeGeneratorScope::Current(Isolate::Current());
- }
- };
-
- // An illegal index into the virtual frame.
- static const int kIllegalIndex = -1;
-
- // Construct an initial virtual frame on entry to a JS function.
- inline VirtualFrame();
-
- // Construct a virtual frame as a clone of an existing one.
- explicit inline VirtualFrame(VirtualFrame* original);
-
- CodeGenerator* cgen() {
- return CodeGeneratorScope::Current(Isolate::Current());
- }
-
- MacroAssembler* masm() { return cgen()->masm(); }
-
- // Create a duplicate of an existing valid frame element.
- FrameElement CopyElementAt(int index,
- TypeInfo info = TypeInfo::Uninitialized());
-
- // The number of elements on the virtual frame.
- int element_count() { return elements_.length(); }
-
- // The height of the virtual expression stack.
- int height() { return element_count() - expression_base_index(); }
-
- int register_location(int num) {
- ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
- return register_locations_[num];
- }
-
- inline int register_location(Register reg);
-
- inline void set_register_location(Register reg, int index);
-
- bool is_used(int num) {
- ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
- return register_locations_[num] != kIllegalIndex;
- }
-
- inline bool is_used(Register reg);
-
- // Add extra in-memory elements to the top of the frame to match an actual
- // frame (eg, the frame after an exception handler is pushed). No code is
- // emitted.
- void Adjust(int count);
-
- // Forget count elements from the top of the frame all in-memory
- // (including synced) and adjust the stack pointer downward, to
- // match an external frame effect (examples include a call removing
- // its arguments, and exiting a try/catch removing an exception
- // handler). No code will be emitted.
- void Forget(int count) {
- ASSERT(count >= 0);
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_ -= count;
- ForgetElements(count);
- }
-
- // Forget count elements from the top of the frame without adjusting
- // the stack pointer downward. This is used, for example, before
- // merging frames at break, continue, and return targets.
- void ForgetElements(int count);
-
- // Spill all values from the frame to memory.
- inline void SpillAll();
-
- // Spill all occurrences of a specific register from the frame.
- void Spill(Register reg) {
- if (is_used(reg)) SpillElementAt(register_location(reg));
- }
-
- // Make the two registers distinct and spill them. Returns the second
- // register. If the registers were not distinct then it returns the new
- // second register.
- Result MakeDistinctAndSpilled(Result* left, Result* right) {
- Spill(left->reg());
- Spill(right->reg());
- if (left->reg().is(right->reg())) {
- RegisterAllocator* allocator = cgen()->allocator();
- Result fresh = allocator->Allocate();
- ASSERT(fresh.is_valid());
- masm()->mov(fresh.reg(), right->reg());
- return fresh;
- }
- return *right;
- }
-
- // Spill all occurrences of an arbitrary register if possible. Return the
- // register spilled or no_reg if it was not possible to free any register
- // (ie, they all have frame-external references).
- Register SpillAnyRegister();
-
- // Spill the top element of the frame.
- void SpillTop() { SpillElementAt(element_count() - 1); }
-
- // Sync the range of elements in [begin, end] with memory.
- void SyncRange(int begin, int end);
-
- // Make this frame so that an arbitrary frame of the same height can
- // be merged to it. Copies and constants are removed from the frame.
- void MakeMergable();
-
- // Prepare this virtual frame for merging to an expected frame by
- // performing some state changes that do not require generating
- // code. It is guaranteed that no code will be generated.
- void PrepareMergeTo(VirtualFrame* expected);
-
- // Make this virtual frame have a state identical to an expected virtual
- // frame. As a side effect, code may be emitted to make this frame match
- // the expected one.
- void MergeTo(VirtualFrame* expected);
-
- // Detach a frame from its code generator, perhaps temporarily. This
- // tells the register allocator that it is free to use frame-internal
- // registers. Used when the code generator's frame is switched from this
- // one to NULL by an unconditional jump.
- void DetachFromCodeGenerator() {
- RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i)) cgen_allocator->Unuse(i);
- }
- }
-
- // (Re)attach a frame to its code generator. This informs the register
- // allocator that the frame-internal register references are active again.
- // Used when a code generator's frame is switched from NULL to this one by
- // binding a label.
- void AttachToCodeGenerator() {
- RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i)) cgen_allocator->Use(i);
- }
- }
-
- // Emit code for the physical JS entry and exit frame sequences. After
- // calling Enter, the virtual frame is ready for use; and after calling
- // Exit it should not be used. Note that Enter does not allocate space in
- // the physical frame for storing frame-allocated locals.
- void Enter();
- void Exit();
-
- // Prepare for returning from the frame by spilling locals. This
- // avoids generating unnecessary merge code when jumping to the
- // shared return site. Emits code for spills.
- inline void PrepareForReturn();
-
- // Number of local variables after when we use a loop for allocating.
- static const int kLocalVarBound = 10;
-
- // Allocate and initialize the frame-allocated locals.
- void AllocateStackSlots();
-
- // An element of the expression stack as an assembly operand.
- Operand ElementAt(int index) const {
- return Operand(esp, index * kPointerSize);
- }
-
- // Random-access store to a frame-top relative frame element. The result
- // becomes owned by the frame and is invalidated.
- void SetElementAt(int index, Result* value);
-
- // Set a frame element to a constant. The index is frame-top relative.
- inline void SetElementAt(int index, Handle<Object> value);
-
- void PushElementAt(int index) {
- PushFrameSlotAt(element_count() - index - 1);
- }
-
- void StoreToElementAt(int index) {
- StoreToFrameSlotAt(element_count() - index - 1);
- }
-
- // A frame-allocated local as an assembly operand.
- Operand LocalAt(int index) {
- ASSERT(0 <= index);
- ASSERT(index < local_count());
- return Operand(ebp, kLocal0Offset - index * kPointerSize);
- }
-
- // Push a copy of the value of a local frame slot on top of the frame.
- void PushLocalAt(int index) {
- PushFrameSlotAt(local0_index() + index);
- }
-
- // Push a copy of the value of a local frame slot on top of the frame.
- void UntaggedPushLocalAt(int index) {
- UntaggedPushFrameSlotAt(local0_index() + index);
- }
-
- // Push the value of a local frame slot on top of the frame and invalidate
- // the local slot. The slot should be written to before trying to read
- // from it again.
- void TakeLocalAt(int index) {
- TakeFrameSlotAt(local0_index() + index);
- }
-
- // Store the top value on the virtual frame into a local frame slot. The
- // value is left in place on top of the frame.
- void StoreToLocalAt(int index) {
- StoreToFrameSlotAt(local0_index() + index);
- }
-
- // Push the address of the receiver slot on the frame.
- void PushReceiverSlotAddress();
-
- // Push the function on top of the frame.
- void PushFunction() {
- PushFrameSlotAt(function_index());
- }
-
- // Save the value of the esi register to the context frame slot.
- void SaveContextRegister();
-
- // Restore the esi register from the value of the context frame
- // slot.
- void RestoreContextRegister();
-
- // A parameter as an assembly operand.
- Operand ParameterAt(int index) {
- ASSERT(-1 <= index); // -1 is the receiver.
- ASSERT(index < parameter_count());
- return Operand(ebp, (1 + parameter_count() - index) * kPointerSize);
- }
-
- // Push a copy of the value of a parameter frame slot on top of the frame.
- void PushParameterAt(int index) {
- PushFrameSlotAt(param0_index() + index);
- }
-
- // Push a copy of the value of a parameter frame slot on top of the frame.
- void UntaggedPushParameterAt(int index) {
- UntaggedPushFrameSlotAt(param0_index() + index);
- }
-
- // Push the value of a paramter frame slot on top of the frame and
- // invalidate the parameter slot. The slot should be written to before
- // trying to read from it again.
- void TakeParameterAt(int index) {
- TakeFrameSlotAt(param0_index() + index);
- }
-
- // Store the top value on the virtual frame into a parameter frame slot.
- // The value is left in place on top of the frame.
- void StoreToParameterAt(int index) {
- StoreToFrameSlotAt(param0_index() + index);
- }
-
- // The receiver frame slot.
- Operand Receiver() {
- return ParameterAt(-1);
- }
-
- // Push a try-catch or try-finally handler on top of the virtual frame.
- void PushTryHandler(HandlerType type);
-
- // Call stub given the number of arguments it expects on (and
- // removes from) the stack.
- inline Result CallStub(CodeStub* stub, int arg_count);
-
- // Call stub that takes a single argument passed in eax. The
- // argument is given as a result which does not have to be eax or
- // even a register. The argument is consumed by the call.
- Result CallStub(CodeStub* stub, Result* arg);
-
- // Call stub that takes a pair of arguments passed in edx (arg0) and
- // eax (arg1). The arguments are given as results which do not have
- // to be in the proper registers or even in registers. The
- // arguments are consumed by the call.
- Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
-
- // Call JS function from top of the stack with arguments
- // taken from the stack.
- Result CallJSFunction(int arg_count);
-
- // Call runtime given the number of arguments expected on (and
- // removed from) the stack.
- Result CallRuntime(const Runtime::Function* f, int arg_count);
- Result CallRuntime(Runtime::FunctionId id, int arg_count);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- void DebugBreak();
-#endif
-
- // Invoke builtin given the number of arguments it expects on (and
- // removes from) the stack.
- Result InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, int arg_count);
-
- // Call load IC. Name and receiver are found on top of the frame.
- // Both are dropped.
- Result CallLoadIC(RelocInfo::Mode mode);
-
- // Call keyed load IC. Key and receiver are found on top of the
- // frame. Both are dropped.
- Result CallKeyedLoadIC(RelocInfo::Mode mode);
-
- // Call store IC. If the load is contextual, value is found on top of the
- // frame. If not, value and receiver are on the frame. Both are dropped.
- Result CallStoreIC(Handle<String> name, bool is_contextual,
- StrictModeFlag strict_mode);
-
- // Call keyed store IC. Value, key, and receiver are found on top
- // of the frame. All three are dropped.
- Result CallKeyedStoreIC(StrictModeFlag strict_mode);
-
- // Call call IC. Function name, arguments, and receiver are found on top
- // of the frame and dropped by the call. The argument count does not
- // include the receiver.
- Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
-
- // Call keyed call IC. Same calling convention as CallCallIC.
- Result CallKeyedCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
-
- // Allocate and call JS function as constructor. Arguments,
- // receiver (global object), and function are found on top of the
- // frame. Function is not dropped. The argument count does not
- // include the receiver.
- Result CallConstructor(int arg_count);
-
- // Drop a number of elements from the top of the expression stack. May
- // emit code to affect the physical frame. Does not clobber any registers
- // excepting possibly the stack pointer.
- void Drop(int count);
-
- // Drop one element.
- void Drop() {
- Drop(1);
- }
-
- // Duplicate the top element of the frame.
- void Dup() {
- PushFrameSlotAt(element_count() - 1);
- }
-
- // Pop an element from the top of the expression stack. Returns a
- // Result, which may be a constant or a register.
- Result Pop();
-
- // Pop and save an element from the top of the expression stack and
- // emit a corresponding pop instruction.
- void EmitPop(Register reg);
- void EmitPop(Operand operand);
-
- // Push an element on top of the expression stack and emit a
- // corresponding push instruction.
- void EmitPush(Register reg,
- TypeInfo info = TypeInfo::Unknown());
- void EmitPush(Operand operand,
- TypeInfo info = TypeInfo::Unknown());
- void EmitPush(Immediate immediate,
- TypeInfo info = TypeInfo::Unknown());
-
- inline bool ConstantPoolOverflowed();
-
- // Push an element on the virtual frame.
- void Push(Handle<Object> value);
- inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown());
- inline void Push(Smi* value);
-
- void PushUntaggedElement(Handle<Object> value);
-
- // Pushing a result invalidates it (its contents become owned by the
- // frame).
- void Push(Result* result) {
- // This assert will trigger if you try to push the same value twice.
- ASSERT(result->is_valid());
- if (result->is_register()) {
- Push(result->reg(), result->type_info());
- } else {
- ASSERT(result->is_constant());
- Push(result->handle());
- }
- if (cgen()->in_safe_int32_mode()) {
- ASSERT(result->is_untagged_int32());
- elements_[element_count() - 1].set_untagged_int32(true);
- }
- result->Unuse();
- }
-
- // Pushing an expression expects that the expression is trivial (according
- // to Expression::IsTrivial).
- void Push(Expression* expr);
-
- // Nip removes zero or more elements from immediately below the top
- // of the frame, leaving the previous top-of-frame value on top of
- // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
- inline void Nip(int num_dropped);
-
- // Check that the frame has no elements containing untagged int32 elements.
- bool HasNoUntaggedInt32Elements() {
- for (int i = 0; i < element_count(); ++i) {
- if (elements_[i].is_untagged_int32()) return false;
- }
- return true;
- }
-
- // Update the type information of a variable frame element directly.
- inline void SetTypeForLocalAt(int index, TypeInfo info);
- inline void SetTypeForParamAt(int index, TypeInfo info);
-
- private:
- static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
- static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
- static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
- static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
- static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
-
- ZoneList<FrameElement> elements_;
-
- // The index of the element that is at the processor's stack pointer
- // (the esp register).
- int stack_pointer_;
-
- // The index of the register frame element using each register, or
- // kIllegalIndex if a register is not on the frame.
- int register_locations_[RegisterAllocator::kNumRegisters];
-
- // The number of frame-allocated locals and parameters respectively.
- inline int parameter_count();
-
- inline int local_count();
-
- // The index of the element that is at the processor's frame pointer
- // (the ebp register). The parameters, receiver, and return address
- // are below the frame pointer.
- int frame_pointer() {
- return parameter_count() + 2;
- }
-
- // The index of the first parameter. The receiver lies below the first
- // parameter.
- int param0_index() {
- return 1;
- }
-
- // The index of the context slot in the frame. It is immediately
- // above the frame pointer.
- int context_index() {
- return frame_pointer() + 1;
- }
-
- // The index of the function slot in the frame. It is above the frame
- // pointer and the context slot.
- int function_index() {
- return frame_pointer() + 2;
- }
-
- // The index of the first local. Between the frame pointer and the
- // locals lie the context and the function.
- int local0_index() {
- return frame_pointer() + 3;
- }
-
- // The index of the base of the expression stack.
- int expression_base_index() {
- return local0_index() + local_count();
- }
-
- // Convert a frame index into a frame pointer relative offset into the
- // actual stack.
- int fp_relative(int index) {
- ASSERT(index < element_count());
- ASSERT(frame_pointer() < element_count()); // FP is on the frame.
- return (frame_pointer() - index) * kPointerSize;
- }
-
- // Record an occurrence of a register in the virtual frame. This has the
- // effect of incrementing the register's external reference count and
- // of updating the index of the register's location in the frame.
- void Use(Register reg, int index) {
- ASSERT(!is_used(reg));
- set_register_location(reg, index);
- cgen()->allocator()->Use(reg);
- }
-
- // Record that a register reference has been dropped from the frame. This
- // decrements the register's external reference count and invalidates the
- // index of the register's location in the frame.
- void Unuse(Register reg) {
- ASSERT(is_used(reg));
- set_register_location(reg, kIllegalIndex);
- cgen()->allocator()->Unuse(reg);
- }
-
- // Spill the element at a particular index---write it to memory if
- // necessary, free any associated register, and forget its value if
- // constant.
- void SpillElementAt(int index);
-
- // Sync the element at a particular index. If it is a register or
- // constant that disagrees with the value on the stack, write it to memory.
- // Keep the element type as register or constant, and clear the dirty bit.
- void SyncElementAt(int index);
-
- // Sync a single unsynced element that lies beneath or at the stack pointer.
- void SyncElementBelowStackPointer(int index);
-
- // Sync a single unsynced element that lies just above the stack pointer.
- void SyncElementByPushing(int index);
-
- // Push a copy of a frame slot (typically a local or parameter) on top of
- // the frame.
- inline void PushFrameSlotAt(int index);
-
- // Push a copy of a frame slot (typically a local or parameter) on top of
- // the frame, at an untagged int32 value. Bails out if the value is not
- // an int32.
- void UntaggedPushFrameSlotAt(int index);
-
- // Push a the value of a frame slot (typically a local or parameter) on
- // top of the frame and invalidate the slot.
- void TakeFrameSlotAt(int index);
-
- // Store the value on top of the frame to a frame slot (typically a local
- // or parameter).
- void StoreToFrameSlotAt(int index);
-
- // Spill all elements in registers. Spill the top spilled_args elements
- // on the frame. Sync all other frame elements.
- // Then drop dropped_args elements from the virtual frame, to match
- // the effect of an upcoming call that will drop them from the stack.
- void PrepareForCall(int spilled_args, int dropped_args);
-
- // Move frame elements currently in registers or constants, that
- // should be in memory in the expected frame, to memory.
- void MergeMoveRegistersToMemory(VirtualFrame* expected);
-
- // Make the register-to-register moves necessary to
- // merge this frame with the expected frame.
- // Register to memory moves must already have been made,
- // and memory to register moves must follow this call.
- // This is because some new memory-to-register moves are
- // created in order to break cycles of register moves.
- // Used in the implementation of MergeTo().
- void MergeMoveRegistersToRegisters(VirtualFrame* expected);
-
- // Make the memory-to-register and constant-to-register moves
- // needed to make this frame equal the expected frame.
- // Called after all register-to-memory and register-to-register
- // moves have been made. After this function returns, the frames
- // should be equal.
- void MergeMoveMemoryToRegisters(VirtualFrame* expected);
-
- // Invalidates a frame slot (puts an invalid frame element in it).
- // Copies on the frame are correctly handled, and if this slot was
- // the backing store of copies, the index of the new backing store
- // is returned. Otherwise, returns kIllegalIndex.
- // Register counts are correctly updated.
- int InvalidateFrameSlotAt(int index);
-
- // This function assumes that a and b are the only results that could be in
- // the registers a_reg or b_reg. Other results can be live, but must not
- // be in the registers a_reg or b_reg. The results a and b are invalidated.
- void MoveResultsToRegisters(Result* a,
- Result* b,
- Register a_reg,
- Register b_reg);
-
- // Call a code stub that has already been prepared for calling (via
- // PrepareForCall).
- Result RawCallStub(CodeStub* stub);
-
- // Calls a code object which has already been prepared for calling
- // (via PrepareForCall).
- Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
-
- inline bool Equals(VirtualFrame* other);
-
- // Classes that need raw access to the elements_ array.
- friend class FrameRegisterState;
- friend class JumpTarget;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_VIRTUAL_FRAME_IA32_H_
diff --git a/src/3rdparty/v8/src/ic-inl.h b/src/3rdparty/v8/src/ic-inl.h
deleted file mode 100644
index b4f789c..0000000
--- a/src/3rdparty/v8/src/ic-inl.h
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IC_INL_H_
-#define V8_IC_INL_H_
-
-#include "ic.h"
-#include "debug.h"
-#include "macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-
-Address IC::address() {
- // Get the address of the call.
- Address result = pc() - Assembler::kCallTargetAddressOffset;
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug* debug = Isolate::Current()->debug();
- // First check if any break points are active if not just return the address
- // of the call.
- if (!debug->has_break_points()) return result;
-
- // At least one break point is active perform additional test to ensure that
- // break point locations are updated correctly.
- if (debug->IsDebugBreak(Assembler::target_address_at(result))) {
- // If the call site is a call to debug break then return the address in
- // the original code instead of the address in the running code. This will
- // cause the original code to be updated and keeps the breakpoint active in
- // the running code.
- return OriginalCodeAddress();
- } else {
- // No break point here just return the address of the call.
- return result;
- }
-#else
- return result;
-#endif
-}
-
-
-Code* IC::GetTargetAtAddress(Address address) {
- // Get the target address of the IC.
- Address target = Assembler::target_address_at(address);
- // Convert target address to the code object. Code::GetCodeFromTargetAddress
- // is safe for use during GC where the map might be marked.
- Code* result = Code::GetCodeFromTargetAddress(target);
- ASSERT(result->is_inline_cache_stub());
- return result;
-}
-
-
-void IC::SetTargetAtAddress(Address address, Code* target) {
- ASSERT(target->is_inline_cache_stub() || target->is_compare_ic_stub());
-#ifdef DEBUG
- // STORE_IC and KEYED_STORE_IC use Code::extra_ic_state() to mark
- // ICs as strict mode. The strict-ness of the IC must be preserved.
- Code* old_target = GetTargetAtAddress(address);
- if (old_target->kind() == Code::STORE_IC ||
- old_target->kind() == Code::KEYED_STORE_IC) {
- ASSERT(old_target->extra_ic_state() == target->extra_ic_state());
- }
-#endif
- Assembler::set_target_address_at(address, target->instruction_start());
-}
-
-
-InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object,
- JSObject* holder) {
- if (object->IsJSObject()) {
- return GetCodeCacheForObject(JSObject::cast(object), holder);
- }
- // If the object is a value, we use the prototype map for the cache.
- ASSERT(object->IsString() || object->IsNumber() || object->IsBoolean());
- return PROTOTYPE_MAP;
-}
-
-
-InlineCacheHolderFlag IC::GetCodeCacheForObject(JSObject* object,
- JSObject* holder) {
- // Fast-properties and global objects store stubs in their own maps.
- // Slow properties objects use prototype's map (unless the property is its own
- // when holder == object). It works because slow properties objects having
- // the same prototype (or a prototype with the same map) and not having
- // the property are interchangeable for such a stub.
- if (holder != object &&
- !object->HasFastProperties() &&
- !object->IsJSGlobalProxy() &&
- !object->IsJSGlobalObject()) {
- return PROTOTYPE_MAP;
- }
- return OWN_MAP;
-}
-
-
-JSObject* IC::GetCodeCacheHolder(Object* object, InlineCacheHolderFlag holder) {
- Object* map_owner = (holder == OWN_MAP ? object : object->GetPrototype());
- ASSERT(map_owner->IsJSObject());
- return JSObject::cast(map_owner);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_IC_INL_H_
diff --git a/src/3rdparty/v8/src/ic.cc b/src/3rdparty/v8/src/ic.cc
deleted file mode 100644
index dd4d25b..0000000
--- a/src/3rdparty/v8/src/ic.cc
+++ /dev/null
@@ -1,2389 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "accessors.h"
-#include "api.h"
-#include "arguments.h"
-#include "codegen.h"
-#include "execution.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef DEBUG
-static char TransitionMarkFromState(IC::State state) {
- switch (state) {
- case UNINITIALIZED: return '0';
- case PREMONOMORPHIC: return 'P';
- case MONOMORPHIC: return '1';
- case MONOMORPHIC_PROTOTYPE_FAILURE: return '^';
- case MEGAMORPHIC: return 'N';
-
- // We never see the debugger states here, because the state is
- // computed from the original code - not the patched code. Let
- // these cases fall through to the unreachable code below.
- case DEBUG_BREAK: break;
- case DEBUG_PREPARE_STEP_IN: break;
- }
- UNREACHABLE();
- return 0;
-}
-
-void IC::TraceIC(const char* type,
- Handle<Object> name,
- State old_state,
- Code* new_target,
- const char* extra_info) {
- if (FLAG_trace_ic) {
- State new_state = StateFrom(new_target,
- HEAP->undefined_value(),
- HEAP->undefined_value());
- PrintF("[%s (%c->%c)%s", type,
- TransitionMarkFromState(old_state),
- TransitionMarkFromState(new_state),
- extra_info);
- name->Print();
- PrintF("]\n");
- }
-}
-#endif
-
-
-IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
- ASSERT(isolate == Isolate::Current());
- // To improve the performance of the (much used) IC code, we unfold
- // a few levels of the stack frame iteration code. This yields a
- // ~35% speedup when running DeltaBlue with the '--nouse-ic' flag.
- const Address entry =
- Isolate::c_entry_fp(isolate->thread_local_top());
- Address* pc_address =
- reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
- Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
- // If there's another JavaScript frame on the stack, we need to look
- // one frame further down the stack to find the frame pointer and
- // the return address stack slot.
- if (depth == EXTRA_CALL_FRAME) {
- const int kCallerPCOffset = StandardFrameConstants::kCallerPCOffset;
- pc_address = reinterpret_cast<Address*>(fp + kCallerPCOffset);
- fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
- }
-#ifdef DEBUG
- StackFrameIterator it;
- for (int i = 0; i < depth + 1; i++) it.Advance();
- StackFrame* frame = it.frame();
- ASSERT(fp == frame->fp() && pc_address == frame->pc_address());
-#endif
- fp_ = fp;
- pc_address_ = pc_address;
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-Address IC::OriginalCodeAddress() {
- HandleScope scope;
- // Compute the JavaScript frame for the frame pointer of this IC
- // structure. We need this to be able to find the function
- // corresponding to the frame.
- StackFrameIterator it;
- while (it.frame()->fp() != this->fp()) it.Advance();
- JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
- // Find the function on the stack and both the active code for the
- // function and the original code.
- JSFunction* function = JSFunction::cast(frame->function());
- Handle<SharedFunctionInfo> shared(function->shared());
- Code* code = shared->code();
- ASSERT(Debug::HasDebugInfo(shared));
- Code* original_code = Debug::GetDebugInfo(shared)->original_code();
- ASSERT(original_code->IsCode());
- // Get the address of the call site in the active code. This is the
- // place where the call to DebugBreakXXX is and where the IC
- // normally would be.
- Address addr = pc() - Assembler::kCallTargetAddressOffset;
- // Return the address in the original code. This is the place where
- // the call which has been overwritten by the DebugBreakXXX resides
- // and the place where the inline cache system should look.
- intptr_t delta =
- original_code->instruction_start() - code->instruction_start();
- return addr + delta;
-}
-#endif
-
-
-static bool HasNormalObjectsInPrototypeChain(Isolate* isolate,
- LookupResult* lookup,
- Object* receiver) {
- Object* end = lookup->IsProperty()
- ? lookup->holder() : isolate->heap()->null_value();
- for (Object* current = receiver;
- current != end;
- current = current->GetPrototype()) {
- if (current->IsJSObject() &&
- !JSObject::cast(current)->HasFastProperties() &&
- !current->IsJSGlobalProxy() &&
- !current->IsJSGlobalObject()) {
- return true;
- }
- }
-
- return false;
-}
-
-
-static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
- Object* receiver,
- Object* name) {
- InlineCacheHolderFlag cache_holder =
- Code::ExtractCacheHolderFromFlags(target->flags());
-
- if (cache_holder == OWN_MAP && !receiver->IsJSObject()) {
- // The stub was generated for JSObject but called for non-JSObject.
- // IC::GetCodeCacheHolder is not applicable.
- return false;
- } else if (cache_holder == PROTOTYPE_MAP &&
- receiver->GetPrototype()->IsNull()) {
- // IC::GetCodeCacheHolder is not applicable.
- return false;
- }
- Map* map = IC::GetCodeCacheHolder(receiver, cache_holder)->map();
-
- // Decide whether the inline cache failed because of changes to the
- // receiver itself or changes to one of its prototypes.
- //
- // If there are changes to the receiver itself, the map of the
- // receiver will have changed and the current target will not be in
- // the receiver map's code cache. Therefore, if the current target
- // is in the receiver map's code cache, the inline cache failed due
- // to prototype check failure.
- int index = map->IndexInCodeCache(name, target);
- if (index >= 0) {
- map->RemoveFromCodeCache(String::cast(name), target, index);
- return true;
- }
-
- return false;
-}
-
-
-IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
- IC::State state = target->ic_state();
-
- if (state != MONOMORPHIC || !name->IsString()) return state;
- if (receiver->IsUndefined() || receiver->IsNull()) return state;
-
- // For keyed load/store/call, the most likely cause of cache failure is
- // that the key has changed. We do not distinguish between
- // prototype and non-prototype failures for keyed access.
- Code::Kind kind = target->kind();
- if (kind == Code::KEYED_LOAD_IC ||
- kind == Code::KEYED_STORE_IC ||
- kind == Code::KEYED_CALL_IC) {
- return MONOMORPHIC;
- }
-
- // Remove the target from the code cache if it became invalid
- // because of changes in the prototype chain to avoid hitting it
- // again.
- // Call stubs handle this later to allow extra IC state
- // transitions.
- if (kind != Code::CALL_IC &&
- TryRemoveInvalidPrototypeDependentStub(target, receiver, name)) {
- return MONOMORPHIC_PROTOTYPE_FAILURE;
- }
-
- // The builtins object is special. It only changes when JavaScript
- // builtins are loaded lazily. It is important to keep inline
- // caches for the builtins object monomorphic. Therefore, if we get
- // an inline cache miss for the builtins object after lazily loading
- // JavaScript builtins, we return uninitialized as the state to
- // force the inline cache back to monomorphic state.
- if (receiver->IsJSBuiltinsObject()) {
- return UNINITIALIZED;
- }
-
- return MONOMORPHIC;
-}
-
-
-RelocInfo::Mode IC::ComputeMode() {
- Address addr = address();
- Code* code = Code::cast(isolate()->heap()->FindCodeObject(addr));
- for (RelocIterator it(code, RelocInfo::kCodeTargetMask);
- !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- if (info->pc() == addr) return info->rmode();
- }
- UNREACHABLE();
- return RelocInfo::NONE;
-}
-
-
-Failure* IC::TypeError(const char* type,
- Handle<Object> object,
- Handle<Object> key) {
- HandleScope scope(isolate());
- Handle<Object> args[2] = { key, object };
- Handle<Object> error = isolate()->factory()->NewTypeError(
- type, HandleVector(args, 2));
- return isolate()->Throw(*error);
-}
-
-
-Failure* IC::ReferenceError(const char* type, Handle<String> name) {
- HandleScope scope(isolate());
- Handle<Object> error = isolate()->factory()->NewReferenceError(
- type, HandleVector(&name, 1));
- return isolate()->Throw(*error);
-}
-
-
-void IC::Clear(Address address) {
- Code* target = GetTargetAtAddress(address);
-
- // Don't clear debug break inline cache as it will remove the break point.
- if (target->ic_state() == DEBUG_BREAK) return;
-
- switch (target->kind()) {
- case Code::LOAD_IC: return LoadIC::Clear(address, target);
- case Code::KEYED_LOAD_IC:
- case Code::KEYED_EXTERNAL_ARRAY_LOAD_IC:
- return KeyedLoadIC::Clear(address, target);
- case Code::STORE_IC: return StoreIC::Clear(address, target);
- case Code::KEYED_STORE_IC:
- case Code::KEYED_EXTERNAL_ARRAY_STORE_IC:
- return KeyedStoreIC::Clear(address, target);
- case Code::CALL_IC: return CallIC::Clear(address, target);
- case Code::KEYED_CALL_IC: return KeyedCallIC::Clear(address, target);
- case Code::BINARY_OP_IC:
- case Code::TYPE_RECORDING_BINARY_OP_IC:
- case Code::COMPARE_IC:
- // Clearing these is tricky and does not
- // make any performance difference.
- return;
- default: UNREACHABLE();
- }
-}
-
-
-void CallICBase::Clear(Address address, Code* target) {
- State state = target->ic_state();
- if (state == UNINITIALIZED) return;
- Code* code =
- Isolate::Current()->stub_cache()->FindCallInitialize(
- target->arguments_count(),
- target->ic_in_loop(),
- target->kind());
- SetTargetAtAddress(address, code);
-}
-
-
-void KeyedLoadIC::ClearInlinedVersion(Address address) {
- // Insert null as the map to check for to make sure the map check fails
- // sending control flow to the IC instead of the inlined version.
- PatchInlinedLoad(address, HEAP->null_value());
-}
-
-
-void KeyedLoadIC::Clear(Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
- // Make sure to also clear the map used in inline fast cases. If we
- // do not clear these maps, cached code can keep objects alive
- // through the embedded maps.
- ClearInlinedVersion(address);
- SetTargetAtAddress(address, initialize_stub());
-}
-
-
-void LoadIC::ClearInlinedVersion(Address address) {
- // Reset the map check of the inlined inobject property load (if
- // present) to guarantee failure by holding an invalid map (the null
- // value). The offset can be patched to anything.
- Heap* heap = HEAP;
- PatchInlinedLoad(address, heap->null_value(), 0);
- PatchInlinedContextualLoad(address,
- heap->null_value(),
- heap->null_value(),
- true);
-}
-
-
-void LoadIC::Clear(Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
- ClearInlinedVersion(address);
- SetTargetAtAddress(address, initialize_stub());
-}
-
-
-void StoreIC::ClearInlinedVersion(Address address) {
- // Reset the map check of the inlined inobject property store (if
- // present) to guarantee failure by holding an invalid map (the null
- // value). The offset can be patched to anything.
- PatchInlinedStore(address, HEAP->null_value(), 0);
-}
-
-
-void StoreIC::Clear(Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
- ClearInlinedVersion(address);
- SetTargetAtAddress(address,
- (target->extra_ic_state() == kStrictMode)
- ? initialize_stub_strict()
- : initialize_stub());
-}
-
-
-void KeyedStoreIC::ClearInlinedVersion(Address address) {
- // Insert null as the elements map to check for. This will make
- // sure that the elements fast-case map check fails so that control
- // flows to the IC instead of the inlined version.
- PatchInlinedStore(address, HEAP->null_value());
-}
-
-
-void KeyedStoreIC::RestoreInlinedVersion(Address address) {
- // Restore the fast-case elements map check so that the inlined
- // version can be used again.
- PatchInlinedStore(address, HEAP->fixed_array_map());
-}
-
-
-void KeyedStoreIC::Clear(Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
- SetTargetAtAddress(address,
- (target->extra_ic_state() == kStrictMode)
- ? initialize_stub_strict()
- : initialize_stub());
-}
-
-
-static bool HasInterceptorGetter(JSObject* object) {
- return !object->GetNamedInterceptor()->getter()->IsUndefined();
-}
-
-
-static void LookupForRead(Object* object,
- String* name,
- LookupResult* lookup) {
- AssertNoAllocation no_gc; // pointers must stay valid
-
- // Skip all the objects with named interceptors, but
- // without actual getter.
- while (true) {
- object->Lookup(name, lookup);
- // Besides normal conditions (property not found or it's not
- // an interceptor), bail out if lookup is not cacheable: we won't
- // be able to IC it anyway and regular lookup should work fine.
- if (!lookup->IsFound()
- || (lookup->type() != INTERCEPTOR)
- || !lookup->IsCacheable()) {
- return;
- }
-
- JSObject* holder = lookup->holder();
- if (HasInterceptorGetter(holder)) {
- return;
- }
-
- holder->LocalLookupRealNamedProperty(name, lookup);
- if (lookup->IsProperty()) {
- ASSERT(lookup->type() != INTERCEPTOR);
- return;
- }
-
- Object* proto = holder->GetPrototype();
- if (proto->IsNull()) {
- lookup->NotFound();
- return;
- }
-
- object = proto;
- }
-}
-
-
-Object* CallICBase::TryCallAsFunction(Object* object) {
- HandleScope scope(isolate());
- Handle<Object> target(object, isolate());
- Handle<Object> delegate = Execution::GetFunctionDelegate(target);
-
- if (delegate->IsJSFunction()) {
- // Patch the receiver and use the delegate as the function to
- // invoke. This is used for invoking objects as if they were
- // functions.
- const int argc = this->target()->arguments_count();
- StackFrameLocator locator;
- JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- int index = frame->ComputeExpressionsCount() - (argc + 1);
- frame->SetExpression(index, *target);
- }
-
- return *delegate;
-}
-
-
-void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee,
- Handle<Object> object) {
- if (callee->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(callee);
- if (function->shared()->strict_mode() || function->IsBuiltin()) {
- // Do not wrap receiver for strict mode functions or for builtins.
- return;
- }
- }
-
- // And only wrap string, number or boolean.
- if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
- // Change the receiver to the result of calling ToObject on it.
- const int argc = this->target()->arguments_count();
- StackFrameLocator locator;
- JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- int index = frame->ComputeExpressionsCount() - (argc + 1);
- frame->SetExpression(index, *isolate()->factory()->ToObject(object));
- }
-}
-
-
-MaybeObject* CallICBase::LoadFunction(State state,
- Code::ExtraICState extra_ic_state,
- Handle<Object> object,
- Handle<String> name) {
- // If the object is undefined or null it's illegal to try to get any
- // of its properties; throw a TypeError in that case.
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_call", object, name);
- }
-
- // Check if the name is trivially convertible to an index and get
- // the element if so.
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- Object* result;
- { MaybeObject* maybe_result = object->GetElement(index);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- if (result->IsJSFunction()) return result;
-
- // Try to find a suitable function delegate for the object at hand.
- result = TryCallAsFunction(result);
- if (result->IsJSFunction()) return result;
-
- // Otherwise, it will fail in the lookup step.
- }
-
- // Lookup the property in the object.
- LookupResult lookup;
- LookupForRead(*object, *name, &lookup);
-
- if (!lookup.IsProperty()) {
- // If the object does not have the requested property, check which
- // exception we need to throw.
- if (IsContextual(object)) {
- return ReferenceError("not_defined", name);
- }
- return TypeError("undefined_method", object, name);
- }
-
- // Lookup is valid: Update inline cache and stub cache.
- if (FLAG_use_ic) {
- UpdateCaches(&lookup, state, extra_ic_state, object, name);
- }
-
- // Get the property.
- PropertyAttributes attr;
- Object* result;
- { MaybeObject* maybe_result =
- object->GetProperty(*object, &lookup, *name, &attr);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- if (lookup.type() == INTERCEPTOR) {
- // If the object does not have the requested property, check which
- // exception we need to throw.
- if (attr == ABSENT) {
- if (IsContextual(object)) {
- return ReferenceError("not_defined", name);
- }
- return TypeError("undefined_method", object, name);
- }
- }
-
- ASSERT(!result->IsTheHole());
-
- HandleScope scope(isolate());
- // Wrap result in a handle because ReceiverToObjectIfRequired may allocate
- // new object and cause GC.
- Handle<Object> result_handle(result);
- // Make receiver an object if the callee requires it. Strict mode or builtin
- // functions do not wrap the receiver, non-strict functions and objects
- // called as functions do.
- ReceiverToObjectIfRequired(result_handle, object);
-
- if (result_handle->IsJSFunction()) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Handle stepping into a function if step into is active.
- Debug* debug = isolate()->debug();
- if (debug->StepInActive()) {
- // Protect the result in a handle as the debugger can allocate and might
- // cause GC.
- Handle<JSFunction> function(JSFunction::cast(*result_handle), isolate());
- debug->HandleStepIn(function, object, fp(), false);
- return *function;
- }
-#endif
-
- return *result_handle;
- }
-
- // Try to find a suitable function delegate for the object at hand.
- result_handle = Handle<Object>(TryCallAsFunction(*result_handle));
- if (result_handle->IsJSFunction()) return *result_handle;
-
- return TypeError("property_not_function", object, name);
-}
-
-
-bool CallICBase::TryUpdateExtraICState(LookupResult* lookup,
- Handle<Object> object,
- Code::ExtraICState* extra_ic_state) {
- ASSERT(kind_ == Code::CALL_IC);
- if (lookup->type() != CONSTANT_FUNCTION) return false;
- JSFunction* function = lookup->GetConstantFunction();
- if (!function->shared()->HasBuiltinFunctionId()) return false;
-
- // Fetch the arguments passed to the called function.
- const int argc = target()->arguments_count();
- Address entry = isolate()->c_entry_fp(isolate()->thread_local_top());
- Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
- Arguments args(argc + 1,
- &Memory::Object_at(fp +
- StandardFrameConstants::kCallerSPOffset +
- argc * kPointerSize));
- switch (function->shared()->builtin_function_id()) {
- case kStringCharCodeAt:
- case kStringCharAt:
- if (object->IsString()) {
- String* string = String::cast(*object);
- // Check there's the right string value or wrapper in the receiver slot.
- ASSERT(string == args[0] || string == JSValue::cast(args[0])->value());
- // If we're in the default (fastest) state and the index is
- // out of bounds, update the state to record this fact.
- if (*extra_ic_state == DEFAULT_STRING_STUB &&
- argc >= 1 && args[1]->IsNumber()) {
- double index;
- if (args[1]->IsSmi()) {
- index = Smi::cast(args[1])->value();
- } else {
- ASSERT(args[1]->IsHeapNumber());
- index = DoubleToInteger(HeapNumber::cast(args[1])->value());
- }
- if (index < 0 || index >= string->length()) {
- *extra_ic_state = STRING_INDEX_OUT_OF_BOUNDS;
- return true;
- }
- }
- }
- break;
- default:
- return false;
- }
- return false;
-}
-
-
-MaybeObject* CallICBase::ComputeMonomorphicStub(
- LookupResult* lookup,
- State state,
- Code::ExtraICState extra_ic_state,
- Handle<Object> object,
- Handle<String> name) {
- int argc = target()->arguments_count();
- InLoopFlag in_loop = target()->ic_in_loop();
- MaybeObject* maybe_code = NULL;
- switch (lookup->type()) {
- case FIELD: {
- int index = lookup->GetFieldIndex();
- maybe_code = isolate()->stub_cache()->ComputeCallField(argc,
- in_loop,
- kind_,
- *name,
- *object,
- lookup->holder(),
- index);
- break;
- }
- case CONSTANT_FUNCTION: {
- // Get the constant function and compute the code stub for this
- // call; used for rewriting to monomorphic state and making sure
- // that the code stub is in the stub cache.
- JSFunction* function = lookup->GetConstantFunction();
- maybe_code =
- isolate()->stub_cache()->ComputeCallConstant(argc,
- in_loop,
- kind_,
- extra_ic_state,
- *name,
- *object,
- lookup->holder(),
- function);
- break;
- }
- case NORMAL: {
- if (!object->IsJSObject()) return NULL;
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-
- if (lookup->holder()->IsGlobalObject()) {
- GlobalObject* global = GlobalObject::cast(lookup->holder());
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
- if (!cell->value()->IsJSFunction()) return NULL;
- JSFunction* function = JSFunction::cast(cell->value());
- maybe_code = isolate()->stub_cache()->ComputeCallGlobal(argc,
- in_loop,
- kind_,
- *name,
- *receiver,
- global,
- cell,
- function);
- } else {
- // There is only one shared stub for calling normalized
- // properties. It does not traverse the prototype chain, so the
- // property must be found in the receiver for the stub to be
- // applicable.
- if (lookup->holder() != *receiver) return NULL;
- maybe_code = isolate()->stub_cache()->ComputeCallNormal(argc,
- in_loop,
- kind_,
- *name,
- *receiver);
- }
- break;
- }
- case INTERCEPTOR: {
- ASSERT(HasInterceptorGetter(lookup->holder()));
- maybe_code = isolate()->stub_cache()->ComputeCallInterceptor(
- argc,
- kind_,
- *name,
- *object,
- lookup->holder());
- break;
- }
- default:
- maybe_code = NULL;
- break;
- }
- return maybe_code;
-}
-
-
-void CallICBase::UpdateCaches(LookupResult* lookup,
- State state,
- Code::ExtraICState extra_ic_state,
- Handle<Object> object,
- Handle<String> name) {
- // Bail out if we didn't find a result.
- if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
-
- if (lookup->holder() != *object &&
- HasNormalObjectsInPrototypeChain(
- isolate(), lookup, object->GetPrototype())) {
- // Suppress optimization for prototype chains with slow properties objects
- // in the middle.
- return;
- }
-
- // Compute the number of arguments.
- int argc = target()->arguments_count();
- InLoopFlag in_loop = target()->ic_in_loop();
- MaybeObject* maybe_code = NULL;
- bool had_proto_failure = false;
- if (state == UNINITIALIZED) {
- // This is the first time we execute this inline cache.
- // Set the target to the pre monomorphic stub to delay
- // setting the monomorphic state.
- maybe_code = isolate()->stub_cache()->ComputeCallPreMonomorphic(argc,
- in_loop,
- kind_);
- } else if (state == MONOMORPHIC) {
- if (kind_ == Code::CALL_IC &&
- TryUpdateExtraICState(lookup, object, &extra_ic_state)) {
- maybe_code = ComputeMonomorphicStub(lookup,
- state,
- extra_ic_state,
- object,
- name);
- } else if (kind_ == Code::CALL_IC &&
- TryRemoveInvalidPrototypeDependentStub(target(),
- *object,
- *name)) {
- had_proto_failure = true;
- maybe_code = ComputeMonomorphicStub(lookup,
- state,
- extra_ic_state,
- object,
- name);
- } else {
- maybe_code = isolate()->stub_cache()->ComputeCallMegamorphic(argc,
- in_loop,
- kind_);
- }
- } else {
- maybe_code = ComputeMonomorphicStub(lookup,
- state,
- extra_ic_state,
- object,
- name);
- }
-
- // If we're unable to compute the stub (not enough memory left), we
- // simply avoid updating the caches.
- Object* code;
- if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
-
- // Patch the call site depending on the state of the cache.
- if (state == UNINITIALIZED ||
- state == PREMONOMORPHIC ||
- state == MONOMORPHIC ||
- state == MONOMORPHIC_PROTOTYPE_FAILURE) {
- set_target(Code::cast(code));
- } else if (state == MEGAMORPHIC) {
- // Cache code holding map should be consistent with
- // GenerateMonomorphicCacheProbe. It is not the map which holds the stub.
- Map* map = JSObject::cast(object->IsJSObject() ? *object :
- object->GetPrototype())->map();
-
- // Update the stub cache.
- isolate()->stub_cache()->Set(*name, map, Code::cast(code));
- }
-
- USE(had_proto_failure);
-#ifdef DEBUG
- if (had_proto_failure) state = MONOMORPHIC_PROTOTYPE_FAILURE;
- TraceIC(kind_ == Code::CALL_IC ? "CallIC" : "KeyedCallIC",
- name, state, target(), in_loop ? " (in-loop)" : "");
-#endif
-}
-
-
-MaybeObject* KeyedCallIC::LoadFunction(State state,
- Handle<Object> object,
- Handle<Object> key) {
- if (key->IsSymbol()) {
- return CallICBase::LoadFunction(state,
- Code::kNoExtraICState,
- object,
- Handle<String>::cast(key));
- }
-
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_call", object, key);
- }
-
- if (FLAG_use_ic && state != MEGAMORPHIC && !object->IsAccessCheckNeeded()) {
- int argc = target()->arguments_count();
- InLoopFlag in_loop = target()->ic_in_loop();
- MaybeObject* maybe_code = isolate()->stub_cache()->ComputeCallMegamorphic(
- argc, in_loop, Code::KEYED_CALL_IC);
- Object* code;
- if (maybe_code->ToObject(&code)) {
- set_target(Code::cast(code));
-#ifdef DEBUG
- TraceIC(
- "KeyedCallIC", key, state, target(), in_loop ? " (in-loop)" : "");
-#endif
- }
- }
-
- HandleScope scope(isolate());
- Handle<Object> result = GetProperty(object, key);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
-
- // Make receiver an object if the callee requires it. Strict mode or builtin
- // functions do not wrap the receiver, non-strict functions and objects
- // called as functions do.
- ReceiverToObjectIfRequired(result, object);
-
- if (result->IsJSFunction()) return *result;
- result = Handle<Object>(TryCallAsFunction(*result));
- if (result->IsJSFunction()) return *result;
-
- return TypeError("property_not_function", object, key);
-}
-
-
-#ifdef DEBUG
-#define TRACE_IC_NAMED(msg, name) \
- if (FLAG_trace_ic) PrintF(msg, *(name)->ToCString())
-#else
-#define TRACE_IC_NAMED(msg, name)
-#endif
-
-
-MaybeObject* LoadIC::Load(State state,
- Handle<Object> object,
- Handle<String> name) {
- // If the object is undefined or null it's illegal to try to get any
- // of its properties; throw a TypeError in that case.
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_load", object, name);
- }
-
- if (FLAG_use_ic) {
- Code* non_monomorphic_stub =
- (state == UNINITIALIZED) ? pre_monomorphic_stub() : megamorphic_stub();
-
- // Use specialized code for getting the length of strings and
- // string wrapper objects. The length property of string wrapper
- // objects is read-only and therefore always returns the length of
- // the underlying string value. See ECMA-262 15.5.5.1.
- if ((object->IsString() || object->IsStringWrapper()) &&
- name->Equals(isolate()->heap()->length_symbol())) {
- HandleScope scope(isolate());
-#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
-#endif
- if (state == PREMONOMORPHIC) {
- if (object->IsString()) {
- Map* map = HeapObject::cast(*object)->map();
- const int offset = String::kLengthOffset;
- PatchInlinedLoad(address(), map, offset);
- set_target(isolate()->builtins()->builtin(
- Builtins::kLoadIC_StringLength));
- } else {
- set_target(isolate()->builtins()->builtin(
- Builtins::kLoadIC_StringWrapperLength));
- }
- } else if (state == MONOMORPHIC && object->IsStringWrapper()) {
- set_target(isolate()->builtins()->builtin(
- Builtins::kLoadIC_StringWrapperLength));
- } else {
- set_target(non_monomorphic_stub);
- }
- // Get the string if we have a string wrapper object.
- if (object->IsJSValue()) {
- object = Handle<Object>(Handle<JSValue>::cast(object)->value(),
- isolate());
- }
- return Smi::FromInt(String::cast(*object)->length());
- }
-
- // Use specialized code for getting the length of arrays.
- if (object->IsJSArray() &&
- name->Equals(isolate()->heap()->length_symbol())) {
-#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
-#endif
- if (state == PREMONOMORPHIC) {
- Map* map = HeapObject::cast(*object)->map();
- const int offset = JSArray::kLengthOffset;
- PatchInlinedLoad(address(), map, offset);
- set_target(isolate()->builtins()->builtin(
- Builtins::kLoadIC_ArrayLength));
- } else {
- set_target(non_monomorphic_stub);
- }
- return JSArray::cast(*object)->length();
- }
-
- // Use specialized code for getting prototype of functions.
- if (object->IsJSFunction() &&
- name->Equals(isolate()->heap()->prototype_symbol()) &&
- JSFunction::cast(*object)->should_have_prototype()) {
-#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
-#endif
- if (state == PREMONOMORPHIC) {
- set_target(isolate()->builtins()->builtin(
- Builtins::kLoadIC_FunctionPrototype));
- } else {
- set_target(non_monomorphic_stub);
- }
- return Accessors::FunctionGetPrototype(*object, 0);
- }
- }
-
- // Check if the name is trivially convertible to an index and get
- // the element if so.
- uint32_t index;
- if (name->AsArrayIndex(&index)) return object->GetElement(index);
-
- // Named lookup in the object.
- LookupResult lookup;
- LookupForRead(*object, *name, &lookup);
-
- // If we did not find a property, check if we need to throw an exception.
- if (!lookup.IsProperty()) {
- if (FLAG_strict || IsContextual(object)) {
- return ReferenceError("not_defined", name);
- }
- LOG(isolate(), SuspectReadEvent(*name, *object));
- }
-
- bool can_be_inlined_precheck =
- FLAG_use_ic &&
- lookup.IsProperty() &&
- lookup.IsCacheable() &&
- lookup.holder() == *object &&
- !object->IsAccessCheckNeeded();
-
- bool can_be_inlined =
- can_be_inlined_precheck &&
- state == PREMONOMORPHIC &&
- lookup.type() == FIELD;
-
- bool can_be_inlined_contextual =
- can_be_inlined_precheck &&
- state == UNINITIALIZED &&
- lookup.holder()->IsGlobalObject() &&
- lookup.type() == NORMAL;
-
- if (can_be_inlined) {
- Map* map = lookup.holder()->map();
- // Property's index in the properties array. If negative we have
- // an inobject property.
- int index = lookup.GetFieldIndex() - map->inobject_properties();
- if (index < 0) {
- // Index is an offset from the end of the object.
- int offset = map->instance_size() + (index * kPointerSize);
- if (PatchInlinedLoad(address(), map, offset)) {
- set_target(megamorphic_stub());
- TRACE_IC_NAMED("[LoadIC : inline patch %s]\n", name);
- return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
- } else {
- TRACE_IC_NAMED("[LoadIC : no inline patch %s (patching failed)]\n",
- name);
- }
- } else {
- TRACE_IC_NAMED("[LoadIC : no inline patch %s (not inobject)]\n", name);
- }
- } else if (can_be_inlined_contextual) {
- Map* map = lookup.holder()->map();
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
- lookup.holder()->property_dictionary()->ValueAt(
- lookup.GetDictionaryEntry()));
- if (PatchInlinedContextualLoad(address(),
- map,
- cell,
- lookup.IsDontDelete())) {
- set_target(megamorphic_stub());
- TRACE_IC_NAMED("[LoadIC : inline contextual patch %s]\n", name);
- ASSERT(cell->value() != isolate()->heap()->the_hole_value());
- return cell->value();
- }
- } else {
- if (FLAG_use_ic && state == PREMONOMORPHIC) {
- TRACE_IC_NAMED("[LoadIC : no inline patch %s (not inlinable)]\n", name);
- }
- }
-
- // Update inline cache and stub cache.
- if (FLAG_use_ic) {
- UpdateCaches(&lookup, state, object, name);
- }
-
- PropertyAttributes attr;
- if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
- // Get the property.
- Object* result;
- { MaybeObject* maybe_result =
- object->GetProperty(*object, &lookup, *name, &attr);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // If the property is not present, check if we need to throw an
- // exception.
- if (attr == ABSENT && IsContextual(object)) {
- return ReferenceError("not_defined", name);
- }
- return result;
- }
-
- // Get the property.
- return object->GetProperty(*object, &lookup, *name, &attr);
-}
-
-
-void LoadIC::UpdateCaches(LookupResult* lookup,
- State state,
- Handle<Object> object,
- Handle<String> name) {
- // Bail out if the result is not cacheable.
- if (!lookup->IsCacheable()) return;
-
- // Loading properties from values is not common, so don't try to
- // deal with non-JS objects here.
- if (!object->IsJSObject()) return;
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-
- if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return;
-
- // Compute the code stub for this load.
- MaybeObject* maybe_code = NULL;
- Object* code;
- if (state == UNINITIALIZED) {
- // This is the first time we execute this inline cache.
- // Set the target to the pre monomorphic stub to delay
- // setting the monomorphic state.
- maybe_code = pre_monomorphic_stub();
- } else if (!lookup->IsProperty()) {
- // Nonexistent property. The result is undefined.
- maybe_code = isolate()->stub_cache()->ComputeLoadNonexistent(*name,
- *receiver);
- } else {
- // Compute monomorphic stub.
- switch (lookup->type()) {
- case FIELD: {
- maybe_code = isolate()->stub_cache()->ComputeLoadField(
- *name,
- *receiver,
- lookup->holder(),
- lookup->GetFieldIndex());
- break;
- }
- case CONSTANT_FUNCTION: {
- Object* constant = lookup->GetConstantFunction();
- maybe_code = isolate()->stub_cache()->ComputeLoadConstant(
- *name, *receiver, lookup->holder(), constant);
- break;
- }
- case NORMAL: {
- if (lookup->holder()->IsGlobalObject()) {
- GlobalObject* global = GlobalObject::cast(lookup->holder());
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
- maybe_code = isolate()->stub_cache()->ComputeLoadGlobal(*name,
- *receiver,
- global,
- cell,
- lookup->IsDontDelete());
- } else {
- // There is only one shared stub for loading normalized
- // properties. It does not traverse the prototype chain, so the
- // property must be found in the receiver for the stub to be
- // applicable.
- if (lookup->holder() != *receiver) return;
- maybe_code = isolate()->stub_cache()->ComputeLoadNormal();
- }
- break;
- }
- case CALLBACKS: {
- if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
- AccessorInfo* callback =
- AccessorInfo::cast(lookup->GetCallbackObject());
- if (v8::ToCData<Address>(callback->getter()) == 0) return;
- maybe_code = isolate()->stub_cache()->ComputeLoadCallback(
- *name, *receiver, lookup->holder(), callback);
- break;
- }
- case INTERCEPTOR: {
- ASSERT(HasInterceptorGetter(lookup->holder()));
- maybe_code = isolate()->stub_cache()->ComputeLoadInterceptor(
- *name, *receiver, lookup->holder());
- break;
- }
- default:
- return;
- }
- }
-
- // If we're unable to compute the stub (not enough memory left), we
- // simply avoid updating the caches.
- if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
-
- // Patch the call site depending on the state of the cache.
- if (state == UNINITIALIZED || state == PREMONOMORPHIC ||
- state == MONOMORPHIC_PROTOTYPE_FAILURE) {
- set_target(Code::cast(code));
- } else if (state == MONOMORPHIC) {
- set_target(megamorphic_stub());
- } else if (state == MEGAMORPHIC) {
- // Cache code holding map should be consistent with
- // GenerateMonomorphicCacheProbe.
- Map* map = JSObject::cast(object->IsJSObject() ? *object :
- object->GetPrototype())->map();
-
- isolate()->stub_cache()->Set(*name, map, Code::cast(code));
- }
-
-#ifdef DEBUG
- TraceIC("LoadIC", name, state, target());
-#endif
-}
-
-
-MaybeObject* KeyedLoadIC::Load(State state,
- Handle<Object> object,
- Handle<Object> key) {
- // Check for values that can be converted into a symbol.
- // TODO(1295): Remove this code.
- HandleScope scope(isolate());
- if (key->IsHeapNumber() &&
- isnan(HeapNumber::cast(*key)->value())) {
- key = isolate()->factory()->nan_symbol();
- } else if (key->IsUndefined()) {
- key = isolate()->factory()->undefined_symbol();
- }
-
- if (key->IsSymbol()) {
- Handle<String> name = Handle<String>::cast(key);
-
- // If the object is undefined or null it's illegal to try to get any
- // of its properties; throw a TypeError in that case.
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_load", object, name);
- }
-
- if (FLAG_use_ic) {
- // TODO(1073): don't ignore the current stub state.
-
- // Use specialized code for getting the length of strings.
- if (object->IsString() &&
- name->Equals(isolate()->heap()->length_symbol())) {
- Handle<String> string = Handle<String>::cast(object);
- Object* code = NULL;
- { MaybeObject* maybe_code =
- isolate()->stub_cache()->ComputeKeyedLoadStringLength(*name,
- *string);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- set_target(Code::cast(code));
-#ifdef DEBUG
- TraceIC("KeyedLoadIC", name, state, target());
-#endif // DEBUG
- return Smi::FromInt(string->length());
- }
-
- // Use specialized code for getting the length of arrays.
- if (object->IsJSArray() &&
- name->Equals(isolate()->heap()->length_symbol())) {
- Handle<JSArray> array = Handle<JSArray>::cast(object);
- Object* code;
- { MaybeObject* maybe_code =
- isolate()->stub_cache()->ComputeKeyedLoadArrayLength(*name,
- *array);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- set_target(Code::cast(code));
-#ifdef DEBUG
- TraceIC("KeyedLoadIC", name, state, target());
-#endif // DEBUG
- return JSArray::cast(*object)->length();
- }
-
- // Use specialized code for getting prototype of functions.
- if (object->IsJSFunction() &&
- name->Equals(isolate()->heap()->prototype_symbol()) &&
- JSFunction::cast(*object)->should_have_prototype()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(object);
- Object* code;
- { MaybeObject* maybe_code =
- isolate()->stub_cache()->ComputeKeyedLoadFunctionPrototype(
- *name, *function);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- set_target(Code::cast(code));
-#ifdef DEBUG
- TraceIC("KeyedLoadIC", name, state, target());
-#endif // DEBUG
- return Accessors::FunctionGetPrototype(*object, 0);
- }
- }
-
- // Check if the name is trivially convertible to an index and get
- // the element or char if so.
- uint32_t index = 0;
- if (name->AsArrayIndex(&index)) {
- HandleScope scope(isolate());
- // Rewrite to the generic keyed load stub.
- if (FLAG_use_ic) set_target(generic_stub());
- return Runtime::GetElementOrCharAt(isolate(), object, index);
- }
-
- // Named lookup.
- LookupResult lookup;
- LookupForRead(*object, *name, &lookup);
-
- // If we did not find a property, check if we need to throw an exception.
- if (!lookup.IsProperty()) {
- if (FLAG_strict || IsContextual(object)) {
- return ReferenceError("not_defined", name);
- }
- }
-
- if (FLAG_use_ic) {
- UpdateCaches(&lookup, state, object, name);
- }
-
- PropertyAttributes attr;
- if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
- // Get the property.
- Object* result;
- { MaybeObject* maybe_result =
- object->GetProperty(*object, &lookup, *name, &attr);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // If the property is not present, check if we need to throw an
- // exception.
- if (attr == ABSENT && IsContextual(object)) {
- return ReferenceError("not_defined", name);
- }
- return result;
- }
-
- return object->GetProperty(*object, &lookup, *name, &attr);
- }
-
- // Do not use ICs for objects that require access checks (including
- // the global object).
- bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
-
- if (use_ic) {
- Code* stub = generic_stub();
- if (state == UNINITIALIZED) {
- if (object->IsString() && key->IsNumber()) {
- stub = string_stub();
- } else if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->HasExternalArrayElements()) {
- MaybeObject* probe =
- isolate()->stub_cache()->ComputeKeyedLoadOrStoreExternalArray(
- *receiver, false, kNonStrictMode);
- stub = probe->IsFailure() ?
- NULL : Code::cast(probe->ToObjectUnchecked());
- } else if (receiver->HasIndexedInterceptor()) {
- stub = indexed_interceptor_stub();
- } else if (key->IsSmi() &&
- receiver->map()->has_fast_elements()) {
- MaybeObject* probe =
- isolate()->stub_cache()->ComputeKeyedLoadSpecialized(*receiver);
- stub = probe->IsFailure() ?
- NULL : Code::cast(probe->ToObjectUnchecked());
- }
- }
- }
- if (stub != NULL) set_target(stub);
-
-#ifdef DEBUG
- TraceIC("KeyedLoadIC", key, state, target());
-#endif // DEBUG
-
- // For JSObjects with fast elements that are not value wrappers
- // and that do not have indexed interceptors, we initialize the
- // inlined fast case (if present) by patching the inlined map
- // check.
- if (object->IsJSObject() &&
- !object->IsJSValue() &&
- !JSObject::cast(*object)->HasIndexedInterceptor() &&
- JSObject::cast(*object)->HasFastElements()) {
- Map* map = JSObject::cast(*object)->map();
- PatchInlinedLoad(address(), map);
- }
- }
-
- // Get the property.
- return Runtime::GetObjectProperty(isolate(), object, key);
-}
-
-
-void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
- Handle<Object> object, Handle<String> name) {
- // Bail out if we didn't find a result.
- if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
-
- if (!object->IsJSObject()) return;
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-
- if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return;
-
- // Compute the code stub for this load.
- MaybeObject* maybe_code = NULL;
- Object* code;
-
- if (state == UNINITIALIZED) {
- // This is the first time we execute this inline cache.
- // Set the target to the pre monomorphic stub to delay
- // setting the monomorphic state.
- maybe_code = pre_monomorphic_stub();
- } else {
- // Compute a monomorphic stub.
- switch (lookup->type()) {
- case FIELD: {
- maybe_code = isolate()->stub_cache()->ComputeKeyedLoadField(
- *name, *receiver, lookup->holder(), lookup->GetFieldIndex());
- break;
- }
- case CONSTANT_FUNCTION: {
- Object* constant = lookup->GetConstantFunction();
- maybe_code = isolate()->stub_cache()->ComputeKeyedLoadConstant(
- *name, *receiver, lookup->holder(), constant);
- break;
- }
- case CALLBACKS: {
- if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
- AccessorInfo* callback =
- AccessorInfo::cast(lookup->GetCallbackObject());
- if (v8::ToCData<Address>(callback->getter()) == 0) return;
- maybe_code = isolate()->stub_cache()->ComputeKeyedLoadCallback(
- *name, *receiver, lookup->holder(), callback);
- break;
- }
- case INTERCEPTOR: {
- ASSERT(HasInterceptorGetter(lookup->holder()));
- maybe_code = isolate()->stub_cache()->ComputeKeyedLoadInterceptor(
- *name, *receiver, lookup->holder());
- break;
- }
- default: {
- // Always rewrite to the generic case so that we do not
- // repeatedly try to rewrite.
- maybe_code = generic_stub();
- break;
- }
- }
- }
-
- // If we're unable to compute the stub (not enough memory left), we
- // simply avoid updating the caches.
- if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
-
- // Patch the call site depending on the state of the cache. Make
- // sure to always rewrite from monomorphic to megamorphic.
- ASSERT(state != MONOMORPHIC_PROTOTYPE_FAILURE);
- if (state == UNINITIALIZED || state == PREMONOMORPHIC) {
- set_target(Code::cast(code));
- } else if (state == MONOMORPHIC) {
- set_target(megamorphic_stub());
- }
-
-#ifdef DEBUG
- TraceIC("KeyedLoadIC", name, state, target());
-#endif
-}
-
-
-static bool StoreICableLookup(LookupResult* lookup) {
- // Bail out if we didn't find a result.
- if (!lookup->IsPropertyOrTransition() || !lookup->IsCacheable()) return false;
-
- // If the property is read-only, we leave the IC in its current
- // state.
- if (lookup->IsReadOnly()) return false;
-
- return true;
-}
-
-
-static bool LookupForWrite(JSObject* object,
- String* name,
- LookupResult* lookup) {
- object->LocalLookup(name, lookup);
- if (!StoreICableLookup(lookup)) {
- return false;
- }
-
- if (lookup->type() == INTERCEPTOR) {
- if (object->GetNamedInterceptor()->setter()->IsUndefined()) {
- object->LocalLookupRealNamedProperty(name, lookup);
- return StoreICableLookup(lookup);
- }
- }
-
- return true;
-}
-
-
-MaybeObject* StoreIC::Store(State state,
- StrictModeFlag strict_mode,
- Handle<Object> object,
- Handle<String> name,
- Handle<Object> value) {
- // If the object is undefined or null it's illegal to try to set any
- // properties on it; throw a TypeError in that case.
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_store", object, name);
- }
-
- if (!object->IsJSObject()) {
- // The length property of string values is read-only. Throw in strict mode.
- if (strict_mode == kStrictMode && object->IsString() &&
- name->Equals(isolate()->heap()->length_symbol())) {
- return TypeError("strict_read_only_property", object, name);
- }
- // Ignore stores where the receiver is not a JSObject.
- return *value;
- }
-
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-
- // Check if the given name is an array index.
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- HandleScope scope(isolate());
- Handle<Object> result = SetElement(receiver, index, value, strict_mode);
- if (result.is_null()) return Failure::Exception();
- return *value;
- }
-
- // Use specialized code for setting the length of arrays.
- if (receiver->IsJSArray()
- && name->Equals(isolate()->heap()->length_symbol())
- && receiver->AllowsSetElementsLength()) {
-#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
-#endif
- Builtins::Name target = (strict_mode == kStrictMode)
- ? Builtins::kStoreIC_ArrayLength_Strict
- : Builtins::kStoreIC_ArrayLength;
- set_target(isolate()->builtins()->builtin(target));
- return receiver->SetProperty(*name, *value, NONE, strict_mode);
- }
-
- // Lookup the property locally in the receiver.
- if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
- LookupResult lookup;
-
- if (LookupForWrite(*receiver, *name, &lookup)) {
- bool can_be_inlined =
- state == UNINITIALIZED &&
- lookup.IsProperty() &&
- lookup.holder() == *receiver &&
- lookup.type() == FIELD &&
- !receiver->IsAccessCheckNeeded();
-
- if (can_be_inlined) {
- Map* map = lookup.holder()->map();
- // Property's index in the properties array. If negative we have
- // an inobject property.
- int index = lookup.GetFieldIndex() - map->inobject_properties();
- if (index < 0) {
- // Index is an offset from the end of the object.
- int offset = map->instance_size() + (index * kPointerSize);
- if (PatchInlinedStore(address(), map, offset)) {
- set_target((strict_mode == kStrictMode)
- ? megamorphic_stub_strict()
- : megamorphic_stub());
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[StoreIC : inline patch %s]\n", *name->ToCString());
- }
-#endif
- return receiver->SetProperty(*name, *value, NONE, strict_mode);
-#ifdef DEBUG
-
- } else {
- if (FLAG_trace_ic) {
- PrintF("[StoreIC : no inline patch %s (patching failed)]\n",
- *name->ToCString());
- }
- }
- } else {
- if (FLAG_trace_ic) {
- PrintF("[StoreIC : no inline patch %s (not inobject)]\n",
- *name->ToCString());
- }
- }
- } else {
- if (state == PREMONOMORPHIC) {
- if (FLAG_trace_ic) {
- PrintF("[StoreIC : no inline patch %s (not inlinable)]\n",
- *name->ToCString());
-#endif
- }
- }
- }
-
- // If no inlined store ic was patched, generate a stub for this
- // store.
- UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
- } else {
- // Strict mode doesn't allow setting non-existent global property
- // or an assignment to a read only property.
- if (strict_mode == kStrictMode) {
- if (lookup.IsFound() && lookup.IsReadOnly()) {
- return TypeError("strict_read_only_property", object, name);
- } else if (IsContextual(object)) {
- return ReferenceError("not_defined", name);
- }
- }
- }
- }
-
- if (receiver->IsJSGlobalProxy()) {
- // Generate a generic stub that goes to the runtime when we see a global
- // proxy as receiver.
- Code* stub = (strict_mode == kStrictMode)
- ? global_proxy_stub_strict()
- : global_proxy_stub();
- if (target() != stub) {
- set_target(stub);
-#ifdef DEBUG
- TraceIC("StoreIC", name, state, target());
-#endif
- }
- }
-
- // Set the property.
- return receiver->SetProperty(*name, *value, NONE, strict_mode);
-}
-
-
-void StoreIC::UpdateCaches(LookupResult* lookup,
- State state,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name,
- Handle<Object> value) {
- // Skip JSGlobalProxy.
- ASSERT(!receiver->IsJSGlobalProxy());
-
- ASSERT(StoreICableLookup(lookup));
-
- // If the property has a non-field type allowing map transitions
- // where there is extra room in the object, we leave the IC in its
- // current state.
- PropertyType type = lookup->type();
-
- // Compute the code stub for this store; used for rewriting to
- // monomorphic state and making sure that the code stub is in the
- // stub cache.
- MaybeObject* maybe_code = NULL;
- Object* code = NULL;
- switch (type) {
- case FIELD: {
- maybe_code = isolate()->stub_cache()->ComputeStoreField(
- *name, *receiver, lookup->GetFieldIndex(), NULL, strict_mode);
- break;
- }
- case MAP_TRANSITION: {
- if (lookup->GetAttributes() != NONE) return;
- HandleScope scope(isolate());
- ASSERT(type == MAP_TRANSITION);
- Handle<Map> transition(lookup->GetTransitionMap());
- int index = transition->PropertyIndexFor(*name);
- maybe_code = isolate()->stub_cache()->ComputeStoreField(
- *name, *receiver, index, *transition, strict_mode);
- break;
- }
- case NORMAL: {
- if (receiver->IsGlobalObject()) {
- // The stub generated for the global object picks the value directly
- // from the property cell. So the property must be directly on the
- // global object.
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
- maybe_code = isolate()->stub_cache()->ComputeStoreGlobal(
- *name, *global, cell, strict_mode);
- } else {
- if (lookup->holder() != *receiver) return;
- maybe_code = isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
- }
- break;
- }
- case CALLBACKS: {
- if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
- if (v8::ToCData<Address>(callback->setter()) == 0) return;
- maybe_code = isolate()->stub_cache()->ComputeStoreCallback(
- *name, *receiver, callback, strict_mode);
- break;
- }
- case INTERCEPTOR: {
- ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined());
- maybe_code = isolate()->stub_cache()->ComputeStoreInterceptor(
- *name, *receiver, strict_mode);
- break;
- }
- default:
- return;
- }
-
- // If we're unable to compute the stub (not enough memory left), we
- // simply avoid updating the caches.
- if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
-
- // Patch the call site depending on the state of the cache.
- if (state == UNINITIALIZED || state == MONOMORPHIC_PROTOTYPE_FAILURE) {
- set_target(Code::cast(code));
- } else if (state == MONOMORPHIC) {
- // Only move to megamorphic if the target changes.
- if (target() != Code::cast(code)) {
- set_target((strict_mode == kStrictMode)
- ? megamorphic_stub_strict()
- : megamorphic_stub());
- }
- } else if (state == MEGAMORPHIC) {
- // Update the stub cache.
- isolate()->stub_cache()->Set(*name,
- receiver->map(),
- Code::cast(code));
- }
-
-#ifdef DEBUG
- TraceIC("StoreIC", name, state, target());
-#endif
-}
-
-
-MaybeObject* KeyedStoreIC::Store(State state,
- StrictModeFlag strict_mode,
- Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value) {
- if (key->IsSymbol()) {
- Handle<String> name = Handle<String>::cast(key);
-
- // If the object is undefined or null it's illegal to try to set any
- // properties on it; throw a TypeError in that case.
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_store", object, name);
- }
-
- // Ignore stores where the receiver is not a JSObject.
- if (!object->IsJSObject()) return *value;
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-
- // Check if the given name is an array index.
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- HandleScope scope(isolate());
- Handle<Object> result = SetElement(receiver, index, value, strict_mode);
- if (result.is_null()) return Failure::Exception();
- return *value;
- }
-
- // Lookup the property locally in the receiver.
- LookupResult lookup;
- receiver->LocalLookup(*name, &lookup);
-
- // Update inline cache and stub cache.
- if (FLAG_use_ic) {
- UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
- }
-
- // Set the property.
- return receiver->SetProperty(*name, *value, NONE, strict_mode);
- }
-
- // Do not use ICs for objects that require access checks (including
- // the global object).
- bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
- ASSERT(!(use_ic && object->IsJSGlobalProxy()));
-
- if (use_ic) {
- Code* stub =
- (strict_mode == kStrictMode) ? generic_stub_strict() : generic_stub();
- if (state == UNINITIALIZED) {
- if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->HasExternalArrayElements()) {
- MaybeObject* probe =
- isolate()->stub_cache()->ComputeKeyedLoadOrStoreExternalArray(
- *receiver, true, strict_mode);
- stub = probe->IsFailure() ?
- NULL : Code::cast(probe->ToObjectUnchecked());
- } else if (key->IsSmi() && receiver->map()->has_fast_elements()) {
- MaybeObject* probe =
- isolate()->stub_cache()->ComputeKeyedStoreSpecialized(
- *receiver, strict_mode);
- stub = probe->IsFailure() ?
- NULL : Code::cast(probe->ToObjectUnchecked());
- }
- }
- }
- if (stub != NULL) set_target(stub);
- }
-
- // Set the property.
- return Runtime::SetObjectProperty(
- isolate(), object , key, value, NONE, strict_mode);
-}
-
-
-void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
- State state,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name,
- Handle<Object> value) {
- // Skip JSGlobalProxy.
- if (receiver->IsJSGlobalProxy()) return;
-
- // Bail out if we didn't find a result.
- if (!lookup->IsPropertyOrTransition() || !lookup->IsCacheable()) return;
-
- // If the property is read-only, we leave the IC in its current
- // state.
- if (lookup->IsReadOnly()) return;
-
- // If the property has a non-field type allowing map transitions
- // where there is extra room in the object, we leave the IC in its
- // current state.
- PropertyType type = lookup->type();
-
- // Compute the code stub for this store; used for rewriting to
- // monomorphic state and making sure that the code stub is in the
- // stub cache.
- MaybeObject* maybe_code = NULL;
- Object* code = NULL;
-
- switch (type) {
- case FIELD: {
- maybe_code = isolate()->stub_cache()->ComputeKeyedStoreField(
- *name, *receiver, lookup->GetFieldIndex(), NULL, strict_mode);
- break;
- }
- case MAP_TRANSITION: {
- if (lookup->GetAttributes() == NONE) {
- HandleScope scope(isolate());
- ASSERT(type == MAP_TRANSITION);
- Handle<Map> transition(lookup->GetTransitionMap());
- int index = transition->PropertyIndexFor(*name);
- maybe_code = isolate()->stub_cache()->ComputeKeyedStoreField(
- *name, *receiver, index, *transition, strict_mode);
- break;
- }
- // fall through.
- }
- default: {
- // Always rewrite to the generic case so that we do not
- // repeatedly try to rewrite.
- maybe_code = (strict_mode == kStrictMode)
- ? generic_stub_strict()
- : generic_stub();
- break;
- }
- }
-
- // If we're unable to compute the stub (not enough memory left), we
- // simply avoid updating the caches.
- if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
-
- // Patch the call site depending on the state of the cache. Make
- // sure to always rewrite from monomorphic to megamorphic.
- ASSERT(state != MONOMORPHIC_PROTOTYPE_FAILURE);
- if (state == UNINITIALIZED || state == PREMONOMORPHIC) {
- set_target(Code::cast(code));
- } else if (state == MONOMORPHIC) {
- set_target((strict_mode == kStrictMode)
- ? megamorphic_stub_strict()
- : megamorphic_stub());
- }
-
-#ifdef DEBUG
- TraceIC("KeyedStoreIC", name, state, target());
-#endif
-}
-
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-static JSFunction* CompileFunction(Isolate* isolate,
- JSFunction* function,
- InLoopFlag in_loop) {
- // Compile now with optimization.
- HandleScope scope(isolate);
- Handle<JSFunction> function_handle(function, isolate);
- if (in_loop == IN_LOOP) {
- CompileLazyInLoop(function_handle, CLEAR_EXCEPTION);
- } else {
- CompileLazy(function_handle, CLEAR_EXCEPTION);
- }
- return *function_handle;
-}
-
-
-// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(MaybeObject*, CallIC_Miss) {
- NoHandleAllocation na;
- ASSERT(args.length() == 2);
- CallIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- MaybeObject* maybe_result = ic.LoadFunction(state,
- extra_ic_state,
- args.at<Object>(0),
- args.at<String>(1));
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
-
- // The first time the inline cache is updated may be the first time the
- // function it references gets called. If the function was lazily compiled
- // then the first call will trigger a compilation. We check for this case
- // and we do the compilation immediately, instead of waiting for the stub
- // currently attached to the JSFunction object to trigger compilation. We
- // do this in the case where we know that the inline cache is inside a loop,
- // because then we know that we want to optimize the function.
- if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
- return result;
- }
- return CompileFunction(isolate,
- JSFunction::cast(result),
- ic.target()->ic_in_loop());
-}
-
-
-// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_Miss) {
- NoHandleAllocation na;
- ASSERT(args.length() == 2);
- KeyedCallIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Object* result;
- { MaybeObject* maybe_result =
- ic.LoadFunction(state, args.at<Object>(0), args.at<Object>(1));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
- return result;
- }
- return CompileFunction(isolate,
- JSFunction::cast(result),
- ic.target()->ic_in_loop());
-}
-
-
-// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) {
- NoHandleAllocation na;
- ASSERT(args.length() == 2);
- LoadIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- return ic.Load(state, args.at<Object>(0), args.at<String>(1));
-}
-
-
-// Used from ic-<arch>.cc
-RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss) {
- NoHandleAllocation na;
- ASSERT(args.length() == 2);
- KeyedLoadIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- return ic.Load(state, args.at<Object>(0), args.at<Object>(1));
-}
-
-
-// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) {
- NoHandleAllocation na;
- ASSERT(args.length() == 3);
- StoreIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- return ic.Store(state,
- static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
- args.at<Object>(0),
- args.at<String>(1),
- args.at<Object>(2));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
- NoHandleAllocation nha;
-
- ASSERT(args.length() == 2);
- JSObject* receiver = JSObject::cast(args[0]);
- Object* len = args[1];
-
- // The generated code should filter out non-Smis before we get here.
- ASSERT(len->IsSmi());
-
- Object* result;
- { MaybeObject* maybe_result = receiver->SetElementsLength(len);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- return len;
-}
-
-
-// Extend storage is called in a store inline cache when
-// it is necessary to extend the properties array of a
-// JSObject.
-RUNTIME_FUNCTION(MaybeObject*, SharedStoreIC_ExtendStorage) {
- NoHandleAllocation na;
- ASSERT(args.length() == 3);
-
- // Convert the parameters
- JSObject* object = JSObject::cast(args[0]);
- Map* transition = Map::cast(args[1]);
- Object* value = args[2];
-
- // Check the object has run out out property space.
- ASSERT(object->HasFastProperties());
- ASSERT(object->map()->unused_property_fields() == 0);
-
- // Expand the properties array.
- FixedArray* old_storage = object->properties();
- int new_unused = transition->unused_property_fields();
- int new_size = old_storage->length() + new_unused + 1;
- Object* result;
- { MaybeObject* maybe_result = old_storage->CopySize(new_size);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- FixedArray* new_storage = FixedArray::cast(result);
- new_storage->set(old_storage->length(), value);
-
- // Set the new property value and do the map transition.
- object->set_properties(new_storage);
- object->set_map(transition);
-
- // Return the stored value.
- return value;
-}
-
-
-// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) {
- NoHandleAllocation na;
- ASSERT(args.length() == 3);
- KeyedStoreIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- return ic.Store(state,
- static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
- args.at<Object>(0),
- args.at<Object>(1),
- args.at<Object>(2));
-}
-
-
-void BinaryOpIC::patch(Code* code) {
- set_target(code);
-}
-
-
-const char* BinaryOpIC::GetName(TypeInfo type_info) {
- switch (type_info) {
- case UNINIT_OR_SMI: return "UninitOrSmi";
- case DEFAULT: return "Default";
- case GENERIC: return "Generic";
- case HEAP_NUMBERS: return "HeapNumbers";
- case STRINGS: return "Strings";
- default: return "Invalid";
- }
-}
-
-
-BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
- switch (type_info) {
- case UNINIT_OR_SMI:
- return UNINITIALIZED;
- case DEFAULT:
- case HEAP_NUMBERS:
- case STRINGS:
- return MONOMORPHIC;
- case GENERIC:
- return MEGAMORPHIC;
- }
- UNREACHABLE();
- return UNINITIALIZED;
-}
-
-
-BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Object* left,
- Object* right) {
- if (left->IsSmi() && right->IsSmi()) {
- // If we have two smi inputs we can reach here because
- // of an overflow. Enter default state.
- return DEFAULT;
- }
-
- if (left->IsNumber() && right->IsNumber()) {
- return HEAP_NUMBERS;
- }
-
- if (left->IsString() || right->IsString()) {
- // Patching for fast string ADD makes sense even if only one of the
- // arguments is a string.
- return STRINGS;
- }
-
- return GENERIC;
-}
-
-
-// defined in code-stubs-<arch>.cc
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info);
-
-
-RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
- ASSERT(args.length() == 5);
-
- HandleScope scope(isolate);
- Handle<Object> left = args.at<Object>(0);
- Handle<Object> right = args.at<Object>(1);
- int key = Smi::cast(args[2])->value();
- Token::Value op = static_cast<Token::Value>(Smi::cast(args[3])->value());
- BinaryOpIC::TypeInfo previous_type =
- static_cast<BinaryOpIC::TypeInfo>(Smi::cast(args[4])->value());
-
- BinaryOpIC::TypeInfo type = BinaryOpIC::GetTypeInfo(*left, *right);
- Handle<Code> code = GetBinaryOpStub(key, type);
- if (!code.is_null()) {
- BinaryOpIC ic(isolate);
- ic.patch(*code);
- if (FLAG_trace_ic) {
- PrintF("[BinaryOpIC (%s->%s)#%s]\n",
- BinaryOpIC::GetName(previous_type),
- BinaryOpIC::GetName(type),
- Token::Name(op));
- }
- }
-
- Handle<JSBuiltinsObject> builtins = Handle<JSBuiltinsObject>(
- isolate->thread_local_top()->context_->builtins(), isolate);
- Object* builtin = NULL; // Initialization calms down the compiler.
- switch (op) {
- case Token::ADD:
- builtin = builtins->javascript_builtin(Builtins::ADD);
- break;
- case Token::SUB:
- builtin = builtins->javascript_builtin(Builtins::SUB);
- break;
- case Token::MUL:
- builtin = builtins->javascript_builtin(Builtins::MUL);
- break;
- case Token::DIV:
- builtin = builtins->javascript_builtin(Builtins::DIV);
- break;
- case Token::MOD:
- builtin = builtins->javascript_builtin(Builtins::MOD);
- break;
- case Token::BIT_AND:
- builtin = builtins->javascript_builtin(Builtins::BIT_AND);
- break;
- case Token::BIT_OR:
- builtin = builtins->javascript_builtin(Builtins::BIT_OR);
- break;
- case Token::BIT_XOR:
- builtin = builtins->javascript_builtin(Builtins::BIT_XOR);
- break;
- case Token::SHR:
- builtin = builtins->javascript_builtin(Builtins::SHR);
- break;
- case Token::SAR:
- builtin = builtins->javascript_builtin(Builtins::SAR);
- break;
- case Token::SHL:
- builtin = builtins->javascript_builtin(Builtins::SHL);
- break;
- default:
- UNREACHABLE();
- }
-
- Handle<JSFunction> builtin_function(JSFunction::cast(builtin),
- isolate);
-
- bool caught_exception;
- Object** builtin_args[] = { right.location() };
- Handle<Object> result = Execution::Call(builtin_function,
- left,
- ARRAY_SIZE(builtin_args),
- builtin_args,
- &caught_exception);
- if (caught_exception) {
- return Failure::Exception();
- }
- return *result;
-}
-
-
-void TRBinaryOpIC::patch(Code* code) {
- set_target(code);
-}
-
-
-const char* TRBinaryOpIC::GetName(TypeInfo type_info) {
- switch (type_info) {
- case UNINITIALIZED: return "Uninitialized";
- case SMI: return "SMI";
- case INT32: return "Int32s";
- case HEAP_NUMBER: return "HeapNumbers";
- case ODDBALL: return "Oddball";
- case STRING: return "Strings";
- case GENERIC: return "Generic";
- default: return "Invalid";
- }
-}
-
-
-TRBinaryOpIC::State TRBinaryOpIC::ToState(TypeInfo type_info) {
- switch (type_info) {
- case UNINITIALIZED:
- return ::v8::internal::UNINITIALIZED;
- case SMI:
- case INT32:
- case HEAP_NUMBER:
- case ODDBALL:
- case STRING:
- return MONOMORPHIC;
- case GENERIC:
- return MEGAMORPHIC;
- }
- UNREACHABLE();
- return ::v8::internal::UNINITIALIZED;
-}
-
-
-TRBinaryOpIC::TypeInfo TRBinaryOpIC::JoinTypes(TRBinaryOpIC::TypeInfo x,
- TRBinaryOpIC::TypeInfo y) {
- if (x == UNINITIALIZED) return y;
- if (y == UNINITIALIZED) return x;
- if (x == STRING && y == STRING) return STRING;
- if (x == STRING || y == STRING) return GENERIC;
- if (x >= y) return x;
- return y;
-}
-
-TRBinaryOpIC::TypeInfo TRBinaryOpIC::GetTypeInfo(Handle<Object> left,
- Handle<Object> right) {
- ::v8::internal::TypeInfo left_type =
- ::v8::internal::TypeInfo::TypeFromValue(left);
- ::v8::internal::TypeInfo right_type =
- ::v8::internal::TypeInfo::TypeFromValue(right);
-
- if (left_type.IsSmi() && right_type.IsSmi()) {
- return SMI;
- }
-
- if (left_type.IsInteger32() && right_type.IsInteger32()) {
- // Platforms with 32-bit Smis have no distinct INT32 type.
- if (kSmiValueSize == 32) return SMI;
- return INT32;
- }
-
- if (left_type.IsNumber() && right_type.IsNumber()) {
- return HEAP_NUMBER;
- }
-
- if (left_type.IsString() || right_type.IsString()) {
- // Patching for fast string ADD makes sense even if only one of the
- // arguments is a string.
- return STRING;
- }
-
- // Check for oddball objects.
- if (left->IsUndefined() && right->IsNumber()) return ODDBALL;
- if (left->IsNumber() && right->IsUndefined()) return ODDBALL;
-
- return GENERIC;
-}
-
-
-// defined in code-stubs-<arch>.cc
-// Only needed to remove dependency of ic.cc on code-stubs-<arch>.h.
-Handle<Code> GetTypeRecordingBinaryOpStub(int key,
- TRBinaryOpIC::TypeInfo type_info,
- TRBinaryOpIC::TypeInfo result_type);
-
-
-RUNTIME_FUNCTION(MaybeObject*, TypeRecordingBinaryOp_Patch) {
- ASSERT(args.length() == 5);
-
- HandleScope scope(isolate);
- Handle<Object> left = args.at<Object>(0);
- Handle<Object> right = args.at<Object>(1);
- int key = Smi::cast(args[2])->value();
- Token::Value op = static_cast<Token::Value>(Smi::cast(args[3])->value());
- TRBinaryOpIC::TypeInfo previous_type =
- static_cast<TRBinaryOpIC::TypeInfo>(Smi::cast(args[4])->value());
-
- TRBinaryOpIC::TypeInfo type = TRBinaryOpIC::GetTypeInfo(left, right);
- type = TRBinaryOpIC::JoinTypes(type, previous_type);
- TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED;
- if (type == TRBinaryOpIC::STRING && op != Token::ADD) {
- type = TRBinaryOpIC::GENERIC;
- }
- if (type == TRBinaryOpIC::SMI &&
- previous_type == TRBinaryOpIC::SMI) {
- if (op == Token::DIV || op == Token::MUL || kSmiValueSize == 32) {
- // Arithmetic on two Smi inputs has yielded a heap number.
- // That is the only way to get here from the Smi stub.
- // With 32-bit Smis, all overflows give heap numbers, but with
- // 31-bit Smis, most operations overflow to int32 results.
- result_type = TRBinaryOpIC::HEAP_NUMBER;
- } else {
- // Other operations on SMIs that overflow yield int32s.
- result_type = TRBinaryOpIC::INT32;
- }
- }
- if (type == TRBinaryOpIC::INT32 &&
- previous_type == TRBinaryOpIC::INT32) {
- // We must be here because an operation on two INT32 types overflowed.
- result_type = TRBinaryOpIC::HEAP_NUMBER;
- }
-
- Handle<Code> code = GetTypeRecordingBinaryOpStub(key, type, result_type);
- if (!code.is_null()) {
- if (FLAG_trace_ic) {
- PrintF("[TypeRecordingBinaryOpIC (%s->(%s->%s))#%s]\n",
- TRBinaryOpIC::GetName(previous_type),
- TRBinaryOpIC::GetName(type),
- TRBinaryOpIC::GetName(result_type),
- Token::Name(op));
- }
- TRBinaryOpIC ic(isolate);
- ic.patch(*code);
-
- // Activate inlined smi code.
- if (previous_type == TRBinaryOpIC::UNINITIALIZED) {
- PatchInlinedSmiCode(ic.address());
- }
- }
-
- Handle<JSBuiltinsObject> builtins = Handle<JSBuiltinsObject>(
- isolate->thread_local_top()->context_->builtins(), isolate);
- Object* builtin = NULL; // Initialization calms down the compiler.
- switch (op) {
- case Token::ADD:
- builtin = builtins->javascript_builtin(Builtins::ADD);
- break;
- case Token::SUB:
- builtin = builtins->javascript_builtin(Builtins::SUB);
- break;
- case Token::MUL:
- builtin = builtins->javascript_builtin(Builtins::MUL);
- break;
- case Token::DIV:
- builtin = builtins->javascript_builtin(Builtins::DIV);
- break;
- case Token::MOD:
- builtin = builtins->javascript_builtin(Builtins::MOD);
- break;
- case Token::BIT_AND:
- builtin = builtins->javascript_builtin(Builtins::BIT_AND);
- break;
- case Token::BIT_OR:
- builtin = builtins->javascript_builtin(Builtins::BIT_OR);
- break;
- case Token::BIT_XOR:
- builtin = builtins->javascript_builtin(Builtins::BIT_XOR);
- break;
- case Token::SHR:
- builtin = builtins->javascript_builtin(Builtins::SHR);
- break;
- case Token::SAR:
- builtin = builtins->javascript_builtin(Builtins::SAR);
- break;
- case Token::SHL:
- builtin = builtins->javascript_builtin(Builtins::SHL);
- break;
- default:
- UNREACHABLE();
- }
-
- Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
-
- bool caught_exception;
- Object** builtin_args[] = { right.location() };
- Handle<Object> result = Execution::Call(builtin_function,
- left,
- ARRAY_SIZE(builtin_args),
- builtin_args,
- &caught_exception);
- if (caught_exception) {
- return Failure::Exception();
- }
- return *result;
-}
-
-
-Handle<Code> CompareIC::GetUninitialized(Token::Value op) {
- ICCompareStub stub(op, UNINITIALIZED);
- return stub.GetCode();
-}
-
-
-CompareIC::State CompareIC::ComputeState(Code* target) {
- int key = target->major_key();
- if (key == CodeStub::Compare) return GENERIC;
- ASSERT(key == CodeStub::CompareIC);
- return static_cast<State>(target->compare_state());
-}
-
-
-const char* CompareIC::GetStateName(State state) {
- switch (state) {
- case UNINITIALIZED: return "UNINITIALIZED";
- case SMIS: return "SMIS";
- case HEAP_NUMBERS: return "HEAP_NUMBERS";
- case OBJECTS: return "OBJECTS";
- case GENERIC: return "GENERIC";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-CompareIC::State CompareIC::TargetState(State state,
- bool has_inlined_smi_code,
- Handle<Object> x,
- Handle<Object> y) {
- if (!has_inlined_smi_code && state != UNINITIALIZED) return GENERIC;
- if (state == UNINITIALIZED && x->IsSmi() && y->IsSmi()) return SMIS;
- if ((state == UNINITIALIZED || (state == SMIS && has_inlined_smi_code)) &&
- x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
- if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return GENERIC;
- if (state == UNINITIALIZED &&
- x->IsJSObject() && y->IsJSObject()) return OBJECTS;
- return GENERIC;
-}
-
-
-// Used from ic_<arch>.cc.
-RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
- NoHandleAllocation na;
- ASSERT(args.length() == 3);
- CompareIC ic(isolate, static_cast<Token::Value>(Smi::cast(args[2])->value()));
- ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
- return ic.target();
-}
-
-
-static const Address IC_utilities[] = {
-#define ADDR(name) FUNCTION_ADDR(name),
- IC_UTIL_LIST(ADDR)
- NULL
-#undef ADDR
-};
-
-
-Address IC::AddressFromUtilityId(IC::UtilityId id) {
- return IC_utilities[id];
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/ic.h b/src/3rdparty/v8/src/ic.h
deleted file mode 100644
index bb8a981..0000000
--- a/src/3rdparty/v8/src/ic.h
+++ /dev/null
@@ -1,675 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IC_H_
-#define V8_IC_H_
-
-#include "macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-
-// IC_UTIL_LIST defines all utility functions called from generated
-// inline caching code. The argument for the macro, ICU, is the function name.
-#define IC_UTIL_LIST(ICU) \
- ICU(LoadIC_Miss) \
- ICU(KeyedLoadIC_Miss) \
- ICU(CallIC_Miss) \
- ICU(KeyedCallIC_Miss) \
- ICU(StoreIC_Miss) \
- ICU(StoreIC_ArrayLength) \
- ICU(SharedStoreIC_ExtendStorage) \
- ICU(KeyedStoreIC_Miss) \
- /* Utilities for IC stubs. */ \
- ICU(LoadCallbackProperty) \
- ICU(StoreCallbackProperty) \
- ICU(LoadPropertyWithInterceptorOnly) \
- ICU(LoadPropertyWithInterceptorForLoad) \
- ICU(LoadPropertyWithInterceptorForCall) \
- ICU(KeyedLoadPropertyWithInterceptor) \
- ICU(StoreInterceptorProperty) \
- ICU(BinaryOp_Patch) \
- ICU(TypeRecordingBinaryOp_Patch) \
- ICU(CompareIC_Miss)
-//
-// IC is the base class for LoadIC, StoreIC, CallIC, KeyedLoadIC,
-// and KeyedStoreIC.
-//
-class IC {
- public:
-
- // The ids for utility called from the generated code.
- enum UtilityId {
- #define CONST_NAME(name) k##name,
- IC_UTIL_LIST(CONST_NAME)
- #undef CONST_NAME
- kUtilityCount
- };
-
- // Looks up the address of the named utility.
- static Address AddressFromUtilityId(UtilityId id);
-
- // Alias the inline cache state type to make the IC code more readable.
- typedef InlineCacheState State;
-
- // The IC code is either invoked with no extra frames on the stack
- // or with a single extra frame for supporting calls.
- enum FrameDepth {
- NO_EXTRA_FRAME = 0,
- EXTRA_CALL_FRAME = 1
- };
-
- // Construct the IC structure with the given number of extra
- // JavaScript frames on the stack.
- IC(FrameDepth depth, Isolate* isolate);
-
- // Get the call-site target; used for determining the state.
- Code* target() { return GetTargetAtAddress(address()); }
- inline Address address();
-
- // Compute the current IC state based on the target stub, receiver and name.
- static State StateFrom(Code* target, Object* receiver, Object* name);
-
- // Clear the inline cache to initial state.
- static void Clear(Address address);
-
- // Computes the reloc info for this IC. This is a fairly expensive
- // operation as it has to search through the heap to find the code
- // object that contains this IC site.
- RelocInfo::Mode ComputeMode();
-
- // Returns if this IC is for contextual (no explicit receiver)
- // access to properties.
- bool IsContextual(Handle<Object> receiver) {
- if (receiver->IsGlobalObject()) {
- return SlowIsContextual();
- } else {
- ASSERT(!SlowIsContextual());
- return false;
- }
- }
-
- bool SlowIsContextual() {
- return ComputeMode() == RelocInfo::CODE_TARGET_CONTEXT;
- }
-
- // Determines which map must be used for keeping the code stub.
- // These methods should not be called with undefined or null.
- static inline InlineCacheHolderFlag GetCodeCacheForObject(Object* object,
- JSObject* holder);
- static inline InlineCacheHolderFlag GetCodeCacheForObject(JSObject* object,
- JSObject* holder);
- static inline JSObject* GetCodeCacheHolder(Object* object,
- InlineCacheHolderFlag holder);
-
- protected:
- Address fp() const { return fp_; }
- Address pc() const { return *pc_address_; }
- Isolate* isolate() const { return isolate_; }
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Computes the address in the original code when the code running is
- // containing break points (calls to DebugBreakXXX builtins).
- Address OriginalCodeAddress();
-#endif
-
- // Set the call-site target.
- void set_target(Code* code) { SetTargetAtAddress(address(), code); }
-
-#ifdef DEBUG
- static void TraceIC(const char* type,
- Handle<Object> name,
- State old_state,
- Code* new_target,
- const char* extra_info = "");
-#endif
-
- Failure* TypeError(const char* type,
- Handle<Object> object,
- Handle<Object> key);
- Failure* ReferenceError(const char* type, Handle<String> name);
-
- // Access the target code for the given IC address.
- static inline Code* GetTargetAtAddress(Address address);
- static inline void SetTargetAtAddress(Address address, Code* target);
-
- private:
- // Frame pointer for the frame that uses (calls) the IC.
- Address fp_;
-
- // All access to the program counter of an IC structure is indirect
- // to make the code GC safe. This feature is crucial since
- // GetProperty and SetProperty are called and they in turn might
- // invoke the garbage collector.
- Address* pc_address_;
-
- Isolate* isolate_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
-};
-
-
-// An IC_Utility encapsulates IC::UtilityId. It exists mainly because you
-// cannot make forward declarations to an enum.
-class IC_Utility {
- public:
- explicit IC_Utility(IC::UtilityId id)
- : address_(IC::AddressFromUtilityId(id)), id_(id) {}
-
- Address address() const { return address_; }
-
- IC::UtilityId id() const { return id_; }
- private:
- Address address_;
- IC::UtilityId id_;
-};
-
-
-class CallICBase: public IC {
- protected:
- CallICBase(Code::Kind kind, Isolate* isolate)
- : IC(EXTRA_CALL_FRAME, isolate), kind_(kind) {}
-
- public:
- MUST_USE_RESULT MaybeObject* LoadFunction(State state,
- Code::ExtraICState extra_ic_state,
- Handle<Object> object,
- Handle<String> name);
-
- protected:
- Code::Kind kind_;
-
- bool TryUpdateExtraICState(LookupResult* lookup,
- Handle<Object> object,
- Code::ExtraICState* extra_ic_state);
-
- MUST_USE_RESULT MaybeObject* ComputeMonomorphicStub(
- LookupResult* lookup,
- State state,
- Code::ExtraICState extra_ic_state,
- Handle<Object> object,
- Handle<String> name);
-
- // Update the inline cache and the global stub cache based on the
- // lookup result.
- void UpdateCaches(LookupResult* lookup,
- State state,
- Code::ExtraICState extra_ic_state,
- Handle<Object> object,
- Handle<String> name);
-
- // Returns a JSFunction if the object can be called as a function,
- // and patches the stack to be ready for the call.
- // Otherwise, it returns the undefined value.
- Object* TryCallAsFunction(Object* object);
-
- void ReceiverToObjectIfRequired(Handle<Object> callee, Handle<Object> object);
-
- static void Clear(Address address, Code* target);
- friend class IC;
-};
-
-
-class CallIC: public CallICBase {
- public:
- explicit CallIC(Isolate* isolate) : CallICBase(Code::CALL_IC, isolate) {
- ASSERT(target()->is_call_stub());
- }
-
- // Code generator routines.
- static void GenerateInitialize(MacroAssembler* masm, int argc) {
- GenerateMiss(masm, argc);
- }
- static void GenerateMiss(MacroAssembler* masm, int argc);
- static void GenerateMegamorphic(MacroAssembler* masm, int argc);
- static void GenerateNormal(MacroAssembler* masm, int argc);
-};
-
-
-class KeyedCallIC: public CallICBase {
- public:
- explicit KeyedCallIC(Isolate* isolate)
- : CallICBase(Code::KEYED_CALL_IC, isolate) {
- ASSERT(target()->is_keyed_call_stub());
- }
-
- MUST_USE_RESULT MaybeObject* LoadFunction(State state,
- Handle<Object> object,
- Handle<Object> key);
-
- // Code generator routines.
- static void GenerateInitialize(MacroAssembler* masm, int argc) {
- GenerateMiss(masm, argc);
- }
- static void GenerateMiss(MacroAssembler* masm, int argc);
- static void GenerateMegamorphic(MacroAssembler* masm, int argc);
- static void GenerateNormal(MacroAssembler* masm, int argc);
-};
-
-
-class LoadIC: public IC {
- public:
- explicit LoadIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {
- ASSERT(target()->is_load_stub());
- }
-
- MUST_USE_RESULT MaybeObject* Load(State state,
- Handle<Object> object,
- Handle<String> name);
-
- // Code generator routines.
- static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
- static void GeneratePreMonomorphic(MacroAssembler* masm) {
- GenerateMiss(masm);
- }
- static void GenerateMiss(MacroAssembler* masm);
- static void GenerateMegamorphic(MacroAssembler* masm);
- static void GenerateNormal(MacroAssembler* masm);
-
- // Specialized code generator routines.
- static void GenerateArrayLength(MacroAssembler* masm);
- static void GenerateStringLength(MacroAssembler* masm,
- bool support_wrappers);
- static void GenerateFunctionPrototype(MacroAssembler* masm);
-
- // Clear the use of the inlined version.
- static void ClearInlinedVersion(Address address);
-
- // The offset from the inlined patch site to the start of the
- // inlined load instruction. It is architecture-dependent, and not
- // used on ARM.
- static const int kOffsetToLoadInstruction;
-
- private:
- // Update the inline cache and the global stub cache based on the
- // lookup result.
- void UpdateCaches(LookupResult* lookup,
- State state,
- Handle<Object> object,
- Handle<String> name);
-
- // Stub accessors.
- Code* megamorphic_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kLoadIC_Megamorphic);
- }
- static Code* initialize_stub() {
- return Isolate::Current()->builtins()->builtin(
- Builtins::kLoadIC_Initialize);
- }
- Code* pre_monomorphic_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kLoadIC_PreMonomorphic);
- }
-
- static void Clear(Address address, Code* target);
-
- static bool PatchInlinedLoad(Address address, Object* map, int index);
-
- static bool PatchInlinedContextualLoad(Address address,
- Object* map,
- Object* cell,
- bool is_dont_delete);
-
- friend class IC;
-};
-
-
-class KeyedLoadIC: public IC {
- public:
- explicit KeyedLoadIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {
- ASSERT(target()->is_keyed_load_stub());
- }
-
- MUST_USE_RESULT MaybeObject* Load(State state,
- Handle<Object> object,
- Handle<Object> key);
-
- // Code generator routines.
- static void GenerateMiss(MacroAssembler* masm);
- static void GenerateRuntimeGetProperty(MacroAssembler* masm);
- static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
- static void GeneratePreMonomorphic(MacroAssembler* masm) {
- GenerateMiss(masm);
- }
- static void GenerateGeneric(MacroAssembler* masm);
- static void GenerateString(MacroAssembler* masm);
-
- static void GenerateIndexedInterceptor(MacroAssembler* masm);
-
- // Clear the use of the inlined version.
- static void ClearInlinedVersion(Address address);
-
- // Bit mask to be tested against bit field for the cases when
- // generic stub should go into slow case.
- // Access check is necessary explicitly since generic stub does not perform
- // map checks.
- static const int kSlowCaseBitFieldMask =
- (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
-
- private:
- // Update the inline cache.
- void UpdateCaches(LookupResult* lookup,
- State state,
- Handle<Object> object,
- Handle<String> name);
-
- // Stub accessors.
- static Code* initialize_stub() {
- return Isolate::Current()->builtins()->builtin(
- Builtins::kKeyedLoadIC_Initialize);
- }
- Code* megamorphic_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_Generic);
- }
- Code* generic_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_Generic);
- }
- Code* pre_monomorphic_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_PreMonomorphic);
- }
- Code* string_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_String);
- }
-
- Code* indexed_interceptor_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_IndexedInterceptor);
- }
-
- static void Clear(Address address, Code* target);
-
- // Support for patching the map that is checked in an inlined
- // version of keyed load.
- static bool PatchInlinedLoad(Address address, Object* map);
-
- friend class IC;
-};
-
-
-class StoreIC: public IC {
- public:
- explicit StoreIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {
- ASSERT(target()->is_store_stub());
- }
-
- MUST_USE_RESULT MaybeObject* Store(State state,
- StrictModeFlag strict_mode,
- Handle<Object> object,
- Handle<String> name,
- Handle<Object> value);
-
- // Code generators for stub routines. Only called once at startup.
- static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
- static void GenerateMiss(MacroAssembler* masm);
- static void GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode);
- static void GenerateArrayLength(MacroAssembler* masm);
- static void GenerateNormal(MacroAssembler* masm);
- static void GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode);
-
- // Clear the use of an inlined version.
- static void ClearInlinedVersion(Address address);
-
- // The offset from the inlined patch site to the start of the
- // inlined store instruction.
- static const int kOffsetToStoreInstruction;
-
- private:
- // Update the inline cache and the global stub cache based on the
- // lookup result.
- void UpdateCaches(LookupResult* lookup,
- State state,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name,
- Handle<Object> value);
-
- void set_target(Code* code) {
- // Strict mode must be preserved across IC patching.
- ASSERT((code->extra_ic_state() & kStrictMode) ==
- (target()->extra_ic_state() & kStrictMode));
- IC::set_target(code);
- }
-
- // Stub accessors.
- Code* megamorphic_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kStoreIC_Megamorphic);
- }
- Code* megamorphic_stub_strict() {
- return isolate()->builtins()->builtin(
- Builtins::kStoreIC_Megamorphic_Strict);
- }
- static Code* initialize_stub() {
- return Isolate::Current()->builtins()->builtin(
- Builtins::kStoreIC_Initialize);
- }
- static Code* initialize_stub_strict() {
- return Isolate::Current()->builtins()->builtin(
- Builtins::kStoreIC_Initialize_Strict);
- }
- Code* global_proxy_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kStoreIC_GlobalProxy);
- }
- Code* global_proxy_stub_strict() {
- return isolate()->builtins()->builtin(
- Builtins::kStoreIC_GlobalProxy_Strict);
- }
-
- static void Clear(Address address, Code* target);
-
- // Support for patching the index and the map that is checked in an
- // inlined version of the named store.
- static bool PatchInlinedStore(Address address, Object* map, int index);
-
- friend class IC;
-};
-
-
-class KeyedStoreIC: public IC {
- public:
- explicit KeyedStoreIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
-
- MUST_USE_RESULT MaybeObject* Store(State state,
- StrictModeFlag strict_mode,
- Handle<Object> object,
- Handle<Object> name,
- Handle<Object> value);
-
- // Code generators for stub routines. Only called once at startup.
- static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
- static void GenerateMiss(MacroAssembler* masm);
- static void GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode);
- static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode);
-
- // Clear the inlined version so the IC is always hit.
- static void ClearInlinedVersion(Address address);
-
- // Restore the inlined version so the fast case can get hit.
- static void RestoreInlinedVersion(Address address);
-
- private:
- // Update the inline cache.
- void UpdateCaches(LookupResult* lookup,
- State state,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name,
- Handle<Object> value);
-
- void set_target(Code* code) {
- // Strict mode must be preserved across IC patching.
- ASSERT((code->extra_ic_state() & kStrictMode) ==
- (target()->extra_ic_state() & kStrictMode));
- IC::set_target(code);
- }
-
- // Stub accessors.
- static Code* initialize_stub() {
- return Isolate::Current()->builtins()->builtin(
- Builtins::kKeyedStoreIC_Initialize);
- }
- Code* megamorphic_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kKeyedStoreIC_Generic);
- }
- static Code* initialize_stub_strict() {
- return Isolate::Current()->builtins()->builtin(
- Builtins::kKeyedStoreIC_Initialize_Strict);
- }
- Code* megamorphic_stub_strict() {
- return isolate()->builtins()->builtin(
- Builtins::kKeyedStoreIC_Generic_Strict);
- }
- Code* generic_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kKeyedStoreIC_Generic);
- }
- Code* generic_stub_strict() {
- return isolate()->builtins()->builtin(
- Builtins::kKeyedStoreIC_Generic_Strict);
- }
-
- static void Clear(Address address, Code* target);
-
- // Support for patching the map that is checked in an inlined
- // version of keyed store.
- // The address is the patch point for the IC call
- // (Assembler::kCallTargetAddressOffset before the end of
- // the call/return address).
- // The map is the new map that the inlined code should check against.
- static bool PatchInlinedStore(Address address, Object* map);
-
- friend class IC;
-};
-
-
-class BinaryOpIC: public IC {
- public:
-
- enum TypeInfo {
- UNINIT_OR_SMI,
- DEFAULT, // Initial state. When first executed, patches to one
- // of the following states depending on the operands types.
- HEAP_NUMBERS, // Both arguments are HeapNumbers.
- STRINGS, // At least one of the arguments is String.
- GENERIC // Non-specialized case (processes any type combination).
- };
-
- explicit BinaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
-
- void patch(Code* code);
-
- static const char* GetName(TypeInfo type_info);
-
- static State ToState(TypeInfo type_info);
-
- static TypeInfo GetTypeInfo(Object* left, Object* right);
-};
-
-
-// Type Recording BinaryOpIC, that records the types of the inputs and outputs.
-class TRBinaryOpIC: public IC {
- public:
-
- enum TypeInfo {
- UNINITIALIZED,
- SMI,
- INT32,
- HEAP_NUMBER,
- ODDBALL,
- STRING, // Only used for addition operation. At least one string operand.
- GENERIC
- };
-
- explicit TRBinaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
-
- void patch(Code* code);
-
- static const char* GetName(TypeInfo type_info);
-
- static State ToState(TypeInfo type_info);
-
- static TypeInfo GetTypeInfo(Handle<Object> left, Handle<Object> right);
-
- static TypeInfo JoinTypes(TypeInfo x, TypeInfo y);
-};
-
-
-class CompareIC: public IC {
- public:
- enum State {
- UNINITIALIZED,
- SMIS,
- HEAP_NUMBERS,
- OBJECTS,
- GENERIC
- };
-
- CompareIC(Isolate* isolate, Token::Value op)
- : IC(EXTRA_CALL_FRAME, isolate), op_(op) { }
-
- // Update the inline cache for the given operands.
- void UpdateCaches(Handle<Object> x, Handle<Object> y);
-
- // Factory method for getting an uninitialized compare stub.
- static Handle<Code> GetUninitialized(Token::Value op);
-
- // Helper function for computing the condition for a compare operation.
- static Condition ComputeCondition(Token::Value op);
-
- // Helper function for determining the state of a compare IC.
- static State ComputeState(Code* target);
-
- static const char* GetStateName(State state);
-
- private:
- State TargetState(State state, bool has_inlined_smi_code,
- Handle<Object> x, Handle<Object> y);
-
- bool strict() const { return op_ == Token::EQ_STRICT; }
- Condition GetCondition() const { return ComputeCondition(op_); }
- State GetState() { return ComputeState(target()); }
-
- Token::Value op_;
-};
-
-// Helper for TRBinaryOpIC and CompareIC.
-void PatchInlinedSmiCode(Address address);
-
-} } // namespace v8::internal
-
-#endif // V8_IC_H_
diff --git a/src/3rdparty/v8/src/inspector.cc b/src/3rdparty/v8/src/inspector.cc
deleted file mode 100644
index 8fb80f1..0000000
--- a/src/3rdparty/v8/src/inspector.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "v8.h"
-#include "inspector.h"
-
-
-namespace v8 {
-namespace internal {
-
-#ifdef INSPECTOR
-
-//============================================================================
-// The Inspector.
-
-void Inspector::DumpObjectType(FILE* out, Object *obj, bool print_more) {
- // Dump the object pointer.
- OS::FPrint(out, "%p:", reinterpret_cast<void*>(obj));
- if (obj->IsHeapObject()) {
- HeapObject *hobj = HeapObject::cast(obj);
- OS::FPrint(out, " size %d :", hobj->Size());
- }
-
- // Dump each object classification that matches this object.
-#define FOR_EACH_TYPE(type) \
- if (obj->Is##type()) { \
- OS::FPrint(out, " %s", #type); \
- }
- OBJECT_TYPE_LIST(FOR_EACH_TYPE)
- HEAP_OBJECT_TYPE_LIST(FOR_EACH_TYPE)
-#undef FOR_EACH_TYPE
-}
-
-
-#endif // INSPECTOR
-
-} } // namespace v8::internal
-
diff --git a/src/3rdparty/v8/src/inspector.h b/src/3rdparty/v8/src/inspector.h
deleted file mode 100644
index f8b3042..0000000
--- a/src/3rdparty/v8/src/inspector.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_INSPECTOR_H_
-#define V8_INSPECTOR_H_
-
-// Only build this code if we're configured with the INSPECTOR.
-#ifdef INSPECTOR
-
-#include "v8.h"
-
-#include "objects.h"
-
-namespace v8 {
-namespace internal {
-
-class Inspector {
- public:
-
- static void DumpObjectType(FILE* out, Object *obj, bool print_more);
- static void DumpObjectType(FILE* out, Object *obj) {
- DumpObjectType(out, obj, false);
- }
- static void DumpObjectType(Object *obj, bool print_more) {
- DumpObjectType(stdout, obj, print_more);
- }
- static void DumpObjectType(Object *obj) {
- DumpObjectType(stdout, obj, false);
- }
-};
-
-} } // namespace v8::internal
-
-#endif // INSPECTOR
-
-#endif // V8_INSPECTOR_H_
-
diff --git a/src/3rdparty/v8/src/interpreter-irregexp.cc b/src/3rdparty/v8/src/interpreter-irregexp.cc
deleted file mode 100644
index 1c6c52c..0000000
--- a/src/3rdparty/v8/src/interpreter-irregexp.cc
+++ /dev/null
@@ -1,659 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// A simple interpreter for the Irregexp byte code.
-
-
-#include "v8.h"
-#include "unicode.h"
-#include "utils.h"
-#include "ast.h"
-#include "bytecodes-irregexp.h"
-#include "interpreter-irregexp.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-typedef unibrow::Mapping<unibrow::Ecma262Canonicalize> Canonicalize;
-
-static bool BackRefMatchesNoCase(Canonicalize* interp_canonicalize,
- int from,
- int current,
- int len,
- Vector<const uc16> subject) {
- for (int i = 0; i < len; i++) {
- unibrow::uchar old_char = subject[from++];
- unibrow::uchar new_char = subject[current++];
- if (old_char == new_char) continue;
- unibrow::uchar old_string[1] = { old_char };
- unibrow::uchar new_string[1] = { new_char };
- interp_canonicalize->get(old_char, '\0', old_string);
- interp_canonicalize->get(new_char, '\0', new_string);
- if (old_string[0] != new_string[0]) {
- return false;
- }
- }
- return true;
-}
-
-
-static bool BackRefMatchesNoCase(Canonicalize* interp_canonicalize,
- int from,
- int current,
- int len,
- Vector<const char> subject) {
- for (int i = 0; i < len; i++) {
- unsigned int old_char = subject[from++];
- unsigned int new_char = subject[current++];
- if (old_char == new_char) continue;
- if (old_char - 'A' <= 'Z' - 'A') old_char |= 0x20;
- if (new_char - 'A' <= 'Z' - 'A') new_char |= 0x20;
- if (old_char != new_char) return false;
- }
- return true;
-}
-
-
-#ifdef DEBUG
-static void TraceInterpreter(const byte* code_base,
- const byte* pc,
- int stack_depth,
- int current_position,
- uint32_t current_char,
- int bytecode_length,
- const char* bytecode_name) {
- if (FLAG_trace_regexp_bytecodes) {
- bool printable = (current_char < 127 && current_char >= 32);
- const char* format =
- printable ?
- "pc = %02x, sp = %d, curpos = %d, curchar = %08x (%c), bc = %s" :
- "pc = %02x, sp = %d, curpos = %d, curchar = %08x .%c., bc = %s";
- PrintF(format,
- pc - code_base,
- stack_depth,
- current_position,
- current_char,
- printable ? current_char : '.',
- bytecode_name);
- for (int i = 0; i < bytecode_length; i++) {
- printf(", %02x", pc[i]);
- }
- printf(" ");
- for (int i = 1; i < bytecode_length; i++) {
- unsigned char b = pc[i];
- if (b < 127 && b >= 32) {
- printf("%c", b);
- } else {
- printf(".");
- }
- }
- printf("\n");
- }
-}
-
-
-#define BYTECODE(name) \
- case BC_##name: \
- TraceInterpreter(code_base, \
- pc, \
- static_cast<int>(backtrack_sp - backtrack_stack_base), \
- current, \
- current_char, \
- BC_##name##_LENGTH, \
- #name);
-#else
-#define BYTECODE(name) \
- case BC_##name:
-#endif
-
-
-static int32_t Load32Aligned(const byte* pc) {
- ASSERT((reinterpret_cast<intptr_t>(pc) & 3) == 0);
- return *reinterpret_cast<const int32_t *>(pc);
-}
-
-
-static int32_t Load16Aligned(const byte* pc) {
- ASSERT((reinterpret_cast<intptr_t>(pc) & 1) == 0);
- return *reinterpret_cast<const uint16_t *>(pc);
-}
-
-
-// A simple abstraction over the backtracking stack used by the interpreter.
-// This backtracking stack does not grow automatically, but it ensures that the
-// the memory held by the stack is released or remembered in a cache if the
-// matching terminates.
-class BacktrackStack {
- public:
- explicit BacktrackStack(Isolate* isolate) : isolate_(isolate) {
- if (isolate->irregexp_interpreter_backtrack_stack_cache() != NULL) {
- // If the cache is not empty reuse the previously allocated stack.
- data_ = isolate->irregexp_interpreter_backtrack_stack_cache();
- isolate->set_irregexp_interpreter_backtrack_stack_cache(NULL);
- } else {
- // Cache was empty. Allocate a new backtrack stack.
- data_ = NewArray<int>(kBacktrackStackSize);
- }
- }
-
- ~BacktrackStack() {
- if (isolate_->irregexp_interpreter_backtrack_stack_cache() == NULL) {
- // The cache is empty. Keep this backtrack stack around.
- isolate_->set_irregexp_interpreter_backtrack_stack_cache(data_);
- } else {
- // A backtrack stack was already cached, just release this one.
- DeleteArray(data_);
- }
- }
-
- int* data() const { return data_; }
-
- int max_size() const { return kBacktrackStackSize; }
-
- private:
- static const int kBacktrackStackSize = 10000;
-
- int* data_;
- Isolate* isolate_;
-
- DISALLOW_COPY_AND_ASSIGN(BacktrackStack);
-};
-
-
-template <typename Char>
-static bool RawMatch(Isolate* isolate,
- const byte* code_base,
- Vector<const Char> subject,
- int* registers,
- int current,
- uint32_t current_char) {
- const byte* pc = code_base;
- // BacktrackStack ensures that the memory allocated for the backtracking stack
- // is returned to the system or cached if there is no stack being cached at
- // the moment.
- BacktrackStack backtrack_stack(isolate);
- int* backtrack_stack_base = backtrack_stack.data();
- int* backtrack_sp = backtrack_stack_base;
- int backtrack_stack_space = backtrack_stack.max_size();
-#ifdef DEBUG
- if (FLAG_trace_regexp_bytecodes) {
- PrintF("\n\nStart bytecode interpreter\n\n");
- }
-#endif
- while (true) {
- int32_t insn = Load32Aligned(pc);
- switch (insn & BYTECODE_MASK) {
- BYTECODE(BREAK)
- UNREACHABLE();
- return false;
- BYTECODE(PUSH_CP)
- if (--backtrack_stack_space < 0) {
- return false; // No match on backtrack stack overflow.
- }
- *backtrack_sp++ = current;
- pc += BC_PUSH_CP_LENGTH;
- break;
- BYTECODE(PUSH_BT)
- if (--backtrack_stack_space < 0) {
- return false; // No match on backtrack stack overflow.
- }
- *backtrack_sp++ = Load32Aligned(pc + 4);
- pc += BC_PUSH_BT_LENGTH;
- break;
- BYTECODE(PUSH_REGISTER)
- if (--backtrack_stack_space < 0) {
- return false; // No match on backtrack stack overflow.
- }
- *backtrack_sp++ = registers[insn >> BYTECODE_SHIFT];
- pc += BC_PUSH_REGISTER_LENGTH;
- break;
- BYTECODE(SET_REGISTER)
- registers[insn >> BYTECODE_SHIFT] = Load32Aligned(pc + 4);
- pc += BC_SET_REGISTER_LENGTH;
- break;
- BYTECODE(ADVANCE_REGISTER)
- registers[insn >> BYTECODE_SHIFT] += Load32Aligned(pc + 4);
- pc += BC_ADVANCE_REGISTER_LENGTH;
- break;
- BYTECODE(SET_REGISTER_TO_CP)
- registers[insn >> BYTECODE_SHIFT] = current + Load32Aligned(pc + 4);
- pc += BC_SET_REGISTER_TO_CP_LENGTH;
- break;
- BYTECODE(SET_CP_TO_REGISTER)
- current = registers[insn >> BYTECODE_SHIFT];
- pc += BC_SET_CP_TO_REGISTER_LENGTH;
- break;
- BYTECODE(SET_REGISTER_TO_SP)
- registers[insn >> BYTECODE_SHIFT] =
- static_cast<int>(backtrack_sp - backtrack_stack_base);
- pc += BC_SET_REGISTER_TO_SP_LENGTH;
- break;
- BYTECODE(SET_SP_TO_REGISTER)
- backtrack_sp = backtrack_stack_base + registers[insn >> BYTECODE_SHIFT];
- backtrack_stack_space = backtrack_stack.max_size() -
- static_cast<int>(backtrack_sp - backtrack_stack_base);
- pc += BC_SET_SP_TO_REGISTER_LENGTH;
- break;
- BYTECODE(POP_CP)
- backtrack_stack_space++;
- --backtrack_sp;
- current = *backtrack_sp;
- pc += BC_POP_CP_LENGTH;
- break;
- BYTECODE(POP_BT)
- backtrack_stack_space++;
- --backtrack_sp;
- pc = code_base + *backtrack_sp;
- break;
- BYTECODE(POP_REGISTER)
- backtrack_stack_space++;
- --backtrack_sp;
- registers[insn >> BYTECODE_SHIFT] = *backtrack_sp;
- pc += BC_POP_REGISTER_LENGTH;
- break;
- BYTECODE(FAIL)
- return false;
- BYTECODE(SUCCEED)
- return true;
- BYTECODE(ADVANCE_CP)
- current += insn >> BYTECODE_SHIFT;
- pc += BC_ADVANCE_CP_LENGTH;
- break;
- BYTECODE(GOTO)
- pc = code_base + Load32Aligned(pc + 4);
- break;
- BYTECODE(ADVANCE_CP_AND_GOTO)
- current += insn >> BYTECODE_SHIFT;
- pc = code_base + Load32Aligned(pc + 4);
- break;
- BYTECODE(CHECK_GREEDY)
- if (current == backtrack_sp[-1]) {
- backtrack_sp--;
- backtrack_stack_space++;
- pc = code_base + Load32Aligned(pc + 4);
- } else {
- pc += BC_CHECK_GREEDY_LENGTH;
- }
- break;
- BYTECODE(LOAD_CURRENT_CHAR) {
- int pos = current + (insn >> BYTECODE_SHIFT);
- if (pos >= subject.length()) {
- pc = code_base + Load32Aligned(pc + 4);
- } else {
- current_char = subject[pos];
- pc += BC_LOAD_CURRENT_CHAR_LENGTH;
- }
- break;
- }
- BYTECODE(LOAD_CURRENT_CHAR_UNCHECKED) {
- int pos = current + (insn >> BYTECODE_SHIFT);
- current_char = subject[pos];
- pc += BC_LOAD_CURRENT_CHAR_UNCHECKED_LENGTH;
- break;
- }
- BYTECODE(LOAD_2_CURRENT_CHARS) {
- int pos = current + (insn >> BYTECODE_SHIFT);
- if (pos + 2 > subject.length()) {
- pc = code_base + Load32Aligned(pc + 4);
- } else {
- Char next = subject[pos + 1];
- current_char =
- (subject[pos] | (next << (kBitsPerByte * sizeof(Char))));
- pc += BC_LOAD_2_CURRENT_CHARS_LENGTH;
- }
- break;
- }
- BYTECODE(LOAD_2_CURRENT_CHARS_UNCHECKED) {
- int pos = current + (insn >> BYTECODE_SHIFT);
- Char next = subject[pos + 1];
- current_char = (subject[pos] | (next << (kBitsPerByte * sizeof(Char))));
- pc += BC_LOAD_2_CURRENT_CHARS_UNCHECKED_LENGTH;
- break;
- }
- BYTECODE(LOAD_4_CURRENT_CHARS) {
- ASSERT(sizeof(Char) == 1);
- int pos = current + (insn >> BYTECODE_SHIFT);
- if (pos + 4 > subject.length()) {
- pc = code_base + Load32Aligned(pc + 4);
- } else {
- Char next1 = subject[pos + 1];
- Char next2 = subject[pos + 2];
- Char next3 = subject[pos + 3];
- current_char = (subject[pos] |
- (next1 << 8) |
- (next2 << 16) |
- (next3 << 24));
- pc += BC_LOAD_4_CURRENT_CHARS_LENGTH;
- }
- break;
- }
- BYTECODE(LOAD_4_CURRENT_CHARS_UNCHECKED) {
- ASSERT(sizeof(Char) == 1);
- int pos = current + (insn >> BYTECODE_SHIFT);
- Char next1 = subject[pos + 1];
- Char next2 = subject[pos + 2];
- Char next3 = subject[pos + 3];
- current_char = (subject[pos] |
- (next1 << 8) |
- (next2 << 16) |
- (next3 << 24));
- pc += BC_LOAD_4_CURRENT_CHARS_UNCHECKED_LENGTH;
- break;
- }
- BYTECODE(CHECK_4_CHARS) {
- uint32_t c = Load32Aligned(pc + 4);
- if (c == current_char) {
- pc = code_base + Load32Aligned(pc + 8);
- } else {
- pc += BC_CHECK_4_CHARS_LENGTH;
- }
- break;
- }
- BYTECODE(CHECK_CHAR) {
- uint32_t c = (insn >> BYTECODE_SHIFT);
- if (c == current_char) {
- pc = code_base + Load32Aligned(pc + 4);
- } else {
- pc += BC_CHECK_CHAR_LENGTH;
- }
- break;
- }
- BYTECODE(CHECK_NOT_4_CHARS) {
- uint32_t c = Load32Aligned(pc + 4);
- if (c != current_char) {
- pc = code_base + Load32Aligned(pc + 8);
- } else {
- pc += BC_CHECK_NOT_4_CHARS_LENGTH;
- }
- break;
- }
- BYTECODE(CHECK_NOT_CHAR) {
- uint32_t c = (insn >> BYTECODE_SHIFT);
- if (c != current_char) {
- pc = code_base + Load32Aligned(pc + 4);
- } else {
- pc += BC_CHECK_NOT_CHAR_LENGTH;
- }
- break;
- }
- BYTECODE(AND_CHECK_4_CHARS) {
- uint32_t c = Load32Aligned(pc + 4);
- if (c == (current_char & Load32Aligned(pc + 8))) {
- pc = code_base + Load32Aligned(pc + 12);
- } else {
- pc += BC_AND_CHECK_4_CHARS_LENGTH;
- }
- break;
- }
- BYTECODE(AND_CHECK_CHAR) {
- uint32_t c = (insn >> BYTECODE_SHIFT);
- if (c == (current_char & Load32Aligned(pc + 4))) {
- pc = code_base + Load32Aligned(pc + 8);
- } else {
- pc += BC_AND_CHECK_CHAR_LENGTH;
- }
- break;
- }
- BYTECODE(AND_CHECK_NOT_4_CHARS) {
- uint32_t c = Load32Aligned(pc + 4);
- if (c != (current_char & Load32Aligned(pc + 8))) {
- pc = code_base + Load32Aligned(pc + 12);
- } else {
- pc += BC_AND_CHECK_NOT_4_CHARS_LENGTH;
- }
- break;
- }
- BYTECODE(AND_CHECK_NOT_CHAR) {
- uint32_t c = (insn >> BYTECODE_SHIFT);
- if (c != (current_char & Load32Aligned(pc + 4))) {
- pc = code_base + Load32Aligned(pc + 8);
- } else {
- pc += BC_AND_CHECK_NOT_CHAR_LENGTH;
- }
- break;
- }
- BYTECODE(MINUS_AND_CHECK_NOT_CHAR) {
- uint32_t c = (insn >> BYTECODE_SHIFT);
- uint32_t minus = Load16Aligned(pc + 4);
- uint32_t mask = Load16Aligned(pc + 6);
- if (c != ((current_char - minus) & mask)) {
- pc = code_base + Load32Aligned(pc + 8);
- } else {
- pc += BC_MINUS_AND_CHECK_NOT_CHAR_LENGTH;
- }
- break;
- }
- BYTECODE(CHECK_LT) {
- uint32_t limit = (insn >> BYTECODE_SHIFT);
- if (current_char < limit) {
- pc = code_base + Load32Aligned(pc + 4);
- } else {
- pc += BC_CHECK_LT_LENGTH;
- }
- break;
- }
- BYTECODE(CHECK_GT) {
- uint32_t limit = (insn >> BYTECODE_SHIFT);
- if (current_char > limit) {
- pc = code_base + Load32Aligned(pc + 4);
- } else {
- pc += BC_CHECK_GT_LENGTH;
- }
- break;
- }
- BYTECODE(CHECK_REGISTER_LT)
- if (registers[insn >> BYTECODE_SHIFT] < Load32Aligned(pc + 4)) {
- pc = code_base + Load32Aligned(pc + 8);
- } else {
- pc += BC_CHECK_REGISTER_LT_LENGTH;
- }
- break;
- BYTECODE(CHECK_REGISTER_GE)
- if (registers[insn >> BYTECODE_SHIFT] >= Load32Aligned(pc + 4)) {
- pc = code_base + Load32Aligned(pc + 8);
- } else {
- pc += BC_CHECK_REGISTER_GE_LENGTH;
- }
- break;
- BYTECODE(CHECK_REGISTER_EQ_POS)
- if (registers[insn >> BYTECODE_SHIFT] == current) {
- pc = code_base + Load32Aligned(pc + 4);
- } else {
- pc += BC_CHECK_REGISTER_EQ_POS_LENGTH;
- }
- break;
- BYTECODE(LOOKUP_MAP1) {
- // Look up character in a bitmap. If we find a 0, then jump to the
- // location at pc + 8. Otherwise fall through!
- int index = current_char - (insn >> BYTECODE_SHIFT);
- byte map = code_base[Load32Aligned(pc + 4) + (index >> 3)];
- map = ((map >> (index & 7)) & 1);
- if (map == 0) {
- pc = code_base + Load32Aligned(pc + 8);
- } else {
- pc += BC_LOOKUP_MAP1_LENGTH;
- }
- break;
- }
- BYTECODE(LOOKUP_MAP2) {
- // Look up character in a half-nibble map. If we find 00, then jump to
- // the location at pc + 8. If we find 01 then jump to location at
- // pc + 11, etc.
- int index = (current_char - (insn >> BYTECODE_SHIFT)) << 1;
- byte map = code_base[Load32Aligned(pc + 3) + (index >> 3)];
- map = ((map >> (index & 7)) & 3);
- if (map < 2) {
- if (map == 0) {
- pc = code_base + Load32Aligned(pc + 8);
- } else {
- pc = code_base + Load32Aligned(pc + 12);
- }
- } else {
- if (map == 2) {
- pc = code_base + Load32Aligned(pc + 16);
- } else {
- pc = code_base + Load32Aligned(pc + 20);
- }
- }
- break;
- }
- BYTECODE(LOOKUP_MAP8) {
- // Look up character in a byte map. Use the byte as an index into a
- // table that follows this instruction immediately.
- int index = current_char - (insn >> BYTECODE_SHIFT);
- byte map = code_base[Load32Aligned(pc + 4) + index];
- const byte* new_pc = code_base + Load32Aligned(pc + 8) + (map << 2);
- pc = code_base + Load32Aligned(new_pc);
- break;
- }
- BYTECODE(LOOKUP_HI_MAP8) {
- // Look up high byte of this character in a byte map. Use the byte as
- // an index into a table that follows this instruction immediately.
- int index = (current_char >> 8) - (insn >> BYTECODE_SHIFT);
- byte map = code_base[Load32Aligned(pc + 4) + index];
- const byte* new_pc = code_base + Load32Aligned(pc + 8) + (map << 2);
- pc = code_base + Load32Aligned(new_pc);
- break;
- }
- BYTECODE(CHECK_NOT_REGS_EQUAL)
- if (registers[insn >> BYTECODE_SHIFT] ==
- registers[Load32Aligned(pc + 4)]) {
- pc += BC_CHECK_NOT_REGS_EQUAL_LENGTH;
- } else {
- pc = code_base + Load32Aligned(pc + 8);
- }
- break;
- BYTECODE(CHECK_NOT_BACK_REF) {
- int from = registers[insn >> BYTECODE_SHIFT];
- int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
- if (from < 0 || len <= 0) {
- pc += BC_CHECK_NOT_BACK_REF_LENGTH;
- break;
- }
- if (current + len > subject.length()) {
- pc = code_base + Load32Aligned(pc + 4);
- break;
- } else {
- int i;
- for (i = 0; i < len; i++) {
- if (subject[from + i] != subject[current + i]) {
- pc = code_base + Load32Aligned(pc + 4);
- break;
- }
- }
- if (i < len) break;
- current += len;
- }
- pc += BC_CHECK_NOT_BACK_REF_LENGTH;
- break;
- }
- BYTECODE(CHECK_NOT_BACK_REF_NO_CASE) {
- int from = registers[insn >> BYTECODE_SHIFT];
- int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
- if (from < 0 || len <= 0) {
- pc += BC_CHECK_NOT_BACK_REF_NO_CASE_LENGTH;
- break;
- }
- if (current + len > subject.length()) {
- pc = code_base + Load32Aligned(pc + 4);
- break;
- } else {
- if (BackRefMatchesNoCase(isolate->interp_canonicalize_mapping(),
- from, current, len, subject)) {
- current += len;
- pc += BC_CHECK_NOT_BACK_REF_NO_CASE_LENGTH;
- } else {
- pc = code_base + Load32Aligned(pc + 4);
- }
- }
- break;
- }
- BYTECODE(CHECK_AT_START)
- if (current == 0) {
- pc = code_base + Load32Aligned(pc + 4);
- } else {
- pc += BC_CHECK_AT_START_LENGTH;
- }
- break;
- BYTECODE(CHECK_NOT_AT_START)
- if (current == 0) {
- pc += BC_CHECK_NOT_AT_START_LENGTH;
- } else {
- pc = code_base + Load32Aligned(pc + 4);
- }
- break;
- BYTECODE(SET_CURRENT_POSITION_FROM_END) {
- int by = static_cast<uint32_t>(insn) >> BYTECODE_SHIFT;
- if (subject.length() - current > by) {
- current = subject.length() - by;
- current_char = subject[current - 1];
- }
- pc += BC_SET_CURRENT_POSITION_FROM_END_LENGTH;
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-bool IrregexpInterpreter::Match(Isolate* isolate,
- Handle<ByteArray> code_array,
- Handle<String> subject,
- int* registers,
- int start_position) {
- ASSERT(subject->IsFlat());
-
- AssertNoAllocation a;
- const byte* code_base = code_array->GetDataStartAddress();
- uc16 previous_char = '\n';
- if (subject->IsAsciiRepresentation()) {
- Vector<const char> subject_vector = subject->ToAsciiVector();
- if (start_position != 0) previous_char = subject_vector[start_position - 1];
- return RawMatch(isolate,
- code_base,
- subject_vector,
- registers,
- start_position,
- previous_char);
- } else {
- Vector<const uc16> subject_vector = subject->ToUC16Vector();
- if (start_position != 0) previous_char = subject_vector[start_position - 1];
- return RawMatch(isolate,
- code_base,
- subject_vector,
- registers,
- start_position,
- previous_char);
- }
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/interpreter-irregexp.h b/src/3rdparty/v8/src/interpreter-irregexp.h
deleted file mode 100644
index 076f0c5..0000000
--- a/src/3rdparty/v8/src/interpreter-irregexp.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// A simple interpreter for the Irregexp byte code.
-
-#ifndef V8_INTERPRETER_IRREGEXP_H_
-#define V8_INTERPRETER_IRREGEXP_H_
-
-namespace v8 {
-namespace internal {
-
-
-class IrregexpInterpreter {
- public:
- static bool Match(Isolate* isolate,
- Handle<ByteArray> code,
- Handle<String> subject,
- int* captures,
- int start_position);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_INTERPRETER_IRREGEXP_H_
diff --git a/src/3rdparty/v8/src/isolate.cc b/src/3rdparty/v8/src/isolate.cc
deleted file mode 100644
index cc9bc37..0000000
--- a/src/3rdparty/v8/src/isolate.cc
+++ /dev/null
@@ -1,883 +0,0 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include "v8.h"
-
-#include "ast.h"
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "compilation-cache.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "heap-profiler.h"
-#include "hydrogen.h"
-#include "isolate.h"
-#include "lithium-allocator.h"
-#include "log.h"
-#include "regexp-stack.h"
-#include "runtime-profiler.h"
-#include "scanner.h"
-#include "scopeinfo.h"
-#include "serialize.h"
-#include "simulator.h"
-#include "spaces.h"
-#include "stub-cache.h"
-#include "version.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-// Create a dummy thread that will wait forever on a semaphore. The only
-// purpose for this thread is to have some stack area to save essential data
-// into for use by a stacks only core dump (aka minidump).
-class PreallocatedMemoryThread: public Thread {
- public:
- char* data() {
- if (data_ready_semaphore_ != NULL) {
- // Initial access is guarded until the data has been published.
- data_ready_semaphore_->Wait();
- delete data_ready_semaphore_;
- data_ready_semaphore_ = NULL;
- }
- return data_;
- }
-
- unsigned length() {
- if (data_ready_semaphore_ != NULL) {
- // Initial access is guarded until the data has been published.
- data_ready_semaphore_->Wait();
- delete data_ready_semaphore_;
- data_ready_semaphore_ = NULL;
- }
- return length_;
- }
-
- // Stop the PreallocatedMemoryThread and release its resources.
- void StopThread() {
- keep_running_ = false;
- wait_for_ever_semaphore_->Signal();
-
- // Wait for the thread to terminate.
- Join();
-
- if (data_ready_semaphore_ != NULL) {
- delete data_ready_semaphore_;
- data_ready_semaphore_ = NULL;
- }
-
- delete wait_for_ever_semaphore_;
- wait_for_ever_semaphore_ = NULL;
- }
-
- protected:
- // When the thread starts running it will allocate a fixed number of bytes
- // on the stack and publish the location of this memory for others to use.
- void Run() {
- EmbeddedVector<char, 15 * 1024> local_buffer;
-
- // Initialize the buffer with a known good value.
- OS::StrNCpy(local_buffer, "Trace data was not generated.\n",
- local_buffer.length());
-
- // Publish the local buffer and signal its availability.
- data_ = local_buffer.start();
- length_ = local_buffer.length();
- data_ready_semaphore_->Signal();
-
- while (keep_running_) {
- // This thread will wait here until the end of time.
- wait_for_ever_semaphore_->Wait();
- }
-
- // Make sure we access the buffer after the wait to remove all possibility
- // of it being optimized away.
- OS::StrNCpy(local_buffer, "PreallocatedMemoryThread shutting down.\n",
- local_buffer.length());
- }
-
-
- private:
- explicit PreallocatedMemoryThread(Isolate* isolate)
- : Thread(isolate, "v8:PreallocMem"),
- keep_running_(true),
- wait_for_ever_semaphore_(OS::CreateSemaphore(0)),
- data_ready_semaphore_(OS::CreateSemaphore(0)),
- data_(NULL),
- length_(0) {
- }
-
- // Used to make sure that the thread keeps looping even for spurious wakeups.
- bool keep_running_;
-
- // This semaphore is used by the PreallocatedMemoryThread to wait for ever.
- Semaphore* wait_for_ever_semaphore_;
- // Semaphore to signal that the data has been initialized.
- Semaphore* data_ready_semaphore_;
-
- // Location and size of the preallocated memory block.
- char* data_;
- unsigned length_;
-
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(PreallocatedMemoryThread);
-};
-
-
-void Isolate::PreallocatedMemoryThreadStart() {
- if (preallocated_memory_thread_ != NULL) return;
- preallocated_memory_thread_ = new PreallocatedMemoryThread(this);
- preallocated_memory_thread_->Start();
-}
-
-
-void Isolate::PreallocatedMemoryThreadStop() {
- if (preallocated_memory_thread_ == NULL) return;
- preallocated_memory_thread_->StopThread();
- // Done with the thread entirely.
- delete preallocated_memory_thread_;
- preallocated_memory_thread_ = NULL;
-}
-
-
-void Isolate::PreallocatedStorageInit(size_t size) {
- ASSERT(free_list_.next_ == &free_list_);
- ASSERT(free_list_.previous_ == &free_list_);
- PreallocatedStorage* free_chunk =
- reinterpret_cast<PreallocatedStorage*>(new char[size]);
- free_list_.next_ = free_list_.previous_ = free_chunk;
- free_chunk->next_ = free_chunk->previous_ = &free_list_;
- free_chunk->size_ = size - sizeof(PreallocatedStorage);
- preallocated_storage_preallocated_ = true;
-}
-
-
-void* Isolate::PreallocatedStorageNew(size_t size) {
- if (!preallocated_storage_preallocated_) {
- return FreeStoreAllocationPolicy::New(size);
- }
- ASSERT(free_list_.next_ != &free_list_);
- ASSERT(free_list_.previous_ != &free_list_);
-
- size = (size + kPointerSize - 1) & ~(kPointerSize - 1);
- // Search for exact fit.
- for (PreallocatedStorage* storage = free_list_.next_;
- storage != &free_list_;
- storage = storage->next_) {
- if (storage->size_ == size) {
- storage->Unlink();
- storage->LinkTo(&in_use_list_);
- return reinterpret_cast<void*>(storage + 1);
- }
- }
- // Search for first fit.
- for (PreallocatedStorage* storage = free_list_.next_;
- storage != &free_list_;
- storage = storage->next_) {
- if (storage->size_ >= size + sizeof(PreallocatedStorage)) {
- storage->Unlink();
- storage->LinkTo(&in_use_list_);
- PreallocatedStorage* left_over =
- reinterpret_cast<PreallocatedStorage*>(
- reinterpret_cast<char*>(storage + 1) + size);
- left_over->size_ = storage->size_ - size - sizeof(PreallocatedStorage);
- ASSERT(size + left_over->size_ + sizeof(PreallocatedStorage) ==
- storage->size_);
- storage->size_ = size;
- left_over->LinkTo(&free_list_);
- return reinterpret_cast<void*>(storage + 1);
- }
- }
- // Allocation failure.
- ASSERT(false);
- return NULL;
-}
-
-
-// We don't attempt to coalesce.
-void Isolate::PreallocatedStorageDelete(void* p) {
- if (p == NULL) {
- return;
- }
- if (!preallocated_storage_preallocated_) {
- FreeStoreAllocationPolicy::Delete(p);
- return;
- }
- PreallocatedStorage* storage = reinterpret_cast<PreallocatedStorage*>(p) - 1;
- ASSERT(storage->next_->previous_ == storage);
- ASSERT(storage->previous_->next_ == storage);
- storage->Unlink();
- storage->LinkTo(&free_list_);
-}
-
-
-Isolate* Isolate::default_isolate_ = NULL;
-Thread::LocalStorageKey Isolate::isolate_key_;
-Thread::LocalStorageKey Isolate::thread_id_key_;
-Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
-Mutex* Isolate::process_wide_mutex_ = OS::CreateMutex();
-Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
-Isolate::ThreadId Isolate::highest_thread_id_ = 0;
-
-
-class IsolateInitializer {
- public:
- IsolateInitializer() {
- Isolate::EnsureDefaultIsolate();
- }
-};
-
-static IsolateInitializer* EnsureDefaultIsolateAllocated() {
- // TODO(isolates): Use the system threading API to do this once?
- static IsolateInitializer static_initializer;
- return &static_initializer;
-}
-
-// This variable only needed to trigger static intialization.
-static IsolateInitializer* static_initializer = EnsureDefaultIsolateAllocated();
-
-
-Isolate::ThreadId Isolate::AllocateThreadId() {
- ThreadId new_id;
- {
- ScopedLock lock(process_wide_mutex_);
- new_id = ++highest_thread_id_;
- }
- return new_id;
-}
-
-
-Isolate::PerIsolateThreadData* Isolate::AllocatePerIsolateThreadData(
- ThreadId thread_id) {
- ASSERT(thread_id != 0);
- ASSERT(Thread::GetThreadLocalInt(thread_id_key_) == thread_id);
- PerIsolateThreadData* per_thread = new PerIsolateThreadData(this, thread_id);
- {
- ScopedLock lock(process_wide_mutex_);
- ASSERT(thread_data_table_->Lookup(this, thread_id) == NULL);
- thread_data_table_->Insert(per_thread);
- ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread);
- }
- return per_thread;
-}
-
-
-Isolate::PerIsolateThreadData*
- Isolate::FindOrAllocatePerThreadDataForThisThread() {
- ThreadId thread_id = Thread::GetThreadLocalInt(thread_id_key_);
- if (thread_id == 0) {
- thread_id = AllocateThreadId();
- Thread::SetThreadLocalInt(thread_id_key_, thread_id);
- }
- PerIsolateThreadData* per_thread = NULL;
- {
- ScopedLock lock(process_wide_mutex_);
- per_thread = thread_data_table_->Lookup(this, thread_id);
- if (per_thread == NULL) {
- per_thread = AllocatePerIsolateThreadData(thread_id);
- }
- }
- return per_thread;
-}
-
-
-void Isolate::EnsureDefaultIsolate() {
- ScopedLock lock(process_wide_mutex_);
- if (default_isolate_ == NULL) {
- isolate_key_ = Thread::CreateThreadLocalKey();
- thread_id_key_ = Thread::CreateThreadLocalKey();
- per_isolate_thread_data_key_ = Thread::CreateThreadLocalKey();
- thread_data_table_ = new Isolate::ThreadDataTable();
- default_isolate_ = new Isolate();
- }
- // Can't use SetIsolateThreadLocals(default_isolate_, NULL) here
- // becase a non-null thread data may be already set.
- Thread::SetThreadLocal(isolate_key_, default_isolate_);
- CHECK(default_isolate_->PreInit());
-}
-
-
-Debugger* Isolate::GetDefaultIsolateDebugger() {
- EnsureDefaultIsolate();
- return default_isolate_->debugger();
-}
-
-
-StackGuard* Isolate::GetDefaultIsolateStackGuard() {
- EnsureDefaultIsolate();
- return default_isolate_->stack_guard();
-}
-
-
-void Isolate::EnterDefaultIsolate() {
- EnsureDefaultIsolate();
- ASSERT(default_isolate_ != NULL);
-
- PerIsolateThreadData* data = CurrentPerIsolateThreadData();
- // If not yet in default isolate - enter it.
- if (data == NULL || data->isolate() != default_isolate_) {
- default_isolate_->Enter();
- }
-}
-
-
-Isolate* Isolate::GetDefaultIsolateForLocking() {
- EnsureDefaultIsolate();
- return default_isolate_;
-}
-
-
-Isolate::ThreadDataTable::ThreadDataTable()
- : list_(NULL) {
-}
-
-
-Isolate::PerIsolateThreadData*
- Isolate::ThreadDataTable::Lookup(Isolate* isolate, ThreadId thread_id) {
- for (PerIsolateThreadData* data = list_; data != NULL; data = data->next_) {
- if (data->Matches(isolate, thread_id)) return data;
- }
- return NULL;
-}
-
-
-void Isolate::ThreadDataTable::Insert(Isolate::PerIsolateThreadData* data) {
- if (list_ != NULL) list_->prev_ = data;
- data->next_ = list_;
- list_ = data;
-}
-
-
-void Isolate::ThreadDataTable::Remove(PerIsolateThreadData* data) {
- if (list_ == data) list_ = data->next_;
- if (data->next_ != NULL) data->next_->prev_ = data->prev_;
- if (data->prev_ != NULL) data->prev_->next_ = data->next_;
-}
-
-
-void Isolate::ThreadDataTable::Remove(Isolate* isolate, ThreadId thread_id) {
- PerIsolateThreadData* data = Lookup(isolate, thread_id);
- if (data != NULL) {
- Remove(data);
- }
-}
-
-
-#ifdef DEBUG
-#define TRACE_ISOLATE(tag) \
- do { \
- if (FLAG_trace_isolates) { \
- PrintF("Isolate %p " #tag "\n", reinterpret_cast<void*>(this)); \
- } \
- } while (false)
-#else
-#define TRACE_ISOLATE(tag)
-#endif
-
-
-Isolate::Isolate()
- : state_(UNINITIALIZED),
- entry_stack_(NULL),
- stack_trace_nesting_level_(0),
- incomplete_message_(NULL),
- preallocated_memory_thread_(NULL),
- preallocated_message_space_(NULL),
- bootstrapper_(NULL),
- runtime_profiler_(NULL),
- compilation_cache_(NULL),
- counters_(new Counters()),
- code_range_(NULL),
- break_access_(OS::CreateMutex()),
- logger_(new Logger()),
- stats_table_(new StatsTable()),
- stub_cache_(NULL),
- deoptimizer_data_(NULL),
- capture_stack_trace_for_uncaught_exceptions_(false),
- stack_trace_for_uncaught_exceptions_frame_limit_(0),
- stack_trace_for_uncaught_exceptions_options_(StackTrace::kOverview),
- transcendental_cache_(NULL),
- memory_allocator_(NULL),
- keyed_lookup_cache_(NULL),
- context_slot_cache_(NULL),
- descriptor_lookup_cache_(NULL),
- handle_scope_implementer_(NULL),
- scanner_constants_(NULL),
- in_use_list_(0),
- free_list_(0),
- preallocated_storage_preallocated_(false),
- pc_to_code_cache_(NULL),
- write_input_buffer_(NULL),
- global_handles_(NULL),
- context_switcher_(NULL),
- thread_manager_(NULL),
- ast_sentinels_(NULL),
- string_tracker_(NULL),
- regexp_stack_(NULL),
- frame_element_constant_list_(0),
- result_constant_list_(0) {
- TRACE_ISOLATE(constructor);
-
- memset(isolate_addresses_, 0,
- sizeof(isolate_addresses_[0]) * (k_isolate_address_count + 1));
-
- heap_.isolate_ = this;
- zone_.isolate_ = this;
- stack_guard_.isolate_ = this;
-
-#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
- defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
- simulator_initialized_ = false;
- simulator_i_cache_ = NULL;
- simulator_redirection_ = NULL;
-#endif
-
-#ifdef DEBUG
- // heap_histograms_ initializes itself.
- memset(&js_spill_information_, 0, sizeof(js_spill_information_));
- memset(code_kind_statistics_, 0,
- sizeof(code_kind_statistics_[0]) * Code::NUMBER_OF_KINDS);
-#endif
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- debug_ = NULL;
- debugger_ = NULL;
-#endif
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- producer_heap_profile_ = NULL;
-#endif
-
- handle_scope_data_.Initialize();
-
-#define ISOLATE_INIT_EXECUTE(type, name, initial_value) \
- name##_ = (initial_value);
- ISOLATE_INIT_LIST(ISOLATE_INIT_EXECUTE)
-#undef ISOLATE_INIT_EXECUTE
-
-#define ISOLATE_INIT_ARRAY_EXECUTE(type, name, length) \
- memset(name##_, 0, sizeof(type) * length);
- ISOLATE_INIT_ARRAY_LIST(ISOLATE_INIT_ARRAY_EXECUTE)
-#undef ISOLATE_INIT_ARRAY_EXECUTE
-}
-
-void Isolate::TearDown() {
- TRACE_ISOLATE(tear_down);
-
- // Temporarily set this isolate as current so that various parts of
- // the isolate can access it in their destructors without having a
- // direct pointer. We don't use Enter/Exit here to avoid
- // initializing the thread data.
- PerIsolateThreadData* saved_data = CurrentPerIsolateThreadData();
- Isolate* saved_isolate = UncheckedCurrent();
- SetIsolateThreadLocals(this, NULL);
-
- Deinit();
-
- if (!IsDefaultIsolate()) {
- delete this;
- }
-
- // Restore the previous current isolate.
- SetIsolateThreadLocals(saved_isolate, saved_data);
-}
-
-
-void Isolate::Deinit() {
- if (state_ == INITIALIZED) {
- TRACE_ISOLATE(deinit);
-
- if (FLAG_hydrogen_stats) HStatistics::Instance()->Print();
-
- // We must stop the logger before we tear down other components.
- logger_->EnsureTickerStopped();
-
- delete deoptimizer_data_;
- deoptimizer_data_ = NULL;
- if (FLAG_preemption) {
- v8::Locker locker;
- v8::Locker::StopPreemption();
- }
- builtins_.TearDown();
- bootstrapper_->TearDown();
-
- // Remove the external reference to the preallocated stack memory.
- delete preallocated_message_space_;
- preallocated_message_space_ = NULL;
- PreallocatedMemoryThreadStop();
-
- HeapProfiler::TearDown();
- CpuProfiler::TearDown();
- if (runtime_profiler_ != NULL) {
- runtime_profiler_->TearDown();
- delete runtime_profiler_;
- runtime_profiler_ = NULL;
- }
- heap_.TearDown();
- logger_->TearDown();
-
- // The default isolate is re-initializable due to legacy API.
- state_ = PREINITIALIZED;
- }
-}
-
-
-void Isolate::SetIsolateThreadLocals(Isolate* isolate,
- PerIsolateThreadData* data) {
- Thread::SetThreadLocal(isolate_key_, isolate);
- Thread::SetThreadLocal(per_isolate_thread_data_key_, data);
-}
-
-
-Isolate::~Isolate() {
- TRACE_ISOLATE(destructor);
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- delete producer_heap_profile_;
- producer_heap_profile_ = NULL;
-#endif
-
- delete scanner_constants_;
- scanner_constants_ = NULL;
-
- delete regexp_stack_;
- regexp_stack_ = NULL;
-
- delete ast_sentinels_;
- ast_sentinels_ = NULL;
-
- delete descriptor_lookup_cache_;
- descriptor_lookup_cache_ = NULL;
- delete context_slot_cache_;
- context_slot_cache_ = NULL;
- delete keyed_lookup_cache_;
- keyed_lookup_cache_ = NULL;
-
- delete transcendental_cache_;
- transcendental_cache_ = NULL;
- delete stub_cache_;
- stub_cache_ = NULL;
- delete stats_table_;
- stats_table_ = NULL;
-
- delete logger_;
- logger_ = NULL;
-
- delete counters_;
- counters_ = NULL;
-
- delete handle_scope_implementer_;
- handle_scope_implementer_ = NULL;
- delete break_access_;
- break_access_ = NULL;
-
- delete compilation_cache_;
- compilation_cache_ = NULL;
- delete bootstrapper_;
- bootstrapper_ = NULL;
- delete pc_to_code_cache_;
- pc_to_code_cache_ = NULL;
- delete write_input_buffer_;
- write_input_buffer_ = NULL;
-
- delete context_switcher_;
- context_switcher_ = NULL;
- delete thread_manager_;
- thread_manager_ = NULL;
-
- delete string_tracker_;
- string_tracker_ = NULL;
-
- delete memory_allocator_;
- memory_allocator_ = NULL;
- delete code_range_;
- code_range_ = NULL;
- delete global_handles_;
- global_handles_ = NULL;
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- delete debugger_;
- debugger_ = NULL;
- delete debug_;
- debug_ = NULL;
-#endif
-}
-
-
-bool Isolate::PreInit() {
- if (state_ != UNINITIALIZED) return true;
-
- TRACE_ISOLATE(preinit);
-
- ASSERT(Isolate::Current() == this);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- debug_ = new Debug(this);
- debugger_ = new Debugger();
- debugger_->isolate_ = this;
-#endif
-
- memory_allocator_ = new MemoryAllocator();
- memory_allocator_->isolate_ = this;
- code_range_ = new CodeRange();
- code_range_->isolate_ = this;
-
- // Safe after setting Heap::isolate_, initializing StackGuard and
- // ensuring that Isolate::Current() == this.
- heap_.SetStackLimits();
-
-#ifdef DEBUG
- DisallowAllocationFailure disallow_allocation_failure;
-#endif
-
-#define C(name) isolate_addresses_[Isolate::k_##name] = \
- reinterpret_cast<Address>(name());
- ISOLATE_ADDRESS_LIST(C)
- ISOLATE_ADDRESS_LIST_PROF(C)
-#undef C
-
- string_tracker_ = new StringTracker();
- string_tracker_->isolate_ = this;
- thread_manager_ = new ThreadManager();
- thread_manager_->isolate_ = this;
- compilation_cache_ = new CompilationCache(this);
- transcendental_cache_ = new TranscendentalCache();
- keyed_lookup_cache_ = new KeyedLookupCache();
- context_slot_cache_ = new ContextSlotCache();
- descriptor_lookup_cache_ = new DescriptorLookupCache();
- scanner_constants_ = new ScannerConstants();
- pc_to_code_cache_ = new PcToCodeCache(this);
- write_input_buffer_ = new StringInputBuffer();
- global_handles_ = new GlobalHandles(this);
- bootstrapper_ = new Bootstrapper();
- handle_scope_implementer_ = new HandleScopeImplementer();
- stub_cache_ = new StubCache(this);
- ast_sentinels_ = new AstSentinels();
- regexp_stack_ = new RegExpStack();
- regexp_stack_->isolate_ = this;
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- producer_heap_profile_ = new ProducerHeapProfile();
- producer_heap_profile_->isolate_ = this;
-#endif
-
- state_ = PREINITIALIZED;
- return true;
-}
-
-
-void Isolate::InitializeThreadLocal() {
- thread_local_top_.Initialize();
- clear_pending_exception();
- clear_pending_message();
- clear_scheduled_exception();
-}
-
-
-bool Isolate::Init(Deserializer* des) {
- ASSERT(state_ != INITIALIZED);
-
- TRACE_ISOLATE(init);
-
- bool create_heap_objects = des == NULL;
-
-#ifdef DEBUG
- // The initialization process does not handle memory exhaustion.
- DisallowAllocationFailure disallow_allocation_failure;
-#endif
-
- if (state_ == UNINITIALIZED && !PreInit()) return false;
-
- // Enable logging before setting up the heap
- logger_->Setup();
-
- CpuProfiler::Setup();
- HeapProfiler::Setup();
-
- // Initialize other runtime facilities
-#if defined(USE_SIMULATOR)
-#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
- Simulator::Initialize();
-#endif
-#endif
-
- { // NOLINT
- // Ensure that the thread has a valid stack guard. The v8::Locker object
- // will ensure this too, but we don't have to use lockers if we are only
- // using one thread.
- ExecutionAccess lock(this);
- stack_guard_.InitThread(lock);
- }
-
- // Setup the object heap
- ASSERT(!heap_.HasBeenSetup());
- if (!heap_.Setup(create_heap_objects)) {
- V8::SetFatalError();
- return false;
- }
-
- bootstrapper_->Initialize(create_heap_objects);
- builtins_.Setup(create_heap_objects);
-
- InitializeThreadLocal();
-
- // Only preallocate on the first initialization.
- if (FLAG_preallocate_message_memory && preallocated_message_space_ == NULL) {
- // Start the thread which will set aside some memory.
- PreallocatedMemoryThreadStart();
- preallocated_message_space_ =
- new NoAllocationStringAllocator(
- preallocated_memory_thread_->data(),
- preallocated_memory_thread_->length());
- PreallocatedStorageInit(preallocated_memory_thread_->length() / 4);
- }
-
- if (FLAG_preemption) {
- v8::Locker locker;
- v8::Locker::StartPreemption(100);
- }
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- debug_->Setup(create_heap_objects);
-#endif
- stub_cache_->Initialize(create_heap_objects);
-
- // If we are deserializing, read the state into the now-empty heap.
- if (des != NULL) {
- des->Deserialize();
- stub_cache_->Clear();
- }
-
- // Deserializing may put strange things in the root array's copy of the
- // stack guard.
- heap_.SetStackLimits();
-
- deoptimizer_data_ = new DeoptimizerData;
- runtime_profiler_ = new RuntimeProfiler(this);
- runtime_profiler_->Setup();
-
- // If we are deserializing, log non-function code objects and compiled
- // functions found in the snapshot.
- if (des != NULL && FLAG_log_code) {
- HandleScope scope;
- LOG(this, LogCodeObjects());
- LOG(this, LogCompiledFunctions());
- }
-
- state_ = INITIALIZED;
- return true;
-}
-
-
-void Isolate::Enter() {
- Isolate* current_isolate = NULL;
- PerIsolateThreadData* current_data = CurrentPerIsolateThreadData();
- if (current_data != NULL) {
- current_isolate = current_data->isolate_;
- ASSERT(current_isolate != NULL);
- if (current_isolate == this) {
- ASSERT(Current() == this);
- ASSERT(entry_stack_ != NULL);
- ASSERT(entry_stack_->previous_thread_data == NULL ||
- entry_stack_->previous_thread_data->thread_id() ==
- Thread::GetThreadLocalInt(thread_id_key_));
- // Same thread re-enters the isolate, no need to re-init anything.
- entry_stack_->entry_count++;
- return;
- }
- }
-
- // Threads can have default isolate set into TLS as Current but not yet have
- // PerIsolateThreadData for it, as it requires more advanced phase of the
- // initialization. For example, a thread might be the one that system used for
- // static initializers - in this case the default isolate is set in TLS but
- // the thread did not yet Enter the isolate. If PerisolateThreadData is not
- // there, use the isolate set in TLS.
- if (current_isolate == NULL) {
- current_isolate = Isolate::UncheckedCurrent();
- }
-
- PerIsolateThreadData* data = FindOrAllocatePerThreadDataForThisThread();
- ASSERT(data != NULL);
- ASSERT(data->isolate_ == this);
-
- EntryStackItem* item = new EntryStackItem(current_data,
- current_isolate,
- entry_stack_);
- entry_stack_ = item;
-
- SetIsolateThreadLocals(this, data);
-
- CHECK(PreInit());
-
- // In case it's the first time some thread enters the isolate.
- set_thread_id(data->thread_id());
-}
-
-
-void Isolate::Exit() {
- ASSERT(entry_stack_ != NULL);
- ASSERT(entry_stack_->previous_thread_data == NULL ||
- entry_stack_->previous_thread_data->thread_id() ==
- Thread::GetThreadLocalInt(thread_id_key_));
-
- if (--entry_stack_->entry_count > 0) return;
-
- ASSERT(CurrentPerIsolateThreadData() != NULL);
- ASSERT(CurrentPerIsolateThreadData()->isolate_ == this);
-
- // Pop the stack.
- EntryStackItem* item = entry_stack_;
- entry_stack_ = item->previous_item;
-
- PerIsolateThreadData* previous_thread_data = item->previous_thread_data;
- Isolate* previous_isolate = item->previous_isolate;
-
- delete item;
-
- // Reinit the current thread for the isolate it was running before this one.
- SetIsolateThreadLocals(previous_isolate, previous_thread_data);
-}
-
-
-void Isolate::ResetEagerOptimizingData() {
- compilation_cache_->ResetEagerOptimizingData();
-}
-
-
-#ifdef DEBUG
-#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
-const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_);
-ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
-ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
-#undef ISOLATE_FIELD_OFFSET
-#endif
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/isolate.h b/src/3rdparty/v8/src/isolate.h
deleted file mode 100644
index 638658b..0000000
--- a/src/3rdparty/v8/src/isolate.h
+++ /dev/null
@@ -1,1306 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ISOLATE_H_
-#define V8_ISOLATE_H_
-
-#include "../include/v8-debug.h"
-#include "allocation.h"
-#include "apiutils.h"
-#include "atomicops.h"
-#include "builtins.h"
-#include "contexts.h"
-#include "execution.h"
-#include "frames.h"
-#include "global-handles.h"
-#include "handles.h"
-#include "heap.h"
-#include "regexp-stack.h"
-#include "runtime-profiler.h"
-#include "runtime.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-class AstSentinels;
-class Bootstrapper;
-class CodeGenerator;
-class CodeRange;
-class CompilationCache;
-class ContextSlotCache;
-class ContextSwitcher;
-class Counters;
-class CpuFeatures;
-class CpuProfiler;
-class DeoptimizerData;
-class Deserializer;
-class EmptyStatement;
-class ExternalReferenceTable;
-class Factory;
-class FunctionInfoListener;
-class HandleScopeImplementer;
-class HeapProfiler;
-class InlineRuntimeFunctionsTable;
-class NoAllocationStringAllocator;
-class PcToCodeCache;
-class PreallocatedMemoryThread;
-class ProducerHeapProfile;
-class RegExpStack;
-class SaveContext;
-class ScannerConstants;
-class StringInputBuffer;
-class StringTracker;
-class StubCache;
-class ThreadManager;
-class ThreadState;
-class ThreadVisitor; // Defined in v8threads.h
-class VMState;
-
-// 'void function pointer', used to roundtrip the
-// ExternalReference::ExternalReferenceRedirector since we can not include
-// assembler.h, where it is defined, here.
-typedef void* ExternalReferenceRedirectorPointer();
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-class Debug;
-class Debugger;
-class DebuggerAgent;
-#endif
-
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
- !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
-class Redirection;
-class Simulator;
-#endif
-
-
-// Static indirection table for handles to constants. If a frame
-// element represents a constant, the data contains an index into
-// this table of handles to the actual constants.
-// Static indirection table for handles to constants. If a Result
-// represents a constant, the data contains an index into this table
-// of handles to the actual constants.
-typedef ZoneList<Handle<Object> > ZoneObjectList;
-
-#define RETURN_IF_SCHEDULED_EXCEPTION(isolate) \
- if (isolate->has_scheduled_exception()) \
- return isolate->PromoteScheduledException()
-
-#define RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, value) \
- if (call.is_null()) { \
- ASSERT(isolate->has_pending_exception()); \
- return value; \
- }
-
-#define RETURN_IF_EMPTY_HANDLE(isolate, call) \
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, Failure::Exception())
-
-#define ISOLATE_ADDRESS_LIST(C) \
- C(handler_address) \
- C(c_entry_fp_address) \
- C(context_address) \
- C(pending_exception_address) \
- C(external_caught_exception_address)
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-#define ISOLATE_ADDRESS_LIST_PROF(C) \
- C(js_entry_sp_address)
-#else
-#define ISOLATE_ADDRESS_LIST_PROF(C)
-#endif
-
-
-class ThreadLocalTop BASE_EMBEDDED {
- public:
- // Initialize the thread data.
- void Initialize();
-
- // Get the top C++ try catch handler or NULL if none are registered.
- //
- // This method is not guarenteed to return an address that can be
- // used for comparison with addresses into the JS stack. If such an
- // address is needed, use try_catch_handler_address.
- v8::TryCatch* TryCatchHandler();
-
- // Get the address of the top C++ try catch handler or NULL if
- // none are registered.
- //
- // This method always returns an address that can be compared to
- // pointers into the JavaScript stack. When running on actual
- // hardware, try_catch_handler_address and TryCatchHandler return
- // the same pointer. When running on a simulator with a separate JS
- // stack, try_catch_handler_address returns a JS stack address that
- // corresponds to the place on the JS stack where the C++ handler
- // would have been if the stack were not separate.
- inline Address try_catch_handler_address() {
- return try_catch_handler_address_;
- }
-
- // Set the address of the top C++ try catch handler.
- inline void set_try_catch_handler_address(Address address) {
- try_catch_handler_address_ = address;
- }
-
- void Free() {
- ASSERT(!has_pending_message_);
- ASSERT(!external_caught_exception_);
- ASSERT(try_catch_handler_address_ == NULL);
- }
-
- // The context where the current execution method is created and for variable
- // lookups.
- Context* context_;
- int thread_id_;
- MaybeObject* pending_exception_;
- bool has_pending_message_;
- const char* pending_message_;
- Object* pending_message_obj_;
- Script* pending_message_script_;
- int pending_message_start_pos_;
- int pending_message_end_pos_;
- // Use a separate value for scheduled exceptions to preserve the
- // invariants that hold about pending_exception. We may want to
- // unify them later.
- MaybeObject* scheduled_exception_;
- bool external_caught_exception_;
- SaveContext* save_context_;
- v8::TryCatch* catcher_;
-
- // Stack.
- Address c_entry_fp_; // the frame pointer of the top c entry frame
- Address handler_; // try-blocks are chained through the stack
-
-#ifdef USE_SIMULATOR
-#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
- Simulator* simulator_;
-#endif
-#endif // USE_SIMULATOR
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- Address js_entry_sp_; // the stack pointer of the bottom js entry frame
- Address external_callback_; // the external callback we're currently in
-#endif
-
-#ifdef ENABLE_VMSTATE_TRACKING
- StateTag current_vm_state_;
-#endif
-
- // Generated code scratch locations.
- int32_t formal_count_;
-
- // Call back function to report unsafe JS accesses.
- v8::FailedAccessCheckCallback failed_access_check_callback_;
-
- private:
- Address try_catch_handler_address_;
-};
-
-#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
-
-#define ISOLATE_PLATFORM_INIT_LIST(V) \
- /* VirtualFrame::SpilledScope state */ \
- V(bool, is_virtual_frame_in_spilled_scope, false) \
- /* CodeGenerator::EmitNamedStore state */ \
- V(int, inlined_write_barrier_size, -1)
-
-#if !defined(__arm__) && !defined(__mips__)
-class HashMap;
-#endif
-
-#else
-
-#define ISOLATE_PLATFORM_INIT_LIST(V)
-
-#endif
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-#define ISOLATE_DEBUGGER_INIT_LIST(V) \
- V(uint64_t, enabled_cpu_features, 0) \
- V(v8::Debug::EventCallback, debug_event_callback, NULL) \
- V(DebuggerAgent*, debugger_agent_instance, NULL)
-#else
-
-#define ISOLATE_DEBUGGER_INIT_LIST(V)
-
-#endif
-
-#ifdef DEBUG
-
-#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V) \
- V(CommentStatistic, paged_space_comments_statistics, \
- CommentStatistic::kMaxComments + 1)
-#else
-
-#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
-
-#endif
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-#define ISOLATE_LOGGING_INIT_LIST(V) \
- V(CpuProfiler*, cpu_profiler, NULL) \
- V(HeapProfiler*, heap_profiler, NULL)
-
-#else
-
-#define ISOLATE_LOGGING_INIT_LIST(V)
-
-#endif
-
-#define ISOLATE_INIT_ARRAY_LIST(V) \
- /* SerializerDeserializer state. */ \
- V(Object*, serialize_partial_snapshot_cache, kPartialSnapshotCacheCapacity) \
- V(int, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
- V(int, bad_char_shift_table, kUC16AlphabetSize) \
- V(int, good_suffix_shift_table, (kBMMaxShift + 1)) \
- V(int, suffix_table, (kBMMaxShift + 1)) \
- ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
-
-typedef List<HeapObject*, PreallocatedStorage> DebugObjectCache;
-
-#define ISOLATE_INIT_LIST(V) \
- /* AssertNoZoneAllocation state. */ \
- V(bool, zone_allow_allocation, true) \
- /* SerializerDeserializer state. */ \
- V(int, serialize_partial_snapshot_cache_length, 0) \
- /* Assembler state. */ \
- /* A previously allocated buffer of kMinimalBufferSize bytes, or NULL. */ \
- V(byte*, assembler_spare_buffer, NULL) \
- V(FatalErrorCallback, exception_behavior, NULL) \
- V(v8::Debug::MessageHandler, message_handler, NULL) \
- /* To distinguish the function templates, so that we can find them in the */ \
- /* function cache of the global context. */ \
- V(int, next_serial_number, 0) \
- V(ExternalReferenceRedirectorPointer*, external_reference_redirector, NULL) \
- V(bool, always_allow_natives_syntax, false) \
- /* Part of the state of liveedit. */ \
- V(FunctionInfoListener*, active_function_info_listener, NULL) \
- /* State for Relocatable. */ \
- V(Relocatable*, relocatable_top, NULL) \
- /* State for CodeEntry in profile-generator. */ \
- V(CodeGenerator*, current_code_generator, NULL) \
- V(bool, jump_target_compiling_deferred_code, false) \
- V(DebugObjectCache*, string_stream_debug_object_cache, NULL) \
- V(Object*, string_stream_current_security_token, NULL) \
- /* TODO(isolates): Release this on destruction? */ \
- V(int*, irregexp_interpreter_backtrack_stack_cache, NULL) \
- /* Serializer state. */ \
- V(ExternalReferenceTable*, external_reference_table, NULL) \
- /* AstNode state. */ \
- V(unsigned, ast_node_id, 0) \
- V(unsigned, ast_node_count, 0) \
- /* SafeStackFrameIterator activations count. */ \
- V(int, safe_stack_iterator_counter, 0) \
- ISOLATE_PLATFORM_INIT_LIST(V) \
- ISOLATE_LOGGING_INIT_LIST(V) \
- ISOLATE_DEBUGGER_INIT_LIST(V)
-
-class Isolate {
- // These forward declarations are required to make the friend declarations in
- // PerIsolateThreadData work on some older versions of gcc.
- class ThreadDataTable;
- class EntryStackItem;
- public:
- ~Isolate();
-
- typedef int ThreadId;
-
- // A thread has a PerIsolateThreadData instance for each isolate that it has
- // entered. That instance is allocated when the isolate is initially entered
- // and reused on subsequent entries.
- class PerIsolateThreadData {
- public:
- PerIsolateThreadData(Isolate* isolate, ThreadId thread_id)
- : isolate_(isolate),
- thread_id_(thread_id),
- stack_limit_(0),
- thread_state_(NULL),
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
- !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
- simulator_(NULL),
-#endif
- next_(NULL),
- prev_(NULL) { }
- Isolate* isolate() const { return isolate_; }
- ThreadId thread_id() const { return thread_id_; }
- void set_stack_limit(uintptr_t value) { stack_limit_ = value; }
- uintptr_t stack_limit() const { return stack_limit_; }
- ThreadState* thread_state() const { return thread_state_; }
- void set_thread_state(ThreadState* value) { thread_state_ = value; }
-
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
- !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
- Simulator* simulator() const { return simulator_; }
- void set_simulator(Simulator* simulator) {
- simulator_ = simulator;
- }
-#endif
-
- bool Matches(Isolate* isolate, ThreadId thread_id) const {
- return isolate_ == isolate && thread_id_ == thread_id;
- }
-
- private:
- Isolate* isolate_;
- ThreadId thread_id_;
- uintptr_t stack_limit_;
- ThreadState* thread_state_;
-
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
- !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
- Simulator* simulator_;
-#endif
-
- PerIsolateThreadData* next_;
- PerIsolateThreadData* prev_;
-
- friend class Isolate;
- friend class ThreadDataTable;
- friend class EntryStackItem;
-
- DISALLOW_COPY_AND_ASSIGN(PerIsolateThreadData);
- };
-
-
- enum AddressId {
-#define C(name) k_##name,
- ISOLATE_ADDRESS_LIST(C)
- ISOLATE_ADDRESS_LIST_PROF(C)
-#undef C
- k_isolate_address_count
- };
-
- // Returns the PerIsolateThreadData for the current thread (or NULL if one is
- // not currently set).
- static PerIsolateThreadData* CurrentPerIsolateThreadData() {
- return reinterpret_cast<PerIsolateThreadData*>(
- Thread::GetThreadLocal(per_isolate_thread_data_key_));
- }
-
- // Returns the isolate inside which the current thread is running.
- INLINE(static Isolate* Current()) {
- Isolate* isolate = reinterpret_cast<Isolate*>(
- Thread::GetExistingThreadLocal(isolate_key_));
- ASSERT(isolate != NULL);
- return isolate;
- }
-
- INLINE(static Isolate* UncheckedCurrent()) {
- return reinterpret_cast<Isolate*>(Thread::GetThreadLocal(isolate_key_));
- }
-
- bool Init(Deserializer* des);
-
- bool IsInitialized() { return state_ == INITIALIZED; }
-
- // True if at least one thread Enter'ed this isolate.
- bool IsInUse() { return entry_stack_ != NULL; }
-
- // Destroys the non-default isolates.
- // Sets default isolate into "has_been_disposed" state rather then destroying,
- // for legacy API reasons.
- void TearDown();
-
- bool IsDefaultIsolate() const { return this == default_isolate_; }
-
- // Ensures that process-wide resources and the default isolate have been
- // allocated. It is only necessary to call this method in rare casses, for
- // example if you are using V8 from within the body of a static initializer.
- // Safe to call multiple times.
- static void EnsureDefaultIsolate();
-
- // Get the debugger from the default isolate. Preinitializes the
- // default isolate if needed.
- static Debugger* GetDefaultIsolateDebugger();
-
- // Get the stack guard from the default isolate. Preinitializes the
- // default isolate if needed.
- static StackGuard* GetDefaultIsolateStackGuard();
-
- // Returns the key used to store the pointer to the current isolate.
- // Used internally for V8 threads that do not execute JavaScript but still
- // are part of the domain of an isolate (like the context switcher).
- static Thread::LocalStorageKey isolate_key() {
- return isolate_key_;
- }
-
- // Returns the key used to store process-wide thread IDs.
- static Thread::LocalStorageKey thread_id_key() {
- return thread_id_key_;
- }
-
- // Atomically allocates a new thread ID.
- static ThreadId AllocateThreadId();
-
- // If a client attempts to create a Locker without specifying an isolate,
- // we assume that the client is using legacy behavior. Set up the current
- // thread to be inside the implicit isolate (or fail a check if we have
- // switched to non-legacy behavior).
- static void EnterDefaultIsolate();
-
- // Debug.
- // Mutex for serializing access to break control structures.
- Mutex* break_access() { return break_access_; }
-
- Address get_address_from_id(AddressId id);
-
- // Access to top context (where the current function object was created).
- Context* context() { return thread_local_top_.context_; }
- void set_context(Context* context) {
- thread_local_top_.context_ = context;
- }
- Context** context_address() { return &thread_local_top_.context_; }
-
- SaveContext* save_context() {return thread_local_top_.save_context_; }
- void set_save_context(SaveContext* save) {
- thread_local_top_.save_context_ = save;
- }
-
- // Access to current thread id.
- int thread_id() { return thread_local_top_.thread_id_; }
- void set_thread_id(int id) { thread_local_top_.thread_id_ = id; }
-
- // Interface to pending exception.
- MaybeObject* pending_exception() {
- ASSERT(has_pending_exception());
- return thread_local_top_.pending_exception_;
- }
- bool external_caught_exception() {
- return thread_local_top_.external_caught_exception_;
- }
- void set_pending_exception(MaybeObject* exception) {
- thread_local_top_.pending_exception_ = exception;
- }
- void clear_pending_exception() {
- thread_local_top_.pending_exception_ = heap_.the_hole_value();
- }
- MaybeObject** pending_exception_address() {
- return &thread_local_top_.pending_exception_;
- }
- bool has_pending_exception() {
- return !thread_local_top_.pending_exception_->IsTheHole();
- }
- void clear_pending_message() {
- thread_local_top_.has_pending_message_ = false;
- thread_local_top_.pending_message_ = NULL;
- thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
- thread_local_top_.pending_message_script_ = NULL;
- }
- v8::TryCatch* try_catch_handler() {
- return thread_local_top_.TryCatchHandler();
- }
- Address try_catch_handler_address() {
- return thread_local_top_.try_catch_handler_address();
- }
- bool* external_caught_exception_address() {
- return &thread_local_top_.external_caught_exception_;
- }
-
- MaybeObject** scheduled_exception_address() {
- return &thread_local_top_.scheduled_exception_;
- }
- MaybeObject* scheduled_exception() {
- ASSERT(has_scheduled_exception());
- return thread_local_top_.scheduled_exception_;
- }
- bool has_scheduled_exception() {
- return !thread_local_top_.scheduled_exception_->IsTheHole();
- }
- void clear_scheduled_exception() {
- thread_local_top_.scheduled_exception_ = heap_.the_hole_value();
- }
-
- bool IsExternallyCaught();
-
- bool is_catchable_by_javascript(MaybeObject* exception) {
- return (exception != Failure::OutOfMemoryException()) &&
- (exception != heap()->termination_exception());
- }
-
- // JS execution stack (see frames.h).
- static Address c_entry_fp(ThreadLocalTop* thread) {
- return thread->c_entry_fp_;
- }
- static Address handler(ThreadLocalTop* thread) { return thread->handler_; }
-
- inline Address* c_entry_fp_address() {
- return &thread_local_top_.c_entry_fp_;
- }
- inline Address* handler_address() { return &thread_local_top_.handler_; }
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // Bottom JS entry (see StackTracer::Trace in log.cc).
- static Address js_entry_sp(ThreadLocalTop* thread) {
- return thread->js_entry_sp_;
- }
- inline Address* js_entry_sp_address() {
- return &thread_local_top_.js_entry_sp_;
- }
-#endif
-
- // Generated code scratch locations.
- void* formal_count_address() { return &thread_local_top_.formal_count_; }
-
- // Returns the global object of the current context. It could be
- // a builtin object, or a js global object.
- Handle<GlobalObject> global() {
- return Handle<GlobalObject>(context()->global());
- }
-
- // Returns the global proxy object of the current context.
- Object* global_proxy() {
- return context()->global_proxy();
- }
-
- Handle<JSBuiltinsObject> js_builtins_object() {
- return Handle<JSBuiltinsObject>(thread_local_top_.context_->builtins());
- }
-
- static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
- void FreeThreadResources() { thread_local_top_.Free(); }
-
- // This method is called by the api after operations that may throw
- // exceptions. If an exception was thrown and not handled by an external
- // handler the exception is scheduled to be rethrown when we return to running
- // JavaScript code. If an exception is scheduled true is returned.
- bool OptionalRescheduleException(bool is_bottom_call);
-
- void SetCaptureStackTraceForUncaughtExceptions(
- bool capture,
- int frame_limit,
- StackTrace::StackTraceOptions options);
-
- // Tells whether the current context has experienced an out of memory
- // exception.
- bool is_out_of_memory();
-
- void PrintCurrentStackTrace(FILE* out);
- void PrintStackTrace(FILE* out, char* thread_data);
- void PrintStack(StringStream* accumulator);
- void PrintStack();
- Handle<String> StackTraceString();
- Handle<JSArray> CaptureCurrentStackTrace(
- int frame_limit,
- StackTrace::StackTraceOptions options);
-
- // Returns if the top context may access the given global object. If
- // the result is false, the pending exception is guaranteed to be
- // set.
- bool MayNamedAccess(JSObject* receiver,
- Object* key,
- v8::AccessType type);
- bool MayIndexedAccess(JSObject* receiver,
- uint32_t index,
- v8::AccessType type);
-
- void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
- void ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type);
-
- // Exception throwing support. The caller should use the result
- // of Throw() as its return value.
- Failure* Throw(Object* exception, MessageLocation* location = NULL);
- // Re-throw an exception. This involves no error reporting since
- // error reporting was handled when the exception was thrown
- // originally.
- Failure* ReThrow(MaybeObject* exception, MessageLocation* location = NULL);
- void ScheduleThrow(Object* exception);
- void ReportPendingMessages();
- Failure* ThrowIllegalOperation();
-
- // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
- Failure* PromoteScheduledException();
- void DoThrow(MaybeObject* exception,
- MessageLocation* location,
- const char* message);
- // Checks if exception should be reported and finds out if it's
- // caught externally.
- bool ShouldReportException(bool* can_be_caught_externally,
- bool catchable_by_javascript);
-
- // Attempts to compute the current source location, storing the
- // result in the target out parameter.
- void ComputeLocation(MessageLocation* target);
-
- // Override command line flag.
- void TraceException(bool flag);
-
- // Out of resource exception helpers.
- Failure* StackOverflow();
- Failure* TerminateExecution();
-
- // Administration
- void Iterate(ObjectVisitor* v);
- void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
- char* Iterate(ObjectVisitor* v, char* t);
- void IterateThread(ThreadVisitor* v);
- void IterateThread(ThreadVisitor* v, char* t);
-
-
- // Returns the current global context.
- Handle<Context> global_context();
-
- // Returns the global context of the calling JavaScript code. That
- // is, the global context of the top-most JavaScript frame.
- Handle<Context> GetCallingGlobalContext();
-
- void RegisterTryCatchHandler(v8::TryCatch* that);
- void UnregisterTryCatchHandler(v8::TryCatch* that);
-
- char* ArchiveThread(char* to);
- char* RestoreThread(char* from);
-
- static const char* const kStackOverflowMessage;
-
- static const int kUC16AlphabetSize = 256; // See StringSearchBase.
- static const int kBMMaxShift = 250; // See StringSearchBase.
-
- // Accessors.
-#define GLOBAL_ACCESSOR(type, name, initialvalue) \
- inline type name() const { \
- ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
- return name##_; \
- } \
- inline void set_##name(type value) { \
- ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
- name##_ = value; \
- }
- ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
-#undef GLOBAL_ACCESSOR
-
-#define GLOBAL_ARRAY_ACCESSOR(type, name, length) \
- inline type* name() { \
- ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
- return &(name##_)[0]; \
- }
- ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
-#undef GLOBAL_ARRAY_ACCESSOR
-
-#define GLOBAL_CONTEXT_FIELD_ACCESSOR(index, type, name) \
- Handle<type> name() { \
- return Handle<type>(context()->global_context()->name()); \
- }
- GLOBAL_CONTEXT_FIELDS(GLOBAL_CONTEXT_FIELD_ACCESSOR)
-#undef GLOBAL_CONTEXT_FIELD_ACCESSOR
-
- Bootstrapper* bootstrapper() { return bootstrapper_; }
- Counters* counters() { return counters_; }
- CodeRange* code_range() { return code_range_; }
- RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
- CompilationCache* compilation_cache() { return compilation_cache_; }
- Logger* logger() { return logger_; }
- StackGuard* stack_guard() { return &stack_guard_; }
- Heap* heap() { return &heap_; }
- StatsTable* stats_table() { return stats_table_; }
- StubCache* stub_cache() { return stub_cache_; }
- DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
- ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
-
- TranscendentalCache* transcendental_cache() const {
- return transcendental_cache_;
- }
-
- MemoryAllocator* memory_allocator() {
- return memory_allocator_;
- }
-
- KeyedLookupCache* keyed_lookup_cache() {
- return keyed_lookup_cache_;
- }
-
- ContextSlotCache* context_slot_cache() {
- return context_slot_cache_;
- }
-
- DescriptorLookupCache* descriptor_lookup_cache() {
- return descriptor_lookup_cache_;
- }
-
- v8::ImplementationUtilities::HandleScopeData* handle_scope_data() {
- return &handle_scope_data_;
- }
- HandleScopeImplementer* handle_scope_implementer() {
- ASSERT(handle_scope_implementer_);
- return handle_scope_implementer_;
- }
- Zone* zone() { return &zone_; }
-
- ScannerConstants* scanner_constants() {
- return scanner_constants_;
- }
-
- PcToCodeCache* pc_to_code_cache() { return pc_to_code_cache_; }
-
- StringInputBuffer* write_input_buffer() { return write_input_buffer_; }
-
- GlobalHandles* global_handles() { return global_handles_; }
-
- ThreadManager* thread_manager() { return thread_manager_; }
-
- ContextSwitcher* context_switcher() { return context_switcher_; }
-
- void set_context_switcher(ContextSwitcher* switcher) {
- context_switcher_ = switcher;
- }
-
- StringTracker* string_tracker() { return string_tracker_; }
-
- unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
- return &jsregexp_uncanonicalize_;
- }
-
- unibrow::Mapping<unibrow::CanonicalizationRange>* jsregexp_canonrange() {
- return &jsregexp_canonrange_;
- }
-
- StringInputBuffer* objects_string_compare_buffer_a() {
- return &objects_string_compare_buffer_a_;
- }
-
- StringInputBuffer* objects_string_compare_buffer_b() {
- return &objects_string_compare_buffer_b_;
- }
-
- StaticResource<StringInputBuffer>* objects_string_input_buffer() {
- return &objects_string_input_buffer_;
- }
-
- AstSentinels* ast_sentinels() { return ast_sentinels_; }
-
- RuntimeState* runtime_state() { return &runtime_state_; }
-
- StringInputBuffer* liveedit_compare_substrings_buf1() {
- return &liveedit_compare_substrings_buf1_;
- }
-
- StringInputBuffer* liveedit_compare_substrings_buf2() {
- return &liveedit_compare_substrings_buf2_;
- }
-
- StaticResource<SafeStringInputBuffer>* compiler_safe_string_input_buffer() {
- return &compiler_safe_string_input_buffer_;
- }
-
- Builtins* builtins() { return &builtins_; }
-
- unibrow::Mapping<unibrow::Ecma262Canonicalize>*
- regexp_macro_assembler_canonicalize() {
- return &regexp_macro_assembler_canonicalize_;
- }
-
- RegExpStack* regexp_stack() { return regexp_stack_; }
-
- unibrow::Mapping<unibrow::Ecma262Canonicalize>*
- interp_canonicalize_mapping() {
- return &interp_canonicalize_mapping_;
- }
-
- ZoneObjectList* frame_element_constant_list() {
- return &frame_element_constant_list_;
- }
-
- ZoneObjectList* result_constant_list() {
- return &result_constant_list_;
- }
-
- void* PreallocatedStorageNew(size_t size);
- void PreallocatedStorageDelete(void* p);
- void PreallocatedStorageInit(size_t size);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Debugger* debugger() { return debugger_; }
- Debug* debug() { return debug_; }
-#endif
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- ProducerHeapProfile* producer_heap_profile() {
- return producer_heap_profile_;
- }
-#endif
-
-#ifdef DEBUG
- HistogramInfo* heap_histograms() { return heap_histograms_; }
-
- JSObject::SpillInformation* js_spill_information() {
- return &js_spill_information_;
- }
-
- int* code_kind_statistics() { return code_kind_statistics_; }
-#endif
-
-#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
- defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
- bool simulator_initialized() { return simulator_initialized_; }
- void set_simulator_initialized(bool initialized) {
- simulator_initialized_ = initialized;
- }
-
- HashMap* simulator_i_cache() { return simulator_i_cache_; }
- void set_simulator_i_cache(HashMap* hash_map) {
- simulator_i_cache_ = hash_map;
- }
-
- Redirection* simulator_redirection() {
- return simulator_redirection_;
- }
- void set_simulator_redirection(Redirection* redirection) {
- simulator_redirection_ = redirection;
- }
-#endif
-
- Factory* factory() { return reinterpret_cast<Factory*>(this); }
-
- // SerializerDeserializer state.
- static const int kPartialSnapshotCacheCapacity = 1400;
-
- static const int kJSRegexpStaticOffsetsVectorSize = 50;
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- Address external_callback() {
- return thread_local_top_.external_callback_;
- }
- void set_external_callback(Address callback) {
- thread_local_top_.external_callback_ = callback;
- }
-#endif
-
-#ifdef ENABLE_VMSTATE_TRACKING
- StateTag current_vm_state() {
- return thread_local_top_.current_vm_state_;
- }
-
- void SetCurrentVMState(StateTag state) {
- if (RuntimeProfiler::IsEnabled()) {
- if (state == JS) {
- // JS or non-JS -> JS transition.
- RuntimeProfiler::IsolateEnteredJS(this);
- } else if (thread_local_top_.current_vm_state_ == JS) {
- // JS -> non-JS transition.
- ASSERT(RuntimeProfiler::IsSomeIsolateInJS());
- RuntimeProfiler::IsolateExitedJS(this);
- }
- }
- thread_local_top_.current_vm_state_ = state;
- }
-#endif
-
- void ResetEagerOptimizingData();
-
- private:
- Isolate();
-
- // The per-process lock should be acquired before the ThreadDataTable is
- // modified.
- class ThreadDataTable {
- public:
- ThreadDataTable();
- ~ThreadDataTable();
-
- PerIsolateThreadData* Lookup(Isolate* isolate, ThreadId thread_id);
- void Insert(PerIsolateThreadData* data);
- void Remove(Isolate* isolate, ThreadId thread_id);
- void Remove(PerIsolateThreadData* data);
-
- private:
- PerIsolateThreadData* list_;
- };
-
- // These items form a stack synchronously with threads Enter'ing and Exit'ing
- // the Isolate. The top of the stack points to a thread which is currently
- // running the Isolate. When the stack is empty, the Isolate is considered
- // not entered by any thread and can be Disposed.
- // If the same thread enters the Isolate more then once, the entry_count_
- // is incremented rather then a new item pushed to the stack.
- class EntryStackItem {
- public:
- EntryStackItem(PerIsolateThreadData* previous_thread_data,
- Isolate* previous_isolate,
- EntryStackItem* previous_item)
- : entry_count(1),
- previous_thread_data(previous_thread_data),
- previous_isolate(previous_isolate),
- previous_item(previous_item) { }
-
- int entry_count;
- PerIsolateThreadData* previous_thread_data;
- Isolate* previous_isolate;
- EntryStackItem* previous_item;
-
- DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
- };
-
- // This mutex protects highest_thread_id_, thread_data_table_ and
- // default_isolate_.
- static Mutex* process_wide_mutex_;
-
- static Thread::LocalStorageKey per_isolate_thread_data_key_;
- static Thread::LocalStorageKey isolate_key_;
- static Thread::LocalStorageKey thread_id_key_;
- static Isolate* default_isolate_;
- static ThreadDataTable* thread_data_table_;
- static ThreadId highest_thread_id_;
-
- bool PreInit();
-
- void Deinit();
-
- static void SetIsolateThreadLocals(Isolate* isolate,
- PerIsolateThreadData* data);
-
- enum State {
- UNINITIALIZED, // Some components may not have been allocated.
- PREINITIALIZED, // Components have been allocated but not initialized.
- INITIALIZED // All components are fully initialized.
- };
-
- State state_;
- EntryStackItem* entry_stack_;
-
- // Allocate and insert PerIsolateThreadData into the ThreadDataTable
- // (regardless of whether such data already exists).
- PerIsolateThreadData* AllocatePerIsolateThreadData(ThreadId thread_id);
-
- // Find the PerThread for this particular (isolate, thread) combination.
- // If one does not yet exist, allocate a new one.
- PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
-
- // PreInits and returns a default isolate. Needed when a new thread tries
- // to create a Locker for the first time (the lock itself is in the isolate).
- static Isolate* GetDefaultIsolateForLocking();
-
- // Initializes the current thread to run this Isolate.
- // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
- // at the same time, this should be prevented using external locking.
- void Enter();
-
- // Exits the current thread. The previosuly entered Isolate is restored
- // for the thread.
- // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
- // at the same time, this should be prevented using external locking.
- void Exit();
-
- void PreallocatedMemoryThreadStart();
- void PreallocatedMemoryThreadStop();
- void InitializeThreadLocal();
-
- void PrintStackTrace(FILE* out, ThreadLocalTop* thread);
- void MarkCompactPrologue(bool is_compacting,
- ThreadLocalTop* archived_thread_data);
- void MarkCompactEpilogue(bool is_compacting,
- ThreadLocalTop* archived_thread_data);
-
- void FillCache();
-
- int stack_trace_nesting_level_;
- StringStream* incomplete_message_;
- // The preallocated memory thread singleton.
- PreallocatedMemoryThread* preallocated_memory_thread_;
- Address isolate_addresses_[k_isolate_address_count + 1]; // NOLINT
- NoAllocationStringAllocator* preallocated_message_space_;
-
- Bootstrapper* bootstrapper_;
- RuntimeProfiler* runtime_profiler_;
- CompilationCache* compilation_cache_;
- Counters* counters_;
- CodeRange* code_range_;
- Mutex* break_access_;
- Heap heap_;
- Logger* logger_;
- StackGuard stack_guard_;
- StatsTable* stats_table_;
- StubCache* stub_cache_;
- DeoptimizerData* deoptimizer_data_;
- ThreadLocalTop thread_local_top_;
- bool capture_stack_trace_for_uncaught_exceptions_;
- int stack_trace_for_uncaught_exceptions_frame_limit_;
- StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
- TranscendentalCache* transcendental_cache_;
- MemoryAllocator* memory_allocator_;
- KeyedLookupCache* keyed_lookup_cache_;
- ContextSlotCache* context_slot_cache_;
- DescriptorLookupCache* descriptor_lookup_cache_;
- v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
- HandleScopeImplementer* handle_scope_implementer_;
- ScannerConstants* scanner_constants_;
- Zone zone_;
- PreallocatedStorage in_use_list_;
- PreallocatedStorage free_list_;
- bool preallocated_storage_preallocated_;
- PcToCodeCache* pc_to_code_cache_;
- StringInputBuffer* write_input_buffer_;
- GlobalHandles* global_handles_;
- ContextSwitcher* context_switcher_;
- ThreadManager* thread_manager_;
- AstSentinels* ast_sentinels_;
- RuntimeState runtime_state_;
- StringInputBuffer liveedit_compare_substrings_buf1_;
- StringInputBuffer liveedit_compare_substrings_buf2_;
- StaticResource<SafeStringInputBuffer> compiler_safe_string_input_buffer_;
- Builtins builtins_;
- StringTracker* string_tracker_;
- unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
- unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
- StringInputBuffer objects_string_compare_buffer_a_;
- StringInputBuffer objects_string_compare_buffer_b_;
- StaticResource<StringInputBuffer> objects_string_input_buffer_;
- unibrow::Mapping<unibrow::Ecma262Canonicalize>
- regexp_macro_assembler_canonicalize_;
- RegExpStack* regexp_stack_;
- unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
- ZoneObjectList frame_element_constant_list_;
- ZoneObjectList result_constant_list_;
-
-#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
- defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
- bool simulator_initialized_;
- HashMap* simulator_i_cache_;
- Redirection* simulator_redirection_;
-#endif
-
-#ifdef DEBUG
- // A static array of histogram info for each type.
- HistogramInfo heap_histograms_[LAST_TYPE + 1];
- JSObject::SpillInformation js_spill_information_;
- int code_kind_statistics_[Code::NUMBER_OF_KINDS];
-#endif
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Debugger* debugger_;
- Debug* debug_;
-#endif
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- ProducerHeapProfile* producer_heap_profile_;
-#endif
-
-#define GLOBAL_BACKING_STORE(type, name, initialvalue) \
- type name##_;
- ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
-#undef GLOBAL_BACKING_STORE
-
-#define GLOBAL_ARRAY_BACKING_STORE(type, name, length) \
- type name##_[length];
- ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_BACKING_STORE)
-#undef GLOBAL_ARRAY_BACKING_STORE
-
-#ifdef DEBUG
- // This class is huge and has a number of fields controlled by
- // preprocessor defines. Make sure the offsets of these fields agree
- // between compilation units.
-#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
- static const intptr_t name##_debug_offset_;
- ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
- ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
-#undef ISOLATE_FIELD_OFFSET
-#endif
-
- friend class ExecutionAccess;
- friend class IsolateInitializer;
- friend class v8::Isolate;
- friend class v8::Locker;
-
- DISALLOW_COPY_AND_ASSIGN(Isolate);
-};
-
-
-// If the GCC version is 4.1.x or 4.2.x an additional field is added to the
-// class as a work around for a bug in the generated code found with these
-// versions of GCC. See V8 issue 122 for details.
-class SaveContext BASE_EMBEDDED {
- public:
- explicit SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
- if (isolate->context() != NULL) {
- context_ = Handle<Context>(isolate->context());
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
- dummy_ = Handle<Context>(isolate->context());
-#endif
- }
- isolate->set_save_context(this);
-
- // If there is no JS frame under the current C frame, use the value 0.
- JavaScriptFrameIterator it(isolate);
- js_sp_ = it.done() ? 0 : it.frame()->sp();
- }
-
- ~SaveContext() {
- if (context_.is_null()) {
- Isolate* isolate = Isolate::Current();
- isolate->set_context(NULL);
- isolate->set_save_context(prev_);
- } else {
- Isolate* isolate = context_->GetIsolate();
- isolate->set_context(*context_);
- isolate->set_save_context(prev_);
- }
- }
-
- Handle<Context> context() { return context_; }
- SaveContext* prev() { return prev_; }
-
- // Returns true if this save context is below a given JavaScript frame.
- bool below(JavaScriptFrame* frame) {
- return (js_sp_ == 0) || (frame->sp() < js_sp_);
- }
-
- private:
- Handle<Context> context_;
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
- Handle<Context> dummy_;
-#endif
- SaveContext* prev_;
- Address js_sp_; // The top JS frame's sp when saving context.
-};
-
-
-class AssertNoContextChange BASE_EMBEDDED {
-#ifdef DEBUG
- public:
- AssertNoContextChange() :
- scope_(Isolate::Current()),
- context_(Isolate::Current()->context(), Isolate::Current()) {
- }
-
- ~AssertNoContextChange() {
- ASSERT(Isolate::Current()->context() == *context_);
- }
-
- private:
- HandleScope scope_;
- Handle<Context> context_;
-#else
- public:
- AssertNoContextChange() { }
-#endif
-};
-
-
-class ExecutionAccess BASE_EMBEDDED {
- public:
- explicit ExecutionAccess(Isolate* isolate) : isolate_(isolate) {
- Lock(isolate);
- }
- ~ExecutionAccess() { Unlock(isolate_); }
-
- static void Lock(Isolate* isolate) { isolate->break_access_->Lock(); }
- static void Unlock(Isolate* isolate) { isolate->break_access_->Unlock(); }
-
- static bool TryLock(Isolate* isolate) {
- return isolate->break_access_->TryLock();
- }
-
- private:
- Isolate* isolate_;
-};
-
-
-// Support for checking for stack-overflows in C++ code.
-class StackLimitCheck BASE_EMBEDDED {
- public:
- explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }
-
- bool HasOverflowed() const {
- StackGuard* stack_guard = isolate_->stack_guard();
- // Stack has overflowed in C++ code only if stack pointer exceeds the C++
- // stack guard and the limits are not set to interrupt values.
- // TODO(214): Stack overflows are ignored if a interrupt is pending. This
- // code should probably always use the initial C++ limit.
- return (reinterpret_cast<uintptr_t>(this) < stack_guard->climit()) &&
- stack_guard->IsStackOverflow();
- }
- private:
- Isolate* isolate_;
-};
-
-
-// Support for temporarily postponing interrupts. When the outermost
-// postpone scope is left the interrupts will be re-enabled and any
-// interrupts that occurred while in the scope will be taken into
-// account.
-class PostponeInterruptsScope BASE_EMBEDDED {
- public:
- explicit PostponeInterruptsScope(Isolate* isolate)
- : stack_guard_(isolate->stack_guard()) {
- stack_guard_->thread_local_.postpone_interrupts_nesting_++;
- stack_guard_->DisableInterrupts();
- }
-
- ~PostponeInterruptsScope() {
- if (--stack_guard_->thread_local_.postpone_interrupts_nesting_ == 0) {
- stack_guard_->EnableInterrupts();
- }
- }
- private:
- StackGuard* stack_guard_;
-};
-
-
-// Temporary macros for accessing current isolate and its subobjects.
-// They provide better readability, especially when used a lot in the code.
-#define HEAP (v8::internal::Isolate::Current()->heap())
-#define FACTORY (v8::internal::Isolate::Current()->factory())
-#define ISOLATE (v8::internal::Isolate::Current())
-#define ZONE (v8::internal::Isolate::Current()->zone())
-#define LOGGER (v8::internal::Isolate::Current()->logger())
-
-
-// Tells whether the global context is marked with out of memory.
-inline bool Context::has_out_of_memory() {
- return global_context()->out_of_memory()->IsTrue();
-}
-
-
-// Mark the global context with out of memory.
-inline void Context::mark_out_of_memory() {
- global_context()->set_out_of_memory(HEAP->true_value());
-}
-
-
-// Temporary macro to be used to flag definitions that are indeed static
-// and not per-isolate. (It would be great to be able to grep for [static]!)
-#define RLYSTC static
-
-
-// Temporary macro to be used to flag classes that should be static.
-#define STATIC_CLASS class
-
-
-// Temporary macro to be used to flag classes that are completely converted
-// to be isolate-friendly. Their mix of static/nonstatic methods/fields is
-// correct.
-#define ISOLATED_CLASS class
-
-} } // namespace v8::internal
-
-// TODO(isolates): Get rid of these -inl.h includes and place them only where
-// they're needed.
-#include "allocation-inl.h"
-#include "zone-inl.h"
-#include "frames-inl.h"
-
-#endif // V8_ISOLATE_H_
diff --git a/src/3rdparty/v8/src/json.js b/src/3rdparty/v8/src/json.js
deleted file mode 100644
index 7a6189c..0000000
--- a/src/3rdparty/v8/src/json.js
+++ /dev/null
@@ -1,342 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-var $JSON = global.JSON;
-
-function Revive(holder, name, reviver) {
- var val = holder[name];
- if (IS_OBJECT(val)) {
- if (IS_ARRAY(val)) {
- var length = val.length;
- for (var i = 0; i < length; i++) {
- var newElement = Revive(val, $String(i), reviver);
- val[i] = newElement;
- }
- } else {
- for (var p in val) {
- if (%_CallFunction(val, p, ObjectHasOwnProperty)) {
- var newElement = Revive(val, p, reviver);
- if (IS_UNDEFINED(newElement)) {
- delete val[p];
- } else {
- val[p] = newElement;
- }
- }
- }
- }
- }
- return %_CallFunction(holder, name, val, reviver);
-}
-
-function JSONParse(text, reviver) {
- var unfiltered = %ParseJson(TO_STRING_INLINE(text));
- if (IS_FUNCTION(reviver)) {
- return Revive({'': unfiltered}, '', reviver);
- } else {
- return unfiltered;
- }
-}
-
-function SerializeArray(value, replacer, stack, indent, gap) {
- if (!%PushIfAbsent(stack, value)) {
- throw MakeTypeError('circular_structure', $Array());
- }
- var stepback = indent;
- indent += gap;
- var partial = new InternalArray();
- var len = value.length;
- for (var i = 0; i < len; i++) {
- var strP = JSONSerialize($String(i), value, replacer, stack,
- indent, gap);
- if (IS_UNDEFINED(strP)) {
- strP = "null";
- }
- partial.push(strP);
- }
- var final;
- if (gap == "") {
- final = "[" + partial.join(",") + "]";
- } else if (partial.length > 0) {
- var separator = ",\n" + indent;
- final = "[\n" + indent + partial.join(separator) + "\n" +
- stepback + "]";
- } else {
- final = "[]";
- }
- stack.pop();
- return final;
-}
-
-function SerializeObject(value, replacer, stack, indent, gap) {
- if (!%PushIfAbsent(stack, value)) {
- throw MakeTypeError('circular_structure', $Array());
- }
- var stepback = indent;
- indent += gap;
- var partial = new InternalArray();
- if (IS_ARRAY(replacer)) {
- var length = replacer.length;
- for (var i = 0; i < length; i++) {
- if (%_CallFunction(replacer, i, ObjectHasOwnProperty)) {
- var p = replacer[i];
- var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
- if (!IS_UNDEFINED(strP)) {
- var member = %QuoteJSONString(p) + ":";
- if (gap != "") member += " ";
- member += strP;
- partial.push(member);
- }
- }
- }
- } else {
- for (var p in value) {
- if (%_CallFunction(value, p, ObjectHasOwnProperty)) {
- var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
- if (!IS_UNDEFINED(strP)) {
- var member = %QuoteJSONString(p) + ":";
- if (gap != "") member += " ";
- member += strP;
- partial.push(member);
- }
- }
- }
- }
- var final;
- if (gap == "") {
- final = "{" + partial.join(",") + "}";
- } else if (partial.length > 0) {
- var separator = ",\n" + indent;
- final = "{\n" + indent + partial.join(separator) + "\n" +
- stepback + "}";
- } else {
- final = "{}";
- }
- stack.pop();
- return final;
-}
-
-function JSONSerialize(key, holder, replacer, stack, indent, gap) {
- var value = holder[key];
- if (IS_SPEC_OBJECT(value)) {
- var toJSON = value.toJSON;
- if (IS_FUNCTION(toJSON)) {
- value = %_CallFunction(value, key, toJSON);
- }
- }
- if (IS_FUNCTION(replacer)) {
- value = %_CallFunction(holder, key, value, replacer);
- }
- if (IS_STRING(value)) {
- return %QuoteJSONString(value);
- } else if (IS_NUMBER(value)) {
- return NUMBER_IS_FINITE(value) ? $String(value) : "null";
- } else if (IS_BOOLEAN(value)) {
- return value ? "true" : "false";
- } else if (IS_NULL(value)) {
- return "null";
- } else if (IS_SPEC_OBJECT(value) && !(typeof value == "function")) {
- // Non-callable object. If it's a primitive wrapper, it must be unwrapped.
- if (IS_ARRAY(value)) {
- return SerializeArray(value, replacer, stack, indent, gap);
- } else if (IS_NUMBER_WRAPPER(value)) {
- value = ToNumber(value);
- return NUMBER_IS_FINITE(value) ? ToString(value) : "null";
- } else if (IS_STRING_WRAPPER(value)) {
- return %QuoteJSONString(ToString(value));
- } else if (IS_BOOLEAN_WRAPPER(value)) {
- return %_ValueOf(value) ? "true" : "false";
- } else {
- return SerializeObject(value, replacer, stack, indent, gap);
- }
- }
- // Undefined or a callable object.
- return void 0;
-}
-
-
-function BasicSerializeArray(value, stack, builder) {
- var len = value.length;
- if (len == 0) {
- builder.push("[]");
- return;
- }
- if (!%PushIfAbsent(stack, value)) {
- throw MakeTypeError('circular_structure', $Array());
- }
- builder.push("[");
- var val = value[0];
- if (IS_STRING(val)) {
- // First entry is a string. Remaining entries are likely to be strings too.
- builder.push(%QuoteJSONString(val));
- for (var i = 1; i < len; i++) {
- val = value[i];
- if (IS_STRING(val)) {
- builder.push(%QuoteJSONStringComma(val));
- } else {
- builder.push(",");
- var before = builder.length;
- BasicJSONSerialize(i, value[i], stack, builder);
- if (before == builder.length) builder[before - 1] = ",null";
- }
- }
- } else if (IS_NUMBER(val)) {
- // First entry is a number. Remaining entries are likely to be numbers too.
- builder.push(NUMBER_IS_FINITE(val) ? %_NumberToString(val) : "null");
- for (var i = 1; i < len; i++) {
- builder.push(",");
- val = value[i];
- if (IS_NUMBER(val)) {
- builder.push(NUMBER_IS_FINITE(val)
- ? %_NumberToString(val)
- : "null");
- } else {
- var before = builder.length;
- BasicJSONSerialize(i, value[i], stack, builder);
- if (before == builder.length) builder[before - 1] = ",null";
- }
- }
- } else {
- var before = builder.length;
- BasicJSONSerialize(0, val, stack, builder);
- if (before == builder.length) builder.push("null");
- for (var i = 1; i < len; i++) {
- builder.push(",");
- before = builder.length;
- val = value[i];
- BasicJSONSerialize(i, val, stack, builder);
- if (before == builder.length) builder[before - 1] = ",null";
- }
- }
- stack.pop();
- builder.push("]");
-}
-
-
-function BasicSerializeObject(value, stack, builder) {
- if (!%PushIfAbsent(stack, value)) {
- throw MakeTypeError('circular_structure', $Array());
- }
- builder.push("{");
- var first = true;
- for (var p in value) {
- if (%HasLocalProperty(value, p)) {
- if (!first) {
- builder.push(%QuoteJSONStringComma(p));
- } else {
- builder.push(%QuoteJSONString(p));
- }
- builder.push(":");
- var before = builder.length;
- BasicJSONSerialize(p, value[p], stack, builder);
- if (before == builder.length) {
- builder.pop();
- builder.pop();
- } else {
- first = false;
- }
- }
- }
- stack.pop();
- builder.push("}");
-}
-
-
-function BasicJSONSerialize(key, value, stack, builder) {
- if (IS_SPEC_OBJECT(value)) {
- var toJSON = value.toJSON;
- if (IS_FUNCTION(toJSON)) {
- value = %_CallFunction(value, ToString(key), toJSON);
- }
- }
- if (IS_STRING(value)) {
- builder.push(%QuoteJSONString(value));
- } else if (IS_NUMBER(value)) {
- builder.push(NUMBER_IS_FINITE(value) ? %_NumberToString(value) : "null");
- } else if (IS_BOOLEAN(value)) {
- builder.push(value ? "true" : "false");
- } else if (IS_NULL(value)) {
- builder.push("null");
- } else if (IS_SPEC_OBJECT(value) && !(typeof value == "function")) {
- // Value is a non-callable object.
- // Unwrap value if necessary
- if (IS_NUMBER_WRAPPER(value)) {
- value = ToNumber(value);
- builder.push(NUMBER_IS_FINITE(value) ? %_NumberToString(value) : "null");
- } else if (IS_STRING_WRAPPER(value)) {
- builder.push(%QuoteJSONString(ToString(value)));
- } else if (IS_BOOLEAN_WRAPPER(value)) {
- builder.push(%_ValueOf(value) ? "true" : "false");
- } else if (IS_ARRAY(value)) {
- BasicSerializeArray(value, stack, builder);
- } else {
- BasicSerializeObject(value, stack, builder);
- }
- }
-}
-
-
-function JSONStringify(value, replacer, space) {
- if (%_ArgumentsLength() == 1) {
- var builder = new InternalArray();
- BasicJSONSerialize('', value, new InternalArray(), builder);
- if (builder.length == 0) return;
- var result = %_FastAsciiArrayJoin(builder, "");
- if (!IS_UNDEFINED(result)) return result;
- return %StringBuilderConcat(builder, builder.length, "");
- }
- if (IS_OBJECT(space)) {
- // Unwrap 'space' if it is wrapped
- if (IS_NUMBER_WRAPPER(space)) {
- space = ToNumber(space);
- } else if (IS_STRING_WRAPPER(space)) {
- space = ToString(space);
- }
- }
- var gap;
- if (IS_NUMBER(space)) {
- space = MathMax(0, MathMin(ToInteger(space), 10));
- gap = SubString(" ", 0, space);
- } else if (IS_STRING(space)) {
- if (space.length > 10) {
- gap = SubString(space, 0, 10);
- } else {
- gap = space;
- }
- } else {
- gap = "";
- }
- return JSONSerialize('', {'': value}, replacer, new InternalArray(), "", gap);
-}
-
-function SetupJSON() {
- InstallFunctions($JSON, DONT_ENUM, $Array(
- "parse", JSONParse,
- "stringify", JSONStringify
- ));
-}
-
-SetupJSON();
diff --git a/src/3rdparty/v8/src/jsregexp.cc b/src/3rdparty/v8/src/jsregexp.cc
deleted file mode 100644
index 06aae35..0000000
--- a/src/3rdparty/v8/src/jsregexp.cc
+++ /dev/null
@@ -1,5371 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "ast.h"
-#include "compiler.h"
-#include "execution.h"
-#include "factory.h"
-#include "jsregexp.h"
-#include "platform.h"
-#include "string-search.h"
-#include "runtime.h"
-#include "compilation-cache.h"
-#include "string-stream.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-macro-assembler-tracer.h"
-#include "regexp-macro-assembler-irregexp.h"
-#include "regexp-stack.h"
-
-#ifndef V8_INTERPRETED_REGEXP
-#if V8_TARGET_ARCH_IA32
-#include "ia32/regexp-macro-assembler-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/regexp-macro-assembler-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/regexp-macro-assembler-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/regexp-macro-assembler-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-#endif
-
-#include "interpreter-irregexp.h"
-
-
-namespace v8 {
-namespace internal {
-
-Handle<Object> RegExpImpl::CreateRegExpLiteral(Handle<JSFunction> constructor,
- Handle<String> pattern,
- Handle<String> flags,
- bool* has_pending_exception) {
- // Call the construct code with 2 arguments.
- Object** argv[2] = { Handle<Object>::cast(pattern).location(),
- Handle<Object>::cast(flags).location() };
- return Execution::New(constructor, 2, argv, has_pending_exception);
-}
-
-
-static JSRegExp::Flags RegExpFlagsFromString(Handle<String> str) {
- int flags = JSRegExp::NONE;
- for (int i = 0; i < str->length(); i++) {
- switch (str->Get(i)) {
- case 'i':
- flags |= JSRegExp::IGNORE_CASE;
- break;
- case 'g':
- flags |= JSRegExp::GLOBAL;
- break;
- case 'm':
- flags |= JSRegExp::MULTILINE;
- break;
- }
- }
- return JSRegExp::Flags(flags);
-}
-
-
-static inline void ThrowRegExpException(Handle<JSRegExp> re,
- Handle<String> pattern,
- Handle<String> error_text,
- const char* message) {
- Isolate* isolate = re->GetIsolate();
- Factory* factory = isolate->factory();
- Handle<FixedArray> elements = factory->NewFixedArray(2);
- elements->set(0, *pattern);
- elements->set(1, *error_text);
- Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> regexp_err = factory->NewSyntaxError(message, array);
- isolate->Throw(*regexp_err);
-}
-
-
-// Generic RegExp methods. Dispatches to implementation specific methods.
-
-
-Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
- Handle<String> pattern,
- Handle<String> flag_str) {
- Isolate* isolate = re->GetIsolate();
- JSRegExp::Flags flags = RegExpFlagsFromString(flag_str);
- CompilationCache* compilation_cache = isolate->compilation_cache();
- Handle<FixedArray> cached = compilation_cache->LookupRegExp(pattern, flags);
- bool in_cache = !cached.is_null();
- LOG(isolate, RegExpCompileEvent(re, in_cache));
-
- Handle<Object> result;
- if (in_cache) {
- re->set_data(*cached);
- return re;
- }
- pattern = FlattenGetString(pattern);
- CompilationZoneScope zone_scope(DELETE_ON_EXIT);
- PostponeInterruptsScope postpone(isolate);
- RegExpCompileData parse_result;
- FlatStringReader reader(isolate, pattern);
- if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
- &parse_result)) {
- // Throw an exception if we fail to parse the pattern.
- ThrowRegExpException(re,
- pattern,
- parse_result.error,
- "malformed_regexp");
- return Handle<Object>::null();
- }
-
- if (parse_result.simple && !flags.is_ignore_case()) {
- // Parse-tree is a single atom that is equal to the pattern.
- AtomCompile(re, pattern, flags, pattern);
- } else if (parse_result.tree->IsAtom() &&
- !flags.is_ignore_case() &&
- parse_result.capture_count == 0) {
- RegExpAtom* atom = parse_result.tree->AsAtom();
- Vector<const uc16> atom_pattern = atom->data();
- Handle<String> atom_string =
- isolate->factory()->NewStringFromTwoByte(atom_pattern);
- AtomCompile(re, pattern, flags, atom_string);
- } else {
- IrregexpInitialize(re, pattern, flags, parse_result.capture_count);
- }
- ASSERT(re->data()->IsFixedArray());
- // Compilation succeeded so the data is set on the regexp
- // and we can store it in the cache.
- Handle<FixedArray> data(FixedArray::cast(re->data()));
- compilation_cache->PutRegExp(pattern, flags, data);
-
- return re;
-}
-
-
-Handle<Object> RegExpImpl::Exec(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Handle<JSArray> last_match_info) {
- switch (regexp->TypeTag()) {
- case JSRegExp::ATOM:
- return AtomExec(regexp, subject, index, last_match_info);
- case JSRegExp::IRREGEXP: {
- Handle<Object> result =
- IrregexpExec(regexp, subject, index, last_match_info);
- ASSERT(!result.is_null() || Isolate::Current()->has_pending_exception());
- return result;
- }
- default:
- UNREACHABLE();
- return Handle<Object>::null();
- }
-}
-
-
-// RegExp Atom implementation: Simple string search using indexOf.
-
-
-void RegExpImpl::AtomCompile(Handle<JSRegExp> re,
- Handle<String> pattern,
- JSRegExp::Flags flags,
- Handle<String> match_pattern) {
- re->GetIsolate()->factory()->SetRegExpAtomData(re,
- JSRegExp::ATOM,
- pattern,
- flags,
- match_pattern);
-}
-
-
-static void SetAtomLastCapture(FixedArray* array,
- String* subject,
- int from,
- int to) {
- NoHandleAllocation no_handles;
- RegExpImpl::SetLastCaptureCount(array, 2);
- RegExpImpl::SetLastSubject(array, subject);
- RegExpImpl::SetLastInput(array, subject);
- RegExpImpl::SetCapture(array, 0, from);
- RegExpImpl::SetCapture(array, 1, to);
-}
-
- /* template <typename SubjectChar>, typename PatternChar>
-static int ReStringMatch(Vector<const SubjectChar> sub_vector,
- Vector<const PatternChar> pat_vector,
- int start_index) {
-
- int pattern_length = pat_vector.length();
- if (pattern_length == 0) return start_index;
-
- int subject_length = sub_vector.length();
- if (start_index + pattern_length > subject_length) return -1;
- return SearchString(sub_vector, pat_vector, start_index);
-}
- */
-Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
- Handle<String> subject,
- int index,
- Handle<JSArray> last_match_info) {
- Isolate* isolate = re->GetIsolate();
-
- ASSERT(0 <= index);
- ASSERT(index <= subject->length());
-
- if (!subject->IsFlat()) FlattenString(subject);
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
- // Extract flattened substrings of cons strings before determining asciiness.
- String* seq_sub = *subject;
- if (seq_sub->IsConsString()) seq_sub = ConsString::cast(seq_sub)->first();
-
- String* needle = String::cast(re->DataAt(JSRegExp::kAtomPatternIndex));
- int needle_len = needle->length();
-
- if (needle_len != 0) {
- if (index + needle_len > subject->length())
- return isolate->factory()->null_value();
-
- // dispatch on type of strings
- index = (needle->IsAsciiRepresentation()
- ? (seq_sub->IsAsciiRepresentation()
- ? SearchString(isolate,
- seq_sub->ToAsciiVector(),
- needle->ToAsciiVector(),
- index)
- : SearchString(isolate,
- seq_sub->ToUC16Vector(),
- needle->ToAsciiVector(),
- index))
- : (seq_sub->IsAsciiRepresentation()
- ? SearchString(isolate,
- seq_sub->ToAsciiVector(),
- needle->ToUC16Vector(),
- index)
- : SearchString(isolate,
- seq_sub->ToUC16Vector(),
- needle->ToUC16Vector(),
- index)));
- if (index == -1) return FACTORY->null_value();
- }
- ASSERT(last_match_info->HasFastElements());
-
- {
- NoHandleAllocation no_handles;
- FixedArray* array = FixedArray::cast(last_match_info->elements());
- SetAtomLastCapture(array, *subject, index, index + needle_len);
- }
- return last_match_info;
-}
-
-
-// Irregexp implementation.
-
-// Ensures that the regexp object contains a compiled version of the
-// source for either ASCII or non-ASCII strings.
-// If the compiled version doesn't already exist, it is compiled
-// from the source pattern.
-// If compilation fails, an exception is thrown and this function
-// returns false.
-bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii) {
- Object* compiled_code = re->DataAt(JSRegExp::code_index(is_ascii));
-#ifdef V8_INTERPRETED_REGEXP
- if (compiled_code->IsByteArray()) return true;
-#else // V8_INTERPRETED_REGEXP (RegExp native code)
- if (compiled_code->IsCode()) return true;
-#endif
- return CompileIrregexp(re, is_ascii);
-}
-
-
-bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re, bool is_ascii) {
- // Compile the RegExp.
- Isolate* isolate = re->GetIsolate();
- CompilationZoneScope zone_scope(DELETE_ON_EXIT);
- PostponeInterruptsScope postpone(isolate);
- Object* entry = re->DataAt(JSRegExp::code_index(is_ascii));
- if (entry->IsJSObject()) {
- // If it's a JSObject, a previous compilation failed and threw this object.
- // Re-throw the object without trying again.
- isolate->Throw(entry);
- return false;
- }
- ASSERT(entry->IsTheHole());
-
- JSRegExp::Flags flags = re->GetFlags();
-
- Handle<String> pattern(re->Pattern());
- if (!pattern->IsFlat()) {
- FlattenString(pattern);
- }
-
- RegExpCompileData compile_data;
- FlatStringReader reader(isolate, pattern);
- if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
- &compile_data)) {
- // Throw an exception if we fail to parse the pattern.
- // THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
- ThrowRegExpException(re,
- pattern,
- compile_data.error,
- "malformed_regexp");
- return false;
- }
- RegExpEngine::CompilationResult result =
- RegExpEngine::Compile(&compile_data,
- flags.is_ignore_case(),
- flags.is_multiline(),
- pattern,
- is_ascii);
- if (result.error_message != NULL) {
- // Unable to compile regexp.
- Factory* factory = isolate->factory();
- Handle<FixedArray> elements = factory->NewFixedArray(2);
- elements->set(0, *pattern);
- Handle<String> error_message =
- factory->NewStringFromUtf8(CStrVector(result.error_message));
- elements->set(1, *error_message);
- Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> regexp_err =
- factory->NewSyntaxError("malformed_regexp", array);
- isolate->Throw(*regexp_err);
- re->SetDataAt(JSRegExp::code_index(is_ascii), *regexp_err);
- return false;
- }
-
- Handle<FixedArray> data = Handle<FixedArray>(FixedArray::cast(re->data()));
- data->set(JSRegExp::code_index(is_ascii), result.code);
- int register_max = IrregexpMaxRegisterCount(*data);
- if (result.num_registers > register_max) {
- SetIrregexpMaxRegisterCount(*data, result.num_registers);
- }
-
- return true;
-}
-
-
-int RegExpImpl::IrregexpMaxRegisterCount(FixedArray* re) {
- return Smi::cast(
- re->get(JSRegExp::kIrregexpMaxRegisterCountIndex))->value();
-}
-
-
-void RegExpImpl::SetIrregexpMaxRegisterCount(FixedArray* re, int value) {
- re->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(value));
-}
-
-
-int RegExpImpl::IrregexpNumberOfCaptures(FixedArray* re) {
- return Smi::cast(re->get(JSRegExp::kIrregexpCaptureCountIndex))->value();
-}
-
-
-int RegExpImpl::IrregexpNumberOfRegisters(FixedArray* re) {
- return Smi::cast(re->get(JSRegExp::kIrregexpMaxRegisterCountIndex))->value();
-}
-
-
-ByteArray* RegExpImpl::IrregexpByteCode(FixedArray* re, bool is_ascii) {
- return ByteArray::cast(re->get(JSRegExp::code_index(is_ascii)));
-}
-
-
-Code* RegExpImpl::IrregexpNativeCode(FixedArray* re, bool is_ascii) {
- return Code::cast(re->get(JSRegExp::code_index(is_ascii)));
-}
-
-
-void RegExpImpl::IrregexpInitialize(Handle<JSRegExp> re,
- Handle<String> pattern,
- JSRegExp::Flags flags,
- int capture_count) {
- // Initialize compiled code entries to null.
- re->GetIsolate()->factory()->SetRegExpIrregexpData(re,
- JSRegExp::IRREGEXP,
- pattern,
- flags,
- capture_count);
-}
-
-
-int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
- Handle<String> subject) {
- if (!subject->IsFlat()) {
- FlattenString(subject);
- }
- // Check the asciiness of the underlying storage.
- bool is_ascii;
- {
- AssertNoAllocation no_gc;
- String* sequential_string = *subject;
- if (subject->IsConsString()) {
- sequential_string = ConsString::cast(*subject)->first();
- }
- is_ascii = sequential_string->IsAsciiRepresentation();
- }
- if (!EnsureCompiledIrregexp(regexp, is_ascii)) {
- return -1;
- }
-#ifdef V8_INTERPRETED_REGEXP
- // Byte-code regexp needs space allocated for all its registers.
- return IrregexpNumberOfRegisters(FixedArray::cast(regexp->data()));
-#else // V8_INTERPRETED_REGEXP
- // Native regexp only needs room to output captures. Registers are handled
- // internally.
- return (IrregexpNumberOfCaptures(FixedArray::cast(regexp->data())) + 1) * 2;
-#endif // V8_INTERPRETED_REGEXP
-}
-
-
-RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(
- Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Vector<int> output) {
- Isolate* isolate = regexp->GetIsolate();
-
- Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()), isolate);
-
- ASSERT(index >= 0);
- ASSERT(index <= subject->length());
- ASSERT(subject->IsFlat());
-
- // A flat ASCII string might have a two-byte first part.
- if (subject->IsConsString()) {
- subject = Handle<String>(ConsString::cast(*subject)->first(), isolate);
- }
-
-#ifndef V8_INTERPRETED_REGEXP
- ASSERT(output.length() >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
- do {
- bool is_ascii = subject->IsAsciiRepresentation();
- Handle<Code> code(IrregexpNativeCode(*irregexp, is_ascii), isolate);
- NativeRegExpMacroAssembler::Result res =
- NativeRegExpMacroAssembler::Match(code,
- subject,
- output.start(),
- output.length(),
- index,
- isolate);
- if (res != NativeRegExpMacroAssembler::RETRY) {
- ASSERT(res != NativeRegExpMacroAssembler::EXCEPTION ||
- isolate->has_pending_exception());
- STATIC_ASSERT(
- static_cast<int>(NativeRegExpMacroAssembler::SUCCESS) == RE_SUCCESS);
- STATIC_ASSERT(
- static_cast<int>(NativeRegExpMacroAssembler::FAILURE) == RE_FAILURE);
- STATIC_ASSERT(static_cast<int>(NativeRegExpMacroAssembler::EXCEPTION)
- == RE_EXCEPTION);
- return static_cast<IrregexpResult>(res);
- }
- // If result is RETRY, the string has changed representation, and we
- // must restart from scratch.
- // In this case, it means we must make sure we are prepared to handle
- // the, potentially, different subject (the string can switch between
- // being internal and external, and even between being ASCII and UC16,
- // but the characters are always the same).
- IrregexpPrepare(regexp, subject);
- } while (true);
- UNREACHABLE();
- return RE_EXCEPTION;
-#else // V8_INTERPRETED_REGEXP
-
- ASSERT(output.length() >= IrregexpNumberOfRegisters(*irregexp));
- bool is_ascii = subject->IsAsciiRepresentation();
- // We must have done EnsureCompiledIrregexp, so we can get the number of
- // registers.
- int* register_vector = output.start();
- int number_of_capture_registers =
- (IrregexpNumberOfCaptures(*irregexp) + 1) * 2;
- for (int i = number_of_capture_registers - 1; i >= 0; i--) {
- register_vector[i] = -1;
- }
- Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_ascii), isolate);
-
- if (IrregexpInterpreter::Match(isolate,
- byte_codes,
- subject,
- register_vector,
- index)) {
- return RE_SUCCESS;
- }
- return RE_FAILURE;
-#endif // V8_INTERPRETED_REGEXP
-}
-
-
-Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
- Handle<String> subject,
- int previous_index,
- Handle<JSArray> last_match_info) {
- ASSERT_EQ(jsregexp->TypeTag(), JSRegExp::IRREGEXP);
-
- // Prepare space for the return values.
-#ifdef V8_INTERPRETED_REGEXP
-#ifdef DEBUG
- if (FLAG_trace_regexp_bytecodes) {
- String* pattern = jsregexp->Pattern();
- PrintF("\n\nRegexp match: /%s/\n\n", *(pattern->ToCString()));
- PrintF("\n\nSubject string: '%s'\n\n", *(subject->ToCString()));
- }
-#endif
-#endif
- int required_registers = RegExpImpl::IrregexpPrepare(jsregexp, subject);
- if (required_registers < 0) {
- // Compiling failed with an exception.
- ASSERT(Isolate::Current()->has_pending_exception());
- return Handle<Object>::null();
- }
-
- OffsetsVector registers(required_registers);
-
- IrregexpResult res = RegExpImpl::IrregexpExecOnce(
- jsregexp, subject, previous_index, Vector<int>(registers.vector(),
- registers.length()));
- if (res == RE_SUCCESS) {
- int capture_register_count =
- (IrregexpNumberOfCaptures(FixedArray::cast(jsregexp->data())) + 1) * 2;
- last_match_info->EnsureSize(capture_register_count + kLastMatchOverhead);
- AssertNoAllocation no_gc;
- int* register_vector = registers.vector();
- FixedArray* array = FixedArray::cast(last_match_info->elements());
- for (int i = 0; i < capture_register_count; i += 2) {
- SetCapture(array, i, register_vector[i]);
- SetCapture(array, i + 1, register_vector[i + 1]);
- }
- SetLastCaptureCount(array, capture_register_count);
- SetLastSubject(array, *subject);
- SetLastInput(array, *subject);
- return last_match_info;
- }
- if (res == RE_EXCEPTION) {
- ASSERT(Isolate::Current()->has_pending_exception());
- return Handle<Object>::null();
- }
- ASSERT(res == RE_FAILURE);
- return Isolate::Current()->factory()->null_value();
-}
-
-
-// -------------------------------------------------------------------
-// Implementation of the Irregexp regular expression engine.
-//
-// The Irregexp regular expression engine is intended to be a complete
-// implementation of ECMAScript regular expressions. It generates either
-// bytecodes or native code.
-
-// The Irregexp regexp engine is structured in three steps.
-// 1) The parser generates an abstract syntax tree. See ast.cc.
-// 2) From the AST a node network is created. The nodes are all
-// subclasses of RegExpNode. The nodes represent states when
-// executing a regular expression. Several optimizations are
-// performed on the node network.
-// 3) From the nodes we generate either byte codes or native code
-// that can actually execute the regular expression (perform
-// the search). The code generation step is described in more
-// detail below.
-
-// Code generation.
-//
-// The nodes are divided into four main categories.
-// * Choice nodes
-// These represent places where the regular expression can
-// match in more than one way. For example on entry to an
-// alternation (foo|bar) or a repetition (*, +, ? or {}).
-// * Action nodes
-// These represent places where some action should be
-// performed. Examples include recording the current position
-// in the input string to a register (in order to implement
-// captures) or other actions on register for example in order
-// to implement the counters needed for {} repetitions.
-// * Matching nodes
-// These attempt to match some element part of the input string.
-// Examples of elements include character classes, plain strings
-// or back references.
-// * End nodes
-// These are used to implement the actions required on finding
-// a successful match or failing to find a match.
-//
-// The code generated (whether as byte codes or native code) maintains
-// some state as it runs. This consists of the following elements:
-//
-// * The capture registers. Used for string captures.
-// * Other registers. Used for counters etc.
-// * The current position.
-// * The stack of backtracking information. Used when a matching node
-// fails to find a match and needs to try an alternative.
-//
-// Conceptual regular expression execution model:
-//
-// There is a simple conceptual model of regular expression execution
-// which will be presented first. The actual code generated is a more
-// efficient simulation of the simple conceptual model:
-//
-// * Choice nodes are implemented as follows:
-// For each choice except the last {
-// push current position
-// push backtrack code location
-// <generate code to test for choice>
-// backtrack code location:
-// pop current position
-// }
-// <generate code to test for last choice>
-//
-// * Actions nodes are generated as follows
-// <push affected registers on backtrack stack>
-// <generate code to perform action>
-// push backtrack code location
-// <generate code to test for following nodes>
-// backtrack code location:
-// <pop affected registers to restore their state>
-// <pop backtrack location from stack and go to it>
-//
-// * Matching nodes are generated as follows:
-// if input string matches at current position
-// update current position
-// <generate code to test for following nodes>
-// else
-// <pop backtrack location from stack and go to it>
-//
-// Thus it can be seen that the current position is saved and restored
-// by the choice nodes, whereas the registers are saved and restored by
-// by the action nodes that manipulate them.
-//
-// The other interesting aspect of this model is that nodes are generated
-// at the point where they are needed by a recursive call to Emit(). If
-// the node has already been code generated then the Emit() call will
-// generate a jump to the previously generated code instead. In order to
-// limit recursion it is possible for the Emit() function to put the node
-// on a work list for later generation and instead generate a jump. The
-// destination of the jump is resolved later when the code is generated.
-//
-// Actual regular expression code generation.
-//
-// Code generation is actually more complicated than the above. In order
-// to improve the efficiency of the generated code some optimizations are
-// performed
-//
-// * Choice nodes have 1-character lookahead.
-// A choice node looks at the following character and eliminates some of
-// the choices immediately based on that character. This is not yet
-// implemented.
-// * Simple greedy loops store reduced backtracking information.
-// A quantifier like /.*foo/m will greedily match the whole input. It will
-// then need to backtrack to a point where it can match "foo". The naive
-// implementation of this would push each character position onto the
-// backtracking stack, then pop them off one by one. This would use space
-// proportional to the length of the input string. However since the "."
-// can only match in one way and always has a constant length (in this case
-// of 1) it suffices to store the current position on the top of the stack
-// once. Matching now becomes merely incrementing the current position and
-// backtracking becomes decrementing the current position and checking the
-// result against the stored current position. This is faster and saves
-// space.
-// * The current state is virtualized.
-// This is used to defer expensive operations until it is clear that they
-// are needed and to generate code for a node more than once, allowing
-// specialized an efficient versions of the code to be created. This is
-// explained in the section below.
-//
-// Execution state virtualization.
-//
-// Instead of emitting code, nodes that manipulate the state can record their
-// manipulation in an object called the Trace. The Trace object can record a
-// current position offset, an optional backtrack code location on the top of
-// the virtualized backtrack stack and some register changes. When a node is
-// to be emitted it can flush the Trace or update it. Flushing the Trace
-// will emit code to bring the actual state into line with the virtual state.
-// Avoiding flushing the state can postpone some work (eg updates of capture
-// registers). Postponing work can save time when executing the regular
-// expression since it may be found that the work never has to be done as a
-// failure to match can occur. In addition it is much faster to jump to a
-// known backtrack code location than it is to pop an unknown backtrack
-// location from the stack and jump there.
-//
-// The virtual state found in the Trace affects code generation. For example
-// the virtual state contains the difference between the actual current
-// position and the virtual current position, and matching code needs to use
-// this offset to attempt a match in the correct location of the input
-// string. Therefore code generated for a non-trivial trace is specialized
-// to that trace. The code generator therefore has the ability to generate
-// code for each node several times. In order to limit the size of the
-// generated code there is an arbitrary limit on how many specialized sets of
-// code may be generated for a given node. If the limit is reached, the
-// trace is flushed and a generic version of the code for a node is emitted.
-// This is subsequently used for that node. The code emitted for non-generic
-// trace is not recorded in the node and so it cannot currently be reused in
-// the event that code generation is requested for an identical trace.
-
-
-void RegExpTree::AppendToText(RegExpText* text) {
- UNREACHABLE();
-}
-
-
-void RegExpAtom::AppendToText(RegExpText* text) {
- text->AddElement(TextElement::Atom(this));
-}
-
-
-void RegExpCharacterClass::AppendToText(RegExpText* text) {
- text->AddElement(TextElement::CharClass(this));
-}
-
-
-void RegExpText::AppendToText(RegExpText* text) {
- for (int i = 0; i < elements()->length(); i++)
- text->AddElement(elements()->at(i));
-}
-
-
-TextElement TextElement::Atom(RegExpAtom* atom) {
- TextElement result = TextElement(ATOM);
- result.data.u_atom = atom;
- return result;
-}
-
-
-TextElement TextElement::CharClass(
- RegExpCharacterClass* char_class) {
- TextElement result = TextElement(CHAR_CLASS);
- result.data.u_char_class = char_class;
- return result;
-}
-
-
-int TextElement::length() {
- if (type == ATOM) {
- return data.u_atom->length();
- } else {
- ASSERT(type == CHAR_CLASS);
- return 1;
- }
-}
-
-
-DispatchTable* ChoiceNode::GetTable(bool ignore_case) {
- if (table_ == NULL) {
- table_ = new DispatchTable();
- DispatchTableConstructor cons(table_, ignore_case);
- cons.BuildTable(this);
- }
- return table_;
-}
-
-
-class RegExpCompiler {
- public:
- RegExpCompiler(int capture_count, bool ignore_case, bool is_ascii);
-
- int AllocateRegister() {
- if (next_register_ >= RegExpMacroAssembler::kMaxRegister) {
- reg_exp_too_big_ = true;
- return next_register_;
- }
- return next_register_++;
- }
-
- RegExpEngine::CompilationResult Assemble(RegExpMacroAssembler* assembler,
- RegExpNode* start,
- int capture_count,
- Handle<String> pattern);
-
- inline void AddWork(RegExpNode* node) { work_list_->Add(node); }
-
- static const int kImplementationOffset = 0;
- static const int kNumberOfRegistersOffset = 0;
- static const int kCodeOffset = 1;
-
- RegExpMacroAssembler* macro_assembler() { return macro_assembler_; }
- EndNode* accept() { return accept_; }
-
- static const int kMaxRecursion = 100;
- inline int recursion_depth() { return recursion_depth_; }
- inline void IncrementRecursionDepth() { recursion_depth_++; }
- inline void DecrementRecursionDepth() { recursion_depth_--; }
-
- void SetRegExpTooBig() { reg_exp_too_big_ = true; }
-
- inline bool ignore_case() { return ignore_case_; }
- inline bool ascii() { return ascii_; }
-
- static const int kNoRegister = -1;
- private:
- EndNode* accept_;
- int next_register_;
- List<RegExpNode*>* work_list_;
- int recursion_depth_;
- RegExpMacroAssembler* macro_assembler_;
- bool ignore_case_;
- bool ascii_;
- bool reg_exp_too_big_;
-};
-
-
-class RecursionCheck {
- public:
- explicit RecursionCheck(RegExpCompiler* compiler) : compiler_(compiler) {
- compiler->IncrementRecursionDepth();
- }
- ~RecursionCheck() { compiler_->DecrementRecursionDepth(); }
- private:
- RegExpCompiler* compiler_;
-};
-
-
-static RegExpEngine::CompilationResult IrregexpRegExpTooBig() {
- return RegExpEngine::CompilationResult("RegExp too big");
-}
-
-
-// Attempts to compile the regexp using an Irregexp code generator. Returns
-// a fixed array or a null handle depending on whether it succeeded.
-RegExpCompiler::RegExpCompiler(int capture_count, bool ignore_case, bool ascii)
- : next_register_(2 * (capture_count + 1)),
- work_list_(NULL),
- recursion_depth_(0),
- ignore_case_(ignore_case),
- ascii_(ascii),
- reg_exp_too_big_(false) {
- accept_ = new EndNode(EndNode::ACCEPT);
- ASSERT(next_register_ - 1 <= RegExpMacroAssembler::kMaxRegister);
-}
-
-
-RegExpEngine::CompilationResult RegExpCompiler::Assemble(
- RegExpMacroAssembler* macro_assembler,
- RegExpNode* start,
- int capture_count,
- Handle<String> pattern) {
-#ifdef DEBUG
- if (FLAG_trace_regexp_assembler)
- macro_assembler_ = new RegExpMacroAssemblerTracer(macro_assembler);
- else
-#endif
- macro_assembler_ = macro_assembler;
- List <RegExpNode*> work_list(0);
- work_list_ = &work_list;
- Label fail;
- macro_assembler_->PushBacktrack(&fail);
- Trace new_trace;
- start->Emit(this, &new_trace);
- macro_assembler_->Bind(&fail);
- macro_assembler_->Fail();
- while (!work_list.is_empty()) {
- work_list.RemoveLast()->Emit(this, &new_trace);
- }
- if (reg_exp_too_big_) return IrregexpRegExpTooBig();
-
- Handle<Object> code = macro_assembler_->GetCode(pattern);
- work_list_ = NULL;
-#ifdef DEBUG
- if (FLAG_print_code) {
- Handle<Code>::cast(code)->Disassemble(*pattern->ToCString());
- }
- if (FLAG_trace_regexp_assembler) {
- delete macro_assembler_;
- }
-#endif
- return RegExpEngine::CompilationResult(*code, next_register_);
-}
-
-
-bool Trace::DeferredAction::Mentions(int that) {
- if (type() == ActionNode::CLEAR_CAPTURES) {
- Interval range = static_cast<DeferredClearCaptures*>(this)->range();
- return range.Contains(that);
- } else {
- return reg() == that;
- }
-}
-
-
-bool Trace::mentions_reg(int reg) {
- for (DeferredAction* action = actions_;
- action != NULL;
- action = action->next()) {
- if (action->Mentions(reg))
- return true;
- }
- return false;
-}
-
-
-bool Trace::GetStoredPosition(int reg, int* cp_offset) {
- ASSERT_EQ(0, *cp_offset);
- for (DeferredAction* action = actions_;
- action != NULL;
- action = action->next()) {
- if (action->Mentions(reg)) {
- if (action->type() == ActionNode::STORE_POSITION) {
- *cp_offset = static_cast<DeferredCapture*>(action)->cp_offset();
- return true;
- } else {
- return false;
- }
- }
- }
- return false;
-}
-
-
-int Trace::FindAffectedRegisters(OutSet* affected_registers) {
- int max_register = RegExpCompiler::kNoRegister;
- for (DeferredAction* action = actions_;
- action != NULL;
- action = action->next()) {
- if (action->type() == ActionNode::CLEAR_CAPTURES) {
- Interval range = static_cast<DeferredClearCaptures*>(action)->range();
- for (int i = range.from(); i <= range.to(); i++)
- affected_registers->Set(i);
- if (range.to() > max_register) max_register = range.to();
- } else {
- affected_registers->Set(action->reg());
- if (action->reg() > max_register) max_register = action->reg();
- }
- }
- return max_register;
-}
-
-
-void Trace::RestoreAffectedRegisters(RegExpMacroAssembler* assembler,
- int max_register,
- OutSet& registers_to_pop,
- OutSet& registers_to_clear) {
- for (int reg = max_register; reg >= 0; reg--) {
- if (registers_to_pop.Get(reg)) assembler->PopRegister(reg);
- else if (registers_to_clear.Get(reg)) {
- int clear_to = reg;
- while (reg > 0 && registers_to_clear.Get(reg - 1)) {
- reg--;
- }
- assembler->ClearRegisters(reg, clear_to);
- }
- }
-}
-
-
-void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
- int max_register,
- OutSet& affected_registers,
- OutSet* registers_to_pop,
- OutSet* registers_to_clear) {
- // The "+1" is to avoid a push_limit of zero if stack_limit_slack() is 1.
- const int push_limit = (assembler->stack_limit_slack() + 1) / 2;
-
- // Count pushes performed to force a stack limit check occasionally.
- int pushes = 0;
-
- for (int reg = 0; reg <= max_register; reg++) {
- if (!affected_registers.Get(reg)) {
- continue;
- }
-
- // The chronologically first deferred action in the trace
- // is used to infer the action needed to restore a register
- // to its previous state (or not, if it's safe to ignore it).
- enum DeferredActionUndoType { IGNORE, RESTORE, CLEAR };
- DeferredActionUndoType undo_action = IGNORE;
-
- int value = 0;
- bool absolute = false;
- bool clear = false;
- int store_position = -1;
- // This is a little tricky because we are scanning the actions in reverse
- // historical order (newest first).
- for (DeferredAction* action = actions_;
- action != NULL;
- action = action->next()) {
- if (action->Mentions(reg)) {
- switch (action->type()) {
- case ActionNode::SET_REGISTER: {
- Trace::DeferredSetRegister* psr =
- static_cast<Trace::DeferredSetRegister*>(action);
- if (!absolute) {
- value += psr->value();
- absolute = true;
- }
- // SET_REGISTER is currently only used for newly introduced loop
- // counters. They can have a significant previous value if they
- // occour in a loop. TODO(lrn): Propagate this information, so
- // we can set undo_action to IGNORE if we know there is no value to
- // restore.
- undo_action = RESTORE;
- ASSERT_EQ(store_position, -1);
- ASSERT(!clear);
- break;
- }
- case ActionNode::INCREMENT_REGISTER:
- if (!absolute) {
- value++;
- }
- ASSERT_EQ(store_position, -1);
- ASSERT(!clear);
- undo_action = RESTORE;
- break;
- case ActionNode::STORE_POSITION: {
- Trace::DeferredCapture* pc =
- static_cast<Trace::DeferredCapture*>(action);
- if (!clear && store_position == -1) {
- store_position = pc->cp_offset();
- }
-
- // For captures we know that stores and clears alternate.
- // Other register, are never cleared, and if the occur
- // inside a loop, they might be assigned more than once.
- if (reg <= 1) {
- // Registers zero and one, aka "capture zero", is
- // always set correctly if we succeed. There is no
- // need to undo a setting on backtrack, because we
- // will set it again or fail.
- undo_action = IGNORE;
- } else {
- undo_action = pc->is_capture() ? CLEAR : RESTORE;
- }
- ASSERT(!absolute);
- ASSERT_EQ(value, 0);
- break;
- }
- case ActionNode::CLEAR_CAPTURES: {
- // Since we're scanning in reverse order, if we've already
- // set the position we have to ignore historically earlier
- // clearing operations.
- if (store_position == -1) {
- clear = true;
- }
- undo_action = RESTORE;
- ASSERT(!absolute);
- ASSERT_EQ(value, 0);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- }
- }
- // Prepare for the undo-action (e.g., push if it's going to be popped).
- if (undo_action == RESTORE) {
- pushes++;
- RegExpMacroAssembler::StackCheckFlag stack_check =
- RegExpMacroAssembler::kNoStackLimitCheck;
- if (pushes == push_limit) {
- stack_check = RegExpMacroAssembler::kCheckStackLimit;
- pushes = 0;
- }
-
- assembler->PushRegister(reg, stack_check);
- registers_to_pop->Set(reg);
- } else if (undo_action == CLEAR) {
- registers_to_clear->Set(reg);
- }
- // Perform the chronologically last action (or accumulated increment)
- // for the register.
- if (store_position != -1) {
- assembler->WriteCurrentPositionToRegister(reg, store_position);
- } else if (clear) {
- assembler->ClearRegisters(reg, reg);
- } else if (absolute) {
- assembler->SetRegister(reg, value);
- } else if (value != 0) {
- assembler->AdvanceRegister(reg, value);
- }
- }
-}
-
-
-// This is called as we come into a loop choice node and some other tricky
-// nodes. It normalizes the state of the code generator to ensure we can
-// generate generic code.
-void Trace::Flush(RegExpCompiler* compiler, RegExpNode* successor) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
-
- ASSERT(!is_trivial());
-
- if (actions_ == NULL && backtrack() == NULL) {
- // Here we just have some deferred cp advances to fix and we are back to
- // a normal situation. We may also have to forget some information gained
- // through a quick check that was already performed.
- if (cp_offset_ != 0) assembler->AdvanceCurrentPosition(cp_offset_);
- // Create a new trivial state and generate the node with that.
- Trace new_state;
- successor->Emit(compiler, &new_state);
- return;
- }
-
- // Generate deferred actions here along with code to undo them again.
- OutSet affected_registers;
-
- if (backtrack() != NULL) {
- // Here we have a concrete backtrack location. These are set up by choice
- // nodes and so they indicate that we have a deferred save of the current
- // position which we may need to emit here.
- assembler->PushCurrentPosition();
- }
-
- int max_register = FindAffectedRegisters(&affected_registers);
- OutSet registers_to_pop;
- OutSet registers_to_clear;
- PerformDeferredActions(assembler,
- max_register,
- affected_registers,
- &registers_to_pop,
- &registers_to_clear);
- if (cp_offset_ != 0) {
- assembler->AdvanceCurrentPosition(cp_offset_);
- }
-
- // Create a new trivial state and generate the node with that.
- Label undo;
- assembler->PushBacktrack(&undo);
- Trace new_state;
- successor->Emit(compiler, &new_state);
-
- // On backtrack we need to restore state.
- assembler->Bind(&undo);
- RestoreAffectedRegisters(assembler,
- max_register,
- registers_to_pop,
- registers_to_clear);
- if (backtrack() == NULL) {
- assembler->Backtrack();
- } else {
- assembler->PopCurrentPosition();
- assembler->GoTo(backtrack());
- }
-}
-
-
-void NegativeSubmatchSuccess::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
-
- // Omit flushing the trace. We discard the entire stack frame anyway.
-
- if (!label()->is_bound()) {
- // We are completely independent of the trace, since we ignore it,
- // so this code can be used as the generic version.
- assembler->Bind(label());
- }
-
- // Throw away everything on the backtrack stack since the start
- // of the negative submatch and restore the character position.
- assembler->ReadCurrentPositionFromRegister(current_position_register_);
- assembler->ReadStackPointerFromRegister(stack_pointer_register_);
- if (clear_capture_count_ > 0) {
- // Clear any captures that might have been performed during the success
- // of the body of the negative look-ahead.
- int clear_capture_end = clear_capture_start_ + clear_capture_count_ - 1;
- assembler->ClearRegisters(clear_capture_start_, clear_capture_end);
- }
- // Now that we have unwound the stack we find at the top of the stack the
- // backtrack that the BeginSubmatch node got.
- assembler->Backtrack();
-}
-
-
-void EndNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- return;
- }
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- if (!label()->is_bound()) {
- assembler->Bind(label());
- }
- switch (action_) {
- case ACCEPT:
- assembler->Succeed();
- return;
- case BACKTRACK:
- assembler->GoTo(trace->backtrack());
- return;
- case NEGATIVE_SUBMATCH_SUCCESS:
- // This case is handled in a different virtual method.
- UNREACHABLE();
- }
- UNIMPLEMENTED();
-}
-
-
-void GuardedAlternative::AddGuard(Guard* guard) {
- if (guards_ == NULL)
- guards_ = new ZoneList<Guard*>(1);
- guards_->Add(guard);
-}
-
-
-ActionNode* ActionNode::SetRegister(int reg,
- int val,
- RegExpNode* on_success) {
- ActionNode* result = new ActionNode(SET_REGISTER, on_success);
- result->data_.u_store_register.reg = reg;
- result->data_.u_store_register.value = val;
- return result;
-}
-
-
-ActionNode* ActionNode::IncrementRegister(int reg, RegExpNode* on_success) {
- ActionNode* result = new ActionNode(INCREMENT_REGISTER, on_success);
- result->data_.u_increment_register.reg = reg;
- return result;
-}
-
-
-ActionNode* ActionNode::StorePosition(int reg,
- bool is_capture,
- RegExpNode* on_success) {
- ActionNode* result = new ActionNode(STORE_POSITION, on_success);
- result->data_.u_position_register.reg = reg;
- result->data_.u_position_register.is_capture = is_capture;
- return result;
-}
-
-
-ActionNode* ActionNode::ClearCaptures(Interval range,
- RegExpNode* on_success) {
- ActionNode* result = new ActionNode(CLEAR_CAPTURES, on_success);
- result->data_.u_clear_captures.range_from = range.from();
- result->data_.u_clear_captures.range_to = range.to();
- return result;
-}
-
-
-ActionNode* ActionNode::BeginSubmatch(int stack_reg,
- int position_reg,
- RegExpNode* on_success) {
- ActionNode* result = new ActionNode(BEGIN_SUBMATCH, on_success);
- result->data_.u_submatch.stack_pointer_register = stack_reg;
- result->data_.u_submatch.current_position_register = position_reg;
- return result;
-}
-
-
-ActionNode* ActionNode::PositiveSubmatchSuccess(int stack_reg,
- int position_reg,
- int clear_register_count,
- int clear_register_from,
- RegExpNode* on_success) {
- ActionNode* result = new ActionNode(POSITIVE_SUBMATCH_SUCCESS, on_success);
- result->data_.u_submatch.stack_pointer_register = stack_reg;
- result->data_.u_submatch.current_position_register = position_reg;
- result->data_.u_submatch.clear_register_count = clear_register_count;
- result->data_.u_submatch.clear_register_from = clear_register_from;
- return result;
-}
-
-
-ActionNode* ActionNode::EmptyMatchCheck(int start_register,
- int repetition_register,
- int repetition_limit,
- RegExpNode* on_success) {
- ActionNode* result = new ActionNode(EMPTY_MATCH_CHECK, on_success);
- result->data_.u_empty_match_check.start_register = start_register;
- result->data_.u_empty_match_check.repetition_register = repetition_register;
- result->data_.u_empty_match_check.repetition_limit = repetition_limit;
- return result;
-}
-
-
-#define DEFINE_ACCEPT(Type) \
- void Type##Node::Accept(NodeVisitor* visitor) { \
- visitor->Visit##Type(this); \
- }
-FOR_EACH_NODE_TYPE(DEFINE_ACCEPT)
-#undef DEFINE_ACCEPT
-
-
-void LoopChoiceNode::Accept(NodeVisitor* visitor) {
- visitor->VisitLoopChoice(this);
-}
-
-
-// -------------------------------------------------------------------
-// Emit code.
-
-
-void ChoiceNode::GenerateGuard(RegExpMacroAssembler* macro_assembler,
- Guard* guard,
- Trace* trace) {
- switch (guard->op()) {
- case Guard::LT:
- ASSERT(!trace->mentions_reg(guard->reg()));
- macro_assembler->IfRegisterGE(guard->reg(),
- guard->value(),
- trace->backtrack());
- break;
- case Guard::GEQ:
- ASSERT(!trace->mentions_reg(guard->reg()));
- macro_assembler->IfRegisterLT(guard->reg(),
- guard->value(),
- trace->backtrack());
- break;
- }
-}
-
-
-// Returns the number of characters in the equivalence class, omitting those
-// that cannot occur in the source string because it is ASCII.
-static int GetCaseIndependentLetters(Isolate* isolate,
- uc16 character,
- bool ascii_subject,
- unibrow::uchar* letters) {
- int length =
- isolate->jsregexp_uncanonicalize()->get(character, '\0', letters);
- // Unibrow returns 0 or 1 for characters where case independence is
- // trivial.
- if (length == 0) {
- letters[0] = character;
- length = 1;
- }
- if (!ascii_subject || character <= String::kMaxAsciiCharCode) {
- return length;
- }
- // The standard requires that non-ASCII characters cannot have ASCII
- // character codes in their equivalence class.
- return 0;
-}
-
-
-static inline bool EmitSimpleCharacter(Isolate* isolate,
- RegExpCompiler* compiler,
- uc16 c,
- Label* on_failure,
- int cp_offset,
- bool check,
- bool preloaded) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- bool bound_checked = false;
- if (!preloaded) {
- assembler->LoadCurrentCharacter(
- cp_offset,
- on_failure,
- check);
- bound_checked = true;
- }
- assembler->CheckNotCharacter(c, on_failure);
- return bound_checked;
-}
-
-
-// Only emits non-letters (things that don't have case). Only used for case
-// independent matches.
-static inline bool EmitAtomNonLetter(Isolate* isolate,
- RegExpCompiler* compiler,
- uc16 c,
- Label* on_failure,
- int cp_offset,
- bool check,
- bool preloaded) {
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- bool ascii = compiler->ascii();
- unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- int length = GetCaseIndependentLetters(isolate, c, ascii, chars);
- if (length < 1) {
- // This can't match. Must be an ASCII subject and a non-ASCII character.
- // We do not need to do anything since the ASCII pass already handled this.
- return false; // Bounds not checked.
- }
- bool checked = false;
- // We handle the length > 1 case in a later pass.
- if (length == 1) {
- if (ascii && c > String::kMaxAsciiCharCodeU) {
- // Can't match - see above.
- return false; // Bounds not checked.
- }
- if (!preloaded) {
- macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check);
- checked = check;
- }
- macro_assembler->CheckNotCharacter(c, on_failure);
- }
- return checked;
-}
-
-
-static bool ShortCutEmitCharacterPair(RegExpMacroAssembler* macro_assembler,
- bool ascii,
- uc16 c1,
- uc16 c2,
- Label* on_failure) {
- uc16 char_mask;
- if (ascii) {
- char_mask = String::kMaxAsciiCharCode;
- } else {
- char_mask = String::kMaxUC16CharCode;
- }
- uc16 exor = c1 ^ c2;
- // Check whether exor has only one bit set.
- if (((exor - 1) & exor) == 0) {
- // If c1 and c2 differ only by one bit.
- // Ecma262UnCanonicalize always gives the highest number last.
- ASSERT(c2 > c1);
- uc16 mask = char_mask ^ exor;
- macro_assembler->CheckNotCharacterAfterAnd(c1, mask, on_failure);
- return true;
- }
- ASSERT(c2 > c1);
- uc16 diff = c2 - c1;
- if (((diff - 1) & diff) == 0 && c1 >= diff) {
- // If the characters differ by 2^n but don't differ by one bit then
- // subtract the difference from the found character, then do the or
- // trick. We avoid the theoretical case where negative numbers are
- // involved in order to simplify code generation.
- uc16 mask = char_mask ^ diff;
- macro_assembler->CheckNotCharacterAfterMinusAnd(c1 - diff,
- diff,
- mask,
- on_failure);
- return true;
- }
- return false;
-}
-
-
-typedef bool EmitCharacterFunction(Isolate* isolate,
- RegExpCompiler* compiler,
- uc16 c,
- Label* on_failure,
- int cp_offset,
- bool check,
- bool preloaded);
-
-// Only emits letters (things that have case). Only used for case independent
-// matches.
-static inline bool EmitAtomLetter(Isolate* isolate,
- RegExpCompiler* compiler,
- uc16 c,
- Label* on_failure,
- int cp_offset,
- bool check,
- bool preloaded) {
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- bool ascii = compiler->ascii();
- unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- int length = GetCaseIndependentLetters(isolate, c, ascii, chars);
- if (length <= 1) return false;
- // We may not need to check against the end of the input string
- // if this character lies before a character that matched.
- if (!preloaded) {
- macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check);
- }
- Label ok;
- ASSERT(unibrow::Ecma262UnCanonicalize::kMaxWidth == 4);
- switch (length) {
- case 2: {
- if (ShortCutEmitCharacterPair(macro_assembler,
- ascii,
- chars[0],
- chars[1],
- on_failure)) {
- } else {
- macro_assembler->CheckCharacter(chars[0], &ok);
- macro_assembler->CheckNotCharacter(chars[1], on_failure);
- macro_assembler->Bind(&ok);
- }
- break;
- }
- case 4:
- macro_assembler->CheckCharacter(chars[3], &ok);
- // Fall through!
- case 3:
- macro_assembler->CheckCharacter(chars[0], &ok);
- macro_assembler->CheckCharacter(chars[1], &ok);
- macro_assembler->CheckNotCharacter(chars[2], on_failure);
- macro_assembler->Bind(&ok);
- break;
- default:
- UNREACHABLE();
- break;
- }
- return true;
-}
-
-
-static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
- RegExpCharacterClass* cc,
- bool ascii,
- Label* on_failure,
- int cp_offset,
- bool check_offset,
- bool preloaded) {
- ZoneList<CharacterRange>* ranges = cc->ranges();
- int max_char;
- if (ascii) {
- max_char = String::kMaxAsciiCharCode;
- } else {
- max_char = String::kMaxUC16CharCode;
- }
-
- Label success;
-
- Label* char_is_in_class =
- cc->is_negated() ? on_failure : &success;
-
- int range_count = ranges->length();
-
- int last_valid_range = range_count - 1;
- while (last_valid_range >= 0) {
- CharacterRange& range = ranges->at(last_valid_range);
- if (range.from() <= max_char) {
- break;
- }
- last_valid_range--;
- }
-
- if (last_valid_range < 0) {
- if (!cc->is_negated()) {
- // TODO(plesner): We can remove this when the node level does our
- // ASCII optimizations for us.
- macro_assembler->GoTo(on_failure);
- }
- if (check_offset) {
- macro_assembler->CheckPosition(cp_offset, on_failure);
- }
- return;
- }
-
- if (last_valid_range == 0 &&
- !cc->is_negated() &&
- ranges->at(0).IsEverything(max_char)) {
- // This is a common case hit by non-anchored expressions.
- if (check_offset) {
- macro_assembler->CheckPosition(cp_offset, on_failure);
- }
- return;
- }
-
- if (!preloaded) {
- macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check_offset);
- }
-
- if (cc->is_standard() &&
- macro_assembler->CheckSpecialCharacterClass(cc->standard_type(),
- on_failure)) {
- return;
- }
-
- for (int i = 0; i < last_valid_range; i++) {
- CharacterRange& range = ranges->at(i);
- Label next_range;
- uc16 from = range.from();
- uc16 to = range.to();
- if (from > max_char) {
- continue;
- }
- if (to > max_char) to = max_char;
- if (to == from) {
- macro_assembler->CheckCharacter(to, char_is_in_class);
- } else {
- if (from != 0) {
- macro_assembler->CheckCharacterLT(from, &next_range);
- }
- if (to != max_char) {
- macro_assembler->CheckCharacterLT(to + 1, char_is_in_class);
- } else {
- macro_assembler->GoTo(char_is_in_class);
- }
- }
- macro_assembler->Bind(&next_range);
- }
-
- CharacterRange& range = ranges->at(last_valid_range);
- uc16 from = range.from();
- uc16 to = range.to();
-
- if (to > max_char) to = max_char;
- ASSERT(to >= from);
-
- if (to == from) {
- if (cc->is_negated()) {
- macro_assembler->CheckCharacter(to, on_failure);
- } else {
- macro_assembler->CheckNotCharacter(to, on_failure);
- }
- } else {
- if (from != 0) {
- if (cc->is_negated()) {
- macro_assembler->CheckCharacterLT(from, &success);
- } else {
- macro_assembler->CheckCharacterLT(from, on_failure);
- }
- }
- if (to != String::kMaxUC16CharCode) {
- if (cc->is_negated()) {
- macro_assembler->CheckCharacterLT(to + 1, on_failure);
- } else {
- macro_assembler->CheckCharacterGT(to, on_failure);
- }
- } else {
- if (cc->is_negated()) {
- macro_assembler->GoTo(on_failure);
- }
- }
- }
- macro_assembler->Bind(&success);
-}
-
-
-RegExpNode::~RegExpNode() {
-}
-
-
-RegExpNode::LimitResult RegExpNode::LimitVersions(RegExpCompiler* compiler,
- Trace* trace) {
- // If we are generating a greedy loop then don't stop and don't reuse code.
- if (trace->stop_node() != NULL) {
- return CONTINUE;
- }
-
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- if (trace->is_trivial()) {
- if (label_.is_bound()) {
- // We are being asked to generate a generic version, but that's already
- // been done so just go to it.
- macro_assembler->GoTo(&label_);
- return DONE;
- }
- if (compiler->recursion_depth() >= RegExpCompiler::kMaxRecursion) {
- // To avoid too deep recursion we push the node to the work queue and just
- // generate a goto here.
- compiler->AddWork(this);
- macro_assembler->GoTo(&label_);
- return DONE;
- }
- // Generate generic version of the node and bind the label for later use.
- macro_assembler->Bind(&label_);
- return CONTINUE;
- }
-
- // We are being asked to make a non-generic version. Keep track of how many
- // non-generic versions we generate so as not to overdo it.
- trace_count_++;
- if (FLAG_regexp_optimization &&
- trace_count_ < kMaxCopiesCodeGenerated &&
- compiler->recursion_depth() <= RegExpCompiler::kMaxRecursion) {
- return CONTINUE;
- }
-
- // If we get here code has been generated for this node too many times or
- // recursion is too deep. Time to switch to a generic version. The code for
- // generic versions above can handle deep recursion properly.
- trace->Flush(compiler, this);
- return DONE;
-}
-
-
-int ActionNode::EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start) {
- if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
- if (type_ == POSITIVE_SUBMATCH_SUCCESS) return 0; // Rewinds input!
- return on_success()->EatsAtLeast(still_to_find,
- recursion_depth + 1,
- not_at_start);
-}
-
-
-int AssertionNode::EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start) {
- if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
- // If we know we are not at the start and we are asked "how many characters
- // will you match if you succeed?" then we can answer anything since false
- // implies false. So lets just return the max answer (still_to_find) since
- // that won't prevent us from preloading a lot of characters for the other
- // branches in the node graph.
- if (type() == AT_START && not_at_start) return still_to_find;
- return on_success()->EatsAtLeast(still_to_find,
- recursion_depth + 1,
- not_at_start);
-}
-
-
-int BackReferenceNode::EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start) {
- if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
- return on_success()->EatsAtLeast(still_to_find,
- recursion_depth + 1,
- not_at_start);
-}
-
-
-int TextNode::EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start) {
- int answer = Length();
- if (answer >= still_to_find) return answer;
- if (recursion_depth > RegExpCompiler::kMaxRecursion) return answer;
- // We are not at start after this node so we set the last argument to 'true'.
- return answer + on_success()->EatsAtLeast(still_to_find - answer,
- recursion_depth + 1,
- true);
-}
-
-
-int NegativeLookaheadChoiceNode::EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start) {
- if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
- // Alternative 0 is the negative lookahead, alternative 1 is what comes
- // afterwards.
- RegExpNode* node = alternatives_->at(1).node();
- return node->EatsAtLeast(still_to_find, recursion_depth + 1, not_at_start);
-}
-
-
-void NegativeLookaheadChoiceNode::GetQuickCheckDetails(
- QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int filled_in,
- bool not_at_start) {
- // Alternative 0 is the negative lookahead, alternative 1 is what comes
- // afterwards.
- RegExpNode* node = alternatives_->at(1).node();
- return node->GetQuickCheckDetails(details, compiler, filled_in, not_at_start);
-}
-
-
-int ChoiceNode::EatsAtLeastHelper(int still_to_find,
- int recursion_depth,
- RegExpNode* ignore_this_node,
- bool not_at_start) {
- if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
- int min = 100;
- int choice_count = alternatives_->length();
- for (int i = 0; i < choice_count; i++) {
- RegExpNode* node = alternatives_->at(i).node();
- if (node == ignore_this_node) continue;
- int node_eats_at_least = node->EatsAtLeast(still_to_find,
- recursion_depth + 1,
- not_at_start);
- if (node_eats_at_least < min) min = node_eats_at_least;
- }
- return min;
-}
-
-
-int LoopChoiceNode::EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start) {
- return EatsAtLeastHelper(still_to_find,
- recursion_depth,
- loop_node_,
- not_at_start);
-}
-
-
-int ChoiceNode::EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start) {
- return EatsAtLeastHelper(still_to_find,
- recursion_depth,
- NULL,
- not_at_start);
-}
-
-
-// Takes the left-most 1-bit and smears it out, setting all bits to its right.
-static inline uint32_t SmearBitsRight(uint32_t v) {
- v |= v >> 1;
- v |= v >> 2;
- v |= v >> 4;
- v |= v >> 8;
- v |= v >> 16;
- return v;
-}
-
-
-bool QuickCheckDetails::Rationalize(bool asc) {
- bool found_useful_op = false;
- uint32_t char_mask;
- if (asc) {
- char_mask = String::kMaxAsciiCharCode;
- } else {
- char_mask = String::kMaxUC16CharCode;
- }
- mask_ = 0;
- value_ = 0;
- int char_shift = 0;
- for (int i = 0; i < characters_; i++) {
- Position* pos = &positions_[i];
- if ((pos->mask & String::kMaxAsciiCharCode) != 0) {
- found_useful_op = true;
- }
- mask_ |= (pos->mask & char_mask) << char_shift;
- value_ |= (pos->value & char_mask) << char_shift;
- char_shift += asc ? 8 : 16;
- }
- return found_useful_op;
-}
-
-
-bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
- Trace* trace,
- bool preload_has_checked_bounds,
- Label* on_possible_success,
- QuickCheckDetails* details,
- bool fall_through_on_failure) {
- if (details->characters() == 0) return false;
- GetQuickCheckDetails(details, compiler, 0, trace->at_start() == Trace::FALSE);
- if (details->cannot_match()) return false;
- if (!details->Rationalize(compiler->ascii())) return false;
- ASSERT(details->characters() == 1 ||
- compiler->macro_assembler()->CanReadUnaligned());
- uint32_t mask = details->mask();
- uint32_t value = details->value();
-
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
-
- if (trace->characters_preloaded() != details->characters()) {
- assembler->LoadCurrentCharacter(trace->cp_offset(),
- trace->backtrack(),
- !preload_has_checked_bounds,
- details->characters());
- }
-
-
- bool need_mask = true;
-
- if (details->characters() == 1) {
- // If number of characters preloaded is 1 then we used a byte or 16 bit
- // load so the value is already masked down.
- uint32_t char_mask;
- if (compiler->ascii()) {
- char_mask = String::kMaxAsciiCharCode;
- } else {
- char_mask = String::kMaxUC16CharCode;
- }
- if ((mask & char_mask) == char_mask) need_mask = false;
- mask &= char_mask;
- } else {
- // For 2-character preloads in ASCII mode or 1-character preloads in
- // TWO_BYTE mode we also use a 16 bit load with zero extend.
- if (details->characters() == 2 && compiler->ascii()) {
- if ((mask & 0x7f7f) == 0x7f7f) need_mask = false;
- } else if (details->characters() == 1 && !compiler->ascii()) {
- if ((mask & 0xffff) == 0xffff) need_mask = false;
- } else {
- if (mask == 0xffffffff) need_mask = false;
- }
- }
-
- if (fall_through_on_failure) {
- if (need_mask) {
- assembler->CheckCharacterAfterAnd(value, mask, on_possible_success);
- } else {
- assembler->CheckCharacter(value, on_possible_success);
- }
- } else {
- if (need_mask) {
- assembler->CheckNotCharacterAfterAnd(value, mask, trace->backtrack());
- } else {
- assembler->CheckNotCharacter(value, trace->backtrack());
- }
- }
- return true;
-}
-
-
-// Here is the meat of GetQuickCheckDetails (see also the comment on the
-// super-class in the .h file).
-//
-// We iterate along the text object, building up for each character a
-// mask and value that can be used to test for a quick failure to match.
-// The masks and values for the positions will be combined into a single
-// machine word for the current character width in order to be used in
-// generating a quick check.
-void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start) {
- Isolate* isolate = Isolate::Current();
- ASSERT(characters_filled_in < details->characters());
- int characters = details->characters();
- int char_mask;
- int char_shift;
- if (compiler->ascii()) {
- char_mask = String::kMaxAsciiCharCode;
- char_shift = 8;
- } else {
- char_mask = String::kMaxUC16CharCode;
- char_shift = 16;
- }
- for (int k = 0; k < elms_->length(); k++) {
- TextElement elm = elms_->at(k);
- if (elm.type == TextElement::ATOM) {
- Vector<const uc16> quarks = elm.data.u_atom->data();
- for (int i = 0; i < characters && i < quarks.length(); i++) {
- QuickCheckDetails::Position* pos =
- details->positions(characters_filled_in);
- uc16 c = quarks[i];
- if (c > char_mask) {
- // If we expect a non-ASCII character from an ASCII string,
- // there is no way we can match. Not even case independent
- // matching can turn an ASCII character into non-ASCII or
- // vice versa.
- details->set_cannot_match();
- pos->determines_perfectly = false;
- return;
- }
- if (compiler->ignore_case()) {
- unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- int length = GetCaseIndependentLetters(isolate, c, compiler->ascii(),
- chars);
- ASSERT(length != 0); // Can only happen if c > char_mask (see above).
- if (length == 1) {
- // This letter has no case equivalents, so it's nice and simple
- // and the mask-compare will determine definitely whether we have
- // a match at this character position.
- pos->mask = char_mask;
- pos->value = c;
- pos->determines_perfectly = true;
- } else {
- uint32_t common_bits = char_mask;
- uint32_t bits = chars[0];
- for (int j = 1; j < length; j++) {
- uint32_t differing_bits = ((chars[j] & common_bits) ^ bits);
- common_bits ^= differing_bits;
- bits &= common_bits;
- }
- // If length is 2 and common bits has only one zero in it then
- // our mask and compare instruction will determine definitely
- // whether we have a match at this character position. Otherwise
- // it can only be an approximate check.
- uint32_t one_zero = (common_bits | ~char_mask);
- if (length == 2 && ((~one_zero) & ((~one_zero) - 1)) == 0) {
- pos->determines_perfectly = true;
- }
- pos->mask = common_bits;
- pos->value = bits;
- }
- } else {
- // Don't ignore case. Nice simple case where the mask-compare will
- // determine definitely whether we have a match at this character
- // position.
- pos->mask = char_mask;
- pos->value = c;
- pos->determines_perfectly = true;
- }
- characters_filled_in++;
- ASSERT(characters_filled_in <= details->characters());
- if (characters_filled_in == details->characters()) {
- return;
- }
- }
- } else {
- QuickCheckDetails::Position* pos =
- details->positions(characters_filled_in);
- RegExpCharacterClass* tree = elm.data.u_char_class;
- ZoneList<CharacterRange>* ranges = tree->ranges();
- if (tree->is_negated()) {
- // A quick check uses multi-character mask and compare. There is no
- // useful way to incorporate a negative char class into this scheme
- // so we just conservatively create a mask and value that will always
- // succeed.
- pos->mask = 0;
- pos->value = 0;
- } else {
- int first_range = 0;
- while (ranges->at(first_range).from() > char_mask) {
- first_range++;
- if (first_range == ranges->length()) {
- details->set_cannot_match();
- pos->determines_perfectly = false;
- return;
- }
- }
- CharacterRange range = ranges->at(first_range);
- uc16 from = range.from();
- uc16 to = range.to();
- if (to > char_mask) {
- to = char_mask;
- }
- uint32_t differing_bits = (from ^ to);
- // A mask and compare is only perfect if the differing bits form a
- // number like 00011111 with one single block of trailing 1s.
- if ((differing_bits & (differing_bits + 1)) == 0 &&
- from + differing_bits == to) {
- pos->determines_perfectly = true;
- }
- uint32_t common_bits = ~SmearBitsRight(differing_bits);
- uint32_t bits = (from & common_bits);
- for (int i = first_range + 1; i < ranges->length(); i++) {
- CharacterRange range = ranges->at(i);
- uc16 from = range.from();
- uc16 to = range.to();
- if (from > char_mask) continue;
- if (to > char_mask) to = char_mask;
- // Here we are combining more ranges into the mask and compare
- // value. With each new range the mask becomes more sparse and
- // so the chances of a false positive rise. A character class
- // with multiple ranges is assumed never to be equivalent to a
- // mask and compare operation.
- pos->determines_perfectly = false;
- uint32_t new_common_bits = (from ^ to);
- new_common_bits = ~SmearBitsRight(new_common_bits);
- common_bits &= new_common_bits;
- bits &= new_common_bits;
- uint32_t differing_bits = (from & common_bits) ^ bits;
- common_bits ^= differing_bits;
- bits &= common_bits;
- }
- pos->mask = common_bits;
- pos->value = bits;
- }
- characters_filled_in++;
- ASSERT(characters_filled_in <= details->characters());
- if (characters_filled_in == details->characters()) {
- return;
- }
- }
- }
- ASSERT(characters_filled_in != details->characters());
- on_success()-> GetQuickCheckDetails(details,
- compiler,
- characters_filled_in,
- true);
-}
-
-
-void QuickCheckDetails::Clear() {
- for (int i = 0; i < characters_; i++) {
- positions_[i].mask = 0;
- positions_[i].value = 0;
- positions_[i].determines_perfectly = false;
- }
- characters_ = 0;
-}
-
-
-void QuickCheckDetails::Advance(int by, bool ascii) {
- ASSERT(by >= 0);
- if (by >= characters_) {
- Clear();
- return;
- }
- for (int i = 0; i < characters_ - by; i++) {
- positions_[i] = positions_[by + i];
- }
- for (int i = characters_ - by; i < characters_; i++) {
- positions_[i].mask = 0;
- positions_[i].value = 0;
- positions_[i].determines_perfectly = false;
- }
- characters_ -= by;
- // We could change mask_ and value_ here but we would never advance unless
- // they had already been used in a check and they won't be used again because
- // it would gain us nothing. So there's no point.
-}
-
-
-void QuickCheckDetails::Merge(QuickCheckDetails* other, int from_index) {
- ASSERT(characters_ == other->characters_);
- if (other->cannot_match_) {
- return;
- }
- if (cannot_match_) {
- *this = *other;
- return;
- }
- for (int i = from_index; i < characters_; i++) {
- QuickCheckDetails::Position* pos = positions(i);
- QuickCheckDetails::Position* other_pos = other->positions(i);
- if (pos->mask != other_pos->mask ||
- pos->value != other_pos->value ||
- !other_pos->determines_perfectly) {
- // Our mask-compare operation will be approximate unless we have the
- // exact same operation on both sides of the alternation.
- pos->determines_perfectly = false;
- }
- pos->mask &= other_pos->mask;
- pos->value &= pos->mask;
- other_pos->value &= pos->mask;
- uc16 differing_bits = (pos->value ^ other_pos->value);
- pos->mask &= ~differing_bits;
- pos->value &= pos->mask;
- }
-}
-
-
-class VisitMarker {
- public:
- explicit VisitMarker(NodeInfo* info) : info_(info) {
- ASSERT(!info->visited);
- info->visited = true;
- }
- ~VisitMarker() {
- info_->visited = false;
- }
- private:
- NodeInfo* info_;
-};
-
-
-void LoopChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start) {
- if (body_can_be_zero_length_ || info()->visited) return;
- VisitMarker marker(info());
- return ChoiceNode::GetQuickCheckDetails(details,
- compiler,
- characters_filled_in,
- not_at_start);
-}
-
-
-void ChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start) {
- not_at_start = (not_at_start || not_at_start_);
- int choice_count = alternatives_->length();
- ASSERT(choice_count > 0);
- alternatives_->at(0).node()->GetQuickCheckDetails(details,
- compiler,
- characters_filled_in,
- not_at_start);
- for (int i = 1; i < choice_count; i++) {
- QuickCheckDetails new_details(details->characters());
- RegExpNode* node = alternatives_->at(i).node();
- node->GetQuickCheckDetails(&new_details, compiler,
- characters_filled_in,
- not_at_start);
- // Here we merge the quick match details of the two branches.
- details->Merge(&new_details, characters_filled_in);
- }
-}
-
-
-// Check for [0-9A-Z_a-z].
-static void EmitWordCheck(RegExpMacroAssembler* assembler,
- Label* word,
- Label* non_word,
- bool fall_through_on_word) {
- if (assembler->CheckSpecialCharacterClass(
- fall_through_on_word ? 'w' : 'W',
- fall_through_on_word ? non_word : word)) {
- // Optimized implementation available.
- return;
- }
- assembler->CheckCharacterGT('z', non_word);
- assembler->CheckCharacterLT('0', non_word);
- assembler->CheckCharacterGT('a' - 1, word);
- assembler->CheckCharacterLT('9' + 1, word);
- assembler->CheckCharacterLT('A', non_word);
- assembler->CheckCharacterLT('Z' + 1, word);
- if (fall_through_on_word) {
- assembler->CheckNotCharacter('_', non_word);
- } else {
- assembler->CheckCharacter('_', word);
- }
-}
-
-
-// Emit the code to check for a ^ in multiline mode (1-character lookbehind
-// that matches newline or the start of input).
-static void EmitHat(RegExpCompiler* compiler,
- RegExpNode* on_success,
- Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- // We will be loading the previous character into the current character
- // register.
- Trace new_trace(*trace);
- new_trace.InvalidateCurrentCharacter();
-
- Label ok;
- if (new_trace.cp_offset() == 0) {
- // The start of input counts as a newline in this context, so skip to
- // ok if we are at the start.
- assembler->CheckAtStart(&ok);
- }
- // We already checked that we are not at the start of input so it must be
- // OK to load the previous character.
- assembler->LoadCurrentCharacter(new_trace.cp_offset() -1,
- new_trace.backtrack(),
- false);
- if (!assembler->CheckSpecialCharacterClass('n',
- new_trace.backtrack())) {
- // Newline means \n, \r, 0x2028 or 0x2029.
- if (!compiler->ascii()) {
- assembler->CheckCharacterAfterAnd(0x2028, 0xfffe, &ok);
- }
- assembler->CheckCharacter('\n', &ok);
- assembler->CheckNotCharacter('\r', new_trace.backtrack());
- }
- assembler->Bind(&ok);
- on_success->Emit(compiler, &new_trace);
-}
-
-
-// Emit the code to handle \b and \B (word-boundary or non-word-boundary)
-// when we know whether the next character must be a word character or not.
-static void EmitHalfBoundaryCheck(AssertionNode::AssertionNodeType type,
- RegExpCompiler* compiler,
- RegExpNode* on_success,
- Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- Label done;
-
- Trace new_trace(*trace);
-
- bool expect_word_character = (type == AssertionNode::AFTER_WORD_CHARACTER);
- Label* on_word = expect_word_character ? &done : new_trace.backtrack();
- Label* on_non_word = expect_word_character ? new_trace.backtrack() : &done;
-
- // Check whether previous character was a word character.
- switch (trace->at_start()) {
- case Trace::TRUE:
- if (expect_word_character) {
- assembler->GoTo(on_non_word);
- }
- break;
- case Trace::UNKNOWN:
- ASSERT_EQ(0, trace->cp_offset());
- assembler->CheckAtStart(on_non_word);
- // Fall through.
- case Trace::FALSE:
- int prev_char_offset = trace->cp_offset() - 1;
- assembler->LoadCurrentCharacter(prev_char_offset, NULL, false, 1);
- EmitWordCheck(assembler, on_word, on_non_word, expect_word_character);
- // We may or may not have loaded the previous character.
- new_trace.InvalidateCurrentCharacter();
- }
-
- assembler->Bind(&done);
-
- on_success->Emit(compiler, &new_trace);
-}
-
-
-// Emit the code to handle \b and \B (word-boundary or non-word-boundary).
-static void EmitBoundaryCheck(AssertionNode::AssertionNodeType type,
- RegExpCompiler* compiler,
- RegExpNode* on_success,
- Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- Label before_non_word;
- Label before_word;
- if (trace->characters_preloaded() != 1) {
- assembler->LoadCurrentCharacter(trace->cp_offset(), &before_non_word);
- }
- // Fall through on non-word.
- EmitWordCheck(assembler, &before_word, &before_non_word, false);
-
- // We will be loading the previous character into the current character
- // register.
- Trace new_trace(*trace);
- new_trace.InvalidateCurrentCharacter();
-
- Label ok;
- Label* boundary;
- Label* not_boundary;
- if (type == AssertionNode::AT_BOUNDARY) {
- boundary = &ok;
- not_boundary = new_trace.backtrack();
- } else {
- not_boundary = &ok;
- boundary = new_trace.backtrack();
- }
-
- // Next character is not a word character.
- assembler->Bind(&before_non_word);
- if (new_trace.cp_offset() == 0) {
- // The start of input counts as a non-word character, so the question is
- // decided if we are at the start.
- assembler->CheckAtStart(not_boundary);
- }
- // We already checked that we are not at the start of input so it must be
- // OK to load the previous character.
- assembler->LoadCurrentCharacter(new_trace.cp_offset() - 1,
- &ok, // Unused dummy label in this call.
- false);
- // Fall through on non-word.
- EmitWordCheck(assembler, boundary, not_boundary, false);
- assembler->GoTo(not_boundary);
-
- // Next character is a word character.
- assembler->Bind(&before_word);
- if (new_trace.cp_offset() == 0) {
- // The start of input counts as a non-word character, so the question is
- // decided if we are at the start.
- assembler->CheckAtStart(boundary);
- }
- // We already checked that we are not at the start of input so it must be
- // OK to load the previous character.
- assembler->LoadCurrentCharacter(new_trace.cp_offset() - 1,
- &ok, // Unused dummy label in this call.
- false);
- bool fall_through_on_word = (type == AssertionNode::AT_NON_BOUNDARY);
- EmitWordCheck(assembler, not_boundary, boundary, fall_through_on_word);
-
- assembler->Bind(&ok);
-
- on_success->Emit(compiler, &new_trace);
-}
-
-
-void AssertionNode::GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int filled_in,
- bool not_at_start) {
- if (type_ == AT_START && not_at_start) {
- details->set_cannot_match();
- return;
- }
- return on_success()->GetQuickCheckDetails(details,
- compiler,
- filled_in,
- not_at_start);
-}
-
-
-void AssertionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- switch (type_) {
- case AT_END: {
- Label ok;
- assembler->CheckPosition(trace->cp_offset(), &ok);
- assembler->GoTo(trace->backtrack());
- assembler->Bind(&ok);
- break;
- }
- case AT_START: {
- if (trace->at_start() == Trace::FALSE) {
- assembler->GoTo(trace->backtrack());
- return;
- }
- if (trace->at_start() == Trace::UNKNOWN) {
- assembler->CheckNotAtStart(trace->backtrack());
- Trace at_start_trace = *trace;
- at_start_trace.set_at_start(true);
- on_success()->Emit(compiler, &at_start_trace);
- return;
- }
- }
- break;
- case AFTER_NEWLINE:
- EmitHat(compiler, on_success(), trace);
- return;
- case AT_BOUNDARY:
- case AT_NON_BOUNDARY: {
- EmitBoundaryCheck(type_, compiler, on_success(), trace);
- return;
- }
- case AFTER_WORD_CHARACTER:
- case AFTER_NONWORD_CHARACTER: {
- EmitHalfBoundaryCheck(type_, compiler, on_success(), trace);
- }
- }
- on_success()->Emit(compiler, trace);
-}
-
-
-static bool DeterminedAlready(QuickCheckDetails* quick_check, int offset) {
- if (quick_check == NULL) return false;
- if (offset >= quick_check->characters()) return false;
- return quick_check->positions(offset)->determines_perfectly;
-}
-
-
-static void UpdateBoundsCheck(int index, int* checked_up_to) {
- if (index > *checked_up_to) {
- *checked_up_to = index;
- }
-}
-
-
-// We call this repeatedly to generate code for each pass over the text node.
-// The passes are in increasing order of difficulty because we hope one
-// of the first passes will fail in which case we are saved the work of the
-// later passes. for example for the case independent regexp /%[asdfghjkl]a/
-// we will check the '%' in the first pass, the case independent 'a' in the
-// second pass and the character class in the last pass.
-//
-// The passes are done from right to left, so for example to test for /bar/
-// we will first test for an 'r' with offset 2, then an 'a' with offset 1
-// and then a 'b' with offset 0. This means we can avoid the end-of-input
-// bounds check most of the time. In the example we only need to check for
-// end-of-input when loading the putative 'r'.
-//
-// A slight complication involves the fact that the first character may already
-// be fetched into a register by the previous node. In this case we want to
-// do the test for that character first. We do this in separate passes. The
-// 'preloaded' argument indicates that we are doing such a 'pass'. If such a
-// pass has been performed then subsequent passes will have true in
-// first_element_checked to indicate that that character does not need to be
-// checked again.
-//
-// In addition to all this we are passed a Trace, which can
-// contain an AlternativeGeneration object. In this AlternativeGeneration
-// object we can see details of any quick check that was already passed in
-// order to get to the code we are now generating. The quick check can involve
-// loading characters, which means we do not need to recheck the bounds
-// up to the limit the quick check already checked. In addition the quick
-// check can have involved a mask and compare operation which may simplify
-// or obviate the need for further checks at some character positions.
-void TextNode::TextEmitPass(RegExpCompiler* compiler,
- TextEmitPassType pass,
- bool preloaded,
- Trace* trace,
- bool first_element_checked,
- int* checked_up_to) {
- Isolate* isolate = Isolate::Current();
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- bool ascii = compiler->ascii();
- Label* backtrack = trace->backtrack();
- QuickCheckDetails* quick_check = trace->quick_check_performed();
- int element_count = elms_->length();
- for (int i = preloaded ? 0 : element_count - 1; i >= 0; i--) {
- TextElement elm = elms_->at(i);
- int cp_offset = trace->cp_offset() + elm.cp_offset;
- if (elm.type == TextElement::ATOM) {
- Vector<const uc16> quarks = elm.data.u_atom->data();
- for (int j = preloaded ? 0 : quarks.length() - 1; j >= 0; j--) {
- if (first_element_checked && i == 0 && j == 0) continue;
- if (DeterminedAlready(quick_check, elm.cp_offset + j)) continue;
- EmitCharacterFunction* emit_function = NULL;
- switch (pass) {
- case NON_ASCII_MATCH:
- ASSERT(ascii);
- if (quarks[j] > String::kMaxAsciiCharCode) {
- assembler->GoTo(backtrack);
- return;
- }
- break;
- case NON_LETTER_CHARACTER_MATCH:
- emit_function = &EmitAtomNonLetter;
- break;
- case SIMPLE_CHARACTER_MATCH:
- emit_function = &EmitSimpleCharacter;
- break;
- case CASE_CHARACTER_MATCH:
- emit_function = &EmitAtomLetter;
- break;
- default:
- break;
- }
- if (emit_function != NULL) {
- bool bound_checked = emit_function(isolate,
- compiler,
- quarks[j],
- backtrack,
- cp_offset + j,
- *checked_up_to < cp_offset + j,
- preloaded);
- if (bound_checked) UpdateBoundsCheck(cp_offset + j, checked_up_to);
- }
- }
- } else {
- ASSERT_EQ(elm.type, TextElement::CHAR_CLASS);
- if (pass == CHARACTER_CLASS_MATCH) {
- if (first_element_checked && i == 0) continue;
- if (DeterminedAlready(quick_check, elm.cp_offset)) continue;
- RegExpCharacterClass* cc = elm.data.u_char_class;
- EmitCharClass(assembler,
- cc,
- ascii,
- backtrack,
- cp_offset,
- *checked_up_to < cp_offset,
- preloaded);
- UpdateBoundsCheck(cp_offset, checked_up_to);
- }
- }
- }
-}
-
-
-int TextNode::Length() {
- TextElement elm = elms_->last();
- ASSERT(elm.cp_offset >= 0);
- if (elm.type == TextElement::ATOM) {
- return elm.cp_offset + elm.data.u_atom->data().length();
- } else {
- return elm.cp_offset + 1;
- }
-}
-
-
-bool TextNode::SkipPass(int int_pass, bool ignore_case) {
- TextEmitPassType pass = static_cast<TextEmitPassType>(int_pass);
- if (ignore_case) {
- return pass == SIMPLE_CHARACTER_MATCH;
- } else {
- return pass == NON_LETTER_CHARACTER_MATCH || pass == CASE_CHARACTER_MATCH;
- }
-}
-
-
-// This generates the code to match a text node. A text node can contain
-// straight character sequences (possibly to be matched in a case-independent
-// way) and character classes. For efficiency we do not do this in a single
-// pass from left to right. Instead we pass over the text node several times,
-// emitting code for some character positions every time. See the comment on
-// TextEmitPass for details.
-void TextNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- LimitResult limit_result = LimitVersions(compiler, trace);
- if (limit_result == DONE) return;
- ASSERT(limit_result == CONTINUE);
-
- if (trace->cp_offset() + Length() > RegExpMacroAssembler::kMaxCPOffset) {
- compiler->SetRegExpTooBig();
- return;
- }
-
- if (compiler->ascii()) {
- int dummy = 0;
- TextEmitPass(compiler, NON_ASCII_MATCH, false, trace, false, &dummy);
- }
-
- bool first_elt_done = false;
- int bound_checked_to = trace->cp_offset() - 1;
- bound_checked_to += trace->bound_checked_up_to();
-
- // If a character is preloaded into the current character register then
- // check that now.
- if (trace->characters_preloaded() == 1) {
- for (int pass = kFirstRealPass; pass <= kLastPass; pass++) {
- if (!SkipPass(pass, compiler->ignore_case())) {
- TextEmitPass(compiler,
- static_cast<TextEmitPassType>(pass),
- true,
- trace,
- false,
- &bound_checked_to);
- }
- }
- first_elt_done = true;
- }
-
- for (int pass = kFirstRealPass; pass <= kLastPass; pass++) {
- if (!SkipPass(pass, compiler->ignore_case())) {
- TextEmitPass(compiler,
- static_cast<TextEmitPassType>(pass),
- false,
- trace,
- first_elt_done,
- &bound_checked_to);
- }
- }
-
- Trace successor_trace(*trace);
- successor_trace.set_at_start(false);
- successor_trace.AdvanceCurrentPositionInTrace(Length(), compiler);
- RecursionCheck rc(compiler);
- on_success()->Emit(compiler, &successor_trace);
-}
-
-
-void Trace::InvalidateCurrentCharacter() {
- characters_preloaded_ = 0;
-}
-
-
-void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) {
- ASSERT(by > 0);
- // We don't have an instruction for shifting the current character register
- // down or for using a shifted value for anything so lets just forget that
- // we preloaded any characters into it.
- characters_preloaded_ = 0;
- // Adjust the offsets of the quick check performed information. This
- // information is used to find out what we already determined about the
- // characters by means of mask and compare.
- quick_check_performed_.Advance(by, compiler->ascii());
- cp_offset_ += by;
- if (cp_offset_ > RegExpMacroAssembler::kMaxCPOffset) {
- compiler->SetRegExpTooBig();
- cp_offset_ = 0;
- }
- bound_checked_up_to_ = Max(0, bound_checked_up_to_ - by);
-}
-
-
-void TextNode::MakeCaseIndependent(bool is_ascii) {
- int element_count = elms_->length();
- for (int i = 0; i < element_count; i++) {
- TextElement elm = elms_->at(i);
- if (elm.type == TextElement::CHAR_CLASS) {
- RegExpCharacterClass* cc = elm.data.u_char_class;
- // None of the standard character classses is different in the case
- // independent case and it slows us down if we don't know that.
- if (cc->is_standard()) continue;
- ZoneList<CharacterRange>* ranges = cc->ranges();
- int range_count = ranges->length();
- for (int j = 0; j < range_count; j++) {
- ranges->at(j).AddCaseEquivalents(ranges, is_ascii);
- }
- }
- }
-}
-
-
-int TextNode::GreedyLoopTextLength() {
- TextElement elm = elms_->at(elms_->length() - 1);
- if (elm.type == TextElement::CHAR_CLASS) {
- return elm.cp_offset + 1;
- } else {
- return elm.cp_offset + elm.data.u_atom->data().length();
- }
-}
-
-
-// Finds the fixed match length of a sequence of nodes that goes from
-// this alternative and back to this choice node. If there are variable
-// length nodes or other complications in the way then return a sentinel
-// value indicating that a greedy loop cannot be constructed.
-int ChoiceNode::GreedyLoopTextLength(GuardedAlternative* alternative) {
- int length = 0;
- RegExpNode* node = alternative->node();
- // Later we will generate code for all these text nodes using recursion
- // so we have to limit the max number.
- int recursion_depth = 0;
- while (node != this) {
- if (recursion_depth++ > RegExpCompiler::kMaxRecursion) {
- return kNodeIsTooComplexForGreedyLoops;
- }
- int node_length = node->GreedyLoopTextLength();
- if (node_length == kNodeIsTooComplexForGreedyLoops) {
- return kNodeIsTooComplexForGreedyLoops;
- }
- length += node_length;
- SeqRegExpNode* seq_node = static_cast<SeqRegExpNode*>(node);
- node = seq_node->on_success();
- }
- return length;
-}
-
-
-void LoopChoiceNode::AddLoopAlternative(GuardedAlternative alt) {
- ASSERT_EQ(loop_node_, NULL);
- AddAlternative(alt);
- loop_node_ = alt.node();
-}
-
-
-void LoopChoiceNode::AddContinueAlternative(GuardedAlternative alt) {
- ASSERT_EQ(continue_node_, NULL);
- AddAlternative(alt);
- continue_node_ = alt.node();
-}
-
-
-void LoopChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- if (trace->stop_node() == this) {
- int text_length = GreedyLoopTextLength(&(alternatives_->at(0)));
- ASSERT(text_length != kNodeIsTooComplexForGreedyLoops);
- // Update the counter-based backtracking info on the stack. This is an
- // optimization for greedy loops (see below).
- ASSERT(trace->cp_offset() == text_length);
- macro_assembler->AdvanceCurrentPosition(text_length);
- macro_assembler->GoTo(trace->loop_label());
- return;
- }
- ASSERT(trace->stop_node() == NULL);
- if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- return;
- }
- ChoiceNode::Emit(compiler, trace);
-}
-
-
-int ChoiceNode::CalculatePreloadCharacters(RegExpCompiler* compiler,
- bool not_at_start) {
- int preload_characters = EatsAtLeast(4, 0, not_at_start);
- if (compiler->macro_assembler()->CanReadUnaligned()) {
- bool ascii = compiler->ascii();
- if (ascii) {
- if (preload_characters > 4) preload_characters = 4;
- // We can't preload 3 characters because there is no machine instruction
- // to do that. We can't just load 4 because we could be reading
- // beyond the end of the string, which could cause a memory fault.
- if (preload_characters == 3) preload_characters = 2;
- } else {
- if (preload_characters > 2) preload_characters = 2;
- }
- } else {
- if (preload_characters > 1) preload_characters = 1;
- }
- return preload_characters;
-}
-
-
-// This class is used when generating the alternatives in a choice node. It
-// records the way the alternative is being code generated.
-class AlternativeGeneration: public Malloced {
- public:
- AlternativeGeneration()
- : possible_success(),
- expects_preload(false),
- after(),
- quick_check_details() { }
- Label possible_success;
- bool expects_preload;
- Label after;
- QuickCheckDetails quick_check_details;
-};
-
-
-// Creates a list of AlternativeGenerations. If the list has a reasonable
-// size then it is on the stack, otherwise the excess is on the heap.
-class AlternativeGenerationList {
- public:
- explicit AlternativeGenerationList(int count)
- : alt_gens_(count) {
- for (int i = 0; i < count && i < kAFew; i++) {
- alt_gens_.Add(a_few_alt_gens_ + i);
- }
- for (int i = kAFew; i < count; i++) {
- alt_gens_.Add(new AlternativeGeneration());
- }
- }
- ~AlternativeGenerationList() {
- for (int i = kAFew; i < alt_gens_.length(); i++) {
- delete alt_gens_[i];
- alt_gens_[i] = NULL;
- }
- }
-
- AlternativeGeneration* at(int i) {
- return alt_gens_[i];
- }
- private:
- static const int kAFew = 10;
- ZoneList<AlternativeGeneration*> alt_gens_;
- AlternativeGeneration a_few_alt_gens_[kAFew];
-};
-
-
-/* Code generation for choice nodes.
- *
- * We generate quick checks that do a mask and compare to eliminate a
- * choice. If the quick check succeeds then it jumps to the continuation to
- * do slow checks and check subsequent nodes. If it fails (the common case)
- * it falls through to the next choice.
- *
- * Here is the desired flow graph. Nodes directly below each other imply
- * fallthrough. Alternatives 1 and 2 have quick checks. Alternative
- * 3 doesn't have a quick check so we have to call the slow check.
- * Nodes are marked Qn for quick checks and Sn for slow checks. The entire
- * regexp continuation is generated directly after the Sn node, up to the
- * next GoTo if we decide to reuse some already generated code. Some
- * nodes expect preload_characters to be preloaded into the current
- * character register. R nodes do this preloading. Vertices are marked
- * F for failures and S for success (possible success in the case of quick
- * nodes). L, V, < and > are used as arrow heads.
- *
- * ----------> R
- * |
- * V
- * Q1 -----> S1
- * | S /
- * F| /
- * | F/
- * | /
- * | R
- * | /
- * V L
- * Q2 -----> S2
- * | S /
- * F| /
- * | F/
- * | /
- * | R
- * | /
- * V L
- * S3
- * |
- * F|
- * |
- * R
- * |
- * backtrack V
- * <----------Q4
- * \ F |
- * \ |S
- * \ F V
- * \-----S4
- *
- * For greedy loops we reverse our expectation and expect to match rather
- * than fail. Therefore we want the loop code to look like this (U is the
- * unwind code that steps back in the greedy loop). The following alternatives
- * look the same as above.
- * _____
- * / \
- * V |
- * ----------> S1 |
- * /| |
- * / |S |
- * F/ \_____/
- * /
- * |<-----------
- * | \
- * V \
- * Q2 ---> S2 \
- * | S / |
- * F| / |
- * | F/ |
- * | / |
- * | R |
- * | / |
- * F VL |
- * <------U |
- * back |S |
- * \______________/
- */
-
-
-void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- int choice_count = alternatives_->length();
-#ifdef DEBUG
- for (int i = 0; i < choice_count - 1; i++) {
- GuardedAlternative alternative = alternatives_->at(i);
- ZoneList<Guard*>* guards = alternative.guards();
- int guard_count = (guards == NULL) ? 0 : guards->length();
- for (int j = 0; j < guard_count; j++) {
- ASSERT(!trace->mentions_reg(guards->at(j)->reg()));
- }
- }
-#endif
-
- LimitResult limit_result = LimitVersions(compiler, trace);
- if (limit_result == DONE) return;
- ASSERT(limit_result == CONTINUE);
-
- int new_flush_budget = trace->flush_budget() / choice_count;
- if (trace->flush_budget() == 0 && trace->actions() != NULL) {
- trace->Flush(compiler, this);
- return;
- }
-
- RecursionCheck rc(compiler);
-
- Trace* current_trace = trace;
-
- int text_length = GreedyLoopTextLength(&(alternatives_->at(0)));
- bool greedy_loop = false;
- Label greedy_loop_label;
- Trace counter_backtrack_trace;
- counter_backtrack_trace.set_backtrack(&greedy_loop_label);
- if (not_at_start()) counter_backtrack_trace.set_at_start(false);
-
- if (choice_count > 1 && text_length != kNodeIsTooComplexForGreedyLoops) {
- // Here we have special handling for greedy loops containing only text nodes
- // and other simple nodes. These are handled by pushing the current
- // position on the stack and then incrementing the current position each
- // time around the switch. On backtrack we decrement the current position
- // and check it against the pushed value. This avoids pushing backtrack
- // information for each iteration of the loop, which could take up a lot of
- // space.
- greedy_loop = true;
- ASSERT(trace->stop_node() == NULL);
- macro_assembler->PushCurrentPosition();
- current_trace = &counter_backtrack_trace;
- Label greedy_match_failed;
- Trace greedy_match_trace;
- if (not_at_start()) greedy_match_trace.set_at_start(false);
- greedy_match_trace.set_backtrack(&greedy_match_failed);
- Label loop_label;
- macro_assembler->Bind(&loop_label);
- greedy_match_trace.set_stop_node(this);
- greedy_match_trace.set_loop_label(&loop_label);
- alternatives_->at(0).node()->Emit(compiler, &greedy_match_trace);
- macro_assembler->Bind(&greedy_match_failed);
- }
-
- Label second_choice; // For use in greedy matches.
- macro_assembler->Bind(&second_choice);
-
- int first_normal_choice = greedy_loop ? 1 : 0;
-
- int preload_characters =
- CalculatePreloadCharacters(compiler,
- current_trace->at_start() == Trace::FALSE);
- bool preload_is_current =
- (current_trace->characters_preloaded() == preload_characters);
- bool preload_has_checked_bounds = preload_is_current;
-
- AlternativeGenerationList alt_gens(choice_count);
-
- // For now we just call all choices one after the other. The idea ultimately
- // is to use the Dispatch table to try only the relevant ones.
- for (int i = first_normal_choice; i < choice_count; i++) {
- GuardedAlternative alternative = alternatives_->at(i);
- AlternativeGeneration* alt_gen = alt_gens.at(i);
- alt_gen->quick_check_details.set_characters(preload_characters);
- ZoneList<Guard*>* guards = alternative.guards();
- int guard_count = (guards == NULL) ? 0 : guards->length();
- Trace new_trace(*current_trace);
- new_trace.set_characters_preloaded(preload_is_current ?
- preload_characters :
- 0);
- if (preload_has_checked_bounds) {
- new_trace.set_bound_checked_up_to(preload_characters);
- }
- new_trace.quick_check_performed()->Clear();
- if (not_at_start_) new_trace.set_at_start(Trace::FALSE);
- alt_gen->expects_preload = preload_is_current;
- bool generate_full_check_inline = false;
- if (FLAG_regexp_optimization &&
- try_to_emit_quick_check_for_alternative(i) &&
- alternative.node()->EmitQuickCheck(compiler,
- &new_trace,
- preload_has_checked_bounds,
- &alt_gen->possible_success,
- &alt_gen->quick_check_details,
- i < choice_count - 1)) {
- // Quick check was generated for this choice.
- preload_is_current = true;
- preload_has_checked_bounds = true;
- // On the last choice in the ChoiceNode we generated the quick
- // check to fall through on possible success. So now we need to
- // generate the full check inline.
- if (i == choice_count - 1) {
- macro_assembler->Bind(&alt_gen->possible_success);
- new_trace.set_quick_check_performed(&alt_gen->quick_check_details);
- new_trace.set_characters_preloaded(preload_characters);
- new_trace.set_bound_checked_up_to(preload_characters);
- generate_full_check_inline = true;
- }
- } else if (alt_gen->quick_check_details.cannot_match()) {
- if (i == choice_count - 1 && !greedy_loop) {
- macro_assembler->GoTo(trace->backtrack());
- }
- continue;
- } else {
- // No quick check was generated. Put the full code here.
- // If this is not the first choice then there could be slow checks from
- // previous cases that go here when they fail. There's no reason to
- // insist that they preload characters since the slow check we are about
- // to generate probably can't use it.
- if (i != first_normal_choice) {
- alt_gen->expects_preload = false;
- new_trace.InvalidateCurrentCharacter();
- }
- if (i < choice_count - 1) {
- new_trace.set_backtrack(&alt_gen->after);
- }
- generate_full_check_inline = true;
- }
- if (generate_full_check_inline) {
- if (new_trace.actions() != NULL) {
- new_trace.set_flush_budget(new_flush_budget);
- }
- for (int j = 0; j < guard_count; j++) {
- GenerateGuard(macro_assembler, guards->at(j), &new_trace);
- }
- alternative.node()->Emit(compiler, &new_trace);
- preload_is_current = false;
- }
- macro_assembler->Bind(&alt_gen->after);
- }
- if (greedy_loop) {
- macro_assembler->Bind(&greedy_loop_label);
- // If we have unwound to the bottom then backtrack.
- macro_assembler->CheckGreedyLoop(trace->backtrack());
- // Otherwise try the second priority at an earlier position.
- macro_assembler->AdvanceCurrentPosition(-text_length);
- macro_assembler->GoTo(&second_choice);
- }
-
- // At this point we need to generate slow checks for the alternatives where
- // the quick check was inlined. We can recognize these because the associated
- // label was bound.
- for (int i = first_normal_choice; i < choice_count - 1; i++) {
- AlternativeGeneration* alt_gen = alt_gens.at(i);
- Trace new_trace(*current_trace);
- // If there are actions to be flushed we have to limit how many times
- // they are flushed. Take the budget of the parent trace and distribute
- // it fairly amongst the children.
- if (new_trace.actions() != NULL) {
- new_trace.set_flush_budget(new_flush_budget);
- }
- EmitOutOfLineContinuation(compiler,
- &new_trace,
- alternatives_->at(i),
- alt_gen,
- preload_characters,
- alt_gens.at(i + 1)->expects_preload);
- }
-}
-
-
-void ChoiceNode::EmitOutOfLineContinuation(RegExpCompiler* compiler,
- Trace* trace,
- GuardedAlternative alternative,
- AlternativeGeneration* alt_gen,
- int preload_characters,
- bool next_expects_preload) {
- if (!alt_gen->possible_success.is_linked()) return;
-
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- macro_assembler->Bind(&alt_gen->possible_success);
- Trace out_of_line_trace(*trace);
- out_of_line_trace.set_characters_preloaded(preload_characters);
- out_of_line_trace.set_quick_check_performed(&alt_gen->quick_check_details);
- if (not_at_start_) out_of_line_trace.set_at_start(Trace::FALSE);
- ZoneList<Guard*>* guards = alternative.guards();
- int guard_count = (guards == NULL) ? 0 : guards->length();
- if (next_expects_preload) {
- Label reload_current_char;
- out_of_line_trace.set_backtrack(&reload_current_char);
- for (int j = 0; j < guard_count; j++) {
- GenerateGuard(macro_assembler, guards->at(j), &out_of_line_trace);
- }
- alternative.node()->Emit(compiler, &out_of_line_trace);
- macro_assembler->Bind(&reload_current_char);
- // Reload the current character, since the next quick check expects that.
- // We don't need to check bounds here because we only get into this
- // code through a quick check which already did the checked load.
- macro_assembler->LoadCurrentCharacter(trace->cp_offset(),
- NULL,
- false,
- preload_characters);
- macro_assembler->GoTo(&(alt_gen->after));
- } else {
- out_of_line_trace.set_backtrack(&(alt_gen->after));
- for (int j = 0; j < guard_count; j++) {
- GenerateGuard(macro_assembler, guards->at(j), &out_of_line_trace);
- }
- alternative.node()->Emit(compiler, &out_of_line_trace);
- }
-}
-
-
-void ActionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- LimitResult limit_result = LimitVersions(compiler, trace);
- if (limit_result == DONE) return;
- ASSERT(limit_result == CONTINUE);
-
- RecursionCheck rc(compiler);
-
- switch (type_) {
- case STORE_POSITION: {
- Trace::DeferredCapture
- new_capture(data_.u_position_register.reg,
- data_.u_position_register.is_capture,
- trace);
- Trace new_trace = *trace;
- new_trace.add_action(&new_capture);
- on_success()->Emit(compiler, &new_trace);
- break;
- }
- case INCREMENT_REGISTER: {
- Trace::DeferredIncrementRegister
- new_increment(data_.u_increment_register.reg);
- Trace new_trace = *trace;
- new_trace.add_action(&new_increment);
- on_success()->Emit(compiler, &new_trace);
- break;
- }
- case SET_REGISTER: {
- Trace::DeferredSetRegister
- new_set(data_.u_store_register.reg, data_.u_store_register.value);
- Trace new_trace = *trace;
- new_trace.add_action(&new_set);
- on_success()->Emit(compiler, &new_trace);
- break;
- }
- case CLEAR_CAPTURES: {
- Trace::DeferredClearCaptures
- new_capture(Interval(data_.u_clear_captures.range_from,
- data_.u_clear_captures.range_to));
- Trace new_trace = *trace;
- new_trace.add_action(&new_capture);
- on_success()->Emit(compiler, &new_trace);
- break;
- }
- case BEGIN_SUBMATCH:
- if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- } else {
- assembler->WriteCurrentPositionToRegister(
- data_.u_submatch.current_position_register, 0);
- assembler->WriteStackPointerToRegister(
- data_.u_submatch.stack_pointer_register);
- on_success()->Emit(compiler, trace);
- }
- break;
- case EMPTY_MATCH_CHECK: {
- int start_pos_reg = data_.u_empty_match_check.start_register;
- int stored_pos = 0;
- int rep_reg = data_.u_empty_match_check.repetition_register;
- bool has_minimum = (rep_reg != RegExpCompiler::kNoRegister);
- bool know_dist = trace->GetStoredPosition(start_pos_reg, &stored_pos);
- if (know_dist && !has_minimum && stored_pos == trace->cp_offset()) {
- // If we know we haven't advanced and there is no minimum we
- // can just backtrack immediately.
- assembler->GoTo(trace->backtrack());
- } else if (know_dist && stored_pos < trace->cp_offset()) {
- // If we know we've advanced we can generate the continuation
- // immediately.
- on_success()->Emit(compiler, trace);
- } else if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- } else {
- Label skip_empty_check;
- // If we have a minimum number of repetitions we check the current
- // number first and skip the empty check if it's not enough.
- if (has_minimum) {
- int limit = data_.u_empty_match_check.repetition_limit;
- assembler->IfRegisterLT(rep_reg, limit, &skip_empty_check);
- }
- // If the match is empty we bail out, otherwise we fall through
- // to the on-success continuation.
- assembler->IfRegisterEqPos(data_.u_empty_match_check.start_register,
- trace->backtrack());
- assembler->Bind(&skip_empty_check);
- on_success()->Emit(compiler, trace);
- }
- break;
- }
- case POSITIVE_SUBMATCH_SUCCESS: {
- if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- return;
- }
- assembler->ReadCurrentPositionFromRegister(
- data_.u_submatch.current_position_register);
- assembler->ReadStackPointerFromRegister(
- data_.u_submatch.stack_pointer_register);
- int clear_register_count = data_.u_submatch.clear_register_count;
- if (clear_register_count == 0) {
- on_success()->Emit(compiler, trace);
- return;
- }
- int clear_registers_from = data_.u_submatch.clear_register_from;
- Label clear_registers_backtrack;
- Trace new_trace = *trace;
- new_trace.set_backtrack(&clear_registers_backtrack);
- on_success()->Emit(compiler, &new_trace);
-
- assembler->Bind(&clear_registers_backtrack);
- int clear_registers_to = clear_registers_from + clear_register_count - 1;
- assembler->ClearRegisters(clear_registers_from, clear_registers_to);
-
- ASSERT(trace->backtrack() == NULL);
- assembler->Backtrack();
- return;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-void BackReferenceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- return;
- }
-
- LimitResult limit_result = LimitVersions(compiler, trace);
- if (limit_result == DONE) return;
- ASSERT(limit_result == CONTINUE);
-
- RecursionCheck rc(compiler);
-
- ASSERT_EQ(start_reg_ + 1, end_reg_);
- if (compiler->ignore_case()) {
- assembler->CheckNotBackReferenceIgnoreCase(start_reg_,
- trace->backtrack());
- } else {
- assembler->CheckNotBackReference(start_reg_, trace->backtrack());
- }
- on_success()->Emit(compiler, trace);
-}
-
-
-// -------------------------------------------------------------------
-// Dot/dotty output
-
-
-#ifdef DEBUG
-
-
-class DotPrinter: public NodeVisitor {
- public:
- explicit DotPrinter(bool ignore_case)
- : ignore_case_(ignore_case),
- stream_(&alloc_) { }
- void PrintNode(const char* label, RegExpNode* node);
- void Visit(RegExpNode* node);
- void PrintAttributes(RegExpNode* from);
- StringStream* stream() { return &stream_; }
- void PrintOnFailure(RegExpNode* from, RegExpNode* to);
-#define DECLARE_VISIT(Type) \
- virtual void Visit##Type(Type##Node* that);
-FOR_EACH_NODE_TYPE(DECLARE_VISIT)
-#undef DECLARE_VISIT
- private:
- bool ignore_case_;
- HeapStringAllocator alloc_;
- StringStream stream_;
-};
-
-
-void DotPrinter::PrintNode(const char* label, RegExpNode* node) {
- stream()->Add("digraph G {\n graph [label=\"");
- for (int i = 0; label[i]; i++) {
- switch (label[i]) {
- case '\\':
- stream()->Add("\\\\");
- break;
- case '"':
- stream()->Add("\"");
- break;
- default:
- stream()->Put(label[i]);
- break;
- }
- }
- stream()->Add("\"];\n");
- Visit(node);
- stream()->Add("}\n");
- printf("%s", *(stream()->ToCString()));
-}
-
-
-void DotPrinter::Visit(RegExpNode* node) {
- if (node->info()->visited) return;
- node->info()->visited = true;
- node->Accept(this);
-}
-
-
-void DotPrinter::PrintOnFailure(RegExpNode* from, RegExpNode* on_failure) {
- stream()->Add(" n%p -> n%p [style=dotted];\n", from, on_failure);
- Visit(on_failure);
-}
-
-
-class TableEntryBodyPrinter {
- public:
- TableEntryBodyPrinter(StringStream* stream, ChoiceNode* choice)
- : stream_(stream), choice_(choice) { }
- void Call(uc16 from, DispatchTable::Entry entry) {
- OutSet* out_set = entry.out_set();
- for (unsigned i = 0; i < OutSet::kFirstLimit; i++) {
- if (out_set->Get(i)) {
- stream()->Add(" n%p:s%io%i -> n%p;\n",
- choice(),
- from,
- i,
- choice()->alternatives()->at(i).node());
- }
- }
- }
- private:
- StringStream* stream() { return stream_; }
- ChoiceNode* choice() { return choice_; }
- StringStream* stream_;
- ChoiceNode* choice_;
-};
-
-
-class TableEntryHeaderPrinter {
- public:
- explicit TableEntryHeaderPrinter(StringStream* stream)
- : first_(true), stream_(stream) { }
- void Call(uc16 from, DispatchTable::Entry entry) {
- if (first_) {
- first_ = false;
- } else {
- stream()->Add("|");
- }
- stream()->Add("{\\%k-\\%k|{", from, entry.to());
- OutSet* out_set = entry.out_set();
- int priority = 0;
- for (unsigned i = 0; i < OutSet::kFirstLimit; i++) {
- if (out_set->Get(i)) {
- if (priority > 0) stream()->Add("|");
- stream()->Add("<s%io%i> %i", from, i, priority);
- priority++;
- }
- }
- stream()->Add("}}");
- }
- private:
- bool first_;
- StringStream* stream() { return stream_; }
- StringStream* stream_;
-};
-
-
-class AttributePrinter {
- public:
- explicit AttributePrinter(DotPrinter* out)
- : out_(out), first_(true) { }
- void PrintSeparator() {
- if (first_) {
- first_ = false;
- } else {
- out_->stream()->Add("|");
- }
- }
- void PrintBit(const char* name, bool value) {
- if (!value) return;
- PrintSeparator();
- out_->stream()->Add("{%s}", name);
- }
- void PrintPositive(const char* name, int value) {
- if (value < 0) return;
- PrintSeparator();
- out_->stream()->Add("{%s|%x}", name, value);
- }
- private:
- DotPrinter* out_;
- bool first_;
-};
-
-
-void DotPrinter::PrintAttributes(RegExpNode* that) {
- stream()->Add(" a%p [shape=Mrecord, color=grey, fontcolor=grey, "
- "margin=0.1, fontsize=10, label=\"{",
- that);
- AttributePrinter printer(this);
- NodeInfo* info = that->info();
- printer.PrintBit("NI", info->follows_newline_interest);
- printer.PrintBit("WI", info->follows_word_interest);
- printer.PrintBit("SI", info->follows_start_interest);
- Label* label = that->label();
- if (label->is_bound())
- printer.PrintPositive("@", label->pos());
- stream()->Add("}\"];\n");
- stream()->Add(" a%p -> n%p [style=dashed, color=grey, "
- "arrowhead=none];\n", that, that);
-}
-
-
-static const bool kPrintDispatchTable = false;
-void DotPrinter::VisitChoice(ChoiceNode* that) {
- if (kPrintDispatchTable) {
- stream()->Add(" n%p [shape=Mrecord, label=\"", that);
- TableEntryHeaderPrinter header_printer(stream());
- that->GetTable(ignore_case_)->ForEach(&header_printer);
- stream()->Add("\"]\n", that);
- PrintAttributes(that);
- TableEntryBodyPrinter body_printer(stream(), that);
- that->GetTable(ignore_case_)->ForEach(&body_printer);
- } else {
- stream()->Add(" n%p [shape=Mrecord, label=\"?\"];\n", that);
- for (int i = 0; i < that->alternatives()->length(); i++) {
- GuardedAlternative alt = that->alternatives()->at(i);
- stream()->Add(" n%p -> n%p;\n", that, alt.node());
- }
- }
- for (int i = 0; i < that->alternatives()->length(); i++) {
- GuardedAlternative alt = that->alternatives()->at(i);
- alt.node()->Accept(this);
- }
-}
-
-
-void DotPrinter::VisitText(TextNode* that) {
- stream()->Add(" n%p [label=\"", that);
- for (int i = 0; i < that->elements()->length(); i++) {
- if (i > 0) stream()->Add(" ");
- TextElement elm = that->elements()->at(i);
- switch (elm.type) {
- case TextElement::ATOM: {
- stream()->Add("'%w'", elm.data.u_atom->data());
- break;
- }
- case TextElement::CHAR_CLASS: {
- RegExpCharacterClass* node = elm.data.u_char_class;
- stream()->Add("[");
- if (node->is_negated())
- stream()->Add("^");
- for (int j = 0; j < node->ranges()->length(); j++) {
- CharacterRange range = node->ranges()->at(j);
- stream()->Add("%k-%k", range.from(), range.to());
- }
- stream()->Add("]");
- break;
- }
- default:
- UNREACHABLE();
- }
- }
- stream()->Add("\", shape=box, peripheries=2];\n");
- PrintAttributes(that);
- stream()->Add(" n%p -> n%p;\n", that, that->on_success());
- Visit(that->on_success());
-}
-
-
-void DotPrinter::VisitBackReference(BackReferenceNode* that) {
- stream()->Add(" n%p [label=\"$%i..$%i\", shape=doubleoctagon];\n",
- that,
- that->start_register(),
- that->end_register());
- PrintAttributes(that);
- stream()->Add(" n%p -> n%p;\n", that, that->on_success());
- Visit(that->on_success());
-}
-
-
-void DotPrinter::VisitEnd(EndNode* that) {
- stream()->Add(" n%p [style=bold, shape=point];\n", that);
- PrintAttributes(that);
-}
-
-
-void DotPrinter::VisitAssertion(AssertionNode* that) {
- stream()->Add(" n%p [", that);
- switch (that->type()) {
- case AssertionNode::AT_END:
- stream()->Add("label=\"$\", shape=septagon");
- break;
- case AssertionNode::AT_START:
- stream()->Add("label=\"^\", shape=septagon");
- break;
- case AssertionNode::AT_BOUNDARY:
- stream()->Add("label=\"\\b\", shape=septagon");
- break;
- case AssertionNode::AT_NON_BOUNDARY:
- stream()->Add("label=\"\\B\", shape=septagon");
- break;
- case AssertionNode::AFTER_NEWLINE:
- stream()->Add("label=\"(?<=\\n)\", shape=septagon");
- break;
- case AssertionNode::AFTER_WORD_CHARACTER:
- stream()->Add("label=\"(?<=\\w)\", shape=septagon");
- break;
- case AssertionNode::AFTER_NONWORD_CHARACTER:
- stream()->Add("label=\"(?<=\\W)\", shape=septagon");
- break;
- }
- stream()->Add("];\n");
- PrintAttributes(that);
- RegExpNode* successor = that->on_success();
- stream()->Add(" n%p -> n%p;\n", that, successor);
- Visit(successor);
-}
-
-
-void DotPrinter::VisitAction(ActionNode* that) {
- stream()->Add(" n%p [", that);
- switch (that->type_) {
- case ActionNode::SET_REGISTER:
- stream()->Add("label=\"$%i:=%i\", shape=octagon",
- that->data_.u_store_register.reg,
- that->data_.u_store_register.value);
- break;
- case ActionNode::INCREMENT_REGISTER:
- stream()->Add("label=\"$%i++\", shape=octagon",
- that->data_.u_increment_register.reg);
- break;
- case ActionNode::STORE_POSITION:
- stream()->Add("label=\"$%i:=$pos\", shape=octagon",
- that->data_.u_position_register.reg);
- break;
- case ActionNode::BEGIN_SUBMATCH:
- stream()->Add("label=\"$%i:=$pos,begin\", shape=septagon",
- that->data_.u_submatch.current_position_register);
- break;
- case ActionNode::POSITIVE_SUBMATCH_SUCCESS:
- stream()->Add("label=\"escape\", shape=septagon");
- break;
- case ActionNode::EMPTY_MATCH_CHECK:
- stream()->Add("label=\"$%i=$pos?,$%i<%i?\", shape=septagon",
- that->data_.u_empty_match_check.start_register,
- that->data_.u_empty_match_check.repetition_register,
- that->data_.u_empty_match_check.repetition_limit);
- break;
- case ActionNode::CLEAR_CAPTURES: {
- stream()->Add("label=\"clear $%i to $%i\", shape=septagon",
- that->data_.u_clear_captures.range_from,
- that->data_.u_clear_captures.range_to);
- break;
- }
- }
- stream()->Add("];\n");
- PrintAttributes(that);
- RegExpNode* successor = that->on_success();
- stream()->Add(" n%p -> n%p;\n", that, successor);
- Visit(successor);
-}
-
-
-class DispatchTableDumper {
- public:
- explicit DispatchTableDumper(StringStream* stream) : stream_(stream) { }
- void Call(uc16 key, DispatchTable::Entry entry);
- StringStream* stream() { return stream_; }
- private:
- StringStream* stream_;
-};
-
-
-void DispatchTableDumper::Call(uc16 key, DispatchTable::Entry entry) {
- stream()->Add("[%k-%k]: {", key, entry.to());
- OutSet* set = entry.out_set();
- bool first = true;
- for (unsigned i = 0; i < OutSet::kFirstLimit; i++) {
- if (set->Get(i)) {
- if (first) {
- first = false;
- } else {
- stream()->Add(", ");
- }
- stream()->Add("%i", i);
- }
- }
- stream()->Add("}\n");
-}
-
-
-void DispatchTable::Dump() {
- HeapStringAllocator alloc;
- StringStream stream(&alloc);
- DispatchTableDumper dumper(&stream);
- tree()->ForEach(&dumper);
- OS::PrintError("%s", *stream.ToCString());
-}
-
-
-void RegExpEngine::DotPrint(const char* label,
- RegExpNode* node,
- bool ignore_case) {
- DotPrinter printer(ignore_case);
- printer.PrintNode(label, node);
-}
-
-
-#endif // DEBUG
-
-
-// -------------------------------------------------------------------
-// Tree to graph conversion
-
-static const int kSpaceRangeCount = 20;
-static const int kSpaceRangeAsciiCount = 4;
-static const uc16 kSpaceRanges[kSpaceRangeCount] = { 0x0009, 0x000D, 0x0020,
- 0x0020, 0x00A0, 0x00A0, 0x1680, 0x1680, 0x180E, 0x180E, 0x2000, 0x200A,
- 0x2028, 0x2029, 0x202F, 0x202F, 0x205F, 0x205F, 0x3000, 0x3000 };
-
-static const int kWordRangeCount = 8;
-static const uc16 kWordRanges[kWordRangeCount] = { '0', '9', 'A', 'Z', '_',
- '_', 'a', 'z' };
-
-static const int kDigitRangeCount = 2;
-static const uc16 kDigitRanges[kDigitRangeCount] = { '0', '9' };
-
-static const int kLineTerminatorRangeCount = 6;
-static const uc16 kLineTerminatorRanges[kLineTerminatorRangeCount] = { 0x000A,
- 0x000A, 0x000D, 0x000D, 0x2028, 0x2029 };
-
-RegExpNode* RegExpAtom::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- ZoneList<TextElement>* elms = new ZoneList<TextElement>(1);
- elms->Add(TextElement::Atom(this));
- return new TextNode(elms, on_success);
-}
-
-
-RegExpNode* RegExpText::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- return new TextNode(elements(), on_success);
-}
-
-static bool CompareInverseRanges(ZoneList<CharacterRange>* ranges,
- const uc16* special_class,
- int length) {
- ASSERT(ranges->length() != 0);
- ASSERT(length != 0);
- ASSERT(special_class[0] != 0);
- if (ranges->length() != (length >> 1) + 1) {
- return false;
- }
- CharacterRange range = ranges->at(0);
- if (range.from() != 0) {
- return false;
- }
- for (int i = 0; i < length; i += 2) {
- if (special_class[i] != (range.to() + 1)) {
- return false;
- }
- range = ranges->at((i >> 1) + 1);
- if (special_class[i+1] != range.from() - 1) {
- return false;
- }
- }
- if (range.to() != 0xffff) {
- return false;
- }
- return true;
-}
-
-
-static bool CompareRanges(ZoneList<CharacterRange>* ranges,
- const uc16* special_class,
- int length) {
- if (ranges->length() * 2 != length) {
- return false;
- }
- for (int i = 0; i < length; i += 2) {
- CharacterRange range = ranges->at(i >> 1);
- if (range.from() != special_class[i] || range.to() != special_class[i+1]) {
- return false;
- }
- }
- return true;
-}
-
-
-bool RegExpCharacterClass::is_standard() {
- // TODO(lrn): Remove need for this function, by not throwing away information
- // along the way.
- if (is_negated_) {
- return false;
- }
- if (set_.is_standard()) {
- return true;
- }
- if (CompareRanges(set_.ranges(), kSpaceRanges, kSpaceRangeCount)) {
- set_.set_standard_set_type('s');
- return true;
- }
- if (CompareInverseRanges(set_.ranges(), kSpaceRanges, kSpaceRangeCount)) {
- set_.set_standard_set_type('S');
- return true;
- }
- if (CompareInverseRanges(set_.ranges(),
- kLineTerminatorRanges,
- kLineTerminatorRangeCount)) {
- set_.set_standard_set_type('.');
- return true;
- }
- if (CompareRanges(set_.ranges(),
- kLineTerminatorRanges,
- kLineTerminatorRangeCount)) {
- set_.set_standard_set_type('n');
- return true;
- }
- if (CompareRanges(set_.ranges(), kWordRanges, kWordRangeCount)) {
- set_.set_standard_set_type('w');
- return true;
- }
- if (CompareInverseRanges(set_.ranges(), kWordRanges, kWordRangeCount)) {
- set_.set_standard_set_type('W');
- return true;
- }
- return false;
-}
-
-
-RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- return new TextNode(this, on_success);
-}
-
-
-RegExpNode* RegExpDisjunction::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- ZoneList<RegExpTree*>* alternatives = this->alternatives();
- int length = alternatives->length();
- ChoiceNode* result = new ChoiceNode(length);
- for (int i = 0; i < length; i++) {
- GuardedAlternative alternative(alternatives->at(i)->ToNode(compiler,
- on_success));
- result->AddAlternative(alternative);
- }
- return result;
-}
-
-
-RegExpNode* RegExpQuantifier::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- return ToNode(min(),
- max(),
- is_greedy(),
- body(),
- compiler,
- on_success);
-}
-
-
-RegExpNode* RegExpQuantifier::ToNode(int min,
- int max,
- bool is_greedy,
- RegExpTree* body,
- RegExpCompiler* compiler,
- RegExpNode* on_success,
- bool not_at_start) {
- // x{f, t} becomes this:
- //
- // (r++)<-.
- // | `
- // | (x)
- // v ^
- // (r=0)-->(?)---/ [if r < t]
- // |
- // [if r >= f] \----> ...
- //
-
- // 15.10.2.5 RepeatMatcher algorithm.
- // The parser has already eliminated the case where max is 0. In the case
- // where max_match is zero the parser has removed the quantifier if min was
- // > 0 and removed the atom if min was 0. See AddQuantifierToAtom.
-
- // If we know that we cannot match zero length then things are a little
- // simpler since we don't need to make the special zero length match check
- // from step 2.1. If the min and max are small we can unroll a little in
- // this case.
- static const int kMaxUnrolledMinMatches = 3; // Unroll (foo)+ and (foo){3,}
- static const int kMaxUnrolledMaxMatches = 3; // Unroll (foo)? and (foo){x,3}
- if (max == 0) return on_success; // This can happen due to recursion.
- bool body_can_be_empty = (body->min_match() == 0);
- int body_start_reg = RegExpCompiler::kNoRegister;
- Interval capture_registers = body->CaptureRegisters();
- bool needs_capture_clearing = !capture_registers.is_empty();
- if (body_can_be_empty) {
- body_start_reg = compiler->AllocateRegister();
- } else if (FLAG_regexp_optimization && !needs_capture_clearing) {
- // Only unroll if there are no captures and the body can't be
- // empty.
- if (min > 0 && min <= kMaxUnrolledMinMatches) {
- int new_max = (max == kInfinity) ? max : max - min;
- // Recurse once to get the loop or optional matches after the fixed ones.
- RegExpNode* answer = ToNode(
- 0, new_max, is_greedy, body, compiler, on_success, true);
- // Unroll the forced matches from 0 to min. This can cause chains of
- // TextNodes (which the parser does not generate). These should be
- // combined if it turns out they hinder good code generation.
- for (int i = 0; i < min; i++) {
- answer = body->ToNode(compiler, answer);
- }
- return answer;
- }
- if (max <= kMaxUnrolledMaxMatches) {
- ASSERT(min == 0);
- // Unroll the optional matches up to max.
- RegExpNode* answer = on_success;
- for (int i = 0; i < max; i++) {
- ChoiceNode* alternation = new ChoiceNode(2);
- if (is_greedy) {
- alternation->AddAlternative(GuardedAlternative(body->ToNode(compiler,
- answer)));
- alternation->AddAlternative(GuardedAlternative(on_success));
- } else {
- alternation->AddAlternative(GuardedAlternative(on_success));
- alternation->AddAlternative(GuardedAlternative(body->ToNode(compiler,
- answer)));
- }
- answer = alternation;
- if (not_at_start) alternation->set_not_at_start();
- }
- return answer;
- }
- }
- bool has_min = min > 0;
- bool has_max = max < RegExpTree::kInfinity;
- bool needs_counter = has_min || has_max;
- int reg_ctr = needs_counter
- ? compiler->AllocateRegister()
- : RegExpCompiler::kNoRegister;
- LoopChoiceNode* center = new LoopChoiceNode(body->min_match() == 0);
- if (not_at_start) center->set_not_at_start();
- RegExpNode* loop_return = needs_counter
- ? static_cast<RegExpNode*>(ActionNode::IncrementRegister(reg_ctr, center))
- : static_cast<RegExpNode*>(center);
- if (body_can_be_empty) {
- // If the body can be empty we need to check if it was and then
- // backtrack.
- loop_return = ActionNode::EmptyMatchCheck(body_start_reg,
- reg_ctr,
- min,
- loop_return);
- }
- RegExpNode* body_node = body->ToNode(compiler, loop_return);
- if (body_can_be_empty) {
- // If the body can be empty we need to store the start position
- // so we can bail out if it was empty.
- body_node = ActionNode::StorePosition(body_start_reg, false, body_node);
- }
- if (needs_capture_clearing) {
- // Before entering the body of this loop we need to clear captures.
- body_node = ActionNode::ClearCaptures(capture_registers, body_node);
- }
- GuardedAlternative body_alt(body_node);
- if (has_max) {
- Guard* body_guard = new Guard(reg_ctr, Guard::LT, max);
- body_alt.AddGuard(body_guard);
- }
- GuardedAlternative rest_alt(on_success);
- if (has_min) {
- Guard* rest_guard = new Guard(reg_ctr, Guard::GEQ, min);
- rest_alt.AddGuard(rest_guard);
- }
- if (is_greedy) {
- center->AddLoopAlternative(body_alt);
- center->AddContinueAlternative(rest_alt);
- } else {
- center->AddContinueAlternative(rest_alt);
- center->AddLoopAlternative(body_alt);
- }
- if (needs_counter) {
- return ActionNode::SetRegister(reg_ctr, 0, center);
- } else {
- return center;
- }
-}
-
-
-RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- NodeInfo info;
- switch (type()) {
- case START_OF_LINE:
- return AssertionNode::AfterNewline(on_success);
- case START_OF_INPUT:
- return AssertionNode::AtStart(on_success);
- case BOUNDARY:
- return AssertionNode::AtBoundary(on_success);
- case NON_BOUNDARY:
- return AssertionNode::AtNonBoundary(on_success);
- case END_OF_INPUT:
- return AssertionNode::AtEnd(on_success);
- case END_OF_LINE: {
- // Compile $ in multiline regexps as an alternation with a positive
- // lookahead in one side and an end-of-input on the other side.
- // We need two registers for the lookahead.
- int stack_pointer_register = compiler->AllocateRegister();
- int position_register = compiler->AllocateRegister();
- // The ChoiceNode to distinguish between a newline and end-of-input.
- ChoiceNode* result = new ChoiceNode(2);
- // Create a newline atom.
- ZoneList<CharacterRange>* newline_ranges =
- new ZoneList<CharacterRange>(3);
- CharacterRange::AddClassEscape('n', newline_ranges);
- RegExpCharacterClass* newline_atom = new RegExpCharacterClass('n');
- TextNode* newline_matcher = new TextNode(
- newline_atom,
- ActionNode::PositiveSubmatchSuccess(stack_pointer_register,
- position_register,
- 0, // No captures inside.
- -1, // Ignored if no captures.
- on_success));
- // Create an end-of-input matcher.
- RegExpNode* end_of_line = ActionNode::BeginSubmatch(
- stack_pointer_register,
- position_register,
- newline_matcher);
- // Add the two alternatives to the ChoiceNode.
- GuardedAlternative eol_alternative(end_of_line);
- result->AddAlternative(eol_alternative);
- GuardedAlternative end_alternative(AssertionNode::AtEnd(on_success));
- result->AddAlternative(end_alternative);
- return result;
- }
- default:
- UNREACHABLE();
- }
- return on_success;
-}
-
-
-RegExpNode* RegExpBackReference::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- return new BackReferenceNode(RegExpCapture::StartRegister(index()),
- RegExpCapture::EndRegister(index()),
- on_success);
-}
-
-
-RegExpNode* RegExpEmpty::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- return on_success;
-}
-
-
-RegExpNode* RegExpLookahead::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- int stack_pointer_register = compiler->AllocateRegister();
- int position_register = compiler->AllocateRegister();
-
- const int registers_per_capture = 2;
- const int register_of_first_capture = 2;
- int register_count = capture_count_ * registers_per_capture;
- int register_start =
- register_of_first_capture + capture_from_ * registers_per_capture;
-
- RegExpNode* success;
- if (is_positive()) {
- RegExpNode* node = ActionNode::BeginSubmatch(
- stack_pointer_register,
- position_register,
- body()->ToNode(
- compiler,
- ActionNode::PositiveSubmatchSuccess(stack_pointer_register,
- position_register,
- register_count,
- register_start,
- on_success)));
- return node;
- } else {
- // We use a ChoiceNode for a negative lookahead because it has most of
- // the characteristics we need. It has the body of the lookahead as its
- // first alternative and the expression after the lookahead of the second
- // alternative. If the first alternative succeeds then the
- // NegativeSubmatchSuccess will unwind the stack including everything the
- // choice node set up and backtrack. If the first alternative fails then
- // the second alternative is tried, which is exactly the desired result
- // for a negative lookahead. The NegativeLookaheadChoiceNode is a special
- // ChoiceNode that knows to ignore the first exit when calculating quick
- // checks.
- GuardedAlternative body_alt(
- body()->ToNode(
- compiler,
- success = new NegativeSubmatchSuccess(stack_pointer_register,
- position_register,
- register_count,
- register_start)));
- ChoiceNode* choice_node =
- new NegativeLookaheadChoiceNode(body_alt,
- GuardedAlternative(on_success));
- return ActionNode::BeginSubmatch(stack_pointer_register,
- position_register,
- choice_node);
- }
-}
-
-
-RegExpNode* RegExpCapture::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- return ToNode(body(), index(), compiler, on_success);
-}
-
-
-RegExpNode* RegExpCapture::ToNode(RegExpTree* body,
- int index,
- RegExpCompiler* compiler,
- RegExpNode* on_success) {
- int start_reg = RegExpCapture::StartRegister(index);
- int end_reg = RegExpCapture::EndRegister(index);
- RegExpNode* store_end = ActionNode::StorePosition(end_reg, true, on_success);
- RegExpNode* body_node = body->ToNode(compiler, store_end);
- return ActionNode::StorePosition(start_reg, true, body_node);
-}
-
-
-RegExpNode* RegExpAlternative::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- ZoneList<RegExpTree*>* children = nodes();
- RegExpNode* current = on_success;
- for (int i = children->length() - 1; i >= 0; i--) {
- current = children->at(i)->ToNode(compiler, current);
- }
- return current;
-}
-
-
-static void AddClass(const uc16* elmv,
- int elmc,
- ZoneList<CharacterRange>* ranges) {
- for (int i = 0; i < elmc; i += 2) {
- ASSERT(elmv[i] <= elmv[i + 1]);
- ranges->Add(CharacterRange(elmv[i], elmv[i + 1]));
- }
-}
-
-
-static void AddClassNegated(const uc16 *elmv,
- int elmc,
- ZoneList<CharacterRange>* ranges) {
- ASSERT(elmv[0] != 0x0000);
- ASSERT(elmv[elmc-1] != String::kMaxUC16CharCode);
- uc16 last = 0x0000;
- for (int i = 0; i < elmc; i += 2) {
- ASSERT(last <= elmv[i] - 1);
- ASSERT(elmv[i] <= elmv[i + 1]);
- ranges->Add(CharacterRange(last, elmv[i] - 1));
- last = elmv[i + 1] + 1;
- }
- ranges->Add(CharacterRange(last, String::kMaxUC16CharCode));
-}
-
-
-void CharacterRange::AddClassEscape(uc16 type,
- ZoneList<CharacterRange>* ranges) {
- switch (type) {
- case 's':
- AddClass(kSpaceRanges, kSpaceRangeCount, ranges);
- break;
- case 'S':
- AddClassNegated(kSpaceRanges, kSpaceRangeCount, ranges);
- break;
- case 'w':
- AddClass(kWordRanges, kWordRangeCount, ranges);
- break;
- case 'W':
- AddClassNegated(kWordRanges, kWordRangeCount, ranges);
- break;
- case 'd':
- AddClass(kDigitRanges, kDigitRangeCount, ranges);
- break;
- case 'D':
- AddClassNegated(kDigitRanges, kDigitRangeCount, ranges);
- break;
- case '.':
- AddClassNegated(kLineTerminatorRanges,
- kLineTerminatorRangeCount,
- ranges);
- break;
- // This is not a character range as defined by the spec but a
- // convenient shorthand for a character class that matches any
- // character.
- case '*':
- ranges->Add(CharacterRange::Everything());
- break;
- // This is the set of characters matched by the $ and ^ symbols
- // in multiline mode.
- case 'n':
- AddClass(kLineTerminatorRanges,
- kLineTerminatorRangeCount,
- ranges);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-Vector<const uc16> CharacterRange::GetWordBounds() {
- return Vector<const uc16>(kWordRanges, kWordRangeCount);
-}
-
-
-class CharacterRangeSplitter {
- public:
- CharacterRangeSplitter(ZoneList<CharacterRange>** included,
- ZoneList<CharacterRange>** excluded)
- : included_(included),
- excluded_(excluded) { }
- void Call(uc16 from, DispatchTable::Entry entry);
-
- static const int kInBase = 0;
- static const int kInOverlay = 1;
-
- private:
- ZoneList<CharacterRange>** included_;
- ZoneList<CharacterRange>** excluded_;
-};
-
-
-void CharacterRangeSplitter::Call(uc16 from, DispatchTable::Entry entry) {
- if (!entry.out_set()->Get(kInBase)) return;
- ZoneList<CharacterRange>** target = entry.out_set()->Get(kInOverlay)
- ? included_
- : excluded_;
- if (*target == NULL) *target = new ZoneList<CharacterRange>(2);
- (*target)->Add(CharacterRange(entry.from(), entry.to()));
-}
-
-
-void CharacterRange::Split(ZoneList<CharacterRange>* base,
- Vector<const uc16> overlay,
- ZoneList<CharacterRange>** included,
- ZoneList<CharacterRange>** excluded) {
- ASSERT_EQ(NULL, *included);
- ASSERT_EQ(NULL, *excluded);
- DispatchTable table;
- for (int i = 0; i < base->length(); i++)
- table.AddRange(base->at(i), CharacterRangeSplitter::kInBase);
- for (int i = 0; i < overlay.length(); i += 2) {
- table.AddRange(CharacterRange(overlay[i], overlay[i+1]),
- CharacterRangeSplitter::kInOverlay);
- }
- CharacterRangeSplitter callback(included, excluded);
- table.ForEach(&callback);
-}
-
-
-static void AddUncanonicals(Isolate* isolate,
- ZoneList<CharacterRange>* ranges,
- int bottom,
- int top);
-
-
-void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
- bool is_ascii) {
- Isolate* isolate = Isolate::Current();
- uc16 bottom = from();
- uc16 top = to();
- if (is_ascii) {
- if (bottom > String::kMaxAsciiCharCode) return;
- if (top > String::kMaxAsciiCharCode) top = String::kMaxAsciiCharCode;
- }
- unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- if (top == bottom) {
- // If this is a singleton we just expand the one character.
- int length = isolate->jsregexp_uncanonicalize()->get(bottom, '\0', chars);
- for (int i = 0; i < length; i++) {
- uc32 chr = chars[i];
- if (chr != bottom) {
- ranges->Add(CharacterRange::Singleton(chars[i]));
- }
- }
- } else {
- // If this is a range we expand the characters block by block,
- // expanding contiguous subranges (blocks) one at a time.
- // The approach is as follows. For a given start character we
- // look up the remainder of the block that contains it (represented
- // by the end point), for instance we find 'z' if the character
- // is 'c'. A block is characterized by the property
- // that all characters uncanonicalize in the same way, except that
- // each entry in the result is incremented by the distance from the first
- // element. So a-z is a block because 'a' uncanonicalizes to ['a', 'A'] and
- // the k'th letter uncanonicalizes to ['a' + k, 'A' + k].
- // Once we've found the end point we look up its uncanonicalization
- // and produce a range for each element. For instance for [c-f]
- // we look up ['z', 'Z'] and produce [c-f] and [C-F]. We then only
- // add a range if it is not already contained in the input, so [c-f]
- // will be skipped but [C-F] will be added. If this range is not
- // completely contained in a block we do this for all the blocks
- // covered by the range (handling characters that is not in a block
- // as a "singleton block").
- unibrow::uchar range[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- int pos = bottom;
- while (pos < top) {
- int length = isolate->jsregexp_canonrange()->get(pos, '\0', range);
- uc16 block_end;
- if (length == 0) {
- block_end = pos;
- } else {
- ASSERT_EQ(1, length);
- block_end = range[0];
- }
- int end = (block_end > top) ? top : block_end;
- length = isolate->jsregexp_uncanonicalize()->get(block_end, '\0', range);
- for (int i = 0; i < length; i++) {
- uc32 c = range[i];
- uc16 range_from = c - (block_end - pos);
- uc16 range_to = c - (block_end - end);
- if (!(bottom <= range_from && range_to <= top)) {
- ranges->Add(CharacterRange(range_from, range_to));
- }
- }
- pos = end + 1;
- }
- }
-}
-
-
-bool CharacterRange::IsCanonical(ZoneList<CharacterRange>* ranges) {
- ASSERT_NOT_NULL(ranges);
- int n = ranges->length();
- if (n <= 1) return true;
- int max = ranges->at(0).to();
- for (int i = 1; i < n; i++) {
- CharacterRange next_range = ranges->at(i);
- if (next_range.from() <= max + 1) return false;
- max = next_range.to();
- }
- return true;
-}
-
-SetRelation CharacterRange::WordCharacterRelation(
- ZoneList<CharacterRange>* range) {
- ASSERT(IsCanonical(range));
- int i = 0; // Word character range index.
- int j = 0; // Argument range index.
- ASSERT_NE(0, kWordRangeCount);
- SetRelation result;
- if (range->length() == 0) {
- result.SetElementsInSecondSet();
- return result;
- }
- CharacterRange argument_range = range->at(0);
- CharacterRange word_range = CharacterRange(kWordRanges[0], kWordRanges[1]);
- while (i < kWordRangeCount && j < range->length()) {
- // Check the two ranges for the five cases:
- // - no overlap.
- // - partial overlap (there are elements in both ranges that isn't
- // in the other, and there are also elements that are in both).
- // - argument range entirely inside word range.
- // - word range entirely inside argument range.
- // - ranges are completely equal.
-
- // First check for no overlap. The earlier range is not in the other set.
- if (argument_range.from() > word_range.to()) {
- // Ranges are disjoint. The earlier word range contains elements that
- // cannot be in the argument set.
- result.SetElementsInSecondSet();
- } else if (word_range.from() > argument_range.to()) {
- // Ranges are disjoint. The earlier argument range contains elements that
- // cannot be in the word set.
- result.SetElementsInFirstSet();
- } else if (word_range.from() <= argument_range.from() &&
- word_range.to() >= argument_range.from()) {
- result.SetElementsInBothSets();
- // argument range completely inside word range.
- if (word_range.from() < argument_range.from() ||
- word_range.to() > argument_range.from()) {
- result.SetElementsInSecondSet();
- }
- } else if (word_range.from() >= argument_range.from() &&
- word_range.to() <= argument_range.from()) {
- result.SetElementsInBothSets();
- result.SetElementsInFirstSet();
- } else {
- // There is overlap, and neither is a subrange of the other
- result.SetElementsInFirstSet();
- result.SetElementsInSecondSet();
- result.SetElementsInBothSets();
- }
- if (result.NonTrivialIntersection()) {
- // The result is as (im)precise as we can possibly make it.
- return result;
- }
- // Progress the range(s) with minimal to-character.
- uc16 word_to = word_range.to();
- uc16 argument_to = argument_range.to();
- if (argument_to <= word_to) {
- j++;
- if (j < range->length()) {
- argument_range = range->at(j);
- }
- }
- if (word_to <= argument_to) {
- i += 2;
- if (i < kWordRangeCount) {
- word_range = CharacterRange(kWordRanges[i], kWordRanges[i + 1]);
- }
- }
- }
- // Check if anything wasn't compared in the loop.
- if (i < kWordRangeCount) {
- // word range contains something not in argument range.
- result.SetElementsInSecondSet();
- } else if (j < range->length()) {
- // Argument range contains something not in word range.
- result.SetElementsInFirstSet();
- }
-
- return result;
-}
-
-
-static void AddUncanonicals(Isolate* isolate,
- ZoneList<CharacterRange>* ranges,
- int bottom,
- int top) {
- unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- // Zones with no case mappings. There is a DEBUG-mode loop to assert that
- // this table is correct.
- // 0x0600 - 0x0fff
- // 0x1100 - 0x1cff
- // 0x2000 - 0x20ff
- // 0x2200 - 0x23ff
- // 0x2500 - 0x2bff
- // 0x2e00 - 0xa5ff
- // 0xa800 - 0xfaff
- // 0xfc00 - 0xfeff
- const int boundary_count = 18;
- int boundaries[] = {
- 0x600, 0x1000, 0x1100, 0x1d00, 0x2000, 0x2100, 0x2200, 0x2400, 0x2500,
- 0x2c00, 0x2e00, 0xa600, 0xa800, 0xfb00, 0xfc00, 0xff00};
-
- // Special ASCII rule from spec can save us some work here.
- if (bottom == 0x80 && top == 0xffff) return;
-
- if (top <= boundaries[0]) {
- CharacterRange range(bottom, top);
- range.AddCaseEquivalents(ranges, false);
- return;
- }
-
- // Split up very large ranges. This helps remove ranges where there are no
- // case mappings.
- for (int i = 0; i < boundary_count; i++) {
- if (bottom < boundaries[i] && top >= boundaries[i]) {
- AddUncanonicals(isolate, ranges, bottom, boundaries[i] - 1);
- AddUncanonicals(isolate, ranges, boundaries[i], top);
- return;
- }
- }
-
- // If we are completely in a zone with no case mappings then we are done.
- for (int i = 0; i < boundary_count; i += 2) {
- if (bottom >= boundaries[i] && top < boundaries[i + 1]) {
-#ifdef DEBUG
- for (int j = bottom; j <= top; j++) {
- unsigned current_char = j;
- int length = isolate->jsregexp_uncanonicalize()->get(current_char,
- '\0', chars);
- for (int k = 0; k < length; k++) {
- ASSERT(chars[k] == current_char);
- }
- }
-#endif
- return;
- }
- }
-
- // Step through the range finding equivalent characters.
- ZoneList<unibrow::uchar> *characters = new ZoneList<unibrow::uchar>(100);
- for (int i = bottom; i <= top; i++) {
- int length = isolate->jsregexp_uncanonicalize()->get(i, '\0', chars);
- for (int j = 0; j < length; j++) {
- uc32 chr = chars[j];
- if (chr != i && (chr < bottom || chr > top)) {
- characters->Add(chr);
- }
- }
- }
-
- // Step through the equivalent characters finding simple ranges and
- // adding ranges to the character class.
- if (characters->length() > 0) {
- int new_from = characters->at(0);
- int new_to = new_from;
- for (int i = 1; i < characters->length(); i++) {
- int chr = characters->at(i);
- if (chr == new_to + 1) {
- new_to++;
- } else {
- if (new_to == new_from) {
- ranges->Add(CharacterRange::Singleton(new_from));
- } else {
- ranges->Add(CharacterRange(new_from, new_to));
- }
- new_from = new_to = chr;
- }
- }
- if (new_to == new_from) {
- ranges->Add(CharacterRange::Singleton(new_from));
- } else {
- ranges->Add(CharacterRange(new_from, new_to));
- }
- }
-}
-
-
-ZoneList<CharacterRange>* CharacterSet::ranges() {
- if (ranges_ == NULL) {
- ranges_ = new ZoneList<CharacterRange>(2);
- CharacterRange::AddClassEscape(standard_set_type_, ranges_);
- }
- return ranges_;
-}
-
-
-// Move a number of elements in a zonelist to another position
-// in the same list. Handles overlapping source and target areas.
-static void MoveRanges(ZoneList<CharacterRange>* list,
- int from,
- int to,
- int count) {
- // Ranges are potentially overlapping.
- if (from < to) {
- for (int i = count - 1; i >= 0; i--) {
- list->at(to + i) = list->at(from + i);
- }
- } else {
- for (int i = 0; i < count; i++) {
- list->at(to + i) = list->at(from + i);
- }
- }
-}
-
-
-static int InsertRangeInCanonicalList(ZoneList<CharacterRange>* list,
- int count,
- CharacterRange insert) {
- // Inserts a range into list[0..count[, which must be sorted
- // by from value and non-overlapping and non-adjacent, using at most
- // list[0..count] for the result. Returns the number of resulting
- // canonicalized ranges. Inserting a range may collapse existing ranges into
- // fewer ranges, so the return value can be anything in the range 1..count+1.
- uc16 from = insert.from();
- uc16 to = insert.to();
- int start_pos = 0;
- int end_pos = count;
- for (int i = count - 1; i >= 0; i--) {
- CharacterRange current = list->at(i);
- if (current.from() > to + 1) {
- end_pos = i;
- } else if (current.to() + 1 < from) {
- start_pos = i + 1;
- break;
- }
- }
-
- // Inserted range overlaps, or is adjacent to, ranges at positions
- // [start_pos..end_pos[. Ranges before start_pos or at or after end_pos are
- // not affected by the insertion.
- // If start_pos == end_pos, the range must be inserted before start_pos.
- // if start_pos < end_pos, the entire range from start_pos to end_pos
- // must be merged with the insert range.
-
- if (start_pos == end_pos) {
- // Insert between existing ranges at position start_pos.
- if (start_pos < count) {
- MoveRanges(list, start_pos, start_pos + 1, count - start_pos);
- }
- list->at(start_pos) = insert;
- return count + 1;
- }
- if (start_pos + 1 == end_pos) {
- // Replace single existing range at position start_pos.
- CharacterRange to_replace = list->at(start_pos);
- int new_from = Min(to_replace.from(), from);
- int new_to = Max(to_replace.to(), to);
- list->at(start_pos) = CharacterRange(new_from, new_to);
- return count;
- }
- // Replace a number of existing ranges from start_pos to end_pos - 1.
- // Move the remaining ranges down.
-
- int new_from = Min(list->at(start_pos).from(), from);
- int new_to = Max(list->at(end_pos - 1).to(), to);
- if (end_pos < count) {
- MoveRanges(list, end_pos, start_pos + 1, count - end_pos);
- }
- list->at(start_pos) = CharacterRange(new_from, new_to);
- return count - (end_pos - start_pos) + 1;
-}
-
-
-void CharacterSet::Canonicalize() {
- // Special/default classes are always considered canonical. The result
- // of calling ranges() will be sorted.
- if (ranges_ == NULL) return;
- CharacterRange::Canonicalize(ranges_);
-}
-
-
-void CharacterRange::Canonicalize(ZoneList<CharacterRange>* character_ranges) {
- if (character_ranges->length() <= 1) return;
- // Check whether ranges are already canonical (increasing, non-overlapping,
- // non-adjacent).
- int n = character_ranges->length();
- int max = character_ranges->at(0).to();
- int i = 1;
- while (i < n) {
- CharacterRange current = character_ranges->at(i);
- if (current.from() <= max + 1) {
- break;
- }
- max = current.to();
- i++;
- }
- // Canonical until the i'th range. If that's all of them, we are done.
- if (i == n) return;
-
- // The ranges at index i and forward are not canonicalized. Make them so by
- // doing the equivalent of insertion sort (inserting each into the previous
- // list, in order).
- // Notice that inserting a range can reduce the number of ranges in the
- // result due to combining of adjacent and overlapping ranges.
- int read = i; // Range to insert.
- int num_canonical = i; // Length of canonicalized part of list.
- do {
- num_canonical = InsertRangeInCanonicalList(character_ranges,
- num_canonical,
- character_ranges->at(read));
- read++;
- } while (read < n);
- character_ranges->Rewind(num_canonical);
-
- ASSERT(CharacterRange::IsCanonical(character_ranges));
-}
-
-
-// Utility function for CharacterRange::Merge. Adds a range at the end of
-// a canonicalized range list, if necessary merging the range with the last
-// range of the list.
-static void AddRangeToSet(ZoneList<CharacterRange>* set, CharacterRange range) {
- if (set == NULL) return;
- ASSERT(set->length() == 0 || set->at(set->length() - 1).to() < range.from());
- int n = set->length();
- if (n > 0) {
- CharacterRange lastRange = set->at(n - 1);
- if (lastRange.to() == range.from() - 1) {
- set->at(n - 1) = CharacterRange(lastRange.from(), range.to());
- return;
- }
- }
- set->Add(range);
-}
-
-
-static void AddRangeToSelectedSet(int selector,
- ZoneList<CharacterRange>* first_set,
- ZoneList<CharacterRange>* second_set,
- ZoneList<CharacterRange>* intersection_set,
- CharacterRange range) {
- switch (selector) {
- case kInsideFirst:
- AddRangeToSet(first_set, range);
- break;
- case kInsideSecond:
- AddRangeToSet(second_set, range);
- break;
- case kInsideBoth:
- AddRangeToSet(intersection_set, range);
- break;
- }
-}
-
-
-
-void CharacterRange::Merge(ZoneList<CharacterRange>* first_set,
- ZoneList<CharacterRange>* second_set,
- ZoneList<CharacterRange>* first_set_only_out,
- ZoneList<CharacterRange>* second_set_only_out,
- ZoneList<CharacterRange>* both_sets_out) {
- // Inputs are canonicalized.
- ASSERT(CharacterRange::IsCanonical(first_set));
- ASSERT(CharacterRange::IsCanonical(second_set));
- // Outputs are empty, if applicable.
- ASSERT(first_set_only_out == NULL || first_set_only_out->length() == 0);
- ASSERT(second_set_only_out == NULL || second_set_only_out->length() == 0);
- ASSERT(both_sets_out == NULL || both_sets_out->length() == 0);
-
- // Merge sets by iterating through the lists in order of lowest "from" value,
- // and putting intervals into one of three sets.
-
- if (first_set->length() == 0) {
- second_set_only_out->AddAll(*second_set);
- return;
- }
- if (second_set->length() == 0) {
- first_set_only_out->AddAll(*first_set);
- return;
- }
- // Indices into input lists.
- int i1 = 0;
- int i2 = 0;
- // Cache length of input lists.
- int n1 = first_set->length();
- int n2 = second_set->length();
- // Current range. May be invalid if state is kInsideNone.
- int from = 0;
- int to = -1;
- // Where current range comes from.
- int state = kInsideNone;
-
- while (i1 < n1 || i2 < n2) {
- CharacterRange next_range;
- int range_source;
- if (i2 == n2 ||
- (i1 < n1 && first_set->at(i1).from() < second_set->at(i2).from())) {
- // Next smallest element is in first set.
- next_range = first_set->at(i1++);
- range_source = kInsideFirst;
- } else {
- // Next smallest element is in second set.
- next_range = second_set->at(i2++);
- range_source = kInsideSecond;
- }
- if (to < next_range.from()) {
- // Ranges disjoint: |current| |next|
- AddRangeToSelectedSet(state,
- first_set_only_out,
- second_set_only_out,
- both_sets_out,
- CharacterRange(from, to));
- from = next_range.from();
- to = next_range.to();
- state = range_source;
- } else {
- if (from < next_range.from()) {
- AddRangeToSelectedSet(state,
- first_set_only_out,
- second_set_only_out,
- both_sets_out,
- CharacterRange(from, next_range.from()-1));
- }
- if (to < next_range.to()) {
- // Ranges overlap: |current|
- // |next|
- AddRangeToSelectedSet(state | range_source,
- first_set_only_out,
- second_set_only_out,
- both_sets_out,
- CharacterRange(next_range.from(), to));
- from = to + 1;
- to = next_range.to();
- state = range_source;
- } else {
- // Range included: |current| , possibly ending at same character.
- // |next|
- AddRangeToSelectedSet(
- state | range_source,
- first_set_only_out,
- second_set_only_out,
- both_sets_out,
- CharacterRange(next_range.from(), next_range.to()));
- from = next_range.to() + 1;
- // If ranges end at same character, both ranges are consumed completely.
- if (next_range.to() == to) state = kInsideNone;
- }
- }
- }
- AddRangeToSelectedSet(state,
- first_set_only_out,
- second_set_only_out,
- both_sets_out,
- CharacterRange(from, to));
-}
-
-
-void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
- ZoneList<CharacterRange>* negated_ranges) {
- ASSERT(CharacterRange::IsCanonical(ranges));
- ASSERT_EQ(0, negated_ranges->length());
- int range_count = ranges->length();
- uc16 from = 0;
- int i = 0;
- if (range_count > 0 && ranges->at(0).from() == 0) {
- from = ranges->at(0).to();
- i = 1;
- }
- while (i < range_count) {
- CharacterRange range = ranges->at(i);
- negated_ranges->Add(CharacterRange(from + 1, range.from() - 1));
- from = range.to();
- i++;
- }
- if (from < String::kMaxUC16CharCode) {
- negated_ranges->Add(CharacterRange(from + 1, String::kMaxUC16CharCode));
- }
-}
-
-
-
-// -------------------------------------------------------------------
-// Interest propagation
-
-
-RegExpNode* RegExpNode::TryGetSibling(NodeInfo* info) {
- for (int i = 0; i < siblings_.length(); i++) {
- RegExpNode* sibling = siblings_.Get(i);
- if (sibling->info()->Matches(info))
- return sibling;
- }
- return NULL;
-}
-
-
-RegExpNode* RegExpNode::EnsureSibling(NodeInfo* info, bool* cloned) {
- ASSERT_EQ(false, *cloned);
- siblings_.Ensure(this);
- RegExpNode* result = TryGetSibling(info);
- if (result != NULL) return result;
- result = this->Clone();
- NodeInfo* new_info = result->info();
- new_info->ResetCompilationState();
- new_info->AddFromPreceding(info);
- AddSibling(result);
- *cloned = true;
- return result;
-}
-
-
-template <class C>
-static RegExpNode* PropagateToEndpoint(C* node, NodeInfo* info) {
- NodeInfo full_info(*node->info());
- full_info.AddFromPreceding(info);
- bool cloned = false;
- return RegExpNode::EnsureSibling(node, &full_info, &cloned);
-}
-
-
-// -------------------------------------------------------------------
-// Splay tree
-
-
-OutSet* OutSet::Extend(unsigned value) {
- if (Get(value))
- return this;
- if (successors() != NULL) {
- for (int i = 0; i < successors()->length(); i++) {
- OutSet* successor = successors()->at(i);
- if (successor->Get(value))
- return successor;
- }
- } else {
- successors_ = new ZoneList<OutSet*>(2);
- }
- OutSet* result = new OutSet(first_, remaining_);
- result->Set(value);
- successors()->Add(result);
- return result;
-}
-
-
-void OutSet::Set(unsigned value) {
- if (value < kFirstLimit) {
- first_ |= (1 << value);
- } else {
- if (remaining_ == NULL)
- remaining_ = new ZoneList<unsigned>(1);
- if (remaining_->is_empty() || !remaining_->Contains(value))
- remaining_->Add(value);
- }
-}
-
-
-bool OutSet::Get(unsigned value) {
- if (value < kFirstLimit) {
- return (first_ & (1 << value)) != 0;
- } else if (remaining_ == NULL) {
- return false;
- } else {
- return remaining_->Contains(value);
- }
-}
-
-
-const uc16 DispatchTable::Config::kNoKey = unibrow::Utf8::kBadChar;
-const DispatchTable::Entry DispatchTable::Config::kNoValue;
-
-
-void DispatchTable::AddRange(CharacterRange full_range, int value) {
- CharacterRange current = full_range;
- if (tree()->is_empty()) {
- // If this is the first range we just insert into the table.
- ZoneSplayTree<Config>::Locator loc;
- ASSERT_RESULT(tree()->Insert(current.from(), &loc));
- loc.set_value(Entry(current.from(), current.to(), empty()->Extend(value)));
- return;
- }
- // First see if there is a range to the left of this one that
- // overlaps.
- ZoneSplayTree<Config>::Locator loc;
- if (tree()->FindGreatestLessThan(current.from(), &loc)) {
- Entry* entry = &loc.value();
- // If we've found a range that overlaps with this one, and it
- // starts strictly to the left of this one, we have to fix it
- // because the following code only handles ranges that start on
- // or after the start point of the range we're adding.
- if (entry->from() < current.from() && entry->to() >= current.from()) {
- // Snap the overlapping range in half around the start point of
- // the range we're adding.
- CharacterRange left(entry->from(), current.from() - 1);
- CharacterRange right(current.from(), entry->to());
- // The left part of the overlapping range doesn't overlap.
- // Truncate the whole entry to be just the left part.
- entry->set_to(left.to());
- // The right part is the one that overlaps. We add this part
- // to the map and let the next step deal with merging it with
- // the range we're adding.
- ZoneSplayTree<Config>::Locator loc;
- ASSERT_RESULT(tree()->Insert(right.from(), &loc));
- loc.set_value(Entry(right.from(),
- right.to(),
- entry->out_set()));
- }
- }
- while (current.is_valid()) {
- if (tree()->FindLeastGreaterThan(current.from(), &loc) &&
- (loc.value().from() <= current.to()) &&
- (loc.value().to() >= current.from())) {
- Entry* entry = &loc.value();
- // We have overlap. If there is space between the start point of
- // the range we're adding and where the overlapping range starts
- // then we have to add a range covering just that space.
- if (current.from() < entry->from()) {
- ZoneSplayTree<Config>::Locator ins;
- ASSERT_RESULT(tree()->Insert(current.from(), &ins));
- ins.set_value(Entry(current.from(),
- entry->from() - 1,
- empty()->Extend(value)));
- current.set_from(entry->from());
- }
- ASSERT_EQ(current.from(), entry->from());
- // If the overlapping range extends beyond the one we want to add
- // we have to snap the right part off and add it separately.
- if (entry->to() > current.to()) {
- ZoneSplayTree<Config>::Locator ins;
- ASSERT_RESULT(tree()->Insert(current.to() + 1, &ins));
- ins.set_value(Entry(current.to() + 1,
- entry->to(),
- entry->out_set()));
- entry->set_to(current.to());
- }
- ASSERT(entry->to() <= current.to());
- // The overlapping range is now completely contained by the range
- // we're adding so we can just update it and move the start point
- // of the range we're adding just past it.
- entry->AddValue(value);
- // Bail out if the last interval ended at 0xFFFF since otherwise
- // adding 1 will wrap around to 0.
- if (entry->to() == String::kMaxUC16CharCode)
- break;
- ASSERT(entry->to() + 1 > current.from());
- current.set_from(entry->to() + 1);
- } else {
- // There is no overlap so we can just add the range
- ZoneSplayTree<Config>::Locator ins;
- ASSERT_RESULT(tree()->Insert(current.from(), &ins));
- ins.set_value(Entry(current.from(),
- current.to(),
- empty()->Extend(value)));
- break;
- }
- }
-}
-
-
-OutSet* DispatchTable::Get(uc16 value) {
- ZoneSplayTree<Config>::Locator loc;
- if (!tree()->FindGreatestLessThan(value, &loc))
- return empty();
- Entry* entry = &loc.value();
- if (value <= entry->to())
- return entry->out_set();
- else
- return empty();
-}
-
-
-// -------------------------------------------------------------------
-// Analysis
-
-
-void Analysis::EnsureAnalyzed(RegExpNode* that) {
- StackLimitCheck check(Isolate::Current());
- if (check.HasOverflowed()) {
- fail("Stack overflow");
- return;
- }
- if (that->info()->been_analyzed || that->info()->being_analyzed)
- return;
- that->info()->being_analyzed = true;
- that->Accept(this);
- that->info()->being_analyzed = false;
- that->info()->been_analyzed = true;
-}
-
-
-void Analysis::VisitEnd(EndNode* that) {
- // nothing to do
-}
-
-
-void TextNode::CalculateOffsets() {
- int element_count = elements()->length();
- // Set up the offsets of the elements relative to the start. This is a fixed
- // quantity since a TextNode can only contain fixed-width things.
- int cp_offset = 0;
- for (int i = 0; i < element_count; i++) {
- TextElement& elm = elements()->at(i);
- elm.cp_offset = cp_offset;
- if (elm.type == TextElement::ATOM) {
- cp_offset += elm.data.u_atom->data().length();
- } else {
- cp_offset++;
- Vector<const uc16> quarks = elm.data.u_atom->data();
- }
- }
-}
-
-
-void Analysis::VisitText(TextNode* that) {
- if (ignore_case_) {
- that->MakeCaseIndependent(is_ascii_);
- }
- EnsureAnalyzed(that->on_success());
- if (!has_failed()) {
- that->CalculateOffsets();
- }
-}
-
-
-void Analysis::VisitAction(ActionNode* that) {
- RegExpNode* target = that->on_success();
- EnsureAnalyzed(target);
- if (!has_failed()) {
- // If the next node is interested in what it follows then this node
- // has to be interested too so it can pass the information on.
- that->info()->AddFromFollowing(target->info());
- }
-}
-
-
-void Analysis::VisitChoice(ChoiceNode* that) {
- NodeInfo* info = that->info();
- for (int i = 0; i < that->alternatives()->length(); i++) {
- RegExpNode* node = that->alternatives()->at(i).node();
- EnsureAnalyzed(node);
- if (has_failed()) return;
- // Anything the following nodes need to know has to be known by
- // this node also, so it can pass it on.
- info->AddFromFollowing(node->info());
- }
-}
-
-
-void Analysis::VisitLoopChoice(LoopChoiceNode* that) {
- NodeInfo* info = that->info();
- for (int i = 0; i < that->alternatives()->length(); i++) {
- RegExpNode* node = that->alternatives()->at(i).node();
- if (node != that->loop_node()) {
- EnsureAnalyzed(node);
- if (has_failed()) return;
- info->AddFromFollowing(node->info());
- }
- }
- // Check the loop last since it may need the value of this node
- // to get a correct result.
- EnsureAnalyzed(that->loop_node());
- if (!has_failed()) {
- info->AddFromFollowing(that->loop_node()->info());
- }
-}
-
-
-void Analysis::VisitBackReference(BackReferenceNode* that) {
- EnsureAnalyzed(that->on_success());
-}
-
-
-void Analysis::VisitAssertion(AssertionNode* that) {
- EnsureAnalyzed(that->on_success());
- AssertionNode::AssertionNodeType type = that->type();
- if (type == AssertionNode::AT_BOUNDARY ||
- type == AssertionNode::AT_NON_BOUNDARY) {
- // Check if the following character is known to be a word character
- // or known to not be a word character.
- ZoneList<CharacterRange>* following_chars = that->FirstCharacterSet();
-
- CharacterRange::Canonicalize(following_chars);
-
- SetRelation word_relation =
- CharacterRange::WordCharacterRelation(following_chars);
- if (word_relation.Disjoint()) {
- // Includes the case where following_chars is empty (e.g., end-of-input).
- // Following character is definitely *not* a word character.
- type = (type == AssertionNode::AT_BOUNDARY) ?
- AssertionNode::AFTER_WORD_CHARACTER :
- AssertionNode::AFTER_NONWORD_CHARACTER;
- that->set_type(type);
- } else if (word_relation.ContainedIn()) {
- // Following character is definitely a word character.
- type = (type == AssertionNode::AT_BOUNDARY) ?
- AssertionNode::AFTER_NONWORD_CHARACTER :
- AssertionNode::AFTER_WORD_CHARACTER;
- that->set_type(type);
- }
- }
-}
-
-
-ZoneList<CharacterRange>* RegExpNode::FirstCharacterSet() {
- if (first_character_set_ == NULL) {
- if (ComputeFirstCharacterSet(kFirstCharBudget) < 0) {
- // If we can't find an exact solution within the budget, we
- // set the value to the set of every character, i.e., all characters
- // are possible.
- ZoneList<CharacterRange>* all_set = new ZoneList<CharacterRange>(1);
- all_set->Add(CharacterRange::Everything());
- first_character_set_ = all_set;
- }
- }
- return first_character_set_;
-}
-
-
-int RegExpNode::ComputeFirstCharacterSet(int budget) {
- // Default behavior is to not be able to determine the first character.
- return kComputeFirstCharacterSetFail;
-}
-
-
-int LoopChoiceNode::ComputeFirstCharacterSet(int budget) {
- budget--;
- if (budget >= 0) {
- // Find loop min-iteration. It's the value of the guarded choice node
- // with a GEQ guard, if any.
- int min_repetition = 0;
-
- for (int i = 0; i <= 1; i++) {
- GuardedAlternative alternative = alternatives()->at(i);
- ZoneList<Guard*>* guards = alternative.guards();
- if (guards != NULL && guards->length() > 0) {
- Guard* guard = guards->at(0);
- if (guard->op() == Guard::GEQ) {
- min_repetition = guard->value();
- break;
- }
- }
- }
-
- budget = loop_node()->ComputeFirstCharacterSet(budget);
- if (budget >= 0) {
- ZoneList<CharacterRange>* character_set =
- loop_node()->first_character_set();
- if (body_can_be_zero_length() || min_repetition == 0) {
- budget = continue_node()->ComputeFirstCharacterSet(budget);
- if (budget < 0) return budget;
- ZoneList<CharacterRange>* body_set =
- continue_node()->first_character_set();
- ZoneList<CharacterRange>* union_set =
- new ZoneList<CharacterRange>(Max(character_set->length(),
- body_set->length()));
- CharacterRange::Merge(character_set,
- body_set,
- union_set,
- union_set,
- union_set);
- character_set = union_set;
- }
- set_first_character_set(character_set);
- }
- }
- return budget;
-}
-
-
-int NegativeLookaheadChoiceNode::ComputeFirstCharacterSet(int budget) {
- budget--;
- if (budget >= 0) {
- GuardedAlternative successor = this->alternatives()->at(1);
- RegExpNode* successor_node = successor.node();
- budget = successor_node->ComputeFirstCharacterSet(budget);
- if (budget >= 0) {
- set_first_character_set(successor_node->first_character_set());
- }
- }
- return budget;
-}
-
-
-// The first character set of an EndNode is unknowable. Just use the
-// default implementation that fails and returns all characters as possible.
-
-
-int AssertionNode::ComputeFirstCharacterSet(int budget) {
- budget -= 1;
- if (budget >= 0) {
- switch (type_) {
- case AT_END: {
- set_first_character_set(new ZoneList<CharacterRange>(0));
- break;
- }
- case AT_START:
- case AT_BOUNDARY:
- case AT_NON_BOUNDARY:
- case AFTER_NEWLINE:
- case AFTER_NONWORD_CHARACTER:
- case AFTER_WORD_CHARACTER: {
- ASSERT_NOT_NULL(on_success());
- budget = on_success()->ComputeFirstCharacterSet(budget);
- if (budget >= 0) {
- set_first_character_set(on_success()->first_character_set());
- }
- break;
- }
- }
- }
- return budget;
-}
-
-
-int ActionNode::ComputeFirstCharacterSet(int budget) {
- if (type_ == POSITIVE_SUBMATCH_SUCCESS) return kComputeFirstCharacterSetFail;
- budget--;
- if (budget >= 0) {
- ASSERT_NOT_NULL(on_success());
- budget = on_success()->ComputeFirstCharacterSet(budget);
- if (budget >= 0) {
- set_first_character_set(on_success()->first_character_set());
- }
- }
- return budget;
-}
-
-
-int BackReferenceNode::ComputeFirstCharacterSet(int budget) {
- // We don't know anything about the first character of a backreference
- // at this point.
- // The potential first characters are the first characters of the capture,
- // and the first characters of the on_success node, depending on whether the
- // capture can be empty and whether it is known to be participating or known
- // not to be.
- return kComputeFirstCharacterSetFail;
-}
-
-
-int TextNode::ComputeFirstCharacterSet(int budget) {
- budget--;
- if (budget >= 0) {
- ASSERT_NE(0, elements()->length());
- TextElement text = elements()->at(0);
- if (text.type == TextElement::ATOM) {
- RegExpAtom* atom = text.data.u_atom;
- ASSERT_NE(0, atom->length());
- uc16 first_char = atom->data()[0];
- ZoneList<CharacterRange>* range = new ZoneList<CharacterRange>(1);
- range->Add(CharacterRange(first_char, first_char));
- set_first_character_set(range);
- } else {
- ASSERT(text.type == TextElement::CHAR_CLASS);
- RegExpCharacterClass* char_class = text.data.u_char_class;
- ZoneList<CharacterRange>* ranges = char_class->ranges();
- // TODO(lrn): Canonicalize ranges when they are created
- // instead of waiting until now.
- CharacterRange::Canonicalize(ranges);
- if (char_class->is_negated()) {
- int length = ranges->length();
- int new_length = length + 1;
- if (length > 0) {
- if (ranges->at(0).from() == 0) new_length--;
- if (ranges->at(length - 1).to() == String::kMaxUC16CharCode) {
- new_length--;
- }
- }
- ZoneList<CharacterRange>* negated_ranges =
- new ZoneList<CharacterRange>(new_length);
- CharacterRange::Negate(ranges, negated_ranges);
- set_first_character_set(negated_ranges);
- } else {
- set_first_character_set(ranges);
- }
- }
- }
- return budget;
-}
-
-
-
-// -------------------------------------------------------------------
-// Dispatch table construction
-
-
-void DispatchTableConstructor::VisitEnd(EndNode* that) {
- AddRange(CharacterRange::Everything());
-}
-
-
-void DispatchTableConstructor::BuildTable(ChoiceNode* node) {
- node->set_being_calculated(true);
- ZoneList<GuardedAlternative>* alternatives = node->alternatives();
- for (int i = 0; i < alternatives->length(); i++) {
- set_choice_index(i);
- alternatives->at(i).node()->Accept(this);
- }
- node->set_being_calculated(false);
-}
-
-
-class AddDispatchRange {
- public:
- explicit AddDispatchRange(DispatchTableConstructor* constructor)
- : constructor_(constructor) { }
- void Call(uc32 from, DispatchTable::Entry entry);
- private:
- DispatchTableConstructor* constructor_;
-};
-
-
-void AddDispatchRange::Call(uc32 from, DispatchTable::Entry entry) {
- CharacterRange range(from, entry.to());
- constructor_->AddRange(range);
-}
-
-
-void DispatchTableConstructor::VisitChoice(ChoiceNode* node) {
- if (node->being_calculated())
- return;
- DispatchTable* table = node->GetTable(ignore_case_);
- AddDispatchRange adder(this);
- table->ForEach(&adder);
-}
-
-
-void DispatchTableConstructor::VisitBackReference(BackReferenceNode* that) {
- // TODO(160): Find the node that we refer back to and propagate its start
- // set back to here. For now we just accept anything.
- AddRange(CharacterRange::Everything());
-}
-
-
-void DispatchTableConstructor::VisitAssertion(AssertionNode* that) {
- RegExpNode* target = that->on_success();
- target->Accept(this);
-}
-
-
-static int CompareRangeByFrom(const CharacterRange* a,
- const CharacterRange* b) {
- return Compare<uc16>(a->from(), b->from());
-}
-
-
-void DispatchTableConstructor::AddInverse(ZoneList<CharacterRange>* ranges) {
- ranges->Sort(CompareRangeByFrom);
- uc16 last = 0;
- for (int i = 0; i < ranges->length(); i++) {
- CharacterRange range = ranges->at(i);
- if (last < range.from())
- AddRange(CharacterRange(last, range.from() - 1));
- if (range.to() >= last) {
- if (range.to() == String::kMaxUC16CharCode) {
- return;
- } else {
- last = range.to() + 1;
- }
- }
- }
- AddRange(CharacterRange(last, String::kMaxUC16CharCode));
-}
-
-
-void DispatchTableConstructor::VisitText(TextNode* that) {
- TextElement elm = that->elements()->at(0);
- switch (elm.type) {
- case TextElement::ATOM: {
- uc16 c = elm.data.u_atom->data()[0];
- AddRange(CharacterRange(c, c));
- break;
- }
- case TextElement::CHAR_CLASS: {
- RegExpCharacterClass* tree = elm.data.u_char_class;
- ZoneList<CharacterRange>* ranges = tree->ranges();
- if (tree->is_negated()) {
- AddInverse(ranges);
- } else {
- for (int i = 0; i < ranges->length(); i++)
- AddRange(ranges->at(i));
- }
- break;
- }
- default: {
- UNIMPLEMENTED();
- }
- }
-}
-
-
-void DispatchTableConstructor::VisitAction(ActionNode* that) {
- RegExpNode* target = that->on_success();
- target->Accept(this);
-}
-
-
-RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data,
- bool ignore_case,
- bool is_multiline,
- Handle<String> pattern,
- bool is_ascii) {
- if ((data->capture_count + 1) * 2 - 1 > RegExpMacroAssembler::kMaxRegister) {
- return IrregexpRegExpTooBig();
- }
- RegExpCompiler compiler(data->capture_count, ignore_case, is_ascii);
- // Wrap the body of the regexp in capture #0.
- RegExpNode* captured_body = RegExpCapture::ToNode(data->tree,
- 0,
- &compiler,
- compiler.accept());
- RegExpNode* node = captured_body;
- bool is_end_anchored = data->tree->IsAnchoredAtEnd();
- bool is_start_anchored = data->tree->IsAnchoredAtStart();
- int max_length = data->tree->max_match();
- if (!is_start_anchored) {
- // Add a .*? at the beginning, outside the body capture, unless
- // this expression is anchored at the beginning.
- RegExpNode* loop_node =
- RegExpQuantifier::ToNode(0,
- RegExpTree::kInfinity,
- false,
- new RegExpCharacterClass('*'),
- &compiler,
- captured_body,
- data->contains_anchor);
-
- if (data->contains_anchor) {
- // Unroll loop once, to take care of the case that might start
- // at the start of input.
- ChoiceNode* first_step_node = new ChoiceNode(2);
- first_step_node->AddAlternative(GuardedAlternative(captured_body));
- first_step_node->AddAlternative(GuardedAlternative(
- new TextNode(new RegExpCharacterClass('*'), loop_node)));
- node = first_step_node;
- } else {
- node = loop_node;
- }
- }
- data->node = node;
- Analysis analysis(ignore_case, is_ascii);
- analysis.EnsureAnalyzed(node);
- if (analysis.has_failed()) {
- const char* error_message = analysis.error_message();
- return CompilationResult(error_message);
- }
-
- NodeInfo info = *node->info();
-
- // Create the correct assembler for the architecture.
-#ifndef V8_INTERPRETED_REGEXP
- // Native regexp implementation.
-
- NativeRegExpMacroAssembler::Mode mode =
- is_ascii ? NativeRegExpMacroAssembler::ASCII
- : NativeRegExpMacroAssembler::UC16;
-
-#if V8_TARGET_ARCH_IA32
- RegExpMacroAssemblerIA32 macro_assembler(mode, (data->capture_count + 1) * 2);
-#elif V8_TARGET_ARCH_X64
- RegExpMacroAssemblerX64 macro_assembler(mode, (data->capture_count + 1) * 2);
-#elif V8_TARGET_ARCH_ARM
- RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2);
-#elif V8_TARGET_ARCH_MIPS
- RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2);
-#endif
-
-#else // V8_INTERPRETED_REGEXP
- // Interpreted regexp implementation.
- EmbeddedVector<byte, 1024> codes;
- RegExpMacroAssemblerIrregexp macro_assembler(codes);
-#endif // V8_INTERPRETED_REGEXP
-
- // Inserted here, instead of in Assembler, because it depends on information
- // in the AST that isn't replicated in the Node structure.
- static const int kMaxBacksearchLimit = 1024;
- if (is_end_anchored &&
- !is_start_anchored &&
- max_length < kMaxBacksearchLimit) {
- macro_assembler.SetCurrentPositionFromEnd(max_length);
- }
-
- return compiler.Assemble(&macro_assembler,
- node,
- data->capture_count,
- pattern);
-}
-
-
-}} // namespace v8::internal
diff --git a/src/3rdparty/v8/src/jsregexp.h b/src/3rdparty/v8/src/jsregexp.h
deleted file mode 100644
index 3ed5a7e..0000000
--- a/src/3rdparty/v8/src/jsregexp.h
+++ /dev/null
@@ -1,1483 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JSREGEXP_H_
-#define V8_JSREGEXP_H_
-
-#include "macro-assembler.h"
-#include "zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-class RegExpMacroAssembler;
-
-
-class RegExpImpl {
- public:
- // Whether V8 is compiled with native regexp support or not.
- static bool UsesNativeRegExp() {
-#ifdef V8_INTERPRETED_REGEXP
- return false;
-#else
- return true;
-#endif
- }
-
- // Creates a regular expression literal in the old space.
- // This function calls the garbage collector if necessary.
- static Handle<Object> CreateRegExpLiteral(Handle<JSFunction> constructor,
- Handle<String> pattern,
- Handle<String> flags,
- bool* has_pending_exception);
-
- // Returns a string representation of a regular expression.
- // Implements RegExp.prototype.toString, see ECMA-262 section 15.10.6.4.
- // This function calls the garbage collector if necessary.
- static Handle<String> ToString(Handle<Object> value);
-
- // Parses the RegExp pattern and prepares the JSRegExp object with
- // generic data and choice of implementation - as well as what
- // the implementation wants to store in the data field.
- // Returns false if compilation fails.
- static Handle<Object> Compile(Handle<JSRegExp> re,
- Handle<String> pattern,
- Handle<String> flags);
-
- // See ECMA-262 section 15.10.6.2.
- // This function calls the garbage collector if necessary.
- static Handle<Object> Exec(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Handle<JSArray> lastMatchInfo);
-
- // Prepares a JSRegExp object with Irregexp-specific data.
- static void IrregexpInitialize(Handle<JSRegExp> re,
- Handle<String> pattern,
- JSRegExp::Flags flags,
- int capture_register_count);
-
-
- static void AtomCompile(Handle<JSRegExp> re,
- Handle<String> pattern,
- JSRegExp::Flags flags,
- Handle<String> match_pattern);
-
- static Handle<Object> AtomExec(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Handle<JSArray> lastMatchInfo);
-
- enum IrregexpResult { RE_FAILURE = 0, RE_SUCCESS = 1, RE_EXCEPTION = -1 };
-
- // Prepare a RegExp for being executed one or more times (using
- // IrregexpExecOnce) on the subject.
- // This ensures that the regexp is compiled for the subject, and that
- // the subject is flat.
- // Returns the number of integer spaces required by IrregexpExecOnce
- // as its "registers" argument. If the regexp cannot be compiled,
- // an exception is set as pending, and this function returns negative.
- static int IrregexpPrepare(Handle<JSRegExp> regexp,
- Handle<String> subject);
-
- // Execute a regular expression once on the subject, starting from
- // character "index".
- // If successful, returns RE_SUCCESS and set the capture positions
- // in the first registers.
- // If matching fails, returns RE_FAILURE.
- // If execution fails, sets a pending exception and returns RE_EXCEPTION.
- static IrregexpResult IrregexpExecOnce(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Vector<int> registers);
-
- // Execute an Irregexp bytecode pattern.
- // On a successful match, the result is a JSArray containing
- // captured positions. On a failure, the result is the null value.
- // Returns an empty handle in case of an exception.
- static Handle<Object> IrregexpExec(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Handle<JSArray> lastMatchInfo);
-
- // Array index in the lastMatchInfo array.
- static const int kLastCaptureCount = 0;
- static const int kLastSubject = 1;
- static const int kLastInput = 2;
- static const int kFirstCapture = 3;
- static const int kLastMatchOverhead = 3;
-
- // Direct offset into the lastMatchInfo array.
- static const int kLastCaptureCountOffset =
- FixedArray::kHeaderSize + kLastCaptureCount * kPointerSize;
- static const int kLastSubjectOffset =
- FixedArray::kHeaderSize + kLastSubject * kPointerSize;
- static const int kLastInputOffset =
- FixedArray::kHeaderSize + kLastInput * kPointerSize;
- static const int kFirstCaptureOffset =
- FixedArray::kHeaderSize + kFirstCapture * kPointerSize;
-
- // Used to access the lastMatchInfo array.
- static int GetCapture(FixedArray* array, int index) {
- return Smi::cast(array->get(index + kFirstCapture))->value();
- }
-
- static void SetLastCaptureCount(FixedArray* array, int to) {
- array->set(kLastCaptureCount, Smi::FromInt(to));
- }
-
- static void SetLastSubject(FixedArray* array, String* to) {
- array->set(kLastSubject, to);
- }
-
- static void SetLastInput(FixedArray* array, String* to) {
- array->set(kLastInput, to);
- }
-
- static void SetCapture(FixedArray* array, int index, int to) {
- array->set(index + kFirstCapture, Smi::FromInt(to));
- }
-
- static int GetLastCaptureCount(FixedArray* array) {
- return Smi::cast(array->get(kLastCaptureCount))->value();
- }
-
- // For acting on the JSRegExp data FixedArray.
- static int IrregexpMaxRegisterCount(FixedArray* re);
- static void SetIrregexpMaxRegisterCount(FixedArray* re, int value);
- static int IrregexpNumberOfCaptures(FixedArray* re);
- static int IrregexpNumberOfRegisters(FixedArray* re);
- static ByteArray* IrregexpByteCode(FixedArray* re, bool is_ascii);
- static Code* IrregexpNativeCode(FixedArray* re, bool is_ascii);
-
- private:
- static String* last_ascii_string_;
- static String* two_byte_cached_string_;
-
- static bool CompileIrregexp(Handle<JSRegExp> re, bool is_ascii);
- static inline bool EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii);
-
-
- // Set the subject cache. The previous string buffer is not deleted, so the
- // caller should ensure that it doesn't leak.
- static void SetSubjectCache(String* subject,
- char* utf8_subject,
- int uft8_length,
- int character_position,
- int utf8_position);
-
- // A one element cache of the last utf8_subject string and its length. The
- // subject JS String object is cached in the heap. We also cache a
- // translation between position and utf8 position.
- static char* utf8_subject_cache_;
- static int utf8_length_cache_;
- static int utf8_position_;
- static int character_position_;
-};
-
-
-// Represents the location of one element relative to the intersection of
-// two sets. Corresponds to the four areas of a Venn diagram.
-enum ElementInSetsRelation {
- kInsideNone = 0,
- kInsideFirst = 1,
- kInsideSecond = 2,
- kInsideBoth = 3
-};
-
-
-// Represents the relation of two sets.
-// Sets can be either disjoint, partially or fully overlapping, or equal.
-class SetRelation BASE_EMBEDDED {
- public:
- // Relation is represented by a bit saying whether there are elements in
- // one set that is not in the other, and a bit saying that there are elements
- // that are in both sets.
-
- // Location of an element. Corresponds to the internal areas of
- // a Venn diagram.
- enum {
- kInFirst = 1 << kInsideFirst,
- kInSecond = 1 << kInsideSecond,
- kInBoth = 1 << kInsideBoth
- };
- SetRelation() : bits_(0) {}
- ~SetRelation() {}
- // Add the existence of objects in a particular
- void SetElementsInFirstSet() { bits_ |= kInFirst; }
- void SetElementsInSecondSet() { bits_ |= kInSecond; }
- void SetElementsInBothSets() { bits_ |= kInBoth; }
- // Check the currently known relation of the sets (common functions only,
- // for other combinations, use value() to get the bits and check them
- // manually).
- // Sets are completely disjoint.
- bool Disjoint() { return (bits_ & kInBoth) == 0; }
- // Sets are equal.
- bool Equals() { return (bits_ & (kInFirst | kInSecond)) == 0; }
- // First set contains second.
- bool Contains() { return (bits_ & kInSecond) == 0; }
- // Second set contains first.
- bool ContainedIn() { return (bits_ & kInFirst) == 0; }
- bool NonTrivialIntersection() {
- return (bits_ == (kInFirst | kInSecond | kInBoth));
- }
- int value() { return bits_; }
- private:
- int bits_;
-};
-
-
-class CharacterRange {
- public:
- CharacterRange() : from_(0), to_(0) { }
- // For compatibility with the CHECK_OK macro
- CharacterRange(void* null) { ASSERT_EQ(NULL, null); } //NOLINT
- CharacterRange(uc16 from, uc16 to) : from_(from), to_(to) { }
- static void AddClassEscape(uc16 type, ZoneList<CharacterRange>* ranges);
- static Vector<const uc16> GetWordBounds();
- static inline CharacterRange Singleton(uc16 value) {
- return CharacterRange(value, value);
- }
- static inline CharacterRange Range(uc16 from, uc16 to) {
- ASSERT(from <= to);
- return CharacterRange(from, to);
- }
- static inline CharacterRange Everything() {
- return CharacterRange(0, 0xFFFF);
- }
- bool Contains(uc16 i) { return from_ <= i && i <= to_; }
- uc16 from() const { return from_; }
- void set_from(uc16 value) { from_ = value; }
- uc16 to() const { return to_; }
- void set_to(uc16 value) { to_ = value; }
- bool is_valid() { return from_ <= to_; }
- bool IsEverything(uc16 max) { return from_ == 0 && to_ >= max; }
- bool IsSingleton() { return (from_ == to_); }
- void AddCaseEquivalents(ZoneList<CharacterRange>* ranges, bool is_ascii);
- static void Split(ZoneList<CharacterRange>* base,
- Vector<const uc16> overlay,
- ZoneList<CharacterRange>** included,
- ZoneList<CharacterRange>** excluded);
- // Whether a range list is in canonical form: Ranges ordered by from value,
- // and ranges non-overlapping and non-adjacent.
- static bool IsCanonical(ZoneList<CharacterRange>* ranges);
- // Convert range list to canonical form. The characters covered by the ranges
- // will still be the same, but no character is in more than one range, and
- // adjacent ranges are merged. The resulting list may be shorter than the
- // original, but cannot be longer.
- static void Canonicalize(ZoneList<CharacterRange>* ranges);
- // Check how the set of characters defined by a CharacterRange list relates
- // to the set of word characters. List must be in canonical form.
- static SetRelation WordCharacterRelation(ZoneList<CharacterRange>* ranges);
- // Takes two character range lists (representing character sets) in canonical
- // form and merges them.
- // The characters that are only covered by the first set are added to
- // first_set_only_out. the characters that are only in the second set are
- // added to second_set_only_out, and the characters that are in both are
- // added to both_sets_out.
- // The pointers to first_set_only_out, second_set_only_out and both_sets_out
- // should be to empty lists, but they need not be distinct, and may be NULL.
- // If NULL, the characters are dropped, and if two arguments are the same
- // pointer, the result is the union of the two sets that would be created
- // if the pointers had been distinct.
- // This way, the Merge function can compute all the usual set operations:
- // union (all three out-sets are equal), intersection (only both_sets_out is
- // non-NULL), and set difference (only first_set is non-NULL).
- static void Merge(ZoneList<CharacterRange>* first_set,
- ZoneList<CharacterRange>* second_set,
- ZoneList<CharacterRange>* first_set_only_out,
- ZoneList<CharacterRange>* second_set_only_out,
- ZoneList<CharacterRange>* both_sets_out);
- // Negate the contents of a character range in canonical form.
- static void Negate(ZoneList<CharacterRange>* src,
- ZoneList<CharacterRange>* dst);
- static const int kStartMarker = (1 << 24);
- static const int kPayloadMask = (1 << 24) - 1;
-
- private:
- uc16 from_;
- uc16 to_;
-};
-
-
-// A set of unsigned integers that behaves especially well on small
-// integers (< 32). May do zone-allocation.
-class OutSet: public ZoneObject {
- public:
- OutSet() : first_(0), remaining_(NULL), successors_(NULL) { }
- OutSet* Extend(unsigned value);
- bool Get(unsigned value);
- static const unsigned kFirstLimit = 32;
-
- private:
- // Destructively set a value in this set. In most cases you want
- // to use Extend instead to ensure that only one instance exists
- // that contains the same values.
- void Set(unsigned value);
-
- // The successors are a list of sets that contain the same values
- // as this set and the one more value that is not present in this
- // set.
- ZoneList<OutSet*>* successors() { return successors_; }
-
- OutSet(uint32_t first, ZoneList<unsigned>* remaining)
- : first_(first), remaining_(remaining), successors_(NULL) { }
- uint32_t first_;
- ZoneList<unsigned>* remaining_;
- ZoneList<OutSet*>* successors_;
- friend class Trace;
-};
-
-
-// A mapping from integers, specified as ranges, to a set of integers.
-// Used for mapping character ranges to choices.
-class DispatchTable : public ZoneObject {
- public:
- class Entry {
- public:
- Entry() : from_(0), to_(0), out_set_(NULL) { }
- Entry(uc16 from, uc16 to, OutSet* out_set)
- : from_(from), to_(to), out_set_(out_set) { }
- uc16 from() { return from_; }
- uc16 to() { return to_; }
- void set_to(uc16 value) { to_ = value; }
- void AddValue(int value) { out_set_ = out_set_->Extend(value); }
- OutSet* out_set() { return out_set_; }
- private:
- uc16 from_;
- uc16 to_;
- OutSet* out_set_;
- };
-
- class Config {
- public:
- typedef uc16 Key;
- typedef Entry Value;
- static const uc16 kNoKey;
- static const Entry kNoValue;
- static inline int Compare(uc16 a, uc16 b) {
- if (a == b)
- return 0;
- else if (a < b)
- return -1;
- else
- return 1;
- }
- };
-
- void AddRange(CharacterRange range, int value);
- OutSet* Get(uc16 value);
- void Dump();
-
- template <typename Callback>
- void ForEach(Callback* callback) { return tree()->ForEach(callback); }
- private:
- // There can't be a static empty set since it allocates its
- // successors in a zone and caches them.
- OutSet* empty() { return &empty_; }
- OutSet empty_;
- ZoneSplayTree<Config>* tree() { return &tree_; }
- ZoneSplayTree<Config> tree_;
-};
-
-
-#define FOR_EACH_NODE_TYPE(VISIT) \
- VISIT(End) \
- VISIT(Action) \
- VISIT(Choice) \
- VISIT(BackReference) \
- VISIT(Assertion) \
- VISIT(Text)
-
-
-#define FOR_EACH_REG_EXP_TREE_TYPE(VISIT) \
- VISIT(Disjunction) \
- VISIT(Alternative) \
- VISIT(Assertion) \
- VISIT(CharacterClass) \
- VISIT(Atom) \
- VISIT(Quantifier) \
- VISIT(Capture) \
- VISIT(Lookahead) \
- VISIT(BackReference) \
- VISIT(Empty) \
- VISIT(Text)
-
-
-#define FORWARD_DECLARE(Name) class RegExp##Name;
-FOR_EACH_REG_EXP_TREE_TYPE(FORWARD_DECLARE)
-#undef FORWARD_DECLARE
-
-
-class TextElement {
- public:
- enum Type {UNINITIALIZED, ATOM, CHAR_CLASS};
- TextElement() : type(UNINITIALIZED) { }
- explicit TextElement(Type t) : type(t), cp_offset(-1) { }
- static TextElement Atom(RegExpAtom* atom);
- static TextElement CharClass(RegExpCharacterClass* char_class);
- int length();
- Type type;
- union {
- RegExpAtom* u_atom;
- RegExpCharacterClass* u_char_class;
- } data;
- int cp_offset;
-};
-
-
-class Trace;
-
-
-struct NodeInfo {
- NodeInfo()
- : being_analyzed(false),
- been_analyzed(false),
- follows_word_interest(false),
- follows_newline_interest(false),
- follows_start_interest(false),
- at_end(false),
- visited(false) { }
-
- // Returns true if the interests and assumptions of this node
- // matches the given one.
- bool Matches(NodeInfo* that) {
- return (at_end == that->at_end) &&
- (follows_word_interest == that->follows_word_interest) &&
- (follows_newline_interest == that->follows_newline_interest) &&
- (follows_start_interest == that->follows_start_interest);
- }
-
- // Updates the interests of this node given the interests of the
- // node preceding it.
- void AddFromPreceding(NodeInfo* that) {
- at_end |= that->at_end;
- follows_word_interest |= that->follows_word_interest;
- follows_newline_interest |= that->follows_newline_interest;
- follows_start_interest |= that->follows_start_interest;
- }
-
- bool HasLookbehind() {
- return follows_word_interest ||
- follows_newline_interest ||
- follows_start_interest;
- }
-
- // Sets the interests of this node to include the interests of the
- // following node.
- void AddFromFollowing(NodeInfo* that) {
- follows_word_interest |= that->follows_word_interest;
- follows_newline_interest |= that->follows_newline_interest;
- follows_start_interest |= that->follows_start_interest;
- }
-
- void ResetCompilationState() {
- being_analyzed = false;
- been_analyzed = false;
- }
-
- bool being_analyzed: 1;
- bool been_analyzed: 1;
-
- // These bits are set of this node has to know what the preceding
- // character was.
- bool follows_word_interest: 1;
- bool follows_newline_interest: 1;
- bool follows_start_interest: 1;
-
- bool at_end: 1;
- bool visited: 1;
-};
-
-
-class SiblingList {
- public:
- SiblingList() : list_(NULL) { }
- int length() {
- return list_ == NULL ? 0 : list_->length();
- }
- void Ensure(RegExpNode* parent) {
- if (list_ == NULL) {
- list_ = new ZoneList<RegExpNode*>(2);
- list_->Add(parent);
- }
- }
- void Add(RegExpNode* node) { list_->Add(node); }
- RegExpNode* Get(int index) { return list_->at(index); }
- private:
- ZoneList<RegExpNode*>* list_;
-};
-
-
-// Details of a quick mask-compare check that can look ahead in the
-// input stream.
-class QuickCheckDetails {
- public:
- QuickCheckDetails()
- : characters_(0),
- mask_(0),
- value_(0),
- cannot_match_(false) { }
- explicit QuickCheckDetails(int characters)
- : characters_(characters),
- mask_(0),
- value_(0),
- cannot_match_(false) { }
- bool Rationalize(bool ascii);
- // Merge in the information from another branch of an alternation.
- void Merge(QuickCheckDetails* other, int from_index);
- // Advance the current position by some amount.
- void Advance(int by, bool ascii);
- void Clear();
- bool cannot_match() { return cannot_match_; }
- void set_cannot_match() { cannot_match_ = true; }
- struct Position {
- Position() : mask(0), value(0), determines_perfectly(false) { }
- uc16 mask;
- uc16 value;
- bool determines_perfectly;
- };
- int characters() { return characters_; }
- void set_characters(int characters) { characters_ = characters; }
- Position* positions(int index) {
- ASSERT(index >= 0);
- ASSERT(index < characters_);
- return positions_ + index;
- }
- uint32_t mask() { return mask_; }
- uint32_t value() { return value_; }
-
- private:
- // How many characters do we have quick check information from. This is
- // the same for all branches of a choice node.
- int characters_;
- Position positions_[4];
- // These values are the condensate of the above array after Rationalize().
- uint32_t mask_;
- uint32_t value_;
- // If set to true, there is no way this quick check can match at all.
- // E.g., if it requires to be at the start of the input, and isn't.
- bool cannot_match_;
-};
-
-
-class RegExpNode: public ZoneObject {
- public:
- RegExpNode() : first_character_set_(NULL), trace_count_(0) { }
- virtual ~RegExpNode();
- virtual void Accept(NodeVisitor* visitor) = 0;
- // Generates a goto to this node or actually generates the code at this point.
- virtual void Emit(RegExpCompiler* compiler, Trace* trace) = 0;
- // How many characters must this node consume at a minimum in order to
- // succeed. If we have found at least 'still_to_find' characters that
- // must be consumed there is no need to ask any following nodes whether
- // they are sure to eat any more characters. The not_at_start argument is
- // used to indicate that we know we are not at the start of the input. In
- // this case anchored branches will always fail and can be ignored when
- // determining how many characters are consumed on success.
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start) = 0;
- // Emits some quick code that checks whether the preloaded characters match.
- // Falls through on certain failure, jumps to the label on possible success.
- // If the node cannot make a quick check it does nothing and returns false.
- bool EmitQuickCheck(RegExpCompiler* compiler,
- Trace* trace,
- bool preload_has_checked_bounds,
- Label* on_possible_success,
- QuickCheckDetails* details_return,
- bool fall_through_on_failure);
- // For a given number of characters this returns a mask and a value. The
- // next n characters are anded with the mask and compared with the value.
- // A comparison failure indicates the node cannot match the next n characters.
- // A comparison success indicates the node may match.
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start) = 0;
- static const int kNodeIsTooComplexForGreedyLoops = -1;
- virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
- Label* label() { return &label_; }
- // If non-generic code is generated for a node (ie the node is not at the
- // start of the trace) then it cannot be reused. This variable sets a limit
- // on how often we allow that to happen before we insist on starting a new
- // trace and generating generic code for a node that can be reused by flushing
- // the deferred actions in the current trace and generating a goto.
- static const int kMaxCopiesCodeGenerated = 10;
-
- NodeInfo* info() { return &info_; }
-
- void AddSibling(RegExpNode* node) { siblings_.Add(node); }
-
- // Static version of EnsureSibling that expresses the fact that the
- // result has the same type as the input.
- template <class C>
- static C* EnsureSibling(C* node, NodeInfo* info, bool* cloned) {
- return static_cast<C*>(node->EnsureSibling(info, cloned));
- }
-
- SiblingList* siblings() { return &siblings_; }
- void set_siblings(SiblingList* other) { siblings_ = *other; }
-
- // Return the set of possible next characters recognized by the regexp
- // (or a safe subset, potentially the set of all characters).
- ZoneList<CharacterRange>* FirstCharacterSet();
-
- // Compute (if possible within the budget of traversed nodes) the
- // possible first characters of the input matched by this node and
- // its continuation. Returns the remaining budget after the computation.
- // If the budget is spent, the result is negative, and the cached
- // first_character_set_ value isn't set.
- virtual int ComputeFirstCharacterSet(int budget);
-
- // Get and set the cached first character set value.
- ZoneList<CharacterRange>* first_character_set() {
- return first_character_set_;
- }
- void set_first_character_set(ZoneList<CharacterRange>* character_set) {
- first_character_set_ = character_set;
- }
-
- protected:
- enum LimitResult { DONE, CONTINUE };
- static const int kComputeFirstCharacterSetFail = -1;
-
- LimitResult LimitVersions(RegExpCompiler* compiler, Trace* trace);
-
- // Returns a sibling of this node whose interests and assumptions
- // match the ones in the given node info. If no sibling exists NULL
- // is returned.
- RegExpNode* TryGetSibling(NodeInfo* info);
-
- // Returns a sibling of this node whose interests match the ones in
- // the given node info. The info must not contain any assertions.
- // If no node exists a new one will be created by cloning the current
- // node. The result will always be an instance of the same concrete
- // class as this node.
- RegExpNode* EnsureSibling(NodeInfo* info, bool* cloned);
-
- // Returns a clone of this node initialized using the copy constructor
- // of its concrete class. Note that the node may have to be pre-
- // processed before it is on a usable state.
- virtual RegExpNode* Clone() = 0;
-
- private:
- static const int kFirstCharBudget = 10;
- Label label_;
- NodeInfo info_;
- SiblingList siblings_;
- ZoneList<CharacterRange>* first_character_set_;
- // This variable keeps track of how many times code has been generated for
- // this node (in different traces). We don't keep track of where the
- // generated code is located unless the code is generated at the start of
- // a trace, in which case it is generic and can be reused by flushing the
- // deferred operations in the current trace and generating a goto.
- int trace_count_;
-};
-
-
-// A simple closed interval.
-class Interval {
- public:
- Interval() : from_(kNone), to_(kNone) { }
- Interval(int from, int to) : from_(from), to_(to) { }
- Interval Union(Interval that) {
- if (that.from_ == kNone)
- return *this;
- else if (from_ == kNone)
- return that;
- else
- return Interval(Min(from_, that.from_), Max(to_, that.to_));
- }
- bool Contains(int value) {
- return (from_ <= value) && (value <= to_);
- }
- bool is_empty() { return from_ == kNone; }
- int from() { return from_; }
- int to() { return to_; }
- static Interval Empty() { return Interval(); }
- static const int kNone = -1;
- private:
- int from_;
- int to_;
-};
-
-
-class SeqRegExpNode: public RegExpNode {
- public:
- explicit SeqRegExpNode(RegExpNode* on_success)
- : on_success_(on_success) { }
- RegExpNode* on_success() { return on_success_; }
- void set_on_success(RegExpNode* node) { on_success_ = node; }
- private:
- RegExpNode* on_success_;
-};
-
-
-class ActionNode: public SeqRegExpNode {
- public:
- enum Type {
- SET_REGISTER,
- INCREMENT_REGISTER,
- STORE_POSITION,
- BEGIN_SUBMATCH,
- POSITIVE_SUBMATCH_SUCCESS,
- EMPTY_MATCH_CHECK,
- CLEAR_CAPTURES
- };
- static ActionNode* SetRegister(int reg, int val, RegExpNode* on_success);
- static ActionNode* IncrementRegister(int reg, RegExpNode* on_success);
- static ActionNode* StorePosition(int reg,
- bool is_capture,
- RegExpNode* on_success);
- static ActionNode* ClearCaptures(Interval range, RegExpNode* on_success);
- static ActionNode* BeginSubmatch(int stack_pointer_reg,
- int position_reg,
- RegExpNode* on_success);
- static ActionNode* PositiveSubmatchSuccess(int stack_pointer_reg,
- int restore_reg,
- int clear_capture_count,
- int clear_capture_from,
- RegExpNode* on_success);
- static ActionNode* EmptyMatchCheck(int start_register,
- int repetition_register,
- int repetition_limit,
- RegExpNode* on_success);
- virtual void Accept(NodeVisitor* visitor);
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start);
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int filled_in,
- bool not_at_start) {
- return on_success()->GetQuickCheckDetails(
- details, compiler, filled_in, not_at_start);
- }
- Type type() { return type_; }
- // TODO(erikcorry): We should allow some action nodes in greedy loops.
- virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
- virtual ActionNode* Clone() { return new ActionNode(*this); }
- virtual int ComputeFirstCharacterSet(int budget);
- private:
- union {
- struct {
- int reg;
- int value;
- } u_store_register;
- struct {
- int reg;
- } u_increment_register;
- struct {
- int reg;
- bool is_capture;
- } u_position_register;
- struct {
- int stack_pointer_register;
- int current_position_register;
- int clear_register_count;
- int clear_register_from;
- } u_submatch;
- struct {
- int start_register;
- int repetition_register;
- int repetition_limit;
- } u_empty_match_check;
- struct {
- int range_from;
- int range_to;
- } u_clear_captures;
- } data_;
- ActionNode(Type type, RegExpNode* on_success)
- : SeqRegExpNode(on_success),
- type_(type) { }
- Type type_;
- friend class DotPrinter;
-};
-
-
-class TextNode: public SeqRegExpNode {
- public:
- TextNode(ZoneList<TextElement>* elms,
- RegExpNode* on_success)
- : SeqRegExpNode(on_success),
- elms_(elms) { }
- TextNode(RegExpCharacterClass* that,
- RegExpNode* on_success)
- : SeqRegExpNode(on_success),
- elms_(new ZoneList<TextElement>(1)) {
- elms_->Add(TextElement::CharClass(that));
- }
- virtual void Accept(NodeVisitor* visitor);
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start);
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start);
- ZoneList<TextElement>* elements() { return elms_; }
- void MakeCaseIndependent(bool is_ascii);
- virtual int GreedyLoopTextLength();
- virtual TextNode* Clone() {
- TextNode* result = new TextNode(*this);
- result->CalculateOffsets();
- return result;
- }
- void CalculateOffsets();
- virtual int ComputeFirstCharacterSet(int budget);
- private:
- enum TextEmitPassType {
- NON_ASCII_MATCH, // Check for characters that can't match.
- SIMPLE_CHARACTER_MATCH, // Case-dependent single character check.
- NON_LETTER_CHARACTER_MATCH, // Check characters that have no case equivs.
- CASE_CHARACTER_MATCH, // Case-independent single character check.
- CHARACTER_CLASS_MATCH // Character class.
- };
- static bool SkipPass(int pass, bool ignore_case);
- static const int kFirstRealPass = SIMPLE_CHARACTER_MATCH;
- static const int kLastPass = CHARACTER_CLASS_MATCH;
- void TextEmitPass(RegExpCompiler* compiler,
- TextEmitPassType pass,
- bool preloaded,
- Trace* trace,
- bool first_element_checked,
- int* checked_up_to);
- int Length();
- ZoneList<TextElement>* elms_;
-};
-
-
-class AssertionNode: public SeqRegExpNode {
- public:
- enum AssertionNodeType {
- AT_END,
- AT_START,
- AT_BOUNDARY,
- AT_NON_BOUNDARY,
- AFTER_NEWLINE,
- // Types not directly expressible in regexp syntax.
- // Used for modifying a boundary node if its following character is
- // known to be word and/or non-word.
- AFTER_NONWORD_CHARACTER,
- AFTER_WORD_CHARACTER
- };
- static AssertionNode* AtEnd(RegExpNode* on_success) {
- return new AssertionNode(AT_END, on_success);
- }
- static AssertionNode* AtStart(RegExpNode* on_success) {
- return new AssertionNode(AT_START, on_success);
- }
- static AssertionNode* AtBoundary(RegExpNode* on_success) {
- return new AssertionNode(AT_BOUNDARY, on_success);
- }
- static AssertionNode* AtNonBoundary(RegExpNode* on_success) {
- return new AssertionNode(AT_NON_BOUNDARY, on_success);
- }
- static AssertionNode* AfterNewline(RegExpNode* on_success) {
- return new AssertionNode(AFTER_NEWLINE, on_success);
- }
- virtual void Accept(NodeVisitor* visitor);
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start);
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int filled_in,
- bool not_at_start);
- virtual int ComputeFirstCharacterSet(int budget);
- virtual AssertionNode* Clone() { return new AssertionNode(*this); }
- AssertionNodeType type() { return type_; }
- void set_type(AssertionNodeType type) { type_ = type; }
- private:
- AssertionNode(AssertionNodeType t, RegExpNode* on_success)
- : SeqRegExpNode(on_success), type_(t) { }
- AssertionNodeType type_;
-};
-
-
-class BackReferenceNode: public SeqRegExpNode {
- public:
- BackReferenceNode(int start_reg,
- int end_reg,
- RegExpNode* on_success)
- : SeqRegExpNode(on_success),
- start_reg_(start_reg),
- end_reg_(end_reg) { }
- virtual void Accept(NodeVisitor* visitor);
- int start_register() { return start_reg_; }
- int end_register() { return end_reg_; }
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start);
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start) {
- return;
- }
- virtual BackReferenceNode* Clone() { return new BackReferenceNode(*this); }
- virtual int ComputeFirstCharacterSet(int budget);
- private:
- int start_reg_;
- int end_reg_;
-};
-
-
-class EndNode: public RegExpNode {
- public:
- enum Action { ACCEPT, BACKTRACK, NEGATIVE_SUBMATCH_SUCCESS };
- explicit EndNode(Action action) : action_(action) { }
- virtual void Accept(NodeVisitor* visitor);
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start) { return 0; }
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start) {
- // Returning 0 from EatsAtLeast should ensure we never get here.
- UNREACHABLE();
- }
- virtual EndNode* Clone() { return new EndNode(*this); }
- private:
- Action action_;
-};
-
-
-class NegativeSubmatchSuccess: public EndNode {
- public:
- NegativeSubmatchSuccess(int stack_pointer_reg,
- int position_reg,
- int clear_capture_count,
- int clear_capture_start)
- : EndNode(NEGATIVE_SUBMATCH_SUCCESS),
- stack_pointer_register_(stack_pointer_reg),
- current_position_register_(position_reg),
- clear_capture_count_(clear_capture_count),
- clear_capture_start_(clear_capture_start) { }
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
-
- private:
- int stack_pointer_register_;
- int current_position_register_;
- int clear_capture_count_;
- int clear_capture_start_;
-};
-
-
-class Guard: public ZoneObject {
- public:
- enum Relation { LT, GEQ };
- Guard(int reg, Relation op, int value)
- : reg_(reg),
- op_(op),
- value_(value) { }
- int reg() { return reg_; }
- Relation op() { return op_; }
- int value() { return value_; }
-
- private:
- int reg_;
- Relation op_;
- int value_;
-};
-
-
-class GuardedAlternative {
- public:
- explicit GuardedAlternative(RegExpNode* node) : node_(node), guards_(NULL) { }
- void AddGuard(Guard* guard);
- RegExpNode* node() { return node_; }
- void set_node(RegExpNode* node) { node_ = node; }
- ZoneList<Guard*>* guards() { return guards_; }
-
- private:
- RegExpNode* node_;
- ZoneList<Guard*>* guards_;
-};
-
-
-class AlternativeGeneration;
-
-
-class ChoiceNode: public RegExpNode {
- public:
- explicit ChoiceNode(int expected_size)
- : alternatives_(new ZoneList<GuardedAlternative>(expected_size)),
- table_(NULL),
- not_at_start_(false),
- being_calculated_(false) { }
- virtual void Accept(NodeVisitor* visitor);
- void AddAlternative(GuardedAlternative node) { alternatives()->Add(node); }
- ZoneList<GuardedAlternative>* alternatives() { return alternatives_; }
- DispatchTable* GetTable(bool ignore_case);
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start);
- int EatsAtLeastHelper(int still_to_find,
- int recursion_depth,
- RegExpNode* ignore_this_node,
- bool not_at_start);
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start);
- virtual ChoiceNode* Clone() { return new ChoiceNode(*this); }
-
- bool being_calculated() { return being_calculated_; }
- bool not_at_start() { return not_at_start_; }
- void set_not_at_start() { not_at_start_ = true; }
- void set_being_calculated(bool b) { being_calculated_ = b; }
- virtual bool try_to_emit_quick_check_for_alternative(int i) { return true; }
-
- protected:
- int GreedyLoopTextLength(GuardedAlternative* alternative);
- ZoneList<GuardedAlternative>* alternatives_;
-
- private:
- friend class DispatchTableConstructor;
- friend class Analysis;
- void GenerateGuard(RegExpMacroAssembler* macro_assembler,
- Guard* guard,
- Trace* trace);
- int CalculatePreloadCharacters(RegExpCompiler* compiler, bool not_at_start);
- void EmitOutOfLineContinuation(RegExpCompiler* compiler,
- Trace* trace,
- GuardedAlternative alternative,
- AlternativeGeneration* alt_gen,
- int preload_characters,
- bool next_expects_preload);
- DispatchTable* table_;
- // If true, this node is never checked at the start of the input.
- // Allows a new trace to start with at_start() set to false.
- bool not_at_start_;
- bool being_calculated_;
-};
-
-
-class NegativeLookaheadChoiceNode: public ChoiceNode {
- public:
- explicit NegativeLookaheadChoiceNode(GuardedAlternative this_must_fail,
- GuardedAlternative then_do_this)
- : ChoiceNode(2) {
- AddAlternative(this_must_fail);
- AddAlternative(then_do_this);
- }
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start);
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start);
- // For a negative lookahead we don't emit the quick check for the
- // alternative that is expected to fail. This is because quick check code
- // starts by loading enough characters for the alternative that takes fewest
- // characters, but on a negative lookahead the negative branch did not take
- // part in that calculation (EatsAtLeast) so the assumptions don't hold.
- virtual bool try_to_emit_quick_check_for_alternative(int i) { return i != 0; }
- virtual int ComputeFirstCharacterSet(int budget);
-};
-
-
-class LoopChoiceNode: public ChoiceNode {
- public:
- explicit LoopChoiceNode(bool body_can_be_zero_length)
- : ChoiceNode(2),
- loop_node_(NULL),
- continue_node_(NULL),
- body_can_be_zero_length_(body_can_be_zero_length) { }
- void AddLoopAlternative(GuardedAlternative alt);
- void AddContinueAlternative(GuardedAlternative alt);
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start);
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start);
- virtual int ComputeFirstCharacterSet(int budget);
- virtual LoopChoiceNode* Clone() { return new LoopChoiceNode(*this); }
- RegExpNode* loop_node() { return loop_node_; }
- RegExpNode* continue_node() { return continue_node_; }
- bool body_can_be_zero_length() { return body_can_be_zero_length_; }
- virtual void Accept(NodeVisitor* visitor);
-
- private:
- // AddAlternative is made private for loop nodes because alternatives
- // should not be added freely, we need to keep track of which node
- // goes back to the node itself.
- void AddAlternative(GuardedAlternative node) {
- ChoiceNode::AddAlternative(node);
- }
-
- RegExpNode* loop_node_;
- RegExpNode* continue_node_;
- bool body_can_be_zero_length_;
-};
-
-
-// There are many ways to generate code for a node. This class encapsulates
-// the current way we should be generating. In other words it encapsulates
-// the current state of the code generator. The effect of this is that we
-// generate code for paths that the matcher can take through the regular
-// expression. A given node in the regexp can be code-generated several times
-// as it can be part of several traces. For example for the regexp:
-// /foo(bar|ip)baz/ the code to match baz will be generated twice, once as part
-// of the foo-bar-baz trace and once as part of the foo-ip-baz trace. The code
-// to match foo is generated only once (the traces have a common prefix). The
-// code to store the capture is deferred and generated (twice) after the places
-// where baz has been matched.
-class Trace {
- public:
- // A value for a property that is either known to be true, know to be false,
- // or not known.
- enum TriBool {
- UNKNOWN = -1, FALSE = 0, TRUE = 1
- };
-
- class DeferredAction {
- public:
- DeferredAction(ActionNode::Type type, int reg)
- : type_(type), reg_(reg), next_(NULL) { }
- DeferredAction* next() { return next_; }
- bool Mentions(int reg);
- int reg() { return reg_; }
- ActionNode::Type type() { return type_; }
- private:
- ActionNode::Type type_;
- int reg_;
- DeferredAction* next_;
- friend class Trace;
- };
-
- class DeferredCapture : public DeferredAction {
- public:
- DeferredCapture(int reg, bool is_capture, Trace* trace)
- : DeferredAction(ActionNode::STORE_POSITION, reg),
- cp_offset_(trace->cp_offset()),
- is_capture_(is_capture) { }
- int cp_offset() { return cp_offset_; }
- bool is_capture() { return is_capture_; }
- private:
- int cp_offset_;
- bool is_capture_;
- void set_cp_offset(int cp_offset) { cp_offset_ = cp_offset; }
- };
-
- class DeferredSetRegister : public DeferredAction {
- public:
- DeferredSetRegister(int reg, int value)
- : DeferredAction(ActionNode::SET_REGISTER, reg),
- value_(value) { }
- int value() { return value_; }
- private:
- int value_;
- };
-
- class DeferredClearCaptures : public DeferredAction {
- public:
- explicit DeferredClearCaptures(Interval range)
- : DeferredAction(ActionNode::CLEAR_CAPTURES, -1),
- range_(range) { }
- Interval range() { return range_; }
- private:
- Interval range_;
- };
-
- class DeferredIncrementRegister : public DeferredAction {
- public:
- explicit DeferredIncrementRegister(int reg)
- : DeferredAction(ActionNode::INCREMENT_REGISTER, reg) { }
- };
-
- Trace()
- : cp_offset_(0),
- actions_(NULL),
- backtrack_(NULL),
- stop_node_(NULL),
- loop_label_(NULL),
- characters_preloaded_(0),
- bound_checked_up_to_(0),
- flush_budget_(100),
- at_start_(UNKNOWN) { }
-
- // End the trace. This involves flushing the deferred actions in the trace
- // and pushing a backtrack location onto the backtrack stack. Once this is
- // done we can start a new trace or go to one that has already been
- // generated.
- void Flush(RegExpCompiler* compiler, RegExpNode* successor);
- int cp_offset() { return cp_offset_; }
- DeferredAction* actions() { return actions_; }
- // A trivial trace is one that has no deferred actions or other state that
- // affects the assumptions used when generating code. There is no recorded
- // backtrack location in a trivial trace, so with a trivial trace we will
- // generate code that, on a failure to match, gets the backtrack location
- // from the backtrack stack rather than using a direct jump instruction. We
- // always start code generation with a trivial trace and non-trivial traces
- // are created as we emit code for nodes or add to the list of deferred
- // actions in the trace. The location of the code generated for a node using
- // a trivial trace is recorded in a label in the node so that gotos can be
- // generated to that code.
- bool is_trivial() {
- return backtrack_ == NULL &&
- actions_ == NULL &&
- cp_offset_ == 0 &&
- characters_preloaded_ == 0 &&
- bound_checked_up_to_ == 0 &&
- quick_check_performed_.characters() == 0 &&
- at_start_ == UNKNOWN;
- }
- TriBool at_start() { return at_start_; }
- void set_at_start(bool at_start) { at_start_ = at_start ? TRUE : FALSE; }
- Label* backtrack() { return backtrack_; }
- Label* loop_label() { return loop_label_; }
- RegExpNode* stop_node() { return stop_node_; }
- int characters_preloaded() { return characters_preloaded_; }
- int bound_checked_up_to() { return bound_checked_up_to_; }
- int flush_budget() { return flush_budget_; }
- QuickCheckDetails* quick_check_performed() { return &quick_check_performed_; }
- bool mentions_reg(int reg);
- // Returns true if a deferred position store exists to the specified
- // register and stores the offset in the out-parameter. Otherwise
- // returns false.
- bool GetStoredPosition(int reg, int* cp_offset);
- // These set methods and AdvanceCurrentPositionInTrace should be used only on
- // new traces - the intention is that traces are immutable after creation.
- void add_action(DeferredAction* new_action) {
- ASSERT(new_action->next_ == NULL);
- new_action->next_ = actions_;
- actions_ = new_action;
- }
- void set_backtrack(Label* backtrack) { backtrack_ = backtrack; }
- void set_stop_node(RegExpNode* node) { stop_node_ = node; }
- void set_loop_label(Label* label) { loop_label_ = label; }
- void set_characters_preloaded(int count) { characters_preloaded_ = count; }
- void set_bound_checked_up_to(int to) { bound_checked_up_to_ = to; }
- void set_flush_budget(int to) { flush_budget_ = to; }
- void set_quick_check_performed(QuickCheckDetails* d) {
- quick_check_performed_ = *d;
- }
- void InvalidateCurrentCharacter();
- void AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler);
- private:
- int FindAffectedRegisters(OutSet* affected_registers);
- void PerformDeferredActions(RegExpMacroAssembler* macro,
- int max_register,
- OutSet& affected_registers,
- OutSet* registers_to_pop,
- OutSet* registers_to_clear);
- void RestoreAffectedRegisters(RegExpMacroAssembler* macro,
- int max_register,
- OutSet& registers_to_pop,
- OutSet& registers_to_clear);
- int cp_offset_;
- DeferredAction* actions_;
- Label* backtrack_;
- RegExpNode* stop_node_;
- Label* loop_label_;
- int characters_preloaded_;
- int bound_checked_up_to_;
- QuickCheckDetails quick_check_performed_;
- int flush_budget_;
- TriBool at_start_;
-};
-
-
-class NodeVisitor {
- public:
- virtual ~NodeVisitor() { }
-#define DECLARE_VISIT(Type) \
- virtual void Visit##Type(Type##Node* that) = 0;
-FOR_EACH_NODE_TYPE(DECLARE_VISIT)
-#undef DECLARE_VISIT
- virtual void VisitLoopChoice(LoopChoiceNode* that) { VisitChoice(that); }
-};
-
-
-// Node visitor used to add the start set of the alternatives to the
-// dispatch table of a choice node.
-class DispatchTableConstructor: public NodeVisitor {
- public:
- DispatchTableConstructor(DispatchTable* table, bool ignore_case)
- : table_(table),
- choice_index_(-1),
- ignore_case_(ignore_case) { }
-
- void BuildTable(ChoiceNode* node);
-
- void AddRange(CharacterRange range) {
- table()->AddRange(range, choice_index_);
- }
-
- void AddInverse(ZoneList<CharacterRange>* ranges);
-
-#define DECLARE_VISIT(Type) \
- virtual void Visit##Type(Type##Node* that);
-FOR_EACH_NODE_TYPE(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- DispatchTable* table() { return table_; }
- void set_choice_index(int value) { choice_index_ = value; }
-
- protected:
- DispatchTable* table_;
- int choice_index_;
- bool ignore_case_;
-};
-
-
-// Assertion propagation moves information about assertions such as
-// \b to the affected nodes. For instance, in /.\b./ information must
-// be propagated to the first '.' that whatever follows needs to know
-// if it matched a word or a non-word, and to the second '.' that it
-// has to check if it succeeds a word or non-word. In this case the
-// result will be something like:
-//
-// +-------+ +------------+
-// | . | | . |
-// +-------+ ---> +------------+
-// | word? | | check word |
-// +-------+ +------------+
-class Analysis: public NodeVisitor {
- public:
- Analysis(bool ignore_case, bool is_ascii)
- : ignore_case_(ignore_case),
- is_ascii_(is_ascii),
- error_message_(NULL) { }
- void EnsureAnalyzed(RegExpNode* node);
-
-#define DECLARE_VISIT(Type) \
- virtual void Visit##Type(Type##Node* that);
-FOR_EACH_NODE_TYPE(DECLARE_VISIT)
-#undef DECLARE_VISIT
- virtual void VisitLoopChoice(LoopChoiceNode* that);
-
- bool has_failed() { return error_message_ != NULL; }
- const char* error_message() {
- ASSERT(error_message_ != NULL);
- return error_message_;
- }
- void fail(const char* error_message) {
- error_message_ = error_message;
- }
- private:
- bool ignore_case_;
- bool is_ascii_;
- const char* error_message_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis);
-};
-
-
-struct RegExpCompileData {
- RegExpCompileData()
- : tree(NULL),
- node(NULL),
- simple(true),
- contains_anchor(false),
- capture_count(0) { }
- RegExpTree* tree;
- RegExpNode* node;
- bool simple;
- bool contains_anchor;
- Handle<String> error;
- int capture_count;
-};
-
-
-class RegExpEngine: public AllStatic {
- public:
- struct CompilationResult {
- explicit CompilationResult(const char* error_message)
- : error_message(error_message),
- code(HEAP->the_hole_value()),
- num_registers(0) {}
- CompilationResult(Object* code, int registers)
- : error_message(NULL),
- code(code),
- num_registers(registers) {}
- const char* error_message;
- Object* code;
- int num_registers;
- };
-
- static CompilationResult Compile(RegExpCompileData* input,
- bool ignore_case,
- bool multiline,
- Handle<String> pattern,
- bool is_ascii);
-
- static void DotPrint(const char* label, RegExpNode* node, bool ignore_case);
-};
-
-
-class OffsetsVector {
- public:
- inline OffsetsVector(int num_registers)
- : offsets_vector_length_(num_registers) {
- if (offsets_vector_length_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
- vector_ = NewArray<int>(offsets_vector_length_);
- } else {
- vector_ = Isolate::Current()->jsregexp_static_offsets_vector();
- }
- }
- inline ~OffsetsVector() {
- if (offsets_vector_length_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
- DeleteArray(vector_);
- vector_ = NULL;
- }
- }
- inline int* vector() { return vector_; }
- inline int length() { return offsets_vector_length_; }
-
- static const int kStaticOffsetsVectorSize = 50;
-
- private:
- static Address static_offsets_vector_address(Isolate* isolate) {
- return reinterpret_cast<Address>(isolate->jsregexp_static_offsets_vector());
- }
-
- int* vector_;
- int offsets_vector_length_;
-
- friend class ExternalReference;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_JSREGEXP_H_
diff --git a/src/3rdparty/v8/src/jump-target-heavy-inl.h b/src/3rdparty/v8/src/jump-target-heavy-inl.h
deleted file mode 100644
index 0a2a569..0000000
--- a/src/3rdparty/v8/src/jump-target-heavy-inl.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_HEAVY_INL_H_
-#define V8_JUMP_TARGET_HEAVY_INL_H_
-
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void JumpTarget::InitializeEntryElement(int index, FrameElement* target) {
- FrameElement* element = &entry_frame_->elements_[index];
- element->clear_copied();
- if (target->is_register()) {
- entry_frame_->set_register_location(target->reg(), index);
- } else if (target->is_copy()) {
- entry_frame_->elements_[target->index()].set_copied();
- }
- if (direction_ == BIDIRECTIONAL && !target->is_copy()) {
- element->set_type_info(TypeInfo::Unknown());
- }
-}
-
-} } // namespace v8::internal
-
-#endif // V8_JUMP_TARGET_HEAVY_INL_H_
diff --git a/src/3rdparty/v8/src/jump-target-heavy.cc b/src/3rdparty/v8/src/jump-target-heavy.cc
deleted file mode 100644
index f73e027..0000000
--- a/src/3rdparty/v8/src/jump-target-heavy.cc
+++ /dev/null
@@ -1,427 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-void JumpTarget::Jump(Result* arg) {
- ASSERT(cgen()->has_valid_frame());
-
- cgen()->frame()->Push(arg);
- DoJump();
-}
-
-
-void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) {
- ASSERT(cgen()->has_valid_frame());
-
- // We want to check that non-frame registers at the call site stay in
- // the same registers on the fall-through branch.
-#ifdef DEBUG
- Result::Type arg_type = arg->type();
- Register arg_reg = arg->is_register() ? arg->reg() : no_reg;
-#endif
-
- cgen()->frame()->Push(arg);
- DoBranch(cc, hint);
- *arg = cgen()->frame()->Pop();
-
- ASSERT(arg->type() == arg_type);
- ASSERT(!arg->is_register() || arg->reg().is(arg_reg));
-}
-
-
-void JumpTarget::Branch(Condition cc, Result* arg0, Result* arg1, Hint hint) {
- ASSERT(cgen()->has_valid_frame());
-
- // We want to check that non-frame registers at the call site stay in
- // the same registers on the fall-through branch.
-#ifdef DEBUG
- Result::Type arg0_type = arg0->type();
- Register arg0_reg = arg0->is_register() ? arg0->reg() : no_reg;
- Result::Type arg1_type = arg1->type();
- Register arg1_reg = arg1->is_register() ? arg1->reg() : no_reg;
-#endif
-
- cgen()->frame()->Push(arg0);
- cgen()->frame()->Push(arg1);
- DoBranch(cc, hint);
- *arg1 = cgen()->frame()->Pop();
- *arg0 = cgen()->frame()->Pop();
-
- ASSERT(arg0->type() == arg0_type);
- ASSERT(!arg0->is_register() || arg0->reg().is(arg0_reg));
- ASSERT(arg1->type() == arg1_type);
- ASSERT(!arg1->is_register() || arg1->reg().is(arg1_reg));
-}
-
-
-void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
- ASSERT(cgen()->has_valid_frame());
-
- int count = cgen()->frame()->height() - expected_height_;
- if (count > 0) {
- // We negate and branch here rather than using DoBranch's negate
- // and branch. This gives us a hook to remove statement state
- // from the frame.
- JumpTarget fall_through;
- // Branch to fall through will not negate, because it is a
- // forward-only target.
- fall_through.Branch(NegateCondition(cc), NegateHint(hint));
- Jump(arg); // May emit merge code here.
- fall_through.Bind();
- } else {
-#ifdef DEBUG
- Result::Type arg_type = arg->type();
- Register arg_reg = arg->is_register() ? arg->reg() : no_reg;
-#endif
- cgen()->frame()->Push(arg);
- DoBranch(cc, hint);
- *arg = cgen()->frame()->Pop();
- ASSERT(arg->type() == arg_type);
- ASSERT(!arg->is_register() || arg->reg().is(arg_reg));
- }
-}
-
-
-void JumpTarget::Bind(Result* arg) {
- if (cgen()->has_valid_frame()) {
- cgen()->frame()->Push(arg);
- }
- DoBind();
- *arg = cgen()->frame()->Pop();
-}
-
-
-void JumpTarget::Bind(Result* arg0, Result* arg1) {
- if (cgen()->has_valid_frame()) {
- cgen()->frame()->Push(arg0);
- cgen()->frame()->Push(arg1);
- }
- DoBind();
- *arg1 = cgen()->frame()->Pop();
- *arg0 = cgen()->frame()->Pop();
-}
-
-
-void JumpTarget::ComputeEntryFrame() {
- // Given: a collection of frames reaching by forward CFG edges and
- // the directionality of the block. Compute: an entry frame for the
- // block.
-
- Isolate::Current()->counters()->compute_entry_frame()->Increment();
-#ifdef DEBUG
- if (Isolate::Current()->jump_target_compiling_deferred_code()) {
- ASSERT(reaching_frames_.length() > 1);
- VirtualFrame* frame = reaching_frames_[0];
- bool all_identical = true;
- for (int i = 1; i < reaching_frames_.length(); i++) {
- if (!frame->Equals(reaching_frames_[i])) {
- all_identical = false;
- break;
- }
- }
- ASSERT(!all_identical || all_identical);
- }
-#endif
-
- // Choose an initial frame.
- VirtualFrame* initial_frame = reaching_frames_[0];
-
- // A list of pointers to frame elements in the entry frame. NULL
- // indicates that the element has not yet been determined.
- int length = initial_frame->element_count();
- ZoneList<FrameElement*> elements(length);
-
- // Initially populate the list of elements based on the initial
- // frame.
- for (int i = 0; i < length; i++) {
- FrameElement element = initial_frame->elements_[i];
- // We do not allow copies or constants in bidirectional frames.
- if (direction_ == BIDIRECTIONAL) {
- if (element.is_constant() || element.is_copy()) {
- elements.Add(NULL);
- continue;
- }
- }
- elements.Add(&initial_frame->elements_[i]);
- }
-
- // Compute elements based on the other reaching frames.
- if (reaching_frames_.length() > 1) {
- for (int i = 0; i < length; i++) {
- FrameElement* element = elements[i];
- for (int j = 1; j < reaching_frames_.length(); j++) {
- // Element computation is monotonic: new information will not
- // change our decision about undetermined or invalid elements.
- if (element == NULL || !element->is_valid()) break;
-
- FrameElement* other = &reaching_frames_[j]->elements_[i];
- element = element->Combine(other);
- if (element != NULL && !element->is_copy()) {
- ASSERT(other != NULL);
- // We overwrite the number information of one of the incoming frames.
- // This is safe because we only use the frame for emitting merge code.
- // The number information of incoming frames is not used anymore.
- element->set_type_info(TypeInfo::Combine(element->type_info(),
- other->type_info()));
- }
- }
- elements[i] = element;
- }
- }
-
- // Build the new frame. A freshly allocated frame has memory elements
- // for the parameters and some platform-dependent elements (e.g.,
- // return address). Replace those first.
- entry_frame_ = new VirtualFrame();
- int index = 0;
- for (; index < entry_frame_->element_count(); index++) {
- FrameElement* target = elements[index];
- // If the element is determined, set it now. Count registers. Mark
- // elements as copied exactly when they have a copy. Undetermined
- // elements are initially recorded as if in memory.
- if (target != NULL) {
- entry_frame_->elements_[index] = *target;
- InitializeEntryElement(index, target);
- }
- }
- // Then fill in the rest of the frame with new elements.
- for (; index < length; index++) {
- FrameElement* target = elements[index];
- if (target == NULL) {
- entry_frame_->elements_.Add(
- FrameElement::MemoryElement(TypeInfo::Uninitialized()));
- } else {
- entry_frame_->elements_.Add(*target);
- InitializeEntryElement(index, target);
- }
- }
-
- // Allocate any still-undetermined frame elements to registers or
- // memory, from the top down.
- for (int i = length - 1; i >= 0; i--) {
- if (elements[i] == NULL) {
- // Loop over all the reaching frames to check whether the element
- // is synced on all frames and to count the registers it occupies.
- bool is_synced = true;
- RegisterFile candidate_registers;
- int best_count = kMinInt;
- int best_reg_num = RegisterAllocator::kInvalidRegister;
- TypeInfo info = TypeInfo::Uninitialized();
-
- for (int j = 0; j < reaching_frames_.length(); j++) {
- FrameElement element = reaching_frames_[j]->elements_[i];
- if (direction_ == BIDIRECTIONAL) {
- info = TypeInfo::Unknown();
- } else if (!element.is_copy()) {
- info = TypeInfo::Combine(info, element.type_info());
- } else {
- // New elements will not be copies, so get number information from
- // backing element in the reaching frame.
- info = TypeInfo::Combine(info,
- reaching_frames_[j]->elements_[element.index()].type_info());
- }
- is_synced = is_synced && element.is_synced();
- if (element.is_register() && !entry_frame_->is_used(element.reg())) {
- // Count the register occurrence and remember it if better
- // than the previous best.
- int num = RegisterAllocator::ToNumber(element.reg());
- candidate_registers.Use(num);
- if (candidate_registers.count(num) > best_count) {
- best_count = candidate_registers.count(num);
- best_reg_num = num;
- }
- }
- }
-
- // We must have a number type information now (not for copied elements).
- ASSERT(entry_frame_->elements_[i].is_copy()
- || !info.IsUninitialized());
-
- // If the value is synced on all frames, put it in memory. This
- // costs nothing at the merge code but will incur a
- // memory-to-register move when the value is needed later.
- if (is_synced) {
- // Already recorded as a memory element.
- // Set combined number info.
- entry_frame_->elements_[i].set_type_info(info);
- continue;
- }
-
- // Try to put it in a register. If there was no best choice
- // consider any free register.
- if (best_reg_num == RegisterAllocator::kInvalidRegister) {
- for (int j = 0; j < RegisterAllocator::kNumRegisters; j++) {
- if (!entry_frame_->is_used(j)) {
- best_reg_num = j;
- break;
- }
- }
- }
-
- if (best_reg_num != RegisterAllocator::kInvalidRegister) {
- // If there was a register choice, use it. Preserve the copied
- // flag on the element.
- bool is_copied = entry_frame_->elements_[i].is_copied();
- Register reg = RegisterAllocator::ToRegister(best_reg_num);
- entry_frame_->elements_[i] =
- FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED,
- TypeInfo::Uninitialized());
- if (is_copied) entry_frame_->elements_[i].set_copied();
- entry_frame_->set_register_location(reg, i);
- }
- // Set combined number info.
- entry_frame_->elements_[i].set_type_info(info);
- }
- }
-
- // If we have incoming backward edges assert we forget all number information.
-#ifdef DEBUG
- if (direction_ == BIDIRECTIONAL) {
- for (int i = 0; i < length; ++i) {
- if (!entry_frame_->elements_[i].is_copy()) {
- ASSERT(entry_frame_->elements_[i].type_info().IsUnknown());
- }
- }
- }
-#endif
-
- // The stack pointer is at the highest synced element or the base of
- // the expression stack.
- int stack_pointer = length - 1;
- while (stack_pointer >= entry_frame_->expression_base_index() &&
- !entry_frame_->elements_[stack_pointer].is_synced()) {
- stack_pointer--;
- }
- entry_frame_->stack_pointer_ = stack_pointer;
-}
-
-
-FrameRegisterState::FrameRegisterState(VirtualFrame* frame) {
- // Copy the register locations from the code generator's frame.
- // These are the registers that will be spilled on entry to the
- // deferred code and restored on exit.
- int sp_offset = frame->fp_relative(frame->stack_pointer_);
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- int loc = frame->register_location(i);
- if (loc == VirtualFrame::kIllegalIndex) {
- registers_[i] = kIgnore;
- } else if (frame->elements_[loc].is_synced()) {
- // Needs to be restored on exit but not saved on entry.
- registers_[i] = frame->fp_relative(loc) | kSyncedFlag;
- } else {
- int offset = frame->fp_relative(loc);
- registers_[i] = (offset < sp_offset) ? kPush : offset;
- }
- }
-}
-
-
-void JumpTarget::Unuse() {
- reaching_frames_.Clear();
- merge_labels_.Clear();
- entry_frame_ = NULL;
- entry_label_.Unuse();
-}
-
-
-void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
- ASSERT(reaching_frames_.length() == merge_labels_.length());
- ASSERT(entry_frame_ == NULL);
- Label fresh;
- merge_labels_.Add(fresh);
- reaching_frames_.Add(frame);
-}
-
-
-// -------------------------------------------------------------------------
-// BreakTarget implementation.
-
-void BreakTarget::set_direction(Directionality direction) {
- JumpTarget::set_direction(direction);
- ASSERT(cgen()->has_valid_frame());
- expected_height_ = cgen()->frame()->height();
-}
-
-
-void BreakTarget::CopyTo(BreakTarget* destination) {
- ASSERT(destination != NULL);
- destination->direction_ = direction_;
- destination->reaching_frames_.Rewind(0);
- destination->reaching_frames_.AddAll(reaching_frames_);
- destination->merge_labels_.Rewind(0);
- destination->merge_labels_.AddAll(merge_labels_);
- destination->entry_frame_ = entry_frame_;
- destination->entry_label_ = entry_label_;
- destination->expected_height_ = expected_height_;
-}
-
-
-void BreakTarget::Branch(Condition cc, Hint hint) {
- ASSERT(cgen()->has_valid_frame());
-
- int count = cgen()->frame()->height() - expected_height_;
- if (count > 0) {
- // We negate and branch here rather than using DoBranch's negate
- // and branch. This gives us a hook to remove statement state
- // from the frame.
- JumpTarget fall_through;
- // Branch to fall through will not negate, because it is a
- // forward-only target.
- fall_through.Branch(NegateCondition(cc), NegateHint(hint));
- Jump(); // May emit merge code here.
- fall_through.Bind();
- } else {
- DoBranch(cc, hint);
- }
-}
-
-
-DeferredCode::DeferredCode()
- : masm_(CodeGeneratorScope::Current(Isolate::Current())->masm()),
- statement_position_(masm_->positions_recorder()->
- current_statement_position()),
- position_(masm_->positions_recorder()->current_position()),
- frame_state_(CodeGeneratorScope::Current(Isolate::Current())->frame()) {
- ASSERT(statement_position_ != RelocInfo::kNoPosition);
- ASSERT(position_ != RelocInfo::kNoPosition);
-
- CodeGeneratorScope::Current(Isolate::Current())->AddDeferred(this);
-#ifdef DEBUG
- comment_ = "";
-#endif
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/jump-target-heavy.h b/src/3rdparty/v8/src/jump-target-heavy.h
deleted file mode 100644
index bf97756..0000000
--- a/src/3rdparty/v8/src/jump-target-heavy.h
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_HEAVY_H_
-#define V8_JUMP_TARGET_HEAVY_H_
-
-#include "macro-assembler.h"
-#include "zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class FrameElement;
-class Result;
-class VirtualFrame;
-
-// -------------------------------------------------------------------------
-// Jump targets
-//
-// A jump target is an abstraction of a basic-block entry in generated
-// code. It collects all the virtual frames reaching the block by
-// forward jumps and pairs them with labels for the merge code along
-// all forward-reaching paths. When bound, an expected frame for the
-// block is determined and code is generated to merge to the expected
-// frame. For backward jumps, the merge code is generated at the edge
-// leaving the predecessor block.
-//
-// A jump target must have been reached via control flow (either by
-// jumping, branching, or falling through) at the time it is bound.
-// In particular, this means that at least one of the control-flow
-// graph edges reaching the target must be a forward edge.
-
-class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
- public:
- // Forward-only jump targets can only be reached by forward CFG edges.
- enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
-
- // Construct a jump target used to generate code and to provide
- // access to a current frame.
- explicit JumpTarget(Directionality direction)
- : direction_(direction),
- reaching_frames_(0),
- merge_labels_(0),
- entry_frame_(NULL) {
- }
-
- // Construct a jump target.
- JumpTarget()
- : direction_(FORWARD_ONLY),
- reaching_frames_(0),
- merge_labels_(0),
- entry_frame_(NULL) {
- }
-
- virtual ~JumpTarget() {}
-
- // Set the direction of the jump target.
- virtual void set_direction(Directionality direction) {
- direction_ = direction;
- }
-
- // Treat the jump target as a fresh one. The state is reset.
- void Unuse();
-
- inline CodeGenerator* cgen();
-
- Label* entry_label() { return &entry_label_; }
-
- VirtualFrame* entry_frame() const { return entry_frame_; }
- void set_entry_frame(VirtualFrame* frame) {
- entry_frame_ = frame;
- }
-
- // Predicates testing the state of the encapsulated label.
- bool is_bound() const { return entry_label_.is_bound(); }
- bool is_linked() const {
- return !is_bound() && !reaching_frames_.is_empty();
- }
- bool is_unused() const {
- // This is !is_bound() && !is_linked().
- return !is_bound() && reaching_frames_.is_empty();
- }
-
- // Emit a jump to the target. There must be a current frame at the
- // jump and there will be no current frame after the jump.
- virtual void Jump();
- virtual void Jump(Result* arg);
-
- // Emit a conditional branch to the target. There must be a current
- // frame at the branch. The current frame will fall through to the
- // code after the branch. The arg is a result that is live both at
- // the target and the fall-through.
- virtual void Branch(Condition cc, Hint hint = no_hint);
- virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
- void Branch(Condition cc,
- Result* arg0,
- Result* arg1,
- Hint hint = no_hint);
-
- // Bind a jump target. If there is no current frame at the binding
- // site, there must be at least one frame reaching via a forward
- // jump.
- virtual void Bind();
- virtual void Bind(Result* arg);
- void Bind(Result* arg0, Result* arg1);
-
- // Emit a call to a jump target. There must be a current frame at
- // the call. The frame at the target is the same as the current
- // frame except for an extra return address on top of it. The frame
- // after the call is the same as the frame before the call.
- void Call();
-
- protected:
- // Directionality flag set at initialization time.
- Directionality direction_;
-
- // A list of frames reaching this block via forward jumps.
- ZoneList<VirtualFrame*> reaching_frames_;
-
- // A parallel list of labels for merge code.
- ZoneList<Label> merge_labels_;
-
- // The frame used on entry to the block and expected at backward
- // jumps to the block. Set when the jump target is bound, but may
- // or may not be set for forward-only blocks.
- VirtualFrame* entry_frame_;
-
- // The actual entry label of the block.
- Label entry_label_;
-
- // Implementations of Jump, Branch, and Bind with all arguments and
- // return values using the virtual frame.
- void DoJump();
- void DoBranch(Condition cc, Hint hint);
- void DoBind();
-
- private:
- // Add a virtual frame reaching this labeled block via a forward jump,
- // and a corresponding merge code label.
- void AddReachingFrame(VirtualFrame* frame);
-
- // Perform initialization required during entry frame computation
- // after setting the virtual frame element at index in frame to be
- // target.
- inline void InitializeEntryElement(int index, FrameElement* target);
-
- // Compute a frame to use for entry to this block.
- void ComputeEntryFrame();
-
- DISALLOW_COPY_AND_ASSIGN(JumpTarget);
-};
-
-
-// -------------------------------------------------------------------------
-// Break targets
-//
-// A break target is a jump target that can be used to break out of a
-// statement that keeps extra state on the stack (eg, for/in or
-// try/finally). They know the expected stack height at the target
-// and will drop state from nested statements as part of merging.
-//
-// Break targets are used for return, break, and continue targets.
-
-class BreakTarget : public JumpTarget {
- public:
- // Construct a break target.
- BreakTarget() {}
- explicit BreakTarget(JumpTarget::Directionality direction)
- : JumpTarget(direction) { }
-
- virtual ~BreakTarget() {}
-
- // Set the direction of the break target.
- virtual void set_direction(Directionality direction);
-
- // Copy the state of this break target to the destination. The
- // lists of forward-reaching frames and merge-point labels are
- // copied. All virtual frame pointers are copied, not the
- // pointed-to frames. The previous state of the destination is
- // overwritten, without deallocating pointed-to virtual frames.
- void CopyTo(BreakTarget* destination);
-
- // Emit a jump to the target. There must be a current frame at the
- // jump and there will be no current frame after the jump.
- virtual void Jump();
- virtual void Jump(Result* arg);
-
- // Emit a conditional branch to the target. There must be a current
- // frame at the branch. The current frame will fall through to the
- // code after the branch.
- virtual void Branch(Condition cc, Hint hint = no_hint);
- virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
-
- // Bind a break target. If there is no current frame at the binding
- // site, there must be at least one frame reaching via a forward
- // jump.
- virtual void Bind();
- virtual void Bind(Result* arg);
-
- // Setter for expected height.
- void set_expected_height(int expected) { expected_height_ = expected; }
-
- private:
- // The expected height of the expression stack where the target will
- // be bound, statically known at initialization time.
- int expected_height_;
-
- DISALLOW_COPY_AND_ASSIGN(BreakTarget);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_JUMP_TARGET_HEAVY_H_
diff --git a/src/3rdparty/v8/src/jump-target-inl.h b/src/3rdparty/v8/src/jump-target-inl.h
deleted file mode 100644
index 545328c..0000000
--- a/src/3rdparty/v8/src/jump-target-inl.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_INL_H_
-#define V8_JUMP_TARGET_INL_H_
-
-#include "virtual-frame-inl.h"
-
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
-#include "jump-target-heavy-inl.h"
-#else
-#include "jump-target-light-inl.h"
-#endif
-
-namespace v8 {
-namespace internal {
-
-CodeGenerator* JumpTarget::cgen() {
- return CodeGeneratorScope::Current(Isolate::Current());
-}
-
-} } // namespace v8::internal
-
-#endif // V8_JUMP_TARGET_INL_H_
diff --git a/src/3rdparty/v8/src/jump-target-light-inl.h b/src/3rdparty/v8/src/jump-target-light-inl.h
deleted file mode 100644
index e8f1a5f..0000000
--- a/src/3rdparty/v8/src/jump-target-light-inl.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_LIGHT_INL_H_
-#define V8_JUMP_TARGET_LIGHT_INL_H_
-
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Construct a jump target.
-JumpTarget::JumpTarget(Directionality direction)
- : entry_frame_set_(false),
- direction_(direction),
- entry_frame_(kInvalidVirtualFrameInitializer) {
-}
-
-JumpTarget::JumpTarget()
- : entry_frame_set_(false),
- direction_(FORWARD_ONLY),
- entry_frame_(kInvalidVirtualFrameInitializer) {
-}
-
-
-BreakTarget::BreakTarget() { }
-BreakTarget::BreakTarget(JumpTarget::Directionality direction)
- : JumpTarget(direction) { }
-
-} } // namespace v8::internal
-
-#endif // V8_JUMP_TARGET_LIGHT_INL_H_
diff --git a/src/3rdparty/v8/src/jump-target-light.cc b/src/3rdparty/v8/src/jump-target-light.cc
deleted file mode 100644
index 1d89474..0000000
--- a/src/3rdparty/v8/src/jump-target-light.cc
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-DeferredCode::DeferredCode()
- : masm_(CodeGeneratorScope::Current(Isolate::Current())->masm()),
- statement_position_(masm_->positions_recorder()->
- current_statement_position()),
- position_(masm_->positions_recorder()->current_position()),
- frame_state_(*CodeGeneratorScope::Current(Isolate::Current())->frame()) {
- ASSERT(statement_position_ != RelocInfo::kNoPosition);
- ASSERT(position_ != RelocInfo::kNoPosition);
-
- CodeGeneratorScope::Current(Isolate::Current())->AddDeferred(this);
-
-#ifdef DEBUG
- comment_ = "";
-#endif
-}
-
-
-// -------------------------------------------------------------------------
-// BreakTarget implementation.
-
-
-void BreakTarget::SetExpectedHeight() {
- expected_height_ = cgen()->frame()->height();
-}
-
-
-void BreakTarget::Jump() {
- ASSERT(cgen()->has_valid_frame());
-
- int count = cgen()->frame()->height() - expected_height_;
- if (count > 0) {
- cgen()->frame()->Drop(count);
- }
- DoJump();
-}
-
-
-void BreakTarget::Branch(Condition cc, Hint hint) {
- if (cc == al) {
- Jump();
- return;
- }
-
- ASSERT(cgen()->has_valid_frame());
-
- int count = cgen()->frame()->height() - expected_height_;
- if (count > 0) {
- // We negate and branch here rather than using DoBranch's negate
- // and branch. This gives us a hook to remove statement state
- // from the frame.
- JumpTarget fall_through;
- // Branch to fall through will not negate, because it is a
- // forward-only target.
- fall_through.Branch(NegateCondition(cc), NegateHint(hint));
- // Emit merge code.
- cgen()->frame()->Drop(count);
- DoJump();
- fall_through.Bind();
- } else {
- DoBranch(cc, hint);
- }
-}
-
-
-void BreakTarget::Bind() {
- if (cgen()->has_valid_frame()) {
- int count = cgen()->frame()->height() - expected_height_;
- if (count > 0) {
- cgen()->frame()->Drop(count);
- }
- }
- DoBind();
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/jump-target-light.h b/src/3rdparty/v8/src/jump-target-light.h
deleted file mode 100644
index 0d65306..0000000
--- a/src/3rdparty/v8/src/jump-target-light.h
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_LIGHT_H_
-#define V8_JUMP_TARGET_LIGHT_H_
-
-#include "macro-assembler.h"
-#include "zone-inl.h"
-#include "virtual-frame.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class FrameElement;
-class Result;
-
-// -------------------------------------------------------------------------
-// Jump targets
-//
-// A jump target is an abstraction of a basic-block entry in generated
-// code. It collects all the virtual frames reaching the block by
-// forward jumps and pairs them with labels for the merge code along
-// all forward-reaching paths. When bound, an expected frame for the
-// block is determined and code is generated to merge to the expected
-// frame. For backward jumps, the merge code is generated at the edge
-// leaving the predecessor block.
-//
-// A jump target must have been reached via control flow (either by
-// jumping, branching, or falling through) at the time it is bound.
-// In particular, this means that at least one of the control-flow
-// graph edges reaching the target must be a forward edge.
-
-class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
- public:
- // Forward-only jump targets can only be reached by forward CFG edges.
- enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
-
- // Construct a jump target.
- explicit inline JumpTarget(Directionality direction);
-
- inline JumpTarget();
-
- virtual ~JumpTarget() {}
-
- void Unuse() {
- entry_frame_set_ = false;
- entry_label_.Unuse();
- }
-
- inline CodeGenerator* cgen();
-
- Label* entry_label() { return &entry_label_; }
-
- const VirtualFrame* entry_frame() const {
- return entry_frame_set_ ? &entry_frame_ : NULL;
- }
-
- void set_entry_frame(VirtualFrame* frame) {
- entry_frame_ = *frame;
- entry_frame_set_ = true;
- }
-
- // Predicates testing the state of the encapsulated label.
- bool is_bound() const { return entry_label_.is_bound(); }
- bool is_linked() const { return entry_label_.is_linked(); }
- bool is_unused() const { return entry_label_.is_unused(); }
-
- // Copy the state of this jump target to the destination.
- inline void CopyTo(JumpTarget* destination) {
- *destination = *this;
- }
-
- // Emit a jump to the target. There must be a current frame at the
- // jump and there will be no current frame after the jump.
- virtual void Jump();
-
- // Emit a conditional branch to the target. There must be a current
- // frame at the branch. The current frame will fall through to the
- // code after the branch.
- virtual void Branch(Condition cc, Hint hint = no_hint);
-
- // Bind a jump target. If there is no current frame at the binding
- // site, there must be at least one frame reaching via a forward
- // jump.
- virtual void Bind();
-
- // Emit a call to a jump target. There must be a current frame at
- // the call. The frame at the target is the same as the current
- // frame except for an extra return address on top of it. The frame
- // after the call is the same as the frame before the call.
- void Call();
-
- protected:
- // Has an entry frame been found?
- bool entry_frame_set_;
-
- // Can we branch backwards to this label?
- Directionality direction_;
-
- // The frame used on entry to the block and expected at backward
- // jumps to the block. Set the first time something branches to this
- // jump target.
- VirtualFrame entry_frame_;
-
- // The actual entry label of the block.
- Label entry_label_;
-
- // Implementations of Jump, Branch, and Bind with all arguments and
- // return values using the virtual frame.
- void DoJump();
- void DoBranch(Condition cc, Hint hint);
- void DoBind();
-};
-
-
-// -------------------------------------------------------------------------
-// Break targets
-//
-// A break target is a jump target that can be used to break out of a
-// statement that keeps extra state on the stack (eg, for/in or
-// try/finally). They know the expected stack height at the target
-// and will drop state from nested statements as part of merging.
-//
-// Break targets are used for return, break, and continue targets.
-
-class BreakTarget : public JumpTarget {
- public:
- // Construct a break target.
- inline BreakTarget();
-
- inline BreakTarget(JumpTarget::Directionality direction);
-
- virtual ~BreakTarget() {}
-
- // Copy the state of this jump target to the destination.
- inline void CopyTo(BreakTarget* destination) {
- *destination = *this;
- }
-
- // Emit a jump to the target. There must be a current frame at the
- // jump and there will be no current frame after the jump.
- virtual void Jump();
-
- // Emit a conditional branch to the target. There must be a current
- // frame at the branch. The current frame will fall through to the
- // code after the branch.
- virtual void Branch(Condition cc, Hint hint = no_hint);
-
- // Bind a break target. If there is no current frame at the binding
- // site, there must be at least one frame reaching via a forward
- // jump.
- virtual void Bind();
-
- // Setter for expected height.
- void set_expected_height(int expected) { expected_height_ = expected; }
-
- // Uses the current frame to set the expected height.
- void SetExpectedHeight();
-
- private:
- // The expected height of the expression stack where the target will
- // be bound, statically known at initialization time.
- int expected_height_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_JUMP_TARGET_LIGHT_H_
diff --git a/src/3rdparty/v8/src/jump-target.cc b/src/3rdparty/v8/src/jump-target.cc
deleted file mode 100644
index 72aada8..0000000
--- a/src/3rdparty/v8/src/jump-target.cc
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-void JumpTarget::Jump() {
- DoJump();
-}
-
-
-void JumpTarget::Branch(Condition cc, Hint hint) {
- DoBranch(cc, hint);
-}
-
-
-void JumpTarget::Bind() {
- DoBind();
-}
-
-
-// -------------------------------------------------------------------------
-// ShadowTarget implementation.
-
-ShadowTarget::ShadowTarget(BreakTarget* shadowed) {
- ASSERT(shadowed != NULL);
- other_target_ = shadowed;
-
-#ifdef DEBUG
- is_shadowing_ = true;
-#endif
- // While shadowing this shadow target saves the state of the original.
- shadowed->CopyTo(this);
-
- // The original's state is reset.
- shadowed->Unuse();
- ASSERT(cgen()->has_valid_frame());
- shadowed->set_expected_height(cgen()->frame()->height());
-}
-
-
-void ShadowTarget::StopShadowing() {
- ASSERT(is_shadowing_);
-
- // The states of this target, which was shadowed, and the original
- // target, which was shadowing, are swapped.
- BreakTarget temp;
- other_target_->CopyTo(&temp);
- CopyTo(other_target_);
- temp.CopyTo(this);
- temp.Unuse();
-
-#ifdef DEBUG
- is_shadowing_ = false;
-#endif
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/jump-target.h b/src/3rdparty/v8/src/jump-target.h
deleted file mode 100644
index a0d2686..0000000
--- a/src/3rdparty/v8/src/jump-target.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_H_
-#define V8_JUMP_TARGET_H_
-
-#if V8_TARGET_ARCH_IA32
-#include "jump-target-heavy.h"
-#elif V8_TARGET_ARCH_X64
-#include "jump-target-heavy.h"
-#elif V8_TARGET_ARCH_ARM
-#include "jump-target-light.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "jump-target-light.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Shadow break targets
-//
-// A shadow break target represents a break target that is temporarily
-// shadowed by another one (represented by the original during
-// shadowing). They are used to catch jumps to labels in certain
-// contexts, e.g. try blocks. After shadowing ends, the formerly
-// shadowed target is again represented by the original and the
-// ShadowTarget can be used as a jump target in its own right,
-// representing the formerly shadowing target.
-
-class ShadowTarget : public BreakTarget {
- public:
- // Construct a shadow jump target. After construction the shadow
- // target object holds the state of the original target, and the
- // original target is actually a fresh one that intercepts control
- // flow intended for the shadowed one.
- explicit ShadowTarget(BreakTarget* shadowed);
-
- virtual ~ShadowTarget() {}
-
- // End shadowing. After shadowing ends, the original jump target
- // again gives access to the formerly shadowed target and the shadow
- // target object gives access to the formerly shadowing target.
- void StopShadowing();
-
- // During shadowing, the currently shadowing target. After
- // shadowing, the target that was shadowed.
- BreakTarget* other_target() const { return other_target_; }
-
- private:
- // During shadowing, the currently shadowing target. After
- // shadowing, the target that was shadowed.
- BreakTarget* other_target_;
-
-#ifdef DEBUG
- bool is_shadowing_;
-#endif
-
- DISALLOW_COPY_AND_ASSIGN(ShadowTarget);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_JUMP_TARGET_H_
diff --git a/src/3rdparty/v8/src/list-inl.h b/src/3rdparty/v8/src/list-inl.h
deleted file mode 100644
index eeaea65..0000000
--- a/src/3rdparty/v8/src/list-inl.h
+++ /dev/null
@@ -1,206 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LIST_INL_H_
-#define V8_LIST_INL_H_
-
-#include "list.h"
-
-namespace v8 {
-namespace internal {
-
-
-template<typename T, class P>
-void List<T, P>::Add(const T& element) {
- if (length_ < capacity_) {
- data_[length_++] = element;
- } else {
- List<T, P>::ResizeAdd(element);
- }
-}
-
-
-template<typename T, class P>
-void List<T, P>::AddAll(const List<T, P>& other) {
- int result_length = length_ + other.length_;
- if (capacity_ < result_length) Resize(result_length);
- for (int i = 0; i < other.length_; i++) {
- data_[length_ + i] = other.data_[i];
- }
- length_ = result_length;
-}
-
-
-// Use two layers of inlining so that the non-inlined function can
-// use the same implementation as the inlined version.
-template<typename T, class P>
-void List<T, P>::ResizeAdd(const T& element) {
- ResizeAddInternal(element);
-}
-
-
-template<typename T, class P>
-void List<T, P>::ResizeAddInternal(const T& element) {
- ASSERT(length_ >= capacity_);
- // Grow the list capacity by 50%, but make sure to let it grow
- // even when the capacity is zero (possible initial case).
- int new_capacity = 1 + capacity_ + (capacity_ >> 1);
- // Since the element reference could be an element of the list, copy
- // it out of the old backing storage before resizing.
- T temp = element;
- Resize(new_capacity);
- data_[length_++] = temp;
-}
-
-
-template<typename T, class P>
-void List<T, P>::Resize(int new_capacity) {
- T* new_data = List<T, P>::NewData(new_capacity);
- memcpy(new_data, data_, capacity_ * sizeof(T));
- List<T, P>::DeleteData(data_);
- data_ = new_data;
- capacity_ = new_capacity;
-}
-
-
-template<typename T, class P>
-Vector<T> List<T, P>::AddBlock(T value, int count) {
- int start = length_;
- for (int i = 0; i < count; i++) Add(value);
- return Vector<T>(&data_[start], count);
-}
-
-
-template<typename T, class P>
-void List<T, P>::InsertAt(int index, const T& elm) {
- ASSERT(index >= 0 && index <= length_);
- Add(elm);
- for (int i = length_ - 1; i > index; --i) {
- data_[i] = data_[i - 1];
- }
- data_[index] = elm;
-}
-
-
-template<typename T, class P>
-T List<T, P>::Remove(int i) {
- T element = at(i);
- length_--;
- while (i < length_) {
- data_[i] = data_[i + 1];
- i++;
- }
- return element;
-}
-
-
-template<typename T, class P>
-bool List<T, P>::RemoveElement(const T& elm) {
- for (int i = 0; i < length_; i++) {
- if (data_[i] == elm) {
- Remove(i);
- return true;
- }
- }
- return false;
-}
-
-
-template<typename T, class P>
-void List<T, P>::Clear() {
- DeleteData(data_);
- Initialize(0);
-}
-
-
-template<typename T, class P>
-void List<T, P>::Rewind(int pos) {
- length_ = pos;
-}
-
-
-template<typename T, class P>
-void List<T, P>::Iterate(void (*callback)(T* x)) {
- for (int i = 0; i < length_; i++) callback(&data_[i]);
-}
-
-
-template<typename T, class P>
-template<class Visitor>
-void List<T, P>::Iterate(Visitor* visitor) {
- for (int i = 0; i < length_; i++) visitor->Apply(&data_[i]);
-}
-
-
-template<typename T, class P>
-bool List<T, P>::Contains(const T& elm) const {
- for (int i = 0; i < length_; i++) {
- if (data_[i] == elm)
- return true;
- }
- return false;
-}
-
-
-template<typename T, class P>
-int List<T, P>::CountOccurrences(const T& elm, int start, int end) const {
- int result = 0;
- for (int i = start; i <= end; i++) {
- if (data_[i] == elm) ++result;
- }
- return result;
-}
-
-
-template<typename T, class P>
-void List<T, P>::Sort(int (*cmp)(const T* x, const T* y)) {
- ToVector().Sort(cmp);
-#ifdef DEBUG
- for (int i = 1; i < length_; i++)
- ASSERT(cmp(&data_[i - 1], &data_[i]) <= 0);
-#endif
-}
-
-
-template<typename T, class P>
-void List<T, P>::Sort() {
- Sort(PointerValueCompare<T>);
-}
-
-
-template<typename T, class P>
-void List<T, P>::Initialize(int capacity) {
- ASSERT(capacity >= 0);
- data_ = (capacity > 0) ? NewData(capacity) : NULL;
- capacity_ = capacity;
- length_ = 0;
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_LIST_INL_H_
diff --git a/src/3rdparty/v8/src/list.h b/src/3rdparty/v8/src/list.h
deleted file mode 100644
index 9a2e698..0000000
--- a/src/3rdparty/v8/src/list.h
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LIST_H_
-#define V8_LIST_H_
-
-namespace v8 {
-namespace internal {
-
-
-// ----------------------------------------------------------------------------
-// The list is a template for very light-weight lists. We are not
-// using the STL because we want full control over space and speed of
-// the code. This implementation is based on code by Robert Griesemer
-// and Rob Pike.
-//
-// The list is parameterized by the type of its elements (T) and by an
-// allocation policy (P). The policy is used for allocating lists in
-// the C free store or the zone; see zone.h.
-
-// Forward defined as
-// template <typename T, class P = FreeStoreAllocationPolicy> class List;
-template <typename T, class P>
-class List {
- public:
-
- List() { Initialize(0); }
- INLINE(explicit List(int capacity)) { Initialize(capacity); }
- INLINE(~List()) { DeleteData(data_); }
-
- // Deallocates memory used by the list and leaves the list in a consistent
- // empty state.
- void Free() {
- DeleteData(data_);
- Initialize(0);
- }
-
- INLINE(void* operator new(size_t size)) {
- return P::New(static_cast<int>(size));
- }
- INLINE(void operator delete(void* p, size_t)) { return P::Delete(p); }
-
- // Returns a reference to the element at index i. This reference is
- // not safe to use after operations that can change the list's
- // backing store (eg, Add).
- inline T& operator[](int i) const {
- ASSERT(0 <= i);
- ASSERT(i < length_);
- return data_[i];
- }
- inline T& at(int i) const { return operator[](i); }
- inline T& last() const { return at(length_ - 1); }
- inline T& first() const { return at(0); }
-
- INLINE(bool is_empty() const) { return length_ == 0; }
- INLINE(int length() const) { return length_; }
- INLINE(int capacity() const) { return capacity_; }
-
- Vector<T> ToVector() { return Vector<T>(data_, length_); }
-
- Vector<const T> ToConstVector() { return Vector<const T>(data_, length_); }
-
- // Adds a copy of the given 'element' to the end of the list,
- // expanding the list if necessary.
- void Add(const T& element);
-
- // Add all the elements from the argument list to this list.
- void AddAll(const List<T, P>& other);
-
- // Inserts the element at the specific index.
- void InsertAt(int index, const T& element);
-
- // Added 'count' elements with the value 'value' and returns a
- // vector that allows access to the elements. The vector is valid
- // until the next change is made to this list.
- Vector<T> AddBlock(T value, int count);
-
- // Removes the i'th element without deleting it even if T is a
- // pointer type; moves all elements above i "down". Returns the
- // removed element. This function's complexity is linear in the
- // size of the list.
- T Remove(int i);
-
- // Remove the given element from the list. Returns whether or not
- // the input is included in the list in the first place.
- bool RemoveElement(const T& elm);
-
- // Removes the last element without deleting it even if T is a
- // pointer type. Returns the removed element.
- INLINE(T RemoveLast()) { return Remove(length_ - 1); }
-
- // Clears the list by setting the length to zero. Even if T is a
- // pointer type, clearing the list doesn't delete the entries.
- INLINE(void Clear());
-
- // Drops all but the first 'pos' elements from the list.
- INLINE(void Rewind(int pos));
-
- // Drop the last 'count' elements from the list.
- INLINE(void RewindBy(int count)) { Rewind(length_ - count); }
-
- bool Contains(const T& elm) const;
- int CountOccurrences(const T& elm, int start, int end) const;
-
- // Iterate through all list entries, starting at index 0.
- void Iterate(void (*callback)(T* x));
- template<class Visitor>
- void Iterate(Visitor* visitor);
-
- // Sort all list entries (using QuickSort)
- void Sort(int (*cmp)(const T* x, const T* y));
- void Sort();
-
- INLINE(void Initialize(int capacity));
-
- private:
- T* data_;
- int capacity_;
- int length_;
-
- INLINE(T* NewData(int n)) { return static_cast<T*>(P::New(n * sizeof(T))); }
- INLINE(void DeleteData(T* data)) { P::Delete(data); }
-
- // Increase the capacity of a full list, and add an element.
- // List must be full already.
- void ResizeAdd(const T& element);
-
- // Inlined implementation of ResizeAdd, shared by inlined and
- // non-inlined versions of ResizeAdd.
- void ResizeAddInternal(const T& element);
-
- // Resize the list.
- void Resize(int new_capacity);
-
- DISALLOW_COPY_AND_ASSIGN(List);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_LIST_H_
diff --git a/src/3rdparty/v8/src/lithium-allocator-inl.h b/src/3rdparty/v8/src/lithium-allocator-inl.h
deleted file mode 100644
index c0beaaf..0000000
--- a/src/3rdparty/v8/src/lithium-allocator-inl.h
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LITHIUM_ALLOCATOR_INL_H_
-#define V8_LITHIUM_ALLOCATOR_INL_H_
-
-#include "lithium-allocator.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/lithium-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/lithium-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/lithium-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/lithium-mips.h"
-#else
-#error "Unknown architecture."
-#endif
-
-namespace v8 {
-namespace internal {
-
-bool LAllocator::IsGapAt(int index) { return chunk_->IsGapAt(index); }
-
-
-LInstruction* LAllocator::InstructionAt(int index) {
- return chunk_->instructions()->at(index);
-}
-
-
-LGap* LAllocator::GapAt(int index) {
- return chunk_->GetGapAt(index);
-}
-
-
-TempIterator::TempIterator(LInstruction* instr)
- : instr_(instr),
- limit_(instr->TempCount()),
- current_(0) {
- current_ = AdvanceToNext(0);
-}
-
-
-bool TempIterator::HasNext() { return current_ < limit_; }
-
-
-LOperand* TempIterator::Next() {
- ASSERT(HasNext());
- return instr_->TempAt(current_);
-}
-
-
-int TempIterator::AdvanceToNext(int start) {
- while (start < limit_ && instr_->TempAt(start) == NULL) start++;
- return start;
-}
-
-
-void TempIterator::Advance() {
- current_ = AdvanceToNext(current_ + 1);
-}
-
-
-InputIterator::InputIterator(LInstruction* instr)
- : instr_(instr),
- limit_(instr->InputCount()),
- current_(0) {
- current_ = AdvanceToNext(0);
-}
-
-
-bool InputIterator::HasNext() { return current_ < limit_; }
-
-
-LOperand* InputIterator::Next() {
- ASSERT(HasNext());
- return instr_->InputAt(current_);
-}
-
-
-void InputIterator::Advance() {
- current_ = AdvanceToNext(current_ + 1);
-}
-
-
-int InputIterator::AdvanceToNext(int start) {
- while (start < limit_ && instr_->InputAt(start)->IsConstantOperand()) start++;
- return start;
-}
-
-
-UseIterator::UseIterator(LInstruction* instr)
- : input_iterator_(instr), env_iterator_(instr->environment()) { }
-
-
-bool UseIterator::HasNext() {
- return input_iterator_.HasNext() || env_iterator_.HasNext();
-}
-
-
-LOperand* UseIterator::Next() {
- ASSERT(HasNext());
- return input_iterator_.HasNext()
- ? input_iterator_.Next()
- : env_iterator_.Next();
-}
-
-
-void UseIterator::Advance() {
- input_iterator_.HasNext()
- ? input_iterator_.Advance()
- : env_iterator_.Advance();
-}
-
-} } // namespace v8::internal
-
-#endif // V8_LITHIUM_ALLOCATOR_INL_H_
diff --git a/src/3rdparty/v8/src/lithium-allocator.cc b/src/3rdparty/v8/src/lithium-allocator.cc
deleted file mode 100644
index f62a7db..0000000
--- a/src/3rdparty/v8/src/lithium-allocator.cc
+++ /dev/null
@@ -1,2105 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "lithium-allocator-inl.h"
-
-#include "hydrogen.h"
-#include "string-stream.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/lithium-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/lithium-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/lithium-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/lithium-mips.h"
-#else
-#error "Unknown architecture."
-#endif
-
-namespace v8 {
-namespace internal {
-
-
-#define DEFINE_OPERAND_CACHE(name, type) \
- name name::cache[name::kNumCachedOperands]; \
- void name::SetupCache() { \
- for (int i = 0; i < kNumCachedOperands; i++) { \
- cache[i].ConvertTo(type, i); \
- } \
- } \
- static bool name##_initialize() { \
- name::SetupCache(); \
- return true; \
- } \
- static bool name##_cache_initialized = name##_initialize();
-
-DEFINE_OPERAND_CACHE(LConstantOperand, CONSTANT_OPERAND)
-DEFINE_OPERAND_CACHE(LStackSlot, STACK_SLOT)
-DEFINE_OPERAND_CACHE(LDoubleStackSlot, DOUBLE_STACK_SLOT)
-DEFINE_OPERAND_CACHE(LRegister, REGISTER)
-DEFINE_OPERAND_CACHE(LDoubleRegister, DOUBLE_REGISTER)
-
-#undef DEFINE_OPERAND_CACHE
-
-
-static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
- return a.Value() < b.Value() ? a : b;
-}
-
-
-static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) {
- return a.Value() > b.Value() ? a : b;
-}
-
-
-UsePosition::UsePosition(LifetimePosition pos, LOperand* operand)
- : operand_(operand),
- hint_(NULL),
- pos_(pos),
- next_(NULL),
- requires_reg_(false),
- register_beneficial_(true) {
- if (operand_ != NULL && operand_->IsUnallocated()) {
- LUnallocated* unalloc = LUnallocated::cast(operand_);
- requires_reg_ = unalloc->HasRegisterPolicy();
- register_beneficial_ = !unalloc->HasAnyPolicy();
- }
- ASSERT(pos_.IsValid());
-}
-
-
-bool UsePosition::HasHint() const {
- return hint_ != NULL && !hint_->IsUnallocated();
-}
-
-
-bool UsePosition::RequiresRegister() const {
- return requires_reg_;
-}
-
-
-bool UsePosition::RegisterIsBeneficial() const {
- return register_beneficial_;
-}
-
-
-void UseInterval::SplitAt(LifetimePosition pos) {
- ASSERT(Contains(pos) && pos.Value() != start().Value());
- UseInterval* after = new UseInterval(pos, end_);
- after->next_ = next_;
- next_ = after;
- end_ = pos;
-}
-
-
-#ifdef DEBUG
-
-
-void LiveRange::Verify() const {
- UsePosition* cur = first_pos_;
- while (cur != NULL) {
- ASSERT(Start().Value() <= cur->pos().Value() &&
- cur->pos().Value() <= End().Value());
- cur = cur->next();
- }
-}
-
-
-bool LiveRange::HasOverlap(UseInterval* target) const {
- UseInterval* current_interval = first_interval_;
- while (current_interval != NULL) {
- // Intervals overlap if the start of one is contained in the other.
- if (current_interval->Contains(target->start()) ||
- target->Contains(current_interval->start())) {
- return true;
- }
- current_interval = current_interval->next();
- }
- return false;
-}
-
-
-#endif
-
-
-LiveRange::LiveRange(int id)
- : id_(id),
- spilled_(false),
- assigned_register_(kInvalidAssignment),
- assigned_register_kind_(NONE),
- last_interval_(NULL),
- first_interval_(NULL),
- first_pos_(NULL),
- parent_(NULL),
- next_(NULL),
- current_interval_(NULL),
- last_processed_use_(NULL),
- spill_start_index_(kMaxInt) {
- spill_operand_ = new LUnallocated(LUnallocated::IGNORE);
-}
-
-
-void LiveRange::set_assigned_register(int reg, RegisterKind register_kind) {
- ASSERT(!HasRegisterAssigned() && !IsSpilled());
- assigned_register_ = reg;
- assigned_register_kind_ = register_kind;
- ConvertOperands();
-}
-
-
-void LiveRange::MakeSpilled() {
- ASSERT(!IsSpilled());
- ASSERT(TopLevel()->HasAllocatedSpillOperand());
- spilled_ = true;
- assigned_register_ = kInvalidAssignment;
- ConvertOperands();
-}
-
-
-bool LiveRange::HasAllocatedSpillOperand() const {
- return spill_operand_ != NULL && !spill_operand_->IsUnallocated();
-}
-
-
-void LiveRange::SetSpillOperand(LOperand* operand) {
- ASSERT(!operand->IsUnallocated());
- ASSERT(spill_operand_ != NULL);
- ASSERT(spill_operand_->IsUnallocated());
- spill_operand_->ConvertTo(operand->kind(), operand->index());
-}
-
-
-UsePosition* LiveRange::NextUsePosition(LifetimePosition start) {
- UsePosition* use_pos = last_processed_use_;
- if (use_pos == NULL) use_pos = first_pos();
- while (use_pos != NULL && use_pos->pos().Value() < start.Value()) {
- use_pos = use_pos->next();
- }
- last_processed_use_ = use_pos;
- return use_pos;
-}
-
-
-UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
- LifetimePosition start) {
- UsePosition* pos = NextUsePosition(start);
- while (pos != NULL && !pos->RegisterIsBeneficial()) {
- pos = pos->next();
- }
- return pos;
-}
-
-
-UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) {
- UsePosition* pos = NextUsePosition(start);
- while (pos != NULL && !pos->RequiresRegister()) {
- pos = pos->next();
- }
- return pos;
-}
-
-
-bool LiveRange::CanBeSpilled(LifetimePosition pos) {
- // TODO(kmillikin): Comment. Now.
- if (pos.Value() <= Start().Value() && HasRegisterAssigned()) return false;
-
- // We cannot spill a live range that has a use requiring a register
- // at the current or the immediate next position.
- UsePosition* use_pos = NextRegisterPosition(pos);
- if (use_pos == NULL) return true;
- return use_pos->pos().Value() > pos.NextInstruction().Value();
-}
-
-
-UsePosition* LiveRange::FirstPosWithHint() const {
- UsePosition* pos = first_pos_;
- while (pos != NULL && !pos->HasHint()) pos = pos->next();
- return pos;
-}
-
-
-LOperand* LiveRange::CreateAssignedOperand() {
- LOperand* op = NULL;
- if (HasRegisterAssigned()) {
- ASSERT(!IsSpilled());
- if (IsDouble()) {
- op = LDoubleRegister::Create(assigned_register());
- } else {
- op = LRegister::Create(assigned_register());
- }
- } else if (IsSpilled()) {
- ASSERT(!HasRegisterAssigned());
- op = TopLevel()->GetSpillOperand();
- ASSERT(!op->IsUnallocated());
- } else {
- LUnallocated* unalloc = new LUnallocated(LUnallocated::NONE);
- unalloc->set_virtual_register(id_);
- op = unalloc;
- }
- return op;
-}
-
-
-UseInterval* LiveRange::FirstSearchIntervalForPosition(
- LifetimePosition position) const {
- if (current_interval_ == NULL) return first_interval_;
- if (current_interval_->start().Value() > position.Value()) {
- current_interval_ = NULL;
- return first_interval_;
- }
- return current_interval_;
-}
-
-
-void LiveRange::AdvanceLastProcessedMarker(
- UseInterval* to_start_of, LifetimePosition but_not_past) const {
- if (to_start_of == NULL) return;
- if (to_start_of->start().Value() > but_not_past.Value()) return;
- LifetimePosition start =
- current_interval_ == NULL ? LifetimePosition::Invalid()
- : current_interval_->start();
- if (to_start_of->start().Value() > start.Value()) {
- current_interval_ = to_start_of;
- }
-}
-
-
-void LiveRange::SplitAt(LifetimePosition position, LiveRange* result) {
- ASSERT(Start().Value() < position.Value());
- ASSERT(result->IsEmpty());
- // Find the last interval that ends before the position. If the
- // position is contained in one of the intervals in the chain, we
- // split that interval and use the first part.
- UseInterval* current = FirstSearchIntervalForPosition(position);
-
- // If the split position coincides with the beginning of a use interval
- // we need to split use positons in a special way.
- bool split_at_start = false;
-
- while (current != NULL) {
- if (current->Contains(position)) {
- current->SplitAt(position);
- break;
- }
- UseInterval* next = current->next();
- if (next->start().Value() >= position.Value()) {
- split_at_start = (next->start().Value() == position.Value());
- break;
- }
- current = next;
- }
-
- // Partition original use intervals to the two live ranges.
- UseInterval* before = current;
- UseInterval* after = before->next();
- result->last_interval_ = (last_interval_ == before)
- ? after // Only interval in the range after split.
- : last_interval_; // Last interval of the original range.
- result->first_interval_ = after;
- last_interval_ = before;
-
- // Find the last use position before the split and the first use
- // position after it.
- UsePosition* use_after = first_pos_;
- UsePosition* use_before = NULL;
- if (split_at_start) {
- // The split position coincides with the beginning of a use interval (the
- // end of a lifetime hole). Use at this position should be attributed to
- // the split child because split child owns use interval covering it.
- while (use_after != NULL && use_after->pos().Value() < position.Value()) {
- use_before = use_after;
- use_after = use_after->next();
- }
- } else {
- while (use_after != NULL && use_after->pos().Value() <= position.Value()) {
- use_before = use_after;
- use_after = use_after->next();
- }
- }
-
- // Partition original use positions to the two live ranges.
- if (use_before != NULL) {
- use_before->next_ = NULL;
- } else {
- first_pos_ = NULL;
- }
- result->first_pos_ = use_after;
-
- // Link the new live range in the chain before any of the other
- // ranges linked from the range before the split.
- result->parent_ = (parent_ == NULL) ? this : parent_;
- result->next_ = next_;
- next_ = result;
-
-#ifdef DEBUG
- Verify();
- result->Verify();
-#endif
-}
-
-
-// This implements an ordering on live ranges so that they are ordered by their
-// start positions. This is needed for the correctness of the register
-// allocation algorithm. If two live ranges start at the same offset then there
-// is a tie breaker based on where the value is first used. This part of the
-// ordering is merely a heuristic.
-bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
- LifetimePosition start = Start();
- LifetimePosition other_start = other->Start();
- if (start.Value() == other_start.Value()) {
- UsePosition* pos = FirstPosWithHint();
- if (pos == NULL) return false;
- UsePosition* other_pos = other->first_pos();
- if (other_pos == NULL) return true;
- return pos->pos().Value() < other_pos->pos().Value();
- }
- return start.Value() < other_start.Value();
-}
-
-
-void LiveRange::ShortenTo(LifetimePosition start) {
- LAllocator::TraceAlloc("Shorten live range %d to [%d\n", id_, start.Value());
- ASSERT(first_interval_ != NULL);
- ASSERT(first_interval_->start().Value() <= start.Value());
- ASSERT(start.Value() < first_interval_->end().Value());
- first_interval_->set_start(start);
-}
-
-
-void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end) {
- LAllocator::TraceAlloc("Ensure live range %d in interval [%d %d[\n",
- id_,
- start.Value(),
- end.Value());
- LifetimePosition new_end = end;
- while (first_interval_ != NULL &&
- first_interval_->start().Value() <= end.Value()) {
- if (first_interval_->end().Value() > end.Value()) {
- new_end = first_interval_->end();
- }
- first_interval_ = first_interval_->next();
- }
-
- UseInterval* new_interval = new UseInterval(start, new_end);
- new_interval->next_ = first_interval_;
- first_interval_ = new_interval;
- if (new_interval->next() == NULL) {
- last_interval_ = new_interval;
- }
-}
-
-
-void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end) {
- LAllocator::TraceAlloc("Add to live range %d interval [%d %d[\n",
- id_,
- start.Value(),
- end.Value());
- if (first_interval_ == NULL) {
- UseInterval* interval = new UseInterval(start, end);
- first_interval_ = interval;
- last_interval_ = interval;
- } else {
- if (end.Value() == first_interval_->start().Value()) {
- first_interval_->set_start(start);
- } else if (end.Value() < first_interval_->start().Value()) {
- UseInterval* interval = new UseInterval(start, end);
- interval->set_next(first_interval_);
- first_interval_ = interval;
- } else {
- // Order of instruction's processing (see ProcessInstructions) guarantees
- // that each new use interval either precedes or intersects with
- // last added interval.
- ASSERT(start.Value() < first_interval_->end().Value());
- first_interval_->start_ = Min(start, first_interval_->start_);
- first_interval_->end_ = Max(end, first_interval_->end_);
- }
- }
-}
-
-
-UsePosition* LiveRange::AddUsePosition(LifetimePosition pos,
- LOperand* operand) {
- LAllocator::TraceAlloc("Add to live range %d use position %d\n",
- id_,
- pos.Value());
- UsePosition* use_pos = new UsePosition(pos, operand);
- UsePosition* prev = NULL;
- UsePosition* current = first_pos_;
- while (current != NULL && current->pos().Value() < pos.Value()) {
- prev = current;
- current = current->next();
- }
-
- if (prev == NULL) {
- use_pos->set_next(first_pos_);
- first_pos_ = use_pos;
- } else {
- use_pos->next_ = prev->next_;
- prev->next_ = use_pos;
- }
-
- return use_pos;
-}
-
-
-void LiveRange::ConvertOperands() {
- LOperand* op = CreateAssignedOperand();
- UsePosition* use_pos = first_pos();
- while (use_pos != NULL) {
- ASSERT(Start().Value() <= use_pos->pos().Value() &&
- use_pos->pos().Value() <= End().Value());
-
- if (use_pos->HasOperand()) {
- ASSERT(op->IsRegister() || op->IsDoubleRegister() ||
- !use_pos->RequiresRegister());
- use_pos->operand()->ConvertTo(op->kind(), op->index());
- }
- use_pos = use_pos->next();
- }
-}
-
-
-bool LiveRange::CanCover(LifetimePosition position) const {
- if (IsEmpty()) return false;
- return Start().Value() <= position.Value() &&
- position.Value() < End().Value();
-}
-
-
-bool LiveRange::Covers(LifetimePosition position) {
- if (!CanCover(position)) return false;
- UseInterval* start_search = FirstSearchIntervalForPosition(position);
- for (UseInterval* interval = start_search;
- interval != NULL;
- interval = interval->next()) {
- ASSERT(interval->next() == NULL ||
- interval->next()->start().Value() >= interval->start().Value());
- AdvanceLastProcessedMarker(interval, position);
- if (interval->Contains(position)) return true;
- if (interval->start().Value() > position.Value()) return false;
- }
- return false;
-}
-
-
-LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
- UseInterval* b = other->first_interval();
- if (b == NULL) return LifetimePosition::Invalid();
- LifetimePosition advance_last_processed_up_to = b->start();
- UseInterval* a = FirstSearchIntervalForPosition(b->start());
- while (a != NULL && b != NULL) {
- if (a->start().Value() > other->End().Value()) break;
- if (b->start().Value() > End().Value()) break;
- LifetimePosition cur_intersection = a->Intersect(b);
- if (cur_intersection.IsValid()) {
- return cur_intersection;
- }
- if (a->start().Value() < b->start().Value()) {
- a = a->next();
- if (a == NULL || a->start().Value() > other->End().Value()) break;
- AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
- } else {
- b = b->next();
- }
- }
- return LifetimePosition::Invalid();
-}
-
-
-LAllocator::LAllocator(int num_values, HGraph* graph)
- : chunk_(NULL),
- live_in_sets_(graph->blocks()->length()),
- live_ranges_(num_values * 2),
- fixed_live_ranges_(NULL),
- fixed_double_live_ranges_(NULL),
- unhandled_live_ranges_(num_values * 2),
- active_live_ranges_(8),
- inactive_live_ranges_(8),
- reusable_slots_(8),
- next_virtual_register_(num_values),
- first_artificial_register_(num_values),
- mode_(NONE),
- num_registers_(-1),
- graph_(graph),
- has_osr_entry_(false) {}
-
-
-void LAllocator::InitializeLivenessAnalysis() {
- // Initialize the live_in sets for each block to NULL.
- int block_count = graph_->blocks()->length();
- live_in_sets_.Initialize(block_count);
- live_in_sets_.AddBlock(NULL, block_count);
-}
-
-
-BitVector* LAllocator::ComputeLiveOut(HBasicBlock* block) {
- // Compute live out for the given block, except not including backward
- // successor edges.
- BitVector* live_out = new BitVector(next_virtual_register_);
-
- // Process all successor blocks.
- HBasicBlock* successor = block->end()->FirstSuccessor();
- while (successor != NULL) {
- // Add values live on entry to the successor. Note the successor's
- // live_in will not be computed yet for backwards edges.
- BitVector* live_in = live_in_sets_[successor->block_id()];
- if (live_in != NULL) live_out->Union(*live_in);
-
- // All phi input operands corresponding to this successor edge are live
- // out from this block.
- int index = successor->PredecessorIndexOf(block);
- const ZoneList<HPhi*>* phis = successor->phis();
- for (int i = 0; i < phis->length(); ++i) {
- HPhi* phi = phis->at(i);
- if (!phi->OperandAt(index)->IsConstant()) {
- live_out->Add(phi->OperandAt(index)->id());
- }
- }
-
- // Check if we are done with second successor.
- if (successor == block->end()->SecondSuccessor()) break;
-
- successor = block->end()->SecondSuccessor();
- }
-
- return live_out;
-}
-
-
-void LAllocator::AddInitialIntervals(HBasicBlock* block,
- BitVector* live_out) {
- // Add an interval that includes the entire block to the live range for
- // each live_out value.
- LifetimePosition start = LifetimePosition::FromInstructionIndex(
- block->first_instruction_index());
- LifetimePosition end = LifetimePosition::FromInstructionIndex(
- block->last_instruction_index()).NextInstruction();
- BitVector::Iterator iterator(live_out);
- while (!iterator.Done()) {
- int operand_index = iterator.Current();
- LiveRange* range = LiveRangeFor(operand_index);
- range->AddUseInterval(start, end);
- iterator.Advance();
- }
-}
-
-
-int LAllocator::FixedDoubleLiveRangeID(int index) {
- return -index - 1 - Register::kNumAllocatableRegisters;
-}
-
-
-LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
- int pos,
- bool is_tagged) {
- TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
- ASSERT(operand->HasFixedPolicy());
- if (operand->policy() == LUnallocated::FIXED_SLOT) {
- operand->ConvertTo(LOperand::STACK_SLOT, operand->fixed_index());
- } else if (operand->policy() == LUnallocated::FIXED_REGISTER) {
- int reg_index = operand->fixed_index();
- operand->ConvertTo(LOperand::REGISTER, reg_index);
- } else if (operand->policy() == LUnallocated::FIXED_DOUBLE_REGISTER) {
- int reg_index = operand->fixed_index();
- operand->ConvertTo(LOperand::DOUBLE_REGISTER, reg_index);
- } else {
- UNREACHABLE();
- }
- if (is_tagged) {
- TraceAlloc("Fixed reg is tagged at %d\n", pos);
- LInstruction* instr = InstructionAt(pos);
- if (instr->HasPointerMap()) {
- instr->pointer_map()->RecordPointer(operand);
- }
- }
- return operand;
-}
-
-
-LiveRange* LAllocator::FixedLiveRangeFor(int index) {
- ASSERT(index < Register::kNumAllocatableRegisters);
- LiveRange* result = fixed_live_ranges_[index];
- if (result == NULL) {
- result = new LiveRange(FixedLiveRangeID(index));
- ASSERT(result->IsFixed());
- result->set_assigned_register(index, GENERAL_REGISTERS);
- fixed_live_ranges_[index] = result;
- }
- return result;
-}
-
-
-LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
- ASSERT(index < DoubleRegister::kNumAllocatableRegisters);
- LiveRange* result = fixed_double_live_ranges_[index];
- if (result == NULL) {
- result = new LiveRange(FixedDoubleLiveRangeID(index));
- ASSERT(result->IsFixed());
- result->set_assigned_register(index, DOUBLE_REGISTERS);
- fixed_double_live_ranges_[index] = result;
- }
- return result;
-}
-
-
-LiveRange* LAllocator::LiveRangeFor(int index) {
- if (index >= live_ranges_.length()) {
- live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1);
- }
- LiveRange* result = live_ranges_[index];
- if (result == NULL) {
- result = new LiveRange(index);
- live_ranges_[index] = result;
- }
- return result;
-}
-
-
-LGap* LAllocator::GetLastGap(HBasicBlock* block) {
- int last_instruction = block->last_instruction_index();
- int index = chunk_->NearestGapPos(last_instruction);
- return GapAt(index);
-}
-
-
-HPhi* LAllocator::LookupPhi(LOperand* operand) const {
- if (!operand->IsUnallocated()) return NULL;
- int index = operand->VirtualRegister();
- HValue* instr = graph_->LookupValue(index);
- if (instr != NULL && instr->IsPhi()) {
- return HPhi::cast(instr);
- }
- return NULL;
-}
-
-
-LiveRange* LAllocator::LiveRangeFor(LOperand* operand) {
- if (operand->IsUnallocated()) {
- return LiveRangeFor(LUnallocated::cast(operand)->virtual_register());
- } else if (operand->IsRegister()) {
- return FixedLiveRangeFor(operand->index());
- } else if (operand->IsDoubleRegister()) {
- return FixedDoubleLiveRangeFor(operand->index());
- } else {
- return NULL;
- }
-}
-
-
-void LAllocator::Define(LifetimePosition position,
- LOperand* operand,
- LOperand* hint) {
- LiveRange* range = LiveRangeFor(operand);
- if (range == NULL) return;
-
- if (range->IsEmpty() || range->Start().Value() > position.Value()) {
- // Can happen if there is a definition without use.
- range->AddUseInterval(position, position.NextInstruction());
- range->AddUsePosition(position.NextInstruction(), NULL);
- } else {
- range->ShortenTo(position);
- }
-
- if (operand->IsUnallocated()) {
- LUnallocated* unalloc_operand = LUnallocated::cast(operand);
- range->AddUsePosition(position, unalloc_operand)->set_hint(hint);
- }
-}
-
-
-void LAllocator::Use(LifetimePosition block_start,
- LifetimePosition position,
- LOperand* operand,
- LOperand* hint) {
- LiveRange* range = LiveRangeFor(operand);
- if (range == NULL) return;
- if (operand->IsUnallocated()) {
- LUnallocated* unalloc_operand = LUnallocated::cast(operand);
- range->AddUsePosition(position, unalloc_operand)->set_hint(hint);
- }
- range->AddUseInterval(block_start, position);
-}
-
-
-void LAllocator::AddConstraintsGapMove(int index,
- LOperand* from,
- LOperand* to) {
- LGap* gap = GapAt(index);
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
- if (from->IsUnallocated()) {
- const ZoneList<LMoveOperands>* move_operands = move->move_operands();
- for (int i = 0; i < move_operands->length(); ++i) {
- LMoveOperands cur = move_operands->at(i);
- LOperand* cur_to = cur.destination();
- if (cur_to->IsUnallocated()) {
- if (cur_to->VirtualRegister() == from->VirtualRegister()) {
- move->AddMove(cur.source(), to);
- return;
- }
- }
- }
- }
- move->AddMove(from, to);
-}
-
-
-void LAllocator::MeetRegisterConstraints(HBasicBlock* block) {
- int start = block->first_instruction_index();
- int end = block->last_instruction_index();
- for (int i = start; i <= end; ++i) {
- if (IsGapAt(i)) {
- LInstruction* instr = NULL;
- LInstruction* prev_instr = NULL;
- if (i < end) instr = InstructionAt(i + 1);
- if (i > start) prev_instr = InstructionAt(i - 1);
- MeetConstraintsBetween(prev_instr, instr, i);
- }
- }
-}
-
-
-void LAllocator::MeetConstraintsBetween(LInstruction* first,
- LInstruction* second,
- int gap_index) {
- // Handle fixed temporaries.
- if (first != NULL) {
- for (TempIterator it(first); it.HasNext(); it.Advance()) {
- LUnallocated* temp = LUnallocated::cast(it.Next());
- if (temp->HasFixedPolicy()) {
- AllocateFixed(temp, gap_index - 1, false);
- }
- }
- }
-
- // Handle fixed output operand.
- if (first != NULL && first->Output() != NULL) {
- LUnallocated* first_output = LUnallocated::cast(first->Output());
- LiveRange* range = LiveRangeFor(first_output->VirtualRegister());
- bool assigned = false;
- if (first_output->HasFixedPolicy()) {
- LUnallocated* output_copy = first_output->CopyUnconstrained();
- bool is_tagged = HasTaggedValue(first_output->VirtualRegister());
- AllocateFixed(first_output, gap_index, is_tagged);
-
- // This value is produced on the stack, we never need to spill it.
- if (first_output->IsStackSlot()) {
- range->SetSpillOperand(first_output);
- range->SetSpillStartIndex(gap_index - 1);
- assigned = true;
- }
- chunk_->AddGapMove(gap_index, first_output, output_copy);
- }
-
- if (!assigned) {
- range->SetSpillStartIndex(gap_index);
-
- // This move to spill operand is not a real use. Liveness analysis
- // and splitting of live ranges do not account for it.
- // Thus it should be inserted to a lifetime position corresponding to
- // the instruction end.
- LGap* gap = GapAt(gap_index);
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE);
- move->AddMove(first_output, range->GetSpillOperand());
- }
- }
-
- // Handle fixed input operands of second instruction.
- if (second != NULL) {
- for (UseIterator it(second); it.HasNext(); it.Advance()) {
- LUnallocated* cur_input = LUnallocated::cast(it.Next());
- if (cur_input->HasFixedPolicy()) {
- LUnallocated* input_copy = cur_input->CopyUnconstrained();
- bool is_tagged = HasTaggedValue(cur_input->VirtualRegister());
- AllocateFixed(cur_input, gap_index + 1, is_tagged);
- AddConstraintsGapMove(gap_index, input_copy, cur_input);
- } else if (cur_input->policy() == LUnallocated::WRITABLE_REGISTER) {
- // The live range of writable input registers always goes until the end
- // of the instruction.
- ASSERT(!cur_input->IsUsedAtStart());
-
- LUnallocated* input_copy = cur_input->CopyUnconstrained();
- cur_input->set_virtual_register(next_virtual_register_++);
-
- if (RequiredRegisterKind(input_copy->virtual_register()) ==
- DOUBLE_REGISTERS) {
- double_artificial_registers_.Add(
- cur_input->virtual_register() - first_artificial_register_);
- }
-
- AddConstraintsGapMove(gap_index, input_copy, cur_input);
- }
- }
- }
-
- // Handle "output same as input" for second instruction.
- if (second != NULL && second->Output() != NULL) {
- LUnallocated* second_output = LUnallocated::cast(second->Output());
- if (second_output->HasSameAsInputPolicy()) {
- LUnallocated* cur_input = LUnallocated::cast(second->FirstInput());
- int output_vreg = second_output->VirtualRegister();
- int input_vreg = cur_input->VirtualRegister();
-
- LUnallocated* input_copy = cur_input->CopyUnconstrained();
- cur_input->set_virtual_register(second_output->virtual_register());
- AddConstraintsGapMove(gap_index, input_copy, cur_input);
-
- if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
- int index = gap_index + 1;
- LInstruction* instr = InstructionAt(index);
- if (instr->HasPointerMap()) {
- instr->pointer_map()->RecordPointer(input_copy);
- }
- } else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
- // The input is assumed to immediately have a tagged representation,
- // before the pointer map can be used. I.e. the pointer map at the
- // instruction will include the output operand (whose value at the
- // beginning of the instruction is equal to the input operand). If
- // this is not desired, then the pointer map at this instruction needs
- // to be adjusted manually.
- }
- }
- }
-}
-
-
-void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
- int block_start = block->first_instruction_index();
- int index = block->last_instruction_index();
-
- LifetimePosition block_start_position =
- LifetimePosition::FromInstructionIndex(block_start);
-
- while (index >= block_start) {
- LifetimePosition curr_position =
- LifetimePosition::FromInstructionIndex(index);
-
- if (IsGapAt(index)) {
- // We have a gap at this position.
- LGap* gap = GapAt(index);
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
- const ZoneList<LMoveOperands>* move_operands = move->move_operands();
- for (int i = 0; i < move_operands->length(); ++i) {
- LMoveOperands* cur = &move_operands->at(i);
- if (cur->IsIgnored()) continue;
- LOperand* from = cur->source();
- LOperand* to = cur->destination();
- HPhi* phi = LookupPhi(to);
- LOperand* hint = to;
- if (phi != NULL) {
- // This is a phi resolving move.
- if (!phi->block()->IsLoopHeader()) {
- hint = LiveRangeFor(phi->id())->FirstHint();
- }
- } else {
- if (to->IsUnallocated()) {
- if (live->Contains(to->VirtualRegister())) {
- Define(curr_position, to, from);
- live->Remove(to->VirtualRegister());
- } else {
- cur->Eliminate();
- continue;
- }
- } else {
- Define(curr_position, to, from);
- }
- }
- Use(block_start_position, curr_position, from, hint);
- if (from->IsUnallocated()) {
- live->Add(from->VirtualRegister());
- }
- }
- } else {
- ASSERT(!IsGapAt(index));
- LInstruction* instr = InstructionAt(index);
-
- if (instr != NULL) {
- LOperand* output = instr->Output();
- if (output != NULL) {
- if (output->IsUnallocated()) live->Remove(output->VirtualRegister());
- Define(curr_position, output, NULL);
- }
-
- if (instr->IsMarkedAsCall()) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
- if (output == NULL || !output->IsRegister() ||
- output->index() != i) {
- LiveRange* range = FixedLiveRangeFor(i);
- range->AddUseInterval(curr_position,
- curr_position.InstructionEnd());
- }
- }
- }
-
- if (instr->IsMarkedAsCall() || instr->IsMarkedAsSaveDoubles()) {
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
- if (output == NULL || !output->IsDoubleRegister() ||
- output->index() != i) {
- LiveRange* range = FixedDoubleLiveRangeFor(i);
- range->AddUseInterval(curr_position,
- curr_position.InstructionEnd());
- }
- }
- }
-
- for (UseIterator it(instr); it.HasNext(); it.Advance()) {
- LOperand* input = it.Next();
-
- LifetimePosition use_pos;
- if (input->IsUnallocated() &&
- LUnallocated::cast(input)->IsUsedAtStart()) {
- use_pos = curr_position;
- } else {
- use_pos = curr_position.InstructionEnd();
- }
-
- Use(block_start_position, use_pos, input, NULL);
- if (input->IsUnallocated()) live->Add(input->VirtualRegister());
- }
-
- for (TempIterator it(instr); it.HasNext(); it.Advance()) {
- LOperand* temp = it.Next();
- if (instr->IsMarkedAsCall()) {
- if (temp->IsRegister()) continue;
- if (temp->IsUnallocated()) {
- LUnallocated* temp_unalloc = LUnallocated::cast(temp);
- if (temp_unalloc->HasFixedPolicy()) {
- continue;
- }
- }
- }
- Use(block_start_position, curr_position.InstructionEnd(), temp, NULL);
- Define(curr_position, temp, NULL);
- }
- }
- }
-
- index = index - 1;
- }
-}
-
-
-void LAllocator::ResolvePhis(HBasicBlock* block) {
- const ZoneList<HPhi*>* phis = block->phis();
- for (int i = 0; i < phis->length(); ++i) {
- HPhi* phi = phis->at(i);
- LUnallocated* phi_operand = new LUnallocated(LUnallocated::NONE);
- phi_operand->set_virtual_register(phi->id());
- for (int j = 0; j < phi->OperandCount(); ++j) {
- HValue* op = phi->OperandAt(j);
- LOperand* operand = NULL;
- if (op->IsConstant() && op->EmitAtUses()) {
- HConstant* constant = HConstant::cast(op);
- operand = chunk_->DefineConstantOperand(constant);
- } else {
- ASSERT(!op->EmitAtUses());
- LUnallocated* unalloc = new LUnallocated(LUnallocated::NONE);
- unalloc->set_virtual_register(op->id());
- operand = unalloc;
- }
- HBasicBlock* cur_block = block->predecessors()->at(j);
- // The gap move must be added without any special processing as in
- // the AddConstraintsGapMove.
- chunk_->AddGapMove(cur_block->last_instruction_index() - 1,
- operand,
- phi_operand);
- }
-
- LiveRange* live_range = LiveRangeFor(phi->id());
- LLabel* label = chunk_->GetLabel(phi->block()->block_id());
- label->GetOrCreateParallelMove(LGap::START)->
- AddMove(phi_operand, live_range->GetSpillOperand());
- live_range->SetSpillStartIndex(phi->block()->first_instruction_index());
- }
-}
-
-
-void LAllocator::Allocate(LChunk* chunk) {
- ASSERT(chunk_ == NULL);
- chunk_ = chunk;
- MeetRegisterConstraints();
- ResolvePhis();
- BuildLiveRanges();
- AllocateGeneralRegisters();
- AllocateDoubleRegisters();
- PopulatePointerMaps();
- if (has_osr_entry_) ProcessOsrEntry();
- ConnectRanges();
- ResolveControlFlow();
-}
-
-
-void LAllocator::MeetRegisterConstraints() {
- HPhase phase("Register constraints", chunk_);
- first_artificial_register_ = next_virtual_register_;
- const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
- for (int i = 0; i < blocks->length(); ++i) {
- HBasicBlock* block = blocks->at(i);
- MeetRegisterConstraints(block);
- }
-}
-
-
-void LAllocator::ResolvePhis() {
- HPhase phase("Resolve phis", chunk_);
-
- // Process the blocks in reverse order.
- const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
- for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
- HBasicBlock* block = blocks->at(block_id);
- ResolvePhis(block);
- }
-}
-
-
-void LAllocator::ResolveControlFlow(LiveRange* range,
- HBasicBlock* block,
- HBasicBlock* pred) {
- LifetimePosition pred_end =
- LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
- LifetimePosition cur_start =
- LifetimePosition::FromInstructionIndex(block->first_instruction_index());
- LiveRange* pred_cover = NULL;
- LiveRange* cur_cover = NULL;
- LiveRange* cur_range = range;
- while (cur_range != NULL && (cur_cover == NULL || pred_cover == NULL)) {
- if (cur_range->CanCover(cur_start)) {
- ASSERT(cur_cover == NULL);
- cur_cover = cur_range;
- }
- if (cur_range->CanCover(pred_end)) {
- ASSERT(pred_cover == NULL);
- pred_cover = cur_range;
- }
- cur_range = cur_range->next();
- }
-
- if (cur_cover->IsSpilled()) return;
- ASSERT(pred_cover != NULL && cur_cover != NULL);
- if (pred_cover != cur_cover) {
- LOperand* pred_op = pred_cover->CreateAssignedOperand();
- LOperand* cur_op = cur_cover->CreateAssignedOperand();
- if (!pred_op->Equals(cur_op)) {
- LGap* gap = NULL;
- if (block->predecessors()->length() == 1) {
- gap = GapAt(block->first_instruction_index());
- } else {
- ASSERT(pred->end()->SecondSuccessor() == NULL);
- gap = GetLastGap(pred);
-
- // We are going to insert a move before the branch instruction.
- // Some branch instructions (e.g. loops' back edges)
- // can potentially cause a GC so they have a pointer map.
- // By insterting a move we essentially create a copy of a
- // value which is invisible to PopulatePointerMaps(), because we store
- // it into a location different from the operand of a live range
- // covering a branch instruction.
- // Thus we need to manually record a pointer.
- if (HasTaggedValue(range->id())) {
- LInstruction* branch = InstructionAt(pred->last_instruction_index());
- if (branch->HasPointerMap()) {
- branch->pointer_map()->RecordPointer(cur_op);
- }
- }
- }
- gap->GetOrCreateParallelMove(LGap::START)->AddMove(pred_op, cur_op);
- }
- }
-}
-
-
-LParallelMove* LAllocator::GetConnectingParallelMove(LifetimePosition pos) {
- int index = pos.InstructionIndex();
- if (IsGapAt(index)) {
- LGap* gap = GapAt(index);
- return gap->GetOrCreateParallelMove(
- pos.IsInstructionStart() ? LGap::START : LGap::END);
- }
- int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
- return GapAt(gap_pos)->GetOrCreateParallelMove(
- (gap_pos < index) ? LGap::AFTER : LGap::BEFORE);
-}
-
-
-HBasicBlock* LAllocator::GetBlock(LifetimePosition pos) {
- LGap* gap = GapAt(chunk_->NearestGapPos(pos.InstructionIndex()));
- return gap->block();
-}
-
-
-void LAllocator::ConnectRanges() {
- HPhase phase("Connect ranges", this);
- for (int i = 0; i < live_ranges()->length(); ++i) {
- LiveRange* first_range = live_ranges()->at(i);
- if (first_range == NULL || first_range->parent() != NULL) continue;
-
- LiveRange* second_range = first_range->next();
- while (second_range != NULL) {
- LifetimePosition pos = second_range->Start();
-
- if (!second_range->IsSpilled()) {
- // Add gap move if the two live ranges touch and there is no block
- // boundary.
- if (first_range->End().Value() == pos.Value()) {
- bool should_insert = true;
- if (IsBlockBoundary(pos)) {
- should_insert = CanEagerlyResolveControlFlow(GetBlock(pos));
- }
- if (should_insert) {
- LParallelMove* move = GetConnectingParallelMove(pos);
- LOperand* prev_operand = first_range->CreateAssignedOperand();
- LOperand* cur_operand = second_range->CreateAssignedOperand();
- move->AddMove(prev_operand, cur_operand);
- }
- }
- }
-
- first_range = second_range;
- second_range = second_range->next();
- }
- }
-}
-
-
-bool LAllocator::CanEagerlyResolveControlFlow(HBasicBlock* block) const {
- if (block->predecessors()->length() != 1) return false;
- return block->predecessors()->first()->block_id() == block->block_id() - 1;
-}
-
-
-void LAllocator::ResolveControlFlow() {
- HPhase phase("Resolve control flow", this);
- const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
- for (int block_id = 1; block_id < blocks->length(); ++block_id) {
- HBasicBlock* block = blocks->at(block_id);
- if (CanEagerlyResolveControlFlow(block)) continue;
- BitVector* live = live_in_sets_[block->block_id()];
- BitVector::Iterator iterator(live);
- while (!iterator.Done()) {
- int operand_index = iterator.Current();
- for (int i = 0; i < block->predecessors()->length(); ++i) {
- HBasicBlock* cur = block->predecessors()->at(i);
- LiveRange* cur_range = LiveRangeFor(operand_index);
- ResolveControlFlow(cur_range, block, cur);
- }
- iterator.Advance();
- }
- }
-}
-
-
-void LAllocator::BuildLiveRanges() {
- HPhase phase("Build live ranges", this);
- InitializeLivenessAnalysis();
- // Process the blocks in reverse order.
- const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
- for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
- HBasicBlock* block = blocks->at(block_id);
- BitVector* live = ComputeLiveOut(block);
- // Initially consider all live_out values live for the entire block. We
- // will shorten these intervals if necessary.
- AddInitialIntervals(block, live);
-
- // Process the instructions in reverse order, generating and killing
- // live values.
- ProcessInstructions(block, live);
- // All phi output operands are killed by this block.
- const ZoneList<HPhi*>* phis = block->phis();
- for (int i = 0; i < phis->length(); ++i) {
- // The live range interval already ends at the first instruction of the
- // block.
- HPhi* phi = phis->at(i);
- live->Remove(phi->id());
-
- LOperand* hint = NULL;
- LOperand* phi_operand = NULL;
- LGap* gap = GetLastGap(phi->block()->predecessors()->at(0));
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
- for (int j = 0; j < move->move_operands()->length(); ++j) {
- LOperand* to = move->move_operands()->at(j).destination();
- if (to->IsUnallocated() && to->VirtualRegister() == phi->id()) {
- hint = move->move_operands()->at(j).source();
- phi_operand = to;
- break;
- }
- }
- ASSERT(hint != NULL);
-
- LifetimePosition block_start = LifetimePosition::FromInstructionIndex(
- block->first_instruction_index());
- Define(block_start, phi_operand, hint);
- }
-
- // Now live is live_in for this block except not including values live
- // out on backward successor edges.
- live_in_sets_[block_id] = live;
-
- // If this block is a loop header go back and patch up the necessary
- // predecessor blocks.
- if (block->IsLoopHeader()) {
- // TODO(kmillikin): Need to be able to get the last block of the loop
- // in the loop information. Add a live range stretching from the first
- // loop instruction to the last for each value live on entry to the
- // header.
- HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
- BitVector::Iterator iterator(live);
- LifetimePosition start = LifetimePosition::FromInstructionIndex(
- block->first_instruction_index());
- LifetimePosition end = LifetimePosition::FromInstructionIndex(
- back_edge->last_instruction_index()).NextInstruction();
- while (!iterator.Done()) {
- int operand_index = iterator.Current();
- LiveRange* range = LiveRangeFor(operand_index);
- range->EnsureInterval(start, end);
- iterator.Advance();
- }
-
- for (int i = block->block_id() + 1; i <= back_edge->block_id(); ++i) {
- live_in_sets_[i]->Union(*live);
- }
- }
-
-#ifdef DEBUG
- if (block_id == 0) {
- BitVector::Iterator iterator(live);
- bool found = false;
- while (!iterator.Done()) {
- found = true;
- int operand_index = iterator.Current();
- PrintF("Function: %s\n",
- *chunk_->info()->function()->debug_name()->ToCString());
- PrintF("Value %d used before first definition!\n", operand_index);
- LiveRange* range = LiveRangeFor(operand_index);
- PrintF("First use is at %d\n", range->first_pos()->pos().Value());
- iterator.Advance();
- }
- ASSERT(!found);
- }
-#endif
- }
-}
-
-
-bool LAllocator::SafePointsAreInOrder() const {
- const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
- int safe_point = 0;
- for (int i = 0; i < pointer_maps->length(); ++i) {
- LPointerMap* map = pointer_maps->at(i);
- if (safe_point > map->lithium_position()) return false;
- safe_point = map->lithium_position();
- }
- return true;
-}
-
-
-void LAllocator::PopulatePointerMaps() {
- HPhase phase("Populate pointer maps", this);
- const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
-
- ASSERT(SafePointsAreInOrder());
-
- // Iterate over all safe point positions and record a pointer
- // for all spilled live ranges at this point.
- int first_safe_point_index = 0;
- int last_range_start = 0;
- for (int range_idx = 0; range_idx < live_ranges()->length(); ++range_idx) {
- LiveRange* range = live_ranges()->at(range_idx);
- if (range == NULL) continue;
- // Iterate over the first parts of multi-part live ranges.
- if (range->parent() != NULL) continue;
- // Skip non-pointer values.
- if (!HasTaggedValue(range->id())) continue;
- // Skip empty live ranges.
- if (range->IsEmpty()) continue;
-
- // Find the extent of the range and its children.
- int start = range->Start().InstructionIndex();
- int end = 0;
- for (LiveRange* cur = range; cur != NULL; cur = cur->next()) {
- LifetimePosition this_end = cur->End();
- if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex();
- ASSERT(cur->Start().InstructionIndex() >= start);
- }
-
- // Most of the ranges are in order, but not all. Keep an eye on when
- // they step backwards and reset the first_safe_point_index so we don't
- // miss any safe points.
- if (start < last_range_start) {
- first_safe_point_index = 0;
- }
- last_range_start = start;
-
- // Step across all the safe points that are before the start of this range,
- // recording how far we step in order to save doing this for the next range.
- while (first_safe_point_index < pointer_maps->length()) {
- LPointerMap* map = pointer_maps->at(first_safe_point_index);
- int safe_point = map->lithium_position();
- if (safe_point >= start) break;
- first_safe_point_index++;
- }
-
- // Step through the safe points to see whether they are in the range.
- for (int safe_point_index = first_safe_point_index;
- safe_point_index < pointer_maps->length();
- ++safe_point_index) {
- LPointerMap* map = pointer_maps->at(safe_point_index);
- int safe_point = map->lithium_position();
-
- // The safe points are sorted so we can stop searching here.
- if (safe_point - 1 > end) break;
-
- // Advance to the next active range that covers the current
- // safe point position.
- LifetimePosition safe_point_pos =
- LifetimePosition::FromInstructionIndex(safe_point);
- LiveRange* cur = range;
- while (cur != NULL && !cur->Covers(safe_point_pos.PrevInstruction())) {
- cur = cur->next();
- }
- if (cur == NULL) continue;
-
- // Check if the live range is spilled and the safe point is after
- // the spill position.
- if (range->HasAllocatedSpillOperand() &&
- safe_point >= range->spill_start_index()) {
- TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
- range->id(), range->spill_start_index(), safe_point);
- map->RecordPointer(range->GetSpillOperand());
- }
-
- if (!cur->IsSpilled()) {
- TraceAlloc("Pointer in register for range %d (start at %d) "
- "at safe point %d\n",
- cur->id(), cur->Start().Value(), safe_point);
- LOperand* operand = cur->CreateAssignedOperand();
- ASSERT(!operand->IsStackSlot());
- map->RecordPointer(operand);
- }
- }
- }
-}
-
-
-void LAllocator::ProcessOsrEntry() {
- const ZoneList<LInstruction*>* instrs = chunk_->instructions();
-
- // Linear search for the OSR entry instruction in the chunk.
- int index = -1;
- while (++index < instrs->length() &&
- !instrs->at(index)->IsOsrEntry()) {
- }
- ASSERT(index < instrs->length());
- LOsrEntry* instruction = LOsrEntry::cast(instrs->at(index));
-
- LifetimePosition position = LifetimePosition::FromInstructionIndex(index);
- for (int i = 0; i < live_ranges()->length(); ++i) {
- LiveRange* range = live_ranges()->at(i);
- if (range != NULL) {
- if (range->Covers(position) &&
- range->HasRegisterAssigned() &&
- range->TopLevel()->HasAllocatedSpillOperand()) {
- int reg_index = range->assigned_register();
- LOperand* spill_operand = range->TopLevel()->GetSpillOperand();
- if (range->IsDouble()) {
- instruction->MarkSpilledDoubleRegister(reg_index, spill_operand);
- } else {
- instruction->MarkSpilledRegister(reg_index, spill_operand);
- }
- }
- }
- }
-}
-
-
-void LAllocator::AllocateGeneralRegisters() {
- HPhase phase("Allocate general registers", this);
- num_registers_ = Register::kNumAllocatableRegisters;
- mode_ = GENERAL_REGISTERS;
- AllocateRegisters();
-}
-
-
-void LAllocator::AllocateDoubleRegisters() {
- HPhase phase("Allocate double registers", this);
- num_registers_ = DoubleRegister::kNumAllocatableRegisters;
- mode_ = DOUBLE_REGISTERS;
- AllocateRegisters();
-}
-
-
-void LAllocator::AllocateRegisters() {
- ASSERT(mode_ != NONE);
- ASSERT(unhandled_live_ranges_.is_empty());
-
- for (int i = 0; i < live_ranges_.length(); ++i) {
- if (live_ranges_[i] != NULL) {
- if (RequiredRegisterKind(live_ranges_[i]->id()) == mode_) {
- AddToUnhandledUnsorted(live_ranges_[i]);
- }
- }
- }
- SortUnhandled();
- ASSERT(UnhandledIsSorted());
-
- ASSERT(reusable_slots_.is_empty());
- ASSERT(active_live_ranges_.is_empty());
- ASSERT(inactive_live_ranges_.is_empty());
-
- if (mode_ == DOUBLE_REGISTERS) {
- for (int i = 0; i < fixed_double_live_ranges_.length(); ++i) {
- LiveRange* current = fixed_double_live_ranges_.at(i);
- if (current != NULL) {
- AddToInactive(current);
- }
- }
- } else {
- for (int i = 0; i < fixed_live_ranges_.length(); ++i) {
- LiveRange* current = fixed_live_ranges_.at(i);
- if (current != NULL) {
- AddToInactive(current);
- }
- }
- }
-
- while (!unhandled_live_ranges_.is_empty()) {
- ASSERT(UnhandledIsSorted());
- LiveRange* current = unhandled_live_ranges_.RemoveLast();
- ASSERT(UnhandledIsSorted());
- LifetimePosition position = current->Start();
- TraceAlloc("Processing interval %d start=%d\n",
- current->id(),
- position.Value());
-
- if (current->HasAllocatedSpillOperand()) {
- TraceAlloc("Live range %d already has a spill operand\n", current->id());
- LifetimePosition next_pos = position;
- if (IsGapAt(next_pos.InstructionIndex())) {
- next_pos = next_pos.NextInstruction();
- }
- UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
- // If the range already has a spill operand and it doesn't need a
- // register immediately, split it and spill the first part of the range.
- if (pos == NULL) {
- Spill(current);
- continue;
- } else if (pos->pos().Value() >
- current->Start().NextInstruction().Value()) {
- // Do not spill live range eagerly if use position that can benefit from
- // the register is too close to the start of live range.
- SpillBetween(current, current->Start(), pos->pos());
- ASSERT(UnhandledIsSorted());
- continue;
- }
- }
-
- for (int i = 0; i < active_live_ranges_.length(); ++i) {
- LiveRange* cur_active = active_live_ranges_.at(i);
- if (cur_active->End().Value() <= position.Value()) {
- ActiveToHandled(cur_active);
- --i; // The live range was removed from the list of active live ranges.
- } else if (!cur_active->Covers(position)) {
- ActiveToInactive(cur_active);
- --i; // The live range was removed from the list of active live ranges.
- }
- }
-
- for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
- LiveRange* cur_inactive = inactive_live_ranges_.at(i);
- if (cur_inactive->End().Value() <= position.Value()) {
- InactiveToHandled(cur_inactive);
- --i; // Live range was removed from the list of inactive live ranges.
- } else if (cur_inactive->Covers(position)) {
- InactiveToActive(cur_inactive);
- --i; // Live range was removed from the list of inactive live ranges.
- }
- }
-
- ASSERT(!current->HasRegisterAssigned() && !current->IsSpilled());
-
- bool result = TryAllocateFreeReg(current);
- if (!result) {
- AllocateBlockedReg(current);
- }
-
- if (current->HasRegisterAssigned()) {
- AddToActive(current);
- }
- }
-
- reusable_slots_.Rewind(0);
- active_live_ranges_.Rewind(0);
- inactive_live_ranges_.Rewind(0);
-}
-
-
-const char* LAllocator::RegisterName(int allocation_index) {
- ASSERT(mode_ != NONE);
- if (mode_ == GENERAL_REGISTERS) {
- return Register::AllocationIndexToString(allocation_index);
- } else {
- return DoubleRegister::AllocationIndexToString(allocation_index);
- }
-}
-
-
-void LAllocator::TraceAlloc(const char* msg, ...) {
- if (FLAG_trace_alloc) {
- va_list arguments;
- va_start(arguments, msg);
- OS::VPrint(msg, arguments);
- va_end(arguments);
- }
-}
-
-
-bool LAllocator::HasTaggedValue(int virtual_register) const {
- HValue* value = graph_->LookupValue(virtual_register);
- if (value == NULL) return false;
- return value->representation().IsTagged();
-}
-
-
-RegisterKind LAllocator::RequiredRegisterKind(int virtual_register) const {
- if (virtual_register < first_artificial_register_) {
- HValue* value = graph_->LookupValue(virtual_register);
- if (value != NULL && value->representation().IsDouble()) {
- return DOUBLE_REGISTERS;
- }
- } else if (double_artificial_registers_.Contains(
- virtual_register - first_artificial_register_)) {
- return DOUBLE_REGISTERS;
- }
-
- return GENERAL_REGISTERS;
-}
-
-
-void LAllocator::RecordDefinition(HInstruction* instr, LUnallocated* operand) {
- operand->set_virtual_register(instr->id());
-}
-
-
-void LAllocator::RecordTemporary(LUnallocated* operand) {
- ASSERT(next_virtual_register_ < LUnallocated::kMaxVirtualRegisters);
- if (!operand->HasFixedPolicy()) {
- operand->set_virtual_register(next_virtual_register_++);
- }
-}
-
-
-void LAllocator::RecordUse(HValue* value, LUnallocated* operand) {
- operand->set_virtual_register(value->id());
-}
-
-
-int LAllocator::max_initial_value_ids() {
- return LUnallocated::kMaxVirtualRegisters / 32;
-}
-
-
-void LAllocator::AddToActive(LiveRange* range) {
- TraceAlloc("Add live range %d to active\n", range->id());
- active_live_ranges_.Add(range);
-}
-
-
-void LAllocator::AddToInactive(LiveRange* range) {
- TraceAlloc("Add live range %d to inactive\n", range->id());
- inactive_live_ranges_.Add(range);
-}
-
-
-void LAllocator::AddToUnhandledSorted(LiveRange* range) {
- if (range == NULL || range->IsEmpty()) return;
- ASSERT(!range->HasRegisterAssigned() && !range->IsSpilled());
- for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) {
- LiveRange* cur_range = unhandled_live_ranges_.at(i);
- if (range->ShouldBeAllocatedBefore(cur_range)) {
- TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
- unhandled_live_ranges_.InsertAt(i + 1, range);
- ASSERT(UnhandledIsSorted());
- return;
- }
- }
- TraceAlloc("Add live range %d to unhandled at start\n", range->id());
- unhandled_live_ranges_.InsertAt(0, range);
- ASSERT(UnhandledIsSorted());
-}
-
-
-void LAllocator::AddToUnhandledUnsorted(LiveRange* range) {
- if (range == NULL || range->IsEmpty()) return;
- ASSERT(!range->HasRegisterAssigned() && !range->IsSpilled());
- TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
- unhandled_live_ranges_.Add(range);
-}
-
-
-static int UnhandledSortHelper(LiveRange* const* a, LiveRange* const* b) {
- ASSERT(!(*a)->ShouldBeAllocatedBefore(*b) ||
- !(*b)->ShouldBeAllocatedBefore(*a));
- if ((*a)->ShouldBeAllocatedBefore(*b)) return 1;
- if ((*b)->ShouldBeAllocatedBefore(*a)) return -1;
- return (*a)->id() - (*b)->id();
-}
-
-
-// Sort the unhandled live ranges so that the ranges to be processed first are
-// at the end of the array list. This is convenient for the register allocation
-// algorithm because it is efficient to remove elements from the end.
-void LAllocator::SortUnhandled() {
- TraceAlloc("Sort unhandled\n");
- unhandled_live_ranges_.Sort(&UnhandledSortHelper);
-}
-
-
-bool LAllocator::UnhandledIsSorted() {
- int len = unhandled_live_ranges_.length();
- for (int i = 1; i < len; i++) {
- LiveRange* a = unhandled_live_ranges_.at(i - 1);
- LiveRange* b = unhandled_live_ranges_.at(i);
- if (a->Start().Value() < b->Start().Value()) return false;
- }
- return true;
-}
-
-
-void LAllocator::FreeSpillSlot(LiveRange* range) {
- // Check that we are the last range.
- if (range->next() != NULL) return;
-
- if (!range->TopLevel()->HasAllocatedSpillOperand()) return;
-
- int index = range->TopLevel()->GetSpillOperand()->index();
- if (index >= 0) {
- reusable_slots_.Add(range);
- }
-}
-
-
-LOperand* LAllocator::TryReuseSpillSlot(LiveRange* range) {
- if (reusable_slots_.is_empty()) return NULL;
- if (reusable_slots_.first()->End().Value() >
- range->TopLevel()->Start().Value()) {
- return NULL;
- }
- LOperand* result = reusable_slots_.first()->TopLevel()->GetSpillOperand();
- reusable_slots_.Remove(0);
- return result;
-}
-
-
-void LAllocator::ActiveToHandled(LiveRange* range) {
- ASSERT(active_live_ranges_.Contains(range));
- active_live_ranges_.RemoveElement(range);
- TraceAlloc("Moving live range %d from active to handled\n", range->id());
- FreeSpillSlot(range);
-}
-
-
-void LAllocator::ActiveToInactive(LiveRange* range) {
- ASSERT(active_live_ranges_.Contains(range));
- active_live_ranges_.RemoveElement(range);
- inactive_live_ranges_.Add(range);
- TraceAlloc("Moving live range %d from active to inactive\n", range->id());
-}
-
-
-void LAllocator::InactiveToHandled(LiveRange* range) {
- ASSERT(inactive_live_ranges_.Contains(range));
- inactive_live_ranges_.RemoveElement(range);
- TraceAlloc("Moving live range %d from inactive to handled\n", range->id());
- FreeSpillSlot(range);
-}
-
-
-void LAllocator::InactiveToActive(LiveRange* range) {
- ASSERT(inactive_live_ranges_.Contains(range));
- inactive_live_ranges_.RemoveElement(range);
- active_live_ranges_.Add(range);
- TraceAlloc("Moving live range %d from inactive to active\n", range->id());
-}
-
-
-// TryAllocateFreeReg and AllocateBlockedReg assume this
-// when allocating local arrays.
-STATIC_ASSERT(DoubleRegister::kNumAllocatableRegisters >=
- Register::kNumAllocatableRegisters);
-
-
-bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
- LifetimePosition free_until_pos[DoubleRegister::kNumAllocatableRegisters];
-
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
- free_until_pos[i] = LifetimePosition::MaxPosition();
- }
-
- for (int i = 0; i < active_live_ranges_.length(); ++i) {
- LiveRange* cur_active = active_live_ranges_.at(i);
- free_until_pos[cur_active->assigned_register()] =
- LifetimePosition::FromInstructionIndex(0);
- }
-
- for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
- LiveRange* cur_inactive = inactive_live_ranges_.at(i);
- ASSERT(cur_inactive->End().Value() > current->Start().Value());
- LifetimePosition next_intersection =
- cur_inactive->FirstIntersection(current);
- if (!next_intersection.IsValid()) continue;
- int cur_reg = cur_inactive->assigned_register();
- free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
- }
-
- UsePosition* hinted_use = current->FirstPosWithHint();
- if (hinted_use != NULL) {
- LOperand* hint = hinted_use->hint();
- if (hint->IsRegister() || hint->IsDoubleRegister()) {
- int register_index = hint->index();
- TraceAlloc(
- "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
- RegisterName(register_index),
- free_until_pos[register_index].Value(),
- current->id(),
- current->End().Value());
-
- // The desired register is free until the end of the current live range.
- if (free_until_pos[register_index].Value() >= current->End().Value()) {
- TraceAlloc("Assigning preferred reg %s to live range %d\n",
- RegisterName(register_index),
- current->id());
- current->set_assigned_register(register_index, mode_);
- return true;
- }
- }
- }
-
- // Find the register which stays free for the longest time.
- int reg = 0;
- for (int i = 1; i < RegisterCount(); ++i) {
- if (free_until_pos[i].Value() > free_until_pos[reg].Value()) {
- reg = i;
- }
- }
-
- LifetimePosition pos = free_until_pos[reg];
-
- if (pos.Value() <= current->Start().Value()) {
- // All registers are blocked.
- return false;
- }
-
- if (pos.Value() < current->End().Value()) {
- // Register reg is available at the range start but becomes blocked before
- // the range end. Split current at position where it becomes blocked.
- LiveRange* tail = SplitAt(current, pos);
- AddToUnhandledSorted(tail);
- }
-
-
- // Register reg is available at the range start and is free until
- // the range end.
- ASSERT(pos.Value() >= current->End().Value());
- TraceAlloc("Assigning free reg %s to live range %d\n",
- RegisterName(reg),
- current->id());
- current->set_assigned_register(reg, mode_);
-
- return true;
-}
-
-
-void LAllocator::AllocateBlockedReg(LiveRange* current) {
- UsePosition* register_use = current->NextRegisterPosition(current->Start());
- if (register_use == NULL) {
- // There is no use in the current live range that requires a register.
- // We can just spill it.
- Spill(current);
- return;
- }
-
-
- LifetimePosition use_pos[DoubleRegister::kNumAllocatableRegisters];
- LifetimePosition block_pos[DoubleRegister::kNumAllocatableRegisters];
-
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
- use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
- }
-
- for (int i = 0; i < active_live_ranges_.length(); ++i) {
- LiveRange* range = active_live_ranges_[i];
- int cur_reg = range->assigned_register();
- if (range->IsFixed() || !range->CanBeSpilled(current->Start())) {
- block_pos[cur_reg] = use_pos[cur_reg] =
- LifetimePosition::FromInstructionIndex(0);
- } else {
- UsePosition* next_use = range->NextUsePositionRegisterIsBeneficial(
- current->Start());
- if (next_use == NULL) {
- use_pos[cur_reg] = range->End();
- } else {
- use_pos[cur_reg] = next_use->pos();
- }
- }
- }
-
- for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
- LiveRange* range = inactive_live_ranges_.at(i);
- ASSERT(range->End().Value() > current->Start().Value());
- LifetimePosition next_intersection = range->FirstIntersection(current);
- if (!next_intersection.IsValid()) continue;
- int cur_reg = range->assigned_register();
- if (range->IsFixed()) {
- block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
- use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
- } else {
- use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
- }
- }
-
- int reg = 0;
- for (int i = 1; i < RegisterCount(); ++i) {
- if (use_pos[i].Value() > use_pos[reg].Value()) {
- reg = i;
- }
- }
-
- LifetimePosition pos = use_pos[reg];
-
- if (pos.Value() < register_use->pos().Value()) {
- // All registers are blocked before the first use that requires a register.
- // Spill starting part of live range up to that use.
- //
- // Corner case: the first use position is equal to the start of the range.
- // In this case we have nothing to spill and SpillBetween will just return
- // this range to the list of unhandled ones. This will lead to the infinite
- // loop.
- ASSERT(current->Start().Value() < register_use->pos().Value());
- SpillBetween(current, current->Start(), register_use->pos());
- return;
- }
-
- if (block_pos[reg].Value() < current->End().Value()) {
- // Register becomes blocked before the current range end. Split before that
- // position.
- LiveRange* tail = SplitBetween(current,
- current->Start(),
- block_pos[reg].InstructionStart());
- AddToUnhandledSorted(tail);
- }
-
- // Register reg is not blocked for the whole range.
- ASSERT(block_pos[reg].Value() >= current->End().Value());
- TraceAlloc("Assigning blocked reg %s to live range %d\n",
- RegisterName(reg),
- current->id());
- current->set_assigned_register(reg, mode_);
-
- // This register was not free. Thus we need to find and spill
- // parts of active and inactive live regions that use the same register
- // at the same lifetime positions as current.
- SplitAndSpillIntersecting(current);
-}
-
-
-void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
- ASSERT(current->HasRegisterAssigned());
- int reg = current->assigned_register();
- LifetimePosition split_pos = current->Start();
- for (int i = 0; i < active_live_ranges_.length(); ++i) {
- LiveRange* range = active_live_ranges_[i];
- if (range->assigned_register() == reg) {
- UsePosition* next_pos = range->NextRegisterPosition(current->Start());
- if (next_pos == NULL) {
- SpillAfter(range, split_pos);
- } else {
- SpillBetween(range, split_pos, next_pos->pos());
- }
- ActiveToHandled(range);
- --i;
- }
- }
-
- for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
- LiveRange* range = inactive_live_ranges_[i];
- ASSERT(range->End().Value() > current->Start().Value());
- if (range->assigned_register() == reg && !range->IsFixed()) {
- LifetimePosition next_intersection = range->FirstIntersection(current);
- if (next_intersection.IsValid()) {
- UsePosition* next_pos = range->NextRegisterPosition(current->Start());
- if (next_pos == NULL) {
- SpillAfter(range, split_pos);
- } else {
- next_intersection = Min(next_intersection, next_pos->pos());
- SpillBetween(range, split_pos, next_intersection);
- }
- InactiveToHandled(range);
- --i;
- }
- }
- }
-}
-
-
-bool LAllocator::IsBlockBoundary(LifetimePosition pos) {
- return pos.IsInstructionStart() &&
- InstructionAt(pos.InstructionIndex())->IsLabel();
-}
-
-
-LiveRange* LAllocator::SplitAt(LiveRange* range, LifetimePosition pos) {
- ASSERT(!range->IsFixed());
- TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
-
- if (pos.Value() <= range->Start().Value()) return range;
-
- // We can't properly connect liveranges if split occured at the end
- // of control instruction.
- ASSERT(pos.IsInstructionStart() ||
- !chunk_->instructions()->at(pos.InstructionIndex())->IsControl());
-
- LiveRange* result = LiveRangeFor(next_virtual_register_++);
- range->SplitAt(pos, result);
- return result;
-}
-
-
-LiveRange* LAllocator::SplitBetween(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end) {
- ASSERT(!range->IsFixed());
- TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
- range->id(),
- start.Value(),
- end.Value());
-
- LifetimePosition split_pos = FindOptimalSplitPos(start, end);
- ASSERT(split_pos.Value() >= start.Value());
- return SplitAt(range, split_pos);
-}
-
-
-LifetimePosition LAllocator::FindOptimalSplitPos(LifetimePosition start,
- LifetimePosition end) {
- int start_instr = start.InstructionIndex();
- int end_instr = end.InstructionIndex();
- ASSERT(start_instr <= end_instr);
-
- // We have no choice
- if (start_instr == end_instr) return end;
-
- HBasicBlock* end_block = GetBlock(start);
- HBasicBlock* start_block = GetBlock(end);
-
- if (end_block == start_block) {
- // The interval is split in the same basic block. Split at latest possible
- // position.
- return end;
- }
-
- HBasicBlock* block = end_block;
- // Find header of outermost loop.
- while (block->parent_loop_header() != NULL &&
- block->parent_loop_header()->block_id() > start_block->block_id()) {
- block = block->parent_loop_header();
- }
-
- if (block == end_block) return end;
-
- return LifetimePosition::FromInstructionIndex(
- block->first_instruction_index());
-}
-
-
-void LAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
- LiveRange* second_part = SplitAt(range, pos);
- Spill(second_part);
-}
-
-
-void LAllocator::SpillBetween(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end) {
- ASSERT(start.Value() < end.Value());
- LiveRange* second_part = SplitAt(range, start);
-
- if (second_part->Start().Value() < end.Value()) {
- // The split result intersects with [start, end[.
- // Split it at position between ]start+1, end[, spill the middle part
- // and put the rest to unhandled.
- LiveRange* third_part = SplitBetween(
- second_part,
- second_part->Start().InstructionEnd(),
- end.PrevInstruction().InstructionEnd());
-
- ASSERT(third_part != second_part);
-
- Spill(second_part);
- AddToUnhandledSorted(third_part);
- } else {
- // The split result does not intersect with [start, end[.
- // Nothing to spill. Just put it to unhandled as whole.
- AddToUnhandledSorted(second_part);
- }
-}
-
-
-void LAllocator::Spill(LiveRange* range) {
- ASSERT(!range->IsSpilled());
- TraceAlloc("Spilling live range %d\n", range->id());
- LiveRange* first = range->TopLevel();
-
- if (!first->HasAllocatedSpillOperand()) {
- LOperand* op = TryReuseSpillSlot(range);
- if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == DOUBLE_REGISTERS);
- first->SetSpillOperand(op);
- }
- range->MakeSpilled();
-}
-
-
-int LAllocator::RegisterCount() const {
- return num_registers_;
-}
-
-
-#ifdef DEBUG
-
-
-void LAllocator::Verify() const {
- for (int i = 0; i < live_ranges()->length(); ++i) {
- LiveRange* current = live_ranges()->at(i);
- if (current != NULL) current->Verify();
- }
-}
-
-
-#endif
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/lithium-allocator.h b/src/3rdparty/v8/src/lithium-allocator.h
deleted file mode 100644
index f109c45..0000000
--- a/src/3rdparty/v8/src/lithium-allocator.h
+++ /dev/null
@@ -1,630 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LITHIUM_ALLOCATOR_H_
-#define V8_LITHIUM_ALLOCATOR_H_
-
-#include "v8.h"
-
-#include "data-flow.h"
-#include "lithium.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class HBasicBlock;
-class HGraph;
-class HInstruction;
-class HPhi;
-class HTracer;
-class HValue;
-class BitVector;
-class StringStream;
-
-class LArgument;
-class LChunk;
-class LOperand;
-class LUnallocated;
-class LConstantOperand;
-class LGap;
-class LParallelMove;
-class LPointerMap;
-class LStackSlot;
-class LRegister;
-
-
-// This class represents a single point of a LOperand's lifetime.
-// For each lithium instruction there are exactly two lifetime positions:
-// the beginning and the end of the instruction. Lifetime positions for
-// different lithium instructions are disjoint.
-class LifetimePosition {
- public:
- // Return the lifetime position that corresponds to the beginning of
- // the instruction with the given index.
- static LifetimePosition FromInstructionIndex(int index) {
- return LifetimePosition(index * kStep);
- }
-
- // Returns a numeric representation of this lifetime position.
- int Value() const {
- return value_;
- }
-
- // Returns the index of the instruction to which this lifetime position
- // corresponds.
- int InstructionIndex() const {
- ASSERT(IsValid());
- return value_ / kStep;
- }
-
- // Returns true if this lifetime position corresponds to the instruction
- // start.
- bool IsInstructionStart() const {
- return (value_ & (kStep - 1)) == 0;
- }
-
- // Returns the lifetime position for the start of the instruction which
- // corresponds to this lifetime position.
- LifetimePosition InstructionStart() const {
- ASSERT(IsValid());
- return LifetimePosition(value_ & ~(kStep - 1));
- }
-
- // Returns the lifetime position for the end of the instruction which
- // corresponds to this lifetime position.
- LifetimePosition InstructionEnd() const {
- ASSERT(IsValid());
- return LifetimePosition(InstructionStart().Value() + kStep/2);
- }
-
- // Returns the lifetime position for the beginning of the next instruction.
- LifetimePosition NextInstruction() const {
- ASSERT(IsValid());
- return LifetimePosition(InstructionStart().Value() + kStep);
- }
-
- // Returns the lifetime position for the beginning of the previous
- // instruction.
- LifetimePosition PrevInstruction() const {
- ASSERT(IsValid());
- ASSERT(value_ > 1);
- return LifetimePosition(InstructionStart().Value() - kStep);
- }
-
- // Constructs the lifetime position which does not correspond to any
- // instruction.
- LifetimePosition() : value_(-1) {}
-
- // Returns true if this lifetime positions corrensponds to some
- // instruction.
- bool IsValid() const { return value_ != -1; }
-
- static inline LifetimePosition Invalid() { return LifetimePosition(); }
-
- static inline LifetimePosition MaxPosition() {
- // We have to use this kind of getter instead of static member due to
- // crash bug in GDB.
- return LifetimePosition(kMaxInt);
- }
-
- private:
- static const int kStep = 2;
-
- // Code relies on kStep being a power of two.
- STATIC_ASSERT(IS_POWER_OF_TWO(kStep));
-
- explicit LifetimePosition(int value) : value_(value) { }
-
- int value_;
-};
-
-
-enum RegisterKind {
- NONE,
- GENERAL_REGISTERS,
- DOUBLE_REGISTERS
-};
-
-
-// A register-allocator view of a Lithium instruction. It contains the id of
-// the output operand and a list of input operand uses.
-
-class LInstruction;
-class LEnvironment;
-
-// Iterator for non-null temp operands.
-class TempIterator BASE_EMBEDDED {
- public:
- inline explicit TempIterator(LInstruction* instr);
- inline bool HasNext();
- inline LOperand* Next();
- inline void Advance();
-
- private:
- inline int AdvanceToNext(int start);
- LInstruction* instr_;
- int limit_;
- int current_;
-};
-
-
-// Iterator for non-constant input operands.
-class InputIterator BASE_EMBEDDED {
- public:
- inline explicit InputIterator(LInstruction* instr);
- inline bool HasNext();
- inline LOperand* Next();
- inline void Advance();
-
- private:
- inline int AdvanceToNext(int start);
- LInstruction* instr_;
- int limit_;
- int current_;
-};
-
-
-class UseIterator BASE_EMBEDDED {
- public:
- inline explicit UseIterator(LInstruction* instr);
- inline bool HasNext();
- inline LOperand* Next();
- inline void Advance();
-
- private:
- InputIterator input_iterator_;
- DeepIterator env_iterator_;
-};
-
-
-// Representation of the non-empty interval [start,end[.
-class UseInterval: public ZoneObject {
- public:
- UseInterval(LifetimePosition start, LifetimePosition end)
- : start_(start), end_(end), next_(NULL) {
- ASSERT(start.Value() < end.Value());
- }
-
- LifetimePosition start() const { return start_; }
- LifetimePosition end() const { return end_; }
- UseInterval* next() const { return next_; }
-
- // Split this interval at the given position without effecting the
- // live range that owns it. The interval must contain the position.
- void SplitAt(LifetimePosition pos);
-
- // If this interval intersects with other return smallest position
- // that belongs to both of them.
- LifetimePosition Intersect(const UseInterval* other) const {
- if (other->start().Value() < start_.Value()) return other->Intersect(this);
- if (other->start().Value() < end_.Value()) return other->start();
- return LifetimePosition::Invalid();
- }
-
- bool Contains(LifetimePosition point) const {
- return start_.Value() <= point.Value() && point.Value() < end_.Value();
- }
-
- private:
- void set_start(LifetimePosition start) { start_ = start; }
- void set_next(UseInterval* next) { next_ = next; }
-
- LifetimePosition start_;
- LifetimePosition end_;
- UseInterval* next_;
-
- friend class LiveRange; // Assigns to start_.
-};
-
-// Representation of a use position.
-class UsePosition: public ZoneObject {
- public:
- UsePosition(LifetimePosition pos, LOperand* operand);
-
- LOperand* operand() const { return operand_; }
- bool HasOperand() const { return operand_ != NULL; }
-
- LOperand* hint() const { return hint_; }
- void set_hint(LOperand* hint) { hint_ = hint; }
- bool HasHint() const;
- bool RequiresRegister() const;
- bool RegisterIsBeneficial() const;
-
- LifetimePosition pos() const { return pos_; }
- UsePosition* next() const { return next_; }
-
- private:
- void set_next(UsePosition* next) { next_ = next; }
-
- LOperand* operand_;
- LOperand* hint_;
- LifetimePosition pos_;
- UsePosition* next_;
- bool requires_reg_;
- bool register_beneficial_;
-
- friend class LiveRange;
-};
-
-// Representation of SSA values' live ranges as a collection of (continuous)
-// intervals over the instruction ordering.
-class LiveRange: public ZoneObject {
- public:
- static const int kInvalidAssignment = 0x7fffffff;
-
- explicit LiveRange(int id);
-
- UseInterval* first_interval() const { return first_interval_; }
- UsePosition* first_pos() const { return first_pos_; }
- LiveRange* parent() const { return parent_; }
- LiveRange* TopLevel() { return (parent_ == NULL) ? this : parent_; }
- LiveRange* next() const { return next_; }
- bool IsChild() const { return parent() != NULL; }
- int id() const { return id_; }
- bool IsFixed() const { return id_ < 0; }
- bool IsEmpty() const { return first_interval() == NULL; }
- LOperand* CreateAssignedOperand();
- int assigned_register() const { return assigned_register_; }
- int spill_start_index() const { return spill_start_index_; }
- void set_assigned_register(int reg, RegisterKind register_kind);
- void MakeSpilled();
-
- // Returns use position in this live range that follows both start
- // and last processed use position.
- // Modifies internal state of live range!
- UsePosition* NextUsePosition(LifetimePosition start);
-
- // Returns use position for which register is required in this live
- // range and which follows both start and last processed use position
- // Modifies internal state of live range!
- UsePosition* NextRegisterPosition(LifetimePosition start);
-
- // Returns use position for which register is beneficial in this live
- // range and which follows both start and last processed use position
- // Modifies internal state of live range!
- UsePosition* NextUsePositionRegisterIsBeneficial(LifetimePosition start);
-
- // Can this live range be spilled at this position.
- bool CanBeSpilled(LifetimePosition pos);
-
- // Split this live range at the given position which must follow the start of
- // the range.
- // All uses following the given position will be moved from this
- // live range to the result live range.
- void SplitAt(LifetimePosition position, LiveRange* result);
-
- bool IsDouble() const { return assigned_register_kind_ == DOUBLE_REGISTERS; }
- bool HasRegisterAssigned() const {
- return assigned_register_ != kInvalidAssignment;
- }
- bool IsSpilled() const { return spilled_; }
- UsePosition* FirstPosWithHint() const;
-
- LOperand* FirstHint() const {
- UsePosition* pos = FirstPosWithHint();
- if (pos != NULL) return pos->hint();
- return NULL;
- }
-
- LifetimePosition Start() const {
- ASSERT(!IsEmpty());
- return first_interval()->start();
- }
-
- LifetimePosition End() const {
- ASSERT(!IsEmpty());
- return last_interval_->end();
- }
-
- bool HasAllocatedSpillOperand() const;
- LOperand* GetSpillOperand() const { return spill_operand_; }
- void SetSpillOperand(LOperand* operand);
-
- void SetSpillStartIndex(int start) {
- spill_start_index_ = Min(start, spill_start_index_);
- }
-
- bool ShouldBeAllocatedBefore(const LiveRange* other) const;
- bool CanCover(LifetimePosition position) const;
- bool Covers(LifetimePosition position);
- LifetimePosition FirstIntersection(LiveRange* other);
-
- // Add a new interval or a new use position to this live range.
- void EnsureInterval(LifetimePosition start, LifetimePosition end);
- void AddUseInterval(LifetimePosition start, LifetimePosition end);
- UsePosition* AddUsePosition(LifetimePosition pos, LOperand* operand);
-
- // Shorten the most recently added interval by setting a new start.
- void ShortenTo(LifetimePosition start);
-
-#ifdef DEBUG
- // True if target overlaps an existing interval.
- bool HasOverlap(UseInterval* target) const;
- void Verify() const;
-#endif
-
- private:
- void ConvertOperands();
- UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
- void AdvanceLastProcessedMarker(UseInterval* to_start_of,
- LifetimePosition but_not_past) const;
-
- int id_;
- bool spilled_;
- int assigned_register_;
- RegisterKind assigned_register_kind_;
- UseInterval* last_interval_;
- UseInterval* first_interval_;
- UsePosition* first_pos_;
- LiveRange* parent_;
- LiveRange* next_;
- // This is used as a cache, it doesn't affect correctness.
- mutable UseInterval* current_interval_;
- UsePosition* last_processed_use_;
- LOperand* spill_operand_;
- int spill_start_index_;
-};
-
-
-class GrowableBitVector BASE_EMBEDDED {
- public:
- GrowableBitVector() : bits_(NULL) { }
-
- bool Contains(int value) const {
- if (!InBitsRange(value)) return false;
- return bits_->Contains(value);
- }
-
- void Add(int value) {
- EnsureCapacity(value);
- bits_->Add(value);
- }
-
- private:
- static const int kInitialLength = 1024;
-
- bool InBitsRange(int value) const {
- return bits_ != NULL && bits_->length() > value;
- }
-
- void EnsureCapacity(int value) {
- if (InBitsRange(value)) return;
- int new_length = bits_ == NULL ? kInitialLength : bits_->length();
- while (new_length <= value) new_length *= 2;
- BitVector* new_bits = new BitVector(new_length);
- if (bits_ != NULL) new_bits->CopyFrom(*bits_);
- bits_ = new_bits;
- }
-
- BitVector* bits_;
-};
-
-
-class LAllocator BASE_EMBEDDED {
- public:
- LAllocator(int first_virtual_register, HGraph* graph);
-
- static void TraceAlloc(const char* msg, ...);
-
- // Lithium translation support.
- // Record a use of an input operand in the current instruction.
- void RecordUse(HValue* value, LUnallocated* operand);
- // Record the definition of the output operand.
- void RecordDefinition(HInstruction* instr, LUnallocated* operand);
- // Record a temporary operand.
- void RecordTemporary(LUnallocated* operand);
-
- // Checks whether the value of a given virtual register is tagged.
- bool HasTaggedValue(int virtual_register) const;
-
- // Returns the register kind required by the given virtual register.
- RegisterKind RequiredRegisterKind(int virtual_register) const;
-
- // Control max function size.
- static int max_initial_value_ids();
-
- void Allocate(LChunk* chunk);
-
- const ZoneList<LiveRange*>* live_ranges() const { return &live_ranges_; }
- const Vector<LiveRange*>* fixed_live_ranges() const {
- return &fixed_live_ranges_;
- }
- const Vector<LiveRange*>* fixed_double_live_ranges() const {
- return &fixed_double_live_ranges_;
- }
-
- LChunk* chunk() const { return chunk_; }
- HGraph* graph() const { return graph_; }
-
- void MarkAsOsrEntry() {
- // There can be only one.
- ASSERT(!has_osr_entry_);
- // Simply set a flag to find and process instruction later.
- has_osr_entry_ = true;
- }
-
-#ifdef DEBUG
- void Verify() const;
-#endif
-
- private:
- void MeetRegisterConstraints();
- void ResolvePhis();
- void BuildLiveRanges();
- void AllocateGeneralRegisters();
- void AllocateDoubleRegisters();
- void ConnectRanges();
- void ResolveControlFlow();
- void PopulatePointerMaps();
- void ProcessOsrEntry();
- void AllocateRegisters();
- bool CanEagerlyResolveControlFlow(HBasicBlock* block) const;
- inline bool SafePointsAreInOrder() const;
-
- // Liveness analysis support.
- void InitializeLivenessAnalysis();
- BitVector* ComputeLiveOut(HBasicBlock* block);
- void AddInitialIntervals(HBasicBlock* block, BitVector* live_out);
- void ProcessInstructions(HBasicBlock* block, BitVector* live);
- void MeetRegisterConstraints(HBasicBlock* block);
- void MeetConstraintsBetween(LInstruction* first,
- LInstruction* second,
- int gap_index);
- void ResolvePhis(HBasicBlock* block);
-
- // Helper methods for building intervals.
- LOperand* AllocateFixed(LUnallocated* operand, int pos, bool is_tagged);
- LiveRange* LiveRangeFor(LOperand* operand);
- void Define(LifetimePosition position, LOperand* operand, LOperand* hint);
- void Use(LifetimePosition block_start,
- LifetimePosition position,
- LOperand* operand,
- LOperand* hint);
- void AddConstraintsGapMove(int index, LOperand* from, LOperand* to);
-
- // Helper methods for updating the life range lists.
- void AddToActive(LiveRange* range);
- void AddToInactive(LiveRange* range);
- void AddToUnhandledSorted(LiveRange* range);
- void AddToUnhandledUnsorted(LiveRange* range);
- void SortUnhandled();
- bool UnhandledIsSorted();
- void ActiveToHandled(LiveRange* range);
- void ActiveToInactive(LiveRange* range);
- void InactiveToHandled(LiveRange* range);
- void InactiveToActive(LiveRange* range);
- void FreeSpillSlot(LiveRange* range);
- LOperand* TryReuseSpillSlot(LiveRange* range);
-
- // Helper methods for allocating registers.
- bool TryAllocateFreeReg(LiveRange* range);
- void AllocateBlockedReg(LiveRange* range);
-
- // Live range splitting helpers.
-
- // Split the given range at the given position.
- // If range starts at or after the given position then the
- // original range is returned.
- // Otherwise returns the live range that starts at pos and contains
- // all uses from the original range that follow pos. Uses at pos will
- // still be owned by the original range after splitting.
- LiveRange* SplitAt(LiveRange* range, LifetimePosition pos);
-
- // Split the given range in a position from the interval [start, end].
- LiveRange* SplitBetween(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end);
-
- // Find a lifetime position in the interval [start, end] which
- // is optimal for splitting: it is either header of the outermost
- // loop covered by this interval or the latest possible position.
- LifetimePosition FindOptimalSplitPos(LifetimePosition start,
- LifetimePosition end);
-
- // Spill the given life range after position pos.
- void SpillAfter(LiveRange* range, LifetimePosition pos);
-
- // Spill the given life range after position start and up to position end.
- void SpillBetween(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end);
-
- void SplitAndSpillIntersecting(LiveRange* range);
-
- void Spill(LiveRange* range);
- bool IsBlockBoundary(LifetimePosition pos);
-
- // Helper methods for resolving control flow.
- void ResolveControlFlow(LiveRange* range,
- HBasicBlock* block,
- HBasicBlock* pred);
-
- // Return parallel move that should be used to connect ranges split at the
- // given position.
- LParallelMove* GetConnectingParallelMove(LifetimePosition pos);
-
- // Return the block which contains give lifetime position.
- HBasicBlock* GetBlock(LifetimePosition pos);
-
- // Helper methods for the fixed registers.
- int RegisterCount() const;
- static int FixedLiveRangeID(int index) { return -index - 1; }
- static int FixedDoubleLiveRangeID(int index);
- LiveRange* FixedLiveRangeFor(int index);
- LiveRange* FixedDoubleLiveRangeFor(int index);
- LiveRange* LiveRangeFor(int index);
- HPhi* LookupPhi(LOperand* operand) const;
- LGap* GetLastGap(HBasicBlock* block);
-
- const char* RegisterName(int allocation_index);
-
- inline bool IsGapAt(int index);
-
- inline LInstruction* InstructionAt(int index);
-
- inline LGap* GapAt(int index);
-
- LChunk* chunk_;
-
- // During liveness analysis keep a mapping from block id to live_in sets
- // for blocks already analyzed.
- ZoneList<BitVector*> live_in_sets_;
-
- // Liveness analysis results.
- ZoneList<LiveRange*> live_ranges_;
-
- // Lists of live ranges
- EmbeddedVector<LiveRange*, Register::kNumAllocatableRegisters>
- fixed_live_ranges_;
- EmbeddedVector<LiveRange*, DoubleRegister::kNumAllocatableRegisters>
- fixed_double_live_ranges_;
- ZoneList<LiveRange*> unhandled_live_ranges_;
- ZoneList<LiveRange*> active_live_ranges_;
- ZoneList<LiveRange*> inactive_live_ranges_;
- ZoneList<LiveRange*> reusable_slots_;
-
- // Next virtual register number to be assigned to temporaries.
- int next_virtual_register_;
- int first_artificial_register_;
- GrowableBitVector double_artificial_registers_;
-
- RegisterKind mode_;
- int num_registers_;
-
- HGraph* graph_;
-
- bool has_osr_entry_;
-
- DISALLOW_COPY_AND_ASSIGN(LAllocator);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_LITHIUM_ALLOCATOR_H_
diff --git a/src/3rdparty/v8/src/lithium.cc b/src/3rdparty/v8/src/lithium.cc
deleted file mode 100644
index aeac2db..0000000
--- a/src/3rdparty/v8/src/lithium.cc
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "lithium.h"
-
-namespace v8 {
-namespace internal {
-
-
-void LOperand::PrintTo(StringStream* stream) {
- LUnallocated* unalloc = NULL;
- switch (kind()) {
- case INVALID:
- break;
- case UNALLOCATED:
- unalloc = LUnallocated::cast(this);
- stream->Add("v%d", unalloc->virtual_register());
- switch (unalloc->policy()) {
- case LUnallocated::NONE:
- break;
- case LUnallocated::FIXED_REGISTER: {
- const char* register_name =
- Register::AllocationIndexToString(unalloc->fixed_index());
- stream->Add("(=%s)", register_name);
- break;
- }
- case LUnallocated::FIXED_DOUBLE_REGISTER: {
- const char* double_register_name =
- DoubleRegister::AllocationIndexToString(unalloc->fixed_index());
- stream->Add("(=%s)", double_register_name);
- break;
- }
- case LUnallocated::FIXED_SLOT:
- stream->Add("(=%dS)", unalloc->fixed_index());
- break;
- case LUnallocated::MUST_HAVE_REGISTER:
- stream->Add("(R)");
- break;
- case LUnallocated::WRITABLE_REGISTER:
- stream->Add("(WR)");
- break;
- case LUnallocated::SAME_AS_FIRST_INPUT:
- stream->Add("(1)");
- break;
- case LUnallocated::ANY:
- stream->Add("(-)");
- break;
- case LUnallocated::IGNORE:
- stream->Add("(0)");
- break;
- }
- break;
- case CONSTANT_OPERAND:
- stream->Add("[constant:%d]", index());
- break;
- case STACK_SLOT:
- stream->Add("[stack:%d]", index());
- break;
- case DOUBLE_STACK_SLOT:
- stream->Add("[double_stack:%d]", index());
- break;
- case REGISTER:
- stream->Add("[%s|R]", Register::AllocationIndexToString(index()));
- break;
- case DOUBLE_REGISTER:
- stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index()));
- break;
- case ARGUMENT:
- stream->Add("[arg:%d]", index());
- break;
- }
-}
-
-
-int LOperand::VirtualRegister() {
- LUnallocated* unalloc = LUnallocated::cast(this);
- return unalloc->virtual_register();
-}
-
-
-bool LParallelMove::IsRedundant() const {
- for (int i = 0; i < move_operands_.length(); ++i) {
- if (!move_operands_[i].IsRedundant()) return false;
- }
- return true;
-}
-
-
-void LParallelMove::PrintDataTo(StringStream* stream) const {
- bool first = true;
- for (int i = 0; i < move_operands_.length(); ++i) {
- if (!move_operands_[i].IsEliminated()) {
- LOperand* source = move_operands_[i].source();
- LOperand* destination = move_operands_[i].destination();
- if (!first) stream->Add(" ");
- first = false;
- if (source->Equals(destination)) {
- destination->PrintTo(stream);
- } else {
- destination->PrintTo(stream);
- stream->Add(" = ");
- source->PrintTo(stream);
- }
- stream->Add(";");
- }
- }
-}
-
-
-void LEnvironment::PrintTo(StringStream* stream) {
- stream->Add("[id=%d|", ast_id());
- stream->Add("[parameters=%d|", parameter_count());
- stream->Add("[arguments_stack_height=%d|", arguments_stack_height());
- for (int i = 0; i < values_.length(); ++i) {
- if (i != 0) stream->Add(";");
- if (values_[i] == NULL) {
- stream->Add("[hole]");
- } else {
- values_[i]->PrintTo(stream);
- }
- }
- stream->Add("]");
-}
-
-
-void LPointerMap::RecordPointer(LOperand* op) {
- // Do not record arguments as pointers.
- if (op->IsStackSlot() && op->index() < 0) return;
- ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
- pointer_operands_.Add(op);
-}
-
-
-void LPointerMap::PrintTo(StringStream* stream) {
- stream->Add("{");
- for (int i = 0; i < pointer_operands_.length(); ++i) {
- if (i != 0) stream->Add(";");
- pointer_operands_[i]->PrintTo(stream);
- }
- stream->Add("} @%d", position());
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/lithium.h b/src/3rdparty/v8/src/lithium.h
deleted file mode 100644
index d85a87c..0000000
--- a/src/3rdparty/v8/src/lithium.h
+++ /dev/null
@@ -1,592 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LITHIUM_H_
-#define V8_LITHIUM_H_
-
-#include "hydrogen.h"
-#include "safepoint-table.h"
-
-namespace v8 {
-namespace internal {
-
-class LOperand: public ZoneObject {
- public:
- enum Kind {
- INVALID,
- UNALLOCATED,
- CONSTANT_OPERAND,
- STACK_SLOT,
- DOUBLE_STACK_SLOT,
- REGISTER,
- DOUBLE_REGISTER,
- ARGUMENT
- };
-
- LOperand() : value_(KindField::encode(INVALID)) { }
-
- Kind kind() const { return KindField::decode(value_); }
- int index() const { return static_cast<int>(value_) >> kKindFieldWidth; }
- bool IsConstantOperand() const { return kind() == CONSTANT_OPERAND; }
- bool IsStackSlot() const { return kind() == STACK_SLOT; }
- bool IsDoubleStackSlot() const { return kind() == DOUBLE_STACK_SLOT; }
- bool IsRegister() const { return kind() == REGISTER; }
- bool IsDoubleRegister() const { return kind() == DOUBLE_REGISTER; }
- bool IsArgument() const { return kind() == ARGUMENT; }
- bool IsUnallocated() const { return kind() == UNALLOCATED; }
- bool Equals(LOperand* other) const { return value_ == other->value_; }
- int VirtualRegister();
-
- void PrintTo(StringStream* stream);
- void ConvertTo(Kind kind, int index) {
- value_ = KindField::encode(kind);
- value_ |= index << kKindFieldWidth;
- ASSERT(this->index() == index);
- }
-
- protected:
- static const int kKindFieldWidth = 3;
- class KindField : public BitField<Kind, 0, kKindFieldWidth> { };
-
- LOperand(Kind kind, int index) { ConvertTo(kind, index); }
-
- unsigned value_;
-};
-
-
-class LUnallocated: public LOperand {
- public:
- enum Policy {
- NONE,
- ANY,
- FIXED_REGISTER,
- FIXED_DOUBLE_REGISTER,
- FIXED_SLOT,
- MUST_HAVE_REGISTER,
- WRITABLE_REGISTER,
- SAME_AS_FIRST_INPUT,
- IGNORE
- };
-
- // Lifetime of operand inside the instruction.
- enum Lifetime {
- // USED_AT_START operand is guaranteed to be live only at
- // instruction start. Register allocator is free to assign the same register
- // to some other operand used inside instruction (i.e. temporary or
- // output).
- USED_AT_START,
-
- // USED_AT_END operand is treated as live until the end of
- // instruction. This means that register allocator will not reuse it's
- // register for any other operand inside instruction.
- USED_AT_END
- };
-
- explicit LUnallocated(Policy policy) : LOperand(UNALLOCATED, 0) {
- Initialize(policy, 0, USED_AT_END);
- }
-
- LUnallocated(Policy policy, int fixed_index) : LOperand(UNALLOCATED, 0) {
- Initialize(policy, fixed_index, USED_AT_END);
- }
-
- LUnallocated(Policy policy, Lifetime lifetime) : LOperand(UNALLOCATED, 0) {
- Initialize(policy, 0, lifetime);
- }
-
- // The superclass has a KindField. Some policies have a signed fixed
- // index in the upper bits.
- static const int kPolicyWidth = 4;
- static const int kLifetimeWidth = 1;
- static const int kVirtualRegisterWidth = 17;
-
- static const int kPolicyShift = kKindFieldWidth;
- static const int kLifetimeShift = kPolicyShift + kPolicyWidth;
- static const int kVirtualRegisterShift = kLifetimeShift + kLifetimeWidth;
- static const int kFixedIndexShift =
- kVirtualRegisterShift + kVirtualRegisterWidth;
-
- class PolicyField : public BitField<Policy, kPolicyShift, kPolicyWidth> { };
-
- class LifetimeField
- : public BitField<Lifetime, kLifetimeShift, kLifetimeWidth> {
- };
-
- class VirtualRegisterField
- : public BitField<unsigned,
- kVirtualRegisterShift,
- kVirtualRegisterWidth> {
- };
-
- static const int kMaxVirtualRegisters = 1 << (kVirtualRegisterWidth + 1);
- static const int kMaxFixedIndices = 128;
-
- bool HasIgnorePolicy() const { return policy() == IGNORE; }
- bool HasNoPolicy() const { return policy() == NONE; }
- bool HasAnyPolicy() const {
- return policy() == ANY;
- }
- bool HasFixedPolicy() const {
- return policy() == FIXED_REGISTER ||
- policy() == FIXED_DOUBLE_REGISTER ||
- policy() == FIXED_SLOT;
- }
- bool HasRegisterPolicy() const {
- return policy() == WRITABLE_REGISTER || policy() == MUST_HAVE_REGISTER;
- }
- bool HasSameAsInputPolicy() const {
- return policy() == SAME_AS_FIRST_INPUT;
- }
- Policy policy() const { return PolicyField::decode(value_); }
- void set_policy(Policy policy) {
- value_ &= ~PolicyField::mask();
- value_ |= PolicyField::encode(policy);
- }
- int fixed_index() const {
- return static_cast<int>(value_) >> kFixedIndexShift;
- }
-
- unsigned virtual_register() const {
- return VirtualRegisterField::decode(value_);
- }
-
- void set_virtual_register(unsigned id) {
- value_ &= ~VirtualRegisterField::mask();
- value_ |= VirtualRegisterField::encode(id);
- }
-
- LUnallocated* CopyUnconstrained() {
- LUnallocated* result = new LUnallocated(ANY);
- result->set_virtual_register(virtual_register());
- return result;
- }
-
- static LUnallocated* cast(LOperand* op) {
- ASSERT(op->IsUnallocated());
- return reinterpret_cast<LUnallocated*>(op);
- }
-
- bool IsUsedAtStart() {
- return LifetimeField::decode(value_) == USED_AT_START;
- }
-
- private:
- void Initialize(Policy policy, int fixed_index, Lifetime lifetime) {
- value_ |= PolicyField::encode(policy);
- value_ |= LifetimeField::encode(lifetime);
- value_ |= fixed_index << kFixedIndexShift;
- ASSERT(this->fixed_index() == fixed_index);
- }
-};
-
-
-class LMoveOperands BASE_EMBEDDED {
- public:
- LMoveOperands(LOperand* source, LOperand* destination)
- : source_(source), destination_(destination) {
- }
-
- LOperand* source() const { return source_; }
- void set_source(LOperand* operand) { source_ = operand; }
-
- LOperand* destination() const { return destination_; }
- void set_destination(LOperand* operand) { destination_ = operand; }
-
- // The gap resolver marks moves as "in-progress" by clearing the
- // destination (but not the source).
- bool IsPending() const {
- return destination_ == NULL && source_ != NULL;
- }
-
- // True if this move a move into the given destination operand.
- bool Blocks(LOperand* operand) const {
- return !IsEliminated() && source()->Equals(operand);
- }
-
- // A move is redundant if it's been eliminated, if its source and
- // destination are the same, or if its destination is unneeded.
- bool IsRedundant() const {
- return IsEliminated() || source_->Equals(destination_) || IsIgnored();
- }
-
- bool IsIgnored() const {
- return destination_ != NULL &&
- destination_->IsUnallocated() &&
- LUnallocated::cast(destination_)->HasIgnorePolicy();
- }
-
- // We clear both operands to indicate move that's been eliminated.
- void Eliminate() { source_ = destination_ = NULL; }
- bool IsEliminated() const {
- ASSERT(source_ != NULL || destination_ == NULL);
- return source_ == NULL;
- }
-
- private:
- LOperand* source_;
- LOperand* destination_;
-};
-
-
-class LConstantOperand: public LOperand {
- public:
- static LConstantOperand* Create(int index) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new LConstantOperand(index);
- }
-
- static LConstantOperand* cast(LOperand* op) {
- ASSERT(op->IsConstantOperand());
- return reinterpret_cast<LConstantOperand*>(op);
- }
-
- static void SetupCache();
-
- private:
- static const int kNumCachedOperands = 128;
- static LConstantOperand cache[];
-
- LConstantOperand() : LOperand() { }
- explicit LConstantOperand(int index) : LOperand(CONSTANT_OPERAND, index) { }
-};
-
-
-class LArgument: public LOperand {
- public:
- explicit LArgument(int index) : LOperand(ARGUMENT, index) { }
-
- static LArgument* cast(LOperand* op) {
- ASSERT(op->IsArgument());
- return reinterpret_cast<LArgument*>(op);
- }
-};
-
-
-class LStackSlot: public LOperand {
- public:
- static LStackSlot* Create(int index) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new LStackSlot(index);
- }
-
- static LStackSlot* cast(LOperand* op) {
- ASSERT(op->IsStackSlot());
- return reinterpret_cast<LStackSlot*>(op);
- }
-
- static void SetupCache();
-
- private:
- static const int kNumCachedOperands = 128;
- static LStackSlot cache[];
-
- LStackSlot() : LOperand() { }
- explicit LStackSlot(int index) : LOperand(STACK_SLOT, index) { }
-};
-
-
-class LDoubleStackSlot: public LOperand {
- public:
- static LDoubleStackSlot* Create(int index) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new LDoubleStackSlot(index);
- }
-
- static LDoubleStackSlot* cast(LOperand* op) {
- ASSERT(op->IsStackSlot());
- return reinterpret_cast<LDoubleStackSlot*>(op);
- }
-
- static void SetupCache();
-
- private:
- static const int kNumCachedOperands = 128;
- static LDoubleStackSlot cache[];
-
- LDoubleStackSlot() : LOperand() { }
- explicit LDoubleStackSlot(int index) : LOperand(DOUBLE_STACK_SLOT, index) { }
-};
-
-
-class LRegister: public LOperand {
- public:
- static LRegister* Create(int index) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new LRegister(index);
- }
-
- static LRegister* cast(LOperand* op) {
- ASSERT(op->IsRegister());
- return reinterpret_cast<LRegister*>(op);
- }
-
- static void SetupCache();
-
- private:
- static const int kNumCachedOperands = 16;
- static LRegister cache[];
-
- LRegister() : LOperand() { }
- explicit LRegister(int index) : LOperand(REGISTER, index) { }
-};
-
-
-class LDoubleRegister: public LOperand {
- public:
- static LDoubleRegister* Create(int index) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new LDoubleRegister(index);
- }
-
- static LDoubleRegister* cast(LOperand* op) {
- ASSERT(op->IsDoubleRegister());
- return reinterpret_cast<LDoubleRegister*>(op);
- }
-
- static void SetupCache();
-
- private:
- static const int kNumCachedOperands = 16;
- static LDoubleRegister cache[];
-
- LDoubleRegister() : LOperand() { }
- explicit LDoubleRegister(int index) : LOperand(DOUBLE_REGISTER, index) { }
-};
-
-
-class LParallelMove : public ZoneObject {
- public:
- LParallelMove() : move_operands_(4) { }
-
- void AddMove(LOperand* from, LOperand* to) {
- move_operands_.Add(LMoveOperands(from, to));
- }
-
- bool IsRedundant() const;
-
- const ZoneList<LMoveOperands>* move_operands() const {
- return &move_operands_;
- }
-
- void PrintDataTo(StringStream* stream) const;
-
- private:
- ZoneList<LMoveOperands> move_operands_;
-};
-
-
-class LPointerMap: public ZoneObject {
- public:
- explicit LPointerMap(int position)
- : pointer_operands_(8), position_(position), lithium_position_(-1) { }
-
- const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
- int position() const { return position_; }
- int lithium_position() const { return lithium_position_; }
-
- void set_lithium_position(int pos) {
- ASSERT(lithium_position_ == -1);
- lithium_position_ = pos;
- }
-
- void RecordPointer(LOperand* op);
- void PrintTo(StringStream* stream);
-
- private:
- ZoneList<LOperand*> pointer_operands_;
- int position_;
- int lithium_position_;
-};
-
-
-class LEnvironment: public ZoneObject {
- public:
- LEnvironment(Handle<JSFunction> closure,
- int ast_id,
- int parameter_count,
- int argument_count,
- int value_count,
- LEnvironment* outer)
- : closure_(closure),
- arguments_stack_height_(argument_count),
- deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
- translation_index_(-1),
- ast_id_(ast_id),
- parameter_count_(parameter_count),
- values_(value_count),
- representations_(value_count),
- spilled_registers_(NULL),
- spilled_double_registers_(NULL),
- outer_(outer) {
- }
-
- Handle<JSFunction> closure() const { return closure_; }
- int arguments_stack_height() const { return arguments_stack_height_; }
- int deoptimization_index() const { return deoptimization_index_; }
- int translation_index() const { return translation_index_; }
- int ast_id() const { return ast_id_; }
- int parameter_count() const { return parameter_count_; }
- LOperand** spilled_registers() const { return spilled_registers_; }
- LOperand** spilled_double_registers() const {
- return spilled_double_registers_;
- }
- const ZoneList<LOperand*>* values() const { return &values_; }
- LEnvironment* outer() const { return outer_; }
-
- void AddValue(LOperand* operand, Representation representation) {
- values_.Add(operand);
- representations_.Add(representation);
- }
-
- bool HasTaggedValueAt(int index) const {
- return representations_[index].IsTagged();
- }
-
- void Register(int deoptimization_index, int translation_index) {
- ASSERT(!HasBeenRegistered());
- deoptimization_index_ = deoptimization_index;
- translation_index_ = translation_index;
- }
- bool HasBeenRegistered() const {
- return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
- }
-
- void SetSpilledRegisters(LOperand** registers,
- LOperand** double_registers) {
- spilled_registers_ = registers;
- spilled_double_registers_ = double_registers;
- }
-
- void PrintTo(StringStream* stream);
-
- private:
- Handle<JSFunction> closure_;
- int arguments_stack_height_;
- int deoptimization_index_;
- int translation_index_;
- int ast_id_;
- int parameter_count_;
- ZoneList<LOperand*> values_;
- ZoneList<Representation> representations_;
-
- // Allocation index indexed arrays of spill slot operands for registers
- // that are also in spill slots at an OSR entry. NULL for environments
- // that do not correspond to an OSR entry.
- LOperand** spilled_registers_;
- LOperand** spilled_double_registers_;
-
- LEnvironment* outer_;
-
- friend class LCodegen;
-};
-
-
-// Iterates over the non-null, non-constant operands in an environment.
-class ShallowIterator BASE_EMBEDDED {
- public:
- explicit ShallowIterator(LEnvironment* env)
- : env_(env),
- limit_(env != NULL ? env->values()->length() : 0),
- current_(0) {
- current_ = AdvanceToNext(0);
- }
-
- inline bool HasNext() {
- return env_ != NULL && current_ < limit_;
- }
-
- inline LOperand* Next() {
- ASSERT(HasNext());
- return env_->values()->at(current_);
- }
-
- inline void Advance() {
- current_ = AdvanceToNext(current_ + 1);
- }
-
- inline LEnvironment* env() { return env_; }
-
- private:
- inline bool ShouldSkip(LOperand* op) {
- return op == NULL || op->IsConstantOperand() || op->IsArgument();
- }
-
- inline int AdvanceToNext(int start) {
- while (start < limit_ && ShouldSkip(env_->values()->at(start))) {
- start++;
- }
- return start;
- }
-
- LEnvironment* env_;
- int limit_;
- int current_;
-};
-
-
-// Iterator for non-null, non-constant operands incl. outer environments.
-class DeepIterator BASE_EMBEDDED {
- public:
- explicit DeepIterator(LEnvironment* env)
- : current_iterator_(env) { }
-
- inline bool HasNext() {
- if (current_iterator_.HasNext()) return true;
- if (current_iterator_.env() == NULL) return false;
- AdvanceToOuter();
- return current_iterator_.HasNext();
- }
-
- inline LOperand* Next() {
- ASSERT(current_iterator_.HasNext());
- return current_iterator_.Next();
- }
-
- inline void Advance() {
- if (current_iterator_.HasNext()) {
- current_iterator_.Advance();
- } else {
- AdvanceToOuter();
- }
- }
-
- private:
- inline void AdvanceToOuter() {
- current_iterator_ = ShallowIterator(current_iterator_.env()->outer());
- }
-
- ShallowIterator current_iterator_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_LITHIUM_H_
diff --git a/src/3rdparty/v8/src/liveedit-debugger.js b/src/3rdparty/v8/src/liveedit-debugger.js
deleted file mode 100644
index e05c53c..0000000
--- a/src/3rdparty/v8/src/liveedit-debugger.js
+++ /dev/null
@@ -1,1082 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// LiveEdit feature implementation. The script should be executed after
-// debug-debugger.js.
-
-// A LiveEdit namespace. It contains functions that modifies JavaScript code
-// according to changes of script source (if possible).
-//
-// When new script source is put in, the difference is calculated textually,
-// in form of list of delete/add/change chunks. The functions that include
-// change chunk(s) get recompiled, or their enclosing functions are
-// recompiled instead.
-// If the function may not be recompiled (e.g. it was completely erased in new
-// version of the script) it remains unchanged, but the code that could
-// create a new instance of this function goes away. An old version of script
-// is created to back up this obsolete function.
-// All unchanged functions have their positions updated accordingly.
-//
-// LiveEdit namespace is declared inside a single function constructor.
-Debug.LiveEdit = new function() {
-
- // Forward declaration for minifier.
- var FunctionStatus;
-
- // Applies the change to the script.
- // The change is in form of list of chunks encoded in a single array as
- // a series of triplets (pos1_start, pos1_end, pos2_end)
- function ApplyPatchMultiChunk(script, diff_array, new_source, preview_only,
- change_log) {
-
- var old_source = script.source;
-
- // Gather compile information about old version of script.
- var old_compile_info = GatherCompileInfo(old_source, script);
-
- // Build tree structures for old and new versions of the script.
- var root_old_node = BuildCodeInfoTree(old_compile_info);
-
- var pos_translator = new PosTranslator(diff_array);
-
- // Analyze changes.
- MarkChangedFunctions(root_old_node, pos_translator.GetChunks());
-
- // Find all SharedFunctionInfo's that were compiled from this script.
- FindLiveSharedInfos(root_old_node, script);
-
- // Gather compile information about new version of script.
- var new_compile_info;
- try {
- new_compile_info = GatherCompileInfo(new_source, script);
- } catch (e) {
- throw new Failure("Failed to compile new version of script: " + e);
- }
- var root_new_node = BuildCodeInfoTree(new_compile_info);
-
- // Link recompiled script data with other data.
- FindCorrespondingFunctions(root_old_node, root_new_node);
-
- // Prepare to-do lists.
- var replace_code_list = new Array();
- var link_to_old_script_list = new Array();
- var link_to_original_script_list = new Array();
- var update_positions_list = new Array();
-
- function HarvestTodo(old_node) {
- function CollectDamaged(node) {
- link_to_old_script_list.push(node);
- for (var i = 0; i < node.children.length; i++) {
- CollectDamaged(node.children[i]);
- }
- }
-
- // Recursively collects all newly compiled functions that are going into
- // business and should have link to the actual script updated.
- function CollectNew(node_list) {
- for (var i = 0; i < node_list.length; i++) {
- link_to_original_script_list.push(node_list[i]);
- CollectNew(node_list[i].children);
- }
- }
-
- if (old_node.status == FunctionStatus.DAMAGED) {
- CollectDamaged(old_node);
- return;
- }
- if (old_node.status == FunctionStatus.UNCHANGED) {
- update_positions_list.push(old_node);
- } else if (old_node.status == FunctionStatus.SOURCE_CHANGED) {
- update_positions_list.push(old_node);
- } else if (old_node.status == FunctionStatus.CHANGED) {
- replace_code_list.push(old_node);
- CollectNew(old_node.unmatched_new_nodes);
- }
- for (var i = 0; i < old_node.children.length; i++) {
- HarvestTodo(old_node.children[i]);
- }
- }
-
- var preview_description = {
- change_tree: DescribeChangeTree(root_old_node),
- textual_diff: {
- old_len: old_source.length,
- new_len: new_source.length,
- chunks: diff_array
- },
- updated: false
- };
-
- if (preview_only) {
- return preview_description;
- }
-
- HarvestTodo(root_old_node);
-
- // Collect shared infos for functions whose code need to be patched.
- var replaced_function_infos = new Array();
- for (var i = 0; i < replace_code_list.length; i++) {
- var live_shared_function_infos =
- replace_code_list[i].live_shared_function_infos;
-
- if (live_shared_function_infos) {
- for (var j = 0; j < live_shared_function_infos.length; j++) {
- replaced_function_infos.push(live_shared_function_infos[j]);
- }
- }
- }
-
- // We haven't changed anything before this line yet.
- // Committing all changes.
-
- // Check that function being patched is not currently on stack or drop them.
- var dropped_functions_number =
- CheckStackActivations(replaced_function_infos, change_log);
-
- preview_description.stack_modified = dropped_functions_number != 0;
-
- // Start with breakpoints. Convert their line/column positions and
- // temporary remove.
- var break_points_restorer = TemporaryRemoveBreakPoints(script, change_log);
-
- var old_script;
-
- // Create an old script only if there are function that should be linked
- // to old version.
- if (link_to_old_script_list.length == 0) {
- %LiveEditReplaceScript(script, new_source, null);
- old_script = void 0;
- } else {
- var old_script_name = CreateNameForOldScript(script);
-
- // Update the script text and create a new script representing an old
- // version of the script.
- old_script = %LiveEditReplaceScript(script, new_source,
- old_script_name);
-
- var link_to_old_script_report = new Array();
- change_log.push( { linked_to_old_script: link_to_old_script_report } );
-
- // We need to link to old script all former nested functions.
- for (var i = 0; i < link_to_old_script_list.length; i++) {
- LinkToOldScript(link_to_old_script_list[i], old_script,
- link_to_old_script_report);
- }
-
- preview_description.created_script_name = old_script_name;
- }
-
- // Link to an actual script all the functions that we are going to use.
- for (var i = 0; i < link_to_original_script_list.length; i++) {
- %LiveEditFunctionSetScript(
- link_to_original_script_list[i].info.shared_function_info, script);
- }
-
- for (var i = 0; i < replace_code_list.length; i++) {
- PatchFunctionCode(replace_code_list[i], change_log);
- }
-
- var position_patch_report = new Array();
- change_log.push( {position_patched: position_patch_report} );
-
- for (var i = 0; i < update_positions_list.length; i++) {
- // TODO(LiveEdit): take into account wether it's source_changed or
- // unchanged and whether positions changed at all.
- PatchPositions(update_positions_list[i], diff_array,
- position_patch_report);
-
- if (update_positions_list[i].live_shared_function_infos) {
- update_positions_list[i].live_shared_function_infos.
- forEach(function (info) {
- %LiveEditFunctionSourceUpdated(info.raw_array);
- });
- }
- }
-
- break_points_restorer(pos_translator, old_script);
-
- preview_description.updated = true;
- return preview_description;
- }
- // Function is public.
- this.ApplyPatchMultiChunk = ApplyPatchMultiChunk;
-
-
- // Fully compiles source string as a script. Returns Array of
- // FunctionCompileInfo -- a descriptions of all functions of the script.
- // Elements of array are ordered by start positions of functions (from top
- // to bottom) in the source. Fields outer_index and next_sibling_index help
- // to navigate the nesting structure of functions.
- //
- // All functions get compiled linked to script provided as parameter script.
- // TODO(LiveEdit): consider not using actual scripts as script, because
- // we have to manually erase all links right after compile.
- function GatherCompileInfo(source, script) {
- // Get function info, elements are partially sorted (it is a tree of
- // nested functions serialized as parent followed by serialized children.
- var raw_compile_info = %LiveEditGatherCompileInfo(script, source);
-
- // Sort function infos by start position field.
- var compile_info = new Array();
- var old_index_map = new Array();
- for (var i = 0; i < raw_compile_info.length; i++) {
- var info = new FunctionCompileInfo(raw_compile_info[i]);
- // Remove all links to the actual script. Breakpoints system and
- // LiveEdit itself believe that any function in heap that points to a
- // particular script is a regular function.
- // For some functions we will restore this link later.
- %LiveEditFunctionSetScript(info.shared_function_info, void 0);
- compile_info.push(info);
- old_index_map.push(i);
- }
-
- for (var i = 0; i < compile_info.length; i++) {
- var k = i;
- for (var j = i + 1; j < compile_info.length; j++) {
- if (compile_info[k].start_position > compile_info[j].start_position) {
- k = j;
- }
- }
- if (k != i) {
- var temp_info = compile_info[k];
- var temp_index = old_index_map[k];
- compile_info[k] = compile_info[i];
- old_index_map[k] = old_index_map[i];
- compile_info[i] = temp_info;
- old_index_map[i] = temp_index;
- }
- }
-
- // After sorting update outer_inder field using old_index_map. Also
- // set next_sibling_index field.
- var current_index = 0;
-
- // The recursive function, that goes over all children of a particular
- // node (i.e. function info).
- function ResetIndexes(new_parent_index, old_parent_index) {
- var previous_sibling = -1;
- while (current_index < compile_info.length &&
- compile_info[current_index].outer_index == old_parent_index) {
- var saved_index = current_index;
- compile_info[saved_index].outer_index = new_parent_index;
- if (previous_sibling != -1) {
- compile_info[previous_sibling].next_sibling_index = saved_index;
- }
- previous_sibling = saved_index;
- current_index++;
- ResetIndexes(saved_index, old_index_map[saved_index]);
- }
- if (previous_sibling != -1) {
- compile_info[previous_sibling].next_sibling_index = -1;
- }
- }
-
- ResetIndexes(-1, -1);
- Assert(current_index == compile_info.length);
-
- return compile_info;
- }
-
-
- // Replaces function's Code.
- function PatchFunctionCode(old_node, change_log) {
- var new_info = old_node.corresponding_node.info;
- if (old_node.live_shared_function_infos) {
- old_node.live_shared_function_infos.forEach(function (old_info) {
- %LiveEditReplaceFunctionCode(new_info.raw_array,
- old_info.raw_array);
-
- // The function got a new code. However, this new code brings all new
- // instances of SharedFunctionInfo for nested functions. However,
- // we want the original instances to be used wherever possible.
- // (This is because old instances and new instances will be both
- // linked to a script and breakpoints subsystem does not really
- // expects this; neither does LiveEdit subsystem on next call).
- for (var i = 0; i < old_node.children.length; i++) {
- if (old_node.children[i].corresponding_node) {
- var corresponding_child_info =
- old_node.children[i].corresponding_node.info.
- shared_function_info;
-
- if (old_node.children[i].live_shared_function_infos) {
- old_node.children[i].live_shared_function_infos.
- forEach(function (old_child_info) {
- %LiveEditReplaceRefToNestedFunction(old_info.info,
- corresponding_child_info,
- old_child_info.info);
- });
- }
- }
- }
- });
-
- change_log.push( {function_patched: new_info.function_name} );
- } else {
- change_log.push( {function_patched: new_info.function_name,
- function_info_not_found: true} );
- }
- }
-
-
- // Makes a function associated with another instance of a script (the
- // one representing its old version). This way the function still
- // may access its own text.
- function LinkToOldScript(old_info_node, old_script, report_array) {
- if (old_info_node.live_shared_function_infos) {
- old_info_node.live_shared_function_infos.
- forEach(function (info) {
- %LiveEditFunctionSetScript(info.info, old_script);
- });
-
- report_array.push( { name: old_info_node.info.function_name } );
- } else {
- report_array.push(
- { name: old_info_node.info.function_name, not_found: true } );
- }
- }
-
-
- // Returns function that restores breakpoints.
- function TemporaryRemoveBreakPoints(original_script, change_log) {
- var script_break_points = GetScriptBreakPoints(original_script);
-
- var break_points_update_report = [];
- change_log.push( { break_points_update: break_points_update_report } );
-
- var break_point_old_positions = [];
- for (var i = 0; i < script_break_points.length; i++) {
- var break_point = script_break_points[i];
-
- break_point.clear();
-
- // TODO(LiveEdit): be careful with resource offset here.
- var break_point_position = Debug.findScriptSourcePosition(original_script,
- break_point.line(), break_point.column());
-
- var old_position_description = {
- position: break_point_position,
- line: break_point.line(),
- column: break_point.column()
- }
- break_point_old_positions.push(old_position_description);
- }
-
-
- // Restores breakpoints and creates their copies in the "old" copy of
- // the script.
- return function (pos_translator, old_script_copy_opt) {
- // Update breakpoints (change positions and restore them in old version
- // of script.
- for (var i = 0; i < script_break_points.length; i++) {
- var break_point = script_break_points[i];
- if (old_script_copy_opt) {
- var clone = break_point.cloneForOtherScript(old_script_copy_opt);
- clone.set(old_script_copy_opt);
-
- break_points_update_report.push( {
- type: "copied_to_old",
- id: break_point.number(),
- new_id: clone.number(),
- positions: break_point_old_positions[i]
- } );
- }
-
- var updated_position = pos_translator.Translate(
- break_point_old_positions[i].position,
- PosTranslator.ShiftWithTopInsideChunkHandler);
-
- var new_location =
- original_script.locationFromPosition(updated_position, false);
-
- break_point.update_positions(new_location.line, new_location.column);
-
- var new_position_description = {
- position: updated_position,
- line: new_location.line,
- column: new_location.column
- }
-
- break_point.set(original_script);
-
- break_points_update_report.push( { type: "position_changed",
- id: break_point.number(),
- old_positions: break_point_old_positions[i],
- new_positions: new_position_description
- } );
- }
- }
- }
-
-
- function Assert(condition, message) {
- if (!condition) {
- if (message) {
- throw "Assert " + message;
- } else {
- throw "Assert";
- }
- }
- }
-
- function DiffChunk(pos1, pos2, len1, len2) {
- this.pos1 = pos1;
- this.pos2 = pos2;
- this.len1 = len1;
- this.len2 = len2;
- }
-
- function PosTranslator(diff_array) {
- var chunks = new Array();
- var current_diff = 0;
- for (var i = 0; i < diff_array.length; i += 3) {
- var pos1_begin = diff_array[i];
- var pos2_begin = pos1_begin + current_diff;
- var pos1_end = diff_array[i + 1];
- var pos2_end = diff_array[i + 2];
- chunks.push(new DiffChunk(pos1_begin, pos2_begin, pos1_end - pos1_begin,
- pos2_end - pos2_begin));
- current_diff = pos2_end - pos1_end;
- }
- this.chunks = chunks;
- }
- PosTranslator.prototype.GetChunks = function() {
- return this.chunks;
- }
-
- PosTranslator.prototype.Translate = function(pos, inside_chunk_handler) {
- var array = this.chunks;
- if (array.length == 0 || pos < array[0].pos1) {
- return pos;
- }
- var chunk_index1 = 0;
- var chunk_index2 = array.length - 1;
-
- while (chunk_index1 < chunk_index2) {
- var middle_index = Math.floor((chunk_index1 + chunk_index2) / 2);
- if (pos < array[middle_index + 1].pos1) {
- chunk_index2 = middle_index;
- } else {
- chunk_index1 = middle_index + 1;
- }
- }
- var chunk = array[chunk_index1];
- if (pos >= chunk.pos1 + chunk.len1) {
- return pos + chunk.pos2 + chunk.len2 - chunk.pos1 - chunk.len1;
- }
-
- if (!inside_chunk_handler) {
- inside_chunk_handler = PosTranslator.DefaultInsideChunkHandler;
- }
- return inside_chunk_handler(pos, chunk);
- }
-
- PosTranslator.DefaultInsideChunkHandler = function(pos, diff_chunk) {
- Assert(false, "Cannot translate position in changed area");
- }
-
- PosTranslator.ShiftWithTopInsideChunkHandler =
- function(pos, diff_chunk) {
- // We carelessly do not check whether we stay inside the chunk after
- // translation.
- return pos - diff_chunk.pos1 + diff_chunk.pos2;
- }
-
- var FunctionStatus = {
- // No change to function or its inner functions; however its positions
- // in script may have been shifted.
- UNCHANGED: "unchanged",
- // The code of a function remains unchanged, but something happened inside
- // some inner functions.
- SOURCE_CHANGED: "source changed",
- // The code of a function is changed or some nested function cannot be
- // properly patched so this function must be recompiled.
- CHANGED: "changed",
- // Function is changed but cannot be patched.
- DAMAGED: "damaged"
- }
-
- function CodeInfoTreeNode(code_info, children, array_index) {
- this.info = code_info;
- this.children = children;
- // an index in array of compile_info
- this.array_index = array_index;
- this.parent = void 0;
-
- this.status = FunctionStatus.UNCHANGED;
- // Status explanation is used for debugging purposes and will be shown
- // in user UI if some explanations are needed.
- this.status_explanation = void 0;
- this.new_start_pos = void 0;
- this.new_end_pos = void 0;
- this.corresponding_node = void 0;
- this.unmatched_new_nodes = void 0;
-
- // 'Textual' correspondence/matching is weaker than 'pure'
- // correspondence/matching. We need 'textual' level for visual presentation
- // in UI, we use 'pure' level for actual code manipulation.
- // Sometimes only function body is changed (functions in old and new script
- // textually correspond), but we cannot patch the code, so we see them
- // as an old function deleted and new function created.
- this.textual_corresponding_node = void 0;
- this.textually_unmatched_new_nodes = void 0;
-
- this.live_shared_function_infos = void 0;
- }
-
- // From array of function infos that is implicitly a tree creates
- // an actual tree of functions in script.
- function BuildCodeInfoTree(code_info_array) {
- // Throughtout all function we iterate over input array.
- var index = 0;
-
- // Recursive function that builds a branch of tree.
- function BuildNode() {
- var my_index = index;
- index++;
- var child_array = new Array();
- while (index < code_info_array.length &&
- code_info_array[index].outer_index == my_index) {
- child_array.push(BuildNode());
- }
- var node = new CodeInfoTreeNode(code_info_array[my_index], child_array,
- my_index);
- for (var i = 0; i < child_array.length; i++) {
- child_array[i].parent = node;
- }
- return node;
- }
-
- var root = BuildNode();
- Assert(index == code_info_array.length);
- return root;
- }
-
- // Applies a list of the textual diff chunks onto the tree of functions.
- // Determines status of each function (from unchanged to damaged). However
- // children of unchanged functions are ignored.
- function MarkChangedFunctions(code_info_tree, chunks) {
-
- // A convenient interator over diff chunks that also translates
- // positions from old to new in a current non-changed part of script.
- var chunk_it = new function() {
- var chunk_index = 0;
- var pos_diff = 0;
- this.current = function() { return chunks[chunk_index]; }
- this.next = function() {
- var chunk = chunks[chunk_index];
- pos_diff = chunk.pos2 + chunk.len2 - (chunk.pos1 + chunk.len1);
- chunk_index++;
- }
- this.done = function() { return chunk_index >= chunks.length; }
- this.TranslatePos = function(pos) { return pos + pos_diff; }
- };
-
- // A recursive function that processes internals of a function and all its
- // inner functions. Iterator chunk_it initially points to a chunk that is
- // below function start.
- function ProcessInternals(info_node) {
- info_node.new_start_pos = chunk_it.TranslatePos(
- info_node.info.start_position);
- var child_index = 0;
- var code_changed = false;
- var source_changed = false;
- // Simultaneously iterates over child functions and over chunks.
- while (!chunk_it.done() &&
- chunk_it.current().pos1 < info_node.info.end_position) {
- if (child_index < info_node.children.length) {
- var child = info_node.children[child_index];
-
- if (child.info.end_position <= chunk_it.current().pos1) {
- ProcessUnchangedChild(child);
- child_index++;
- continue;
- } else if (child.info.start_position >=
- chunk_it.current().pos1 + chunk_it.current().len1) {
- code_changed = true;
- chunk_it.next();
- continue;
- } else if (child.info.start_position <= chunk_it.current().pos1 &&
- child.info.end_position >= chunk_it.current().pos1 +
- chunk_it.current().len1) {
- ProcessInternals(child);
- source_changed = source_changed ||
- ( child.status != FunctionStatus.UNCHANGED );
- code_changed = code_changed ||
- ( child.status == FunctionStatus.DAMAGED );
- child_index++;
- continue;
- } else {
- code_changed = true;
- child.status = FunctionStatus.DAMAGED;
- child.status_explanation =
- "Text diff overlaps with function boundary";
- child_index++;
- continue;
- }
- } else {
- if (chunk_it.current().pos1 + chunk_it.current().len1 <=
- info_node.info.end_position) {
- info_node.status = FunctionStatus.CHANGED;
- chunk_it.next();
- continue;
- } else {
- info_node.status = FunctionStatus.DAMAGED;
- info_node.status_explanation =
- "Text diff overlaps with function boundary";
- return;
- }
- }
- Assert("Unreachable", false);
- }
- while (child_index < info_node.children.length) {
- var child = info_node.children[child_index];
- ProcessUnchangedChild(child);
- child_index++;
- }
- if (code_changed) {
- info_node.status = FunctionStatus.CHANGED;
- } else if (source_changed) {
- info_node.status = FunctionStatus.SOURCE_CHANGED;
- }
- info_node.new_end_pos =
- chunk_it.TranslatePos(info_node.info.end_position);
- }
-
- function ProcessUnchangedChild(node) {
- node.new_start_pos = chunk_it.TranslatePos(node.info.start_position);
- node.new_end_pos = chunk_it.TranslatePos(node.info.end_position);
- }
-
- ProcessInternals(code_info_tree);
- }
-
- // For ecah old function (if it is not damaged) tries to find a corresponding
- // function in new script. Typically it should succeed (non-damaged functions
- // by definition may only have changes inside their bodies). However there are
- // reasons for corresponence not to be found; function with unmodified text
- // in new script may become enclosed into other function; the innocent change
- // inside function body may in fact be something like "} function B() {" that
- // splits a function into 2 functions.
- function FindCorrespondingFunctions(old_code_tree, new_code_tree) {
-
- // A recursive function that tries to find a correspondence for all
- // child functions and for their inner functions.
- function ProcessChildren(old_node, new_node) {
- var old_children = old_node.children;
- var new_children = new_node.children;
-
- var unmatched_new_nodes_list = [];
- var textually_unmatched_new_nodes_list = [];
-
- var old_index = 0;
- var new_index = 0;
- while (old_index < old_children.length) {
- if (old_children[old_index].status == FunctionStatus.DAMAGED) {
- old_index++;
- } else if (new_index < new_children.length) {
- if (new_children[new_index].info.start_position <
- old_children[old_index].new_start_pos) {
- unmatched_new_nodes_list.push(new_children[new_index]);
- textually_unmatched_new_nodes_list.push(new_children[new_index]);
- new_index++;
- } else if (new_children[new_index].info.start_position ==
- old_children[old_index].new_start_pos) {
- if (new_children[new_index].info.end_position ==
- old_children[old_index].new_end_pos) {
- old_children[old_index].corresponding_node =
- new_children[new_index];
- old_children[old_index].textual_corresponding_node =
- new_children[new_index];
- if (old_children[old_index].status != FunctionStatus.UNCHANGED) {
- ProcessChildren(old_children[old_index],
- new_children[new_index]);
- if (old_children[old_index].status == FunctionStatus.DAMAGED) {
- unmatched_new_nodes_list.push(
- old_children[old_index].corresponding_node);
- old_children[old_index].corresponding_node = void 0;
- old_node.status = FunctionStatus.CHANGED;
- }
- }
- } else {
- old_children[old_index].status = FunctionStatus.DAMAGED;
- old_children[old_index].status_explanation =
- "No corresponding function in new script found";
- old_node.status = FunctionStatus.CHANGED;
- unmatched_new_nodes_list.push(new_children[new_index]);
- textually_unmatched_new_nodes_list.push(new_children[new_index]);
- }
- new_index++;
- old_index++;
- } else {
- old_children[old_index].status = FunctionStatus.DAMAGED;
- old_children[old_index].status_explanation =
- "No corresponding function in new script found";
- old_node.status = FunctionStatus.CHANGED;
- old_index++;
- }
- } else {
- old_children[old_index].status = FunctionStatus.DAMAGED;
- old_children[old_index].status_explanation =
- "No corresponding function in new script found";
- old_node.status = FunctionStatus.CHANGED;
- old_index++;
- }
- }
-
- while (new_index < new_children.length) {
- unmatched_new_nodes_list.push(new_children[new_index]);
- textually_unmatched_new_nodes_list.push(new_children[new_index]);
- new_index++;
- }
-
- if (old_node.status == FunctionStatus.CHANGED) {
- var why_wrong_expectations =
- WhyFunctionExpectationsDiffer(old_node.info, new_node.info);
- if (why_wrong_expectations) {
- old_node.status = FunctionStatus.DAMAGED;
- old_node.status_explanation = why_wrong_expectations;
- }
- }
- old_node.unmatched_new_nodes = unmatched_new_nodes_list;
- old_node.textually_unmatched_new_nodes =
- textually_unmatched_new_nodes_list;
- }
-
- ProcessChildren(old_code_tree, new_code_tree);
-
- old_code_tree.corresponding_node = new_code_tree;
- old_code_tree.textual_corresponding_node = new_code_tree;
-
- Assert(old_code_tree.status != FunctionStatus.DAMAGED,
- "Script became damaged");
- }
-
- function FindLiveSharedInfos(old_code_tree, script) {
- var shared_raw_list = %LiveEditFindSharedFunctionInfosForScript(script);
-
- var shared_infos = new Array();
-
- for (var i = 0; i < shared_raw_list.length; i++) {
- shared_infos.push(new SharedInfoWrapper(shared_raw_list[i]));
- }
-
- // Finds all SharedFunctionInfos that corresponds to compile info
- // in old version of the script.
- function FindFunctionInfos(compile_info) {
- var wrappers = [];
-
- for (var i = 0; i < shared_infos.length; i++) {
- var wrapper = shared_infos[i];
- if (wrapper.start_position == compile_info.start_position &&
- wrapper.end_position == compile_info.end_position) {
- wrappers.push(wrapper);
- }
- }
-
- if (wrappers.length > 0) {
- return wrappers;
- }
- }
-
- function TraverseTree(node) {
- node.live_shared_function_infos = FindFunctionInfos(node.info);
-
- for (var i = 0; i < node.children.length; i++) {
- TraverseTree(node.children[i]);
- }
- }
-
- TraverseTree(old_code_tree);
- }
-
-
- // An object describing function compilation details. Its index fields
- // apply to indexes inside array that stores these objects.
- function FunctionCompileInfo(raw_array) {
- this.function_name = raw_array[0];
- this.start_position = raw_array[1];
- this.end_position = raw_array[2];
- this.param_num = raw_array[3];
- this.code = raw_array[4];
- this.code_scope_info = raw_array[5];
- this.scope_info = raw_array[6];
- this.outer_index = raw_array[7];
- this.shared_function_info = raw_array[8];
- this.next_sibling_index = null;
- this.raw_array = raw_array;
- }
-
- function SharedInfoWrapper(raw_array) {
- this.function_name = raw_array[0];
- this.start_position = raw_array[1];
- this.end_position = raw_array[2];
- this.info = raw_array[3];
- this.raw_array = raw_array;
- }
-
- // Changes positions (including all statments) in function.
- function PatchPositions(old_info_node, diff_array, report_array) {
- if (old_info_node.live_shared_function_infos) {
- old_info_node.live_shared_function_infos.forEach(function (info) {
- %LiveEditPatchFunctionPositions(info.raw_array,
- diff_array);
- });
-
- report_array.push( { name: old_info_node.info.function_name } );
- } else {
- // TODO(LiveEdit): function is not compiled yet or is already collected.
- report_array.push(
- { name: old_info_node.info.function_name, info_not_found: true } );
- }
- }
-
- // Adds a suffix to script name to mark that it is old version.
- function CreateNameForOldScript(script) {
- // TODO(635): try better than this; support several changes.
- return script.name + " (old)";
- }
-
- // Compares a function interface old and new version, whether it
- // changed or not. Returns explanation if they differ.
- function WhyFunctionExpectationsDiffer(function_info1, function_info2) {
- // Check that function has the same number of parameters (there may exist
- // an adapter, that won't survive function parameter number change).
- if (function_info1.param_num != function_info2.param_num) {
- return "Changed parameter number: " + function_info1.param_num +
- " and " + function_info2.param_num;
- }
- var scope_info1 = function_info1.scope_info;
- var scope_info2 = function_info2.scope_info;
-
- var scope_info1_text;
- var scope_info2_text;
-
- if (scope_info1) {
- scope_info1_text = scope_info1.toString();
- } else {
- scope_info1_text = "";
- }
- if (scope_info2) {
- scope_info2_text = scope_info2.toString();
- } else {
- scope_info2_text = "";
- }
-
- if (scope_info1_text != scope_info2_text) {
- return "Incompatible variable maps: [" + scope_info1_text +
- "] and [" + scope_info2_text + "]";
- }
- // No differences. Return undefined.
- return;
- }
-
- // Minifier forward declaration.
- var FunctionPatchabilityStatus;
-
- // For array of wrapped shared function infos checks that none of them
- // have activations on stack (of any thread). Throws a Failure exception
- // if this proves to be false.
- function CheckStackActivations(shared_wrapper_list, change_log) {
- var shared_list = new Array();
- for (var i = 0; i < shared_wrapper_list.length; i++) {
- shared_list[i] = shared_wrapper_list[i].info;
- }
- var result = %LiveEditCheckAndDropActivations(shared_list, true);
- if (result[shared_list.length]) {
- // Extra array element may contain error message.
- throw new Failure(result[shared_list.length]);
- }
-
- var problems = new Array();
- var dropped = new Array();
- for (var i = 0; i < shared_list.length; i++) {
- var shared = shared_wrapper_list[i];
- if (result[i] == FunctionPatchabilityStatus.REPLACED_ON_ACTIVE_STACK) {
- dropped.push({ name: shared.function_name } );
- } else if (result[i] != FunctionPatchabilityStatus.AVAILABLE_FOR_PATCH) {
- var description = {
- name: shared.function_name,
- start_pos: shared.start_position,
- end_pos: shared.end_position,
- replace_problem:
- FunctionPatchabilityStatus.SymbolName(result[i])
- };
- problems.push(description);
- }
- }
- if (dropped.length > 0) {
- change_log.push({ dropped_from_stack: dropped });
- }
- if (problems.length > 0) {
- change_log.push( { functions_on_stack: problems } );
- throw new Failure("Blocked by functions on stack");
- }
-
- return dropped.length;
- }
-
- // A copy of the FunctionPatchabilityStatus enum from liveedit.h
- var FunctionPatchabilityStatus = {
- AVAILABLE_FOR_PATCH: 1,
- BLOCKED_ON_ACTIVE_STACK: 2,
- BLOCKED_ON_OTHER_STACK: 3,
- BLOCKED_UNDER_NATIVE_CODE: 4,
- REPLACED_ON_ACTIVE_STACK: 5
- }
-
- FunctionPatchabilityStatus.SymbolName = function(code) {
- var enum = FunctionPatchabilityStatus;
- for (name in enum) {
- if (enum[name] == code) {
- return name;
- }
- }
- }
-
-
- // A logical failure in liveedit process. This means that change_log
- // is valid and consistent description of what happened.
- function Failure(message) {
- this.message = message;
- }
- // Function (constructor) is public.
- this.Failure = Failure;
-
- Failure.prototype.toString = function() {
- return "LiveEdit Failure: " + this.message;
- }
-
- // A testing entry.
- function GetPcFromSourcePos(func, source_pos) {
- return %GetFunctionCodePositionFromSource(func, source_pos);
- }
- // Function is public.
- this.GetPcFromSourcePos = GetPcFromSourcePos;
-
- // LiveEdit main entry point: changes a script text to a new string.
- function SetScriptSource(script, new_source, preview_only, change_log) {
- var old_source = script.source;
- var diff = CompareStrings(old_source, new_source);
- return ApplyPatchMultiChunk(script, diff, new_source, preview_only,
- change_log);
- }
- // Function is public.
- this.SetScriptSource = SetScriptSource;
-
- function CompareStrings(s1, s2) {
- return %LiveEditCompareStrings(s1, s2);
- }
-
- // Applies the change to the script.
- // The change is always a substring (change_pos, change_pos + change_len)
- // being replaced with a completely different string new_str.
- // This API is a legacy and is obsolete.
- //
- // @param {Script} script that is being changed
- // @param {Array} change_log a list that collects engineer-readable
- // description of what happened.
- function ApplySingleChunkPatch(script, change_pos, change_len, new_str,
- change_log) {
- var old_source = script.source;
-
- // Prepare new source string.
- var new_source = old_source.substring(0, change_pos) +
- new_str + old_source.substring(change_pos + change_len);
-
- return ApplyPatchMultiChunk(script,
- [ change_pos, change_pos + change_len, change_pos + new_str.length],
- new_source, false, change_log);
- }
-
- // Creates JSON description for a change tree.
- function DescribeChangeTree(old_code_tree) {
-
- function ProcessOldNode(node) {
- var child_infos = [];
- for (var i = 0; i < node.children.length; i++) {
- var child = node.children[i];
- if (child.status != FunctionStatus.UNCHANGED) {
- child_infos.push(ProcessOldNode(child));
- }
- }
- var new_child_infos = [];
- if (node.textually_unmatched_new_nodes) {
- for (var i = 0; i < node.textually_unmatched_new_nodes.length; i++) {
- var child = node.textually_unmatched_new_nodes[i];
- new_child_infos.push(ProcessNewNode(child));
- }
- }
- var res = {
- name: node.info.function_name,
- positions: DescribePositions(node),
- status: node.status,
- children: child_infos,
- new_children: new_child_infos
- };
- if (node.status_explanation) {
- res.status_explanation = node.status_explanation;
- }
- if (node.textual_corresponding_node) {
- res.new_positions = DescribePositions(node.textual_corresponding_node);
- }
- return res;
- }
-
- function ProcessNewNode(node) {
- var child_infos = [];
- // Do not list ancestors.
- if (false) {
- for (var i = 0; i < node.children.length; i++) {
- child_infos.push(ProcessNewNode(node.children[i]));
- }
- }
- var res = {
- name: node.info.function_name,
- positions: DescribePositions(node),
- children: child_infos,
- };
- return res;
- }
-
- function DescribePositions(node) {
- return {
- start_position: node.info.start_position,
- end_position: node.info.end_position
- };
- }
-
- return ProcessOldNode(old_code_tree);
- }
-
-
- // Functions are public for tests.
- this.TestApi = {
- PosTranslator: PosTranslator,
- CompareStrings: CompareStrings,
- ApplySingleChunkPatch: ApplySingleChunkPatch
- }
-}
diff --git a/src/3rdparty/v8/src/liveedit.cc b/src/3rdparty/v8/src/liveedit.cc
deleted file mode 100644
index 1466766..0000000
--- a/src/3rdparty/v8/src/liveedit.cc
+++ /dev/null
@@ -1,1693 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "v8.h"
-
-#include "liveedit.h"
-
-#include "compiler.h"
-#include "compilation-cache.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "global-handles.h"
-#include "parser.h"
-#include "scopeinfo.h"
-#include "scopes.h"
-#include "v8memory.h"
-
-namespace v8 {
-namespace internal {
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-
-void SetElementNonStrict(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value) {
- // Ignore return value from SetElement. It can only be a failure if there
- // are element setters causing exceptions and the debugger context has none
- // of these.
- Handle<Object> no_failure;
- no_failure = SetElement(object, index, value, kNonStrictMode);
- ASSERT(!no_failure.is_null());
- USE(no_failure);
-}
-
-// A simple implementation of dynamic programming algorithm. It solves
-// the problem of finding the difference of 2 arrays. It uses a table of results
-// of subproblems. Each cell contains a number together with 2-bit flag
-// that helps building the chunk list.
-class Differencer {
- public:
- explicit Differencer(Comparator::Input* input)
- : input_(input), len1_(input->getLength1()), len2_(input->getLength2()) {
- buffer_ = NewArray<int>(len1_ * len2_);
- }
- ~Differencer() {
- DeleteArray(buffer_);
- }
-
- void Initialize() {
- int array_size = len1_ * len2_;
- for (int i = 0; i < array_size; i++) {
- buffer_[i] = kEmptyCellValue;
- }
- }
-
- // Makes sure that result for the full problem is calculated and stored
- // in the table together with flags showing a path through subproblems.
- void FillTable() {
- CompareUpToTail(0, 0);
- }
-
- void SaveResult(Comparator::Output* chunk_writer) {
- ResultWriter writer(chunk_writer);
-
- int pos1 = 0;
- int pos2 = 0;
- while (true) {
- if (pos1 < len1_) {
- if (pos2 < len2_) {
- Direction dir = get_direction(pos1, pos2);
- switch (dir) {
- case EQ:
- writer.eq();
- pos1++;
- pos2++;
- break;
- case SKIP1:
- writer.skip1(1);
- pos1++;
- break;
- case SKIP2:
- case SKIP_ANY:
- writer.skip2(1);
- pos2++;
- break;
- default:
- UNREACHABLE();
- }
- } else {
- writer.skip1(len1_ - pos1);
- break;
- }
- } else {
- if (len2_ != pos2) {
- writer.skip2(len2_ - pos2);
- }
- break;
- }
- }
- writer.close();
- }
-
- private:
- Comparator::Input* input_;
- int* buffer_;
- int len1_;
- int len2_;
-
- enum Direction {
- EQ = 0,
- SKIP1,
- SKIP2,
- SKIP_ANY,
-
- MAX_DIRECTION_FLAG_VALUE = SKIP_ANY
- };
-
- // Computes result for a subtask and optionally caches it in the buffer table.
- // All results values are shifted to make space for flags in the lower bits.
- int CompareUpToTail(int pos1, int pos2) {
- if (pos1 < len1_) {
- if (pos2 < len2_) {
- int cached_res = get_value4(pos1, pos2);
- if (cached_res == kEmptyCellValue) {
- Direction dir;
- int res;
- if (input_->equals(pos1, pos2)) {
- res = CompareUpToTail(pos1 + 1, pos2 + 1);
- dir = EQ;
- } else {
- int res1 = CompareUpToTail(pos1 + 1, pos2) +
- (1 << kDirectionSizeBits);
- int res2 = CompareUpToTail(pos1, pos2 + 1) +
- (1 << kDirectionSizeBits);
- if (res1 == res2) {
- res = res1;
- dir = SKIP_ANY;
- } else if (res1 < res2) {
- res = res1;
- dir = SKIP1;
- } else {
- res = res2;
- dir = SKIP2;
- }
- }
- set_value4_and_dir(pos1, pos2, res, dir);
- cached_res = res;
- }
- return cached_res;
- } else {
- return (len1_ - pos1) << kDirectionSizeBits;
- }
- } else {
- return (len2_ - pos2) << kDirectionSizeBits;
- }
- }
-
- inline int& get_cell(int i1, int i2) {
- return buffer_[i1 + i2 * len1_];
- }
-
- // Each cell keeps a value plus direction. Value is multiplied by 4.
- void set_value4_and_dir(int i1, int i2, int value4, Direction dir) {
- ASSERT((value4 & kDirectionMask) == 0);
- get_cell(i1, i2) = value4 | dir;
- }
-
- int get_value4(int i1, int i2) {
- return get_cell(i1, i2) & (kMaxUInt32 ^ kDirectionMask);
- }
- Direction get_direction(int i1, int i2) {
- return static_cast<Direction>(get_cell(i1, i2) & kDirectionMask);
- }
-
- static const int kDirectionSizeBits = 2;
- static const int kDirectionMask = (1 << kDirectionSizeBits) - 1;
- static const int kEmptyCellValue = -1 << kDirectionSizeBits;
-
- // This method only holds static assert statement (unfortunately you cannot
- // place one in class scope).
- void StaticAssertHolder() {
- STATIC_ASSERT(MAX_DIRECTION_FLAG_VALUE < (1 << kDirectionSizeBits));
- }
-
- class ResultWriter {
- public:
- explicit ResultWriter(Comparator::Output* chunk_writer)
- : chunk_writer_(chunk_writer), pos1_(0), pos2_(0),
- pos1_begin_(-1), pos2_begin_(-1), has_open_chunk_(false) {
- }
- void eq() {
- FlushChunk();
- pos1_++;
- pos2_++;
- }
- void skip1(int len1) {
- StartChunk();
- pos1_ += len1;
- }
- void skip2(int len2) {
- StartChunk();
- pos2_ += len2;
- }
- void close() {
- FlushChunk();
- }
-
- private:
- Comparator::Output* chunk_writer_;
- int pos1_;
- int pos2_;
- int pos1_begin_;
- int pos2_begin_;
- bool has_open_chunk_;
-
- void StartChunk() {
- if (!has_open_chunk_) {
- pos1_begin_ = pos1_;
- pos2_begin_ = pos2_;
- has_open_chunk_ = true;
- }
- }
-
- void FlushChunk() {
- if (has_open_chunk_) {
- chunk_writer_->AddChunk(pos1_begin_, pos2_begin_,
- pos1_ - pos1_begin_, pos2_ - pos2_begin_);
- has_open_chunk_ = false;
- }
- }
- };
-};
-
-
-void Comparator::CalculateDifference(Comparator::Input* input,
- Comparator::Output* result_writer) {
- Differencer differencer(input);
- differencer.Initialize();
- differencer.FillTable();
- differencer.SaveResult(result_writer);
-}
-
-
-static bool CompareSubstrings(Isolate* isolate, Handle<String> s1, int pos1,
- Handle<String> s2, int pos2, int len) {
- StringInputBuffer& buf1 = *isolate->liveedit_compare_substrings_buf1();
- StringInputBuffer& buf2 = *isolate->liveedit_compare_substrings_buf2();
- buf1.Reset(*s1);
- buf1.Seek(pos1);
- buf2.Reset(*s2);
- buf2.Seek(pos2);
- for (int i = 0; i < len; i++) {
- ASSERT(buf1.has_more() && buf2.has_more());
- if (buf1.GetNext() != buf2.GetNext()) {
- return false;
- }
- }
- return true;
-}
-
-
-// A helper class that writes chunk numbers into JSArray.
-// Each chunk is stored as 3 array elements: (pos1_begin, pos1_end, pos2_end).
-class CompareOutputArrayWriter {
- public:
- CompareOutputArrayWriter()
- : array_(FACTORY->NewJSArray(10)), current_size_(0) {}
-
- Handle<JSArray> GetResult() {
- return array_;
- }
-
- void WriteChunk(int char_pos1, int char_pos2, int char_len1, int char_len2) {
- SetElementNonStrict(array_,
- current_size_,
- Handle<Object>(Smi::FromInt(char_pos1)));
- SetElementNonStrict(array_,
- current_size_ + 1,
- Handle<Object>(Smi::FromInt(char_pos1 + char_len1)));
- SetElementNonStrict(array_,
- current_size_ + 2,
- Handle<Object>(Smi::FromInt(char_pos2 + char_len2)));
- current_size_ += 3;
- }
-
- private:
- Handle<JSArray> array_;
- int current_size_;
-};
-
-
-// Represents 2 strings as 2 arrays of tokens.
-// TODO(LiveEdit): Currently it's actually an array of charactres.
-// Make array of tokens instead.
-class TokensCompareInput : public Comparator::Input {
- public:
- TokensCompareInput(Handle<String> s1, int offset1, int len1,
- Handle<String> s2, int offset2, int len2)
- : s1_(s1), offset1_(offset1), len1_(len1),
- s2_(s2), offset2_(offset2), len2_(len2) {
- }
- virtual int getLength1() {
- return len1_;
- }
- virtual int getLength2() {
- return len2_;
- }
- bool equals(int index1, int index2) {
- return s1_->Get(offset1_ + index1) == s2_->Get(offset2_ + index2);
- }
-
- private:
- Handle<String> s1_;
- int offset1_;
- int len1_;
- Handle<String> s2_;
- int offset2_;
- int len2_;
-};
-
-
-// Stores compare result in JSArray. Converts substring positions
-// to absolute positions.
-class TokensCompareOutput : public Comparator::Output {
- public:
- TokensCompareOutput(CompareOutputArrayWriter* array_writer,
- int offset1, int offset2)
- : array_writer_(array_writer), offset1_(offset1), offset2_(offset2) {
- }
-
- void AddChunk(int pos1, int pos2, int len1, int len2) {
- array_writer_->WriteChunk(pos1 + offset1_, pos2 + offset2_, len1, len2);
- }
-
- private:
- CompareOutputArrayWriter* array_writer_;
- int offset1_;
- int offset2_;
-};
-
-
-// Wraps raw n-elements line_ends array as a list of n+1 lines. The last line
-// never has terminating new line character.
-class LineEndsWrapper {
- public:
- explicit LineEndsWrapper(Handle<String> string)
- : ends_array_(CalculateLineEnds(string, false)),
- string_len_(string->length()) {
- }
- int length() {
- return ends_array_->length() + 1;
- }
- // Returns start for any line including start of the imaginary line after
- // the last line.
- int GetLineStart(int index) {
- if (index == 0) {
- return 0;
- } else {
- return GetLineEnd(index - 1);
- }
- }
- int GetLineEnd(int index) {
- if (index == ends_array_->length()) {
- // End of the last line is always an end of the whole string.
- // If the string ends with a new line character, the last line is an
- // empty string after this character.
- return string_len_;
- } else {
- return GetPosAfterNewLine(index);
- }
- }
-
- private:
- Handle<FixedArray> ends_array_;
- int string_len_;
-
- int GetPosAfterNewLine(int index) {
- return Smi::cast(ends_array_->get(index))->value() + 1;
- }
-};
-
-
-// Represents 2 strings as 2 arrays of lines.
-class LineArrayCompareInput : public Comparator::Input {
- public:
- LineArrayCompareInput(Isolate* isolate, Handle<String> s1, Handle<String> s2,
- LineEndsWrapper line_ends1, LineEndsWrapper line_ends2)
- : isolate_(isolate), s1_(s1), s2_(s2), line_ends1_(line_ends1),
- line_ends2_(line_ends2) {
- }
- int getLength1() {
- return line_ends1_.length();
- }
- int getLength2() {
- return line_ends2_.length();
- }
- bool equals(int index1, int index2) {
- int line_start1 = line_ends1_.GetLineStart(index1);
- int line_start2 = line_ends2_.GetLineStart(index2);
- int line_end1 = line_ends1_.GetLineEnd(index1);
- int line_end2 = line_ends2_.GetLineEnd(index2);
- int len1 = line_end1 - line_start1;
- int len2 = line_end2 - line_start2;
- if (len1 != len2) {
- return false;
- }
- return CompareSubstrings(isolate_, s1_, line_start1, s2_, line_start2,
- len1);
- }
-
- private:
- Isolate* isolate_;
- Handle<String> s1_;
- Handle<String> s2_;
- LineEndsWrapper line_ends1_;
- LineEndsWrapper line_ends2_;
-};
-
-
-// Stores compare result in JSArray. For each chunk tries to conduct
-// a fine-grained nested diff token-wise.
-class TokenizingLineArrayCompareOutput : public Comparator::Output {
- public:
- TokenizingLineArrayCompareOutput(LineEndsWrapper line_ends1,
- LineEndsWrapper line_ends2,
- Handle<String> s1, Handle<String> s2)
- : line_ends1_(line_ends1), line_ends2_(line_ends2), s1_(s1), s2_(s2) {
- }
-
- void AddChunk(int line_pos1, int line_pos2, int line_len1, int line_len2) {
- int char_pos1 = line_ends1_.GetLineStart(line_pos1);
- int char_pos2 = line_ends2_.GetLineStart(line_pos2);
- int char_len1 = line_ends1_.GetLineStart(line_pos1 + line_len1) - char_pos1;
- int char_len2 = line_ends2_.GetLineStart(line_pos2 + line_len2) - char_pos2;
-
- if (char_len1 < CHUNK_LEN_LIMIT && char_len2 < CHUNK_LEN_LIMIT) {
- // Chunk is small enough to conduct a nested token-level diff.
- HandleScope subTaskScope;
-
- TokensCompareInput tokens_input(s1_, char_pos1, char_len1,
- s2_, char_pos2, char_len2);
- TokensCompareOutput tokens_output(&array_writer_, char_pos1,
- char_pos2);
-
- Comparator::CalculateDifference(&tokens_input, &tokens_output);
- } else {
- array_writer_.WriteChunk(char_pos1, char_pos2, char_len1, char_len2);
- }
- }
-
- Handle<JSArray> GetResult() {
- return array_writer_.GetResult();
- }
-
- private:
- static const int CHUNK_LEN_LIMIT = 800;
-
- CompareOutputArrayWriter array_writer_;
- LineEndsWrapper line_ends1_;
- LineEndsWrapper line_ends2_;
- Handle<String> s1_;
- Handle<String> s2_;
-};
-
-
-Handle<JSArray> LiveEdit::CompareStrings(Handle<String> s1,
- Handle<String> s2) {
- LineEndsWrapper line_ends1(s1);
- LineEndsWrapper line_ends2(s2);
-
- LineArrayCompareInput
- input(Isolate::Current(), s1, s2, line_ends1, line_ends2);
- TokenizingLineArrayCompareOutput output(line_ends1, line_ends2, s1, s2);
-
- Comparator::CalculateDifference(&input, &output);
-
- return output.GetResult();
-}
-
-
-static void CompileScriptForTracker(Isolate* isolate, Handle<Script> script) {
- // TODO(635): support extensions.
- PostponeInterruptsScope postpone(isolate);
-
- // Build AST.
- CompilationInfo info(script);
- info.MarkAsGlobal();
- if (ParserApi::Parse(&info)) {
- // Compile the code.
- LiveEditFunctionTracker tracker(info.isolate(), info.function());
- if (Compiler::MakeCodeForLiveEdit(&info)) {
- ASSERT(!info.code().is_null());
- tracker.RecordRootFunctionInfo(info.code());
- } else {
- info.isolate()->StackOverflow();
- }
- }
-}
-
-
-// Unwraps JSValue object, returning its field "value"
-static Handle<Object> UnwrapJSValue(Handle<JSValue> jsValue) {
- return Handle<Object>(jsValue->value());
-}
-
-
-// Wraps any object into a OpaqueReference, that will hide the object
-// from JavaScript.
-static Handle<JSValue> WrapInJSValue(Object* object) {
- Handle<JSFunction> constructor =
- Isolate::Current()->opaque_reference_function();
- Handle<JSValue> result =
- Handle<JSValue>::cast(FACTORY->NewJSObject(constructor));
- result->set_value(object);
- return result;
-}
-
-
-// Simple helper class that creates more or less typed structures over
-// JSArray object. This is an adhoc method of passing structures from C++
-// to JavaScript.
-template<typename S>
-class JSArrayBasedStruct {
- public:
- static S Create() {
- Handle<JSArray> array = FACTORY->NewJSArray(S::kSize_);
- return S(array);
- }
- static S cast(Object* object) {
- JSArray* array = JSArray::cast(object);
- Handle<JSArray> array_handle(array);
- return S(array_handle);
- }
- explicit JSArrayBasedStruct(Handle<JSArray> array) : array_(array) {
- }
- Handle<JSArray> GetJSArray() {
- return array_;
- }
-
- protected:
- void SetField(int field_position, Handle<Object> value) {
- SetElementNonStrict(array_, field_position, value);
- }
- void SetSmiValueField(int field_position, int value) {
- SetElementNonStrict(array_,
- field_position,
- Handle<Smi>(Smi::FromInt(value)));
- }
- Object* GetField(int field_position) {
- return array_->GetElementNoExceptionThrown(field_position);
- }
- int GetSmiValueField(int field_position) {
- Object* res = GetField(field_position);
- return Smi::cast(res)->value();
- }
-
- private:
- Handle<JSArray> array_;
-};
-
-
-// Represents some function compilation details. This structure will be used
-// from JavaScript. It contains Code object, which is kept wrapped
-// into a BlindReference for sanitizing reasons.
-class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
- public:
- explicit FunctionInfoWrapper(Handle<JSArray> array)
- : JSArrayBasedStruct<FunctionInfoWrapper>(array) {
- }
- void SetInitialProperties(Handle<String> name, int start_position,
- int end_position, int param_num, int parent_index) {
- HandleScope scope;
- this->SetField(kFunctionNameOffset_, name);
- this->SetSmiValueField(kStartPositionOffset_, start_position);
- this->SetSmiValueField(kEndPositionOffset_, end_position);
- this->SetSmiValueField(kParamNumOffset_, param_num);
- this->SetSmiValueField(kParentIndexOffset_, parent_index);
- }
- void SetFunctionCode(Handle<Code> function_code,
- Handle<Object> code_scope_info) {
- Handle<JSValue> code_wrapper = WrapInJSValue(*function_code);
- this->SetField(kCodeOffset_, code_wrapper);
-
- Handle<JSValue> scope_wrapper = WrapInJSValue(*code_scope_info);
- this->SetField(kCodeScopeInfoOffset_, scope_wrapper);
- }
- void SetOuterScopeInfo(Handle<Object> scope_info_array) {
- this->SetField(kOuterScopeInfoOffset_, scope_info_array);
- }
- void SetSharedFunctionInfo(Handle<SharedFunctionInfo> info) {
- Handle<JSValue> info_holder = WrapInJSValue(*info);
- this->SetField(kSharedFunctionInfoOffset_, info_holder);
- }
- int GetParentIndex() {
- return this->GetSmiValueField(kParentIndexOffset_);
- }
- Handle<Code> GetFunctionCode() {
- Handle<Object> raw_result = UnwrapJSValue(Handle<JSValue>(
- JSValue::cast(this->GetField(kCodeOffset_))));
- return Handle<Code>::cast(raw_result);
- }
- Handle<Object> GetCodeScopeInfo() {
- Handle<Object> raw_result = UnwrapJSValue(Handle<JSValue>(
- JSValue::cast(this->GetField(kCodeScopeInfoOffset_))));
- return raw_result;
- }
- int GetStartPosition() {
- return this->GetSmiValueField(kStartPositionOffset_);
- }
- int GetEndPosition() {
- return this->GetSmiValueField(kEndPositionOffset_);
- }
-
- private:
- static const int kFunctionNameOffset_ = 0;
- static const int kStartPositionOffset_ = 1;
- static const int kEndPositionOffset_ = 2;
- static const int kParamNumOffset_ = 3;
- static const int kCodeOffset_ = 4;
- static const int kCodeScopeInfoOffset_ = 5;
- static const int kOuterScopeInfoOffset_ = 6;
- static const int kParentIndexOffset_ = 7;
- static const int kSharedFunctionInfoOffset_ = 8;
- static const int kSize_ = 9;
-
- friend class JSArrayBasedStruct<FunctionInfoWrapper>;
-};
-
-
-// Wraps SharedFunctionInfo along with some of its fields for passing it
-// back to JavaScript. SharedFunctionInfo object itself is additionally
-// wrapped into BlindReference for sanitizing reasons.
-class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
- public:
- static bool IsInstance(Handle<JSArray> array) {
- return array->length() == Smi::FromInt(kSize_) &&
- array->GetElementNoExceptionThrown(kSharedInfoOffset_)->IsJSValue();
- }
-
- explicit SharedInfoWrapper(Handle<JSArray> array)
- : JSArrayBasedStruct<SharedInfoWrapper>(array) {
- }
-
- void SetProperties(Handle<String> name, int start_position, int end_position,
- Handle<SharedFunctionInfo> info) {
- HandleScope scope;
- this->SetField(kFunctionNameOffset_, name);
- Handle<JSValue> info_holder = WrapInJSValue(*info);
- this->SetField(kSharedInfoOffset_, info_holder);
- this->SetSmiValueField(kStartPositionOffset_, start_position);
- this->SetSmiValueField(kEndPositionOffset_, end_position);
- }
- Handle<SharedFunctionInfo> GetInfo() {
- Object* element = this->GetField(kSharedInfoOffset_);
- Handle<JSValue> value_wrapper(JSValue::cast(element));
- Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
- return Handle<SharedFunctionInfo>::cast(raw_result);
- }
-
- private:
- static const int kFunctionNameOffset_ = 0;
- static const int kStartPositionOffset_ = 1;
- static const int kEndPositionOffset_ = 2;
- static const int kSharedInfoOffset_ = 3;
- static const int kSize_ = 4;
-
- friend class JSArrayBasedStruct<SharedInfoWrapper>;
-};
-
-
-class FunctionInfoListener {
- public:
- FunctionInfoListener() {
- current_parent_index_ = -1;
- len_ = 0;
- result_ = FACTORY->NewJSArray(10);
- }
-
- void FunctionStarted(FunctionLiteral* fun) {
- HandleScope scope;
- FunctionInfoWrapper info = FunctionInfoWrapper::Create();
- info.SetInitialProperties(fun->name(), fun->start_position(),
- fun->end_position(), fun->num_parameters(),
- current_parent_index_);
- current_parent_index_ = len_;
- SetElementNonStrict(result_, len_, info.GetJSArray());
- len_++;
- }
-
- void FunctionDone() {
- HandleScope scope;
- FunctionInfoWrapper info =
- FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(current_parent_index_));
- current_parent_index_ = info.GetParentIndex();
- }
-
- // Saves only function code, because for a script function we
- // may never create a SharedFunctionInfo object.
- void FunctionCode(Handle<Code> function_code) {
- FunctionInfoWrapper info =
- FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(current_parent_index_));
- info.SetFunctionCode(function_code, Handle<Object>(HEAP->null_value()));
- }
-
- // Saves full information about a function: its code, its scope info
- // and a SharedFunctionInfo object.
- void FunctionInfo(Handle<SharedFunctionInfo> shared, Scope* scope) {
- if (!shared->IsSharedFunctionInfo()) {
- return;
- }
- FunctionInfoWrapper info =
- FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(current_parent_index_));
- info.SetFunctionCode(Handle<Code>(shared->code()),
- Handle<Object>(shared->scope_info()));
- info.SetSharedFunctionInfo(shared);
-
- Handle<Object> scope_info_list(SerializeFunctionScope(scope));
- info.SetOuterScopeInfo(scope_info_list);
- }
-
- Handle<JSArray> GetResult() { return result_; }
-
- private:
- Object* SerializeFunctionScope(Scope* scope) {
- HandleScope handle_scope;
-
- Handle<JSArray> scope_info_list = FACTORY->NewJSArray(10);
- int scope_info_length = 0;
-
- // Saves some description of scope. It stores name and indexes of
- // variables in the whole scope chain. Null-named slots delimit
- // scopes of this chain.
- Scope* outer_scope = scope->outer_scope();
- if (outer_scope == NULL) {
- return HEAP->undefined_value();
- }
- do {
- ZoneList<Variable*> list(10);
- outer_scope->CollectUsedVariables(&list);
- int j = 0;
- for (int i = 0; i < list.length(); i++) {
- Variable* var1 = list[i];
- Slot* slot = var1->AsSlot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- if (j != i) {
- list[j] = var1;
- }
- j++;
- }
- }
-
- // Sort it.
- for (int k = 1; k < j; k++) {
- int l = k;
- for (int m = k + 1; m < j; m++) {
- if (list[l]->AsSlot()->index() > list[m]->AsSlot()->index()) {
- l = m;
- }
- }
- list[k] = list[l];
- }
- for (int i = 0; i < j; i++) {
- SetElementNonStrict(scope_info_list,
- scope_info_length,
- list[i]->name());
- scope_info_length++;
- SetElementNonStrict(
- scope_info_list,
- scope_info_length,
- Handle<Smi>(Smi::FromInt(list[i]->AsSlot()->index())));
- scope_info_length++;
- }
- SetElementNonStrict(scope_info_list,
- scope_info_length,
- Handle<Object>(HEAP->null_value()));
- scope_info_length++;
-
- outer_scope = outer_scope->outer_scope();
- } while (outer_scope != NULL);
-
- return *scope_info_list;
- }
-
- Handle<JSArray> result_;
- int len_;
- int current_parent_index_;
-};
-
-
-JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
- Handle<String> source) {
- Isolate* isolate = Isolate::Current();
- CompilationZoneScope zone_scope(DELETE_ON_EXIT);
-
- FunctionInfoListener listener;
- Handle<Object> original_source = Handle<Object>(script->source());
- script->set_source(*source);
- isolate->set_active_function_info_listener(&listener);
- CompileScriptForTracker(isolate, script);
- isolate->set_active_function_info_listener(NULL);
- script->set_source(*original_source);
-
- return *(listener.GetResult());
-}
-
-
-void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
- HandleScope scope;
- int len = Smi::cast(array->length())->value();
- for (int i = 0; i < len; i++) {
- Handle<SharedFunctionInfo> info(
- SharedFunctionInfo::cast(array->GetElementNoExceptionThrown(i)));
- SharedInfoWrapper info_wrapper = SharedInfoWrapper::Create();
- Handle<String> name_handle(String::cast(info->name()));
- info_wrapper.SetProperties(name_handle, info->start_position(),
- info->end_position(), info);
- SetElementNonStrict(array, i, info_wrapper.GetJSArray());
- }
-}
-
-
-// Visitor that collects all references to a particular code object,
-// including "CODE_TARGET" references in other code objects.
-// It works in context of ZoneScope.
-class ReferenceCollectorVisitor : public ObjectVisitor {
- public:
- explicit ReferenceCollectorVisitor(Code* original)
- : original_(original), rvalues_(10), reloc_infos_(10), code_entries_(10) {
- }
-
- virtual void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
- if (*p == original_) {
- rvalues_.Add(p);
- }
- }
- }
-
- virtual void VisitCodeEntry(Address entry) {
- if (Code::GetObjectFromEntryAddress(entry) == original_) {
- code_entries_.Add(entry);
- }
- }
-
- virtual void VisitCodeTarget(RelocInfo* rinfo) {
- if (RelocInfo::IsCodeTarget(rinfo->rmode()) &&
- Code::GetCodeFromTargetAddress(rinfo->target_address()) == original_) {
- reloc_infos_.Add(*rinfo);
- }
- }
-
- virtual void VisitDebugTarget(RelocInfo* rinfo) {
- VisitCodeTarget(rinfo);
- }
-
- // Post-visiting method that iterates over all collected references and
- // modifies them.
- void Replace(Code* substitution) {
- for (int i = 0; i < rvalues_.length(); i++) {
- *(rvalues_[i]) = substitution;
- }
- Address substitution_entry = substitution->instruction_start();
- for (int i = 0; i < reloc_infos_.length(); i++) {
- reloc_infos_[i].set_target_address(substitution_entry);
- }
- for (int i = 0; i < code_entries_.length(); i++) {
- Address entry = code_entries_[i];
- Memory::Address_at(entry) = substitution_entry;
- }
- }
-
- private:
- Code* original_;
- ZoneList<Object**> rvalues_;
- ZoneList<RelocInfo> reloc_infos_;
- ZoneList<Address> code_entries_;
-};
-
-
-// Finds all references to original and replaces them with substitution.
-static void ReplaceCodeObject(Code* original, Code* substitution) {
- ASSERT(!HEAP->InNewSpace(substitution));
-
- AssertNoAllocation no_allocations_please;
-
- // A zone scope for ReferenceCollectorVisitor.
- ZoneScope scope(DELETE_ON_EXIT);
-
- ReferenceCollectorVisitor visitor(original);
-
- // Iterate over all roots. Stack frames may have pointer into original code,
- // so temporary replace the pointers with offset numbers
- // in prologue/epilogue.
- {
- HEAP->IterateStrongRoots(&visitor, VISIT_ALL);
- }
-
- // Now iterate over all pointers of all objects, including code_target
- // implicit pointers.
- HeapIterator iterator;
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- obj->Iterate(&visitor);
- }
-
- visitor.Replace(substitution);
-}
-
-
-// Check whether the code is natural function code (not a lazy-compile stub
-// code).
-static bool IsJSFunctionCode(Code* code) {
- return code->kind() == Code::FUNCTION;
-}
-
-
-// Returns true if an instance of candidate were inlined into function's code.
-static bool IsInlined(JSFunction* function, SharedFunctionInfo* candidate) {
- AssertNoAllocation no_gc;
-
- if (function->code()->kind() != Code::OPTIMIZED_FUNCTION) return false;
-
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(function->code()->deoptimization_data());
-
- if (data == HEAP->empty_fixed_array()) return false;
-
- FixedArray* literals = data->LiteralArray();
-
- int inlined_count = data->InlinedFunctionCount()->value();
- for (int i = 0; i < inlined_count; ++i) {
- JSFunction* inlined = JSFunction::cast(literals->get(i));
- if (inlined->shared() == candidate) return true;
- }
-
- return false;
-}
-
-
-class DependentFunctionsDeoptimizingVisitor : public OptimizedFunctionVisitor {
- public:
- explicit DependentFunctionsDeoptimizingVisitor(
- SharedFunctionInfo* function_info)
- : function_info_(function_info) {}
-
- virtual void EnterContext(Context* context) {
- }
-
- virtual void VisitFunction(JSFunction* function) {
- if (function->shared() == function_info_ ||
- IsInlined(function, function_info_)) {
- Deoptimizer::DeoptimizeFunction(function);
- }
- }
-
- virtual void LeaveContext(Context* context) {
- }
-
- private:
- SharedFunctionInfo* function_info_;
-};
-
-
-static void DeoptimizeDependentFunctions(SharedFunctionInfo* function_info) {
- AssertNoAllocation no_allocation;
-
- DependentFunctionsDeoptimizingVisitor visitor(function_info);
- Deoptimizer::VisitAllOptimizedFunctions(&visitor);
-}
-
-
-MaybeObject* LiveEdit::ReplaceFunctionCode(
- Handle<JSArray> new_compile_info_array,
- Handle<JSArray> shared_info_array) {
- HandleScope scope;
-
- if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return Isolate::Current()->ThrowIllegalOperation();
- }
-
- FunctionInfoWrapper compile_info_wrapper(new_compile_info_array);
- SharedInfoWrapper shared_info_wrapper(shared_info_array);
-
- Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
-
- if (IsJSFunctionCode(shared_info->code())) {
- Handle<Code> code = compile_info_wrapper.GetFunctionCode();
- ReplaceCodeObject(shared_info->code(), *code);
- Handle<Object> code_scope_info = compile_info_wrapper.GetCodeScopeInfo();
- if (code_scope_info->IsFixedArray()) {
- shared_info->set_scope_info(SerializedScopeInfo::cast(*code_scope_info));
- }
- }
-
- if (shared_info->debug_info()->IsDebugInfo()) {
- Handle<DebugInfo> debug_info(DebugInfo::cast(shared_info->debug_info()));
- Handle<Code> new_original_code =
- FACTORY->CopyCode(compile_info_wrapper.GetFunctionCode());
- debug_info->set_original_code(*new_original_code);
- }
-
- int start_position = compile_info_wrapper.GetStartPosition();
- int end_position = compile_info_wrapper.GetEndPosition();
- shared_info->set_start_position(start_position);
- shared_info->set_end_position(end_position);
-
- shared_info->set_construct_stub(
- Isolate::Current()->builtins()->builtin(
- Builtins::kJSConstructStubGeneric));
-
- DeoptimizeDependentFunctions(*shared_info);
- Isolate::Current()->compilation_cache()->Remove(shared_info);
-
- return HEAP->undefined_value();
-}
-
-
-MaybeObject* LiveEdit::FunctionSourceUpdated(
- Handle<JSArray> shared_info_array) {
- HandleScope scope;
-
- if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return Isolate::Current()->ThrowIllegalOperation();
- }
-
- SharedInfoWrapper shared_info_wrapper(shared_info_array);
- Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
-
- DeoptimizeDependentFunctions(*shared_info);
- Isolate::Current()->compilation_cache()->Remove(shared_info);
-
- return HEAP->undefined_value();
-}
-
-
-void LiveEdit::SetFunctionScript(Handle<JSValue> function_wrapper,
- Handle<Object> script_handle) {
- Handle<SharedFunctionInfo> shared_info =
- Handle<SharedFunctionInfo>::cast(UnwrapJSValue(function_wrapper));
- shared_info->set_script(*script_handle);
-
- Isolate::Current()->compilation_cache()->Remove(shared_info);
-}
-
-
-// For a script text change (defined as position_change_array), translates
-// position in unchanged text to position in changed text.
-// Text change is a set of non-overlapping regions in text, that have changed
-// their contents and length. It is specified as array of groups of 3 numbers:
-// (change_begin, change_end, change_end_new_position).
-// Each group describes a change in text; groups are sorted by change_begin.
-// Only position in text beyond any changes may be successfully translated.
-// If a positions is inside some region that changed, result is currently
-// undefined.
-static int TranslatePosition(int original_position,
- Handle<JSArray> position_change_array) {
- int position_diff = 0;
- int array_len = Smi::cast(position_change_array->length())->value();
- // TODO(635): binary search may be used here
- for (int i = 0; i < array_len; i += 3) {
- Object* element = position_change_array->GetElementNoExceptionThrown(i);
- int chunk_start = Smi::cast(element)->value();
- if (original_position < chunk_start) {
- break;
- }
- element = position_change_array->GetElementNoExceptionThrown(i + 1);
- int chunk_end = Smi::cast(element)->value();
- // Position mustn't be inside a chunk.
- ASSERT(original_position >= chunk_end);
- element = position_change_array->GetElementNoExceptionThrown(i + 2);
- int chunk_changed_end = Smi::cast(element)->value();
- position_diff = chunk_changed_end - chunk_end;
- }
-
- return original_position + position_diff;
-}
-
-
-// Auto-growing buffer for writing relocation info code section. This buffer
-// is a simplified version of buffer from Assembler. Unlike Assembler, this
-// class is platform-independent and it works without dealing with instructions.
-// As specified by RelocInfo format, the buffer is filled in reversed order:
-// from upper to lower addresses.
-// It uses NewArray/DeleteArray for memory management.
-class RelocInfoBuffer {
- public:
- RelocInfoBuffer(int buffer_initial_capicity, byte* pc) {
- buffer_size_ = buffer_initial_capicity + kBufferGap;
- buffer_ = NewArray<byte>(buffer_size_);
-
- reloc_info_writer_.Reposition(buffer_ + buffer_size_, pc);
- }
- ~RelocInfoBuffer() {
- DeleteArray(buffer_);
- }
-
- // As specified by RelocInfo format, the buffer is filled in reversed order:
- // from upper to lower addresses.
- void Write(const RelocInfo* rinfo) {
- if (buffer_ + kBufferGap >= reloc_info_writer_.pos()) {
- Grow();
- }
- reloc_info_writer_.Write(rinfo);
- }
-
- Vector<byte> GetResult() {
- // Return the bytes from pos up to end of buffer.
- int result_size =
- static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer_.pos());
- return Vector<byte>(reloc_info_writer_.pos(), result_size);
- }
-
- private:
- void Grow() {
- // Compute new buffer size.
- int new_buffer_size;
- if (buffer_size_ < 2 * KB) {
- new_buffer_size = 4 * KB;
- } else {
- new_buffer_size = 2 * buffer_size_;
- }
- // Some internal data structures overflow for very large buffers,
- // they must ensure that kMaximalBufferSize is not too large.
- if (new_buffer_size > kMaximalBufferSize) {
- V8::FatalProcessOutOfMemory("RelocInfoBuffer::GrowBuffer");
- }
-
- // Setup new buffer.
- byte* new_buffer = NewArray<byte>(new_buffer_size);
-
- // Copy the data.
- int curently_used_size =
- static_cast<int>(buffer_ + buffer_size_ - reloc_info_writer_.pos());
- memmove(new_buffer + new_buffer_size - curently_used_size,
- reloc_info_writer_.pos(), curently_used_size);
-
- reloc_info_writer_.Reposition(
- new_buffer + new_buffer_size - curently_used_size,
- reloc_info_writer_.last_pc());
-
- DeleteArray(buffer_);
- buffer_ = new_buffer;
- buffer_size_ = new_buffer_size;
- }
-
- RelocInfoWriter reloc_info_writer_;
- byte* buffer_;
- int buffer_size_;
-
- static const int kBufferGap = RelocInfoWriter::kMaxSize;
- static const int kMaximalBufferSize = 512*MB;
-};
-
-// Patch positions in code (changes relocation info section) and possibly
-// returns new instance of code.
-static Handle<Code> PatchPositionsInCode(Handle<Code> code,
- Handle<JSArray> position_change_array) {
-
- RelocInfoBuffer buffer_writer(code->relocation_size(),
- code->instruction_start());
-
- {
- AssertNoAllocation no_allocations_please;
- for (RelocIterator it(*code); !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- if (RelocInfo::IsPosition(rinfo->rmode())) {
- int position = static_cast<int>(rinfo->data());
- int new_position = TranslatePosition(position,
- position_change_array);
- if (position != new_position) {
- RelocInfo info_copy(rinfo->pc(), rinfo->rmode(), new_position);
- buffer_writer.Write(&info_copy);
- continue;
- }
- }
- buffer_writer.Write(it.rinfo());
- }
- }
-
- Vector<byte> buffer = buffer_writer.GetResult();
-
- if (buffer.length() == code->relocation_size()) {
- // Simply patch relocation area of code.
- memcpy(code->relocation_start(), buffer.start(), buffer.length());
- return code;
- } else {
- // Relocation info section now has different size. We cannot simply
- // rewrite it inside code object. Instead we have to create a new
- // code object.
- Handle<Code> result(FACTORY->CopyCode(code, buffer));
- return result;
- }
-}
-
-
-MaybeObject* LiveEdit::PatchFunctionPositions(
- Handle<JSArray> shared_info_array, Handle<JSArray> position_change_array) {
-
- if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return Isolate::Current()->ThrowIllegalOperation();
- }
-
- SharedInfoWrapper shared_info_wrapper(shared_info_array);
- Handle<SharedFunctionInfo> info = shared_info_wrapper.GetInfo();
-
- int old_function_start = info->start_position();
- int new_function_start = TranslatePosition(old_function_start,
- position_change_array);
- int new_function_end = TranslatePosition(info->end_position(),
- position_change_array);
- int new_function_token_pos =
- TranslatePosition(info->function_token_position(), position_change_array);
-
- info->set_start_position(new_function_start);
- info->set_end_position(new_function_end);
- info->set_function_token_position(new_function_token_pos);
-
- if (IsJSFunctionCode(info->code())) {
- // Patch relocation info section of the code.
- Handle<Code> patched_code = PatchPositionsInCode(Handle<Code>(info->code()),
- position_change_array);
- if (*patched_code != info->code()) {
- // Replace all references to the code across the heap. In particular,
- // some stubs may refer to this code and this code may be being executed
- // on stack (it is safe to substitute the code object on stack, because
- // we only change the structure of rinfo and leave instructions
- // untouched).
- ReplaceCodeObject(info->code(), *patched_code);
- }
- }
-
- return HEAP->undefined_value();
-}
-
-
-static Handle<Script> CreateScriptCopy(Handle<Script> original) {
- Handle<String> original_source(String::cast(original->source()));
-
- Handle<Script> copy = FACTORY->NewScript(original_source);
-
- copy->set_name(original->name());
- copy->set_line_offset(original->line_offset());
- copy->set_column_offset(original->column_offset());
- copy->set_data(original->data());
- copy->set_type(original->type());
- copy->set_context_data(original->context_data());
- copy->set_compilation_type(original->compilation_type());
- copy->set_eval_from_shared(original->eval_from_shared());
- copy->set_eval_from_instructions_offset(
- original->eval_from_instructions_offset());
-
- return copy;
-}
-
-
-Object* LiveEdit::ChangeScriptSource(Handle<Script> original_script,
- Handle<String> new_source,
- Handle<Object> old_script_name) {
- Handle<Object> old_script_object;
- if (old_script_name->IsString()) {
- Handle<Script> old_script = CreateScriptCopy(original_script);
- old_script->set_name(String::cast(*old_script_name));
- old_script_object = old_script;
- Isolate::Current()->debugger()->OnAfterCompile(
- old_script, Debugger::SEND_WHEN_DEBUGGING);
- } else {
- old_script_object = Handle<Object>(HEAP->null_value());
- }
-
- original_script->set_source(*new_source);
-
- // Drop line ends so that they will be recalculated.
- original_script->set_line_ends(HEAP->undefined_value());
-
- return *old_script_object;
-}
-
-
-
-void LiveEdit::ReplaceRefToNestedFunction(
- Handle<JSValue> parent_function_wrapper,
- Handle<JSValue> orig_function_wrapper,
- Handle<JSValue> subst_function_wrapper) {
-
- Handle<SharedFunctionInfo> parent_shared =
- Handle<SharedFunctionInfo>::cast(UnwrapJSValue(parent_function_wrapper));
- Handle<SharedFunctionInfo> orig_shared =
- Handle<SharedFunctionInfo>::cast(UnwrapJSValue(orig_function_wrapper));
- Handle<SharedFunctionInfo> subst_shared =
- Handle<SharedFunctionInfo>::cast(UnwrapJSValue(subst_function_wrapper));
-
- for (RelocIterator it(parent_shared->code()); !it.done(); it.next()) {
- if (it.rinfo()->rmode() == RelocInfo::EMBEDDED_OBJECT) {
- if (it.rinfo()->target_object() == *orig_shared) {
- it.rinfo()->set_target_object(*subst_shared);
- }
- }
- }
-}
-
-
-// Check an activation against list of functions. If there is a function
-// that matches, its status in result array is changed to status argument value.
-static bool CheckActivation(Handle<JSArray> shared_info_array,
- Handle<JSArray> result,
- StackFrame* frame,
- LiveEdit::FunctionPatchabilityStatus status) {
- if (!frame->is_java_script()) return false;
-
- Handle<JSFunction> function(
- JSFunction::cast(JavaScriptFrame::cast(frame)->function()));
-
- int len = Smi::cast(shared_info_array->length())->value();
- for (int i = 0; i < len; i++) {
- JSValue* wrapper =
- JSValue::cast(shared_info_array->GetElementNoExceptionThrown(i));
- Handle<SharedFunctionInfo> shared(
- SharedFunctionInfo::cast(wrapper->value()));
-
- if (function->shared() == *shared || IsInlined(*function, *shared)) {
- SetElementNonStrict(result, i, Handle<Smi>(Smi::FromInt(status)));
- return true;
- }
- }
- return false;
-}
-
-
-// Iterates over handler chain and removes all elements that are inside
-// frames being dropped.
-static bool FixTryCatchHandler(StackFrame* top_frame,
- StackFrame* bottom_frame) {
- Address* pointer_address =
- &Memory::Address_at(Isolate::Current()->get_address_from_id(
- Isolate::k_handler_address));
-
- while (*pointer_address < top_frame->sp()) {
- pointer_address = &Memory::Address_at(*pointer_address);
- }
- Address* above_frame_address = pointer_address;
- while (*pointer_address < bottom_frame->fp()) {
- pointer_address = &Memory::Address_at(*pointer_address);
- }
- bool change = *above_frame_address != *pointer_address;
- *above_frame_address = *pointer_address;
- return change;
-}
-
-
-// Removes specified range of frames from stack. There may be 1 or more
-// frames in range. Anyway the bottom frame is restarted rather than dropped,
-// and therefore has to be a JavaScript frame.
-// Returns error message or NULL.
-static const char* DropFrames(Vector<StackFrame*> frames,
- int top_frame_index,
- int bottom_js_frame_index,
- Debug::FrameDropMode* mode,
- Object*** restarter_frame_function_pointer) {
- if (!Debug::kFrameDropperSupported) {
- return "Stack manipulations are not supported in this architecture.";
- }
-
- StackFrame* pre_top_frame = frames[top_frame_index - 1];
- StackFrame* top_frame = frames[top_frame_index];
- StackFrame* bottom_js_frame = frames[bottom_js_frame_index];
-
- ASSERT(bottom_js_frame->is_java_script());
-
- // Check the nature of the top frame.
- Isolate* isolate = Isolate::Current();
- Code* pre_top_frame_code = pre_top_frame->LookupCode();
- if (pre_top_frame_code->is_inline_cache_stub() &&
- pre_top_frame_code->ic_state() == DEBUG_BREAK) {
- // OK, we can drop inline cache calls.
- *mode = Debug::FRAME_DROPPED_IN_IC_CALL;
- } else if (pre_top_frame_code ==
- isolate->debug()->debug_break_slot()) {
- // OK, we can drop debug break slot.
- *mode = Debug::FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
- } else if (pre_top_frame_code ==
- isolate->builtins()->builtin(
- Builtins::kFrameDropper_LiveEdit)) {
- // OK, we can drop our own code.
- *mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
- } else if (pre_top_frame_code->kind() == Code::STUB &&
- pre_top_frame_code->major_key()) {
- // Entry from our unit tests, it's fine, we support this case.
- *mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
- } else {
- return "Unknown structure of stack above changing function";
- }
-
- Address unused_stack_top = top_frame->sp();
- Address unused_stack_bottom = bottom_js_frame->fp()
- - Debug::kFrameDropperFrameSize * kPointerSize // Size of the new frame.
- + kPointerSize; // Bigger address end is exclusive.
-
- if (unused_stack_top > unused_stack_bottom) {
- return "Not enough space for frame dropper frame";
- }
-
- // Committing now. After this point we should return only NULL value.
-
- FixTryCatchHandler(pre_top_frame, bottom_js_frame);
- // Make sure FixTryCatchHandler is idempotent.
- ASSERT(!FixTryCatchHandler(pre_top_frame, bottom_js_frame));
-
- Handle<Code> code = Isolate::Current()->builtins()->FrameDropper_LiveEdit();
- top_frame->set_pc(code->entry());
- pre_top_frame->SetCallerFp(bottom_js_frame->fp());
-
- *restarter_frame_function_pointer =
- Debug::SetUpFrameDropperFrame(bottom_js_frame, code);
-
- ASSERT((**restarter_frame_function_pointer)->IsJSFunction());
-
- for (Address a = unused_stack_top;
- a < unused_stack_bottom;
- a += kPointerSize) {
- Memory::Object_at(a) = Smi::FromInt(0);
- }
-
- return NULL;
-}
-
-
-static bool IsDropableFrame(StackFrame* frame) {
- return !frame->is_exit();
-}
-
-// Fills result array with statuses of functions. Modifies the stack
-// removing all listed function if possible and if do_drop is true.
-static const char* DropActivationsInActiveThread(
- Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop) {
- Debug* debug = Isolate::Current()->debug();
- ZoneScope scope(DELETE_ON_EXIT);
- Vector<StackFrame*> frames = CreateStackMap();
-
- int array_len = Smi::cast(shared_info_array->length())->value();
-
- int top_frame_index = -1;
- int frame_index = 0;
- for (; frame_index < frames.length(); frame_index++) {
- StackFrame* frame = frames[frame_index];
- if (frame->id() == debug->break_frame_id()) {
- top_frame_index = frame_index;
- break;
- }
- if (CheckActivation(shared_info_array, result, frame,
- LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) {
- // We are still above break_frame. It is not a target frame,
- // it is a problem.
- return "Debugger mark-up on stack is not found";
- }
- }
-
- if (top_frame_index == -1) {
- // We haven't found break frame, but no function is blocking us anyway.
- return NULL;
- }
-
- bool target_frame_found = false;
- int bottom_js_frame_index = top_frame_index;
- bool c_code_found = false;
-
- for (; frame_index < frames.length(); frame_index++) {
- StackFrame* frame = frames[frame_index];
- if (!IsDropableFrame(frame)) {
- c_code_found = true;
- break;
- }
- if (CheckActivation(shared_info_array, result, frame,
- LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
- target_frame_found = true;
- bottom_js_frame_index = frame_index;
- }
- }
-
- if (c_code_found) {
- // There is a C frames on stack. Check that there are no target frames
- // below them.
- for (; frame_index < frames.length(); frame_index++) {
- StackFrame* frame = frames[frame_index];
- if (frame->is_java_script()) {
- if (CheckActivation(shared_info_array, result, frame,
- LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) {
- // Cannot drop frame under C frames.
- return NULL;
- }
- }
- }
- }
-
- if (!do_drop) {
- // We are in check-only mode.
- return NULL;
- }
-
- if (!target_frame_found) {
- // Nothing to drop.
- return NULL;
- }
-
- Debug::FrameDropMode drop_mode = Debug::FRAMES_UNTOUCHED;
- Object** restarter_frame_function_pointer = NULL;
- const char* error_message = DropFrames(frames, top_frame_index,
- bottom_js_frame_index, &drop_mode,
- &restarter_frame_function_pointer);
-
- if (error_message != NULL) {
- return error_message;
- }
-
- // Adjust break_frame after some frames has been dropped.
- StackFrame::Id new_id = StackFrame::NO_ID;
- for (int i = bottom_js_frame_index + 1; i < frames.length(); i++) {
- if (frames[i]->type() == StackFrame::JAVA_SCRIPT) {
- new_id = frames[i]->id();
- break;
- }
- }
- debug->FramesHaveBeenDropped(new_id, drop_mode,
- restarter_frame_function_pointer);
-
- // Replace "blocked on active" with "replaced on active" status.
- for (int i = 0; i < array_len; i++) {
- if (result->GetElement(i) ==
- Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
- Handle<Object> replaced(
- Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK));
- SetElementNonStrict(result, i, replaced);
- }
- }
- return NULL;
-}
-
-
-class InactiveThreadActivationsChecker : public ThreadVisitor {
- public:
- InactiveThreadActivationsChecker(Handle<JSArray> shared_info_array,
- Handle<JSArray> result)
- : shared_info_array_(shared_info_array), result_(result),
- has_blocked_functions_(false) {
- }
- void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
- for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
- has_blocked_functions_ |= CheckActivation(
- shared_info_array_, result_, it.frame(),
- LiveEdit::FUNCTION_BLOCKED_ON_OTHER_STACK);
- }
- }
- bool HasBlockedFunctions() {
- return has_blocked_functions_;
- }
-
- private:
- Handle<JSArray> shared_info_array_;
- Handle<JSArray> result_;
- bool has_blocked_functions_;
-};
-
-
-Handle<JSArray> LiveEdit::CheckAndDropActivations(
- Handle<JSArray> shared_info_array, bool do_drop) {
- int len = Smi::cast(shared_info_array->length())->value();
-
- Handle<JSArray> result = FACTORY->NewJSArray(len);
-
- // Fill the default values.
- for (int i = 0; i < len; i++) {
- SetElementNonStrict(
- result,
- i,
- Handle<Smi>(Smi::FromInt(FUNCTION_AVAILABLE_FOR_PATCH)));
- }
-
-
- // First check inactive threads. Fail if some functions are blocked there.
- InactiveThreadActivationsChecker inactive_threads_checker(shared_info_array,
- result);
- Isolate::Current()->thread_manager()->IterateArchivedThreads(
- &inactive_threads_checker);
- if (inactive_threads_checker.HasBlockedFunctions()) {
- return result;
- }
-
- // Try to drop activations from the current stack.
- const char* error_message =
- DropActivationsInActiveThread(shared_info_array, result, do_drop);
- if (error_message != NULL) {
- // Add error message as an array extra element.
- Vector<const char> vector_message(error_message, StrLength(error_message));
- Handle<String> str = FACTORY->NewStringFromAscii(vector_message);
- SetElementNonStrict(result, len, str);
- }
- return result;
-}
-
-
-LiveEditFunctionTracker::LiveEditFunctionTracker(Isolate* isolate,
- FunctionLiteral* fun)
- : isolate_(isolate) {
- if (isolate_->active_function_info_listener() != NULL) {
- isolate_->active_function_info_listener()->FunctionStarted(fun);
- }
-}
-
-
-LiveEditFunctionTracker::~LiveEditFunctionTracker() {
- if (isolate_->active_function_info_listener() != NULL) {
- isolate_->active_function_info_listener()->FunctionDone();
- }
-}
-
-
-void LiveEditFunctionTracker::RecordFunctionInfo(
- Handle<SharedFunctionInfo> info, FunctionLiteral* lit) {
- if (isolate_->active_function_info_listener() != NULL) {
- isolate_->active_function_info_listener()->FunctionInfo(info, lit->scope());
- }
-}
-
-
-void LiveEditFunctionTracker::RecordRootFunctionInfo(Handle<Code> code) {
- isolate_->active_function_info_listener()->FunctionCode(code);
-}
-
-
-bool LiveEditFunctionTracker::IsActive(Isolate* isolate) {
- return isolate->active_function_info_listener() != NULL;
-}
-
-
-#else // ENABLE_DEBUGGER_SUPPORT
-
-// This ifdef-else-endif section provides working or stub implementation of
-// LiveEditFunctionTracker.
-LiveEditFunctionTracker::LiveEditFunctionTracker(Isolate* isolate,
- FunctionLiteral* fun) {
-}
-
-
-LiveEditFunctionTracker::~LiveEditFunctionTracker() {
-}
-
-
-void LiveEditFunctionTracker::RecordFunctionInfo(
- Handle<SharedFunctionInfo> info, FunctionLiteral* lit) {
-}
-
-
-void LiveEditFunctionTracker::RecordRootFunctionInfo(Handle<Code> code) {
-}
-
-
-bool LiveEditFunctionTracker::IsActive() {
- return false;
-}
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/liveedit.h b/src/3rdparty/v8/src/liveedit.h
deleted file mode 100644
index 36c2c76..0000000
--- a/src/3rdparty/v8/src/liveedit.h
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LIVEEDIT_H_
-#define V8_LIVEEDIT_H_
-
-
-
-// Live Edit feature implementation.
-// User should be able to change script on already running VM. This feature
-// matches hot swap features in other frameworks.
-//
-// The basic use-case is when user spots some mistake in function body
-// from debugger and wishes to change the algorithm without restart.
-//
-// A single change always has a form of a simple replacement (in pseudo-code):
-// script.source[positions, positions+length] = new_string;
-// Implementation first determines, which function's body includes this
-// change area. Then both old and new versions of script are fully compiled
-// in order to analyze, whether the function changed its outer scope
-// expectations (or number of parameters). If it didn't, function's code is
-// patched with a newly compiled code. If it did change, enclosing function
-// gets patched. All inner functions are left untouched, whatever happened
-// to them in a new script version. However, new version of code will
-// instantiate newly compiled functions.
-
-
-#include "compiler.h"
-
-namespace v8 {
-namespace internal {
-
-// This class collects some specific information on structure of functions
-// in a particular script. It gets called from compiler all the time, but
-// actually records any data only when liveedit operation is in process;
-// in any other time this class is very cheap.
-//
-// The primary interest of the Tracker is to record function scope structures
-// in order to analyze whether function code maybe safely patched (with new
-// code successfully reading existing data from function scopes). The Tracker
-// also collects compiled function codes.
-class LiveEditFunctionTracker {
- public:
- explicit LiveEditFunctionTracker(Isolate* isolate, FunctionLiteral* fun);
- ~LiveEditFunctionTracker();
- void RecordFunctionInfo(Handle<SharedFunctionInfo> info,
- FunctionLiteral* lit);
- void RecordRootFunctionInfo(Handle<Code> code);
-
- static bool IsActive(Isolate* isolate);
-
- private:
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Isolate* isolate_;
-#endif
-};
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-class LiveEdit : AllStatic {
- public:
- static JSArray* GatherCompileInfo(Handle<Script> script,
- Handle<String> source);
-
- static void WrapSharedFunctionInfos(Handle<JSArray> array);
-
- MUST_USE_RESULT static MaybeObject* ReplaceFunctionCode(
- Handle<JSArray> new_compile_info_array,
- Handle<JSArray> shared_info_array);
-
- static MaybeObject* FunctionSourceUpdated(Handle<JSArray> shared_info_array);
-
- // Updates script field in FunctionSharedInfo.
- static void SetFunctionScript(Handle<JSValue> function_wrapper,
- Handle<Object> script_handle);
-
- MUST_USE_RESULT static MaybeObject* PatchFunctionPositions(
- Handle<JSArray> shared_info_array, Handle<JSArray> position_change_array);
-
- // For a script updates its source field. If old_script_name is provided
- // (i.e. is a String), also creates a copy of the script with its original
- // source and sends notification to debugger.
- static Object* ChangeScriptSource(Handle<Script> original_script,
- Handle<String> new_source,
- Handle<Object> old_script_name);
-
- // In a code of a parent function replaces original function as embedded
- // object with a substitution one.
- static void ReplaceRefToNestedFunction(Handle<JSValue> parent_function_shared,
- Handle<JSValue> orig_function_shared,
- Handle<JSValue> subst_function_shared);
-
- // Checks listed functions on stack and return array with corresponding
- // FunctionPatchabilityStatus statuses; extra array element may
- // contain general error message. Modifies the current stack and
- // has restart the lowest found frames and drops all other frames above
- // if possible and if do_drop is true.
- static Handle<JSArray> CheckAndDropActivations(
- Handle<JSArray> shared_info_array, bool do_drop);
-
- // A copy of this is in liveedit-debugger.js.
- enum FunctionPatchabilityStatus {
- FUNCTION_AVAILABLE_FOR_PATCH = 1,
- FUNCTION_BLOCKED_ON_ACTIVE_STACK = 2,
- FUNCTION_BLOCKED_ON_OTHER_STACK = 3,
- FUNCTION_BLOCKED_UNDER_NATIVE_CODE = 4,
- FUNCTION_REPLACED_ON_ACTIVE_STACK = 5
- };
-
- // Compares 2 strings line-by-line, then token-wise and returns diff in form
- // of array of triplets (pos1, pos1_end, pos2_end) describing list
- // of diff chunks.
- static Handle<JSArray> CompareStrings(Handle<String> s1,
- Handle<String> s2);
-};
-
-
-// A general-purpose comparator between 2 arrays.
-class Comparator {
- public:
-
- // Holds 2 arrays of some elements allowing to compare any pair of
- // element from the first array and element from the second array.
- class Input {
- public:
- virtual int getLength1() = 0;
- virtual int getLength2() = 0;
- virtual bool equals(int index1, int index2) = 0;
-
- protected:
- virtual ~Input() {}
- };
-
- // Receives compare result as a series of chunks.
- class Output {
- public:
- // Puts another chunk in result list. Note that technically speaking
- // only 3 arguments actually needed with 4th being derivable.
- virtual void AddChunk(int pos1, int pos2, int len1, int len2) = 0;
-
- protected:
- virtual ~Output() {}
- };
-
- // Finds the difference between 2 arrays of elements.
- static void CalculateDifference(Input* input,
- Output* result_writer);
-};
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-} } // namespace v8::internal
-
-#endif /* V*_LIVEEDIT_H_ */
diff --git a/src/3rdparty/v8/src/liveobjectlist-inl.h b/src/3rdparty/v8/src/liveobjectlist-inl.h
deleted file mode 100644
index f742de3..0000000
--- a/src/3rdparty/v8/src/liveobjectlist-inl.h
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LIVEOBJECTLIST_INL_H_
-#define V8_LIVEOBJECTLIST_INL_H_
-
-#include "v8.h"
-
-#include "liveobjectlist.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef LIVE_OBJECT_LIST
-
-void LiveObjectList::GCEpilogue() {
- if (!NeedLOLProcessing()) return;
- GCEpiloguePrivate();
-}
-
-
-void LiveObjectList::GCPrologue() {
- if (!NeedLOLProcessing()) return;
-#ifdef VERIFY_LOL
- if (FLAG_verify_lol) {
- Verify();
- }
-#endif
-}
-
-
-void LiveObjectList::IterateElements(ObjectVisitor* v) {
- if (!NeedLOLProcessing()) return;
- IterateElementsPrivate(v);
-}
-
-
-void LiveObjectList::ProcessNonLive(HeapObject *obj) {
- // Only do work if we have at least one list to process.
- if (last()) DoProcessNonLive(obj);
-}
-
-
-void LiveObjectList::UpdateReferencesForScavengeGC() {
- if (LiveObjectList::NeedLOLProcessing()) {
- UpdateLiveObjectListVisitor update_visitor;
- LiveObjectList::IterateElements(&update_visitor);
- }
-}
-
-
-LiveObjectList* LiveObjectList::FindLolForId(int id,
- LiveObjectList* start_lol) {
- if (id != 0) {
- LiveObjectList* lol = start_lol;
- while (lol != NULL) {
- if (lol->id() == id) {
- return lol;
- }
- lol = lol->prev_;
- }
- }
- return NULL;
-}
-
-
-// Iterates the elements in every lol and returns the one that matches the
-// specified key. If no matching element is found, then it returns NULL.
-template <typename T>
-inline LiveObjectList::Element*
-LiveObjectList::FindElementFor(T (*GetValue)(LiveObjectList::Element*), T key) {
- LiveObjectList *lol = last();
- while (lol != NULL) {
- Element* elements = lol->elements_;
- for (int i = 0; i < lol->obj_count_; i++) {
- Element* element = &elements[i];
- if (GetValue(element) == key) {
- return element;
- }
- }
- lol = lol->prev_;
- }
- return NULL;
-}
-
-
-inline int LiveObjectList::GetElementId(LiveObjectList::Element* element) {
- return element->id_;
-}
-
-
-inline HeapObject*
-LiveObjectList::GetElementObj(LiveObjectList::Element* element) {
- return element->obj_;
-}
-
-#endif // LIVE_OBJECT_LIST
-
-} } // namespace v8::internal
-
-#endif // V8_LIVEOBJECTLIST_INL_H_
-
diff --git a/src/3rdparty/v8/src/liveobjectlist.cc b/src/3rdparty/v8/src/liveobjectlist.cc
deleted file mode 100644
index 5795a6b..0000000
--- a/src/3rdparty/v8/src/liveobjectlist.cc
+++ /dev/null
@@ -1,2589 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifdef LIVE_OBJECT_LIST
-
-#include <ctype.h>
-#include <stdlib.h>
-
-#include "v8.h"
-
-#include "checks.h"
-#include "global-handles.h"
-#include "heap.h"
-#include "inspector.h"
-#include "list-inl.h"
-#include "liveobjectlist-inl.h"
-#include "string-stream.h"
-#include "top.h"
-#include "v8utils.h"
-
-namespace v8 {
-namespace internal {
-
-
-typedef int (*RawComparer)(const void*, const void*);
-
-
-#ifdef CHECK_ALL_OBJECT_TYPES
-
-#define DEBUG_LIVE_OBJECT_TYPES(v) \
- v(Smi, "unexpected: Smi") \
- \
- v(CodeCache, "unexpected: CodeCache") \
- v(BreakPointInfo, "unexpected: BreakPointInfo") \
- v(DebugInfo, "unexpected: DebugInfo") \
- v(TypeSwitchInfo, "unexpected: TypeSwitchInfo") \
- v(SignatureInfo, "unexpected: SignatureInfo") \
- v(Script, "unexpected: Script") \
- v(ObjectTemplateInfo, "unexpected: ObjectTemplateInfo") \
- v(FunctionTemplateInfo, "unexpected: FunctionTemplateInfo") \
- v(CallHandlerInfo, "unexpected: CallHandlerInfo") \
- v(InterceptorInfo, "unexpected: InterceptorInfo") \
- v(AccessCheckInfo, "unexpected: AccessCheckInfo") \
- v(AccessorInfo, "unexpected: AccessorInfo") \
- v(ExternalTwoByteString, "unexpected: ExternalTwoByteString") \
- v(ExternalAsciiString, "unexpected: ExternalAsciiString") \
- v(ExternalString, "unexpected: ExternalString") \
- v(SeqTwoByteString, "unexpected: SeqTwoByteString") \
- v(SeqAsciiString, "unexpected: SeqAsciiString") \
- v(SeqString, "unexpected: SeqString") \
- v(JSFunctionResultCache, "unexpected: JSFunctionResultCache") \
- v(GlobalContext, "unexpected: GlobalContext") \
- v(MapCache, "unexpected: MapCache") \
- v(CodeCacheHashTable, "unexpected: CodeCacheHashTable") \
- v(CompilationCacheTable, "unexpected: CompilationCacheTable") \
- v(SymbolTable, "unexpected: SymbolTable") \
- v(Dictionary, "unexpected: Dictionary") \
- v(HashTable, "unexpected: HashTable") \
- v(DescriptorArray, "unexpected: DescriptorArray") \
- v(ExternalFloatArray, "unexpected: ExternalFloatArray") \
- v(ExternalUnsignedIntArray, "unexpected: ExternalUnsignedIntArray") \
- v(ExternalIntArray, "unexpected: ExternalIntArray") \
- v(ExternalUnsignedShortArray, "unexpected: ExternalUnsignedShortArray") \
- v(ExternalShortArray, "unexpected: ExternalShortArray") \
- v(ExternalUnsignedByteArray, "unexpected: ExternalUnsignedByteArray") \
- v(ExternalByteArray, "unexpected: ExternalByteArray") \
- v(JSValue, "unexpected: JSValue")
-
-#else
-#define DEBUG_LIVE_OBJECT_TYPES(v)
-#endif
-
-
-#define FOR_EACH_LIVE_OBJECT_TYPE(v) \
- DEBUG_LIVE_OBJECT_TYPES(v) \
- \
- v(JSArray, "JSArray") \
- v(JSRegExp, "JSRegExp") \
- v(JSFunction, "JSFunction") \
- v(JSGlobalObject, "JSGlobal") \
- v(JSBuiltinsObject, "JSBuiltins") \
- v(GlobalObject, "Global") \
- v(JSGlobalProxy, "JSGlobalProxy") \
- v(JSObject, "JSObject") \
- \
- v(Context, "meta: Context") \
- v(ByteArray, "meta: ByteArray") \
- v(PixelArray, "meta: PixelArray") \
- v(ExternalArray, "meta: ExternalArray") \
- v(FixedArray, "meta: FixedArray") \
- v(String, "String") \
- v(HeapNumber, "HeapNumber") \
- \
- v(Code, "meta: Code") \
- v(Map, "meta: Map") \
- v(Oddball, "Oddball") \
- v(Proxy, "meta: Proxy") \
- v(SharedFunctionInfo, "meta: SharedFunctionInfo") \
- v(Struct, "meta: Struct") \
- \
- v(HeapObject, "HeapObject")
-
-
-enum /* LiveObjectType */ {
-#define DECLARE_OBJECT_TYPE_ENUM(type, name) kType##type,
- FOR_EACH_LIVE_OBJECT_TYPE(DECLARE_OBJECT_TYPE_ENUM)
- kInvalidLiveObjType,
- kNumberOfTypes
-#undef DECLARE_OBJECT_TYPE_ENUM
-};
-
-
-LiveObjectType GetObjectType(HeapObject* heap_obj) {
- // TODO(mlam): investigate usint Map::instance_type() instead.
-#define CHECK_FOR_OBJECT_TYPE(type, name) \
- if (heap_obj->Is##type()) return kType##type;
- FOR_EACH_LIVE_OBJECT_TYPE(CHECK_FOR_OBJECT_TYPE)
-#undef CHECK_FOR_OBJECT_TYPE
-
- UNREACHABLE();
- return kInvalidLiveObjType;
-}
-
-
-inline const char* GetObjectTypeDesc(LiveObjectType type) {
- static const char* const name[kNumberOfTypes] = {
- #define DEFINE_OBJECT_TYPE_NAME(type, name) name,
- FOR_EACH_LIVE_OBJECT_TYPE(DEFINE_OBJECT_TYPE_NAME)
- "invalid"
- #undef DEFINE_OBJECT_TYPE_NAME
- };
- ASSERT(type < kNumberOfTypes);
- return name[type];
-}
-
-
-const char* GetObjectTypeDesc(HeapObject* heap_obj) {
- LiveObjectType type = GetObjectType(heap_obj);
- return GetObjectTypeDesc(type);
-}
-
-
-bool IsOfType(LiveObjectType type, HeapObject *obj) {
- // Note: there are types that are more general (e.g. JSObject) that would
- // have passed the Is##type_() test for more specialized types (e.g.
- // JSFunction). If we find a more specialized match but we're looking for
- // the general type, then we should reject the ones that matches the
- // specialized type.
-#define CHECK_OBJECT_TYPE(type_, name) \
- if (obj->Is##type_()) return (type == kType##type_);
-
- FOR_EACH_LIVE_OBJECT_TYPE(CHECK_OBJECT_TYPE)
-#undef CHECK_OBJECT_TYPE
-
- return false;
-}
-
-
-const AllocationSpace kInvalidSpace = static_cast<AllocationSpace>(-1);
-
-static AllocationSpace FindSpaceFor(String* space_str) {
- SmartPointer<char> s =
- space_str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-
- const char* key_str = *s;
- switch (key_str[0]) {
- case 'c':
- if (strcmp(key_str, "cell") == 0) return CELL_SPACE;
- if (strcmp(key_str, "code") == 0) return CODE_SPACE;
- break;
- case 'l':
- if (strcmp(key_str, "lo") == 0) return LO_SPACE;
- break;
- case 'm':
- if (strcmp(key_str, "map") == 0) return MAP_SPACE;
- break;
- case 'n':
- if (strcmp(key_str, "new") == 0) return NEW_SPACE;
- break;
- case 'o':
- if (strcmp(key_str, "old-pointer") == 0) return OLD_POINTER_SPACE;
- if (strcmp(key_str, "old-data") == 0) return OLD_DATA_SPACE;
- break;
- }
- return kInvalidSpace;
-}
-
-
-static bool InSpace(AllocationSpace space, HeapObject *heap_obj) {
- if (space != LO_SPACE) {
- return Heap::InSpace(heap_obj, space);
- }
-
- // This is an optimization to speed up the check for an object in the LO
- // space by exclusion because we know that all object pointers passed in
- // here are guaranteed to be in the heap. Hence, it is safe to infer
- // using an exclusion test.
- // Note: calling Heap::InSpace(heap_obj, LO_SPACE) is too slow for our
- // filters.
- int first_space = static_cast<int>(FIRST_SPACE);
- int last_space = static_cast<int>(LO_SPACE);
- for (int sp = first_space; sp < last_space; sp++) {
- if (Heap::InSpace(heap_obj, static_cast<AllocationSpace>(sp))) {
- return false;
- }
- }
- SLOW_ASSERT(Heap::InSpace(heap_obj, LO_SPACE));
- return true;
-}
-
-
-static LiveObjectType FindTypeFor(String* type_str) {
- SmartPointer<char> s =
- type_str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-
-#define CHECK_OBJECT_TYPE(type_, name) { \
- const char* type_desc = GetObjectTypeDesc(kType##type_); \
- const char* key_str = *s; \
- if (strstr(type_desc, key_str) != NULL) return kType##type_; \
- }
- FOR_EACH_LIVE_OBJECT_TYPE(CHECK_OBJECT_TYPE)
-#undef CHECK_OBJECT_TYPE
-
- return kInvalidLiveObjType;
-}
-
-
-class LolFilter {
- public:
- explicit LolFilter(Handle<JSObject> filter_obj);
-
- inline bool is_active() const { return is_active_; }
- inline bool Matches(HeapObject* obj) {
- return !is_active() || MatchesSlow(obj);
- }
-
- private:
- void InitTypeFilter(Handle<JSObject> filter_obj);
- void InitSpaceFilter(Handle<JSObject> filter_obj);
- void InitPropertyFilter(Handle<JSObject> filter_obj);
- bool MatchesSlow(HeapObject* obj);
-
- bool is_active_;
- LiveObjectType type_;
- AllocationSpace space_;
- Handle<String> prop_;
-};
-
-
-LolFilter::LolFilter(Handle<JSObject> filter_obj)
- : is_active_(false),
- type_(kInvalidLiveObjType),
- space_(kInvalidSpace),
- prop_() {
- if (filter_obj.is_null()) return;
-
- InitTypeFilter(filter_obj);
- InitSpaceFilter(filter_obj);
- InitPropertyFilter(filter_obj);
-}
-
-
-void LolFilter::InitTypeFilter(Handle<JSObject> filter_obj) {
- Handle<String> type_sym = Factory::LookupAsciiSymbol("type");
- MaybeObject* maybe_result = filter_obj->GetProperty(*type_sym);
- Object* type_obj;
- if (maybe_result->ToObject(&type_obj)) {
- if (type_obj->IsString()) {
- String* type_str = String::cast(type_obj);
- type_ = FindTypeFor(type_str);
- if (type_ != kInvalidLiveObjType) {
- is_active_ = true;
- }
- }
- }
-}
-
-
-void LolFilter::InitSpaceFilter(Handle<JSObject> filter_obj) {
- Handle<String> space_sym = Factory::LookupAsciiSymbol("space");
- MaybeObject* maybe_result = filter_obj->GetProperty(*space_sym);
- Object* space_obj;
- if (maybe_result->ToObject(&space_obj)) {
- if (space_obj->IsString()) {
- String* space_str = String::cast(space_obj);
- space_ = FindSpaceFor(space_str);
- if (space_ != kInvalidSpace) {
- is_active_ = true;
- }
- }
- }
-}
-
-
-void LolFilter::InitPropertyFilter(Handle<JSObject> filter_obj) {
- Handle<String> prop_sym = Factory::LookupAsciiSymbol("prop");
- MaybeObject* maybe_result = filter_obj->GetProperty(*prop_sym);
- Object* prop_obj;
- if (maybe_result->ToObject(&prop_obj)) {
- if (prop_obj->IsString()) {
- prop_ = Handle<String>(String::cast(prop_obj));
- is_active_ = true;
- }
- }
-}
-
-
-bool LolFilter::MatchesSlow(HeapObject* obj) {
- if ((type_ != kInvalidLiveObjType) && !IsOfType(type_, obj)) {
- return false; // Fail because obj is not of the type of interest.
- }
- if ((space_ != kInvalidSpace) && !InSpace(space_, obj)) {
- return false; // Fail because obj is not in the space of interest.
- }
- if (!prop_.is_null() && obj->IsJSObject()) {
- LookupResult result;
- obj->Lookup(*prop_, &result);
- if (!result.IsProperty()) {
- return false; // Fail because obj does not have the property of interest.
- }
- }
- return true;
-}
-
-
-class LolIterator {
- public:
- LolIterator(LiveObjectList* older, LiveObjectList* newer)
- : older_(older),
- newer_(newer),
- curr_(0),
- elements_(0),
- count_(0),
- index_(0) { }
-
- inline void Init() {
- SetCurrent(newer_);
- // If the elements_ list is empty, then move on to the next list as long
- // as we're not at the last list (indicated by done()).
- while ((elements_ == NULL) && !Done()) {
- SetCurrent(curr_->prev_);
- }
- }
-
- inline bool Done() const {
- return (curr_ == older_);
- }
-
- // Object level iteration.
- inline void Next() {
- index_++;
- if (index_ >= count_) {
- // Iterate backwards until we get to the oldest list.
- while (!Done()) {
- SetCurrent(curr_->prev_);
- // If we have elements to process, we're good to go.
- if (elements_ != NULL) break;
-
- // Else, we should advance to the next older list.
- }
- }
- }
-
- inline int Id() const {
- return elements_[index_].id_;
- }
- inline HeapObject* Obj() const {
- return elements_[index_].obj_;
- }
-
- inline int LolObjCount() const {
- if (curr_ != NULL) return curr_->obj_count_;
- return 0;
- }
-
- protected:
- inline void SetCurrent(LiveObjectList* new_curr) {
- curr_ = new_curr;
- if (curr_ != NULL) {
- elements_ = curr_->elements_;
- count_ = curr_->obj_count_;
- index_ = 0;
- }
- }
-
- LiveObjectList* older_;
- LiveObjectList* newer_;
- LiveObjectList* curr_;
- LiveObjectList::Element* elements_;
- int count_;
- int index_;
-};
-
-
-class LolForwardIterator : public LolIterator {
- public:
- LolForwardIterator(LiveObjectList* first, LiveObjectList* last)
- : LolIterator(first, last) {
- }
-
- inline void Init() {
- SetCurrent(older_);
- // If the elements_ list is empty, then move on to the next list as long
- // as we're not at the last list (indicated by Done()).
- while ((elements_ == NULL) && !Done()) {
- SetCurrent(curr_->next_);
- }
- }
-
- inline bool Done() const {
- return (curr_ == newer_);
- }
-
- // Object level iteration.
- inline void Next() {
- index_++;
- if (index_ >= count_) {
- // Done with current list. Move on to the next.
- while (!Done()) { // If not at the last list already, ...
- SetCurrent(curr_->next_);
- // If we have elements to process, we're good to go.
- if (elements_ != NULL) break;
-
- // Else, we should advance to the next list.
- }
- }
- }
-};
-
-
-// Minimizes the white space in a string. Tabs and newlines are replaced
-// with a space where appropriate.
-static int CompactString(char* str) {
- char* src = str;
- char* dst = str;
- char prev_ch = 0;
- while (*dst != '\0') {
- char ch = *src++;
- // We will treat non-ascii chars as '?'.
- if ((ch & 0x80) != 0) {
- ch = '?';
- }
- // Compact contiguous whitespace chars into a single ' '.
- if (isspace(ch)) {
- if (prev_ch != ' ') *dst++ = ' ';
- prev_ch = ' ';
- continue;
- }
- *dst++ = ch;
- prev_ch = ch;
- }
- return (dst - str);
-}
-
-
-// Generates a custom description based on the specific type of
-// object we're looking at. We only generate specialized
-// descriptions where we can. In all other cases, we emit the
-// generic info.
-static void GenerateObjectDesc(HeapObject* obj,
- char* buffer,
- int buffer_size) {
- Vector<char> buffer_v(buffer, buffer_size);
- ASSERT(obj != NULL);
- if (obj->IsJSArray()) {
- JSArray* jsarray = JSArray::cast(obj);
- double length = jsarray->length()->Number();
- OS::SNPrintF(buffer_v,
- "%p <%s> len %g",
- reinterpret_cast<void*>(obj),
- GetObjectTypeDesc(obj),
- length);
-
- } else if (obj->IsString()) {
- String *str = String::cast(obj);
- // Only grab up to 160 chars in case they are double byte.
- // We'll only dump 80 of them after we compact them.
- const int kMaxCharToDump = 80;
- const int kMaxBufferSize = kMaxCharToDump * 2;
- SmartPointer<char> str_sp = str->ToCString(DISALLOW_NULLS,
- ROBUST_STRING_TRAVERSAL,
- 0,
- kMaxBufferSize);
- char* str_cstr = *str_sp;
- int length = CompactString(str_cstr);
- OS::SNPrintF(buffer_v,
- "%p <%s> '%.80s%s'",
- reinterpret_cast<void*>(obj),
- GetObjectTypeDesc(obj),
- str_cstr,
- (length > kMaxCharToDump) ? "..." : "");
-
- } else if (obj->IsJSFunction() || obj->IsSharedFunctionInfo()) {
- SharedFunctionInfo* sinfo;
- if (obj->IsJSFunction()) {
- JSFunction* func = JSFunction::cast(obj);
- sinfo = func->shared();
- } else {
- sinfo = SharedFunctionInfo::cast(obj);
- }
-
- String* name = sinfo->DebugName();
- SmartPointer<char> name_sp =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- char* name_cstr = *name_sp;
-
- HeapStringAllocator string_allocator;
- StringStream stream(&string_allocator);
- sinfo->SourceCodePrint(&stream, 50);
- SmartPointer<const char> source_sp = stream.ToCString();
- const char* source_cstr = *source_sp;
-
- OS::SNPrintF(buffer_v,
- "%p <%s> '%s' %s",
- reinterpret_cast<void*>(obj),
- GetObjectTypeDesc(obj),
- name_cstr,
- source_cstr);
-
- } else if (obj->IsFixedArray()) {
- FixedArray* fixed = FixedArray::cast(obj);
-
- OS::SNPrintF(buffer_v,
- "%p <%s> len %d",
- reinterpret_cast<void*>(obj),
- GetObjectTypeDesc(obj),
- fixed->length());
-
- } else {
- OS::SNPrintF(buffer_v,
- "%p <%s>",
- reinterpret_cast<void*>(obj),
- GetObjectTypeDesc(obj));
- }
-}
-
-
-// Utility function for filling in a line of detail in a verbose dump.
-static bool AddObjDetail(Handle<FixedArray> arr,
- int index,
- int obj_id,
- Handle<HeapObject> target,
- const char* desc_str,
- Handle<String> id_sym,
- Handle<String> desc_sym,
- Handle<String> size_sym,
- Handle<JSObject> detail,
- Handle<String> desc,
- Handle<Object> error) {
- detail = Factory::NewJSObject(Top::object_function());
- if (detail->IsFailure()) {
- error = detail;
- return false;
- }
-
- int size = 0;
- char buffer[512];
- if (desc_str == NULL) {
- ASSERT(!target.is_null());
- HeapObject* obj = *target;
- GenerateObjectDesc(obj, buffer, sizeof(buffer));
- desc_str = buffer;
- size = obj->Size();
- }
- desc = Factory::NewStringFromAscii(CStrVector(desc_str));
- if (desc->IsFailure()) {
- error = desc;
- return false;
- }
-
- { MaybeObject* maybe_result = detail->SetProperty(*id_sym,
- Smi::FromInt(obj_id),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return false;
- }
- { MaybeObject* maybe_result = detail->SetProperty(*desc_sym,
- *desc,
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return false;
- }
- { MaybeObject* maybe_result = detail->SetProperty(*size_sym,
- Smi::FromInt(size),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return false;
- }
-
- arr->set(index, *detail);
- return true;
-}
-
-
-class DumpWriter {
- public:
- virtual ~DumpWriter() {}
-
- virtual void ComputeTotalCountAndSize(LolFilter* filter,
- int* count,
- int* size) = 0;
- virtual bool Write(Handle<FixedArray> elements_arr,
- int start,
- int dump_limit,
- LolFilter* filter,
- Handle<Object> error) = 0;
-};
-
-
-class LolDumpWriter: public DumpWriter {
- public:
- LolDumpWriter(LiveObjectList* older, LiveObjectList* newer)
- : older_(older), newer_(newer) {
- }
-
- void ComputeTotalCountAndSize(LolFilter* filter, int* count, int* size) {
- *count = 0;
- *size = 0;
-
- LolIterator it(older_, newer_);
- for (it.Init(); !it.Done(); it.Next()) {
- HeapObject* heap_obj = it.Obj();
- if (!filter->Matches(heap_obj)) {
- continue;
- }
-
- *size += heap_obj->Size();
- (*count)++;
- }
- }
-
- bool Write(Handle<FixedArray> elements_arr,
- int start,
- int dump_limit,
- LolFilter* filter,
- Handle<Object> error) {
- // The lols are listed in latest to earliest. We want to dump from
- // earliest to latest. So, compute the last element to start with.
- int index = 0;
- int count = 0;
-
- // Prefetch some needed symbols.
- Handle<String> id_sym = Factory::LookupAsciiSymbol("id");
- Handle<String> desc_sym = Factory::LookupAsciiSymbol("desc");
- Handle<String> size_sym = Factory::LookupAsciiSymbol("size");
-
- // Fill the array with the lol object details.
- Handle<JSObject> detail;
- Handle<String> desc;
- Handle<HeapObject> target;
-
- LiveObjectList* first_lol = (older_ != NULL) ?
- older_->next_ : LiveObjectList::first_;
- LiveObjectList* last_lol = (newer_ != NULL) ? newer_->next_ : NULL;
-
- LolForwardIterator it(first_lol, last_lol);
- for (it.Init(); !it.Done() && (index < dump_limit); it.Next()) {
- HeapObject* heap_obj = it.Obj();
-
- // Skip objects that have been filtered out.
- if (!filter->Matches(heap_obj)) {
- continue;
- }
-
- // Only report objects that are in the section of interest.
- if (count >= start) {
- target = Handle<HeapObject>(heap_obj);
- bool success = AddObjDetail(elements_arr,
- index++,
- it.Id(),
- target,
- NULL,
- id_sym,
- desc_sym,
- size_sym,
- detail,
- desc,
- error);
- if (!success) return false;
- }
- count++;
- }
- return true;
- }
-
- private:
- LiveObjectList* older_;
- LiveObjectList* newer_;
-};
-
-
-class RetainersDumpWriter: public DumpWriter {
- public:
- RetainersDumpWriter(Handle<HeapObject> target,
- Handle<JSObject> instance_filter,
- Handle<JSFunction> args_function)
- : target_(target),
- instance_filter_(instance_filter),
- args_function_(args_function) {
- }
-
- void ComputeTotalCountAndSize(LolFilter* filter, int* count, int* size) {
- Handle<FixedArray> retainers_arr;
- Handle<Object> error;
-
- *size = -1;
- LiveObjectList::GetRetainers(target_,
- instance_filter_,
- retainers_arr,
- 0,
- Smi::kMaxValue,
- count,
- filter,
- NULL,
- *args_function_,
- error);
- }
-
- bool Write(Handle<FixedArray> elements_arr,
- int start,
- int dump_limit,
- LolFilter* filter,
- Handle<Object> error) {
- int dummy;
- int count;
-
- // Fill the retainer objects.
- count = LiveObjectList::GetRetainers(target_,
- instance_filter_,
- elements_arr,
- start,
- dump_limit,
- &dummy,
- filter,
- NULL,
- *args_function_,
- error);
- if (count < 0) {
- return false;
- }
- return true;
- }
-
- private:
- Handle<HeapObject> target_;
- Handle<JSObject> instance_filter_;
- Handle<JSFunction> args_function_;
-};
-
-
-class LiveObjectSummary {
- public:
- explicit LiveObjectSummary(LolFilter* filter)
- : total_count_(0),
- total_size_(0),
- found_root_(false),
- found_weak_root_(false),
- filter_(filter) {
- memset(counts_, 0, sizeof(counts_[0]) * kNumberOfEntries);
- memset(sizes_, 0, sizeof(sizes_[0]) * kNumberOfEntries);
- }
-
- void Add(HeapObject* heap_obj) {
- int size = heap_obj->Size();
- LiveObjectType type = GetObjectType(heap_obj);
- ASSERT(type != kInvalidLiveObjType);
- counts_[type]++;
- sizes_[type] += size;
- total_count_++;
- total_size_ += size;
- }
-
- void set_found_root() { found_root_ = true; }
- void set_found_weak_root() { found_weak_root_ = true; }
-
- inline int Count(LiveObjectType type) {
- return counts_[type];
- }
- inline int Size(LiveObjectType type) {
- return sizes_[type];
- }
- inline int total_count() {
- return total_count_;
- }
- inline int total_size() {
- return total_size_;
- }
- inline bool found_root() {
- return found_root_;
- }
- inline bool found_weak_root() {
- return found_weak_root_;
- }
- int GetNumberOfEntries() {
- int entries = 0;
- for (int i = 0; i < kNumberOfEntries; i++) {
- if (counts_[i]) entries++;
- }
- return entries;
- }
-
- inline LolFilter* filter() { return filter_; }
-
- static const int kNumberOfEntries = kNumberOfTypes;
-
- private:
- int counts_[kNumberOfEntries];
- int sizes_[kNumberOfEntries];
- int total_count_;
- int total_size_;
- bool found_root_;
- bool found_weak_root_;
-
- LolFilter *filter_;
-};
-
-
-// Abstraction for a summary writer.
-class SummaryWriter {
- public:
- virtual ~SummaryWriter() {}
- virtual void Write(LiveObjectSummary* summary) = 0;
-};
-
-
-// A summary writer for filling in a summary of lol lists and diffs.
-class LolSummaryWriter: public SummaryWriter {
- public:
- LolSummaryWriter(LiveObjectList *older_lol,
- LiveObjectList *newer_lol)
- : older_(older_lol), newer_(newer_lol) {
- }
-
- void Write(LiveObjectSummary* summary) {
- LolFilter* filter = summary->filter();
-
- // Fill the summary with the lol object details.
- LolIterator it(older_, newer_);
- for (it.Init(); !it.Done(); it.Next()) {
- HeapObject* heap_obj = it.Obj();
- if (!filter->Matches(heap_obj)) {
- continue;
- }
- summary->Add(heap_obj);
- }
- }
-
- private:
- LiveObjectList* older_;
- LiveObjectList* newer_;
-};
-
-
-// A summary writer for filling in a retainers list.
-class RetainersSummaryWriter: public SummaryWriter {
- public:
- RetainersSummaryWriter(Handle<HeapObject> target,
- Handle<JSObject> instance_filter,
- Handle<JSFunction> args_function)
- : target_(target),
- instance_filter_(instance_filter),
- args_function_(args_function) {
- }
-
- void Write(LiveObjectSummary* summary) {
- Handle<FixedArray> retainers_arr;
- Handle<Object> error;
- int dummy_total_count;
- LiveObjectList::GetRetainers(target_,
- instance_filter_,
- retainers_arr,
- 0,
- Smi::kMaxValue,
- &dummy_total_count,
- summary->filter(),
- summary,
- *args_function_,
- error);
- }
-
- private:
- Handle<HeapObject> target_;
- Handle<JSObject> instance_filter_;
- Handle<JSFunction> args_function_;
-};
-
-
-uint32_t LiveObjectList::next_element_id_ = 1;
-int LiveObjectList::list_count_ = 0;
-int LiveObjectList::last_id_ = 0;
-LiveObjectList* LiveObjectList::first_ = NULL;
-LiveObjectList* LiveObjectList::last_ = NULL;
-
-
-LiveObjectList::LiveObjectList(LiveObjectList* prev, int capacity)
- : prev_(prev),
- next_(NULL),
- capacity_(capacity),
- obj_count_(0) {
- elements_ = NewArray<Element>(capacity);
- id_ = ++last_id_;
-
- list_count_++;
-}
-
-
-LiveObjectList::~LiveObjectList() {
- DeleteArray<Element>(elements_);
- delete prev_;
-}
-
-
-int LiveObjectList::GetTotalObjCountAndSize(int* size_p) {
- int size = 0;
- int count = 0;
- LiveObjectList *lol = this;
- do {
- // Only compute total size if requested i.e. when size_p is not null.
- if (size_p != NULL) {
- Element* elements = lol->elements_;
- for (int i = 0; i < lol->obj_count_; i++) {
- HeapObject* heap_obj = elements[i].obj_;
- size += heap_obj->Size();
- }
- }
- count += lol->obj_count_;
- lol = lol->prev_;
- } while (lol != NULL);
-
- if (size_p != NULL) {
- *size_p = size;
- }
- return count;
-}
-
-
-// Adds an object to the lol.
-// Returns true if successful, else returns false.
-bool LiveObjectList::Add(HeapObject* obj) {
- // If the object is already accounted for in the prev list which we inherit
- // from, then no need to add it to this list.
- if ((prev() != NULL) && (prev()->Find(obj) != NULL)) {
- return true;
- }
- ASSERT(obj_count_ <= capacity_);
- if (obj_count_ == capacity_) {
- // The heap must have grown and we have more objects than capacity to store
- // them.
- return false; // Fail this addition.
- }
- Element& element = elements_[obj_count_++];
- element.id_ = next_element_id_++;
- element.obj_ = obj;
- return true;
-}
-
-
-// Comparator used for sorting and searching the lol.
-int LiveObjectList::CompareElement(const Element* a, const Element* b) {
- const HeapObject* obj1 = a->obj_;
- const HeapObject* obj2 = b->obj_;
- // For lol elements, it doesn't matter which comes first if 2 elements point
- // to the same object (which gets culled later). Hence, we only care about
- // the the greater than / less than relationships.
- return (obj1 > obj2) ? 1 : (obj1 == obj2) ? 0 : -1;
-}
-
-
-// Looks for the specified object in the lol, and returns its element if found.
-LiveObjectList::Element* LiveObjectList::Find(HeapObject* obj) {
- LiveObjectList* lol = this;
- Element key;
- Element* result = NULL;
-
- key.obj_ = obj;
- // Iterate through the chain of lol's to look for the object.
- while ((result == NULL) && (lol != NULL)) {
- result = reinterpret_cast<Element*>(
- bsearch(&key, lol->elements_, lol->obj_count_,
- sizeof(Element),
- reinterpret_cast<RawComparer>(CompareElement)));
- lol = lol->prev_;
- }
- return result;
-}
-
-
-// "Nullifies" (convert the HeapObject* into an SMI) so that it will get cleaned
-// up in the GCEpilogue, while preserving the sort order of the lol.
-// NOTE: the lols need to be already sorted before NullifyMostRecent() is
-// called.
-void LiveObjectList::NullifyMostRecent(HeapObject* obj) {
- LiveObjectList* lol = last();
- Element key;
- Element* result = NULL;
-
- key.obj_ = obj;
- // Iterate through the chain of lol's to look for the object.
- while (lol != NULL) {
- result = reinterpret_cast<Element*>(
- bsearch(&key, lol->elements_, lol->obj_count_,
- sizeof(Element),
- reinterpret_cast<RawComparer>(CompareElement)));
- if (result != NULL) {
- // Since there may be more than one (we are nullifying dup's after all),
- // find the first in the current lol, and nullify that. The lol should
- // be sorted already to make this easy (see the use of SortAll()).
- int i = result - lol->elements_;
-
- // NOTE: we sort the lol in increasing order. So, if an object has been
- // "nullified" (its lowest bit will be cleared to make it look like an
- // SMI), it would/should show up before the equivalent dups that have not
- // yet been "nullified". Hence, we should be searching backwards for the
- // first occurence of a matching object and nullify that instance. This
- // will ensure that we preserve the expected sorting order.
- for (i--; i > 0; i--) {
- Element* element = &lol->elements_[i];
- HeapObject* curr_obj = element->obj_;
- if (curr_obj != obj) {
- break; // No more matches. Let's move on.
- }
- result = element; // Let this earlier match be the result.
- }
-
- // Nullify the object.
- NullifyNonLivePointer(&result->obj_);
- return;
- }
- lol = lol->prev_;
- }
-}
-
-
-// Sorts the lol.
-void LiveObjectList::Sort() {
- if (obj_count_ > 0) {
- Vector<Element> elements_v(elements_, obj_count_);
- elements_v.Sort(CompareElement);
- }
-}
-
-
-// Sorts all captured lols starting from the latest.
-void LiveObjectList::SortAll() {
- LiveObjectList* lol = last();
- while (lol != NULL) {
- lol->Sort();
- lol = lol->prev_;
- }
-}
-
-
-// Counts the number of objects in the heap.
-static int CountHeapObjects() {
- int count = 0;
- // Iterate over all the heap spaces and count the number of objects.
- HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
- HeapObject* heap_obj = NULL;
- while ((heap_obj = iterator.next()) != NULL) {
- count++;
- }
- return count;
-}
-
-
-// Captures a current snapshot of all objects in the heap.
-MaybeObject* LiveObjectList::Capture() {
- HandleScope scope;
-
- // Count the number of objects in the heap.
- int total_count = CountHeapObjects();
- int count = total_count;
- int size = 0;
-
- LiveObjectList* last_lol = last();
- if (last_lol != NULL) {
- count -= last_lol->TotalObjCount();
- }
-
- LiveObjectList* lol;
-
- // Create a lol large enough to track all the objects.
- lol = new LiveObjectList(last_lol, count);
- if (lol == NULL) {
- return NULL; // No memory to proceed.
- }
-
- // The HeapIterator needs to be in its own scope because it disables
- // allocation, and we need allocate below.
- {
- // Iterate over all the heap spaces and add the objects.
- HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
- HeapObject* heap_obj = NULL;
- bool failed = false;
- while (!failed && (heap_obj = iterator.next()) != NULL) {
- failed = !lol->Add(heap_obj);
- size += heap_obj->Size();
- }
- ASSERT(!failed);
-
- lol->Sort();
-
- // Add the current lol to the list of lols.
- if (last_ != NULL) {
- last_->next_ = lol;
- } else {
- first_ = lol;
- }
- last_ = lol;
-
-#ifdef VERIFY_LOL
- if (FLAG_verify_lol) {
- Verify(true);
- }
-#endif
- }
-
- Handle<String> id_sym = Factory::LookupAsciiSymbol("id");
- Handle<String> count_sym = Factory::LookupAsciiSymbol("count");
- Handle<String> size_sym = Factory::LookupAsciiSymbol("size");
-
- Handle<JSObject> result = Factory::NewJSObject(Top::object_function());
- if (result->IsFailure()) return Object::cast(*result);
-
- { MaybeObject* maybe_result = result->SetProperty(*id_sym,
- Smi::FromInt(lol->id()),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- }
- { MaybeObject* maybe_result = result->SetProperty(*count_sym,
- Smi::FromInt(total_count),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- }
- { MaybeObject* maybe_result = result->SetProperty(*size_sym,
- Smi::FromInt(size),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- }
-
- return *result;
-}
-
-
-// Delete doesn't actually deletes an lol. It just marks it as invisible since
-// its contents are considered to be part of subsequent lists as well. The
-// only time we'll actually delete the lol is when we Reset() or if the lol is
-// invisible, and its element count reaches 0.
-bool LiveObjectList::Delete(int id) {
- LiveObjectList *lol = last();
- while (lol != NULL) {
- if (lol->id() == id) {
- break;
- }
- lol = lol->prev_;
- }
-
- // If no lol is found for this id, then we fail to delete.
- if (lol == NULL) return false;
-
- // Else, mark the lol as invisible i.e. id == 0.
- lol->id_ = 0;
- list_count_--;
- ASSERT(list_count_ >= 0);
- if (lol->obj_count_ == 0) {
- // Point the next lol's prev to this lol's prev.
- LiveObjectList* next = lol->next_;
- LiveObjectList* prev = lol->prev_;
- // Point next's prev to prev.
- if (next != NULL) {
- next->prev_ = lol->prev_;
- } else {
- last_ = lol->prev_;
- }
- // Point prev's next to next.
- if (prev != NULL) {
- prev->next_ = lol->next_;
- } else {
- first_ = lol->next_;
- }
-
- lol->prev_ = NULL;
- lol->next_ = NULL;
-
- // Delete this now empty and invisible lol.
- delete lol;
- }
-
- // Just in case we've marked everything invisible, then clean up completely.
- if (list_count_ == 0) {
- Reset();
- }
-
- return true;
-}
-
-
-MaybeObject* LiveObjectList::Dump(int older_id,
- int newer_id,
- int start_idx,
- int dump_limit,
- Handle<JSObject> filter_obj) {
- if ((older_id < 0) || (newer_id < 0) || (last() == NULL)) {
- return Failure::Exception(); // Fail: 0 is not a valid lol id.
- }
- if (newer_id < older_id) {
- // They are not in the expected order. Swap them.
- int temp = older_id;
- older_id = newer_id;
- newer_id = temp;
- }
-
- LiveObjectList *newer_lol = FindLolForId(newer_id, last());
- LiveObjectList *older_lol = FindLolForId(older_id, newer_lol);
-
- // If the id is defined, and we can't find a LOL for it, then we have an
- // invalid id.
- if ((newer_id != 0) && (newer_lol == NULL)) {
- return Failure::Exception(); // Fail: the newer lol id is invalid.
- }
- if ((older_id != 0) && (older_lol == NULL)) {
- return Failure::Exception(); // Fail: the older lol id is invalid.
- }
-
- LolFilter filter(filter_obj);
- LolDumpWriter writer(older_lol, newer_lol);
- return DumpPrivate(&writer, start_idx, dump_limit, &filter);
-}
-
-
-MaybeObject* LiveObjectList::DumpPrivate(DumpWriter* writer,
- int start,
- int dump_limit,
- LolFilter* filter) {
- HandleScope scope;
-
- // Calculate the number of entries of the dump.
- int count = -1;
- int size = -1;
- writer->ComputeTotalCountAndSize(filter, &count, &size);
-
- // Adjust for where to start the dump.
- if ((start < 0) || (start >= count)) {
- return Failure::Exception(); // invalid start.
- }
-
- int remaining_count = count - start;
- if (dump_limit > remaining_count) {
- dump_limit = remaining_count;
- }
-
- // Allocate an array to hold the result.
- Handle<FixedArray> elements_arr = Factory::NewFixedArray(dump_limit);
- if (elements_arr->IsFailure()) return Object::cast(*elements_arr);
-
- // Fill in the dump.
- Handle<Object> error;
- bool success = writer->Write(elements_arr,
- start,
- dump_limit,
- filter,
- error);
- if (!success) return Object::cast(*error);
-
- MaybeObject* maybe_result;
-
- // Allocate the result body.
- Handle<JSObject> body = Factory::NewJSObject(Top::object_function());
- if (body->IsFailure()) return Object::cast(*body);
-
- // Set the updated body.count.
- Handle<String> count_sym = Factory::LookupAsciiSymbol("count");
- maybe_result = body->SetProperty(*count_sym,
- Smi::FromInt(count),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Set the updated body.size if appropriate.
- if (size >= 0) {
- Handle<String> size_sym = Factory::LookupAsciiSymbol("size");
- maybe_result = body->SetProperty(*size_sym,
- Smi::FromInt(size),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- }
-
- // Set body.first_index.
- Handle<String> first_sym = Factory::LookupAsciiSymbol("first_index");
- maybe_result = body->SetProperty(*first_sym,
- Smi::FromInt(start),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Allocate the JSArray of the elements.
- Handle<JSObject> elements = Factory::NewJSObject(Top::array_function());
- if (elements->IsFailure()) return Object::cast(*elements);
- Handle<JSArray>::cast(elements)->SetContent(*elements_arr);
-
- // Set body.elements.
- Handle<String> elements_sym = Factory::LookupAsciiSymbol("elements");
- maybe_result = body->SetProperty(*elements_sym,
- *elements,
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- return *body;
-}
-
-
-MaybeObject* LiveObjectList::Summarize(int older_id,
- int newer_id,
- Handle<JSObject> filter_obj) {
- if ((older_id < 0) || (newer_id < 0) || (last() == NULL)) {
- return Failure::Exception(); // Fail: 0 is not a valid lol id.
- }
- if (newer_id < older_id) {
- // They are not in the expected order. Swap them.
- int temp = older_id;
- older_id = newer_id;
- newer_id = temp;
- }
-
- LiveObjectList *newer_lol = FindLolForId(newer_id, last());
- LiveObjectList *older_lol = FindLolForId(older_id, newer_lol);
-
- // If the id is defined, and we can't find a LOL for it, then we have an
- // invalid id.
- if ((newer_id != 0) && (newer_lol == NULL)) {
- return Failure::Exception(); // Fail: the newer lol id is invalid.
- }
- if ((older_id != 0) && (older_lol == NULL)) {
- return Failure::Exception(); // Fail: the older lol id is invalid.
- }
-
- LolFilter filter(filter_obj);
- LolSummaryWriter writer(older_lol, newer_lol);
- return SummarizePrivate(&writer, &filter, false);
-}
-
-
-// Creates a summary report for the debugger.
-// Note: the SummaryWriter takes care of iterating over objects and filling in
-// the summary.
-MaybeObject* LiveObjectList::SummarizePrivate(SummaryWriter* writer,
- LolFilter* filter,
- bool is_tracking_roots) {
- HandleScope scope;
- MaybeObject* maybe_result;
-
- LiveObjectSummary summary(filter);
- writer->Write(&summary);
-
- // The result body will look like this:
- // body: {
- // count: <total_count>,
- // size: <total_size>,
- // found_root: <boolean>, // optional.
- // found_weak_root: <boolean>, // optional.
- // summary: [
- // {
- // desc: "<object type name>",
- // count: <count>,
- // size: size
- // },
- // ...
- // ]
- // }
-
- // Prefetch some needed symbols.
- Handle<String> desc_sym = Factory::LookupAsciiSymbol("desc");
- Handle<String> count_sym = Factory::LookupAsciiSymbol("count");
- Handle<String> size_sym = Factory::LookupAsciiSymbol("size");
- Handle<String> summary_sym = Factory::LookupAsciiSymbol("summary");
-
- // Allocate the summary array.
- int entries_count = summary.GetNumberOfEntries();
- Handle<FixedArray> summary_arr =
- Factory::NewFixedArray(entries_count);
- if (summary_arr->IsFailure()) return Object::cast(*summary_arr);
-
- int idx = 0;
- for (int i = 0; i < LiveObjectSummary::kNumberOfEntries; i++) {
- // Allocate the summary record.
- Handle<JSObject> detail = Factory::NewJSObject(Top::object_function());
- if (detail->IsFailure()) return Object::cast(*detail);
-
- // Fill in the summary record.
- LiveObjectType type = static_cast<LiveObjectType>(i);
- int count = summary.Count(type);
- if (count) {
- const char* desc_cstr = GetObjectTypeDesc(type);
- Handle<String> desc = Factory::LookupAsciiSymbol(desc_cstr);
- int size = summary.Size(type);
-
- maybe_result = detail->SetProperty(*desc_sym,
- *desc,
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- maybe_result = detail->SetProperty(*count_sym,
- Smi::FromInt(count),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- maybe_result = detail->SetProperty(*size_sym,
- Smi::FromInt(size),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- summary_arr->set(idx++, *detail);
- }
- }
-
- // Wrap the summary fixed array in a JS array.
- Handle<JSObject> summary_obj = Factory::NewJSObject(Top::array_function());
- if (summary_obj->IsFailure()) return Object::cast(*summary_obj);
- Handle<JSArray>::cast(summary_obj)->SetContent(*summary_arr);
-
- // Create the body object.
- Handle<JSObject> body = Factory::NewJSObject(Top::object_function());
- if (body->IsFailure()) return Object::cast(*body);
-
- // Fill out the body object.
- int total_count = summary.total_count();
- int total_size = summary.total_size();
- maybe_result = body->SetProperty(*count_sym,
- Smi::FromInt(total_count),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- maybe_result = body->SetProperty(*size_sym,
- Smi::FromInt(total_size),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- if (is_tracking_roots) {
- int found_root = summary.found_root();
- int found_weak_root = summary.found_weak_root();
- Handle<String> root_sym = Factory::LookupAsciiSymbol("found_root");
- Handle<String> weak_root_sym =
- Factory::LookupAsciiSymbol("found_weak_root");
- maybe_result = body->SetProperty(*root_sym,
- Smi::FromInt(found_root),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- maybe_result = body->SetProperty(*weak_root_sym,
- Smi::FromInt(found_weak_root),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- }
-
- maybe_result = body->SetProperty(*summary_sym,
- *summary_obj,
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- return *body;
-}
-
-
-// Returns an array listing the captured lols.
-// Note: only dumps the section starting at start_idx and only up to
-// dump_limit entries.
-MaybeObject* LiveObjectList::Info(int start_idx, int dump_limit) {
- HandleScope scope;
- MaybeObject* maybe_result;
-
- int total_count = LiveObjectList::list_count();
- int dump_count = total_count;
-
- // Adjust for where to start the dump.
- if (total_count == 0) {
- start_idx = 0; // Ensure this to get an empty list.
- } else if ((start_idx < 0) || (start_idx >= total_count)) {
- return Failure::Exception(); // invalid start.
- }
- dump_count -= start_idx;
-
- // Adjust for the dump limit.
- if (dump_count > dump_limit) {
- dump_count = dump_limit;
- }
-
- // Allocate an array to hold the result.
- Handle<FixedArray> list = Factory::NewFixedArray(dump_count);
- if (list->IsFailure()) return Object::cast(*list);
-
- // Prefetch some needed symbols.
- Handle<String> id_sym = Factory::LookupAsciiSymbol("id");
- Handle<String> count_sym = Factory::LookupAsciiSymbol("count");
- Handle<String> size_sym = Factory::LookupAsciiSymbol("size");
-
- // Fill the array with the lol details.
- int idx = 0;
- LiveObjectList* lol = first_;
- while ((lol != NULL) && (idx < start_idx)) { // Skip tail entries.
- if (lol->id() != 0) {
- idx++;
- }
- lol = lol->next();
- }
- idx = 0;
- while ((lol != NULL) && (dump_limit != 0)) {
- if (lol->id() != 0) {
- int count;
- int size;
- count = lol->GetTotalObjCountAndSize(&size);
-
- Handle<JSObject> detail = Factory::NewJSObject(Top::object_function());
- if (detail->IsFailure()) return Object::cast(*detail);
-
- maybe_result = detail->SetProperty(*id_sym,
- Smi::FromInt(lol->id()),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- maybe_result = detail->SetProperty(*count_sym,
- Smi::FromInt(count),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- maybe_result = detail->SetProperty(*size_sym,
- Smi::FromInt(size),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- list->set(idx++, *detail);
- dump_limit--;
- }
- lol = lol->next();
- }
-
- // Return the result as a JS array.
- Handle<JSObject> lols = Factory::NewJSObject(Top::array_function());
- Handle<JSArray>::cast(lols)->SetContent(*list);
-
- Handle<JSObject> result = Factory::NewJSObject(Top::object_function());
- if (result->IsFailure()) return Object::cast(*result);
-
- maybe_result = result->SetProperty(*count_sym,
- Smi::FromInt(total_count),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- Handle<String> first_sym = Factory::LookupAsciiSymbol("first_index");
- maybe_result = result->SetProperty(*first_sym,
- Smi::FromInt(start_idx),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- Handle<String> lists_sym = Factory::LookupAsciiSymbol("lists");
- maybe_result = result->SetProperty(*lists_sym,
- *lols,
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- return *result;
-}
-
-
-// Deletes all captured lols.
-void LiveObjectList::Reset() {
- LiveObjectList *lol = last();
- // Just delete the last. Each lol will delete it's prev automatically.
- delete lol;
-
- next_element_id_ = 1;
- list_count_ = 0;
- last_id_ = 0;
- first_ = NULL;
- last_ = NULL;
-}
-
-
-// Gets the object for the specified obj id.
-Object* LiveObjectList::GetObj(int obj_id) {
- Element* element = FindElementFor<int>(GetElementId, obj_id);
- if (element != NULL) {
- return Object::cast(element->obj_);
- }
- return Heap::undefined_value();
-}
-
-
-// Gets the obj id for the specified address if valid.
-int LiveObjectList::GetObjId(Object* obj) {
- // Make a heap object pointer from the address.
- HeapObject* hobj = HeapObject::cast(obj);
- Element* element = FindElementFor<HeapObject*>(GetElementObj, hobj);
- if (element != NULL) {
- return element->id_;
- }
- return 0; // Invalid address.
-}
-
-
-// Gets the obj id for the specified address if valid.
-Object* LiveObjectList::GetObjId(Handle<String> address) {
- SmartPointer<char> addr_str =
- address->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-
- // Extract the address value from the string.
- int value = static_cast<int>(StringToInt(*address, 16));
- Object* obj = reinterpret_cast<Object*>(value);
- return Smi::FromInt(GetObjId(obj));
-}
-
-
-// Helper class for copying HeapObjects.
-class LolVisitor: public ObjectVisitor {
- public:
-
- LolVisitor(HeapObject* target, Handle<HeapObject> handle_to_skip)
- : target_(target), handle_to_skip_(handle_to_skip), found_(false) {}
-
- void VisitPointer(Object** p) { CheckPointer(p); }
-
- void VisitPointers(Object** start, Object** end) {
- // Check all HeapObject pointers in [start, end).
- for (Object** p = start; !found() && p < end; p++) CheckPointer(p);
- }
-
- inline bool found() const { return found_; }
- inline bool reset() { return found_ = false; }
-
- private:
- inline void CheckPointer(Object** p) {
- Object* object = *p;
- if (HeapObject::cast(object) == target_) {
- // We may want to skip this handle because the handle may be a local
- // handle in a handle scope in one of our callers. Once we return,
- // that handle will be popped. Hence, we don't want to count it as
- // a root that would have kept the target object alive.
- if (!handle_to_skip_.is_null() &&
- handle_to_skip_.location() == reinterpret_cast<HeapObject**>(p)) {
- return; // Skip this handle.
- }
- found_ = true;
- }
- }
-
- HeapObject* target_;
- Handle<HeapObject> handle_to_skip_;
- bool found_;
-};
-
-
-inline bool AddRootRetainerIfFound(const LolVisitor& visitor,
- LolFilter* filter,
- LiveObjectSummary *summary,
- void (*SetRootFound)(LiveObjectSummary *s),
- int start,
- int dump_limit,
- int* total_count,
- Handle<FixedArray> retainers_arr,
- int* count,
- int* index,
- const char* root_name,
- Handle<String> id_sym,
- Handle<String> desc_sym,
- Handle<String> size_sym,
- Handle<Object> error) {
- HandleScope scope;
-
- // Scratch handles.
- Handle<JSObject> detail;
- Handle<String> desc;
- Handle<HeapObject> retainer;
-
- if (visitor.found()) {
- if (!filter->is_active()) {
- (*total_count)++;
- if (summary) {
- SetRootFound(summary);
- } else if ((*total_count > start) && ((*index) < dump_limit)) {
- (*count)++;
- if (!retainers_arr.is_null()) {
- return AddObjDetail(retainers_arr,
- (*index)++,
- 0,
- retainer,
- root_name,
- id_sym,
- desc_sym,
- size_sym,
- detail,
- desc,
- error);
- }
- }
- }
- }
- return true;
-}
-
-
-inline void SetFoundRoot(LiveObjectSummary *summary) {
- summary->set_found_root();
-}
-
-
-inline void SetFoundWeakRoot(LiveObjectSummary *summary) {
- summary->set_found_weak_root();
-}
-
-
-int LiveObjectList::GetRetainers(Handle<HeapObject> target,
- Handle<JSObject> instance_filter,
- Handle<FixedArray> retainers_arr,
- int start,
- int dump_limit,
- int* total_count,
- LolFilter* filter,
- LiveObjectSummary *summary,
- JSFunction* arguments_function,
- Handle<Object> error) {
- HandleScope scope;
-
- // Scratch handles.
- Handle<JSObject> detail;
- Handle<String> desc;
- Handle<HeapObject> retainer;
-
- // Prefetch some needed symbols.
- Handle<String> id_sym = Factory::LookupAsciiSymbol("id");
- Handle<String> desc_sym = Factory::LookupAsciiSymbol("desc");
- Handle<String> size_sym = Factory::LookupAsciiSymbol("size");
-
- NoHandleAllocation ha;
- int count = 0;
- int index = 0;
- Handle<JSObject> last_obj;
-
- *total_count = 0;
-
- // Iterate roots.
- LolVisitor lol_visitor(*target, target);
- Heap::IterateStrongRoots(&lol_visitor, VISIT_ALL);
- if (!AddRootRetainerIfFound(lol_visitor,
- filter,
- summary,
- SetFoundRoot,
- start,
- dump_limit,
- total_count,
- retainers_arr,
- &count,
- &index,
- "<root>",
- id_sym,
- desc_sym,
- size_sym,
- error)) {
- return -1;
- }
-
- lol_visitor.reset();
- Heap::IterateWeakRoots(&lol_visitor, VISIT_ALL);
- if (!AddRootRetainerIfFound(lol_visitor,
- filter,
- summary,
- SetFoundWeakRoot,
- start,
- dump_limit,
- total_count,
- retainers_arr,
- &count,
- &index,
- "<weak root>",
- id_sym,
- desc_sym,
- size_sym,
- error)) {
- return -1;
- }
-
- // Iterate the live object lists.
- LolIterator it(NULL, last());
- for (it.Init(); !it.Done() && (index < dump_limit); it.Next()) {
- HeapObject* heap_obj = it.Obj();
-
- // Only look at all JSObjects.
- if (heap_obj->IsJSObject()) {
- // Skip context extension objects and argument arrays as these are
- // checked in the context of functions using them.
- JSObject* obj = JSObject::cast(heap_obj);
- if (obj->IsJSContextExtensionObject() ||
- obj->map()->constructor() == arguments_function) {
- continue;
- }
-
- // Check if the JS object has a reference to the object looked for.
- if (obj->ReferencesObject(*target)) {
- // Check instance filter if supplied. This is normally used to avoid
- // references from mirror objects (see Runtime_IsInPrototypeChain).
- if (!instance_filter->IsUndefined()) {
- Object* V = obj;
- while (true) {
- Object* prototype = V->GetPrototype();
- if (prototype->IsNull()) {
- break;
- }
- if (*instance_filter == prototype) {
- obj = NULL; // Don't add this object.
- break;
- }
- V = prototype;
- }
- }
-
- if (obj != NULL) {
- // Skip objects that have been filtered out.
- if (filter->Matches(heap_obj)) {
- continue;
- }
-
- // Valid reference found add to instance array if supplied an update
- // count.
- last_obj = Handle<JSObject>(obj);
- (*total_count)++;
-
- if (summary != NULL) {
- summary->Add(heap_obj);
- } else if ((*total_count > start) && (index < dump_limit)) {
- count++;
- if (!retainers_arr.is_null()) {
- retainer = Handle<HeapObject>(heap_obj);
- bool success = AddObjDetail(retainers_arr,
- index++,
- it.Id(),
- retainer,
- NULL,
- id_sym,
- desc_sym,
- size_sym,
- detail,
- desc,
- error);
- if (!success) return -1;
- }
- }
- }
- }
- }
- }
-
- // Check for circular reference only. This can happen when the object is only
- // referenced from mirrors and has a circular reference in which case the
- // object is not really alive and would have been garbage collected if not
- // referenced from the mirror.
-
- if (*total_count == 1 && !last_obj.is_null() && *last_obj == *target) {
- count = 0;
- *total_count = 0;
- }
-
- return count;
-}
-
-
-MaybeObject* LiveObjectList::GetObjRetainers(int obj_id,
- Handle<JSObject> instance_filter,
- bool verbose,
- int start,
- int dump_limit,
- Handle<JSObject> filter_obj) {
- HandleScope scope;
-
- // Get the target object.
- HeapObject* heap_obj = HeapObject::cast(GetObj(obj_id));
- if (heap_obj == Heap::undefined_value()) {
- return heap_obj;
- }
-
- Handle<HeapObject> target = Handle<HeapObject>(heap_obj);
-
- // Get the constructor function for context extension and arguments array.
- JSObject* arguments_boilerplate =
- Top::context()->global_context()->arguments_boilerplate();
- JSFunction* arguments_function =
- JSFunction::cast(arguments_boilerplate->map()->constructor());
-
- Handle<JSFunction> args_function = Handle<JSFunction>(arguments_function);
- LolFilter filter(filter_obj);
-
- if (!verbose) {
- RetainersSummaryWriter writer(target, instance_filter, args_function);
- return SummarizePrivate(&writer, &filter, true);
-
- } else {
- RetainersDumpWriter writer(target, instance_filter, args_function);
- Object* body_obj;
- MaybeObject* maybe_result =
- DumpPrivate(&writer, start, dump_limit, &filter);
- if (!maybe_result->ToObject(&body_obj)) {
- return maybe_result;
- }
-
- // Set body.id.
- Handle<JSObject> body = Handle<JSObject>(JSObject::cast(body_obj));
- Handle<String> id_sym = Factory::LookupAsciiSymbol("id");
- maybe_result = body->SetProperty(*id_sym,
- Smi::FromInt(obj_id),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- return *body;
- }
-}
-
-
-Object* LiveObjectList::PrintObj(int obj_id) {
- Object* obj = GetObj(obj_id);
- if (!obj) {
- return Heap::undefined_value();
- }
-
- EmbeddedVector<char, 128> temp_filename;
- static int temp_count = 0;
- const char* path_prefix = ".";
-
- if (FLAG_lol_workdir) {
- path_prefix = FLAG_lol_workdir;
- }
- OS::SNPrintF(temp_filename, "%s/lol-print-%d", path_prefix, ++temp_count);
-
- FILE* f = OS::FOpen(temp_filename.start(), "w+");
-
- PrintF(f, "@%d ", LiveObjectList::GetObjId(obj));
-#ifdef OBJECT_PRINT
-#ifdef INSPECTOR
- Inspector::DumpObjectType(f, obj);
-#endif // INSPECTOR
- PrintF(f, "\n");
- obj->Print(f);
-#else // !OBJECT_PRINT
- obj->ShortPrint(f);
-#endif // !OBJECT_PRINT
- PrintF(f, "\n");
- Flush(f);
- fclose(f);
-
- // Create a string from the temp_file.
- // Note: the mmapped resource will take care of closing the file.
- MemoryMappedExternalResource* resource =
- new MemoryMappedExternalResource(temp_filename.start(), true);
- if (resource->exists() && !resource->is_empty()) {
- ASSERT(resource->IsAscii());
- Handle<String> dump_string =
- Factory::NewExternalStringFromAscii(resource);
- ExternalStringTable::AddString(*dump_string);
- return *dump_string;
- } else {
- delete resource;
- }
- return Heap::undefined_value();
-}
-
-
-class LolPathTracer: public PathTracer {
- public:
- LolPathTracer(FILE* out,
- Object* search_target,
- WhatToFind what_to_find)
- : PathTracer(search_target, what_to_find, VISIT_ONLY_STRONG), out_(out) {}
-
- private:
- void ProcessResults();
-
- FILE* out_;
-};
-
-
-void LolPathTracer::ProcessResults() {
- if (found_target_) {
- PrintF(out_, "=====================================\n");
- PrintF(out_, "==== Path to object ====\n");
- PrintF(out_, "=====================================\n\n");
-
- ASSERT(!object_stack_.is_empty());
- Object* prev = NULL;
- for (int i = 0, index = 0; i < object_stack_.length(); i++) {
- Object* obj = object_stack_[i];
-
- // Skip this object if it is basically the internals of the
- // previous object (which would have dumped its details already).
- if (prev && prev->IsJSObject() &&
- (obj != search_target_)) {
- JSObject* jsobj = JSObject::cast(prev);
- if (obj->IsFixedArray() &&
- jsobj->properties() == FixedArray::cast(obj)) {
- // Skip this one because it would have been printed as the
- // properties of the last object already.
- continue;
- } else if (obj->IsHeapObject() &&
- jsobj->elements() == HeapObject::cast(obj)) {
- // Skip this one because it would have been printed as the
- // elements of the last object already.
- continue;
- }
- }
-
- // Print a connecting arrow.
- if (i > 0) PrintF(out_, "\n |\n |\n V\n\n");
-
- // Print the object index.
- PrintF(out_, "[%d] ", ++index);
-
- // Print the LOL object ID:
- int id = LiveObjectList::GetObjId(obj);
- if (id > 0) PrintF(out_, "@%d ", id);
-
-#ifdef OBJECT_PRINT
-#ifdef INSPECTOR
- Inspector::DumpObjectType(out_, obj);
-#endif // INSPECTOR
- PrintF(out_, "\n");
- obj->Print(out_);
-#else // !OBJECT_PRINT
- obj->ShortPrint(out_);
- PrintF(out_, "\n");
-#endif // !OBJECT_PRINT
- Flush(out_);
- }
- PrintF(out_, "\n");
- PrintF(out_, "=====================================\n\n");
- Flush(out_);
- }
-}
-
-
-Object* LiveObjectList::GetPathPrivate(HeapObject* obj1, HeapObject* obj2) {
- EmbeddedVector<char, 128> temp_filename;
- static int temp_count = 0;
- const char* path_prefix = ".";
-
- if (FLAG_lol_workdir) {
- path_prefix = FLAG_lol_workdir;
- }
- OS::SNPrintF(temp_filename, "%s/lol-getpath-%d", path_prefix, ++temp_count);
-
- FILE* f = OS::FOpen(temp_filename.start(), "w+");
-
- // Save the previous verbosity.
- bool prev_verbosity = FLAG_use_verbose_printer;
- FLAG_use_verbose_printer = false;
-
- // Dump the paths.
- {
- // The tracer needs to be scoped because its usage asserts no allocation,
- // and we need to allocate the result string below.
- LolPathTracer tracer(f, obj2, LolPathTracer::FIND_FIRST);
-
- bool found = false;
- if (obj1 == NULL) {
- // Check for ObjectGroups that references this object.
- // TODO(mlam): refactor this to be more modular.
- {
- List<ObjectGroup*>* groups = GlobalHandles::ObjectGroups();
- for (int i = 0; i < groups->length(); i++) {
- ObjectGroup* group = groups->at(i);
- if (group == NULL) continue;
-
- bool found_group = false;
- List<Object**>& objects = group->objects_;
- for (int j = 0; j < objects.length(); j++) {
- Object* object = *objects[j];
- HeapObject* hobj = HeapObject::cast(object);
- if (obj2 == hobj) {
- found_group = true;
- break;
- }
- }
-
- if (found_group) {
- PrintF(f,
- "obj %p is a member of object group %p {\n",
- reinterpret_cast<void*>(obj2),
- reinterpret_cast<void*>(group));
- for (int j = 0; j < objects.length(); j++) {
- Object* object = *objects[j];
- if (!object->IsHeapObject()) continue;
-
- HeapObject* hobj = HeapObject::cast(object);
- int id = GetObjId(hobj);
- if (id != 0) {
- PrintF(f, " @%d:", id);
- } else {
- PrintF(f, " <no id>:");
- }
-
- char buffer[512];
- GenerateObjectDesc(hobj, buffer, sizeof(buffer));
- PrintF(f, " %s", buffer);
- if (hobj == obj2) {
- PrintF(f, " <===");
- }
- PrintF(f, "\n");
- }
- PrintF(f, "}\n");
- }
- }
- }
-
- PrintF(f, "path from roots to obj %p\n", reinterpret_cast<void*>(obj2));
- Heap::IterateRoots(&tracer, VISIT_ONLY_STRONG);
- found = tracer.found();
-
- if (!found) {
- PrintF(f, " No paths found. Checking symbol tables ...\n");
- SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
- tracer.VisitPointers(reinterpret_cast<Object**>(&symbol_table),
- reinterpret_cast<Object**>(&symbol_table)+1);
- found = tracer.found();
- if (!found) {
- symbol_table->IteratePrefix(&tracer);
- found = tracer.found();
- }
- }
-
- if (!found) {
- PrintF(f, " No paths found. Checking weak roots ...\n");
- // Check weak refs next.
- GlobalHandles::IterateWeakRoots(&tracer);
- found = tracer.found();
- }
-
- } else {
- PrintF(f, "path from obj %p to obj %p:\n",
- reinterpret_cast<void*>(obj1), reinterpret_cast<void*>(obj2));
- tracer.TracePathFrom(reinterpret_cast<Object**>(&obj1));
- found = tracer.found();
- }
-
- if (!found) {
- PrintF(f, " No paths found\n\n");
- }
- }
-
- // Flush and clean up the dumped file.
- Flush(f);
- fclose(f);
-
- // Restore the previous verbosity.
- FLAG_use_verbose_printer = prev_verbosity;
-
- // Create a string from the temp_file.
- // Note: the mmapped resource will take care of closing the file.
- MemoryMappedExternalResource* resource =
- new MemoryMappedExternalResource(temp_filename.start(), true);
- if (resource->exists() && !resource->is_empty()) {
- ASSERT(resource->IsAscii());
- Handle<String> path_string =
- Factory::NewExternalStringFromAscii(resource);
- ExternalStringTable::AddString(*path_string);
- return *path_string;
- } else {
- delete resource;
- }
- return Heap::undefined_value();
-}
-
-
-Object* LiveObjectList::GetPath(int obj_id1,
- int obj_id2,
- Handle<JSObject> instance_filter) {
- HandleScope scope;
-
- // Get the target object.
- HeapObject* obj1 = NULL;
- if (obj_id1 != 0) {
- obj1 = HeapObject::cast(GetObj(obj_id1));
- if (obj1 == Heap::undefined_value()) {
- return obj1;
- }
- }
-
- HeapObject* obj2 = HeapObject::cast(GetObj(obj_id2));
- if (obj2 == Heap::undefined_value()) {
- return obj2;
- }
-
- return GetPathPrivate(obj1, obj2);
-}
-
-
-void LiveObjectList::DoProcessNonLive(HeapObject *obj) {
- // We should only be called if we have at least one lol to search.
- ASSERT(last() != NULL);
- Element* element = last()->Find(obj);
- if (element != NULL) {
- NullifyNonLivePointer(&element->obj_);
- }
-}
-
-
-void LiveObjectList::IterateElementsPrivate(ObjectVisitor* v) {
- LiveObjectList* lol = last();
- while (lol != NULL) {
- Element* elements = lol->elements_;
- int count = lol->obj_count_;
- for (int i = 0; i < count; i++) {
- HeapObject** p = &elements[i].obj_;
- v->VisitPointer(reinterpret_cast<Object **>(p));
- }
- lol = lol->prev_;
- }
-}
-
-
-// Purpose: Called by GCEpilogue to purge duplicates. Not to be called by
-// anyone else.
-void LiveObjectList::PurgeDuplicates() {
- bool is_sorted = false;
- LiveObjectList* lol = last();
- if (!lol) {
- return; // Nothing to purge.
- }
-
- int total_count = lol->TotalObjCount();
- if (!total_count) {
- return; // Nothing to purge.
- }
-
- Element* elements = NewArray<Element>(total_count);
- int count = 0;
-
- // Copy all the object elements into a consecutive array.
- while (lol) {
- memcpy(&elements[count], lol->elements_, lol->obj_count_ * sizeof(Element));
- count += lol->obj_count_;
- lol = lol->prev_;
- }
- qsort(elements, total_count, sizeof(Element),
- reinterpret_cast<RawComparer>(CompareElement));
-
- ASSERT(count == total_count);
-
- // Iterate over all objects in the consolidated list and check for dups.
- total_count--;
- for (int i = 0; i < total_count; ) {
- Element* curr = &elements[i];
- HeapObject* curr_obj = curr->obj_;
- int j = i+1;
- bool done = false;
-
- while (!done && (j < total_count)) {
- // Process if the element's object is still live after the current GC.
- // Non-live objects will be converted to SMIs i.e. not HeapObjects.
- if (curr_obj->IsHeapObject()) {
- Element* next = &elements[j];
- HeapObject* next_obj = next->obj_;
- if (next_obj->IsHeapObject()) {
- if (curr_obj != next_obj) {
- done = true;
- continue; // Live object but no match. Move on.
- }
-
- // NOTE: we've just GCed the LOLs. Hence, they are no longer sorted.
- // Since we detected at least one need to search for entries, we'll
- // sort it to enable the use of NullifyMostRecent() below. We only
- // need to sort it once (except for one exception ... see below).
- if (!is_sorted) {
- SortAll();
- is_sorted = true;
- }
-
- // We have a match. Need to nullify the most recent ref to this
- // object. We'll keep the oldest ref:
- // Note: we will nullify the element record in the LOL
- // database, not in the local sorted copy of the elements.
- NullifyMostRecent(curr_obj);
- }
- }
- // Either the object was already marked for purging, or we just marked
- // it. Either way, if there's more than one dup, then we need to check
- // the next element for another possible dup against the current as well
- // before we move on. So, here we go.
- j++;
- }
-
- // We can move on to checking the match on the next element.
- i = j;
- }
-
- DeleteArray<Element>(elements);
-}
-
-
-// Purpose: Purges dead objects and resorts the LOLs.
-void LiveObjectList::GCEpiloguePrivate() {
- // Note: During the GC, ConsStrings may be collected and pointers may be
- // forwarded to its constituent string. As a result, we may find dupes of
- // objects references in the LOL list.
- // Another common way we get dups is that free chunks that have been swept
- // in the oldGen heap may be kept as ByteArray objects in a free list.
- //
- // When we promote live objects from the youngGen, the object may be moved
- // to the start of these free chunks. Since there is no free or move event
- // for the free chunks, their addresses will show up 2 times: once for their
- // original free ByteArray selves, and once for the newly promoted youngGen
- // object. Hence, we can get a duplicate address in the LOL again.
- //
- // We need to eliminate these dups because the LOL implementation expects to
- // only have at most one unique LOL reference to any object at any time.
- PurgeDuplicates();
-
- // After the GC, sweep away all free'd Elements and compact.
- LiveObjectList *prev = NULL;
- LiveObjectList *next = NULL;
-
- // Iterating from the youngest lol to the oldest lol.
- for (LiveObjectList *lol = last(); lol; lol = prev) {
- Element* elements = lol->elements_;
- prev = lol->prev(); // Save the prev.
-
- // Remove any references to collected objects.
- int i = 0;
- while (i < lol->obj_count_) {
- Element& element = elements[i];
- if (!element.obj_->IsHeapObject()) {
- // If the HeapObject address was converted into a SMI, then this
- // is a dead object. Copy the last element over this one.
- element = elements[lol->obj_count_ - 1];
- lol->obj_count_--;
- // We've just moved the last element into this index. We'll revisit
- // this index again. Hence, no need to increment the iterator.
- } else {
- i++; // Look at the next element next.
- }
- }
-
- int new_count = lol->obj_count_;
-
- // Check if there are any more elements to keep after purging the dead ones.
- if (new_count == 0) {
- DeleteArray<Element>(elements);
- lol->elements_ = NULL;
- lol->capacity_ = 0;
- ASSERT(lol->obj_count_ == 0);
-
- // If the list is also invisible, the clean up the list as well.
- if (lol->id_ == 0) {
- // Point the next lol's prev to this lol's prev.
- if (next) {
- next->prev_ = lol->prev_;
- } else {
- last_ = lol->prev_;
- }
-
- // Delete this now empty and invisible lol.
- delete lol;
-
- // Don't point the next to this lol since it is now deleted.
- // Leave the next pointer pointing to the current lol.
- continue;
- }
-
- } else {
- // If the obj_count_ is less than the capacity and the difference is
- // greater than a specified threshold, then we should shrink the list.
- int diff = lol->capacity_ - new_count;
- const int kMaxUnusedSpace = 64;
- if (diff > kMaxUnusedSpace) { // Threshold for shrinking.
- // Shrink the list.
- Element *new_elements = NewArray<Element>(new_count);
- memcpy(new_elements, elements, new_count * sizeof(Element));
-
- DeleteArray<Element>(elements);
- lol->elements_ = new_elements;
- lol->capacity_ = new_count;
- }
- ASSERT(lol->obj_count_ == new_count);
-
- lol->Sort(); // We've moved objects. Re-sort in case.
- }
-
- // Save the next (for the previous link) in case we need it later.
- next = lol;
- }
-
-#ifdef VERIFY_LOL
- if (FLAG_verify_lol) {
- Verify();
- }
-#endif
-}
-
-
-#ifdef VERIFY_LOL
-void LiveObjectList::Verify(bool match_heap_exactly) {
- OS::Print("Verifying the LiveObjectList database:\n");
-
- LiveObjectList* lol = last();
- if (lol == NULL) {
- OS::Print(" No lol database to verify\n");
- return;
- }
-
- OS::Print(" Preparing the lol database ...\n");
- int total_count = lol->TotalObjCount();
-
- Element* elements = NewArray<Element>(total_count);
- int count = 0;
-
- // Copy all the object elements into a consecutive array.
- OS::Print(" Copying the lol database ...\n");
- while (lol != NULL) {
- memcpy(&elements[count], lol->elements_, lol->obj_count_ * sizeof(Element));
- count += lol->obj_count_;
- lol = lol->prev_;
- }
- qsort(elements, total_count, sizeof(Element),
- reinterpret_cast<RawComparer>(CompareElement));
-
- ASSERT(count == total_count);
-
- // Iterate over all objects in the heap and check for:
- // 1. object in LOL but not in heap i.e. error.
- // 2. object in heap but not in LOL (possibly not an error). Usually
- // just means that we don't have the a capture of the latest heap.
- // That is unless we did this verify immediately after a capture,
- // and specified match_heap_exactly = true.
-
- int number_of_heap_objects = 0;
- int number_of_matches = 0;
- int number_not_in_heap = total_count;
- int number_not_in_lol = 0;
-
- OS::Print(" Start verify ...\n");
- OS::Print(" Verifying ...");
- Flush();
- HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
- HeapObject* heap_obj = NULL;
- while ((heap_obj = iterator.next()) != NULL) {
- number_of_heap_objects++;
-
- // Check if the heap_obj is in the lol.
- Element key;
- key.obj_ = heap_obj;
-
- Element* result = reinterpret_cast<Element*>(
- bsearch(&key, elements, total_count, sizeof(Element),
- reinterpret_cast<RawComparer>(CompareElement)));
-
- if (result != NULL) {
- number_of_matches++;
- number_not_in_heap--;
- // Mark it as found by changing it into a SMI (mask off low bit).
- // Note: we cannot use HeapObject::cast() here because it asserts that
- // the HeapObject bit is set on the address, but we're unsetting it on
- // purpose here for our marking.
- result->obj_ = reinterpret_cast<HeapObject*>(heap_obj->address());
-
- } else {
- number_not_in_lol++;
- if (match_heap_exactly) {
- OS::Print("heap object %p NOT in lol database\n", heap_obj);
- }
- }
- // Show some sign of life.
- if (number_of_heap_objects % 1000 == 0) {
- OS::Print(".");
- fflush(stdout);
- }
- }
- OS::Print("\n");
-
- // Reporting lol objects not found in the heap.
- if (number_not_in_heap) {
- int found = 0;
- for (int i = 0; (i < total_count) && (found < number_not_in_heap); i++) {
- Element& element = elements[i];
- if (element.obj_->IsHeapObject()) {
- OS::Print("lol database object [%d of %d] %p NOT in heap\n",
- i, total_count, element.obj_);
- found++;
- }
- }
- }
-
- DeleteArray<Element>(elements);
-
- OS::Print("number of objects in lol database %d\n", total_count);
- OS::Print("number of heap objects .......... %d\n", number_of_heap_objects);
- OS::Print("number of matches ............... %d\n", number_of_matches);
- OS::Print("number NOT in heap .............. %d\n", number_not_in_heap);
- OS::Print("number NOT in lol database ...... %d\n", number_not_in_lol);
-
- if (number_of_matches != total_count) {
- OS::Print(" *** ERROR: "
- "NOT all lol database objects match heap objects.\n");
- }
- if (number_not_in_heap != 0) {
- OS::Print(" *** ERROR: %d lol database objects not found in heap.\n",
- number_not_in_heap);
- }
- if (match_heap_exactly) {
- if (!(number_not_in_lol == 0)) {
- OS::Print(" *** ERROR: %d heap objects NOT found in lol database.\n",
- number_not_in_lol);
- }
- }
-
- ASSERT(number_of_matches == total_count);
- ASSERT(number_not_in_heap == 0);
- ASSERT(number_not_in_lol == (number_of_heap_objects - total_count));
- if (match_heap_exactly) {
- ASSERT(total_count == number_of_heap_objects);
- ASSERT(number_not_in_lol == 0);
- }
-
- OS::Print(" Verify the lol database is sorted ...\n");
- lol = last();
- while (lol != NULL) {
- Element* elements = lol->elements_;
- for (int i = 0; i < lol->obj_count_ - 1; i++) {
- if (elements[i].obj_ >= elements[i+1].obj_) {
- OS::Print(" *** ERROR: lol %p obj[%d] %p > obj[%d] %p\n",
- lol, i, elements[i].obj_, i+1, elements[i+1].obj_);
- }
- }
- lol = lol->prev_;
- }
-
- OS::Print(" DONE verifying.\n\n\n");
-}
-
-
-void LiveObjectList::VerifyNotInFromSpace() {
- OS::Print("VerifyNotInFromSpace() ...\n");
- LolIterator it(NULL, last());
- int i = 0;
- for (it.Init(); !it.Done(); it.Next()) {
- HeapObject* heap_obj = it.Obj();
- if (Heap::InFromSpace(heap_obj)) {
- OS::Print(" ERROR: VerifyNotInFromSpace: [%d] obj %p in From space %p\n",
- i++, heap_obj, Heap::new_space()->FromSpaceLow());
- }
- }
-}
-#endif // VERIFY_LOL
-
-
-} } // namespace v8::internal
-
-#endif // LIVE_OBJECT_LIST
-
diff --git a/src/3rdparty/v8/src/liveobjectlist.h b/src/3rdparty/v8/src/liveobjectlist.h
deleted file mode 100644
index 23e418d..0000000
--- a/src/3rdparty/v8/src/liveobjectlist.h
+++ /dev/null
@@ -1,322 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LIVEOBJECTLIST_H_
-#define V8_LIVEOBJECTLIST_H_
-
-#include "v8.h"
-
-#include "checks.h"
-#include "heap.h"
-#include "objects.h"
-#include "globals.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef LIVE_OBJECT_LIST
-
-#ifdef DEBUG
-// The following symbol when defined enables thorough verification of lol data.
-// FLAG_verify_lol will also need to set to true to enable the verification.
-#define VERIFY_LOL
-#endif
-
-
-typedef int LiveObjectType;
-class LolFilter;
-class LiveObjectSummary;
-class DumpWriter;
-class SummaryWriter;
-
-
-// The LiveObjectList is both a mechanism for tracking a live capture of
-// objects in the JS heap, as well as is the data structure which represents
-// each of those captures. Unlike a snapshot, the lol is live. For example,
-// if an object in a captured lol dies and is collected by the GC, the lol
-// will reflect that the object is no longer available. The term
-// LiveObjectList (and lol) is used to describe both the mechanism and the
-// data structure depending on context of use.
-//
-// In captured lols, objects are tracked using their address and an object id.
-// The object id is unique. Once assigned to an object, the object id can never
-// be assigned to another object. That is unless all captured lols are deleted
-// which allows the user to start over with a fresh set of lols and object ids.
-// The uniqueness of the object ids allows the user to track specific objects
-// and inspect its longevity while debugging JS code in execution.
-//
-// The lol comes with utility functions to capture, dump, summarize, and diff
-// captured lols amongst other functionality. These functionality are
-// accessible via the v8 debugger interface.
-class LiveObjectList {
- public:
- inline static void GCEpilogue();
- inline static void GCPrologue();
- inline static void IterateElements(ObjectVisitor* v);
- inline static void ProcessNonLive(HeapObject *obj);
- inline static void UpdateReferencesForScavengeGC();
-
- // Note: LOLs can be listed by calling Dump(0, <lol id>), and 2 LOLs can be
- // compared/diff'ed using Dump(<lol id1>, <lol id2>, ...). This will yield
- // a verbose dump of all the objects in the resultant lists.
- // Similarly, a summarized result of a LOL listing or a diff can be
- // attained using the Summarize(0, <lol id>) and Summarize(<lol id1,
- // <lol id2>, ...) respectively.
-
- static MaybeObject* Capture();
- static bool Delete(int id);
- static MaybeObject* Dump(int id1,
- int id2,
- int start_idx,
- int dump_limit,
- Handle<JSObject> filter_obj);
- static MaybeObject* Info(int start_idx, int dump_limit);
- static MaybeObject* Summarize(int id1, int id2, Handle<JSObject> filter_obj);
-
- static void Reset();
- static Object* GetObj(int obj_id);
- static int GetObjId(Object* obj);
- static Object* GetObjId(Handle<String> address);
- static MaybeObject* GetObjRetainers(int obj_id,
- Handle<JSObject> instance_filter,
- bool verbose,
- int start,
- int count,
- Handle<JSObject> filter_obj);
-
- static Object* GetPath(int obj_id1,
- int obj_id2,
- Handle<JSObject> instance_filter);
- static Object* PrintObj(int obj_id);
-
- private:
-
- struct Element {
- int id_;
- HeapObject* obj_;
- };
-
- explicit LiveObjectList(LiveObjectList* prev, int capacity);
- ~LiveObjectList();
-
- static void GCEpiloguePrivate();
- static void IterateElementsPrivate(ObjectVisitor* v);
-
- static void DoProcessNonLive(HeapObject *obj);
-
- static int CompareElement(const Element* a, const Element* b);
-
- static Object* GetPathPrivate(HeapObject* obj1, HeapObject* obj2);
-
- static int GetRetainers(Handle<HeapObject> target,
- Handle<JSObject> instance_filter,
- Handle<FixedArray> retainers_arr,
- int start,
- int dump_limit,
- int* total_count,
- LolFilter* filter,
- LiveObjectSummary *summary,
- JSFunction* arguments_function,
- Handle<Object> error);
-
- static MaybeObject* DumpPrivate(DumpWriter* writer,
- int start,
- int dump_limit,
- LolFilter* filter);
- static MaybeObject* SummarizePrivate(SummaryWriter* writer,
- LolFilter* filter,
- bool is_tracking_roots);
-
- static bool NeedLOLProcessing() { return (last() != NULL); }
- static void NullifyNonLivePointer(HeapObject **p) {
- // Mask out the low bit that marks this as a heap object. We'll use this
- // cleared bit as an indicator that this pointer needs to be collected.
- //
- // Meanwhile, we still preserve its approximate value so that we don't
- // have to resort the elements list all the time.
- //
- // Note: Doing so also makes this HeapObject* look like an SMI. Hence,
- // GC pointer updater will ignore it when it gets scanned.
- *p = reinterpret_cast<HeapObject*>((*p)->address());
- }
-
- LiveObjectList* prev() { return prev_; }
- LiveObjectList* next() { return next_; }
- int id() { return id_; }
-
- static int list_count() { return list_count_; }
- static LiveObjectList* last() { return last_; }
-
- inline static LiveObjectList* FindLolForId(int id, LiveObjectList* start_lol);
- int TotalObjCount() { return GetTotalObjCountAndSize(NULL); }
- int GetTotalObjCountAndSize(int* size_p);
-
- bool Add(HeapObject* obj);
- Element* Find(HeapObject* obj);
- static void NullifyMostRecent(HeapObject* obj);
- void Sort();
- static void SortAll();
-
- static void PurgeDuplicates(); // Only to be called by GCEpilogue.
-
-#ifdef VERIFY_LOL
- static void Verify(bool match_heap_exactly = false);
- static void VerifyNotInFromSpace();
-#endif
-
- // Iterates the elements in every lol and returns the one that matches the
- // specified key. If no matching element is found, then it returns NULL.
- template <typename T>
- inline static LiveObjectList::Element*
- FindElementFor(T (*GetValue)(LiveObjectList::Element*), T key);
-
- inline static int GetElementId(Element* element);
- inline static HeapObject* GetElementObj(Element* element);
-
- // Instance fields.
- LiveObjectList* prev_;
- LiveObjectList* next_;
- int id_;
- int capacity_;
- int obj_count_;
- Element *elements_;
-
- // Statics for managing all the lists.
- static uint32_t next_element_id_;
- static int list_count_;
- static int last_id_;
- static LiveObjectList* first_;
- static LiveObjectList* last_;
-
- friend class LolIterator;
- friend class LolForwardIterator;
- friend class LolDumpWriter;
- friend class RetainersDumpWriter;
- friend class RetainersSummaryWriter;
- friend class UpdateLiveObjectListVisitor;
-};
-
-
-// Helper class for updating the LiveObjectList HeapObject pointers.
-class UpdateLiveObjectListVisitor: public ObjectVisitor {
- public:
-
- void VisitPointer(Object** p) { UpdatePointer(p); }
-
- void VisitPointers(Object** start, Object** end) {
- // Copy all HeapObject pointers in [start, end).
- for (Object** p = start; p < end; p++) UpdatePointer(p);
- }
-
- private:
- // Based on Heap::ScavengeObject() but only does forwarding of pointers
- // to live new space objects, and not actually keep them alive.
- void UpdatePointer(Object** p) {
- Object* object = *p;
- if (!Heap::InNewSpace(object)) return;
-
- HeapObject* heap_obj = HeapObject::cast(object);
- ASSERT(Heap::InFromSpace(heap_obj));
-
- // We use the first word (where the map pointer usually is) of a heap
- // object to record the forwarding pointer. A forwarding pointer can
- // point to an old space, the code space, or the to space of the new
- // generation.
- MapWord first_word = heap_obj->map_word();
-
- // If the first word is a forwarding address, the object has already been
- // copied.
- if (first_word.IsForwardingAddress()) {
- *p = first_word.ToForwardingAddress();
- return;
-
- // Else, it's a dead object.
- } else {
- LiveObjectList::NullifyNonLivePointer(reinterpret_cast<HeapObject**>(p));
- }
- }
-};
-
-
-#else // !LIVE_OBJECT_LIST
-
-
-class LiveObjectList {
- public:
- inline static void GCEpilogue() {}
- inline static void GCPrologue() {}
- inline static void IterateElements(ObjectVisitor* v) {}
- inline static void ProcessNonLive(HeapObject* obj) {}
- inline static void UpdateReferencesForScavengeGC() {}
-
- inline static MaybeObject* Capture() { return HEAP->undefined_value(); }
- inline static bool Delete(int id) { return false; }
- inline static MaybeObject* Dump(int id1,
- int id2,
- int start_idx,
- int dump_limit,
- Handle<JSObject> filter_obj) {
- return HEAP->undefined_value();
- }
- inline static MaybeObject* Info(int start_idx, int dump_limit) {
- return HEAP->undefined_value();
- }
- inline static MaybeObject* Summarize(int id1,
- int id2,
- Handle<JSObject> filter_obj) {
- return HEAP->undefined_value();
- }
-
- inline static void Reset() {}
- inline static Object* GetObj(int obj_id) { return HEAP->undefined_value(); }
- inline static Object* GetObjId(Handle<String> address) {
- return HEAP->undefined_value();
- }
- inline static MaybeObject* GetObjRetainers(int obj_id,
- Handle<JSObject> instance_filter,
- bool verbose,
- int start,
- int count,
- Handle<JSObject> filter_obj) {
- return HEAP->undefined_value();
- }
-
- inline static Object* GetPath(int obj_id1,
- int obj_id2,
- Handle<JSObject> instance_filter) {
- return HEAP->undefined_value();
- }
- inline static Object* PrintObj(int obj_id) { return HEAP->undefined_value(); }
-};
-
-
-#endif // LIVE_OBJECT_LIST
-
-} } // namespace v8::internal
-
-#endif // V8_LIVEOBJECTLIST_H_
-
diff --git a/src/3rdparty/v8/src/log-inl.h b/src/3rdparty/v8/src/log-inl.h
deleted file mode 100644
index 02238fe..0000000
--- a/src/3rdparty/v8/src/log-inl.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LOG_INL_H_
-#define V8_LOG_INL_H_
-
-#include "log.h"
-#include "cpu-profiler.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-Logger::LogEventsAndTags Logger::ToNativeByScript(Logger::LogEventsAndTags tag,
- Script* script) {
- if ((tag == FUNCTION_TAG || tag == LAZY_COMPILE_TAG || tag == SCRIPT_TAG)
- && script->type()->value() == Script::TYPE_NATIVE) {
- switch (tag) {
- case FUNCTION_TAG: return NATIVE_FUNCTION_TAG;
- case LAZY_COMPILE_TAG: return NATIVE_LAZY_COMPILE_TAG;
- case SCRIPT_TAG: return NATIVE_SCRIPT_TAG;
- default: return tag;
- }
- } else {
- return tag;
- }
-}
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-
-} } // namespace v8::internal
-
-#endif // V8_LOG_INL_H_
diff --git a/src/3rdparty/v8/src/log-utils.cc b/src/3rdparty/v8/src/log-utils.cc
deleted file mode 100644
index a854ade..0000000
--- a/src/3rdparty/v8/src/log-utils.cc
+++ /dev/null
@@ -1,423 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "log-utils.h"
-#include "string-stream.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-LogDynamicBuffer::LogDynamicBuffer(
- int block_size, int max_size, const char* seal, int seal_size)
- : block_size_(block_size),
- max_size_(max_size - (max_size % block_size_)),
- seal_(seal),
- seal_size_(seal_size),
- blocks_(max_size_ / block_size_ + 1),
- write_pos_(0), block_index_(0), block_write_pos_(0), is_sealed_(false) {
- ASSERT(BlocksCount() > 0);
- AllocateBlock(0);
- for (int i = 1; i < BlocksCount(); ++i) {
- blocks_[i] = NULL;
- }
-}
-
-
-LogDynamicBuffer::~LogDynamicBuffer() {
- for (int i = 0; i < BlocksCount(); ++i) {
- DeleteArray(blocks_[i]);
- }
-}
-
-
-int LogDynamicBuffer::Read(int from_pos, char* dest_buf, int buf_size) {
- if (buf_size == 0) return 0;
- int read_pos = from_pos;
- int block_read_index = BlockIndex(from_pos);
- int block_read_pos = PosInBlock(from_pos);
- int dest_buf_pos = 0;
- // Read until dest_buf is filled, or write_pos_ encountered.
- while (read_pos < write_pos_ && dest_buf_pos < buf_size) {
- const int read_size = Min(write_pos_ - read_pos,
- Min(buf_size - dest_buf_pos, block_size_ - block_read_pos));
- memcpy(dest_buf + dest_buf_pos,
- blocks_[block_read_index] + block_read_pos, read_size);
- block_read_pos += read_size;
- dest_buf_pos += read_size;
- read_pos += read_size;
- if (block_read_pos == block_size_) {
- block_read_pos = 0;
- ++block_read_index;
- }
- }
- return dest_buf_pos;
-}
-
-
-int LogDynamicBuffer::Seal() {
- WriteInternal(seal_, seal_size_);
- is_sealed_ = true;
- return 0;
-}
-
-
-int LogDynamicBuffer::Write(const char* data, int data_size) {
- if (is_sealed_) {
- return 0;
- }
- if ((write_pos_ + data_size) <= (max_size_ - seal_size_)) {
- return WriteInternal(data, data_size);
- } else {
- return Seal();
- }
-}
-
-
-int LogDynamicBuffer::WriteInternal(const char* data, int data_size) {
- int data_pos = 0;
- while (data_pos < data_size) {
- const int write_size =
- Min(data_size - data_pos, block_size_ - block_write_pos_);
- memcpy(blocks_[block_index_] + block_write_pos_, data + data_pos,
- write_size);
- block_write_pos_ += write_size;
- data_pos += write_size;
- if (block_write_pos_ == block_size_) {
- block_write_pos_ = 0;
- AllocateBlock(++block_index_);
- }
- }
- write_pos_ += data_size;
- return data_size;
-}
-
-// Must be the same message as in Logger::PauseProfiler.
-const char* const Log::kDynamicBufferSeal = "profiler,\"pause\"\n";
-
-Log::Log(Logger* logger)
- : write_to_file_(false),
- is_stopped_(false),
- output_handle_(NULL),
- output_code_handle_(NULL),
- output_buffer_(NULL),
- mutex_(NULL),
- message_buffer_(NULL),
- logger_(logger) {
-}
-
-
-static void AddIsolateIdIfNeeded(StringStream* stream) {
- Isolate* isolate = Isolate::Current();
- if (isolate->IsDefaultIsolate()) return;
- stream->Add("isolate-%p-", isolate);
-}
-
-
-void Log::Initialize() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- mutex_ = OS::CreateMutex();
- message_buffer_ = NewArray<char>(kMessageBufferSize);
-
- // --log-all enables all the log flags.
- if (FLAG_log_all) {
- FLAG_log_runtime = true;
- FLAG_log_api = true;
- FLAG_log_code = true;
- FLAG_log_gc = true;
- FLAG_log_suspect = true;
- FLAG_log_handles = true;
- FLAG_log_regexp = true;
- }
-
- // --prof implies --log-code.
- if (FLAG_prof) FLAG_log_code = true;
-
- // --prof_lazy controls --log-code, implies --noprof_auto.
- if (FLAG_prof_lazy) {
- FLAG_log_code = false;
- FLAG_prof_auto = false;
- }
-
- bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
- || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
- || FLAG_log_regexp || FLAG_log_state_changes;
-
- bool open_log_file = start_logging || FLAG_prof_lazy;
-
- // If we're logging anything, we need to open the log file.
- if (open_log_file) {
- if (strcmp(FLAG_logfile, "-") == 0) {
- OpenStdout();
- } else if (strcmp(FLAG_logfile, "*") == 0) {
- OpenMemoryBuffer();
- } else {
- if (strchr(FLAG_logfile, '%') != NULL ||
- !Isolate::Current()->IsDefaultIsolate()) {
- // If there's a '%' in the log file name we have to expand
- // placeholders.
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- AddIsolateIdIfNeeded(&stream);
- for (const char* p = FLAG_logfile; *p; p++) {
- if (*p == '%') {
- p++;
- switch (*p) {
- case '\0':
- // If there's a % at the end of the string we back up
- // one character so we can escape the loop properly.
- p--;
- break;
- case 't': {
- // %t expands to the current time in milliseconds.
- double time = OS::TimeCurrentMillis();
- stream.Add("%.0f", FmtElm(time));
- break;
- }
- case '%':
- // %% expands (contracts really) to %.
- stream.Put('%');
- break;
- default:
- // All other %'s expand to themselves.
- stream.Put('%');
- stream.Put(*p);
- break;
- }
- } else {
- stream.Put(*p);
- }
- }
- SmartPointer<const char> expanded = stream.ToCString();
- OpenFile(*expanded);
- } else {
- OpenFile(FLAG_logfile);
- }
- }
- }
-#endif
-}
-
-
-void Log::OpenStdout() {
- ASSERT(!IsEnabled());
- output_handle_ = stdout;
- write_to_file_ = true;
-}
-
-
-static const char kCodeLogExt[] = ".code";
-
-
-void Log::OpenFile(const char* name) {
- ASSERT(!IsEnabled());
- output_handle_ = OS::FOpen(name, OS::LogFileOpenMode);
- write_to_file_ = true;
- if (FLAG_ll_prof) {
- // Open a file for logging the contents of code objects so that
- // they can be disassembled later.
- size_t name_len = strlen(name);
- ScopedVector<char> code_name(
- static_cast<int>(name_len + sizeof(kCodeLogExt)));
- memcpy(code_name.start(), name, name_len);
- memcpy(code_name.start() + name_len, kCodeLogExt, sizeof(kCodeLogExt));
- output_code_handle_ = OS::FOpen(code_name.start(), OS::LogFileOpenMode);
- }
-}
-
-
-void Log::OpenMemoryBuffer() {
- ASSERT(!IsEnabled());
- output_buffer_ = new LogDynamicBuffer(
- kDynamicBufferBlockSize, kMaxDynamicBufferSize,
- kDynamicBufferSeal, StrLength(kDynamicBufferSeal));
- write_to_file_ = false;
-}
-
-
-void Log::Close() {
- if (write_to_file_) {
- if (output_handle_ != NULL) fclose(output_handle_);
- output_handle_ = NULL;
- if (output_code_handle_ != NULL) fclose(output_code_handle_);
- output_code_handle_ = NULL;
- } else {
- delete output_buffer_;
- output_buffer_ = NULL;
- }
-
- DeleteArray(message_buffer_);
- message_buffer_ = NULL;
-
- delete mutex_;
- mutex_ = NULL;
-
- is_stopped_ = false;
-}
-
-
-int Log::GetLogLines(int from_pos, char* dest_buf, int max_size) {
- if (write_to_file_) return 0;
- ASSERT(output_buffer_ != NULL);
- ASSERT(from_pos >= 0);
- ASSERT(max_size >= 0);
- int actual_size = output_buffer_->Read(from_pos, dest_buf, max_size);
- ASSERT(actual_size <= max_size);
- if (actual_size == 0) return 0;
-
- // Find previous log line boundary.
- char* end_pos = dest_buf + actual_size - 1;
- while (end_pos >= dest_buf && *end_pos != '\n') --end_pos;
- actual_size = static_cast<int>(end_pos - dest_buf + 1);
- // If the assertion below is hit, it means that there was no line end
- // found --- something wrong has happened.
- ASSERT(actual_size > 0);
- ASSERT(actual_size <= max_size);
- return actual_size;
-}
-
-
-LogMessageBuilder::LogMessageBuilder(Logger* logger)
- : log_(logger->log_),
- sl(log_->mutex_),
- pos_(0) {
- ASSERT(log_->message_buffer_ != NULL);
-}
-
-
-void LogMessageBuilder::Append(const char* format, ...) {
- Vector<char> buf(log_->message_buffer_ + pos_,
- Log::kMessageBufferSize - pos_);
- va_list args;
- va_start(args, format);
- AppendVA(format, args);
- va_end(args);
- ASSERT(pos_ <= Log::kMessageBufferSize);
-}
-
-
-void LogMessageBuilder::AppendVA(const char* format, va_list args) {
- Vector<char> buf(log_->message_buffer_ + pos_,
- Log::kMessageBufferSize - pos_);
- int result = v8::internal::OS::VSNPrintF(buf, format, args);
-
- // Result is -1 if output was truncated.
- if (result >= 0) {
- pos_ += result;
- } else {
- pos_ = Log::kMessageBufferSize;
- }
- ASSERT(pos_ <= Log::kMessageBufferSize);
-}
-
-
-void LogMessageBuilder::Append(const char c) {
- if (pos_ < Log::kMessageBufferSize) {
- log_->message_buffer_[pos_++] = c;
- }
- ASSERT(pos_ <= Log::kMessageBufferSize);
-}
-
-
-void LogMessageBuilder::Append(String* str) {
- AssertNoAllocation no_heap_allocation; // Ensure string stay valid.
- int length = str->length();
- for (int i = 0; i < length; i++) {
- Append(static_cast<char>(str->Get(i)));
- }
-}
-
-
-void LogMessageBuilder::AppendAddress(Address addr) {
- Append("0x%" V8PRIxPTR, addr);
-}
-
-
-void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
- AssertNoAllocation no_heap_allocation; // Ensure string stay valid.
- int len = str->length();
- if (len > 0x1000)
- len = 0x1000;
- if (show_impl_info) {
- Append(str->IsAsciiRepresentation() ? 'a' : '2');
- if (StringShape(str).IsExternal())
- Append('e');
- if (StringShape(str).IsSymbol())
- Append('#');
- Append(":%i:", str->length());
- }
- for (int i = 0; i < len; i++) {
- uc32 c = str->Get(i);
- if (c > 0xff) {
- Append("\\u%04x", c);
- } else if (c < 32 || c > 126) {
- Append("\\x%02x", c);
- } else if (c == ',') {
- Append("\\,");
- } else if (c == '\\') {
- Append("\\\\");
- } else if (c == '\"') {
- Append("\"\"");
- } else {
- Append("%lc", c);
- }
- }
-}
-
-
-void LogMessageBuilder::AppendStringPart(const char* str, int len) {
- if (pos_ + len > Log::kMessageBufferSize) {
- len = Log::kMessageBufferSize - pos_;
- ASSERT(len >= 0);
- if (len == 0) return;
- }
- Vector<char> buf(log_->message_buffer_ + pos_,
- Log::kMessageBufferSize - pos_);
- OS::StrNCpy(buf, str, len);
- pos_ += len;
- ASSERT(pos_ <= Log::kMessageBufferSize);
-}
-
-
-void LogMessageBuilder::WriteToLogFile() {
- ASSERT(pos_ <= Log::kMessageBufferSize);
- const int written = log_->write_to_file_ ?
- log_->WriteToFile(log_->message_buffer_, pos_) :
- log_->WriteToMemory(log_->message_buffer_, pos_);
- if (written != pos_) {
- log_->stop();
- log_->logger_->LogFailure();
- }
-}
-
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/log-utils.h b/src/3rdparty/v8/src/log-utils.h
deleted file mode 100644
index 255c73c..0000000
--- a/src/3rdparty/v8/src/log-utils.h
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LOG_UTILS_H_
-#define V8_LOG_UTILS_H_
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-class Logger;
-
-// A memory buffer that increments its size as you write in it. Size
-// is incremented with 'block_size' steps, never exceeding 'max_size'.
-// During growth, memory contents are never copied. At the end of the
-// buffer an amount of memory specified in 'seal_size' is reserved.
-// When writing position reaches max_size - seal_size, buffer auto-seals
-// itself with 'seal' and allows no further writes. Data pointed by
-// 'seal' must be available during entire LogDynamicBuffer lifetime.
-//
-// An instance of this class is created dynamically by Log.
-class LogDynamicBuffer {
- public:
- LogDynamicBuffer(
- int block_size, int max_size, const char* seal, int seal_size);
-
- ~LogDynamicBuffer();
-
- // Reads contents of the buffer starting from 'from_pos'. Upon
- // return, 'dest_buf' is filled with the data. Actual amount of data
- // filled is returned, it is <= 'buf_size'.
- int Read(int from_pos, char* dest_buf, int buf_size);
-
- // Writes 'data' to the buffer, making it larger if necessary. If
- // data is too big to fit in the buffer, it doesn't get written at
- // all. In that case, buffer auto-seals itself and stops to accept
- // any incoming writes. Returns amount of data written (it is either
- // 'data_size', or 0, if 'data' is too big).
- int Write(const char* data, int data_size);
-
- private:
- void AllocateBlock(int index) {
- blocks_[index] = NewArray<char>(block_size_);
- }
-
- int BlockIndex(int pos) const { return pos / block_size_; }
-
- int BlocksCount() const { return BlockIndex(max_size_) + 1; }
-
- int PosInBlock(int pos) const { return pos % block_size_; }
-
- int Seal();
-
- int WriteInternal(const char* data, int data_size);
-
- const int block_size_;
- const int max_size_;
- const char* seal_;
- const int seal_size_;
- ScopedVector<char*> blocks_;
- int write_pos_;
- int block_index_;
- int block_write_pos_;
- bool is_sealed_;
-};
-
-
-// Functions and data for performing output of log messages.
-class Log {
- public:
-
- // Performs process-wide initialization.
- void Initialize();
-
- // Disables logging, but preserves acquired resources.
- void stop() { is_stopped_ = true; }
-
- // Frees all resources acquired in Initialize and Open... functions.
- void Close();
-
- // See description in include/v8.h.
- int GetLogLines(int from_pos, char* dest_buf, int max_size);
-
- // Returns whether logging is enabled.
- bool IsEnabled() {
- return !is_stopped_ && (output_handle_ != NULL || output_buffer_ != NULL);
- }
-
- // Size of buffer used for formatting log messages.
- static const int kMessageBufferSize = v8::V8::kMinimumSizeForLogLinesBuffer;
-
- private:
- explicit Log(Logger* logger);
-
- // Opens stdout for logging.
- void OpenStdout();
-
- // Opens file for logging.
- void OpenFile(const char* name);
-
- // Opens memory buffer for logging.
- void OpenMemoryBuffer();
-
- // Implementation of writing to a log file.
- int WriteToFile(const char* msg, int length) {
- ASSERT(output_handle_ != NULL);
- size_t rv = fwrite(msg, 1, length, output_handle_);
- ASSERT(static_cast<size_t>(length) == rv);
- USE(rv);
- fflush(output_handle_);
- return length;
- }
-
- // Implementation of writing to a memory buffer.
- int WriteToMemory(const char* msg, int length) {
- ASSERT(output_buffer_ != NULL);
- return output_buffer_->Write(msg, length);
- }
-
- bool write_to_file_;
-
- // Whether logging is stopped (e.g. due to insufficient resources).
- bool is_stopped_;
-
- // When logging is active, either output_handle_ or output_buffer_ is used
- // to store a pointer to log destination. If logging was opened via OpenStdout
- // or OpenFile, then output_handle_ is used. If logging was opened
- // via OpenMemoryBuffer, then output_buffer_ is used.
- // mutex_ should be acquired before using output_handle_ or output_buffer_.
- FILE* output_handle_;
-
- // Used when low-level profiling is active to save code object contents.
- FILE* output_code_handle_;
-
- LogDynamicBuffer* output_buffer_;
-
- // Size of dynamic buffer block (and dynamic buffer initial size).
- static const int kDynamicBufferBlockSize = 65536;
-
- // Maximum size of dynamic buffer.
- static const int kMaxDynamicBufferSize = 50 * 1024 * 1024;
-
- // Message to "seal" dynamic buffer with.
- static const char* const kDynamicBufferSeal;
-
- // mutex_ is a Mutex used for enforcing exclusive
- // access to the formatting buffer and the log file or log memory buffer.
- Mutex* mutex_;
-
- // Buffer used for formatting log messages. This is a singleton buffer and
- // mutex_ should be acquired before using it.
- char* message_buffer_;
-
- Logger* logger_;
-
- friend class Logger;
- friend class LogMessageBuilder;
-};
-
-
-// Utility class for formatting log messages. It fills the message into the
-// static buffer in Log.
-class LogMessageBuilder BASE_EMBEDDED {
- public:
- // Create a message builder starting from position 0. This acquires the mutex
- // in the log as well.
- explicit LogMessageBuilder(Logger* logger);
- ~LogMessageBuilder() { }
-
- // Append string data to the log message.
- void Append(const char* format, ...);
-
- // Append string data to the log message.
- void AppendVA(const char* format, va_list args);
-
- // Append a character to the log message.
- void Append(const char c);
-
- // Append a heap string.
- void Append(String* str);
-
- // Appends an address.
- void AppendAddress(Address addr);
-
- void AppendDetailed(String* str, bool show_impl_info);
-
- // Append a portion of a string.
- void AppendStringPart(const char* str, int len);
-
- // Write the log message to the log file currently opened.
- void WriteToLogFile();
-
- private:
-
- Log* log_;
- ScopedLock sl;
- int pos_;
-};
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-} } // namespace v8::internal
-
-#endif // V8_LOG_UTILS_H_
diff --git a/src/3rdparty/v8/src/log.cc b/src/3rdparty/v8/src/log.cc
deleted file mode 100644
index 5e8c738..0000000
--- a/src/3rdparty/v8/src/log.cc
+++ /dev/null
@@ -1,1666 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdarg.h>
-
-#include "v8.h"
-
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "deoptimizer.h"
-#include "global-handles.h"
-#include "log.h"
-#include "macro-assembler.h"
-#include "runtime-profiler.h"
-#include "serialize.h"
-#include "string-stream.h"
-#include "vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-//
-// Sliding state window. Updates counters to keep track of the last
-// window of kBufferSize states. This is useful to track where we
-// spent our time.
-//
-class SlidingStateWindow {
- public:
- explicit SlidingStateWindow(Isolate* isolate);
- ~SlidingStateWindow();
- void AddState(StateTag state);
-
- private:
- static const int kBufferSize = 256;
- Counters* counters_;
- int current_index_;
- bool is_full_;
- byte buffer_[kBufferSize];
-
-
- void IncrementStateCounter(StateTag state) {
- counters_->state_counters(state)->Increment();
- }
-
-
- void DecrementStateCounter(StateTag state) {
- counters_->state_counters(state)->Decrement();
- }
-};
-
-
-//
-// The Profiler samples pc and sp values for the main thread.
-// Each sample is appended to a circular buffer.
-// An independent thread removes data and writes it to the log.
-// This design minimizes the time spent in the sampler.
-//
-class Profiler: public Thread {
- public:
- explicit Profiler(Isolate* isolate);
- void Engage();
- void Disengage();
-
- // Inserts collected profiling data into buffer.
- void Insert(TickSample* sample) {
- if (paused_)
- return;
-
- if (Succ(head_) == tail_) {
- overflow_ = true;
- } else {
- buffer_[head_] = *sample;
- head_ = Succ(head_);
- buffer_semaphore_->Signal(); // Tell we have an element.
- }
- }
-
- // Waits for a signal and removes profiling data.
- bool Remove(TickSample* sample) {
- buffer_semaphore_->Wait(); // Wait for an element.
- *sample = buffer_[tail_];
- bool result = overflow_;
- tail_ = Succ(tail_);
- overflow_ = false;
- return result;
- }
-
- void Run();
-
- // Pause and Resume TickSample data collection.
- bool paused() const { return paused_; }
- void pause() { paused_ = true; }
- void resume() { paused_ = false; }
-
- private:
- // Returns the next index in the cyclic buffer.
- int Succ(int index) { return (index + 1) % kBufferSize; }
-
- // Cyclic buffer for communicating profiling samples
- // between the signal handler and the worker thread.
- static const int kBufferSize = 128;
- TickSample buffer_[kBufferSize]; // Buffer storage.
- int head_; // Index to the buffer head.
- int tail_; // Index to the buffer tail.
- bool overflow_; // Tell whether a buffer overflow has occurred.
- Semaphore* buffer_semaphore_; // Sempahore used for buffer synchronization.
-
- // Tells whether profiler is engaged, that is, processing thread is stated.
- bool engaged_;
-
- // Tells whether worker thread should continue running.
- bool running_;
-
- // Tells whether we are currently recording tick samples.
- bool paused_;
-};
-
-
-//
-// StackTracer implementation
-//
-void StackTracer::Trace(Isolate* isolate, TickSample* sample) {
- ASSERT(isolate->IsInitialized());
-
- sample->tos = NULL;
- sample->frames_count = 0;
- sample->has_external_callback = false;
-
- // Avoid collecting traces while doing GC.
- if (sample->state == GC) return;
-
- const Address js_entry_sp =
- Isolate::js_entry_sp(isolate->thread_local_top());
- if (js_entry_sp == 0) {
- // Not executing JS now.
- return;
- }
-
- const Address callback = isolate->external_callback();
- if (callback != NULL) {
- sample->external_callback = callback;
- sample->has_external_callback = true;
- } else {
- // Sample potential return address value for frameless invocation of
- // stubs (we'll figure out later, if this value makes sense).
- sample->tos = Memory::Address_at(sample->sp);
- sample->has_external_callback = false;
- }
-
- SafeStackTraceFrameIterator it(isolate,
- sample->fp, sample->sp,
- sample->sp, js_entry_sp);
- int i = 0;
- while (!it.done() && i < TickSample::kMaxFramesCount) {
- sample->stack[i++] = it.frame()->pc();
- it.Advance();
- }
- sample->frames_count = i;
-}
-
-
-//
-// Ticker used to provide ticks to the profiler and the sliding state
-// window.
-//
-class Ticker: public Sampler {
- public:
- Ticker(Isolate* isolate, int interval):
- Sampler(isolate, interval),
- window_(NULL),
- profiler_(NULL) {}
-
- ~Ticker() { if (IsActive()) Stop(); }
-
- virtual void Tick(TickSample* sample) {
- if (profiler_) profiler_->Insert(sample);
- if (window_) window_->AddState(sample->state);
- }
-
- void SetWindow(SlidingStateWindow* window) {
- window_ = window;
- if (!IsActive()) Start();
- }
-
- void ClearWindow() {
- window_ = NULL;
- if (!profiler_ && IsActive() && !RuntimeProfiler::IsEnabled()) Stop();
- }
-
- void SetProfiler(Profiler* profiler) {
- ASSERT(profiler_ == NULL);
- profiler_ = profiler;
- IncreaseProfilingDepth();
- if (!FLAG_prof_lazy && !IsActive()) Start();
- }
-
- void ClearProfiler() {
- DecreaseProfilingDepth();
- profiler_ = NULL;
- if (!window_ && IsActive() && !RuntimeProfiler::IsEnabled()) Stop();
- }
-
- protected:
- virtual void DoSampleStack(TickSample* sample) {
- StackTracer::Trace(isolate(), sample);
- }
-
- private:
- SlidingStateWindow* window_;
- Profiler* profiler_;
-};
-
-
-//
-// SlidingStateWindow implementation.
-//
-SlidingStateWindow::SlidingStateWindow(Isolate* isolate)
- : counters_(isolate->counters()), current_index_(0), is_full_(false) {
- for (int i = 0; i < kBufferSize; i++) {
- buffer_[i] = static_cast<byte>(OTHER);
- }
- isolate->logger()->ticker_->SetWindow(this);
-}
-
-
-SlidingStateWindow::~SlidingStateWindow() {
- LOGGER->ticker_->ClearWindow();
-}
-
-
-void SlidingStateWindow::AddState(StateTag state) {
- if (is_full_) {
- DecrementStateCounter(static_cast<StateTag>(buffer_[current_index_]));
- } else if (current_index_ == kBufferSize - 1) {
- is_full_ = true;
- }
- buffer_[current_index_] = static_cast<byte>(state);
- IncrementStateCounter(state);
- ASSERT(IsPowerOf2(kBufferSize));
- current_index_ = (current_index_ + 1) & (kBufferSize - 1);
-}
-
-
-//
-// Profiler implementation.
-//
-Profiler::Profiler(Isolate* isolate)
- : Thread(isolate, "v8:Profiler"),
- head_(0),
- tail_(0),
- overflow_(false),
- buffer_semaphore_(OS::CreateSemaphore(0)),
- engaged_(false),
- running_(false),
- paused_(false) {
-}
-
-
-void Profiler::Engage() {
- if (engaged_) return;
- engaged_ = true;
-
- // TODO(mnaganov): This is actually "Chromium" mode. Flags need to be revised.
- // http://code.google.com/p/v8/issues/detail?id=487
- if (!FLAG_prof_lazy) {
- OS::LogSharedLibraryAddresses();
- }
-
- // Start thread processing the profiler buffer.
- running_ = true;
- Start();
-
- // Register to get ticks.
- LOGGER->ticker_->SetProfiler(this);
-
- LOGGER->ProfilerBeginEvent();
-}
-
-
-void Profiler::Disengage() {
- if (!engaged_) return;
-
- // Stop receiving ticks.
- LOGGER->ticker_->ClearProfiler();
-
- // Terminate the worker thread by setting running_ to false,
- // inserting a fake element in the queue and then wait for
- // the thread to terminate.
- running_ = false;
- TickSample sample;
- // Reset 'paused_' flag, otherwise semaphore may not be signalled.
- resume();
- Insert(&sample);
- Join();
-
- LOG(ISOLATE, UncheckedStringEvent("profiler", "end"));
-}
-
-
-void Profiler::Run() {
- TickSample sample;
- bool overflow = Remove(&sample);
- i::Isolate* isolate = ISOLATE;
- while (running_) {
- LOG(isolate, TickEvent(&sample, overflow));
- overflow = Remove(&sample);
- }
-}
-
-
-//
-// Logger class implementation.
-//
-
-Logger::Logger()
- : ticker_(NULL),
- profiler_(NULL),
- sliding_state_window_(NULL),
- log_events_(NULL),
- logging_nesting_(0),
- cpu_profiler_nesting_(0),
- heap_profiler_nesting_(0),
- log_(new Log(this)),
- is_initialized_(false),
- last_address_(NULL),
- prev_sp_(NULL),
- prev_function_(NULL),
- prev_to_(NULL),
- prev_code_(NULL) {
-}
-
-Logger::~Logger() {
- delete log_;
-}
-
-#define DECLARE_EVENT(ignore1, name) name,
-static const char* const kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
- LOG_EVENTS_AND_TAGS_LIST(DECLARE_EVENT)
-};
-#undef DECLARE_EVENT
-
-
-void Logger::ProfilerBeginEvent() {
- if (!log_->IsEnabled()) return;
- LogMessageBuilder msg(this);
- msg.Append("profiler,\"begin\",%d\n", kSamplingIntervalMs);
- msg.WriteToLogFile();
-}
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-
-void Logger::StringEvent(const char* name, const char* value) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (FLAG_log) UncheckedStringEvent(name, value);
-#endif
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-void Logger::UncheckedStringEvent(const char* name, const char* value) {
- if (!log_->IsEnabled()) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,\"%s\"\n", name, value);
- msg.WriteToLogFile();
-}
-#endif
-
-
-void Logger::IntEvent(const char* name, int value) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (FLAG_log) UncheckedIntEvent(name, value);
-#endif
-}
-
-
-void Logger::IntPtrTEvent(const char* name, intptr_t value) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (FLAG_log) UncheckedIntPtrTEvent(name, value);
-#endif
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-void Logger::UncheckedIntEvent(const char* name, int value) {
- if (!log_->IsEnabled()) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,%d\n", name, value);
- msg.WriteToLogFile();
-}
-#endif
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
- if (!log_->IsEnabled()) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,%" V8_PTR_PREFIX "d\n", name, value);
- msg.WriteToLogFile();
-}
-#endif
-
-
-void Logger::HandleEvent(const char* name, Object** location) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_handles) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,0x%" V8PRIxPTR "\n", name, location);
- msg.WriteToLogFile();
-#endif
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-// ApiEvent is private so all the calls come from the Logger class. It is the
-// caller's responsibility to ensure that log is enabled and that
-// FLAG_log_api is true.
-void Logger::ApiEvent(const char* format, ...) {
- ASSERT(log_->IsEnabled() && FLAG_log_api);
- LogMessageBuilder msg(this);
- va_list ap;
- va_start(ap, format);
- msg.AppendVA(format, ap);
- va_end(ap);
- msg.WriteToLogFile();
-}
-#endif
-
-
-void Logger::ApiNamedSecurityCheck(Object* key) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_api) return;
- if (key->IsString()) {
- SmartPointer<char> str =
- String::cast(key)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,check-security,\"%s\"\n", *str);
- } else if (key->IsUndefined()) {
- ApiEvent("api,check-security,undefined\n");
- } else {
- ApiEvent("api,check-security,['no-name']\n");
- }
-#endif
-}
-
-
-void Logger::SharedLibraryEvent(const char* library_path,
- uintptr_t start,
- uintptr_t end) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_prof) return;
- LogMessageBuilder msg(this);
- msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
- library_path,
- start,
- end);
- msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::SharedLibraryEvent(const wchar_t* library_path,
- uintptr_t start,
- uintptr_t end) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_prof) return;
- LogMessageBuilder msg(this);
- msg.Append("shared-library,\"%ls\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
- library_path,
- start,
- end);
- msg.WriteToLogFile();
-#endif
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
- // Prints "/" + re.source + "/" +
- // (re.global?"g":"") + (re.ignorecase?"i":"") + (re.multiline?"m":"")
- LogMessageBuilder msg(this);
-
- Handle<Object> source = GetProperty(regexp, "source");
- if (!source->IsString()) {
- msg.Append("no source");
- return;
- }
-
- switch (regexp->TypeTag()) {
- case JSRegExp::ATOM:
- msg.Append('a');
- break;
- default:
- break;
- }
- msg.Append('/');
- msg.AppendDetailed(*Handle<String>::cast(source), false);
- msg.Append('/');
-
- // global flag
- Handle<Object> global = GetProperty(regexp, "global");
- if (global->IsTrue()) {
- msg.Append('g');
- }
- // ignorecase flag
- Handle<Object> ignorecase = GetProperty(regexp, "ignoreCase");
- if (ignorecase->IsTrue()) {
- msg.Append('i');
- }
- // multiline flag
- Handle<Object> multiline = GetProperty(regexp, "multiline");
- if (multiline->IsTrue()) {
- msg.Append('m');
- }
-
- msg.WriteToLogFile();
-}
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-
-void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_regexp) return;
- LogMessageBuilder msg(this);
- msg.Append("regexp-compile,");
- LogRegExpSource(regexp);
- msg.Append(in_cache ? ",hit\n" : ",miss\n");
- msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::LogRuntime(Vector<const char> format, JSArray* args) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_runtime) return;
- HandleScope scope;
- LogMessageBuilder msg(this);
- for (int i = 0; i < format.length(); i++) {
- char c = format[i];
- if (c == '%' && i <= format.length() - 2) {
- i++;
- ASSERT('0' <= format[i] && format[i] <= '9');
- MaybeObject* maybe = args->GetElement(format[i] - '0');
- Object* obj;
- if (!maybe->ToObject(&obj)) {
- msg.Append("<exception>");
- continue;
- }
- i++;
- switch (format[i]) {
- case 's':
- msg.AppendDetailed(String::cast(obj), false);
- break;
- case 'S':
- msg.AppendDetailed(String::cast(obj), true);
- break;
- case 'r':
- Logger::LogRegExpSource(Handle<JSRegExp>(JSRegExp::cast(obj)));
- break;
- case 'x':
- msg.Append("0x%x", Smi::cast(obj)->value());
- break;
- case 'i':
- msg.Append("%i", Smi::cast(obj)->value());
- break;
- default:
- UNREACHABLE();
- }
- } else {
- msg.Append(c);
- }
- }
- msg.Append('\n');
- msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::ApiIndexedSecurityCheck(uint32_t index) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_api) return;
- ApiEvent("api,check-security,%u\n", index);
-#endif
-}
-
-
-void Logger::ApiNamedPropertyAccess(const char* tag,
- JSObject* holder,
- Object* name) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- ASSERT(name->IsString());
- if (!log_->IsEnabled() || !FLAG_log_api) return;
- String* class_name_obj = holder->class_name();
- SmartPointer<char> class_name =
- class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- SmartPointer<char> property_name =
- String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, *class_name, *property_name);
-#endif
-}
-
-void Logger::ApiIndexedPropertyAccess(const char* tag,
- JSObject* holder,
- uint32_t index) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_api) return;
- String* class_name_obj = holder->class_name();
- SmartPointer<char> class_name =
- class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,%s,\"%s\",%u\n", tag, *class_name, index);
-#endif
-}
-
-void Logger::ApiObjectAccess(const char* tag, JSObject* object) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_api) return;
- String* class_name_obj = object->class_name();
- SmartPointer<char> class_name =
- class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,%s,\"%s\"\n", tag, *class_name);
-#endif
-}
-
-
-void Logger::ApiEntryCall(const char* name) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_api) return;
- ApiEvent("api,%s\n", name);
-#endif
-}
-
-
-void Logger::NewEvent(const char* name, void* object, size_t size) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg(this);
- msg.Append("new,%s,0x%" V8PRIxPTR ",%u\n", name, object,
- static_cast<unsigned int>(size));
- msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::DeleteEvent(const char* name, void* object) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg(this);
- msg.Append("delete,%s,0x%" V8PRIxPTR "\n", name, object);
- msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::NewEventStatic(const char* name, void* object, size_t size) {
- LOGGER->NewEvent(name, object, size);
-}
-
-
-void Logger::DeleteEventStatic(const char* name, void* object) {
- LOGGER->DeleteEvent(name, object);
-}
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-void Logger::CallbackEventInternal(const char* prefix, const char* name,
- Address entry_point) {
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,%s,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[CALLBACK_TAG]);
- msg.AppendAddress(entry_point);
- msg.Append(",1,\"%s%s\"", prefix, name);
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-#endif
-
-
-void Logger::CallbackEvent(String* name, Address entry_point) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- SmartPointer<char> str =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- CallbackEventInternal("", *str, entry_point);
-#endif
-}
-
-
-void Logger::GetterCallbackEvent(String* name, Address entry_point) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- SmartPointer<char> str =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- CallbackEventInternal("get ", *str, entry_point);
-#endif
-}
-
-
-void Logger::SetterCallbackEvent(String* name, Address entry_point) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- SmartPointer<char> str =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- CallbackEventInternal("set ", *str, entry_point);
-#endif
-}
-
-
-void Logger::CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
- const char* comment) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,%s,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag]);
- msg.AppendAddress(code->address());
- msg.Append(",%d,\"", code->ExecutableSize());
- for (const char* p = comment; *p != '\0'; p++) {
- if (*p == '"') {
- msg.Append('\\');
- }
- msg.Append(*p);
- }
- msg.Append('"');
- LowLevelCodeCreateEvent(code, &msg);
- msg.Append('\n');
- msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
- String* name) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (name != NULL) {
- SmartPointer<char> str =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- CodeCreateEvent(tag, code, *str);
- } else {
- CodeCreateEvent(tag, code, "");
- }
-#endif
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-// ComputeMarker must only be used when SharedFunctionInfo is known.
-static const char* ComputeMarker(Code* code) {
- switch (code->kind()) {
- case Code::FUNCTION: return code->optimizable() ? "~" : "";
- case Code::OPTIMIZED_FUNCTION: return "*";
- default: return "";
- }
-}
-#endif
-
-
-void Logger::CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- String* name) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- if (code == Isolate::Current()->builtins()->builtin(
- Builtins::kLazyCompile))
- return;
-
- LogMessageBuilder msg(this);
- SmartPointer<char> str =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("%s,%s,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag]);
- msg.AppendAddress(code->address());
- msg.Append(",%d,\"%s\",", code->ExecutableSize(), *str);
- msg.AppendAddress(shared->address());
- msg.Append(",%s", ComputeMarker(code));
- LowLevelCodeCreateEvent(code, &msg);
- msg.Append('\n');
- msg.WriteToLogFile();
-#endif
-}
-
-
-// Although, it is possible to extract source and line from
-// the SharedFunctionInfo object, we left it to caller
-// to leave logging functions free from heap allocations.
-void Logger::CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- String* source, int line) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg(this);
- SmartPointer<char> name =
- shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- SmartPointer<char> sourcestr =
- source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("%s,%s,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag]);
- msg.AppendAddress(code->address());
- msg.Append(",%d,\"%s %s:%d\",",
- code->ExecutableSize(),
- *name,
- *sourcestr,
- line);
- msg.AppendAddress(shared->address());
- msg.Append(",%s", ComputeMarker(code));
- LowLevelCodeCreateEvent(code, &msg);
- msg.Append('\n');
- msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,%s,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag]);
- msg.AppendAddress(code->address());
- msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
- LowLevelCodeCreateEvent(code, &msg);
- msg.Append('\n');
- msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::CodeMovingGCEvent() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_code || !FLAG_ll_prof) return;
- LogMessageBuilder msg(this);
- msg.Append("%s\n", kLogEventsNames[CODE_MOVING_GC]);
- msg.WriteToLogFile();
- OS::SignalCodeMovingGC();
-#endif
-}
-
-
-void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,%s,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[REG_EXP_TAG]);
- msg.AppendAddress(code->address());
- msg.Append(",%d,\"", code->ExecutableSize());
- msg.AppendDetailed(source, false);
- msg.Append('\"');
- LowLevelCodeCreateEvent(code, &msg);
- msg.Append('\n');
- msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::CodeMoveEvent(Address from, Address to) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- MoveEventInternal(CODE_MOVE_EVENT, from, to);
-#endif
-}
-
-
-void Logger::CodeDeleteEvent(Address from) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- DeleteEventInternal(CODE_DELETE_EVENT, from);
-#endif
-}
-
-
-void Logger::SnapshotPositionEvent(Address addr, int pos) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_snapshot_positions) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,", kLogEventsNames[SNAPSHOT_POSITION_EVENT]);
- msg.AppendAddress(addr);
- msg.Append(",%d", pos);
- msg.Append('\n');
- msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::SharedFunctionInfoMoveEvent(Address from, Address to) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- MoveEventInternal(SHARED_FUNC_MOVE_EVENT, from, to);
-#endif
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-void Logger::MoveEventInternal(LogEventsAndTags event,
- Address from,
- Address to) {
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,", kLogEventsNames[event]);
- msg.AppendAddress(from);
- msg.Append(',');
- msg.AppendAddress(to);
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-#endif
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-void Logger::DeleteEventInternal(LogEventsAndTags event, Address from) {
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,", kLogEventsNames[event]);
- msg.AppendAddress(from);
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-#endif
-
-
-void Logger::ResourceEvent(const char* name, const char* tag) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,%s,", name, tag);
-
- uint32_t sec, usec;
- if (OS::GetUserTime(&sec, &usec) != -1) {
- msg.Append("%d,%d,", sec, usec);
- }
- msg.Append("%.0f", OS::TimeCurrentMillis());
-
- msg.Append('\n');
- msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::SuspectReadEvent(String* name, Object* obj) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_suspect) return;
- LogMessageBuilder msg(this);
- String* class_name = obj->IsJSObject()
- ? JSObject::cast(obj)->class_name()
- : HEAP->empty_string();
- msg.Append("suspect-read,");
- msg.Append(class_name);
- msg.Append(',');
- msg.Append('"');
- msg.Append(name);
- msg.Append('"');
- msg.Append('\n');
- msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg(this);
- // Using non-relative system time in order to be able to synchronize with
- // external memory profiling events (e.g. DOM memory size).
- msg.Append("heap-sample-begin,\"%s\",\"%s\",%.0f\n",
- space, kind, OS::TimeCurrentMillis());
- msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::HeapSampleStats(const char* space, const char* kind,
- intptr_t capacity, intptr_t used) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg(this);
- msg.Append("heap-sample-stats,\"%s\",\"%s\","
- "%" V8_PTR_PREFIX "d,%" V8_PTR_PREFIX "d\n",
- space, kind, capacity, used);
- msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg(this);
- msg.Append("heap-sample-end,\"%s\",\"%s\"\n", space, kind);
- msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg(this);
- msg.Append("heap-sample-item,%s,%d,%d\n", type, number, bytes);
- msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::HeapSampleJSConstructorEvent(const char* constructor,
- int number, int bytes) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg(this);
- msg.Append("heap-js-cons-item,%s,%d,%d\n", constructor, number, bytes);
- msg.WriteToLogFile();
-#endif
-}
-
-// Event starts with comma, so we don't have it in the format string.
-static const char kEventText[] = "heap-js-ret-item,%s";
-// We take placeholder strings into account, but it's OK to be conservative.
-static const int kEventTextLen = sizeof(kEventText)/sizeof(kEventText[0]);
-
-void Logger::HeapSampleJSRetainersEvent(
- const char* constructor, const char* event) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_gc) return;
- const int cons_len = StrLength(constructor);
- const int event_len = StrLength(event);
- int pos = 0;
- // Retainer lists can be long. We may need to split them into multiple events.
- do {
- LogMessageBuilder msg(this);
- msg.Append(kEventText, constructor);
- int to_write = event_len - pos;
- if (to_write > Log::kMessageBufferSize - (cons_len + kEventTextLen)) {
- int cut_pos = pos + Log::kMessageBufferSize - (cons_len + kEventTextLen);
- ASSERT(cut_pos < event_len);
- while (cut_pos > pos && event[cut_pos] != ',') --cut_pos;
- if (event[cut_pos] != ',') {
- // Crash in debug mode, skip in release mode.
- ASSERT(false);
- return;
- }
- // Append a piece of event that fits, without trailing comma.
- msg.AppendStringPart(event + pos, cut_pos - pos);
- // Start next piece with comma.
- pos = cut_pos;
- } else {
- msg.Append("%s", event + pos);
- pos += event_len;
- }
- msg.Append('\n');
- msg.WriteToLogFile();
- } while (pos < event_len);
-#endif
-}
-
-
-void Logger::HeapSampleJSProducerEvent(const char* constructor,
- Address* stack) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg(this);
- msg.Append("heap-js-prod-item,%s", constructor);
- while (*stack != NULL) {
- msg.Append(",0x%" V8PRIxPTR, *stack++);
- }
- msg.Append("\n");
- msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::DebugTag(const char* call_site_tag) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg(this);
- msg.Append("debug-tag,%s\n", call_site_tag);
- msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log) return;
- StringBuilder s(parameter.length() + 1);
- for (int i = 0; i < parameter.length(); ++i) {
- s.AddCharacter(static_cast<char>(parameter[i]));
- }
- char* parameter_string = s.Finalize();
- LogMessageBuilder msg(this);
- msg.Append("debug-queue-event,%s,%15.3f,%s\n",
- event_type,
- OS::TimeCurrentMillis(),
- parameter_string);
- DeleteArray(parameter_string);
- msg.WriteToLogFile();
-#endif
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-void Logger::TickEvent(TickSample* sample, bool overflow) {
- if (!log_->IsEnabled() || !FLAG_prof) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,", kLogEventsNames[TICK_EVENT]);
- msg.AppendAddress(sample->pc);
- msg.Append(',');
- msg.AppendAddress(sample->sp);
- if (sample->has_external_callback) {
- msg.Append(",1,");
- msg.AppendAddress(sample->external_callback);
- } else {
- msg.Append(",0,");
- msg.AppendAddress(sample->tos);
- }
- msg.Append(",%d", static_cast<int>(sample->state));
- if (overflow) {
- msg.Append(",overflow");
- }
- for (int i = 0; i < sample->frames_count; ++i) {
- msg.Append(',');
- msg.AppendAddress(sample->stack[i]);
- }
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-
-
-int Logger::GetActiveProfilerModules() {
- int result = PROFILER_MODULE_NONE;
- if (profiler_ != NULL && !profiler_->paused()) {
- result |= PROFILER_MODULE_CPU;
- }
- if (FLAG_log_gc) {
- result |= PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS;
- }
- return result;
-}
-
-
-void Logger::PauseProfiler(int flags, int tag) {
- if (!log_->IsEnabled()) return;
- if (profiler_ != NULL && (flags & PROFILER_MODULE_CPU)) {
- // It is OK to have negative nesting.
- if (--cpu_profiler_nesting_ == 0) {
- profiler_->pause();
- if (FLAG_prof_lazy) {
- if (!FLAG_sliding_state_window && !RuntimeProfiler::IsEnabled()) {
- ticker_->Stop();
- }
- FLAG_log_code = false;
- // Must be the same message as Log::kDynamicBufferSeal.
- LOG(ISOLATE, UncheckedStringEvent("profiler", "pause"));
- }
- --logging_nesting_;
- }
- }
- if (flags &
- (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
- if (--heap_profiler_nesting_ == 0) {
- FLAG_log_gc = false;
- --logging_nesting_;
- }
- }
- if (tag != 0) {
- UncheckedIntEvent("close-tag", tag);
- }
-}
-
-
-void Logger::ResumeProfiler(int flags, int tag) {
- if (!log_->IsEnabled()) return;
- if (tag != 0) {
- UncheckedIntEvent("open-tag", tag);
- }
- if (profiler_ != NULL && (flags & PROFILER_MODULE_CPU)) {
- if (cpu_profiler_nesting_++ == 0) {
- ++logging_nesting_;
- if (FLAG_prof_lazy) {
- profiler_->Engage();
- LOG(ISOLATE, UncheckedStringEvent("profiler", "resume"));
- FLAG_log_code = true;
- LogCompiledFunctions();
- LogAccessorCallbacks();
- if (!FLAG_sliding_state_window && !ticker_->IsActive()) {
- ticker_->Start();
- }
- }
- profiler_->resume();
- }
- }
- if (flags &
- (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
- if (heap_profiler_nesting_++ == 0) {
- ++logging_nesting_;
- FLAG_log_gc = true;
- }
- }
-}
-
-
-// This function can be called when Log's mutex is acquired,
-// either from main or Profiler's thread.
-void Logger::LogFailure() {
- PauseProfiler(PROFILER_MODULE_CPU, 0);
-}
-
-
-bool Logger::IsProfilerSamplerActive() {
- return ticker_->IsActive();
-}
-
-
-int Logger::GetLogLines(int from_pos, char* dest_buf, int max_size) {
- return log_->GetLogLines(from_pos, dest_buf, max_size);
-}
-
-
-class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor {
- public:
- EnumerateOptimizedFunctionsVisitor(Handle<SharedFunctionInfo>* sfis,
- Handle<Code>* code_objects,
- int* count)
- : sfis_(sfis), code_objects_(code_objects), count_(count) { }
-
- virtual void EnterContext(Context* context) {}
- virtual void LeaveContext(Context* context) {}
-
- virtual void VisitFunction(JSFunction* function) {
- if (sfis_ != NULL) {
- sfis_[*count_] = Handle<SharedFunctionInfo>(function->shared());
- }
- if (code_objects_ != NULL) {
- ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
- code_objects_[*count_] = Handle<Code>(function->code());
- }
- *count_ = *count_ + 1;
- }
-
- private:
- Handle<SharedFunctionInfo>* sfis_;
- Handle<Code>* code_objects_;
- int* count_;
-};
-
-
-static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis,
- Handle<Code>* code_objects) {
- AssertNoAllocation no_alloc;
- int compiled_funcs_count = 0;
-
- // Iterate the heap to find shared function info objects and record
- // the unoptimized code for them.
- HeapIterator iterator;
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- if (!obj->IsSharedFunctionInfo()) continue;
- SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
- if (sfi->is_compiled()
- && (!sfi->script()->IsScript()
- || Script::cast(sfi->script())->HasValidSource())) {
- if (sfis != NULL) {
- sfis[compiled_funcs_count] = Handle<SharedFunctionInfo>(sfi);
- }
- if (code_objects != NULL) {
- code_objects[compiled_funcs_count] = Handle<Code>(sfi->code());
- }
- ++compiled_funcs_count;
- }
- }
-
- // Iterate all optimized functions in all contexts.
- EnumerateOptimizedFunctionsVisitor visitor(sfis,
- code_objects,
- &compiled_funcs_count);
- Deoptimizer::VisitAllOptimizedFunctions(&visitor);
-
- return compiled_funcs_count;
-}
-
-
-void Logger::LogCodeObject(Object* object) {
- if (FLAG_log_code) {
- Code* code_object = Code::cast(object);
- LogEventsAndTags tag = Logger::STUB_TAG;
- const char* description = "Unknown code from the snapshot";
- switch (code_object->kind()) {
- case Code::FUNCTION:
- case Code::OPTIMIZED_FUNCTION:
- return; // We log this later using LogCompiledFunctions.
- case Code::BINARY_OP_IC: // fall through
- case Code::TYPE_RECORDING_BINARY_OP_IC: // fall through
- case Code::COMPARE_IC: // fall through
- case Code::STUB:
- description =
- CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true);
- if (description == NULL)
- description = "A stub from the snapshot";
- tag = Logger::STUB_TAG;
- break;
- case Code::BUILTIN:
- description = "A builtin from the snapshot";
- tag = Logger::BUILTIN_TAG;
- break;
- case Code::KEYED_LOAD_IC:
- description = "A keyed load IC from the snapshot";
- tag = Logger::KEYED_LOAD_IC_TAG;
- break;
- case Code::KEYED_EXTERNAL_ARRAY_LOAD_IC:
- description = "A keyed external array load IC from the snapshot";
- tag = Logger::KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG;
- break;
- case Code::LOAD_IC:
- description = "A load IC from the snapshot";
- tag = Logger::LOAD_IC_TAG;
- break;
- case Code::STORE_IC:
- description = "A store IC from the snapshot";
- tag = Logger::STORE_IC_TAG;
- break;
- case Code::KEYED_STORE_IC:
- description = "A keyed store IC from the snapshot";
- tag = Logger::KEYED_STORE_IC_TAG;
- break;
- case Code::KEYED_EXTERNAL_ARRAY_STORE_IC:
- description = "A keyed external array store IC from the snapshot";
- tag = Logger::KEYED_EXTERNAL_ARRAY_STORE_IC_TAG;
- break;
- case Code::CALL_IC:
- description = "A call IC from the snapshot";
- tag = Logger::CALL_IC_TAG;
- break;
- case Code::KEYED_CALL_IC:
- description = "A keyed call IC from the snapshot";
- tag = Logger::KEYED_CALL_IC_TAG;
- break;
- }
- PROFILE(ISOLATE, CodeCreateEvent(tag, code_object, description));
- }
-}
-
-
-void Logger::LogCodeInfo() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_code || !FLAG_ll_prof) return;
-#if V8_TARGET_ARCH_IA32
- const char arch[] = "ia32";
-#elif V8_TARGET_ARCH_X64
- const char arch[] = "x64";
-#elif V8_TARGET_ARCH_ARM
- const char arch[] = "arm";
-#else
- const char arch[] = "unknown";
-#endif
- LogMessageBuilder msg(this);
- msg.Append("code-info,%s,%d\n", arch, Code::kHeaderSize);
- msg.WriteToLogFile();
-#endif // ENABLE_LOGGING_AND_PROFILING
-}
-
-
-void Logger::LowLevelCodeCreateEvent(Code* code, LogMessageBuilder* msg) {
- if (!FLAG_ll_prof || log_->output_code_handle_ == NULL) return;
- int pos = static_cast<int>(ftell(log_->output_code_handle_));
- size_t rv = fwrite(code->instruction_start(), 1, code->instruction_size(),
- log_->output_code_handle_);
- ASSERT(static_cast<size_t>(code->instruction_size()) == rv);
- USE(rv);
- msg->Append(",%d", pos);
-}
-
-
-void Logger::LogCodeObjects() {
- AssertNoAllocation no_alloc;
- HeapIterator iterator;
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- if (obj->IsCode()) LogCodeObject(obj);
- }
-}
-
-
-void Logger::LogCompiledFunctions() {
- HandleScope scope;
- const int compiled_funcs_count = EnumerateCompiledFunctions(NULL, NULL);
- ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
- ScopedVector< Handle<Code> > code_objects(compiled_funcs_count);
- EnumerateCompiledFunctions(sfis.start(), code_objects.start());
-
- // During iteration, there can be heap allocation due to
- // GetScriptLineNumber call.
- for (int i = 0; i < compiled_funcs_count; ++i) {
- if (*code_objects[i] == Isolate::Current()->builtins()->builtin(
- Builtins::kLazyCompile))
- continue;
- Handle<SharedFunctionInfo> shared = sfis[i];
- Handle<String> func_name(shared->DebugName());
- if (shared->script()->IsScript()) {
- Handle<Script> script(Script::cast(shared->script()));
- if (script->name()->IsString()) {
- Handle<String> script_name(String::cast(script->name()));
- int line_num = GetScriptLineNumber(script, shared->start_position());
- if (line_num > 0) {
- PROFILE(ISOLATE,
- CodeCreateEvent(
- Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
- *code_objects[i], *shared,
- *script_name, line_num + 1));
- } else {
- // Can't distinguish eval and script here, so always use Script.
- PROFILE(ISOLATE,
- CodeCreateEvent(
- Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
- *code_objects[i], *shared, *script_name));
- }
- } else {
- PROFILE(ISOLATE,
- CodeCreateEvent(
- Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
- *code_objects[i], *shared, *func_name));
- }
- } else if (shared->IsApiFunction()) {
- // API function.
- FunctionTemplateInfo* fun_data = shared->get_api_func_data();
- Object* raw_call_data = fun_data->call_code();
- if (!raw_call_data->IsUndefined()) {
- CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
- Object* callback_obj = call_data->callback();
- Address entry_point = v8::ToCData<Address>(callback_obj);
- PROFILE(ISOLATE, CallbackEvent(*func_name, entry_point));
- }
- } else {
- PROFILE(ISOLATE,
- CodeCreateEvent(
- Logger::LAZY_COMPILE_TAG, *code_objects[i],
- *shared, *func_name));
- }
- }
-}
-
-
-void Logger::LogAccessorCallbacks() {
- AssertNoAllocation no_alloc;
- HeapIterator iterator;
- i::Isolate* isolate = ISOLATE;
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- if (!obj->IsAccessorInfo()) continue;
- AccessorInfo* ai = AccessorInfo::cast(obj);
- if (!ai->name()->IsString()) continue;
- String* name = String::cast(ai->name());
- Address getter_entry = v8::ToCData<Address>(ai->getter());
- if (getter_entry != 0) {
- PROFILE(isolate, GetterCallbackEvent(name, getter_entry));
- }
- Address setter_entry = v8::ToCData<Address>(ai->setter());
- if (setter_entry != 0) {
- PROFILE(isolate, SetterCallbackEvent(name, setter_entry));
- }
- }
-}
-
-#endif
-
-
-bool Logger::Setup() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // Tests and EnsureInitialize() can call this twice in a row. It's harmless.
- if (is_initialized_) return true;
- is_initialized_ = true;
-
- // --ll-prof implies --log-code and --log-snapshot-positions.
- if (FLAG_ll_prof) {
- FLAG_log_code = true;
- FLAG_log_snapshot_positions = true;
- }
-
- // --prof_lazy controls --log-code, implies --noprof_auto.
- if (FLAG_prof_lazy) {
- FLAG_log_code = false;
- FLAG_prof_auto = false;
- }
-
- // TODO(isolates): this assert introduces cyclic dependency (logger
- // -> thread local top -> heap -> logger).
- // ASSERT(VMState::is_outermost_external());
-
- log_->Initialize();
-
- if (FLAG_ll_prof) LogCodeInfo();
-
- ticker_ = new Ticker(Isolate::Current(), kSamplingIntervalMs);
-
- Isolate* isolate = Isolate::Current();
- if (FLAG_sliding_state_window && sliding_state_window_ == NULL) {
- sliding_state_window_ = new SlidingStateWindow(isolate);
- }
-
- bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
- || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
- || FLAG_log_regexp || FLAG_log_state_changes;
-
- if (start_logging) {
- logging_nesting_ = 1;
- }
-
- if (FLAG_prof) {
- profiler_ = new Profiler(isolate);
- if (!FLAG_prof_auto) {
- profiler_->pause();
- } else {
- logging_nesting_ = 1;
- }
- if (!FLAG_prof_lazy) {
- profiler_->Engage();
- }
- }
-
- return true;
-
-#else
- return false;
-#endif
-}
-
-
-Sampler* Logger::sampler() {
- return ticker_;
-}
-
-
-void Logger::EnsureTickerStarted() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- ASSERT(ticker_ != NULL);
- if (!ticker_->IsActive()) ticker_->Start();
-#endif
-}
-
-
-void Logger::EnsureTickerStopped() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (ticker_ != NULL && ticker_->IsActive()) ticker_->Stop();
-#endif
-}
-
-
-void Logger::TearDown() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!is_initialized_) return;
- is_initialized_ = false;
-
- // Stop the profiler before closing the file.
- if (profiler_ != NULL) {
- profiler_->Disengage();
- delete profiler_;
- profiler_ = NULL;
- }
-
- delete sliding_state_window_;
- sliding_state_window_ = NULL;
-
- delete ticker_;
- ticker_ = NULL;
-
- log_->Close();
-#endif
-}
-
-
-void Logger::EnableSlidingStateWindow() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If the ticker is NULL, Logger::Setup has not been called yet. In
- // that case, we set the sliding_state_window flag so that the
- // sliding window computation will be started when Logger::Setup is
- // called.
- if (ticker_ == NULL) {
- FLAG_sliding_state_window = true;
- return;
- }
- // Otherwise, if the sliding state window computation has not been
- // started we do it now.
- if (sliding_state_window_ == NULL) {
- sliding_state_window_ = new SlidingStateWindow(Isolate::Current());
- }
-#endif
-}
-
-
-Mutex* SamplerRegistry::mutex_ = OS::CreateMutex();
-List<Sampler*>* SamplerRegistry::active_samplers_ = NULL;
-
-
-bool SamplerRegistry::IterateActiveSamplers(VisitSampler func, void* param) {
- ScopedLock lock(mutex_);
- for (int i = 0;
- ActiveSamplersExist() && i < active_samplers_->length();
- ++i) {
- func(active_samplers_->at(i), param);
- }
- return ActiveSamplersExist();
-}
-
-
-static void ComputeCpuProfiling(Sampler* sampler, void* flag_ptr) {
- bool* flag = reinterpret_cast<bool*>(flag_ptr);
- *flag |= sampler->IsProfiling();
-}
-
-
-SamplerRegistry::State SamplerRegistry::GetState() {
- bool flag = false;
- if (!IterateActiveSamplers(&ComputeCpuProfiling, &flag)) {
- return HAS_NO_SAMPLERS;
- }
- return flag ? HAS_CPU_PROFILING_SAMPLERS : HAS_SAMPLERS;
-}
-
-
-void SamplerRegistry::AddActiveSampler(Sampler* sampler) {
- ASSERT(sampler->IsActive());
- ScopedLock lock(mutex_);
- if (active_samplers_ == NULL) {
- active_samplers_ = new List<Sampler*>;
- } else {
- ASSERT(!active_samplers_->Contains(sampler));
- }
- active_samplers_->Add(sampler);
-}
-
-
-void SamplerRegistry::RemoveActiveSampler(Sampler* sampler) {
- ASSERT(sampler->IsActive());
- ScopedLock lock(mutex_);
- ASSERT(active_samplers_ != NULL);
- bool removed = active_samplers_->RemoveElement(sampler);
- ASSERT(removed);
- USE(removed);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/log.h b/src/3rdparty/v8/src/log.h
deleted file mode 100644
index 4fb0e23..0000000
--- a/src/3rdparty/v8/src/log.h
+++ /dev/null
@@ -1,446 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LOG_H_
-#define V8_LOG_H_
-
-#include "platform.h"
-#include "log-utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Logger is used for collecting logging information from V8 during
-// execution. The result is dumped to a file.
-//
-// Available command line flags:
-//
-// --log
-// Minimal logging (no API, code, or GC sample events), default is off.
-//
-// --log-all
-// Log all events to the file, default is off. This is the same as combining
-// --log-api, --log-code, --log-gc, and --log-regexp.
-//
-// --log-api
-// Log API events to the logfile, default is off. --log-api implies --log.
-//
-// --log-code
-// Log code (create, move, and delete) events to the logfile, default is off.
-// --log-code implies --log.
-//
-// --log-gc
-// Log GC heap samples after each GC that can be processed by hp2ps, default
-// is off. --log-gc implies --log.
-//
-// --log-regexp
-// Log creation and use of regular expressions, Default is off.
-// --log-regexp implies --log.
-//
-// --logfile <filename>
-// Specify the name of the logfile, default is "v8.log".
-//
-// --prof
-// Collect statistical profiling information (ticks), default is off. The
-// tick profiler requires code events, so --prof implies --log-code.
-
-// Forward declarations.
-class Ticker;
-class Profiler;
-class Semaphore;
-class SlidingStateWindow;
-class LogMessageBuilder;
-
-#undef LOG
-#ifdef ENABLE_LOGGING_AND_PROFILING
-#define LOG(isolate, Call) \
- do { \
- v8::internal::Logger* logger = \
- (isolate)->logger(); \
- if (logger->is_logging()) \
- logger->Call; \
- } while (false)
-#else
-#define LOG(isolate, Call) ((void) 0)
-#endif
-
-#define LOG_EVENTS_AND_TAGS_LIST(V) \
- V(CODE_CREATION_EVENT, "code-creation") \
- V(CODE_MOVE_EVENT, "code-move") \
- V(CODE_DELETE_EVENT, "code-delete") \
- V(CODE_MOVING_GC, "code-moving-gc") \
- V(SHARED_FUNC_MOVE_EVENT, "sfi-move") \
- V(SNAPSHOT_POSITION_EVENT, "snapshot-pos") \
- V(TICK_EVENT, "tick") \
- V(REPEAT_META_EVENT, "repeat") \
- V(BUILTIN_TAG, "Builtin") \
- V(CALL_DEBUG_BREAK_TAG, "CallDebugBreak") \
- V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn") \
- V(CALL_IC_TAG, "CallIC") \
- V(CALL_INITIALIZE_TAG, "CallInitialize") \
- V(CALL_MEGAMORPHIC_TAG, "CallMegamorphic") \
- V(CALL_MISS_TAG, "CallMiss") \
- V(CALL_NORMAL_TAG, "CallNormal") \
- V(CALL_PRE_MONOMORPHIC_TAG, "CallPreMonomorphic") \
- V(KEYED_CALL_DEBUG_BREAK_TAG, "KeyedCallDebugBreak") \
- V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG, \
- "KeyedCallDebugPrepareStepIn") \
- V(KEYED_CALL_IC_TAG, "KeyedCallIC") \
- V(KEYED_CALL_INITIALIZE_TAG, "KeyedCallInitialize") \
- V(KEYED_CALL_MEGAMORPHIC_TAG, "KeyedCallMegamorphic") \
- V(KEYED_CALL_MISS_TAG, "KeyedCallMiss") \
- V(KEYED_CALL_NORMAL_TAG, "KeyedCallNormal") \
- V(KEYED_CALL_PRE_MONOMORPHIC_TAG, "KeyedCallPreMonomorphic") \
- V(CALLBACK_TAG, "Callback") \
- V(EVAL_TAG, "Eval") \
- V(FUNCTION_TAG, "Function") \
- V(KEYED_LOAD_IC_TAG, "KeyedLoadIC") \
- V(KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG, "KeyedExternalArrayLoadIC") \
- V(KEYED_STORE_IC_TAG, "KeyedStoreIC") \
- V(KEYED_EXTERNAL_ARRAY_STORE_IC_TAG, "KeyedExternalArrayStoreIC")\
- V(LAZY_COMPILE_TAG, "LazyCompile") \
- V(LOAD_IC_TAG, "LoadIC") \
- V(REG_EXP_TAG, "RegExp") \
- V(SCRIPT_TAG, "Script") \
- V(STORE_IC_TAG, "StoreIC") \
- V(STUB_TAG, "Stub") \
- V(NATIVE_FUNCTION_TAG, "Function") \
- V(NATIVE_LAZY_COMPILE_TAG, "LazyCompile") \
- V(NATIVE_SCRIPT_TAG, "Script")
-// Note that 'NATIVE_' cases for functions and scripts are mapped onto
-// original tags when writing to the log.
-
-
-class Sampler;
-
-
-class Logger {
- public:
-#define DECLARE_ENUM(enum_item, ignore) enum_item,
- enum LogEventsAndTags {
- LOG_EVENTS_AND_TAGS_LIST(DECLARE_ENUM)
- NUMBER_OF_LOG_EVENTS
- };
-#undef DECLARE_ENUM
-
- // Acquires resources for logging if the right flags are set.
- bool Setup();
-
- void EnsureTickerStarted();
- void EnsureTickerStopped();
-
- Sampler* sampler();
-
- // Frees resources acquired in Setup.
- void TearDown();
-
- // Enable the computation of a sliding window of states.
- void EnableSlidingStateWindow();
-
- // Emits an event with a string value -> (name, value).
- void StringEvent(const char* name, const char* value);
-
- // Emits an event with an int value -> (name, value).
- void IntEvent(const char* name, int value);
- void IntPtrTEvent(const char* name, intptr_t value);
-
- // Emits an event with an handle value -> (name, location).
- void HandleEvent(const char* name, Object** location);
-
- // Emits memory management events for C allocated structures.
- void NewEvent(const char* name, void* object, size_t size);
- void DeleteEvent(const char* name, void* object);
-
- // Static versions of the above, operate on current isolate's logger.
- // Used in TRACK_MEMORY(TypeName) defined in globals.h
- static void NewEventStatic(const char* name, void* object, size_t size);
- static void DeleteEventStatic(const char* name, void* object);
-
- // Emits an event with a tag, and some resource usage information.
- // -> (name, tag, <rusage information>).
- // Currently, the resource usage information is a process time stamp
- // and a real time timestamp.
- void ResourceEvent(const char* name, const char* tag);
-
- // Emits an event that an undefined property was read from an
- // object.
- void SuspectReadEvent(String* name, Object* obj);
-
- // Emits an event when a message is put on or read from a debugging queue.
- // DebugTag lets us put a call-site specific label on the event.
- void DebugTag(const char* call_site_tag);
- void DebugEvent(const char* event_type, Vector<uint16_t> parameter);
-
-
- // ==== Events logged by --log-api. ====
- void ApiNamedSecurityCheck(Object* key);
- void ApiIndexedSecurityCheck(uint32_t index);
- void ApiNamedPropertyAccess(const char* tag, JSObject* holder, Object* name);
- void ApiIndexedPropertyAccess(const char* tag,
- JSObject* holder,
- uint32_t index);
- void ApiObjectAccess(const char* tag, JSObject* obj);
- void ApiEntryCall(const char* name);
-
-
- // ==== Events logged by --log-code. ====
- // Emits a code event for a callback function.
- void CallbackEvent(String* name, Address entry_point);
- void GetterCallbackEvent(String* name, Address entry_point);
- void SetterCallbackEvent(String* name, Address entry_point);
- // Emits a code create event.
- void CodeCreateEvent(LogEventsAndTags tag,
- Code* code, const char* source);
- void CodeCreateEvent(LogEventsAndTags tag,
- Code* code, String* name);
- void CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- String* name);
- void CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- String* source, int line);
- void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count);
- void CodeMovingGCEvent();
- // Emits a code create event for a RegExp.
- void RegExpCodeCreateEvent(Code* code, String* source);
- // Emits a code move event.
- void CodeMoveEvent(Address from, Address to);
- // Emits a code delete event.
- void CodeDeleteEvent(Address from);
-
- void SharedFunctionInfoMoveEvent(Address from, Address to);
-
- void SnapshotPositionEvent(Address addr, int pos);
-
- // ==== Events logged by --log-gc. ====
- // Heap sampling events: start, end, and individual types.
- void HeapSampleBeginEvent(const char* space, const char* kind);
- void HeapSampleEndEvent(const char* space, const char* kind);
- void HeapSampleItemEvent(const char* type, int number, int bytes);
- void HeapSampleJSConstructorEvent(const char* constructor,
- int number, int bytes);
- void HeapSampleJSRetainersEvent(const char* constructor,
- const char* event);
- void HeapSampleJSProducerEvent(const char* constructor,
- Address* stack);
- void HeapSampleStats(const char* space, const char* kind,
- intptr_t capacity, intptr_t used);
-
- void SharedLibraryEvent(const char* library_path,
- uintptr_t start,
- uintptr_t end);
- void SharedLibraryEvent(const wchar_t* library_path,
- uintptr_t start,
- uintptr_t end);
-
- // ==== Events logged by --log-regexp ====
- // Regexp compilation and execution events.
-
- void RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache);
-
- // Log an event reported from generated code
- void LogRuntime(Vector<const char> format, JSArray* args);
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- bool is_logging() {
- return logging_nesting_ > 0;
- }
-
- // Pause/Resume collection of profiling data.
- // When data collection is paused, CPU Tick events are discarded until
- // data collection is Resumed.
- void PauseProfiler(int flags, int tag);
- void ResumeProfiler(int flags, int tag);
- int GetActiveProfilerModules();
-
- // If logging is performed into a memory buffer, allows to
- // retrieve previously written messages. See v8.h.
- int GetLogLines(int from_pos, char* dest_buf, int max_size);
-
- // Logs all compiled functions found in the heap.
- void LogCompiledFunctions();
- // Logs all accessor callbacks found in the heap.
- void LogAccessorCallbacks();
- // Used for logging stubs found in the snapshot.
- void LogCodeObjects();
-
- // Converts tag to a corresponding NATIVE_... if the script is native.
- INLINE(static LogEventsAndTags ToNativeByScript(LogEventsAndTags, Script*));
-
- // Profiler's sampling interval (in milliseconds).
- static const int kSamplingIntervalMs = 1;
-
- // Callback from Log, stops profiling in case of insufficient resources.
- void LogFailure();
-
- private:
- Logger();
- ~Logger();
-
- // Emits the profiler's first message.
- void ProfilerBeginEvent();
-
- // Emits callback event messages.
- void CallbackEventInternal(const char* prefix,
- const char* name,
- Address entry_point);
-
- // Internal configurable move event.
- void MoveEventInternal(LogEventsAndTags event, Address from, Address to);
-
- // Internal configurable move event.
- void DeleteEventInternal(LogEventsAndTags event, Address from);
-
- // Emits the source code of a regexp. Used by regexp events.
- void LogRegExpSource(Handle<JSRegExp> regexp);
-
- // Used for logging stubs found in the snapshot.
- void LogCodeObject(Object* code_object);
-
- // Emits general information about generated code.
- void LogCodeInfo();
-
- // Handles code creation when low-level profiling is active.
- void LowLevelCodeCreateEvent(Code* code, LogMessageBuilder* msg);
-
- // Emits a profiler tick event. Used by the profiler thread.
- void TickEvent(TickSample* sample, bool overflow);
-
- void ApiEvent(const char* name, ...);
-
- // Logs a StringEvent regardless of whether FLAG_log is true.
- void UncheckedStringEvent(const char* name, const char* value);
-
- // Logs an IntEvent regardless of whether FLAG_log is true.
- void UncheckedIntEvent(const char* name, int value);
- void UncheckedIntPtrTEvent(const char* name, intptr_t value);
-
- // Returns whether profiler's sampler is active.
- bool IsProfilerSamplerActive();
-
- // The sampler used by the profiler and the sliding state window.
- Ticker* ticker_;
-
- // When the statistical profile is active, profiler_
- // points to a Profiler, that handles collection
- // of samples.
- Profiler* profiler_;
-
- // SlidingStateWindow instance keeping a sliding window of the most
- // recent VM states.
- SlidingStateWindow* sliding_state_window_;
-
- // An array of log events names.
- const char* const* log_events_;
-
- // Internal implementation classes with access to
- // private members.
- friend class EventLog;
- friend class Isolate;
- friend class LogMessageBuilder;
- friend class TimeLog;
- friend class Profiler;
- friend class SlidingStateWindow;
- friend class StackTracer;
- friend class VMState;
-
- friend class LoggerTestHelper;
-
-
- int logging_nesting_;
- int cpu_profiler_nesting_;
- int heap_profiler_nesting_;
-
- Log* log_;
-
- // Guards against multiple calls to TearDown() that can happen in some tests.
- // 'true' between Setup() and TearDown().
- bool is_initialized_;
-
- // Support for 'incremental addresses' in compressed logs:
- // LogMessageBuilder::AppendAddress(Address addr)
- Address last_address_;
- // Logger::TickEvent(...)
- Address prev_sp_;
- Address prev_function_;
- // Logger::MoveEventInternal(...)
- Address prev_to_;
- // Logger::FunctionCreateEvent(...)
- Address prev_code_;
-
- friend class CpuProfiler;
-#else
- bool is_logging() { return false; }
-#endif
-};
-
-
-// Process wide registry of samplers.
-class SamplerRegistry : public AllStatic {
- public:
- enum State {
- HAS_NO_SAMPLERS,
- HAS_SAMPLERS,
- HAS_CPU_PROFILING_SAMPLERS
- };
-
- typedef void (*VisitSampler)(Sampler*, void*);
-
- static State GetState();
-
- // Iterates over all active samplers keeping the internal lock held.
- // Returns whether there are any active samplers.
- static bool IterateActiveSamplers(VisitSampler func, void* param);
-
- // Adds/Removes an active sampler.
- static void AddActiveSampler(Sampler* sampler);
- static void RemoveActiveSampler(Sampler* sampler);
-
- private:
- static bool ActiveSamplersExist() {
- return active_samplers_ != NULL && !active_samplers_->is_empty();
- }
-
- static Mutex* mutex_; // Protects the state below.
- static List<Sampler*>* active_samplers_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(SamplerRegistry);
-};
-
-
-// Class that extracts stack trace, used for profiling.
-class StackTracer : public AllStatic {
- public:
- static void Trace(Isolate* isolate, TickSample* sample);
-};
-
-} } // namespace v8::internal
-
-
-#endif // V8_LOG_H_
diff --git a/src/3rdparty/v8/src/macro-assembler.h b/src/3rdparty/v8/src/macro-assembler.h
deleted file mode 100644
index 30838bd..0000000
--- a/src/3rdparty/v8/src/macro-assembler.h
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MACRO_ASSEMBLER_H_
-#define V8_MACRO_ASSEMBLER_H_
-
-
-// Helper types to make boolean flag easier to read at call-site.
-enum InvokeFlag {
- CALL_FUNCTION,
- JUMP_FUNCTION
-};
-
-
-enum CodeLocation {
- IN_JAVASCRIPT,
- IN_JS_ENTRY,
- IN_C_ENTRY
-};
-
-
-enum HandlerType {
- TRY_CATCH_HANDLER,
- TRY_FINALLY_HANDLER,
- JS_ENTRY_HANDLER
-};
-
-
-// Types of uncatchable exceptions.
-enum UncatchableExceptionType {
- OUT_OF_MEMORY,
- TERMINATION
-};
-
-
-// Invalid depth in prototype chain.
-const int kInvalidProtoDepth = -1;
-
-#if V8_TARGET_ARCH_IA32
-#include "assembler.h"
-#include "ia32/assembler-ia32.h"
-#include "ia32/assembler-ia32-inl.h"
-#include "code.h" // must be after assembler_*.h
-#include "ia32/macro-assembler-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "assembler.h"
-#include "x64/assembler-x64.h"
-#include "x64/assembler-x64-inl.h"
-#include "code.h" // must be after assembler_*.h
-#include "x64/macro-assembler-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/constants-arm.h"
-#include "assembler.h"
-#include "arm/assembler-arm.h"
-#include "arm/assembler-arm-inl.h"
-#include "code.h" // must be after assembler_*.h
-#include "arm/macro-assembler-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/constants-mips.h"
-#include "assembler.h"
-#include "mips/assembler-mips.h"
-#include "mips/assembler-mips-inl.h"
-#include "code.h" // must be after assembler_*.h
-#include "mips/macro-assembler-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-// Support for "structured" code comments.
-#ifdef DEBUG
-
-class Comment {
- public:
- Comment(MacroAssembler* masm, const char* msg);
- ~Comment();
-
- private:
- MacroAssembler* masm_;
- const char* msg_;
-};
-
-#else
-
-class Comment {
- public:
- Comment(MacroAssembler*, const char*) {}
-};
-
-#endif // DEBUG
-
-} } // namespace v8::internal
-
-#endif // V8_MACRO_ASSEMBLER_H_
diff --git a/src/3rdparty/v8/src/macros.py b/src/3rdparty/v8/src/macros.py
deleted file mode 100644
index 69f36c0..0000000
--- a/src/3rdparty/v8/src/macros.py
+++ /dev/null
@@ -1,178 +0,0 @@
-# Copyright 2006-2009 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# Dictionary that is passed as defines for js2c.py.
-# Used for defines that must be defined for all native js files.
-
-const NONE = 0;
-const READ_ONLY = 1;
-const DONT_ENUM = 2;
-const DONT_DELETE = 4;
-
-# Constants used for getter and setter operations.
-const GETTER = 0;
-const SETTER = 1;
-
-# These definitions must match the index of the properties in objects.h.
-const kApiTagOffset = 0;
-const kApiPropertyListOffset = 1;
-const kApiSerialNumberOffset = 2;
-const kApiConstructorOffset = 2;
-const kApiPrototypeTemplateOffset = 5;
-const kApiParentTemplateOffset = 6;
-
-const NO_HINT = 0;
-const NUMBER_HINT = 1;
-const STRING_HINT = 2;
-
-const kFunctionTag = 0;
-const kNewObjectTag = 1;
-
-# For date.js.
-const HoursPerDay = 24;
-const MinutesPerHour = 60;
-const SecondsPerMinute = 60;
-const msPerSecond = 1000;
-const msPerMinute = 60000;
-const msPerHour = 3600000;
-const msPerDay = 86400000;
-const msPerMonth = 2592000000;
-
-# For apinatives.js
-const kUninitialized = -1;
-
-# Note: kDayZeroInJulianDay = ToJulianDay(1970, 0, 1).
-const kInvalidDate = 'Invalid Date';
-const kDayZeroInJulianDay = 2440588;
-const kMonthMask = 0x1e0;
-const kDayMask = 0x01f;
-const kYearShift = 9;
-const kMonthShift = 5;
-
-# Limits for parts of the date, so that we support all the dates that
-# ECMA 262 - 15.9.1.1 requires us to, but at the same time be sure that
-# the date (days since 1970) is in SMI range.
-const kMinYear = -1000000;
-const kMaxYear = 1000000;
-const kMinMonth = -10000000;
-const kMaxMonth = 10000000;
-const kMinDate = -100000000;
-const kMaxDate = 100000000;
-
-# Native cache ids.
-const STRING_TO_REGEXP_CACHE_ID = 0;
-
-# Type query macros.
-#
-# Note: We have special support for typeof(foo) === 'bar' in the compiler.
-# It will *not* generate a runtime typeof call for the most important
-# values of 'bar'.
-macro IS_NULL(arg) = (arg === null);
-macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
-macro IS_UNDEFINED(arg) = (typeof(arg) === 'undefined');
-macro IS_NUMBER(arg) = (typeof(arg) === 'number');
-macro IS_STRING(arg) = (typeof(arg) === 'string');
-macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
-macro IS_OBJECT(arg) = (%_IsObject(arg));
-macro IS_ARRAY(arg) = (%_IsArray(arg));
-macro IS_FUNCTION(arg) = (%_IsFunction(arg));
-macro IS_REGEXP(arg) = (%_IsRegExp(arg));
-macro IS_DATE(arg) = (%_ClassOf(arg) === 'Date');
-macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number');
-macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String');
-macro IS_BOOLEAN_WRAPPER(arg) = (%_ClassOf(arg) === 'Boolean');
-macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
-macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
-macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments');
-macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
-macro IS_UNDETECTABLE(arg) = (%_IsUndetectableObject(arg));
-macro FLOOR(arg) = $floor(arg);
-
-# Macro for ECMAScript 5 queries of the type:
-# "Type(O) is object."
-# This is the same as being either a function or an object in V8 terminology.
-# In addition, an undetectable object is also included by this.
-macro IS_SPEC_OBJECT(arg) = (%_IsSpecObject(arg));
-
-# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
-macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
-macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || arg - arg == 0);
-macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToInteger(ToNumber(arg)));
-macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(ToNumber(arg)));
-macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0));
-macro TO_UINT32(arg) = (arg >>> 0);
-macro TO_STRING_INLINE(arg) = (IS_STRING(%IS_VAR(arg)) ? arg : NonStringToString(arg));
-macro TO_NUMBER_INLINE(arg) = (IS_NUMBER(%IS_VAR(arg)) ? arg : NonNumberToNumber(arg));
-
-
-# Macros implemented in Python.
-python macro CHAR_CODE(str) = ord(str[1]);
-
-# Constants used on an array to implement the properties of the RegExp object.
-const REGEXP_NUMBER_OF_CAPTURES = 0;
-const REGEXP_FIRST_CAPTURE = 3;
-
-# We can't put macros in macros so we use constants here.
-# REGEXP_NUMBER_OF_CAPTURES
-macro NUMBER_OF_CAPTURES(array) = ((array)[0]);
-
-# Limit according to ECMA 262 15.9.1.1
-const MAX_TIME_MS = 8640000000000000;
-# Limit which is MAX_TIME_MS + msPerMonth.
-const MAX_TIME_BEFORE_UTC = 8640002592000000;
-
-# Gets the value of a Date object. If arg is not a Date object
-# a type error is thrown.
-macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError());
-macro DAY(time) = ($floor(time / 86400000));
-macro NAN_OR_DATE_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : DateFromTime(time));
-macro HOUR_FROM_TIME(time) = (Modulo($floor(time / 3600000), 24));
-macro MIN_FROM_TIME(time) = (Modulo($floor(time / 60000), 60));
-macro NAN_OR_MIN_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : MIN_FROM_TIME(time));
-macro SEC_FROM_TIME(time) = (Modulo($floor(time / 1000), 60));
-macro NAN_OR_SEC_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : SEC_FROM_TIME(time));
-macro MS_FROM_TIME(time) = (Modulo(time, 1000));
-macro NAN_OR_MS_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : MS_FROM_TIME(time));
-
-# Last input and last subject of regexp matches.
-macro LAST_SUBJECT(array) = ((array)[1]);
-macro LAST_INPUT(array) = ((array)[2]);
-
-# REGEXP_FIRST_CAPTURE
-macro CAPTURE(index) = (3 + (index));
-const CAPTURE0 = 3;
-const CAPTURE1 = 4;
-
-# PropertyDescriptor return value indices - must match
-# PropertyDescriptorIndices in runtime.cc.
-const IS_ACCESSOR_INDEX = 0;
-const VALUE_INDEX = 1;
-const GETTER_INDEX = 2;
-const SETTER_INDEX = 3;
-const WRITABLE_INDEX = 4;
-const ENUMERABLE_INDEX = 5;
-const CONFIGURABLE_INDEX = 6;
diff --git a/src/3rdparty/v8/src/mark-compact.cc b/src/3rdparty/v8/src/mark-compact.cc
deleted file mode 100644
index 73bf2f2..0000000
--- a/src/3rdparty/v8/src/mark-compact.cc
+++ /dev/null
@@ -1,3092 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "compilation-cache.h"
-#include "execution.h"
-#include "heap-profiler.h"
-#include "gdb-jit.h"
-#include "global-handles.h"
-#include "ic-inl.h"
-#include "liveobjectlist-inl.h"
-#include "mark-compact.h"
-#include "objects-visiting.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// MarkCompactCollector
-
-MarkCompactCollector::MarkCompactCollector() : // NOLINT
-#ifdef DEBUG
- state_(IDLE),
-#endif
- force_compaction_(false),
- compacting_collection_(false),
- compact_on_next_gc_(false),
- previous_marked_count_(0),
- tracer_(NULL),
-#ifdef DEBUG
- live_young_objects_size_(0),
- live_old_pointer_objects_size_(0),
- live_old_data_objects_size_(0),
- live_code_objects_size_(0),
- live_map_objects_size_(0),
- live_cell_objects_size_(0),
- live_lo_objects_size_(0),
- live_bytes_(0),
-#endif
- heap_(NULL),
- code_flusher_(NULL) { }
-
-
-void MarkCompactCollector::CollectGarbage() {
- // Make sure that Prepare() has been called. The individual steps below will
- // update the state as they proceed.
- ASSERT(state_ == PREPARE_GC);
-
- // Prepare has selected whether to compact the old generation or not.
- // Tell the tracer.
- if (IsCompacting()) tracer_->set_is_compacting();
-
- MarkLiveObjects();
-
- if (FLAG_collect_maps) ClearNonLiveTransitions();
-
- SweepLargeObjectSpace();
-
- if (IsCompacting()) {
- GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
- EncodeForwardingAddresses();
-
- heap()->MarkMapPointersAsEncoded(true);
- UpdatePointers();
- heap()->MarkMapPointersAsEncoded(false);
- heap()->isolate()->pc_to_code_cache()->Flush();
-
- RelocateObjects();
- } else {
- SweepSpaces();
- heap()->isolate()->pc_to_code_cache()->Flush();
- }
-
- Finish();
-
- // Save the count of marked objects remaining after the collection and
- // null out the GC tracer.
- previous_marked_count_ = tracer_->marked_count();
- ASSERT(previous_marked_count_ == 0);
- tracer_ = NULL;
-}
-
-
-void MarkCompactCollector::Prepare(GCTracer* tracer) {
- // Rather than passing the tracer around we stash it in a static member
- // variable.
- tracer_ = tracer;
-
-#ifdef DEBUG
- ASSERT(state_ == IDLE);
- state_ = PREPARE_GC;
-#endif
- ASSERT(!FLAG_always_compact || !FLAG_never_compact);
-
- compacting_collection_ =
- FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
- compact_on_next_gc_ = false;
-
- if (FLAG_never_compact) compacting_collection_ = false;
- if (!heap()->map_space()->MapPointersEncodable())
- compacting_collection_ = false;
- if (FLAG_collect_maps) CreateBackPointers();
-#ifdef ENABLE_GDB_JIT_INTERFACE
- if (FLAG_gdbjit) {
- // If GDBJIT interface is active disable compaction.
- compacting_collection_ = false;
- }
-#endif
-
- PagedSpaces spaces;
- for (PagedSpace* space = spaces.next();
- space != NULL; space = spaces.next()) {
- space->PrepareForMarkCompact(compacting_collection_);
- }
-
-#ifdef DEBUG
- live_bytes_ = 0;
- live_young_objects_size_ = 0;
- live_old_pointer_objects_size_ = 0;
- live_old_data_objects_size_ = 0;
- live_code_objects_size_ = 0;
- live_map_objects_size_ = 0;
- live_cell_objects_size_ = 0;
- live_lo_objects_size_ = 0;
-#endif
-}
-
-
-void MarkCompactCollector::Finish() {
-#ifdef DEBUG
- ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
- state_ = IDLE;
-#endif
- // The stub cache is not traversed during GC; clear the cache to
- // force lazy re-initialization of it. This must be done after the
- // GC, because it relies on the new address of certain old space
- // objects (empty string, illegal builtin).
- heap()->isolate()->stub_cache()->Clear();
-
- heap()->external_string_table_.CleanUp();
-
- // If we've just compacted old space there's no reason to check the
- // fragmentation limit. Just return.
- if (HasCompacted()) return;
-
- // We compact the old generation on the next GC if it has gotten too
- // fragmented (ie, we could recover an expected amount of space by
- // reclaiming the waste and free list blocks).
- static const int kFragmentationLimit = 15; // Percent.
- static const int kFragmentationAllowed = 1 * MB; // Absolute.
- intptr_t old_gen_recoverable = 0;
- intptr_t old_gen_used = 0;
-
- OldSpaces spaces;
- for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
- old_gen_recoverable += space->Waste() + space->AvailableFree();
- old_gen_used += space->Size();
- }
-
- int old_gen_fragmentation =
- static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
- if (old_gen_fragmentation > kFragmentationLimit &&
- old_gen_recoverable > kFragmentationAllowed) {
- compact_on_next_gc_ = true;
- }
-}
-
-
-// -------------------------------------------------------------------------
-// Phase 1: tracing and marking live objects.
-// before: all objects are in normal state.
-// after: a live object's map pointer is marked as '00'.
-
-// Marking all live objects in the heap as part of mark-sweep or mark-compact
-// collection. Before marking, all objects are in their normal state. After
-// marking, live objects' map pointers are marked indicating that the object
-// has been found reachable.
-//
-// The marking algorithm is a (mostly) depth-first (because of possible stack
-// overflow) traversal of the graph of objects reachable from the roots. It
-// uses an explicit stack of pointers rather than recursion. The young
-// generation's inactive ('from') space is used as a marking stack. The
-// objects in the marking stack are the ones that have been reached and marked
-// but their children have not yet been visited.
-//
-// The marking stack can overflow during traversal. In that case, we set an
-// overflow flag. When the overflow flag is set, we continue marking objects
-// reachable from the objects on the marking stack, but no longer push them on
-// the marking stack. Instead, we mark them as both marked and overflowed.
-// When the stack is in the overflowed state, objects marked as overflowed
-// have been reached and marked but their children have not been visited yet.
-// After emptying the marking stack, we clear the overflow flag and traverse
-// the heap looking for objects marked as overflowed, push them on the stack,
-// and continue with marking. This process repeats until all reachable
-// objects have been marked.
-
-class CodeFlusher {
- public:
- explicit CodeFlusher(Isolate* isolate)
- : isolate_(isolate),
- jsfunction_candidates_head_(NULL),
- shared_function_info_candidates_head_(NULL) {}
-
- void AddCandidate(SharedFunctionInfo* shared_info) {
- SetNextCandidate(shared_info, shared_function_info_candidates_head_);
- shared_function_info_candidates_head_ = shared_info;
- }
-
- void AddCandidate(JSFunction* function) {
- ASSERT(function->unchecked_code() ==
- function->unchecked_shared()->unchecked_code());
-
- SetNextCandidate(function, jsfunction_candidates_head_);
- jsfunction_candidates_head_ = function;
- }
-
- void ProcessCandidates() {
- ProcessSharedFunctionInfoCandidates();
- ProcessJSFunctionCandidates();
- }
-
- private:
- void ProcessJSFunctionCandidates() {
- Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
-
- JSFunction* candidate = jsfunction_candidates_head_;
- JSFunction* next_candidate;
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
-
- SharedFunctionInfo* shared = candidate->unchecked_shared();
-
- Code* code = shared->unchecked_code();
- if (!code->IsMarked()) {
- shared->set_code(lazy_compile);
- candidate->set_code(lazy_compile);
- } else {
- candidate->set_code(shared->unchecked_code());
- }
-
- candidate = next_candidate;
- }
-
- jsfunction_candidates_head_ = NULL;
- }
-
-
- void ProcessSharedFunctionInfoCandidates() {
- Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
-
- SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
- SharedFunctionInfo* next_candidate;
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
- SetNextCandidate(candidate, NULL);
-
- Code* code = candidate->unchecked_code();
- if (!code->IsMarked()) {
- candidate->set_code(lazy_compile);
- }
-
- candidate = next_candidate;
- }
-
- shared_function_info_candidates_head_ = NULL;
- }
-
- static JSFunction** GetNextCandidateField(JSFunction* candidate) {
- return reinterpret_cast<JSFunction**>(
- candidate->address() + JSFunction::kCodeEntryOffset);
- }
-
- static JSFunction* GetNextCandidate(JSFunction* candidate) {
- return *GetNextCandidateField(candidate);
- }
-
- static void SetNextCandidate(JSFunction* candidate,
- JSFunction* next_candidate) {
- *GetNextCandidateField(candidate) = next_candidate;
- }
-
- STATIC_ASSERT(kPointerSize <= Code::kHeaderSize - Code::kHeaderPaddingStart);
-
- static SharedFunctionInfo** GetNextCandidateField(
- SharedFunctionInfo* candidate) {
- Code* code = candidate->unchecked_code();
- return reinterpret_cast<SharedFunctionInfo**>(
- code->address() + Code::kHeaderPaddingStart);
- }
-
- static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
- return *GetNextCandidateField(candidate);
- }
-
- static void SetNextCandidate(SharedFunctionInfo* candidate,
- SharedFunctionInfo* next_candidate) {
- *GetNextCandidateField(candidate) = next_candidate;
- }
-
- Isolate* isolate_;
- JSFunction* jsfunction_candidates_head_;
- SharedFunctionInfo* shared_function_info_candidates_head_;
-
- DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
-};
-
-
-MarkCompactCollector::~MarkCompactCollector() {
- if (code_flusher_ != NULL) {
- delete code_flusher_;
- code_flusher_ = NULL;
- }
-}
-
-
-static inline HeapObject* ShortCircuitConsString(Object** p) {
- // Optimization: If the heap object pointed to by p is a non-symbol
- // cons string whose right substring is HEAP->empty_string, update
- // it in place to its left substring. Return the updated value.
- //
- // Here we assume that if we change *p, we replace it with a heap object
- // (ie, the left substring of a cons string is always a heap object).
- //
- // The check performed is:
- // object->IsConsString() && !object->IsSymbol() &&
- // (ConsString::cast(object)->second() == HEAP->empty_string())
- // except the maps for the object and its possible substrings might be
- // marked.
- HeapObject* object = HeapObject::cast(*p);
- MapWord map_word = object->map_word();
- map_word.ClearMark();
- InstanceType type = map_word.ToMap()->instance_type();
- if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
-
- Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
- Heap* heap = map_word.ToMap()->heap();
- if (second != heap->raw_unchecked_empty_string()) {
- return object;
- }
-
- // Since we don't have the object's start, it is impossible to update the
- // page dirty marks. Therefore, we only replace the string with its left
- // substring when page dirty marks do not change.
- Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
- if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
-
- *p = first;
- return HeapObject::cast(first);
-}
-
-
-class StaticMarkingVisitor : public StaticVisitorBase {
- public:
- static inline void IterateBody(Map* map, HeapObject* obj) {
- table_.GetVisitor(map)(map, obj);
- }
-
- static void Initialize() {
- table_.Register(kVisitShortcutCandidate,
- &FixedBodyVisitor<StaticMarkingVisitor,
- ConsString::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitConsString,
- &FixedBodyVisitor<StaticMarkingVisitor,
- ConsString::BodyDescriptor,
- void>::Visit);
-
-
- table_.Register(kVisitFixedArray,
- &FlexibleBodyVisitor<StaticMarkingVisitor,
- FixedArray::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitGlobalContext,
- &FixedBodyVisitor<StaticMarkingVisitor,
- Context::MarkCompactBodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
- table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
- table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
-
- table_.Register(kVisitOddball,
- &FixedBodyVisitor<StaticMarkingVisitor,
- Oddball::BodyDescriptor,
- void>::Visit);
- table_.Register(kVisitMap,
- &FixedBodyVisitor<StaticMarkingVisitor,
- Map::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitCode, &VisitCode);
-
- table_.Register(kVisitSharedFunctionInfo,
- &VisitSharedFunctionInfoAndFlushCode);
-
- table_.Register(kVisitJSFunction,
- &VisitJSFunctionAndFlushCode);
-
- table_.Register(kVisitPropertyCell,
- &FixedBodyVisitor<StaticMarkingVisitor,
- JSGlobalPropertyCell::BodyDescriptor,
- void>::Visit);
-
- table_.RegisterSpecializations<DataObjectVisitor,
- kVisitDataObject,
- kVisitDataObjectGeneric>();
-
- table_.RegisterSpecializations<JSObjectVisitor,
- kVisitJSObject,
- kVisitJSObjectGeneric>();
-
- table_.RegisterSpecializations<StructObjectVisitor,
- kVisitStruct,
- kVisitStructGeneric>();
- }
-
- INLINE(static void VisitPointer(Heap* heap, Object** p)) {
- MarkObjectByPointer(heap, p);
- }
-
- INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
- // Mark all objects pointed to in [start, end).
- const int kMinRangeForMarkingRecursion = 64;
- if (end - start >= kMinRangeForMarkingRecursion) {
- if (VisitUnmarkedObjects(heap, start, end)) return;
- // We are close to a stack overflow, so just mark the objects.
- }
- for (Object** p = start; p < end; p++) MarkObjectByPointer(heap, p);
- }
-
- static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
- if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
- IC::Clear(rinfo->pc());
- // Please note targets for cleared inline cached do not have to be
- // marked since they are contained in HEAP->non_monomorphic_cache().
- } else {
- heap->mark_compact_collector()->MarkObject(code);
- }
- }
-
- static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
- Object* cell = rinfo->target_cell();
- Object* old_cell = cell;
- VisitPointer(heap, &cell);
- if (cell != old_cell) {
- rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
- }
- }
-
- static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
- ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
- rinfo->IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
- rinfo->IsPatchedDebugBreakSlotSequence()));
- HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
- heap->mark_compact_collector()->MarkObject(code);
- }
-
- // Mark object pointed to by p.
- INLINE(static void MarkObjectByPointer(Heap* heap, Object** p)) {
- if (!(*p)->IsHeapObject()) return;
- HeapObject* object = ShortCircuitConsString(p);
- if (!object->IsMarked()) {
- heap->mark_compact_collector()->MarkUnmarkedObject(object);
- }
- }
-
-
- // Visit an unmarked object.
- INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
- HeapObject* obj)) {
-#ifdef DEBUG
- ASSERT(Isolate::Current()->heap()->Contains(obj));
- ASSERT(!obj->IsMarked());
-#endif
- Map* map = obj->map();
- collector->SetMark(obj);
- // Mark the map pointer and the body.
- if (!map->IsMarked()) collector->MarkUnmarkedObject(map);
- IterateBody(map, obj);
- }
-
- // Visit all unmarked objects pointed to by [start, end).
- // Returns false if the operation fails (lack of stack space).
- static inline bool VisitUnmarkedObjects(Heap* heap,
- Object** start,
- Object** end) {
- // Return false is we are close to the stack limit.
- StackLimitCheck check(heap->isolate());
- if (check.HasOverflowed()) return false;
-
- MarkCompactCollector* collector = heap->mark_compact_collector();
- // Visit the unmarked objects.
- for (Object** p = start; p < end; p++) {
- if (!(*p)->IsHeapObject()) continue;
- HeapObject* obj = HeapObject::cast(*p);
- if (obj->IsMarked()) continue;
- VisitUnmarkedObject(collector, obj);
- }
- return true;
- }
-
- static inline void VisitExternalReference(Address* p) { }
- static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
-
- private:
- class DataObjectVisitor {
- public:
- template<int size>
- static void VisitSpecialized(Map* map, HeapObject* object) {
- }
-
- static void Visit(Map* map, HeapObject* object) {
- }
- };
-
- typedef FlexibleBodyVisitor<StaticMarkingVisitor,
- JSObject::BodyDescriptor,
- void> JSObjectVisitor;
-
- typedef FlexibleBodyVisitor<StaticMarkingVisitor,
- StructBodyDescriptor,
- void> StructObjectVisitor;
-
- static void VisitCode(Map* map, HeapObject* object) {
- reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>(
- map->heap());
- }
-
- // Code flushing support.
-
- // How many collections newly compiled code object will survive before being
- // flushed.
- static const int kCodeAgeThreshold = 5;
-
- inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
- Object* undefined = heap->raw_unchecked_undefined_value();
- return (info->script() != undefined) &&
- (reinterpret_cast<Script*>(info->script())->source() != undefined);
- }
-
-
- inline static bool IsCompiled(JSFunction* function) {
- return function->unchecked_code() !=
- function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
- }
-
- inline static bool IsCompiled(SharedFunctionInfo* function) {
- return function->unchecked_code() !=
- function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
- }
-
- inline static bool IsFlushable(Heap* heap, JSFunction* function) {
- SharedFunctionInfo* shared_info = function->unchecked_shared();
-
- // Code is either on stack, in compilation cache or referenced
- // by optimized version of function.
- if (function->unchecked_code()->IsMarked()) {
- shared_info->set_code_age(0);
- return false;
- }
-
- // We do not flush code for optimized functions.
- if (function->code() != shared_info->unchecked_code()) {
- return false;
- }
-
- return IsFlushable(heap, shared_info);
- }
-
- inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
- // Code is either on stack, in compilation cache or referenced
- // by optimized version of function.
- if (shared_info->unchecked_code()->IsMarked()) {
- shared_info->set_code_age(0);
- return false;
- }
-
- // The function must be compiled and have the source code available,
- // to be able to recompile it in case we need the function again.
- if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
- return false;
- }
-
- // We never flush code for Api functions.
- Object* function_data = shared_info->function_data();
- if (function_data->IsHeapObject() &&
- (SafeMap(function_data)->instance_type() ==
- FUNCTION_TEMPLATE_INFO_TYPE)) {
- return false;
- }
-
- // Only flush code for functions.
- if (shared_info->code()->kind() != Code::FUNCTION) return false;
-
- // Function must be lazy compilable.
- if (!shared_info->allows_lazy_compilation()) return false;
-
- // If this is a full script wrapped in a function we do no flush the code.
- if (shared_info->is_toplevel()) return false;
-
- // Age this shared function info.
- if (shared_info->code_age() < kCodeAgeThreshold) {
- shared_info->set_code_age(shared_info->code_age() + 1);
- return false;
- }
-
- return true;
- }
-
-
- static bool FlushCodeForFunction(Heap* heap, JSFunction* function) {
- if (!IsFlushable(heap, function)) return false;
-
- // This function's code looks flushable. But we have to postpone the
- // decision until we see all functions that point to the same
- // SharedFunctionInfo because some of them might be optimized.
- // That would make the nonoptimized version of the code nonflushable,
- // because it is required for bailing out from optimized code.
- heap->mark_compact_collector()->code_flusher()->AddCandidate(function);
- return true;
- }
-
-
- static inline Map* SafeMap(Object* obj) {
- MapWord map_word = HeapObject::cast(obj)->map_word();
- map_word.ClearMark();
- map_word.ClearOverflow();
- return map_word.ToMap();
- }
-
-
- static inline bool IsJSBuiltinsObject(Object* obj) {
- return obj->IsHeapObject() &&
- (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE);
- }
-
-
- static inline bool IsValidNotBuiltinContext(Object* ctx) {
- if (!ctx->IsHeapObject()) return false;
-
- Map* map = SafeMap(ctx);
- Heap* heap = map->heap();
- if (!(map == heap->raw_unchecked_context_map() ||
- map == heap->raw_unchecked_catch_context_map() ||
- map == heap->raw_unchecked_global_context_map())) {
- return false;
- }
-
- Context* context = reinterpret_cast<Context*>(ctx);
-
- if (IsJSBuiltinsObject(context->global())) {
- return false;
- }
-
- return true;
- }
-
-
- static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) {
- SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
-
- if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
-
- FixedBodyVisitor<StaticMarkingVisitor,
- SharedFunctionInfo::BodyDescriptor,
- void>::Visit(map, object);
- }
-
-
- static void VisitSharedFunctionInfoAndFlushCode(Map* map,
- HeapObject* object) {
- MarkCompactCollector* collector = map->heap()->mark_compact_collector();
- if (!collector->is_code_flushing_enabled()) {
- VisitSharedFunctionInfoGeneric(map, object);
- return;
- }
- VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false);
- }
-
-
- static void VisitSharedFunctionInfoAndFlushCodeGeneric(
- Map* map, HeapObject* object, bool known_flush_code_candidate) {
- Heap* heap = map->heap();
- SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
-
- if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
-
- if (!known_flush_code_candidate) {
- known_flush_code_candidate = IsFlushable(heap, shared);
- if (known_flush_code_candidate) {
- heap->mark_compact_collector()->code_flusher()->AddCandidate(shared);
- }
- }
-
- VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate);
- }
-
-
- static void VisitCodeEntry(Heap* heap, Address entry_address) {
- Object* code = Code::GetObjectFromEntryAddress(entry_address);
- Object* old_code = code;
- VisitPointer(heap, &code);
- if (code != old_code) {
- Memory::Address_at(entry_address) =
- reinterpret_cast<Code*>(code)->entry();
- }
- }
-
-
- static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
- Heap* heap = map->heap();
- MarkCompactCollector* collector = heap->mark_compact_collector();
- if (!collector->is_code_flushing_enabled()) {
- VisitJSFunction(map, object);
- return;
- }
-
- JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
- // The function must have a valid context and not be a builtin.
- bool flush_code_candidate = false;
- if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
- flush_code_candidate = FlushCodeForFunction(heap, jsfunction);
- }
-
- if (!flush_code_candidate) {
- collector->MarkObject(jsfunction->unchecked_shared()->unchecked_code());
-
- if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
- // For optimized functions we should retain both non-optimized version
- // of it's code and non-optimized version of all inlined functions.
- // This is required to support bailing out from inlined code.
- DeoptimizationInputData* data =
- reinterpret_cast<DeoptimizationInputData*>(
- jsfunction->unchecked_code()->unchecked_deoptimization_data());
-
- FixedArray* literals = data->UncheckedLiteralArray();
-
- for (int i = 0, count = data->InlinedFunctionCount()->value();
- i < count;
- i++) {
- JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
- collector->MarkObject(inlined->unchecked_shared()->unchecked_code());
- }
- }
- }
-
- VisitJSFunctionFields(map,
- reinterpret_cast<JSFunction*>(object),
- flush_code_candidate);
- }
-
-
- static void VisitJSFunction(Map* map, HeapObject* object) {
- VisitJSFunctionFields(map,
- reinterpret_cast<JSFunction*>(object),
- false);
- }
-
-
-#define SLOT_ADDR(obj, offset) \
- reinterpret_cast<Object**>((obj)->address() + offset)
-
-
- static inline void VisitJSFunctionFields(Map* map,
- JSFunction* object,
- bool flush_code_candidate) {
- Heap* heap = map->heap();
- MarkCompactCollector* collector = heap->mark_compact_collector();
-
- VisitPointers(heap,
- SLOT_ADDR(object, JSFunction::kPropertiesOffset),
- SLOT_ADDR(object, JSFunction::kCodeEntryOffset));
-
- if (!flush_code_candidate) {
- VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
- } else {
- // Don't visit code object.
-
- // Visit shared function info to avoid double checking of it's
- // flushability.
- SharedFunctionInfo* shared_info = object->unchecked_shared();
- if (!shared_info->IsMarked()) {
- Map* shared_info_map = shared_info->map();
- collector->SetMark(shared_info);
- collector->MarkObject(shared_info_map);
- VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
- shared_info,
- true);
- }
- }
-
- VisitPointers(heap,
- SLOT_ADDR(object,
- JSFunction::kCodeEntryOffset + kPointerSize),
- SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset));
-
- // Don't visit the next function list field as it is a weak reference.
- }
-
-
- static void VisitSharedFunctionInfoFields(Heap* heap,
- HeapObject* object,
- bool flush_code_candidate) {
- VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));
-
- if (!flush_code_candidate) {
- VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));
- }
-
- VisitPointers(heap,
- SLOT_ADDR(object, SharedFunctionInfo::kScopeInfoOffset),
- SLOT_ADDR(object, SharedFunctionInfo::kSize));
- }
-
- #undef SLOT_ADDR
-
- typedef void (*Callback)(Map* map, HeapObject* object);
-
- static VisitorDispatchTable<Callback> table_;
-};
-
-
-VisitorDispatchTable<StaticMarkingVisitor::Callback>
- StaticMarkingVisitor::table_;
-
-
-class MarkingVisitor : public ObjectVisitor {
- public:
- explicit MarkingVisitor(Heap* heap) : heap_(heap) { }
-
- void VisitPointer(Object** p) {
- StaticMarkingVisitor::VisitPointer(heap_, p);
- }
-
- void VisitPointers(Object** start, Object** end) {
- StaticMarkingVisitor::VisitPointers(heap_, start, end);
- }
-
- void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
- StaticMarkingVisitor::VisitCodeTarget(heap, rinfo);
- }
-
- void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
- StaticMarkingVisitor::VisitGlobalPropertyCell(heap, rinfo);
- }
-
- void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
- StaticMarkingVisitor::VisitDebugTarget(heap, rinfo);
- }
-
- private:
- Heap* heap_;
-};
-
-
-class CodeMarkingVisitor : public ThreadVisitor {
- public:
- explicit CodeMarkingVisitor(MarkCompactCollector* collector)
- : collector_(collector) {}
-
- void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
- for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
- collector_->MarkObject(it.frame()->unchecked_code());
- }
- }
-
- private:
- MarkCompactCollector* collector_;
-};
-
-
-class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
- public:
- explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
- : collector_(collector) {}
-
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) VisitPointer(p);
- }
-
- void VisitPointer(Object** slot) {
- Object* obj = *slot;
- if (obj->IsSharedFunctionInfo()) {
- SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
- collector_->MarkObject(shared->unchecked_code());
- collector_->MarkObject(shared);
- }
- }
-
- private:
- MarkCompactCollector* collector_;
-};
-
-
-void MarkCompactCollector::PrepareForCodeFlushing() {
- ASSERT(heap() == Isolate::Current()->heap());
-
- if (!FLAG_flush_code) {
- EnableCodeFlushing(false);
- return;
- }
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (heap()->isolate()->debug()->IsLoaded() ||
- heap()->isolate()->debug()->has_break_points()) {
- EnableCodeFlushing(false);
- return;
- }
-#endif
- EnableCodeFlushing(true);
-
- // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
- // relies on it being marked before any other descriptor array.
- MarkObject(heap()->raw_unchecked_empty_descriptor_array());
-
- // Make sure we are not referencing the code from the stack.
- ASSERT(this == heap()->mark_compact_collector());
- for (StackFrameIterator it; !it.done(); it.Advance()) {
- MarkObject(it.frame()->unchecked_code());
- }
-
- // Iterate the archived stacks in all threads to check if
- // the code is referenced.
- CodeMarkingVisitor code_marking_visitor(this);
- heap()->isolate()->thread_manager()->IterateArchivedThreads(
- &code_marking_visitor);
-
- SharedFunctionInfoMarkingVisitor visitor(this);
- heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
- heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
-
- ProcessMarkingStack();
-}
-
-
-// Visitor class for marking heap roots.
-class RootMarkingVisitor : public ObjectVisitor {
- public:
- explicit RootMarkingVisitor(Heap* heap)
- : collector_(heap->mark_compact_collector()) { }
-
- void VisitPointer(Object** p) {
- MarkObjectByPointer(p);
- }
-
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
- }
-
- private:
- void MarkObjectByPointer(Object** p) {
- if (!(*p)->IsHeapObject()) return;
-
- // Replace flat cons strings in place.
- HeapObject* object = ShortCircuitConsString(p);
- if (object->IsMarked()) return;
-
- Map* map = object->map();
- // Mark the object.
- collector_->SetMark(object);
-
- // Mark the map pointer and body, and push them on the marking stack.
- collector_->MarkObject(map);
- StaticMarkingVisitor::IterateBody(map, object);
-
- // Mark all the objects reachable from the map and body. May leave
- // overflowed objects in the heap.
- collector_->EmptyMarkingStack();
- }
-
- MarkCompactCollector* collector_;
-};
-
-
-// Helper class for pruning the symbol table.
-class SymbolTableCleaner : public ObjectVisitor {
- public:
- explicit SymbolTableCleaner(Heap* heap)
- : heap_(heap), pointers_removed_(0) { }
-
- virtual void VisitPointers(Object** start, Object** end) {
- // Visit all HeapObject pointers in [start, end).
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
- // Check if the symbol being pruned is an external symbol. We need to
- // delete the associated external data as this symbol is going away.
-
- // Since no objects have yet been moved we can safely access the map of
- // the object.
- if ((*p)->IsExternalString()) {
- heap_->FinalizeExternalString(String::cast(*p));
- }
- // Set the entry to null_value (as deleted).
- *p = heap_->raw_unchecked_null_value();
- pointers_removed_++;
- }
- }
- }
-
- int PointersRemoved() {
- return pointers_removed_;
- }
- private:
- Heap* heap_;
- int pointers_removed_;
-};
-
-
-// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
-// are retained.
-class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
- public:
- virtual Object* RetainAs(Object* object) {
- MapWord first_word = HeapObject::cast(object)->map_word();
- if (first_word.IsMarked()) {
- return object;
- } else {
- return NULL;
- }
- }
-};
-
-
-void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
- ASSERT(!object->IsMarked());
- ASSERT(HEAP->Contains(object));
- if (object->IsMap()) {
- Map* map = Map::cast(object);
- if (FLAG_cleanup_caches_in_maps_at_gc) {
- map->ClearCodeCache(heap());
- }
- SetMark(map);
- if (FLAG_collect_maps &&
- map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
- map->instance_type() <= JS_FUNCTION_TYPE) {
- MarkMapContents(map);
- } else {
- marking_stack_.Push(map);
- }
- } else {
- SetMark(object);
- marking_stack_.Push(object);
- }
-}
-
-
-void MarkCompactCollector::MarkMapContents(Map* map) {
- MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
- *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));
-
- // Mark the Object* fields of the Map.
- // Since the descriptor array has been marked already, it is fine
- // that one of these fields contains a pointer to it.
- Object** start_slot = HeapObject::RawField(map,
- Map::kPointerFieldsBeginOffset);
-
- Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
-
- StaticMarkingVisitor::VisitPointers(map->heap(), start_slot, end_slot);
-}
-
-
-void MarkCompactCollector::MarkDescriptorArray(
- DescriptorArray* descriptors) {
- if (descriptors->IsMarked()) return;
- // Empty descriptor array is marked as a root before any maps are marked.
- ASSERT(descriptors != HEAP->raw_unchecked_empty_descriptor_array());
- SetMark(descriptors);
-
- FixedArray* contents = reinterpret_cast<FixedArray*>(
- descriptors->get(DescriptorArray::kContentArrayIndex));
- ASSERT(contents->IsHeapObject());
- ASSERT(!contents->IsMarked());
- ASSERT(contents->IsFixedArray());
- ASSERT(contents->length() >= 2);
- SetMark(contents);
- // Contents contains (value, details) pairs. If the details say that the type
- // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
- // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
- // live. Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and
- // CONSTANT_TRANSITION is the value an Object* (a Map*).
- for (int i = 0; i < contents->length(); i += 2) {
- // If the pair (value, details) at index i, i+1 is not
- // a transition or null descriptor, mark the value.
- PropertyDetails details(Smi::cast(contents->get(i + 1)));
- if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
- HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
- if (object->IsHeapObject() && !object->IsMarked()) {
- SetMark(object);
- marking_stack_.Push(object);
- }
- }
- }
- // The DescriptorArray descriptors contains a pointer to its contents array,
- // but the contents array is already marked.
- marking_stack_.Push(descriptors);
-}
-
-
-void MarkCompactCollector::CreateBackPointers() {
- HeapObjectIterator iterator(heap()->map_space());
- for (HeapObject* next_object = iterator.next();
- next_object != NULL; next_object = iterator.next()) {
- if (next_object->IsMap()) { // Could also be ByteArray on free list.
- Map* map = Map::cast(next_object);
- if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
- map->instance_type() <= JS_FUNCTION_TYPE) {
- map->CreateBackPointers();
- } else {
- ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array());
- }
- }
- }
-}
-
-
-static int OverflowObjectSize(HeapObject* obj) {
- // Recover the normal map pointer, it might be marked as live and
- // overflowed.
- MapWord map_word = obj->map_word();
- map_word.ClearMark();
- map_word.ClearOverflow();
- return obj->SizeFromMap(map_word.ToMap());
-}
-
-
-class OverflowedObjectsScanner : public AllStatic {
- public:
- // Fill the marking stack with overflowed objects returned by the given
- // iterator. Stop when the marking stack is filled or the end of the space
- // is reached, whichever comes first.
- template<class T>
- static inline void ScanOverflowedObjects(MarkCompactCollector* collector,
- T* it) {
- // The caller should ensure that the marking stack is initially not full,
- // so that we don't waste effort pointlessly scanning for objects.
- ASSERT(!collector->marking_stack_.is_full());
-
- for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
- if (object->IsOverflowed()) {
- object->ClearOverflow();
- ASSERT(object->IsMarked());
- ASSERT(HEAP->Contains(object));
- collector->marking_stack_.Push(object);
- if (collector->marking_stack_.is_full()) return;
- }
- }
- }
-};
-
-
-bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
- return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
-}
-
-
-void MarkCompactCollector::MarkSymbolTable() {
- SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
- // Mark the symbol table itself.
- SetMark(symbol_table);
- // Explicitly mark the prefix.
- MarkingVisitor marker(heap());
- symbol_table->IteratePrefix(&marker);
- ProcessMarkingStack();
-}
-
-
-void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
- // Mark the heap roots including global variables, stack variables,
- // etc., and all objects reachable from them.
- heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
-
- // Handle the symbol table specially.
- MarkSymbolTable();
-
- // There may be overflowed objects in the heap. Visit them now.
- while (marking_stack_.overflowed()) {
- RefillMarkingStack();
- EmptyMarkingStack();
- }
-}
-
-
-void MarkCompactCollector::MarkObjectGroups() {
- List<ObjectGroup*>* object_groups =
- heap()->isolate()->global_handles()->object_groups();
-
- for (int i = 0; i < object_groups->length(); i++) {
- ObjectGroup* entry = object_groups->at(i);
- if (entry == NULL) continue;
-
- List<Object**>& objects = entry->objects_;
- bool group_marked = false;
- for (int j = 0; j < objects.length(); j++) {
- Object* object = *objects[j];
- if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
- group_marked = true;
- break;
- }
- }
-
- if (!group_marked) continue;
-
- // An object in the group is marked, so mark as gray all white heap
- // objects in the group.
- for (int j = 0; j < objects.length(); ++j) {
- if ((*objects[j])->IsHeapObject()) {
- MarkObject(HeapObject::cast(*objects[j]));
- }
- }
-
- // Once the entire group has been colored gray, set the object group
- // to NULL so it won't be processed again.
- delete entry;
- object_groups->at(i) = NULL;
- }
-}
-
-
-void MarkCompactCollector::MarkImplicitRefGroups() {
- List<ImplicitRefGroup*>* ref_groups =
- heap()->isolate()->global_handles()->implicit_ref_groups();
-
- for (int i = 0; i < ref_groups->length(); i++) {
- ImplicitRefGroup* entry = ref_groups->at(i);
- if (entry == NULL) continue;
-
- if (!entry->parent_->IsMarked()) continue;
-
- List<Object**>& children = entry->children_;
- // A parent object is marked, so mark as gray all child white heap
- // objects.
- for (int j = 0; j < children.length(); ++j) {
- if ((*children[j])->IsHeapObject()) {
- MarkObject(HeapObject::cast(*children[j]));
- }
- }
-
- // Once the entire group has been colored gray, set the group
- // to NULL so it won't be processed again.
- delete entry;
- ref_groups->at(i) = NULL;
- }
-}
-
-
-// Mark all objects reachable from the objects on the marking stack.
-// Before: the marking stack contains zero or more heap object pointers.
-// After: the marking stack is empty, and all objects reachable from the
-// marking stack have been marked, or are overflowed in the heap.
-void MarkCompactCollector::EmptyMarkingStack() {
- while (!marking_stack_.is_empty()) {
- HeapObject* object = marking_stack_.Pop();
- ASSERT(object->IsHeapObject());
- ASSERT(heap()->Contains(object));
- ASSERT(object->IsMarked());
- ASSERT(!object->IsOverflowed());
-
- // Because the object is marked, we have to recover the original map
- // pointer and use it to mark the object's body.
- MapWord map_word = object->map_word();
- map_word.ClearMark();
- Map* map = map_word.ToMap();
- MarkObject(map);
-
- StaticMarkingVisitor::IterateBody(map, object);
- }
-}
-
-
-// Sweep the heap for overflowed objects, clear their overflow bits, and
-// push them on the marking stack. Stop early if the marking stack fills
-// before sweeping completes. If sweeping completes, there are no remaining
-// overflowed objects in the heap so the overflow flag on the markings stack
-// is cleared.
-void MarkCompactCollector::RefillMarkingStack() {
- ASSERT(marking_stack_.overflowed());
-
- SemiSpaceIterator new_it(heap()->new_space(), &OverflowObjectSize);
- OverflowedObjectsScanner::ScanOverflowedObjects(this, &new_it);
- if (marking_stack_.is_full()) return;
-
- HeapObjectIterator old_pointer_it(heap()->old_pointer_space(),
- &OverflowObjectSize);
- OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_pointer_it);
- if (marking_stack_.is_full()) return;
-
- HeapObjectIterator old_data_it(heap()->old_data_space(), &OverflowObjectSize);
- OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_data_it);
- if (marking_stack_.is_full()) return;
-
- HeapObjectIterator code_it(heap()->code_space(), &OverflowObjectSize);
- OverflowedObjectsScanner::ScanOverflowedObjects(this, &code_it);
- if (marking_stack_.is_full()) return;
-
- HeapObjectIterator map_it(heap()->map_space(), &OverflowObjectSize);
- OverflowedObjectsScanner::ScanOverflowedObjects(this, &map_it);
- if (marking_stack_.is_full()) return;
-
- HeapObjectIterator cell_it(heap()->cell_space(), &OverflowObjectSize);
- OverflowedObjectsScanner::ScanOverflowedObjects(this, &cell_it);
- if (marking_stack_.is_full()) return;
-
- LargeObjectIterator lo_it(heap()->lo_space(), &OverflowObjectSize);
- OverflowedObjectsScanner::ScanOverflowedObjects(this, &lo_it);
- if (marking_stack_.is_full()) return;
-
- marking_stack_.clear_overflowed();
-}
-
-
-// Mark all objects reachable (transitively) from objects on the marking
-// stack. Before: the marking stack contains zero or more heap object
-// pointers. After: the marking stack is empty and there are no overflowed
-// objects in the heap.
-void MarkCompactCollector::ProcessMarkingStack() {
- EmptyMarkingStack();
- while (marking_stack_.overflowed()) {
- RefillMarkingStack();
- EmptyMarkingStack();
- }
-}
-
-
-void MarkCompactCollector::ProcessExternalMarking() {
- bool work_to_do = true;
- ASSERT(marking_stack_.is_empty());
- while (work_to_do) {
- MarkObjectGroups();
- MarkImplicitRefGroups();
- work_to_do = !marking_stack_.is_empty();
- ProcessMarkingStack();
- }
-}
-
-
-void MarkCompactCollector::MarkLiveObjects() {
- GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
- // The recursive GC marker detects when it is nearing stack overflow,
- // and switches to a different marking system. JS interrupts interfere
- // with the C stack limit check.
- PostponeInterruptsScope postpone(heap()->isolate());
-
-#ifdef DEBUG
- ASSERT(state_ == PREPARE_GC);
- state_ = MARK_LIVE_OBJECTS;
-#endif
- // The to space contains live objects, the from space is used as a marking
- // stack.
- marking_stack_.Initialize(heap()->new_space()->FromSpaceLow(),
- heap()->new_space()->FromSpaceHigh());
-
- ASSERT(!marking_stack_.overflowed());
-
- PrepareForCodeFlushing();
-
- RootMarkingVisitor root_visitor(heap());
- MarkRoots(&root_visitor);
-
- // The objects reachable from the roots are marked, yet unreachable
- // objects are unmarked. Mark objects reachable due to host
- // application specific logic.
- ProcessExternalMarking();
-
- // The objects reachable from the roots or object groups are marked,
- // yet unreachable objects are unmarked. Mark objects reachable
- // only from weak global handles.
- //
- // First we identify nonlive weak handles and mark them as pending
- // destruction.
- heap()->isolate()->global_handles()->IdentifyWeakHandles(
- &IsUnmarkedHeapObject);
- // Then we mark the objects and process the transitive closure.
- heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
- while (marking_stack_.overflowed()) {
- RefillMarkingStack();
- EmptyMarkingStack();
- }
-
- // Repeat host application specific marking to mark unmarked objects
- // reachable from the weak roots.
- ProcessExternalMarking();
-
- // Prune the symbol table removing all symbols only pointed to by the
- // symbol table. Cannot use symbol_table() here because the symbol
- // table is marked.
- SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
- SymbolTableCleaner v(heap());
- symbol_table->IterateElements(&v);
- symbol_table->ElementsRemoved(v.PointersRemoved());
- heap()->external_string_table_.Iterate(&v);
- heap()->external_string_table_.CleanUp();
-
- // Process the weak references.
- MarkCompactWeakObjectRetainer mark_compact_object_retainer;
- heap()->ProcessWeakReferences(&mark_compact_object_retainer);
-
- // Remove object groups after marking phase.
- heap()->isolate()->global_handles()->RemoveObjectGroups();
- heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
-
- // Flush code from collected candidates.
- if (is_code_flushing_enabled()) {
- code_flusher_->ProcessCandidates();
- }
-
- // Clean up dead objects from the runtime profiler.
- heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
-}
-
-
-#ifdef DEBUG
-void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
- live_bytes_ += obj->Size();
- if (heap()->new_space()->Contains(obj)) {
- live_young_objects_size_ += obj->Size();
- } else if (heap()->map_space()->Contains(obj)) {
- ASSERT(obj->IsMap());
- live_map_objects_size_ += obj->Size();
- } else if (heap()->cell_space()->Contains(obj)) {
- ASSERT(obj->IsJSGlobalPropertyCell());
- live_cell_objects_size_ += obj->Size();
- } else if (heap()->old_pointer_space()->Contains(obj)) {
- live_old_pointer_objects_size_ += obj->Size();
- } else if (heap()->old_data_space()->Contains(obj)) {
- live_old_data_objects_size_ += obj->Size();
- } else if (heap()->code_space()->Contains(obj)) {
- live_code_objects_size_ += obj->Size();
- } else if (heap()->lo_space()->Contains(obj)) {
- live_lo_objects_size_ += obj->Size();
- } else {
- UNREACHABLE();
- }
-}
-#endif // DEBUG
-
-
-void MarkCompactCollector::SweepLargeObjectSpace() {
-#ifdef DEBUG
- ASSERT(state_ == MARK_LIVE_OBJECTS);
- state_ =
- compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
-#endif
- // Deallocate unmarked objects and clear marked bits for marked objects.
- heap()->lo_space()->FreeUnmarkedObjects();
-}
-
-
-// Safe to use during marking phase only.
-bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
- MapWord metamap = object->map_word();
- metamap.ClearMark();
- return metamap.ToMap()->instance_type() == MAP_TYPE;
-}
-
-
-void MarkCompactCollector::ClearNonLiveTransitions() {
- HeapObjectIterator map_iterator(heap() ->map_space(), &SizeOfMarkedObject);
- // Iterate over the map space, setting map transitions that go from
- // a marked map to an unmarked map to null transitions. At the same time,
- // set all the prototype fields of maps back to their original value,
- // dropping the back pointers temporarily stored in the prototype field.
- // Setting the prototype field requires following the linked list of
- // back pointers, reversing them all at once. This allows us to find
- // those maps with map transitions that need to be nulled, and only
- // scan the descriptor arrays of those maps, not all maps.
- // All of these actions are carried out only on maps of JSObjects
- // and related subtypes.
- for (HeapObject* obj = map_iterator.next();
- obj != NULL; obj = map_iterator.next()) {
- Map* map = reinterpret_cast<Map*>(obj);
- if (!map->IsMarked() && map->IsByteArray()) continue;
-
- ASSERT(SafeIsMap(map));
- // Only JSObject and subtypes have map transitions and back pointers.
- if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
- if (map->instance_type() > JS_FUNCTION_TYPE) continue;
-
- if (map->IsMarked() && map->attached_to_shared_function_info()) {
- // This map is used for inobject slack tracking and has been detached
- // from SharedFunctionInfo during the mark phase.
- // Since it survived the GC, reattach it now.
- map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
- }
-
- // Follow the chain of back pointers to find the prototype.
- Map* current = map;
- while (SafeIsMap(current)) {
- current = reinterpret_cast<Map*>(current->prototype());
- ASSERT(current->IsHeapObject());
- }
- Object* real_prototype = current;
-
- // Follow back pointers, setting them to prototype,
- // clearing map transitions when necessary.
- current = map;
- bool on_dead_path = !current->IsMarked();
- Object* next;
- while (SafeIsMap(current)) {
- next = current->prototype();
- // There should never be a dead map above a live map.
- ASSERT(on_dead_path || current->IsMarked());
-
- // A live map above a dead map indicates a dead transition.
- // This test will always be false on the first iteration.
- if (on_dead_path && current->IsMarked()) {
- on_dead_path = false;
- current->ClearNonLiveTransitions(heap(), real_prototype);
- }
- *HeapObject::RawField(current, Map::kPrototypeOffset) =
- real_prototype;
- current = reinterpret_cast<Map*>(next);
- }
- }
-}
-
-// -------------------------------------------------------------------------
-// Phase 2: Encode forwarding addresses.
-// When compacting, forwarding addresses for objects in old space and map
-// space are encoded in their map pointer word (along with an encoding of
-// their map pointers).
-//
-// The excact encoding is described in the comments for class MapWord in
-// objects.h.
-//
-// An address range [start, end) can have both live and non-live objects.
-// Maximal non-live regions are marked so they can be skipped on subsequent
-// sweeps of the heap. A distinguished map-pointer encoding is used to mark
-// free regions of one-word size (in which case the next word is the start
-// of a live object). A second distinguished map-pointer encoding is used
-// to mark free regions larger than one word, and the size of the free
-// region (including the first word) is written to the second word of the
-// region.
-//
-// Any valid map page offset must lie in the object area of the page, so map
-// page offsets less than Page::kObjectStartOffset are invalid. We use a
-// pair of distinguished invalid map encodings (for single word and multiple
-// words) to indicate free regions in the page found during computation of
-// forwarding addresses and skipped over in subsequent sweeps.
-
-
-// Encode a free region, defined by the given start address and size, in the
-// first word or two of the region.
-void EncodeFreeRegion(Address free_start, int free_size) {
- ASSERT(free_size >= kIntSize);
- if (free_size == kIntSize) {
- Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
- } else {
- ASSERT(free_size >= 2 * kIntSize);
- Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
- Memory::int_at(free_start + kIntSize) = free_size;
- }
-
-#ifdef DEBUG
- // Zap the body of the free region.
- if (FLAG_enable_slow_asserts) {
- for (int offset = 2 * kIntSize;
- offset < free_size;
- offset += kPointerSize) {
- Memory::Address_at(free_start + offset) = kZapValue;
- }
- }
-#endif
-}
-
-
-// Try to promote all objects in new space. Heap numbers and sequential
-// strings are promoted to the code space, large objects to large object space,
-// and all others to the old space.
-inline MaybeObject* MCAllocateFromNewSpace(Heap* heap,
- HeapObject* object,
- int object_size) {
- MaybeObject* forwarded;
- if (object_size > heap->MaxObjectSizeInPagedSpace()) {
- forwarded = Failure::Exception();
- } else {
- OldSpace* target_space = heap->TargetSpace(object);
- ASSERT(target_space == heap->old_pointer_space() ||
- target_space == heap->old_data_space());
- forwarded = target_space->MCAllocateRaw(object_size);
- }
- Object* result;
- if (!forwarded->ToObject(&result)) {
- result = heap->new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked();
- }
- return result;
-}
-
-
-// Allocation functions for the paged spaces call the space's MCAllocateRaw.
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldPointerSpace(
- Heap *heap,
- HeapObject* ignore,
- int object_size) {
- return heap->old_pointer_space()->MCAllocateRaw(object_size);
-}
-
-
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldDataSpace(
- Heap* heap,
- HeapObject* ignore,
- int object_size) {
- return heap->old_data_space()->MCAllocateRaw(object_size);
-}
-
-
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromCodeSpace(
- Heap* heap,
- HeapObject* ignore,
- int object_size) {
- return heap->code_space()->MCAllocateRaw(object_size);
-}
-
-
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromMapSpace(
- Heap* heap,
- HeapObject* ignore,
- int object_size) {
- return heap->map_space()->MCAllocateRaw(object_size);
-}
-
-
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(
- Heap* heap, HeapObject* ignore, int object_size) {
- return heap->cell_space()->MCAllocateRaw(object_size);
-}
-
-
-// The forwarding address is encoded at the same offset as the current
-// to-space object, but in from space.
-inline void EncodeForwardingAddressInNewSpace(Heap* heap,
- HeapObject* old_object,
- int object_size,
- Object* new_object,
- int* ignored) {
- int offset =
- heap->new_space()->ToSpaceOffsetForAddress(old_object->address());
- Memory::Address_at(heap->new_space()->FromSpaceLow() + offset) =
- HeapObject::cast(new_object)->address();
-}
-
-
-// The forwarding address is encoded in the map pointer of the object as an
-// offset (in terms of live bytes) from the address of the first live object
-// in the page.
-inline void EncodeForwardingAddressInPagedSpace(Heap* heap,
- HeapObject* old_object,
- int object_size,
- Object* new_object,
- int* offset) {
- // Record the forwarding address of the first live object if necessary.
- if (*offset == 0) {
- Page::FromAddress(old_object->address())->mc_first_forwarded =
- HeapObject::cast(new_object)->address();
- }
-
- MapWord encoding =
- MapWord::EncodeAddress(old_object->map()->address(), *offset);
- old_object->set_map_word(encoding);
- *offset += object_size;
- ASSERT(*offset <= Page::kObjectAreaSize);
-}
-
-
-// Most non-live objects are ignored.
-inline void IgnoreNonLiveObject(HeapObject* object, Isolate* isolate) {}
-
-
-// Function template that, given a range of addresses (eg, a semispace or a
-// paged space page), iterates through the objects in the range to clear
-// mark bits and compute and encode forwarding addresses. As a side effect,
-// maximal free chunks are marked so that they can be skipped on subsequent
-// sweeps.
-//
-// The template parameters are an allocation function, a forwarding address
-// encoding function, and a function to process non-live objects.
-template<MarkCompactCollector::AllocationFunction Alloc,
- MarkCompactCollector::EncodingFunction Encode,
- MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
-inline void EncodeForwardingAddressesInRange(MarkCompactCollector* collector,
- Address start,
- Address end,
- int* offset) {
- // The start address of the current free region while sweeping the space.
- // This address is set when a transition from live to non-live objects is
- // encountered. A value (an encoding of the 'next free region' pointer)
- // is written to memory at this address when a transition from non-live to
- // live objects is encountered.
- Address free_start = NULL;
-
- // A flag giving the state of the previously swept object. Initially true
- // to ensure that free_start is initialized to a proper address before
- // trying to write to it.
- bool is_prev_alive = true;
-
- int object_size; // Will be set on each iteration of the loop.
- for (Address current = start; current < end; current += object_size) {
- HeapObject* object = HeapObject::FromAddress(current);
- if (object->IsMarked()) {
- object->ClearMark();
- collector->tracer()->decrement_marked_count();
- object_size = object->Size();
-
- Object* forwarded =
- Alloc(collector->heap(), object, object_size)->ToObjectUnchecked();
- Encode(collector->heap(), object, object_size, forwarded, offset);
-
-#ifdef DEBUG
- if (FLAG_gc_verbose) {
- PrintF("forward %p -> %p.\n", object->address(),
- HeapObject::cast(forwarded)->address());
- }
-#endif
- if (!is_prev_alive) { // Transition from non-live to live.
- EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
- is_prev_alive = true;
- }
- } else { // Non-live object.
- object_size = object->Size();
- ProcessNonLive(object, collector->heap()->isolate());
- if (is_prev_alive) { // Transition from live to non-live.
- free_start = current;
- is_prev_alive = false;
- }
- LiveObjectList::ProcessNonLive(object);
- }
- }
-
- // If we ended on a free region, mark it.
- if (!is_prev_alive) {
- EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
- }
-}
-
-
-// Functions to encode the forwarding pointers in each compactable space.
-void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
- int ignored;
- EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
- EncodeForwardingAddressInNewSpace,
- IgnoreNonLiveObject>(
- this,
- heap()->new_space()->bottom(),
- heap()->new_space()->top(),
- &ignored);
-}
-
-
-template<MarkCompactCollector::AllocationFunction Alloc,
- MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
-void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
- PagedSpace* space) {
- PageIterator it(space, PageIterator::PAGES_IN_USE);
- while (it.has_next()) {
- Page* p = it.next();
-
- // The offset of each live object in the page from the first live object
- // in the page.
- int offset = 0;
- EncodeForwardingAddressesInRange<Alloc,
- EncodeForwardingAddressInPagedSpace,
- ProcessNonLive>(
- this,
- p->ObjectAreaStart(),
- p->AllocationTop(),
- &offset);
- }
-}
-
-
-// We scavange new space simultaneously with sweeping. This is done in two
-// passes.
-// The first pass migrates all alive objects from one semispace to another or
-// promotes them to old space. Forwading address is written directly into
-// first word of object without any encoding. If object is dead we are writing
-// NULL as a forwarding address.
-// The second pass updates pointers to new space in all spaces. It is possible
-// to encounter pointers to dead objects during traversal of dirty regions we
-// should clear them to avoid encountering them during next dirty regions
-// iteration.
-static void MigrateObject(Heap* heap,
- Address dst,
- Address src,
- int size,
- bool to_old_space) {
- if (to_old_space) {
- heap->CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
- } else {
- heap->CopyBlock(dst, src, size);
- }
-
- Memory::Address_at(src) = dst;
-}
-
-
-class StaticPointersToNewGenUpdatingVisitor : public
- StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
- public:
- static inline void VisitPointer(Heap* heap, Object** p) {
- if (!(*p)->IsHeapObject()) return;
-
- HeapObject* obj = HeapObject::cast(*p);
- Address old_addr = obj->address();
-
- if (heap->new_space()->Contains(obj)) {
- ASSERT(heap->InFromSpace(*p));
- *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
- }
- }
-};
-
-
-// Visitor for updating pointers from live objects in old spaces to new space.
-// It does not expect to encounter pointers to dead objects.
-class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
- public:
- explicit PointersToNewGenUpdatingVisitor(Heap* heap) : heap_(heap) { }
-
- void VisitPointer(Object** p) {
- StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
- }
-
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
- StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
- }
- }
-
- void VisitCodeTarget(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- VisitPointer(&target);
- rinfo->set_target_address(Code::cast(target)->instruction_start());
- }
-
- void VisitDebugTarget(RelocInfo* rinfo) {
- ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
- rinfo->IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
- rinfo->IsPatchedDebugBreakSlotSequence()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
- VisitPointer(&target);
- rinfo->set_call_address(Code::cast(target)->instruction_start());
- }
- private:
- Heap* heap_;
-};
-
-
-// Visitor for updating pointers from live objects in old spaces to new space.
-// It can encounter pointers to dead objects in new space when traversing map
-// space (see comment for MigrateObject).
-static void UpdatePointerToNewGen(HeapObject** p) {
- if (!(*p)->IsHeapObject()) return;
-
- Address old_addr = (*p)->address();
- ASSERT(HEAP->InFromSpace(*p));
-
- Address new_addr = Memory::Address_at(old_addr);
-
- if (new_addr == NULL) {
- // We encountered pointer to a dead object. Clear it so we will
- // not visit it again during next iteration of dirty regions.
- *p = NULL;
- } else {
- *p = HeapObject::FromAddress(new_addr);
- }
-}
-
-
-static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
- Object** p) {
- Address old_addr = HeapObject::cast(*p)->address();
- Address new_addr = Memory::Address_at(old_addr);
- return String::cast(HeapObject::FromAddress(new_addr));
-}
-
-
-static bool TryPromoteObject(Heap* heap, HeapObject* object, int object_size) {
- Object* result;
-
- if (object_size > heap->MaxObjectSizeInPagedSpace()) {
- MaybeObject* maybe_result =
- heap->lo_space()->AllocateRawFixedArray(object_size);
- if (maybe_result->ToObject(&result)) {
- HeapObject* target = HeapObject::cast(result);
- MigrateObject(heap, target->address(), object->address(), object_size,
- true);
- heap->mark_compact_collector()->tracer()->
- increment_promoted_objects_size(object_size);
- return true;
- }
- } else {
- OldSpace* target_space = heap->TargetSpace(object);
-
- ASSERT(target_space == heap->old_pointer_space() ||
- target_space == heap->old_data_space());
- MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
- if (maybe_result->ToObject(&result)) {
- HeapObject* target = HeapObject::cast(result);
- MigrateObject(heap,
- target->address(),
- object->address(),
- object_size,
- target_space == heap->old_pointer_space());
- heap->mark_compact_collector()->tracer()->
- increment_promoted_objects_size(object_size);
- return true;
- }
- }
-
- return false;
-}
-
-
-static void SweepNewSpace(Heap* heap, NewSpace* space) {
- heap->CheckNewSpaceExpansionCriteria();
-
- Address from_bottom = space->bottom();
- Address from_top = space->top();
-
- // Flip the semispaces. After flipping, to space is empty, from space has
- // live objects.
- space->Flip();
- space->ResetAllocationInfo();
-
- int size = 0;
- int survivors_size = 0;
-
- // First pass: traverse all objects in inactive semispace, remove marks,
- // migrate live objects and write forwarding addresses.
- for (Address current = from_bottom; current < from_top; current += size) {
- HeapObject* object = HeapObject::FromAddress(current);
-
- if (object->IsMarked()) {
- object->ClearMark();
- heap->mark_compact_collector()->tracer()->decrement_marked_count();
-
- size = object->Size();
- survivors_size += size;
-
- // Aggressively promote young survivors to the old space.
- if (TryPromoteObject(heap, object, size)) {
- continue;
- }
-
- // Promotion failed. Just migrate object to another semispace.
- // Allocation cannot fail at this point: semispaces are of equal size.
- Object* target = space->AllocateRaw(size)->ToObjectUnchecked();
-
- MigrateObject(heap,
- HeapObject::cast(target)->address(),
- current,
- size,
- false);
- } else {
- // Process the dead object before we write a NULL into its header.
- LiveObjectList::ProcessNonLive(object);
-
- size = object->Size();
- Memory::Address_at(current) = NULL;
- }
- }
-
- // Second pass: find pointers to new space and update them.
- PointersToNewGenUpdatingVisitor updating_visitor(heap);
-
- // Update pointers in to space.
- Address current = space->bottom();
- while (current < space->top()) {
- HeapObject* object = HeapObject::FromAddress(current);
- current +=
- StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
- object);
- }
-
- // Update roots.
- heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
- LiveObjectList::IterateElements(&updating_visitor);
-
- // Update pointers in old spaces.
- heap->IterateDirtyRegions(heap->old_pointer_space(),
- &Heap::IteratePointersInDirtyRegion,
- &UpdatePointerToNewGen,
- heap->WATERMARK_SHOULD_BE_VALID);
-
- heap->lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
-
- // Update pointers from cells.
- HeapObjectIterator cell_iterator(heap->cell_space());
- for (HeapObject* cell = cell_iterator.next();
- cell != NULL;
- cell = cell_iterator.next()) {
- if (cell->IsJSGlobalPropertyCell()) {
- Address value_address =
- reinterpret_cast<Address>(cell) +
- (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
- updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
- }
- }
-
- // Update pointer from the global contexts list.
- updating_visitor.VisitPointer(heap->global_contexts_list_address());
-
- // Update pointers from external string table.
- heap->UpdateNewSpaceReferencesInExternalStringTable(
- &UpdateNewSpaceReferenceInExternalStringTableEntry);
-
- // All pointers were updated. Update auxiliary allocation info.
- heap->IncrementYoungSurvivorsCounter(survivors_size);
- space->set_age_mark(space->top());
-
- // Update JSFunction pointers from the runtime profiler.
- heap->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
-}
-
-
-static void SweepSpace(Heap* heap, PagedSpace* space) {
- PageIterator it(space, PageIterator::PAGES_IN_USE);
-
- // During sweeping of paged space we are trying to find longest sequences
- // of pages without live objects and free them (instead of putting them on
- // the free list).
-
- // Page preceding current.
- Page* prev = Page::FromAddress(NULL);
-
- // First empty page in a sequence.
- Page* first_empty_page = Page::FromAddress(NULL);
-
- // Page preceding first empty page.
- Page* prec_first_empty_page = Page::FromAddress(NULL);
-
- // If last used page of space ends with a sequence of dead objects
- // we can adjust allocation top instead of puting this free area into
- // the free list. Thus during sweeping we keep track of such areas
- // and defer their deallocation until the sweeping of the next page
- // is done: if one of the next pages contains live objects we have
- // to put such area into the free list.
- Address last_free_start = NULL;
- int last_free_size = 0;
-
- while (it.has_next()) {
- Page* p = it.next();
-
- bool is_previous_alive = true;
- Address free_start = NULL;
- HeapObject* object;
-
- for (Address current = p->ObjectAreaStart();
- current < p->AllocationTop();
- current += object->Size()) {
- object = HeapObject::FromAddress(current);
- if (object->IsMarked()) {
- object->ClearMark();
- heap->mark_compact_collector()->tracer()->decrement_marked_count();
-
- if (!is_previous_alive) { // Transition from free to live.
- space->DeallocateBlock(free_start,
- static_cast<int>(current - free_start),
- true);
- is_previous_alive = true;
- }
- } else {
- heap->mark_compact_collector()->ReportDeleteIfNeeded(
- object, heap->isolate());
- if (is_previous_alive) { // Transition from live to free.
- free_start = current;
- is_previous_alive = false;
- }
- LiveObjectList::ProcessNonLive(object);
- }
- // The object is now unmarked for the call to Size() at the top of the
- // loop.
- }
-
- bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
- || (!is_previous_alive && free_start == p->ObjectAreaStart());
-
- if (page_is_empty) {
- // This page is empty. Check whether we are in the middle of
- // sequence of empty pages and start one if not.
- if (!first_empty_page->is_valid()) {
- first_empty_page = p;
- prec_first_empty_page = prev;
- }
-
- if (!is_previous_alive) {
- // There are dead objects on this page. Update space accounting stats
- // without putting anything into free list.
- int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
- if (size_in_bytes > 0) {
- space->DeallocateBlock(free_start, size_in_bytes, false);
- }
- }
- } else {
- // This page is not empty. Sequence of empty pages ended on the previous
- // one.
- if (first_empty_page->is_valid()) {
- space->FreePages(prec_first_empty_page, prev);
- prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
- }
-
- // If there is a free ending area on one of the previous pages we have
- // deallocate that area and put it on the free list.
- if (last_free_size > 0) {
- Page::FromAddress(last_free_start)->
- SetAllocationWatermark(last_free_start);
- space->DeallocateBlock(last_free_start, last_free_size, true);
- last_free_start = NULL;
- last_free_size = 0;
- }
-
- // If the last region of this page was not live we remember it.
- if (!is_previous_alive) {
- ASSERT(last_free_size == 0);
- last_free_size = static_cast<int>(p->AllocationTop() - free_start);
- last_free_start = free_start;
- }
- }
-
- prev = p;
- }
-
- // We reached end of space. See if we need to adjust allocation top.
- Address new_allocation_top = NULL;
-
- if (first_empty_page->is_valid()) {
- // Last used pages in space are empty. We can move allocation top backwards
- // to the beginning of first empty page.
- ASSERT(prev == space->AllocationTopPage());
-
- new_allocation_top = first_empty_page->ObjectAreaStart();
- }
-
- if (last_free_size > 0) {
- // There was a free ending area on the previous page.
- // Deallocate it without putting it into freelist and move allocation
- // top to the beginning of this free area.
- space->DeallocateBlock(last_free_start, last_free_size, false);
- new_allocation_top = last_free_start;
- }
-
- if (new_allocation_top != NULL) {
-#ifdef DEBUG
- Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
- if (!first_empty_page->is_valid()) {
- ASSERT(new_allocation_top_page == space->AllocationTopPage());
- } else if (last_free_size > 0) {
- ASSERT(new_allocation_top_page == prec_first_empty_page);
- } else {
- ASSERT(new_allocation_top_page == first_empty_page);
- }
-#endif
-
- space->SetTop(new_allocation_top);
- }
-}
-
-
-void MarkCompactCollector::EncodeForwardingAddresses() {
- ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
- // Objects in the active semispace of the young generation may be
- // relocated to the inactive semispace (if not promoted). Set the
- // relocation info to the beginning of the inactive semispace.
- heap()->new_space()->MCResetRelocationInfo();
-
- // Compute the forwarding pointers in each space.
- EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
- ReportDeleteIfNeeded>(
- heap()->old_pointer_space());
-
- EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
- IgnoreNonLiveObject>(
- heap()->old_data_space());
-
- EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
- ReportDeleteIfNeeded>(
- heap()->code_space());
-
- EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
- IgnoreNonLiveObject>(
- heap()->cell_space());
-
-
- // Compute new space next to last after the old and code spaces have been
- // compacted. Objects in new space can be promoted to old or code space.
- EncodeForwardingAddressesInNewSpace();
-
- // Compute map space last because computing forwarding addresses
- // overwrites non-live objects. Objects in the other spaces rely on
- // non-live map pointers to get the sizes of non-live objects.
- EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
- IgnoreNonLiveObject>(
- heap()->map_space());
-
- // Write relocation info to the top page, so we can use it later. This is
- // done after promoting objects from the new space so we get the correct
- // allocation top.
- heap()->old_pointer_space()->MCWriteRelocationInfoToPage();
- heap()->old_data_space()->MCWriteRelocationInfoToPage();
- heap()->code_space()->MCWriteRelocationInfoToPage();
- heap()->map_space()->MCWriteRelocationInfoToPage();
- heap()->cell_space()->MCWriteRelocationInfoToPage();
-}
-
-
-class MapIterator : public HeapObjectIterator {
- public:
- explicit MapIterator(Heap* heap)
- : HeapObjectIterator(heap->map_space(), &SizeCallback) { }
-
- MapIterator(Heap* heap, Address start)
- : HeapObjectIterator(heap->map_space(), start, &SizeCallback) { }
-
- private:
- static int SizeCallback(HeapObject* unused) {
- USE(unused);
- return Map::kSize;
- }
-};
-
-
-class MapCompact {
- public:
- explicit MapCompact(Heap* heap, int live_maps)
- : heap_(heap),
- live_maps_(live_maps),
- to_evacuate_start_(heap->map_space()->TopAfterCompaction(live_maps)),
- vacant_map_it_(heap),
- map_to_evacuate_it_(heap, to_evacuate_start_),
- first_map_to_evacuate_(
- reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
- }
-
- void CompactMaps() {
- // As we know the number of maps to evacuate beforehand,
- // we stop then there is no more vacant maps.
- for (Map* next_vacant_map = NextVacantMap();
- next_vacant_map;
- next_vacant_map = NextVacantMap()) {
- EvacuateMap(next_vacant_map, NextMapToEvacuate());
- }
-
-#ifdef DEBUG
- CheckNoMapsToEvacuate();
-#endif
- }
-
- void UpdateMapPointersInRoots() {
- MapUpdatingVisitor map_updating_visitor;
- heap()->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG);
- heap()->isolate()->global_handles()->IterateWeakRoots(
- &map_updating_visitor);
- LiveObjectList::IterateElements(&map_updating_visitor);
- }
-
- void UpdateMapPointersInPagedSpace(PagedSpace* space) {
- ASSERT(space != heap()->map_space());
-
- PageIterator it(space, PageIterator::PAGES_IN_USE);
- while (it.has_next()) {
- Page* p = it.next();
- UpdateMapPointersInRange(heap(),
- p->ObjectAreaStart(),
- p->AllocationTop());
- }
- }
-
- void UpdateMapPointersInNewSpace() {
- NewSpace* space = heap()->new_space();
- UpdateMapPointersInRange(heap(), space->bottom(), space->top());
- }
-
- void UpdateMapPointersInLargeObjectSpace() {
- LargeObjectIterator it(heap()->lo_space());
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
- UpdateMapPointersInObject(heap(), obj);
- }
-
- void Finish() {
- heap()->map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
- }
-
- inline Heap* heap() const { return heap_; }
-
- private:
- Heap* heap_;
- int live_maps_;
- Address to_evacuate_start_;
- MapIterator vacant_map_it_;
- MapIterator map_to_evacuate_it_;
- Map* first_map_to_evacuate_;
-
- // Helper class for updating map pointers in HeapObjects.
- class MapUpdatingVisitor: public ObjectVisitor {
- public:
- MapUpdatingVisitor() {}
-
- void VisitPointer(Object** p) {
- UpdateMapPointer(p);
- }
-
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) UpdateMapPointer(p);
- }
-
- private:
- void UpdateMapPointer(Object** p) {
- if (!(*p)->IsHeapObject()) return;
- HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);
-
- // Moved maps are tagged with overflowed map word. They are the only
- // objects those map word is overflowed as marking is already complete.
- MapWord map_word = old_map->map_word();
- if (!map_word.IsOverflowed()) return;
-
- *p = GetForwardedMap(map_word);
- }
- };
-
- static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
- while (true) {
- HeapObject* next = it->next();
- ASSERT(next != NULL);
- if (next == last)
- return NULL;
- ASSERT(!next->IsOverflowed());
- ASSERT(!next->IsMarked());
- ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
- if (next->IsMap() == live)
- return reinterpret_cast<Map*>(next);
- }
- }
-
- Map* NextVacantMap() {
- Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
- ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
- return map;
- }
-
- Map* NextMapToEvacuate() {
- Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
- ASSERT(map != NULL);
- ASSERT(map->IsMap());
- return map;
- }
-
- static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
- ASSERT(FreeListNode::IsFreeListNode(vacant_map));
- ASSERT(map_to_evacuate->IsMap());
-
- ASSERT(Map::kSize % 4 == 0);
-
- map_to_evacuate->heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(
- vacant_map->address(), map_to_evacuate->address(), Map::kSize);
-
- ASSERT(vacant_map->IsMap()); // Due to memcpy above.
-
- MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
- forwarding_map_word.SetOverflow();
- map_to_evacuate->set_map_word(forwarding_map_word);
-
- ASSERT(map_to_evacuate->map_word().IsOverflowed());
- ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
- }
-
- static Map* GetForwardedMap(MapWord map_word) {
- ASSERT(map_word.IsOverflowed());
- map_word.ClearOverflow();
- Map* new_map = map_word.ToMap();
- ASSERT_MAP_ALIGNED(new_map->address());
- return new_map;
- }
-
- static int UpdateMapPointersInObject(Heap* heap, HeapObject* obj) {
- ASSERT(!obj->IsMarked());
- Map* map = obj->map();
- ASSERT(heap->map_space()->Contains(map));
- MapWord map_word = map->map_word();
- ASSERT(!map_word.IsMarked());
- if (map_word.IsOverflowed()) {
- Map* new_map = GetForwardedMap(map_word);
- ASSERT(heap->map_space()->Contains(new_map));
- obj->set_map(new_map);
-
-#ifdef DEBUG
- if (FLAG_gc_verbose) {
- PrintF("update %p : %p -> %p\n",
- obj->address(),
- reinterpret_cast<void*>(map),
- reinterpret_cast<void*>(new_map));
- }
-#endif
- }
-
- int size = obj->SizeFromMap(map);
- MapUpdatingVisitor map_updating_visitor;
- obj->IterateBody(map->instance_type(), size, &map_updating_visitor);
- return size;
- }
-
- static void UpdateMapPointersInRange(Heap* heap, Address start, Address end) {
- HeapObject* object;
- int size;
- for (Address current = start; current < end; current += size) {
- object = HeapObject::FromAddress(current);
- size = UpdateMapPointersInObject(heap, object);
- ASSERT(size > 0);
- }
- }
-
-#ifdef DEBUG
- void CheckNoMapsToEvacuate() {
- if (!FLAG_enable_slow_asserts)
- return;
-
- for (HeapObject* obj = map_to_evacuate_it_.next();
- obj != NULL; obj = map_to_evacuate_it_.next())
- ASSERT(FreeListNode::IsFreeListNode(obj));
- }
-#endif
-};
-
-
-void MarkCompactCollector::SweepSpaces() {
- GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
-
- ASSERT(state_ == SWEEP_SPACES);
- ASSERT(!IsCompacting());
- // Noncompacting collections simply sweep the spaces to clear the mark
- // bits and free the nonlive blocks (for old and map spaces). We sweep
- // the map space last because freeing non-live maps overwrites them and
- // the other spaces rely on possibly non-live maps to get the sizes for
- // non-live objects.
- SweepSpace(heap(), heap()->old_pointer_space());
- SweepSpace(heap(), heap()->old_data_space());
- SweepSpace(heap(), heap()->code_space());
- SweepSpace(heap(), heap()->cell_space());
- { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
- SweepNewSpace(heap(), heap()->new_space());
- }
- SweepSpace(heap(), heap()->map_space());
-
- heap()->IterateDirtyRegions(heap()->map_space(),
- &heap()->IteratePointersInDirtyMapsRegion,
- &UpdatePointerToNewGen,
- heap()->WATERMARK_SHOULD_BE_VALID);
-
- intptr_t live_maps_size = heap()->map_space()->Size();
- int live_maps = static_cast<int>(live_maps_size / Map::kSize);
- ASSERT(live_map_objects_size_ == live_maps_size);
-
- if (heap()->map_space()->NeedsCompaction(live_maps)) {
- MapCompact map_compact(heap(), live_maps);
-
- map_compact.CompactMaps();
- map_compact.UpdateMapPointersInRoots();
-
- PagedSpaces spaces;
- for (PagedSpace* space = spaces.next();
- space != NULL; space = spaces.next()) {
- if (space == heap()->map_space()) continue;
- map_compact.UpdateMapPointersInPagedSpace(space);
- }
- map_compact.UpdateMapPointersInNewSpace();
- map_compact.UpdateMapPointersInLargeObjectSpace();
-
- map_compact.Finish();
- }
-}
-
-
-// Iterate the live objects in a range of addresses (eg, a page or a
-// semispace). The live regions of the range have been linked into a list.
-// The first live region is [first_live_start, first_live_end), and the last
-// address in the range is top. The callback function is used to get the
-// size of each live object.
-int MarkCompactCollector::IterateLiveObjectsInRange(
- Address start,
- Address end,
- LiveObjectCallback size_func) {
- int live_objects_size = 0;
- Address current = start;
- while (current < end) {
- uint32_t encoded_map = Memory::uint32_at(current);
- if (encoded_map == kSingleFreeEncoding) {
- current += kPointerSize;
- } else if (encoded_map == kMultiFreeEncoding) {
- current += Memory::int_at(current + kIntSize);
- } else {
- int size = (this->*size_func)(HeapObject::FromAddress(current));
- current += size;
- live_objects_size += size;
- }
- }
- return live_objects_size;
-}
-
-
-int MarkCompactCollector::IterateLiveObjects(
- NewSpace* space, LiveObjectCallback size_f) {
- ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
- return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
-}
-
-
-int MarkCompactCollector::IterateLiveObjects(
- PagedSpace* space, LiveObjectCallback size_f) {
- ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
- int total = 0;
- PageIterator it(space, PageIterator::PAGES_IN_USE);
- while (it.has_next()) {
- Page* p = it.next();
- total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
- p->AllocationTop(),
- size_f);
- }
- return total;
-}
-
-
-// -------------------------------------------------------------------------
-// Phase 3: Update pointers
-
-// Helper class for updating pointers in HeapObjects.
-class UpdatingVisitor: public ObjectVisitor {
- public:
- explicit UpdatingVisitor(Heap* heap) : heap_(heap) {}
-
- void VisitPointer(Object** p) {
- UpdatePointer(p);
- }
-
- void VisitPointers(Object** start, Object** end) {
- // Mark all HeapObject pointers in [start, end)
- for (Object** p = start; p < end; p++) UpdatePointer(p);
- }
-
- void VisitCodeTarget(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- VisitPointer(&target);
- rinfo->set_target_address(
- reinterpret_cast<Code*>(target)->instruction_start());
- }
-
- void VisitDebugTarget(RelocInfo* rinfo) {
- ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
- rinfo->IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
- rinfo->IsPatchedDebugBreakSlotSequence()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
- VisitPointer(&target);
- rinfo->set_call_address(
- reinterpret_cast<Code*>(target)->instruction_start());
- }
-
- inline Heap* heap() const { return heap_; }
-
- private:
- void UpdatePointer(Object** p) {
- if (!(*p)->IsHeapObject()) return;
-
- HeapObject* obj = HeapObject::cast(*p);
- Address old_addr = obj->address();
- Address new_addr;
- ASSERT(!heap()->InFromSpace(obj));
-
- if (heap()->new_space()->Contains(obj)) {
- Address forwarding_pointer_addr =
- heap()->new_space()->FromSpaceLow() +
- heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
- new_addr = Memory::Address_at(forwarding_pointer_addr);
-
-#ifdef DEBUG
- ASSERT(heap()->old_pointer_space()->Contains(new_addr) ||
- heap()->old_data_space()->Contains(new_addr) ||
- heap()->new_space()->FromSpaceContains(new_addr) ||
- heap()->lo_space()->Contains(HeapObject::FromAddress(new_addr)));
-
- if (heap()->new_space()->FromSpaceContains(new_addr)) {
- ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
- heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
- }
-#endif
-
- } else if (heap()->lo_space()->Contains(obj)) {
- // Don't move objects in the large object space.
- return;
-
- } else {
-#ifdef DEBUG
- PagedSpaces spaces;
- PagedSpace* original_space = spaces.next();
- while (original_space != NULL) {
- if (original_space->Contains(obj)) break;
- original_space = spaces.next();
- }
- ASSERT(original_space != NULL);
-#endif
- new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
- ASSERT(original_space->Contains(new_addr));
- ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
- original_space->MCSpaceOffsetForAddress(old_addr));
- }
-
- *p = HeapObject::FromAddress(new_addr);
-
-#ifdef DEBUG
- if (FLAG_gc_verbose) {
- PrintF("update %p : %p -> %p\n",
- reinterpret_cast<Address>(p), old_addr, new_addr);
- }
-#endif
- }
-
- Heap* heap_;
-};
-
-
-void MarkCompactCollector::UpdatePointers() {
-#ifdef DEBUG
- ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
- state_ = UPDATE_POINTERS;
-#endif
- UpdatingVisitor updating_visitor(heap());
- heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
- &updating_visitor);
- heap()->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
- heap()->isolate()->global_handles()->IterateWeakRoots(&updating_visitor);
-
- // Update the pointer to the head of the weak list of global contexts.
- updating_visitor.VisitPointer(&heap()->global_contexts_list_);
-
- LiveObjectList::IterateElements(&updating_visitor);
-
- int live_maps_size = IterateLiveObjects(
- heap()->map_space(), &MarkCompactCollector::UpdatePointersInOldObject);
- int live_pointer_olds_size = IterateLiveObjects(
- heap()->old_pointer_space(),
- &MarkCompactCollector::UpdatePointersInOldObject);
- int live_data_olds_size = IterateLiveObjects(
- heap()->old_data_space(),
- &MarkCompactCollector::UpdatePointersInOldObject);
- int live_codes_size = IterateLiveObjects(
- heap()->code_space(), &MarkCompactCollector::UpdatePointersInOldObject);
- int live_cells_size = IterateLiveObjects(
- heap()->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject);
- int live_news_size = IterateLiveObjects(
- heap()->new_space(), &MarkCompactCollector::UpdatePointersInNewObject);
-
- // Large objects do not move, the map word can be updated directly.
- LargeObjectIterator it(heap()->lo_space());
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
- UpdatePointersInNewObject(obj);
- }
-
- USE(live_maps_size);
- USE(live_pointer_olds_size);
- USE(live_data_olds_size);
- USE(live_codes_size);
- USE(live_cells_size);
- USE(live_news_size);
- ASSERT(live_maps_size == live_map_objects_size_);
- ASSERT(live_data_olds_size == live_old_data_objects_size_);
- ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
- ASSERT(live_codes_size == live_code_objects_size_);
- ASSERT(live_cells_size == live_cell_objects_size_);
- ASSERT(live_news_size == live_young_objects_size_);
-}
-
-
-int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
- // Keep old map pointers
- Map* old_map = obj->map();
- ASSERT(old_map->IsHeapObject());
-
- Address forwarded = GetForwardingAddressInOldSpace(old_map);
-
- ASSERT(heap()->map_space()->Contains(old_map));
- ASSERT(heap()->map_space()->Contains(forwarded));
-#ifdef DEBUG
- if (FLAG_gc_verbose) {
- PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
- forwarded);
- }
-#endif
- // Update the map pointer.
- obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));
-
- // We have to compute the object size relying on the old map because
- // map objects are not relocated yet.
- int obj_size = obj->SizeFromMap(old_map);
-
- // Update pointers in the object body.
- UpdatingVisitor updating_visitor(heap());
- obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
- return obj_size;
-}
-
-
-int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
- // Decode the map pointer.
- MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
- ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
-
- // At this point, the first word of map_addr is also encoded, cannot
- // cast it to Map* using Map::cast.
- Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
- int obj_size = obj->SizeFromMap(map);
- InstanceType type = map->instance_type();
-
- // Update map pointer.
- Address new_map_addr = GetForwardingAddressInOldSpace(map);
- int offset = encoding.DecodeOffset();
- obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));
-
-#ifdef DEBUG
- if (FLAG_gc_verbose) {
- PrintF("update %p : %p -> %p\n", obj->address(),
- map_addr, new_map_addr);
- }
-#endif
-
- // Update pointers in the object body.
- UpdatingVisitor updating_visitor(heap());
- obj->IterateBody(type, obj_size, &updating_visitor);
- return obj_size;
-}
-
-
-Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
- // Object should either in old or map space.
- MapWord encoding = obj->map_word();
-
- // Offset to the first live object's forwarding address.
- int offset = encoding.DecodeOffset();
- Address obj_addr = obj->address();
-
- // Find the first live object's forwarding address.
- Page* p = Page::FromAddress(obj_addr);
- Address first_forwarded = p->mc_first_forwarded;
-
- // Page start address of forwarded address.
- Page* forwarded_page = Page::FromAddress(first_forwarded);
- int forwarded_offset = forwarded_page->Offset(first_forwarded);
-
- // Find end of allocation in the page of first_forwarded.
- int mc_top_offset = forwarded_page->AllocationWatermarkOffset();
-
- // Check if current object's forward pointer is in the same page
- // as the first live object's forwarding pointer
- if (forwarded_offset + offset < mc_top_offset) {
- // In the same page.
- return first_forwarded + offset;
- }
-
- // Must be in the next page, NOTE: this may cross chunks.
- Page* next_page = forwarded_page->next_page();
- ASSERT(next_page->is_valid());
-
- offset -= (mc_top_offset - forwarded_offset);
- offset += Page::kObjectStartOffset;
-
- ASSERT_PAGE_OFFSET(offset);
- ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());
-
- return next_page->OffsetToAddress(offset);
-}
-
-
-// -------------------------------------------------------------------------
-// Phase 4: Relocate objects
-
-void MarkCompactCollector::RelocateObjects() {
-#ifdef DEBUG
- ASSERT(state_ == UPDATE_POINTERS);
- state_ = RELOCATE_OBJECTS;
-#endif
- // Relocates objects, always relocate map objects first. Relocating
- // objects in other space relies on map objects to get object size.
- int live_maps_size = IterateLiveObjects(
- heap()->map_space(), &MarkCompactCollector::RelocateMapObject);
- int live_pointer_olds_size = IterateLiveObjects(
- heap()->old_pointer_space(),
- &MarkCompactCollector::RelocateOldPointerObject);
- int live_data_olds_size = IterateLiveObjects(
- heap()->old_data_space(), &MarkCompactCollector::RelocateOldDataObject);
- int live_codes_size = IterateLiveObjects(
- heap()->code_space(), &MarkCompactCollector::RelocateCodeObject);
- int live_cells_size = IterateLiveObjects(
- heap()->cell_space(), &MarkCompactCollector::RelocateCellObject);
- int live_news_size = IterateLiveObjects(
- heap()->new_space(), &MarkCompactCollector::RelocateNewObject);
-
- USE(live_maps_size);
- USE(live_pointer_olds_size);
- USE(live_data_olds_size);
- USE(live_codes_size);
- USE(live_cells_size);
- USE(live_news_size);
- ASSERT(live_maps_size == live_map_objects_size_);
- ASSERT(live_data_olds_size == live_old_data_objects_size_);
- ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
- ASSERT(live_codes_size == live_code_objects_size_);
- ASSERT(live_cells_size == live_cell_objects_size_);
- ASSERT(live_news_size == live_young_objects_size_);
-
- // Flip from and to spaces
- heap()->new_space()->Flip();
-
- heap()->new_space()->MCCommitRelocationInfo();
-
- // Set age_mark to bottom in to space
- Address mark = heap()->new_space()->bottom();
- heap()->new_space()->set_age_mark(mark);
-
- PagedSpaces spaces;
- for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
- space->MCCommitRelocationInfo();
-
- heap()->CheckNewSpaceExpansionCriteria();
- heap()->IncrementYoungSurvivorsCounter(live_news_size);
-}
-
-
-int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
- // Recover map pointer.
- MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
- ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
-
- // Get forwarding address before resetting map pointer
- Address new_addr = GetForwardingAddressInOldSpace(obj);
-
- // Reset map pointer. The meta map object may not be copied yet so
- // Map::cast does not yet work.
- obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
-
- Address old_addr = obj->address();
-
- if (new_addr != old_addr) {
- // Move contents.
- heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
- old_addr,
- Map::kSize);
- }
-
-#ifdef DEBUG
- if (FLAG_gc_verbose) {
- PrintF("relocate %p -> %p\n", old_addr, new_addr);
- }
-#endif
-
- return Map::kSize;
-}
-
-
-static inline int RestoreMap(HeapObject* obj,
- PagedSpace* space,
- Address new_addr,
- Address map_addr) {
- // This must be a non-map object, and the function relies on the
- // assumption that the Map space is compacted before the other paged
- // spaces (see RelocateObjects).
-
- // Reset map pointer.
- obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));
-
- int obj_size = obj->Size();
- ASSERT_OBJECT_SIZE(obj_size);
-
- ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
- space->MCSpaceOffsetForAddress(obj->address()));
-
-#ifdef DEBUG
- if (FLAG_gc_verbose) {
- PrintF("relocate %p -> %p\n", obj->address(), new_addr);
- }
-#endif
-
- return obj_size;
-}
-
-
-int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
- PagedSpace* space) {
- // Recover map pointer.
- MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
- ASSERT(heap()->map_space()->Contains(map_addr));
-
- // Get forwarding address before resetting map pointer.
- Address new_addr = GetForwardingAddressInOldSpace(obj);
-
- // Reset the map pointer.
- int obj_size = RestoreMap(obj, space, new_addr, map_addr);
-
- Address old_addr = obj->address();
-
- if (new_addr != old_addr) {
- // Move contents.
- if (space == heap()->old_data_space()) {
- heap()->MoveBlock(new_addr, old_addr, obj_size);
- } else {
- heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
- old_addr,
- obj_size);
- }
- }
-
- ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
-
- HeapObject* copied_to = HeapObject::FromAddress(new_addr);
- if (copied_to->IsSharedFunctionInfo()) {
- PROFILE(heap()->isolate(),
- SharedFunctionInfoMoveEvent(old_addr, new_addr));
- }
- HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
-
- return obj_size;
-}
-
-
-int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
- return RelocateOldNonCodeObject(obj, heap()->old_pointer_space());
-}
-
-
-int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
- return RelocateOldNonCodeObject(obj, heap()->old_data_space());
-}
-
-
-int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
- return RelocateOldNonCodeObject(obj, heap()->cell_space());
-}
-
-
-int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
- // Recover map pointer.
- MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
- ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
-
- // Get forwarding address before resetting map pointer
- Address new_addr = GetForwardingAddressInOldSpace(obj);
-
- // Reset the map pointer.
- int obj_size = RestoreMap(obj, heap()->code_space(), new_addr, map_addr);
-
- Address old_addr = obj->address();
-
- if (new_addr != old_addr) {
- // Move contents.
- heap()->MoveBlock(new_addr, old_addr, obj_size);
- }
-
- HeapObject* copied_to = HeapObject::FromAddress(new_addr);
- if (copied_to->IsCode()) {
- // May also update inline cache target.
- Code::cast(copied_to)->Relocate(new_addr - old_addr);
- // Notify the logger that compiled code has moved.
- PROFILE(heap()->isolate(), CodeMoveEvent(old_addr, new_addr));
- }
- HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
-
- return obj_size;
-}
-
-
-int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
- int obj_size = obj->Size();
-
- // Get forwarding address
- Address old_addr = obj->address();
- int offset = heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
-
- Address new_addr =
- Memory::Address_at(heap()->new_space()->FromSpaceLow() + offset);
-
-#ifdef DEBUG
- if (heap()->new_space()->FromSpaceContains(new_addr)) {
- ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
- heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
- } else {
- ASSERT(heap()->TargetSpace(obj) == heap()->old_pointer_space() ||
- heap()->TargetSpace(obj) == heap()->old_data_space());
- }
-#endif
-
- // New and old addresses cannot overlap.
- if (heap()->InNewSpace(HeapObject::FromAddress(new_addr))) {
- heap()->CopyBlock(new_addr, old_addr, obj_size);
- } else {
- heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
- old_addr,
- obj_size);
- }
-
-#ifdef DEBUG
- if (FLAG_gc_verbose) {
- PrintF("relocate %p -> %p\n", old_addr, new_addr);
- }
-#endif
-
- HeapObject* copied_to = HeapObject::FromAddress(new_addr);
- if (copied_to->IsSharedFunctionInfo()) {
- PROFILE(heap()->isolate(),
- SharedFunctionInfoMoveEvent(old_addr, new_addr));
- }
- HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
-
- return obj_size;
-}
-
-
-void MarkCompactCollector::EnableCodeFlushing(bool enable) {
- if (enable) {
- if (code_flusher_ != NULL) return;
- code_flusher_ = new CodeFlusher(heap()->isolate());
- } else {
- if (code_flusher_ == NULL) return;
- delete code_flusher_;
- code_flusher_ = NULL;
- }
-}
-
-
-void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
- Isolate* isolate) {
-#ifdef ENABLE_GDB_JIT_INTERFACE
- if (obj->IsCode()) {
- GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
- }
-#endif
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (obj->IsCode()) {
- PROFILE(isolate, CodeDeleteEvent(obj->address()));
- }
-#endif
-}
-
-
-int MarkCompactCollector::SizeOfMarkedObject(HeapObject* obj) {
- MapWord map_word = obj->map_word();
- map_word.ClearMark();
- return obj->SizeFromMap(map_word.ToMap());
-}
-
-
-void MarkCompactCollector::Initialize() {
- StaticPointersToNewGenUpdatingVisitor::Initialize();
- StaticMarkingVisitor::Initialize();
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/mark-compact.h b/src/3rdparty/v8/src/mark-compact.h
deleted file mode 100644
index 04d0ff6..0000000
--- a/src/3rdparty/v8/src/mark-compact.h
+++ /dev/null
@@ -1,506 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MARK_COMPACT_H_
-#define V8_MARK_COMPACT_H_
-
-#include "spaces.h"
-
-namespace v8 {
-namespace internal {
-
-// Callback function, returns whether an object is alive. The heap size
-// of the object is returned in size. It optionally updates the offset
-// to the first live object in the page (only used for old and map objects).
-typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
-
-// Forward declarations.
-class CodeFlusher;
-class GCTracer;
-class MarkingVisitor;
-class RootMarkingVisitor;
-
-
-// ----------------------------------------------------------------------------
-// Marking stack for tracing live objects.
-
-class MarkingStack {
- public:
- MarkingStack() : low_(NULL), top_(NULL), high_(NULL), overflowed_(false) { }
-
- void Initialize(Address low, Address high) {
- top_ = low_ = reinterpret_cast<HeapObject**>(low);
- high_ = reinterpret_cast<HeapObject**>(high);
- overflowed_ = false;
- }
-
- bool is_full() const { return top_ >= high_; }
-
- bool is_empty() const { return top_ <= low_; }
-
- bool overflowed() const { return overflowed_; }
-
- void clear_overflowed() { overflowed_ = false; }
-
- // Push the (marked) object on the marking stack if there is room,
- // otherwise mark the object as overflowed and wait for a rescan of the
- // heap.
- void Push(HeapObject* object) {
- CHECK(object->IsHeapObject());
- if (is_full()) {
- object->SetOverflow();
- overflowed_ = true;
- } else {
- *(top_++) = object;
- }
- }
-
- HeapObject* Pop() {
- ASSERT(!is_empty());
- HeapObject* object = *(--top_);
- CHECK(object->IsHeapObject());
- return object;
- }
-
- private:
- HeapObject** low_;
- HeapObject** top_;
- HeapObject** high_;
- bool overflowed_;
-
- DISALLOW_COPY_AND_ASSIGN(MarkingStack);
-};
-
-
-// -------------------------------------------------------------------------
-// Mark-Compact collector
-
-class OverflowedObjectsScanner;
-
-class MarkCompactCollector {
- public:
- // Type of functions to compute forwarding addresses of objects in
- // compacted spaces. Given an object and its size, return a (non-failure)
- // Object* that will be the object after forwarding. There is a separate
- // allocation function for each (compactable) space based on the location
- // of the object before compaction.
- typedef MaybeObject* (*AllocationFunction)(Heap* heap,
- HeapObject* object,
- int object_size);
-
- // Type of functions to encode the forwarding address for an object.
- // Given the object, its size, and the new (non-failure) object it will be
- // forwarded to, encode the forwarding address. For paged spaces, the
- // 'offset' input/output parameter contains the offset of the forwarded
- // object from the forwarding address of the previous live object in the
- // page as input, and is updated to contain the offset to be used for the
- // next live object in the same page. For spaces using a different
- // encoding (ie, contiguous spaces), the offset parameter is ignored.
- typedef void (*EncodingFunction)(Heap* heap,
- HeapObject* old_object,
- int object_size,
- Object* new_object,
- int* offset);
-
- // Type of functions to process non-live objects.
- typedef void (*ProcessNonLiveFunction)(HeapObject* object, Isolate* isolate);
-
- // Pointer to member function, used in IterateLiveObjects.
- typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj);
-
- // Set the global force_compaction flag, it must be called before Prepare
- // to take effect.
- void SetForceCompaction(bool value) {
- force_compaction_ = value;
- }
-
-
- static void Initialize();
-
- // Prepares for GC by resetting relocation info in old and map spaces and
- // choosing spaces to compact.
- void Prepare(GCTracer* tracer);
-
- // Performs a global garbage collection.
- void CollectGarbage();
-
- // True if the last full GC performed heap compaction.
- bool HasCompacted() { return compacting_collection_; }
-
- // True after the Prepare phase if the compaction is taking place.
- bool IsCompacting() {
-#ifdef DEBUG
- // For the purposes of asserts we don't want this to keep returning true
- // after the collection is completed.
- return state_ != IDLE && compacting_collection_;
-#else
- return compacting_collection_;
-#endif
- }
-
- // The count of the number of objects left marked at the end of the last
- // completed full GC (expected to be zero).
- int previous_marked_count() { return previous_marked_count_; }
-
- // During a full GC, there is a stack-allocated GCTracer that is used for
- // bookkeeping information. Return a pointer to that tracer.
- GCTracer* tracer() { return tracer_; }
-
-#ifdef DEBUG
- // Checks whether performing mark-compact collection.
- bool in_use() { return state_ > PREPARE_GC; }
- bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
-#endif
-
- // Determine type of object and emit deletion log event.
- static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
-
- // Returns size of a possibly marked object.
- static int SizeOfMarkedObject(HeapObject* obj);
-
- // Distinguishable invalid map encodings (for single word and multiple words)
- // that indicate free regions.
- static const uint32_t kSingleFreeEncoding = 0;
- static const uint32_t kMultiFreeEncoding = 1;
-
- inline Heap* heap() const { return heap_; }
-
- CodeFlusher* code_flusher() { return code_flusher_; }
- inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
- void EnableCodeFlushing(bool enable);
-
- private:
- MarkCompactCollector();
- ~MarkCompactCollector();
-
-#ifdef DEBUG
- enum CollectorState {
- IDLE,
- PREPARE_GC,
- MARK_LIVE_OBJECTS,
- SWEEP_SPACES,
- ENCODE_FORWARDING_ADDRESSES,
- UPDATE_POINTERS,
- RELOCATE_OBJECTS
- };
-
- // The current stage of the collector.
- CollectorState state_;
-#endif
-
- // Global flag that forces a compaction.
- bool force_compaction_;
-
- // Global flag indicating whether spaces were compacted on the last GC.
- bool compacting_collection_;
-
- // Global flag indicating whether spaces will be compacted on the next GC.
- bool compact_on_next_gc_;
-
- // The number of objects left marked at the end of the last completed full
- // GC (expected to be zero).
- int previous_marked_count_;
-
- // A pointer to the current stack-allocated GC tracer object during a full
- // collection (NULL before and after).
- GCTracer* tracer_;
-
- // Finishes GC, performs heap verification if enabled.
- void Finish();
-
- // -----------------------------------------------------------------------
- // Phase 1: Marking live objects.
- //
- // Before: The heap has been prepared for garbage collection by
- // MarkCompactCollector::Prepare() and is otherwise in its
- // normal state.
- //
- // After: Live objects are marked and non-live objects are unmarked.
-
-
- friend class RootMarkingVisitor;
- friend class MarkingVisitor;
- friend class StaticMarkingVisitor;
- friend class CodeMarkingVisitor;
- friend class SharedFunctionInfoMarkingVisitor;
-
- void PrepareForCodeFlushing();
-
- // Marking operations for objects reachable from roots.
- void MarkLiveObjects();
-
- void MarkUnmarkedObject(HeapObject* obj);
-
- inline void MarkObject(HeapObject* obj) {
- if (!obj->IsMarked()) MarkUnmarkedObject(obj);
- }
-
- inline void SetMark(HeapObject* obj);
-
- // Creates back pointers for all map transitions, stores them in
- // the prototype field. The original prototype pointers are restored
- // in ClearNonLiveTransitions(). All JSObject maps
- // connected by map transitions have the same prototype object, which
- // is why we can use this field temporarily for back pointers.
- void CreateBackPointers();
-
- // Mark a Map and its DescriptorArray together, skipping transitions.
- void MarkMapContents(Map* map);
- void MarkDescriptorArray(DescriptorArray* descriptors);
-
- // Mark the heap roots and all objects reachable from them.
- void MarkRoots(RootMarkingVisitor* visitor);
-
- // Mark the symbol table specially. References to symbols from the
- // symbol table are weak.
- void MarkSymbolTable();
-
- // Mark objects in object groups that have at least one object in the
- // group marked.
- void MarkObjectGroups();
-
- // Mark objects in implicit references groups if their parent object
- // is marked.
- void MarkImplicitRefGroups();
-
- // Mark all objects which are reachable due to host application
- // logic like object groups or implicit references' groups.
- void ProcessExternalMarking();
-
- // Mark objects reachable (transitively) from objects in the marking stack
- // or overflowed in the heap.
- void ProcessMarkingStack();
-
- // Mark objects reachable (transitively) from objects in the marking
- // stack. This function empties the marking stack, but may leave
- // overflowed objects in the heap, in which case the marking stack's
- // overflow flag will be set.
- void EmptyMarkingStack();
-
- // Refill the marking stack with overflowed objects from the heap. This
- // function either leaves the marking stack full or clears the overflow
- // flag on the marking stack.
- void RefillMarkingStack();
-
- // Callback function for telling whether the object *p is an unmarked
- // heap object.
- static bool IsUnmarkedHeapObject(Object** p);
-
-#ifdef DEBUG
- void UpdateLiveObjectCount(HeapObject* obj);
-#endif
-
- // We sweep the large object space in the same way whether we are
- // compacting or not, because the large object space is never compacted.
- void SweepLargeObjectSpace();
-
- // Test whether a (possibly marked) object is a Map.
- static inline bool SafeIsMap(HeapObject* object);
-
- // Map transitions from a live map to a dead map must be killed.
- // We replace them with a null descriptor, with the same key.
- void ClearNonLiveTransitions();
-
- // -----------------------------------------------------------------------
- // Phase 2: Sweeping to clear mark bits and free non-live objects for
- // a non-compacting collection, or else computing and encoding
- // forwarding addresses for a compacting collection.
- //
- // Before: Live objects are marked and non-live objects are unmarked.
- //
- // After: (Non-compacting collection.) Live objects are unmarked,
- // non-live regions have been added to their space's free
- // list.
- //
- // After: (Compacting collection.) The forwarding address of live
- // objects in the paged spaces is encoded in their map word
- // along with their (non-forwarded) map pointer.
- //
- // The forwarding address of live objects in the new space is
- // written to their map word's offset in the inactive
- // semispace.
- //
- // Bookkeeping data is written to the page header of
- // eached paged-space page that contains live objects after
- // compaction:
- //
- // The allocation watermark field is used to track the
- // relocation top address, the address of the first word
- // after the end of the last live object in the page after
- // compaction.
- //
- // The Page::mc_page_index field contains the zero-based index of the
- // page in its space. This word is only used for map space pages, in
- // order to encode the map addresses in 21 bits to free 11
- // bits per map word for the forwarding address.
- //
- // The Page::mc_first_forwarded field contains the (nonencoded)
- // forwarding address of the first live object in the page.
- //
- // In both the new space and the paged spaces, a linked list
- // of live regions is constructructed (linked through
- // pointers in the non-live region immediately following each
- // live region) to speed further passes of the collector.
-
- // Encodes forwarding addresses of objects in compactable parts of the
- // heap.
- void EncodeForwardingAddresses();
-
- // Encodes the forwarding addresses of objects in new space.
- void EncodeForwardingAddressesInNewSpace();
-
- // Function template to encode the forwarding addresses of objects in
- // paged spaces, parameterized by allocation and non-live processing
- // functions.
- template<AllocationFunction Alloc, ProcessNonLiveFunction ProcessNonLive>
- void EncodeForwardingAddressesInPagedSpace(PagedSpace* space);
-
- // Iterates live objects in a space, passes live objects
- // to a callback function which returns the heap size of the object.
- // Returns the number of live objects iterated.
- int IterateLiveObjects(NewSpace* space, LiveObjectCallback size_f);
- int IterateLiveObjects(PagedSpace* space, LiveObjectCallback size_f);
-
- // Iterates the live objects between a range of addresses, returning the
- // number of live objects.
- int IterateLiveObjectsInRange(Address start, Address end,
- LiveObjectCallback size_func);
-
- // If we are not compacting the heap, we simply sweep the spaces except
- // for the large object space, clearing mark bits and adding unmarked
- // regions to each space's free list.
- void SweepSpaces();
-
- // -----------------------------------------------------------------------
- // Phase 3: Updating pointers in live objects.
- //
- // Before: Same as after phase 2 (compacting collection).
- //
- // After: All pointers in live objects, including encoded map
- // pointers, are updated to point to their target's new
- // location.
-
- friend class UpdatingVisitor; // helper for updating visited objects
-
- // Updates pointers in all spaces.
- void UpdatePointers();
-
- // Updates pointers in an object in new space.
- // Returns the heap size of the object.
- int UpdatePointersInNewObject(HeapObject* obj);
-
- // Updates pointers in an object in old spaces.
- // Returns the heap size of the object.
- int UpdatePointersInOldObject(HeapObject* obj);
-
- // Calculates the forwarding address of an object in an old space.
- static Address GetForwardingAddressInOldSpace(HeapObject* obj);
-
- // -----------------------------------------------------------------------
- // Phase 4: Relocating objects.
- //
- // Before: Pointers to live objects are updated to point to their
- // target's new location.
- //
- // After: Objects have been moved to their new addresses.
-
- // Relocates objects in all spaces.
- void RelocateObjects();
-
- // Converts a code object's inline target to addresses, convention from
- // address to target happens in the marking phase.
- int ConvertCodeICTargetToAddress(HeapObject* obj);
-
- // Relocate a map object.
- int RelocateMapObject(HeapObject* obj);
-
- // Relocates an old object.
- int RelocateOldPointerObject(HeapObject* obj);
- int RelocateOldDataObject(HeapObject* obj);
-
- // Relocate a property cell object.
- int RelocateCellObject(HeapObject* obj);
-
- // Helper function.
- inline int RelocateOldNonCodeObject(HeapObject* obj,
- PagedSpace* space);
-
- // Relocates an object in the code space.
- int RelocateCodeObject(HeapObject* obj);
-
- // Copy a new object.
- int RelocateNewObject(HeapObject* obj);
-
-#ifdef DEBUG
- // -----------------------------------------------------------------------
- // Debugging variables, functions and classes
- // Counters used for debugging the marking phase of mark-compact or
- // mark-sweep collection.
-
- // Size of live objects in Heap::to_space_.
- int live_young_objects_size_;
-
- // Size of live objects in Heap::old_pointer_space_.
- int live_old_pointer_objects_size_;
-
- // Size of live objects in Heap::old_data_space_.
- int live_old_data_objects_size_;
-
- // Size of live objects in Heap::code_space_.
- int live_code_objects_size_;
-
- // Size of live objects in Heap::map_space_.
- int live_map_objects_size_;
-
- // Size of live objects in Heap::cell_space_.
- int live_cell_objects_size_;
-
- // Size of live objects in Heap::lo_space_.
- int live_lo_objects_size_;
-
- // Number of live bytes in this collection.
- int live_bytes_;
-
- friend class MarkObjectVisitor;
- static void VisitObject(HeapObject* obj);
-
- friend class UnmarkObjectVisitor;
- static void UnmarkObject(HeapObject* obj);
-#endif
-
- Heap* heap_;
- MarkingStack marking_stack_;
- CodeFlusher* code_flusher_;
-
- friend class Heap;
- friend class OverflowedObjectsScanner;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_MARK_COMPACT_H_
diff --git a/src/3rdparty/v8/src/math.js b/src/3rdparty/v8/src/math.js
deleted file mode 100644
index 70b8c57..0000000
--- a/src/3rdparty/v8/src/math.js
+++ /dev/null
@@ -1,264 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// Keep reference to original values of some global properties. This
-// has the added benefit that the code in this file is isolated from
-// changes to these properties.
-const $floor = MathFloor;
-const $random = MathRandom;
-const $abs = MathAbs;
-
-// Instance class name can only be set on functions. That is the only
-// purpose for MathConstructor.
-function MathConstructor() {}
-%FunctionSetInstanceClassName(MathConstructor, 'Math');
-const $Math = new MathConstructor();
-$Math.__proto__ = global.Object.prototype;
-%SetProperty(global, "Math", $Math, DONT_ENUM);
-
-// ECMA 262 - 15.8.2.1
-function MathAbs(x) {
- if (%_IsSmi(x)) return x >= 0 ? x : -x;
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- if (x === 0) return 0; // To handle -0.
- return x > 0 ? x : -x;
-}
-
-// ECMA 262 - 15.8.2.2
-function MathAcos(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_acos(x);
-}
-
-// ECMA 262 - 15.8.2.3
-function MathAsin(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_asin(x);
-}
-
-// ECMA 262 - 15.8.2.4
-function MathAtan(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_atan(x);
-}
-
-// ECMA 262 - 15.8.2.5
-// The naming of y and x matches the spec, as does the order in which
-// ToNumber (valueOf) is called.
-function MathAtan2(y, x) {
- if (!IS_NUMBER(y)) y = NonNumberToNumber(y);
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_atan2(y, x);
-}
-
-// ECMA 262 - 15.8.2.6
-function MathCeil(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_ceil(x);
-}
-
-// ECMA 262 - 15.8.2.7
-function MathCos(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathCos(x);
-}
-
-// ECMA 262 - 15.8.2.8
-function MathExp(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_exp(x);
-}
-
-// ECMA 262 - 15.8.2.9
-function MathFloor(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- // It's more common to call this with a positive number that's out
- // of range than negative numbers; check the upper bound first.
- if (x < 0x80000000 && x > 0) {
- // Numbers in the range [0, 2^31) can be floored by converting
- // them to an unsigned 32-bit value using the shift operator.
- // We avoid doing so for -0, because the result of Math.floor(-0)
- // has to be -0, which wouldn't be the case with the shift.
- return TO_UINT32(x);
- } else {
- return %Math_floor(x);
- }
-}
-
-// ECMA 262 - 15.8.2.10
-function MathLog(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathLog(x);
-}
-
-// ECMA 262 - 15.8.2.11
-function MathMax(arg1, arg2) { // length == 2
- var length = %_ArgumentsLength();
- if (length == 0) {
- return -1/0; // Compiler constant-folds this to -Infinity.
- }
- var r = arg1;
- if (!IS_NUMBER(r)) r = NonNumberToNumber(r);
- if (NUMBER_IS_NAN(r)) return r;
- for (var i = 1; i < length; i++) {
- var n = %_Arguments(i);
- if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
- if (NUMBER_IS_NAN(n)) return n;
- // Make sure +0 is considered greater than -0. -0 is never a Smi, +0 can be
- // a Smi or heap number.
- if (n > r || (r === 0 && n === 0 && !%_IsSmi(r) && 1 / r < 0)) r = n;
- }
- return r;
-}
-
-// ECMA 262 - 15.8.2.12
-function MathMin(arg1, arg2) { // length == 2
- var length = %_ArgumentsLength();
- if (length == 0) {
- return 1/0; // Compiler constant-folds this to Infinity.
- }
- var r = arg1;
- if (!IS_NUMBER(r)) r = NonNumberToNumber(r);
- if (NUMBER_IS_NAN(r)) return r;
- for (var i = 1; i < length; i++) {
- var n = %_Arguments(i);
- if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
- if (NUMBER_IS_NAN(n)) return n;
- // Make sure -0 is considered less than +0. -0 is never a Smi, +0 can b a
- // Smi or a heap number.
- if (n < r || (r === 0 && n === 0 && !%_IsSmi(n) && 1 / n < 0)) r = n;
- }
- return r;
-}
-
-// ECMA 262 - 15.8.2.13
-function MathPow(x, y) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- if (!IS_NUMBER(y)) y = NonNumberToNumber(y);
- return %_MathPow(x, y);
-}
-
-// ECMA 262 - 15.8.2.14
-function MathRandom() {
- return %_RandomHeapNumber();
-}
-
-// ECMA 262 - 15.8.2.15
-function MathRound(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %RoundNumber(x);
-}
-
-// ECMA 262 - 15.8.2.16
-function MathSin(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathSin(x);
-}
-
-// ECMA 262 - 15.8.2.17
-function MathSqrt(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathSqrt(x);
-}
-
-// ECMA 262 - 15.8.2.18
-function MathTan(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_tan(x);
-}
-
-
-// -------------------------------------------------------------------
-
-function SetupMath() {
- // Setup math constants.
- // ECMA-262, section 15.8.1.1.
- %OptimizeObjectForAddingMultipleProperties($Math, 8);
- %SetProperty($Math,
- "E",
- 2.7182818284590452354,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- // ECMA-262, section 15.8.1.2.
- %SetProperty($Math,
- "LN10",
- 2.302585092994046,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- // ECMA-262, section 15.8.1.3.
- %SetProperty($Math,
- "LN2",
- 0.6931471805599453,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- // ECMA-262, section 15.8.1.4.
- %SetProperty($Math,
- "LOG2E",
- 1.4426950408889634,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %SetProperty($Math,
- "LOG10E",
- 0.4342944819032518,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %SetProperty($Math,
- "PI",
- 3.1415926535897932,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %SetProperty($Math,
- "SQRT1_2",
- 0.7071067811865476,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %SetProperty($Math,
- "SQRT2",
- 1.4142135623730951,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %ToFastProperties($Math);
-
- // Setup non-enumerable functions of the Math object and
- // set their names.
- InstallFunctionsOnHiddenPrototype($Math, DONT_ENUM, $Array(
- "random", MathRandom,
- "abs", MathAbs,
- "acos", MathAcos,
- "asin", MathAsin,
- "atan", MathAtan,
- "ceil", MathCeil,
- "cos", MathCos,
- "exp", MathExp,
- "floor", MathFloor,
- "log", MathLog,
- "round", MathRound,
- "sin", MathSin,
- "sqrt", MathSqrt,
- "tan", MathTan,
- "atan2", MathAtan2,
- "pow", MathPow,
- "max", MathMax,
- "min", MathMin
- ));
-};
-
-
-SetupMath();
diff --git a/src/3rdparty/v8/src/messages.cc b/src/3rdparty/v8/src/messages.cc
deleted file mode 100644
index cab982c..0000000
--- a/src/3rdparty/v8/src/messages.cc
+++ /dev/null
@@ -1,166 +0,0 @@
-
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "execution.h"
-#include "messages.h"
-#include "spaces-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-// If no message listeners have been registered this one is called
-// by default.
-void MessageHandler::DefaultMessageReport(const MessageLocation* loc,
- Handle<Object> message_obj) {
- SmartPointer<char> str = GetLocalizedMessage(message_obj);
- if (loc == NULL) {
- PrintF("%s\n", *str);
- } else {
- HandleScope scope;
- Handle<Object> data(loc->script()->name());
- SmartPointer<char> data_str;
- if (data->IsString())
- data_str = Handle<String>::cast(data)->ToCString(DISALLOW_NULLS);
- PrintF("%s:%i: %s\n", *data_str ? *data_str : "<unknown>",
- loc->start_pos(), *str);
- }
-}
-
-
-void MessageHandler::ReportMessage(const char* msg) {
- PrintF("%s\n", msg);
-}
-
-
-Handle<JSMessageObject> MessageHandler::MakeMessageObject(
- const char* type,
- MessageLocation* loc,
- Vector< Handle<Object> > args,
- Handle<String> stack_trace,
- Handle<JSArray> stack_frames) {
- Handle<String> type_handle = FACTORY->LookupAsciiSymbol(type);
- Handle<FixedArray> arguments_elements =
- FACTORY->NewFixedArray(args.length());
- for (int i = 0; i < args.length(); i++) {
- arguments_elements->set(i, *args[i]);
- }
- Handle<JSArray> arguments_handle =
- FACTORY->NewJSArrayWithElements(arguments_elements);
-
- int start = 0;
- int end = 0;
- Handle<Object> script_handle = FACTORY->undefined_value();
- if (loc) {
- start = loc->start_pos();
- end = loc->end_pos();
- script_handle = GetScriptWrapper(loc->script());
- }
-
- Handle<Object> stack_trace_handle = stack_trace.is_null()
- ? FACTORY->undefined_value()
- : Handle<Object>::cast(stack_trace);
-
- Handle<Object> stack_frames_handle = stack_frames.is_null()
- ? FACTORY->undefined_value()
- : Handle<Object>::cast(stack_frames);
-
- Handle<JSMessageObject> message =
- FACTORY->NewJSMessageObject(type_handle,
- arguments_handle,
- start,
- end,
- script_handle,
- stack_trace_handle,
- stack_frames_handle);
-
- return message;
-}
-
-
-void MessageHandler::ReportMessage(MessageLocation* loc,
- Handle<Object> message) {
- v8::Local<v8::Message> api_message_obj = v8::Utils::MessageToLocal(message);
-
- v8::NeanderArray global_listeners(FACTORY->message_listeners());
- int global_length = global_listeners.length();
- if (global_length == 0) {
- DefaultMessageReport(loc, message);
- } else {
- for (int i = 0; i < global_length; i++) {
- HandleScope scope;
- if (global_listeners.get(i)->IsUndefined()) continue;
- v8::NeanderObject listener(JSObject::cast(global_listeners.get(i)));
- Handle<Proxy> callback_obj(Proxy::cast(listener.get(0)));
- v8::MessageCallback callback =
- FUNCTION_CAST<v8::MessageCallback>(callback_obj->proxy());
- Handle<Object> callback_data(listener.get(1));
- callback(api_message_obj, v8::Utils::ToLocal(callback_data));
- }
- }
-}
-
-
-Handle<String> MessageHandler::GetMessage(Handle<Object> data) {
- Handle<String> fmt_str = FACTORY->LookupAsciiSymbol("FormatMessage");
- Handle<JSFunction> fun =
- Handle<JSFunction>(
- JSFunction::cast(
- Isolate::Current()->js_builtins_object()->
- GetPropertyNoExceptionThrown(*fmt_str)));
- Object** argv[1] = { data.location() };
-
- bool caught_exception;
- Handle<Object> result =
- Execution::TryCall(fun,
- Isolate::Current()->js_builtins_object(), 1, argv, &caught_exception);
-
- if (caught_exception || !result->IsString()) {
- return FACTORY->LookupAsciiSymbol("<error>");
- }
- Handle<String> result_string = Handle<String>::cast(result);
- // A string that has been obtained from JS code in this way is
- // likely to be a complicated ConsString of some sort. We flatten it
- // here to improve the efficiency of converting it to a C string and
- // other operations that are likely to take place (see GetLocalizedMessage
- // for example).
- FlattenString(result_string);
- return result_string;
-}
-
-
-SmartPointer<char> MessageHandler::GetLocalizedMessage(Handle<Object> data) {
- HandleScope scope;
- return GetMessage(data)->ToCString(DISALLOW_NULLS);
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/messages.h b/src/3rdparty/v8/src/messages.h
deleted file mode 100644
index 48f3244..0000000
--- a/src/3rdparty/v8/src/messages.h
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The infrastructure used for (localized) message reporting in V8.
-//
-// Note: there's a big unresolved issue about ownership of the data
-// structures used by this framework.
-
-#ifndef V8_MESSAGES_H_
-#define V8_MESSAGES_H_
-
-#include "handles-inl.h"
-
-// Forward declaration of MessageLocation.
-namespace v8 {
-namespace internal {
-class MessageLocation;
-} } // namespace v8::internal
-
-
-class V8Message {
- public:
- V8Message(char* type,
- v8::internal::Handle<v8::internal::JSArray> args,
- const v8::internal::MessageLocation* loc) :
- type_(type), args_(args), loc_(loc) { }
- char* type() const { return type_; }
- v8::internal::Handle<v8::internal::JSArray> args() const { return args_; }
- const v8::internal::MessageLocation* loc() const { return loc_; }
- private:
- char* type_;
- v8::internal::Handle<v8::internal::JSArray> const args_;
- const v8::internal::MessageLocation* loc_;
-};
-
-
-namespace v8 {
-namespace internal {
-
-struct Language;
-class SourceInfo;
-
-class MessageLocation {
- public:
- MessageLocation(Handle<Script> script,
- int start_pos,
- int end_pos)
- : script_(script),
- start_pos_(start_pos),
- end_pos_(end_pos) { }
- MessageLocation() : start_pos_(-1), end_pos_(-1) { }
-
- Handle<Script> script() const { return script_; }
- int start_pos() const { return start_pos_; }
- int end_pos() const { return end_pos_; }
-
- private:
- Handle<Script> script_;
- int start_pos_;
- int end_pos_;
-};
-
-
-// A message handler is a convenience interface for accessing the list
-// of message listeners registered in an environment
-class MessageHandler {
- public:
- // Report a message (w/o JS heap allocation).
- static void ReportMessage(const char* msg);
-
- // Returns a message object for the API to use.
- static Handle<JSMessageObject> MakeMessageObject(
- const char* type,
- MessageLocation* loc,
- Vector< Handle<Object> > args,
- Handle<String> stack_trace,
- Handle<JSArray> stack_frames);
-
- // Report a formatted message (needs JS allocation).
- static void ReportMessage(MessageLocation* loc, Handle<Object> message);
-
- static void DefaultMessageReport(const MessageLocation* loc,
- Handle<Object> message_obj);
- static Handle<String> GetMessage(Handle<Object> data);
- static SmartPointer<char> GetLocalizedMessage(Handle<Object> data);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_MESSAGES_H_
diff --git a/src/3rdparty/v8/src/messages.js b/src/3rdparty/v8/src/messages.js
deleted file mode 100644
index 3eb056f..0000000
--- a/src/3rdparty/v8/src/messages.js
+++ /dev/null
@@ -1,1090 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// -------------------------------------------------------------------
-//
-// Matches Script::Type from objects.h
-var TYPE_NATIVE = 0;
-var TYPE_EXTENSION = 1;
-var TYPE_NORMAL = 2;
-
-// Matches Script::CompilationType from objects.h
-var COMPILATION_TYPE_HOST = 0;
-var COMPILATION_TYPE_EVAL = 1;
-var COMPILATION_TYPE_JSON = 2;
-
-// Matches Messages::kNoLineNumberInfo from v8.h
-var kNoLineNumberInfo = 0;
-
-// If this object gets passed to an error constructor the error will
-// get an accessor for .message that constructs a descriptive error
-// message on access.
-var kAddMessageAccessorsMarker = { };
-
-var kMessages = 0;
-
-var kReplacementMarkers = [ "%0", "%1", "%2", "%3" ];
-
-function FormatString(format, message) {
- var args = %MessageGetArguments(message);
- var result = "";
- var arg_num = 0;
- for (var i = 0; i < format.length; i++) {
- var str = format[i];
- for (arg_num = 0; arg_num < kReplacementMarkers.length; arg_num++) {
- if (format[i] !== kReplacementMarkers[arg_num]) continue;
- try {
- str = ToDetailString(args[arg_num]);
- } catch (e) {
- str = "#<error>";
- }
- }
- result += str;
- }
- return result;
-}
-
-
-// To check if something is a native error we need to check the
-// concrete native error types. It is not enough to check "obj
-// instanceof $Error" because user code can replace
-// NativeError.prototype.__proto__. User code cannot replace
-// NativeError.prototype though and therefore this is a safe test.
-function IsNativeErrorObject(obj) {
- return (obj instanceof $Error) ||
- (obj instanceof $EvalError) ||
- (obj instanceof $RangeError) ||
- (obj instanceof $ReferenceError) ||
- (obj instanceof $SyntaxError) ||
- (obj instanceof $TypeError) ||
- (obj instanceof $URIError);
-}
-
-
-// When formatting internally created error messages, do not
-// invoke overwritten error toString methods but explicitly use
-// the error to string method. This is to avoid leaking error
-// objects between script tags in a browser setting.
-function ToStringCheckErrorObject(obj) {
- if (IsNativeErrorObject(obj)) {
- return %_CallFunction(obj, errorToString);
- } else {
- return ToString(obj);
- }
-}
-
-
-function ToDetailString(obj) {
- if (obj != null && IS_OBJECT(obj) && obj.toString === $Object.prototype.toString) {
- var constructor = obj.constructor;
- if (!constructor) return ToStringCheckErrorObject(obj);
- var constructorName = constructor.name;
- if (!constructorName || !IS_STRING(constructorName)) {
- return ToStringCheckErrorObject(obj);
- }
- return "#<" + constructorName + ">";
- } else {
- return ToStringCheckErrorObject(obj);
- }
-}
-
-
-function MakeGenericError(constructor, type, args) {
- if (IS_UNDEFINED(args)) {
- args = [];
- }
- var e = new constructor(kAddMessageAccessorsMarker);
- e.type = type;
- e.arguments = args;
- return e;
-}
-
-
-/**
- * Setup the Script function and constructor.
- */
-%FunctionSetInstanceClassName(Script, 'Script');
-%SetProperty(Script.prototype, 'constructor', Script, DONT_ENUM);
-%SetCode(Script, function(x) {
- // Script objects can only be created by the VM.
- throw new $Error("Not supported");
-});
-
-
-// Helper functions; called from the runtime system.
-function FormatMessage(message) {
- if (kMessages === 0) {
- kMessages = {
- // Error
- cyclic_proto: ["Cyclic __proto__ value"],
- // TypeError
- unexpected_token: ["Unexpected token ", "%0"],
- unexpected_token_number: ["Unexpected number"],
- unexpected_token_string: ["Unexpected string"],
- unexpected_token_identifier: ["Unexpected identifier"],
- unexpected_strict_reserved: ["Unexpected strict mode reserved word"],
- unexpected_eos: ["Unexpected end of input"],
- malformed_regexp: ["Invalid regular expression: /", "%0", "/: ", "%1"],
- unterminated_regexp: ["Invalid regular expression: missing /"],
- regexp_flags: ["Cannot supply flags when constructing one RegExp from another"],
- incompatible_method_receiver: ["Method ", "%0", " called on incompatible receiver ", "%1"],
- invalid_lhs_in_assignment: ["Invalid left-hand side in assignment"],
- invalid_lhs_in_for_in: ["Invalid left-hand side in for-in"],
- invalid_lhs_in_postfix_op: ["Invalid left-hand side expression in postfix operation"],
- invalid_lhs_in_prefix_op: ["Invalid left-hand side expression in prefix operation"],
- multiple_defaults_in_switch: ["More than one default clause in switch statement"],
- newline_after_throw: ["Illegal newline after throw"],
- redeclaration: ["%0", " '", "%1", "' has already been declared"],
- no_catch_or_finally: ["Missing catch or finally after try"],
- unknown_label: ["Undefined label '", "%0", "'"],
- uncaught_exception: ["Uncaught ", "%0"],
- stack_trace: ["Stack Trace:\n", "%0"],
- called_non_callable: ["%0", " is not a function"],
- undefined_method: ["Object ", "%1", " has no method '", "%0", "'"],
- property_not_function: ["Property '", "%0", "' of object ", "%1", " is not a function"],
- cannot_convert_to_primitive: ["Cannot convert object to primitive value"],
- not_constructor: ["%0", " is not a constructor"],
- not_defined: ["%0", " is not defined"],
- non_object_property_load: ["Cannot read property '", "%0", "' of ", "%1"],
- non_object_property_store: ["Cannot set property '", "%0", "' of ", "%1"],
- non_object_property_call: ["Cannot call method '", "%0", "' of ", "%1"],
- with_expression: ["%0", " has no properties"],
- illegal_invocation: ["Illegal invocation"],
- no_setter_in_callback: ["Cannot set property ", "%0", " of ", "%1", " which has only a getter"],
- apply_non_function: ["Function.prototype.apply was called on ", "%0", ", which is a ", "%1", " and not a function"],
- apply_wrong_args: ["Function.prototype.apply: Arguments list has wrong type"],
- invalid_in_operator_use: ["Cannot use 'in' operator to search for '", "%0", "' in ", "%1"],
- instanceof_function_expected: ["Expecting a function in instanceof check, but got ", "%0"],
- instanceof_nonobject_proto: ["Function has non-object prototype '", "%0", "' in instanceof check"],
- null_to_object: ["Cannot convert null to object"],
- reduce_no_initial: ["Reduce of empty array with no initial value"],
- getter_must_be_callable: ["Getter must be a function: ", "%0"],
- setter_must_be_callable: ["Setter must be a function: ", "%0"],
- value_and_accessor: ["Invalid property. A property cannot both have accessors and be writable or have a value: ", "%0"],
- proto_object_or_null: ["Object prototype may only be an Object or null"],
- property_desc_object: ["Property description must be an object: ", "%0"],
- redefine_disallowed: ["Cannot redefine property: ", "%0"],
- define_disallowed: ["Cannot define property, object is not extensible: ", "%0"],
- // RangeError
- invalid_array_length: ["Invalid array length"],
- stack_overflow: ["Maximum call stack size exceeded"],
- // SyntaxError
- unable_to_parse: ["Parse error"],
- duplicate_regexp_flag: ["Duplicate RegExp flag ", "%0"],
- invalid_regexp: ["Invalid RegExp pattern /", "%0", "/"],
- illegal_break: ["Illegal break statement"],
- illegal_continue: ["Illegal continue statement"],
- illegal_return: ["Illegal return statement"],
- error_loading_debugger: ["Error loading debugger"],
- no_input_to_regexp: ["No input to ", "%0"],
- invalid_json: ["String '", "%0", "' is not valid JSON"],
- circular_structure: ["Converting circular structure to JSON"],
- obj_ctor_property_non_object: ["Object.", "%0", " called on non-object"],
- array_indexof_not_defined: ["Array.getIndexOf: Argument undefined"],
- object_not_extensible: ["Can't add property ", "%0", ", object is not extensible"],
- illegal_access: ["Illegal access"],
- invalid_preparser_data: ["Invalid preparser data for function ", "%0"],
- strict_mode_with: ["Strict mode code may not include a with statement"],
- strict_catch_variable: ["Catch variable may not be eval or arguments in strict mode"],
- too_many_parameters: ["Too many parameters in function definition"],
- strict_param_name: ["Parameter name eval or arguments is not allowed in strict mode"],
- strict_param_dupe: ["Strict mode function may not have duplicate parameter names"],
- strict_var_name: ["Variable name may not be eval or arguments in strict mode"],
- strict_function_name: ["Function name may not be eval or arguments in strict mode"],
- strict_octal_literal: ["Octal literals are not allowed in strict mode."],
- strict_duplicate_property: ["Duplicate data property in object literal not allowed in strict mode"],
- accessor_data_property: ["Object literal may not have data and accessor property with the same name"],
- accessor_get_set: ["Object literal may not have multiple get/set accessors with the same name"],
- strict_lhs_assignment: ["Assignment to eval or arguments is not allowed in strict mode"],
- strict_lhs_postfix: ["Postfix increment/decrement may not have eval or arguments operand in strict mode"],
- strict_lhs_prefix: ["Prefix increment/decrement may not have eval or arguments operand in strict mode"],
- strict_reserved_word: ["Use of future reserved word in strict mode"],
- strict_delete: ["Delete of an unqualified identifier in strict mode."],
- strict_delete_property: ["Cannot delete property '", "%0", "' of ", "%1"],
- strict_const: ["Use of const in strict mode."],
- strict_function: ["In strict mode code, functions can only be declared at top level or immediately within another function." ],
- strict_read_only_property: ["Cannot assign to read only property '", "%0", "' of ", "%1"],
- strict_cannot_assign: ["Cannot assign to read only '", "%0", "' in strict mode"],
- strict_arguments_callee: ["Cannot access property 'callee' of strict mode arguments"],
- strict_arguments_caller: ["Cannot access property 'caller' of strict mode arguments"],
- strict_function_caller: ["Cannot access property 'caller' of a strict mode function"],
- strict_function_arguments: ["Cannot access property 'arguments' of a strict mode function"],
- strict_caller: ["Illegal access to a strict mode caller function."],
- };
- }
- var message_type = %MessageGetType(message);
- var format = kMessages[message_type];
- if (!format) return "<unknown message " + message_type + ">";
- return FormatString(format, message);
-}
-
-
-function GetLineNumber(message) {
- var start_position = %MessageGetStartPosition(message);
- if (start_position == -1) return kNoLineNumberInfo;
- var script = %MessageGetScript(message);
- var location = script.locationFromPosition(start_position, true);
- if (location == null) return kNoLineNumberInfo;
- return location.line + 1;
-}
-
-
-// Returns the source code line containing the given source
-// position, or the empty string if the position is invalid.
-function GetSourceLine(message) {
- var script = %MessageGetScript(message);
- var start_position = %MessageGetStartPosition(message);
- var location = script.locationFromPosition(start_position, true);
- if (location == null) return "";
- location.restrict();
- return location.sourceText();
-}
-
-
-function MakeTypeError(type, args) {
- return MakeGenericError($TypeError, type, args);
-}
-
-
-function MakeRangeError(type, args) {
- return MakeGenericError($RangeError, type, args);
-}
-
-
-function MakeSyntaxError(type, args) {
- return MakeGenericError($SyntaxError, type, args);
-}
-
-
-function MakeReferenceError(type, args) {
- return MakeGenericError($ReferenceError, type, args);
-}
-
-
-function MakeEvalError(type, args) {
- return MakeGenericError($EvalError, type, args);
-}
-
-
-function MakeError(type, args) {
- return MakeGenericError($Error, type, args);
-}
-
-/**
- * Find a line number given a specific source position.
- * @param {number} position The source position.
- * @return {number} 0 if input too small, -1 if input too large,
- else the line number.
- */
-Script.prototype.lineFromPosition = function(position) {
- var lower = 0;
- var upper = this.lineCount() - 1;
- var line_ends = this.line_ends;
-
- // We'll never find invalid positions so bail right away.
- if (position > line_ends[upper]) {
- return -1;
- }
-
- // This means we don't have to safe-guard indexing line_ends[i - 1].
- if (position <= line_ends[0]) {
- return 0;
- }
-
- // Binary search to find line # from position range.
- while (upper >= 1) {
- var i = (lower + upper) >> 1;
-
- if (position > line_ends[i]) {
- lower = i + 1;
- } else if (position <= line_ends[i - 1]) {
- upper = i - 1;
- } else {
- return i;
- }
- }
-
- return -1;
-}
-
-/**
- * Get information on a specific source position.
- * @param {number} position The source position
- * @param {boolean} include_resource_offset Set to true to have the resource
- * offset added to the location
- * @return {SourceLocation}
- * If line is negative or not in the source null is returned.
- */
-Script.prototype.locationFromPosition = function (position,
- include_resource_offset) {
- var line = this.lineFromPosition(position);
- if (line == -1) return null;
-
- // Determine start, end and column.
- var line_ends = this.line_ends;
- var start = line == 0 ? 0 : line_ends[line - 1] + 1;
- var end = line_ends[line];
- if (end > 0 && %_CallFunction(this.source, end - 1, StringCharAt) == '\r') end--;
- var column = position - start;
-
- // Adjust according to the offset within the resource.
- if (include_resource_offset) {
- line += this.line_offset;
- if (line == this.line_offset) {
- column += this.column_offset;
- }
- }
-
- return new SourceLocation(this, position, line, column, start, end);
-};
-
-
-/**
- * Get information on a specific source line and column possibly offset by a
- * fixed source position. This function is used to find a source position from
- * a line and column position. The fixed source position offset is typically
- * used to find a source position in a function based on a line and column in
- * the source for the function alone. The offset passed will then be the
- * start position of the source for the function within the full script source.
- * @param {number} opt_line The line within the source. Default value is 0
- * @param {number} opt_column The column in within the line. Default value is 0
- * @param {number} opt_offset_position The offset from the begining of the
- * source from where the line and column calculation starts. Default value is 0
- * @return {SourceLocation}
- * If line is negative or not in the source null is returned.
- */
-Script.prototype.locationFromLine = function (opt_line, opt_column, opt_offset_position) {
- // Default is the first line in the script. Lines in the script is relative
- // to the offset within the resource.
- var line = 0;
- if (!IS_UNDEFINED(opt_line)) {
- line = opt_line - this.line_offset;
- }
-
- // Default is first column. If on the first line add the offset within the
- // resource.
- var column = opt_column || 0;
- if (line == 0) {
- column -= this.column_offset
- }
-
- var offset_position = opt_offset_position || 0;
- if (line < 0 || column < 0 || offset_position < 0) return null;
- if (line == 0) {
- return this.locationFromPosition(offset_position + column, false);
- } else {
- // Find the line where the offset position is located.
- var offset_line = this.lineFromPosition(offset_position);
-
- if (offset_line == -1 || offset_line + line >= this.lineCount()) {
- return null;
- }
-
- return this.locationFromPosition(this.line_ends[offset_line + line - 1] + 1 + column); // line > 0 here.
- }
-}
-
-
-/**
- * Get a slice of source code from the script. The boundaries for the slice is
- * specified in lines.
- * @param {number} opt_from_line The first line (zero bound) in the slice.
- * Default is 0
- * @param {number} opt_to_column The last line (zero bound) in the slice (non
- * inclusive). Default is the number of lines in the script
- * @return {SourceSlice} The source slice or null of the parameters where
- * invalid
- */
-Script.prototype.sourceSlice = function (opt_from_line, opt_to_line) {
- var from_line = IS_UNDEFINED(opt_from_line) ? this.line_offset : opt_from_line;
- var to_line = IS_UNDEFINED(opt_to_line) ? this.line_offset + this.lineCount() : opt_to_line
-
- // Adjust according to the offset within the resource.
- from_line -= this.line_offset;
- to_line -= this.line_offset;
- if (from_line < 0) from_line = 0;
- if (to_line > this.lineCount()) to_line = this.lineCount();
-
- // Check parameters.
- if (from_line >= this.lineCount() ||
- to_line < 0 ||
- from_line > to_line) {
- return null;
- }
-
- var line_ends = this.line_ends;
- var from_position = from_line == 0 ? 0 : line_ends[from_line - 1] + 1;
- var to_position = to_line == 0 ? 0 : line_ends[to_line - 1] + 1;
-
- // Return a source slice with line numbers re-adjusted to the resource.
- return new SourceSlice(this, from_line + this.line_offset, to_line + this.line_offset,
- from_position, to_position);
-}
-
-
-Script.prototype.sourceLine = function (opt_line) {
- // Default is the first line in the script. Lines in the script are relative
- // to the offset within the resource.
- var line = 0;
- if (!IS_UNDEFINED(opt_line)) {
- line = opt_line - this.line_offset;
- }
-
- // Check parameter.
- if (line < 0 || this.lineCount() <= line) {
- return null;
- }
-
- // Return the source line.
- var line_ends = this.line_ends;
- var start = line == 0 ? 0 : line_ends[line - 1] + 1;
- var end = line_ends[line];
- return %_CallFunction(this.source, start, end, StringSubstring);
-}
-
-
-/**
- * Returns the number of source lines.
- * @return {number}
- * Number of source lines.
- */
-Script.prototype.lineCount = function() {
- // Return number of source lines.
- return this.line_ends.length;
-};
-
-
-/**
- * Returns the name of script if available, contents of sourceURL comment
- * otherwise. See
- * http://fbug.googlecode.com/svn/branches/firebug1.1/docs/ReleaseNotes_1.1.txt
- * for details on using //@ sourceURL comment to identify scritps that don't
- * have name.
- *
- * @return {?string} script name if present, value for //@ sourceURL comment
- * otherwise.
- */
-Script.prototype.nameOrSourceURL = function() {
- if (this.name)
- return this.name;
- // TODO(608): the spaces in a regexp below had to be escaped as \040
- // because this file is being processed by js2c whose handling of spaces
- // in regexps is broken. Also, ['"] are excluded from allowed URLs to
- // avoid matches against sources that invoke evals with sourceURL.
- // A better solution would be to detect these special comments in
- // the scanner/parser.
- var source = ToString(this.source);
- var sourceUrlPos = %StringIndexOf(source, "sourceURL=", 0);
- if (sourceUrlPos > 4) {
- var sourceUrlPattern =
- /\/\/@[\040\t]sourceURL=[\040\t]*([^\s\'\"]*)[\040\t]*$/gm;
- // Don't reuse lastMatchInfo here, so we create a new array with room
- // for four captures (array with length one longer than the index
- // of the fourth capture, where the numbering is zero-based).
- var matchInfo = new InternalArray(CAPTURE(3) + 1);
- var match =
- %_RegExpExec(sourceUrlPattern, source, sourceUrlPos - 4, matchInfo);
- if (match) {
- return SubString(source, matchInfo[CAPTURE(2)], matchInfo[CAPTURE(3)]);
- }
- }
- return this.name;
-}
-
-
-/**
- * Class for source location. A source location is a position within some
- * source with the following properties:
- * script : script object for the source
- * line : source line number
- * column : source column within the line
- * position : position within the source
- * start : position of start of source context (inclusive)
- * end : position of end of source context (not inclusive)
- * Source text for the source context is the character interval [start, end[. In
- * most cases end will point to a newline character. It might point just past
- * the final position of the source if the last source line does not end with a
- * newline character.
- * @param {Script} script The Script object for which this is a location
- * @param {number} position Source position for the location
- * @param {number} line The line number for the location
- * @param {number} column The column within the line for the location
- * @param {number} start Source position for start of source context
- * @param {number} end Source position for end of source context
- * @constructor
- */
-function SourceLocation(script, position, line, column, start, end) {
- this.script = script;
- this.position = position;
- this.line = line;
- this.column = column;
- this.start = start;
- this.end = end;
-}
-
-
-const kLineLengthLimit = 78;
-
-/**
- * Restrict source location start and end positions to make the source slice
- * no more that a certain number of characters wide.
- * @param {number} opt_limit The with limit of the source text with a default
- * of 78
- * @param {number} opt_before The number of characters to prefer before the
- * position with a default value of 10 less that the limit
- */
-SourceLocation.prototype.restrict = function (opt_limit, opt_before) {
- // Find the actual limit to use.
- var limit;
- var before;
- if (!IS_UNDEFINED(opt_limit)) {
- limit = opt_limit;
- } else {
- limit = kLineLengthLimit;
- }
- if (!IS_UNDEFINED(opt_before)) {
- before = opt_before;
- } else {
- // If no before is specified center for small limits and perfer more source
- // before the the position that after for longer limits.
- if (limit <= 20) {
- before = $floor(limit / 2);
- } else {
- before = limit - 10;
- }
- }
- if (before >= limit) {
- before = limit - 1;
- }
-
- // If the [start, end[ interval is too big we restrict
- // it in one or both ends. We make sure to always produce
- // restricted intervals of maximum allowed size.
- if (this.end - this.start > limit) {
- var start_limit = this.position - before;
- var end_limit = this.position + limit - before;
- if (this.start < start_limit && end_limit < this.end) {
- this.start = start_limit;
- this.end = end_limit;
- } else if (this.start < start_limit) {
- this.start = this.end - limit;
- } else {
- this.end = this.start + limit;
- }
- }
-};
-
-
-/**
- * Get the source text for a SourceLocation
- * @return {String}
- * Source text for this location.
- */
-SourceLocation.prototype.sourceText = function () {
- return %_CallFunction(this.script.source, this.start, this.end, StringSubstring);
-};
-
-
-/**
- * Class for a source slice. A source slice is a part of a script source with
- * the following properties:
- * script : script object for the source
- * from_line : line number for the first line in the slice
- * to_line : source line number for the last line in the slice
- * from_position : position of the first character in the slice
- * to_position : position of the last character in the slice
- * The to_line and to_position are not included in the slice, that is the lines
- * in the slice are [from_line, to_line[. Likewise the characters in the slice
- * are [from_position, to_position[.
- * @param {Script} script The Script object for the source slice
- * @param {number} from_line
- * @param {number} to_line
- * @param {number} from_position
- * @param {number} to_position
- * @constructor
- */
-function SourceSlice(script, from_line, to_line, from_position, to_position) {
- this.script = script;
- this.from_line = from_line;
- this.to_line = to_line;
- this.from_position = from_position;
- this.to_position = to_position;
-}
-
-
-/**
- * Get the source text for a SourceSlice
- * @return {String} Source text for this slice. The last line will include
- * the line terminating characters (if any)
- */
-SourceSlice.prototype.sourceText = function () {
- return %_CallFunction(this.script.source,
- this.from_position,
- this.to_position,
- StringSubstring);
-};
-
-
-// Returns the offset of the given position within the containing
-// line.
-function GetPositionInLine(message) {
- var script = %MessageGetScript(message);
- var start_position = %MessageGetStartPosition(message);
- var location = script.locationFromPosition(start_position, false);
- if (location == null) return -1;
- location.restrict();
- return start_position - location.start;
-}
-
-
-function GetStackTraceLine(recv, fun, pos, isGlobal) {
- return FormatSourcePosition(new CallSite(recv, fun, pos));
-}
-
-// ----------------------------------------------------------------------------
-// Error implementation
-
-// Defines accessors for a property that is calculated the first time
-// the property is read.
-function DefineOneShotAccessor(obj, name, fun) {
- // Note that the accessors consistently operate on 'obj', not 'this'.
- // Since the object may occur in someone else's prototype chain we
- // can't rely on 'this' being the same as 'obj'.
- var hasBeenSet = false;
- var value;
- obj.__defineGetter__(name, function () {
- if (hasBeenSet) {
- return value;
- }
- hasBeenSet = true;
- value = fun(obj);
- return value;
- });
- obj.__defineSetter__(name, function (v) {
- hasBeenSet = true;
- value = v;
- });
-}
-
-function CallSite(receiver, fun, pos) {
- this.receiver = receiver;
- this.fun = fun;
- this.pos = pos;
-}
-
-CallSite.prototype.getThis = function () {
- return this.receiver;
-};
-
-CallSite.prototype.getTypeName = function () {
- var constructor = this.receiver.constructor;
- if (!constructor)
- return %_CallFunction(this.receiver, ObjectToString);
- var constructorName = constructor.name;
- if (!constructorName)
- return %_CallFunction(this.receiver, ObjectToString);
- return constructorName;
-};
-
-CallSite.prototype.isToplevel = function () {
- if (this.receiver == null)
- return true;
- return IS_GLOBAL(this.receiver);
-};
-
-CallSite.prototype.isEval = function () {
- var script = %FunctionGetScript(this.fun);
- return script && script.compilation_type == COMPILATION_TYPE_EVAL;
-};
-
-CallSite.prototype.getEvalOrigin = function () {
- var script = %FunctionGetScript(this.fun);
- return FormatEvalOrigin(script);
-};
-
-CallSite.prototype.getScriptNameOrSourceURL = function () {
- var script = %FunctionGetScript(this.fun);
- return script ? script.nameOrSourceURL() : null;
-};
-
-CallSite.prototype.getFunction = function () {
- return this.fun;
-};
-
-CallSite.prototype.getFunctionName = function () {
- // See if the function knows its own name
- var name = this.fun.name;
- if (name) {
- return name;
- } else {
- return %FunctionGetInferredName(this.fun);
- }
- // Maybe this is an evaluation?
- var script = %FunctionGetScript(this.fun);
- if (script && script.compilation_type == COMPILATION_TYPE_EVAL)
- return "eval";
- return null;
-};
-
-CallSite.prototype.getMethodName = function () {
- // See if we can find a unique property on the receiver that holds
- // this function.
- var ownName = this.fun.name;
- if (ownName && this.receiver &&
- (%_CallFunction(this.receiver, ownName, ObjectLookupGetter) === this.fun ||
- %_CallFunction(this.receiver, ownName, ObjectLookupSetter) === this.fun ||
- this.receiver[ownName] === this.fun)) {
- // To handle DontEnum properties we guess that the method has
- // the same name as the function.
- return ownName;
- }
- var name = null;
- for (var prop in this.receiver) {
- if (this.receiver.__lookupGetter__(prop) === this.fun ||
- this.receiver.__lookupSetter__(prop) === this.fun ||
- (!this.receiver.__lookupGetter__(prop) && this.receiver[prop] === this.fun)) {
- // If we find more than one match bail out to avoid confusion.
- if (name)
- return null;
- name = prop;
- }
- }
- if (name)
- return name;
- return null;
-};
-
-CallSite.prototype.getFileName = function () {
- var script = %FunctionGetScript(this.fun);
- return script ? script.name : null;
-};
-
-CallSite.prototype.getLineNumber = function () {
- if (this.pos == -1)
- return null;
- var script = %FunctionGetScript(this.fun);
- var location = null;
- if (script) {
- location = script.locationFromPosition(this.pos, true);
- }
- return location ? location.line + 1 : null;
-};
-
-CallSite.prototype.getColumnNumber = function () {
- if (this.pos == -1)
- return null;
- var script = %FunctionGetScript(this.fun);
- var location = null;
- if (script) {
- location = script.locationFromPosition(this.pos, true);
- }
- return location ? location.column + 1: null;
-};
-
-CallSite.prototype.isNative = function () {
- var script = %FunctionGetScript(this.fun);
- return script ? (script.type == TYPE_NATIVE) : false;
-};
-
-CallSite.prototype.getPosition = function () {
- return this.pos;
-};
-
-CallSite.prototype.isConstructor = function () {
- var constructor = this.receiver ? this.receiver.constructor : null;
- if (!constructor)
- return false;
- return this.fun === constructor;
-};
-
-function FormatEvalOrigin(script) {
- var sourceURL = script.nameOrSourceURL();
- if (sourceURL)
- return sourceURL;
-
- var eval_origin = "eval at ";
- if (script.eval_from_function_name) {
- eval_origin += script.eval_from_function_name;
- } else {
- eval_origin += "<anonymous>";
- }
-
- var eval_from_script = script.eval_from_script;
- if (eval_from_script) {
- if (eval_from_script.compilation_type == COMPILATION_TYPE_EVAL) {
- // eval script originated from another eval.
- eval_origin += " (" + FormatEvalOrigin(eval_from_script) + ")";
- } else {
- // eval script originated from "real" source.
- if (eval_from_script.name) {
- eval_origin += " (" + eval_from_script.name;
- var location = eval_from_script.locationFromPosition(script.eval_from_script_position, true);
- if (location) {
- eval_origin += ":" + (location.line + 1);
- eval_origin += ":" + (location.column + 1);
- }
- eval_origin += ")"
- } else {
- eval_origin += " (unknown source)";
- }
- }
- }
-
- return eval_origin;
-};
-
-function FormatSourcePosition(frame) {
- var fileName;
- var fileLocation = "";
- if (frame.isNative()) {
- fileLocation = "native";
- } else if (frame.isEval()) {
- fileName = frame.getScriptNameOrSourceURL();
- if (!fileName)
- fileLocation = frame.getEvalOrigin();
- } else {
- fileName = frame.getFileName();
- }
-
- if (fileName) {
- fileLocation += fileName;
- var lineNumber = frame.getLineNumber();
- if (lineNumber != null) {
- fileLocation += ":" + lineNumber;
- var columnNumber = frame.getColumnNumber();
- if (columnNumber) {
- fileLocation += ":" + columnNumber;
- }
- }
- }
-
- if (!fileLocation) {
- fileLocation = "unknown source";
- }
- var line = "";
- var functionName = frame.getFunction().name;
- var addPrefix = true;
- var isConstructor = frame.isConstructor();
- var isMethodCall = !(frame.isToplevel() || isConstructor);
- if (isMethodCall) {
- var methodName = frame.getMethodName();
- line += frame.getTypeName() + ".";
- if (functionName) {
- line += functionName;
- if (methodName && (methodName != functionName)) {
- line += " [as " + methodName + "]";
- }
- } else {
- line += methodName || "<anonymous>";
- }
- } else if (isConstructor) {
- line += "new " + (functionName || "<anonymous>");
- } else if (functionName) {
- line += functionName;
- } else {
- line += fileLocation;
- addPrefix = false;
- }
- if (addPrefix) {
- line += " (" + fileLocation + ")";
- }
- return line;
-}
-
-function FormatStackTrace(error, frames) {
- var lines = [];
- try {
- lines.push(error.toString());
- } catch (e) {
- try {
- lines.push("<error: " + e + ">");
- } catch (ee) {
- lines.push("<error>");
- }
- }
- for (var i = 0; i < frames.length; i++) {
- var frame = frames[i];
- var line;
- try {
- line = FormatSourcePosition(frame);
- } catch (e) {
- try {
- line = "<error: " + e + ">";
- } catch (ee) {
- // Any code that reaches this point is seriously nasty!
- line = "<error>";
- }
- }
- lines.push(" at " + line);
- }
- return lines.join("\n");
-}
-
-function FormatRawStackTrace(error, raw_stack) {
- var frames = [ ];
- for (var i = 0; i < raw_stack.length; i += 4) {
- var recv = raw_stack[i];
- var fun = raw_stack[i + 1];
- var code = raw_stack[i + 2];
- var pc = raw_stack[i + 3];
- var pos = %FunctionGetPositionForOffset(code, pc);
- frames.push(new CallSite(recv, fun, pos));
- }
- if (IS_FUNCTION($Error.prepareStackTrace)) {
- return $Error.prepareStackTrace(error, frames);
- } else {
- return FormatStackTrace(error, frames);
- }
-}
-
-function DefineError(f) {
- // Store the error function in both the global object
- // and the runtime object. The function is fetched
- // from the runtime object when throwing errors from
- // within the runtime system to avoid strange side
- // effects when overwriting the error functions from
- // user code.
- var name = f.name;
- %SetProperty(global, name, f, DONT_ENUM);
- this['$' + name] = f;
- // Configure the error function.
- if (name == 'Error') {
- // The prototype of the Error object must itself be an error.
- // However, it can't be an instance of the Error object because
- // it hasn't been properly configured yet. Instead we create a
- // special not-a-true-error-but-close-enough object.
- function ErrorPrototype() {}
- %FunctionSetPrototype(ErrorPrototype, $Object.prototype);
- %FunctionSetInstanceClassName(ErrorPrototype, 'Error');
- %FunctionSetPrototype(f, new ErrorPrototype());
- } else {
- %FunctionSetPrototype(f, new $Error());
- }
- %FunctionSetInstanceClassName(f, 'Error');
- %SetProperty(f.prototype, 'constructor', f, DONT_ENUM);
- // The name property on the prototype of error objects is not
- // specified as being read-one and dont-delete. However, allowing
- // overwriting allows leaks of error objects between script blocks
- // in the same context in a browser setting. Therefore we fix the
- // name.
- %SetProperty(f.prototype, "name", name, READ_ONLY | DONT_DELETE);
- %SetCode(f, function(m) {
- if (%_IsConstructCall()) {
- // Define all the expected properties directly on the error
- // object. This avoids going through getters and setters defined
- // on prototype objects.
- %IgnoreAttributesAndSetProperty(this, 'stack', void 0);
- %IgnoreAttributesAndSetProperty(this, 'arguments', void 0);
- %IgnoreAttributesAndSetProperty(this, 'type', void 0);
- if (m === kAddMessageAccessorsMarker) {
- // DefineOneShotAccessor always inserts a message property and
- // ignores setters.
- DefineOneShotAccessor(this, 'message', function (obj) {
- return FormatMessage(%NewMessageObject(obj.type, obj.arguments));
- });
- } else if (!IS_UNDEFINED(m)) {
- %IgnoreAttributesAndSetProperty(this, 'message', ToString(m));
- }
- captureStackTrace(this, f);
- } else {
- return new f(m);
- }
- });
-}
-
-function captureStackTrace(obj, cons_opt) {
- var stackTraceLimit = $Error.stackTraceLimit;
- if (!stackTraceLimit || !IS_NUMBER(stackTraceLimit)) return;
- if (stackTraceLimit < 0 || stackTraceLimit > 10000)
- stackTraceLimit = 10000;
- var raw_stack = %CollectStackTrace(cons_opt
- ? cons_opt
- : captureStackTrace, stackTraceLimit);
- DefineOneShotAccessor(obj, 'stack', function (obj) {
- return FormatRawStackTrace(obj, raw_stack);
- });
-};
-
-$Math.__proto__ = global.Object.prototype;
-
-DefineError(function Error() { });
-DefineError(function TypeError() { });
-DefineError(function RangeError() { });
-DefineError(function SyntaxError() { });
-DefineError(function ReferenceError() { });
-DefineError(function EvalError() { });
-DefineError(function URIError() { });
-
-$Error.captureStackTrace = captureStackTrace;
-
-// Setup extra properties of the Error.prototype object.
-$Error.prototype.message = '';
-
-// Global list of error objects visited during errorToString. This is
-// used to detect cycles in error toString formatting.
-var visited_errors = new $Array();
-var cyclic_error_marker = new $Object();
-
-function errorToStringDetectCycle() {
- if (!%PushIfAbsent(visited_errors, this)) throw cyclic_error_marker;
- try {
- var type = this.type;
- if (type && !%_CallFunction(this, "message", ObjectHasOwnProperty)) {
- var formatted = FormatMessage(%NewMessageObject(type, this.arguments));
- return this.name + ": " + formatted;
- }
- var message = %_CallFunction(this, "message", ObjectHasOwnProperty)
- ? (": " + this.message)
- : "";
- return this.name + message;
- } finally {
- visited_errors.length = visited_errors.length - 1;
- }
-}
-
-function errorToString() {
- // This helper function is needed because access to properties on
- // the builtins object do not work inside of a catch clause.
- function isCyclicErrorMarker(o) { return o === cyclic_error_marker; }
-
- try {
- return %_CallFunction(this, errorToStringDetectCycle);
- } catch(e) {
- // If this error message was encountered already return the empty
- // string for it instead of recursively formatting it.
- if (isCyclicErrorMarker(e)) return '';
- else throw e;
- }
-}
-
-
-InstallFunctions($Error.prototype, DONT_ENUM, ['toString', errorToString]);
-
-// Boilerplate for exceptions for stack overflows. Used from
-// Isolate::StackOverflow().
-const kStackOverflowBoilerplate = MakeRangeError('stack_overflow', []);
diff --git a/src/3rdparty/v8/src/mips/assembler-mips-inl.h b/src/3rdparty/v8/src/mips/assembler-mips-inl.h
deleted file mode 100644
index f7453d1..0000000
--- a/src/3rdparty/v8/src/mips/assembler-mips-inl.h
+++ /dev/null
@@ -1,335 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
-
-
-#ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
-#define V8_MIPS_ASSEMBLER_MIPS_INL_H_
-
-#include "mips/assembler-mips.h"
-#include "cpu.h"
-#include "debug.h"
-
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------------
-// Operand and MemOperand
-
-Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
- rm_ = no_reg;
- imm32_ = immediate;
- rmode_ = rmode;
-}
-
-
-Operand::Operand(const ExternalReference& f) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(f.address());
- rmode_ = RelocInfo::EXTERNAL_REFERENCE;
-}
-
-
-Operand::Operand(Smi* value) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE;
-}
-
-
-Operand::Operand(Register rm) {
- rm_ = rm;
-}
-
-
-bool Operand::is_reg() const {
- return rm_.is_valid();
-}
-
-
-
-// -----------------------------------------------------------------------------
-// RelocInfo
-
-void RelocInfo::apply(intptr_t delta) {
- // On MIPS we do not use pc relative addressing, so we don't need to patch the
- // code here.
-}
-
-
-Address RelocInfo::target_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- return Assembler::target_address_at(pc_);
-}
-
-
-Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
- || rmode_ == EMBEDDED_OBJECT
- || rmode_ == EXTERNAL_REFERENCE);
- // Read the address of the word containing the target_address in an
- // instruction stream.
- // The only architecture-independent user of this function is the serializer.
- // The serializer uses it to find out how many raw bytes of instruction to
- // output before the next target.
- // For an instructions like LUI/ORI where the target bits are mixed into the
- // instruction bits, the size of the target will be zero, indicating that the
- // serializer should not step forward in memory after a target is resolved
- // and written. In this case the target_address_address function should
- // return the end of the instructions to be patched, allowing the
- // deserializer to deserialize the instructions as raw bytes and put them in
- // place, ready to be patched with the target. In our case, that is the
- // address of the instruction that follows LUI/ORI instruction pair.
- return reinterpret_cast<Address>(
- pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
-}
-
-
-int RelocInfo::target_address_size() {
- return Assembler::kExternalTargetSize;
-}
-
-
-void RelocInfo::set_target_address(Address target) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- Assembler::set_target_address_at(pc_, target);
-}
-
-
-Object* RelocInfo::target_object() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
-}
-
-
-Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<Object>(reinterpret_cast<Object**>(
- Assembler::target_address_at(pc_)));
-}
-
-
-Object** RelocInfo::target_object_address() {
- // Provide a "natural pointer" to the embedded object,
- // which can be de-referenced during heap iteration.
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- // TODO(mips): Commenting out, to simplify arch-independent changes.
- // GC won't work like this, but this commit is for asm/disasm/sim.
- // reconstructed_obj_ptr_ =
- // reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
- // return &reconstructed_obj_ptr_;
- return NULL;
-}
-
-
-void RelocInfo::set_target_object(Object* target) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
-}
-
-
-Address* RelocInfo::target_reference_address() {
- ASSERT(rmode_ == EXTERNAL_REFERENCE);
- // TODO(mips): Commenting out, to simplify arch-independent changes.
- // GC won't work like this, but this commit is for asm/disasm/sim.
- // reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
- // return &reconstructed_adr_ptr_;
- return NULL;
-}
-
-
-Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- return Handle<JSGlobalPropertyCell>(
- reinterpret_cast<JSGlobalPropertyCell**>(address));
-}
-
-
-JSGlobalPropertyCell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- Object* object = HeapObject::FromAddress(
- address - JSGlobalPropertyCell::kValueOffset);
- return reinterpret_cast<JSGlobalPropertyCell*>(object);
-}
-
-
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
- Memory::Address_at(pc_) = address;
-}
-
-
-Address RelocInfo::call_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- // The pc_ offset of 0 assumes mips patched return sequence per
- // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
- // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
- return Assembler::target_address_at(pc_);
-}
-
-
-void RelocInfo::set_call_address(Address target) {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- // The pc_ offset of 0 assumes mips patched return sequence per
- // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
- // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
- Assembler::set_target_address_at(pc_, target);
-}
-
-
-Object* RelocInfo::call_object() {
- return *call_object_address();
-}
-
-
-Object** RelocInfo::call_object_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
- *call_object_address() = target;
-}
-
-
-bool RelocInfo::IsPatchedReturnSequence() {
- Instr instr0 = Assembler::instr_at(pc_);
- Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
- Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize);
- bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
- (instr1 & kOpcodeMask) == ORI &&
- (instr2 & kOpcodeMask) == SPECIAL &&
- (instr2 & kFunctionFieldMask) == JALR);
- return patched_return;
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- Instr current_instr = Assembler::instr_at(pc_);
- return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
-}
-
-
-void RelocInfo::Visit(ObjectVisitor* visitor) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- // RelocInfo is needed when pointer must be updated/serialized, such as
- // UpdatingVisitor in mark-compact.cc or Serializer in serialize.cc.
- // It is ignored by visitors that do not need it.
- // Commenting out, to simplify arch-independednt changes.
- // GC won't work like this, but this commit is for asm/disasm/sim.
- // visitor->VisitPointer(target_object_address(), this);
- } else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- // RelocInfo is needed when external-references must be serialized by
- // Serializer Visitor in serialize.cc. It is ignored by visitors that
- // do not need it.
- // Commenting out, to simplify arch-independednt changes.
- // Serializer won't work like this, but this commit is for asm/disasm/sim.
- // visitor->VisitExternalReference(target_reference_address(), this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
- } else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
- visitor->VisitDebugTarget(this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
- visitor->VisitRuntimeEntry(this);
- }
-}
-
-
-template<typename StaticVisitor>
-void RelocInfo::Visit(Heap* heap) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitPointer(heap, target_object_address());
- } else if (RelocInfo::IsCodeTarget(mode)) {
- StaticVisitor::VisitCodeTarget(this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- StaticVisitor::VisitExternalReference(target_reference_address());
-#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (heap->isolate()->debug()->has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
- StaticVisitor::VisitDebugTarget(this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
- StaticVisitor::VisitRuntimeEntry(this);
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Assembler
-
-
-void Assembler::CheckBuffer() {
- if (buffer_space() <= kGap) {
- GrowBuffer();
- }
-}
-
-
-void Assembler::CheckTrampolinePoolQuick() {
- if (pc_offset() >= next_buffer_check_) {
- CheckTrampolinePool();
- }
-}
-
-
-void Assembler::emit(Instr x) {
- CheckBuffer();
- *reinterpret_cast<Instr*>(pc_) = x;
- pc_ += kInstrSize;
- CheckTrampolinePoolQuick();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_MIPS_ASSEMBLER_MIPS_INL_H_
diff --git a/src/3rdparty/v8/src/mips/assembler-mips.cc b/src/3rdparty/v8/src/mips/assembler-mips.cc
deleted file mode 100644
index 7d00da1..0000000
--- a/src/3rdparty/v8/src/mips/assembler-mips.cc
+++ /dev/null
@@ -1,2093 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
-
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "mips/assembler-mips-inl.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-CpuFeatures::CpuFeatures()
- : supported_(0),
- enabled_(0),
- found_by_runtime_probing_(0) {
-}
-
-void CpuFeatures::Probe(bool portable) {
- // If the compiler is allowed to use fpu then we can use fpu too in our
- // code generation.
-#if !defined(__mips__)
- // For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled.
- if (FLAG_enable_fpu) {
- supported_ |= 1u << FPU;
- }
-#else
- if (portable && Serializer::enabled()) {
- supported_ |= OS::CpuFeaturesImpliedByPlatform();
- return; // No features if we might serialize.
- }
-
- if (OS::MipsCpuHasFeature(FPU)) {
- // This implementation also sets the FPU flags if
- // runtime detection of FPU returns true.
- supported_ |= 1u << FPU;
- found_by_runtime_probing_ |= 1u << FPU;
- }
-
- if (!portable) found_by_runtime_probing_ = 0;
-#endif
-}
-
-
-int ToNumber(Register reg) {
- ASSERT(reg.is_valid());
- const int kNumbers[] = {
- 0, // zero_reg
- 1, // at
- 2, // v0
- 3, // v1
- 4, // a0
- 5, // a1
- 6, // a2
- 7, // a3
- 8, // t0
- 9, // t1
- 10, // t2
- 11, // t3
- 12, // t4
- 13, // t5
- 14, // t6
- 15, // t7
- 16, // s0
- 17, // s1
- 18, // s2
- 19, // s3
- 20, // s4
- 21, // s5
- 22, // s6
- 23, // s7
- 24, // t8
- 25, // t9
- 26, // k0
- 27, // k1
- 28, // gp
- 29, // sp
- 30, // s8_fp
- 31, // ra
- };
- return kNumbers[reg.code()];
-}
-
-
-Register ToRegister(int num) {
- ASSERT(num >= 0 && num < kNumRegisters);
- const Register kRegisters[] = {
- zero_reg,
- at,
- v0, v1,
- a0, a1, a2, a3,
- t0, t1, t2, t3, t4, t5, t6, t7,
- s0, s1, s2, s3, s4, s5, s6, s7,
- t8, t9,
- k0, k1,
- gp,
- sp,
- s8_fp,
- ra
- };
- return kRegisters[num];
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo.
-
-const int RelocInfo::kApplyMask = 0;
-
-
-bool RelocInfo::IsCodedSpecially() {
- // The deserializer needs to know whether a pointer is specially coded. Being
- // specially coded on MIPS means that it is a lui/ori instruction, and that is
- // always the case inside code objects.
- return true;
-}
-
-
-// Patch the code at the current address with the supplied instructions.
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- Instr* pc = reinterpret_cast<Instr*>(pc_);
- Instr* instr = reinterpret_cast<Instr*>(instructions);
- for (int i = 0; i < instruction_count; i++) {
- *(pc + i) = *(instr + i);
- }
-
- // Indicate that code has changed.
- CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
-}
-
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Patch the code at the current address with a call to the target.
- UNIMPLEMENTED_MIPS();
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Operand and MemOperand.
-// See assembler-mips-inl.h for inlined constructors.
-
-Operand::Operand(Handle<Object> handle) {
- rm_ = no_reg;
- // Verify all Objects referred by code are NOT in new space.
- Object* obj = *handle;
- ASSERT(!HEAP->InNewSpace(obj));
- if (obj->IsHeapObject()) {
- imm32_ = reinterpret_cast<intptr_t>(handle.location());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
- } else {
- // No relocation needed.
- imm32_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE;
- }
-}
-
-
-MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
- offset_ = offset;
-}
-
-
-// -----------------------------------------------------------------------------
-// Specific instructions, constants, and masks.
-
-static const int kNegOffset = 0x00008000;
-// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
-// operations as post-increment of sp.
-const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift)
- | (sp.code() << kRtShift) | (kPointerSize & kImm16Mask);
-// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
-const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift)
- | (sp.code() << kRtShift) | (-kPointerSize & kImm16Mask);
-// sw(r, MemOperand(sp, 0))
-const Instr kPushRegPattern = SW | (sp.code() << kRsShift)
- | (0 & kImm16Mask);
-// lw(r, MemOperand(sp, 0))
-const Instr kPopRegPattern = LW | (sp.code() << kRsShift)
- | (0 & kImm16Mask);
-
-const Instr kLwRegFpOffsetPattern = LW | (s8_fp.code() << kRsShift)
- | (0 & kImm16Mask);
-
-const Instr kSwRegFpOffsetPattern = SW | (s8_fp.code() << kRsShift)
- | (0 & kImm16Mask);
-
-const Instr kLwRegFpNegOffsetPattern = LW | (s8_fp.code() << kRsShift)
- | (kNegOffset & kImm16Mask);
-
-const Instr kSwRegFpNegOffsetPattern = SW | (s8_fp.code() << kRsShift)
- | (kNegOffset & kImm16Mask);
-// A mask for the Rt register for push, pop, lw, sw instructions.
-const Instr kRtMask = kRtFieldMask;
-const Instr kLwSwInstrTypeMask = 0xffe00000;
-const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
-const Instr kLwSwOffsetMask = kImm16Mask;
-
-
-// Spare buffer.
-static const int kMinimalBufferSize = 4 * KB;
-
-
-Assembler::Assembler(void* buffer, int buffer_size)
- : AssemblerBase(Isolate::Current()),
- positions_recorder_(this),
- allow_peephole_optimization_(false) {
- // BUG(3245989): disable peephole optimization if crankshaft is enabled.
- allow_peephole_optimization_ = FLAG_peephole_optimization;
- if (buffer == NULL) {
- // Do our own buffer management.
- if (buffer_size <= kMinimalBufferSize) {
- buffer_size = kMinimalBufferSize;
-
- if (isolate()->assembler_spare_buffer() != NULL) {
- buffer = isolate()->assembler_spare_buffer();
- isolate()->set_assembler_spare_buffer(NULL);
- }
- }
- if (buffer == NULL) {
- buffer_ = NewArray<byte>(buffer_size);
- } else {
- buffer_ = static_cast<byte*>(buffer);
- }
- buffer_size_ = buffer_size;
- own_buffer_ = true;
-
- } else {
- // Use externally provided buffer instead.
- ASSERT(buffer_size > 0);
- buffer_ = static_cast<byte*>(buffer);
- buffer_size_ = buffer_size;
- own_buffer_ = false;
- }
-
- // Setup buffer pointers.
- ASSERT(buffer_ != NULL);
- pc_ = buffer_;
- reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
-
- last_trampoline_pool_end_ = 0;
- no_trampoline_pool_before_ = 0;
- trampoline_pool_blocked_nesting_ = 0;
- next_buffer_check_ = kMaxBranchOffset - kTrampolineSize;
-}
-
-
-Assembler::~Assembler() {
- if (own_buffer_) {
- if (isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- }
-}
-
-
-void Assembler::GetCode(CodeDesc* desc) {
- ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
- // Setup code descriptor.
- desc->buffer = buffer_;
- desc->buffer_size = buffer_size_;
- desc->instr_size = pc_offset();
- desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
-}
-
-
-void Assembler::Align(int m) {
- ASSERT(m >= 4 && IsPowerOf2(m));
- while ((pc_offset() & (m - 1)) != 0) {
- nop();
- }
-}
-
-
-void Assembler::CodeTargetAlign() {
- // No advantage to aligning branch/call targets to more than
- // single instruction, that I am aware of.
- Align(4);
-}
-
-
-Register Assembler::GetRt(Instr instr) {
- Register rt;
- rt.code_ = (instr & kRtMask) >> kRtShift;
- return rt;
-}
-
-
-bool Assembler::IsPop(Instr instr) {
- return (instr & ~kRtMask) == kPopRegPattern;
-}
-
-
-bool Assembler::IsPush(Instr instr) {
- return (instr & ~kRtMask) == kPushRegPattern;
-}
-
-
-bool Assembler::IsSwRegFpOffset(Instr instr) {
- return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
-}
-
-
-bool Assembler::IsLwRegFpOffset(Instr instr) {
- return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
-}
-
-
-bool Assembler::IsSwRegFpNegOffset(Instr instr) {
- return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
- kSwRegFpNegOffsetPattern);
-}
-
-
-bool Assembler::IsLwRegFpNegOffset(Instr instr) {
- return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
- kLwRegFpNegOffsetPattern);
-}
-
-
-// Labels refer to positions in the (to be) generated code.
-// There are bound, linked, and unused labels.
-//
-// Bound labels refer to known positions in the already
-// generated code. pos() is the position the label refers to.
-//
-// Linked labels refer to unknown positions in the code
-// to be generated; pos() is the position of the last
-// instruction using the label.
-
-// The link chain is terminated by a value in the instruction of -1,
-// which is an otherwise illegal value (branch -1 is inf loop).
-// The instruction 16-bit offset field addresses 32-bit words, but in
-// code is conv to an 18-bit value addressing bytes, hence the -4 value.
-
-const int kEndOfChain = -4;
-
-
-bool Assembler::IsBranch(Instr instr) {
- uint32_t opcode = ((instr & kOpcodeMask));
- uint32_t rt_field = ((instr & kRtFieldMask));
- uint32_t rs_field = ((instr & kRsFieldMask));
- uint32_t label_constant = (instr & ~kImm16Mask);
- // Checks if the instruction is a branch.
- return opcode == BEQ ||
- opcode == BNE ||
- opcode == BLEZ ||
- opcode == BGTZ ||
- opcode == BEQL ||
- opcode == BNEL ||
- opcode == BLEZL ||
- opcode == BGTZL||
- (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
- rt_field == BLTZAL || rt_field == BGEZAL)) ||
- (opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
- label_constant == 0; // Emitted label const in reg-exp engine.
-}
-
-
-bool Assembler::IsNop(Instr instr, unsigned int type) {
- // See Assembler::nop(type).
- ASSERT(type < 32);
- uint32_t opcode = ((instr & kOpcodeMask));
- uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
- uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
- uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
-
- // nop(type) == sll(zero_reg, zero_reg, type);
- // Technically all these values will be 0 but
- // this makes more sense to the reader.
-
- bool ret = (opcode == SLL &&
- rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
- rs == static_cast<uint32_t>(ToNumber(zero_reg)) &&
- sa == type);
-
- return ret;
-}
-
-
-int32_t Assembler::GetBranchOffset(Instr instr) {
- ASSERT(IsBranch(instr));
- return ((int16_t)(instr & kImm16Mask)) << 2;
-}
-
-
-bool Assembler::IsLw(Instr instr) {
- return ((instr & kOpcodeMask) == LW);
-}
-
-
-int16_t Assembler::GetLwOffset(Instr instr) {
- ASSERT(IsLw(instr));
- return ((instr & kImm16Mask));
-}
-
-
-Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
- ASSERT(IsLw(instr));
-
- // We actually create a new lw instruction based on the original one.
- Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
- | (offset & kImm16Mask);
-
- return temp_instr;
-}
-
-
-bool Assembler::IsSw(Instr instr) {
- return ((instr & kOpcodeMask) == SW);
-}
-
-
-Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
- ASSERT(IsSw(instr));
- return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
-}
-
-
-bool Assembler::IsAddImmediate(Instr instr) {
- return ((instr & kOpcodeMask) == ADDIU);
-}
-
-
-Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
- ASSERT(IsAddImmediate(instr));
- return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
-}
-
-
-int Assembler::target_at(int32_t pos) {
- Instr instr = instr_at(pos);
- if ((instr & ~kImm16Mask) == 0) {
- // Emitted label constant, not part of a branch.
- if (instr == 0) {
- return kEndOfChain;
- } else {
- int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
- return (imm18 + pos);
- }
- }
- // Check we have a branch instruction.
- ASSERT(IsBranch(instr));
- // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
- // the compiler uses arithmectic shifts for signed integers.
- int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
-
- if (imm18 == kEndOfChain) {
- // EndOfChain sentinel is returned directly, not relative to pc or pos.
- return kEndOfChain;
- } else {
- return pos + kBranchPCOffset + imm18;
- }
-}
-
-
-void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
- Instr instr = instr_at(pos);
- if ((instr & ~kImm16Mask) == 0) {
- ASSERT(target_pos == kEndOfChain || target_pos >= 0);
- // Emitted label constant, not part of a branch.
- // Make label relative to Code* of generated Code object.
- instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
- return;
- }
-
- ASSERT(IsBranch(instr));
- int32_t imm18 = target_pos - (pos + kBranchPCOffset);
- ASSERT((imm18 & 3) == 0);
-
- instr &= ~kImm16Mask;
- int32_t imm16 = imm18 >> 2;
- ASSERT(is_int16(imm16));
-
- instr_at_put(pos, instr | (imm16 & kImm16Mask));
-}
-
-
-void Assembler::print(Label* L) {
- if (L->is_unused()) {
- PrintF("unused label\n");
- } else if (L->is_bound()) {
- PrintF("bound label to %d\n", L->pos());
- } else if (L->is_linked()) {
- Label l = *L;
- PrintF("unbound label");
- while (l.is_linked()) {
- PrintF("@ %d ", l.pos());
- Instr instr = instr_at(l.pos());
- if ((instr & ~kImm16Mask) == 0) {
- PrintF("value\n");
- } else {
- PrintF("%d\n", instr);
- }
- next(&l);
- }
- } else {
- PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
- }
-}
-
-
-void Assembler::bind_to(Label* L, int pos) {
- ASSERT(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
- while (L->is_linked()) {
- int32_t fixup_pos = L->pos();
- int32_t dist = pos - fixup_pos;
- next(L); // Call next before overwriting link with target at fixup_pos.
- if (dist > kMaxBranchOffset) {
- do {
- int32_t trampoline_pos = get_trampoline_entry(fixup_pos);
- ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
- target_at_put(fixup_pos, trampoline_pos);
- fixup_pos = trampoline_pos;
- dist = pos - fixup_pos;
- } while (dist > kMaxBranchOffset);
- } else if (dist < -kMaxBranchOffset) {
- do {
- int32_t trampoline_pos = get_trampoline_entry(fixup_pos, false);
- ASSERT((trampoline_pos - fixup_pos) >= -kMaxBranchOffset);
- target_at_put(fixup_pos, trampoline_pos);
- fixup_pos = trampoline_pos;
- dist = pos - fixup_pos;
- } while (dist < -kMaxBranchOffset);
- };
- target_at_put(fixup_pos, pos);
- }
- L->bind_to(pos);
-
- // Keep track of the last bound label so we don't eliminate any instructions
- // before a bound label.
- if (pos > last_bound_pos_)
- last_bound_pos_ = pos;
-}
-
-
-void Assembler::link_to(Label* L, Label* appendix) {
- if (appendix->is_linked()) {
- if (L->is_linked()) {
- // Append appendix to L's list.
- int fixup_pos;
- int link = L->pos();
- do {
- fixup_pos = link;
- link = target_at(fixup_pos);
- } while (link > 0);
- ASSERT(link == kEndOfChain);
- target_at_put(fixup_pos, appendix->pos());
- } else {
- // L is empty, simply use appendix.
- *L = *appendix;
- }
- }
- appendix->Unuse(); // Appendix should not be used anymore.
-}
-
-
-void Assembler::bind(Label* L) {
- ASSERT(!L->is_bound()); // Label can only be bound once.
- bind_to(L, pc_offset());
-}
-
-
-void Assembler::next(Label* L) {
- ASSERT(L->is_linked());
- int link = target_at(L->pos());
- ASSERT(link > 0 || link == kEndOfChain);
- if (link == kEndOfChain) {
- L->Unuse();
- } else if (link > 0) {
- L->link_to(link);
- }
-}
-
-
-// We have to use a temporary register for things that can be relocated even
-// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
-// space. There is no guarantee that the relocated location can be similarly
-// encoded.
-bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
- return rmode != RelocInfo::NONE;
-}
-
-
-void Assembler::GenInstrRegister(Opcode opcode,
- Register rs,
- Register rt,
- Register rd,
- uint16_t sa,
- SecondaryField func) {
- ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
- Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
- | (rd.code() << kRdShift) | (sa << kSaShift) | func;
- emit(instr);
-}
-
-
-void Assembler::GenInstrRegister(Opcode opcode,
- Register rs,
- Register rt,
- uint16_t msb,
- uint16_t lsb,
- SecondaryField func) {
- ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
- Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
- | (msb << kRdShift) | (lsb << kSaShift) | func;
- emit(instr);
-}
-
-
-void Assembler::GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- FPURegister ft,
- FPURegister fs,
- FPURegister fd,
- SecondaryField func) {
- ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
- ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
- Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
- | (fd.code() << kFdShift) | func;
- emit(instr);
-}
-
-
-void Assembler::GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- Register rt,
- FPURegister fs,
- FPURegister fd,
- SecondaryField func) {
- ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
- ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
- Instr instr = opcode | fmt | (rt.code() << kRtShift)
- | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
- emit(instr);
-}
-
-
-void Assembler::GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- Register rt,
- FPUControlRegister fs,
- SecondaryField func) {
- ASSERT(fs.is_valid() && rt.is_valid());
- ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
- Instr instr =
- opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
- emit(instr);
-}
-
-
-// Instructions with immediate value.
-// Registers are in the order of the instruction encoding, from left to right.
-void Assembler::GenInstrImmediate(Opcode opcode,
- Register rs,
- Register rt,
- int32_t j) {
- ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
- Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
- | (j & kImm16Mask);
- emit(instr);
-}
-
-
-void Assembler::GenInstrImmediate(Opcode opcode,
- Register rs,
- SecondaryField SF,
- int32_t j) {
- ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j)));
- Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
- emit(instr);
-}
-
-
-void Assembler::GenInstrImmediate(Opcode opcode,
- Register rs,
- FPURegister ft,
- int32_t j) {
- ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
- ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
- Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
- | (j & kImm16Mask);
- emit(instr);
-}
-
-
-// Registers are in the order of the instruction encoding, from left to right.
-void Assembler::GenInstrJump(Opcode opcode,
- uint32_t address) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- ASSERT(is_uint26(address));
- Instr instr = opcode | address;
- emit(instr);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
-// Returns the next free label entry from the next trampoline pool.
-int32_t Assembler::get_label_entry(int32_t pos, bool next_pool) {
- int trampoline_count = trampolines_.length();
- int32_t label_entry = 0;
- ASSERT(trampoline_count > 0);
-
- if (next_pool) {
- for (int i = 0; i < trampoline_count; i++) {
- if (trampolines_[i].start() > pos) {
- label_entry = trampolines_[i].take_label();
- break;
- }
- }
- } else { // Caller needs a label entry from the previous pool.
- for (int i = trampoline_count-1; i >= 0; i--) {
- if (trampolines_[i].end() < pos) {
- label_entry = trampolines_[i].take_label();
- break;
- }
- }
- }
- return label_entry;
-}
-
-
-// Returns the next free trampoline entry from the next trampoline pool.
-int32_t Assembler::get_trampoline_entry(int32_t pos, bool next_pool) {
- int trampoline_count = trampolines_.length();
- int32_t trampoline_entry = 0;
- ASSERT(trampoline_count > 0);
-
- if (next_pool) {
- for (int i = 0; i < trampoline_count; i++) {
- if (trampolines_[i].start() > pos) {
- trampoline_entry = trampolines_[i].take_slot();
- break;
- }
- }
- } else { // Caller needs a trampoline entry from the previous pool.
- for (int i = trampoline_count-1; i >= 0; i--) {
- if (trampolines_[i].end() < pos) {
- trampoline_entry = trampolines_[i].take_slot();
- break;
- }
- }
- }
- return trampoline_entry;
-}
-
-
-int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
- int32_t target_pos;
- int32_t pc_offset_v = pc_offset();
-
- if (L->is_bound()) {
- target_pos = L->pos();
- int32_t dist = pc_offset_v - target_pos;
- if (dist > kMaxBranchOffset) {
- do {
- int32_t trampoline_pos = get_trampoline_entry(target_pos);
- ASSERT((trampoline_pos - target_pos) > 0);
- ASSERT((trampoline_pos - target_pos) <= kMaxBranchOffset);
- target_at_put(trampoline_pos, target_pos);
- target_pos = trampoline_pos;
- dist = pc_offset_v - target_pos;
- } while (dist > kMaxBranchOffset);
- } else if (dist < -kMaxBranchOffset) {
- do {
- int32_t trampoline_pos = get_trampoline_entry(target_pos, false);
- ASSERT((target_pos - trampoline_pos) > 0);
- ASSERT((target_pos - trampoline_pos) <= kMaxBranchOffset);
- target_at_put(trampoline_pos, target_pos);
- target_pos = trampoline_pos;
- dist = pc_offset_v - target_pos;
- } while (dist < -kMaxBranchOffset);
- }
- } else {
- if (L->is_linked()) {
- target_pos = L->pos(); // L's link.
- int32_t dist = pc_offset_v - target_pos;
- if (dist > kMaxBranchOffset) {
- do {
- int32_t label_pos = get_label_entry(target_pos);
- ASSERT((label_pos - target_pos) < kMaxBranchOffset);
- label_at_put(L, label_pos);
- target_pos = label_pos;
- dist = pc_offset_v - target_pos;
- } while (dist > kMaxBranchOffset);
- } else if (dist < -kMaxBranchOffset) {
- do {
- int32_t label_pos = get_label_entry(target_pos, false);
- ASSERT((label_pos - target_pos) > -kMaxBranchOffset);
- label_at_put(L, label_pos);
- target_pos = label_pos;
- dist = pc_offset_v - target_pos;
- } while (dist < -kMaxBranchOffset);
- }
- L->link_to(pc_offset());
- } else {
- L->link_to(pc_offset());
- return kEndOfChain;
- }
- }
-
- int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
- ASSERT((offset & 3) == 0);
- ASSERT(is_int16(offset >> 2));
-
- return offset;
-}
-
-
-void Assembler::label_at_put(Label* L, int at_offset) {
- int target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
- } else {
- if (L->is_linked()) {
- target_pos = L->pos(); // L's link.
- int32_t imm18 = target_pos - at_offset;
- ASSERT((imm18 & 3) == 0);
- int32_t imm16 = imm18 >> 2;
- ASSERT(is_int16(imm16));
- instr_at_put(at_offset, (imm16 & kImm16Mask));
- } else {
- target_pos = kEndOfChain;
- instr_at_put(at_offset, 0);
- }
- L->link_to(at_offset);
- }
-}
-
-
-//------- Branch and jump instructions --------
-
-void Assembler::b(int16_t offset) {
- beq(zero_reg, zero_reg, offset);
-}
-
-
-void Assembler::bal(int16_t offset) {
- positions_recorder()->WriteRecordedPositions();
- bgezal(zero_reg, offset);
-}
-
-
-void Assembler::beq(Register rs, Register rt, int16_t offset) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- GenInstrImmediate(BEQ, rs, rt, offset);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
-void Assembler::bgez(Register rs, int16_t offset) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- GenInstrImmediate(REGIMM, rs, BGEZ, offset);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
-void Assembler::bgezal(Register rs, int16_t offset) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- positions_recorder()->WriteRecordedPositions();
- GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
-void Assembler::bgtz(Register rs, int16_t offset) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- GenInstrImmediate(BGTZ, rs, zero_reg, offset);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
-void Assembler::blez(Register rs, int16_t offset) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- GenInstrImmediate(BLEZ, rs, zero_reg, offset);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
-void Assembler::bltz(Register rs, int16_t offset) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- GenInstrImmediate(REGIMM, rs, BLTZ, offset);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
-void Assembler::bltzal(Register rs, int16_t offset) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- positions_recorder()->WriteRecordedPositions();
- GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
-void Assembler::bne(Register rs, Register rt, int16_t offset) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- GenInstrImmediate(BNE, rs, rt, offset);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
-void Assembler::j(int32_t target) {
- ASSERT(is_uint28(target) && ((target & 3) == 0));
- GenInstrJump(J, target >> 2);
-}
-
-
-void Assembler::jr(Register rs) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (rs.is(ra)) {
- positions_recorder()->WriteRecordedPositions();
- }
- GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
-void Assembler::jal(int32_t target) {
- positions_recorder()->WriteRecordedPositions();
- ASSERT(is_uint28(target) && ((target & 3) == 0));
- GenInstrJump(JAL, target >> 2);
-}
-
-
-void Assembler::jalr(Register rs, Register rd) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- positions_recorder()->WriteRecordedPositions();
- GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
-//-------Data-processing-instructions---------
-
-// Arithmetic.
-
-void Assembler::addu(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
-}
-
-
-void Assembler::addiu(Register rd, Register rs, int32_t j) {
- GenInstrImmediate(ADDIU, rs, rd, j);
-
- // Eliminate pattern: push(r), pop().
- // addiu(sp, sp, Operand(-kPointerSize));
- // sw(src, MemOperand(sp, 0);
- // addiu(sp, sp, Operand(kPointerSize));
- // Both instructions can be eliminated.
- if (can_peephole_optimize(3) &&
- // Pattern.
- instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
- (instr_at(pc_ - 2 * kInstrSize) & ~kRtMask) == kPushRegPattern &&
- (instr_at(pc_ - 3 * kInstrSize)) == kPushInstruction) {
- pc_ -= 3 * kInstrSize;
- if (FLAG_print_peephole_optimization) {
- PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
- }
- }
-
- // Eliminate pattern: push(ry), pop(rx).
- // addiu(sp, sp, -kPointerSize)
- // sw(ry, MemOperand(sp, 0)
- // lw(rx, MemOperand(sp, 0)
- // addiu(sp, sp, kPointerSize);
- // Both instructions can be eliminated if ry = rx.
- // If ry != rx, a register copy from ry to rx is inserted
- // after eliminating the push and the pop instructions.
- if (can_peephole_optimize(4)) {
- Instr pre_push_sp_set = instr_at(pc_ - 4 * kInstrSize);
- Instr push_instr = instr_at(pc_ - 3 * kInstrSize);
- Instr pop_instr = instr_at(pc_ - 2 * kInstrSize);
- Instr post_pop_sp_set = instr_at(pc_ - 1 * kInstrSize);
-
- if (IsPush(push_instr) &&
- IsPop(pop_instr) && pre_push_sp_set == kPushInstruction &&
- post_pop_sp_set == kPopInstruction) {
- if ((pop_instr & kRtMask) != (push_instr & kRtMask)) {
- // For consecutive push and pop on different registers,
- // we delete both the push & pop and insert a register move.
- // push ry, pop rx --> mov rx, ry.
- Register reg_pushed, reg_popped;
- reg_pushed = GetRt(push_instr);
- reg_popped = GetRt(pop_instr);
- pc_ -= 4 * kInstrSize;
- // Insert a mov instruction, which is better than a pair of push & pop.
- or_(reg_popped, reg_pushed, zero_reg);
- if (FLAG_print_peephole_optimization) {
- PrintF("%x push/pop (diff reg) replaced by a reg move\n",
- pc_offset());
- }
- } else {
- // For consecutive push and pop on the same register,
- // both the push and the pop can be deleted.
- pc_ -= 4 * kInstrSize;
- if (FLAG_print_peephole_optimization) {
- PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
- }
- }
- }
- }
-
- if (can_peephole_optimize(5)) {
- Instr pre_push_sp_set = instr_at(pc_ - 5 * kInstrSize);
- Instr mem_write_instr = instr_at(pc_ - 4 * kInstrSize);
- Instr lw_instr = instr_at(pc_ - 3 * kInstrSize);
- Instr mem_read_instr = instr_at(pc_ - 2 * kInstrSize);
- Instr post_pop_sp_set = instr_at(pc_ - 1 * kInstrSize);
-
- if (IsPush(mem_write_instr) &&
- pre_push_sp_set == kPushInstruction &&
- IsPop(mem_read_instr) &&
- post_pop_sp_set == kPopInstruction) {
- if ((IsLwRegFpOffset(lw_instr) ||
- IsLwRegFpNegOffset(lw_instr))) {
- if ((mem_write_instr & kRtMask) ==
- (mem_read_instr & kRtMask)) {
- // Pattern: push & pop from/to same register,
- // with a fp+offset lw in between.
- //
- // The following:
- // addiu sp, sp, -4
- // sw rx, [sp, #0]!
- // lw rz, [fp, #-24]
- // lw rx, [sp, 0],
- // addiu sp, sp, 4
- //
- // Becomes:
- // if(rx == rz)
- // delete all
- // else
- // lw rz, [fp, #-24]
-
- if ((mem_write_instr & kRtMask) == (lw_instr & kRtMask)) {
- pc_ -= 5 * kInstrSize;
- } else {
- pc_ -= 5 * kInstrSize;
- // Reinsert back the lw rz.
- emit(lw_instr);
- }
- if (FLAG_print_peephole_optimization) {
- PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
- }
- } else {
- // Pattern: push & pop from/to different registers
- // with a fp + offset lw in between.
- //
- // The following:
- // addiu sp, sp ,-4
- // sw rx, [sp, 0]
- // lw rz, [fp, #-24]
- // lw ry, [sp, 0]
- // addiu sp, sp, 4
- //
- // Becomes:
- // if(ry == rz)
- // mov ry, rx;
- // else if(rx != rz)
- // lw rz, [fp, #-24]
- // mov ry, rx
- // else if((ry != rz) || (rx == rz)) becomes:
- // mov ry, rx
- // lw rz, [fp, #-24]
-
- Register reg_pushed, reg_popped;
- if ((mem_read_instr & kRtMask) == (lw_instr & kRtMask)) {
- reg_pushed = GetRt(mem_write_instr);
- reg_popped = GetRt(mem_read_instr);
- pc_ -= 5 * kInstrSize;
- or_(reg_popped, reg_pushed, zero_reg); // Move instruction.
- } else if ((mem_write_instr & kRtMask)
- != (lw_instr & kRtMask)) {
- reg_pushed = GetRt(mem_write_instr);
- reg_popped = GetRt(mem_read_instr);
- pc_ -= 5 * kInstrSize;
- emit(lw_instr);
- or_(reg_popped, reg_pushed, zero_reg); // Move instruction.
- } else if (((mem_read_instr & kRtMask)
- != (lw_instr & kRtMask)) ||
- ((mem_write_instr & kRtMask)
- == (lw_instr & kRtMask)) ) {
- reg_pushed = GetRt(mem_write_instr);
- reg_popped = GetRt(mem_read_instr);
- pc_ -= 5 * kInstrSize;
- or_(reg_popped, reg_pushed, zero_reg); // Move instruction.
- emit(lw_instr);
- }
- if (FLAG_print_peephole_optimization) {
- PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
- }
- }
- }
- }
- }
-}
-
-
-void Assembler::subu(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
-}
-
-
-void Assembler::mul(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
-}
-
-
-void Assembler::mult(Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
-}
-
-
-void Assembler::multu(Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
-}
-
-
-void Assembler::div(Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
-}
-
-
-void Assembler::divu(Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
-}
-
-
-// Logical.
-
-void Assembler::and_(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
-}
-
-
-void Assembler::andi(Register rt, Register rs, int32_t j) {
- GenInstrImmediate(ANDI, rs, rt, j);
-}
-
-
-void Assembler::or_(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
-}
-
-
-void Assembler::ori(Register rt, Register rs, int32_t j) {
- GenInstrImmediate(ORI, rs, rt, j);
-}
-
-
-void Assembler::xor_(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
-}
-
-
-void Assembler::xori(Register rt, Register rs, int32_t j) {
- GenInstrImmediate(XORI, rs, rt, j);
-}
-
-
-void Assembler::nor(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
-}
-
-
-// Shifts.
-void Assembler::sll(Register rd,
- Register rt,
- uint16_t sa,
- bool coming_from_nop) {
- // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
- // generated using the sll instruction. They must be generated using
- // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
- // instructions.
- ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
-}
-
-
-void Assembler::sllv(Register rd, Register rt, Register rs) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
-}
-
-
-void Assembler::srl(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
-}
-
-
-void Assembler::srlv(Register rd, Register rt, Register rs) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
-}
-
-
-void Assembler::sra(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
-}
-
-
-void Assembler::srav(Register rd, Register rt, Register rs) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
-}
-
-
-void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
- // Should be called via MacroAssembler::Ror.
- ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
- ASSERT(mips32r2);
- Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
- | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
- emit(instr);
-}
-
-
-void Assembler::rotrv(Register rd, Register rt, Register rs) {
- // Should be called via MacroAssembler::Ror.
- ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid() );
- ASSERT(mips32r2);
- Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
- | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
- emit(instr);
-}
-
-
-//------------Memory-instructions-------------
-
-// Helper for base-reg + offset, when offset is larger than int16.
-void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
- ASSERT(!src.rm().is(at));
- lui(at, src.offset_ >> kLuiShift);
- ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
- addu(at, at, src.rm()); // Add base register.
-}
-
-
-void Assembler::lb(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(LB, at, rd, 0); // Equiv to lb(rd, MemOperand(at, 0));
- }
-}
-
-
-void Assembler::lbu(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(LBU, at, rd, 0); // Equiv to lbu(rd, MemOperand(at, 0));
- }
-}
-
-
-void Assembler::lh(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(LH, at, rd, 0); // Equiv to lh(rd, MemOperand(at, 0));
- }
-}
-
-
-void Assembler::lhu(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(LHU, at, rd, 0); // Equiv to lhu(rd, MemOperand(at, 0));
- }
-}
-
-
-void Assembler::lw(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(LW, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0));
- }
-
- if (can_peephole_optimize(2)) {
- Instr sw_instr = instr_at(pc_ - 2 * kInstrSize);
- Instr lw_instr = instr_at(pc_ - 1 * kInstrSize);
-
- if ((IsSwRegFpOffset(sw_instr) &&
- IsLwRegFpOffset(lw_instr)) ||
- (IsSwRegFpNegOffset(sw_instr) &&
- IsLwRegFpNegOffset(lw_instr))) {
- if ((lw_instr & kLwSwInstrArgumentMask) ==
- (sw_instr & kLwSwInstrArgumentMask)) {
- // Pattern: Lw/sw same fp+offset, same register.
- //
- // The following:
- // sw rx, [fp, #-12]
- // lw rx, [fp, #-12]
- //
- // Becomes:
- // sw rx, [fp, #-12]
-
- pc_ -= 1 * kInstrSize;
- if (FLAG_print_peephole_optimization) {
- PrintF("%x sw/lw (fp + same offset), same reg\n", pc_offset());
- }
- } else if ((lw_instr & kLwSwOffsetMask) ==
- (sw_instr & kLwSwOffsetMask)) {
- // Pattern: Lw/sw same fp+offset, different register.
- //
- // The following:
- // sw rx, [fp, #-12]
- // lw ry, [fp, #-12]
- //
- // Becomes:
- // sw rx, [fp, #-12]
- // mov ry, rx
-
- Register reg_stored, reg_loaded;
- reg_stored = GetRt(sw_instr);
- reg_loaded = GetRt(lw_instr);
- pc_ -= 1 * kInstrSize;
- // Insert a mov instruction, which is better than lw.
- or_(reg_loaded, reg_stored, zero_reg); // Move instruction.
- if (FLAG_print_peephole_optimization) {
- PrintF("%x sw/lw (fp + same offset), diff reg \n", pc_offset());
- }
- }
- }
- }
-}
-
-
-void Assembler::lwl(Register rd, const MemOperand& rs) {
- GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
-}
-
-
-void Assembler::lwr(Register rd, const MemOperand& rs) {
- GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
-}
-
-
-void Assembler::sb(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to store.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(SB, at, rd, 0); // Equiv to sb(rd, MemOperand(at, 0));
- }
-}
-
-
-void Assembler::sh(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to store.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(SH, at, rd, 0); // Equiv to sh(rd, MemOperand(at, 0));
- }
-}
-
-
-void Assembler::sw(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to store.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(SW, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0));
- }
-
- // Eliminate pattern: pop(), push(r).
- // addiu sp, sp, Operand(kPointerSize);
- // addiu sp, sp, Operand(-kPointerSize);
- // -> sw r, MemOpernad(sp, 0);
- if (can_peephole_optimize(3) &&
- // Pattern.
- instr_at(pc_ - 1 * kInstrSize) ==
- (kPushRegPattern | (rd.code() << kRtShift)) &&
- instr_at(pc_ - 2 * kInstrSize) == kPushInstruction &&
- instr_at(pc_ - 3 * kInstrSize) == kPopInstruction) {
- pc_ -= 3 * kInstrSize;
- GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
- if (FLAG_print_peephole_optimization) {
- PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
- }
- }
-}
-
-
-void Assembler::swl(Register rd, const MemOperand& rs) {
- GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
-}
-
-
-void Assembler::swr(Register rd, const MemOperand& rs) {
- GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
-}
-
-
-void Assembler::lui(Register rd, int32_t j) {
- GenInstrImmediate(LUI, zero_reg, rd, j);
-}
-
-
-//-------------Misc-instructions--------------
-
-// Break / Trap instructions.
-void Assembler::break_(uint32_t code) {
- ASSERT((code & ~0xfffff) == 0);
- Instr break_instr = SPECIAL | BREAK | (code << 6);
- emit(break_instr);
-}
-
-
-void Assembler::tge(Register rs, Register rt, uint16_t code) {
- ASSERT(is_uint10(code));
- Instr instr = SPECIAL | TGE | rs.code() << kRsShift
- | rt.code() << kRtShift | code << 6;
- emit(instr);
-}
-
-
-void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
- ASSERT(is_uint10(code));
- Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
- | rt.code() << kRtShift | code << 6;
- emit(instr);
-}
-
-
-void Assembler::tlt(Register rs, Register rt, uint16_t code) {
- ASSERT(is_uint10(code));
- Instr instr =
- SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
- emit(instr);
-}
-
-
-void Assembler::tltu(Register rs, Register rt, uint16_t code) {
- ASSERT(is_uint10(code));
- Instr instr =
- SPECIAL | TLTU | rs.code() << kRsShift
- | rt.code() << kRtShift | code << 6;
- emit(instr);
-}
-
-
-void Assembler::teq(Register rs, Register rt, uint16_t code) {
- ASSERT(is_uint10(code));
- Instr instr =
- SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
- emit(instr);
-}
-
-
-void Assembler::tne(Register rs, Register rt, uint16_t code) {
- ASSERT(is_uint10(code));
- Instr instr =
- SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
- emit(instr);
-}
-
-
-// Move from HI/LO register.
-
-void Assembler::mfhi(Register rd) {
- GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
-}
-
-
-void Assembler::mflo(Register rd) {
- GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
-}
-
-
-// Set on less than instructions.
-void Assembler::slt(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
-}
-
-
-void Assembler::sltu(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
-}
-
-
-void Assembler::slti(Register rt, Register rs, int32_t j) {
- GenInstrImmediate(SLTI, rs, rt, j);
-}
-
-
-void Assembler::sltiu(Register rt, Register rs, int32_t j) {
- GenInstrImmediate(SLTIU, rs, rt, j);
-}
-
-
-// Conditional move.
-void Assembler::movz(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
-}
-
-
-void Assembler::movn(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
-}
-
-
-void Assembler::movt(Register rd, Register rs, uint16_t cc) {
- Register rt;
- rt.code_ = (cc & 0x0003) << 2 | 1;
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
-}
-
-
-void Assembler::movf(Register rd, Register rs, uint16_t cc) {
- Register rt;
- rt.code_ = (cc & 0x0003) << 2 | 0;
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
-}
-
-
-// Bit twiddling.
-void Assembler::clz(Register rd, Register rs) {
- // Clz instr requires same GPR number in 'rd' and 'rt' fields.
- GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
-}
-
-
-void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
- // Should be called via MacroAssembler::Ins.
- // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
- ASSERT(mips32r2);
- GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
-}
-
-
-void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
- // Should be called via MacroAssembler::Ext.
- // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
- ASSERT(mips32r2);
- GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
-}
-
-
-//--------Coprocessor-instructions----------------
-
-// Load, store, move.
-void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
- GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
-}
-
-
-void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
- // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
- // load to two 32-bit loads.
- GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);
-}
-
-
-void Assembler::swc1(FPURegister fd, const MemOperand& src) {
- GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
-}
-
-
-void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
- // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
- // store to two 32-bit stores.
- GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
-}
-
-
-void Assembler::mtc1(Register rt, FPURegister fs) {
- GenInstrRegister(COP1, MTC1, rt, fs, f0);
-}
-
-
-void Assembler::mfc1(Register rt, FPURegister fs) {
- GenInstrRegister(COP1, MFC1, rt, fs, f0);
-}
-
-
-void Assembler::ctc1(Register rt, FPUControlRegister fs) {
- GenInstrRegister(COP1, CTC1, rt, fs);
-}
-
-
-void Assembler::cfc1(Register rt, FPUControlRegister fs) {
- GenInstrRegister(COP1, CFC1, rt, fs);
-}
-
-
-// Arithmetic.
-
-void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
- GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
-}
-
-
-void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
- GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
-}
-
-
-void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
- GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
-}
-
-
-void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
- GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
-}
-
-
-void Assembler::abs_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
-}
-
-
-void Assembler::mov_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
-}
-
-
-void Assembler::neg_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
-}
-
-
-void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
-}
-
-
-// Conversions.
-
-void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
-}
-
-
-void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
-}
-
-
-void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
-}
-
-
-void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
-}
-
-
-void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
-}
-
-
-void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
-}
-
-
-void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
-}
-
-
-void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
-}
-
-
-void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
-}
-
-
-void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
-}
-
-
-void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
- ASSERT(mips32r2);
- GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
-}
-
-
-void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
- ASSERT(mips32r2);
- GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
-}
-
-
-void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
- ASSERT(mips32r2);
- GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
-}
-
-
-void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
- ASSERT(mips32r2);
- GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
-}
-
-
-void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
-}
-
-
-void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
-}
-
-
-void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
-}
-
-
-void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
-}
-
-
-void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
-}
-
-
-void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
-}
-
-
-void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
-}
-
-
-void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
- ASSERT(mips32r2);
- GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
-}
-
-
-void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
-}
-
-
-void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
-}
-
-
-void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
- ASSERT(mips32r2);
- GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
-}
-
-
-void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
-}
-
-
-// Conditions.
-void Assembler::c(FPUCondition cond, SecondaryField fmt,
- FPURegister fs, FPURegister ft, uint16_t cc) {
- ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
- ASSERT(is_uint3(cc));
- ASSERT((fmt & ~(31 << kRsShift)) == 0);
- Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
- | cc << 8 | 3 << 4 | cond;
- emit(instr);
-}
-
-
-void Assembler::fcmp(FPURegister src1, const double src2,
- FPUCondition cond) {
- ASSERT(isolate()->cpu_features()->IsSupported(FPU));
- ASSERT(src2 == 0.0);
- mtc1(zero_reg, f14);
- cvt_d_w(f14, f14);
- c(cond, D, src1, f14, 0);
-}
-
-
-void Assembler::bc1f(int16_t offset, uint16_t cc) {
- ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
- ASSERT(is_uint3(cc));
- Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
- emit(instr);
-}
-
-
-void Assembler::bc1t(int16_t offset, uint16_t cc) {
- ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
- ASSERT(is_uint3(cc));
- Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
- emit(instr);
-}
-
-
-// Debugging.
-void Assembler::RecordJSReturn() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordDebugBreakSlot() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
-}
-
-
-void Assembler::RecordComment(const char* msg) {
- if (FLAG_code_comments) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
-
-void Assembler::GrowBuffer() {
- if (!own_buffer_) FATAL("external code buffer is too small");
-
- // Compute new buffer size.
- CodeDesc desc; // The new buffer.
- if (buffer_size_ < 4*KB) {
- desc.buffer_size = 4*KB;
- } else if (buffer_size_ < 1*MB) {
- desc.buffer_size = 2*buffer_size_;
- } else {
- desc.buffer_size = buffer_size_ + 1*MB;
- }
- CHECK_GT(desc.buffer_size, 0); // No overflow.
-
- // Setup new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
-
- desc.instr_size = pc_offset();
- desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
-
- // Copy the data.
- int pc_delta = desc.buffer - buffer_;
- int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.pos(), desc.reloc_size);
-
- // Switch buffers.
- DeleteArray(buffer_);
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
- pc_ += pc_delta;
- reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.last_pc() + pc_delta);
-
- // On ia32 and ARM pc relative addressing is used, and we thus need to apply a
- // shift by pc_delta. But on MIPS the target address it directly loaded, so
- // we do not need to relocate here.
-
- ASSERT(!overflow());
-}
-
-
-void Assembler::db(uint8_t data) {
- CheckBuffer();
- *reinterpret_cast<uint8_t*>(pc_) = data;
- pc_ += sizeof(uint8_t);
-}
-
-
-void Assembler::dd(uint32_t data) {
- CheckBuffer();
- *reinterpret_cast<uint32_t*>(pc_) = data;
- pc_ += sizeof(uint32_t);
-}
-
-
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- RelocInfo rinfo(pc_, rmode, data); // We do not try to reuse pool constants.
- if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
- // Adjust code for new modes.
- ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
- || RelocInfo::IsJSReturn(rmode)
- || RelocInfo::IsComment(rmode)
- || RelocInfo::IsPosition(rmode));
- // These modes do not need an entry in the constant pool.
- }
- if (rinfo.rmode() != RelocInfo::NONE) {
- // Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
- !Serializer::enabled() &&
- !FLAG_debug_code) {
- return;
- }
- ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
- reloc_info_writer.Write(&rinfo);
- }
-}
-
-
-void Assembler::BlockTrampolinePoolFor(int instructions) {
- BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
-}
-
-
-void Assembler::CheckTrampolinePool(bool force_emit) {
- // Calculate the offset of the next check.
- next_buffer_check_ = pc_offset() + kCheckConstInterval;
-
- int dist = pc_offset() - last_trampoline_pool_end_;
-
- if (dist <= kMaxDistBetweenPools && !force_emit) {
- return;
- }
-
- // Some small sequences of instructions must not be broken up by the
- // insertion of a trampoline pool; such sequences are protected by setting
- // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
- // which are both checked here. Also, recursive calls to CheckTrampolinePool
- // are blocked by trampoline_pool_blocked_nesting_.
- if ((trampoline_pool_blocked_nesting_ > 0) ||
- (pc_offset() < no_trampoline_pool_before_)) {
- // Emission is currently blocked; make sure we try again as soon as
- // possible.
- if (trampoline_pool_blocked_nesting_ > 0) {
- next_buffer_check_ = pc_offset() + kInstrSize;
- } else {
- next_buffer_check_ = no_trampoline_pool_before_;
- }
- return;
- }
-
- // First we emit jump (2 instructions), then we emit trampoline pool.
- { BlockTrampolinePoolScope block_trampoline_pool(this);
- Label after_pool;
- b(&after_pool);
- nop();
-
- int pool_start = pc_offset();
- for (int i = 0; i < kSlotsPerTrampoline; i++) {
- b(&after_pool);
- nop();
- }
- for (int i = 0; i < kLabelsPerTrampoline; i++) {
- emit(0);
- }
- last_trampoline_pool_end_ = pc_offset() - kInstrSize;
- bind(&after_pool);
- trampolines_.Add(Trampoline(pool_start,
- kSlotsPerTrampoline,
- kLabelsPerTrampoline));
-
- // Since a trampoline pool was just emitted,
- // move the check offset forward by the standard interval.
- next_buffer_check_ = last_trampoline_pool_end_ + kMaxDistBetweenPools;
- }
- return;
-}
-
-
-Address Assembler::target_address_at(Address pc) {
- Instr instr1 = instr_at(pc);
- Instr instr2 = instr_at(pc + kInstrSize);
- // Check we have 2 instructions generated by li.
- ASSERT(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
- ((instr1 == nopInstr) && ((instr2 & kOpcodeMask) == ADDI ||
- (instr2 & kOpcodeMask) == ORI ||
- (instr2 & kOpcodeMask) == LUI)));
- // Interpret these 2 instructions.
- if (instr1 == nopInstr) {
- if ((instr2 & kOpcodeMask) == ADDI) {
- return reinterpret_cast<Address>(((instr2 & kImm16Mask) << 16) >> 16);
- } else if ((instr2 & kOpcodeMask) == ORI) {
- return reinterpret_cast<Address>(instr2 & kImm16Mask);
- } else if ((instr2 & kOpcodeMask) == LUI) {
- return reinterpret_cast<Address>((instr2 & kImm16Mask) << 16);
- }
- } else if ((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) {
- // 32 bit value.
- return reinterpret_cast<Address>(
- (instr1 & kImm16Mask) << 16 | (instr2 & kImm16Mask));
- }
-
- // We should never get here.
- UNREACHABLE();
- return (Address)0x0;
-}
-
-
-void Assembler::set_target_address_at(Address pc, Address target) {
- // On MIPS we need to patch the code to generate.
-
- // First check we have a li.
- Instr instr2 = instr_at(pc + kInstrSize);
-#ifdef DEBUG
- Instr instr1 = instr_at(pc);
-
- // Check we have indeed the result from a li with MustUseReg true.
- CHECK(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
- ((instr1 == 0) && ((instr2 & kOpcodeMask)== ADDIU ||
- (instr2 & kOpcodeMask)== ORI ||
- (instr2 & kOpcodeMask)== LUI)));
-#endif
-
- uint32_t rt_code = (instr2 & kRtFieldMask);
- uint32_t* p = reinterpret_cast<uint32_t*>(pc);
- uint32_t itarget = reinterpret_cast<uint32_t>(target);
-
- if (is_int16(itarget)) {
- // nop.
- // addiu rt zero_reg j.
- *p = nopInstr;
- *(p+1) = ADDIU | rt_code | (itarget & kImm16Mask);
- } else if (!(itarget & kHiMask)) {
- // nop.
- // ori rt zero_reg j.
- *p = nopInstr;
- *(p+1) = ORI | rt_code | (itarget & kImm16Mask);
- } else if (!(itarget & kImm16Mask)) {
- // nop.
- // lui rt (kHiMask & itarget) >> kLuiShift.
- *p = nopInstr;
- *(p+1) = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
- } else {
- // lui rt (kHiMask & itarget) >> kLuiShift.
- // ori rt rt, (kImm16Mask & itarget).
- *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
- *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
- }
-
- CPU::FlushICache(pc, 2 * sizeof(int32_t));
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/assembler-mips.h b/src/3rdparty/v8/src/mips/assembler-mips.h
deleted file mode 100644
index 5a6e271..0000000
--- a/src/3rdparty/v8/src/mips/assembler-mips.h
+++ /dev/null
@@ -1,1066 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
-
-
-#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
-#define V8_MIPS_ASSEMBLER_MIPS_H_
-
-#include <stdio.h>
-#include "assembler.h"
-#include "constants-mips.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Register and FPURegister
-
-// Core register.
-struct Register {
- static const int kNumRegisters = v8::internal::kNumRegisters;
- static const int kNumAllocatableRegisters = 14; // v0 through t7
-
- static int ToAllocationIndex(Register reg) {
- return reg.code() - 2; // zero_reg and 'at' are skipped.
- }
-
- static Register FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- return from_code(index + 2); // zero_reg and 'at' are skipped.
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- const char* const names[] = {
- "v0",
- "v1",
- "a0",
- "a1",
- "a2",
- "a3",
- "t0",
- "t1",
- "t2",
- "t3",
- "t4",
- "t5",
- "t6",
- "t7",
- };
- return names[index];
- }
-
- static Register from_code(int code) {
- Register r = { code };
- return r;
- }
-
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
-
- // Unfortunately we can't make this private in a struct.
- int code_;
-};
-
-const Register no_reg = { -1 };
-
-const Register zero_reg = { 0 };
-const Register at = { 1 };
-const Register v0 = { 2 };
-const Register v1 = { 3 };
-const Register a0 = { 4 };
-const Register a1 = { 5 };
-const Register a2 = { 6 };
-const Register a3 = { 7 };
-const Register t0 = { 8 };
-const Register t1 = { 9 };
-const Register t2 = { 10 };
-const Register t3 = { 11 };
-const Register t4 = { 12 };
-const Register t5 = { 13 };
-const Register t6 = { 14 };
-const Register t7 = { 15 };
-const Register s0 = { 16 };
-const Register s1 = { 17 };
-const Register s2 = { 18 };
-const Register s3 = { 19 };
-const Register s4 = { 20 };
-const Register s5 = { 21 };
-const Register s6 = { 22 };
-const Register s7 = { 23 };
-const Register t8 = { 24 };
-const Register t9 = { 25 };
-const Register k0 = { 26 };
-const Register k1 = { 27 };
-const Register gp = { 28 };
-const Register sp = { 29 };
-const Register s8_fp = { 30 };
-const Register ra = { 31 };
-
-
-int ToNumber(Register reg);
-
-Register ToRegister(int num);
-
-// Coprocessor register.
-struct FPURegister {
- static const int kNumRegisters = v8::internal::kNumFPURegisters;
- // f0 has been excluded from allocation. This is following ia32
- // where xmm0 is excluded.
- static const int kNumAllocatableRegisters = 15;
-
- static int ToAllocationIndex(FPURegister reg) {
- ASSERT(reg.code() != 0);
- ASSERT(reg.code() % 2 == 0);
- return (reg.code() / 2) - 1;
- }
-
- static FPURegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- return from_code((index + 1) * 2);
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- const char* const names[] = {
- "f2",
- "f4",
- "f6",
- "f8",
- "f10",
- "f12",
- "f14",
- "f16",
- "f18",
- "f20",
- "f22",
- "f24",
- "f26",
- "f28",
- "f30"
- };
- return names[index];
- }
-
- static FPURegister from_code(int code) {
- FPURegister r = { code };
- return r;
- }
-
- bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegisters ; }
- bool is(FPURegister creg) const { return code_ == creg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
- void setcode(int f) {
- code_ = f;
- ASSERT(is_valid());
- }
- // Unfortunately we can't make this private in a struct.
- int code_;
-};
-
-typedef FPURegister DoubleRegister;
-
-const FPURegister no_creg = { -1 };
-
-const FPURegister f0 = { 0 }; // Return value in hard float mode.
-const FPURegister f1 = { 1 };
-const FPURegister f2 = { 2 };
-const FPURegister f3 = { 3 };
-const FPURegister f4 = { 4 };
-const FPURegister f5 = { 5 };
-const FPURegister f6 = { 6 };
-const FPURegister f7 = { 7 };
-const FPURegister f8 = { 8 };
-const FPURegister f9 = { 9 };
-const FPURegister f10 = { 10 };
-const FPURegister f11 = { 11 };
-const FPURegister f12 = { 12 }; // Arg 0 in hard float mode.
-const FPURegister f13 = { 13 };
-const FPURegister f14 = { 14 }; // Arg 1 in hard float mode.
-const FPURegister f15 = { 15 };
-const FPURegister f16 = { 16 };
-const FPURegister f17 = { 17 };
-const FPURegister f18 = { 18 };
-const FPURegister f19 = { 19 };
-const FPURegister f20 = { 20 };
-const FPURegister f21 = { 21 };
-const FPURegister f22 = { 22 };
-const FPURegister f23 = { 23 };
-const FPURegister f24 = { 24 };
-const FPURegister f25 = { 25 };
-const FPURegister f26 = { 26 };
-const FPURegister f27 = { 27 };
-const FPURegister f28 = { 28 };
-const FPURegister f29 = { 29 };
-const FPURegister f30 = { 30 };
-const FPURegister f31 = { 31 };
-
-// FPU (coprocessor 1) control registers.
-// Currently only FCSR (#31) is implemented.
-struct FPUControlRegister {
- static const int kFCSRRegister = 31;
- static const int kInvalidFPUControlRegister = -1;
-
- bool is_valid() const { return code_ == kFCSRRegister; }
- bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
- void setcode(int f) {
- code_ = f;
- ASSERT(is_valid());
- }
- // Unfortunately we can't make this private in a struct.
- int code_;
-};
-
-const FPUControlRegister no_fpucreg = { -1 };
-const FPUControlRegister FCSR = { kFCSRRegister };
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Operands.
-
-// Class Operand represents a shifter operand in data processing instructions.
-class Operand BASE_EMBEDDED {
- public:
- // Immediate.
- INLINE(explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE));
- INLINE(explicit Operand(const ExternalReference& f));
- INLINE(explicit Operand(const char* s));
- INLINE(explicit Operand(Object** opp));
- INLINE(explicit Operand(Context** cpp));
- explicit Operand(Handle<Object> handle);
- INLINE(explicit Operand(Smi* value));
-
- // Register.
- INLINE(explicit Operand(Register rm));
-
- // Return true if this is a register operand.
- INLINE(bool is_reg() const);
-
- Register rm() const { return rm_; }
-
- private:
- Register rm_;
- int32_t imm32_; // Valid if rm_ == no_reg
- RelocInfo::Mode rmode_;
-
- friend class Assembler;
- friend class MacroAssembler;
-};
-
-
-// On MIPS we have only one adressing mode with base_reg + offset.
-// Class MemOperand represents a memory operand in load and store instructions.
-class MemOperand : public Operand {
- public:
-
- explicit MemOperand(Register rn, int32_t offset = 0);
-
- private:
- int32_t offset_;
-
- friend class Assembler;
-};
-
-
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
-class CpuFeatures {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- void Probe(bool portable);
-
- // Check whether a feature is supported by the target CPU.
- bool IsSupported(CpuFeature f) const {
- if (f == FPU && !FLAG_enable_fpu) return false;
- return (supported_ & (1u << f)) != 0;
- }
-
- // Check whether a feature is currently enabled.
- bool IsEnabled(CpuFeature f) const {
- return (enabled_ & (1u << f)) != 0;
- }
-
- // Enable a specified feature within a scope.
- class Scope BASE_EMBEDDED {
-#ifdef DEBUG
- public:
- explicit Scope(CpuFeature f)
- : cpu_features_(Isolate::Current()->cpu_features()),
- isolate_(Isolate::Current()) {
- ASSERT(cpu_features_->IsSupported(f));
- ASSERT(!Serializer::enabled() ||
- (cpu_features_->found_by_runtime_probing_ & (1u << f)) == 0);
- old_enabled_ = cpu_features_->enabled_;
- cpu_features_->enabled_ |= 1u << f;
- }
- ~Scope() {
- ASSERT_EQ(Isolate::Current(), isolate_);
- cpu_features_->enabled_ = old_enabled_;
- }
- private:
- unsigned old_enabled_;
- CpuFeatures* cpu_features_;
- Isolate* isolate_;
-#else
- public:
- explicit Scope(CpuFeature f) {}
-#endif
- };
-
- private:
- CpuFeatures();
-
- unsigned supported_;
- unsigned enabled_;
- unsigned found_by_runtime_probing_;
-
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
-};
-
-
-class Assembler : public AssemblerBase {
- public:
- // Create an assembler. Instructions and relocation information are emitted
- // into a buffer, with the instructions starting from the beginning and the
- // relocation information starting from the end of the buffer. See CodeDesc
- // for a detailed comment on the layout (globals.h).
- //
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
- //
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
- Assembler(void* buffer, int buffer_size);
- ~Assembler();
-
- // Overrides the default provided by FLAG_debug_code.
- void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
-
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- void GetCode(CodeDesc* desc);
-
- // Label operations & relative jumps (PPUM Appendix D).
- //
- // Takes a branch opcode (cc) and a label (L) and generates
- // either a backward branch or a forward branch and links it
- // to the label fixup chain. Usage:
- //
- // Label L; // unbound label
- // j(cc, &L); // forward branch to unbound label
- // bind(&L); // bind label to the current pc
- // j(cc, &L); // backward branch to bound label
- // bind(&L); // illegal: a label may be bound only once
- //
- // Note: The same Label can be used for forward and backward branches
- // but it may be bound only once.
- void bind(Label* L); // binds an unbound label L to the current code position
-
- // Returns the branch offset to the given label from the current code position
- // Links the label to the current position if it is still unbound
- // Manages the jump elimination optimization if the second parameter is true.
- int32_t branch_offset(Label* L, bool jump_elimination_allowed);
- int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
- int32_t o = branch_offset(L, jump_elimination_allowed);
- ASSERT((o & 3) == 0); // Assert the offset is aligned.
- return o >> 2;
- }
-
- // Puts a labels target address at the given position.
- // The high 8 bits are set to zero.
- void label_at_put(Label* L, int at_offset);
-
- // Read/Modify the code target address in the branch/call instruction at pc.
- static Address target_address_at(Address pc);
- static void set_target_address_at(Address pc, Address target);
-
- // This sets the branch destination (which gets loaded at the call address).
- // This is for calls and branches within generated code.
- inline static void set_target_at(Address instruction_payload,
- Address target) {
- set_target_address_at(instruction_payload, target);
- }
-
- // This sets the branch destination.
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address instruction_payload,
- Address target) {
- set_target_address_at(instruction_payload, target);
- }
-
- // Size of an instruction.
- static const int kInstrSize = sizeof(Instr);
-
- // Difference between address of current opcode and target address offset.
- static const int kBranchPCOffset = 4;
-
- // Here we are patching the address in the LUI/ORI instruction pair.
- // These values are used in the serialization process and must be zero for
- // MIPS platform, as Code, Embedded Object or External-reference pointers
- // are split across two consecutive instructions and don't exist separately
- // in the code, so the serializer should not step forwards in memory after
- // a target is resolved and written.
- static const int kCallTargetSize = 0 * kInstrSize;
- static const int kExternalTargetSize = 0 * kInstrSize;
-
- // Number of consecutive instructions used to store 32bit constant.
- // Used in RelocInfo::target_address_address() function to tell serializer
- // address of the instruction that follows LUI/ORI instruction pair.
- static const int kInstructionsFor32BitConstant = 2;
-
- // Distance between the instruction referring to the address of the call
- // target and the return address.
- static const int kCallTargetAddressOffset = 4 * kInstrSize;
-
- // Distance between start of patched return sequence and the emitted address
- // to jump to.
- static const int kPatchReturnSequenceAddressOffset = 0;
-
- // Distance between start of patched debug break slot and the emitted address
- // to jump to.
- static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
-
- // Difference between address of current opcode and value read from pc
- // register.
- static const int kPcLoadDelta = 4;
-
- // Number of instructions used for the JS return sequence. The constant is
- // used by the debugger to patch the JS return sequence.
- static const int kJSReturnSequenceInstructions = 7;
- static const int kDebugBreakSlotInstructions = 4;
- static const int kDebugBreakSlotLength =
- kDebugBreakSlotInstructions * kInstrSize;
-
-
- // ---------------------------------------------------------------------------
- // Code generation.
-
- // Insert the smallest number of nop instructions
- // possible to align the pc offset to a multiple
- // of m. m must be a power of 2 (>= 4).
- void Align(int m);
- // Aligns code to something that's optimal for a jump target for the platform.
- void CodeTargetAlign();
-
- // Different nop operations are used by the code generator to detect certain
- // states of the generated code.
- enum NopMarkerTypes {
- NON_MARKING_NOP = 0,
- DEBUG_BREAK_NOP,
- // IC markers.
- PROPERTY_ACCESS_INLINED,
- PROPERTY_ACCESS_INLINED_CONTEXT,
- PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
- // Helper values.
- LAST_CODE_MARKER,
- FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
- };
-
- // type == 0 is the default non-marking type.
- void nop(unsigned int type = 0) {
- ASSERT(type < 32);
- sll(zero_reg, zero_reg, type, true);
- }
-
-
- //------- Branch and jump instructions --------
- // We don't use likely variant of instructions.
- void b(int16_t offset);
- void b(Label* L) { b(branch_offset(L, false)>>2); }
- void bal(int16_t offset);
- void bal(Label* L) { bal(branch_offset(L, false)>>2); }
-
- void beq(Register rs, Register rt, int16_t offset);
- void beq(Register rs, Register rt, Label* L) {
- beq(rs, rt, branch_offset(L, false) >> 2);
- }
- void bgez(Register rs, int16_t offset);
- void bgezal(Register rs, int16_t offset);
- void bgtz(Register rs, int16_t offset);
- void blez(Register rs, int16_t offset);
- void bltz(Register rs, int16_t offset);
- void bltzal(Register rs, int16_t offset);
- void bne(Register rs, Register rt, int16_t offset);
- void bne(Register rs, Register rt, Label* L) {
- bne(rs, rt, branch_offset(L, false)>>2);
- }
-
- // Never use the int16_t b(l)cond version with a branch offset
- // instead of using the Label* version. See Twiki for infos.
-
- // Jump targets must be in the current 256 MB-aligned region. ie 28 bits.
- void j(int32_t target);
- void jal(int32_t target);
- void jalr(Register rs, Register rd = ra);
- void jr(Register target);
-
-
- //-------Data-processing-instructions---------
-
- // Arithmetic.
- void addu(Register rd, Register rs, Register rt);
- void subu(Register rd, Register rs, Register rt);
- void mult(Register rs, Register rt);
- void multu(Register rs, Register rt);
- void div(Register rs, Register rt);
- void divu(Register rs, Register rt);
- void mul(Register rd, Register rs, Register rt);
-
- void addiu(Register rd, Register rs, int32_t j);
-
- // Logical.
- void and_(Register rd, Register rs, Register rt);
- void or_(Register rd, Register rs, Register rt);
- void xor_(Register rd, Register rs, Register rt);
- void nor(Register rd, Register rs, Register rt);
-
- void andi(Register rd, Register rs, int32_t j);
- void ori(Register rd, Register rs, int32_t j);
- void xori(Register rd, Register rs, int32_t j);
- void lui(Register rd, int32_t j);
-
- // Shifts.
- // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop
- // and may cause problems in normal code. coming_from_nop makes sure this
- // doesn't happen.
- void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false);
- void sllv(Register rd, Register rt, Register rs);
- void srl(Register rd, Register rt, uint16_t sa);
- void srlv(Register rd, Register rt, Register rs);
- void sra(Register rt, Register rd, uint16_t sa);
- void srav(Register rt, Register rd, Register rs);
- void rotr(Register rd, Register rt, uint16_t sa);
- void rotrv(Register rd, Register rt, Register rs);
-
-
- //------------Memory-instructions-------------
-
- void lb(Register rd, const MemOperand& rs);
- void lbu(Register rd, const MemOperand& rs);
- void lh(Register rd, const MemOperand& rs);
- void lhu(Register rd, const MemOperand& rs);
- void lw(Register rd, const MemOperand& rs);
- void lwl(Register rd, const MemOperand& rs);
- void lwr(Register rd, const MemOperand& rs);
- void sb(Register rd, const MemOperand& rs);
- void sh(Register rd, const MemOperand& rs);
- void sw(Register rd, const MemOperand& rs);
- void swl(Register rd, const MemOperand& rs);
- void swr(Register rd, const MemOperand& rs);
-
-
- //-------------Misc-instructions--------------
-
- // Break / Trap instructions.
- void break_(uint32_t code);
- void tge(Register rs, Register rt, uint16_t code);
- void tgeu(Register rs, Register rt, uint16_t code);
- void tlt(Register rs, Register rt, uint16_t code);
- void tltu(Register rs, Register rt, uint16_t code);
- void teq(Register rs, Register rt, uint16_t code);
- void tne(Register rs, Register rt, uint16_t code);
-
- // Move from HI/LO register.
- void mfhi(Register rd);
- void mflo(Register rd);
-
- // Set on less than.
- void slt(Register rd, Register rs, Register rt);
- void sltu(Register rd, Register rs, Register rt);
- void slti(Register rd, Register rs, int32_t j);
- void sltiu(Register rd, Register rs, int32_t j);
-
- // Conditional move.
- void movz(Register rd, Register rs, Register rt);
- void movn(Register rd, Register rs, Register rt);
- void movt(Register rd, Register rs, uint16_t cc = 0);
- void movf(Register rd, Register rs, uint16_t cc = 0);
-
- // Bit twiddling.
- void clz(Register rd, Register rs);
- void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
- void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
-
- //--------Coprocessor-instructions----------------
-
- // Load, store, and move.
- void lwc1(FPURegister fd, const MemOperand& src);
- void ldc1(FPURegister fd, const MemOperand& src);
-
- void swc1(FPURegister fs, const MemOperand& dst);
- void sdc1(FPURegister fs, const MemOperand& dst);
-
- void mtc1(Register rt, FPURegister fs);
- void mfc1(Register rt, FPURegister fs);
-
- void ctc1(Register rt, FPUControlRegister fs);
- void cfc1(Register rt, FPUControlRegister fs);
-
- // Arithmetic.
- void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
- void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
- void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
- void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
- void abs_d(FPURegister fd, FPURegister fs);
- void mov_d(FPURegister fd, FPURegister fs);
- void neg_d(FPURegister fd, FPURegister fs);
- void sqrt_d(FPURegister fd, FPURegister fs);
-
- // Conversion.
- void cvt_w_s(FPURegister fd, FPURegister fs);
- void cvt_w_d(FPURegister fd, FPURegister fs);
- void trunc_w_s(FPURegister fd, FPURegister fs);
- void trunc_w_d(FPURegister fd, FPURegister fs);
- void round_w_s(FPURegister fd, FPURegister fs);
- void round_w_d(FPURegister fd, FPURegister fs);
- void floor_w_s(FPURegister fd, FPURegister fs);
- void floor_w_d(FPURegister fd, FPURegister fs);
- void ceil_w_s(FPURegister fd, FPURegister fs);
- void ceil_w_d(FPURegister fd, FPURegister fs);
-
- void cvt_l_s(FPURegister fd, FPURegister fs);
- void cvt_l_d(FPURegister fd, FPURegister fs);
- void trunc_l_s(FPURegister fd, FPURegister fs);
- void trunc_l_d(FPURegister fd, FPURegister fs);
- void round_l_s(FPURegister fd, FPURegister fs);
- void round_l_d(FPURegister fd, FPURegister fs);
- void floor_l_s(FPURegister fd, FPURegister fs);
- void floor_l_d(FPURegister fd, FPURegister fs);
- void ceil_l_s(FPURegister fd, FPURegister fs);
- void ceil_l_d(FPURegister fd, FPURegister fs);
-
- void cvt_s_w(FPURegister fd, FPURegister fs);
- void cvt_s_l(FPURegister fd, FPURegister fs);
- void cvt_s_d(FPURegister fd, FPURegister fs);
-
- void cvt_d_w(FPURegister fd, FPURegister fs);
- void cvt_d_l(FPURegister fd, FPURegister fs);
- void cvt_d_s(FPURegister fd, FPURegister fs);
-
- // Conditions and branches.
- void c(FPUCondition cond, SecondaryField fmt,
- FPURegister ft, FPURegister fs, uint16_t cc = 0);
-
- void bc1f(int16_t offset, uint16_t cc = 0);
- void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
- void bc1t(int16_t offset, uint16_t cc = 0);
- void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); }
- void fcmp(FPURegister src1, const double src2, FPUCondition cond);
-
- // Check the code size generated from label to here.
- int InstructionsGeneratedSince(Label* l) {
- return (pc_offset() - l->pos()) / kInstrSize;
- }
-
- // Class for scoping postponing the trampoline pool generation.
- class BlockTrampolinePoolScope {
- public:
- explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
- assem_->StartBlockTrampolinePool();
- }
- ~BlockTrampolinePoolScope() {
- assem_->EndBlockTrampolinePool();
- }
-
- private:
- Assembler* assem_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
- };
-
- // Debugging.
-
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
-
- // Mark address of a debug break slot.
- void RecordDebugBreakSlot();
-
- // Record a comment relocation entry that can be used by a disassembler.
- // Use --code-comments to enable.
- void RecordComment(const char* msg);
-
- // Writes a single byte or word of data in the code stream. Used for
- // inline tables, e.g., jump-tables.
- void db(uint8_t data);
- void dd(uint32_t data);
-
- int32_t pc_offset() const { return pc_ - buffer_; }
-
- PositionsRecorder* positions_recorder() { return &positions_recorder_; }
-
- bool can_peephole_optimize(int instructions) {
- if (!allow_peephole_optimization_) return false;
- if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
- return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
- }
-
- // Postpone the generation of the trampoline pool for the specified number of
- // instructions.
- void BlockTrampolinePoolFor(int instructions);
-
- // Check if there is less than kGap bytes available in the buffer.
- // If this is the case, we need to grow the buffer before emitting
- // an instruction or relocation information.
- inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
-
- // Get the number of bytes available in the buffer.
- inline int available_space() const { return reloc_info_writer.pos() - pc_; }
-
- // Read/patch instructions.
- static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
- static void instr_at_put(byte* pc, Instr instr) {
- *reinterpret_cast<Instr*>(pc) = instr;
- }
- Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
- void instr_at_put(int pos, Instr instr) {
- *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
- }
-
- // Check if an instruction is a branch of some kind.
- static bool IsBranch(Instr instr);
-
- static bool IsNop(Instr instr, unsigned int type);
- static bool IsPop(Instr instr);
- static bool IsPush(Instr instr);
- static bool IsLwRegFpOffset(Instr instr);
- static bool IsSwRegFpOffset(Instr instr);
- static bool IsLwRegFpNegOffset(Instr instr);
- static bool IsSwRegFpNegOffset(Instr instr);
-
- static Register GetRt(Instr instr);
-
- static int32_t GetBranchOffset(Instr instr);
- static bool IsLw(Instr instr);
- static int16_t GetLwOffset(Instr instr);
- static Instr SetLwOffset(Instr instr, int16_t offset);
-
- static bool IsSw(Instr instr);
- static Instr SetSwOffset(Instr instr, int16_t offset);
- static bool IsAddImmediate(Instr instr);
- static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
-
- void CheckTrampolinePool(bool force_emit = false);
-
- protected:
- bool emit_debug_code() const { return emit_debug_code_; }
-
- int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
-
- // Decode branch instruction at pos and return branch target pos.
- int target_at(int32_t pos);
-
- // Patch branch instruction at pos to branch to given branch target pos.
- void target_at_put(int32_t pos, int32_t target_pos);
-
- // Say if we need to relocate with this mode.
- bool MustUseReg(RelocInfo::Mode rmode);
-
- // Record reloc info for current pc_.
- void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
-
- // Block the emission of the trampoline pool before pc_offset.
- void BlockTrampolinePoolBefore(int pc_offset) {
- if (no_trampoline_pool_before_ < pc_offset)
- no_trampoline_pool_before_ = pc_offset;
- }
-
- void StartBlockTrampolinePool() {
- trampoline_pool_blocked_nesting_++;
- }
- void EndBlockTrampolinePool() {
- trampoline_pool_blocked_nesting_--;
- }
-
- bool is_trampoline_pool_blocked() const {
- return trampoline_pool_blocked_nesting_ > 0;
- }
-
- private:
- // Code buffer:
- // The buffer into which code and relocation info are generated.
- byte* buffer_;
- int buffer_size_;
- // True if the assembler owns the buffer, false if buffer is external.
- bool own_buffer_;
-
- // Buffer size and constant pool distance are checked together at regular
- // intervals of kBufferCheckInterval emitted bytes.
- static const int kBufferCheckInterval = 1*KB/2;
-
- // Code generation.
- // The relocation writer's position is at least kGap bytes below the end of
- // the generated instructions. This is so that multi-instruction sequences do
- // not have to check for overflow. The same is true for writes of large
- // relocation info entries.
- static const int kGap = 32;
- byte* pc_; // The program counter - moves forward.
-
-
- // Repeated checking whether the trampoline pool should be emitted is rather
- // expensive. By default we only check again once a number of instructions
- // has been generated.
- static const int kCheckConstIntervalInst = 32;
- static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
-
- int next_buffer_check_; // pc offset of next buffer check.
-
- // Emission of the trampoline pool may be blocked in some code sequences.
- int trampoline_pool_blocked_nesting_; // Block emission if this is not zero.
- int no_trampoline_pool_before_; // Block emission before this pc offset.
-
- // Keep track of the last emitted pool to guarantee a maximal distance.
- int last_trampoline_pool_end_; // pc offset of the end of the last pool.
-
- // Relocation information generation.
- // Each relocation is encoded as a variable size value.
- static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
- RelocInfoWriter reloc_info_writer;
-
- // The bound position, before this we cannot do instruction elimination.
- int last_bound_pos_;
-
- // Code emission.
- inline void CheckBuffer();
- void GrowBuffer();
- inline void emit(Instr x);
- inline void CheckTrampolinePoolQuick();
-
- // Instruction generation.
- // We have 3 different kind of encoding layout on MIPS.
- // However due to many different types of objects encoded in the same fields
- // we have quite a few aliases for each mode.
- // Using the same structure to refer to Register and FPURegister would spare a
- // few aliases, but mixing both does not look clean to me.
- // Anyway we could surely implement this differently.
-
- void GenInstrRegister(Opcode opcode,
- Register rs,
- Register rt,
- Register rd,
- uint16_t sa = 0,
- SecondaryField func = NULLSF);
-
- void GenInstrRegister(Opcode opcode,
- Register rs,
- Register rt,
- uint16_t msb,
- uint16_t lsb,
- SecondaryField func);
-
- void GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- FPURegister ft,
- FPURegister fs,
- FPURegister fd,
- SecondaryField func = NULLSF);
-
- void GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- Register rt,
- FPURegister fs,
- FPURegister fd,
- SecondaryField func = NULLSF);
-
- void GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- Register rt,
- FPUControlRegister fs,
- SecondaryField func = NULLSF);
-
-
- void GenInstrImmediate(Opcode opcode,
- Register rs,
- Register rt,
- int32_t j);
- void GenInstrImmediate(Opcode opcode,
- Register rs,
- SecondaryField SF,
- int32_t j);
- void GenInstrImmediate(Opcode opcode,
- Register r1,
- FPURegister r2,
- int32_t j);
-
-
- void GenInstrJump(Opcode opcode,
- uint32_t address);
-
- // Helpers.
- void LoadRegPlusOffsetToAt(const MemOperand& src);
-
- // Labels.
- void print(Label* L);
- void bind_to(Label* L, int pos);
- void link_to(Label* L, Label* appendix);
- void next(Label* L);
-
- // One trampoline consists of:
- // - space for trampoline slots,
- // - space for labels.
- //
- // Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
- // Space for trampoline slots preceeds space for labels. Each label is of one
- // instruction size, so total amount for labels is equal to
- // label_count * kInstrSize.
- class Trampoline {
- public:
- Trampoline(int start, int slot_count, int label_count) {
- start_ = start;
- next_slot_ = start;
- free_slot_count_ = slot_count;
- next_label_ = start + slot_count * 2 * kInstrSize;
- free_label_count_ = label_count;
- end_ = next_label_ + (label_count - 1) * kInstrSize;
- }
- int start() {
- return start_;
- }
- int end() {
- return end_;
- }
- int take_slot() {
- int trampoline_slot = next_slot_;
- ASSERT(free_slot_count_ > 0);
- free_slot_count_--;
- next_slot_ += 2 * kInstrSize;
- return trampoline_slot;
- }
- int take_label() {
- int label_pos = next_label_;
- ASSERT(free_label_count_ > 0);
- free_label_count_--;
- next_label_ += kInstrSize;
- return label_pos;
- }
- private:
- int start_;
- int end_;
- int next_slot_;
- int free_slot_count_;
- int next_label_;
- int free_label_count_;
- };
-
- int32_t get_label_entry(int32_t pos, bool next_pool = true);
- int32_t get_trampoline_entry(int32_t pos, bool next_pool = true);
-
- static const int kSlotsPerTrampoline = 2304;
- static const int kLabelsPerTrampoline = 8;
- static const int kTrampolineInst =
- 2 * kSlotsPerTrampoline + kLabelsPerTrampoline;
- static const int kTrampolineSize = kTrampolineInst * kInstrSize;
- static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
- static const int kMaxDistBetweenPools =
- kMaxBranchOffset - 2 * kTrampolineSize;
-
- List<Trampoline> trampolines_;
-
- friend class RegExpMacroAssemblerMIPS;
- friend class RelocInfo;
- friend class CodePatcher;
- friend class BlockTrampolinePoolScope;
-
- PositionsRecorder positions_recorder_;
- bool allow_peephole_optimization_;
- bool emit_debug_code_;
- friend class PositionsRecorder;
- friend class EnsureSpace;
-};
-
-
-class EnsureSpace BASE_EMBEDDED {
- public:
- explicit EnsureSpace(Assembler* assembler) {
- assembler->CheckBuffer();
- }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_ASSEMBLER_MIPS_H_
diff --git a/src/3rdparty/v8/src/mips/builtins-mips.cc b/src/3rdparty/v8/src/mips/builtins-mips.cc
deleted file mode 100644
index b4bab8e..0000000
--- a/src/3rdparty/v8/src/mips/builtins-mips.cc
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "runtime.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
- CFunctionId id,
- BuiltinExtraArguments extra_args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/code-stubs-mips.cc b/src/3rdparty/v8/src/mips/code-stubs-mips.cc
deleted file mode 100644
index 6cc272c..0000000
--- a/src/3rdparty/v8/src/mips/code-stubs-mips.cc
+++ /dev/null
@@ -1,752 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "regexp-macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// Takes a Smi and converts to an IEEE 64 bit floating point value in two
-// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
-// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
-// scratch register. Destroys the source register. No GC occurs during this
-// stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public CodeStub {
- public:
- ConvertToDoubleStub(Register result_reg_1,
- Register result_reg_2,
- Register source_reg,
- Register scratch_reg)
- : result1_(result_reg_1),
- result2_(result_reg_2),
- source_(source_reg),
- zeros_(scratch_reg) { }
-
- private:
- Register result1_;
- Register result2_;
- Register source_;
- Register zeros_;
-
- // Minor key encoding in 16 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 14> {};
-
- Major MajorKey() { return ConvertToDouble; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return result1_.code() +
- (result2_.code() << 4) +
- (source_.code() << 8) +
- (zeros_.code() << 12);
- }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "ConvertToDoubleStub"; }
-
-#ifdef DEBUG
- void Print() { PrintF("ConvertToDoubleStub\n"); }
-#endif
-};
-
-
-void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class FloatingPointHelper : public AllStatic {
- public:
-
- enum Destination {
- kFPURegisters,
- kCoreRegisters
- };
-
-
- // Loads smis from a0 and a1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either f14 and f12 or in a2/a3 and a0/a1 respectively. If the destination
- // is floating point registers FPU must be supported. If core registers are
- // requested when FPU is supported f12 and f14 will be scratched.
- static void LoadSmis(MacroAssembler* masm,
- Destination destination,
- Register scratch1,
- Register scratch2);
-
- // Loads objects from a0 and a1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either f14 and f12 or in a2/a3 and a0/a1 respectively. If the destination
- // is floating point registers FPU must be supported. If core registers are
- // requested when FPU is supported f12 and f14 will still be scratched. If
- // either a0 or a1 is not a number (not smi and not heap number object) the
- // not_number label is jumped to with a0 and a1 intact.
- static void LoadOperands(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
- // Loads the number from object into dst as a 32-bit integer if possible. If
- // the object is not a 32-bit integer control continues at the label
- // not_int32. If FPU is supported double_scratch is used but not scratch2.
- static void LoadNumberAsInteger(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- FPURegister double_scratch,
- Label* not_int32);
- private:
- static void LoadNumber(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register object,
- FPURegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-};
-
-
-void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register scratch1,
- Register scratch2) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FloatingPointHelper::LoadOperands(
- MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* slow) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
- Destination destination,
- Register object,
- FPURegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- FPURegister double_scratch,
- Label* not_int32) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// See comment for class, this does NOT work for int32's that are in Smi range.
-void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void EmitNanCheck(MacroAssembler* masm, Condition cc) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- bool object_is_smi,
- Label* not_found) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
-// On exit, v0 is 0, positive, or negative (smi) to indicate the result
-// of the comparison.
-void CompareStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// This stub does not handle the inlined cases (Smis, Booleans, undefined).
-// The stub returns zero for false, and a non-zero value for true.
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// We fall into this code if the operands were Smis, but the result was
-// not (eg. overflow). We branch into this code (to the not_smi label) if
-// the operands were not both Smi. The operands are in lhs and rhs.
-// To call the C-implemented binary fp operation routines we need to end up
-// with the double precision floating point operands in a0 and a1 (for the
-// value in a1) and a2 and a3 (for the value in a0).
-void GenericBinaryOpStub::HandleBinaryOpSlowCases(MacroAssembler* masm,
- Label* not_smi,
- Register lhs,
- Register rhs,
- const Builtins::JavaScript& builtin) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// For bitwise ops where the inputs are not both Smis we here try to determine
-// whether both inputs are either Smis or at least heap numbers that can be
-// represented by a 32 bit signed value. We truncate towards zero as required
-// by the ES spec. If this is the case we do the bitwise op and see if the
-// result is a Smi. If so, great, otherwise we try to find a heap number to
-// write the answer into (either by allocating or by overwriting).
-// On entry the operands are in lhs (x) and rhs (y). (Result = x op y).
-// On exit the result is in v0.
-void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
- Register lhs,
- Register rhs) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- GenericBinaryOpStub stub(key, type_info);
- return stub.GetCode();
-}
-
-
-Handle<Code> GetTypeRecordingBinaryOpStub(int key,
- TRBinaryOpIC::TypeInfo type_info,
- TRBinaryOpIC::TypeInfo result_type_info) {
- TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
- return stub.GetCode();
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
- MacroAssembler* masm) {
- UNIMPLEMENTED();
-}
-
-
-void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-const char* TypeRecordingBinaryOpStub::GetName() {
- UNIMPLEMENTED_MIPS();
- return name_;
-}
-
-
-
-void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
- MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// Generate the smi code. If the operation on smis are successful this return is
-// generated. If the result is not a smi and heap number allocation is not
-// requested the code falls through. If number allocation is requested but a
-// heap number cannot be allocated the code jumps to the lable gc_required.
-void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
- Label* gc_required,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
- MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- UNIMPLEMENTED_MIPS();
- return Runtime::kAbort;
-}
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-bool CEntryStub::NeedsImmovableCode() {
- return true;
-}
-
-
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// Uses registers a0 to t0. Expected input is
-// object in a0 (or at sp+1*kPointerSize) and function in
-// a1 (or at sp), depending on whether or not
-// args_in_registers() is true.
-void InstanceofStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpExecStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
- UNIMPLEMENTED_MIPS();
- return name_;
-}
-
-
-int CompareStub::MinorKey() {
- UNIMPLEMENTED_MIPS();
- return 0;
-}
-
-
-// StringCharCodeAtGenerator
-
-void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharAtGenerator
-
-void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class StringHelper : public AllStatic {
- public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersLong adds too much
- // overhead. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
-
- // Generate code for copying a large number of characters. This function
- // is allowed to spend extra time setting up conditions to make copying
- // faster. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags);
-
-
- // Probe the symbol table for a two character string. If the string is
- // not found by probing a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the symbol table. If the
- // string is found the code falls through with the string in register r0.
- // Contents of both c1 and c2 registers are modified. At the exit c1 is
- // guaranteed to contain halfword with low and high bytes equal to
- // initial contents of c1 and c2 respectively.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found);
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-enum CopyCharactersFlags {
- COPY_ASCII = 1,
- DEST_ALWAYS_ALIGNED = 2
-};
-
-
-void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void SubStringStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register right,
- Register left,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void StringCompareStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void StringAddStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void GenerateFastPixelArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements_map,
- Register elements,
- Register scratch1,
- Register scratch2,
- Register result,
- Label* not_pixel_array,
- Label* key_not_smi,
- Label* out_of_range) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
-
diff --git a/src/3rdparty/v8/src/mips/code-stubs-mips.h b/src/3rdparty/v8/src/mips/code-stubs-mips.h
deleted file mode 100644
index 675730a..0000000
--- a/src/3rdparty/v8/src/mips/code-stubs-mips.h
+++ /dev/null
@@ -1,511 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MIPS_CODE_STUBS_ARM_H_
-#define V8_MIPS_CODE_STUBS_ARM_H_
-
-#include "ic-inl.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
- public:
- explicit TranscendentalCacheStub(TranscendentalCache::Type type)
- : type_(type) {}
- void Generate(MacroAssembler* masm);
- private:
- TranscendentalCache::Type type_;
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_; }
- Runtime::FunctionId RuntimeFunction();
-};
-
-
-class ToBooleanStub: public CodeStub {
- public:
- explicit ToBooleanStub(Register tos) : tos_(tos) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Register tos_;
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return tos_.code(); }
-};
-
-
-class GenericBinaryOpStub : public CodeStub {
- public:
- static const int kUnknownIntValue = -1;
-
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- Register lhs,
- Register rhs,
- int constant_rhs = kUnknownIntValue)
- : op_(op),
- mode_(mode),
- lhs_(lhs),
- rhs_(rhs),
- constant_rhs_(constant_rhs),
- specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
- runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
- name_(NULL) { }
-
- GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- lhs_(LhsRegister(RegisterBits::decode(key))),
- rhs_(RhsRegister(RegisterBits::decode(key))),
- constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
- specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
- runtime_operands_type_(type_info),
- name_(NULL) { }
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- Register lhs_;
- Register rhs_;
- int constant_rhs_;
- bool specialized_on_rhs_;
- BinaryOpIC::TypeInfo runtime_operands_type_;
- char* name_;
-
- static const int kMaxKnownRhs = 0x40000000;
- static const int kKnownRhsKeyBits = 6;
-
- // Minor key encoding in 16 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 6> {};
- class TypeInfoBits: public BitField<int, 8, 3> {};
- class RegisterBits: public BitField<bool, 11, 1> {};
- class KnownIntBits: public BitField<int, 12, kKnownRhsKeyBits> {};
-
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
- (lhs_.is(a1) && rhs_.is(a0)));
- // Encode the parameters in a unique 16 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | KnownIntBits::encode(MinorKeyForKnownInt())
- | TypeInfoBits::encode(runtime_operands_type_)
- | RegisterBits::encode(lhs_.is(a0));
- }
-
- void Generate(MacroAssembler* masm);
- void HandleNonSmiBitwiseOp(MacroAssembler* masm,
- Register lhs,
- Register rhs);
- void HandleBinaryOpSlowCases(MacroAssembler* masm,
- Label* not_smi,
- Register lhs,
- Register rhs,
- const Builtins::JavaScript& builtin);
- void GenerateTypeTransition(MacroAssembler* masm);
-
- static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
- if (constant_rhs == kUnknownIntValue) return false;
- if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
- if (op == Token::MOD) {
- if (constant_rhs <= 1) return false;
- if (constant_rhs <= 10) return true;
- if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
- return false;
- }
- return false;
- }
-
- int MinorKeyForKnownInt() {
- if (!specialized_on_rhs_) return 0;
- if (constant_rhs_ <= 10) return constant_rhs_ + 1;
- ASSERT(IsPowerOf2(constant_rhs_));
- int key = 12;
- int d = constant_rhs_;
- while ((d & 1) == 0) {
- key++;
- d >>= 1;
- }
- ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
- return key;
- }
-
- int KnownBitsForMinorKey(int key) {
- if (!key) return 0;
- if (key <= 11) return key - 1;
- int d = 1;
- while (key != 12) {
- key--;
- d <<= 1;
- }
- return d;
- }
-
- Register LhsRegister(bool lhs_is_a0) {
- return lhs_is_a0 ? a0 : a1;
- }
-
- Register RhsRegister(bool lhs_is_a0) {
- return lhs_is_a0 ? a1 : a0;
- }
-
- bool HasSmiSmiFastPath() {
- return op_ != Token::DIV;
- }
-
- bool ShouldGenerateSmiCode() {
- return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- bool ShouldGenerateFPCode() {
- return runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(runtime_operands_type_);
- }
-
- const char* GetName();
-
- virtual void FinishCode(Code* code) {
- code->set_binary_op_type(runtime_operands_type_);
- }
-
-#ifdef DEBUG
- void Print() {
- if (!specialized_on_rhs_) {
- PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
- } else {
- PrintF("GenericBinaryOpStub (%s by %d)\n",
- Token::String(op_),
- constant_rhs_);
- }
- }
-#endif
-};
-
-class TypeRecordingBinaryOpStub: public CodeStub {
- public:
- TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- operands_type_(TRBinaryOpIC::UNINITIALIZED),
- result_type_(TRBinaryOpIC::UNINITIALIZED),
- name_(NULL) {
- UNIMPLEMENTED_MIPS();
- }
-
- TypeRecordingBinaryOpStub(
- int key,
- TRBinaryOpIC::TypeInfo operands_type,
- TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- use_fpu_(FPUBits::decode(key)),
- operands_type_(operands_type),
- result_type_(result_type),
- name_(NULL) { }
-
- private:
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- Token::Value op_;
- OverwriteMode mode_;
- bool use_fpu_;
-
- // Operand type information determined at runtime.
- TRBinaryOpIC::TypeInfo operands_type_;
- TRBinaryOpIC::TypeInfo result_type_;
-
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("TypeRecordingBinaryOpStub %d (op %s), "
- "(mode %d, runtime_type_info %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- TRBinaryOpIC::GetName(operands_type_));
- }
-#endif
-
- // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class FPUBits: public BitField<bool, 9, 1> {};
- class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
- class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
-
- Major MajorKey() { return TypeRecordingBinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FPUBits::encode(use_fpu_)
- | OperandTypeInfoBits::encode(operands_type_)
- | ResultTypeInfoBits::encode(result_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateSmiSmiOperation(MacroAssembler* masm);
- void GenerateFPOperation(MacroAssembler* masm,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required);
- void GenerateSmiCode(MacroAssembler* masm,
- Label* gc_required,
- SmiCodeGenerateHeapNumberResults heapnumber_results);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateAddStrings(MacroAssembler* masm);
- void GenerateCallRuntime(MacroAssembler* masm);
-
- void GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return TRBinaryOpIC::ToState(operands_type_);
- }
-
- virtual void FinishCode(Code* code) {
- code->set_type_recording_binary_op_type(operands_type_);
- code->set_type_recording_binary_op_result_type(result_type_);
- }
-
- friend class CodeGenerator;
-};
-
-
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
-};
-
-
-class StringAddStub: public CodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) {
- string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
- }
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return string_check_ ? 0 : 1; }
-
- void Generate(MacroAssembler* masm);
-
- // Should the stub check whether arguments are strings?
- bool string_check_;
-};
-
-
-class SubStringStub: public CodeStub {
- public:
- SubStringStub() {}
-
- private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public CodeStub {
- public:
- StringCompareStub() { }
-
- // Compare two flat ASCII strings and returns result in v0.
- // Does not use the stack.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- private:
- Major MajorKey() { return StringCompare; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-// This stub can convert a signed int32 to a heap number (double). It does
-// not work for int32s that are in Smi range! No GC occurs during this stub
-// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public CodeStub {
- public:
- WriteInt32ToHeapNumberStub(Register the_int,
- Register the_heap_number,
- Register scratch,
- Register scratch2)
- : the_int_(the_int),
- the_heap_number_(the_heap_number),
- scratch_(scratch),
- sign_(scratch2) { }
-
- private:
- Register the_int_;
- Register the_heap_number_;
- Register scratch_;
- Register sign_;
-
- // Minor key encoding in 16 bits.
- class IntRegisterBits: public BitField<int, 0, 4> {};
- class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
- class ScratchRegisterBits: public BitField<int, 8, 4> {};
-
- Major MajorKey() { return WriteInt32ToHeapNumber; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return IntRegisterBits::encode(the_int_.code())
- | HeapNumberRegisterBits::encode(the_heap_number_.code())
- | ScratchRegisterBits::encode(scratch_.code());
- }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
-
-#ifdef DEBUG
- void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
-#endif
-};
-
-
-class NumberToStringStub: public CodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- bool object_is_smi,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "NumberToStringStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("NumberToStringStub\n");
- }
-#endif
-};
-
-
-// Enter C code from generated RegExp code in a way that allows
-// the C code to fix the return address in case of a GC.
-// Currently only needed on ARM and MIPS.
-class RegExpCEntryStub: public CodeStub {
- public:
- RegExpCEntryStub() {}
- virtual ~RegExpCEntryStub() {}
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return RegExpCEntry; }
- int MinorKey() { return 0; }
-
- bool NeedsImmovableCode() { return true; }
-
- const char* GetName() { return "RegExpCEntryStub"; }
-};
-
-
-// Generate code the to load an element from a pixel array. The receiver is
-// assumed to not be a smi and to have elements, the caller must guarantee this
-// precondition. If the receiver does not have elements that are pixel arrays,
-// the generated code jumps to not_pixel_array. If key is not a smi, then the
-// generated code branches to key_not_smi. Callers can specify NULL for
-// key_not_smi to signal that a smi check has already been performed on key so
-// that the smi check is not generated . If key is not a valid index within the
-// bounds of the pixel array, the generated code jumps to out_of_range.
-void GenerateFastPixelArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements_map,
- Register elements,
- Register scratch1,
- Register scratch2,
- Register result,
- Label* not_pixel_array,
- Label* key_not_smi,
- Label* out_of_range);
-
-
-} } // namespace v8::internal
-
-#endif // V8_MIPS_CODE_STUBS_ARM_H_
diff --git a/src/3rdparty/v8/src/mips/codegen-mips-inl.h b/src/3rdparty/v8/src/mips/codegen-mips-inl.h
deleted file mode 100644
index be9ae9e..0000000
--- a/src/3rdparty/v8/src/mips/codegen-mips-inl.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_MIPS_CODEGEN_MIPS_INL_H_
-#define V8_MIPS_CODEGEN_MIPS_INL_H_
-
-#include "virtual-frame-mips.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// Platform-specific inline functions.
-
-void DeferredCode::Jump() {
- __ b(&entry_label_);
- __ nop();
-}
-
-
-// Note: this has been hacked for submisson. Mips branches require two
-// additional operands: Register src1, const Operand& src2.
-void DeferredCode::Branch(Condition cond) {
- __ Branch(&entry_label_, cond, zero_reg, Operand(0));
-}
-
-
-void Reference::GetValueAndSpill() {
- GetValue();
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_MIPS_CODEGEN_MIPS_INL_H_
-
diff --git a/src/3rdparty/v8/src/mips/codegen-mips.cc b/src/3rdparty/v8/src/mips/codegen-mips.cc
deleted file mode 100644
index c1149df..0000000
--- a/src/3rdparty/v8/src/mips/codegen-mips.cc
+++ /dev/null
@@ -1,1213 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "jsregexp.h"
-#include "jump-target-inl.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-stack.h"
-#include "register-allocator-inl.h"
-#include "runtime.h"
-#include "scopes.h"
-#include "stub-cache.h"
-#include "virtual-frame-inl.h"
-#include "virtual-frame-mips-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm_)
-
-// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-void DeferredCode::SaveRegisters() {
- // On MIPS you either have a completely spilled frame or you
- // handle it yourself, but at the moment there's no automation
- // of registers and deferred code.
-}
-
-
-void DeferredCode::RestoreRegisters() {
-}
-
-
-// -------------------------------------------------------------------------
-// Platform-specific RuntimeCallHelper functions.
-
-void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- frame_state_->frame()->AssertIsSpilled();
-}
-
-
-void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-}
-
-
-void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterInternalFrame();
-}
-
-
-void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveInternalFrame();
-}
-
-
-// -----------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
- : owner_(owner),
- previous_(owner->state()) {
- owner->set_state(this);
-}
-
-
-ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
- JumpTarget* true_target,
- JumpTarget* false_target)
- : CodeGenState(owner),
- true_target_(true_target),
- false_target_(false_target) {
- owner->set_state(this);
-}
-
-
-TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
- Slot* slot,
- TypeInfo type_info)
- : CodeGenState(owner),
- slot_(slot) {
- owner->set_state(this);
- old_type_info_ = owner->set_type_info(slot, type_info);
-}
-
-
-CodeGenState::~CodeGenState() {
- ASSERT(owner_->state() == this);
- owner_->set_state(previous_);
-}
-
-
-TypeInfoCodeGenState::~TypeInfoCodeGenState() {
- owner()->set_type_info(slot_, old_type_info_);
-}
-
-
-// -----------------------------------------------------------------------------
-// CodeGenerator implementation.
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
- : deferred_(8),
- masm_(masm),
- info_(NULL),
- frame_(NULL),
- allocator_(NULL),
- cc_reg_(cc_always),
- state_(NULL),
- loop_nesting_(0),
- type_info_(NULL),
- function_return_(JumpTarget::BIDIRECTIONAL),
- function_return_is_shadowed_(false) {
-}
-
-
-// Calling conventions:
-// fp: caller's frame pointer
-// sp: stack pointer
-// a1: called JS function
-// cp: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-int CodeGenerator::NumberOfSlot(Slot* slot) {
- UNIMPLEMENTED_MIPS();
- return 0;
-}
-
-
-MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
- UNIMPLEMENTED_MIPS();
- return MemOperand(zero_reg, 0);
-}
-
-
-MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
- Slot* slot,
- Register tmp,
- Register tmp2,
- JumpTarget* slow) {
- UNIMPLEMENTED_MIPS();
- return MemOperand(zero_reg, 0);
-}
-
-
-void CodeGenerator::LoadCondition(Expression* x,
- JumpTarget* true_target,
- JumpTarget* false_target,
- bool force_cc) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::Load(Expression* x) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadGlobal() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadGlobalReceiver(Register scratch) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
- UNIMPLEMENTED_MIPS();
- return EAGER_ARGUMENTS_ALLOCATION;
-}
-
-
-void CodeGenerator::StoreArgumentsObject(bool initial) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* x) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-Reference::Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get)
- : cgen_(cgen),
- expression_(expression),
- type_(ILLEGAL),
- persist_after_get_(persist_after_get) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-Reference::~Reference() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::UnloadReference(Reference* ref) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
-// register to a boolean in the condition code register. The code
-// may jump to 'false_target' in case the register converts to 'false'.
-void CodeGenerator::ToBoolean(JumpTarget* true_target,
- JumpTarget* false_target) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenericBinaryOperation(Token::Value op,
- OverwriteMode overwrite_mode,
- GenerateInlineSmi inline_smi,
- int constant_rhs) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
- DeferredInlineSmiOperation(Token::Value op,
- int value,
- bool reversed,
- OverwriteMode overwrite_mode,
- Register tos)
- : op_(op),
- value_(value),
- reversed_(reversed),
- overwrite_mode_(overwrite_mode),
- tos_register_(tos) {
- set_comment("[ DeferredInlinedSmiOperation");
- }
-
- virtual void Generate();
- // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
- // Exit(). Currently on MIPS SaveRegisters() and RestoreRegisters() are empty
- // methods, it is the responsibility of the deferred code to save and restore
- // registers.
- virtual bool AutoSaveAndRestore() { return false; }
-
- void JumpToNonSmiInput(Condition cond, Register cmp1, const Operand& cmp2);
- void JumpToAnswerOutOfRange(Condition cond,
- Register cmp1,
- const Operand& cmp2);
-
- private:
- void GenerateNonSmiInput();
- void GenerateAnswerOutOfRange();
- void WriteNonSmiAnswer(Register answer,
- Register heap_number,
- Register scratch);
-
- Token::Value op_;
- int value_;
- bool reversed_;
- OverwriteMode overwrite_mode_;
- Register tos_register_;
- Label non_smi_input_;
- Label answer_out_of_range_;
-};
-
-
-// For bit operations we try harder and handle the case where the input is not
-// a Smi but a 32bits integer without calling the generic stub.
-void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond,
- Register cmp1,
- const Operand& cmp2) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// For bit operations the result is always 32bits so we handle the case where
-// the result does not fit in a Smi without calling the generic stub.
-void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond,
- Register cmp1,
- const Operand& cmp2) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// On entry the non-constant side of the binary operation is in tos_register_
-// and the constant smi side is nowhere. The tos_register_ is not used by the
-// virtual frame. On exit the answer is in the tos_register_ and the virtual
-// frame is unchanged.
-void DeferredInlineSmiOperation::Generate() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// Convert and write the integer answer into heap_number.
-void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
- Register heap_number,
- Register scratch) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void DeferredInlineSmiOperation::GenerateNonSmiInput() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void DeferredInlineSmiOperation::GenerateAnswerOutOfRange() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::SmiOperation(Token::Value op,
- Handle<Object> value,
- bool reversed,
- OverwriteMode mode) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// On MIPS we load registers condReg1 and condReg2 with the values which should
-// be compared. With the CodeGenerator::cc_reg_ condition, functions will be
-// able to evaluate correctly the condition. (eg CodeGenerator::Branch)
-void CodeGenerator::Comparison(Condition cc,
- Expression* left,
- Expression* right,
- bool strict) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
- CallFunctionFlags flags,
- int position) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::CheckStack() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateReturnSequence() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::InstantiateFunction(
- Handle<SharedFunctionInfo> function_info,
- bool pretenure) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitConditional(Conditional* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
- TypeofState state) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow,
- JumpTarget* done) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitSlot(Slot* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitSlotAssignment(Assignment* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
- DeferredStringCharCodeAt(Register object,
- Register index,
- Register scratch,
- Register result)
- : result_(result),
- char_code_at_generator_(object,
- index,
- scratch,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharCodeAtGenerator* fast_case_generator() {
- return &char_code_at_generator_;
- }
-
- virtual void Generate() {
- UNIMPLEMENTED_MIPS();
- }
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharCodeAtGenerator char_code_at_generator_;
-};
-
-
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredStringCharFromCode : public DeferredCode {
- public:
- DeferredStringCharFromCode(Register code,
- Register result)
- : char_from_code_generator_(code, result) {}
-
- StringCharFromCodeGenerator* fast_case_generator() {
- return &char_from_code_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_from_code_generator_.GenerateSlow(masm(), call_helper);
- }
-
- private:
- StringCharFromCodeGenerator char_from_code_generator_;
-};
-
-
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredStringCharAt : public DeferredCode {
- public:
- DeferredStringCharAt(Register object,
- Register index,
- Register scratch1,
- Register scratch2,
- Register result)
- : result_(result),
- char_at_generator_(object,
- index,
- scratch1,
- scratch2,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharAtGenerator* fast_case_generator() {
- return &char_at_generator_;
- }
-
- virtual void Generate() {
- UNIMPLEMENTED_MIPS();
-}
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharAtGenerator char_at_generator_;
-};
-
-
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
- public:
- DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
- Register map_result,
- Register scratch1,
- Register scratch2)
- : object_(object),
- map_result_(map_result),
- scratch1_(scratch1),
- scratch2_(scratch2) { }
-
- virtual void Generate() {
- UNIMPLEMENTED_MIPS();
- }
-
- private:
- Register object_;
- Register map_result_;
- Register scratch1_;
- Register scratch2_;
-};
-
-
-void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateRandomHeapNumber(
- ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredSearchCache: public DeferredCode {
- public:
- DeferredSearchCache(Register dst, Register cache, Register key)
- : dst_(dst), cache_(cache), key_(key) {
- set_comment("[ DeferredSearchCache");
- }
-
- virtual void Generate();
-
- private:
- Register dst_, cache_, key_;
-};
-
-
-void DeferredSearchCache::Generate() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredSwapElements: public DeferredCode {
- public:
- DeferredSwapElements(Register object, Register index1, Register index2)
- : object_(object), index1_(index1), index2_(index2) {
- set_comment("[ DeferredSwapElements");
- }
-
- virtual void Generate();
-
- private:
- Register object_, index1_, index2_;
-};
-
-
-void DeferredSwapElements::Generate() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredCountOperation: public DeferredCode {
- public:
- DeferredCountOperation(Register value,
- bool is_increment,
- bool is_postfix,
- int target_size)
- : value_(value),
- is_increment_(is_increment),
- is_postfix_(is_postfix),
- target_size_(target_size) {}
-
- virtual void Generate() {
- UNIMPLEMENTED_MIPS();
- }
-
- private:
- Register value_;
- bool is_increment_;
- bool is_postfix_;
- int target_size_;
-};
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredReferenceGetNamedValue: public DeferredCode {
- public:
- explicit DeferredReferenceGetNamedValue(Register receiver,
- Handle<String> name,
- bool is_contextual)
- : receiver_(receiver),
- name_(name),
- is_contextual_(is_contextual),
- is_dont_delete_(false) {
- set_comment(is_contextual
- ? "[ DeferredReferenceGetNamedValue (contextual)"
- : "[ DeferredReferenceGetNamedValue");
- }
-
- virtual void Generate();
-
- void set_is_dont_delete(bool value) {
- ASSERT(is_contextual_);
- is_dont_delete_ = value;
- }
-
- private:
- Register receiver_;
- Handle<String> name_;
- bool is_contextual_;
- bool is_dont_delete_;
-};
-
-
-
-void DeferredReferenceGetNamedValue::Generate() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredReferenceGetKeyedValue: public DeferredCode {
- public:
- DeferredReferenceGetKeyedValue(Register key, Register receiver)
- : key_(key), receiver_(receiver) {
- set_comment("[ DeferredReferenceGetKeyedValue");
- }
-
- virtual void Generate();
-
- private:
- Register key_;
- Register receiver_;
-};
-
-
-void DeferredReferenceGetKeyedValue::Generate() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredReferenceSetKeyedValue: public DeferredCode {
- public:
- DeferredReferenceSetKeyedValue(Register value,
- Register key,
- Register receiver)
- : value_(value), key_(key), receiver_(receiver) {
- set_comment("[ DeferredReferenceSetKeyedValue");
- }
-
- virtual void Generate();
-
- private:
- Register value_;
- Register key_;
- Register receiver_;
-};
-
-
-void DeferredReferenceSetKeyedValue::Generate() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredReferenceSetNamedValue: public DeferredCode {
- public:
- DeferredReferenceSetNamedValue(Register value,
- Register receiver,
- Handle<String> name)
- : value_(value), receiver_(receiver), name_(name) {
- set_comment("[ DeferredReferenceSetNamedValue");
- }
-
- virtual void Generate();
-
- private:
- Register value_;
- Register receiver_;
- Handle<String> name_;
-};
-
-
-void DeferredReferenceSetNamedValue::Generate() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitKeyedLoad() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitKeyedStore(StaticType* key_type,
- WriteBarrierCharacter wb_info) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() {
- UNIMPLEMENTED_MIPS();
- return false;
-}
-#endif
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-// -----------------------------------------------------------------------------
-// Reference support.
-
-
-Handle<String> Reference::GetName() {
- UNIMPLEMENTED_MIPS();
- return Handle<String>();
-}
-
-
-void Reference::DupIfPersist() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Reference::GetValue() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-const char* GenericBinaryOpStub::GetName() {
- UNIMPLEMENTED_MIPS();
- return name_;
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/codegen-mips.h b/src/3rdparty/v8/src/mips/codegen-mips.h
deleted file mode 100644
index 0a2cd45..0000000
--- a/src/3rdparty/v8/src/mips/codegen-mips.h
+++ /dev/null
@@ -1,633 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_MIPS_CODEGEN_MIPS_H_
-#define V8_MIPS_CODEGEN_MIPS_H_
-
-
-#include "ast.h"
-#include "code-stubs-mips.h"
-#include "ic-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#if(defined(__mips_hard_float) && __mips_hard_float != 0)
-// Use floating-point coprocessor instructions. This flag is raised when
-// -mhard-float is passed to the compiler.
-static const bool IsMipsSoftFloatABI = false;
-#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
-// Not using floating-point coprocessor instructions. This flag is raised when
-// -msoft-float is passed to the compiler.
-static const bool IsMipsSoftFloatABI = true;
-#else
-static const bool IsMipsSoftFloatABI = true;
-#endif
-
-// Forward declarations
-class CompilationInfo;
-class DeferredCode;
-class JumpTarget;
-class RegisterAllocator;
-class RegisterFile;
-
-enum InitState { CONST_INIT, NOT_CONST_INIT };
-enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
-enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE };
-
-
-// -----------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that keeps an ECMA
-// reference on the execution stack while in scope. For variables
-// the reference is empty, indicating that it isn't necessary to
-// store state on the stack for keeping track of references to those.
-// For properties, we keep either one (named) or two (indexed) values
-// on the execution stack to represent the reference.
-class Reference BASE_EMBEDDED {
- public:
- // The values of the types is important, see size().
- enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
- Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get = false);
- ~Reference();
-
- Expression* expression() const { return expression_; }
- Type type() const { return type_; }
- void set_type(Type value) {
- ASSERT_EQ(ILLEGAL, type_);
- type_ = value;
- }
-
- void set_unloaded() {
- ASSERT_NE(ILLEGAL, type_);
- ASSERT_NE(UNLOADED, type_);
- type_ = UNLOADED;
- }
- // The size the reference takes up on the stack.
- int size() const {
- return (type_ < SLOT) ? 0 : type_;
- }
-
- bool is_illegal() const { return type_ == ILLEGAL; }
- bool is_slot() const { return type_ == SLOT; }
- bool is_property() const { return type_ == NAMED || type_ == KEYED; }
- bool is_unloaded() const { return type_ == UNLOADED; }
-
- // Return the name. Only valid for named property references.
- Handle<String> GetName();
-
- // Generate code to push the value of the reference on top of the
- // expression stack. The reference is expected to be already on top of
- // the expression stack, and it is consumed by the call unless the
- // reference is for a compound assignment.
- // If the reference is not consumed, it is left in place under its value.
- void GetValue();
-
- // Generate code to pop a reference, push the value of the reference,
- // and then spill the stack frame.
- inline void GetValueAndSpill();
-
- // Generate code to store the value on top of the expression stack in the
- // reference. The reference is expected to be immediately below the value
- // on the expression stack. The value is stored in the location specified
- // by the reference, and is left on top of the stack, after the reference
- // is popped from beneath it (unloaded).
- void SetValue(InitState init_state, WriteBarrierCharacter wb);
-
- // This is in preparation for something that uses the reference on the stack.
- // If we need this reference afterwards get then dup it now. Otherwise mark
- // it as used.
- inline void DupIfPersist();
-
- private:
- CodeGenerator* cgen_;
- Expression* expression_;
- Type type_;
- // Keep the reference on the stack after get, so it can be used by set later.
- bool persist_after_get_;
-};
-
-
-// -----------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the label pair). It is threaded through the
-// call stack. Constructing a state implicitly pushes it on the owning code
-// generator's stack of states, and destroying one implicitly pops it.
-
-class CodeGenState BASE_EMBEDDED {
- public:
- // Create an initial code generator state. Destroying the initial state
- // leaves the code generator with a NULL state.
- explicit CodeGenState(CodeGenerator* owner);
-
-
-
- // Destroy a code generator state and restore the owning code generator's
- // previous state.
- virtual ~CodeGenState();
-
- virtual JumpTarget* true_target() const { return NULL; }
- virtual JumpTarget* false_target() const { return NULL; }
-
- protected:
- inline CodeGenerator* owner() { return owner_; }
- inline CodeGenState* previous() const { return previous_; }
-
- private:
- // The owning code generator.
- CodeGenerator* owner_;
-
-
-
- // The previous state of the owning code generator, restored when
- // this state is destroyed.
- CodeGenState* previous_;
-};
-
-
-class ConditionCodeGenState : public CodeGenState {
- public:
- // Create a code generator state based on a code generator's current
- // state. The new state has its own pair of branch labels.
- ConditionCodeGenState(CodeGenerator* owner,
- JumpTarget* true_target,
- JumpTarget* false_target);
-
- virtual JumpTarget* true_target() const { return true_target_; }
- virtual JumpTarget* false_target() const { return false_target_; }
-
- private:
- JumpTarget* true_target_;
- JumpTarget* false_target_;
-};
-
-
-class TypeInfoCodeGenState : public CodeGenState {
- public:
- TypeInfoCodeGenState(CodeGenerator* owner,
- Slot* slot_number,
- TypeInfo info);
- virtual ~TypeInfoCodeGenState();
-
- virtual JumpTarget* true_target() const { return previous()->true_target(); }
- virtual JumpTarget* false_target() const {
- return previous()->false_target();
- }
-
- private:
- Slot* slot_;
- TypeInfo old_type_info_;
-};
-
-
-// -------------------------------------------------------------------------
-// Arguments allocation mode
-
-enum ArgumentsAllocationMode {
- NO_ARGUMENTS_ALLOCATION,
- EAGER_ARGUMENTS_ALLOCATION,
- LAZY_ARGUMENTS_ALLOCATION
-};
-
-
-// -----------------------------------------------------------------------------
-// CodeGenerator
-
-class CodeGenerator: public AstVisitor {
- public:
- // Compilation mode. Either the compiler is used as the primary
- // compiler and needs to setup everything or the compiler is used as
- // the secondary compiler for split compilation and has to handle
- // bailouts.
- enum Mode {
- PRIMARY,
- SECONDARY
- };
-
- static bool MakeCode(CompilationInfo* info);
-
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info);
-
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- static bool ShouldGenerateLog(Expression* type);
-#endif
-
- static void SetFunctionInfo(Handle<JSFunction> fun,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script);
-
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
- // Accessors
- MacroAssembler* masm() { return masm_; }
- VirtualFrame* frame() const { return frame_; }
- inline Handle<Script> script();
-
- bool has_valid_frame() const { return frame_ != NULL; }
-
- // Set the virtual frame to be new_frame, with non-frame register
- // reference counts given by non_frame_registers. The non-frame
- // register reference counts of the old frame are returned in
- // non_frame_registers.
- void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
- void DeleteFrame();
-
- RegisterAllocator* allocator() const { return allocator_; }
-
- CodeGenState* state() { return state_; }
- void set_state(CodeGenState* state) { state_ = state; }
-
- TypeInfo type_info(Slot* slot) {
- int index = NumberOfSlot(slot);
- if (index == kInvalidSlotNumber) return TypeInfo::Unknown();
- return (*type_info_)[index];
- }
-
- TypeInfo set_type_info(Slot* slot, TypeInfo info) {
- int index = NumberOfSlot(slot);
- ASSERT(index >= kInvalidSlotNumber);
- if (index != kInvalidSlotNumber) {
- TypeInfo previous_value = (*type_info_)[index];
- (*type_info_)[index] = info;
- return previous_value;
- }
- return TypeInfo::Unknown();
- }
- void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
- // Constants related to patching of inlined load/store.
- static int GetInlinedKeyedLoadInstructionsAfterPatch() {
- // This is in correlation with the padding in MacroAssembler::Abort.
- return FLAG_debug_code ? 45 : 20;
- }
- static const int kInlinedKeyedStoreInstructionsAfterPatch = 9;
- static int GetInlinedNamedStoreInstructionsAfterPatch() {
- ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
- // Magic number 5: instruction count after patched map load:
- // li: 2 (liu & ori), Branch : 2 (bne & nop), sw : 1
- return Isolate::Current()->inlined_write_barrier_size() + 5;
- }
-
- private:
- // Type of a member function that generates inline code for a native function.
- typedef void (CodeGenerator::*InlineFunctionGenerator)
- (ZoneList<Expression*>*);
-
- static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
-
- // Construction/Destruction.
- explicit CodeGenerator(MacroAssembler* masm);
-
- // Accessors.
- inline bool is_eval();
- inline Scope* scope();
- inline bool is_strict_mode();
- inline StrictModeFlag strict_mode_flag();
-
- // Generating deferred code.
- void ProcessDeferred();
-
- static const int kInvalidSlotNumber = -1;
-
- int NumberOfSlot(Slot* slot);
- // State
- bool has_cc() const { return cc_reg_ != cc_always; }
-
- JumpTarget* true_target() const { return state_->true_target(); }
- JumpTarget* false_target() const { return state_->false_target(); }
-
- // Track loop nesting level.
- int loop_nesting() const { return loop_nesting_; }
- void IncrementLoopNesting() { loop_nesting_++; }
- void DecrementLoopNesting() { loop_nesting_--; }
-
- // Node visitors.
- void VisitStatements(ZoneList<Statement*>* statements);
-
- virtual void VisitSlot(Slot* node);
-#define DEF_VISIT(type) \
- virtual void Visit##type(type* node);
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- // Main code generation function
- void Generate(CompilationInfo* info);
-
- // Generate the return sequence code. Should be called no more than
- // once per compiled function, immediately after binding the return
- // target (which can not be done more than once). The return value should
- // be in v0.
- void GenerateReturnSequence();
-
- // Returns the arguments allocation mode.
- ArgumentsAllocationMode ArgumentsMode();
-
- // Store the arguments object and allocate it if necessary.
- void StoreArgumentsObject(bool initial);
-
- // The following are used by class Reference.
- void LoadReference(Reference* ref);
- void UnloadReference(Reference* ref);
-
- MemOperand SlotOperand(Slot* slot, Register tmp);
-
- MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
- Register tmp,
- Register tmp2,
- JumpTarget* slow);
-
- void LoadCondition(Expression* x,
- JumpTarget* true_target,
- JumpTarget* false_target,
- bool force_cc);
- void Load(Expression* x);
- void LoadGlobal();
- void LoadGlobalReceiver(Register scratch);
-
-
- // Special code for typeof expressions: Unfortunately, we must
- // be careful when loading the expression in 'typeof'
- // expressions. We are not allowed to throw reference errors for
- // non-existing properties of the global object, so we must make it
- // look like an explicit property access, instead of an access
- // through the context chain.
- void LoadTypeofExpression(Expression* x);
-
- // Store a keyed property. Key and receiver are on the stack and the value is
- // in a0. Result is returned in r0.
- void EmitKeyedStore(StaticType* key_type, WriteBarrierCharacter wb_info);
-
- // Read a value from a slot and leave it on top of the expression stack.
- void LoadFromSlot(Slot* slot, TypeofState typeof_state);
- void LoadFromGlobalSlotCheckExtensions(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow);
- void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
-
- // Support for loading from local/global variables and arguments
- // whose location is known unless they are shadowed by
- // eval-introduced bindings. Generates no code for unsupported slot
- // types and therefore expects to fall through to the slow jump target.
- void EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow,
- JumpTarget* done);
-
- // Store the value on top of the stack to a slot.
- void StoreToSlot(Slot* slot, InitState init_state);
-
- // Support for compiling assignment expressions.
- void EmitSlotAssignment(Assignment* node);
- void EmitNamedPropertyAssignment(Assignment* node);
- void EmitKeyedPropertyAssignment(Assignment* node);
-
- // Load a named property, returning it in v0. The receiver is passed on the
- // stack, and remains there.
- void EmitNamedLoad(Handle<String> name, bool is_contextual);
-
- // Store to a named property. If the store is contextual, value is passed on
- // the frame and consumed. Otherwise, receiver and value are passed on the
- // frame and consumed. The result is returned in v0.
- void EmitNamedStore(Handle<String> name, bool is_contextual);
-
- // Load a keyed property, leaving it in v0. The receiver and key are
- // passed on the stack, and remain there.
- void EmitKeyedLoad();
-
- void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
-
- // Generate code that computes a shortcutting logical operation.
- void GenerateLogicalBooleanOperation(BinaryOperation* node);
-
- void GenericBinaryOperation(Token::Value op,
- OverwriteMode overwrite_mode,
- GenerateInlineSmi inline_smi,
- int known_rhs =
- GenericBinaryOpStub::kUnknownIntValue);
-
- void VirtualFrameBinaryOperation(Token::Value op,
- OverwriteMode overwrite_mode,
- int known_rhs =
- GenericBinaryOpStub::kUnknownIntValue);
-
- void SmiOperation(Token::Value op,
- Handle<Object> value,
- bool reversed,
- OverwriteMode mode);
-
- void Comparison(Condition cc,
- Expression* left,
- Expression* right,
- bool strict = false);
-
- void CallWithArguments(ZoneList<Expression*>* arguments,
- CallFunctionFlags flags,
- int position);
-
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments). We call x the applicand and y the receiver.
- // The optimization avoids allocating an arguments object if possible.
- void CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position);
-
- // Control flow
- void Branch(bool if_true, JumpTarget* target);
- void CheckStack();
-
- bool CheckForInlineRuntimeCall(CallRuntime* node);
-
- static Handle<Code> ComputeLazyCompile(int argc);
- void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
- // Declare global variables and functions in the given array of
- // name/value pairs.
- void DeclareGlobals(Handle<FixedArray> pairs);
-
- // Instantiate the function based on the shared function info.
- void InstantiateFunction(Handle<SharedFunctionInfo> function_info,
- bool pretenure);
-
- // Support for type checks.
- void GenerateIsSmi(ZoneList<Expression*>* args);
- void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
- void GenerateIsArray(ZoneList<Expression*>* args);
- void GenerateIsRegExp(ZoneList<Expression*>* args);
-
- // Support for construct call checks.
- void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
- // Support for arguments.length and arguments[?].
- void GenerateArgumentsLength(ZoneList<Expression*>* args);
- void GenerateArguments(ZoneList<Expression*>* args);
-
- // Support for accessing the class and value fields of an object.
- void GenerateClassOf(ZoneList<Expression*>* args);
- void GenerateValueOf(ZoneList<Expression*>* args);
- void GenerateSetValueOf(ZoneList<Expression*>* args);
-
- // Fast support for charCodeAt(n).
- void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateStringCharFromCode(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateStringCharAt(ZoneList<Expression*>* args);
-
- // Fast support for object equality testing.
- void GenerateObjectEquals(ZoneList<Expression*>* args);
-
- void GenerateLog(ZoneList<Expression*>* args);
-
- // Fast support for Math.random().
- void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
- void GenerateIsObject(ZoneList<Expression*>* args);
- void GenerateIsSpecObject(ZoneList<Expression*>* args);
- void GenerateIsFunction(ZoneList<Expression*>* args);
- void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
- void GenerateStringAdd(ZoneList<Expression*>* args);
- void GenerateSubString(ZoneList<Expression*>* args);
- void GenerateStringCompare(ZoneList<Expression*>* args);
- void GenerateIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args);
-
- // Support for direct calls from JavaScript to native RegExp code.
- void GenerateRegExpExec(ZoneList<Expression*>* args);
-
- void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
-
- // Support for fast native caches.
- void GenerateGetFromCache(ZoneList<Expression*>* args);
-
- // Fast support for number to string.
- void GenerateNumberToString(ZoneList<Expression*>* args);
-
- // Fast swapping of elements.
- void GenerateSwapElements(ZoneList<Expression*>* args);
-
- // Fast call for custom callbacks.
- void GenerateCallFunction(ZoneList<Expression*>* args);
-
- // Fast call to math functions.
- void GenerateMathPow(ZoneList<Expression*>* args);
- void GenerateMathSin(ZoneList<Expression*>* args);
- void GenerateMathCos(ZoneList<Expression*>* args);
- void GenerateMathSqrt(ZoneList<Expression*>* args);
- void GenerateMathLog(ZoneList<Expression*>* args);
-
- void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
-
- void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
- void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
- void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
-
- // Simple condition analysis.
- enum ConditionAnalysis {
- ALWAYS_TRUE,
- ALWAYS_FALSE,
- DONT_KNOW
- };
- ConditionAnalysis AnalyzeCondition(Expression* cond);
-
- // Methods used to indicate which source code is generated for. Source
- // positions are collected by the assembler and emitted with the relocation
- // information.
- void CodeForFunctionPosition(FunctionLiteral* fun);
- void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(Statement* node);
- void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
- void CodeForSourcePosition(int pos);
-
-#ifdef DEBUG
- // True if the registers are valid for entry to a block.
- bool HasValidEntryRegisters();
-#endif
-
- List<DeferredCode*> deferred_;
-
- // Assembler
- MacroAssembler* masm_; // to generate code
-
- CompilationInfo* info_;
-
- // Code generation state
- VirtualFrame* frame_;
- RegisterAllocator* allocator_;
- Condition cc_reg_;
- CodeGenState* state_;
- int loop_nesting_;
-
- Vector<TypeInfo>* type_info_;
- // Jump targets
- BreakTarget function_return_;
-
- // True if the function return is shadowed (ie, jumping to the target
- // function_return_ does not jump to the true function return, but rather
- // to some unlinking code).
- bool function_return_is_shadowed_;
-
- friend class VirtualFrame;
- friend class Isolate;
- friend class JumpTarget;
- friend class Reference;
- friend class FastCodeGenerator;
- friend class FullCodeGenerator;
- friend class FullCodeGenSyntaxChecker;
- friend class InlineRuntimeFunctionsTable;
- friend class LCodeGen;
-
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_MIPS_CODEGEN_MIPS_H_
diff --git a/src/3rdparty/v8/src/mips/constants-mips.cc b/src/3rdparty/v8/src/mips/constants-mips.cc
deleted file mode 100644
index 16e49c9..0000000
--- a/src/3rdparty/v8/src/mips/constants-mips.cc
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "constants-mips.h"
-
-namespace v8 {
-namespace internal {
-
-
-// -----------------------------------------------------------------------------
-// Registers
-
-
-// These register names are defined in a way to match the native disassembler
-// formatting. See for example the command "objdump -d <binary file>".
-const char* Registers::names_[kNumSimuRegisters] = {
- "zero_reg",
- "at",
- "v0", "v1",
- "a0", "a1", "a2", "a3",
- "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
- "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
- "t8", "t9",
- "k0", "k1",
- "gp",
- "sp",
- "fp",
- "ra",
- "LO", "HI",
- "pc"
-};
-
-// List of alias names which can be used when referring to MIPS registers.
-const Registers::RegisterAlias Registers::aliases_[] = {
- {0, "zero"},
- {23, "cp"},
- {30, "s8"},
- {30, "s8_fp"},
- {kInvalidRegister, NULL}
-};
-
-const char* Registers::Name(int reg) {
- const char* result;
- if ((0 <= reg) && (reg < kNumSimuRegisters)) {
- result = names_[reg];
- } else {
- result = "noreg";
- }
- return result;
-}
-
-
-int Registers::Number(const char* name) {
- // Look through the canonical names.
- for (int i = 0; i < kNumSimuRegisters; i++) {
- if (strcmp(names_[i], name) == 0) {
- return i;
- }
- }
-
- // Look through the alias names.
- int i = 0;
- while (aliases_[i].reg != kInvalidRegister) {
- if (strcmp(aliases_[i].name, name) == 0) {
- return aliases_[i].reg;
- }
- i++;
- }
-
- // No register with the reguested name found.
- return kInvalidRegister;
-}
-
-
-const char* FPURegisters::names_[kNumFPURegisters] = {
- "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11",
- "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
- "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
-};
-
-// List of alias names which can be used when referring to MIPS registers.
-const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
- {kInvalidRegister, NULL}
-};
-
-const char* FPURegisters::Name(int creg) {
- const char* result;
- if ((0 <= creg) && (creg < kNumFPURegisters)) {
- result = names_[creg];
- } else {
- result = "nocreg";
- }
- return result;
-}
-
-
-int FPURegisters::Number(const char* name) {
- // Look through the canonical names.
- for (int i = 0; i < kNumFPURegisters; i++) {
- if (strcmp(names_[i], name) == 0) {
- return i;
- }
- }
-
- // Look through the alias names.
- int i = 0;
- while (aliases_[i].creg != kInvalidRegister) {
- if (strcmp(aliases_[i].name, name) == 0) {
- return aliases_[i].creg;
- }
- i++;
- }
-
- // No Cregister with the reguested name found.
- return kInvalidFPURegister;
-}
-
-
-// -----------------------------------------------------------------------------
-// Instruction
-
-bool Instruction::IsForbiddenInBranchDelay() const {
- const int op = OpcodeFieldRaw();
- switch (op) {
- case J:
- case JAL:
- case BEQ:
- case BNE:
- case BLEZ:
- case BGTZ:
- case BEQL:
- case BNEL:
- case BLEZL:
- case BGTZL:
- return true;
- case REGIMM:
- switch (RtFieldRaw()) {
- case BLTZ:
- case BGEZ:
- case BLTZAL:
- case BGEZAL:
- return true;
- default:
- return false;
- };
- break;
- case SPECIAL:
- switch (FunctionFieldRaw()) {
- case JR:
- case JALR:
- return true;
- default:
- return false;
- };
- break;
- default:
- return false;
- };
-}
-
-
-bool Instruction::IsLinkingInstruction() const {
- const int op = OpcodeFieldRaw();
- switch (op) {
- case JAL:
- case REGIMM:
- switch (RtFieldRaw()) {
- case BGEZAL:
- case BLTZAL:
- return true;
- default:
- return false;
- };
- case SPECIAL:
- switch (FunctionFieldRaw()) {
- case JALR:
- return true;
- default:
- return false;
- };
- default:
- return false;
- };
-}
-
-
-bool Instruction::IsTrap() const {
- if (OpcodeFieldRaw() != SPECIAL) {
- return false;
- } else {
- switch (FunctionFieldRaw()) {
- case BREAK:
- case TGE:
- case TGEU:
- case TLT:
- case TLTU:
- case TEQ:
- case TNE:
- return true;
- default:
- return false;
- };
- }
-}
-
-
-Instruction::Type Instruction::InstructionType() const {
- switch (OpcodeFieldRaw()) {
- case SPECIAL:
- switch (FunctionFieldRaw()) {
- case JR:
- case JALR:
- case BREAK:
- case SLL:
- case SRL:
- case SRA:
- case SLLV:
- case SRLV:
- case SRAV:
- case MFHI:
- case MFLO:
- case MULT:
- case MULTU:
- case DIV:
- case DIVU:
- case ADD:
- case ADDU:
- case SUB:
- case SUBU:
- case AND:
- case OR:
- case XOR:
- case NOR:
- case SLT:
- case SLTU:
- case TGE:
- case TGEU:
- case TLT:
- case TLTU:
- case TEQ:
- case TNE:
- case MOVZ:
- case MOVN:
- case MOVCI:
- return kRegisterType;
- default:
- UNREACHABLE();
- };
- break;
- case SPECIAL2:
- switch (FunctionFieldRaw()) {
- case MUL:
- case CLZ:
- return kRegisterType;
- default:
- UNREACHABLE();
- };
- break;
- case SPECIAL3:
- switch (FunctionFieldRaw()) {
- case INS:
- case EXT:
- return kRegisterType;
- default:
- UNREACHABLE();
- };
- break;
- case COP1: // Coprocessor instructions
- switch (RsFieldRawNoAssert()) {
- case BC1: // branch on coprocessor condition
- return kImmediateType;
- default:
- return kRegisterType;
- };
- break;
- // 16 bits Immediate type instructions. eg: addi dest, src, imm16
- case REGIMM:
- case BEQ:
- case BNE:
- case BLEZ:
- case BGTZ:
- case ADDI:
- case ADDIU:
- case SLTI:
- case SLTIU:
- case ANDI:
- case ORI:
- case XORI:
- case LUI:
- case BEQL:
- case BNEL:
- case BLEZL:
- case BGTZL:
- case LB:
- case LH:
- case LWL:
- case LW:
- case LBU:
- case LHU:
- case LWR:
- case SB:
- case SH:
- case SWL:
- case SW:
- case SWR:
- case LWC1:
- case LDC1:
- case SWC1:
- case SDC1:
- return kImmediateType;
- // 26 bits immediate type instructions. eg: j imm26
- case J:
- case JAL:
- return kJumpType;
- default:
- UNREACHABLE();
- };
- return kUnsupported;
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/constants-mips.h b/src/3rdparty/v8/src/mips/constants-mips.h
deleted file mode 100644
index b20e9a2..0000000
--- a/src/3rdparty/v8/src/mips/constants-mips.h
+++ /dev/null
@@ -1,723 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MIPS_CONSTANTS_H_
-#define V8_MIPS_CONSTANTS_H_
-
-// UNIMPLEMENTED_ macro for MIPS.
-#ifdef DEBUG
-#define UNIMPLEMENTED_MIPS() \
- v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
- __FILE__, __LINE__, __func__)
-#else
-#define UNIMPLEMENTED_MIPS()
-#endif
-
-#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
-
-
-#ifdef _MIPS_ARCH_MIPS32R2
- #define mips32r2 1
-#else
- #define mips32r2 0
-#endif
-
-
-// Defines constants and accessor classes to assemble, disassemble and
-// simulate MIPS32 instructions.
-//
-// See: MIPS32 Architecture For Programmers
-// Volume II: The MIPS32 Instruction Set
-// Try www.cs.cornell.edu/courses/cs3410/2008fa/MIPS_Vol2.pdf.
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------------
-// Registers and FPURegister.
-
-// Number of general purpose registers.
-static const int kNumRegisters = 32;
-static const int kInvalidRegister = -1;
-
-// Number of registers with HI, LO, and pc.
-static const int kNumSimuRegisters = 35;
-
-// In the simulator, the PC register is simulated as the 34th register.
-static const int kPCRegister = 34;
-
-// Number coprocessor registers.
-static const int kNumFPURegisters = 32;
-static const int kInvalidFPURegister = -1;
-
-// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
-static const int kFCSRRegister = 31;
-static const int kInvalidFPUControlRegister = -1;
-static const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
-
-// FCSR constants.
-static const uint32_t kFCSRFlagMask = (1 << 6) - 1;
-static const uint32_t kFCSRFlagShift = 2;
-
-// Helper functions for converting between register numbers and names.
-class Registers {
- public:
- // Return the name of the register.
- static const char* Name(int reg);
-
- // Lookup the register number for the name provided.
- static int Number(const char* name);
-
- struct RegisterAlias {
- int reg;
- const char *name;
- };
-
- static const int32_t kMaxValue = 0x7fffffff;
- static const int32_t kMinValue = 0x80000000;
-
- private:
-
- static const char* names_[kNumSimuRegisters];
- static const RegisterAlias aliases_[];
-};
-
-// Helper functions for converting between register numbers and names.
-class FPURegisters {
- public:
- // Return the name of the register.
- static const char* Name(int reg);
-
- // Lookup the register number for the name provided.
- static int Number(const char* name);
-
- struct RegisterAlias {
- int creg;
- const char *name;
- };
-
- private:
-
- static const char* names_[kNumFPURegisters];
- static const RegisterAlias aliases_[];
-};
-
-
-// -----------------------------------------------------------------------------
-// Instructions encoding constants.
-
-// On MIPS all instructions are 32 bits.
-typedef int32_t Instr;
-
-typedef unsigned char byte_;
-
-// Special Software Interrupt codes when used in the presence of the MIPS
-// simulator.
-enum SoftwareInterruptCodes {
- // Transition to C code.
- call_rt_redirected = 0xfffff
-};
-
-// ----- Fields offset and length.
-static const int kOpcodeShift = 26;
-static const int kOpcodeBits = 6;
-static const int kRsShift = 21;
-static const int kRsBits = 5;
-static const int kRtShift = 16;
-static const int kRtBits = 5;
-static const int kRdShift = 11;
-static const int kRdBits = 5;
-static const int kSaShift = 6;
-static const int kSaBits = 5;
-static const int kFunctionShift = 0;
-static const int kFunctionBits = 6;
-static const int kLuiShift = 16;
-
-static const int kImm16Shift = 0;
-static const int kImm16Bits = 16;
-static const int kImm26Shift = 0;
-static const int kImm26Bits = 26;
-
-static const int kFsShift = 11;
-static const int kFsBits = 5;
-static const int kFtShift = 16;
-static const int kFtBits = 5;
-static const int kFdShift = 6;
-static const int kFdBits = 5;
-static const int kFCccShift = 8;
-static const int kFCccBits = 3;
-static const int kFBccShift = 18;
-static const int kFBccBits = 3;
-static const int kFBtrueShift = 16;
-static const int kFBtrueBits = 1;
-
-// ----- Miscellianous useful masks.
-// Instruction bit masks.
-static const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
-static const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
-static const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
-static const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;
-static const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift;
-static const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
-static const int kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift;
-static const int kFunctionFieldMask =
- ((1 << kFunctionBits) - 1) << kFunctionShift;
-// Misc masks.
-static const int kHiMask = 0xffff << 16;
-static const int kLoMask = 0xffff;
-static const int kSignMask = 0x80000000;
-
-
-// ----- MIPS Opcodes and Function Fields.
-// We use this presentation to stay close to the table representation in
-// MIPS32 Architecture For Programmers, Volume II: The MIPS32 Instruction Set.
-enum Opcode {
- SPECIAL = 0 << kOpcodeShift,
- REGIMM = 1 << kOpcodeShift,
-
- J = ((0 << 3) + 2) << kOpcodeShift,
- JAL = ((0 << 3) + 3) << kOpcodeShift,
- BEQ = ((0 << 3) + 4) << kOpcodeShift,
- BNE = ((0 << 3) + 5) << kOpcodeShift,
- BLEZ = ((0 << 3) + 6) << kOpcodeShift,
- BGTZ = ((0 << 3) + 7) << kOpcodeShift,
-
- ADDI = ((1 << 3) + 0) << kOpcodeShift,
- ADDIU = ((1 << 3) + 1) << kOpcodeShift,
- SLTI = ((1 << 3) + 2) << kOpcodeShift,
- SLTIU = ((1 << 3) + 3) << kOpcodeShift,
- ANDI = ((1 << 3) + 4) << kOpcodeShift,
- ORI = ((1 << 3) + 5) << kOpcodeShift,
- XORI = ((1 << 3) + 6) << kOpcodeShift,
- LUI = ((1 << 3) + 7) << kOpcodeShift,
-
- COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class
- BEQL = ((2 << 3) + 4) << kOpcodeShift,
- BNEL = ((2 << 3) + 5) << kOpcodeShift,
- BLEZL = ((2 << 3) + 6) << kOpcodeShift,
- BGTZL = ((2 << 3) + 7) << kOpcodeShift,
-
- SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift,
- SPECIAL3 = ((3 << 3) + 7) << kOpcodeShift,
-
- LB = ((4 << 3) + 0) << kOpcodeShift,
- LH = ((4 << 3) + 1) << kOpcodeShift,
- LWL = ((4 << 3) + 2) << kOpcodeShift,
- LW = ((4 << 3) + 3) << kOpcodeShift,
- LBU = ((4 << 3) + 4) << kOpcodeShift,
- LHU = ((4 << 3) + 5) << kOpcodeShift,
- LWR = ((4 << 3) + 6) << kOpcodeShift,
- SB = ((5 << 3) + 0) << kOpcodeShift,
- SH = ((5 << 3) + 1) << kOpcodeShift,
- SWL = ((5 << 3) + 2) << kOpcodeShift,
- SW = ((5 << 3) + 3) << kOpcodeShift,
- SWR = ((5 << 3) + 6) << kOpcodeShift,
-
- LWC1 = ((6 << 3) + 1) << kOpcodeShift,
- LDC1 = ((6 << 3) + 5) << kOpcodeShift,
-
- SWC1 = ((7 << 3) + 1) << kOpcodeShift,
- SDC1 = ((7 << 3) + 5) << kOpcodeShift
-};
-
-enum SecondaryField {
- // SPECIAL Encoding of Function Field.
- SLL = ((0 << 3) + 0),
- SRL = ((0 << 3) + 2),
- SRA = ((0 << 3) + 3),
- SLLV = ((0 << 3) + 4),
- SRLV = ((0 << 3) + 6),
- SRAV = ((0 << 3) + 7),
- MOVCI = ((0 << 3) + 1),
-
- JR = ((1 << 3) + 0),
- JALR = ((1 << 3) + 1),
- MOVZ = ((1 << 3) + 2),
- MOVN = ((1 << 3) + 3),
- BREAK = ((1 << 3) + 5),
-
- MFHI = ((2 << 3) + 0),
- MFLO = ((2 << 3) + 2),
-
- MULT = ((3 << 3) + 0),
- MULTU = ((3 << 3) + 1),
- DIV = ((3 << 3) + 2),
- DIVU = ((3 << 3) + 3),
-
- ADD = ((4 << 3) + 0),
- ADDU = ((4 << 3) + 1),
- SUB = ((4 << 3) + 2),
- SUBU = ((4 << 3) + 3),
- AND = ((4 << 3) + 4),
- OR = ((4 << 3) + 5),
- XOR = ((4 << 3) + 6),
- NOR = ((4 << 3) + 7),
-
- SLT = ((5 << 3) + 2),
- SLTU = ((5 << 3) + 3),
-
- TGE = ((6 << 3) + 0),
- TGEU = ((6 << 3) + 1),
- TLT = ((6 << 3) + 2),
- TLTU = ((6 << 3) + 3),
- TEQ = ((6 << 3) + 4),
- TNE = ((6 << 3) + 6),
-
- // SPECIAL2 Encoding of Function Field.
- MUL = ((0 << 3) + 2),
- CLZ = ((4 << 3) + 0),
- CLO = ((4 << 3) + 1),
-
- // SPECIAL3 Encoding of Function Field.
- EXT = ((0 << 3) + 0),
- INS = ((0 << 3) + 4),
-
- // REGIMM encoding of rt Field.
- BLTZ = ((0 << 3) + 0) << 16,
- BGEZ = ((0 << 3) + 1) << 16,
- BLTZAL = ((2 << 3) + 0) << 16,
- BGEZAL = ((2 << 3) + 1) << 16,
-
- // COP1 Encoding of rs Field.
- MFC1 = ((0 << 3) + 0) << 21,
- CFC1 = ((0 << 3) + 2) << 21,
- MFHC1 = ((0 << 3) + 3) << 21,
- MTC1 = ((0 << 3) + 4) << 21,
- CTC1 = ((0 << 3) + 6) << 21,
- MTHC1 = ((0 << 3) + 7) << 21,
- BC1 = ((1 << 3) + 0) << 21,
- S = ((2 << 3) + 0) << 21,
- D = ((2 << 3) + 1) << 21,
- W = ((2 << 3) + 4) << 21,
- L = ((2 << 3) + 5) << 21,
- PS = ((2 << 3) + 6) << 21,
- // COP1 Encoding of Function Field When rs=S.
- ROUND_L_S = ((1 << 3) + 0),
- TRUNC_L_S = ((1 << 3) + 1),
- CEIL_L_S = ((1 << 3) + 2),
- FLOOR_L_S = ((1 << 3) + 3),
- ROUND_W_S = ((1 << 3) + 4),
- TRUNC_W_S = ((1 << 3) + 5),
- CEIL_W_S = ((1 << 3) + 6),
- FLOOR_W_S = ((1 << 3) + 7),
- CVT_D_S = ((4 << 3) + 1),
- CVT_W_S = ((4 << 3) + 4),
- CVT_L_S = ((4 << 3) + 5),
- CVT_PS_S = ((4 << 3) + 6),
- // COP1 Encoding of Function Field When rs=D.
- ADD_D = ((0 << 3) + 0),
- SUB_D = ((0 << 3) + 1),
- MUL_D = ((0 << 3) + 2),
- DIV_D = ((0 << 3) + 3),
- SQRT_D = ((0 << 3) + 4),
- ABS_D = ((0 << 3) + 5),
- MOV_D = ((0 << 3) + 6),
- NEG_D = ((0 << 3) + 7),
- ROUND_L_D = ((1 << 3) + 0),
- TRUNC_L_D = ((1 << 3) + 1),
- CEIL_L_D = ((1 << 3) + 2),
- FLOOR_L_D = ((1 << 3) + 3),
- ROUND_W_D = ((1 << 3) + 4),
- TRUNC_W_D = ((1 << 3) + 5),
- CEIL_W_D = ((1 << 3) + 6),
- FLOOR_W_D = ((1 << 3) + 7),
- CVT_S_D = ((4 << 3) + 0),
- CVT_W_D = ((4 << 3) + 4),
- CVT_L_D = ((4 << 3) + 5),
- C_F_D = ((6 << 3) + 0),
- C_UN_D = ((6 << 3) + 1),
- C_EQ_D = ((6 << 3) + 2),
- C_UEQ_D = ((6 << 3) + 3),
- C_OLT_D = ((6 << 3) + 4),
- C_ULT_D = ((6 << 3) + 5),
- C_OLE_D = ((6 << 3) + 6),
- C_ULE_D = ((6 << 3) + 7),
- // COP1 Encoding of Function Field When rs=W or L.
- CVT_S_W = ((4 << 3) + 0),
- CVT_D_W = ((4 << 3) + 1),
- CVT_S_L = ((4 << 3) + 0),
- CVT_D_L = ((4 << 3) + 1),
- // COP1 Encoding of Function Field When rs=PS.
-
- NULLSF = 0
-};
-
-
-// ----- Emulated conditions.
-// On MIPS we use this enum to abstract from conditionnal branch instructions.
-// the 'U' prefix is used to specify unsigned comparisons.
-enum Condition {
- // Any value < 0 is considered no_condition.
- kNoCondition = -1,
-
- overflow = 0,
- no_overflow = 1,
- Uless = 2,
- Ugreater_equal= 3,
- equal = 4,
- not_equal = 5,
- Uless_equal = 6,
- Ugreater = 7,
- negative = 8,
- positive = 9,
- parity_even = 10,
- parity_odd = 11,
- less = 12,
- greater_equal = 13,
- less_equal = 14,
- greater = 15,
-
- cc_always = 16,
-
- // aliases
- carry = Uless,
- not_carry = Ugreater_equal,
- zero = equal,
- eq = equal,
- not_zero = not_equal,
- ne = not_equal,
- nz = not_equal,
- sign = negative,
- not_sign = positive,
- mi = negative,
- pl = positive,
- hi = Ugreater,
- ls = Uless_equal,
- ge = greater_equal,
- lt = less,
- gt = greater,
- le = less_equal,
- hs = Ugreater_equal,
- lo = Uless,
- al = cc_always,
-
- cc_default = kNoCondition
-};
-
-
-// Returns the equivalent of !cc.
-// Negation of the default kNoCondition (-1) results in a non-default
-// no_condition value (-2). As long as tests for no_condition check
-// for condition < 0, this will work as expected.
-inline Condition NegateCondition(Condition cc) {
- ASSERT(cc != cc_always);
- return static_cast<Condition>(cc ^ 1);
-}
-
-
-inline Condition ReverseCondition(Condition cc) {
- switch (cc) {
- case Uless:
- return Ugreater;
- case Ugreater:
- return Uless;
- case Ugreater_equal:
- return Uless_equal;
- case Uless_equal:
- return Ugreater_equal;
- case less:
- return greater;
- case greater:
- return less;
- case greater_equal:
- return less_equal;
- case less_equal:
- return greater_equal;
- default:
- return cc;
- };
-}
-
-
-// ----- Coprocessor conditions.
-enum FPUCondition {
- F, // False
- UN, // Unordered
- EQ, // Equal
- UEQ, // Unordered or Equal
- OLT, // Ordered or Less Than
- ULT, // Unordered or Less Than
- OLE, // Ordered or Less Than or Equal
- ULE // Unordered or Less Than or Equal
-};
-
-
-// -----------------------------------------------------------------------------
-// Hints.
-
-// Branch hints are not used on the MIPS. They are defined so that they can
-// appear in shared function signatures, but will be ignored in MIPS
-// implementations.
-enum Hint {
- no_hint = 0
-};
-
-
-inline Hint NegateHint(Hint hint) {
- return no_hint;
-}
-
-
-// -----------------------------------------------------------------------------
-// Specific instructions, constants, and masks.
-// These constants are declared in assembler-mips.cc, as they use named
-// registers and other constants.
-
-// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
-// operations as post-increment of sp.
-extern const Instr kPopInstruction;
-// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
-extern const Instr kPushInstruction;
-// sw(r, MemOperand(sp, 0))
-extern const Instr kPushRegPattern;
-// lw(r, MemOperand(sp, 0))
-extern const Instr kPopRegPattern;
-extern const Instr kLwRegFpOffsetPattern;
-extern const Instr kSwRegFpOffsetPattern;
-extern const Instr kLwRegFpNegOffsetPattern;
-extern const Instr kSwRegFpNegOffsetPattern;
-// A mask for the Rt register for push, pop, lw, sw instructions.
-extern const Instr kRtMask;
-extern const Instr kLwSwInstrTypeMask;
-extern const Instr kLwSwInstrArgumentMask;
-extern const Instr kLwSwOffsetMask;
-
-// Break 0xfffff, reserved for redirected real time call.
-const Instr rtCallRedirInstr = SPECIAL | BREAK | call_rt_redirected << 6;
-// A nop instruction. (Encoding of sll 0 0 0).
-const Instr nopInstr = 0;
-
-class Instruction {
- public:
- enum {
- kInstrSize = 4,
- kInstrSizeLog2 = 2,
- // On MIPS PC cannot actually be directly accessed. We behave as if PC was
- // always the value of the current instruction being executed.
- kPCReadOffset = 0
- };
-
- // Get the raw instruction bits.
- inline Instr InstructionBits() const {
- return *reinterpret_cast<const Instr*>(this);
- }
-
- // Set the raw instruction bits to value.
- inline void SetInstructionBits(Instr value) {
- *reinterpret_cast<Instr*>(this) = value;
- }
-
- // Read one particular bit out of the instruction bits.
- inline int Bit(int nr) const {
- return (InstructionBits() >> nr) & 1;
- }
-
- // Read a bit field out of the instruction bits.
- inline int Bits(int hi, int lo) const {
- return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
- }
-
- // Instruction type.
- enum Type {
- kRegisterType,
- kImmediateType,
- kJumpType,
- kUnsupported = -1
- };
-
- // Get the encoding type of the instruction.
- Type InstructionType() const;
-
-
- // Accessors for the different named fields used in the MIPS encoding.
- inline Opcode OpcodeValue() const {
- return static_cast<Opcode>(
- Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift));
- }
-
- inline int RsValue() const {
- ASSERT(InstructionType() == kRegisterType ||
- InstructionType() == kImmediateType);
- return Bits(kRsShift + kRsBits - 1, kRsShift);
- }
-
- inline int RtValue() const {
- ASSERT(InstructionType() == kRegisterType ||
- InstructionType() == kImmediateType);
- return Bits(kRtShift + kRtBits - 1, kRtShift);
- }
-
- inline int RdValue() const {
- ASSERT(InstructionType() == kRegisterType);
- return Bits(kRdShift + kRdBits - 1, kRdShift);
- }
-
- inline int SaValue() const {
- ASSERT(InstructionType() == kRegisterType);
- return Bits(kSaShift + kSaBits - 1, kSaShift);
- }
-
- inline int FunctionValue() const {
- ASSERT(InstructionType() == kRegisterType ||
- InstructionType() == kImmediateType);
- return Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
- }
-
- inline int FdValue() const {
- return Bits(kFdShift + kFdBits - 1, kFdShift);
- }
-
- inline int FsValue() const {
- return Bits(kFsShift + kFsBits - 1, kFsShift);
- }
-
- inline int FtValue() const {
- return Bits(kFtShift + kFtBits - 1, kFtShift);
- }
-
- // Float Compare condition code instruction bits.
- inline int FCccValue() const {
- return Bits(kFCccShift + kFCccBits - 1, kFCccShift);
- }
-
- // Float Branch condition code instruction bits.
- inline int FBccValue() const {
- return Bits(kFBccShift + kFBccBits - 1, kFBccShift);
- }
-
- // Float Branch true/false instruction bit.
- inline int FBtrueValue() const {
- return Bits(kFBtrueShift + kFBtrueBits - 1, kFBtrueShift);
- }
-
- // Return the fields at their original place in the instruction encoding.
- inline Opcode OpcodeFieldRaw() const {
- return static_cast<Opcode>(InstructionBits() & kOpcodeMask);
- }
-
- inline int RsFieldRaw() const {
- ASSERT(InstructionType() == kRegisterType ||
- InstructionType() == kImmediateType);
- return InstructionBits() & kRsFieldMask;
- }
-
- // Same as above function, but safe to call within InstructionType().
- inline int RsFieldRawNoAssert() const {
- return InstructionBits() & kRsFieldMask;
- }
-
- inline int RtFieldRaw() const {
- ASSERT(InstructionType() == kRegisterType ||
- InstructionType() == kImmediateType);
- return InstructionBits() & kRtFieldMask;
- }
-
- inline int RdFieldRaw() const {
- ASSERT(InstructionType() == kRegisterType);
- return InstructionBits() & kRdFieldMask;
- }
-
- inline int SaFieldRaw() const {
- ASSERT(InstructionType() == kRegisterType);
- return InstructionBits() & kSaFieldMask;
- }
-
- inline int FunctionFieldRaw() const {
- return InstructionBits() & kFunctionFieldMask;
- }
-
- // Get the secondary field according to the opcode.
- inline int SecondaryValue() const {
- Opcode op = OpcodeFieldRaw();
- switch (op) {
- case SPECIAL:
- case SPECIAL2:
- return FunctionValue();
- case COP1:
- return RsValue();
- case REGIMM:
- return RtValue();
- default:
- return NULLSF;
- }
- }
-
- inline int32_t Imm16Value() const {
- ASSERT(InstructionType() == kImmediateType);
- return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
- }
-
- inline int32_t Imm26Value() const {
- ASSERT(InstructionType() == kJumpType);
- return Bits(kImm16Shift + kImm26Bits - 1, kImm26Shift);
- }
-
- // Say if the instruction should not be used in a branch delay slot.
- bool IsForbiddenInBranchDelay() const;
- // Say if the instruction 'links'. eg: jal, bal.
- bool IsLinkingInstruction() const;
- // Say if the instruction is a break or a trap.
- bool IsTrap() const;
-
- // Instructions are read of out a code stream. The only way to get a
- // reference to an instruction is to convert a pointer. There is no way
- // to allocate or create instances of class Instruction.
- // Use the At(pc) function to create references to Instruction.
- static Instruction* At(byte_* pc) {
- return reinterpret_cast<Instruction*>(pc);
- }
-
- private:
- // We need to prevent the creation of instances of class Instruction.
- DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
-};
-
-
-// -----------------------------------------------------------------------------
-// MIPS assembly various constants.
-
-
-static const int kArgsSlotsSize = 4 * Instruction::kInstrSize;
-static const int kArgsSlotsNum = 4;
-// C/C++ argument slots size.
-static const int kCArgsSlotsSize = 4 * Instruction::kInstrSize;
-// JS argument slots size.
-static const int kJSArgsSlotsSize = 0 * Instruction::kInstrSize;
-// Assembly builtins argument slots size.
-static const int kBArgsSlotsSize = 0 * Instruction::kInstrSize;
-
-static const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
-
-static const int kDoubleAlignmentBits = 3;
-static const int kDoubleAlignment = (1 << kDoubleAlignmentBits);
-static const int kDoubleAlignmentMask = kDoubleAlignment - 1;
-
-
-} } // namespace v8::internal
-
-#endif // #ifndef V8_MIPS_CONSTANTS_H_
-
diff --git a/src/3rdparty/v8/src/mips/cpu-mips.cc b/src/3rdparty/v8/src/mips/cpu-mips.cc
deleted file mode 100644
index 36f577b..0000000
--- a/src/3rdparty/v8/src/mips/cpu-mips.cc
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// CPU specific code for arm independent of OS goes here.
-
-#include <sys/syscall.h>
-#include <unistd.h>
-
-#ifdef __mips
-#include <asm/cachectl.h>
-#endif // #ifdef __mips
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "cpu.h"
-#include "macro-assembler.h"
-
-#include "simulator.h" // For cache flushing.
-
-namespace v8 {
-namespace internal {
-
-
-void CPU::Setup() {
- CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
- cpu_features->Probe(true);
- if (!cpu_features->IsSupported(FPU) || Serializer::enabled()) {
- V8::DisableCrankshaft();
- }
-}
-
-
-void CPU::FlushICache(void* start, size_t size) {
-#if !defined (USE_SIMULATOR)
- int res;
-
- // See http://www.linux-mips.org/wiki/Cacheflush_Syscall
- res = syscall(__NR_cacheflush, start, size, ICACHE);
-
- if (res) {
- V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
- }
-
-#else // USE_SIMULATOR.
- // Not generating mips instructions for C-code. This means that we are
- // building a mips emulator based target. We should notify the simulator
- // that the Icache was flushed.
- // None of this code ends up in the snapshot so there are no issues
- // around whether or not to generate the code when building snapshots.
- Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
-#endif // USE_SIMULATOR.
-}
-
-
-void CPU::DebugBreak() {
-#ifdef __mips
- asm volatile("break");
-#endif // #ifdef __mips
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/debug-mips.cc b/src/3rdparty/v8/src/mips/debug-mips.cc
deleted file mode 100644
index 35df69b..0000000
--- a/src/3rdparty/v8/src/mips/debug-mips.cc
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "debug.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-bool BreakLocationIterator::IsDebugBreakAtReturn() {
- UNIMPLEMENTED_MIPS();
- return false;
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtReturn() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// Restore the JS frame exit code.
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// A debug break in the exit code is identified by the JS frame exit code
-// having been patched with li/call psuedo-instrunction (liu/ori/jalr)
-bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
- UNIMPLEMENTED_MIPS();
- return false;
-}
-
-
-bool BreakLocationIterator::IsDebugBreakAtSlot() {
- UNIMPLEMENTED_MIPS();
- return false;
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtSlot() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void BreakLocationIterator::ClearDebugBreakAtSlot() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Debug::GenerateSlot(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-const bool Debug::kFrameDropperSupported = false;
-
-#undef __
-
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/deoptimizer-mips.cc b/src/3rdparty/v8/src/mips/deoptimizer-mips.cc
deleted file mode 100644
index 4b69859..0000000
--- a/src/3rdparty/v8/src/mips/deoptimizer-mips.cc
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "safepoint-table.h"
-
-// Note: this file was taken from the X64 version. ARM has a partially working
-// lithium implementation, but for now it is not ported to mips.
-
-namespace v8 {
-namespace internal {
-
-
-int Deoptimizer::table_entry_size_ = 10;
-
-
-int Deoptimizer::patch_size() {
- const int kCallInstructionSizeInWords = 3;
- return kCallInstructionSizeInWords * Assembler::kInstrSize;
-}
-
-
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- UNIMPLEMENTED();
-}
-
-
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- UNIMPLEMENTED();
-}
-
-
-void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- UNIMPLEMENTED();
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- UNIMPLEMENTED();
-}
-
-
-void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
- int frame_index) {
- UNIMPLEMENTED();
-}
-
-
-void Deoptimizer::EntryGenerator::Generate() {
- UNIMPLEMENTED();
-}
-
-
-void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- UNIMPLEMENTED();
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/mips/disasm-mips.cc b/src/3rdparty/v8/src/mips/disasm-mips.cc
deleted file mode 100644
index b7ceb2b..0000000
--- a/src/3rdparty/v8/src/mips/disasm-mips.cc
+++ /dev/null
@@ -1,1023 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// A Disassembler object is used to disassemble a block of code instruction by
-// instruction. The default implementation of the NameConverter object can be
-// overriden to modify register names or to do symbol lookup on addresses.
-//
-// The example below will disassemble a block of code and print it to stdout.
-//
-// NameConverter converter;
-// Disassembler d(converter);
-// for (byte_* pc = begin; pc < end;) {
-// v8::internal::EmbeddedVector<char, 256> buffer;
-// byte* prev_pc = pc;
-// pc += d.InstructionDecode(buffer, pc);
-// printf("%p %08x %s\n",
-// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
-// }
-//
-// The Disassembler class also has a convenience method to disassemble a block
-// of code into a FILE*, meaning that the above functionality could also be
-// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
-
-
-#include <assert.h>
-#include <stdio.h>
-#include <stdarg.h>
-#include <string.h>
-#ifndef WIN32
-#include <stdint.h>
-#endif
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "mips/constants-mips.h"
-#include "disasm.h"
-#include "macro-assembler.h"
-#include "platform.h"
-
-namespace v8 {
-namespace internal {
-
-//------------------------------------------------------------------------------
-
-// Decoder decodes and disassembles instructions into an output buffer.
-// It uses the converter to convert register names and call destinations into
-// more informative description.
-class Decoder {
- public:
- Decoder(const disasm::NameConverter& converter,
- v8::internal::Vector<char> out_buffer)
- : converter_(converter),
- out_buffer_(out_buffer),
- out_buffer_pos_(0) {
- out_buffer_[out_buffer_pos_] = '\0';
- }
-
- ~Decoder() {}
-
- // Writes one disassembled instruction into 'buffer' (0-terminated).
- // Returns the length of the disassembled machine instruction in bytes.
- int InstructionDecode(byte_* instruction);
-
- private:
- // Bottleneck functions to print into the out_buffer.
- void PrintChar(const char ch);
- void Print(const char* str);
-
- // Printing of common values.
- void PrintRegister(int reg);
- void PrintFPURegister(int freg);
- void PrintRs(Instruction* instr);
- void PrintRt(Instruction* instr);
- void PrintRd(Instruction* instr);
- void PrintFs(Instruction* instr);
- void PrintFt(Instruction* instr);
- void PrintFd(Instruction* instr);
- void PrintSa(Instruction* instr);
- void PrintSd(Instruction* instr);
- void PrintBc(Instruction* instr);
- void PrintCc(Instruction* instr);
- void PrintFunction(Instruction* instr);
- void PrintSecondaryField(Instruction* instr);
- void PrintUImm16(Instruction* instr);
- void PrintSImm16(Instruction* instr);
- void PrintXImm16(Instruction* instr);
- void PrintImm26(Instruction* instr);
- void PrintCode(Instruction* instr); // For break and trap instructions.
- // Printing of instruction name.
- void PrintInstructionName(Instruction* instr);
-
- // Handle formatting of instructions and their options.
- int FormatRegister(Instruction* instr, const char* option);
- int FormatFPURegister(Instruction* instr, const char* option);
- int FormatOption(Instruction* instr, const char* option);
- void Format(Instruction* instr, const char* format);
- void Unknown(Instruction* instr);
-
- // Each of these functions decodes one particular instruction type.
- void DecodeTypeRegister(Instruction* instr);
- void DecodeTypeImmediate(Instruction* instr);
- void DecodeTypeJump(Instruction* instr);
-
- const disasm::NameConverter& converter_;
- v8::internal::Vector<char> out_buffer_;
- int out_buffer_pos_;
-
- DISALLOW_COPY_AND_ASSIGN(Decoder);
-};
-
-
-// Support for assertions in the Decoder formatting functions.
-#define STRING_STARTS_WITH(string, compare_string) \
- (strncmp(string, compare_string, strlen(compare_string)) == 0)
-
-
-// Append the ch to the output buffer.
-void Decoder::PrintChar(const char ch) {
- out_buffer_[out_buffer_pos_++] = ch;
-}
-
-
-// Append the str to the output buffer.
-void Decoder::Print(const char* str) {
- char cur = *str++;
- while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
- PrintChar(cur);
- cur = *str++;
- }
- out_buffer_[out_buffer_pos_] = 0;
-}
-
-
-// Print the register name according to the active name converter.
-void Decoder::PrintRegister(int reg) {
- Print(converter_.NameOfCPURegister(reg));
-}
-
-
-void Decoder::PrintRs(Instruction* instr) {
- int reg = instr->RsValue();
- PrintRegister(reg);
-}
-
-
-void Decoder::PrintRt(Instruction* instr) {
- int reg = instr->RtValue();
- PrintRegister(reg);
-}
-
-
-void Decoder::PrintRd(Instruction* instr) {
- int reg = instr->RdValue();
- PrintRegister(reg);
-}
-
-
-// Print the FPUregister name according to the active name converter.
-void Decoder::PrintFPURegister(int freg) {
- Print(converter_.NameOfXMMRegister(freg));
-}
-
-
-void Decoder::PrintFs(Instruction* instr) {
- int freg = instr->RsValue();
- PrintFPURegister(freg);
-}
-
-
-void Decoder::PrintFt(Instruction* instr) {
- int freg = instr->RtValue();
- PrintFPURegister(freg);
-}
-
-
-void Decoder::PrintFd(Instruction* instr) {
- int freg = instr->RdValue();
- PrintFPURegister(freg);
-}
-
-
-// Print the integer value of the sa field.
-void Decoder::PrintSa(Instruction* instr) {
- int sa = instr->SaValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
-}
-
-
-// Print the integer value of the rd field, (when it is not used as reg).
-void Decoder::PrintSd(Instruction* instr) {
- int sd = instr->RdValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd);
-}
-
-
-// Print the integer value of the cc field for the bc1t/f instructions.
-void Decoder::PrintBc(Instruction* instr) {
- int cc = instr->FBccValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", cc);
-}
-
-
-// Print the integer value of the cc field for the FP compare instructions.
-void Decoder::PrintCc(Instruction* instr) {
- int cc = instr->FCccValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "cc(%d)", cc);
-}
-
-
-// Print 16-bit unsigned immediate value.
-void Decoder::PrintUImm16(Instruction* instr) {
- int32_t imm = instr->Imm16Value();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm);
-}
-
-
-// Print 16-bit signed immediate value.
-void Decoder::PrintSImm16(Instruction* instr) {
- int32_t imm = ((instr->Imm16Value())<<16)>>16;
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
-}
-
-
-// Print 16-bit hexa immediate value.
-void Decoder::PrintXImm16(Instruction* instr) {
- int32_t imm = instr->Imm16Value();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
-}
-
-
-// Print 26-bit immediate value.
-void Decoder::PrintImm26(Instruction* instr) {
- int32_t imm = instr->Imm26Value();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
-}
-
-
-// Print 26-bit immediate value.
-void Decoder::PrintCode(Instruction* instr) {
- if (instr->OpcodeFieldRaw() != SPECIAL)
- return; // Not a break or trap instruction.
- switch (instr->FunctionFieldRaw()) {
- case BREAK: {
- int32_t code = instr->Bits(25, 6);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "0x%05x (%d)", code, code);
- break;
- }
- case TGE:
- case TGEU:
- case TLT:
- case TLTU:
- case TEQ:
- case TNE: {
- int32_t code = instr->Bits(15, 6);
- out_buffer_pos_ +=
- OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
- break;
- }
- default: // Not a break or trap instruction.
- break;
- };
-}
-
-
-// Printing of instruction name.
-void Decoder::PrintInstructionName(Instruction* instr) {
-}
-
-
-// Handle all register based formatting in this function to reduce the
-// complexity of FormatOption.
-int Decoder::FormatRegister(Instruction* instr, const char* format) {
- ASSERT(format[0] == 'r');
- if (format[1] == 's') { // 'rs: Rs register
- int reg = instr->RsValue();
- PrintRegister(reg);
- return 2;
- } else if (format[1] == 't') { // 'rt: rt register
- int reg = instr->RtValue();
- PrintRegister(reg);
- return 2;
- } else if (format[1] == 'd') { // 'rd: rd register
- int reg = instr->RdValue();
- PrintRegister(reg);
- return 2;
- }
- UNREACHABLE();
- return -1;
-}
-
-
-// Handle all FPUregister based formatting in this function to reduce the
-// complexity of FormatOption.
-int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
- ASSERT(format[0] == 'f');
- if (format[1] == 's') { // 'fs: fs register
- int reg = instr->FsValue();
- PrintFPURegister(reg);
- return 2;
- } else if (format[1] == 't') { // 'ft: ft register
- int reg = instr->FtValue();
- PrintFPURegister(reg);
- return 2;
- } else if (format[1] == 'd') { // 'fd: fd register
- int reg = instr->FdValue();
- PrintFPURegister(reg);
- return 2;
- }
- UNREACHABLE();
- return -1;
-}
-
-
-// FormatOption takes a formatting string and interprets it based on
-// the current instructions. The format string points to the first
-// character of the option string (the option escape has already been
-// consumed by the caller.) FormatOption returns the number of
-// characters that were consumed from the formatting string.
-int Decoder::FormatOption(Instruction* instr, const char* format) {
- switch (format[0]) {
- case 'c': { // 'code for break or trap instructions
- ASSERT(STRING_STARTS_WITH(format, "code"));
- PrintCode(instr);
- return 4;
- }
- case 'i': { // 'imm16u or 'imm26
- if (format[3] == '1') {
- ASSERT(STRING_STARTS_WITH(format, "imm16"));
- if (format[5] == 's') {
- ASSERT(STRING_STARTS_WITH(format, "imm16s"));
- PrintSImm16(instr);
- } else if (format[5] == 'u') {
- ASSERT(STRING_STARTS_WITH(format, "imm16u"));
- PrintSImm16(instr);
- } else {
- ASSERT(STRING_STARTS_WITH(format, "imm16x"));
- PrintXImm16(instr);
- }
- return 6;
- } else {
- ASSERT(STRING_STARTS_WITH(format, "imm26"));
- PrintImm26(instr);
- return 5;
- }
- }
- case 'r': { // 'r: registers
- return FormatRegister(instr, format);
- }
- case 'f': { // 'f: FPUregisters
- return FormatFPURegister(instr, format);
- }
- case 's': { // 'sa
- switch (format[1]) {
- case 'a': {
- ASSERT(STRING_STARTS_WITH(format, "sa"));
- PrintSa(instr);
- return 2;
- }
- case 'd': {
- ASSERT(STRING_STARTS_WITH(format, "sd"));
- PrintSd(instr);
- return 2;
- }
- }
- }
- case 'b': { // 'bc - Special for bc1 cc field.
- ASSERT(STRING_STARTS_WITH(format, "bc"));
- PrintBc(instr);
- return 2;
- }
- case 'C': { // 'Cc - Special for c.xx.d cc field.
- ASSERT(STRING_STARTS_WITH(format, "Cc"));
- PrintCc(instr);
- return 2;
- }
- };
- UNREACHABLE();
- return -1;
-}
-
-
-// Format takes a formatting string for a whole instruction and prints it into
-// the output buffer. All escaped options are handed to FormatOption to be
-// parsed further.
-void Decoder::Format(Instruction* instr, const char* format) {
- char cur = *format++;
- while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
- if (cur == '\'') { // Single quote is used as the formatting escape.
- format += FormatOption(instr, format);
- } else {
- out_buffer_[out_buffer_pos_++] = cur;
- }
- cur = *format++;
- }
- out_buffer_[out_buffer_pos_] = '\0';
-}
-
-
-// For currently unimplemented decodings the disassembler calls Unknown(instr)
-// which will just print "unknown" of the instruction bits.
-void Decoder::Unknown(Instruction* instr) {
- Format(instr, "unknown");
-}
-
-
-void Decoder::DecodeTypeRegister(Instruction* instr) {
- switch (instr->OpcodeFieldRaw()) {
- case COP1: // Coprocessor instructions
- switch (instr->RsFieldRaw()) {
- case BC1: // bc1 handled in DecodeTypeImmediate.
- UNREACHABLE();
- break;
- case MFC1:
- Format(instr, "mfc1 'rt, 'fs");
- break;
- case MFHC1:
- Format(instr, "mfhc1 'rt, 'fs");
- break;
- case MTC1:
- Format(instr, "mtc1 'rt, 'fs");
- break;
- // These are called "fs" too, although they are not FPU registers.
- case CTC1:
- Format(instr, "ctc1 'rt, 'fs");
- break;
- case CFC1:
- Format(instr, "cfc1 'rt, 'fs");
- break;
- case MTHC1:
- Format(instr, "mthc1 'rt, 'fs");
- break;
- case D:
- switch (instr->FunctionFieldRaw()) {
- case ADD_D:
- Format(instr, "add.d 'fd, 'fs, 'ft");
- break;
- case SUB_D:
- Format(instr, "sub.d 'fd, 'fs, 'ft");
- break;
- case MUL_D:
- Format(instr, "mul.d 'fd, 'fs, 'ft");
- break;
- case DIV_D:
- Format(instr, "div.d 'fd, 'fs, 'ft");
- break;
- case ABS_D:
- Format(instr, "abs.d 'fd, 'fs");
- break;
- case MOV_D:
- Format(instr, "mov.d 'fd, 'fs");
- break;
- case NEG_D:
- Format(instr, "neg.d 'fd, 'fs");
- break;
- case SQRT_D:
- Format(instr, "sqrt.d 'fd, 'fs");
- break;
- case CVT_W_D:
- Format(instr, "cvt.w.d 'fd, 'fs");
- break;
- case CVT_L_D: {
- if (mips32r2) {
- Format(instr, "cvt.l.d 'fd, 'fs");
- } else {
- Unknown(instr);
- }
- break;
- }
- case TRUNC_W_D:
- Format(instr, "trunc.w.d 'fd, 'fs");
- break;
- case TRUNC_L_D: {
- if (mips32r2) {
- Format(instr, "trunc.l.d 'fd, 'fs");
- } else {
- Unknown(instr);
- }
- break;
- }
- case ROUND_W_D:
- Format(instr, "round.w.d 'fd, 'fs");
- break;
- case FLOOR_W_D:
- Format(instr, "floor.w.d 'fd, 'fs");
- break;
- case CEIL_W_D:
- Format(instr, "ceil.w.d 'fd, 'fs");
- break;
- case CVT_S_D:
- Format(instr, "cvt.s.d 'fd, 'fs");
- break;
- case C_F_D:
- Format(instr, "c.f.d 'fs, 'ft, 'Cc");
- break;
- case C_UN_D:
- Format(instr, "c.un.d 'fs, 'ft, 'Cc");
- break;
- case C_EQ_D:
- Format(instr, "c.eq.d 'fs, 'ft, 'Cc");
- break;
- case C_UEQ_D:
- Format(instr, "c.ueq.d 'fs, 'ft, 'Cc");
- break;
- case C_OLT_D:
- Format(instr, "c.olt.d 'fs, 'ft, 'Cc");
- break;
- case C_ULT_D:
- Format(instr, "c.ult.d 'fs, 'ft, 'Cc");
- break;
- case C_OLE_D:
- Format(instr, "c.ole.d 'fs, 'ft, 'Cc");
- break;
- case C_ULE_D:
- Format(instr, "c.ule.d 'fs, 'ft, 'Cc");
- break;
- default:
- Format(instr, "unknown.cop1.d");
- break;
- }
- break;
- case S:
- UNIMPLEMENTED_MIPS();
- break;
- case W:
- switch (instr->FunctionFieldRaw()) {
- case CVT_S_W: // Convert word to float (single).
- Format(instr, "cvt.s.w 'fd, 'fs");
- break;
- case CVT_D_W: // Convert word to double.
- Format(instr, "cvt.d.w 'fd, 'fs");
- break;
- default:
- UNREACHABLE();
- }
- break;
- case L:
- switch (instr->FunctionFieldRaw()) {
- case CVT_D_L: {
- if (mips32r2) {
- Format(instr, "cvt.d.l 'fd, 'fs");
- } else {
- Unknown(instr);
- }
- break;
- }
- case CVT_S_L: {
- if (mips32r2) {
- Format(instr, "cvt.s.l 'fd, 'fs");
- } else {
- Unknown(instr);
- }
- break;
- }
- default:
- UNREACHABLE();
- }
- break;
- case PS:
- UNIMPLEMENTED_MIPS();
- break;
- default:
- UNREACHABLE();
- }
- break;
- case SPECIAL:
- switch (instr->FunctionFieldRaw()) {
- case JR:
- Format(instr, "jr 'rs");
- break;
- case JALR:
- Format(instr, "jalr 'rs");
- break;
- case SLL:
- if ( 0x0 == static_cast<int>(instr->InstructionBits()))
- Format(instr, "nop");
- else
- Format(instr, "sll 'rd, 'rt, 'sa");
- break;
- case SRL:
- if (instr->RsValue() == 0) {
- Format(instr, "srl 'rd, 'rt, 'sa");
- } else {
- if (mips32r2) {
- Format(instr, "rotr 'rd, 'rt, 'sa");
- } else {
- Unknown(instr);
- }
- }
- break;
- case SRA:
- Format(instr, "sra 'rd, 'rt, 'sa");
- break;
- case SLLV:
- Format(instr, "sllv 'rd, 'rt, 'rs");
- break;
- case SRLV:
- if (instr->SaValue() == 0) {
- Format(instr, "srlv 'rd, 'rt, 'rs");
- } else {
- if (mips32r2) {
- Format(instr, "rotrv 'rd, 'rt, 'rs");
- } else {
- Unknown(instr);
- }
- }
- break;
- case SRAV:
- Format(instr, "srav 'rd, 'rt, 'rs");
- break;
- case MFHI:
- Format(instr, "mfhi 'rd");
- break;
- case MFLO:
- Format(instr, "mflo 'rd");
- break;
- case MULT:
- Format(instr, "mult 'rs, 'rt");
- break;
- case MULTU:
- Format(instr, "multu 'rs, 'rt");
- break;
- case DIV:
- Format(instr, "div 'rs, 'rt");
- break;
- case DIVU:
- Format(instr, "divu 'rs, 'rt");
- break;
- case ADD:
- Format(instr, "add 'rd, 'rs, 'rt");
- break;
- case ADDU:
- Format(instr, "addu 'rd, 'rs, 'rt");
- break;
- case SUB:
- Format(instr, "sub 'rd, 'rs, 'rt");
- break;
- case SUBU:
- Format(instr, "sub 'rd, 'rs, 'rt");
- break;
- case AND:
- Format(instr, "and 'rd, 'rs, 'rt");
- break;
- case OR:
- if (0 == instr->RsValue()) {
- Format(instr, "mov 'rd, 'rt");
- } else if (0 == instr->RtValue()) {
- Format(instr, "mov 'rd, 'rs");
- } else {
- Format(instr, "or 'rd, 'rs, 'rt");
- }
- break;
- case XOR:
- Format(instr, "xor 'rd, 'rs, 'rt");
- break;
- case NOR:
- Format(instr, "nor 'rd, 'rs, 'rt");
- break;
- case SLT:
- Format(instr, "slt 'rd, 'rs, 'rt");
- break;
- case SLTU:
- Format(instr, "sltu 'rd, 'rs, 'rt");
- break;
- case BREAK:
- Format(instr, "break, code: 'code");
- break;
- case TGE:
- Format(instr, "tge 'rs, 'rt, code: 'code");
- break;
- case TGEU:
- Format(instr, "tgeu 'rs, 'rt, code: 'code");
- break;
- case TLT:
- Format(instr, "tlt 'rs, 'rt, code: 'code");
- break;
- case TLTU:
- Format(instr, "tltu 'rs, 'rt, code: 'code");
- break;
- case TEQ:
- Format(instr, "teq 'rs, 'rt, code: 'code");
- break;
- case TNE:
- Format(instr, "tne 'rs, 'rt, code: 'code");
- break;
- case MOVZ:
- Format(instr, "movz 'rd, 'rs, 'rt");
- break;
- case MOVN:
- Format(instr, "movn 'rd, 'rs, 'rt");
- break;
- case MOVCI:
- if (instr->Bit(16)) {
- Format(instr, "movt 'rd, 'rs, 'Cc");
- } else {
- Format(instr, "movf 'rd, 'rs, 'Cc");
- }
- break;
- default:
- UNREACHABLE();
- }
- break;
- case SPECIAL2:
- switch (instr->FunctionFieldRaw()) {
- case MUL:
- Format(instr, "mul 'rd, 'rs, 'rt");
- break;
- case CLZ:
- Format(instr, "clz 'rd, 'rs");
- break;
- default:
- UNREACHABLE();
- }
- break;
- case SPECIAL3:
- switch (instr->FunctionFieldRaw()) {
- case INS: {
- if (mips32r2) {
- Format(instr, "ins 'rt, 'rs, 'sd, 'sa");
- } else {
- Unknown(instr);
- }
- break;
- }
- case EXT: {
- if (mips32r2) {
- Format(instr, "ext 'rt, 'rs, 'sd, 'sa");
- } else {
- Unknown(instr);
- }
- break;
- }
- default:
- UNREACHABLE();
- }
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void Decoder::DecodeTypeImmediate(Instruction* instr) {
- switch (instr->OpcodeFieldRaw()) {
- // ------------- REGIMM class.
- case COP1:
- switch (instr->RsFieldRaw()) {
- case BC1:
- if (instr->FBtrueValue()) {
- Format(instr, "bc1t 'bc, 'imm16u");
- } else {
- Format(instr, "bc1f 'bc, 'imm16u");
- }
- break;
- default:
- UNREACHABLE();
- };
- break; // Case COP1.
- case REGIMM:
- switch (instr->RtFieldRaw()) {
- case BLTZ:
- Format(instr, "bltz 'rs, 'imm16u");
- break;
- case BLTZAL:
- Format(instr, "bltzal 'rs, 'imm16u");
- break;
- case BGEZ:
- Format(instr, "bgez 'rs, 'imm16u");
- break;
- case BGEZAL:
- Format(instr, "bgezal 'rs, 'imm16u");
- break;
- default:
- UNREACHABLE();
- }
- break; // Case REGIMM.
- // ------------- Branch instructions.
- case BEQ:
- Format(instr, "beq 'rs, 'rt, 'imm16u");
- break;
- case BNE:
- Format(instr, "bne 'rs, 'rt, 'imm16u");
- break;
- case BLEZ:
- Format(instr, "blez 'rs, 'imm16u");
- break;
- case BGTZ:
- Format(instr, "bgtz 'rs, 'imm16u");
- break;
- // ------------- Arithmetic instructions.
- case ADDI:
- Format(instr, "addi 'rt, 'rs, 'imm16s");
- break;
- case ADDIU:
- Format(instr, "addiu 'rt, 'rs, 'imm16s");
- break;
- case SLTI:
- Format(instr, "slti 'rt, 'rs, 'imm16s");
- break;
- case SLTIU:
- Format(instr, "sltiu 'rt, 'rs, 'imm16u");
- break;
- case ANDI:
- Format(instr, "andi 'rt, 'rs, 'imm16x");
- break;
- case ORI:
- Format(instr, "ori 'rt, 'rs, 'imm16x");
- break;
- case XORI:
- Format(instr, "xori 'rt, 'rs, 'imm16x");
- break;
- case LUI:
- Format(instr, "lui 'rt, 'imm16x");
- break;
- // ------------- Memory instructions.
- case LB:
- Format(instr, "lb 'rt, 'imm16s('rs)");
- break;
- case LH:
- Format(instr, "lh 'rt, 'imm16s('rs)");
- break;
- case LWL:
- Format(instr, "lwl 'rt, 'imm16s('rs)");
- break;
- case LW:
- Format(instr, "lw 'rt, 'imm16s('rs)");
- break;
- case LBU:
- Format(instr, "lbu 'rt, 'imm16s('rs)");
- break;
- case LHU:
- Format(instr, "lhu 'rt, 'imm16s('rs)");
- break;
- case LWR:
- Format(instr, "lwr 'rt, 'imm16s('rs)");
- break;
- case SB:
- Format(instr, "sb 'rt, 'imm16s('rs)");
- break;
- case SH:
- Format(instr, "sh 'rt, 'imm16s('rs)");
- break;
- case SWL:
- Format(instr, "swl 'rt, 'imm16s('rs)");
- break;
- case SW:
- Format(instr, "sw 'rt, 'imm16s('rs)");
- break;
- case SWR:
- Format(instr, "swr 'rt, 'imm16s('rs)");
- break;
- case LWC1:
- Format(instr, "lwc1 'ft, 'imm16s('rs)");
- break;
- case LDC1:
- Format(instr, "ldc1 'ft, 'imm16s('rs)");
- break;
- case SWC1:
- Format(instr, "swc1 'ft, 'imm16s('rs)");
- break;
- case SDC1:
- Format(instr, "sdc1 'ft, 'imm16s('rs)");
- break;
- default:
- UNREACHABLE();
- break;
- };
-}
-
-
-void Decoder::DecodeTypeJump(Instruction* instr) {
- switch (instr->OpcodeFieldRaw()) {
- case J:
- Format(instr, "j 'imm26");
- break;
- case JAL:
- Format(instr, "jal 'imm26");
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-// Disassemble the instruction at *instr_ptr into the output buffer.
-int Decoder::InstructionDecode(byte_* instr_ptr) {
- Instruction* instr = Instruction::At(instr_ptr);
- // Print raw instruction bytes.
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%08x ",
- instr->InstructionBits());
- switch (instr->InstructionType()) {
- case Instruction::kRegisterType: {
- DecodeTypeRegister(instr);
- break;
- }
- case Instruction::kImmediateType: {
- DecodeTypeImmediate(instr);
- break;
- }
- case Instruction::kJumpType: {
- DecodeTypeJump(instr);
- break;
- }
- default: {
- UNSUPPORTED_MIPS();
- }
- }
- return Instruction::kInstrSize;
-}
-
-
-} } // namespace v8::internal
-
-
-
-//------------------------------------------------------------------------------
-
-namespace disasm {
-
-using v8::internal::byte_;
-
-const char* NameConverter::NameOfAddress(byte_* addr) const {
- v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
- return tmp_buffer_.start();
-}
-
-
-const char* NameConverter::NameOfConstant(byte_* addr) const {
- return NameOfAddress(addr);
-}
-
-
-const char* NameConverter::NameOfCPURegister(int reg) const {
- return v8::internal::Registers::Name(reg);
-}
-
-
-const char* NameConverter::NameOfXMMRegister(int reg) const {
- return v8::internal::FPURegisters::Name(reg);
-}
-
-
-const char* NameConverter::NameOfByteCPURegister(int reg) const {
- UNREACHABLE(); // MIPS does not have the concept of a byte register
- return "nobytereg";
-}
-
-
-const char* NameConverter::NameInCode(byte_* addr) const {
- // The default name converter is called for unknown code. So we will not try
- // to access any memory.
- return "";
-}
-
-
-//------------------------------------------------------------------------------
-
-Disassembler::Disassembler(const NameConverter& converter)
- : converter_(converter) {}
-
-
-Disassembler::~Disassembler() {}
-
-
-int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
- byte_* instruction) {
- v8::internal::Decoder d(converter_, buffer);
- return d.InstructionDecode(instruction);
-}
-
-
-// The MIPS assembler does not currently use constant pools.
-int Disassembler::ConstantPoolSizeAt(byte_* instruction) {
- return -1;
-}
-
-
-void Disassembler::Disassemble(FILE* f, byte_* begin, byte_* end) {
- NameConverter converter;
- Disassembler d(converter);
- for (byte_* pc = begin; pc < end;) {
- v8::internal::EmbeddedVector<char, 128> buffer;
- buffer[0] = '\0';
- byte_* prev_pc = pc;
- pc += d.InstructionDecode(buffer, pc);
- fprintf(f, "%p %08x %s\n",
- prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
- }
-}
-
-
-#undef UNSUPPORTED
-
-} // namespace disasm
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/frames-mips.cc b/src/3rdparty/v8/src/mips/frames-mips.cc
deleted file mode 100644
index e2e0c91..0000000
--- a/src/3rdparty/v8/src/mips/frames-mips.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "frames-inl.h"
-#include "mips/assembler-mips-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-Address ExitFrame::ComputeStackPointer(Address fp) {
- UNIMPLEMENTED_MIPS();
- return fp;
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/frames-mips.h b/src/3rdparty/v8/src/mips/frames-mips.h
deleted file mode 100644
index f507590..0000000
--- a/src/3rdparty/v8/src/mips/frames-mips.h
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-
-#ifndef V8_MIPS_FRAMES_MIPS_H_
-#define V8_MIPS_FRAMES_MIPS_H_
-
-
-namespace v8 {
-namespace internal {
-
-// Register lists.
-// Note that the bit values must match those used in actual instruction
-// encoding.
-static const int kNumRegs = 32;
-
-static const RegList kJSCallerSaved =
- 1 << 2 | // v0
- 1 << 4 | // a0
- 1 << 5 | // a1
- 1 << 6 | // a2
- 1 << 7; // a3
-
-static const int kNumJSCallerSaved = 5;
-
-
-// Return the code of the n-th caller-saved register available to JavaScript
-// e.g. JSCallerSavedReg(0) returns a0.code() == 4.
-int JSCallerSavedCode(int n);
-
-
-// Callee-saved registers preserved when switching from C to JavaScript.
-static const RegList kCalleeSaved =
- // Saved temporaries.
- 1 << 16 | 1 << 17 | 1 << 18 | 1 << 19 |
- 1 << 20 | 1 << 21 | 1 << 22 | 1 << 23 |
- // gp, sp, fp
- 1 << 28 | 1 << 29 | 1 << 30;
-
-static const int kNumCalleeSaved = 11;
-
-
-// Number of registers for which space is reserved in safepoints. Must be a
-// multiple of 8.
-// TODO(mips): Only 8 registers may actually be sufficient. Revisit.
-static const int kNumSafepointRegisters = 16;
-
-// Define the list of registers actually saved at safepoints.
-// Note that the number of saved registers may be smaller than the reserved
-// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-static const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-static const int kNumSafepointSavedRegisters =
- kNumJSCallerSaved + kNumCalleeSaved;
-
-typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
-
-
-// ----------------------------------------------------
-
-class StackHandlerConstants : public AllStatic {
- public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kStateOffset = 1 * kPointerSize;
- static const int kFPOffset = 2 * kPointerSize;
- static const int kPCOffset = 3 * kPointerSize;
-
- static const int kSize = kPCOffset + kPointerSize;
-};
-
-
-class EntryFrameConstants : public AllStatic {
- public:
- static const int kCallerFPOffset = -3 * kPointerSize;
-};
-
-
-class ExitFrameConstants : public AllStatic {
- public:
- static const int kDebugMarkOffset = -1 * kPointerSize;
- // Must be the same as kDebugMarkOffset. Alias introduced when upgrading.
- static const int kCodeOffset = -1 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
-
- // TODO(mips): Use a patched sp value on the stack instead.
- // A marker of 0 indicates that double registers are saved.
- static const int kMarkerOffset = -2 * kPointerSize;
-
- // The caller fields are below the frame pointer on the stack.
- static const int kCallerFPOffset = +0 * kPointerSize;
- // The calling JS function is between FP and PC.
- static const int kCallerPCOffset = +1 * kPointerSize;
-
- // FP-relative displacement of the caller's SP.
- static const int kCallerSPDisplacement = +3 * kPointerSize;
-};
-
-
-class StandardFrameConstants : public AllStatic {
- public:
- static const int kExpressionsOffset = -3 * kPointerSize;
- static const int kMarkerOffset = -2 * kPointerSize;
- static const int kContextOffset = -1 * kPointerSize;
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
- static const int kCallerSPOffset = +2 * kPointerSize;
-
- // Size of the MIPS 4 32-bit argument slots.
- // This is just an alias with a shorter name. Use it from now on.
- static const int kRArgsSlotsSize = 4 * kPointerSize;
- static const int kRegularArgsSlotsSize = kRArgsSlotsSize;
-
- // C/C++ argument slots size.
- static const int kCArgsSlotsSize = 4 * kPointerSize;
- // JS argument slots size.
- static const int kJSArgsSlotsSize = 0 * kPointerSize;
- // Assembly builtins argument slots size.
- static const int kBArgsSlotsSize = 0 * kPointerSize;
-};
-
-
-class JavaScriptFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
-
- // Caller SP-relative.
- static const int kParam0Offset = -2 * kPointerSize;
- static const int kReceiverOffset = -1 * kPointerSize;
-};
-
-
-class ArgumentsAdaptorFrameConstants : public AllStatic {
- public:
- static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
-class InternalFrameConstants : public AllStatic {
- public:
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
-inline Object* JavaScriptFrame::function_slot_object() const {
- const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
-} } // namespace v8::internal
-
-#endif
diff --git a/src/3rdparty/v8/src/mips/full-codegen-mips.cc b/src/3rdparty/v8/src/mips/full-codegen-mips.cc
deleted file mode 100644
index 87507ff..0000000
--- a/src/3rdparty/v8/src/mips/full-codegen-mips.cc
+++ /dev/null
@@ -1,727 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-// Note on Mips implementation:
-//
-// The result_register() for mips is the 'v0' register, which is defined
-// by the ABI to contain function return values. However, the first
-// parameter to a function is defined to be 'a0'. So there are many
-// places where we have to move a previous result in v0 to a0 for the
-// next call: mov(a0, v0). This is not needed on the other architectures.
-
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "parser.h"
-#include "scopes.h"
-#include "stub-cache.h"
-
-#include "mips/code-stubs-mips.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// Generate code for a JS function. On entry to the function the receiver
-// and arguments have been pushed on the stack left to right. The actual
-// argument count matches the formal parameter count expected by the
-// function.
-//
-// The live registers are:
-// o a1: the JS function object being called (ie, ourselves)
-// o cp: our context
-// o fp: our caller's frame pointer
-// o sp: stack pointer
-// o ra: return address
-//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-mips.h for its layout.
-void FullCodeGenerator::Generate(CompilationInfo* info) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::ClearAccumulator() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitReturnSequence() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Heap::RootListIndex index) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Heap::RootListIndex index) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Handle<Object> lit) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
- Register reg) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count,
- Register reg) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
- Register reg) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
- Register reg) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::TestContext::Plug(bool flag) const {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::DoTest(Label* if_true,
- Label* if_false,
- Label* fall_through) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// Original prototype for mips, needs arch-indep change. Leave out for now.
-// void FullCodeGenerator::Split(Condition cc,
-// Register lhs,
-// const Operand& rhs,
-// Label* if_true,
-// Label* if_false,
-// Label* fall_through) {
-void FullCodeGenerator::Split(Condition cc,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
- UNIMPLEMENTED_MIPS();
- return MemOperand(zero_reg, 0);
-}
-
-
-void FullCodeGenerator::Move(Register destination, Slot* source) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
- bool should_normalize,
- Label* if_true,
- Label* if_false) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::Move(Slot* dst,
- Register src,
- Register scratch1,
- Register scratch2) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitDeclaration(Variable* variable,
- Variable::Mode mode,
- FunctionLiteral* function) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
- bool pretenure) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
- Slot* slot,
- Label* slow) {
- UNIMPLEMENTED_MIPS();
- return MemOperand(zero_reg, 0);
-}
-
-
-void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
- Slot* slot,
- TypeofState typeof_state,
- Label* slow,
- Label* done) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
- Slot* slot,
- TypeofState typeof_state,
- Label* slow) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitVariableLoad(Variable* var) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
- Token::Value op,
- OverwriteMode mode,
- Expression* left,
- Expression* right) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
- OverwriteMode mode) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key,
- RelocInfo::Mode mode) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::VisitCall(Call* expr) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::VisitCallNew(CallNew* expr) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
- Expression* left,
- Expression* right,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- UNIMPLEMENTED_MIPS();
- return false;
-}
-
-
-void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-Register FullCodeGenerator::result_register() {
- UNIMPLEMENTED_MIPS();
- return v0;
-}
-
-
-Register FullCodeGenerator::context_register() {
- UNIMPLEMENTED_MIPS();
- return cp;
-}
-
-
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/ic-mips.cc b/src/3rdparty/v8/src/mips/ic-mips.cc
deleted file mode 100644
index fa8a7bb..0000000
--- a/src/3rdparty/v8/src/mips/ic-mips.cc
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "code-stubs.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-
-void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// Defined in ic.cc.
-Object* CallIC_Miss(Arguments args);
-
-
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// Defined in ic.cc.
-Object* LoadIC_Miss(Arguments args);
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
- UNIMPLEMENTED_MIPS();
- return false;
-}
-
-
-bool LoadIC::PatchInlinedContextualLoad(Address address,
- Object* map,
- Object* cell,
- bool is_dont_delete) {
- UNIMPLEMENTED_MIPS();
- return false;
-}
-
-
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
- UNIMPLEMENTED_MIPS();
- return false;
-}
-
-
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
- UNIMPLEMENTED_MIPS();
- return false;
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
- UNIMPLEMENTED_MIPS();
- return false;
-}
-
-
-Object* KeyedLoadIC_Miss(Arguments args);
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-#undef __
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
- UNIMPLEMENTED_MIPS();
- return kNoCondition;
-}
-
-
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void PatchInlinedSmiCode(Address address) {
- // Currently there is no smi inlining in the MIPS full code generator.
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/jump-target-mips.cc b/src/3rdparty/v8/src/mips/jump-target-mips.cc
deleted file mode 100644
index bd6d60b..0000000
--- a/src/3rdparty/v8/src/mips/jump-target-mips.cc
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
-#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
- (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
- (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
-
-
-void JumpTarget::DoJump() {
- UNIMPLEMENTED_MIPS();
-}
-
-// Original prototype for mips, needs arch-indep change. Leave out for now.
-// void JumpTarget::DoBranch(Condition cc, Hint ignored,
-// Register src1, const Operand& src2) {
-void JumpTarget::DoBranch(Condition cc, Hint ignored) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void JumpTarget::Call() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void JumpTarget::DoBind() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-#undef __
-#undef BRANCH_ARGS_CHECK
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/lithium-codegen-mips.h b/src/3rdparty/v8/src/mips/lithium-codegen-mips.h
deleted file mode 100644
index 345d912..0000000
--- a/src/3rdparty/v8/src/mips/lithium-codegen-mips.h
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
-#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
-
-#include "mips/lithium-mips.h"
-
-#include "deoptimizer.h"
-#include "safepoint-table.h"
-#include "scopes.h"
-
-// Note: this file was taken from the X64 version. ARM has a partially working
-// lithium implementation, but for now it is not ported to mips.
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-
-class LCodeGen BASE_EMBEDDED {
- public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) { }
-
- // Try to generate code for the entire chunk, but it may fail if the
- // chunk contains constructs we cannot handle. Returns true if the
- // code generation attempt succeeded.
- bool GenerateCode() {
- UNIMPLEMENTED();
- return false;
- }
-
- // Finish the code by setting stack height, safepoint, and bailout
- // information on it.
- void FinishCode(Handle<Code> code) { UNIMPLEMENTED(); }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
diff --git a/src/3rdparty/v8/src/mips/lithium-mips.h b/src/3rdparty/v8/src/mips/lithium-mips.h
deleted file mode 100644
index e11dfab..0000000
--- a/src/3rdparty/v8/src/mips/lithium-mips.h
+++ /dev/null
@@ -1,304 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MIPS_LITHIUM_MIPS_H_
-#define V8_MIPS_LITHIUM_MIPS_H_
-
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "lithium.h"
-#include "safepoint-table.h"
-
-// Note: this file was taken from the X64 version. ARM has a partially working
-// lithium implementation, but for now it is not ported to mips.
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LCodeGen;
-class LEnvironment;
-class Translation;
-
-class LInstruction: public ZoneObject {
- public:
- LInstruction() { }
- virtual ~LInstruction() { }
-
- // Predicates should be generated by macro as in lithium-ia32.h.
- virtual bool IsLabel() const {
- UNIMPLEMENTED();
- return false;
- }
- virtual bool IsOsrEntry() const {
- UNIMPLEMENTED();
- return false;
- }
-
- LPointerMap* pointer_map() const {
- UNIMPLEMENTED();
- return NULL;
- }
-
- bool HasPointerMap() const {
- UNIMPLEMENTED();
- return false;
- }
-
- void set_environment(LEnvironment* env) { UNIMPLEMENTED(); }
-
- LEnvironment* environment() const {
- UNIMPLEMENTED();
- return NULL;
- }
-
- bool HasEnvironment() const {
- UNIMPLEMENTED();
- return NULL;
- }
-
- virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
-
- virtual bool IsControl() const {
- UNIMPLEMENTED();
- return false;
- }
-
- void MarkAsCall() { UNIMPLEMENTED(); }
- void MarkAsSaveDoubles() { UNIMPLEMENTED(); }
-
- // Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const {
- UNIMPLEMENTED();
- return false;
- }
-
- bool IsMarkedAsSaveDoubles() const {
- UNIMPLEMENTED();
- return false;
- }
-
- virtual bool HasResult() const {
- UNIMPLEMENTED();
- return false;
- }
-
- virtual LOperand* result() {
- UNIMPLEMENTED();
- return NULL;
- }
-
- virtual int InputCount() {
- UNIMPLEMENTED();
- return 0;
- }
-
- virtual LOperand* InputAt(int i) {
- UNIMPLEMENTED();
- return NULL;
- }
-
- virtual int TempCount() {
- UNIMPLEMENTED();
- return 0;
- }
-
- virtual LOperand* TempAt(int i) {
- UNIMPLEMENTED();
- return NULL;
- }
-
- LOperand* FirstInput() {
- UNIMPLEMENTED();
- return NULL;
- }
-
- LOperand* Output() {
- UNIMPLEMENTED();
- return NULL;
- }
-
-#ifdef DEBUG
- void VerifyCall() { UNIMPLEMENTED(); }
-#endif
-};
-
-
-class LGap: public LInstruction {
- public:
- explicit LGap(HBasicBlock* block) { }
-
- HBasicBlock* block() const {
- UNIMPLEMENTED();
- return NULL;
- }
-
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
-
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
- UNIMPLEMENTED();
- return NULL;
- }
-
- LParallelMove* GetParallelMove(InnerPosition pos) {
- UNIMPLEMENTED();
- return NULL;
- }
-};
-
-
-class LLabel: public LGap {
- public:
- explicit LLabel(HBasicBlock* block) : LGap(block) { }
-};
-
-
-class LOsrEntry: public LInstruction {
- public:
- // Function could be generated by a macro as in lithium-ia32.h.
- static LOsrEntry* cast(LInstruction* instr) {
- UNIMPLEMENTED();
- return NULL;
- }
-
- LOperand** SpilledRegisterArray() {
- UNIMPLEMENTED();
- return NULL;
- }
- LOperand** SpilledDoubleRegisterArray() {
- UNIMPLEMENTED();
- return NULL;
- }
-
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand) {
- UNIMPLEMENTED();
- }
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- UNIMPLEMENTED();
- }
-};
-
-
-class LChunk: public ZoneObject {
- public:
- explicit LChunk(CompilationInfo* info, HGraph* graph) { }
-
- HGraph* graph() const {
- UNIMPLEMENTED();
- return NULL;
- }
-
- CompilationInfo* info() const { return NULL; }
-
- const ZoneList<LPointerMap*>* pointer_maps() const {
- UNIMPLEMENTED();
- return NULL;
- }
-
- LOperand* GetNextSpillSlot(bool double_slot) {
- UNIMPLEMENTED();
- return NULL;
- }
-
- LConstantOperand* DefineConstantOperand(HConstant* constant) {
- UNIMPLEMENTED();
- return NULL;
- }
-
- LLabel* GetLabel(int block_id) const {
- UNIMPLEMENTED();
- return NULL;
- }
-
- const ZoneList<LInstruction*>* instructions() const {
- UNIMPLEMENTED();
- return NULL;
- }
-
- int GetParameterStackSlot(int index) const {
- UNIMPLEMENTED();
- return 0;
- }
-
- void AddGapMove(int index, LOperand* from, LOperand* to) { UNIMPLEMENTED(); }
-
- LGap* GetGapAt(int index) const {
- UNIMPLEMENTED();
- return NULL;
- }
-
- bool IsGapAt(int index) const {
- UNIMPLEMENTED();
- return false;
- }
-
- int NearestGapPos(int index) const {
- UNIMPLEMENTED();
- return 0;
- }
-
- void MarkEmptyBlocks() { UNIMPLEMENTED(); }
-
-#ifdef DEBUG
- void Verify() { UNIMPLEMENTED(); }
-#endif
-};
-
-
-class LChunkBuilder BASE_EMBEDDED {
- public:
- LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) { }
-
- // Build the sequence for the graph.
- LChunk* Build() {
- UNIMPLEMENTED();
- return NULL;
- };
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node) { \
- UNIMPLEMENTED(); \
- return NULL; \
- }
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_MIPS_LITHIUM_MIPS_H_
diff --git a/src/3rdparty/v8/src/mips/macro-assembler-mips.cc b/src/3rdparty/v8/src/mips/macro-assembler-mips.cc
deleted file mode 100644
index bd4ab48..0000000
--- a/src/3rdparty/v8/src/mips/macro-assembler-mips.cc
+++ /dev/null
@@ -1,3327 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <limits.h> // For LONG_MIN, LONG_MAX
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "bootstrapper.h"
-#include "codegen-inl.h"
-#include "debug.h"
-#include "runtime.h"
-
-namespace v8 {
-namespace internal {
-
-MacroAssembler::MacroAssembler(void* buffer, int size)
- : Assembler(buffer, size),
- generating_stub_(false),
- allow_stub_calls_(true),
- code_object_(HEAP->undefined_value()) {
-}
-
-
-// Arguments macros
-#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
-#define COND_ARGS cond, r1, r2
-
-#define REGISTER_TARGET_BODY(Name) \
-void MacroAssembler::Name(Register target, \
- BranchDelaySlot bd) { \
- Name(Operand(target), bd); \
-} \
-void MacroAssembler::Name(Register target, COND_TYPED_ARGS, \
- BranchDelaySlot bd) { \
- Name(Operand(target), COND_ARGS, bd); \
-}
-
-
-#define INT_PTR_TARGET_BODY(Name) \
-void MacroAssembler::Name(intptr_t target, RelocInfo::Mode rmode, \
- BranchDelaySlot bd) { \
- Name(Operand(target, rmode), bd); \
-} \
-void MacroAssembler::Name(intptr_t target, \
- RelocInfo::Mode rmode, \
- COND_TYPED_ARGS, \
- BranchDelaySlot bd) { \
- Name(Operand(target, rmode), COND_ARGS, bd); \
-}
-
-
-#define BYTE_PTR_TARGET_BODY(Name) \
-void MacroAssembler::Name(byte* target, RelocInfo::Mode rmode, \
- BranchDelaySlot bd) { \
- Name(reinterpret_cast<intptr_t>(target), rmode, bd); \
-} \
-void MacroAssembler::Name(byte* target, \
- RelocInfo::Mode rmode, \
- COND_TYPED_ARGS, \
- BranchDelaySlot bd) { \
- Name(reinterpret_cast<intptr_t>(target), rmode, COND_ARGS, bd); \
-}
-
-
-#define CODE_TARGET_BODY(Name) \
-void MacroAssembler::Name(Handle<Code> target, RelocInfo::Mode rmode, \
- BranchDelaySlot bd) { \
- Name(reinterpret_cast<intptr_t>(target.location()), rmode, bd); \
-} \
-void MacroAssembler::Name(Handle<Code> target, \
- RelocInfo::Mode rmode, \
- COND_TYPED_ARGS, \
- BranchDelaySlot bd) { \
- Name(reinterpret_cast<intptr_t>(target.location()), rmode, COND_ARGS, bd); \
-}
-
-
-REGISTER_TARGET_BODY(Jump)
-REGISTER_TARGET_BODY(Call)
-INT_PTR_TARGET_BODY(Jump)
-INT_PTR_TARGET_BODY(Call)
-BYTE_PTR_TARGET_BODY(Jump)
-BYTE_PTR_TARGET_BODY(Call)
-CODE_TARGET_BODY(Jump)
-CODE_TARGET_BODY(Call)
-
-#undef COND_TYPED_ARGS
-#undef COND_ARGS
-#undef REGISTER_TARGET_BODY
-#undef BYTE_PTR_TARGET_BODY
-#undef CODE_TARGET_BODY
-
-
-void MacroAssembler::Ret(BranchDelaySlot bd) {
- Jump(Operand(ra), bd);
-}
-
-
-void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2,
- BranchDelaySlot bd) {
- Jump(Operand(ra), cond, r1, r2, bd);
-}
-
-
-void MacroAssembler::LoadRoot(Register destination,
- Heap::RootListIndex index) {
- lw(destination, MemOperand(s6, index << kPointerSizeLog2));
-}
-
-
-void MacroAssembler::LoadRoot(Register destination,
- Heap::RootListIndex index,
- Condition cond,
- Register src1, const Operand& src2) {
- Branch(2, NegateCondition(cond), src1, src2);
- lw(destination, MemOperand(s6, index << kPointerSizeLog2));
-}
-
-
-void MacroAssembler::StoreRoot(Register source,
- Heap::RootListIndex index) {
- sw(source, MemOperand(s6, index << kPointerSizeLog2));
-}
-
-
-void MacroAssembler::StoreRoot(Register source,
- Heap::RootListIndex index,
- Condition cond,
- Register src1, const Operand& src2) {
- Branch(2, NegateCondition(cond), src1, src2);
- sw(source, MemOperand(s6, index << kPointerSizeLog2));
-}
-
-
-void MacroAssembler::RecordWriteHelper(Register object,
- Register address,
- Register scratch) {
- if (FLAG_debug_code) {
- // Check that the object is not in new space.
- Label not_in_new_space;
- InNewSpace(object, scratch, ne, &not_in_new_space);
- Abort("new-space object passed to RecordWriteHelper");
- bind(&not_in_new_space);
- }
-
- // Calculate page address: Clear bits from 0 to kPageSizeBits.
- if (mips32r2) {
- Ins(object, zero_reg, 0, kPageSizeBits);
- } else {
- // The Ins macro is slow on r1, so use shifts instead.
- srl(object, object, kPageSizeBits);
- sll(object, object, kPageSizeBits);
- }
-
- // Calculate region number.
- Ext(address, address, Page::kRegionSizeLog2,
- kPageSizeBits - Page::kRegionSizeLog2);
-
- // Mark region dirty.
- lw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
- li(at, Operand(1));
- sllv(at, at, address);
- or_(scratch, scratch, at);
- sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
-}
-
-
-void MacroAssembler::InNewSpace(Register object,
- Register scratch,
- Condition cc,
- Label* branch) {
- ASSERT(cc == eq || cc == ne);
- And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
- Branch(branch, cc, scratch,
- Operand(ExternalReference::new_space_start(isolate())));
-}
-
-
-// Will clobber 4 registers: object, scratch0, scratch1, at. The
-// register 'object' contains a heap object pointer. The heap object
-// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object,
- Operand offset,
- Register scratch0,
- Register scratch1) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are cp.
- ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
-
- Label done;
-
- // First, test that the object is not in the new space. We cannot set
- // region marks for new space pages.
- InNewSpace(object, scratch0, eq, &done);
-
- // Add offset into the object.
- Addu(scratch0, object, offset);
-
- // Record the actual write.
- RecordWriteHelper(object, scratch0, scratch1);
-
- bind(&done);
-
- // Clobber all input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (FLAG_debug_code) {
- li(object, Operand(BitCast<int32_t>(kZapValue)));
- li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
- li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
- }
-}
-
-
-// Will clobber 4 registers: object, address, scratch, ip. The
-// register 'object' contains a heap object pointer. The heap object
-// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register scratch) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are cp.
- ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
-
- Label done;
-
- // First, test that the object is not in the new space. We cannot set
- // region marks for new space pages.
- InNewSpace(object, scratch, eq, &done);
-
- // Record the actual write.
- RecordWriteHelper(object, address, scratch);
-
- bind(&done);
-
- // Clobber all input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (FLAG_debug_code) {
- li(object, Operand(BitCast<int32_t>(kZapValue)));
- li(address, Operand(BitCast<int32_t>(kZapValue)));
- li(scratch, Operand(BitCast<int32_t>(kZapValue)));
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Allocation support
-
-
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss) {
- Label same_contexts;
-
- ASSERT(!holder_reg.is(scratch));
- ASSERT(!holder_reg.is(at));
- ASSERT(!scratch.is(at));
-
- // Load current lexical context from the stack frame.
- lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // In debug mode, make sure the lexical context is set.
-#ifdef DEBUG
- Check(ne, "we should not have an empty lexical context",
- scratch, Operand(zero_reg));
-#endif
-
- // Load the global context of the current context.
- int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- lw(scratch, FieldMemOperand(scratch, offset));
- lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
-
- // Check the context is a global context.
- if (FLAG_debug_code) {
- // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
- Push(holder_reg); // Temporarily save holder on the stack.
- // Read the first word and compare to the global_context_map.
- lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
- LoadRoot(at, Heap::kGlobalContextMapRootIndex);
- Check(eq, "JSGlobalObject::global_context should be a global context.",
- holder_reg, Operand(at));
- Pop(holder_reg); // Restore holder.
- }
-
- // Check if both contexts are the same.
- lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
- Branch(&same_contexts, eq, scratch, Operand(at));
-
- // Check the context is a global context.
- if (FLAG_debug_code) {
- // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
- Push(holder_reg); // Temporarily save holder on the stack.
- mov(holder_reg, at); // Move at to its holding place.
- LoadRoot(at, Heap::kNullValueRootIndex);
- Check(ne, "JSGlobalProxy::context() should not be null.",
- holder_reg, Operand(at));
-
- lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
- LoadRoot(at, Heap::kGlobalContextMapRootIndex);
- Check(eq, "JSGlobalObject::global_context should be a global context.",
- holder_reg, Operand(at));
- // Restore at is not needed. at is reloaded below.
- Pop(holder_reg); // Restore holder.
- // Restore at to holder's context.
- lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
- }
-
- // Check that the security token in the calling global object is
- // compatible with the security token in the receiving global
- // object.
- int token_offset = Context::kHeaderSize +
- Context::SECURITY_TOKEN_INDEX * kPointerSize;
-
- lw(scratch, FieldMemOperand(scratch, token_offset));
- lw(at, FieldMemOperand(at, token_offset));
- Branch(miss, ne, scratch, Operand(at));
-
- bind(&same_contexts);
-}
-
-
-// ---------------------------------------------------------------------------
-// Instruction macros
-
-void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- addu(rd, rs, rt.rm());
- } else {
- if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
- addiu(rd, rs, rt.imm32_);
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- addu(rd, rs, at);
- }
- }
-}
-
-
-void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- subu(rd, rs, rt.rm());
- } else {
- if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
- addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- subu(rd, rs, at);
- }
- }
-}
-
-
-void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- mul(rd, rs, rt.rm());
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- mul(rd, rs, at);
- }
-}
-
-
-void MacroAssembler::Mult(Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- mult(rs, rt.rm());
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- mult(rs, at);
- }
-}
-
-
-void MacroAssembler::Multu(Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- multu(rs, rt.rm());
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- multu(rs, at);
- }
-}
-
-
-void MacroAssembler::Div(Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- div(rs, rt.rm());
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- div(rs, at);
- }
-}
-
-
-void MacroAssembler::Divu(Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- divu(rs, rt.rm());
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- divu(rs, at);
- }
-}
-
-
-void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- and_(rd, rs, rt.rm());
- } else {
- if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
- andi(rd, rs, rt.imm32_);
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- and_(rd, rs, at);
- }
- }
-}
-
-
-void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- or_(rd, rs, rt.rm());
- } else {
- if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
- ori(rd, rs, rt.imm32_);
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- or_(rd, rs, at);
- }
- }
-}
-
-
-void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- xor_(rd, rs, rt.rm());
- } else {
- if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
- xori(rd, rs, rt.imm32_);
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- xor_(rd, rs, at);
- }
- }
-}
-
-
-void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- nor(rd, rs, rt.rm());
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- nor(rd, rs, at);
- }
-}
-
-
-void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- slt(rd, rs, rt.rm());
- } else {
- if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
- slti(rd, rs, rt.imm32_);
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- slt(rd, rs, at);
- }
- }
-}
-
-
-void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- sltu(rd, rs, rt.rm());
- } else {
- if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
- sltiu(rd, rs, rt.imm32_);
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- sltu(rd, rs, at);
- }
- }
-}
-
-
-void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
- if (mips32r2) {
- if (rt.is_reg()) {
- rotrv(rd, rs, rt.rm());
- } else {
- rotr(rd, rs, rt.imm32_);
- }
- } else {
- if (rt.is_reg()) {
- subu(at, zero_reg, rt.rm());
- sllv(at, rs, at);
- srlv(rd, rs, rt.rm());
- or_(rd, rd, at);
- } else {
- if (rt.imm32_ == 0) {
- srl(rd, rs, 0);
- } else {
- srl(at, rs, rt.imm32_);
- sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
- or_(rd, rd, at);
- }
- }
- }
-}
-
-
-//------------Pseudo-instructions-------------
-
-void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
- ASSERT(!j.is_reg());
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (!MustUseReg(j.rmode_) && !gen2instr) {
- // Normal load of an immediate value which does not need Relocation Info.
- if (is_int16(j.imm32_)) {
- addiu(rd, zero_reg, j.imm32_);
- } else if (!(j.imm32_ & kHiMask)) {
- ori(rd, zero_reg, j.imm32_);
- } else if (!(j.imm32_ & kImm16Mask)) {
- lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
- } else {
- lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
- ori(rd, rd, (j.imm32_ & kImm16Mask));
- }
- } else if (MustUseReg(j.rmode_) || gen2instr) {
- if (MustUseReg(j.rmode_)) {
- RecordRelocInfo(j.rmode_, j.imm32_);
- }
- // We need always the same number of instructions as we may need to patch
- // this code to load another value which may need 2 instructions to load.
- if (is_int16(j.imm32_)) {
- nop();
- addiu(rd, zero_reg, j.imm32_);
- } else if (!(j.imm32_ & kHiMask)) {
- nop();
- ori(rd, zero_reg, j.imm32_);
- } else if (!(j.imm32_ & kImm16Mask)) {
- nop();
- lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
- } else {
- lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
- ori(rd, rd, (j.imm32_ & kImm16Mask));
- }
- }
-}
-
-
-// Exception-generating instructions and debugging support
-void MacroAssembler::stop(const char* msg) {
- // TO_UPGRADE: Just a break for now. Maybe we could upgrade it.
- // We use the 0x54321 value to be able to find it easily when reading memory.
- break_(0x54321);
-}
-
-
-void MacroAssembler::MultiPush(RegList regs) {
- int16_t NumSaved = 0;
- int16_t NumToPush = NumberOfBitsSet(regs);
-
- addiu(sp, sp, -4 * NumToPush);
- for (int16_t i = kNumRegisters; i > 0; i--) {
- if ((regs & (1 << i)) != 0) {
- sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
- }
- }
-}
-
-
-void MacroAssembler::MultiPushReversed(RegList regs) {
- int16_t NumSaved = 0;
- int16_t NumToPush = NumberOfBitsSet(regs);
-
- addiu(sp, sp, -4 * NumToPush);
- for (int16_t i = 0; i < kNumRegisters; i++) {
- if ((regs & (1 << i)) != 0) {
- sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
- }
- }
-}
-
-
-void MacroAssembler::MultiPop(RegList regs) {
- int16_t NumSaved = 0;
-
- for (int16_t i = 0; i < kNumRegisters; i++) {
- if ((regs & (1 << i)) != 0) {
- lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
- }
- }
- addiu(sp, sp, 4 * NumSaved);
-}
-
-
-void MacroAssembler::MultiPopReversed(RegList regs) {
- int16_t NumSaved = 0;
-
- for (int16_t i = kNumRegisters; i > 0; i--) {
- if ((regs & (1 << i)) != 0) {
- lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
- }
- }
- addiu(sp, sp, 4 * NumSaved);
-}
-
-
-void MacroAssembler::Ext(Register rt,
- Register rs,
- uint16_t pos,
- uint16_t size) {
- ASSERT(pos < 32);
- ASSERT(pos + size < 32);
-
- if (mips32r2) {
- ext_(rt, rs, pos, size);
- } else {
- // Move rs to rt and shift it left then right to get the
- // desired bitfield on the right side and zeroes on the left.
- sll(rt, rs, 32 - (pos + size));
- srl(rt, rt, 32 - size);
- }
-}
-
-
-void MacroAssembler::Ins(Register rt,
- Register rs,
- uint16_t pos,
- uint16_t size) {
- ASSERT(pos < 32);
- ASSERT(pos + size < 32);
-
- if (mips32r2) {
- ins_(rt, rs, pos, size);
- } else {
- ASSERT(!rt.is(t8) && !rs.is(t8));
-
- srl(t8, rt, pos + size);
- // The left chunk from rt that needs to
- // be saved is on the right side of t8.
- sll(at, t8, pos + size);
- // The 'at' register now contains the left chunk on
- // the left (proper position) and zeroes.
- sll(t8, rt, 32 - pos);
- // t8 now contains the right chunk on the left and zeroes.
- srl(t8, t8, 32 - pos);
- // t8 now contains the right chunk on
- // the right (proper position) and zeroes.
- or_(rt, at, t8);
- // rt now contains the left and right chunks from the original rt
- // in their proper position and zeroes in the middle.
- sll(t8, rs, 32 - size);
- // t8 now contains the chunk from rs on the left and zeroes.
- srl(t8, t8, 32 - size - pos);
- // t8 now contains the original chunk from rs in
- // the middle (proper position).
- or_(rt, rt, t8);
- // rt now contains the result of the ins instruction in R2 mode.
- }
-}
-
-
-void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
- // Move the data from fs to t4.
- mfc1(t4, fs);
- return Cvt_d_uw(fd, t4);
-}
-
-
-void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
- // Convert rs to a FP value in fd (and fd + 1).
- // We do this by converting rs minus the MSB to avoid sign conversion,
- // then adding 2^31-1 and 1 to the result.
-
- ASSERT(!fd.is(f20));
- ASSERT(!rs.is(t9));
- ASSERT(!rs.is(t8));
-
- // Save rs's MSB to t8
- And(t8, rs, 0x80000000);
- // Remove rs's MSB.
- And(t9, rs, 0x7FFFFFFF);
- // Move t9 to fd
- mtc1(t9, fd);
-
- // Convert fd to a real FP value.
- cvt_d_w(fd, fd);
-
- Label conversion_done;
-
- // If rs's MSB was 0, it's done.
- // Otherwise we need to add that to the FP register.
- Branch(&conversion_done, eq, t8, Operand(zero_reg));
-
- // First load 2^31 - 1 into f20.
- Or(t9, zero_reg, 0x7FFFFFFF);
- mtc1(t9, f20);
-
- // Convert it to FP and add it to fd.
- cvt_d_w(f20, f20);
- add_d(fd, fd, f20);
- // Now add 1.
- Or(t9, zero_reg, 1);
- mtc1(t9, f20);
-
- cvt_d_w(f20, f20);
- add_d(fd, fd, f20);
- bind(&conversion_done);
-}
-
-
-void MacroAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs) {
- Trunc_uw_d(fs, t4);
- mtc1(t4, fd);
-}
-
-
-void MacroAssembler::Trunc_uw_d(FPURegister fd, Register rs) {
- ASSERT(!fd.is(f22));
- ASSERT(!rs.is(t6));
-
- // Load 2^31 into f22.
- Or(t6, zero_reg, 0x80000000);
- Cvt_d_uw(f22, t6);
-
- // Test if f22 > fd.
- c(OLT, D, fd, f22);
-
- Label simple_convert;
- // If fd < 2^31 we can convert it normally.
- bc1t(&simple_convert);
-
- // First we subtract 2^31 from fd, then trunc it to rs
- // and add 2^31 to rs.
-
- sub_d(f22, fd, f22);
- trunc_w_d(f22, f22);
- mfc1(rs, f22);
- or_(rs, rs, t6);
-
- Label done;
- Branch(&done);
- // Simple conversion.
- bind(&simple_convert);
- trunc_w_d(f22, fd);
- mfc1(rs, f22);
-
- bind(&done);
-}
-
-
-// Tries to get a signed int32 out of a double precision floating point heap
-// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
-// 32bits signed integer range.
-// This method implementation differs from the ARM version for performance
-// reasons.
-void MacroAssembler::ConvertToInt32(Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- FPURegister double_scratch,
- Label *not_int32) {
- Label right_exponent, done;
- // Get exponent word (ENDIAN issues).
- lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
- // Load dest with zero. We use this either for the final shift or
- // for the answer.
- mov(dest, zero_reg);
- // Check whether the exponent matches a 32 bit signed int that is not a Smi.
- // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
- // the exponent that we are fastest at and also the highest exponent we can
- // handle here.
- const uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- // If we have a match of the int32-but-not-Smi exponent then skip some logic.
- Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
- // If the exponent is higher than that then go to not_int32 case. This
- // catches numbers that don't fit in a signed int32, infinities and NaNs.
- Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
-
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
- // it rounds to zero.
- const uint32_t zero_exponent =
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- Subu(scratch2, scratch2, Operand(zero_exponent));
- // Dest already has a Smi zero.
- Branch(&done, lt, scratch2, Operand(zero_reg));
- if (!Isolate::Current()->cpu_features()->IsSupported(FPU)) {
- // We have a shifted exponent between 0 and 30 in scratch2.
- srl(dest, scratch2, HeapNumber::kExponentShift);
- // We now have the exponent in dest. Subtract from 30 to get
- // how much to shift down.
- li(at, Operand(30));
- subu(dest, at, dest);
- }
- bind(&right_exponent);
- if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // MIPS FPU instructions implementing double precision to integer
- // conversion using round to zero. Since the FP value was qualified
- // above, the resulting integer should be a legal int32.
- // The original 'Exponent' word is still in scratch.
- lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
- trunc_w_d(double_scratch, double_scratch);
- mfc1(dest, double_scratch);
- } else {
- // On entry, dest has final downshift, scratch has original sign/exp/mant.
- // Save sign bit in top bit of dest.
- And(scratch2, scratch, Operand(0x80000000));
- Or(dest, dest, Operand(scratch2));
- // Put back the implicit 1, just above mantissa field.
- Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));
-
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just orred in the implicit bit so that took care of one and
- // we want to leave the sign bit 0 so we subtract 2 bits from the shift
- // distance. But we want to clear the sign-bit so shift one more bit
- // left, then shift right one bit.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- sll(scratch, scratch, shift_distance + 1);
- srl(scratch, scratch, 1);
-
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- // Extract the top 10 bits, and insert those bottom 10 bits of scratch.
- // The width of the field here is the same as the shift amount above.
- const int field_width = shift_distance;
- Ext(scratch2, scratch2, 32-shift_distance, field_width);
- Ins(scratch, scratch2, 0, field_width);
- // Move down according to the exponent.
- srlv(scratch, scratch, dest);
- // Prepare the negative version of our integer.
- subu(scratch2, zero_reg, scratch);
- // Trick to check sign bit (msb) held in dest, count leading zero.
- // 0 indicates negative, save negative version with conditional move.
- clz(dest, dest);
- movz(scratch, scratch2, dest);
- mov(dest, scratch);
- }
- bind(&done);
-}
-
-
-// Emulated condtional branches do not emit a nop in the branch delay slot.
-//
-// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
-#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
- (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
- (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
-
-
-void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
- b(offset);
-
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
-
-
-void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- ASSERT(!rs.is(zero_reg));
- Register r2 = no_reg;
- Register scratch = at;
-
- if (rt.is_reg()) {
- // We don't want any other register but scratch clobbered.
- ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
- r2 = rt.rm_;
- switch (cond) {
- case cc_always:
- b(offset);
- break;
- case eq:
- beq(rs, r2, offset);
- break;
- case ne:
- bne(rs, r2, offset);
- break;
- // Signed comparison
- case greater:
- if (r2.is(zero_reg)) {
- bgtz(rs, offset);
- } else {
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case greater_equal:
- if (r2.is(zero_reg)) {
- bgez(rs, offset);
- } else {
- slt(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case less:
- if (r2.is(zero_reg)) {
- bltz(rs, offset);
- } else {
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case less_equal:
- if (r2.is(zero_reg)) {
- blez(rs, offset);
- } else {
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- // Unsigned comparison.
- case Ugreater:
- if (r2.is(zero_reg)) {
- bgtz(rs, offset);
- } else {
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Ugreater_equal:
- if (r2.is(zero_reg)) {
- bgez(rs, offset);
- } else {
- sltu(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case Uless:
- if (r2.is(zero_reg)) {
- b(offset);
- } else {
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Uless_equal:
- if (r2.is(zero_reg)) {
- b(offset);
- } else {
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // Be careful to always use shifted_branch_offset only just before the
- // branch instruction, as the location will be remember for patching the
- // target.
- switch (cond) {
- case cc_always:
- b(offset);
- break;
- case eq:
- // We don't want any other register but scratch clobbered.
- ASSERT(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- beq(rs, r2, offset);
- break;
- case ne:
- // We don't want any other register but scratch clobbered.
- ASSERT(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- bne(rs, r2, offset);
- break;
- // Signed comparison
- case greater:
- if (rt.imm32_ == 0) {
- bgtz(rs, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case greater_equal:
- if (rt.imm32_ == 0) {
- bgez(rs, offset);
- } else if (is_int16(rt.imm32_)) {
- slti(scratch, rs, rt.imm32_);
- beq(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case less:
- if (rt.imm32_ == 0) {
- bltz(rs, offset);
- } else if (is_int16(rt.imm32_)) {
- slti(scratch, rs, rt.imm32_);
- bne(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case less_equal:
- if (rt.imm32_ == 0) {
- blez(rs, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- // Unsigned comparison.
- case Ugreater:
- if (rt.imm32_ == 0) {
- bgtz(rs, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Ugreater_equal:
- if (rt.imm32_ == 0) {
- bgez(rs, offset);
- } else if (is_int16(rt.imm32_)) {
- sltiu(scratch, rs, rt.imm32_);
- beq(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case Uless:
- if (rt.imm32_ == 0) {
- b(offset);
- } else if (is_int16(rt.imm32_)) {
- sltiu(scratch, rs, rt.imm32_);
- bne(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Uless_equal:
- if (rt.imm32_ == 0) {
- b(offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- default:
- UNREACHABLE();
- }
- }
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
-
-
-void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
- // We use branch_offset as an argument for the branch instructions to be sure
- // it is called just before generating the branch instruction, as needed.
-
- b(shifted_branch_offset(L, false));
-
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
-
-
-void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
-
- int32_t offset;
- Register r2 = no_reg;
- Register scratch = at;
- if (rt.is_reg()) {
- r2 = rt.rm_;
- // Be careful to always use shifted_branch_offset only just before the
- // branch instruction, as the location will be remember for patching the
- // target.
- switch (cond) {
- case cc_always:
- offset = shifted_branch_offset(L, false);
- b(offset);
- break;
- case eq:
- offset = shifted_branch_offset(L, false);
- beq(rs, r2, offset);
- break;
- case ne:
- offset = shifted_branch_offset(L, false);
- bne(rs, r2, offset);
- break;
- // Signed comparison
- case greater:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bgtz(rs, offset);
- } else {
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
- }
- break;
- case greater_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bgez(rs, offset);
- } else {
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
- }
- break;
- case less:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bltz(rs, offset);
- } else {
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
- }
- break;
- case less_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- blez(rs, offset);
- } else {
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
- }
- break;
- // Unsigned comparison.
- case Ugreater:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bgtz(rs, offset);
- } else {
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Ugreater_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bgez(rs, offset);
- } else {
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
- }
- break;
- case Uless:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- b(offset);
- } else {
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Uless_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- b(offset);
- } else {
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
- }
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // Be careful to always use shifted_branch_offset only just before the
- // branch instruction, as the location will be remember for patching the
- // target.
- switch (cond) {
- case cc_always:
- offset = shifted_branch_offset(L, false);
- b(offset);
- break;
- case eq:
- r2 = scratch;
- li(r2, rt);
- offset = shifted_branch_offset(L, false);
- beq(rs, r2, offset);
- break;
- case ne:
- r2 = scratch;
- li(r2, rt);
- offset = shifted_branch_offset(L, false);
- bne(rs, r2, offset);
- break;
- // Signed comparison
- case greater:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- bgtz(rs, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
- }
- break;
- case greater_equal:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- bgez(rs, offset);
- } else if (is_int16(rt.imm32_)) {
- slti(scratch, rs, rt.imm32_);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
- }
- break;
- case less:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- bltz(rs, offset);
- } else if (is_int16(rt.imm32_)) {
- slti(scratch, rs, rt.imm32_);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
- }
- break;
- case less_equal:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- blez(rs, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
- }
- break;
- // Unsigned comparison.
- case Ugreater:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- bgtz(rs, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Ugreater_equal:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- bgez(rs, offset);
- } else if (is_int16(rt.imm32_)) {
- sltiu(scratch, rs, rt.imm32_);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
- }
- break;
- case Uless:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- b(offset);
- } else if (is_int16(rt.imm32_)) {
- sltiu(scratch, rs, rt.imm32_);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Uless_equal:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- b(offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
- }
- break;
- default:
- UNREACHABLE();
- }
- }
- // Check that offset could actually hold on an int16_t.
- ASSERT(is_int16(offset));
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
-
-
-// We need to use a bgezal or bltzal, but they can't be used directly with the
-// slt instructions. We could use sub or add instead but we would miss overflow
-// cases, so we keep slt and add an intermediate third instruction.
-void MacroAssembler::BranchAndLink(int16_t offset,
- BranchDelaySlot bdslot) {
- bal(offset);
-
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
-
-
-void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- Register r2 = no_reg;
- Register scratch = at;
-
- if (rt.is_reg()) {
- r2 = rt.rm_;
- } else if (cond != cc_always) {
- r2 = scratch;
- li(r2, rt);
- }
-
- switch (cond) {
- case cc_always:
- bal(offset);
- break;
- case eq:
- bne(rs, r2, 2);
- nop();
- bal(offset);
- break;
- case ne:
- beq(rs, r2, 2);
- nop();
- bal(offset);
- break;
-
- // Signed comparison
- case greater:
- slt(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case greater_equal:
- slt(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
- case less:
- slt(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case less_equal:
- slt(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
-
- // Unsigned comparison.
- case Ugreater:
- sltu(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case Ugreater_equal:
- sltu(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
- case Uless:
- sltu(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case Uless_equal:
- sltu(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
-
- default:
- UNREACHABLE();
- }
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
-
-
-void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
- bal(shifted_branch_offset(L, false));
-
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
-
-
-void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
-
- int32_t offset;
- Register r2 = no_reg;
- Register scratch = at;
- if (rt.is_reg()) {
- r2 = rt.rm_;
- } else if (cond != cc_always) {
- r2 = scratch;
- li(r2, rt);
- }
-
- switch (cond) {
- case cc_always:
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case eq:
- bne(rs, r2, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case ne:
- beq(rs, r2, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
-
- // Signed comparison
- case greater:
- slt(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
- break;
- case greater_equal:
- slt(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
- break;
- case less:
- slt(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
- break;
- case less_equal:
- slt(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
- break;
-
- // Unsigned comparison.
- case Ugreater:
- sltu(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
- break;
- case Ugreater_equal:
- sltu(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
- break;
- case Uless:
- sltu(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
- break;
- case Uless_equal:
- sltu(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
- break;
-
- default:
- UNREACHABLE();
- }
-
- // Check that offset could actually hold on an int16_t.
- ASSERT(is_int16(offset));
-
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
-
-
-void MacroAssembler::Jump(const Operand& target, BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (target.is_reg()) {
- jr(target.rm());
- } else {
- if (!MustUseReg(target.rmode_)) {
- j(target.imm32_);
- } else {
- li(t9, target);
- jr(t9);
- }
- }
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
-
-
-void MacroAssembler::Jump(const Operand& target,
- Condition cond, Register rs, const Operand& rt,
- BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- BRANCH_ARGS_CHECK(cond, rs, rt);
- if (target.is_reg()) {
- if (cond == cc_always) {
- jr(target.rm());
- } else {
- Branch(2, NegateCondition(cond), rs, rt);
- jr(target.rm());
- }
- } else { // Not register target.
- if (!MustUseReg(target.rmode_)) {
- if (cond == cc_always) {
- j(target.imm32_);
- } else {
- Branch(2, NegateCondition(cond), rs, rt);
- j(target.imm32_); // Will generate only one instruction.
- }
- } else { // MustUseReg(target)
- li(t9, target);
- if (cond == cc_always) {
- jr(t9);
- } else {
- Branch(2, NegateCondition(cond), rs, rt);
- jr(t9); // Will generate only one instruction.
- }
- }
- }
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
-
-
-// Note: To call gcc-compiled C code on mips, you must call thru t9.
-void MacroAssembler::Call(const Operand& target, BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (target.is_reg()) {
- jalr(target.rm());
- } else { // !target.is_reg()
- if (!MustUseReg(target.rmode_)) {
- jal(target.imm32_);
- } else { // MustUseReg(target)
- li(t9, target);
- jalr(t9);
- }
- }
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
-
-
-// Note: To call gcc-compiled C code on mips, you must call thru t9.
-void MacroAssembler::Call(const Operand& target,
- Condition cond, Register rs, const Operand& rt,
- BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- BRANCH_ARGS_CHECK(cond, rs, rt);
- if (target.is_reg()) {
- if (cond == cc_always) {
- jalr(target.rm());
- } else {
- Branch(2, NegateCondition(cond), rs, rt);
- jalr(target.rm());
- }
- } else { // !target.is_reg()
- if (!MustUseReg(target.rmode_)) {
- if (cond == cc_always) {
- jal(target.imm32_);
- } else {
- Branch(2, NegateCondition(cond), rs, rt);
- jal(target.imm32_); // Will generate only one instruction.
- }
- } else { // MustUseReg(target)
- li(t9, target);
- if (cond == cc_always) {
- jalr(t9);
- } else {
- Branch(2, NegateCondition(cond), rs, rt);
- jalr(t9); // Will generate only one instruction.
- }
- }
- }
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
-
-
-void MacroAssembler::Drop(int count,
- Condition cond,
- Register reg,
- const Operand& op) {
- if (count <= 0) {
- return;
- }
-
- Label skip;
-
- if (cond != al) {
- Branch(&skip, NegateCondition(cond), reg, op);
- }
-
- if (count > 0) {
- addiu(sp, sp, count * kPointerSize);
- }
-
- if (cond != al) {
- bind(&skip);
- }
-}
-
-
-void MacroAssembler::DropAndRet(int drop,
- Condition cond,
- Register r1,
- const Operand& r2) {
- // This is a workaround to make sure only one branch instruction is
- // generated. It relies on Drop and Ret not creating branches if
- // cond == cc_always.
- Label skip;
- if (cond != cc_always) {
- Branch(&skip, NegateCondition(cond), r1, r2);
- }
-
- Drop(drop);
- Ret();
-
- if (cond != cc_always) {
- bind(&skip);
- }
-}
-
-
-void MacroAssembler::Swap(Register reg1,
- Register reg2,
- Register scratch) {
- if (scratch.is(no_reg)) {
- Xor(reg1, reg1, Operand(reg2));
- Xor(reg2, reg2, Operand(reg1));
- Xor(reg1, reg1, Operand(reg2));
- } else {
- mov(scratch, reg1);
- mov(reg1, reg2);
- mov(reg2, scratch);
- }
-}
-
-
-void MacroAssembler::Call(Label* target) {
- BranchAndLink(target);
-}
-
-
-void MacroAssembler::Move(Register dst, Register src) {
- if (!dst.is(src)) {
- mov(dst, src);
- }
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-void MacroAssembler::DebugBreak() {
- ASSERT(allow_stub_calls());
- mov(a0, zero_reg);
- li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
- CEntryStub ces(1);
- Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
-}
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-// ---------------------------------------------------------------------------
-// Exception handling
-
-void MacroAssembler::PushTryHandler(CodeLocation try_location,
- HandlerType type) {
- // Adjust this code if not the case.
- ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
- // The return address is passed in register ra.
- if (try_location == IN_JAVASCRIPT) {
- if (type == TRY_CATCH_HANDLER) {
- li(t0, Operand(StackHandler::TRY_CATCH));
- } else {
- li(t0, Operand(StackHandler::TRY_FINALLY));
- }
- ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
- && StackHandlerConstants::kFPOffset == 2 * kPointerSize
- && StackHandlerConstants::kPCOffset == 3 * kPointerSize
- && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- // Save the current handler as the next handler.
- li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
- lw(t1, MemOperand(t2));
-
- addiu(sp, sp, -StackHandlerConstants::kSize);
- sw(ra, MemOperand(sp, 12));
- sw(fp, MemOperand(sp, 8));
- sw(t0, MemOperand(sp, 4));
- sw(t1, MemOperand(sp, 0));
-
- // Link this handler as the new current one.
- sw(sp, MemOperand(t2));
-
- } else {
- // Must preserve a0-a3, and s0 (argv).
- ASSERT(try_location == IN_JS_ENTRY);
- ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
- && StackHandlerConstants::kFPOffset == 2 * kPointerSize
- && StackHandlerConstants::kPCOffset == 3 * kPointerSize
- && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-
- // The frame pointer does not point to a JS frame so we save NULL
- // for fp. We expect the code throwing an exception to check fp
- // before dereferencing it to restore the context.
- li(t0, Operand(StackHandler::ENTRY));
-
- // Save the current handler as the next handler.
- li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
- lw(t1, MemOperand(t2));
-
- addiu(sp, sp, -StackHandlerConstants::kSize);
- sw(ra, MemOperand(sp, 12));
- sw(zero_reg, MemOperand(sp, 8));
- sw(t0, MemOperand(sp, 4));
- sw(t1, MemOperand(sp, 0));
-
- // Link this handler as the new current one.
- sw(sp, MemOperand(t2));
- }
-}
-
-
-void MacroAssembler::PopTryHandler() {
- ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
- pop(a1);
- Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
- li(at, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
- sw(a1, MemOperand(at));
-}
-
-
-void MacroAssembler::AllocateInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
- if (!FLAG_inline_new) {
- if (FLAG_debug_code) {
- // Trash the registers to simulate an allocation failure.
- li(result, 0x7091);
- li(scratch1, 0x7191);
- li(scratch2, 0x7291);
- }
- jmp(gc_required);
- return;
- }
-
- ASSERT(!result.is(scratch1));
- ASSERT(!result.is(scratch2));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!scratch1.is(t9));
- ASSERT(!scratch2.is(t9));
- ASSERT(!result.is(t9));
-
- // Make object size into bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- object_size *= kPointerSize;
- }
- ASSERT_EQ(0, object_size & kObjectAlignmentMask);
-
- // Check relative positions of allocation top and limit addresses.
- // ARM adds additional checks to make sure the ldm instruction can be
- // used. On MIPS we don't have ldm so we don't need additional checks either.
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
- intptr_t top =
- reinterpret_cast<intptr_t>(new_space_allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
- ASSERT((limit - top) == kPointerSize);
-
- // Set up allocation top address and object size registers.
- Register topaddr = scratch1;
- Register obj_size_reg = scratch2;
- li(topaddr, Operand(new_space_allocation_top));
- li(obj_size_reg, Operand(object_size));
-
- // This code stores a temporary value in t9.
- if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into t9.
- lw(result, MemOperand(topaddr));
- lw(t9, MemOperand(topaddr, kPointerSize));
- } else {
- if (FLAG_debug_code) {
- // Assert that result actually contains top on entry. t9 is used
- // immediately below so this use of t9 does not cause difference with
- // respect to register content between debug and release mode.
- lw(t9, MemOperand(topaddr));
- Check(eq, "Unexpected allocation top", result, Operand(t9));
- }
- // Load allocation limit into t9. Result already contains allocation top.
- lw(t9, MemOperand(topaddr, limit - top));
- }
-
- // Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top.
- Addu(scratch2, result, Operand(obj_size_reg));
- Branch(gc_required, Ugreater, scratch2, Operand(t9));
- sw(scratch2, MemOperand(topaddr));
-
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- Addu(result, result, Operand(kHeapObjectTag));
- }
-}
-
-
-void MacroAssembler::AllocateInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
- if (!FLAG_inline_new) {
- if (FLAG_debug_code) {
- // Trash the registers to simulate an allocation failure.
- li(result, 0x7091);
- li(scratch1, 0x7191);
- li(scratch2, 0x7291);
- }
- jmp(gc_required);
- return;
- }
-
- ASSERT(!result.is(scratch1));
- ASSERT(!result.is(scratch2));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
-
- // Check relative positions of allocation top and limit addresses.
- // ARM adds additional checks to make sure the ldm instruction can be
- // used. On MIPS we don't have ldm so we don't need additional checks either.
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
- intptr_t top =
- reinterpret_cast<intptr_t>(new_space_allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
- ASSERT((limit - top) == kPointerSize);
-
- // Set up allocation top address and object size registers.
- Register topaddr = scratch1;
- li(topaddr, Operand(new_space_allocation_top));
-
- // This code stores a temporary value in t9.
- if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into t9.
- lw(result, MemOperand(topaddr));
- lw(t9, MemOperand(topaddr, kPointerSize));
- } else {
- if (FLAG_debug_code) {
- // Assert that result actually contains top on entry. t9 is used
- // immediately below so this use of t9 does not cause difference with
- // respect to register content between debug and release mode.
- lw(t9, MemOperand(topaddr));
- Check(eq, "Unexpected allocation top", result, Operand(t9));
- }
- // Load allocation limit into t9. Result already contains allocation top.
- lw(t9, MemOperand(topaddr, limit - top));
- }
-
- // Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top. Object size may be in words so a shift is
- // required to get the number of bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- sll(scratch2, object_size, kPointerSizeLog2);
- Addu(scratch2, result, scratch2);
- } else {
- Addu(scratch2, result, Operand(object_size));
- }
- Branch(gc_required, Ugreater, scratch2, Operand(t9));
-
- // Update allocation top. result temporarily holds the new top.
- if (FLAG_debug_code) {
- And(t9, scratch2, Operand(kObjectAlignmentMask));
- Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
- }
- sw(scratch2, MemOperand(topaddr));
-
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- Addu(result, result, Operand(kHeapObjectTag));
- }
-}
-
-
-void MacroAssembler::UndoAllocationInNewSpace(Register object,
- Register scratch) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Make sure the object has no tag before resetting top.
- And(object, object, Operand(~kHeapObjectTagMask));
-#ifdef DEBUG
- // Check that the object un-allocated is below the current top.
- li(scratch, Operand(new_space_allocation_top));
- lw(scratch, MemOperand(scratch));
- Check(less, "Undo allocation of non allocated memory",
- object, Operand(scratch));
-#endif
- // Write the address of the object to un-allocate as the current top.
- li(scratch, Operand(new_space_allocation_top));
- sw(object, MemOperand(scratch));
-}
-
-
-void MacroAssembler::AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- sll(scratch1, length, 1); // Length in bytes, not chars.
- addiu(scratch1, scratch1,
- kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
- And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate two-byte string in new space.
- AllocateInNewSpace(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string
- // while observing object alignment.
- ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
- ASSERT(kCharSize == 1);
- addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
- And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate ASCII string in new space.
- AllocateInNewSpace(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kAsciiStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
- InitializeNewString(result,
- length,
- Heap::kConsStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
- InitializeNewString(result,
- length,
- Heap::kConsAsciiStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-// Allocates a heap number or jumps to the label if the young space is full and
-// a scavenge is needed.
-void MacroAssembler::AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* need_gc) {
- // Allocate an object in the heap for the heap number and tag it as a heap
- // object.
- AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch1,
- scratch2,
- need_gc,
- TAG_OBJECT);
-
- // Store heap number map in the allocated object.
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
-}
-
-
-void MacroAssembler::AllocateHeapNumberWithValue(Register result,
- FPURegister value,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- AllocateHeapNumber(result, scratch1, scratch2, t6, gc_required);
- sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
-}
-
-
-// Copies a fixed number of fields of heap objects from src to dst.
-void MacroAssembler::CopyFields(Register dst,
- Register src,
- RegList temps,
- int field_count) {
- ASSERT((temps & dst.bit()) == 0);
- ASSERT((temps & src.bit()) == 0);
- // Primitive implementation using only one temporary register.
-
- Register tmp = no_reg;
- // Find a temp register in temps list.
- for (int i = 0; i < kNumRegisters; i++) {
- if ((temps & (1 << i)) != 0) {
- tmp.code_ = i;
- break;
- }
- }
- ASSERT(!tmp.is(no_reg));
-
- for (int i = 0; i < field_count; i++) {
- lw(tmp, FieldMemOperand(src, i * kPointerSize));
- sw(tmp, FieldMemOperand(dst, i * kPointerSize));
- }
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* fail,
- bool is_heap_object) {
- if (!is_heap_object) {
- JumpIfSmi(obj, fail);
- }
- lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- li(at, Operand(map));
- Branch(fail, ne, scratch, Operand(at));
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Register scratch,
- Heap::RootListIndex index,
- Label* fail,
- bool is_heap_object) {
- if (!is_heap_object) {
- JumpIfSmi(obj, fail);
- }
- lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- LoadRoot(at, index);
- Branch(fail, ne, scratch, Operand(at));
-}
-
-
-// -----------------------------------------------------------------------------
-// JavaScript invokes
-
-void MacroAssembler::InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
- Label* done,
- InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
- bool definitely_matches = false;
- Label regular_invoke;
-
- // Check whether the expected and actual arguments count match. If not,
- // setup registers according to contract with ArgumentsAdaptorTrampoline:
- // a0: actual arguments count
- // a1: function (passed through to callee)
- // a2: expected arguments count
- // a3: callee code entry
-
- // The code below is made a lot easier because the calling code already sets
- // up actual and expected registers according to the contract if values are
- // passed in registers.
- ASSERT(actual.is_immediate() || actual.reg().is(a0));
- ASSERT(expected.is_immediate() || expected.reg().is(a2));
- ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
-
- if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
- if (expected.immediate() == actual.immediate()) {
- definitely_matches = true;
- } else {
- li(a0, Operand(actual.immediate()));
- const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
- if (expected.immediate() == sentinel) {
- // Don't worry about adapting arguments for builtins that
- // don't want that done. Skip adaption code by making it look
- // like we have a match between expected and actual number of
- // arguments.
- definitely_matches = true;
- } else {
- li(a2, Operand(expected.immediate()));
- }
- }
- } else {
- if (actual.is_immediate()) {
- Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
- li(a0, Operand(actual.immediate()));
- } else {
- Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
- }
- }
-
- if (!definitely_matches) {
- if (!code_constant.is_null()) {
- li(a3, Operand(code_constant));
- addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
- }
-
- Handle<Code> adaptor =
- isolate()->builtins()->ArgumentsAdaptorTrampoline();
- if (flag == CALL_FUNCTION) {
- Call(adaptor, RelocInfo::CODE_TARGET);
- if (post_call_generator != NULL) post_call_generator->Generate();
- jmp(done);
- } else {
- Jump(adaptor, RelocInfo::CODE_TARGET);
- }
- bind(&regular_invoke);
- }
-}
-
-
-void MacroAssembler::InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
- Label done;
-
- InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
- post_call_generator);
- if (flag == CALL_FUNCTION) {
- Call(code);
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- Jump(code);
- }
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
-}
-
-
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag) {
- Label done;
-
- InvokePrologue(expected, actual, code, no_reg, &done, flag);
- if (flag == CALL_FUNCTION) {
- Call(code, rmode);
- } else {
- Jump(code, rmode);
- }
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
-}
-
-
-void MacroAssembler::InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
- // Contract with called JS functions requires that function is passed in a1.
- ASSERT(function.is(a1));
- Register expected_reg = a2;
- Register code_reg = a3;
-
- lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- lw(expected_reg,
- FieldMemOperand(code_reg,
- SharedFunctionInfo::kFormalParameterCountOffset));
- sra(expected_reg, expected_reg, kSmiTagSize);
- lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
-
- ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, post_call_generator);
-}
-
-
-void MacroAssembler::InvokeFunction(JSFunction* function,
- const ParameterCount& actual,
- InvokeFlag flag) {
- ASSERT(function->is_compiled());
-
- // Get the function and setup the context.
- li(a1, Operand(Handle<JSFunction>(function)));
- lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // Invoke the cached code.
- Handle<Code> code(function->code());
- ParameterCount expected(function->shared()->formal_parameter_count());
- if (V8::UseCrankshaft()) {
- UNIMPLEMENTED_MIPS();
- } else {
- InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
- }
-}
-
-
-void MacroAssembler::IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail) {
- lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
- IsInstanceJSObjectType(map, scratch, fail);
-}
-
-
-void MacroAssembler::IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail) {
- lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- Branch(fail, lt, scratch, Operand(FIRST_JS_OBJECT_TYPE));
- Branch(fail, gt, scratch, Operand(LAST_JS_OBJECT_TYPE));
-}
-
-
-void MacroAssembler::IsObjectJSStringType(Register object,
- Register scratch,
- Label* fail) {
- ASSERT(kNotStringTag != 0);
-
- lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- And(scratch, scratch, Operand(kIsNotStringMask));
- Branch(fail, ne, scratch, Operand(zero_reg));
-}
-
-
-// ---------------------------------------------------------------------------
-// Support functions.
-
-
-void MacroAssembler::TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss) {
- // Check that the receiver isn't a smi.
- JumpIfSmi(function, miss);
-
- // Check that the function really is a function. Load map into result reg.
- GetObjectType(function, result, scratch);
- Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
-
- // Make sure that the function has an instance prototype.
- Label non_instance;
- lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
- Branch(&non_instance, ne, scratch, Operand(zero_reg));
-
- // Get the prototype or initial map from the function.
- lw(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it and
- // simply miss the cache instead. This will allow us to allocate a
- // prototype object on-demand in the runtime system.
- LoadRoot(t8, Heap::kTheHoleValueRootIndex);
- Branch(miss, eq, result, Operand(t8));
-
- // If the function does not have an initial map, we're done.
- Label done;
- GetObjectType(result, scratch, scratch);
- Branch(&done, ne, scratch, Operand(MAP_TYPE));
-
- // Get the prototype from the initial map.
- lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
- jmp(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- lw(result, FieldMemOperand(result, Map::kConstructorOffset));
-
- // All done.
- bind(&done);
-}
-
-
-void MacroAssembler::GetObjectType(Register object,
- Register map,
- Register type_reg) {
- lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
- lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
-}
-
-
-// -----------------------------------------------------------------------------
-// Runtime calls
-
-void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
- Register r1, const Operand& r2) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2);
-}
-
-
-void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- addiu(sp, sp, num_arguments * kPointerSize);
- }
- LoadRoot(v0, Heap::kUndefinedValueRootIndex);
-}
-
-
-void MacroAssembler::IndexFromHash(Register hash,
- Register index) {
- // If the hash field contains an array index pick it out. The assert checks
- // that the constants for the maximum number of digits for an array index
- // cached in the hash field and the number of bits reserved for it does not
- // conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- STATIC_ASSERT(kSmiTag == 0);
- Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
- sll(index, hash, kSmiTagSize);
-}
-
-
-void MacroAssembler::ObjectToDoubleFPURegister(Register object,
- FPURegister result,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* not_number,
- ObjectToDoubleFlags flags) {
- Label done;
- if ((flags & OBJECT_NOT_SMI) == 0) {
- Label not_smi;
- JumpIfNotSmi(object, &not_smi);
- // Remove smi tag and convert to double.
- sra(scratch1, object, kSmiTagSize);
- mtc1(scratch1, result);
- cvt_d_w(result, result);
- Branch(&done);
- bind(&not_smi);
- }
- // Check for heap number and load double value from it.
- lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
- Branch(not_number, ne, scratch1, Operand(heap_number_map));
-
- if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
- // If exponent is all ones the number is either a NaN or +/-Infinity.
- Register exponent = scratch1;
- Register mask_reg = scratch2;
- lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
- li(mask_reg, HeapNumber::kExponentMask);
-
- And(exponent, exponent, mask_reg);
- Branch(not_number, eq, exponent, Operand(mask_reg));
- }
- ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
- bind(&done);
-}
-
-
-
-void MacroAssembler::SmiToDoubleFPURegister(Register smi,
- FPURegister value,
- Register scratch1) {
- sra(scratch1, smi, kSmiTagSize);
- mtc1(scratch1, value);
- cvt_d_w(value, value);
-}
-
-
-void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
- // All parameters are on the stack. v0 has the return value after call.
-
- // If the expected number of arguments of the runtime function is
- // constant, we check that the actual number of arguments match the
- // expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- li(a0, num_arguments);
- li(a1, Operand(ExternalReference(f, isolate())));
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- li(a0, Operand(function->nargs));
- li(a1, Operand(ExternalReference(function, isolate())));
- CEntryStub stub(1);
- stub.SaveDoubles();
- CallStub(&stub);
-}
-
-
-void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments);
-}
-
-
-void MacroAssembler::CallExternalReference(const ExternalReference& ext,
- int num_arguments) {
- li(a0, Operand(num_arguments));
- li(a1, Operand(ext));
-
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- li(a0, Operand(num_arguments));
- JumpToExternalReference(ext);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
-}
-
-
-void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
- li(a1, Operand(builtin));
- CEntryStub stub(1);
- Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flags,
- PostCallGenerator* post_call_generator) {
- GetBuiltinEntry(t9, id);
- if (flags == CALL_JS) {
- Call(t9);
- if (post_call_generator != NULL) post_call_generator->Generate();
- } else {
- ASSERT(flags == JUMP_JS);
- Jump(t9);
- }
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- Builtins::JavaScript id) {
- // Load the builtins object into target register.
- lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
- // Load the JavaScript builtin function from the builtins object.
- lw(target, FieldMemOperand(target,
- JSBuiltinsObject::OffsetOfFunctionWithId(id)));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(a1));
- GetBuiltinFunction(a1, id);
- // Load the code entry point from the builtins object.
- lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
-}
-
-
-void MacroAssembler::SetCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- if (FLAG_native_code_counters && counter->Enabled()) {
- li(scratch1, Operand(value));
- li(scratch2, Operand(ExternalReference(counter)));
- sw(scratch1, MemOperand(scratch2));
- }
-}
-
-
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- li(scratch2, Operand(ExternalReference(counter)));
- lw(scratch1, MemOperand(scratch2));
- Addu(scratch1, scratch1, Operand(value));
- sw(scratch1, MemOperand(scratch2));
- }
-}
-
-
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- li(scratch2, Operand(ExternalReference(counter)));
- lw(scratch1, MemOperand(scratch2));
- Subu(scratch1, scratch1, Operand(value));
- sw(scratch1, MemOperand(scratch2));
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Debugging
-
-void MacroAssembler::Assert(Condition cc, const char* msg,
- Register rs, Operand rt) {
- if (FLAG_debug_code)
- Check(cc, msg, rs, rt);
-}
-
-
-void MacroAssembler::AssertRegisterIsRoot(Register reg,
- Heap::RootListIndex index) {
- if (FLAG_debug_code) {
- LoadRoot(at, index);
- Check(eq, "Register did not match expected root", reg, Operand(at));
- }
-}
-
-
-void MacroAssembler::AssertFastElements(Register elements) {
- if (FLAG_debug_code) {
- ASSERT(!elements.is(at));
- Label ok;
- Push(elements);
- lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
- LoadRoot(at, Heap::kFixedArrayMapRootIndex);
- Branch(&ok, eq, elements, Operand(at));
- LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
- Branch(&ok, eq, elements, Operand(at));
- Abort("JSObject with fast elements map has slow elements");
- bind(&ok);
- Pop(elements);
- }
-}
-
-
-void MacroAssembler::Check(Condition cc, const char* msg,
- Register rs, Operand rt) {
- Label L;
- Branch(&L, cc, rs, rt);
- Abort(msg);
- // will not return here
- bind(&L);
-}
-
-
-void MacroAssembler::Abort(const char* msg) {
- Label abort_start;
- bind(&abort_start);
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
-#ifdef DEBUG
- if (msg != NULL) {
- RecordComment("Abort message: ");
- RecordComment(msg);
- }
-#endif
- // Disable stub call restrictions to always allow calls to abort.
- AllowStubCallsScope allow_scope(this, true);
-
- li(a0, Operand(p0));
- Push(a0);
- li(a0, Operand(Smi::FromInt(p1 - p0)));
- Push(a0);
- CallRuntime(Runtime::kAbort, 2);
- // will not return here
- if (is_trampoline_pool_blocked()) {
- // If the calling code cares about the exact number of
- // instructions generated, we insert padding here to keep the size
- // of the Abort macro constant.
- // Currently in debug mode with debug_code enabled the number of
- // generated instructions is 14, so we use this as a maximum value.
- static const int kExpectedAbortInstructions = 14;
- int abort_instructions = InstructionsGeneratedSince(&abort_start);
- ASSERT(abort_instructions <= kExpectedAbortInstructions);
- while (abort_instructions++ < kExpectedAbortInstructions) {
- nop();
- }
- }
-}
-
-
-void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
- if (context_chain_length > 0) {
- // Move up the chain of contexts to the context containing the slot.
- lw(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
- // Load the function context (which is the incoming, outer context).
- lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
- for (int i = 1; i < context_chain_length; i++) {
- lw(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
- lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
- }
- // The context may be an intermediate context, not a function context.
- lw(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- } else { // Slot is in the current function context.
- // The context may be an intermediate context, not a function context.
- lw(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- }
-}
-
-
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
- lw(function, FieldMemOperand(function,
- GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
- lw(function, MemOperand(function, Context::SlotOffset(index)));
-}
-
-
-void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
- Register map,
- Register scratch) {
- // Load the initial map. The global functions all have initial maps.
- lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (FLAG_debug_code) {
- Label ok, fail;
- CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false);
- Branch(&ok);
- bind(&fail);
- Abort("Global functions must have initial map");
- bind(&ok);
- }
-}
-
-
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
- addiu(sp, sp, -5 * kPointerSize);
- li(t8, Operand(Smi::FromInt(type)));
- li(t9, Operand(CodeObject()));
- sw(ra, MemOperand(sp, 4 * kPointerSize));
- sw(fp, MemOperand(sp, 3 * kPointerSize));
- sw(cp, MemOperand(sp, 2 * kPointerSize));
- sw(t8, MemOperand(sp, 1 * kPointerSize));
- sw(t9, MemOperand(sp, 0 * kPointerSize));
- addiu(fp, sp, 3 * kPointerSize);
-}
-
-
-void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- mov(sp, fp);
- lw(fp, MemOperand(sp, 0 * kPointerSize));
- lw(ra, MemOperand(sp, 1 * kPointerSize));
- addiu(sp, sp, 2 * kPointerSize);
-}
-
-
-void MacroAssembler::EnterExitFrame(Register hold_argc,
- Register hold_argv,
- Register hold_function,
- bool save_doubles) {
- // a0 is argc.
- sll(t8, a0, kPointerSizeLog2);
- addu(hold_argv, sp, t8);
- addiu(hold_argv, hold_argv, -kPointerSize);
-
- // Compute callee's stack pointer before making changes and save it as
- // t9 register so that it is restored as sp register on exit, thereby
- // popping the args.
- // t9 = sp + kPointerSize * #args
- addu(t9, sp, t8);
-
- // Compute the argv pointer and keep it in a callee-saved register.
- // This only seems to be needed for crankshaft and may cause problems
- // so it's disabled for now.
- // Subu(s6, t9, Operand(kPointerSize));
-
- // Align the stack at this point.
- AlignStack(0);
-
- // Save registers.
- addiu(sp, sp, -12);
- sw(t9, MemOperand(sp, 8));
- sw(ra, MemOperand(sp, 4));
- sw(fp, MemOperand(sp, 0));
- mov(fp, sp); // Setup new frame pointer.
-
- li(t8, Operand(CodeObject()));
- Push(t8); // Accessed from ExitFrame::code_slot.
-
- // Save the frame pointer and the context in top.
- li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
- sw(fp, MemOperand(t8));
- li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
- sw(cp, MemOperand(t8));
-
- // Setup argc and the builtin function in callee-saved registers.
- mov(hold_argc, a0);
- mov(hold_function, a1);
-
- // Optionally save all double registers.
- if (save_doubles) {
-#ifdef DEBUG
- int frame_alignment = ActivationFrameAlignment();
-#endif
- // The stack alignment code above made sp unaligned, so add space for one
- // more double register and use aligned addresses.
- ASSERT(kDoubleSize == frame_alignment);
- // Mark the frame as containing doubles by pushing a non-valid return
- // address, i.e. 0.
- ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize);
- push(zero_reg); // Marker and alignment word.
- int space = FPURegister::kNumRegisters * kDoubleSize + kPointerSize;
- Subu(sp, sp, Operand(space));
- // Remember: we only need to save every 2nd double FPU value.
- for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
- FPURegister reg = FPURegister::from_code(i);
- sdc1(reg, MemOperand(sp, i * kDoubleSize + kPointerSize));
- }
- // Note that f0 will be accessible at fp - 2*kPointerSize -
- // FPURegister::kNumRegisters * kDoubleSize, since the code slot and the
- // alignment word were pushed after the fp.
- }
-}
-
-
-void MacroAssembler::LeaveExitFrame(bool save_doubles) {
- // Optionally restore all double registers.
- if (save_doubles) {
- // TODO(regis): Use vldrm instruction.
- // Remember: we only need to restore every 2nd double FPU value.
- for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
- FPURegister reg = FPURegister::from_code(i);
- // Register f30-f31 is just below the marker.
- const int offset = ExitFrameConstants::kMarkerOffset;
- ldc1(reg, MemOperand(fp,
- (i - FPURegister::kNumRegisters) * kDoubleSize + offset));
- }
- }
-
- // Clear top frame.
- li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
- sw(zero_reg, MemOperand(t8));
-
- // Restore current context from top and clear it in debug mode.
- li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
- lw(cp, MemOperand(t8));
-#ifdef DEBUG
- sw(a3, MemOperand(t8));
-#endif
-
- // Pop the arguments, restore registers, and return.
- mov(sp, fp); // Respect ABI stack constraint.
- lw(fp, MemOperand(sp, 0));
- lw(ra, MemOperand(sp, 4));
- lw(sp, MemOperand(sp, 8));
- jr(ra);
- nop(); // Branch delay slot nop.
-}
-
-
-void MacroAssembler::InitializeNewString(Register string,
- Register length,
- Heap::RootListIndex map_index,
- Register scratch1,
- Register scratch2) {
- sll(scratch1, length, kSmiTagSize);
- LoadRoot(scratch2, map_index);
- sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
- li(scratch1, Operand(String::kEmptyHashField));
- sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
- sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
-}
-
-
-int MacroAssembler::ActivationFrameAlignment() {
-#if defined(V8_HOST_ARCH_MIPS)
- // Running on the real platform. Use the alignment as mandated by the local
- // environment.
- // Note: This will break if we ever start generating snapshots on one Mips
- // platform for another Mips platform with a different alignment.
- return OS::ActivationFrameAlignment();
-#else // defined(V8_HOST_ARCH_MIPS)
- // If we are using the simulator then we should always align to the expected
- // alignment. As the simulator is used to generate snapshots we do not know
- // if the target platform will need alignment, so this is controlled from a
- // flag.
- return FLAG_sim_stack_alignment;
-#endif // defined(V8_HOST_ARCH_MIPS)
-}
-
-
-void MacroAssembler::AlignStack(int offset) {
- // On MIPS an offset of 0 aligns to 0 modulo 8 bytes,
- // and an offset of 1 aligns to 4 modulo 8 bytes.
-#if defined(V8_HOST_ARCH_MIPS)
- // Running on the real platform. Use the alignment as mandated by the local
- // environment.
- // Note: This will break if we ever start generating snapshots on one MIPS
- // platform for another MIPS platform with a different alignment.
- int activation_frame_alignment = OS::ActivationFrameAlignment();
-#else // defined(V8_HOST_ARCH_MIPS)
- // If we are using the simulator then we should always align to the expected
- // alignment. As the simulator is used to generate snapshots we do not know
- // if the target platform will need alignment, so we will always align at
- // this point here.
- int activation_frame_alignment = 2 * kPointerSize;
-#endif // defined(V8_HOST_ARCH_MIPS)
- if (activation_frame_alignment != kPointerSize) {
- // This code needs to be made more general if this assert doesn't hold.
- ASSERT(activation_frame_alignment == 2 * kPointerSize);
- if (offset == 0) {
- andi(t8, sp, activation_frame_alignment - 1);
- Push(zero_reg, eq, t8, zero_reg);
- } else {
- andi(t8, sp, activation_frame_alignment - 1);
- addiu(t8, t8, -4);
- Push(zero_reg, eq, t8, zero_reg);
- }
- }
-}
-
-
-
-void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
- Register reg,
- Register scratch,
- Label* not_power_of_two_or_zero) {
- Subu(scratch, reg, Operand(1));
- Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
- scratch, Operand(zero_reg));
- and_(at, scratch, reg); // In the delay slot.
- Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
-}
-
-
-void MacroAssembler::JumpIfNotBothSmi(Register reg1,
- Register reg2,
- Label* on_not_both_smi) {
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(1, kSmiTagMask);
- or_(at, reg1, reg2);
- andi(at, at, kSmiTagMask);
- Branch(on_not_both_smi, ne, at, Operand(zero_reg));
-}
-
-
-void MacroAssembler::JumpIfEitherSmi(Register reg1,
- Register reg2,
- Label* on_either_smi) {
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(1, kSmiTagMask);
- // Both Smi tags must be 1 (not Smi).
- and_(at, reg1, reg2);
- andi(at, at, kSmiTagMask);
- Branch(on_either_smi, eq, at, Operand(zero_reg));
-}
-
-
-void MacroAssembler::AbortIfSmi(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- andi(at, object, kSmiTagMask);
- Assert(ne, "Operand is a smi", at, Operand(zero_reg));
-}
-
-
-void MacroAssembler::AbortIfNotSmi(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- andi(at, object, kSmiTagMask);
- Assert(eq, "Operand is a smi", at, Operand(zero_reg));
-}
-
-
-void MacroAssembler::AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message) {
- ASSERT(!src.is(at));
- LoadRoot(at, root_value_index);
- Assert(eq, message, src, Operand(at));
-}
-
-
-void MacroAssembler::JumpIfNotHeapNumber(Register object,
- Register heap_number_map,
- Register scratch,
- Label* on_not_heap_number) {
- lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
-}
-
-
-void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- // Test that both first and second are sequential ASCII strings.
- // Assume that they are non-smis.
- lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
- lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
- lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
-
- JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
- scratch2,
- scratch1,
- scratch2,
- failure);
-}
-
-
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- // Check that neither is a smi.
- STATIC_ASSERT(kSmiTag == 0);
- And(scratch1, first, Operand(second));
- And(scratch1, scratch1, Operand(kSmiTagMask));
- Branch(failure, eq, scratch1, Operand(zero_reg));
- JumpIfNonSmisNotBothSequentialAsciiStrings(first,
- second,
- scratch1,
- scratch2,
- failure);
-}
-
-
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
- andi(scratch1, first, kFlatAsciiStringMask);
- Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
- andi(scratch2, second, kFlatAsciiStringMask);
- Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
-}
-
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure) {
- int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- And(scratch, type, Operand(kFlatAsciiStringMask));
- Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
-}
-
-
-static const int kRegisterPassedArguments = 4;
-
-void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
- int frame_alignment = ActivationFrameAlignment();
-
- // Reserve space for Isolate address which is always passed as last parameter
- num_arguments += 1;
-
- // Up to four simple arguments are passed in registers a0..a3.
- // Those four arguments must have reserved argument slots on the stack for
- // mips, even though those argument slots are not normally used.
- // Remaining arguments are pushed on the stack, above (higher address than)
- // the argument slots.
- ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
- int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
- 0 : num_arguments - kRegisterPassedArguments) +
- (StandardFrameConstants::kCArgsSlotsSize /
- kPointerSize);
- if (frame_alignment > kPointerSize) {
- // Make stack end at alignment and make room for num_arguments - 4 words
- // and the original value of sp.
- mov(scratch, sp);
- Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
- ASSERT(IsPowerOf2(frame_alignment));
- And(sp, sp, Operand(-frame_alignment));
- sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
- } else {
- Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
- }
-}
-
-
-void MacroAssembler::CallCFunction(ExternalReference function,
- int num_arguments) {
- CallCFunctionHelper(no_reg, function, at, num_arguments);
-}
-
-
-void MacroAssembler::CallCFunction(Register function,
- Register scratch,
- int num_arguments) {
- CallCFunctionHelper(function,
- ExternalReference::the_hole_value_location(isolate()),
- scratch,
- num_arguments);
-}
-
-
-void MacroAssembler::CallCFunctionHelper(Register function,
- ExternalReference function_reference,
- Register scratch,
- int num_arguments) {
- // Push Isolate address as the last argument.
- if (num_arguments < kRegisterPassedArguments) {
- Register arg_to_reg[] = {a0, a1, a2, a3};
- Register r = arg_to_reg[num_arguments];
- li(r, Operand(ExternalReference::isolate_address()));
- } else {
- int stack_passed_arguments = num_arguments - kRegisterPassedArguments +
- (StandardFrameConstants::kCArgsSlotsSize /
- kPointerSize);
- // Push Isolate address on the stack after the arguments.
- li(scratch, Operand(ExternalReference::isolate_address()));
- sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
- }
- num_arguments += 1;
-
- // Make sure that the stack is aligned before calling a C function unless
- // running in the simulator. The simulator has its own alignment check which
- // provides more information.
- // The argument stots are presumed to have been set up by
- // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
-
-#if defined(V8_HOST_ARCH_MIPS)
- if (emit_debug_code()) {
- int frame_alignment = OS::ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment > kPointerSize) {
- ASSERT(IsPowerOf2(frame_alignment));
- Label alignment_as_expected;
- And(at, sp, Operand(frame_alignment_mask));
- Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
- // Don't use Check here, as it will call Runtime_Abort possibly
- // re-entering here.
- stop("Unexpected alignment in CallCFunction");
- bind(&alignment_as_expected);
- }
- }
-#endif // V8_HOST_ARCH_MIPS
-
- // Just call directly. The function called cannot cause a GC, or
- // allow preemption, so the return address in the link register
- // stays correct.
- if (!function.is(t9)) {
- mov(t9, function);
- function = t9;
- }
-
- if (function.is(no_reg)) {
- li(t9, Operand(function_reference));
- function = t9;
- }
-
- Call(function);
-
- ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
- int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
- 0 : num_arguments - kRegisterPassedArguments) +
- (StandardFrameConstants::kCArgsSlotsSize /
- kPointerSize);
-
- if (OS::ActivationFrameAlignment() > kPointerSize) {
- lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
- } else {
- Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
- }
-}
-
-
-#undef BRANCH_ARGS_CHECK
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-CodePatcher::CodePatcher(byte* address, int instructions)
- : address_(address),
- instructions_(instructions),
- size_(instructions * Assembler::kInstrSize),
- masm_(address, size_ + Assembler::kGap) {
- // Create a new macro assembler pointing to the address of the code to patch.
- // The size is adjusted with kGap on order for the assembler to generate size
- // bytes of instructions without failing with buffer size constraints.
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-CodePatcher::~CodePatcher() {
- // Indicate that code has changed.
- CPU::FlushICache(address_, size_);
-
- // Check that the code was patched as expected.
- ASSERT(masm_.pc_ == address_ + size_);
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-void CodePatcher::Emit(Instr x) {
- masm()->emit(x);
-}
-
-
-void CodePatcher::Emit(Address addr) {
- masm()->emit(reinterpret_cast<Instr>(addr));
-}
-
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/macro-assembler-mips.h b/src/3rdparty/v8/src/mips/macro-assembler-mips.h
deleted file mode 100644
index 7ff9e17..0000000
--- a/src/3rdparty/v8/src/mips/macro-assembler-mips.h
+++ /dev/null
@@ -1,1058 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
-#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
-
-#include "assembler.h"
-#include "mips/assembler-mips.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declaration.
-class JumpTarget;
-class PostCallGenerator;
-
-// Reserved Register Usage Summary.
-//
-// Registers t8, t9, and at are reserved for use by the MacroAssembler.
-//
-// The programmer should know that the MacroAssembler may clobber these three,
-// but won't touch other registers except in special cases.
-//
-// Per the MIPS ABI, register t9 must be used for indirect function call
-// via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
-// trying to update gp register for position-independent-code. Whenever
-// MIPS generated code calls C code, it must be via t9 register.
-
-// Registers aliases
-// cp is assumed to be a callee saved register.
-const Register roots = s6; // Roots array pointer.
-const Register cp = s7; // JavaScript context pointer
-const Register fp = s8_fp; // Alias fp
-// Register used for condition evaluation.
-const Register condReg1 = s4;
-const Register condReg2 = s5;
-
-enum InvokeJSFlags {
- CALL_JS,
- JUMP_JS
-};
-
-
-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
- // No special flags.
- NO_ALLOCATION_FLAGS = 0,
- // Return the pointer to the allocated already tagged as a heap object.
- TAG_OBJECT = 1 << 0,
- // The content of the result register already contains the allocation top in
- // new space.
- RESULT_CONTAINS_TOP = 1 << 1,
- // Specify that the requested size of the space to allocate is specified in
- // words instead of bytes.
- SIZE_IN_WORDS = 1 << 2
-};
-
-// Flags used for the ObjectToDoubleFPURegister function.
-enum ObjectToDoubleFlags {
- // No special flags.
- NO_OBJECT_TO_DOUBLE_FLAGS = 0,
- // Object is known to be a non smi.
- OBJECT_NOT_SMI = 1 << 0,
- // Don't load NaNs or infinities, branch to the non number case instead.
- AVOID_NANS_AND_INFINITIES = 1 << 1
-};
-
-// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
-enum BranchDelaySlot {
- USE_DELAY_SLOT,
- PROTECT
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler: public Assembler {
- public:
- MacroAssembler(void* buffer, int size);
-
-// Arguments macros
-#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
-#define COND_ARGS cond, r1, r2
-
-// ** Prototypes
-
-// * Prototypes for functions with no target (eg Ret()).
-#define DECLARE_NOTARGET_PROTOTYPE(Name) \
- void Name(BranchDelaySlot bd = PROTECT); \
- void Name(COND_TYPED_ARGS, BranchDelaySlot bd = PROTECT); \
- inline void Name(BranchDelaySlot bd, COND_TYPED_ARGS) { \
- Name(COND_ARGS, bd); \
- }
-
-// * Prototypes for functions with a target.
-
-// Cases when relocation may be needed.
-#define DECLARE_RELOC_PROTOTYPE(Name, target_type) \
- void Name(target_type target, \
- RelocInfo::Mode rmode, \
- BranchDelaySlot bd = PROTECT); \
- inline void Name(BranchDelaySlot bd, \
- target_type target, \
- RelocInfo::Mode rmode) { \
- Name(target, rmode, bd); \
- } \
- void Name(target_type target, \
- RelocInfo::Mode rmode, \
- COND_TYPED_ARGS, \
- BranchDelaySlot bd = PROTECT); \
- inline void Name(BranchDelaySlot bd, \
- target_type target, \
- RelocInfo::Mode rmode, \
- COND_TYPED_ARGS) { \
- Name(target, rmode, COND_ARGS, bd); \
- }
-
-// Cases when relocation is not needed.
-#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
- void Name(target_type target, BranchDelaySlot bd = PROTECT); \
- inline void Name(BranchDelaySlot bd, target_type target) { \
- Name(target, bd); \
- } \
- void Name(target_type target, \
- COND_TYPED_ARGS, \
- BranchDelaySlot bd = PROTECT); \
- inline void Name(BranchDelaySlot bd, \
- target_type target, \
- COND_TYPED_ARGS) { \
- Name(target, COND_ARGS, bd); \
- }
-
-// ** Target prototypes.
-
-#define DECLARE_JUMP_CALL_PROTOTYPES(Name) \
- DECLARE_NORELOC_PROTOTYPE(Name, Register) \
- DECLARE_NORELOC_PROTOTYPE(Name, const Operand&) \
- DECLARE_RELOC_PROTOTYPE(Name, byte*) \
- DECLARE_RELOC_PROTOTYPE(Name, Handle<Code>)
-
-#define DECLARE_BRANCH_PROTOTYPES(Name) \
- DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
- DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
-
-
-DECLARE_JUMP_CALL_PROTOTYPES(Jump)
-DECLARE_JUMP_CALL_PROTOTYPES(Call)
-
-DECLARE_BRANCH_PROTOTYPES(Branch)
-DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
-
-DECLARE_NOTARGET_PROTOTYPE(Ret)
-
-#undef COND_TYPED_ARGS
-#undef COND_ARGS
-#undef DECLARE_NOTARGET_PROTOTYPE
-#undef DECLARE_NORELOC_PROTOTYPE
-#undef DECLARE_RELOC_PROTOTYPE
-#undef DECLARE_JUMP_CALL_PROTOTYPES
-#undef DECLARE_BRANCH_PROTOTYPES
-
- // Emit code to discard a non-negative number of pointer-sized elements
- // from the stack, clobbering only the sp register.
- void Drop(int count,
- Condition cond = cc_always,
- Register reg = no_reg,
- const Operand& op = Operand(no_reg));
-
- void DropAndRet(int drop = 0,
- Condition cond = cc_always,
- Register reg = no_reg,
- const Operand& op = Operand(no_reg));
-
- // Swap two registers. If the scratch register is omitted then a slightly
- // less efficient form using xor instead of mov is emitted.
- void Swap(Register reg1, Register reg2, Register scratch = no_reg);
-
- void Call(Label* target);
- // May do nothing if the registers are identical.
- void Move(Register dst, Register src);
-
-
- // Jump unconditionally to given label.
- // We NEED a nop in the branch delay slot, as it used by v8, for example in
- // CodeGenerator::ProcessDeferred().
- // Currently the branch delay slot is filled by the MacroAssembler.
- // Use rather b(Label) for code generation.
- void jmp(Label* L) {
- Branch(L);
- }
-
- // Load an object from the root table.
- void LoadRoot(Register destination,
- Heap::RootListIndex index);
- void LoadRoot(Register destination,
- Heap::RootListIndex index,
- Condition cond, Register src1, const Operand& src2);
-
- // Store an object to the root table.
- void StoreRoot(Register source,
- Heap::RootListIndex index);
- void StoreRoot(Register source,
- Heap::RootListIndex index,
- Condition cond, Register src1, const Operand& src2);
-
-
- // Check if object is in new space.
- // scratch can be object itself, but it will be clobbered.
- void InNewSpace(Register object,
- Register scratch,
- Condition cc, // eq for new space, ne otherwise.
- Label* branch);
-
-
- // For the page containing |object| mark the region covering [address]
- // dirty. The object address must be in the first 8K of an allocated page.
- void RecordWriteHelper(Register object,
- Register address,
- Register scratch);
-
- // For the page containing |object| mark the region covering
- // [object+offset] dirty. The object address must be in the first 8K
- // of an allocated page. The 'scratch' registers are used in the
- // implementation and all 3 registers are clobbered by the
- // operation, as well as the 'at' register. RecordWrite updates the
- // write barrier even when storing smis.
- void RecordWrite(Register object,
- Operand offset,
- Register scratch0,
- Register scratch1);
-
- // For the page containing |object| mark the region covering
- // [address] dirty. The object address must be in the first 8K of an
- // allocated page. All 3 registers are clobbered by the operation,
- // as well as the ip register. RecordWrite updates the write barrier
- // even when storing smis.
- void RecordWrite(Register object,
- Register address,
- Register scratch);
-
-
- // ---------------------------------------------------------------------------
- // Inline caching support
-
- // Generate code for checking access rights - used for security checks
- // on access to global objects across environments. The holder register
- // is left untouched, whereas both scratch registers are clobbered.
- void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss);
-
- inline void MarkCode(NopMarkerTypes type) {
- nop(type);
- }
-
- // Check if the given instruction is a 'type' marker.
- // ie. check if it is a sll zero_reg, zero_reg, <type> (referenced as
- // nop(type)). These instructions are generated to mark special location in
- // the code, like some special IC code.
- static inline bool IsMarkedCode(Instr instr, int type) {
- ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
- return IsNop(instr, type);
- }
-
-
- static inline int GetCodeMarker(Instr instr) {
- uint32_t opcode = ((instr & kOpcodeMask));
- uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
- uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
- uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
-
- // Return <n> if we have a sll zero_reg, zero_reg, n
- // else return -1.
- bool sllzz = (opcode == SLL &&
- rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
- rs == static_cast<uint32_t>(ToNumber(zero_reg)));
- int type =
- (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
- ASSERT((type == -1) ||
- ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
- return type;
- }
-
-
-
- // ---------------------------------------------------------------------------
- // Allocation support
-
- // Allocate an object in new space. The object_size is specified
- // either in bytes or in words if the allocation flag SIZE_IN_WORDS
- // is passed. If the new space is exhausted control continues at the
- // gc_required label. The allocated object is returned in result. If
- // the flag tag_allocated_object is true the result is tagged as as
- // a heap object. All registers are clobbered also when control
- // continues at the gc_required label.
- void AllocateInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
- void AllocateInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
-
- // Undo allocation in new space. The object passed and objects allocated after
- // it will no longer be allocated. The caller must make sure that no pointers
- // are left to the object(s) no longer allocated as they would be invalid when
- // allocation is undone.
- void UndoAllocationInNewSpace(Register object, Register scratch);
-
-
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // Allocates a heap number or jumps to the gc_required label if the young
- // space is full and a scavenge is needed. All registers are clobbered also
- // when control continues at the gc_required label.
- void AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* gc_required);
- void AllocateHeapNumberWithValue(Register result,
- FPURegister value,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // ---------------------------------------------------------------------------
- // Instruction macros
-
-#define DEFINE_INSTRUCTION(instr) \
- void instr(Register rd, Register rs, const Operand& rt); \
- void instr(Register rd, Register rs, Register rt) { \
- instr(rd, rs, Operand(rt)); \
- } \
- void instr(Register rs, Register rt, int32_t j) { \
- instr(rs, rt, Operand(j)); \
- }
-
-#define DEFINE_INSTRUCTION2(instr) \
- void instr(Register rs, const Operand& rt); \
- void instr(Register rs, Register rt) { \
- instr(rs, Operand(rt)); \
- } \
- void instr(Register rs, int32_t j) { \
- instr(rs, Operand(j)); \
- }
-
- DEFINE_INSTRUCTION(Addu);
- DEFINE_INSTRUCTION(Subu);
- DEFINE_INSTRUCTION(Mul);
- DEFINE_INSTRUCTION2(Mult);
- DEFINE_INSTRUCTION2(Multu);
- DEFINE_INSTRUCTION2(Div);
- DEFINE_INSTRUCTION2(Divu);
-
- DEFINE_INSTRUCTION(And);
- DEFINE_INSTRUCTION(Or);
- DEFINE_INSTRUCTION(Xor);
- DEFINE_INSTRUCTION(Nor);
-
- DEFINE_INSTRUCTION(Slt);
- DEFINE_INSTRUCTION(Sltu);
-
- // MIPS32 R2 instruction macro.
- DEFINE_INSTRUCTION(Ror);
-
-#undef DEFINE_INSTRUCTION
-#undef DEFINE_INSTRUCTION2
-
-
- //------------Pseudo-instructions-------------
-
- void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
-
-
- // load int32 in the rd register
- void li(Register rd, Operand j, bool gen2instr = false);
- inline void li(Register rd, int32_t j, bool gen2instr = false) {
- li(rd, Operand(j), gen2instr);
- }
- inline void li(Register dst, Handle<Object> value, bool gen2instr = false) {
- li(dst, Operand(value), gen2instr);
- }
-
- // Exception-generating instructions and debugging support
- void stop(const char* msg);
-
-
- // Push multiple registers on the stack.
- // Registers are saved in numerical order, with higher numbered registers
- // saved in higher memory addresses
- void MultiPush(RegList regs);
- void MultiPushReversed(RegList regs);
-
- void Push(Register src) {
- Addu(sp, sp, Operand(-kPointerSize));
- sw(src, MemOperand(sp, 0));
- }
-
- // Push two registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Condition cond = al) {
- ASSERT(cond == al); // Do not support conditional versions yet.
- Subu(sp, sp, Operand(2 * kPointerSize));
- sw(src1, MemOperand(sp, 1 * kPointerSize));
- sw(src2, MemOperand(sp, 0 * kPointerSize));
- }
-
- // Push three registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Register src3, Condition cond = al) {
- ASSERT(cond == al); // Do not support conditional versions yet.
- Addu(sp, sp, Operand(3 * -kPointerSize));
- sw(src1, MemOperand(sp, 2 * kPointerSize));
- sw(src2, MemOperand(sp, 1 * kPointerSize));
- sw(src3, MemOperand(sp, 0 * kPointerSize));
- }
-
- // Push four registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2,
- Register src3, Register src4, Condition cond = al) {
- ASSERT(cond == al); // Do not support conditional versions yet.
- Addu(sp, sp, Operand(4 * -kPointerSize));
- sw(src1, MemOperand(sp, 3 * kPointerSize));
- sw(src2, MemOperand(sp, 2 * kPointerSize));
- sw(src3, MemOperand(sp, 1 * kPointerSize));
- sw(src4, MemOperand(sp, 0 * kPointerSize));
- }
-
- inline void push(Register src) { Push(src); }
- inline void pop(Register src) { Pop(src); }
-
- void Push(Register src, Condition cond, Register tst1, Register tst2) {
- // Since we don't have conditionnal execution we use a Branch.
- Branch(3, cond, tst1, Operand(tst2));
- Addu(sp, sp, Operand(-kPointerSize));
- sw(src, MemOperand(sp, 0));
- }
-
-
- // Pops multiple values from the stack and load them in the
- // registers specified in regs. Pop order is the opposite as in MultiPush.
- void MultiPop(RegList regs);
- void MultiPopReversed(RegList regs);
- void Pop(Register dst) {
- lw(dst, MemOperand(sp, 0));
- Addu(sp, sp, Operand(kPointerSize));
- }
- void Pop(uint32_t count = 1) {
- Addu(sp, sp, Operand(count * kPointerSize));
- }
-
- // ---------------------------------------------------------------------------
- // These functions are only used by crankshaft, so they are currently
- // unimplemented.
-
- // Push and pop the registers that can hold pointers, as defined by the
- // RegList constant kSafepointSavedRegisters.
- void PushSafepointRegisters() {
- UNIMPLEMENTED_MIPS();
- }
-
- void PopSafepointRegisters() {
- UNIMPLEMENTED_MIPS();
- }
-
- void PushSafepointRegistersAndDoubles() {
- UNIMPLEMENTED_MIPS();
- }
-
- void PopSafepointRegistersAndDoubles() {
- UNIMPLEMENTED_MIPS();
- }
-
- static int SafepointRegisterStackIndex(int reg_code) {
- UNIMPLEMENTED_MIPS();
- return 0;
- }
-
- // ---------------------------------------------------------------------------
-
- // MIPS32 R2 instruction macro.
- void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
- void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
-
- // Convert unsigned word to double.
- void Cvt_d_uw(FPURegister fd, FPURegister fs);
- void Cvt_d_uw(FPURegister fd, Register rs);
-
- // Convert double to unsigned word.
- void Trunc_uw_d(FPURegister fd, FPURegister fs);
- void Trunc_uw_d(FPURegister fd, Register rs);
-
- // Convert the HeapNumber pointed to by source to a 32bits signed integer
- // dest. If the HeapNumber does not fit into a 32bits signed integer branch
- // to not_int32 label. If FPU is available double_scratch is used but not
- // scratch2.
- void ConvertToInt32(Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- FPURegister double_scratch,
- Label *not_int32);
-
- // -------------------------------------------------------------------------
- // Activation frames
-
- void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
- void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
- void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
- void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
- // Enter exit frame.
- // Expects the number of arguments in register a0 and
- // the builtin function to call in register a1.
- // On output hold_argc, hold_function, and hold_argv are setup.
- void EnterExitFrame(Register hold_argc,
- Register hold_argv,
- Register hold_function,
- bool save_doubles);
-
- // Leave the current exit frame. Expects the return value in v0.
- void LeaveExitFrame(bool save_doubles);
-
- // Align the stack by optionally pushing a Smi zero.
- void AlignStack(int offset); // TODO(mips) : remove this function.
-
- // Get the actual activation frame alignment for target environment.
- static int ActivationFrameAlignment();
-
- void LoadContext(Register dst, int context_chain_length);
-
- void LoadGlobalFunction(int index, Register function);
-
- // Load the initial map from the global function. The registers
- // function and map can be the same, function is then overwritten.
- void LoadGlobalFunctionInitialMap(Register function,
- Register map,
- Register scratch);
-
- // -------------------------------------------------------------------------
- // JavaScript invokes
-
- // Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
-
- void InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag);
-
- // Invoke the JavaScript function in the given register. Changes the
- // current context to the context in the function before invoking.
- void InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
-
- void InvokeFunction(JSFunction* function,
- const ParameterCount& actual,
- InvokeFlag flag);
-
-
- void IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail);
-
- void IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail);
-
- void IsObjectJSStringType(Register object,
- Register scratch,
- Label* fail);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // -------------------------------------------------------------------------
- // Debugger Support
-
- void DebugBreak();
-#endif
-
-
- // -------------------------------------------------------------------------
- // Exception handling
-
- // Push a new try handler and link into try handler chain.
- // The return address must be passed in register ra.
- // Clobber t0, t1, t2.
- void PushTryHandler(CodeLocation try_location, HandlerType type);
-
- // Unlink the stack handler on top of the stack from the try handler chain.
- // Must preserve the result register.
- void PopTryHandler();
-
- // Copies a fixed number of fields of heap objects from src to dst.
- void CopyFields(Register dst, Register src, RegList temps, int field_count);
-
- // -------------------------------------------------------------------------
- // Support functions.
-
- // Try to get function prototype of a function and puts the value in
- // the result register. Checks that the function really is a
- // function and jumps to the miss label if the fast checks fail. The
- // function register will be untouched; the other registers may be
- // clobbered.
- void TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss);
-
- void GetObjectType(Register function,
- Register map,
- Register type_reg);
-
- // Check if the map of an object is equal to a specified map (either
- // given directly or as an index into the root list) and branch to
- // label if not. Skip the smi check if not required (object is known
- // to be a heap object)
- void CheckMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* fail,
- bool is_heap_object);
-
- void CheckMap(Register obj,
- Register scratch,
- Heap::RootListIndex index,
- Label* fail,
- bool is_heap_object);
-
- // Generates code for reporting that an illegal operation has
- // occurred.
- void IllegalOperation(int num_arguments);
-
- // Picks out an array index from the hash field.
- // Register use:
- // hash - holds the index's hash. Clobbered.
- // index - holds the overwritten index on exit.
- void IndexFromHash(Register hash, Register index);
-
- // Load the value of a number object into a FPU double register. If the
- // object is not a number a jump to the label not_number is performed
- // and the FPU double register is unchanged.
- void ObjectToDoubleFPURegister(
- Register object,
- FPURegister value,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* not_number,
- ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
-
- // Load the value of a smi object into a FPU double register. The register
- // scratch1 can be the same register as smi in which case smi will hold the
- // untagged value afterwards.
- void SmiToDoubleFPURegister(Register smi,
- FPURegister value,
- Register scratch1);
-
- // -------------------------------------------------------------------------
- // Runtime calls
-
- // Call a code stub.
- void CallStub(CodeStub* stub, Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
-
- // Tail call a code stub (jump).
- void TailCallStub(CodeStub* stub);
-
- void CallJSExitStub(CodeStub* stub);
-
- // Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
-
- // Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments);
-
- // Convenience function: call an external reference.
- void CallExternalReference(const ExternalReference& ext,
- int num_arguments);
-
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
-
- // Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
-
- // Before calling a C-function from generated code, align arguments on stack
- // and add space for the four mips argument slots.
- // After aligning the frame, non-register arguments must be stored on the
- // stack, after the argument-slots using helper: CFunctionArgumentOperand().
- // The argument count assumes all arguments are word sized.
- // Some compilers/platforms require the stack to be aligned when calling
- // C++ code.
- // Needs a scratch register to do some arithmetic. This register will be
- // trashed.
- void PrepareCallCFunction(int num_arguments, Register scratch);
-
- // Arguments 1-4 are placed in registers a0 thru a3 respectively.
- // Arguments 5..n are stored to stack using following:
- // sw(t0, CFunctionArgumentOperand(5));
-
- // Calls a C function and cleans up the space for arguments allocated
- // by PrepareCallCFunction. The called function is not allowed to trigger a
- // garbage collection, since that might move the code and invalidate the
- // return address (unless this is somehow accounted for by the called
- // function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, Register scratch, int num_arguments);
-
- // Jump to the builtin routine.
- void JumpToExternalReference(const ExternalReference& builtin);
-
- // Invoke specified builtin JavaScript function. Adds an entry to
- // the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flags,
- PostCallGenerator* post_call_generator = NULL);
-
- // Store the code object for the given builtin in the target register and
- // setup the function in a1.
- void GetBuiltinEntry(Register target, Builtins::JavaScript id);
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, Builtins::JavaScript id);
-
- struct Unresolved {
- int pc;
- uint32_t flags; // see Bootstrapper::FixupFlags decoders/encoders.
- const char* name;
- };
-
- Handle<Object> CodeObject() { return code_object_; }
-
- // -------------------------------------------------------------------------
- // StatsCounter support
-
- void SetCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2);
- void IncrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2);
- void DecrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2);
-
-
- // -------------------------------------------------------------------------
- // Debugging
-
- // Calls Abort(msg) if the condition cc is not satisfied.
- // Use --debug_code to enable.
- void Assert(Condition cc, const char* msg, Register rs, Operand rt);
- void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
- void AssertFastElements(Register elements);
-
- // Like Assert(), but always enabled.
- void Check(Condition cc, const char* msg, Register rs, Operand rt);
-
- // Print a message to stdout and abort execution.
- void Abort(const char* msg);
-
- // Verify restrictions about code generated in stubs.
- void set_generating_stub(bool value) { generating_stub_ = value; }
- bool generating_stub() { return generating_stub_; }
- void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
- bool allow_stub_calls() { return allow_stub_calls_; }
-
- // ---------------------------------------------------------------------------
- // Number utilities
-
- // Check whether the value of reg is a power of two and not zero. If not
- // control continues at the label not_power_of_two. If reg is a power of two
- // the register scratch contains the value of (reg - 1) when control falls
- // through.
- void JumpIfNotPowerOfTwoOrZero(Register reg,
- Register scratch,
- Label* not_power_of_two_or_zero);
-
- // -------------------------------------------------------------------------
- // Smi utilities
-
- // Try to convert int32 to smi. If the value is to large, preserve
- // the original value and jump to not_a_smi. Destroys scratch and
- // sets flags.
- // This is only used by crankshaft atm so it is unimplemented on MIPS.
- void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) {
- UNIMPLEMENTED_MIPS();
- }
-
- void SmiTag(Register reg) {
- Addu(reg, reg, reg);
- }
-
- void SmiTag(Register dst, Register src) {
- Addu(dst, src, src);
- }
-
- void SmiUntag(Register reg) {
- sra(reg, reg, kSmiTagSize);
- }
-
- void SmiUntag(Register dst, Register src) {
- sra(dst, src, kSmiTagSize);
- }
-
- // Jump the register contains a smi.
- inline void JumpIfSmi(Register value, Label* smi_label,
- Register scratch = at) {
- ASSERT_EQ(0, kSmiTag);
- andi(scratch, value, kSmiTagMask);
- Branch(smi_label, eq, scratch, Operand(zero_reg));
- }
-
- // Jump if the register contains a non-smi.
- inline void JumpIfNotSmi(Register value, Label* not_smi_label,
- Register scratch = at) {
- ASSERT_EQ(0, kSmiTag);
- andi(scratch, value, kSmiTagMask);
- Branch(not_smi_label, ne, scratch, Operand(zero_reg));
- }
-
- // Jump if either of the registers contain a non-smi.
- void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
- // Jump if either of the registers contain a smi.
- void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
-
- // Abort execution if argument is a smi. Used in debug code.
- void AbortIfSmi(Register object);
- void AbortIfNotSmi(Register object);
-
- // Abort execution if argument is not the root value with the given index.
- void AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message);
-
- // ---------------------------------------------------------------------------
- // HeapNumber utilities
-
- void JumpIfNotHeapNumber(Register object,
- Register heap_number_map,
- Register scratch,
- Label* on_not_heap_number);
-
- // -------------------------------------------------------------------------
- // String utilities
-
- // Checks if both instance types are sequential ASCII strings and jumps to
- // label if either is not.
- void JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* failure);
-
- // Check if instance type is sequential ASCII string and jump to label if
- // it is not.
- void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure);
-
- // Test that both first and second are sequential ASCII strings.
- // Assume that they are non-smis.
- void JumpIfNonSmisNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure);
-
- // Test that both first and second are sequential ASCII strings.
- // Check that they are non-smis.
- void JumpIfNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure);
-
- private:
- void CallCFunctionHelper(Register function,
- ExternalReference function_reference,
- Register scratch,
- int num_arguments);
-
- void Jump(intptr_t target, RelocInfo::Mode rmode,
- BranchDelaySlot bd = PROTECT);
- void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg),
- BranchDelaySlot bd = PROTECT);
- void Call(intptr_t target, RelocInfo::Mode rmode,
- BranchDelaySlot bd = PROTECT);
- void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg),
- BranchDelaySlot bd = PROTECT);
-
- // Helper functions for generating invokes.
- void InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
- Label* done,
- InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
-
- // Get the code for the given builtin. Returns if able to resolve
- // the function in the 'resolved' flag.
- Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
-
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
- void InitializeNewString(Register string,
- Register length,
- Heap::RootListIndex map_index,
- Register scratch1,
- Register scratch2);
-
-
- bool generating_stub_;
- bool allow_stub_calls_;
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
-};
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-// The code patcher is used to patch (typically) small parts of code e.g. for
-// debugging and other types of instrumentation. When using the code patcher
-// the exact number of bytes specified must be emitted. It is not legal to emit
-// relocation information. If any of these constraints are violated it causes
-// an assertion to fail.
-class CodePatcher {
- public:
- CodePatcher(byte* address, int instructions);
- virtual ~CodePatcher();
-
- // Macro assembler to emit code.
- MacroAssembler* masm() { return &masm_; }
-
- // Emit an instruction directly.
- void Emit(Instr x);
-
- // Emit an address directly.
- void Emit(Address addr);
-
- private:
- byte* address_; // The address of the code being patched.
- int instructions_; // Number of instructions of the expected patch size.
- int size_; // Number of bytes of the expected patch size.
- MacroAssembler masm_; // Macro assembler used to generate the code.
-};
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-// Helper class for generating code or data associated with the code
-// right after a call instruction. As an example this can be used to
-// generate safepoint data after calls for crankshaft.
-class PostCallGenerator {
- public:
- PostCallGenerator() { }
- virtual ~PostCallGenerator() { }
- virtual void Generate() = 0;
-};
-
-
-// -----------------------------------------------------------------------------
-// Static helper functions.
-
-static MemOperand ContextOperand(Register context, int index) {
- return MemOperand(context, Context::SlotOffset(index));
-}
-
-
-static inline MemOperand GlobalObjectOperand() {
- return ContextOperand(cp, Context::GLOBAL_INDEX);
-}
-
-
-// Generate a MemOperand for loading a field from an object.
-static inline MemOperand FieldMemOperand(Register object, int offset) {
- return MemOperand(object, offset - kHeapObjectTag);
-}
-
-
-
-#ifdef GENERATED_CODE_COVERAGE
-#define CODE_COVERAGE_STRINGIFY(x) #x
-#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
-#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
-#else
-#define ACCESS_MASM(masm) masm->
-#endif
-
-} } // namespace v8::internal
-
-#endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
-
diff --git a/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.cc b/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.cc
deleted file mode 100644
index d1dbc43..0000000
--- a/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.cc
+++ /dev/null
@@ -1,478 +0,0 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "unicode.h"
-#include "log.h"
-#include "code-stubs.h"
-#include "regexp-stack.h"
-#include "macro-assembler.h"
-#include "regexp-macro-assembler.h"
-#include "mips/regexp-macro-assembler-mips.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-/*
- * This assembler uses the following register assignment convention
- * - t1 : Pointer to current code object (Code*) including heap object tag.
- * - t2 : Current position in input, as negative offset from end of string.
- * Please notice that this is the byte offset, not the character offset!
- * - t3 : Currently loaded character. Must be loaded using
- * LoadCurrentCharacter before using any of the dispatch methods.
- * - t4 : points to tip of backtrack stack
- * - t5 : Unused.
- * - t6 : End of input (points to byte after last character in input).
- * - fp : Frame pointer. Used to access arguments, local variables and
- * RegExp registers.
- * - sp : points to tip of C stack.
- *
- * The remaining registers are free for computations.
- *
- * Each call to a public method should retain this convention.
- * The stack will have the following structure:
- * - direct_call (if 1, direct call from JavaScript code, if 0 call
- * through the runtime system)
- * - stack_area_base (High end of the memory area to use as
- * backtracking stack)
- * - int* capture_array (int[num_saved_registers_], for output).
- * - stack frame header (16 bytes in size)
- * --- sp when called ---
- * - link address
- * - backup of registers s0..s7
- * - end of input (Address of end of string)
- * - start of input (Address of first character in string)
- * - start index (character index of start)
- * --- frame pointer ----
- * - void* input_string (location of a handle containing the string)
- * - Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a non-position.
- * - At start (if 1, we are starting at the start of the
- * string, otherwise 0)
- * - register 0 (Only positions must be stored in the first
- * - register 1 num_saved_registers_ registers)
- * - ...
- * - register num_registers-1
- * --- sp ---
- *
- * The first num_saved_registers_ registers are initialized to point to
- * "character -1" in the string (i.e., char_size() bytes before the first
- * character of the string). The remaining registers start out as garbage.
- *
- * The data up to the return address must be placed there by the calling
- * code, by calling the code entry as cast to a function with the signature:
- * int (*match)(String* input_string,
- * int start_index,
- * Address start,
- * Address end,
- * int* capture_output_array,
- * bool at_start,
- * byte* stack_area_base,
- * bool direct_call)
- * The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc).
- */
-
-#define __ ACCESS_MASM(masm_)
-
-RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(
- Mode mode,
- int registers_to_save)
- : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
- mode_(mode),
- num_registers_(registers_to_save),
- num_saved_registers_(registers_to_save),
- entry_label_(),
- start_label_(),
- success_label_(),
- backtrack_label_(),
- exit_label_() {
- ASSERT_EQ(0, registers_to_save % 2);
- __ jmp(&entry_label_); // We'll write the entry code later.
- __ bind(&start_label_); // And then continue from here.
-}
-
-
-RegExpMacroAssemblerMIPS::~RegExpMacroAssemblerMIPS() {
- delete masm_;
- // Unuse labels in case we throw away the assembler without calling GetCode.
- entry_label_.Unuse();
- start_label_.Unuse();
- success_label_.Unuse();
- backtrack_label_.Unuse();
- exit_label_.Unuse();
- check_preempt_label_.Unuse();
- stack_overflow_label_.Unuse();
-}
-
-
-int RegExpMacroAssemblerMIPS::stack_limit_slack() {
- return RegExpStack::kStackLimitSlack;
-}
-
-
-void RegExpMacroAssemblerMIPS::AdvanceCurrentPosition(int by) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::AdvanceRegister(int reg, int by) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::Backtrack() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::Bind(Label* label) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckCharacter(uint32_t c, Label* on_equal) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckCharacterLT(uc16 limit, Label* on_less) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckNotRegistersEqual(int reg1,
- int reg2,
- Label* on_not_equal) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckNotCharacter(uint32_t c,
- Label* on_not_equal) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_not_equal) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterMinusAnd(
- uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match) {
- UNIMPLEMENTED_MIPS();
- return false;
-}
-
-
-void RegExpMacroAssemblerMIPS::Fail() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-Handle<Object> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
- UNIMPLEMENTED_MIPS();
- return Handle<Object>::null();
-}
-
-
-void RegExpMacroAssemblerMIPS::GoTo(Label* to) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::IfRegisterGE(int reg,
- int comparand,
- Label* if_ge) {
- __ lw(a0, register_location(reg));
- BranchOrBacktrack(if_ge, ge, a0, Operand(comparand));
-}
-
-
-void RegExpMacroAssemblerMIPS::IfRegisterLT(int reg,
- int comparand,
- Label* if_lt) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::IfRegisterEqPos(int reg,
- Label* if_eq) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-RegExpMacroAssembler::IrregexpImplementation
- RegExpMacroAssemblerMIPS::Implementation() {
- return kMIPSImplementation;
-}
-
-
-void RegExpMacroAssemblerMIPS::LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::PopCurrentPosition() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::PopRegister(int register_index) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-
-void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::PushCurrentPosition() {
- Push(current_input_offset());
-}
-
-
-void RegExpMacroAssemblerMIPS::PushRegister(int register_index,
- StackCheckFlag check_stack_limit) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::ReadCurrentPositionFromRegister(int reg) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::ReadStackPointerFromRegister(int reg) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::SetCurrentPositionFromEnd(int by) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::Succeed() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(int reg,
- int cp_offset) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// Private methods:
-
-void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// Helper function for reading a value out of a stack frame.
-template <typename T>
-static T& frame_entry(Address re_frame, int frame_offset) {
- return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
-}
-
-
-int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame) {
- UNIMPLEMENTED_MIPS();
- return 0;
-}
-
-
-MemOperand RegExpMacroAssemblerMIPS::register_location(int register_index) {
- UNIMPLEMENTED_MIPS();
- return MemOperand(zero_reg, 0);
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckPosition(int cp_offset,
- Label* on_outside_input) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::BranchOrBacktrack(Label* to,
- Condition condition,
- Register rs,
- const Operand& rt) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::SafeCall(Label* to, Condition cond, Register rs,
- const Operand& rt) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::SafeReturn() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::SafeCallTarget(Label* name) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::Push(Register source) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::Pop(Register target) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckPreemption() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckStackLimit() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::CallCFunctionUsingStub(
- ExternalReference function,
- int num_arguments) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
- int characters) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-#undef __
-
-#endif // V8_INTERPRETED_REGEXP
-
-}} // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.h b/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.h
deleted file mode 100644
index 2f4319f..0000000
--- a/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.h
+++ /dev/null
@@ -1,250 +0,0 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
-#define V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
-
-namespace v8 {
-namespace internal {
-
-#ifdef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerMIPS: public RegExpMacroAssembler {
- public:
- RegExpMacroAssemblerMIPS();
- virtual ~RegExpMacroAssemblerMIPS();
-};
-#else // V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
- public:
- RegExpMacroAssemblerMIPS(Mode mode, int registers_to_save);
- virtual ~RegExpMacroAssemblerMIPS();
- virtual int stack_limit_slack();
- virtual void AdvanceCurrentPosition(int by);
- virtual void AdvanceRegister(int reg, int by);
- virtual void Backtrack();
- virtual void Bind(Label* label);
- virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckCharacter(uint32_t c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal);
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
- virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
- // A "greedy loop" is a loop that is both greedy and with a simple
- // body. It has a particularly simple implementation.
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- Label* on_no_match);
- virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
- virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal);
- // Checks whether the given offset from the current position is before
- // the end of the string.
- virtual void CheckPosition(int cp_offset, Label* on_outside_input);
- virtual bool CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match);
- virtual void Fail();
- virtual Handle<Object> GetCode(Handle<String> source);
- virtual void GoTo(Label* label);
- virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
- virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
- virtual void IfRegisterEqPos(int reg, Label* if_eq);
- virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1);
- virtual void PopCurrentPosition();
- virtual void PopRegister(int register_index);
- virtual void PushBacktrack(Label* label);
- virtual void PushCurrentPosition();
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit);
- virtual void ReadCurrentPositionFromRegister(int reg);
- virtual void ReadStackPointerFromRegister(int reg);
- virtual void SetCurrentPositionFromEnd(int by);
- virtual void SetRegister(int register_index, int to);
- virtual void Succeed();
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
- virtual void ClearRegisters(int reg_from, int reg_to);
- virtual void WriteStackPointerToRegister(int reg);
-
- // Called from RegExp if the stack-guard is triggered.
- // If the code object is relocated, the return address is fixed before
- // returning.
- static int CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame);
- private:
- // Offsets from frame_pointer() of function parameters and stored registers.
- static const int kFramePointer = 0;
-
- // Above the frame pointer - Stored registers and stack passed parameters.
- // Registers s0 to s7, fp, and ra.
- static const int kStoredRegisters = kFramePointer;
- // Return address (stored from link register, read into pc on return).
- static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
- // Stack frame header.
- static const int kStackFrameHeader = kReturnAddress + kPointerSize;
- // Stack parameters placed by caller.
- static const int kRegisterOutput = kStackFrameHeader + 16;
- static const int kStackHighEnd = kRegisterOutput + kPointerSize;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
-
- // Below the frame pointer.
- // Register parameters stored by setup code.
- static const int kInputEnd = kFramePointer - kPointerSize;
- static const int kInputStart = kInputEnd - kPointerSize;
- static const int kStartIndex = kInputStart - kPointerSize;
- static const int kInputString = kStartIndex - kPointerSize;
- // When adding local variables remember to push space for them in
- // the frame in GetCode.
- static const int kInputStartMinusOne = kInputString - kPointerSize;
- static const int kAtStart = kInputStartMinusOne - kPointerSize;
- // First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kAtStart - kPointerSize;
-
- // Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
-
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
- // Check whether preemption has been requested.
- void CheckPreemption();
-
- // Check whether we are exceeding the stack limit on the backtrack stack.
- void CheckStackLimit();
-
-
- // Generate a call to CheckStackGuardState.
- void CallCheckStackGuardState(Register scratch);
-
- // The ebp-relative location of a regexp register.
- MemOperand register_location(int register_index);
-
- // Register holding the current input position as negative offset from
- // the end of the string.
- inline Register current_input_offset() { return t2; }
-
- // The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return t3; }
-
- // Register holding address of the end of the input string.
- inline Register end_of_input_address() { return t6; }
-
- // Register holding the frame address. Local variables, parameters and
- // regexp registers are addressed relative to this.
- inline Register frame_pointer() { return fp; }
-
- // The register containing the backtrack stack top. Provides a meaningful
- // name to the register.
- inline Register backtrack_stackpointer() { return t4; }
-
- // Register holding pointer to the current code object.
- inline Register code_pointer() { return t1; }
-
- // Byte size of chars in the string to match (decided by the Mode argument)
- inline int char_size() { return static_cast<int>(mode_); }
-
- // Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
- void BranchOrBacktrack(Label* to,
- Condition condition,
- Register rs,
- const Operand& rt);
-
- // Call and return internally in the generated code in a way that
- // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
- inline void SafeCall(Label* to,
- Condition cond,
- Register rs,
- const Operand& rt);
- inline void SafeReturn();
- inline void SafeCallTarget(Label* name);
-
- // Pushes the value of a register on the backtrack stack. Decrements the
- // stack pointer by a word size and stores the register's value there.
- inline void Push(Register source);
-
- // Pops a value from the backtrack stack. Reads the word at the stack pointer
- // and increments it by a word size.
- inline void Pop(Register target);
-
- // Calls a C function and cleans up the frame alignment done by
- // by FrameAlign. The called function *is* allowed to trigger a garbage
- // collection, but may not take more than four arguments (no arguments
- // passed on the stack), and the first argument will be a pointer to the
- // return address.
- inline void CallCFunctionUsingStub(ExternalReference function,
- int num_arguments);
-
-
- MacroAssembler* masm_;
-
- // Which mode to generate code for (ASCII or UC16).
- Mode mode_;
-
- // One greater than maximal register index actually used.
- int num_registers_;
-
- // Number of registers to output at the end (the saved registers
- // are always 0..num_saved_registers_-1)
- int num_saved_registers_;
-
- // Labels used internally.
- Label entry_label_;
- Label start_label_;
- Label success_label_;
- Label backtrack_label_;
- Label exit_label_;
- Label check_preempt_label_;
- Label stack_overflow_label_;
-};
-
-#endif // V8_INTERPRETED_REGEXP
-
-
-}} // namespace v8::internal
-
-#endif // V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
-
diff --git a/src/3rdparty/v8/src/mips/register-allocator-mips-inl.h b/src/3rdparty/v8/src/mips/register-allocator-mips-inl.h
deleted file mode 100644
index bbfb31d..0000000
--- a/src/3rdparty/v8/src/mips/register-allocator-mips-inl.h
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_REGISTER_ALLOCATOR_MIPS_INL_H_
-#define V8_IA32_REGISTER_ALLOCATOR_MIPS_INL_H_
-
-#include "v8.h"
-#include "mips/assembler-mips.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
- // The code for this test relies on the order of register codes.
- return reg.is(cp) || reg.is(s8_fp) || reg.is(sp);
-}
-
-
-int RegisterAllocator::ToNumber(Register reg) {
- ASSERT(reg.is_valid() && !IsReserved(reg));
- const int kNumbers[] = {
- 0, // zero_reg
- 1, // at
- 2, // v0
- 3, // v1
- 4, // a0
- 5, // a1
- 6, // a2
- 7, // a3
- 8, // t0
- 9, // t1
- 10, // t2
- 11, // t3
- 12, // t4
- 13, // t5
- 14, // t
- 15, // t7
- 16, // t8
- 17, // t9
- 18, // s0
- 19, // s1
- 20, // s2
- 21, // s3
- 22, // s4
- 23, // s5
- 24, // s6
- 25, // s7
- 26, // k0
- 27, // k1
- 28, // gp
- 29, // sp
- 30, // s8_fp
- 31, // ra
- };
- return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
- ASSERT(num >= 0 && num < kNumRegisters);
- const Register kRegisters[] = {
- zero_reg,
- at,
- v0,
- v1,
- a0,
- a1,
- a2,
- a3,
- t0,
- t1,
- t2,
- t3,
- t4,
- t5,
- t6,
- t7,
- s0,
- s1,
- s2,
- s3,
- s4,
- s5,
- s6,
- s7,
- t8,
- t9,
- k0,
- k1,
- gp,
- sp,
- s8_fp,
- ra
- };
- return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
- Reset();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_REGISTER_ALLOCATOR_MIPS_INL_H_
-
diff --git a/src/3rdparty/v8/src/mips/register-allocator-mips.cc b/src/3rdparty/v8/src/mips/register-allocator-mips.cc
deleted file mode 100644
index 2c5d61b..0000000
--- a/src/3rdparty/v8/src/mips/register-allocator-mips.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-void Result::ToRegister() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Result::ToRegister(Register target) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
- // No byte registers on MIPS.
- UNREACHABLE();
- return Result();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/register-allocator-mips.h b/src/3rdparty/v8/src/mips/register-allocator-mips.h
deleted file mode 100644
index c448923..0000000
--- a/src/3rdparty/v8/src/mips/register-allocator-mips.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
-#define V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
-
-#include "mips/constants-mips.h"
-
-namespace v8 {
-namespace internal {
-
-class RegisterAllocatorConstants : public AllStatic {
- public:
- // No registers are currently managed by the register allocator on MIPS.
- static const int kNumRegisters = 0;
- static const int kInvalidRegister = -1;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
-
diff --git a/src/3rdparty/v8/src/mips/simulator-mips.cc b/src/3rdparty/v8/src/mips/simulator-mips.cc
deleted file mode 100644
index 50ad7a1..0000000
--- a/src/3rdparty/v8/src/mips/simulator-mips.cc
+++ /dev/null
@@ -1,2438 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-#include <math.h>
-#include <limits.h>
-#include <cstdarg>
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "disasm.h"
-#include "assembler.h"
-#include "globals.h" // Need the BitCast
-#include "mips/constants-mips.h"
-#include "mips/simulator-mips.h"
-
-
-// Only build the simulator if not compiling for real MIPS hardware.
-#if defined(USE_SIMULATOR)
-
-namespace v8 {
-namespace internal {
-
-// Utils functions
-bool HaveSameSign(int32_t a, int32_t b) {
- return ((a ^ b) >= 0);
-}
-
-
-uint32_t get_fcsr_condition_bit(uint32_t cc) {
- if (cc == 0) {
- return 23;
- } else {
- return 24 + cc;
- }
-}
-
-
-// This macro provides a platform independent use of sscanf. The reason for
-// SScanF not being implemented in a platform independent was through
-// ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time
-// Library does not provide vsscanf.
-#define SScanF sscanf // NOLINT
-
-// The MipsDebugger class is used by the simulator while debugging simulated
-// code.
-class MipsDebugger {
- public:
- explicit MipsDebugger(Simulator* sim);
- ~MipsDebugger();
-
- void Stop(Instruction* instr);
- void Debug();
- // Print all registers with a nice formatting.
- void PrintAllRegs();
- void PrintAllRegsIncludingFPU();
-
- private:
- // We set the breakpoint code to 0xfffff to easily recognize it.
- static const Instr kBreakpointInstr = SPECIAL | BREAK | 0xfffff << 6;
- static const Instr kNopInstr = 0x0;
-
- Simulator* sim_;
-
- int32_t GetRegisterValue(int regnum);
- int32_t GetFPURegisterValueInt(int regnum);
- int64_t GetFPURegisterValueLong(int regnum);
- float GetFPURegisterValueFloat(int regnum);
- double GetFPURegisterValueDouble(int regnum);
- bool GetValue(const char* desc, int32_t* value);
-
- // Set or delete a breakpoint. Returns true if successful.
- bool SetBreakpoint(Instruction* breakpc);
- bool DeleteBreakpoint(Instruction* breakpc);
-
- // Undo and redo all breakpoints. This is needed to bracket disassembly and
- // execution to skip past breakpoints when run from the debugger.
- void UndoBreakpoints();
- void RedoBreakpoints();
-};
-
-MipsDebugger::MipsDebugger(Simulator* sim) {
- sim_ = sim;
-}
-
-
-MipsDebugger::~MipsDebugger() {
-}
-
-
-#ifdef GENERATED_CODE_COVERAGE
-static FILE* coverage_log = NULL;
-
-
-static void InitializeCoverage() {
- char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
- if (file_name != NULL) {
- coverage_log = fopen(file_name, "aw+");
- }
-}
-
-
-void MipsDebugger::Stop(Instruction* instr) {
- UNIMPLEMENTED_MIPS();
- char* str = reinterpret_cast<char*>(instr->InstructionBits());
- if (strlen(str) > 0) {
- if (coverage_log != NULL) {
- fprintf(coverage_log, "%s\n", str);
- fflush(coverage_log);
- }
- instr->SetInstructionBits(0x0); // Overwrite with nop.
- }
- sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
-}
-
-
-#else // ndef GENERATED_CODE_COVERAGE
-
-#define UNSUPPORTED() printf("Unsupported instruction.\n");
-
-static void InitializeCoverage() {}
-
-
-void MipsDebugger::Stop(Instruction* instr) {
- const char* str = reinterpret_cast<char*>(instr->InstructionBits());
- PrintF("Simulator hit %s\n", str);
- sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
- Debug();
-}
-#endif // GENERATED_CODE_COVERAGE
-
-
-int32_t MipsDebugger::GetRegisterValue(int regnum) {
- if (regnum == kNumSimuRegisters) {
- return sim_->get_pc();
- } else {
- return sim_->get_register(regnum);
- }
-}
-
-
-int32_t MipsDebugger::GetFPURegisterValueInt(int regnum) {
- if (regnum == kNumFPURegisters) {
- return sim_->get_pc();
- } else {
- return sim_->get_fpu_register(regnum);
- }
-}
-
-
-int64_t MipsDebugger::GetFPURegisterValueLong(int regnum) {
- if (regnum == kNumFPURegisters) {
- return sim_->get_pc();
- } else {
- return sim_->get_fpu_register_long(regnum);
- }
-}
-
-
-float MipsDebugger::GetFPURegisterValueFloat(int regnum) {
- if (regnum == kNumFPURegisters) {
- return sim_->get_pc();
- } else {
- return sim_->get_fpu_register_float(regnum);
- }
-}
-
-
-double MipsDebugger::GetFPURegisterValueDouble(int regnum) {
- if (regnum == kNumFPURegisters) {
- return sim_->get_pc();
- } else {
- return sim_->get_fpu_register_double(regnum);
- }
-}
-
-
-bool MipsDebugger::GetValue(const char* desc, int32_t* value) {
- int regnum = Registers::Number(desc);
- int fpuregnum = FPURegisters::Number(desc);
-
- if (regnum != kInvalidRegister) {
- *value = GetRegisterValue(regnum);
- return true;
- } else if (fpuregnum != kInvalidFPURegister) {
- *value = GetFPURegisterValueInt(fpuregnum);
- return true;
- } else if (strncmp(desc, "0x", 2) == 0) {
- return SScanF(desc, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
- } else {
- return SScanF(desc, "%i", value) == 1;
- }
- return false;
-}
-
-
-bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
- // Check if a breakpoint can be set. If not return without any side-effects.
- if (sim_->break_pc_ != NULL) {
- return false;
- }
-
- // Set the breakpoint.
- sim_->break_pc_ = breakpc;
- sim_->break_instr_ = breakpc->InstructionBits();
- // Not setting the breakpoint instruction in the code itself. It will be set
- // when the debugger shell continues.
- return true;
-}
-
-
-bool MipsDebugger::DeleteBreakpoint(Instruction* breakpc) {
- if (sim_->break_pc_ != NULL) {
- sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
- }
-
- sim_->break_pc_ = NULL;
- sim_->break_instr_ = 0;
- return true;
-}
-
-
-void MipsDebugger::UndoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
- sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
- }
-}
-
-
-void MipsDebugger::RedoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
- sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
- }
-}
-
-
-void MipsDebugger::PrintAllRegs() {
-#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n)
-
- PrintF("\n");
- // at, v0, a0
- PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
- REG_INFO(1), REG_INFO(2), REG_INFO(4));
- // v1, a1
- PrintF("%26s\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
- "", REG_INFO(3), REG_INFO(5));
- // a2
- PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(6));
- // a3
- PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(7));
- PrintF("\n");
- // t0-t7, s0-s7
- for (int i = 0; i < 8; i++) {
- PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
- REG_INFO(8+i), REG_INFO(16+i));
- }
- PrintF("\n");
- // t8, k0, LO
- PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
- REG_INFO(24), REG_INFO(26), REG_INFO(32));
- // t9, k1, HI
- PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
- REG_INFO(25), REG_INFO(27), REG_INFO(33));
- // sp, fp, gp
- PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
- REG_INFO(29), REG_INFO(30), REG_INFO(28));
- // pc
- PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
- REG_INFO(31), REG_INFO(34));
-
-#undef REG_INFO
-#undef FPU_REG_INFO
-}
-
-
-void MipsDebugger::PrintAllRegsIncludingFPU() {
-#define FPU_REG_INFO(n) FPURegisters::Name(n), FPURegisters::Name(n+1), \
- GetFPURegisterValueInt(n+1), \
- GetFPURegisterValueInt(n), \
- GetFPURegisterValueDouble(n)
-
- PrintAllRegs();
-
- PrintF("\n\n");
- // f0, f1, f2, ... f31
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(0) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(2) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(4) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(6) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(8) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(10));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(12));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(14));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(16));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(18));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(20));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(22));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(24));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(26));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(28));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(30));
-
-#undef REG_INFO
-#undef FPU_REG_INFO
-}
-
-
-void MipsDebugger::Debug() {
- intptr_t last_pc = -1;
- bool done = false;
-
-#define COMMAND_SIZE 63
-#define ARG_SIZE 255
-
-#define STR(a) #a
-#define XSTR(a) STR(a)
-
- char cmd[COMMAND_SIZE + 1];
- char arg1[ARG_SIZE + 1];
- char arg2[ARG_SIZE + 1];
- char* argv[3] = { cmd, arg1, arg2 };
-
- // make sure to have a proper terminating character if reaching the limit
- cmd[COMMAND_SIZE] = 0;
- arg1[ARG_SIZE] = 0;
- arg2[ARG_SIZE] = 0;
-
- // Undo all set breakpoints while running in the debugger shell. This will
- // make them invisible to all commands.
- UndoBreakpoints();
-
- while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
- if (last_pc != sim_->get_pc()) {
- disasm::NameConverter converter;
- disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
- v8::internal::EmbeddedVector<char, 256> buffer;
- dasm.InstructionDecode(buffer,
- reinterpret_cast<byte_*>(sim_->get_pc()));
- PrintF(" 0x%08x %s\n", sim_->get_pc(), buffer.start());
- last_pc = sim_->get_pc();
- }
- char* line = ReadLine("sim> ");
- if (line == NULL) {
- break;
- } else {
- // Use sscanf to parse the individual parts of the command line. At the
- // moment no command expects more than two parameters.
- int argc = SScanF(line,
- "%" XSTR(COMMAND_SIZE) "s "
- "%" XSTR(ARG_SIZE) "s "
- "%" XSTR(ARG_SIZE) "s",
- cmd, arg1, arg2);
- if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
- Instruction* instr = reinterpret_cast<Instruction*>(sim_->get_pc());
- if (!(instr->IsTrap()) ||
- instr->InstructionBits() == rtCallRedirInstr) {
- sim_->InstructionDecode(
- reinterpret_cast<Instruction*>(sim_->get_pc()));
- } else {
- // Allow si to jump over generated breakpoints.
- PrintF("/!\\ Jumping over generated breakpoint.\n");
- sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
- }
- } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
- // Execute the one instruction we broke at with breakpoints disabled.
- sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
- // Leave the debugger shell.
- done = true;
- } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
- if (argc == 2) {
- int32_t value;
- float fvalue;
- if (strcmp(arg1, "all") == 0) {
- PrintAllRegs();
- } else if (strcmp(arg1, "allf") == 0) {
- PrintAllRegsIncludingFPU();
- } else {
- int regnum = Registers::Number(arg1);
- int fpuregnum = FPURegisters::Number(arg1);
-
- if (regnum != kInvalidRegister) {
- value = GetRegisterValue(regnum);
- PrintF("%s: 0x%08x %d \n", arg1, value, value);
- } else if (fpuregnum != kInvalidFPURegister) {
- if (fpuregnum % 2 == 1) {
- value = GetFPURegisterValueInt(fpuregnum);
- fvalue = GetFPURegisterValueFloat(fpuregnum);
- PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
- } else {
- double dfvalue;
- int32_t lvalue1 = GetFPURegisterValueInt(fpuregnum);
- int32_t lvalue2 = GetFPURegisterValueInt(fpuregnum + 1);
- dfvalue = GetFPURegisterValueDouble(fpuregnum);
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n",
- FPURegisters::Name(fpuregnum+1),
- FPURegisters::Name(fpuregnum),
- lvalue1,
- lvalue2,
- dfvalue);
- }
- } else {
- PrintF("%s unrecognized\n", arg1);
- }
- }
- } else {
- if (argc == 3) {
- if (strcmp(arg2, "single") == 0) {
- int32_t value;
- float fvalue;
- int fpuregnum = FPURegisters::Number(arg1);
-
- if (fpuregnum != kInvalidFPURegister) {
- value = GetFPURegisterValueInt(fpuregnum);
- fvalue = GetFPURegisterValueFloat(fpuregnum);
- PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
- } else {
- PrintF("%s unrecognized\n", arg1);
- }
- } else {
- PrintF("print <fpu register> single\n");
- }
- } else {
- PrintF("print <register> or print <fpu register> single\n");
- }
- }
- } else if ((strcmp(cmd, "po") == 0)
- || (strcmp(cmd, "printobject") == 0)) {
- if (argc == 2) {
- int32_t value;
- if (GetValue(arg1, &value)) {
- Object* obj = reinterpret_cast<Object*>(value);
- PrintF("%s: \n", arg1);
-#ifdef DEBUG
- obj->PrintLn();
-#else
- obj->ShortPrint();
- PrintF("\n");
-#endif
- } else {
- PrintF("%s unrecognized\n", arg1);
- }
- } else {
- PrintF("printobject <value>\n");
- }
- } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
- int32_t* cur = NULL;
- int32_t* end = NULL;
- int next_arg = 1;
-
- if (strcmp(cmd, "stack") == 0) {
- cur = reinterpret_cast<int32_t*>(sim_->get_register(Simulator::sp));
- } else { // "mem"
- int32_t value;
- if (!GetValue(arg1, &value)) {
- PrintF("%s unrecognized\n", arg1);
- continue;
- }
- cur = reinterpret_cast<int32_t*>(value);
- next_arg++;
- }
-
- int32_t words;
- if (argc == next_arg) {
- words = 10;
- } else if (argc == next_arg + 1) {
- if (!GetValue(argv[next_arg], &words)) {
- words = 10;
- }
- }
- end = cur + words;
-
- while (cur < end) {
- PrintF(" 0x%08x: 0x%08x %10d\n",
- reinterpret_cast<intptr_t>(cur), *cur, *cur);
- cur++;
- }
-
- } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0)) {
- disasm::NameConverter converter;
- disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
- v8::internal::EmbeddedVector<char, 256> buffer;
-
- byte_* cur = NULL;
- byte_* end = NULL;
-
- if (argc == 1) {
- cur = reinterpret_cast<byte_*>(sim_->get_pc());
- end = cur + (10 * Instruction::kInstrSize);
- } else if (argc == 2) {
- int32_t value;
- if (GetValue(arg1, &value)) {
- cur = reinterpret_cast<byte_*>(value);
- // no length parameter passed, assume 10 instructions
- end = cur + (10 * Instruction::kInstrSize);
- }
- } else {
- int32_t value1;
- int32_t value2;
- if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
- cur = reinterpret_cast<byte_*>(value1);
- end = cur + (value2 * Instruction::kInstrSize);
- }
- }
-
- while (cur < end) {
- dasm.InstructionDecode(buffer, cur);
- PrintF(" 0x%08x %s\n",
- reinterpret_cast<intptr_t>(cur), buffer.start());
- cur += Instruction::kInstrSize;
- }
- } else if (strcmp(cmd, "gdb") == 0) {
- PrintF("relinquishing control to gdb\n");
- v8::internal::OS::DebugBreak();
- PrintF("regaining control from gdb\n");
- } else if (strcmp(cmd, "break") == 0) {
- if (argc == 2) {
- int32_t value;
- if (GetValue(arg1, &value)) {
- if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
- PrintF("setting breakpoint failed\n");
- }
- } else {
- PrintF("%s unrecognized\n", arg1);
- }
- } else {
- PrintF("break <address>\n");
- }
- } else if (strcmp(cmd, "del") == 0) {
- if (!DeleteBreakpoint(NULL)) {
- PrintF("deleting breakpoint failed\n");
- }
- } else if (strcmp(cmd, "flags") == 0) {
- PrintF("No flags on MIPS !\n");
- } else if (strcmp(cmd, "unstop") == 0) {
- PrintF("Unstop command not implemented on MIPS.");
- } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) {
- // Print registers and disassemble
- PrintAllRegs();
- PrintF("\n");
-
- disasm::NameConverter converter;
- disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
- v8::internal::EmbeddedVector<char, 256> buffer;
-
- byte_* cur = NULL;
- byte_* end = NULL;
-
- if (argc == 1) {
- cur = reinterpret_cast<byte_*>(sim_->get_pc());
- end = cur + (10 * Instruction::kInstrSize);
- } else if (argc == 2) {
- int32_t value;
- if (GetValue(arg1, &value)) {
- cur = reinterpret_cast<byte_*>(value);
- // no length parameter passed, assume 10 instructions
- end = cur + (10 * Instruction::kInstrSize);
- }
- } else {
- int32_t value1;
- int32_t value2;
- if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
- cur = reinterpret_cast<byte_*>(value1);
- end = cur + (value2 * Instruction::kInstrSize);
- }
- }
-
- while (cur < end) {
- dasm.InstructionDecode(buffer, cur);
- PrintF(" 0x%08x %s\n",
- reinterpret_cast<intptr_t>(cur), buffer.start());
- cur += Instruction::kInstrSize;
- }
- } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
- PrintF("cont\n");
- PrintF(" continue execution (alias 'c')\n");
- PrintF("stepi\n");
- PrintF(" step one instruction (alias 'si')\n");
- PrintF("print <register>\n");
- PrintF(" print register content (alias 'p')\n");
- PrintF(" use register name 'all' to print all registers\n");
- PrintF("printobject <register>\n");
- PrintF(" print an object from a register (alias 'po')\n");
- PrintF("stack [<words>]\n");
- PrintF(" dump stack content, default dump 10 words)\n");
- PrintF("mem <address> [<words>]\n");
- PrintF(" dump memory content, default dump 10 words)\n");
- PrintF("flags\n");
- PrintF(" print flags\n");
- PrintF("disasm [<instructions>]\n");
- PrintF("disasm [[<address>] <instructions>]\n");
- PrintF(" disassemble code, default is 10 instructions from pc\n");
- PrintF("gdb\n");
- PrintF(" enter gdb\n");
- PrintF("break <address>\n");
- PrintF(" set a break point on the address\n");
- PrintF("del\n");
- PrintF(" delete the breakpoint\n");
- PrintF("unstop\n");
- PrintF(" ignore the stop instruction at the current location");
- PrintF(" from now on\n");
- } else {
- PrintF("Unknown command: %s\n", cmd);
- }
- }
- DeleteArray(line);
- }
-
- // Add all the breakpoints back to stop execution and enter the debugger
- // shell when hit.
- RedoBreakpoints();
-
-#undef COMMAND_SIZE
-#undef ARG_SIZE
-
-#undef STR
-#undef XSTR
-}
-
-
-static bool ICacheMatch(void* one, void* two) {
- ASSERT((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
- ASSERT((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
- return one == two;
-}
-
-
-static uint32_t ICacheHash(void* key) {
- return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
-}
-
-
-static bool AllOnOnePage(uintptr_t start, int size) {
- intptr_t start_page = (start & ~CachePage::kPageMask);
- intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
- return start_page == end_page;
-}
-
-
-void Simulator::FlushICache(v8::internal::HashMap* i_cache,
- void* start_addr,
- size_t size) {
- intptr_t start = reinterpret_cast<intptr_t>(start_addr);
- int intra_line = (start & CachePage::kLineMask);
- start -= intra_line;
- size += intra_line;
- size = ((size - 1) | CachePage::kLineMask) + 1;
- int offset = (start & CachePage::kPageMask);
- while (!AllOnOnePage(start, size - 1)) {
- int bytes_to_flush = CachePage::kPageSize - offset;
- FlushOnePage(i_cache, start, bytes_to_flush);
- start += bytes_to_flush;
- size -= bytes_to_flush;
- ASSERT_EQ(0, start & CachePage::kPageMask);
- offset = 0;
- }
- if (size != 0) {
- FlushOnePage(i_cache, start, size);
- }
-}
-
-
-CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
- v8::internal::HashMap::Entry* entry = i_cache->Lookup(page,
- ICacheHash(page),
- true);
- if (entry->value == NULL) {
- CachePage* new_page = new CachePage();
- entry->value = new_page;
- }
- return reinterpret_cast<CachePage*>(entry->value);
-}
-
-
-// Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
- intptr_t start,
- int size) {
- ASSERT(size <= CachePage::kPageSize);
- ASSERT(AllOnOnePage(start, size - 1));
- ASSERT((start & CachePage::kLineMask) == 0);
- ASSERT((size & CachePage::kLineMask) == 0);
- void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
- int offset = (start & CachePage::kPageMask);
- CachePage* cache_page = GetCachePage(i_cache, page);
- char* valid_bytemap = cache_page->ValidityByte(offset);
- memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
-}
-
-
-void Simulator::CheckICache(v8::internal::HashMap* i_cache,
- Instruction* instr) {
- intptr_t address = reinterpret_cast<intptr_t>(instr);
- void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
- void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
- int offset = (address & CachePage::kPageMask);
- CachePage* cache_page = GetCachePage(i_cache, page);
- char* cache_valid_byte = cache_page->ValidityByte(offset);
- bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
- char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
- if (cache_hit) {
- // Check that the data in memory matches the contents of the I-cache.
- CHECK(memcmp(reinterpret_cast<void*>(instr),
- cache_page->CachedData(offset),
- Instruction::kInstrSize) == 0);
- } else {
- // Cache miss. Load memory into the cache.
- memcpy(cached_line, line, CachePage::kLineLength);
- *cache_valid_byte = CachePage::LINE_VALID;
- }
-}
-
-
-void Simulator::Initialize() {
- if (Isolate::Current()->simulator_initialized()) return;
- Isolate::Current()->set_simulator_initialized(true);
- ::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
-}
-
-
-Simulator::Simulator() : isolate_(Isolate::Current()) {
- i_cache_ = isolate_->simulator_i_cache();
- if (i_cache_ == NULL) {
- i_cache_ = new v8::internal::HashMap(&ICacheMatch);
- isolate_->set_simulator_i_cache(i_cache_);
- }
- Initialize();
- // Setup simulator support first. Some of this information is needed to
- // setup the architecture state.
- stack_size_ = 1 * 1024*1024; // allocate 1MB for stack
- stack_ = reinterpret_cast<char*>(malloc(stack_size_));
- pc_modified_ = false;
- icount_ = 0;
- break_count_ = 0;
- break_pc_ = NULL;
- break_instr_ = 0;
-
- // Setup architecture state.
- // All registers are initialized to zero to start with.
- for (int i = 0; i < kNumSimuRegisters; i++) {
- registers_[i] = 0;
- }
- for (int i = 0; i < kNumFPURegisters; i++) {
- FPUregisters_[i] = 0;
- }
- FCSR_ = 0;
-
- // The sp is initialized to point to the bottom (high address) of the
- // allocated stack area. To be safe in potential stack underflows we leave
- // some buffer below.
- registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size_ - 64;
- // The ra and pc are initialized to a known bad value that will cause an
- // access violation if the simulator ever tries to execute it.
- registers_[pc] = bad_ra;
- registers_[ra] = bad_ra;
- InitializeCoverage();
- for (int i = 0; i < kNumExceptions; i++) {
- exceptions[i] = 0;
- }
-}
-
-
-// When the generated code calls an external reference we need to catch that in
-// the simulator. The external reference will be a function compiled for the
-// host architecture. We need to call that function instead of trying to
-// execute it with the simulator. We do that by redirecting the external
-// reference to a swi (software-interrupt) instruction that is handled by
-// the simulator. We write the original destination of the jump just at a known
-// offset from the swi instruction so the simulator knows what to call.
-class Redirection {
- public:
- Redirection(void* external_function, ExternalReference::Type type)
- : external_function_(external_function),
- swi_instruction_(rtCallRedirInstr),
- type_(type),
- next_(NULL) {
- Isolate* isolate = Isolate::Current();
- next_ = isolate->simulator_redirection();
- Simulator::current(isolate)->
- FlushICache(isolate->simulator_i_cache(),
- reinterpret_cast<void*>(&swi_instruction_),
- Instruction::kInstrSize);
- isolate->set_simulator_redirection(this);
- }
-
- void* address_of_swi_instruction() {
- return reinterpret_cast<void*>(&swi_instruction_);
- }
-
- void* external_function() { return external_function_; }
- ExternalReference::Type type() { return type_; }
-
- static Redirection* Get(void* external_function,
- ExternalReference::Type type) {
- Isolate* isolate = Isolate::Current();
- Redirection* current = isolate->simulator_redirection();
- for (; current != NULL; current = current->next_) {
- if (current->external_function_ == external_function) return current;
- }
- return new Redirection(external_function, type);
- }
-
- static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
- char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
- char* addr_of_redirection =
- addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
- return reinterpret_cast<Redirection*>(addr_of_redirection);
- }
-
- private:
- void* external_function_;
- uint32_t swi_instruction_;
- ExternalReference::Type type_;
- Redirection* next_;
-};
-
-
-void* Simulator::RedirectExternalReference(void* external_function,
- ExternalReference::Type type) {
- Redirection* redirection = Redirection::Get(external_function, type);
- return redirection->address_of_swi_instruction();
-}
-
-
-// Get the active Simulator for the current thread.
-Simulator* Simulator::current(Isolate* isolate) {
- v8::internal::Isolate::PerIsolateThreadData* isolate_data =
- Isolate::CurrentPerIsolateThreadData();
- if (isolate_data == NULL) {
- Isolate::EnterDefaultIsolate();
- isolate_data = Isolate::CurrentPerIsolateThreadData();
- }
- ASSERT(isolate_data != NULL);
-
- Simulator* sim = isolate_data->simulator();
- if (sim == NULL) {
- // TODO(146): delete the simulator object when a thread/isolate goes away.
- sim = new Simulator();
- isolate_data->set_simulator(sim);
- }
- return sim;
-}
-
-
-// Sets the register in the architecture state. It will also deal with updating
-// Simulator internal state for special registers such as PC.
-void Simulator::set_register(int reg, int32_t value) {
- ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
- if (reg == pc) {
- pc_modified_ = true;
- }
-
- // zero register always hold 0.
- registers_[reg] = (reg == 0) ? 0 : value;
-}
-
-
-void Simulator::set_fpu_register(int fpureg, int32_t value) {
- ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
- FPUregisters_[fpureg] = value;
-}
-
-
-void Simulator::set_fpu_register_float(int fpureg, float value) {
- ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
- *BitCast<float*>(&FPUregisters_[fpureg]) = value;
-}
-
-
-void Simulator::set_fpu_register_double(int fpureg, double value) {
- ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
- *BitCast<double*>(&FPUregisters_[fpureg]) = value;
-}
-
-
-// Get the register from the architecture state. This function does handle
-// the special case of accessing the PC register.
-int32_t Simulator::get_register(int reg) const {
- ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
- if (reg == 0)
- return 0;
- else
- return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0);
-}
-
-
-int32_t Simulator::get_fpu_register(int fpureg) const {
- ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return FPUregisters_[fpureg];
-}
-
-
-int64_t Simulator::get_fpu_register_long(int fpureg) const {
- ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
- return *BitCast<int64_t*>(
- const_cast<int32_t*>(&FPUregisters_[fpureg]));
-}
-
-
-float Simulator::get_fpu_register_float(int fpureg) const {
- ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return *BitCast<float*>(
- const_cast<int32_t*>(&FPUregisters_[fpureg]));
-}
-
-
-double Simulator::get_fpu_register_double(int fpureg) const {
- ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
- return *BitCast<double*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
-}
-
-
-// Helper functions for setting and testing the FCSR register's bits.
-void Simulator::set_fcsr_bit(uint32_t cc, bool value) {
- if (value) {
- FCSR_ |= (1 << cc);
- } else {
- FCSR_ &= ~(1 << cc);
- }
-}
-
-
-bool Simulator::test_fcsr_bit(uint32_t cc) {
- return FCSR_ & (1 << cc);
-}
-
-
-// Sets the rounding error codes in FCSR based on the result of the rounding.
-// Returns true if the operation was invalid.
-bool Simulator::set_fcsr_round_error(double original, double rounded) {
- if (!isfinite(original) ||
- rounded > LONG_MAX ||
- rounded < LONG_MIN) {
- set_fcsr_bit(6, true); // Invalid operation.
- return true;
- } else if (original != static_cast<double>(rounded)) {
- set_fcsr_bit(2, true); // Inexact.
- }
- return false;
-}
-
-
-// Raw access to the PC register.
-void Simulator::set_pc(int32_t value) {
- pc_modified_ = true;
- registers_[pc] = value;
-}
-
-
-bool Simulator::has_bad_pc() const {
- return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
-}
-
-
-// Raw access to the PC register without the special adjustment when reading.
-int32_t Simulator::get_pc() const {
- return registers_[pc];
-}
-
-
-// The MIPS cannot do unaligned reads and writes. On some MIPS platforms an
-// interrupt is caused. On others it does a funky rotation thing. For now we
-// simply disallow unaligned reads, but at some point we may want to move to
-// emulating the rotate behaviour. Note that simulator runs have the runtime
-// system running directly on the host system and only generated code is
-// executed in the simulator. Since the host is typically IA32 we will not
-// get the correct MIPS-like behaviour on unaligned accesses.
-
-int Simulator::ReadW(int32_t addr, Instruction* instr) {
- if (addr >=0 && addr < 0x400) {
- // this has to be a NULL-dereference
- MipsDebugger dbg(this);
- dbg.Debug();
- }
- if ((addr & kPointerAlignmentMask) == 0) {
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- return *ptr;
- }
- PrintF("Unaligned read at 0x%08x, pc=%p\n", addr,
- reinterpret_cast<void*>(instr));
- MipsDebugger dbg(this);
- dbg.Debug();
- return 0;
-}
-
-
-void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
- if (addr >= 0 && addr < 0x400) {
- // this has to be a NULL-dereference
- MipsDebugger dbg(this);
- dbg.Debug();
- }
- if ((addr & kPointerAlignmentMask) == 0) {
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- *ptr = value;
- return;
- }
- PrintF("Unaligned write at 0x%08x, pc=%p\n", addr,
- reinterpret_cast<void*>(instr));
- MipsDebugger dbg(this);
- dbg.Debug();
-}
-
-
-double Simulator::ReadD(int32_t addr, Instruction* instr) {
- if ((addr & kDoubleAlignmentMask) == 0) {
- double* ptr = reinterpret_cast<double*>(addr);
- return *ptr;
- }
- PrintF("Unaligned (double) read at 0x%08x, pc=%p\n", addr,
- reinterpret_cast<void*>(instr));
- OS::Abort();
- return 0;
-}
-
-
-void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
- if ((addr & kDoubleAlignmentMask) == 0) {
- double* ptr = reinterpret_cast<double*>(addr);
- *ptr = value;
- return;
- }
- PrintF("Unaligned (double) write at 0x%08x, pc=%p\n", addr,
- reinterpret_cast<void*>(instr));
- OS::Abort();
-}
-
-
-uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
- if ((addr & 1) == 0) {
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- return *ptr;
- }
- PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr,
- reinterpret_cast<void*>(instr));
- OS::Abort();
- return 0;
-}
-
-
-int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
- if ((addr & 1) == 0) {
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- return *ptr;
- }
- PrintF("Unaligned signed halfword read at 0x%08x, pc=%p\n", addr,
- reinterpret_cast<void*>(instr));
- OS::Abort();
- return 0;
-}
-
-
-void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
- if ((addr & 1) == 0) {
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- *ptr = value;
- return;
- }
- PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr,
- reinterpret_cast<void*>(instr));
- OS::Abort();
-}
-
-
-void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
- if ((addr & 1) == 0) {
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- *ptr = value;
- return;
- }
- PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr,
- reinterpret_cast<void*>(instr));
- OS::Abort();
-}
-
-
-uint32_t Simulator::ReadBU(int32_t addr) {
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- return *ptr & 0xff;
-}
-
-
-int32_t Simulator::ReadB(int32_t addr) {
- int8_t* ptr = reinterpret_cast<int8_t*>(addr);
- return *ptr;
-}
-
-
-void Simulator::WriteB(int32_t addr, uint8_t value) {
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- *ptr = value;
-}
-
-
-void Simulator::WriteB(int32_t addr, int8_t value) {
- int8_t* ptr = reinterpret_cast<int8_t*>(addr);
- *ptr = value;
-}
-
-
-// Returns the limit of the stack area to enable checking for stack overflows.
-uintptr_t Simulator::StackLimit() const {
- // Leave a safety margin of 256 bytes to prevent overrunning the stack when
- // pushing values.
- return reinterpret_cast<uintptr_t>(stack_) + 256;
-}
-
-
-// Unsupported instructions use Format to print an error and stop execution.
-void Simulator::Format(Instruction* instr, const char* format) {
- PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
- reinterpret_cast<intptr_t>(instr), format);
- UNIMPLEMENTED_MIPS();
-}
-
-
-// Calls into the V8 runtime are based on this very simple interface.
-// Note: To be able to return two values from some calls the code in runtime.cc
-// uses the ObjectPair which is essentially two 32-bit values stuffed into a
-// 64-bit value. With the code below we assume that all runtime calls return
-// 64 bits of result. If they don't, the v1 result register contains a bogus
-// value, which is fine because it is caller-saved.
-typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
- int32_t arg1,
- int32_t arg2,
- int32_t arg3,
- int32_t arg4,
- int32_t arg5);
-typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
- int32_t arg1,
- int32_t arg2,
- int32_t arg3);
-
-// Software interrupt instructions are used by the simulator to call into the
-// C-based V8 runtime. They are also used for debugging with simulator.
-void Simulator::SoftwareInterrupt(Instruction* instr) {
- // There are several instructions that could get us here,
- // the break_ instruction, or several variants of traps. All
- // Are "SPECIAL" class opcode, and are distinuished by function.
- int32_t func = instr->FunctionFieldRaw();
- int32_t code = (func == BREAK) ? instr->Bits(25, 6) : -1;
-
- // We first check if we met a call_rt_redirected.
- if (instr->InstructionBits() == rtCallRedirInstr) {
- // Check if stack is aligned. Error if not aligned is reported below to
- // include information on the function called.
- bool stack_aligned =
- (get_register(sp)
- & (::v8::internal::FLAG_sim_stack_alignment - 1)) == 0;
- Redirection* redirection = Redirection::FromSwiInstruction(instr);
- int32_t arg0 = get_register(a0);
- int32_t arg1 = get_register(a1);
- int32_t arg2 = get_register(a2);
- int32_t arg3 = get_register(a3);
- int32_t arg4 = 0;
- int32_t arg5 = 0;
-
- // Need to check if sp is valid before assigning arg4, arg5.
- // This is a fix for cctest test-api/CatchStackOverflow which causes
- // the stack to overflow. For some reason arm doesn't need this
- // stack check here.
- int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
- int32_t* stack = reinterpret_cast<int32_t*>(stack_);
- if (stack_pointer >= stack && stack_pointer < stack + stack_size_) {
- arg4 = stack_pointer[0];
- arg5 = stack_pointer[1];
- }
- // This is dodgy but it works because the C entry stubs are never moved.
- // See comment in codegen-arm.cc and bug 1242173.
- int32_t saved_ra = get_register(ra);
-
- intptr_t external =
- reinterpret_cast<int32_t>(redirection->external_function());
-
- // Based on CpuFeatures::IsSupported(FPU), Mips will use either hardware
- // FPU, or gcc soft-float routines. Hardware FPU is simulated in this
- // simulator. Soft-float has additional abstraction of ExternalReference,
- // to support serialization. Finally, when simulated on x86 host, the
- // x86 softfloat routines are used, and this Redirection infrastructure
- // lets simulated-mips make calls into x86 C code.
- // When doing that, the 'double' return type must be handled differently
- // than the usual int64_t return. The data is returned in different
- // registers and cannot be cast from one type to the other. However, the
- // calling arguments are passed the same way in both cases.
- if (redirection->type() == ExternalReference::FP_RETURN_CALL) {
- SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- PrintF("Call to host function at %p with args %08x:%08x %08x:%08x",
- FUNCTION_ADDR(target), arg0, arg1, arg2, arg3);
- if (!stack_aligned) {
- PrintF(" with unaligned stack %08x\n", get_register(sp));
- }
- PrintF("\n");
- }
- double result = target(arg0, arg1, arg2, arg3);
- // fp result -> registers v0 and v1.
- int32_t gpreg_pair[2];
- memcpy(&gpreg_pair[0], &result, 2 * sizeof(int32_t));
- set_register(v0, gpreg_pair[0]);
- set_register(v1, gpreg_pair[1]);
- } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
- PrintF("Mips does not yet support ExternalReference::DIRECT_API_CALL\n");
- ASSERT(redirection->type() != ExternalReference::DIRECT_API_CALL);
- } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
- PrintF("Mips does not support ExternalReference::DIRECT_GETTER_CALL\n");
- ASSERT(redirection->type() != ExternalReference::DIRECT_GETTER_CALL);
- } else {
- // Builtin call.
- ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- PrintF(
- "Call to host function at %p: %08x, %08x, %08x, %08x, %08x, %08x",
- FUNCTION_ADDR(target),
- arg0,
- arg1,
- arg2,
- arg3,
- arg4,
- arg5);
- if (!stack_aligned) {
- PrintF(" with unaligned stack %08x\n", get_register(sp));
- }
- PrintF("\n");
- }
-
- int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
- set_register(v0, static_cast<int32_t>(result));
- set_register(v1, static_cast<int32_t>(result >> 32));
- }
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08x : %08x\n", get_register(v1), get_register(v0));
- }
- set_register(ra, saved_ra);
- set_pc(get_register(ra));
-
- } else if (func == BREAK && code >= 0 && code < 16) {
- // First 16 break_ codes interpreted as debug markers.
- MipsDebugger dbg(this);
- ++break_count_;
- PrintF("\n---- break %d marker: %3d (instr count: %8d) ----------"
- "----------------------------------",
- code, break_count_, icount_);
- dbg.PrintAllRegs(); // Print registers and continue running.
- } else {
- // All remaining break_ codes, and all traps are handled here.
- MipsDebugger dbg(this);
- dbg.Debug();
- }
-}
-
-
-void Simulator::SignalExceptions() {
- for (int i = 1; i < kNumExceptions; i++) {
- if (exceptions[i] != 0) {
- V8_Fatal(__FILE__, __LINE__, "Error: Exception %i raised.", i);
- }
- }
-}
-
-
-// Handle execution based on instruction types.
-
-void Simulator::ConfigureTypeRegister(Instruction* instr,
- int32_t& alu_out,
- int64_t& i64hilo,
- uint64_t& u64hilo,
- int32_t& next_pc,
- bool& do_interrupt) {
- // Every local variable declared here needs to be const.
- // This is to make sure that changed values are sent back to
- // DecodeTypeRegister correctly.
-
- // Instruction fields.
- const Opcode op = instr->OpcodeFieldRaw();
- const int32_t rs_reg = instr->RsValue();
- const int32_t rs = get_register(rs_reg);
- const uint32_t rs_u = static_cast<uint32_t>(rs);
- const int32_t rt_reg = instr->RtValue();
- const int32_t rt = get_register(rt_reg);
- const uint32_t rt_u = static_cast<uint32_t>(rt);
- const int32_t rd_reg = instr->RdValue();
- const uint32_t sa = instr->SaValue();
-
- const int32_t fs_reg = instr->FsValue();
-
-
- // ---------- Configuration
- switch (op) {
- case COP1: // Coprocessor instructions
- switch (instr->RsFieldRaw()) {
- case BC1: // Handled in DecodeTypeImmed, should never come here.
- UNREACHABLE();
- break;
- case CFC1:
- // At the moment only FCSR is supported.
- ASSERT(fs_reg == kFCSRRegister);
- alu_out = FCSR_;
- break;
- case MFC1:
- alu_out = get_fpu_register(fs_reg);
- break;
- case MFHC1:
- UNIMPLEMENTED_MIPS();
- break;
- case CTC1:
- case MTC1:
- case MTHC1:
- // Do the store in the execution step.
- break;
- case S:
- case D:
- case W:
- case L:
- case PS:
- // Do everything in the execution step.
- break;
- default:
- UNIMPLEMENTED_MIPS();
- };
- break;
- case SPECIAL:
- switch (instr->FunctionFieldRaw()) {
- case JR:
- case JALR:
- next_pc = get_register(instr->RsValue());
- break;
- case SLL:
- alu_out = rt << sa;
- break;
- case SRL:
- if (rs_reg == 0) {
- // Regular logical right shift of a word by a fixed number of
- // bits instruction. RS field is always equal to 0.
- alu_out = rt_u >> sa;
- } else {
- // Logical right-rotate of a word by a fixed number of bits. This
- // is special case of SRL instruction, added in MIPS32 Release 2.
- // RS field is equal to 00001
- alu_out = (rt_u >> sa) | (rt_u << (32 - sa));
- }
- break;
- case SRA:
- alu_out = rt >> sa;
- break;
- case SLLV:
- alu_out = rt << rs;
- break;
- case SRLV:
- if (sa == 0) {
- // Regular logical right-shift of a word by a variable number of
- // bits instruction. SA field is always equal to 0.
- alu_out = rt_u >> rs;
- } else {
- // Logical right-rotate of a word by a variable number of bits.
- // This is special case od SRLV instruction, added in MIPS32
- // Release 2. SA field is equal to 00001
- alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u));
- }
- break;
- case SRAV:
- alu_out = rt >> rs;
- break;
- case MFHI:
- alu_out = get_register(HI);
- break;
- case MFLO:
- alu_out = get_register(LO);
- break;
- case MULT:
- i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
- break;
- case MULTU:
- u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
- break;
- case DIV:
- case DIVU:
- exceptions[kDivideByZero] = rt == 0;
- break;
- case ADD:
- if (HaveSameSign(rs, rt)) {
- if (rs > 0) {
- exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue - rt);
- } else if (rs < 0) {
- exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue - rt);
- }
- }
- alu_out = rs + rt;
- break;
- case ADDU:
- alu_out = rs + rt;
- break;
- case SUB:
- if (!HaveSameSign(rs, rt)) {
- if (rs > 0) {
- exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue + rt);
- } else if (rs < 0) {
- exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue + rt);
- }
- }
- alu_out = rs - rt;
- break;
- case SUBU:
- alu_out = rs - rt;
- break;
- case AND:
- alu_out = rs & rt;
- break;
- case OR:
- alu_out = rs | rt;
- break;
- case XOR:
- alu_out = rs ^ rt;
- break;
- case NOR:
- alu_out = ~(rs | rt);
- break;
- case SLT:
- alu_out = rs < rt ? 1 : 0;
- break;
- case SLTU:
- alu_out = rs_u < rt_u ? 1 : 0;
- break;
- // Break and trap instructions
- case BREAK:
-
- do_interrupt = true;
- break;
- case TGE:
- do_interrupt = rs >= rt;
- break;
- case TGEU:
- do_interrupt = rs_u >= rt_u;
- break;
- case TLT:
- do_interrupt = rs < rt;
- break;
- case TLTU:
- do_interrupt = rs_u < rt_u;
- break;
- case TEQ:
- do_interrupt = rs == rt;
- break;
- case TNE:
- do_interrupt = rs != rt;
- break;
- case MOVN:
- case MOVZ:
- case MOVCI:
- // No action taken on decode.
- break;
- default:
- UNREACHABLE();
- };
- break;
- case SPECIAL2:
- switch (instr->FunctionFieldRaw()) {
- case MUL:
- alu_out = rs_u * rt_u; // Only the lower 32 bits are kept.
- break;
- case CLZ:
- alu_out = __builtin_clz(rs_u);
- break;
- default:
- UNREACHABLE();
- };
- break;
- case SPECIAL3:
- switch (instr->FunctionFieldRaw()) {
- case INS: { // Mips32r2 instruction.
- // Interpret Rd field as 5-bit msb of insert.
- uint16_t msb = rd_reg;
- // Interpret sa field as 5-bit lsb of insert.
- uint16_t lsb = sa;
- uint16_t size = msb - lsb + 1;
- uint32_t mask = (1 << size) - 1;
- alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb);
- break;
- }
- case EXT: { // Mips32r2 instruction.
- // Interpret Rd field as 5-bit msb of extract.
- uint16_t msb = rd_reg;
- // Interpret sa field as 5-bit lsb of extract.
- uint16_t lsb = sa;
- uint16_t size = msb + 1;
- uint32_t mask = (1 << size) - 1;
- alu_out = (rs_u & (mask << lsb)) >> lsb;
- break;
- }
- default:
- UNREACHABLE();
- };
- break;
- default:
- UNREACHABLE();
- };
-}
-
-
-void Simulator::DecodeTypeRegister(Instruction* instr) {
- // Instruction fields.
- const Opcode op = instr->OpcodeFieldRaw();
- const int32_t rs_reg = instr->RsValue();
- const int32_t rs = get_register(rs_reg);
- const uint32_t rs_u = static_cast<uint32_t>(rs);
- const int32_t rt_reg = instr->RtValue();
- const int32_t rt = get_register(rt_reg);
- const uint32_t rt_u = static_cast<uint32_t>(rt);
- const int32_t rd_reg = instr->RdValue();
-
- const int32_t fs_reg = instr->FsValue();
- const int32_t ft_reg = instr->FtValue();
- const int32_t fd_reg = instr->FdValue();
- int64_t i64hilo = 0;
- uint64_t u64hilo = 0;
-
- // ALU output
- // It should not be used as is. Instructions using it should always
- // initialize it first.
- int32_t alu_out = 0x12345678;
-
- // For break and trap instructions.
- bool do_interrupt = false;
-
- // For jr and jalr
- // Get current pc.
- int32_t current_pc = get_pc();
- // Next pc
- int32_t next_pc = 0;
-
- // Setup the variables if needed before executing the instruction.
- ConfigureTypeRegister(instr,
- alu_out,
- i64hilo,
- u64hilo,
- next_pc,
- do_interrupt);
-
- // ---------- Raise exceptions triggered.
- SignalExceptions();
-
- // ---------- Execution
- switch (op) {
- case COP1:
- switch (instr->RsFieldRaw()) {
- case BC1: // branch on coprocessor condition
- UNREACHABLE();
- break;
- case CFC1:
- set_register(rt_reg, alu_out);
- case MFC1:
- set_register(rt_reg, alu_out);
- break;
- case MFHC1:
- UNIMPLEMENTED_MIPS();
- break;
- case CTC1:
- // At the moment only FCSR is supported.
- ASSERT(fs_reg == kFCSRRegister);
- FCSR_ = registers_[rt_reg];
- break;
- case MTC1:
- FPUregisters_[fs_reg] = registers_[rt_reg];
- break;
- case MTHC1:
- UNIMPLEMENTED_MIPS();
- break;
- case S:
- float f;
- switch (instr->FunctionFieldRaw()) {
- case CVT_D_S:
- f = get_fpu_register_float(fs_reg);
- set_fpu_register_double(fd_reg, static_cast<double>(f));
- break;
- case CVT_W_S:
- case CVT_L_S:
- case TRUNC_W_S:
- case TRUNC_L_S:
- case ROUND_W_S:
- case ROUND_L_S:
- case FLOOR_W_S:
- case FLOOR_L_S:
- case CEIL_W_S:
- case CEIL_L_S:
- case CVT_PS_S:
- UNIMPLEMENTED_MIPS();
- break;
- default:
- UNREACHABLE();
- }
- break;
- case D:
- double ft, fs;
- uint32_t cc, fcsr_cc;
- int64_t i64;
- fs = get_fpu_register_double(fs_reg);
- ft = get_fpu_register_double(ft_reg);
- cc = instr->FCccValue();
- fcsr_cc = get_fcsr_condition_bit(cc);
- switch (instr->FunctionFieldRaw()) {
- case ADD_D:
- set_fpu_register_double(fd_reg, fs + ft);
- break;
- case SUB_D:
- set_fpu_register_double(fd_reg, fs - ft);
- break;
- case MUL_D:
- set_fpu_register_double(fd_reg, fs * ft);
- break;
- case DIV_D:
- set_fpu_register_double(fd_reg, fs / ft);
- break;
- case ABS_D:
- set_fpu_register_double(fd_reg, fs < 0 ? -fs : fs);
- break;
- case MOV_D:
- set_fpu_register_double(fd_reg, fs);
- break;
- case NEG_D:
- set_fpu_register_double(fd_reg, -fs);
- break;
- case SQRT_D:
- set_fpu_register_double(fd_reg, sqrt(fs));
- break;
- case C_UN_D:
- set_fcsr_bit(fcsr_cc, isnan(fs) || isnan(ft));
- break;
- case C_EQ_D:
- set_fcsr_bit(fcsr_cc, (fs == ft));
- break;
- case C_UEQ_D:
- set_fcsr_bit(fcsr_cc, (fs == ft) || (isnan(fs) || isnan(ft)));
- break;
- case C_OLT_D:
- set_fcsr_bit(fcsr_cc, (fs < ft));
- break;
- case C_ULT_D:
- set_fcsr_bit(fcsr_cc, (fs < ft) || (isnan(fs) || isnan(ft)));
- break;
- case C_OLE_D:
- set_fcsr_bit(fcsr_cc, (fs <= ft));
- break;
- case C_ULE_D:
- set_fcsr_bit(fcsr_cc, (fs <= ft) || (isnan(fs) || isnan(ft)));
- break;
- case CVT_W_D: // Convert double to word.
- // Rounding modes are not yet supported.
- ASSERT((FCSR_ & 3) == 0);
- // In rounding mode 0 it should behave like ROUND.
- case ROUND_W_D: // Round double to word.
- {
- double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
- int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register(fd_reg, result);
- if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPUInvalidResult);
- }
- }
- break;
- case TRUNC_W_D: // Truncate double to word (round towards 0).
- {
- int32_t result = static_cast<int32_t>(fs);
- set_fpu_register(fd_reg, result);
- if (set_fcsr_round_error(fs, static_cast<double>(result))) {
- set_fpu_register(fd_reg, kFPUInvalidResult);
- }
- }
- break;
- case FLOOR_W_D: // Round double to word towards negative infinity.
- {
- double rounded = floor(fs);
- int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register(fd_reg, result);
- if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPUInvalidResult);
- }
- }
- break;
- case CEIL_W_D: // Round double to word towards positive infinity.
- {
- double rounded = ceil(fs);
- int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register(fd_reg, result);
- if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPUInvalidResult);
- }
- }
- break;
- case CVT_S_D: // Convert double to float (single).
- set_fpu_register_float(fd_reg, static_cast<float>(fs));
- break;
- case CVT_L_D: // Mips32r2: Truncate double to 64-bit long-word.
- i64 = static_cast<int64_t>(fs);
- set_fpu_register(fd_reg, i64 & 0xffffffff);
- set_fpu_register(fd_reg + 1, i64 >> 32);
- break;
- case TRUNC_L_D: // Mips32r2 instruction.
- i64 = static_cast<int64_t>(fs);
- set_fpu_register(fd_reg, i64 & 0xffffffff);
- set_fpu_register(fd_reg + 1, i64 >> 32);
- break;
- case ROUND_L_D: { // Mips32r2 instruction.
- double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
- i64 = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg, i64 & 0xffffffff);
- set_fpu_register(fd_reg + 1, i64 >> 32);
- break;
- }
- case FLOOR_L_D: // Mips32r2 instruction.
- i64 = static_cast<int64_t>(floor(fs));
- set_fpu_register(fd_reg, i64 & 0xffffffff);
- set_fpu_register(fd_reg + 1, i64 >> 32);
- break;
- case CEIL_L_D: // Mips32r2 instruction.
- i64 = static_cast<int64_t>(ceil(fs));
- set_fpu_register(fd_reg, i64 & 0xffffffff);
- set_fpu_register(fd_reg + 1, i64 >> 32);
- break;
- case C_F_D:
- UNIMPLEMENTED_MIPS();
- break;
- default:
- UNREACHABLE();
- }
- break;
- case W:
- switch (instr->FunctionFieldRaw()) {
- case CVT_S_W: // Convert word to float (single).
- alu_out = get_fpu_register(fs_reg);
- set_fpu_register_float(fd_reg, static_cast<float>(alu_out));
- break;
- case CVT_D_W: // Convert word to double.
- alu_out = get_fpu_register(fs_reg);
- set_fpu_register_double(fd_reg, static_cast<double>(alu_out));
- break;
- default:
- UNREACHABLE();
- };
- break;
- case L:
- switch (instr->FunctionFieldRaw()) {
- case CVT_D_L: // Mips32r2 instruction.
- // Watch the signs here, we want 2 32-bit vals
- // to make a sign-64.
- i64 = (uint32_t) get_fpu_register(fs_reg);
- i64 |= ((int64_t) get_fpu_register(fs_reg + 1) << 32);
- set_fpu_register_double(fd_reg, static_cast<double>(i64));
- break;
- case CVT_S_L:
- UNIMPLEMENTED_MIPS();
- break;
- default:
- UNREACHABLE();
- }
- break;
- case PS:
- break;
- default:
- UNREACHABLE();
- };
- break;
- case SPECIAL:
- switch (instr->FunctionFieldRaw()) {
- case JR: {
- Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
- current_pc+Instruction::kInstrSize);
- BranchDelayInstructionDecode(branch_delay_instr);
- set_pc(next_pc);
- pc_modified_ = true;
- break;
- }
- case JALR: {
- Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
- current_pc+Instruction::kInstrSize);
- BranchDelayInstructionDecode(branch_delay_instr);
- set_register(31, current_pc + 2* Instruction::kInstrSize);
- set_pc(next_pc);
- pc_modified_ = true;
- break;
- }
- // Instructions using HI and LO registers.
- case MULT:
- set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
- set_register(HI, static_cast<int32_t>(i64hilo >> 32));
- break;
- case MULTU:
- set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
- set_register(HI, static_cast<int32_t>(u64hilo >> 32));
- break;
- case DIV:
- // Divide by zero was checked in the configuration step.
- set_register(LO, rs / rt);
- set_register(HI, rs % rt);
- break;
- case DIVU:
- set_register(LO, rs_u / rt_u);
- set_register(HI, rs_u % rt_u);
- break;
- // Break and trap instructions.
- case BREAK:
- case TGE:
- case TGEU:
- case TLT:
- case TLTU:
- case TEQ:
- case TNE:
- if (do_interrupt) {
- SoftwareInterrupt(instr);
- }
- break;
- // Conditional moves.
- case MOVN:
- if (rt) set_register(rd_reg, rs);
- break;
- case MOVCI: {
- uint32_t cc = instr->FCccValue();
- uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
- if (instr->Bit(16)) { // Read Tf bit
- if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
- } else {
- if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
- }
- break;
- }
- case MOVZ:
- if (!rt) set_register(rd_reg, rs);
- break;
- default: // For other special opcodes we do the default operation.
- set_register(rd_reg, alu_out);
- };
- break;
- case SPECIAL2:
- switch (instr->FunctionFieldRaw()) {
- case MUL:
- set_register(rd_reg, alu_out);
- // HI and LO are UNPREDICTABLE after the operation.
- set_register(LO, Unpredictable);
- set_register(HI, Unpredictable);
- break;
- default: // For other special2 opcodes we do the default operation.
- set_register(rd_reg, alu_out);
- }
- break;
- case SPECIAL3:
- switch (instr->FunctionFieldRaw()) {
- case INS:
- // Ins instr leaves result in Rt, rather than Rd.
- set_register(rt_reg, alu_out);
- break;
- case EXT:
- // Ext instr leaves result in Rt, rather than Rd.
- set_register(rt_reg, alu_out);
- break;
- default:
- UNREACHABLE();
- };
- break;
- // Unimplemented opcodes raised an error in the configuration step before,
- // so we can use the default here to set the destination register in common
- // cases.
- default:
- set_register(rd_reg, alu_out);
- };
-}
-
-
-// Type 2: instructions using a 16 bytes immediate. (eg: addi, beq)
-void Simulator::DecodeTypeImmediate(Instruction* instr) {
- // Instruction fields.
- Opcode op = instr->OpcodeFieldRaw();
- int32_t rs = get_register(instr->RsValue());
- uint32_t rs_u = static_cast<uint32_t>(rs);
- int32_t rt_reg = instr->RtValue(); // destination register
- int32_t rt = get_register(rt_reg);
- int16_t imm16 = instr->Imm16Value();
-
- int32_t ft_reg = instr->FtValue(); // destination register
-
- // Zero extended immediate.
- uint32_t oe_imm16 = 0xffff & imm16;
- // Sign extended immediate.
- int32_t se_imm16 = imm16;
-
- // Get current pc.
- int32_t current_pc = get_pc();
- // Next pc.
- int32_t next_pc = bad_ra;
-
- // Used for conditional branch instructions.
- bool do_branch = false;
- bool execute_branch_delay_instruction = false;
-
- // Used for arithmetic instructions.
- int32_t alu_out = 0;
- // Floating point.
- double fp_out = 0.0;
- uint32_t cc, cc_value, fcsr_cc;
-
- // Used for memory instructions.
- int32_t addr = 0x0;
- // Value to be written in memory
- uint32_t mem_value = 0x0;
-
- // ---------- Configuration (and execution for REGIMM)
- switch (op) {
- // ------------- COP1. Coprocessor instructions.
- case COP1:
- switch (instr->RsFieldRaw()) {
- case BC1: // Branch on coprocessor condition.
- cc = instr->FBccValue();
- fcsr_cc = get_fcsr_condition_bit(cc);
- cc_value = test_fcsr_bit(fcsr_cc);
- do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
- execute_branch_delay_instruction = true;
- // Set next_pc
- if (do_branch) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- } else {
- next_pc = current_pc + kBranchReturnOffset;
- }
- break;
- default:
- UNREACHABLE();
- };
- break;
- // ------------- REGIMM class
- case REGIMM:
- switch (instr->RtFieldRaw()) {
- case BLTZ:
- do_branch = (rs < 0);
- break;
- case BLTZAL:
- do_branch = rs < 0;
- break;
- case BGEZ:
- do_branch = rs >= 0;
- break;
- case BGEZAL:
- do_branch = rs >= 0;
- break;
- default:
- UNREACHABLE();
- };
- switch (instr->RtFieldRaw()) {
- case BLTZ:
- case BLTZAL:
- case BGEZ:
- case BGEZAL:
- // Branch instructions common part.
- execute_branch_delay_instruction = true;
- // Set next_pc
- if (do_branch) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- if (instr->IsLinkingInstruction()) {
- set_register(31, current_pc + kBranchReturnOffset);
- }
- } else {
- next_pc = current_pc + kBranchReturnOffset;
- }
- default:
- break;
- };
- break; // case REGIMM
- // ------------- Branch instructions
- // When comparing to zero, the encoding of rt field is always 0, so we don't
- // need to replace rt with zero.
- case BEQ:
- do_branch = (rs == rt);
- break;
- case BNE:
- do_branch = rs != rt;
- break;
- case BLEZ:
- do_branch = rs <= 0;
- break;
- case BGTZ:
- do_branch = rs > 0;
- break;
- // ------------- Arithmetic instructions
- case ADDI:
- if (HaveSameSign(rs, se_imm16)) {
- if (rs > 0) {
- exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue - se_imm16);
- } else if (rs < 0) {
- exceptions[kIntegerUnderflow] =
- rs < (Registers::kMinValue - se_imm16);
- }
- }
- alu_out = rs + se_imm16;
- break;
- case ADDIU:
- alu_out = rs + se_imm16;
- break;
- case SLTI:
- alu_out = (rs < se_imm16) ? 1 : 0;
- break;
- case SLTIU:
- alu_out = (rs_u < static_cast<uint32_t>(se_imm16)) ? 1 : 0;
- break;
- case ANDI:
- alu_out = rs & oe_imm16;
- break;
- case ORI:
- alu_out = rs | oe_imm16;
- break;
- case XORI:
- alu_out = rs ^ oe_imm16;
- break;
- case LUI:
- alu_out = (oe_imm16 << 16);
- break;
- // ------------- Memory instructions
- case LB:
- addr = rs + se_imm16;
- alu_out = ReadB(addr);
- break;
- case LH:
- addr = rs + se_imm16;
- alu_out = ReadH(addr, instr);
- break;
- case LWL: {
- // al_offset is an offset of the effective address within an aligned word
- uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
- uint8_t byte_shift = kPointerAlignmentMask - al_offset;
- uint32_t mask = (1 << byte_shift * 8) - 1;
- addr = rs + se_imm16 - al_offset;
- alu_out = ReadW(addr, instr);
- alu_out <<= byte_shift * 8;
- alu_out |= rt & mask;
- break;
- }
- case LW:
- addr = rs + se_imm16;
- alu_out = ReadW(addr, instr);
- break;
- case LBU:
- addr = rs + se_imm16;
- alu_out = ReadBU(addr);
- break;
- case LHU:
- addr = rs + se_imm16;
- alu_out = ReadHU(addr, instr);
- break;
- case LWR: {
- // al_offset is an offset of the effective address within an aligned word
- uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
- uint8_t byte_shift = kPointerAlignmentMask - al_offset;
- uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
- addr = rs + se_imm16 - al_offset;
- alu_out = ReadW(addr, instr);
- alu_out = static_cast<uint32_t> (alu_out) >> al_offset * 8;
- alu_out |= rt & mask;
- break;
- }
- case SB:
- addr = rs + se_imm16;
- break;
- case SH:
- addr = rs + se_imm16;
- break;
- case SWL: {
- uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
- uint8_t byte_shift = kPointerAlignmentMask - al_offset;
- uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
- addr = rs + se_imm16 - al_offset;
- mem_value = ReadW(addr, instr) & mask;
- mem_value |= static_cast<uint32_t>(rt) >> byte_shift * 8;
- break;
- }
- case SW:
- addr = rs + se_imm16;
- break;
- case SWR: {
- uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
- uint32_t mask = (1 << al_offset * 8) - 1;
- addr = rs + se_imm16 - al_offset;
- mem_value = ReadW(addr, instr);
- mem_value = (rt << al_offset * 8) | (mem_value & mask);
- break;
- }
- case LWC1:
- addr = rs + se_imm16;
- alu_out = ReadW(addr, instr);
- break;
- case LDC1:
- addr = rs + se_imm16;
- fp_out = ReadD(addr, instr);
- break;
- case SWC1:
- case SDC1:
- addr = rs + se_imm16;
- break;
- default:
- UNREACHABLE();
- };
-
- // ---------- Raise exceptions triggered.
- SignalExceptions();
-
- // ---------- Execution
- switch (op) {
- // ------------- Branch instructions
- case BEQ:
- case BNE:
- case BLEZ:
- case BGTZ:
- // Branch instructions common part.
- execute_branch_delay_instruction = true;
- // Set next_pc
- if (do_branch) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- if (instr->IsLinkingInstruction()) {
- set_register(31, current_pc + 2* Instruction::kInstrSize);
- }
- } else {
- next_pc = current_pc + 2 * Instruction::kInstrSize;
- }
- break;
- // ------------- Arithmetic instructions
- case ADDI:
- case ADDIU:
- case SLTI:
- case SLTIU:
- case ANDI:
- case ORI:
- case XORI:
- case LUI:
- set_register(rt_reg, alu_out);
- break;
- // ------------- Memory instructions
- case LB:
- case LH:
- case LWL:
- case LW:
- case LBU:
- case LHU:
- case LWR:
- set_register(rt_reg, alu_out);
- break;
- case SB:
- WriteB(addr, static_cast<int8_t>(rt));
- break;
- case SH:
- WriteH(addr, static_cast<uint16_t>(rt), instr);
- break;
- case SWL:
- WriteW(addr, mem_value, instr);
- break;
- case SW:
- WriteW(addr, rt, instr);
- break;
- case SWR:
- WriteW(addr, mem_value, instr);
- break;
- case LWC1:
- set_fpu_register(ft_reg, alu_out);
- break;
- case LDC1:
- set_fpu_register_double(ft_reg, fp_out);
- break;
- case SWC1:
- addr = rs + se_imm16;
- WriteW(addr, get_fpu_register(ft_reg), instr);
- break;
- case SDC1:
- addr = rs + se_imm16;
- WriteD(addr, get_fpu_register_double(ft_reg), instr);
- break;
- default:
- break;
- };
-
-
- if (execute_branch_delay_instruction) {
- // Execute branch delay slot
- // We don't check for end_sim_pc. First it should not be met as the current
- // pc is valid. Secondly a jump should always execute its branch delay slot.
- Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(current_pc+Instruction::kInstrSize);
- BranchDelayInstructionDecode(branch_delay_instr);
- }
-
- // If needed update pc after the branch delay execution.
- if (next_pc != bad_ra) {
- set_pc(next_pc);
- }
-}
-
-
-// Type 3: instructions using a 26 bytes immediate. (eg: j, jal)
-void Simulator::DecodeTypeJump(Instruction* instr) {
- // Get current pc.
- int32_t current_pc = get_pc();
- // Get unchanged bits of pc.
- int32_t pc_high_bits = current_pc & 0xf0000000;
- // Next pc
- int32_t next_pc = pc_high_bits | (instr->Imm26Value() << 2);
-
- // Execute branch delay slot
- // We don't check for end_sim_pc. First it should not be met as the current pc
- // is valid. Secondly a jump should always execute its branch delay slot.
- Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(current_pc+Instruction::kInstrSize);
- BranchDelayInstructionDecode(branch_delay_instr);
-
- // Update pc and ra if necessary.
- // Do this after the branch delay execution.
- if (instr->IsLinkingInstruction()) {
- set_register(31, current_pc + 2* Instruction::kInstrSize);
- }
- set_pc(next_pc);
- pc_modified_ = true;
-}
-
-
-// Executes the current instruction.
-void Simulator::InstructionDecode(Instruction* instr) {
- if (v8::internal::FLAG_check_icache) {
- CheckICache(isolate_->simulator_i_cache(), instr);
- }
- pc_modified_ = false;
- if (::v8::internal::FLAG_trace_sim) {
- disasm::NameConverter converter;
- disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
- v8::internal::EmbeddedVector<char, 256> buffer;
- dasm.InstructionDecode(buffer, reinterpret_cast<byte_*>(instr));
- PrintF(" 0x%08x %s\n", reinterpret_cast<intptr_t>(instr),
- buffer.start());
- }
-
- switch (instr->InstructionType()) {
- case Instruction::kRegisterType:
- DecodeTypeRegister(instr);
- break;
- case Instruction::kImmediateType:
- DecodeTypeImmediate(instr);
- break;
- case Instruction::kJumpType:
- DecodeTypeJump(instr);
- break;
- default:
- UNSUPPORTED();
- }
- if (!pc_modified_) {
- set_register(pc, reinterpret_cast<int32_t>(instr) +
- Instruction::kInstrSize);
- }
-}
-
-
-
-void Simulator::Execute() {
- // Get the PC to simulate. Cannot use the accessor here as we need the
- // raw PC value and not the one used as input to arithmetic instructions.
- int program_counter = get_pc();
- if (::v8::internal::FLAG_stop_sim_at == 0) {
- // Fast version of the dispatch loop without checking whether the simulator
- // should be stopping at a particular executed instruction.
- while (program_counter != end_sim_pc) {
- Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
- icount_++;
- InstructionDecode(instr);
- program_counter = get_pc();
- }
- } else {
- // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
- // we reach the particular instuction count.
- while (program_counter != end_sim_pc) {
- Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
- icount_++;
- if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
- MipsDebugger dbg(this);
- dbg.Debug();
- } else {
- InstructionDecode(instr);
- }
- program_counter = get_pc();
- }
- }
-}
-
-
-int32_t Simulator::Call(byte_* entry, int argument_count, ...) {
- va_list parameters;
- va_start(parameters, argument_count);
- // Setup arguments
-
- // First four arguments passed in registers.
- ASSERT(argument_count >= 4);
- set_register(a0, va_arg(parameters, int32_t));
- set_register(a1, va_arg(parameters, int32_t));
- set_register(a2, va_arg(parameters, int32_t));
- set_register(a3, va_arg(parameters, int32_t));
-
- // Remaining arguments passed on stack.
- int original_stack = get_register(sp);
- // Compute position of stack on entry to generated code.
- int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
- - kCArgsSlotsSize);
- if (OS::ActivationFrameAlignment() != 0) {
- entry_stack &= -OS::ActivationFrameAlignment();
- }
- // Store remaining arguments on stack, from low to high memory.
- intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
- for (int i = 4; i < argument_count; i++) {
- stack_argument[i - 4 + kArgsSlotsNum] = va_arg(parameters, int32_t);
- }
- va_end(parameters);
- set_register(sp, entry_stack);
-
- // Prepare to execute the code at entry
- set_register(pc, reinterpret_cast<int32_t>(entry));
- // Put down marker for end of simulation. The simulator will stop simulation
- // when the PC reaches this value. By saving the "end simulation" value into
- // the LR the simulation stops when returning to this call point.
- set_register(ra, end_sim_pc);
-
- // Remember the values of callee-saved registers.
- // The code below assumes that r9 is not used as sb (static base) in
- // simulator code and therefore is regarded as a callee-saved register.
- int32_t s0_val = get_register(s0);
- int32_t s1_val = get_register(s1);
- int32_t s2_val = get_register(s2);
- int32_t s3_val = get_register(s3);
- int32_t s4_val = get_register(s4);
- int32_t s5_val = get_register(s5);
- int32_t s6_val = get_register(s6);
- int32_t s7_val = get_register(s7);
- int32_t gp_val = get_register(gp);
- int32_t sp_val = get_register(sp);
- int32_t fp_val = get_register(fp);
-
- // Setup the callee-saved registers with a known value. To be able to check
- // that they are preserved properly across JS execution.
- int32_t callee_saved_value = icount_;
- set_register(s0, callee_saved_value);
- set_register(s1, callee_saved_value);
- set_register(s2, callee_saved_value);
- set_register(s3, callee_saved_value);
- set_register(s4, callee_saved_value);
- set_register(s5, callee_saved_value);
- set_register(s6, callee_saved_value);
- set_register(s7, callee_saved_value);
- set_register(gp, callee_saved_value);
- set_register(fp, callee_saved_value);
-
- // Start the simulation
- Execute();
-
- // Check that the callee-saved registers have been preserved.
- CHECK_EQ(callee_saved_value, get_register(s0));
- CHECK_EQ(callee_saved_value, get_register(s1));
- CHECK_EQ(callee_saved_value, get_register(s2));
- CHECK_EQ(callee_saved_value, get_register(s3));
- CHECK_EQ(callee_saved_value, get_register(s4));
- CHECK_EQ(callee_saved_value, get_register(s5));
- CHECK_EQ(callee_saved_value, get_register(s6));
- CHECK_EQ(callee_saved_value, get_register(s7));
- CHECK_EQ(callee_saved_value, get_register(gp));
- CHECK_EQ(callee_saved_value, get_register(fp));
-
- // Restore callee-saved registers with the original value.
- set_register(s0, s0_val);
- set_register(s1, s1_val);
- set_register(s2, s2_val);
- set_register(s3, s3_val);
- set_register(s4, s4_val);
- set_register(s5, s5_val);
- set_register(s6, s6_val);
- set_register(s7, s7_val);
- set_register(gp, gp_val);
- set_register(sp, sp_val);
- set_register(fp, fp_val);
-
- // Pop stack passed arguments.
- CHECK_EQ(entry_stack, get_register(sp));
- set_register(sp, original_stack);
-
- int32_t result = get_register(v0);
- return result;
-}
-
-
-uintptr_t Simulator::PushAddress(uintptr_t address) {
- int new_sp = get_register(sp) - sizeof(uintptr_t);
- uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
- *stack_slot = address;
- set_register(sp, new_sp);
- return new_sp;
-}
-
-
-uintptr_t Simulator::PopAddress() {
- int current_sp = get_register(sp);
- uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
- uintptr_t address = *stack_slot;
- set_register(sp, current_sp + sizeof(uintptr_t));
- return address;
-}
-
-
-#undef UNSUPPORTED
-
-} } // namespace v8::internal
-
-#endif // USE_SIMULATOR
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/simulator-mips.h b/src/3rdparty/v8/src/mips/simulator-mips.h
deleted file mode 100644
index 0cd9bbe..0000000
--- a/src/3rdparty/v8/src/mips/simulator-mips.h
+++ /dev/null
@@ -1,394 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// Declares a Simulator for MIPS instructions if we are not generating a native
-// MIPS binary. This Simulator allows us to run and debug MIPS code generation
-// on regular desktop machines.
-// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
-// which will start execution in the Simulator or forwards to the real entry
-// on a MIPS HW platform.
-
-#ifndef V8_MIPS_SIMULATOR_MIPS_H_
-#define V8_MIPS_SIMULATOR_MIPS_H_
-
-#include "allocation.h"
-#include "constants-mips.h"
-
-#if !defined(USE_SIMULATOR)
-// Running without a simulator on a native mips platform.
-
-namespace v8 {
-namespace internal {
-
-// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- entry(p0, p1, p2, p3, p4)
-
-typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
- void*, int*, Address, int, Isolate*);
-
-// Call the generated regexp code directly. The code at the entry address
-// should act as a function matching the type arm_regexp_matcher.
-// The fifth argument is a dummy that reserves the space used for
-// the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
- (FUNCTION_CAST<mips_regexp_matcher>(entry)( \
- p0, p1, p2, p3, NULL, p4, p5, p6, p7))
-
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- reinterpret_cast<TryCatch*>(try_catch_address)
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on mips uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch() { }
-};
-
-} } // namespace v8::internal
-
-// Calculated the stack limit beyond which we will throw stack overflow errors.
-// This macro must be called from a C++ method. It relies on being able to take
-// the address of "this" to get a value on the current execution stack and then
-// calculates the stack limit based on that value.
-// NOTE: The check for overflow is not safe as there is no guarantee that the
-// running thread has its stack in all memory up to address 0x00000000.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
- (reinterpret_cast<uintptr_t>(this) >= limit ? \
- reinterpret_cast<uintptr_t>(this) - limit : 0)
-
-#else // !defined(USE_SIMULATOR)
-// Running with a simulator.
-
-#include "hashmap.h"
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------------
-// Utility functions
-
-class CachePage {
- public:
- static const int LINE_VALID = 0;
- static const int LINE_INVALID = 1;
-
- static const int kPageShift = 12;
- static const int kPageSize = 1 << kPageShift;
- static const int kPageMask = kPageSize - 1;
- static const int kLineShift = 2; // The cache line is only 4 bytes right now.
- static const int kLineLength = 1 << kLineShift;
- static const int kLineMask = kLineLength - 1;
-
- CachePage() {
- memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
- }
-
- char* ValidityByte(int offset) {
- return &validity_map_[offset >> kLineShift];
- }
-
- char* CachedData(int offset) {
- return &data_[offset];
- }
-
- private:
- char data_[kPageSize]; // The cached data.
- static const int kValidityMapSize = kPageSize >> kLineShift;
- char validity_map_[kValidityMapSize]; // One byte per line.
-};
-
-class Simulator {
- public:
- friend class MipsDebugger;
-
- // Registers are declared in order. See SMRL chapter 2.
- enum Register {
- no_reg = -1,
- zero_reg = 0,
- at,
- v0, v1,
- a0, a1, a2, a3,
- t0, t1, t2, t3, t4, t5, t6, t7,
- s0, s1, s2, s3, s4, s5, s6, s7,
- t8, t9,
- k0, k1,
- gp,
- sp,
- s8,
- ra,
- // LO, HI, and pc
- LO,
- HI,
- pc, // pc must be the last register.
- kNumSimuRegisters,
- // aliases
- fp = s8
- };
-
- // Coprocessor registers.
- // Generated code will always use doubles. So we will only use even registers.
- enum FPURegister {
- f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
- f12, f13, f14, f15, // f12 and f14 are arguments FPURegisters
- f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
- f26, f27, f28, f29, f30, f31,
- kNumFPURegisters
- };
-
- Simulator();
- ~Simulator();
-
- // The currently executing Simulator instance. Potentially there can be one
- // for each native thread.
- static Simulator* current(v8::internal::Isolate* isolate);
-
- // Accessors for register state. Reading the pc value adheres to the MIPS
- // architecture specification and is off by a 8 from the currently executing
- // instruction.
- void set_register(int reg, int32_t value);
- int32_t get_register(int reg) const;
- // Same for FPURegisters
- void set_fpu_register(int fpureg, int32_t value);
- void set_fpu_register_float(int fpureg, float value);
- void set_fpu_register_double(int fpureg, double value);
- int32_t get_fpu_register(int fpureg) const;
- int64_t get_fpu_register_long(int fpureg) const;
- float get_fpu_register_float(int fpureg) const;
- double get_fpu_register_double(int fpureg) const;
- void set_fcsr_bit(uint32_t cc, bool value);
- bool test_fcsr_bit(uint32_t cc);
- bool set_fcsr_round_error(double original, double rounded);
-
- // Special case of set_register and get_register to access the raw PC value.
- void set_pc(int32_t value);
- int32_t get_pc() const;
-
- // Accessor to the internal simulator stack area.
- uintptr_t StackLimit() const;
-
- // Executes MIPS instructions until the PC reaches end_sim_pc.
- void Execute();
-
- // Call on program start.
- static void Initialize();
-
- // V8 generally calls into generated JS code with 5 parameters and into
- // generated RegExp code with 7 parameters. This is a convenience function,
- // which sets up the simulator state and grabs the result on return.
- int32_t Call(byte* entry, int argument_count, ...);
-
- // Push an address onto the JS stack.
- uintptr_t PushAddress(uintptr_t address);
-
- // Pop an address from the JS stack.
- uintptr_t PopAddress();
-
- // ICache checking.
- static void FlushICache(v8::internal::HashMap* i_cache, void* start,
- size_t size);
-
- // Returns true if pc register contains one of the 'special_values' defined
- // below (bad_ra, end_sim_pc).
- bool has_bad_pc() const;
-
- private:
- enum special_values {
- // Known bad pc value to ensure that the simulator does not execute
- // without being properly setup.
- bad_ra = -1,
- // A pc value used to signal the simulator to stop execution. Generally
- // the ra is set to this value on transition from native C code to
- // simulated execution, so that the simulator can "return" to the native
- // C code.
- end_sim_pc = -2,
- // Unpredictable value.
- Unpredictable = 0xbadbeaf
- };
-
- // Unsupported instructions use Format to print an error and stop execution.
- void Format(Instruction* instr, const char* format);
-
- // Read and write memory.
- inline uint32_t ReadBU(int32_t addr);
- inline int32_t ReadB(int32_t addr);
- inline void WriteB(int32_t addr, uint8_t value);
- inline void WriteB(int32_t addr, int8_t value);
-
- inline uint16_t ReadHU(int32_t addr, Instruction* instr);
- inline int16_t ReadH(int32_t addr, Instruction* instr);
- // Note: Overloaded on the sign of the value.
- inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
- inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
-
- inline int ReadW(int32_t addr, Instruction* instr);
- inline void WriteW(int32_t addr, int value, Instruction* instr);
-
- inline double ReadD(int32_t addr, Instruction* instr);
- inline void WriteD(int32_t addr, double value, Instruction* instr);
-
- // Operations depending on endianness.
- // Get Double Higher / Lower word.
- inline int32_t GetDoubleHIW(double* addr);
- inline int32_t GetDoubleLOW(double* addr);
- // Set Double Higher / Lower word.
- inline int32_t SetDoubleHIW(double* addr);
- inline int32_t SetDoubleLOW(double* addr);
-
- // Executing is handled based on the instruction type.
- void DecodeTypeRegister(Instruction* instr);
-
- // Helper function for DecodeTypeRegister.
- void ConfigureTypeRegister(Instruction* instr,
- int32_t& alu_out,
- int64_t& i64hilo,
- uint64_t& u64hilo,
- int32_t& next_pc,
- bool& do_interrupt);
-
- void DecodeTypeImmediate(Instruction* instr);
- void DecodeTypeJump(Instruction* instr);
-
- // Used for breakpoints and traps.
- void SoftwareInterrupt(Instruction* instr);
-
- // Executes one instruction.
- void InstructionDecode(Instruction* instr);
- // Execute one instruction placed in a branch delay slot.
- void BranchDelayInstructionDecode(Instruction* instr) {
- if (instr->IsForbiddenInBranchDelay()) {
- V8_Fatal(__FILE__, __LINE__,
- "Eror:Unexpected %i opcode in a branch delay slot.",
- instr->OpcodeValue());
- }
- InstructionDecode(instr);
- }
-
- // ICache.
- static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
- static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
- int size);
- static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
-
-
- enum Exception {
- none,
- kIntegerOverflow,
- kIntegerUnderflow,
- kDivideByZero,
- kNumExceptions
- };
- int16_t exceptions[kNumExceptions];
-
- // Exceptions.
- void SignalExceptions();
-
- // Runtime call support.
- static void* RedirectExternalReference(void* external_function,
- ExternalReference::Type type);
-
- // Used for real time calls that takes two double values as arguments and
- // returns a double.
- void SetFpResult(double result);
-
- // Architecture state.
- // Registers.
- int32_t registers_[kNumSimuRegisters];
- // Coprocessor Registers.
- int32_t FPUregisters_[kNumFPURegisters];
- // FPU control register.
- uint32_t FCSR_;
-
- // Simulator support.
- char* stack_;
- size_t stack_size_;
- bool pc_modified_;
- int icount_;
- int break_count_;
-
- // Icache simulation
- v8::internal::HashMap* i_cache_;
-
- // Registered breakpoints.
- Instruction* break_pc_;
- Instr break_instr_;
-
- v8::internal::Isolate* isolate_;
-};
-
-
-// When running with the simulator transition into simulated execution at this
-// point.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
-reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
- FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
-
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
- Simulator::current(Isolate::Current())->Call( \
- entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
-
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- try_catch_address == NULL ? \
- NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
-
-
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code. Setting the c_limit to indicate a very small
-// stack cause stack overflow errors, since the simulator ignores the input.
-// This is unlikely to be an issue in practice, though it might cause testing
-// trouble down the line.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
- return Simulator::current(Isolate::Current())->StackLimit();
- }
-
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(Isolate::Current());
- return sim->PushAddress(try_catch_address);
- }
-
- static inline void UnregisterCTryCatch() {
- Simulator::current(Isolate::Current())->PopAddress();
- }
-};
-
-} } // namespace v8::internal
-
-#endif // !defined(USE_SIMULATOR)
-#endif // V8_MIPS_SIMULATOR_MIPS_H_
-
diff --git a/src/3rdparty/v8/src/mips/stub-cache-mips.cc b/src/3rdparty/v8/src/mips/stub-cache-mips.cc
deleted file mode 100644
index 1a49558..0000000
--- a/src/3rdparty/v8/src/mips/stub-cache-mips.cc
+++ /dev/null
@@ -1,601 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "ic-inl.h"
-#include "codegen-inl.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// Load a fast property out of a holder object (src). In-object properties
-// are loaded directly otherwise the property is loaded from the properties
-// fixed array.
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst, Register src,
- JSObject* holder, int index) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* miss_label) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// Generate code to load the length from a string object and return the length.
-// If the receiver object is not a string or a wrapped string object the
-// execution continues at the miss label. The register containing the
-// receiver is potentially clobbered.
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss,
- bool support_wrappers) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// Generate StoreField code, value is passed in a0 register.
-// After executing generated code, the receiver_reg and name_reg
-// may be clobbered.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- JSObject* object,
- int index,
- Map* transition,
- Register receiver_reg,
- Register name_reg,
- Register scratch,
- Label* miss_label) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(StubCompiler* stub_compiler,
- const ParameterCount& arguments,
- Register name)
- : stub_compiler_(stub_compiler),
- arguments_(arguments),
- name_(name) {}
-
- void Compile(MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
- UNIMPLEMENTED_MIPS();
- }
-
- private:
- void CompileCacheable(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- const CallOptimization& optimization,
- Label* miss_label) {
- UNIMPLEMENTED_MIPS();
- }
-
- void CompileRegular(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- String* name,
- JSObject* interceptor_holder,
- Label* miss_label) {
- UNIMPLEMENTED_MIPS();
- }
-
- void LoadWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- JSObject* holder_obj,
- Register scratch,
- Label* interceptor_succeeded) {
- UNIMPLEMENTED_MIPS();
- }
-
- StubCompiler* stub_compiler_;
- const ParameterCount& arguments_;
- Register name_;
-};
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Register StubCompiler::CheckPrototypes(JSObject* object,
- Register object_reg,
- JSObject* holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- String* name,
- int save_at_depth,
- Label* miss) {
- UNIMPLEMENTED_MIPS();
- return no_reg;
-}
-
-
-void StubCompiler::GenerateLoadField(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int index,
- String* name,
- Label* miss) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void StubCompiler::GenerateLoadConstant(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Object* value,
- String* name,
- Label* miss) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- String* name,
- Label* miss) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
- JSObject* holder,
- String* name,
- Label* miss) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
- JSFunction* function,
- Label* miss) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-MaybeObject* CallStubCompiler::GenerateMissBranch() {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
- JSObject* holder,
- int index,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* CallStubCompiler::CompileFastApiCall(
- const CallOptimization& optimization,
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
- CheckType check) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
- int index,
- Map* transition,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
- AccessorInfo* callback,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
- JSGlobalPropertyCell* cell,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
- JSObject* object,
- JSObject* last) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
- JSObject* holder,
- int index,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
- JSObject* holder,
- Object* value,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- String* name,
- bool is_dont_delete) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
- int index) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
- String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
- JSObject* holder,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
- int index,
- Map* transition,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
- JSObject* receiver) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
- JSObject* receiver_object,
- ExternalArrayType array_type,
- Code::Flags flags) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
- JSObject* receiver_object,
- ExternalArrayType array_type,
- Code::Flags flags) {
- UNIMPLEMENTED_MIPS();
- return NULL;
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/virtual-frame-mips-inl.h b/src/3rdparty/v8/src/mips/virtual-frame-mips-inl.h
deleted file mode 100644
index f0d2fab..0000000
--- a/src/3rdparty/v8/src/mips/virtual-frame-mips-inl.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VIRTUAL_FRAME_MIPS_INL_H_
-#define V8_VIRTUAL_FRAME_MIPS_INL_H_
-
-#include "assembler-mips.h"
-#include "virtual-frame-mips.h"
-
-namespace v8 {
-namespace internal {
-
-
-MemOperand VirtualFrame::ParameterAt(int index) {
- UNIMPLEMENTED_MIPS();
- return MemOperand(zero_reg, 0);
-}
-
-
-// The receiver frame slot.
-MemOperand VirtualFrame::Receiver() {
- UNIMPLEMENTED_MIPS();
- return MemOperand(zero_reg, 0);
-}
-
-
-void VirtualFrame::Forget(int count) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_VIRTUAL_FRAME_MIPS_INL_H_
diff --git a/src/3rdparty/v8/src/mips/virtual-frame-mips.cc b/src/3rdparty/v8/src/mips/virtual-frame-mips.cc
deleted file mode 100644
index 22fe9f0..0000000
--- a/src/3rdparty/v8/src/mips/virtual-frame-mips.cc
+++ /dev/null
@@ -1,307 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::PopToA1A0() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::PopToA1() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::PopToA0() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::MergeTo(const VirtualFrame* expected,
- Condition cond,
- Register r1,
- const Operand& r2) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected,
- Condition cond,
- Register r1,
- const Operand& r2) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::MergeTOSTo(
- VirtualFrame::TopOfStack expected_top_of_stack_state,
- Condition cond,
- Register r1,
- const Operand& r2) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Enter() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Exit() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallJSFunction(int arg_count) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void VirtualFrame::DebugBreak() {
- UNIMPLEMENTED_MIPS();
-}
-#endif
-
-
-void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flags,
- int arg_count) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallKeyedLoadIC() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallKeyedStoreIC() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- int dropped_args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// NO_TOS_REGISTERS, A0_TOS, A1_TOS, A1_A0_TOS, A0_A1_TOS.
-const bool VirtualFrame::kA0InUse[TOS_STATES] =
- { false, true, false, true, true };
-const bool VirtualFrame::kA1InUse[TOS_STATES] =
- { false, false, true, true, true };
-const int VirtualFrame::kVirtualElements[TOS_STATES] =
- { 0, 1, 1, 2, 2 };
-const Register VirtualFrame::kTopRegister[TOS_STATES] =
- { a0, a0, a1, a1, a0 };
-const Register VirtualFrame::kBottomRegister[TOS_STATES] =
- { a0, a0, a1, a0, a1 };
-const Register VirtualFrame::kAllocatedRegisters[
- VirtualFrame::kNumberOfAllocatedRegisters] = { a2, a3, t0, t1, t2 };
-// Popping is done by the transition implied by kStateAfterPop. Of course if
-// there were no stack slots allocated to registers then the physical SP must
-// be adjusted.
-const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPop[TOS_STATES] =
- { NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, A0_TOS, A1_TOS };
-// Pushing is done by the transition implied by kStateAfterPush. Of course if
-// the maximum number of registers was already allocated to the top of stack
-// slots then one register must be physically pushed onto the stack.
-const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPush[TOS_STATES] =
- { A0_TOS, A1_A0_TOS, A0_A1_TOS, A0_A1_TOS, A1_A0_TOS };
-
-
-void VirtualFrame::Drop(int count) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Pop() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToA0() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToA1() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToA1A0() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-Register VirtualFrame::Peek() {
- UNIMPLEMENTED_MIPS();
- return no_reg;
-}
-
-
-Register VirtualFrame::Peek2() {
- UNIMPLEMENTED_MIPS();
- return no_reg;
-}
-
-
-void VirtualFrame::Dup() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Dup2() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
- UNIMPLEMENTED_MIPS();
- return no_reg;
-}
-
-
-void VirtualFrame::EnsureOneFreeTOSRegister() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitMultiPop(RegList regs) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-Register VirtualFrame::GetTOSRegister() {
- UNIMPLEMENTED_MIPS();
- return no_reg;
-}
-
-
-void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitPush(MemOperand operand, TypeInfo info) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitMultiPush(RegList regs) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitMultiPushReversed(RegList regs) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SpillAll() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/virtual-frame-mips.h b/src/3rdparty/v8/src/mips/virtual-frame-mips.h
deleted file mode 100644
index be8b74e..0000000
--- a/src/3rdparty/v8/src/mips/virtual-frame-mips.h
+++ /dev/null
@@ -1,530 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_MIPS_VIRTUAL_FRAME_MIPS_H_
-#define V8_MIPS_VIRTUAL_FRAME_MIPS_H_
-
-#include "register-allocator.h"
-
-namespace v8 {
-namespace internal {
-
-// This dummy class is only used to create invalid virtual frames.
-extern class InvalidVirtualFrameInitializer {}* kInvalidVirtualFrameInitializer;
-
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame. It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack. It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
-
-class VirtualFrame : public ZoneObject {
- public:
- class RegisterAllocationScope;
- // A utility class to introduce a scope where the virtual frame is
- // expected to remain spilled. The constructor spills the code
- // generator's current frame, and keeps it spilled.
- class SpilledScope BASE_EMBEDDED {
- public:
- explicit SpilledScope(VirtualFrame* frame)
- : old_is_spilled_(
- Isolate::Current()->is_virtual_frame_in_spilled_scope()) {
- if (frame != NULL) {
- if (!old_is_spilled_) {
- frame->SpillAll();
- } else {
- frame->AssertIsSpilled();
- }
- }
- Isolate::Current()->set_is_virtual_frame_in_spilled_scope(true);
- }
- ~SpilledScope() {
- Isolate::Current()->set_is_virtual_frame_in_spilled_scope(
- old_is_spilled_);
- }
- static bool is_spilled() {
- return Isolate::Current()->is_virtual_frame_in_spilled_scope();
- }
-
- private:
- int old_is_spilled_;
-
- SpilledScope() {}
-
- friend class RegisterAllocationScope;
- };
-
- class RegisterAllocationScope BASE_EMBEDDED {
- public:
- // A utility class to introduce a scope where the virtual frame
- // is not spilled, ie. where register allocation occurs. Eventually
- // when RegisterAllocationScope is ubiquitous it can be removed
- // along with the (by then unused) SpilledScope class.
- inline explicit RegisterAllocationScope(CodeGenerator* cgen);
- inline ~RegisterAllocationScope();
-
- private:
- CodeGenerator* cgen_;
- bool old_is_spilled_;
-
- RegisterAllocationScope() {}
- };
-
- // An illegal index into the virtual frame.
- static const int kIllegalIndex = -1;
-
- // Construct an initial virtual frame on entry to a JS function.
- inline VirtualFrame();
-
- // Construct an invalid virtual frame, used by JumpTargets.
- inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);
-
- // Construct a virtual frame as a clone of an existing one.
- explicit inline VirtualFrame(VirtualFrame* original);
-
- inline CodeGenerator* cgen() const;
- inline MacroAssembler* masm();
-
- // The number of elements on the virtual frame.
- int element_count() const { return element_count_; }
-
- // The height of the virtual expression stack.
- inline int height() const;
-
- bool is_used(int num) {
- switch (num) {
- case 0: { // a0.
- return kA0InUse[top_of_stack_state_];
- }
- case 1: { // a1.
- return kA1InUse[top_of_stack_state_];
- }
- case 2:
- case 3:
- case 4:
- case 5:
- case 6: { // a2 to a3, t0 to t2.
- ASSERT(num - kFirstAllocatedRegister < kNumberOfAllocatedRegisters);
- ASSERT(num >= kFirstAllocatedRegister);
- if ((register_allocation_map_ &
- (1 << (num - kFirstAllocatedRegister))) == 0) {
- return false;
- } else {
- return true;
- }
- }
- default: {
- ASSERT(num < kFirstAllocatedRegister ||
- num >= kFirstAllocatedRegister + kNumberOfAllocatedRegisters);
- return false;
- }
- }
- }
-
- // Add extra in-memory elements to the top of the frame to match an actual
- // frame (eg, the frame after an exception handler is pushed). No code is
- // emitted.
- void Adjust(int count);
-
- // Forget elements from the top of the frame to match an actual frame (eg,
- // the frame after a runtime call). No code is emitted except to bring the
- // frame to a spilled state.
- void Forget(int count);
-
-
- // Spill all values from the frame to memory.
- void SpillAll();
-
- void AssertIsSpilled() const {
- ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
- ASSERT(register_allocation_map_ == 0);
- }
-
- void AssertIsNotSpilled() {
- ASSERT(!SpilledScope::is_spilled());
- }
-
- // Spill all occurrences of a specific register from the frame.
- void Spill(Register reg) {
- UNIMPLEMENTED();
- }
-
- // Spill all occurrences of an arbitrary register if possible. Return the
- // register spilled or no_reg if it was not possible to free any register
- // (ie, they all have frame-external references). Unimplemented.
- Register SpillAnyRegister();
-
- // Make this virtual frame have a state identical to an expected virtual
- // frame. As a side effect, code may be emitted to make this frame match
- // the expected one.
- void MergeTo(const VirtualFrame* expected,
- Condition cond = al,
- Register r1 = no_reg,
- const Operand& r2 = Operand(no_reg));
-
- void MergeTo(VirtualFrame* expected,
- Condition cond = al,
- Register r1 = no_reg,
- const Operand& r2 = Operand(no_reg));
-
- // Checks whether this frame can be branched to by the other frame.
- bool IsCompatibleWith(const VirtualFrame* other) const {
- return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0;
- }
-
- inline void ForgetTypeInfo() {
- tos_known_smi_map_ = 0;
- }
-
- // Detach a frame from its code generator, perhaps temporarily. This
- // tells the register allocator that it is free to use frame-internal
- // registers. Used when the code generator's frame is switched from this
- // one to NULL by an unconditional jump.
- void DetachFromCodeGenerator() {
- }
-
- // (Re)attach a frame to its code generator. This informs the register
- // allocator that the frame-internal register references are active again.
- // Used when a code generator's frame is switched from NULL to this one by
- // binding a label.
- void AttachToCodeGenerator() {
- }
-
- // Emit code for the physical JS entry and exit frame sequences. After
- // calling Enter, the virtual frame is ready for use; and after calling
- // Exit it should not be used. Note that Enter does not allocate space in
- // the physical frame for storing frame-allocated locals.
- void Enter();
- void Exit();
-
- // Prepare for returning from the frame by elements in the virtual frame.
- // This avoids generating unnecessary merge code when jumping to the shared
- // return site. No spill code emitted. Value to return should be in v0.
- inline void PrepareForReturn();
-
- // Number of local variables after when we use a loop for allocating.
- static const int kLocalVarBound = 5;
-
- // Allocate and initialize the frame-allocated locals.
- void AllocateStackSlots();
-
- // The current top of the expression stack as an assembly operand.
- MemOperand Top() {
- AssertIsSpilled();
- return MemOperand(sp, 0);
- }
-
- // An element of the expression stack as an assembly operand.
- MemOperand ElementAt(int index) {
- int adjusted_index = index - kVirtualElements[top_of_stack_state_];
- ASSERT(adjusted_index >= 0);
- return MemOperand(sp, adjusted_index * kPointerSize);
- }
-
- bool KnownSmiAt(int index) {
- if (index >= kTOSKnownSmiMapSize) return false;
- return (tos_known_smi_map_ & (1 << index)) != 0;
- }
- // A frame-allocated local as an assembly operand.
- inline MemOperand LocalAt(int index);
-
- // Push the address of the receiver slot on the frame.
- void PushReceiverSlotAddress();
-
- // The function frame slot.
- MemOperand Function() { return MemOperand(fp, kFunctionOffset); }
-
- // The context frame slot.
- MemOperand Context() { return MemOperand(fp, kContextOffset); }
-
- // A parameter as an assembly operand.
- inline MemOperand ParameterAt(int index);
-
- // The receiver frame slot.
- inline MemOperand Receiver();
-
- // Push a try-catch or try-finally handler on top of the virtual frame.
- void PushTryHandler(HandlerType type);
-
- // Call stub given the number of arguments it expects on (and
- // removes from) the stack.
- inline void CallStub(CodeStub* stub, int arg_count);
-
- // Call JS function from top of the stack with arguments
- // taken from the stack.
- void CallJSFunction(int arg_count);
-
- // Call runtime given the number of arguments expected on (and
- // removed from) the stack.
- void CallRuntime(const Runtime::Function* f, int arg_count);
- void CallRuntime(Runtime::FunctionId id, int arg_count);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- void DebugBreak();
-#endif
-
- // Invoke builtin given the number of arguments it expects on (and
- // removes from) the stack.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flag,
- int arg_count);
-
- // Call load IC. Receiver is on the stack and is consumed. Result is returned
- // in v0.
- void CallLoadIC(Handle<String> name, RelocInfo::Mode mode);
-
- // Call store IC. If the load is contextual, value is found on top of the
- // frame. If not, value and receiver are on the frame. Both are consumed.
- // Result is returned in v0.
- void CallStoreIC(Handle<String> name, bool is_contextual);
-
- // Call keyed load IC. Key and receiver are on the stack. Both are consumed.
- // Result is returned in v0.
- void CallKeyedLoadIC();
-
- // Call keyed store IC. Value, key and receiver are on the stack. All three
- // are consumed. Result is returned in v0 (and a0).
- void CallKeyedStoreIC();
-
- // Call into an IC stub given the number of arguments it removes
- // from the stack. Register arguments to the IC stub are implicit,
- // and depend on the type of IC stub.
- void CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- int dropped_args);
-
- // Drop a number of elements from the top of the expression stack. May
- // emit code to affect the physical frame. Does not clobber any registers
- // excepting possibly the stack pointer.
- void Drop(int count);
-
- // Drop one element.
- void Drop() { Drop(1); }
-
- // Pop an element from the top of the expression stack. Discards
- // the result.
- void Pop();
-
- // Pop an element from the top of the expression stack. The register
- // will be one normally used for the top of stack register allocation
- // so you can't hold on to it if you push on the stack.
- Register PopToRegister(Register but_not_to_this_one = no_reg);
-
- // Look at the top of the stack. The register returned is aliased and
- // must be copied to a scratch register before modification.
- Register Peek();
-
- // Look at the value beneath the top of the stack. The register returned is
- // aliased and must be copied to a scratch register before modification.
- Register Peek2();
-
- // Duplicate the top of stack.
- void Dup();
-
- // Duplicate the two elements on top of stack.
- void Dup2();
-
- // Flushes all registers, but it puts a copy of the top-of-stack in a0.
- void SpillAllButCopyTOSToA0();
-
- // Flushes all registers, but it puts a copy of the top-of-stack in a1.
- void SpillAllButCopyTOSToA1();
-
- // Flushes all registers, but it puts a copy of the top-of-stack in a1
- // and the next value on the stack in a0.
- void SpillAllButCopyTOSToA1A0();
-
- // Pop and save an element from the top of the expression stack and
- // emit a corresponding pop instruction.
- void EmitPop(Register reg);
- // Same but for multiple registers
- void EmitMultiPop(RegList regs);
- void EmitMultiPopReversed(RegList regs);
-
-
- // Takes the top two elements and puts them in a0 (top element) and a1
- // (second element).
- void PopToA1A0();
-
- // Takes the top element and puts it in a1.
- void PopToA1();
-
- // Takes the top element and puts it in a0.
- void PopToA0();
-
- // Push an element on top of the expression stack and emit a
- // corresponding push instruction.
- void EmitPush(Register reg, TypeInfo type_info = TypeInfo::Unknown());
- void EmitPush(Operand operand, TypeInfo type_info = TypeInfo::Unknown());
- void EmitPush(MemOperand operand, TypeInfo type_info = TypeInfo::Unknown());
- void EmitPushRoot(Heap::RootListIndex index);
-
- // Overwrite the nth thing on the stack. If the nth position is in a
- // register then this turns into a Move, otherwise an sw. Afterwards
- // you can still use the register even if it is a register that can be
- // used for TOS (a0 or a1).
- void SetElementAt(Register reg, int this_far_down);
-
- // Get a register which is free and which must be immediately used to
- // push on the top of the stack.
- Register GetTOSRegister();
-
- // Same but for multiple registers.
- void EmitMultiPush(RegList regs);
- void EmitMultiPushReversed(RegList regs);
-
- static Register scratch0() { return t4; }
- static Register scratch1() { return t5; }
- static Register scratch2() { return t6; }
-
- private:
- static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
- static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
- static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
- static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
- static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
-
- // 5 states for the top of stack, which can be in memory or in a0 and a1.
- enum TopOfStack { NO_TOS_REGISTERS, A0_TOS, A1_TOS, A1_A0_TOS, A0_A1_TOS,
- TOS_STATES};
- static const int kMaxTOSRegisters = 2;
-
- static const bool kA0InUse[TOS_STATES];
- static const bool kA1InUse[TOS_STATES];
- static const int kVirtualElements[TOS_STATES];
- static const TopOfStack kStateAfterPop[TOS_STATES];
- static const TopOfStack kStateAfterPush[TOS_STATES];
- static const Register kTopRegister[TOS_STATES];
- static const Register kBottomRegister[TOS_STATES];
-
- // We allocate up to 5 locals in registers.
- static const int kNumberOfAllocatedRegisters = 5;
- // r2 to r6 are allocated to locals.
- static const int kFirstAllocatedRegister = 2;
-
- static const Register kAllocatedRegisters[kNumberOfAllocatedRegisters];
-
- static Register AllocatedRegister(int r) {
- ASSERT(r >= 0 && r < kNumberOfAllocatedRegisters);
- return kAllocatedRegisters[r];
- }
-
- // The number of elements on the stack frame.
- int element_count_;
- TopOfStack top_of_stack_state_:3;
- int register_allocation_map_:kNumberOfAllocatedRegisters;
- static const int kTOSKnownSmiMapSize = 4;
- unsigned tos_known_smi_map_:kTOSKnownSmiMapSize;
-
- // The index of the element that is at the processor's stack pointer
- // (the sp register). For now since everything is in memory it is given
- // by the number of elements on the not-very-virtual stack frame.
- int stack_pointer() { return element_count_ - 1; }
-
- // The number of frame-allocated locals and parameters respectively.
- inline int parameter_count() const;
- inline int local_count() const;
-
- // The index of the element that is at the processor's frame pointer
- // (the fp register). The parameters, receiver, function, and context
- // are below the frame pointer.
- inline int frame_pointer() const;
-
- // The index of the first parameter. The receiver lies below the first
- // parameter.
- int param0_index() { return 1; }
-
- // The index of the context slot in the frame. It is immediately
- // below the frame pointer.
- inline int context_index();
-
- // The index of the function slot in the frame. It is below the frame
- // pointer and context slot.
- inline int function_index();
-
- // The index of the first local. Between the frame pointer and the
- // locals lies the return address.
- inline int local0_index() const;
-
- // The index of the base of the expression stack.
- inline int expression_base_index() const;
-
- // Convert a frame index into a frame pointer relative offset into the
- // actual stack.
- inline int fp_relative(int index);
-
- // Spill all elements in registers. Spill the top spilled_args elements
- // on the frame. Sync all other frame elements.
- // Then drop dropped_args elements from the virtual frame, to match
- // the effect of an upcoming call that will drop them from the stack.
- void PrepareForCall(int spilled_args, int dropped_args);
-
- // If all top-of-stack registers are in use then the lowest one is pushed
- // onto the physical stack and made free.
- void EnsureOneFreeTOSRegister();
-
- // Emit instructions to get the top of stack state from where we are to where
- // we want to be.
- void MergeTOSTo(TopOfStack expected_state,
- Condition cond = al,
- Register r1 = no_reg,
- const Operand& r2 = Operand(no_reg));
-
- inline bool Equals(const VirtualFrame* other);
-
- inline void LowerHeight(int count) {
- element_count_ -= count;
- if (count >= kTOSKnownSmiMapSize) {
- tos_known_smi_map_ = 0;
- } else {
- tos_known_smi_map_ >>= count;
- }
- }
-
- inline void RaiseHeight(int count, unsigned known_smi_map = 0) {
- ASSERT(known_smi_map < (1u << count));
- element_count_ += count;
- if (count >= kTOSKnownSmiMapSize) {
- tos_known_smi_map_ = known_smi_map;
- } else {
- tos_known_smi_map_ = ((tos_known_smi_map_ << count) | known_smi_map);
- }
- }
- friend class JumpTarget;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_MIPS_VIRTUAL_FRAME_MIPS_H_
-
diff --git a/src/3rdparty/v8/src/mirror-debugger.js b/src/3rdparty/v8/src/mirror-debugger.js
deleted file mode 100644
index 99e9819..0000000
--- a/src/3rdparty/v8/src/mirror-debugger.js
+++ /dev/null
@@ -1,2381 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Handle id counters.
-var next_handle_ = 0;
-var next_transient_handle_ = -1;
-
-// Mirror cache.
-var mirror_cache_ = [];
-
-
-/**
- * Clear the mirror handle cache.
- */
-function ClearMirrorCache() {
- next_handle_ = 0;
- mirror_cache_ = [];
-}
-
-
-/**
- * Returns the mirror for a specified value or object.
- *
- * @param {value or Object} value the value or object to retreive the mirror for
- * @param {boolean} transient indicate whether this object is transient and
- * should not be added to the mirror cache. The default is not transient.
- * @returns {Mirror} the mirror reflects the passed value or object
- */
-function MakeMirror(value, opt_transient) {
- var mirror;
-
- // Look for non transient mirrors in the mirror cache.
- if (!opt_transient) {
- for (id in mirror_cache_) {
- mirror = mirror_cache_[id];
- if (mirror.value() === value) {
- return mirror;
- }
- // Special check for NaN as NaN == NaN is false.
- if (mirror.isNumber() && isNaN(mirror.value()) &&
- typeof value == 'number' && isNaN(value)) {
- return mirror;
- }
- }
- }
-
- if (IS_UNDEFINED(value)) {
- mirror = new UndefinedMirror();
- } else if (IS_NULL(value)) {
- mirror = new NullMirror();
- } else if (IS_BOOLEAN(value)) {
- mirror = new BooleanMirror(value);
- } else if (IS_NUMBER(value)) {
- mirror = new NumberMirror(value);
- } else if (IS_STRING(value)) {
- mirror = new StringMirror(value);
- } else if (IS_ARRAY(value)) {
- mirror = new ArrayMirror(value);
- } else if (IS_DATE(value)) {
- mirror = new DateMirror(value);
- } else if (IS_FUNCTION(value)) {
- mirror = new FunctionMirror(value);
- } else if (IS_REGEXP(value)) {
- mirror = new RegExpMirror(value);
- } else if (IS_ERROR(value)) {
- mirror = new ErrorMirror(value);
- } else if (IS_SCRIPT(value)) {
- mirror = new ScriptMirror(value);
- } else {
- mirror = new ObjectMirror(value, OBJECT_TYPE, opt_transient);
- }
-
- mirror_cache_[mirror.handle()] = mirror;
- return mirror;
-}
-
-
-/**
- * Returns the mirror for a specified mirror handle.
- *
- * @param {number} handle the handle to find the mirror for
- * @returns {Mirror or undefiend} the mirror with the requested handle or
- * undefined if no mirror with the requested handle was found
- */
-function LookupMirror(handle) {
- return mirror_cache_[handle];
-}
-
-
-/**
- * Returns the mirror for the undefined value.
- *
- * @returns {Mirror} the mirror reflects the undefined value
- */
-function GetUndefinedMirror() {
- return MakeMirror(void 0);
-}
-
-
-/**
- * Inherit the prototype methods from one constructor into another.
- *
- * The Function.prototype.inherits from lang.js rewritten as a standalone
- * function (not on Function.prototype). NOTE: If this file is to be loaded
- * during bootstrapping this function needs to be revritten using some native
- * functions as prototype setup using normal JavaScript does not work as
- * expected during bootstrapping (see mirror.js in r114903).
- *
- * @param {function} ctor Constructor function which needs to inherit the
- * prototype
- * @param {function} superCtor Constructor function to inherit prototype from
- */
-function inherits(ctor, superCtor) {
- var tempCtor = function(){};
- tempCtor.prototype = superCtor.prototype;
- ctor.super_ = superCtor.prototype;
- ctor.prototype = new tempCtor();
- ctor.prototype.constructor = ctor;
-}
-
-
-// Type names of the different mirrors.
-const UNDEFINED_TYPE = 'undefined';
-const NULL_TYPE = 'null';
-const BOOLEAN_TYPE = 'boolean';
-const NUMBER_TYPE = 'number';
-const STRING_TYPE = 'string';
-const OBJECT_TYPE = 'object';
-const FUNCTION_TYPE = 'function';
-const REGEXP_TYPE = 'regexp';
-const ERROR_TYPE = 'error';
-const PROPERTY_TYPE = 'property';
-const FRAME_TYPE = 'frame';
-const SCRIPT_TYPE = 'script';
-const CONTEXT_TYPE = 'context';
-const SCOPE_TYPE = 'scope';
-
-// Maximum length when sending strings through the JSON protocol.
-const kMaxProtocolStringLength = 80;
-
-// Different kind of properties.
-PropertyKind = {};
-PropertyKind.Named = 1;
-PropertyKind.Indexed = 2;
-
-
-// A copy of the PropertyType enum from global.h
-PropertyType = {};
-PropertyType.Normal = 0;
-PropertyType.Field = 1;
-PropertyType.ConstantFunction = 2;
-PropertyType.Callbacks = 3;
-PropertyType.Interceptor = 4;
-PropertyType.MapTransition = 5;
-PropertyType.ExternalArrayTransition = 6;
-PropertyType.ConstantTransition = 7;
-PropertyType.NullDescriptor = 8;
-
-
-// Different attributes for a property.
-PropertyAttribute = {};
-PropertyAttribute.None = NONE;
-PropertyAttribute.ReadOnly = READ_ONLY;
-PropertyAttribute.DontEnum = DONT_ENUM;
-PropertyAttribute.DontDelete = DONT_DELETE;
-
-
-// A copy of the scope types from runtime.cc.
-ScopeType = { Global: 0,
- Local: 1,
- With: 2,
- Closure: 3,
- Catch: 4 };
-
-
-// Mirror hierarchy:
-// - Mirror
-// - ValueMirror
-// - UndefinedMirror
-// - NullMirror
-// - NumberMirror
-// - StringMirror
-// - ObjectMirror
-// - FunctionMirror
-// - UnresolvedFunctionMirror
-// - ArrayMirror
-// - DateMirror
-// - RegExpMirror
-// - ErrorMirror
-// - PropertyMirror
-// - FrameMirror
-// - ScriptMirror
-
-
-/**
- * Base class for all mirror objects.
- * @param {string} type The type of the mirror
- * @constructor
- */
-function Mirror(type) {
- this.type_ = type;
-};
-
-
-Mirror.prototype.type = function() {
- return this.type_;
-};
-
-
-/**
- * Check whether the mirror reflects a value.
- * @returns {boolean} True if the mirror reflects a value.
- */
-Mirror.prototype.isValue = function() {
- return this instanceof ValueMirror;
-}
-
-
-/**
- * Check whether the mirror reflects the undefined value.
- * @returns {boolean} True if the mirror reflects the undefined value.
- */
-Mirror.prototype.isUndefined = function() {
- return this instanceof UndefinedMirror;
-}
-
-
-/**
- * Check whether the mirror reflects the null value.
- * @returns {boolean} True if the mirror reflects the null value
- */
-Mirror.prototype.isNull = function() {
- return this instanceof NullMirror;
-}
-
-
-/**
- * Check whether the mirror reflects a boolean value.
- * @returns {boolean} True if the mirror reflects a boolean value
- */
-Mirror.prototype.isBoolean = function() {
- return this instanceof BooleanMirror;
-}
-
-
-/**
- * Check whether the mirror reflects a number value.
- * @returns {boolean} True if the mirror reflects a number value
- */
-Mirror.prototype.isNumber = function() {
- return this instanceof NumberMirror;
-}
-
-
-/**
- * Check whether the mirror reflects a string value.
- * @returns {boolean} True if the mirror reflects a string value
- */
-Mirror.prototype.isString = function() {
- return this instanceof StringMirror;
-}
-
-
-/**
- * Check whether the mirror reflects an object.
- * @returns {boolean} True if the mirror reflects an object
- */
-Mirror.prototype.isObject = function() {
- return this instanceof ObjectMirror;
-}
-
-
-/**
- * Check whether the mirror reflects a function.
- * @returns {boolean} True if the mirror reflects a function
- */
-Mirror.prototype.isFunction = function() {
- return this instanceof FunctionMirror;
-}
-
-
-/**
- * Check whether the mirror reflects an unresolved function.
- * @returns {boolean} True if the mirror reflects an unresolved function
- */
-Mirror.prototype.isUnresolvedFunction = function() {
- return this instanceof UnresolvedFunctionMirror;
-}
-
-
-/**
- * Check whether the mirror reflects an array.
- * @returns {boolean} True if the mirror reflects an array
- */
-Mirror.prototype.isArray = function() {
- return this instanceof ArrayMirror;
-}
-
-
-/**
- * Check whether the mirror reflects a date.
- * @returns {boolean} True if the mirror reflects a date
- */
-Mirror.prototype.isDate = function() {
- return this instanceof DateMirror;
-}
-
-
-/**
- * Check whether the mirror reflects a regular expression.
- * @returns {boolean} True if the mirror reflects a regular expression
- */
-Mirror.prototype.isRegExp = function() {
- return this instanceof RegExpMirror;
-}
-
-
-/**
- * Check whether the mirror reflects an error.
- * @returns {boolean} True if the mirror reflects an error
- */
-Mirror.prototype.isError = function() {
- return this instanceof ErrorMirror;
-}
-
-
-/**
- * Check whether the mirror reflects a property.
- * @returns {boolean} True if the mirror reflects a property
- */
-Mirror.prototype.isProperty = function() {
- return this instanceof PropertyMirror;
-}
-
-
-/**
- * Check whether the mirror reflects a stack frame.
- * @returns {boolean} True if the mirror reflects a stack frame
- */
-Mirror.prototype.isFrame = function() {
- return this instanceof FrameMirror;
-}
-
-
-/**
- * Check whether the mirror reflects a script.
- * @returns {boolean} True if the mirror reflects a script
- */
-Mirror.prototype.isScript = function() {
- return this instanceof ScriptMirror;
-}
-
-
-/**
- * Check whether the mirror reflects a context.
- * @returns {boolean} True if the mirror reflects a context
- */
-Mirror.prototype.isContext = function() {
- return this instanceof ContextMirror;
-}
-
-
-/**
- * Check whether the mirror reflects a scope.
- * @returns {boolean} True if the mirror reflects a scope
- */
-Mirror.prototype.isScope = function() {
- return this instanceof ScopeMirror;
-}
-
-
-/**
- * Allocate a handle id for this object.
- */
-Mirror.prototype.allocateHandle_ = function() {
- this.handle_ = next_handle_++;
-}
-
-
-/**
- * Allocate a transient handle id for this object. Transient handles are
- * negative.
- */
-Mirror.prototype.allocateTransientHandle_ = function() {
- this.handle_ = next_transient_handle_--;
-}
-
-
-Mirror.prototype.toText = function() {
- // Simpel to text which is used when on specialization in subclass.
- return "#<" + this.constructor.name + ">";
-}
-
-
-/**
- * Base class for all value mirror objects.
- * @param {string} type The type of the mirror
- * @param {value} value The value reflected by this mirror
- * @param {boolean} transient indicate whether this object is transient with a
- * transient handle
- * @constructor
- * @extends Mirror
- */
-function ValueMirror(type, value, transient) {
- %_CallFunction(this, type, Mirror);
- this.value_ = value;
- if (!transient) {
- this.allocateHandle_();
- } else {
- this.allocateTransientHandle_();
- }
-}
-inherits(ValueMirror, Mirror);
-
-
-Mirror.prototype.handle = function() {
- return this.handle_;
-};
-
-
-/**
- * Check whether this is a primitive value.
- * @return {boolean} True if the mirror reflects a primitive value
- */
-ValueMirror.prototype.isPrimitive = function() {
- var type = this.type();
- return type === 'undefined' ||
- type === 'null' ||
- type === 'boolean' ||
- type === 'number' ||
- type === 'string';
-};
-
-
-/**
- * Get the actual value reflected by this mirror.
- * @return {value} The value reflected by this mirror
- */
-ValueMirror.prototype.value = function() {
- return this.value_;
-};
-
-
-/**
- * Mirror object for Undefined.
- * @constructor
- * @extends ValueMirror
- */
-function UndefinedMirror() {
- %_CallFunction(this, UNDEFINED_TYPE, void 0, ValueMirror);
-}
-inherits(UndefinedMirror, ValueMirror);
-
-
-UndefinedMirror.prototype.toText = function() {
- return 'undefined';
-}
-
-
-/**
- * Mirror object for null.
- * @constructor
- * @extends ValueMirror
- */
-function NullMirror() {
- %_CallFunction(this, NULL_TYPE, null, ValueMirror);
-}
-inherits(NullMirror, ValueMirror);
-
-
-NullMirror.prototype.toText = function() {
- return 'null';
-}
-
-
-/**
- * Mirror object for boolean values.
- * @param {boolean} value The boolean value reflected by this mirror
- * @constructor
- * @extends ValueMirror
- */
-function BooleanMirror(value) {
- %_CallFunction(this, BOOLEAN_TYPE, value, ValueMirror);
-}
-inherits(BooleanMirror, ValueMirror);
-
-
-BooleanMirror.prototype.toText = function() {
- return this.value_ ? 'true' : 'false';
-}
-
-
-/**
- * Mirror object for number values.
- * @param {number} value The number value reflected by this mirror
- * @constructor
- * @extends ValueMirror
- */
-function NumberMirror(value) {
- %_CallFunction(this, NUMBER_TYPE, value, ValueMirror);
-}
-inherits(NumberMirror, ValueMirror);
-
-
-NumberMirror.prototype.toText = function() {
- return %NumberToString(this.value_);
-}
-
-
-/**
- * Mirror object for string values.
- * @param {string} value The string value reflected by this mirror
- * @constructor
- * @extends ValueMirror
- */
-function StringMirror(value) {
- %_CallFunction(this, STRING_TYPE, value, ValueMirror);
-}
-inherits(StringMirror, ValueMirror);
-
-
-StringMirror.prototype.length = function() {
- return this.value_.length;
-};
-
-StringMirror.prototype.getTruncatedValue = function(maxLength) {
- if (maxLength != -1 && this.length() > maxLength) {
- return this.value_.substring(0, maxLength) +
- '... (length: ' + this.length() + ')';
- }
- return this.value_;
-}
-
-StringMirror.prototype.toText = function() {
- return this.getTruncatedValue(kMaxProtocolStringLength);
-}
-
-
-/**
- * Mirror object for objects.
- * @param {object} value The object reflected by this mirror
- * @param {boolean} transient indicate whether this object is transient with a
- * transient handle
- * @constructor
- * @extends ValueMirror
- */
-function ObjectMirror(value, type, transient) {
- %_CallFunction(this, type || OBJECT_TYPE, value, transient, ValueMirror);
-}
-inherits(ObjectMirror, ValueMirror);
-
-
-ObjectMirror.prototype.className = function() {
- return %_ClassOf(this.value_);
-};
-
-
-ObjectMirror.prototype.constructorFunction = function() {
- return MakeMirror(%DebugGetProperty(this.value_, 'constructor'));
-};
-
-
-ObjectMirror.prototype.prototypeObject = function() {
- return MakeMirror(%DebugGetProperty(this.value_, 'prototype'));
-};
-
-
-ObjectMirror.prototype.protoObject = function() {
- return MakeMirror(%DebugGetPrototype(this.value_));
-};
-
-
-ObjectMirror.prototype.hasNamedInterceptor = function() {
- // Get information on interceptors for this object.
- var x = %GetInterceptorInfo(this.value_);
- return (x & 2) != 0;
-};
-
-
-ObjectMirror.prototype.hasIndexedInterceptor = function() {
- // Get information on interceptors for this object.
- var x = %GetInterceptorInfo(this.value_);
- return (x & 1) != 0;
-};
-
-
-/**
- * Return the property names for this object.
- * @param {number} kind Indicate whether named, indexed or both kinds of
- * properties are requested
- * @param {number} limit Limit the number of names returend to the specified
- value
- * @return {Array} Property names for this object
- */
-ObjectMirror.prototype.propertyNames = function(kind, limit) {
- // Find kind and limit and allocate array for the result
- kind = kind || PropertyKind.Named | PropertyKind.Indexed;
-
- var propertyNames;
- var elementNames;
- var total = 0;
-
- // Find all the named properties.
- if (kind & PropertyKind.Named) {
- // Get the local property names.
- propertyNames = %GetLocalPropertyNames(this.value_);
- total += propertyNames.length;
-
- // Get names for named interceptor properties if any.
- if (this.hasNamedInterceptor() && (kind & PropertyKind.Named)) {
- var namedInterceptorNames =
- %GetNamedInterceptorPropertyNames(this.value_);
- if (namedInterceptorNames) {
- propertyNames = propertyNames.concat(namedInterceptorNames);
- total += namedInterceptorNames.length;
- }
- }
- }
-
- // Find all the indexed properties.
- if (kind & PropertyKind.Indexed) {
- // Get the local element names.
- elementNames = %GetLocalElementNames(this.value_);
- total += elementNames.length;
-
- // Get names for indexed interceptor properties.
- if (this.hasIndexedInterceptor() && (kind & PropertyKind.Indexed)) {
- var indexedInterceptorNames =
- %GetIndexedInterceptorElementNames(this.value_);
- if (indexedInterceptorNames) {
- elementNames = elementNames.concat(indexedInterceptorNames);
- total += indexedInterceptorNames.length;
- }
- }
- }
- limit = Math.min(limit || total, total);
-
- var names = new Array(limit);
- var index = 0;
-
- // Copy names for named properties.
- if (kind & PropertyKind.Named) {
- for (var i = 0; index < limit && i < propertyNames.length; i++) {
- names[index++] = propertyNames[i];
- }
- }
-
- // Copy names for indexed properties.
- if (kind & PropertyKind.Indexed) {
- for (var i = 0; index < limit && i < elementNames.length; i++) {
- names[index++] = elementNames[i];
- }
- }
-
- return names;
-};
-
-
-/**
- * Return the properties for this object as an array of PropertyMirror objects.
- * @param {number} kind Indicate whether named, indexed or both kinds of
- * properties are requested
- * @param {number} limit Limit the number of properties returend to the
- specified value
- * @return {Array} Property mirrors for this object
- */
-ObjectMirror.prototype.properties = function(kind, limit) {
- var names = this.propertyNames(kind, limit);
- var properties = new Array(names.length);
- for (var i = 0; i < names.length; i++) {
- properties[i] = this.property(names[i]);
- }
-
- return properties;
-};
-
-
-ObjectMirror.prototype.property = function(name) {
- var details = %DebugGetPropertyDetails(this.value_, %ToString(name));
- if (details) {
- return new PropertyMirror(this, name, details);
- }
-
- // Nothing found.
- return GetUndefinedMirror();
-};
-
-
-
-/**
- * Try to find a property from its value.
- * @param {Mirror} value The property value to look for
- * @return {PropertyMirror} The property with the specified value. If no
- * property was found with the specified value UndefinedMirror is returned
- */
-ObjectMirror.prototype.lookupProperty = function(value) {
- var properties = this.properties();
-
- // Look for property value in properties.
- for (var i = 0; i < properties.length; i++) {
-
- // Skip properties which are defined through assessors.
- var property = properties[i];
- if (property.propertyType() != PropertyType.Callbacks) {
- if (%_ObjectEquals(property.value_, value.value_)) {
- return property;
- }
- }
- }
-
- // Nothing found.
- return GetUndefinedMirror();
-};
-
-
-/**
- * Returns objects which has direct references to this object
- * @param {number} opt_max_objects Optional parameter specifying the maximum
- * number of referencing objects to return.
- * @return {Array} The objects which has direct references to this object.
- */
-ObjectMirror.prototype.referencedBy = function(opt_max_objects) {
- // Find all objects with direct references to this object.
- var result = %DebugReferencedBy(this.value_,
- Mirror.prototype, opt_max_objects || 0);
-
- // Make mirrors for all the references found.
- for (var i = 0; i < result.length; i++) {
- result[i] = MakeMirror(result[i]);
- }
-
- return result;
-};
-
-
-ObjectMirror.prototype.toText = function() {
- var name;
- var ctor = this.constructorFunction();
- if (!ctor.isFunction()) {
- name = this.className();
- } else {
- name = ctor.name();
- if (!name) {
- name = this.className();
- }
- }
- return '#<' + name + '>';
-};
-
-
-/**
- * Mirror object for functions.
- * @param {function} value The function object reflected by this mirror.
- * @constructor
- * @extends ObjectMirror
- */
-function FunctionMirror(value) {
- %_CallFunction(this, value, FUNCTION_TYPE, ObjectMirror);
- this.resolved_ = true;
-}
-inherits(FunctionMirror, ObjectMirror);
-
-
-/**
- * Returns whether the function is resolved.
- * @return {boolean} True if the function is resolved. Unresolved functions can
- * only originate as functions from stack frames
- */
-FunctionMirror.prototype.resolved = function() {
- return this.resolved_;
-};
-
-
-/**
- * Returns the name of the function.
- * @return {string} Name of the function
- */
-FunctionMirror.prototype.name = function() {
- return %FunctionGetName(this.value_);
-};
-
-
-/**
- * Returns the inferred name of the function.
- * @return {string} Name of the function
- */
-FunctionMirror.prototype.inferredName = function() {
- return %FunctionGetInferredName(this.value_);
-};
-
-
-/**
- * Returns the source code for the function.
- * @return {string or undefined} The source code for the function. If the
- * function is not resolved undefined will be returned.
- */
-FunctionMirror.prototype.source = function() {
- // Return source if function is resolved. Otherwise just fall through to
- // return undefined.
- if (this.resolved()) {
- return builtins.FunctionSourceString(this.value_);
- }
-};
-
-
-/**
- * Returns the script object for the function.
- * @return {ScriptMirror or undefined} Script object for the function or
- * undefined if the function has no script
- */
-FunctionMirror.prototype.script = function() {
- // Return script if function is resolved. Otherwise just fall through
- // to return undefined.
- if (this.resolved()) {
- var script = %FunctionGetScript(this.value_);
- if (script) {
- return MakeMirror(script);
- }
- }
-};
-
-
-/**
- * Returns the script source position for the function. Only makes sense
- * for functions which has a script defined.
- * @return {Number or undefined} in-script position for the function
- */
-FunctionMirror.prototype.sourcePosition_ = function() {
- // Return script if function is resolved. Otherwise just fall through
- // to return undefined.
- if (this.resolved()) {
- return %FunctionGetScriptSourcePosition(this.value_);
- }
-};
-
-
-/**
- * Returns the script source location object for the function. Only makes sense
- * for functions which has a script defined.
- * @return {Location or undefined} in-script location for the function begin
- */
-FunctionMirror.prototype.sourceLocation = function() {
- if (this.resolved() && this.script()) {
- return this.script().locationFromPosition(this.sourcePosition_(),
- true);
- }
-};
-
-
-/**
- * Returns objects constructed by this function.
- * @param {number} opt_max_instances Optional parameter specifying the maximum
- * number of instances to return.
- * @return {Array or undefined} The objects constructed by this function.
- */
-FunctionMirror.prototype.constructedBy = function(opt_max_instances) {
- if (this.resolved()) {
- // Find all objects constructed from this function.
- var result = %DebugConstructedBy(this.value_, opt_max_instances || 0);
-
- // Make mirrors for all the instances found.
- for (var i = 0; i < result.length; i++) {
- result[i] = MakeMirror(result[i]);
- }
-
- return result;
- } else {
- return [];
- }
-};
-
-
-FunctionMirror.prototype.toText = function() {
- return this.source();
-}
-
-
-/**
- * Mirror object for unresolved functions.
- * @param {string} value The name for the unresolved function reflected by this
- * mirror.
- * @constructor
- * @extends ObjectMirror
- */
-function UnresolvedFunctionMirror(value) {
- // Construct this using the ValueMirror as an unresolved function is not a
- // real object but just a string.
- %_CallFunction(this, FUNCTION_TYPE, value, ValueMirror);
- this.propertyCount_ = 0;
- this.elementCount_ = 0;
- this.resolved_ = false;
-}
-inherits(UnresolvedFunctionMirror, FunctionMirror);
-
-
-UnresolvedFunctionMirror.prototype.className = function() {
- return 'Function';
-};
-
-
-UnresolvedFunctionMirror.prototype.constructorFunction = function() {
- return GetUndefinedMirror();
-};
-
-
-UnresolvedFunctionMirror.prototype.prototypeObject = function() {
- return GetUndefinedMirror();
-};
-
-
-UnresolvedFunctionMirror.prototype.protoObject = function() {
- return GetUndefinedMirror();
-};
-
-
-UnresolvedFunctionMirror.prototype.name = function() {
- return this.value_;
-};
-
-
-UnresolvedFunctionMirror.prototype.inferredName = function() {
- return undefined;
-};
-
-
-UnresolvedFunctionMirror.prototype.propertyNames = function(kind, limit) {
- return [];
-}
-
-
-/**
- * Mirror object for arrays.
- * @param {Array} value The Array object reflected by this mirror
- * @constructor
- * @extends ObjectMirror
- */
-function ArrayMirror(value) {
- %_CallFunction(this, value, ObjectMirror);
-}
-inherits(ArrayMirror, ObjectMirror);
-
-
-ArrayMirror.prototype.length = function() {
- return this.value_.length;
-};
-
-
-ArrayMirror.prototype.indexedPropertiesFromRange = function(opt_from_index, opt_to_index) {
- var from_index = opt_from_index || 0;
- var to_index = opt_to_index || this.length() - 1;
- if (from_index > to_index) return new Array();
- var values = new Array(to_index - from_index + 1);
- for (var i = from_index; i <= to_index; i++) {
- var details = %DebugGetPropertyDetails(this.value_, %ToString(i));
- var value;
- if (details) {
- value = new PropertyMirror(this, i, details);
- } else {
- value = GetUndefinedMirror();
- }
- values[i - from_index] = value;
- }
- return values;
-}
-
-
-/**
- * Mirror object for dates.
- * @param {Date} value The Date object reflected by this mirror
- * @constructor
- * @extends ObjectMirror
- */
-function DateMirror(value) {
- %_CallFunction(this, value, ObjectMirror);
-}
-inherits(DateMirror, ObjectMirror);
-
-
-DateMirror.prototype.toText = function() {
- var s = JSON.stringify(this.value_);
- return s.substring(1, s.length - 1); // cut quotes
-}
-
-
-/**
- * Mirror object for regular expressions.
- * @param {RegExp} value The RegExp object reflected by this mirror
- * @constructor
- * @extends ObjectMirror
- */
-function RegExpMirror(value) {
- %_CallFunction(this, value, REGEXP_TYPE, ObjectMirror);
-}
-inherits(RegExpMirror, ObjectMirror);
-
-
-/**
- * Returns the source to the regular expression.
- * @return {string or undefined} The source to the regular expression
- */
-RegExpMirror.prototype.source = function() {
- return this.value_.source;
-};
-
-
-/**
- * Returns whether this regular expression has the global (g) flag set.
- * @return {boolean} Value of the global flag
- */
-RegExpMirror.prototype.global = function() {
- return this.value_.global;
-};
-
-
-/**
- * Returns whether this regular expression has the ignore case (i) flag set.
- * @return {boolean} Value of the ignore case flag
- */
-RegExpMirror.prototype.ignoreCase = function() {
- return this.value_.ignoreCase;
-};
-
-
-/**
- * Returns whether this regular expression has the multiline (m) flag set.
- * @return {boolean} Value of the multiline flag
- */
-RegExpMirror.prototype.multiline = function() {
- return this.value_.multiline;
-};
-
-
-RegExpMirror.prototype.toText = function() {
- // Simpel to text which is used when on specialization in subclass.
- return "/" + this.source() + "/";
-}
-
-
-/**
- * Mirror object for error objects.
- * @param {Error} value The error object reflected by this mirror
- * @constructor
- * @extends ObjectMirror
- */
-function ErrorMirror(value) {
- %_CallFunction(this, value, ERROR_TYPE, ObjectMirror);
-}
-inherits(ErrorMirror, ObjectMirror);
-
-
-/**
- * Returns the message for this eror object.
- * @return {string or undefined} The message for this eror object
- */
-ErrorMirror.prototype.message = function() {
- return this.value_.message;
-};
-
-
-ErrorMirror.prototype.toText = function() {
- // Use the same text representation as in messages.js.
- var text;
- try {
- str = %_CallFunction(this.value_, builtins.errorToString);
- } catch (e) {
- str = '#<Error>';
- }
- return str;
-}
-
-
-/**
- * Base mirror object for properties.
- * @param {ObjectMirror} mirror The mirror object having this property
- * @param {string} name The name of the property
- * @param {Array} details Details about the property
- * @constructor
- * @extends Mirror
- */
-function PropertyMirror(mirror, name, details) {
- %_CallFunction(this, PROPERTY_TYPE, Mirror);
- this.mirror_ = mirror;
- this.name_ = name;
- this.value_ = details[0];
- this.details_ = details[1];
- if (details.length > 2) {
- this.exception_ = details[2]
- this.getter_ = details[3];
- this.setter_ = details[4];
- }
-}
-inherits(PropertyMirror, Mirror);
-
-
-PropertyMirror.prototype.isReadOnly = function() {
- return (this.attributes() & PropertyAttribute.ReadOnly) != 0;
-}
-
-
-PropertyMirror.prototype.isEnum = function() {
- return (this.attributes() & PropertyAttribute.DontEnum) == 0;
-}
-
-
-PropertyMirror.prototype.canDelete = function() {
- return (this.attributes() & PropertyAttribute.DontDelete) == 0;
-}
-
-
-PropertyMirror.prototype.name = function() {
- return this.name_;
-}
-
-
-PropertyMirror.prototype.isIndexed = function() {
- for (var i = 0; i < this.name_.length; i++) {
- if (this.name_[i] < '0' || '9' < this.name_[i]) {
- return false;
- }
- }
- return true;
-}
-
-
-PropertyMirror.prototype.value = function() {
- return MakeMirror(this.value_, false);
-}
-
-
-/**
- * Returns whether this property value is an exception.
- * @return {booolean} True if this property value is an exception
- */
-PropertyMirror.prototype.isException = function() {
- return this.exception_ ? true : false;
-}
-
-
-PropertyMirror.prototype.attributes = function() {
- return %DebugPropertyAttributesFromDetails(this.details_);
-}
-
-
-PropertyMirror.prototype.propertyType = function() {
- return %DebugPropertyTypeFromDetails(this.details_);
-}
-
-
-PropertyMirror.prototype.insertionIndex = function() {
- return %DebugPropertyIndexFromDetails(this.details_);
-}
-
-
-/**
- * Returns whether this property has a getter defined through __defineGetter__.
- * @return {booolean} True if this property has a getter
- */
-PropertyMirror.prototype.hasGetter = function() {
- return this.getter_ ? true : false;
-}
-
-
-/**
- * Returns whether this property has a setter defined through __defineSetter__.
- * @return {booolean} True if this property has a setter
- */
-PropertyMirror.prototype.hasSetter = function() {
- return this.setter_ ? true : false;
-}
-
-
-/**
- * Returns the getter for this property defined through __defineGetter__.
- * @return {Mirror} FunctionMirror reflecting the getter function or
- * UndefinedMirror if there is no getter for this property
- */
-PropertyMirror.prototype.getter = function() {
- if (this.hasGetter()) {
- return MakeMirror(this.getter_);
- } else {
- return GetUndefinedMirror();
- }
-}
-
-
-/**
- * Returns the setter for this property defined through __defineSetter__.
- * @return {Mirror} FunctionMirror reflecting the setter function or
- * UndefinedMirror if there is no setter for this property
- */
-PropertyMirror.prototype.setter = function() {
- if (this.hasSetter()) {
- return MakeMirror(this.setter_);
- } else {
- return GetUndefinedMirror();
- }
-}
-
-
-/**
- * Returns whether this property is natively implemented by the host or a set
- * through JavaScript code.
- * @return {boolean} True if the property is
- * UndefinedMirror if there is no setter for this property
- */
-PropertyMirror.prototype.isNative = function() {
- return (this.propertyType() == PropertyType.Interceptor) ||
- ((this.propertyType() == PropertyType.Callbacks) &&
- !this.hasGetter() && !this.hasSetter());
-}
-
-
-const kFrameDetailsFrameIdIndex = 0;
-const kFrameDetailsReceiverIndex = 1;
-const kFrameDetailsFunctionIndex = 2;
-const kFrameDetailsArgumentCountIndex = 3;
-const kFrameDetailsLocalCountIndex = 4;
-const kFrameDetailsSourcePositionIndex = 5;
-const kFrameDetailsConstructCallIndex = 6;
-const kFrameDetailsAtReturnIndex = 7;
-const kFrameDetailsDebuggerFrameIndex = 8;
-const kFrameDetailsFirstDynamicIndex = 9;
-
-const kFrameDetailsNameIndex = 0;
-const kFrameDetailsValueIndex = 1;
-const kFrameDetailsNameValueSize = 2;
-
-/**
- * Wrapper for the frame details information retreived from the VM. The frame
- * details from the VM is an array with the following content. See runtime.cc
- * Runtime_GetFrameDetails.
- * 0: Id
- * 1: Receiver
- * 2: Function
- * 3: Argument count
- * 4: Local count
- * 5: Source position
- * 6: Construct call
- * 7: Is at return
- * 8: Debugger frame
- * Arguments name, value
- * Locals name, value
- * Return value if any
- * @param {number} break_id Current break id
- * @param {number} index Frame number
- * @constructor
- */
-function FrameDetails(break_id, index) {
- this.break_id_ = break_id;
- this.details_ = %GetFrameDetails(break_id, index);
-}
-
-
-FrameDetails.prototype.frameId = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsFrameIdIndex];
-}
-
-
-FrameDetails.prototype.receiver = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsReceiverIndex];
-}
-
-
-FrameDetails.prototype.func = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsFunctionIndex];
-}
-
-
-FrameDetails.prototype.isConstructCall = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsConstructCallIndex];
-}
-
-
-FrameDetails.prototype.isAtReturn = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsAtReturnIndex];
-}
-
-
-FrameDetails.prototype.isDebuggerFrame = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsDebuggerFrameIndex];
-}
-
-
-FrameDetails.prototype.argumentCount = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsArgumentCountIndex];
-}
-
-
-FrameDetails.prototype.argumentName = function(index) {
- %CheckExecutionState(this.break_id_);
- if (index >= 0 && index < this.argumentCount()) {
- return this.details_[kFrameDetailsFirstDynamicIndex +
- index * kFrameDetailsNameValueSize +
- kFrameDetailsNameIndex]
- }
-}
-
-
-FrameDetails.prototype.argumentValue = function(index) {
- %CheckExecutionState(this.break_id_);
- if (index >= 0 && index < this.argumentCount()) {
- return this.details_[kFrameDetailsFirstDynamicIndex +
- index * kFrameDetailsNameValueSize +
- kFrameDetailsValueIndex]
- }
-}
-
-
-FrameDetails.prototype.localCount = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsLocalCountIndex];
-}
-
-
-FrameDetails.prototype.sourcePosition = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsSourcePositionIndex];
-}
-
-
-FrameDetails.prototype.localName = function(index) {
- %CheckExecutionState(this.break_id_);
- if (index >= 0 && index < this.localCount()) {
- var locals_offset = kFrameDetailsFirstDynamicIndex +
- this.argumentCount() * kFrameDetailsNameValueSize
- return this.details_[locals_offset +
- index * kFrameDetailsNameValueSize +
- kFrameDetailsNameIndex]
- }
-}
-
-
-FrameDetails.prototype.localValue = function(index) {
- %CheckExecutionState(this.break_id_);
- if (index >= 0 && index < this.localCount()) {
- var locals_offset = kFrameDetailsFirstDynamicIndex +
- this.argumentCount() * kFrameDetailsNameValueSize
- return this.details_[locals_offset +
- index * kFrameDetailsNameValueSize +
- kFrameDetailsValueIndex]
- }
-}
-
-
-FrameDetails.prototype.returnValue = function() {
- %CheckExecutionState(this.break_id_);
- var return_value_offset =
- kFrameDetailsFirstDynamicIndex +
- (this.argumentCount() + this.localCount()) * kFrameDetailsNameValueSize;
- if (this.details_[kFrameDetailsAtReturnIndex]) {
- return this.details_[return_value_offset];
- }
-}
-
-
-FrameDetails.prototype.scopeCount = function() {
- return %GetScopeCount(this.break_id_, this.frameId());
-}
-
-
-/**
- * Mirror object for stack frames.
- * @param {number} break_id The break id in the VM for which this frame is
- valid
- * @param {number} index The frame index (top frame is index 0)
- * @constructor
- * @extends Mirror
- */
-function FrameMirror(break_id, index) {
- %_CallFunction(this, FRAME_TYPE, Mirror);
- this.break_id_ = break_id;
- this.index_ = index;
- this.details_ = new FrameDetails(break_id, index);
-}
-inherits(FrameMirror, Mirror);
-
-
-FrameMirror.prototype.index = function() {
- return this.index_;
-};
-
-
-FrameMirror.prototype.func = function() {
- // Get the function for this frame from the VM.
- var f = this.details_.func();
-
- // Create a function mirror. NOTE: MakeMirror cannot be used here as the
- // value returned from the VM might be a string if the function for the
- // frame is unresolved.
- if (IS_FUNCTION(f)) {
- return MakeMirror(f);
- } else {
- return new UnresolvedFunctionMirror(f);
- }
-};
-
-
-FrameMirror.prototype.receiver = function() {
- return MakeMirror(this.details_.receiver());
-};
-
-
-FrameMirror.prototype.isConstructCall = function() {
- return this.details_.isConstructCall();
-};
-
-
-FrameMirror.prototype.isAtReturn = function() {
- return this.details_.isAtReturn();
-};
-
-
-FrameMirror.prototype.isDebuggerFrame = function() {
- return this.details_.isDebuggerFrame();
-};
-
-
-FrameMirror.prototype.argumentCount = function() {
- return this.details_.argumentCount();
-};
-
-
-FrameMirror.prototype.argumentName = function(index) {
- return this.details_.argumentName(index);
-};
-
-
-FrameMirror.prototype.argumentValue = function(index) {
- return MakeMirror(this.details_.argumentValue(index));
-};
-
-
-FrameMirror.prototype.localCount = function() {
- return this.details_.localCount();
-};
-
-
-FrameMirror.prototype.localName = function(index) {
- return this.details_.localName(index);
-};
-
-
-FrameMirror.prototype.localValue = function(index) {
- return MakeMirror(this.details_.localValue(index));
-};
-
-
-FrameMirror.prototype.returnValue = function() {
- return MakeMirror(this.details_.returnValue());
-};
-
-
-FrameMirror.prototype.sourcePosition = function() {
- return this.details_.sourcePosition();
-};
-
-
-FrameMirror.prototype.sourceLocation = function() {
- if (this.func().resolved() && this.func().script()) {
- return this.func().script().locationFromPosition(this.sourcePosition(),
- true);
- }
-};
-
-
-FrameMirror.prototype.sourceLine = function() {
- if (this.func().resolved()) {
- var location = this.sourceLocation();
- if (location) {
- return location.line;
- }
- }
-};
-
-
-FrameMirror.prototype.sourceColumn = function() {
- if (this.func().resolved()) {
- var location = this.sourceLocation();
- if (location) {
- return location.column;
- }
- }
-};
-
-
-FrameMirror.prototype.sourceLineText = function() {
- if (this.func().resolved()) {
- var location = this.sourceLocation();
- if (location) {
- return location.sourceText();
- }
- }
-};
-
-
-FrameMirror.prototype.scopeCount = function() {
- return this.details_.scopeCount();
-};
-
-
-FrameMirror.prototype.scope = function(index) {
- return new ScopeMirror(this, index);
-};
-
-
-FrameMirror.prototype.evaluate = function(source, disable_break, opt_context_object) {
- var result = %DebugEvaluate(this.break_id_, this.details_.frameId(),
- source, Boolean(disable_break), opt_context_object);
- return MakeMirror(result);
-};
-
-
-FrameMirror.prototype.invocationText = function() {
- // Format frame invoaction (receiver, function and arguments).
- var result = '';
- var func = this.func();
- var receiver = this.receiver();
- if (this.isConstructCall()) {
- // For constructor frames display new followed by the function name.
- result += 'new ';
- result += func.name() ? func.name() : '[anonymous]';
- } else if (this.isDebuggerFrame()) {
- result += '[debugger]';
- } else {
- // If the receiver has a className which is 'global' don't display it.
- var display_receiver = !receiver.className || receiver.className() != 'global';
- if (display_receiver) {
- result += receiver.toText();
- }
- // Try to find the function as a property in the receiver. Include the
- // prototype chain in the lookup.
- var property = GetUndefinedMirror();
- if (!receiver.isUndefined()) {
- for (var r = receiver; !r.isNull() && property.isUndefined(); r = r.protoObject()) {
- property = r.lookupProperty(func);
- }
- }
- if (!property.isUndefined()) {
- // The function invoked was found on the receiver. Use the property name
- // for the backtrace.
- if (!property.isIndexed()) {
- if (display_receiver) {
- result += '.';
- }
- result += property.name();
- } else {
- result += '[';
- result += property.name();
- result += ']';
- }
- // Also known as - if the name in the function doesn't match the name
- // under which it was looked up.
- if (func.name() && func.name() != property.name()) {
- result += '(aka ' + func.name() + ')';
- }
- } else {
- // The function invoked was not found on the receiver. Use the function
- // name if available for the backtrace.
- if (display_receiver) {
- result += '.';
- }
- result += func.name() ? func.name() : '[anonymous]';
- }
- }
-
- // Render arguments for normal frames.
- if (!this.isDebuggerFrame()) {
- result += '(';
- for (var i = 0; i < this.argumentCount(); i++) {
- if (i != 0) result += ', ';
- if (this.argumentName(i)) {
- result += this.argumentName(i);
- result += '=';
- }
- result += this.argumentValue(i).toText();
- }
- result += ')';
- }
-
- if (this.isAtReturn()) {
- result += ' returning ';
- result += this.returnValue().toText();
- }
-
- return result;
-}
-
-
-FrameMirror.prototype.sourceAndPositionText = function() {
- // Format source and position.
- var result = '';
- var func = this.func();
- if (func.resolved()) {
- if (func.script()) {
- if (func.script().name()) {
- result += func.script().name();
- } else {
- result += '[unnamed]';
- }
- if (!this.isDebuggerFrame()) {
- var location = this.sourceLocation();
- result += ' line ';
- result += !IS_UNDEFINED(location) ? (location.line + 1) : '?';
- result += ' column ';
- result += !IS_UNDEFINED(location) ? (location.column + 1) : '?';
- if (!IS_UNDEFINED(this.sourcePosition())) {
- result += ' (position ' + (this.sourcePosition() + 1) + ')';
- }
- }
- } else {
- result += '[no source]';
- }
- } else {
- result += '[unresolved]';
- }
-
- return result;
-}
-
-
-FrameMirror.prototype.localsText = function() {
- // Format local variables.
- var result = '';
- var locals_count = this.localCount()
- if (locals_count > 0) {
- for (var i = 0; i < locals_count; ++i) {
- result += ' var ';
- result += this.localName(i);
- result += ' = ';
- result += this.localValue(i).toText();
- if (i < locals_count - 1) result += '\n';
- }
- }
-
- return result;
-}
-
-
-FrameMirror.prototype.toText = function(opt_locals) {
- var result = '';
- result += '#' + (this.index() <= 9 ? '0' : '') + this.index();
- result += ' ';
- result += this.invocationText();
- result += ' ';
- result += this.sourceAndPositionText();
- if (opt_locals) {
- result += '\n';
- result += this.localsText();
- }
- return result;
-}
-
-
-const kScopeDetailsTypeIndex = 0;
-const kScopeDetailsObjectIndex = 1;
-
-function ScopeDetails(frame, index) {
- this.break_id_ = frame.break_id_;
- this.details_ = %GetScopeDetails(frame.break_id_,
- frame.details_.frameId(),
- index);
-}
-
-
-ScopeDetails.prototype.type = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kScopeDetailsTypeIndex];
-}
-
-
-ScopeDetails.prototype.object = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kScopeDetailsObjectIndex];
-}
-
-
-/**
- * Mirror object for scope.
- * @param {FrameMirror} frame The frame this scope is a part of
- * @param {number} index The scope index in the frame
- * @constructor
- * @extends Mirror
- */
-function ScopeMirror(frame, index) {
- %_CallFunction(this, SCOPE_TYPE, Mirror);
- this.frame_index_ = frame.index_;
- this.scope_index_ = index;
- this.details_ = new ScopeDetails(frame, index);
-}
-inherits(ScopeMirror, Mirror);
-
-
-ScopeMirror.prototype.frameIndex = function() {
- return this.frame_index_;
-};
-
-
-ScopeMirror.prototype.scopeIndex = function() {
- return this.scope_index_;
-};
-
-
-ScopeMirror.prototype.scopeType = function() {
- return this.details_.type();
-};
-
-
-ScopeMirror.prototype.scopeObject = function() {
- // For local and closure scopes create a transient mirror as these objects are
- // created on the fly materializing the local or closure scopes and
- // therefore will not preserve identity.
- var transient = this.scopeType() == ScopeType.Local ||
- this.scopeType() == ScopeType.Closure;
- return MakeMirror(this.details_.object(), transient);
-};
-
-
-/**
- * Mirror object for script source.
- * @param {Script} script The script object
- * @constructor
- * @extends Mirror
- */
-function ScriptMirror(script) {
- %_CallFunction(this, SCRIPT_TYPE, Mirror);
- this.script_ = script;
- this.context_ = new ContextMirror(script.context_data);
- this.allocateHandle_();
-}
-inherits(ScriptMirror, Mirror);
-
-
-ScriptMirror.prototype.value = function() {
- return this.script_;
-};
-
-
-ScriptMirror.prototype.name = function() {
- return this.script_.name || this.script_.nameOrSourceURL();
-};
-
-
-ScriptMirror.prototype.id = function() {
- return this.script_.id;
-};
-
-
-ScriptMirror.prototype.source = function() {
- return this.script_.source;
-};
-
-
-ScriptMirror.prototype.lineOffset = function() {
- return this.script_.line_offset;
-};
-
-
-ScriptMirror.prototype.columnOffset = function() {
- return this.script_.column_offset;
-};
-
-
-ScriptMirror.prototype.data = function() {
- return this.script_.data;
-};
-
-
-ScriptMirror.prototype.scriptType = function() {
- return this.script_.type;
-};
-
-
-ScriptMirror.prototype.compilationType = function() {
- return this.script_.compilation_type;
-};
-
-
-ScriptMirror.prototype.lineCount = function() {
- return this.script_.lineCount();
-};
-
-
-ScriptMirror.prototype.locationFromPosition = function(
- position, include_resource_offset) {
- return this.script_.locationFromPosition(position, include_resource_offset);
-}
-
-
-ScriptMirror.prototype.sourceSlice = function (opt_from_line, opt_to_line) {
- return this.script_.sourceSlice(opt_from_line, opt_to_line);
-}
-
-
-ScriptMirror.prototype.context = function() {
- return this.context_;
-};
-
-
-ScriptMirror.prototype.evalFromScript = function() {
- return MakeMirror(this.script_.eval_from_script);
-};
-
-
-ScriptMirror.prototype.evalFromFunctionName = function() {
- return MakeMirror(this.script_.eval_from_function_name);
-};
-
-
-ScriptMirror.prototype.evalFromLocation = function() {
- var eval_from_script = this.evalFromScript();
- if (!eval_from_script.isUndefined()) {
- var position = this.script_.eval_from_script_position;
- return eval_from_script.locationFromPosition(position, true);
- }
-};
-
-
-ScriptMirror.prototype.toText = function() {
- var result = '';
- result += this.name();
- result += ' (lines: ';
- if (this.lineOffset() > 0) {
- result += this.lineOffset();
- result += '-';
- result += this.lineOffset() + this.lineCount() - 1;
- } else {
- result += this.lineCount();
- }
- result += ')';
- return result;
-}
-
-
-/**
- * Mirror object for context.
- * @param {Object} data The context data
- * @constructor
- * @extends Mirror
- */
-function ContextMirror(data) {
- %_CallFunction(this, CONTEXT_TYPE, Mirror);
- this.data_ = data;
- this.allocateHandle_();
-}
-inherits(ContextMirror, Mirror);
-
-
-ContextMirror.prototype.data = function() {
- return this.data_;
-};
-
-
-/**
- * Returns a mirror serializer
- *
- * @param {boolean} details Set to true to include details
- * @param {Object} options Options comtrolling the serialization
- * The following options can be set:
- * includeSource: include ths full source of scripts
- * @returns {MirrorSerializer} mirror serializer
- */
-function MakeMirrorSerializer(details, options) {
- return new JSONProtocolSerializer(details, options);
-}
-
-
-/**
- * Object for serializing a mirror objects and its direct references.
- * @param {boolean} details Indicates whether to include details for the mirror
- * serialized
- * @constructor
- */
-function JSONProtocolSerializer(details, options) {
- this.details_ = details;
- this.options_ = options;
- this.mirrors_ = [ ];
-}
-
-
-/**
- * Returns a serialization of an object reference. The referenced object are
- * added to the serialization state.
- *
- * @param {Mirror} mirror The mirror to serialize
- * @returns {String} JSON serialization
- */
-JSONProtocolSerializer.prototype.serializeReference = function(mirror) {
- return this.serialize_(mirror, true, true);
-}
-
-
-/**
- * Returns a serialization of an object value. The referenced objects are
- * added to the serialization state.
- *
- * @param {Mirror} mirror The mirror to serialize
- * @returns {String} JSON serialization
- */
-JSONProtocolSerializer.prototype.serializeValue = function(mirror) {
- var json = this.serialize_(mirror, false, true);
- return json;
-}
-
-
-/**
- * Returns a serialization of all the objects referenced.
- *
- * @param {Mirror} mirror The mirror to serialize.
- * @returns {Array.<Object>} Array of the referenced objects converted to
- * protcol objects.
- */
-JSONProtocolSerializer.prototype.serializeReferencedObjects = function() {
- // Collect the protocol representation of the referenced objects in an array.
- var content = [];
-
- // Get the number of referenced objects.
- var count = this.mirrors_.length;
-
- for (var i = 0; i < count; i++) {
- content.push(this.serialize_(this.mirrors_[i], false, false));
- }
-
- return content;
-}
-
-
-JSONProtocolSerializer.prototype.includeSource_ = function() {
- return this.options_ && this.options_.includeSource;
-}
-
-
-JSONProtocolSerializer.prototype.inlineRefs_ = function() {
- return this.options_ && this.options_.inlineRefs;
-}
-
-
-JSONProtocolSerializer.prototype.maxStringLength_ = function() {
- if (IS_UNDEFINED(this.options_) ||
- IS_UNDEFINED(this.options_.maxStringLength)) {
- return kMaxProtocolStringLength;
- }
- return this.options_.maxStringLength;
-}
-
-
-JSONProtocolSerializer.prototype.add_ = function(mirror) {
- // If this mirror is already in the list just return.
- for (var i = 0; i < this.mirrors_.length; i++) {
- if (this.mirrors_[i] === mirror) {
- return;
- }
- }
-
- // Add the mirror to the list of mirrors to be serialized.
- this.mirrors_.push(mirror);
-}
-
-
-/**
- * Formats mirror object to protocol reference object with some data that can
- * be used to display the value in debugger.
- * @param {Mirror} mirror Mirror to serialize.
- * @return {Object} Protocol reference object.
- */
-JSONProtocolSerializer.prototype.serializeReferenceWithDisplayData_ =
- function(mirror) {
- var o = {};
- o.ref = mirror.handle();
- o.type = mirror.type();
- switch (mirror.type()) {
- case UNDEFINED_TYPE:
- case NULL_TYPE:
- case BOOLEAN_TYPE:
- case NUMBER_TYPE:
- o.value = mirror.value();
- break;
- case STRING_TYPE:
- o.value = mirror.getTruncatedValue(this.maxStringLength_());
- break;
- case FUNCTION_TYPE:
- o.name = mirror.name();
- o.inferredName = mirror.inferredName();
- if (mirror.script()) {
- o.scriptId = mirror.script().id();
- }
- break;
- case ERROR_TYPE:
- case REGEXP_TYPE:
- o.value = mirror.toText();
- break;
- case OBJECT_TYPE:
- o.className = mirror.className();
- break;
- }
- return o;
-};
-
-
-JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
- details) {
- // If serializing a reference to a mirror just return the reference and add
- // the mirror to the referenced mirrors.
- if (reference &&
- (mirror.isValue() || mirror.isScript() || mirror.isContext())) {
- if (this.inlineRefs_() && mirror.isValue()) {
- return this.serializeReferenceWithDisplayData_(mirror);
- } else {
- this.add_(mirror);
- return {'ref' : mirror.handle()};
- }
- }
-
- // Collect the JSON property/value pairs.
- var content = {};
-
- // Add the mirror handle.
- if (mirror.isValue() || mirror.isScript() || mirror.isContext()) {
- content.handle = mirror.handle();
- }
-
- // Always add the type.
- content.type = mirror.type();
-
- switch (mirror.type()) {
- case UNDEFINED_TYPE:
- case NULL_TYPE:
- // Undefined and null are represented just by their type.
- break;
-
- case BOOLEAN_TYPE:
- // Boolean values are simply represented by their value.
- content.value = mirror.value();
- break;
-
- case NUMBER_TYPE:
- // Number values are simply represented by their value.
- content.value = NumberToJSON_(mirror.value());
- break;
-
- case STRING_TYPE:
- // String values might have their value cropped to keep down size.
- if (this.maxStringLength_() != -1 &&
- mirror.length() > this.maxStringLength_()) {
- var str = mirror.getTruncatedValue(this.maxStringLength_());
- content.value = str;
- content.fromIndex = 0;
- content.toIndex = this.maxStringLength_();
- } else {
- content.value = mirror.value();
- }
- content.length = mirror.length();
- break;
-
- case OBJECT_TYPE:
- case FUNCTION_TYPE:
- case ERROR_TYPE:
- case REGEXP_TYPE:
- // Add object representation.
- this.serializeObject_(mirror, content, details);
- break;
-
- case PROPERTY_TYPE:
- throw new Error('PropertyMirror cannot be serialized independeltly')
- break;
-
- case FRAME_TYPE:
- // Add object representation.
- this.serializeFrame_(mirror, content);
- break;
-
- case SCOPE_TYPE:
- // Add object representation.
- this.serializeScope_(mirror, content);
- break;
-
- case SCRIPT_TYPE:
- // Script is represented by id, name and source attributes.
- if (mirror.name()) {
- content.name = mirror.name();
- }
- content.id = mirror.id();
- content.lineOffset = mirror.lineOffset();
- content.columnOffset = mirror.columnOffset();
- content.lineCount = mirror.lineCount();
- if (mirror.data()) {
- content.data = mirror.data();
- }
- if (this.includeSource_()) {
- content.source = mirror.source();
- } else {
- var sourceStart = mirror.source().substring(0, 80);
- content.sourceStart = sourceStart;
- }
- content.sourceLength = mirror.source().length;
- content.scriptType = mirror.scriptType();
- content.compilationType = mirror.compilationType();
- // For compilation type eval emit information on the script from which
- // eval was called if a script is present.
- if (mirror.compilationType() == 1 &&
- mirror.evalFromScript()) {
- content.evalFromScript =
- this.serializeReference(mirror.evalFromScript());
- var evalFromLocation = mirror.evalFromLocation()
- if (evalFromLocation) {
- content.evalFromLocation = { line: evalFromLocation.line,
- column: evalFromLocation.column };
- }
- if (mirror.evalFromFunctionName()) {
- content.evalFromFunctionName = mirror.evalFromFunctionName();
- }
- }
- if (mirror.context()) {
- content.context = this.serializeReference(mirror.context());
- }
- break;
-
- case CONTEXT_TYPE:
- content.data = mirror.data();
- break;
- }
-
- // Always add the text representation.
- content.text = mirror.toText();
-
- // Create and return the JSON string.
- return content;
-}
-
-
-/**
- * Serialize object information to the following JSON format.
- *
- * {"className":"<class name>",
- * "constructorFunction":{"ref":<number>},
- * "protoObject":{"ref":<number>},
- * "prototypeObject":{"ref":<number>},
- * "namedInterceptor":<boolean>,
- * "indexedInterceptor":<boolean>,
- * "properties":[<properties>]}
- */
-JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
- details) {
- // Add general object properties.
- content.className = mirror.className();
- content.constructorFunction =
- this.serializeReference(mirror.constructorFunction());
- content.protoObject = this.serializeReference(mirror.protoObject());
- content.prototypeObject = this.serializeReference(mirror.prototypeObject());
-
- // Add flags to indicate whether there are interceptors.
- if (mirror.hasNamedInterceptor()) {
- content.namedInterceptor = true;
- }
- if (mirror.hasIndexedInterceptor()) {
- content.indexedInterceptor = true;
- }
-
- // Add function specific properties.
- if (mirror.isFunction()) {
- // Add function specific properties.
- content.name = mirror.name();
- if (!IS_UNDEFINED(mirror.inferredName())) {
- content.inferredName = mirror.inferredName();
- }
- content.resolved = mirror.resolved();
- if (mirror.resolved()) {
- content.source = mirror.source();
- }
- if (mirror.script()) {
- content.script = this.serializeReference(mirror.script());
- content.scriptId = mirror.script().id();
-
- serializeLocationFields(mirror.sourceLocation(), content);
- }
- }
-
- // Add date specific properties.
- if (mirror.isDate()) {
- // Add date specific properties.
- content.value = mirror.value();
- }
-
- // Add actual properties - named properties followed by indexed properties.
- var propertyNames = mirror.propertyNames(PropertyKind.Named);
- var propertyIndexes = mirror.propertyNames(PropertyKind.Indexed);
- var p = new Array(propertyNames.length + propertyIndexes.length);
- for (var i = 0; i < propertyNames.length; i++) {
- var propertyMirror = mirror.property(propertyNames[i]);
- p[i] = this.serializeProperty_(propertyMirror);
- if (details) {
- this.add_(propertyMirror.value());
- }
- }
- for (var i = 0; i < propertyIndexes.length; i++) {
- var propertyMirror = mirror.property(propertyIndexes[i]);
- p[propertyNames.length + i] = this.serializeProperty_(propertyMirror);
- if (details) {
- this.add_(propertyMirror.value());
- }
- }
- content.properties = p;
-}
-
-
-/**
- * Serialize location information to the following JSON format:
- *
- * "position":"<position>",
- * "line":"<line>",
- * "column":"<column>",
- *
- * @param {SourceLocation} location The location to serialize, may be undefined.
- */
-function serializeLocationFields (location, content) {
- if (!location) {
- return;
- }
- content.position = location.position;
- var line = location.line;
- if (!IS_UNDEFINED(line)) {
- content.line = line;
- }
- var column = location.column;
- if (!IS_UNDEFINED(column)) {
- content.column = column;
- }
-}
-
-
-/**
- * Serialize property information to the following JSON format for building the
- * array of properties.
- *
- * {"name":"<property name>",
- * "attributes":<number>,
- * "propertyType":<number>,
- * "ref":<number>}
- *
- * If the attribute for the property is PropertyAttribute.None it is not added.
- * If the propertyType for the property is PropertyType.Normal it is not added.
- * Here are a couple of examples.
- *
- * {"name":"hello","ref":1}
- * {"name":"length","attributes":7,"propertyType":3,"ref":2}
- *
- * @param {PropertyMirror} propertyMirror The property to serialize.
- * @returns {Object} Protocol object representing the property.
- */
-JSONProtocolSerializer.prototype.serializeProperty_ = function(propertyMirror) {
- var result = {};
-
- result.name = propertyMirror.name();
- var propertyValue = propertyMirror.value();
- if (this.inlineRefs_() && propertyValue.isValue()) {
- result.value = this.serializeReferenceWithDisplayData_(propertyValue);
- } else {
- if (propertyMirror.attributes() != PropertyAttribute.None) {
- result.attributes = propertyMirror.attributes();
- }
- if (propertyMirror.propertyType() != PropertyType.Normal) {
- result.propertyType = propertyMirror.propertyType();
- }
- result.ref = propertyValue.handle();
- }
- return result;
-}
-
-
-JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
- content.index = mirror.index();
- content.receiver = this.serializeReference(mirror.receiver());
- var func = mirror.func();
- content.func = this.serializeReference(func);
- if (func.script()) {
- content.script = this.serializeReference(func.script());
- }
- content.constructCall = mirror.isConstructCall();
- content.atReturn = mirror.isAtReturn();
- if (mirror.isAtReturn()) {
- content.returnValue = this.serializeReference(mirror.returnValue());
- }
- content.debuggerFrame = mirror.isDebuggerFrame();
- var x = new Array(mirror.argumentCount());
- for (var i = 0; i < mirror.argumentCount(); i++) {
- var arg = {};
- var argument_name = mirror.argumentName(i)
- if (argument_name) {
- arg.name = argument_name;
- }
- arg.value = this.serializeReference(mirror.argumentValue(i));
- x[i] = arg;
- }
- content.arguments = x;
- var x = new Array(mirror.localCount());
- for (var i = 0; i < mirror.localCount(); i++) {
- var local = {};
- local.name = mirror.localName(i);
- local.value = this.serializeReference(mirror.localValue(i));
- x[i] = local;
- }
- content.locals = x;
- serializeLocationFields(mirror.sourceLocation(), content);
- var source_line_text = mirror.sourceLineText();
- if (!IS_UNDEFINED(source_line_text)) {
- content.sourceLineText = source_line_text;
- }
-
- content.scopes = [];
- for (var i = 0; i < mirror.scopeCount(); i++) {
- var scope = mirror.scope(i);
- content.scopes.push({
- type: scope.scopeType(),
- index: i
- });
- }
-}
-
-
-JSONProtocolSerializer.prototype.serializeScope_ = function(mirror, content) {
- content.index = mirror.scopeIndex();
- content.frameIndex = mirror.frameIndex();
- content.type = mirror.scopeType();
- content.object = this.inlineRefs_() ?
- this.serializeValue(mirror.scopeObject()) :
- this.serializeReference(mirror.scopeObject());
-}
-
-
-/**
- * Convert a number to a protocol value. For all finite numbers the number
- * itself is returned. For non finite numbers NaN, Infinite and
- * -Infinite the string representation "NaN", "Infinite" or "-Infinite"
- * (not including the quotes) is returned.
- *
- * @param {number} value The number value to convert to a protocol value.
- * @returns {number|string} Protocol value.
- */
-function NumberToJSON_(value) {
- if (isNaN(value)) {
- return 'NaN';
- }
- if (!NUMBER_IS_FINITE(value)) {
- if (value > 0) {
- return 'Infinity';
- } else {
- return '-Infinity';
- }
- }
- return value;
-}
diff --git a/src/3rdparty/v8/src/mksnapshot.cc b/src/3rdparty/v8/src/mksnapshot.cc
deleted file mode 100644
index 6ecbc8c..0000000
--- a/src/3rdparty/v8/src/mksnapshot.cc
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <signal.h>
-#include <string>
-#include <map>
-
-#include "v8.h"
-
-#include "bootstrapper.h"
-#include "natives.h"
-#include "platform.h"
-#include "serialize.h"
-#include "list.h"
-
-// use explicit namespace to avoid clashing with types in namespace v8
-namespace i = v8::internal;
-using namespace v8;
-
-static const unsigned int kMaxCounters = 256;
-
-// A single counter in a counter collection.
-class Counter {
- public:
- static const int kMaxNameSize = 64;
- int32_t* Bind(const char* name) {
- int i;
- for (i = 0; i < kMaxNameSize - 1 && name[i]; i++) {
- name_[i] = name[i];
- }
- name_[i] = '\0';
- return &counter_;
- }
- private:
- int32_t counter_;
- uint8_t name_[kMaxNameSize];
-};
-
-
-// A set of counters and associated information. An instance of this
-// class is stored directly in the memory-mapped counters file if
-// the --save-counters options is used
-class CounterCollection {
- public:
- CounterCollection() {
- magic_number_ = 0xDEADFACE;
- max_counters_ = kMaxCounters;
- max_name_size_ = Counter::kMaxNameSize;
- counters_in_use_ = 0;
- }
- Counter* GetNextCounter() {
- if (counters_in_use_ == kMaxCounters) return NULL;
- return &counters_[counters_in_use_++];
- }
- private:
- uint32_t magic_number_;
- uint32_t max_counters_;
- uint32_t max_name_size_;
- uint32_t counters_in_use_;
- Counter counters_[kMaxCounters];
-};
-
-
-// We statically allocate a set of local counters to be used if we
-// don't want to store the stats in a memory-mapped file
-static CounterCollection local_counters;
-
-
-typedef std::map<std::string, int*> CounterMap;
-typedef std::map<std::string, int*>::iterator CounterMapIterator;
-static CounterMap counter_table_;
-
-
-class CppByteSink : public i::SnapshotByteSink {
- public:
- explicit CppByteSink(const char* snapshot_file)
- : bytes_written_(0),
- partial_sink_(this) {
- fp_ = i::OS::FOpen(snapshot_file, "wb");
- if (fp_ == NULL) {
- i::PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
- exit(1);
- }
- fprintf(fp_, "// Autogenerated snapshot file. Do not edit.\n\n");
- fprintf(fp_, "#include \"v8.h\"\n");
- fprintf(fp_, "#include \"platform.h\"\n\n");
- fprintf(fp_, "#include \"snapshot.h\"\n\n");
- fprintf(fp_, "namespace v8 {\nnamespace internal {\n\n");
- fprintf(fp_, "const byte Snapshot::data_[] = {");
- }
-
- virtual ~CppByteSink() {
- fprintf(fp_, "const int Snapshot::size_ = %d;\n\n", bytes_written_);
- fprintf(fp_, "} } // namespace v8::internal\n");
- fclose(fp_);
- }
-
- void WriteSpaceUsed(
- int new_space_used,
- int pointer_space_used,
- int data_space_used,
- int code_space_used,
- int map_space_used,
- int cell_space_used,
- int large_space_used) {
- fprintf(fp_, "};\n\n");
- fprintf(fp_, "const int Snapshot::new_space_used_ = %d;\n", new_space_used);
- fprintf(fp_,
- "const int Snapshot::pointer_space_used_ = %d;\n",
- pointer_space_used);
- fprintf(fp_,
- "const int Snapshot::data_space_used_ = %d;\n",
- data_space_used);
- fprintf(fp_,
- "const int Snapshot::code_space_used_ = %d;\n",
- code_space_used);
- fprintf(fp_, "const int Snapshot::map_space_used_ = %d;\n", map_space_used);
- fprintf(fp_,
- "const int Snapshot::cell_space_used_ = %d;\n",
- cell_space_used);
- fprintf(fp_,
- "const int Snapshot::large_space_used_ = %d;\n",
- large_space_used);
- }
-
- void WritePartialSnapshot() {
- int length = partial_sink_.Position();
- fprintf(fp_, "};\n\n");
- fprintf(fp_, "const int Snapshot::context_size_ = %d;\n", length);
- fprintf(fp_, "const byte Snapshot::context_data_[] = {\n");
- for (int j = 0; j < length; j++) {
- if ((j & 0x1f) == 0x1f) {
- fprintf(fp_, "\n");
- }
- char byte = partial_sink_.at(j);
- if (j != 0) {
- fprintf(fp_, ",");
- }
- fprintf(fp_, "%d", byte);
- }
- }
-
- virtual void Put(int byte, const char* description) {
- if (bytes_written_ != 0) {
- fprintf(fp_, ",");
- }
- fprintf(fp_, "%d", byte);
- bytes_written_++;
- if ((bytes_written_ & 0x1f) == 0) {
- fprintf(fp_, "\n");
- }
- }
-
- virtual int Position() {
- return bytes_written_;
- }
-
- i::SnapshotByteSink* partial_sink() { return &partial_sink_; }
-
- class PartialSnapshotSink : public i::SnapshotByteSink {
- public:
- explicit PartialSnapshotSink(CppByteSink* parent)
- : parent_(parent),
- data_() { }
- virtual ~PartialSnapshotSink() { data_.Free(); }
- virtual void Put(int byte, const char* description) {
- data_.Add(byte);
- }
- virtual int Position() { return data_.length(); }
- char at(int i) { return data_[i]; }
- private:
- CppByteSink* parent_;
- i::List<char> data_;
- };
-
- private:
- FILE* fp_;
- int bytes_written_;
- PartialSnapshotSink partial_sink_;
-};
-
-
-int main(int argc, char** argv) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // By default, log code create information in the snapshot.
- i::FLAG_log_code = true;
-#endif
- // Print the usage if an error occurs when parsing the command line
- // flags or if the help flag is set.
- int result = i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
- if (result > 0 || argc != 2 || i::FLAG_help) {
- ::printf("Usage: %s [flag] ... outfile\n", argv[0]);
- i::FlagList::PrintHelp();
- return !i::FLAG_help;
- }
- i::Serializer::Enable();
- Persistent<Context> context = v8::Context::New();
- ASSERT(!context.IsEmpty());
- // Make sure all builtin scripts are cached.
- { HandleScope scope;
- for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
- i::Isolate::Current()->bootstrapper()->NativesSourceLookup(i);
- }
- }
- // If we don't do this then we end up with a stray root pointing at the
- // context even after we have disposed of the context.
- HEAP->CollectAllGarbage(true);
- i::Object* raw_context = *(v8::Utils::OpenHandle(*context));
- context.Dispose();
- CppByteSink sink(argv[1]);
- // This results in a somewhat smaller snapshot, probably because it gets rid
- // of some things that are cached between garbage collections.
- i::StartupSerializer ser(&sink);
- ser.SerializeStrongReferences();
-
- i::PartialSerializer partial_ser(&ser, sink.partial_sink());
- partial_ser.Serialize(&raw_context);
-
- ser.SerializeWeakReferences();
-
- sink.WritePartialSnapshot();
-
- sink.WriteSpaceUsed(
- partial_ser.CurrentAllocationAddress(i::NEW_SPACE),
- partial_ser.CurrentAllocationAddress(i::OLD_POINTER_SPACE),
- partial_ser.CurrentAllocationAddress(i::OLD_DATA_SPACE),
- partial_ser.CurrentAllocationAddress(i::CODE_SPACE),
- partial_ser.CurrentAllocationAddress(i::MAP_SPACE),
- partial_ser.CurrentAllocationAddress(i::CELL_SPACE),
- partial_ser.CurrentAllocationAddress(i::LO_SPACE));
- return 0;
-}
diff --git a/src/3rdparty/v8/src/natives.h b/src/3rdparty/v8/src/natives.h
deleted file mode 100644
index 639a2d3..0000000
--- a/src/3rdparty/v8/src/natives.h
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_NATIVES_H_
-#define V8_NATIVES_H_
-
-namespace v8 {
-namespace internal {
-
-typedef bool (*NativeSourceCallback)(Vector<const char> name,
- Vector<const char> source,
- int index);
-
-enum NativeType {
- CORE, D8
-};
-
-template <NativeType type>
-class NativesCollection {
- public:
- // Number of built-in scripts.
- static int GetBuiltinsCount();
- // Number of debugger implementation scripts.
- static int GetDebuggerCount();
-
- // These are used to access built-in scripts. The debugger implementation
- // scripts have an index in the interval [0, GetDebuggerCount()). The
- // non-debugger scripts have an index in the interval [GetDebuggerCount(),
- // GetNativesCount()).
- static int GetIndex(const char* name);
- static Vector<const char> GetScriptSource(int index);
- static Vector<const char> GetScriptName(int index);
-};
-
-typedef NativesCollection<CORE> Natives;
-
-} } // namespace v8::internal
-
-#endif // V8_NATIVES_H_
diff --git a/src/3rdparty/v8/src/objects-debug.cc b/src/3rdparty/v8/src/objects-debug.cc
deleted file mode 100644
index dd606dc..0000000
--- a/src/3rdparty/v8/src/objects-debug.cc
+++ /dev/null
@@ -1,722 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "disassembler.h"
-#include "disasm.h"
-#include "jsregexp.h"
-#include "objects-visiting.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef DEBUG
-
-void MaybeObject::Verify() {
- Object* this_as_object;
- if (ToObject(&this_as_object)) {
- if (this_as_object->IsSmi()) {
- Smi::cast(this_as_object)->SmiVerify();
- } else {
- HeapObject::cast(this_as_object)->HeapObjectVerify();
- }
- } else {
- Failure::cast(this)->FailureVerify();
- }
-}
-
-
-void Object::VerifyPointer(Object* p) {
- if (p->IsHeapObject()) {
- HeapObject::VerifyHeapPointer(p);
- } else {
- ASSERT(p->IsSmi());
- }
-}
-
-
-void Smi::SmiVerify() {
- ASSERT(IsSmi());
-}
-
-
-void Failure::FailureVerify() {
- ASSERT(IsFailure());
-}
-
-
-void HeapObject::HeapObjectVerify() {
- InstanceType instance_type = map()->instance_type();
-
- if (instance_type < FIRST_NONSTRING_TYPE) {
- String::cast(this)->StringVerify();
- return;
- }
-
- switch (instance_type) {
- case MAP_TYPE:
- Map::cast(this)->MapVerify();
- break;
- case HEAP_NUMBER_TYPE:
- HeapNumber::cast(this)->HeapNumberVerify();
- break;
- case FIXED_ARRAY_TYPE:
- FixedArray::cast(this)->FixedArrayVerify();
- break;
- case BYTE_ARRAY_TYPE:
- ByteArray::cast(this)->ByteArrayVerify();
- break;
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- ExternalPixelArray::cast(this)->ExternalPixelArrayVerify();
- break;
- case EXTERNAL_BYTE_ARRAY_TYPE:
- ExternalByteArray::cast(this)->ExternalByteArrayVerify();
- break;
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- ExternalUnsignedByteArray::cast(this)->ExternalUnsignedByteArrayVerify();
- break;
- case EXTERNAL_SHORT_ARRAY_TYPE:
- ExternalShortArray::cast(this)->ExternalShortArrayVerify();
- break;
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- ExternalUnsignedShortArray::cast(this)->
- ExternalUnsignedShortArrayVerify();
- break;
- case EXTERNAL_INT_ARRAY_TYPE:
- ExternalIntArray::cast(this)->ExternalIntArrayVerify();
- break;
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- ExternalUnsignedIntArray::cast(this)->ExternalUnsignedIntArrayVerify();
- break;
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- ExternalFloatArray::cast(this)->ExternalFloatArrayVerify();
- break;
- case CODE_TYPE:
- Code::cast(this)->CodeVerify();
- break;
- case ODDBALL_TYPE:
- Oddball::cast(this)->OddballVerify();
- break;
- case JS_OBJECT_TYPE:
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- JSObject::cast(this)->JSObjectVerify();
- break;
- case JS_VALUE_TYPE:
- JSValue::cast(this)->JSValueVerify();
- break;
- case JS_FUNCTION_TYPE:
- JSFunction::cast(this)->JSFunctionVerify();
- break;
- case JS_GLOBAL_PROXY_TYPE:
- JSGlobalProxy::cast(this)->JSGlobalProxyVerify();
- break;
- case JS_GLOBAL_OBJECT_TYPE:
- JSGlobalObject::cast(this)->JSGlobalObjectVerify();
- break;
- case JS_BUILTINS_OBJECT_TYPE:
- JSBuiltinsObject::cast(this)->JSBuiltinsObjectVerify();
- break;
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
- JSGlobalPropertyCell::cast(this)->JSGlobalPropertyCellVerify();
- break;
- case JS_ARRAY_TYPE:
- JSArray::cast(this)->JSArrayVerify();
- break;
- case JS_REGEXP_TYPE:
- JSRegExp::cast(this)->JSRegExpVerify();
- break;
- case FILLER_TYPE:
- break;
- case PROXY_TYPE:
- Proxy::cast(this)->ProxyVerify();
- break;
- case SHARED_FUNCTION_INFO_TYPE:
- SharedFunctionInfo::cast(this)->SharedFunctionInfoVerify();
- break;
- case JS_MESSAGE_OBJECT_TYPE:
- JSMessageObject::cast(this)->JSMessageObjectVerify();
- break;
-
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
- case NAME##_TYPE: \
- Name::cast(this)->Name##Verify(); \
- break;
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
-
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void HeapObject::VerifyHeapPointer(Object* p) {
- ASSERT(p->IsHeapObject());
- ASSERT(HEAP->Contains(HeapObject::cast(p)));
-}
-
-
-void HeapNumber::HeapNumberVerify() {
- ASSERT(IsHeapNumber());
-}
-
-
-void ByteArray::ByteArrayVerify() {
- ASSERT(IsByteArray());
-}
-
-
-void ExternalPixelArray::ExternalPixelArrayVerify() {
- ASSERT(IsExternalPixelArray());
-}
-
-
-void ExternalByteArray::ExternalByteArrayVerify() {
- ASSERT(IsExternalByteArray());
-}
-
-
-void ExternalUnsignedByteArray::ExternalUnsignedByteArrayVerify() {
- ASSERT(IsExternalUnsignedByteArray());
-}
-
-
-void ExternalShortArray::ExternalShortArrayVerify() {
- ASSERT(IsExternalShortArray());
-}
-
-
-void ExternalUnsignedShortArray::ExternalUnsignedShortArrayVerify() {
- ASSERT(IsExternalUnsignedShortArray());
-}
-
-
-void ExternalIntArray::ExternalIntArrayVerify() {
- ASSERT(IsExternalIntArray());
-}
-
-
-void ExternalUnsignedIntArray::ExternalUnsignedIntArrayVerify() {
- ASSERT(IsExternalUnsignedIntArray());
-}
-
-
-void ExternalFloatArray::ExternalFloatArrayVerify() {
- ASSERT(IsExternalFloatArray());
-}
-
-
-void JSObject::JSObjectVerify() {
- VerifyHeapPointer(properties());
- VerifyHeapPointer(elements());
- if (HasFastProperties()) {
- CHECK_EQ(map()->unused_property_fields(),
- (map()->inobject_properties() + properties()->length() -
- map()->NextFreePropertyIndex()));
- }
- ASSERT(map()->has_fast_elements() ==
- (elements()->map() == GetHeap()->fixed_array_map() ||
- elements()->map() == GetHeap()->fixed_cow_array_map()));
- ASSERT(map()->has_fast_elements() == HasFastElements());
-}
-
-
-void Map::MapVerify() {
- ASSERT(!HEAP->InNewSpace(this));
- ASSERT(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE);
- ASSERT(instance_size() == kVariableSizeSentinel ||
- (kPointerSize <= instance_size() &&
- instance_size() < HEAP->Capacity()));
- VerifyHeapPointer(prototype());
- VerifyHeapPointer(instance_descriptors());
-}
-
-
-void Map::SharedMapVerify() {
- MapVerify();
- ASSERT(is_shared());
- ASSERT_EQ(GetHeap()->empty_descriptor_array(), instance_descriptors());
- ASSERT_EQ(0, pre_allocated_property_fields());
- ASSERT_EQ(0, unused_property_fields());
- ASSERT_EQ(StaticVisitorBase::GetVisitorId(instance_type(), instance_size()),
- visitor_id());
-}
-
-
-void CodeCache::CodeCacheVerify() {
- VerifyHeapPointer(default_cache());
- VerifyHeapPointer(normal_type_cache());
- ASSERT(default_cache()->IsFixedArray());
- ASSERT(normal_type_cache()->IsUndefined()
- || normal_type_cache()->IsCodeCacheHashTable());
-}
-
-
-void FixedArray::FixedArrayVerify() {
- for (int i = 0; i < length(); i++) {
- Object* e = get(i);
- if (e->IsHeapObject()) {
- VerifyHeapPointer(e);
- } else {
- e->Verify();
- }
- }
-}
-
-
-void JSValue::JSValueVerify() {
- Object* v = value();
- if (v->IsHeapObject()) {
- VerifyHeapPointer(v);
- }
-}
-
-
-void JSMessageObject::JSMessageObjectVerify() {
- CHECK(IsJSMessageObject());
- CHECK(type()->IsString());
- CHECK(arguments()->IsJSArray());
- VerifyObjectField(kStartPositionOffset);
- VerifyObjectField(kEndPositionOffset);
- VerifyObjectField(kArgumentsOffset);
- VerifyObjectField(kScriptOffset);
- VerifyObjectField(kStackTraceOffset);
- VerifyObjectField(kStackFramesOffset);
-}
-
-
-void String::StringVerify() {
- CHECK(IsString());
- CHECK(length() >= 0 && length() <= Smi::kMaxValue);
- if (IsSymbol()) {
- CHECK(!HEAP->InNewSpace(this));
- }
-}
-
-
-void JSFunction::JSFunctionVerify() {
- CHECK(IsJSFunction());
- VerifyObjectField(kPrototypeOrInitialMapOffset);
- VerifyObjectField(kNextFunctionLinkOffset);
- CHECK(next_function_link()->IsUndefined() ||
- next_function_link()->IsJSFunction());
-}
-
-
-void SharedFunctionInfo::SharedFunctionInfoVerify() {
- CHECK(IsSharedFunctionInfo());
- VerifyObjectField(kNameOffset);
- VerifyObjectField(kCodeOffset);
- VerifyObjectField(kScopeInfoOffset);
- VerifyObjectField(kInstanceClassNameOffset);
- VerifyObjectField(kFunctionDataOffset);
- VerifyObjectField(kScriptOffset);
- VerifyObjectField(kDebugInfoOffset);
-}
-
-
-void JSGlobalProxy::JSGlobalProxyVerify() {
- CHECK(IsJSGlobalProxy());
- JSObjectVerify();
- VerifyObjectField(JSGlobalProxy::kContextOffset);
- // Make sure that this object has no properties, elements.
- CHECK_EQ(0, properties()->length());
- CHECK(HasFastElements());
- CHECK_EQ(0, FixedArray::cast(elements())->length());
-}
-
-
-void JSGlobalObject::JSGlobalObjectVerify() {
- CHECK(IsJSGlobalObject());
- JSObjectVerify();
- for (int i = GlobalObject::kBuiltinsOffset;
- i < JSGlobalObject::kSize;
- i += kPointerSize) {
- VerifyObjectField(i);
- }
-}
-
-
-void JSBuiltinsObject::JSBuiltinsObjectVerify() {
- CHECK(IsJSBuiltinsObject());
- JSObjectVerify();
- for (int i = GlobalObject::kBuiltinsOffset;
- i < JSBuiltinsObject::kSize;
- i += kPointerSize) {
- VerifyObjectField(i);
- }
-}
-
-
-void Oddball::OddballVerify() {
- CHECK(IsOddball());
- VerifyHeapPointer(to_string());
- Object* number = to_number();
- if (number->IsHeapObject()) {
- ASSERT(number == HEAP->nan_value());
- } else {
- ASSERT(number->IsSmi());
- int value = Smi::cast(number)->value();
- // Hidden oddballs have negative smis.
- const int kLeastHiddenOddballNumber = -4;
- ASSERT(value <= 1);
- ASSERT(value >= kLeastHiddenOddballNumber);
- }
-}
-
-
-void JSGlobalPropertyCell::JSGlobalPropertyCellVerify() {
- CHECK(IsJSGlobalPropertyCell());
- VerifyObjectField(kValueOffset);
-}
-
-
-void Code::CodeVerify() {
- CHECK(IsAligned(reinterpret_cast<intptr_t>(instruction_start()),
- kCodeAlignment));
- Address last_gc_pc = NULL;
- for (RelocIterator it(this); !it.done(); it.next()) {
- it.rinfo()->Verify();
- // Ensure that GC will not iterate twice over the same pointer.
- if (RelocInfo::IsGCRelocMode(it.rinfo()->rmode())) {
- CHECK(it.rinfo()->pc() != last_gc_pc);
- last_gc_pc = it.rinfo()->pc();
- }
- }
-}
-
-
-void JSArray::JSArrayVerify() {
- JSObjectVerify();
- ASSERT(length()->IsNumber() || length()->IsUndefined());
- ASSERT(elements()->IsUndefined() || elements()->IsFixedArray());
-}
-
-
-void JSRegExp::JSRegExpVerify() {
- JSObjectVerify();
- ASSERT(data()->IsUndefined() || data()->IsFixedArray());
- switch (TypeTag()) {
- case JSRegExp::ATOM: {
- FixedArray* arr = FixedArray::cast(data());
- ASSERT(arr->get(JSRegExp::kAtomPatternIndex)->IsString());
- break;
- }
- case JSRegExp::IRREGEXP: {
- bool is_native = RegExpImpl::UsesNativeRegExp();
-
- FixedArray* arr = FixedArray::cast(data());
- Object* ascii_data = arr->get(JSRegExp::kIrregexpASCIICodeIndex);
- // TheHole : Not compiled yet.
- // JSObject: Compilation error.
- // Code/ByteArray: Compiled code.
- ASSERT(ascii_data->IsTheHole() || ascii_data->IsJSObject() ||
- (is_native ? ascii_data->IsCode() : ascii_data->IsByteArray()));
- Object* uc16_data = arr->get(JSRegExp::kIrregexpUC16CodeIndex);
- ASSERT(uc16_data->IsTheHole() || uc16_data->IsJSObject() ||
- (is_native ? uc16_data->IsCode() : uc16_data->IsByteArray()));
- ASSERT(arr->get(JSRegExp::kIrregexpCaptureCountIndex)->IsSmi());
- ASSERT(arr->get(JSRegExp::kIrregexpMaxRegisterCountIndex)->IsSmi());
- break;
- }
- default:
- ASSERT_EQ(JSRegExp::NOT_COMPILED, TypeTag());
- ASSERT(data()->IsUndefined());
- break;
- }
-}
-
-
-void Proxy::ProxyVerify() {
- ASSERT(IsProxy());
-}
-
-
-void AccessorInfo::AccessorInfoVerify() {
- CHECK(IsAccessorInfo());
- VerifyPointer(getter());
- VerifyPointer(setter());
- VerifyPointer(name());
- VerifyPointer(data());
- VerifyPointer(flag());
-}
-
-
-void AccessCheckInfo::AccessCheckInfoVerify() {
- CHECK(IsAccessCheckInfo());
- VerifyPointer(named_callback());
- VerifyPointer(indexed_callback());
- VerifyPointer(data());
-}
-
-
-void InterceptorInfo::InterceptorInfoVerify() {
- CHECK(IsInterceptorInfo());
- VerifyPointer(getter());
- VerifyPointer(setter());
- VerifyPointer(query());
- VerifyPointer(deleter());
- VerifyPointer(enumerator());
- VerifyPointer(data());
-}
-
-
-void CallHandlerInfo::CallHandlerInfoVerify() {
- CHECK(IsCallHandlerInfo());
- VerifyPointer(callback());
- VerifyPointer(data());
-}
-
-
-void TemplateInfo::TemplateInfoVerify() {
- VerifyPointer(tag());
- VerifyPointer(property_list());
-}
-
-void FunctionTemplateInfo::FunctionTemplateInfoVerify() {
- CHECK(IsFunctionTemplateInfo());
- TemplateInfoVerify();
- VerifyPointer(serial_number());
- VerifyPointer(call_code());
- VerifyPointer(property_accessors());
- VerifyPointer(prototype_template());
- VerifyPointer(parent_template());
- VerifyPointer(named_property_handler());
- VerifyPointer(indexed_property_handler());
- VerifyPointer(instance_template());
- VerifyPointer(signature());
- VerifyPointer(access_check_info());
-}
-
-
-void ObjectTemplateInfo::ObjectTemplateInfoVerify() {
- CHECK(IsObjectTemplateInfo());
- TemplateInfoVerify();
- VerifyPointer(constructor());
- VerifyPointer(internal_field_count());
-}
-
-
-void SignatureInfo::SignatureInfoVerify() {
- CHECK(IsSignatureInfo());
- VerifyPointer(receiver());
- VerifyPointer(args());
-}
-
-
-void TypeSwitchInfo::TypeSwitchInfoVerify() {
- CHECK(IsTypeSwitchInfo());
- VerifyPointer(types());
-}
-
-
-void Script::ScriptVerify() {
- CHECK(IsScript());
- VerifyPointer(source());
- VerifyPointer(name());
- line_offset()->SmiVerify();
- column_offset()->SmiVerify();
- VerifyPointer(data());
- VerifyPointer(wrapper());
- type()->SmiVerify();
- VerifyPointer(line_ends());
- VerifyPointer(id());
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void DebugInfo::DebugInfoVerify() {
- CHECK(IsDebugInfo());
- VerifyPointer(shared());
- VerifyPointer(original_code());
- VerifyPointer(code());
- VerifyPointer(break_points());
-}
-
-
-void BreakPointInfo::BreakPointInfoVerify() {
- CHECK(IsBreakPointInfo());
- code_position()->SmiVerify();
- source_position()->SmiVerify();
- statement_position()->SmiVerify();
- VerifyPointer(break_point_objects());
-}
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-void JSObject::IncrementSpillStatistics(SpillInformation* info) {
- info->number_of_objects_++;
- // Named properties
- if (HasFastProperties()) {
- info->number_of_objects_with_fast_properties_++;
- info->number_of_fast_used_fields_ += map()->NextFreePropertyIndex();
- info->number_of_fast_unused_fields_ += map()->unused_property_fields();
- } else {
- StringDictionary* dict = property_dictionary();
- info->number_of_slow_used_properties_ += dict->NumberOfElements();
- info->number_of_slow_unused_properties_ +=
- dict->Capacity() - dict->NumberOfElements();
- }
- // Indexed properties
- switch (GetElementsKind()) {
- case FAST_ELEMENTS: {
- info->number_of_objects_with_fast_elements_++;
- int holes = 0;
- FixedArray* e = FixedArray::cast(elements());
- int len = e->length();
- Heap* heap = HEAP;
- for (int i = 0; i < len; i++) {
- if (e->get(i) == heap->the_hole_value()) holes++;
- }
- info->number_of_fast_used_elements_ += len - holes;
- info->number_of_fast_unused_elements_ += holes;
- break;
- }
- case EXTERNAL_PIXEL_ELEMENTS: {
- info->number_of_objects_with_fast_elements_++;
- ExternalPixelArray* e = ExternalPixelArray::cast(elements());
- info->number_of_fast_used_elements_ += e->length();
- break;
- }
- case DICTIONARY_ELEMENTS: {
- NumberDictionary* dict = element_dictionary();
- info->number_of_slow_used_elements_ += dict->NumberOfElements();
- info->number_of_slow_unused_elements_ +=
- dict->Capacity() - dict->NumberOfElements();
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void JSObject::SpillInformation::Clear() {
- number_of_objects_ = 0;
- number_of_objects_with_fast_properties_ = 0;
- number_of_objects_with_fast_elements_ = 0;
- number_of_fast_used_fields_ = 0;
- number_of_fast_unused_fields_ = 0;
- number_of_slow_used_properties_ = 0;
- number_of_slow_unused_properties_ = 0;
- number_of_fast_used_elements_ = 0;
- number_of_fast_unused_elements_ = 0;
- number_of_slow_used_elements_ = 0;
- number_of_slow_unused_elements_ = 0;
-}
-
-void JSObject::SpillInformation::Print() {
- PrintF("\n JSObject Spill Statistics (#%d):\n", number_of_objects_);
-
- PrintF(" - fast properties (#%d): %d (used) %d (unused)\n",
- number_of_objects_with_fast_properties_,
- number_of_fast_used_fields_, number_of_fast_unused_fields_);
-
- PrintF(" - slow properties (#%d): %d (used) %d (unused)\n",
- number_of_objects_ - number_of_objects_with_fast_properties_,
- number_of_slow_used_properties_, number_of_slow_unused_properties_);
-
- PrintF(" - fast elements (#%d): %d (used) %d (unused)\n",
- number_of_objects_with_fast_elements_,
- number_of_fast_used_elements_, number_of_fast_unused_elements_);
-
- PrintF(" - slow elements (#%d): %d (used) %d (unused)\n",
- number_of_objects_ - number_of_objects_with_fast_elements_,
- number_of_slow_used_elements_, number_of_slow_unused_elements_);
-
- PrintF("\n");
-}
-
-
-bool DescriptorArray::IsSortedNoDuplicates() {
- String* current_key = NULL;
- uint32_t current = 0;
- for (int i = 0; i < number_of_descriptors(); i++) {
- String* key = GetKey(i);
- if (key == current_key) {
- PrintDescriptors();
- return false;
- }
- current_key = key;
- uint32_t hash = GetKey(i)->Hash();
- if (hash < current) {
- PrintDescriptors();
- return false;
- }
- current = hash;
- }
- return true;
-}
-
-
-void JSFunctionResultCache::JSFunctionResultCacheVerify() {
- JSFunction::cast(get(kFactoryIndex))->Verify();
-
- int size = Smi::cast(get(kCacheSizeIndex))->value();
- ASSERT(kEntriesIndex <= size);
- ASSERT(size <= length());
- ASSERT_EQ(0, size % kEntrySize);
-
- int finger = Smi::cast(get(kFingerIndex))->value();
- ASSERT(kEntriesIndex <= finger);
- ASSERT((finger < size) || (finger == kEntriesIndex && finger == size));
- ASSERT_EQ(0, finger % kEntrySize);
-
- if (FLAG_enable_slow_asserts) {
- for (int i = kEntriesIndex; i < size; i++) {
- ASSERT(!get(i)->IsTheHole());
- get(i)->Verify();
- }
- for (int i = size; i < length(); i++) {
- ASSERT(get(i)->IsTheHole());
- get(i)->Verify();
- }
- }
-}
-
-
-void NormalizedMapCache::NormalizedMapCacheVerify() {
- FixedArray::cast(this)->Verify();
- if (FLAG_enable_slow_asserts) {
- for (int i = 0; i < length(); i++) {
- Object* e = get(i);
- if (e->IsMap()) {
- Map::cast(e)->SharedMapVerify();
- } else {
- ASSERT(e->IsUndefined());
- }
- }
- }
-}
-
-
-#endif // DEBUG
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/objects-inl.h b/src/3rdparty/v8/src/objects-inl.h
deleted file mode 100644
index 37c51d7..0000000
--- a/src/3rdparty/v8/src/objects-inl.h
+++ /dev/null
@@ -1,4166 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Review notes:
-//
-// - The use of macros in these inline functions may seem superfluous
-// but it is absolutely needed to make sure gcc generates optimal
-// code. gcc is not happy when attempting to inline too deep.
-//
-
-#ifndef V8_OBJECTS_INL_H_
-#define V8_OBJECTS_INL_H_
-
-#include "objects.h"
-#include "contexts.h"
-#include "conversions-inl.h"
-#include "heap.h"
-#include "isolate.h"
-#include "property.h"
-#include "spaces.h"
-#include "v8memory.h"
-
-namespace v8 {
-namespace internal {
-
-PropertyDetails::PropertyDetails(Smi* smi) {
- value_ = smi->value();
-}
-
-
-Smi* PropertyDetails::AsSmi() {
- return Smi::FromInt(value_);
-}
-
-
-PropertyDetails PropertyDetails::AsDeleted() {
- Smi* smi = Smi::FromInt(value_ | DeletedField::encode(1));
- return PropertyDetails(smi);
-}
-
-
-#define CAST_ACCESSOR(type) \
- type* type::cast(Object* object) { \
- ASSERT(object->Is##type()); \
- return reinterpret_cast<type*>(object); \
- }
-
-
-#define INT_ACCESSORS(holder, name, offset) \
- int holder::name() { return READ_INT_FIELD(this, offset); } \
- void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); }
-
-
-#define ACCESSORS(holder, name, type, offset) \
- type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
- void holder::set_##name(type* value, WriteBarrierMode mode) { \
- WRITE_FIELD(this, offset, value); \
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode); \
- }
-
-
-// GC-safe accessors do not use HeapObject::GetHeap(), but access TLS instead.
-#define ACCESSORS_GCSAFE(holder, name, type, offset) \
- type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
- void holder::set_##name(type* value, WriteBarrierMode mode) { \
- WRITE_FIELD(this, offset, value); \
- CONDITIONAL_WRITE_BARRIER(HEAP, this, offset, mode); \
- }
-
-
-#define SMI_ACCESSORS(holder, name, offset) \
- int holder::name() { \
- Object* value = READ_FIELD(this, offset); \
- return Smi::cast(value)->value(); \
- } \
- void holder::set_##name(int value) { \
- WRITE_FIELD(this, offset, Smi::FromInt(value)); \
- }
-
-
-#define BOOL_GETTER(holder, field, name, offset) \
- bool holder::name() { \
- return BooleanBit::get(field(), offset); \
- } \
-
-
-#define BOOL_ACCESSORS(holder, field, name, offset) \
- bool holder::name() { \
- return BooleanBit::get(field(), offset); \
- } \
- void holder::set_##name(bool value) { \
- set_##field(BooleanBit::set(field(), offset, value)); \
- }
-
-
-bool Object::IsInstanceOf(FunctionTemplateInfo* expected) {
- // There is a constraint on the object; check.
- if (!this->IsJSObject()) return false;
- // Fetch the constructor function of the object.
- Object* cons_obj = JSObject::cast(this)->map()->constructor();
- if (!cons_obj->IsJSFunction()) return false;
- JSFunction* fun = JSFunction::cast(cons_obj);
- // Iterate through the chain of inheriting function templates to
- // see if the required one occurs.
- for (Object* type = fun->shared()->function_data();
- type->IsFunctionTemplateInfo();
- type = FunctionTemplateInfo::cast(type)->parent_template()) {
- if (type == expected) return true;
- }
- // Didn't find the required type in the inheritance chain.
- return false;
-}
-
-
-bool Object::IsSmi() {
- return HAS_SMI_TAG(this);
-}
-
-
-bool Object::IsHeapObject() {
- return Internals::HasHeapObjectTag(this);
-}
-
-
-bool Object::IsHeapNumber() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == HEAP_NUMBER_TYPE;
-}
-
-
-bool Object::IsString() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() < FIRST_NONSTRING_TYPE;
-}
-
-
-bool Object::IsSymbol() {
- if (!this->IsHeapObject()) return false;
- uint32_t type = HeapObject::cast(this)->map()->instance_type();
- // Because the symbol tag is non-zero and no non-string types have the
- // symbol bit set we can test for symbols with a very simple test
- // operation.
- ASSERT(kSymbolTag != 0);
- ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
- return (type & kIsSymbolMask) != 0;
-}
-
-
-bool Object::IsConsString() {
- if (!this->IsHeapObject()) return false;
- uint32_t type = HeapObject::cast(this)->map()->instance_type();
- return (type & (kIsNotStringMask | kStringRepresentationMask)) ==
- (kStringTag | kConsStringTag);
-}
-
-
-bool Object::IsSeqString() {
- if (!IsString()) return false;
- return StringShape(String::cast(this)).IsSequential();
-}
-
-
-bool Object::IsSeqAsciiString() {
- if (!IsString()) return false;
- return StringShape(String::cast(this)).IsSequential() &&
- String::cast(this)->IsAsciiRepresentation();
-}
-
-
-bool Object::IsSeqTwoByteString() {
- if (!IsString()) return false;
- return StringShape(String::cast(this)).IsSequential() &&
- String::cast(this)->IsTwoByteRepresentation();
-}
-
-
-bool Object::IsExternalString() {
- if (!IsString()) return false;
- return StringShape(String::cast(this)).IsExternal();
-}
-
-
-bool Object::IsExternalAsciiString() {
- if (!IsString()) return false;
- return StringShape(String::cast(this)).IsExternal() &&
- String::cast(this)->IsAsciiRepresentation();
-}
-
-
-bool Object::IsExternalTwoByteString() {
- if (!IsString()) return false;
- return StringShape(String::cast(this)).IsExternal() &&
- String::cast(this)->IsTwoByteRepresentation();
-}
-
-
-StringShape::StringShape(String* str)
- : type_(str->map()->instance_type()) {
- set_valid();
- ASSERT((type_ & kIsNotStringMask) == kStringTag);
-}
-
-
-StringShape::StringShape(Map* map)
- : type_(map->instance_type()) {
- set_valid();
- ASSERT((type_ & kIsNotStringMask) == kStringTag);
-}
-
-
-StringShape::StringShape(InstanceType t)
- : type_(static_cast<uint32_t>(t)) {
- set_valid();
- ASSERT((type_ & kIsNotStringMask) == kStringTag);
-}
-
-
-bool StringShape::IsSymbol() {
- ASSERT(valid());
- ASSERT(kSymbolTag != 0);
- return (type_ & kIsSymbolMask) != 0;
-}
-
-
-bool String::IsAsciiRepresentation() {
- uint32_t type = map()->instance_type();
- return (type & kStringEncodingMask) == kAsciiStringTag;
-}
-
-
-bool String::IsTwoByteRepresentation() {
- uint32_t type = map()->instance_type();
- return (type & kStringEncodingMask) == kTwoByteStringTag;
-}
-
-
-bool String::HasOnlyAsciiChars() {
- uint32_t type = map()->instance_type();
- return (type & kStringEncodingMask) == kAsciiStringTag ||
- (type & kAsciiDataHintMask) == kAsciiDataHintTag;
-}
-
-
-bool StringShape::IsCons() {
- return (type_ & kStringRepresentationMask) == kConsStringTag;
-}
-
-
-bool StringShape::IsExternal() {
- return (type_ & kStringRepresentationMask) == kExternalStringTag;
-}
-
-
-bool StringShape::IsSequential() {
- return (type_ & kStringRepresentationMask) == kSeqStringTag;
-}
-
-
-StringRepresentationTag StringShape::representation_tag() {
- uint32_t tag = (type_ & kStringRepresentationMask);
- return static_cast<StringRepresentationTag>(tag);
-}
-
-
-uint32_t StringShape::full_representation_tag() {
- return (type_ & (kStringRepresentationMask | kStringEncodingMask));
-}
-
-
-STATIC_CHECK((kStringRepresentationMask | kStringEncodingMask) ==
- Internals::kFullStringRepresentationMask);
-
-
-bool StringShape::IsSequentialAscii() {
- return full_representation_tag() == (kSeqStringTag | kAsciiStringTag);
-}
-
-
-bool StringShape::IsSequentialTwoByte() {
- return full_representation_tag() == (kSeqStringTag | kTwoByteStringTag);
-}
-
-
-bool StringShape::IsExternalAscii() {
- return full_representation_tag() == (kExternalStringTag | kAsciiStringTag);
-}
-
-
-bool StringShape::IsExternalTwoByte() {
- return full_representation_tag() == (kExternalStringTag | kTwoByteStringTag);
-}
-
-
-STATIC_CHECK((kExternalStringTag | kTwoByteStringTag) ==
- Internals::kExternalTwoByteRepresentationTag);
-
-
-uc32 FlatStringReader::Get(int index) {
- ASSERT(0 <= index && index <= length_);
- if (is_ascii_) {
- return static_cast<const byte*>(start_)[index];
- } else {
- return static_cast<const uc16*>(start_)[index];
- }
-}
-
-
-bool Object::IsNumber() {
- return IsSmi() || IsHeapNumber();
-}
-
-
-bool Object::IsByteArray() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == BYTE_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalPixelArray() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() ==
- EXTERNAL_PIXEL_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalArray() {
- if (!Object::IsHeapObject())
- return false;
- InstanceType instance_type =
- HeapObject::cast(this)->map()->instance_type();
- return (instance_type >= FIRST_EXTERNAL_ARRAY_TYPE &&
- instance_type <= LAST_EXTERNAL_ARRAY_TYPE);
-}
-
-
-bool Object::IsExternalByteArray() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() ==
- EXTERNAL_BYTE_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalUnsignedByteArray() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() ==
- EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalShortArray() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() ==
- EXTERNAL_SHORT_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalUnsignedShortArray() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() ==
- EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalIntArray() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() ==
- EXTERNAL_INT_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalUnsignedIntArray() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() ==
- EXTERNAL_UNSIGNED_INT_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalFloatArray() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() ==
- EXTERNAL_FLOAT_ARRAY_TYPE;
-}
-
-
-bool MaybeObject::IsFailure() {
- return HAS_FAILURE_TAG(this);
-}
-
-
-bool MaybeObject::IsRetryAfterGC() {
- return HAS_FAILURE_TAG(this)
- && Failure::cast(this)->type() == Failure::RETRY_AFTER_GC;
-}
-
-
-bool MaybeObject::IsOutOfMemory() {
- return HAS_FAILURE_TAG(this)
- && Failure::cast(this)->IsOutOfMemoryException();
-}
-
-
-bool MaybeObject::IsException() {
- return this == Failure::Exception();
-}
-
-
-bool MaybeObject::IsTheHole() {
- return !IsFailure() && ToObjectUnchecked()->IsTheHole();
-}
-
-
-Failure* Failure::cast(MaybeObject* obj) {
- ASSERT(HAS_FAILURE_TAG(obj));
- return reinterpret_cast<Failure*>(obj);
-}
-
-
-bool Object::IsJSObject() {
- return IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_OBJECT_TYPE;
-}
-
-
-bool Object::IsJSContextExtensionObject() {
- return IsHeapObject()
- && (HeapObject::cast(this)->map()->instance_type() ==
- JS_CONTEXT_EXTENSION_OBJECT_TYPE);
-}
-
-
-bool Object::IsMap() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == MAP_TYPE;
-}
-
-
-bool Object::IsFixedArray() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == FIXED_ARRAY_TYPE;
-}
-
-
-bool Object::IsDescriptorArray() {
- return IsFixedArray();
-}
-
-
-bool Object::IsDeoptimizationInputData() {
- // Must be a fixed array.
- if (!IsFixedArray()) return false;
-
- // There's no sure way to detect the difference between a fixed array and
- // a deoptimization data array. Since this is used for asserts we can
- // check that the length is zero or else the fixed size plus a multiple of
- // the entry size.
- int length = FixedArray::cast(this)->length();
- if (length == 0) return true;
-
- length -= DeoptimizationInputData::kFirstDeoptEntryIndex;
- return length >= 0 &&
- length % DeoptimizationInputData::kDeoptEntrySize == 0;
-}
-
-
-bool Object::IsDeoptimizationOutputData() {
- if (!IsFixedArray()) return false;
- // There's actually no way to see the difference between a fixed array and
- // a deoptimization data array. Since this is used for asserts we can check
- // that the length is plausible though.
- if (FixedArray::cast(this)->length() % 2 != 0) return false;
- return true;
-}
-
-
-bool Object::IsContext() {
- if (Object::IsHeapObject()) {
- Heap* heap = HeapObject::cast(this)->GetHeap();
- return (HeapObject::cast(this)->map() == heap->context_map() ||
- HeapObject::cast(this)->map() == heap->catch_context_map() ||
- HeapObject::cast(this)->map() == heap->global_context_map());
- }
- return false;
-}
-
-
-bool Object::IsCatchContext() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map() ==
- HeapObject::cast(this)->GetHeap()->catch_context_map();
-}
-
-
-bool Object::IsGlobalContext() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map() ==
- HeapObject::cast(this)->GetHeap()->global_context_map();
-}
-
-
-bool Object::IsJSFunction() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_TYPE;
-}
-
-
-template <> inline bool Is<JSFunction>(Object* obj) {
- return obj->IsJSFunction();
-}
-
-
-bool Object::IsCode() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == CODE_TYPE;
-}
-
-
-bool Object::IsOddball() {
- ASSERT(HEAP->is_safe_to_read_maps());
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == ODDBALL_TYPE;
-}
-
-
-bool Object::IsJSGlobalPropertyCell() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type()
- == JS_GLOBAL_PROPERTY_CELL_TYPE;
-}
-
-
-bool Object::IsSharedFunctionInfo() {
- return Object::IsHeapObject() &&
- (HeapObject::cast(this)->map()->instance_type() ==
- SHARED_FUNCTION_INFO_TYPE);
-}
-
-
-bool Object::IsJSValue() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == JS_VALUE_TYPE;
-}
-
-
-bool Object::IsJSMessageObject() {
- return Object::IsHeapObject()
- && (HeapObject::cast(this)->map()->instance_type() ==
- JS_MESSAGE_OBJECT_TYPE);
-}
-
-
-bool Object::IsStringWrapper() {
- return IsJSValue() && JSValue::cast(this)->value()->IsString();
-}
-
-
-bool Object::IsProxy() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == PROXY_TYPE;
-}
-
-
-bool Object::IsBoolean() {
- return IsOddball() &&
- ((Oddball::cast(this)->kind() & Oddball::kNotBooleanMask) == 0);
-}
-
-
-bool Object::IsJSArray() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == JS_ARRAY_TYPE;
-}
-
-
-bool Object::IsJSRegExp() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == JS_REGEXP_TYPE;
-}
-
-
-template <> inline bool Is<JSArray>(Object* obj) {
- return obj->IsJSArray();
-}
-
-
-bool Object::IsHashTable() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map() ==
- HeapObject::cast(this)->GetHeap()->hash_table_map();
-}
-
-
-bool Object::IsDictionary() {
- return IsHashTable() && this !=
- HeapObject::cast(this)->GetHeap()->symbol_table();
-}
-
-
-bool Object::IsSymbolTable() {
- return IsHashTable() && this ==
- HeapObject::cast(this)->GetHeap()->raw_unchecked_symbol_table();
-}
-
-
-bool Object::IsJSFunctionResultCache() {
- if (!IsFixedArray()) return false;
- FixedArray* self = FixedArray::cast(this);
- int length = self->length();
- if (length < JSFunctionResultCache::kEntriesIndex) return false;
- if ((length - JSFunctionResultCache::kEntriesIndex)
- % JSFunctionResultCache::kEntrySize != 0) {
- return false;
- }
-#ifdef DEBUG
- reinterpret_cast<JSFunctionResultCache*>(this)->JSFunctionResultCacheVerify();
-#endif
- return true;
-}
-
-
-bool Object::IsNormalizedMapCache() {
- if (!IsFixedArray()) return false;
- if (FixedArray::cast(this)->length() != NormalizedMapCache::kEntries) {
- return false;
- }
-#ifdef DEBUG
- reinterpret_cast<NormalizedMapCache*>(this)->NormalizedMapCacheVerify();
-#endif
- return true;
-}
-
-
-bool Object::IsCompilationCacheTable() {
- return IsHashTable();
-}
-
-
-bool Object::IsCodeCacheHashTable() {
- return IsHashTable();
-}
-
-
-bool Object::IsMapCache() {
- return IsHashTable();
-}
-
-
-bool Object::IsPrimitive() {
- return IsOddball() || IsNumber() || IsString();
-}
-
-
-bool Object::IsJSGlobalProxy() {
- bool result = IsHeapObject() &&
- (HeapObject::cast(this)->map()->instance_type() ==
- JS_GLOBAL_PROXY_TYPE);
- ASSERT(!result || IsAccessCheckNeeded());
- return result;
-}
-
-
-bool Object::IsGlobalObject() {
- if (!IsHeapObject()) return false;
-
- InstanceType type = HeapObject::cast(this)->map()->instance_type();
- return type == JS_GLOBAL_OBJECT_TYPE ||
- type == JS_BUILTINS_OBJECT_TYPE;
-}
-
-
-bool Object::IsJSGlobalObject() {
- return IsHeapObject() &&
- (HeapObject::cast(this)->map()->instance_type() ==
- JS_GLOBAL_OBJECT_TYPE);
-}
-
-
-bool Object::IsJSBuiltinsObject() {
- return IsHeapObject() &&
- (HeapObject::cast(this)->map()->instance_type() ==
- JS_BUILTINS_OBJECT_TYPE);
-}
-
-
-bool Object::IsUndetectableObject() {
- return IsHeapObject()
- && HeapObject::cast(this)->map()->is_undetectable();
-}
-
-
-bool Object::IsAccessCheckNeeded() {
- return IsHeapObject()
- && HeapObject::cast(this)->map()->is_access_check_needed();
-}
-
-
-bool Object::IsStruct() {
- if (!IsHeapObject()) return false;
- switch (HeapObject::cast(this)->map()->instance_type()) {
-#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return true;
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- default: return false;
- }
-}
-
-
-#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \
- bool Object::Is##Name() { \
- return Object::IsHeapObject() \
- && HeapObject::cast(this)->map()->instance_type() == NAME##_TYPE; \
- }
- STRUCT_LIST(MAKE_STRUCT_PREDICATE)
-#undef MAKE_STRUCT_PREDICATE
-
-
-bool Object::IsUndefined() {
- return IsOddball() && Oddball::cast(this)->kind() == Oddball::kUndefined;
-}
-
-
-bool Object::IsNull() {
- return IsOddball() && Oddball::cast(this)->kind() == Oddball::kNull;
-}
-
-
-bool Object::IsTheHole() {
- return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTheHole;
-}
-
-
-bool Object::IsTrue() {
- return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTrue;
-}
-
-
-bool Object::IsFalse() {
- return IsOddball() && Oddball::cast(this)->kind() == Oddball::kFalse;
-}
-
-
-bool Object::IsArgumentsMarker() {
- return IsOddball() && Oddball::cast(this)->kind() == Oddball::kArgumentMarker;
-}
-
-
-double Object::Number() {
- ASSERT(IsNumber());
- return IsSmi()
- ? static_cast<double>(reinterpret_cast<Smi*>(this)->value())
- : reinterpret_cast<HeapNumber*>(this)->value();
-}
-
-
-MaybeObject* Object::ToSmi() {
- if (IsSmi()) return this;
- if (IsHeapNumber()) {
- double value = HeapNumber::cast(this)->value();
- int int_value = FastD2I(value);
- if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
- return Smi::FromInt(int_value);
- }
- }
- return Failure::Exception();
-}
-
-
-bool Object::HasSpecificClassOf(String* name) {
- return this->IsJSObject() && (JSObject::cast(this)->class_name() == name);
-}
-
-
-MaybeObject* Object::GetElement(uint32_t index) {
- // GetElement can trigger a getter which can cause allocation.
- // This was not always the case. This ASSERT is here to catch
- // leftover incorrect uses.
- ASSERT(HEAP->IsAllocationAllowed());
- return GetElementWithReceiver(this, index);
-}
-
-
-Object* Object::GetElementNoExceptionThrown(uint32_t index) {
- MaybeObject* maybe = GetElementWithReceiver(this, index);
- ASSERT(!maybe->IsFailure());
- Object* result = NULL; // Initialization to please compiler.
- maybe->ToObject(&result);
- return result;
-}
-
-
-MaybeObject* Object::GetProperty(String* key) {
- PropertyAttributes attributes;
- return GetPropertyWithReceiver(this, key, &attributes);
-}
-
-
-MaybeObject* Object::GetProperty(String* key, PropertyAttributes* attributes) {
- return GetPropertyWithReceiver(this, key, attributes);
-}
-
-
-#define FIELD_ADDR(p, offset) \
- (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
-
-#define READ_FIELD(p, offset) \
- (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)))
-
-#define WRITE_FIELD(p, offset, value) \
- (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
-
-// TODO(isolates): Pass heap in to these macros.
-#define WRITE_BARRIER(object, offset) \
- object->GetHeap()->RecordWrite(object->address(), offset);
-
-// CONDITIONAL_WRITE_BARRIER must be issued after the actual
-// write due to the assert validating the written value.
-#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, mode) \
- if (mode == UPDATE_WRITE_BARRIER) { \
- heap->RecordWrite(object->address(), offset); \
- } else { \
- ASSERT(mode == SKIP_WRITE_BARRIER); \
- ASSERT(heap->InNewSpace(object) || \
- !heap->InNewSpace(READ_FIELD(object, offset)) || \
- Page::FromAddress(object->address())-> \
- IsRegionDirty(object->address() + offset)); \
- }
-
-#ifndef V8_TARGET_ARCH_MIPS
- #define READ_DOUBLE_FIELD(p, offset) \
- (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)))
-#else // V8_TARGET_ARCH_MIPS
- // Prevent gcc from using load-double (mips ldc1) on (possibly)
- // non-64-bit aligned HeapNumber::value.
- static inline double read_double_field(HeapNumber* p, int offset) {
- union conversion {
- double d;
- uint32_t u[2];
- } c;
- c.u[0] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)));
- c.u[1] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4)));
- return c.d;
- }
- #define READ_DOUBLE_FIELD(p, offset) read_double_field(p, offset)
-#endif // V8_TARGET_ARCH_MIPS
-
-
-#ifndef V8_TARGET_ARCH_MIPS
- #define WRITE_DOUBLE_FIELD(p, offset, value) \
- (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)) = value)
-#else // V8_TARGET_ARCH_MIPS
- // Prevent gcc from using store-double (mips sdc1) on (possibly)
- // non-64-bit aligned HeapNumber::value.
- static inline void write_double_field(HeapNumber* p, int offset,
- double value) {
- union conversion {
- double d;
- uint32_t u[2];
- } c;
- c.d = value;
- (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset))) = c.u[0];
- (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4))) = c.u[1];
- }
- #define WRITE_DOUBLE_FIELD(p, offset, value) \
- write_double_field(p, offset, value)
-#endif // V8_TARGET_ARCH_MIPS
-
-
-#define READ_INT_FIELD(p, offset) \
- (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_INT_FIELD(p, offset, value) \
- (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_INTPTR_FIELD(p, offset) \
- (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_INTPTR_FIELD(p, offset, value) \
- (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_UINT32_FIELD(p, offset) \
- (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_UINT32_FIELD(p, offset, value) \
- (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_SHORT_FIELD(p, offset) \
- (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_SHORT_FIELD(p, offset, value) \
- (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_BYTE_FIELD(p, offset) \
- (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_BYTE_FIELD(p, offset, value) \
- (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value)
-
-
-Object** HeapObject::RawField(HeapObject* obj, int byte_offset) {
- return &READ_FIELD(obj, byte_offset);
-}
-
-
-int Smi::value() {
- return Internals::SmiValue(this);
-}
-
-
-Smi* Smi::FromInt(int value) {
- ASSERT(Smi::IsValid(value));
- int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
- intptr_t tagged_value =
- (static_cast<intptr_t>(value) << smi_shift_bits) | kSmiTag;
- return reinterpret_cast<Smi*>(tagged_value);
-}
-
-
-Smi* Smi::FromIntptr(intptr_t value) {
- ASSERT(Smi::IsValid(value));
- int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
- return reinterpret_cast<Smi*>((value << smi_shift_bits) | kSmiTag);
-}
-
-
-Failure::Type Failure::type() const {
- return static_cast<Type>(value() & kFailureTypeTagMask);
-}
-
-
-bool Failure::IsInternalError() const {
- return type() == INTERNAL_ERROR;
-}
-
-
-bool Failure::IsOutOfMemoryException() const {
- return type() == OUT_OF_MEMORY_EXCEPTION;
-}
-
-
-AllocationSpace Failure::allocation_space() const {
- ASSERT_EQ(RETRY_AFTER_GC, type());
- return static_cast<AllocationSpace>((value() >> kFailureTypeTagSize)
- & kSpaceTagMask);
-}
-
-
-Failure* Failure::InternalError() {
- return Construct(INTERNAL_ERROR);
-}
-
-
-Failure* Failure::Exception() {
- return Construct(EXCEPTION);
-}
-
-
-Failure* Failure::OutOfMemoryException() {
- return Construct(OUT_OF_MEMORY_EXCEPTION);
-}
-
-
-intptr_t Failure::value() const {
- return static_cast<intptr_t>(
- reinterpret_cast<uintptr_t>(this) >> kFailureTagSize);
-}
-
-
-Failure* Failure::RetryAfterGC() {
- return RetryAfterGC(NEW_SPACE);
-}
-
-
-Failure* Failure::RetryAfterGC(AllocationSpace space) {
- ASSERT((space & ~kSpaceTagMask) == 0);
- return Construct(RETRY_AFTER_GC, space);
-}
-
-
-Failure* Failure::Construct(Type type, intptr_t value) {
- uintptr_t info =
- (static_cast<uintptr_t>(value) << kFailureTypeTagSize) | type;
- ASSERT(((info << kFailureTagSize) >> kFailureTagSize) == info);
- return reinterpret_cast<Failure*>((info << kFailureTagSize) | kFailureTag);
-}
-
-
-bool Smi::IsValid(intptr_t value) {
-#ifdef DEBUG
- bool in_range = (value >= kMinValue) && (value <= kMaxValue);
-#endif
-
-#ifdef V8_TARGET_ARCH_X64
- // To be representable as a long smi, the value must be a 32-bit integer.
- bool result = (value == static_cast<int32_t>(value));
-#else
- // To be representable as an tagged small integer, the two
- // most-significant bits of 'value' must be either 00 or 11 due to
- // sign-extension. To check this we add 01 to the two
- // most-significant bits, and check if the most-significant bit is 0
- //
- // CAUTION: The original code below:
- // bool result = ((value + 0x40000000) & 0x80000000) == 0;
- // may lead to incorrect results according to the C language spec, and
- // in fact doesn't work correctly with gcc4.1.1 in some cases: The
- // compiler may produce undefined results in case of signed integer
- // overflow. The computation must be done w/ unsigned ints.
- bool result = (static_cast<uintptr_t>(value + 0x40000000U) < 0x80000000U);
-#endif
- ASSERT(result == in_range);
- return result;
-}
-
-
-MapWord MapWord::FromMap(Map* map) {
- return MapWord(reinterpret_cast<uintptr_t>(map));
-}
-
-
-Map* MapWord::ToMap() {
- return reinterpret_cast<Map*>(value_);
-}
-
-
-bool MapWord::IsForwardingAddress() {
- return HAS_SMI_TAG(reinterpret_cast<Object*>(value_));
-}
-
-
-MapWord MapWord::FromForwardingAddress(HeapObject* object) {
- Address raw = reinterpret_cast<Address>(object) - kHeapObjectTag;
- return MapWord(reinterpret_cast<uintptr_t>(raw));
-}
-
-
-HeapObject* MapWord::ToForwardingAddress() {
- ASSERT(IsForwardingAddress());
- return HeapObject::FromAddress(reinterpret_cast<Address>(value_));
-}
-
-
-bool MapWord::IsMarked() {
- return (value_ & kMarkingMask) == 0;
-}
-
-
-void MapWord::SetMark() {
- value_ &= ~kMarkingMask;
-}
-
-
-void MapWord::ClearMark() {
- value_ |= kMarkingMask;
-}
-
-
-bool MapWord::IsOverflowed() {
- return (value_ & kOverflowMask) != 0;
-}
-
-
-void MapWord::SetOverflow() {
- value_ |= kOverflowMask;
-}
-
-
-void MapWord::ClearOverflow() {
- value_ &= ~kOverflowMask;
-}
-
-
-MapWord MapWord::EncodeAddress(Address map_address, int offset) {
- // Offset is the distance in live bytes from the first live object in the
- // same page. The offset between two objects in the same page should not
- // exceed the object area size of a page.
- ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
-
- uintptr_t compact_offset = offset >> kObjectAlignmentBits;
- ASSERT(compact_offset < (1 << kForwardingOffsetBits));
-
- Page* map_page = Page::FromAddress(map_address);
- ASSERT_MAP_PAGE_INDEX(map_page->mc_page_index);
-
- uintptr_t map_page_offset =
- map_page->Offset(map_address) >> kMapAlignmentBits;
-
- uintptr_t encoding =
- (compact_offset << kForwardingOffsetShift) |
- (map_page_offset << kMapPageOffsetShift) |
- (map_page->mc_page_index << kMapPageIndexShift);
- return MapWord(encoding);
-}
-
-
-Address MapWord::DecodeMapAddress(MapSpace* map_space) {
- int map_page_index =
- static_cast<int>((value_ & kMapPageIndexMask) >> kMapPageIndexShift);
- ASSERT_MAP_PAGE_INDEX(map_page_index);
-
- int map_page_offset = static_cast<int>(
- ((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift) <<
- kMapAlignmentBits);
-
- return (map_space->PageAddress(map_page_index) + map_page_offset);
-}
-
-
-int MapWord::DecodeOffset() {
- // The offset field is represented in the kForwardingOffsetBits
- // most-significant bits.
- uintptr_t offset = (value_ >> kForwardingOffsetShift) << kObjectAlignmentBits;
- ASSERT(offset < static_cast<uintptr_t>(Page::kObjectAreaSize));
- return static_cast<int>(offset);
-}
-
-
-MapWord MapWord::FromEncodedAddress(Address address) {
- return MapWord(reinterpret_cast<uintptr_t>(address));
-}
-
-
-Address MapWord::ToEncodedAddress() {
- return reinterpret_cast<Address>(value_);
-}
-
-
-#ifdef DEBUG
-void HeapObject::VerifyObjectField(int offset) {
- VerifyPointer(READ_FIELD(this, offset));
-}
-
-void HeapObject::VerifySmiField(int offset) {
- ASSERT(READ_FIELD(this, offset)->IsSmi());
-}
-#endif
-
-
-Heap* HeapObject::GetHeap() {
- // During GC, the map pointer in HeapObject is used in various ways that
- // prevent us from retrieving Heap from the map.
- // Assert that we are not in GC, implement GC code in a way that it doesn't
- // pull heap from the map.
- ASSERT(HEAP->is_safe_to_read_maps());
- return map()->heap();
-}
-
-
-Isolate* HeapObject::GetIsolate() {
- return GetHeap()->isolate();
-}
-
-
-Map* HeapObject::map() {
- return map_word().ToMap();
-}
-
-
-void HeapObject::set_map(Map* value) {
- set_map_word(MapWord::FromMap(value));
-}
-
-
-MapWord HeapObject::map_word() {
- return MapWord(reinterpret_cast<uintptr_t>(READ_FIELD(this, kMapOffset)));
-}
-
-
-void HeapObject::set_map_word(MapWord map_word) {
- // WRITE_FIELD does not invoke write barrier, but there is no need
- // here.
- WRITE_FIELD(this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
-}
-
-
-HeapObject* HeapObject::FromAddress(Address address) {
- ASSERT_TAG_ALIGNED(address);
- return reinterpret_cast<HeapObject*>(address + kHeapObjectTag);
-}
-
-
-Address HeapObject::address() {
- return reinterpret_cast<Address>(this) - kHeapObjectTag;
-}
-
-
-int HeapObject::Size() {
- return SizeFromMap(map());
-}
-
-
-void HeapObject::IteratePointers(ObjectVisitor* v, int start, int end) {
- v->VisitPointers(reinterpret_cast<Object**>(FIELD_ADDR(this, start)),
- reinterpret_cast<Object**>(FIELD_ADDR(this, end)));
-}
-
-
-void HeapObject::IteratePointer(ObjectVisitor* v, int offset) {
- v->VisitPointer(reinterpret_cast<Object**>(FIELD_ADDR(this, offset)));
-}
-
-
-bool HeapObject::IsMarked() {
- return map_word().IsMarked();
-}
-
-
-void HeapObject::SetMark() {
- ASSERT(!IsMarked());
- MapWord first_word = map_word();
- first_word.SetMark();
- set_map_word(first_word);
-}
-
-
-void HeapObject::ClearMark() {
- ASSERT(IsMarked());
- MapWord first_word = map_word();
- first_word.ClearMark();
- set_map_word(first_word);
-}
-
-
-bool HeapObject::IsOverflowed() {
- return map_word().IsOverflowed();
-}
-
-
-void HeapObject::SetOverflow() {
- MapWord first_word = map_word();
- first_word.SetOverflow();
- set_map_word(first_word);
-}
-
-
-void HeapObject::ClearOverflow() {
- ASSERT(IsOverflowed());
- MapWord first_word = map_word();
- first_word.ClearOverflow();
- set_map_word(first_word);
-}
-
-
-double HeapNumber::value() {
- return READ_DOUBLE_FIELD(this, kValueOffset);
-}
-
-
-void HeapNumber::set_value(double value) {
- WRITE_DOUBLE_FIELD(this, kValueOffset, value);
-}
-
-
-int HeapNumber::get_exponent() {
- return ((READ_INT_FIELD(this, kExponentOffset) & kExponentMask) >>
- kExponentShift) - kExponentBias;
-}
-
-
-int HeapNumber::get_sign() {
- return READ_INT_FIELD(this, kExponentOffset) & kSignMask;
-}
-
-
-ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
-
-
-HeapObject* JSObject::elements() {
- Object* array = READ_FIELD(this, kElementsOffset);
- // In the assert below Dictionary is covered under FixedArray.
- ASSERT(array->IsFixedArray() || array->IsExternalArray());
- return reinterpret_cast<HeapObject*>(array);
-}
-
-
-void JSObject::set_elements(HeapObject* value, WriteBarrierMode mode) {
- ASSERT(map()->has_fast_elements() ==
- (value->map() == GetHeap()->fixed_array_map() ||
- value->map() == GetHeap()->fixed_cow_array_map()));
- // In the assert below Dictionary is covered under FixedArray.
- ASSERT(value->IsFixedArray() || value->IsExternalArray());
- WRITE_FIELD(this, kElementsOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, mode);
-}
-
-
-void JSObject::initialize_properties() {
- ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
- WRITE_FIELD(this, kPropertiesOffset, GetHeap()->empty_fixed_array());
-}
-
-
-void JSObject::initialize_elements() {
- ASSERT(map()->has_fast_elements());
- ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
- WRITE_FIELD(this, kElementsOffset, GetHeap()->empty_fixed_array());
-}
-
-
-MaybeObject* JSObject::ResetElements() {
- Object* obj;
- { MaybeObject* maybe_obj = map()->GetFastElementsMap();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- set_map(Map::cast(obj));
- initialize_elements();
- return this;
-}
-
-
-ACCESSORS(Oddball, to_string, String, kToStringOffset)
-ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
-
-
-byte Oddball::kind() {
- return READ_BYTE_FIELD(this, kKindOffset);
-}
-
-
-void Oddball::set_kind(byte value) {
- WRITE_BYTE_FIELD(this, kKindOffset, value);
-}
-
-
-Object* JSGlobalPropertyCell::value() {
- return READ_FIELD(this, kValueOffset);
-}
-
-
-void JSGlobalPropertyCell::set_value(Object* val, WriteBarrierMode ignored) {
- // The write barrier is not used for global property cells.
- ASSERT(!val->IsJSGlobalPropertyCell());
- WRITE_FIELD(this, kValueOffset, val);
-}
-
-
-int JSObject::GetHeaderSize() {
- InstanceType type = map()->instance_type();
- // Check for the most common kind of JavaScript object before
- // falling into the generic switch. This speeds up the internal
- // field operations considerably on average.
- if (type == JS_OBJECT_TYPE) return JSObject::kHeaderSize;
- switch (type) {
- case JS_GLOBAL_PROXY_TYPE:
- return JSGlobalProxy::kSize;
- case JS_GLOBAL_OBJECT_TYPE:
- return JSGlobalObject::kSize;
- case JS_BUILTINS_OBJECT_TYPE:
- return JSBuiltinsObject::kSize;
- case JS_FUNCTION_TYPE:
- return JSFunction::kSize;
- case JS_VALUE_TYPE:
- return JSValue::kSize;
- case JS_ARRAY_TYPE:
- return JSValue::kSize;
- case JS_REGEXP_TYPE:
- return JSValue::kSize;
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- return JSObject::kHeaderSize;
- case JS_MESSAGE_OBJECT_TYPE:
- return JSMessageObject::kSize;
- default:
- UNREACHABLE();
- return 0;
- }
-}
-
-
-int JSObject::GetInternalFieldCount() {
- ASSERT(1 << kPointerSizeLog2 == kPointerSize);
- // Make sure to adjust for the number of in-object properties. These
- // properties do contribute to the size, but are not internal fields.
- return ((Size() - GetHeaderSize()) >> kPointerSizeLog2) -
- map()->inobject_properties();
-}
-
-
-int JSObject::GetInternalFieldOffset(int index) {
- ASSERT(index < GetInternalFieldCount() && index >= 0);
- return GetHeaderSize() + (kPointerSize * index);
-}
-
-
-Object* JSObject::GetInternalField(int index) {
- ASSERT(index < GetInternalFieldCount() && index >= 0);
- // Internal objects do follow immediately after the header, whereas in-object
- // properties are at the end of the object. Therefore there is no need
- // to adjust the index here.
- return READ_FIELD(this, GetHeaderSize() + (kPointerSize * index));
-}
-
-
-void JSObject::SetInternalField(int index, Object* value) {
- ASSERT(index < GetInternalFieldCount() && index >= 0);
- // Internal objects do follow immediately after the header, whereas in-object
- // properties are at the end of the object. Therefore there is no need
- // to adjust the index here.
- int offset = GetHeaderSize() + (kPointerSize * index);
- WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(this, offset);
-}
-
-
-// Access fast-case object properties at index. The use of these routines
-// is needed to correctly distinguish between properties stored in-object and
-// properties stored in the properties array.
-Object* JSObject::FastPropertyAt(int index) {
- // Adjust for the number of properties stored in the object.
- index -= map()->inobject_properties();
- if (index < 0) {
- int offset = map()->instance_size() + (index * kPointerSize);
- return READ_FIELD(this, offset);
- } else {
- ASSERT(index < properties()->length());
- return properties()->get(index);
- }
-}
-
-
-Object* JSObject::FastPropertyAtPut(int index, Object* value) {
- // Adjust for the number of properties stored in the object.
- index -= map()->inobject_properties();
- if (index < 0) {
- int offset = map()->instance_size() + (index * kPointerSize);
- WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(this, offset);
- } else {
- ASSERT(index < properties()->length());
- properties()->set(index, value);
- }
- return value;
-}
-
-
-int JSObject::GetInObjectPropertyOffset(int index) {
- // Adjust for the number of properties stored in the object.
- index -= map()->inobject_properties();
- ASSERT(index < 0);
- return map()->instance_size() + (index * kPointerSize);
-}
-
-
-Object* JSObject::InObjectPropertyAt(int index) {
- // Adjust for the number of properties stored in the object.
- index -= map()->inobject_properties();
- ASSERT(index < 0);
- int offset = map()->instance_size() + (index * kPointerSize);
- return READ_FIELD(this, offset);
-}
-
-
-Object* JSObject::InObjectPropertyAtPut(int index,
- Object* value,
- WriteBarrierMode mode) {
- // Adjust for the number of properties stored in the object.
- index -= map()->inobject_properties();
- ASSERT(index < 0);
- int offset = map()->instance_size() + (index * kPointerSize);
- WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode);
- return value;
-}
-
-
-
-void JSObject::InitializeBody(int object_size, Object* value) {
- ASSERT(!value->IsHeapObject() || !GetHeap()->InNewSpace(value));
- for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
- WRITE_FIELD(this, offset, value);
- }
-}
-
-
-bool JSObject::HasFastProperties() {
- return !properties()->IsDictionary();
-}
-
-
-int JSObject::MaxFastProperties() {
- // Allow extra fast properties if the object has more than
- // kMaxFastProperties in-object properties. When this is the case,
- // it is very unlikely that the object is being used as a dictionary
- // and there is a good chance that allowing more map transitions
- // will be worth it.
- return Max(map()->inobject_properties(), kMaxFastProperties);
-}
-
-
-void Struct::InitializeBody(int object_size) {
- Object* value = GetHeap()->undefined_value();
- for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
- WRITE_FIELD(this, offset, value);
- }
-}
-
-
-bool Object::ToArrayIndex(uint32_t* index) {
- if (IsSmi()) {
- int value = Smi::cast(this)->value();
- if (value < 0) return false;
- *index = value;
- return true;
- }
- if (IsHeapNumber()) {
- double value = HeapNumber::cast(this)->value();
- uint32_t uint_value = static_cast<uint32_t>(value);
- if (value == static_cast<double>(uint_value)) {
- *index = uint_value;
- return true;
- }
- }
- return false;
-}
-
-
-bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
- if (!this->IsJSValue()) return false;
-
- JSValue* js_value = JSValue::cast(this);
- if (!js_value->value()->IsString()) return false;
-
- String* str = String::cast(js_value->value());
- if (index >= (uint32_t)str->length()) return false;
-
- return true;
-}
-
-
-Object* FixedArray::get(int index) {
- ASSERT(index >= 0 && index < this->length());
- return READ_FIELD(this, kHeaderSize + index * kPointerSize);
-}
-
-
-void FixedArray::set(int index, Smi* value) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
- ASSERT(reinterpret_cast<Object*>(value)->IsSmi());
- int offset = kHeaderSize + index * kPointerSize;
- WRITE_FIELD(this, offset, value);
-}
-
-
-void FixedArray::set(int index, Object* value) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
- ASSERT(index >= 0 && index < this->length());
- int offset = kHeaderSize + index * kPointerSize;
- WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(this, offset);
-}
-
-
-WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) {
- if (GetHeap()->InNewSpace(this)) return SKIP_WRITE_BARRIER;
- return UPDATE_WRITE_BARRIER;
-}
-
-
-void FixedArray::set(int index,
- Object* value,
- WriteBarrierMode mode) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
- ASSERT(index >= 0 && index < this->length());
- int offset = kHeaderSize + index * kPointerSize;
- WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode);
-}
-
-
-void FixedArray::fast_set(FixedArray* array, int index, Object* value) {
- ASSERT(array->map() != HEAP->raw_unchecked_fixed_cow_array_map());
- ASSERT(index >= 0 && index < array->length());
- ASSERT(!HEAP->InNewSpace(value));
- WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
-}
-
-
-void FixedArray::set_undefined(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
- set_undefined(GetHeap(), index);
-}
-
-
-void FixedArray::set_undefined(Heap* heap, int index) {
- ASSERT(index >= 0 && index < this->length());
- ASSERT(!heap->InNewSpace(heap->undefined_value()));
- WRITE_FIELD(this, kHeaderSize + index * kPointerSize,
- heap->undefined_value());
-}
-
-
-void FixedArray::set_null(int index) {
- set_null(GetHeap(), index);
-}
-
-
-void FixedArray::set_null(Heap* heap, int index) {
- ASSERT(index >= 0 && index < this->length());
- ASSERT(!heap->InNewSpace(heap->null_value()));
- WRITE_FIELD(this, kHeaderSize + index * kPointerSize, heap->null_value());
-}
-
-
-void FixedArray::set_the_hole(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
- ASSERT(index >= 0 && index < this->length());
- ASSERT(!HEAP->InNewSpace(HEAP->the_hole_value()));
- WRITE_FIELD(this,
- kHeaderSize + index * kPointerSize,
- GetHeap()->the_hole_value());
-}
-
-
-void FixedArray::set_unchecked(int index, Smi* value) {
- ASSERT(reinterpret_cast<Object*>(value)->IsSmi());
- int offset = kHeaderSize + index * kPointerSize;
- WRITE_FIELD(this, offset, value);
-}
-
-
-void FixedArray::set_unchecked(Heap* heap,
- int index,
- Object* value,
- WriteBarrierMode mode) {
- int offset = kHeaderSize + index * kPointerSize;
- WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(heap, this, offset, mode);
-}
-
-
-void FixedArray::set_null_unchecked(Heap* heap, int index) {
- ASSERT(index >= 0 && index < this->length());
- ASSERT(!HEAP->InNewSpace(heap->null_value()));
- WRITE_FIELD(this, kHeaderSize + index * kPointerSize, heap->null_value());
-}
-
-
-Object** FixedArray::data_start() {
- return HeapObject::RawField(this, kHeaderSize);
-}
-
-
-bool DescriptorArray::IsEmpty() {
- ASSERT(this->length() > kFirstIndex ||
- this == HEAP->empty_descriptor_array());
- return length() <= kFirstIndex;
-}
-
-
-void DescriptorArray::fast_swap(FixedArray* array, int first, int second) {
- Object* tmp = array->get(first);
- fast_set(array, first, array->get(second));
- fast_set(array, second, tmp);
-}
-
-
-int DescriptorArray::Search(String* name) {
- SLOW_ASSERT(IsSortedNoDuplicates());
-
- // Check for empty descriptor array.
- int nof = number_of_descriptors();
- if (nof == 0) return kNotFound;
-
- // Fast case: do linear search for small arrays.
- const int kMaxElementsForLinearSearch = 8;
- if (StringShape(name).IsSymbol() && nof < kMaxElementsForLinearSearch) {
- return LinearSearch(name, nof);
- }
-
- // Slow case: perform binary search.
- return BinarySearch(name, 0, nof - 1);
-}
-
-
-int DescriptorArray::SearchWithCache(String* name) {
- int number = GetIsolate()->descriptor_lookup_cache()->Lookup(this, name);
- if (number == DescriptorLookupCache::kAbsent) {
- number = Search(name);
- GetIsolate()->descriptor_lookup_cache()->Update(this, name, number);
- }
- return number;
-}
-
-
-String* DescriptorArray::GetKey(int descriptor_number) {
- ASSERT(descriptor_number < number_of_descriptors());
- return String::cast(get(ToKeyIndex(descriptor_number)));
-}
-
-
-Object* DescriptorArray::GetValue(int descriptor_number) {
- ASSERT(descriptor_number < number_of_descriptors());
- return GetContentArray()->get(ToValueIndex(descriptor_number));
-}
-
-
-Smi* DescriptorArray::GetDetails(int descriptor_number) {
- ASSERT(descriptor_number < number_of_descriptors());
- return Smi::cast(GetContentArray()->get(ToDetailsIndex(descriptor_number)));
-}
-
-
-PropertyType DescriptorArray::GetType(int descriptor_number) {
- ASSERT(descriptor_number < number_of_descriptors());
- return PropertyDetails(GetDetails(descriptor_number)).type();
-}
-
-
-int DescriptorArray::GetFieldIndex(int descriptor_number) {
- return Descriptor::IndexFromValue(GetValue(descriptor_number));
-}
-
-
-JSFunction* DescriptorArray::GetConstantFunction(int descriptor_number) {
- return JSFunction::cast(GetValue(descriptor_number));
-}
-
-
-Object* DescriptorArray::GetCallbacksObject(int descriptor_number) {
- ASSERT(GetType(descriptor_number) == CALLBACKS);
- return GetValue(descriptor_number);
-}
-
-
-AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
- ASSERT(GetType(descriptor_number) == CALLBACKS);
- Proxy* p = Proxy::cast(GetCallbacksObject(descriptor_number));
- return reinterpret_cast<AccessorDescriptor*>(p->proxy());
-}
-
-
-bool DescriptorArray::IsProperty(int descriptor_number) {
- return GetType(descriptor_number) < FIRST_PHANTOM_PROPERTY_TYPE;
-}
-
-
-bool DescriptorArray::IsTransition(int descriptor_number) {
- PropertyType t = GetType(descriptor_number);
- return t == MAP_TRANSITION || t == CONSTANT_TRANSITION ||
- t == EXTERNAL_ARRAY_TRANSITION;
-}
-
-
-bool DescriptorArray::IsNullDescriptor(int descriptor_number) {
- return GetType(descriptor_number) == NULL_DESCRIPTOR;
-}
-
-
-bool DescriptorArray::IsDontEnum(int descriptor_number) {
- return PropertyDetails(GetDetails(descriptor_number)).IsDontEnum();
-}
-
-
-void DescriptorArray::Get(int descriptor_number, Descriptor* desc) {
- desc->Init(GetKey(descriptor_number),
- GetValue(descriptor_number),
- GetDetails(descriptor_number));
-}
-
-
-void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
- // Range check.
- ASSERT(descriptor_number < number_of_descriptors());
-
- // Make sure none of the elements in desc are in new space.
- ASSERT(!HEAP->InNewSpace(desc->GetKey()));
- ASSERT(!HEAP->InNewSpace(desc->GetValue()));
-
- fast_set(this, ToKeyIndex(descriptor_number), desc->GetKey());
- FixedArray* content_array = GetContentArray();
- fast_set(content_array, ToValueIndex(descriptor_number), desc->GetValue());
- fast_set(content_array, ToDetailsIndex(descriptor_number),
- desc->GetDetails().AsSmi());
-}
-
-
-void DescriptorArray::CopyFrom(int index, DescriptorArray* src, int src_index) {
- Descriptor desc;
- src->Get(src_index, &desc);
- Set(index, &desc);
-}
-
-
-void DescriptorArray::Swap(int first, int second) {
- fast_swap(this, ToKeyIndex(first), ToKeyIndex(second));
- FixedArray* content_array = GetContentArray();
- fast_swap(content_array, ToValueIndex(first), ToValueIndex(second));
- fast_swap(content_array, ToDetailsIndex(first), ToDetailsIndex(second));
-}
-
-
-template<typename Shape, typename Key>
-int HashTable<Shape, Key>::FindEntry(Key key) {
- return FindEntry(GetIsolate(), key);
-}
-
-
-// Find entry for key otherwise return kNotFound.
-template<typename Shape, typename Key>
-int HashTable<Shape, Key>::FindEntry(Isolate* isolate, Key key) {
- uint32_t capacity = Capacity();
- uint32_t entry = FirstProbe(Shape::Hash(key), capacity);
- uint32_t count = 1;
- // EnsureCapacity will guarantee the hash table is never full.
- while (true) {
- Object* element = KeyAt(entry);
- if (element == isolate->heap()->undefined_value()) break; // Empty entry.
- if (element != isolate->heap()->null_value() &&
- Shape::IsMatch(key, element)) return entry;
- entry = NextProbe(entry, count++, capacity);
- }
- return kNotFound;
-}
-
-
-bool NumberDictionary::requires_slow_elements() {
- Object* max_index_object = get(kMaxNumberKeyIndex);
- if (!max_index_object->IsSmi()) return false;
- return 0 !=
- (Smi::cast(max_index_object)->value() & kRequiresSlowElementsMask);
-}
-
-uint32_t NumberDictionary::max_number_key() {
- ASSERT(!requires_slow_elements());
- Object* max_index_object = get(kMaxNumberKeyIndex);
- if (!max_index_object->IsSmi()) return 0;
- uint32_t value = static_cast<uint32_t>(Smi::cast(max_index_object)->value());
- return value >> kRequiresSlowElementsTagSize;
-}
-
-void NumberDictionary::set_requires_slow_elements() {
- set(kMaxNumberKeyIndex, Smi::FromInt(kRequiresSlowElementsMask));
-}
-
-
-// ------------------------------------
-// Cast operations
-
-
-CAST_ACCESSOR(FixedArray)
-CAST_ACCESSOR(DescriptorArray)
-CAST_ACCESSOR(DeoptimizationInputData)
-CAST_ACCESSOR(DeoptimizationOutputData)
-CAST_ACCESSOR(SymbolTable)
-CAST_ACCESSOR(JSFunctionResultCache)
-CAST_ACCESSOR(NormalizedMapCache)
-CAST_ACCESSOR(CompilationCacheTable)
-CAST_ACCESSOR(CodeCacheHashTable)
-CAST_ACCESSOR(MapCache)
-CAST_ACCESSOR(String)
-CAST_ACCESSOR(SeqString)
-CAST_ACCESSOR(SeqAsciiString)
-CAST_ACCESSOR(SeqTwoByteString)
-CAST_ACCESSOR(ConsString)
-CAST_ACCESSOR(ExternalString)
-CAST_ACCESSOR(ExternalAsciiString)
-CAST_ACCESSOR(ExternalTwoByteString)
-CAST_ACCESSOR(JSObject)
-CAST_ACCESSOR(Smi)
-CAST_ACCESSOR(HeapObject)
-CAST_ACCESSOR(HeapNumber)
-CAST_ACCESSOR(Oddball)
-CAST_ACCESSOR(JSGlobalPropertyCell)
-CAST_ACCESSOR(SharedFunctionInfo)
-CAST_ACCESSOR(Map)
-CAST_ACCESSOR(JSFunction)
-CAST_ACCESSOR(GlobalObject)
-CAST_ACCESSOR(JSGlobalProxy)
-CAST_ACCESSOR(JSGlobalObject)
-CAST_ACCESSOR(JSBuiltinsObject)
-CAST_ACCESSOR(Code)
-CAST_ACCESSOR(JSArray)
-CAST_ACCESSOR(JSRegExp)
-CAST_ACCESSOR(Proxy)
-CAST_ACCESSOR(ByteArray)
-CAST_ACCESSOR(ExternalArray)
-CAST_ACCESSOR(ExternalByteArray)
-CAST_ACCESSOR(ExternalUnsignedByteArray)
-CAST_ACCESSOR(ExternalShortArray)
-CAST_ACCESSOR(ExternalUnsignedShortArray)
-CAST_ACCESSOR(ExternalIntArray)
-CAST_ACCESSOR(ExternalUnsignedIntArray)
-CAST_ACCESSOR(ExternalFloatArray)
-CAST_ACCESSOR(ExternalPixelArray)
-CAST_ACCESSOR(Struct)
-
-
-#define MAKE_STRUCT_CAST(NAME, Name, name) CAST_ACCESSOR(Name)
- STRUCT_LIST(MAKE_STRUCT_CAST)
-#undef MAKE_STRUCT_CAST
-
-
-template <typename Shape, typename Key>
-HashTable<Shape, Key>* HashTable<Shape, Key>::cast(Object* obj) {
- ASSERT(obj->IsHashTable());
- return reinterpret_cast<HashTable*>(obj);
-}
-
-
-SMI_ACCESSORS(FixedArray, length, kLengthOffset)
-SMI_ACCESSORS(ByteArray, length, kLengthOffset)
-
-INT_ACCESSORS(ExternalArray, length, kLengthOffset)
-
-
-SMI_ACCESSORS(String, length, kLengthOffset)
-
-
-uint32_t String::hash_field() {
- return READ_UINT32_FIELD(this, kHashFieldOffset);
-}
-
-
-void String::set_hash_field(uint32_t value) {
- WRITE_UINT32_FIELD(this, kHashFieldOffset, value);
-#if V8_HOST_ARCH_64_BIT
- WRITE_UINT32_FIELD(this, kHashFieldOffset + kIntSize, 0);
-#endif
-}
-
-
-bool String::Equals(String* other) {
- if (other == this) return true;
- if (StringShape(this).IsSymbol() && StringShape(other).IsSymbol()) {
- return false;
- }
- return SlowEquals(other);
-}
-
-
-MaybeObject* String::TryFlatten(PretenureFlag pretenure) {
- if (!StringShape(this).IsCons()) return this;
- ConsString* cons = ConsString::cast(this);
- if (cons->second()->length() == 0) return cons->first();
- return SlowTryFlatten(pretenure);
-}
-
-
-String* String::TryFlattenGetString(PretenureFlag pretenure) {
- MaybeObject* flat = TryFlatten(pretenure);
- Object* successfully_flattened;
- if (flat->ToObject(&successfully_flattened)) {
- return String::cast(successfully_flattened);
- }
- return this;
-}
-
-
-uint16_t String::Get(int index) {
- ASSERT(index >= 0 && index < length());
- switch (StringShape(this).full_representation_tag()) {
- case kSeqStringTag | kAsciiStringTag:
- return SeqAsciiString::cast(this)->SeqAsciiStringGet(index);
- case kSeqStringTag | kTwoByteStringTag:
- return SeqTwoByteString::cast(this)->SeqTwoByteStringGet(index);
- case kConsStringTag | kAsciiStringTag:
- case kConsStringTag | kTwoByteStringTag:
- return ConsString::cast(this)->ConsStringGet(index);
- case kExternalStringTag | kAsciiStringTag:
- return ExternalAsciiString::cast(this)->ExternalAsciiStringGet(index);
- case kExternalStringTag | kTwoByteStringTag:
- return ExternalTwoByteString::cast(this)->ExternalTwoByteStringGet(index);
- default:
- break;
- }
-
- UNREACHABLE();
- return 0;
-}
-
-
-void String::Set(int index, uint16_t value) {
- ASSERT(index >= 0 && index < length());
- ASSERT(StringShape(this).IsSequential());
-
- return this->IsAsciiRepresentation()
- ? SeqAsciiString::cast(this)->SeqAsciiStringSet(index, value)
- : SeqTwoByteString::cast(this)->SeqTwoByteStringSet(index, value);
-}
-
-
-bool String::IsFlat() {
- switch (StringShape(this).representation_tag()) {
- case kConsStringTag: {
- String* second = ConsString::cast(this)->second();
- // Only flattened strings have second part empty.
- return second->length() == 0;
- }
- default:
- return true;
- }
-}
-
-
-uint16_t SeqAsciiString::SeqAsciiStringGet(int index) {
- ASSERT(index >= 0 && index < length());
- return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
-}
-
-
-void SeqAsciiString::SeqAsciiStringSet(int index, uint16_t value) {
- ASSERT(index >= 0 && index < length() && value <= kMaxAsciiCharCode);
- WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize,
- static_cast<byte>(value));
-}
-
-
-Address SeqAsciiString::GetCharsAddress() {
- return FIELD_ADDR(this, kHeaderSize);
-}
-
-
-char* SeqAsciiString::GetChars() {
- return reinterpret_cast<char*>(GetCharsAddress());
-}
-
-
-Address SeqTwoByteString::GetCharsAddress() {
- return FIELD_ADDR(this, kHeaderSize);
-}
-
-
-uc16* SeqTwoByteString::GetChars() {
- return reinterpret_cast<uc16*>(FIELD_ADDR(this, kHeaderSize));
-}
-
-
-uint16_t SeqTwoByteString::SeqTwoByteStringGet(int index) {
- ASSERT(index >= 0 && index < length());
- return READ_SHORT_FIELD(this, kHeaderSize + index * kShortSize);
-}
-
-
-void SeqTwoByteString::SeqTwoByteStringSet(int index, uint16_t value) {
- ASSERT(index >= 0 && index < length());
- WRITE_SHORT_FIELD(this, kHeaderSize + index * kShortSize, value);
-}
-
-
-int SeqTwoByteString::SeqTwoByteStringSize(InstanceType instance_type) {
- return SizeFor(length());
-}
-
-
-int SeqAsciiString::SeqAsciiStringSize(InstanceType instance_type) {
- return SizeFor(length());
-}
-
-
-String* ConsString::first() {
- return String::cast(READ_FIELD(this, kFirstOffset));
-}
-
-
-Object* ConsString::unchecked_first() {
- return READ_FIELD(this, kFirstOffset);
-}
-
-
-void ConsString::set_first(String* value, WriteBarrierMode mode) {
- WRITE_FIELD(this, kFirstOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, mode);
-}
-
-
-String* ConsString::second() {
- return String::cast(READ_FIELD(this, kSecondOffset));
-}
-
-
-Object* ConsString::unchecked_second() {
- return READ_FIELD(this, kSecondOffset);
-}
-
-
-void ConsString::set_second(String* value, WriteBarrierMode mode) {
- WRITE_FIELD(this, kSecondOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, mode);
-}
-
-
-ExternalAsciiString::Resource* ExternalAsciiString::resource() {
- return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
-}
-
-
-void ExternalAsciiString::set_resource(
- ExternalAsciiString::Resource* resource) {
- *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)) = resource;
-}
-
-
-ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
- return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
-}
-
-
-void ExternalTwoByteString::set_resource(
- ExternalTwoByteString::Resource* resource) {
- *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)) = resource;
-}
-
-
-void JSFunctionResultCache::MakeZeroSize() {
- set_finger_index(kEntriesIndex);
- set_size(kEntriesIndex);
-}
-
-
-void JSFunctionResultCache::Clear() {
- int cache_size = size();
- Object** entries_start = RawField(this, OffsetOfElementAt(kEntriesIndex));
- MemsetPointer(entries_start,
- GetHeap()->the_hole_value(),
- cache_size - kEntriesIndex);
- MakeZeroSize();
-}
-
-
-int JSFunctionResultCache::size() {
- return Smi::cast(get(kCacheSizeIndex))->value();
-}
-
-
-void JSFunctionResultCache::set_size(int size) {
- set(kCacheSizeIndex, Smi::FromInt(size));
-}
-
-
-int JSFunctionResultCache::finger_index() {
- return Smi::cast(get(kFingerIndex))->value();
-}
-
-
-void JSFunctionResultCache::set_finger_index(int finger_index) {
- set(kFingerIndex, Smi::FromInt(finger_index));
-}
-
-
-byte ByteArray::get(int index) {
- ASSERT(index >= 0 && index < this->length());
- return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
-}
-
-
-void ByteArray::set(int index, byte value) {
- ASSERT(index >= 0 && index < this->length());
- WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, value);
-}
-
-
-int ByteArray::get_int(int index) {
- ASSERT(index >= 0 && (index * kIntSize) < this->length());
- return READ_INT_FIELD(this, kHeaderSize + index * kIntSize);
-}
-
-
-ByteArray* ByteArray::FromDataStartAddress(Address address) {
- ASSERT_TAG_ALIGNED(address);
- return reinterpret_cast<ByteArray*>(address - kHeaderSize + kHeapObjectTag);
-}
-
-
-Address ByteArray::GetDataStartAddress() {
- return reinterpret_cast<Address>(this) - kHeapObjectTag + kHeaderSize;
-}
-
-
-uint8_t* ExternalPixelArray::external_pixel_pointer() {
- return reinterpret_cast<uint8_t*>(external_pointer());
-}
-
-
-uint8_t ExternalPixelArray::get(int index) {
- ASSERT((index >= 0) && (index < this->length()));
- uint8_t* ptr = external_pixel_pointer();
- return ptr[index];
-}
-
-
-void ExternalPixelArray::set(int index, uint8_t value) {
- ASSERT((index >= 0) && (index < this->length()));
- uint8_t* ptr = external_pixel_pointer();
- ptr[index] = value;
-}
-
-
-void* ExternalArray::external_pointer() {
- intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset);
- return reinterpret_cast<void*>(ptr);
-}
-
-
-void ExternalArray::set_external_pointer(void* value, WriteBarrierMode mode) {
- intptr_t ptr = reinterpret_cast<intptr_t>(value);
- WRITE_INTPTR_FIELD(this, kExternalPointerOffset, ptr);
-}
-
-
-int8_t ExternalByteArray::get(int index) {
- ASSERT((index >= 0) && (index < this->length()));
- int8_t* ptr = static_cast<int8_t*>(external_pointer());
- return ptr[index];
-}
-
-
-void ExternalByteArray::set(int index, int8_t value) {
- ASSERT((index >= 0) && (index < this->length()));
- int8_t* ptr = static_cast<int8_t*>(external_pointer());
- ptr[index] = value;
-}
-
-
-uint8_t ExternalUnsignedByteArray::get(int index) {
- ASSERT((index >= 0) && (index < this->length()));
- uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
- return ptr[index];
-}
-
-
-void ExternalUnsignedByteArray::set(int index, uint8_t value) {
- ASSERT((index >= 0) && (index < this->length()));
- uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
- ptr[index] = value;
-}
-
-
-int16_t ExternalShortArray::get(int index) {
- ASSERT((index >= 0) && (index < this->length()));
- int16_t* ptr = static_cast<int16_t*>(external_pointer());
- return ptr[index];
-}
-
-
-void ExternalShortArray::set(int index, int16_t value) {
- ASSERT((index >= 0) && (index < this->length()));
- int16_t* ptr = static_cast<int16_t*>(external_pointer());
- ptr[index] = value;
-}
-
-
-uint16_t ExternalUnsignedShortArray::get(int index) {
- ASSERT((index >= 0) && (index < this->length()));
- uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
- return ptr[index];
-}
-
-
-void ExternalUnsignedShortArray::set(int index, uint16_t value) {
- ASSERT((index >= 0) && (index < this->length()));
- uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
- ptr[index] = value;
-}
-
-
-int32_t ExternalIntArray::get(int index) {
- ASSERT((index >= 0) && (index < this->length()));
- int32_t* ptr = static_cast<int32_t*>(external_pointer());
- return ptr[index];
-}
-
-
-void ExternalIntArray::set(int index, int32_t value) {
- ASSERT((index >= 0) && (index < this->length()));
- int32_t* ptr = static_cast<int32_t*>(external_pointer());
- ptr[index] = value;
-}
-
-
-uint32_t ExternalUnsignedIntArray::get(int index) {
- ASSERT((index >= 0) && (index < this->length()));
- uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
- return ptr[index];
-}
-
-
-void ExternalUnsignedIntArray::set(int index, uint32_t value) {
- ASSERT((index >= 0) && (index < this->length()));
- uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
- ptr[index] = value;
-}
-
-
-float ExternalFloatArray::get(int index) {
- ASSERT((index >= 0) && (index < this->length()));
- float* ptr = static_cast<float*>(external_pointer());
- return ptr[index];
-}
-
-
-void ExternalFloatArray::set(int index, float value) {
- ASSERT((index >= 0) && (index < this->length()));
- float* ptr = static_cast<float*>(external_pointer());
- ptr[index] = value;
-}
-
-
-int Map::visitor_id() {
- return READ_BYTE_FIELD(this, kVisitorIdOffset);
-}
-
-
-void Map::set_visitor_id(int id) {
- ASSERT(0 <= id && id < 256);
- WRITE_BYTE_FIELD(this, kVisitorIdOffset, static_cast<byte>(id));
-}
-
-
-int Map::instance_size() {
- return READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2;
-}
-
-
-int Map::inobject_properties() {
- return READ_BYTE_FIELD(this, kInObjectPropertiesOffset);
-}
-
-
-int Map::pre_allocated_property_fields() {
- return READ_BYTE_FIELD(this, kPreAllocatedPropertyFieldsOffset);
-}
-
-
-int HeapObject::SizeFromMap(Map* map) {
- int instance_size = map->instance_size();
- if (instance_size != kVariableSizeSentinel) return instance_size;
- // We can ignore the "symbol" bit becase it is only set for symbols
- // and implies a string type.
- int instance_type = static_cast<int>(map->instance_type()) & ~kIsSymbolMask;
- // Only inline the most frequent cases.
- if (instance_type == FIXED_ARRAY_TYPE) {
- return FixedArray::BodyDescriptor::SizeOf(map, this);
- }
- if (instance_type == ASCII_STRING_TYPE) {
- return SeqAsciiString::SizeFor(
- reinterpret_cast<SeqAsciiString*>(this)->length());
- }
- if (instance_type == BYTE_ARRAY_TYPE) {
- return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
- }
- if (instance_type == STRING_TYPE) {
- return SeqTwoByteString::SizeFor(
- reinterpret_cast<SeqTwoByteString*>(this)->length());
- }
- ASSERT(instance_type == CODE_TYPE);
- return reinterpret_cast<Code*>(this)->CodeSize();
-}
-
-
-void Map::set_instance_size(int value) {
- ASSERT_EQ(0, value & (kPointerSize - 1));
- value >>= kPointerSizeLog2;
- ASSERT(0 <= value && value < 256);
- WRITE_BYTE_FIELD(this, kInstanceSizeOffset, static_cast<byte>(value));
-}
-
-
-void Map::set_inobject_properties(int value) {
- ASSERT(0 <= value && value < 256);
- WRITE_BYTE_FIELD(this, kInObjectPropertiesOffset, static_cast<byte>(value));
-}
-
-
-void Map::set_pre_allocated_property_fields(int value) {
- ASSERT(0 <= value && value < 256);
- WRITE_BYTE_FIELD(this,
- kPreAllocatedPropertyFieldsOffset,
- static_cast<byte>(value));
-}
-
-
-InstanceType Map::instance_type() {
- return static_cast<InstanceType>(READ_BYTE_FIELD(this, kInstanceTypeOffset));
-}
-
-
-void Map::set_instance_type(InstanceType value) {
- WRITE_BYTE_FIELD(this, kInstanceTypeOffset, value);
-}
-
-
-int Map::unused_property_fields() {
- return READ_BYTE_FIELD(this, kUnusedPropertyFieldsOffset);
-}
-
-
-void Map::set_unused_property_fields(int value) {
- WRITE_BYTE_FIELD(this, kUnusedPropertyFieldsOffset, Min(value, 255));
-}
-
-
-byte Map::bit_field() {
- return READ_BYTE_FIELD(this, kBitFieldOffset);
-}
-
-
-void Map::set_bit_field(byte value) {
- WRITE_BYTE_FIELD(this, kBitFieldOffset, value);
-}
-
-
-byte Map::bit_field2() {
- return READ_BYTE_FIELD(this, kBitField2Offset);
-}
-
-
-void Map::set_bit_field2(byte value) {
- WRITE_BYTE_FIELD(this, kBitField2Offset, value);
-}
-
-
-void Map::set_non_instance_prototype(bool value) {
- if (value) {
- set_bit_field(bit_field() | (1 << kHasNonInstancePrototype));
- } else {
- set_bit_field(bit_field() & ~(1 << kHasNonInstancePrototype));
- }
-}
-
-
-bool Map::has_non_instance_prototype() {
- return ((1 << kHasNonInstancePrototype) & bit_field()) != 0;
-}
-
-
-void Map::set_function_with_prototype(bool value) {
- if (value) {
- set_bit_field2(bit_field2() | (1 << kFunctionWithPrototype));
- } else {
- set_bit_field2(bit_field2() & ~(1 << kFunctionWithPrototype));
- }
-}
-
-
-bool Map::function_with_prototype() {
- return ((1 << kFunctionWithPrototype) & bit_field2()) != 0;
-}
-
-
-void Map::set_is_access_check_needed(bool access_check_needed) {
- if (access_check_needed) {
- set_bit_field(bit_field() | (1 << kIsAccessCheckNeeded));
- } else {
- set_bit_field(bit_field() & ~(1 << kIsAccessCheckNeeded));
- }
-}
-
-
-bool Map::is_access_check_needed() {
- return ((1 << kIsAccessCheckNeeded) & bit_field()) != 0;
-}
-
-
-void Map::set_is_extensible(bool value) {
- if (value) {
- set_bit_field2(bit_field2() | (1 << kIsExtensible));
- } else {
- set_bit_field2(bit_field2() & ~(1 << kIsExtensible));
- }
-}
-
-bool Map::is_extensible() {
- return ((1 << kIsExtensible) & bit_field2()) != 0;
-}
-
-
-void Map::set_attached_to_shared_function_info(bool value) {
- if (value) {
- set_bit_field2(bit_field2() | (1 << kAttachedToSharedFunctionInfo));
- } else {
- set_bit_field2(bit_field2() & ~(1 << kAttachedToSharedFunctionInfo));
- }
-}
-
-bool Map::attached_to_shared_function_info() {
- return ((1 << kAttachedToSharedFunctionInfo) & bit_field2()) != 0;
-}
-
-
-void Map::set_is_shared(bool value) {
- if (value) {
- set_bit_field2(bit_field2() | (1 << kIsShared));
- } else {
- set_bit_field2(bit_field2() & ~(1 << kIsShared));
- }
-}
-
-bool Map::is_shared() {
- return ((1 << kIsShared) & bit_field2()) != 0;
-}
-
-
-JSFunction* Map::unchecked_constructor() {
- return reinterpret_cast<JSFunction*>(READ_FIELD(this, kConstructorOffset));
-}
-
-
-Code::Flags Code::flags() {
- return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset));
-}
-
-
-void Code::set_flags(Code::Flags flags) {
- STATIC_ASSERT(Code::NUMBER_OF_KINDS <= (kFlagsKindMask >> kFlagsKindShift)+1);
- // Make sure that all call stubs have an arguments count.
- ASSERT((ExtractKindFromFlags(flags) != CALL_IC &&
- ExtractKindFromFlags(flags) != KEYED_CALL_IC) ||
- ExtractArgumentsCountFromFlags(flags) >= 0);
- WRITE_INT_FIELD(this, kFlagsOffset, flags);
-}
-
-
-Code::Kind Code::kind() {
- return ExtractKindFromFlags(flags());
-}
-
-
-InLoopFlag Code::ic_in_loop() {
- return ExtractICInLoopFromFlags(flags());
-}
-
-
-InlineCacheState Code::ic_state() {
- InlineCacheState result = ExtractICStateFromFlags(flags());
- // Only allow uninitialized or debugger states for non-IC code
- // objects. This is used in the debugger to determine whether or not
- // a call to code object has been replaced with a debug break call.
- ASSERT(is_inline_cache_stub() ||
- result == UNINITIALIZED ||
- result == DEBUG_BREAK ||
- result == DEBUG_PREPARE_STEP_IN);
- return result;
-}
-
-
-Code::ExtraICState Code::extra_ic_state() {
- ASSERT(is_inline_cache_stub());
- return ExtractExtraICStateFromFlags(flags());
-}
-
-
-PropertyType Code::type() {
- ASSERT(ic_state() == MONOMORPHIC);
- return ExtractTypeFromFlags(flags());
-}
-
-
-int Code::arguments_count() {
- ASSERT(is_call_stub() || is_keyed_call_stub() || kind() == STUB);
- return ExtractArgumentsCountFromFlags(flags());
-}
-
-
-int Code::major_key() {
- ASSERT(kind() == STUB ||
- kind() == BINARY_OP_IC ||
- kind() == TYPE_RECORDING_BINARY_OP_IC ||
- kind() == COMPARE_IC);
- return READ_BYTE_FIELD(this, kStubMajorKeyOffset);
-}
-
-
-void Code::set_major_key(int major) {
- ASSERT(kind() == STUB ||
- kind() == BINARY_OP_IC ||
- kind() == TYPE_RECORDING_BINARY_OP_IC ||
- kind() == COMPARE_IC);
- ASSERT(0 <= major && major < 256);
- WRITE_BYTE_FIELD(this, kStubMajorKeyOffset, major);
-}
-
-
-bool Code::optimizable() {
- ASSERT(kind() == FUNCTION);
- return READ_BYTE_FIELD(this, kOptimizableOffset) == 1;
-}
-
-
-void Code::set_optimizable(bool value) {
- ASSERT(kind() == FUNCTION);
- WRITE_BYTE_FIELD(this, kOptimizableOffset, value ? 1 : 0);
-}
-
-
-bool Code::has_deoptimization_support() {
- ASSERT(kind() == FUNCTION);
- return READ_BYTE_FIELD(this, kHasDeoptimizationSupportOffset) == 1;
-}
-
-
-void Code::set_has_deoptimization_support(bool value) {
- ASSERT(kind() == FUNCTION);
- WRITE_BYTE_FIELD(this, kHasDeoptimizationSupportOffset, value ? 1 : 0);
-}
-
-
-int Code::allow_osr_at_loop_nesting_level() {
- ASSERT(kind() == FUNCTION);
- return READ_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset);
-}
-
-
-void Code::set_allow_osr_at_loop_nesting_level(int level) {
- ASSERT(kind() == FUNCTION);
- ASSERT(level >= 0 && level <= kMaxLoopNestingMarker);
- WRITE_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset, level);
-}
-
-
-unsigned Code::stack_slots() {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
- return READ_UINT32_FIELD(this, kStackSlotsOffset);
-}
-
-
-void Code::set_stack_slots(unsigned slots) {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
- WRITE_UINT32_FIELD(this, kStackSlotsOffset, slots);
-}
-
-
-unsigned Code::safepoint_table_offset() {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
- return READ_UINT32_FIELD(this, kSafepointTableOffsetOffset);
-}
-
-
-void Code::set_safepoint_table_offset(unsigned offset) {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
- ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
- WRITE_UINT32_FIELD(this, kSafepointTableOffsetOffset, offset);
-}
-
-
-unsigned Code::stack_check_table_offset() {
- ASSERT(kind() == FUNCTION);
- return READ_UINT32_FIELD(this, kStackCheckTableOffsetOffset);
-}
-
-
-void Code::set_stack_check_table_offset(unsigned offset) {
- ASSERT(kind() == FUNCTION);
- ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
- WRITE_UINT32_FIELD(this, kStackCheckTableOffsetOffset, offset);
-}
-
-
-CheckType Code::check_type() {
- ASSERT(is_call_stub() || is_keyed_call_stub());
- byte type = READ_BYTE_FIELD(this, kCheckTypeOffset);
- return static_cast<CheckType>(type);
-}
-
-
-void Code::set_check_type(CheckType value) {
- ASSERT(is_call_stub() || is_keyed_call_stub());
- WRITE_BYTE_FIELD(this, kCheckTypeOffset, value);
-}
-
-
-ExternalArrayType Code::external_array_type() {
- ASSERT(is_external_array_load_stub() || is_external_array_store_stub());
- byte type = READ_BYTE_FIELD(this, kExternalArrayTypeOffset);
- return static_cast<ExternalArrayType>(type);
-}
-
-
-void Code::set_external_array_type(ExternalArrayType value) {
- ASSERT(is_external_array_load_stub() || is_external_array_store_stub());
- WRITE_BYTE_FIELD(this, kExternalArrayTypeOffset, value);
-}
-
-
-byte Code::binary_op_type() {
- ASSERT(is_binary_op_stub());
- return READ_BYTE_FIELD(this, kBinaryOpTypeOffset);
-}
-
-
-void Code::set_binary_op_type(byte value) {
- ASSERT(is_binary_op_stub());
- WRITE_BYTE_FIELD(this, kBinaryOpTypeOffset, value);
-}
-
-
-byte Code::type_recording_binary_op_type() {
- ASSERT(is_type_recording_binary_op_stub());
- return READ_BYTE_FIELD(this, kBinaryOpTypeOffset);
-}
-
-
-void Code::set_type_recording_binary_op_type(byte value) {
- ASSERT(is_type_recording_binary_op_stub());
- WRITE_BYTE_FIELD(this, kBinaryOpTypeOffset, value);
-}
-
-
-byte Code::type_recording_binary_op_result_type() {
- ASSERT(is_type_recording_binary_op_stub());
- return READ_BYTE_FIELD(this, kBinaryOpReturnTypeOffset);
-}
-
-
-void Code::set_type_recording_binary_op_result_type(byte value) {
- ASSERT(is_type_recording_binary_op_stub());
- WRITE_BYTE_FIELD(this, kBinaryOpReturnTypeOffset, value);
-}
-
-
-byte Code::compare_state() {
- ASSERT(is_compare_ic_stub());
- return READ_BYTE_FIELD(this, kCompareStateOffset);
-}
-
-
-void Code::set_compare_state(byte value) {
- ASSERT(is_compare_ic_stub());
- WRITE_BYTE_FIELD(this, kCompareStateOffset, value);
-}
-
-
-bool Code::is_inline_cache_stub() {
- Kind kind = this->kind();
- return kind >= FIRST_IC_KIND && kind <= LAST_IC_KIND;
-}
-
-
-Code::Flags Code::ComputeFlags(Kind kind,
- InLoopFlag in_loop,
- InlineCacheState ic_state,
- ExtraICState extra_ic_state,
- PropertyType type,
- int argc,
- InlineCacheHolderFlag holder) {
- // Extra IC state is only allowed for monomorphic call IC stubs
- // or for store IC stubs.
- ASSERT(extra_ic_state == kNoExtraICState ||
- (kind == CALL_IC && (ic_state == MONOMORPHIC ||
- ic_state == MONOMORPHIC_PROTOTYPE_FAILURE)) ||
- (kind == STORE_IC) ||
- (kind == KEYED_STORE_IC));
- // Compute the bit mask.
- int bits = kind << kFlagsKindShift;
- if (in_loop) bits |= kFlagsICInLoopMask;
- bits |= ic_state << kFlagsICStateShift;
- bits |= type << kFlagsTypeShift;
- bits |= extra_ic_state << kFlagsExtraICStateShift;
- bits |= argc << kFlagsArgumentsCountShift;
- if (holder == PROTOTYPE_MAP) bits |= kFlagsCacheInPrototypeMapMask;
- // Cast to flags and validate result before returning it.
- Flags result = static_cast<Flags>(bits);
- ASSERT(ExtractKindFromFlags(result) == kind);
- ASSERT(ExtractICStateFromFlags(result) == ic_state);
- ASSERT(ExtractICInLoopFromFlags(result) == in_loop);
- ASSERT(ExtractTypeFromFlags(result) == type);
- ASSERT(ExtractExtraICStateFromFlags(result) == extra_ic_state);
- ASSERT(ExtractArgumentsCountFromFlags(result) == argc);
- return result;
-}
-
-
-Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
- PropertyType type,
- ExtraICState extra_ic_state,
- InlineCacheHolderFlag holder,
- InLoopFlag in_loop,
- int argc) {
- return ComputeFlags(
- kind, in_loop, MONOMORPHIC, extra_ic_state, type, argc, holder);
-}
-
-
-Code::Kind Code::ExtractKindFromFlags(Flags flags) {
- int bits = (flags & kFlagsKindMask) >> kFlagsKindShift;
- return static_cast<Kind>(bits);
-}
-
-
-InlineCacheState Code::ExtractICStateFromFlags(Flags flags) {
- int bits = (flags & kFlagsICStateMask) >> kFlagsICStateShift;
- return static_cast<InlineCacheState>(bits);
-}
-
-
-Code::ExtraICState Code::ExtractExtraICStateFromFlags(Flags flags) {
- int bits = (flags & kFlagsExtraICStateMask) >> kFlagsExtraICStateShift;
- return static_cast<ExtraICState>(bits);
-}
-
-
-InLoopFlag Code::ExtractICInLoopFromFlags(Flags flags) {
- int bits = (flags & kFlagsICInLoopMask);
- return bits != 0 ? IN_LOOP : NOT_IN_LOOP;
-}
-
-
-PropertyType Code::ExtractTypeFromFlags(Flags flags) {
- int bits = (flags & kFlagsTypeMask) >> kFlagsTypeShift;
- return static_cast<PropertyType>(bits);
-}
-
-
-int Code::ExtractArgumentsCountFromFlags(Flags flags) {
- return (flags & kFlagsArgumentsCountMask) >> kFlagsArgumentsCountShift;
-}
-
-
-InlineCacheHolderFlag Code::ExtractCacheHolderFromFlags(Flags flags) {
- int bits = (flags & kFlagsCacheInPrototypeMapMask);
- return bits != 0 ? PROTOTYPE_MAP : OWN_MAP;
-}
-
-
-Code::Flags Code::RemoveTypeFromFlags(Flags flags) {
- int bits = flags & ~kFlagsTypeMask;
- return static_cast<Flags>(bits);
-}
-
-
-Code* Code::GetCodeFromTargetAddress(Address address) {
- HeapObject* code = HeapObject::FromAddress(address - Code::kHeaderSize);
- // GetCodeFromTargetAddress might be called when marking objects during mark
- // sweep. reinterpret_cast is therefore used instead of the more appropriate
- // Code::cast. Code::cast does not work when the object's map is
- // marked.
- Code* result = reinterpret_cast<Code*>(code);
- return result;
-}
-
-
-Isolate* Map::isolate() {
- return heap()->isolate();
-}
-
-
-Heap* Map::heap() {
- // NOTE: address() helper is not used to save one instruction.
- Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
- ASSERT(heap != NULL);
- ASSERT(heap->isolate() == Isolate::Current());
- return heap;
-}
-
-
-Heap* Code::heap() {
- // NOTE: address() helper is not used to save one instruction.
- Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
- ASSERT(heap != NULL);
- ASSERT(heap->isolate() == Isolate::Current());
- return heap;
-}
-
-
-Isolate* Code::isolate() {
- return heap()->isolate();
-}
-
-
-Heap* JSGlobalPropertyCell::heap() {
- // NOTE: address() helper is not used to save one instruction.
- Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
- ASSERT(heap != NULL);
- ASSERT(heap->isolate() == Isolate::Current());
- return heap;
-}
-
-
-Isolate* JSGlobalPropertyCell::isolate() {
- return heap()->isolate();
-}
-
-
-Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
- return HeapObject::
- FromAddress(Memory::Address_at(location_of_address) - Code::kHeaderSize);
-}
-
-
-Object* Map::prototype() {
- return READ_FIELD(this, kPrototypeOffset);
-}
-
-
-void Map::set_prototype(Object* value, WriteBarrierMode mode) {
- ASSERT(value->IsNull() || value->IsJSObject());
- WRITE_FIELD(this, kPrototypeOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, mode);
-}
-
-
-MaybeObject* Map::GetFastElementsMap() {
- if (has_fast_elements()) return this;
- Object* obj;
- { MaybeObject* maybe_obj = CopyDropTransitions();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- Map* new_map = Map::cast(obj);
- new_map->set_has_fast_elements(true);
- isolate()->counters()->map_slow_to_fast_elements()->Increment();
- return new_map;
-}
-
-
-MaybeObject* Map::GetSlowElementsMap() {
- if (!has_fast_elements()) return this;
- Object* obj;
- { MaybeObject* maybe_obj = CopyDropTransitions();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- Map* new_map = Map::cast(obj);
- new_map->set_has_fast_elements(false);
- isolate()->counters()->map_fast_to_slow_elements()->Increment();
- return new_map;
-}
-
-
-ACCESSORS(Map, instance_descriptors, DescriptorArray,
- kInstanceDescriptorsOffset)
-ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
-ACCESSORS(Map, constructor, Object, kConstructorOffset)
-
-ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
-ACCESSORS(JSFunction, literals, FixedArray, kLiteralsOffset)
-ACCESSORS_GCSAFE(JSFunction, next_function_link, Object,
- kNextFunctionLinkOffset)
-
-ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
-ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset)
-ACCESSORS(GlobalObject, global_receiver, JSObject, kGlobalReceiverOffset)
-
-ACCESSORS(JSGlobalProxy, context, Object, kContextOffset)
-
-ACCESSORS(AccessorInfo, getter, Object, kGetterOffset)
-ACCESSORS(AccessorInfo, setter, Object, kSetterOffset)
-ACCESSORS(AccessorInfo, data, Object, kDataOffset)
-ACCESSORS(AccessorInfo, name, Object, kNameOffset)
-ACCESSORS(AccessorInfo, flag, Smi, kFlagOffset)
-
-ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset)
-ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset)
-ACCESSORS(AccessCheckInfo, data, Object, kDataOffset)
-
-ACCESSORS(InterceptorInfo, getter, Object, kGetterOffset)
-ACCESSORS(InterceptorInfo, setter, Object, kSetterOffset)
-ACCESSORS(InterceptorInfo, query, Object, kQueryOffset)
-ACCESSORS(InterceptorInfo, deleter, Object, kDeleterOffset)
-ACCESSORS(InterceptorInfo, enumerator, Object, kEnumeratorOffset)
-ACCESSORS(InterceptorInfo, data, Object, kDataOffset)
-
-ACCESSORS(CallHandlerInfo, callback, Object, kCallbackOffset)
-ACCESSORS(CallHandlerInfo, data, Object, kDataOffset)
-
-ACCESSORS(TemplateInfo, tag, Object, kTagOffset)
-ACCESSORS(TemplateInfo, property_list, Object, kPropertyListOffset)
-
-ACCESSORS(FunctionTemplateInfo, serial_number, Object, kSerialNumberOffset)
-ACCESSORS(FunctionTemplateInfo, call_code, Object, kCallCodeOffset)
-ACCESSORS(FunctionTemplateInfo, property_accessors, Object,
- kPropertyAccessorsOffset)
-ACCESSORS(FunctionTemplateInfo, prototype_template, Object,
- kPrototypeTemplateOffset)
-ACCESSORS(FunctionTemplateInfo, parent_template, Object, kParentTemplateOffset)
-ACCESSORS(FunctionTemplateInfo, named_property_handler, Object,
- kNamedPropertyHandlerOffset)
-ACCESSORS(FunctionTemplateInfo, indexed_property_handler, Object,
- kIndexedPropertyHandlerOffset)
-ACCESSORS(FunctionTemplateInfo, instance_template, Object,
- kInstanceTemplateOffset)
-ACCESSORS(FunctionTemplateInfo, class_name, Object, kClassNameOffset)
-ACCESSORS(FunctionTemplateInfo, signature, Object, kSignatureOffset)
-ACCESSORS(FunctionTemplateInfo, instance_call_handler, Object,
- kInstanceCallHandlerOffset)
-ACCESSORS(FunctionTemplateInfo, access_check_info, Object,
- kAccessCheckInfoOffset)
-ACCESSORS(FunctionTemplateInfo, flag, Smi, kFlagOffset)
-
-ACCESSORS(ObjectTemplateInfo, constructor, Object, kConstructorOffset)
-ACCESSORS(ObjectTemplateInfo, internal_field_count, Object,
- kInternalFieldCountOffset)
-
-ACCESSORS(SignatureInfo, receiver, Object, kReceiverOffset)
-ACCESSORS(SignatureInfo, args, Object, kArgsOffset)
-
-ACCESSORS(TypeSwitchInfo, types, Object, kTypesOffset)
-
-ACCESSORS(Script, source, Object, kSourceOffset)
-ACCESSORS(Script, name, Object, kNameOffset)
-ACCESSORS(Script, id, Object, kIdOffset)
-ACCESSORS(Script, line_offset, Smi, kLineOffsetOffset)
-ACCESSORS(Script, column_offset, Smi, kColumnOffsetOffset)
-ACCESSORS(Script, data, Object, kDataOffset)
-ACCESSORS(Script, context_data, Object, kContextOffset)
-ACCESSORS(Script, wrapper, Proxy, kWrapperOffset)
-ACCESSORS(Script, type, Smi, kTypeOffset)
-ACCESSORS(Script, compilation_type, Smi, kCompilationTypeOffset)
-ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
-ACCESSORS(Script, eval_from_shared, Object, kEvalFromSharedOffset)
-ACCESSORS(Script, eval_from_instructions_offset, Smi,
- kEvalFrominstructionsOffsetOffset)
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoIndex)
-ACCESSORS(DebugInfo, original_code, Code, kOriginalCodeIndex)
-ACCESSORS(DebugInfo, code, Code, kPatchedCodeIndex)
-ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateIndex)
-
-ACCESSORS(BreakPointInfo, code_position, Smi, kCodePositionIndex)
-ACCESSORS(BreakPointInfo, source_position, Smi, kSourcePositionIndex)
-ACCESSORS(BreakPointInfo, statement_position, Smi, kStatementPositionIndex)
-ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
-#endif
-
-ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
-ACCESSORS_GCSAFE(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
-ACCESSORS_GCSAFE(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
-ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
- kInstanceClassNameOffset)
-ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
-ACCESSORS(SharedFunctionInfo, script, Object, kScriptOffset)
-ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset)
-ACCESSORS(SharedFunctionInfo, inferred_name, String, kInferredNameOffset)
-ACCESSORS(SharedFunctionInfo, this_property_assignments, Object,
- kThisPropertyAssignmentsOffset)
-
-BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
- kHiddenPrototypeBit)
-BOOL_ACCESSORS(FunctionTemplateInfo, flag, undetectable, kUndetectableBit)
-BOOL_ACCESSORS(FunctionTemplateInfo, flag, needs_access_check,
- kNeedsAccessCheckBit)
-BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_expression,
- kIsExpressionBit)
-BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
- kIsTopLevelBit)
-BOOL_GETTER(SharedFunctionInfo, compiler_hints,
- has_only_simple_this_property_assignments,
- kHasOnlySimpleThisPropertyAssignments)
-BOOL_ACCESSORS(SharedFunctionInfo,
- compiler_hints,
- allows_lazy_compilation,
- kAllowLazyCompilation)
-
-
-#if V8_HOST_ARCH_32_BIT
-SMI_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
-SMI_ACCESSORS(SharedFunctionInfo, formal_parameter_count,
- kFormalParameterCountOffset)
-SMI_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
- kExpectedNofPropertiesOffset)
-SMI_ACCESSORS(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
-SMI_ACCESSORS(SharedFunctionInfo, start_position_and_type,
- kStartPositionAndTypeOffset)
-SMI_ACCESSORS(SharedFunctionInfo, end_position, kEndPositionOffset)
-SMI_ACCESSORS(SharedFunctionInfo, function_token_position,
- kFunctionTokenPositionOffset)
-SMI_ACCESSORS(SharedFunctionInfo, compiler_hints,
- kCompilerHintsOffset)
-SMI_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
- kThisPropertyAssignmentsCountOffset)
-SMI_ACCESSORS(SharedFunctionInfo, opt_count, kOptCountOffset)
-#else
-
-#define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \
- STATIC_ASSERT(holder::offset % kPointerSize == 0); \
- int holder::name() { \
- int value = READ_INT_FIELD(this, offset); \
- ASSERT(kHeapObjectTag == 1); \
- ASSERT((value & kHeapObjectTag) == 0); \
- return value >> 1; \
- } \
- void holder::set_##name(int value) { \
- ASSERT(kHeapObjectTag == 1); \
- ASSERT((value & 0xC0000000) == 0xC0000000 || \
- (value & 0xC0000000) == 0x000000000); \
- WRITE_INT_FIELD(this, \
- offset, \
- (value << 1) & ~kHeapObjectTag); \
- }
-
-#define PSEUDO_SMI_ACCESSORS_HI(holder, name, offset) \
- STATIC_ASSERT(holder::offset % kPointerSize == kIntSize); \
- INT_ACCESSORS(holder, name, offset)
-
-
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, length, kLengthOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
- formal_parameter_count,
- kFormalParameterCountOffset)
-
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
- expected_nof_properties,
- kExpectedNofPropertiesOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
-
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, end_position, kEndPositionOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
- start_position_and_type,
- kStartPositionAndTypeOffset)
-
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
- function_token_position,
- kFunctionTokenPositionOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
- compiler_hints,
- kCompilerHintsOffset)
-
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
- this_property_assignments_count,
- kThisPropertyAssignmentsCountOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, opt_count, kOptCountOffset)
-#endif
-
-
-int SharedFunctionInfo::construction_count() {
- return READ_BYTE_FIELD(this, kConstructionCountOffset);
-}
-
-
-void SharedFunctionInfo::set_construction_count(int value) {
- ASSERT(0 <= value && value < 256);
- WRITE_BYTE_FIELD(this, kConstructionCountOffset, static_cast<byte>(value));
-}
-
-
-bool SharedFunctionInfo::live_objects_may_exist() {
- return (compiler_hints() & (1 << kLiveObjectsMayExist)) != 0;
-}
-
-
-void SharedFunctionInfo::set_live_objects_may_exist(bool value) {
- if (value) {
- set_compiler_hints(compiler_hints() | (1 << kLiveObjectsMayExist));
- } else {
- set_compiler_hints(compiler_hints() & ~(1 << kLiveObjectsMayExist));
- }
-}
-
-
-bool SharedFunctionInfo::IsInobjectSlackTrackingInProgress() {
- return initial_map() != HEAP->undefined_value();
-}
-
-
-bool SharedFunctionInfo::optimization_disabled() {
- return BooleanBit::get(compiler_hints(), kOptimizationDisabled);
-}
-
-
-void SharedFunctionInfo::set_optimization_disabled(bool disable) {
- set_compiler_hints(BooleanBit::set(compiler_hints(),
- kOptimizationDisabled,
- disable));
- // If disabling optimizations we reflect that in the code object so
- // it will not be counted as optimizable code.
- if ((code()->kind() == Code::FUNCTION) && disable) {
- code()->set_optimizable(false);
- }
-}
-
-
-bool SharedFunctionInfo::strict_mode() {
- return BooleanBit::get(compiler_hints(), kStrictModeFunction);
-}
-
-
-void SharedFunctionInfo::set_strict_mode(bool value) {
- set_compiler_hints(BooleanBit::set(compiler_hints(),
- kStrictModeFunction,
- value));
-}
-
-
-ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
-ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
-
-bool Script::HasValidSource() {
- Object* src = this->source();
- if (!src->IsString()) return true;
- String* src_str = String::cast(src);
- if (!StringShape(src_str).IsExternal()) return true;
- if (src_str->IsAsciiRepresentation()) {
- return ExternalAsciiString::cast(src)->resource() != NULL;
- } else if (src_str->IsTwoByteRepresentation()) {
- return ExternalTwoByteString::cast(src)->resource() != NULL;
- }
- return true;
-}
-
-
-void SharedFunctionInfo::DontAdaptArguments() {
- ASSERT(code()->kind() == Code::BUILTIN);
- set_formal_parameter_count(kDontAdaptArgumentsSentinel);
-}
-
-
-int SharedFunctionInfo::start_position() {
- return start_position_and_type() >> kStartPositionShift;
-}
-
-
-void SharedFunctionInfo::set_start_position(int start_position) {
- set_start_position_and_type((start_position << kStartPositionShift)
- | (start_position_and_type() & ~kStartPositionMask));
-}
-
-
-Code* SharedFunctionInfo::code() {
- return Code::cast(READ_FIELD(this, kCodeOffset));
-}
-
-
-Code* SharedFunctionInfo::unchecked_code() {
- return reinterpret_cast<Code*>(READ_FIELD(this, kCodeOffset));
-}
-
-
-void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
- WRITE_FIELD(this, kCodeOffset, value);
- ASSERT(!Isolate::Current()->heap()->InNewSpace(value));
-}
-
-
-SerializedScopeInfo* SharedFunctionInfo::scope_info() {
- return reinterpret_cast<SerializedScopeInfo*>(
- READ_FIELD(this, kScopeInfoOffset));
-}
-
-
-void SharedFunctionInfo::set_scope_info(SerializedScopeInfo* value,
- WriteBarrierMode mode) {
- WRITE_FIELD(this, kScopeInfoOffset, reinterpret_cast<Object*>(value));
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kScopeInfoOffset, mode);
-}
-
-
-Smi* SharedFunctionInfo::deopt_counter() {
- return reinterpret_cast<Smi*>(READ_FIELD(this, kDeoptCounterOffset));
-}
-
-
-void SharedFunctionInfo::set_deopt_counter(Smi* value) {
- WRITE_FIELD(this, kDeoptCounterOffset, value);
-}
-
-
-bool SharedFunctionInfo::is_compiled() {
- return code() !=
- Isolate::Current()->builtins()->builtin(Builtins::kLazyCompile);
-}
-
-
-bool SharedFunctionInfo::IsApiFunction() {
- return function_data()->IsFunctionTemplateInfo();
-}
-
-
-FunctionTemplateInfo* SharedFunctionInfo::get_api_func_data() {
- ASSERT(IsApiFunction());
- return FunctionTemplateInfo::cast(function_data());
-}
-
-
-bool SharedFunctionInfo::HasBuiltinFunctionId() {
- return function_data()->IsSmi();
-}
-
-
-BuiltinFunctionId SharedFunctionInfo::builtin_function_id() {
- ASSERT(HasBuiltinFunctionId());
- return static_cast<BuiltinFunctionId>(Smi::cast(function_data())->value());
-}
-
-
-int SharedFunctionInfo::code_age() {
- return (compiler_hints() >> kCodeAgeShift) & kCodeAgeMask;
-}
-
-
-void SharedFunctionInfo::set_code_age(int code_age) {
- set_compiler_hints(compiler_hints() |
- ((code_age & kCodeAgeMask) << kCodeAgeShift));
-}
-
-
-bool SharedFunctionInfo::has_deoptimization_support() {
- Code* code = this->code();
- return code->kind() == Code::FUNCTION && code->has_deoptimization_support();
-}
-
-
-bool JSFunction::IsBuiltin() {
- return context()->global()->IsJSBuiltinsObject();
-}
-
-
-bool JSFunction::NeedsArgumentsAdaption() {
- return shared()->formal_parameter_count() !=
- SharedFunctionInfo::kDontAdaptArgumentsSentinel;
-}
-
-
-bool JSFunction::IsOptimized() {
- return code()->kind() == Code::OPTIMIZED_FUNCTION;
-}
-
-
-bool JSFunction::IsMarkedForLazyRecompilation() {
- return code() == GetIsolate()->builtins()->builtin(Builtins::kLazyRecompile);
-}
-
-
-Code* JSFunction::code() {
- return Code::cast(unchecked_code());
-}
-
-
-Code* JSFunction::unchecked_code() {
- return reinterpret_cast<Code*>(
- Code::GetObjectFromEntryAddress(FIELD_ADDR(this, kCodeEntryOffset)));
-}
-
-
-void JSFunction::set_code(Code* value) {
- // Skip the write barrier because code is never in new space.
- ASSERT(!HEAP->InNewSpace(value));
- Address entry = value->entry();
- WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
-}
-
-
-void JSFunction::ReplaceCode(Code* code) {
- bool was_optimized = IsOptimized();
- bool is_optimized = code->kind() == Code::OPTIMIZED_FUNCTION;
-
- set_code(code);
-
- // Add/remove the function from the list of optimized functions for this
- // context based on the state change.
- if (!was_optimized && is_optimized) {
- context()->global_context()->AddOptimizedFunction(this);
- }
- if (was_optimized && !is_optimized) {
- context()->global_context()->RemoveOptimizedFunction(this);
- }
-}
-
-
-Context* JSFunction::context() {
- return Context::cast(READ_FIELD(this, kContextOffset));
-}
-
-
-Object* JSFunction::unchecked_context() {
- return READ_FIELD(this, kContextOffset);
-}
-
-
-SharedFunctionInfo* JSFunction::unchecked_shared() {
- return reinterpret_cast<SharedFunctionInfo*>(
- READ_FIELD(this, kSharedFunctionInfoOffset));
-}
-
-
-void JSFunction::set_context(Object* value) {
- ASSERT(value->IsUndefined() || value->IsContext());
- WRITE_FIELD(this, kContextOffset, value);
- WRITE_BARRIER(this, kContextOffset);
-}
-
-ACCESSORS(JSFunction, prototype_or_initial_map, Object,
- kPrototypeOrInitialMapOffset)
-
-
-Map* JSFunction::initial_map() {
- return Map::cast(prototype_or_initial_map());
-}
-
-
-void JSFunction::set_initial_map(Map* value) {
- set_prototype_or_initial_map(value);
-}
-
-
-bool JSFunction::has_initial_map() {
- return prototype_or_initial_map()->IsMap();
-}
-
-
-bool JSFunction::has_instance_prototype() {
- return has_initial_map() || !prototype_or_initial_map()->IsTheHole();
-}
-
-
-bool JSFunction::has_prototype() {
- return map()->has_non_instance_prototype() || has_instance_prototype();
-}
-
-
-Object* JSFunction::instance_prototype() {
- ASSERT(has_instance_prototype());
- if (has_initial_map()) return initial_map()->prototype();
- // When there is no initial map and the prototype is a JSObject, the
- // initial map field is used for the prototype field.
- return prototype_or_initial_map();
-}
-
-
-Object* JSFunction::prototype() {
- ASSERT(has_prototype());
- // If the function's prototype property has been set to a non-JSObject
- // value, that value is stored in the constructor field of the map.
- if (map()->has_non_instance_prototype()) return map()->constructor();
- return instance_prototype();
-}
-
-bool JSFunction::should_have_prototype() {
- return map()->function_with_prototype();
-}
-
-
-bool JSFunction::is_compiled() {
- return code() != GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
-}
-
-
-int JSFunction::NumberOfLiterals() {
- return literals()->length();
-}
-
-
-Object* JSBuiltinsObject::javascript_builtin(Builtins::JavaScript id) {
- ASSERT(id < kJSBuiltinsCount); // id is unsigned.
- return READ_FIELD(this, OffsetOfFunctionWithId(id));
-}
-
-
-void JSBuiltinsObject::set_javascript_builtin(Builtins::JavaScript id,
- Object* value) {
- ASSERT(id < kJSBuiltinsCount); // id is unsigned.
- WRITE_FIELD(this, OffsetOfFunctionWithId(id), value);
- WRITE_BARRIER(this, OffsetOfFunctionWithId(id));
-}
-
-
-Code* JSBuiltinsObject::javascript_builtin_code(Builtins::JavaScript id) {
- ASSERT(id < kJSBuiltinsCount); // id is unsigned.
- return Code::cast(READ_FIELD(this, OffsetOfCodeWithId(id)));
-}
-
-
-void JSBuiltinsObject::set_javascript_builtin_code(Builtins::JavaScript id,
- Code* value) {
- ASSERT(id < kJSBuiltinsCount); // id is unsigned.
- WRITE_FIELD(this, OffsetOfCodeWithId(id), value);
- ASSERT(!HEAP->InNewSpace(value));
-}
-
-
-Address Proxy::proxy() {
- return AddressFrom<Address>(READ_INTPTR_FIELD(this, kProxyOffset));
-}
-
-
-void Proxy::set_proxy(Address value) {
- WRITE_INTPTR_FIELD(this, kProxyOffset, OffsetFrom(value));
-}
-
-
-ACCESSORS(JSValue, value, Object, kValueOffset)
-
-
-JSValue* JSValue::cast(Object* obj) {
- ASSERT(obj->IsJSValue());
- ASSERT(HeapObject::cast(obj)->Size() == JSValue::kSize);
- return reinterpret_cast<JSValue*>(obj);
-}
-
-
-ACCESSORS(JSMessageObject, type, String, kTypeOffset)
-ACCESSORS(JSMessageObject, arguments, JSArray, kArgumentsOffset)
-ACCESSORS(JSMessageObject, script, Object, kScriptOffset)
-ACCESSORS(JSMessageObject, stack_trace, Object, kStackTraceOffset)
-ACCESSORS(JSMessageObject, stack_frames, Object, kStackFramesOffset)
-SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
-SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
-
-
-JSMessageObject* JSMessageObject::cast(Object* obj) {
- ASSERT(obj->IsJSMessageObject());
- ASSERT(HeapObject::cast(obj)->Size() == JSMessageObject::kSize);
- return reinterpret_cast<JSMessageObject*>(obj);
-}
-
-
-INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
-ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
-ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
-
-
-byte* Code::instruction_start() {
- return FIELD_ADDR(this, kHeaderSize);
-}
-
-
-byte* Code::instruction_end() {
- return instruction_start() + instruction_size();
-}
-
-
-int Code::body_size() {
- return RoundUp(instruction_size(), kObjectAlignment);
-}
-
-
-FixedArray* Code::unchecked_deoptimization_data() {
- return reinterpret_cast<FixedArray*>(
- READ_FIELD(this, kDeoptimizationDataOffset));
-}
-
-
-ByteArray* Code::unchecked_relocation_info() {
- return reinterpret_cast<ByteArray*>(READ_FIELD(this, kRelocationInfoOffset));
-}
-
-
-byte* Code::relocation_start() {
- return unchecked_relocation_info()->GetDataStartAddress();
-}
-
-
-int Code::relocation_size() {
- return unchecked_relocation_info()->length();
-}
-
-
-byte* Code::entry() {
- return instruction_start();
-}
-
-
-bool Code::contains(byte* pc) {
- return (instruction_start() <= pc) &&
- (pc <= instruction_start() + instruction_size());
-}
-
-
-ACCESSORS(JSArray, length, Object, kLengthOffset)
-
-
-ACCESSORS(JSRegExp, data, Object, kDataOffset)
-
-
-JSRegExp::Type JSRegExp::TypeTag() {
- Object* data = this->data();
- if (data->IsUndefined()) return JSRegExp::NOT_COMPILED;
- Smi* smi = Smi::cast(FixedArray::cast(data)->get(kTagIndex));
- return static_cast<JSRegExp::Type>(smi->value());
-}
-
-
-int JSRegExp::CaptureCount() {
- switch (TypeTag()) {
- case ATOM:
- return 0;
- case IRREGEXP:
- return Smi::cast(DataAt(kIrregexpCaptureCountIndex))->value();
- default:
- UNREACHABLE();
- return -1;
- }
-}
-
-
-JSRegExp::Flags JSRegExp::GetFlags() {
- ASSERT(this->data()->IsFixedArray());
- Object* data = this->data();
- Smi* smi = Smi::cast(FixedArray::cast(data)->get(kFlagsIndex));
- return Flags(smi->value());
-}
-
-
-String* JSRegExp::Pattern() {
- ASSERT(this->data()->IsFixedArray());
- Object* data = this->data();
- String* pattern= String::cast(FixedArray::cast(data)->get(kSourceIndex));
- return pattern;
-}
-
-
-Object* JSRegExp::DataAt(int index) {
- ASSERT(TypeTag() != NOT_COMPILED);
- return FixedArray::cast(data())->get(index);
-}
-
-
-void JSRegExp::SetDataAt(int index, Object* value) {
- ASSERT(TypeTag() != NOT_COMPILED);
- ASSERT(index >= kDataIndex); // Only implementation data can be set this way.
- FixedArray::cast(data())->set(index, value);
-}
-
-
-JSObject::ElementsKind JSObject::GetElementsKind() {
- if (map()->has_fast_elements()) {
- ASSERT(elements()->map() == GetHeap()->fixed_array_map() ||
- elements()->map() == GetHeap()->fixed_cow_array_map());
- return FAST_ELEMENTS;
- }
- HeapObject* array = elements();
- if (array->IsFixedArray()) {
- // FAST_ELEMENTS or DICTIONARY_ELEMENTS are both stored in a
- // FixedArray, but FAST_ELEMENTS is already handled above.
- ASSERT(array->IsDictionary());
- return DICTIONARY_ELEMENTS;
- }
- ASSERT(!map()->has_fast_elements());
- if (array->IsExternalArray()) {
- switch (array->map()->instance_type()) {
- case EXTERNAL_BYTE_ARRAY_TYPE:
- return EXTERNAL_BYTE_ELEMENTS;
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- return EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
- case EXTERNAL_SHORT_ARRAY_TYPE:
- return EXTERNAL_SHORT_ELEMENTS;
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- return EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
- case EXTERNAL_INT_ARRAY_TYPE:
- return EXTERNAL_INT_ELEMENTS;
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- return EXTERNAL_UNSIGNED_INT_ELEMENTS;
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- return EXTERNAL_PIXEL_ELEMENTS;
- default:
- break;
- }
- }
- ASSERT(array->map()->instance_type() == EXTERNAL_FLOAT_ARRAY_TYPE);
- return EXTERNAL_FLOAT_ELEMENTS;
-}
-
-
-bool JSObject::HasFastElements() {
- return GetElementsKind() == FAST_ELEMENTS;
-}
-
-
-bool JSObject::HasDictionaryElements() {
- return GetElementsKind() == DICTIONARY_ELEMENTS;
-}
-
-
-bool JSObject::HasExternalArrayElements() {
- HeapObject* array = elements();
- ASSERT(array != NULL);
- return array->IsExternalArray();
-}
-
-
-#define EXTERNAL_ELEMENTS_CHECK(name, type) \
-bool JSObject::HasExternal##name##Elements() { \
- HeapObject* array = elements(); \
- ASSERT(array != NULL); \
- if (!array->IsHeapObject()) \
- return false; \
- return array->map()->instance_type() == type; \
-}
-
-
-EXTERNAL_ELEMENTS_CHECK(Byte, EXTERNAL_BYTE_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(UnsignedByte, EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(Short, EXTERNAL_SHORT_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(UnsignedShort,
- EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(Int, EXTERNAL_INT_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(UnsignedInt,
- EXTERNAL_UNSIGNED_INT_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(Float,
- EXTERNAL_FLOAT_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(Pixel, EXTERNAL_PIXEL_ARRAY_TYPE)
-
-
-bool JSObject::HasNamedInterceptor() {
- return map()->has_named_interceptor();
-}
-
-
-bool JSObject::HasIndexedInterceptor() {
- return map()->has_indexed_interceptor();
-}
-
-
-bool JSObject::AllowsSetElementsLength() {
- bool result = elements()->IsFixedArray();
- ASSERT(result == !HasExternalArrayElements());
- return result;
-}
-
-
-MaybeObject* JSObject::EnsureWritableFastElements() {
- ASSERT(HasFastElements());
- FixedArray* elems = FixedArray::cast(elements());
- Isolate* isolate = GetIsolate();
- if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems;
- Object* writable_elems;
- { MaybeObject* maybe_writable_elems = isolate->heap()->CopyFixedArrayWithMap(
- elems, isolate->heap()->fixed_array_map());
- if (!maybe_writable_elems->ToObject(&writable_elems)) {
- return maybe_writable_elems;
- }
- }
- set_elements(FixedArray::cast(writable_elems));
- isolate->counters()->cow_arrays_converted()->Increment();
- return writable_elems;
-}
-
-
-StringDictionary* JSObject::property_dictionary() {
- ASSERT(!HasFastProperties());
- return StringDictionary::cast(properties());
-}
-
-
-NumberDictionary* JSObject::element_dictionary() {
- ASSERT(HasDictionaryElements());
- return NumberDictionary::cast(elements());
-}
-
-
-bool String::IsHashFieldComputed(uint32_t field) {
- return (field & kHashNotComputedMask) == 0;
-}
-
-
-bool String::HasHashCode() {
- return IsHashFieldComputed(hash_field());
-}
-
-
-uint32_t String::Hash() {
- // Fast case: has hash code already been computed?
- uint32_t field = hash_field();
- if (IsHashFieldComputed(field)) return field >> kHashShift;
- // Slow case: compute hash code and set it.
- return ComputeAndSetHash();
-}
-
-
-StringHasher::StringHasher(int length)
- : length_(length),
- raw_running_hash_(0),
- array_index_(0),
- is_array_index_(0 < length_ && length_ <= String::kMaxArrayIndexSize),
- is_first_char_(true),
- is_valid_(true) { }
-
-
-bool StringHasher::has_trivial_hash() {
- return length_ > String::kMaxHashCalcLength;
-}
-
-
-void StringHasher::AddCharacter(uc32 c) {
- // Use the Jenkins one-at-a-time hash function to update the hash
- // for the given character.
- raw_running_hash_ += c;
- raw_running_hash_ += (raw_running_hash_ << 10);
- raw_running_hash_ ^= (raw_running_hash_ >> 6);
- // Incremental array index computation.
- if (is_array_index_) {
- if (c < '0' || c > '9') {
- is_array_index_ = false;
- } else {
- int d = c - '0';
- if (is_first_char_) {
- is_first_char_ = false;
- if (c == '0' && length_ > 1) {
- is_array_index_ = false;
- return;
- }
- }
- if (array_index_ > 429496729U - ((d + 2) >> 3)) {
- is_array_index_ = false;
- } else {
- array_index_ = array_index_ * 10 + d;
- }
- }
- }
-}
-
-
-void StringHasher::AddCharacterNoIndex(uc32 c) {
- ASSERT(!is_array_index());
- raw_running_hash_ += c;
- raw_running_hash_ += (raw_running_hash_ << 10);
- raw_running_hash_ ^= (raw_running_hash_ >> 6);
-}
-
-
-uint32_t StringHasher::GetHash() {
- // Get the calculated raw hash value and do some more bit ops to distribute
- // the hash further. Ensure that we never return zero as the hash value.
- uint32_t result = raw_running_hash_;
- result += (result << 3);
- result ^= (result >> 11);
- result += (result << 15);
- if (result == 0) {
- result = 27;
- }
- return result;
-}
-
-
-template <typename schar>
-uint32_t HashSequentialString(const schar* chars, int length) {
- StringHasher hasher(length);
- if (!hasher.has_trivial_hash()) {
- int i;
- for (i = 0; hasher.is_array_index() && (i < length); i++) {
- hasher.AddCharacter(chars[i]);
- }
- for (; i < length; i++) {
- hasher.AddCharacterNoIndex(chars[i]);
- }
- }
- return hasher.GetHashField();
-}
-
-
-bool String::AsArrayIndex(uint32_t* index) {
- uint32_t field = hash_field();
- if (IsHashFieldComputed(field) && (field & kIsNotArrayIndexMask)) {
- return false;
- }
- return SlowAsArrayIndex(index);
-}
-
-
-Object* JSObject::GetPrototype() {
- return JSObject::cast(this)->map()->prototype();
-}
-
-
-PropertyAttributes JSObject::GetPropertyAttribute(String* key) {
- return GetPropertyAttributeWithReceiver(this, key);
-}
-
-// TODO(504): this may be useful in other places too where JSGlobalProxy
-// is used.
-Object* JSObject::BypassGlobalProxy() {
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return GetHeap()->undefined_value();
- ASSERT(proto->IsJSGlobalObject());
- return proto;
- }
- return this;
-}
-
-
-bool JSObject::HasHiddenPropertiesObject() {
- ASSERT(!IsJSGlobalProxy());
- return GetPropertyAttributePostInterceptor(this,
- GetHeap()->hidden_symbol(),
- false) != ABSENT;
-}
-
-
-Object* JSObject::GetHiddenPropertiesObject() {
- ASSERT(!IsJSGlobalProxy());
- PropertyAttributes attributes;
- // You can't install a getter on a property indexed by the hidden symbol,
- // so we can be sure that GetLocalPropertyPostInterceptor returns a real
- // object.
- Object* result =
- GetLocalPropertyPostInterceptor(this,
- GetHeap()->hidden_symbol(),
- &attributes)->ToObjectUnchecked();
- return result;
-}
-
-
-MaybeObject* JSObject::SetHiddenPropertiesObject(Object* hidden_obj) {
- ASSERT(!IsJSGlobalProxy());
- return SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
- hidden_obj,
- DONT_ENUM,
- kNonStrictMode);
-}
-
-
-bool JSObject::HasElement(uint32_t index) {
- return HasElementWithReceiver(this, index);
-}
-
-
-bool AccessorInfo::all_can_read() {
- return BooleanBit::get(flag(), kAllCanReadBit);
-}
-
-
-void AccessorInfo::set_all_can_read(bool value) {
- set_flag(BooleanBit::set(flag(), kAllCanReadBit, value));
-}
-
-
-bool AccessorInfo::all_can_write() {
- return BooleanBit::get(flag(), kAllCanWriteBit);
-}
-
-
-void AccessorInfo::set_all_can_write(bool value) {
- set_flag(BooleanBit::set(flag(), kAllCanWriteBit, value));
-}
-
-
-bool AccessorInfo::prohibits_overwriting() {
- return BooleanBit::get(flag(), kProhibitsOverwritingBit);
-}
-
-
-void AccessorInfo::set_prohibits_overwriting(bool value) {
- set_flag(BooleanBit::set(flag(), kProhibitsOverwritingBit, value));
-}
-
-
-PropertyAttributes AccessorInfo::property_attributes() {
- return AttributesField::decode(static_cast<uint32_t>(flag()->value()));
-}
-
-
-void AccessorInfo::set_property_attributes(PropertyAttributes attributes) {
- ASSERT(AttributesField::is_valid(attributes));
- int rest_value = flag()->value() & ~AttributesField::mask();
- set_flag(Smi::FromInt(rest_value | AttributesField::encode(attributes)));
-}
-
-template<typename Shape, typename Key>
-void Dictionary<Shape, Key>::SetEntry(int entry,
- Object* key,
- Object* value,
- PropertyDetails details) {
- ASSERT(!key->IsString() || details.IsDeleted() || details.index() > 0);
- int index = HashTable<Shape, Key>::EntryToIndex(entry);
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = FixedArray::GetWriteBarrierMode(no_gc);
- FixedArray::set(index, key, mode);
- FixedArray::set(index+1, value, mode);
- FixedArray::fast_set(this, index+2, details.AsSmi());
-}
-
-
-bool NumberDictionaryShape::IsMatch(uint32_t key, Object* other) {
- ASSERT(other->IsNumber());
- return key == static_cast<uint32_t>(other->Number());
-}
-
-
-uint32_t NumberDictionaryShape::Hash(uint32_t key) {
- return ComputeIntegerHash(key);
-}
-
-
-uint32_t NumberDictionaryShape::HashForObject(uint32_t key, Object* other) {
- ASSERT(other->IsNumber());
- return ComputeIntegerHash(static_cast<uint32_t>(other->Number()));
-}
-
-
-MaybeObject* NumberDictionaryShape::AsObject(uint32_t key) {
- return Isolate::Current()->heap()->NumberFromUint32(key);
-}
-
-
-bool StringDictionaryShape::IsMatch(String* key, Object* other) {
- // We know that all entries in a hash table had their hash keys created.
- // Use that knowledge to have fast failure.
- if (key->Hash() != String::cast(other)->Hash()) return false;
- return key->Equals(String::cast(other));
-}
-
-
-uint32_t StringDictionaryShape::Hash(String* key) {
- return key->Hash();
-}
-
-
-uint32_t StringDictionaryShape::HashForObject(String* key, Object* other) {
- return String::cast(other)->Hash();
-}
-
-
-MaybeObject* StringDictionaryShape::AsObject(String* key) {
- return key;
-}
-
-
-void Map::ClearCodeCache(Heap* heap) {
- // No write barrier is needed since empty_fixed_array is not in new space.
- // Please note this function is used during marking:
- // - MarkCompactCollector::MarkUnmarkedObject
- ASSERT(!heap->InNewSpace(heap->raw_unchecked_empty_fixed_array()));
- WRITE_FIELD(this, kCodeCacheOffset, heap->raw_unchecked_empty_fixed_array());
-}
-
-
-void JSArray::EnsureSize(int required_size) {
- ASSERT(HasFastElements());
- FixedArray* elts = FixedArray::cast(elements());
- const int kArraySizeThatFitsComfortablyInNewSpace = 128;
- if (elts->length() < required_size) {
- // Doubling in size would be overkill, but leave some slack to avoid
- // constantly growing.
- Expand(required_size + (required_size >> 3));
- // It's a performance benefit to keep a frequently used array in new-space.
- } else if (!GetHeap()->new_space()->Contains(elts) &&
- required_size < kArraySizeThatFitsComfortablyInNewSpace) {
- // Expand will allocate a new backing store in new space even if the size
- // we asked for isn't larger than what we had before.
- Expand(required_size);
- }
-}
-
-
-void JSArray::set_length(Smi* length) {
- set_length(static_cast<Object*>(length), SKIP_WRITE_BARRIER);
-}
-
-
-void JSArray::SetContent(FixedArray* storage) {
- set_length(Smi::FromInt(storage->length()));
- set_elements(storage);
-}
-
-
-MaybeObject* FixedArray::Copy() {
- if (length() == 0) return this;
- return GetHeap()->CopyFixedArray(this);
-}
-
-
-Relocatable::Relocatable(Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- isolate_ = isolate;
- prev_ = isolate->relocatable_top();
- isolate->set_relocatable_top(this);
-}
-
-
-Relocatable::~Relocatable() {
- ASSERT(isolate_ == Isolate::Current());
- ASSERT_EQ(isolate_->relocatable_top(), this);
- isolate_->set_relocatable_top(prev_);
-}
-
-
-int JSObject::BodyDescriptor::SizeOf(Map* map, HeapObject* object) {
- return map->instance_size();
-}
-
-
-void Proxy::ProxyIterateBody(ObjectVisitor* v) {
- v->VisitExternalReference(
- reinterpret_cast<Address *>(FIELD_ADDR(this, kProxyOffset)));
-}
-
-
-template<typename StaticVisitor>
-void Proxy::ProxyIterateBody() {
- StaticVisitor::VisitExternalReference(
- reinterpret_cast<Address *>(FIELD_ADDR(this, kProxyOffset)));
-}
-
-
-void ExternalAsciiString::ExternalAsciiStringIterateBody(ObjectVisitor* v) {
- typedef v8::String::ExternalAsciiStringResource Resource;
- v->VisitExternalAsciiString(
- reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
-}
-
-
-template<typename StaticVisitor>
-void ExternalAsciiString::ExternalAsciiStringIterateBody() {
- typedef v8::String::ExternalAsciiStringResource Resource;
- StaticVisitor::VisitExternalAsciiString(
- reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
-}
-
-
-void ExternalTwoByteString::ExternalTwoByteStringIterateBody(ObjectVisitor* v) {
- typedef v8::String::ExternalStringResource Resource;
- v->VisitExternalTwoByteString(
- reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
-}
-
-
-template<typename StaticVisitor>
-void ExternalTwoByteString::ExternalTwoByteStringIterateBody() {
- typedef v8::String::ExternalStringResource Resource;
- StaticVisitor::VisitExternalTwoByteString(
- reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
-}
-
-#define SLOT_ADDR(obj, offset) \
- reinterpret_cast<Object**>((obj)->address() + offset)
-
-template<int start_offset, int end_offset, int size>
-void FixedBodyDescriptor<start_offset, end_offset, size>::IterateBody(
- HeapObject* obj,
- ObjectVisitor* v) {
- v->VisitPointers(SLOT_ADDR(obj, start_offset), SLOT_ADDR(obj, end_offset));
-}
-
-
-template<int start_offset>
-void FlexibleBodyDescriptor<start_offset>::IterateBody(HeapObject* obj,
- int object_size,
- ObjectVisitor* v) {
- v->VisitPointers(SLOT_ADDR(obj, start_offset), SLOT_ADDR(obj, object_size));
-}
-
-#undef SLOT_ADDR
-
-
-#undef CAST_ACCESSOR
-#undef INT_ACCESSORS
-#undef SMI_ACCESSORS
-#undef ACCESSORS
-#undef FIELD_ADDR
-#undef READ_FIELD
-#undef WRITE_FIELD
-#undef WRITE_BARRIER
-#undef CONDITIONAL_WRITE_BARRIER
-#undef READ_MEMADDR_FIELD
-#undef WRITE_MEMADDR_FIELD
-#undef READ_DOUBLE_FIELD
-#undef WRITE_DOUBLE_FIELD
-#undef READ_INT_FIELD
-#undef WRITE_INT_FIELD
-#undef READ_SHORT_FIELD
-#undef WRITE_SHORT_FIELD
-#undef READ_BYTE_FIELD
-#undef WRITE_BYTE_FIELD
-
-
-} } // namespace v8::internal
-
-#endif // V8_OBJECTS_INL_H_
diff --git a/src/3rdparty/v8/src/objects-printer.cc b/src/3rdparty/v8/src/objects-printer.cc
deleted file mode 100644
index b7e2fdd..0000000
--- a/src/3rdparty/v8/src/objects-printer.cc
+++ /dev/null
@@ -1,801 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "disassembler.h"
-#include "disasm.h"
-#include "jsregexp.h"
-#include "objects-visiting.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef OBJECT_PRINT
-
-static const char* TypeToString(InstanceType type);
-
-
-void MaybeObject::Print(FILE* out) {
- Object* this_as_object;
- if (ToObject(&this_as_object)) {
- if (this_as_object->IsSmi()) {
- Smi::cast(this_as_object)->SmiPrint(out);
- } else {
- HeapObject::cast(this_as_object)->HeapObjectPrint(out);
- }
- } else {
- Failure::cast(this)->FailurePrint(out);
- }
- Flush(out);
-}
-
-
-void MaybeObject::PrintLn(FILE* out) {
- Print(out);
- PrintF(out, "\n");
-}
-
-
-void HeapObject::PrintHeader(FILE* out, const char* id) {
- PrintF(out, "%p: [%s]\n", reinterpret_cast<void*>(this), id);
-}
-
-
-void HeapObject::HeapObjectPrint(FILE* out) {
- InstanceType instance_type = map()->instance_type();
-
- HandleScope scope;
- if (instance_type < FIRST_NONSTRING_TYPE) {
- String::cast(this)->StringPrint(out);
- return;
- }
-
- switch (instance_type) {
- case MAP_TYPE:
- Map::cast(this)->MapPrint(out);
- break;
- case HEAP_NUMBER_TYPE:
- HeapNumber::cast(this)->HeapNumberPrint(out);
- break;
- case FIXED_ARRAY_TYPE:
- FixedArray::cast(this)->FixedArrayPrint(out);
- break;
- case BYTE_ARRAY_TYPE:
- ByteArray::cast(this)->ByteArrayPrint(out);
- break;
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- ExternalPixelArray::cast(this)->ExternalPixelArrayPrint(out);
- break;
- case EXTERNAL_BYTE_ARRAY_TYPE:
- ExternalByteArray::cast(this)->ExternalByteArrayPrint(out);
- break;
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- ExternalUnsignedByteArray::cast(this)
- ->ExternalUnsignedByteArrayPrint(out);
- break;
- case EXTERNAL_SHORT_ARRAY_TYPE:
- ExternalShortArray::cast(this)->ExternalShortArrayPrint(out);
- break;
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- ExternalUnsignedShortArray::cast(this)
- ->ExternalUnsignedShortArrayPrint(out);
- break;
- case EXTERNAL_INT_ARRAY_TYPE:
- ExternalIntArray::cast(this)->ExternalIntArrayPrint(out);
- break;
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- ExternalUnsignedIntArray::cast(this)->ExternalUnsignedIntArrayPrint(out);
- break;
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- ExternalFloatArray::cast(this)->ExternalFloatArrayPrint(out);
- break;
- case FILLER_TYPE:
- PrintF(out, "filler");
- break;
- case JS_OBJECT_TYPE: // fall through
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_ARRAY_TYPE:
- case JS_REGEXP_TYPE:
- JSObject::cast(this)->JSObjectPrint(out);
- break;
- case ODDBALL_TYPE:
- Oddball::cast(this)->to_string()->Print(out);
- break;
- case JS_FUNCTION_TYPE:
- JSFunction::cast(this)->JSFunctionPrint(out);
- break;
- case JS_GLOBAL_PROXY_TYPE:
- JSGlobalProxy::cast(this)->JSGlobalProxyPrint(out);
- break;
- case JS_GLOBAL_OBJECT_TYPE:
- JSGlobalObject::cast(this)->JSGlobalObjectPrint(out);
- break;
- case JS_BUILTINS_OBJECT_TYPE:
- JSBuiltinsObject::cast(this)->JSBuiltinsObjectPrint(out);
- break;
- case JS_VALUE_TYPE:
- PrintF(out, "Value wrapper around:");
- JSValue::cast(this)->value()->Print(out);
- break;
- case CODE_TYPE:
- Code::cast(this)->CodePrint(out);
- break;
- case PROXY_TYPE:
- Proxy::cast(this)->ProxyPrint(out);
- break;
- case SHARED_FUNCTION_INFO_TYPE:
- SharedFunctionInfo::cast(this)->SharedFunctionInfoPrint(out);
- break;
- case JS_MESSAGE_OBJECT_TYPE:
- JSMessageObject::cast(this)->JSMessageObjectPrint(out);
- break;
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
- JSGlobalPropertyCell::cast(this)->JSGlobalPropertyCellPrint(out);
- break;
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
- case NAME##_TYPE: \
- Name::cast(this)->Name##Print(out); \
- break;
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
-
- default:
- PrintF(out, "UNKNOWN TYPE %d", map()->instance_type());
- UNREACHABLE();
- break;
- }
-}
-
-
-void ByteArray::ByteArrayPrint(FILE* out) {
- PrintF(out, "byte array, data starts at %p", GetDataStartAddress());
-}
-
-
-void ExternalPixelArray::ExternalPixelArrayPrint(FILE* out) {
- PrintF(out, "external pixel array");
-}
-
-
-void ExternalByteArray::ExternalByteArrayPrint(FILE* out) {
- PrintF(out, "external byte array");
-}
-
-
-void ExternalUnsignedByteArray::ExternalUnsignedByteArrayPrint(FILE* out) {
- PrintF(out, "external unsigned byte array");
-}
-
-
-void ExternalShortArray::ExternalShortArrayPrint(FILE* out) {
- PrintF(out, "external short array");
-}
-
-
-void ExternalUnsignedShortArray::ExternalUnsignedShortArrayPrint(FILE* out) {
- PrintF(out, "external unsigned short array");
-}
-
-
-void ExternalIntArray::ExternalIntArrayPrint(FILE* out) {
- PrintF(out, "external int array");
-}
-
-
-void ExternalUnsignedIntArray::ExternalUnsignedIntArrayPrint(FILE* out) {
- PrintF(out, "external unsigned int array");
-}
-
-
-void ExternalFloatArray::ExternalFloatArrayPrint(FILE* out) {
- PrintF(out, "external float array");
-}
-
-
-void JSObject::PrintProperties(FILE* out) {
- if (HasFastProperties()) {
- DescriptorArray* descs = map()->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- PrintF(out, " ");
- descs->GetKey(i)->StringPrint(out);
- PrintF(out, ": ");
- switch (descs->GetType(i)) {
- case FIELD: {
- int index = descs->GetFieldIndex(i);
- FastPropertyAt(index)->ShortPrint(out);
- PrintF(out, " (field at offset %d)\n", index);
- break;
- }
- case CONSTANT_FUNCTION:
- descs->GetConstantFunction(i)->ShortPrint(out);
- PrintF(out, " (constant function)\n");
- break;
- case CALLBACKS:
- descs->GetCallbacksObject(i)->ShortPrint(out);
- PrintF(out, " (callback)\n");
- break;
- case MAP_TRANSITION:
- PrintF(out, " (map transition)\n");
- break;
- case CONSTANT_TRANSITION:
- PrintF(out, " (constant transition)\n");
- break;
- case NULL_DESCRIPTOR:
- PrintF(out, " (null descriptor)\n");
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- } else {
- property_dictionary()->Print(out);
- }
-}
-
-
-void JSObject::PrintElements(FILE* out) {
- switch (GetElementsKind()) {
- case FAST_ELEMENTS: {
- // Print in array notation for non-sparse arrays.
- FixedArray* p = FixedArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: ", i);
- p->get(i)->ShortPrint(out);
- PrintF(out, "\n");
- }
- break;
- }
- case EXTERNAL_PIXEL_ELEMENTS: {
- ExternalPixelArray* p = ExternalPixelArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, p->get(i));
- }
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS: {
- ExternalByteArray* p = ExternalByteArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get(i)));
- }
- break;
- }
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
- ExternalUnsignedByteArray* p =
- ExternalUnsignedByteArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get(i)));
- }
- break;
- }
- case EXTERNAL_SHORT_ELEMENTS: {
- ExternalShortArray* p = ExternalShortArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get(i)));
- }
- break;
- }
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
- ExternalUnsignedShortArray* p =
- ExternalUnsignedShortArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get(i)));
- }
- break;
- }
- case EXTERNAL_INT_ELEMENTS: {
- ExternalIntArray* p = ExternalIntArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get(i)));
- }
- break;
- }
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- ExternalUnsignedIntArray* p =
- ExternalUnsignedIntArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get(i)));
- }
- break;
- }
- case EXTERNAL_FLOAT_ELEMENTS: {
- ExternalFloatArray* p = ExternalFloatArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %f\n", i, p->get(i));
- }
- break;
- }
- case DICTIONARY_ELEMENTS:
- elements()->Print(out);
- break;
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void JSObject::JSObjectPrint(FILE* out) {
- PrintF(out, "%p: [JSObject]\n", reinterpret_cast<void*>(this));
- PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
- PrintF(out, " - prototype = %p\n", reinterpret_cast<void*>(GetPrototype()));
- PrintF(out, " {\n");
- PrintProperties(out);
- PrintElements(out);
- PrintF(out, " }\n");
-}
-
-
-static const char* TypeToString(InstanceType type) {
- switch (type) {
- case INVALID_TYPE: return "INVALID";
- case MAP_TYPE: return "MAP";
- case HEAP_NUMBER_TYPE: return "HEAP_NUMBER";
- case SYMBOL_TYPE: return "SYMBOL";
- case ASCII_SYMBOL_TYPE: return "ASCII_SYMBOL";
- case CONS_SYMBOL_TYPE: return "CONS_SYMBOL";
- case CONS_ASCII_SYMBOL_TYPE: return "CONS_ASCII_SYMBOL";
- case EXTERNAL_ASCII_SYMBOL_TYPE:
- case EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE:
- case EXTERNAL_SYMBOL_TYPE: return "EXTERNAL_SYMBOL";
- case ASCII_STRING_TYPE: return "ASCII_STRING";
- case STRING_TYPE: return "TWO_BYTE_STRING";
- case CONS_STRING_TYPE:
- case CONS_ASCII_STRING_TYPE: return "CONS_STRING";
- case EXTERNAL_ASCII_STRING_TYPE:
- case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
- case EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING";
- case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
- case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
- case EXTERNAL_PIXEL_ARRAY_TYPE: return "EXTERNAL_PIXEL_ARRAY";
- case EXTERNAL_BYTE_ARRAY_TYPE: return "EXTERNAL_BYTE_ARRAY";
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- return "EXTERNAL_UNSIGNED_BYTE_ARRAY";
- case EXTERNAL_SHORT_ARRAY_TYPE: return "EXTERNAL_SHORT_ARRAY";
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- return "EXTERNAL_UNSIGNED_SHORT_ARRAY";
- case EXTERNAL_INT_ARRAY_TYPE: return "EXTERNAL_INT_ARRAY";
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- return "EXTERNAL_UNSIGNED_INT_ARRAY";
- case EXTERNAL_FLOAT_ARRAY_TYPE: return "EXTERNAL_FLOAT_ARRAY";
- case FILLER_TYPE: return "FILLER";
- case JS_OBJECT_TYPE: return "JS_OBJECT";
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE: return "JS_CONTEXT_EXTENSION_OBJECT";
- case ODDBALL_TYPE: return "ODDBALL";
- case JS_GLOBAL_PROPERTY_CELL_TYPE: return "JS_GLOBAL_PROPERTY_CELL";
- case SHARED_FUNCTION_INFO_TYPE: return "SHARED_FUNCTION_INFO";
- case JS_FUNCTION_TYPE: return "JS_FUNCTION";
- case CODE_TYPE: return "CODE";
- case JS_ARRAY_TYPE: return "JS_ARRAY";
- case JS_REGEXP_TYPE: return "JS_REGEXP";
- case JS_VALUE_TYPE: return "JS_VALUE";
- case JS_GLOBAL_OBJECT_TYPE: return "JS_GLOBAL_OBJECT";
- case JS_BUILTINS_OBJECT_TYPE: return "JS_BUILTINS_OBJECT";
- case JS_GLOBAL_PROXY_TYPE: return "JS_GLOBAL_PROXY";
- case PROXY_TYPE: return "PROXY";
- case LAST_STRING_TYPE: return "LAST_STRING_TYPE";
- case JS_MESSAGE_OBJECT_TYPE: return "JS_MESSAGE_OBJECT_TYPE";
-#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return #NAME;
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- }
- return "UNKNOWN";
-}
-
-
-void Map::MapPrint(FILE* out) {
- HeapObject::PrintHeader(out, "Map");
- PrintF(out, " - type: %s\n", TypeToString(instance_type()));
- PrintF(out, " - instance size: %d\n", instance_size());
- PrintF(out, " - inobject properties: %d\n", inobject_properties());
- PrintF(out, " - pre-allocated property fields: %d\n",
- pre_allocated_property_fields());
- PrintF(out, " - unused property fields: %d\n", unused_property_fields());
- if (is_hidden_prototype()) {
- PrintF(out, " - hidden_prototype\n");
- }
- if (has_named_interceptor()) {
- PrintF(out, " - named_interceptor\n");
- }
- if (has_indexed_interceptor()) {
- PrintF(out, " - indexed_interceptor\n");
- }
- if (is_undetectable()) {
- PrintF(out, " - undetectable\n");
- }
- if (has_instance_call_handler()) {
- PrintF(out, " - instance_call_handler\n");
- }
- if (is_access_check_needed()) {
- PrintF(out, " - access_check_needed\n");
- }
- PrintF(out, " - instance descriptors: ");
- instance_descriptors()->ShortPrint(out);
- PrintF(out, "\n - prototype: ");
- prototype()->ShortPrint(out);
- PrintF(out, "\n - constructor: ");
- constructor()->ShortPrint(out);
- PrintF(out, "\n");
-}
-
-
-void CodeCache::CodeCachePrint(FILE* out) {
- HeapObject::PrintHeader(out, "CodeCache");
- PrintF(out, "\n - default_cache: ");
- default_cache()->ShortPrint(out);
- PrintF(out, "\n - normal_type_cache: ");
- normal_type_cache()->ShortPrint(out);
-}
-
-
-void FixedArray::FixedArrayPrint(FILE* out) {
- HeapObject::PrintHeader(out, "FixedArray");
- PrintF(out, " - length: %d", length());
- for (int i = 0; i < length(); i++) {
- PrintF(out, "\n [%d]: ", i);
- get(i)->ShortPrint(out);
- }
- PrintF(out, "\n");
-}
-
-
-void JSValue::JSValuePrint(FILE* out) {
- HeapObject::PrintHeader(out, "ValueObject");
- value()->Print(out);
-}
-
-
-void JSMessageObject::JSMessageObjectPrint(FILE* out) {
- HeapObject::PrintHeader(out, "JSMessageObject");
- PrintF(out, " - type: ");
- type()->ShortPrint(out);
- PrintF(out, "\n - arguments: ");
- arguments()->ShortPrint(out);
- PrintF(out, "\n - start_position: %d", start_position());
- PrintF(out, "\n - end_position: %d", end_position());
- PrintF(out, "\n - script: ");
- script()->ShortPrint(out);
- PrintF(out, "\n - stack_trace: ");
- stack_trace()->ShortPrint(out);
- PrintF(out, "\n - stack_frames: ");
- stack_frames()->ShortPrint(out);
- PrintF(out, "\n");
-}
-
-
-void String::StringPrint(FILE* out) {
- if (StringShape(this).IsSymbol()) {
- PrintF(out, "#");
- } else if (StringShape(this).IsCons()) {
- PrintF(out, "c\"");
- } else {
- PrintF(out, "\"");
- }
-
- const char truncated_epilogue[] = "...<truncated>";
- int len = length();
- if (!FLAG_use_verbose_printer) {
- if (len > 100) {
- len = 100 - sizeof(truncated_epilogue);
- }
- }
- for (int i = 0; i < len; i++) {
- PrintF(out, "%c", Get(i));
- }
- if (len != length()) {
- PrintF(out, "%s", truncated_epilogue);
- }
-
- if (!StringShape(this).IsSymbol()) PrintF(out, "\"");
-}
-
-
-void JSFunction::JSFunctionPrint(FILE* out) {
- HeapObject::PrintHeader(out, "Function");
- PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
- PrintF(out, " - initial_map = ");
- if (has_initial_map()) {
- initial_map()->ShortPrint(out);
- }
- PrintF(out, "\n - shared_info = ");
- shared()->ShortPrint(out);
- PrintF(out, "\n - name = ");
- shared()->name()->Print(out);
- PrintF(out, "\n - context = ");
- unchecked_context()->ShortPrint(out);
- PrintF(out, "\n - code = ");
- code()->ShortPrint(out);
- PrintF(out, "\n");
-
- PrintProperties(out);
- PrintElements(out);
-
- PrintF(out, "\n");
-}
-
-
-void SharedFunctionInfo::SharedFunctionInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "SharedFunctionInfo");
- PrintF(out, " - name: ");
- name()->ShortPrint(out);
- PrintF(out, "\n - expected_nof_properties: %d", expected_nof_properties());
- PrintF(out, "\n - instance class name = ");
- instance_class_name()->Print(out);
- PrintF(out, "\n - code = ");
- code()->ShortPrint(out);
- PrintF(out, "\n - source code = ");
- GetSourceCode()->ShortPrint(out);
- // Script files are often large, hard to read.
- // PrintF(out, "\n - script =");
- // script()->Print(out);
- PrintF(out, "\n - function token position = %d", function_token_position());
- PrintF(out, "\n - start position = %d", start_position());
- PrintF(out, "\n - end position = %d", end_position());
- PrintF(out, "\n - is expression = %d", is_expression());
- PrintF(out, "\n - debug info = ");
- debug_info()->ShortPrint(out);
- PrintF(out, "\n - length = %d", length());
- PrintF(out, "\n - has_only_simple_this_property_assignments = %d",
- has_only_simple_this_property_assignments());
- PrintF(out, "\n - this_property_assignments = ");
- this_property_assignments()->ShortPrint(out);
- PrintF(out, "\n");
-}
-
-
-void JSGlobalProxy::JSGlobalProxyPrint(FILE* out) {
- PrintF(out, "global_proxy");
- JSObjectPrint(out);
- PrintF(out, "context : ");
- context()->ShortPrint(out);
- PrintF(out, "\n");
-}
-
-
-void JSGlobalObject::JSGlobalObjectPrint(FILE* out) {
- PrintF(out, "global ");
- JSObjectPrint(out);
- PrintF(out, "global context : ");
- global_context()->ShortPrint(out);
- PrintF(out, "\n");
-}
-
-
-void JSBuiltinsObject::JSBuiltinsObjectPrint(FILE* out) {
- PrintF(out, "builtins ");
- JSObjectPrint(out);
-}
-
-
-void JSGlobalPropertyCell::JSGlobalPropertyCellPrint(FILE* out) {
- HeapObject::PrintHeader(out, "JSGlobalPropertyCell");
-}
-
-
-void Code::CodePrint(FILE* out) {
- HeapObject::PrintHeader(out, "Code");
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_use_verbose_printer) {
- Disassemble(NULL, out);
- }
-#endif
-}
-
-
-void Proxy::ProxyPrint(FILE* out) {
- PrintF(out, "proxy to %p", proxy());
-}
-
-
-void AccessorInfo::AccessorInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "AccessorInfo");
- PrintF(out, "\n - getter: ");
- getter()->ShortPrint(out);
- PrintF(out, "\n - setter: ");
- setter()->ShortPrint(out);
- PrintF(out, "\n - name: ");
- name()->ShortPrint(out);
- PrintF(out, "\n - data: ");
- data()->ShortPrint(out);
- PrintF(out, "\n - flag: ");
- flag()->ShortPrint(out);
-}
-
-
-void AccessCheckInfo::AccessCheckInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "AccessCheckInfo");
- PrintF(out, "\n - named_callback: ");
- named_callback()->ShortPrint(out);
- PrintF(out, "\n - indexed_callback: ");
- indexed_callback()->ShortPrint(out);
- PrintF(out, "\n - data: ");
- data()->ShortPrint(out);
-}
-
-
-void InterceptorInfo::InterceptorInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "InterceptorInfo");
- PrintF(out, "\n - getter: ");
- getter()->ShortPrint(out);
- PrintF(out, "\n - setter: ");
- setter()->ShortPrint(out);
- PrintF(out, "\n - query: ");
- query()->ShortPrint(out);
- PrintF(out, "\n - deleter: ");
- deleter()->ShortPrint(out);
- PrintF(out, "\n - enumerator: ");
- enumerator()->ShortPrint(out);
- PrintF(out, "\n - data: ");
- data()->ShortPrint(out);
-}
-
-
-void CallHandlerInfo::CallHandlerInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "CallHandlerInfo");
- PrintF(out, "\n - callback: ");
- callback()->ShortPrint(out);
- PrintF(out, "\n - data: ");
- data()->ShortPrint(out);
- PrintF(out, "\n - call_stub_cache: ");
-}
-
-
-void FunctionTemplateInfo::FunctionTemplateInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "FunctionTemplateInfo");
- PrintF(out, "\n - class name: ");
- class_name()->ShortPrint(out);
- PrintF(out, "\n - tag: ");
- tag()->ShortPrint(out);
- PrintF(out, "\n - property_list: ");
- property_list()->ShortPrint(out);
- PrintF(out, "\n - serial_number: ");
- serial_number()->ShortPrint(out);
- PrintF(out, "\n - call_code: ");
- call_code()->ShortPrint(out);
- PrintF(out, "\n - property_accessors: ");
- property_accessors()->ShortPrint(out);
- PrintF(out, "\n - prototype_template: ");
- prototype_template()->ShortPrint(out);
- PrintF(out, "\n - parent_template: ");
- parent_template()->ShortPrint(out);
- PrintF(out, "\n - named_property_handler: ");
- named_property_handler()->ShortPrint(out);
- PrintF(out, "\n - indexed_property_handler: ");
- indexed_property_handler()->ShortPrint(out);
- PrintF(out, "\n - instance_template: ");
- instance_template()->ShortPrint(out);
- PrintF(out, "\n - signature: ");
- signature()->ShortPrint(out);
- PrintF(out, "\n - access_check_info: ");
- access_check_info()->ShortPrint(out);
- PrintF(out, "\n - hidden_prototype: %s",
- hidden_prototype() ? "true" : "false");
- PrintF(out, "\n - undetectable: %s", undetectable() ? "true" : "false");
- PrintF(out, "\n - need_access_check: %s",
- needs_access_check() ? "true" : "false");
-}
-
-
-void ObjectTemplateInfo::ObjectTemplateInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "ObjectTemplateInfo");
- PrintF(out, "\n - constructor: ");
- constructor()->ShortPrint(out);
- PrintF(out, "\n - internal_field_count: ");
- internal_field_count()->ShortPrint(out);
-}
-
-
-void SignatureInfo::SignatureInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "SignatureInfo");
- PrintF(out, "\n - receiver: ");
- receiver()->ShortPrint(out);
- PrintF(out, "\n - args: ");
- args()->ShortPrint(out);
-}
-
-
-void TypeSwitchInfo::TypeSwitchInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "TypeSwitchInfo");
- PrintF(out, "\n - types: ");
- types()->ShortPrint(out);
-}
-
-
-void Script::ScriptPrint(FILE* out) {
- HeapObject::PrintHeader(out, "Script");
- PrintF(out, "\n - source: ");
- source()->ShortPrint(out);
- PrintF(out, "\n - name: ");
- name()->ShortPrint(out);
- PrintF(out, "\n - line_offset: ");
- line_offset()->ShortPrint(out);
- PrintF(out, "\n - column_offset: ");
- column_offset()->ShortPrint(out);
- PrintF(out, "\n - type: ");
- type()->ShortPrint(out);
- PrintF(out, "\n - id: ");
- id()->ShortPrint(out);
- PrintF(out, "\n - data: ");
- data()->ShortPrint(out);
- PrintF(out, "\n - context data: ");
- context_data()->ShortPrint(out);
- PrintF(out, "\n - wrapper: ");
- wrapper()->ShortPrint(out);
- PrintF(out, "\n - compilation type: ");
- compilation_type()->ShortPrint(out);
- PrintF(out, "\n - line ends: ");
- line_ends()->ShortPrint(out);
- PrintF(out, "\n - eval from shared: ");
- eval_from_shared()->ShortPrint(out);
- PrintF(out, "\n - eval from instructions offset: ");
- eval_from_instructions_offset()->ShortPrint(out);
- PrintF(out, "\n");
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void DebugInfo::DebugInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "DebugInfo");
- PrintF(out, "\n - shared: ");
- shared()->ShortPrint(out);
- PrintF(out, "\n - original_code: ");
- original_code()->ShortPrint(out);
- PrintF(out, "\n - code: ");
- code()->ShortPrint(out);
- PrintF(out, "\n - break_points: ");
- break_points()->Print(out);
-}
-
-
-void BreakPointInfo::BreakPointInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "BreakPointInfo");
- PrintF(out, "\n - code_position: %d", code_position()->value());
- PrintF(out, "\n - source_position: %d", source_position()->value());
- PrintF(out, "\n - statement_position: %d", statement_position()->value());
- PrintF(out, "\n - break_point_objects: ");
- break_point_objects()->ShortPrint(out);
-}
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-void DescriptorArray::PrintDescriptors(FILE* out) {
- PrintF(out, "Descriptor array %d\n", number_of_descriptors());
- for (int i = 0; i < number_of_descriptors(); i++) {
- PrintF(out, " %d: ", i);
- Descriptor desc;
- Get(i, &desc);
- desc.Print(out);
- }
- PrintF(out, "\n");
-}
-
-
-#endif // OBJECT_PRINT
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/objects-visiting.cc b/src/3rdparty/v8/src/objects-visiting.cc
deleted file mode 100644
index 5a23658..0000000
--- a/src/3rdparty/v8/src/objects-visiting.cc
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "ic-inl.h"
-#include "objects-visiting.h"
-
-namespace v8 {
-namespace internal {
-
-
-static inline bool IsShortcutCandidate(int type) {
- return ((type & kShortcutTypeMask) == kShortcutTypeTag);
-}
-
-
-StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
- int instance_type,
- int instance_size) {
- if (instance_type < FIRST_NONSTRING_TYPE) {
- switch (instance_type & kStringRepresentationMask) {
- case kSeqStringTag:
- if ((instance_type & kStringEncodingMask) == kAsciiStringTag) {
- return kVisitSeqAsciiString;
- } else {
- return kVisitSeqTwoByteString;
- }
-
- case kConsStringTag:
- if (IsShortcutCandidate(instance_type)) {
- return kVisitShortcutCandidate;
- } else {
- return kVisitConsString;
- }
-
- case kExternalStringTag:
- return GetVisitorIdForSize(kVisitDataObject,
- kVisitDataObjectGeneric,
- ExternalString::kSize);
- }
- UNREACHABLE();
- }
-
- switch (instance_type) {
- case BYTE_ARRAY_TYPE:
- return kVisitByteArray;
-
- case FIXED_ARRAY_TYPE:
- return kVisitFixedArray;
-
- case ODDBALL_TYPE:
- return kVisitOddball;
-
- case MAP_TYPE:
- return kVisitMap;
-
- case CODE_TYPE:
- return kVisitCode;
-
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
- return kVisitPropertyCell;
-
- case SHARED_FUNCTION_INFO_TYPE:
- return kVisitSharedFunctionInfo;
-
- case PROXY_TYPE:
- return GetVisitorIdForSize(kVisitDataObject,
- kVisitDataObjectGeneric,
- Proxy::kSize);
-
- case FILLER_TYPE:
- return kVisitDataObjectGeneric;
-
- case JS_OBJECT_TYPE:
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_VALUE_TYPE:
- case JS_ARRAY_TYPE:
- case JS_REGEXP_TYPE:
- case JS_GLOBAL_PROXY_TYPE:
- case JS_GLOBAL_OBJECT_TYPE:
- case JS_BUILTINS_OBJECT_TYPE:
- case JS_MESSAGE_OBJECT_TYPE:
- return GetVisitorIdForSize(kVisitJSObject,
- kVisitJSObjectGeneric,
- instance_size);
-
- case JS_FUNCTION_TYPE:
- return kVisitJSFunction;
-
- case HEAP_NUMBER_TYPE:
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- case EXTERNAL_BYTE_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- case EXTERNAL_SHORT_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- case EXTERNAL_INT_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- return GetVisitorIdForSize(kVisitDataObject,
- kVisitDataObjectGeneric,
- instance_size);
-
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
- case NAME##_TYPE:
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- return GetVisitorIdForSize(kVisitStruct,
- kVisitStructGeneric,
- instance_size);
-
- default:
- UNREACHABLE();
- return kVisitorIdCount;
- }
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/objects-visiting.h b/src/3rdparty/v8/src/objects-visiting.h
deleted file mode 100644
index da955da..0000000
--- a/src/3rdparty/v8/src/objects-visiting.h
+++ /dev/null
@@ -1,422 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_OBJECTS_VISITING_H_
-#define V8_OBJECTS_VISITING_H_
-
-// This file provides base classes and auxiliary methods for defining
-// static object visitors used during GC.
-// Visiting HeapObject body with a normal ObjectVisitor requires performing
-// two switches on object's instance type to determine object size and layout
-// and one or more virtual method calls on visitor itself.
-// Static visitor is different: it provides a dispatch table which contains
-// pointers to specialized visit functions. Each map has the visitor_id
-// field which contains an index of specialized visitor to use.
-
-namespace v8 {
-namespace internal {
-
-
-// Base class for all static visitors.
-class StaticVisitorBase : public AllStatic {
- public:
- enum VisitorId {
- kVisitSeqAsciiString = 0,
- kVisitSeqTwoByteString,
- kVisitShortcutCandidate,
- kVisitByteArray,
- kVisitFixedArray,
- kVisitGlobalContext,
-
- // For data objects, JS objects and structs along with generic visitor which
- // can visit object of any size we provide visitors specialized by
- // object size in words.
- // Ids of specialized visitors are declared in a linear order (without
- // holes) starting from the id of visitor specialized for 2 words objects
- // (base visitor id) and ending with the id of generic visitor.
- // Method GetVisitorIdForSize depends on this ordering to calculate visitor
- // id of specialized visitor from given instance size, base visitor id and
- // generic visitor's id.
-
- kVisitDataObject,
- kVisitDataObject2 = kVisitDataObject,
- kVisitDataObject3,
- kVisitDataObject4,
- kVisitDataObject5,
- kVisitDataObject6,
- kVisitDataObject7,
- kVisitDataObject8,
- kVisitDataObject9,
- kVisitDataObjectGeneric,
-
- kVisitJSObject,
- kVisitJSObject2 = kVisitJSObject,
- kVisitJSObject3,
- kVisitJSObject4,
- kVisitJSObject5,
- kVisitJSObject6,
- kVisitJSObject7,
- kVisitJSObject8,
- kVisitJSObject9,
- kVisitJSObjectGeneric,
-
- kVisitStruct,
- kVisitStruct2 = kVisitStruct,
- kVisitStruct3,
- kVisitStruct4,
- kVisitStruct5,
- kVisitStruct6,
- kVisitStruct7,
- kVisitStruct8,
- kVisitStruct9,
- kVisitStructGeneric,
-
- kVisitConsString,
- kVisitOddball,
- kVisitCode,
- kVisitMap,
- kVisitPropertyCell,
- kVisitSharedFunctionInfo,
- kVisitJSFunction,
-
- kVisitorIdCount,
- kMinObjectSizeInWords = 2
- };
-
- // Visitor ID should fit in one byte.
- STATIC_ASSERT(kVisitorIdCount <= 256);
-
- // Determine which specialized visitor should be used for given instance type
- // and instance type.
- static VisitorId GetVisitorId(int instance_type, int instance_size);
-
- static VisitorId GetVisitorId(Map* map) {
- return GetVisitorId(map->instance_type(), map->instance_size());
- }
-
- // For visitors that allow specialization by size calculate VisitorId based
- // on size, base visitor id and generic visitor id.
- static VisitorId GetVisitorIdForSize(VisitorId base,
- VisitorId generic,
- int object_size) {
- ASSERT((base == kVisitDataObject) ||
- (base == kVisitStruct) ||
- (base == kVisitJSObject));
- ASSERT(IsAligned(object_size, kPointerSize));
- ASSERT(kMinObjectSizeInWords * kPointerSize <= object_size);
- ASSERT(object_size < Page::kMaxHeapObjectSize);
-
- const VisitorId specialization = static_cast<VisitorId>(
- base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);
-
- return Min(specialization, generic);
- }
-};
-
-
-template<typename Callback>
-class VisitorDispatchTable {
- public:
- void CopyFrom(VisitorDispatchTable* other) {
- // We are not using memcpy to guarantee that during update
- // every element of callbacks_ array will remain correct
- // pointer (memcpy might be implemented as a byte copying loop).
- for (int i = 0; i < StaticVisitorBase::kVisitorIdCount; i++) {
- NoBarrier_Store(&callbacks_[i], other->callbacks_[i]);
- }
- }
-
- inline Callback GetVisitor(Map* map) {
- return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]);
- }
-
- void Register(StaticVisitorBase::VisitorId id, Callback callback) {
- ASSERT(id < StaticVisitorBase::kVisitorIdCount); // id is unsigned.
- callbacks_[id] = reinterpret_cast<AtomicWord>(callback);
- }
-
- template<typename Visitor,
- StaticVisitorBase::VisitorId base,
- StaticVisitorBase::VisitorId generic,
- int object_size_in_words>
- void RegisterSpecialization() {
- static const int size = object_size_in_words * kPointerSize;
- Register(StaticVisitorBase::GetVisitorIdForSize(base, generic, size),
- &Visitor::template VisitSpecialized<size>);
- }
-
-
- template<typename Visitor,
- StaticVisitorBase::VisitorId base,
- StaticVisitorBase::VisitorId generic>
- void RegisterSpecializations() {
- STATIC_ASSERT(
- (generic - base + StaticVisitorBase::kMinObjectSizeInWords) == 10);
- RegisterSpecialization<Visitor, base, generic, 2>();
- RegisterSpecialization<Visitor, base, generic, 3>();
- RegisterSpecialization<Visitor, base, generic, 4>();
- RegisterSpecialization<Visitor, base, generic, 5>();
- RegisterSpecialization<Visitor, base, generic, 6>();
- RegisterSpecialization<Visitor, base, generic, 7>();
- RegisterSpecialization<Visitor, base, generic, 8>();
- RegisterSpecialization<Visitor, base, generic, 9>();
- Register(generic, &Visitor::Visit);
- }
-
- private:
- AtomicWord callbacks_[StaticVisitorBase::kVisitorIdCount];
-};
-
-
-template<typename StaticVisitor>
-class BodyVisitorBase : public AllStatic {
- public:
- INLINE(static void IteratePointers(Heap* heap,
- HeapObject* object,
- int start_offset,
- int end_offset)) {
- Object** start_slot = reinterpret_cast<Object**>(object->address() +
- start_offset);
- Object** end_slot = reinterpret_cast<Object**>(object->address() +
- end_offset);
- StaticVisitor::VisitPointers(heap, start_slot, end_slot);
- }
-};
-
-
-template<typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
-class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
- public:
- static inline ReturnType Visit(Map* map, HeapObject* object) {
- int object_size = BodyDescriptor::SizeOf(map, object);
- BodyVisitorBase<StaticVisitor>::IteratePointers(
- map->heap(),
- object,
- BodyDescriptor::kStartOffset,
- object_size);
- return static_cast<ReturnType>(object_size);
- }
-
- template<int object_size>
- static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) {
- ASSERT(BodyDescriptor::SizeOf(map, object) == object_size);
- BodyVisitorBase<StaticVisitor>::IteratePointers(
- map->heap(),
- object,
- BodyDescriptor::kStartOffset,
- object_size);
- return static_cast<ReturnType>(object_size);
- }
-};
-
-
-template<typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
-class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> {
- public:
- static inline ReturnType Visit(Map* map, HeapObject* object) {
- BodyVisitorBase<StaticVisitor>::IteratePointers(
- map->heap(),
- object,
- BodyDescriptor::kStartOffset,
- BodyDescriptor::kEndOffset);
- return static_cast<ReturnType>(BodyDescriptor::kSize);
- }
-};
-
-
-// Base class for visitors used for a linear new space iteration.
-// IterateBody returns size of visited object.
-// Certain types of objects (i.e. Code objects) are not handled
-// by dispatch table of this visitor because they cannot appear
-// in the new space.
-//
-// This class is intended to be used in the following way:
-//
-// class SomeVisitor : public StaticNewSpaceVisitor<SomeVisitor> {
-// ...
-// }
-//
-// This is an example of Curiously recurring template pattern
-// (see http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern).
-// We use CRTP to guarantee aggressive compile time optimizations (i.e.
-// inlining and specialization of StaticVisitor::VisitPointers methods).
-template<typename StaticVisitor>
-class StaticNewSpaceVisitor : public StaticVisitorBase {
- public:
- static void Initialize() {
- table_.Register(kVisitShortcutCandidate,
- &FixedBodyVisitor<StaticVisitor,
- ConsString::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitConsString,
- &FixedBodyVisitor<StaticVisitor,
- ConsString::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitFixedArray,
- &FlexibleBodyVisitor<StaticVisitor,
- FixedArray::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitGlobalContext,
- &FixedBodyVisitor<StaticVisitor,
- Context::ScavengeBodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitByteArray, &VisitByteArray);
-
- table_.Register(kVisitSharedFunctionInfo,
- &FixedBodyVisitor<StaticVisitor,
- SharedFunctionInfo::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString);
-
- table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
-
- table_.Register(kVisitJSFunction,
- &JSObjectVisitor::
- template VisitSpecialized<JSFunction::kSize>);
-
- table_.RegisterSpecializations<DataObjectVisitor,
- kVisitDataObject,
- kVisitDataObjectGeneric>();
- table_.RegisterSpecializations<JSObjectVisitor,
- kVisitJSObject,
- kVisitJSObjectGeneric>();
- table_.RegisterSpecializations<StructVisitor,
- kVisitStruct,
- kVisitStructGeneric>();
- }
-
- static inline int IterateBody(Map* map, HeapObject* obj) {
- return table_.GetVisitor(map)(map, obj);
- }
-
- static inline void VisitPointers(Heap* heap, Object** start, Object** end) {
- for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(heap, p);
- }
-
- private:
- static inline int VisitByteArray(Map* map, HeapObject* object) {
- return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
- }
-
- static inline int VisitSeqAsciiString(Map* map, HeapObject* object) {
- return SeqAsciiString::cast(object)->
- SeqAsciiStringSize(map->instance_type());
- }
-
- static inline int VisitSeqTwoByteString(Map* map, HeapObject* object) {
- return SeqTwoByteString::cast(object)->
- SeqTwoByteStringSize(map->instance_type());
- }
-
- class DataObjectVisitor {
- public:
- template<int object_size>
- static inline int VisitSpecialized(Map* map, HeapObject* object) {
- return object_size;
- }
-
- static inline int Visit(Map* map, HeapObject* object) {
- return map->instance_size();
- }
- };
-
- typedef FlexibleBodyVisitor<StaticVisitor,
- StructBodyDescriptor,
- int> StructVisitor;
-
- typedef FlexibleBodyVisitor<StaticVisitor,
- JSObject::BodyDescriptor,
- int> JSObjectVisitor;
-
- typedef int (*Callback)(Map* map, HeapObject* object);
-
- static VisitorDispatchTable<Callback> table_;
-};
-
-
-template<typename StaticVisitor>
-VisitorDispatchTable<typename StaticNewSpaceVisitor<StaticVisitor>::Callback>
- StaticNewSpaceVisitor<StaticVisitor>::table_;
-
-
-void Code::CodeIterateBody(ObjectVisitor* v) {
- int mode_mask = RelocInfo::kCodeTargetMask |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
- RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
- RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-
- // Use the relocation info pointer before it is visited by
- // the heap compaction in the next statement.
- RelocIterator it(this, mode_mask);
-
- IteratePointer(v, kRelocationInfoOffset);
- IteratePointer(v, kDeoptimizationDataOffset);
-
- for (; !it.done(); it.next()) {
- it.rinfo()->Visit(v);
- }
-}
-
-
-template<typename StaticVisitor>
-void Code::CodeIterateBody(Heap* heap) {
- int mode_mask = RelocInfo::kCodeTargetMask |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
- RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
- RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-
- // Use the relocation info pointer before it is visited by
- // the heap compaction in the next statement.
- RelocIterator it(this, mode_mask);
-
- StaticVisitor::VisitPointer(
- heap,
- reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
- StaticVisitor::VisitPointer(
- heap,
- reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
-
- for (; !it.done(); it.next()) {
- it.rinfo()->template Visit<StaticVisitor>(heap);
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_OBJECTS_VISITING_H_
diff --git a/src/3rdparty/v8/src/objects.cc b/src/3rdparty/v8/src/objects.cc
deleted file mode 100644
index 9a5357a..0000000
--- a/src/3rdparty/v8/src/objects.cc
+++ /dev/null
@@ -1,10296 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "arguments.h"
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "execution.h"
-#include "full-codegen.h"
-#include "hydrogen.h"
-#include "objects-inl.h"
-#include "objects-visiting.h"
-#include "macro-assembler.h"
-#include "safepoint-table.h"
-#include "scanner-base.h"
-#include "scopeinfo.h"
-#include "string-stream.h"
-#include "utils.h"
-#include "vm-state-inl.h"
-
-#ifdef ENABLE_DISASSEMBLER
-#include "disasm.h"
-#include "disassembler.h"
-#endif
-
-namespace v8 {
-namespace internal {
-
-// Getters and setters are stored in a fixed array property. These are
-// constants for their indices.
-const int kGetterIndex = 0;
-const int kSetterIndex = 1;
-
-
-MUST_USE_RESULT static MaybeObject* CreateJSValue(JSFunction* constructor,
- Object* value) {
- Object* result;
- { MaybeObject* maybe_result =
- constructor->GetHeap()->AllocateJSObject(constructor);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- JSValue::cast(result)->set_value(value);
- return result;
-}
-
-
-MaybeObject* Object::ToObject(Context* global_context) {
- if (IsNumber()) {
- return CreateJSValue(global_context->number_function(), this);
- } else if (IsBoolean()) {
- return CreateJSValue(global_context->boolean_function(), this);
- } else if (IsString()) {
- return CreateJSValue(global_context->string_function(), this);
- }
- ASSERT(IsJSObject());
- return this;
-}
-
-
-MaybeObject* Object::ToObject() {
- if (IsJSObject()) {
- return this;
- } else if (IsNumber()) {
- Isolate* isolate = Isolate::Current();
- Context* global_context = isolate->context()->global_context();
- return CreateJSValue(global_context->number_function(), this);
- } else if (IsBoolean()) {
- Isolate* isolate = HeapObject::cast(this)->GetIsolate();
- Context* global_context = isolate->context()->global_context();
- return CreateJSValue(global_context->boolean_function(), this);
- } else if (IsString()) {
- Isolate* isolate = HeapObject::cast(this)->GetIsolate();
- Context* global_context = isolate->context()->global_context();
- return CreateJSValue(global_context->string_function(), this);
- }
-
- // Throw a type error.
- return Failure::InternalError();
-}
-
-
-Object* Object::ToBoolean() {
- if (IsTrue()) return this;
- if (IsFalse()) return this;
- if (IsSmi()) {
- return Isolate::Current()->heap()->ToBoolean(Smi::cast(this)->value() != 0);
- }
- HeapObject* heap_object = HeapObject::cast(this);
- if (heap_object->IsUndefined() || heap_object->IsNull()) {
- return heap_object->GetHeap()->false_value();
- }
- // Undetectable object is false
- if (heap_object->IsUndetectableObject()) {
- return heap_object->GetHeap()->false_value();
- }
- if (heap_object->IsString()) {
- return heap_object->GetHeap()->ToBoolean(
- String::cast(this)->length() != 0);
- }
- if (heap_object->IsHeapNumber()) {
- return HeapNumber::cast(this)->HeapNumberToBoolean();
- }
- return heap_object->GetHeap()->true_value();
-}
-
-
-void Object::Lookup(String* name, LookupResult* result) {
- Object* holder = NULL;
- if (IsSmi()) {
- Heap* heap = Isolate::Current()->heap();
- Context* global_context = heap->isolate()->context()->global_context();
- holder = global_context->number_function()->instance_prototype();
- } else {
- HeapObject* heap_object = HeapObject::cast(this);
- if (heap_object->IsJSObject()) {
- return JSObject::cast(this)->Lookup(name, result);
- }
- Heap* heap = heap_object->GetHeap();
- if (heap_object->IsString()) {
- Context* global_context = heap->isolate()->context()->global_context();
- holder = global_context->string_function()->instance_prototype();
- } else if (heap_object->IsHeapNumber()) {
- Context* global_context = heap->isolate()->context()->global_context();
- holder = global_context->number_function()->instance_prototype();
- } else if (heap_object->IsBoolean()) {
- Context* global_context = heap->isolate()->context()->global_context();
- holder = global_context->boolean_function()->instance_prototype();
- }
- }
- ASSERT(holder != NULL); // Cannot handle null or undefined.
- JSObject::cast(holder)->Lookup(name, result);
-}
-
-
-MaybeObject* Object::GetPropertyWithReceiver(Object* receiver,
- String* name,
- PropertyAttributes* attributes) {
- LookupResult result;
- Lookup(name, &result);
- MaybeObject* value = GetProperty(receiver, &result, name, attributes);
- ASSERT(*attributes <= ABSENT);
- return value;
-}
-
-
-MaybeObject* Object::GetPropertyWithCallback(Object* receiver,
- Object* structure,
- String* name,
- Object* holder) {
- Isolate* isolate = name->GetIsolate();
- // To accommodate both the old and the new api we switch on the
- // data structure used to store the callbacks. Eventually proxy
- // callbacks should be phased out.
- if (structure->IsProxy()) {
- AccessorDescriptor* callback =
- reinterpret_cast<AccessorDescriptor*>(Proxy::cast(structure)->proxy());
- MaybeObject* value = (callback->getter)(receiver, callback->data);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return value;
- }
-
- // api style callbacks.
- if (structure->IsAccessorInfo()) {
- AccessorInfo* data = AccessorInfo::cast(structure);
- Object* fun_obj = data->getter();
- v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
- HandleScope scope;
- JSObject* self = JSObject::cast(receiver);
- JSObject* holder_handle = JSObject::cast(holder);
- Handle<String> key(name);
- LOG(isolate, ApiNamedPropertyAccess("load", self, name));
- CustomArguments args(isolate, data->data(), self, holder_handle);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = call_fun(v8::Utils::ToLocal(key), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (result.IsEmpty()) {
- return isolate->heap()->undefined_value();
- }
- return *v8::Utils::OpenHandle(*result);
- }
-
- // __defineGetter__ callback
- if (structure->IsFixedArray()) {
- Object* getter = FixedArray::cast(structure)->get(kGetterIndex);
- if (getter->IsJSFunction()) {
- return Object::GetPropertyWithDefinedGetter(receiver,
- JSFunction::cast(getter));
- }
- // Getter is not a function.
- return isolate->heap()->undefined_value();
- }
-
- UNREACHABLE();
- return NULL;
-}
-
-
-MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver,
- JSFunction* getter) {
- HandleScope scope;
- Handle<JSFunction> fun(JSFunction::cast(getter));
- Handle<Object> self(receiver);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug* debug = fun->GetHeap()->isolate()->debug();
- // Handle stepping into a getter if step into is active.
- if (debug->StepInActive()) {
- debug->HandleStepIn(fun, Handle<Object>::null(), 0, false);
- }
-#endif
- bool has_pending_exception;
- Handle<Object> result =
- Execution::Call(fun, self, 0, NULL, &has_pending_exception);
- // Check for pending exception and return the result.
- if (has_pending_exception) return Failure::Exception();
- return *result;
-}
-
-
-// Only deal with CALLBACKS and INTERCEPTOR
-MaybeObject* JSObject::GetPropertyWithFailedAccessCheck(
- Object* receiver,
- LookupResult* result,
- String* name,
- PropertyAttributes* attributes) {
- if (result->IsProperty()) {
- switch (result->type()) {
- case CALLBACKS: {
- // Only allow API accessors.
- Object* obj = result->GetCallbackObject();
- if (obj->IsAccessorInfo()) {
- AccessorInfo* info = AccessorInfo::cast(obj);
- if (info->all_can_read()) {
- *attributes = result->GetAttributes();
- return GetPropertyWithCallback(receiver,
- result->GetCallbackObject(),
- name,
- result->holder());
- }
- }
- break;
- }
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION: {
- // Search ALL_CAN_READ accessors in prototype chain.
- LookupResult r;
- result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
- if (r.IsProperty()) {
- return GetPropertyWithFailedAccessCheck(receiver,
- &r,
- name,
- attributes);
- }
- break;
- }
- case INTERCEPTOR: {
- // If the object has an interceptor, try real named properties.
- // No access check in GetPropertyAttributeWithInterceptor.
- LookupResult r;
- result->holder()->LookupRealNamedProperty(name, &r);
- if (r.IsProperty()) {
- return GetPropertyWithFailedAccessCheck(receiver,
- &r,
- name,
- attributes);
- }
- break;
- }
- default:
- UNREACHABLE();
- }
- }
-
- // No accessible property found.
- *attributes = ABSENT;
- Heap* heap = name->GetHeap();
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_GET);
- return heap->undefined_value();
-}
-
-
-PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
- Object* receiver,
- LookupResult* result,
- String* name,
- bool continue_search) {
- if (result->IsProperty()) {
- switch (result->type()) {
- case CALLBACKS: {
- // Only allow API accessors.
- Object* obj = result->GetCallbackObject();
- if (obj->IsAccessorInfo()) {
- AccessorInfo* info = AccessorInfo::cast(obj);
- if (info->all_can_read()) {
- return result->GetAttributes();
- }
- }
- break;
- }
-
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION: {
- if (!continue_search) break;
- // Search ALL_CAN_READ accessors in prototype chain.
- LookupResult r;
- result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
- if (r.IsProperty()) {
- return GetPropertyAttributeWithFailedAccessCheck(receiver,
- &r,
- name,
- continue_search);
- }
- break;
- }
-
- case INTERCEPTOR: {
- // If the object has an interceptor, try real named properties.
- // No access check in GetPropertyAttributeWithInterceptor.
- LookupResult r;
- if (continue_search) {
- result->holder()->LookupRealNamedProperty(name, &r);
- } else {
- result->holder()->LocalLookupRealNamedProperty(name, &r);
- }
- if (r.IsProperty()) {
- return GetPropertyAttributeWithFailedAccessCheck(receiver,
- &r,
- name,
- continue_search);
- }
- break;
- }
-
- default:
- UNREACHABLE();
- }
- }
-
- GetHeap()->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return ABSENT;
-}
-
-
-Object* JSObject::GetNormalizedProperty(LookupResult* result) {
- ASSERT(!HasFastProperties());
- Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
- if (IsGlobalObject()) {
- value = JSGlobalPropertyCell::cast(value)->value();
- }
- ASSERT(!value->IsJSGlobalPropertyCell());
- return value;
-}
-
-
-Object* JSObject::SetNormalizedProperty(LookupResult* result, Object* value) {
- ASSERT(!HasFastProperties());
- if (IsGlobalObject()) {
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(
- property_dictionary()->ValueAt(result->GetDictionaryEntry()));
- cell->set_value(value);
- } else {
- property_dictionary()->ValueAtPut(result->GetDictionaryEntry(), value);
- }
- return value;
-}
-
-
-MaybeObject* JSObject::SetNormalizedProperty(String* name,
- Object* value,
- PropertyDetails details) {
- ASSERT(!HasFastProperties());
- int entry = property_dictionary()->FindEntry(name);
- if (entry == StringDictionary::kNotFound) {
- Object* store_value = value;
- if (IsGlobalObject()) {
- Heap* heap = name->GetHeap();
- MaybeObject* maybe_store_value =
- heap->AllocateJSGlobalPropertyCell(value);
- if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
- }
- Object* dict;
- { MaybeObject* maybe_dict =
- property_dictionary()->Add(name, store_value, details);
- if (!maybe_dict->ToObject(&dict)) return maybe_dict;
- }
- set_properties(StringDictionary::cast(dict));
- return value;
- }
- // Preserve enumeration index.
- details = PropertyDetails(details.attributes(),
- details.type(),
- property_dictionary()->DetailsAt(entry).index());
- if (IsGlobalObject()) {
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(property_dictionary()->ValueAt(entry));
- cell->set_value(value);
- // Please note we have to update the property details.
- property_dictionary()->DetailsAtPut(entry, details);
- } else {
- property_dictionary()->SetEntry(entry, name, value, details);
- }
- return value;
-}
-
-
-MaybeObject* JSObject::DeleteNormalizedProperty(String* name, DeleteMode mode) {
- ASSERT(!HasFastProperties());
- StringDictionary* dictionary = property_dictionary();
- int entry = dictionary->FindEntry(name);
- if (entry != StringDictionary::kNotFound) {
- // If we have a global object set the cell to the hole.
- if (IsGlobalObject()) {
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.IsDontDelete()) {
- if (mode != FORCE_DELETION) return GetHeap()->false_value();
- // When forced to delete global properties, we have to make a
- // map change to invalidate any ICs that think they can load
- // from the DontDelete cell without checking if it contains
- // the hole value.
- Object* new_map;
- { MaybeObject* maybe_new_map = map()->CopyDropDescriptors();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- }
- set_map(Map::cast(new_map));
- }
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(dictionary->ValueAt(entry));
- cell->set_value(cell->heap()->the_hole_value());
- dictionary->DetailsAtPut(entry, details.AsDeleted());
- } else {
- return dictionary->DeleteProperty(entry, mode);
- }
- }
- return GetHeap()->true_value();
-}
-
-
-bool JSObject::IsDirty() {
- Object* cons_obj = map()->constructor();
- if (!cons_obj->IsJSFunction())
- return true;
- JSFunction* fun = JSFunction::cast(cons_obj);
- if (!fun->shared()->IsApiFunction())
- return true;
- // If the object is fully fast case and has the same map it was
- // created with then no changes can have been made to it.
- return map() != fun->initial_map()
- || !HasFastElements()
- || !HasFastProperties();
-}
-
-
-MaybeObject* Object::GetProperty(Object* receiver,
- LookupResult* result,
- String* name,
- PropertyAttributes* attributes) {
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc;
- Heap* heap = name->GetHeap();
-
- // Traverse the prototype chain from the current object (this) to
- // the holder and check for access rights. This avoid traversing the
- // objects more than once in case of interceptors, because the
- // holder will always be the interceptor holder and the search may
- // only continue with a current object just after the interceptor
- // holder in the prototype chain.
- Object* last = result->IsProperty() ? result->holder() : heap->null_value();
- for (Object* current = this; true; current = current->GetPrototype()) {
- if (current->IsAccessCheckNeeded()) {
- // Check if we're allowed to read from the current object. Note
- // that even though we may not actually end up loading the named
- // property from the current object, we still check that we have
- // access to it.
- JSObject* checked = JSObject::cast(current);
- if (!heap->isolate()->MayNamedAccess(checked, name, v8::ACCESS_GET)) {
- return checked->GetPropertyWithFailedAccessCheck(receiver,
- result,
- name,
- attributes);
- }
- }
- // Stop traversing the chain once we reach the last object in the
- // chain; either the holder of the result or null in case of an
- // absent property.
- if (current == last) break;
- }
-
- if (!result->IsProperty()) {
- *attributes = ABSENT;
- return heap->undefined_value();
- }
- *attributes = result->GetAttributes();
- Object* value;
- JSObject* holder = result->holder();
- switch (result->type()) {
- case NORMAL:
- value = holder->GetNormalizedProperty(result);
- ASSERT(!value->IsTheHole() || result->IsReadOnly());
- return value->IsTheHole() ? heap->undefined_value() : value;
- case FIELD:
- value = holder->FastPropertyAt(result->GetFieldIndex());
- ASSERT(!value->IsTheHole() || result->IsReadOnly());
- return value->IsTheHole() ? heap->undefined_value() : value;
- case CONSTANT_FUNCTION:
- return result->GetConstantFunction();
- case CALLBACKS:
- return GetPropertyWithCallback(receiver,
- result->GetCallbackObject(),
- name,
- holder);
- case INTERCEPTOR: {
- JSObject* recvr = JSObject::cast(receiver);
- return holder->GetPropertyWithInterceptor(recvr, name, attributes);
- }
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
- Object* holder = NULL;
- if (IsSmi()) {
- Context* global_context = Isolate::Current()->context()->global_context();
- holder = global_context->number_function()->instance_prototype();
- } else {
- HeapObject* heap_object = HeapObject::cast(this);
-
- if (heap_object->IsJSObject()) {
- return JSObject::cast(this)->GetElementWithReceiver(receiver, index);
- }
- Heap* heap = heap_object->GetHeap();
- Isolate* isolate = heap->isolate();
-
- Context* global_context = isolate->context()->global_context();
- if (heap_object->IsString()) {
- holder = global_context->string_function()->instance_prototype();
- } else if (heap_object->IsHeapNumber()) {
- holder = global_context->number_function()->instance_prototype();
- } else if (heap_object->IsBoolean()) {
- holder = global_context->boolean_function()->instance_prototype();
- } else {
- // Undefined and null have no indexed properties.
- ASSERT(heap_object->IsUndefined() || heap_object->IsNull());
- return heap->undefined_value();
- }
- }
-
- return JSObject::cast(holder)->GetElementWithReceiver(receiver, index);
-}
-
-
-Object* Object::GetPrototype() {
- if (IsSmi()) {
- Heap* heap = Isolate::Current()->heap();
- Context* context = heap->isolate()->context()->global_context();
- return context->number_function()->instance_prototype();
- }
-
- HeapObject* heap_object = HeapObject::cast(this);
-
- // The object is either a number, a string, a boolean, or a real JS object.
- if (heap_object->IsJSObject()) {
- return JSObject::cast(this)->map()->prototype();
- }
- Heap* heap = heap_object->GetHeap();
- Context* context = heap->isolate()->context()->global_context();
-
- if (heap_object->IsHeapNumber()) {
- return context->number_function()->instance_prototype();
- }
- if (heap_object->IsString()) {
- return context->string_function()->instance_prototype();
- }
- if (heap_object->IsBoolean()) {
- return context->boolean_function()->instance_prototype();
- } else {
- return heap->null_value();
- }
-}
-
-
-void Object::ShortPrint(FILE* out) {
- HeapStringAllocator allocator;
- StringStream accumulator(&allocator);
- ShortPrint(&accumulator);
- accumulator.OutputToFile(out);
-}
-
-
-void Object::ShortPrint(StringStream* accumulator) {
- if (IsSmi()) {
- Smi::cast(this)->SmiPrint(accumulator);
- } else if (IsFailure()) {
- Failure::cast(this)->FailurePrint(accumulator);
- } else {
- HeapObject::cast(this)->HeapObjectShortPrint(accumulator);
- }
-}
-
-
-void Smi::SmiPrint(FILE* out) {
- PrintF(out, "%d", value());
-}
-
-
-void Smi::SmiPrint(StringStream* accumulator) {
- accumulator->Add("%d", value());
-}
-
-
-void Failure::FailurePrint(StringStream* accumulator) {
- accumulator->Add("Failure(%p)", reinterpret_cast<void*>(value()));
-}
-
-
-void Failure::FailurePrint(FILE* out) {
- PrintF(out, "Failure(%p)", reinterpret_cast<void*>(value()));
-}
-
-
-// Should a word be prefixed by 'a' or 'an' in order to read naturally in
-// English? Returns false for non-ASCII or words that don't start with
-// a capital letter. The a/an rule follows pronunciation in English.
-// We don't use the BBC's overcorrect "an historic occasion" though if
-// you speak a dialect you may well say "an 'istoric occasion".
-static bool AnWord(String* str) {
- if (str->length() == 0) return false; // A nothing.
- int c0 = str->Get(0);
- int c1 = str->length() > 1 ? str->Get(1) : 0;
- if (c0 == 'U') {
- if (c1 > 'Z') {
- return true; // An Umpire, but a UTF8String, a U.
- }
- } else if (c0 == 'A' || c0 == 'E' || c0 == 'I' || c0 == 'O') {
- return true; // An Ape, an ABCBook.
- } else if ((c1 == 0 || (c1 >= 'A' && c1 <= 'Z')) &&
- (c0 == 'F' || c0 == 'H' || c0 == 'M' || c0 == 'N' || c0 == 'R' ||
- c0 == 'S' || c0 == 'X')) {
- return true; // An MP3File, an M.
- }
- return false;
-}
-
-
-MaybeObject* String::SlowTryFlatten(PretenureFlag pretenure) {
-#ifdef DEBUG
- // Do not attempt to flatten in debug mode when allocation is not
- // allowed. This is to avoid an assertion failure when allocating.
- // Flattening strings is the only case where we always allow
- // allocation because no GC is performed if the allocation fails.
- if (!HEAP->IsAllocationAllowed()) return this;
-#endif
-
- Heap* heap = GetHeap();
- switch (StringShape(this).representation_tag()) {
- case kConsStringTag: {
- ConsString* cs = ConsString::cast(this);
- if (cs->second()->length() == 0) {
- return cs->first();
- }
- // There's little point in putting the flat string in new space if the
- // cons string is in old space. It can never get GCed until there is
- // an old space GC.
- PretenureFlag tenure = heap->InNewSpace(this) ? pretenure : TENURED;
- int len = length();
- Object* object;
- String* result;
- if (IsAsciiRepresentation()) {
- { MaybeObject* maybe_object = heap->AllocateRawAsciiString(len, tenure);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- result = String::cast(object);
- String* first = cs->first();
- int first_length = first->length();
- char* dest = SeqAsciiString::cast(result)->GetChars();
- WriteToFlat(first, dest, 0, first_length);
- String* second = cs->second();
- WriteToFlat(second,
- dest + first_length,
- 0,
- len - first_length);
- } else {
- { MaybeObject* maybe_object =
- heap->AllocateRawTwoByteString(len, tenure);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- result = String::cast(object);
- uc16* dest = SeqTwoByteString::cast(result)->GetChars();
- String* first = cs->first();
- int first_length = first->length();
- WriteToFlat(first, dest, 0, first_length);
- String* second = cs->second();
- WriteToFlat(second,
- dest + first_length,
- 0,
- len - first_length);
- }
- cs->set_first(result);
- cs->set_second(heap->empty_string());
- return result;
- }
- default:
- return this;
- }
-}
-
-
-bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
- // Externalizing twice leaks the external resource, so it's
- // prohibited by the API.
- ASSERT(!this->IsExternalString());
-#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- // Assert that the resource and the string are equivalent.
- ASSERT(static_cast<size_t>(this->length()) == resource->length());
- ScopedVector<uc16> smart_chars(this->length());
- String::WriteToFlat(this, smart_chars.start(), 0, this->length());
- ASSERT(memcmp(smart_chars.start(),
- resource->data(),
- resource->length() * sizeof(smart_chars[0])) == 0);
- }
-#endif // DEBUG
- Heap* heap = GetHeap();
- int size = this->Size(); // Byte size of the original string.
- if (size < ExternalString::kSize) {
- // The string is too small to fit an external String in its place. This can
- // only happen for zero length strings.
- return false;
- }
- ASSERT(size >= ExternalString::kSize);
- bool is_ascii = this->IsAsciiRepresentation();
- bool is_symbol = this->IsSymbol();
- int length = this->length();
- int hash_field = this->hash_field();
-
- // Morph the object to an external string by adjusting the map and
- // reinitializing the fields.
- this->set_map(is_ascii ?
- heap->external_string_with_ascii_data_map() :
- heap->external_string_map());
- ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
- self->set_length(length);
- self->set_hash_field(hash_field);
- self->set_resource(resource);
- // Additionally make the object into an external symbol if the original string
- // was a symbol to start with.
- if (is_symbol) {
- self->Hash(); // Force regeneration of the hash value.
- // Now morph this external string into a external symbol.
- this->set_map(is_ascii ?
- heap->external_symbol_with_ascii_data_map() :
- heap->external_symbol_map());
- }
-
- // Fill the remainder of the string with dead wood.
- int new_size = this->Size(); // Byte size of the external String object.
- heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
- return true;
-}
-
-
-bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
-#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- // Assert that the resource and the string are equivalent.
- ASSERT(static_cast<size_t>(this->length()) == resource->length());
- ScopedVector<char> smart_chars(this->length());
- String::WriteToFlat(this, smart_chars.start(), 0, this->length());
- ASSERT(memcmp(smart_chars.start(),
- resource->data(),
- resource->length() * sizeof(smart_chars[0])) == 0);
- }
-#endif // DEBUG
- Heap* heap = GetHeap();
- int size = this->Size(); // Byte size of the original string.
- if (size < ExternalString::kSize) {
- // The string is too small to fit an external String in its place. This can
- // only happen for zero length strings.
- return false;
- }
- ASSERT(size >= ExternalString::kSize);
- bool is_symbol = this->IsSymbol();
- int length = this->length();
- int hash_field = this->hash_field();
-
- // Morph the object to an external string by adjusting the map and
- // reinitializing the fields.
- this->set_map(heap->external_ascii_string_map());
- ExternalAsciiString* self = ExternalAsciiString::cast(this);
- self->set_length(length);
- self->set_hash_field(hash_field);
- self->set_resource(resource);
- // Additionally make the object into an external symbol if the original string
- // was a symbol to start with.
- if (is_symbol) {
- self->Hash(); // Force regeneration of the hash value.
- // Now morph this external string into a external symbol.
- this->set_map(heap->external_ascii_symbol_map());
- }
-
- // Fill the remainder of the string with dead wood.
- int new_size = this->Size(); // Byte size of the external String object.
- heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
- return true;
-}
-
-
-void String::StringShortPrint(StringStream* accumulator) {
- int len = length();
- if (len > kMaxShortPrintLength) {
- accumulator->Add("<Very long string[%u]>", len);
- return;
- }
-
- if (!LooksValid()) {
- accumulator->Add("<Invalid String>");
- return;
- }
-
- StringInputBuffer buf(this);
-
- bool truncated = false;
- if (len > kMaxShortPrintLength) {
- len = kMaxShortPrintLength;
- truncated = true;
- }
- bool ascii = true;
- for (int i = 0; i < len; i++) {
- int c = buf.GetNext();
-
- if (c < 32 || c >= 127) {
- ascii = false;
- }
- }
- buf.Reset(this);
- if (ascii) {
- accumulator->Add("<String[%u]: ", length());
- for (int i = 0; i < len; i++) {
- accumulator->Put(buf.GetNext());
- }
- accumulator->Put('>');
- } else {
- // Backslash indicates that the string contains control
- // characters and that backslashes are therefore escaped.
- accumulator->Add("<String[%u]\\: ", length());
- for (int i = 0; i < len; i++) {
- int c = buf.GetNext();
- if (c == '\n') {
- accumulator->Add("\\n");
- } else if (c == '\r') {
- accumulator->Add("\\r");
- } else if (c == '\\') {
- accumulator->Add("\\\\");
- } else if (c < 32 || c > 126) {
- accumulator->Add("\\x%02x", c);
- } else {
- accumulator->Put(c);
- }
- }
- if (truncated) {
- accumulator->Put('.');
- accumulator->Put('.');
- accumulator->Put('.');
- }
- accumulator->Put('>');
- }
- return;
-}
-
-
-void JSObject::JSObjectShortPrint(StringStream* accumulator) {
- switch (map()->instance_type()) {
- case JS_ARRAY_TYPE: {
- double length = JSArray::cast(this)->length()->Number();
- accumulator->Add("<JS array[%u]>", static_cast<uint32_t>(length));
- break;
- }
- case JS_REGEXP_TYPE: {
- accumulator->Add("<JS RegExp>");
- break;
- }
- case JS_FUNCTION_TYPE: {
- Object* fun_name = JSFunction::cast(this)->shared()->name();
- bool printed = false;
- if (fun_name->IsString()) {
- String* str = String::cast(fun_name);
- if (str->length() > 0) {
- accumulator->Add("<JS Function ");
- accumulator->Put(str);
- accumulator->Put('>');
- printed = true;
- }
- }
- if (!printed) {
- accumulator->Add("<JS Function>");
- }
- break;
- }
- // All other JSObjects are rather similar to each other (JSObject,
- // JSGlobalProxy, JSGlobalObject, JSUndetectableObject, JSValue).
- default: {
- Map* map_of_this = map();
- Heap* heap = map_of_this->heap();
- Object* constructor = map_of_this->constructor();
- bool printed = false;
- if (constructor->IsHeapObject() &&
- !heap->Contains(HeapObject::cast(constructor))) {
- accumulator->Add("!!!INVALID CONSTRUCTOR!!!");
- } else {
- bool global_object = IsJSGlobalProxy();
- if (constructor->IsJSFunction()) {
- if (!heap->Contains(JSFunction::cast(constructor)->shared())) {
- accumulator->Add("!!!INVALID SHARED ON CONSTRUCTOR!!!");
- } else {
- Object* constructor_name =
- JSFunction::cast(constructor)->shared()->name();
- if (constructor_name->IsString()) {
- String* str = String::cast(constructor_name);
- if (str->length() > 0) {
- bool vowel = AnWord(str);
- accumulator->Add("<%sa%s ",
- global_object ? "Global Object: " : "",
- vowel ? "n" : "");
- accumulator->Put(str);
- accumulator->Put('>');
- printed = true;
- }
- }
- }
- }
- if (!printed) {
- accumulator->Add("<JS %sObject", global_object ? "Global " : "");
- }
- }
- if (IsJSValue()) {
- accumulator->Add(" value = ");
- JSValue::cast(this)->value()->ShortPrint(accumulator);
- }
- accumulator->Put('>');
- break;
- }
- }
-}
-
-
-void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
- // if (!HEAP->InNewSpace(this)) PrintF("*", this);
- Heap* heap = GetHeap();
- if (!heap->Contains(this)) {
- accumulator->Add("!!!INVALID POINTER!!!");
- return;
- }
- if (!heap->Contains(map())) {
- accumulator->Add("!!!INVALID MAP!!!");
- return;
- }
-
- accumulator->Add("%p ", this);
-
- if (IsString()) {
- String::cast(this)->StringShortPrint(accumulator);
- return;
- }
- if (IsJSObject()) {
- JSObject::cast(this)->JSObjectShortPrint(accumulator);
- return;
- }
- switch (map()->instance_type()) {
- case MAP_TYPE:
- accumulator->Add("<Map>");
- break;
- case FIXED_ARRAY_TYPE:
- accumulator->Add("<FixedArray[%u]>", FixedArray::cast(this)->length());
- break;
- case BYTE_ARRAY_TYPE:
- accumulator->Add("<ByteArray[%u]>", ByteArray::cast(this)->length());
- break;
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- accumulator->Add("<ExternalPixelArray[%u]>",
- ExternalPixelArray::cast(this)->length());
- break;
- case EXTERNAL_BYTE_ARRAY_TYPE:
- accumulator->Add("<ExternalByteArray[%u]>",
- ExternalByteArray::cast(this)->length());
- break;
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- accumulator->Add("<ExternalUnsignedByteArray[%u]>",
- ExternalUnsignedByteArray::cast(this)->length());
- break;
- case EXTERNAL_SHORT_ARRAY_TYPE:
- accumulator->Add("<ExternalShortArray[%u]>",
- ExternalShortArray::cast(this)->length());
- break;
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- accumulator->Add("<ExternalUnsignedShortArray[%u]>",
- ExternalUnsignedShortArray::cast(this)->length());
- break;
- case EXTERNAL_INT_ARRAY_TYPE:
- accumulator->Add("<ExternalIntArray[%u]>",
- ExternalIntArray::cast(this)->length());
- break;
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- accumulator->Add("<ExternalUnsignedIntArray[%u]>",
- ExternalUnsignedIntArray::cast(this)->length());
- break;
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- accumulator->Add("<ExternalFloatArray[%u]>",
- ExternalFloatArray::cast(this)->length());
- break;
- case SHARED_FUNCTION_INFO_TYPE:
- accumulator->Add("<SharedFunctionInfo>");
- break;
- case JS_MESSAGE_OBJECT_TYPE:
- accumulator->Add("<JSMessageObject>");
- break;
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
- case NAME##_TYPE: \
- accumulator->Put('<'); \
- accumulator->Add(#Name); \
- accumulator->Put('>'); \
- break;
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- case CODE_TYPE:
- accumulator->Add("<Code>");
- break;
- case ODDBALL_TYPE: {
- if (IsUndefined())
- accumulator->Add("<undefined>");
- else if (IsTheHole())
- accumulator->Add("<the hole>");
- else if (IsNull())
- accumulator->Add("<null>");
- else if (IsTrue())
- accumulator->Add("<true>");
- else if (IsFalse())
- accumulator->Add("<false>");
- else
- accumulator->Add("<Odd Oddball>");
- break;
- }
- case HEAP_NUMBER_TYPE:
- accumulator->Add("<Number: ");
- HeapNumber::cast(this)->HeapNumberPrint(accumulator);
- accumulator->Put('>');
- break;
- case PROXY_TYPE:
- accumulator->Add("<Proxy>");
- break;
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
- accumulator->Add("Cell for ");
- JSGlobalPropertyCell::cast(this)->value()->ShortPrint(accumulator);
- break;
- default:
- accumulator->Add("<Other heap object (%d)>", map()->instance_type());
- break;
- }
-}
-
-
-void HeapObject::Iterate(ObjectVisitor* v) {
- // Handle header
- IteratePointer(v, kMapOffset);
- // Handle object body
- Map* m = map();
- IterateBody(m->instance_type(), SizeFromMap(m), v);
-}
-
-
-void HeapObject::IterateBody(InstanceType type, int object_size,
- ObjectVisitor* v) {
- // Avoiding <Type>::cast(this) because it accesses the map pointer field.
- // During GC, the map pointer field is encoded.
- if (type < FIRST_NONSTRING_TYPE) {
- switch (type & kStringRepresentationMask) {
- case kSeqStringTag:
- break;
- case kConsStringTag:
- ConsString::BodyDescriptor::IterateBody(this, v);
- break;
- case kExternalStringTag:
- if ((type & kStringEncodingMask) == kAsciiStringTag) {
- reinterpret_cast<ExternalAsciiString*>(this)->
- ExternalAsciiStringIterateBody(v);
- } else {
- reinterpret_cast<ExternalTwoByteString*>(this)->
- ExternalTwoByteStringIterateBody(v);
- }
- break;
- }
- return;
- }
-
- switch (type) {
- case FIXED_ARRAY_TYPE:
- FixedArray::BodyDescriptor::IterateBody(this, object_size, v);
- break;
- case JS_OBJECT_TYPE:
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_VALUE_TYPE:
- case JS_ARRAY_TYPE:
- case JS_REGEXP_TYPE:
- case JS_GLOBAL_PROXY_TYPE:
- case JS_GLOBAL_OBJECT_TYPE:
- case JS_BUILTINS_OBJECT_TYPE:
- case JS_MESSAGE_OBJECT_TYPE:
- JSObject::BodyDescriptor::IterateBody(this, object_size, v);
- break;
- case JS_FUNCTION_TYPE:
- reinterpret_cast<JSFunction*>(this)
- ->JSFunctionIterateBody(object_size, v);
- break;
- case ODDBALL_TYPE:
- Oddball::BodyDescriptor::IterateBody(this, v);
- break;
- case PROXY_TYPE:
- reinterpret_cast<Proxy*>(this)->ProxyIterateBody(v);
- break;
- case MAP_TYPE:
- Map::BodyDescriptor::IterateBody(this, v);
- break;
- case CODE_TYPE:
- reinterpret_cast<Code*>(this)->CodeIterateBody(v);
- break;
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
- JSGlobalPropertyCell::BodyDescriptor::IterateBody(this, v);
- break;
- case HEAP_NUMBER_TYPE:
- case FILLER_TYPE:
- case BYTE_ARRAY_TYPE:
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- case EXTERNAL_BYTE_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- case EXTERNAL_SHORT_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- case EXTERNAL_INT_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- break;
- case SHARED_FUNCTION_INFO_TYPE:
- SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
- break;
-
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
- case NAME##_TYPE:
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- StructBodyDescriptor::IterateBody(this, object_size, v);
- break;
- default:
- PrintF("Unknown type: %d\n", type);
- UNREACHABLE();
- }
-}
-
-
-Object* HeapNumber::HeapNumberToBoolean() {
- // NaN, +0, and -0 should return the false object
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- union IeeeDoubleLittleEndianArchType u;
-#elif __BYTE_ORDER == __BIG_ENDIAN
- union IeeeDoubleBigEndianArchType u;
-#endif
- u.d = value();
- if (u.bits.exp == 2047) {
- // Detect NaN for IEEE double precision floating point.
- if ((u.bits.man_low | u.bits.man_high) != 0)
- return GetHeap()->false_value();
- }
- if (u.bits.exp == 0) {
- // Detect +0, and -0 for IEEE double precision floating point.
- if ((u.bits.man_low | u.bits.man_high) == 0)
- return GetHeap()->false_value();
- }
- return GetHeap()->true_value();
-}
-
-
-void HeapNumber::HeapNumberPrint(FILE* out) {
- PrintF(out, "%.16g", Number());
-}
-
-
-void HeapNumber::HeapNumberPrint(StringStream* accumulator) {
- // The Windows version of vsnprintf can allocate when printing a %g string
- // into a buffer that may not be big enough. We don't want random memory
- // allocation when producing post-crash stack traces, so we print into a
- // buffer that is plenty big enough for any floating point number, then
- // print that using vsnprintf (which may truncate but never allocate if
- // there is no more space in the buffer).
- EmbeddedVector<char, 100> buffer;
- OS::SNPrintF(buffer, "%.16g", Number());
- accumulator->Add("%s", buffer.start());
-}
-
-
-String* JSObject::class_name() {
- if (IsJSFunction()) {
- return GetHeap()->function_class_symbol();
- }
- if (map()->constructor()->IsJSFunction()) {
- JSFunction* constructor = JSFunction::cast(map()->constructor());
- return String::cast(constructor->shared()->instance_class_name());
- }
- // If the constructor is not present, return "Object".
- return GetHeap()->Object_symbol();
-}
-
-
-String* JSObject::constructor_name() {
- if (map()->constructor()->IsJSFunction()) {
- JSFunction* constructor = JSFunction::cast(map()->constructor());
- String* name = String::cast(constructor->shared()->name());
- if (name->length() > 0) return name;
- String* inferred_name = constructor->shared()->inferred_name();
- if (inferred_name->length() > 0) return inferred_name;
- Object* proto = GetPrototype();
- if (proto->IsJSObject()) return JSObject::cast(proto)->constructor_name();
- }
- // If the constructor is not present, return "Object".
- return GetHeap()->Object_symbol();
-}
-
-
-MaybeObject* JSObject::AddFastPropertyUsingMap(Map* new_map,
- String* name,
- Object* value) {
- int index = new_map->PropertyIndexFor(name);
- if (map()->unused_property_fields() == 0) {
- ASSERT(map()->unused_property_fields() == 0);
- int new_unused = new_map->unused_property_fields();
- Object* values;
- { MaybeObject* maybe_values =
- properties()->CopySize(properties()->length() + new_unused + 1);
- if (!maybe_values->ToObject(&values)) return maybe_values;
- }
- set_properties(FixedArray::cast(values));
- }
- set_map(new_map);
- return FastPropertyAtPut(index, value);
-}
-
-
-MaybeObject* JSObject::AddFastProperty(String* name,
- Object* value,
- PropertyAttributes attributes) {
- ASSERT(!IsJSGlobalProxy());
-
- // Normalize the object if the name is an actual string (not the
- // hidden symbols) and is not a real identifier.
- Isolate* isolate = GetHeap()->isolate();
- StringInputBuffer buffer(name);
- if (!isolate->scanner_constants()->IsIdentifier(&buffer)
- && name != isolate->heap()->hidden_symbol()) {
- Object* obj;
- { MaybeObject* maybe_obj =
- NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- return AddSlowProperty(name, value, attributes);
- }
-
- DescriptorArray* old_descriptors = map()->instance_descriptors();
- // Compute the new index for new field.
- int index = map()->NextFreePropertyIndex();
-
- // Allocate new instance descriptors with (name, index) added
- FieldDescriptor new_field(name, index, attributes);
- Object* new_descriptors;
- { MaybeObject* maybe_new_descriptors =
- old_descriptors->CopyInsert(&new_field, REMOVE_TRANSITIONS);
- if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
- return maybe_new_descriptors;
- }
- }
-
- // Only allow map transition if the object isn't the global object and there
- // is not a transition for the name, or there's a transition for the name but
- // it's unrelated to properties.
- int descriptor_index = old_descriptors->Search(name);
-
- // External array transitions are stored in the descriptor for property "",
- // which is not a identifier and should have forced a switch to slow
- // properties above.
- ASSERT(descriptor_index == DescriptorArray::kNotFound ||
- old_descriptors->GetType(descriptor_index) != EXTERNAL_ARRAY_TRANSITION);
- bool can_insert_transition = descriptor_index == DescriptorArray::kNotFound ||
- old_descriptors->GetType(descriptor_index) == EXTERNAL_ARRAY_TRANSITION;
- bool allow_map_transition =
- can_insert_transition &&
- (isolate->context()->global_context()->object_function()->map() != map());
-
- ASSERT(index < map()->inobject_properties() ||
- (index - map()->inobject_properties()) < properties()->length() ||
- map()->unused_property_fields() == 0);
- // Allocate a new map for the object.
- Object* r;
- { MaybeObject* maybe_r = map()->CopyDropDescriptors();
- if (!maybe_r->ToObject(&r)) return maybe_r;
- }
- Map* new_map = Map::cast(r);
- if (allow_map_transition) {
- // Allocate new instance descriptors for the old map with map transition.
- MapTransitionDescriptor d(name, Map::cast(new_map), attributes);
- Object* r;
- { MaybeObject* maybe_r = old_descriptors->CopyInsert(&d, KEEP_TRANSITIONS);
- if (!maybe_r->ToObject(&r)) return maybe_r;
- }
- old_descriptors = DescriptorArray::cast(r);
- }
-
- if (map()->unused_property_fields() == 0) {
- if (properties()->length() > MaxFastProperties()) {
- Object* obj;
- { MaybeObject* maybe_obj =
- NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- return AddSlowProperty(name, value, attributes);
- }
- // Make room for the new value
- Object* values;
- { MaybeObject* maybe_values =
- properties()->CopySize(properties()->length() + kFieldsAdded);
- if (!maybe_values->ToObject(&values)) return maybe_values;
- }
- set_properties(FixedArray::cast(values));
- new_map->set_unused_property_fields(kFieldsAdded - 1);
- } else {
- new_map->set_unused_property_fields(map()->unused_property_fields() - 1);
- }
- // We have now allocated all the necessary objects.
- // All the changes can be applied at once, so they are atomic.
- map()->set_instance_descriptors(old_descriptors);
- new_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
- set_map(new_map);
- return FastPropertyAtPut(index, value);
-}
-
-
-MaybeObject* JSObject::AddConstantFunctionProperty(
- String* name,
- JSFunction* function,
- PropertyAttributes attributes) {
- ASSERT(!GetHeap()->InNewSpace(function));
-
- // Allocate new instance descriptors with (name, function) added
- ConstantFunctionDescriptor d(name, function, attributes);
- Object* new_descriptors;
- { MaybeObject* maybe_new_descriptors =
- map()->instance_descriptors()->CopyInsert(&d, REMOVE_TRANSITIONS);
- if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
- return maybe_new_descriptors;
- }
- }
-
- // Allocate a new map for the object.
- Object* new_map;
- { MaybeObject* maybe_new_map = map()->CopyDropDescriptors();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- }
-
- DescriptorArray* descriptors = DescriptorArray::cast(new_descriptors);
- Map::cast(new_map)->set_instance_descriptors(descriptors);
- Map* old_map = map();
- set_map(Map::cast(new_map));
-
- // If the old map is the global object map (from new Object()),
- // then transitions are not added to it, so we are done.
- Heap* heap = old_map->heap();
- if (old_map == heap->isolate()->context()->global_context()->
- object_function()->map()) {
- return function;
- }
-
- // Do not add CONSTANT_TRANSITIONS to global objects
- if (IsGlobalObject()) {
- return function;
- }
-
- // Add a CONSTANT_TRANSITION descriptor to the old map,
- // so future assignments to this property on other objects
- // of the same type will create a normal field, not a constant function.
- // Don't do this for special properties, with non-trival attributes.
- if (attributes != NONE) {
- return function;
- }
- ConstTransitionDescriptor mark(name, Map::cast(new_map));
- { MaybeObject* maybe_new_descriptors =
- old_map->instance_descriptors()->CopyInsert(&mark, KEEP_TRANSITIONS);
- if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
- // We have accomplished the main goal, so return success.
- return function;
- }
- }
- old_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
-
- return function;
-}
-
-
-// Add property in slow mode
-MaybeObject* JSObject::AddSlowProperty(String* name,
- Object* value,
- PropertyAttributes attributes) {
- ASSERT(!HasFastProperties());
- StringDictionary* dict = property_dictionary();
- Object* store_value = value;
- if (IsGlobalObject()) {
- // In case name is an orphaned property reuse the cell.
- int entry = dict->FindEntry(name);
- if (entry != StringDictionary::kNotFound) {
- store_value = dict->ValueAt(entry);
- JSGlobalPropertyCell::cast(store_value)->set_value(value);
- // Assign an enumeration index to the property and update
- // SetNextEnumerationIndex.
- int index = dict->NextEnumerationIndex();
- PropertyDetails details = PropertyDetails(attributes, NORMAL, index);
- dict->SetNextEnumerationIndex(index + 1);
- dict->SetEntry(entry, name, store_value, details);
- return value;
- }
- Heap* heap = GetHeap();
- { MaybeObject* maybe_store_value =
- heap->AllocateJSGlobalPropertyCell(value);
- if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
- }
- JSGlobalPropertyCell::cast(store_value)->set_value(value);
- }
- PropertyDetails details = PropertyDetails(attributes, NORMAL);
- Object* result;
- { MaybeObject* maybe_result = dict->Add(name, store_value, details);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- if (dict != result) set_properties(StringDictionary::cast(result));
- return value;
-}
-
-
-MaybeObject* JSObject::AddProperty(String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- ASSERT(!IsJSGlobalProxy());
- Map* map_of_this = map();
- Heap* heap = map_of_this->heap();
- if (!map_of_this->is_extensible()) {
- if (strict_mode == kNonStrictMode) {
- return heap->undefined_value();
- } else {
- Handle<Object> args[1] = {Handle<String>(name)};
- return heap->isolate()->Throw(
- *FACTORY->NewTypeError("object_not_extensible",
- HandleVector(args, 1)));
- }
- }
- if (HasFastProperties()) {
- // Ensure the descriptor array does not get too big.
- if (map_of_this->instance_descriptors()->number_of_descriptors() <
- DescriptorArray::kMaxNumberOfDescriptors) {
- if (value->IsJSFunction() && !heap->InNewSpace(value)) {
- return AddConstantFunctionProperty(name,
- JSFunction::cast(value),
- attributes);
- } else {
- return AddFastProperty(name, value, attributes);
- }
- } else {
- // Normalize the object to prevent very large instance descriptors.
- // This eliminates unwanted N^2 allocation and lookup behavior.
- Object* obj;
- { MaybeObject* maybe_obj =
- NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- }
- }
- return AddSlowProperty(name, value, attributes);
-}
-
-
-MaybeObject* JSObject::SetPropertyPostInterceptor(
- String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- // Check local property, ignore interceptor.
- LookupResult result;
- LocalLookupRealNamedProperty(name, &result);
- if (result.IsFound()) {
- // An existing property, a map transition or a null descriptor was
- // found. Use set property to handle all these cases.
- return SetProperty(&result, name, value, attributes, strict_mode);
- }
- // Add a new real property.
- return AddProperty(name, value, attributes, strict_mode);
-}
-
-
-MaybeObject* JSObject::ReplaceSlowProperty(String* name,
- Object* value,
- PropertyAttributes attributes) {
- StringDictionary* dictionary = property_dictionary();
- int old_index = dictionary->FindEntry(name);
- int new_enumeration_index = 0; // 0 means "Use the next available index."
- if (old_index != -1) {
- // All calls to ReplaceSlowProperty have had all transitions removed.
- ASSERT(!dictionary->DetailsAt(old_index).IsTransition());
- new_enumeration_index = dictionary->DetailsAt(old_index).index();
- }
-
- PropertyDetails new_details(attributes, NORMAL, new_enumeration_index);
- return SetNormalizedProperty(name, value, new_details);
-}
-
-
-MaybeObject* JSObject::ConvertDescriptorToFieldAndMapTransition(
- String* name,
- Object* new_value,
- PropertyAttributes attributes) {
- Map* old_map = map();
- Object* result;
- { MaybeObject* maybe_result =
- ConvertDescriptorToField(name, new_value, attributes);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // If we get to this point we have succeeded - do not return failure
- // after this point. Later stuff is optional.
- if (!HasFastProperties()) {
- return result;
- }
- // Do not add transitions to the map of "new Object()".
- if (map() == old_map->heap()->isolate()->context()->global_context()->
- object_function()->map()) {
- return result;
- }
-
- MapTransitionDescriptor transition(name,
- map(),
- attributes);
- Object* new_descriptors;
- { MaybeObject* maybe_new_descriptors = old_map->instance_descriptors()->
- CopyInsert(&transition, KEEP_TRANSITIONS);
- if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
- return result; // Yes, return _result_.
- }
- }
- old_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
- return result;
-}
-
-
-MaybeObject* JSObject::ConvertDescriptorToField(String* name,
- Object* new_value,
- PropertyAttributes attributes) {
- if (map()->unused_property_fields() == 0 &&
- properties()->length() > MaxFastProperties()) {
- Object* obj;
- { MaybeObject* maybe_obj =
- NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- return ReplaceSlowProperty(name, new_value, attributes);
- }
-
- int index = map()->NextFreePropertyIndex();
- FieldDescriptor new_field(name, index, attributes);
- // Make a new DescriptorArray replacing an entry with FieldDescriptor.
- Object* descriptors_unchecked;
- { MaybeObject* maybe_descriptors_unchecked = map()->instance_descriptors()->
- CopyInsert(&new_field, REMOVE_TRANSITIONS);
- if (!maybe_descriptors_unchecked->ToObject(&descriptors_unchecked)) {
- return maybe_descriptors_unchecked;
- }
- }
- DescriptorArray* new_descriptors =
- DescriptorArray::cast(descriptors_unchecked);
-
- // Make a new map for the object.
- Object* new_map_unchecked;
- { MaybeObject* maybe_new_map_unchecked = map()->CopyDropDescriptors();
- if (!maybe_new_map_unchecked->ToObject(&new_map_unchecked)) {
- return maybe_new_map_unchecked;
- }
- }
- Map* new_map = Map::cast(new_map_unchecked);
- new_map->set_instance_descriptors(new_descriptors);
-
- // Make new properties array if necessary.
- FixedArray* new_properties = 0; // Will always be NULL or a valid pointer.
- int new_unused_property_fields = map()->unused_property_fields() - 1;
- if (map()->unused_property_fields() == 0) {
- new_unused_property_fields = kFieldsAdded - 1;
- Object* new_properties_object;
- { MaybeObject* maybe_new_properties_object =
- properties()->CopySize(properties()->length() + kFieldsAdded);
- if (!maybe_new_properties_object->ToObject(&new_properties_object)) {
- return maybe_new_properties_object;
- }
- }
- new_properties = FixedArray::cast(new_properties_object);
- }
-
- // Update pointers to commit changes.
- // Object points to the new map.
- new_map->set_unused_property_fields(new_unused_property_fields);
- set_map(new_map);
- if (new_properties) {
- set_properties(FixedArray::cast(new_properties));
- }
- return FastPropertyAtPut(index, new_value);
-}
-
-
-
-MaybeObject* JSObject::SetPropertyWithInterceptor(
- String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSObject> this_handle(this);
- Handle<String> name_handle(name);
- Handle<Object> value_handle(value, isolate);
- Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
- if (!interceptor->setter()->IsUndefined()) {
- LOG(isolate, ApiNamedPropertyAccess("interceptor-named-set", this, name));
- CustomArguments args(isolate, interceptor->data(), this, this);
- v8::AccessorInfo info(args.end());
- v8::NamedPropertySetter setter =
- v8::ToCData<v8::NamedPropertySetter>(interceptor->setter());
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- Handle<Object> value_unhole(value->IsTheHole() ?
- isolate->heap()->undefined_value() :
- value,
- isolate);
- result = setter(v8::Utils::ToLocal(name_handle),
- v8::Utils::ToLocal(value_unhole),
- info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!result.IsEmpty()) return *value_handle;
- }
- MaybeObject* raw_result =
- this_handle->SetPropertyPostInterceptor(*name_handle,
- *value_handle,
- attributes,
- strict_mode);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return raw_result;
-}
-
-
-MaybeObject* JSObject::SetProperty(String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- LookupResult result;
- LocalLookup(name, &result);
- return SetProperty(&result, name, value, attributes, strict_mode);
-}
-
-
-MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
- String* name,
- Object* value,
- JSObject* holder) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
-
- // We should never get here to initialize a const with the hole
- // value since a const declaration would conflict with the setter.
- ASSERT(!value->IsTheHole());
- Handle<Object> value_handle(value, isolate);
-
- // To accommodate both the old and the new api we switch on the
- // data structure used to store the callbacks. Eventually proxy
- // callbacks should be phased out.
- if (structure->IsProxy()) {
- AccessorDescriptor* callback =
- reinterpret_cast<AccessorDescriptor*>(Proxy::cast(structure)->proxy());
- MaybeObject* obj = (callback->setter)(this, value, callback->data);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (obj->IsFailure()) return obj;
- return *value_handle;
- }
-
- if (structure->IsAccessorInfo()) {
- // api style callbacks
- AccessorInfo* data = AccessorInfo::cast(structure);
- Object* call_obj = data->setter();
- v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
- if (call_fun == NULL) return value;
- Handle<String> key(name);
- LOG(isolate, ApiNamedPropertyAccess("store", this, name));
- CustomArguments args(isolate, data->data(), this, JSObject::cast(holder));
- v8::AccessorInfo info(args.end());
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- call_fun(v8::Utils::ToLocal(key),
- v8::Utils::ToLocal(value_handle),
- info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return *value_handle;
- }
-
- if (structure->IsFixedArray()) {
- Object* setter = FixedArray::cast(structure)->get(kSetterIndex);
- if (setter->IsJSFunction()) {
- return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
- } else {
- Handle<String> key(name);
- Handle<Object> holder_handle(holder, isolate);
- Handle<Object> args[2] = { key, holder_handle };
- return isolate->Throw(
- *isolate->factory()->NewTypeError("no_setter_in_callback",
- HandleVector(args, 2)));
- }
- }
-
- UNREACHABLE();
- return NULL;
-}
-
-
-MaybeObject* JSObject::SetPropertyWithDefinedSetter(JSFunction* setter,
- Object* value) {
- Isolate* isolate = GetIsolate();
- Handle<Object> value_handle(value, isolate);
- Handle<JSFunction> fun(JSFunction::cast(setter), isolate);
- Handle<JSObject> self(this, isolate);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug* debug = isolate->debug();
- // Handle stepping into a setter if step into is active.
- if (debug->StepInActive()) {
- debug->HandleStepIn(fun, Handle<Object>::null(), 0, false);
- }
-#endif
- bool has_pending_exception;
- Object** argv[] = { value_handle.location() };
- Execution::Call(fun, self, 1, argv, &has_pending_exception);
- // Check for pending exception and return the result.
- if (has_pending_exception) return Failure::Exception();
- return *value_handle;
-}
-
-
-void JSObject::LookupCallbackSetterInPrototypes(String* name,
- LookupResult* result) {
- Heap* heap = GetHeap();
- for (Object* pt = GetPrototype();
- pt != heap->null_value();
- pt = pt->GetPrototype()) {
- JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
- if (result->IsProperty()) {
- if (result->IsReadOnly()) {
- result->NotFound();
- return;
- }
- if (result->type() == CALLBACKS) {
- return;
- }
- }
- }
- result->NotFound();
-}
-
-
-MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(uint32_t index,
- Object* value,
- bool* found) {
- Heap* heap = GetHeap();
- for (Object* pt = GetPrototype();
- pt != heap->null_value();
- pt = pt->GetPrototype()) {
- if (!JSObject::cast(pt)->HasDictionaryElements()) {
- continue;
- }
- NumberDictionary* dictionary = JSObject::cast(pt)->element_dictionary();
- int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.type() == CALLBACKS) {
- *found = true;
- return SetElementWithCallback(
- dictionary->ValueAt(entry), index, value, JSObject::cast(pt));
- }
- }
- }
- *found = false;
- return heap->the_hole_value();
-}
-
-
-void JSObject::LookupInDescriptor(String* name, LookupResult* result) {
- DescriptorArray* descriptors = map()->instance_descriptors();
- int number = descriptors->SearchWithCache(name);
- if (number != DescriptorArray::kNotFound) {
- result->DescriptorResult(this, descriptors->GetDetails(number), number);
- } else {
- result->NotFound();
- }
-}
-
-
-void Map::LookupInDescriptors(JSObject* holder,
- String* name,
- LookupResult* result) {
- DescriptorArray* descriptors = instance_descriptors();
- DescriptorLookupCache* cache = heap()->isolate()->descriptor_lookup_cache();
- int number = cache->Lookup(descriptors, name);
- if (number == DescriptorLookupCache::kAbsent) {
- number = descriptors->Search(name);
- cache->Update(descriptors, name, number);
- }
- if (number != DescriptorArray::kNotFound) {
- result->DescriptorResult(holder, descriptors->GetDetails(number), number);
- } else {
- result->NotFound();
- }
-}
-
-
-MaybeObject* Map::GetExternalArrayElementsMap(ExternalArrayType array_type,
- bool safe_to_add_transition) {
- Heap* current_heap = heap();
- DescriptorArray* descriptors = instance_descriptors();
- String* external_array_sentinel_name = current_heap->empty_symbol();
-
- if (safe_to_add_transition) {
- // It's only safe to manipulate the descriptor array if it would be
- // safe to add a transition.
-
- ASSERT(!is_shared()); // no transitions can be added to shared maps.
- // Check if the external array transition already exists.
- DescriptorLookupCache* cache =
- current_heap->isolate()->descriptor_lookup_cache();
- int index = cache->Lookup(descriptors, external_array_sentinel_name);
- if (index == DescriptorLookupCache::kAbsent) {
- index = descriptors->Search(external_array_sentinel_name);
- cache->Update(descriptors,
- external_array_sentinel_name,
- index);
- }
-
- // If the transition already exists, check the type. If there is a match,
- // return it.
- if (index != DescriptorArray::kNotFound) {
- PropertyDetails details(PropertyDetails(descriptors->GetDetails(index)));
- if (details.type() == EXTERNAL_ARRAY_TRANSITION &&
- details.array_type() == array_type) {
- return descriptors->GetValue(index);
- } else {
- safe_to_add_transition = false;
- }
- }
- }
-
- // No transition to an existing external array map. Make a new one.
- Object* obj;
- { MaybeObject* maybe_map = CopyDropTransitions();
- if (!maybe_map->ToObject(&obj)) return maybe_map;
- }
- Map* new_map = Map::cast(obj);
-
- new_map->set_has_fast_elements(false);
- new_map->set_has_external_array_elements(true);
- GetIsolate()->counters()->map_to_external_array_elements()->Increment();
-
- // Only remember the map transition if the object's map is NOT equal to the
- // global object_function's map and there is not an already existing
- // non-matching external array transition.
- bool allow_map_transition =
- safe_to_add_transition &&
- (GetIsolate()->context()->global_context()->object_function()->map() !=
- map());
- if (allow_map_transition) {
- // Allocate new instance descriptors for the old map with map transition.
- ExternalArrayTransitionDescriptor desc(external_array_sentinel_name,
- Map::cast(new_map),
- array_type);
- Object* new_descriptors;
- MaybeObject* maybe_new_descriptors = descriptors->CopyInsert(
- &desc,
- KEEP_TRANSITIONS);
- if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
- return maybe_new_descriptors;
- }
- descriptors = DescriptorArray::cast(new_descriptors);
- set_instance_descriptors(descriptors);
- }
-
- return new_map;
-}
-
-
-void JSObject::LocalLookupRealNamedProperty(String* name,
- LookupResult* result) {
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return result->NotFound();
- ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->LocalLookupRealNamedProperty(name, result);
- }
-
- if (HasFastProperties()) {
- LookupInDescriptor(name, result);
- if (result->IsFound()) {
- // A property, a map transition or a null descriptor was found.
- // We return all of these result types because
- // LocalLookupRealNamedProperty is used when setting properties
- // where map transitions and null descriptors are handled.
- ASSERT(result->holder() == this && result->type() != NORMAL);
- // Disallow caching for uninitialized constants. These can only
- // occur as fields.
- if (result->IsReadOnly() && result->type() == FIELD &&
- FastPropertyAt(result->GetFieldIndex())->IsTheHole()) {
- result->DisallowCaching();
- }
- return;
- }
- } else {
- int entry = property_dictionary()->FindEntry(name);
- if (entry != StringDictionary::kNotFound) {
- Object* value = property_dictionary()->ValueAt(entry);
- if (IsGlobalObject()) {
- PropertyDetails d = property_dictionary()->DetailsAt(entry);
- if (d.IsDeleted()) {
- result->NotFound();
- return;
- }
- value = JSGlobalPropertyCell::cast(value)->value();
- }
- // Make sure to disallow caching for uninitialized constants
- // found in the dictionary-mode objects.
- if (value->IsTheHole()) result->DisallowCaching();
- result->DictionaryResult(this, entry);
- return;
- }
- }
- result->NotFound();
-}
-
-
-void JSObject::LookupRealNamedProperty(String* name, LookupResult* result) {
- LocalLookupRealNamedProperty(name, result);
- if (result->IsProperty()) return;
-
- LookupRealNamedPropertyInPrototypes(name, result);
-}
-
-
-void JSObject::LookupRealNamedPropertyInPrototypes(String* name,
- LookupResult* result) {
- Heap* heap = GetHeap();
- for (Object* pt = GetPrototype();
- pt != heap->null_value();
- pt = JSObject::cast(pt)->GetPrototype()) {
- JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
- if (result->IsProperty() && (result->type() != INTERCEPTOR)) return;
- }
- result->NotFound();
-}
-
-
-// We only need to deal with CALLBACKS and INTERCEPTORS
-MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(LookupResult* result,
- String* name,
- Object* value,
- bool check_prototype) {
- if (check_prototype && !result->IsProperty()) {
- LookupCallbackSetterInPrototypes(name, result);
- }
-
- if (result->IsProperty()) {
- if (!result->IsReadOnly()) {
- switch (result->type()) {
- case CALLBACKS: {
- Object* obj = result->GetCallbackObject();
- if (obj->IsAccessorInfo()) {
- AccessorInfo* info = AccessorInfo::cast(obj);
- if (info->all_can_write()) {
- return SetPropertyWithCallback(result->GetCallbackObject(),
- name,
- value,
- result->holder());
- }
- }
- break;
- }
- case INTERCEPTOR: {
- // Try lookup real named properties. Note that only property can be
- // set is callbacks marked as ALL_CAN_WRITE on the prototype chain.
- LookupResult r;
- LookupRealNamedProperty(name, &r);
- if (r.IsProperty()) {
- return SetPropertyWithFailedAccessCheck(&r, name, value,
- check_prototype);
- }
- break;
- }
- default: {
- break;
- }
- }
- }
- }
-
- HandleScope scope;
- Handle<Object> value_handle(value);
- Heap* heap = GetHeap();
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
- return *value_handle;
-}
-
-
-MaybeObject* JSObject::SetProperty(LookupResult* result,
- String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- Heap* heap = GetHeap();
- // Make sure that the top context does not change when doing callbacks or
- // interceptor calls.
- AssertNoContextChange ncc;
-
- // Optimization for 2-byte strings often used as keys in a decompression
- // dictionary. We make these short keys into symbols to avoid constantly
- // reallocating them.
- if (!name->IsSymbol() && name->length() <= 2) {
- Object* symbol_version;
- { MaybeObject* maybe_symbol_version = heap->LookupSymbol(name);
- if (maybe_symbol_version->ToObject(&symbol_version)) {
- name = String::cast(symbol_version);
- }
- }
- }
-
- // Check access rights if needed.
- if (IsAccessCheckNeeded()
- && !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
- return SetPropertyWithFailedAccessCheck(result, name, value, true);
- }
-
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return value;
- ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->SetProperty(
- result, name, value, attributes, strict_mode);
- }
-
- if (!result->IsProperty() && !IsJSContextExtensionObject()) {
- // We could not find a local property so let's check whether there is an
- // accessor that wants to handle the property.
- LookupResult accessor_result;
- LookupCallbackSetterInPrototypes(name, &accessor_result);
- if (accessor_result.IsProperty()) {
- return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
- name,
- value,
- accessor_result.holder());
- }
- }
- if (!result->IsFound()) {
- // Neither properties nor transitions found.
- return AddProperty(name, value, attributes, strict_mode);
- }
- if (result->IsReadOnly() && result->IsProperty()) {
- if (strict_mode == kStrictMode) {
- HandleScope scope;
- Handle<String> key(name);
- Handle<Object> holder(this);
- Handle<Object> args[2] = { key, holder };
- return heap->isolate()->Throw(*heap->isolate()->factory()->NewTypeError(
- "strict_read_only_property", HandleVector(args, 2)));
- } else {
- return value;
- }
- }
- // This is a real property that is not read-only, or it is a
- // transition or null descriptor and there are no setters in the prototypes.
- switch (result->type()) {
- case NORMAL:
- return SetNormalizedProperty(result, value);
- case FIELD:
- return FastPropertyAtPut(result->GetFieldIndex(), value);
- case MAP_TRANSITION:
- if (attributes == result->GetAttributes()) {
- // Only use map transition if the attributes match.
- return AddFastPropertyUsingMap(result->GetTransitionMap(),
- name,
- value);
- }
- return ConvertDescriptorToField(name, value, attributes);
- case CONSTANT_FUNCTION:
- // Only replace the function if necessary.
- if (value == result->GetConstantFunction()) return value;
- // Preserve the attributes of this existing property.
- attributes = result->GetAttributes();
- return ConvertDescriptorToField(name, value, attributes);
- case CALLBACKS:
- return SetPropertyWithCallback(result->GetCallbackObject(),
- name,
- value,
- result->holder());
- case INTERCEPTOR:
- return SetPropertyWithInterceptor(name, value, attributes, strict_mode);
- case CONSTANT_TRANSITION: {
- // If the same constant function is being added we can simply
- // transition to the target map.
- Map* target_map = result->GetTransitionMap();
- DescriptorArray* target_descriptors = target_map->instance_descriptors();
- int number = target_descriptors->SearchWithCache(name);
- ASSERT(number != DescriptorArray::kNotFound);
- ASSERT(target_descriptors->GetType(number) == CONSTANT_FUNCTION);
- JSFunction* function =
- JSFunction::cast(target_descriptors->GetValue(number));
- ASSERT(!HEAP->InNewSpace(function));
- if (value == function) {
- set_map(target_map);
- return value;
- }
- // Otherwise, replace with a MAP_TRANSITION to a new map with a
- // FIELD, even if the value is a constant function.
- return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
- }
- case NULL_DESCRIPTOR:
- case EXTERNAL_ARRAY_TRANSITION:
- return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
- default:
- UNREACHABLE();
- }
- UNREACHABLE();
- return value;
-}
-
-
-// Set a real local property, even if it is READ_ONLY. If the property is not
-// present, add it with attributes NONE. This code is an exact clone of
-// SetProperty, with the check for IsReadOnly and the check for a
-// callback setter removed. The two lines looking up the LookupResult
-// result are also added. If one of the functions is changed, the other
-// should be.
-MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
- String* name,
- Object* value,
- PropertyAttributes attributes) {
-
- // Make sure that the top context does not change when doing callbacks or
- // interceptor calls.
- AssertNoContextChange ncc;
- LookupResult result;
- LocalLookup(name, &result);
- // Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
- return SetPropertyWithFailedAccessCheck(&result, name, value, false);
- }
- }
-
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return value;
- ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->SetLocalPropertyIgnoreAttributes(
- name,
- value,
- attributes);
- }
-
- // Check for accessor in prototype chain removed here in clone.
- if (!result.IsFound()) {
- // Neither properties nor transitions found.
- return AddProperty(name, value, attributes, kNonStrictMode);
- }
-
- PropertyDetails details = PropertyDetails(attributes, NORMAL);
-
- // Check of IsReadOnly removed from here in clone.
- switch (result.type()) {
- case NORMAL:
- return SetNormalizedProperty(name, value, details);
- case FIELD:
- return FastPropertyAtPut(result.GetFieldIndex(), value);
- case MAP_TRANSITION:
- if (attributes == result.GetAttributes()) {
- // Only use map transition if the attributes match.
- return AddFastPropertyUsingMap(result.GetTransitionMap(),
- name,
- value);
- }
- return ConvertDescriptorToField(name, value, attributes);
- case CONSTANT_FUNCTION:
- // Only replace the function if necessary.
- if (value == result.GetConstantFunction()) return value;
- // Preserve the attributes of this existing property.
- attributes = result.GetAttributes();
- return ConvertDescriptorToField(name, value, attributes);
- case CALLBACKS:
- case INTERCEPTOR:
- // Override callback in clone
- return ConvertDescriptorToField(name, value, attributes);
- case CONSTANT_TRANSITION:
- // Replace with a MAP_TRANSITION to a new map with a FIELD, even
- // if the value is a function.
- return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
- case NULL_DESCRIPTOR:
- case EXTERNAL_ARRAY_TRANSITION:
- return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
- default:
- UNREACHABLE();
- }
- UNREACHABLE();
- return value;
-}
-
-
-PropertyAttributes JSObject::GetPropertyAttributePostInterceptor(
- JSObject* receiver,
- String* name,
- bool continue_search) {
- // Check local property, ignore interceptor.
- LookupResult result;
- LocalLookupRealNamedProperty(name, &result);
- if (result.IsProperty()) return result.GetAttributes();
-
- if (continue_search) {
- // Continue searching via the prototype chain.
- Object* pt = GetPrototype();
- if (!pt->IsNull()) {
- return JSObject::cast(pt)->
- GetPropertyAttributeWithReceiver(receiver, name);
- }
- }
- return ABSENT;
-}
-
-
-PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
- JSObject* receiver,
- String* name,
- bool continue_search) {
- Isolate* isolate = GetIsolate();
-
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc;
-
- HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
- Handle<JSObject> receiver_handle(receiver);
- Handle<JSObject> holder_handle(this);
- Handle<String> name_handle(name);
- CustomArguments args(isolate, interceptor->data(), receiver, this);
- v8::AccessorInfo info(args.end());
- if (!interceptor->query()->IsUndefined()) {
- v8::NamedPropertyQuery query =
- v8::ToCData<v8::NamedPropertyQuery>(interceptor->query());
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-has", *holder_handle, name));
- v8::Handle<v8::Integer> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = query(v8::Utils::ToLocal(name_handle), info);
- }
- if (!result.IsEmpty()) {
- ASSERT(result->IsInt32());
- return static_cast<PropertyAttributes>(result->Int32Value());
- }
- } else if (!interceptor->getter()->IsUndefined()) {
- v8::NamedPropertyGetter getter =
- v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-get-has", this, name));
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = getter(v8::Utils::ToLocal(name_handle), info);
- }
- if (!result.IsEmpty()) return DONT_ENUM;
- }
- return holder_handle->GetPropertyAttributePostInterceptor(*receiver_handle,
- *name_handle,
- continue_search);
-}
-
-
-PropertyAttributes JSObject::GetPropertyAttributeWithReceiver(
- JSObject* receiver,
- String* key) {
- uint32_t index = 0;
- if (key->AsArrayIndex(&index)) {
- if (HasElementWithReceiver(receiver, index)) return NONE;
- return ABSENT;
- }
- // Named property.
- LookupResult result;
- Lookup(key, &result);
- return GetPropertyAttribute(receiver, &result, key, true);
-}
-
-
-PropertyAttributes JSObject::GetPropertyAttribute(JSObject* receiver,
- LookupResult* result,
- String* name,
- bool continue_search) {
- // Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_HAS)) {
- return GetPropertyAttributeWithFailedAccessCheck(receiver,
- result,
- name,
- continue_search);
- }
- }
- if (result->IsProperty()) {
- switch (result->type()) {
- case NORMAL: // fall through
- case FIELD:
- case CONSTANT_FUNCTION:
- case CALLBACKS:
- return result->GetAttributes();
- case INTERCEPTOR:
- return result->holder()->
- GetPropertyAttributeWithInterceptor(receiver, name, continue_search);
- default:
- UNREACHABLE();
- }
- }
- return ABSENT;
-}
-
-
-PropertyAttributes JSObject::GetLocalPropertyAttribute(String* name) {
- // Check whether the name is an array index.
- uint32_t index = 0;
- if (name->AsArrayIndex(&index)) {
- if (HasLocalElement(index)) return NONE;
- return ABSENT;
- }
- // Named property.
- LookupResult result;
- LocalLookup(name, &result);
- return GetPropertyAttribute(this, &result, name, false);
-}
-
-
-MaybeObject* NormalizedMapCache::Get(JSObject* obj,
- PropertyNormalizationMode mode) {
- Isolate* isolate = obj->GetIsolate();
- Map* fast = obj->map();
- int index = Hash(fast) % kEntries;
- Object* result = get(index);
- if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) {
-#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- // The cached map should match newly created normalized map bit-by-bit.
- Object* fresh;
- { MaybeObject* maybe_fresh =
- fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
- if (maybe_fresh->ToObject(&fresh)) {
- ASSERT(memcmp(Map::cast(fresh)->address(),
- Map::cast(result)->address(),
- Map::kSize) == 0);
- }
- }
- }
-#endif
- return result;
- }
-
- { MaybeObject* maybe_result =
- fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- set(index, result);
- isolate->counters()->normalized_maps()->Increment();
-
- return result;
-}
-
-
-void NormalizedMapCache::Clear() {
- int entries = length();
- for (int i = 0; i != entries; i++) {
- set_undefined(i);
- }
-}
-
-
-int NormalizedMapCache::Hash(Map* fast) {
- // For performance reasons we only hash the 3 most variable fields of a map:
- // constructor, prototype and bit_field2.
-
- // Shift away the tag.
- int hash = (static_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(fast->constructor())) >> 2);
-
- // XOR-ing the prototype and constructor directly yields too many zero bits
- // when the two pointers are close (which is fairly common).
- // To avoid this we shift the prototype 4 bits relatively to the constructor.
- hash ^= (static_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(fast->prototype())) << 2);
-
- return hash ^ (hash >> 16) ^ fast->bit_field2();
-}
-
-
-bool NormalizedMapCache::CheckHit(Map* slow,
- Map* fast,
- PropertyNormalizationMode mode) {
-#ifdef DEBUG
- slow->SharedMapVerify();
-#endif
- return
- slow->constructor() == fast->constructor() &&
- slow->prototype() == fast->prototype() &&
- slow->inobject_properties() == ((mode == CLEAR_INOBJECT_PROPERTIES) ?
- 0 :
- fast->inobject_properties()) &&
- slow->instance_type() == fast->instance_type() &&
- slow->bit_field() == fast->bit_field() &&
- (slow->bit_field2() & ~(1<<Map::kIsShared)) == fast->bit_field2();
-}
-
-
-MaybeObject* JSObject::UpdateMapCodeCache(String* name, Code* code) {
- if (map()->is_shared()) {
- // Fast case maps are never marked as shared.
- ASSERT(!HasFastProperties());
- // Replace the map with an identical copy that can be safely modified.
- Object* obj;
- { MaybeObject* maybe_obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES,
- UNIQUE_NORMALIZED_MAP);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- GetIsolate()->counters()->normalized_maps()->Increment();
-
- set_map(Map::cast(obj));
- }
- return map()->UpdateCodeCache(name, code);
-}
-
-
-MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
- int expected_additional_properties) {
- if (!HasFastProperties()) return this;
-
- // The global object is always normalized.
- ASSERT(!IsGlobalObject());
- // JSGlobalProxy must never be normalized
- ASSERT(!IsJSGlobalProxy());
-
- Map* map_of_this = map();
-
- // Allocate new content.
- int property_count = map_of_this->NumberOfDescribedProperties();
- if (expected_additional_properties > 0) {
- property_count += expected_additional_properties;
- } else {
- property_count += 2; // Make space for two more properties.
- }
- Object* obj;
- { MaybeObject* maybe_obj =
- StringDictionary::Allocate(property_count);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- StringDictionary* dictionary = StringDictionary::cast(obj);
-
- DescriptorArray* descs = map_of_this->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- PropertyDetails details = descs->GetDetails(i);
- switch (details.type()) {
- case CONSTANT_FUNCTION: {
- PropertyDetails d =
- PropertyDetails(details.attributes(), NORMAL, details.index());
- Object* value = descs->GetConstantFunction(i);
- Object* result;
- { MaybeObject* maybe_result =
- dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- dictionary = StringDictionary::cast(result);
- break;
- }
- case FIELD: {
- PropertyDetails d =
- PropertyDetails(details.attributes(), NORMAL, details.index());
- Object* value = FastPropertyAt(descs->GetFieldIndex(i));
- Object* result;
- { MaybeObject* maybe_result =
- dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- dictionary = StringDictionary::cast(result);
- break;
- }
- case CALLBACKS: {
- PropertyDetails d =
- PropertyDetails(details.attributes(), CALLBACKS, details.index());
- Object* value = descs->GetCallbacksObject(i);
- Object* result;
- { MaybeObject* maybe_result =
- dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- dictionary = StringDictionary::cast(result);
- break;
- }
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- case NULL_DESCRIPTOR:
- case INTERCEPTOR:
- break;
- default:
- UNREACHABLE();
- }
- }
-
- Heap* current_heap = map_of_this->heap();
-
- // Copy the next enumeration index from instance descriptor.
- int index = map_of_this->instance_descriptors()->NextEnumerationIndex();
- dictionary->SetNextEnumerationIndex(index);
-
- { MaybeObject* maybe_obj =
- current_heap->isolate()->context()->global_context()->
- normalized_map_cache()->Get(this, mode);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- Map* new_map = Map::cast(obj);
-
- // We have now successfully allocated all the necessary objects.
- // Changes can now be made with the guarantee that all of them take effect.
-
- // Resize the object in the heap if necessary.
- int new_instance_size = new_map->instance_size();
- int instance_size_delta = map_of_this->instance_size() - new_instance_size;
- ASSERT(instance_size_delta >= 0);
- current_heap->CreateFillerObjectAt(this->address() + new_instance_size,
- instance_size_delta);
-
- set_map(new_map);
- new_map->set_instance_descriptors(current_heap->empty_descriptor_array());
-
- set_properties(dictionary);
-
- current_heap->isolate()->counters()->props_to_dictionary()->Increment();
-
-#ifdef DEBUG
- if (FLAG_trace_normalization) {
- PrintF("Object properties have been normalized:\n");
- Print();
- }
-#endif
- return this;
-}
-
-
-MaybeObject* JSObject::TransformToFastProperties(int unused_property_fields) {
- if (HasFastProperties()) return this;
- ASSERT(!IsGlobalObject());
- return property_dictionary()->
- TransformPropertiesToFastFor(this, unused_property_fields);
-}
-
-
-MaybeObject* JSObject::NormalizeElements() {
- ASSERT(!HasExternalArrayElements());
- if (HasDictionaryElements()) return this;
- Map* old_map = map();
- ASSERT(old_map->has_fast_elements());
-
- Object* obj;
- { MaybeObject* maybe_obj = old_map->GetSlowElementsMap();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- Map* new_map = Map::cast(obj);
-
- // Get number of entries.
- FixedArray* array = FixedArray::cast(elements());
-
- // Compute the effective length.
- int length = IsJSArray() ?
- Smi::cast(JSArray::cast(this)->length())->value() :
- array->length();
- { MaybeObject* maybe_obj = NumberDictionary::Allocate(length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- NumberDictionary* dictionary = NumberDictionary::cast(obj);
- // Copy entries.
- for (int i = 0; i < length; i++) {
- Object* value = array->get(i);
- if (!value->IsTheHole()) {
- PropertyDetails details = PropertyDetails(NONE, NORMAL);
- Object* result;
- { MaybeObject* maybe_result =
- dictionary->AddNumberEntry(i, array->get(i), details);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- dictionary = NumberDictionary::cast(result);
- }
- }
- // Switch to using the dictionary as the backing storage for
- // elements. Set the new map first to satify the elements type
- // assert in set_elements().
- set_map(new_map);
- set_elements(dictionary);
-
- new_map->heap()->isolate()->counters()->elements_to_dictionary()->
- Increment();
-
-#ifdef DEBUG
- if (FLAG_trace_normalization) {
- PrintF("Object elements have been normalized:\n");
- Print();
- }
-#endif
-
- return this;
-}
-
-
-MaybeObject* JSObject::DeletePropertyPostInterceptor(String* name,
- DeleteMode mode) {
- // Check local property, ignore interceptor.
- LookupResult result;
- LocalLookupRealNamedProperty(name, &result);
- if (!result.IsProperty()) return GetHeap()->true_value();
-
- // Normalize object if needed.
- Object* obj;
- { MaybeObject* maybe_obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- return DeleteNormalizedProperty(name, mode);
-}
-
-
-MaybeObject* JSObject::DeletePropertyWithInterceptor(String* name) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
- Handle<String> name_handle(name);
- Handle<JSObject> this_handle(this);
- if (!interceptor->deleter()->IsUndefined()) {
- v8::NamedPropertyDeleter deleter =
- v8::ToCData<v8::NamedPropertyDeleter>(interceptor->deleter());
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-delete", *this_handle, name));
- CustomArguments args(isolate, interceptor->data(), this, this);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Boolean> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = deleter(v8::Utils::ToLocal(name_handle), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!result.IsEmpty()) {
- ASSERT(result->IsBoolean());
- return *v8::Utils::OpenHandle(*result);
- }
- }
- MaybeObject* raw_result =
- this_handle->DeletePropertyPostInterceptor(*name_handle, NORMAL_DELETION);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return raw_result;
-}
-
-
-MaybeObject* JSObject::DeleteElementPostInterceptor(uint32_t index,
- DeleteMode mode) {
- ASSERT(!HasExternalArrayElements());
- switch (GetElementsKind()) {
- case FAST_ELEMENTS: {
- Object* obj;
- { MaybeObject* maybe_obj = EnsureWritableFastElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>(Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedArray::cast(elements())->length());
- if (index < length) {
- FixedArray::cast(elements())->set_the_hole(index);
- }
- break;
- }
- case DICTIONARY_ELEMENTS: {
- NumberDictionary* dictionary = element_dictionary();
- int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
- return dictionary->DeleteProperty(entry, mode);
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- return GetHeap()->true_value();
-}
-
-
-MaybeObject* JSObject::DeleteElementWithInterceptor(uint32_t index) {
- Isolate* isolate = GetIsolate();
- Heap* heap = isolate->heap();
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc;
- HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- if (interceptor->deleter()->IsUndefined()) return heap->false_value();
- v8::IndexedPropertyDeleter deleter =
- v8::ToCData<v8::IndexedPropertyDeleter>(interceptor->deleter());
- Handle<JSObject> this_handle(this);
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-delete", this, index));
- CustomArguments args(isolate, interceptor->data(), this, this);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Boolean> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = deleter(index, info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!result.IsEmpty()) {
- ASSERT(result->IsBoolean());
- return *v8::Utils::OpenHandle(*result);
- }
- MaybeObject* raw_result =
- this_handle->DeleteElementPostInterceptor(index, NORMAL_DELETION);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return raw_result;
-}
-
-
-MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
- Isolate* isolate = GetIsolate();
- // Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !isolate->MayIndexedAccess(this, index, v8::ACCESS_DELETE)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
- return isolate->heap()->false_value();
- }
-
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return isolate->heap()->false_value();
- ASSERT(proto->IsJSGlobalObject());
- return JSGlobalObject::cast(proto)->DeleteElement(index, mode);
- }
-
- if (HasIndexedInterceptor()) {
- // Skip interceptor if forcing deletion.
- if (mode == FORCE_DELETION) {
- return DeleteElementPostInterceptor(index, mode);
- }
- return DeleteElementWithInterceptor(index);
- }
-
- switch (GetElementsKind()) {
- case FAST_ELEMENTS: {
- Object* obj;
- { MaybeObject* maybe_obj = EnsureWritableFastElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>(Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedArray::cast(elements())->length());
- if (index < length) {
- FixedArray::cast(elements())->set_the_hole(index);
- }
- break;
- }
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- // Pixel and external array elements cannot be deleted. Just
- // silently ignore here.
- break;
- case DICTIONARY_ELEMENTS: {
- NumberDictionary* dictionary = element_dictionary();
- int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
- Object* result = dictionary->DeleteProperty(entry, mode);
- if (mode == STRICT_DELETION && result ==
- isolate->heap()->false_value()) {
- // In strict mode, deleting a non-configurable property throws
- // exception. dictionary->DeleteProperty will return false_value()
- // if a non-configurable property is being deleted.
- HandleScope scope;
- Handle<Object> i = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> args[2] = { i, Handle<Object>(this) };
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_delete_property", HandleVector(args, 2)));
- }
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- return isolate->heap()->true_value();
-}
-
-
-MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) {
- Isolate* isolate = GetIsolate();
- // ECMA-262, 3rd, 8.6.2.5
- ASSERT(name->IsString());
-
- // Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this, name, v8::ACCESS_DELETE)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
- return isolate->heap()->false_value();
- }
-
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return isolate->heap()->false_value();
- ASSERT(proto->IsJSGlobalObject());
- return JSGlobalObject::cast(proto)->DeleteProperty(name, mode);
- }
-
- uint32_t index = 0;
- if (name->AsArrayIndex(&index)) {
- return DeleteElement(index, mode);
- } else {
- LookupResult result;
- LocalLookup(name, &result);
- if (!result.IsProperty()) return isolate->heap()->true_value();
- // Ignore attributes if forcing a deletion.
- if (result.IsDontDelete() && mode != FORCE_DELETION) {
- if (mode == STRICT_DELETION) {
- // Deleting a non-configurable property in strict mode.
- HandleScope scope(isolate);
- Handle<Object> args[2] = { Handle<Object>(name), Handle<Object>(this) };
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_delete_property", HandleVector(args, 2)));
- }
- return isolate->heap()->false_value();
- }
- // Check for interceptor.
- if (result.type() == INTERCEPTOR) {
- // Skip interceptor if forcing a deletion.
- if (mode == FORCE_DELETION) {
- return DeletePropertyPostInterceptor(name, mode);
- }
- return DeletePropertyWithInterceptor(name);
- }
- // Normalize object if needed.
- Object* obj;
- { MaybeObject* maybe_obj =
- NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- // Make sure the properties are normalized before removing the entry.
- return DeleteNormalizedProperty(name, mode);
- }
-}
-
-
-// Check whether this object references another object.
-bool JSObject::ReferencesObject(Object* obj) {
- Map* map_of_this = map();
- Heap* heap = map_of_this->heap();
- AssertNoAllocation no_alloc;
-
- // Is the object the constructor for this object?
- if (map_of_this->constructor() == obj) {
- return true;
- }
-
- // Is the object the prototype for this object?
- if (map_of_this->prototype() == obj) {
- return true;
- }
-
- // Check if the object is among the named properties.
- Object* key = SlowReverseLookup(obj);
- if (!key->IsUndefined()) {
- return true;
- }
-
- // Check if the object is among the indexed properties.
- switch (GetElementsKind()) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- // Raw pixels and external arrays do not reference other
- // objects.
- break;
- case FAST_ELEMENTS: {
- int length = IsJSArray() ?
- Smi::cast(JSArray::cast(this)->length())->value() :
- FixedArray::cast(elements())->length();
- for (int i = 0; i < length; i++) {
- Object* element = FixedArray::cast(elements())->get(i);
- if (!element->IsTheHole() && element == obj) {
- return true;
- }
- }
- break;
- }
- case DICTIONARY_ELEMENTS: {
- key = element_dictionary()->SlowReverseLookup(obj);
- if (!key->IsUndefined()) {
- return true;
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-
- // For functions check the context.
- if (IsJSFunction()) {
- // Get the constructor function for arguments array.
- JSObject* arguments_boilerplate =
- heap->isolate()->context()->global_context()->
- arguments_boilerplate();
- JSFunction* arguments_function =
- JSFunction::cast(arguments_boilerplate->map()->constructor());
-
- // Get the context and don't check if it is the global context.
- JSFunction* f = JSFunction::cast(this);
- Context* context = f->context();
- if (context->IsGlobalContext()) {
- return false;
- }
-
- // Check the non-special context slots.
- for (int i = Context::MIN_CONTEXT_SLOTS; i < context->length(); i++) {
- // Only check JS objects.
- if (context->get(i)->IsJSObject()) {
- JSObject* ctxobj = JSObject::cast(context->get(i));
- // If it is an arguments array check the content.
- if (ctxobj->map()->constructor() == arguments_function) {
- if (ctxobj->ReferencesObject(obj)) {
- return true;
- }
- } else if (ctxobj == obj) {
- return true;
- }
- }
- }
-
- // Check the context extension if any.
- if (context->has_extension()) {
- return context->extension()->ReferencesObject(obj);
- }
- }
-
- // No references to object.
- return false;
-}
-
-
-MaybeObject* JSObject::PreventExtensions() {
- Isolate* isolate = GetIsolate();
- if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_KEYS);
- return isolate->heap()->false_value();
- }
-
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return this;
- ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->PreventExtensions();
- }
-
- // If there are fast elements we normalize.
- if (HasFastElements()) {
- Object* ok;
- { MaybeObject* maybe_ok = NormalizeElements();
- if (!maybe_ok->ToObject(&ok)) return maybe_ok;
- }
- }
- // Make sure that we never go back to fast case.
- element_dictionary()->set_requires_slow_elements();
-
- // Do a map transition, other objects with this map may still
- // be extensible.
- Object* new_map;
- { MaybeObject* maybe_new_map = map()->CopyDropTransitions();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- }
- Map::cast(new_map)->set_is_extensible(false);
- set_map(Map::cast(new_map));
- ASSERT(!map()->is_extensible());
- return new_map;
-}
-
-
-// Tests for the fast common case for property enumeration:
-// - This object and all prototypes has an enum cache (which means that it has
-// no interceptors and needs no access checks).
-// - This object has no elements.
-// - No prototype has enumerable properties/elements.
-bool JSObject::IsSimpleEnum() {
- Heap* heap = GetHeap();
- for (Object* o = this;
- o != heap->null_value();
- o = JSObject::cast(o)->GetPrototype()) {
- JSObject* curr = JSObject::cast(o);
- if (!curr->map()->instance_descriptors()->HasEnumCache()) return false;
- ASSERT(!curr->HasNamedInterceptor());
- ASSERT(!curr->HasIndexedInterceptor());
- ASSERT(!curr->IsAccessCheckNeeded());
- if (curr->NumberOfEnumElements() > 0) return false;
- if (curr != this) {
- FixedArray* curr_fixed_array =
- FixedArray::cast(curr->map()->instance_descriptors()->GetEnumCache());
- if (curr_fixed_array->length() > 0) return false;
- }
- }
- return true;
-}
-
-
-int Map::NumberOfDescribedProperties() {
- int result = 0;
- DescriptorArray* descs = instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (descs->IsProperty(i)) result++;
- }
- return result;
-}
-
-
-int Map::PropertyIndexFor(String* name) {
- DescriptorArray* descs = instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (name->Equals(descs->GetKey(i)) && !descs->IsNullDescriptor(i)) {
- return descs->GetFieldIndex(i);
- }
- }
- return -1;
-}
-
-
-int Map::NextFreePropertyIndex() {
- int max_index = -1;
- DescriptorArray* descs = instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (descs->GetType(i) == FIELD) {
- int current_index = descs->GetFieldIndex(i);
- if (current_index > max_index) max_index = current_index;
- }
- }
- return max_index + 1;
-}
-
-
-AccessorDescriptor* Map::FindAccessor(String* name) {
- DescriptorArray* descs = instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (name->Equals(descs->GetKey(i)) && descs->GetType(i) == CALLBACKS) {
- return descs->GetCallbacks(i);
- }
- }
- return NULL;
-}
-
-
-void JSObject::LocalLookup(String* name, LookupResult* result) {
- ASSERT(name->IsString());
-
- Heap* heap = GetHeap();
-
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return result->NotFound();
- ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->LocalLookup(name, result);
- }
-
- // Do not use inline caching if the object is a non-global object
- // that requires access checks.
- if (!IsJSGlobalProxy() && IsAccessCheckNeeded()) {
- result->DisallowCaching();
- }
-
- // Check __proto__ before interceptor.
- if (name->Equals(heap->Proto_symbol()) &&
- !IsJSContextExtensionObject()) {
- result->ConstantResult(this);
- return;
- }
-
- // Check for lookup interceptor except when bootstrapping.
- if (HasNamedInterceptor() && !heap->isolate()->bootstrapper()->IsActive()) {
- result->InterceptorResult(this);
- return;
- }
-
- LocalLookupRealNamedProperty(name, result);
-}
-
-
-void JSObject::Lookup(String* name, LookupResult* result) {
- // Ecma-262 3rd 8.6.2.4
- Heap* heap = GetHeap();
- for (Object* current = this;
- current != heap->null_value();
- current = JSObject::cast(current)->GetPrototype()) {
- JSObject::cast(current)->LocalLookup(name, result);
- if (result->IsProperty()) return;
- }
- result->NotFound();
-}
-
-
-// Search object and it's prototype chain for callback properties.
-void JSObject::LookupCallback(String* name, LookupResult* result) {
- Heap* heap = GetHeap();
- for (Object* current = this;
- current != heap->null_value();
- current = JSObject::cast(current)->GetPrototype()) {
- JSObject::cast(current)->LocalLookupRealNamedProperty(name, result);
- if (result->IsProperty() && result->type() == CALLBACKS) return;
- }
- result->NotFound();
-}
-
-
-MaybeObject* JSObject::DefineGetterSetter(String* name,
- PropertyAttributes attributes) {
- Heap* heap = GetHeap();
- // Make sure that the top context does not change when doing callbacks or
- // interceptor calls.
- AssertNoContextChange ncc;
-
- // Try to flatten before operating on the string.
- name->TryFlatten();
-
- if (!CanSetCallback(name)) {
- return heap->undefined_value();
- }
-
- uint32_t index = 0;
- bool is_element = name->AsArrayIndex(&index);
-
- if (is_element) {
- switch (GetElementsKind()) {
- case FAST_ELEMENTS:
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- // Ignore getters and setters on pixel and external array
- // elements.
- return heap->undefined_value();
- case DICTIONARY_ELEMENTS: {
- // Lookup the index.
- NumberDictionary* dictionary = element_dictionary();
- int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
- Object* result = dictionary->ValueAt(entry);
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.IsReadOnly()) return heap->undefined_value();
- if (details.type() == CALLBACKS) {
- if (result->IsFixedArray()) {
- return result;
- }
- // Otherwise allow to override it.
- }
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- } else {
- // Lookup the name.
- LookupResult result;
- LocalLookup(name, &result);
- if (result.IsProperty()) {
- if (result.IsReadOnly()) return heap->undefined_value();
- if (result.type() == CALLBACKS) {
- Object* obj = result.GetCallbackObject();
- // Need to preserve old getters/setters.
- if (obj->IsFixedArray()) {
- // Use set to update attributes.
- return SetPropertyCallback(name, obj, attributes);
- }
- }
- }
- }
-
- // Allocate the fixed array to hold getter and setter.
- Object* structure;
- { MaybeObject* maybe_structure = heap->AllocateFixedArray(2, TENURED);
- if (!maybe_structure->ToObject(&structure)) return maybe_structure;
- }
-
- if (is_element) {
- return SetElementCallback(index, structure, attributes);
- } else {
- return SetPropertyCallback(name, structure, attributes);
- }
-}
-
-
-bool JSObject::CanSetCallback(String* name) {
- ASSERT(!IsAccessCheckNeeded()
- || Isolate::Current()->MayNamedAccess(this, name, v8::ACCESS_SET));
-
- // Check if there is an API defined callback object which prohibits
- // callback overwriting in this object or it's prototype chain.
- // This mechanism is needed for instance in a browser setting, where
- // certain accessors such as window.location should not be allowed
- // to be overwritten because allowing overwriting could potentially
- // cause security problems.
- LookupResult callback_result;
- LookupCallback(name, &callback_result);
- if (callback_result.IsProperty()) {
- Object* obj = callback_result.GetCallbackObject();
- if (obj->IsAccessorInfo() &&
- AccessorInfo::cast(obj)->prohibits_overwriting()) {
- return false;
- }
- }
-
- return true;
-}
-
-
-MaybeObject* JSObject::SetElementCallback(uint32_t index,
- Object* structure,
- PropertyAttributes attributes) {
- PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
-
- // Normalize elements to make this operation simple.
- Object* ok;
- { MaybeObject* maybe_ok = NormalizeElements();
- if (!maybe_ok->ToObject(&ok)) return maybe_ok;
- }
-
- // Update the dictionary with the new CALLBACKS property.
- Object* dict;
- { MaybeObject* maybe_dict =
- element_dictionary()->Set(index, structure, details);
- if (!maybe_dict->ToObject(&dict)) return maybe_dict;
- }
-
- NumberDictionary* elements = NumberDictionary::cast(dict);
- elements->set_requires_slow_elements();
- // Set the potential new dictionary on the object.
- set_elements(elements);
-
- return structure;
-}
-
-
-MaybeObject* JSObject::SetPropertyCallback(String* name,
- Object* structure,
- PropertyAttributes attributes) {
- PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
-
- bool convert_back_to_fast = HasFastProperties() &&
- (map()->instance_descriptors()->number_of_descriptors()
- < DescriptorArray::kMaxNumberOfDescriptors);
-
- // Normalize object to make this operation simple.
- Object* ok;
- { MaybeObject* maybe_ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_ok->ToObject(&ok)) return maybe_ok;
- }
-
- // For the global object allocate a new map to invalidate the global inline
- // caches which have a global property cell reference directly in the code.
- if (IsGlobalObject()) {
- Object* new_map;
- { MaybeObject* maybe_new_map = map()->CopyDropDescriptors();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- }
- set_map(Map::cast(new_map));
- // When running crankshaft, changing the map is not enough. We
- // need to deoptimize all functions that rely on this global
- // object.
- Deoptimizer::DeoptimizeGlobalObject(this);
- }
-
- // Update the dictionary with the new CALLBACKS property.
- Object* result;
- { MaybeObject* maybe_result = SetNormalizedProperty(name, structure, details);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- if (convert_back_to_fast) {
- { MaybeObject* maybe_ok = TransformToFastProperties(0);
- if (!maybe_ok->ToObject(&ok)) return maybe_ok;
- }
- }
- return result;
-}
-
-MaybeObject* JSObject::DefineAccessor(String* name,
- bool is_getter,
- Object* fun,
- PropertyAttributes attributes) {
- ASSERT(fun->IsJSFunction() || fun->IsUndefined());
- Isolate* isolate = GetIsolate();
- // Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
- return isolate->heap()->undefined_value();
- }
-
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return this;
- ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->DefineAccessor(name, is_getter,
- fun, attributes);
- }
-
- Object* array;
- { MaybeObject* maybe_array = DefineGetterSetter(name, attributes);
- if (!maybe_array->ToObject(&array)) return maybe_array;
- }
- if (array->IsUndefined()) return array;
- FixedArray::cast(array)->set(is_getter ? 0 : 1, fun);
- return this;
-}
-
-
-MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
- Isolate* isolate = GetIsolate();
- String* name = String::cast(info->name());
- // Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
- return isolate->heap()->undefined_value();
- }
-
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return this;
- ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->DefineAccessor(info);
- }
-
- // Make sure that the top context does not change when doing callbacks or
- // interceptor calls.
- AssertNoContextChange ncc;
-
- // Try to flatten before operating on the string.
- name->TryFlatten();
-
- if (!CanSetCallback(name)) {
- return isolate->heap()->undefined_value();
- }
-
- uint32_t index = 0;
- bool is_element = name->AsArrayIndex(&index);
-
- if (is_element) {
- if (IsJSArray()) return isolate->heap()->undefined_value();
-
- // Accessors overwrite previous callbacks (cf. with getters/setters).
- switch (GetElementsKind()) {
- case FAST_ELEMENTS:
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- // Ignore getters and setters on pixel and external array
- // elements.
- return isolate->heap()->undefined_value();
- case DICTIONARY_ELEMENTS:
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- Object* ok;
- { MaybeObject* maybe_ok =
- SetElementCallback(index, info, info->property_attributes());
- if (!maybe_ok->ToObject(&ok)) return maybe_ok;
- }
- } else {
- // Lookup the name.
- LookupResult result;
- LocalLookup(name, &result);
- // ES5 forbids turning a property into an accessor if it's not
- // configurable (that is IsDontDelete in ES3 and v8), see 8.6.1 (Table 5).
- if (result.IsProperty() && (result.IsReadOnly() || result.IsDontDelete())) {
- return isolate->heap()->undefined_value();
- }
- Object* ok;
- { MaybeObject* maybe_ok =
- SetPropertyCallback(name, info, info->property_attributes());
- if (!maybe_ok->ToObject(&ok)) return maybe_ok;
- }
- }
-
- return this;
-}
-
-
-Object* JSObject::LookupAccessor(String* name, bool is_getter) {
- Heap* heap = GetHeap();
-
- // Make sure that the top context does not change when doing callbacks or
- // interceptor calls.
- AssertNoContextChange ncc;
-
- // Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_HAS)) {
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return heap->undefined_value();
- }
-
- // Make the lookup and include prototypes.
- int accessor_index = is_getter ? kGetterIndex : kSetterIndex;
- uint32_t index = 0;
- if (name->AsArrayIndex(&index)) {
- for (Object* obj = this;
- obj != heap->null_value();
- obj = JSObject::cast(obj)->GetPrototype()) {
- JSObject* js_object = JSObject::cast(obj);
- if (js_object->HasDictionaryElements()) {
- NumberDictionary* dictionary = js_object->element_dictionary();
- int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
- Object* element = dictionary->ValueAt(entry);
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.type() == CALLBACKS) {
- if (element->IsFixedArray()) {
- return FixedArray::cast(element)->get(accessor_index);
- }
- }
- }
- }
- }
- } else {
- for (Object* obj = this;
- obj != heap->null_value();
- obj = JSObject::cast(obj)->GetPrototype()) {
- LookupResult result;
- JSObject::cast(obj)->LocalLookup(name, &result);
- if (result.IsProperty()) {
- if (result.IsReadOnly()) return heap->undefined_value();
- if (result.type() == CALLBACKS) {
- Object* obj = result.GetCallbackObject();
- if (obj->IsFixedArray()) {
- return FixedArray::cast(obj)->get(accessor_index);
- }
- }
- }
- }
- }
- return heap->undefined_value();
-}
-
-
-Object* JSObject::SlowReverseLookup(Object* value) {
- if (HasFastProperties()) {
- DescriptorArray* descs = map()->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (descs->GetType(i) == FIELD) {
- if (FastPropertyAt(descs->GetFieldIndex(i)) == value) {
- return descs->GetKey(i);
- }
- } else if (descs->GetType(i) == CONSTANT_FUNCTION) {
- if (descs->GetConstantFunction(i) == value) {
- return descs->GetKey(i);
- }
- }
- }
- return GetHeap()->undefined_value();
- } else {
- return property_dictionary()->SlowReverseLookup(value);
- }
-}
-
-
-MaybeObject* Map::CopyDropDescriptors() {
- Heap* heap = GetHeap();
- Object* result;
- { MaybeObject* maybe_result =
- heap->AllocateMap(instance_type(), instance_size());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Map::cast(result)->set_prototype(prototype());
- Map::cast(result)->set_constructor(constructor());
- // Don't copy descriptors, so map transitions always remain a forest.
- // If we retained the same descriptors we would have two maps
- // pointing to the same transition which is bad because the garbage
- // collector relies on being able to reverse pointers from transitions
- // to maps. If properties need to be retained use CopyDropTransitions.
- Map::cast(result)->set_instance_descriptors(
- heap->empty_descriptor_array());
- // Please note instance_type and instance_size are set when allocated.
- Map::cast(result)->set_inobject_properties(inobject_properties());
- Map::cast(result)->set_unused_property_fields(unused_property_fields());
-
- // If the map has pre-allocated properties always start out with a descriptor
- // array describing these properties.
- if (pre_allocated_property_fields() > 0) {
- ASSERT(constructor()->IsJSFunction());
- JSFunction* ctor = JSFunction::cast(constructor());
- Object* descriptors;
- { MaybeObject* maybe_descriptors =
- ctor->initial_map()->instance_descriptors()->RemoveTransitions();
- if (!maybe_descriptors->ToObject(&descriptors)) return maybe_descriptors;
- }
- Map::cast(result)->set_instance_descriptors(
- DescriptorArray::cast(descriptors));
- Map::cast(result)->set_pre_allocated_property_fields(
- pre_allocated_property_fields());
- }
- Map::cast(result)->set_bit_field(bit_field());
- Map::cast(result)->set_bit_field2(bit_field2());
- Map::cast(result)->set_is_shared(false);
- Map::cast(result)->ClearCodeCache(heap);
- return result;
-}
-
-
-MaybeObject* Map::CopyNormalized(PropertyNormalizationMode mode,
- NormalizedMapSharingMode sharing) {
- int new_instance_size = instance_size();
- if (mode == CLEAR_INOBJECT_PROPERTIES) {
- new_instance_size -= inobject_properties() * kPointerSize;
- }
-
- Object* result;
- { MaybeObject* maybe_result =
- GetHeap()->AllocateMap(instance_type(), new_instance_size);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- if (mode != CLEAR_INOBJECT_PROPERTIES) {
- Map::cast(result)->set_inobject_properties(inobject_properties());
- }
-
- Map::cast(result)->set_prototype(prototype());
- Map::cast(result)->set_constructor(constructor());
-
- Map::cast(result)->set_bit_field(bit_field());
- Map::cast(result)->set_bit_field2(bit_field2());
-
- Map::cast(result)->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
-
-#ifdef DEBUG
- if (Map::cast(result)->is_shared()) {
- Map::cast(result)->SharedMapVerify();
- }
-#endif
-
- return result;
-}
-
-
-MaybeObject* Map::CopyDropTransitions() {
- Object* new_map;
- { MaybeObject* maybe_new_map = CopyDropDescriptors();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- }
- Object* descriptors;
- { MaybeObject* maybe_descriptors =
- instance_descriptors()->RemoveTransitions();
- if (!maybe_descriptors->ToObject(&descriptors)) return maybe_descriptors;
- }
- cast(new_map)->set_instance_descriptors(DescriptorArray::cast(descriptors));
- return new_map;
-}
-
-
-MaybeObject* Map::UpdateCodeCache(String* name, Code* code) {
- // Allocate the code cache if not present.
- if (code_cache()->IsFixedArray()) {
- Object* result;
- { MaybeObject* maybe_result = code->heap()->AllocateCodeCache();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- set_code_cache(result);
- }
-
- // Update the code cache.
- return CodeCache::cast(code_cache())->Update(name, code);
-}
-
-
-Object* Map::FindInCodeCache(String* name, Code::Flags flags) {
- // Do a lookup if a code cache exists.
- if (!code_cache()->IsFixedArray()) {
- return CodeCache::cast(code_cache())->Lookup(name, flags);
- } else {
- return GetHeap()->undefined_value();
- }
-}
-
-
-int Map::IndexInCodeCache(Object* name, Code* code) {
- // Get the internal index if a code cache exists.
- if (!code_cache()->IsFixedArray()) {
- return CodeCache::cast(code_cache())->GetIndex(name, code);
- }
- return -1;
-}
-
-
-void Map::RemoveFromCodeCache(String* name, Code* code, int index) {
- // No GC is supposed to happen between a call to IndexInCodeCache and
- // RemoveFromCodeCache so the code cache must be there.
- ASSERT(!code_cache()->IsFixedArray());
- CodeCache::cast(code_cache())->RemoveByIndex(name, code, index);
-}
-
-
-void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
- Map* current = this;
- Map* meta_map = heap()->meta_map();
- while (current != meta_map) {
- DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
- *RawField(current, Map::kInstanceDescriptorsOffset));
- if (d == heap()->empty_descriptor_array()) {
- Map* prev = current->map();
- current->set_map(meta_map);
- callback(current, data);
- current = prev;
- continue;
- }
-
- FixedArray* contents = reinterpret_cast<FixedArray*>(
- d->get(DescriptorArray::kContentArrayIndex));
- Object** map_or_index_field = RawField(contents, HeapObject::kMapOffset);
- Object* map_or_index = *map_or_index_field;
- bool map_done = true;
- for (int i = map_or_index->IsSmi() ? Smi::cast(map_or_index)->value() : 0;
- i < contents->length();
- i += 2) {
- PropertyDetails details(Smi::cast(contents->get(i + 1)));
- if (details.IsTransition()) {
- Map* next = reinterpret_cast<Map*>(contents->get(i));
- next->set_map(current);
- *map_or_index_field = Smi::FromInt(i + 2);
- current = next;
- map_done = false;
- break;
- }
- }
- if (!map_done) continue;
- *map_or_index_field = heap()->fixed_array_map();
- Map* prev = current->map();
- current->set_map(meta_map);
- callback(current, data);
- current = prev;
- }
-}
-
-
-MaybeObject* CodeCache::Update(String* name, Code* code) {
- ASSERT(code->ic_state() == MONOMORPHIC);
-
- // The number of monomorphic stubs for normal load/store/call IC's can grow to
- // a large number and therefore they need to go into a hash table. They are
- // used to load global properties from cells.
- if (code->type() == NORMAL) {
- // Make sure that a hash table is allocated for the normal load code cache.
- if (normal_type_cache()->IsUndefined()) {
- Object* result;
- { MaybeObject* maybe_result =
- CodeCacheHashTable::Allocate(CodeCacheHashTable::kInitialSize);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- set_normal_type_cache(result);
- }
- return UpdateNormalTypeCache(name, code);
- } else {
- ASSERT(default_cache()->IsFixedArray());
- return UpdateDefaultCache(name, code);
- }
-}
-
-
-MaybeObject* CodeCache::UpdateDefaultCache(String* name, Code* code) {
- // When updating the default code cache we disregard the type encoded in the
- // flags. This allows call constant stubs to overwrite call field
- // stubs, etc.
- Code::Flags flags = Code::RemoveTypeFromFlags(code->flags());
-
- // First check whether we can update existing code cache without
- // extending it.
- FixedArray* cache = default_cache();
- int length = cache->length();
- int deleted_index = -1;
- for (int i = 0; i < length; i += kCodeCacheEntrySize) {
- Object* key = cache->get(i);
- if (key->IsNull()) {
- if (deleted_index < 0) deleted_index = i;
- continue;
- }
- if (key->IsUndefined()) {
- if (deleted_index >= 0) i = deleted_index;
- cache->set(i + kCodeCacheEntryNameOffset, name);
- cache->set(i + kCodeCacheEntryCodeOffset, code);
- return this;
- }
- if (name->Equals(String::cast(key))) {
- Code::Flags found =
- Code::cast(cache->get(i + kCodeCacheEntryCodeOffset))->flags();
- if (Code::RemoveTypeFromFlags(found) == flags) {
- cache->set(i + kCodeCacheEntryCodeOffset, code);
- return this;
- }
- }
- }
-
- // Reached the end of the code cache. If there were deleted
- // elements, reuse the space for the first of them.
- if (deleted_index >= 0) {
- cache->set(deleted_index + kCodeCacheEntryNameOffset, name);
- cache->set(deleted_index + kCodeCacheEntryCodeOffset, code);
- return this;
- }
-
- // Extend the code cache with some new entries (at least one). Must be a
- // multiple of the entry size.
- int new_length = length + ((length >> 1)) + kCodeCacheEntrySize;
- new_length = new_length - new_length % kCodeCacheEntrySize;
- ASSERT((new_length % kCodeCacheEntrySize) == 0);
- Object* result;
- { MaybeObject* maybe_result = cache->CopySize(new_length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- // Add the (name, code) pair to the new cache.
- cache = FixedArray::cast(result);
- cache->set(length + kCodeCacheEntryNameOffset, name);
- cache->set(length + kCodeCacheEntryCodeOffset, code);
- set_default_cache(cache);
- return this;
-}
-
-
-MaybeObject* CodeCache::UpdateNormalTypeCache(String* name, Code* code) {
- // Adding a new entry can cause a new cache to be allocated.
- CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
- Object* new_cache;
- { MaybeObject* maybe_new_cache = cache->Put(name, code);
- if (!maybe_new_cache->ToObject(&new_cache)) return maybe_new_cache;
- }
- set_normal_type_cache(new_cache);
- return this;
-}
-
-
-Object* CodeCache::Lookup(String* name, Code::Flags flags) {
- if (Code::ExtractTypeFromFlags(flags) == NORMAL) {
- return LookupNormalTypeCache(name, flags);
- } else {
- return LookupDefaultCache(name, flags);
- }
-}
-
-
-Object* CodeCache::LookupDefaultCache(String* name, Code::Flags flags) {
- FixedArray* cache = default_cache();
- int length = cache->length();
- for (int i = 0; i < length; i += kCodeCacheEntrySize) {
- Object* key = cache->get(i + kCodeCacheEntryNameOffset);
- // Skip deleted elements.
- if (key->IsNull()) continue;
- if (key->IsUndefined()) return key;
- if (name->Equals(String::cast(key))) {
- Code* code = Code::cast(cache->get(i + kCodeCacheEntryCodeOffset));
- if (code->flags() == flags) {
- return code;
- }
- }
- }
- return GetHeap()->undefined_value();
-}
-
-
-Object* CodeCache::LookupNormalTypeCache(String* name, Code::Flags flags) {
- if (!normal_type_cache()->IsUndefined()) {
- CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
- return cache->Lookup(name, flags);
- } else {
- return GetHeap()->undefined_value();
- }
-}
-
-
-int CodeCache::GetIndex(Object* name, Code* code) {
- if (code->type() == NORMAL) {
- if (normal_type_cache()->IsUndefined()) return -1;
- CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
- return cache->GetIndex(String::cast(name), code->flags());
- }
-
- FixedArray* array = default_cache();
- int len = array->length();
- for (int i = 0; i < len; i += kCodeCacheEntrySize) {
- if (array->get(i + kCodeCacheEntryCodeOffset) == code) return i + 1;
- }
- return -1;
-}
-
-
-void CodeCache::RemoveByIndex(Object* name, Code* code, int index) {
- if (code->type() == NORMAL) {
- ASSERT(!normal_type_cache()->IsUndefined());
- CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
- ASSERT(cache->GetIndex(String::cast(name), code->flags()) == index);
- cache->RemoveByIndex(index);
- } else {
- FixedArray* array = default_cache();
- ASSERT(array->length() >= index && array->get(index)->IsCode());
- // Use null instead of undefined for deleted elements to distinguish
- // deleted elements from unused elements. This distinction is used
- // when looking up in the cache and when updating the cache.
- ASSERT_EQ(1, kCodeCacheEntryCodeOffset - kCodeCacheEntryNameOffset);
- array->set_null(index - 1); // Name.
- array->set_null(index); // Code.
- }
-}
-
-
-// The key in the code cache hash table consists of the property name and the
-// code object. The actual match is on the name and the code flags. If a key
-// is created using the flags and not a code object it can only be used for
-// lookup not to create a new entry.
-class CodeCacheHashTableKey : public HashTableKey {
- public:
- CodeCacheHashTableKey(String* name, Code::Flags flags)
- : name_(name), flags_(flags), code_(NULL) { }
-
- CodeCacheHashTableKey(String* name, Code* code)
- : name_(name),
- flags_(code->flags()),
- code_(code) { }
-
-
- bool IsMatch(Object* other) {
- if (!other->IsFixedArray()) return false;
- FixedArray* pair = FixedArray::cast(other);
- String* name = String::cast(pair->get(0));
- Code::Flags flags = Code::cast(pair->get(1))->flags();
- if (flags != flags_) {
- return false;
- }
- return name_->Equals(name);
- }
-
- static uint32_t NameFlagsHashHelper(String* name, Code::Flags flags) {
- return name->Hash() ^ flags;
- }
-
- uint32_t Hash() { return NameFlagsHashHelper(name_, flags_); }
-
- uint32_t HashForObject(Object* obj) {
- FixedArray* pair = FixedArray::cast(obj);
- String* name = String::cast(pair->get(0));
- Code* code = Code::cast(pair->get(1));
- return NameFlagsHashHelper(name, code->flags());
- }
-
- MUST_USE_RESULT MaybeObject* AsObject() {
- ASSERT(code_ != NULL);
- Object* obj;
- { MaybeObject* maybe_obj = code_->heap()->AllocateFixedArray(2);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* pair = FixedArray::cast(obj);
- pair->set(0, name_);
- pair->set(1, code_);
- return pair;
- }
-
- private:
- String* name_;
- Code::Flags flags_;
- Code* code_;
-};
-
-
-Object* CodeCacheHashTable::Lookup(String* name, Code::Flags flags) {
- CodeCacheHashTableKey key(name, flags);
- int entry = FindEntry(&key);
- if (entry == kNotFound) return GetHeap()->undefined_value();
- return get(EntryToIndex(entry) + 1);
-}
-
-
-MaybeObject* CodeCacheHashTable::Put(String* name, Code* code) {
- CodeCacheHashTableKey key(name, code);
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- // Don't use this, as the table might have grown.
- CodeCacheHashTable* cache = reinterpret_cast<CodeCacheHashTable*>(obj);
-
- int entry = cache->FindInsertionEntry(key.Hash());
- Object* k;
- { MaybeObject* maybe_k = key.AsObject();
- if (!maybe_k->ToObject(&k)) return maybe_k;
- }
-
- cache->set(EntryToIndex(entry), k);
- cache->set(EntryToIndex(entry) + 1, code);
- cache->ElementAdded();
- return cache;
-}
-
-
-int CodeCacheHashTable::GetIndex(String* name, Code::Flags flags) {
- CodeCacheHashTableKey key(name, flags);
- int entry = FindEntry(&key);
- return (entry == kNotFound) ? -1 : entry;
-}
-
-
-void CodeCacheHashTable::RemoveByIndex(int index) {
- ASSERT(index >= 0);
- Heap* heap = GetHeap();
- set(EntryToIndex(index), heap->null_value());
- set(EntryToIndex(index) + 1, heap->null_value());
- ElementRemoved();
-}
-
-
-static bool HasKey(FixedArray* array, Object* key) {
- int len0 = array->length();
- for (int i = 0; i < len0; i++) {
- Object* element = array->get(i);
- if (element->IsSmi() && key->IsSmi() && (element == key)) return true;
- if (element->IsString() &&
- key->IsString() && String::cast(element)->Equals(String::cast(key))) {
- return true;
- }
- }
- return false;
-}
-
-
-MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) {
- ASSERT(!array->HasExternalArrayElements());
- switch (array->GetElementsKind()) {
- case JSObject::FAST_ELEMENTS:
- return UnionOfKeys(FixedArray::cast(array->elements()));
- case JSObject::DICTIONARY_ELEMENTS: {
- NumberDictionary* dict = array->element_dictionary();
- int size = dict->NumberOfElements();
-
- // Allocate a temporary fixed array.
- Object* object;
- { MaybeObject* maybe_object = GetHeap()->AllocateFixedArray(size);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- FixedArray* key_array = FixedArray::cast(object);
-
- int capacity = dict->Capacity();
- int pos = 0;
- // Copy the elements from the JSArray to the temporary fixed array.
- for (int i = 0; i < capacity; i++) {
- if (dict->IsKey(dict->KeyAt(i))) {
- key_array->set(pos++, dict->ValueAt(i));
- }
- }
- // Compute the union of this and the temporary fixed array.
- return UnionOfKeys(key_array);
- }
- default:
- UNREACHABLE();
- }
- UNREACHABLE();
- return GetHeap()->null_value(); // Failure case needs to "return" a value.
-}
-
-
-MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
- int len0 = length();
-#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- for (int i = 0; i < len0; i++) {
- ASSERT(get(i)->IsString() || get(i)->IsNumber());
- }
- }
-#endif
- int len1 = other->length();
- // Optimize if 'other' is empty.
- // We cannot optimize if 'this' is empty, as other may have holes
- // or non keys.
- if (len1 == 0) return this;
-
- // Compute how many elements are not in this.
- int extra = 0;
- for (int y = 0; y < len1; y++) {
- Object* value = other->get(y);
- if (!value->IsTheHole() && !HasKey(this, value)) extra++;
- }
-
- if (extra == 0) return this;
-
- // Allocate the result
- Object* obj;
- { MaybeObject* maybe_obj = GetHeap()->AllocateFixedArray(len0 + extra);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- // Fill in the content
- AssertNoAllocation no_gc;
- FixedArray* result = FixedArray::cast(obj);
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < len0; i++) {
- Object* e = get(i);
- ASSERT(e->IsString() || e->IsNumber());
- result->set(i, e, mode);
- }
- // Fill in the extra keys.
- int index = 0;
- for (int y = 0; y < len1; y++) {
- Object* value = other->get(y);
- if (!value->IsTheHole() && !HasKey(this, value)) {
- Object* e = other->get(y);
- ASSERT(e->IsString() || e->IsNumber());
- result->set(len0 + index, e, mode);
- index++;
- }
- }
- ASSERT(extra == index);
- return result;
-}
-
-
-MaybeObject* FixedArray::CopySize(int new_length) {
- Heap* heap = GetHeap();
- if (new_length == 0) return heap->empty_fixed_array();
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateFixedArray(new_length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* result = FixedArray::cast(obj);
- // Copy the content
- AssertNoAllocation no_gc;
- int len = length();
- if (new_length < len) len = new_length;
- result->set_map(map());
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < len; i++) {
- result->set(i, get(i), mode);
- }
- return result;
-}
-
-
-void FixedArray::CopyTo(int pos, FixedArray* dest, int dest_pos, int len) {
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = dest->GetWriteBarrierMode(no_gc);
- for (int index = 0; index < len; index++) {
- dest->set(dest_pos+index, get(pos+index), mode);
- }
-}
-
-
-#ifdef DEBUG
-bool FixedArray::IsEqualTo(FixedArray* other) {
- if (length() != other->length()) return false;
- for (int i = 0 ; i < length(); ++i) {
- if (get(i) != other->get(i)) return false;
- }
- return true;
-}
-#endif
-
-
-MaybeObject* DescriptorArray::Allocate(int number_of_descriptors) {
- Heap* heap = Isolate::Current()->heap();
- if (number_of_descriptors == 0) {
- return heap->empty_descriptor_array();
- }
- // Allocate the array of keys.
- Object* array;
- { MaybeObject* maybe_array =
- heap->AllocateFixedArray(ToKeyIndex(number_of_descriptors));
- if (!maybe_array->ToObject(&array)) return maybe_array;
- }
- // Do not use DescriptorArray::cast on incomplete object.
- FixedArray* result = FixedArray::cast(array);
-
- // Allocate the content array and set it in the descriptor array.
- { MaybeObject* maybe_array =
- heap->AllocateFixedArray(number_of_descriptors << 1);
- if (!maybe_array->ToObject(&array)) return maybe_array;
- }
- result->set(kContentArrayIndex, array);
- result->set(kEnumerationIndexIndex,
- Smi::FromInt(PropertyDetails::kInitialIndex));
- return result;
-}
-
-
-void DescriptorArray::SetEnumCache(FixedArray* bridge_storage,
- FixedArray* new_cache) {
- ASSERT(bridge_storage->length() >= kEnumCacheBridgeLength);
- if (HasEnumCache()) {
- FixedArray::cast(get(kEnumerationIndexIndex))->
- set(kEnumCacheBridgeCacheIndex, new_cache);
- } else {
- if (IsEmpty()) return; // Do nothing for empty descriptor array.
- FixedArray::cast(bridge_storage)->
- set(kEnumCacheBridgeCacheIndex, new_cache);
- fast_set(FixedArray::cast(bridge_storage),
- kEnumCacheBridgeEnumIndex,
- get(kEnumerationIndexIndex));
- set(kEnumerationIndexIndex, bridge_storage);
- }
-}
-
-
-MaybeObject* DescriptorArray::CopyInsert(Descriptor* descriptor,
- TransitionFlag transition_flag) {
- // Transitions are only kept when inserting another transition.
- // This precondition is not required by this function's implementation, but
- // is currently required by the semantics of maps, so we check it.
- // Conversely, we filter after replacing, so replacing a transition and
- // removing all other transitions is not supported.
- bool remove_transitions = transition_flag == REMOVE_TRANSITIONS;
- ASSERT(remove_transitions == !descriptor->GetDetails().IsTransition());
- ASSERT(descriptor->GetDetails().type() != NULL_DESCRIPTOR);
-
- // Ensure the key is a symbol.
- Object* result;
- { MaybeObject* maybe_result = descriptor->KeyToSymbol();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- int transitions = 0;
- int null_descriptors = 0;
- if (remove_transitions) {
- for (int i = 0; i < number_of_descriptors(); i++) {
- if (IsTransition(i)) transitions++;
- if (IsNullDescriptor(i)) null_descriptors++;
- }
- } else {
- for (int i = 0; i < number_of_descriptors(); i++) {
- if (IsNullDescriptor(i)) null_descriptors++;
- }
- }
- int new_size = number_of_descriptors() - transitions - null_descriptors;
-
- // If key is in descriptor, we replace it in-place when filtering.
- // Count a null descriptor for key as inserted, not replaced.
- int index = Search(descriptor->GetKey());
- const bool inserting = (index == kNotFound);
- const bool replacing = !inserting;
- bool keep_enumeration_index = false;
- if (inserting) {
- ++new_size;
- }
- if (replacing) {
- // We are replacing an existing descriptor. We keep the enumeration
- // index of a visible property.
- PropertyType t = PropertyDetails(GetDetails(index)).type();
- if (t == CONSTANT_FUNCTION ||
- t == FIELD ||
- t == CALLBACKS ||
- t == INTERCEPTOR) {
- keep_enumeration_index = true;
- } else if (remove_transitions) {
- // Replaced descriptor has been counted as removed if it is
- // a transition that will be replaced. Adjust count in this case.
- ++new_size;
- }
- }
- { MaybeObject* maybe_result = Allocate(new_size);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- DescriptorArray* new_descriptors = DescriptorArray::cast(result);
- // Set the enumeration index in the descriptors and set the enumeration index
- // in the result.
- int enumeration_index = NextEnumerationIndex();
- if (!descriptor->GetDetails().IsTransition()) {
- if (keep_enumeration_index) {
- descriptor->SetEnumerationIndex(
- PropertyDetails(GetDetails(index)).index());
- } else {
- descriptor->SetEnumerationIndex(enumeration_index);
- ++enumeration_index;
- }
- }
- new_descriptors->SetNextEnumerationIndex(enumeration_index);
-
- // Copy the descriptors, filtering out transitions and null descriptors,
- // and inserting or replacing a descriptor.
- uint32_t descriptor_hash = descriptor->GetKey()->Hash();
- int from_index = 0;
- int to_index = 0;
-
- for (; from_index < number_of_descriptors(); from_index++) {
- String* key = GetKey(from_index);
- if (key->Hash() > descriptor_hash || key == descriptor->GetKey()) {
- break;
- }
- if (IsNullDescriptor(from_index)) continue;
- if (remove_transitions && IsTransition(from_index)) continue;
- new_descriptors->CopyFrom(to_index++, this, from_index);
- }
-
- new_descriptors->Set(to_index++, descriptor);
- if (replacing) from_index++;
-
- for (; from_index < number_of_descriptors(); from_index++) {
- if (IsNullDescriptor(from_index)) continue;
- if (remove_transitions && IsTransition(from_index)) continue;
- new_descriptors->CopyFrom(to_index++, this, from_index);
- }
-
- ASSERT(to_index == new_descriptors->number_of_descriptors());
- SLOW_ASSERT(new_descriptors->IsSortedNoDuplicates());
-
- return new_descriptors;
-}
-
-
-MaybeObject* DescriptorArray::RemoveTransitions() {
- // Remove all transitions and null descriptors. Return a copy of the array
- // with all transitions removed, or a Failure object if the new array could
- // not be allocated.
-
- // Compute the size of the map transition entries to be removed.
- int num_removed = 0;
- for (int i = 0; i < number_of_descriptors(); i++) {
- if (!IsProperty(i)) num_removed++;
- }
-
- // Allocate the new descriptor array.
- Object* result;
- { MaybeObject* maybe_result = Allocate(number_of_descriptors() - num_removed);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- DescriptorArray* new_descriptors = DescriptorArray::cast(result);
-
- // Copy the content.
- int next_descriptor = 0;
- for (int i = 0; i < number_of_descriptors(); i++) {
- if (IsProperty(i)) new_descriptors->CopyFrom(next_descriptor++, this, i);
- }
- ASSERT(next_descriptor == new_descriptors->number_of_descriptors());
-
- return new_descriptors;
-}
-
-
-void DescriptorArray::SortUnchecked() {
- // In-place heap sort.
- int len = number_of_descriptors();
-
- // Bottom-up max-heap construction.
- // Index of the last node with children
- const int max_parent_index = (len / 2) - 1;
- for (int i = max_parent_index; i >= 0; --i) {
- int parent_index = i;
- const uint32_t parent_hash = GetKey(i)->Hash();
- while (parent_index <= max_parent_index) {
- int child_index = 2 * parent_index + 1;
- uint32_t child_hash = GetKey(child_index)->Hash();
- if (child_index + 1 < len) {
- uint32_t right_child_hash = GetKey(child_index + 1)->Hash();
- if (right_child_hash > child_hash) {
- child_index++;
- child_hash = right_child_hash;
- }
- }
- if (child_hash <= parent_hash) break;
- Swap(parent_index, child_index);
- // Now element at child_index could be < its children.
- parent_index = child_index; // parent_hash remains correct.
- }
- }
-
- // Extract elements and create sorted array.
- for (int i = len - 1; i > 0; --i) {
- // Put max element at the back of the array.
- Swap(0, i);
- // Sift down the new top element.
- int parent_index = 0;
- const uint32_t parent_hash = GetKey(parent_index)->Hash();
- const int max_parent_index = (i / 2) - 1;
- while (parent_index <= max_parent_index) {
- int child_index = parent_index * 2 + 1;
- uint32_t child_hash = GetKey(child_index)->Hash();
- if (child_index + 1 < i) {
- uint32_t right_child_hash = GetKey(child_index + 1)->Hash();
- if (right_child_hash > child_hash) {
- child_index++;
- child_hash = right_child_hash;
- }
- }
- if (child_hash <= parent_hash) break;
- Swap(parent_index, child_index);
- parent_index = child_index;
- }
- }
-}
-
-
-void DescriptorArray::Sort() {
- SortUnchecked();
- SLOW_ASSERT(IsSortedNoDuplicates());
-}
-
-
-int DescriptorArray::BinarySearch(String* name, int low, int high) {
- uint32_t hash = name->Hash();
-
- while (low <= high) {
- int mid = (low + high) / 2;
- String* mid_name = GetKey(mid);
- uint32_t mid_hash = mid_name->Hash();
-
- if (mid_hash > hash) {
- high = mid - 1;
- continue;
- }
- if (mid_hash < hash) {
- low = mid + 1;
- continue;
- }
- // Found an element with the same hash-code.
- ASSERT(hash == mid_hash);
- // There might be more, so we find the first one and
- // check them all to see if we have a match.
- if (name == mid_name && !is_null_descriptor(mid)) return mid;
- while ((mid > low) && (GetKey(mid - 1)->Hash() == hash)) mid--;
- for (; (mid <= high) && (GetKey(mid)->Hash() == hash); mid++) {
- if (GetKey(mid)->Equals(name) && !is_null_descriptor(mid)) return mid;
- }
- break;
- }
- return kNotFound;
-}
-
-
-int DescriptorArray::LinearSearch(String* name, int len) {
- uint32_t hash = name->Hash();
- for (int number = 0; number < len; number++) {
- String* entry = GetKey(number);
- if ((entry->Hash() == hash) &&
- name->Equals(entry) &&
- !is_null_descriptor(number)) {
- return number;
- }
- }
- return kNotFound;
-}
-
-
-MaybeObject* DeoptimizationInputData::Allocate(int deopt_entry_count,
- PretenureFlag pretenure) {
- ASSERT(deopt_entry_count > 0);
- return HEAP->AllocateFixedArray(LengthFor(deopt_entry_count),
- pretenure);
-}
-
-
-MaybeObject* DeoptimizationOutputData::Allocate(int number_of_deopt_points,
- PretenureFlag pretenure) {
- if (number_of_deopt_points == 0) return HEAP->empty_fixed_array();
- return HEAP->AllocateFixedArray(LengthOfFixedArray(number_of_deopt_points),
- pretenure);
-}
-
-
-#ifdef DEBUG
-bool DescriptorArray::IsEqualTo(DescriptorArray* other) {
- if (IsEmpty()) return other->IsEmpty();
- if (other->IsEmpty()) return false;
- if (length() != other->length()) return false;
- for (int i = 0; i < length(); ++i) {
- if (get(i) != other->get(i) && i != kContentArrayIndex) return false;
- }
- return GetContentArray()->IsEqualTo(other->GetContentArray());
-}
-#endif
-
-
-bool String::LooksValid() {
- if (!Isolate::Current()->heap()->Contains(this)) return false;
- return true;
-}
-
-
-int String::Utf8Length() {
- if (IsAsciiRepresentation()) return length();
- // Attempt to flatten before accessing the string. It probably
- // doesn't make Utf8Length faster, but it is very likely that
- // the string will be accessed later (for example by WriteUtf8)
- // so it's still a good idea.
- Heap* heap = GetHeap();
- TryFlatten();
- Access<StringInputBuffer> buffer(
- heap->isolate()->objects_string_input_buffer());
- buffer->Reset(0, this);
- int result = 0;
- while (buffer->has_more())
- result += unibrow::Utf8::Length(buffer->GetNext());
- return result;
-}
-
-
-Vector<const char> String::ToAsciiVector() {
- ASSERT(IsAsciiRepresentation());
- ASSERT(IsFlat());
-
- int offset = 0;
- int length = this->length();
- StringRepresentationTag string_tag = StringShape(this).representation_tag();
- String* string = this;
- if (string_tag == kConsStringTag) {
- ConsString* cons = ConsString::cast(string);
- ASSERT(cons->second()->length() == 0);
- string = cons->first();
- string_tag = StringShape(string).representation_tag();
- }
- if (string_tag == kSeqStringTag) {
- SeqAsciiString* seq = SeqAsciiString::cast(string);
- char* start = seq->GetChars();
- return Vector<const char>(start + offset, length);
- }
- ASSERT(string_tag == kExternalStringTag);
- ExternalAsciiString* ext = ExternalAsciiString::cast(string);
- const char* start = ext->resource()->data();
- return Vector<const char>(start + offset, length);
-}
-
-
-Vector<const uc16> String::ToUC16Vector() {
- ASSERT(IsTwoByteRepresentation());
- ASSERT(IsFlat());
-
- int offset = 0;
- int length = this->length();
- StringRepresentationTag string_tag = StringShape(this).representation_tag();
- String* string = this;
- if (string_tag == kConsStringTag) {
- ConsString* cons = ConsString::cast(string);
- ASSERT(cons->second()->length() == 0);
- string = cons->first();
- string_tag = StringShape(string).representation_tag();
- }
- if (string_tag == kSeqStringTag) {
- SeqTwoByteString* seq = SeqTwoByteString::cast(string);
- return Vector<const uc16>(seq->GetChars() + offset, length);
- }
- ASSERT(string_tag == kExternalStringTag);
- ExternalTwoByteString* ext = ExternalTwoByteString::cast(string);
- const uc16* start =
- reinterpret_cast<const uc16*>(ext->resource()->data());
- return Vector<const uc16>(start + offset, length);
-}
-
-
-SmartPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
- RobustnessFlag robust_flag,
- int offset,
- int length,
- int* length_return) {
- if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
- return SmartPointer<char>(NULL);
- }
- Heap* heap = GetHeap();
-
- // Negative length means the to the end of the string.
- if (length < 0) length = kMaxInt - offset;
-
- // Compute the size of the UTF-8 string. Start at the specified offset.
- Access<StringInputBuffer> buffer(
- heap->isolate()->objects_string_input_buffer());
- buffer->Reset(offset, this);
- int character_position = offset;
- int utf8_bytes = 0;
- while (buffer->has_more()) {
- uint16_t character = buffer->GetNext();
- if (character_position < offset + length) {
- utf8_bytes += unibrow::Utf8::Length(character);
- }
- character_position++;
- }
-
- if (length_return) {
- *length_return = utf8_bytes;
- }
-
- char* result = NewArray<char>(utf8_bytes + 1);
-
- // Convert the UTF-16 string to a UTF-8 buffer. Start at the specified offset.
- buffer->Rewind();
- buffer->Seek(offset);
- character_position = offset;
- int utf8_byte_position = 0;
- while (buffer->has_more()) {
- uint16_t character = buffer->GetNext();
- if (character_position < offset + length) {
- if (allow_nulls == DISALLOW_NULLS && character == 0) {
- character = ' ';
- }
- utf8_byte_position +=
- unibrow::Utf8::Encode(result + utf8_byte_position, character);
- }
- character_position++;
- }
- result[utf8_byte_position] = 0;
- return SmartPointer<char>(result);
-}
-
-
-SmartPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
- RobustnessFlag robust_flag,
- int* length_return) {
- return ToCString(allow_nulls, robust_flag, 0, -1, length_return);
-}
-
-
-const uc16* String::GetTwoByteData() {
- return GetTwoByteData(0);
-}
-
-
-const uc16* String::GetTwoByteData(unsigned start) {
- ASSERT(!IsAsciiRepresentation());
- switch (StringShape(this).representation_tag()) {
- case kSeqStringTag:
- return SeqTwoByteString::cast(this)->SeqTwoByteStringGetData(start);
- case kExternalStringTag:
- return ExternalTwoByteString::cast(this)->
- ExternalTwoByteStringGetData(start);
- case kConsStringTag:
- UNREACHABLE();
- return NULL;
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-SmartPointer<uc16> String::ToWideCString(RobustnessFlag robust_flag) {
- if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
- return SmartPointer<uc16>();
- }
- Heap* heap = GetHeap();
-
- Access<StringInputBuffer> buffer(
- heap->isolate()->objects_string_input_buffer());
- buffer->Reset(this);
-
- uc16* result = NewArray<uc16>(length() + 1);
-
- int i = 0;
- while (buffer->has_more()) {
- uint16_t character = buffer->GetNext();
- result[i++] = character;
- }
- result[i] = 0;
- return SmartPointer<uc16>(result);
-}
-
-
-const uc16* SeqTwoByteString::SeqTwoByteStringGetData(unsigned start) {
- return reinterpret_cast<uc16*>(
- reinterpret_cast<char*>(this) - kHeapObjectTag + kHeaderSize) + start;
-}
-
-
-void SeqTwoByteString::SeqTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- unsigned chars_read = 0;
- unsigned offset = *offset_ptr;
- while (chars_read < max_chars) {
- uint16_t c = *reinterpret_cast<uint16_t*>(
- reinterpret_cast<char*>(this) -
- kHeapObjectTag + kHeaderSize + offset * kShortSize);
- if (c <= kMaxAsciiCharCode) {
- // Fast case for ASCII characters. Cursor is an input output argument.
- if (!unibrow::CharacterStream::EncodeAsciiCharacter(c,
- rbb->util_buffer,
- rbb->capacity,
- rbb->cursor)) {
- break;
- }
- } else {
- if (!unibrow::CharacterStream::EncodeNonAsciiCharacter(c,
- rbb->util_buffer,
- rbb->capacity,
- rbb->cursor)) {
- break;
- }
- }
- offset++;
- chars_read++;
- }
- *offset_ptr = offset;
- rbb->remaining += chars_read;
-}
-
-
-const unibrow::byte* SeqAsciiString::SeqAsciiStringReadBlock(
- unsigned* remaining,
- unsigned* offset_ptr,
- unsigned max_chars) {
- const unibrow::byte* b = reinterpret_cast<unibrow::byte*>(this) -
- kHeapObjectTag + kHeaderSize + *offset_ptr * kCharSize;
- *remaining = max_chars;
- *offset_ptr += max_chars;
- return b;
-}
-
-
-// This will iterate unless the block of string data spans two 'halves' of
-// a ConsString, in which case it will recurse. Since the block of string
-// data to be read has a maximum size this limits the maximum recursion
-// depth to something sane. Since C++ does not have tail call recursion
-// elimination, the iteration must be explicit. Since this is not an
-// -IntoBuffer method it can delegate to one of the efficient
-// *AsciiStringReadBlock routines.
-const unibrow::byte* ConsString::ConsStringReadBlock(ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- ConsString* current = this;
- unsigned offset = *offset_ptr;
- int offset_correction = 0;
-
- while (true) {
- String* left = current->first();
- unsigned left_length = (unsigned)left->length();
- if (left_length > offset &&
- (max_chars <= left_length - offset ||
- (rbb->capacity <= left_length - offset &&
- (max_chars = left_length - offset, true)))) { // comma operator!
- // Left hand side only - iterate unless we have reached the bottom of
- // the cons tree. The assignment on the left of the comma operator is
- // in order to make use of the fact that the -IntoBuffer routines can
- // produce at most 'capacity' characters. This enables us to postpone
- // the point where we switch to the -IntoBuffer routines (below) in order
- // to maximize the chances of delegating a big chunk of work to the
- // efficient *AsciiStringReadBlock routines.
- if (StringShape(left).IsCons()) {
- current = ConsString::cast(left);
- continue;
- } else {
- const unibrow::byte* answer =
- String::ReadBlock(left, rbb, &offset, max_chars);
- *offset_ptr = offset + offset_correction;
- return answer;
- }
- } else if (left_length <= offset) {
- // Right hand side only - iterate unless we have reached the bottom of
- // the cons tree.
- String* right = current->second();
- offset -= left_length;
- offset_correction += left_length;
- if (StringShape(right).IsCons()) {
- current = ConsString::cast(right);
- continue;
- } else {
- const unibrow::byte* answer =
- String::ReadBlock(right, rbb, &offset, max_chars);
- *offset_ptr = offset + offset_correction;
- return answer;
- }
- } else {
- // The block to be read spans two sides of the ConsString, so we call the
- // -IntoBuffer version, which will recurse. The -IntoBuffer methods
- // are able to assemble data from several part strings because they use
- // the util_buffer to store their data and never return direct pointers
- // to their storage. We don't try to read more than the buffer capacity
- // here or we can get too much recursion.
- ASSERT(rbb->remaining == 0);
- ASSERT(rbb->cursor == 0);
- current->ConsStringReadBlockIntoBuffer(
- rbb,
- &offset,
- max_chars > rbb->capacity ? rbb->capacity : max_chars);
- *offset_ptr = offset + offset_correction;
- return rbb->util_buffer;
- }
- }
-}
-
-
-uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) {
- ASSERT(index >= 0 && index < length());
- return resource()->data()[index];
-}
-
-
-const unibrow::byte* ExternalAsciiString::ExternalAsciiStringReadBlock(
- unsigned* remaining,
- unsigned* offset_ptr,
- unsigned max_chars) {
- // Cast const char* to unibrow::byte* (signedness difference).
- const unibrow::byte* b =
- reinterpret_cast<const unibrow::byte*>(resource()->data()) + *offset_ptr;
- *remaining = max_chars;
- *offset_ptr += max_chars;
- return b;
-}
-
-
-const uc16* ExternalTwoByteString::ExternalTwoByteStringGetData(
- unsigned start) {
- return resource()->data() + start;
-}
-
-
-uint16_t ExternalTwoByteString::ExternalTwoByteStringGet(int index) {
- ASSERT(index >= 0 && index < length());
- return resource()->data()[index];
-}
-
-
-void ExternalTwoByteString::ExternalTwoByteStringReadBlockIntoBuffer(
- ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- unsigned chars_read = 0;
- unsigned offset = *offset_ptr;
- const uint16_t* data = resource()->data();
- while (chars_read < max_chars) {
- uint16_t c = data[offset];
- if (c <= kMaxAsciiCharCode) {
- // Fast case for ASCII characters. Cursor is an input output argument.
- if (!unibrow::CharacterStream::EncodeAsciiCharacter(c,
- rbb->util_buffer,
- rbb->capacity,
- rbb->cursor))
- break;
- } else {
- if (!unibrow::CharacterStream::EncodeNonAsciiCharacter(c,
- rbb->util_buffer,
- rbb->capacity,
- rbb->cursor))
- break;
- }
- offset++;
- chars_read++;
- }
- *offset_ptr = offset;
- rbb->remaining += chars_read;
-}
-
-
-void SeqAsciiString::SeqAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- unsigned capacity = rbb->capacity - rbb->cursor;
- if (max_chars > capacity) max_chars = capacity;
- memcpy(rbb->util_buffer + rbb->cursor,
- reinterpret_cast<char*>(this) - kHeapObjectTag + kHeaderSize +
- *offset_ptr * kCharSize,
- max_chars);
- rbb->remaining += max_chars;
- *offset_ptr += max_chars;
- rbb->cursor += max_chars;
-}
-
-
-void ExternalAsciiString::ExternalAsciiStringReadBlockIntoBuffer(
- ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- unsigned capacity = rbb->capacity - rbb->cursor;
- if (max_chars > capacity) max_chars = capacity;
- memcpy(rbb->util_buffer + rbb->cursor,
- resource()->data() + *offset_ptr,
- max_chars);
- rbb->remaining += max_chars;
- *offset_ptr += max_chars;
- rbb->cursor += max_chars;
-}
-
-
-// This method determines the type of string involved and then copies
-// a whole chunk of characters into a buffer, or returns a pointer to a buffer
-// where they can be found. The pointer is not necessarily valid across a GC
-// (see AsciiStringReadBlock).
-const unibrow::byte* String::ReadBlock(String* input,
- ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- ASSERT(*offset_ptr <= static_cast<unsigned>(input->length()));
- if (max_chars == 0) {
- rbb->remaining = 0;
- return NULL;
- }
- switch (StringShape(input).representation_tag()) {
- case kSeqStringTag:
- if (input->IsAsciiRepresentation()) {
- SeqAsciiString* str = SeqAsciiString::cast(input);
- return str->SeqAsciiStringReadBlock(&rbb->remaining,
- offset_ptr,
- max_chars);
- } else {
- SeqTwoByteString* str = SeqTwoByteString::cast(input);
- str->SeqTwoByteStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
- return rbb->util_buffer;
- }
- case kConsStringTag:
- return ConsString::cast(input)->ConsStringReadBlock(rbb,
- offset_ptr,
- max_chars);
- case kExternalStringTag:
- if (input->IsAsciiRepresentation()) {
- return ExternalAsciiString::cast(input)->ExternalAsciiStringReadBlock(
- &rbb->remaining,
- offset_ptr,
- max_chars);
- } else {
- ExternalTwoByteString::cast(input)->
- ExternalTwoByteStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
- return rbb->util_buffer;
- }
- default:
- break;
- }
-
- UNREACHABLE();
- return 0;
-}
-
-
-void Relocatable::PostGarbageCollectionProcessing() {
- Isolate* isolate = Isolate::Current();
- Relocatable* current = isolate->relocatable_top();
- while (current != NULL) {
- current->PostGarbageCollection();
- current = current->prev_;
- }
-}
-
-
-// Reserve space for statics needing saving and restoring.
-int Relocatable::ArchiveSpacePerThread() {
- return sizeof(Isolate::Current()->relocatable_top());
-}
-
-
-// Archive statics that are thread local.
-char* Relocatable::ArchiveState(char* to) {
- Isolate* isolate = Isolate::Current();
- *reinterpret_cast<Relocatable**>(to) = isolate->relocatable_top();
- isolate->set_relocatable_top(NULL);
- return to + ArchiveSpacePerThread();
-}
-
-
-// Restore statics that are thread local.
-char* Relocatable::RestoreState(char* from) {
- Isolate* isolate = Isolate::Current();
- isolate->set_relocatable_top(*reinterpret_cast<Relocatable**>(from));
- return from + ArchiveSpacePerThread();
-}
-
-
-char* Relocatable::Iterate(ObjectVisitor* v, char* thread_storage) {
- Relocatable* top = *reinterpret_cast<Relocatable**>(thread_storage);
- Iterate(v, top);
- return thread_storage + ArchiveSpacePerThread();
-}
-
-
-void Relocatable::Iterate(ObjectVisitor* v) {
- Isolate* isolate = Isolate::Current();
- Iterate(v, isolate->relocatable_top());
-}
-
-
-void Relocatable::Iterate(ObjectVisitor* v, Relocatable* top) {
- Relocatable* current = top;
- while (current != NULL) {
- current->IterateInstance(v);
- current = current->prev_;
- }
-}
-
-
-FlatStringReader::FlatStringReader(Isolate* isolate, Handle<String> str)
- : Relocatable(isolate),
- str_(str.location()),
- length_(str->length()) {
- PostGarbageCollection();
-}
-
-
-FlatStringReader::FlatStringReader(Isolate* isolate, Vector<const char> input)
- : Relocatable(isolate),
- str_(0),
- is_ascii_(true),
- length_(input.length()),
- start_(input.start()) { }
-
-
-void FlatStringReader::PostGarbageCollection() {
- if (str_ == NULL) return;
- Handle<String> str(str_);
- ASSERT(str->IsFlat());
- is_ascii_ = str->IsAsciiRepresentation();
- if (is_ascii_) {
- start_ = str->ToAsciiVector().start();
- } else {
- start_ = str->ToUC16Vector().start();
- }
-}
-
-
-void StringInputBuffer::Seek(unsigned pos) {
- Reset(pos, input_);
-}
-
-
-void SafeStringInputBuffer::Seek(unsigned pos) {
- Reset(pos, input_);
-}
-
-
-// This method determines the type of string involved and then copies
-// a whole chunk of characters into a buffer. It can be used with strings
-// that have been glued together to form a ConsString and which must cooperate
-// to fill up a buffer.
-void String::ReadBlockIntoBuffer(String* input,
- ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- ASSERT(*offset_ptr <= (unsigned)input->length());
- if (max_chars == 0) return;
-
- switch (StringShape(input).representation_tag()) {
- case kSeqStringTag:
- if (input->IsAsciiRepresentation()) {
- SeqAsciiString::cast(input)->SeqAsciiStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
- return;
- } else {
- SeqTwoByteString::cast(input)->SeqTwoByteStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
- return;
- }
- case kConsStringTag:
- ConsString::cast(input)->ConsStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
- return;
- case kExternalStringTag:
- if (input->IsAsciiRepresentation()) {
- ExternalAsciiString::cast(input)->
- ExternalAsciiStringReadBlockIntoBuffer(rbb, offset_ptr, max_chars);
- } else {
- ExternalTwoByteString::cast(input)->
- ExternalTwoByteStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
- }
- return;
- default:
- break;
- }
-
- UNREACHABLE();
- return;
-}
-
-
-const unibrow::byte* String::ReadBlock(String* input,
- unibrow::byte* util_buffer,
- unsigned capacity,
- unsigned* remaining,
- unsigned* offset_ptr) {
- ASSERT(*offset_ptr <= (unsigned)input->length());
- unsigned chars = input->length() - *offset_ptr;
- ReadBlockBuffer rbb(util_buffer, 0, capacity, 0);
- const unibrow::byte* answer = ReadBlock(input, &rbb, offset_ptr, chars);
- ASSERT(rbb.remaining <= static_cast<unsigned>(input->length()));
- *remaining = rbb.remaining;
- return answer;
-}
-
-
-const unibrow::byte* String::ReadBlock(String** raw_input,
- unibrow::byte* util_buffer,
- unsigned capacity,
- unsigned* remaining,
- unsigned* offset_ptr) {
- Handle<String> input(raw_input);
- ASSERT(*offset_ptr <= (unsigned)input->length());
- unsigned chars = input->length() - *offset_ptr;
- if (chars > capacity) chars = capacity;
- ReadBlockBuffer rbb(util_buffer, 0, capacity, 0);
- ReadBlockIntoBuffer(*input, &rbb, offset_ptr, chars);
- ASSERT(rbb.remaining <= static_cast<unsigned>(input->length()));
- *remaining = rbb.remaining;
- return rbb.util_buffer;
-}
-
-
-// This will iterate unless the block of string data spans two 'halves' of
-// a ConsString, in which case it will recurse. Since the block of string
-// data to be read has a maximum size this limits the maximum recursion
-// depth to something sane. Since C++ does not have tail call recursion
-// elimination, the iteration must be explicit.
-void ConsString::ConsStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- ConsString* current = this;
- unsigned offset = *offset_ptr;
- int offset_correction = 0;
-
- while (true) {
- String* left = current->first();
- unsigned left_length = (unsigned)left->length();
- if (left_length > offset &&
- max_chars <= left_length - offset) {
- // Left hand side only - iterate unless we have reached the bottom of
- // the cons tree.
- if (StringShape(left).IsCons()) {
- current = ConsString::cast(left);
- continue;
- } else {
- String::ReadBlockIntoBuffer(left, rbb, &offset, max_chars);
- *offset_ptr = offset + offset_correction;
- return;
- }
- } else if (left_length <= offset) {
- // Right hand side only - iterate unless we have reached the bottom of
- // the cons tree.
- offset -= left_length;
- offset_correction += left_length;
- String* right = current->second();
- if (StringShape(right).IsCons()) {
- current = ConsString::cast(right);
- continue;
- } else {
- String::ReadBlockIntoBuffer(right, rbb, &offset, max_chars);
- *offset_ptr = offset + offset_correction;
- return;
- }
- } else {
- // The block to be read spans two sides of the ConsString, so we recurse.
- // First recurse on the left.
- max_chars -= left_length - offset;
- String::ReadBlockIntoBuffer(left, rbb, &offset, left_length - offset);
- // We may have reached the max or there may not have been enough space
- // in the buffer for the characters in the left hand side.
- if (offset == left_length) {
- // Recurse on the right.
- String* right = String::cast(current->second());
- offset -= left_length;
- offset_correction += left_length;
- String::ReadBlockIntoBuffer(right, rbb, &offset, max_chars);
- }
- *offset_ptr = offset + offset_correction;
- return;
- }
- }
-}
-
-
-uint16_t ConsString::ConsStringGet(int index) {
- ASSERT(index >= 0 && index < this->length());
-
- // Check for a flattened cons string
- if (second()->length() == 0) {
- String* left = first();
- return left->Get(index);
- }
-
- String* string = String::cast(this);
-
- while (true) {
- if (StringShape(string).IsCons()) {
- ConsString* cons_string = ConsString::cast(string);
- String* left = cons_string->first();
- if (left->length() > index) {
- string = left;
- } else {
- index -= left->length();
- string = cons_string->second();
- }
- } else {
- return string->Get(index);
- }
- }
-
- UNREACHABLE();
- return 0;
-}
-
-
-template <typename sinkchar>
-void String::WriteToFlat(String* src,
- sinkchar* sink,
- int f,
- int t) {
- String* source = src;
- int from = f;
- int to = t;
- while (true) {
- ASSERT(0 <= from && from <= to && to <= source->length());
- switch (StringShape(source).full_representation_tag()) {
- case kAsciiStringTag | kExternalStringTag: {
- CopyChars(sink,
- ExternalAsciiString::cast(source)->resource()->data() + from,
- to - from);
- return;
- }
- case kTwoByteStringTag | kExternalStringTag: {
- const uc16* data =
- ExternalTwoByteString::cast(source)->resource()->data();
- CopyChars(sink,
- data + from,
- to - from);
- return;
- }
- case kAsciiStringTag | kSeqStringTag: {
- CopyChars(sink,
- SeqAsciiString::cast(source)->GetChars() + from,
- to - from);
- return;
- }
- case kTwoByteStringTag | kSeqStringTag: {
- CopyChars(sink,
- SeqTwoByteString::cast(source)->GetChars() + from,
- to - from);
- return;
- }
- case kAsciiStringTag | kConsStringTag:
- case kTwoByteStringTag | kConsStringTag: {
- ConsString* cons_string = ConsString::cast(source);
- String* first = cons_string->first();
- int boundary = first->length();
- if (to - boundary >= boundary - from) {
- // Right hand side is longer. Recurse over left.
- if (from < boundary) {
- WriteToFlat(first, sink, from, boundary);
- sink += boundary - from;
- from = 0;
- } else {
- from -= boundary;
- }
- to -= boundary;
- source = cons_string->second();
- } else {
- // Left hand side is longer. Recurse over right.
- if (to > boundary) {
- String* second = cons_string->second();
- WriteToFlat(second,
- sink + boundary - from,
- 0,
- to - boundary);
- to = boundary;
- }
- source = first;
- }
- break;
- }
- }
- }
-}
-
-
-template <typename IteratorA, typename IteratorB>
-static inline bool CompareStringContents(IteratorA* ia, IteratorB* ib) {
- // General slow case check. We know that the ia and ib iterators
- // have the same length.
- while (ia->has_more()) {
- uc32 ca = ia->GetNext();
- uc32 cb = ib->GetNext();
- if (ca != cb)
- return false;
- }
- return true;
-}
-
-
-// Compares the contents of two strings by reading and comparing
-// int-sized blocks of characters.
-template <typename Char>
-static inline bool CompareRawStringContents(Vector<Char> a, Vector<Char> b) {
- int length = a.length();
- ASSERT_EQ(length, b.length());
- const Char* pa = a.start();
- const Char* pb = b.start();
- int i = 0;
-#ifndef V8_HOST_CAN_READ_UNALIGNED
- // If this architecture isn't comfortable reading unaligned ints
- // then we have to check that the strings are aligned before
- // comparing them blockwise.
- const int kAlignmentMask = sizeof(uint32_t) - 1; // NOLINT
- uint32_t pa_addr = reinterpret_cast<uint32_t>(pa);
- uint32_t pb_addr = reinterpret_cast<uint32_t>(pb);
- if (((pa_addr & kAlignmentMask) | (pb_addr & kAlignmentMask)) == 0) {
-#endif
- const int kStepSize = sizeof(int) / sizeof(Char); // NOLINT
- int endpoint = length - kStepSize;
- // Compare blocks until we reach near the end of the string.
- for (; i <= endpoint; i += kStepSize) {
- uint32_t wa = *reinterpret_cast<const uint32_t*>(pa + i);
- uint32_t wb = *reinterpret_cast<const uint32_t*>(pb + i);
- if (wa != wb) {
- return false;
- }
- }
-#ifndef V8_HOST_CAN_READ_UNALIGNED
- }
-#endif
- // Compare the remaining characters that didn't fit into a block.
- for (; i < length; i++) {
- if (a[i] != b[i]) {
- return false;
- }
- }
- return true;
-}
-
-
-template <typename IteratorA>
-static inline bool CompareStringContentsPartial(Isolate* isolate,
- IteratorA* ia,
- String* b) {
- if (b->IsFlat()) {
- if (b->IsAsciiRepresentation()) {
- VectorIterator<char> ib(b->ToAsciiVector());
- return CompareStringContents(ia, &ib);
- } else {
- VectorIterator<uc16> ib(b->ToUC16Vector());
- return CompareStringContents(ia, &ib);
- }
- } else {
- isolate->objects_string_compare_buffer_b()->Reset(0, b);
- return CompareStringContents(ia,
- isolate->objects_string_compare_buffer_b());
- }
-}
-
-
-bool String::SlowEquals(String* other) {
- // Fast check: negative check with lengths.
- int len = length();
- if (len != other->length()) return false;
- if (len == 0) return true;
-
- // Fast check: if hash code is computed for both strings
- // a fast negative check can be performed.
- if (HasHashCode() && other->HasHashCode()) {
- if (Hash() != other->Hash()) return false;
- }
-
- // We know the strings are both non-empty. Compare the first chars
- // before we try to flatten the strings.
- if (this->Get(0) != other->Get(0)) return false;
-
- String* lhs = this->TryFlattenGetString();
- String* rhs = other->TryFlattenGetString();
-
- if (StringShape(lhs).IsSequentialAscii() &&
- StringShape(rhs).IsSequentialAscii()) {
- const char* str1 = SeqAsciiString::cast(lhs)->GetChars();
- const char* str2 = SeqAsciiString::cast(rhs)->GetChars();
- return CompareRawStringContents(Vector<const char>(str1, len),
- Vector<const char>(str2, len));
- }
-
- Isolate* isolate = GetIsolate();
- if (lhs->IsFlat()) {
- if (lhs->IsAsciiRepresentation()) {
- Vector<const char> vec1 = lhs->ToAsciiVector();
- if (rhs->IsFlat()) {
- if (rhs->IsAsciiRepresentation()) {
- Vector<const char> vec2 = rhs->ToAsciiVector();
- return CompareRawStringContents(vec1, vec2);
- } else {
- VectorIterator<char> buf1(vec1);
- VectorIterator<uc16> ib(rhs->ToUC16Vector());
- return CompareStringContents(&buf1, &ib);
- }
- } else {
- VectorIterator<char> buf1(vec1);
- isolate->objects_string_compare_buffer_b()->Reset(0, rhs);
- return CompareStringContents(&buf1,
- isolate->objects_string_compare_buffer_b());
- }
- } else {
- Vector<const uc16> vec1 = lhs->ToUC16Vector();
- if (rhs->IsFlat()) {
- if (rhs->IsAsciiRepresentation()) {
- VectorIterator<uc16> buf1(vec1);
- VectorIterator<char> ib(rhs->ToAsciiVector());
- return CompareStringContents(&buf1, &ib);
- } else {
- Vector<const uc16> vec2(rhs->ToUC16Vector());
- return CompareRawStringContents(vec1, vec2);
- }
- } else {
- VectorIterator<uc16> buf1(vec1);
- isolate->objects_string_compare_buffer_b()->Reset(0, rhs);
- return CompareStringContents(&buf1,
- isolate->objects_string_compare_buffer_b());
- }
- }
- } else {
- isolate->objects_string_compare_buffer_a()->Reset(0, lhs);
- return CompareStringContentsPartial(isolate,
- isolate->objects_string_compare_buffer_a(), rhs);
- }
-}
-
-
-bool String::MarkAsUndetectable() {
- if (StringShape(this).IsSymbol()) return false;
-
- Map* map = this->map();
- Heap* heap = map->heap();
- if (map == heap->string_map()) {
- this->set_map(heap->undetectable_string_map());
- return true;
- } else if (map == heap->ascii_string_map()) {
- this->set_map(heap->undetectable_ascii_string_map());
- return true;
- }
- // Rest cannot be marked as undetectable
- return false;
-}
-
-
-bool String::IsEqualTo(Vector<const char> str) {
- Isolate* isolate = GetIsolate();
- int slen = length();
- Access<ScannerConstants::Utf8Decoder>
- decoder(isolate->scanner_constants()->utf8_decoder());
- decoder->Reset(str.start(), str.length());
- int i;
- for (i = 0; i < slen && decoder->has_more(); i++) {
- uc32 r = decoder->GetNext();
- if (Get(i) != r) return false;
- }
- return i == slen && !decoder->has_more();
-}
-
-
-bool String::IsAsciiEqualTo(Vector<const char> str) {
- int slen = length();
- if (str.length() != slen) return false;
- for (int i = 0; i < slen; i++) {
- if (Get(i) != static_cast<uint16_t>(str[i])) return false;
- }
- return true;
-}
-
-
-bool String::IsTwoByteEqualTo(Vector<const uc16> str) {
- int slen = length();
- if (str.length() != slen) return false;
- for (int i = 0; i < slen; i++) {
- if (Get(i) != str[i]) return false;
- }
- return true;
-}
-
-
-uint32_t String::ComputeAndSetHash() {
- // Should only be called if hash code has not yet been computed.
- ASSERT(!HasHashCode());
-
- const int len = length();
-
- // Compute the hash code.
- uint32_t field = 0;
- if (StringShape(this).IsSequentialAscii()) {
- field = HashSequentialString(SeqAsciiString::cast(this)->GetChars(), len);
- } else if (StringShape(this).IsSequentialTwoByte()) {
- field = HashSequentialString(SeqTwoByteString::cast(this)->GetChars(), len);
- } else {
- StringInputBuffer buffer(this);
- field = ComputeHashField(&buffer, len);
- }
-
- // Store the hash code in the object.
- set_hash_field(field);
-
- // Check the hash code is there.
- ASSERT(HasHashCode());
- uint32_t result = field >> kHashShift;
- ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
- return result;
-}
-
-
-bool String::ComputeArrayIndex(unibrow::CharacterStream* buffer,
- uint32_t* index,
- int length) {
- if (length == 0 || length > kMaxArrayIndexSize) return false;
- uc32 ch = buffer->GetNext();
-
- // If the string begins with a '0' character, it must only consist
- // of it to be a legal array index.
- if (ch == '0') {
- *index = 0;
- return length == 1;
- }
-
- // Convert string to uint32 array index; character by character.
- int d = ch - '0';
- if (d < 0 || d > 9) return false;
- uint32_t result = d;
- while (buffer->has_more()) {
- d = buffer->GetNext() - '0';
- if (d < 0 || d > 9) return false;
- // Check that the new result is below the 32 bit limit.
- if (result > 429496729U - ((d > 5) ? 1 : 0)) return false;
- result = (result * 10) + d;
- }
-
- *index = result;
- return true;
-}
-
-
-bool String::SlowAsArrayIndex(uint32_t* index) {
- if (length() <= kMaxCachedArrayIndexLength) {
- Hash(); // force computation of hash code
- uint32_t field = hash_field();
- if ((field & kIsNotArrayIndexMask) != 0) return false;
- // Isolate the array index form the full hash field.
- *index = (kArrayIndexHashMask & field) >> kHashShift;
- return true;
- } else {
- StringInputBuffer buffer(this);
- return ComputeArrayIndex(&buffer, index, length());
- }
-}
-
-
-uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
- // For array indexes mix the length into the hash as an array index could
- // be zero.
- ASSERT(length > 0);
- ASSERT(length <= String::kMaxArrayIndexSize);
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
-
- value <<= String::kHashShift;
- value |= length << String::kArrayIndexHashLengthShift;
-
- ASSERT((value & String::kIsNotArrayIndexMask) == 0);
- ASSERT((length > String::kMaxCachedArrayIndexLength) ||
- (value & String::kContainsCachedArrayIndexMask) == 0);
- return value;
-}
-
-
-uint32_t StringHasher::GetHashField() {
- ASSERT(is_valid());
- if (length_ <= String::kMaxHashCalcLength) {
- if (is_array_index()) {
- return MakeArrayIndexHash(array_index(), length_);
- }
- return (GetHash() << String::kHashShift) | String::kIsNotArrayIndexMask;
- } else {
- return (length_ << String::kHashShift) | String::kIsNotArrayIndexMask;
- }
-}
-
-
-uint32_t String::ComputeHashField(unibrow::CharacterStream* buffer,
- int length) {
- StringHasher hasher(length);
-
- // Very long strings have a trivial hash that doesn't inspect the
- // string contents.
- if (hasher.has_trivial_hash()) {
- return hasher.GetHashField();
- }
-
- // Do the iterative array index computation as long as there is a
- // chance this is an array index.
- while (buffer->has_more() && hasher.is_array_index()) {
- hasher.AddCharacter(buffer->GetNext());
- }
-
- // Process the remaining characters without updating the array
- // index.
- while (buffer->has_more()) {
- hasher.AddCharacterNoIndex(buffer->GetNext());
- }
-
- return hasher.GetHashField();
-}
-
-
-MaybeObject* String::SubString(int start, int end, PretenureFlag pretenure) {
- Heap* heap = GetHeap();
- if (start == 0 && end == length()) return this;
- MaybeObject* result = heap->AllocateSubString(this, start, end, pretenure);
- return result;
-}
-
-
-void String::PrintOn(FILE* file) {
- int length = this->length();
- for (int i = 0; i < length; i++) {
- fprintf(file, "%c", Get(i));
- }
-}
-
-
-void Map::CreateBackPointers() {
- DescriptorArray* descriptors = instance_descriptors();
- for (int i = 0; i < descriptors->number_of_descriptors(); i++) {
- if (descriptors->GetType(i) == MAP_TRANSITION ||
- descriptors->GetType(i) == EXTERNAL_ARRAY_TRANSITION ||
- descriptors->GetType(i) == CONSTANT_TRANSITION) {
- // Get target.
- Map* target = Map::cast(descriptors->GetValue(i));
-#ifdef DEBUG
- // Verify target.
- Object* source_prototype = prototype();
- Object* target_prototype = target->prototype();
- ASSERT(source_prototype->IsJSObject() ||
- source_prototype->IsMap() ||
- source_prototype->IsNull());
- ASSERT(target_prototype->IsJSObject() ||
- target_prototype->IsNull());
- ASSERT(source_prototype->IsMap() ||
- source_prototype == target_prototype);
-#endif
- // Point target back to source. set_prototype() will not let us set
- // the prototype to a map, as we do here.
- *RawField(target, kPrototypeOffset) = this;
- }
- }
-}
-
-
-void Map::ClearNonLiveTransitions(Heap* heap, Object* real_prototype) {
- // Live DescriptorArray objects will be marked, so we must use
- // low-level accessors to get and modify their data.
- DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
- *RawField(this, Map::kInstanceDescriptorsOffset));
- if (d == heap->raw_unchecked_empty_descriptor_array()) return;
- Smi* NullDescriptorDetails =
- PropertyDetails(NONE, NULL_DESCRIPTOR).AsSmi();
- FixedArray* contents = reinterpret_cast<FixedArray*>(
- d->get(DescriptorArray::kContentArrayIndex));
- ASSERT(contents->length() >= 2);
- for (int i = 0; i < contents->length(); i += 2) {
- // If the pair (value, details) is a map transition,
- // check if the target is live. If not, null the descriptor.
- // Also drop the back pointer for that map transition, so that this
- // map is not reached again by following a back pointer from a
- // non-live object.
- PropertyDetails details(Smi::cast(contents->get(i + 1)));
- if (details.type() == MAP_TRANSITION ||
- details.type() == EXTERNAL_ARRAY_TRANSITION ||
- details.type() == CONSTANT_TRANSITION) {
- Map* target = reinterpret_cast<Map*>(contents->get(i));
- ASSERT(target->IsHeapObject());
- if (!target->IsMarked()) {
- ASSERT(target->IsMap());
- contents->set_unchecked(i + 1, NullDescriptorDetails);
- contents->set_null_unchecked(heap, i);
- ASSERT(target->prototype() == this ||
- target->prototype() == real_prototype);
- // Getter prototype() is read-only, set_prototype() has side effects.
- *RawField(target, Map::kPrototypeOffset) = real_prototype;
- }
- }
- }
-}
-
-
-void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
- // Iterate over all fields in the body but take care in dealing with
- // the code entry.
- IteratePointers(v, kPropertiesOffset, kCodeEntryOffset);
- v->VisitCodeEntry(this->address() + kCodeEntryOffset);
- IteratePointers(v, kCodeEntryOffset + kPointerSize, object_size);
-}
-
-
-void JSFunction::MarkForLazyRecompilation() {
- ASSERT(is_compiled() && !IsOptimized());
- ASSERT(shared()->allows_lazy_compilation() ||
- code()->optimizable());
- Builtins* builtins = GetIsolate()->builtins();
- ReplaceCode(builtins->builtin(Builtins::kLazyRecompile));
-}
-
-
-uint32_t JSFunction::SourceHash() {
- uint32_t hash = 0;
- Object* script = shared()->script();
- if (!script->IsUndefined()) {
- Object* source = Script::cast(script)->source();
- if (source->IsUndefined()) hash = String::cast(source)->Hash();
- }
- hash ^= ComputeIntegerHash(shared()->start_position_and_type());
- hash += ComputeIntegerHash(shared()->end_position());
- return hash;
-}
-
-
-bool JSFunction::IsInlineable() {
- if (IsBuiltin()) return false;
- SharedFunctionInfo* shared_info = shared();
- // Check that the function has a script associated with it.
- if (!shared_info->script()->IsScript()) return false;
- if (shared_info->optimization_disabled()) return false;
- Code* code = shared_info->code();
- if (code->kind() == Code::OPTIMIZED_FUNCTION) return true;
- // If we never ran this (unlikely) then lets try to optimize it.
- if (code->kind() != Code::FUNCTION) return true;
- return code->optimizable();
-}
-
-
-Object* JSFunction::SetInstancePrototype(Object* value) {
- ASSERT(value->IsJSObject());
- Heap* heap = GetHeap();
- if (has_initial_map()) {
- initial_map()->set_prototype(value);
- } else {
- // Put the value in the initial map field until an initial map is
- // needed. At that point, a new initial map is created and the
- // prototype is put into the initial map where it belongs.
- set_prototype_or_initial_map(value);
- }
- heap->ClearInstanceofCache();
- return value;
-}
-
-
-MaybeObject* JSFunction::SetPrototype(Object* value) {
- ASSERT(should_have_prototype());
- Object* construct_prototype = value;
-
- // If the value is not a JSObject, store the value in the map's
- // constructor field so it can be accessed. Also, set the prototype
- // used for constructing objects to the original object prototype.
- // See ECMA-262 13.2.2.
- if (!value->IsJSObject()) {
- // Copy the map so this does not affect unrelated functions.
- // Remove map transitions because they point to maps with a
- // different prototype.
- Object* new_object;
- { MaybeObject* maybe_new_map = map()->CopyDropTransitions();
- if (!maybe_new_map->ToObject(&new_object)) return maybe_new_map;
- }
- Map* new_map = Map::cast(new_object);
- Heap* heap = new_map->heap();
- set_map(new_map);
- new_map->set_constructor(value);
- new_map->set_non_instance_prototype(true);
- construct_prototype =
- heap->isolate()->context()->global_context()->
- initial_object_prototype();
- } else {
- map()->set_non_instance_prototype(false);
- }
-
- return SetInstancePrototype(construct_prototype);
-}
-
-
-Object* JSFunction::RemovePrototype() {
- Context* global_context = context()->global_context();
- Map* no_prototype_map = shared()->strict_mode()
- ? global_context->strict_mode_function_without_prototype_map()
- : global_context->function_without_prototype_map();
-
- if (map() == no_prototype_map) {
- // Be idempotent.
- return this;
- }
-
- ASSERT(!shared()->strict_mode() ||
- map() == global_context->strict_mode_function_map());
- ASSERT(shared()->strict_mode() || map() == global_context->function_map());
-
- set_map(no_prototype_map);
- set_prototype_or_initial_map(no_prototype_map->heap()->the_hole_value());
- return this;
-}
-
-
-Object* JSFunction::SetInstanceClassName(String* name) {
- shared()->set_instance_class_name(name);
- return this;
-}
-
-
-void JSFunction::PrintName(FILE* out) {
- SmartPointer<char> name = shared()->DebugName()->ToCString();
- PrintF(out, "%s", *name);
-}
-
-
-Context* JSFunction::GlobalContextFromLiterals(FixedArray* literals) {
- return Context::cast(literals->get(JSFunction::kLiteralGlobalContextIndex));
-}
-
-
-MaybeObject* Oddball::Initialize(const char* to_string,
- Object* to_number,
- byte kind) {
- Object* symbol;
- { MaybeObject* maybe_symbol =
- Isolate::Current()->heap()->LookupAsciiSymbol(to_string);
- if (!maybe_symbol->ToObject(&symbol)) return maybe_symbol;
- }
- set_to_string(String::cast(symbol));
- set_to_number(to_number);
- set_kind(kind);
- return this;
-}
-
-
-String* SharedFunctionInfo::DebugName() {
- Object* n = name();
- if (!n->IsString() || String::cast(n)->length() == 0) return inferred_name();
- return String::cast(n);
-}
-
-
-bool SharedFunctionInfo::HasSourceCode() {
- return !script()->IsUndefined() &&
- !reinterpret_cast<Script*>(script())->source()->IsUndefined();
-}
-
-
-Object* SharedFunctionInfo::GetSourceCode() {
- Isolate* isolate = GetIsolate();
- if (!HasSourceCode()) return isolate->heap()->undefined_value();
- HandleScope scope(isolate);
- Object* source = Script::cast(script())->source();
- return *SubString(Handle<String>(String::cast(source), isolate),
- start_position(), end_position());
-}
-
-
-int SharedFunctionInfo::SourceSize() {
- return end_position() - start_position();
-}
-
-
-int SharedFunctionInfo::CalculateInstanceSize() {
- int instance_size =
- JSObject::kHeaderSize +
- expected_nof_properties() * kPointerSize;
- if (instance_size > JSObject::kMaxInstanceSize) {
- instance_size = JSObject::kMaxInstanceSize;
- }
- return instance_size;
-}
-
-
-int SharedFunctionInfo::CalculateInObjectProperties() {
- return (CalculateInstanceSize() - JSObject::kHeaderSize) / kPointerSize;
-}
-
-
-bool SharedFunctionInfo::CanGenerateInlineConstructor(Object* prototype) {
- // Check the basic conditions for generating inline constructor code.
- if (!FLAG_inline_new
- || !has_only_simple_this_property_assignments()
- || this_property_assignments_count() == 0) {
- return false;
- }
-
- // If the prototype is null inline constructors cause no problems.
- if (!prototype->IsJSObject()) {
- ASSERT(prototype->IsNull());
- return true;
- }
-
- Heap* heap = GetHeap();
-
- // Traverse the proposed prototype chain looking for setters for properties of
- // the same names as are set by the inline constructor.
- for (Object* obj = prototype;
- obj != heap->null_value();
- obj = obj->GetPrototype()) {
- JSObject* js_object = JSObject::cast(obj);
- for (int i = 0; i < this_property_assignments_count(); i++) {
- LookupResult result;
- String* name = GetThisPropertyAssignmentName(i);
- js_object->LocalLookupRealNamedProperty(name, &result);
- if (result.IsProperty() && result.type() == CALLBACKS) {
- return false;
- }
- }
- }
-
- return true;
-}
-
-
-void SharedFunctionInfo::ForbidInlineConstructor() {
- set_compiler_hints(BooleanBit::set(compiler_hints(),
- kHasOnlySimpleThisPropertyAssignments,
- false));
-}
-
-
-void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
- bool only_simple_this_property_assignments,
- FixedArray* assignments) {
- set_compiler_hints(BooleanBit::set(compiler_hints(),
- kHasOnlySimpleThisPropertyAssignments,
- only_simple_this_property_assignments));
- set_this_property_assignments(assignments);
- set_this_property_assignments_count(assignments->length() / 3);
-}
-
-
-void SharedFunctionInfo::ClearThisPropertyAssignmentsInfo() {
- Heap* heap = GetHeap();
- set_compiler_hints(BooleanBit::set(compiler_hints(),
- kHasOnlySimpleThisPropertyAssignments,
- false));
- set_this_property_assignments(heap->undefined_value());
- set_this_property_assignments_count(0);
-}
-
-
-String* SharedFunctionInfo::GetThisPropertyAssignmentName(int index) {
- Object* obj = this_property_assignments();
- ASSERT(obj->IsFixedArray());
- ASSERT(index < this_property_assignments_count());
- obj = FixedArray::cast(obj)->get(index * 3);
- ASSERT(obj->IsString());
- return String::cast(obj);
-}
-
-
-bool SharedFunctionInfo::IsThisPropertyAssignmentArgument(int index) {
- Object* obj = this_property_assignments();
- ASSERT(obj->IsFixedArray());
- ASSERT(index < this_property_assignments_count());
- obj = FixedArray::cast(obj)->get(index * 3 + 1);
- return Smi::cast(obj)->value() != -1;
-}
-
-
-int SharedFunctionInfo::GetThisPropertyAssignmentArgument(int index) {
- ASSERT(IsThisPropertyAssignmentArgument(index));
- Object* obj =
- FixedArray::cast(this_property_assignments())->get(index * 3 + 1);
- return Smi::cast(obj)->value();
-}
-
-
-Object* SharedFunctionInfo::GetThisPropertyAssignmentConstant(int index) {
- ASSERT(!IsThisPropertyAssignmentArgument(index));
- Object* obj =
- FixedArray::cast(this_property_assignments())->get(index * 3 + 2);
- return obj;
-}
-
-
-// Support function for printing the source code to a StringStream
-// without any allocation in the heap.
-void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
- int max_length) {
- // For some native functions there is no source.
- if (!HasSourceCode()) {
- accumulator->Add("<No Source>");
- return;
- }
-
- // Get the source for the script which this function came from.
- // Don't use String::cast because we don't want more assertion errors while
- // we are already creating a stack dump.
- String* script_source =
- reinterpret_cast<String*>(Script::cast(script())->source());
-
- if (!script_source->LooksValid()) {
- accumulator->Add("<Invalid Source>");
- return;
- }
-
- if (!is_toplevel()) {
- accumulator->Add("function ");
- Object* name = this->name();
- if (name->IsString() && String::cast(name)->length() > 0) {
- accumulator->PrintName(name);
- }
- }
-
- int len = end_position() - start_position();
- if (len <= max_length || max_length < 0) {
- accumulator->Put(script_source, start_position(), end_position());
- } else {
- accumulator->Put(script_source,
- start_position(),
- start_position() + max_length);
- accumulator->Add("...\n");
- }
-}
-
-
-static bool IsCodeEquivalent(Code* code, Code* recompiled) {
- if (code->instruction_size() != recompiled->instruction_size()) return false;
- ByteArray* code_relocation = code->relocation_info();
- ByteArray* recompiled_relocation = recompiled->relocation_info();
- int length = code_relocation->length();
- if (length != recompiled_relocation->length()) return false;
- int compare = memcmp(code_relocation->GetDataStartAddress(),
- recompiled_relocation->GetDataStartAddress(),
- length);
- return compare == 0;
-}
-
-
-void SharedFunctionInfo::EnableDeoptimizationSupport(Code* recompiled) {
- ASSERT(!has_deoptimization_support());
- AssertNoAllocation no_allocation;
- Code* code = this->code();
- if (IsCodeEquivalent(code, recompiled)) {
- // Copy the deoptimization data from the recompiled code.
- code->set_deoptimization_data(recompiled->deoptimization_data());
- code->set_has_deoptimization_support(true);
- } else {
- // TODO(3025757): In case the recompiled isn't equivalent to the
- // old code, we have to replace it. We should try to avoid this
- // altogether because it flushes valuable type feedback by
- // effectively resetting all IC state.
- set_code(recompiled);
- }
- ASSERT(has_deoptimization_support());
-}
-
-
-bool SharedFunctionInfo::VerifyBailoutId(int id) {
- // TODO(srdjan): debugging ARM crashes in hydrogen. OK to disable while
- // we are always bailing out on ARM.
-
- ASSERT(id != AstNode::kNoNumber);
- Code* unoptimized = code();
- DeoptimizationOutputData* data =
- DeoptimizationOutputData::cast(unoptimized->deoptimization_data());
- unsigned ignore = Deoptimizer::GetOutputInfo(data, id, this);
- USE(ignore);
- return true; // Return true if there was no ASSERT.
-}
-
-
-void SharedFunctionInfo::StartInobjectSlackTracking(Map* map) {
- ASSERT(!IsInobjectSlackTrackingInProgress());
-
- // Only initiate the tracking the first time.
- if (live_objects_may_exist()) return;
- set_live_objects_may_exist(true);
-
- // No tracking during the snapshot construction phase.
- if (Serializer::enabled()) return;
-
- if (map->unused_property_fields() == 0) return;
-
- // Nonzero counter is a leftover from the previous attempt interrupted
- // by GC, keep it.
- if (construction_count() == 0) {
- set_construction_count(kGenerousAllocationCount);
- }
- set_initial_map(map);
- Builtins* builtins = map->heap()->isolate()->builtins();
- ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric),
- construct_stub());
- set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown));
-}
-
-
-// Called from GC, hence reinterpret_cast and unchecked accessors.
-void SharedFunctionInfo::DetachInitialMap() {
- Map* map = reinterpret_cast<Map*>(initial_map());
-
- // Make the map remember to restore the link if it survives the GC.
- map->set_bit_field2(
- map->bit_field2() | (1 << Map::kAttachedToSharedFunctionInfo));
-
- // Undo state changes made by StartInobjectTracking (except the
- // construction_count). This way if the initial map does not survive the GC
- // then StartInobjectTracking will be called again the next time the
- // constructor is called. The countdown will continue and (possibly after
- // several more GCs) CompleteInobjectSlackTracking will eventually be called.
- set_initial_map(map->heap()->raw_unchecked_undefined_value());
- Builtins* builtins = map->heap()->isolate()->builtins();
- ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
- *RawField(this, kConstructStubOffset));
- set_construct_stub(builtins->builtin(Builtins::kJSConstructStubGeneric));
- // It is safe to clear the flag: it will be set again if the map is live.
- set_live_objects_may_exist(false);
-}
-
-
-// Called from GC, hence reinterpret_cast and unchecked accessors.
-void SharedFunctionInfo::AttachInitialMap(Map* map) {
- map->set_bit_field2(
- map->bit_field2() & ~(1 << Map::kAttachedToSharedFunctionInfo));
-
- // Resume inobject slack tracking.
- set_initial_map(map);
- Builtins* builtins = map->heap()->isolate()->builtins();
- ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric),
- *RawField(this, kConstructStubOffset));
- set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown));
- // The map survived the gc, so there may be objects referencing it.
- set_live_objects_may_exist(true);
-}
-
-
-static void GetMinInobjectSlack(Map* map, void* data) {
- int slack = map->unused_property_fields();
- if (*reinterpret_cast<int*>(data) > slack) {
- *reinterpret_cast<int*>(data) = slack;
- }
-}
-
-
-static void ShrinkInstanceSize(Map* map, void* data) {
- int slack = *reinterpret_cast<int*>(data);
- map->set_inobject_properties(map->inobject_properties() - slack);
- map->set_unused_property_fields(map->unused_property_fields() - slack);
- map->set_instance_size(map->instance_size() - slack * kPointerSize);
-
- // Visitor id might depend on the instance size, recalculate it.
- map->set_visitor_id(StaticVisitorBase::GetVisitorId(map));
-}
-
-
-void SharedFunctionInfo::CompleteInobjectSlackTracking() {
- ASSERT(live_objects_may_exist() && IsInobjectSlackTrackingInProgress());
- Map* map = Map::cast(initial_map());
-
- Heap* heap = map->heap();
- set_initial_map(heap->undefined_value());
- Builtins* builtins = heap->isolate()->builtins();
- ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
- construct_stub());
- set_construct_stub(builtins->builtin(Builtins::kJSConstructStubGeneric));
-
- int slack = map->unused_property_fields();
- map->TraverseTransitionTree(&GetMinInobjectSlack, &slack);
- if (slack != 0) {
- // Resize the initial map and all maps in its transition tree.
- map->TraverseTransitionTree(&ShrinkInstanceSize, &slack);
- // Give the correct expected_nof_properties to initial maps created later.
- ASSERT(expected_nof_properties() >= slack);
- set_expected_nof_properties(expected_nof_properties() - slack);
- }
-}
-
-
-void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- Object* old_target = target;
- VisitPointer(&target);
- CHECK_EQ(target, old_target); // VisitPointer doesn't change Code* *target.
-}
-
-
-void ObjectVisitor::VisitCodeEntry(Address entry_address) {
- Object* code = Code::GetObjectFromEntryAddress(entry_address);
- Object* old_code = code;
- VisitPointer(&code);
- if (code != old_code) {
- Memory::Address_at(entry_address) = reinterpret_cast<Code*>(code)->entry();
- }
-}
-
-
-void ObjectVisitor::VisitGlobalPropertyCell(RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
- Object* cell = rinfo->target_cell();
- Object* old_cell = cell;
- VisitPointer(&cell);
- if (cell != old_cell) {
- rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
- }
-}
-
-
-void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
- ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
- rinfo->IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
- rinfo->IsPatchedDebugBreakSlotSequence()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
- Object* old_target = target;
- VisitPointer(&target);
- CHECK_EQ(target, old_target); // VisitPointer doesn't change Code* *target.
-}
-
-
-void Code::InvalidateRelocation() {
- set_relocation_info(heap()->empty_byte_array());
-}
-
-
-void Code::Relocate(intptr_t delta) {
- for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
- it.rinfo()->apply(delta);
- }
- CPU::FlushICache(instruction_start(), instruction_size());
-}
-
-
-void Code::CopyFrom(const CodeDesc& desc) {
- // copy code
- memmove(instruction_start(), desc.buffer, desc.instr_size);
-
- // copy reloc info
- memmove(relocation_start(),
- desc.buffer + desc.buffer_size - desc.reloc_size,
- desc.reloc_size);
-
- // unbox handles and relocate
- intptr_t delta = instruction_start() - desc.buffer;
- int mode_mask = RelocInfo::kCodeTargetMask |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
- RelocInfo::kApplyMask;
- Assembler* origin = desc.origin; // Needed to find target_object on X64.
- for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- Handle<Object> p = it.rinfo()->target_object_handle(origin);
- it.rinfo()->set_target_object(*p);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
- it.rinfo()->set_target_cell(*cell);
- } else if (RelocInfo::IsCodeTarget(mode)) {
- // rewrite code handles in inline cache targets to direct
- // pointers to the first instruction in the code object
- Handle<Object> p = it.rinfo()->target_object_handle(origin);
- Code* code = Code::cast(*p);
- it.rinfo()->set_target_address(code->instruction_start());
- } else {
- it.rinfo()->apply(delta);
- }
- }
- CPU::FlushICache(instruction_start(), instruction_size());
-}
-
-
-// Locate the source position which is closest to the address in the code. This
-// is using the source position information embedded in the relocation info.
-// The position returned is relative to the beginning of the script where the
-// source for this function is found.
-int Code::SourcePosition(Address pc) {
- int distance = kMaxInt;
- int position = RelocInfo::kNoPosition; // Initially no position found.
- // Run through all the relocation info to find the best matching source
- // position. All the code needs to be considered as the sequence of the
- // instructions in the code does not necessarily follow the same order as the
- // source.
- RelocIterator it(this, RelocInfo::kPositionMask);
- while (!it.done()) {
- // Only look at positions after the current pc.
- if (it.rinfo()->pc() < pc) {
- // Get position and distance.
-
- int dist = static_cast<int>(pc - it.rinfo()->pc());
- int pos = static_cast<int>(it.rinfo()->data());
- // If this position is closer than the current candidate or if it has the
- // same distance as the current candidate and the position is higher then
- // this position is the new candidate.
- if ((dist < distance) ||
- (dist == distance && pos > position)) {
- position = pos;
- distance = dist;
- }
- }
- it.next();
- }
- return position;
-}
-
-
-// Same as Code::SourcePosition above except it only looks for statement
-// positions.
-int Code::SourceStatementPosition(Address pc) {
- // First find the position as close as possible using all position
- // information.
- int position = SourcePosition(pc);
- // Now find the closest statement position before the position.
- int statement_position = 0;
- RelocIterator it(this, RelocInfo::kPositionMask);
- while (!it.done()) {
- if (RelocInfo::IsStatementPosition(it.rinfo()->rmode())) {
- int p = static_cast<int>(it.rinfo()->data());
- if (statement_position < p && p <= position) {
- statement_position = p;
- }
- }
- it.next();
- }
- return statement_position;
-}
-
-
-SafepointEntry Code::GetSafepointEntry(Address pc) {
- SafepointTable table(this);
- return table.FindEntry(pc);
-}
-
-
-void Code::SetNoStackCheckTable() {
- // Indicate the absence of a stack-check table by a table start after the
- // end of the instructions. Table start must be aligned, so round up.
- set_stack_check_table_offset(RoundUp(instruction_size(), kIntSize));
-}
-
-
-Map* Code::FindFirstMap() {
- ASSERT(is_inline_cache_stub());
- AssertNoAllocation no_allocation;
- int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(this, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- Object* object = info->target_object();
- if (object->IsMap()) return Map::cast(object);
- }
- return NULL;
-}
-
-
-#ifdef ENABLE_DISASSEMBLER
-
-#ifdef OBJECT_PRINT
-
-void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
- disasm::NameConverter converter;
- int deopt_count = DeoptCount();
- PrintF(out, "Deoptimization Input Data (deopt points = %d)\n", deopt_count);
- if (0 == deopt_count) return;
-
- PrintF(out, "%6s %6s %6s %12s\n", "index", "ast id", "argc", "commands");
- for (int i = 0; i < deopt_count; i++) {
- int command_count = 0;
- PrintF(out, "%6d %6d %6d",
- i, AstId(i)->value(), ArgumentsStackHeight(i)->value());
- int translation_index = TranslationIndex(i)->value();
- TranslationIterator iterator(TranslationByteArray(), translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- int frame_count = iterator.Next();
- if (FLAG_print_code_verbose) {
- PrintF(out, " %s {count=%d}\n", Translation::StringFor(opcode),
- frame_count);
- }
-
- for (int i = 0; i < frame_count; ++i) {
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::FRAME == opcode);
- int ast_id = iterator.Next();
- int function_id = iterator.Next();
- JSFunction* function =
- JSFunction::cast(LiteralArray()->get(function_id));
- unsigned height = iterator.Next();
- if (FLAG_print_code_verbose) {
- PrintF(out, "%24s %s {ast_id=%d, function=",
- "", Translation::StringFor(opcode), ast_id);
- function->PrintName(out);
- PrintF(out, ", height=%u}\n", height);
- }
-
- // Size of translation is height plus all incoming arguments including
- // receiver.
- int size = height + function->shared()->formal_parameter_count() + 1;
- command_count += size;
- for (int j = 0; j < size; ++j) {
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- if (FLAG_print_code_verbose) {
- PrintF(out, "%24s %s ", "", Translation::StringFor(opcode));
- }
-
- if (opcode == Translation::DUPLICATE) {
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- if (FLAG_print_code_verbose) {
- PrintF(out, "%s ", Translation::StringFor(opcode));
- }
- --j; // Two commands share the same frame index.
- }
-
- switch (opcode) {
- case Translation::BEGIN:
- case Translation::FRAME:
- case Translation::DUPLICATE:
- UNREACHABLE();
- break;
-
- case Translation::REGISTER: {
- int reg_code = iterator.Next();
- if (FLAG_print_code_verbose) {
- PrintF(out, "{input=%s}", converter.NameOfCPURegister(reg_code));
- }
- break;
- }
-
- case Translation::INT32_REGISTER: {
- int reg_code = iterator.Next();
- if (FLAG_print_code_verbose) {
- PrintF(out, "{input=%s}", converter.NameOfCPURegister(reg_code));
- }
- break;
- }
-
- case Translation::DOUBLE_REGISTER: {
- int reg_code = iterator.Next();
- if (FLAG_print_code_verbose) {
- PrintF(out, "{input=%s}",
- DoubleRegister::AllocationIndexToString(reg_code));
- }
- break;
- }
-
- case Translation::STACK_SLOT: {
- int input_slot_index = iterator.Next();
- if (FLAG_print_code_verbose) {
- PrintF(out, "{input=%d}", input_slot_index);
- }
- break;
- }
-
- case Translation::INT32_STACK_SLOT: {
- int input_slot_index = iterator.Next();
- if (FLAG_print_code_verbose) {
- PrintF(out, "{input=%d}", input_slot_index);
- }
- break;
- }
-
- case Translation::DOUBLE_STACK_SLOT: {
- int input_slot_index = iterator.Next();
- if (FLAG_print_code_verbose) {
- PrintF(out, "{input=%d}", input_slot_index);
- }
- break;
- }
-
- case Translation::LITERAL: {
- unsigned literal_index = iterator.Next();
- if (FLAG_print_code_verbose) {
- PrintF(out, "{literal_id=%u}", literal_index);
- }
- break;
- }
-
- case Translation::ARGUMENTS_OBJECT:
- break;
- }
- if (FLAG_print_code_verbose) PrintF(out, "\n");
- }
- }
- if (!FLAG_print_code_verbose) PrintF(out, " %12d\n", command_count);
- }
-}
-
-
-void DeoptimizationOutputData::DeoptimizationOutputDataPrint(FILE* out) {
- PrintF(out, "Deoptimization Output Data (deopt points = %d)\n",
- this->DeoptPoints());
- if (this->DeoptPoints() == 0) return;
-
- PrintF("%6s %8s %s\n", "ast id", "pc", "state");
- for (int i = 0; i < this->DeoptPoints(); i++) {
- int pc_and_state = this->PcAndState(i)->value();
- PrintF("%6d %8d %s\n",
- this->AstId(i)->value(),
- FullCodeGenerator::PcField::decode(pc_and_state),
- FullCodeGenerator::State2String(
- FullCodeGenerator::StateField::decode(pc_and_state)));
- }
-}
-
-#endif
-
-
-// Identify kind of code.
-const char* Code::Kind2String(Kind kind) {
- switch (kind) {
- case FUNCTION: return "FUNCTION";
- case OPTIMIZED_FUNCTION: return "OPTIMIZED_FUNCTION";
- case STUB: return "STUB";
- case BUILTIN: return "BUILTIN";
- case LOAD_IC: return "LOAD_IC";
- case KEYED_LOAD_IC: return "KEYED_LOAD_IC";
- case KEYED_EXTERNAL_ARRAY_LOAD_IC: return "KEYED_EXTERNAL_ARRAY_LOAD_IC";
- case STORE_IC: return "STORE_IC";
- case KEYED_STORE_IC: return "KEYED_STORE_IC";
- case KEYED_EXTERNAL_ARRAY_STORE_IC: return "KEYED_EXTERNAL_ARRAY_STORE_IC";
- case CALL_IC: return "CALL_IC";
- case KEYED_CALL_IC: return "KEYED_CALL_IC";
- case BINARY_OP_IC: return "BINARY_OP_IC";
- case TYPE_RECORDING_BINARY_OP_IC: return "TYPE_RECORDING_BINARY_OP_IC";
- case COMPARE_IC: return "COMPARE_IC";
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-const char* Code::ICState2String(InlineCacheState state) {
- switch (state) {
- case UNINITIALIZED: return "UNINITIALIZED";
- case PREMONOMORPHIC: return "PREMONOMORPHIC";
- case MONOMORPHIC: return "MONOMORPHIC";
- case MONOMORPHIC_PROTOTYPE_FAILURE: return "MONOMORPHIC_PROTOTYPE_FAILURE";
- case MEGAMORPHIC: return "MEGAMORPHIC";
- case DEBUG_BREAK: return "DEBUG_BREAK";
- case DEBUG_PREPARE_STEP_IN: return "DEBUG_PREPARE_STEP_IN";
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-const char* Code::PropertyType2String(PropertyType type) {
- switch (type) {
- case NORMAL: return "NORMAL";
- case FIELD: return "FIELD";
- case CONSTANT_FUNCTION: return "CONSTANT_FUNCTION";
- case CALLBACKS: return "CALLBACKS";
- case INTERCEPTOR: return "INTERCEPTOR";
- case MAP_TRANSITION: return "MAP_TRANSITION";
- case EXTERNAL_ARRAY_TRANSITION: return "EXTERNAL_ARRAY_TRANSITION";
- case CONSTANT_TRANSITION: return "CONSTANT_TRANSITION";
- case NULL_DESCRIPTOR: return "NULL_DESCRIPTOR";
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-void Code::PrintExtraICState(FILE* out, Kind kind, ExtraICState extra) {
- const char* name = NULL;
- switch (kind) {
- case CALL_IC:
- if (extra == STRING_INDEX_OUT_OF_BOUNDS) {
- name = "STRING_INDEX_OUT_OF_BOUNDS";
- }
- break;
- case STORE_IC:
- case KEYED_STORE_IC:
- if (extra == kStrictMode) {
- name = "STRICT";
- }
- break;
- default:
- break;
- }
- if (name != NULL) {
- PrintF(out, "extra_ic_state = %s\n", name);
- } else {
- PrintF(out, "etra_ic_state = %d\n", extra);
- }
-}
-
-
-void Code::Disassemble(const char* name, FILE* out) {
- PrintF(out, "kind = %s\n", Kind2String(kind()));
- if (is_inline_cache_stub()) {
- PrintF(out, "ic_state = %s\n", ICState2String(ic_state()));
- PrintExtraICState(out, kind(), extra_ic_state());
- PrintF(out, "ic_in_loop = %d\n", ic_in_loop() == IN_LOOP);
- if (ic_state() == MONOMORPHIC) {
- PrintF(out, "type = %s\n", PropertyType2String(type()));
- }
- }
- if ((name != NULL) && (name[0] != '\0')) {
- PrintF(out, "name = %s\n", name);
- }
- if (kind() == OPTIMIZED_FUNCTION) {
- PrintF(out, "stack_slots = %d\n", stack_slots());
- }
-
- PrintF(out, "Instructions (size = %d)\n", instruction_size());
- Disassembler::Decode(out, this);
- PrintF(out, "\n");
-
-#ifdef DEBUG
- if (kind() == FUNCTION) {
- DeoptimizationOutputData* data =
- DeoptimizationOutputData::cast(this->deoptimization_data());
- data->DeoptimizationOutputDataPrint(out);
- } else if (kind() == OPTIMIZED_FUNCTION) {
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(this->deoptimization_data());
- data->DeoptimizationInputDataPrint(out);
- }
- PrintF("\n");
-#endif
-
- if (kind() == OPTIMIZED_FUNCTION) {
- SafepointTable table(this);
- PrintF(out, "Safepoints (size = %u)\n", table.size());
- for (unsigned i = 0; i < table.length(); i++) {
- unsigned pc_offset = table.GetPcOffset(i);
- PrintF(out, "%p %4d ", (instruction_start() + pc_offset), pc_offset);
- table.PrintEntry(i);
- PrintF(out, " (sp -> fp)");
- SafepointEntry entry = table.GetEntry(i);
- if (entry.deoptimization_index() != Safepoint::kNoDeoptimizationIndex) {
- PrintF(out, " %6d", entry.deoptimization_index());
- } else {
- PrintF(out, " <none>");
- }
- if (entry.argument_count() > 0) {
- PrintF(out, " argc: %d", entry.argument_count());
- }
- PrintF(out, "\n");
- }
- PrintF(out, "\n");
- } else if (kind() == FUNCTION) {
- unsigned offset = stack_check_table_offset();
- // If there is no stack check table, the "table start" will at or after
- // (due to alignment) the end of the instruction stream.
- if (static_cast<int>(offset) < instruction_size()) {
- unsigned* address =
- reinterpret_cast<unsigned*>(instruction_start() + offset);
- unsigned length = address[0];
- PrintF(out, "Stack checks (size = %u)\n", length);
- PrintF(out, "ast_id pc_offset\n");
- for (unsigned i = 0; i < length; ++i) {
- unsigned index = (2 * i) + 1;
- PrintF(out, "%6u %9u\n", address[index], address[index + 1]);
- }
- PrintF(out, "\n");
- }
- }
-
- PrintF("RelocInfo (size = %d)\n", relocation_size());
- for (RelocIterator it(this); !it.done(); it.next()) it.rinfo()->Print(out);
- PrintF(out, "\n");
-}
-#endif // ENABLE_DISASSEMBLER
-
-
-MaybeObject* JSObject::SetFastElementsCapacityAndLength(int capacity,
- int length) {
- Heap* heap = GetHeap();
- // We should never end in here with a pixel or external array.
- ASSERT(!HasExternalArrayElements());
-
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(capacity);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* elems = FixedArray::cast(obj);
-
- { MaybeObject* maybe_obj = map()->GetFastElementsMap();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- Map* new_map = Map::cast(obj);
-
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = elems->GetWriteBarrierMode(no_gc);
- switch (GetElementsKind()) {
- case FAST_ELEMENTS: {
- FixedArray* old_elements = FixedArray::cast(elements());
- uint32_t old_length = static_cast<uint32_t>(old_elements->length());
- // Fill out the new array with this content and array holes.
- for (uint32_t i = 0; i < old_length; i++) {
- elems->set(i, old_elements->get(i), mode);
- }
- break;
- }
- case DICTIONARY_ELEMENTS: {
- NumberDictionary* dictionary = NumberDictionary::cast(elements());
- for (int i = 0; i < dictionary->Capacity(); i++) {
- Object* key = dictionary->KeyAt(i);
- if (key->IsNumber()) {
- uint32_t entry = static_cast<uint32_t>(key->Number());
- elems->set(entry, dictionary->ValueAt(i), mode);
- }
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-
- set_map(new_map);
- set_elements(elems);
-
- if (IsJSArray()) {
- JSArray::cast(this)->set_length(Smi::FromInt(length));
- }
-
- return this;
-}
-
-
-MaybeObject* JSObject::SetSlowElements(Object* len) {
- // We should never end in here with a pixel or external array.
- ASSERT(!HasExternalArrayElements());
-
- uint32_t new_length = static_cast<uint32_t>(len->Number());
-
- switch (GetElementsKind()) {
- case FAST_ELEMENTS: {
- // Make sure we never try to shrink dense arrays into sparse arrays.
- ASSERT(static_cast<uint32_t>(FixedArray::cast(elements())->length()) <=
- new_length);
- Object* obj;
- { MaybeObject* maybe_obj = NormalizeElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- // Update length for JSArrays.
- if (IsJSArray()) JSArray::cast(this)->set_length(len);
- break;
- }
- case DICTIONARY_ELEMENTS: {
- if (IsJSArray()) {
- uint32_t old_length =
- static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
- element_dictionary()->RemoveNumberEntries(new_length, old_length),
- JSArray::cast(this)->set_length(len);
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- return this;
-}
-
-
-MaybeObject* JSArray::Initialize(int capacity) {
- Heap* heap = GetHeap();
- ASSERT(capacity >= 0);
- set_length(Smi::FromInt(0));
- FixedArray* new_elements;
- if (capacity == 0) {
- new_elements = heap->empty_fixed_array();
- } else {
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(capacity);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- new_elements = FixedArray::cast(obj);
- }
- set_elements(new_elements);
- return this;
-}
-
-
-void JSArray::Expand(int required_size) {
- Handle<JSArray> self(this);
- Handle<FixedArray> old_backing(FixedArray::cast(elements()));
- int old_size = old_backing->length();
- int new_size = required_size > old_size ? required_size : old_size;
- Handle<FixedArray> new_backing = FACTORY->NewFixedArray(new_size);
- // Can't use this any more now because we may have had a GC!
- for (int i = 0; i < old_size; i++) new_backing->set(i, old_backing->get(i));
- self->SetContent(*new_backing);
-}
-
-
-static Failure* ArrayLengthRangeError(Heap* heap) {
- HandleScope scope;
- return heap->isolate()->Throw(
- *FACTORY->NewRangeError("invalid_array_length",
- HandleVector<Object>(NULL, 0)));
-}
-
-
-MaybeObject* JSObject::SetElementsLength(Object* len) {
- // We should never end in here with a pixel or external array.
- ASSERT(AllowsSetElementsLength());
-
- MaybeObject* maybe_smi_length = len->ToSmi();
- Object* smi_length = Smi::FromInt(0);
- if (maybe_smi_length->ToObject(&smi_length) && smi_length->IsSmi()) {
- const int value = Smi::cast(smi_length)->value();
- if (value < 0) return ArrayLengthRangeError(GetHeap());
- switch (GetElementsKind()) {
- case FAST_ELEMENTS: {
- int old_capacity = FixedArray::cast(elements())->length();
- if (value <= old_capacity) {
- if (IsJSArray()) {
- Object* obj;
- { MaybeObject* maybe_obj = EnsureWritableFastElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- int old_length = FastD2I(JSArray::cast(this)->length()->Number());
- // NOTE: We may be able to optimize this by removing the
- // last part of the elements backing storage array and
- // setting the capacity to the new size.
- for (int i = value; i < old_length; i++) {
- FixedArray::cast(elements())->set_the_hole(i);
- }
- JSArray::cast(this)->set_length(Smi::cast(smi_length));
- }
- return this;
- }
- int min = NewElementsCapacity(old_capacity);
- int new_capacity = value > min ? value : min;
- if (new_capacity <= kMaxFastElementsLength ||
- !ShouldConvertToSlowElements(new_capacity)) {
- Object* obj;
- { MaybeObject* maybe_obj =
- SetFastElementsCapacityAndLength(new_capacity, value);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- return this;
- }
- break;
- }
- case DICTIONARY_ELEMENTS: {
- if (IsJSArray()) {
- if (value == 0) {
- // If the length of a slow array is reset to zero, we clear
- // the array and flush backing storage. This has the added
- // benefit that the array returns to fast mode.
- Object* obj;
- { MaybeObject* maybe_obj = ResetElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- } else {
- // Remove deleted elements.
- uint32_t old_length =
- static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
- element_dictionary()->RemoveNumberEntries(value, old_length);
- }
- JSArray::cast(this)->set_length(Smi::cast(smi_length));
- }
- return this;
- }
- default:
- UNREACHABLE();
- break;
- }
- }
-
- // General slow case.
- if (len->IsNumber()) {
- uint32_t length;
- if (len->ToArrayIndex(&length)) {
- return SetSlowElements(len);
- } else {
- return ArrayLengthRangeError(GetHeap());
- }
- }
-
- // len is not a number so make the array size one and
- // set only element to len.
- Object* obj;
- { MaybeObject* maybe_obj = GetHeap()->AllocateFixedArray(1);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray::cast(obj)->set(0, len);
- if (IsJSArray()) JSArray::cast(this)->set_length(Smi::FromInt(1));
- set_elements(FixedArray::cast(obj));
- return this;
-}
-
-
-MaybeObject* JSObject::SetPrototype(Object* value,
- bool skip_hidden_prototypes) {
- Heap* heap = GetHeap();
- // Silently ignore the change if value is not a JSObject or null.
- // SpiderMonkey behaves this way.
- if (!value->IsJSObject() && !value->IsNull()) return value;
-
- // Before we can set the prototype we need to be sure
- // prototype cycles are prevented.
- // It is sufficient to validate that the receiver is not in the new prototype
- // chain.
- for (Object* pt = value; pt != heap->null_value(); pt = pt->GetPrototype()) {
- if (JSObject::cast(pt) == this) {
- // Cycle detected.
- HandleScope scope;
- return heap->isolate()->Throw(
- *FACTORY->NewError("cyclic_proto", HandleVector<Object>(NULL, 0)));
- }
- }
-
- JSObject* real_receiver = this;
-
- if (skip_hidden_prototypes) {
- // Find the first object in the chain whose prototype object is not
- // hidden and set the new prototype on that object.
- Object* current_proto = real_receiver->GetPrototype();
- while (current_proto->IsJSObject() &&
- JSObject::cast(current_proto)->map()->is_hidden_prototype()) {
- real_receiver = JSObject::cast(current_proto);
- current_proto = current_proto->GetPrototype();
- }
- }
-
- // Set the new prototype of the object.
- Object* new_map;
- { MaybeObject* maybe_new_map = real_receiver->map()->CopyDropTransitions();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- }
- Map::cast(new_map)->set_prototype(value);
- real_receiver->set_map(Map::cast(new_map));
-
- heap->ClearInstanceofCache();
-
- return value;
-}
-
-
-bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) {
- switch (GetElementsKind()) {
- case FAST_ELEMENTS: {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>
- (Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedArray::cast(elements())->length());
- if ((index < length) &&
- !FixedArray::cast(elements())->get(index)->IsTheHole()) {
- return true;
- }
- break;
- }
- case EXTERNAL_PIXEL_ELEMENTS: {
- ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
- if (index < static_cast<uint32_t>(pixels->length())) {
- return true;
- }
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS: {
- ExternalArray* array = ExternalArray::cast(elements());
- if (index < static_cast<uint32_t>(array->length())) {
- return true;
- }
- break;
- }
- case DICTIONARY_ELEMENTS: {
- if (element_dictionary()->FindEntry(index)
- != NumberDictionary::kNotFound) {
- return true;
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-
- // Handle [] on String objects.
- if (this->IsStringObjectWithCharacterAt(index)) return true;
-
- Object* pt = GetPrototype();
- if (pt->IsNull()) return false;
- return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
-}
-
-
-bool JSObject::HasElementWithInterceptor(JSObject* receiver, uint32_t index) {
- Isolate* isolate = GetIsolate();
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc;
- HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- Handle<JSObject> receiver_handle(receiver);
- Handle<JSObject> holder_handle(this);
- CustomArguments args(isolate, interceptor->data(), receiver, this);
- v8::AccessorInfo info(args.end());
- if (!interceptor->query()->IsUndefined()) {
- v8::IndexedPropertyQuery query =
- v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
- v8::Handle<v8::Integer> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = query(index, info);
- }
- if (!result.IsEmpty()) {
- ASSERT(result->IsInt32());
- return true; // absence of property is signaled by empty handle.
- }
- } else if (!interceptor->getter()->IsUndefined()) {
- v8::IndexedPropertyGetter getter =
- v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-has-get", this, index));
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = getter(index, info);
- }
- if (!result.IsEmpty()) return true;
- }
- return holder_handle->HasElementPostInterceptor(*receiver_handle, index);
-}
-
-
-JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
- // Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return UNDEFINED_ELEMENT;
- }
- }
-
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return UNDEFINED_ELEMENT;
- ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->HasLocalElement(index);
- }
-
- // Check for lookup interceptor
- if (HasIndexedInterceptor()) {
- return HasElementWithInterceptor(this, index) ? INTERCEPTED_ELEMENT
- : UNDEFINED_ELEMENT;
- }
-
- // Handle [] on String objects.
- if (this->IsStringObjectWithCharacterAt(index)) {
- return STRING_CHARACTER_ELEMENT;
- }
-
- switch (GetElementsKind()) {
- case FAST_ELEMENTS: {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>
- (Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedArray::cast(elements())->length());
- if ((index < length) &&
- !FixedArray::cast(elements())->get(index)->IsTheHole()) {
- return FAST_ELEMENT;
- }
- break;
- }
- case EXTERNAL_PIXEL_ELEMENTS: {
- ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
- if (index < static_cast<uint32_t>(pixels->length())) return FAST_ELEMENT;
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS: {
- ExternalArray* array = ExternalArray::cast(elements());
- if (index < static_cast<uint32_t>(array->length())) return FAST_ELEMENT;
- break;
- }
- case DICTIONARY_ELEMENTS: {
- if (element_dictionary()->FindEntry(index) !=
- NumberDictionary::kNotFound) {
- return DICTIONARY_ELEMENT;
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-
- return UNDEFINED_ELEMENT;
-}
-
-
-bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) {
- // Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return false;
- }
- }
-
- // Check for lookup interceptor
- if (HasIndexedInterceptor()) {
- return HasElementWithInterceptor(receiver, index);
- }
-
- switch (GetElementsKind()) {
- case FAST_ELEMENTS: {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>
- (Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedArray::cast(elements())->length());
- if ((index < length) &&
- !FixedArray::cast(elements())->get(index)->IsTheHole()) return true;
- break;
- }
- case EXTERNAL_PIXEL_ELEMENTS: {
- ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
- if (index < static_cast<uint32_t>(pixels->length())) {
- return true;
- }
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS: {
- ExternalArray* array = ExternalArray::cast(elements());
- if (index < static_cast<uint32_t>(array->length())) {
- return true;
- }
- break;
- }
- case DICTIONARY_ELEMENTS: {
- if (element_dictionary()->FindEntry(index)
- != NumberDictionary::kNotFound) {
- return true;
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-
- // Handle [] on String objects.
- if (this->IsStringObjectWithCharacterAt(index)) return true;
-
- Object* pt = GetPrototype();
- if (pt->IsNull()) return false;
- return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
-}
-
-
-MaybeObject* JSObject::SetElementWithInterceptor(uint32_t index,
- Object* value,
- StrictModeFlag strict_mode,
- bool check_prototype) {
- Isolate* isolate = GetIsolate();
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc;
- HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- Handle<JSObject> this_handle(this);
- Handle<Object> value_handle(value, isolate);
- if (!interceptor->setter()->IsUndefined()) {
- v8::IndexedPropertySetter setter =
- v8::ToCData<v8::IndexedPropertySetter>(interceptor->setter());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-set", this, index));
- CustomArguments args(isolate, interceptor->data(), this, this);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = setter(index, v8::Utils::ToLocal(value_handle), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!result.IsEmpty()) return *value_handle;
- }
- MaybeObject* raw_result =
- this_handle->SetElementWithoutInterceptor(index,
- *value_handle,
- strict_mode,
- check_prototype);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return raw_result;
-}
-
-
-MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
- Object* structure,
- uint32_t index,
- Object* holder) {
- Isolate* isolate = GetIsolate();
- ASSERT(!structure->IsProxy());
-
- // api style callbacks.
- if (structure->IsAccessorInfo()) {
- AccessorInfo* data = AccessorInfo::cast(structure);
- Object* fun_obj = data->getter();
- v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
- HandleScope scope(isolate);
- Handle<JSObject> self(JSObject::cast(receiver));
- Handle<JSObject> holder_handle(JSObject::cast(holder));
- Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<String> key(isolate->factory()->NumberToString(number));
- LOG(isolate, ApiNamedPropertyAccess("load", *self, *key));
- CustomArguments args(isolate, data->data(), *self, *holder_handle);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = call_fun(v8::Utils::ToLocal(key), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (result.IsEmpty()) return isolate->heap()->undefined_value();
- return *v8::Utils::OpenHandle(*result);
- }
-
- // __defineGetter__ callback
- if (structure->IsFixedArray()) {
- Object* getter = FixedArray::cast(structure)->get(kGetterIndex);
- if (getter->IsJSFunction()) {
- return Object::GetPropertyWithDefinedGetter(receiver,
- JSFunction::cast(getter));
- }
- // Getter is not a function.
- return isolate->heap()->undefined_value();
- }
-
- UNREACHABLE();
- return NULL;
-}
-
-
-MaybeObject* JSObject::SetElementWithCallback(Object* structure,
- uint32_t index,
- Object* value,
- JSObject* holder) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
-
- // We should never get here to initialize a const with the hole
- // value since a const declaration would conflict with the setter.
- ASSERT(!value->IsTheHole());
- Handle<Object> value_handle(value, isolate);
-
- // To accommodate both the old and the new api we switch on the
- // data structure used to store the callbacks. Eventually proxy
- // callbacks should be phased out.
- ASSERT(!structure->IsProxy());
-
- if (structure->IsAccessorInfo()) {
- // api style callbacks
- AccessorInfo* data = AccessorInfo::cast(structure);
- Object* call_obj = data->setter();
- v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
- if (call_fun == NULL) return value;
- Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<String> key(isolate->factory()->NumberToString(number));
- LOG(isolate, ApiNamedPropertyAccess("store", this, *key));
- CustomArguments args(isolate, data->data(), this, JSObject::cast(holder));
- v8::AccessorInfo info(args.end());
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- call_fun(v8::Utils::ToLocal(key),
- v8::Utils::ToLocal(value_handle),
- info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return *value_handle;
- }
-
- if (structure->IsFixedArray()) {
- Object* setter = FixedArray::cast(structure)->get(kSetterIndex);
- if (setter->IsJSFunction()) {
- return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
- } else {
- Handle<Object> holder_handle(holder, isolate);
- Handle<Object> key(isolate->factory()->NewNumberFromUint(index));
- Handle<Object> args[2] = { key, holder_handle };
- return isolate->Throw(
- *isolate->factory()->NewTypeError("no_setter_in_callback",
- HandleVector(args, 2)));
- }
- }
-
- UNREACHABLE();
- return NULL;
-}
-
-
-// Adding n elements in fast case is O(n*n).
-// Note: revisit design to have dual undefined values to capture absent
-// elements.
-MaybeObject* JSObject::SetFastElement(uint32_t index,
- Object* value,
- StrictModeFlag strict_mode,
- bool check_prototype) {
- ASSERT(HasFastElements());
-
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj = EnsureWritableFastElements();
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
- }
- FixedArray* elms = FixedArray::cast(elms_obj);
- uint32_t elms_length = static_cast<uint32_t>(elms->length());
-
- if (check_prototype &&
- (index >= elms_length || elms->get(index)->IsTheHole())) {
- bool found;
- MaybeObject* result =
- SetElementWithCallbackSetterInPrototypes(index, value, &found);
- if (found) return result;
- }
-
-
- // Check whether there is extra space in fixed array..
- if (index < elms_length) {
- elms->set(index, value);
- if (IsJSArray()) {
- // Update the length of the array if needed.
- uint32_t array_length = 0;
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
- if (index >= array_length) {
- JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
- }
- }
- return value;
- }
-
- // Allow gap in fast case.
- if ((index - elms_length) < kMaxGap) {
- // Try allocating extra space.
- int new_capacity = NewElementsCapacity(index+1);
- if (new_capacity <= kMaxFastElementsLength ||
- !ShouldConvertToSlowElements(new_capacity)) {
- ASSERT(static_cast<uint32_t>(new_capacity) > index);
- Object* obj;
- { MaybeObject* maybe_obj =
- SetFastElementsCapacityAndLength(new_capacity, index + 1);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray::cast(elements())->set(index, value);
- return value;
- }
- }
-
- // Otherwise default to slow case.
- Object* obj;
- { MaybeObject* maybe_obj = NormalizeElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- ASSERT(HasDictionaryElements());
- return SetElement(index, value, strict_mode, check_prototype);
-}
-
-
-MaybeObject* JSObject::SetElement(uint32_t index,
- Object* value,
- StrictModeFlag strict_mode,
- bool check_prototype) {
- // Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_SET)) {
- HandleScope scope;
- Handle<Object> value_handle(value);
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
- return *value_handle;
- }
- }
-
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return value;
- ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->SetElement(index,
- value,
- strict_mode,
- check_prototype);
- }
-
- // Check for lookup interceptor
- if (HasIndexedInterceptor()) {
- return SetElementWithInterceptor(index,
- value,
- strict_mode,
- check_prototype);
- }
-
- return SetElementWithoutInterceptor(index,
- value,
- strict_mode,
- check_prototype);
-}
-
-
-MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
- Object* value,
- StrictModeFlag strict_mode,
- bool check_prototype) {
- Isolate* isolate = GetIsolate();
- switch (GetElementsKind()) {
- case FAST_ELEMENTS:
- // Fast case.
- return SetFastElement(index, value, strict_mode, check_prototype);
- case EXTERNAL_PIXEL_ELEMENTS: {
- ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
- return pixels->SetValue(index, value);
- }
- case EXTERNAL_BYTE_ELEMENTS: {
- ExternalByteArray* array = ExternalByteArray::cast(elements());
- return array->SetValue(index, value);
- }
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
- ExternalUnsignedByteArray* array =
- ExternalUnsignedByteArray::cast(elements());
- return array->SetValue(index, value);
- }
- case EXTERNAL_SHORT_ELEMENTS: {
- ExternalShortArray* array = ExternalShortArray::cast(elements());
- return array->SetValue(index, value);
- }
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
- ExternalUnsignedShortArray* array =
- ExternalUnsignedShortArray::cast(elements());
- return array->SetValue(index, value);
- }
- case EXTERNAL_INT_ELEMENTS: {
- ExternalIntArray* array = ExternalIntArray::cast(elements());
- return array->SetValue(index, value);
- }
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- ExternalUnsignedIntArray* array =
- ExternalUnsignedIntArray::cast(elements());
- return array->SetValue(index, value);
- }
- case EXTERNAL_FLOAT_ELEMENTS: {
- ExternalFloatArray* array = ExternalFloatArray::cast(elements());
- return array->SetValue(index, value);
- }
- case DICTIONARY_ELEMENTS: {
- // Insert element in the dictionary.
- FixedArray* elms = FixedArray::cast(elements());
- NumberDictionary* dictionary = NumberDictionary::cast(elms);
-
- int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
- Object* element = dictionary->ValueAt(entry);
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.type() == CALLBACKS) {
- return SetElementWithCallback(element, index, value, this);
- } else {
- dictionary->UpdateMaxNumberKey(index);
- // If put fails instrict mode, throw exception.
- if (!dictionary->ValueAtPut(entry, value) &&
- strict_mode == kStrictMode) {
- Handle<Object> number(isolate->factory()->NewNumberFromUint(index));
- Handle<Object> holder(this);
- Handle<Object> args[2] = { number, holder };
- return isolate->Throw(
- *isolate->factory()->NewTypeError("strict_read_only_property",
- HandleVector(args, 2)));
- }
- }
- } else {
- // Index not already used. Look for an accessor in the prototype chain.
- if (check_prototype) {
- bool found;
- MaybeObject* result =
- // Strict mode not needed. No-setter case already handled.
- SetElementWithCallbackSetterInPrototypes(index, value, &found);
- if (found) return result;
- }
- // When we set the is_extensible flag to false we always force
- // the element into dictionary mode (and force them to stay there).
- if (!map()->is_extensible()) {
- if (strict_mode == kNonStrictMode) {
- return isolate->heap()->undefined_value();
- } else {
- Handle<Object> number(isolate->factory()->NewNumberFromUint(index));
- Handle<String> index_string(
- isolate->factory()->NumberToString(number));
- Handle<Object> args[1] = { index_string };
- return isolate->Throw(
- *isolate->factory()->NewTypeError("object_not_extensible",
- HandleVector(args, 1)));
- }
- }
- Object* result;
- { MaybeObject* maybe_result = dictionary->AtNumberPut(index, value);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- if (elms != FixedArray::cast(result)) {
- set_elements(FixedArray::cast(result));
- }
- }
-
- // Update the array length if this JSObject is an array.
- if (IsJSArray()) {
- JSArray* array = JSArray::cast(this);
- Object* return_value;
- { MaybeObject* maybe_return_value =
- array->JSArrayUpdateLengthFromIndex(index, value);
- if (!maybe_return_value->ToObject(&return_value)) {
- return maybe_return_value;
- }
- }
- }
-
- // Attempt to put this object back in fast case.
- if (ShouldConvertToFastElements()) {
- uint32_t new_length = 0;
- if (IsJSArray()) {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&new_length));
- } else {
- new_length = NumberDictionary::cast(elements())->max_number_key() + 1;
- }
- Object* obj;
- { MaybeObject* maybe_obj =
- SetFastElementsCapacityAndLength(new_length, new_length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-#ifdef DEBUG
- if (FLAG_trace_normalization) {
- PrintF("Object elements are fast case again:\n");
- Print();
- }
-#endif
- }
-
- return value;
- }
- default:
- UNREACHABLE();
- break;
- }
- // All possible cases have been handled above. Add a return to avoid the
- // complaints from the compiler.
- UNREACHABLE();
- return isolate->heap()->null_value();
-}
-
-
-MaybeObject* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index,
- Object* value) {
- uint32_t old_len = 0;
- CHECK(length()->ToArrayIndex(&old_len));
- // Check to see if we need to update the length. For now, we make
- // sure that the length stays within 32-bits (unsigned).
- if (index >= old_len && index != 0xffffffff) {
- Object* len;
- { MaybeObject* maybe_len =
- GetHeap()->NumberFromDouble(static_cast<double>(index) + 1);
- if (!maybe_len->ToObject(&len)) return maybe_len;
- }
- set_length(len);
- }
- return value;
-}
-
-
-MaybeObject* JSObject::GetElementPostInterceptor(Object* receiver,
- uint32_t index) {
- // Get element works for both JSObject and JSArray since
- // JSArray::length cannot change.
- switch (GetElementsKind()) {
- case FAST_ELEMENTS: {
- FixedArray* elms = FixedArray::cast(elements());
- if (index < static_cast<uint32_t>(elms->length())) {
- Object* value = elms->get(index);
- if (!value->IsTheHole()) return value;
- }
- break;
- }
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS: {
- MaybeObject* maybe_value = GetExternalElement(index);
- Object* value;
- if (!maybe_value->ToObject(&value)) return maybe_value;
- if (!value->IsUndefined()) return value;
- break;
- }
- case DICTIONARY_ELEMENTS: {
- NumberDictionary* dictionary = element_dictionary();
- int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
- Object* element = dictionary->ValueAt(entry);
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.type() == CALLBACKS) {
- return GetElementWithCallback(receiver,
- element,
- index,
- this);
- }
- return element;
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-
- // Continue searching via the prototype chain.
- Object* pt = GetPrototype();
- if (pt->IsNull()) return GetHeap()->undefined_value();
- return pt->GetElementWithReceiver(receiver, index);
-}
-
-
-MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
- uint32_t index) {
- Isolate* isolate = GetIsolate();
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc;
- HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor(), isolate);
- Handle<Object> this_handle(receiver, isolate);
- Handle<JSObject> holder_handle(this, isolate);
-
- if (!interceptor->getter()->IsUndefined()) {
- v8::IndexedPropertyGetter getter =
- v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-get", this, index));
- CustomArguments args(isolate, interceptor->data(), receiver, this);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = getter(index, info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
- }
-
- MaybeObject* raw_result =
- holder_handle->GetElementPostInterceptor(*this_handle, index);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return raw_result;
-}
-
-
-MaybeObject* JSObject::GetElementWithReceiver(Object* receiver,
- uint32_t index) {
- // Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_GET)) {
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_GET);
- return heap->undefined_value();
- }
- }
-
- if (HasIndexedInterceptor()) {
- return GetElementWithInterceptor(receiver, index);
- }
-
- // Get element works for both JSObject and JSArray since
- // JSArray::length cannot change.
- switch (GetElementsKind()) {
- case FAST_ELEMENTS: {
- FixedArray* elms = FixedArray::cast(elements());
- if (index < static_cast<uint32_t>(elms->length())) {
- Object* value = elms->get(index);
- if (!value->IsTheHole()) return value;
- }
- break;
- }
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS: {
- MaybeObject* maybe_value = GetExternalElement(index);
- Object* value;
- if (!maybe_value->ToObject(&value)) return maybe_value;
- if (!value->IsUndefined()) return value;
- break;
- }
- case DICTIONARY_ELEMENTS: {
- NumberDictionary* dictionary = element_dictionary();
- int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
- Object* element = dictionary->ValueAt(entry);
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.type() == CALLBACKS) {
- return GetElementWithCallback(receiver,
- element,
- index,
- this);
- }
- return element;
- }
- break;
- }
- }
-
- Object* pt = GetPrototype();
- Heap* heap = GetHeap();
- if (pt == heap->null_value()) return heap->undefined_value();
- return pt->GetElementWithReceiver(receiver, index);
-}
-
-
-MaybeObject* JSObject::GetExternalElement(uint32_t index) {
- // Get element works for both JSObject and JSArray since
- // JSArray::length cannot change.
- switch (GetElementsKind()) {
- case EXTERNAL_PIXEL_ELEMENTS: {
- ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
- if (index < static_cast<uint32_t>(pixels->length())) {
- uint8_t value = pixels->get(index);
- return Smi::FromInt(value);
- }
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS: {
- ExternalByteArray* array = ExternalByteArray::cast(elements());
- if (index < static_cast<uint32_t>(array->length())) {
- int8_t value = array->get(index);
- return Smi::FromInt(value);
- }
- break;
- }
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
- ExternalUnsignedByteArray* array =
- ExternalUnsignedByteArray::cast(elements());
- if (index < static_cast<uint32_t>(array->length())) {
- uint8_t value = array->get(index);
- return Smi::FromInt(value);
- }
- break;
- }
- case EXTERNAL_SHORT_ELEMENTS: {
- ExternalShortArray* array = ExternalShortArray::cast(elements());
- if (index < static_cast<uint32_t>(array->length())) {
- int16_t value = array->get(index);
- return Smi::FromInt(value);
- }
- break;
- }
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
- ExternalUnsignedShortArray* array =
- ExternalUnsignedShortArray::cast(elements());
- if (index < static_cast<uint32_t>(array->length())) {
- uint16_t value = array->get(index);
- return Smi::FromInt(value);
- }
- break;
- }
- case EXTERNAL_INT_ELEMENTS: {
- ExternalIntArray* array = ExternalIntArray::cast(elements());
- if (index < static_cast<uint32_t>(array->length())) {
- int32_t value = array->get(index);
- return GetHeap()->NumberFromInt32(value);
- }
- break;
- }
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- ExternalUnsignedIntArray* array =
- ExternalUnsignedIntArray::cast(elements());
- if (index < static_cast<uint32_t>(array->length())) {
- uint32_t value = array->get(index);
- return GetHeap()->NumberFromUint32(value);
- }
- break;
- }
- case EXTERNAL_FLOAT_ELEMENTS: {
- ExternalFloatArray* array = ExternalFloatArray::cast(elements());
- if (index < static_cast<uint32_t>(array->length())) {
- float value = array->get(index);
- return GetHeap()->AllocateHeapNumber(value);
- }
- break;
- }
- case FAST_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- UNREACHABLE();
- break;
- }
- return GetHeap()->undefined_value();
-}
-
-
-bool JSObject::HasDenseElements() {
- int capacity = 0;
- int number_of_elements = 0;
-
- switch (GetElementsKind()) {
- case FAST_ELEMENTS: {
- FixedArray* elms = FixedArray::cast(elements());
- capacity = elms->length();
- for (int i = 0; i < capacity; i++) {
- if (!elms->get(i)->IsTheHole()) number_of_elements++;
- }
- break;
- }
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS: {
- return true;
- }
- case DICTIONARY_ELEMENTS: {
- NumberDictionary* dictionary = NumberDictionary::cast(elements());
- capacity = dictionary->Capacity();
- number_of_elements = dictionary->NumberOfElements();
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-
- if (capacity == 0) return true;
- return (number_of_elements > (capacity / 2));
-}
-
-
-bool JSObject::ShouldConvertToSlowElements(int new_capacity) {
- ASSERT(HasFastElements());
- // Keep the array in fast case if the current backing storage is
- // almost filled and if the new capacity is no more than twice the
- // old capacity.
- int elements_length = FixedArray::cast(elements())->length();
- return !HasDenseElements() || ((new_capacity / 2) > elements_length);
-}
-
-
-bool JSObject::ShouldConvertToFastElements() {
- ASSERT(HasDictionaryElements());
- NumberDictionary* dictionary = NumberDictionary::cast(elements());
- // If the elements are sparse, we should not go back to fast case.
- if (!HasDenseElements()) return false;
- // If an element has been added at a very high index in the elements
- // dictionary, we cannot go back to fast case.
- if (dictionary->requires_slow_elements()) return false;
- // An object requiring access checks is never allowed to have fast
- // elements. If it had fast elements we would skip security checks.
- if (IsAccessCheckNeeded()) return false;
- // If the dictionary backing storage takes up roughly half as much
- // space as a fast-case backing storage would the array should have
- // fast elements.
- uint32_t length = 0;
- if (IsJSArray()) {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
- } else {
- length = dictionary->max_number_key();
- }
- return static_cast<uint32_t>(dictionary->Capacity()) >=
- (length / (2 * NumberDictionary::kEntrySize));
-}
-
-
-// Certain compilers request function template instantiation when they
-// see the definition of the other template functions in the
-// class. This requires us to have the template functions put
-// together, so even though this function belongs in objects-debug.cc,
-// we keep it here instead to satisfy certain compilers.
-#ifdef OBJECT_PRINT
-template<typename Shape, typename Key>
-void Dictionary<Shape, Key>::Print(FILE* out) {
- int capacity = HashTable<Shape, Key>::Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* k = HashTable<Shape, Key>::KeyAt(i);
- if (HashTable<Shape, Key>::IsKey(k)) {
- PrintF(out, " ");
- if (k->IsString()) {
- String::cast(k)->StringPrint(out);
- } else {
- k->ShortPrint(out);
- }
- PrintF(out, ": ");
- ValueAt(i)->ShortPrint(out);
- PrintF(out, "\n");
- }
- }
-}
-#endif
-
-
-template<typename Shape, typename Key>
-void Dictionary<Shape, Key>::CopyValuesTo(FixedArray* elements) {
- int pos = 0;
- int capacity = HashTable<Shape, Key>::Capacity();
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < capacity; i++) {
- Object* k = Dictionary<Shape, Key>::KeyAt(i);
- if (Dictionary<Shape, Key>::IsKey(k)) {
- elements->set(pos++, ValueAt(i), mode);
- }
- }
- ASSERT(pos == elements->length());
-}
-
-
-InterceptorInfo* JSObject::GetNamedInterceptor() {
- ASSERT(map()->has_named_interceptor());
- JSFunction* constructor = JSFunction::cast(map()->constructor());
- ASSERT(constructor->shared()->IsApiFunction());
- Object* result =
- constructor->shared()->get_api_func_data()->named_property_handler();
- return InterceptorInfo::cast(result);
-}
-
-
-InterceptorInfo* JSObject::GetIndexedInterceptor() {
- ASSERT(map()->has_indexed_interceptor());
- JSFunction* constructor = JSFunction::cast(map()->constructor());
- ASSERT(constructor->shared()->IsApiFunction());
- Object* result =
- constructor->shared()->get_api_func_data()->indexed_property_handler();
- return InterceptorInfo::cast(result);
-}
-
-
-MaybeObject* JSObject::GetPropertyPostInterceptor(
- JSObject* receiver,
- String* name,
- PropertyAttributes* attributes) {
- // Check local property in holder, ignore interceptor.
- LookupResult result;
- LocalLookupRealNamedProperty(name, &result);
- if (result.IsProperty()) {
- return GetProperty(receiver, &result, name, attributes);
- }
- // Continue searching via the prototype chain.
- Object* pt = GetPrototype();
- *attributes = ABSENT;
- if (pt->IsNull()) return GetHeap()->undefined_value();
- return pt->GetPropertyWithReceiver(receiver, name, attributes);
-}
-
-
-MaybeObject* JSObject::GetLocalPropertyPostInterceptor(
- JSObject* receiver,
- String* name,
- PropertyAttributes* attributes) {
- // Check local property in holder, ignore interceptor.
- LookupResult result;
- LocalLookupRealNamedProperty(name, &result);
- if (result.IsProperty()) {
- return GetProperty(receiver, &result, name, attributes);
- }
- return GetHeap()->undefined_value();
-}
-
-
-MaybeObject* JSObject::GetPropertyWithInterceptor(
- JSObject* receiver,
- String* name,
- PropertyAttributes* attributes) {
- Isolate* isolate = GetIsolate();
- InterceptorInfo* interceptor = GetNamedInterceptor();
- HandleScope scope(isolate);
- Handle<JSObject> receiver_handle(receiver);
- Handle<JSObject> holder_handle(this);
- Handle<String> name_handle(name);
-
- if (!interceptor->getter()->IsUndefined()) {
- v8::NamedPropertyGetter getter =
- v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-get", *holder_handle, name));
- CustomArguments args(isolate, interceptor->data(), receiver, this);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = getter(v8::Utils::ToLocal(name_handle), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!result.IsEmpty()) {
- *attributes = NONE;
- return *v8::Utils::OpenHandle(*result);
- }
- }
-
- MaybeObject* result = holder_handle->GetPropertyPostInterceptor(
- *receiver_handle,
- *name_handle,
- attributes);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return result;
-}
-
-
-bool JSObject::HasRealNamedProperty(String* key) {
- // Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return false;
- }
- }
-
- LookupResult result;
- LocalLookupRealNamedProperty(key, &result);
- return result.IsProperty() && (result.type() != INTERCEPTOR);
-}
-
-
-bool JSObject::HasRealElementProperty(uint32_t index) {
- // Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return false;
- }
- }
-
- // Handle [] on String objects.
- if (this->IsStringObjectWithCharacterAt(index)) return true;
-
- switch (GetElementsKind()) {
- case FAST_ELEMENTS: {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>(
- Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedArray::cast(elements())->length());
- return (index < length) &&
- !FixedArray::cast(elements())->get(index)->IsTheHole();
- }
- case EXTERNAL_PIXEL_ELEMENTS: {
- ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
- return index < static_cast<uint32_t>(pixels->length());
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS: {
- ExternalArray* array = ExternalArray::cast(elements());
- return index < static_cast<uint32_t>(array->length());
- }
- case DICTIONARY_ELEMENTS: {
- return element_dictionary()->FindEntry(index)
- != NumberDictionary::kNotFound;
- }
- default:
- UNREACHABLE();
- break;
- }
- // All possibilities have been handled above already.
- UNREACHABLE();
- return GetHeap()->null_value();
-}
-
-
-bool JSObject::HasRealNamedCallbackProperty(String* key) {
- // Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return false;
- }
- }
-
- LookupResult result;
- LocalLookupRealNamedProperty(key, &result);
- return result.IsProperty() && (result.type() == CALLBACKS);
-}
-
-
-int JSObject::NumberOfLocalProperties(PropertyAttributes filter) {
- if (HasFastProperties()) {
- DescriptorArray* descs = map()->instance_descriptors();
- int result = 0;
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- PropertyDetails details = descs->GetDetails(i);
- if (details.IsProperty() && (details.attributes() & filter) == 0) {
- result++;
- }
- }
- return result;
- } else {
- return property_dictionary()->NumberOfElementsFilterAttributes(filter);
- }
-}
-
-
-int JSObject::NumberOfEnumProperties() {
- return NumberOfLocalProperties(static_cast<PropertyAttributes>(DONT_ENUM));
-}
-
-
-void FixedArray::SwapPairs(FixedArray* numbers, int i, int j) {
- Object* temp = get(i);
- set(i, get(j));
- set(j, temp);
- if (this != numbers) {
- temp = numbers->get(i);
- numbers->set(i, numbers->get(j));
- numbers->set(j, temp);
- }
-}
-
-
-static void InsertionSortPairs(FixedArray* content,
- FixedArray* numbers,
- int len) {
- for (int i = 1; i < len; i++) {
- int j = i;
- while (j > 0 &&
- (NumberToUint32(numbers->get(j - 1)) >
- NumberToUint32(numbers->get(j)))) {
- content->SwapPairs(numbers, j - 1, j);
- j--;
- }
- }
-}
-
-
-void HeapSortPairs(FixedArray* content, FixedArray* numbers, int len) {
- // In-place heap sort.
- ASSERT(content->length() == numbers->length());
-
- // Bottom-up max-heap construction.
- for (int i = 1; i < len; ++i) {
- int child_index = i;
- while (child_index > 0) {
- int parent_index = ((child_index + 1) >> 1) - 1;
- uint32_t parent_value = NumberToUint32(numbers->get(parent_index));
- uint32_t child_value = NumberToUint32(numbers->get(child_index));
- if (parent_value < child_value) {
- content->SwapPairs(numbers, parent_index, child_index);
- } else {
- break;
- }
- child_index = parent_index;
- }
- }
-
- // Extract elements and create sorted array.
- for (int i = len - 1; i > 0; --i) {
- // Put max element at the back of the array.
- content->SwapPairs(numbers, 0, i);
- // Sift down the new top element.
- int parent_index = 0;
- while (true) {
- int child_index = ((parent_index + 1) << 1) - 1;
- if (child_index >= i) break;
- uint32_t child1_value = NumberToUint32(numbers->get(child_index));
- uint32_t child2_value = NumberToUint32(numbers->get(child_index + 1));
- uint32_t parent_value = NumberToUint32(numbers->get(parent_index));
- if (child_index + 1 >= i || child1_value > child2_value) {
- if (parent_value > child1_value) break;
- content->SwapPairs(numbers, parent_index, child_index);
- parent_index = child_index;
- } else {
- if (parent_value > child2_value) break;
- content->SwapPairs(numbers, parent_index, child_index + 1);
- parent_index = child_index + 1;
- }
- }
- }
-}
-
-
-// Sort this array and the numbers as pairs wrt. the (distinct) numbers.
-void FixedArray::SortPairs(FixedArray* numbers, uint32_t len) {
- ASSERT(this->length() == numbers->length());
- // For small arrays, simply use insertion sort.
- if (len <= 10) {
- InsertionSortPairs(this, numbers, len);
- return;
- }
- // Check the range of indices.
- uint32_t min_index = NumberToUint32(numbers->get(0));
- uint32_t max_index = min_index;
- uint32_t i;
- for (i = 1; i < len; i++) {
- if (NumberToUint32(numbers->get(i)) < min_index) {
- min_index = NumberToUint32(numbers->get(i));
- } else if (NumberToUint32(numbers->get(i)) > max_index) {
- max_index = NumberToUint32(numbers->get(i));
- }
- }
- if (max_index - min_index + 1 == len) {
- // Indices form a contiguous range, unless there are duplicates.
- // Do an in-place linear time sort assuming distinct numbers, but
- // avoid hanging in case they are not.
- for (i = 0; i < len; i++) {
- uint32_t p;
- uint32_t j = 0;
- // While the current element at i is not at its correct position p,
- // swap the elements at these two positions.
- while ((p = NumberToUint32(numbers->get(i)) - min_index) != i &&
- j++ < len) {
- SwapPairs(numbers, i, p);
- }
- }
- } else {
- HeapSortPairs(this, numbers, len);
- return;
- }
-}
-
-
-// Fill in the names of local properties into the supplied storage. The main
-// purpose of this function is to provide reflection information for the object
-// mirrors.
-void JSObject::GetLocalPropertyNames(FixedArray* storage, int index) {
- ASSERT(storage->length() >= (NumberOfLocalProperties(NONE) - index));
- if (HasFastProperties()) {
- DescriptorArray* descs = map()->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (descs->IsProperty(i)) storage->set(index++, descs->GetKey(i));
- }
- ASSERT(storage->length() >= index);
- } else {
- property_dictionary()->CopyKeysTo(storage);
- }
-}
-
-
-int JSObject::NumberOfLocalElements(PropertyAttributes filter) {
- return GetLocalElementKeys(NULL, filter);
-}
-
-
-int JSObject::NumberOfEnumElements() {
- // Fast case for objects with no elements.
- if (!IsJSValue() && HasFastElements()) {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>(
- Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedArray::cast(elements())->length());
- if (length == 0) return 0;
- }
- // Compute the number of enumerable elements.
- return NumberOfLocalElements(static_cast<PropertyAttributes>(DONT_ENUM));
-}
-
-
-int JSObject::GetLocalElementKeys(FixedArray* storage,
- PropertyAttributes filter) {
- int counter = 0;
- switch (GetElementsKind()) {
- case FAST_ELEMENTS: {
- int length = IsJSArray() ?
- Smi::cast(JSArray::cast(this)->length())->value() :
- FixedArray::cast(elements())->length();
- for (int i = 0; i < length; i++) {
- if (!FixedArray::cast(elements())->get(i)->IsTheHole()) {
- if (storage != NULL) {
- storage->set(counter, Smi::FromInt(i));
- }
- counter++;
- }
- }
- ASSERT(!storage || storage->length() >= counter);
- break;
- }
- case EXTERNAL_PIXEL_ELEMENTS: {
- int length = ExternalPixelArray::cast(elements())->length();
- while (counter < length) {
- if (storage != NULL) {
- storage->set(counter, Smi::FromInt(counter));
- }
- counter++;
- }
- ASSERT(!storage || storage->length() >= counter);
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS: {
- int length = ExternalArray::cast(elements())->length();
- while (counter < length) {
- if (storage != NULL) {
- storage->set(counter, Smi::FromInt(counter));
- }
- counter++;
- }
- ASSERT(!storage || storage->length() >= counter);
- break;
- }
- case DICTIONARY_ELEMENTS: {
- if (storage != NULL) {
- element_dictionary()->CopyKeysTo(storage, filter);
- }
- counter = element_dictionary()->NumberOfElementsFilterAttributes(filter);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-
- if (this->IsJSValue()) {
- Object* val = JSValue::cast(this)->value();
- if (val->IsString()) {
- String* str = String::cast(val);
- if (storage) {
- for (int i = 0; i < str->length(); i++) {
- storage->set(counter + i, Smi::FromInt(i));
- }
- }
- counter += str->length();
- }
- }
- ASSERT(!storage || storage->length() == counter);
- return counter;
-}
-
-
-int JSObject::GetEnumElementKeys(FixedArray* storage) {
- return GetLocalElementKeys(storage,
- static_cast<PropertyAttributes>(DONT_ENUM));
-}
-
-
-// StringKey simply carries a string object as key.
-class StringKey : public HashTableKey {
- public:
- explicit StringKey(String* string) :
- string_(string),
- hash_(HashForObject(string)) { }
-
- bool IsMatch(Object* string) {
- // We know that all entries in a hash table had their hash keys created.
- // Use that knowledge to have fast failure.
- if (hash_ != HashForObject(string)) {
- return false;
- }
- return string_->Equals(String::cast(string));
- }
-
- uint32_t Hash() { return hash_; }
-
- uint32_t HashForObject(Object* other) { return String::cast(other)->Hash(); }
-
- Object* AsObject() { return string_; }
-
- String* string_;
- uint32_t hash_;
-};
-
-
-// StringSharedKeys are used as keys in the eval cache.
-class StringSharedKey : public HashTableKey {
- public:
- StringSharedKey(String* source,
- SharedFunctionInfo* shared,
- StrictModeFlag strict_mode)
- : source_(source),
- shared_(shared),
- strict_mode_(strict_mode) { }
-
- bool IsMatch(Object* other) {
- if (!other->IsFixedArray()) return false;
- FixedArray* pair = FixedArray::cast(other);
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
- if (shared != shared_) return false;
- StrictModeFlag strict_mode = static_cast<StrictModeFlag>(
- Smi::cast(pair->get(2))->value());
- if (strict_mode != strict_mode_) return false;
- String* source = String::cast(pair->get(1));
- return source->Equals(source_);
- }
-
- static uint32_t StringSharedHashHelper(String* source,
- SharedFunctionInfo* shared,
- StrictModeFlag strict_mode) {
- uint32_t hash = source->Hash();
- if (shared->HasSourceCode()) {
- // Instead of using the SharedFunctionInfo pointer in the hash
- // code computation, we use a combination of the hash of the
- // script source code and the start and end positions. We do
- // this to ensure that the cache entries can survive garbage
- // collection.
- Script* script = Script::cast(shared->script());
- hash ^= String::cast(script->source())->Hash();
- if (strict_mode == kStrictMode) hash ^= 0x8000;
- hash += shared->start_position();
- }
- return hash;
- }
-
- uint32_t Hash() {
- return StringSharedHashHelper(source_, shared_, strict_mode_);
- }
-
- uint32_t HashForObject(Object* obj) {
- FixedArray* pair = FixedArray::cast(obj);
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
- String* source = String::cast(pair->get(1));
- StrictModeFlag strict_mode = static_cast<StrictModeFlag>(
- Smi::cast(pair->get(2))->value());
- return StringSharedHashHelper(source, shared, strict_mode);
- }
-
- MUST_USE_RESULT MaybeObject* AsObject() {
- Object* obj;
- { MaybeObject* maybe_obj = source_->GetHeap()->AllocateFixedArray(3);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* pair = FixedArray::cast(obj);
- pair->set(0, shared_);
- pair->set(1, source_);
- pair->set(2, Smi::FromInt(strict_mode_));
- return pair;
- }
-
- private:
- String* source_;
- SharedFunctionInfo* shared_;
- StrictModeFlag strict_mode_;
-};
-
-
-// RegExpKey carries the source and flags of a regular expression as key.
-class RegExpKey : public HashTableKey {
- public:
- RegExpKey(String* string, JSRegExp::Flags flags)
- : string_(string),
- flags_(Smi::FromInt(flags.value())) { }
-
- // Rather than storing the key in the hash table, a pointer to the
- // stored value is stored where the key should be. IsMatch then
- // compares the search key to the found object, rather than comparing
- // a key to a key.
- bool IsMatch(Object* obj) {
- FixedArray* val = FixedArray::cast(obj);
- return string_->Equals(String::cast(val->get(JSRegExp::kSourceIndex)))
- && (flags_ == val->get(JSRegExp::kFlagsIndex));
- }
-
- uint32_t Hash() { return RegExpHash(string_, flags_); }
-
- Object* AsObject() {
- // Plain hash maps, which is where regexp keys are used, don't
- // use this function.
- UNREACHABLE();
- return NULL;
- }
-
- uint32_t HashForObject(Object* obj) {
- FixedArray* val = FixedArray::cast(obj);
- return RegExpHash(String::cast(val->get(JSRegExp::kSourceIndex)),
- Smi::cast(val->get(JSRegExp::kFlagsIndex)));
- }
-
- static uint32_t RegExpHash(String* string, Smi* flags) {
- return string->Hash() + flags->value();
- }
-
- String* string_;
- Smi* flags_;
-};
-
-// Utf8SymbolKey carries a vector of chars as key.
-class Utf8SymbolKey : public HashTableKey {
- public:
- explicit Utf8SymbolKey(Vector<const char> string)
- : string_(string), hash_field_(0) { }
-
- bool IsMatch(Object* string) {
- return String::cast(string)->IsEqualTo(string_);
- }
-
- uint32_t Hash() {
- if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
- unibrow::Utf8InputBuffer<> buffer(string_.start(),
- static_cast<unsigned>(string_.length()));
- chars_ = buffer.Length();
- hash_field_ = String::ComputeHashField(&buffer, chars_);
- uint32_t result = hash_field_ >> String::kHashShift;
- ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
- return result;
- }
-
- uint32_t HashForObject(Object* other) {
- return String::cast(other)->Hash();
- }
-
- MaybeObject* AsObject() {
- if (hash_field_ == 0) Hash();
- return Isolate::Current()->heap()->AllocateSymbol(
- string_, chars_, hash_field_);
- }
-
- Vector<const char> string_;
- uint32_t hash_field_;
- int chars_; // Caches the number of characters when computing the hash code.
-};
-
-
-template <typename Char>
-class SequentialSymbolKey : public HashTableKey {
- public:
- explicit SequentialSymbolKey(Vector<const Char> string)
- : string_(string), hash_field_(0) { }
-
- uint32_t Hash() {
- StringHasher hasher(string_.length());
-
- // Very long strings have a trivial hash that doesn't inspect the
- // string contents.
- if (hasher.has_trivial_hash()) {
- hash_field_ = hasher.GetHashField();
- } else {
- int i = 0;
- // Do the iterative array index computation as long as there is a
- // chance this is an array index.
- while (i < string_.length() && hasher.is_array_index()) {
- hasher.AddCharacter(static_cast<uc32>(string_[i]));
- i++;
- }
-
- // Process the remaining characters without updating the array
- // index.
- while (i < string_.length()) {
- hasher.AddCharacterNoIndex(static_cast<uc32>(string_[i]));
- i++;
- }
- hash_field_ = hasher.GetHashField();
- }
-
- uint32_t result = hash_field_ >> String::kHashShift;
- ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
- return result;
- }
-
-
- uint32_t HashForObject(Object* other) {
- return String::cast(other)->Hash();
- }
-
- Vector<const Char> string_;
- uint32_t hash_field_;
-};
-
-
-
-class AsciiSymbolKey : public SequentialSymbolKey<char> {
- public:
- explicit AsciiSymbolKey(Vector<const char> str)
- : SequentialSymbolKey<char>(str) { }
-
- bool IsMatch(Object* string) {
- return String::cast(string)->IsAsciiEqualTo(string_);
- }
-
- MaybeObject* AsObject() {
- if (hash_field_ == 0) Hash();
- return HEAP->AllocateAsciiSymbol(string_, hash_field_);
- }
-};
-
-
-class TwoByteSymbolKey : public SequentialSymbolKey<uc16> {
- public:
- explicit TwoByteSymbolKey(Vector<const uc16> str)
- : SequentialSymbolKey<uc16>(str) { }
-
- bool IsMatch(Object* string) {
- return String::cast(string)->IsTwoByteEqualTo(string_);
- }
-
- MaybeObject* AsObject() {
- if (hash_field_ == 0) Hash();
- return HEAP->AllocateTwoByteSymbol(string_, hash_field_);
- }
-};
-
-
-// SymbolKey carries a string/symbol object as key.
-class SymbolKey : public HashTableKey {
- public:
- explicit SymbolKey(String* string)
- : string_(string) { }
-
- bool IsMatch(Object* string) {
- return String::cast(string)->Equals(string_);
- }
-
- uint32_t Hash() { return string_->Hash(); }
-
- uint32_t HashForObject(Object* other) {
- return String::cast(other)->Hash();
- }
-
- MaybeObject* AsObject() {
- // Attempt to flatten the string, so that symbols will most often
- // be flat strings.
- string_ = string_->TryFlattenGetString();
- Heap* heap = string_->GetHeap();
- // Transform string to symbol if possible.
- Map* map = heap->SymbolMapForString(string_);
- if (map != NULL) {
- string_->set_map(map);
- ASSERT(string_->IsSymbol());
- return string_;
- }
- // Otherwise allocate a new symbol.
- StringInputBuffer buffer(string_);
- return heap->AllocateInternalSymbol(&buffer,
- string_->length(),
- string_->hash_field());
- }
-
- static uint32_t StringHash(Object* obj) {
- return String::cast(obj)->Hash();
- }
-
- String* string_;
-};
-
-
-template<typename Shape, typename Key>
-void HashTable<Shape, Key>::IteratePrefix(ObjectVisitor* v) {
- IteratePointers(v, 0, kElementsStartOffset);
-}
-
-
-template<typename Shape, typename Key>
-void HashTable<Shape, Key>::IterateElements(ObjectVisitor* v) {
- IteratePointers(v,
- kElementsStartOffset,
- kHeaderSize + length() * kPointerSize);
-}
-
-
-template<typename Shape, typename Key>
-MaybeObject* HashTable<Shape, Key>::Allocate(int at_least_space_for,
- PretenureFlag pretenure) {
- const int kMinCapacity = 32;
- int capacity = RoundUpToPowerOf2(at_least_space_for * 2);
- if (capacity < kMinCapacity) {
- capacity = kMinCapacity; // Guarantee min capacity.
- } else if (capacity > HashTable::kMaxCapacity) {
- return Failure::OutOfMemoryException();
- }
-
- Object* obj;
- { MaybeObject* maybe_obj = Isolate::Current()->heap()->
- AllocateHashTable(EntryToIndex(capacity), pretenure);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- HashTable::cast(obj)->SetNumberOfElements(0);
- HashTable::cast(obj)->SetNumberOfDeletedElements(0);
- HashTable::cast(obj)->SetCapacity(capacity);
- return obj;
-}
-
-
-// Find entry for key otherwise return kNotFound.
-int StringDictionary::FindEntry(String* key) {
- if (!key->IsSymbol()) {
- return HashTable<StringDictionaryShape, String*>::FindEntry(key);
- }
-
- // Optimized for symbol key. Knowledge of the key type allows:
- // 1. Move the check if the key is a symbol out of the loop.
- // 2. Avoid comparing hash codes in symbol to symbol comparision.
- // 3. Detect a case when a dictionary key is not a symbol but the key is.
- // In case of positive result the dictionary key may be replaced by
- // the symbol with minimal performance penalty. It gives a chance to
- // perform further lookups in code stubs (and significant performance boost
- // a certain style of code).
-
- // EnsureCapacity will guarantee the hash table is never full.
- uint32_t capacity = Capacity();
- uint32_t entry = FirstProbe(key->Hash(), capacity);
- uint32_t count = 1;
-
- while (true) {
- int index = EntryToIndex(entry);
- Object* element = get(index);
- if (element->IsUndefined()) break; // Empty entry.
- if (key == element) return entry;
- if (!element->IsSymbol() &&
- !element->IsNull() &&
- String::cast(element)->Equals(key)) {
- // Replace a non-symbol key by the equivalent symbol for faster further
- // lookups.
- set(index, key);
- return entry;
- }
- ASSERT(element->IsNull() || !String::cast(element)->Equals(key));
- entry = NextProbe(entry, count++, capacity);
- }
- return kNotFound;
-}
-
-
-template<typename Shape, typename Key>
-MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
- int capacity = Capacity();
- int nof = NumberOfElements() + n;
- int nod = NumberOfDeletedElements();
- // Return if:
- // 50% is still free after adding n elements and
- // at most 50% of the free elements are deleted elements.
- if (nod <= (capacity - nof) >> 1) {
- int needed_free = nof >> 1;
- if (nof + needed_free <= capacity) return this;
- }
-
- const int kMinCapacityForPretenure = 256;
- bool pretenure =
- (capacity > kMinCapacityForPretenure) && !GetHeap()->InNewSpace(this);
- Object* obj;
- { MaybeObject* maybe_obj =
- Allocate(nof * 2, pretenure ? TENURED : NOT_TENURED);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- AssertNoAllocation no_gc;
- HashTable* table = HashTable::cast(obj);
- WriteBarrierMode mode = table->GetWriteBarrierMode(no_gc);
-
- // Copy prefix to new array.
- for (int i = kPrefixStartIndex;
- i < kPrefixStartIndex + Shape::kPrefixSize;
- i++) {
- table->set(i, get(i), mode);
- }
- // Rehash the elements.
- for (int i = 0; i < capacity; i++) {
- uint32_t from_index = EntryToIndex(i);
- Object* k = get(from_index);
- if (IsKey(k)) {
- uint32_t hash = Shape::HashForObject(key, k);
- uint32_t insertion_index =
- EntryToIndex(table->FindInsertionEntry(hash));
- for (int j = 0; j < Shape::kEntrySize; j++) {
- table->set(insertion_index + j, get(from_index + j), mode);
- }
- }
- }
- table->SetNumberOfElements(NumberOfElements());
- table->SetNumberOfDeletedElements(0);
- return table;
-}
-
-
-template<typename Shape, typename Key>
-uint32_t HashTable<Shape, Key>::FindInsertionEntry(uint32_t hash) {
- uint32_t capacity = Capacity();
- uint32_t entry = FirstProbe(hash, capacity);
- uint32_t count = 1;
- // EnsureCapacity will guarantee the hash table is never full.
- while (true) {
- Object* element = KeyAt(entry);
- if (element->IsUndefined() || element->IsNull()) break;
- entry = NextProbe(entry, count++, capacity);
- }
- return entry;
-}
-
-// Force instantiation of template instances class.
-// Please note this list is compiler dependent.
-
-template class HashTable<SymbolTableShape, HashTableKey*>;
-
-template class HashTable<CompilationCacheShape, HashTableKey*>;
-
-template class HashTable<MapCacheShape, HashTableKey*>;
-
-template class Dictionary<StringDictionaryShape, String*>;
-
-template class Dictionary<NumberDictionaryShape, uint32_t>;
-
-template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::Allocate(
- int);
-
-template MaybeObject* Dictionary<StringDictionaryShape, String*>::Allocate(
- int);
-
-template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::AtPut(
- uint32_t, Object*);
-
-template Object* Dictionary<NumberDictionaryShape, uint32_t>::SlowReverseLookup(
- Object*);
-
-template Object* Dictionary<StringDictionaryShape, String*>::SlowReverseLookup(
- Object*);
-
-template void Dictionary<NumberDictionaryShape, uint32_t>::CopyKeysTo(
- FixedArray*, PropertyAttributes);
-
-template Object* Dictionary<StringDictionaryShape, String*>::DeleteProperty(
- int, JSObject::DeleteMode);
-
-template Object* Dictionary<NumberDictionaryShape, uint32_t>::DeleteProperty(
- int, JSObject::DeleteMode);
-
-template void Dictionary<StringDictionaryShape, String*>::CopyKeysTo(
- FixedArray*);
-
-template int
-Dictionary<StringDictionaryShape, String*>::NumberOfElementsFilterAttributes(
- PropertyAttributes);
-
-template MaybeObject* Dictionary<StringDictionaryShape, String*>::Add(
- String*, Object*, PropertyDetails);
-
-template MaybeObject*
-Dictionary<StringDictionaryShape, String*>::GenerateNewEnumerationIndices();
-
-template int
-Dictionary<NumberDictionaryShape, uint32_t>::NumberOfElementsFilterAttributes(
- PropertyAttributes);
-
-template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::Add(
- uint32_t, Object*, PropertyDetails);
-
-template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::
- EnsureCapacity(int, uint32_t);
-
-template MaybeObject* Dictionary<StringDictionaryShape, String*>::
- EnsureCapacity(int, String*);
-
-template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::AddEntry(
- uint32_t, Object*, PropertyDetails, uint32_t);
-
-template MaybeObject* Dictionary<StringDictionaryShape, String*>::AddEntry(
- String*, Object*, PropertyDetails, uint32_t);
-
-template
-int Dictionary<NumberDictionaryShape, uint32_t>::NumberOfEnumElements();
-
-template
-int Dictionary<StringDictionaryShape, String*>::NumberOfEnumElements();
-
-template
-int HashTable<NumberDictionaryShape, uint32_t>::FindEntry(uint32_t);
-
-
-// Collates undefined and unexisting elements below limit from position
-// zero of the elements. The object stays in Dictionary mode.
-MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
- ASSERT(HasDictionaryElements());
- // Must stay in dictionary mode, either because of requires_slow_elements,
- // or because we are not going to sort (and therefore compact) all of the
- // elements.
- NumberDictionary* dict = element_dictionary();
- HeapNumber* result_double = NULL;
- if (limit > static_cast<uint32_t>(Smi::kMaxValue)) {
- // Allocate space for result before we start mutating the object.
- Object* new_double;
- { MaybeObject* maybe_new_double = GetHeap()->AllocateHeapNumber(0.0);
- if (!maybe_new_double->ToObject(&new_double)) return maybe_new_double;
- }
- result_double = HeapNumber::cast(new_double);
- }
-
- Object* obj;
- { MaybeObject* maybe_obj =
- NumberDictionary::Allocate(dict->NumberOfElements());
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- NumberDictionary* new_dict = NumberDictionary::cast(obj);
-
- AssertNoAllocation no_alloc;
-
- uint32_t pos = 0;
- uint32_t undefs = 0;
- int capacity = dict->Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* k = dict->KeyAt(i);
- if (dict->IsKey(k)) {
- ASSERT(k->IsNumber());
- ASSERT(!k->IsSmi() || Smi::cast(k)->value() >= 0);
- ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() >= 0);
- ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() <= kMaxUInt32);
- Object* value = dict->ValueAt(i);
- PropertyDetails details = dict->DetailsAt(i);
- if (details.type() == CALLBACKS) {
- // Bail out and do the sorting of undefineds and array holes in JS.
- return Smi::FromInt(-1);
- }
- uint32_t key = NumberToUint32(k);
- // In the following we assert that adding the entry to the new dictionary
- // does not cause GC. This is the case because we made sure to allocate
- // the dictionary big enough above, so it need not grow.
- if (key < limit) {
- if (value->IsUndefined()) {
- undefs++;
- } else {
- if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
- // Adding an entry with the key beyond smi-range requires
- // allocation. Bailout.
- return Smi::FromInt(-1);
- }
- new_dict->AddNumberEntry(pos, value, details)->ToObjectUnchecked();
- pos++;
- }
- } else {
- if (key > static_cast<uint32_t>(Smi::kMaxValue)) {
- // Adding an entry with the key beyond smi-range requires
- // allocation. Bailout.
- return Smi::FromInt(-1);
- }
- new_dict->AddNumberEntry(key, value, details)->ToObjectUnchecked();
- }
- }
- }
-
- uint32_t result = pos;
- PropertyDetails no_details = PropertyDetails(NONE, NORMAL);
- Heap* heap = GetHeap();
- while (undefs > 0) {
- if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
- // Adding an entry with the key beyond smi-range requires
- // allocation. Bailout.
- return Smi::FromInt(-1);
- }
- new_dict->AddNumberEntry(pos, heap->undefined_value(), no_details)->
- ToObjectUnchecked();
- pos++;
- undefs--;
- }
-
- set_elements(new_dict);
-
- if (result <= static_cast<uint32_t>(Smi::kMaxValue)) {
- return Smi::FromInt(static_cast<int>(result));
- }
-
- ASSERT_NE(NULL, result_double);
- result_double->set_value(static_cast<double>(result));
- return result_double;
-}
-
-
-// Collects all defined (non-hole) and non-undefined (array) elements at
-// the start of the elements array.
-// If the object is in dictionary mode, it is converted to fast elements
-// mode.
-MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
- ASSERT(!HasExternalArrayElements());
-
- Heap* heap = GetHeap();
-
- if (HasDictionaryElements()) {
- // Convert to fast elements containing only the existing properties.
- // Ordering is irrelevant, since we are going to sort anyway.
- NumberDictionary* dict = element_dictionary();
- if (IsJSArray() || dict->requires_slow_elements() ||
- dict->max_number_key() >= limit) {
- return PrepareSlowElementsForSort(limit);
- }
- // Convert to fast elements.
-
- Object* obj;
- { MaybeObject* maybe_obj = map()->GetFastElementsMap();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- Map* new_map = Map::cast(obj);
-
- PretenureFlag tenure = heap->InNewSpace(this) ? NOT_TENURED: TENURED;
- Object* new_array;
- { MaybeObject* maybe_new_array =
- heap->AllocateFixedArray(dict->NumberOfElements(), tenure);
- if (!maybe_new_array->ToObject(&new_array)) return maybe_new_array;
- }
- FixedArray* fast_elements = FixedArray::cast(new_array);
- dict->CopyValuesTo(fast_elements);
-
- set_map(new_map);
- set_elements(fast_elements);
- } else {
- Object* obj;
- { MaybeObject* maybe_obj = EnsureWritableFastElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- }
- ASSERT(HasFastElements());
-
- // Collect holes at the end, undefined before that and the rest at the
- // start, and return the number of non-hole, non-undefined values.
-
- FixedArray* elements = FixedArray::cast(this->elements());
- uint32_t elements_length = static_cast<uint32_t>(elements->length());
- if (limit > elements_length) {
- limit = elements_length ;
- }
- if (limit == 0) {
- return Smi::FromInt(0);
- }
-
- HeapNumber* result_double = NULL;
- if (limit > static_cast<uint32_t>(Smi::kMaxValue)) {
- // Pessimistically allocate space for return value before
- // we start mutating the array.
- Object* new_double;
- { MaybeObject* maybe_new_double = heap->AllocateHeapNumber(0.0);
- if (!maybe_new_double->ToObject(&new_double)) return maybe_new_double;
- }
- result_double = HeapNumber::cast(new_double);
- }
-
- AssertNoAllocation no_alloc;
-
- // Split elements into defined, undefined and the_hole, in that order.
- // Only count locations for undefined and the hole, and fill them afterwards.
- WriteBarrierMode write_barrier = elements->GetWriteBarrierMode(no_alloc);
- unsigned int undefs = limit;
- unsigned int holes = limit;
- // Assume most arrays contain no holes and undefined values, so minimize the
- // number of stores of non-undefined, non-the-hole values.
- for (unsigned int i = 0; i < undefs; i++) {
- Object* current = elements->get(i);
- if (current->IsTheHole()) {
- holes--;
- undefs--;
- } else if (current->IsUndefined()) {
- undefs--;
- } else {
- continue;
- }
- // Position i needs to be filled.
- while (undefs > i) {
- current = elements->get(undefs);
- if (current->IsTheHole()) {
- holes--;
- undefs--;
- } else if (current->IsUndefined()) {
- undefs--;
- } else {
- elements->set(i, current, write_barrier);
- break;
- }
- }
- }
- uint32_t result = undefs;
- while (undefs < holes) {
- elements->set_undefined(undefs);
- undefs++;
- }
- while (holes < limit) {
- elements->set_the_hole(holes);
- holes++;
- }
-
- if (result <= static_cast<uint32_t>(Smi::kMaxValue)) {
- return Smi::FromInt(static_cast<int>(result));
- }
- ASSERT_NE(NULL, result_double);
- result_double->set_value(static_cast<double>(result));
- return result_double;
-}
-
-
-Object* ExternalPixelArray::SetValue(uint32_t index, Object* value) {
- uint8_t clamped_value = 0;
- if (index < static_cast<uint32_t>(length())) {
- if (value->IsSmi()) {
- int int_value = Smi::cast(value)->value();
- if (int_value < 0) {
- clamped_value = 0;
- } else if (int_value > 255) {
- clamped_value = 255;
- } else {
- clamped_value = static_cast<uint8_t>(int_value);
- }
- } else if (value->IsHeapNumber()) {
- double double_value = HeapNumber::cast(value)->value();
- if (!(double_value > 0)) {
- // NaN and less than zero clamp to zero.
- clamped_value = 0;
- } else if (double_value > 255) {
- // Greater than 255 clamp to 255.
- clamped_value = 255;
- } else {
- // Other doubles are rounded to the nearest integer.
- clamped_value = static_cast<uint8_t>(double_value + 0.5);
- }
- } else {
- // Clamp undefined to zero (default). All other types have been
- // converted to a number type further up in the call chain.
- ASSERT(value->IsUndefined());
- }
- set(index, clamped_value);
- }
- return Smi::FromInt(clamped_value);
-}
-
-
-template<typename ExternalArrayClass, typename ValueType>
-static MaybeObject* ExternalArrayIntSetter(Heap* heap,
- ExternalArrayClass* receiver,
- uint32_t index,
- Object* value) {
- ValueType cast_value = 0;
- if (index < static_cast<uint32_t>(receiver->length())) {
- if (value->IsSmi()) {
- int int_value = Smi::cast(value)->value();
- cast_value = static_cast<ValueType>(int_value);
- } else if (value->IsHeapNumber()) {
- double double_value = HeapNumber::cast(value)->value();
- cast_value = static_cast<ValueType>(DoubleToInt32(double_value));
- } else {
- // Clamp undefined to zero (default). All other types have been
- // converted to a number type further up in the call chain.
- ASSERT(value->IsUndefined());
- }
- receiver->set(index, cast_value);
- }
- return heap->NumberFromInt32(cast_value);
-}
-
-
-MaybeObject* ExternalByteArray::SetValue(uint32_t index, Object* value) {
- return ExternalArrayIntSetter<ExternalByteArray, int8_t>
- (GetHeap(), this, index, value);
-}
-
-
-MaybeObject* ExternalUnsignedByteArray::SetValue(uint32_t index,
- Object* value) {
- return ExternalArrayIntSetter<ExternalUnsignedByteArray, uint8_t>
- (GetHeap(), this, index, value);
-}
-
-
-MaybeObject* ExternalShortArray::SetValue(uint32_t index,
- Object* value) {
- return ExternalArrayIntSetter<ExternalShortArray, int16_t>
- (GetHeap(), this, index, value);
-}
-
-
-MaybeObject* ExternalUnsignedShortArray::SetValue(uint32_t index,
- Object* value) {
- return ExternalArrayIntSetter<ExternalUnsignedShortArray, uint16_t>
- (GetHeap(), this, index, value);
-}
-
-
-MaybeObject* ExternalIntArray::SetValue(uint32_t index, Object* value) {
- return ExternalArrayIntSetter<ExternalIntArray, int32_t>
- (GetHeap(), this, index, value);
-}
-
-
-MaybeObject* ExternalUnsignedIntArray::SetValue(uint32_t index, Object* value) {
- uint32_t cast_value = 0;
- Heap* heap = GetHeap();
- if (index < static_cast<uint32_t>(length())) {
- if (value->IsSmi()) {
- int int_value = Smi::cast(value)->value();
- cast_value = static_cast<uint32_t>(int_value);
- } else if (value->IsHeapNumber()) {
- double double_value = HeapNumber::cast(value)->value();
- cast_value = static_cast<uint32_t>(DoubleToUint32(double_value));
- } else {
- // Clamp undefined to zero (default). All other types have been
- // converted to a number type further up in the call chain.
- ASSERT(value->IsUndefined());
- }
- set(index, cast_value);
- }
- return heap->NumberFromUint32(cast_value);
-}
-
-
-MaybeObject* ExternalFloatArray::SetValue(uint32_t index, Object* value) {
- float cast_value = 0;
- Heap* heap = GetHeap();
- if (index < static_cast<uint32_t>(length())) {
- if (value->IsSmi()) {
- int int_value = Smi::cast(value)->value();
- cast_value = static_cast<float>(int_value);
- } else if (value->IsHeapNumber()) {
- double double_value = HeapNumber::cast(value)->value();
- cast_value = static_cast<float>(double_value);
- } else {
- // Clamp undefined to zero (default). All other types have been
- // converted to a number type further up in the call chain.
- ASSERT(value->IsUndefined());
- }
- set(index, cast_value);
- }
- return heap->AllocateHeapNumber(cast_value);
-}
-
-
-JSGlobalPropertyCell* GlobalObject::GetPropertyCell(LookupResult* result) {
- ASSERT(!HasFastProperties());
- Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
- return JSGlobalPropertyCell::cast(value);
-}
-
-
-MaybeObject* GlobalObject::EnsurePropertyCell(String* name) {
- ASSERT(!HasFastProperties());
- int entry = property_dictionary()->FindEntry(name);
- if (entry == StringDictionary::kNotFound) {
- Heap* heap = GetHeap();
- Object* cell;
- { MaybeObject* maybe_cell =
- heap->AllocateJSGlobalPropertyCell(heap->the_hole_value());
- if (!maybe_cell->ToObject(&cell)) return maybe_cell;
- }
- PropertyDetails details(NONE, NORMAL);
- details = details.AsDeleted();
- Object* dictionary;
- { MaybeObject* maybe_dictionary =
- property_dictionary()->Add(name, cell, details);
- if (!maybe_dictionary->ToObject(&dictionary)) return maybe_dictionary;
- }
- set_properties(StringDictionary::cast(dictionary));
- return cell;
- } else {
- Object* value = property_dictionary()->ValueAt(entry);
- ASSERT(value->IsJSGlobalPropertyCell());
- return value;
- }
-}
-
-
-MaybeObject* SymbolTable::LookupString(String* string, Object** s) {
- SymbolKey key(string);
- return LookupKey(&key, s);
-}
-
-
-// This class is used for looking up two character strings in the symbol table.
-// If we don't have a hit we don't want to waste much time so we unroll the
-// string hash calculation loop here for speed. Doesn't work if the two
-// characters form a decimal integer, since such strings have a different hash
-// algorithm.
-class TwoCharHashTableKey : public HashTableKey {
- public:
- TwoCharHashTableKey(uint32_t c1, uint32_t c2)
- : c1_(c1), c2_(c2) {
- // Char 1.
- uint32_t hash = c1 + (c1 << 10);
- hash ^= hash >> 6;
- // Char 2.
- hash += c2;
- hash += hash << 10;
- hash ^= hash >> 6;
- // GetHash.
- hash += hash << 3;
- hash ^= hash >> 11;
- hash += hash << 15;
- if (hash == 0) hash = 27;
-#ifdef DEBUG
- StringHasher hasher(2);
- hasher.AddCharacter(c1);
- hasher.AddCharacter(c2);
- // If this assert fails then we failed to reproduce the two-character
- // version of the string hashing algorithm above. One reason could be
- // that we were passed two digits as characters, since the hash
- // algorithm is different in that case.
- ASSERT_EQ(static_cast<int>(hasher.GetHash()), static_cast<int>(hash));
-#endif
- hash_ = hash;
- }
-
- bool IsMatch(Object* o) {
- if (!o->IsString()) return false;
- String* other = String::cast(o);
- if (other->length() != 2) return false;
- if (other->Get(0) != c1_) return false;
- return other->Get(1) == c2_;
- }
-
- uint32_t Hash() { return hash_; }
- uint32_t HashForObject(Object* key) {
- if (!key->IsString()) return 0;
- return String::cast(key)->Hash();
- }
-
- Object* AsObject() {
- // The TwoCharHashTableKey is only used for looking in the symbol
- // table, not for adding to it.
- UNREACHABLE();
- return NULL;
- }
- private:
- uint32_t c1_;
- uint32_t c2_;
- uint32_t hash_;
-};
-
-
-bool SymbolTable::LookupSymbolIfExists(String* string, String** symbol) {
- SymbolKey key(string);
- int entry = FindEntry(&key);
- if (entry == kNotFound) {
- return false;
- } else {
- String* result = String::cast(KeyAt(entry));
- ASSERT(StringShape(result).IsSymbol());
- *symbol = result;
- return true;
- }
-}
-
-
-bool SymbolTable::LookupTwoCharsSymbolIfExists(uint32_t c1,
- uint32_t c2,
- String** symbol) {
- TwoCharHashTableKey key(c1, c2);
- int entry = FindEntry(&key);
- if (entry == kNotFound) {
- return false;
- } else {
- String* result = String::cast(KeyAt(entry));
- ASSERT(StringShape(result).IsSymbol());
- *symbol = result;
- return true;
- }
-}
-
-
-MaybeObject* SymbolTable::LookupSymbol(Vector<const char> str, Object** s) {
- Utf8SymbolKey key(str);
- return LookupKey(&key, s);
-}
-
-
-MaybeObject* SymbolTable::LookupAsciiSymbol(Vector<const char> str,
- Object** s) {
- AsciiSymbolKey key(str);
- return LookupKey(&key, s);
-}
-
-
-MaybeObject* SymbolTable::LookupTwoByteSymbol(Vector<const uc16> str,
- Object** s) {
- TwoByteSymbolKey key(str);
- return LookupKey(&key, s);
-}
-
-MaybeObject* SymbolTable::LookupKey(HashTableKey* key, Object** s) {
- int entry = FindEntry(key);
-
- // Symbol already in table.
- if (entry != kNotFound) {
- *s = KeyAt(entry);
- return this;
- }
-
- // Adding new symbol. Grow table if needed.
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- // Create symbol object.
- Object* symbol;
- { MaybeObject* maybe_symbol = key->AsObject();
- if (!maybe_symbol->ToObject(&symbol)) return maybe_symbol;
- }
-
- // If the symbol table grew as part of EnsureCapacity, obj is not
- // the current symbol table and therefore we cannot use
- // SymbolTable::cast here.
- SymbolTable* table = reinterpret_cast<SymbolTable*>(obj);
-
- // Add the new symbol and return it along with the symbol table.
- entry = table->FindInsertionEntry(key->Hash());
- table->set(EntryToIndex(entry), symbol);
- table->ElementAdded();
- *s = symbol;
- return table;
-}
-
-
-Object* CompilationCacheTable::Lookup(String* src) {
- StringKey key(src);
- int entry = FindEntry(&key);
- if (entry == kNotFound) return GetHeap()->undefined_value();
- return get(EntryToIndex(entry) + 1);
-}
-
-
-Object* CompilationCacheTable::LookupEval(String* src,
- Context* context,
- StrictModeFlag strict_mode) {
- StringSharedKey key(src, context->closure()->shared(), strict_mode);
- int entry = FindEntry(&key);
- if (entry == kNotFound) return GetHeap()->undefined_value();
- return get(EntryToIndex(entry) + 1);
-}
-
-
-Object* CompilationCacheTable::LookupRegExp(String* src,
- JSRegExp::Flags flags) {
- RegExpKey key(src, flags);
- int entry = FindEntry(&key);
- if (entry == kNotFound) return GetHeap()->undefined_value();
- return get(EntryToIndex(entry) + 1);
-}
-
-
-MaybeObject* CompilationCacheTable::Put(String* src, Object* value) {
- StringKey key(src);
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- CompilationCacheTable* cache =
- reinterpret_cast<CompilationCacheTable*>(obj);
- int entry = cache->FindInsertionEntry(key.Hash());
- cache->set(EntryToIndex(entry), src);
- cache->set(EntryToIndex(entry) + 1, value);
- cache->ElementAdded();
- return cache;
-}
-
-
-MaybeObject* CompilationCacheTable::PutEval(String* src,
- Context* context,
- SharedFunctionInfo* value) {
- StringSharedKey key(src,
- context->closure()->shared(),
- value->strict_mode() ? kStrictMode : kNonStrictMode);
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- CompilationCacheTable* cache =
- reinterpret_cast<CompilationCacheTable*>(obj);
- int entry = cache->FindInsertionEntry(key.Hash());
-
- Object* k;
- { MaybeObject* maybe_k = key.AsObject();
- if (!maybe_k->ToObject(&k)) return maybe_k;
- }
-
- cache->set(EntryToIndex(entry), k);
- cache->set(EntryToIndex(entry) + 1, value);
- cache->ElementAdded();
- return cache;
-}
-
-
-MaybeObject* CompilationCacheTable::PutRegExp(String* src,
- JSRegExp::Flags flags,
- FixedArray* value) {
- RegExpKey key(src, flags);
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- CompilationCacheTable* cache =
- reinterpret_cast<CompilationCacheTable*>(obj);
- int entry = cache->FindInsertionEntry(key.Hash());
- // We store the value in the key slot, and compare the search key
- // to the stored value with a custon IsMatch function during lookups.
- cache->set(EntryToIndex(entry), value);
- cache->set(EntryToIndex(entry) + 1, value);
- cache->ElementAdded();
- return cache;
-}
-
-
-void CompilationCacheTable::Remove(Object* value) {
- Object* null_value = GetHeap()->null_value();
- for (int entry = 0, size = Capacity(); entry < size; entry++) {
- int entry_index = EntryToIndex(entry);
- int value_index = entry_index + 1;
- if (get(value_index) == value) {
- fast_set(this, entry_index, null_value);
- fast_set(this, value_index, null_value);
- ElementRemoved();
- }
- }
- return;
-}
-
-
-// SymbolsKey used for HashTable where key is array of symbols.
-class SymbolsKey : public HashTableKey {
- public:
- explicit SymbolsKey(FixedArray* symbols) : symbols_(symbols) { }
-
- bool IsMatch(Object* symbols) {
- FixedArray* o = FixedArray::cast(symbols);
- int len = symbols_->length();
- if (o->length() != len) return false;
- for (int i = 0; i < len; i++) {
- if (o->get(i) != symbols_->get(i)) return false;
- }
- return true;
- }
-
- uint32_t Hash() { return HashForObject(symbols_); }
-
- uint32_t HashForObject(Object* obj) {
- FixedArray* symbols = FixedArray::cast(obj);
- int len = symbols->length();
- uint32_t hash = 0;
- for (int i = 0; i < len; i++) {
- hash ^= String::cast(symbols->get(i))->Hash();
- }
- return hash;
- }
-
- Object* AsObject() { return symbols_; }
-
- private:
- FixedArray* symbols_;
-};
-
-
-Object* MapCache::Lookup(FixedArray* array) {
- SymbolsKey key(array);
- int entry = FindEntry(&key);
- if (entry == kNotFound) return GetHeap()->undefined_value();
- return get(EntryToIndex(entry) + 1);
-}
-
-
-MaybeObject* MapCache::Put(FixedArray* array, Map* value) {
- SymbolsKey key(array);
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- MapCache* cache = reinterpret_cast<MapCache*>(obj);
- int entry = cache->FindInsertionEntry(key.Hash());
- cache->set(EntryToIndex(entry), array);
- cache->set(EntryToIndex(entry) + 1, value);
- cache->ElementAdded();
- return cache;
-}
-
-
-template<typename Shape, typename Key>
-MaybeObject* Dictionary<Shape, Key>::Allocate(int at_least_space_for) {
- Object* obj;
- { MaybeObject* maybe_obj =
- HashTable<Shape, Key>::Allocate(at_least_space_for);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- // Initialize the next enumeration index.
- Dictionary<Shape, Key>::cast(obj)->
- SetNextEnumerationIndex(PropertyDetails::kInitialIndex);
- return obj;
-}
-
-
-template<typename Shape, typename Key>
-MaybeObject* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() {
- Heap* heap = Dictionary<Shape, Key>::GetHeap();
- int length = HashTable<Shape, Key>::NumberOfElements();
-
- // Allocate and initialize iteration order array.
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateFixedArray(length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* iteration_order = FixedArray::cast(obj);
- for (int i = 0; i < length; i++) {
- iteration_order->set(i, Smi::FromInt(i));
- }
-
- // Allocate array with enumeration order.
- { MaybeObject* maybe_obj = heap->AllocateFixedArray(length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* enumeration_order = FixedArray::cast(obj);
-
- // Fill the enumeration order array with property details.
- int capacity = HashTable<Shape, Key>::Capacity();
- int pos = 0;
- for (int i = 0; i < capacity; i++) {
- if (Dictionary<Shape, Key>::IsKey(Dictionary<Shape, Key>::KeyAt(i))) {
- enumeration_order->set(pos++, Smi::FromInt(DetailsAt(i).index()));
- }
- }
-
- // Sort the arrays wrt. enumeration order.
- iteration_order->SortPairs(enumeration_order, enumeration_order->length());
-
- // Overwrite the enumeration_order with the enumeration indices.
- for (int i = 0; i < length; i++) {
- int index = Smi::cast(iteration_order->get(i))->value();
- int enum_index = PropertyDetails::kInitialIndex + i;
- enumeration_order->set(index, Smi::FromInt(enum_index));
- }
-
- // Update the dictionary with new indices.
- capacity = HashTable<Shape, Key>::Capacity();
- pos = 0;
- for (int i = 0; i < capacity; i++) {
- if (Dictionary<Shape, Key>::IsKey(Dictionary<Shape, Key>::KeyAt(i))) {
- int enum_index = Smi::cast(enumeration_order->get(pos++))->value();
- PropertyDetails details = DetailsAt(i);
- PropertyDetails new_details =
- PropertyDetails(details.attributes(), details.type(), enum_index);
- DetailsAtPut(i, new_details);
- }
- }
-
- // Set the next enumeration index.
- SetNextEnumerationIndex(PropertyDetails::kInitialIndex+length);
- return this;
-}
-
-template<typename Shape, typename Key>
-MaybeObject* Dictionary<Shape, Key>::EnsureCapacity(int n, Key key) {
- // Check whether there are enough enumeration indices to add n elements.
- if (Shape::kIsEnumerable &&
- !PropertyDetails::IsValidIndex(NextEnumerationIndex() + n)) {
- // If not, we generate new indices for the properties.
- Object* result;
- { MaybeObject* maybe_result = GenerateNewEnumerationIndices();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return HashTable<Shape, Key>::EnsureCapacity(n, key);
-}
-
-
-void NumberDictionary::RemoveNumberEntries(uint32_t from, uint32_t to) {
- // Do nothing if the interval [from, to) is empty.
- if (from >= to) return;
-
- Heap* heap = GetHeap();
- int removed_entries = 0;
- Object* sentinel = heap->null_value();
- int capacity = Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* key = KeyAt(i);
- if (key->IsNumber()) {
- uint32_t number = static_cast<uint32_t>(key->Number());
- if (from <= number && number < to) {
- SetEntry(i, sentinel, sentinel, Smi::FromInt(0));
- removed_entries++;
- }
- }
- }
-
- // Update the number of elements.
- ElementsRemoved(removed_entries);
-}
-
-
-template<typename Shape, typename Key>
-Object* Dictionary<Shape, Key>::DeleteProperty(int entry,
- JSObject::DeleteMode mode) {
- Heap* heap = Dictionary<Shape, Key>::GetHeap();
- PropertyDetails details = DetailsAt(entry);
- // Ignore attributes if forcing a deletion.
- if (details.IsDontDelete() && mode != JSObject::FORCE_DELETION) {
- return heap->false_value();
- }
- SetEntry(entry, heap->null_value(), heap->null_value(), Smi::FromInt(0));
- HashTable<Shape, Key>::ElementRemoved();
- return heap->true_value();
-}
-
-
-template<typename Shape, typename Key>
-MaybeObject* Dictionary<Shape, Key>::AtPut(Key key, Object* value) {
- int entry = this->FindEntry(key);
-
- // If the entry is present set the value;
- if (entry != Dictionary<Shape, Key>::kNotFound) {
- ValueAtPut(entry, value);
- return this;
- }
-
- // Check whether the dictionary should be extended.
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- Object* k;
- { MaybeObject* maybe_k = Shape::AsObject(key);
- if (!maybe_k->ToObject(&k)) return maybe_k;
- }
- PropertyDetails details = PropertyDetails(NONE, NORMAL);
- return Dictionary<Shape, Key>::cast(obj)->
- AddEntry(key, value, details, Shape::Hash(key));
-}
-
-
-template<typename Shape, typename Key>
-MaybeObject* Dictionary<Shape, Key>::Add(Key key,
- Object* value,
- PropertyDetails details) {
- // Valdate key is absent.
- SLOW_ASSERT((this->FindEntry(key) == Dictionary<Shape, Key>::kNotFound));
- // Check whether the dictionary should be extended.
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- return Dictionary<Shape, Key>::cast(obj)->
- AddEntry(key, value, details, Shape::Hash(key));
-}
-
-
-// Add a key, value pair to the dictionary.
-template<typename Shape, typename Key>
-MaybeObject* Dictionary<Shape, Key>::AddEntry(Key key,
- Object* value,
- PropertyDetails details,
- uint32_t hash) {
- // Compute the key object.
- Object* k;
- { MaybeObject* maybe_k = Shape::AsObject(key);
- if (!maybe_k->ToObject(&k)) return maybe_k;
- }
-
- uint32_t entry = Dictionary<Shape, Key>::FindInsertionEntry(hash);
- // Insert element at empty or deleted entry
- if (!details.IsDeleted() && details.index() == 0 && Shape::kIsEnumerable) {
- // Assign an enumeration index to the property and update
- // SetNextEnumerationIndex.
- int index = NextEnumerationIndex();
- details = PropertyDetails(details.attributes(), details.type(), index);
- SetNextEnumerationIndex(index + 1);
- }
- SetEntry(entry, k, value, details);
- ASSERT((Dictionary<Shape, Key>::KeyAt(entry)->IsNumber()
- || Dictionary<Shape, Key>::KeyAt(entry)->IsString()));
- HashTable<Shape, Key>::ElementAdded();
- return this;
-}
-
-
-void NumberDictionary::UpdateMaxNumberKey(uint32_t key) {
- // If the dictionary requires slow elements an element has already
- // been added at a high index.
- if (requires_slow_elements()) return;
- // Check if this index is high enough that we should require slow
- // elements.
- if (key > kRequiresSlowElementsLimit) {
- set_requires_slow_elements();
- return;
- }
- // Update max key value.
- Object* max_index_object = get(kMaxNumberKeyIndex);
- if (!max_index_object->IsSmi() || max_number_key() < key) {
- FixedArray::set(kMaxNumberKeyIndex,
- Smi::FromInt(key << kRequiresSlowElementsTagSize));
- }
-}
-
-
-MaybeObject* NumberDictionary::AddNumberEntry(uint32_t key,
- Object* value,
- PropertyDetails details) {
- UpdateMaxNumberKey(key);
- SLOW_ASSERT(this->FindEntry(key) == kNotFound);
- return Add(key, value, details);
-}
-
-
-MaybeObject* NumberDictionary::AtNumberPut(uint32_t key, Object* value) {
- UpdateMaxNumberKey(key);
- return AtPut(key, value);
-}
-
-
-MaybeObject* NumberDictionary::Set(uint32_t key,
- Object* value,
- PropertyDetails details) {
- int entry = FindEntry(key);
- if (entry == kNotFound) return AddNumberEntry(key, value, details);
- // Preserve enumeration index.
- details = PropertyDetails(details.attributes(),
- details.type(),
- DetailsAt(entry).index());
- MaybeObject* maybe_object_key = NumberDictionaryShape::AsObject(key);
- Object* object_key;
- if (!maybe_object_key->ToObject(&object_key)) return maybe_object_key;
- SetEntry(entry, object_key, value, details);
- return this;
-}
-
-
-
-template<typename Shape, typename Key>
-int Dictionary<Shape, Key>::NumberOfElementsFilterAttributes(
- PropertyAttributes filter) {
- int capacity = HashTable<Shape, Key>::Capacity();
- int result = 0;
- for (int i = 0; i < capacity; i++) {
- Object* k = HashTable<Shape, Key>::KeyAt(i);
- if (HashTable<Shape, Key>::IsKey(k)) {
- PropertyDetails details = DetailsAt(i);
- if (details.IsDeleted()) continue;
- PropertyAttributes attr = details.attributes();
- if ((attr & filter) == 0) result++;
- }
- }
- return result;
-}
-
-
-template<typename Shape, typename Key>
-int Dictionary<Shape, Key>::NumberOfEnumElements() {
- return NumberOfElementsFilterAttributes(
- static_cast<PropertyAttributes>(DONT_ENUM));
-}
-
-
-template<typename Shape, typename Key>
-void Dictionary<Shape, Key>::CopyKeysTo(FixedArray* storage,
- PropertyAttributes filter) {
- ASSERT(storage->length() >= NumberOfEnumElements());
- int capacity = HashTable<Shape, Key>::Capacity();
- int index = 0;
- for (int i = 0; i < capacity; i++) {
- Object* k = HashTable<Shape, Key>::KeyAt(i);
- if (HashTable<Shape, Key>::IsKey(k)) {
- PropertyDetails details = DetailsAt(i);
- if (details.IsDeleted()) continue;
- PropertyAttributes attr = details.attributes();
- if ((attr & filter) == 0) storage->set(index++, k);
- }
- }
- storage->SortPairs(storage, index);
- ASSERT(storage->length() >= index);
-}
-
-
-void StringDictionary::CopyEnumKeysTo(FixedArray* storage,
- FixedArray* sort_array) {
- ASSERT(storage->length() >= NumberOfEnumElements());
- int capacity = Capacity();
- int index = 0;
- for (int i = 0; i < capacity; i++) {
- Object* k = KeyAt(i);
- if (IsKey(k)) {
- PropertyDetails details = DetailsAt(i);
- if (details.IsDeleted() || details.IsDontEnum()) continue;
- storage->set(index, k);
- sort_array->set(index, Smi::FromInt(details.index()));
- index++;
- }
- }
- storage->SortPairs(sort_array, sort_array->length());
- ASSERT(storage->length() >= index);
-}
-
-
-template<typename Shape, typename Key>
-void Dictionary<Shape, Key>::CopyKeysTo(FixedArray* storage) {
- ASSERT(storage->length() >= NumberOfElementsFilterAttributes(
- static_cast<PropertyAttributes>(NONE)));
- int capacity = HashTable<Shape, Key>::Capacity();
- int index = 0;
- for (int i = 0; i < capacity; i++) {
- Object* k = HashTable<Shape, Key>::KeyAt(i);
- if (HashTable<Shape, Key>::IsKey(k)) {
- PropertyDetails details = DetailsAt(i);
- if (details.IsDeleted()) continue;
- storage->set(index++, k);
- }
- }
- ASSERT(storage->length() >= index);
-}
-
-
-// Backwards lookup (slow).
-template<typename Shape, typename Key>
-Object* Dictionary<Shape, Key>::SlowReverseLookup(Object* value) {
- int capacity = HashTable<Shape, Key>::Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* k = HashTable<Shape, Key>::KeyAt(i);
- if (Dictionary<Shape, Key>::IsKey(k)) {
- Object* e = ValueAt(i);
- if (e->IsJSGlobalPropertyCell()) {
- e = JSGlobalPropertyCell::cast(e)->value();
- }
- if (e == value) return k;
- }
- }
- Heap* heap = Dictionary<Shape, Key>::GetHeap();
- return heap->undefined_value();
-}
-
-
-MaybeObject* StringDictionary::TransformPropertiesToFastFor(
- JSObject* obj, int unused_property_fields) {
- // Make sure we preserve dictionary representation if there are too many
- // descriptors.
- if (NumberOfElements() > DescriptorArray::kMaxNumberOfDescriptors) return obj;
-
- // Figure out if it is necessary to generate new enumeration indices.
- int max_enumeration_index =
- NextEnumerationIndex() +
- (DescriptorArray::kMaxNumberOfDescriptors -
- NumberOfElements());
- if (!PropertyDetails::IsValidIndex(max_enumeration_index)) {
- Object* result;
- { MaybeObject* maybe_result = GenerateNewEnumerationIndices();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
-
- int instance_descriptor_length = 0;
- int number_of_fields = 0;
-
- Heap* heap = GetHeap();
-
- // Compute the length of the instance descriptor.
- int capacity = Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* k = KeyAt(i);
- if (IsKey(k)) {
- Object* value = ValueAt(i);
- PropertyType type = DetailsAt(i).type();
- ASSERT(type != FIELD);
- instance_descriptor_length++;
- if (type == NORMAL &&
- (!value->IsJSFunction() || heap->InNewSpace(value))) {
- number_of_fields += 1;
- }
- }
- }
-
- // Allocate the instance descriptor.
- Object* descriptors_unchecked;
- { MaybeObject* maybe_descriptors_unchecked =
- DescriptorArray::Allocate(instance_descriptor_length);
- if (!maybe_descriptors_unchecked->ToObject(&descriptors_unchecked)) {
- return maybe_descriptors_unchecked;
- }
- }
- DescriptorArray* descriptors = DescriptorArray::cast(descriptors_unchecked);
-
- int inobject_props = obj->map()->inobject_properties();
- int number_of_allocated_fields =
- number_of_fields + unused_property_fields - inobject_props;
- if (number_of_allocated_fields < 0) {
- // There is enough inobject space for all fields (including unused).
- number_of_allocated_fields = 0;
- unused_property_fields = inobject_props - number_of_fields;
- }
-
- // Allocate the fixed array for the fields.
- Object* fields;
- { MaybeObject* maybe_fields =
- heap->AllocateFixedArray(number_of_allocated_fields);
- if (!maybe_fields->ToObject(&fields)) return maybe_fields;
- }
-
- // Fill in the instance descriptor and the fields.
- int next_descriptor = 0;
- int current_offset = 0;
- for (int i = 0; i < capacity; i++) {
- Object* k = KeyAt(i);
- if (IsKey(k)) {
- Object* value = ValueAt(i);
- // Ensure the key is a symbol before writing into the instance descriptor.
- Object* key;
- { MaybeObject* maybe_key = heap->LookupSymbol(String::cast(k));
- if (!maybe_key->ToObject(&key)) return maybe_key;
- }
- PropertyDetails details = DetailsAt(i);
- PropertyType type = details.type();
-
- if (value->IsJSFunction() && !heap->InNewSpace(value)) {
- ConstantFunctionDescriptor d(String::cast(key),
- JSFunction::cast(value),
- details.attributes(),
- details.index());
- descriptors->Set(next_descriptor++, &d);
- } else if (type == NORMAL) {
- if (current_offset < inobject_props) {
- obj->InObjectPropertyAtPut(current_offset,
- value,
- UPDATE_WRITE_BARRIER);
- } else {
- int offset = current_offset - inobject_props;
- FixedArray::cast(fields)->set(offset, value);
- }
- FieldDescriptor d(String::cast(key),
- current_offset++,
- details.attributes(),
- details.index());
- descriptors->Set(next_descriptor++, &d);
- } else if (type == CALLBACKS) {
- CallbacksDescriptor d(String::cast(key),
- value,
- details.attributes(),
- details.index());
- descriptors->Set(next_descriptor++, &d);
- } else {
- UNREACHABLE();
- }
- }
- }
- ASSERT(current_offset == number_of_fields);
-
- descriptors->Sort();
- // Allocate new map.
- Object* new_map;
- { MaybeObject* maybe_new_map = obj->map()->CopyDropDescriptors();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- }
-
- // Transform the object.
- obj->set_map(Map::cast(new_map));
- obj->map()->set_instance_descriptors(descriptors);
- obj->map()->set_unused_property_fields(unused_property_fields);
-
- obj->set_properties(FixedArray::cast(fields));
- ASSERT(obj->IsJSObject());
-
- descriptors->SetNextEnumerationIndex(NextEnumerationIndex());
- // Check that it really works.
- ASSERT(obj->HasFastProperties());
-
- return obj;
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-// Check if there is a break point at this code position.
-bool DebugInfo::HasBreakPoint(int code_position) {
- // Get the break point info object for this code position.
- Object* break_point_info = GetBreakPointInfo(code_position);
-
- // If there is no break point info object or no break points in the break
- // point info object there is no break point at this code position.
- if (break_point_info->IsUndefined()) return false;
- return BreakPointInfo::cast(break_point_info)->GetBreakPointCount() > 0;
-}
-
-
-// Get the break point info object for this code position.
-Object* DebugInfo::GetBreakPointInfo(int code_position) {
- // Find the index of the break point info object for this code position.
- int index = GetBreakPointInfoIndex(code_position);
-
- // Return the break point info object if any.
- if (index == kNoBreakPointInfo) return GetHeap()->undefined_value();
- return BreakPointInfo::cast(break_points()->get(index));
-}
-
-
-// Clear a break point at the specified code position.
-void DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info,
- int code_position,
- Handle<Object> break_point_object) {
- Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position));
- if (break_point_info->IsUndefined()) return;
- BreakPointInfo::ClearBreakPoint(
- Handle<BreakPointInfo>::cast(break_point_info),
- break_point_object);
-}
-
-
-void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info,
- int code_position,
- int source_position,
- int statement_position,
- Handle<Object> break_point_object) {
- Isolate* isolate = Isolate::Current();
- Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position));
- if (!break_point_info->IsUndefined()) {
- BreakPointInfo::SetBreakPoint(
- Handle<BreakPointInfo>::cast(break_point_info),
- break_point_object);
- return;
- }
-
- // Adding a new break point for a code position which did not have any
- // break points before. Try to find a free slot.
- int index = kNoBreakPointInfo;
- for (int i = 0; i < debug_info->break_points()->length(); i++) {
- if (debug_info->break_points()->get(i)->IsUndefined()) {
- index = i;
- break;
- }
- }
- if (index == kNoBreakPointInfo) {
- // No free slot - extend break point info array.
- Handle<FixedArray> old_break_points =
- Handle<FixedArray>(FixedArray::cast(debug_info->break_points()));
- Handle<FixedArray> new_break_points =
- isolate->factory()->NewFixedArray(
- old_break_points->length() +
- Debug::kEstimatedNofBreakPointsInFunction);
-
- debug_info->set_break_points(*new_break_points);
- for (int i = 0; i < old_break_points->length(); i++) {
- new_break_points->set(i, old_break_points->get(i));
- }
- index = old_break_points->length();
- }
- ASSERT(index != kNoBreakPointInfo);
-
- // Allocate new BreakPointInfo object and set the break point.
- Handle<BreakPointInfo> new_break_point_info = Handle<BreakPointInfo>::cast(
- isolate->factory()->NewStruct(BREAK_POINT_INFO_TYPE));
- new_break_point_info->set_code_position(Smi::FromInt(code_position));
- new_break_point_info->set_source_position(Smi::FromInt(source_position));
- new_break_point_info->
- set_statement_position(Smi::FromInt(statement_position));
- new_break_point_info->set_break_point_objects(
- isolate->heap()->undefined_value());
- BreakPointInfo::SetBreakPoint(new_break_point_info, break_point_object);
- debug_info->break_points()->set(index, *new_break_point_info);
-}
-
-
-// Get the break point objects for a code position.
-Object* DebugInfo::GetBreakPointObjects(int code_position) {
- Object* break_point_info = GetBreakPointInfo(code_position);
- if (break_point_info->IsUndefined()) {
- return GetHeap()->undefined_value();
- }
- return BreakPointInfo::cast(break_point_info)->break_point_objects();
-}
-
-
-// Get the total number of break points.
-int DebugInfo::GetBreakPointCount() {
- if (break_points()->IsUndefined()) return 0;
- int count = 0;
- for (int i = 0; i < break_points()->length(); i++) {
- if (!break_points()->get(i)->IsUndefined()) {
- BreakPointInfo* break_point_info =
- BreakPointInfo::cast(break_points()->get(i));
- count += break_point_info->GetBreakPointCount();
- }
- }
- return count;
-}
-
-
-Object* DebugInfo::FindBreakPointInfo(Handle<DebugInfo> debug_info,
- Handle<Object> break_point_object) {
- Heap* heap = debug_info->GetHeap();
- if (debug_info->break_points()->IsUndefined()) return heap->undefined_value();
- for (int i = 0; i < debug_info->break_points()->length(); i++) {
- if (!debug_info->break_points()->get(i)->IsUndefined()) {
- Handle<BreakPointInfo> break_point_info =
- Handle<BreakPointInfo>(BreakPointInfo::cast(
- debug_info->break_points()->get(i)));
- if (BreakPointInfo::HasBreakPointObject(break_point_info,
- break_point_object)) {
- return *break_point_info;
- }
- }
- }
- return heap->undefined_value();
-}
-
-
-// Find the index of the break point info object for the specified code
-// position.
-int DebugInfo::GetBreakPointInfoIndex(int code_position) {
- if (break_points()->IsUndefined()) return kNoBreakPointInfo;
- for (int i = 0; i < break_points()->length(); i++) {
- if (!break_points()->get(i)->IsUndefined()) {
- BreakPointInfo* break_point_info =
- BreakPointInfo::cast(break_points()->get(i));
- if (break_point_info->code_position()->value() == code_position) {
- return i;
- }
- }
- }
- return kNoBreakPointInfo;
-}
-
-
-// Remove the specified break point object.
-void BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo> break_point_info,
- Handle<Object> break_point_object) {
- Isolate* isolate = Isolate::Current();
- // If there are no break points just ignore.
- if (break_point_info->break_point_objects()->IsUndefined()) return;
- // If there is a single break point clear it if it is the same.
- if (!break_point_info->break_point_objects()->IsFixedArray()) {
- if (break_point_info->break_point_objects() == *break_point_object) {
- break_point_info->set_break_point_objects(
- isolate->heap()->undefined_value());
- }
- return;
- }
- // If there are multiple break points shrink the array
- ASSERT(break_point_info->break_point_objects()->IsFixedArray());
- Handle<FixedArray> old_array =
- Handle<FixedArray>(
- FixedArray::cast(break_point_info->break_point_objects()));
- Handle<FixedArray> new_array =
- isolate->factory()->NewFixedArray(old_array->length() - 1);
- int found_count = 0;
- for (int i = 0; i < old_array->length(); i++) {
- if (old_array->get(i) == *break_point_object) {
- ASSERT(found_count == 0);
- found_count++;
- } else {
- new_array->set(i - found_count, old_array->get(i));
- }
- }
- // If the break point was found in the list change it.
- if (found_count > 0) break_point_info->set_break_point_objects(*new_array);
-}
-
-
-// Add the specified break point object.
-void BreakPointInfo::SetBreakPoint(Handle<BreakPointInfo> break_point_info,
- Handle<Object> break_point_object) {
- // If there was no break point objects before just set it.
- if (break_point_info->break_point_objects()->IsUndefined()) {
- break_point_info->set_break_point_objects(*break_point_object);
- return;
- }
- // If the break point object is the same as before just ignore.
- if (break_point_info->break_point_objects() == *break_point_object) return;
- // If there was one break point object before replace with array.
- if (!break_point_info->break_point_objects()->IsFixedArray()) {
- Handle<FixedArray> array = FACTORY->NewFixedArray(2);
- array->set(0, break_point_info->break_point_objects());
- array->set(1, *break_point_object);
- break_point_info->set_break_point_objects(*array);
- return;
- }
- // If there was more than one break point before extend array.
- Handle<FixedArray> old_array =
- Handle<FixedArray>(
- FixedArray::cast(break_point_info->break_point_objects()));
- Handle<FixedArray> new_array =
- FACTORY->NewFixedArray(old_array->length() + 1);
- for (int i = 0; i < old_array->length(); i++) {
- // If the break point was there before just ignore.
- if (old_array->get(i) == *break_point_object) return;
- new_array->set(i, old_array->get(i));
- }
- // Add the new break point.
- new_array->set(old_array->length(), *break_point_object);
- break_point_info->set_break_point_objects(*new_array);
-}
-
-
-bool BreakPointInfo::HasBreakPointObject(
- Handle<BreakPointInfo> break_point_info,
- Handle<Object> break_point_object) {
- // No break point.
- if (break_point_info->break_point_objects()->IsUndefined()) return false;
- // Single beak point.
- if (!break_point_info->break_point_objects()->IsFixedArray()) {
- return break_point_info->break_point_objects() == *break_point_object;
- }
- // Multiple break points.
- FixedArray* array = FixedArray::cast(break_point_info->break_point_objects());
- for (int i = 0; i < array->length(); i++) {
- if (array->get(i) == *break_point_object) {
- return true;
- }
- }
- return false;
-}
-
-
-// Get the number of break points.
-int BreakPointInfo::GetBreakPointCount() {
- // No break point.
- if (break_point_objects()->IsUndefined()) return 0;
- // Single beak point.
- if (!break_point_objects()->IsFixedArray()) return 1;
- // Multiple break points.
- return FixedArray::cast(break_point_objects())->length();
-}
-#endif
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/objects.h b/src/3rdparty/v8/src/objects.h
deleted file mode 100644
index 874dcbc..0000000
--- a/src/3rdparty/v8/src/objects.h
+++ /dev/null
@@ -1,6662 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_OBJECTS_H_
-#define V8_OBJECTS_H_
-
-#include "builtins.h"
-#include "smart-pointer.h"
-#include "unicode-inl.h"
-#if V8_TARGET_ARCH_ARM
-#include "arm/constants-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/constants-mips.h"
-#endif
-
-//
-// Most object types in the V8 JavaScript are described in this file.
-//
-// Inheritance hierarchy:
-// - MaybeObject (an object or a failure)
-// - Failure (immediate for marking failed operation)
-// - Object
-// - Smi (immediate small integer)
-// - HeapObject (superclass for everything allocated in the heap)
-// - JSObject
-// - JSArray
-// - JSRegExp
-// - JSFunction
-// - GlobalObject
-// - JSGlobalObject
-// - JSBuiltinsObject
-// - JSGlobalProxy
-// - JSValue
-// - JSMessageObject
-// - ByteArray
-// - ExternalArray
-// - ExternalPixelArray
-// - ExternalByteArray
-// - ExternalUnsignedByteArray
-// - ExternalShortArray
-// - ExternalUnsignedShortArray
-// - ExternalIntArray
-// - ExternalUnsignedIntArray
-// - ExternalFloatArray
-// - FixedArray
-// - DescriptorArray
-// - HashTable
-// - Dictionary
-// - SymbolTable
-// - CompilationCacheTable
-// - CodeCacheHashTable
-// - MapCache
-// - Context
-// - JSFunctionResultCache
-// - SerializedScopeInfo
-// - String
-// - SeqString
-// - SeqAsciiString
-// - SeqTwoByteString
-// - ConsString
-// - ExternalString
-// - ExternalAsciiString
-// - ExternalTwoByteString
-// - HeapNumber
-// - Code
-// - Map
-// - Oddball
-// - Proxy
-// - SharedFunctionInfo
-// - Struct
-// - AccessorInfo
-// - AccessCheckInfo
-// - InterceptorInfo
-// - CallHandlerInfo
-// - TemplateInfo
-// - FunctionTemplateInfo
-// - ObjectTemplateInfo
-// - Script
-// - SignatureInfo
-// - TypeSwitchInfo
-// - DebugInfo
-// - BreakPointInfo
-// - CodeCache
-//
-// Formats of Object*:
-// Smi: [31 bit signed int] 0
-// HeapObject: [32 bit direct pointer] (4 byte aligned) | 01
-// Failure: [30 bit signed int] 11
-
-// Ecma-262 3rd 8.6.1
-enum PropertyAttributes {
- NONE = v8::None,
- READ_ONLY = v8::ReadOnly,
- DONT_ENUM = v8::DontEnum,
- DONT_DELETE = v8::DontDelete,
- ABSENT = 16 // Used in runtime to indicate a property is absent.
- // ABSENT can never be stored in or returned from a descriptor's attributes
- // bitfield. It is only used as a return value meaning the attributes of
- // a non-existent property.
-};
-
-namespace v8 {
-namespace internal {
-
-
-// PropertyDetails captures type and attributes for a property.
-// They are used both in property dictionaries and instance descriptors.
-class PropertyDetails BASE_EMBEDDED {
- public:
-
- PropertyDetails(PropertyAttributes attributes,
- PropertyType type,
- int index = 0) {
- ASSERT(type != EXTERNAL_ARRAY_TRANSITION);
- ASSERT(TypeField::is_valid(type));
- ASSERT(AttributesField::is_valid(attributes));
- ASSERT(StorageField::is_valid(index));
-
- value_ = TypeField::encode(type)
- | AttributesField::encode(attributes)
- | StorageField::encode(index);
-
- ASSERT(type == this->type());
- ASSERT(attributes == this->attributes());
- ASSERT(index == this->index());
- }
-
- PropertyDetails(PropertyAttributes attributes,
- PropertyType type,
- ExternalArrayType array_type) {
- ASSERT(type == EXTERNAL_ARRAY_TRANSITION);
- ASSERT(TypeField::is_valid(type));
- ASSERT(AttributesField::is_valid(attributes));
- ASSERT(StorageField::is_valid(static_cast<int>(array_type)));
-
- value_ = TypeField::encode(type)
- | AttributesField::encode(attributes)
- | StorageField::encode(static_cast<int>(array_type));
-
- ASSERT(type == this->type());
- ASSERT(attributes == this->attributes());
- ASSERT(array_type == this->array_type());
- }
-
- // Conversion for storing details as Object*.
- inline PropertyDetails(Smi* smi);
- inline Smi* AsSmi();
-
- PropertyType type() { return TypeField::decode(value_); }
-
- bool IsTransition() {
- PropertyType t = type();
- ASSERT(t != INTERCEPTOR);
- return t == MAP_TRANSITION || t == CONSTANT_TRANSITION ||
- t == EXTERNAL_ARRAY_TRANSITION;
- }
-
- bool IsProperty() {
- return type() < FIRST_PHANTOM_PROPERTY_TYPE;
- }
-
- PropertyAttributes attributes() { return AttributesField::decode(value_); }
-
- int index() { return StorageField::decode(value_); }
-
- ExternalArrayType array_type() {
- ASSERT(type() == EXTERNAL_ARRAY_TRANSITION);
- return static_cast<ExternalArrayType>(StorageField::decode(value_));
- }
-
- inline PropertyDetails AsDeleted();
-
- static bool IsValidIndex(int index) {
- return StorageField::is_valid(index);
- }
-
- bool IsReadOnly() { return (attributes() & READ_ONLY) != 0; }
- bool IsDontDelete() { return (attributes() & DONT_DELETE) != 0; }
- bool IsDontEnum() { return (attributes() & DONT_ENUM) != 0; }
- bool IsDeleted() { return DeletedField::decode(value_) != 0;}
-
- // Bit fields in value_ (type, shift, size). Must be public so the
- // constants can be embedded in generated code.
- class TypeField: public BitField<PropertyType, 0, 4> {};
- class AttributesField: public BitField<PropertyAttributes, 4, 3> {};
- class DeletedField: public BitField<uint32_t, 7, 1> {};
- class StorageField: public BitField<uint32_t, 8, 32-8> {};
-
- static const int kInitialIndex = 1;
- private:
- uint32_t value_;
-};
-
-
-// Setter that skips the write barrier if mode is SKIP_WRITE_BARRIER.
-enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };
-
-
-// PropertyNormalizationMode is used to specify whether to keep
-// inobject properties when normalizing properties of a JSObject.
-enum PropertyNormalizationMode {
- CLEAR_INOBJECT_PROPERTIES,
- KEEP_INOBJECT_PROPERTIES
-};
-
-
-// NormalizedMapSharingMode is used to specify whether a map may be shared
-// by different objects with normalized properties.
-enum NormalizedMapSharingMode {
- UNIQUE_NORMALIZED_MAP,
- SHARED_NORMALIZED_MAP
-};
-
-
-// Instance size sentinel for objects of variable size.
-static const int kVariableSizeSentinel = 0;
-
-
-// All Maps have a field instance_type containing a InstanceType.
-// It describes the type of the instances.
-//
-// As an example, a JavaScript object is a heap object and its map
-// instance_type is JS_OBJECT_TYPE.
-//
-// The names of the string instance types are intended to systematically
-// mirror their encoding in the instance_type field of the map. The default
-// encoding is considered TWO_BYTE. It is not mentioned in the name. ASCII
-// encoding is mentioned explicitly in the name. Likewise, the default
-// representation is considered sequential. It is not mentioned in the
-// name. The other representations (eg, CONS, EXTERNAL) are explicitly
-// mentioned. Finally, the string is either a SYMBOL_TYPE (if it is a
-// symbol) or a STRING_TYPE (if it is not a symbol).
-//
-// NOTE: The following things are some that depend on the string types having
-// instance_types that are less than those of all other types:
-// HeapObject::Size, HeapObject::IterateBody, the typeof operator, and
-// Object::IsString.
-//
-// NOTE: Everything following JS_VALUE_TYPE is considered a
-// JSObject for GC purposes. The first four entries here have typeof
-// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
-#define INSTANCE_TYPE_LIST_ALL(V) \
- V(SYMBOL_TYPE) \
- V(ASCII_SYMBOL_TYPE) \
- V(CONS_SYMBOL_TYPE) \
- V(CONS_ASCII_SYMBOL_TYPE) \
- V(EXTERNAL_SYMBOL_TYPE) \
- V(EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE) \
- V(EXTERNAL_ASCII_SYMBOL_TYPE) \
- V(STRING_TYPE) \
- V(ASCII_STRING_TYPE) \
- V(CONS_STRING_TYPE) \
- V(CONS_ASCII_STRING_TYPE) \
- V(EXTERNAL_STRING_TYPE) \
- V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE) \
- V(EXTERNAL_ASCII_STRING_TYPE) \
- V(PRIVATE_EXTERNAL_ASCII_STRING_TYPE) \
- \
- V(MAP_TYPE) \
- V(CODE_TYPE) \
- V(ODDBALL_TYPE) \
- V(JS_GLOBAL_PROPERTY_CELL_TYPE) \
- \
- V(HEAP_NUMBER_TYPE) \
- V(PROXY_TYPE) \
- V(BYTE_ARRAY_TYPE) \
- /* Note: the order of these external array */ \
- /* types is relied upon in */ \
- /* Object::IsExternalArray(). */ \
- V(EXTERNAL_BYTE_ARRAY_TYPE) \
- V(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE) \
- V(EXTERNAL_SHORT_ARRAY_TYPE) \
- V(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE) \
- V(EXTERNAL_INT_ARRAY_TYPE) \
- V(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE) \
- V(EXTERNAL_FLOAT_ARRAY_TYPE) \
- V(EXTERNAL_PIXEL_ARRAY_TYPE) \
- V(FILLER_TYPE) \
- \
- V(ACCESSOR_INFO_TYPE) \
- V(ACCESS_CHECK_INFO_TYPE) \
- V(INTERCEPTOR_INFO_TYPE) \
- V(CALL_HANDLER_INFO_TYPE) \
- V(FUNCTION_TEMPLATE_INFO_TYPE) \
- V(OBJECT_TEMPLATE_INFO_TYPE) \
- V(SIGNATURE_INFO_TYPE) \
- V(TYPE_SWITCH_INFO_TYPE) \
- V(SCRIPT_TYPE) \
- V(CODE_CACHE_TYPE) \
- \
- V(FIXED_ARRAY_TYPE) \
- V(SHARED_FUNCTION_INFO_TYPE) \
- \
- V(JS_MESSAGE_OBJECT_TYPE) \
- \
- V(JS_VALUE_TYPE) \
- V(JS_OBJECT_TYPE) \
- V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
- V(JS_GLOBAL_OBJECT_TYPE) \
- V(JS_BUILTINS_OBJECT_TYPE) \
- V(JS_GLOBAL_PROXY_TYPE) \
- V(JS_ARRAY_TYPE) \
- V(JS_REGEXP_TYPE) \
- \
- V(JS_FUNCTION_TYPE) \
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-#define INSTANCE_TYPE_LIST_DEBUGGER(V) \
- V(DEBUG_INFO_TYPE) \
- V(BREAK_POINT_INFO_TYPE)
-#else
-#define INSTANCE_TYPE_LIST_DEBUGGER(V)
-#endif
-
-#define INSTANCE_TYPE_LIST(V) \
- INSTANCE_TYPE_LIST_ALL(V) \
- INSTANCE_TYPE_LIST_DEBUGGER(V)
-
-
-// Since string types are not consecutive, this macro is used to
-// iterate over them.
-#define STRING_TYPE_LIST(V) \
- V(SYMBOL_TYPE, \
- kVariableSizeSentinel, \
- symbol, \
- Symbol) \
- V(ASCII_SYMBOL_TYPE, \
- kVariableSizeSentinel, \
- ascii_symbol, \
- AsciiSymbol) \
- V(CONS_SYMBOL_TYPE, \
- ConsString::kSize, \
- cons_symbol, \
- ConsSymbol) \
- V(CONS_ASCII_SYMBOL_TYPE, \
- ConsString::kSize, \
- cons_ascii_symbol, \
- ConsAsciiSymbol) \
- V(EXTERNAL_SYMBOL_TYPE, \
- ExternalTwoByteString::kSize, \
- external_symbol, \
- ExternalSymbol) \
- V(EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE, \
- ExternalTwoByteString::kSize, \
- external_symbol_with_ascii_data, \
- ExternalSymbolWithAsciiData) \
- V(EXTERNAL_ASCII_SYMBOL_TYPE, \
- ExternalAsciiString::kSize, \
- external_ascii_symbol, \
- ExternalAsciiSymbol) \
- V(STRING_TYPE, \
- kVariableSizeSentinel, \
- string, \
- String) \
- V(ASCII_STRING_TYPE, \
- kVariableSizeSentinel, \
- ascii_string, \
- AsciiString) \
- V(CONS_STRING_TYPE, \
- ConsString::kSize, \
- cons_string, \
- ConsString) \
- V(CONS_ASCII_STRING_TYPE, \
- ConsString::kSize, \
- cons_ascii_string, \
- ConsAsciiString) \
- V(EXTERNAL_STRING_TYPE, \
- ExternalTwoByteString::kSize, \
- external_string, \
- ExternalString) \
- V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE, \
- ExternalTwoByteString::kSize, \
- external_string_with_ascii_data, \
- ExternalStringWithAsciiData) \
- V(EXTERNAL_ASCII_STRING_TYPE, \
- ExternalAsciiString::kSize, \
- external_ascii_string, \
- ExternalAsciiString)
-
-// A struct is a simple object a set of object-valued fields. Including an
-// object type in this causes the compiler to generate most of the boilerplate
-// code for the class including allocation and garbage collection routines,
-// casts and predicates. All you need to define is the class, methods and
-// object verification routines. Easy, no?
-//
-// Note that for subtle reasons related to the ordering or numerical values of
-// type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST
-// manually.
-#define STRUCT_LIST_ALL(V) \
- V(ACCESSOR_INFO, AccessorInfo, accessor_info) \
- V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
- V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info) \
- V(CALL_HANDLER_INFO, CallHandlerInfo, call_handler_info) \
- V(FUNCTION_TEMPLATE_INFO, FunctionTemplateInfo, function_template_info) \
- V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info) \
- V(SIGNATURE_INFO, SignatureInfo, signature_info) \
- V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info) \
- V(SCRIPT, Script, script) \
- V(CODE_CACHE, CodeCache, code_cache)
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-#define STRUCT_LIST_DEBUGGER(V) \
- V(DEBUG_INFO, DebugInfo, debug_info) \
- V(BREAK_POINT_INFO, BreakPointInfo, break_point_info)
-#else
-#define STRUCT_LIST_DEBUGGER(V)
-#endif
-
-#define STRUCT_LIST(V) \
- STRUCT_LIST_ALL(V) \
- STRUCT_LIST_DEBUGGER(V)
-
-// We use the full 8 bits of the instance_type field to encode heap object
-// instance types. The high-order bit (bit 7) is set if the object is not a
-// string, and cleared if it is a string.
-const uint32_t kIsNotStringMask = 0x80;
-const uint32_t kStringTag = 0x0;
-const uint32_t kNotStringTag = 0x80;
-
-// Bit 6 indicates that the object is a symbol (if set) or not (if cleared).
-// There are not enough types that the non-string types (with bit 7 set) can
-// have bit 6 set too.
-const uint32_t kIsSymbolMask = 0x40;
-const uint32_t kNotSymbolTag = 0x0;
-const uint32_t kSymbolTag = 0x40;
-
-// If bit 7 is clear then bit 2 indicates whether the string consists of
-// two-byte characters or one-byte characters.
-const uint32_t kStringEncodingMask = 0x4;
-const uint32_t kTwoByteStringTag = 0x0;
-const uint32_t kAsciiStringTag = 0x4;
-
-// If bit 7 is clear, the low-order 2 bits indicate the representation
-// of the string.
-const uint32_t kStringRepresentationMask = 0x03;
-enum StringRepresentationTag {
- kSeqStringTag = 0x0,
- kConsStringTag = 0x1,
- kExternalStringTag = 0x2
-};
-const uint32_t kIsConsStringMask = 0x1;
-
-// If bit 7 is clear, then bit 3 indicates whether this two-byte
-// string actually contains ascii data.
-const uint32_t kAsciiDataHintMask = 0x08;
-const uint32_t kAsciiDataHintTag = 0x08;
-
-
-// A ConsString with an empty string as the right side is a candidate
-// for being shortcut by the garbage collector unless it is a
-// symbol. It's not common to have non-flat symbols, so we do not
-// shortcut them thereby avoiding turning symbols into strings. See
-// heap.cc and mark-compact.cc.
-const uint32_t kShortcutTypeMask =
- kIsNotStringMask |
- kIsSymbolMask |
- kStringRepresentationMask;
-const uint32_t kShortcutTypeTag = kConsStringTag;
-
-
-enum InstanceType {
- // String types.
- // FIRST_STRING_TYPE
- SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kSeqStringTag,
- ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag,
- CONS_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kConsStringTag,
- CONS_ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kConsStringTag,
- EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kExternalStringTag,
- EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE =
- kTwoByteStringTag | kSymbolTag | kExternalStringTag | kAsciiDataHintTag,
- EXTERNAL_ASCII_SYMBOL_TYPE =
- kAsciiStringTag | kSymbolTag | kExternalStringTag,
- STRING_TYPE = kTwoByteStringTag | kSeqStringTag,
- ASCII_STRING_TYPE = kAsciiStringTag | kSeqStringTag,
- CONS_STRING_TYPE = kTwoByteStringTag | kConsStringTag,
- CONS_ASCII_STRING_TYPE = kAsciiStringTag | kConsStringTag,
- EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag,
- EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
- kTwoByteStringTag | kExternalStringTag | kAsciiDataHintTag,
- // LAST_STRING_TYPE
- EXTERNAL_ASCII_STRING_TYPE = kAsciiStringTag | kExternalStringTag,
- PRIVATE_EXTERNAL_ASCII_STRING_TYPE = EXTERNAL_ASCII_STRING_TYPE,
-
- // Objects allocated in their own spaces (never in new space).
- MAP_TYPE = kNotStringTag, // FIRST_NONSTRING_TYPE
- CODE_TYPE,
- ODDBALL_TYPE,
- JS_GLOBAL_PROPERTY_CELL_TYPE,
-
- // "Data", objects that cannot contain non-map-word pointers to heap
- // objects.
- HEAP_NUMBER_TYPE,
- PROXY_TYPE,
- BYTE_ARRAY_TYPE,
- EXTERNAL_BYTE_ARRAY_TYPE, // FIRST_EXTERNAL_ARRAY_TYPE
- EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
- EXTERNAL_SHORT_ARRAY_TYPE,
- EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
- EXTERNAL_INT_ARRAY_TYPE,
- EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
- EXTERNAL_FLOAT_ARRAY_TYPE,
- EXTERNAL_PIXEL_ARRAY_TYPE, // LAST_EXTERNAL_ARRAY_TYPE
- FILLER_TYPE, // LAST_DATA_TYPE
-
- // Structs.
- ACCESSOR_INFO_TYPE,
- ACCESS_CHECK_INFO_TYPE,
- INTERCEPTOR_INFO_TYPE,
- CALL_HANDLER_INFO_TYPE,
- FUNCTION_TEMPLATE_INFO_TYPE,
- OBJECT_TEMPLATE_INFO_TYPE,
- SIGNATURE_INFO_TYPE,
- TYPE_SWITCH_INFO_TYPE,
- SCRIPT_TYPE,
- CODE_CACHE_TYPE,
- // The following two instance types are only used when ENABLE_DEBUGGER_SUPPORT
- // is defined. However as include/v8.h contain some of the instance type
- // constants always having them avoids them getting different numbers
- // depending on whether ENABLE_DEBUGGER_SUPPORT is defined or not.
- DEBUG_INFO_TYPE,
- BREAK_POINT_INFO_TYPE,
-
- FIXED_ARRAY_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
-
- JS_MESSAGE_OBJECT_TYPE,
-
- JS_VALUE_TYPE, // FIRST_JS_OBJECT_TYPE
- JS_OBJECT_TYPE,
- JS_CONTEXT_EXTENSION_OBJECT_TYPE,
- JS_GLOBAL_OBJECT_TYPE,
- JS_BUILTINS_OBJECT_TYPE,
- JS_GLOBAL_PROXY_TYPE,
- JS_ARRAY_TYPE,
-
- JS_REGEXP_TYPE, // LAST_JS_OBJECT_TYPE, FIRST_FUNCTION_CLASS_TYPE
-
- JS_FUNCTION_TYPE,
-
- // Pseudo-types
- FIRST_TYPE = 0x0,
- LAST_TYPE = JS_FUNCTION_TYPE,
- INVALID_TYPE = FIRST_TYPE - 1,
- FIRST_NONSTRING_TYPE = MAP_TYPE,
- FIRST_STRING_TYPE = FIRST_TYPE,
- LAST_STRING_TYPE = FIRST_NONSTRING_TYPE - 1,
- // Boundaries for testing for an external array.
- FIRST_EXTERNAL_ARRAY_TYPE = EXTERNAL_BYTE_ARRAY_TYPE,
- LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_PIXEL_ARRAY_TYPE,
- // Boundary for promotion to old data space/old pointer space.
- LAST_DATA_TYPE = FILLER_TYPE,
- // Boundaries for testing the type is a JavaScript "object". Note that
- // function objects are not counted as objects, even though they are
- // implemented as such; only values whose typeof is "object" are included.
- FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE,
- LAST_JS_OBJECT_TYPE = JS_REGEXP_TYPE,
- // RegExp objects have [[Class]] "function" because they are callable.
- // All types from this type and above are objects with [[Class]] "function".
- FIRST_FUNCTION_CLASS_TYPE = JS_REGEXP_TYPE
-};
-
-static const int kExternalArrayTypeCount = LAST_EXTERNAL_ARRAY_TYPE -
- FIRST_EXTERNAL_ARRAY_TYPE + 1;
-
-STATIC_CHECK(JS_OBJECT_TYPE == Internals::kJSObjectType);
-STATIC_CHECK(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
-STATIC_CHECK(PROXY_TYPE == Internals::kProxyType);
-
-
-enum CompareResult {
- LESS = -1,
- EQUAL = 0,
- GREATER = 1,
-
- NOT_EQUAL = GREATER
-};
-
-
-#define DECL_BOOLEAN_ACCESSORS(name) \
- inline bool name(); \
- inline void set_##name(bool value); \
-
-
-#define DECL_ACCESSORS(name, type) \
- inline type* name(); \
- inline void set_##name(type* value, \
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER); \
-
-
-class StringStream;
-class ObjectVisitor;
-
-struct ValueInfo : public Malloced {
- ValueInfo() : type(FIRST_TYPE), ptr(NULL), str(NULL), number(0) { }
- InstanceType type;
- Object* ptr;
- const char* str;
- double number;
-};
-
-
-// A template-ized version of the IsXXX functions.
-template <class C> static inline bool Is(Object* obj);
-
-
-class MaybeObject BASE_EMBEDDED {
- public:
- inline bool IsFailure();
- inline bool IsRetryAfterGC();
- inline bool IsOutOfMemory();
- inline bool IsException();
- INLINE(bool IsTheHole());
- inline bool ToObject(Object** obj) {
- if (IsFailure()) return false;
- *obj = reinterpret_cast<Object*>(this);
- return true;
- }
- inline Object* ToObjectUnchecked() {
- ASSERT(!IsFailure());
- return reinterpret_cast<Object*>(this);
- }
- inline Object* ToObjectChecked() {
- CHECK(!IsFailure());
- return reinterpret_cast<Object*>(this);
- }
-
-#ifdef OBJECT_PRINT
- // Prints this object with details.
- inline void Print() {
- Print(stdout);
- };
- inline void PrintLn() {
- PrintLn(stdout);
- }
- void Print(FILE* out);
- void PrintLn(FILE* out);
-#endif
-#ifdef DEBUG
- // Verifies the object.
- void Verify();
-#endif
-};
-
-
-#define OBJECT_TYPE_LIST(V) \
- V(Smi) \
- V(HeapObject) \
- V(Number) \
-
-#define HEAP_OBJECT_TYPE_LIST(V) \
- V(HeapNumber) \
- V(String) \
- V(Symbol) \
- V(SeqString) \
- V(ExternalString) \
- V(ConsString) \
- V(ExternalTwoByteString) \
- V(ExternalAsciiString) \
- V(SeqTwoByteString) \
- V(SeqAsciiString) \
- \
- V(ExternalArray) \
- V(ExternalByteArray) \
- V(ExternalUnsignedByteArray) \
- V(ExternalShortArray) \
- V(ExternalUnsignedShortArray) \
- V(ExternalIntArray) \
- V(ExternalUnsignedIntArray) \
- V(ExternalFloatArray) \
- V(ExternalPixelArray) \
- V(ByteArray) \
- V(JSObject) \
- V(JSContextExtensionObject) \
- V(Map) \
- V(DescriptorArray) \
- V(DeoptimizationInputData) \
- V(DeoptimizationOutputData) \
- V(FixedArray) \
- V(Context) \
- V(CatchContext) \
- V(GlobalContext) \
- V(JSFunction) \
- V(Code) \
- V(Oddball) \
- V(SharedFunctionInfo) \
- V(JSValue) \
- V(JSMessageObject) \
- V(StringWrapper) \
- V(Proxy) \
- V(Boolean) \
- V(JSArray) \
- V(JSRegExp) \
- V(HashTable) \
- V(Dictionary) \
- V(SymbolTable) \
- V(JSFunctionResultCache) \
- V(NormalizedMapCache) \
- V(CompilationCacheTable) \
- V(CodeCacheHashTable) \
- V(MapCache) \
- V(Primitive) \
- V(GlobalObject) \
- V(JSGlobalObject) \
- V(JSBuiltinsObject) \
- V(JSGlobalProxy) \
- V(UndetectableObject) \
- V(AccessCheckNeeded) \
- V(JSGlobalPropertyCell) \
-
-// Object is the abstract superclass for all classes in the
-// object hierarchy.
-// Object does not use any virtual functions to avoid the
-// allocation of the C++ vtable.
-// Since Smi and Failure are subclasses of Object no
-// data members can be present in Object.
-class Object : public MaybeObject {
- public:
- // Type testing.
-#define IS_TYPE_FUNCTION_DECL(type_) inline bool Is##type_();
- OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
- HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
-#undef IS_TYPE_FUNCTION_DECL
-
- // Returns true if this object is an instance of the specified
- // function template.
- inline bool IsInstanceOf(FunctionTemplateInfo* type);
-
- inline bool IsStruct();
-#define DECLARE_STRUCT_PREDICATE(NAME, Name, name) inline bool Is##Name();
- STRUCT_LIST(DECLARE_STRUCT_PREDICATE)
-#undef DECLARE_STRUCT_PREDICATE
-
- // Oddball testing.
- INLINE(bool IsUndefined());
- INLINE(bool IsNull());
- INLINE(bool IsTheHole()); // Shadows MaybeObject's implementation.
- INLINE(bool IsTrue());
- INLINE(bool IsFalse());
- inline bool IsArgumentsMarker();
-
- // Extract the number.
- inline double Number();
-
- inline bool HasSpecificClassOf(String* name);
-
- MUST_USE_RESULT MaybeObject* ToObject(); // ECMA-262 9.9.
- Object* ToBoolean(); // ECMA-262 9.2.
-
- // Convert to a JSObject if needed.
- // global_context is used when creating wrapper object.
- MUST_USE_RESULT MaybeObject* ToObject(Context* global_context);
-
- // Converts this to a Smi if possible.
- // Failure is returned otherwise.
- MUST_USE_RESULT inline MaybeObject* ToSmi();
-
- void Lookup(String* name, LookupResult* result);
-
- // Property access.
- MUST_USE_RESULT inline MaybeObject* GetProperty(String* key);
- MUST_USE_RESULT inline MaybeObject* GetProperty(
- String* key,
- PropertyAttributes* attributes);
- MUST_USE_RESULT MaybeObject* GetPropertyWithReceiver(
- Object* receiver,
- String* key,
- PropertyAttributes* attributes);
- MUST_USE_RESULT MaybeObject* GetProperty(Object* receiver,
- LookupResult* result,
- String* key,
- PropertyAttributes* attributes);
- MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver,
- Object* structure,
- String* name,
- Object* holder);
- MUST_USE_RESULT MaybeObject* GetPropertyWithDefinedGetter(Object* receiver,
- JSFunction* getter);
-
- inline MaybeObject* GetElement(uint32_t index);
- // For use when we know that no exception can be thrown.
- inline Object* GetElementNoExceptionThrown(uint32_t index);
- MaybeObject* GetElementWithReceiver(Object* receiver, uint32_t index);
-
- // Return the object's prototype (might be Heap::null_value()).
- Object* GetPrototype();
-
- // Tries to convert an object to an array index. Returns true and sets
- // the output parameter if it succeeds.
- inline bool ToArrayIndex(uint32_t* index);
-
- // Returns true if this is a JSValue containing a string and the index is
- // < the length of the string. Used to implement [] on strings.
- inline bool IsStringObjectWithCharacterAt(uint32_t index);
-
-#ifdef DEBUG
- // Verify a pointer is a valid object pointer.
- static void VerifyPointer(Object* p);
-#endif
-
- // Prints this object without details.
- inline void ShortPrint() {
- ShortPrint(stdout);
- }
- void ShortPrint(FILE* out);
-
- // Prints this object without details to a message accumulator.
- void ShortPrint(StringStream* accumulator);
-
- // Casting: This cast is only needed to satisfy macros in objects-inl.h.
- static Object* cast(Object* value) { return value; }
-
- // Layout description.
- static const int kHeaderSize = 0; // Object does not take up any space.
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
-};
-
-
-// Smi represents integer Numbers that can be stored in 31 bits.
-// Smis are immediate which means they are NOT allocated in the heap.
-// The this pointer has the following format: [31 bit signed int] 0
-// For long smis it has the following format:
-// [32 bit signed int] [31 bits zero padding] 0
-// Smi stands for small integer.
-class Smi: public Object {
- public:
- // Returns the integer value.
- inline int value();
-
- // Convert a value to a Smi object.
- static inline Smi* FromInt(int value);
-
- static inline Smi* FromIntptr(intptr_t value);
-
- // Returns whether value can be represented in a Smi.
- static inline bool IsValid(intptr_t value);
-
- // Casting.
- static inline Smi* cast(Object* object);
-
- // Dispatched behavior.
- inline void SmiPrint() {
- SmiPrint(stdout);
- }
- void SmiPrint(FILE* out);
- void SmiPrint(StringStream* accumulator);
-#ifdef DEBUG
- void SmiVerify();
-#endif
-
- static const int kMinValue = (-1 << (kSmiValueSize - 1));
- static const int kMaxValue = -(kMinValue + 1);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Smi);
-};
-
-
-// Failure is used for reporting out of memory situations and
-// propagating exceptions through the runtime system. Failure objects
-// are transient and cannot occur as part of the object graph.
-//
-// Failures are a single word, encoded as follows:
-// +-------------------------+---+--+--+
-// |.........unused..........|sss|tt|11|
-// +-------------------------+---+--+--+
-// 7 6 4 32 10
-//
-//
-// The low two bits, 0-1, are the failure tag, 11. The next two bits,
-// 2-3, are a failure type tag 'tt' with possible values:
-// 00 RETRY_AFTER_GC
-// 01 EXCEPTION
-// 10 INTERNAL_ERROR
-// 11 OUT_OF_MEMORY_EXCEPTION
-//
-// The next three bits, 4-6, are an allocation space tag 'sss'. The
-// allocation space tag is 000 for all failure types except
-// RETRY_AFTER_GC. For RETRY_AFTER_GC, the possible values are the
-// allocation spaces (the encoding is found in globals.h).
-
-// Failure type tag info.
-const int kFailureTypeTagSize = 2;
-const int kFailureTypeTagMask = (1 << kFailureTypeTagSize) - 1;
-
-class Failure: public MaybeObject {
- public:
- // RuntimeStubs assumes EXCEPTION = 1 in the compiler-generated code.
- enum Type {
- RETRY_AFTER_GC = 0,
- EXCEPTION = 1, // Returning this marker tells the real exception
- // is in Isolate::pending_exception.
- INTERNAL_ERROR = 2,
- OUT_OF_MEMORY_EXCEPTION = 3
- };
-
- inline Type type() const;
-
- // Returns the space that needs to be collected for RetryAfterGC failures.
- inline AllocationSpace allocation_space() const;
-
- inline bool IsInternalError() const;
- inline bool IsOutOfMemoryException() const;
-
- static inline Failure* RetryAfterGC(AllocationSpace space);
- static inline Failure* RetryAfterGC(); // NEW_SPACE
- static inline Failure* Exception();
- static inline Failure* InternalError();
- static inline Failure* OutOfMemoryException();
- // Casting.
- static inline Failure* cast(MaybeObject* object);
-
- // Dispatched behavior.
- inline void FailurePrint() {
- FailurePrint(stdout);
- }
- void FailurePrint(FILE* out);
- void FailurePrint(StringStream* accumulator);
-#ifdef DEBUG
- void FailureVerify();
-#endif
-
- private:
- inline intptr_t value() const;
- static inline Failure* Construct(Type type, intptr_t value = 0);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(Failure);
-};
-
-
-// Heap objects typically have a map pointer in their first word. However,
-// during GC other data (eg, mark bits, forwarding addresses) is sometimes
-// encoded in the first word. The class MapWord is an abstraction of the
-// value in a heap object's first word.
-class MapWord BASE_EMBEDDED {
- public:
- // Normal state: the map word contains a map pointer.
-
- // Create a map word from a map pointer.
- static inline MapWord FromMap(Map* map);
-
- // View this map word as a map pointer.
- inline Map* ToMap();
-
-
- // Scavenge collection: the map word of live objects in the from space
- // contains a forwarding address (a heap object pointer in the to space).
-
- // True if this map word is a forwarding address for a scavenge
- // collection. Only valid during a scavenge collection (specifically,
- // when all map words are heap object pointers, ie. not during a full GC).
- inline bool IsForwardingAddress();
-
- // Create a map word from a forwarding address.
- static inline MapWord FromForwardingAddress(HeapObject* object);
-
- // View this map word as a forwarding address.
- inline HeapObject* ToForwardingAddress();
-
- // Marking phase of full collection: the map word of live objects is
- // marked, and may be marked as overflowed (eg, the object is live, its
- // children have not been visited, and it does not fit in the marking
- // stack).
-
- // True if this map word's mark bit is set.
- inline bool IsMarked();
-
- // Return this map word but with its mark bit set.
- inline void SetMark();
-
- // Return this map word but with its mark bit cleared.
- inline void ClearMark();
-
- // True if this map word's overflow bit is set.
- inline bool IsOverflowed();
-
- // Return this map word but with its overflow bit set.
- inline void SetOverflow();
-
- // Return this map word but with its overflow bit cleared.
- inline void ClearOverflow();
-
-
- // Compacting phase of a full compacting collection: the map word of live
- // objects contains an encoding of the original map address along with the
- // forwarding address (represented as an offset from the first live object
- // in the same page as the (old) object address).
-
- // Create a map word from a map address and a forwarding address offset.
- static inline MapWord EncodeAddress(Address map_address, int offset);
-
- // Return the map address encoded in this map word.
- inline Address DecodeMapAddress(MapSpace* map_space);
-
- // Return the forwarding offset encoded in this map word.
- inline int DecodeOffset();
-
-
- // During serialization: the map word is used to hold an encoded
- // address, and possibly a mark bit (set and cleared with SetMark
- // and ClearMark).
-
- // Create a map word from an encoded address.
- static inline MapWord FromEncodedAddress(Address address);
-
- inline Address ToEncodedAddress();
-
- // Bits used by the marking phase of the garbage collector.
- //
- // The first word of a heap object is normally a map pointer. The last two
- // bits are tagged as '01' (kHeapObjectTag). We reuse the last two bits to
- // mark an object as live and/or overflowed:
- // last bit = 0, marked as alive
- // second bit = 1, overflowed
- // An object is only marked as overflowed when it is marked as live while
- // the marking stack is overflowed.
- static const int kMarkingBit = 0; // marking bit
- static const int kMarkingMask = (1 << kMarkingBit); // marking mask
- static const int kOverflowBit = 1; // overflow bit
- static const int kOverflowMask = (1 << kOverflowBit); // overflow mask
-
- // Forwarding pointers and map pointer encoding. On 32 bit all the bits are
- // used.
- // +-----------------+------------------+-----------------+
- // |forwarding offset|page offset of map|page index of map|
- // +-----------------+------------------+-----------------+
- // ^ ^ ^
- // | | |
- // | | kMapPageIndexBits
- // | kMapPageOffsetBits
- // kForwardingOffsetBits
- static const int kMapPageOffsetBits = kPageSizeBits - kMapAlignmentBits;
- static const int kForwardingOffsetBits = kPageSizeBits - kObjectAlignmentBits;
-#ifdef V8_HOST_ARCH_64_BIT
- static const int kMapPageIndexBits = 16;
-#else
- // Use all the 32-bits to encode on a 32-bit platform.
- static const int kMapPageIndexBits =
- 32 - (kMapPageOffsetBits + kForwardingOffsetBits);
-#endif
-
- static const int kMapPageIndexShift = 0;
- static const int kMapPageOffsetShift =
- kMapPageIndexShift + kMapPageIndexBits;
- static const int kForwardingOffsetShift =
- kMapPageOffsetShift + kMapPageOffsetBits;
-
- // Bit masks covering the different parts the encoding.
- static const uintptr_t kMapPageIndexMask =
- (1 << kMapPageOffsetShift) - 1;
- static const uintptr_t kMapPageOffsetMask =
- ((1 << kForwardingOffsetShift) - 1) & ~kMapPageIndexMask;
- static const uintptr_t kForwardingOffsetMask =
- ~(kMapPageIndexMask | kMapPageOffsetMask);
-
- private:
- // HeapObject calls the private constructor and directly reads the value.
- friend class HeapObject;
-
- explicit MapWord(uintptr_t value) : value_(value) {}
-
- uintptr_t value_;
-};
-
-
-// HeapObject is the superclass for all classes describing heap allocated
-// objects.
-class HeapObject: public Object {
- public:
- // [map]: Contains a map which contains the object's reflective
- // information.
- inline Map* map();
- inline void set_map(Map* value);
-
- // During garbage collection, the map word of a heap object does not
- // necessarily contain a map pointer.
- inline MapWord map_word();
- inline void set_map_word(MapWord map_word);
-
- // The Heap the object was allocated in. Used also to access Isolate.
- // This method can not be used during GC, it ASSERTs this.
- inline Heap* GetHeap();
- // Convenience method to get current isolate. This method can be
- // accessed only when its result is the same as
- // Isolate::Current(), it ASSERTs this. See also comment for GetHeap.
- inline Isolate* GetIsolate();
-
- // Converts an address to a HeapObject pointer.
- static inline HeapObject* FromAddress(Address address);
-
- // Returns the address of this HeapObject.
- inline Address address();
-
- // Iterates over pointers contained in the object (including the Map)
- void Iterate(ObjectVisitor* v);
-
- // Iterates over all pointers contained in the object except the
- // first map pointer. The object type is given in the first
- // parameter. This function does not access the map pointer in the
- // object, and so is safe to call while the map pointer is modified.
- void IterateBody(InstanceType type, int object_size, ObjectVisitor* v);
-
- // Returns the heap object's size in bytes
- inline int Size();
-
- // Given a heap object's map pointer, returns the heap size in bytes
- // Useful when the map pointer field is used for other purposes.
- // GC internal.
- inline int SizeFromMap(Map* map);
-
- // Support for the marking heap objects during the marking phase of GC.
- // True if the object is marked live.
- inline bool IsMarked();
-
- // Mutate this object's map pointer to indicate that the object is live.
- inline void SetMark();
-
- // Mutate this object's map pointer to remove the indication that the
- // object is live (ie, partially restore the map pointer).
- inline void ClearMark();
-
- // True if this object is marked as overflowed. Overflowed objects have
- // been reached and marked during marking of the heap, but their children
- // have not necessarily been marked and they have not been pushed on the
- // marking stack.
- inline bool IsOverflowed();
-
- // Mutate this object's map pointer to indicate that the object is
- // overflowed.
- inline void SetOverflow();
-
- // Mutate this object's map pointer to remove the indication that the
- // object is overflowed (ie, partially restore the map pointer).
- inline void ClearOverflow();
-
- // Returns the field at offset in obj, as a read/write Object* reference.
- // Does no checking, and is safe to use during GC, while maps are invalid.
- // Does not invoke write barrier, so should only be assigned to
- // during marking GC.
- static inline Object** RawField(HeapObject* obj, int offset);
-
- // Casting.
- static inline HeapObject* cast(Object* obj);
-
- // Return the write barrier mode for this. Callers of this function
- // must be able to present a reference to an AssertNoAllocation
- // object as a sign that they are not going to use this function
- // from code that allocates and thus invalidates the returned write
- // barrier mode.
- inline WriteBarrierMode GetWriteBarrierMode(const AssertNoAllocation&);
-
- // Dispatched behavior.
- void HeapObjectShortPrint(StringStream* accumulator);
-#ifdef OBJECT_PRINT
- inline void HeapObjectPrint() {
- HeapObjectPrint(stdout);
- }
- void HeapObjectPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void HeapObjectVerify();
- inline void VerifyObjectField(int offset);
- inline void VerifySmiField(int offset);
-#endif
-
-#ifdef OBJECT_PRINT
- void PrintHeader(FILE* out, const char* id);
-#endif
-
-#ifdef DEBUG
- // Verify a pointer is a valid HeapObject pointer that points to object
- // areas in the heap.
- static void VerifyHeapPointer(Object* p);
-#endif
-
- // Layout description.
- // First field in a heap object is map.
- static const int kMapOffset = Object::kHeaderSize;
- static const int kHeaderSize = kMapOffset + kPointerSize;
-
- STATIC_CHECK(kMapOffset == Internals::kHeapObjectMapOffset);
-
- protected:
- // helpers for calling an ObjectVisitor to iterate over pointers in the
- // half-open range [start, end) specified as integer offsets
- inline void IteratePointers(ObjectVisitor* v, int start, int end);
- // as above, for the single element at "offset"
- inline void IteratePointer(ObjectVisitor* v, int offset);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(HeapObject);
-};
-
-
-#define SLOT_ADDR(obj, offset) \
- reinterpret_cast<Object**>((obj)->address() + offset)
-
-// This class describes a body of an object of a fixed size
-// in which all pointer fields are located in the [start_offset, end_offset)
-// interval.
-template<int start_offset, int end_offset, int size>
-class FixedBodyDescriptor {
- public:
- static const int kStartOffset = start_offset;
- static const int kEndOffset = end_offset;
- static const int kSize = size;
-
- static inline void IterateBody(HeapObject* obj, ObjectVisitor* v);
-
- template<typename StaticVisitor>
- static inline void IterateBody(HeapObject* obj) {
- StaticVisitor::VisitPointers(SLOT_ADDR(obj, start_offset),
- SLOT_ADDR(obj, end_offset));
- }
-};
-
-
-// This class describes a body of an object of a variable size
-// in which all pointer fields are located in the [start_offset, object_size)
-// interval.
-template<int start_offset>
-class FlexibleBodyDescriptor {
- public:
- static const int kStartOffset = start_offset;
-
- static inline void IterateBody(HeapObject* obj,
- int object_size,
- ObjectVisitor* v);
-
- template<typename StaticVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size) {
- StaticVisitor::VisitPointers(SLOT_ADDR(obj, start_offset),
- SLOT_ADDR(obj, object_size));
- }
-};
-
-#undef SLOT_ADDR
-
-
-// The HeapNumber class describes heap allocated numbers that cannot be
-// represented in a Smi (small integer)
-class HeapNumber: public HeapObject {
- public:
- // [value]: number value.
- inline double value();
- inline void set_value(double value);
-
- // Casting.
- static inline HeapNumber* cast(Object* obj);
-
- // Dispatched behavior.
- Object* HeapNumberToBoolean();
- inline void HeapNumberPrint() {
- HeapNumberPrint(stdout);
- }
- void HeapNumberPrint(FILE* out);
- void HeapNumberPrint(StringStream* accumulator);
-#ifdef DEBUG
- void HeapNumberVerify();
-#endif
-
- inline int get_exponent();
- inline int get_sign();
-
- // Layout description.
- static const int kValueOffset = HeapObject::kHeaderSize;
- // IEEE doubles are two 32 bit words. The first is just mantissa, the second
- // is a mixture of sign, exponent and mantissa. Our current platforms are all
- // little endian apart from non-EABI arm which is little endian with big
- // endian floating point word ordering!
-#if !defined(V8_HOST_ARCH_ARM) || defined(USE_ARM_EABI)
- static const int kMantissaOffset = kValueOffset;
- static const int kExponentOffset = kValueOffset + 4;
-#else
- static const int kMantissaOffset = kValueOffset + 4;
- static const int kExponentOffset = kValueOffset;
-# define BIG_ENDIAN_FLOATING_POINT 1
-#endif
- static const int kSize = kValueOffset + kDoubleSize;
- static const uint32_t kSignMask = 0x80000000u;
- static const uint32_t kExponentMask = 0x7ff00000u;
- static const uint32_t kMantissaMask = 0xfffffu;
- static const int kMantissaBits = 52;
- static const int kExponentBits = 11;
- static const int kExponentBias = 1023;
- static const int kExponentShift = 20;
- static const int kMantissaBitsInTopWord = 20;
- static const int kNonMantissaBitsInTopWord = 12;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(HeapNumber);
-};
-
-
-// The JSObject describes real heap allocated JavaScript objects with
-// properties.
-// Note that the map of JSObject changes during execution to enable inline
-// caching.
-class JSObject: public HeapObject {
- public:
- enum DeleteMode {
- NORMAL_DELETION,
- STRICT_DELETION,
- FORCE_DELETION
- };
-
- enum ElementsKind {
- // The only "fast" kind.
- FAST_ELEMENTS,
- // All the kinds below are "slow".
- DICTIONARY_ELEMENTS,
- EXTERNAL_BYTE_ELEMENTS,
- EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
- EXTERNAL_SHORT_ELEMENTS,
- EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
- EXTERNAL_INT_ELEMENTS,
- EXTERNAL_UNSIGNED_INT_ELEMENTS,
- EXTERNAL_FLOAT_ELEMENTS,
- EXTERNAL_PIXEL_ELEMENTS
- };
-
- // [properties]: Backing storage for properties.
- // properties is a FixedArray in the fast case and a Dictionary in the
- // slow case.
- DECL_ACCESSORS(properties, FixedArray) // Get and set fast properties.
- inline void initialize_properties();
- inline bool HasFastProperties();
- inline StringDictionary* property_dictionary(); // Gets slow properties.
-
- // [elements]: The elements (properties with names that are integers).
- //
- // Elements can be in two general modes: fast and slow. Each mode
- // corrensponds to a set of object representations of elements that
- // have something in common.
- //
- // In the fast mode elements is a FixedArray and so each element can
- // be quickly accessed. This fact is used in the generated code. The
- // elements array can have one of the two maps in this mode:
- // fixed_array_map or fixed_cow_array_map (for copy-on-write
- // arrays). In the latter case the elements array may be shared by a
- // few objects and so before writing to any element the array must
- // be copied. Use EnsureWritableFastElements in this case.
- //
- // In the slow mode elements is either a NumberDictionary or an ExternalArray.
- DECL_ACCESSORS(elements, HeapObject)
- inline void initialize_elements();
- MUST_USE_RESULT inline MaybeObject* ResetElements();
- inline ElementsKind GetElementsKind();
- inline bool HasFastElements();
- inline bool HasDictionaryElements();
- inline bool HasExternalPixelElements();
- inline bool HasExternalArrayElements();
- inline bool HasExternalByteElements();
- inline bool HasExternalUnsignedByteElements();
- inline bool HasExternalShortElements();
- inline bool HasExternalUnsignedShortElements();
- inline bool HasExternalIntElements();
- inline bool HasExternalUnsignedIntElements();
- inline bool HasExternalFloatElements();
- inline bool AllowsSetElementsLength();
- inline NumberDictionary* element_dictionary(); // Gets slow elements.
- // Requires: this->HasFastElements().
- MUST_USE_RESULT inline MaybeObject* EnsureWritableFastElements();
-
- // Collects elements starting at index 0.
- // Undefined values are placed after non-undefined values.
- // Returns the number of non-undefined values.
- MUST_USE_RESULT MaybeObject* PrepareElementsForSort(uint32_t limit);
- // As PrepareElementsForSort, but only on objects where elements is
- // a dictionary, and it will stay a dictionary.
- MUST_USE_RESULT MaybeObject* PrepareSlowElementsForSort(uint32_t limit);
-
- MUST_USE_RESULT MaybeObject* SetProperty(String* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetProperty(LookupResult* result,
- String* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyWithFailedAccessCheck(
- LookupResult* result,
- String* name,
- Object* value,
- bool check_prototype);
- MUST_USE_RESULT MaybeObject* SetPropertyWithCallback(Object* structure,
- String* name,
- Object* value,
- JSObject* holder);
- MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSFunction* setter,
- Object* value);
- MUST_USE_RESULT MaybeObject* SetPropertyWithInterceptor(
- String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyPostInterceptor(
- String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
- String* key,
- Object* value,
- PropertyAttributes attributes);
-
- // Retrieve a value in a normalized object given a lookup result.
- // Handles the special representation of JS global objects.
- Object* GetNormalizedProperty(LookupResult* result);
-
- // Sets the property value in a normalized object given a lookup result.
- // Handles the special representation of JS global objects.
- Object* SetNormalizedProperty(LookupResult* result, Object* value);
-
- // Sets the property value in a normalized object given (key, value, details).
- // Handles the special representation of JS global objects.
- MUST_USE_RESULT MaybeObject* SetNormalizedProperty(String* name,
- Object* value,
- PropertyDetails details);
-
- // Deletes the named property in a normalized object.
- MUST_USE_RESULT MaybeObject* DeleteNormalizedProperty(String* name,
- DeleteMode mode);
-
- // Returns the class name ([[Class]] property in the specification).
- String* class_name();
-
- // Returns the constructor name (the name (possibly, inferred name) of the
- // function that was used to instantiate the object).
- String* constructor_name();
-
- // Retrieve interceptors.
- InterceptorInfo* GetNamedInterceptor();
- InterceptorInfo* GetIndexedInterceptor();
-
- inline PropertyAttributes GetPropertyAttribute(String* name);
- PropertyAttributes GetPropertyAttributeWithReceiver(JSObject* receiver,
- String* name);
- PropertyAttributes GetLocalPropertyAttribute(String* name);
-
- MUST_USE_RESULT MaybeObject* DefineAccessor(String* name,
- bool is_getter,
- Object* fun,
- PropertyAttributes attributes);
- Object* LookupAccessor(String* name, bool is_getter);
-
- MUST_USE_RESULT MaybeObject* DefineAccessor(AccessorInfo* info);
-
- // Used from Object::GetProperty().
- MaybeObject* GetPropertyWithFailedAccessCheck(
- Object* receiver,
- LookupResult* result,
- String* name,
- PropertyAttributes* attributes);
- MaybeObject* GetPropertyWithInterceptor(
- JSObject* receiver,
- String* name,
- PropertyAttributes* attributes);
- MaybeObject* GetPropertyPostInterceptor(
- JSObject* receiver,
- String* name,
- PropertyAttributes* attributes);
- MaybeObject* GetLocalPropertyPostInterceptor(JSObject* receiver,
- String* name,
- PropertyAttributes* attributes);
-
- // Returns true if this is an instance of an api function and has
- // been modified since it was created. May give false positives.
- bool IsDirty();
-
- bool HasProperty(String* name) {
- return GetPropertyAttribute(name) != ABSENT;
- }
-
- // Can cause a GC if it hits an interceptor.
- bool HasLocalProperty(String* name) {
- return GetLocalPropertyAttribute(name) != ABSENT;
- }
-
- // If the receiver is a JSGlobalProxy this method will return its prototype,
- // otherwise the result is the receiver itself.
- inline Object* BypassGlobalProxy();
-
- // Accessors for hidden properties object.
- //
- // Hidden properties are not local properties of the object itself.
- // Instead they are stored on an auxiliary JSObject stored as a local
- // property with a special name Heap::hidden_symbol(). But if the
- // receiver is a JSGlobalProxy then the auxiliary object is a property
- // of its prototype.
- //
- // Has/Get/SetHiddenPropertiesObject methods don't allow the holder to be
- // a JSGlobalProxy. Use BypassGlobalProxy method above to get to the real
- // holder.
- //
- // These accessors do not touch interceptors or accessors.
- inline bool HasHiddenPropertiesObject();
- inline Object* GetHiddenPropertiesObject();
- MUST_USE_RESULT inline MaybeObject* SetHiddenPropertiesObject(
- Object* hidden_obj);
-
- MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
- MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
-
- // Tests for the fast common case for property enumeration.
- bool IsSimpleEnum();
-
- // Do we want to keep the elements in fast case when increasing the
- // capacity?
- bool ShouldConvertToSlowElements(int new_capacity);
- // Returns true if the backing storage for the slow-case elements of
- // this object takes up nearly as much space as a fast-case backing
- // storage would. In that case the JSObject should have fast
- // elements.
- bool ShouldConvertToFastElements();
-
- // Return the object's prototype (might be Heap::null_value()).
- inline Object* GetPrototype();
-
- // Set the object's prototype (only JSObject and null are allowed).
- MUST_USE_RESULT MaybeObject* SetPrototype(Object* value,
- bool skip_hidden_prototypes);
-
- // Tells whether the index'th element is present.
- inline bool HasElement(uint32_t index);
- bool HasElementWithReceiver(JSObject* receiver, uint32_t index);
-
- // Computes the new capacity when expanding the elements of a JSObject.
- static int NewElementsCapacity(int old_capacity) {
- // (old_capacity + 50%) + 16
- return old_capacity + (old_capacity >> 1) + 16;
- }
-
- // Tells whether the index'th element is present and how it is stored.
- enum LocalElementType {
- // There is no element with given index.
- UNDEFINED_ELEMENT,
-
- // Element with given index is handled by interceptor.
- INTERCEPTED_ELEMENT,
-
- // Element with given index is character in string.
- STRING_CHARACTER_ELEMENT,
-
- // Element with given index is stored in fast backing store.
- FAST_ELEMENT,
-
- // Element with given index is stored in slow backing store.
- DICTIONARY_ELEMENT
- };
-
- LocalElementType HasLocalElement(uint32_t index);
-
- bool HasElementWithInterceptor(JSObject* receiver, uint32_t index);
- bool HasElementPostInterceptor(JSObject* receiver, uint32_t index);
-
- MUST_USE_RESULT MaybeObject* SetFastElement(uint32_t index,
- Object* value,
- StrictModeFlag strict_mode,
- bool check_prototype = true);
-
- // Set the index'th array element.
- // A Failure object is returned if GC is needed.
- MUST_USE_RESULT MaybeObject* SetElement(uint32_t index,
- Object* value,
- StrictModeFlag strict_mode,
- bool check_prototype = true);
-
- // Returns the index'th element.
- // The undefined object if index is out of bounds.
- MaybeObject* GetElementWithReceiver(Object* receiver, uint32_t index);
- MaybeObject* GetElementWithInterceptor(Object* receiver, uint32_t index);
-
- // Get external element value at index if there is one and undefined
- // otherwise. Can return a failure if allocation of a heap number
- // failed.
- MaybeObject* GetExternalElement(uint32_t index);
-
- MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(int capacity,
- int length);
- MUST_USE_RESULT MaybeObject* SetSlowElements(Object* length);
-
- // Lookup interceptors are used for handling properties controlled by host
- // objects.
- inline bool HasNamedInterceptor();
- inline bool HasIndexedInterceptor();
-
- // Support functions for v8 api (needed for correct interceptor behavior).
- bool HasRealNamedProperty(String* key);
- bool HasRealElementProperty(uint32_t index);
- bool HasRealNamedCallbackProperty(String* key);
-
- // Initializes the array to a certain length
- MUST_USE_RESULT MaybeObject* SetElementsLength(Object* length);
-
- // Get the header size for a JSObject. Used to compute the index of
- // internal fields as well as the number of internal fields.
- inline int GetHeaderSize();
-
- inline int GetInternalFieldCount();
- inline int GetInternalFieldOffset(int index);
- inline Object* GetInternalField(int index);
- inline void SetInternalField(int index, Object* value);
-
- // Lookup a property. If found, the result is valid and has
- // detailed information.
- void LocalLookup(String* name, LookupResult* result);
- void Lookup(String* name, LookupResult* result);
-
- // The following lookup functions skip interceptors.
- void LocalLookupRealNamedProperty(String* name, LookupResult* result);
- void LookupRealNamedProperty(String* name, LookupResult* result);
- void LookupRealNamedPropertyInPrototypes(String* name, LookupResult* result);
- void LookupCallbackSetterInPrototypes(String* name, LookupResult* result);
- MUST_USE_RESULT MaybeObject* SetElementWithCallbackSetterInPrototypes(
- uint32_t index, Object* value, bool* found);
- void LookupCallback(String* name, LookupResult* result);
-
- // Returns the number of properties on this object filtering out properties
- // with the specified attributes (ignoring interceptors).
- int NumberOfLocalProperties(PropertyAttributes filter);
- // Returns the number of enumerable properties (ignoring interceptors).
- int NumberOfEnumProperties();
- // Fill in details for properties into storage starting at the specified
- // index.
- void GetLocalPropertyNames(FixedArray* storage, int index);
-
- // Returns the number of properties on this object filtering out properties
- // with the specified attributes (ignoring interceptors).
- int NumberOfLocalElements(PropertyAttributes filter);
- // Returns the number of enumerable elements (ignoring interceptors).
- int NumberOfEnumElements();
- // Returns the number of elements on this object filtering out elements
- // with the specified attributes (ignoring interceptors).
- int GetLocalElementKeys(FixedArray* storage, PropertyAttributes filter);
- // Count and fill in the enumerable elements into storage.
- // (storage->length() == NumberOfEnumElements()).
- // If storage is NULL, will count the elements without adding
- // them to any storage.
- // Returns the number of enumerable elements.
- int GetEnumElementKeys(FixedArray* storage);
-
- // Add a property to a fast-case object using a map transition to
- // new_map.
- MUST_USE_RESULT MaybeObject* AddFastPropertyUsingMap(Map* new_map,
- String* name,
- Object* value);
-
- // Add a constant function property to a fast-case object.
- // This leaves a CONSTANT_TRANSITION in the old map, and
- // if it is called on a second object with this map, a
- // normal property is added instead, with a map transition.
- // This avoids the creation of many maps with the same constant
- // function, all orphaned.
- MUST_USE_RESULT MaybeObject* AddConstantFunctionProperty(
- String* name,
- JSFunction* function,
- PropertyAttributes attributes);
-
- MUST_USE_RESULT MaybeObject* ReplaceSlowProperty(
- String* name,
- Object* value,
- PropertyAttributes attributes);
-
- // Converts a descriptor of any other type to a real field,
- // backed by the properties array. Descriptors of visible
- // types, such as CONSTANT_FUNCTION, keep their enumeration order.
- // Converts the descriptor on the original object's map to a
- // map transition, and the the new field is on the object's new map.
- MUST_USE_RESULT MaybeObject* ConvertDescriptorToFieldAndMapTransition(
- String* name,
- Object* new_value,
- PropertyAttributes attributes);
-
- // Converts a descriptor of any other type to a real field,
- // backed by the properties array. Descriptors of visible
- // types, such as CONSTANT_FUNCTION, keep their enumeration order.
- MUST_USE_RESULT MaybeObject* ConvertDescriptorToField(
- String* name,
- Object* new_value,
- PropertyAttributes attributes);
-
- // Add a property to a fast-case object.
- MUST_USE_RESULT MaybeObject* AddFastProperty(String* name,
- Object* value,
- PropertyAttributes attributes);
-
- // Add a property to a slow-case object.
- MUST_USE_RESULT MaybeObject* AddSlowProperty(String* name,
- Object* value,
- PropertyAttributes attributes);
-
- // Add a property to an object.
- MUST_USE_RESULT MaybeObject* AddProperty(String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
-
- // Convert the object to use the canonical dictionary
- // representation. If the object is expected to have additional properties
- // added this number can be indicated to have the backing store allocated to
- // an initial capacity for holding these properties.
- MUST_USE_RESULT MaybeObject* NormalizeProperties(
- PropertyNormalizationMode mode,
- int expected_additional_properties);
- MUST_USE_RESULT MaybeObject* NormalizeElements();
-
- MUST_USE_RESULT MaybeObject* UpdateMapCodeCache(String* name, Code* code);
-
- // Transform slow named properties to fast variants.
- // Returns failure if allocation failed.
- MUST_USE_RESULT MaybeObject* TransformToFastProperties(
- int unused_property_fields);
-
- // Access fast-case object properties at index.
- inline Object* FastPropertyAt(int index);
- inline Object* FastPropertyAtPut(int index, Object* value);
-
- // Access to in object properties.
- inline int GetInObjectPropertyOffset(int index);
- inline Object* InObjectPropertyAt(int index);
- inline Object* InObjectPropertyAtPut(int index,
- Object* value,
- WriteBarrierMode mode
- = UPDATE_WRITE_BARRIER);
-
- // initializes the body after properties slot, properties slot is
- // initialized by set_properties
- // Note: this call does not update write barrier, it is caller's
- // reponsibility to ensure that *v* can be collected without WB here.
- inline void InitializeBody(int object_size, Object* value);
-
- // Check whether this object references another object
- bool ReferencesObject(Object* obj);
-
- // Casting.
- static inline JSObject* cast(Object* obj);
-
- // Disalow further properties to be added to the object.
- MUST_USE_RESULT MaybeObject* PreventExtensions();
-
-
- // Dispatched behavior.
- void JSObjectShortPrint(StringStream* accumulator);
-#ifdef OBJECT_PRINT
- inline void JSObjectPrint() {
- JSObjectPrint(stdout);
- }
- void JSObjectPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void JSObjectVerify();
-#endif
-#ifdef OBJECT_PRINT
- inline void PrintProperties() {
- PrintProperties(stdout);
- }
- void PrintProperties(FILE* out);
-
- inline void PrintElements() {
- PrintElements(stdout);
- }
- void PrintElements(FILE* out);
-#endif
-
-#ifdef DEBUG
- // Structure for collecting spill information about JSObjects.
- class SpillInformation {
- public:
- void Clear();
- void Print();
- int number_of_objects_;
- int number_of_objects_with_fast_properties_;
- int number_of_objects_with_fast_elements_;
- int number_of_fast_used_fields_;
- int number_of_fast_unused_fields_;
- int number_of_slow_used_properties_;
- int number_of_slow_unused_properties_;
- int number_of_fast_used_elements_;
- int number_of_fast_unused_elements_;
- int number_of_slow_used_elements_;
- int number_of_slow_unused_elements_;
- };
-
- void IncrementSpillStatistics(SpillInformation* info);
-#endif
- Object* SlowReverseLookup(Object* value);
-
- // Maximal number of fast properties for the JSObject. Used to
- // restrict the number of map transitions to avoid an explosion in
- // the number of maps for objects used as dictionaries.
- inline int MaxFastProperties();
-
- // Maximal number of elements (numbered 0 .. kMaxElementCount - 1).
- // Also maximal value of JSArray's length property.
- static const uint32_t kMaxElementCount = 0xffffffffu;
-
- static const uint32_t kMaxGap = 1024;
- static const int kMaxFastElementsLength = 5000;
- static const int kInitialMaxFastElementArray = 100000;
- static const int kMaxFastProperties = 12;
- static const int kMaxInstanceSize = 255 * kPointerSize;
- // When extending the backing storage for property values, we increase
- // its size by more than the 1 entry necessary, so sequentially adding fields
- // to the same object requires fewer allocations and copies.
- static const int kFieldsAdded = 3;
-
- // Layout description.
- static const int kPropertiesOffset = HeapObject::kHeaderSize;
- static const int kElementsOffset = kPropertiesOffset + kPointerSize;
- static const int kHeaderSize = kElementsOffset + kPointerSize;
-
- STATIC_CHECK(kHeaderSize == Internals::kJSObjectHeaderSize);
-
- class BodyDescriptor : public FlexibleBodyDescriptor<kPropertiesOffset> {
- public:
- static inline int SizeOf(Map* map, HeapObject* object);
- };
-
- private:
- MUST_USE_RESULT MaybeObject* GetElementWithCallback(Object* receiver,
- Object* structure,
- uint32_t index,
- Object* holder);
- MaybeObject* SetElementWithCallback(Object* structure,
- uint32_t index,
- Object* value,
- JSObject* holder);
- MUST_USE_RESULT MaybeObject* SetElementWithInterceptor(
- uint32_t index,
- Object* value,
- StrictModeFlag strict_mode,
- bool check_prototype);
- MUST_USE_RESULT MaybeObject* SetElementWithoutInterceptor(
- uint32_t index,
- Object* value,
- StrictModeFlag strict_mode,
- bool check_prototype);
-
- MaybeObject* GetElementPostInterceptor(Object* receiver, uint32_t index);
-
- MUST_USE_RESULT MaybeObject* DeletePropertyPostInterceptor(String* name,
- DeleteMode mode);
- MUST_USE_RESULT MaybeObject* DeletePropertyWithInterceptor(String* name);
-
- MUST_USE_RESULT MaybeObject* DeleteElementPostInterceptor(uint32_t index,
- DeleteMode mode);
- MUST_USE_RESULT MaybeObject* DeleteElementWithInterceptor(uint32_t index);
-
- PropertyAttributes GetPropertyAttributePostInterceptor(JSObject* receiver,
- String* name,
- bool continue_search);
- PropertyAttributes GetPropertyAttributeWithInterceptor(JSObject* receiver,
- String* name,
- bool continue_search);
- PropertyAttributes GetPropertyAttributeWithFailedAccessCheck(
- Object* receiver,
- LookupResult* result,
- String* name,
- bool continue_search);
- PropertyAttributes GetPropertyAttribute(JSObject* receiver,
- LookupResult* result,
- String* name,
- bool continue_search);
-
- // Returns true if most of the elements backing storage is used.
- bool HasDenseElements();
-
- bool CanSetCallback(String* name);
- MUST_USE_RESULT MaybeObject* SetElementCallback(
- uint32_t index,
- Object* structure,
- PropertyAttributes attributes);
- MUST_USE_RESULT MaybeObject* SetPropertyCallback(
- String* name,
- Object* structure,
- PropertyAttributes attributes);
- MUST_USE_RESULT MaybeObject* DefineGetterSetter(
- String* name,
- PropertyAttributes attributes);
-
- void LookupInDescriptor(String* name, LookupResult* result);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
-};
-
-
-// FixedArray describes fixed-sized arrays with element type Object*.
-class FixedArray: public HeapObject {
- public:
- // [length]: length of the array.
- inline int length();
- inline void set_length(int value);
-
- // Setter and getter for elements.
- inline Object* get(int index);
- // Setter that uses write barrier.
- inline void set(int index, Object* value);
-
- // Setter that doesn't need write barrier).
- inline void set(int index, Smi* value);
- // Setter with explicit barrier mode.
- inline void set(int index, Object* value, WriteBarrierMode mode);
-
- // Setters for frequently used oddballs located in old space.
- inline void set_undefined(int index);
- // TODO(isolates): duplicate.
- inline void set_undefined(Heap* heap, int index);
- inline void set_null(int index);
- // TODO(isolates): duplicate.
- inline void set_null(Heap* heap, int index);
- inline void set_the_hole(int index);
-
- // Setters with less debug checks for the GC to use.
- inline void set_unchecked(int index, Smi* value);
- inline void set_null_unchecked(Heap* heap, int index);
- inline void set_unchecked(Heap* heap, int index, Object* value,
- WriteBarrierMode mode);
-
- // Gives access to raw memory which stores the array's data.
- inline Object** data_start();
-
- // Copy operations.
- MUST_USE_RESULT inline MaybeObject* Copy();
- MUST_USE_RESULT MaybeObject* CopySize(int new_length);
-
- // Add the elements of a JSArray to this FixedArray.
- MUST_USE_RESULT MaybeObject* AddKeysFromJSArray(JSArray* array);
-
- // Compute the union of this and other.
- MUST_USE_RESULT MaybeObject* UnionOfKeys(FixedArray* other);
-
- // Copy a sub array from the receiver to dest.
- void CopyTo(int pos, FixedArray* dest, int dest_pos, int len);
-
- // Garbage collection support.
- static int SizeFor(int length) { return kHeaderSize + length * kPointerSize; }
-
- // Code Generation support.
- static int OffsetOfElementAt(int index) { return SizeFor(index); }
-
- // Casting.
- static inline FixedArray* cast(Object* obj);
-
- // Layout description.
- // Length is smi tagged when it is stored.
- static const int kLengthOffset = HeapObject::kHeaderSize;
- static const int kHeaderSize = kLengthOffset + kPointerSize;
-
- // Maximal allowed size, in bytes, of a single FixedArray.
- // Prevents overflowing size computations, as well as extreme memory
- // consumption.
- static const int kMaxSize = 512 * MB;
- // Maximally allowed length of a FixedArray.
- static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;
-
- // Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void FixedArrayPrint() {
- FixedArrayPrint(stdout);
- }
- void FixedArrayPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void FixedArrayVerify();
- // Checks if two FixedArrays have identical contents.
- bool IsEqualTo(FixedArray* other);
-#endif
-
- // Swap two elements in a pair of arrays. If this array and the
- // numbers array are the same object, the elements are only swapped
- // once.
- void SwapPairs(FixedArray* numbers, int i, int j);
-
- // Sort prefix of this array and the numbers array as pairs wrt. the
- // numbers. If the numbers array and the this array are the same
- // object, the prefix of this array is sorted.
- void SortPairs(FixedArray* numbers, uint32_t len);
-
- class BodyDescriptor : public FlexibleBodyDescriptor<kHeaderSize> {
- public:
- static inline int SizeOf(Map* map, HeapObject* object) {
- return SizeFor(reinterpret_cast<FixedArray*>(object)->length());
- }
- };
-
- protected:
- // Set operation on FixedArray without using write barriers. Can
- // only be used for storing old space objects or smis.
- static inline void fast_set(FixedArray* array, int index, Object* value);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
-};
-
-
-// DescriptorArrays are fixed arrays used to hold instance descriptors.
-// The format of the these objects is:
-// [0]: point to a fixed array with (value, detail) pairs.
-// [1]: next enumeration index (Smi), or pointer to small fixed array:
-// [0]: next enumeration index (Smi)
-// [1]: pointer to fixed array with enum cache
-// [2]: first key
-// [length() - 1]: last key
-//
-class DescriptorArray: public FixedArray {
- public:
- // Is this the singleton empty_descriptor_array?
- inline bool IsEmpty();
-
- // Returns the number of descriptors in the array.
- int number_of_descriptors() {
- ASSERT(length() > kFirstIndex || IsEmpty());
- int len = length();
- return len <= kFirstIndex ? 0 : len - kFirstIndex;
- }
-
- int NextEnumerationIndex() {
- if (IsEmpty()) return PropertyDetails::kInitialIndex;
- Object* obj = get(kEnumerationIndexIndex);
- if (obj->IsSmi()) {
- return Smi::cast(obj)->value();
- } else {
- Object* index = FixedArray::cast(obj)->get(kEnumCacheBridgeEnumIndex);
- return Smi::cast(index)->value();
- }
- }
-
- // Set next enumeration index and flush any enum cache.
- void SetNextEnumerationIndex(int value) {
- if (!IsEmpty()) {
- fast_set(this, kEnumerationIndexIndex, Smi::FromInt(value));
- }
- }
- bool HasEnumCache() {
- return !IsEmpty() && !get(kEnumerationIndexIndex)->IsSmi();
- }
-
- Object* GetEnumCache() {
- ASSERT(HasEnumCache());
- FixedArray* bridge = FixedArray::cast(get(kEnumerationIndexIndex));
- return bridge->get(kEnumCacheBridgeCacheIndex);
- }
-
- // Initialize or change the enum cache,
- // using the supplied storage for the small "bridge".
- void SetEnumCache(FixedArray* bridge_storage, FixedArray* new_cache);
-
- // Accessors for fetching instance descriptor at descriptor number.
- inline String* GetKey(int descriptor_number);
- inline Object* GetValue(int descriptor_number);
- inline Smi* GetDetails(int descriptor_number);
- inline PropertyType GetType(int descriptor_number);
- inline int GetFieldIndex(int descriptor_number);
- inline JSFunction* GetConstantFunction(int descriptor_number);
- inline Object* GetCallbacksObject(int descriptor_number);
- inline AccessorDescriptor* GetCallbacks(int descriptor_number);
- inline bool IsProperty(int descriptor_number);
- inline bool IsTransition(int descriptor_number);
- inline bool IsNullDescriptor(int descriptor_number);
- inline bool IsDontEnum(int descriptor_number);
-
- // Accessor for complete descriptor.
- inline void Get(int descriptor_number, Descriptor* desc);
- inline void Set(int descriptor_number, Descriptor* desc);
-
- // Transfer complete descriptor from another descriptor array to
- // this one.
- inline void CopyFrom(int index, DescriptorArray* src, int src_index);
-
- // Copy the descriptor array, insert a new descriptor and optionally
- // remove map transitions. If the descriptor is already present, it is
- // replaced. If a replaced descriptor is a real property (not a transition
- // or null), its enumeration index is kept as is.
- // If adding a real property, map transitions must be removed. If adding
- // a transition, they must not be removed. All null descriptors are removed.
- MUST_USE_RESULT MaybeObject* CopyInsert(Descriptor* descriptor,
- TransitionFlag transition_flag);
-
- // Remove all transitions. Return a copy of the array with all transitions
- // removed, or a Failure object if the new array could not be allocated.
- MUST_USE_RESULT MaybeObject* RemoveTransitions();
-
- // Sort the instance descriptors by the hash codes of their keys.
- // Does not check for duplicates.
- void SortUnchecked();
-
- // Sort the instance descriptors by the hash codes of their keys.
- // Checks the result for duplicates.
- void Sort();
-
- // Search the instance descriptors for given name.
- inline int Search(String* name);
-
- // As the above, but uses DescriptorLookupCache and updates it when
- // necessary.
- inline int SearchWithCache(String* name);
-
- // Tells whether the name is present int the array.
- bool Contains(String* name) { return kNotFound != Search(name); }
-
- // Perform a binary search in the instance descriptors represented
- // by this fixed array. low and high are descriptor indices. If there
- // are three instance descriptors in this array it should be called
- // with low=0 and high=2.
- int BinarySearch(String* name, int low, int high);
-
- // Perform a linear search in the instance descriptors represented
- // by this fixed array. len is the number of descriptor indices that are
- // valid. Does not require the descriptors to be sorted.
- int LinearSearch(String* name, int len);
-
- // Allocates a DescriptorArray, but returns the singleton
- // empty descriptor array object if number_of_descriptors is 0.
- MUST_USE_RESULT static MaybeObject* Allocate(int number_of_descriptors);
-
- // Casting.
- static inline DescriptorArray* cast(Object* obj);
-
- // Constant for denoting key was not found.
- static const int kNotFound = -1;
-
- static const int kContentArrayIndex = 0;
- static const int kEnumerationIndexIndex = 1;
- static const int kFirstIndex = 2;
-
- // The length of the "bridge" to the enum cache.
- static const int kEnumCacheBridgeLength = 2;
- static const int kEnumCacheBridgeEnumIndex = 0;
- static const int kEnumCacheBridgeCacheIndex = 1;
-
- // Layout description.
- static const int kContentArrayOffset = FixedArray::kHeaderSize;
- static const int kEnumerationIndexOffset = kContentArrayOffset + kPointerSize;
- static const int kFirstOffset = kEnumerationIndexOffset + kPointerSize;
-
- // Layout description for the bridge array.
- static const int kEnumCacheBridgeEnumOffset = FixedArray::kHeaderSize;
- static const int kEnumCacheBridgeCacheOffset =
- kEnumCacheBridgeEnumOffset + kPointerSize;
-
-#ifdef OBJECT_PRINT
- // Print all the descriptors.
- inline void PrintDescriptors() {
- PrintDescriptors(stdout);
- }
- void PrintDescriptors(FILE* out);
-#endif
-
-#ifdef DEBUG
- // Is the descriptor array sorted and without duplicates?
- bool IsSortedNoDuplicates();
-
- // Are two DescriptorArrays equal?
- bool IsEqualTo(DescriptorArray* other);
-#endif
-
- // The maximum number of descriptors we want in a descriptor array (should
- // fit in a page).
- static const int kMaxNumberOfDescriptors = 1024 + 512;
-
- private:
- // Conversion from descriptor number to array indices.
- static int ToKeyIndex(int descriptor_number) {
- return descriptor_number+kFirstIndex;
- }
-
- static int ToDetailsIndex(int descriptor_number) {
- return (descriptor_number << 1) + 1;
- }
-
- static int ToValueIndex(int descriptor_number) {
- return descriptor_number << 1;
- }
-
- bool is_null_descriptor(int descriptor_number) {
- return PropertyDetails(GetDetails(descriptor_number)).type() ==
- NULL_DESCRIPTOR;
- }
- // Swap operation on FixedArray without using write barriers.
- static inline void fast_swap(FixedArray* array, int first, int second);
-
- // Swap descriptor first and second.
- inline void Swap(int first, int second);
-
- FixedArray* GetContentArray() {
- return FixedArray::cast(get(kContentArrayIndex));
- }
- DISALLOW_IMPLICIT_CONSTRUCTORS(DescriptorArray);
-};
-
-
-// HashTable is a subclass of FixedArray that implements a hash table
-// that uses open addressing and quadratic probing.
-//
-// In order for the quadratic probing to work, elements that have not
-// yet been used and elements that have been deleted are
-// distinguished. Probing continues when deleted elements are
-// encountered and stops when unused elements are encountered.
-//
-// - Elements with key == undefined have not been used yet.
-// - Elements with key == null have been deleted.
-//
-// The hash table class is parameterized with a Shape and a Key.
-// Shape must be a class with the following interface:
-// class ExampleShape {
-// public:
-// // Tells whether key matches other.
-// static bool IsMatch(Key key, Object* other);
-// // Returns the hash value for key.
-// static uint32_t Hash(Key key);
-// // Returns the hash value for object.
-// static uint32_t HashForObject(Key key, Object* object);
-// // Convert key to an object.
-// static inline Object* AsObject(Key key);
-// // The prefix size indicates number of elements in the beginning
-// // of the backing storage.
-// static const int kPrefixSize = ..;
-// // The Element size indicates number of elements per entry.
-// static const int kEntrySize = ..;
-// };
-// The prefix size indicates an amount of memory in the
-// beginning of the backing storage that can be used for non-element
-// information by subclasses.
-
-template<typename Shape, typename Key>
-class HashTable: public FixedArray {
- public:
- // Returns the number of elements in the hash table.
- int NumberOfElements() {
- return Smi::cast(get(kNumberOfElementsIndex))->value();
- }
-
- // Returns the number of deleted elements in the hash table.
- int NumberOfDeletedElements() {
- return Smi::cast(get(kNumberOfDeletedElementsIndex))->value();
- }
-
- // Returns the capacity of the hash table.
- int Capacity() {
- return Smi::cast(get(kCapacityIndex))->value();
- }
-
- // ElementAdded should be called whenever an element is added to a
- // hash table.
- void ElementAdded() { SetNumberOfElements(NumberOfElements() + 1); }
-
- // ElementRemoved should be called whenever an element is removed from
- // a hash table.
- void ElementRemoved() {
- SetNumberOfElements(NumberOfElements() - 1);
- SetNumberOfDeletedElements(NumberOfDeletedElements() + 1);
- }
- void ElementsRemoved(int n) {
- SetNumberOfElements(NumberOfElements() - n);
- SetNumberOfDeletedElements(NumberOfDeletedElements() + n);
- }
-
- // Returns a new HashTable object. Might return Failure.
- MUST_USE_RESULT static MaybeObject* Allocate(
- int at_least_space_for,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Returns the key at entry.
- Object* KeyAt(int entry) { return get(EntryToIndex(entry)); }
-
- // Tells whether k is a real key. Null and undefined are not allowed
- // as keys and can be used to indicate missing or deleted elements.
- bool IsKey(Object* k) {
- return !k->IsNull() && !k->IsUndefined();
- }
-
- // Garbage collection support.
- void IteratePrefix(ObjectVisitor* visitor);
- void IterateElements(ObjectVisitor* visitor);
-
- // Casting.
- static inline HashTable* cast(Object* obj);
-
- // Compute the probe offset (quadratic probing).
- INLINE(static uint32_t GetProbeOffset(uint32_t n)) {
- return (n + n * n) >> 1;
- }
-
- static const int kNumberOfElementsIndex = 0;
- static const int kNumberOfDeletedElementsIndex = 1;
- static const int kCapacityIndex = 2;
- static const int kPrefixStartIndex = 3;
- static const int kElementsStartIndex =
- kPrefixStartIndex + Shape::kPrefixSize;
- static const int kEntrySize = Shape::kEntrySize;
- static const int kElementsStartOffset =
- kHeaderSize + kElementsStartIndex * kPointerSize;
- static const int kCapacityOffset =
- kHeaderSize + kCapacityIndex * kPointerSize;
-
- // Constant used for denoting a absent entry.
- static const int kNotFound = -1;
-
- // Maximal capacity of HashTable. Based on maximal length of underlying
- // FixedArray. Staying below kMaxCapacity also ensures that EntryToIndex
- // cannot overflow.
- static const int kMaxCapacity =
- (FixedArray::kMaxLength - kElementsStartOffset) / kEntrySize;
-
- // Find entry for key otherwise return kNotFound.
- inline int FindEntry(Key key);
- int FindEntry(Isolate* isolate, Key key);
-
- protected:
-
- // Find the entry at which to insert element with the given key that
- // has the given hash value.
- uint32_t FindInsertionEntry(uint32_t hash);
-
- // Returns the index for an entry (of the key)
- static inline int EntryToIndex(int entry) {
- return (entry * kEntrySize) + kElementsStartIndex;
- }
-
- // Update the number of elements in the hash table.
- void SetNumberOfElements(int nof) {
- fast_set(this, kNumberOfElementsIndex, Smi::FromInt(nof));
- }
-
- // Update the number of deleted elements in the hash table.
- void SetNumberOfDeletedElements(int nod) {
- fast_set(this, kNumberOfDeletedElementsIndex, Smi::FromInt(nod));
- }
-
- // Sets the capacity of the hash table.
- void SetCapacity(int capacity) {
- // To scale a computed hash code to fit within the hash table, we
- // use bit-wise AND with a mask, so the capacity must be positive
- // and non-zero.
- ASSERT(capacity > 0);
- ASSERT(capacity <= kMaxCapacity);
- fast_set(this, kCapacityIndex, Smi::FromInt(capacity));
- }
-
-
- // Returns probe entry.
- static uint32_t GetProbe(uint32_t hash, uint32_t number, uint32_t size) {
- ASSERT(IsPowerOf2(size));
- return (hash + GetProbeOffset(number)) & (size - 1);
- }
-
- static uint32_t FirstProbe(uint32_t hash, uint32_t size) {
- return hash & (size - 1);
- }
-
- static uint32_t NextProbe(uint32_t last, uint32_t number, uint32_t size) {
- return (last + number) & (size - 1);
- }
-
- // Ensure enough space for n additional elements.
- MUST_USE_RESULT MaybeObject* EnsureCapacity(int n, Key key);
-};
-
-
-
-// HashTableKey is an abstract superclass for virtual key behavior.
-class HashTableKey {
- public:
- // Returns whether the other object matches this key.
- virtual bool IsMatch(Object* other) = 0;
- // Returns the hash value for this key.
- virtual uint32_t Hash() = 0;
- // Returns the hash value for object.
- virtual uint32_t HashForObject(Object* key) = 0;
- // Returns the key object for storing into the hash table.
- // If allocations fails a failure object is returned.
- MUST_USE_RESULT virtual MaybeObject* AsObject() = 0;
- // Required.
- virtual ~HashTableKey() {}
-};
-
-class SymbolTableShape {
- public:
- static inline bool IsMatch(HashTableKey* key, Object* value) {
- return key->IsMatch(value);
- }
- static inline uint32_t Hash(HashTableKey* key) {
- return key->Hash();
- }
- static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
- return key->HashForObject(object);
- }
- MUST_USE_RESULT static inline MaybeObject* AsObject(HashTableKey* key) {
- return key->AsObject();
- }
-
- static const int kPrefixSize = 0;
- static const int kEntrySize = 1;
-};
-
-// SymbolTable.
-//
-// No special elements in the prefix and the element size is 1
-// because only the symbol itself (the key) needs to be stored.
-class SymbolTable: public HashTable<SymbolTableShape, HashTableKey*> {
- public:
- // Find symbol in the symbol table. If it is not there yet, it is
- // added. The return value is the symbol table which might have
- // been enlarged. If the return value is not a failure, the symbol
- // pointer *s is set to the symbol found.
- MUST_USE_RESULT MaybeObject* LookupSymbol(Vector<const char> str, Object** s);
- MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Vector<const char> str,
- Object** s);
- MUST_USE_RESULT MaybeObject* LookupTwoByteSymbol(Vector<const uc16> str,
- Object** s);
- MUST_USE_RESULT MaybeObject* LookupString(String* key, Object** s);
-
- // Looks up a symbol that is equal to the given string and returns
- // true if it is found, assigning the symbol to the given output
- // parameter.
- bool LookupSymbolIfExists(String* str, String** symbol);
- bool LookupTwoCharsSymbolIfExists(uint32_t c1, uint32_t c2, String** symbol);
-
- // Casting.
- static inline SymbolTable* cast(Object* obj);
-
- private:
- MUST_USE_RESULT MaybeObject* LookupKey(HashTableKey* key, Object** s);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(SymbolTable);
-};
-
-
-class MapCacheShape {
- public:
- static inline bool IsMatch(HashTableKey* key, Object* value) {
- return key->IsMatch(value);
- }
- static inline uint32_t Hash(HashTableKey* key) {
- return key->Hash();
- }
-
- static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
- return key->HashForObject(object);
- }
-
- MUST_USE_RESULT static inline MaybeObject* AsObject(HashTableKey* key) {
- return key->AsObject();
- }
-
- static const int kPrefixSize = 0;
- static const int kEntrySize = 2;
-};
-
-
-// MapCache.
-//
-// Maps keys that are a fixed array of symbols to a map.
-// Used for canonicalize maps for object literals.
-class MapCache: public HashTable<MapCacheShape, HashTableKey*> {
- public:
- // Find cached value for a string key, otherwise return null.
- Object* Lookup(FixedArray* key);
- MUST_USE_RESULT MaybeObject* Put(FixedArray* key, Map* value);
- static inline MapCache* cast(Object* obj);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(MapCache);
-};
-
-
-template <typename Shape, typename Key>
-class Dictionary: public HashTable<Shape, Key> {
- public:
-
- static inline Dictionary<Shape, Key>* cast(Object* obj) {
- return reinterpret_cast<Dictionary<Shape, Key>*>(obj);
- }
-
- // Returns the value at entry.
- Object* ValueAt(int entry) {
- return this->get(HashTable<Shape, Key>::EntryToIndex(entry)+1);
- }
-
- // Set the value for entry.
- // Returns false if the put wasn't performed due to property being read only.
- // Returns true on successful put.
- bool ValueAtPut(int entry, Object* value) {
- // Check that this value can actually be written.
- PropertyDetails details = DetailsAt(entry);
- // If a value has not been initilized we allow writing to it even if
- // it is read only (a declared const that has not been initialized).
- if (details.IsReadOnly() && !ValueAt(entry)->IsTheHole()) {
- return false;
- }
- this->set(HashTable<Shape, Key>::EntryToIndex(entry) + 1, value);
- return true;
- }
-
- // Returns the property details for the property at entry.
- PropertyDetails DetailsAt(int entry) {
- ASSERT(entry >= 0); // Not found is -1, which is not caught by get().
- return PropertyDetails(
- Smi::cast(this->get(HashTable<Shape, Key>::EntryToIndex(entry) + 2)));
- }
-
- // Set the details for entry.
- void DetailsAtPut(int entry, PropertyDetails value) {
- this->set(HashTable<Shape, Key>::EntryToIndex(entry) + 2, value.AsSmi());
- }
-
- // Sorting support
- void CopyValuesTo(FixedArray* elements);
-
- // Delete a property from the dictionary.
- Object* DeleteProperty(int entry, JSObject::DeleteMode mode);
-
- // Returns the number of elements in the dictionary filtering out properties
- // with the specified attributes.
- int NumberOfElementsFilterAttributes(PropertyAttributes filter);
-
- // Returns the number of enumerable elements in the dictionary.
- int NumberOfEnumElements();
-
- // Copies keys to preallocated fixed array.
- void CopyKeysTo(FixedArray* storage, PropertyAttributes filter);
- // Fill in details for properties into storage.
- void CopyKeysTo(FixedArray* storage);
-
- // Accessors for next enumeration index.
- void SetNextEnumerationIndex(int index) {
- this->fast_set(this, kNextEnumerationIndexIndex, Smi::FromInt(index));
- }
-
- int NextEnumerationIndex() {
- return Smi::cast(FixedArray::get(kNextEnumerationIndexIndex))->value();
- }
-
- // Returns a new array for dictionary usage. Might return Failure.
- MUST_USE_RESULT static MaybeObject* Allocate(int at_least_space_for);
-
- // Ensure enough space for n additional elements.
- MUST_USE_RESULT MaybeObject* EnsureCapacity(int n, Key key);
-
-#ifdef OBJECT_PRINT
- inline void Print() {
- Print(stdout);
- }
- void Print(FILE* out);
-#endif
- // Returns the key (slow).
- Object* SlowReverseLookup(Object* value);
-
- // Sets the entry to (key, value) pair.
- inline void SetEntry(int entry,
- Object* key,
- Object* value,
- PropertyDetails details);
-
- MUST_USE_RESULT MaybeObject* Add(Key key,
- Object* value,
- PropertyDetails details);
-
- protected:
- // Generic at put operation.
- MUST_USE_RESULT MaybeObject* AtPut(Key key, Object* value);
-
- // Add entry to dictionary.
- MUST_USE_RESULT MaybeObject* AddEntry(Key key,
- Object* value,
- PropertyDetails details,
- uint32_t hash);
-
- // Generate new enumeration indices to avoid enumeration index overflow.
- MUST_USE_RESULT MaybeObject* GenerateNewEnumerationIndices();
- static const int kMaxNumberKeyIndex =
- HashTable<Shape, Key>::kPrefixStartIndex;
- static const int kNextEnumerationIndexIndex = kMaxNumberKeyIndex + 1;
-};
-
-
-class StringDictionaryShape {
- public:
- static inline bool IsMatch(String* key, Object* other);
- static inline uint32_t Hash(String* key);
- static inline uint32_t HashForObject(String* key, Object* object);
- MUST_USE_RESULT static inline MaybeObject* AsObject(String* key);
- static const int kPrefixSize = 2;
- static const int kEntrySize = 3;
- static const bool kIsEnumerable = true;
-};
-
-
-class StringDictionary: public Dictionary<StringDictionaryShape, String*> {
- public:
- static inline StringDictionary* cast(Object* obj) {
- ASSERT(obj->IsDictionary());
- return reinterpret_cast<StringDictionary*>(obj);
- }
-
- // Copies enumerable keys to preallocated fixed array.
- void CopyEnumKeysTo(FixedArray* storage, FixedArray* sort_array);
-
- // For transforming properties of a JSObject.
- MUST_USE_RESULT MaybeObject* TransformPropertiesToFastFor(
- JSObject* obj,
- int unused_property_fields);
-
- // Find entry for key otherwise return kNotFound. Optimzed version of
- // HashTable::FindEntry.
- int FindEntry(String* key);
-};
-
-
-class NumberDictionaryShape {
- public:
- static inline bool IsMatch(uint32_t key, Object* other);
- static inline uint32_t Hash(uint32_t key);
- static inline uint32_t HashForObject(uint32_t key, Object* object);
- MUST_USE_RESULT static inline MaybeObject* AsObject(uint32_t key);
- static const int kPrefixSize = 2;
- static const int kEntrySize = 3;
- static const bool kIsEnumerable = false;
-};
-
-
-class NumberDictionary: public Dictionary<NumberDictionaryShape, uint32_t> {
- public:
- static NumberDictionary* cast(Object* obj) {
- ASSERT(obj->IsDictionary());
- return reinterpret_cast<NumberDictionary*>(obj);
- }
-
- // Type specific at put (default NONE attributes is used when adding).
- MUST_USE_RESULT MaybeObject* AtNumberPut(uint32_t key, Object* value);
- MUST_USE_RESULT MaybeObject* AddNumberEntry(uint32_t key,
- Object* value,
- PropertyDetails details);
-
- // Set an existing entry or add a new one if needed.
- MUST_USE_RESULT MaybeObject* Set(uint32_t key,
- Object* value,
- PropertyDetails details);
-
- void UpdateMaxNumberKey(uint32_t key);
-
- // If slow elements are required we will never go back to fast-case
- // for the elements kept in this dictionary. We require slow
- // elements if an element has been added at an index larger than
- // kRequiresSlowElementsLimit or set_requires_slow_elements() has been called
- // when defining a getter or setter with a number key.
- inline bool requires_slow_elements();
- inline void set_requires_slow_elements();
-
- // Get the value of the max number key that has been added to this
- // dictionary. max_number_key can only be called if
- // requires_slow_elements returns false.
- inline uint32_t max_number_key();
-
- // Remove all entries were key is a number and (from <= key && key < to).
- void RemoveNumberEntries(uint32_t from, uint32_t to);
-
- // Bit masks.
- static const int kRequiresSlowElementsMask = 1;
- static const int kRequiresSlowElementsTagSize = 1;
- static const uint32_t kRequiresSlowElementsLimit = (1 << 29) - 1;
-};
-
-
-// JSFunctionResultCache caches results of some JSFunction invocation.
-// It is a fixed array with fixed structure:
-// [0]: factory function
-// [1]: finger index
-// [2]: current cache size
-// [3]: dummy field.
-// The rest of array are key/value pairs.
-class JSFunctionResultCache: public FixedArray {
- public:
- static const int kFactoryIndex = 0;
- static const int kFingerIndex = kFactoryIndex + 1;
- static const int kCacheSizeIndex = kFingerIndex + 1;
- static const int kDummyIndex = kCacheSizeIndex + 1;
- static const int kEntriesIndex = kDummyIndex + 1;
-
- static const int kEntrySize = 2; // key + value
-
- static const int kFactoryOffset = kHeaderSize;
- static const int kFingerOffset = kFactoryOffset + kPointerSize;
- static const int kCacheSizeOffset = kFingerOffset + kPointerSize;
-
- inline void MakeZeroSize();
- inline void Clear();
-
- inline int size();
- inline void set_size(int size);
- inline int finger_index();
- inline void set_finger_index(int finger_index);
-
- // Casting
- static inline JSFunctionResultCache* cast(Object* obj);
-
-#ifdef DEBUG
- void JSFunctionResultCacheVerify();
-#endif
-};
-
-
-// The cache for maps used by normalized (dictionary mode) objects.
-// Such maps do not have property descriptors, so a typical program
-// needs very limited number of distinct normalized maps.
-class NormalizedMapCache: public FixedArray {
- public:
- static const int kEntries = 64;
-
- MUST_USE_RESULT MaybeObject* Get(JSObject* object,
- PropertyNormalizationMode mode);
-
- void Clear();
-
- // Casting
- static inline NormalizedMapCache* cast(Object* obj);
-
-#ifdef DEBUG
- void NormalizedMapCacheVerify();
-#endif
-
- private:
- static int Hash(Map* fast);
-
- static bool CheckHit(Map* slow, Map* fast, PropertyNormalizationMode mode);
-};
-
-
-// ByteArray represents fixed sized byte arrays. Used by the outside world,
-// such as PCRE, and also by the memory allocator and garbage collector to
-// fill in free blocks in the heap.
-class ByteArray: public HeapObject {
- public:
- // [length]: length of the array.
- inline int length();
- inline void set_length(int value);
-
- // Setter and getter.
- inline byte get(int index);
- inline void set(int index, byte value);
-
- // Treat contents as an int array.
- inline int get_int(int index);
-
- static int SizeFor(int length) {
- return OBJECT_POINTER_ALIGN(kHeaderSize + length);
- }
- // We use byte arrays for free blocks in the heap. Given a desired size in
- // bytes that is a multiple of the word size and big enough to hold a byte
- // array, this function returns the number of elements a byte array should
- // have.
- static int LengthFor(int size_in_bytes) {
- ASSERT(IsAligned(size_in_bytes, kPointerSize));
- ASSERT(size_in_bytes >= kHeaderSize);
- return size_in_bytes - kHeaderSize;
- }
-
- // Returns data start address.
- inline Address GetDataStartAddress();
-
- // Returns a pointer to the ByteArray object for a given data start address.
- static inline ByteArray* FromDataStartAddress(Address address);
-
- // Casting.
- static inline ByteArray* cast(Object* obj);
-
- // Dispatched behavior.
- inline int ByteArraySize() {
- return SizeFor(this->length());
- }
-#ifdef OBJECT_PRINT
- inline void ByteArrayPrint() {
- ByteArrayPrint(stdout);
- }
- void ByteArrayPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void ByteArrayVerify();
-#endif
-
- // Layout description.
- // Length is smi tagged when it is stored.
- static const int kLengthOffset = HeapObject::kHeaderSize;
- static const int kHeaderSize = kLengthOffset + kPointerSize;
-
- static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
-
- // Maximal memory consumption for a single ByteArray.
- static const int kMaxSize = 512 * MB;
- // Maximal length of a single ByteArray.
- static const int kMaxLength = kMaxSize - kHeaderSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArray);
-};
-
-
-// An ExternalArray represents a fixed-size array of primitive values
-// which live outside the JavaScript heap. Its subclasses are used to
-// implement the CanvasArray types being defined in the WebGL
-// specification. As of this writing the first public draft is not yet
-// available, but Khronos members can access the draft at:
-// https://cvs.khronos.org/svn/repos/3dweb/trunk/doc/spec/WebGL-spec.html
-//
-// The semantics of these arrays differ from CanvasPixelArray.
-// Out-of-range values passed to the setter are converted via a C
-// cast, not clamping. Out-of-range indices cause exceptions to be
-// raised rather than being silently ignored.
-class ExternalArray: public HeapObject {
- public:
- // [length]: length of the array.
- inline int length();
- inline void set_length(int value);
-
- // [external_pointer]: The pointer to the external memory area backing this
- // external array.
- DECL_ACCESSORS(external_pointer, void) // Pointer to the data store.
-
- // Casting.
- static inline ExternalArray* cast(Object* obj);
-
- // Maximal acceptable length for an external array.
- static const int kMaxLength = 0x3fffffff;
-
- // ExternalArray headers are not quadword aligned.
- static const int kLengthOffset = HeapObject::kHeaderSize;
- static const int kExternalPointerOffset =
- POINTER_SIZE_ALIGN(kLengthOffset + kIntSize);
- static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
- static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalArray);
-};
-
-
-// A ExternalPixelArray represents a fixed-size byte array with special
-// semantics used for implementing the CanvasPixelArray object. Please see the
-// specification at:
-
-// http://www.whatwg.org/specs/web-apps/current-work/
-// multipage/the-canvas-element.html#canvaspixelarray
-// In particular, write access clamps the value written to 0 or 255 if the
-// value written is outside this range.
-class ExternalPixelArray: public ExternalArray {
- public:
- inline uint8_t* external_pixel_pointer();
-
- // Setter and getter.
- inline uint8_t get(int index);
- inline void set(int index, uint8_t value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber and
- // undefined and clamps the converted value between 0 and 255.
- Object* SetValue(uint32_t index, Object* value);
-
- // Casting.
- static inline ExternalPixelArray* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- inline void ExternalPixelArrayPrint() {
- ExternalPixelArrayPrint(stdout);
- }
- void ExternalPixelArrayPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void ExternalPixelArrayVerify();
-#endif // DEBUG
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalPixelArray);
-};
-
-
-class ExternalByteArray: public ExternalArray {
- public:
- // Setter and getter.
- inline int8_t get(int index);
- inline void set(int index, int8_t value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- MaybeObject* SetValue(uint32_t index, Object* value);
-
- // Casting.
- static inline ExternalByteArray* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- inline void ExternalByteArrayPrint() {
- ExternalByteArrayPrint(stdout);
- }
- void ExternalByteArrayPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void ExternalByteArrayVerify();
-#endif // DEBUG
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalByteArray);
-};
-
-
-class ExternalUnsignedByteArray: public ExternalArray {
- public:
- // Setter and getter.
- inline uint8_t get(int index);
- inline void set(int index, uint8_t value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- MaybeObject* SetValue(uint32_t index, Object* value);
-
- // Casting.
- static inline ExternalUnsignedByteArray* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- inline void ExternalUnsignedByteArrayPrint() {
- ExternalUnsignedByteArrayPrint(stdout);
- }
- void ExternalUnsignedByteArrayPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void ExternalUnsignedByteArrayVerify();
-#endif // DEBUG
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedByteArray);
-};
-
-
-class ExternalShortArray: public ExternalArray {
- public:
- // Setter and getter.
- inline int16_t get(int index);
- inline void set(int index, int16_t value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- MaybeObject* SetValue(uint32_t index, Object* value);
-
- // Casting.
- static inline ExternalShortArray* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- inline void ExternalShortArrayPrint() {
- ExternalShortArrayPrint(stdout);
- }
- void ExternalShortArrayPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void ExternalShortArrayVerify();
-#endif // DEBUG
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalShortArray);
-};
-
-
-class ExternalUnsignedShortArray: public ExternalArray {
- public:
- // Setter and getter.
- inline uint16_t get(int index);
- inline void set(int index, uint16_t value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- MaybeObject* SetValue(uint32_t index, Object* value);
-
- // Casting.
- static inline ExternalUnsignedShortArray* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- inline void ExternalUnsignedShortArrayPrint() {
- ExternalUnsignedShortArrayPrint(stdout);
- }
- void ExternalUnsignedShortArrayPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void ExternalUnsignedShortArrayVerify();
-#endif // DEBUG
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedShortArray);
-};
-
-
-class ExternalIntArray: public ExternalArray {
- public:
- // Setter and getter.
- inline int32_t get(int index);
- inline void set(int index, int32_t value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- MaybeObject* SetValue(uint32_t index, Object* value);
-
- // Casting.
- static inline ExternalIntArray* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- inline void ExternalIntArrayPrint() {
- ExternalIntArrayPrint(stdout);
- }
- void ExternalIntArrayPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void ExternalIntArrayVerify();
-#endif // DEBUG
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalIntArray);
-};
-
-
-class ExternalUnsignedIntArray: public ExternalArray {
- public:
- // Setter and getter.
- inline uint32_t get(int index);
- inline void set(int index, uint32_t value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- MaybeObject* SetValue(uint32_t index, Object* value);
-
- // Casting.
- static inline ExternalUnsignedIntArray* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- inline void ExternalUnsignedIntArrayPrint() {
- ExternalUnsignedIntArrayPrint(stdout);
- }
- void ExternalUnsignedIntArrayPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void ExternalUnsignedIntArrayVerify();
-#endif // DEBUG
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedIntArray);
-};
-
-
-class ExternalFloatArray: public ExternalArray {
- public:
- // Setter and getter.
- inline float get(int index);
- inline void set(int index, float value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- MaybeObject* SetValue(uint32_t index, Object* value);
-
- // Casting.
- static inline ExternalFloatArray* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- inline void ExternalFloatArrayPrint() {
- ExternalFloatArrayPrint(stdout);
- }
- void ExternalFloatArrayPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void ExternalFloatArrayVerify();
-#endif // DEBUG
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalFloatArray);
-};
-
-
-// DeoptimizationInputData is a fixed array used to hold the deoptimization
-// data for code generated by the Hydrogen/Lithium compiler. It also
-// contains information about functions that were inlined. If N different
-// functions were inlined then first N elements of the literal array will
-// contain these functions.
-//
-// It can be empty.
-class DeoptimizationInputData: public FixedArray {
- public:
- // Layout description. Indices in the array.
- static const int kTranslationByteArrayIndex = 0;
- static const int kInlinedFunctionCountIndex = 1;
- static const int kLiteralArrayIndex = 2;
- static const int kOsrAstIdIndex = 3;
- static const int kOsrPcOffsetIndex = 4;
- static const int kFirstDeoptEntryIndex = 5;
-
- // Offsets of deopt entry elements relative to the start of the entry.
- static const int kAstIdOffset = 0;
- static const int kTranslationIndexOffset = 1;
- static const int kArgumentsStackHeightOffset = 2;
- static const int kDeoptEntrySize = 3;
-
- // Simple element accessors.
-#define DEFINE_ELEMENT_ACCESSORS(name, type) \
- type* name() { \
- return type::cast(get(k##name##Index)); \
- } \
- void Set##name(type* value) { \
- set(k##name##Index, value); \
- }
-
- DEFINE_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
- DEFINE_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
- DEFINE_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
- DEFINE_ELEMENT_ACCESSORS(OsrAstId, Smi)
- DEFINE_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
-
- // Unchecked accessor to be used during GC.
- FixedArray* UncheckedLiteralArray() {
- return reinterpret_cast<FixedArray*>(get(kLiteralArrayIndex));
- }
-
-#undef DEFINE_ELEMENT_ACCESSORS
-
- // Accessors for elements of the ith deoptimization entry.
-#define DEFINE_ENTRY_ACCESSORS(name, type) \
- type* name(int i) { \
- return type::cast(get(IndexForEntry(i) + k##name##Offset)); \
- } \
- void Set##name(int i, type* value) { \
- set(IndexForEntry(i) + k##name##Offset, value); \
- }
-
- DEFINE_ENTRY_ACCESSORS(AstId, Smi)
- DEFINE_ENTRY_ACCESSORS(TranslationIndex, Smi)
- DEFINE_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi)
-
-#undef DEFINE_ENTRY_ACCESSORS
-
- int DeoptCount() {
- return (length() - kFirstDeoptEntryIndex) / kDeoptEntrySize;
- }
-
- // Allocates a DeoptimizationInputData.
- MUST_USE_RESULT static MaybeObject* Allocate(int deopt_entry_count,
- PretenureFlag pretenure);
-
- // Casting.
- static inline DeoptimizationInputData* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- void DeoptimizationInputDataPrint(FILE* out);
-#endif
-
- private:
- static int IndexForEntry(int i) {
- return kFirstDeoptEntryIndex + (i * kDeoptEntrySize);
- }
-
- static int LengthFor(int entry_count) {
- return IndexForEntry(entry_count);
- }
-};
-
-
-// DeoptimizationOutputData is a fixed array used to hold the deoptimization
-// data for code generated by the full compiler.
-// The format of the these objects is
-// [i * 2]: Ast ID for ith deoptimization.
-// [i * 2 + 1]: PC and state of ith deoptimization
-class DeoptimizationOutputData: public FixedArray {
- public:
- int DeoptPoints() { return length() / 2; }
- Smi* AstId(int index) { return Smi::cast(get(index * 2)); }
- void SetAstId(int index, Smi* id) { set(index * 2, id); }
- Smi* PcAndState(int index) { return Smi::cast(get(1 + index * 2)); }
- void SetPcAndState(int index, Smi* offset) { set(1 + index * 2, offset); }
-
- static int LengthOfFixedArray(int deopt_points) {
- return deopt_points * 2;
- }
-
- // Allocates a DeoptimizationOutputData.
- MUST_USE_RESULT static MaybeObject* Allocate(int number_of_deopt_points,
- PretenureFlag pretenure);
-
- // Casting.
- static inline DeoptimizationOutputData* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- void DeoptimizationOutputDataPrint(FILE* out);
-#endif
-};
-
-
-class SafepointEntry;
-
-
-// Code describes objects with on-the-fly generated machine code.
-class Code: public HeapObject {
- public:
- // Opaque data type for encapsulating code flags like kind, inline
- // cache state, and arguments count.
- // FLAGS_MIN_VALUE and FLAGS_MAX_VALUE are specified to ensure that
- // enumeration type has correct value range (see Issue 830 for more details).
- enum Flags {
- FLAGS_MIN_VALUE = kMinInt,
- FLAGS_MAX_VALUE = kMaxInt
- };
-
- enum Kind {
- FUNCTION,
- OPTIMIZED_FUNCTION,
- STUB,
- BUILTIN,
- LOAD_IC,
- KEYED_LOAD_IC,
- KEYED_EXTERNAL_ARRAY_LOAD_IC,
- CALL_IC,
- KEYED_CALL_IC,
- STORE_IC,
- KEYED_STORE_IC,
- KEYED_EXTERNAL_ARRAY_STORE_IC,
- BINARY_OP_IC,
- TYPE_RECORDING_BINARY_OP_IC,
- COMPARE_IC,
- // No more than 16 kinds. The value currently encoded in four bits in
- // Flags.
-
- // Pseudo-kinds.
- REGEXP = BUILTIN,
- FIRST_IC_KIND = LOAD_IC,
- LAST_IC_KIND = COMPARE_IC
- };
-
- enum {
- NUMBER_OF_KINDS = LAST_IC_KIND + 1
- };
-
- typedef int ExtraICState;
-
- static const ExtraICState kNoExtraICState = 0;
-
-#ifdef ENABLE_DISASSEMBLER
- // Printing
- static const char* Kind2String(Kind kind);
- static const char* ICState2String(InlineCacheState state);
- static const char* PropertyType2String(PropertyType type);
- static void PrintExtraICState(FILE* out, Kind kind, ExtraICState extra);
- inline void Disassemble(const char* name) {
- Disassemble(name, stdout);
- }
- void Disassemble(const char* name, FILE* out);
-#endif // ENABLE_DISASSEMBLER
-
- // [instruction_size]: Size of the native instructions
- inline int instruction_size();
- inline void set_instruction_size(int value);
-
- // [relocation_info]: Code relocation information
- DECL_ACCESSORS(relocation_info, ByteArray)
- void InvalidateRelocation();
-
- // [deoptimization_data]: Array containing data for deopt.
- DECL_ACCESSORS(deoptimization_data, FixedArray)
-
- // Unchecked accessors to be used during GC.
- inline ByteArray* unchecked_relocation_info();
- inline FixedArray* unchecked_deoptimization_data();
-
- inline int relocation_size();
-
- // [flags]: Various code flags.
- inline Flags flags();
- inline void set_flags(Flags flags);
-
- // [flags]: Access to specific code flags.
- inline Kind kind();
- inline InlineCacheState ic_state(); // Only valid for IC stubs.
- inline ExtraICState extra_ic_state(); // Only valid for IC stubs.
- inline InLoopFlag ic_in_loop(); // Only valid for IC stubs.
- inline PropertyType type(); // Only valid for monomorphic IC stubs.
- inline int arguments_count(); // Only valid for call IC stubs.
-
- // Testers for IC stub kinds.
- inline bool is_inline_cache_stub();
- inline bool is_load_stub() { return kind() == LOAD_IC; }
- inline bool is_keyed_load_stub() { return kind() == KEYED_LOAD_IC; }
- inline bool is_store_stub() { return kind() == STORE_IC; }
- inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
- inline bool is_call_stub() { return kind() == CALL_IC; }
- inline bool is_keyed_call_stub() { return kind() == KEYED_CALL_IC; }
- inline bool is_binary_op_stub() { return kind() == BINARY_OP_IC; }
- inline bool is_type_recording_binary_op_stub() {
- return kind() == TYPE_RECORDING_BINARY_OP_IC;
- }
- inline bool is_compare_ic_stub() { return kind() == COMPARE_IC; }
- inline bool is_external_array_load_stub() {
- return kind() == KEYED_EXTERNAL_ARRAY_LOAD_IC;
- }
- inline bool is_external_array_store_stub() {
- return kind() == KEYED_EXTERNAL_ARRAY_STORE_IC;
- }
-
- // [major_key]: For kind STUB or BINARY_OP_IC, the major key.
- inline int major_key();
- inline void set_major_key(int value);
-
- // [optimizable]: For FUNCTION kind, tells if it is optimizable.
- inline bool optimizable();
- inline void set_optimizable(bool value);
-
- // [has_deoptimization_support]: For FUNCTION kind, tells if it has
- // deoptimization support.
- inline bool has_deoptimization_support();
- inline void set_has_deoptimization_support(bool value);
-
- // [allow_osr_at_loop_nesting_level]: For FUNCTION kind, tells for
- // how long the function has been marked for OSR and therefore which
- // level of loop nesting we are willing to do on-stack replacement
- // for.
- inline void set_allow_osr_at_loop_nesting_level(int level);
- inline int allow_osr_at_loop_nesting_level();
-
- // [stack_slots]: For kind OPTIMIZED_FUNCTION, the number of stack slots
- // reserved in the code prologue.
- inline unsigned stack_slots();
- inline void set_stack_slots(unsigned slots);
-
- // [safepoint_table_start]: For kind OPTIMIZED_CODE, the offset in
- // the instruction stream where the safepoint table starts.
- inline unsigned safepoint_table_offset();
- inline void set_safepoint_table_offset(unsigned offset);
-
- // [stack_check_table_start]: For kind FUNCTION, the offset in the
- // instruction stream where the stack check table starts.
- inline unsigned stack_check_table_offset();
- inline void set_stack_check_table_offset(unsigned offset);
-
- // [check type]: For kind CALL_IC, tells how to check if the
- // receiver is valid for the given call.
- inline CheckType check_type();
- inline void set_check_type(CheckType value);
-
- // [external array type]: For kind KEYED_EXTERNAL_ARRAY_LOAD_IC and
- // KEYED_EXTERNAL_ARRAY_STORE_IC, identifies the type of external
- // array that the code stub is specialized for.
- inline ExternalArrayType external_array_type();
- inline void set_external_array_type(ExternalArrayType value);
-
- // [binary op type]: For all BINARY_OP_IC.
- inline byte binary_op_type();
- inline void set_binary_op_type(byte value);
-
- // [type-recording binary op type]: For all TYPE_RECORDING_BINARY_OP_IC.
- inline byte type_recording_binary_op_type();
- inline void set_type_recording_binary_op_type(byte value);
- inline byte type_recording_binary_op_result_type();
- inline void set_type_recording_binary_op_result_type(byte value);
-
- // [compare state]: For kind compare IC stubs, tells what state the
- // stub is in.
- inline byte compare_state();
- inline void set_compare_state(byte value);
-
- // Get the safepoint entry for the given pc.
- SafepointEntry GetSafepointEntry(Address pc);
-
- // Mark this code object as not having a stack check table. Assumes kind
- // is FUNCTION.
- void SetNoStackCheckTable();
-
- // Find the first map in an IC stub.
- Map* FindFirstMap();
-
- // Flags operations.
- static inline Flags ComputeFlags(
- Kind kind,
- InLoopFlag in_loop = NOT_IN_LOOP,
- InlineCacheState ic_state = UNINITIALIZED,
- ExtraICState extra_ic_state = kNoExtraICState,
- PropertyType type = NORMAL,
- int argc = -1,
- InlineCacheHolderFlag holder = OWN_MAP);
-
- static inline Flags ComputeMonomorphicFlags(
- Kind kind,
- PropertyType type,
- ExtraICState extra_ic_state = kNoExtraICState,
- InlineCacheHolderFlag holder = OWN_MAP,
- InLoopFlag in_loop = NOT_IN_LOOP,
- int argc = -1);
-
- static inline Kind ExtractKindFromFlags(Flags flags);
- static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
- static inline ExtraICState ExtractExtraICStateFromFlags(Flags flags);
- static inline InLoopFlag ExtractICInLoopFromFlags(Flags flags);
- static inline PropertyType ExtractTypeFromFlags(Flags flags);
- static inline int ExtractArgumentsCountFromFlags(Flags flags);
- static inline InlineCacheHolderFlag ExtractCacheHolderFromFlags(Flags flags);
- static inline Flags RemoveTypeFromFlags(Flags flags);
-
- // Convert a target address into a code object.
- static inline Code* GetCodeFromTargetAddress(Address address);
-
- // Convert an entry address into an object.
- static inline Object* GetObjectFromEntryAddress(Address location_of_address);
-
- // Returns the address of the first instruction.
- inline byte* instruction_start();
-
- // Returns the address right after the last instruction.
- inline byte* instruction_end();
-
- // Returns the size of the instructions, padding, and relocation information.
- inline int body_size();
-
- // Returns the address of the first relocation info (read backwards!).
- inline byte* relocation_start();
-
- // Code entry point.
- inline byte* entry();
-
- // Returns true if pc is inside this object's instructions.
- inline bool contains(byte* pc);
-
- // Relocate the code by delta bytes. Called to signal that this code
- // object has been moved by delta bytes.
- void Relocate(intptr_t delta);
-
- // Migrate code described by desc.
- void CopyFrom(const CodeDesc& desc);
-
- // Returns the object size for a given body (used for allocation).
- static int SizeFor(int body_size) {
- ASSERT_SIZE_TAG_ALIGNED(body_size);
- return RoundUp(kHeaderSize + body_size, kCodeAlignment);
- }
-
- // Calculate the size of the code object to report for log events. This takes
- // the layout of the code object into account.
- int ExecutableSize() {
- // Check that the assumptions about the layout of the code object holds.
- ASSERT_EQ(static_cast<int>(instruction_start() - address()),
- Code::kHeaderSize);
- return instruction_size() + Code::kHeaderSize;
- }
-
- // Locating source position.
- int SourcePosition(Address pc);
- int SourceStatementPosition(Address pc);
-
- // Casting.
- static inline Code* cast(Object* obj);
-
- // Dispatched behavior.
- int CodeSize() { return SizeFor(body_size()); }
- inline void CodeIterateBody(ObjectVisitor* v);
-
- template<typename StaticVisitor>
- inline void CodeIterateBody(Heap* heap);
-#ifdef OBJECT_PRINT
- inline void CodePrint() {
- CodePrint(stdout);
- }
- void CodePrint(FILE* out);
-#endif
-#ifdef DEBUG
- void CodeVerify();
-#endif
-
- // Returns the isolate/heap this code object belongs to.
- inline Isolate* isolate();
- inline Heap* heap();
-
- // Max loop nesting marker used to postpose OSR. We don't take loop
- // nesting that is deeper than 5 levels into account.
- static const int kMaxLoopNestingMarker = 6;
-
- // Layout description.
- static const int kInstructionSizeOffset = HeapObject::kHeaderSize;
- static const int kRelocationInfoOffset = kInstructionSizeOffset + kIntSize;
- static const int kDeoptimizationDataOffset =
- kRelocationInfoOffset + kPointerSize;
- static const int kFlagsOffset = kDeoptimizationDataOffset + kPointerSize;
- static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
-
- static const int kKindSpecificFlagsSize = 2 * kIntSize;
-
- static const int kHeaderPaddingStart = kKindSpecificFlagsOffset +
- kKindSpecificFlagsSize;
-
- // Add padding to align the instruction start following right after
- // the Code object header.
- static const int kHeaderSize =
- (kHeaderPaddingStart + kCodeAlignmentMask) & ~kCodeAlignmentMask;
-
- // Byte offsets within kKindSpecificFlagsOffset.
- static const int kStubMajorKeyOffset = kKindSpecificFlagsOffset;
- static const int kOptimizableOffset = kKindSpecificFlagsOffset;
- static const int kStackSlotsOffset = kKindSpecificFlagsOffset;
- static const int kCheckTypeOffset = kKindSpecificFlagsOffset;
- static const int kExternalArrayTypeOffset = kKindSpecificFlagsOffset;
-
- static const int kCompareStateOffset = kStubMajorKeyOffset + 1;
- static const int kBinaryOpTypeOffset = kStubMajorKeyOffset + 1;
- static const int kHasDeoptimizationSupportOffset = kOptimizableOffset + 1;
-
- static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;
- static const int kAllowOSRAtLoopNestingLevelOffset =
- kHasDeoptimizationSupportOffset + 1;
-
- static const int kSafepointTableOffsetOffset = kStackSlotsOffset + kIntSize;
- static const int kStackCheckTableOffsetOffset = kStackSlotsOffset + kIntSize;
-
- // Flags layout.
- static const int kFlagsICStateShift = 0;
- static const int kFlagsICInLoopShift = 3;
- static const int kFlagsTypeShift = 4;
- static const int kFlagsKindShift = 8;
- static const int kFlagsICHolderShift = 12;
- static const int kFlagsExtraICStateShift = 13;
- static const int kFlagsArgumentsCountShift = 15;
-
- static const int kFlagsICStateMask = 0x00000007; // 00000000111
- static const int kFlagsICInLoopMask = 0x00000008; // 00000001000
- static const int kFlagsTypeMask = 0x000000F0; // 00001110000
- static const int kFlagsKindMask = 0x00000F00; // 11110000000
- static const int kFlagsCacheInPrototypeMapMask = 0x00001000;
- static const int kFlagsExtraICStateMask = 0x00006000;
- static const int kFlagsArgumentsCountMask = 0xFFFF8000;
-
- static const int kFlagsNotUsedInLookup =
- (kFlagsICInLoopMask | kFlagsTypeMask | kFlagsCacheInPrototypeMapMask);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
-};
-
-
-// All heap objects have a Map that describes their structure.
-// A Map contains information about:
-// - Size information about the object
-// - How to iterate over an object (for garbage collection)
-class Map: public HeapObject {
- public:
- // Instance size.
- // Size in bytes or kVariableSizeSentinel if instances do not have
- // a fixed size.
- inline int instance_size();
- inline void set_instance_size(int value);
-
- // Count of properties allocated in the object.
- inline int inobject_properties();
- inline void set_inobject_properties(int value);
-
- // Count of property fields pre-allocated in the object when first allocated.
- inline int pre_allocated_property_fields();
- inline void set_pre_allocated_property_fields(int value);
-
- // Instance type.
- inline InstanceType instance_type();
- inline void set_instance_type(InstanceType value);
-
- // Tells how many unused property fields are available in the
- // instance (only used for JSObject in fast mode).
- inline int unused_property_fields();
- inline void set_unused_property_fields(int value);
-
- // Bit field.
- inline byte bit_field();
- inline void set_bit_field(byte value);
-
- // Bit field 2.
- inline byte bit_field2();
- inline void set_bit_field2(byte value);
-
- // Tells whether the object in the prototype property will be used
- // for instances created from this function. If the prototype
- // property is set to a value that is not a JSObject, the prototype
- // property will not be used to create instances of the function.
- // See ECMA-262, 13.2.2.
- inline void set_non_instance_prototype(bool value);
- inline bool has_non_instance_prototype();
-
- // Tells whether function has special prototype property. If not, prototype
- // property will not be created when accessed (will return undefined),
- // and construction from this function will not be allowed.
- inline void set_function_with_prototype(bool value);
- inline bool function_with_prototype();
-
- // Tells whether the instance with this map should be ignored by the
- // __proto__ accessor.
- inline void set_is_hidden_prototype() {
- set_bit_field(bit_field() | (1 << kIsHiddenPrototype));
- }
-
- inline bool is_hidden_prototype() {
- return ((1 << kIsHiddenPrototype) & bit_field()) != 0;
- }
-
- // Records and queries whether the instance has a named interceptor.
- inline void set_has_named_interceptor() {
- set_bit_field(bit_field() | (1 << kHasNamedInterceptor));
- }
-
- inline bool has_named_interceptor() {
- return ((1 << kHasNamedInterceptor) & bit_field()) != 0;
- }
-
- // Records and queries whether the instance has an indexed interceptor.
- inline void set_has_indexed_interceptor() {
- set_bit_field(bit_field() | (1 << kHasIndexedInterceptor));
- }
-
- inline bool has_indexed_interceptor() {
- return ((1 << kHasIndexedInterceptor) & bit_field()) != 0;
- }
-
- // Tells whether the instance is undetectable.
- // An undetectable object is a special class of JSObject: 'typeof' operator
- // returns undefined, ToBoolean returns false. Otherwise it behaves like
- // a normal JS object. It is useful for implementing undetectable
- // document.all in Firefox & Safari.
- // See https://bugzilla.mozilla.org/show_bug.cgi?id=248549.
- inline void set_is_undetectable() {
- set_bit_field(bit_field() | (1 << kIsUndetectable));
- }
-
- inline bool is_undetectable() {
- return ((1 << kIsUndetectable) & bit_field()) != 0;
- }
-
- // Tells whether the instance has a call-as-function handler.
- inline void set_has_instance_call_handler() {
- set_bit_field(bit_field() | (1 << kHasInstanceCallHandler));
- }
-
- inline bool has_instance_call_handler() {
- return ((1 << kHasInstanceCallHandler) & bit_field()) != 0;
- }
-
- inline void set_is_extensible(bool value);
- inline bool is_extensible();
-
- // Tells whether the instance has fast elements.
- // Equivalent to instance->GetElementsKind() == FAST_ELEMENTS.
- inline void set_has_fast_elements(bool value) {
- if (value) {
- set_bit_field2(bit_field2() | (1 << kHasFastElements));
- } else {
- set_bit_field2(bit_field2() & ~(1 << kHasFastElements));
- }
- }
-
- inline bool has_fast_elements() {
- return ((1 << kHasFastElements) & bit_field2()) != 0;
- }
-
- // Tells whether an instance has pixel array elements.
- inline void set_has_external_array_elements(bool value) {
- if (value) {
- set_bit_field2(bit_field2() | (1 << kHasExternalArrayElements));
- } else {
- set_bit_field2(bit_field2() & ~(1 << kHasExternalArrayElements));
- }
- }
-
- inline bool has_external_array_elements() {
- return ((1 << kHasExternalArrayElements) & bit_field2()) != 0;
- }
-
- // Tells whether the map is attached to SharedFunctionInfo
- // (for inobject slack tracking).
- inline void set_attached_to_shared_function_info(bool value);
-
- inline bool attached_to_shared_function_info();
-
- // Tells whether the map is shared between objects that may have different
- // behavior. If true, the map should never be modified, instead a clone
- // should be created and modified.
- inline void set_is_shared(bool value);
-
- inline bool is_shared();
-
- // Tells whether the instance needs security checks when accessing its
- // properties.
- inline void set_is_access_check_needed(bool access_check_needed);
- inline bool is_access_check_needed();
-
- // [prototype]: implicit prototype object.
- DECL_ACCESSORS(prototype, Object)
-
- // [constructor]: points back to the function responsible for this map.
- DECL_ACCESSORS(constructor, Object)
-
- inline JSFunction* unchecked_constructor();
-
- // [instance descriptors]: describes the object.
- DECL_ACCESSORS(instance_descriptors, DescriptorArray)
-
- // [stub cache]: contains stubs compiled for this map.
- DECL_ACCESSORS(code_cache, Object)
-
- // Lookup in the map's instance descriptors and fill out the result
- // with the given holder if the name is found. The holder may be
- // NULL when this function is used from the compiler.
- void LookupInDescriptors(JSObject* holder,
- String* name,
- LookupResult* result);
-
- MUST_USE_RESULT MaybeObject* CopyDropDescriptors();
-
- MUST_USE_RESULT MaybeObject* CopyNormalized(PropertyNormalizationMode mode,
- NormalizedMapSharingMode sharing);
-
- // Returns a copy of the map, with all transitions dropped from the
- // instance descriptors.
- MUST_USE_RESULT MaybeObject* CopyDropTransitions();
-
- // Returns this map if it has the fast elements bit set, otherwise
- // returns a copy of the map, with all transitions dropped from the
- // descriptors and the fast elements bit set.
- MUST_USE_RESULT inline MaybeObject* GetFastElementsMap();
-
- // Returns this map if it has the fast elements bit cleared,
- // otherwise returns a copy of the map, with all transitions dropped
- // from the descriptors and the fast elements bit cleared.
- MUST_USE_RESULT inline MaybeObject* GetSlowElementsMap();
-
- // Returns a new map with all transitions dropped from the descriptors and the
- // external array elements bit set.
- MUST_USE_RESULT MaybeObject* GetExternalArrayElementsMap(
- ExternalArrayType array_type,
- bool safe_to_add_transition);
-
- // Returns the property index for name (only valid for FAST MODE).
- int PropertyIndexFor(String* name);
-
- // Returns the next free property index (only valid for FAST MODE).
- int NextFreePropertyIndex();
-
- // Returns the number of properties described in instance_descriptors.
- int NumberOfDescribedProperties();
-
- // Casting.
- static inline Map* cast(Object* obj);
-
- // Locate an accessor in the instance descriptor.
- AccessorDescriptor* FindAccessor(String* name);
-
- // Code cache operations.
-
- // Clears the code cache.
- inline void ClearCodeCache(Heap* heap);
-
- // Update code cache.
- MUST_USE_RESULT MaybeObject* UpdateCodeCache(String* name, Code* code);
-
- // Returns the found code or undefined if absent.
- Object* FindInCodeCache(String* name, Code::Flags flags);
-
- // Returns the non-negative index of the code object if it is in the
- // cache and -1 otherwise.
- int IndexInCodeCache(Object* name, Code* code);
-
- // Removes a code object from the code cache at the given index.
- void RemoveFromCodeCache(String* name, Code* code, int index);
-
- // For every transition in this map, makes the transition's
- // target's prototype pointer point back to this map.
- // This is undone in MarkCompactCollector::ClearNonLiveTransitions().
- void CreateBackPointers();
-
- // Set all map transitions from this map to dead maps to null.
- // Also, restore the original prototype on the targets of these
- // transitions, so that we do not process this map again while
- // following back pointers.
- void ClearNonLiveTransitions(Heap* heap, Object* real_prototype);
-
- // Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void MapPrint() {
- MapPrint(stdout);
- }
- void MapPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void MapVerify();
- void SharedMapVerify();
-#endif
-
- inline int visitor_id();
- inline void set_visitor_id(int visitor_id);
-
- // Returns the isolate/heap this map belongs to.
- inline Isolate* isolate();
- inline Heap* heap();
-
- typedef void (*TraverseCallback)(Map* map, void* data);
-
- void TraverseTransitionTree(TraverseCallback callback, void* data);
-
- static const int kMaxPreAllocatedPropertyFields = 255;
-
- // Layout description.
- static const int kInstanceSizesOffset = HeapObject::kHeaderSize;
- static const int kInstanceAttributesOffset = kInstanceSizesOffset + kIntSize;
- static const int kPrototypeOffset = kInstanceAttributesOffset + kIntSize;
- static const int kConstructorOffset = kPrototypeOffset + kPointerSize;
- static const int kInstanceDescriptorsOffset =
- kConstructorOffset + kPointerSize;
- static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize;
- static const int kPadStart = kCodeCacheOffset + kPointerSize;
- static const int kSize = MAP_POINTER_ALIGN(kPadStart);
-
- // Layout of pointer fields. Heap iteration code relies on them
- // being continiously allocated.
- static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
- static const int kPointerFieldsEndOffset =
- Map::kCodeCacheOffset + kPointerSize;
-
- // Byte offsets within kInstanceSizesOffset.
- static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
- static const int kInObjectPropertiesByte = 1;
- static const int kInObjectPropertiesOffset =
- kInstanceSizesOffset + kInObjectPropertiesByte;
- static const int kPreAllocatedPropertyFieldsByte = 2;
- static const int kPreAllocatedPropertyFieldsOffset =
- kInstanceSizesOffset + kPreAllocatedPropertyFieldsByte;
- static const int kVisitorIdByte = 3;
- static const int kVisitorIdOffset = kInstanceSizesOffset + kVisitorIdByte;
-
- // Byte offsets within kInstanceAttributesOffset attributes.
- static const int kInstanceTypeOffset = kInstanceAttributesOffset + 0;
- static const int kUnusedPropertyFieldsOffset = kInstanceAttributesOffset + 1;
- static const int kBitFieldOffset = kInstanceAttributesOffset + 2;
- static const int kBitField2Offset = kInstanceAttributesOffset + 3;
-
- STATIC_CHECK(kInstanceTypeOffset == Internals::kMapInstanceTypeOffset);
-
- // Bit positions for bit field.
- static const int kUnused = 0; // To be used for marking recently used maps.
- static const int kHasNonInstancePrototype = 1;
- static const int kIsHiddenPrototype = 2;
- static const int kHasNamedInterceptor = 3;
- static const int kHasIndexedInterceptor = 4;
- static const int kIsUndetectable = 5;
- static const int kHasInstanceCallHandler = 6;
- static const int kIsAccessCheckNeeded = 7;
-
- // Bit positions for bit field 2
- static const int kIsExtensible = 0;
- static const int kFunctionWithPrototype = 1;
- static const int kHasFastElements = 2;
- static const int kStringWrapperSafeForDefaultValueOf = 3;
- static const int kAttachedToSharedFunctionInfo = 4;
- static const int kIsShared = 5;
- static const int kHasExternalArrayElements = 6;
-
- // Layout of the default cache. It holds alternating name and code objects.
- static const int kCodeCacheEntrySize = 2;
- static const int kCodeCacheEntryNameOffset = 0;
- static const int kCodeCacheEntryCodeOffset = 1;
-
- typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
- kPointerFieldsEndOffset,
- kSize> BodyDescriptor;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
-};
-
-
-// An abstract superclass, a marker class really, for simple structure classes.
-// It doesn't carry much functionality but allows struct classes to me
-// identified in the type system.
-class Struct: public HeapObject {
- public:
- inline void InitializeBody(int object_size);
- static inline Struct* cast(Object* that);
-};
-
-
-// Script describes a script which has been added to the VM.
-class Script: public Struct {
- public:
- // Script types.
- enum Type {
- TYPE_NATIVE = 0,
- TYPE_EXTENSION = 1,
- TYPE_NORMAL = 2
- };
-
- // Script compilation types.
- enum CompilationType {
- COMPILATION_TYPE_HOST = 0,
- COMPILATION_TYPE_EVAL = 1
- };
-
- // [source]: the script source.
- DECL_ACCESSORS(source, Object)
-
- // [name]: the script name.
- DECL_ACCESSORS(name, Object)
-
- // [id]: the script id.
- DECL_ACCESSORS(id, Object)
-
- // [line_offset]: script line offset in resource from where it was extracted.
- DECL_ACCESSORS(line_offset, Smi)
-
- // [column_offset]: script column offset in resource from where it was
- // extracted.
- DECL_ACCESSORS(column_offset, Smi)
-
- // [data]: additional data associated with this script.
- DECL_ACCESSORS(data, Object)
-
- // [context_data]: context data for the context this script was compiled in.
- DECL_ACCESSORS(context_data, Object)
-
- // [wrapper]: the wrapper cache.
- DECL_ACCESSORS(wrapper, Proxy)
-
- // [type]: the script type.
- DECL_ACCESSORS(type, Smi)
-
- // [compilation]: how the the script was compiled.
- DECL_ACCESSORS(compilation_type, Smi)
-
- // [line_ends]: FixedArray of line ends positions.
- DECL_ACCESSORS(line_ends, Object)
-
- // [eval_from_shared]: for eval scripts the shared funcion info for the
- // function from which eval was called.
- DECL_ACCESSORS(eval_from_shared, Object)
-
- // [eval_from_instructions_offset]: the instruction offset in the code for the
- // function from which eval was called where eval was called.
- DECL_ACCESSORS(eval_from_instructions_offset, Smi)
-
- static inline Script* cast(Object* obj);
-
- // If script source is an external string, check that the underlying
- // resource is accessible. Otherwise, always return true.
- inline bool HasValidSource();
-
-#ifdef OBJECT_PRINT
- inline void ScriptPrint() {
- ScriptPrint(stdout);
- }
- void ScriptPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void ScriptVerify();
-#endif
-
- static const int kSourceOffset = HeapObject::kHeaderSize;
- static const int kNameOffset = kSourceOffset + kPointerSize;
- static const int kLineOffsetOffset = kNameOffset + kPointerSize;
- static const int kColumnOffsetOffset = kLineOffsetOffset + kPointerSize;
- static const int kDataOffset = kColumnOffsetOffset + kPointerSize;
- static const int kContextOffset = kDataOffset + kPointerSize;
- static const int kWrapperOffset = kContextOffset + kPointerSize;
- static const int kTypeOffset = kWrapperOffset + kPointerSize;
- static const int kCompilationTypeOffset = kTypeOffset + kPointerSize;
- static const int kLineEndsOffset = kCompilationTypeOffset + kPointerSize;
- static const int kIdOffset = kLineEndsOffset + kPointerSize;
- static const int kEvalFromSharedOffset = kIdOffset + kPointerSize;
- static const int kEvalFrominstructionsOffsetOffset =
- kEvalFromSharedOffset + kPointerSize;
- static const int kSize = kEvalFrominstructionsOffsetOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Script);
-};
-
-
-// List of builtin functions we want to identify to improve code
-// generation.
-//
-// Each entry has a name of a global object property holding an object
-// optionally followed by ".prototype", a name of a builtin function
-// on the object (the one the id is set for), and a label.
-//
-// Installation of ids for the selected builtin functions is handled
-// by the bootstrapper.
-//
-// NOTE: Order is important: math functions should be at the end of
-// the list and MathFloor should be the first math function.
-#define FUNCTIONS_WITH_ID_LIST(V) \
- V(Array.prototype, push, ArrayPush) \
- V(Array.prototype, pop, ArrayPop) \
- V(String.prototype, charCodeAt, StringCharCodeAt) \
- V(String.prototype, charAt, StringCharAt) \
- V(String, fromCharCode, StringFromCharCode) \
- V(Math, floor, MathFloor) \
- V(Math, round, MathRound) \
- V(Math, ceil, MathCeil) \
- V(Math, abs, MathAbs) \
- V(Math, log, MathLog) \
- V(Math, sin, MathSin) \
- V(Math, cos, MathCos) \
- V(Math, tan, MathTan) \
- V(Math, asin, MathASin) \
- V(Math, acos, MathACos) \
- V(Math, atan, MathATan) \
- V(Math, exp, MathExp) \
- V(Math, sqrt, MathSqrt) \
- V(Math, pow, MathPow)
-
-
-enum BuiltinFunctionId {
-#define DECLARE_FUNCTION_ID(ignored1, ignore2, name) \
- k##name,
- FUNCTIONS_WITH_ID_LIST(DECLARE_FUNCTION_ID)
-#undef DECLARE_FUNCTION_ID
- // Fake id for a special case of Math.pow. Note, it continues the
- // list of math functions.
- kMathPowHalf,
- kFirstMathFunctionId = kMathFloor
-};
-
-
-// SharedFunctionInfo describes the JSFunction information that can be
-// shared by multiple instances of the function.
-class SharedFunctionInfo: public HeapObject {
- public:
- // [name]: Function name.
- DECL_ACCESSORS(name, Object)
-
- // [code]: Function code.
- DECL_ACCESSORS(code, Code)
-
- // [scope_info]: Scope info.
- DECL_ACCESSORS(scope_info, SerializedScopeInfo)
-
- // [construct stub]: Code stub for constructing instances of this function.
- DECL_ACCESSORS(construct_stub, Code)
-
- inline Code* unchecked_code();
-
- // Returns if this function has been compiled to native code yet.
- inline bool is_compiled();
-
- // [length]: The function length - usually the number of declared parameters.
- // Use up to 2^30 parameters.
- inline int length();
- inline void set_length(int value);
-
- // [formal parameter count]: The declared number of parameters.
- inline int formal_parameter_count();
- inline void set_formal_parameter_count(int value);
-
- // Set the formal parameter count so the function code will be
- // called without using argument adaptor frames.
- inline void DontAdaptArguments();
-
- // [expected_nof_properties]: Expected number of properties for the function.
- inline int expected_nof_properties();
- inline void set_expected_nof_properties(int value);
-
- // Inobject slack tracking is the way to reclaim unused inobject space.
- //
- // The instance size is initially determined by adding some slack to
- // expected_nof_properties (to allow for a few extra properties added
- // after the constructor). There is no guarantee that the extra space
- // will not be wasted.
- //
- // Here is the algorithm to reclaim the unused inobject space:
- // - Detect the first constructor call for this SharedFunctionInfo.
- // When it happens enter the "in progress" state: remember the
- // constructor's initial_map and install a special construct stub that
- // counts constructor calls.
- // - While the tracking is in progress create objects filled with
- // one_pointer_filler_map instead of undefined_value. This way they can be
- // resized quickly and safely.
- // - Once enough (kGenerousAllocationCount) objects have been created
- // compute the 'slack' (traverse the map transition tree starting from the
- // initial_map and find the lowest value of unused_property_fields).
- // - Traverse the transition tree again and decrease the instance size
- // of every map. Existing objects will resize automatically (they are
- // filled with one_pointer_filler_map). All further allocations will
- // use the adjusted instance size.
- // - Decrease expected_nof_properties so that an allocations made from
- // another context will use the adjusted instance size too.
- // - Exit "in progress" state by clearing the reference to the initial_map
- // and setting the regular construct stub (generic or inline).
- //
- // The above is the main event sequence. Some special cases are possible
- // while the tracking is in progress:
- //
- // - GC occurs.
- // Check if the initial_map is referenced by any live objects (except this
- // SharedFunctionInfo). If it is, continue tracking as usual.
- // If it is not, clear the reference and reset the tracking state. The
- // tracking will be initiated again on the next constructor call.
- //
- // - The constructor is called from another context.
- // Immediately complete the tracking, perform all the necessary changes
- // to maps. This is necessary because there is no efficient way to track
- // multiple initial_maps.
- // Proceed to create an object in the current context (with the adjusted
- // size).
- //
- // - A different constructor function sharing the same SharedFunctionInfo is
- // called in the same context. This could be another closure in the same
- // context, or the first function could have been disposed.
- // This is handled the same way as the previous case.
- //
- // Important: inobject slack tracking is not attempted during the snapshot
- // creation.
-
- static const int kGenerousAllocationCount = 8;
-
- // [construction_count]: Counter for constructor calls made during
- // the tracking phase.
- inline int construction_count();
- inline void set_construction_count(int value);
-
- // [initial_map]: initial map of the first function called as a constructor.
- // Saved for the duration of the tracking phase.
- // This is a weak link (GC resets it to undefined_value if no other live
- // object reference this map).
- DECL_ACCESSORS(initial_map, Object)
-
- // True if the initial_map is not undefined and the countdown stub is
- // installed.
- inline bool IsInobjectSlackTrackingInProgress();
-
- // Starts the tracking.
- // Stores the initial map and installs the countdown stub.
- // IsInobjectSlackTrackingInProgress is normally true after this call,
- // except when tracking have not been started (e.g. the map has no unused
- // properties or the snapshot is being built).
- void StartInobjectSlackTracking(Map* map);
-
- // Completes the tracking.
- // IsInobjectSlackTrackingInProgress is false after this call.
- void CompleteInobjectSlackTracking();
-
- // Clears the initial_map before the GC marking phase to ensure the reference
- // is weak. IsInobjectSlackTrackingInProgress is false after this call.
- void DetachInitialMap();
-
- // Restores the link to the initial map after the GC marking phase.
- // IsInobjectSlackTrackingInProgress is true after this call.
- void AttachInitialMap(Map* map);
-
- // False if there are definitely no live objects created from this function.
- // True if live objects _may_ exist (existence not guaranteed).
- // May go back from true to false after GC.
- inline bool live_objects_may_exist();
-
- inline void set_live_objects_may_exist(bool value);
-
- // [instance class name]: class name for instances.
- DECL_ACCESSORS(instance_class_name, Object)
-
- // [function data]: This field holds some additional data for function.
- // Currently it either has FunctionTemplateInfo to make benefit the API
- // or Smi identifying a builtin function.
- // In the long run we don't want all functions to have this field but
- // we can fix that when we have a better model for storing hidden data
- // on objects.
- DECL_ACCESSORS(function_data, Object)
-
- inline bool IsApiFunction();
- inline FunctionTemplateInfo* get_api_func_data();
- inline bool HasBuiltinFunctionId();
- inline BuiltinFunctionId builtin_function_id();
-
- // [script info]: Script from which the function originates.
- DECL_ACCESSORS(script, Object)
-
- // [num_literals]: Number of literals used by this function.
- inline int num_literals();
- inline void set_num_literals(int value);
-
- // [start_position_and_type]: Field used to store both the source code
- // position, whether or not the function is a function expression,
- // and whether or not the function is a toplevel function. The two
- // least significants bit indicates whether the function is an
- // expression and the rest contains the source code position.
- inline int start_position_and_type();
- inline void set_start_position_and_type(int value);
-
- // [debug info]: Debug information.
- DECL_ACCESSORS(debug_info, Object)
-
- // [inferred name]: Name inferred from variable or property
- // assignment of this function. Used to facilitate debugging and
- // profiling of JavaScript code written in OO style, where almost
- // all functions are anonymous but are assigned to object
- // properties.
- DECL_ACCESSORS(inferred_name, String)
-
- // The function's name if it is non-empty, otherwise the inferred name.
- String* DebugName();
-
- // Position of the 'function' token in the script source.
- inline int function_token_position();
- inline void set_function_token_position(int function_token_position);
-
- // Position of this function in the script source.
- inline int start_position();
- inline void set_start_position(int start_position);
-
- // End position of this function in the script source.
- inline int end_position();
- inline void set_end_position(int end_position);
-
- // Is this function a function expression in the source code.
- inline bool is_expression();
- inline void set_is_expression(bool value);
-
- // Is this function a top-level function (scripts, evals).
- inline bool is_toplevel();
- inline void set_is_toplevel(bool value);
-
- // Bit field containing various information collected by the compiler to
- // drive optimization.
- inline int compiler_hints();
- inline void set_compiler_hints(int value);
-
- // A counter used to determine when to stress the deoptimizer with a
- // deopt.
- inline Smi* deopt_counter();
- inline void set_deopt_counter(Smi* counter);
-
- // Add information on assignments of the form this.x = ...;
- void SetThisPropertyAssignmentsInfo(
- bool has_only_simple_this_property_assignments,
- FixedArray* this_property_assignments);
-
- // Clear information on assignments of the form this.x = ...;
- void ClearThisPropertyAssignmentsInfo();
-
- // Indicate that this function only consists of assignments of the form
- // this.x = y; where y is either a constant or refers to an argument.
- inline bool has_only_simple_this_property_assignments();
-
- // Indicates if this function can be lazy compiled.
- // This is used to determine if we can safely flush code from a function
- // when doing GC if we expect that the function will no longer be used.
- inline bool allows_lazy_compilation();
- inline void set_allows_lazy_compilation(bool flag);
-
- // Indicates how many full GCs this function has survived with assigned
- // code object. Used to determine when it is relatively safe to flush
- // this code object and replace it with lazy compilation stub.
- // Age is reset when GC notices that the code object is referenced
- // from the stack or compilation cache.
- inline int code_age();
- inline void set_code_age(int age);
-
- // Indicates whether optimizations have been disabled for this
- // shared function info. If a function is repeatedly optimized or if
- // we cannot optimize the function we disable optimization to avoid
- // spending time attempting to optimize it again.
- inline bool optimization_disabled();
- inline void set_optimization_disabled(bool value);
-
- // Indicates whether the function is a strict mode function.
- inline bool strict_mode();
- inline void set_strict_mode(bool value);
-
- // Indicates whether or not the code in the shared function support
- // deoptimization.
- inline bool has_deoptimization_support();
-
- // Enable deoptimization support through recompiled code.
- void EnableDeoptimizationSupport(Code* recompiled);
-
- // Lookup the bailout ID and ASSERT that it exists in the non-optimized
- // code, returns whether it asserted (i.e., always true if assertions are
- // disabled).
- bool VerifyBailoutId(int id);
-
- // Check whether a inlined constructor can be generated with the given
- // prototype.
- bool CanGenerateInlineConstructor(Object* prototype);
-
- // Prevents further attempts to generate inline constructors.
- // To be called if generation failed for any reason.
- void ForbidInlineConstructor();
-
- // For functions which only contains this property assignments this provides
- // access to the names for the properties assigned.
- DECL_ACCESSORS(this_property_assignments, Object)
- inline int this_property_assignments_count();
- inline void set_this_property_assignments_count(int value);
- String* GetThisPropertyAssignmentName(int index);
- bool IsThisPropertyAssignmentArgument(int index);
- int GetThisPropertyAssignmentArgument(int index);
- Object* GetThisPropertyAssignmentConstant(int index);
-
- // [source code]: Source code for the function.
- bool HasSourceCode();
- Object* GetSourceCode();
-
- inline int opt_count();
- inline void set_opt_count(int opt_count);
-
- // Source size of this function.
- int SourceSize();
-
- // Calculate the instance size.
- int CalculateInstanceSize();
-
- // Calculate the number of in-object properties.
- int CalculateInObjectProperties();
-
- // Dispatched behavior.
- // Set max_length to -1 for unlimited length.
- void SourceCodePrint(StringStream* accumulator, int max_length);
-#ifdef OBJECT_PRINT
- inline void SharedFunctionInfoPrint() {
- SharedFunctionInfoPrint(stdout);
- }
- void SharedFunctionInfoPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void SharedFunctionInfoVerify();
-#endif
-
- // Casting.
- static inline SharedFunctionInfo* cast(Object* obj);
-
- // Constants.
- static const int kDontAdaptArgumentsSentinel = -1;
-
- // Layout description.
- // Pointer fields.
- static const int kNameOffset = HeapObject::kHeaderSize;
- static const int kCodeOffset = kNameOffset + kPointerSize;
- static const int kScopeInfoOffset = kCodeOffset + kPointerSize;
- static const int kConstructStubOffset = kScopeInfoOffset + kPointerSize;
- static const int kInstanceClassNameOffset =
- kConstructStubOffset + kPointerSize;
- static const int kFunctionDataOffset =
- kInstanceClassNameOffset + kPointerSize;
- static const int kScriptOffset = kFunctionDataOffset + kPointerSize;
- static const int kDebugInfoOffset = kScriptOffset + kPointerSize;
- static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
- static const int kInitialMapOffset =
- kInferredNameOffset + kPointerSize;
- static const int kThisPropertyAssignmentsOffset =
- kInitialMapOffset + kPointerSize;
- static const int kDeoptCounterOffset =
- kThisPropertyAssignmentsOffset + kPointerSize;
-#if V8_HOST_ARCH_32_BIT
- // Smi fields.
- static const int kLengthOffset =
- kDeoptCounterOffset + kPointerSize;
- static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
- static const int kExpectedNofPropertiesOffset =
- kFormalParameterCountOffset + kPointerSize;
- static const int kNumLiteralsOffset =
- kExpectedNofPropertiesOffset + kPointerSize;
- static const int kStartPositionAndTypeOffset =
- kNumLiteralsOffset + kPointerSize;
- static const int kEndPositionOffset =
- kStartPositionAndTypeOffset + kPointerSize;
- static const int kFunctionTokenPositionOffset =
- kEndPositionOffset + kPointerSize;
- static const int kCompilerHintsOffset =
- kFunctionTokenPositionOffset + kPointerSize;
- static const int kThisPropertyAssignmentsCountOffset =
- kCompilerHintsOffset + kPointerSize;
- static const int kOptCountOffset =
- kThisPropertyAssignmentsCountOffset + kPointerSize;
- // Total size.
- static const int kSize = kOptCountOffset + kPointerSize;
-#else
- // The only reason to use smi fields instead of int fields
- // is to allow iteration without maps decoding during
- // garbage collections.
- // To avoid wasting space on 64-bit architectures we use
- // the following trick: we group integer fields into pairs
- // First integer in each pair is shifted left by 1.
- // By doing this we guarantee that LSB of each kPointerSize aligned
- // word is not set and thus this word cannot be treated as pointer
- // to HeapObject during old space traversal.
- static const int kLengthOffset =
- kDeoptCounterOffset + kPointerSize;
- static const int kFormalParameterCountOffset =
- kLengthOffset + kIntSize;
-
- static const int kExpectedNofPropertiesOffset =
- kFormalParameterCountOffset + kIntSize;
- static const int kNumLiteralsOffset =
- kExpectedNofPropertiesOffset + kIntSize;
-
- static const int kEndPositionOffset =
- kNumLiteralsOffset + kIntSize;
- static const int kStartPositionAndTypeOffset =
- kEndPositionOffset + kIntSize;
-
- static const int kFunctionTokenPositionOffset =
- kStartPositionAndTypeOffset + kIntSize;
- static const int kCompilerHintsOffset =
- kFunctionTokenPositionOffset + kIntSize;
-
- static const int kThisPropertyAssignmentsCountOffset =
- kCompilerHintsOffset + kIntSize;
- static const int kOptCountOffset =
- kThisPropertyAssignmentsCountOffset + kIntSize;
-
- // Total size.
- static const int kSize = kOptCountOffset + kIntSize;
-
-#endif
-
- // The construction counter for inobject slack tracking is stored in the
- // most significant byte of compiler_hints which is otherwise unused.
- // Its offset depends on the endian-ness of the architecture.
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- static const int kConstructionCountOffset = kCompilerHintsOffset + 3;
-#elif __BYTE_ORDER == __BIG_ENDIAN
- static const int kConstructionCountOffset = kCompilerHintsOffset + 0;
-#else
-#error Unknown byte ordering
-#endif
-
- static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
-
- typedef FixedBodyDescriptor<kNameOffset,
- kThisPropertyAssignmentsOffset + kPointerSize,
- kSize> BodyDescriptor;
-
- // Bit positions in start_position_and_type.
- // The source code start position is in the 30 most significant bits of
- // the start_position_and_type field.
- static const int kIsExpressionBit = 0;
- static const int kIsTopLevelBit = 1;
- static const int kStartPositionShift = 2;
- static const int kStartPositionMask = ~((1 << kStartPositionShift) - 1);
-
- // Bit positions in compiler_hints.
- static const int kHasOnlySimpleThisPropertyAssignments = 0;
- static const int kAllowLazyCompilation = 1;
- static const int kLiveObjectsMayExist = 2;
- static const int kCodeAgeShift = 3;
- static const int kCodeAgeMask = 0x7;
- static const int kOptimizationDisabled = 6;
- static const int kStrictModeFunction = 7;
-
- private:
-#if V8_HOST_ARCH_32_BIT
- // On 32 bit platforms, compiler hints is a smi.
- static const int kCompilerHintsSmiTagSize = kSmiTagSize;
- static const int kCompilerHintsSize = kPointerSize;
-#else
- // On 64 bit platforms, compiler hints is not a smi, see comment above.
- static const int kCompilerHintsSmiTagSize = 0;
- static const int kCompilerHintsSize = kIntSize;
-#endif
-
- public:
- // Constants for optimizing codegen for strict mode function tests.
- // Allows to use byte-widgh instructions.
- static const int kStrictModeBitWithinByte =
- (kStrictModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
-
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- static const int kStrictModeByteOffset = kCompilerHintsOffset +
- (kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
-#elif __BYTE_ORDER == __BIG_ENDIAN
- static const int kStrictModeByteOffset = kCompilerHintsOffset +
- (kCompilerHintsSize - 1) -
- ((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
-#else
-#error Unknown byte ordering
-#endif
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
-};
-
-
-// JSFunction describes JavaScript functions.
-class JSFunction: public JSObject {
- public:
- // [prototype_or_initial_map]:
- DECL_ACCESSORS(prototype_or_initial_map, Object)
-
- // [shared_function_info]: The information about the function that
- // can be shared by instances.
- DECL_ACCESSORS(shared, SharedFunctionInfo)
-
- inline SharedFunctionInfo* unchecked_shared();
-
- // [context]: The context for this function.
- inline Context* context();
- inline Object* unchecked_context();
- inline void set_context(Object* context);
-
- // [code]: The generated code object for this function. Executed
- // when the function is invoked, e.g. foo() or new foo(). See
- // [[Call]] and [[Construct]] description in ECMA-262, section
- // 8.6.2, page 27.
- inline Code* code();
- inline void set_code(Code* code);
- inline void ReplaceCode(Code* code);
-
- inline Code* unchecked_code();
-
- // Tells whether this function is builtin.
- inline bool IsBuiltin();
-
- // Tells whether or not the function needs arguments adaption.
- inline bool NeedsArgumentsAdaption();
-
- // Tells whether or not this function has been optimized.
- inline bool IsOptimized();
-
- // Mark this function for lazy recompilation. The function will be
- // recompiled the next time it is executed.
- void MarkForLazyRecompilation();
-
- // Tells whether or not the function is already marked for lazy
- // recompilation.
- inline bool IsMarkedForLazyRecompilation();
-
- // Compute a hash code for the source code of this function.
- uint32_t SourceHash();
-
- // Check whether or not this function is inlineable.
- bool IsInlineable();
-
- // [literals]: Fixed array holding the materialized literals.
- //
- // If the function contains object, regexp or array literals, the
- // literals array prefix contains the object, regexp, and array
- // function to be used when creating these literals. This is
- // necessary so that we do not dynamically lookup the object, regexp
- // or array functions. Performing a dynamic lookup, we might end up
- // using the functions from a new context that we should not have
- // access to.
- DECL_ACCESSORS(literals, FixedArray)
-
- // The initial map for an object created by this constructor.
- inline Map* initial_map();
- inline void set_initial_map(Map* value);
- inline bool has_initial_map();
-
- // Get and set the prototype property on a JSFunction. If the
- // function has an initial map the prototype is set on the initial
- // map. Otherwise, the prototype is put in the initial map field
- // until an initial map is needed.
- inline bool has_prototype();
- inline bool has_instance_prototype();
- inline Object* prototype();
- inline Object* instance_prototype();
- Object* SetInstancePrototype(Object* value);
- MUST_USE_RESULT MaybeObject* SetPrototype(Object* value);
-
- // After prototype is removed, it will not be created when accessed, and
- // [[Construct]] from this function will not be allowed.
- Object* RemovePrototype();
- inline bool should_have_prototype();
-
- // Accessor for this function's initial map's [[class]]
- // property. This is primarily used by ECMA native functions. This
- // method sets the class_name field of this function's initial map
- // to a given value. It creates an initial map if this function does
- // not have one. Note that this method does not copy the initial map
- // if it has one already, but simply replaces it with the new value.
- // Instances created afterwards will have a map whose [[class]] is
- // set to 'value', but there is no guarantees on instances created
- // before.
- Object* SetInstanceClassName(String* name);
-
- // Returns if this function has been compiled to native code yet.
- inline bool is_compiled();
-
- // [next_function_link]: Field for linking functions. This list is treated as
- // a weak list by the GC.
- DECL_ACCESSORS(next_function_link, Object)
-
- // Prints the name of the function using PrintF.
- inline void PrintName() {
- PrintName(stdout);
- }
- void PrintName(FILE* out);
-
- // Casting.
- static inline JSFunction* cast(Object* obj);
-
- // Iterates the objects, including code objects indirectly referenced
- // through pointers to the first instruction in the code object.
- void JSFunctionIterateBody(int object_size, ObjectVisitor* v);
-
- // Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSFunctionPrint() {
- JSFunctionPrint(stdout);
- }
- void JSFunctionPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void JSFunctionVerify();
-#endif
-
- // Returns the number of allocated literals.
- inline int NumberOfLiterals();
-
- // Retrieve the global context from a function's literal array.
- static Context* GlobalContextFromLiterals(FixedArray* literals);
-
- // Layout descriptors. The last property (from kNonWeakFieldsEndOffset to
- // kSize) is weak and has special handling during garbage collection.
- static const int kCodeEntryOffset = JSObject::kHeaderSize;
- static const int kPrototypeOrInitialMapOffset =
- kCodeEntryOffset + kPointerSize;
- static const int kSharedFunctionInfoOffset =
- kPrototypeOrInitialMapOffset + kPointerSize;
- static const int kContextOffset = kSharedFunctionInfoOffset + kPointerSize;
- static const int kLiteralsOffset = kContextOffset + kPointerSize;
- static const int kNonWeakFieldsEndOffset = kLiteralsOffset + kPointerSize;
- static const int kNextFunctionLinkOffset = kNonWeakFieldsEndOffset;
- static const int kSize = kNextFunctionLinkOffset + kPointerSize;
-
- // Layout of the literals array.
- static const int kLiteralsPrefixSize = 1;
- static const int kLiteralGlobalContextIndex = 0;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunction);
-};
-
-
-// JSGlobalProxy's prototype must be a JSGlobalObject or null,
-// and the prototype is hidden. JSGlobalProxy always delegates
-// property accesses to its prototype if the prototype is not null.
-//
-// A JSGlobalProxy can be reinitialized which will preserve its identity.
-//
-// Accessing a JSGlobalProxy requires security check.
-
-class JSGlobalProxy : public JSObject {
- public:
- // [context]: the owner global context of this proxy object.
- // It is null value if this object is not used by any context.
- DECL_ACCESSORS(context, Object)
-
- // Casting.
- static inline JSGlobalProxy* cast(Object* obj);
-
- // Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSGlobalProxyPrint() {
- JSGlobalProxyPrint(stdout);
- }
- void JSGlobalProxyPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void JSGlobalProxyVerify();
-#endif
-
- // Layout description.
- static const int kContextOffset = JSObject::kHeaderSize;
- static const int kSize = kContextOffset + kPointerSize;
-
- private:
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalProxy);
-};
-
-
-// Forward declaration.
-class JSBuiltinsObject;
-class JSGlobalPropertyCell;
-
-// Common super class for JavaScript global objects and the special
-// builtins global objects.
-class GlobalObject: public JSObject {
- public:
- // [builtins]: the object holding the runtime routines written in JS.
- DECL_ACCESSORS(builtins, JSBuiltinsObject)
-
- // [global context]: the global context corresponding to this global object.
- DECL_ACCESSORS(global_context, Context)
-
- // [global receiver]: the global receiver object of the context
- DECL_ACCESSORS(global_receiver, JSObject)
-
- // Retrieve the property cell used to store a property.
- JSGlobalPropertyCell* GetPropertyCell(LookupResult* result);
-
- // This is like GetProperty, but is used when you know the lookup won't fail
- // by throwing an exception. This is for the debug and builtins global
- // objects, where it is known which properties can be expected to be present
- // on the object.
- Object* GetPropertyNoExceptionThrown(String* key) {
- Object* answer = GetProperty(key)->ToObjectUnchecked();
- return answer;
- }
-
- // Ensure that the global object has a cell for the given property name.
- MUST_USE_RESULT MaybeObject* EnsurePropertyCell(String* name);
-
- // Casting.
- static inline GlobalObject* cast(Object* obj);
-
- // Layout description.
- static const int kBuiltinsOffset = JSObject::kHeaderSize;
- static const int kGlobalContextOffset = kBuiltinsOffset + kPointerSize;
- static const int kGlobalReceiverOffset = kGlobalContextOffset + kPointerSize;
- static const int kHeaderSize = kGlobalReceiverOffset + kPointerSize;
-
- private:
- friend class AGCCVersionRequiresThisClassToHaveAFriendSoHereItIs;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(GlobalObject);
-};
-
-
-// JavaScript global object.
-class JSGlobalObject: public GlobalObject {
- public:
-
- // Casting.
- static inline JSGlobalObject* cast(Object* obj);
-
- // Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSGlobalObjectPrint() {
- JSGlobalObjectPrint(stdout);
- }
- void JSGlobalObjectPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void JSGlobalObjectVerify();
-#endif
-
- // Layout description.
- static const int kSize = GlobalObject::kHeaderSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalObject);
-};
-
-
-// Builtins global object which holds the runtime routines written in
-// JavaScript.
-class JSBuiltinsObject: public GlobalObject {
- public:
- // Accessors for the runtime routines written in JavaScript.
- inline Object* javascript_builtin(Builtins::JavaScript id);
- inline void set_javascript_builtin(Builtins::JavaScript id, Object* value);
-
- // Accessors for code of the runtime routines written in JavaScript.
- inline Code* javascript_builtin_code(Builtins::JavaScript id);
- inline void set_javascript_builtin_code(Builtins::JavaScript id, Code* value);
-
- // Casting.
- static inline JSBuiltinsObject* cast(Object* obj);
-
- // Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSBuiltinsObjectPrint() {
- JSBuiltinsObjectPrint(stdout);
- }
- void JSBuiltinsObjectPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void JSBuiltinsObjectVerify();
-#endif
-
- // Layout description. The size of the builtins object includes
- // room for two pointers per runtime routine written in javascript
- // (function and code object).
- static const int kJSBuiltinsCount = Builtins::id_count;
- static const int kJSBuiltinsOffset = GlobalObject::kHeaderSize;
- static const int kJSBuiltinsCodeOffset =
- GlobalObject::kHeaderSize + (kJSBuiltinsCount * kPointerSize);
- static const int kSize =
- kJSBuiltinsCodeOffset + (kJSBuiltinsCount * kPointerSize);
-
- static int OffsetOfFunctionWithId(Builtins::JavaScript id) {
- return kJSBuiltinsOffset + id * kPointerSize;
- }
-
- static int OffsetOfCodeWithId(Builtins::JavaScript id) {
- return kJSBuiltinsCodeOffset + id * kPointerSize;
- }
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSBuiltinsObject);
-};
-
-
-// Representation for JS Wrapper objects, String, Number, Boolean, Date, etc.
-class JSValue: public JSObject {
- public:
- // [value]: the object being wrapped.
- DECL_ACCESSORS(value, Object)
-
- // Casting.
- static inline JSValue* cast(Object* obj);
-
- // Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSValuePrint() {
- JSValuePrint(stdout);
- }
- void JSValuePrint(FILE* out);
-#endif
-#ifdef DEBUG
- void JSValueVerify();
-#endif
-
- // Layout description.
- static const int kValueOffset = JSObject::kHeaderSize;
- static const int kSize = kValueOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSValue);
-};
-
-
-// Representation of message objects used for error reporting through
-// the API. The messages are formatted in JavaScript so this object is
-// a real JavaScript object. The information used for formatting the
-// error messages are not directly accessible from JavaScript to
-// prevent leaking information to user code called during error
-// formatting.
-class JSMessageObject: public JSObject {
- public:
- // [type]: the type of error message.
- DECL_ACCESSORS(type, String)
-
- // [arguments]: the arguments for formatting the error message.
- DECL_ACCESSORS(arguments, JSArray)
-
- // [script]: the script from which the error message originated.
- DECL_ACCESSORS(script, Object)
-
- // [stack_trace]: the stack trace for this error message.
- DECL_ACCESSORS(stack_trace, Object)
-
- // [stack_frames]: an array of stack frames for this error object.
- DECL_ACCESSORS(stack_frames, Object)
-
- // [start_position]: the start position in the script for the error message.
- inline int start_position();
- inline void set_start_position(int value);
-
- // [end_position]: the end position in the script for the error message.
- inline int end_position();
- inline void set_end_position(int value);
-
- // Casting.
- static inline JSMessageObject* cast(Object* obj);
-
- // Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSMessageObjectPrint() {
- JSMessageObjectPrint(stdout);
- }
- void JSMessageObjectPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void JSMessageObjectVerify();
-#endif
-
- // Layout description.
- static const int kTypeOffset = JSObject::kHeaderSize;
- static const int kArgumentsOffset = kTypeOffset + kPointerSize;
- static const int kScriptOffset = kArgumentsOffset + kPointerSize;
- static const int kStackTraceOffset = kScriptOffset + kPointerSize;
- static const int kStackFramesOffset = kStackTraceOffset + kPointerSize;
- static const int kStartPositionOffset = kStackFramesOffset + kPointerSize;
- static const int kEndPositionOffset = kStartPositionOffset + kPointerSize;
- static const int kSize = kEndPositionOffset + kPointerSize;
-
- typedef FixedBodyDescriptor<HeapObject::kMapOffset,
- kStackFramesOffset + kPointerSize,
- kSize> BodyDescriptor;
-};
-
-
-// Regular expressions
-// The regular expression holds a single reference to a FixedArray in
-// the kDataOffset field.
-// The FixedArray contains the following data:
-// - tag : type of regexp implementation (not compiled yet, atom or irregexp)
-// - reference to the original source string
-// - reference to the original flag string
-// If it is an atom regexp
-// - a reference to a literal string to search for
-// If it is an irregexp regexp:
-// - a reference to code for ASCII inputs (bytecode or compiled).
-// - a reference to code for UC16 inputs (bytecode or compiled).
-// - max number of registers used by irregexp implementations.
-// - number of capture registers (output values) of the regexp.
-class JSRegExp: public JSObject {
- public:
- // Meaning of Type:
- // NOT_COMPILED: Initial value. No data has been stored in the JSRegExp yet.
- // ATOM: A simple string to match against using an indexOf operation.
- // IRREGEXP: Compiled with Irregexp.
- // IRREGEXP_NATIVE: Compiled to native code with Irregexp.
- enum Type { NOT_COMPILED, ATOM, IRREGEXP };
- enum Flag { NONE = 0, GLOBAL = 1, IGNORE_CASE = 2, MULTILINE = 4 };
-
- class Flags {
- public:
- explicit Flags(uint32_t value) : value_(value) { }
- bool is_global() { return (value_ & GLOBAL) != 0; }
- bool is_ignore_case() { return (value_ & IGNORE_CASE) != 0; }
- bool is_multiline() { return (value_ & MULTILINE) != 0; }
- uint32_t value() { return value_; }
- private:
- uint32_t value_;
- };
-
- DECL_ACCESSORS(data, Object)
-
- inline Type TypeTag();
- inline int CaptureCount();
- inline Flags GetFlags();
- inline String* Pattern();
- inline Object* DataAt(int index);
- // Set implementation data after the object has been prepared.
- inline void SetDataAt(int index, Object* value);
- static int code_index(bool is_ascii) {
- if (is_ascii) {
- return kIrregexpASCIICodeIndex;
- } else {
- return kIrregexpUC16CodeIndex;
- }
- }
-
- static inline JSRegExp* cast(Object* obj);
-
- // Dispatched behavior.
-#ifdef DEBUG
- void JSRegExpVerify();
-#endif
-
- static const int kDataOffset = JSObject::kHeaderSize;
- static const int kSize = kDataOffset + kPointerSize;
-
- // Indices in the data array.
- static const int kTagIndex = 0;
- static const int kSourceIndex = kTagIndex + 1;
- static const int kFlagsIndex = kSourceIndex + 1;
- static const int kDataIndex = kFlagsIndex + 1;
- // The data fields are used in different ways depending on the
- // value of the tag.
- // Atom regexps (literal strings).
- static const int kAtomPatternIndex = kDataIndex;
-
- static const int kAtomDataSize = kAtomPatternIndex + 1;
-
- // Irregexp compiled code or bytecode for ASCII. If compilation
- // fails, this fields hold an exception object that should be
- // thrown if the regexp is used again.
- static const int kIrregexpASCIICodeIndex = kDataIndex;
- // Irregexp compiled code or bytecode for UC16. If compilation
- // fails, this fields hold an exception object that should be
- // thrown if the regexp is used again.
- static const int kIrregexpUC16CodeIndex = kDataIndex + 1;
- // Maximal number of registers used by either ASCII or UC16.
- // Only used to check that there is enough stack space
- static const int kIrregexpMaxRegisterCountIndex = kDataIndex + 2;
- // Number of captures in the compiled regexp.
- static const int kIrregexpCaptureCountIndex = kDataIndex + 3;
-
- static const int kIrregexpDataSize = kIrregexpCaptureCountIndex + 1;
-
- // Offsets directly into the data fixed array.
- static const int kDataTagOffset =
- FixedArray::kHeaderSize + kTagIndex * kPointerSize;
- static const int kDataAsciiCodeOffset =
- FixedArray::kHeaderSize + kIrregexpASCIICodeIndex * kPointerSize;
- static const int kDataUC16CodeOffset =
- FixedArray::kHeaderSize + kIrregexpUC16CodeIndex * kPointerSize;
- static const int kIrregexpCaptureCountOffset =
- FixedArray::kHeaderSize + kIrregexpCaptureCountIndex * kPointerSize;
-
- // In-object fields.
- static const int kSourceFieldIndex = 0;
- static const int kGlobalFieldIndex = 1;
- static const int kIgnoreCaseFieldIndex = 2;
- static const int kMultilineFieldIndex = 3;
- static const int kLastIndexFieldIndex = 4;
- static const int kInObjectFieldCount = 5;
-};
-
-
-class CompilationCacheShape {
- public:
- static inline bool IsMatch(HashTableKey* key, Object* value) {
- return key->IsMatch(value);
- }
-
- static inline uint32_t Hash(HashTableKey* key) {
- return key->Hash();
- }
-
- static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
- return key->HashForObject(object);
- }
-
- MUST_USE_RESULT static MaybeObject* AsObject(HashTableKey* key) {
- return key->AsObject();
- }
-
- static const int kPrefixSize = 0;
- static const int kEntrySize = 2;
-};
-
-
-class CompilationCacheTable: public HashTable<CompilationCacheShape,
- HashTableKey*> {
- public:
- // Find cached value for a string key, otherwise return null.
- Object* Lookup(String* src);
- Object* LookupEval(String* src, Context* context, StrictModeFlag strict_mode);
- Object* LookupRegExp(String* source, JSRegExp::Flags flags);
- MaybeObject* Put(String* src, Object* value);
- MaybeObject* PutEval(String* src,
- Context* context,
- SharedFunctionInfo* value);
- MaybeObject* PutRegExp(String* src, JSRegExp::Flags flags, FixedArray* value);
-
- // Remove given value from cache.
- void Remove(Object* value);
-
- static inline CompilationCacheTable* cast(Object* obj);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheTable);
-};
-
-
-class CodeCache: public Struct {
- public:
- DECL_ACCESSORS(default_cache, FixedArray)
- DECL_ACCESSORS(normal_type_cache, Object)
-
- // Add the code object to the cache.
- MUST_USE_RESULT MaybeObject* Update(String* name, Code* code);
-
- // Lookup code object in the cache. Returns code object if found and undefined
- // if not.
- Object* Lookup(String* name, Code::Flags flags);
-
- // Get the internal index of a code object in the cache. Returns -1 if the
- // code object is not in that cache. This index can be used to later call
- // RemoveByIndex. The cache cannot be modified between a call to GetIndex and
- // RemoveByIndex.
- int GetIndex(Object* name, Code* code);
-
- // Remove an object from the cache with the provided internal index.
- void RemoveByIndex(Object* name, Code* code, int index);
-
- static inline CodeCache* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- inline void CodeCachePrint() {
- CodeCachePrint(stdout);
- }
- void CodeCachePrint(FILE* out);
-#endif
-#ifdef DEBUG
- void CodeCacheVerify();
-#endif
-
- static const int kDefaultCacheOffset = HeapObject::kHeaderSize;
- static const int kNormalTypeCacheOffset =
- kDefaultCacheOffset + kPointerSize;
- static const int kSize = kNormalTypeCacheOffset + kPointerSize;
-
- private:
- MUST_USE_RESULT MaybeObject* UpdateDefaultCache(String* name, Code* code);
- MUST_USE_RESULT MaybeObject* UpdateNormalTypeCache(String* name, Code* code);
- Object* LookupDefaultCache(String* name, Code::Flags flags);
- Object* LookupNormalTypeCache(String* name, Code::Flags flags);
-
- // Code cache layout of the default cache. Elements are alternating name and
- // code objects for non normal load/store/call IC's.
- static const int kCodeCacheEntrySize = 2;
- static const int kCodeCacheEntryNameOffset = 0;
- static const int kCodeCacheEntryCodeOffset = 1;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(CodeCache);
-};
-
-
-class CodeCacheHashTableShape {
- public:
- static inline bool IsMatch(HashTableKey* key, Object* value) {
- return key->IsMatch(value);
- }
-
- static inline uint32_t Hash(HashTableKey* key) {
- return key->Hash();
- }
-
- static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
- return key->HashForObject(object);
- }
-
- MUST_USE_RESULT static MaybeObject* AsObject(HashTableKey* key) {
- return key->AsObject();
- }
-
- static const int kPrefixSize = 0;
- static const int kEntrySize = 2;
-};
-
-
-class CodeCacheHashTable: public HashTable<CodeCacheHashTableShape,
- HashTableKey*> {
- public:
- Object* Lookup(String* name, Code::Flags flags);
- MUST_USE_RESULT MaybeObject* Put(String* name, Code* code);
-
- int GetIndex(String* name, Code::Flags flags);
- void RemoveByIndex(int index);
-
- static inline CodeCacheHashTable* cast(Object* obj);
-
- // Initial size of the fixed array backing the hash table.
- static const int kInitialSize = 64;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(CodeCacheHashTable);
-};
-
-
-enum AllowNullsFlag {ALLOW_NULLS, DISALLOW_NULLS};
-enum RobustnessFlag {ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL};
-
-
-class StringHasher {
- public:
- inline StringHasher(int length);
-
- // Returns true if the hash of this string can be computed without
- // looking at the contents.
- inline bool has_trivial_hash();
-
- // Add a character to the hash and update the array index calculation.
- inline void AddCharacter(uc32 c);
-
- // Adds a character to the hash but does not update the array index
- // calculation. This can only be called when it has been verified
- // that the input is not an array index.
- inline void AddCharacterNoIndex(uc32 c);
-
- // Returns the value to store in the hash field of a string with
- // the given length and contents.
- uint32_t GetHashField();
-
- // Returns true if the characters seen so far make up a legal array
- // index.
- bool is_array_index() { return is_array_index_; }
-
- bool is_valid() { return is_valid_; }
-
- void invalidate() { is_valid_ = false; }
-
- // Calculated hash value for a string consisting of 1 to
- // String::kMaxArrayIndexSize digits with no leading zeros (except "0").
- // value is represented decimal value.
- static uint32_t MakeArrayIndexHash(uint32_t value, int length);
-
- private:
-
- uint32_t array_index() {
- ASSERT(is_array_index());
- return array_index_;
- }
-
- inline uint32_t GetHash();
-
- int length_;
- uint32_t raw_running_hash_;
- uint32_t array_index_;
- bool is_array_index_;
- bool is_first_char_;
- bool is_valid_;
- friend class TwoCharHashTableKey;
-};
-
-
-// Calculates string hash.
-template <typename schar>
-inline uint32_t HashSequentialString(const schar* chars, int length);
-
-
-// The characteristics of a string are stored in its map. Retrieving these
-// few bits of information is moderately expensive, involving two memory
-// loads where the second is dependent on the first. To improve efficiency
-// the shape of the string is given its own class so that it can be retrieved
-// once and used for several string operations. A StringShape is small enough
-// to be passed by value and is immutable, but be aware that flattening a
-// string can potentially alter its shape. Also be aware that a GC caused by
-// something else can alter the shape of a string due to ConsString
-// shortcutting. Keeping these restrictions in mind has proven to be error-
-// prone and so we no longer put StringShapes in variables unless there is a
-// concrete performance benefit at that particular point in the code.
-class StringShape BASE_EMBEDDED {
- public:
- inline explicit StringShape(String* s);
- inline explicit StringShape(Map* s);
- inline explicit StringShape(InstanceType t);
- inline bool IsSequential();
- inline bool IsExternal();
- inline bool IsCons();
- inline bool IsExternalAscii();
- inline bool IsExternalTwoByte();
- inline bool IsSequentialAscii();
- inline bool IsSequentialTwoByte();
- inline bool IsSymbol();
- inline StringRepresentationTag representation_tag();
- inline uint32_t full_representation_tag();
- inline uint32_t size_tag();
-#ifdef DEBUG
- inline uint32_t type() { return type_; }
- inline void invalidate() { valid_ = false; }
- inline bool valid() { return valid_; }
-#else
- inline void invalidate() { }
-#endif
- private:
- uint32_t type_;
-#ifdef DEBUG
- inline void set_valid() { valid_ = true; }
- bool valid_;
-#else
- inline void set_valid() { }
-#endif
-};
-
-
-// The String abstract class captures JavaScript string values:
-//
-// Ecma-262:
-// 4.3.16 String Value
-// A string value is a member of the type String and is a finite
-// ordered sequence of zero or more 16-bit unsigned integer values.
-//
-// All string values have a length field.
-class String: public HeapObject {
- public:
- // Get and set the length of the string.
- inline int length();
- inline void set_length(int value);
-
- // Get and set the hash field of the string.
- inline uint32_t hash_field();
- inline void set_hash_field(uint32_t value);
-
- inline bool IsAsciiRepresentation();
- inline bool IsTwoByteRepresentation();
-
- // Returns whether this string has ascii chars, i.e. all of them can
- // be ascii encoded. This might be the case even if the string is
- // two-byte. Such strings may appear when the embedder prefers
- // two-byte external representations even for ascii data.
- //
- // NOTE: this should be considered only a hint. False negatives are
- // possible.
- inline bool HasOnlyAsciiChars();
-
- // Get and set individual two byte chars in the string.
- inline void Set(int index, uint16_t value);
- // Get individual two byte char in the string. Repeated calls
- // to this method are not efficient unless the string is flat.
- inline uint16_t Get(int index);
-
- // Try to flatten the string. Checks first inline to see if it is
- // necessary. Does nothing if the string is not a cons string.
- // Flattening allocates a sequential string with the same data as
- // the given string and mutates the cons string to a degenerate
- // form, where the first component is the new sequential string and
- // the second component is the empty string. If allocation fails,
- // this function returns a failure. If flattening succeeds, this
- // function returns the sequential string that is now the first
- // component of the cons string.
- //
- // Degenerate cons strings are handled specially by the garbage
- // collector (see IsShortcutCandidate).
- //
- // Use FlattenString from Handles.cc to flatten even in case an
- // allocation failure happens.
- inline MaybeObject* TryFlatten(PretenureFlag pretenure = NOT_TENURED);
-
- // Convenience function. Has exactly the same behavior as
- // TryFlatten(), except in the case of failure returns the original
- // string.
- inline String* TryFlattenGetString(PretenureFlag pretenure = NOT_TENURED);
-
- Vector<const char> ToAsciiVector();
- Vector<const uc16> ToUC16Vector();
-
- // Mark the string as an undetectable object. It only applies to
- // ascii and two byte string types.
- bool MarkAsUndetectable();
-
- // Return a substring.
- MUST_USE_RESULT MaybeObject* SubString(int from,
- int to,
- PretenureFlag pretenure = NOT_TENURED);
-
- // String equality operations.
- inline bool Equals(String* other);
- bool IsEqualTo(Vector<const char> str);
- bool IsAsciiEqualTo(Vector<const char> str);
- bool IsTwoByteEqualTo(Vector<const uc16> str);
-
- // Return a UTF8 representation of the string. The string is null
- // terminated but may optionally contain nulls. Length is returned
- // in length_output if length_output is not a null pointer The string
- // should be nearly flat, otherwise the performance of this method may
- // be very slow (quadratic in the length). Setting robustness_flag to
- // ROBUST_STRING_TRAVERSAL invokes behaviour that is robust This means it
- // handles unexpected data without causing assert failures and it does not
- // do any heap allocations. This is useful when printing stack traces.
- SmartPointer<char> ToCString(AllowNullsFlag allow_nulls,
- RobustnessFlag robustness_flag,
- int offset,
- int length,
- int* length_output = 0);
- SmartPointer<char> ToCString(
- AllowNullsFlag allow_nulls = DISALLOW_NULLS,
- RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL,
- int* length_output = 0);
-
- int Utf8Length();
-
- // Return a 16 bit Unicode representation of the string.
- // The string should be nearly flat, otherwise the performance of
- // of this method may be very bad. Setting robustness_flag to
- // ROBUST_STRING_TRAVERSAL invokes behaviour that is robust This means it
- // handles unexpected data without causing assert failures and it does not
- // do any heap allocations. This is useful when printing stack traces.
- SmartPointer<uc16> ToWideCString(
- RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL);
-
- // Tells whether the hash code has been computed.
- inline bool HasHashCode();
-
- // Returns a hash value used for the property table
- inline uint32_t Hash();
-
- static uint32_t ComputeHashField(unibrow::CharacterStream* buffer,
- int length);
-
- static bool ComputeArrayIndex(unibrow::CharacterStream* buffer,
- uint32_t* index,
- int length);
-
- // Externalization.
- bool MakeExternal(v8::String::ExternalStringResource* resource);
- bool MakeExternal(v8::String::ExternalAsciiStringResource* resource);
-
- // Conversion.
- inline bool AsArrayIndex(uint32_t* index);
-
- // Casting.
- static inline String* cast(Object* obj);
-
- void PrintOn(FILE* out);
-
- // For use during stack traces. Performs rudimentary sanity check.
- bool LooksValid();
-
- // Dispatched behavior.
- void StringShortPrint(StringStream* accumulator);
-#ifdef OBJECT_PRINT
- inline void StringPrint() {
- StringPrint(stdout);
- }
- void StringPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void StringVerify();
-#endif
- inline bool IsFlat();
-
- // Layout description.
- static const int kLengthOffset = HeapObject::kHeaderSize;
- static const int kHashFieldOffset = kLengthOffset + kPointerSize;
- static const int kSize = kHashFieldOffset + kPointerSize;
-
- // Maximum number of characters to consider when trying to convert a string
- // value into an array index.
- static const int kMaxArrayIndexSize = 10;
-
- // Max ascii char code.
- static const int kMaxAsciiCharCode = unibrow::Utf8::kMaxOneByteChar;
- static const unsigned kMaxAsciiCharCodeU = unibrow::Utf8::kMaxOneByteChar;
- static const int kMaxUC16CharCode = 0xffff;
-
- // Minimum length for a cons string.
- static const int kMinNonFlatLength = 13;
-
- // Mask constant for checking if a string has a computed hash code
- // and if it is an array index. The least significant bit indicates
- // whether a hash code has been computed. If the hash code has been
- // computed the 2nd bit tells whether the string can be used as an
- // array index.
- static const int kHashNotComputedMask = 1;
- static const int kIsNotArrayIndexMask = 1 << 1;
- static const int kNofHashBitFields = 2;
-
- // Shift constant retrieving hash code from hash field.
- static const int kHashShift = kNofHashBitFields;
-
- // Array index strings this short can keep their index in the hash
- // field.
- static const int kMaxCachedArrayIndexLength = 7;
-
- // For strings which are array indexes the hash value has the string length
- // mixed into the hash, mainly to avoid a hash value of zero which would be
- // the case for the string '0'. 24 bits are used for the array index value.
- static const int kArrayIndexValueBits = 24;
- static const int kArrayIndexLengthBits =
- kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields;
-
- STATIC_CHECK((kArrayIndexLengthBits > 0));
- STATIC_CHECK(kMaxArrayIndexSize < (1 << kArrayIndexLengthBits));
-
- static const int kArrayIndexHashLengthShift =
- kArrayIndexValueBits + kNofHashBitFields;
-
- static const int kArrayIndexHashMask = (1 << kArrayIndexHashLengthShift) - 1;
-
- static const int kArrayIndexValueMask =
- ((1 << kArrayIndexValueBits) - 1) << kHashShift;
-
- // Check that kMaxCachedArrayIndexLength + 1 is a power of two so we
- // could use a mask to test if the length of string is less than or equal to
- // kMaxCachedArrayIndexLength.
- STATIC_CHECK(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1));
-
- static const int kContainsCachedArrayIndexMask =
- (~kMaxCachedArrayIndexLength << kArrayIndexHashLengthShift) |
- kIsNotArrayIndexMask;
-
- // Value of empty hash field indicating that the hash is not computed.
- static const int kEmptyHashField =
- kIsNotArrayIndexMask | kHashNotComputedMask;
-
- // Value of hash field containing computed hash equal to zero.
- static const int kZeroHash = kIsNotArrayIndexMask;
-
- // Maximal string length.
- static const int kMaxLength = (1 << (32 - 2)) - 1;
-
- // Max length for computing hash. For strings longer than this limit the
- // string length is used as the hash value.
- static const int kMaxHashCalcLength = 16383;
-
- // Limit for truncation in short printing.
- static const int kMaxShortPrintLength = 1024;
-
- // Support for regular expressions.
- const uc16* GetTwoByteData();
- const uc16* GetTwoByteData(unsigned start);
-
- // Support for StringInputBuffer
- static const unibrow::byte* ReadBlock(String* input,
- unibrow::byte* util_buffer,
- unsigned capacity,
- unsigned* remaining,
- unsigned* offset);
- static const unibrow::byte* ReadBlock(String** input,
- unibrow::byte* util_buffer,
- unsigned capacity,
- unsigned* remaining,
- unsigned* offset);
-
- // Helper function for flattening strings.
- template <typename sinkchar>
- static void WriteToFlat(String* source,
- sinkchar* sink,
- int from,
- int to);
-
- static inline bool IsAscii(const char* chars, int length) {
- const char* limit = chars + length;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- ASSERT(kMaxAsciiCharCode == 0x7F);
- const uintptr_t non_ascii_mask = kUintptrAllBitsSet / 0xFF * 0x80;
- while (chars <= limit - sizeof(uintptr_t)) {
- if (*reinterpret_cast<const uintptr_t*>(chars) & non_ascii_mask) {
- return false;
- }
- chars += sizeof(uintptr_t);
- }
-#endif
- while (chars < limit) {
- if (static_cast<uint8_t>(*chars) > kMaxAsciiCharCodeU) return false;
- ++chars;
- }
- return true;
- }
-
- static inline bool IsAscii(const uc16* chars, int length) {
- const uc16* limit = chars + length;
- while (chars < limit) {
- if (*chars > kMaxAsciiCharCodeU) return false;
- ++chars;
- }
- return true;
- }
-
- protected:
- class ReadBlockBuffer {
- public:
- ReadBlockBuffer(unibrow::byte* util_buffer_,
- unsigned cursor_,
- unsigned capacity_,
- unsigned remaining_) :
- util_buffer(util_buffer_),
- cursor(cursor_),
- capacity(capacity_),
- remaining(remaining_) {
- }
- unibrow::byte* util_buffer;
- unsigned cursor;
- unsigned capacity;
- unsigned remaining;
- };
-
- static inline const unibrow::byte* ReadBlock(String* input,
- ReadBlockBuffer* buffer,
- unsigned* offset,
- unsigned max_chars);
- static void ReadBlockIntoBuffer(String* input,
- ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned max_chars);
-
- private:
- // Try to flatten the top level ConsString that is hiding behind this
- // string. This is a no-op unless the string is a ConsString. Flatten
- // mutates the ConsString and might return a failure.
- MUST_USE_RESULT MaybeObject* SlowTryFlatten(PretenureFlag pretenure);
-
- static inline bool IsHashFieldComputed(uint32_t field);
-
- // Slow case of String::Equals. This implementation works on any strings
- // but it is most efficient on strings that are almost flat.
- bool SlowEquals(String* other);
-
- // Slow case of AsArrayIndex.
- bool SlowAsArrayIndex(uint32_t* index);
-
- // Compute and set the hash code.
- uint32_t ComputeAndSetHash();
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(String);
-};
-
-
-// The SeqString abstract class captures sequential string values.
-class SeqString: public String {
- public:
-
- // Casting.
- static inline SeqString* cast(Object* obj);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
-};
-
-
-// The AsciiString class captures sequential ascii string objects.
-// Each character in the AsciiString is an ascii character.
-class SeqAsciiString: public SeqString {
- public:
- static const bool kHasAsciiEncoding = true;
-
- // Dispatched behavior.
- inline uint16_t SeqAsciiStringGet(int index);
- inline void SeqAsciiStringSet(int index, uint16_t value);
-
- // Get the address of the characters in this string.
- inline Address GetCharsAddress();
-
- inline char* GetChars();
-
- // Casting
- static inline SeqAsciiString* cast(Object* obj);
-
- // Garbage collection support. This method is called by the
- // garbage collector to compute the actual size of an AsciiString
- // instance.
- inline int SeqAsciiStringSize(InstanceType instance_type);
-
- // Computes the size for an AsciiString instance of a given length.
- static int SizeFor(int length) {
- return OBJECT_POINTER_ALIGN(kHeaderSize + length * kCharSize);
- }
-
- // Layout description.
- static const int kHeaderSize = String::kSize;
- static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
-
- // Maximal memory usage for a single sequential ASCII string.
- static const int kMaxSize = 512 * MB;
- // Maximal length of a single sequential ASCII string.
- // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
- static const int kMaxLength = (kMaxSize - kHeaderSize);
-
- // Support for StringInputBuffer.
- inline void SeqAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
- unsigned* offset,
- unsigned chars);
- inline const unibrow::byte* SeqAsciiStringReadBlock(unsigned* remaining,
- unsigned* offset,
- unsigned chars);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SeqAsciiString);
-};
-
-
-// The TwoByteString class captures sequential unicode string objects.
-// Each character in the TwoByteString is a two-byte uint16_t.
-class SeqTwoByteString: public SeqString {
- public:
- static const bool kHasAsciiEncoding = false;
-
- // Dispatched behavior.
- inline uint16_t SeqTwoByteStringGet(int index);
- inline void SeqTwoByteStringSet(int index, uint16_t value);
-
- // Get the address of the characters in this string.
- inline Address GetCharsAddress();
-
- inline uc16* GetChars();
-
- // For regexp code.
- const uint16_t* SeqTwoByteStringGetData(unsigned start);
-
- // Casting
- static inline SeqTwoByteString* cast(Object* obj);
-
- // Garbage collection support. This method is called by the
- // garbage collector to compute the actual size of a TwoByteString
- // instance.
- inline int SeqTwoByteStringSize(InstanceType instance_type);
-
- // Computes the size for a TwoByteString instance of a given length.
- static int SizeFor(int length) {
- return OBJECT_POINTER_ALIGN(kHeaderSize + length * kShortSize);
- }
-
- // Layout description.
- static const int kHeaderSize = String::kSize;
- static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
-
- // Maximal memory usage for a single sequential two-byte string.
- static const int kMaxSize = 512 * MB;
- // Maximal length of a single sequential two-byte string.
- // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
- static const int kMaxLength = (kMaxSize - kHeaderSize) / sizeof(uint16_t);
-
- // Support for StringInputBuffer.
- inline void SeqTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned chars);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SeqTwoByteString);
-};
-
-
-// The ConsString class describes string values built by using the
-// addition operator on strings. A ConsString is a pair where the
-// first and second components are pointers to other string values.
-// One or both components of a ConsString can be pointers to other
-// ConsStrings, creating a binary tree of ConsStrings where the leaves
-// are non-ConsString string values. The string value represented by
-// a ConsString can be obtained by concatenating the leaf string
-// values in a left-to-right depth-first traversal of the tree.
-class ConsString: public String {
- public:
- // First string of the cons cell.
- inline String* first();
- // Doesn't check that the result is a string, even in debug mode. This is
- // useful during GC where the mark bits confuse the checks.
- inline Object* unchecked_first();
- inline void set_first(String* first,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
- // Second string of the cons cell.
- inline String* second();
- // Doesn't check that the result is a string, even in debug mode. This is
- // useful during GC where the mark bits confuse the checks.
- inline Object* unchecked_second();
- inline void set_second(String* second,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
- // Dispatched behavior.
- uint16_t ConsStringGet(int index);
-
- // Casting.
- static inline ConsString* cast(Object* obj);
-
- // Layout description.
- static const int kFirstOffset = POINTER_SIZE_ALIGN(String::kSize);
- static const int kSecondOffset = kFirstOffset + kPointerSize;
- static const int kSize = kSecondOffset + kPointerSize;
-
- // Support for StringInputBuffer.
- inline const unibrow::byte* ConsStringReadBlock(ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned chars);
- inline void ConsStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned chars);
-
- // Minimum length for a cons string.
- static const int kMinLength = 13;
-
- typedef FixedBodyDescriptor<kFirstOffset, kSecondOffset + kPointerSize, kSize>
- BodyDescriptor;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ConsString);
-};
-
-
-// The ExternalString class describes string values that are backed by
-// a string resource that lies outside the V8 heap. ExternalStrings
-// consist of the length field common to all strings, a pointer to the
-// external resource. It is important to ensure (externally) that the
-// resource is not deallocated while the ExternalString is live in the
-// V8 heap.
-//
-// The API expects that all ExternalStrings are created through the
-// API. Therefore, ExternalStrings should not be used internally.
-class ExternalString: public String {
- public:
- // Casting
- static inline ExternalString* cast(Object* obj);
-
- // Layout description.
- static const int kResourceOffset = POINTER_SIZE_ALIGN(String::kSize);
- static const int kSize = kResourceOffset + kPointerSize;
-
- STATIC_CHECK(kResourceOffset == Internals::kStringResourceOffset);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalString);
-};
-
-
-// The ExternalAsciiString class is an external string backed by an
-// ASCII string.
-class ExternalAsciiString: public ExternalString {
- public:
- static const bool kHasAsciiEncoding = true;
-
- typedef v8::String::ExternalAsciiStringResource Resource;
-
- // The underlying resource.
- inline Resource* resource();
- inline void set_resource(Resource* buffer);
-
- // Dispatched behavior.
- uint16_t ExternalAsciiStringGet(int index);
-
- // Casting.
- static inline ExternalAsciiString* cast(Object* obj);
-
- // Garbage collection support.
- inline void ExternalAsciiStringIterateBody(ObjectVisitor* v);
-
- template<typename StaticVisitor>
- inline void ExternalAsciiStringIterateBody();
-
- // Support for StringInputBuffer.
- const unibrow::byte* ExternalAsciiStringReadBlock(unsigned* remaining,
- unsigned* offset,
- unsigned chars);
- inline void ExternalAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
- unsigned* offset,
- unsigned chars);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalAsciiString);
-};
-
-
-// The ExternalTwoByteString class is an external string backed by a UTF-16
-// encoded string.
-class ExternalTwoByteString: public ExternalString {
- public:
- static const bool kHasAsciiEncoding = false;
-
- typedef v8::String::ExternalStringResource Resource;
-
- // The underlying string resource.
- inline Resource* resource();
- inline void set_resource(Resource* buffer);
-
- // Dispatched behavior.
- uint16_t ExternalTwoByteStringGet(int index);
-
- // For regexp code.
- const uint16_t* ExternalTwoByteStringGetData(unsigned start);
-
- // Casting.
- static inline ExternalTwoByteString* cast(Object* obj);
-
- // Garbage collection support.
- inline void ExternalTwoByteStringIterateBody(ObjectVisitor* v);
-
- template<typename StaticVisitor>
- inline void ExternalTwoByteStringIterateBody();
-
-
- // Support for StringInputBuffer.
- void ExternalTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned chars);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalTwoByteString);
-};
-
-
-// Utility superclass for stack-allocated objects that must be updated
-// on gc. It provides two ways for the gc to update instances, either
-// iterating or updating after gc.
-class Relocatable BASE_EMBEDDED {
- public:
- explicit inline Relocatable(Isolate* isolate);
- inline virtual ~Relocatable();
- virtual void IterateInstance(ObjectVisitor* v) { }
- virtual void PostGarbageCollection() { }
-
- static void PostGarbageCollectionProcessing();
- static int ArchiveSpacePerThread();
- static char* ArchiveState(char* to);
- static char* RestoreState(char* from);
- static void Iterate(ObjectVisitor* v);
- static void Iterate(ObjectVisitor* v, Relocatable* top);
- static char* Iterate(ObjectVisitor* v, char* t);
- private:
- Isolate* isolate_;
- Relocatable* prev_;
-};
-
-
-// A flat string reader provides random access to the contents of a
-// string independent of the character width of the string. The handle
-// must be valid as long as the reader is being used.
-class FlatStringReader : public Relocatable {
- public:
- FlatStringReader(Isolate* isolate, Handle<String> str);
- FlatStringReader(Isolate* isolate, Vector<const char> input);
- void PostGarbageCollection();
- inline uc32 Get(int index);
- int length() { return length_; }
- private:
- String** str_;
- bool is_ascii_;
- int length_;
- const void* start_;
-};
-
-
-// Note that StringInputBuffers are not valid across a GC! To fix this
-// it would have to store a String Handle instead of a String* and
-// AsciiStringReadBlock would have to be modified to use memcpy.
-//
-// StringInputBuffer is able to traverse any string regardless of how
-// deeply nested a sequence of ConsStrings it is made of. However,
-// performance will be better if deep strings are flattened before they
-// are traversed. Since flattening requires memory allocation this is
-// not always desirable, however (esp. in debugging situations).
-class StringInputBuffer: public unibrow::InputBuffer<String, String*, 1024> {
- public:
- virtual void Seek(unsigned pos);
- inline StringInputBuffer(): unibrow::InputBuffer<String, String*, 1024>() {}
- inline StringInputBuffer(String* backing):
- unibrow::InputBuffer<String, String*, 1024>(backing) {}
-};
-
-
-class SafeStringInputBuffer
- : public unibrow::InputBuffer<String, String**, 256> {
- public:
- virtual void Seek(unsigned pos);
- inline SafeStringInputBuffer()
- : unibrow::InputBuffer<String, String**, 256>() {}
- inline SafeStringInputBuffer(String** backing)
- : unibrow::InputBuffer<String, String**, 256>(backing) {}
-};
-
-
-template <typename T>
-class VectorIterator {
- public:
- VectorIterator(T* d, int l) : data_(Vector<const T>(d, l)), index_(0) { }
- explicit VectorIterator(Vector<const T> data) : data_(data), index_(0) { }
- T GetNext() { return data_[index_++]; }
- bool has_more() { return index_ < data_.length(); }
- private:
- Vector<const T> data_;
- int index_;
-};
-
-
-// The Oddball describes objects null, undefined, true, and false.
-class Oddball: public HeapObject {
- public:
- // [to_string]: Cached to_string computed at startup.
- DECL_ACCESSORS(to_string, String)
-
- // [to_number]: Cached to_number computed at startup.
- DECL_ACCESSORS(to_number, Object)
-
- inline byte kind();
- inline void set_kind(byte kind);
-
- // Casting.
- static inline Oddball* cast(Object* obj);
-
- // Dispatched behavior.
-#ifdef DEBUG
- void OddballVerify();
-#endif
-
- // Initialize the fields.
- MUST_USE_RESULT MaybeObject* Initialize(const char* to_string,
- Object* to_number,
- byte kind);
-
- // Layout description.
- static const int kToStringOffset = HeapObject::kHeaderSize;
- static const int kToNumberOffset = kToStringOffset + kPointerSize;
- static const int kKindOffset = kToNumberOffset + kPointerSize;
- static const int kSize = kKindOffset + kPointerSize;
-
- static const byte kFalse = 0;
- static const byte kTrue = 1;
- static const byte kNotBooleanMask = ~1;
- static const byte kTheHole = 2;
- static const byte kNull = 3;
- static const byte kArgumentMarker = 4;
- static const byte kUndefined = 5;
- static const byte kOther = 6;
-
- typedef FixedBodyDescriptor<kToStringOffset,
- kToNumberOffset + kPointerSize,
- kSize> BodyDescriptor;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Oddball);
-};
-
-
-class JSGlobalPropertyCell: public HeapObject {
- public:
- // [value]: value of the global property.
- DECL_ACCESSORS(value, Object)
-
- // Casting.
- static inline JSGlobalPropertyCell* cast(Object* obj);
-
-#ifdef DEBUG
- void JSGlobalPropertyCellVerify();
-#endif
-#ifdef OBJECT_PRINT
- inline void JSGlobalPropertyCellPrint() {
- JSGlobalPropertyCellPrint(stdout);
- }
- void JSGlobalPropertyCellPrint(FILE* out);
-#endif
-
- // Layout description.
- static const int kValueOffset = HeapObject::kHeaderSize;
- static const int kSize = kValueOffset + kPointerSize;
-
- typedef FixedBodyDescriptor<kValueOffset,
- kValueOffset + kPointerSize,
- kSize> BodyDescriptor;
-
- // Returns the isolate/heap this cell object belongs to.
- inline Isolate* isolate();
- inline Heap* heap();
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalPropertyCell);
-};
-
-
-
-// Proxy describes objects pointing from JavaScript to C structures.
-// Since they cannot contain references to JS HeapObjects they can be
-// placed in old_data_space.
-class Proxy: public HeapObject {
- public:
- // [proxy]: field containing the address.
- inline Address proxy();
- inline void set_proxy(Address value);
-
- // Casting.
- static inline Proxy* cast(Object* obj);
-
- // Dispatched behavior.
- inline void ProxyIterateBody(ObjectVisitor* v);
-
- template<typename StaticVisitor>
- inline void ProxyIterateBody();
-
-#ifdef OBJECT_PRINT
- inline void ProxyPrint() {
- ProxyPrint(stdout);
- }
- void ProxyPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void ProxyVerify();
-#endif
-
- // Layout description.
-
- static const int kProxyOffset = HeapObject::kHeaderSize;
- static const int kSize = kProxyOffset + kPointerSize;
-
- STATIC_CHECK(kProxyOffset == Internals::kProxyProxyOffset);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Proxy);
-};
-
-
-// The JSArray describes JavaScript Arrays
-// Such an array can be in one of two modes:
-// - fast, backing storage is a FixedArray and length <= elements.length();
-// Please note: push and pop can be used to grow and shrink the array.
-// - slow, backing storage is a HashTable with numbers as keys.
-class JSArray: public JSObject {
- public:
- // [length]: The length property.
- DECL_ACCESSORS(length, Object)
-
- // Overload the length setter to skip write barrier when the length
- // is set to a smi. This matches the set function on FixedArray.
- inline void set_length(Smi* length);
-
- MUST_USE_RESULT MaybeObject* JSArrayUpdateLengthFromIndex(uint32_t index,
- Object* value);
-
- // Initialize the array with the given capacity. The function may
- // fail due to out-of-memory situations, but only if the requested
- // capacity is non-zero.
- MUST_USE_RESULT MaybeObject* Initialize(int capacity);
-
- // Set the content of the array to the content of storage.
- inline void SetContent(FixedArray* storage);
-
- // Casting.
- static inline JSArray* cast(Object* obj);
-
- // Uses handles. Ensures that the fixed array backing the JSArray has at
- // least the stated size.
- inline void EnsureSize(int minimum_size_of_backing_fixed_array);
-
- // Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSArrayPrint() {
- JSArrayPrint(stdout);
- }
- void JSArrayPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void JSArrayVerify();
-#endif
-
- // Number of element slots to pre-allocate for an empty array.
- static const int kPreallocatedArrayElements = 4;
-
- // Layout description.
- static const int kLengthOffset = JSObject::kHeaderSize;
- static const int kSize = kLengthOffset + kPointerSize;
-
- private:
- // Expand the fixed array backing of a fast-case JSArray to at least
- // the requested size.
- void Expand(int minimum_size_of_backing_fixed_array);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSArray);
-};
-
-
-// JSRegExpResult is just a JSArray with a specific initial map.
-// This initial map adds in-object properties for "index" and "input"
-// properties, as assigned by RegExp.prototype.exec, which allows
-// faster creation of RegExp exec results.
-// This class just holds constants used when creating the result.
-// After creation the result must be treated as a JSArray in all regards.
-class JSRegExpResult: public JSArray {
- public:
- // Offsets of object fields.
- static const int kIndexOffset = JSArray::kSize;
- static const int kInputOffset = kIndexOffset + kPointerSize;
- static const int kSize = kInputOffset + kPointerSize;
- // Indices of in-object properties.
- static const int kIndexIndex = 0;
- static const int kInputIndex = 1;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSRegExpResult);
-};
-
-
-// An accessor must have a getter, but can have no setter.
-//
-// When setting a property, V8 searches accessors in prototypes.
-// If an accessor was found and it does not have a setter,
-// the request is ignored.
-//
-// If the accessor in the prototype has the READ_ONLY property attribute, then
-// a new value is added to the local object when the property is set.
-// This shadows the accessor in the prototype.
-class AccessorInfo: public Struct {
- public:
- DECL_ACCESSORS(getter, Object)
- DECL_ACCESSORS(setter, Object)
- DECL_ACCESSORS(data, Object)
- DECL_ACCESSORS(name, Object)
- DECL_ACCESSORS(flag, Smi)
-
- inline bool all_can_read();
- inline void set_all_can_read(bool value);
-
- inline bool all_can_write();
- inline void set_all_can_write(bool value);
-
- inline bool prohibits_overwriting();
- inline void set_prohibits_overwriting(bool value);
-
- inline PropertyAttributes property_attributes();
- inline void set_property_attributes(PropertyAttributes attributes);
-
- static inline AccessorInfo* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- inline void AccessorInfoPrint() {
- AccessorInfoPrint(stdout);
- }
- void AccessorInfoPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void AccessorInfoVerify();
-#endif
-
- static const int kGetterOffset = HeapObject::kHeaderSize;
- static const int kSetterOffset = kGetterOffset + kPointerSize;
- static const int kDataOffset = kSetterOffset + kPointerSize;
- static const int kNameOffset = kDataOffset + kPointerSize;
- static const int kFlagOffset = kNameOffset + kPointerSize;
- static const int kSize = kFlagOffset + kPointerSize;
-
- private:
- // Bit positions in flag.
- static const int kAllCanReadBit = 0;
- static const int kAllCanWriteBit = 1;
- static const int kProhibitsOverwritingBit = 2;
- class AttributesField: public BitField<PropertyAttributes, 3, 3> {};
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(AccessorInfo);
-};
-
-
-class AccessCheckInfo: public Struct {
- public:
- DECL_ACCESSORS(named_callback, Object)
- DECL_ACCESSORS(indexed_callback, Object)
- DECL_ACCESSORS(data, Object)
-
- static inline AccessCheckInfo* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- inline void AccessCheckInfoPrint() {
- AccessCheckInfoPrint(stdout);
- }
- void AccessCheckInfoPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void AccessCheckInfoVerify();
-#endif
-
- static const int kNamedCallbackOffset = HeapObject::kHeaderSize;
- static const int kIndexedCallbackOffset = kNamedCallbackOffset + kPointerSize;
- static const int kDataOffset = kIndexedCallbackOffset + kPointerSize;
- static const int kSize = kDataOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(AccessCheckInfo);
-};
-
-
-class InterceptorInfo: public Struct {
- public:
- DECL_ACCESSORS(getter, Object)
- DECL_ACCESSORS(setter, Object)
- DECL_ACCESSORS(query, Object)
- DECL_ACCESSORS(deleter, Object)
- DECL_ACCESSORS(enumerator, Object)
- DECL_ACCESSORS(data, Object)
-
- static inline InterceptorInfo* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- inline void InterceptorInfoPrint() {
- InterceptorInfoPrint(stdout);
- }
- void InterceptorInfoPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void InterceptorInfoVerify();
-#endif
-
- static const int kGetterOffset = HeapObject::kHeaderSize;
- static const int kSetterOffset = kGetterOffset + kPointerSize;
- static const int kQueryOffset = kSetterOffset + kPointerSize;
- static const int kDeleterOffset = kQueryOffset + kPointerSize;
- static const int kEnumeratorOffset = kDeleterOffset + kPointerSize;
- static const int kDataOffset = kEnumeratorOffset + kPointerSize;
- static const int kSize = kDataOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(InterceptorInfo);
-};
-
-
-class CallHandlerInfo: public Struct {
- public:
- DECL_ACCESSORS(callback, Object)
- DECL_ACCESSORS(data, Object)
-
- static inline CallHandlerInfo* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- inline void CallHandlerInfoPrint() {
- CallHandlerInfoPrint(stdout);
- }
- void CallHandlerInfoPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void CallHandlerInfoVerify();
-#endif
-
- static const int kCallbackOffset = HeapObject::kHeaderSize;
- static const int kDataOffset = kCallbackOffset + kPointerSize;
- static const int kSize = kDataOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(CallHandlerInfo);
-};
-
-
-class TemplateInfo: public Struct {
- public:
- DECL_ACCESSORS(tag, Object)
- DECL_ACCESSORS(property_list, Object)
-
-#ifdef DEBUG
- void TemplateInfoVerify();
-#endif
-
- static const int kTagOffset = HeapObject::kHeaderSize;
- static const int kPropertyListOffset = kTagOffset + kPointerSize;
- static const int kHeaderSize = kPropertyListOffset + kPointerSize;
- protected:
- friend class AGCCVersionRequiresThisClassToHaveAFriendSoHereItIs;
- DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateInfo);
-};
-
-
-class FunctionTemplateInfo: public TemplateInfo {
- public:
- DECL_ACCESSORS(serial_number, Object)
- DECL_ACCESSORS(call_code, Object)
- DECL_ACCESSORS(property_accessors, Object)
- DECL_ACCESSORS(prototype_template, Object)
- DECL_ACCESSORS(parent_template, Object)
- DECL_ACCESSORS(named_property_handler, Object)
- DECL_ACCESSORS(indexed_property_handler, Object)
- DECL_ACCESSORS(instance_template, Object)
- DECL_ACCESSORS(class_name, Object)
- DECL_ACCESSORS(signature, Object)
- DECL_ACCESSORS(instance_call_handler, Object)
- DECL_ACCESSORS(access_check_info, Object)
- DECL_ACCESSORS(flag, Smi)
-
- // Following properties use flag bits.
- DECL_BOOLEAN_ACCESSORS(hidden_prototype)
- DECL_BOOLEAN_ACCESSORS(undetectable)
- // If the bit is set, object instances created by this function
- // requires access check.
- DECL_BOOLEAN_ACCESSORS(needs_access_check)
-
- static inline FunctionTemplateInfo* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- inline void FunctionTemplateInfoPrint() {
- FunctionTemplateInfoPrint(stdout);
- }
- void FunctionTemplateInfoPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void FunctionTemplateInfoVerify();
-#endif
-
- static const int kSerialNumberOffset = TemplateInfo::kHeaderSize;
- static const int kCallCodeOffset = kSerialNumberOffset + kPointerSize;
- static const int kPropertyAccessorsOffset = kCallCodeOffset + kPointerSize;
- static const int kPrototypeTemplateOffset =
- kPropertyAccessorsOffset + kPointerSize;
- static const int kParentTemplateOffset =
- kPrototypeTemplateOffset + kPointerSize;
- static const int kNamedPropertyHandlerOffset =
- kParentTemplateOffset + kPointerSize;
- static const int kIndexedPropertyHandlerOffset =
- kNamedPropertyHandlerOffset + kPointerSize;
- static const int kInstanceTemplateOffset =
- kIndexedPropertyHandlerOffset + kPointerSize;
- static const int kClassNameOffset = kInstanceTemplateOffset + kPointerSize;
- static const int kSignatureOffset = kClassNameOffset + kPointerSize;
- static const int kInstanceCallHandlerOffset = kSignatureOffset + kPointerSize;
- static const int kAccessCheckInfoOffset =
- kInstanceCallHandlerOffset + kPointerSize;
- static const int kFlagOffset = kAccessCheckInfoOffset + kPointerSize;
- static const int kSize = kFlagOffset + kPointerSize;
-
- private:
- // Bit position in the flag, from least significant bit position.
- static const int kHiddenPrototypeBit = 0;
- static const int kUndetectableBit = 1;
- static const int kNeedsAccessCheckBit = 2;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FunctionTemplateInfo);
-};
-
-
-class ObjectTemplateInfo: public TemplateInfo {
- public:
- DECL_ACCESSORS(constructor, Object)
- DECL_ACCESSORS(internal_field_count, Object)
-
- static inline ObjectTemplateInfo* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- inline void ObjectTemplateInfoPrint() {
- ObjectTemplateInfoPrint(stdout);
- }
- void ObjectTemplateInfoPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void ObjectTemplateInfoVerify();
-#endif
-
- static const int kConstructorOffset = TemplateInfo::kHeaderSize;
- static const int kInternalFieldCountOffset =
- kConstructorOffset + kPointerSize;
- static const int kSize = kInternalFieldCountOffset + kPointerSize;
-};
-
-
-class SignatureInfo: public Struct {
- public:
- DECL_ACCESSORS(receiver, Object)
- DECL_ACCESSORS(args, Object)
-
- static inline SignatureInfo* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- inline void SignatureInfoPrint() {
- SignatureInfoPrint(stdout);
- }
- void SignatureInfoPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void SignatureInfoVerify();
-#endif
-
- static const int kReceiverOffset = Struct::kHeaderSize;
- static const int kArgsOffset = kReceiverOffset + kPointerSize;
- static const int kSize = kArgsOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SignatureInfo);
-};
-
-
-class TypeSwitchInfo: public Struct {
- public:
- DECL_ACCESSORS(types, Object)
-
- static inline TypeSwitchInfo* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- inline void TypeSwitchInfoPrint() {
- TypeSwitchInfoPrint(stdout);
- }
- void TypeSwitchInfoPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void TypeSwitchInfoVerify();
-#endif
-
- static const int kTypesOffset = Struct::kHeaderSize;
- static const int kSize = kTypesOffset + kPointerSize;
-};
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-// The DebugInfo class holds additional information for a function being
-// debugged.
-class DebugInfo: public Struct {
- public:
- // The shared function info for the source being debugged.
- DECL_ACCESSORS(shared, SharedFunctionInfo)
- // Code object for the original code.
- DECL_ACCESSORS(original_code, Code)
- // Code object for the patched code. This code object is the code object
- // currently active for the function.
- DECL_ACCESSORS(code, Code)
- // Fixed array holding status information for each active break point.
- DECL_ACCESSORS(break_points, FixedArray)
-
- // Check if there is a break point at a code position.
- bool HasBreakPoint(int code_position);
- // Get the break point info object for a code position.
- Object* GetBreakPointInfo(int code_position);
- // Clear a break point.
- static void ClearBreakPoint(Handle<DebugInfo> debug_info,
- int code_position,
- Handle<Object> break_point_object);
- // Set a break point.
- static void SetBreakPoint(Handle<DebugInfo> debug_info, int code_position,
- int source_position, int statement_position,
- Handle<Object> break_point_object);
- // Get the break point objects for a code position.
- Object* GetBreakPointObjects(int code_position);
- // Find the break point info holding this break point object.
- static Object* FindBreakPointInfo(Handle<DebugInfo> debug_info,
- Handle<Object> break_point_object);
- // Get the number of break points for this function.
- int GetBreakPointCount();
-
- static inline DebugInfo* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- inline void DebugInfoPrint() {
- DebugInfoPrint(stdout);
- }
- void DebugInfoPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void DebugInfoVerify();
-#endif
-
- static const int kSharedFunctionInfoIndex = Struct::kHeaderSize;
- static const int kOriginalCodeIndex = kSharedFunctionInfoIndex + kPointerSize;
- static const int kPatchedCodeIndex = kOriginalCodeIndex + kPointerSize;
- static const int kActiveBreakPointsCountIndex =
- kPatchedCodeIndex + kPointerSize;
- static const int kBreakPointsStateIndex =
- kActiveBreakPointsCountIndex + kPointerSize;
- static const int kSize = kBreakPointsStateIndex + kPointerSize;
-
- private:
- static const int kNoBreakPointInfo = -1;
-
- // Lookup the index in the break_points array for a code position.
- int GetBreakPointInfoIndex(int code_position);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(DebugInfo);
-};
-
-
-// The BreakPointInfo class holds information for break points set in a
-// function. The DebugInfo object holds a BreakPointInfo object for each code
-// position with one or more break points.
-class BreakPointInfo: public Struct {
- public:
- // The position in the code for the break point.
- DECL_ACCESSORS(code_position, Smi)
- // The position in the source for the break position.
- DECL_ACCESSORS(source_position, Smi)
- // The position in the source for the last statement before this break
- // position.
- DECL_ACCESSORS(statement_position, Smi)
- // List of related JavaScript break points.
- DECL_ACCESSORS(break_point_objects, Object)
-
- // Removes a break point.
- static void ClearBreakPoint(Handle<BreakPointInfo> info,
- Handle<Object> break_point_object);
- // Set a break point.
- static void SetBreakPoint(Handle<BreakPointInfo> info,
- Handle<Object> break_point_object);
- // Check if break point info has this break point object.
- static bool HasBreakPointObject(Handle<BreakPointInfo> info,
- Handle<Object> break_point_object);
- // Get the number of break points for this code position.
- int GetBreakPointCount();
-
- static inline BreakPointInfo* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- inline void BreakPointInfoPrint() {
- BreakPointInfoPrint(stdout);
- }
- void BreakPointInfoPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void BreakPointInfoVerify();
-#endif
-
- static const int kCodePositionIndex = Struct::kHeaderSize;
- static const int kSourcePositionIndex = kCodePositionIndex + kPointerSize;
- static const int kStatementPositionIndex =
- kSourcePositionIndex + kPointerSize;
- static const int kBreakPointObjectsIndex =
- kStatementPositionIndex + kPointerSize;
- static const int kSize = kBreakPointObjectsIndex + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(BreakPointInfo);
-};
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-#undef DECL_BOOLEAN_ACCESSORS
-#undef DECL_ACCESSORS
-
-
-// Abstract base class for visiting, and optionally modifying, the
-// pointers contained in Objects. Used in GC and serialization/deserialization.
-class ObjectVisitor BASE_EMBEDDED {
- public:
- virtual ~ObjectVisitor() {}
-
- // Visits a contiguous arrays of pointers in the half-open range
- // [start, end). Any or all of the values may be modified on return.
- virtual void VisitPointers(Object** start, Object** end) = 0;
-
- // To allow lazy clearing of inline caches the visitor has
- // a rich interface for iterating over Code objects..
-
- // Visits a code target in the instruction stream.
- virtual void VisitCodeTarget(RelocInfo* rinfo);
-
- // Visits a code entry in a JS function.
- virtual void VisitCodeEntry(Address entry_address);
-
- // Visits a global property cell reference in the instruction stream.
- virtual void VisitGlobalPropertyCell(RelocInfo* rinfo);
-
- // Visits a runtime entry in the instruction stream.
- virtual void VisitRuntimeEntry(RelocInfo* rinfo) {}
-
- // Visits the resource of an ASCII or two-byte string.
- virtual void VisitExternalAsciiString(
- v8::String::ExternalAsciiStringResource** resource) {}
- virtual void VisitExternalTwoByteString(
- v8::String::ExternalStringResource** resource) {}
-
- // Visits a debug call target in the instruction stream.
- virtual void VisitDebugTarget(RelocInfo* rinfo);
-
- // Handy shorthand for visiting a single pointer.
- virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
-
- // Visits a contiguous arrays of external references (references to the C++
- // heap) in the half-open range [start, end). Any or all of the values
- // may be modified on return.
- virtual void VisitExternalReferences(Address* start, Address* end) {}
-
- inline void VisitExternalReference(Address* p) {
- VisitExternalReferences(p, p + 1);
- }
-
- // Visits a handle that has an embedder-assigned class ID.
- virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {}
-
-#ifdef DEBUG
- // Intended for serialization/deserialization checking: insert, or
- // check for the presence of, a tag at this position in the stream.
- virtual void Synchronize(const char* tag) {}
-#else
- inline void Synchronize(const char* tag) {}
-#endif
-};
-
-
-class StructBodyDescriptor : public
- FlexibleBodyDescriptor<HeapObject::kHeaderSize> {
- public:
- static inline int SizeOf(Map* map, HeapObject* object) {
- return map->instance_size();
- }
-};
-
-
-// BooleanBit is a helper class for setting and getting a bit in an
-// integer or Smi.
-class BooleanBit : public AllStatic {
- public:
- static inline bool get(Smi* smi, int bit_position) {
- return get(smi->value(), bit_position);
- }
-
- static inline bool get(int value, int bit_position) {
- return (value & (1 << bit_position)) != 0;
- }
-
- static inline Smi* set(Smi* smi, int bit_position, bool v) {
- return Smi::FromInt(set(smi->value(), bit_position, v));
- }
-
- static inline int set(int value, int bit_position, bool v) {
- if (v) {
- value |= (1 << bit_position);
- } else {
- value &= ~(1 << bit_position);
- }
- return value;
- }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_OBJECTS_H_
diff --git a/src/3rdparty/v8/src/parser.cc b/src/3rdparty/v8/src/parser.cc
deleted file mode 100644
index 22d4d3f..0000000
--- a/src/3rdparty/v8/src/parser.cc
+++ /dev/null
@@ -1,5168 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "ast.h"
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "func-name-inferrer.h"
-#include "messages.h"
-#include "parser.h"
-#include "platform.h"
-#include "preparser.h"
-#include "runtime.h"
-#include "scopeinfo.h"
-#include "string-stream.h"
-
-#include "ast-inl.h"
-#include "jump-target-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// PositionStack is used for on-stack allocation of token positions for
-// new expressions. Please look at ParseNewExpression.
-
-class PositionStack {
- public:
- explicit PositionStack(bool* ok) : top_(NULL), ok_(ok) {}
- ~PositionStack() { ASSERT(!*ok_ || is_empty()); }
-
- class Element {
- public:
- Element(PositionStack* stack, int value) {
- previous_ = stack->top();
- value_ = value;
- stack->set_top(this);
- }
-
- private:
- Element* previous() { return previous_; }
- int value() { return value_; }
- friend class PositionStack;
- Element* previous_;
- int value_;
- };
-
- bool is_empty() { return top_ == NULL; }
- int pop() {
- ASSERT(!is_empty());
- int result = top_->value();
- top_ = top_->previous();
- return result;
- }
-
- private:
- Element* top() { return top_; }
- void set_top(Element* value) { top_ = value; }
- Element* top_;
- bool* ok_;
-};
-
-
-RegExpBuilder::RegExpBuilder()
- : zone_(Isolate::Current()->zone()),
- pending_empty_(false),
- characters_(NULL),
- terms_(),
- alternatives_()
-#ifdef DEBUG
- , last_added_(ADD_NONE)
-#endif
- {}
-
-
-void RegExpBuilder::FlushCharacters() {
- pending_empty_ = false;
- if (characters_ != NULL) {
- RegExpTree* atom = new(zone()) RegExpAtom(characters_->ToConstVector());
- characters_ = NULL;
- text_.Add(atom);
- LAST(ADD_ATOM);
- }
-}
-
-
-void RegExpBuilder::FlushText() {
- FlushCharacters();
- int num_text = text_.length();
- if (num_text == 0) {
- return;
- } else if (num_text == 1) {
- terms_.Add(text_.last());
- } else {
- RegExpText* text = new(zone()) RegExpText();
- for (int i = 0; i < num_text; i++)
- text_.Get(i)->AppendToText(text);
- terms_.Add(text);
- }
- text_.Clear();
-}
-
-
-void RegExpBuilder::AddCharacter(uc16 c) {
- pending_empty_ = false;
- if (characters_ == NULL) {
- characters_ = new ZoneList<uc16>(4);
- }
- characters_->Add(c);
- LAST(ADD_CHAR);
-}
-
-
-void RegExpBuilder::AddEmpty() {
- pending_empty_ = true;
-}
-
-
-void RegExpBuilder::AddAtom(RegExpTree* term) {
- if (term->IsEmpty()) {
- AddEmpty();
- return;
- }
- if (term->IsTextElement()) {
- FlushCharacters();
- text_.Add(term);
- } else {
- FlushText();
- terms_.Add(term);
- }
- LAST(ADD_ATOM);
-}
-
-
-void RegExpBuilder::AddAssertion(RegExpTree* assert) {
- FlushText();
- terms_.Add(assert);
- LAST(ADD_ASSERT);
-}
-
-
-void RegExpBuilder::NewAlternative() {
- FlushTerms();
-}
-
-
-void RegExpBuilder::FlushTerms() {
- FlushText();
- int num_terms = terms_.length();
- RegExpTree* alternative;
- if (num_terms == 0) {
- alternative = RegExpEmpty::GetInstance();
- } else if (num_terms == 1) {
- alternative = terms_.last();
- } else {
- alternative = new(zone()) RegExpAlternative(terms_.GetList());
- }
- alternatives_.Add(alternative);
- terms_.Clear();
- LAST(ADD_NONE);
-}
-
-
-RegExpTree* RegExpBuilder::ToRegExp() {
- FlushTerms();
- int num_alternatives = alternatives_.length();
- if (num_alternatives == 0) {
- return RegExpEmpty::GetInstance();
- }
- if (num_alternatives == 1) {
- return alternatives_.last();
- }
- return new(zone()) RegExpDisjunction(alternatives_.GetList());
-}
-
-
-void RegExpBuilder::AddQuantifierToAtom(int min,
- int max,
- RegExpQuantifier::Type type) {
- if (pending_empty_) {
- pending_empty_ = false;
- return;
- }
- RegExpTree* atom;
- if (characters_ != NULL) {
- ASSERT(last_added_ == ADD_CHAR);
- // Last atom was character.
- Vector<const uc16> char_vector = characters_->ToConstVector();
- int num_chars = char_vector.length();
- if (num_chars > 1) {
- Vector<const uc16> prefix = char_vector.SubVector(0, num_chars - 1);
- text_.Add(new(zone()) RegExpAtom(prefix));
- char_vector = char_vector.SubVector(num_chars - 1, num_chars);
- }
- characters_ = NULL;
- atom = new(zone()) RegExpAtom(char_vector);
- FlushText();
- } else if (text_.length() > 0) {
- ASSERT(last_added_ == ADD_ATOM);
- atom = text_.RemoveLast();
- FlushText();
- } else if (terms_.length() > 0) {
- ASSERT(last_added_ == ADD_ATOM);
- atom = terms_.RemoveLast();
- if (atom->max_match() == 0) {
- // Guaranteed to only match an empty string.
- LAST(ADD_TERM);
- if (min == 0) {
- return;
- }
- terms_.Add(atom);
- return;
- }
- } else {
- // Only call immediately after adding an atom or character!
- UNREACHABLE();
- return;
- }
- terms_.Add(new(zone()) RegExpQuantifier(min, max, type, atom));
- LAST(ADD_TERM);
-}
-
-
-Handle<String> Parser::LookupSymbol(int symbol_id) {
- // Length of symbol cache is the number of identified symbols.
- // If we are larger than that, or negative, it's not a cached symbol.
- // This might also happen if there is no preparser symbol data, even
- // if there is some preparser data.
- if (static_cast<unsigned>(symbol_id)
- >= static_cast<unsigned>(symbol_cache_.length())) {
- if (scanner().is_literal_ascii()) {
- return isolate()->factory()->LookupAsciiSymbol(
- scanner().literal_ascii_string());
- } else {
- return isolate()->factory()->LookupTwoByteSymbol(
- scanner().literal_uc16_string());
- }
- }
- return LookupCachedSymbol(symbol_id);
-}
-
-
-Handle<String> Parser::LookupCachedSymbol(int symbol_id) {
- // Make sure the cache is large enough to hold the symbol identifier.
- if (symbol_cache_.length() <= symbol_id) {
- // Increase length to index + 1.
- symbol_cache_.AddBlock(Handle<String>::null(),
- symbol_id + 1 - symbol_cache_.length());
- }
- Handle<String> result = symbol_cache_.at(symbol_id);
- if (result.is_null()) {
- if (scanner().is_literal_ascii()) {
- result = isolate()->factory()->LookupAsciiSymbol(
- scanner().literal_ascii_string());
- } else {
- result = isolate()->factory()->LookupTwoByteSymbol(
- scanner().literal_uc16_string());
- }
- symbol_cache_.at(symbol_id) = result;
- return result;
- }
- isolate()->counters()->total_preparse_symbols_skipped()->Increment();
- return result;
-}
-
-
-FunctionEntry ScriptDataImpl::GetFunctionEntry(int start) {
- // The current pre-data entry must be a FunctionEntry with the given
- // start position.
- if ((function_index_ + FunctionEntry::kSize <= store_.length())
- && (static_cast<int>(store_[function_index_]) == start)) {
- int index = function_index_;
- function_index_ += FunctionEntry::kSize;
- return FunctionEntry(store_.SubVector(index,
- index + FunctionEntry::kSize));
- }
- return FunctionEntry();
-}
-
-
-int ScriptDataImpl::GetSymbolIdentifier() {
- return ReadNumber(&symbol_data_);
-}
-
-
-bool ScriptDataImpl::SanityCheck() {
- // Check that the header data is valid and doesn't specify
- // point to positions outside the store.
- if (store_.length() < PreparseDataConstants::kHeaderSize) return false;
- if (magic() != PreparseDataConstants::kMagicNumber) return false;
- if (version() != PreparseDataConstants::kCurrentVersion) return false;
- if (has_error()) {
- // Extra sane sanity check for error message encoding.
- if (store_.length() <= PreparseDataConstants::kHeaderSize
- + PreparseDataConstants::kMessageTextPos) {
- return false;
- }
- if (Read(PreparseDataConstants::kMessageStartPos) >
- Read(PreparseDataConstants::kMessageEndPos)) {
- return false;
- }
- unsigned arg_count = Read(PreparseDataConstants::kMessageArgCountPos);
- int pos = PreparseDataConstants::kMessageTextPos;
- for (unsigned int i = 0; i <= arg_count; i++) {
- if (store_.length() <= PreparseDataConstants::kHeaderSize + pos) {
- return false;
- }
- int length = static_cast<int>(Read(pos));
- if (length < 0) return false;
- pos += 1 + length;
- }
- if (store_.length() < PreparseDataConstants::kHeaderSize + pos) {
- return false;
- }
- return true;
- }
- // Check that the space allocated for function entries is sane.
- int functions_size =
- static_cast<int>(store_[PreparseDataConstants::kFunctionsSizeOffset]);
- if (functions_size < 0) return false;
- if (functions_size % FunctionEntry::kSize != 0) return false;
- // Check that the count of symbols is non-negative.
- int symbol_count =
- static_cast<int>(store_[PreparseDataConstants::kSymbolCountOffset]);
- if (symbol_count < 0) return false;
- // Check that the total size has room for header and function entries.
- int minimum_size =
- PreparseDataConstants::kHeaderSize + functions_size;
- if (store_.length() < minimum_size) return false;
- return true;
-}
-
-
-
-const char* ScriptDataImpl::ReadString(unsigned* start, int* chars) {
- int length = start[0];
- char* result = NewArray<char>(length + 1);
- for (int i = 0; i < length; i++) {
- result[i] = start[i + 1];
- }
- result[length] = '\0';
- if (chars != NULL) *chars = length;
- return result;
-}
-
-Scanner::Location ScriptDataImpl::MessageLocation() {
- int beg_pos = Read(PreparseDataConstants::kMessageStartPos);
- int end_pos = Read(PreparseDataConstants::kMessageEndPos);
- return Scanner::Location(beg_pos, end_pos);
-}
-
-
-const char* ScriptDataImpl::BuildMessage() {
- unsigned* start = ReadAddress(PreparseDataConstants::kMessageTextPos);
- return ReadString(start, NULL);
-}
-
-
-Vector<const char*> ScriptDataImpl::BuildArgs() {
- int arg_count = Read(PreparseDataConstants::kMessageArgCountPos);
- const char** array = NewArray<const char*>(arg_count);
- // Position after text found by skipping past length field and
- // length field content words.
- int pos = PreparseDataConstants::kMessageTextPos + 1
- + Read(PreparseDataConstants::kMessageTextPos);
- for (int i = 0; i < arg_count; i++) {
- int count = 0;
- array[i] = ReadString(ReadAddress(pos), &count);
- pos += count + 1;
- }
- return Vector<const char*>(array, arg_count);
-}
-
-
-unsigned ScriptDataImpl::Read(int position) {
- return store_[PreparseDataConstants::kHeaderSize + position];
-}
-
-
-unsigned* ScriptDataImpl::ReadAddress(int position) {
- return &store_[PreparseDataConstants::kHeaderSize + position];
-}
-
-
-Scope* Parser::NewScope(Scope* parent, Scope::Type type, bool inside_with) {
- Scope* result = new(zone()) Scope(parent, type);
- result->Initialize(inside_with);
- return result;
-}
-
-// ----------------------------------------------------------------------------
-// Target is a support class to facilitate manipulation of the
-// Parser's target_stack_ (the stack of potential 'break' and
-// 'continue' statement targets). Upon construction, a new target is
-// added; it is removed upon destruction.
-
-class Target BASE_EMBEDDED {
- public:
- Target(Target** variable, AstNode* node)
- : variable_(variable), node_(node), previous_(*variable) {
- *variable = this;
- }
-
- ~Target() {
- *variable_ = previous_;
- }
-
- Target* previous() { return previous_; }
- AstNode* node() { return node_; }
-
- private:
- Target** variable_;
- AstNode* node_;
- Target* previous_;
-};
-
-
-class TargetScope BASE_EMBEDDED {
- public:
- explicit TargetScope(Target** variable)
- : variable_(variable), previous_(*variable) {
- *variable = NULL;
- }
-
- ~TargetScope() {
- *variable_ = previous_;
- }
-
- private:
- Target** variable_;
- Target* previous_;
-};
-
-
-// ----------------------------------------------------------------------------
-// LexicalScope is a support class to facilitate manipulation of the
-// Parser's scope stack. The constructor sets the parser's top scope
-// to the incoming scope, and the destructor resets it.
-//
-// Additionlaly, it stores transient information used during parsing.
-// These scopes are not kept around after parsing or referenced by syntax
-// trees so they can be stack-allocated and hence used by the pre-parser.
-
-class LexicalScope BASE_EMBEDDED {
- public:
- LexicalScope(Parser* parser, Scope* scope, Isolate* isolate);
- ~LexicalScope();
-
- int NextMaterializedLiteralIndex() {
- int next_index =
- materialized_literal_count_ + JSFunction::kLiteralsPrefixSize;
- materialized_literal_count_++;
- return next_index;
- }
- int materialized_literal_count() { return materialized_literal_count_; }
-
- void SetThisPropertyAssignmentInfo(
- bool only_simple_this_property_assignments,
- Handle<FixedArray> this_property_assignments) {
- only_simple_this_property_assignments_ =
- only_simple_this_property_assignments;
- this_property_assignments_ = this_property_assignments;
- }
- bool only_simple_this_property_assignments() {
- return only_simple_this_property_assignments_;
- }
- Handle<FixedArray> this_property_assignments() {
- return this_property_assignments_;
- }
-
- void AddProperty() { expected_property_count_++; }
- int expected_property_count() { return expected_property_count_; }
-
- void AddLoop() { loop_count_++; }
- bool ContainsLoops() const { return loop_count_ > 0; }
-
- private:
- // Captures the number of literals that need materialization in the
- // function. Includes regexp literals, and boilerplate for object
- // and array literals.
- int materialized_literal_count_;
-
- // Properties count estimation.
- int expected_property_count_;
-
- // Keeps track of assignments to properties of this. Used for
- // optimizing constructors.
- bool only_simple_this_property_assignments_;
- Handle<FixedArray> this_property_assignments_;
-
- // Captures the number of loops inside the scope.
- int loop_count_;
-
- // Bookkeeping
- Parser* parser_;
- // Previous values
- LexicalScope* lexical_scope_parent_;
- Scope* previous_scope_;
- int previous_with_nesting_level_;
-};
-
-
-LexicalScope::LexicalScope(Parser* parser, Scope* scope, Isolate* isolate)
- : materialized_literal_count_(0),
- expected_property_count_(0),
- only_simple_this_property_assignments_(false),
- this_property_assignments_(isolate->factory()->empty_fixed_array()),
- loop_count_(0),
- parser_(parser),
- lexical_scope_parent_(parser->lexical_scope_),
- previous_scope_(parser->top_scope_),
- previous_with_nesting_level_(parser->with_nesting_level_) {
- parser->top_scope_ = scope;
- parser->lexical_scope_ = this;
- parser->with_nesting_level_ = 0;
-}
-
-
-LexicalScope::~LexicalScope() {
- parser_->top_scope_->Leave();
- parser_->top_scope_ = previous_scope_;
- parser_->lexical_scope_ = lexical_scope_parent_;
- parser_->with_nesting_level_ = previous_with_nesting_level_;
-}
-
-
-// ----------------------------------------------------------------------------
-// The CHECK_OK macro is a convenient macro to enforce error
-// handling for functions that may fail (by returning !*ok).
-//
-// CAUTION: This macro appends extra statements after a call,
-// thus it must never be used where only a single statement
-// is correct (e.g. an if statement branch w/o braces)!
-
-#define CHECK_OK ok); \
- if (!*ok) return NULL; \
- ((void)0
-#define DUMMY ) // to make indentation work
-#undef DUMMY
-
-#define CHECK_FAILED /**/); \
- if (failed_) return NULL; \
- ((void)0
-#define DUMMY ) // to make indentation work
-#undef DUMMY
-
-// ----------------------------------------------------------------------------
-// Implementation of Parser
-
-Parser::Parser(Handle<Script> script,
- bool allow_natives_syntax,
- v8::Extension* extension,
- ScriptDataImpl* pre_data)
- : isolate_(script->GetIsolate()),
- symbol_cache_(pre_data ? pre_data->symbol_count() : 0),
- script_(script),
- scanner_(isolate_->scanner_constants()),
- top_scope_(NULL),
- with_nesting_level_(0),
- lexical_scope_(NULL),
- target_stack_(NULL),
- allow_natives_syntax_(allow_natives_syntax),
- extension_(extension),
- pre_data_(pre_data),
- fni_(NULL),
- stack_overflow_(false),
- parenthesized_function_(false) {
- AstNode::ResetIds();
-}
-
-
-FunctionLiteral* Parser::ParseProgram(Handle<String> source,
- bool in_global_context,
- StrictModeFlag strict_mode) {
- CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
-
- HistogramTimerScope timer(isolate()->counters()->parse());
- isolate()->counters()->total_parse_size()->Increment(source->length());
- fni_ = new(zone()) FuncNameInferrer();
-
- // Initialize parser state.
- source->TryFlatten();
- if (source->IsExternalTwoByteString()) {
- // Notice that the stream is destroyed at the end of the branch block.
- // The last line of the blocks can't be moved outside, even though they're
- // identical calls.
- ExternalTwoByteStringUC16CharacterStream stream(
- Handle<ExternalTwoByteString>::cast(source), 0, source->length());
- scanner_.Initialize(&stream);
- return DoParseProgram(source, in_global_context, strict_mode, &zone_scope);
- } else {
- GenericStringUC16CharacterStream stream(source, 0, source->length());
- scanner_.Initialize(&stream);
- return DoParseProgram(source, in_global_context, strict_mode, &zone_scope);
- }
-}
-
-
-FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
- bool in_global_context,
- StrictModeFlag strict_mode,
- ZoneScope* zone_scope) {
- ASSERT(target_stack_ == NULL);
- if (pre_data_ != NULL) pre_data_->Initialize();
-
- // Compute the parsing mode.
- mode_ = FLAG_lazy ? PARSE_LAZILY : PARSE_EAGERLY;
- if (allow_natives_syntax_ || extension_ != NULL) mode_ = PARSE_EAGERLY;
-
- Scope::Type type =
- in_global_context
- ? Scope::GLOBAL_SCOPE
- : Scope::EVAL_SCOPE;
- Handle<String> no_name = isolate()->factory()->empty_symbol();
-
- FunctionLiteral* result = NULL;
- { Scope* scope = NewScope(top_scope_, type, inside_with());
- LexicalScope lexical_scope(this, scope, isolate());
- if (strict_mode == kStrictMode) {
- top_scope_->EnableStrictMode();
- }
- ZoneList<Statement*>* body = new ZoneList<Statement*>(16);
- bool ok = true;
- int beg_loc = scanner().location().beg_pos;
- ParseSourceElements(body, Token::EOS, &ok);
- if (ok && top_scope_->is_strict_mode()) {
- CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok);
- }
- if (ok) {
- result = new(zone()) FunctionLiteral(
- no_name,
- top_scope_,
- body,
- lexical_scope.materialized_literal_count(),
- lexical_scope.expected_property_count(),
- lexical_scope.only_simple_this_property_assignments(),
- lexical_scope.this_property_assignments(),
- 0,
- 0,
- source->length(),
- false,
- lexical_scope.ContainsLoops());
- } else if (stack_overflow_) {
- isolate()->StackOverflow();
- }
- }
-
- // Make sure the target stack is empty.
- ASSERT(target_stack_ == NULL);
-
- // If there was a syntax error we have to get rid of the AST
- // and it is not safe to do so before the scope has been deleted.
- if (result == NULL) zone_scope->DeleteOnExit();
- return result;
-}
-
-FunctionLiteral* Parser::ParseLazy(CompilationInfo* info) {
- CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
- HistogramTimerScope timer(isolate()->counters()->parse_lazy());
- Handle<String> source(String::cast(script_->source()));
- isolate()->counters()->total_parse_size()->Increment(source->length());
-
- Handle<SharedFunctionInfo> shared_info = info->shared_info();
- // Initialize parser state.
- source->TryFlatten();
- if (source->IsExternalTwoByteString()) {
- ExternalTwoByteStringUC16CharacterStream stream(
- Handle<ExternalTwoByteString>::cast(source),
- shared_info->start_position(),
- shared_info->end_position());
- FunctionLiteral* result = ParseLazy(info, &stream, &zone_scope);
- return result;
- } else {
- GenericStringUC16CharacterStream stream(source,
- shared_info->start_position(),
- shared_info->end_position());
- FunctionLiteral* result = ParseLazy(info, &stream, &zone_scope);
- return result;
- }
-}
-
-
-FunctionLiteral* Parser::ParseLazy(CompilationInfo* info,
- UC16CharacterStream* source,
- ZoneScope* zone_scope) {
- Handle<SharedFunctionInfo> shared_info = info->shared_info();
- scanner_.Initialize(source);
- ASSERT(target_stack_ == NULL);
-
- Handle<String> name(String::cast(shared_info->name()));
- fni_ = new(zone()) FuncNameInferrer();
- fni_->PushEnclosingName(name);
-
- mode_ = PARSE_EAGERLY;
-
- // Place holder for the result.
- FunctionLiteral* result = NULL;
-
- {
- // Parse the function literal.
- Handle<String> no_name = isolate()->factory()->empty_symbol();
- Scope* scope = NewScope(top_scope_, Scope::GLOBAL_SCOPE, inside_with());
- if (!info->closure().is_null()) {
- scope = Scope::DeserializeScopeChain(info, scope);
- }
- LexicalScope lexical_scope(this, scope, isolate());
-
- if (shared_info->strict_mode()) {
- top_scope_->EnableStrictMode();
- }
-
- FunctionLiteralType type =
- shared_info->is_expression() ? EXPRESSION : DECLARATION;
- bool ok = true;
- result = ParseFunctionLiteral(name,
- false, // Strict mode name already checked.
- RelocInfo::kNoPosition, type, &ok);
- // Make sure the results agree.
- ASSERT(ok == (result != NULL));
- }
-
- // Make sure the target stack is empty.
- ASSERT(target_stack_ == NULL);
-
- // If there was a stack overflow we have to get rid of AST and it is
- // not safe to do before scope has been deleted.
- if (result == NULL) {
- zone_scope->DeleteOnExit();
- if (stack_overflow_) isolate()->StackOverflow();
- } else {
- Handle<String> inferred_name(shared_info->inferred_name());
- result->set_inferred_name(inferred_name);
- }
- return result;
-}
-
-
-Handle<String> Parser::GetSymbol(bool* ok) {
- int symbol_id = -1;
- if (pre_data() != NULL) {
- symbol_id = pre_data()->GetSymbolIdentifier();
- }
- return LookupSymbol(symbol_id);
-}
-
-
-void Parser::ReportMessage(const char* type, Vector<const char*> args) {
- Scanner::Location source_location = scanner().location();
- ReportMessageAt(source_location, type, args);
-}
-
-
-void Parser::ReportMessageAt(Scanner::Location source_location,
- const char* type,
- Vector<const char*> args) {
- MessageLocation location(script_,
- source_location.beg_pos,
- source_location.end_pos);
- Factory* factory = isolate()->factory();
- Handle<FixedArray> elements = factory->NewFixedArray(args.length());
- for (int i = 0; i < args.length(); i++) {
- Handle<String> arg_string = factory->NewStringFromUtf8(CStrVector(args[i]));
- elements->set(i, *arg_string);
- }
- Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> result = factory->NewSyntaxError(type, array);
- isolate()->Throw(*result, &location);
-}
-
-
-void Parser::ReportMessageAt(Scanner::Location source_location,
- const char* type,
- Vector<Handle<String> > args) {
- MessageLocation location(script_,
- source_location.beg_pos,
- source_location.end_pos);
- Factory* factory = isolate()->factory();
- Handle<FixedArray> elements = factory->NewFixedArray(args.length());
- for (int i = 0; i < args.length(); i++) {
- elements->set(i, *args[i]);
- }
- Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> result = factory->NewSyntaxError(type, array);
- isolate()->Throw(*result, &location);
-}
-
-
-// Base class containing common code for the different finder classes used by
-// the parser.
-class ParserFinder {
- protected:
- ParserFinder() {}
- static Assignment* AsAssignment(Statement* stat) {
- if (stat == NULL) return NULL;
- ExpressionStatement* exp_stat = stat->AsExpressionStatement();
- if (exp_stat == NULL) return NULL;
- return exp_stat->expression()->AsAssignment();
- }
-};
-
-
-// An InitializationBlockFinder finds and marks sequences of statements of the
-// form expr.a = ...; expr.b = ...; etc.
-class InitializationBlockFinder : public ParserFinder {
- public:
- InitializationBlockFinder()
- : first_in_block_(NULL), last_in_block_(NULL), block_size_(0) {}
-
- ~InitializationBlockFinder() {
- if (InBlock()) EndBlock();
- }
-
- void Update(Statement* stat) {
- Assignment* assignment = AsAssignment(stat);
- if (InBlock()) {
- if (BlockContinues(assignment)) {
- UpdateBlock(assignment);
- } else {
- EndBlock();
- }
- }
- if (!InBlock() && (assignment != NULL) &&
- (assignment->op() == Token::ASSIGN)) {
- StartBlock(assignment);
- }
- }
-
- private:
- // The minimum number of contiguous assignment that will
- // be treated as an initialization block. Benchmarks show that
- // the overhead exceeds the savings below this limit.
- static const int kMinInitializationBlock = 3;
-
- // Returns true if the expressions appear to denote the same object.
- // In the context of initialization blocks, we only consider expressions
- // of the form 'expr.x' or expr["x"].
- static bool SameObject(Expression* e1, Expression* e2) {
- VariableProxy* v1 = e1->AsVariableProxy();
- VariableProxy* v2 = e2->AsVariableProxy();
- if (v1 != NULL && v2 != NULL) {
- return v1->name()->Equals(*v2->name());
- }
- Property* p1 = e1->AsProperty();
- Property* p2 = e2->AsProperty();
- if ((p1 == NULL) || (p2 == NULL)) return false;
- Literal* key1 = p1->key()->AsLiteral();
- Literal* key2 = p2->key()->AsLiteral();
- if ((key1 == NULL) || (key2 == NULL)) return false;
- if (!key1->handle()->IsString() || !key2->handle()->IsString()) {
- return false;
- }
- String* name1 = String::cast(*key1->handle());
- String* name2 = String::cast(*key2->handle());
- if (!name1->Equals(name2)) return false;
- return SameObject(p1->obj(), p2->obj());
- }
-
- // Returns true if the expressions appear to denote different properties
- // of the same object.
- static bool PropertyOfSameObject(Expression* e1, Expression* e2) {
- Property* p1 = e1->AsProperty();
- Property* p2 = e2->AsProperty();
- if ((p1 == NULL) || (p2 == NULL)) return false;
- return SameObject(p1->obj(), p2->obj());
- }
-
- bool BlockContinues(Assignment* assignment) {
- if ((assignment == NULL) || (first_in_block_ == NULL)) return false;
- if (assignment->op() != Token::ASSIGN) return false;
- return PropertyOfSameObject(first_in_block_->target(),
- assignment->target());
- }
-
- void StartBlock(Assignment* assignment) {
- first_in_block_ = assignment;
- last_in_block_ = assignment;
- block_size_ = 1;
- }
-
- void UpdateBlock(Assignment* assignment) {
- last_in_block_ = assignment;
- ++block_size_;
- }
-
- void EndBlock() {
- if (block_size_ >= kMinInitializationBlock) {
- first_in_block_->mark_block_start();
- last_in_block_->mark_block_end();
- }
- last_in_block_ = first_in_block_ = NULL;
- block_size_ = 0;
- }
-
- bool InBlock() { return first_in_block_ != NULL; }
-
- Assignment* first_in_block_;
- Assignment* last_in_block_;
- int block_size_;
-
- DISALLOW_COPY_AND_ASSIGN(InitializationBlockFinder);
-};
-
-
-// A ThisNamedPropertyAssigmentFinder finds and marks statements of the form
-// this.x = ...;, where x is a named property. It also determines whether a
-// function contains only assignments of this type.
-class ThisNamedPropertyAssigmentFinder : public ParserFinder {
- public:
- explicit ThisNamedPropertyAssigmentFinder(Isolate* isolate)
- : isolate_(isolate),
- only_simple_this_property_assignments_(true),
- names_(NULL),
- assigned_arguments_(NULL),
- assigned_constants_(NULL) {}
-
- void Update(Scope* scope, Statement* stat) {
- // Bail out if function already has property assignment that are
- // not simple this property assignments.
- if (!only_simple_this_property_assignments_) {
- return;
- }
-
- // Check whether this statement is of the form this.x = ...;
- Assignment* assignment = AsAssignment(stat);
- if (IsThisPropertyAssignment(assignment)) {
- HandleThisPropertyAssignment(scope, assignment);
- } else {
- only_simple_this_property_assignments_ = false;
- }
- }
-
- // Returns whether only statements of the form this.x = y; where y is either a
- // constant or a function argument was encountered.
- bool only_simple_this_property_assignments() {
- return only_simple_this_property_assignments_;
- }
-
- // Returns a fixed array containing three elements for each assignment of the
- // form this.x = y;
- Handle<FixedArray> GetThisPropertyAssignments() {
- if (names_ == NULL) {
- return isolate_->factory()->empty_fixed_array();
- }
- ASSERT(names_ != NULL);
- ASSERT(assigned_arguments_ != NULL);
- ASSERT_EQ(names_->length(), assigned_arguments_->length());
- ASSERT_EQ(names_->length(), assigned_constants_->length());
- Handle<FixedArray> assignments =
- isolate_->factory()->NewFixedArray(names_->length() * 3);
- for (int i = 0; i < names_->length(); i++) {
- assignments->set(i * 3, *names_->at(i));
- assignments->set(i * 3 + 1, Smi::FromInt(assigned_arguments_->at(i)));
- assignments->set(i * 3 + 2, *assigned_constants_->at(i));
- }
- return assignments;
- }
-
- private:
- bool IsThisPropertyAssignment(Assignment* assignment) {
- if (assignment != NULL) {
- Property* property = assignment->target()->AsProperty();
- return assignment->op() == Token::ASSIGN
- && property != NULL
- && property->obj()->AsVariableProxy() != NULL
- && property->obj()->AsVariableProxy()->is_this();
- }
- return false;
- }
-
- void HandleThisPropertyAssignment(Scope* scope, Assignment* assignment) {
- // Check that the property assigned to is a named property, which is not
- // __proto__.
- Property* property = assignment->target()->AsProperty();
- ASSERT(property != NULL);
- Literal* literal = property->key()->AsLiteral();
- uint32_t dummy;
- if (literal != NULL &&
- literal->handle()->IsString() &&
- !String::cast(*(literal->handle()))->Equals(
- isolate_->heap()->Proto_symbol()) &&
- !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
- Handle<String> key = Handle<String>::cast(literal->handle());
-
- // Check whether the value assigned is either a constant or matches the
- // name of one of the arguments to the function.
- if (assignment->value()->AsLiteral() != NULL) {
- // Constant assigned.
- Literal* literal = assignment->value()->AsLiteral();
- AssignmentFromConstant(key, literal->handle());
- return;
- } else if (assignment->value()->AsVariableProxy() != NULL) {
- // Variable assigned.
- Handle<String> name =
- assignment->value()->AsVariableProxy()->name();
- // Check whether the variable assigned matches an argument name.
- for (int i = 0; i < scope->num_parameters(); i++) {
- if (*scope->parameter(i)->name() == *name) {
- // Assigned from function argument.
- AssignmentFromParameter(key, i);
- return;
- }
- }
- }
- }
- // It is not a simple "this.x = value;" assignment with a constant
- // or parameter value.
- AssignmentFromSomethingElse();
- }
-
- void AssignmentFromParameter(Handle<String> name, int index) {
- EnsureAllocation();
- names_->Add(name);
- assigned_arguments_->Add(index);
- assigned_constants_->Add(isolate_->factory()->undefined_value());
- }
-
- void AssignmentFromConstant(Handle<String> name, Handle<Object> value) {
- EnsureAllocation();
- names_->Add(name);
- assigned_arguments_->Add(-1);
- assigned_constants_->Add(value);
- }
-
- void AssignmentFromSomethingElse() {
- // The this assignment is not a simple one.
- only_simple_this_property_assignments_ = false;
- }
-
- void EnsureAllocation() {
- if (names_ == NULL) {
- ASSERT(assigned_arguments_ == NULL);
- ASSERT(assigned_constants_ == NULL);
- names_ = new ZoneStringList(4);
- assigned_arguments_ = new ZoneList<int>(4);
- assigned_constants_ = new ZoneObjectList(4);
- }
- }
-
- Isolate* isolate_;
- bool only_simple_this_property_assignments_;
- ZoneStringList* names_;
- ZoneList<int>* assigned_arguments_;
- ZoneObjectList* assigned_constants_;
-};
-
-
-void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
- int end_token,
- bool* ok) {
- // SourceElements ::
- // (Statement)* <end_token>
-
- // Allocate a target stack to use for this set of source
- // elements. This way, all scripts and functions get their own
- // target stack thus avoiding illegal breaks and continues across
- // functions.
- TargetScope scope(&this->target_stack_);
-
- ASSERT(processor != NULL);
- InitializationBlockFinder block_finder;
- ThisNamedPropertyAssigmentFinder this_property_assignment_finder(isolate());
- bool directive_prologue = true; // Parsing directive prologue.
-
- while (peek() != end_token) {
- if (directive_prologue && peek() != Token::STRING) {
- directive_prologue = false;
- }
-
- Scanner::Location token_loc = scanner().peek_location();
-
- Statement* stat;
- if (peek() == Token::FUNCTION) {
- // FunctionDeclaration is only allowed in the context of SourceElements
- // (Ecma 262 5th Edition, clause 14):
- // SourceElement:
- // Statement
- // FunctionDeclaration
- // Common language extension is to allow function declaration in place
- // of any statement. This language extension is disabled in strict mode.
- stat = ParseFunctionDeclaration(CHECK_OK);
- } else {
- stat = ParseStatement(NULL, CHECK_OK);
- }
-
- if (stat == NULL || stat->IsEmpty()) {
- directive_prologue = false; // End of directive prologue.
- continue;
- }
-
- if (directive_prologue) {
- // A shot at a directive.
- ExpressionStatement *e_stat;
- Literal *literal;
- // Still processing directive prologue?
- if ((e_stat = stat->AsExpressionStatement()) != NULL &&
- (literal = e_stat->expression()->AsLiteral()) != NULL &&
- literal->handle()->IsString()) {
- Handle<String> directive = Handle<String>::cast(literal->handle());
-
- // Check "use strict" directive (ES5 14.1).
- if (!top_scope_->is_strict_mode() &&
- directive->Equals(isolate()->heap()->use_strict()) &&
- token_loc.end_pos - token_loc.beg_pos ==
- isolate()->heap()->use_strict()->length() + 2) {
- top_scope_->EnableStrictMode();
- // "use strict" is the only directive for now.
- directive_prologue = false;
- }
- } else {
- // End of the directive prologue.
- directive_prologue = false;
- }
- }
-
- // We find and mark the initialization blocks on top level code only.
- // This is because the optimization prevents reuse of the map transitions,
- // so it should be used only for code that will only be run once.
- if (top_scope_->is_global_scope()) {
- block_finder.Update(stat);
- }
- // Find and mark all assignments to named properties in this (this.x =)
- if (top_scope_->is_function_scope()) {
- this_property_assignment_finder.Update(top_scope_, stat);
- }
- processor->Add(stat);
- }
-
- // Propagate the collected information on this property assignments.
- if (top_scope_->is_function_scope()) {
- bool only_simple_this_property_assignments =
- this_property_assignment_finder.only_simple_this_property_assignments()
- && top_scope_->declarations()->length() == 0;
- if (only_simple_this_property_assignments) {
- lexical_scope_->SetThisPropertyAssignmentInfo(
- only_simple_this_property_assignments,
- this_property_assignment_finder.GetThisPropertyAssignments());
- }
- }
- return 0;
-}
-
-
-Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
- // Statement ::
- // Block
- // VariableStatement
- // EmptyStatement
- // ExpressionStatement
- // IfStatement
- // IterationStatement
- // ContinueStatement
- // BreakStatement
- // ReturnStatement
- // WithStatement
- // LabelledStatement
- // SwitchStatement
- // ThrowStatement
- // TryStatement
- // DebuggerStatement
-
- // Note: Since labels can only be used by 'break' and 'continue'
- // statements, which themselves are only valid within blocks,
- // iterations or 'switch' statements (i.e., BreakableStatements),
- // labels can be simply ignored in all other cases; except for
- // trivial labeled break statements 'label: break label' which is
- // parsed into an empty statement.
-
- // Keep the source position of the statement
- int statement_pos = scanner().peek_location().beg_pos;
- Statement* stmt = NULL;
- switch (peek()) {
- case Token::LBRACE:
- return ParseBlock(labels, ok);
-
- case Token::CONST: // fall through
- case Token::VAR:
- stmt = ParseVariableStatement(ok);
- break;
-
- case Token::SEMICOLON:
- Next();
- return EmptyStatement();
-
- case Token::IF:
- stmt = ParseIfStatement(labels, ok);
- break;
-
- case Token::DO:
- stmt = ParseDoWhileStatement(labels, ok);
- break;
-
- case Token::WHILE:
- stmt = ParseWhileStatement(labels, ok);
- break;
-
- case Token::FOR:
- stmt = ParseForStatement(labels, ok);
- break;
-
- case Token::CONTINUE:
- stmt = ParseContinueStatement(ok);
- break;
-
- case Token::BREAK:
- stmt = ParseBreakStatement(labels, ok);
- break;
-
- case Token::RETURN:
- stmt = ParseReturnStatement(ok);
- break;
-
- case Token::WITH:
- stmt = ParseWithStatement(labels, ok);
- break;
-
- case Token::SWITCH:
- stmt = ParseSwitchStatement(labels, ok);
- break;
-
- case Token::THROW:
- stmt = ParseThrowStatement(ok);
- break;
-
- case Token::TRY: {
- // NOTE: It is somewhat complicated to have labels on
- // try-statements. When breaking out of a try-finally statement,
- // one must take great care not to treat it as a
- // fall-through. It is much easier just to wrap the entire
- // try-statement in a statement block and put the labels there
- Block* result = new(zone()) Block(labels, 1, false);
- Target target(&this->target_stack_, result);
- TryStatement* statement = ParseTryStatement(CHECK_OK);
- if (statement) {
- statement->set_statement_pos(statement_pos);
- }
- if (result) result->AddStatement(statement);
- return result;
- }
-
- case Token::FUNCTION: {
- // In strict mode, FunctionDeclaration is only allowed in the context
- // of SourceElements.
- if (top_scope_->is_strict_mode()) {
- ReportMessageAt(scanner().peek_location(), "strict_function",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- return ParseFunctionDeclaration(ok);
- }
-
- case Token::NATIVE:
- return ParseNativeDeclaration(ok);
-
- case Token::DEBUGGER:
- stmt = ParseDebuggerStatement(ok);
- break;
-
- default:
- stmt = ParseExpressionOrLabelledStatement(labels, ok);
- }
-
- // Store the source position of the statement
- if (stmt != NULL) stmt->set_statement_pos(statement_pos);
- return stmt;
-}
-
-
-VariableProxy* Parser::Declare(Handle<String> name,
- Variable::Mode mode,
- FunctionLiteral* fun,
- bool resolve,
- bool* ok) {
- Variable* var = NULL;
- // If we are inside a function, a declaration of a variable
- // is a truly local variable, and the scope of the variable
- // is always the function scope.
-
- // If a function scope exists, then we can statically declare this
- // variable and also set its mode. In any case, a Declaration node
- // will be added to the scope so that the declaration can be added
- // to the corresponding activation frame at runtime if necessary.
- // For instance declarations inside an eval scope need to be added
- // to the calling function context.
- if (top_scope_->is_function_scope()) {
- // Declare the variable in the function scope.
- var = top_scope_->LocalLookup(name);
- if (var == NULL) {
- // Declare the name.
- var = top_scope_->DeclareLocal(name, mode);
- } else {
- // The name was declared before; check for conflicting
- // re-declarations. If the previous declaration was a const or the
- // current declaration is a const then we have a conflict. There is
- // similar code in runtime.cc in the Declare functions.
- if ((mode == Variable::CONST) || (var->mode() == Variable::CONST)) {
- // We only have vars and consts in declarations.
- ASSERT(var->mode() == Variable::VAR ||
- var->mode() == Variable::CONST);
- const char* type = (var->mode() == Variable::VAR) ? "var" : "const";
- Handle<String> type_string =
- isolate()->factory()->NewStringFromUtf8(CStrVector(type), TENURED);
- Expression* expression =
- NewThrowTypeError(isolate()->factory()->redeclaration_symbol(),
- type_string, name);
- top_scope_->SetIllegalRedeclaration(expression);
- }
- }
- }
-
- // We add a declaration node for every declaration. The compiler
- // will only generate code if necessary. In particular, declarations
- // for inner local variables that do not represent functions won't
- // result in any generated code.
- //
- // Note that we always add an unresolved proxy even if it's not
- // used, simply because we don't know in this method (w/o extra
- // parameters) if the proxy is needed or not. The proxy will be
- // bound during variable resolution time unless it was pre-bound
- // below.
- //
- // WARNING: This will lead to multiple declaration nodes for the
- // same variable if it is declared several times. This is not a
- // semantic issue as long as we keep the source order, but it may be
- // a performance issue since it may lead to repeated
- // Runtime::DeclareContextSlot() calls.
- VariableProxy* proxy = top_scope_->NewUnresolved(name, inside_with());
- top_scope_->AddDeclaration(new(zone()) Declaration(proxy, mode, fun));
-
- // For global const variables we bind the proxy to a variable.
- if (mode == Variable::CONST && top_scope_->is_global_scope()) {
- ASSERT(resolve); // should be set by all callers
- Variable::Kind kind = Variable::NORMAL;
- var = new(zone()) Variable(top_scope_, name, Variable::CONST, true, kind);
- }
-
- // If requested and we have a local variable, bind the proxy to the variable
- // at parse-time. This is used for functions (and consts) declared inside
- // statements: the corresponding function (or const) variable must be in the
- // function scope and not a statement-local scope, e.g. as provided with a
- // 'with' statement:
- //
- // with (obj) {
- // function f() {}
- // }
- //
- // which is translated into:
- //
- // with (obj) {
- // // in this case this is not: 'var f; f = function () {};'
- // var f = function () {};
- // }
- //
- // Note that if 'f' is accessed from inside the 'with' statement, it
- // will be allocated in the context (because we must be able to look
- // it up dynamically) but it will also be accessed statically, i.e.,
- // with a context slot index and a context chain length for this
- // initialization code. Thus, inside the 'with' statement, we need
- // both access to the static and the dynamic context chain; the
- // runtime needs to provide both.
- if (resolve && var != NULL) proxy->BindTo(var);
-
- return proxy;
-}
-
-
-// Language extension which is only enabled for source files loaded
-// through the API's extension mechanism. A native function
-// declaration is resolved by looking up the function through a
-// callback provided by the extension.
-Statement* Parser::ParseNativeDeclaration(bool* ok) {
- if (extension_ == NULL) {
- ReportUnexpectedToken(Token::NATIVE);
- *ok = false;
- return NULL;
- }
-
- Expect(Token::NATIVE, CHECK_OK);
- Expect(Token::FUNCTION, CHECK_OK);
- Handle<String> name = ParseIdentifier(CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- bool done = (peek() == Token::RPAREN);
- while (!done) {
- ParseIdentifier(CHECK_OK);
- done = (peek() == Token::RPAREN);
- if (!done) {
- Expect(Token::COMMA, CHECK_OK);
- }
- }
- Expect(Token::RPAREN, CHECK_OK);
- Expect(Token::SEMICOLON, CHECK_OK);
-
- // Make sure that the function containing the native declaration
- // isn't lazily compiled. The extension structures are only
- // accessible while parsing the first time not when reparsing
- // because of lazy compilation.
- top_scope_->ForceEagerCompilation();
-
- // Compute the function template for the native function.
- v8::Handle<v8::FunctionTemplate> fun_template =
- extension_->GetNativeFunction(v8::Utils::ToLocal(name));
- ASSERT(!fun_template.IsEmpty());
-
- // Instantiate the function and create a shared function info from it.
- Handle<JSFunction> fun = Utils::OpenHandle(*fun_template->GetFunction());
- const int literals = fun->NumberOfLiterals();
- Handle<Code> code = Handle<Code>(fun->shared()->code());
- Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
- Handle<SharedFunctionInfo> shared =
- isolate()->factory()->NewSharedFunctionInfo(name, literals, code,
- Handle<SerializedScopeInfo>(fun->shared()->scope_info()));
- shared->set_construct_stub(*construct_stub);
-
- // Copy the function data to the shared function info.
- shared->set_function_data(fun->shared()->function_data());
- int parameters = fun->shared()->formal_parameter_count();
- shared->set_formal_parameter_count(parameters);
-
- // TODO(1240846): It's weird that native function declarations are
- // introduced dynamically when we meet their declarations, whereas
- // other functions are setup when entering the surrounding scope.
- SharedFunctionInfoLiteral* lit =
- new(zone()) SharedFunctionInfoLiteral(shared);
- VariableProxy* var = Declare(name, Variable::VAR, NULL, true, CHECK_OK);
- return new(zone()) ExpressionStatement(new(zone()) Assignment(
- Token::INIT_VAR, var, lit, RelocInfo::kNoPosition));
-}
-
-
-Statement* Parser::ParseFunctionDeclaration(bool* ok) {
- // FunctionDeclaration ::
- // 'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
- Expect(Token::FUNCTION, CHECK_OK);
- int function_token_position = scanner().location().beg_pos;
- bool is_reserved = false;
- Handle<String> name = ParseIdentifierOrReservedWord(&is_reserved, CHECK_OK);
- FunctionLiteral* fun = ParseFunctionLiteral(name,
- is_reserved,
- function_token_position,
- DECLARATION,
- CHECK_OK);
- // Even if we're not at the top-level of the global or a function
- // scope, we treat is as such and introduce the function with it's
- // initial value upon entering the corresponding scope.
- Declare(name, Variable::VAR, fun, true, CHECK_OK);
- return EmptyStatement();
-}
-
-
-Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
- // Block ::
- // '{' Statement* '}'
-
- // Note that a Block does not introduce a new execution scope!
- // (ECMA-262, 3rd, 12.2)
- //
- // Construct block expecting 16 statements.
- Block* result = new(zone()) Block(labels, 16, false);
- Target target(&this->target_stack_, result);
- Expect(Token::LBRACE, CHECK_OK);
- while (peek() != Token::RBRACE) {
- Statement* stat = ParseStatement(NULL, CHECK_OK);
- if (stat && !stat->IsEmpty()) result->AddStatement(stat);
- }
- Expect(Token::RBRACE, CHECK_OK);
- return result;
-}
-
-
-Block* Parser::ParseVariableStatement(bool* ok) {
- // VariableStatement ::
- // VariableDeclarations ';'
-
- Expression* dummy; // to satisfy the ParseVariableDeclarations() signature
- Block* result = ParseVariableDeclarations(true, &dummy, CHECK_OK);
- ExpectSemicolon(CHECK_OK);
- return result;
-}
-
-
-bool Parser::IsEvalOrArguments(Handle<String> string) {
- return string.is_identical_to(isolate()->factory()->eval_symbol()) ||
- string.is_identical_to(isolate()->factory()->arguments_symbol());
-}
-
-
-// If the variable declaration declares exactly one non-const
-// variable, then *var is set to that variable. In all other cases,
-// *var is untouched; in particular, it is the caller's responsibility
-// to initialize it properly. This mechanism is used for the parsing
-// of 'for-in' loops.
-Block* Parser::ParseVariableDeclarations(bool accept_IN,
- Expression** var,
- bool* ok) {
- // VariableDeclarations ::
- // ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
-
- Variable::Mode mode = Variable::VAR;
- bool is_const = false;
- if (peek() == Token::VAR) {
- Consume(Token::VAR);
- } else if (peek() == Token::CONST) {
- Consume(Token::CONST);
- if (top_scope_->is_strict_mode()) {
- ReportMessage("strict_const", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- mode = Variable::CONST;
- is_const = true;
- } else {
- UNREACHABLE(); // by current callers
- }
-
- // The scope of a variable/const declared anywhere inside a function
- // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). Thus we can
- // transform a source-level variable/const declaration into a (Function)
- // Scope declaration, and rewrite the source-level initialization into an
- // assignment statement. We use a block to collect multiple assignments.
- //
- // We mark the block as initializer block because we don't want the
- // rewriter to add a '.result' assignment to such a block (to get compliant
- // behavior for code such as print(eval('var x = 7')), and for cosmetic
- // reasons when pretty-printing. Also, unless an assignment (initialization)
- // is inside an initializer block, it is ignored.
- //
- // Create new block with one expected declaration.
- Block* block = new(zone()) Block(NULL, 1, true);
- VariableProxy* last_var = NULL; // the last variable declared
- int nvars = 0; // the number of variables declared
- do {
- if (fni_ != NULL) fni_->Enter();
-
- // Parse variable name.
- if (nvars > 0) Consume(Token::COMMA);
- Handle<String> name = ParseIdentifier(CHECK_OK);
- if (fni_ != NULL) fni_->PushVariableName(name);
-
- // Strict mode variables may not be named eval or arguments
- if (top_scope_->is_strict_mode() && IsEvalOrArguments(name)) {
- ReportMessage("strict_var_name", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
-
- // Declare variable.
- // Note that we *always* must treat the initial value via a separate init
- // assignment for variables and constants because the value must be assigned
- // when the variable is encountered in the source. But the variable/constant
- // is declared (and set to 'undefined') upon entering the function within
- // which the variable or constant is declared. Only function variables have
- // an initial value in the declaration (because they are initialized upon
- // entering the function).
- //
- // If we have a const declaration, in an inner scope, the proxy is always
- // bound to the declared variable (independent of possibly surrounding with
- // statements).
- last_var = Declare(name, mode, NULL,
- is_const /* always bound for CONST! */,
- CHECK_OK);
- nvars++;
-
- // Parse initialization expression if present and/or needed. A
- // declaration of the form:
- //
- // var v = x;
- //
- // is syntactic sugar for:
- //
- // var v; v = x;
- //
- // In particular, we need to re-lookup 'v' as it may be a
- // different 'v' than the 'v' in the declaration (if we are inside
- // a 'with' statement that makes a object property with name 'v'
- // visible).
- //
- // However, note that const declarations are different! A const
- // declaration of the form:
- //
- // const c = x;
- //
- // is *not* syntactic sugar for:
- //
- // const c; c = x;
- //
- // The "variable" c initialized to x is the same as the declared
- // one - there is no re-lookup (see the last parameter of the
- // Declare() call above).
-
- Expression* value = NULL;
- int position = -1;
- if (peek() == Token::ASSIGN) {
- Expect(Token::ASSIGN, CHECK_OK);
- position = scanner().location().beg_pos;
- value = ParseAssignmentExpression(accept_IN, CHECK_OK);
- // Don't infer if it is "a = function(){...}();"-like expression.
- if (fni_ != NULL && value->AsCall() == NULL) fni_->Infer();
- }
-
- // Make sure that 'const c' actually initializes 'c' to undefined
- // even though it seems like a stupid thing to do.
- if (value == NULL && is_const) {
- value = GetLiteralUndefined();
- }
-
- // Global variable declarations must be compiled in a specific
- // way. When the script containing the global variable declaration
- // is entered, the global variable must be declared, so that if it
- // doesn't exist (not even in a prototype of the global object) it
- // gets created with an initial undefined value. This is handled
- // by the declarations part of the function representing the
- // top-level global code; see Runtime::DeclareGlobalVariable. If
- // it already exists (in the object or in a prototype), it is
- // *not* touched until the variable declaration statement is
- // executed.
- //
- // Executing the variable declaration statement will always
- // guarantee to give the global object a "local" variable; a
- // variable defined in the global object and not in any
- // prototype. This way, global variable declarations can shadow
- // properties in the prototype chain, but only after the variable
- // declaration statement has been executed. This is important in
- // browsers where the global object (window) has lots of
- // properties defined in prototype objects.
-
- if (top_scope_->is_global_scope()) {
- // Compute the arguments for the runtime call.
- ZoneList<Expression*>* arguments = new ZoneList<Expression*>(3);
- // We have at least 1 parameter.
- arguments->Add(new(zone()) Literal(name));
- CallRuntime* initialize;
-
- if (is_const) {
- arguments->Add(value);
- value = NULL; // zap the value to avoid the unnecessary assignment
-
- // Construct the call to Runtime_InitializeConstGlobal
- // and add it to the initialization statement block.
- // Note that the function does different things depending on
- // the number of arguments (1 or 2).
- initialize =
- new(zone()) CallRuntime(
- isolate()->factory()->InitializeConstGlobal_symbol(),
- Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
- arguments);
- } else {
- // Add strict mode.
- // We may want to pass singleton to avoid Literal allocations.
- arguments->Add(NewNumberLiteral(
- top_scope_->is_strict_mode() ? kStrictMode : kNonStrictMode));
-
- // Be careful not to assign a value to the global variable if
- // we're in a with. The initialization value should not
- // necessarily be stored in the global object in that case,
- // which is why we need to generate a separate assignment node.
- if (value != NULL && !inside_with()) {
- arguments->Add(value);
- value = NULL; // zap the value to avoid the unnecessary assignment
- }
-
- // Construct the call to Runtime_InitializeVarGlobal
- // and add it to the initialization statement block.
- // Note that the function does different things depending on
- // the number of arguments (2 or 3).
- initialize =
- new(zone()) CallRuntime(
- isolate()->factory()->InitializeVarGlobal_symbol(),
- Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
- arguments);
- }
-
- block->AddStatement(new(zone()) ExpressionStatement(initialize));
- }
-
- // Add an assignment node to the initialization statement block if
- // we still have a pending initialization value. We must distinguish
- // between variables and constants: Variable initializations are simply
- // assignments (with all the consequences if they are inside a 'with'
- // statement - they may change a 'with' object property). Constant
- // initializations always assign to the declared constant which is
- // always at the function scope level. This is only relevant for
- // dynamically looked-up variables and constants (the start context
- // for constant lookups is always the function context, while it is
- // the top context for variables). Sigh...
- if (value != NULL) {
- Token::Value op = (is_const ? Token::INIT_CONST : Token::INIT_VAR);
- Assignment* assignment =
- new(zone()) Assignment(op, last_var, value, position);
- if (block) {
- block->AddStatement(new(zone()) ExpressionStatement(assignment));
- }
- }
-
- if (fni_ != NULL) fni_->Leave();
- } while (peek() == Token::COMMA);
-
- if (!is_const && nvars == 1) {
- // We have a single, non-const variable.
- ASSERT(last_var != NULL);
- *var = last_var;
- }
-
- return block;
-}
-
-
-static bool ContainsLabel(ZoneStringList* labels, Handle<String> label) {
- ASSERT(!label.is_null());
- if (labels != NULL)
- for (int i = labels->length(); i-- > 0; )
- if (labels->at(i).is_identical_to(label))
- return true;
-
- return false;
-}
-
-
-Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
- bool* ok) {
- // ExpressionStatement | LabelledStatement ::
- // Expression ';'
- // Identifier ':' Statement
- bool starts_with_idenfifier = peek_any_identifier();
- Expression* expr = ParseExpression(true, CHECK_OK);
- if (peek() == Token::COLON && starts_with_idenfifier && expr &&
- expr->AsVariableProxy() != NULL &&
- !expr->AsVariableProxy()->is_this()) {
- // Expression is a single identifier, and not, e.g., a parenthesized
- // identifier.
- VariableProxy* var = expr->AsVariableProxy();
- Handle<String> label = var->name();
- // TODO(1240780): We don't check for redeclaration of labels
- // during preparsing since keeping track of the set of active
- // labels requires nontrivial changes to the way scopes are
- // structured. However, these are probably changes we want to
- // make later anyway so we should go back and fix this then.
- if (ContainsLabel(labels, label) || TargetStackContainsLabel(label)) {
- SmartPointer<char> c_string = label->ToCString(DISALLOW_NULLS);
- const char* elms[2] = { "Label", *c_string };
- Vector<const char*> args(elms, 2);
- ReportMessage("redeclaration", args);
- *ok = false;
- return NULL;
- }
- if (labels == NULL) labels = new ZoneStringList(4);
- labels->Add(label);
- // Remove the "ghost" variable that turned out to be a label
- // from the top scope. This way, we don't try to resolve it
- // during the scope processing.
- top_scope_->RemoveUnresolved(var);
- Expect(Token::COLON, CHECK_OK);
- return ParseStatement(labels, ok);
- }
-
- // Parsed expression statement.
- ExpectSemicolon(CHECK_OK);
- return new(zone()) ExpressionStatement(expr);
-}
-
-
-IfStatement* Parser::ParseIfStatement(ZoneStringList* labels, bool* ok) {
- // IfStatement ::
- // 'if' '(' Expression ')' Statement ('else' Statement)?
-
- Expect(Token::IF, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- Expression* condition = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
- Statement* then_statement = ParseStatement(labels, CHECK_OK);
- Statement* else_statement = NULL;
- if (peek() == Token::ELSE) {
- Next();
- else_statement = ParseStatement(labels, CHECK_OK);
- } else {
- else_statement = EmptyStatement();
- }
- return new(zone()) IfStatement(condition, then_statement, else_statement);
-}
-
-
-Statement* Parser::ParseContinueStatement(bool* ok) {
- // ContinueStatement ::
- // 'continue' Identifier? ';'
-
- Expect(Token::CONTINUE, CHECK_OK);
- Handle<String> label = Handle<String>::null();
- Token::Value tok = peek();
- if (!scanner().has_line_terminator_before_next() &&
- tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
- label = ParseIdentifier(CHECK_OK);
- }
- IterationStatement* target = NULL;
- target = LookupContinueTarget(label, CHECK_OK);
- if (target == NULL) {
- // Illegal continue statement.
- const char* message = "illegal_continue";
- Vector<Handle<String> > args;
- if (!label.is_null()) {
- message = "unknown_label";
- args = Vector<Handle<String> >(&label, 1);
- }
- ReportMessageAt(scanner().location(), message, args);
- *ok = false;
- return NULL;
- }
- ExpectSemicolon(CHECK_OK);
- return new(zone()) ContinueStatement(target);
-}
-
-
-Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
- // BreakStatement ::
- // 'break' Identifier? ';'
-
- Expect(Token::BREAK, CHECK_OK);
- Handle<String> label;
- Token::Value tok = peek();
- if (!scanner().has_line_terminator_before_next() &&
- tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
- label = ParseIdentifier(CHECK_OK);
- }
- // Parse labeled break statements that target themselves into
- // empty statements, e.g. 'l1: l2: l3: break l2;'
- if (!label.is_null() && ContainsLabel(labels, label)) {
- return EmptyStatement();
- }
- BreakableStatement* target = NULL;
- target = LookupBreakTarget(label, CHECK_OK);
- if (target == NULL) {
- // Illegal break statement.
- const char* message = "illegal_break";
- Vector<Handle<String> > args;
- if (!label.is_null()) {
- message = "unknown_label";
- args = Vector<Handle<String> >(&label, 1);
- }
- ReportMessageAt(scanner().location(), message, args);
- *ok = false;
- return NULL;
- }
- ExpectSemicolon(CHECK_OK);
- return new(zone()) BreakStatement(target);
-}
-
-
-Statement* Parser::ParseReturnStatement(bool* ok) {
- // ReturnStatement ::
- // 'return' Expression? ';'
-
- // Consume the return token. It is necessary to do the before
- // reporting any errors on it, because of the way errors are
- // reported (underlining).
- Expect(Token::RETURN, CHECK_OK);
-
- // An ECMAScript program is considered syntactically incorrect if it
- // contains a return statement that is not within the body of a
- // function. See ECMA-262, section 12.9, page 67.
- //
- // To be consistent with KJS we report the syntax error at runtime.
- if (!top_scope_->is_function_scope()) {
- Handle<String> type = isolate()->factory()->illegal_return_symbol();
- Expression* throw_error = NewThrowSyntaxError(type, Handle<Object>::null());
- return new(zone()) ExpressionStatement(throw_error);
- }
-
- Token::Value tok = peek();
- if (scanner().has_line_terminator_before_next() ||
- tok == Token::SEMICOLON ||
- tok == Token::RBRACE ||
- tok == Token::EOS) {
- ExpectSemicolon(CHECK_OK);
- return new(zone()) ReturnStatement(GetLiteralUndefined());
- }
-
- Expression* expr = ParseExpression(true, CHECK_OK);
- ExpectSemicolon(CHECK_OK);
- return new(zone()) ReturnStatement(expr);
-}
-
-
-Block* Parser::WithHelper(Expression* obj,
- ZoneStringList* labels,
- bool is_catch_block,
- bool* ok) {
- // Parse the statement and collect escaping labels.
- ZoneList<BreakTarget*>* target_list = new ZoneList<BreakTarget*>(0);
- TargetCollector collector(target_list);
- Statement* stat;
- { Target target(&this->target_stack_, &collector);
- with_nesting_level_++;
- top_scope_->RecordWithStatement();
- stat = ParseStatement(labels, CHECK_OK);
- with_nesting_level_--;
- }
- // Create resulting block with two statements.
- // 1: Evaluate the with expression.
- // 2: The try-finally block evaluating the body.
- Block* result = new(zone()) Block(NULL, 2, false);
-
- if (result != NULL) {
- result->AddStatement(new(zone()) WithEnterStatement(obj, is_catch_block));
-
- // Create body block.
- Block* body = new(zone()) Block(NULL, 1, false);
- body->AddStatement(stat);
-
- // Create exit block.
- Block* exit = new(zone()) Block(NULL, 1, false);
- exit->AddStatement(new(zone()) WithExitStatement());
-
- // Return a try-finally statement.
- TryFinallyStatement* wrapper = new(zone()) TryFinallyStatement(body, exit);
- wrapper->set_escaping_targets(collector.targets());
- result->AddStatement(wrapper);
- }
- return result;
-}
-
-
-Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
- // WithStatement ::
- // 'with' '(' Expression ')' Statement
-
- Expect(Token::WITH, CHECK_OK);
-
- if (top_scope_->is_strict_mode()) {
- ReportMessage("strict_mode_with", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
-
- Expect(Token::LPAREN, CHECK_OK);
- Expression* expr = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
-
- return WithHelper(expr, labels, false, CHECK_OK);
-}
-
-
-CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
- // CaseClause ::
- // 'case' Expression ':' Statement*
- // 'default' ':' Statement*
-
- Expression* label = NULL; // NULL expression indicates default case
- if (peek() == Token::CASE) {
- Expect(Token::CASE, CHECK_OK);
- label = ParseExpression(true, CHECK_OK);
- } else {
- Expect(Token::DEFAULT, CHECK_OK);
- if (*default_seen_ptr) {
- ReportMessage("multiple_defaults_in_switch",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- *default_seen_ptr = true;
- }
- Expect(Token::COLON, CHECK_OK);
- int pos = scanner().location().beg_pos;
- ZoneList<Statement*>* statements = new ZoneList<Statement*>(5);
- while (peek() != Token::CASE &&
- peek() != Token::DEFAULT &&
- peek() != Token::RBRACE) {
- Statement* stat = ParseStatement(NULL, CHECK_OK);
- statements->Add(stat);
- }
-
- return new(zone()) CaseClause(label, statements, pos);
-}
-
-
-SwitchStatement* Parser::ParseSwitchStatement(ZoneStringList* labels,
- bool* ok) {
- // SwitchStatement ::
- // 'switch' '(' Expression ')' '{' CaseClause* '}'
-
- SwitchStatement* statement = new(zone()) SwitchStatement(labels);
- Target target(&this->target_stack_, statement);
-
- Expect(Token::SWITCH, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- Expression* tag = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
-
- bool default_seen = false;
- ZoneList<CaseClause*>* cases = new ZoneList<CaseClause*>(4);
- Expect(Token::LBRACE, CHECK_OK);
- while (peek() != Token::RBRACE) {
- CaseClause* clause = ParseCaseClause(&default_seen, CHECK_OK);
- cases->Add(clause);
- }
- Expect(Token::RBRACE, CHECK_OK);
-
- if (statement) statement->Initialize(tag, cases);
- return statement;
-}
-
-
-Statement* Parser::ParseThrowStatement(bool* ok) {
- // ThrowStatement ::
- // 'throw' Expression ';'
-
- Expect(Token::THROW, CHECK_OK);
- int pos = scanner().location().beg_pos;
- if (scanner().has_line_terminator_before_next()) {
- ReportMessage("newline_after_throw", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- Expression* exception = ParseExpression(true, CHECK_OK);
- ExpectSemicolon(CHECK_OK);
-
- return new(zone()) ExpressionStatement(new(zone()) Throw(exception, pos));
-}
-
-
-TryStatement* Parser::ParseTryStatement(bool* ok) {
- // TryStatement ::
- // 'try' Block Catch
- // 'try' Block Finally
- // 'try' Block Catch Finally
- //
- // Catch ::
- // 'catch' '(' Identifier ')' Block
- //
- // Finally ::
- // 'finally' Block
-
- Expect(Token::TRY, CHECK_OK);
-
- ZoneList<BreakTarget*>* target_list = new ZoneList<BreakTarget*>(0);
- TargetCollector collector(target_list);
- Block* try_block;
-
- { Target target(&this->target_stack_, &collector);
- try_block = ParseBlock(NULL, CHECK_OK);
- }
-
- Block* catch_block = NULL;
- Variable* catch_var = NULL;
- Block* finally_block = NULL;
-
- Token::Value tok = peek();
- if (tok != Token::CATCH && tok != Token::FINALLY) {
- ReportMessage("no_catch_or_finally", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
-
- // If we can break out from the catch block and there is a finally block,
- // then we will need to collect jump targets from the catch block. Since
- // we don't know yet if there will be a finally block, we always collect
- // the jump targets.
- ZoneList<BreakTarget*>* catch_target_list = new ZoneList<BreakTarget*>(0);
- TargetCollector catch_collector(catch_target_list);
- bool has_catch = false;
- if (tok == Token::CATCH) {
- has_catch = true;
- Consume(Token::CATCH);
-
- Expect(Token::LPAREN, CHECK_OK);
- Handle<String> name = ParseIdentifier(CHECK_OK);
-
- if (top_scope_->is_strict_mode() && IsEvalOrArguments(name)) {
- ReportMessage("strict_catch_variable", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
-
- Expect(Token::RPAREN, CHECK_OK);
-
- if (peek() == Token::LBRACE) {
- // Allocate a temporary for holding the finally state while
- // executing the finally block.
- catch_var =
- top_scope_->NewTemporary(isolate()->factory()->catch_var_symbol());
- Literal* name_literal = new(zone()) Literal(name);
- VariableProxy* catch_var_use = new(zone()) VariableProxy(catch_var);
- Expression* obj =
- new(zone()) CatchExtensionObject(name_literal, catch_var_use);
- { Target target(&this->target_stack_, &catch_collector);
- catch_block = WithHelper(obj, NULL, true, CHECK_OK);
- }
- } else {
- Expect(Token::LBRACE, CHECK_OK);
- }
-
- tok = peek();
- }
-
- if (tok == Token::FINALLY || !has_catch) {
- Consume(Token::FINALLY);
- // Declare a variable for holding the finally state while
- // executing the finally block.
- finally_block = ParseBlock(NULL, CHECK_OK);
- }
-
- // Simplify the AST nodes by converting:
- // 'try { } catch { } finally { }'
- // to:
- // 'try { try { } catch { } } finally { }'
-
- if (catch_block != NULL && finally_block != NULL) {
- VariableProxy* catch_var_defn = new(zone()) VariableProxy(catch_var);
- TryCatchStatement* statement =
- new(zone()) TryCatchStatement(try_block, catch_var_defn, catch_block);
- statement->set_escaping_targets(collector.targets());
- try_block = new(zone()) Block(NULL, 1, false);
- try_block->AddStatement(statement);
- catch_block = NULL;
- }
-
- TryStatement* result = NULL;
- if (catch_block != NULL) {
- ASSERT(finally_block == NULL);
- VariableProxy* catch_var_defn = new(zone()) VariableProxy(catch_var);
- result =
- new(zone()) TryCatchStatement(try_block, catch_var_defn, catch_block);
- result->set_escaping_targets(collector.targets());
- } else {
- ASSERT(finally_block != NULL);
- result = new(zone()) TryFinallyStatement(try_block, finally_block);
- // Add the jump targets of the try block and the catch block.
- for (int i = 0; i < collector.targets()->length(); i++) {
- catch_collector.AddTarget(collector.targets()->at(i));
- }
- result->set_escaping_targets(catch_collector.targets());
- }
-
- return result;
-}
-
-
-DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels,
- bool* ok) {
- // DoStatement ::
- // 'do' Statement 'while' '(' Expression ')' ';'
-
- lexical_scope_->AddLoop();
- DoWhileStatement* loop = new(zone()) DoWhileStatement(labels);
- Target target(&this->target_stack_, loop);
-
- Expect(Token::DO, CHECK_OK);
- Statement* body = ParseStatement(NULL, CHECK_OK);
- Expect(Token::WHILE, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
-
- if (loop != NULL) {
- int position = scanner().location().beg_pos;
- loop->set_condition_position(position);
- }
-
- Expression* cond = ParseExpression(true, CHECK_OK);
- if (cond != NULL) cond->set_is_loop_condition(true);
- Expect(Token::RPAREN, CHECK_OK);
-
- // Allow do-statements to be terminated with and without
- // semi-colons. This allows code such as 'do;while(0)return' to
- // parse, which would not be the case if we had used the
- // ExpectSemicolon() functionality here.
- if (peek() == Token::SEMICOLON) Consume(Token::SEMICOLON);
-
- if (loop != NULL) loop->Initialize(cond, body);
- return loop;
-}
-
-
-WhileStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) {
- // WhileStatement ::
- // 'while' '(' Expression ')' Statement
-
- lexical_scope_->AddLoop();
- WhileStatement* loop = new(zone()) WhileStatement(labels);
- Target target(&this->target_stack_, loop);
-
- Expect(Token::WHILE, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- Expression* cond = ParseExpression(true, CHECK_OK);
- if (cond != NULL) cond->set_is_loop_condition(true);
- Expect(Token::RPAREN, CHECK_OK);
- Statement* body = ParseStatement(NULL, CHECK_OK);
-
- if (loop != NULL) loop->Initialize(cond, body);
- return loop;
-}
-
-
-Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
- // ForStatement ::
- // 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
-
- lexical_scope_->AddLoop();
- Statement* init = NULL;
-
- Expect(Token::FOR, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- if (peek() != Token::SEMICOLON) {
- if (peek() == Token::VAR || peek() == Token::CONST) {
- Expression* each = NULL;
- Block* variable_statement =
- ParseVariableDeclarations(false, &each, CHECK_OK);
- if (peek() == Token::IN && each != NULL) {
- ForInStatement* loop = new(zone()) ForInStatement(labels);
- Target target(&this->target_stack_, loop);
-
- Expect(Token::IN, CHECK_OK);
- Expression* enumerable = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
-
- Statement* body = ParseStatement(NULL, CHECK_OK);
- loop->Initialize(each, enumerable, body);
- Block* result = new(zone()) Block(NULL, 2, false);
- result->AddStatement(variable_statement);
- result->AddStatement(loop);
- // Parsed for-in loop w/ variable/const declaration.
- return result;
- } else {
- init = variable_statement;
- }
-
- } else {
- Expression* expression = ParseExpression(false, CHECK_OK);
- if (peek() == Token::IN) {
- // Signal a reference error if the expression is an invalid
- // left-hand side expression. We could report this as a syntax
- // error here but for compatibility with JSC we choose to report
- // the error at runtime.
- if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type =
- isolate()->factory()->invalid_lhs_in_for_in_symbol();
- expression = NewThrowReferenceError(type);
- }
- ForInStatement* loop = new(zone()) ForInStatement(labels);
- Target target(&this->target_stack_, loop);
-
- Expect(Token::IN, CHECK_OK);
- Expression* enumerable = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
-
- Statement* body = ParseStatement(NULL, CHECK_OK);
- if (loop) loop->Initialize(expression, enumerable, body);
- // Parsed for-in loop.
- return loop;
-
- } else {
- init = new(zone()) ExpressionStatement(expression);
- }
- }
- }
-
- // Standard 'for' loop
- ForStatement* loop = new(zone()) ForStatement(labels);
- Target target(&this->target_stack_, loop);
-
- // Parsed initializer at this point.
- Expect(Token::SEMICOLON, CHECK_OK);
-
- Expression* cond = NULL;
- if (peek() != Token::SEMICOLON) {
- cond = ParseExpression(true, CHECK_OK);
- if (cond != NULL) cond->set_is_loop_condition(true);
- }
- Expect(Token::SEMICOLON, CHECK_OK);
-
- Statement* next = NULL;
- if (peek() != Token::RPAREN) {
- Expression* exp = ParseExpression(true, CHECK_OK);
- next = new(zone()) ExpressionStatement(exp);
- }
- Expect(Token::RPAREN, CHECK_OK);
-
- Statement* body = ParseStatement(NULL, CHECK_OK);
- if (loop) loop->Initialize(init, cond, next, body);
- return loop;
-}
-
-
-// Precedence = 1
-Expression* Parser::ParseExpression(bool accept_IN, bool* ok) {
- // Expression ::
- // AssignmentExpression
- // Expression ',' AssignmentExpression
-
- Expression* result = ParseAssignmentExpression(accept_IN, CHECK_OK);
- while (peek() == Token::COMMA) {
- Expect(Token::COMMA, CHECK_OK);
- int position = scanner().location().beg_pos;
- Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- result = new(zone()) BinaryOperation(Token::COMMA, result, right, position);
- }
- return result;
-}
-
-
-// Precedence = 2
-Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
- // AssignmentExpression ::
- // ConditionalExpression
- // LeftHandSideExpression AssignmentOperator AssignmentExpression
-
- if (fni_ != NULL) fni_->Enter();
- Expression* expression = ParseConditionalExpression(accept_IN, CHECK_OK);
-
- if (!Token::IsAssignmentOp(peek())) {
- if (fni_ != NULL) fni_->Leave();
- // Parsed conditional expression only (no assignment).
- return expression;
- }
-
- // Signal a reference error if the expression is an invalid left-hand
- // side expression. We could report this as a syntax error here but
- // for compatibility with JSC we choose to report the error at
- // runtime.
- if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type =
- isolate()->factory()->invalid_lhs_in_assignment_symbol();
- expression = NewThrowReferenceError(type);
- }
-
- if (top_scope_->is_strict_mode()) {
- // Assignment to eval or arguments is disallowed in strict mode.
- CheckStrictModeLValue(expression, "strict_lhs_assignment", CHECK_OK);
- }
-
- Token::Value op = Next(); // Get assignment operator.
- int pos = scanner().location().beg_pos;
- Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
-
- // TODO(1231235): We try to estimate the set of properties set by
- // constructors. We define a new property whenever there is an
- // assignment to a property of 'this'. We should probably only add
- // properties if we haven't seen them before. Otherwise we'll
- // probably overestimate the number of properties.
- Property* property = expression ? expression->AsProperty() : NULL;
- if (op == Token::ASSIGN &&
- property != NULL &&
- property->obj()->AsVariableProxy() != NULL &&
- property->obj()->AsVariableProxy()->is_this()) {
- lexical_scope_->AddProperty();
- }
-
- // If we assign a function literal to a property we pretenure the
- // literal so it can be added as a constant function property.
- if (property != NULL && right->AsFunctionLiteral() != NULL) {
- right->AsFunctionLiteral()->set_pretenure(true);
- }
-
- if (fni_ != NULL) {
- // Check if the right hand side is a call to avoid inferring a
- // name if we're dealing with "a = function(){...}();"-like
- // expression.
- if ((op == Token::INIT_VAR
- || op == Token::INIT_CONST
- || op == Token::ASSIGN)
- && (right->AsCall() == NULL)) {
- fni_->Infer();
- }
- fni_->Leave();
- }
-
- return new(zone()) Assignment(op, expression, right, pos);
-}
-
-
-// Precedence = 3
-Expression* Parser::ParseConditionalExpression(bool accept_IN, bool* ok) {
- // ConditionalExpression ::
- // LogicalOrExpression
- // LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
-
- // We start using the binary expression parser for prec >= 4 only!
- Expression* expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
- if (peek() != Token::CONDITIONAL) return expression;
- Consume(Token::CONDITIONAL);
- // In parsing the first assignment expression in conditional
- // expressions we always accept the 'in' keyword; see ECMA-262,
- // section 11.12, page 58.
- int left_position = scanner().peek_location().beg_pos;
- Expression* left = ParseAssignmentExpression(true, CHECK_OK);
- Expect(Token::COLON, CHECK_OK);
- int right_position = scanner().peek_location().beg_pos;
- Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- return new(zone()) Conditional(expression, left, right,
- left_position, right_position);
-}
-
-
-static int Precedence(Token::Value tok, bool accept_IN) {
- if (tok == Token::IN && !accept_IN)
- return 0; // 0 precedence will terminate binary expression parsing
-
- return Token::Precedence(tok);
-}
-
-
-// Precedence >= 4
-Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
- ASSERT(prec >= 4);
- Expression* x = ParseUnaryExpression(CHECK_OK);
- for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
- // prec1 >= 4
- while (Precedence(peek(), accept_IN) == prec1) {
- Token::Value op = Next();
- int position = scanner().location().beg_pos;
- Expression* y = ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
-
- // Compute some expressions involving only number literals.
- if (x && x->AsLiteral() && x->AsLiteral()->handle()->IsNumber() &&
- y && y->AsLiteral() && y->AsLiteral()->handle()->IsNumber()) {
- double x_val = x->AsLiteral()->handle()->Number();
- double y_val = y->AsLiteral()->handle()->Number();
-
- switch (op) {
- case Token::ADD:
- x = NewNumberLiteral(x_val + y_val);
- continue;
- case Token::SUB:
- x = NewNumberLiteral(x_val - y_val);
- continue;
- case Token::MUL:
- x = NewNumberLiteral(x_val * y_val);
- continue;
- case Token::DIV:
- x = NewNumberLiteral(x_val / y_val);
- continue;
- case Token::BIT_OR:
- x = NewNumberLiteral(DoubleToInt32(x_val) | DoubleToInt32(y_val));
- continue;
- case Token::BIT_AND:
- x = NewNumberLiteral(DoubleToInt32(x_val) & DoubleToInt32(y_val));
- continue;
- case Token::BIT_XOR:
- x = NewNumberLiteral(DoubleToInt32(x_val) ^ DoubleToInt32(y_val));
- continue;
- case Token::SHL: {
- int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f);
- x = NewNumberLiteral(value);
- continue;
- }
- case Token::SHR: {
- uint32_t shift = DoubleToInt32(y_val) & 0x1f;
- uint32_t value = DoubleToUint32(x_val) >> shift;
- x = NewNumberLiteral(value);
- continue;
- }
- case Token::SAR: {
- uint32_t shift = DoubleToInt32(y_val) & 0x1f;
- int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift);
- x = NewNumberLiteral(value);
- continue;
- }
- default:
- break;
- }
- }
-
- // For now we distinguish between comparisons and other binary
- // operations. (We could combine the two and get rid of this
- // code and AST node eventually.)
- if (Token::IsCompareOp(op)) {
- // We have a comparison.
- Token::Value cmp = op;
- switch (op) {
- case Token::NE: cmp = Token::EQ; break;
- case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
- default: break;
- }
- x = NewCompareNode(cmp, x, y, position);
- if (cmp != op) {
- // The comparison was negated - add a NOT.
- x = new(zone()) UnaryOperation(Token::NOT, x);
- }
-
- } else {
- // We have a "normal" binary operation.
- x = new(zone()) BinaryOperation(op, x, y, position);
- }
- }
- }
- return x;
-}
-
-
-Expression* Parser::NewCompareNode(Token::Value op,
- Expression* x,
- Expression* y,
- int position) {
- ASSERT(op != Token::NE && op != Token::NE_STRICT);
- if (op == Token::EQ || op == Token::EQ_STRICT) {
- bool is_strict = (op == Token::EQ_STRICT);
- Literal* x_literal = x->AsLiteral();
- if (x_literal != NULL && x_literal->IsNull()) {
- return new(zone()) CompareToNull(is_strict, y);
- }
-
- Literal* y_literal = y->AsLiteral();
- if (y_literal != NULL && y_literal->IsNull()) {
- return new(zone()) CompareToNull(is_strict, x);
- }
- }
- return new(zone()) CompareOperation(op, x, y, position);
-}
-
-
-Expression* Parser::ParseUnaryExpression(bool* ok) {
- // UnaryExpression ::
- // PostfixExpression
- // 'delete' UnaryExpression
- // 'void' UnaryExpression
- // 'typeof' UnaryExpression
- // '++' UnaryExpression
- // '--' UnaryExpression
- // '+' UnaryExpression
- // '-' UnaryExpression
- // '~' UnaryExpression
- // '!' UnaryExpression
-
- Token::Value op = peek();
- if (Token::IsUnaryOp(op)) {
- op = Next();
- Expression* expression = ParseUnaryExpression(CHECK_OK);
-
- // Compute some expressions involving only number literals.
- if (expression != NULL && expression->AsLiteral() &&
- expression->AsLiteral()->handle()->IsNumber()) {
- double value = expression->AsLiteral()->handle()->Number();
- switch (op) {
- case Token::ADD:
- return expression;
- case Token::SUB:
- return NewNumberLiteral(-value);
- case Token::BIT_NOT:
- return NewNumberLiteral(~DoubleToInt32(value));
- default: break;
- }
- }
-
- // "delete identifier" is a syntax error in strict mode.
- if (op == Token::DELETE && top_scope_->is_strict_mode()) {
- VariableProxy* operand = expression->AsVariableProxy();
- if (operand != NULL && !operand->is_this()) {
- ReportMessage("strict_delete", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- }
-
- return new(zone()) UnaryOperation(op, expression);
-
- } else if (Token::IsCountOp(op)) {
- op = Next();
- Expression* expression = ParseUnaryExpression(CHECK_OK);
- // Signal a reference error if the expression is an invalid
- // left-hand side expression. We could report this as a syntax
- // error here but for compatibility with JSC we choose to report the
- // error at runtime.
- if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type =
- isolate()->factory()->invalid_lhs_in_prefix_op_symbol();
- expression = NewThrowReferenceError(type);
- }
-
- if (top_scope_->is_strict_mode()) {
- // Prefix expression operand in strict mode may not be eval or arguments.
- CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
- }
-
- int position = scanner().location().beg_pos;
- IncrementOperation* increment =
- new(zone()) IncrementOperation(op, expression);
- return new(zone()) CountOperation(true /* prefix */, increment, position);
-
- } else {
- return ParsePostfixExpression(ok);
- }
-}
-
-
-Expression* Parser::ParsePostfixExpression(bool* ok) {
- // PostfixExpression ::
- // LeftHandSideExpression ('++' | '--')?
-
- Expression* expression = ParseLeftHandSideExpression(CHECK_OK);
- if (!scanner().has_line_terminator_before_next() &&
- Token::IsCountOp(peek())) {
- // Signal a reference error if the expression is an invalid
- // left-hand side expression. We could report this as a syntax
- // error here but for compatibility with JSC we choose to report the
- // error at runtime.
- if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type =
- isolate()->factory()->invalid_lhs_in_postfix_op_symbol();
- expression = NewThrowReferenceError(type);
- }
-
- if (top_scope_->is_strict_mode()) {
- // Postfix expression operand in strict mode may not be eval or arguments.
- CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
- }
-
- Token::Value next = Next();
- int position = scanner().location().beg_pos;
- IncrementOperation* increment =
- new(zone()) IncrementOperation(next, expression);
- expression =
- new(zone()) CountOperation(false /* postfix */, increment, position);
- }
- return expression;
-}
-
-
-Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
- // LeftHandSideExpression ::
- // (NewExpression | MemberExpression) ...
-
- Expression* result;
- if (peek() == Token::NEW) {
- result = ParseNewExpression(CHECK_OK);
- } else {
- result = ParseMemberExpression(CHECK_OK);
- }
-
- while (true) {
- switch (peek()) {
- case Token::LBRACK: {
- Consume(Token::LBRACK);
- int pos = scanner().location().beg_pos;
- Expression* index = ParseExpression(true, CHECK_OK);
- result = new(zone()) Property(result, index, pos);
- Expect(Token::RBRACK, CHECK_OK);
- break;
- }
-
- case Token::LPAREN: {
- int pos = scanner().location().beg_pos;
- ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
-
- // Keep track of eval() calls since they disable all local variable
- // optimizations.
- // The calls that need special treatment are the
- // direct (i.e. not aliased) eval calls. These calls are all of the
- // form eval(...) with no explicit receiver object where eval is not
- // declared in the current scope chain.
- // These calls are marked as potentially direct eval calls. Whether
- // they are actually direct calls to eval is determined at run time.
- // TODO(994): In ES5, it doesn't matter if the "eval" var is declared
- // in the local scope chain. It only matters that it's called "eval",
- // is called without a receiver and it refers to the original eval
- // function.
- VariableProxy* callee = result->AsVariableProxy();
- if (callee != NULL &&
- callee->IsVariable(isolate()->factory()->eval_symbol())) {
- Handle<String> name = callee->name();
- Variable* var = top_scope_->Lookup(name);
- if (var == NULL) {
- top_scope_->RecordEvalCall();
- }
- }
- result = NewCall(result, args, pos);
- break;
- }
-
- case Token::PERIOD: {
- Consume(Token::PERIOD);
- int pos = scanner().location().beg_pos;
- Handle<String> name = ParseIdentifierName(CHECK_OK);
- result = new(zone()) Property(result, new(zone()) Literal(name), pos);
- if (fni_ != NULL) fni_->PushLiteralName(name);
- break;
- }
-
- default:
- return result;
- }
- }
-}
-
-
-Expression* Parser::ParseNewPrefix(PositionStack* stack, bool* ok) {
- // NewExpression ::
- // ('new')+ MemberExpression
-
- // The grammar for new expressions is pretty warped. The keyword
- // 'new' can either be a part of the new expression (where it isn't
- // followed by an argument list) or a part of the member expression,
- // where it must be followed by an argument list. To accommodate
- // this, we parse the 'new' keywords greedily and keep track of how
- // many we have parsed. This information is then passed on to the
- // member expression parser, which is only allowed to match argument
- // lists as long as it has 'new' prefixes left
- Expect(Token::NEW, CHECK_OK);
- PositionStack::Element pos(stack, scanner().location().beg_pos);
-
- Expression* result;
- if (peek() == Token::NEW) {
- result = ParseNewPrefix(stack, CHECK_OK);
- } else {
- result = ParseMemberWithNewPrefixesExpression(stack, CHECK_OK);
- }
-
- if (!stack->is_empty()) {
- int last = stack->pop();
- result = new(zone()) CallNew(result, new ZoneList<Expression*>(0), last);
- }
- return result;
-}
-
-
-Expression* Parser::ParseNewExpression(bool* ok) {
- PositionStack stack(ok);
- return ParseNewPrefix(&stack, ok);
-}
-
-
-Expression* Parser::ParseMemberExpression(bool* ok) {
- return ParseMemberWithNewPrefixesExpression(NULL, ok);
-}
-
-
-Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
- bool* ok) {
- // MemberExpression ::
- // (PrimaryExpression | FunctionLiteral)
- // ('[' Expression ']' | '.' Identifier | Arguments)*
-
- // Parse the initial primary or function expression.
- Expression* result = NULL;
- if (peek() == Token::FUNCTION) {
- Expect(Token::FUNCTION, CHECK_OK);
- int function_token_position = scanner().location().beg_pos;
- Handle<String> name;
- bool is_reserved_name = false;
- if (peek_any_identifier()) {
- name = ParseIdentifierOrReservedWord(&is_reserved_name, CHECK_OK);
- }
- result = ParseFunctionLiteral(name, is_reserved_name,
- function_token_position, NESTED, CHECK_OK);
- } else {
- result = ParsePrimaryExpression(CHECK_OK);
- }
-
- while (true) {
- switch (peek()) {
- case Token::LBRACK: {
- Consume(Token::LBRACK);
- int pos = scanner().location().beg_pos;
- Expression* index = ParseExpression(true, CHECK_OK);
- result = new(zone()) Property(result, index, pos);
- Expect(Token::RBRACK, CHECK_OK);
- break;
- }
- case Token::PERIOD: {
- Consume(Token::PERIOD);
- int pos = scanner().location().beg_pos;
- Handle<String> name = ParseIdentifierName(CHECK_OK);
- result = new(zone()) Property(result, new(zone()) Literal(name), pos);
- if (fni_ != NULL) fni_->PushLiteralName(name);
- break;
- }
- case Token::LPAREN: {
- if ((stack == NULL) || stack->is_empty()) return result;
- // Consume one of the new prefixes (already parsed).
- ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
- int last = stack->pop();
- result = new CallNew(result, args, last);
- break;
- }
- default:
- return result;
- }
- }
-}
-
-
-DebuggerStatement* Parser::ParseDebuggerStatement(bool* ok) {
- // In ECMA-262 'debugger' is defined as a reserved keyword. In some browser
- // contexts this is used as a statement which invokes the debugger as i a
- // break point is present.
- // DebuggerStatement ::
- // 'debugger' ';'
-
- Expect(Token::DEBUGGER, CHECK_OK);
- ExpectSemicolon(CHECK_OK);
- return new(zone()) DebuggerStatement();
-}
-
-
-void Parser::ReportUnexpectedToken(Token::Value token) {
- // We don't report stack overflows here, to avoid increasing the
- // stack depth even further. Instead we report it after parsing is
- // over, in ParseProgram/ParseJson.
- if (token == Token::ILLEGAL && stack_overflow_) return;
- // Four of the tokens are treated specially
- switch (token) {
- case Token::EOS:
- return ReportMessage("unexpected_eos", Vector<const char*>::empty());
- case Token::NUMBER:
- return ReportMessage("unexpected_token_number",
- Vector<const char*>::empty());
- case Token::STRING:
- return ReportMessage("unexpected_token_string",
- Vector<const char*>::empty());
- case Token::IDENTIFIER:
- return ReportMessage("unexpected_token_identifier",
- Vector<const char*>::empty());
- case Token::FUTURE_RESERVED_WORD:
- return ReportMessage(top_scope_->is_strict_mode() ?
- "unexpected_strict_reserved" :
- "unexpected_token_identifier",
- Vector<const char*>::empty());
- default:
- const char* name = Token::String(token);
- ASSERT(name != NULL);
- ReportMessage("unexpected_token", Vector<const char*>(&name, 1));
- }
-}
-
-
-void Parser::ReportInvalidPreparseData(Handle<String> name, bool* ok) {
- SmartPointer<char> name_string = name->ToCString(DISALLOW_NULLS);
- const char* element[1] = { *name_string };
- ReportMessage("invalid_preparser_data",
- Vector<const char*>(element, 1));
- *ok = false;
-}
-
-
-Expression* Parser::ParsePrimaryExpression(bool* ok) {
- // PrimaryExpression ::
- // 'this'
- // 'null'
- // 'true'
- // 'false'
- // Identifier
- // Number
- // String
- // ArrayLiteral
- // ObjectLiteral
- // RegExpLiteral
- // '(' Expression ')'
-
- Expression* result = NULL;
- switch (peek()) {
- case Token::THIS: {
- Consume(Token::THIS);
- VariableProxy* recv = top_scope_->receiver();
- result = recv;
- break;
- }
-
- case Token::NULL_LITERAL:
- Consume(Token::NULL_LITERAL);
- result = new(zone()) Literal(isolate()->factory()->null_value());
- break;
-
- case Token::TRUE_LITERAL:
- Consume(Token::TRUE_LITERAL);
- result = new(zone()) Literal(isolate()->factory()->true_value());
- break;
-
- case Token::FALSE_LITERAL:
- Consume(Token::FALSE_LITERAL);
- result = new(zone()) Literal(isolate()->factory()->false_value());
- break;
-
- case Token::IDENTIFIER:
- case Token::FUTURE_RESERVED_WORD: {
- Handle<String> name = ParseIdentifier(CHECK_OK);
- if (fni_ != NULL) fni_->PushVariableName(name);
- result = top_scope_->NewUnresolved(name,
- inside_with(),
- scanner().location().beg_pos);
- break;
- }
-
- case Token::NUMBER: {
- Consume(Token::NUMBER);
- ASSERT(scanner().is_literal_ascii());
- double value = StringToDouble(scanner().literal_ascii_string(),
- ALLOW_HEX | ALLOW_OCTALS);
- result = NewNumberLiteral(value);
- break;
- }
-
- case Token::STRING: {
- Consume(Token::STRING);
- Handle<String> symbol = GetSymbol(CHECK_OK);
- result = new(zone()) Literal(symbol);
- if (fni_ != NULL) fni_->PushLiteralName(symbol);
- break;
- }
-
- case Token::ASSIGN_DIV:
- result = ParseRegExpLiteral(true, CHECK_OK);
- break;
-
- case Token::DIV:
- result = ParseRegExpLiteral(false, CHECK_OK);
- break;
-
- case Token::LBRACK:
- result = ParseArrayLiteral(CHECK_OK);
- break;
-
- case Token::LBRACE:
- result = ParseObjectLiteral(CHECK_OK);
- break;
-
- case Token::LPAREN:
- Consume(Token::LPAREN);
- // Heuristically try to detect immediately called functions before
- // seeing the call parentheses.
- parenthesized_function_ = (peek() == Token::FUNCTION);
- result = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
- break;
-
- case Token::MOD:
- if (allow_natives_syntax_ || extension_ != NULL) {
- result = ParseV8Intrinsic(CHECK_OK);
- break;
- }
- // If we're not allowing special syntax we fall-through to the
- // default case.
-
- default: {
- Token::Value tok = Next();
- ReportUnexpectedToken(tok);
- *ok = false;
- return NULL;
- }
- }
-
- return result;
-}
-
-
-void Parser::BuildArrayLiteralBoilerplateLiterals(ZoneList<Expression*>* values,
- Handle<FixedArray> literals,
- bool* is_simple,
- int* depth) {
- // Fill in the literals.
- // Accumulate output values in local variables.
- bool is_simple_acc = true;
- int depth_acc = 1;
- for (int i = 0; i < values->length(); i++) {
- MaterializedLiteral* m_literal = values->at(i)->AsMaterializedLiteral();
- if (m_literal != NULL && m_literal->depth() >= depth_acc) {
- depth_acc = m_literal->depth() + 1;
- }
- Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
- if (boilerplate_value->IsUndefined()) {
- literals->set_the_hole(i);
- is_simple_acc = false;
- } else {
- literals->set(i, *boilerplate_value);
- }
- }
-
- *is_simple = is_simple_acc;
- *depth = depth_acc;
-}
-
-
-Expression* Parser::ParseArrayLiteral(bool* ok) {
- // ArrayLiteral ::
- // '[' Expression? (',' Expression?)* ']'
-
- ZoneList<Expression*>* values = new ZoneList<Expression*>(4);
- Expect(Token::LBRACK, CHECK_OK);
- while (peek() != Token::RBRACK) {
- Expression* elem;
- if (peek() == Token::COMMA) {
- elem = GetLiteralTheHole();
- } else {
- elem = ParseAssignmentExpression(true, CHECK_OK);
- }
- values->Add(elem);
- if (peek() != Token::RBRACK) {
- Expect(Token::COMMA, CHECK_OK);
- }
- }
- Expect(Token::RBRACK, CHECK_OK);
-
- // Update the scope information before the pre-parsing bailout.
- int literal_index = lexical_scope_->NextMaterializedLiteralIndex();
-
- // Allocate a fixed array with all the literals.
- Handle<FixedArray> literals =
- isolate()->factory()->NewFixedArray(values->length(), TENURED);
-
- // Fill in the literals.
- bool is_simple = true;
- int depth = 1;
- for (int i = 0, n = values->length(); i < n; i++) {
- MaterializedLiteral* m_literal = values->at(i)->AsMaterializedLiteral();
- if (m_literal != NULL && m_literal->depth() + 1 > depth) {
- depth = m_literal->depth() + 1;
- }
- Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
- if (boilerplate_value->IsUndefined()) {
- literals->set_the_hole(i);
- is_simple = false;
- } else {
- literals->set(i, *boilerplate_value);
- }
- }
-
- // Simple and shallow arrays can be lazily copied, we transform the
- // elements array to a copy-on-write array.
- if (is_simple && depth == 1 && values->length() > 0) {
- literals->set_map(isolate()->heap()->fixed_cow_array_map());
- }
-
- return new(zone()) ArrayLiteral(literals, values,
- literal_index, is_simple, depth);
-}
-
-
-bool Parser::IsBoilerplateProperty(ObjectLiteral::Property* property) {
- return property != NULL &&
- property->kind() != ObjectLiteral::Property::PROTOTYPE;
-}
-
-
-bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
- if (expression->AsLiteral() != NULL) return true;
- MaterializedLiteral* lit = expression->AsMaterializedLiteral();
- return lit != NULL && lit->is_simple();
-}
-
-
-bool CompileTimeValue::ArrayLiteralElementNeedsInitialization(
- Expression* value) {
- // If value is a literal the property value is already set in the
- // boilerplate object.
- if (value->AsLiteral() != NULL) return false;
- // If value is a materialized literal the property value is already set
- // in the boilerplate object if it is simple.
- if (CompileTimeValue::IsCompileTimeValue(value)) return false;
- return true;
-}
-
-
-Handle<FixedArray> CompileTimeValue::GetValue(Expression* expression) {
- ASSERT(IsCompileTimeValue(expression));
- Handle<FixedArray> result = FACTORY->NewFixedArray(2, TENURED);
- ObjectLiteral* object_literal = expression->AsObjectLiteral();
- if (object_literal != NULL) {
- ASSERT(object_literal->is_simple());
- if (object_literal->fast_elements()) {
- result->set(kTypeSlot, Smi::FromInt(OBJECT_LITERAL_FAST_ELEMENTS));
- } else {
- result->set(kTypeSlot, Smi::FromInt(OBJECT_LITERAL_SLOW_ELEMENTS));
- }
- result->set(kElementsSlot, *object_literal->constant_properties());
- } else {
- ArrayLiteral* array_literal = expression->AsArrayLiteral();
- ASSERT(array_literal != NULL && array_literal->is_simple());
- result->set(kTypeSlot, Smi::FromInt(ARRAY_LITERAL));
- result->set(kElementsSlot, *array_literal->constant_elements());
- }
- return result;
-}
-
-
-CompileTimeValue::Type CompileTimeValue::GetType(Handle<FixedArray> value) {
- Smi* type_value = Smi::cast(value->get(kTypeSlot));
- return static_cast<Type>(type_value->value());
-}
-
-
-Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
- return Handle<FixedArray>(FixedArray::cast(value->get(kElementsSlot)));
-}
-
-
-Handle<Object> Parser::GetBoilerplateValue(Expression* expression) {
- if (expression->AsLiteral() != NULL) {
- return expression->AsLiteral()->handle();
- }
- if (CompileTimeValue::IsCompileTimeValue(expression)) {
- return CompileTimeValue::GetValue(expression);
- }
- return isolate()->factory()->undefined_value();
-}
-
-// Defined in ast.cc
-bool IsEqualString(void* first, void* second);
-bool IsEqualNumber(void* first, void* second);
-
-
-// Validation per 11.1.5 Object Initialiser
-class ObjectLiteralPropertyChecker {
- public:
- ObjectLiteralPropertyChecker(Parser* parser, bool strict) :
- props(&IsEqualString),
- elems(&IsEqualNumber),
- parser_(parser),
- strict_(strict) {
- }
-
- void CheckProperty(
- ObjectLiteral::Property* property,
- Scanner::Location loc,
- bool* ok);
-
- private:
- enum PropertyKind {
- kGetAccessor = 0x01,
- kSetAccessor = 0x02,
- kAccessor = kGetAccessor | kSetAccessor,
- kData = 0x04
- };
-
- static intptr_t GetPropertyKind(ObjectLiteral::Property* property) {
- switch (property->kind()) {
- case ObjectLiteral::Property::GETTER:
- return kGetAccessor;
- case ObjectLiteral::Property::SETTER:
- return kSetAccessor;
- default:
- return kData;
- }
- }
-
- HashMap props;
- HashMap elems;
- Parser* parser_;
- bool strict_;
-};
-
-
-void ObjectLiteralPropertyChecker::CheckProperty(
- ObjectLiteral::Property* property,
- Scanner::Location loc,
- bool* ok) {
-
- ASSERT(property != NULL);
-
- Literal *lit = property->key();
- Handle<Object> handle = lit->handle();
-
- uint32_t hash;
- HashMap* map;
- void* key;
-
- if (handle->IsSymbol()) {
- Handle<String> name(String::cast(*handle));
- if (name->AsArrayIndex(&hash)) {
- Handle<Object> key_handle = FACTORY->NewNumberFromUint(hash);
- key = key_handle.location();
- map = &elems;
- } else {
- key = handle.location();
- hash = name->Hash();
- map = &props;
- }
- } else if (handle->ToArrayIndex(&hash)) {
- key = handle.location();
- map = &elems;
- } else {
- ASSERT(handle->IsNumber());
- double num = handle->Number();
- char arr[100];
- Vector<char> buffer(arr, ARRAY_SIZE(arr));
- const char* str = DoubleToCString(num, buffer);
- Handle<String> name = FACTORY->NewStringFromAscii(CStrVector(str));
- key = name.location();
- hash = name->Hash();
- map = &props;
- }
-
- // Lookup property previously defined, if any.
- HashMap::Entry* entry = map->Lookup(key, hash, true);
- intptr_t prev = reinterpret_cast<intptr_t> (entry->value);
- intptr_t curr = GetPropertyKind(property);
-
- // Duplicate data properties are illegal in strict mode.
- if (strict_ && (curr & prev & kData) != 0) {
- parser_->ReportMessageAt(loc, "strict_duplicate_property",
- Vector<const char*>::empty());
- *ok = false;
- return;
- }
- // Data property conflicting with an accessor.
- if (((curr & kData) && (prev & kAccessor)) ||
- ((prev & kData) && (curr & kAccessor))) {
- parser_->ReportMessageAt(loc, "accessor_data_property",
- Vector<const char*>::empty());
- *ok = false;
- return;
- }
- // Two accessors of the same type conflicting
- if ((curr & prev & kAccessor) != 0) {
- parser_->ReportMessageAt(loc, "accessor_get_set",
- Vector<const char*>::empty());
- *ok = false;
- return;
- }
-
- // Update map
- entry->value = reinterpret_cast<void*> (prev | curr);
- *ok = true;
-}
-
-
-void Parser::BuildObjectLiteralConstantProperties(
- ZoneList<ObjectLiteral::Property*>* properties,
- Handle<FixedArray> constant_properties,
- bool* is_simple,
- bool* fast_elements,
- int* depth) {
- int position = 0;
- // Accumulate the value in local variables and store it at the end.
- bool is_simple_acc = true;
- int depth_acc = 1;
- uint32_t max_element_index = 0;
- uint32_t elements = 0;
- for (int i = 0; i < properties->length(); i++) {
- ObjectLiteral::Property* property = properties->at(i);
- if (!IsBoilerplateProperty(property)) {
- is_simple_acc = false;
- continue;
- }
- MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
- if (m_literal != NULL && m_literal->depth() >= depth_acc) {
- depth_acc = m_literal->depth() + 1;
- }
-
- // Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
- // value for COMPUTED properties, the real value is filled in at
- // runtime. The enumeration order is maintained.
- Handle<Object> key = property->key()->handle();
- Handle<Object> value = GetBoilerplateValue(property->value());
- is_simple_acc = is_simple_acc && !value->IsUndefined();
-
- // Keep track of the number of elements in the object literal and
- // the largest element index. If the largest element index is
- // much larger than the number of elements, creating an object
- // literal with fast elements will be a waste of space.
- uint32_t element_index = 0;
- if (key->IsString()
- && Handle<String>::cast(key)->AsArrayIndex(&element_index)
- && element_index > max_element_index) {
- max_element_index = element_index;
- elements++;
- } else if (key->IsSmi()) {
- int key_value = Smi::cast(*key)->value();
- if (key_value > 0
- && static_cast<uint32_t>(key_value) > max_element_index) {
- max_element_index = key_value;
- }
- elements++;
- }
-
- // Add name, value pair to the fixed array.
- constant_properties->set(position++, *key);
- constant_properties->set(position++, *value);
- }
- *fast_elements =
- (max_element_index <= 32) || ((2 * elements) >= max_element_index);
- *is_simple = is_simple_acc;
- *depth = depth_acc;
-}
-
-
-ObjectLiteral::Property* Parser::ParseObjectLiteralGetSet(bool is_getter,
- bool* ok) {
- // Special handling of getter and setter syntax:
- // { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... }
- // We have already read the "get" or "set" keyword.
- Token::Value next = Next();
- bool is_keyword = Token::IsKeyword(next);
- if (next == Token::IDENTIFIER || next == Token::NUMBER ||
- next == Token::FUTURE_RESERVED_WORD ||
- next == Token::STRING || is_keyword) {
- Handle<String> name;
- if (is_keyword) {
- name = isolate_->factory()->LookupAsciiSymbol(Token::String(next));
- } else {
- name = GetSymbol(CHECK_OK);
- }
- FunctionLiteral* value =
- ParseFunctionLiteral(name,
- false, // reserved words are allowed here
- RelocInfo::kNoPosition,
- DECLARATION,
- CHECK_OK);
- // Allow any number of parameters for compatiabilty with JSC.
- // Specification only allows zero parameters for get and one for set.
- ObjectLiteral::Property* property =
- new(zone()) ObjectLiteral::Property(is_getter, value);
- return property;
- } else {
- ReportUnexpectedToken(next);
- *ok = false;
- return NULL;
- }
-}
-
-
-Expression* Parser::ParseObjectLiteral(bool* ok) {
- // ObjectLiteral ::
- // '{' (
- // ((IdentifierName | String | Number) ':' AssignmentExpression)
- // | (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
- // )*[','] '}'
-
- ZoneList<ObjectLiteral::Property*>* properties =
- new ZoneList<ObjectLiteral::Property*>(4);
- int number_of_boilerplate_properties = 0;
- bool has_function = false;
-
- ObjectLiteralPropertyChecker checker(this, top_scope_->is_strict_mode());
-
- Expect(Token::LBRACE, CHECK_OK);
- Scanner::Location loc = scanner().location();
-
- while (peek() != Token::RBRACE) {
- if (fni_ != NULL) fni_->Enter();
-
- Literal* key = NULL;
- Token::Value next = peek();
-
- // Location of the property name token
- Scanner::Location loc = scanner().peek_location();
-
- switch (next) {
- case Token::FUTURE_RESERVED_WORD:
- case Token::IDENTIFIER: {
- bool is_getter = false;
- bool is_setter = false;
- Handle<String> id =
- ParseIdentifierOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
- if (fni_ != NULL) fni_->PushLiteralName(id);
-
- if ((is_getter || is_setter) && peek() != Token::COLON) {
- // Update loc to point to the identifier
- loc = scanner().peek_location();
- ObjectLiteral::Property* property =
- ParseObjectLiteralGetSet(is_getter, CHECK_OK);
- if (IsBoilerplateProperty(property)) {
- number_of_boilerplate_properties++;
- }
- // Validate the property.
- checker.CheckProperty(property, loc, CHECK_OK);
- properties->Add(property);
- if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
-
- if (fni_ != NULL) {
- fni_->Infer();
- fni_->Leave();
- }
- continue; // restart the while
- }
- // Failed to parse as get/set property, so it's just a property
- // called "get" or "set".
- key = new(zone()) Literal(id);
- break;
- }
- case Token::STRING: {
- Consume(Token::STRING);
- Handle<String> string = GetSymbol(CHECK_OK);
- if (fni_ != NULL) fni_->PushLiteralName(string);
- uint32_t index;
- if (!string.is_null() && string->AsArrayIndex(&index)) {
- key = NewNumberLiteral(index);
- break;
- }
- key = new(zone()) Literal(string);
- break;
- }
- case Token::NUMBER: {
- Consume(Token::NUMBER);
- ASSERT(scanner().is_literal_ascii());
- double value = StringToDouble(scanner().literal_ascii_string(),
- ALLOW_HEX | ALLOW_OCTALS);
- key = NewNumberLiteral(value);
- break;
- }
- default:
- if (Token::IsKeyword(next)) {
- Consume(next);
- Handle<String> string = GetSymbol(CHECK_OK);
- key = new(zone()) Literal(string);
- } else {
- // Unexpected token.
- Token::Value next = Next();
- ReportUnexpectedToken(next);
- *ok = false;
- return NULL;
- }
- }
-
- Expect(Token::COLON, CHECK_OK);
- Expression* value = ParseAssignmentExpression(true, CHECK_OK);
-
- ObjectLiteral::Property* property =
- new(zone()) ObjectLiteral::Property(key, value);
-
- // Mark object literals that contain function literals and pretenure the
- // literal so it can be added as a constant function property.
- if (value->AsFunctionLiteral() != NULL) {
- has_function = true;
- value->AsFunctionLiteral()->set_pretenure(true);
- }
-
- // Count CONSTANT or COMPUTED properties to maintain the enumeration order.
- if (IsBoilerplateProperty(property)) number_of_boilerplate_properties++;
- // Validate the property
- checker.CheckProperty(property, loc, CHECK_OK);
- properties->Add(property);
-
- // TODO(1240767): Consider allowing trailing comma.
- if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
-
- if (fni_ != NULL) {
- fni_->Infer();
- fni_->Leave();
- }
- }
- Expect(Token::RBRACE, CHECK_OK);
-
- // Computation of literal_index must happen before pre parse bailout.
- int literal_index = lexical_scope_->NextMaterializedLiteralIndex();
-
- Handle<FixedArray> constant_properties = isolate()->factory()->NewFixedArray(
- number_of_boilerplate_properties * 2, TENURED);
-
- bool is_simple = true;
- bool fast_elements = true;
- int depth = 1;
- BuildObjectLiteralConstantProperties(properties,
- constant_properties,
- &is_simple,
- &fast_elements,
- &depth);
- return new(zone()) ObjectLiteral(constant_properties,
- properties,
- literal_index,
- is_simple,
- fast_elements,
- depth,
- has_function);
-}
-
-
-Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) {
- if (!scanner().ScanRegExpPattern(seen_equal)) {
- Next();
- ReportMessage("unterminated_regexp", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
-
- int literal_index = lexical_scope_->NextMaterializedLiteralIndex();
-
- Handle<String> js_pattern = NextLiteralString(TENURED);
- scanner().ScanRegExpFlags();
- Handle<String> js_flags = NextLiteralString(TENURED);
- Next();
-
- return new(zone()) RegExpLiteral(js_pattern, js_flags, literal_index);
-}
-
-
-ZoneList<Expression*>* Parser::ParseArguments(bool* ok) {
- // Arguments ::
- // '(' (AssignmentExpression)*[','] ')'
-
- ZoneList<Expression*>* result = new ZoneList<Expression*>(4);
- Expect(Token::LPAREN, CHECK_OK);
- bool done = (peek() == Token::RPAREN);
- while (!done) {
- Expression* argument = ParseAssignmentExpression(true, CHECK_OK);
- result->Add(argument);
- done = (peek() == Token::RPAREN);
- if (!done) Expect(Token::COMMA, CHECK_OK);
- }
- Expect(Token::RPAREN, CHECK_OK);
- return result;
-}
-
-
-FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
- bool name_is_reserved,
- int function_token_position,
- FunctionLiteralType type,
- bool* ok) {
- // Function ::
- // '(' FormalParameterList? ')' '{' FunctionBody '}'
- bool is_named = !var_name.is_null();
-
- // The name associated with this function. If it's a function expression,
- // this is the actual function name, otherwise this is the name of the
- // variable declared and initialized with the function (expression). In
- // that case, we don't have a function name (it's empty).
- Handle<String> name =
- is_named ? var_name : isolate()->factory()->empty_symbol();
- // The function name, if any.
- Handle<String> function_name = isolate()->factory()->empty_symbol();
- if (is_named && (type == EXPRESSION || type == NESTED)) {
- function_name = name;
- }
-
- int num_parameters = 0;
- // Parse function body.
- { Scope* scope =
- NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with());
- LexicalScope lexical_scope(this, scope, isolate());
- top_scope_->SetScopeName(name);
-
- // FormalParameterList ::
- // '(' (Identifier)*[','] ')'
- Expect(Token::LPAREN, CHECK_OK);
- int start_pos = scanner().location().beg_pos;
- Scanner::Location name_loc = Scanner::NoLocation();
- Scanner::Location dupe_loc = Scanner::NoLocation();
- Scanner::Location reserved_loc = Scanner::NoLocation();
-
- bool done = (peek() == Token::RPAREN);
- while (!done) {
- bool is_reserved = false;
- Handle<String> param_name =
- ParseIdentifierOrReservedWord(&is_reserved, CHECK_OK);
-
- // Store locations for possible future error reports.
- if (!name_loc.IsValid() && IsEvalOrArguments(param_name)) {
- name_loc = scanner().location();
- }
- if (!dupe_loc.IsValid() && top_scope_->IsDeclared(param_name)) {
- dupe_loc = scanner().location();
- }
- if (!reserved_loc.IsValid() && is_reserved) {
- reserved_loc = scanner().location();
- }
-
- Variable* parameter = top_scope_->DeclareLocal(param_name, Variable::VAR);
- top_scope_->AddParameter(parameter);
- num_parameters++;
- if (num_parameters > kMaxNumFunctionParameters) {
- ReportMessageAt(scanner().location(), "too_many_parameters",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- done = (peek() == Token::RPAREN);
- if (!done) Expect(Token::COMMA, CHECK_OK);
- }
- Expect(Token::RPAREN, CHECK_OK);
-
- Expect(Token::LBRACE, CHECK_OK);
- ZoneList<Statement*>* body = new ZoneList<Statement*>(8);
-
- // If we have a named function expression, we add a local variable
- // declaration to the body of the function with the name of the
- // function and let it refer to the function itself (closure).
- // NOTE: We create a proxy and resolve it here so that in the
- // future we can change the AST to only refer to VariableProxies
- // instead of Variables and Proxis as is the case now.
- if (!function_name.is_null() && function_name->length() > 0) {
- Variable* fvar = top_scope_->DeclareFunctionVar(function_name);
- VariableProxy* fproxy =
- top_scope_->NewUnresolved(function_name, inside_with());
- fproxy->BindTo(fvar);
- body->Add(new(zone()) ExpressionStatement(
- new(zone()) Assignment(Token::INIT_CONST, fproxy,
- new(zone()) ThisFunction(),
- RelocInfo::kNoPosition)));
- }
-
- // Determine if the function will be lazily compiled. The mode can
- // only be PARSE_LAZILY if the --lazy flag is true.
- bool is_lazily_compiled = (mode() == PARSE_LAZILY &&
- top_scope_->outer_scope()->is_global_scope() &&
- top_scope_->HasTrivialOuterContext() &&
- !parenthesized_function_);
- parenthesized_function_ = false; // The bit was set for this function only.
-
- int function_block_pos = scanner().location().beg_pos;
- int materialized_literal_count;
- int expected_property_count;
- int end_pos;
- bool only_simple_this_property_assignments;
- Handle<FixedArray> this_property_assignments;
- if (is_lazily_compiled && pre_data() != NULL) {
- FunctionEntry entry = pre_data()->GetFunctionEntry(function_block_pos);
- if (!entry.is_valid()) {
- ReportInvalidPreparseData(name, CHECK_OK);
- }
- end_pos = entry.end_pos();
- if (end_pos <= function_block_pos) {
- // End position greater than end of stream is safe, and hard to check.
- ReportInvalidPreparseData(name, CHECK_OK);
- }
- isolate()->counters()->total_preparse_skipped()->Increment(
- end_pos - function_block_pos);
- // Seek to position just before terminal '}'.
- scanner().SeekForward(end_pos - 1);
- materialized_literal_count = entry.literal_count();
- expected_property_count = entry.property_count();
- only_simple_this_property_assignments = false;
- this_property_assignments = isolate()->factory()->empty_fixed_array();
- Expect(Token::RBRACE, CHECK_OK);
- } else {
- ParseSourceElements(body, Token::RBRACE, CHECK_OK);
-
- materialized_literal_count = lexical_scope.materialized_literal_count();
- expected_property_count = lexical_scope.expected_property_count();
- only_simple_this_property_assignments =
- lexical_scope.only_simple_this_property_assignments();
- this_property_assignments = lexical_scope.this_property_assignments();
-
- Expect(Token::RBRACE, CHECK_OK);
- end_pos = scanner().location().end_pos;
- }
-
- // Validate strict mode.
- if (top_scope_->is_strict_mode()) {
- if (IsEvalOrArguments(name)) {
- int position = function_token_position != RelocInfo::kNoPosition
- ? function_token_position
- : (start_pos > 0 ? start_pos - 1 : start_pos);
- Scanner::Location location = Scanner::Location(position, start_pos);
- ReportMessageAt(location,
- "strict_function_name", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- if (name_loc.IsValid()) {
- ReportMessageAt(name_loc, "strict_param_name",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- if (dupe_loc.IsValid()) {
- ReportMessageAt(dupe_loc, "strict_param_dupe",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- if (name_is_reserved) {
- int position = function_token_position != RelocInfo::kNoPosition
- ? function_token_position
- : (start_pos > 0 ? start_pos - 1 : start_pos);
- Scanner::Location location = Scanner::Location(position, start_pos);
- ReportMessageAt(location, "strict_reserved_word",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- if (reserved_loc.IsValid()) {
- ReportMessageAt(reserved_loc, "strict_reserved_word",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- CheckOctalLiteral(start_pos, end_pos, CHECK_OK);
- }
-
- FunctionLiteral* function_literal =
- new(zone()) FunctionLiteral(name,
- top_scope_,
- body,
- materialized_literal_count,
- expected_property_count,
- only_simple_this_property_assignments,
- this_property_assignments,
- num_parameters,
- start_pos,
- end_pos,
- function_name->length() > 0,
- lexical_scope.ContainsLoops());
- function_literal->set_function_token_position(function_token_position);
-
- if (fni_ != NULL && !is_named) fni_->AddFunction(function_literal);
- return function_literal;
- }
-}
-
-
-Expression* Parser::ParseV8Intrinsic(bool* ok) {
- // CallRuntime ::
- // '%' Identifier Arguments
-
- Expect(Token::MOD, CHECK_OK);
- Handle<String> name = ParseIdentifier(CHECK_OK);
- ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
-
- if (extension_ != NULL) {
- // The extension structures are only accessible while parsing the
- // very first time not when reparsing because of lazy compilation.
- top_scope_->ForceEagerCompilation();
- }
-
- const Runtime::Function* function = Runtime::FunctionForSymbol(name);
-
- // Check for built-in IS_VAR macro.
- if (function != NULL &&
- function->intrinsic_type == Runtime::RUNTIME &&
- function->function_id == Runtime::kIS_VAR) {
- // %IS_VAR(x) evaluates to x if x is a variable,
- // leads to a parse error otherwise. Could be implemented as an
- // inline function %_IS_VAR(x) to eliminate this special case.
- if (args->length() == 1 && args->at(0)->AsVariableProxy() != NULL) {
- return args->at(0);
- } else {
- ReportMessage("unable_to_parse", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- }
-
- // Check that the expected number of arguments are being passed.
- if (function != NULL &&
- function->nargs != -1 &&
- function->nargs != args->length()) {
- ReportMessage("illegal_access", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
-
- // We have a valid intrinsics call or a call to a builtin.
- return new(zone()) CallRuntime(name, function, args);
-}
-
-
-bool Parser::peek_any_identifier() {
- Token::Value next = peek();
- return next == Token::IDENTIFIER ||
- next == Token::FUTURE_RESERVED_WORD;
-}
-
-
-void Parser::Consume(Token::Value token) {
- Token::Value next = Next();
- USE(next);
- USE(token);
- ASSERT(next == token);
-}
-
-
-void Parser::Expect(Token::Value token, bool* ok) {
- Token::Value next = Next();
- if (next == token) return;
- ReportUnexpectedToken(next);
- *ok = false;
-}
-
-
-bool Parser::Check(Token::Value token) {
- Token::Value next = peek();
- if (next == token) {
- Consume(next);
- return true;
- }
- return false;
-}
-
-
-void Parser::ExpectSemicolon(bool* ok) {
- // Check for automatic semicolon insertion according to
- // the rules given in ECMA-262, section 7.9, page 21.
- Token::Value tok = peek();
- if (tok == Token::SEMICOLON) {
- Next();
- return;
- }
- if (scanner().has_line_terminator_before_next() ||
- tok == Token::RBRACE ||
- tok == Token::EOS) {
- return;
- }
- Expect(Token::SEMICOLON, ok);
-}
-
-
-Literal* Parser::GetLiteralUndefined() {
- return new(zone()) Literal(isolate()->factory()->undefined_value());
-}
-
-
-Literal* Parser::GetLiteralTheHole() {
- return new(zone()) Literal(isolate()->factory()->the_hole_value());
-}
-
-
-Literal* Parser::GetLiteralNumber(double value) {
- return NewNumberLiteral(value);
-}
-
-
-Handle<String> Parser::ParseIdentifier(bool* ok) {
- bool is_reserved;
- return ParseIdentifierOrReservedWord(&is_reserved, ok);
-}
-
-
-Handle<String> Parser::ParseIdentifierOrReservedWord(bool* is_reserved,
- bool* ok) {
- *is_reserved = false;
- if (top_scope_->is_strict_mode()) {
- Expect(Token::IDENTIFIER, ok);
- } else {
- if (!Check(Token::IDENTIFIER)) {
- Expect(Token::FUTURE_RESERVED_WORD, ok);
- *is_reserved = true;
- }
- }
- if (!*ok) return Handle<String>();
- return GetSymbol(ok);
-}
-
-
-Handle<String> Parser::ParseIdentifierName(bool* ok) {
- Token::Value next = Next();
- if (next != Token::IDENTIFIER &&
- next != Token::FUTURE_RESERVED_WORD &&
- !Token::IsKeyword(next)) {
- ReportUnexpectedToken(next);
- *ok = false;
- return Handle<String>();
- }
- return GetSymbol(ok);
-}
-
-
-// Checks LHS expression for assignment and prefix/postfix increment/decrement
-// in strict mode.
-void Parser::CheckStrictModeLValue(Expression* expression,
- const char* error,
- bool* ok) {
- ASSERT(top_scope_->is_strict_mode());
- VariableProxy* lhs = expression != NULL
- ? expression->AsVariableProxy()
- : NULL;
-
- if (lhs != NULL && !lhs->is_this() && IsEvalOrArguments(lhs->name())) {
- ReportMessage(error, Vector<const char*>::empty());
- *ok = false;
- }
-}
-
-
-// Checks whether octal literal last seen is between beg_pos and end_pos.
-// If so, reports an error.
-void Parser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
- int octal = scanner().octal_position();
- if (beg_pos <= octal && octal <= end_pos) {
- ReportMessageAt(Scanner::Location(octal, octal + 1), "strict_octal_literal",
- Vector<const char*>::empty());
- scanner().clear_octal_position();
- *ok = false;
- }
-}
-
-
-// This function reads an identifier and determines whether or not it
-// is 'get' or 'set'.
-Handle<String> Parser::ParseIdentifierOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok) {
- Handle<String> result = ParseIdentifier(ok);
- if (!*ok) return Handle<String>();
- if (scanner().is_literal_ascii() && scanner().literal_length() == 3) {
- const char* token = scanner().literal_ascii_string().start();
- *is_get = strncmp(token, "get", 3) == 0;
- *is_set = !*is_get && strncmp(token, "set", 3) == 0;
- }
- return result;
-}
-
-
-// ----------------------------------------------------------------------------
-// Parser support
-
-
-bool Parser::TargetStackContainsLabel(Handle<String> label) {
- for (Target* t = target_stack_; t != NULL; t = t->previous()) {
- BreakableStatement* stat = t->node()->AsBreakableStatement();
- if (stat != NULL && ContainsLabel(stat->labels(), label))
- return true;
- }
- return false;
-}
-
-
-BreakableStatement* Parser::LookupBreakTarget(Handle<String> label, bool* ok) {
- bool anonymous = label.is_null();
- for (Target* t = target_stack_; t != NULL; t = t->previous()) {
- BreakableStatement* stat = t->node()->AsBreakableStatement();
- if (stat == NULL) continue;
- if ((anonymous && stat->is_target_for_anonymous()) ||
- (!anonymous && ContainsLabel(stat->labels(), label))) {
- RegisterTargetUse(stat->break_target(), t->previous());
- return stat;
- }
- }
- return NULL;
-}
-
-
-IterationStatement* Parser::LookupContinueTarget(Handle<String> label,
- bool* ok) {
- bool anonymous = label.is_null();
- for (Target* t = target_stack_; t != NULL; t = t->previous()) {
- IterationStatement* stat = t->node()->AsIterationStatement();
- if (stat == NULL) continue;
-
- ASSERT(stat->is_target_for_anonymous());
- if (anonymous || ContainsLabel(stat->labels(), label)) {
- RegisterTargetUse(stat->continue_target(), t->previous());
- return stat;
- }
- }
- return NULL;
-}
-
-
-void Parser::RegisterTargetUse(BreakTarget* target, Target* stop) {
- // Register that a break target found at the given stop in the
- // target stack has been used from the top of the target stack. Add
- // the break target to any TargetCollectors passed on the stack.
- for (Target* t = target_stack_; t != stop; t = t->previous()) {
- TargetCollector* collector = t->node()->AsTargetCollector();
- if (collector != NULL) collector->AddTarget(target);
- }
-}
-
-
-Literal* Parser::NewNumberLiteral(double number) {
- return new(zone()) Literal(isolate()->factory()->NewNumber(number, TENURED));
-}
-
-
-Expression* Parser::NewThrowReferenceError(Handle<String> type) {
- return NewThrowError(isolate()->factory()->MakeReferenceError_symbol(),
- type, HandleVector<Object>(NULL, 0));
-}
-
-
-Expression* Parser::NewThrowSyntaxError(Handle<String> type,
- Handle<Object> first) {
- int argc = first.is_null() ? 0 : 1;
- Vector< Handle<Object> > arguments = HandleVector<Object>(&first, argc);
- return NewThrowError(
- isolate()->factory()->MakeSyntaxError_symbol(), type, arguments);
-}
-
-
-Expression* Parser::NewThrowTypeError(Handle<String> type,
- Handle<Object> first,
- Handle<Object> second) {
- ASSERT(!first.is_null() && !second.is_null());
- Handle<Object> elements[] = { first, second };
- Vector< Handle<Object> > arguments =
- HandleVector<Object>(elements, ARRAY_SIZE(elements));
- return NewThrowError(
- isolate()->factory()->MakeTypeError_symbol(), type, arguments);
-}
-
-
-Expression* Parser::NewThrowError(Handle<String> constructor,
- Handle<String> type,
- Vector< Handle<Object> > arguments) {
- int argc = arguments.length();
- Handle<FixedArray> elements = isolate()->factory()->NewFixedArray(argc,
- TENURED);
- for (int i = 0; i < argc; i++) {
- Handle<Object> element = arguments[i];
- if (!element.is_null()) {
- elements->set(i, *element);
- }
- }
- Handle<JSArray> array = isolate()->factory()->NewJSArrayWithElements(elements,
- TENURED);
-
- ZoneList<Expression*>* args = new ZoneList<Expression*>(2);
- args->Add(new(zone()) Literal(type));
- args->Add(new(zone()) Literal(array));
- return new(zone()) Throw(new(zone()) CallRuntime(constructor, NULL, args),
- scanner().location().beg_pos);
-}
-
-// ----------------------------------------------------------------------------
-// JSON
-
-Handle<Object> JsonParser::ParseJson(Handle<String> script,
- UC16CharacterStream* source) {
- scanner_.Initialize(source);
- stack_overflow_ = false;
- Handle<Object> result = ParseJsonValue();
- if (result.is_null() || scanner_.Next() != Token::EOS) {
- if (stack_overflow_) {
- // Scanner failed.
- isolate()->StackOverflow();
- } else {
- // Parse failed. Scanner's current token is the unexpected token.
- Token::Value token = scanner_.current_token();
-
- const char* message;
- const char* name_opt = NULL;
-
- switch (token) {
- case Token::EOS:
- message = "unexpected_eos";
- break;
- case Token::NUMBER:
- message = "unexpected_token_number";
- break;
- case Token::STRING:
- message = "unexpected_token_string";
- break;
- case Token::IDENTIFIER:
- case Token::FUTURE_RESERVED_WORD:
- message = "unexpected_token_identifier";
- break;
- default:
- message = "unexpected_token";
- name_opt = Token::String(token);
- ASSERT(name_opt != NULL);
- break;
- }
-
- Scanner::Location source_location = scanner_.location();
- Factory* factory = isolate()->factory();
- MessageLocation location(factory->NewScript(script),
- source_location.beg_pos,
- source_location.end_pos);
- Handle<JSArray> array;
- if (name_opt == NULL) {
- array = factory->NewJSArray(0);
- } else {
- Handle<String> name = factory->NewStringFromUtf8(CStrVector(name_opt));
- Handle<FixedArray> element = factory->NewFixedArray(1);
- element->set(0, *name);
- array = factory->NewJSArrayWithElements(element);
- }
- Handle<Object> result = factory->NewSyntaxError(message, array);
- isolate()->Throw(*result, &location);
- return Handle<Object>::null();
- }
- }
- return result;
-}
-
-
-Handle<String> JsonParser::GetString() {
- int literal_length = scanner_.literal_length();
- if (literal_length == 0) {
- return isolate()->factory()->empty_string();
- }
- if (scanner_.is_literal_ascii()) {
- return isolate()->factory()->NewStringFromAscii(
- scanner_.literal_ascii_string());
- } else {
- return isolate()->factory()->NewStringFromTwoByte(
- scanner_.literal_uc16_string());
- }
-}
-
-
-// Parse any JSON value.
-Handle<Object> JsonParser::ParseJsonValue() {
- Token::Value token = scanner_.Next();
- switch (token) {
- case Token::STRING:
- return GetString();
- case Token::NUMBER:
- return isolate()->factory()->NewNumber(scanner_.number());
- case Token::FALSE_LITERAL:
- return isolate()->factory()->false_value();
- case Token::TRUE_LITERAL:
- return isolate()->factory()->true_value();
- case Token::NULL_LITERAL:
- return isolate()->factory()->null_value();
- case Token::LBRACE:
- return ParseJsonObject();
- case Token::LBRACK:
- return ParseJsonArray();
- default:
- return ReportUnexpectedToken();
- }
-}
-
-
-// Parse a JSON object. Scanner must be right after '{' token.
-Handle<Object> JsonParser::ParseJsonObject() {
- Handle<JSFunction> object_constructor(
- isolate()->global_context()->object_function());
- Handle<JSObject> json_object =
- isolate()->factory()->NewJSObject(object_constructor);
- if (scanner_.peek() == Token::RBRACE) {
- scanner_.Next();
- } else {
- if (StackLimitCheck(isolate()).HasOverflowed()) {
- stack_overflow_ = true;
- return Handle<Object>::null();
- }
- do {
- if (scanner_.Next() != Token::STRING) {
- return ReportUnexpectedToken();
- }
- Handle<String> key = GetString();
- if (scanner_.Next() != Token::COLON) {
- return ReportUnexpectedToken();
- }
- Handle<Object> value = ParseJsonValue();
- if (value.is_null()) return Handle<Object>::null();
- uint32_t index;
- if (key->AsArrayIndex(&index)) {
- SetOwnElement(json_object, index, value, kNonStrictMode);
- } else if (key->Equals(isolate()->heap()->Proto_symbol())) {
- // We can't remove the __proto__ accessor since it's hardcoded
- // in several places. Instead go along and add the value as
- // the prototype of the created object if possible.
- SetPrototype(json_object, value);
- } else {
- SetLocalPropertyIgnoreAttributes(json_object, key, value, NONE);
- }
- } while (scanner_.Next() == Token::COMMA);
- if (scanner_.current_token() != Token::RBRACE) {
- return ReportUnexpectedToken();
- }
- }
- return json_object;
-}
-
-
-// Parse a JSON array. Scanner must be right after '[' token.
-Handle<Object> JsonParser::ParseJsonArray() {
- ZoneScope zone_scope(DELETE_ON_EXIT);
- ZoneList<Handle<Object> > elements(4);
-
- Token::Value token = scanner_.peek();
- if (token == Token::RBRACK) {
- scanner_.Next();
- } else {
- if (StackLimitCheck(isolate()).HasOverflowed()) {
- stack_overflow_ = true;
- return Handle<Object>::null();
- }
- do {
- Handle<Object> element = ParseJsonValue();
- if (element.is_null()) return Handle<Object>::null();
- elements.Add(element);
- token = scanner_.Next();
- } while (token == Token::COMMA);
- if (token != Token::RBRACK) {
- return ReportUnexpectedToken();
- }
- }
-
- // Allocate a fixed array with all the elements.
- Handle<FixedArray> fast_elements =
- isolate()->factory()->NewFixedArray(elements.length());
-
- for (int i = 0, n = elements.length(); i < n; i++) {
- fast_elements->set(i, *elements[i]);
- }
-
- return isolate()->factory()->NewJSArrayWithElements(fast_elements);
-}
-
-// ----------------------------------------------------------------------------
-// Regular expressions
-
-
-RegExpParser::RegExpParser(FlatStringReader* in,
- Handle<String>* error,
- bool multiline)
- : isolate_(Isolate::Current()),
- error_(error),
- captures_(NULL),
- in_(in),
- current_(kEndMarker),
- next_pos_(0),
- capture_count_(0),
- has_more_(true),
- multiline_(multiline),
- simple_(false),
- contains_anchor_(false),
- is_scanned_for_captures_(false),
- failed_(false) {
- Advance();
-}
-
-
-uc32 RegExpParser::Next() {
- if (has_next()) {
- return in()->Get(next_pos_);
- } else {
- return kEndMarker;
- }
-}
-
-
-void RegExpParser::Advance() {
- if (next_pos_ < in()->length()) {
- StackLimitCheck check(isolate());
- if (check.HasOverflowed()) {
- ReportError(CStrVector(Isolate::kStackOverflowMessage));
- } else if (isolate()->zone()->excess_allocation()) {
- ReportError(CStrVector("Regular expression too large"));
- } else {
- current_ = in()->Get(next_pos_);
- next_pos_++;
- }
- } else {
- current_ = kEndMarker;
- has_more_ = false;
- }
-}
-
-
-void RegExpParser::Reset(int pos) {
- next_pos_ = pos;
- Advance();
-}
-
-
-void RegExpParser::Advance(int dist) {
- next_pos_ += dist - 1;
- Advance();
-}
-
-
-bool RegExpParser::simple() {
- return simple_;
-}
-
-RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
- failed_ = true;
- *error_ = isolate()->factory()->NewStringFromAscii(message, NOT_TENURED);
- // Zip to the end to make sure the no more input is read.
- current_ = kEndMarker;
- next_pos_ = in()->length();
- return NULL;
-}
-
-
-// Pattern ::
-// Disjunction
-RegExpTree* RegExpParser::ParsePattern() {
- RegExpTree* result = ParseDisjunction(CHECK_FAILED);
- ASSERT(!has_more());
- // If the result of parsing is a literal string atom, and it has the
- // same length as the input, then the atom is identical to the input.
- if (result->IsAtom() && result->AsAtom()->length() == in()->length()) {
- simple_ = true;
- }
- return result;
-}
-
-
-// Disjunction ::
-// Alternative
-// Alternative | Disjunction
-// Alternative ::
-// [empty]
-// Term Alternative
-// Term ::
-// Assertion
-// Atom
-// Atom Quantifier
-RegExpTree* RegExpParser::ParseDisjunction() {
- // Used to store current state while parsing subexpressions.
- RegExpParserState initial_state(NULL, INITIAL, 0);
- RegExpParserState* stored_state = &initial_state;
- // Cache the builder in a local variable for quick access.
- RegExpBuilder* builder = initial_state.builder();
- while (true) {
- switch (current()) {
- case kEndMarker:
- if (stored_state->IsSubexpression()) {
- // Inside a parenthesized group when hitting end of input.
- ReportError(CStrVector("Unterminated group") CHECK_FAILED);
- }
- ASSERT_EQ(INITIAL, stored_state->group_type());
- // Parsing completed successfully.
- return builder->ToRegExp();
- case ')': {
- if (!stored_state->IsSubexpression()) {
- ReportError(CStrVector("Unmatched ')'") CHECK_FAILED);
- }
- ASSERT_NE(INITIAL, stored_state->group_type());
-
- Advance();
- // End disjunction parsing and convert builder content to new single
- // regexp atom.
- RegExpTree* body = builder->ToRegExp();
-
- int end_capture_index = captures_started();
-
- int capture_index = stored_state->capture_index();
- SubexpressionType type = stored_state->group_type();
-
- // Restore previous state.
- stored_state = stored_state->previous_state();
- builder = stored_state->builder();
-
- // Build result of subexpression.
- if (type == CAPTURE) {
- RegExpCapture* capture = new(zone()) RegExpCapture(body, capture_index);
- captures_->at(capture_index - 1) = capture;
- body = capture;
- } else if (type != GROUPING) {
- ASSERT(type == POSITIVE_LOOKAHEAD || type == NEGATIVE_LOOKAHEAD);
- bool is_positive = (type == POSITIVE_LOOKAHEAD);
- body = new(zone()) RegExpLookahead(body,
- is_positive,
- end_capture_index - capture_index,
- capture_index);
- }
- builder->AddAtom(body);
- // For compatability with JSC and ES3, we allow quantifiers after
- // lookaheads, and break in all cases.
- break;
- }
- case '|': {
- Advance();
- builder->NewAlternative();
- continue;
- }
- case '*':
- case '+':
- case '?':
- return ReportError(CStrVector("Nothing to repeat"));
- case '^': {
- Advance();
- if (multiline_) {
- builder->AddAssertion(
- new(zone()) RegExpAssertion(RegExpAssertion::START_OF_LINE));
- } else {
- builder->AddAssertion(
- new(zone()) RegExpAssertion(RegExpAssertion::START_OF_INPUT));
- set_contains_anchor();
- }
- continue;
- }
- case '$': {
- Advance();
- RegExpAssertion::Type type =
- multiline_ ? RegExpAssertion::END_OF_LINE :
- RegExpAssertion::END_OF_INPUT;
- builder->AddAssertion(new(zone()) RegExpAssertion(type));
- continue;
- }
- case '.': {
- Advance();
- // everything except \x0a, \x0d, \u2028 and \u2029
- ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
- CharacterRange::AddClassEscape('.', ranges);
- RegExpTree* atom = new(zone()) RegExpCharacterClass(ranges, false);
- builder->AddAtom(atom);
- break;
- }
- case '(': {
- SubexpressionType type = CAPTURE;
- Advance();
- if (current() == '?') {
- switch (Next()) {
- case ':':
- type = GROUPING;
- break;
- case '=':
- type = POSITIVE_LOOKAHEAD;
- break;
- case '!':
- type = NEGATIVE_LOOKAHEAD;
- break;
- default:
- ReportError(CStrVector("Invalid group") CHECK_FAILED);
- break;
- }
- Advance(2);
- } else {
- if (captures_ == NULL) {
- captures_ = new ZoneList<RegExpCapture*>(2);
- }
- if (captures_started() >= kMaxCaptures) {
- ReportError(CStrVector("Too many captures") CHECK_FAILED);
- }
- captures_->Add(NULL);
- }
- // Store current state and begin new disjunction parsing.
- stored_state = new(zone()) RegExpParserState(stored_state,
- type,
- captures_started());
- builder = stored_state->builder();
- continue;
- }
- case '[': {
- RegExpTree* atom = ParseCharacterClass(CHECK_FAILED);
- builder->AddAtom(atom);
- break;
- }
- // Atom ::
- // \ AtomEscape
- case '\\':
- switch (Next()) {
- case kEndMarker:
- return ReportError(CStrVector("\\ at end of pattern"));
- case 'b':
- Advance(2);
- builder->AddAssertion(
- new(zone()) RegExpAssertion(RegExpAssertion::BOUNDARY));
- continue;
- case 'B':
- Advance(2);
- builder->AddAssertion(
- new(zone()) RegExpAssertion(RegExpAssertion::NON_BOUNDARY));
- continue;
- // AtomEscape ::
- // CharacterClassEscape
- //
- // CharacterClassEscape :: one of
- // d D s S w W
- case 'd': case 'D': case 's': case 'S': case 'w': case 'W': {
- uc32 c = Next();
- Advance(2);
- ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
- CharacterRange::AddClassEscape(c, ranges);
- RegExpTree* atom = new(zone()) RegExpCharacterClass(ranges, false);
- builder->AddAtom(atom);
- break;
- }
- case '1': case '2': case '3': case '4': case '5': case '6':
- case '7': case '8': case '9': {
- int index = 0;
- if (ParseBackReferenceIndex(&index)) {
- RegExpCapture* capture = NULL;
- if (captures_ != NULL && index <= captures_->length()) {
- capture = captures_->at(index - 1);
- }
- if (capture == NULL) {
- builder->AddEmpty();
- break;
- }
- RegExpTree* atom = new(zone()) RegExpBackReference(capture);
- builder->AddAtom(atom);
- break;
- }
- uc32 first_digit = Next();
- if (first_digit == '8' || first_digit == '9') {
- // Treat as identity escape
- builder->AddCharacter(first_digit);
- Advance(2);
- break;
- }
- }
- // FALLTHROUGH
- case '0': {
- Advance();
- uc32 octal = ParseOctalLiteral();
- builder->AddCharacter(octal);
- break;
- }
- // ControlEscape :: one of
- // f n r t v
- case 'f':
- Advance(2);
- builder->AddCharacter('\f');
- break;
- case 'n':
- Advance(2);
- builder->AddCharacter('\n');
- break;
- case 'r':
- Advance(2);
- builder->AddCharacter('\r');
- break;
- case 't':
- Advance(2);
- builder->AddCharacter('\t');
- break;
- case 'v':
- Advance(2);
- builder->AddCharacter('\v');
- break;
- case 'c': {
- Advance();
- uc32 controlLetter = Next();
- // Special case if it is an ASCII letter.
- // Convert lower case letters to uppercase.
- uc32 letter = controlLetter & ~('a' ^ 'A');
- if (letter < 'A' || 'Z' < letter) {
- // controlLetter is not in range 'A'-'Z' or 'a'-'z'.
- // This is outside the specification. We match JSC in
- // reading the backslash as a literal character instead
- // of as starting an escape.
- builder->AddCharacter('\\');
- } else {
- Advance(2);
- builder->AddCharacter(controlLetter & 0x1f);
- }
- break;
- }
- case 'x': {
- Advance(2);
- uc32 value;
- if (ParseHexEscape(2, &value)) {
- builder->AddCharacter(value);
- } else {
- builder->AddCharacter('x');
- }
- break;
- }
- case 'u': {
- Advance(2);
- uc32 value;
- if (ParseHexEscape(4, &value)) {
- builder->AddCharacter(value);
- } else {
- builder->AddCharacter('u');
- }
- break;
- }
- default:
- // Identity escape.
- builder->AddCharacter(Next());
- Advance(2);
- break;
- }
- break;
- case '{': {
- int dummy;
- if (ParseIntervalQuantifier(&dummy, &dummy)) {
- ReportError(CStrVector("Nothing to repeat") CHECK_FAILED);
- }
- // fallthrough
- }
- default:
- builder->AddCharacter(current());
- Advance();
- break;
- } // end switch(current())
-
- int min;
- int max;
- switch (current()) {
- // QuantifierPrefix ::
- // *
- // +
- // ?
- // {
- case '*':
- min = 0;
- max = RegExpTree::kInfinity;
- Advance();
- break;
- case '+':
- min = 1;
- max = RegExpTree::kInfinity;
- Advance();
- break;
- case '?':
- min = 0;
- max = 1;
- Advance();
- break;
- case '{':
- if (ParseIntervalQuantifier(&min, &max)) {
- if (max < min) {
- ReportError(CStrVector("numbers out of order in {} quantifier.")
- CHECK_FAILED);
- }
- break;
- } else {
- continue;
- }
- default:
- continue;
- }
- RegExpQuantifier::Type type = RegExpQuantifier::GREEDY;
- if (current() == '?') {
- type = RegExpQuantifier::NON_GREEDY;
- Advance();
- } else if (FLAG_regexp_possessive_quantifier && current() == '+') {
- // FLAG_regexp_possessive_quantifier is a debug-only flag.
- type = RegExpQuantifier::POSSESSIVE;
- Advance();
- }
- builder->AddQuantifierToAtom(min, max, type);
- }
-}
-
-
-#ifdef DEBUG
-// Currently only used in an ASSERT.
-static bool IsSpecialClassEscape(uc32 c) {
- switch (c) {
- case 'd': case 'D':
- case 's': case 'S':
- case 'w': case 'W':
- return true;
- default:
- return false;
- }
-}
-#endif
-
-
-// In order to know whether an escape is a backreference or not we have to scan
-// the entire regexp and find the number of capturing parentheses. However we
-// don't want to scan the regexp twice unless it is necessary. This mini-parser
-// is called when needed. It can see the difference between capturing and
-// noncapturing parentheses and can skip character classes and backslash-escaped
-// characters.
-void RegExpParser::ScanForCaptures() {
- // Start with captures started previous to current position
- int capture_count = captures_started();
- // Add count of captures after this position.
- int n;
- while ((n = current()) != kEndMarker) {
- Advance();
- switch (n) {
- case '\\':
- Advance();
- break;
- case '[': {
- int c;
- while ((c = current()) != kEndMarker) {
- Advance();
- if (c == '\\') {
- Advance();
- } else {
- if (c == ']') break;
- }
- }
- break;
- }
- case '(':
- if (current() != '?') capture_count++;
- break;
- }
- }
- capture_count_ = capture_count;
- is_scanned_for_captures_ = true;
-}
-
-
-bool RegExpParser::ParseBackReferenceIndex(int* index_out) {
- ASSERT_EQ('\\', current());
- ASSERT('1' <= Next() && Next() <= '9');
- // Try to parse a decimal literal that is no greater than the total number
- // of left capturing parentheses in the input.
- int start = position();
- int value = Next() - '0';
- Advance(2);
- while (true) {
- uc32 c = current();
- if (IsDecimalDigit(c)) {
- value = 10 * value + (c - '0');
- if (value > kMaxCaptures) {
- Reset(start);
- return false;
- }
- Advance();
- } else {
- break;
- }
- }
- if (value > captures_started()) {
- if (!is_scanned_for_captures_) {
- int saved_position = position();
- ScanForCaptures();
- Reset(saved_position);
- }
- if (value > capture_count_) {
- Reset(start);
- return false;
- }
- }
- *index_out = value;
- return true;
-}
-
-
-// QuantifierPrefix ::
-// { DecimalDigits }
-// { DecimalDigits , }
-// { DecimalDigits , DecimalDigits }
-//
-// Returns true if parsing succeeds, and set the min_out and max_out
-// values. Values are truncated to RegExpTree::kInfinity if they overflow.
-bool RegExpParser::ParseIntervalQuantifier(int* min_out, int* max_out) {
- ASSERT_EQ(current(), '{');
- int start = position();
- Advance();
- int min = 0;
- if (!IsDecimalDigit(current())) {
- Reset(start);
- return false;
- }
- while (IsDecimalDigit(current())) {
- int next = current() - '0';
- if (min > (RegExpTree::kInfinity - next) / 10) {
- // Overflow. Skip past remaining decimal digits and return -1.
- do {
- Advance();
- } while (IsDecimalDigit(current()));
- min = RegExpTree::kInfinity;
- break;
- }
- min = 10 * min + next;
- Advance();
- }
- int max = 0;
- if (current() == '}') {
- max = min;
- Advance();
- } else if (current() == ',') {
- Advance();
- if (current() == '}') {
- max = RegExpTree::kInfinity;
- Advance();
- } else {
- while (IsDecimalDigit(current())) {
- int next = current() - '0';
- if (max > (RegExpTree::kInfinity - next) / 10) {
- do {
- Advance();
- } while (IsDecimalDigit(current()));
- max = RegExpTree::kInfinity;
- break;
- }
- max = 10 * max + next;
- Advance();
- }
- if (current() != '}') {
- Reset(start);
- return false;
- }
- Advance();
- }
- } else {
- Reset(start);
- return false;
- }
- *min_out = min;
- *max_out = max;
- return true;
-}
-
-
-uc32 RegExpParser::ParseOctalLiteral() {
- ASSERT('0' <= current() && current() <= '7');
- // For compatibility with some other browsers (not all), we parse
- // up to three octal digits with a value below 256.
- uc32 value = current() - '0';
- Advance();
- if ('0' <= current() && current() <= '7') {
- value = value * 8 + current() - '0';
- Advance();
- if (value < 32 && '0' <= current() && current() <= '7') {
- value = value * 8 + current() - '0';
- Advance();
- }
- }
- return value;
-}
-
-
-bool RegExpParser::ParseHexEscape(int length, uc32 *value) {
- int start = position();
- uc32 val = 0;
- bool done = false;
- for (int i = 0; !done; i++) {
- uc32 c = current();
- int d = HexValue(c);
- if (d < 0) {
- Reset(start);
- return false;
- }
- val = val * 16 + d;
- Advance();
- if (i == length - 1) {
- done = true;
- }
- }
- *value = val;
- return true;
-}
-
-
-uc32 RegExpParser::ParseClassCharacterEscape() {
- ASSERT(current() == '\\');
- ASSERT(has_next() && !IsSpecialClassEscape(Next()));
- Advance();
- switch (current()) {
- case 'b':
- Advance();
- return '\b';
- // ControlEscape :: one of
- // f n r t v
- case 'f':
- Advance();
- return '\f';
- case 'n':
- Advance();
- return '\n';
- case 'r':
- Advance();
- return '\r';
- case 't':
- Advance();
- return '\t';
- case 'v':
- Advance();
- return '\v';
- case 'c': {
- uc32 controlLetter = Next();
- uc32 letter = controlLetter & ~('A' ^ 'a');
- // For compatibility with JSC, inside a character class
- // we also accept digits and underscore as control characters.
- if ((controlLetter >= '0' && controlLetter <= '9') ||
- controlLetter == '_' ||
- (letter >= 'A' && letter <= 'Z')) {
- Advance(2);
- // Control letters mapped to ASCII control characters in the range
- // 0x00-0x1f.
- return controlLetter & 0x1f;
- }
- // We match JSC in reading the backslash as a literal
- // character instead of as starting an escape.
- return '\\';
- }
- case '0': case '1': case '2': case '3': case '4': case '5':
- case '6': case '7':
- // For compatibility, we interpret a decimal escape that isn't
- // a back reference (and therefore either \0 or not valid according
- // to the specification) as a 1..3 digit octal character code.
- return ParseOctalLiteral();
- case 'x': {
- Advance();
- uc32 value;
- if (ParseHexEscape(2, &value)) {
- return value;
- }
- // If \x is not followed by a two-digit hexadecimal, treat it
- // as an identity escape.
- return 'x';
- }
- case 'u': {
- Advance();
- uc32 value;
- if (ParseHexEscape(4, &value)) {
- return value;
- }
- // If \u is not followed by a four-digit hexadecimal, treat it
- // as an identity escape.
- return 'u';
- }
- default: {
- // Extended identity escape. We accept any character that hasn't
- // been matched by a more specific case, not just the subset required
- // by the ECMAScript specification.
- uc32 result = current();
- Advance();
- return result;
- }
- }
- return 0;
-}
-
-
-CharacterRange RegExpParser::ParseClassAtom(uc16* char_class) {
- ASSERT_EQ(0, *char_class);
- uc32 first = current();
- if (first == '\\') {
- switch (Next()) {
- case 'w': case 'W': case 'd': case 'D': case 's': case 'S': {
- *char_class = Next();
- Advance(2);
- return CharacterRange::Singleton(0); // Return dummy value.
- }
- case kEndMarker:
- return ReportError(CStrVector("\\ at end of pattern"));
- default:
- uc32 c = ParseClassCharacterEscape(CHECK_FAILED);
- return CharacterRange::Singleton(c);
- }
- } else {
- Advance();
- return CharacterRange::Singleton(first);
- }
-}
-
-
-static const uc16 kNoCharClass = 0;
-
-// Adds range or pre-defined character class to character ranges.
-// If char_class is not kInvalidClass, it's interpreted as a class
-// escape (i.e., 's' means whitespace, from '\s').
-static inline void AddRangeOrEscape(ZoneList<CharacterRange>* ranges,
- uc16 char_class,
- CharacterRange range) {
- if (char_class != kNoCharClass) {
- CharacterRange::AddClassEscape(char_class, ranges);
- } else {
- ranges->Add(range);
- }
-}
-
-
-RegExpTree* RegExpParser::ParseCharacterClass() {
- static const char* kUnterminated = "Unterminated character class";
- static const char* kRangeOutOfOrder = "Range out of order in character class";
-
- ASSERT_EQ(current(), '[');
- Advance();
- bool is_negated = false;
- if (current() == '^') {
- is_negated = true;
- Advance();
- }
- ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
- while (has_more() && current() != ']') {
- uc16 char_class = kNoCharClass;
- CharacterRange first = ParseClassAtom(&char_class CHECK_FAILED);
- if (current() == '-') {
- Advance();
- if (current() == kEndMarker) {
- // If we reach the end we break out of the loop and let the
- // following code report an error.
- break;
- } else if (current() == ']') {
- AddRangeOrEscape(ranges, char_class, first);
- ranges->Add(CharacterRange::Singleton('-'));
- break;
- }
- uc16 char_class_2 = kNoCharClass;
- CharacterRange next = ParseClassAtom(&char_class_2 CHECK_FAILED);
- if (char_class != kNoCharClass || char_class_2 != kNoCharClass) {
- // Either end is an escaped character class. Treat the '-' verbatim.
- AddRangeOrEscape(ranges, char_class, first);
- ranges->Add(CharacterRange::Singleton('-'));
- AddRangeOrEscape(ranges, char_class_2, next);
- continue;
- }
- if (first.from() > next.to()) {
- return ReportError(CStrVector(kRangeOutOfOrder) CHECK_FAILED);
- }
- ranges->Add(CharacterRange::Range(first.from(), next.to()));
- } else {
- AddRangeOrEscape(ranges, char_class, first);
- }
- }
- if (!has_more()) {
- return ReportError(CStrVector(kUnterminated) CHECK_FAILED);
- }
- Advance();
- if (ranges->length() == 0) {
- ranges->Add(CharacterRange::Everything());
- is_negated = !is_negated;
- }
- return new(zone()) RegExpCharacterClass(ranges, is_negated);
-}
-
-
-// ----------------------------------------------------------------------------
-// The Parser interface.
-
-ParserMessage::~ParserMessage() {
- for (int i = 0; i < args().length(); i++)
- DeleteArray(args()[i]);
- DeleteArray(args().start());
-}
-
-
-ScriptDataImpl::~ScriptDataImpl() {
- if (owns_store_) store_.Dispose();
-}
-
-
-int ScriptDataImpl::Length() {
- return store_.length() * sizeof(unsigned);
-}
-
-
-const char* ScriptDataImpl::Data() {
- return reinterpret_cast<const char*>(store_.start());
-}
-
-
-bool ScriptDataImpl::HasError() {
- return has_error();
-}
-
-
-void ScriptDataImpl::Initialize() {
- // Prepares state for use.
- if (store_.length() >= PreparseDataConstants::kHeaderSize) {
- function_index_ = PreparseDataConstants::kHeaderSize;
- int symbol_data_offset = PreparseDataConstants::kHeaderSize
- + store_[PreparseDataConstants::kFunctionsSizeOffset];
- if (store_.length() > symbol_data_offset) {
- symbol_data_ = reinterpret_cast<byte*>(&store_[symbol_data_offset]);
- } else {
- // Partial preparse causes no symbol information.
- symbol_data_ = reinterpret_cast<byte*>(&store_[0] + store_.length());
- }
- symbol_data_end_ = reinterpret_cast<byte*>(&store_[0] + store_.length());
- }
-}
-
-
-int ScriptDataImpl::ReadNumber(byte** source) {
- // Reads a number from symbol_data_ in base 128. The most significant
- // bit marks that there are more digits.
- // If the first byte is 0x80 (kNumberTerminator), it would normally
- // represent a leading zero. Since that is useless, and therefore won't
- // appear as the first digit of any actual value, it is used to
- // mark the end of the input stream.
- byte* data = *source;
- if (data >= symbol_data_end_) return -1;
- byte input = *data;
- if (input == PreparseDataConstants::kNumberTerminator) {
- // End of stream marker.
- return -1;
- }
- int result = input & 0x7f;
- data++;
- while ((input & 0x80u) != 0) {
- if (data >= symbol_data_end_) return -1;
- input = *data;
- result = (result << 7) | (input & 0x7f);
- data++;
- }
- *source = data;
- return result;
-}
-
-
-// Create a Scanner for the preparser to use as input, and preparse the source.
-static ScriptDataImpl* DoPreParse(UC16CharacterStream* source,
- bool allow_lazy,
- ParserRecorder* recorder) {
- Isolate* isolate = Isolate::Current();
- V8JavaScriptScanner scanner(isolate->scanner_constants());
- scanner.Initialize(source);
- intptr_t stack_limit = isolate->stack_guard()->real_climit();
- if (!preparser::PreParser::PreParseProgram(&scanner,
- recorder,
- allow_lazy,
- stack_limit)) {
- isolate->StackOverflow();
- return NULL;
- }
-
- // Extract the accumulated data from the recorder as a single
- // contiguous vector that we are responsible for disposing.
- Vector<unsigned> store = recorder->ExtractData();
- return new ScriptDataImpl(store);
-}
-
-
-// Preparse, but only collect data that is immediately useful,
-// even if the preparser data is only used once.
-ScriptDataImpl* ParserApi::PartialPreParse(UC16CharacterStream* source,
- v8::Extension* extension) {
- bool allow_lazy = FLAG_lazy && (extension == NULL);
- if (!allow_lazy) {
- // Partial preparsing is only about lazily compiled functions.
- // If we don't allow lazy compilation, the log data will be empty.
- return NULL;
- }
- PartialParserRecorder recorder;
- return DoPreParse(source, allow_lazy, &recorder);
-}
-
-
-ScriptDataImpl* ParserApi::PreParse(UC16CharacterStream* source,
- v8::Extension* extension) {
- Handle<Script> no_script;
- bool allow_lazy = FLAG_lazy && (extension == NULL);
- CompleteParserRecorder recorder;
- return DoPreParse(source, allow_lazy, &recorder);
-}
-
-
-bool RegExpParser::ParseRegExp(FlatStringReader* input,
- bool multiline,
- RegExpCompileData* result) {
- ASSERT(result != NULL);
- RegExpParser parser(input, &result->error, multiline);
- RegExpTree* tree = parser.ParsePattern();
- if (parser.failed()) {
- ASSERT(tree == NULL);
- ASSERT(!result->error.is_null());
- } else {
- ASSERT(tree != NULL);
- ASSERT(result->error.is_null());
- result->tree = tree;
- int capture_count = parser.captures_started();
- result->simple = tree->IsAtom() && parser.simple() && capture_count == 0;
- result->contains_anchor = parser.contains_anchor();
- result->capture_count = capture_count;
- }
- return !parser.failed();
-}
-
-
-bool ParserApi::Parse(CompilationInfo* info) {
- ASSERT(info->function() == NULL);
- FunctionLiteral* result = NULL;
- Handle<Script> script = info->script();
- if (info->is_lazy()) {
- Parser parser(script, true, NULL, NULL);
- result = parser.ParseLazy(info);
- } else {
- bool allow_natives_syntax =
- info->allows_natives_syntax() || FLAG_allow_natives_syntax;
- ScriptDataImpl* pre_data = info->pre_parse_data();
- Parser parser(script, allow_natives_syntax, info->extension(), pre_data);
- if (pre_data != NULL && pre_data->has_error()) {
- Scanner::Location loc = pre_data->MessageLocation();
- const char* message = pre_data->BuildMessage();
- Vector<const char*> args = pre_data->BuildArgs();
- parser.ReportMessageAt(loc, message, args);
- DeleteArray(message);
- for (int i = 0; i < args.length(); i++) {
- DeleteArray(args[i]);
- }
- DeleteArray(args.start());
- ASSERT(info->isolate()->has_pending_exception());
- } else {
- Handle<String> source = Handle<String>(String::cast(script->source()));
- result = parser.ParseProgram(source,
- info->is_global(),
- info->StrictMode());
- }
- }
-
- info->SetFunction(result);
- return (result != NULL);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/parser.h b/src/3rdparty/v8/src/parser.h
deleted file mode 100644
index 78faea1..0000000
--- a/src/3rdparty/v8/src/parser.h
+++ /dev/null
@@ -1,823 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PARSER_H_
-#define V8_PARSER_H_
-
-#include "allocation.h"
-#include "ast.h"
-#include "scanner.h"
-#include "scopes.h"
-#include "preparse-data.h"
-
-namespace v8 {
-namespace internal {
-
-class CompilationInfo;
-class FuncNameInferrer;
-class ParserLog;
-class PositionStack;
-class Target;
-class LexicalScope;
-
-template <typename T> class ZoneListWrapper;
-
-
-class ParserMessage : public Malloced {
- public:
- ParserMessage(Scanner::Location loc, const char* message,
- Vector<const char*> args)
- : loc_(loc),
- message_(message),
- args_(args) { }
- ~ParserMessage();
- Scanner::Location location() { return loc_; }
- const char* message() { return message_; }
- Vector<const char*> args() { return args_; }
- private:
- Scanner::Location loc_;
- const char* message_;
- Vector<const char*> args_;
-};
-
-
-class FunctionEntry BASE_EMBEDDED {
- public:
- explicit FunctionEntry(Vector<unsigned> backing) : backing_(backing) { }
- FunctionEntry() : backing_(Vector<unsigned>::empty()) { }
-
- int start_pos() { return backing_[kStartPosOffset]; }
- void set_start_pos(int value) { backing_[kStartPosOffset] = value; }
-
- int end_pos() { return backing_[kEndPosOffset]; }
- void set_end_pos(int value) { backing_[kEndPosOffset] = value; }
-
- int literal_count() { return backing_[kLiteralCountOffset]; }
- void set_literal_count(int value) { backing_[kLiteralCountOffset] = value; }
-
- int property_count() { return backing_[kPropertyCountOffset]; }
- void set_property_count(int value) {
- backing_[kPropertyCountOffset] = value;
- }
-
- bool is_valid() { return backing_.length() > 0; }
-
- static const int kSize = 4;
-
- private:
- Vector<unsigned> backing_;
- static const int kStartPosOffset = 0;
- static const int kEndPosOffset = 1;
- static const int kLiteralCountOffset = 2;
- static const int kPropertyCountOffset = 3;
-};
-
-
-class ScriptDataImpl : public ScriptData {
- public:
- explicit ScriptDataImpl(Vector<unsigned> store)
- : store_(store),
- owns_store_(true) { }
-
- // Create an empty ScriptDataImpl that is guaranteed to not satisfy
- // a SanityCheck.
- ScriptDataImpl() : store_(Vector<unsigned>()), owns_store_(false) { }
-
- virtual ~ScriptDataImpl();
- virtual int Length();
- virtual const char* Data();
- virtual bool HasError();
-
- void Initialize();
- void ReadNextSymbolPosition();
-
- FunctionEntry GetFunctionEntry(int start);
- int GetSymbolIdentifier();
- bool SanityCheck();
-
- Scanner::Location MessageLocation();
- const char* BuildMessage();
- Vector<const char*> BuildArgs();
-
- int symbol_count() {
- return (store_.length() > PreparseDataConstants::kHeaderSize)
- ? store_[PreparseDataConstants::kSymbolCountOffset]
- : 0;
- }
- // The following functions should only be called if SanityCheck has
- // returned true.
- bool has_error() { return store_[PreparseDataConstants::kHasErrorOffset]; }
- unsigned magic() { return store_[PreparseDataConstants::kMagicOffset]; }
- unsigned version() { return store_[PreparseDataConstants::kVersionOffset]; }
-
- private:
- Vector<unsigned> store_;
- unsigned char* symbol_data_;
- unsigned char* symbol_data_end_;
- int function_index_;
- bool owns_store_;
-
- unsigned Read(int position);
- unsigned* ReadAddress(int position);
- // Reads a number from the current symbols
- int ReadNumber(byte** source);
-
- ScriptDataImpl(const char* backing_store, int length)
- : store_(reinterpret_cast<unsigned*>(const_cast<char*>(backing_store)),
- length / static_cast<int>(sizeof(unsigned))),
- owns_store_(false) {
- ASSERT_EQ(0, static_cast<int>(
- reinterpret_cast<intptr_t>(backing_store) % sizeof(unsigned)));
- }
-
- // Read strings written by ParserRecorder::WriteString.
- static const char* ReadString(unsigned* start, int* chars);
-
- friend class ScriptData;
-};
-
-
-class ParserApi {
- public:
- // Parses the source code represented by the compilation info and sets its
- // function literal. Returns false (and deallocates any allocated AST
- // nodes) if parsing failed.
- static bool Parse(CompilationInfo* info);
-
- // Generic preparser generating full preparse data.
- static ScriptDataImpl* PreParse(UC16CharacterStream* source,
- v8::Extension* extension);
-
- // Preparser that only does preprocessing that makes sense if only used
- // immediately after.
- static ScriptDataImpl* PartialPreParse(UC16CharacterStream* source,
- v8::Extension* extension);
-};
-
-// ----------------------------------------------------------------------------
-// REGEXP PARSING
-
-// A BuffferedZoneList is an automatically growing list, just like (and backed
-// by) a ZoneList, that is optimized for the case of adding and removing
-// a single element. The last element added is stored outside the backing list,
-// and if no more than one element is ever added, the ZoneList isn't even
-// allocated.
-// Elements must not be NULL pointers.
-template <typename T, int initial_size>
-class BufferedZoneList {
- public:
- BufferedZoneList() : list_(NULL), last_(NULL) {}
-
- // Adds element at end of list. This element is buffered and can
- // be read using last() or removed using RemoveLast until a new Add or until
- // RemoveLast or GetList has been called.
- void Add(T* value) {
- if (last_ != NULL) {
- if (list_ == NULL) {
- list_ = new ZoneList<T*>(initial_size);
- }
- list_->Add(last_);
- }
- last_ = value;
- }
-
- T* last() {
- ASSERT(last_ != NULL);
- return last_;
- }
-
- T* RemoveLast() {
- ASSERT(last_ != NULL);
- T* result = last_;
- if ((list_ != NULL) && (list_->length() > 0))
- last_ = list_->RemoveLast();
- else
- last_ = NULL;
- return result;
- }
-
- T* Get(int i) {
- ASSERT((0 <= i) && (i < length()));
- if (list_ == NULL) {
- ASSERT_EQ(0, i);
- return last_;
- } else {
- if (i == list_->length()) {
- ASSERT(last_ != NULL);
- return last_;
- } else {
- return list_->at(i);
- }
- }
- }
-
- void Clear() {
- list_ = NULL;
- last_ = NULL;
- }
-
- int length() {
- int length = (list_ == NULL) ? 0 : list_->length();
- return length + ((last_ == NULL) ? 0 : 1);
- }
-
- ZoneList<T*>* GetList() {
- if (list_ == NULL) {
- list_ = new ZoneList<T*>(initial_size);
- }
- if (last_ != NULL) {
- list_->Add(last_);
- last_ = NULL;
- }
- return list_;
- }
-
- private:
- ZoneList<T*>* list_;
- T* last_;
-};
-
-
-// Accumulates RegExp atoms and assertions into lists of terms and alternatives.
-class RegExpBuilder: public ZoneObject {
- public:
- RegExpBuilder();
- void AddCharacter(uc16 character);
- // "Adds" an empty expression. Does nothing except consume a
- // following quantifier
- void AddEmpty();
- void AddAtom(RegExpTree* tree);
- void AddAssertion(RegExpTree* tree);
- void NewAlternative(); // '|'
- void AddQuantifierToAtom(int min, int max, RegExpQuantifier::Type type);
- RegExpTree* ToRegExp();
-
- private:
- void FlushCharacters();
- void FlushText();
- void FlushTerms();
- Zone* zone() { return zone_; }
-
- Zone* zone_;
- bool pending_empty_;
- ZoneList<uc16>* characters_;
- BufferedZoneList<RegExpTree, 2> terms_;
- BufferedZoneList<RegExpTree, 2> text_;
- BufferedZoneList<RegExpTree, 2> alternatives_;
-#ifdef DEBUG
- enum {ADD_NONE, ADD_CHAR, ADD_TERM, ADD_ASSERT, ADD_ATOM} last_added_;
-#define LAST(x) last_added_ = x;
-#else
-#define LAST(x)
-#endif
-};
-
-
-class RegExpParser {
- public:
- RegExpParser(FlatStringReader* in,
- Handle<String>* error,
- bool multiline_mode);
-
- static bool ParseRegExp(FlatStringReader* input,
- bool multiline,
- RegExpCompileData* result);
-
- RegExpTree* ParsePattern();
- RegExpTree* ParseDisjunction();
- RegExpTree* ParseGroup();
- RegExpTree* ParseCharacterClass();
-
- // Parses a {...,...} quantifier and stores the range in the given
- // out parameters.
- bool ParseIntervalQuantifier(int* min_out, int* max_out);
-
- // Parses and returns a single escaped character. The character
- // must not be 'b' or 'B' since they are usually handle specially.
- uc32 ParseClassCharacterEscape();
-
- // Checks whether the following is a length-digit hexadecimal number,
- // and sets the value if it is.
- bool ParseHexEscape(int length, uc32* value);
-
- uc32 ParseOctalLiteral();
-
- // Tries to parse the input as a back reference. If successful it
- // stores the result in the output parameter and returns true. If
- // it fails it will push back the characters read so the same characters
- // can be reparsed.
- bool ParseBackReferenceIndex(int* index_out);
-
- CharacterRange ParseClassAtom(uc16* char_class);
- RegExpTree* ReportError(Vector<const char> message);
- void Advance();
- void Advance(int dist);
- void Reset(int pos);
-
- // Reports whether the pattern might be used as a literal search string.
- // Only use if the result of the parse is a single atom node.
- bool simple();
- bool contains_anchor() { return contains_anchor_; }
- void set_contains_anchor() { contains_anchor_ = true; }
- int captures_started() { return captures_ == NULL ? 0 : captures_->length(); }
- int position() { return next_pos_ - 1; }
- bool failed() { return failed_; }
-
- static const int kMaxCaptures = 1 << 16;
- static const uc32 kEndMarker = (1 << 21);
-
- private:
- enum SubexpressionType {
- INITIAL,
- CAPTURE, // All positive values represent captures.
- POSITIVE_LOOKAHEAD,
- NEGATIVE_LOOKAHEAD,
- GROUPING
- };
-
- class RegExpParserState : public ZoneObject {
- public:
- RegExpParserState(RegExpParserState* previous_state,
- SubexpressionType group_type,
- int disjunction_capture_index)
- : previous_state_(previous_state),
- builder_(new RegExpBuilder()),
- group_type_(group_type),
- disjunction_capture_index_(disjunction_capture_index) {}
- // Parser state of containing expression, if any.
- RegExpParserState* previous_state() { return previous_state_; }
- bool IsSubexpression() { return previous_state_ != NULL; }
- // RegExpBuilder building this regexp's AST.
- RegExpBuilder* builder() { return builder_; }
- // Type of regexp being parsed (parenthesized group or entire regexp).
- SubexpressionType group_type() { return group_type_; }
- // Index in captures array of first capture in this sub-expression, if any.
- // Also the capture index of this sub-expression itself, if group_type
- // is CAPTURE.
- int capture_index() { return disjunction_capture_index_; }
-
- private:
- // Linked list implementation of stack of states.
- RegExpParserState* previous_state_;
- // Builder for the stored disjunction.
- RegExpBuilder* builder_;
- // Stored disjunction type (capture, look-ahead or grouping), if any.
- SubexpressionType group_type_;
- // Stored disjunction's capture index (if any).
- int disjunction_capture_index_;
- };
-
- Isolate* isolate() { return isolate_; }
- Zone* zone() { return isolate_->zone(); }
-
- uc32 current() { return current_; }
- bool has_more() { return has_more_; }
- bool has_next() { return next_pos_ < in()->length(); }
- uc32 Next();
- FlatStringReader* in() { return in_; }
- void ScanForCaptures();
-
- Isolate* isolate_;
- Handle<String>* error_;
- ZoneList<RegExpCapture*>* captures_;
- FlatStringReader* in_;
- uc32 current_;
- int next_pos_;
- // The capture count is only valid after we have scanned for captures.
- int capture_count_;
- bool has_more_;
- bool multiline_;
- bool simple_;
- bool contains_anchor_;
- bool is_scanned_for_captures_;
- bool failed_;
-};
-
-// ----------------------------------------------------------------------------
-// JAVASCRIPT PARSING
-
-class Parser {
- public:
- Parser(Handle<Script> script,
- bool allow_natives_syntax,
- v8::Extension* extension,
- ScriptDataImpl* pre_data);
- virtual ~Parser() { }
-
- // Returns NULL if parsing failed.
- FunctionLiteral* ParseProgram(Handle<String> source,
- bool in_global_context,
- StrictModeFlag strict_mode);
-
- FunctionLiteral* ParseLazy(CompilationInfo* info);
-
- void ReportMessageAt(Scanner::Location loc,
- const char* message,
- Vector<const char*> args);
- void ReportMessageAt(Scanner::Location loc,
- const char* message,
- Vector<Handle<String> > args);
-
- protected:
- // Limit on number of function parameters is chosen arbitrarily.
- // Code::Flags uses only the low 17 bits of num-parameters to
- // construct a hashable id, so if more than 2^17 are allowed, this
- // should be checked.
- static const int kMaxNumFunctionParameters = 32766;
- FunctionLiteral* ParseLazy(CompilationInfo* info,
- UC16CharacterStream* source,
- ZoneScope* zone_scope);
- enum Mode {
- PARSE_LAZILY,
- PARSE_EAGERLY
- };
-
- Isolate* isolate() { return isolate_; }
- Zone* zone() { return isolate_->zone(); }
-
- // Called by ParseProgram after setting up the scanner.
- FunctionLiteral* DoParseProgram(Handle<String> source,
- bool in_global_context,
- StrictModeFlag strict_mode,
- ZoneScope* zone_scope);
-
- // Report syntax error
- void ReportUnexpectedToken(Token::Value token);
- void ReportInvalidPreparseData(Handle<String> name, bool* ok);
- void ReportMessage(const char* message, Vector<const char*> args);
-
- bool inside_with() const { return with_nesting_level_ > 0; }
- V8JavaScriptScanner& scanner() { return scanner_; }
- Mode mode() const { return mode_; }
- ScriptDataImpl* pre_data() const { return pre_data_; }
-
- // Check if the given string is 'eval' or 'arguments'.
- bool IsEvalOrArguments(Handle<String> string);
-
- // All ParseXXX functions take as the last argument an *ok parameter
- // which is set to false if parsing failed; it is unchanged otherwise.
- // By making the 'exception handling' explicit, we are forced to check
- // for failure at the call sites.
- void* ParseSourceElements(ZoneList<Statement*>* processor,
- int end_token, bool* ok);
- Statement* ParseStatement(ZoneStringList* labels, bool* ok);
- Statement* ParseFunctionDeclaration(bool* ok);
- Statement* ParseNativeDeclaration(bool* ok);
- Block* ParseBlock(ZoneStringList* labels, bool* ok);
- Block* ParseVariableStatement(bool* ok);
- Block* ParseVariableDeclarations(bool accept_IN, Expression** var, bool* ok);
- Statement* ParseExpressionOrLabelledStatement(ZoneStringList* labels,
- bool* ok);
- IfStatement* ParseIfStatement(ZoneStringList* labels, bool* ok);
- Statement* ParseContinueStatement(bool* ok);
- Statement* ParseBreakStatement(ZoneStringList* labels, bool* ok);
- Statement* ParseReturnStatement(bool* ok);
- Block* WithHelper(Expression* obj,
- ZoneStringList* labels,
- bool is_catch_block,
- bool* ok);
- Statement* ParseWithStatement(ZoneStringList* labels, bool* ok);
- CaseClause* ParseCaseClause(bool* default_seen_ptr, bool* ok);
- SwitchStatement* ParseSwitchStatement(ZoneStringList* labels, bool* ok);
- DoWhileStatement* ParseDoWhileStatement(ZoneStringList* labels, bool* ok);
- WhileStatement* ParseWhileStatement(ZoneStringList* labels, bool* ok);
- Statement* ParseForStatement(ZoneStringList* labels, bool* ok);
- Statement* ParseThrowStatement(bool* ok);
- Expression* MakeCatchContext(Handle<String> id, VariableProxy* value);
- TryStatement* ParseTryStatement(bool* ok);
- DebuggerStatement* ParseDebuggerStatement(bool* ok);
-
- Expression* ParseExpression(bool accept_IN, bool* ok);
- Expression* ParseAssignmentExpression(bool accept_IN, bool* ok);
- Expression* ParseConditionalExpression(bool accept_IN, bool* ok);
- Expression* ParseBinaryExpression(int prec, bool accept_IN, bool* ok);
- Expression* ParseUnaryExpression(bool* ok);
- Expression* ParsePostfixExpression(bool* ok);
- Expression* ParseLeftHandSideExpression(bool* ok);
- Expression* ParseNewExpression(bool* ok);
- Expression* ParseMemberExpression(bool* ok);
- Expression* ParseNewPrefix(PositionStack* stack, bool* ok);
- Expression* ParseMemberWithNewPrefixesExpression(PositionStack* stack,
- bool* ok);
- Expression* ParsePrimaryExpression(bool* ok);
- Expression* ParseArrayLiteral(bool* ok);
- Expression* ParseObjectLiteral(bool* ok);
- ObjectLiteral::Property* ParseObjectLiteralGetSet(bool is_getter, bool* ok);
- Expression* ParseRegExpLiteral(bool seen_equal, bool* ok);
-
- Expression* NewCompareNode(Token::Value op,
- Expression* x,
- Expression* y,
- int position);
-
- // Populate the constant properties fixed array for a materialized object
- // literal.
- void BuildObjectLiteralConstantProperties(
- ZoneList<ObjectLiteral::Property*>* properties,
- Handle<FixedArray> constants,
- bool* is_simple,
- bool* fast_elements,
- int* depth);
-
- // Populate the literals fixed array for a materialized array literal.
- void BuildArrayLiteralBoilerplateLiterals(ZoneList<Expression*>* properties,
- Handle<FixedArray> constants,
- bool* is_simple,
- int* depth);
-
- // Decide if a property should be in the object boilerplate.
- bool IsBoilerplateProperty(ObjectLiteral::Property* property);
- // If the expression is a literal, return the literal value;
- // if the expression is a materialized literal and is simple return a
- // compile time value as encoded by CompileTimeValue::GetValue().
- // Otherwise, return undefined literal as the placeholder
- // in the object literal boilerplate.
- Handle<Object> GetBoilerplateValue(Expression* expression);
-
- enum FunctionLiteralType {
- EXPRESSION,
- DECLARATION,
- NESTED
- };
-
- ZoneList<Expression*>* ParseArguments(bool* ok);
- FunctionLiteral* ParseFunctionLiteral(Handle<String> var_name,
- bool name_is_reserved,
- int function_token_position,
- FunctionLiteralType type,
- bool* ok);
-
-
- // Magical syntax support.
- Expression* ParseV8Intrinsic(bool* ok);
-
- INLINE(Token::Value peek()) {
- if (stack_overflow_) return Token::ILLEGAL;
- return scanner().peek();
- }
-
- INLINE(Token::Value Next()) {
- // BUG 1215673: Find a thread safe way to set a stack limit in
- // pre-parse mode. Otherwise, we cannot safely pre-parse from other
- // threads.
- if (stack_overflow_) {
- return Token::ILLEGAL;
- }
- if (StackLimitCheck(isolate()).HasOverflowed()) {
- // Any further calls to Next or peek will return the illegal token.
- // The current call must return the next token, which might already
- // have been peek'ed.
- stack_overflow_ = true;
- }
- return scanner().Next();
- }
-
- bool peek_any_identifier();
-
- INLINE(void Consume(Token::Value token));
- void Expect(Token::Value token, bool* ok);
- bool Check(Token::Value token);
- void ExpectSemicolon(bool* ok);
-
- Handle<String> LiteralString(PretenureFlag tenured) {
- if (scanner().is_literal_ascii()) {
- return isolate_->factory()->NewStringFromAscii(
- scanner().literal_ascii_string(), tenured);
- } else {
- return isolate_->factory()->NewStringFromTwoByte(
- scanner().literal_uc16_string(), tenured);
- }
- }
-
- Handle<String> NextLiteralString(PretenureFlag tenured) {
- if (scanner().is_next_literal_ascii()) {
- return isolate_->factory()->NewStringFromAscii(
- scanner().next_literal_ascii_string(), tenured);
- } else {
- return isolate_->factory()->NewStringFromTwoByte(
- scanner().next_literal_uc16_string(), tenured);
- }
- }
-
- Handle<String> GetSymbol(bool* ok);
-
- // Get odd-ball literals.
- Literal* GetLiteralUndefined();
- Literal* GetLiteralTheHole();
- Literal* GetLiteralNumber(double value);
-
- Handle<String> ParseIdentifier(bool* ok);
- Handle<String> ParseIdentifierOrReservedWord(bool* is_reserved, bool* ok);
- Handle<String> ParseIdentifierName(bool* ok);
- Handle<String> ParseIdentifierOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok);
-
- // Strict mode validation of LValue expressions
- void CheckStrictModeLValue(Expression* expression,
- const char* error,
- bool* ok);
-
- // Strict mode octal literal validation.
- void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
-
- // Parser support
- VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
- FunctionLiteral* fun,
- bool resolve,
- bool* ok);
-
- bool TargetStackContainsLabel(Handle<String> label);
- BreakableStatement* LookupBreakTarget(Handle<String> label, bool* ok);
- IterationStatement* LookupContinueTarget(Handle<String> label, bool* ok);
-
- void RegisterTargetUse(BreakTarget* target, Target* stop);
-
- // Factory methods.
-
- Statement* EmptyStatement() {
- static v8::internal::EmptyStatement empty;
- return &empty;
- }
-
- Scope* NewScope(Scope* parent, Scope::Type type, bool inside_with);
-
- Handle<String> LookupSymbol(int symbol_id);
-
- Handle<String> LookupCachedSymbol(int symbol_id);
-
- Expression* NewCall(Expression* expression,
- ZoneList<Expression*>* arguments,
- int pos) {
- return new Call(expression, arguments, pos);
- }
-
-
- // Create a number literal.
- Literal* NewNumberLiteral(double value);
-
- // Generate AST node that throw a ReferenceError with the given type.
- Expression* NewThrowReferenceError(Handle<String> type);
-
- // Generate AST node that throw a SyntaxError with the given
- // type. The first argument may be null (in the handle sense) in
- // which case no arguments are passed to the constructor.
- Expression* NewThrowSyntaxError(Handle<String> type, Handle<Object> first);
-
- // Generate AST node that throw a TypeError with the given
- // type. Both arguments must be non-null (in the handle sense).
- Expression* NewThrowTypeError(Handle<String> type,
- Handle<Object> first,
- Handle<Object> second);
-
- // Generic AST generator for throwing errors from compiled code.
- Expression* NewThrowError(Handle<String> constructor,
- Handle<String> type,
- Vector< Handle<Object> > arguments);
-
- Isolate* isolate_;
- ZoneList<Handle<String> > symbol_cache_;
-
- Handle<Script> script_;
- V8JavaScriptScanner scanner_;
-
- Scope* top_scope_;
- int with_nesting_level_;
-
- LexicalScope* lexical_scope_;
- Mode mode_;
-
- Target* target_stack_; // for break, continue statements
- bool allow_natives_syntax_;
- v8::Extension* extension_;
- bool is_pre_parsing_;
- ScriptDataImpl* pre_data_;
- FuncNameInferrer* fni_;
- bool stack_overflow_;
- // If true, the next (and immediately following) function literal is
- // preceded by a parenthesis.
- // Heuristically that means that the function will be called immediately,
- // so never lazily compile it.
- bool parenthesized_function_;
-
- friend class LexicalScope;
-};
-
-
-// Support for handling complex values (array and object literals) that
-// can be fully handled at compile time.
-class CompileTimeValue: public AllStatic {
- public:
- enum Type {
- OBJECT_LITERAL_FAST_ELEMENTS,
- OBJECT_LITERAL_SLOW_ELEMENTS,
- ARRAY_LITERAL
- };
-
- static bool IsCompileTimeValue(Expression* expression);
-
- static bool ArrayLiteralElementNeedsInitialization(Expression* value);
-
- // Get the value as a compile time value.
- static Handle<FixedArray> GetValue(Expression* expression);
-
- // Get the type of a compile time value returned by GetValue().
- static Type GetType(Handle<FixedArray> value);
-
- // Get the elements array of a compile time value returned by GetValue().
- static Handle<FixedArray> GetElements(Handle<FixedArray> value);
-
- private:
- static const int kTypeSlot = 0;
- static const int kElementsSlot = 1;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompileTimeValue);
-};
-
-
-// ----------------------------------------------------------------------------
-// JSON PARSING
-
-// JSON is a subset of JavaScript, as specified in, e.g., the ECMAScript 5
-// specification section 15.12.1 (and appendix A.8).
-// The grammar is given section 15.12.1.2 (and appendix A.8.2).
-class JsonParser BASE_EMBEDDED {
- public:
- // Parse JSON input as a single JSON value.
- // Returns null handle and sets exception if parsing failed.
- static Handle<Object> Parse(Handle<String> source) {
- if (source->IsExternalTwoByteString()) {
- ExternalTwoByteStringUC16CharacterStream stream(
- Handle<ExternalTwoByteString>::cast(source), 0, source->length());
- return JsonParser().ParseJson(source, &stream);
- } else {
- GenericStringUC16CharacterStream stream(source, 0, source->length());
- return JsonParser().ParseJson(source, &stream);
- }
- }
-
- private:
- JsonParser()
- : isolate_(Isolate::Current()),
- scanner_(isolate_->scanner_constants()) { }
- ~JsonParser() { }
-
- Isolate* isolate() { return isolate_; }
-
- // Parse a string containing a single JSON value.
- Handle<Object> ParseJson(Handle<String> script, UC16CharacterStream* source);
- // Parse a single JSON value from input (grammar production JSONValue).
- // A JSON value is either a (double-quoted) string literal, a number literal,
- // one of "true", "false", or "null", or an object or array literal.
- Handle<Object> ParseJsonValue();
- // Parse a JSON object literal (grammar production JSONObject).
- // An object literal is a squiggly-braced and comma separated sequence
- // (possibly empty) of key/value pairs, where the key is a JSON string
- // literal, the value is a JSON value, and the two are separated by a colon.
- // A JSON array dosn't allow numbers and identifiers as keys, like a
- // JavaScript array.
- Handle<Object> ParseJsonObject();
- // Parses a JSON array literal (grammar production JSONArray). An array
- // literal is a square-bracketed and comma separated sequence (possibly empty)
- // of JSON values.
- // A JSON array doesn't allow leaving out values from the sequence, nor does
- // it allow a terminal comma, like a JavaScript array does.
- Handle<Object> ParseJsonArray();
-
- // Mark that a parsing error has happened at the current token, and
- // return a null handle. Primarily for readability.
- Handle<Object> ReportUnexpectedToken() { return Handle<Object>::null(); }
- // Converts the currently parsed literal to a JavaScript String.
- Handle<String> GetString();
-
- Isolate* isolate_;
- JsonScanner scanner_;
- bool stack_overflow_;
-};
-} } // namespace v8::internal
-
-#endif // V8_PARSER_H_
diff --git a/src/3rdparty/v8/src/platform-cygwin.cc b/src/3rdparty/v8/src/platform-cygwin.cc
deleted file mode 100644
index d591b9d..0000000
--- a/src/3rdparty/v8/src/platform-cygwin.cc
+++ /dev/null
@@ -1,811 +0,0 @@
-// Copyright 2006-2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for Cygwin goes here. For the POSIX comaptible parts
-// the implementation is in platform-posix.cc.
-
-#include <errno.h>
-#include <pthread.h>
-#include <semaphore.h>
-#include <stdarg.h>
-#include <strings.h> // index
-#include <sys/time.h>
-#include <sys/mman.h> // mmap & munmap
-#include <unistd.h> // sysconf
-
-#undef MAP_TYPE
-
-#include "v8.h"
-
-#include "platform.h"
-#include "v8threads.h"
-#include "vm-state-inl.h"
-#include "win32-headers.h"
-
-namespace v8 {
-namespace internal {
-
-// 0 is never a valid thread id
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- return ceil(x);
-}
-
-
-static Mutex* limit_mutex = NULL;
-
-
-void OS::Setup() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // Nothing special about Cygwin.
-}
-
-
-int OS::ActivationFrameAlignment() {
- // With gcc 4.4 the tree vectorization optimizer can generate code
- // that requires 16 byte alignment such as movdqa on x86.
- return 16;
-}
-
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- __asm__ __volatile__("" : : : "memory");
- // An x86 store acts as a release barrier.
- *ptr = value;
-}
-
-const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return tzname[0]; // The location of the timezone string on Cygwin.
-}
-
-
-double OS::LocalTimeOffset() {
- // On Cygwin, struct tm does not contain a tm_gmtoff field.
- time_t utc = time(NULL);
- ASSERT(utc != -1);
- struct tm* loc = localtime(&utc);
- ASSERT(loc != NULL);
- // time - localtime includes any daylight savings offset, so subtract it.
- return static_cast<double>((mktime(loc) - utc) * msPerSecond -
- (loc->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, ie, not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
-size_t OS::AllocateAlignment() {
- return sysconf(_SC_PAGESIZE);
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
- const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (mbase == MAP_FAILED) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
- return NULL;
- }
- *allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
- return mbase;
-}
-
-
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
- // TODO(1240712): mprotect has a return value which is ignored here.
- mprotect(address, size, PROT_READ);
-}
-
-
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
- // TODO(1240712): mprotect has a return value which is ignored here.
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- mprotect(address, size, prot);
-}
-
-#endif
-
-
-void OS::Sleep(int milliseconds) {
- unsigned int ms = static_cast<unsigned int>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- abort();
-}
-
-
-void OS::DebugBreak() {
- asm("int $3");
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
- PosixMemoryMappedFile(FILE* file, void* memory, int size)
- : file_(file), memory_(memory), size_(size) { }
- virtual ~PosixMemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- FILE* file_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "r+");
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
-
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- FILE* file = fopen(name, "w+");
- if (file == NULL) return NULL;
- int result = fwrite(initial, size, 1, file);
- if (result < 1) {
- fclose(file);
- return NULL;
- }
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) munmap(memory_, size_);
- fclose(file_);
-}
-
-
-void OS::LogSharedLibraryAddresses() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // This function assumes that the layout of the file is as follows:
- // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
- // If we encounter an unexpected situation we abort scanning further entries.
- FILE* fp = fopen("/proc/self/maps", "r");
- if (fp == NULL) return;
-
- // Allocate enough room to be able to store a full file name.
- const int kLibNameLen = FILENAME_MAX + 1;
- char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
-
- i::Isolate* isolate = ISOLATE;
- // This loop will terminate once the scanning hits an EOF.
- while (true) {
- uintptr_t start, end;
- char attr_r, attr_w, attr_x, attr_p;
- // Parse the addresses and permission bits at the beginning of the line.
- if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
- if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
-
- int c;
- if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
- // Found a read-only executable entry. Skip characters until we reach
- // the beginning of the filename or the end of the line.
- do {
- c = getc(fp);
- } while ((c != EOF) && (c != '\n') && (c != '/'));
- if (c == EOF) break; // EOF: Was unexpected, just exit.
-
- // Process the filename if found.
- if (c == '/') {
- ungetc(c, fp); // Push the '/' back into the stream to be read below.
-
- // Read to the end of the line. Exit if the read fails.
- if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
-
- // Drop the newline character read by fgets. We do not need to check
- // for a zero-length string because we know that we at least read the
- // '/' character.
- lib_name[strlen(lib_name) - 1] = '\0';
- } else {
- // No library name found, just record the raw address range.
- snprintf(lib_name, kLibNameLen,
- "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
- }
- LOG(isolate, SharedLibraryEvent(lib_name, start, end));
- } else {
- // Entry not describing executable data. Skip to end of line to setup
- // reading the next entry.
- do {
- c = getc(fp);
- } while ((c != EOF) && (c != '\n'));
- if (c == EOF) break;
- }
- }
- free(lib_name);
- fclose(fp);
-#endif
-}
-
-
-void OS::SignalCodeMovingGC() {
- // Nothing to do on Cygwin.
-}
-
-
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- // Not supported on Cygwin.
- return 0;
-}
-
-
-// The VirtualMemory implementation is taken from platform-win32.cc.
-// The mmap-based virtual memory implementation as it is used on most posix
-// platforms does not work well because Cygwin does not support MAP_FIXED.
-// This causes VirtualMemory::Commit to not always commit the memory region
-// specified.
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
- size_ = size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- if (0 == VirtualFree(address(), 0, MEM_RELEASE)) address_ = NULL;
- }
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
- if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
- return false;
- }
-
- UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
- return true;
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- ASSERT(IsReserved());
- return VirtualFree(address, size, MEM_DECOMMIT) != false;
-}
-
-
-class ThreadHandle::PlatformData : public Malloced {
- public:
- explicit PlatformData(ThreadHandle::Kind kind) {
- Initialize(kind);
- }
-
- void Initialize(ThreadHandle::Kind kind) {
- switch (kind) {
- case ThreadHandle::SELF: thread_ = pthread_self(); break;
- case ThreadHandle::INVALID: thread_ = kNoThread; break;
- }
- }
-
- pthread_t thread_; // Thread handle for pthread.
-};
-
-
-ThreadHandle::ThreadHandle(Kind kind) {
- data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
- data_->Initialize(kind);
-}
-
-
-ThreadHandle::~ThreadHandle() {
- delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
- return pthread_equal(data_->thread_, pthread_self());
-}
-
-
-bool ThreadHandle::IsValid() const {
- return data_->thread_ != kNoThread;
-}
-
-
-Thread::Thread(Isolate* isolate, const Options& options)
- : ThreadHandle(ThreadHandle::INVALID),
- isolate_(isolate),
- stack_size_(options.stack_size) {
- set_name(options.name);
-}
-
-
-Thread::Thread(Isolate* isolate, const char* name)
- : ThreadHandle(ThreadHandle::INVALID),
- isolate_(isolate),
- stack_size_(0) {
- set_name(name);
-}
-
-
-Thread::~Thread() {
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->thread_handle_data()->thread_ = pthread_self();
- ASSERT(thread->IsValid());
- Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
- thread->Run();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
- }
- pthread_create(&thread_handle_data()->thread_, attr_ptr, ThreadEntry, this);
- ASSERT(IsValid());
-}
-
-
-void Thread::Join() {
- pthread_join(thread_handle_data()->thread_, NULL);
-}
-
-
-static inline Thread::LocalStorageKey PthreadKeyToLocalKey(
- pthread_key_t pthread_key) {
- // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
- // because pthread_key_t is a pointer type on Cygwin. This will probably not
- // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
- STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
- intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
- return static_cast<Thread::LocalStorageKey>(ptr_key);
-}
-
-
-static inline pthread_key_t LocalKeyToPthreadKey(
- Thread::LocalStorageKey local_key) {
- STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
- intptr_t ptr_key = static_cast<intptr_t>(local_key);
- return reinterpret_cast<pthread_key_t>(ptr_key);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- return PthreadKeyToLocalKey(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class CygwinMutex : public Mutex {
- public:
-
- CygwinMutex() {
- pthread_mutexattr_t attrs;
- memset(&attrs, 0, sizeof(attrs));
-
- int result = pthread_mutexattr_init(&attrs);
- ASSERT(result == 0);
- result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
- ASSERT(result == 0);
- result = pthread_mutex_init(&mutex_, &attrs);
- ASSERT(result == 0);
- }
-
- virtual ~CygwinMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() {
- int result = pthread_mutex_lock(&mutex_);
- return result;
- }
-
- virtual int Unlock() {
- int result = pthread_mutex_unlock(&mutex_);
- return result;
- }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
- return new CygwinMutex();
-}
-
-
-class CygwinSemaphore : public Semaphore {
- public:
- explicit CygwinSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~CygwinSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void CygwinSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-} while (false)
-#endif
-
-
-bool CygwinSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
- // Wait for semaphore signalled or timeout.
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new CygwinSemaphore(count);
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-// ----------------------------------------------------------------------------
-// Cygwin profiler support.
-//
-// On Cygwin we use the same sampler implementation as on win32.
-
-class Sampler::PlatformData : public Malloced {
- public:
- // Get a handle to the calling thread. This is the thread that we are
- // going to profile. We need to make a copy of the handle because we are
- // going to use it in the sampler thread. Using GetThreadHandle() will
- // not work in this case. We're using OpenThread because DuplicateHandle
- // for some reason doesn't work in Chrome's sandbox.
- PlatformData() : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
- THREAD_SUSPEND_RESUME |
- THREAD_QUERY_INFORMATION,
- false,
- GetCurrentThreadId())) {}
-
- ~PlatformData() {
- if (profiled_thread_ != NULL) {
- CloseHandle(profiled_thread_);
- profiled_thread_ = NULL;
- }
- }
-
- HANDLE profiled_thread() { return profiled_thread_; }
-
- private:
- HANDLE profiled_thread_;
-};
-
-
-class SamplerThread : public Thread {
- public:
- explicit SamplerThread(int interval)
- : Thread(NULL, "SamplerThread"),
- interval_(interval) {}
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- instance_ = new SamplerThread(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
- instance_->Join();
- delete instance_;
- instance_ = NULL;
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- }
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- }
- OS::Sleep(interval_);
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
- if (!sampler->isolate()->IsInitialized()) return;
- if (!sampler->IsProfiling()) return;
- SamplerThread* sampler_thread =
- reinterpret_cast<SamplerThread*>(raw_sampler_thread);
- sampler_thread->SampleContext(sampler);
- }
-
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
-
- void SampleContext(Sampler* sampler) {
- HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
- if (profiled_thread == NULL) return;
-
- // Context used for sampling the register state of the profiled thread.
- CONTEXT context;
- memset(&context, 0, sizeof(context));
-
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
- if (sample == NULL) sample = &sample_obj;
-
- static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
- if (SuspendThread(profiled_thread) == kSuspendFailed) return;
- sample->state = sampler->isolate()->current_vm_state();
-
- context.ContextFlags = CONTEXT_FULL;
- if (GetThreadContext(profiled_thread, &context) != 0) {
-#if V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(context.Rip);
- sample->sp = reinterpret_cast<Address>(context.Rsp);
- sample->fp = reinterpret_cast<Address>(context.Rbp);
-#else
- sample->pc = reinterpret_cast<Address>(context.Eip);
- sample->sp = reinterpret_cast<Address>(context.Esp);
- sample->fp = reinterpret_cast<Address>(context.Ebp);
-#endif
- sampler->SampleStack(sample);
- sampler->Tick(sample);
- }
- ResumeThread(profiled_thread);
- }
-
- const int interval_;
- RuntimeProfilerRateLimiter rate_limiter_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SamplerThread* instance_;
-
- DISALLOW_COPY_AND_ASSIGN(SamplerThread);
-};
-
-
-Mutex* SamplerThread::mutex_ = OS::CreateMutex();
-SamplerThread* SamplerThread::instance_ = NULL;
-
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SamplerThread::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SamplerThread::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-} } // namespace v8::internal
-
diff --git a/src/3rdparty/v8/src/platform-freebsd.cc b/src/3rdparty/v8/src/platform-freebsd.cc
deleted file mode 100644
index 2a73b6e..0000000
--- a/src/3rdparty/v8/src/platform-freebsd.cc
+++ /dev/null
@@ -1,854 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for FreeBSD goes here. For the POSIX comaptible parts
-// the implementation is in platform-posix.cc.
-
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <sys/types.h>
-#include <sys/ucontext.h>
-#include <stdlib.h>
-
-#include <sys/types.h> // mmap & munmap
-#include <sys/mman.h> // mmap & munmap
-#include <sys/stat.h> // open
-#include <sys/fcntl.h> // open
-#include <unistd.h> // getpagesize
-// If you don't have execinfo.h then you need devel/libexecinfo from ports.
-#include <execinfo.h> // backtrace, backtrace_symbols
-#include <strings.h> // index
-#include <errno.h>
-#include <stdarg.h>
-#include <limits.h>
-
-#undef MAP_TYPE
-
-#include "v8.h"
-
-#include "platform.h"
-#include "vm-state-inl.h"
-
-
-namespace v8 {
-namespace internal {
-
-// 0 is never a valid thread id on FreeBSD since tids and pids share a
-// name space and pid 0 is used to kill the group (see man 2 kill).
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- // Correct as on OS X
- if (-1.0 < x && x < 0.0) {
- return -0.0;
- } else {
- return ceil(x);
- }
-}
-
-
-static Mutex* limit_mutex = NULL;
-
-
-void OS::Setup() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- __asm__ __volatile__("" : : : "memory");
- *ptr = value;
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // FreeBSD runs on anything.
-}
-
-
-int OS::ActivationFrameAlignment() {
- // 16 byte alignment on FreeBSD
- return 16;
-}
-
-
-const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset() {
- time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
- // tm_gmtoff includes any daylight savings offset, so subtract it.
- return static_cast<double>(t->tm_gmtoff * msPerSecond -
- (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, ie, not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
-size_t OS::AllocateAlignment() {
- return getpagesize();
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool executable) {
- const size_t msize = RoundUp(requested, getpagesize());
- int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
-
- if (mbase == MAP_FAILED) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
- return NULL;
- }
- *allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
- return mbase;
-}
-
-
-void OS::Free(void* buf, const size_t length) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(buf, length);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
- UNIMPLEMENTED();
-}
-
-
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
- UNIMPLEMENTED();
-}
-
-#endif
-
-
-void OS::Sleep(int milliseconds) {
- unsigned int ms = static_cast<unsigned int>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- abort();
-}
-
-
-void OS::DebugBreak() {
-#if (defined(__arm__) || defined(__thumb__))
-# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
- asm("bkpt 0");
-# endif
-#else
- asm("int $3");
-#endif
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
- PosixMemoryMappedFile(FILE* file, void* memory, int size)
- : file_(file), memory_(memory), size_(size) { }
- virtual ~PosixMemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- FILE* file_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "r+");
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
-
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- FILE* file = fopen(name, "w+");
- if (file == NULL) return NULL;
- int result = fwrite(initial, size, 1, file);
- if (result < 1) {
- fclose(file);
- return NULL;
- }
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) munmap(memory_, size_);
- fclose(file_);
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-static unsigned StringToLong(char* buffer) {
- return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
-}
-#endif
-
-
-void OS::LogSharedLibraryAddresses() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- static const int MAP_LENGTH = 1024;
- int fd = open("/proc/self/maps", O_RDONLY);
- if (fd < 0) return;
- while (true) {
- char addr_buffer[11];
- addr_buffer[0] = '0';
- addr_buffer[1] = 'x';
- addr_buffer[10] = 0;
- int result = read(fd, addr_buffer + 2, 8);
- if (result < 8) break;
- unsigned start = StringToLong(addr_buffer);
- result = read(fd, addr_buffer + 2, 1);
- if (result < 1) break;
- if (addr_buffer[2] != '-') break;
- result = read(fd, addr_buffer + 2, 8);
- if (result < 8) break;
- unsigned end = StringToLong(addr_buffer);
- char buffer[MAP_LENGTH];
- int bytes_read = -1;
- do {
- bytes_read++;
- if (bytes_read >= MAP_LENGTH - 1)
- break;
- result = read(fd, buffer + bytes_read, 1);
- if (result < 1) break;
- } while (buffer[bytes_read] != '\n');
- buffer[bytes_read] = 0;
- // Ignore mappings that are not executable.
- if (buffer[3] != 'x') continue;
- char* start_of_path = index(buffer, '/');
- // There may be no filename in this line. Skip to next.
- if (start_of_path == NULL) continue;
- buffer[bytes_read] = 0;
- LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
- }
- close(fd);
-#endif
-}
-
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- int frames_size = frames.length();
- ScopedVector<void*> addresses(frames_size);
-
- int frames_count = backtrace(addresses.start(), frames_size);
-
- char** symbols = backtrace_symbols(addresses.start(), frames_count);
- if (symbols == NULL) {
- return kStackWalkError;
- }
-
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
- "%s",
- symbols[i]);
- // Make sure line termination is in place.
- frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
- }
-
- free(symbols);
-
- return frames_count;
-}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = mmap(NULL, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
- size_ = size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- if (0 == munmap(address(), size())) address_ = MAP_FAILED;
- }
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != MAP_FAILED;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
- int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address, size, prot,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd, kMmapFdOffset)) {
- return false;
- }
-
- UpdateAllocatedSpaceLimits(address, size);
- return true;
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return mmap(address, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
- kMmapFd, kMmapFdOffset) != MAP_FAILED;
-}
-
-
-class ThreadHandle::PlatformData : public Malloced {
- public:
- explicit PlatformData(ThreadHandle::Kind kind) {
- Initialize(kind);
- }
-
- void Initialize(ThreadHandle::Kind kind) {
- switch (kind) {
- case ThreadHandle::SELF: thread_ = pthread_self(); break;
- case ThreadHandle::INVALID: thread_ = kNoThread; break;
- }
- }
- pthread_t thread_; // Thread handle for pthread.
-};
-
-
-ThreadHandle::ThreadHandle(Kind kind) {
- data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
- data_->Initialize(kind);
-}
-
-
-ThreadHandle::~ThreadHandle() {
- delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
- return pthread_equal(data_->thread_, pthread_self());
-}
-
-
-bool ThreadHandle::IsValid() const {
- return data_->thread_ != kNoThread;
-}
-
-
-Thread::Thread(Isolate* isolate, const Options& options)
- : ThreadHandle(ThreadHandle::INVALID),
- isolate_(isolate),
- stack_size_(options.stack_size) {
- set_name(options.name);
-}
-
-
-Thread::Thread(Isolate* isolate, const char* name)
- : ThreadHandle(ThreadHandle::INVALID),
- isolate_(isolate),
- stack_size_(0) {
- set_name(name);
-}
-
-
-Thread::~Thread() {
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->thread_handle_data()->thread_ = pthread_self();
- ASSERT(thread->IsValid());
- Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
- thread->Run();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
- }
- pthread_create(&thread_handle_data()->thread_, attr_ptr, ThreadEntry, this);
- ASSERT(IsValid());
-}
-
-
-void Thread::Join() {
- pthread_join(thread_handle_data()->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class FreeBSDMutex : public Mutex {
- public:
-
- FreeBSDMutex() {
- pthread_mutexattr_t attrs;
- int result = pthread_mutexattr_init(&attrs);
- ASSERT(result == 0);
- result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
- ASSERT(result == 0);
- result = pthread_mutex_init(&mutex_, &attrs);
- ASSERT(result == 0);
- }
-
- virtual ~FreeBSDMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() {
- int result = pthread_mutex_lock(&mutex_);
- return result;
- }
-
- virtual int Unlock() {
- int result = pthread_mutex_unlock(&mutex_);
- return result;
- }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
- return new FreeBSDMutex();
-}
-
-
-class FreeBSDSemaphore : public Semaphore {
- public:
- explicit FreeBSDSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~FreeBSDSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void FreeBSDSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-bool FreeBSDSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new FreeBSDSemaphore(count);
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-static pthread_t GetThreadID() {
- pthread_t thread_id = pthread_self();
- return thread_id;
-}
-
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() : vm_tid_(GetThreadID()) {}
-
- pthread_t vm_tid() const { return vm_tid_; }
-
- private:
- pthread_t vm_tid_;
-};
-
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
- // We require a fully initialized and entered isolate.
- return;
- }
- Sampler* sampler = isolate->logger()->sampler();
- if (sampler == NULL || !sampler->IsActive()) return;
-
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
- if (sample == NULL) sample = &sample_obj;
-
- // Extracting the sample from the context is extremely machine dependent.
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
- mcontext_t& mcontext = ucontext->uc_mcontext;
- sample->state = isolate->current_vm_state();
-#if V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(mcontext.mc_eip);
- sample->sp = reinterpret_cast<Address>(mcontext.mc_esp);
- sample->fp = reinterpret_cast<Address>(mcontext.mc_ebp);
-#elif V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(mcontext.mc_rip);
- sample->sp = reinterpret_cast<Address>(mcontext.mc_rsp);
- sample->fp = reinterpret_cast<Address>(mcontext.mc_rbp);
-#elif V8_HOST_ARCH_ARM
- sample->pc = reinterpret_cast<Address>(mcontext.mc_r15);
- sample->sp = reinterpret_cast<Address>(mcontext.mc_r13);
- sample->fp = reinterpret_cast<Address>(mcontext.mc_r11);
-#endif
- sampler->SampleStack(sample);
- sampler->Tick(sample);
-}
-
-
-class SignalSender : public Thread {
- public:
- enum SleepInterval {
- HALF_INTERVAL,
- FULL_INTERVAL
- };
-
- explicit SignalSender(int interval)
- : Thread(NULL, "SignalSender"),
- interval_(interval) {}
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- // Install a signal handler.
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- signal_handler_installed_ =
- (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
-
- // Start a thread that sends SIGPROF signal to VM threads.
- instance_ = new SignalSender(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
- instance_->Join();
- delete instance_;
- instance_ = NULL;
-
- // Restore the old signal handler.
- if (signal_handler_installed_) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- signal_handler_installed_ = false;
- }
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- }
- if (cpu_profiling_enabled && runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- Sleep(HALF_INTERVAL);
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- Sleep(HALF_INTERVAL);
- } else {
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
- this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
- NULL)) {
- return;
- }
- }
- Sleep(FULL_INTERVAL);
- }
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
- if (!sampler->IsProfiling()) return;
- SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
- sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
- }
-
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
-
- void SendProfilingSignal(pthread_t tid) {
- if (!signal_handler_installed_) return;
- pthread_kill(tid, SIGPROF);
- }
-
- void Sleep(SleepInterval full_or_half) {
- // Convert ms to us and subtract 100 us to compensate delays
- // occuring during signal delivery.
- useconds_t interval = interval_ * 1000 - 100;
- if (full_or_half == HALF_INTERVAL) interval /= 2;
- int result = usleep(interval);
-#ifdef DEBUG
- if (result != 0 && errno != EINTR) {
- fprintf(stderr,
- "SignalSender usleep error; interval = %u, errno = %d\n",
- interval,
- errno);
- ASSERT(result == 0 || errno == EINTR);
- }
-#endif
- USE(result);
- }
-
- const int interval_;
- RuntimeProfilerRateLimiter rate_limiter_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SignalSender* instance_;
- static bool signal_handler_installed_;
- static struct sigaction old_signal_handler_;
-
- DISALLOW_COPY_AND_ASSIGN(SignalSender);
-};
-
-Mutex* SignalSender::mutex_ = OS::CreateMutex();
-SignalSender* SignalSender::instance_ = NULL;
-struct sigaction SignalSender::old_signal_handler_;
-bool SignalSender::signal_handler_installed_ = false;
-
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SignalSender::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SignalSender::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/platform-linux.cc b/src/3rdparty/v8/src/platform-linux.cc
deleted file mode 100644
index 73a6ccb..0000000
--- a/src/3rdparty/v8/src/platform-linux.cc
+++ /dev/null
@@ -1,1120 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for Linux goes here. For the POSIX comaptible parts
-// the implementation is in platform-posix.cc.
-
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <sys/prctl.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <stdlib.h>
-
-// Ubuntu Dapper requires memory pages to be marked as
-// executable. Otherwise, OS raises an exception when executing code
-// in that page.
-#include <sys/types.h> // mmap & munmap
-#include <sys/mman.h> // mmap & munmap
-#include <sys/stat.h> // open
-#include <fcntl.h> // open
-#include <unistd.h> // sysconf
-#ifdef __GLIBC__
-#include <execinfo.h> // backtrace, backtrace_symbols
-#endif // def __GLIBC__
-#include <strings.h> // index
-#include <errno.h>
-#include <stdarg.h>
-
-#undef MAP_TYPE
-
-#include "v8.h"
-
-#include "platform.h"
-#include "v8threads.h"
-#include "vm-state-inl.h"
-
-
-namespace v8 {
-namespace internal {
-
-// 0 is never a valid thread id on Linux since tids and pids share a
-// name space and pid 0 is reserved (see man 2 kill).
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- return ceil(x);
-}
-
-
-static Mutex* limit_mutex = NULL;
-
-
-void OS::Setup() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
-#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
- // Here gcc is telling us that we are on an ARM and gcc is assuming that we
- // have VFP3 instructions. If gcc can assume it then so can we.
- return 1u << VFP3;
-#elif CAN_USE_ARMV7_INSTRUCTIONS
- return 1u << ARMv7;
-#elif(defined(__mips_hard_float) && __mips_hard_float != 0)
- // Here gcc is telling us that we are on an MIPS and gcc is assuming that we
- // have FPU instructions. If gcc can assume it then so can we.
- return 1u << FPU;
-#else
- return 0; // Linux runs on anything.
-#endif
-}
-
-
-#ifdef __arm__
-static bool CPUInfoContainsString(const char * search_string) {
- const char* file_name = "/proc/cpuinfo";
- // This is written as a straight shot one pass parser
- // and not using STL string and ifstream because,
- // on Linux, it's reading from a (non-mmap-able)
- // character special device.
- FILE* f = NULL;
- const char* what = search_string;
-
- if (NULL == (f = fopen(file_name, "r")))
- return false;
-
- int k;
- while (EOF != (k = fgetc(f))) {
- if (k == *what) {
- ++what;
- while ((*what != '\0') && (*what == fgetc(f))) {
- ++what;
- }
- if (*what == '\0') {
- fclose(f);
- return true;
- } else {
- what = search_string;
- }
- }
- }
- fclose(f);
-
- // Did not find string in the proc file.
- return false;
-}
-
-bool OS::ArmCpuHasFeature(CpuFeature feature) {
- const char* search_string = NULL;
- // Simple detection of VFP at runtime for Linux.
- // It is based on /proc/cpuinfo, which reveals hardware configuration
- // to user-space applications. According to ARM (mid 2009), no similar
- // facility is universally available on the ARM architectures,
- // so it's up to individual OSes to provide such.
- switch (feature) {
- case VFP3:
- search_string = "vfpv3";
- break;
- case ARMv7:
- search_string = "ARMv7";
- break;
- default:
- UNREACHABLE();
- }
-
- if (CPUInfoContainsString(search_string)) {
- return true;
- }
-
- if (feature == VFP3) {
- // Some old kernels will report vfp not vfpv3. Here we make a last attempt
- // to detect vfpv3 by checking for vfp *and* neon, since neon is only
- // available on architectures with vfpv3.
- // Checking neon on its own is not enough as it is possible to have neon
- // without vfp.
- if (CPUInfoContainsString("vfp") && CPUInfoContainsString("neon")) {
- return true;
- }
- }
-
- return false;
-}
-#endif // def __arm__
-
-
-#ifdef __mips__
-bool OS::MipsCpuHasFeature(CpuFeature feature) {
- const char* search_string = NULL;
- const char* file_name = "/proc/cpuinfo";
- // Simple detection of FPU at runtime for Linux.
- // It is based on /proc/cpuinfo, which reveals hardware configuration
- // to user-space applications. According to MIPS (early 2010), no similar
- // facility is universally available on the MIPS architectures,
- // so it's up to individual OSes to provide such.
- //
- // This is written as a straight shot one pass parser
- // and not using STL string and ifstream because,
- // on Linux, it's reading from a (non-mmap-able)
- // character special device.
-
- switch (feature) {
- case FPU:
- search_string = "FPU";
- break;
- default:
- UNREACHABLE();
- }
-
- FILE* f = NULL;
- const char* what = search_string;
-
- if (NULL == (f = fopen(file_name, "r")))
- return false;
-
- int k;
- while (EOF != (k = fgetc(f))) {
- if (k == *what) {
- ++what;
- while ((*what != '\0') && (*what == fgetc(f))) {
- ++what;
- }
- if (*what == '\0') {
- fclose(f);
- return true;
- } else {
- what = search_string;
- }
- }
- }
- fclose(f);
-
- // Did not find string in the proc file.
- return false;
-}
-#endif // def __mips__
-
-
-int OS::ActivationFrameAlignment() {
-#ifdef V8_TARGET_ARCH_ARM
- // On EABI ARM targets this is required for fp correctness in the
- // runtime system.
- return 8;
-#elif V8_TARGET_ARCH_MIPS
- return 8;
-#endif
- // With gcc 4.4 the tree vectorization optimizer can generate code
- // that requires 16 byte alignment such as movdqa on x86.
- return 16;
-}
-
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
-#if (defined(V8_TARGET_ARCH_ARM) && defined(__arm__)) || \
- (defined(V8_TARGET_ARCH_MIPS) && defined(__mips__))
- // Only use on ARM or MIPS hardware.
- MemoryBarrier();
-#else
- __asm__ __volatile__("" : : : "memory");
- // An x86 store acts as a release barrier.
-#endif
- *ptr = value;
-}
-
-
-const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset() {
- time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
- // tm_gmtoff includes any daylight savings offset, so subtract it.
- return static_cast<double>(t->tm_gmtoff * msPerSecond -
- (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, ie, not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
-size_t OS::AllocateAlignment() {
- return sysconf(_SC_PAGESIZE);
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
- // TODO(805): Port randomization of allocated executable memory to Linux.
- const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (mbase == MAP_FAILED) {
- LOG(i::Isolate::Current(),
- StringEvent("OS::Allocate", "mmap failed"));
- return NULL;
- }
- *allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
- return mbase;
-}
-
-
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
- // TODO(1240712): mprotect has a return value which is ignored here.
- mprotect(address, size, PROT_READ);
-}
-
-
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
- // TODO(1240712): mprotect has a return value which is ignored here.
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- mprotect(address, size, prot);
-}
-
-#endif
-
-
-void OS::Sleep(int milliseconds) {
- unsigned int ms = static_cast<unsigned int>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- abort();
-}
-
-
-void OS::DebugBreak() {
-// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
-// which is the architecture of generated code).
-#if (defined(__arm__) || defined(__thumb__))
-# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
- asm("bkpt 0");
-# endif
-#elif defined(__mips__)
- asm("break");
-#else
- asm("int $3");
-#endif
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
- PosixMemoryMappedFile(FILE* file, void* memory, int size)
- : file_(file), memory_(memory), size_(size) { }
- virtual ~PosixMemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- FILE* file_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "r+");
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
-
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- FILE* file = fopen(name, "w+");
- if (file == NULL) return NULL;
- int result = fwrite(initial, size, 1, file);
- if (result < 1) {
- fclose(file);
- return NULL;
- }
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) munmap(memory_, size_);
- fclose(file_);
-}
-
-
-void OS::LogSharedLibraryAddresses() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // This function assumes that the layout of the file is as follows:
- // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
- // If we encounter an unexpected situation we abort scanning further entries.
- FILE* fp = fopen("/proc/self/maps", "r");
- if (fp == NULL) return;
-
- // Allocate enough room to be able to store a full file name.
- const int kLibNameLen = FILENAME_MAX + 1;
- char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
-
- i::Isolate* isolate = ISOLATE;
- // This loop will terminate once the scanning hits an EOF.
- while (true) {
- uintptr_t start, end;
- char attr_r, attr_w, attr_x, attr_p;
- // Parse the addresses and permission bits at the beginning of the line.
- if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
- if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
-
- int c;
- if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
- // Found a read-only executable entry. Skip characters until we reach
- // the beginning of the filename or the end of the line.
- do {
- c = getc(fp);
- } while ((c != EOF) && (c != '\n') && (c != '/'));
- if (c == EOF) break; // EOF: Was unexpected, just exit.
-
- // Process the filename if found.
- if (c == '/') {
- ungetc(c, fp); // Push the '/' back into the stream to be read below.
-
- // Read to the end of the line. Exit if the read fails.
- if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
-
- // Drop the newline character read by fgets. We do not need to check
- // for a zero-length string because we know that we at least read the
- // '/' character.
- lib_name[strlen(lib_name) - 1] = '\0';
- } else {
- // No library name found, just record the raw address range.
- snprintf(lib_name, kLibNameLen,
- "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
- }
- LOG(isolate, SharedLibraryEvent(lib_name, start, end));
- } else {
- // Entry not describing executable data. Skip to end of line to setup
- // reading the next entry.
- do {
- c = getc(fp);
- } while ((c != EOF) && (c != '\n'));
- if (c == EOF) break;
- }
- }
- free(lib_name);
- fclose(fp);
-#endif
-}
-
-
-static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
-
-
-void OS::SignalCodeMovingGC() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // Support for ll_prof.py.
- //
- // The Linux profiler built into the kernel logs all mmap's with
- // PROT_EXEC so that analysis tools can properly attribute ticks. We
- // do a mmap with a name known by ll_prof.py and immediately munmap
- // it. This injects a GC marker into the stream of events generated
- // by the kernel and allows us to synchronize V8 code log and the
- // kernel log.
- int size = sysconf(_SC_PAGESIZE);
- FILE* f = fopen(kGCFakeMmap, "w+");
- void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
- fileno(f), 0);
- ASSERT(addr != MAP_FAILED);
- munmap(addr, size);
- fclose(f);
-#endif
-}
-
-
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- // backtrace is a glibc extension.
-#ifdef __GLIBC__
- int frames_size = frames.length();
- ScopedVector<void*> addresses(frames_size);
-
- int frames_count = backtrace(addresses.start(), frames_size);
-
- char** symbols = backtrace_symbols(addresses.start(), frames_count);
- if (symbols == NULL) {
- return kStackWalkError;
- }
-
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
- "%s",
- symbols[i]);
- // Make sure line termination is in place.
- frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
- }
-
- free(symbols);
-
- return frames_count;
-#else // ndef __GLIBC__
- return 0;
-#endif // ndef __GLIBC__
-}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = mmap(NULL, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
- size_ = size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- if (0 == munmap(address(), size())) address_ = MAP_FAILED;
- }
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != MAP_FAILED;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address, size, prot,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- kMmapFd, kMmapFdOffset)) {
- return false;
- }
-
- UpdateAllocatedSpaceLimits(address, size);
- return true;
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return mmap(address, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
- kMmapFd, kMmapFdOffset) != MAP_FAILED;
-}
-
-
-class ThreadHandle::PlatformData : public Malloced {
- public:
- explicit PlatformData(ThreadHandle::Kind kind) {
- Initialize(kind);
- }
-
- void Initialize(ThreadHandle::Kind kind) {
- switch (kind) {
- case ThreadHandle::SELF: thread_ = pthread_self(); break;
- case ThreadHandle::INVALID: thread_ = kNoThread; break;
- }
- }
-
- pthread_t thread_; // Thread handle for pthread.
-};
-
-
-ThreadHandle::ThreadHandle(Kind kind) {
- data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
- data_->Initialize(kind);
-}
-
-
-ThreadHandle::~ThreadHandle() {
- delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
- return pthread_equal(data_->thread_, pthread_self());
-}
-
-
-bool ThreadHandle::IsValid() const {
- return data_->thread_ != kNoThread;
-}
-
-
-Thread::Thread(Isolate* isolate, const Options& options)
- : ThreadHandle(ThreadHandle::INVALID),
- isolate_(isolate),
- stack_size_(options.stack_size) {
- set_name(options.name);
-}
-
-
-Thread::Thread(Isolate* isolate, const char* name)
- : ThreadHandle(ThreadHandle::INVALID),
- isolate_(isolate),
- stack_size_(0) {
- set_name(name);
-}
-
-
-Thread::~Thread() {
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- prctl(PR_SET_NAME,
- reinterpret_cast<unsigned long>(thread->name()), // NOLINT
- 0, 0, 0);
- thread->thread_handle_data()->thread_ = pthread_self();
- ASSERT(thread->IsValid());
- Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
- thread->Run();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
- }
- pthread_create(&thread_handle_data()->thread_, attr_ptr, ThreadEntry, this);
- ASSERT(IsValid());
-}
-
-
-void Thread::Join() {
- pthread_join(thread_handle_data()->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class LinuxMutex : public Mutex {
- public:
-
- LinuxMutex() {
- pthread_mutexattr_t attrs;
- int result = pthread_mutexattr_init(&attrs);
- ASSERT(result == 0);
- result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
- ASSERT(result == 0);
- result = pthread_mutex_init(&mutex_, &attrs);
- ASSERT(result == 0);
- }
-
- virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() {
- int result = pthread_mutex_lock(&mutex_);
- return result;
- }
-
- virtual int Unlock() {
- int result = pthread_mutex_unlock(&mutex_);
- return result;
- }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
- return new LinuxMutex();
-}
-
-
-class LinuxSemaphore : public Semaphore {
- public:
- explicit LinuxSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~LinuxSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void LinuxSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-} while (false)
-#endif
-
-
-bool LinuxSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
- // Wait for semaphore signalled or timeout.
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result > 0) {
- // For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1.
- errno = result;
- result = -1;
- }
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new LinuxSemaphore(count);
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
-// Android runs a fairly new Linux kernel, so signal info is there,
-// but the C library doesn't have the structs defined.
-
-struct sigcontext {
- uint32_t trap_no;
- uint32_t error_code;
- uint32_t oldmask;
- uint32_t gregs[16];
- uint32_t arm_cpsr;
- uint32_t fault_address;
-};
-typedef uint32_t __sigset_t;
-typedef struct sigcontext mcontext_t;
-typedef struct ucontext {
- uint32_t uc_flags;
- struct ucontext* uc_link;
- stack_t uc_stack;
- mcontext_t uc_mcontext;
- __sigset_t uc_sigmask;
-} ucontext_t;
-enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};
-
-#endif
-
-
-static int GetThreadID() {
- // Glibc doesn't provide a wrapper for gettid(2).
-#if defined(ANDROID)
- return syscall(__NR_gettid);
-#else
- return syscall(SYS_gettid);
-#endif
-}
-
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
-#ifndef V8_HOST_ARCH_MIPS
- USE(info);
- if (signal != SIGPROF) return;
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
- // We require a fully initialized and entered isolate.
- return;
- }
- Sampler* sampler = isolate->logger()->sampler();
- if (sampler == NULL || !sampler->IsActive()) return;
-
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
- if (sample == NULL) sample = &sample_obj;
-
- // Extracting the sample from the context is extremely machine dependent.
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
- mcontext_t& mcontext = ucontext->uc_mcontext;
- sample->state = isolate->current_vm_state();
-#if V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
-#elif V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
-#elif V8_HOST_ARCH_ARM
-// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
-#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
-#else
- sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
- sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
- sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
-#endif
-#elif V8_HOST_ARCH_MIPS
- sample.pc = reinterpret_cast<Address>(mcontext.pc);
- sample.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
- sample.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
-#endif
- sampler->SampleStack(sample);
- sampler->Tick(sample);
-#endif
-}
-
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() : vm_tid_(GetThreadID()) {}
-
- int vm_tid() const { return vm_tid_; }
-
- private:
- const int vm_tid_;
-};
-
-
-class SignalSender : public Thread {
- public:
- enum SleepInterval {
- HALF_INTERVAL,
- FULL_INTERVAL
- };
-
- explicit SignalSender(int interval)
- : Thread(NULL, "SignalSender"),
- vm_tgid_(getpid()),
- interval_(interval) {}
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- // Install a signal handler.
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- signal_handler_installed_ =
- (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
-
- // Start a thread that sends SIGPROF signal to VM threads.
- instance_ = new SignalSender(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
- instance_->Join();
- delete instance_;
- instance_ = NULL;
-
- // Restore the old signal handler.
- if (signal_handler_installed_) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- signal_handler_installed_ = false;
- }
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- }
- if (cpu_profiling_enabled && runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- Sleep(HALF_INTERVAL);
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- Sleep(HALF_INTERVAL);
- } else {
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
- this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
- NULL)) {
- return;
- }
- }
- Sleep(FULL_INTERVAL);
- }
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
- if (!sampler->IsProfiling()) return;
- SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
- sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
- }
-
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
-
- void SendProfilingSignal(int tid) {
- if (!signal_handler_installed_) return;
- // Glibc doesn't provide a wrapper for tgkill(2).
-#if defined(ANDROID)
- syscall(__NR_tgkill, vm_tgid_, tid, SIGPROF);
-#else
- syscall(SYS_tgkill, vm_tgid_, tid, SIGPROF);
-#endif
- }
-
- void Sleep(SleepInterval full_or_half) {
- // Convert ms to us and subtract 100 us to compensate delays
- // occuring during signal delivery.
- useconds_t interval = interval_ * 1000 - 100;
- if (full_or_half == HALF_INTERVAL) interval /= 2;
- int result = usleep(interval);
-#ifdef DEBUG
- if (result != 0 && errno != EINTR) {
- fprintf(stderr,
- "SignalSender usleep error; interval = %u, errno = %d\n",
- interval,
- errno);
- ASSERT(result == 0 || errno == EINTR);
- }
-#endif
- USE(result);
- }
-
- const int vm_tgid_;
- const int interval_;
- RuntimeProfilerRateLimiter rate_limiter_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SignalSender* instance_;
- static bool signal_handler_installed_;
- static struct sigaction old_signal_handler_;
-
- DISALLOW_COPY_AND_ASSIGN(SignalSender);
-};
-
-
-Mutex* SignalSender::mutex_ = OS::CreateMutex();
-SignalSender* SignalSender::instance_ = NULL;
-struct sigaction SignalSender::old_signal_handler_;
-bool SignalSender::signal_handler_installed_ = false;
-
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SignalSender::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SignalSender::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/platform-macos.cc b/src/3rdparty/v8/src/platform-macos.cc
deleted file mode 100644
index bfdf3b2..0000000
--- a/src/3rdparty/v8/src/platform-macos.cc
+++ /dev/null
@@ -1,865 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for MacOS goes here. For the POSIX comaptible parts
-// the implementation is in platform-posix.cc.
-
-#include <dlfcn.h>
-#include <unistd.h>
-#include <sys/mman.h>
-#include <mach/mach_init.h>
-#include <mach-o/dyld.h>
-#include <mach-o/getsect.h>
-
-#include <AvailabilityMacros.h>
-
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <libkern/OSAtomic.h>
-#include <mach/mach.h>
-#include <mach/semaphore.h>
-#include <mach/task.h>
-#include <mach/vm_statistics.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <sys/types.h>
-#include <sys/sysctl.h>
-#include <stdarg.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-
-#undef MAP_TYPE
-
-#include "v8.h"
-
-#include "platform.h"
-#include "vm-state-inl.h"
-
-// Manually define these here as weak imports, rather than including execinfo.h.
-// This lets us launch on 10.4 which does not have these calls.
-extern "C" {
- extern int backtrace(void**, int) __attribute__((weak_import));
- extern char** backtrace_symbols(void* const*, int)
- __attribute__((weak_import));
- extern void backtrace_symbols_fd(void* const*, int, int)
- __attribute__((weak_import));
-}
-
-
-namespace v8 {
-namespace internal {
-
-// 0 is never a valid thread id on MacOSX since a ptread_t is
-// a pointer.
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- // Correct Mac OS X Leopard 'ceil' behavior.
- if (-1.0 < x && x < 0.0) {
- return -0.0;
- } else {
- return ceil(x);
- }
-}
-
-
-static Mutex* limit_mutex = NULL;
-
-
-void OS::Setup() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly will cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, ie, not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
-size_t OS::AllocateAlignment() {
- return getpagesize();
-}
-
-
-// Constants used for mmap.
-// kMmapFd is used to pass vm_alloc flags to tag the region with the user
-// defined tag 255 This helps identify V8-allocated regions in memory analysis
-// tools like vmmap(1).
-static const int kMmapFd = VM_MAKE_TAG(255);
-static const off_t kMmapFdOffset = 0;
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
- const size_t msize = RoundUp(requested, getpagesize());
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* mbase = mmap(NULL, msize, prot,
- MAP_PRIVATE | MAP_ANON,
- kMmapFd, kMmapFdOffset);
- if (mbase == MAP_FAILED) {
- LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
- return NULL;
- }
- *allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
- return mbase;
-}
-
-
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
- UNIMPLEMENTED();
-}
-
-
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
- UNIMPLEMENTED();
-}
-
-#endif
-
-
-void OS::Sleep(int milliseconds) {
- usleep(1000 * milliseconds);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination
- abort();
-}
-
-
-void OS::DebugBreak() {
- asm("int $3");
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
- PosixMemoryMappedFile(FILE* file, void* memory, int size)
- : file_(file), memory_(memory), size_(size) { }
- virtual ~PosixMemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- FILE* file_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "r+");
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
-
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- FILE* file = fopen(name, "w+");
- if (file == NULL) return NULL;
- int result = fwrite(initial, size, 1, file);
- if (result < 1) {
- fclose(file);
- return NULL;
- }
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) munmap(memory_, size_);
- fclose(file_);
-}
-
-
-void OS::LogSharedLibraryAddresses() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- unsigned int images_count = _dyld_image_count();
- for (unsigned int i = 0; i < images_count; ++i) {
- const mach_header* header = _dyld_get_image_header(i);
- if (header == NULL) continue;
-#if V8_HOST_ARCH_X64
- uint64_t size;
- char* code_ptr = getsectdatafromheader_64(
- reinterpret_cast<const mach_header_64*>(header),
- SEG_TEXT,
- SECT_TEXT,
- &size);
-#else
- unsigned int size;
- char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
-#endif
- if (code_ptr == NULL) continue;
- const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
- const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
- LOG(Isolate::Current(),
- SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
- }
-#endif // ENABLE_LOGGING_AND_PROFILING
-}
-
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- // MacOSX requires all these to install so we can assume they are present.
- // These constants are defined by the CPUid instructions.
- const uint64_t one = 1;
- return (one << SSE2) | (one << CMOV) | (one << RDTSC) | (one << CPUID);
-}
-
-
-int OS::ActivationFrameAlignment() {
- // OS X activation frames must be 16 byte-aligned; see "Mac OS X ABI
- // Function Call Guide".
- return 16;
-}
-
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- OSMemoryBarrier();
- *ptr = value;
-}
-
-
-const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset() {
- time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
- // tm_gmtoff includes any daylight savings offset, so subtract it.
- return static_cast<double>(t->tm_gmtoff * msPerSecond -
- (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-int OS::StackWalk(Vector<StackFrame> frames) {
- // If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
- if (backtrace == NULL)
- return 0;
-
- int frames_size = frames.length();
- ScopedVector<void*> addresses(frames_size);
-
- int frames_count = backtrace(addresses.start(), frames_size);
-
- char** symbols = backtrace_symbols(addresses.start(), frames_count);
- if (symbols == NULL) {
- return kStackWalkError;
- }
-
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- SNPrintF(MutableCStrVector(frames[i].text,
- kStackWalkMaxTextLen),
- "%s",
- symbols[i]);
- // Make sure line termination is in place.
- frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
- }
-
- free(symbols);
-
- return frames_count;
-}
-
-
-
-
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = mmap(NULL, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
- size_ = size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- if (0 == munmap(address(), size())) address_ = MAP_FAILED;
- }
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != MAP_FAILED;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address, size, prot,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd, kMmapFdOffset)) {
- return false;
- }
-
- UpdateAllocatedSpaceLimits(address, size);
- return true;
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return mmap(address, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
- kMmapFd, kMmapFdOffset) != MAP_FAILED;
-}
-
-
-class ThreadHandle::PlatformData : public Malloced {
- public:
- explicit PlatformData(ThreadHandle::Kind kind) {
- Initialize(kind);
- }
-
- void Initialize(ThreadHandle::Kind kind) {
- switch (kind) {
- case ThreadHandle::SELF: thread_ = pthread_self(); break;
- case ThreadHandle::INVALID: thread_ = kNoThread; break;
- }
- }
- pthread_t thread_; // Thread handle for pthread.
-};
-
-
-
-ThreadHandle::ThreadHandle(Kind kind) {
- data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
- data_->Initialize(kind);
-}
-
-
-ThreadHandle::~ThreadHandle() {
- delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
- return pthread_equal(data_->thread_, pthread_self());
-}
-
-
-bool ThreadHandle::IsValid() const {
- return data_->thread_ != kNoThread;
-}
-
-
-Thread::Thread(Isolate* isolate, const Options& options)
- : ThreadHandle(ThreadHandle::INVALID),
- isolate_(isolate),
- stack_size_(options.stack_size) {
- set_name(options.name);
-}
-
-
-Thread::Thread(Isolate* isolate, const char* name)
- : ThreadHandle(ThreadHandle::INVALID),
- isolate_(isolate),
- stack_size_(0) {
- set_name(name);
-}
-
-
-Thread::~Thread() {
-}
-
-
-static void SetThreadName(const char* name) {
- // pthread_setname_np is only available in 10.6 or later, so test
- // for it at runtime.
- int (*dynamic_pthread_setname_np)(const char*);
- *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
- dlsym(RTLD_DEFAULT, "pthread_setname_np");
- if (!dynamic_pthread_setname_np)
- return;
-
- // Mac OS X does not expose the length limit of the name, so hardcode it.
- static const int kMaxNameLength = 63;
- USE(kMaxNameLength);
- ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
- dynamic_pthread_setname_np(name);
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->thread_handle_data()->thread_ = pthread_self();
- SetThreadName(thread->name());
- ASSERT(thread->IsValid());
- Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
- thread->Run();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
- }
- pthread_create(&thread_handle_data()->thread_, attr_ptr, ThreadEntry, this);
- ASSERT(IsValid());
-}
-
-
-void Thread::Join() {
- pthread_join(thread_handle_data()->thread_, NULL);
-}
-
-
-#ifdef V8_FAST_TLS_SUPPORTED
-
-static Atomic32 tls_base_offset_initialized = 0;
-intptr_t kMacTlsBaseOffset = 0;
-
-// It's safe to do the initialization more that once, but it has to be
-// done at least once.
-static void InitializeTlsBaseOffset() {
- const size_t kBufferSize = 128;
- char buffer[kBufferSize];
- size_t buffer_size = kBufferSize;
- int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
- if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
- V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
- }
- // The buffer now contains a string of the form XX.YY.ZZ, where
- // XX is the major kernel version component.
- // Make sure the buffer is 0-terminated.
- buffer[kBufferSize - 1] = '\0';
- char* period_pos = strchr(buffer, '.');
- *period_pos = '\0';
- int kernel_version_major =
- static_cast<int>(strtol(buffer, NULL, 10)); // NOLINT
- // The constants below are taken from pthreads.s from the XNU kernel
- // sources archive at www.opensource.apple.com.
- if (kernel_version_major < 11) {
- // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
- // same offsets.
-#if defined(V8_HOST_ARCH_IA32)
- kMacTlsBaseOffset = 0x48;
-#else
- kMacTlsBaseOffset = 0x60;
-#endif
- } else {
- // 11.x.x (Lion) changed the offset.
- kMacTlsBaseOffset = 0;
- }
-
- Release_Store(&tls_base_offset_initialized, 1);
-}
-
-static void CheckFastTls(Thread::LocalStorageKey key) {
- void* expected = reinterpret_cast<void*>(0x1234CAFE);
- Thread::SetThreadLocal(key, expected);
- void* actual = Thread::GetExistingThreadLocal(key);
- if (expected != actual) {
- V8_Fatal(__FILE__, __LINE__,
- "V8 failed to initialize fast TLS on current kernel");
- }
- Thread::SetThreadLocal(key, NULL);
-}
-
-#endif // V8_FAST_TLS_SUPPORTED
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
-#ifdef V8_FAST_TLS_SUPPORTED
- bool check_fast_tls = false;
- if (tls_base_offset_initialized == 0) {
- check_fast_tls = true;
- InitializeTlsBaseOffset();
- }
-#endif
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- LocalStorageKey typed_key = static_cast<LocalStorageKey>(key);
-#ifdef V8_FAST_TLS_SUPPORTED
- // If we just initialized fast TLS support, make sure it works.
- if (check_fast_tls) CheckFastTls(typed_key);
-#endif
- return typed_key;
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class MacOSMutex : public Mutex {
- public:
-
- MacOSMutex() {
- pthread_mutexattr_t attr;
- pthread_mutexattr_init(&attr);
- pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
- pthread_mutex_init(&mutex_, &attr);
- }
-
- virtual ~MacOSMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() { return pthread_mutex_lock(&mutex_); }
- virtual int Unlock() { return pthread_mutex_unlock(&mutex_); }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_;
-};
-
-
-Mutex* OS::CreateMutex() {
- return new MacOSMutex();
-}
-
-
-class MacOSSemaphore : public Semaphore {
- public:
- explicit MacOSSemaphore(int count) {
- semaphore_create(mach_task_self(), &semaphore_, SYNC_POLICY_FIFO, count);
- }
-
- ~MacOSSemaphore() {
- semaphore_destroy(mach_task_self(), semaphore_);
- }
-
- // The MacOS mach semaphore documentation claims it does not have spurious
- // wakeups, the way pthreads semaphores do. So the code from the linux
- // platform is not needed here.
- void Wait() { semaphore_wait(semaphore_); }
-
- bool Wait(int timeout);
-
- void Signal() { semaphore_signal(semaphore_); }
-
- private:
- semaphore_t semaphore_;
-};
-
-
-bool MacOSSemaphore::Wait(int timeout) {
- mach_timespec_t ts;
- ts.tv_sec = timeout / 1000000;
- ts.tv_nsec = (timeout % 1000000) * 1000;
- return semaphore_timedwait(semaphore_, ts) != KERN_OPERATION_TIMED_OUT;
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new MacOSSemaphore(count);
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() : profiled_thread_(mach_thread_self()) {}
-
- ~PlatformData() {
- // Deallocate Mach port for thread.
- mach_port_deallocate(mach_task_self(), profiled_thread_);
- }
-
- thread_act_t profiled_thread() { return profiled_thread_; }
-
- private:
- // Note: for profiled_thread_ Mach primitives are used instead of PThread's
- // because the latter doesn't provide thread manipulation primitives required.
- // For details, consult "Mac OS X Internals" book, Section 7.3.
- thread_act_t profiled_thread_;
-};
-
-class SamplerThread : public Thread {
- public:
- explicit SamplerThread(int interval)
- : Thread(NULL, "SamplerThread"),
- interval_(interval) {}
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- instance_ = new SamplerThread(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
- instance_->Join();
- delete instance_;
- instance_ = NULL;
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- }
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- }
- OS::Sleep(interval_);
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
- if (!sampler->isolate()->IsInitialized()) return;
- if (!sampler->IsProfiling()) return;
- SamplerThread* sampler_thread =
- reinterpret_cast<SamplerThread*>(raw_sampler_thread);
- sampler_thread->SampleContext(sampler);
- }
-
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
-
- void SampleContext(Sampler* sampler) {
- thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
- if (sample == NULL) sample = &sample_obj;
-
- if (KERN_SUCCESS != thread_suspend(profiled_thread)) return;
-
-#if V8_HOST_ARCH_X64
- thread_state_flavor_t flavor = x86_THREAD_STATE64;
- x86_thread_state64_t state;
- mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
-#if __DARWIN_UNIX03
-#define REGISTER_FIELD(name) __r ## name
-#else
-#define REGISTER_FIELD(name) r ## name
-#endif // __DARWIN_UNIX03
-#elif V8_HOST_ARCH_IA32
- thread_state_flavor_t flavor = i386_THREAD_STATE;
- i386_thread_state_t state;
- mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
-#if __DARWIN_UNIX03
-#define REGISTER_FIELD(name) __e ## name
-#else
-#define REGISTER_FIELD(name) e ## name
-#endif // __DARWIN_UNIX03
-#else
-#error Unsupported Mac OS X host architecture.
-#endif // V8_HOST_ARCH
-
- if (thread_get_state(profiled_thread,
- flavor,
- reinterpret_cast<natural_t*>(&state),
- &count) == KERN_SUCCESS) {
- sample->state = sampler->isolate()->current_vm_state();
- sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
- sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
- sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
- sampler->SampleStack(sample);
- sampler->Tick(sample);
- }
- thread_resume(profiled_thread);
- }
-
- const int interval_;
- RuntimeProfilerRateLimiter rate_limiter_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SamplerThread* instance_;
-
- DISALLOW_COPY_AND_ASSIGN(SamplerThread);
-};
-
-#undef REGISTER_FIELD
-
-
-Mutex* SamplerThread::mutex_ = OS::CreateMutex();
-SamplerThread* SamplerThread::instance_ = NULL;
-
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SamplerThread::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SamplerThread::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/platform-nullos.cc b/src/3rdparty/v8/src/platform-nullos.cc
deleted file mode 100644
index 5409936..0000000
--- a/src/3rdparty/v8/src/platform-nullos.cc
+++ /dev/null
@@ -1,504 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for NULLOS goes here
-
-// Minimal include to get access to abort, fprintf and friends for bootstrapping
-// messages.
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "v8.h"
-
-#include "platform.h"
-#include "vm-state-inl.h"
-
-
-namespace v8 {
-namespace internal {
-
-// Give V8 the opportunity to override the default ceil behaviour.
-double ceiling(double x) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Give V8 the opportunity to override the default fmod behavior.
-double modulo(double x, double y) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Initialize OS class early in the V8 startup.
-void OS::Setup() {
- // Seed the random number generator.
- UNIMPLEMENTED();
-}
-
-
-// Returns the accumulated user time for thread.
-int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
- UNIMPLEMENTED();
- *secs = 0;
- *usecs = 0;
- return 0;
-}
-
-
-// Returns current time as the number of milliseconds since
-// 00:00:00 UTC, January 1, 1970.
-double OS::TimeCurrentMillis() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Returns ticks in microsecond resolution.
-int64_t OS::Ticks() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Returns a string identifying the current timezone taking into
-// account daylight saving.
-const char* OS::LocalTimezone(double time) {
- UNIMPLEMENTED();
- return "<none>";
-}
-
-
-// Returns the daylight savings offset in milliseconds for the given time.
-double OS::DaylightSavingsOffset(double time) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-int OS::GetLastError() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Returns the local time offset in milliseconds east of UTC without
-// taking daylight savings time into account.
-double OS::LocalTimeOffset() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Print (debug) message to console.
-void OS::Print(const char* format, ...) {
- UNIMPLEMENTED();
-}
-
-
-// Print (debug) message to console.
-void OS::VPrint(const char* format, va_list args) {
- // Minimalistic implementation for bootstrapping.
- vfprintf(stdout, format, args);
-}
-
-
-void OS::FPrint(FILE* out, const char* format, ...) {
- va_list args;
- va_start(args, format);
- VFPrint(out, format, args);
- va_end(args);
-}
-
-
-void OS::VFPrint(FILE* out, const char* format, va_list args) {
- vfprintf(out, format, args);
-}
-
-
-// Print error message to console.
-void OS::PrintError(const char* format, ...) {
- // Minimalistic implementation for bootstrapping.
- va_list args;
- va_start(args, format);
- VPrintError(format, args);
- va_end(args);
-}
-
-
-// Print error message to console.
-void OS::VPrintError(const char* format, va_list args) {
- // Minimalistic implementation for bootstrapping.
- vfprintf(stderr, format, args);
-}
-
-
-int OS::SNPrintF(char* str, size_t size, const char* format, ...) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-int OS::VSNPrintF(char* str, size_t size, const char* format, va_list args) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0;
-}
-
-
-double OS::nan_value() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-bool OS::ArmCpuHasFeature(CpuFeature feature) {
- UNIMPLEMENTED();
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-size_t OS::AllocateAlignment() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool executable) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-void OS::Free(void* buf, const size_t length) {
- // TODO(1240712): potential system call return value which is ignored here.
- UNIMPLEMENTED();
-}
-
-
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
- UNIMPLEMENTED();
-}
-
-
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
- UNIMPLEMENTED();
-}
-
-#endif
-
-
-void OS::Sleep(int milliseconds) {
- UNIMPLEMENTED();
-}
-
-
-void OS::Abort() {
- // Minimalistic implementation for bootstrapping.
- abort();
-}
-
-
-void OS::DebugBreak() {
- UNIMPLEMENTED();
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-void OS::LogSharedLibraryAddresses() {
- UNIMPLEMENTED();
-}
-
-
-void OS::SignalCodeMovingGC() {
- UNIMPLEMENTED();
-}
-
-
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-VirtualMemory::VirtualMemory(size_t size, void* address_hint) {
- UNIMPLEMENTED();
-}
-
-
-VirtualMemory::~VirtualMemory() {
- UNIMPLEMENTED();
-}
-
-
-bool VirtualMemory::IsReserved() {
- UNIMPLEMENTED();
- return false;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-class ThreadHandle::PlatformData : public Malloced {
- public:
- explicit PlatformData(ThreadHandle::Kind kind) {
- UNIMPLEMENTED();
- }
-
- void* pd_data_;
-};
-
-
-ThreadHandle::ThreadHandle(Kind kind) {
- UNIMPLEMENTED();
- // Shared setup follows.
- data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
- UNIMPLEMENTED();
-}
-
-
-ThreadHandle::~ThreadHandle() {
- UNIMPLEMENTED();
- // Shared tear down follows.
- delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
- UNIMPLEMENTED();
- return false;
-}
-
-
-bool ThreadHandle::IsValid() const {
- UNIMPLEMENTED();
- return false;
-}
-
-
-Thread::Thread(Isolate* isolate, const Options& options)
- : ThreadHandle(ThreadHandle::INVALID),
- isolate_(isolate),
- stack_size_(options.stack_size) {
- set_name(options.name);
- UNIMPLEMENTED();
-}
-
-
-Thread::Thread(Isolate* isolate, const char* name)
- : ThreadHandle(ThreadHandle::INVALID),
- isolate_(isolate),
- stack_size_(0) {
- set_name(name);
- UNIMPLEMENTED();
-}
-
-
-Thread::~Thread() {
- UNIMPLEMENTED();
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- UNIMPLEMENTED();
-}
-
-
-void Thread::Join() {
- UNIMPLEMENTED();
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- UNIMPLEMENTED();
- return static_cast<LocalStorageKey>(0);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- UNIMPLEMENTED();
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- UNIMPLEMENTED();
-}
-
-
-void Thread::YieldCPU() {
- UNIMPLEMENTED();
-}
-
-
-class NullMutex : public Mutex {
- public:
- NullMutex() : data_(NULL) {
- UNIMPLEMENTED();
- }
-
- virtual ~NullMutex() {
- UNIMPLEMENTED();
- }
-
- virtual int Lock() {
- UNIMPLEMENTED();
- return 0;
- }
-
- virtual int Unlock() {
- UNIMPLEMENTED();
- return 0;
- }
-
- private:
- void* data_;
-};
-
-
-Mutex* OS::CreateMutex() {
- UNIMPLEMENTED();
- return new NullMutex();
-}
-
-
-class NullSemaphore : public Semaphore {
- public:
- explicit NullSemaphore(int count) : data_(NULL) {
- UNIMPLEMENTED();
- }
-
- virtual ~NullSemaphore() {
- UNIMPLEMENTED();
- }
-
- virtual void Wait() {
- UNIMPLEMENTED();
- }
-
- virtual void Signal() {
- UNIMPLEMENTED();
- }
- private:
- void* data_;
-};
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- UNIMPLEMENTED();
- return new NullSemaphore(count);
-}
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-class ProfileSampler::PlatformData : public Malloced {
- public:
- PlatformData() {
- UNIMPLEMENTED();
- }
-};
-
-
-ProfileSampler::ProfileSampler(int interval) {
- UNIMPLEMENTED();
- // Shared setup follows.
- data_ = new PlatformData();
- interval_ = interval;
- active_ = false;
-}
-
-
-ProfileSampler::~ProfileSampler() {
- UNIMPLEMENTED();
- // Shared tear down follows.
- delete data_;
-}
-
-
-void ProfileSampler::Start() {
- UNIMPLEMENTED();
-}
-
-
-void ProfileSampler::Stop() {
- UNIMPLEMENTED();
-}
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/platform-openbsd.cc b/src/3rdparty/v8/src/platform-openbsd.cc
deleted file mode 100644
index fe1a62a..0000000
--- a/src/3rdparty/v8/src/platform-openbsd.cc
+++ /dev/null
@@ -1,672 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for OpenBSD goes here. For the POSIX comaptible parts
-// the implementation is in platform-posix.cc.
-
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <sys/types.h>
-#include <stdlib.h>
-
-#include <sys/types.h> // mmap & munmap
-#include <sys/mman.h> // mmap & munmap
-#include <sys/stat.h> // open
-#include <sys/fcntl.h> // open
-#include <unistd.h> // getpagesize
-#include <execinfo.h> // backtrace, backtrace_symbols
-#include <strings.h> // index
-#include <errno.h>
-#include <stdarg.h>
-#include <limits.h>
-
-#undef MAP_TYPE
-
-#include "v8.h"
-
-#include "platform.h"
-#include "vm-state-inl.h"
-
-
-namespace v8 {
-namespace internal {
-
-// 0 is never a valid thread id on OpenBSD since tids and pids share a
-// name space and pid 0 is used to kill the group (see man 2 kill).
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- // Correct as on OS X
- if (-1.0 < x && x < 0.0) {
- return -0.0;
- } else {
- return ceil(x);
- }
-}
-
-
-void OS::Setup() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
-}
-
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- __asm__ __volatile__("" : : : "memory");
- *ptr = value;
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // OpenBSD runs on anything.
-}
-
-
-int OS::ActivationFrameAlignment() {
- // 16 byte alignment on OpenBSD
- return 16;
-}
-
-
-const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset() {
- time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
- // tm_gmtoff includes any daylight savings offset, so subtract it.
- return static_cast<double>(t->tm_gmtoff * msPerSecond -
- (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, ie, not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
-size_t OS::AllocateAlignment() {
- return getpagesize();
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool executable) {
- const size_t msize = RoundUp(requested, getpagesize());
- int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
-
- if (mbase == MAP_FAILED) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
- return NULL;
- }
- *allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
- return mbase;
-}
-
-
-void OS::Free(void* buf, const size_t length) {
- int result = munmap(buf, length);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
- UNIMPLEMENTED();
-}
-
-
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
- UNIMPLEMENTED();
-}
-
-#endif
-
-
-void OS::Sleep(int milliseconds) {
- unsigned int ms = static_cast<unsigned int>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- abort();
-}
-
-
-void OS::DebugBreak() {
-#if (defined(__arm__) || defined(__thumb__))
-# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
- asm("bkpt 0");
-# endif
-#else
- asm("int $3");
-#endif
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
- PosixMemoryMappedFile(FILE* file, void* memory, int size)
- : file_(file), memory_(memory), size_(size) { }
- virtual ~PosixMemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- FILE* file_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "r+");
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
-
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- FILE* file = fopen(name, "w+");
- if (file == NULL) return NULL;
- int result = fwrite(initial, size, 1, file);
- if (result < 1) {
- fclose(file);
- return NULL;
- }
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) munmap(memory_, size_);
- fclose(file_);
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-static unsigned StringToLong(char* buffer) {
- return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
-}
-#endif
-
-
-void OS::LogSharedLibraryAddresses() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- static const int MAP_LENGTH = 1024;
- int fd = open("/proc/self/maps", O_RDONLY);
- if (fd < 0) return;
- while (true) {
- char addr_buffer[11];
- addr_buffer[0] = '0';
- addr_buffer[1] = 'x';
- addr_buffer[10] = 0;
- int result = read(fd, addr_buffer + 2, 8);
- if (result < 8) break;
- unsigned start = StringToLong(addr_buffer);
- result = read(fd, addr_buffer + 2, 1);
- if (result < 1) break;
- if (addr_buffer[2] != '-') break;
- result = read(fd, addr_buffer + 2, 8);
- if (result < 8) break;
- unsigned end = StringToLong(addr_buffer);
- char buffer[MAP_LENGTH];
- int bytes_read = -1;
- do {
- bytes_read++;
- if (bytes_read >= MAP_LENGTH - 1)
- break;
- result = read(fd, buffer + bytes_read, 1);
- if (result < 1) break;
- } while (buffer[bytes_read] != '\n');
- buffer[bytes_read] = 0;
- // Ignore mappings that are not executable.
- if (buffer[3] != 'x') continue;
- char* start_of_path = index(buffer, '/');
- // There may be no filename in this line. Skip to next.
- if (start_of_path == NULL) continue;
- buffer[bytes_read] = 0;
- LOG(SharedLibraryEvent(start_of_path, start, end));
- }
- close(fd);
-#endif
-}
-
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- UNIMPLEMENTED();
- return 1;
-}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = mmap(NULL, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
- size_ = size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- if (0 == munmap(address(), size())) address_ = MAP_FAILED;
- }
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != MAP_FAILED;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
- int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address, size, prot,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd, kMmapFdOffset)) {
- return false;
- }
-
- UpdateAllocatedSpaceLimits(address, size);
- return true;
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return mmap(address, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset) != MAP_FAILED;
-}
-
-
-class ThreadHandle::PlatformData : public Malloced {
- public:
- explicit PlatformData(ThreadHandle::Kind kind) {
- Initialize(kind);
- }
-
- void Initialize(ThreadHandle::Kind kind) {
- switch (kind) {
- case ThreadHandle::SELF: thread_ = pthread_self(); break;
- case ThreadHandle::INVALID: thread_ = kNoThread; break;
- }
- }
- pthread_t thread_; // Thread handle for pthread.
-};
-
-
-ThreadHandle::ThreadHandle(Kind kind) {
- data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
- data_->Initialize(kind);
-}
-
-
-ThreadHandle::~ThreadHandle() {
- delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
- return pthread_equal(data_->thread_, pthread_self());
-}
-
-
-bool ThreadHandle::IsValid() const {
- return data_->thread_ != kNoThread;
-}
-
-
-Thread::Thread(Isolate* isolate, const Options& options)
- : ThreadHandle(ThreadHandle::INVALID),
- isolate_(isolate),
- stack_size_(options.stack_size) {
- set_name(options.name);
-}
-
-
-Thread::Thread(Isolate* isolate, const char* name)
- : ThreadHandle(ThreadHandle::INVALID),
- isolate_(isolate),
- stack_size_(0) {
- set_name(name);
-}
-
-
-Thread::~Thread() {
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->thread_handle_data()->thread_ = pthread_self();
- ASSERT(thread->IsValid());
- Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
- thread->Run();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
- }
- pthread_create(&thread_handle_data()->thread_, attr_ptr, ThreadEntry, this);
- ASSERT(IsValid());
-}
-
-
-void Thread::Join() {
- pthread_join(thread_handle_data()->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class OpenBSDMutex : public Mutex {
- public:
-
- OpenBSDMutex() {
- pthread_mutexattr_t attrs;
- int result = pthread_mutexattr_init(&attrs);
- ASSERT(result == 0);
- result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
- ASSERT(result == 0);
- result = pthread_mutex_init(&mutex_, &attrs);
- ASSERT(result == 0);
- }
-
- virtual ~OpenBSDMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() {
- int result = pthread_mutex_lock(&mutex_);
- return result;
- }
-
- virtual int Unlock() {
- int result = pthread_mutex_unlock(&mutex_);
- return result;
- }
-
- private:
- pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
- return new OpenBSDMutex();
-}
-
-
-class OpenBSDSemaphore : public Semaphore {
- public:
- explicit OpenBSDSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~OpenBSDSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void OpenBSDSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-bool OpenBSDSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
- while (true) {
- int result = sem_trywait(&sem_);
- if (result == 0) return true; // Successfully got semaphore.
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new OpenBSDSemaphore(count);
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-static Sampler* active_sampler_ = NULL;
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- if (active_sampler_ == NULL) return;
-
- TickSample sample;
-
- // We always sample the VM state.
- sample.state = VMState::current_state();
-
- active_sampler_->Tick(&sample);
-}
-
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() {
- signal_handler_installed_ = false;
- }
-
- bool signal_handler_installed_;
- struct sigaction old_signal_handler_;
- struct itimerval old_timer_value_;
-};
-
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData();
-}
-
-
-Sampler::~Sampler() {
- delete data_;
-}
-
-
-void Sampler::Start() {
- // There can only be one active sampler at the time on POSIX
- // platforms.
- if (active_sampler_ != NULL) return;
-
- // Request profiling signals.
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
- data_->signal_handler_installed_ = true;
-
- // Set the itimer to generate a tick for each interval.
- itimerval itimer;
- itimer.it_interval.tv_sec = interval_ / 1000;
- itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
- itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
- itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
- setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
-
- // Set this sampler as the active sampler.
- active_sampler_ = this;
- active_ = true;
-}
-
-
-void Sampler::Stop() {
- // Restore old signal handler
- if (data_->signal_handler_installed_) {
- setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
- sigaction(SIGPROF, &data_->old_signal_handler_, 0);
- data_->signal_handler_installed_ = false;
- }
-
- // This sampler is no longer the active sampler.
- active_sampler_ = NULL;
- active_ = false;
-}
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/platform-posix.cc b/src/3rdparty/v8/src/platform-posix.cc
deleted file mode 100644
index c4b0fb8..0000000
--- a/src/3rdparty/v8/src/platform-posix.cc
+++ /dev/null
@@ -1,424 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for POSIX goes here. This is not a platform on its
-// own but contains the parts which are the same across POSIX platforms Linux,
-// Mac OS, FreeBSD and OpenBSD.
-
-#include <unistd.h>
-#include <errno.h>
-#include <time.h>
-
-#include <sys/socket.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <sys/types.h>
-
-#include <arpa/inet.h>
-#include <netinet/in.h>
-#include <netdb.h>
-
-#if defined(ANDROID)
-#define LOG_TAG "v8"
-#include <utils/Log.h> // LOG_PRI_VA
-#endif
-
-#include "v8.h"
-
-#include "platform.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Math functions
-
-double modulo(double x, double y) {
- return fmod(x, y);
-}
-
-
-double OS::nan_value() {
- // NAN from math.h is defined in C99 and not in POSIX.
- return NAN;
-}
-
-
-// ----------------------------------------------------------------------------
-// POSIX date/time support.
-//
-
-int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
- struct rusage usage;
-
- if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
- *secs = usage.ru_utime.tv_sec;
- *usecs = usage.ru_utime.tv_usec;
- return 0;
-}
-
-
-double OS::TimeCurrentMillis() {
- struct timeval tv;
- if (gettimeofday(&tv, NULL) < 0) return 0.0;
- return (static_cast<double>(tv.tv_sec) * 1000) +
- (static_cast<double>(tv.tv_usec) / 1000);
-}
-
-
-int64_t OS::Ticks() {
- // gettimeofday has microsecond resolution.
- struct timeval tv;
- if (gettimeofday(&tv, NULL) < 0)
- return 0;
- return (static_cast<int64_t>(tv.tv_sec) * 1000000) + tv.tv_usec;
-}
-
-
-double OS::DaylightSavingsOffset(double time) {
- if (isnan(time)) return nan_value();
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return nan_value();
- return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
-}
-
-
-int OS::GetLastError() {
- return errno;
-}
-
-
-// ----------------------------------------------------------------------------
-// POSIX stdio support.
-//
-
-FILE* OS::FOpen(const char* path, const char* mode) {
- return fopen(path, mode);
-}
-
-
-bool OS::Remove(const char* path) {
- return (remove(path) == 0);
-}
-
-
-const char* const OS::LogFileOpenMode = "w";
-
-
-void OS::Print(const char* format, ...) {
- va_list args;
- va_start(args, format);
- VPrint(format, args);
- va_end(args);
-}
-
-
-void OS::VPrint(const char* format, va_list args) {
-#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
- LOG_PRI_VA(ANDROID_LOG_INFO, LOG_TAG, format, args);
-#else
- vprintf(format, args);
-#endif
-}
-
-
-void OS::FPrint(FILE* out, const char* format, ...) {
- va_list args;
- va_start(args, format);
- VFPrint(out, format, args);
- va_end(args);
-}
-
-
-void OS::VFPrint(FILE* out, const char* format, va_list args) {
-#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
- LOG_PRI_VA(ANDROID_LOG_INFO, LOG_TAG, format, args);
-#else
- vfprintf(out, format, args);
-#endif
-}
-
-
-void OS::PrintError(const char* format, ...) {
- va_list args;
- va_start(args, format);
- VPrintError(format, args);
- va_end(args);
-}
-
-
-void OS::VPrintError(const char* format, va_list args) {
-#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
- LOG_PRI_VA(ANDROID_LOG_ERROR, LOG_TAG, format, args);
-#else
- vfprintf(stderr, format, args);
-#endif
-}
-
-
-int OS::SNPrintF(Vector<char> str, const char* format, ...) {
- va_list args;
- va_start(args, format);
- int result = VSNPrintF(str, format, args);
- va_end(args);
- return result;
-}
-
-
-int OS::VSNPrintF(Vector<char> str,
- const char* format,
- va_list args) {
- int n = vsnprintf(str.start(), str.length(), format, args);
- if (n < 0 || n >= str.length()) {
- // If the length is zero, the assignment fails.
- if (str.length() > 0)
- str[str.length() - 1] = '\0';
- return -1;
- } else {
- return n;
- }
-}
-
-
-#if defined(V8_TARGET_ARCH_IA32)
-static OS::MemCopyFunction memcopy_function = NULL;
-static Mutex* memcopy_function_mutex = OS::CreateMutex();
-// Defined in codegen-ia32.cc.
-OS::MemCopyFunction CreateMemCopyFunction();
-
-// Copy memory area to disjoint memory area.
-void OS::MemCopy(void* dest, const void* src, size_t size) {
- if (memcopy_function == NULL) {
- ScopedLock lock(memcopy_function_mutex);
- if (memcopy_function == NULL) {
- OS::MemCopyFunction temp = CreateMemCopyFunction();
- MemoryBarrier();
- memcopy_function = temp;
- }
- }
- // Note: here we rely on dependent reads being ordered. This is true
- // on all architectures we currently support.
- (*memcopy_function)(dest, src, size);
-#ifdef DEBUG
- CHECK_EQ(0, memcmp(dest, src, size));
-#endif
-}
-#endif // V8_TARGET_ARCH_IA32
-
-// ----------------------------------------------------------------------------
-// POSIX string support.
-//
-
-char* OS::StrChr(char* str, int c) {
- return strchr(str, c);
-}
-
-
-void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) {
- strncpy(dest.start(), src, n);
-}
-
-
-// ----------------------------------------------------------------------------
-// POSIX socket support.
-//
-
-class POSIXSocket : public Socket {
- public:
- explicit POSIXSocket() {
- // Create the socket.
- socket_ = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
- if (IsValid()) {
- // Allow rapid reuse.
- static const int kOn = 1;
- int ret = setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR,
- &kOn, sizeof(kOn));
- ASSERT(ret == 0);
- USE(ret);
- }
- }
- explicit POSIXSocket(int socket): socket_(socket) { }
- virtual ~POSIXSocket() { Shutdown(); }
-
- // Server initialization.
- bool Bind(const int port);
- bool Listen(int backlog) const;
- Socket* Accept() const;
-
- // Client initialization.
- bool Connect(const char* host, const char* port);
-
- // Shutdown socket for both read and write.
- bool Shutdown();
-
- // Data Transimission
- int Send(const char* data, int len) const;
- int Receive(char* data, int len) const;
-
- bool SetReuseAddress(bool reuse_address);
-
- bool IsValid() const { return socket_ != -1; }
-
- private:
- int socket_;
-};
-
-
-bool POSIXSocket::Bind(const int port) {
- if (!IsValid()) {
- return false;
- }
-
- sockaddr_in addr;
- memset(&addr, 0, sizeof(addr));
- addr.sin_family = AF_INET;
- addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- addr.sin_port = htons(port);
- int status = bind(socket_,
- BitCast<struct sockaddr *>(&addr),
- sizeof(addr));
- return status == 0;
-}
-
-
-bool POSIXSocket::Listen(int backlog) const {
- if (!IsValid()) {
- return false;
- }
-
- int status = listen(socket_, backlog);
- return status == 0;
-}
-
-
-Socket* POSIXSocket::Accept() const {
- if (!IsValid()) {
- return NULL;
- }
-
- int socket = accept(socket_, NULL, NULL);
- if (socket == -1) {
- return NULL;
- } else {
- return new POSIXSocket(socket);
- }
-}
-
-
-bool POSIXSocket::Connect(const char* host, const char* port) {
- if (!IsValid()) {
- return false;
- }
-
- // Lookup host and port.
- struct addrinfo *result = NULL;
- struct addrinfo hints;
- memset(&hints, 0, sizeof(addrinfo));
- hints.ai_family = AF_INET;
- hints.ai_socktype = SOCK_STREAM;
- hints.ai_protocol = IPPROTO_TCP;
- int status = getaddrinfo(host, port, &hints, &result);
- if (status != 0) {
- return false;
- }
-
- // Connect.
- status = connect(socket_, result->ai_addr, result->ai_addrlen);
- freeaddrinfo(result);
- return status == 0;
-}
-
-
-bool POSIXSocket::Shutdown() {
- if (IsValid()) {
- // Shutdown socket for both read and write.
- int status = shutdown(socket_, SHUT_RDWR);
- close(socket_);
- socket_ = -1;
- return status == 0;
- }
- return true;
-}
-
-
-int POSIXSocket::Send(const char* data, int len) const {
- int status = send(socket_, data, len, 0);
- return status;
-}
-
-
-int POSIXSocket::Receive(char* data, int len) const {
- int status = recv(socket_, data, len, 0);
- return status;
-}
-
-
-bool POSIXSocket::SetReuseAddress(bool reuse_address) {
- int on = reuse_address ? 1 : 0;
- int status = setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
- return status == 0;
-}
-
-
-bool Socket::Setup() {
- // Nothing to do on POSIX.
- return true;
-}
-
-
-int Socket::LastError() {
- return errno;
-}
-
-
-uint16_t Socket::HToN(uint16_t value) {
- return htons(value);
-}
-
-
-uint16_t Socket::NToH(uint16_t value) {
- return ntohs(value);
-}
-
-
-uint32_t Socket::HToN(uint32_t value) {
- return htonl(value);
-}
-
-
-uint32_t Socket::NToH(uint32_t value) {
- return ntohl(value);
-}
-
-
-Socket* OS::CreateSocket() {
- return new POSIXSocket();
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/platform-solaris.cc b/src/3rdparty/v8/src/platform-solaris.cc
deleted file mode 100644
index da278f3..0000000
--- a/src/3rdparty/v8/src/platform-solaris.cc
+++ /dev/null
@@ -1,796 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for Solaris 10 goes here. For the POSIX comaptible
-// parts the implementation is in platform-posix.cc.
-
-#ifdef __sparc
-# error "V8 does not support the SPARC CPU architecture."
-#endif
-
-#include <sys/stack.h> // for stack alignment
-#include <unistd.h> // getpagesize(), usleep()
-#include <sys/mman.h> // mmap()
-#include <ucontext.h> // walkstack(), getcontext()
-#include <dlfcn.h> // dladdr
-#include <pthread.h>
-#include <sched.h> // for sched_yield
-#include <semaphore.h>
-#include <time.h>
-#include <sys/time.h> // gettimeofday(), timeradd()
-#include <errno.h>
-#include <ieeefp.h> // finite()
-#include <signal.h> // sigemptyset(), etc
-#include <sys/regset.h>
-
-
-#undef MAP_TYPE
-
-#include "v8.h"
-
-#include "platform.h"
-#include "vm-state-inl.h"
-
-
-// It seems there is a bug in some Solaris distributions (experienced in
-// SunOS 5.10 Generic_141445-09) which make it difficult or impossible to
-// access signbit() despite the availability of other C99 math functions.
-#ifndef signbit
-// Test sign - usually defined in math.h
-int signbit(double x) {
- // We need to take care of the special case of both positive and negative
- // versions of zero.
- if (x == 0) {
- return fpclass(x) & FP_NZERO;
- } else {
- // This won't detect negative NaN but that should be okay since we don't
- // assume that behavior.
- return x < 0;
- }
-}
-#endif // signbit
-
-namespace v8 {
-namespace internal {
-
-
-// 0 is never a valid thread id on Solaris since the main thread is 1 and
-// subsequent have their ids incremented from there
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- return ceil(x);
-}
-
-
-void OS::Setup() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly will cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // Solaris runs on a lot of things.
-}
-
-
-int OS::ActivationFrameAlignment() {
- return STACK_ALIGN;
-}
-
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- __asm__ __volatile__("" : : : "memory");
- *ptr = value;
-}
-
-
-const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return tzname[0]; // The location of the timezone string on Solaris.
-}
-
-
-double OS::LocalTimeOffset() {
- // On Solaris, struct tm does not contain a tm_gmtoff field.
- time_t utc = time(NULL);
- ASSERT(utc != -1);
- struct tm* loc = localtime(&utc);
- ASSERT(loc != NULL);
- return static_cast<double>((mktime(loc) - utc) * msPerSecond);
-}
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, ie, not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
-size_t OS::AllocateAlignment() {
- return static_cast<size_t>(getpagesize());
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
- const size_t msize = RoundUp(requested, getpagesize());
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
-
- if (mbase == MAP_FAILED) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
- return NULL;
- }
- *allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
- return mbase;
-}
-
-
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
- // TODO(1240712): mprotect has a return value which is ignored here.
- mprotect(address, size, PROT_READ);
-}
-
-
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
- // TODO(1240712): mprotect has a return value which is ignored here.
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- mprotect(address, size, prot);
-}
-
-#endif
-
-
-void OS::Sleep(int milliseconds) {
- useconds_t ms = static_cast<useconds_t>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- abort();
-}
-
-
-void OS::DebugBreak() {
- asm("int $3");
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
- PosixMemoryMappedFile(FILE* file, void* memory, int size)
- : file_(file), memory_(memory), size_(size) { }
- virtual ~PosixMemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- FILE* file_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "r+");
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
-
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- FILE* file = fopen(name, "w+");
- if (file == NULL) return NULL;
- int result = fwrite(initial, size, 1, file);
- if (result < 1) {
- fclose(file);
- return NULL;
- }
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) munmap(memory_, size_);
- fclose(file_);
-}
-
-
-void OS::LogSharedLibraryAddresses() {
-}
-
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-struct StackWalker {
- Vector<OS::StackFrame>& frames;
- int index;
-};
-
-
-static int StackWalkCallback(uintptr_t pc, int signo, void* data) {
- struct StackWalker* walker = static_cast<struct StackWalker*>(data);
- Dl_info info;
-
- int i = walker->index;
-
- walker->frames[i].address = reinterpret_cast<void*>(pc);
-
- // Make sure line termination is in place.
- walker->frames[i].text[OS::kStackWalkMaxTextLen - 1] = '\0';
-
- Vector<char> text = MutableCStrVector(walker->frames[i].text,
- OS::kStackWalkMaxTextLen);
-
- if (dladdr(reinterpret_cast<void*>(pc), &info) == 0) {
- OS::SNPrintF(text, "[0x%p]", pc);
- } else if ((info.dli_fname != NULL && info.dli_sname != NULL)) {
- // We have symbol info.
- OS::SNPrintF(text, "%s'%s+0x%x", info.dli_fname, info.dli_sname, pc);
- } else {
- // No local symbol info.
- OS::SNPrintF(text,
- "%s'0x%p [0x%p]",
- info.dli_fname,
- pc - reinterpret_cast<uintptr_t>(info.dli_fbase),
- pc);
- }
- walker->index++;
- return 0;
-}
-
-
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- ucontext_t ctx;
- struct StackWalker walker = { frames, 0 };
-
- if (getcontext(&ctx) < 0) return kStackWalkError;
-
- if (!walkcontext(&ctx, StackWalkCallback, &walker)) {
- return kStackWalkError;
- }
-
- return walker.index;
-}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = mmap(NULL, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
- size_ = size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- if (0 == munmap(address(), size())) address_ = MAP_FAILED;
- }
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != MAP_FAILED;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
- int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address, size, prot,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd, kMmapFdOffset)) {
- return false;
- }
-
- UpdateAllocatedSpaceLimits(address, size);
- return true;
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return mmap(address, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
- kMmapFd, kMmapFdOffset) != MAP_FAILED;
-}
-
-
-class ThreadHandle::PlatformData : public Malloced {
- public:
- explicit PlatformData(ThreadHandle::Kind kind) {
- Initialize(kind);
- }
-
- void Initialize(ThreadHandle::Kind kind) {
- switch (kind) {
- case ThreadHandle::SELF: thread_ = pthread_self(); break;
- case ThreadHandle::INVALID: thread_ = kNoThread; break;
- }
- }
-
- pthread_t thread_; // Thread handle for pthread.
-};
-
-
-ThreadHandle::ThreadHandle(Kind kind) {
- data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
- data_->Initialize(kind);
-}
-
-
-ThreadHandle::~ThreadHandle() {
- delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
- return pthread_equal(data_->thread_, pthread_self());
-}
-
-
-bool ThreadHandle::IsValid() const {
- return data_->thread_ != kNoThread;
-}
-
-
-Thread::Thread(Isolate* isolate, const Options& options)
- : ThreadHandle(ThreadHandle::INVALID),
- isolate_(isolate),
- stack_size_(options.stack_size) {
- set_name(options.name);
-}
-
-
-Thread::Thread(Isolate* isolate, const char* name)
- : ThreadHandle(ThreadHandle::INVALID),
- isolate_(isolate),
- stack_size_(0) {
- set_name(name);
-}
-
-
-Thread::~Thread() {
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->thread_handle_data()->thread_ = pthread_self();
- ASSERT(thread->IsValid());
- Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
- thread->Run();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
- }
- pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
- ASSERT(IsValid());
-}
-
-
-void Thread::Join() {
- pthread_join(thread_handle_data()->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class SolarisMutex : public Mutex {
- public:
-
- SolarisMutex() {
- pthread_mutexattr_t attr;
- pthread_mutexattr_init(&attr);
- pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
- pthread_mutex_init(&mutex_, &attr);
- }
-
- ~SolarisMutex() { pthread_mutex_destroy(&mutex_); }
-
- int Lock() { return pthread_mutex_lock(&mutex_); }
-
- int Unlock() { return pthread_mutex_unlock(&mutex_); }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_;
-};
-
-
-Mutex* OS::CreateMutex() {
- return new SolarisMutex();
-}
-
-
-class SolarisSemaphore : public Semaphore {
- public:
- explicit SolarisSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~SolarisSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void SolarisSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-} while (false)
-#endif
-
-
-#ifndef timeradd
-#define timeradd(a, b, result) \
- do { \
- (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \
- (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \
- if ((result)->tv_usec >= 1000000) { \
- ++(result)->tv_sec; \
- (result)->tv_usec -= 1000000; \
- } \
- } while (0)
-#endif
-
-
-bool SolarisSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
- // Wait for semaphore signalled or timeout.
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new SolarisSemaphore(count);
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-static Sampler* active_sampler_ = NULL;
-static pthread_t vm_tid_ = 0;
-
-
-static pthread_t GetThreadID() {
- return pthread_self();
-}
-
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- if (active_sampler_ == NULL || !active_sampler_->IsActive()) return;
- if (vm_tid_ != GetThreadID()) return;
-
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent();
- if (sample == NULL) sample = &sample_obj;
-
- // Extracting the sample from the context is extremely machine dependent.
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
- mcontext_t& mcontext = ucontext->uc_mcontext;
- sample->state = Top::current_vm_state();
-
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
-
- active_sampler_->SampleStack(sample);
- active_sampler_->Tick(sample);
-}
-
-
-class Sampler::PlatformData : public Malloced {
- public:
- enum SleepInterval {
- FULL_INTERVAL,
- HALF_INTERVAL
- };
-
- explicit PlatformData(Sampler* sampler)
- : sampler_(sampler),
- signal_handler_installed_(false),
- vm_tgid_(getpid()),
- signal_sender_launched_(false) {
- }
-
- void SignalSender() {
- while (sampler_->IsActive()) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- if (sampler_->IsProfiling() && RuntimeProfiler::IsEnabled()) {
- SendProfilingSignal();
- Sleep(HALF_INTERVAL);
- RuntimeProfiler::NotifyTick();
- Sleep(HALF_INTERVAL);
- } else {
- if (sampler_->IsProfiling()) SendProfilingSignal();
- if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
- Sleep(FULL_INTERVAL);
- }
- }
- }
-
- void SendProfilingSignal() {
- if (!signal_handler_installed_) return;
- pthread_kill(vm_tid_, SIGPROF);
- }
-
- void Sleep(SleepInterval full_or_half) {
- // Convert ms to us and subtract 100 us to compensate delays
- // occuring during signal delivery.
- useconds_t interval = sampler_->interval_ * 1000 - 100;
- if (full_or_half == HALF_INTERVAL) interval /= 2;
- int result = usleep(interval);
-#ifdef DEBUG
- if (result != 0 && errno != EINTR) {
- fprintf(stderr,
- "SignalSender usleep error; interval = %u, errno = %d\n",
- interval,
- errno);
- ASSERT(result == 0 || errno == EINTR);
- }
-#endif
- USE(result);
- }
-
- Sampler* sampler_;
- bool signal_handler_installed_;
- struct sigaction old_signal_handler_;
- int vm_tgid_;
- bool signal_sender_launched_;
- pthread_t signal_sender_thread_;
- RuntimeProfilerRateLimiter rate_limiter_;
-};
-
-
-static void* SenderEntry(void* arg) {
- Sampler::PlatformData* data =
- reinterpret_cast<Sampler::PlatformData*>(arg);
- data->SignalSender();
- return 0;
-}
-
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData(this);
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!data_->signal_sender_launched_);
- delete data_;
-}
-
-
-void Sampler::Start() {
- // There can only be one active sampler at the time on POSIX
- // platforms.
- ASSERT(!IsActive());
- vm_tid_ = GetThreadID();
-
- // Request profiling signals.
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- data_->signal_handler_installed_ =
- sigaction(SIGPROF, &sa, &data_->old_signal_handler_) == 0;
-
- // Start a thread that sends SIGPROF signal to VM thread.
- // Sending the signal ourselves instead of relying on itimer provides
- // much better accuracy.
- SetActive(true);
- if (pthread_create(
- &data_->signal_sender_thread_, NULL, SenderEntry, data_) == 0) {
- data_->signal_sender_launched_ = true;
- }
-
- // Set this sampler as the active sampler.
- active_sampler_ = this;
-}
-
-
-void Sampler::Stop() {
- SetActive(false);
-
- // Wait for signal sender termination (it will exit after setting
- // active_ to false).
- if (data_->signal_sender_launched_) {
- Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
- pthread_join(data_->signal_sender_thread_, NULL);
- data_->signal_sender_launched_ = false;
- }
-
- // Restore old signal handler
- if (data_->signal_handler_installed_) {
- sigaction(SIGPROF, &data_->old_signal_handler_, 0);
- data_->signal_handler_installed_ = false;
- }
-
- // This sampler is no longer the active sampler.
- active_sampler_ = NULL;
-}
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/platform-tls-mac.h b/src/3rdparty/v8/src/platform-tls-mac.h
deleted file mode 100644
index 728524e..0000000
--- a/src/3rdparty/v8/src/platform-tls-mac.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PLATFORM_TLS_MAC_H_
-#define V8_PLATFORM_TLS_MAC_H_
-
-#include "globals.h"
-
-namespace v8 {
-namespace internal {
-
-#if defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)
-
-#define V8_FAST_TLS_SUPPORTED 1
-
-extern intptr_t kMacTlsBaseOffset;
-
-INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
-
-inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
- intptr_t result;
-#if defined(V8_HOST_ARCH_IA32)
- asm("movl %%gs:(%1,%2,4), %0;"
- :"=r"(result) // Output must be a writable register.
- :"r"(kMacTlsBaseOffset), "r"(index));
-#else
- asm("movq %%gs:(%1,%2,8), %0;"
- :"=r"(result)
- :"r"(kMacTlsBaseOffset), "r"(index));
-#endif
- return result;
-}
-
-#endif
-
-} } // namespace v8::internal
-
-#endif // V8_PLATFORM_TLS_MAC_H_
diff --git a/src/3rdparty/v8/src/platform-tls-win32.h b/src/3rdparty/v8/src/platform-tls-win32.h
deleted file mode 100644
index 4056e8c..0000000
--- a/src/3rdparty/v8/src/platform-tls-win32.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PLATFORM_TLS_WIN32_H_
-#define V8_PLATFORM_TLS_WIN32_H_
-
-#include "checks.h"
-#include "globals.h"
-#include "win32-headers.h"
-
-namespace v8 {
-namespace internal {
-
-#if defined(_WIN32) && !defined(_WIN64)
-
-#define V8_FAST_TLS_SUPPORTED 1
-
-inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
- const intptr_t kTibInlineTlsOffset = 0xE10;
- const intptr_t kTibExtraTlsOffset = 0xF94;
- const intptr_t kMaxInlineSlots = 64;
- const intptr_t kMaxSlots = kMaxInlineSlots + 1024;
- ASSERT(0 <= index && index < kMaxSlots);
- if (index < kMaxInlineSlots) {
- return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset +
- kPointerSize * index));
- }
- intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset));
- ASSERT(extra != 0);
- return *reinterpret_cast<intptr_t*>(extra +
- kPointerSize * (index - kMaxInlineSlots));
-}
-
-#endif
-
-} } // namespace v8::internal
-
-#endif // V8_PLATFORM_TLS_WIN32_H_
diff --git a/src/3rdparty/v8/src/platform-tls.h b/src/3rdparty/v8/src/platform-tls.h
deleted file mode 100644
index 5649175..0000000
--- a/src/3rdparty/v8/src/platform-tls.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform and architecture specific thread local store functions.
-
-#ifndef V8_PLATFORM_TLS_H_
-#define V8_PLATFORM_TLS_H_
-
-#ifdef V8_FAST_TLS
-
-// When fast TLS is requested we include the appropriate
-// implementation header.
-//
-// The implementation header defines V8_FAST_TLS_SUPPORTED if it
-// provides fast TLS support for the current platform and architecture
-// combination.
-
-#if defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64))
-#include "platform-tls-win32.h"
-#elif defined(__APPLE__)
-#include "platform-tls-mac.h"
-#endif
-
-#endif
-
-#endif // V8_PLATFORM_TLS_H_
diff --git a/src/3rdparty/v8/src/platform-win32.cc b/src/3rdparty/v8/src/platform-win32.cc
deleted file mode 100644
index ab03e3d..0000000
--- a/src/3rdparty/v8/src/platform-win32.cc
+++ /dev/null
@@ -1,2072 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for Win32.
-
-#define V8_WIN32_HEADERS_FULL
-#include "win32-headers.h"
-
-#include "v8.h"
-
-#include "platform.h"
-#include "vm-state-inl.h"
-
-// Extra POSIX/ANSI routines for Win32 when when using Visual Studio C++. Please
-// refer to The Open Group Base Specification for specification of the correct
-// semantics for these functions.
-// (http://www.opengroup.org/onlinepubs/000095399/)
-#ifdef _MSC_VER
-
-namespace v8 {
-namespace internal {
-
-// Test for finite value - usually defined in math.h
-int isfinite(double x) {
- return _finite(x);
-}
-
-} // namespace v8
-} // namespace internal
-
-// Test for a NaN (not a number) value - usually defined in math.h
-int isnan(double x) {
- return _isnan(x);
-}
-
-
-// Test for infinity - usually defined in math.h
-int isinf(double x) {
- return (_fpclass(x) & (_FPCLASS_PINF | _FPCLASS_NINF)) != 0;
-}
-
-
-// Test if x is less than y and both nominal - usually defined in math.h
-int isless(double x, double y) {
- return isnan(x) || isnan(y) ? 0 : x < y;
-}
-
-
-// Test if x is greater than y and both nominal - usually defined in math.h
-int isgreater(double x, double y) {
- return isnan(x) || isnan(y) ? 0 : x > y;
-}
-
-
-// Classify floating point number - usually defined in math.h
-int fpclassify(double x) {
- // Use the MS-specific _fpclass() for classification.
- int flags = _fpclass(x);
-
- // Determine class. We cannot use a switch statement because
- // the _FPCLASS_ constants are defined as flags.
- if (flags & (_FPCLASS_PN | _FPCLASS_NN)) return FP_NORMAL;
- if (flags & (_FPCLASS_PZ | _FPCLASS_NZ)) return FP_ZERO;
- if (flags & (_FPCLASS_PD | _FPCLASS_ND)) return FP_SUBNORMAL;
- if (flags & (_FPCLASS_PINF | _FPCLASS_NINF)) return FP_INFINITE;
-
- // All cases should be covered by the code above.
- ASSERT(flags & (_FPCLASS_SNAN | _FPCLASS_QNAN));
- return FP_NAN;
-}
-
-
-// Test sign - usually defined in math.h
-int signbit(double x) {
- // We need to take care of the special case of both positive
- // and negative versions of zero.
- if (x == 0)
- return _fpclass(x) & _FPCLASS_NZ;
- else
- return x < 0;
-}
-
-
-// Case-insensitive bounded string comparisons. Use stricmp() on Win32. Usually
-// defined in strings.h.
-int strncasecmp(const char* s1, const char* s2, int n) {
- return _strnicmp(s1, s2, n);
-}
-
-#endif // _MSC_VER
-
-
-// Extra functions for MinGW. Most of these are the _s functions which are in
-// the Microsoft Visual Studio C++ CRT.
-#ifdef __MINGW32__
-
-int localtime_s(tm* out_tm, const time_t* time) {
- tm* posix_local_time_struct = localtime(time);
- if (posix_local_time_struct == NULL) return 1;
- *out_tm = *posix_local_time_struct;
- return 0;
-}
-
-
-// Not sure this the correct interpretation of _mkgmtime
-time_t _mkgmtime(tm* timeptr) {
- return mktime(timeptr);
-}
-
-
-int fopen_s(FILE** pFile, const char* filename, const char* mode) {
- *pFile = fopen(filename, mode);
- return *pFile != NULL ? 0 : 1;
-}
-
-
-int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count,
- const char* format, va_list argptr) {
- return _vsnprintf(buffer, sizeOfBuffer, format, argptr);
-}
-#define _TRUNCATE 0
-
-
-int strncpy_s(char* strDest, size_t numberOfElements,
- const char* strSource, size_t count) {
- strncpy(strDest, strSource, count);
- return 0;
-}
-
-
-inline void MemoryBarrier() {
- int barrier = 0;
- __asm__ __volatile__("xchgl %%eax,%0 ":"=r" (barrier));
-}
-
-#endif // __MINGW32__
-
-// Generate a pseudo-random number in the range 0-2^31-1. Usually
-// defined in stdlib.h. Missing in both Microsoft Visual Studio C++ and MinGW.
-int random() {
- return rand();
-}
-
-
-namespace v8 {
-namespace internal {
-
-double ceiling(double x) {
- return ceil(x);
-}
-
-
-static Mutex* limit_mutex = NULL;
-
-#if defined(V8_TARGET_ARCH_IA32)
-static OS::MemCopyFunction memcopy_function = NULL;
-static Mutex* memcopy_function_mutex = OS::CreateMutex();
-// Defined in codegen-ia32.cc.
-OS::MemCopyFunction CreateMemCopyFunction();
-
-// Copy memory area to disjoint memory area.
-void OS::MemCopy(void* dest, const void* src, size_t size) {
- if (memcopy_function == NULL) {
- ScopedLock lock(memcopy_function_mutex);
- if (memcopy_function == NULL) {
- OS::MemCopyFunction temp = CreateMemCopyFunction();
- MemoryBarrier();
- memcopy_function = temp;
- }
- }
- // Note: here we rely on dependent reads being ordered. This is true
- // on all architectures we currently support.
- (*memcopy_function)(dest, src, size);
-#ifdef DEBUG
- CHECK_EQ(0, memcmp(dest, src, size));
-#endif
-}
-#endif // V8_TARGET_ARCH_IA32
-
-#ifdef _WIN64
-typedef double (*ModuloFunction)(double, double);
-static ModuloFunction modulo_function = NULL;
-static Mutex* modulo_function_mutex = OS::CreateMutex();
-// Defined in codegen-x64.cc.
-ModuloFunction CreateModuloFunction();
-
-double modulo(double x, double y) {
- if (modulo_function == NULL) {
- ScopedLock lock(modulo_function_mutex);
- if (modulo_function == NULL) {
- ModuloFunction temp = CreateModuloFunction();
- MemoryBarrier();
- modulo_function = temp;
- }
- }
- // Note: here we rely on dependent reads being ordered. This is true
- // on all architectures we currently support.
- return (*modulo_function)(x, y);
-}
-#else // Win32
-
-double modulo(double x, double y) {
- // Workaround MS fmod bugs. ECMA-262 says:
- // dividend is finite and divisor is an infinity => result equals dividend
- // dividend is a zero and divisor is nonzero finite => result equals dividend
- if (!(isfinite(x) && (!isfinite(y) && !isnan(y))) &&
- !(x == 0 && (y != 0 && isfinite(y)))) {
- x = fmod(x, y);
- }
- return x;
-}
-
-#endif // _WIN64
-
-// ----------------------------------------------------------------------------
-// The Time class represents time on win32. A timestamp is represented as
-// a 64-bit integer in 100 nano-seconds since January 1, 1601 (UTC). JavaScript
-// timestamps are represented as a doubles in milliseconds since 00:00:00 UTC,
-// January 1, 1970.
-
-class Time {
- public:
- // Constructors.
- Time();
- explicit Time(double jstime);
- Time(int year, int mon, int day, int hour, int min, int sec);
-
- // Convert timestamp to JavaScript representation.
- double ToJSTime();
-
- // Set timestamp to current time.
- void SetToCurrentTime();
-
- // Returns the local timezone offset in milliseconds east of UTC. This is
- // the number of milliseconds you must add to UTC to get local time, i.e.
- // LocalOffset(CET) = 3600000 and LocalOffset(PST) = -28800000. This
- // routine also takes into account whether daylight saving is effect
- // at the time.
- int64_t LocalOffset();
-
- // Returns the daylight savings time offset for the time in milliseconds.
- int64_t DaylightSavingsOffset();
-
- // Returns a string identifying the current timezone for the
- // timestamp taking into account daylight saving.
- char* LocalTimezone();
-
- private:
- // Constants for time conversion.
- static const int64_t kTimeEpoc = 116444736000000000LL;
- static const int64_t kTimeScaler = 10000;
- static const int64_t kMsPerMinute = 60000;
-
- // Constants for timezone information.
- static const int kTzNameSize = 128;
- static const bool kShortTzNames = false;
-
- // Timezone information. We need to have static buffers for the
- // timezone names because we return pointers to these in
- // LocalTimezone().
- static bool tz_initialized_;
- static TIME_ZONE_INFORMATION tzinfo_;
- static char std_tz_name_[kTzNameSize];
- static char dst_tz_name_[kTzNameSize];
-
- // Initialize the timezone information (if not already done).
- static void TzSet();
-
- // Guess the name of the timezone from the bias.
- static const char* GuessTimezoneNameFromBias(int bias);
-
- // Return whether or not daylight savings time is in effect at this time.
- bool InDST();
-
- // Return the difference (in milliseconds) between this timestamp and
- // another timestamp.
- int64_t Diff(Time* other);
-
- // Accessor for FILETIME representation.
- FILETIME& ft() { return time_.ft_; }
-
- // Accessor for integer representation.
- int64_t& t() { return time_.t_; }
-
- // Although win32 uses 64-bit integers for representing timestamps,
- // these are packed into a FILETIME structure. The FILETIME structure
- // is just a struct representing a 64-bit integer. The TimeStamp union
- // allows access to both a FILETIME and an integer representation of
- // the timestamp.
- union TimeStamp {
- FILETIME ft_;
- int64_t t_;
- };
-
- TimeStamp time_;
-};
-
-// Static variables.
-bool Time::tz_initialized_ = false;
-TIME_ZONE_INFORMATION Time::tzinfo_;
-char Time::std_tz_name_[kTzNameSize];
-char Time::dst_tz_name_[kTzNameSize];
-
-
-// Initialize timestamp to start of epoc.
-Time::Time() {
- t() = 0;
-}
-
-
-// Initialize timestamp from a JavaScript timestamp.
-Time::Time(double jstime) {
- t() = static_cast<int64_t>(jstime) * kTimeScaler + kTimeEpoc;
-}
-
-
-// Initialize timestamp from date/time components.
-Time::Time(int year, int mon, int day, int hour, int min, int sec) {
- SYSTEMTIME st;
- st.wYear = year;
- st.wMonth = mon;
- st.wDay = day;
- st.wHour = hour;
- st.wMinute = min;
- st.wSecond = sec;
- st.wMilliseconds = 0;
- SystemTimeToFileTime(&st, &ft());
-}
-
-
-// Convert timestamp to JavaScript timestamp.
-double Time::ToJSTime() {
- return static_cast<double>((t() - kTimeEpoc) / kTimeScaler);
-}
-
-
-// Guess the name of the timezone from the bias.
-// The guess is very biased towards the northern hemisphere.
-const char* Time::GuessTimezoneNameFromBias(int bias) {
- static const int kHour = 60;
- switch (-bias) {
- case -9*kHour: return "Alaska";
- case -8*kHour: return "Pacific";
- case -7*kHour: return "Mountain";
- case -6*kHour: return "Central";
- case -5*kHour: return "Eastern";
- case -4*kHour: return "Atlantic";
- case 0*kHour: return "GMT";
- case +1*kHour: return "Central Europe";
- case +2*kHour: return "Eastern Europe";
- case +3*kHour: return "Russia";
- case +5*kHour + 30: return "India";
- case +8*kHour: return "China";
- case +9*kHour: return "Japan";
- case +12*kHour: return "New Zealand";
- default: return "Local";
- }
-}
-
-
-// Initialize timezone information. The timezone information is obtained from
-// windows. If we cannot get the timezone information we fall back to CET.
-// Please notice that this code is not thread-safe.
-void Time::TzSet() {
- // Just return if timezone information has already been initialized.
- if (tz_initialized_) return;
-
- // Initialize POSIX time zone data.
- _tzset();
- // Obtain timezone information from operating system.
- memset(&tzinfo_, 0, sizeof(tzinfo_));
- if (GetTimeZoneInformation(&tzinfo_) == TIME_ZONE_ID_INVALID) {
- // If we cannot get timezone information we fall back to CET.
- tzinfo_.Bias = -60;
- tzinfo_.StandardDate.wMonth = 10;
- tzinfo_.StandardDate.wDay = 5;
- tzinfo_.StandardDate.wHour = 3;
- tzinfo_.StandardBias = 0;
- tzinfo_.DaylightDate.wMonth = 3;
- tzinfo_.DaylightDate.wDay = 5;
- tzinfo_.DaylightDate.wHour = 2;
- tzinfo_.DaylightBias = -60;
- }
-
- // Make standard and DST timezone names.
- OS::SNPrintF(Vector<char>(std_tz_name_, kTzNameSize),
- "%S",
- tzinfo_.StandardName);
- std_tz_name_[kTzNameSize - 1] = '\0';
- OS::SNPrintF(Vector<char>(dst_tz_name_, kTzNameSize),
- "%S",
- tzinfo_.DaylightName);
- dst_tz_name_[kTzNameSize - 1] = '\0';
-
- // If OS returned empty string or resource id (like "@tzres.dll,-211")
- // simply guess the name from the UTC bias of the timezone.
- // To properly resolve the resource identifier requires a library load,
- // which is not possible in a sandbox.
- if (std_tz_name_[0] == '\0' || std_tz_name_[0] == '@') {
- OS::SNPrintF(Vector<char>(std_tz_name_, kTzNameSize - 1),
- "%s Standard Time",
- GuessTimezoneNameFromBias(tzinfo_.Bias));
- }
- if (dst_tz_name_[0] == '\0' || dst_tz_name_[0] == '@') {
- OS::SNPrintF(Vector<char>(dst_tz_name_, kTzNameSize - 1),
- "%s Daylight Time",
- GuessTimezoneNameFromBias(tzinfo_.Bias));
- }
-
- // Timezone information initialized.
- tz_initialized_ = true;
-}
-
-
-// Return the difference in milliseconds between this and another timestamp.
-int64_t Time::Diff(Time* other) {
- return (t() - other->t()) / kTimeScaler;
-}
-
-
-// Set timestamp to current time.
-void Time::SetToCurrentTime() {
- // The default GetSystemTimeAsFileTime has a ~15.5ms resolution.
- // Because we're fast, we like fast timers which have at least a
- // 1ms resolution.
- //
- // timeGetTime() provides 1ms granularity when combined with
- // timeBeginPeriod(). If the host application for v8 wants fast
- // timers, it can use timeBeginPeriod to increase the resolution.
- //
- // Using timeGetTime() has a drawback because it is a 32bit value
- // and hence rolls-over every ~49days.
- //
- // To use the clock, we use GetSystemTimeAsFileTime as our base;
- // and then use timeGetTime to extrapolate current time from the
- // start time. To deal with rollovers, we resync the clock
- // any time when more than kMaxClockElapsedTime has passed or
- // whenever timeGetTime creates a rollover.
-
- static bool initialized = false;
- static TimeStamp init_time;
- static DWORD init_ticks;
- static const int64_t kHundredNanosecondsPerSecond = 10000000;
- static const int64_t kMaxClockElapsedTime =
- 60*kHundredNanosecondsPerSecond; // 1 minute
-
- // If we are uninitialized, we need to resync the clock.
- bool needs_resync = !initialized;
-
- // Get the current time.
- TimeStamp time_now;
- GetSystemTimeAsFileTime(&time_now.ft_);
- DWORD ticks_now = timeGetTime();
-
- // Check if we need to resync due to clock rollover.
- needs_resync |= ticks_now < init_ticks;
-
- // Check if we need to resync due to elapsed time.
- needs_resync |= (time_now.t_ - init_time.t_) > kMaxClockElapsedTime;
-
- // Resync the clock if necessary.
- if (needs_resync) {
- GetSystemTimeAsFileTime(&init_time.ft_);
- init_ticks = ticks_now = timeGetTime();
- initialized = true;
- }
-
- // Finally, compute the actual time. Why is this so hard.
- DWORD elapsed = ticks_now - init_ticks;
- this->time_.t_ = init_time.t_ + (static_cast<int64_t>(elapsed) * 10000);
-}
-
-
-// Return the local timezone offset in milliseconds east of UTC. This
-// takes into account whether daylight saving is in effect at the time.
-// Only times in the 32-bit Unix range may be passed to this function.
-// Also, adding the time-zone offset to the input must not overflow.
-// The function EquivalentTime() in date.js guarantees this.
-int64_t Time::LocalOffset() {
- // Initialize timezone information, if needed.
- TzSet();
-
- Time rounded_to_second(*this);
- rounded_to_second.t() = rounded_to_second.t() / 1000 / kTimeScaler *
- 1000 * kTimeScaler;
- // Convert to local time using POSIX localtime function.
- // Windows XP Service Pack 3 made SystemTimeToTzSpecificLocalTime()
- // very slow. Other browsers use localtime().
-
- // Convert from JavaScript milliseconds past 1/1/1970 0:00:00 to
- // POSIX seconds past 1/1/1970 0:00:00.
- double unchecked_posix_time = rounded_to_second.ToJSTime() / 1000;
- if (unchecked_posix_time > INT_MAX || unchecked_posix_time < 0) {
- return 0;
- }
- // Because _USE_32BIT_TIME_T is defined, time_t is a 32-bit int.
- time_t posix_time = static_cast<time_t>(unchecked_posix_time);
-
- // Convert to local time, as struct with fields for day, hour, year, etc.
- tm posix_local_time_struct;
- if (localtime_s(&posix_local_time_struct, &posix_time)) return 0;
- // Convert local time in struct to POSIX time as if it were a UTC time.
- time_t local_posix_time = _mkgmtime(&posix_local_time_struct);
- Time localtime(1000.0 * local_posix_time);
-
- return localtime.Diff(&rounded_to_second);
-}
-
-
-// Return whether or not daylight savings time is in effect at this time.
-bool Time::InDST() {
- // Initialize timezone information, if needed.
- TzSet();
-
- // Determine if DST is in effect at the specified time.
- bool in_dst = false;
- if (tzinfo_.StandardDate.wMonth != 0 || tzinfo_.DaylightDate.wMonth != 0) {
- // Get the local timezone offset for the timestamp in milliseconds.
- int64_t offset = LocalOffset();
-
- // Compute the offset for DST. The bias parameters in the timezone info
- // are specified in minutes. These must be converted to milliseconds.
- int64_t dstofs = -(tzinfo_.Bias + tzinfo_.DaylightBias) * kMsPerMinute;
-
- // If the local time offset equals the timezone bias plus the daylight
- // bias then DST is in effect.
- in_dst = offset == dstofs;
- }
-
- return in_dst;
-}
-
-
-// Return the daylight savings time offset for this time.
-int64_t Time::DaylightSavingsOffset() {
- return InDST() ? 60 * kMsPerMinute : 0;
-}
-
-
-// Returns a string identifying the current timezone for the
-// timestamp taking into account daylight saving.
-char* Time::LocalTimezone() {
- // Return the standard or DST time zone name based on whether daylight
- // saving is in effect at the given time.
- return InDST() ? dst_tz_name_ : std_tz_name_;
-}
-
-
-void OS::Setup() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srand(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
-// Returns the accumulated user time for thread.
-int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
- FILETIME dummy;
- uint64_t usertime;
-
- // Get the amount of time that the thread has executed in user mode.
- if (!GetThreadTimes(GetCurrentThread(), &dummy, &dummy, &dummy,
- reinterpret_cast<FILETIME*>(&usertime))) return -1;
-
- // Adjust the resolution to micro-seconds.
- usertime /= 10;
-
- // Convert to seconds and microseconds
- *secs = static_cast<uint32_t>(usertime / 1000000);
- *usecs = static_cast<uint32_t>(usertime % 1000000);
- return 0;
-}
-
-
-// Returns current time as the number of milliseconds since
-// 00:00:00 UTC, January 1, 1970.
-double OS::TimeCurrentMillis() {
- Time t;
- t.SetToCurrentTime();
- return t.ToJSTime();
-}
-
-// Returns the tickcounter based on timeGetTime.
-int64_t OS::Ticks() {
- return timeGetTime() * 1000; // Convert to microseconds.
-}
-
-
-// Returns a string identifying the current timezone taking into
-// account daylight saving.
-const char* OS::LocalTimezone(double time) {
- return Time(time).LocalTimezone();
-}
-
-
-// Returns the local time offset in milliseconds east of UTC without
-// taking daylight savings time into account.
-double OS::LocalTimeOffset() {
- // Use current time, rounded to the millisecond.
- Time t(TimeCurrentMillis());
- // Time::LocalOffset inlcudes any daylight savings offset, so subtract it.
- return static_cast<double>(t.LocalOffset() - t.DaylightSavingsOffset());
-}
-
-
-// Returns the daylight savings offset in milliseconds for the given
-// time.
-double OS::DaylightSavingsOffset(double time) {
- int64_t offset = Time(time).DaylightSavingsOffset();
- return static_cast<double>(offset);
-}
-
-
-int OS::GetLastError() {
- return ::GetLastError();
-}
-
-
-// ----------------------------------------------------------------------------
-// Win32 console output.
-//
-// If a Win32 application is linked as a console application it has a normal
-// standard output and standard error. In this case normal printf works fine
-// for output. However, if the application is linked as a GUI application,
-// the process doesn't have a console, and therefore (debugging) output is lost.
-// This is the case if we are embedded in a windows program (like a browser).
-// In order to be able to get debug output in this case the the debugging
-// facility using OutputDebugString. This output goes to the active debugger
-// for the process (if any). Else the output can be monitored using DBMON.EXE.
-
-enum OutputMode {
- UNKNOWN, // Output method has not yet been determined.
- CONSOLE, // Output is written to stdout.
- ODS // Output is written to debug facility.
-};
-
-static OutputMode output_mode = UNKNOWN; // Current output mode.
-
-
-// Determine if the process has a console for output.
-static bool HasConsole() {
- // Only check the first time. Eventual race conditions are not a problem,
- // because all threads will eventually determine the same mode.
- if (output_mode == UNKNOWN) {
- // We cannot just check that the standard output is attached to a console
- // because this would fail if output is redirected to a file. Therefore we
- // say that a process does not have an output console if either the
- // standard output handle is invalid or its file type is unknown.
- if (GetStdHandle(STD_OUTPUT_HANDLE) != INVALID_HANDLE_VALUE &&
- GetFileType(GetStdHandle(STD_OUTPUT_HANDLE)) != FILE_TYPE_UNKNOWN)
- output_mode = CONSOLE;
- else
- output_mode = ODS;
- }
- return output_mode == CONSOLE;
-}
-
-
-static void VPrintHelper(FILE* stream, const char* format, va_list args) {
- if (HasConsole()) {
- vfprintf(stream, format, args);
- } else {
- // It is important to use safe print here in order to avoid
- // overflowing the buffer. We might truncate the output, but this
- // does not crash.
- EmbeddedVector<char, 4096> buffer;
- OS::VSNPrintF(buffer, format, args);
- OutputDebugStringA(buffer.start());
- }
-}
-
-
-FILE* OS::FOpen(const char* path, const char* mode) {
- FILE* result;
- if (fopen_s(&result, path, mode) == 0) {
- return result;
- } else {
- return NULL;
- }
-}
-
-
-bool OS::Remove(const char* path) {
- return (DeleteFileA(path) != 0);
-}
-
-
-// Open log file in binary mode to avoid /n -> /r/n conversion.
-const char* const OS::LogFileOpenMode = "wb";
-
-
-// Print (debug) message to console.
-void OS::Print(const char* format, ...) {
- va_list args;
- va_start(args, format);
- VPrint(format, args);
- va_end(args);
-}
-
-
-void OS::VPrint(const char* format, va_list args) {
- VPrintHelper(stdout, format, args);
-}
-
-
-void OS::FPrint(FILE* out, const char* format, ...) {
- va_list args;
- va_start(args, format);
- VFPrint(out, format, args);
- va_end(args);
-}
-
-
-void OS::VFPrint(FILE* out, const char* format, va_list args) {
- VPrintHelper(out, format, args);
-}
-
-
-// Print error message to console.
-void OS::PrintError(const char* format, ...) {
- va_list args;
- va_start(args, format);
- VPrintError(format, args);
- va_end(args);
-}
-
-
-void OS::VPrintError(const char* format, va_list args) {
- VPrintHelper(stderr, format, args);
-}
-
-
-int OS::SNPrintF(Vector<char> str, const char* format, ...) {
- va_list args;
- va_start(args, format);
- int result = VSNPrintF(str, format, args);
- va_end(args);
- return result;
-}
-
-
-int OS::VSNPrintF(Vector<char> str, const char* format, va_list args) {
- int n = _vsnprintf_s(str.start(), str.length(), _TRUNCATE, format, args);
- // Make sure to zero-terminate the string if the output was
- // truncated or if there was an error.
- if (n < 0 || n >= str.length()) {
- if (str.length() > 0)
- str[str.length() - 1] = '\0';
- return -1;
- } else {
- return n;
- }
-}
-
-
-char* OS::StrChr(char* str, int c) {
- return const_cast<char*>(strchr(str, c));
-}
-
-
-void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) {
- // Use _TRUNCATE or strncpy_s crashes (by design) if buffer is too small.
- size_t buffer_size = static_cast<size_t>(dest.length());
- if (n + 1 > buffer_size) // count for trailing '\0'
- n = _TRUNCATE;
- int result = strncpy_s(dest.start(), dest.length(), src, n);
- USE(result);
- ASSERT(result == 0 || (n == _TRUNCATE && result == STRUNCATE));
-}
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, ie, not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* pointer) {
- if (pointer < lowest_ever_allocated || pointer >= highest_ever_allocated)
- return true;
- // Ask the Windows API
- if (IsBadWritePtr(pointer, 1))
- return true;
- return false;
-}
-
-
-// Get the system's page size used by VirtualAlloc() or the next power
-// of two. The reason for always returning a power of two is that the
-// rounding up in OS::Allocate expects that.
-static size_t GetPageSize() {
- static size_t page_size = 0;
- if (page_size == 0) {
- SYSTEM_INFO info;
- GetSystemInfo(&info);
- page_size = RoundUpToPowerOf2(info.dwPageSize);
- }
- return page_size;
-}
-
-
-// The allocation alignment is the guaranteed alignment for
-// VirtualAlloc'ed blocks of memory.
-size_t OS::AllocateAlignment() {
- static size_t allocate_alignment = 0;
- if (allocate_alignment == 0) {
- SYSTEM_INFO info;
- GetSystemInfo(&info);
- allocate_alignment = info.dwAllocationGranularity;
- }
- return allocate_alignment;
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
- // The address range used to randomize RWX allocations in OS::Allocate
- // Try not to map pages into the default range that windows loads DLLs
- // Use a multiple of 64k to prevent committing unused memory.
- // Note: This does not guarantee RWX regions will be within the
- // range kAllocationRandomAddressMin to kAllocationRandomAddressMax
-#ifdef V8_HOST_ARCH_64_BIT
- static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
- static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
-#else
- static const intptr_t kAllocationRandomAddressMin = 0x04000000;
- static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
-#endif
-
- // VirtualAlloc rounds allocated size to page size automatically.
- size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
- intptr_t address = 0;
-
- // Windows XP SP2 allows Data Excution Prevention (DEP).
- int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-
- // For exectutable pages try and randomize the allocation address
- if (prot == PAGE_EXECUTE_READWRITE &&
- msize >= static_cast<size_t>(Page::kPageSize)) {
- address = (V8::RandomPrivate(Isolate::Current()) << kPageSizeBits)
- | kAllocationRandomAddressMin;
- address &= kAllocationRandomAddressMax;
- }
-
- LPVOID mbase = VirtualAlloc(reinterpret_cast<void *>(address),
- msize,
- MEM_COMMIT | MEM_RESERVE,
- prot);
- if (mbase == NULL && address != 0)
- mbase = VirtualAlloc(NULL, msize, MEM_COMMIT | MEM_RESERVE, prot);
-
- if (mbase == NULL) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "VirtualAlloc failed"));
- return NULL;
- }
-
- ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment()));
-
- *allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, static_cast<int>(msize));
- return mbase;
-}
-
-
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): VirtualFree has a return value which is ignored here.
- VirtualFree(address, 0, MEM_RELEASE);
- USE(size);
-}
-
-
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
- // TODO(1240712): VirtualProtect has a return value which is ignored here.
- DWORD old_protect;
- VirtualProtect(address, size, PAGE_READONLY, &old_protect);
-}
-
-
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
- // TODO(1240712): VirtualProtect has a return value which is ignored here.
- DWORD new_protect = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
- DWORD old_protect;
- VirtualProtect(address, size, new_protect, &old_protect);
-}
-
-#endif
-
-
-void OS::Sleep(int milliseconds) {
- ::Sleep(milliseconds);
-}
-
-
-void OS::Abort() {
- if (!IsDebuggerPresent()) {
-#ifdef _MSC_VER
- // Make the MSVCRT do a silent abort.
- _set_abort_behavior(0, _WRITE_ABORT_MSG);
- _set_abort_behavior(0, _CALL_REPORTFAULT);
-#endif // _MSC_VER
- abort();
- } else {
- DebugBreak();
- }
-}
-
-
-void OS::DebugBreak() {
-#ifdef _MSC_VER
- __debugbreak();
-#else
- ::DebugBreak();
-#endif
-}
-
-
-class Win32MemoryMappedFile : public OS::MemoryMappedFile {
- public:
- Win32MemoryMappedFile(HANDLE file,
- HANDLE file_mapping,
- void* memory,
- int size)
- : file_(file),
- file_mapping_(file_mapping),
- memory_(memory),
- size_(size) { }
- virtual ~Win32MemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- HANDLE file_;
- HANDLE file_mapping_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- // Open a physical file
- HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
- FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL);
- if (file == INVALID_HANDLE_VALUE) return NULL;
-
- int size = static_cast<int>(GetFileSize(file, NULL));
-
- // Create a file mapping for the physical file
- HANDLE file_mapping = CreateFileMapping(file, NULL,
- PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
- if (file_mapping == NULL) return NULL;
-
- // Map a view of the file into memory
- void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
- return new Win32MemoryMappedFile(file, file_mapping, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- // Open a physical file
- HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
- FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, 0, NULL);
- if (file == NULL) return NULL;
- // Create a file mapping for the physical file
- HANDLE file_mapping = CreateFileMapping(file, NULL,
- PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
- if (file_mapping == NULL) return NULL;
- // Map a view of the file into memory
- void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
- if (memory) memmove(memory, initial, size);
- return new Win32MemoryMappedFile(file, file_mapping, memory, size);
-}
-
-
-Win32MemoryMappedFile::~Win32MemoryMappedFile() {
- if (memory_ != NULL)
- UnmapViewOfFile(memory_);
- CloseHandle(file_mapping_);
- CloseHandle(file_);
-}
-
-
-// The following code loads functions defined in DbhHelp.h and TlHelp32.h
-// dynamically. This is to avoid being depending on dbghelp.dll and
-// tlhelp32.dll when running (the functions in tlhelp32.dll have been moved to
-// kernel32.dll at some point so loading functions defines in TlHelp32.h
-// dynamically might not be necessary any more - for some versions of Windows?).
-
-// Function pointers to functions dynamically loaded from dbghelp.dll.
-#define DBGHELP_FUNCTION_LIST(V) \
- V(SymInitialize) \
- V(SymGetOptions) \
- V(SymSetOptions) \
- V(SymGetSearchPath) \
- V(SymLoadModule64) \
- V(StackWalk64) \
- V(SymGetSymFromAddr64) \
- V(SymGetLineFromAddr64) \
- V(SymFunctionTableAccess64) \
- V(SymGetModuleBase64)
-
-// Function pointers to functions dynamically loaded from dbghelp.dll.
-#define TLHELP32_FUNCTION_LIST(V) \
- V(CreateToolhelp32Snapshot) \
- V(Module32FirstW) \
- V(Module32NextW)
-
-// Define the decoration to use for the type and variable name used for
-// dynamically loaded DLL function..
-#define DLL_FUNC_TYPE(name) _##name##_
-#define DLL_FUNC_VAR(name) _##name
-
-// Define the type for each dynamically loaded DLL function. The function
-// definitions are copied from DbgHelp.h and TlHelp32.h. The IN and VOID macros
-// from the Windows include files are redefined here to have the function
-// definitions to be as close to the ones in the original .h files as possible.
-#ifndef IN
-#define IN
-#endif
-#ifndef VOID
-#define VOID void
-#endif
-
-// DbgHelp isn't supported on MinGW yet
-#ifndef __MINGW32__
-// DbgHelp.h functions.
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymInitialize))(IN HANDLE hProcess,
- IN PSTR UserSearchPath,
- IN BOOL fInvadeProcess);
-typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymGetOptions))(VOID);
-typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymSetOptions))(IN DWORD SymOptions);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSearchPath))(
- IN HANDLE hProcess,
- OUT PSTR SearchPath,
- IN DWORD SearchPathLength);
-typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymLoadModule64))(
- IN HANDLE hProcess,
- IN HANDLE hFile,
- IN PSTR ImageName,
- IN PSTR ModuleName,
- IN DWORD64 BaseOfDll,
- IN DWORD SizeOfDll);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(StackWalk64))(
- DWORD MachineType,
- HANDLE hProcess,
- HANDLE hThread,
- LPSTACKFRAME64 StackFrame,
- PVOID ContextRecord,
- PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
- PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
- PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
- PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSymFromAddr64))(
- IN HANDLE hProcess,
- IN DWORD64 qwAddr,
- OUT PDWORD64 pdwDisplacement,
- OUT PIMAGEHLP_SYMBOL64 Symbol);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetLineFromAddr64))(
- IN HANDLE hProcess,
- IN DWORD64 qwAddr,
- OUT PDWORD pdwDisplacement,
- OUT PIMAGEHLP_LINE64 Line64);
-// DbgHelp.h typedefs. Implementation found in dbghelp.dll.
-typedef PVOID (__stdcall *DLL_FUNC_TYPE(SymFunctionTableAccess64))(
- HANDLE hProcess,
- DWORD64 AddrBase); // DbgHelp.h typedef PFUNCTION_TABLE_ACCESS_ROUTINE64
-typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymGetModuleBase64))(
- HANDLE hProcess,
- DWORD64 AddrBase); // DbgHelp.h typedef PGET_MODULE_BASE_ROUTINE64
-
-// TlHelp32.h functions.
-typedef HANDLE (__stdcall *DLL_FUNC_TYPE(CreateToolhelp32Snapshot))(
- DWORD dwFlags,
- DWORD th32ProcessID);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32FirstW))(HANDLE hSnapshot,
- LPMODULEENTRY32W lpme);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32NextW))(HANDLE hSnapshot,
- LPMODULEENTRY32W lpme);
-
-#undef IN
-#undef VOID
-
-// Declare a variable for each dynamically loaded DLL function.
-#define DEF_DLL_FUNCTION(name) DLL_FUNC_TYPE(name) DLL_FUNC_VAR(name) = NULL;
-DBGHELP_FUNCTION_LIST(DEF_DLL_FUNCTION)
-TLHELP32_FUNCTION_LIST(DEF_DLL_FUNCTION)
-#undef DEF_DLL_FUNCTION
-
-// Load the functions. This function has a lot of "ugly" macros in order to
-// keep down code duplication.
-
-static bool LoadDbgHelpAndTlHelp32() {
- static bool dbghelp_loaded = false;
-
- if (dbghelp_loaded) return true;
-
- HMODULE module;
-
- // Load functions from the dbghelp.dll module.
- module = LoadLibrary(TEXT("dbghelp.dll"));
- if (module == NULL) {
- return false;
- }
-
-#define LOAD_DLL_FUNC(name) \
- DLL_FUNC_VAR(name) = \
- reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name));
-
-DBGHELP_FUNCTION_LIST(LOAD_DLL_FUNC)
-
-#undef LOAD_DLL_FUNC
-
- // Load functions from the kernel32.dll module (the TlHelp32.h function used
- // to be in tlhelp32.dll but are now moved to kernel32.dll).
- module = LoadLibrary(TEXT("kernel32.dll"));
- if (module == NULL) {
- return false;
- }
-
-#define LOAD_DLL_FUNC(name) \
- DLL_FUNC_VAR(name) = \
- reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name));
-
-TLHELP32_FUNCTION_LIST(LOAD_DLL_FUNC)
-
-#undef LOAD_DLL_FUNC
-
- // Check that all functions where loaded.
- bool result =
-#define DLL_FUNC_LOADED(name) (DLL_FUNC_VAR(name) != NULL) &&
-
-DBGHELP_FUNCTION_LIST(DLL_FUNC_LOADED)
-TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED)
-
-#undef DLL_FUNC_LOADED
- true;
-
- dbghelp_loaded = result;
- return result;
- // NOTE: The modules are never unloaded and will stay around until the
- // application is closed.
-}
-
-
-// Load the symbols for generating stack traces.
-static bool LoadSymbols(HANDLE process_handle) {
- static bool symbols_loaded = false;
-
- if (symbols_loaded) return true;
-
- BOOL ok;
-
- // Initialize the symbol engine.
- ok = _SymInitialize(process_handle, // hProcess
- NULL, // UserSearchPath
- false); // fInvadeProcess
- if (!ok) return false;
-
- DWORD options = _SymGetOptions();
- options |= SYMOPT_LOAD_LINES;
- options |= SYMOPT_FAIL_CRITICAL_ERRORS;
- options = _SymSetOptions(options);
-
- char buf[OS::kStackWalkMaxNameLen] = {0};
- ok = _SymGetSearchPath(process_handle, buf, OS::kStackWalkMaxNameLen);
- if (!ok) {
- int err = GetLastError();
- PrintF("%d\n", err);
- return false;
- }
-
- HANDLE snapshot = _CreateToolhelp32Snapshot(
- TH32CS_SNAPMODULE, // dwFlags
- GetCurrentProcessId()); // th32ProcessId
- if (snapshot == INVALID_HANDLE_VALUE) return false;
- MODULEENTRY32W module_entry;
- module_entry.dwSize = sizeof(module_entry); // Set the size of the structure.
- BOOL cont = _Module32FirstW(snapshot, &module_entry);
- while (cont) {
- DWORD64 base;
- // NOTE the SymLoadModule64 function has the peculiarity of accepting a
- // both unicode and ASCII strings even though the parameter is PSTR.
- base = _SymLoadModule64(
- process_handle, // hProcess
- 0, // hFile
- reinterpret_cast<PSTR>(module_entry.szExePath), // ImageName
- reinterpret_cast<PSTR>(module_entry.szModule), // ModuleName
- reinterpret_cast<DWORD64>(module_entry.modBaseAddr), // BaseOfDll
- module_entry.modBaseSize); // SizeOfDll
- if (base == 0) {
- int err = GetLastError();
- if (err != ERROR_MOD_NOT_FOUND &&
- err != ERROR_INVALID_HANDLE) return false;
- }
- LOG(i::Isolate::Current(),
- SharedLibraryEvent(
- module_entry.szExePath,
- reinterpret_cast<unsigned int>(module_entry.modBaseAddr),
- reinterpret_cast<unsigned int>(module_entry.modBaseAddr +
- module_entry.modBaseSize)));
- cont = _Module32NextW(snapshot, &module_entry);
- }
- CloseHandle(snapshot);
-
- symbols_loaded = true;
- return true;
-}
-
-
-void OS::LogSharedLibraryAddresses() {
- // SharedLibraryEvents are logged when loading symbol information.
- // Only the shared libraries loaded at the time of the call to
- // LogSharedLibraryAddresses are logged. DLLs loaded after
- // initialization are not accounted for.
- if (!LoadDbgHelpAndTlHelp32()) return;
- HANDLE process_handle = GetCurrentProcess();
- LoadSymbols(process_handle);
-}
-
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-// Walk the stack using the facilities in dbghelp.dll and tlhelp32.dll
-
-// Switch off warning 4748 (/GS can not protect parameters and local variables
-// from local buffer overrun because optimizations are disabled in function) as
-// it is triggered by the use of inline assembler.
-#pragma warning(push)
-#pragma warning(disable : 4748)
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- BOOL ok;
-
- // Load the required functions from DLL's.
- if (!LoadDbgHelpAndTlHelp32()) return kStackWalkError;
-
- // Get the process and thread handles.
- HANDLE process_handle = GetCurrentProcess();
- HANDLE thread_handle = GetCurrentThread();
-
- // Read the symbols.
- if (!LoadSymbols(process_handle)) return kStackWalkError;
-
- // Capture current context.
- CONTEXT context;
- RtlCaptureContext(&context);
-
- // Initialize the stack walking
- STACKFRAME64 stack_frame;
- memset(&stack_frame, 0, sizeof(stack_frame));
-#ifdef _WIN64
- stack_frame.AddrPC.Offset = context.Rip;
- stack_frame.AddrFrame.Offset = context.Rbp;
- stack_frame.AddrStack.Offset = context.Rsp;
-#else
- stack_frame.AddrPC.Offset = context.Eip;
- stack_frame.AddrFrame.Offset = context.Ebp;
- stack_frame.AddrStack.Offset = context.Esp;
-#endif
- stack_frame.AddrPC.Mode = AddrModeFlat;
- stack_frame.AddrFrame.Mode = AddrModeFlat;
- stack_frame.AddrStack.Mode = AddrModeFlat;
- int frames_count = 0;
-
- // Collect stack frames.
- int frames_size = frames.length();
- while (frames_count < frames_size) {
- ok = _StackWalk64(
- IMAGE_FILE_MACHINE_I386, // MachineType
- process_handle, // hProcess
- thread_handle, // hThread
- &stack_frame, // StackFrame
- &context, // ContextRecord
- NULL, // ReadMemoryRoutine
- _SymFunctionTableAccess64, // FunctionTableAccessRoutine
- _SymGetModuleBase64, // GetModuleBaseRoutine
- NULL); // TranslateAddress
- if (!ok) break;
-
- // Store the address.
- ASSERT((stack_frame.AddrPC.Offset >> 32) == 0); // 32-bit address.
- frames[frames_count].address =
- reinterpret_cast<void*>(stack_frame.AddrPC.Offset);
-
- // Try to locate a symbol for this frame.
- DWORD64 symbol_displacement;
- SmartPointer<IMAGEHLP_SYMBOL64> symbol(
- NewArray<IMAGEHLP_SYMBOL64>(kStackWalkMaxNameLen));
- if (symbol.is_empty()) return kStackWalkError; // Out of memory.
- memset(*symbol, 0, sizeof(IMAGEHLP_SYMBOL64) + kStackWalkMaxNameLen);
- (*symbol)->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64);
- (*symbol)->MaxNameLength = kStackWalkMaxNameLen;
- ok = _SymGetSymFromAddr64(process_handle, // hProcess
- stack_frame.AddrPC.Offset, // Address
- &symbol_displacement, // Displacement
- *symbol); // Symbol
- if (ok) {
- // Try to locate more source information for the symbol.
- IMAGEHLP_LINE64 Line;
- memset(&Line, 0, sizeof(Line));
- Line.SizeOfStruct = sizeof(Line);
- DWORD line_displacement;
- ok = _SymGetLineFromAddr64(
- process_handle, // hProcess
- stack_frame.AddrPC.Offset, // dwAddr
- &line_displacement, // pdwDisplacement
- &Line); // Line
- // Format a text representation of the frame based on the information
- // available.
- if (ok) {
- SNPrintF(MutableCStrVector(frames[frames_count].text,
- kStackWalkMaxTextLen),
- "%s %s:%d:%d",
- (*symbol)->Name, Line.FileName, Line.LineNumber,
- line_displacement);
- } else {
- SNPrintF(MutableCStrVector(frames[frames_count].text,
- kStackWalkMaxTextLen),
- "%s",
- (*symbol)->Name);
- }
- // Make sure line termination is in place.
- frames[frames_count].text[kStackWalkMaxTextLen - 1] = '\0';
- } else {
- // No text representation of this frame
- frames[frames_count].text[0] = '\0';
-
- // Continue if we are just missing a module (for non C/C++ frames a
- // module will never be found).
- int err = GetLastError();
- if (err != ERROR_MOD_NOT_FOUND) {
- break;
- }
- }
-
- frames_count++;
- }
-
- // Return the number of frames filled in.
- return frames_count;
-}
-
-// Restore warnings to previous settings.
-#pragma warning(pop)
-
-#else // __MINGW32__
-void OS::LogSharedLibraryAddresses() { }
-void OS::SignalCodeMovingGC() { }
-int OS::StackWalk(Vector<OS::StackFrame> frames) { return 0; }
-#endif // __MINGW32__
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // Windows runs on anything.
-}
-
-
-double OS::nan_value() {
-#ifdef _MSC_VER
- // Positive Quiet NaN with no payload (aka. Indeterminate) has all bits
- // in mask set, so value equals mask.
- static const __int64 nanval = kQuietNaNMask;
- return *reinterpret_cast<const double*>(&nanval);
-#else // _MSC_VER
- return NAN;
-#endif // _MSC_VER
-}
-
-
-int OS::ActivationFrameAlignment() {
-#ifdef _WIN64
- return 16; // Windows 64-bit ABI requires the stack to be 16-byte aligned.
-#else
- return 8; // Floating-point math runs faster with 8-byte alignment.
-#endif
-}
-
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
- size_ = size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- if (0 == VirtualFree(address(), 0, MEM_RELEASE)) address_ = NULL;
- }
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
- if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
- return false;
- }
-
- UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
- return true;
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- ASSERT(IsReserved());
- return VirtualFree(address, size, MEM_DECOMMIT) != false;
-}
-
-
-// ----------------------------------------------------------------------------
-// Win32 thread support.
-
-// Definition of invalid thread handle and id.
-static const HANDLE kNoThread = INVALID_HANDLE_VALUE;
-static const DWORD kNoThreadId = 0;
-
-
-class ThreadHandle::PlatformData : public Malloced {
- public:
- explicit PlatformData(ThreadHandle::Kind kind) {
- Initialize(kind);
- }
-
- void Initialize(ThreadHandle::Kind kind) {
- switch (kind) {
- case ThreadHandle::SELF: tid_ = GetCurrentThreadId(); break;
- case ThreadHandle::INVALID: tid_ = kNoThreadId; break;
- }
- }
- DWORD tid_; // Win32 thread identifier.
-};
-
-
-// Entry point for threads. The supplied argument is a pointer to the thread
-// object. The entry function dispatches to the run method in the thread
-// object. It is important that this function has __stdcall calling
-// convention.
-static unsigned int __stdcall ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the last parameter to _beginthreadex() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->thread_handle_data()->tid_ = GetCurrentThreadId();
- Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
- thread->Run();
- return 0;
-}
-
-
-// Initialize thread handle to invalid handle.
-ThreadHandle::ThreadHandle(ThreadHandle::Kind kind) {
- data_ = new PlatformData(kind);
-}
-
-
-ThreadHandle::~ThreadHandle() {
- delete data_;
-}
-
-
-// The thread is running if it has the same id as the current thread.
-bool ThreadHandle::IsSelf() const {
- return GetCurrentThreadId() == data_->tid_;
-}
-
-
-// Test for invalid thread handle.
-bool ThreadHandle::IsValid() const {
- return data_->tid_ != kNoThreadId;
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
- data_->Initialize(kind);
-}
-
-
-class Thread::PlatformData : public Malloced {
- public:
- explicit PlatformData(HANDLE thread) : thread_(thread) {}
- HANDLE thread_;
-};
-
-
-// Initialize a Win32 thread object. The thread has an invalid thread
-// handle until it is started.
-
-Thread::Thread(Isolate* isolate, const Options& options)
- : ThreadHandle(ThreadHandle::INVALID),
- isolate_(isolate),
- stack_size_(options.stack_size) {
- data_ = new PlatformData(kNoThread);
- set_name(options.name);
-}
-
-
-Thread::Thread(Isolate* isolate, const char* name)
- : ThreadHandle(ThreadHandle::INVALID),
- isolate_(isolate),
- stack_size_(0) {
- data_ = new PlatformData(kNoThread);
- set_name(name);
-}
-
-
-void Thread::set_name(const char* name) {
- OS::StrNCpy(Vector<char>(name_, sizeof(name_)), name, strlen(name));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-// Close our own handle for the thread.
-Thread::~Thread() {
- if (data_->thread_ != kNoThread) CloseHandle(data_->thread_);
- delete data_;
-}
-
-
-// Create a new thread. It is important to use _beginthreadex() instead of
-// the Win32 function CreateThread(), because the CreateThread() does not
-// initialize thread specific structures in the C runtime library.
-void Thread::Start() {
- data_->thread_ = reinterpret_cast<HANDLE>(
- _beginthreadex(NULL,
- static_cast<unsigned>(stack_size_),
- ThreadEntry,
- this,
- 0,
- reinterpret_cast<unsigned int*>(
- &thread_handle_data()->tid_)));
- ASSERT(IsValid());
-}
-
-
-// Wait for thread to terminate.
-void Thread::Join() {
- WaitForSingleObject(data_->thread_, INFINITE);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- DWORD result = TlsAlloc();
- ASSERT(result != TLS_OUT_OF_INDEXES);
- return static_cast<LocalStorageKey>(result);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- BOOL result = TlsFree(static_cast<DWORD>(key));
- USE(result);
- ASSERT(result);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- return TlsGetValue(static_cast<DWORD>(key));
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- BOOL result = TlsSetValue(static_cast<DWORD>(key), value);
- USE(result);
- ASSERT(result);
-}
-
-
-
-void Thread::YieldCPU() {
- Sleep(0);
-}
-
-
-// ----------------------------------------------------------------------------
-// Win32 mutex support.
-//
-// On Win32 mutexes are implemented using CRITICAL_SECTION objects. These are
-// faster than Win32 Mutex objects because they are implemented using user mode
-// atomic instructions. Therefore we only do ring transitions if there is lock
-// contention.
-
-class Win32Mutex : public Mutex {
- public:
-
- Win32Mutex() { InitializeCriticalSection(&cs_); }
-
- virtual ~Win32Mutex() { DeleteCriticalSection(&cs_); }
-
- virtual int Lock() {
- EnterCriticalSection(&cs_);
- return 0;
- }
-
- virtual int Unlock() {
- LeaveCriticalSection(&cs_);
- return 0;
- }
-
-
- virtual bool TryLock() {
- // Returns non-zero if critical section is entered successfully entered.
- return TryEnterCriticalSection(&cs_);
- }
-
- private:
- CRITICAL_SECTION cs_; // Critical section used for mutex
-};
-
-
-Mutex* OS::CreateMutex() {
- return new Win32Mutex();
-}
-
-
-// ----------------------------------------------------------------------------
-// Win32 semaphore support.
-//
-// On Win32 semaphores are implemented using Win32 Semaphore objects. The
-// semaphores are anonymous. Also, the semaphores are initialized to have
-// no upper limit on count.
-
-
-class Win32Semaphore : public Semaphore {
- public:
- explicit Win32Semaphore(int count) {
- sem = ::CreateSemaphoreA(NULL, count, 0x7fffffff, NULL);
- }
-
- ~Win32Semaphore() {
- CloseHandle(sem);
- }
-
- void Wait() {
- WaitForSingleObject(sem, INFINITE);
- }
-
- bool Wait(int timeout) {
- // Timeout in Windows API is in milliseconds.
- DWORD millis_timeout = timeout / 1000;
- return WaitForSingleObject(sem, millis_timeout) != WAIT_TIMEOUT;
- }
-
- void Signal() {
- LONG dummy;
- ReleaseSemaphore(sem, 1, &dummy);
- }
-
- private:
- HANDLE sem;
-};
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new Win32Semaphore(count);
-}
-
-
-// ----------------------------------------------------------------------------
-// Win32 socket support.
-//
-
-class Win32Socket : public Socket {
- public:
- explicit Win32Socket() {
- // Create the socket.
- socket_ = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
- }
- explicit Win32Socket(SOCKET socket): socket_(socket) { }
- virtual ~Win32Socket() { Shutdown(); }
-
- // Server initialization.
- bool Bind(const int port);
- bool Listen(int backlog) const;
- Socket* Accept() const;
-
- // Client initialization.
- bool Connect(const char* host, const char* port);
-
- // Shutdown socket for both read and write.
- bool Shutdown();
-
- // Data Transimission
- int Send(const char* data, int len) const;
- int Receive(char* data, int len) const;
-
- bool SetReuseAddress(bool reuse_address);
-
- bool IsValid() const { return socket_ != INVALID_SOCKET; }
-
- private:
- SOCKET socket_;
-};
-
-
-bool Win32Socket::Bind(const int port) {
- if (!IsValid()) {
- return false;
- }
-
- sockaddr_in addr;
- memset(&addr, 0, sizeof(addr));
- addr.sin_family = AF_INET;
- addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- addr.sin_port = htons(port);
- int status = bind(socket_,
- reinterpret_cast<struct sockaddr *>(&addr),
- sizeof(addr));
- return status == 0;
-}
-
-
-bool Win32Socket::Listen(int backlog) const {
- if (!IsValid()) {
- return false;
- }
-
- int status = listen(socket_, backlog);
- return status == 0;
-}
-
-
-Socket* Win32Socket::Accept() const {
- if (!IsValid()) {
- return NULL;
- }
-
- SOCKET socket = accept(socket_, NULL, NULL);
- if (socket == INVALID_SOCKET) {
- return NULL;
- } else {
- return new Win32Socket(socket);
- }
-}
-
-
-bool Win32Socket::Connect(const char* host, const char* port) {
- if (!IsValid()) {
- return false;
- }
-
- // Lookup host and port.
- struct addrinfo *result = NULL;
- struct addrinfo hints;
- memset(&hints, 0, sizeof(addrinfo));
- hints.ai_family = AF_INET;
- hints.ai_socktype = SOCK_STREAM;
- hints.ai_protocol = IPPROTO_TCP;
- int status = getaddrinfo(host, port, &hints, &result);
- if (status != 0) {
- return false;
- }
-
- // Connect.
- status = connect(socket_,
- result->ai_addr,
- static_cast<int>(result->ai_addrlen));
- freeaddrinfo(result);
- return status == 0;
-}
-
-
-bool Win32Socket::Shutdown() {
- if (IsValid()) {
- // Shutdown socket for both read and write.
- int status = shutdown(socket_, SD_BOTH);
- closesocket(socket_);
- socket_ = INVALID_SOCKET;
- return status == SOCKET_ERROR;
- }
- return true;
-}
-
-
-int Win32Socket::Send(const char* data, int len) const {
- int status = send(socket_, data, len, 0);
- return status;
-}
-
-
-int Win32Socket::Receive(char* data, int len) const {
- int status = recv(socket_, data, len, 0);
- return status;
-}
-
-
-bool Win32Socket::SetReuseAddress(bool reuse_address) {
- BOOL on = reuse_address ? true : false;
- int status = setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR,
- reinterpret_cast<char*>(&on), sizeof(on));
- return status == SOCKET_ERROR;
-}
-
-
-bool Socket::Setup() {
- // Initialize Winsock32
- int err;
- WSADATA winsock_data;
- WORD version_requested = MAKEWORD(1, 0);
- err = WSAStartup(version_requested, &winsock_data);
- if (err != 0) {
- PrintF("Unable to initialize Winsock, err = %d\n", Socket::LastError());
- }
-
- return err == 0;
-}
-
-
-int Socket::LastError() {
- return WSAGetLastError();
-}
-
-
-uint16_t Socket::HToN(uint16_t value) {
- return htons(value);
-}
-
-
-uint16_t Socket::NToH(uint16_t value) {
- return ntohs(value);
-}
-
-
-uint32_t Socket::HToN(uint32_t value) {
- return htonl(value);
-}
-
-
-uint32_t Socket::NToH(uint32_t value) {
- return ntohl(value);
-}
-
-
-Socket* OS::CreateSocket() {
- return new Win32Socket();
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-// ----------------------------------------------------------------------------
-// Win32 profiler support.
-
-class Sampler::PlatformData : public Malloced {
- public:
- // Get a handle to the calling thread. This is the thread that we are
- // going to profile. We need to make a copy of the handle because we are
- // going to use it in the sampler thread. Using GetThreadHandle() will
- // not work in this case. We're using OpenThread because DuplicateHandle
- // for some reason doesn't work in Chrome's sandbox.
- PlatformData() : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
- THREAD_SUSPEND_RESUME |
- THREAD_QUERY_INFORMATION,
- false,
- GetCurrentThreadId())) {}
-
- ~PlatformData() {
- if (profiled_thread_ != NULL) {
- CloseHandle(profiled_thread_);
- profiled_thread_ = NULL;
- }
- }
-
- HANDLE profiled_thread() { return profiled_thread_; }
-
- private:
- HANDLE profiled_thread_;
-};
-
-
-class SamplerThread : public Thread {
- public:
- explicit SamplerThread(int interval)
- : Thread(NULL, "SamplerThread"),
- interval_(interval) {}
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- instance_ = new SamplerThread(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
- instance_->Join();
- delete instance_;
- instance_ = NULL;
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- }
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- }
- OS::Sleep(interval_);
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
- if (!sampler->isolate()->IsInitialized()) return;
- if (!sampler->IsProfiling()) return;
- SamplerThread* sampler_thread =
- reinterpret_cast<SamplerThread*>(raw_sampler_thread);
- sampler_thread->SampleContext(sampler);
- }
-
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
-
- void SampleContext(Sampler* sampler) {
- HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
- if (profiled_thread == NULL) return;
-
- // Context used for sampling the register state of the profiled thread.
- CONTEXT context;
- memset(&context, 0, sizeof(context));
-
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
- if (sample == NULL) sample = &sample_obj;
-
- static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
- if (SuspendThread(profiled_thread) == kSuspendFailed) return;
- sample->state = sampler->isolate()->current_vm_state();
-
- context.ContextFlags = CONTEXT_FULL;
- if (GetThreadContext(profiled_thread, &context) != 0) {
-#if V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(context.Rip);
- sample->sp = reinterpret_cast<Address>(context.Rsp);
- sample->fp = reinterpret_cast<Address>(context.Rbp);
-#else
- sample->pc = reinterpret_cast<Address>(context.Eip);
- sample->sp = reinterpret_cast<Address>(context.Esp);
- sample->fp = reinterpret_cast<Address>(context.Ebp);
-#endif
- sampler->SampleStack(sample);
- sampler->Tick(sample);
- }
- ResumeThread(profiled_thread);
- }
-
- const int interval_;
- RuntimeProfilerRateLimiter rate_limiter_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SamplerThread* instance_;
-
- DISALLOW_COPY_AND_ASSIGN(SamplerThread);
-};
-
-
-Mutex* SamplerThread::mutex_ = OS::CreateMutex();
-SamplerThread* SamplerThread::instance_ = NULL;
-
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SamplerThread::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SamplerThread::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/platform.h b/src/3rdparty/v8/src/platform.h
deleted file mode 100644
index fea16c8..0000000
--- a/src/3rdparty/v8/src/platform.h
+++ /dev/null
@@ -1,693 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This module contains the platform-specific code. This make the rest of the
-// code less dependent on operating system, compilers and runtime libraries.
-// This module does specifically not deal with differences between different
-// processor architecture.
-// The platform classes have the same definition for all platforms. The
-// implementation for a particular platform is put in platform_<os>.cc.
-// The build system then uses the implementation for the target platform.
-//
-// This design has been chosen because it is simple and fast. Alternatively,
-// the platform dependent classes could have been implemented using abstract
-// superclasses with virtual methods and having specializations for each
-// platform. This design was rejected because it was more complicated and
-// slower. It would require factory methods for selecting the right
-// implementation and the overhead of virtual methods for performance
-// sensitive like mutex locking/unlocking.
-
-#ifndef V8_PLATFORM_H_
-#define V8_PLATFORM_H_
-
-#define V8_INFINITY INFINITY
-
-// Windows specific stuff.
-#ifdef WIN32
-
-// Microsoft Visual C++ specific stuff.
-#ifdef _MSC_VER
-
-enum {
- FP_NAN,
- FP_INFINITE,
- FP_ZERO,
- FP_SUBNORMAL,
- FP_NORMAL
-};
-
-#undef V8_INFINITY
-#define V8_INFINITY HUGE_VAL
-
-namespace v8 {
-namespace internal {
-int isfinite(double x);
-} }
-int isnan(double x);
-int isinf(double x);
-int isless(double x, double y);
-int isgreater(double x, double y);
-int fpclassify(double x);
-int signbit(double x);
-
-int strncasecmp(const char* s1, const char* s2, int n);
-
-#endif // _MSC_VER
-
-// Random is missing on both Visual Studio and MinGW.
-int random();
-
-#endif // WIN32
-
-
-#ifdef __sun
-# ifndef signbit
-int signbit(double x);
-# endif
-#endif
-
-
-// GCC specific stuff
-#ifdef __GNUC__
-
-// Needed for va_list on at least MinGW and Android.
-#include <stdarg.h>
-
-#define __GNUC_VERSION__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
-
-// Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
-// warning flag and certain versions of GCC due to a bug:
-// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11931
-// For now, we use the more involved template-based version from <limits>, but
-// only when compiling with GCC versions affected by the bug (2.96.x - 4.0.x)
-// __GNUC_PREREQ is not defined in GCC for Mac OS X, so we define our own macro
-#if __GNUC_VERSION__ >= 29600 && __GNUC_VERSION__ < 40100
-#include <limits>
-#undef V8_INFINITY
-#define V8_INFINITY std::numeric_limits<double>::infinity()
-#endif
-
-#endif // __GNUC__
-
-#include "atomicops.h"
-#include "platform-tls.h"
-#include "utils.h"
-#include "v8globals.h"
-
-namespace v8 {
-namespace internal {
-
-// Use AtomicWord for a machine-sized pointer. It is assumed that
-// reads and writes of naturally aligned values of this type are atomic.
-typedef intptr_t AtomicWord;
-
-class Semaphore;
-class Mutex;
-
-double ceiling(double x);
-double modulo(double x, double y);
-
-// Forward declarations.
-class Socket;
-
-// ----------------------------------------------------------------------------
-// OS
-//
-// This class has static methods for the different platform specific
-// functions. Add methods here to cope with differences between the
-// supported platforms.
-
-class OS {
- public:
- // Initializes the platform OS support. Called once at VM startup.
- static void Setup();
-
- // Returns the accumulated user time for thread. This routine
- // can be used for profiling. The implementation should
- // strive for high-precision timer resolution, preferable
- // micro-second resolution.
- static int GetUserTime(uint32_t* secs, uint32_t* usecs);
-
- // Get a tick counter normalized to one tick per microsecond.
- // Used for calculating time intervals.
- static int64_t Ticks();
-
- // Returns current time as the number of milliseconds since
- // 00:00:00 UTC, January 1, 1970.
- static double TimeCurrentMillis();
-
- // Returns a string identifying the current time zone. The
- // timestamp is used for determining if DST is in effect.
- static const char* LocalTimezone(double time);
-
- // Returns the local time offset in milliseconds east of UTC without
- // taking daylight savings time into account.
- static double LocalTimeOffset();
-
- // Returns the daylight savings offset for the given time.
- static double DaylightSavingsOffset(double time);
-
- // Returns last OS error.
- static int GetLastError();
-
- static FILE* FOpen(const char* path, const char* mode);
- static bool Remove(const char* path);
-
- // Log file open mode is platform-dependent due to line ends issues.
- static const char* const LogFileOpenMode;
-
- // Print output to console. This is mostly used for debugging output.
- // On platforms that has standard terminal output, the output
- // should go to stdout.
- static void Print(const char* format, ...);
- static void VPrint(const char* format, va_list args);
-
- // Print output to a file. This is mostly used for debugging output.
- static void FPrint(FILE* out, const char* format, ...);
- static void VFPrint(FILE* out, const char* format, va_list args);
-
- // Print error output to console. This is mostly used for error message
- // output. On platforms that has standard terminal output, the output
- // should go to stderr.
- static void PrintError(const char* format, ...);
- static void VPrintError(const char* format, va_list args);
-
- // Allocate/Free memory used by JS heap. Pages are readable/writable, but
- // they are not guaranteed to be executable unless 'executable' is true.
- // Returns the address of allocated memory, or NULL if failed.
- static void* Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable);
- static void Free(void* address, const size_t size);
- // Get the Alignment guaranteed by Allocate().
- static size_t AllocateAlignment();
-
-#ifdef ENABLE_HEAP_PROTECTION
- // Protect/unprotect a block of memory by marking it read-only/writable.
- static void Protect(void* address, size_t size);
- static void Unprotect(void* address, size_t size, bool is_executable);
-#endif
-
- // Returns an indication of whether a pointer is in a space that
- // has been allocated by Allocate(). This method may conservatively
- // always return false, but giving more accurate information may
- // improve the robustness of the stack dump code in the presence of
- // heap corruption.
- static bool IsOutsideAllocatedSpace(void* pointer);
-
- // Sleep for a number of milliseconds.
- static void Sleep(const int milliseconds);
-
- // Abort the current process.
- static void Abort();
-
- // Debug break.
- static void DebugBreak();
-
- // Walk the stack.
- static const int kStackWalkError = -1;
- static const int kStackWalkMaxNameLen = 256;
- static const int kStackWalkMaxTextLen = 256;
- struct StackFrame {
- void* address;
- char text[kStackWalkMaxTextLen];
- };
-
- static int StackWalk(Vector<StackFrame> frames);
-
- // Factory method for creating platform dependent Mutex.
- // Please use delete to reclaim the storage for the returned Mutex.
- static Mutex* CreateMutex();
-
- // Factory method for creating platform dependent Semaphore.
- // Please use delete to reclaim the storage for the returned Semaphore.
- static Semaphore* CreateSemaphore(int count);
-
- // Factory method for creating platform dependent Socket.
- // Please use delete to reclaim the storage for the returned Socket.
- static Socket* CreateSocket();
-
- class MemoryMappedFile {
- public:
- static MemoryMappedFile* open(const char* name);
- static MemoryMappedFile* create(const char* name, int size, void* initial);
- virtual ~MemoryMappedFile() { }
- virtual void* memory() = 0;
- virtual int size() = 0;
- };
-
- // Safe formatting print. Ensures that str is always null-terminated.
- // Returns the number of chars written, or -1 if output was truncated.
- static int SNPrintF(Vector<char> str, const char* format, ...);
- static int VSNPrintF(Vector<char> str,
- const char* format,
- va_list args);
-
- static char* StrChr(char* str, int c);
- static void StrNCpy(Vector<char> dest, const char* src, size_t n);
-
- // Support for the profiler. Can do nothing, in which case ticks
- // occuring in shared libraries will not be properly accounted for.
- static void LogSharedLibraryAddresses();
-
- // Support for the profiler. Notifies the external profiling
- // process that a code moving garbage collection starts. Can do
- // nothing, in which case the code objects must not move (e.g., by
- // using --never-compact) if accurate profiling is desired.
- static void SignalCodeMovingGC();
-
- // The return value indicates the CPU features we are sure of because of the
- // OS. For example MacOSX doesn't run on any x86 CPUs that don't have SSE2
- // instructions.
- // This is a little messy because the interpretation is subject to the cross
- // of the CPU and the OS. The bits in the answer correspond to the bit
- // positions indicated by the members of the CpuFeature enum from globals.h
- static uint64_t CpuFeaturesImpliedByPlatform();
-
- // Returns the double constant NAN
- static double nan_value();
-
- // Support runtime detection of VFP3 on ARM CPUs.
- static bool ArmCpuHasFeature(CpuFeature feature);
-
- // Support runtime detection of FPU on MIPS CPUs.
- static bool MipsCpuHasFeature(CpuFeature feature);
-
- // Returns the activation frame alignment constraint or zero if
- // the platform doesn't care. Guaranteed to be a power of two.
- static int ActivationFrameAlignment();
-
- static void ReleaseStore(volatile AtomicWord* ptr, AtomicWord value);
-
-#if defined(V8_TARGET_ARCH_IA32)
- // Copy memory area to disjoint memory area.
- static void MemCopy(void* dest, const void* src, size_t size);
- // Limit below which the extra overhead of the MemCopy function is likely
- // to outweigh the benefits of faster copying.
- static const int kMinComplexMemCopy = 64;
- typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);
-
-#else // V8_TARGET_ARCH_IA32
- static void MemCopy(void* dest, const void* src, size_t size) {
- memcpy(dest, src, size);
- }
- static const int kMinComplexMemCopy = 256;
-#endif // V8_TARGET_ARCH_IA32
-
- private:
- static const int msPerSecond = 1000;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
-};
-
-
-class VirtualMemory {
- public:
- // Reserves virtual memory with size.
- explicit VirtualMemory(size_t size);
- ~VirtualMemory();
-
- // Returns whether the memory has been reserved.
- bool IsReserved();
-
- // Returns the start address of the reserved memory.
- void* address() {
- ASSERT(IsReserved());
- return address_;
- }
-
- // Returns the size of the reserved memory.
- size_t size() { return size_; }
-
- // Commits real memory. Returns whether the operation succeeded.
- bool Commit(void* address, size_t size, bool is_executable);
-
- // Uncommit real memory. Returns whether the operation succeeded.
- bool Uncommit(void* address, size_t size);
-
- private:
- void* address_; // Start address of the virtual memory.
- size_t size_; // Size of the virtual memory.
-};
-
-
-// ----------------------------------------------------------------------------
-// ThreadHandle
-//
-// A ThreadHandle represents a thread identifier for a thread. The ThreadHandle
-// does not own the underlying os handle. Thread handles can be used for
-// refering to threads and testing equality.
-
-class ThreadHandle {
- public:
- enum Kind { SELF, INVALID };
- explicit ThreadHandle(Kind kind);
-
- // Destructor.
- ~ThreadHandle();
-
- // Test for thread running.
- bool IsSelf() const;
-
- // Test for valid thread handle.
- bool IsValid() const;
-
- // Get platform-specific data.
- class PlatformData;
- PlatformData* thread_handle_data() { return data_; }
-
- // Initialize the handle to kind
- void Initialize(Kind kind);
-
- private:
- PlatformData* data_; // Captures platform dependent data.
-};
-
-
-// ----------------------------------------------------------------------------
-// Thread
-//
-// Thread objects are used for creating and running threads. When the start()
-// method is called the new thread starts running the run() method in the new
-// thread. The Thread object should not be deallocated before the thread has
-// terminated.
-
-class Thread: public ThreadHandle {
- public:
- // Opaque data type for thread-local storage keys.
- // LOCAL_STORAGE_KEY_MIN_VALUE and LOCAL_STORAGE_KEY_MAX_VALUE are specified
- // to ensure that enumeration type has correct value range (see Issue 830 for
- // more details).
- enum LocalStorageKey {
- LOCAL_STORAGE_KEY_MIN_VALUE = kMinInt,
- LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt
- };
-
- struct Options {
- Options() : name("v8:<unknown>"), stack_size(0) {}
-
- const char* name;
- int stack_size;
- };
-
- // Create new thread (with a value for storing in the TLS isolate field).
- Thread(Isolate* isolate, const Options& options);
- Thread(Isolate* isolate, const char* name);
- virtual ~Thread();
-
- // Start new thread by calling the Run() method in the new thread.
- void Start();
-
- // Wait until thread terminates.
- void Join();
-
- inline const char* name() const {
- return name_;
- }
-
- // Abstract method for run handler.
- virtual void Run() = 0;
-
- // Thread-local storage.
- static LocalStorageKey CreateThreadLocalKey();
- static void DeleteThreadLocalKey(LocalStorageKey key);
- static void* GetThreadLocal(LocalStorageKey key);
- static int GetThreadLocalInt(LocalStorageKey key) {
- return static_cast<int>(reinterpret_cast<intptr_t>(GetThreadLocal(key)));
- }
- static void SetThreadLocal(LocalStorageKey key, void* value);
- static void SetThreadLocalInt(LocalStorageKey key, int value) {
- SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value)));
- }
- static bool HasThreadLocal(LocalStorageKey key) {
- return GetThreadLocal(key) != NULL;
- }
-
-#ifdef V8_FAST_TLS_SUPPORTED
- static inline void* GetExistingThreadLocal(LocalStorageKey key) {
- void* result = reinterpret_cast<void*>(
- InternalGetExistingThreadLocal(static_cast<intptr_t>(key)));
- ASSERT(result == GetThreadLocal(key));
- return result;
- }
-#else
- static inline void* GetExistingThreadLocal(LocalStorageKey key) {
- return GetThreadLocal(key);
- }
-#endif
-
- // A hint to the scheduler to let another thread run.
- static void YieldCPU();
-
- Isolate* isolate() const { return isolate_; }
-
- // The thread name length is limited to 16 based on Linux's implementation of
- // prctl().
- static const int kMaxThreadNameLength = 16;
- private:
- void set_name(const char *name);
-
- class PlatformData;
- PlatformData* data_;
- Isolate* isolate_;
- char name_[kMaxThreadNameLength];
- int stack_size_;
-
- DISALLOW_COPY_AND_ASSIGN(Thread);
-};
-
-
-// ----------------------------------------------------------------------------
-// Mutex
-//
-// Mutexes are used for serializing access to non-reentrant sections of code.
-// The implementations of mutex should allow for nested/recursive locking.
-
-class Mutex {
- public:
- virtual ~Mutex() {}
-
- // Locks the given mutex. If the mutex is currently unlocked, it becomes
- // locked and owned by the calling thread, and immediately. If the mutex
- // is already locked by another thread, suspends the calling thread until
- // the mutex is unlocked.
- virtual int Lock() = 0;
-
- // Unlocks the given mutex. The mutex is assumed to be locked and owned by
- // the calling thread on entrance.
- virtual int Unlock() = 0;
-
- // Tries to lock the given mutex. Returns whether the mutex was
- // successfully locked.
- virtual bool TryLock() = 0;
-};
-
-
-// ----------------------------------------------------------------------------
-// ScopedLock
-//
-// Stack-allocated ScopedLocks provide block-scoped locking and
-// unlocking of a mutex.
-class ScopedLock {
- public:
- explicit ScopedLock(Mutex* mutex): mutex_(mutex) {
- ASSERT(mutex_ != NULL);
- mutex_->Lock();
- }
- ~ScopedLock() {
- mutex_->Unlock();
- }
-
- private:
- Mutex* mutex_;
- DISALLOW_COPY_AND_ASSIGN(ScopedLock);
-};
-
-
-// ----------------------------------------------------------------------------
-// Semaphore
-//
-// A semaphore object is a synchronization object that maintains a count. The
-// count is decremented each time a thread completes a wait for the semaphore
-// object and incremented each time a thread signals the semaphore. When the
-// count reaches zero, threads waiting for the semaphore blocks until the
-// count becomes non-zero.
-
-class Semaphore {
- public:
- virtual ~Semaphore() {}
-
- // Suspends the calling thread until the semaphore counter is non zero
- // and then decrements the semaphore counter.
- virtual void Wait() = 0;
-
- // Suspends the calling thread until the counter is non zero or the timeout
- // time has passsed. If timeout happens the return value is false and the
- // counter is unchanged. Otherwise the semaphore counter is decremented and
- // true is returned. The timeout value is specified in microseconds.
- virtual bool Wait(int timeout) = 0;
-
- // Increments the semaphore counter.
- virtual void Signal() = 0;
-};
-
-
-// ----------------------------------------------------------------------------
-// Socket
-//
-
-class Socket {
- public:
- virtual ~Socket() {}
-
- // Server initialization.
- virtual bool Bind(const int port) = 0;
- virtual bool Listen(int backlog) const = 0;
- virtual Socket* Accept() const = 0;
-
- // Client initialization.
- virtual bool Connect(const char* host, const char* port) = 0;
-
- // Shutdown socket for both read and write. This causes blocking Send and
- // Receive calls to exit. After Shutdown the Socket object cannot be used for
- // any communication.
- virtual bool Shutdown() = 0;
-
- // Data Transimission
- virtual int Send(const char* data, int len) const = 0;
- virtual int Receive(char* data, int len) const = 0;
-
- // Set the value of the SO_REUSEADDR socket option.
- virtual bool SetReuseAddress(bool reuse_address) = 0;
-
- virtual bool IsValid() const = 0;
-
- static bool Setup();
- static int LastError();
- static uint16_t HToN(uint16_t value);
- static uint16_t NToH(uint16_t value);
- static uint32_t HToN(uint32_t value);
- static uint32_t NToH(uint32_t value);
-};
-
-
-// ----------------------------------------------------------------------------
-// Sampler
-//
-// A sampler periodically samples the state of the VM and optionally
-// (if used for profiling) the program counter and stack pointer for
-// the thread that created it.
-
-// TickSample captures the information collected for each sample.
-class TickSample {
- public:
- TickSample()
- : state(OTHER),
- pc(NULL),
- sp(NULL),
- fp(NULL),
- tos(NULL),
- frames_count(0),
- has_external_callback(false) {}
- StateTag state; // The state of the VM.
- Address pc; // Instruction pointer.
- Address sp; // Stack pointer.
- Address fp; // Frame pointer.
- union {
- Address tos; // Top stack value (*sp).
- Address external_callback;
- };
- static const int kMaxFramesCount = 64;
- Address stack[kMaxFramesCount]; // Call stack.
- int frames_count : 8; // Number of captured frames.
- bool has_external_callback : 1;
-};
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-class Sampler {
- public:
- // Initialize sampler.
- Sampler(Isolate* isolate, int interval);
- virtual ~Sampler();
-
- int interval() const { return interval_; }
-
- // Performs stack sampling.
- void SampleStack(TickSample* sample) {
- DoSampleStack(sample);
- IncSamplesTaken();
- }
-
- // This method is called for each sampling period with the current
- // program counter.
- virtual void Tick(TickSample* sample) = 0;
-
- // Start and stop sampler.
- void Start();
- void Stop();
-
- // Is the sampler used for profiling?
- bool IsProfiling() const { return NoBarrier_Load(&profiling_) > 0; }
- void IncreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, 1); }
- void DecreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, -1); }
-
- // Whether the sampler is running (that is, consumes resources).
- bool IsActive() const { return NoBarrier_Load(&active_); }
-
- Isolate* isolate() { return isolate_; }
-
- // Used in tests to make sure that stack sampling is performed.
- int samples_taken() const { return samples_taken_; }
- void ResetSamplesTaken() { samples_taken_ = 0; }
-
- class PlatformData;
- PlatformData* data() { return data_; }
-
- PlatformData* platform_data() { return data_; }
-
- protected:
- virtual void DoSampleStack(TickSample* sample) = 0;
-
- private:
- void SetActive(bool value) { NoBarrier_Store(&active_, value); }
- void IncSamplesTaken() { if (++samples_taken_ < 0) samples_taken_ = 0; }
-
- Isolate* isolate_;
- const int interval_;
- Atomic32 profiling_;
- Atomic32 active_;
- PlatformData* data_; // Platform specific data.
- int samples_taken_; // Counts stack samples taken.
- DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
-};
-
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-} } // namespace v8::internal
-
-#endif // V8_PLATFORM_H_
diff --git a/src/3rdparty/v8/src/preparse-data.cc b/src/3rdparty/v8/src/preparse-data.cc
deleted file mode 100644
index 92a0338..0000000
--- a/src/3rdparty/v8/src/preparse-data.cc
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "../include/v8stdint.h"
-#include "globals.h"
-#include "checks.h"
-#include "allocation.h"
-#include "utils.h"
-#include "list-inl.h"
-#include "hashmap.h"
-
-#include "preparse-data.h"
-
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// FunctionLoggingParserRecorder
-
-FunctionLoggingParserRecorder::FunctionLoggingParserRecorder()
- : function_store_(0),
- is_recording_(true),
- pause_count_(0) {
- preamble_[PreparseDataConstants::kMagicOffset] =
- PreparseDataConstants::kMagicNumber;
- preamble_[PreparseDataConstants::kVersionOffset] =
- PreparseDataConstants::kCurrentVersion;
- preamble_[PreparseDataConstants::kHasErrorOffset] = false;
- preamble_[PreparseDataConstants::kFunctionsSizeOffset] = 0;
- preamble_[PreparseDataConstants::kSymbolCountOffset] = 0;
- preamble_[PreparseDataConstants::kSizeOffset] = 0;
- ASSERT_EQ(6, PreparseDataConstants::kHeaderSize);
-#ifdef DEBUG
- prev_start_ = -1;
-#endif
-}
-
-
-void FunctionLoggingParserRecorder::LogMessage(int start_pos,
- int end_pos,
- const char* message,
- const char* arg_opt) {
- if (has_error()) return;
- preamble_[PreparseDataConstants::kHasErrorOffset] = true;
- function_store_.Reset();
- STATIC_ASSERT(PreparseDataConstants::kMessageStartPos == 0);
- function_store_.Add(start_pos);
- STATIC_ASSERT(PreparseDataConstants::kMessageEndPos == 1);
- function_store_.Add(end_pos);
- STATIC_ASSERT(PreparseDataConstants::kMessageArgCountPos == 2);
- function_store_.Add((arg_opt == NULL) ? 0 : 1);
- STATIC_ASSERT(PreparseDataConstants::kMessageTextPos == 3);
- WriteString(CStrVector(message));
- if (arg_opt) WriteString(CStrVector(arg_opt));
- is_recording_ = false;
-}
-
-
-void FunctionLoggingParserRecorder::WriteString(Vector<const char> str) {
- function_store_.Add(str.length());
- for (int i = 0; i < str.length(); i++) {
- function_store_.Add(str[i]);
- }
-}
-
-// ----------------------------------------------------------------------------
-// PartialParserRecorder - Record both function entries and symbols.
-
-Vector<unsigned> PartialParserRecorder::ExtractData() {
- int function_size = function_store_.size();
- int total_size = PreparseDataConstants::kHeaderSize + function_size;
- Vector<unsigned> data = Vector<unsigned>::New(total_size);
- preamble_[PreparseDataConstants::kFunctionsSizeOffset] = function_size;
- preamble_[PreparseDataConstants::kSymbolCountOffset] = 0;
- memcpy(data.start(), preamble_, sizeof(preamble_));
- int symbol_start = PreparseDataConstants::kHeaderSize + function_size;
- if (function_size > 0) {
- function_store_.WriteTo(data.SubVector(PreparseDataConstants::kHeaderSize,
- symbol_start));
- }
- return data;
-}
-
-
-// ----------------------------------------------------------------------------
-// CompleteParserRecorder - Record both function entries and symbols.
-
-CompleteParserRecorder::CompleteParserRecorder()
- : FunctionLoggingParserRecorder(),
- literal_chars_(0),
- symbol_store_(0),
- symbol_keys_(0),
- symbol_table_(vector_compare),
- symbol_id_(0) {
-}
-
-
-void CompleteParserRecorder::LogSymbol(int start,
- int hash,
- bool is_ascii,
- Vector<const byte> literal_bytes) {
- Key key = { is_ascii, literal_bytes };
- HashMap::Entry* entry = symbol_table_.Lookup(&key, hash, true);
- int id = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
- if (id == 0) {
- // Copy literal contents for later comparison.
- key.literal_bytes =
- Vector<const byte>::cast(literal_chars_.AddBlock(literal_bytes));
- // Put (symbol_id_ + 1) into entry and increment it.
- id = ++symbol_id_;
- entry->value = reinterpret_cast<void*>(id);
- Vector<Key> symbol = symbol_keys_.AddBlock(1, key);
- entry->key = &symbol[0];
- }
- WriteNumber(id - 1);
-}
-
-
-Vector<unsigned> CompleteParserRecorder::ExtractData() {
- int function_size = function_store_.size();
- // Add terminator to symbols, then pad to unsigned size.
- int symbol_size = symbol_store_.size();
- int padding = sizeof(unsigned) - (symbol_size % sizeof(unsigned));
- symbol_store_.AddBlock(padding, PreparseDataConstants::kNumberTerminator);
- symbol_size += padding;
- int total_size = PreparseDataConstants::kHeaderSize + function_size
- + (symbol_size / sizeof(unsigned));
- Vector<unsigned> data = Vector<unsigned>::New(total_size);
- preamble_[PreparseDataConstants::kFunctionsSizeOffset] = function_size;
- preamble_[PreparseDataConstants::kSymbolCountOffset] = symbol_id_;
- memcpy(data.start(), preamble_, sizeof(preamble_));
- int symbol_start = PreparseDataConstants::kHeaderSize + function_size;
- if (function_size > 0) {
- function_store_.WriteTo(data.SubVector(PreparseDataConstants::kHeaderSize,
- symbol_start));
- }
- if (!has_error()) {
- symbol_store_.WriteTo(
- Vector<byte>::cast(data.SubVector(symbol_start, total_size)));
- }
- return data;
-}
-
-
-void CompleteParserRecorder::WriteNumber(int number) {
- ASSERT(number >= 0);
-
- int mask = (1 << 28) - 1;
- for (int i = 28; i > 0; i -= 7) {
- if (number > mask) {
- symbol_store_.Add(static_cast<byte>(number >> i) | 0x80u);
- number &= mask;
- }
- mask >>= 7;
- }
- symbol_store_.Add(static_cast<byte>(number));
-}
-
-
-} } // namespace v8::internal.
diff --git a/src/3rdparty/v8/src/preparse-data.h b/src/3rdparty/v8/src/preparse-data.h
deleted file mode 100644
index bb5707b..0000000
--- a/src/3rdparty/v8/src/preparse-data.h
+++ /dev/null
@@ -1,249 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PREPARSER_DATA_H_
-#define V8_PREPARSER_DATA_H_
-
-#include "hashmap.h"
-
-namespace v8 {
-namespace internal {
-
-// Generic and general data used by preparse data recorders and readers.
-
-class PreparseDataConstants : public AllStatic {
- public:
- // Layout and constants of the preparse data exchange format.
- static const unsigned kMagicNumber = 0xBadDead;
- static const unsigned kCurrentVersion = 6;
-
- static const int kMagicOffset = 0;
- static const int kVersionOffset = 1;
- static const int kHasErrorOffset = 2;
- static const int kFunctionsSizeOffset = 3;
- static const int kSymbolCountOffset = 4;
- static const int kSizeOffset = 5;
- static const int kHeaderSize = 6;
-
- // If encoding a message, the following positions are fixed.
- static const int kMessageStartPos = 0;
- static const int kMessageEndPos = 1;
- static const int kMessageArgCountPos = 2;
- static const int kMessageTextPos = 3;
-
- static const byte kNumberTerminator = 0x80u;
-};
-
-
-// ----------------------------------------------------------------------------
-// ParserRecorder - Logging of preparser data.
-
-// Abstract interface for preparse data recorder.
-class ParserRecorder {
- public:
- ParserRecorder() { }
- virtual ~ParserRecorder() { }
-
- // Logs the scope and some details of a function literal in the source.
- virtual void LogFunction(int start,
- int end,
- int literals,
- int properties) = 0;
-
- // Logs a symbol creation of a literal or identifier.
- virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
- virtual void LogUC16Symbol(int start, Vector<const uc16> literal) { }
-
- // Logs an error message and marks the log as containing an error.
- // Further logging will be ignored, and ExtractData will return a vector
- // representing the error only.
- virtual void LogMessage(int start,
- int end,
- const char* message,
- const char* argument_opt) = 0;
-
- virtual int function_position() = 0;
-
- virtual int symbol_position() = 0;
-
- virtual int symbol_ids() = 0;
-
- virtual Vector<unsigned> ExtractData() = 0;
-
- virtual void PauseRecording() = 0;
-
- virtual void ResumeRecording() = 0;
-};
-
-
-// ----------------------------------------------------------------------------
-// FunctionLoggingParserRecorder - Record only function entries
-
-class FunctionLoggingParserRecorder : public ParserRecorder {
- public:
- FunctionLoggingParserRecorder();
- virtual ~FunctionLoggingParserRecorder() {}
-
- virtual void LogFunction(int start, int end, int literals, int properties) {
- function_store_.Add(start);
- function_store_.Add(end);
- function_store_.Add(literals);
- function_store_.Add(properties);
- }
-
- // Logs an error message and marks the log as containing an error.
- // Further logging will be ignored, and ExtractData will return a vector
- // representing the error only.
- virtual void LogMessage(int start,
- int end,
- const char* message,
- const char* argument_opt);
-
- virtual int function_position() { return function_store_.size(); }
-
-
- virtual Vector<unsigned> ExtractData() = 0;
-
- virtual void PauseRecording() {
- pause_count_++;
- is_recording_ = false;
- }
-
- virtual void ResumeRecording() {
- ASSERT(pause_count_ > 0);
- if (--pause_count_ == 0) is_recording_ = !has_error();
- }
-
- protected:
- bool has_error() {
- return static_cast<bool>(preamble_[PreparseDataConstants::kHasErrorOffset]);
- }
-
- bool is_recording() {
- return is_recording_;
- }
-
- void WriteString(Vector<const char> str);
-
- Collector<unsigned> function_store_;
- unsigned preamble_[PreparseDataConstants::kHeaderSize];
- bool is_recording_;
- int pause_count_;
-
-#ifdef DEBUG
- int prev_start_;
-#endif
-};
-
-
-// ----------------------------------------------------------------------------
-// PartialParserRecorder - Record only function entries
-
-class PartialParserRecorder : public FunctionLoggingParserRecorder {
- public:
- PartialParserRecorder() : FunctionLoggingParserRecorder() { }
- virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
- virtual void LogUC16Symbol(int start, Vector<const uc16> literal) { }
- virtual ~PartialParserRecorder() { }
- virtual Vector<unsigned> ExtractData();
- virtual int symbol_position() { return 0; }
- virtual int symbol_ids() { return 0; }
-};
-
-
-// ----------------------------------------------------------------------------
-// CompleteParserRecorder - Record both function entries and symbols.
-
-class CompleteParserRecorder: public FunctionLoggingParserRecorder {
- public:
- CompleteParserRecorder();
- virtual ~CompleteParserRecorder() { }
-
- virtual void LogAsciiSymbol(int start, Vector<const char> literal) {
- if (!is_recording_) return;
- int hash = vector_hash(literal);
- LogSymbol(start, hash, true, Vector<const byte>::cast(literal));
- }
-
- virtual void LogUC16Symbol(int start, Vector<const uc16> literal) {
- if (!is_recording_) return;
- int hash = vector_hash(literal);
- LogSymbol(start, hash, false, Vector<const byte>::cast(literal));
- }
-
- virtual Vector<unsigned> ExtractData();
-
- virtual int symbol_position() { return symbol_store_.size(); }
- virtual int symbol_ids() { return symbol_id_; }
-
- private:
- struct Key {
- bool is_ascii;
- Vector<const byte> literal_bytes;
- };
-
- virtual void LogSymbol(int start,
- int hash,
- bool is_ascii,
- Vector<const byte> literal);
-
- template <typename Char>
- static int vector_hash(Vector<const Char> string) {
- int hash = 0;
- for (int i = 0; i < string.length(); i++) {
- int c = static_cast<int>(string[i]);
- hash += c;
- hash += (hash << 10);
- hash ^= (hash >> 6);
- }
- return hash;
- }
-
- static bool vector_compare(void* a, void* b) {
- Key* string1 = reinterpret_cast<Key*>(a);
- Key* string2 = reinterpret_cast<Key*>(b);
- if (string1->is_ascii != string2->is_ascii) return false;
- int length = string1->literal_bytes.length();
- if (string2->literal_bytes.length() != length) return false;
- return memcmp(string1->literal_bytes.start(),
- string2->literal_bytes.start(), length) == 0;
- }
-
- // Write a non-negative number to the symbol store.
- void WriteNumber(int number);
-
- Collector<byte> literal_chars_;
- Collector<byte> symbol_store_;
- Collector<Key> symbol_keys_;
- HashMap symbol_table_;
- int symbol_id_;
-};
-
-
-} } // namespace v8::internal.
-
-#endif // V8_PREPARSER_DATA_H_
diff --git a/src/3rdparty/v8/src/preparser-api.cc b/src/3rdparty/v8/src/preparser-api.cc
deleted file mode 100644
index 61e9e7e..0000000
--- a/src/3rdparty/v8/src/preparser-api.cc
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "../include/v8-preparser.h"
-
-#include "globals.h"
-#include "checks.h"
-#include "allocation.h"
-#include "utils.h"
-#include "list.h"
-#include "scanner-base.h"
-#include "preparse-data.h"
-#include "preparser.h"
-
-namespace v8 {
-namespace internal {
-
-// UTF16Buffer based on a v8::UnicodeInputStream.
-class InputStreamUTF16Buffer : public UC16CharacterStream {
- public:
- /* The InputStreamUTF16Buffer maintains an internal buffer
- * that is filled in chunks from the UC16CharacterStream.
- * It also maintains unlimited pushback capability, but optimized
- * for small pushbacks.
- * The pushback_buffer_ pointer points to the limit of pushbacks
- * in the current buffer. There is room for a few pushback'ed chars before
- * the buffer containing the most recently read chunk. If this is overflowed,
- * an external buffer is allocated/reused to hold further pushbacks, and
- * pushback_buffer_ and buffer_cursor_/buffer_end_ now points to the
- * new buffer. When this buffer is read to the end again, the cursor is
- * switched back to the internal buffer
- */
- explicit InputStreamUTF16Buffer(v8::UnicodeInputStream* stream)
- : UC16CharacterStream(),
- stream_(stream),
- pushback_buffer_(buffer_),
- pushback_buffer_end_cache_(NULL),
- pushback_buffer_backing_(NULL),
- pushback_buffer_backing_size_(0) {
- buffer_cursor_ = buffer_end_ = buffer_ + kPushBackSize;
- }
-
- virtual ~InputStreamUTF16Buffer() {
- if (pushback_buffer_backing_ != NULL) {
- DeleteArray(pushback_buffer_backing_);
- }
- }
-
- virtual void PushBack(uc32 ch) {
- ASSERT(pos_ > 0);
- if (ch == kEndOfInput) {
- pos_--;
- return;
- }
- if (buffer_cursor_ <= pushback_buffer_) {
- // No more room in the current buffer to do pushbacks.
- if (pushback_buffer_end_cache_ == NULL) {
- // We have overflowed the pushback space at the beginning of buffer_.
- // Switch to using a separate allocated pushback buffer.
- if (pushback_buffer_backing_ == NULL) {
- // Allocate a buffer the first time we need it.
- pushback_buffer_backing_ = NewArray<uc16>(kPushBackSize);
- pushback_buffer_backing_size_ = kPushBackSize;
- }
- pushback_buffer_ = pushback_buffer_backing_;
- pushback_buffer_end_cache_ = buffer_end_;
- buffer_end_ = pushback_buffer_backing_ + pushback_buffer_backing_size_;
- buffer_cursor_ = buffer_end_ - 1;
- } else {
- // Hit the bottom of the allocated pushback buffer.
- // Double the buffer and continue.
- uc16* new_buffer = NewArray<uc16>(pushback_buffer_backing_size_ * 2);
- memcpy(new_buffer + pushback_buffer_backing_size_,
- pushback_buffer_backing_,
- pushback_buffer_backing_size_);
- DeleteArray(pushback_buffer_backing_);
- buffer_cursor_ = new_buffer + pushback_buffer_backing_size_;
- pushback_buffer_backing_ = pushback_buffer_ = new_buffer;
- buffer_end_ = pushback_buffer_backing_ + pushback_buffer_backing_size_;
- }
- }
- pushback_buffer_[buffer_cursor_ - pushback_buffer_- 1] =
- static_cast<uc16>(ch);
- pos_--;
- }
-
- protected:
- virtual bool ReadBlock() {
- if (pushback_buffer_end_cache_ != NULL) {
- buffer_cursor_ = buffer_;
- buffer_end_ = pushback_buffer_end_cache_;
- pushback_buffer_end_cache_ = NULL;
- return buffer_end_ > buffer_cursor_;
- }
- // Copy the top of the buffer into the pushback area.
- int32_t value;
- uc16* buffer_start = buffer_ + kPushBackSize;
- buffer_cursor_ = buffer_end_ = buffer_start;
- while ((value = stream_->Next()) >= 0) {
- if (value > static_cast<int32_t>(unibrow::Utf8::kMaxThreeByteChar)) {
- value = unibrow::Utf8::kBadChar;
- }
- // buffer_end_ is a const pointer, but buffer_ is writable.
- buffer_start[buffer_end_++ - buffer_start] = static_cast<uc16>(value);
- if (buffer_end_ == buffer_ + kPushBackSize + kBufferSize) break;
- }
- return buffer_end_ > buffer_start;
- }
-
- virtual unsigned SlowSeekForward(unsigned pos) {
- // Seeking in the input is not used by preparsing.
- // It's only used by the real parser based on preparser data.
- UNIMPLEMENTED();
- return 0;
- }
-
- private:
- static const unsigned kBufferSize = 512;
- static const unsigned kPushBackSize = 16;
- v8::UnicodeInputStream* const stream_;
- // Buffer holding first kPushBackSize characters of pushback buffer,
- // then kBufferSize chars of read-ahead.
- // The pushback buffer is only used if pushing back characters past
- // the start of a block.
- uc16 buffer_[kPushBackSize + kBufferSize];
- // Limit of pushbacks before new allocation is necessary.
- uc16* pushback_buffer_;
- // Only if that pushback buffer at the start of buffer_ isn't sufficient
- // is the following used.
- const uc16* pushback_buffer_end_cache_;
- uc16* pushback_buffer_backing_;
- unsigned pushback_buffer_backing_size_;
-};
-
-
-class StandAloneJavaScriptScanner : public JavaScriptScanner {
- public:
- explicit StandAloneJavaScriptScanner(ScannerConstants* scanner_constants)
- : JavaScriptScanner(scanner_constants) { }
-
- void Initialize(UC16CharacterStream* source) {
- source_ = source;
- Init();
- // Skip initial whitespace allowing HTML comment ends just like
- // after a newline and scan first token.
- has_line_terminator_before_next_ = true;
- SkipWhiteSpace();
- Scan();
- }
-};
-
-
-// Functions declared by allocation.h and implemented in both api.cc (for v8)
-// or here (for a stand-alone preparser).
-
-void FatalProcessOutOfMemory(const char* reason) {
- V8_Fatal(__FILE__, __LINE__, reason);
-}
-
-bool EnableSlowAsserts() { return true; }
-
-} // namespace internal.
-
-
-UnicodeInputStream::~UnicodeInputStream() { }
-
-
-PreParserData Preparse(UnicodeInputStream* input, size_t max_stack) {
- internal::InputStreamUTF16Buffer buffer(input);
- uintptr_t stack_limit = reinterpret_cast<uintptr_t>(&buffer) - max_stack;
- internal::ScannerConstants scanner_constants;
- internal::StandAloneJavaScriptScanner scanner(&scanner_constants);
- scanner.Initialize(&buffer);
- internal::CompleteParserRecorder recorder;
- preparser::PreParser::PreParseResult result =
- preparser::PreParser::PreParseProgram(&scanner,
- &recorder,
- true,
- stack_limit);
- if (result == preparser::PreParser::kPreParseStackOverflow) {
- return PreParserData::StackOverflow();
- }
- internal::Vector<unsigned> pre_data = recorder.ExtractData();
- size_t size = pre_data.length() * sizeof(pre_data[0]);
- unsigned char* data = reinterpret_cast<unsigned char*>(pre_data.start());
- return PreParserData(size, data);
-}
-
-} // namespace v8.
-
-
-// Used by ASSERT macros and other immediate exits.
-extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
- exit(EXIT_FAILURE);
-}
diff --git a/src/3rdparty/v8/src/preparser.cc b/src/3rdparty/v8/src/preparser.cc
deleted file mode 100644
index fec1567..0000000
--- a/src/3rdparty/v8/src/preparser.cc
+++ /dev/null
@@ -1,1205 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "../include/v8stdint.h"
-#include "unicode.h"
-#include "globals.h"
-#include "checks.h"
-#include "allocation.h"
-#include "utils.h"
-#include "list.h"
-
-#include "scanner-base.h"
-#include "preparse-data.h"
-#include "preparser.h"
-
-namespace v8 {
-namespace preparser {
-
-// Preparsing checks a JavaScript program and emits preparse-data that helps
-// a later parsing to be faster.
-// See preparser-data.h for the data.
-
-// The PreParser checks that the syntax follows the grammar for JavaScript,
-// and collects some information about the program along the way.
-// The grammar check is only performed in order to understand the program
-// sufficiently to deduce some information about it, that can be used
-// to speed up later parsing. Finding errors is not the goal of pre-parsing,
-// rather it is to speed up properly written and correct programs.
-// That means that contextual checks (like a label being declared where
-// it is used) are generally omitted.
-
-namespace i = ::v8::internal;
-
-#define CHECK_OK ok); \
- if (!*ok) return -1; \
- ((void)0
-#define DUMMY ) // to make indentation work
-#undef DUMMY
-
-
-void PreParser::ReportUnexpectedToken(i::Token::Value token) {
- // We don't report stack overflows here, to avoid increasing the
- // stack depth even further. Instead we report it after parsing is
- // over, in ParseProgram.
- if (token == i::Token::ILLEGAL && stack_overflow_) {
- return;
- }
- i::JavaScriptScanner::Location source_location = scanner_->location();
-
- // Four of the tokens are treated specially
- switch (token) {
- case i::Token::EOS:
- return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
- "unexpected_eos", NULL);
- case i::Token::NUMBER:
- return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
- "unexpected_token_number", NULL);
- case i::Token::STRING:
- return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
- "unexpected_token_string", NULL);
- case i::Token::IDENTIFIER:
- case i::Token::FUTURE_RESERVED_WORD:
- return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
- "unexpected_token_identifier", NULL);
- default:
- const char* name = i::Token::String(token);
- ReportMessageAt(source_location.beg_pos, source_location.end_pos,
- "unexpected_token", name);
- }
-}
-
-
-PreParser::SourceElements PreParser::ParseSourceElements(int end_token,
- bool* ok) {
- // SourceElements ::
- // (Statement)* <end_token>
-
- while (peek() != end_token) {
- ParseStatement(CHECK_OK);
- }
- return kUnknownSourceElements;
-}
-
-
-PreParser::Statement PreParser::ParseStatement(bool* ok) {
- // Statement ::
- // Block
- // VariableStatement
- // EmptyStatement
- // ExpressionStatement
- // IfStatement
- // IterationStatement
- // ContinueStatement
- // BreakStatement
- // ReturnStatement
- // WithStatement
- // LabelledStatement
- // SwitchStatement
- // ThrowStatement
- // TryStatement
- // DebuggerStatement
-
- // Note: Since labels can only be used by 'break' and 'continue'
- // statements, which themselves are only valid within blocks,
- // iterations or 'switch' statements (i.e., BreakableStatements),
- // labels can be simply ignored in all other cases; except for
- // trivial labeled break statements 'label: break label' which is
- // parsed into an empty statement.
-
- // Keep the source position of the statement
- switch (peek()) {
- case i::Token::LBRACE:
- return ParseBlock(ok);
-
- case i::Token::CONST:
- case i::Token::VAR:
- return ParseVariableStatement(ok);
-
- case i::Token::SEMICOLON:
- Next();
- return kUnknownStatement;
-
- case i::Token::IF:
- return ParseIfStatement(ok);
-
- case i::Token::DO:
- return ParseDoWhileStatement(ok);
-
- case i::Token::WHILE:
- return ParseWhileStatement(ok);
-
- case i::Token::FOR:
- return ParseForStatement(ok);
-
- case i::Token::CONTINUE:
- return ParseContinueStatement(ok);
-
- case i::Token::BREAK:
- return ParseBreakStatement(ok);
-
- case i::Token::RETURN:
- return ParseReturnStatement(ok);
-
- case i::Token::WITH:
- return ParseWithStatement(ok);
-
- case i::Token::SWITCH:
- return ParseSwitchStatement(ok);
-
- case i::Token::THROW:
- return ParseThrowStatement(ok);
-
- case i::Token::TRY:
- return ParseTryStatement(ok);
-
- case i::Token::FUNCTION:
- return ParseFunctionDeclaration(ok);
-
- case i::Token::NATIVE:
- return ParseNativeDeclaration(ok);
-
- case i::Token::DEBUGGER:
- return ParseDebuggerStatement(ok);
-
- default:
- return ParseExpressionOrLabelledStatement(ok);
- }
-}
-
-
-PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
- // FunctionDeclaration ::
- // 'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
- Expect(i::Token::FUNCTION, CHECK_OK);
- ParseIdentifier(CHECK_OK);
- ParseFunctionLiteral(CHECK_OK);
- return kUnknownStatement;
-}
-
-
-// Language extension which is only enabled for source files loaded
-// through the API's extension mechanism. A native function
-// declaration is resolved by looking up the function through a
-// callback provided by the extension.
-PreParser::Statement PreParser::ParseNativeDeclaration(bool* ok) {
- Expect(i::Token::NATIVE, CHECK_OK);
- Expect(i::Token::FUNCTION, CHECK_OK);
- ParseIdentifier(CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
- bool done = (peek() == i::Token::RPAREN);
- while (!done) {
- ParseIdentifier(CHECK_OK);
- done = (peek() == i::Token::RPAREN);
- if (!done) {
- Expect(i::Token::COMMA, CHECK_OK);
- }
- }
- Expect(i::Token::RPAREN, CHECK_OK);
- Expect(i::Token::SEMICOLON, CHECK_OK);
- return kUnknownStatement;
-}
-
-
-PreParser::Statement PreParser::ParseBlock(bool* ok) {
- // Block ::
- // '{' Statement* '}'
-
- // Note that a Block does not introduce a new execution scope!
- // (ECMA-262, 3rd, 12.2)
- //
- Expect(i::Token::LBRACE, CHECK_OK);
- while (peek() != i::Token::RBRACE) {
- ParseStatement(CHECK_OK);
- }
- Expect(i::Token::RBRACE, CHECK_OK);
- return kUnknownStatement;
-}
-
-
-PreParser::Statement PreParser::ParseVariableStatement(bool* ok) {
- // VariableStatement ::
- // VariableDeclarations ';'
-
- Statement result = ParseVariableDeclarations(true, NULL, CHECK_OK);
- ExpectSemicolon(CHECK_OK);
- return result;
-}
-
-
-// If the variable declaration declares exactly one non-const
-// variable, then *var is set to that variable. In all other cases,
-// *var is untouched; in particular, it is the caller's responsibility
-// to initialize it properly. This mechanism is also used for the parsing
-// of 'for-in' loops.
-PreParser::Statement PreParser::ParseVariableDeclarations(bool accept_IN,
- int* num_decl,
- bool* ok) {
- // VariableDeclarations ::
- // ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
-
- if (peek() == i::Token::VAR) {
- Consume(i::Token::VAR);
- } else if (peek() == i::Token::CONST) {
- Consume(i::Token::CONST);
- } else {
- *ok = false;
- return 0;
- }
-
- // The scope of a variable/const declared anywhere inside a function
- // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). .
- int nvars = 0; // the number of variables declared
- do {
- // Parse variable name.
- if (nvars > 0) Consume(i::Token::COMMA);
- ParseIdentifier(CHECK_OK);
- nvars++;
- if (peek() == i::Token::ASSIGN) {
- Expect(i::Token::ASSIGN, CHECK_OK);
- ParseAssignmentExpression(accept_IN, CHECK_OK);
- }
- } while (peek() == i::Token::COMMA);
-
- if (num_decl != NULL) *num_decl = nvars;
- return kUnknownStatement;
-}
-
-
-PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(
- bool* ok) {
- // ExpressionStatement | LabelledStatement ::
- // Expression ';'
- // Identifier ':' Statement
-
- Expression expr = ParseExpression(true, CHECK_OK);
- if (peek() == i::Token::COLON && expr == kIdentifierExpression) {
- Consume(i::Token::COLON);
- return ParseStatement(ok);
- }
- // Parsed expression statement.
- ExpectSemicolon(CHECK_OK);
- return kUnknownStatement;
-}
-
-
-PreParser::Statement PreParser::ParseIfStatement(bool* ok) {
- // IfStatement ::
- // 'if' '(' Expression ')' Statement ('else' Statement)?
-
- Expect(i::Token::IF, CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
- ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
- ParseStatement(CHECK_OK);
- if (peek() == i::Token::ELSE) {
- Next();
- ParseStatement(CHECK_OK);
- }
- return kUnknownStatement;
-}
-
-
-PreParser::Statement PreParser::ParseContinueStatement(bool* ok) {
- // ContinueStatement ::
- // 'continue' [no line terminator] Identifier? ';'
-
- Expect(i::Token::CONTINUE, CHECK_OK);
- i::Token::Value tok = peek();
- if (!scanner_->has_line_terminator_before_next() &&
- tok != i::Token::SEMICOLON &&
- tok != i::Token::RBRACE &&
- tok != i::Token::EOS) {
- ParseIdentifier(CHECK_OK);
- }
- ExpectSemicolon(CHECK_OK);
- return kUnknownStatement;
-}
-
-
-PreParser::Statement PreParser::ParseBreakStatement(bool* ok) {
- // BreakStatement ::
- // 'break' [no line terminator] Identifier? ';'
-
- Expect(i::Token::BREAK, CHECK_OK);
- i::Token::Value tok = peek();
- if (!scanner_->has_line_terminator_before_next() &&
- tok != i::Token::SEMICOLON &&
- tok != i::Token::RBRACE &&
- tok != i::Token::EOS) {
- ParseIdentifier(CHECK_OK);
- }
- ExpectSemicolon(CHECK_OK);
- return kUnknownStatement;
-}
-
-
-PreParser::Statement PreParser::ParseReturnStatement(bool* ok) {
- // ReturnStatement ::
- // 'return' [no line terminator] Expression? ';'
-
- // Consume the return token. It is necessary to do the before
- // reporting any errors on it, because of the way errors are
- // reported (underlining).
- Expect(i::Token::RETURN, CHECK_OK);
-
- // An ECMAScript program is considered syntactically incorrect if it
- // contains a return statement that is not within the body of a
- // function. See ECMA-262, section 12.9, page 67.
- // This is not handled during preparsing.
-
- i::Token::Value tok = peek();
- if (!scanner_->has_line_terminator_before_next() &&
- tok != i::Token::SEMICOLON &&
- tok != i::Token::RBRACE &&
- tok != i::Token::EOS) {
- ParseExpression(true, CHECK_OK);
- }
- ExpectSemicolon(CHECK_OK);
- return kUnknownStatement;
-}
-
-
-PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
- // WithStatement ::
- // 'with' '(' Expression ')' Statement
- Expect(i::Token::WITH, CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
- ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
-
- scope_->EnterWith();
- ParseStatement(CHECK_OK);
- scope_->LeaveWith();
- return kUnknownStatement;
-}
-
-
-PreParser::Statement PreParser::ParseSwitchStatement(bool* ok) {
- // SwitchStatement ::
- // 'switch' '(' Expression ')' '{' CaseClause* '}'
-
- Expect(i::Token::SWITCH, CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
- ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
-
- Expect(i::Token::LBRACE, CHECK_OK);
- i::Token::Value token = peek();
- while (token != i::Token::RBRACE) {
- if (token == i::Token::CASE) {
- Expect(i::Token::CASE, CHECK_OK);
- ParseExpression(true, CHECK_OK);
- Expect(i::Token::COLON, CHECK_OK);
- } else if (token == i::Token::DEFAULT) {
- Expect(i::Token::DEFAULT, CHECK_OK);
- Expect(i::Token::COLON, CHECK_OK);
- } else {
- ParseStatement(CHECK_OK);
- }
- token = peek();
- }
- Expect(i::Token::RBRACE, CHECK_OK);
-
- return kUnknownStatement;
-}
-
-
-PreParser::Statement PreParser::ParseDoWhileStatement(bool* ok) {
- // DoStatement ::
- // 'do' Statement 'while' '(' Expression ')' ';'
-
- Expect(i::Token::DO, CHECK_OK);
- ParseStatement(CHECK_OK);
- Expect(i::Token::WHILE, CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
- ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
- return kUnknownStatement;
-}
-
-
-PreParser::Statement PreParser::ParseWhileStatement(bool* ok) {
- // WhileStatement ::
- // 'while' '(' Expression ')' Statement
-
- Expect(i::Token::WHILE, CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
- ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
- ParseStatement(CHECK_OK);
- return kUnknownStatement;
-}
-
-
-PreParser::Statement PreParser::ParseForStatement(bool* ok) {
- // ForStatement ::
- // 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
-
- Expect(i::Token::FOR, CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
- if (peek() != i::Token::SEMICOLON) {
- if (peek() == i::Token::VAR || peek() == i::Token::CONST) {
- int decl_count;
- ParseVariableDeclarations(false, &decl_count, CHECK_OK);
- if (peek() == i::Token::IN && decl_count == 1) {
- Expect(i::Token::IN, CHECK_OK);
- ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
-
- ParseStatement(CHECK_OK);
- return kUnknownStatement;
- }
- } else {
- ParseExpression(false, CHECK_OK);
- if (peek() == i::Token::IN) {
- Expect(i::Token::IN, CHECK_OK);
- ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
-
- ParseStatement(CHECK_OK);
- return kUnknownStatement;
- }
- }
- }
-
- // Parsed initializer at this point.
- Expect(i::Token::SEMICOLON, CHECK_OK);
-
- if (peek() != i::Token::SEMICOLON) {
- ParseExpression(true, CHECK_OK);
- }
- Expect(i::Token::SEMICOLON, CHECK_OK);
-
- if (peek() != i::Token::RPAREN) {
- ParseExpression(true, CHECK_OK);
- }
- Expect(i::Token::RPAREN, CHECK_OK);
-
- ParseStatement(CHECK_OK);
- return kUnknownStatement;
-}
-
-
-PreParser::Statement PreParser::ParseThrowStatement(bool* ok) {
- // ThrowStatement ::
- // 'throw' [no line terminator] Expression ';'
-
- Expect(i::Token::THROW, CHECK_OK);
- if (scanner_->has_line_terminator_before_next()) {
- i::JavaScriptScanner::Location pos = scanner_->location();
- ReportMessageAt(pos.beg_pos, pos.end_pos,
- "newline_after_throw", NULL);
- *ok = false;
- return kUnknownStatement;
- }
- ParseExpression(true, CHECK_OK);
- ExpectSemicolon(CHECK_OK);
-
- return kUnknownStatement;
-}
-
-
-PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
- // TryStatement ::
- // 'try' Block Catch
- // 'try' Block Finally
- // 'try' Block Catch Finally
- //
- // Catch ::
- // 'catch' '(' Identifier ')' Block
- //
- // Finally ::
- // 'finally' Block
-
- // In preparsing, allow any number of catch/finally blocks, including zero
- // of both.
-
- Expect(i::Token::TRY, CHECK_OK);
-
- ParseBlock(CHECK_OK);
-
- bool catch_or_finally_seen = false;
- if (peek() == i::Token::CATCH) {
- Consume(i::Token::CATCH);
- Expect(i::Token::LPAREN, CHECK_OK);
- ParseIdentifier(CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
- scope_->EnterWith();
- ParseBlock(ok);
- scope_->LeaveWith();
- if (!*ok) return kUnknownStatement;
- catch_or_finally_seen = true;
- }
- if (peek() == i::Token::FINALLY) {
- Consume(i::Token::FINALLY);
- ParseBlock(CHECK_OK);
- catch_or_finally_seen = true;
- }
- if (!catch_or_finally_seen) {
- *ok = false;
- }
- return kUnknownStatement;
-}
-
-
-PreParser::Statement PreParser::ParseDebuggerStatement(bool* ok) {
- // In ECMA-262 'debugger' is defined as a reserved keyword. In some browser
- // contexts this is used as a statement which invokes the debugger as if a
- // break point is present.
- // DebuggerStatement ::
- // 'debugger' ';'
-
- Expect(i::Token::DEBUGGER, CHECK_OK);
- ExpectSemicolon(CHECK_OK);
- return kUnknownStatement;
-}
-
-
-// Precedence = 1
-PreParser::Expression PreParser::ParseExpression(bool accept_IN, bool* ok) {
- // Expression ::
- // AssignmentExpression
- // Expression ',' AssignmentExpression
-
- Expression result = ParseAssignmentExpression(accept_IN, CHECK_OK);
- while (peek() == i::Token::COMMA) {
- Expect(i::Token::COMMA, CHECK_OK);
- ParseAssignmentExpression(accept_IN, CHECK_OK);
- result = kUnknownExpression;
- }
- return result;
-}
-
-
-// Precedence = 2
-PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
- bool* ok) {
- // AssignmentExpression ::
- // ConditionalExpression
- // LeftHandSideExpression AssignmentOperator AssignmentExpression
-
- Expression expression = ParseConditionalExpression(accept_IN, CHECK_OK);
-
- if (!i::Token::IsAssignmentOp(peek())) {
- // Parsed conditional expression only (no assignment).
- return expression;
- }
-
- i::Token::Value op = Next(); // Get assignment operator.
- ParseAssignmentExpression(accept_IN, CHECK_OK);
-
- if ((op == i::Token::ASSIGN) && (expression == kThisPropertyExpression)) {
- scope_->AddProperty();
- }
-
- return kUnknownExpression;
-}
-
-
-// Precedence = 3
-PreParser::Expression PreParser::ParseConditionalExpression(bool accept_IN,
- bool* ok) {
- // ConditionalExpression ::
- // LogicalOrExpression
- // LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
-
- // We start using the binary expression parser for prec >= 4 only!
- Expression expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
- if (peek() != i::Token::CONDITIONAL) return expression;
- Consume(i::Token::CONDITIONAL);
- // In parsing the first assignment expression in conditional
- // expressions we always accept the 'in' keyword; see ECMA-262,
- // section 11.12, page 58.
- ParseAssignmentExpression(true, CHECK_OK);
- Expect(i::Token::COLON, CHECK_OK);
- ParseAssignmentExpression(accept_IN, CHECK_OK);
- return kUnknownExpression;
-}
-
-
-int PreParser::Precedence(i::Token::Value tok, bool accept_IN) {
- if (tok == i::Token::IN && !accept_IN)
- return 0; // 0 precedence will terminate binary expression parsing
-
- return i::Token::Precedence(tok);
-}
-
-
-// Precedence >= 4
-PreParser::Expression PreParser::ParseBinaryExpression(int prec,
- bool accept_IN,
- bool* ok) {
- Expression result = ParseUnaryExpression(CHECK_OK);
- for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
- // prec1 >= 4
- while (Precedence(peek(), accept_IN) == prec1) {
- Next();
- ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
- result = kUnknownExpression;
- }
- }
- return result;
-}
-
-
-PreParser::Expression PreParser::ParseUnaryExpression(bool* ok) {
- // UnaryExpression ::
- // PostfixExpression
- // 'delete' UnaryExpression
- // 'void' UnaryExpression
- // 'typeof' UnaryExpression
- // '++' UnaryExpression
- // '--' UnaryExpression
- // '+' UnaryExpression
- // '-' UnaryExpression
- // '~' UnaryExpression
- // '!' UnaryExpression
-
- i::Token::Value op = peek();
- if (i::Token::IsUnaryOp(op) || i::Token::IsCountOp(op)) {
- op = Next();
- ParseUnaryExpression(ok);
- return kUnknownExpression;
- } else {
- return ParsePostfixExpression(ok);
- }
-}
-
-
-PreParser::Expression PreParser::ParsePostfixExpression(bool* ok) {
- // PostfixExpression ::
- // LeftHandSideExpression ('++' | '--')?
-
- Expression expression = ParseLeftHandSideExpression(CHECK_OK);
- if (!scanner_->has_line_terminator_before_next() &&
- i::Token::IsCountOp(peek())) {
- Next();
- return kUnknownExpression;
- }
- return expression;
-}
-
-
-PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) {
- // LeftHandSideExpression ::
- // (NewExpression | MemberExpression) ...
-
- Expression result;
- if (peek() == i::Token::NEW) {
- result = ParseNewExpression(CHECK_OK);
- } else {
- result = ParseMemberExpression(CHECK_OK);
- }
-
- while (true) {
- switch (peek()) {
- case i::Token::LBRACK: {
- Consume(i::Token::LBRACK);
- ParseExpression(true, CHECK_OK);
- Expect(i::Token::RBRACK, CHECK_OK);
- if (result == kThisExpression) {
- result = kThisPropertyExpression;
- } else {
- result = kUnknownExpression;
- }
- break;
- }
-
- case i::Token::LPAREN: {
- ParseArguments(CHECK_OK);
- result = kUnknownExpression;
- break;
- }
-
- case i::Token::PERIOD: {
- Consume(i::Token::PERIOD);
- ParseIdentifierName(CHECK_OK);
- if (result == kThisExpression) {
- result = kThisPropertyExpression;
- } else {
- result = kUnknownExpression;
- }
- break;
- }
-
- default:
- return result;
- }
- }
-}
-
-
-PreParser::Expression PreParser::ParseNewExpression(bool* ok) {
- // NewExpression ::
- // ('new')+ MemberExpression
-
- // The grammar for new expressions is pretty warped. The keyword
- // 'new' can either be a part of the new expression (where it isn't
- // followed by an argument list) or a part of the member expression,
- // where it must be followed by an argument list. To accommodate
- // this, we parse the 'new' keywords greedily and keep track of how
- // many we have parsed. This information is then passed on to the
- // member expression parser, which is only allowed to match argument
- // lists as long as it has 'new' prefixes left
- unsigned new_count = 0;
- do {
- Consume(i::Token::NEW);
- new_count++;
- } while (peek() == i::Token::NEW);
-
- return ParseMemberWithNewPrefixesExpression(new_count, ok);
-}
-
-
-PreParser::Expression PreParser::ParseMemberExpression(bool* ok) {
- return ParseMemberWithNewPrefixesExpression(0, ok);
-}
-
-
-PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
- unsigned new_count, bool* ok) {
- // MemberExpression ::
- // (PrimaryExpression | FunctionLiteral)
- // ('[' Expression ']' | '.' Identifier | Arguments)*
-
- // Parse the initial primary or function expression.
- Expression result = kUnknownExpression;
- if (peek() == i::Token::FUNCTION) {
- Consume(i::Token::FUNCTION);
- if (peek_any_identifier()) {
- ParseIdentifier(CHECK_OK);
- }
- result = ParseFunctionLiteral(CHECK_OK);
- } else {
- result = ParsePrimaryExpression(CHECK_OK);
- }
-
- while (true) {
- switch (peek()) {
- case i::Token::LBRACK: {
- Consume(i::Token::LBRACK);
- ParseExpression(true, CHECK_OK);
- Expect(i::Token::RBRACK, CHECK_OK);
- if (result == kThisExpression) {
- result = kThisPropertyExpression;
- } else {
- result = kUnknownExpression;
- }
- break;
- }
- case i::Token::PERIOD: {
- Consume(i::Token::PERIOD);
- ParseIdentifierName(CHECK_OK);
- if (result == kThisExpression) {
- result = kThisPropertyExpression;
- } else {
- result = kUnknownExpression;
- }
- break;
- }
- case i::Token::LPAREN: {
- if (new_count == 0) return result;
- // Consume one of the new prefixes (already parsed).
- ParseArguments(CHECK_OK);
- new_count--;
- result = kUnknownExpression;
- break;
- }
- default:
- return result;
- }
- }
-}
-
-
-PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) {
- // PrimaryExpression ::
- // 'this'
- // 'null'
- // 'true'
- // 'false'
- // Identifier
- // Number
- // String
- // ArrayLiteral
- // ObjectLiteral
- // RegExpLiteral
- // '(' Expression ')'
-
- Expression result = kUnknownExpression;
- switch (peek()) {
- case i::Token::THIS: {
- Next();
- result = kThisExpression;
- break;
- }
-
- case i::Token::IDENTIFIER:
- case i::Token::FUTURE_RESERVED_WORD: {
- ParseIdentifier(CHECK_OK);
- result = kIdentifierExpression;
- break;
- }
-
- case i::Token::NULL_LITERAL:
- case i::Token::TRUE_LITERAL:
- case i::Token::FALSE_LITERAL:
- case i::Token::NUMBER: {
- Next();
- break;
- }
- case i::Token::STRING: {
- Next();
- result = GetStringSymbol();
- break;
- }
-
- case i::Token::ASSIGN_DIV:
- result = ParseRegExpLiteral(true, CHECK_OK);
- break;
-
- case i::Token::DIV:
- result = ParseRegExpLiteral(false, CHECK_OK);
- break;
-
- case i::Token::LBRACK:
- result = ParseArrayLiteral(CHECK_OK);
- break;
-
- case i::Token::LBRACE:
- result = ParseObjectLiteral(CHECK_OK);
- break;
-
- case i::Token::LPAREN:
- Consume(i::Token::LPAREN);
- parenthesized_function_ = (peek() == i::Token::FUNCTION);
- result = ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
- if (result == kIdentifierExpression) result = kUnknownExpression;
- break;
-
- case i::Token::MOD:
- result = ParseV8Intrinsic(CHECK_OK);
- break;
-
- default: {
- Next();
- *ok = false;
- return kUnknownExpression;
- }
- }
-
- return result;
-}
-
-
-PreParser::Expression PreParser::ParseArrayLiteral(bool* ok) {
- // ArrayLiteral ::
- // '[' Expression? (',' Expression?)* ']'
- Expect(i::Token::LBRACK, CHECK_OK);
- while (peek() != i::Token::RBRACK) {
- if (peek() != i::Token::COMMA) {
- ParseAssignmentExpression(true, CHECK_OK);
- }
- if (peek() != i::Token::RBRACK) {
- Expect(i::Token::COMMA, CHECK_OK);
- }
- }
- Expect(i::Token::RBRACK, CHECK_OK);
-
- scope_->NextMaterializedLiteralIndex();
- return kUnknownExpression;
-}
-
-
-PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
- // ObjectLiteral ::
- // '{' (
- // ((IdentifierName | String | Number) ':' AssignmentExpression)
- // | (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
- // )*[','] '}'
-
- Expect(i::Token::LBRACE, CHECK_OK);
- while (peek() != i::Token::RBRACE) {
- i::Token::Value next = peek();
- switch (next) {
- case i::Token::IDENTIFIER:
- case i::Token::FUTURE_RESERVED_WORD: {
- bool is_getter = false;
- bool is_setter = false;
- ParseIdentifierOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
- if ((is_getter || is_setter) && peek() != i::Token::COLON) {
- i::Token::Value name = Next();
- bool is_keyword = i::Token::IsKeyword(name);
- if (name != i::Token::IDENTIFIER &&
- name != i::Token::FUTURE_RESERVED_WORD &&
- name != i::Token::NUMBER &&
- name != i::Token::STRING &&
- !is_keyword) {
- *ok = false;
- return kUnknownExpression;
- }
- if (!is_keyword) {
- LogSymbol();
- }
- ParseFunctionLiteral(CHECK_OK);
- if (peek() != i::Token::RBRACE) {
- Expect(i::Token::COMMA, CHECK_OK);
- }
- continue; // restart the while
- }
- break;
- }
- case i::Token::STRING:
- Consume(next);
- GetStringSymbol();
- break;
- case i::Token::NUMBER:
- Consume(next);
- break;
- default:
- if (i::Token::IsKeyword(next)) {
- Consume(next);
- } else {
- // Unexpected token.
- *ok = false;
- return kUnknownExpression;
- }
- }
-
- Expect(i::Token::COLON, CHECK_OK);
- ParseAssignmentExpression(true, CHECK_OK);
-
- // TODO(1240767): Consider allowing trailing comma.
- if (peek() != i::Token::RBRACE) Expect(i::Token::COMMA, CHECK_OK);
- }
- Expect(i::Token::RBRACE, CHECK_OK);
-
- scope_->NextMaterializedLiteralIndex();
- return kUnknownExpression;
-}
-
-
-PreParser::Expression PreParser::ParseRegExpLiteral(bool seen_equal,
- bool* ok) {
- if (!scanner_->ScanRegExpPattern(seen_equal)) {
- Next();
- i::JavaScriptScanner::Location location = scanner_->location();
- ReportMessageAt(location.beg_pos, location.end_pos,
- "unterminated_regexp", NULL);
- *ok = false;
- return kUnknownExpression;
- }
-
- scope_->NextMaterializedLiteralIndex();
-
- if (!scanner_->ScanRegExpFlags()) {
- Next();
- i::JavaScriptScanner::Location location = scanner_->location();
- ReportMessageAt(location.beg_pos, location.end_pos,
- "invalid_regexp_flags", NULL);
- *ok = false;
- return kUnknownExpression;
- }
- Next();
- return kUnknownExpression;
-}
-
-
-PreParser::Arguments PreParser::ParseArguments(bool* ok) {
- // Arguments ::
- // '(' (AssignmentExpression)*[','] ')'
-
- Expect(i::Token::LPAREN, CHECK_OK);
- bool done = (peek() == i::Token::RPAREN);
- int argc = 0;
- while (!done) {
- ParseAssignmentExpression(true, CHECK_OK);
- argc++;
- done = (peek() == i::Token::RPAREN);
- if (!done) Expect(i::Token::COMMA, CHECK_OK);
- }
- Expect(i::Token::RPAREN, CHECK_OK);
- return argc;
-}
-
-
-PreParser::Expression PreParser::ParseFunctionLiteral(bool* ok) {
- // Function ::
- // '(' FormalParameterList? ')' '{' FunctionBody '}'
-
- // Parse function body.
- ScopeType outer_scope_type = scope_->type();
- bool inside_with = scope_->IsInsideWith();
- Scope function_scope(&scope_, kFunctionScope);
-
- // FormalParameterList ::
- // '(' (Identifier)*[','] ')'
- Expect(i::Token::LPAREN, CHECK_OK);
- bool done = (peek() == i::Token::RPAREN);
- while (!done) {
- ParseIdentifier(CHECK_OK);
- done = (peek() == i::Token::RPAREN);
- if (!done) {
- Expect(i::Token::COMMA, CHECK_OK);
- }
- }
- Expect(i::Token::RPAREN, CHECK_OK);
-
- Expect(i::Token::LBRACE, CHECK_OK);
- int function_block_pos = scanner_->location().beg_pos;
-
- // Determine if the function will be lazily compiled.
- // Currently only happens to top-level functions.
- // Optimistically assume that all top-level functions are lazily compiled.
- bool is_lazily_compiled = (outer_scope_type == kTopLevelScope &&
- !inside_with && allow_lazy_ &&
- !parenthesized_function_);
- parenthesized_function_ = false;
-
- if (is_lazily_compiled) {
- log_->PauseRecording();
- ParseSourceElements(i::Token::RBRACE, ok);
- log_->ResumeRecording();
- if (!*ok) return kUnknownExpression;
-
- Expect(i::Token::RBRACE, CHECK_OK);
-
- // Position right after terminal '}'.
- int end_pos = scanner_->location().end_pos;
- log_->LogFunction(function_block_pos, end_pos,
- function_scope.materialized_literal_count(),
- function_scope.expected_properties());
- } else {
- ParseSourceElements(i::Token::RBRACE, CHECK_OK);
- Expect(i::Token::RBRACE, CHECK_OK);
- }
- return kUnknownExpression;
-}
-
-
-PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
- // CallRuntime ::
- // '%' Identifier Arguments
-
- Expect(i::Token::MOD, CHECK_OK);
- ParseIdentifier(CHECK_OK);
- ParseArguments(CHECK_OK);
-
- return kUnknownExpression;
-}
-
-
-void PreParser::ExpectSemicolon(bool* ok) {
- // Check for automatic semicolon insertion according to
- // the rules given in ECMA-262, section 7.9, page 21.
- i::Token::Value tok = peek();
- if (tok == i::Token::SEMICOLON) {
- Next();
- return;
- }
- if (scanner_->has_line_terminator_before_next() ||
- tok == i::Token::RBRACE ||
- tok == i::Token::EOS) {
- return;
- }
- Expect(i::Token::SEMICOLON, ok);
-}
-
-
-void PreParser::LogSymbol() {
- int identifier_pos = scanner_->location().beg_pos;
- if (scanner_->is_literal_ascii()) {
- log_->LogAsciiSymbol(identifier_pos, scanner_->literal_ascii_string());
- } else {
- log_->LogUC16Symbol(identifier_pos, scanner_->literal_uc16_string());
- }
-}
-
-
-PreParser::Identifier PreParser::GetIdentifierSymbol() {
- LogSymbol();
- return kUnknownIdentifier;
-}
-
-
-PreParser::Expression PreParser::GetStringSymbol() {
- LogSymbol();
- return kUnknownExpression;
-}
-
-
-PreParser::Identifier PreParser::ParseIdentifier(bool* ok) {
- if (!Check(i::Token::FUTURE_RESERVED_WORD)) {
- Expect(i::Token::IDENTIFIER, ok);
- }
- if (!*ok) return kUnknownIdentifier;
- return GetIdentifierSymbol();
-}
-
-
-PreParser::Identifier PreParser::ParseIdentifierName(bool* ok) {
- i::Token::Value next = Next();
- if (i::Token::IsKeyword(next)) {
- int pos = scanner_->location().beg_pos;
- const char* keyword = i::Token::String(next);
- log_->LogAsciiSymbol(pos, i::Vector<const char>(keyword,
- i::StrLength(keyword)));
- return kUnknownExpression;
- }
- if (next == i::Token::IDENTIFIER ||
- next == i::Token::FUTURE_RESERVED_WORD) {
- return GetIdentifierSymbol();
- }
- *ok = false;
- return kUnknownIdentifier;
-}
-
-
-// This function reads an identifier and determines whether or not it
-// is 'get' or 'set'.
-PreParser::Identifier PreParser::ParseIdentifierOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok) {
- PreParser::Identifier result = ParseIdentifier(CHECK_OK);
- if (scanner_->is_literal_ascii() && scanner_->literal_length() == 3) {
- const char* token = scanner_->literal_ascii_string().start();
- *is_get = strncmp(token, "get", 3) == 0;
- *is_set = !*is_get && strncmp(token, "set", 3) == 0;
- }
- return result;
-}
-
-bool PreParser::peek_any_identifier() {
- i::Token::Value next = peek();
- return next == i::Token::IDENTIFIER ||
- next == i::Token::FUTURE_RESERVED_WORD;
-}
-
-#undef CHECK_OK
-} } // v8::preparser
diff --git a/src/3rdparty/v8/src/preparser.h b/src/3rdparty/v8/src/preparser.h
deleted file mode 100644
index b7fa6c7..0000000
--- a/src/3rdparty/v8/src/preparser.h
+++ /dev/null
@@ -1,278 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PREPARSER_H
-#define V8_PREPARSER_H
-
-namespace v8 {
-namespace preparser {
-
-// Preparsing checks a JavaScript program and emits preparse-data that helps
-// a later parsing to be faster.
-// See preparse-data.h for the data.
-
-// The PreParser checks that the syntax follows the grammar for JavaScript,
-// and collects some information about the program along the way.
-// The grammar check is only performed in order to understand the program
-// sufficiently to deduce some information about it, that can be used
-// to speed up later parsing. Finding errors is not the goal of pre-parsing,
-// rather it is to speed up properly written and correct programs.
-// That means that contextual checks (like a label being declared where
-// it is used) are generally omitted.
-
-namespace i = v8::internal;
-
-class PreParser {
- public:
- enum PreParseResult {
- kPreParseStackOverflow,
- kPreParseSuccess
- };
-
- ~PreParser() { }
-
- // Pre-parse the program from the character stream; returns true on
- // success (even if parsing failed, the pre-parse data successfully
- // captured the syntax error), and false if a stack-overflow happened
- // during parsing.
- static PreParseResult PreParseProgram(i::JavaScriptScanner* scanner,
- i::ParserRecorder* log,
- bool allow_lazy,
- uintptr_t stack_limit) {
- return PreParser(scanner, log, stack_limit, allow_lazy).PreParse();
- }
-
- private:
- enum ScopeType {
- kTopLevelScope,
- kFunctionScope
- };
-
- // Types that allow us to recognize simple this-property assignments.
- // A simple this-property assignment is a statement on the form
- // "this.propertyName = {primitive constant or function parameter name);"
- // where propertyName isn't "__proto__".
- // The result is only relevant if the function body contains only
- // simple this-property assignments.
-
- enum StatementType {
- kUnknownStatement
- };
-
- enum ExpressionType {
- kUnknownExpression,
- kIdentifierExpression, // Used to detect labels.
- kThisExpression,
- kThisPropertyExpression
- };
-
- enum IdentifierType {
- kUnknownIdentifier
- };
-
- enum SourceElementTypes {
- kUnknownSourceElements
- };
-
- typedef int SourceElements;
- typedef int Expression;
- typedef int Statement;
- typedef int Identifier;
- typedef int Arguments;
-
- class Scope {
- public:
- Scope(Scope** variable, ScopeType type)
- : variable_(variable),
- prev_(*variable),
- type_(type),
- materialized_literal_count_(0),
- expected_properties_(0),
- with_nesting_count_(0) {
- *variable = this;
- }
- ~Scope() { *variable_ = prev_; }
- void NextMaterializedLiteralIndex() { materialized_literal_count_++; }
- void AddProperty() { expected_properties_++; }
- ScopeType type() { return type_; }
- int expected_properties() { return expected_properties_; }
- int materialized_literal_count() { return materialized_literal_count_; }
- bool IsInsideWith() { return with_nesting_count_ != 0; }
- void EnterWith() { with_nesting_count_++; }
- void LeaveWith() { with_nesting_count_--; }
-
- private:
- Scope** const variable_;
- Scope* const prev_;
- const ScopeType type_;
- int materialized_literal_count_;
- int expected_properties_;
- int with_nesting_count_;
- };
-
- // Private constructor only used in PreParseProgram.
- PreParser(i::JavaScriptScanner* scanner,
- i::ParserRecorder* log,
- uintptr_t stack_limit,
- bool allow_lazy)
- : scanner_(scanner),
- log_(log),
- scope_(NULL),
- stack_limit_(stack_limit),
- stack_overflow_(false),
- allow_lazy_(true),
- parenthesized_function_(false) { }
-
- // Preparse the program. Only called in PreParseProgram after creating
- // the instance.
- PreParseResult PreParse() {
- Scope top_scope(&scope_, kTopLevelScope);
- bool ok = true;
- ParseSourceElements(i::Token::EOS, &ok);
- if (stack_overflow_) return kPreParseStackOverflow;
- if (!ok) {
- ReportUnexpectedToken(scanner_->current_token());
- }
- return kPreParseSuccess;
- }
-
- // Report syntax error
- void ReportUnexpectedToken(i::Token::Value token);
- void ReportMessageAt(int start_pos,
- int end_pos,
- const char* type,
- const char* name_opt) {
- log_->LogMessage(start_pos, end_pos, type, name_opt);
- }
-
- // All ParseXXX functions take as the last argument an *ok parameter
- // which is set to false if parsing failed; it is unchanged otherwise.
- // By making the 'exception handling' explicit, we are forced to check
- // for failure at the call sites.
- SourceElements ParseSourceElements(int end_token, bool* ok);
- Statement ParseStatement(bool* ok);
- Statement ParseFunctionDeclaration(bool* ok);
- Statement ParseNativeDeclaration(bool* ok);
- Statement ParseBlock(bool* ok);
- Statement ParseVariableStatement(bool* ok);
- Statement ParseVariableDeclarations(bool accept_IN, int* num_decl, bool* ok);
- Statement ParseExpressionOrLabelledStatement(bool* ok);
- Statement ParseIfStatement(bool* ok);
- Statement ParseContinueStatement(bool* ok);
- Statement ParseBreakStatement(bool* ok);
- Statement ParseReturnStatement(bool* ok);
- Statement ParseWithStatement(bool* ok);
- Statement ParseSwitchStatement(bool* ok);
- Statement ParseDoWhileStatement(bool* ok);
- Statement ParseWhileStatement(bool* ok);
- Statement ParseForStatement(bool* ok);
- Statement ParseThrowStatement(bool* ok);
- Statement ParseTryStatement(bool* ok);
- Statement ParseDebuggerStatement(bool* ok);
-
- Expression ParseExpression(bool accept_IN, bool* ok);
- Expression ParseAssignmentExpression(bool accept_IN, bool* ok);
- Expression ParseConditionalExpression(bool accept_IN, bool* ok);
- Expression ParseBinaryExpression(int prec, bool accept_IN, bool* ok);
- Expression ParseUnaryExpression(bool* ok);
- Expression ParsePostfixExpression(bool* ok);
- Expression ParseLeftHandSideExpression(bool* ok);
- Expression ParseNewExpression(bool* ok);
- Expression ParseMemberExpression(bool* ok);
- Expression ParseMemberWithNewPrefixesExpression(unsigned new_count, bool* ok);
- Expression ParsePrimaryExpression(bool* ok);
- Expression ParseArrayLiteral(bool* ok);
- Expression ParseObjectLiteral(bool* ok);
- Expression ParseRegExpLiteral(bool seen_equal, bool* ok);
- Expression ParseV8Intrinsic(bool* ok);
-
- Arguments ParseArguments(bool* ok);
- Expression ParseFunctionLiteral(bool* ok);
-
- Identifier ParseIdentifier(bool* ok);
- Identifier ParseIdentifierName(bool* ok);
- Identifier ParseIdentifierOrGetOrSet(bool* is_get, bool* is_set, bool* ok);
-
- // Logs the currently parsed literal as a symbol in the preparser data.
- void LogSymbol();
- // Log the currently parsed identifier.
- Identifier GetIdentifierSymbol();
- // Log the currently parsed string literal.
- Expression GetStringSymbol();
-
- i::Token::Value peek() {
- if (stack_overflow_) return i::Token::ILLEGAL;
- return scanner_->peek();
- }
-
- i::Token::Value Next() {
- if (stack_overflow_) return i::Token::ILLEGAL;
- {
- int marker;
- if (reinterpret_cast<uintptr_t>(&marker) < stack_limit_) {
- // Further calls to peek/Next will return illegal token.
- // The current one will still be returned. It might already
- // have been seen using peek.
- stack_overflow_ = true;
- }
- }
- return scanner_->Next();
- }
-
- bool peek_any_identifier();
-
- void Consume(i::Token::Value token) { Next(); }
-
- void Expect(i::Token::Value token, bool* ok) {
- if (Next() != token) {
- *ok = false;
- }
- }
-
- bool Check(i::Token::Value token) {
- i::Token::Value next = peek();
- if (next == token) {
- Consume(next);
- return true;
- }
- return false;
- }
- void ExpectSemicolon(bool* ok);
-
- static int Precedence(i::Token::Value tok, bool accept_IN);
-
- i::JavaScriptScanner* scanner_;
- i::ParserRecorder* log_;
- Scope* scope_;
- uintptr_t stack_limit_;
- bool stack_overflow_;
- bool allow_lazy_;
- bool parenthesized_function_;
-};
-} } // v8::preparser
-
-#endif // V8_PREPARSER_H
diff --git a/src/3rdparty/v8/src/prettyprinter.cc b/src/3rdparty/v8/src/prettyprinter.cc
deleted file mode 100644
index 043ad1c..0000000
--- a/src/3rdparty/v8/src/prettyprinter.cc
+++ /dev/null
@@ -1,1530 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdarg.h>
-
-#include "v8.h"
-
-#include "prettyprinter.h"
-#include "scopes.h"
-#include "platform.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef DEBUG
-
-PrettyPrinter::PrettyPrinter() {
- output_ = NULL;
- size_ = 0;
- pos_ = 0;
-}
-
-
-PrettyPrinter::~PrettyPrinter() {
- DeleteArray(output_);
-}
-
-
-void PrettyPrinter::VisitBlock(Block* node) {
- if (!node->is_initializer_block()) Print("{ ");
- PrintStatements(node->statements());
- if (node->statements()->length() > 0) Print(" ");
- if (!node->is_initializer_block()) Print("}");
-}
-
-
-void PrettyPrinter::VisitDeclaration(Declaration* node) {
- Print("var ");
- PrintLiteral(node->proxy()->name(), false);
- if (node->fun() != NULL) {
- Print(" = ");
- PrintFunctionLiteral(node->fun());
- }
- Print(";");
-}
-
-
-void PrettyPrinter::VisitExpressionStatement(ExpressionStatement* node) {
- Visit(node->expression());
- Print(";");
-}
-
-
-void PrettyPrinter::VisitEmptyStatement(EmptyStatement* node) {
- Print(";");
-}
-
-
-void PrettyPrinter::VisitIfStatement(IfStatement* node) {
- Print("if (");
- Visit(node->condition());
- Print(") ");
- Visit(node->then_statement());
- if (node->HasElseStatement()) {
- Print(" else ");
- Visit(node->else_statement());
- }
-}
-
-
-void PrettyPrinter::VisitContinueStatement(ContinueStatement* node) {
- Print("continue");
- ZoneStringList* labels = node->target()->labels();
- if (labels != NULL) {
- Print(" ");
- ASSERT(labels->length() > 0); // guaranteed to have at least one entry
- PrintLiteral(labels->at(0), false); // any label from the list is fine
- }
- Print(";");
-}
-
-
-void PrettyPrinter::VisitBreakStatement(BreakStatement* node) {
- Print("break");
- ZoneStringList* labels = node->target()->labels();
- if (labels != NULL) {
- Print(" ");
- ASSERT(labels->length() > 0); // guaranteed to have at least one entry
- PrintLiteral(labels->at(0), false); // any label from the list is fine
- }
- Print(";");
-}
-
-
-void PrettyPrinter::VisitReturnStatement(ReturnStatement* node) {
- Print("return ");
- Visit(node->expression());
- Print(";");
-}
-
-
-void PrettyPrinter::VisitWithEnterStatement(WithEnterStatement* node) {
- Print("<enter with> (");
- Visit(node->expression());
- Print(") ");
-}
-
-
-void PrettyPrinter::VisitWithExitStatement(WithExitStatement* node) {
- Print("<exit with>");
-}
-
-
-void PrettyPrinter::VisitSwitchStatement(SwitchStatement* node) {
- PrintLabels(node->labels());
- Print("switch (");
- Visit(node->tag());
- Print(") { ");
- ZoneList<CaseClause*>* cases = node->cases();
- for (int i = 0; i < cases->length(); i++)
- PrintCaseClause(cases->at(i));
- Print("}");
-}
-
-
-void PrettyPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
- PrintLabels(node->labels());
- Print("do ");
- Visit(node->body());
- Print(" while (");
- Visit(node->cond());
- Print(");");
-}
-
-
-void PrettyPrinter::VisitWhileStatement(WhileStatement* node) {
- PrintLabels(node->labels());
- Print("while (");
- Visit(node->cond());
- Print(") ");
- Visit(node->body());
-}
-
-
-void PrettyPrinter::VisitForStatement(ForStatement* node) {
- PrintLabels(node->labels());
- Print("for (");
- if (node->init() != NULL) {
- Visit(node->init());
- Print(" ");
- } else {
- Print("; ");
- }
- if (node->cond() != NULL) Visit(node->cond());
- Print("; ");
- if (node->next() != NULL) {
- Visit(node->next()); // prints extra ';', unfortunately
- // to fix: should use Expression for next
- }
- Print(") ");
- Visit(node->body());
-}
-
-
-void PrettyPrinter::VisitForInStatement(ForInStatement* node) {
- PrintLabels(node->labels());
- Print("for (");
- Visit(node->each());
- Print(" in ");
- Visit(node->enumerable());
- Print(") ");
- Visit(node->body());
-}
-
-
-void PrettyPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
- Print("try ");
- Visit(node->try_block());
- Print(" catch (");
- Visit(node->catch_var());
- Print(") ");
- Visit(node->catch_block());
-}
-
-
-void PrettyPrinter::VisitTryFinallyStatement(TryFinallyStatement* node) {
- Print("try ");
- Visit(node->try_block());
- Print(" finally ");
- Visit(node->finally_block());
-}
-
-
-void PrettyPrinter::VisitDebuggerStatement(DebuggerStatement* node) {
- Print("debugger ");
-}
-
-
-void PrettyPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
- Print("(");
- PrintFunctionLiteral(node);
- Print(")");
-}
-
-
-void PrettyPrinter::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- Print("(");
- PrintLiteral(node->shared_function_info(), true);
- Print(")");
-}
-
-
-void PrettyPrinter::VisitConditional(Conditional* node) {
- Visit(node->condition());
- Print(" ? ");
- Visit(node->then_expression());
- Print(" : ");
- Visit(node->else_expression());
-}
-
-
-void PrettyPrinter::VisitLiteral(Literal* node) {
- PrintLiteral(node->handle(), true);
-}
-
-
-void PrettyPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
- Print(" RegExp(");
- PrintLiteral(node->pattern(), false);
- Print(",");
- PrintLiteral(node->flags(), false);
- Print(") ");
-}
-
-
-void PrettyPrinter::VisitObjectLiteral(ObjectLiteral* node) {
- Print("{ ");
- for (int i = 0; i < node->properties()->length(); i++) {
- if (i != 0) Print(",");
- ObjectLiteral::Property* property = node->properties()->at(i);
- Print(" ");
- Visit(property->key());
- Print(": ");
- Visit(property->value());
- }
- Print(" }");
-}
-
-
-void PrettyPrinter::VisitArrayLiteral(ArrayLiteral* node) {
- Print("[ ");
- for (int i = 0; i < node->values()->length(); i++) {
- if (i != 0) Print(",");
- Visit(node->values()->at(i));
- }
- Print(" ]");
-}
-
-
-void PrettyPrinter::VisitCatchExtensionObject(CatchExtensionObject* node) {
- Print("{ ");
- Visit(node->key());
- Print(": ");
- Visit(node->value());
- Print(" }");
-}
-
-
-void PrettyPrinter::VisitSlot(Slot* node) {
- switch (node->type()) {
- case Slot::PARAMETER:
- Print("parameter[%d]", node->index());
- break;
- case Slot::LOCAL:
- Print("local[%d]", node->index());
- break;
- case Slot::CONTEXT:
- Print("context[%d]", node->index());
- break;
- case Slot::LOOKUP:
- Print("lookup[");
- PrintLiteral(node->var()->name(), false);
- Print("]");
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void PrettyPrinter::VisitVariableProxy(VariableProxy* node) {
- PrintLiteral(node->name(), false);
-}
-
-
-void PrettyPrinter::VisitAssignment(Assignment* node) {
- Visit(node->target());
- Print(" %s ", Token::String(node->op()));
- Visit(node->value());
-}
-
-
-void PrettyPrinter::VisitThrow(Throw* node) {
- Print("throw ");
- Visit(node->exception());
-}
-
-
-void PrettyPrinter::VisitProperty(Property* node) {
- Expression* key = node->key();
- Literal* literal = key->AsLiteral();
- if (literal != NULL && literal->handle()->IsSymbol()) {
- Print("(");
- Visit(node->obj());
- Print(").");
- PrintLiteral(literal->handle(), false);
- } else {
- Visit(node->obj());
- Print("[");
- Visit(key);
- Print("]");
- }
-}
-
-
-void PrettyPrinter::VisitCall(Call* node) {
- Visit(node->expression());
- PrintArguments(node->arguments());
-}
-
-
-void PrettyPrinter::VisitCallNew(CallNew* node) {
- Print("new (");
- Visit(node->expression());
- Print(")");
- PrintArguments(node->arguments());
-}
-
-
-void PrettyPrinter::VisitCallRuntime(CallRuntime* node) {
- Print("%%");
- PrintLiteral(node->name(), false);
- PrintArguments(node->arguments());
-}
-
-
-void PrettyPrinter::VisitUnaryOperation(UnaryOperation* node) {
- Print("(%s", Token::String(node->op()));
- Visit(node->expression());
- Print(")");
-}
-
-
-void PrettyPrinter::VisitIncrementOperation(IncrementOperation* node) {
- UNREACHABLE();
-}
-
-
-void PrettyPrinter::VisitCountOperation(CountOperation* node) {
- Print("(");
- if (node->is_prefix()) Print("%s", Token::String(node->op()));
- Visit(node->expression());
- if (node->is_postfix()) Print("%s", Token::String(node->op()));
- Print(")");
-}
-
-
-void PrettyPrinter::VisitBinaryOperation(BinaryOperation* node) {
- Print("(");
- Visit(node->left());
- Print("%s", Token::String(node->op()));
- Visit(node->right());
- Print(")");
-}
-
-
-void PrettyPrinter::VisitCompareOperation(CompareOperation* node) {
- Print("(");
- Visit(node->left());
- Print("%s", Token::String(node->op()));
- Visit(node->right());
- Print(")");
-}
-
-
-void PrettyPrinter::VisitCompareToNull(CompareToNull* node) {
- Print("(");
- Visit(node->expression());
- Print("%s null)", Token::String(node->op()));
-}
-
-
-void PrettyPrinter::VisitThisFunction(ThisFunction* node) {
- Print("<this-function>");
-}
-
-
-const char* PrettyPrinter::Print(AstNode* node) {
- Init();
- Visit(node);
- return output_;
-}
-
-
-const char* PrettyPrinter::PrintExpression(FunctionLiteral* program) {
- Init();
- ExpressionStatement* statement =
- program->body()->at(0)->AsExpressionStatement();
- Visit(statement->expression());
- return output_;
-}
-
-
-const char* PrettyPrinter::PrintProgram(FunctionLiteral* program) {
- Init();
- PrintStatements(program->body());
- Print("\n");
- return output_;
-}
-
-
-void PrettyPrinter::PrintOut(AstNode* node) {
- PrettyPrinter printer;
- PrintF("%s", printer.Print(node));
-}
-
-
-void PrettyPrinter::Init() {
- if (size_ == 0) {
- ASSERT(output_ == NULL);
- const int initial_size = 256;
- output_ = NewArray<char>(initial_size);
- size_ = initial_size;
- }
- output_[0] = '\0';
- pos_ = 0;
-}
-
-
-void PrettyPrinter::Print(const char* format, ...) {
- for (;;) {
- va_list arguments;
- va_start(arguments, format);
- int n = OS::VSNPrintF(Vector<char>(output_, size_) + pos_,
- format,
- arguments);
- va_end(arguments);
-
- if (n >= 0) {
- // there was enough space - we are done
- pos_ += n;
- return;
- } else {
- // there was not enough space - allocate more and try again
- const int slack = 32;
- int new_size = size_ + (size_ >> 1) + slack;
- char* new_output = NewArray<char>(new_size);
- memcpy(new_output, output_, pos_);
- DeleteArray(output_);
- output_ = new_output;
- size_ = new_size;
- }
- }
-}
-
-
-void PrettyPrinter::PrintStatements(ZoneList<Statement*>* statements) {
- for (int i = 0; i < statements->length(); i++) {
- if (i != 0) Print(" ");
- Visit(statements->at(i));
- }
-}
-
-
-void PrettyPrinter::PrintLabels(ZoneStringList* labels) {
- if (labels != NULL) {
- for (int i = 0; i < labels->length(); i++) {
- PrintLiteral(labels->at(i), false);
- Print(": ");
- }
- }
-}
-
-
-void PrettyPrinter::PrintArguments(ZoneList<Expression*>* arguments) {
- Print("(");
- for (int i = 0; i < arguments->length(); i++) {
- if (i != 0) Print(", ");
- Visit(arguments->at(i));
- }
- Print(")");
-}
-
-
-void PrettyPrinter::PrintLiteral(Handle<Object> value, bool quote) {
- Object* object = *value;
- if (object->IsString()) {
- String* string = String::cast(object);
- if (quote) Print("\"");
- for (int i = 0; i < string->length(); i++) {
- Print("%c", string->Get(i));
- }
- if (quote) Print("\"");
- } else if (object->IsNull()) {
- Print("null");
- } else if (object->IsTrue()) {
- Print("true");
- } else if (object->IsFalse()) {
- Print("false");
- } else if (object->IsUndefined()) {
- Print("undefined");
- } else if (object->IsNumber()) {
- Print("%g", object->Number());
- } else if (object->IsJSObject()) {
- // regular expression
- if (object->IsJSFunction()) {
- Print("JS-Function");
- } else if (object->IsJSArray()) {
- Print("JS-array[%u]", JSArray::cast(object)->length());
- } else if (object->IsJSObject()) {
- Print("JS-Object");
- } else {
- Print("?UNKNOWN?");
- }
- } else if (object->IsFixedArray()) {
- Print("FixedArray");
- } else {
- Print("<unknown literal %p>", object);
- }
-}
-
-
-void PrettyPrinter::PrintParameters(Scope* scope) {
- Print("(");
- for (int i = 0; i < scope->num_parameters(); i++) {
- if (i > 0) Print(", ");
- PrintLiteral(scope->parameter(i)->name(), false);
- }
- Print(")");
-}
-
-
-void PrettyPrinter::PrintDeclarations(ZoneList<Declaration*>* declarations) {
- for (int i = 0; i < declarations->length(); i++) {
- if (i > 0) Print(" ");
- Visit(declarations->at(i));
- }
-}
-
-
-void PrettyPrinter::PrintFunctionLiteral(FunctionLiteral* function) {
- Print("function ");
- PrintLiteral(function->name(), false);
- PrintParameters(function->scope());
- Print(" { ");
- PrintDeclarations(function->scope()->declarations());
- PrintStatements(function->body());
- Print(" }");
-}
-
-
-void PrettyPrinter::PrintCaseClause(CaseClause* clause) {
- if (clause->is_default()) {
- Print("default");
- } else {
- Print("case ");
- Visit(clause->label());
- }
- Print(": ");
- PrintStatements(clause->statements());
- if (clause->statements()->length() > 0)
- Print(" ");
-}
-
-
-//-----------------------------------------------------------------------------
-
-class IndentedScope BASE_EMBEDDED {
- public:
- explicit IndentedScope(AstPrinter* printer) : ast_printer_(printer) {
- ast_printer_->inc_indent();
- }
-
- IndentedScope(AstPrinter* printer, const char* txt, AstNode* node = NULL)
- : ast_printer_(printer) {
- ast_printer_->PrintIndented(txt);
- if (node != NULL && node->AsExpression() != NULL) {
- Expression* expr = node->AsExpression();
- bool printed_first = false;
- if ((expr->type() != NULL) && (expr->type()->IsKnown())) {
- ast_printer_->Print(" (type = ");
- ast_printer_->Print(StaticType::Type2String(expr->type()));
- printed_first = true;
- }
- if (printed_first) ast_printer_->Print(")");
- }
- ast_printer_->Print("\n");
- ast_printer_->inc_indent();
- }
-
- virtual ~IndentedScope() {
- ast_printer_->dec_indent();
- }
-
- private:
- AstPrinter* ast_printer_;
-};
-
-
-//-----------------------------------------------------------------------------
-
-
-AstPrinter::AstPrinter() : indent_(0) {
-}
-
-
-AstPrinter::~AstPrinter() {
- ASSERT(indent_ == 0);
-}
-
-
-void AstPrinter::PrintIndented(const char* txt) {
- for (int i = 0; i < indent_; i++) {
- Print(". ");
- }
- Print(txt);
-}
-
-
-void AstPrinter::PrintLiteralIndented(const char* info,
- Handle<Object> value,
- bool quote) {
- PrintIndented(info);
- Print(" ");
- PrintLiteral(value, quote);
- Print("\n");
-}
-
-
-void AstPrinter::PrintLiteralWithModeIndented(const char* info,
- Variable* var,
- Handle<Object> value,
- StaticType* type) {
- if (var == NULL) {
- PrintLiteralIndented(info, value, true);
- } else {
- EmbeddedVector<char, 256> buf;
- int pos = OS::SNPrintF(buf, "%s (mode = %s", info,
- Variable::Mode2String(var->mode()));
- if (type->IsKnown()) {
- pos += OS::SNPrintF(buf + pos, ", type = %s",
- StaticType::Type2String(type));
- }
- OS::SNPrintF(buf + pos, ")");
- PrintLiteralIndented(buf.start(), value, true);
- }
-}
-
-
-void AstPrinter::PrintLabelsIndented(const char* info, ZoneStringList* labels) {
- if (labels != NULL && labels->length() > 0) {
- if (info == NULL) {
- PrintIndented("LABELS ");
- } else {
- PrintIndented(info);
- Print(" ");
- }
- PrintLabels(labels);
- } else if (info != NULL) {
- PrintIndented(info);
- }
- Print("\n");
-}
-
-
-void AstPrinter::PrintIndentedVisit(const char* s, AstNode* node) {
- IndentedScope indent(this, s, node);
- Visit(node);
-}
-
-
-const char* AstPrinter::PrintProgram(FunctionLiteral* program) {
- Init();
- { IndentedScope indent(this, "FUNC");
- PrintLiteralIndented("NAME", program->name(), true);
- PrintLiteralIndented("INFERRED NAME", program->inferred_name(), true);
- PrintParameters(program->scope());
- PrintDeclarations(program->scope()->declarations());
- PrintStatements(program->body());
- }
- return Output();
-}
-
-
-void AstPrinter::PrintDeclarations(ZoneList<Declaration*>* declarations) {
- if (declarations->length() > 0) {
- IndentedScope indent(this, "DECLS");
- for (int i = 0; i < declarations->length(); i++) {
- Visit(declarations->at(i));
- }
- }
-}
-
-
-void AstPrinter::PrintParameters(Scope* scope) {
- if (scope->num_parameters() > 0) {
- IndentedScope indent(this, "PARAMS");
- for (int i = 0; i < scope->num_parameters(); i++) {
- PrintLiteralWithModeIndented("VAR", scope->parameter(i),
- scope->parameter(i)->name(),
- scope->parameter(i)->type());
- }
- }
-}
-
-
-void AstPrinter::PrintStatements(ZoneList<Statement*>* statements) {
- for (int i = 0; i < statements->length(); i++) {
- Visit(statements->at(i));
- }
-}
-
-
-void AstPrinter::PrintArguments(ZoneList<Expression*>* arguments) {
- for (int i = 0; i < arguments->length(); i++) {
- Visit(arguments->at(i));
- }
-}
-
-
-void AstPrinter::PrintCaseClause(CaseClause* clause) {
- if (clause->is_default()) {
- IndentedScope indent(this, "DEFAULT");
- PrintStatements(clause->statements());
- } else {
- IndentedScope indent(this, "CASE");
- Visit(clause->label());
- PrintStatements(clause->statements());
- }
-}
-
-
-void AstPrinter::VisitBlock(Block* node) {
- const char* block_txt = node->is_initializer_block() ? "BLOCK INIT" : "BLOCK";
- IndentedScope indent(this, block_txt);
- PrintStatements(node->statements());
-}
-
-
-void AstPrinter::VisitDeclaration(Declaration* node) {
- if (node->fun() == NULL) {
- // var or const declarations
- PrintLiteralWithModeIndented(Variable::Mode2String(node->mode()),
- node->proxy()->AsVariable(),
- node->proxy()->name(),
- node->proxy()->AsVariable()->type());
- } else {
- // function declarations
- PrintIndented("FUNCTION ");
- PrintLiteral(node->proxy()->name(), true);
- Print(" = function ");
- PrintLiteral(node->fun()->name(), false);
- Print("\n");
- }
-}
-
-
-void AstPrinter::VisitExpressionStatement(ExpressionStatement* node) {
- Visit(node->expression());
-}
-
-
-void AstPrinter::VisitEmptyStatement(EmptyStatement* node) {
- PrintIndented("EMPTY\n");
-}
-
-
-void AstPrinter::VisitIfStatement(IfStatement* node) {
- PrintIndentedVisit("IF", node->condition());
- PrintIndentedVisit("THEN", node->then_statement());
- if (node->HasElseStatement()) {
- PrintIndentedVisit("ELSE", node->else_statement());
- }
-}
-
-
-void AstPrinter::VisitContinueStatement(ContinueStatement* node) {
- PrintLabelsIndented("CONTINUE", node->target()->labels());
-}
-
-
-void AstPrinter::VisitBreakStatement(BreakStatement* node) {
- PrintLabelsIndented("BREAK", node->target()->labels());
-}
-
-
-void AstPrinter::VisitReturnStatement(ReturnStatement* node) {
- PrintIndentedVisit("RETURN", node->expression());
-}
-
-
-void AstPrinter::VisitWithEnterStatement(WithEnterStatement* node) {
- PrintIndentedVisit("WITH ENTER", node->expression());
-}
-
-
-void AstPrinter::VisitWithExitStatement(WithExitStatement* node) {
- PrintIndented("WITH EXIT\n");
-}
-
-
-void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
- IndentedScope indent(this, "SWITCH");
- PrintLabelsIndented(NULL, node->labels());
- PrintIndentedVisit("TAG", node->tag());
- for (int i = 0; i < node->cases()->length(); i++) {
- PrintCaseClause(node->cases()->at(i));
- }
-}
-
-
-void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
- IndentedScope indent(this, "DO");
- PrintLabelsIndented(NULL, node->labels());
- PrintIndentedVisit("BODY", node->body());
- PrintIndentedVisit("COND", node->cond());
-}
-
-
-void AstPrinter::VisitWhileStatement(WhileStatement* node) {
- IndentedScope indent(this, "WHILE");
- PrintLabelsIndented(NULL, node->labels());
- PrintIndentedVisit("COND", node->cond());
- PrintIndentedVisit("BODY", node->body());
-}
-
-
-void AstPrinter::VisitForStatement(ForStatement* node) {
- IndentedScope indent(this, "FOR");
- PrintLabelsIndented(NULL, node->labels());
- if (node->init()) PrintIndentedVisit("INIT", node->init());
- if (node->cond()) PrintIndentedVisit("COND", node->cond());
- PrintIndentedVisit("BODY", node->body());
- if (node->next()) PrintIndentedVisit("NEXT", node->next());
-}
-
-
-void AstPrinter::VisitForInStatement(ForInStatement* node) {
- IndentedScope indent(this, "FOR IN");
- PrintIndentedVisit("FOR", node->each());
- PrintIndentedVisit("IN", node->enumerable());
- PrintIndentedVisit("BODY", node->body());
-}
-
-
-void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
- IndentedScope indent(this, "TRY CATCH");
- PrintIndentedVisit("TRY", node->try_block());
- PrintIndentedVisit("CATCHVAR", node->catch_var());
- PrintIndentedVisit("CATCH", node->catch_block());
-}
-
-
-void AstPrinter::VisitTryFinallyStatement(TryFinallyStatement* node) {
- IndentedScope indent(this, "TRY FINALLY");
- PrintIndentedVisit("TRY", node->try_block());
- PrintIndentedVisit("FINALLY", node->finally_block());
-}
-
-
-void AstPrinter::VisitDebuggerStatement(DebuggerStatement* node) {
- IndentedScope indent(this, "DEBUGGER");
-}
-
-
-void AstPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
- IndentedScope indent(this, "FUNC LITERAL");
- PrintLiteralIndented("NAME", node->name(), false);
- PrintLiteralIndented("INFERRED NAME", node->inferred_name(), false);
- PrintParameters(node->scope());
- // We don't want to see the function literal in this case: it
- // will be printed via PrintProgram when the code for it is
- // generated.
- // PrintStatements(node->body());
-}
-
-
-void AstPrinter::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- IndentedScope indent(this, "FUNC LITERAL");
- PrintLiteralIndented("SHARED INFO", node->shared_function_info(), true);
-}
-
-
-void AstPrinter::VisitConditional(Conditional* node) {
- IndentedScope indent(this, "CONDITIONAL");
- PrintIndentedVisit("?", node->condition());
- PrintIndentedVisit("THEN", node->then_expression());
- PrintIndentedVisit("ELSE", node->else_expression());
-}
-
-
-void AstPrinter::VisitLiteral(Literal* node) {
- PrintLiteralIndented("LITERAL", node->handle(), true);
-}
-
-
-void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
- IndentedScope indent(this, "REGEXP LITERAL");
- PrintLiteralIndented("PATTERN", node->pattern(), false);
- PrintLiteralIndented("FLAGS", node->flags(), false);
-}
-
-
-void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
- IndentedScope indent(this, "OBJ LITERAL");
- for (int i = 0; i < node->properties()->length(); i++) {
- const char* prop_kind = NULL;
- switch (node->properties()->at(i)->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- prop_kind = "PROPERTY - CONSTANT";
- break;
- case ObjectLiteral::Property::COMPUTED:
- prop_kind = "PROPERTY - COMPUTED";
- break;
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- prop_kind = "PROPERTY - MATERIALIZED_LITERAL";
- break;
- case ObjectLiteral::Property::PROTOTYPE:
- prop_kind = "PROPERTY - PROTOTYPE";
- break;
- case ObjectLiteral::Property::GETTER:
- prop_kind = "PROPERTY - GETTER";
- break;
- case ObjectLiteral::Property::SETTER:
- prop_kind = "PROPERTY - SETTER";
- break;
- default:
- UNREACHABLE();
- }
- IndentedScope prop(this, prop_kind);
- PrintIndentedVisit("KEY", node->properties()->at(i)->key());
- PrintIndentedVisit("VALUE", node->properties()->at(i)->value());
- }
-}
-
-
-void AstPrinter::VisitArrayLiteral(ArrayLiteral* node) {
- IndentedScope indent(this, "ARRAY LITERAL");
- if (node->values()->length() > 0) {
- IndentedScope indent(this, "VALUES");
- for (int i = 0; i < node->values()->length(); i++) {
- Visit(node->values()->at(i));
- }
- }
-}
-
-
-void AstPrinter::VisitCatchExtensionObject(CatchExtensionObject* node) {
- IndentedScope indent(this, "CatchExtensionObject");
- PrintIndentedVisit("KEY", node->key());
- PrintIndentedVisit("VALUE", node->value());
-}
-
-
-void AstPrinter::VisitSlot(Slot* node) {
- PrintIndented("SLOT ");
- PrettyPrinter::VisitSlot(node);
- Print("\n");
-}
-
-
-void AstPrinter::VisitVariableProxy(VariableProxy* node) {
- PrintLiteralWithModeIndented("VAR PROXY", node->AsVariable(), node->name(),
- node->type());
- Variable* var = node->var();
- if (var != NULL && var->rewrite() != NULL) {
- IndentedScope indent(this);
- Visit(var->rewrite());
- }
-}
-
-
-void AstPrinter::VisitAssignment(Assignment* node) {
- IndentedScope indent(this, Token::Name(node->op()), node);
- Visit(node->target());
- Visit(node->value());
-}
-
-
-void AstPrinter::VisitThrow(Throw* node) {
- PrintIndentedVisit("THROW", node->exception());
-}
-
-
-void AstPrinter::VisitProperty(Property* node) {
- IndentedScope indent(this, "PROPERTY", node);
- Visit(node->obj());
- Literal* literal = node->key()->AsLiteral();
- if (literal != NULL && literal->handle()->IsSymbol()) {
- PrintLiteralIndented("NAME", literal->handle(), false);
- } else {
- PrintIndentedVisit("KEY", node->key());
- }
-}
-
-
-void AstPrinter::VisitCall(Call* node) {
- IndentedScope indent(this, "CALL");
- Visit(node->expression());
- PrintArguments(node->arguments());
-}
-
-
-void AstPrinter::VisitCallNew(CallNew* node) {
- IndentedScope indent(this, "CALL NEW");
- Visit(node->expression());
- PrintArguments(node->arguments());
-}
-
-
-void AstPrinter::VisitCallRuntime(CallRuntime* node) {
- PrintLiteralIndented("CALL RUNTIME ", node->name(), false);
- IndentedScope indent(this);
- PrintArguments(node->arguments());
-}
-
-
-void AstPrinter::VisitUnaryOperation(UnaryOperation* node) {
- PrintIndentedVisit(Token::Name(node->op()), node->expression());
-}
-
-
-void AstPrinter::VisitIncrementOperation(IncrementOperation* node) {
- UNREACHABLE();
-}
-
-
-void AstPrinter::VisitCountOperation(CountOperation* node) {
- EmbeddedVector<char, 128> buf;
- if (node->type()->IsKnown()) {
- OS::SNPrintF(buf, "%s %s (type = %s)",
- (node->is_prefix() ? "PRE" : "POST"),
- Token::Name(node->op()),
- StaticType::Type2String(node->type()));
- } else {
- OS::SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"),
- Token::Name(node->op()));
- }
- PrintIndentedVisit(buf.start(), node->expression());
-}
-
-
-void AstPrinter::VisitBinaryOperation(BinaryOperation* node) {
- IndentedScope indent(this, Token::Name(node->op()), node);
- Visit(node->left());
- Visit(node->right());
-}
-
-
-void AstPrinter::VisitCompareOperation(CompareOperation* node) {
- IndentedScope indent(this, Token::Name(node->op()), node);
- Visit(node->left());
- Visit(node->right());
-}
-
-
-void AstPrinter::VisitCompareToNull(CompareToNull* node) {
- const char* name = node->is_strict()
- ? "COMPARE-TO-NULL-STRICT"
- : "COMPARE-TO-NULL";
- IndentedScope indent(this, name, node);
- Visit(node->expression());
-}
-
-
-void AstPrinter::VisitThisFunction(ThisFunction* node) {
- IndentedScope indent(this, "THIS-FUNCTION");
-}
-
-
-TagScope::TagScope(JsonAstBuilder* builder, const char* name)
- : builder_(builder), next_(builder->tag()), has_body_(false) {
- if (next_ != NULL) {
- next_->use();
- builder->Print(",\n");
- }
- builder->set_tag(this);
- builder->PrintIndented("[");
- builder->Print("\"%s\"", name);
- builder->increase_indent(JsonAstBuilder::kTagIndentSize);
-}
-
-
-TagScope::~TagScope() {
- builder_->decrease_indent(JsonAstBuilder::kTagIndentSize);
- if (has_body_) {
- builder_->Print("\n");
- builder_->PrintIndented("]");
- } else {
- builder_->Print("]");
- }
- builder_->set_tag(next_);
-}
-
-
-AttributesScope::AttributesScope(JsonAstBuilder* builder)
- : builder_(builder), attribute_count_(0) {
- builder->set_attributes(this);
- builder->tag()->use();
- builder->Print(",\n");
- builder->PrintIndented("{");
- builder->increase_indent(JsonAstBuilder::kAttributesIndentSize);
-}
-
-
-AttributesScope::~AttributesScope() {
- builder_->decrease_indent(JsonAstBuilder::kAttributesIndentSize);
- if (attribute_count_ > 1) {
- builder_->Print("\n");
- builder_->PrintIndented("}");
- } else {
- builder_->Print("}");
- }
- builder_->set_attributes(NULL);
-}
-
-
-const char* JsonAstBuilder::BuildProgram(FunctionLiteral* program) {
- Init();
- Visit(program);
- Print("\n");
- return Output();
-}
-
-
-void JsonAstBuilder::AddAttributePrefix(const char* name) {
- if (attributes()->is_used()) {
- Print(",\n");
- PrintIndented("\"");
- } else {
- Print("\"");
- }
- Print("%s\":", name);
- attributes()->use();
-}
-
-
-void JsonAstBuilder::AddAttribute(const char* name, Handle<String> value) {
- SmartPointer<char> value_string = value->ToCString();
- AddAttributePrefix(name);
- Print("\"%s\"", *value_string);
-}
-
-
-void JsonAstBuilder::AddAttribute(const char* name, const char* value) {
- AddAttributePrefix(name);
- Print("\"%s\"", value);
-}
-
-
-void JsonAstBuilder::AddAttribute(const char* name, int value) {
- AddAttributePrefix(name);
- Print("%d", value);
-}
-
-
-void JsonAstBuilder::AddAttribute(const char* name, bool value) {
- AddAttributePrefix(name);
- Print(value ? "true" : "false");
-}
-
-
-void JsonAstBuilder::VisitBlock(Block* stmt) {
- TagScope tag(this, "Block");
- VisitStatements(stmt->statements());
-}
-
-
-void JsonAstBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
- TagScope tag(this, "ExpressionStatement");
- Visit(stmt->expression());
-}
-
-
-void JsonAstBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
- TagScope tag(this, "EmptyStatement");
-}
-
-
-void JsonAstBuilder::VisitIfStatement(IfStatement* stmt) {
- TagScope tag(this, "IfStatement");
- Visit(stmt->condition());
- Visit(stmt->then_statement());
- Visit(stmt->else_statement());
-}
-
-
-void JsonAstBuilder::VisitContinueStatement(ContinueStatement* stmt) {
- TagScope tag(this, "ContinueStatement");
-}
-
-
-void JsonAstBuilder::VisitBreakStatement(BreakStatement* stmt) {
- TagScope tag(this, "BreakStatement");
-}
-
-
-void JsonAstBuilder::VisitReturnStatement(ReturnStatement* stmt) {
- TagScope tag(this, "ReturnStatement");
- Visit(stmt->expression());
-}
-
-
-void JsonAstBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
- TagScope tag(this, "WithEnterStatement");
- Visit(stmt->expression());
-}
-
-
-void JsonAstBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
- TagScope tag(this, "WithExitStatement");
-}
-
-
-void JsonAstBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
- TagScope tag(this, "SwitchStatement");
-}
-
-
-void JsonAstBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
- TagScope tag(this, "DoWhileStatement");
- Visit(stmt->body());
- Visit(stmt->cond());
-}
-
-
-void JsonAstBuilder::VisitWhileStatement(WhileStatement* stmt) {
- TagScope tag(this, "WhileStatement");
- Visit(stmt->cond());
- Visit(stmt->body());
-}
-
-
-void JsonAstBuilder::VisitForStatement(ForStatement* stmt) {
- TagScope tag(this, "ForStatement");
- if (stmt->init() != NULL) Visit(stmt->init());
- if (stmt->cond() != NULL) Visit(stmt->cond());
- Visit(stmt->body());
- if (stmt->next() != NULL) Visit(stmt->next());
-}
-
-
-void JsonAstBuilder::VisitForInStatement(ForInStatement* stmt) {
- TagScope tag(this, "ForInStatement");
- Visit(stmt->each());
- Visit(stmt->enumerable());
- Visit(stmt->body());
-}
-
-
-void JsonAstBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
- TagScope tag(this, "TryCatchStatement");
- Visit(stmt->try_block());
- Visit(stmt->catch_var());
- Visit(stmt->catch_block());
-}
-
-
-void JsonAstBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- TagScope tag(this, "TryFinallyStatement");
- Visit(stmt->try_block());
- Visit(stmt->finally_block());
-}
-
-
-void JsonAstBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
- TagScope tag(this, "DebuggerStatement");
-}
-
-
-void JsonAstBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
- TagScope tag(this, "FunctionLiteral");
- {
- AttributesScope attributes(this);
- AddAttribute("name", expr->name());
- }
- VisitDeclarations(expr->scope()->declarations());
- VisitStatements(expr->body());
-}
-
-
-void JsonAstBuilder::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- TagScope tag(this, "SharedFunctionInfoLiteral");
-}
-
-
-void JsonAstBuilder::VisitConditional(Conditional* expr) {
- TagScope tag(this, "Conditional");
-}
-
-
-void JsonAstBuilder::VisitSlot(Slot* expr) {
- TagScope tag(this, "Slot");
- {
- AttributesScope attributes(this);
- switch (expr->type()) {
- case Slot::PARAMETER:
- AddAttribute("type", "PARAMETER");
- break;
- case Slot::LOCAL:
- AddAttribute("type", "LOCAL");
- break;
- case Slot::CONTEXT:
- AddAttribute("type", "CONTEXT");
- break;
- case Slot::LOOKUP:
- AddAttribute("type", "LOOKUP");
- break;
- }
- AddAttribute("index", expr->index());
- }
-}
-
-
-void JsonAstBuilder::VisitVariableProxy(VariableProxy* expr) {
- if (expr->var()->rewrite() == NULL) {
- TagScope tag(this, "VariableProxy");
- {
- AttributesScope attributes(this);
- AddAttribute("name", expr->name());
- AddAttribute("mode", Variable::Mode2String(expr->var()->mode()));
- }
- } else {
- Visit(expr->var()->rewrite());
- }
-}
-
-
-void JsonAstBuilder::VisitLiteral(Literal* expr) {
- TagScope tag(this, "Literal");
- {
- AttributesScope attributes(this);
- Handle<Object> handle = expr->handle();
- if (handle->IsString()) {
- AddAttribute("handle", Handle<String>(String::cast(*handle)));
- } else if (handle->IsSmi()) {
- AddAttribute("handle", Smi::cast(*handle)->value());
- }
- }
-}
-
-
-void JsonAstBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
- TagScope tag(this, "RegExpLiteral");
-}
-
-
-void JsonAstBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
- TagScope tag(this, "ObjectLiteral");
-}
-
-
-void JsonAstBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
- TagScope tag(this, "ArrayLiteral");
-}
-
-
-void JsonAstBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- TagScope tag(this, "CatchExtensionObject");
- Visit(expr->key());
- Visit(expr->value());
-}
-
-
-void JsonAstBuilder::VisitAssignment(Assignment* expr) {
- TagScope tag(this, "Assignment");
- {
- AttributesScope attributes(this);
- AddAttribute("op", Token::Name(expr->op()));
- }
- Visit(expr->target());
- Visit(expr->value());
-}
-
-
-void JsonAstBuilder::VisitThrow(Throw* expr) {
- TagScope tag(this, "Throw");
- Visit(expr->exception());
-}
-
-
-void JsonAstBuilder::VisitProperty(Property* expr) {
- TagScope tag(this, "Property");
- {
- AttributesScope attributes(this);
- AddAttribute("type", expr->is_synthetic() ? "SYNTHETIC" : "NORMAL");
- }
- Visit(expr->obj());
- Visit(expr->key());
-}
-
-
-void JsonAstBuilder::VisitCall(Call* expr) {
- TagScope tag(this, "Call");
- Visit(expr->expression());
- VisitExpressions(expr->arguments());
-}
-
-
-void JsonAstBuilder::VisitCallNew(CallNew* expr) {
- TagScope tag(this, "CallNew");
- Visit(expr->expression());
- VisitExpressions(expr->arguments());
-}
-
-
-void JsonAstBuilder::VisitCallRuntime(CallRuntime* expr) {
- TagScope tag(this, "CallRuntime");
- {
- AttributesScope attributes(this);
- AddAttribute("name", expr->name());
- }
- VisitExpressions(expr->arguments());
-}
-
-
-void JsonAstBuilder::VisitUnaryOperation(UnaryOperation* expr) {
- TagScope tag(this, "UnaryOperation");
- {
- AttributesScope attributes(this);
- AddAttribute("op", Token::Name(expr->op()));
- }
- Visit(expr->expression());
-}
-
-
-void JsonAstBuilder::VisitIncrementOperation(IncrementOperation* expr) {
- UNREACHABLE();
-}
-
-
-void JsonAstBuilder::VisitCountOperation(CountOperation* expr) {
- TagScope tag(this, "CountOperation");
- {
- AttributesScope attributes(this);
- AddAttribute("is_prefix", expr->is_prefix());
- AddAttribute("op", Token::Name(expr->op()));
- }
- Visit(expr->expression());
-}
-
-
-void JsonAstBuilder::VisitBinaryOperation(BinaryOperation* expr) {
- TagScope tag(this, "BinaryOperation");
- {
- AttributesScope attributes(this);
- AddAttribute("op", Token::Name(expr->op()));
- }
- Visit(expr->left());
- Visit(expr->right());
-}
-
-
-void JsonAstBuilder::VisitCompareOperation(CompareOperation* expr) {
- TagScope tag(this, "CompareOperation");
- {
- AttributesScope attributes(this);
- AddAttribute("op", Token::Name(expr->op()));
- }
- Visit(expr->left());
- Visit(expr->right());
-}
-
-
-void JsonAstBuilder::VisitCompareToNull(CompareToNull* expr) {
- TagScope tag(this, "CompareToNull");
- {
- AttributesScope attributes(this);
- AddAttribute("is_strict", expr->is_strict());
- }
- Visit(expr->expression());
-}
-
-
-void JsonAstBuilder::VisitThisFunction(ThisFunction* expr) {
- TagScope tag(this, "ThisFunction");
-}
-
-
-void JsonAstBuilder::VisitDeclaration(Declaration* decl) {
- TagScope tag(this, "Declaration");
- {
- AttributesScope attributes(this);
- AddAttribute("mode", Variable::Mode2String(decl->mode()));
- }
- Visit(decl->proxy());
- if (decl->fun() != NULL) Visit(decl->fun());
-}
-
-
-#endif // DEBUG
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/prettyprinter.h b/src/3rdparty/v8/src/prettyprinter.h
deleted file mode 100644
index 284a93f..0000000
--- a/src/3rdparty/v8/src/prettyprinter.h
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PRETTYPRINTER_H_
-#define V8_PRETTYPRINTER_H_
-
-#include "ast.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef DEBUG
-
-class PrettyPrinter: public AstVisitor {
- public:
- PrettyPrinter();
- virtual ~PrettyPrinter();
-
- // The following routines print a node into a string.
- // The result string is alive as long as the PrettyPrinter is alive.
- const char* Print(AstNode* node);
- const char* PrintExpression(FunctionLiteral* program);
- const char* PrintProgram(FunctionLiteral* program);
-
- void Print(const char* format, ...);
-
- // Print a node to stdout.
- static void PrintOut(AstNode* node);
-
- virtual void VisitSlot(Slot* node);
- // Individual nodes
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- private:
- char* output_; // output string buffer
- int size_; // output_ size
- int pos_; // current printing position
-
- protected:
- void Init();
- const char* Output() const { return output_; }
-
- virtual void PrintStatements(ZoneList<Statement*>* statements);
- void PrintLabels(ZoneStringList* labels);
- virtual void PrintArguments(ZoneList<Expression*>* arguments);
- void PrintLiteral(Handle<Object> value, bool quote);
- void PrintParameters(Scope* scope);
- void PrintDeclarations(ZoneList<Declaration*>* declarations);
- void PrintFunctionLiteral(FunctionLiteral* function);
- void PrintCaseClause(CaseClause* clause);
-};
-
-
-// Prints the AST structure
-class AstPrinter: public PrettyPrinter {
- public:
- AstPrinter();
- virtual ~AstPrinter();
-
- const char* PrintProgram(FunctionLiteral* program);
-
- // Individual nodes
- virtual void VisitSlot(Slot* node);
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- private:
- friend class IndentedScope;
- void PrintIndented(const char* txt);
- void PrintIndentedVisit(const char* s, AstNode* node);
-
- void PrintStatements(ZoneList<Statement*>* statements);
- void PrintDeclarations(ZoneList<Declaration*>* declarations);
- void PrintParameters(Scope* scope);
- void PrintArguments(ZoneList<Expression*>* arguments);
- void PrintCaseClause(CaseClause* clause);
- void PrintLiteralIndented(const char* info, Handle<Object> value, bool quote);
- void PrintLiteralWithModeIndented(const char* info,
- Variable* var,
- Handle<Object> value,
- StaticType* type);
- void PrintLabelsIndented(const char* info, ZoneStringList* labels);
-
- void inc_indent() { indent_++; }
- void dec_indent() { indent_--; }
-
- int indent_;
-};
-
-
-// Forward declaration of helper classes.
-class TagScope;
-class AttributesScope;
-
-// Build a C string containing a JSON representation of a function's
-// AST. The representation is based on JsonML (www.jsonml.org).
-class JsonAstBuilder: public PrettyPrinter {
- public:
- JsonAstBuilder()
- : indent_(0), top_tag_scope_(NULL), attributes_scope_(NULL) {
- }
- virtual ~JsonAstBuilder() {}
-
- // Controls the indentation of subsequent lines of a tag body after
- // the first line.
- static const int kTagIndentSize = 2;
-
- // Controls the indentation of subsequent lines of an attributes
- // blocks's body after the first line.
- static const int kAttributesIndentSize = 1;
-
- // Construct a JSON representation of a function literal.
- const char* BuildProgram(FunctionLiteral* program);
-
- // Print text indented by the current indentation level.
- void PrintIndented(const char* text) { Print("%*s%s", indent_, "", text); }
-
- // Change the indentation level.
- void increase_indent(int amount) { indent_ += amount; }
- void decrease_indent(int amount) { indent_ -= amount; }
-
- // The builder maintains a stack of opened AST node constructors.
- // Each node constructor corresponds to a JsonML tag.
- TagScope* tag() { return top_tag_scope_; }
- void set_tag(TagScope* scope) { top_tag_scope_ = scope; }
-
- // The builder maintains a pointer to the currently opened attributes
- // of current AST node or NULL if the attributes are not opened.
- AttributesScope* attributes() { return attributes_scope_; }
- void set_attributes(AttributesScope* scope) { attributes_scope_ = scope; }
-
- // Add an attribute to the currently opened attributes.
- void AddAttribute(const char* name, Handle<String> value);
- void AddAttribute(const char* name, const char* value);
- void AddAttribute(const char* name, int value);
- void AddAttribute(const char* name, bool value);
-
- // AST node visit functions.
- virtual void VisitSlot(Slot* node);
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- private:
- int indent_;
- TagScope* top_tag_scope_;
- AttributesScope* attributes_scope_;
-
- // Utility function used by AddAttribute implementations.
- void AddAttributePrefix(const char* name);
-};
-
-
-// The JSON AST builder keeps a stack of open element tags (AST node
-// constructors from the current iteration point to the root of the
-// AST). TagScope is a helper class to manage the opening and closing
-// of tags, the indentation of their bodies, and comma separating their
-// contents.
-class TagScope BASE_EMBEDDED {
- public:
- TagScope(JsonAstBuilder* builder, const char* name);
- ~TagScope();
-
- void use() { has_body_ = true; }
-
- private:
- JsonAstBuilder* builder_;
- TagScope* next_;
- bool has_body_;
-};
-
-
-// AttributesScope is a helper class to manage the opening and closing
-// of attribute blocks, the indentation of their bodies, and comma
-// separating their contents. JsonAstBuilder::AddAttribute adds an
-// attribute to the currently open AttributesScope. They cannot be
-// nested so the builder keeps an optional single scope rather than a
-// stack.
-class AttributesScope BASE_EMBEDDED {
- public:
- explicit AttributesScope(JsonAstBuilder* builder);
- ~AttributesScope();
-
- bool is_used() { return attribute_count_ > 0; }
- void use() { ++attribute_count_; }
-
- private:
- JsonAstBuilder* builder_;
- int attribute_count_;
-};
-
-#endif // DEBUG
-
-} } // namespace v8::internal
-
-#endif // V8_PRETTYPRINTER_H_
diff --git a/src/3rdparty/v8/src/profile-generator-inl.h b/src/3rdparty/v8/src/profile-generator-inl.h
deleted file mode 100644
index 747e5c7..0000000
--- a/src/3rdparty/v8/src/profile-generator-inl.h
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PROFILE_GENERATOR_INL_H_
-#define V8_PROFILE_GENERATOR_INL_H_
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-#include "profile-generator.h"
-
-namespace v8 {
-namespace internal {
-
-const char* StringsStorage::GetFunctionName(String* name) {
- return GetFunctionName(GetName(name));
-}
-
-
-const char* StringsStorage::GetFunctionName(const char* name) {
- return strlen(name) > 0 ? name : ProfileGenerator::kAnonymousFunctionName;
-}
-
-
-CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
- const char* name_prefix,
- const char* name,
- const char* resource_name,
- int line_number,
- int security_token_id)
- : tag_(tag),
- name_prefix_(name_prefix),
- name_(name),
- resource_name_(resource_name),
- line_number_(line_number),
- shared_id_(0),
- security_token_id_(security_token_id) {
-}
-
-
-bool CodeEntry::is_js_function_tag(Logger::LogEventsAndTags tag) {
- return tag == Logger::FUNCTION_TAG
- || tag == Logger::LAZY_COMPILE_TAG
- || tag == Logger::SCRIPT_TAG
- || tag == Logger::NATIVE_FUNCTION_TAG
- || tag == Logger::NATIVE_LAZY_COMPILE_TAG
- || tag == Logger::NATIVE_SCRIPT_TAG;
-}
-
-
-ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry)
- : tree_(tree),
- entry_(entry),
- total_ticks_(0),
- self_ticks_(0),
- children_(CodeEntriesMatch) {
-}
-
-
-void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
- CodeTree::Locator locator;
- tree_.Insert(addr, &locator);
- locator.set_value(CodeEntryInfo(entry, size));
-}
-
-
-void CodeMap::MoveCode(Address from, Address to) {
- tree_.Move(from, to);
-}
-
-void CodeMap::DeleteCode(Address addr) {
- tree_.Remove(addr);
-}
-
-
-CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
- switch (tag) {
- case GC:
- return gc_entry_;
- case JS:
- case COMPILER:
- // DOM events handlers are reported as OTHER / EXTERNAL entries.
- // To avoid confusing people, let's put all these entries into
- // one bucket.
- case OTHER:
- case EXTERNAL:
- return program_entry_;
- default: return NULL;
- }
-}
-
-
-uint64_t HeapEntry::id() {
- union {
- Id stored_id;
- uint64_t returned_id;
- } id_adaptor = {id_};
- return id_adaptor.returned_id;
-}
-
-} } // namespace v8::internal
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-#endif // V8_PROFILE_GENERATOR_INL_H_
diff --git a/src/3rdparty/v8/src/profile-generator.cc b/src/3rdparty/v8/src/profile-generator.cc
deleted file mode 100644
index fd3268d..0000000
--- a/src/3rdparty/v8/src/profile-generator.cc
+++ /dev/null
@@ -1,3095 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-#include "v8.h"
-#include "global-handles.h"
-#include "heap-profiler.h"
-#include "scopeinfo.h"
-#include "unicode.h"
-#include "zone-inl.h"
-
-#include "profile-generator-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-TokenEnumerator::TokenEnumerator()
- : token_locations_(4),
- token_removed_(4) {
-}
-
-
-TokenEnumerator::~TokenEnumerator() {
- Isolate* isolate = Isolate::Current();
- for (int i = 0; i < token_locations_.length(); ++i) {
- if (!token_removed_[i]) {
- isolate->global_handles()->ClearWeakness(token_locations_[i]);
- isolate->global_handles()->Destroy(token_locations_[i]);
- }
- }
-}
-
-
-int TokenEnumerator::GetTokenId(Object* token) {
- Isolate* isolate = Isolate::Current();
- if (token == NULL) return TokenEnumerator::kNoSecurityToken;
- for (int i = 0; i < token_locations_.length(); ++i) {
- if (*token_locations_[i] == token && !token_removed_[i]) return i;
- }
- Handle<Object> handle = isolate->global_handles()->Create(token);
- // handle.location() points to a memory cell holding a pointer
- // to a token object in the V8's heap.
- isolate->global_handles()->MakeWeak(handle.location(), this,
- TokenRemovedCallback);
- token_locations_.Add(handle.location());
- token_removed_.Add(false);
- return token_locations_.length() - 1;
-}
-
-
-void TokenEnumerator::TokenRemovedCallback(v8::Persistent<v8::Value> handle,
- void* parameter) {
- reinterpret_cast<TokenEnumerator*>(parameter)->TokenRemoved(
- Utils::OpenHandle(*handle).location());
- handle.Dispose();
-}
-
-
-void TokenEnumerator::TokenRemoved(Object** token_location) {
- for (int i = 0; i < token_locations_.length(); ++i) {
- if (token_locations_[i] == token_location && !token_removed_[i]) {
- token_removed_[i] = true;
- return;
- }
- }
-}
-
-
-StringsStorage::StringsStorage()
- : names_(StringsMatch) {
-}
-
-
-StringsStorage::~StringsStorage() {
- for (HashMap::Entry* p = names_.Start();
- p != NULL;
- p = names_.Next(p)) {
- DeleteArray(reinterpret_cast<const char*>(p->value));
- }
-}
-
-
-const char* StringsStorage::GetCopy(const char* src) {
- int len = static_cast<int>(strlen(src));
- Vector<char> dst = Vector<char>::New(len + 1);
- OS::StrNCpy(dst, src, len);
- dst[len] = '\0';
- uint32_t hash = HashSequentialString(dst.start(), len);
- return AddOrDisposeString(dst.start(), hash);
-}
-
-
-const char* StringsStorage::GetFormatted(const char* format, ...) {
- va_list args;
- va_start(args, format);
- const char* result = GetVFormatted(format, args);
- va_end(args);
- return result;
-}
-
-
-const char* StringsStorage::AddOrDisposeString(char* str, uint32_t hash) {
- HashMap::Entry* cache_entry = names_.Lookup(str, hash, true);
- if (cache_entry->value == NULL) {
- // New entry added.
- cache_entry->value = str;
- } else {
- DeleteArray(str);
- }
- return reinterpret_cast<const char*>(cache_entry->value);
-}
-
-
-const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
- Vector<char> str = Vector<char>::New(1024);
- int len = OS::VSNPrintF(str, format, args);
- if (len == -1) {
- DeleteArray(str.start());
- return format;
- }
- uint32_t hash = HashSequentialString(str.start(), len);
- return AddOrDisposeString(str.start(), hash);
-}
-
-
-const char* StringsStorage::GetName(String* name) {
- if (name->IsString()) {
- return AddOrDisposeString(
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL).Detach(),
- name->Hash());
- }
- return "";
-}
-
-
-const char* StringsStorage::GetName(int index) {
- return GetFormatted("%d", index);
-}
-
-
-const char* const CodeEntry::kEmptyNamePrefix = "";
-
-
-void CodeEntry::CopyData(const CodeEntry& source) {
- tag_ = source.tag_;
- name_prefix_ = source.name_prefix_;
- name_ = source.name_;
- resource_name_ = source.resource_name_;
- line_number_ = source.line_number_;
-}
-
-
-uint32_t CodeEntry::GetCallUid() const {
- uint32_t hash = ComputeIntegerHash(tag_);
- if (shared_id_ != 0) {
- hash ^= ComputeIntegerHash(
- static_cast<uint32_t>(shared_id_));
- } else {
- hash ^= ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)));
- hash ^= ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)));
- hash ^= ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)));
- hash ^= ComputeIntegerHash(line_number_);
- }
- return hash;
-}
-
-
-bool CodeEntry::IsSameAs(CodeEntry* entry) const {
- return this == entry
- || (tag_ == entry->tag_
- && shared_id_ == entry->shared_id_
- && (shared_id_ != 0
- || (name_prefix_ == entry->name_prefix_
- && name_ == entry->name_
- && resource_name_ == entry->resource_name_
- && line_number_ == entry->line_number_)));
-}
-
-
-ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
- HashMap::Entry* map_entry =
- children_.Lookup(entry, CodeEntryHash(entry), false);
- return map_entry != NULL ?
- reinterpret_cast<ProfileNode*>(map_entry->value) : NULL;
-}
-
-
-ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
- HashMap::Entry* map_entry =
- children_.Lookup(entry, CodeEntryHash(entry), true);
- if (map_entry->value == NULL) {
- // New node added.
- ProfileNode* new_node = new ProfileNode(tree_, entry);
- map_entry->value = new_node;
- children_list_.Add(new_node);
- }
- return reinterpret_cast<ProfileNode*>(map_entry->value);
-}
-
-
-double ProfileNode::GetSelfMillis() const {
- return tree_->TicksToMillis(self_ticks_);
-}
-
-
-double ProfileNode::GetTotalMillis() const {
- return tree_->TicksToMillis(total_ticks_);
-}
-
-
-void ProfileNode::Print(int indent) {
- OS::Print("%5u %5u %*c %s%s [%d]",
- total_ticks_, self_ticks_,
- indent, ' ',
- entry_->name_prefix(),
- entry_->name(),
- entry_->security_token_id());
- if (entry_->resource_name()[0] != '\0')
- OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
- OS::Print("\n");
- for (HashMap::Entry* p = children_.Start();
- p != NULL;
- p = children_.Next(p)) {
- reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2);
- }
-}
-
-
-class DeleteNodesCallback {
- public:
- void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
-
- void AfterAllChildrenTraversed(ProfileNode* node) {
- delete node;
- }
-
- void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
-};
-
-
-ProfileTree::ProfileTree()
- : root_entry_(Logger::FUNCTION_TAG,
- "",
- "(root)",
- "",
- 0,
- TokenEnumerator::kNoSecurityToken),
- root_(new ProfileNode(this, &root_entry_)) {
-}
-
-
-ProfileTree::~ProfileTree() {
- DeleteNodesCallback cb;
- TraverseDepthFirst(&cb);
-}
-
-
-void ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path) {
- ProfileNode* node = root_;
- for (CodeEntry** entry = path.start() + path.length() - 1;
- entry != path.start() - 1;
- --entry) {
- if (*entry != NULL) {
- node = node->FindOrAddChild(*entry);
- }
- }
- node->IncrementSelfTicks();
-}
-
-
-void ProfileTree::AddPathFromStart(const Vector<CodeEntry*>& path) {
- ProfileNode* node = root_;
- for (CodeEntry** entry = path.start();
- entry != path.start() + path.length();
- ++entry) {
- if (*entry != NULL) {
- node = node->FindOrAddChild(*entry);
- }
- }
- node->IncrementSelfTicks();
-}
-
-
-struct NodesPair {
- NodesPair(ProfileNode* src, ProfileNode* dst)
- : src(src), dst(dst) { }
- ProfileNode* src;
- ProfileNode* dst;
-};
-
-
-class FilteredCloneCallback {
- public:
- FilteredCloneCallback(ProfileNode* dst_root, int security_token_id)
- : stack_(10),
- security_token_id_(security_token_id) {
- stack_.Add(NodesPair(NULL, dst_root));
- }
-
- void BeforeTraversingChild(ProfileNode* parent, ProfileNode* child) {
- if (IsTokenAcceptable(child->entry()->security_token_id(),
- parent->entry()->security_token_id())) {
- ProfileNode* clone = stack_.last().dst->FindOrAddChild(child->entry());
- clone->IncreaseSelfTicks(child->self_ticks());
- stack_.Add(NodesPair(child, clone));
- } else {
- // Attribute ticks to parent node.
- stack_.last().dst->IncreaseSelfTicks(child->self_ticks());
- }
- }
-
- void AfterAllChildrenTraversed(ProfileNode* parent) { }
-
- void AfterChildTraversed(ProfileNode*, ProfileNode* child) {
- if (stack_.last().src == child) {
- stack_.RemoveLast();
- }
- }
-
- private:
- bool IsTokenAcceptable(int token, int parent_token) {
- if (token == TokenEnumerator::kNoSecurityToken
- || token == security_token_id_) return true;
- if (token == TokenEnumerator::kInheritsSecurityToken) {
- ASSERT(parent_token != TokenEnumerator::kInheritsSecurityToken);
- return parent_token == TokenEnumerator::kNoSecurityToken
- || parent_token == security_token_id_;
- }
- return false;
- }
-
- List<NodesPair> stack_;
- int security_token_id_;
-};
-
-void ProfileTree::FilteredClone(ProfileTree* src, int security_token_id) {
- ms_to_ticks_scale_ = src->ms_to_ticks_scale_;
- FilteredCloneCallback cb(root_, security_token_id);
- src->TraverseDepthFirst(&cb);
- CalculateTotalTicks();
-}
-
-
-void ProfileTree::SetTickRatePerMs(double ticks_per_ms) {
- ms_to_ticks_scale_ = ticks_per_ms > 0 ? 1.0 / ticks_per_ms : 1.0;
-}
-
-
-class Position {
- public:
- explicit Position(ProfileNode* node)
- : node(node), child_idx_(0) { }
- INLINE(ProfileNode* current_child()) {
- return node->children()->at(child_idx_);
- }
- INLINE(bool has_current_child()) {
- return child_idx_ < node->children()->length();
- }
- INLINE(void next_child()) { ++child_idx_; }
-
- ProfileNode* node;
- private:
- int child_idx_;
-};
-
-
-// Non-recursive implementation of a depth-first post-order tree traversal.
-template <typename Callback>
-void ProfileTree::TraverseDepthFirst(Callback* callback) {
- List<Position> stack(10);
- stack.Add(Position(root_));
- while (stack.length() > 0) {
- Position& current = stack.last();
- if (current.has_current_child()) {
- callback->BeforeTraversingChild(current.node, current.current_child());
- stack.Add(Position(current.current_child()));
- } else {
- callback->AfterAllChildrenTraversed(current.node);
- if (stack.length() > 1) {
- Position& parent = stack[stack.length() - 2];
- callback->AfterChildTraversed(parent.node, current.node);
- parent.next_child();
- }
- // Remove child from the stack.
- stack.RemoveLast();
- }
- }
-}
-
-
-class CalculateTotalTicksCallback {
- public:
- void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
-
- void AfterAllChildrenTraversed(ProfileNode* node) {
- node->IncreaseTotalTicks(node->self_ticks());
- }
-
- void AfterChildTraversed(ProfileNode* parent, ProfileNode* child) {
- parent->IncreaseTotalTicks(child->total_ticks());
- }
-};
-
-
-void ProfileTree::CalculateTotalTicks() {
- CalculateTotalTicksCallback cb;
- TraverseDepthFirst(&cb);
-}
-
-
-void ProfileTree::ShortPrint() {
- OS::Print("root: %u %u %.2fms %.2fms\n",
- root_->total_ticks(), root_->self_ticks(),
- root_->GetTotalMillis(), root_->GetSelfMillis());
-}
-
-
-void CpuProfile::AddPath(const Vector<CodeEntry*>& path) {
- top_down_.AddPathFromEnd(path);
- bottom_up_.AddPathFromStart(path);
-}
-
-
-void CpuProfile::CalculateTotalTicks() {
- top_down_.CalculateTotalTicks();
- bottom_up_.CalculateTotalTicks();
-}
-
-
-void CpuProfile::SetActualSamplingRate(double actual_sampling_rate) {
- top_down_.SetTickRatePerMs(actual_sampling_rate);
- bottom_up_.SetTickRatePerMs(actual_sampling_rate);
-}
-
-
-CpuProfile* CpuProfile::FilteredClone(int security_token_id) {
- ASSERT(security_token_id != TokenEnumerator::kNoSecurityToken);
- CpuProfile* clone = new CpuProfile(title_, uid_);
- clone->top_down_.FilteredClone(&top_down_, security_token_id);
- clone->bottom_up_.FilteredClone(&bottom_up_, security_token_id);
- return clone;
-}
-
-
-void CpuProfile::ShortPrint() {
- OS::Print("top down ");
- top_down_.ShortPrint();
- OS::Print("bottom up ");
- bottom_up_.ShortPrint();
-}
-
-
-void CpuProfile::Print() {
- OS::Print("[Top down]:\n");
- top_down_.Print();
- OS::Print("[Bottom up]:\n");
- bottom_up_.Print();
-}
-
-
-CodeEntry* const CodeMap::kSharedFunctionCodeEntry = NULL;
-const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
-const CodeMap::CodeTreeConfig::Value CodeMap::CodeTreeConfig::kNoValue =
- CodeMap::CodeEntryInfo(NULL, 0);
-
-
-CodeEntry* CodeMap::FindEntry(Address addr) {
- CodeTree::Locator locator;
- if (tree_.FindGreatestLessThan(addr, &locator)) {
- // locator.key() <= addr. Need to check that addr is within entry.
- const CodeEntryInfo& entry = locator.value();
- if (addr < (locator.key() + entry.size))
- return entry.entry;
- }
- return NULL;
-}
-
-
-int CodeMap::GetSharedId(Address addr) {
- CodeTree::Locator locator;
- // For shared function entries, 'size' field is used to store their IDs.
- if (tree_.Find(addr, &locator)) {
- const CodeEntryInfo& entry = locator.value();
- ASSERT(entry.entry == kSharedFunctionCodeEntry);
- return entry.size;
- } else {
- tree_.Insert(addr, &locator);
- int id = next_shared_id_++;
- locator.set_value(CodeEntryInfo(kSharedFunctionCodeEntry, id));
- return id;
- }
-}
-
-
-void CodeMap::CodeTreePrinter::Call(
- const Address& key, const CodeMap::CodeEntryInfo& value) {
- OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
-}
-
-
-void CodeMap::Print() {
- CodeTreePrinter printer;
- tree_.ForEach(&printer);
-}
-
-
-CpuProfilesCollection::CpuProfilesCollection()
- : profiles_uids_(UidsMatch),
- current_profiles_semaphore_(OS::CreateSemaphore(1)) {
- // Create list of unabridged profiles.
- profiles_by_token_.Add(new List<CpuProfile*>());
-}
-
-
-static void DeleteCodeEntry(CodeEntry** entry_ptr) {
- delete *entry_ptr;
-}
-
-static void DeleteCpuProfile(CpuProfile** profile_ptr) {
- delete *profile_ptr;
-}
-
-static void DeleteProfilesList(List<CpuProfile*>** list_ptr) {
- if (*list_ptr != NULL) {
- (*list_ptr)->Iterate(DeleteCpuProfile);
- delete *list_ptr;
- }
-}
-
-CpuProfilesCollection::~CpuProfilesCollection() {
- delete current_profiles_semaphore_;
- current_profiles_.Iterate(DeleteCpuProfile);
- detached_profiles_.Iterate(DeleteCpuProfile);
- profiles_by_token_.Iterate(DeleteProfilesList);
- code_entries_.Iterate(DeleteCodeEntry);
-}
-
-
-bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid) {
- ASSERT(uid > 0);
- current_profiles_semaphore_->Wait();
- if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
- current_profiles_semaphore_->Signal();
- return false;
- }
- for (int i = 0; i < current_profiles_.length(); ++i) {
- if (strcmp(current_profiles_[i]->title(), title) == 0) {
- // Ignore attempts to start profile with the same title.
- current_profiles_semaphore_->Signal();
- return false;
- }
- }
- current_profiles_.Add(new CpuProfile(title, uid));
- current_profiles_semaphore_->Signal();
- return true;
-}
-
-
-bool CpuProfilesCollection::StartProfiling(String* title, unsigned uid) {
- return StartProfiling(GetName(title), uid);
-}
-
-
-CpuProfile* CpuProfilesCollection::StopProfiling(int security_token_id,
- const char* title,
- double actual_sampling_rate) {
- const int title_len = StrLength(title);
- CpuProfile* profile = NULL;
- current_profiles_semaphore_->Wait();
- for (int i = current_profiles_.length() - 1; i >= 0; --i) {
- if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
- profile = current_profiles_.Remove(i);
- break;
- }
- }
- current_profiles_semaphore_->Signal();
-
- if (profile != NULL) {
- profile->CalculateTotalTicks();
- profile->SetActualSamplingRate(actual_sampling_rate);
- List<CpuProfile*>* unabridged_list =
- profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
- unabridged_list->Add(profile);
- HashMap::Entry* entry =
- profiles_uids_.Lookup(reinterpret_cast<void*>(profile->uid()),
- static_cast<uint32_t>(profile->uid()),
- true);
- ASSERT(entry->value == NULL);
- entry->value = reinterpret_cast<void*>(unabridged_list->length() - 1);
- return GetProfile(security_token_id, profile->uid());
- }
- return NULL;
-}
-
-
-CpuProfile* CpuProfilesCollection::GetProfile(int security_token_id,
- unsigned uid) {
- int index = GetProfileIndex(uid);
- if (index < 0) return NULL;
- List<CpuProfile*>* unabridged_list =
- profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
- if (security_token_id == TokenEnumerator::kNoSecurityToken) {
- return unabridged_list->at(index);
- }
- List<CpuProfile*>* list = GetProfilesList(security_token_id);
- if (list->at(index) == NULL) {
- (*list)[index] =
- unabridged_list->at(index)->FilteredClone(security_token_id);
- }
- return list->at(index);
-}
-
-
-int CpuProfilesCollection::GetProfileIndex(unsigned uid) {
- HashMap::Entry* entry = profiles_uids_.Lookup(reinterpret_cast<void*>(uid),
- static_cast<uint32_t>(uid),
- false);
- return entry != NULL ?
- static_cast<int>(reinterpret_cast<intptr_t>(entry->value)) : -1;
-}
-
-
-bool CpuProfilesCollection::IsLastProfile(const char* title) {
- // Called from VM thread, and only it can mutate the list,
- // so no locking is needed here.
- if (current_profiles_.length() != 1) return false;
- return StrLength(title) == 0
- || strcmp(current_profiles_[0]->title(), title) == 0;
-}
-
-
-void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
- // Called from VM thread for a completed profile.
- unsigned uid = profile->uid();
- int index = GetProfileIndex(uid);
- if (index < 0) {
- detached_profiles_.RemoveElement(profile);
- return;
- }
- profiles_uids_.Remove(reinterpret_cast<void*>(uid),
- static_cast<uint32_t>(uid));
- // Decrement all indexes above the deleted one.
- for (HashMap::Entry* p = profiles_uids_.Start();
- p != NULL;
- p = profiles_uids_.Next(p)) {
- intptr_t p_index = reinterpret_cast<intptr_t>(p->value);
- if (p_index > index) {
- p->value = reinterpret_cast<void*>(p_index - 1);
- }
- }
- for (int i = 0; i < profiles_by_token_.length(); ++i) {
- List<CpuProfile*>* list = profiles_by_token_[i];
- if (list != NULL && index < list->length()) {
- // Move all filtered clones into detached_profiles_,
- // so we can know that they are still in use.
- CpuProfile* cloned_profile = list->Remove(index);
- if (cloned_profile != NULL && cloned_profile != profile) {
- detached_profiles_.Add(cloned_profile);
- }
- }
- }
-}
-
-
-int CpuProfilesCollection::TokenToIndex(int security_token_id) {
- ASSERT(TokenEnumerator::kNoSecurityToken == -1);
- return security_token_id + 1; // kNoSecurityToken -> 0, 0 -> 1, ...
-}
-
-
-List<CpuProfile*>* CpuProfilesCollection::GetProfilesList(
- int security_token_id) {
- const int index = TokenToIndex(security_token_id);
- const int lists_to_add = index - profiles_by_token_.length() + 1;
- if (lists_to_add > 0) profiles_by_token_.AddBlock(NULL, lists_to_add);
- List<CpuProfile*>* unabridged_list =
- profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
- const int current_count = unabridged_list->length();
- if (profiles_by_token_[index] == NULL) {
- profiles_by_token_[index] = new List<CpuProfile*>(current_count);
- }
- List<CpuProfile*>* list = profiles_by_token_[index];
- const int profiles_to_add = current_count - list->length();
- if (profiles_to_add > 0) list->AddBlock(NULL, profiles_to_add);
- return list;
-}
-
-
-List<CpuProfile*>* CpuProfilesCollection::Profiles(int security_token_id) {
- List<CpuProfile*>* unabridged_list =
- profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
- if (security_token_id == TokenEnumerator::kNoSecurityToken) {
- return unabridged_list;
- }
- List<CpuProfile*>* list = GetProfilesList(security_token_id);
- const int current_count = unabridged_list->length();
- for (int i = 0; i < current_count; ++i) {
- if (list->at(i) == NULL) {
- (*list)[i] = unabridged_list->at(i)->FilteredClone(security_token_id);
- }
- }
- return list;
-}
-
-
-CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
- String* name,
- String* resource_name,
- int line_number) {
- CodeEntry* entry = new CodeEntry(tag,
- CodeEntry::kEmptyNamePrefix,
- GetFunctionName(name),
- GetName(resource_name),
- line_number,
- TokenEnumerator::kNoSecurityToken);
- code_entries_.Add(entry);
- return entry;
-}
-
-
-CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
- const char* name) {
- CodeEntry* entry = new CodeEntry(tag,
- CodeEntry::kEmptyNamePrefix,
- GetFunctionName(name),
- "",
- v8::CpuProfileNode::kNoLineNumberInfo,
- TokenEnumerator::kNoSecurityToken);
- code_entries_.Add(entry);
- return entry;
-}
-
-
-CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
- const char* name_prefix,
- String* name) {
- CodeEntry* entry = new CodeEntry(tag,
- name_prefix,
- GetName(name),
- "",
- v8::CpuProfileNode::kNoLineNumberInfo,
- TokenEnumerator::kInheritsSecurityToken);
- code_entries_.Add(entry);
- return entry;
-}
-
-
-CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
- int args_count) {
- CodeEntry* entry = new CodeEntry(tag,
- "args_count: ",
- GetName(args_count),
- "",
- v8::CpuProfileNode::kNoLineNumberInfo,
- TokenEnumerator::kInheritsSecurityToken);
- code_entries_.Add(entry);
- return entry;
-}
-
-
-void CpuProfilesCollection::AddPathToCurrentProfiles(
- const Vector<CodeEntry*>& path) {
- // As starting / stopping profiles is rare relatively to this
- // method, we don't bother minimizing the duration of lock holding,
- // e.g. copying contents of the list to a local vector.
- current_profiles_semaphore_->Wait();
- for (int i = 0; i < current_profiles_.length(); ++i) {
- current_profiles_[i]->AddPath(path);
- }
- current_profiles_semaphore_->Signal();
-}
-
-
-void SampleRateCalculator::Tick() {
- if (--wall_time_query_countdown_ == 0)
- UpdateMeasurements(OS::TimeCurrentMillis());
-}
-
-
-void SampleRateCalculator::UpdateMeasurements(double current_time) {
- if (measurements_count_++ != 0) {
- const double measured_ticks_per_ms =
- (kWallTimeQueryIntervalMs * ticks_per_ms_) /
- (current_time - last_wall_time_);
- // Update the average value.
- ticks_per_ms_ +=
- (measured_ticks_per_ms - ticks_per_ms_) / measurements_count_;
- // Update the externally accessible result.
- result_ = static_cast<AtomicWord>(ticks_per_ms_ * kResultScale);
- }
- last_wall_time_ = current_time;
- wall_time_query_countdown_ =
- static_cast<unsigned>(kWallTimeQueryIntervalMs * ticks_per_ms_);
-}
-
-
-const char* const ProfileGenerator::kAnonymousFunctionName =
- "(anonymous function)";
-const char* const ProfileGenerator::kProgramEntryName =
- "(program)";
-const char* const ProfileGenerator::kGarbageCollectorEntryName =
- "(garbage collector)";
-
-
-ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
- : profiles_(profiles),
- program_entry_(
- profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)),
- gc_entry_(
- profiles->NewCodeEntry(Logger::BUILTIN_TAG,
- kGarbageCollectorEntryName)) {
-}
-
-
-void ProfileGenerator::RecordTickSample(const TickSample& sample) {
- // Allocate space for stack frames + pc + function + vm-state.
- ScopedVector<CodeEntry*> entries(sample.frames_count + 3);
- // As actual number of decoded code entries may vary, initialize
- // entries vector with NULL values.
- CodeEntry** entry = entries.start();
- memset(entry, 0, entries.length() * sizeof(*entry));
- if (sample.pc != NULL) {
- *entry++ = code_map_.FindEntry(sample.pc);
-
- if (sample.has_external_callback) {
- // Don't use PC when in external callback code, as it can point
- // inside callback's code, and we will erroneously report
- // that a callback calls itself.
- *(entries.start()) = NULL;
- *entry++ = code_map_.FindEntry(sample.external_callback);
- } else if (sample.tos != NULL) {
- // Find out, if top of stack was pointing inside a JS function
- // meaning that we have encountered a frameless invocation.
- *entry = code_map_.FindEntry(sample.tos);
- if (*entry != NULL && !(*entry)->is_js_function()) {
- *entry = NULL;
- }
- entry++;
- }
-
- for (const Address *stack_pos = sample.stack,
- *stack_end = stack_pos + sample.frames_count;
- stack_pos != stack_end;
- ++stack_pos) {
- *entry++ = code_map_.FindEntry(*stack_pos);
- }
- }
-
- if (FLAG_prof_browser_mode) {
- bool no_symbolized_entries = true;
- for (CodeEntry** e = entries.start(); e != entry; ++e) {
- if (*e != NULL) {
- no_symbolized_entries = false;
- break;
- }
- }
- // If no frames were symbolized, put the VM state entry in.
- if (no_symbolized_entries) {
- *entry++ = EntryForVMState(sample.state);
- }
- }
-
- profiles_->AddPathToCurrentProfiles(entries);
-}
-
-
-void HeapGraphEdge::Init(
- int child_index, Type type, const char* name, HeapEntry* to) {
- ASSERT(type == kContextVariable
- || type == kProperty
- || type == kInternal
- || type == kShortcut);
- child_index_ = child_index;
- type_ = type;
- name_ = name;
- to_ = to;
-}
-
-
-void HeapGraphEdge::Init(int child_index, Type type, int index, HeapEntry* to) {
- ASSERT(type == kElement || type == kHidden);
- child_index_ = child_index;
- type_ = type;
- index_ = index;
- to_ = to;
-}
-
-
-void HeapGraphEdge::Init(int child_index, int index, HeapEntry* to) {
- Init(child_index, kElement, index, to);
-}
-
-
-HeapEntry* HeapGraphEdge::From() {
- return reinterpret_cast<HeapEntry*>(this - child_index_) - 1;
-}
-
-
-void HeapEntry::Init(HeapSnapshot* snapshot,
- Type type,
- const char* name,
- uint64_t id,
- int self_size,
- int children_count,
- int retainers_count) {
- snapshot_ = snapshot;
- type_ = type;
- painted_ = kUnpainted;
- name_ = name;
- self_size_ = self_size;
- retained_size_ = 0;
- children_count_ = children_count;
- retainers_count_ = retainers_count;
- dominator_ = NULL;
-
- union {
- uint64_t set_id;
- Id stored_id;
- } id_adaptor = {id};
- id_ = id_adaptor.stored_id;
-}
-
-
-void HeapEntry::SetNamedReference(HeapGraphEdge::Type type,
- int child_index,
- const char* name,
- HeapEntry* entry,
- int retainer_index) {
- children_arr()[child_index].Init(child_index, type, name, entry);
- entry->retainers_arr()[retainer_index] = children_arr() + child_index;
-}
-
-
-void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type,
- int child_index,
- int index,
- HeapEntry* entry,
- int retainer_index) {
- children_arr()[child_index].Init(child_index, type, index, entry);
- entry->retainers_arr()[retainer_index] = children_arr() + child_index;
-}
-
-
-void HeapEntry::SetUnidirElementReference(
- int child_index, int index, HeapEntry* entry) {
- children_arr()[child_index].Init(child_index, index, entry);
-}
-
-
-int HeapEntry::RetainedSize(bool exact) {
- if (exact && (retained_size_ & kExactRetainedSizeTag) == 0) {
- CalculateExactRetainedSize();
- }
- return retained_size_ & (~kExactRetainedSizeTag);
-}
-
-
-template<class Visitor>
-void HeapEntry::ApplyAndPaintAllReachable(Visitor* visitor) {
- List<HeapEntry*> list(10);
- list.Add(this);
- this->paint_reachable();
- visitor->Apply(this);
- while (!list.is_empty()) {
- HeapEntry* entry = list.RemoveLast();
- Vector<HeapGraphEdge> children = entry->children();
- for (int i = 0; i < children.length(); ++i) {
- if (children[i].type() == HeapGraphEdge::kShortcut) continue;
- HeapEntry* child = children[i].to();
- if (!child->painted_reachable()) {
- list.Add(child);
- child->paint_reachable();
- visitor->Apply(child);
- }
- }
- }
-}
-
-
-class NullClass {
- public:
- void Apply(HeapEntry* entry) { }
-};
-
-void HeapEntry::PaintAllReachable() {
- NullClass null;
- ApplyAndPaintAllReachable(&null);
-}
-
-
-void HeapEntry::Print(int max_depth, int indent) {
- OS::Print("%6d %6d [%llu] ", self_size(), RetainedSize(false), id());
- if (type() != kString) {
- OS::Print("%s %.40s\n", TypeAsString(), name_);
- } else {
- OS::Print("\"");
- const char* c = name_;
- while (*c && (c - name_) <= 40) {
- if (*c != '\n')
- OS::Print("%c", *c);
- else
- OS::Print("\\n");
- ++c;
- }
- OS::Print("\"\n");
- }
- if (--max_depth == 0) return;
- Vector<HeapGraphEdge> ch = children();
- for (int i = 0; i < ch.length(); ++i) {
- HeapGraphEdge& edge = ch[i];
- switch (edge.type()) {
- case HeapGraphEdge::kContextVariable:
- OS::Print(" %*c #%s: ", indent, ' ', edge.name());
- break;
- case HeapGraphEdge::kElement:
- OS::Print(" %*c %d: ", indent, ' ', edge.index());
- break;
- case HeapGraphEdge::kInternal:
- OS::Print(" %*c $%s: ", indent, ' ', edge.name());
- break;
- case HeapGraphEdge::kProperty:
- OS::Print(" %*c %s: ", indent, ' ', edge.name());
- break;
- case HeapGraphEdge::kHidden:
- OS::Print(" %*c $%d: ", indent, ' ', edge.index());
- break;
- case HeapGraphEdge::kShortcut:
- OS::Print(" %*c ^%s: ", indent, ' ', edge.name());
- break;
- default:
- OS::Print("!!! unknown edge type: %d ", edge.type());
- }
- edge.to()->Print(max_depth, indent + 2);
- }
-}
-
-
-const char* HeapEntry::TypeAsString() {
- switch (type()) {
- case kHidden: return "/hidden/";
- case kObject: return "/object/";
- case kClosure: return "/closure/";
- case kString: return "/string/";
- case kCode: return "/code/";
- case kArray: return "/array/";
- case kRegExp: return "/regexp/";
- case kHeapNumber: return "/number/";
- case kNative: return "/native/";
- default: return "???";
- }
-}
-
-
-int HeapEntry::EntriesSize(int entries_count,
- int children_count,
- int retainers_count) {
- return sizeof(HeapEntry) * entries_count // NOLINT
- + sizeof(HeapGraphEdge) * children_count // NOLINT
- + sizeof(HeapGraphEdge*) * retainers_count; // NOLINT
-}
-
-
-class RetainedSizeCalculator {
- public:
- RetainedSizeCalculator()
- : retained_size_(0) {
- }
-
- int reained_size() const { return retained_size_; }
-
- void Apply(HeapEntry** entry_ptr) {
- if ((*entry_ptr)->painted_reachable()) {
- retained_size_ += (*entry_ptr)->self_size();
- }
- }
-
- private:
- int retained_size_;
-};
-
-void HeapEntry::CalculateExactRetainedSize() {
- // To calculate retained size, first we paint all reachable nodes in
- // one color, then we paint (or re-paint) all nodes reachable from
- // other nodes with a different color. Then we sum up self sizes of
- // nodes painted with the first color.
- snapshot()->ClearPaint();
- PaintAllReachable();
-
- List<HeapEntry*> list(10);
- HeapEntry* root = snapshot()->root();
- if (this != root) {
- list.Add(root);
- root->paint_reachable_from_others();
- }
- while (!list.is_empty()) {
- HeapEntry* curr = list.RemoveLast();
- Vector<HeapGraphEdge> children = curr->children();
- for (int i = 0; i < children.length(); ++i) {
- if (children[i].type() == HeapGraphEdge::kShortcut) continue;
- HeapEntry* child = children[i].to();
- if (child != this && child->not_painted_reachable_from_others()) {
- list.Add(child);
- child->paint_reachable_from_others();
- }
- }
- }
-
- RetainedSizeCalculator ret_size_calc;
- snapshot()->IterateEntries(&ret_size_calc);
- retained_size_ = ret_size_calc.reained_size();
- ASSERT((retained_size_ & kExactRetainedSizeTag) == 0);
- retained_size_ |= kExactRetainedSizeTag;
-}
-
-
-// It is very important to keep objects that form a heap snapshot
-// as small as possible.
-namespace { // Avoid littering the global namespace.
-
-template <size_t ptr_size> struct SnapshotSizeConstants;
-
-template <> struct SnapshotSizeConstants<4> {
- static const int kExpectedHeapGraphEdgeSize = 12;
- static const int kExpectedHeapEntrySize = 36;
-};
-
-template <> struct SnapshotSizeConstants<8> {
- static const int kExpectedHeapGraphEdgeSize = 24;
- static const int kExpectedHeapEntrySize = 48;
-};
-
-} // namespace
-
-HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
- HeapSnapshot::Type type,
- const char* title,
- unsigned uid)
- : collection_(collection),
- type_(type),
- title_(title),
- uid_(uid),
- root_entry_(NULL),
- gc_roots_entry_(NULL),
- natives_root_entry_(NULL),
- raw_entries_(NULL),
- entries_sorted_(false) {
- STATIC_ASSERT(
- sizeof(HeapGraphEdge) ==
- SnapshotSizeConstants<sizeof(void*)>::kExpectedHeapGraphEdgeSize); // NOLINT
- STATIC_ASSERT(
- sizeof(HeapEntry) ==
- SnapshotSizeConstants<sizeof(void*)>::kExpectedHeapEntrySize); // NOLINT
-}
-
-HeapSnapshot::~HeapSnapshot() {
- DeleteArray(raw_entries_);
-}
-
-
-void HeapSnapshot::Delete() {
- collection_->RemoveSnapshot(this);
- delete this;
-}
-
-
-void HeapSnapshot::AllocateEntries(int entries_count,
- int children_count,
- int retainers_count) {
- ASSERT(raw_entries_ == NULL);
- raw_entries_ = NewArray<char>(
- HeapEntry::EntriesSize(entries_count, children_count, retainers_count));
-#ifdef DEBUG
- raw_entries_size_ =
- HeapEntry::EntriesSize(entries_count, children_count, retainers_count);
-#endif
-}
-
-
-static void HeapEntryClearPaint(HeapEntry** entry_ptr) {
- (*entry_ptr)->clear_paint();
-}
-
-void HeapSnapshot::ClearPaint() {
- entries_.Iterate(HeapEntryClearPaint);
-}
-
-
-HeapEntry* HeapSnapshot::AddRootEntry(int children_count) {
- ASSERT(root_entry_ == NULL);
- return (root_entry_ = AddEntry(HeapEntry::kObject,
- "",
- HeapObjectsMap::kInternalRootObjectId,
- 0,
- children_count,
- 0));
-}
-
-
-HeapEntry* HeapSnapshot::AddGcRootsEntry(int children_count,
- int retainers_count) {
- ASSERT(gc_roots_entry_ == NULL);
- return (gc_roots_entry_ = AddEntry(HeapEntry::kObject,
- "(GC roots)",
- HeapObjectsMap::kGcRootsObjectId,
- 0,
- children_count,
- retainers_count));
-}
-
-
-HeapEntry* HeapSnapshot::AddNativesRootEntry(int children_count,
- int retainers_count) {
- ASSERT(natives_root_entry_ == NULL);
- return (natives_root_entry_ = AddEntry(
- HeapEntry::kObject,
- "(Native objects)",
- HeapObjectsMap::kNativesRootObjectId,
- 0,
- children_count,
- retainers_count));
-}
-
-
-HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
- const char* name,
- uint64_t id,
- int size,
- int children_count,
- int retainers_count) {
- HeapEntry* entry = GetNextEntryToInit();
- entry->Init(this, type, name, id, size, children_count, retainers_count);
- return entry;
-}
-
-
-void HeapSnapshot::SetDominatorsToSelf() {
- for (int i = 0; i < entries_.length(); ++i) {
- HeapEntry* entry = entries_[i];
- if (entry->dominator() == NULL) entry->set_dominator(entry);
- }
-}
-
-
-HeapEntry* HeapSnapshot::GetNextEntryToInit() {
- if (entries_.length() > 0) {
- HeapEntry* last_entry = entries_.last();
- entries_.Add(reinterpret_cast<HeapEntry*>(
- reinterpret_cast<char*>(last_entry) + last_entry->EntrySize()));
- } else {
- entries_.Add(reinterpret_cast<HeapEntry*>(raw_entries_));
- }
- ASSERT(reinterpret_cast<char*>(entries_.last()) <
- (raw_entries_ + raw_entries_size_));
- return entries_.last();
-}
-
-
-HeapEntry* HeapSnapshot::GetEntryById(uint64_t id) {
- List<HeapEntry*>* entries_by_id = GetSortedEntriesList();
-
- // Perform a binary search by id.
- int low = 0;
- int high = entries_by_id->length() - 1;
- while (low <= high) {
- int mid =
- (static_cast<unsigned int>(low) + static_cast<unsigned int>(high)) >> 1;
- uint64_t mid_id = entries_by_id->at(mid)->id();
- if (mid_id > id)
- high = mid - 1;
- else if (mid_id < id)
- low = mid + 1;
- else
- return entries_by_id->at(mid);
- }
- return NULL;
-}
-
-
-template<class T>
-static int SortByIds(const T* entry1_ptr,
- const T* entry2_ptr) {
- if ((*entry1_ptr)->id() == (*entry2_ptr)->id()) return 0;
- return (*entry1_ptr)->id() < (*entry2_ptr)->id() ? -1 : 1;
-}
-
-List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() {
- if (!entries_sorted_) {
- entries_.Sort(SortByIds);
- entries_sorted_ = true;
- }
- return &entries_;
-}
-
-
-void HeapSnapshot::Print(int max_depth) {
- root()->Print(max_depth, 0);
-}
-
-
-// We split IDs on evens for embedder objects (see
-// HeapObjectsMap::GenerateId) and odds for native objects.
-const uint64_t HeapObjectsMap::kInternalRootObjectId = 1;
-const uint64_t HeapObjectsMap::kGcRootsObjectId = 3;
-const uint64_t HeapObjectsMap::kNativesRootObjectId = 5;
-// Increase kFirstAvailableObjectId if new 'special' objects appear.
-const uint64_t HeapObjectsMap::kFirstAvailableObjectId = 7;
-
-HeapObjectsMap::HeapObjectsMap()
- : initial_fill_mode_(true),
- next_id_(kFirstAvailableObjectId),
- entries_map_(AddressesMatch),
- entries_(new List<EntryInfo>()) { }
-
-
-HeapObjectsMap::~HeapObjectsMap() {
- delete entries_;
-}
-
-
-void HeapObjectsMap::SnapshotGenerationFinished() {
- initial_fill_mode_ = false;
- RemoveDeadEntries();
-}
-
-
-uint64_t HeapObjectsMap::FindObject(Address addr) {
- if (!initial_fill_mode_) {
- uint64_t existing = FindEntry(addr);
- if (existing != 0) return existing;
- }
- uint64_t id = next_id_;
- next_id_ += 2;
- AddEntry(addr, id);
- return id;
-}
-
-
-void HeapObjectsMap::MoveObject(Address from, Address to) {
- if (from == to) return;
- HashMap::Entry* entry = entries_map_.Lookup(from, AddressHash(from), false);
- if (entry != NULL) {
- void* value = entry->value;
- entries_map_.Remove(from, AddressHash(from));
- entry = entries_map_.Lookup(to, AddressHash(to), true);
- // We can have an entry at the new location, it is OK, as GC can overwrite
- // dead objects with alive objects being moved.
- entry->value = value;
- }
-}
-
-
-void HeapObjectsMap::AddEntry(Address addr, uint64_t id) {
- HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), true);
- ASSERT(entry->value == NULL);
- entry->value = reinterpret_cast<void*>(entries_->length());
- entries_->Add(EntryInfo(id));
-}
-
-
-uint64_t HeapObjectsMap::FindEntry(Address addr) {
- HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), false);
- if (entry != NULL) {
- int entry_index =
- static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
- EntryInfo& entry_info = entries_->at(entry_index);
- entry_info.accessed = true;
- return entry_info.id;
- } else {
- return 0;
- }
-}
-
-
-void HeapObjectsMap::RemoveDeadEntries() {
- List<EntryInfo>* new_entries = new List<EntryInfo>();
- List<void*> dead_entries;
- for (HashMap::Entry* entry = entries_map_.Start();
- entry != NULL;
- entry = entries_map_.Next(entry)) {
- int entry_index =
- static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
- EntryInfo& entry_info = entries_->at(entry_index);
- if (entry_info.accessed) {
- entry->value = reinterpret_cast<void*>(new_entries->length());
- new_entries->Add(EntryInfo(entry_info.id, false));
- } else {
- dead_entries.Add(entry->key);
- }
- }
- for (int i = 0; i < dead_entries.length(); ++i) {
- void* raw_entry = dead_entries[i];
- entries_map_.Remove(
- raw_entry, AddressHash(reinterpret_cast<Address>(raw_entry)));
- }
- delete entries_;
- entries_ = new_entries;
-}
-
-
-uint64_t HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
- uint64_t id = static_cast<uint64_t>(info->GetHash());
- const char* label = info->GetLabel();
- id ^= HashSequentialString(label, static_cast<int>(strlen(label)));
- intptr_t element_count = info->GetElementCount();
- if (element_count != -1)
- id ^= ComputeIntegerHash(static_cast<uint32_t>(element_count));
- return id << 1;
-}
-
-
-HeapSnapshotsCollection::HeapSnapshotsCollection()
- : is_tracking_objects_(false),
- snapshots_uids_(HeapSnapshotsMatch),
- token_enumerator_(new TokenEnumerator()) {
-}
-
-
-static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) {
- delete *snapshot_ptr;
-}
-
-
-HeapSnapshotsCollection::~HeapSnapshotsCollection() {
- delete token_enumerator_;
- snapshots_.Iterate(DeleteHeapSnapshot);
-}
-
-
-HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(HeapSnapshot::Type type,
- const char* name,
- unsigned uid) {
- is_tracking_objects_ = true; // Start watching for heap objects moves.
- return new HeapSnapshot(this, type, name, uid);
-}
-
-
-void HeapSnapshotsCollection::SnapshotGenerationFinished(
- HeapSnapshot* snapshot) {
- ids_.SnapshotGenerationFinished();
- if (snapshot != NULL) {
- snapshots_.Add(snapshot);
- HashMap::Entry* entry =
- snapshots_uids_.Lookup(reinterpret_cast<void*>(snapshot->uid()),
- static_cast<uint32_t>(snapshot->uid()),
- true);
- ASSERT(entry->value == NULL);
- entry->value = snapshot;
- }
-}
-
-
-HeapSnapshot* HeapSnapshotsCollection::GetSnapshot(unsigned uid) {
- HashMap::Entry* entry = snapshots_uids_.Lookup(reinterpret_cast<void*>(uid),
- static_cast<uint32_t>(uid),
- false);
- return entry != NULL ? reinterpret_cast<HeapSnapshot*>(entry->value) : NULL;
-}
-
-
-void HeapSnapshotsCollection::RemoveSnapshot(HeapSnapshot* snapshot) {
- snapshots_.RemoveElement(snapshot);
- unsigned uid = snapshot->uid();
- snapshots_uids_.Remove(reinterpret_cast<void*>(uid),
- static_cast<uint32_t>(uid));
-}
-
-
-HeapEntry *const HeapEntriesMap::kHeapEntryPlaceholder =
- reinterpret_cast<HeapEntry*>(1);
-
-HeapEntriesMap::HeapEntriesMap()
- : entries_(HeapThingsMatch),
- entries_count_(0),
- total_children_count_(0),
- total_retainers_count_(0) {
-}
-
-
-HeapEntriesMap::~HeapEntriesMap() {
- for (HashMap::Entry* p = entries_.Start(); p != NULL; p = entries_.Next(p)) {
- delete reinterpret_cast<EntryInfo*>(p->value);
- }
-}
-
-
-void HeapEntriesMap::AllocateEntries() {
- for (HashMap::Entry* p = entries_.Start();
- p != NULL;
- p = entries_.Next(p)) {
- EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(p->value);
- entry_info->entry = entry_info->allocator->AllocateEntry(
- p->key,
- entry_info->children_count,
- entry_info->retainers_count);
- ASSERT(entry_info->entry != NULL);
- ASSERT(entry_info->entry != kHeapEntryPlaceholder);
- entry_info->children_count = 0;
- entry_info->retainers_count = 0;
- }
-}
-
-
-HeapEntry* HeapEntriesMap::Map(HeapThing thing) {
- HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), false);
- if (cache_entry != NULL) {
- EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(cache_entry->value);
- return entry_info->entry;
- } else {
- return NULL;
- }
-}
-
-
-void HeapEntriesMap::Pair(
- HeapThing thing, HeapEntriesAllocator* allocator, HeapEntry* entry) {
- HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), true);
- ASSERT(cache_entry->value == NULL);
- cache_entry->value = new EntryInfo(entry, allocator);
- ++entries_count_;
-}
-
-
-void HeapEntriesMap::CountReference(HeapThing from, HeapThing to,
- int* prev_children_count,
- int* prev_retainers_count) {
- HashMap::Entry* from_cache_entry = entries_.Lookup(from, Hash(from), false);
- HashMap::Entry* to_cache_entry = entries_.Lookup(to, Hash(to), false);
- ASSERT(from_cache_entry != NULL);
- ASSERT(to_cache_entry != NULL);
- EntryInfo* from_entry_info =
- reinterpret_cast<EntryInfo*>(from_cache_entry->value);
- EntryInfo* to_entry_info =
- reinterpret_cast<EntryInfo*>(to_cache_entry->value);
- if (prev_children_count)
- *prev_children_count = from_entry_info->children_count;
- if (prev_retainers_count)
- *prev_retainers_count = to_entry_info->retainers_count;
- ++from_entry_info->children_count;
- ++to_entry_info->retainers_count;
- ++total_children_count_;
- ++total_retainers_count_;
-}
-
-
-HeapObjectsSet::HeapObjectsSet()
- : entries_(HeapEntriesMap::HeapThingsMatch) {
-}
-
-
-void HeapObjectsSet::Clear() {
- entries_.Clear();
-}
-
-
-bool HeapObjectsSet::Contains(Object* obj) {
- if (!obj->IsHeapObject()) return false;
- HeapObject* object = HeapObject::cast(obj);
- HashMap::Entry* cache_entry =
- entries_.Lookup(object, HeapEntriesMap::Hash(object), false);
- return cache_entry != NULL;
-}
-
-
-void HeapObjectsSet::Insert(Object* obj) {
- if (!obj->IsHeapObject()) return;
- HeapObject* object = HeapObject::cast(obj);
- HashMap::Entry* cache_entry =
- entries_.Lookup(object, HeapEntriesMap::Hash(object), true);
- if (cache_entry->value == NULL) {
- cache_entry->value = HeapEntriesMap::kHeapEntryPlaceholder;
- }
-}
-
-
-HeapObject *const V8HeapExplorer::kInternalRootObject =
- reinterpret_cast<HeapObject*>(
- static_cast<intptr_t>(HeapObjectsMap::kInternalRootObjectId));
-HeapObject *const V8HeapExplorer::kGcRootsObject =
- reinterpret_cast<HeapObject*>(
- static_cast<intptr_t>(HeapObjectsMap::kGcRootsObjectId));
-
-
-V8HeapExplorer::V8HeapExplorer(
- HeapSnapshot* snapshot,
- SnapshottingProgressReportingInterface* progress)
- : snapshot_(snapshot),
- collection_(snapshot_->collection()),
- progress_(progress),
- filler_(NULL) {
-}
-
-
-V8HeapExplorer::~V8HeapExplorer() {
-}
-
-
-HeapEntry* V8HeapExplorer::AllocateEntry(
- HeapThing ptr, int children_count, int retainers_count) {
- return AddEntry(
- reinterpret_cast<HeapObject*>(ptr), children_count, retainers_count);
-}
-
-
-HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
- int children_count,
- int retainers_count) {
- if (object == kInternalRootObject) {
- ASSERT(retainers_count == 0);
- return snapshot_->AddRootEntry(children_count);
- } else if (object == kGcRootsObject) {
- return snapshot_->AddGcRootsEntry(children_count, retainers_count);
- } else if (object->IsJSFunction()) {
- JSFunction* func = JSFunction::cast(object);
- SharedFunctionInfo* shared = func->shared();
- return AddEntry(object,
- HeapEntry::kClosure,
- collection_->names()->GetName(String::cast(shared->name())),
- children_count,
- retainers_count);
- } else if (object->IsJSRegExp()) {
- JSRegExp* re = JSRegExp::cast(object);
- return AddEntry(object,
- HeapEntry::kRegExp,
- collection_->names()->GetName(re->Pattern()),
- children_count,
- retainers_count);
- } else if (object->IsJSObject()) {
- return AddEntry(object,
- HeapEntry::kObject,
- collection_->names()->GetName(
- GetConstructorNameForHeapProfile(
- JSObject::cast(object))),
- children_count,
- retainers_count);
- } else if (object->IsString()) {
- return AddEntry(object,
- HeapEntry::kString,
- collection_->names()->GetName(String::cast(object)),
- children_count,
- retainers_count);
- } else if (object->IsCode()) {
- return AddEntry(object,
- HeapEntry::kCode,
- "",
- children_count,
- retainers_count);
- } else if (object->IsSharedFunctionInfo()) {
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
- return AddEntry(object,
- HeapEntry::kCode,
- collection_->names()->GetName(String::cast(shared->name())),
- children_count,
- retainers_count);
- } else if (object->IsScript()) {
- Script* script = Script::cast(object);
- return AddEntry(object,
- HeapEntry::kCode,
- script->name()->IsString() ?
- collection_->names()->GetName(
- String::cast(script->name()))
- : "",
- children_count,
- retainers_count);
- } else if (object->IsFixedArray() || object->IsByteArray()) {
- return AddEntry(object,
- HeapEntry::kArray,
- "",
- children_count,
- retainers_count);
- } else if (object->IsHeapNumber()) {
- return AddEntry(object,
- HeapEntry::kHeapNumber,
- "number",
- children_count,
- retainers_count);
- }
- return AddEntry(object,
- HeapEntry::kHidden,
- GetSystemEntryName(object),
- children_count,
- retainers_count);
-}
-
-
-HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
- HeapEntry::Type type,
- const char* name,
- int children_count,
- int retainers_count) {
- return snapshot_->AddEntry(type,
- name,
- collection_->GetObjectId(object->address()),
- object->Size(),
- children_count,
- retainers_count);
-}
-
-
-void V8HeapExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
- filler->AddEntry(kInternalRootObject, this);
- filler->AddEntry(kGcRootsObject, this);
-}
-
-
-const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
- switch (object->map()->instance_type()) {
- case MAP_TYPE: return "system / Map";
- case JS_GLOBAL_PROPERTY_CELL_TYPE: return "system / JSGlobalPropertyCell";
- case PROXY_TYPE: return "system / Proxy";
- case ODDBALL_TYPE: return "system / Oddball";
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
- case NAME##_TYPE: return "system / "#Name;
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- default: return "system";
- }
-}
-
-
-int V8HeapExplorer::EstimateObjectsCount() {
- HeapIterator iterator(HeapIterator::kFilterUnreachable);
- int objects_count = 0;
- for (HeapObject* obj = iterator.next();
- obj != NULL;
- obj = iterator.next(), ++objects_count) {}
- return objects_count;
-}
-
-
-class IndexedReferencesExtractor : public ObjectVisitor {
- public:
- IndexedReferencesExtractor(V8HeapExplorer* generator,
- HeapObject* parent_obj,
- HeapEntry* parent_entry)
- : generator_(generator),
- parent_obj_(parent_obj),
- parent_(parent_entry),
- next_index_(1) {
- }
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
- if (CheckVisitedAndUnmark(p)) continue;
- generator_->SetHiddenReference(parent_obj_, parent_, next_index_++, *p);
- }
- }
- static void MarkVisitedField(HeapObject* obj, int offset) {
- if (offset < 0) return;
- Address field = obj->address() + offset;
- ASSERT(!Memory::Object_at(field)->IsFailure());
- ASSERT(Memory::Object_at(field)->IsHeapObject());
- *field |= kFailureTag;
- }
- private:
- bool CheckVisitedAndUnmark(Object** field) {
- if ((*field)->IsFailure()) {
- intptr_t untagged = reinterpret_cast<intptr_t>(*field) & ~kFailureTagMask;
- *field = reinterpret_cast<Object*>(untagged | kHeapObjectTag);
- ASSERT((*field)->IsHeapObject());
- return true;
- }
- return false;
- }
- V8HeapExplorer* generator_;
- HeapObject* parent_obj_;
- HeapEntry* parent_;
- int next_index_;
-};
-
-
-void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
- HeapEntry* entry = GetEntry(obj);
- if (entry == NULL) return; // No interest in this object.
-
- if (obj->IsJSGlobalProxy()) {
- // We need to reference JS global objects from snapshot's root.
- // We use JSGlobalProxy because this is what embedder (e.g. browser)
- // uses for the global object.
- JSGlobalProxy* proxy = JSGlobalProxy::cast(obj);
- SetRootShortcutReference(proxy->map()->prototype());
- SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
- IndexedReferencesExtractor refs_extractor(this, obj, entry);
- obj->Iterate(&refs_extractor);
- } else if (obj->IsJSObject()) {
- JSObject* js_obj = JSObject::cast(obj);
- ExtractClosureReferences(js_obj, entry);
- ExtractPropertyReferences(js_obj, entry);
- ExtractElementReferences(js_obj, entry);
- ExtractInternalReferences(js_obj, entry);
- SetPropertyReference(
- obj, entry, HEAP->Proto_symbol(), js_obj->GetPrototype());
- if (obj->IsJSFunction()) {
- JSFunction* js_fun = JSFunction::cast(js_obj);
- Object* proto_or_map = js_fun->prototype_or_initial_map();
- if (!proto_or_map->IsTheHole()) {
- if (!proto_or_map->IsMap()) {
- SetPropertyReference(
- obj, entry,
- HEAP->prototype_symbol(), proto_or_map,
- JSFunction::kPrototypeOrInitialMapOffset);
- } else {
- SetPropertyReference(
- obj, entry,
- HEAP->prototype_symbol(), js_fun->prototype());
- }
- }
- SetInternalReference(js_fun, entry,
- "shared", js_fun->shared(),
- JSFunction::kSharedFunctionInfoOffset);
- SetInternalReference(js_fun, entry,
- "context", js_fun->unchecked_context(),
- JSFunction::kContextOffset);
- SetInternalReference(js_fun, entry,
- "literals", js_fun->literals(),
- JSFunction::kLiteralsOffset);
- }
- SetInternalReference(obj, entry,
- "properties", js_obj->properties(),
- JSObject::kPropertiesOffset);
- SetInternalReference(obj, entry,
- "elements", js_obj->elements(),
- JSObject::kElementsOffset);
- SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
- IndexedReferencesExtractor refs_extractor(this, obj, entry);
- obj->Iterate(&refs_extractor);
- } else if (obj->IsString()) {
- if (obj->IsConsString()) {
- ConsString* cs = ConsString::cast(obj);
- SetInternalReference(obj, entry, 1, cs->first());
- SetInternalReference(obj, entry, 2, cs->second());
- }
- } else if (obj->IsMap()) {
- Map* map = Map::cast(obj);
- SetInternalReference(obj, entry,
- "prototype", map->prototype(), Map::kPrototypeOffset);
- SetInternalReference(obj, entry,
- "constructor", map->constructor(),
- Map::kConstructorOffset);
- SetInternalReference(obj, entry,
- "descriptors", map->instance_descriptors(),
- Map::kInstanceDescriptorsOffset);
- SetInternalReference(obj, entry,
- "code_cache", map->code_cache(),
- Map::kCodeCacheOffset);
- SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
- IndexedReferencesExtractor refs_extractor(this, obj, entry);
- obj->Iterate(&refs_extractor);
- } else if (obj->IsSharedFunctionInfo()) {
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
- SetInternalReference(obj, entry,
- "name", shared->name(),
- SharedFunctionInfo::kNameOffset);
- SetInternalReference(obj, entry,
- "code", shared->unchecked_code(),
- SharedFunctionInfo::kCodeOffset);
- SetInternalReference(obj, entry,
- "instance_class_name", shared->instance_class_name(),
- SharedFunctionInfo::kInstanceClassNameOffset);
- SetInternalReference(obj, entry,
- "script", shared->script(),
- SharedFunctionInfo::kScriptOffset);
- SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
- IndexedReferencesExtractor refs_extractor(this, obj, entry);
- obj->Iterate(&refs_extractor);
- } else {
- SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
- IndexedReferencesExtractor refs_extractor(this, obj, entry);
- obj->Iterate(&refs_extractor);
- }
-}
-
-
-void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj,
- HeapEntry* entry) {
- if (js_obj->IsJSFunction()) {
- HandleScope hs;
- JSFunction* func = JSFunction::cast(js_obj);
- Context* context = func->context();
- ZoneScope zscope(DELETE_ON_EXIT);
- SerializedScopeInfo* serialized_scope_info =
- context->closure()->shared()->scope_info();
- ScopeInfo<ZoneListAllocationPolicy> zone_scope_info(serialized_scope_info);
- int locals_number = zone_scope_info.NumberOfLocals();
- for (int i = 0; i < locals_number; ++i) {
- String* local_name = *zone_scope_info.LocalName(i);
- int idx = serialized_scope_info->ContextSlotIndex(local_name, NULL);
- if (idx >= 0 && idx < context->length()) {
- SetClosureReference(js_obj, entry, local_name, context->get(idx));
- }
- }
- }
-}
-
-
-void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj,
- HeapEntry* entry) {
- if (js_obj->HasFastProperties()) {
- DescriptorArray* descs = js_obj->map()->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- switch (descs->GetType(i)) {
- case FIELD: {
- int index = descs->GetFieldIndex(i);
- if (index < js_obj->map()->inobject_properties()) {
- SetPropertyReference(
- js_obj, entry,
- descs->GetKey(i), js_obj->InObjectPropertyAt(index),
- js_obj->GetInObjectPropertyOffset(index));
- } else {
- SetPropertyReference(
- js_obj, entry,
- descs->GetKey(i), js_obj->FastPropertyAt(index));
- }
- break;
- }
- case CONSTANT_FUNCTION:
- SetPropertyReference(
- js_obj, entry,
- descs->GetKey(i), descs->GetConstantFunction(i));
- break;
- default: ;
- }
- }
- } else {
- StringDictionary* dictionary = js_obj->property_dictionary();
- int length = dictionary->Capacity();
- for (int i = 0; i < length; ++i) {
- Object* k = dictionary->KeyAt(i);
- if (dictionary->IsKey(k)) {
- Object* target = dictionary->ValueAt(i);
- SetPropertyReference(
- js_obj, entry, String::cast(k), target);
- // We assume that global objects can only have slow properties.
- if (target->IsJSGlobalPropertyCell()) {
- SetPropertyShortcutReference(js_obj,
- entry,
- String::cast(k),
- JSGlobalPropertyCell::cast(
- target)->value());
- }
- }
- }
- }
-}
-
-
-void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj,
- HeapEntry* entry) {
- if (js_obj->HasFastElements()) {
- FixedArray* elements = FixedArray::cast(js_obj->elements());
- int length = js_obj->IsJSArray() ?
- Smi::cast(JSArray::cast(js_obj)->length())->value() :
- elements->length();
- for (int i = 0; i < length; ++i) {
- if (!elements->get(i)->IsTheHole()) {
- SetElementReference(js_obj, entry, i, elements->get(i));
- }
- }
- } else if (js_obj->HasDictionaryElements()) {
- NumberDictionary* dictionary = js_obj->element_dictionary();
- int length = dictionary->Capacity();
- for (int i = 0; i < length; ++i) {
- Object* k = dictionary->KeyAt(i);
- if (dictionary->IsKey(k)) {
- ASSERT(k->IsNumber());
- uint32_t index = static_cast<uint32_t>(k->Number());
- SetElementReference(js_obj, entry, index, dictionary->ValueAt(i));
- }
- }
- }
-}
-
-
-void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj,
- HeapEntry* entry) {
- int length = js_obj->GetInternalFieldCount();
- for (int i = 0; i < length; ++i) {
- Object* o = js_obj->GetInternalField(i);
- SetInternalReference(
- js_obj, entry, i, o, js_obj->GetInternalFieldOffset(i));
- }
-}
-
-
-HeapEntry* V8HeapExplorer::GetEntry(Object* obj) {
- if (!obj->IsHeapObject()) return NULL;
- return filler_->FindOrAddEntry(obj, this);
-}
-
-
-class RootsReferencesExtractor : public ObjectVisitor {
- public:
- explicit RootsReferencesExtractor(V8HeapExplorer* explorer)
- : explorer_(explorer) {
- }
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) explorer_->SetGcRootsReference(*p);
- }
- private:
- V8HeapExplorer* explorer_;
-};
-
-
-bool V8HeapExplorer::IterateAndExtractReferences(
- SnapshotFillerInterface* filler) {
- filler_ = filler;
- HeapIterator iterator(HeapIterator::kFilterUnreachable);
- bool interrupted = false;
- // Heap iteration with filtering must be finished in any case.
- for (HeapObject* obj = iterator.next();
- obj != NULL;
- obj = iterator.next(), progress_->ProgressStep()) {
- if (!interrupted) {
- ExtractReferences(obj);
- if (!progress_->ProgressReport(false)) interrupted = true;
- }
- }
- if (interrupted) {
- filler_ = NULL;
- return false;
- }
- SetRootGcRootsReference();
- RootsReferencesExtractor extractor(this);
- HEAP->IterateRoots(&extractor, VISIT_ALL);
- filler_ = NULL;
- return progress_->ProgressReport(false);
-}
-
-
-void V8HeapExplorer::SetClosureReference(HeapObject* parent_obj,
- HeapEntry* parent_entry,
- String* reference_name,
- Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- filler_->SetNamedReference(HeapGraphEdge::kContextVariable,
- parent_obj,
- parent_entry,
- collection_->names()->GetName(reference_name),
- child_obj,
- child_entry);
- }
-}
-
-
-void V8HeapExplorer::SetElementReference(HeapObject* parent_obj,
- HeapEntry* parent_entry,
- int index,
- Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- filler_->SetIndexedReference(HeapGraphEdge::kElement,
- parent_obj,
- parent_entry,
- index,
- child_obj,
- child_entry);
- }
-}
-
-
-void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
- HeapEntry* parent_entry,
- const char* reference_name,
- Object* child_obj,
- int field_offset) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- filler_->SetNamedReference(HeapGraphEdge::kInternal,
- parent_obj,
- parent_entry,
- reference_name,
- child_obj,
- child_entry);
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
- }
-}
-
-
-void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
- HeapEntry* parent_entry,
- int index,
- Object* child_obj,
- int field_offset) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- filler_->SetNamedReference(HeapGraphEdge::kInternal,
- parent_obj,
- parent_entry,
- collection_->names()->GetName(index),
- child_obj,
- child_entry);
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
- }
-}
-
-
-void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
- HeapEntry* parent_entry,
- int index,
- Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- filler_->SetIndexedReference(HeapGraphEdge::kHidden,
- parent_obj,
- parent_entry,
- index,
- child_obj,
- child_entry);
- }
-}
-
-
-void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
- HeapEntry* parent_entry,
- String* reference_name,
- Object* child_obj,
- int field_offset) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- HeapGraphEdge::Type type = reference_name->length() > 0 ?
- HeapGraphEdge::kProperty : HeapGraphEdge::kInternal;
- filler_->SetNamedReference(type,
- parent_obj,
- parent_entry,
- collection_->names()->GetName(reference_name),
- child_obj,
- child_entry);
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
- }
-}
-
-
-void V8HeapExplorer::SetPropertyShortcutReference(HeapObject* parent_obj,
- HeapEntry* parent_entry,
- String* reference_name,
- Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- filler_->SetNamedReference(HeapGraphEdge::kShortcut,
- parent_obj,
- parent_entry,
- collection_->names()->GetName(reference_name),
- child_obj,
- child_entry);
- }
-}
-
-
-void V8HeapExplorer::SetRootGcRootsReference() {
- filler_->SetIndexedAutoIndexReference(
- HeapGraphEdge::kElement,
- kInternalRootObject, snapshot_->root(),
- kGcRootsObject, snapshot_->gc_roots());
-}
-
-
-void V8HeapExplorer::SetRootShortcutReference(Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- ASSERT(child_entry != NULL);
- filler_->SetNamedAutoIndexReference(
- HeapGraphEdge::kShortcut,
- kInternalRootObject, snapshot_->root(),
- child_obj, child_entry);
-}
-
-
-void V8HeapExplorer::SetGcRootsReference(Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- filler_->SetIndexedAutoIndexReference(
- HeapGraphEdge::kElement,
- kGcRootsObject, snapshot_->gc_roots(),
- child_obj, child_entry);
- }
-}
-
-
-class GlobalHandlesExtractor : public ObjectVisitor {
- public:
- explicit GlobalHandlesExtractor(NativeObjectsExplorer* explorer)
- : explorer_(explorer) {}
- virtual ~GlobalHandlesExtractor() {}
- virtual void VisitPointers(Object** start, Object** end) {
- UNREACHABLE();
- }
- virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {
- explorer_->VisitSubtreeWrapper(p, class_id);
- }
- private:
- NativeObjectsExplorer* explorer_;
-};
-
-HeapThing const NativeObjectsExplorer::kNativesRootObject =
- reinterpret_cast<HeapThing>(
- static_cast<intptr_t>(HeapObjectsMap::kNativesRootObjectId));
-
-
-NativeObjectsExplorer::NativeObjectsExplorer(
- HeapSnapshot* snapshot, SnapshottingProgressReportingInterface* progress)
- : snapshot_(snapshot),
- collection_(snapshot_->collection()),
- progress_(progress),
- embedder_queried_(false),
- objects_by_info_(RetainedInfosMatch),
- filler_(NULL) {
-}
-
-
-NativeObjectsExplorer::~NativeObjectsExplorer() {
- for (HashMap::Entry* p = objects_by_info_.Start();
- p != NULL;
- p = objects_by_info_.Next(p)) {
- v8::RetainedObjectInfo* info =
- reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
- info->Dispose();
- List<HeapObject*>* objects =
- reinterpret_cast<List<HeapObject*>* >(p->value);
- delete objects;
- }
-}
-
-
-HeapEntry* NativeObjectsExplorer::AllocateEntry(
- HeapThing ptr, int children_count, int retainers_count) {
- if (ptr == kNativesRootObject) {
- return snapshot_->AddNativesRootEntry(children_count, retainers_count);
- } else {
- v8::RetainedObjectInfo* info =
- reinterpret_cast<v8::RetainedObjectInfo*>(ptr);
- intptr_t elements = info->GetElementCount();
- intptr_t size = info->GetSizeInBytes();
- return snapshot_->AddEntry(
- HeapEntry::kNative,
- elements != -1 ?
- collection_->names()->GetFormatted(
- "%s / %" V8_PTR_PREFIX "d entries",
- info->GetLabel(),
- info->GetElementCount()) :
- collection_->names()->GetCopy(info->GetLabel()),
- HeapObjectsMap::GenerateId(info),
- size != -1 ? static_cast<int>(size) : 0,
- children_count,
- retainers_count);
- }
-}
-
-
-void NativeObjectsExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
- if (EstimateObjectsCount() <= 0) return;
- filler->AddEntry(kNativesRootObject, this);
-}
-
-
-int NativeObjectsExplorer::EstimateObjectsCount() {
- FillRetainedObjects();
- return objects_by_info_.occupancy();
-}
-
-
-void NativeObjectsExplorer::FillRetainedObjects() {
- if (embedder_queried_) return;
- Isolate* isolate = Isolate::Current();
- // Record objects that are joined into ObjectGroups.
- isolate->heap()->CallGlobalGCPrologueCallback();
- List<ObjectGroup*>* groups = isolate->global_handles()->object_groups();
- for (int i = 0; i < groups->length(); ++i) {
- ObjectGroup* group = groups->at(i);
- if (group->info_ == NULL) continue;
- List<HeapObject*>* list = GetListMaybeDisposeInfo(group->info_);
- for (int j = 0; j < group->objects_.length(); ++j) {
- HeapObject* obj = HeapObject::cast(*group->objects_[j]);
- list->Add(obj);
- in_groups_.Insert(obj);
- }
- group->info_ = NULL; // Acquire info object ownership.
- }
- isolate->global_handles()->RemoveObjectGroups();
- isolate->heap()->CallGlobalGCEpilogueCallback();
- // Record objects that are not in ObjectGroups, but have class ID.
- GlobalHandlesExtractor extractor(this);
- isolate->global_handles()->IterateAllRootsWithClassIds(&extractor);
- embedder_queried_ = true;
-}
-
-
-List<HeapObject*>* NativeObjectsExplorer::GetListMaybeDisposeInfo(
- v8::RetainedObjectInfo* info) {
- HashMap::Entry* entry =
- objects_by_info_.Lookup(info, InfoHash(info), true);
- if (entry->value != NULL) {
- info->Dispose();
- } else {
- entry->value = new List<HeapObject*>(4);
- }
- return reinterpret_cast<List<HeapObject*>* >(entry->value);
-}
-
-
-bool NativeObjectsExplorer::IterateAndExtractReferences(
- SnapshotFillerInterface* filler) {
- if (EstimateObjectsCount() <= 0) return true;
- filler_ = filler;
- FillRetainedObjects();
- for (HashMap::Entry* p = objects_by_info_.Start();
- p != NULL;
- p = objects_by_info_.Next(p)) {
- v8::RetainedObjectInfo* info =
- reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
- SetNativeRootReference(info);
- List<HeapObject*>* objects =
- reinterpret_cast<List<HeapObject*>* >(p->value);
- for (int i = 0; i < objects->length(); ++i) {
- SetWrapperNativeReferences(objects->at(i), info);
- }
- }
- SetRootNativesRootReference();
- filler_ = NULL;
- return true;
-}
-
-
-void NativeObjectsExplorer::SetNativeRootReference(
- v8::RetainedObjectInfo* info) {
- HeapEntry* child_entry = filler_->FindOrAddEntry(info, this);
- ASSERT(child_entry != NULL);
- filler_->SetIndexedAutoIndexReference(
- HeapGraphEdge::kElement,
- kNativesRootObject, snapshot_->natives_root(),
- info, child_entry);
-}
-
-
-void NativeObjectsExplorer::SetWrapperNativeReferences(
- HeapObject* wrapper, v8::RetainedObjectInfo* info) {
- HeapEntry* wrapper_entry = filler_->FindEntry(wrapper);
- ASSERT(wrapper_entry != NULL);
- HeapEntry* info_entry = filler_->FindOrAddEntry(info, this);
- ASSERT(info_entry != NULL);
- filler_->SetNamedReference(HeapGraphEdge::kInternal,
- wrapper, wrapper_entry,
- "native",
- info, info_entry);
- filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
- info, info_entry,
- wrapper, wrapper_entry);
-}
-
-
-void NativeObjectsExplorer::SetRootNativesRootReference() {
- filler_->SetIndexedAutoIndexReference(
- HeapGraphEdge::kElement,
- V8HeapExplorer::kInternalRootObject, snapshot_->root(),
- kNativesRootObject, snapshot_->natives_root());
-}
-
-
-void NativeObjectsExplorer::VisitSubtreeWrapper(Object** p, uint16_t class_id) {
- if (in_groups_.Contains(*p)) return;
- Isolate* isolate = Isolate::Current();
- v8::RetainedObjectInfo* info =
- isolate->heap_profiler()->ExecuteWrapperClassCallback(class_id, p);
- if (info == NULL) return;
- GetListMaybeDisposeInfo(info)->Add(HeapObject::cast(*p));
-}
-
-
-HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot,
- v8::ActivityControl* control)
- : snapshot_(snapshot),
- control_(control),
- v8_heap_explorer_(snapshot_, this),
- dom_explorer_(snapshot_, this) {
-}
-
-
-class SnapshotCounter : public SnapshotFillerInterface {
- public:
- explicit SnapshotCounter(HeapEntriesMap* entries) : entries_(entries) { }
- HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
- entries_->Pair(ptr, allocator, HeapEntriesMap::kHeapEntryPlaceholder);
- return HeapEntriesMap::kHeapEntryPlaceholder;
- }
- HeapEntry* FindEntry(HeapThing ptr) {
- return entries_->Map(ptr);
- }
- HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
- HeapEntry* entry = FindEntry(ptr);
- return entry != NULL ? entry : AddEntry(ptr, allocator);
- }
- void SetIndexedReference(HeapGraphEdge::Type,
- HeapThing parent_ptr,
- HeapEntry*,
- int,
- HeapThing child_ptr,
- HeapEntry*) {
- entries_->CountReference(parent_ptr, child_ptr);
- }
- void SetIndexedAutoIndexReference(HeapGraphEdge::Type,
- HeapThing parent_ptr,
- HeapEntry*,
- HeapThing child_ptr,
- HeapEntry*) {
- entries_->CountReference(parent_ptr, child_ptr);
- }
- void SetNamedReference(HeapGraphEdge::Type,
- HeapThing parent_ptr,
- HeapEntry*,
- const char*,
- HeapThing child_ptr,
- HeapEntry*) {
- entries_->CountReference(parent_ptr, child_ptr);
- }
- void SetNamedAutoIndexReference(HeapGraphEdge::Type,
- HeapThing parent_ptr,
- HeapEntry*,
- HeapThing child_ptr,
- HeapEntry*) {
- entries_->CountReference(parent_ptr, child_ptr);
- }
- private:
- HeapEntriesMap* entries_;
-};
-
-
-class SnapshotFiller : public SnapshotFillerInterface {
- public:
- explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
- : snapshot_(snapshot),
- collection_(snapshot->collection()),
- entries_(entries) { }
- HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
- UNREACHABLE();
- return NULL;
- }
- HeapEntry* FindEntry(HeapThing ptr) {
- return entries_->Map(ptr);
- }
- HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
- HeapEntry* entry = FindEntry(ptr);
- return entry != NULL ? entry : AddEntry(ptr, allocator);
- }
- void SetIndexedReference(HeapGraphEdge::Type type,
- HeapThing parent_ptr,
- HeapEntry* parent_entry,
- int index,
- HeapThing child_ptr,
- HeapEntry* child_entry) {
- int child_index, retainer_index;
- entries_->CountReference(
- parent_ptr, child_ptr, &child_index, &retainer_index);
- parent_entry->SetIndexedReference(
- type, child_index, index, child_entry, retainer_index);
- }
- void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
- HeapThing parent_ptr,
- HeapEntry* parent_entry,
- HeapThing child_ptr,
- HeapEntry* child_entry) {
- int child_index, retainer_index;
- entries_->CountReference(
- parent_ptr, child_ptr, &child_index, &retainer_index);
- parent_entry->SetIndexedReference(
- type, child_index, child_index + 1, child_entry, retainer_index);
- }
- void SetNamedReference(HeapGraphEdge::Type type,
- HeapThing parent_ptr,
- HeapEntry* parent_entry,
- const char* reference_name,
- HeapThing child_ptr,
- HeapEntry* child_entry) {
- int child_index, retainer_index;
- entries_->CountReference(
- parent_ptr, child_ptr, &child_index, &retainer_index);
- parent_entry->SetNamedReference(
- type, child_index, reference_name, child_entry, retainer_index);
- }
- void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
- HeapThing parent_ptr,
- HeapEntry* parent_entry,
- HeapThing child_ptr,
- HeapEntry* child_entry) {
- int child_index, retainer_index;
- entries_->CountReference(
- parent_ptr, child_ptr, &child_index, &retainer_index);
- parent_entry->SetNamedReference(type,
- child_index,
- collection_->names()->GetName(child_index + 1),
- child_entry,
- retainer_index);
- }
- private:
- HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
- HeapEntriesMap* entries_;
-};
-
-
-bool HeapSnapshotGenerator::GenerateSnapshot() {
- AssertNoAllocation no_alloc;
-
- SetProgressTotal(4); // 2 passes + dominators + sizes.
-
- // Pass 1. Iterate heap contents to count entries and references.
- if (!CountEntriesAndReferences()) return false;
-
- // Allocate and fill entries in the snapshot, allocate references.
- snapshot_->AllocateEntries(entries_.entries_count(),
- entries_.total_children_count(),
- entries_.total_retainers_count());
- entries_.AllocateEntries();
-
- // Pass 2. Fill references.
- if (!FillReferences()) return false;
-
- if (!SetEntriesDominators()) return false;
- if (!ApproximateRetainedSizes()) return false;
-
- progress_counter_ = progress_total_;
- if (!ProgressReport(true)) return false;
- return true;
-}
-
-
-void HeapSnapshotGenerator::ProgressStep() {
- ++progress_counter_;
-}
-
-
-bool HeapSnapshotGenerator::ProgressReport(bool force) {
- const int kProgressReportGranularity = 10000;
- if (control_ != NULL
- && (force || progress_counter_ % kProgressReportGranularity == 0)) {
- return
- control_->ReportProgressValue(progress_counter_, progress_total_) ==
- v8::ActivityControl::kContinue;
- }
- return true;
-}
-
-
-void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
- if (control_ == NULL) return;
- progress_total_ = (
- v8_heap_explorer_.EstimateObjectsCount() +
- dom_explorer_.EstimateObjectsCount()) * iterations_count;
- progress_counter_ = 0;
-}
-
-
-bool HeapSnapshotGenerator::CountEntriesAndReferences() {
- SnapshotCounter counter(&entries_);
- v8_heap_explorer_.AddRootEntries(&counter);
- dom_explorer_.AddRootEntries(&counter);
- return
- v8_heap_explorer_.IterateAndExtractReferences(&counter) &&
- dom_explorer_.IterateAndExtractReferences(&counter);
-}
-
-
-bool HeapSnapshotGenerator::FillReferences() {
- SnapshotFiller filler(snapshot_, &entries_);
- return
- v8_heap_explorer_.IterateAndExtractReferences(&filler) &&
- dom_explorer_.IterateAndExtractReferences(&filler);
-}
-
-
-void HeapSnapshotGenerator::FillReversePostorderIndexes(
- Vector<HeapEntry*>* entries) {
- snapshot_->ClearPaint();
- int current_entry = 0;
- List<HeapEntry*> nodes_to_visit;
- nodes_to_visit.Add(snapshot_->root());
- snapshot_->root()->paint_reachable();
- while (!nodes_to_visit.is_empty()) {
- HeapEntry* entry = nodes_to_visit.last();
- Vector<HeapGraphEdge> children = entry->children();
- bool has_new_edges = false;
- for (int i = 0; i < children.length(); ++i) {
- if (children[i].type() == HeapGraphEdge::kShortcut) continue;
- HeapEntry* child = children[i].to();
- if (!child->painted_reachable()) {
- nodes_to_visit.Add(child);
- child->paint_reachable();
- has_new_edges = true;
- }
- }
- if (!has_new_edges) {
- entry->set_ordered_index(current_entry);
- (*entries)[current_entry++] = entry;
- nodes_to_visit.RemoveLast();
- }
- }
- entries->Truncate(current_entry);
-}
-
-
-static int Intersect(int i1, int i2, const Vector<HeapEntry*>& dominators) {
- int finger1 = i1, finger2 = i2;
- while (finger1 != finger2) {
- while (finger1 < finger2) finger1 = dominators[finger1]->ordered_index();
- while (finger2 < finger1) finger2 = dominators[finger2]->ordered_index();
- }
- return finger1;
-}
-
-// The algorithm is based on the article:
-// K. Cooper, T. Harvey and K. Kennedy "A Simple, Fast Dominance Algorithm"
-// Softw. Pract. Exper. 4 (2001), pp. 1-10.
-bool HeapSnapshotGenerator::BuildDominatorTree(
- const Vector<HeapEntry*>& entries,
- Vector<HeapEntry*>* dominators) {
- if (entries.length() == 0) return true;
- const int entries_length = entries.length(), root_index = entries_length - 1;
- for (int i = 0; i < root_index; ++i) (*dominators)[i] = NULL;
- (*dominators)[root_index] = entries[root_index];
- int changed = 1;
- const int base_progress_counter = progress_counter_;
- while (changed != 0) {
- changed = 0;
- for (int i = root_index - 1; i >= 0; --i) {
- HeapEntry* new_idom = NULL;
- Vector<HeapGraphEdge*> rets = entries[i]->retainers();
- int j = 0;
- for (; j < rets.length(); ++j) {
- if (rets[j]->type() == HeapGraphEdge::kShortcut) continue;
- HeapEntry* ret = rets[j]->From();
- if (dominators->at(ret->ordered_index()) != NULL) {
- new_idom = ret;
- break;
- }
- }
- for (++j; j < rets.length(); ++j) {
- if (rets[j]->type() == HeapGraphEdge::kShortcut) continue;
- HeapEntry* ret = rets[j]->From();
- if (dominators->at(ret->ordered_index()) != NULL) {
- new_idom = entries[Intersect(ret->ordered_index(),
- new_idom->ordered_index(),
- *dominators)];
- }
- }
- if (new_idom != NULL && dominators->at(i) != new_idom) {
- (*dominators)[i] = new_idom;
- ++changed;
- }
- }
- int remaining = entries_length - changed;
- if (remaining < 0) remaining = 0;
- progress_counter_ = base_progress_counter + remaining;
- if (!ProgressReport(true)) return false;
- }
- return true;
-}
-
-
-bool HeapSnapshotGenerator::SetEntriesDominators() {
- // This array is used for maintaining reverse postorder of nodes.
- ScopedVector<HeapEntry*> ordered_entries(snapshot_->entries()->length());
- FillReversePostorderIndexes(&ordered_entries);
- ScopedVector<HeapEntry*> dominators(ordered_entries.length());
- if (!BuildDominatorTree(ordered_entries, &dominators)) return false;
- for (int i = 0; i < ordered_entries.length(); ++i) {
- ASSERT(dominators[i] != NULL);
- ordered_entries[i]->set_dominator(dominators[i]);
- }
- return true;
-}
-
-
-bool HeapSnapshotGenerator::ApproximateRetainedSizes() {
- // As for the dominators tree we only know parent nodes, not
- // children, to sum up total sizes we "bubble" node's self size
- // adding it to all of its parents.
- for (int i = 0; i < snapshot_->entries()->length(); ++i) {
- HeapEntry* entry = snapshot_->entries()->at(i);
- entry->set_retained_size(entry->self_size());
- }
- for (int i = 0;
- i < snapshot_->entries()->length();
- ++i, ProgressStep()) {
- HeapEntry* entry = snapshot_->entries()->at(i);
- int entry_size = entry->self_size();
- for (HeapEntry* dominator = entry->dominator();
- dominator != entry;
- entry = dominator, dominator = entry->dominator()) {
- dominator->add_retained_size(entry_size);
- }
- if (!ProgressReport()) return false;
- }
- return true;
-}
-
-
-class OutputStreamWriter {
- public:
- explicit OutputStreamWriter(v8::OutputStream* stream)
- : stream_(stream),
- chunk_size_(stream->GetChunkSize()),
- chunk_(chunk_size_),
- chunk_pos_(0),
- aborted_(false) {
- ASSERT(chunk_size_ > 0);
- }
- bool aborted() { return aborted_; }
- void AddCharacter(char c) {
- ASSERT(c != '\0');
- ASSERT(chunk_pos_ < chunk_size_);
- chunk_[chunk_pos_++] = c;
- MaybeWriteChunk();
- }
- void AddString(const char* s) {
- AddSubstring(s, StrLength(s));
- }
- void AddSubstring(const char* s, int n) {
- if (n <= 0) return;
- ASSERT(static_cast<size_t>(n) <= strlen(s));
- const char* s_end = s + n;
- while (s < s_end) {
- int s_chunk_size = Min(
- chunk_size_ - chunk_pos_, static_cast<int>(s_end - s));
- ASSERT(s_chunk_size > 0);
- memcpy(chunk_.start() + chunk_pos_, s, s_chunk_size);
- s += s_chunk_size;
- chunk_pos_ += s_chunk_size;
- MaybeWriteChunk();
- }
- }
- void AddNumber(int n) { AddNumberImpl<int>(n, "%d"); }
- void AddNumber(unsigned n) { AddNumberImpl<unsigned>(n, "%u"); }
- void AddNumber(uint64_t n) { AddNumberImpl<uint64_t>(n, "%llu"); }
- void Finalize() {
- if (aborted_) return;
- ASSERT(chunk_pos_ < chunk_size_);
- if (chunk_pos_ != 0) {
- WriteChunk();
- }
- stream_->EndOfStream();
- }
-
- private:
- template<typename T>
- void AddNumberImpl(T n, const char* format) {
- ScopedVector<char> buffer(32);
- int result = OS::SNPrintF(buffer, format, n);
- USE(result);
- ASSERT(result != -1);
- AddString(buffer.start());
- }
- void MaybeWriteChunk() {
- ASSERT(chunk_pos_ <= chunk_size_);
- if (chunk_pos_ == chunk_size_) {
- WriteChunk();
- chunk_pos_ = 0;
- }
- }
- void WriteChunk() {
- if (aborted_) return;
- if (stream_->WriteAsciiChunk(chunk_.start(), chunk_pos_) ==
- v8::OutputStream::kAbort) aborted_ = true;
- }
-
- v8::OutputStream* stream_;
- int chunk_size_;
- ScopedVector<char> chunk_;
- int chunk_pos_;
- bool aborted_;
-};
-
-void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
- ASSERT(writer_ == NULL);
- writer_ = new OutputStreamWriter(stream);
-
- // Since nodes graph is cyclic, we need the first pass to enumerate
- // them. Strings can be serialized in one pass.
- EnumerateNodes();
- SerializeImpl();
-
- delete writer_;
- writer_ = NULL;
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeImpl() {
- writer_->AddCharacter('{');
- writer_->AddString("\"snapshot\":{");
- SerializeSnapshot();
- if (writer_->aborted()) return;
- writer_->AddString("},\n");
- writer_->AddString("\"nodes\":[");
- SerializeNodes();
- if (writer_->aborted()) return;
- writer_->AddString("],\n");
- writer_->AddString("\"strings\":[");
- SerializeStrings();
- if (writer_->aborted()) return;
- writer_->AddCharacter(']');
- writer_->AddCharacter('}');
- writer_->Finalize();
-}
-
-
-class HeapSnapshotJSONSerializerEnumerator {
- public:
- explicit HeapSnapshotJSONSerializerEnumerator(HeapSnapshotJSONSerializer* s)
- : s_(s) {
- }
- void Apply(HeapEntry** entry) {
- s_->GetNodeId(*entry);
- }
- private:
- HeapSnapshotJSONSerializer* s_;
-};
-
-void HeapSnapshotJSONSerializer::EnumerateNodes() {
- GetNodeId(snapshot_->root()); // Make sure root gets the first id.
- HeapSnapshotJSONSerializerEnumerator iter(this);
- snapshot_->IterateEntries(&iter);
-}
-
-
-int HeapSnapshotJSONSerializer::GetNodeId(HeapEntry* entry) {
- HashMap::Entry* cache_entry = nodes_.Lookup(entry, ObjectHash(entry), true);
- if (cache_entry->value == NULL) {
- cache_entry->value = reinterpret_cast<void*>(next_node_id_++);
- }
- return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
-}
-
-
-int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
- HashMap::Entry* cache_entry = strings_.Lookup(
- const_cast<char*>(s), ObjectHash(s), true);
- if (cache_entry->value == NULL) {
- cache_entry->value = reinterpret_cast<void*>(next_string_id_++);
- }
- return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge) {
- writer_->AddCharacter(',');
- writer_->AddNumber(edge->type());
- writer_->AddCharacter(',');
- if (edge->type() == HeapGraphEdge::kElement
- || edge->type() == HeapGraphEdge::kHidden) {
- writer_->AddNumber(edge->index());
- } else {
- writer_->AddNumber(GetStringId(edge->name()));
- }
- writer_->AddCharacter(',');
- writer_->AddNumber(GetNodeId(edge->to()));
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
- writer_->AddCharacter('\n');
- writer_->AddCharacter(',');
- writer_->AddNumber(entry->type());
- writer_->AddCharacter(',');
- writer_->AddNumber(GetStringId(entry->name()));
- writer_->AddCharacter(',');
- writer_->AddNumber(entry->id());
- writer_->AddCharacter(',');
- writer_->AddNumber(entry->self_size());
- writer_->AddCharacter(',');
- writer_->AddNumber(entry->RetainedSize(false));
- writer_->AddCharacter(',');
- writer_->AddNumber(GetNodeId(entry->dominator()));
- Vector<HeapGraphEdge> children = entry->children();
- writer_->AddCharacter(',');
- writer_->AddNumber(children.length());
- for (int i = 0; i < children.length(); ++i) {
- SerializeEdge(&children[i]);
- if (writer_->aborted()) return;
- }
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeNodes() {
- // The first (zero) item of nodes array is an object describing node
- // serialization layout. We use a set of macros to improve
- // readability.
-#define JSON_A(s) "["s"]"
-#define JSON_O(s) "{"s"}"
-#define JSON_S(s) "\""s"\""
- writer_->AddString(JSON_O(
- JSON_S("fields") ":" JSON_A(
- JSON_S("type")
- "," JSON_S("name")
- "," JSON_S("id")
- "," JSON_S("self_size")
- "," JSON_S("retained_size")
- "," JSON_S("dominator")
- "," JSON_S("children_count")
- "," JSON_S("children"))
- "," JSON_S("types") ":" JSON_A(
- JSON_A(
- JSON_S("hidden")
- "," JSON_S("array")
- "," JSON_S("string")
- "," JSON_S("object")
- "," JSON_S("code")
- "," JSON_S("closure")
- "," JSON_S("regexp")
- "," JSON_S("number")
- "," JSON_S("native"))
- "," JSON_S("string")
- "," JSON_S("number")
- "," JSON_S("number")
- "," JSON_S("number")
- "," JSON_S("number")
- "," JSON_S("number")
- "," JSON_O(
- JSON_S("fields") ":" JSON_A(
- JSON_S("type")
- "," JSON_S("name_or_index")
- "," JSON_S("to_node"))
- "," JSON_S("types") ":" JSON_A(
- JSON_A(
- JSON_S("context")
- "," JSON_S("element")
- "," JSON_S("property")
- "," JSON_S("internal")
- "," JSON_S("hidden")
- "," JSON_S("shortcut"))
- "," JSON_S("string_or_number")
- "," JSON_S("node"))))));
-#undef JSON_S
-#undef JSON_O
-#undef JSON_A
-
- const int node_fields_count = 7;
- // type,name,id,self_size,retained_size,dominator,children_count.
- const int edge_fields_count = 3; // type,name|index,to_node.
- List<HashMap::Entry*> sorted_nodes;
- SortHashMap(&nodes_, &sorted_nodes);
- // Rewrite node ids, so they refer to actual array positions.
- if (sorted_nodes.length() > 1) {
- // Nodes start from array index 1.
- int prev_value = 1;
- sorted_nodes[0]->value = reinterpret_cast<void*>(prev_value);
- for (int i = 1; i < sorted_nodes.length(); ++i) {
- HeapEntry* prev_heap_entry =
- reinterpret_cast<HeapEntry*>(sorted_nodes[i-1]->key);
- prev_value += node_fields_count +
- prev_heap_entry->children().length() * edge_fields_count;
- sorted_nodes[i]->value = reinterpret_cast<void*>(prev_value);
- }
- }
- for (int i = 0; i < sorted_nodes.length(); ++i) {
- SerializeNode(reinterpret_cast<HeapEntry*>(sorted_nodes[i]->key));
- if (writer_->aborted()) return;
- }
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeSnapshot() {
- writer_->AddString("\"title\":\"");
- writer_->AddString(snapshot_->title());
- writer_->AddString("\"");
- writer_->AddString(",\"uid\":");
- writer_->AddNumber(snapshot_->uid());
-}
-
-
-static void WriteUChar(OutputStreamWriter* w, unibrow::uchar u) {
- static const char hex_chars[] = "0123456789ABCDEF";
- w->AddString("\\u");
- w->AddCharacter(hex_chars[(u >> 12) & 0xf]);
- w->AddCharacter(hex_chars[(u >> 8) & 0xf]);
- w->AddCharacter(hex_chars[(u >> 4) & 0xf]);
- w->AddCharacter(hex_chars[u & 0xf]);
-}
-
-void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) {
- writer_->AddCharacter('\n');
- writer_->AddCharacter('\"');
- for ( ; *s != '\0'; ++s) {
- switch (*s) {
- case '\b':
- writer_->AddString("\\b");
- continue;
- case '\f':
- writer_->AddString("\\f");
- continue;
- case '\n':
- writer_->AddString("\\n");
- continue;
- case '\r':
- writer_->AddString("\\r");
- continue;
- case '\t':
- writer_->AddString("\\t");
- continue;
- case '\"':
- case '\\':
- writer_->AddCharacter('\\');
- writer_->AddCharacter(*s);
- continue;
- default:
- if (*s > 31 && *s < 128) {
- writer_->AddCharacter(*s);
- } else if (*s <= 31) {
- // Special character with no dedicated literal.
- WriteUChar(writer_, *s);
- } else {
- // Convert UTF-8 into \u UTF-16 literal.
- unsigned length = 1, cursor = 0;
- for ( ; length <= 4 && *(s + length) != '\0'; ++length) { }
- unibrow::uchar c = unibrow::Utf8::CalculateValue(s, length, &cursor);
- if (c != unibrow::Utf8::kBadChar) {
- WriteUChar(writer_, c);
- ASSERT(cursor != 0);
- s += cursor - 1;
- } else {
- writer_->AddCharacter('?');
- }
- }
- }
- }
- writer_->AddCharacter('\"');
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeStrings() {
- List<HashMap::Entry*> sorted_strings;
- SortHashMap(&strings_, &sorted_strings);
- writer_->AddString("\"<dummy>\"");
- for (int i = 0; i < sorted_strings.length(); ++i) {
- writer_->AddCharacter(',');
- SerializeString(
- reinterpret_cast<const unsigned char*>(sorted_strings[i]->key));
- if (writer_->aborted()) return;
- }
-}
-
-
-template<typename T>
-inline static int SortUsingEntryValue(const T* x, const T* y) {
- uintptr_t x_uint = reinterpret_cast<uintptr_t>((*x)->value);
- uintptr_t y_uint = reinterpret_cast<uintptr_t>((*y)->value);
- if (x_uint > y_uint) {
- return 1;
- } else if (x_uint == y_uint) {
- return 0;
- } else {
- return -1;
- }
-}
-
-
-void HeapSnapshotJSONSerializer::SortHashMap(
- HashMap* map, List<HashMap::Entry*>* sorted_entries) {
- for (HashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p))
- sorted_entries->Add(p);
- sorted_entries->Sort(SortUsingEntryValue);
-}
-
-
-String* GetConstructorNameForHeapProfile(JSObject* object) {
- if (object->IsJSFunction()) return HEAP->closure_symbol();
- return object->constructor_name();
-}
-
-} } // namespace v8::internal
-
-#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/src/3rdparty/v8/src/profile-generator.h b/src/3rdparty/v8/src/profile-generator.h
deleted file mode 100644
index bbc9efc..0000000
--- a/src/3rdparty/v8/src/profile-generator.h
+++ /dev/null
@@ -1,1125 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PROFILE_GENERATOR_H_
-#define V8_PROFILE_GENERATOR_H_
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-#include "hashmap.h"
-#include "../include/v8-profiler.h"
-
-namespace v8 {
-namespace internal {
-
-class TokenEnumerator {
- public:
- TokenEnumerator();
- ~TokenEnumerator();
- int GetTokenId(Object* token);
-
- static const int kNoSecurityToken = -1;
- static const int kInheritsSecurityToken = -2;
-
- private:
- static void TokenRemovedCallback(v8::Persistent<v8::Value> handle,
- void* parameter);
- void TokenRemoved(Object** token_location);
-
- List<Object**> token_locations_;
- List<bool> token_removed_;
-
- friend class TokenEnumeratorTester;
-
- DISALLOW_COPY_AND_ASSIGN(TokenEnumerator);
-};
-
-
-// Provides a storage of strings allocated in C++ heap, to hold them
-// forever, even if they disappear from JS heap or external storage.
-class StringsStorage {
- public:
- StringsStorage();
- ~StringsStorage();
-
- const char* GetCopy(const char* src);
- const char* GetFormatted(const char* format, ...);
- const char* GetVFormatted(const char* format, va_list args);
- const char* GetName(String* name);
- const char* GetName(int index);
- inline const char* GetFunctionName(String* name);
- inline const char* GetFunctionName(const char* name);
-
- private:
- INLINE(static bool StringsMatch(void* key1, void* key2)) {
- return strcmp(reinterpret_cast<char*>(key1),
- reinterpret_cast<char*>(key2)) == 0;
- }
- const char* AddOrDisposeString(char* str, uint32_t hash);
-
- // Mapping of strings by String::Hash to const char* strings.
- HashMap names_;
-
- DISALLOW_COPY_AND_ASSIGN(StringsStorage);
-};
-
-
-class CodeEntry {
- public:
- // CodeEntry doesn't own name strings, just references them.
- INLINE(CodeEntry(Logger::LogEventsAndTags tag,
- const char* name_prefix,
- const char* name,
- const char* resource_name,
- int line_number,
- int security_token_id));
-
- INLINE(bool is_js_function() const) { return is_js_function_tag(tag_); }
- INLINE(const char* name_prefix() const) { return name_prefix_; }
- INLINE(bool has_name_prefix() const) { return name_prefix_[0] != '\0'; }
- INLINE(const char* name() const) { return name_; }
- INLINE(const char* resource_name() const) { return resource_name_; }
- INLINE(int line_number() const) { return line_number_; }
- INLINE(int shared_id() const) { return shared_id_; }
- INLINE(void set_shared_id(int shared_id)) { shared_id_ = shared_id; }
- INLINE(int security_token_id() const) { return security_token_id_; }
-
- INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag));
-
- void CopyData(const CodeEntry& source);
- uint32_t GetCallUid() const;
- bool IsSameAs(CodeEntry* entry) const;
-
- static const char* const kEmptyNamePrefix;
-
- private:
- Logger::LogEventsAndTags tag_;
- const char* name_prefix_;
- const char* name_;
- const char* resource_name_;
- int line_number_;
- int shared_id_;
- int security_token_id_;
-
- DISALLOW_COPY_AND_ASSIGN(CodeEntry);
-};
-
-
-class ProfileTree;
-
-class ProfileNode {
- public:
- INLINE(ProfileNode(ProfileTree* tree, CodeEntry* entry));
-
- ProfileNode* FindChild(CodeEntry* entry);
- ProfileNode* FindOrAddChild(CodeEntry* entry);
- INLINE(void IncrementSelfTicks()) { ++self_ticks_; }
- INLINE(void IncreaseSelfTicks(unsigned amount)) { self_ticks_ += amount; }
- INLINE(void IncreaseTotalTicks(unsigned amount)) { total_ticks_ += amount; }
-
- INLINE(CodeEntry* entry() const) { return entry_; }
- INLINE(unsigned self_ticks() const) { return self_ticks_; }
- INLINE(unsigned total_ticks() const) { return total_ticks_; }
- INLINE(const List<ProfileNode*>* children() const) { return &children_list_; }
- double GetSelfMillis() const;
- double GetTotalMillis() const;
-
- void Print(int indent);
-
- private:
- INLINE(static bool CodeEntriesMatch(void* entry1, void* entry2)) {
- return reinterpret_cast<CodeEntry*>(entry1)->IsSameAs(
- reinterpret_cast<CodeEntry*>(entry2));
- }
-
- INLINE(static uint32_t CodeEntryHash(CodeEntry* entry)) {
- return entry->GetCallUid();
- }
-
- ProfileTree* tree_;
- CodeEntry* entry_;
- unsigned total_ticks_;
- unsigned self_ticks_;
- // Mapping from CodeEntry* to ProfileNode*
- HashMap children_;
- List<ProfileNode*> children_list_;
-
- DISALLOW_COPY_AND_ASSIGN(ProfileNode);
-};
-
-
-class ProfileTree {
- public:
- ProfileTree();
- ~ProfileTree();
-
- void AddPathFromEnd(const Vector<CodeEntry*>& path);
- void AddPathFromStart(const Vector<CodeEntry*>& path);
- void CalculateTotalTicks();
- void FilteredClone(ProfileTree* src, int security_token_id);
-
- double TicksToMillis(unsigned ticks) const {
- return ticks * ms_to_ticks_scale_;
- }
- ProfileNode* root() const { return root_; }
- void SetTickRatePerMs(double ticks_per_ms);
-
- void ShortPrint();
- void Print() {
- root_->Print(0);
- }
-
- private:
- template <typename Callback>
- void TraverseDepthFirst(Callback* callback);
-
- CodeEntry root_entry_;
- ProfileNode* root_;
- double ms_to_ticks_scale_;
-
- DISALLOW_COPY_AND_ASSIGN(ProfileTree);
-};
-
-
-class CpuProfile {
- public:
- CpuProfile(const char* title, unsigned uid)
- : title_(title), uid_(uid) { }
-
- // Add pc -> ... -> main() call path to the profile.
- void AddPath(const Vector<CodeEntry*>& path);
- void CalculateTotalTicks();
- void SetActualSamplingRate(double actual_sampling_rate);
- CpuProfile* FilteredClone(int security_token_id);
-
- INLINE(const char* title() const) { return title_; }
- INLINE(unsigned uid() const) { return uid_; }
- INLINE(const ProfileTree* top_down() const) { return &top_down_; }
- INLINE(const ProfileTree* bottom_up() const) { return &bottom_up_; }
-
- void UpdateTicksScale();
-
- void ShortPrint();
- void Print();
-
- private:
- const char* title_;
- unsigned uid_;
- ProfileTree top_down_;
- ProfileTree bottom_up_;
-
- DISALLOW_COPY_AND_ASSIGN(CpuProfile);
-};
-
-
-class CodeMap {
- public:
- CodeMap() : next_shared_id_(1) { }
- INLINE(void AddCode(Address addr, CodeEntry* entry, unsigned size));
- INLINE(void MoveCode(Address from, Address to));
- INLINE(void DeleteCode(Address addr));
- CodeEntry* FindEntry(Address addr);
- int GetSharedId(Address addr);
-
- void Print();
-
- private:
- struct CodeEntryInfo {
- CodeEntryInfo(CodeEntry* an_entry, unsigned a_size)
- : entry(an_entry), size(a_size) { }
- CodeEntry* entry;
- unsigned size;
- };
-
- struct CodeTreeConfig {
- typedef Address Key;
- typedef CodeEntryInfo Value;
- static const Key kNoKey;
- static const Value kNoValue;
- static int Compare(const Key& a, const Key& b) {
- return a < b ? -1 : (a > b ? 1 : 0);
- }
- };
- typedef SplayTree<CodeTreeConfig> CodeTree;
-
- class CodeTreePrinter {
- public:
- void Call(const Address& key, const CodeEntryInfo& value);
- };
-
- // Fake CodeEntry pointer to distinguish shared function entries.
- static CodeEntry* const kSharedFunctionCodeEntry;
-
- CodeTree tree_;
- int next_shared_id_;
-
- DISALLOW_COPY_AND_ASSIGN(CodeMap);
-};
-
-
-class CpuProfilesCollection {
- public:
- CpuProfilesCollection();
- ~CpuProfilesCollection();
-
- bool StartProfiling(const char* title, unsigned uid);
- bool StartProfiling(String* title, unsigned uid);
- CpuProfile* StopProfiling(int security_token_id,
- const char* title,
- double actual_sampling_rate);
- List<CpuProfile*>* Profiles(int security_token_id);
- const char* GetName(String* name) {
- return function_and_resource_names_.GetName(name);
- }
- const char* GetName(int args_count) {
- return function_and_resource_names_.GetName(args_count);
- }
- CpuProfile* GetProfile(int security_token_id, unsigned uid);
- bool IsLastProfile(const char* title);
- void RemoveProfile(CpuProfile* profile);
- bool HasDetachedProfiles() { return detached_profiles_.length() > 0; }
-
- CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
- String* name, String* resource_name, int line_number);
- CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, const char* name);
- CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
- const char* name_prefix, String* name);
- CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, int args_count);
- CodeEntry* NewCodeEntry(int security_token_id);
-
- // Called from profile generator thread.
- void AddPathToCurrentProfiles(const Vector<CodeEntry*>& path);
-
- // Limits the number of profiles that can be simultaneously collected.
- static const int kMaxSimultaneousProfiles = 100;
-
- private:
- const char* GetFunctionName(String* name) {
- return function_and_resource_names_.GetFunctionName(name);
- }
- const char* GetFunctionName(const char* name) {
- return function_and_resource_names_.GetFunctionName(name);
- }
- int GetProfileIndex(unsigned uid);
- List<CpuProfile*>* GetProfilesList(int security_token_id);
- int TokenToIndex(int security_token_id);
-
- INLINE(static bool UidsMatch(void* key1, void* key2)) {
- return key1 == key2;
- }
-
- StringsStorage function_and_resource_names_;
- List<CodeEntry*> code_entries_;
- List<List<CpuProfile*>* > profiles_by_token_;
- // Mapping from profiles' uids to indexes in the second nested list
- // of profiles_by_token_.
- HashMap profiles_uids_;
- List<CpuProfile*> detached_profiles_;
-
- // Accessed by VM thread and profile generator thread.
- List<CpuProfile*> current_profiles_;
- Semaphore* current_profiles_semaphore_;
-
- DISALLOW_COPY_AND_ASSIGN(CpuProfilesCollection);
-};
-
-
-class SampleRateCalculator {
- public:
- SampleRateCalculator()
- : result_(Logger::kSamplingIntervalMs * kResultScale),
- ticks_per_ms_(Logger::kSamplingIntervalMs),
- measurements_count_(0),
- wall_time_query_countdown_(1) {
- }
-
- double ticks_per_ms() {
- return result_ / static_cast<double>(kResultScale);
- }
- void Tick();
- void UpdateMeasurements(double current_time);
-
- // Instead of querying current wall time each tick,
- // we use this constant to control query intervals.
- static const unsigned kWallTimeQueryIntervalMs = 100;
-
- private:
- // As the result needs to be accessed from a different thread, we
- // use type that guarantees atomic writes to memory. There should
- // be <= 1000 ticks per second, thus storing a value of a 10 ** 5
- // order should provide enough precision while keeping away from a
- // potential overflow.
- static const int kResultScale = 100000;
-
- AtomicWord result_;
- // All other fields are accessed only from the sampler thread.
- double ticks_per_ms_;
- unsigned measurements_count_;
- unsigned wall_time_query_countdown_;
- double last_wall_time_;
-
- DISALLOW_COPY_AND_ASSIGN(SampleRateCalculator);
-};
-
-
-class ProfileGenerator {
- public:
- explicit ProfileGenerator(CpuProfilesCollection* profiles);
-
- INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
- String* name,
- String* resource_name,
- int line_number)) {
- return profiles_->NewCodeEntry(tag, name, resource_name, line_number);
- }
-
- INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
- const char* name)) {
- return profiles_->NewCodeEntry(tag, name);
- }
-
- INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
- const char* name_prefix,
- String* name)) {
- return profiles_->NewCodeEntry(tag, name_prefix, name);
- }
-
- INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
- int args_count)) {
- return profiles_->NewCodeEntry(tag, args_count);
- }
-
- INLINE(CodeEntry* NewCodeEntry(int security_token_id)) {
- return profiles_->NewCodeEntry(security_token_id);
- }
-
- void RecordTickSample(const TickSample& sample);
-
- INLINE(CodeMap* code_map()) { return &code_map_; }
-
- INLINE(void Tick()) { sample_rate_calc_.Tick(); }
- INLINE(double actual_sampling_rate()) {
- return sample_rate_calc_.ticks_per_ms();
- }
-
- static const char* const kAnonymousFunctionName;
- static const char* const kProgramEntryName;
- static const char* const kGarbageCollectorEntryName;
-
- private:
- INLINE(CodeEntry* EntryForVMState(StateTag tag));
-
- CpuProfilesCollection* profiles_;
- CodeMap code_map_;
- CodeEntry* program_entry_;
- CodeEntry* gc_entry_;
- SampleRateCalculator sample_rate_calc_;
-
- DISALLOW_COPY_AND_ASSIGN(ProfileGenerator);
-};
-
-
-class HeapEntry;
-
-class HeapGraphEdge BASE_EMBEDDED {
- public:
- enum Type {
- kContextVariable = v8::HeapGraphEdge::kContextVariable,
- kElement = v8::HeapGraphEdge::kElement,
- kProperty = v8::HeapGraphEdge::kProperty,
- kInternal = v8::HeapGraphEdge::kInternal,
- kHidden = v8::HeapGraphEdge::kHidden,
- kShortcut = v8::HeapGraphEdge::kShortcut
- };
-
- HeapGraphEdge() { }
- void Init(int child_index, Type type, const char* name, HeapEntry* to);
- void Init(int child_index, Type type, int index, HeapEntry* to);
- void Init(int child_index, int index, HeapEntry* to);
-
- Type type() { return static_cast<Type>(type_); }
- int index() {
- ASSERT(type_ == kElement || type_ == kHidden);
- return index_;
- }
- const char* name() {
- ASSERT(type_ == kContextVariable
- || type_ == kProperty
- || type_ == kInternal
- || type_ == kShortcut);
- return name_;
- }
- HeapEntry* to() { return to_; }
-
- HeapEntry* From();
-
- private:
- int child_index_ : 29;
- unsigned type_ : 3;
- union {
- int index_;
- const char* name_;
- };
- HeapEntry* to_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapGraphEdge);
-};
-
-
-class HeapSnapshot;
-
-// HeapEntry instances represent an entity from the heap (or a special
-// virtual node, e.g. root). To make heap snapshots more compact,
-// HeapEntries has a special memory layout (no Vectors or Lists used):
-//
-// +-----------------+
-// HeapEntry
-// +-----------------+
-// HeapGraphEdge |
-// ... } children_count
-// HeapGraphEdge |
-// +-----------------+
-// HeapGraphEdge* |
-// ... } retainers_count
-// HeapGraphEdge* |
-// +-----------------+
-//
-// In a HeapSnapshot, all entries are hand-allocated in a continuous array
-// of raw bytes.
-//
-class HeapEntry BASE_EMBEDDED {
- public:
- enum Type {
- kHidden = v8::HeapGraphNode::kHidden,
- kArray = v8::HeapGraphNode::kArray,
- kString = v8::HeapGraphNode::kString,
- kObject = v8::HeapGraphNode::kObject,
- kCode = v8::HeapGraphNode::kCode,
- kClosure = v8::HeapGraphNode::kClosure,
- kRegExp = v8::HeapGraphNode::kRegExp,
- kHeapNumber = v8::HeapGraphNode::kHeapNumber,
- kNative = v8::HeapGraphNode::kNative
- };
-
- HeapEntry() { }
- void Init(HeapSnapshot* snapshot,
- Type type,
- const char* name,
- uint64_t id,
- int self_size,
- int children_count,
- int retainers_count);
-
- HeapSnapshot* snapshot() { return snapshot_; }
- Type type() { return static_cast<Type>(type_); }
- const char* name() { return name_; }
- inline uint64_t id();
- int self_size() { return self_size_; }
- int retained_size() { return retained_size_; }
- void add_retained_size(int size) { retained_size_ += size; }
- void set_retained_size(int value) { retained_size_ = value; }
- int ordered_index() { return ordered_index_; }
- void set_ordered_index(int value) { ordered_index_ = value; }
-
- Vector<HeapGraphEdge> children() {
- return Vector<HeapGraphEdge>(children_arr(), children_count_); }
- Vector<HeapGraphEdge*> retainers() {
- return Vector<HeapGraphEdge*>(retainers_arr(), retainers_count_); }
- HeapEntry* dominator() { return dominator_; }
- void set_dominator(HeapEntry* entry) { dominator_ = entry; }
-
- void clear_paint() { painted_ = kUnpainted; }
- bool painted_reachable() { return painted_ == kPainted; }
- void paint_reachable() {
- ASSERT(painted_ == kUnpainted);
- painted_ = kPainted;
- }
- bool not_painted_reachable_from_others() {
- return painted_ != kPaintedReachableFromOthers;
- }
- void paint_reachable_from_others() {
- painted_ = kPaintedReachableFromOthers;
- }
- template<class Visitor>
- void ApplyAndPaintAllReachable(Visitor* visitor);
- void PaintAllReachable();
-
- void SetIndexedReference(HeapGraphEdge::Type type,
- int child_index,
- int index,
- HeapEntry* entry,
- int retainer_index);
- void SetNamedReference(HeapGraphEdge::Type type,
- int child_index,
- const char* name,
- HeapEntry* entry,
- int retainer_index);
- void SetUnidirElementReference(int child_index, int index, HeapEntry* entry);
-
- int EntrySize() { return EntriesSize(1, children_count_, retainers_count_); }
- int RetainedSize(bool exact);
-
- void Print(int max_depth, int indent);
-
- static int EntriesSize(int entries_count,
- int children_count,
- int retainers_count);
-
- private:
- HeapGraphEdge* children_arr() {
- return reinterpret_cast<HeapGraphEdge*>(this + 1);
- }
- HeapGraphEdge** retainers_arr() {
- return reinterpret_cast<HeapGraphEdge**>(children_arr() + children_count_);
- }
- void CalculateExactRetainedSize();
- const char* TypeAsString();
-
- unsigned painted_: 2;
- unsigned type_: 4;
- int children_count_: 26;
- int retainers_count_;
- int self_size_;
- union {
- int ordered_index_; // Used during dominator tree building.
- int retained_size_; // At that moment, there is no retained size yet.
- };
- HeapEntry* dominator_;
- HeapSnapshot* snapshot_;
- struct Id {
- uint32_t id1_;
- uint32_t id2_;
- } id_; // This is to avoid extra padding of 64-bit value.
- const char* name_;
-
- // Paints used for exact retained sizes calculation.
- static const unsigned kUnpainted = 0;
- static const unsigned kPainted = 1;
- static const unsigned kPaintedReachableFromOthers = 2;
-
- static const int kExactRetainedSizeTag = 1;
-
- DISALLOW_COPY_AND_ASSIGN(HeapEntry);
-};
-
-
-class HeapSnapshotsCollection;
-
-// HeapSnapshot represents a single heap snapshot. It is stored in
-// HeapSnapshotsCollection, which is also a factory for
-// HeapSnapshots. All HeapSnapshots share strings copied from JS heap
-// to be able to return them even if they were collected.
-// HeapSnapshotGenerator fills in a HeapSnapshot.
-class HeapSnapshot {
- public:
- enum Type {
- kFull = v8::HeapSnapshot::kFull,
- kAggregated = v8::HeapSnapshot::kAggregated
- };
-
- HeapSnapshot(HeapSnapshotsCollection* collection,
- Type type,
- const char* title,
- unsigned uid);
- ~HeapSnapshot();
- void Delete();
-
- HeapSnapshotsCollection* collection() { return collection_; }
- Type type() { return type_; }
- const char* title() { return title_; }
- unsigned uid() { return uid_; }
- HeapEntry* root() { return root_entry_; }
- HeapEntry* gc_roots() { return gc_roots_entry_; }
- HeapEntry* natives_root() { return natives_root_entry_; }
- List<HeapEntry*>* entries() { return &entries_; }
-
- void AllocateEntries(
- int entries_count, int children_count, int retainers_count);
- HeapEntry* AddEntry(HeapEntry::Type type,
- const char* name,
- uint64_t id,
- int size,
- int children_count,
- int retainers_count);
- HeapEntry* AddRootEntry(int children_count);
- HeapEntry* AddGcRootsEntry(int children_count, int retainers_count);
- HeapEntry* AddNativesRootEntry(int children_count, int retainers_count);
- void ClearPaint();
- HeapEntry* GetEntryById(uint64_t id);
- List<HeapEntry*>* GetSortedEntriesList();
- template<class Visitor>
- void IterateEntries(Visitor* visitor) { entries_.Iterate(visitor); }
- void SetDominatorsToSelf();
-
- void Print(int max_depth);
- void PrintEntriesSize();
-
- private:
- HeapEntry* GetNextEntryToInit();
-
- HeapSnapshotsCollection* collection_;
- Type type_;
- const char* title_;
- unsigned uid_;
- HeapEntry* root_entry_;
- HeapEntry* gc_roots_entry_;
- HeapEntry* natives_root_entry_;
- char* raw_entries_;
- List<HeapEntry*> entries_;
- bool entries_sorted_;
-#ifdef DEBUG
- int raw_entries_size_;
-#endif
-
- friend class HeapSnapshotTester;
-
- DISALLOW_COPY_AND_ASSIGN(HeapSnapshot);
-};
-
-
-class HeapObjectsMap {
- public:
- HeapObjectsMap();
- ~HeapObjectsMap();
-
- void SnapshotGenerationFinished();
- uint64_t FindObject(Address addr);
- void MoveObject(Address from, Address to);
-
- static uint64_t GenerateId(v8::RetainedObjectInfo* info);
-
- static const uint64_t kInternalRootObjectId;
- static const uint64_t kGcRootsObjectId;
- static const uint64_t kNativesRootObjectId;
- static const uint64_t kFirstAvailableObjectId;
-
- private:
- struct EntryInfo {
- explicit EntryInfo(uint64_t id) : id(id), accessed(true) { }
- EntryInfo(uint64_t id, bool accessed) : id(id), accessed(accessed) { }
- uint64_t id;
- bool accessed;
- };
-
- void AddEntry(Address addr, uint64_t id);
- uint64_t FindEntry(Address addr);
- void RemoveDeadEntries();
-
- static bool AddressesMatch(void* key1, void* key2) {
- return key1 == key2;
- }
-
- static uint32_t AddressHash(Address addr) {
- return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)));
- }
-
- bool initial_fill_mode_;
- uint64_t next_id_;
- HashMap entries_map_;
- List<EntryInfo>* entries_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapObjectsMap);
-};
-
-
-class HeapSnapshotsCollection {
- public:
- HeapSnapshotsCollection();
- ~HeapSnapshotsCollection();
-
- bool is_tracking_objects() { return is_tracking_objects_; }
-
- HeapSnapshot* NewSnapshot(
- HeapSnapshot::Type type, const char* name, unsigned uid);
- void SnapshotGenerationFinished(HeapSnapshot* snapshot);
- List<HeapSnapshot*>* snapshots() { return &snapshots_; }
- HeapSnapshot* GetSnapshot(unsigned uid);
- void RemoveSnapshot(HeapSnapshot* snapshot);
-
- StringsStorage* names() { return &names_; }
- TokenEnumerator* token_enumerator() { return token_enumerator_; }
-
- uint64_t GetObjectId(Address addr) { return ids_.FindObject(addr); }
- void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); }
-
- private:
- INLINE(static bool HeapSnapshotsMatch(void* key1, void* key2)) {
- return key1 == key2;
- }
-
- bool is_tracking_objects_; // Whether tracking object moves is needed.
- List<HeapSnapshot*> snapshots_;
- // Mapping from snapshots' uids to HeapSnapshot* pointers.
- HashMap snapshots_uids_;
- StringsStorage names_;
- TokenEnumerator* token_enumerator_;
- // Mapping from HeapObject addresses to objects' uids.
- HeapObjectsMap ids_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsCollection);
-};
-
-
-// A typedef for referencing anything that can be snapshotted living
-// in any kind of heap memory.
-typedef void* HeapThing;
-
-
-// An interface that creates HeapEntries by HeapThings.
-class HeapEntriesAllocator {
- public:
- virtual ~HeapEntriesAllocator() { }
- virtual HeapEntry* AllocateEntry(
- HeapThing ptr, int children_count, int retainers_count) = 0;
-};
-
-
-// The HeapEntriesMap instance is used to track a mapping between
-// real heap objects and their representations in heap snapshots.
-class HeapEntriesMap {
- public:
- HeapEntriesMap();
- ~HeapEntriesMap();
-
- void AllocateEntries();
- HeapEntry* Map(HeapThing thing);
- void Pair(HeapThing thing, HeapEntriesAllocator* allocator, HeapEntry* entry);
- void CountReference(HeapThing from, HeapThing to,
- int* prev_children_count = NULL,
- int* prev_retainers_count = NULL);
-
- int entries_count() { return entries_count_; }
- int total_children_count() { return total_children_count_; }
- int total_retainers_count() { return total_retainers_count_; }
-
- static HeapEntry *const kHeapEntryPlaceholder;
-
- private:
- struct EntryInfo {
- EntryInfo(HeapEntry* entry, HeapEntriesAllocator* allocator)
- : entry(entry),
- allocator(allocator),
- children_count(0),
- retainers_count(0) {
- }
- HeapEntry* entry;
- HeapEntriesAllocator* allocator;
- int children_count;
- int retainers_count;
- };
-
- static uint32_t Hash(HeapThing thing) {
- return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)));
- }
- static bool HeapThingsMatch(HeapThing key1, HeapThing key2) {
- return key1 == key2;
- }
-
- HashMap entries_;
- int entries_count_;
- int total_children_count_;
- int total_retainers_count_;
-
- friend class HeapObjectsSet;
-
- DISALLOW_COPY_AND_ASSIGN(HeapEntriesMap);
-};
-
-
-class HeapObjectsSet {
- public:
- HeapObjectsSet();
- void Clear();
- bool Contains(Object* object);
- void Insert(Object* obj);
-
- private:
- HashMap entries_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapObjectsSet);
-};
-
-
-// An interface used to populate a snapshot with nodes and edges.
-class SnapshotFillerInterface {
- public:
- virtual ~SnapshotFillerInterface() { }
- virtual HeapEntry* AddEntry(HeapThing ptr,
- HeapEntriesAllocator* allocator) = 0;
- virtual HeapEntry* FindEntry(HeapThing ptr) = 0;
- virtual HeapEntry* FindOrAddEntry(HeapThing ptr,
- HeapEntriesAllocator* allocator) = 0;
- virtual void SetIndexedReference(HeapGraphEdge::Type type,
- HeapThing parent_ptr,
- HeapEntry* parent_entry,
- int index,
- HeapThing child_ptr,
- HeapEntry* child_entry) = 0;
- virtual void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
- HeapThing parent_ptr,
- HeapEntry* parent_entry,
- HeapThing child_ptr,
- HeapEntry* child_entry) = 0;
- virtual void SetNamedReference(HeapGraphEdge::Type type,
- HeapThing parent_ptr,
- HeapEntry* parent_entry,
- const char* reference_name,
- HeapThing child_ptr,
- HeapEntry* child_entry) = 0;
- virtual void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
- HeapThing parent_ptr,
- HeapEntry* parent_entry,
- HeapThing child_ptr,
- HeapEntry* child_entry) = 0;
-};
-
-
-class SnapshottingProgressReportingInterface {
- public:
- virtual ~SnapshottingProgressReportingInterface() { }
- virtual void ProgressStep() = 0;
- virtual bool ProgressReport(bool force) = 0;
-};
-
-
-// An implementation of V8 heap graph extractor.
-class V8HeapExplorer : public HeapEntriesAllocator {
- public:
- V8HeapExplorer(HeapSnapshot* snapshot,
- SnapshottingProgressReportingInterface* progress);
- virtual ~V8HeapExplorer();
- virtual HeapEntry* AllocateEntry(
- HeapThing ptr, int children_count, int retainers_count);
- void AddRootEntries(SnapshotFillerInterface* filler);
- int EstimateObjectsCount();
- bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
-
- static HeapObject* const kInternalRootObject;
-
- private:
- HeapEntry* AddEntry(
- HeapObject* object, int children_count, int retainers_count);
- HeapEntry* AddEntry(HeapObject* object,
- HeapEntry::Type type,
- const char* name,
- int children_count,
- int retainers_count);
- const char* GetSystemEntryName(HeapObject* object);
- void ExtractReferences(HeapObject* obj);
- void ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry);
- void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry);
- void ExtractElementReferences(JSObject* js_obj, HeapEntry* entry);
- void ExtractInternalReferences(JSObject* js_obj, HeapEntry* entry);
- void SetClosureReference(HeapObject* parent_obj,
- HeapEntry* parent,
- String* reference_name,
- Object* child);
- void SetElementReference(HeapObject* parent_obj,
- HeapEntry* parent,
- int index,
- Object* child);
- void SetInternalReference(HeapObject* parent_obj,
- HeapEntry* parent,
- const char* reference_name,
- Object* child,
- int field_offset = -1);
- void SetInternalReference(HeapObject* parent_obj,
- HeapEntry* parent,
- int index,
- Object* child,
- int field_offset = -1);
- void SetHiddenReference(HeapObject* parent_obj,
- HeapEntry* parent,
- int index,
- Object* child);
- void SetPropertyReference(HeapObject* parent_obj,
- HeapEntry* parent,
- String* reference_name,
- Object* child,
- int field_offset = -1);
- void SetPropertyShortcutReference(HeapObject* parent_obj,
- HeapEntry* parent,
- String* reference_name,
- Object* child);
- void SetRootShortcutReference(Object* child);
- void SetRootGcRootsReference();
- void SetGcRootsReference(Object* child);
-
- HeapEntry* GetEntry(Object* obj);
-
- HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
- SnapshottingProgressReportingInterface* progress_;
- SnapshotFillerInterface* filler_;
-
- static HeapObject* const kGcRootsObject;
-
- friend class IndexedReferencesExtractor;
- friend class RootsReferencesExtractor;
-
- DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer);
-};
-
-
-// An implementation of retained native objects extractor.
-class NativeObjectsExplorer : public HeapEntriesAllocator {
- public:
- NativeObjectsExplorer(HeapSnapshot* snapshot,
- SnapshottingProgressReportingInterface* progress);
- virtual ~NativeObjectsExplorer();
- virtual HeapEntry* AllocateEntry(
- HeapThing ptr, int children_count, int retainers_count);
- void AddRootEntries(SnapshotFillerInterface* filler);
- int EstimateObjectsCount();
- bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
-
- private:
- void FillRetainedObjects();
- List<HeapObject*>* GetListMaybeDisposeInfo(v8::RetainedObjectInfo* info);
- void SetNativeRootReference(v8::RetainedObjectInfo* info);
- void SetRootNativesRootReference();
- void SetWrapperNativeReferences(HeapObject* wrapper,
- v8::RetainedObjectInfo* info);
- void VisitSubtreeWrapper(Object** p, uint16_t class_id);
-
- static uint32_t InfoHash(v8::RetainedObjectInfo* info) {
- return ComputeIntegerHash(static_cast<uint32_t>(info->GetHash()));
- }
- static bool RetainedInfosMatch(void* key1, void* key2) {
- return key1 == key2 ||
- (reinterpret_cast<v8::RetainedObjectInfo*>(key1))->IsEquivalent(
- reinterpret_cast<v8::RetainedObjectInfo*>(key2));
- }
-
- HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
- SnapshottingProgressReportingInterface* progress_;
- bool embedder_queried_;
- HeapObjectsSet in_groups_;
- // RetainedObjectInfo* -> List<HeapObject*>*
- HashMap objects_by_info_;
- // Used during references extraction.
- SnapshotFillerInterface* filler_;
-
- static HeapThing const kNativesRootObject;
-
- friend class GlobalHandlesExtractor;
-
- DISALLOW_COPY_AND_ASSIGN(NativeObjectsExplorer);
-};
-
-
-class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
- public:
- HeapSnapshotGenerator(HeapSnapshot* snapshot,
- v8::ActivityControl* control);
- bool GenerateSnapshot();
-
- private:
- bool ApproximateRetainedSizes();
- bool BuildDominatorTree(const Vector<HeapEntry*>& entries,
- Vector<HeapEntry*>* dominators);
- bool CountEntriesAndReferences();
- bool FillReferences();
- void FillReversePostorderIndexes(Vector<HeapEntry*>* entries);
- void ProgressStep();
- bool ProgressReport(bool force = false);
- bool SetEntriesDominators();
- void SetProgressTotal(int iterations_count);
-
- HeapSnapshot* snapshot_;
- v8::ActivityControl* control_;
- V8HeapExplorer v8_heap_explorer_;
- NativeObjectsExplorer dom_explorer_;
- // Mapping from HeapThing pointers to HeapEntry* pointers.
- HeapEntriesMap entries_;
- // Used during snapshot generation.
- int progress_counter_;
- int progress_total_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator);
-};
-
-class OutputStreamWriter;
-
-class HeapSnapshotJSONSerializer {
- public:
- explicit HeapSnapshotJSONSerializer(HeapSnapshot* snapshot)
- : snapshot_(snapshot),
- nodes_(ObjectsMatch),
- strings_(ObjectsMatch),
- next_node_id_(1),
- next_string_id_(1),
- writer_(NULL) {
- }
- void Serialize(v8::OutputStream* stream);
-
- private:
- INLINE(static bool ObjectsMatch(void* key1, void* key2)) {
- return key1 == key2;
- }
-
- INLINE(static uint32_t ObjectHash(const void* key)) {
- return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)));
- }
-
- void EnumerateNodes();
- int GetNodeId(HeapEntry* entry);
- int GetStringId(const char* s);
- void SerializeEdge(HeapGraphEdge* edge);
- void SerializeImpl();
- void SerializeNode(HeapEntry* entry);
- void SerializeNodes();
- void SerializeSnapshot();
- void SerializeString(const unsigned char* s);
- void SerializeStrings();
- void SortHashMap(HashMap* map, List<HashMap::Entry*>* sorted_entries);
-
- HeapSnapshot* snapshot_;
- HashMap nodes_;
- HashMap strings_;
- int next_node_id_;
- int next_string_id_;
- OutputStreamWriter* writer_;
-
- friend class HeapSnapshotJSONSerializerEnumerator;
- friend class HeapSnapshotJSONSerializerIterator;
-
- DISALLOW_COPY_AND_ASSIGN(HeapSnapshotJSONSerializer);
-};
-
-
-String* GetConstructorNameForHeapProfile(JSObject* object);
-
-} } // namespace v8::internal
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-#endif // V8_PROFILE_GENERATOR_H_
diff --git a/src/3rdparty/v8/src/property.cc b/src/3rdparty/v8/src/property.cc
deleted file mode 100644
index c35fb83..0000000
--- a/src/3rdparty/v8/src/property.cc
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-
-#ifdef OBJECT_PRINT
-void LookupResult::Print(FILE* out) {
- if (!IsFound()) {
- PrintF(out, "Not Found\n");
- return;
- }
-
- PrintF(out, "LookupResult:\n");
- PrintF(out, " -cacheable = %s\n", IsCacheable() ? "true" : "false");
- PrintF(out, " -attributes = %x\n", GetAttributes());
- switch (type()) {
- case NORMAL:
- PrintF(out, " -type = normal\n");
- PrintF(out, " -entry = %d", GetDictionaryEntry());
- break;
- case MAP_TRANSITION:
- PrintF(out, " -type = map transition\n");
- PrintF(out, " -map:\n");
- GetTransitionMap()->Print(out);
- PrintF(out, "\n");
- break;
- case EXTERNAL_ARRAY_TRANSITION:
- PrintF(out, " -type = external array transition\n");
- PrintF(out, " -map:\n");
- GetTransitionMap()->Print(out);
- PrintF(out, "\n");
- break;
- case CONSTANT_FUNCTION:
- PrintF(out, " -type = constant function\n");
- PrintF(out, " -function:\n");
- GetConstantFunction()->Print(out);
- PrintF(out, "\n");
- break;
- case FIELD:
- PrintF(out, " -type = field\n");
- PrintF(out, " -index = %d", GetFieldIndex());
- PrintF(out, "\n");
- break;
- case CALLBACKS:
- PrintF(out, " -type = call backs\n");
- PrintF(out, " -callback object:\n");
- GetCallbackObject()->Print(out);
- break;
- case INTERCEPTOR:
- PrintF(out, " -type = lookup interceptor\n");
- break;
- case CONSTANT_TRANSITION:
- PrintF(out, " -type = constant property transition\n");
- break;
- case NULL_DESCRIPTOR:
- PrintF(out, " =type = null descriptor\n");
- break;
- }
-}
-
-
-void Descriptor::Print(FILE* out) {
- PrintF(out, "Descriptor ");
- GetKey()->ShortPrint(out);
- PrintF(out, " @ ");
- GetValue()->ShortPrint(out);
- PrintF(out, " %d\n", GetDetails().index());
-}
-
-
-#endif
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/property.h b/src/3rdparty/v8/src/property.h
deleted file mode 100644
index fa3916e..0000000
--- a/src/3rdparty/v8/src/property.h
+++ /dev/null
@@ -1,348 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PROPERTY_H_
-#define V8_PROPERTY_H_
-
-namespace v8 {
-namespace internal {
-
-
-// Abstraction for elements in instance-descriptor arrays.
-//
-// Each descriptor has a key, property attributes, property type,
-// property index (in the actual instance-descriptor array) and
-// optionally a piece of data.
-//
-
-class Descriptor BASE_EMBEDDED {
- public:
- static int IndexFromValue(Object* value) {
- return Smi::cast(value)->value();
- }
-
- MUST_USE_RESULT MaybeObject* KeyToSymbol() {
- if (!StringShape(key_).IsSymbol()) {
- Object* result;
- { MaybeObject* maybe_result = HEAP->LookupSymbol(key_);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- key_ = String::cast(result);
- }
- return key_;
- }
-
- String* GetKey() { return key_; }
- Object* GetValue() { return value_; }
- PropertyDetails GetDetails() { return details_; }
-
-#ifdef OBJECT_PRINT
- void Print(FILE* out);
-#endif
-
- void SetEnumerationIndex(int index) {
- ASSERT(PropertyDetails::IsValidIndex(index));
- details_ = PropertyDetails(details_.attributes(), details_.type(), index);
- }
-
- private:
- String* key_;
- Object* value_;
- PropertyDetails details_;
-
- protected:
- Descriptor() : details_(Smi::FromInt(0)) {}
-
- void Init(String* key, Object* value, PropertyDetails details) {
- key_ = key;
- value_ = value;
- details_ = details;
- }
-
- Descriptor(String* key, Object* value, PropertyDetails details)
- : key_(key),
- value_(value),
- details_(details) { }
-
- Descriptor(String* key,
- Object* value,
- PropertyAttributes attributes,
- PropertyType type,
- int index = 0)
- : key_(key),
- value_(value),
- details_(attributes, type, index) { }
-
- friend class DescriptorArray;
-};
-
-// A pointer from a map to the new map that is created by adding
-// a named property. These are key to the speed and functioning of V8.
-// The two maps should always have the same prototype, since
-// MapSpace::CreateBackPointers depends on this.
-class MapTransitionDescriptor: public Descriptor {
- public:
- MapTransitionDescriptor(String* key, Map* map, PropertyAttributes attributes)
- : Descriptor(key, map, attributes, MAP_TRANSITION) { }
-};
-
-class ExternalArrayTransitionDescriptor: public Descriptor {
- public:
- ExternalArrayTransitionDescriptor(String* key,
- Map* map,
- ExternalArrayType array_type)
- : Descriptor(key, map, PropertyDetails(NONE,
- EXTERNAL_ARRAY_TRANSITION,
- array_type)) { }
-};
-
-// Marks a field name in a map so that adding the field is guaranteed
-// to create a FIELD descriptor in the new map. Used after adding
-// a constant function the first time, creating a CONSTANT_FUNCTION
-// descriptor in the new map. This avoids creating multiple maps with
-// the same CONSTANT_FUNCTION field.
-class ConstTransitionDescriptor: public Descriptor {
- public:
- explicit ConstTransitionDescriptor(String* key, Map* map)
- : Descriptor(key, map, NONE, CONSTANT_TRANSITION) { }
-};
-
-
-class FieldDescriptor: public Descriptor {
- public:
- FieldDescriptor(String* key,
- int field_index,
- PropertyAttributes attributes,
- int index = 0)
- : Descriptor(key, Smi::FromInt(field_index), attributes, FIELD, index) {}
-};
-
-
-class ConstantFunctionDescriptor: public Descriptor {
- public:
- ConstantFunctionDescriptor(String* key,
- JSFunction* function,
- PropertyAttributes attributes,
- int index = 0)
- : Descriptor(key, function, attributes, CONSTANT_FUNCTION, index) {}
-};
-
-
-class CallbacksDescriptor: public Descriptor {
- public:
- CallbacksDescriptor(String* key,
- Object* proxy,
- PropertyAttributes attributes,
- int index = 0)
- : Descriptor(key, proxy, attributes, CALLBACKS, index) {}
-};
-
-
-class LookupResult BASE_EMBEDDED {
- public:
- // Where did we find the result;
- enum {
- NOT_FOUND,
- DESCRIPTOR_TYPE,
- DICTIONARY_TYPE,
- INTERCEPTOR_TYPE,
- CONSTANT_TYPE
- } lookup_type_;
-
- LookupResult()
- : lookup_type_(NOT_FOUND),
- cacheable_(true),
- details_(NONE, NORMAL) {}
-
- void DescriptorResult(JSObject* holder, PropertyDetails details, int number) {
- lookup_type_ = DESCRIPTOR_TYPE;
- holder_ = holder;
- details_ = details;
- number_ = number;
- }
-
- void ConstantResult(JSObject* holder) {
- lookup_type_ = CONSTANT_TYPE;
- holder_ = holder;
- details_ =
- PropertyDetails(static_cast<PropertyAttributes>(DONT_ENUM |
- DONT_DELETE),
- CALLBACKS);
- number_ = -1;
- }
-
- void DictionaryResult(JSObject* holder, int entry) {
- lookup_type_ = DICTIONARY_TYPE;
- holder_ = holder;
- details_ = holder->property_dictionary()->DetailsAt(entry);
- number_ = entry;
- }
-
- void InterceptorResult(JSObject* holder) {
- lookup_type_ = INTERCEPTOR_TYPE;
- holder_ = holder;
- details_ = PropertyDetails(NONE, INTERCEPTOR);
- }
-
- void NotFound() {
- lookup_type_ = NOT_FOUND;
- }
-
- JSObject* holder() {
- ASSERT(IsFound());
- return holder_;
- }
-
- PropertyType type() {
- ASSERT(IsFound());
- return details_.type();
- }
-
- PropertyAttributes GetAttributes() {
- ASSERT(IsFound());
- return details_.attributes();
- }
-
- PropertyDetails GetPropertyDetails() {
- return details_;
- }
-
- bool IsReadOnly() { return details_.IsReadOnly(); }
- bool IsDontDelete() { return details_.IsDontDelete(); }
- bool IsDontEnum() { return details_.IsDontEnum(); }
- bool IsDeleted() { return details_.IsDeleted(); }
- bool IsFound() { return lookup_type_ != NOT_FOUND; }
-
- // Is the result is a property excluding transitions and the null
- // descriptor?
- bool IsProperty() {
- return IsFound() && (type() < FIRST_PHANTOM_PROPERTY_TYPE);
- }
-
- // Is the result a property or a transition?
- bool IsPropertyOrTransition() {
- return IsFound() && (type() != NULL_DESCRIPTOR);
- }
-
- bool IsCacheable() { return cacheable_; }
- void DisallowCaching() { cacheable_ = false; }
-
- Object* GetLazyValue() {
- switch (type()) {
- case FIELD:
- return holder()->FastPropertyAt(GetFieldIndex());
- case NORMAL: {
- Object* value;
- value = holder()->property_dictionary()->ValueAt(GetDictionaryEntry());
- if (holder()->IsGlobalObject()) {
- value = JSGlobalPropertyCell::cast(value)->value();
- }
- return value;
- }
- case CONSTANT_FUNCTION:
- return GetConstantFunction();
- default:
- return Smi::FromInt(0);
- }
- }
-
- Map* GetTransitionMap() {
- ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- ASSERT(type() == MAP_TRANSITION || type() == CONSTANT_TRANSITION ||
- type() == EXTERNAL_ARRAY_TRANSITION);
- return Map::cast(GetValue());
- }
-
- Map* GetTransitionMapFromMap(Map* map) {
- ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- ASSERT(type() == MAP_TRANSITION);
- return Map::cast(map->instance_descriptors()->GetValue(number_));
- }
-
- int GetFieldIndex() {
- ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- ASSERT(type() == FIELD);
- return Descriptor::IndexFromValue(GetValue());
- }
-
- int GetLocalFieldIndexFromMap(Map* map) {
- ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- ASSERT(type() == FIELD);
- return Descriptor::IndexFromValue(
- map->instance_descriptors()->GetValue(number_)) -
- map->inobject_properties();
- }
-
- int GetDictionaryEntry() {
- ASSERT(lookup_type_ == DICTIONARY_TYPE);
- return number_;
- }
-
- JSFunction* GetConstantFunction() {
- ASSERT(type() == CONSTANT_FUNCTION);
- return JSFunction::cast(GetValue());
- }
-
- JSFunction* GetConstantFunctionFromMap(Map* map) {
- ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- ASSERT(type() == CONSTANT_FUNCTION);
- return JSFunction::cast(map->instance_descriptors()->GetValue(number_));
- }
-
- Object* GetCallbackObject() {
- if (lookup_type_ == CONSTANT_TYPE) {
- // For now we only have the __proto__ as constant type.
- return HEAP->prototype_accessors();
- }
- return GetValue();
- }
-
-#ifdef OBJECT_PRINT
- void Print(FILE* out);
-#endif
-
- Object* GetValue() {
- if (lookup_type_ == DESCRIPTOR_TYPE) {
- DescriptorArray* descriptors = holder()->map()->instance_descriptors();
- return descriptors->GetValue(number_);
- }
- // In the dictionary case, the data is held in the value field.
- ASSERT(lookup_type_ == DICTIONARY_TYPE);
- return holder()->GetNormalizedProperty(this);
- }
-
- private:
- JSObject* holder_;
- int number_;
- bool cacheable_;
- PropertyDetails details_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_PROPERTY_H_
diff --git a/src/3rdparty/v8/src/regexp-macro-assembler-irregexp-inl.h b/src/3rdparty/v8/src/regexp-macro-assembler-irregexp-inl.h
deleted file mode 100644
index f2a4e85..0000000
--- a/src/3rdparty/v8/src/regexp-macro-assembler-irregexp-inl.h
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2008-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// A light-weight assembler for the Irregexp byte code.
-
-
-#include "v8.h"
-#include "ast.h"
-#include "bytecodes-irregexp.h"
-
-#ifndef V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
-#define V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
-
-namespace v8 {
-namespace internal {
-
-#ifdef V8_INTERPRETED_REGEXP
-
-void RegExpMacroAssemblerIrregexp::Emit(uint32_t byte,
- uint32_t twenty_four_bits) {
- uint32_t word = ((twenty_four_bits << BYTECODE_SHIFT) | byte);
- ASSERT(pc_ <= buffer_.length());
- if (pc_ + 3 >= buffer_.length()) {
- Expand();
- }
- *reinterpret_cast<uint32_t*>(buffer_.start() + pc_) = word;
- pc_ += 4;
-}
-
-
-void RegExpMacroAssemblerIrregexp::Emit16(uint32_t word) {
- ASSERT(pc_ <= buffer_.length());
- if (pc_ + 1 >= buffer_.length()) {
- Expand();
- }
- *reinterpret_cast<uint16_t*>(buffer_.start() + pc_) = word;
- pc_ += 2;
-}
-
-
-void RegExpMacroAssemblerIrregexp::Emit32(uint32_t word) {
- ASSERT(pc_ <= buffer_.length());
- if (pc_ + 3 >= buffer_.length()) {
- Expand();
- }
- *reinterpret_cast<uint32_t*>(buffer_.start() + pc_) = word;
- pc_ += 4;
-}
-
-#endif // V8_INTERPRETED_REGEXP
-
-} } // namespace v8::internal
-
-#endif // V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
diff --git a/src/3rdparty/v8/src/regexp-macro-assembler-irregexp.cc b/src/3rdparty/v8/src/regexp-macro-assembler-irregexp.cc
deleted file mode 100644
index d41a97c..0000000
--- a/src/3rdparty/v8/src/regexp-macro-assembler-irregexp.cc
+++ /dev/null
@@ -1,470 +0,0 @@
-// Copyright 2008-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "ast.h"
-#include "bytecodes-irregexp.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-macro-assembler-irregexp.h"
-#include "regexp-macro-assembler-irregexp-inl.h"
-
-
-namespace v8 {
-namespace internal {
-
-#ifdef V8_INTERPRETED_REGEXP
-
-RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Vector<byte> buffer)
- : buffer_(buffer),
- pc_(0),
- own_buffer_(false),
- advance_current_end_(kInvalidPC) {
-}
-
-
-RegExpMacroAssemblerIrregexp::~RegExpMacroAssemblerIrregexp() {
- if (backtrack_.is_linked()) backtrack_.Unuse();
- if (own_buffer_) buffer_.Dispose();
-}
-
-
-RegExpMacroAssemblerIrregexp::IrregexpImplementation
-RegExpMacroAssemblerIrregexp::Implementation() {
- return kBytecodeImplementation;
-}
-
-
-void RegExpMacroAssemblerIrregexp::Bind(Label* l) {
- advance_current_end_ = kInvalidPC;
- ASSERT(!l->is_bound());
- if (l->is_linked()) {
- int pos = l->pos();
- while (pos != 0) {
- int fixup = pos;
- pos = *reinterpret_cast<int32_t*>(buffer_.start() + fixup);
- *reinterpret_cast<uint32_t*>(buffer_.start() + fixup) = pc_;
- }
- }
- l->bind_to(pc_);
-}
-
-
-void RegExpMacroAssemblerIrregexp::EmitOrLink(Label* l) {
- if (l == NULL) l = &backtrack_;
- if (l->is_bound()) {
- Emit32(l->pos());
- } else {
- int pos = 0;
- if (l->is_linked()) {
- pos = l->pos();
- }
- l->link_to(pc_);
- Emit32(pos);
- }
-}
-
-
-void RegExpMacroAssemblerIrregexp::PopRegister(int register_index) {
- ASSERT(register_index >= 0);
- ASSERT(register_index <= kMaxRegister);
- Emit(BC_POP_REGISTER, register_index);
-}
-
-
-void RegExpMacroAssemblerIrregexp::PushRegister(
- int register_index,
- StackCheckFlag check_stack_limit) {
- ASSERT(register_index >= 0);
- ASSERT(register_index <= kMaxRegister);
- Emit(BC_PUSH_REGISTER, register_index);
-}
-
-
-void RegExpMacroAssemblerIrregexp::WriteCurrentPositionToRegister(
- int register_index, int cp_offset) {
- ASSERT(register_index >= 0);
- ASSERT(register_index <= kMaxRegister);
- Emit(BC_SET_REGISTER_TO_CP, register_index);
- Emit32(cp_offset); // Current position offset.
-}
-
-
-void RegExpMacroAssemblerIrregexp::ClearRegisters(int reg_from, int reg_to) {
- ASSERT(reg_from <= reg_to);
- for (int reg = reg_from; reg <= reg_to; reg++) {
- SetRegister(reg, -1);
- }
-}
-
-
-void RegExpMacroAssemblerIrregexp::ReadCurrentPositionFromRegister(
- int register_index) {
- ASSERT(register_index >= 0);
- ASSERT(register_index <= kMaxRegister);
- Emit(BC_SET_CP_TO_REGISTER, register_index);
-}
-
-
-void RegExpMacroAssemblerIrregexp::WriteStackPointerToRegister(
- int register_index) {
- ASSERT(register_index >= 0);
- ASSERT(register_index <= kMaxRegister);
- Emit(BC_SET_REGISTER_TO_SP, register_index);
-}
-
-
-void RegExpMacroAssemblerIrregexp::ReadStackPointerFromRegister(
- int register_index) {
- ASSERT(register_index >= 0);
- ASSERT(register_index <= kMaxRegister);
- Emit(BC_SET_SP_TO_REGISTER, register_index);
-}
-
-
-void RegExpMacroAssemblerIrregexp::SetCurrentPositionFromEnd(int by) {
- ASSERT(is_uint24(by));
- Emit(BC_SET_CURRENT_POSITION_FROM_END, by);
-}
-
-
-void RegExpMacroAssemblerIrregexp::SetRegister(int register_index, int to) {
- ASSERT(register_index >= 0);
- ASSERT(register_index <= kMaxRegister);
- Emit(BC_SET_REGISTER, register_index);
- Emit32(to);
-}
-
-
-void RegExpMacroAssemblerIrregexp::AdvanceRegister(int register_index, int by) {
- ASSERT(register_index >= 0);
- ASSERT(register_index <= kMaxRegister);
- Emit(BC_ADVANCE_REGISTER, register_index);
- Emit32(by);
-}
-
-
-void RegExpMacroAssemblerIrregexp::PopCurrentPosition() {
- Emit(BC_POP_CP, 0);
-}
-
-
-void RegExpMacroAssemblerIrregexp::PushCurrentPosition() {
- Emit(BC_PUSH_CP, 0);
-}
-
-
-void RegExpMacroAssemblerIrregexp::Backtrack() {
- Emit(BC_POP_BT, 0);
-}
-
-
-void RegExpMacroAssemblerIrregexp::GoTo(Label* l) {
- if (advance_current_end_ == pc_) {
- // Combine advance current and goto.
- pc_ = advance_current_start_;
- Emit(BC_ADVANCE_CP_AND_GOTO, advance_current_offset_);
- EmitOrLink(l);
- advance_current_end_ = kInvalidPC;
- } else {
- // Regular goto.
- Emit(BC_GOTO, 0);
- EmitOrLink(l);
- }
-}
-
-
-void RegExpMacroAssemblerIrregexp::PushBacktrack(Label* l) {
- Emit(BC_PUSH_BT, 0);
- EmitOrLink(l);
-}
-
-
-void RegExpMacroAssemblerIrregexp::Succeed() {
- Emit(BC_SUCCEED, 0);
-}
-
-
-void RegExpMacroAssemblerIrregexp::Fail() {
- Emit(BC_FAIL, 0);
-}
-
-
-void RegExpMacroAssemblerIrregexp::AdvanceCurrentPosition(int by) {
- ASSERT(by >= kMinCPOffset);
- ASSERT(by <= kMaxCPOffset);
- advance_current_start_ = pc_;
- advance_current_offset_ = by;
- Emit(BC_ADVANCE_CP, by);
- advance_current_end_ = pc_;
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckGreedyLoop(
- Label* on_tos_equals_current_position) {
- Emit(BC_CHECK_GREEDY, 0);
- EmitOrLink(on_tos_equals_current_position);
-}
-
-
-void RegExpMacroAssemblerIrregexp::LoadCurrentCharacter(int cp_offset,
- Label* on_failure,
- bool check_bounds,
- int characters) {
- ASSERT(cp_offset >= kMinCPOffset);
- ASSERT(cp_offset <= kMaxCPOffset);
- int bytecode;
- if (check_bounds) {
- if (characters == 4) {
- bytecode = BC_LOAD_4_CURRENT_CHARS;
- } else if (characters == 2) {
- bytecode = BC_LOAD_2_CURRENT_CHARS;
- } else {
- ASSERT(characters == 1);
- bytecode = BC_LOAD_CURRENT_CHAR;
- }
- } else {
- if (characters == 4) {
- bytecode = BC_LOAD_4_CURRENT_CHARS_UNCHECKED;
- } else if (characters == 2) {
- bytecode = BC_LOAD_2_CURRENT_CHARS_UNCHECKED;
- } else {
- ASSERT(characters == 1);
- bytecode = BC_LOAD_CURRENT_CHAR_UNCHECKED;
- }
- }
- Emit(bytecode, cp_offset);
- if (check_bounds) EmitOrLink(on_failure);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckCharacterLT(uc16 limit,
- Label* on_less) {
- Emit(BC_CHECK_LT, limit);
- EmitOrLink(on_less);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckCharacterGT(uc16 limit,
- Label* on_greater) {
- Emit(BC_CHECK_GT, limit);
- EmitOrLink(on_greater);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckCharacter(uint32_t c, Label* on_equal) {
- if (c > MAX_FIRST_ARG) {
- Emit(BC_CHECK_4_CHARS, 0);
- Emit32(c);
- } else {
- Emit(BC_CHECK_CHAR, c);
- }
- EmitOrLink(on_equal);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckAtStart(Label* on_at_start) {
- Emit(BC_CHECK_AT_START, 0);
- EmitOrLink(on_at_start);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckNotAtStart(Label* on_not_at_start) {
- Emit(BC_CHECK_NOT_AT_START, 0);
- EmitOrLink(on_not_at_start);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckNotCharacter(uint32_t c,
- Label* on_not_equal) {
- if (c > MAX_FIRST_ARG) {
- Emit(BC_CHECK_NOT_4_CHARS, 0);
- Emit32(c);
- } else {
- Emit(BC_CHECK_NOT_CHAR, c);
- }
- EmitOrLink(on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckCharacterAfterAnd(
- uint32_t c,
- uint32_t mask,
- Label* on_equal) {
- if (c > MAX_FIRST_ARG) {
- Emit(BC_AND_CHECK_4_CHARS, 0);
- Emit32(c);
- } else {
- Emit(BC_AND_CHECK_CHAR, c);
- }
- Emit32(mask);
- EmitOrLink(on_equal);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckNotCharacterAfterAnd(
- uint32_t c,
- uint32_t mask,
- Label* on_not_equal) {
- if (c > MAX_FIRST_ARG) {
- Emit(BC_AND_CHECK_NOT_4_CHARS, 0);
- Emit32(c);
- } else {
- Emit(BC_AND_CHECK_NOT_CHAR, c);
- }
- Emit32(mask);
- EmitOrLink(on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckNotCharacterAfterMinusAnd(
- uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal) {
- Emit(BC_MINUS_AND_CHECK_NOT_CHAR, c);
- Emit16(minus);
- Emit16(mask);
- EmitOrLink(on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckNotBackReference(int start_reg,
- Label* on_not_equal) {
- ASSERT(start_reg >= 0);
- ASSERT(start_reg <= kMaxRegister);
- Emit(BC_CHECK_NOT_BACK_REF, start_reg);
- EmitOrLink(on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_not_equal) {
- ASSERT(start_reg >= 0);
- ASSERT(start_reg <= kMaxRegister);
- Emit(BC_CHECK_NOT_BACK_REF_NO_CASE, start_reg);
- EmitOrLink(on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckNotRegistersEqual(int reg1,
- int reg2,
- Label* on_not_equal) {
- ASSERT(reg1 >= 0);
- ASSERT(reg1 <= kMaxRegister);
- Emit(BC_CHECK_NOT_REGS_EQUAL, reg1);
- Emit32(reg2);
- EmitOrLink(on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckCharacters(
- Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
- ASSERT(cp_offset >= kMinCPOffset);
- ASSERT(cp_offset + str.length() - 1 <= kMaxCPOffset);
- // It is vital that this loop is backwards due to the unchecked character
- // load below.
- for (int i = str.length() - 1; i >= 0; i--) {
- if (check_end_of_string && i == str.length() - 1) {
- Emit(BC_LOAD_CURRENT_CHAR, cp_offset + i);
- EmitOrLink(on_failure);
- } else {
- Emit(BC_LOAD_CURRENT_CHAR_UNCHECKED, cp_offset + i);
- }
- Emit(BC_CHECK_NOT_CHAR, str[i]);
- EmitOrLink(on_failure);
- }
-}
-
-
-void RegExpMacroAssemblerIrregexp::IfRegisterLT(int register_index,
- int comparand,
- Label* on_less_than) {
- ASSERT(register_index >= 0);
- ASSERT(register_index <= kMaxRegister);
- Emit(BC_CHECK_REGISTER_LT, register_index);
- Emit32(comparand);
- EmitOrLink(on_less_than);
-}
-
-
-void RegExpMacroAssemblerIrregexp::IfRegisterGE(int register_index,
- int comparand,
- Label* on_greater_or_equal) {
- ASSERT(register_index >= 0);
- ASSERT(register_index <= kMaxRegister);
- Emit(BC_CHECK_REGISTER_GE, register_index);
- Emit32(comparand);
- EmitOrLink(on_greater_or_equal);
-}
-
-
-void RegExpMacroAssemblerIrregexp::IfRegisterEqPos(int register_index,
- Label* on_eq) {
- ASSERT(register_index >= 0);
- ASSERT(register_index <= kMaxRegister);
- Emit(BC_CHECK_REGISTER_EQ_POS, register_index);
- EmitOrLink(on_eq);
-}
-
-
-Handle<Object> RegExpMacroAssemblerIrregexp::GetCode(Handle<String> source) {
- Bind(&backtrack_);
- Emit(BC_POP_BT, 0);
- Handle<ByteArray> array = FACTORY->NewByteArray(length());
- Copy(array->GetDataStartAddress());
- return array;
-}
-
-
-int RegExpMacroAssemblerIrregexp::length() {
- return pc_;
-}
-
-
-void RegExpMacroAssemblerIrregexp::Copy(Address a) {
- memcpy(a, buffer_.start(), length());
-}
-
-
-void RegExpMacroAssemblerIrregexp::Expand() {
- bool old_buffer_was_our_own = own_buffer_;
- Vector<byte> old_buffer = buffer_;
- buffer_ = Vector<byte>::New(old_buffer.length() * 2);
- own_buffer_ = true;
- memcpy(buffer_.start(), old_buffer.start(), old_buffer.length());
- if (old_buffer_was_our_own) {
- old_buffer.Dispose();
- }
-}
-
-#endif // V8_INTERPRETED_REGEXP
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/regexp-macro-assembler-irregexp.h b/src/3rdparty/v8/src/regexp-macro-assembler-irregexp.h
deleted file mode 100644
index 9deea86..0000000
--- a/src/3rdparty/v8/src/regexp-macro-assembler-irregexp.h
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2008-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
-#define V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
-
-namespace v8 {
-namespace internal {
-
-#ifdef V8_INTERPRETED_REGEXP
-
-class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
- public:
- // Create an assembler. Instructions and relocation information are emitted
- // into a buffer, with the instructions starting from the beginning and the
- // relocation information starting from the end of the buffer. See CodeDesc
- // for a detailed comment on the layout (globals.h).
- //
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
- //
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
- explicit RegExpMacroAssemblerIrregexp(Vector<byte>);
- virtual ~RegExpMacroAssemblerIrregexp();
- // The byte-code interpreter checks on each push anyway.
- virtual int stack_limit_slack() { return 1; }
- virtual void Bind(Label* label);
- virtual void AdvanceCurrentPosition(int by); // Signed cp change.
- virtual void PopCurrentPosition();
- virtual void PushCurrentPosition();
- virtual void Backtrack();
- virtual void GoTo(Label* label);
- virtual void PushBacktrack(Label* label);
- virtual void Succeed();
- virtual void Fail();
- virtual void PopRegister(int register_index);
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit);
- virtual void AdvanceRegister(int reg, int by); // r[reg] += by.
- virtual void SetCurrentPositionFromEnd(int by);
- virtual void SetRegister(int register_index, int to);
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
- virtual void ClearRegisters(int reg_from, int reg_to);
- virtual void ReadCurrentPositionFromRegister(int reg);
- virtual void WriteStackPointerToRegister(int reg);
- virtual void ReadStackPointerFromRegister(int reg);
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1);
- virtual void CheckCharacter(unsigned c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_equal);
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
- virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- Label* on_no_match);
- virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
- virtual void IfRegisterLT(int register_index, int comparand, Label* if_lt);
- virtual void IfRegisterGE(int register_index, int comparand, Label* if_ge);
- virtual void IfRegisterEqPos(int register_index, Label* if_eq);
-
- virtual IrregexpImplementation Implementation();
- virtual Handle<Object> GetCode(Handle<String> source);
- private:
- void Expand();
- // Code and bitmap emission.
- inline void EmitOrLink(Label* label);
- inline void Emit32(uint32_t x);
- inline void Emit16(uint32_t x);
- inline void Emit(uint32_t bc, uint32_t arg);
- // Bytecode buffer.
- int length();
- void Copy(Address a);
-
- // The buffer into which code and relocation info are generated.
- Vector<byte> buffer_;
- // The program counter.
- int pc_;
- // True if the assembler owns the buffer, false if buffer is external.
- bool own_buffer_;
- Label backtrack_;
-
- int advance_current_start_;
- int advance_current_offset_;
- int advance_current_end_;
-
- static const int kInvalidPC = -1;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpMacroAssemblerIrregexp);
-};
-
-#endif // V8_INTERPRETED_REGEXP
-
-} } // namespace v8::internal
-
-#endif // V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
diff --git a/src/3rdparty/v8/src/regexp-macro-assembler-tracer.cc b/src/3rdparty/v8/src/regexp-macro-assembler-tracer.cc
deleted file mode 100644
index fa2c657..0000000
--- a/src/3rdparty/v8/src/regexp-macro-assembler-tracer.cc
+++ /dev/null
@@ -1,373 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "ast.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-macro-assembler-tracer.h"
-
-namespace v8 {
-namespace internal {
-
-RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
- RegExpMacroAssembler* assembler) :
- assembler_(assembler) {
- unsigned int type = assembler->Implementation();
- ASSERT(type < 4);
- const char* impl_names[4] = {"IA32", "ARM", "X64", "Bytecode"};
- PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
-}
-
-
-RegExpMacroAssemblerTracer::~RegExpMacroAssemblerTracer() {
-}
-
-
-// This is used for printing out debugging information. It makes an integer
-// that is closely related to the address of an object.
-static int LabelToInt(Label* label) {
- return static_cast<int>(reinterpret_cast<intptr_t>(label));
-}
-
-
-void RegExpMacroAssemblerTracer::Bind(Label* label) {
- PrintF("label[%08x]: (Bind)\n", LabelToInt(label));
- assembler_->Bind(label);
-}
-
-
-void RegExpMacroAssemblerTracer::AdvanceCurrentPosition(int by) {
- PrintF(" AdvanceCurrentPosition(by=%d);\n", by);
- assembler_->AdvanceCurrentPosition(by);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckGreedyLoop(Label* label) {
- PrintF(" CheckGreedyLoop(label[%08x]);\n\n", LabelToInt(label));
- assembler_->CheckGreedyLoop(label);
-}
-
-
-void RegExpMacroAssemblerTracer::PopCurrentPosition() {
- PrintF(" PopCurrentPosition();\n");
- assembler_->PopCurrentPosition();
-}
-
-
-void RegExpMacroAssemblerTracer::PushCurrentPosition() {
- PrintF(" PushCurrentPosition();\n");
- assembler_->PushCurrentPosition();
-}
-
-
-void RegExpMacroAssemblerTracer::Backtrack() {
- PrintF(" Backtrack();\n");
- assembler_->Backtrack();
-}
-
-
-void RegExpMacroAssemblerTracer::GoTo(Label* label) {
- PrintF(" GoTo(label[%08x]);\n\n", LabelToInt(label));
- assembler_->GoTo(label);
-}
-
-
-void RegExpMacroAssemblerTracer::PushBacktrack(Label* label) {
- PrintF(" PushBacktrack(label[%08x]);\n", LabelToInt(label));
- assembler_->PushBacktrack(label);
-}
-
-
-void RegExpMacroAssemblerTracer::Succeed() {
- PrintF(" Succeed();\n");
- assembler_->Succeed();
-}
-
-
-void RegExpMacroAssemblerTracer::Fail() {
- PrintF(" Fail();\n");
- assembler_->Fail();
-}
-
-
-void RegExpMacroAssemblerTracer::PopRegister(int register_index) {
- PrintF(" PopRegister(register=%d);\n", register_index);
- assembler_->PopRegister(register_index);
-}
-
-
-void RegExpMacroAssemblerTracer::PushRegister(
- int register_index,
- StackCheckFlag check_stack_limit) {
- PrintF(" PushRegister(register=%d, %s);\n",
- register_index,
- check_stack_limit ? "check stack limit" : "");
- assembler_->PushRegister(register_index, check_stack_limit);
-}
-
-
-void RegExpMacroAssemblerTracer::AdvanceRegister(int reg, int by) {
- PrintF(" AdvanceRegister(register=%d, by=%d);\n", reg, by);
- assembler_->AdvanceRegister(reg, by);
-}
-
-
-void RegExpMacroAssemblerTracer::SetCurrentPositionFromEnd(int by) {
- PrintF(" SetCurrentPositionFromEnd(by=%d);\n", by);
- assembler_->SetCurrentPositionFromEnd(by);
-}
-
-
-void RegExpMacroAssemblerTracer::SetRegister(int register_index, int to) {
- PrintF(" SetRegister(register=%d, to=%d);\n", register_index, to);
- assembler_->SetRegister(register_index, to);
-}
-
-
-void RegExpMacroAssemblerTracer::WriteCurrentPositionToRegister(int reg,
- int cp_offset) {
- PrintF(" WriteCurrentPositionToRegister(register=%d,cp_offset=%d);\n",
- reg,
- cp_offset);
- assembler_->WriteCurrentPositionToRegister(reg, cp_offset);
-}
-
-
-void RegExpMacroAssemblerTracer::ClearRegisters(int reg_from, int reg_to) {
- PrintF(" ClearRegister(from=%d, to=%d);\n", reg_from, reg_to);
- assembler_->ClearRegisters(reg_from, reg_to);
-}
-
-
-void RegExpMacroAssemblerTracer::ReadCurrentPositionFromRegister(int reg) {
- PrintF(" ReadCurrentPositionFromRegister(register=%d);\n", reg);
- assembler_->ReadCurrentPositionFromRegister(reg);
-}
-
-
-void RegExpMacroAssemblerTracer::WriteStackPointerToRegister(int reg) {
- PrintF(" WriteStackPointerToRegister(register=%d);\n", reg);
- assembler_->WriteStackPointerToRegister(reg);
-}
-
-
-void RegExpMacroAssemblerTracer::ReadStackPointerFromRegister(int reg) {
- PrintF(" ReadStackPointerFromRegister(register=%d);\n", reg);
- assembler_->ReadStackPointerFromRegister(reg);
-}
-
-
-void RegExpMacroAssemblerTracer::LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters) {
- const char* check_msg = check_bounds ? "" : " (unchecked)";
- PrintF(" LoadCurrentCharacter(cp_offset=%d, label[%08x]%s (%d chars));\n",
- cp_offset,
- LabelToInt(on_end_of_input),
- check_msg,
- characters);
- assembler_->LoadCurrentCharacter(cp_offset,
- on_end_of_input,
- check_bounds,
- characters);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckCharacterLT(uc16 limit, Label* on_less) {
- PrintF(" CheckCharacterLT(c='u%04x', label[%08x]);\n",
- limit, LabelToInt(on_less));
- assembler_->CheckCharacterLT(limit, on_less);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckCharacterGT(uc16 limit,
- Label* on_greater) {
- PrintF(" CheckCharacterGT(c='u%04x', label[%08x]);\n",
- limit, LabelToInt(on_greater));
- assembler_->CheckCharacterGT(limit, on_greater);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckCharacter(unsigned c, Label* on_equal) {
- PrintF(" CheckCharacter(c='u%04x', label[%08x]);\n",
- c, LabelToInt(on_equal));
- assembler_->CheckCharacter(c, on_equal);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckAtStart(Label* on_at_start) {
- PrintF(" CheckAtStart(label[%08x]);\n", LabelToInt(on_at_start));
- assembler_->CheckAtStart(on_at_start);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckNotAtStart(Label* on_not_at_start) {
- PrintF(" CheckNotAtStart(label[%08x]);\n", LabelToInt(on_not_at_start));
- assembler_->CheckNotAtStart(on_not_at_start);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckNotCharacter(unsigned c,
- Label* on_not_equal) {
- PrintF(" CheckNotCharacter(c='u%04x', label[%08x]);\n",
- c, LabelToInt(on_not_equal));
- assembler_->CheckNotCharacter(c, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckCharacterAfterAnd(
- unsigned c,
- unsigned mask,
- Label* on_equal) {
- PrintF(" CheckCharacterAfterAnd(c='u%04x', mask=0x%04x, label[%08x]);\n",
- c,
- mask,
- LabelToInt(on_equal));
- assembler_->CheckCharacterAfterAnd(c, mask, on_equal);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckNotCharacterAfterAnd(
- unsigned c,
- unsigned mask,
- Label* on_not_equal) {
- PrintF(" CheckNotCharacterAfterAnd(c='u%04x', mask=0x%04x, label[%08x]);\n",
- c,
- mask,
- LabelToInt(on_not_equal));
- assembler_->CheckNotCharacterAfterAnd(c, mask, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckNotCharacterAfterMinusAnd(
- uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal) {
- PrintF(" CheckNotCharacterAfterMinusAnd(c='u%04x', minus=%04x, mask=0x%04x, "
- "label[%08x]);\n",
- c,
- minus,
- mask,
- LabelToInt(on_not_equal));
- assembler_->CheckNotCharacterAfterMinusAnd(c, minus, mask, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckNotBackReference(int start_reg,
- Label* on_no_match) {
- PrintF(" CheckNotBackReference(register=%d, label[%08x]);\n", start_reg,
- LabelToInt(on_no_match));
- assembler_->CheckNotBackReference(start_reg, on_no_match);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
- PrintF(" CheckNotBackReferenceIgnoreCase(register=%d, label[%08x]);\n",
- start_reg, LabelToInt(on_no_match));
- assembler_->CheckNotBackReferenceIgnoreCase(start_reg, on_no_match);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckNotRegistersEqual(int reg1,
- int reg2,
- Label* on_not_equal) {
- PrintF(" CheckNotRegistersEqual(reg1=%d, reg2=%d, label[%08x]);\n",
- reg1,
- reg2,
- LabelToInt(on_not_equal));
- assembler_->CheckNotRegistersEqual(reg1, reg2, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
- PrintF(" %s(str=\"",
- check_end_of_string ? "CheckCharacters" : "CheckCharactersUnchecked");
- for (int i = 0; i < str.length(); i++) {
- PrintF("u%04x", str[i]);
- }
- PrintF("\", cp_offset=%d, label[%08x])\n",
- cp_offset, LabelToInt(on_failure));
- assembler_->CheckCharacters(str, cp_offset, on_failure, check_end_of_string);
-}
-
-
-bool RegExpMacroAssemblerTracer::CheckSpecialCharacterClass(
- uc16 type,
- Label* on_no_match) {
- bool supported = assembler_->CheckSpecialCharacterClass(type,
- on_no_match);
- PrintF(" CheckSpecialCharacterClass(type='%c', label[%08x]): %s;\n",
- type,
- LabelToInt(on_no_match),
- supported ? "true" : "false");
- return supported;
-}
-
-
-void RegExpMacroAssemblerTracer::IfRegisterLT(int register_index,
- int comparand, Label* if_lt) {
- PrintF(" IfRegisterLT(register=%d, number=%d, label[%08x]);\n",
- register_index, comparand, LabelToInt(if_lt));
- assembler_->IfRegisterLT(register_index, comparand, if_lt);
-}
-
-
-void RegExpMacroAssemblerTracer::IfRegisterEqPos(int register_index,
- Label* if_eq) {
- PrintF(" IfRegisterEqPos(register=%d, label[%08x]);\n",
- register_index, LabelToInt(if_eq));
- assembler_->IfRegisterEqPos(register_index, if_eq);
-}
-
-
-void RegExpMacroAssemblerTracer::IfRegisterGE(int register_index,
- int comparand, Label* if_ge) {
- PrintF(" IfRegisterGE(register=%d, number=%d, label[%08x]);\n",
- register_index, comparand, LabelToInt(if_ge));
- assembler_->IfRegisterGE(register_index, comparand, if_ge);
-}
-
-
-RegExpMacroAssembler::IrregexpImplementation
- RegExpMacroAssemblerTracer::Implementation() {
- return assembler_->Implementation();
-}
-
-
-Handle<Object> RegExpMacroAssemblerTracer::GetCode(Handle<String> source) {
- PrintF(" GetCode(%s);\n", *(source->ToCString()));
- return assembler_->GetCode(source);
-}
-
-}} // namespace v8::internal
diff --git a/src/3rdparty/v8/src/regexp-macro-assembler-tracer.h b/src/3rdparty/v8/src/regexp-macro-assembler-tracer.h
deleted file mode 100644
index 1fb6d54..0000000
--- a/src/3rdparty/v8/src/regexp-macro-assembler-tracer.h
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_REGEXP_MACRO_ASSEMBLER_TRACER_H_
-#define V8_REGEXP_MACRO_ASSEMBLER_TRACER_H_
-
-namespace v8 {
-namespace internal {
-
-// Decorator on a RegExpMacroAssembler that write all calls.
-class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
- public:
- explicit RegExpMacroAssemblerTracer(RegExpMacroAssembler* assembler);
- virtual ~RegExpMacroAssemblerTracer();
- virtual int stack_limit_slack() { return assembler_->stack_limit_slack(); }
- virtual bool CanReadUnaligned() { return assembler_->CanReadUnaligned(); }
- virtual void AdvanceCurrentPosition(int by); // Signed cp change.
- virtual void AdvanceRegister(int reg, int by); // r[reg] += by.
- virtual void Backtrack();
- virtual void Bind(Label* label);
- virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckCharacter(unsigned c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(unsigned c,
- unsigned and_with,
- Label* on_equal);
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
- virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(
- Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- Label* on_no_match);
- virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
- virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(unsigned c,
- unsigned and_with,
- Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 and_with,
- Label* on_not_equal);
- virtual bool CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match);
- virtual void Fail();
- virtual Handle<Object> GetCode(Handle<String> source);
- virtual void GoTo(Label* label);
- virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
- virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
- virtual void IfRegisterEqPos(int reg, Label* if_eq);
- virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1);
- virtual void PopCurrentPosition();
- virtual void PopRegister(int register_index);
- virtual void PushBacktrack(Label* label);
- virtual void PushCurrentPosition();
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit);
- virtual void ReadCurrentPositionFromRegister(int reg);
- virtual void ReadStackPointerFromRegister(int reg);
- virtual void SetCurrentPositionFromEnd(int by);
- virtual void SetRegister(int register_index, int to);
- virtual void Succeed();
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
- virtual void ClearRegisters(int reg_from, int reg_to);
- virtual void WriteStackPointerToRegister(int reg);
- private:
- RegExpMacroAssembler* assembler_;
-};
-
-}} // namespace v8::internal
-
-#endif // V8_REGEXP_MACRO_ASSEMBLER_TRACER_H_
diff --git a/src/3rdparty/v8/src/regexp-macro-assembler.cc b/src/3rdparty/v8/src/regexp-macro-assembler.cc
deleted file mode 100644
index ea41db6..0000000
--- a/src/3rdparty/v8/src/regexp-macro-assembler.cc
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "ast.h"
-#include "assembler.h"
-#include "regexp-stack.h"
-#include "regexp-macro-assembler.h"
-#include "simulator.h"
-
-namespace v8 {
-namespace internal {
-
-RegExpMacroAssembler::RegExpMacroAssembler() {
-}
-
-
-RegExpMacroAssembler::~RegExpMacroAssembler() {
-}
-
-
-bool RegExpMacroAssembler::CanReadUnaligned() {
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- return true;
-#else
- return false;
-#endif
-}
-
-
-#ifndef V8_INTERPRETED_REGEXP // Avoid unused code, e.g., on ARM.
-
-NativeRegExpMacroAssembler::NativeRegExpMacroAssembler() {
-}
-
-
-NativeRegExpMacroAssembler::~NativeRegExpMacroAssembler() {
-}
-
-
-bool NativeRegExpMacroAssembler::CanReadUnaligned() {
-#ifdef V8_TARGET_CAN_READ_UNALIGNED
- return true;
-#else
- return false;
-#endif
-}
-
-const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
- String* subject,
- int start_index) {
- // Not just flat, but ultra flat.
- ASSERT(subject->IsExternalString() || subject->IsSeqString());
- ASSERT(start_index >= 0);
- ASSERT(start_index <= subject->length());
- if (subject->IsAsciiRepresentation()) {
- const byte* address;
- if (StringShape(subject).IsExternal()) {
- const char* data = ExternalAsciiString::cast(subject)->resource()->data();
- address = reinterpret_cast<const byte*>(data);
- } else {
- ASSERT(subject->IsSeqAsciiString());
- char* data = SeqAsciiString::cast(subject)->GetChars();
- address = reinterpret_cast<const byte*>(data);
- }
- return address + start_index;
- }
- const uc16* data;
- if (StringShape(subject).IsExternal()) {
- data = ExternalTwoByteString::cast(subject)->resource()->data();
- } else {
- ASSERT(subject->IsSeqTwoByteString());
- data = SeqTwoByteString::cast(subject)->GetChars();
- }
- return reinterpret_cast<const byte*>(data + start_index);
-}
-
-
-NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
- Handle<Code> regexp_code,
- Handle<String> subject,
- int* offsets_vector,
- int offsets_vector_length,
- int previous_index,
- Isolate* isolate) {
-
- ASSERT(subject->IsFlat());
- ASSERT(previous_index >= 0);
- ASSERT(previous_index <= subject->length());
-
- // No allocations before calling the regexp, but we can't use
- // AssertNoAllocation, since regexps might be preempted, and another thread
- // might do allocation anyway.
-
- String* subject_ptr = *subject;
- // Character offsets into string.
- int start_offset = previous_index;
- int end_offset = subject_ptr->length();
-
- // The string has been flattened, so it it is a cons string it contains the
- // full string in the first part.
- if (StringShape(subject_ptr).IsCons()) {
- ASSERT_EQ(0, ConsString::cast(subject_ptr)->second()->length());
- subject_ptr = ConsString::cast(subject_ptr)->first();
- }
- // Ensure that an underlying string has the same ascii-ness.
- bool is_ascii = subject_ptr->IsAsciiRepresentation();
- ASSERT(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
- // String is now either Sequential or External
- int char_size_shift = is_ascii ? 0 : 1;
- int char_length = end_offset - start_offset;
-
- const byte* input_start =
- StringCharacterPosition(subject_ptr, start_offset);
- int byte_length = char_length << char_size_shift;
- const byte* input_end = input_start + byte_length;
- Result res = Execute(*regexp_code,
- subject_ptr,
- start_offset,
- input_start,
- input_end,
- offsets_vector,
- isolate);
- return res;
-}
-
-
-NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
- Code* code,
- String* input,
- int start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- // Ensure that the minimum stack has been allocated.
- RegExpStackScope stack_scope(isolate);
- Address stack_base = stack_scope.stack()->stack_base();
-
- int direct_call = 0;
- int result = CALL_GENERATED_REGEXP_CODE(code->entry(),
- input,
- start_offset,
- input_start,
- input_end,
- output,
- stack_base,
- direct_call,
- isolate);
- ASSERT(result <= SUCCESS);
- ASSERT(result >= RETRY);
-
- if (result == EXCEPTION && !isolate->has_pending_exception()) {
- // We detected a stack overflow (on the backtrack stack) in RegExp code,
- // but haven't created the exception yet.
- isolate->StackOverflow();
- }
- return static_cast<Result>(result);
-}
-
-
-const byte NativeRegExpMacroAssembler::word_character_map[] = {
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
-
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // '0' - '7'
- 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, // '8' - '9'
-
- 0x00u, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'A' - 'G'
- 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'H' - 'O'
- 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'P' - 'W'
- 0xffu, 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0xffu, // 'X' - 'Z', '_'
-
- 0x00u, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'a' - 'g'
- 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'h' - 'o'
- 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'p' - 'w'
- 0xffu, 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, // 'x' - 'z'
-};
-
-
-int NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16(
- Address byte_offset1,
- Address byte_offset2,
- size_t byte_length,
- Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
- isolate->regexp_macro_assembler_canonicalize();
- // This function is not allowed to cause a garbage collection.
- // A GC might move the calling generated code and invalidate the
- // return address on the stack.
- ASSERT(byte_length % 2 == 0);
- uc16* substring1 = reinterpret_cast<uc16*>(byte_offset1);
- uc16* substring2 = reinterpret_cast<uc16*>(byte_offset2);
- size_t length = byte_length >> 1;
-
- for (size_t i = 0; i < length; i++) {
- unibrow::uchar c1 = substring1[i];
- unibrow::uchar c2 = substring2[i];
- if (c1 != c2) {
- unibrow::uchar s1[1] = { c1 };
- canonicalize->get(c1, '\0', s1);
- if (s1[0] != c2) {
- unibrow::uchar s2[1] = { c2 };
- canonicalize->get(c2, '\0', s2);
- if (s1[0] != s2[0]) {
- return 0;
- }
- }
- }
- }
- return 1;
-}
-
-
-Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer,
- Address* stack_base,
- Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- RegExpStack* regexp_stack = isolate->regexp_stack();
- size_t size = regexp_stack->stack_capacity();
- Address old_stack_base = regexp_stack->stack_base();
- ASSERT(old_stack_base == *stack_base);
- ASSERT(stack_pointer <= old_stack_base);
- ASSERT(static_cast<size_t>(old_stack_base - stack_pointer) <= size);
- Address new_stack_base = regexp_stack->EnsureCapacity(size * 2);
- if (new_stack_base == NULL) {
- return NULL;
- }
- *stack_base = new_stack_base;
- intptr_t stack_content_size = old_stack_base - stack_pointer;
- return new_stack_base - stack_content_size;
-}
-
-#endif // V8_INTERPRETED_REGEXP
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/regexp-macro-assembler.h b/src/3rdparty/v8/src/regexp-macro-assembler.h
deleted file mode 100644
index 1268e78..0000000
--- a/src/3rdparty/v8/src/regexp-macro-assembler.h
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_REGEXP_MACRO_ASSEMBLER_H_
-#define V8_REGEXP_MACRO_ASSEMBLER_H_
-
-#include "ast.h"
-
-namespace v8 {
-namespace internal {
-
-struct DisjunctDecisionRow {
- RegExpCharacterClass cc;
- Label* on_match;
-};
-
-
-class RegExpMacroAssembler {
- public:
- // The implementation must be able to handle at least:
- static const int kMaxRegister = (1 << 16) - 1;
- static const int kMaxCPOffset = (1 << 15) - 1;
- static const int kMinCPOffset = -(1 << 15);
- enum IrregexpImplementation {
- kIA32Implementation,
- kARMImplementation,
- kMIPSImplementation,
- kX64Implementation,
- kBytecodeImplementation
- };
-
- enum StackCheckFlag {
- kNoStackLimitCheck = false,
- kCheckStackLimit = true
- };
-
- RegExpMacroAssembler();
- virtual ~RegExpMacroAssembler();
- // The maximal number of pushes between stack checks. Users must supply
- // kCheckStackLimit flag to push operations (instead of kNoStackLimitCheck)
- // at least once for every stack_limit() pushes that are executed.
- virtual int stack_limit_slack() = 0;
- virtual bool CanReadUnaligned();
- virtual void AdvanceCurrentPosition(int by) = 0; // Signed cp change.
- virtual void AdvanceRegister(int reg, int by) = 0; // r[reg] += by.
- // Continues execution from the position pushed on the top of the backtrack
- // stack by an earlier PushBacktrack(Label*).
- virtual void Backtrack() = 0;
- virtual void Bind(Label* label) = 0;
- virtual void CheckAtStart(Label* on_at_start) = 0;
- // Dispatch after looking the current character up in a 2-bits-per-entry
- // map. The destinations vector has up to 4 labels.
- virtual void CheckCharacter(unsigned c, Label* on_equal) = 0;
- // Bitwise and the current character with the given constant and then
- // check for a match with c.
- virtual void CheckCharacterAfterAnd(unsigned c,
- unsigned and_with,
- Label* on_equal) = 0;
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater) = 0;
- virtual void CheckCharacterLT(uc16 limit, Label* on_less) = 0;
- // Check the current character for a match with a literal string. If we
- // fail to match then goto the on_failure label. If check_eos is set then
- // the end of input always fails. If check_eos is clear then it is the
- // caller's responsibility to ensure that the end of string is not hit.
- // If the label is NULL then we should pop a backtrack address off
- // the stack and go to that.
- virtual void CheckCharacters(
- Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_eos) = 0;
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position) = 0;
- virtual void CheckNotAtStart(Label* on_not_at_start) = 0;
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match) = 0;
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- Label* on_no_match) = 0;
- // Check the current character for a match with a literal character. If we
- // fail to match then goto the on_failure label. End of input always
- // matches. If the label is NULL then we should pop a backtrack address off
- // the stack and go to that.
- virtual void CheckNotCharacter(unsigned c, Label* on_not_equal) = 0;
- virtual void CheckNotCharacterAfterAnd(unsigned c,
- unsigned and_with,
- Label* on_not_equal) = 0;
- // Subtract a constant from the current character, then or with the given
- // constant and then check for a match with c.
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 and_with,
- Label* on_not_equal) = 0;
- virtual void CheckNotRegistersEqual(int reg1,
- int reg2,
- Label* on_not_equal) = 0;
-
- // Checks whether the given offset from the current position is before
- // the end of the string. May overwrite the current character.
- virtual void CheckPosition(int cp_offset, Label* on_outside_input) {
- LoadCurrentCharacter(cp_offset, on_outside_input, true);
- }
- // Check whether a standard/default character class matches the current
- // character. Returns false if the type of special character class does
- // not have custom support.
- // May clobber the current loaded character.
- virtual bool CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match) {
- return false;
- }
- virtual void Fail() = 0;
- virtual Handle<Object> GetCode(Handle<String> source) = 0;
- virtual void GoTo(Label* label) = 0;
- // Check whether a register is >= a given constant and go to a label if it
- // is. Backtracks instead if the label is NULL.
- virtual void IfRegisterGE(int reg, int comparand, Label* if_ge) = 0;
- // Check whether a register is < a given constant and go to a label if it is.
- // Backtracks instead if the label is NULL.
- virtual void IfRegisterLT(int reg, int comparand, Label* if_lt) = 0;
- // Check whether a register is == to the current position and go to a
- // label if it is.
- virtual void IfRegisterEqPos(int reg, Label* if_eq) = 0;
- virtual IrregexpImplementation Implementation() = 0;
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1) = 0;
- virtual void PopCurrentPosition() = 0;
- virtual void PopRegister(int register_index) = 0;
- // Pushes the label on the backtrack stack, so that a following Backtrack
- // will go to this label. Always checks the backtrack stack limit.
- virtual void PushBacktrack(Label* label) = 0;
- virtual void PushCurrentPosition() = 0;
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit) = 0;
- virtual void ReadCurrentPositionFromRegister(int reg) = 0;
- virtual void ReadStackPointerFromRegister(int reg) = 0;
- virtual void SetCurrentPositionFromEnd(int by) = 0;
- virtual void SetRegister(int register_index, int to) = 0;
- virtual void Succeed() = 0;
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset) = 0;
- virtual void ClearRegisters(int reg_from, int reg_to) = 0;
- virtual void WriteStackPointerToRegister(int reg) = 0;
-};
-
-
-#ifndef V8_INTERPRETED_REGEXP // Avoid compiling unused code.
-
-class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
- public:
- // Type of input string to generate code for.
- enum Mode { ASCII = 1, UC16 = 2 };
-
- // Result of calling generated native RegExp code.
- // RETRY: Something significant changed during execution, and the matching
- // should be retried from scratch.
- // EXCEPTION: Something failed during execution. If no exception has been
- // thrown, it's an internal out-of-memory, and the caller should
- // throw the exception.
- // FAILURE: Matching failed.
- // SUCCESS: Matching succeeded, and the output array has been filled with
- // capture positions.
- enum Result { RETRY = -2, EXCEPTION = -1, FAILURE = 0, SUCCESS = 1 };
-
- NativeRegExpMacroAssembler();
- virtual ~NativeRegExpMacroAssembler();
- virtual bool CanReadUnaligned();
-
- static Result Match(Handle<Code> regexp,
- Handle<String> subject,
- int* offsets_vector,
- int offsets_vector_length,
- int previous_index,
- Isolate* isolate);
-
- // Compares two-byte strings case insensitively.
- // Called from generated RegExp code.
- static int CaseInsensitiveCompareUC16(Address byte_offset1,
- Address byte_offset2,
- size_t byte_length,
- Isolate* isolate);
-
- // Called from RegExp if the backtrack stack limit is hit.
- // Tries to expand the stack. Returns the new stack-pointer if
- // successful, and updates the stack_top address, or returns 0 if unable
- // to grow the stack.
- // This function must not trigger a garbage collection.
- static Address GrowStack(Address stack_pointer, Address* stack_top,
- Isolate* isolate);
-
- static const byte* StringCharacterPosition(String* subject, int start_index);
-
- // Byte map of ASCII characters with a 0xff if the character is a word
- // character (digit, letter or underscore) and 0x00 otherwise.
- // Used by generated RegExp code.
- static const byte word_character_map[128];
-
- static Address word_character_map_address() {
- return const_cast<Address>(&word_character_map[0]);
- }
-
- static Result Execute(Code* code,
- String* input,
- int start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- Isolate* isolate);
-};
-
-#endif // V8_INTERPRETED_REGEXP
-
-} } // namespace v8::internal
-
-#endif // V8_REGEXP_MACRO_ASSEMBLER_H_
diff --git a/src/3rdparty/v8/src/regexp-stack.cc b/src/3rdparty/v8/src/regexp-stack.cc
deleted file mode 100644
index ff9547f..0000000
--- a/src/3rdparty/v8/src/regexp-stack.cc
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "regexp-stack.h"
-
-namespace v8 {
-namespace internal {
-
-RegExpStackScope::RegExpStackScope(Isolate* isolate)
- : regexp_stack_(isolate->regexp_stack()) {
- // Initialize, if not already initialized.
- regexp_stack_->EnsureCapacity(0);
-}
-
-
-RegExpStackScope::~RegExpStackScope() {
- ASSERT(Isolate::Current() == regexp_stack_->isolate_);
- // Reset the buffer if it has grown.
- regexp_stack_->Reset();
-}
-
-
-RegExpStack::RegExpStack()
- : isolate_(NULL) {
-}
-
-
-RegExpStack::~RegExpStack() {
-}
-
-
-char* RegExpStack::ArchiveStack(char* to) {
- size_t size = sizeof(thread_local_);
- memcpy(reinterpret_cast<void*>(to),
- &thread_local_,
- size);
- thread_local_ = ThreadLocal();
- return to + size;
-}
-
-
-char* RegExpStack::RestoreStack(char* from) {
- size_t size = sizeof(thread_local_);
- memcpy(&thread_local_, reinterpret_cast<void*>(from), size);
- return from + size;
-}
-
-
-void RegExpStack::Reset() {
- if (thread_local_.memory_size_ > kMinimumStackSize) {
- DeleteArray(thread_local_.memory_);
- thread_local_ = ThreadLocal();
- }
-}
-
-
-void RegExpStack::ThreadLocal::Free() {
- if (memory_size_ > 0) {
- DeleteArray(memory_);
- Clear();
- }
-}
-
-
-Address RegExpStack::EnsureCapacity(size_t size) {
- if (size > kMaximumStackSize) return NULL;
- if (size < kMinimumStackSize) size = kMinimumStackSize;
- if (thread_local_.memory_size_ < size) {
- Address new_memory = NewArray<byte>(static_cast<int>(size));
- if (thread_local_.memory_size_ > 0) {
- // Copy original memory into top of new memory.
- memcpy(reinterpret_cast<void*>(
- new_memory + size - thread_local_.memory_size_),
- reinterpret_cast<void*>(thread_local_.memory_),
- thread_local_.memory_size_);
- DeleteArray(thread_local_.memory_);
- }
- thread_local_.memory_ = new_memory;
- thread_local_.memory_size_ = size;
- thread_local_.limit_ = new_memory + kStackLimitSlack * kPointerSize;
- }
- return thread_local_.memory_ + thread_local_.memory_size_;
-}
-
-
-}} // namespace v8::internal
diff --git a/src/3rdparty/v8/src/regexp-stack.h b/src/3rdparty/v8/src/regexp-stack.h
deleted file mode 100644
index 5943206..0000000
--- a/src/3rdparty/v8/src/regexp-stack.h
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_REGEXP_STACK_H_
-#define V8_REGEXP_STACK_H_
-
-namespace v8 {
-namespace internal {
-
-class RegExpStack;
-
-// Maintains a per-v8thread stack area that can be used by irregexp
-// implementation for its backtracking stack.
-// Since there is only one stack area, the Irregexp implementation is not
-// re-entrant. I.e., no regular expressions may be executed in the same thread
-// during a preempted Irregexp execution.
-class RegExpStackScope {
- public:
- // Create and delete an instance to control the life-time of a growing stack.
-
- // Initializes the stack memory area if necessary.
- explicit RegExpStackScope(Isolate* isolate);
- ~RegExpStackScope(); // Releases the stack if it has grown.
-
- RegExpStack* stack() const { return regexp_stack_; }
-
- private:
- RegExpStack* regexp_stack_;
-
- DISALLOW_COPY_AND_ASSIGN(RegExpStackScope);
-};
-
-
-class RegExpStack {
- public:
- // Number of allocated locations on the stack below the limit.
- // No sequence of pushes must be longer that this without doing a stack-limit
- // check.
- static const int kStackLimitSlack = 32;
-
- // Gives the top of the memory used as stack.
- Address stack_base() {
- ASSERT(thread_local_.memory_size_ != 0);
- return thread_local_.memory_ + thread_local_.memory_size_;
- }
-
- // The total size of the memory allocated for the stack.
- size_t stack_capacity() { return thread_local_.memory_size_; }
-
- // If the stack pointer gets below the limit, we should react and
- // either grow the stack or report an out-of-stack exception.
- // There is only a limited number of locations below the stack limit,
- // so users of the stack should check the stack limit during any
- // sequence of pushes longer that this.
- Address* limit_address() { return &(thread_local_.limit_); }
-
- // Ensures that there is a memory area with at least the specified size.
- // If passing zero, the default/minimum size buffer is allocated.
- Address EnsureCapacity(size_t size);
-
- // Thread local archiving.
- static int ArchiveSpacePerThread() {
- return static_cast<int>(sizeof(ThreadLocal));
- }
- char* ArchiveStack(char* to);
- char* RestoreStack(char* from);
- void FreeThreadResources() { thread_local_.Free(); }
- private:
- RegExpStack();
- ~RegExpStack();
-
- // Artificial limit used when no memory has been allocated.
- static const uintptr_t kMemoryTop = static_cast<uintptr_t>(-1);
-
- // Minimal size of allocated stack area.
- static const size_t kMinimumStackSize = 1 * KB;
-
- // Maximal size of allocated stack area.
- static const size_t kMaximumStackSize = 64 * MB;
-
- // Structure holding the allocated memory, size and limit.
- struct ThreadLocal {
- ThreadLocal() { Clear(); }
- // If memory_size_ > 0 then memory_ must be non-NULL.
- Address memory_;
- size_t memory_size_;
- Address limit_;
- void Clear() {
- memory_ = NULL;
- memory_size_ = 0;
- limit_ = reinterpret_cast<Address>(kMemoryTop);
- }
- void Free();
- };
-
- // Address of allocated memory.
- Address memory_address() {
- return reinterpret_cast<Address>(&thread_local_.memory_);
- }
-
- // Address of size of allocated memory.
- Address memory_size_address() {
- return reinterpret_cast<Address>(&thread_local_.memory_size_);
- }
-
- // Resets the buffer if it has grown beyond the default/minimum size.
- // After this, the buffer is either the default size, or it is empty, so
- // you have to call EnsureCapacity before using it again.
- void Reset();
-
- ThreadLocal thread_local_;
- Isolate* isolate_;
-
- friend class ExternalReference;
- friend class Isolate;
- friend class RegExpStackScope;
-
- DISALLOW_COPY_AND_ASSIGN(RegExpStack);
-};
-
-}} // namespace v8::internal
-
-#endif // V8_REGEXP_STACK_H_
diff --git a/src/3rdparty/v8/src/regexp.js b/src/3rdparty/v8/src/regexp.js
deleted file mode 100644
index f68dee6..0000000
--- a/src/3rdparty/v8/src/regexp.js
+++ /dev/null
@@ -1,483 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Expect $Object = global.Object;
-// Expect $Array = global.Array;
-
-const $RegExp = global.RegExp;
-
-// A recursive descent parser for Patterns according to the grammar of
-// ECMA-262 15.10.1, with deviations noted below.
-function DoConstructRegExp(object, pattern, flags) {
- // RegExp : Called as constructor; see ECMA-262, section 15.10.4.
- if (IS_REGEXP(pattern)) {
- if (!IS_UNDEFINED(flags)) {
- throw MakeTypeError('regexp_flags', []);
- }
- flags = (pattern.global ? 'g' : '')
- + (pattern.ignoreCase ? 'i' : '')
- + (pattern.multiline ? 'm' : '');
- pattern = pattern.source;
- }
-
- pattern = IS_UNDEFINED(pattern) ? '' : ToString(pattern);
- flags = IS_UNDEFINED(flags) ? '' : ToString(flags);
-
- var global = false;
- var ignoreCase = false;
- var multiline = false;
-
- for (var i = 0; i < flags.length; i++) {
- var c = %_CallFunction(flags, i, StringCharAt);
- switch (c) {
- case 'g':
- // Allow duplicate flags to be consistent with JSC and others.
- global = true;
- break;
- case 'i':
- ignoreCase = true;
- break;
- case 'm':
- multiline = true;
- break;
- default:
- // Ignore flags that have no meaning to be consistent with
- // JSC.
- break;
- }
- }
-
- %RegExpInitializeObject(object, pattern, global, ignoreCase, multiline);
-
- // Call internal function to compile the pattern.
- %RegExpCompile(object, pattern, flags);
-}
-
-
-function RegExpConstructor(pattern, flags) {
- if (%_IsConstructCall()) {
- DoConstructRegExp(this, pattern, flags);
- } else {
- // RegExp : Called as function; see ECMA-262, section 15.10.3.1.
- if (IS_REGEXP(pattern) && IS_UNDEFINED(flags)) {
- return pattern;
- }
- return new $RegExp(pattern, flags);
- }
-}
-
-
-// Deprecated RegExp.prototype.compile method. We behave like the constructor
-// were called again. In SpiderMonkey, this method returns the regexp object.
-// In JSC, it returns undefined. For compatibility with JSC, we match their
-// behavior.
-function CompileRegExp(pattern, flags) {
- // Both JSC and SpiderMonkey treat a missing pattern argument as the
- // empty subject string, and an actual undefined value passed as the
- // pattern as the string 'undefined'. Note that JSC is inconsistent
- // here, treating undefined values differently in
- // RegExp.prototype.compile and in the constructor, where they are
- // the empty string. For compatibility with JSC, we match their
- // behavior.
- if (IS_UNDEFINED(pattern) && %_ArgumentsLength() != 0) {
- DoConstructRegExp(this, 'undefined', flags);
- } else {
- DoConstructRegExp(this, pattern, flags);
- }
-}
-
-
-function DoRegExpExec(regexp, string, index) {
- var result = %_RegExpExec(regexp, string, index, lastMatchInfo);
- if (result !== null) lastMatchInfoOverride = null;
- return result;
-}
-
-
-function BuildResultFromMatchInfo(lastMatchInfo, s) {
- var numResults = NUMBER_OF_CAPTURES(lastMatchInfo) >> 1;
- var start = lastMatchInfo[CAPTURE0];
- var end = lastMatchInfo[CAPTURE1];
- var result = %_RegExpConstructResult(numResults, start, s);
- if (start + 1 == end) {
- result[0] = %_StringCharAt(s, start);
- } else {
- result[0] = %_SubString(s, start, end);
- }
- var j = REGEXP_FIRST_CAPTURE + 2;
- for (var i = 1; i < numResults; i++) {
- start = lastMatchInfo[j++];
- end = lastMatchInfo[j++];
- if (end != -1) {
- if (start + 1 == end) {
- result[i] = %_StringCharAt(s, start);
- } else {
- result[i] = %_SubString(s, start, end);
- }
- } else {
- // Make sure the element is present. Avoid reading the undefined
- // property from the global object since this may change.
- result[i] = void 0;
- }
- }
- return result;
-}
-
-
-function RegExpExecNoTests(regexp, string, start) {
- // Must be called with RegExp, string and positive integer as arguments.
- var matchInfo = %_RegExpExec(regexp, string, start, lastMatchInfo);
- if (matchInfo !== null) {
- lastMatchInfoOverride = null;
- return BuildResultFromMatchInfo(matchInfo, string);
- }
- return null;
-}
-
-
-function RegExpExec(string) {
- if (!IS_REGEXP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['RegExp.prototype.exec', this]);
- }
-
- if (%_ArgumentsLength() === 0) {
- var regExpInput = LAST_INPUT(lastMatchInfo);
- if (IS_UNDEFINED(regExpInput)) {
- throw MakeError('no_input_to_regexp', [this]);
- }
- string = regExpInput;
- }
- string = TO_STRING_INLINE(string);
- var lastIndex = this.lastIndex;
-
- // Conversion is required by the ES5 specification (RegExp.prototype.exec
- // algorithm, step 5) even if the value is discarded for non-global RegExps.
- var i = TO_INTEGER(lastIndex);
-
- var global = this.global;
- if (global) {
- if (i < 0 || i > string.length) {
- this.lastIndex = 0;
- return null;
- }
- } else {
- i = 0;
- }
-
- %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
- // matchIndices is either null or the lastMatchInfo array.
- var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
-
- if (matchIndices === null) {
- if (global) this.lastIndex = 0;
- return null;
- }
-
- // Successful match.
- lastMatchInfoOverride = null;
- if (global) {
- this.lastIndex = lastMatchInfo[CAPTURE1];
- }
- return BuildResultFromMatchInfo(matchIndices, string);
-}
-
-
-// One-element cache for the simplified test regexp.
-var regexp_key;
-var regexp_val;
-
-// Section 15.10.6.3 doesn't actually make sense, but the intention seems to be
-// that test is defined in terms of String.prototype.exec. However, it probably
-// means the original value of String.prototype.exec, which is what everybody
-// else implements.
-function RegExpTest(string) {
- if (!IS_REGEXP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['RegExp.prototype.test', this]);
- }
- if (%_ArgumentsLength() == 0) {
- var regExpInput = LAST_INPUT(lastMatchInfo);
- if (IS_UNDEFINED(regExpInput)) {
- throw MakeError('no_input_to_regexp', [this]);
- }
- string = regExpInput;
- }
-
- string = TO_STRING_INLINE(string);
-
- var lastIndex = this.lastIndex;
-
- // Conversion is required by the ES5 specification (RegExp.prototype.exec
- // algorithm, step 5) even if the value is discarded for non-global RegExps.
- var i = TO_INTEGER(lastIndex);
-
- if (this.global) {
- if (i < 0 || i > string.length) {
- this.lastIndex = 0;
- return false;
- }
- %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
- // matchIndices is either null or the lastMatchInfo array.
- var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
- if (matchIndices === null) {
- this.lastIndex = 0;
- return false;
- }
- lastMatchInfoOverride = null;
- this.lastIndex = lastMatchInfo[CAPTURE1];
- return true;
- } else {
- // Non-global regexp.
- // Remove irrelevant preceeding '.*' in a non-global test regexp.
- // The expression checks whether this.source starts with '.*' and
- // that the third char is not a '?'.
- if (%_StringCharCodeAt(this.source, 0) == 46 && // '.'
- %_StringCharCodeAt(this.source, 1) == 42 && // '*'
- %_StringCharCodeAt(this.source, 2) != 63) { // '?'
- if (!%_ObjectEquals(regexp_key, this)) {
- regexp_key = this;
- regexp_val = new $RegExp(SubString(this.source, 2, this.source.length),
- (!this.ignoreCase
- ? !this.multiline ? "" : "m"
- : !this.multiline ? "i" : "im"));
- }
- if (%_RegExpExec(regexp_val, string, 0, lastMatchInfo) === null) {
- return false;
- }
- }
- %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
- // matchIndices is either null or the lastMatchInfo array.
- var matchIndices = %_RegExpExec(this, string, 0, lastMatchInfo);
- if (matchIndices === null) return false;
- lastMatchInfoOverride = null;
- return true;
- }
-}
-
-
-function RegExpToString() {
- // If this.source is an empty string, output /(?:)/.
- // http://bugzilla.mozilla.org/show_bug.cgi?id=225550
- // ecma_2/RegExp/properties-001.js.
- var src = this.source ? this.source : '(?:)';
- var result = '/' + src + '/';
- if (this.global) result += 'g';
- if (this.ignoreCase) result += 'i';
- if (this.multiline) result += 'm';
- return result;
-}
-
-
-// Getters for the static properties lastMatch, lastParen, leftContext, and
-// rightContext of the RegExp constructor. The properties are computed based
-// on the captures array of the last successful match and the subject string
-// of the last successful match.
-function RegExpGetLastMatch() {
- if (lastMatchInfoOverride !== null) {
- return lastMatchInfoOverride[0];
- }
- var regExpSubject = LAST_SUBJECT(lastMatchInfo);
- return SubString(regExpSubject,
- lastMatchInfo[CAPTURE0],
- lastMatchInfo[CAPTURE1]);
-}
-
-
-function RegExpGetLastParen() {
- if (lastMatchInfoOverride) {
- var override = lastMatchInfoOverride;
- if (override.length <= 3) return '';
- return override[override.length - 3];
- }
- var length = NUMBER_OF_CAPTURES(lastMatchInfo);
- if (length <= 2) return ''; // There were no captures.
- // We match the SpiderMonkey behavior: return the substring defined by the
- // last pair (after the first pair) of elements of the capture array even if
- // it is empty.
- var regExpSubject = LAST_SUBJECT(lastMatchInfo);
- var start = lastMatchInfo[CAPTURE(length - 2)];
- var end = lastMatchInfo[CAPTURE(length - 1)];
- if (start != -1 && end != -1) {
- return SubString(regExpSubject, start, end);
- }
- return "";
-}
-
-
-function RegExpGetLeftContext() {
- var start_index;
- var subject;
- if (!lastMatchInfoOverride) {
- start_index = lastMatchInfo[CAPTURE0];
- subject = LAST_SUBJECT(lastMatchInfo);
- } else {
- var override = lastMatchInfoOverride;
- start_index = override[override.length - 2];
- subject = override[override.length - 1];
- }
- return SubString(subject, 0, start_index);
-}
-
-
-function RegExpGetRightContext() {
- var start_index;
- var subject;
- if (!lastMatchInfoOverride) {
- start_index = lastMatchInfo[CAPTURE1];
- subject = LAST_SUBJECT(lastMatchInfo);
- } else {
- var override = lastMatchInfoOverride;
- subject = override[override.length - 1];
- start_index = override[override.length - 2] + subject.length;
- }
- return SubString(subject, start_index, subject.length);
-}
-
-
-// The properties $1..$9 are the first nine capturing substrings of the last
-// successful match, or ''. The function RegExpMakeCaptureGetter will be
-// called with indices from 1 to 9.
-function RegExpMakeCaptureGetter(n) {
- return function() {
- if (lastMatchInfoOverride) {
- if (n < lastMatchInfoOverride.length - 2) return lastMatchInfoOverride[n];
- return '';
- }
- var index = n * 2;
- if (index >= NUMBER_OF_CAPTURES(lastMatchInfo)) return '';
- var matchStart = lastMatchInfo[CAPTURE(index)];
- var matchEnd = lastMatchInfo[CAPTURE(index + 1)];
- if (matchStart == -1 || matchEnd == -1) return '';
- return SubString(LAST_SUBJECT(lastMatchInfo), matchStart, matchEnd);
- };
-}
-
-
-// Property of the builtins object for recording the result of the last
-// regexp match. The property lastMatchInfo includes the matchIndices
-// array of the last successful regexp match (an array of start/end index
-// pairs for the match and all the captured substrings), the invariant is
-// that there are at least two capture indeces. The array also contains
-// the subject string for the last successful match.
-var lastMatchInfo = new InternalArray(
- 2, // REGEXP_NUMBER_OF_CAPTURES
- "", // Last subject.
- void 0, // Last input - settable with RegExpSetInput.
- 0, // REGEXP_FIRST_CAPTURE + 0
- 0 // REGEXP_FIRST_CAPTURE + 1
-);
-
-// Override last match info with an array of actual substrings.
-// Used internally by replace regexp with function.
-// The array has the format of an "apply" argument for a replacement
-// function.
-var lastMatchInfoOverride = null;
-
-// -------------------------------------------------------------------
-
-function SetupRegExp() {
- %FunctionSetInstanceClassName($RegExp, 'RegExp');
- %FunctionSetPrototype($RegExp, new $Object());
- %SetProperty($RegExp.prototype, 'constructor', $RegExp, DONT_ENUM);
- %SetCode($RegExp, RegExpConstructor);
-
- InstallFunctions($RegExp.prototype, DONT_ENUM, $Array(
- "exec", RegExpExec,
- "test", RegExpTest,
- "toString", RegExpToString,
- "compile", CompileRegExp
- ));
-
- // The length of compile is 1 in SpiderMonkey.
- %FunctionSetLength($RegExp.prototype.compile, 1);
-
- // The properties input, $input, and $_ are aliases for each other. When this
- // value is set the value it is set to is coerced to a string.
- // Getter and setter for the input.
- function RegExpGetInput() {
- var regExpInput = LAST_INPUT(lastMatchInfo);
- return IS_UNDEFINED(regExpInput) ? "" : regExpInput;
- }
- function RegExpSetInput(string) {
- LAST_INPUT(lastMatchInfo) = ToString(string);
- };
-
- %DefineAccessor($RegExp, 'input', GETTER, RegExpGetInput, DONT_DELETE);
- %DefineAccessor($RegExp, 'input', SETTER, RegExpSetInput, DONT_DELETE);
- %DefineAccessor($RegExp, '$_', GETTER, RegExpGetInput, DONT_ENUM | DONT_DELETE);
- %DefineAccessor($RegExp, '$_', SETTER, RegExpSetInput, DONT_ENUM | DONT_DELETE);
- %DefineAccessor($RegExp, '$input', GETTER, RegExpGetInput, DONT_ENUM | DONT_DELETE);
- %DefineAccessor($RegExp, '$input', SETTER, RegExpSetInput, DONT_ENUM | DONT_DELETE);
-
- // The properties multiline and $* are aliases for each other. When this
- // value is set in SpiderMonkey, the value it is set to is coerced to a
- // boolean. We mimic that behavior with a slight difference: in SpiderMonkey
- // the value of the expression 'RegExp.multiline = null' (for instance) is the
- // boolean false (ie, the value after coercion), while in V8 it is the value
- // null (ie, the value before coercion).
-
- // Getter and setter for multiline.
- var multiline = false;
- function RegExpGetMultiline() { return multiline; };
- function RegExpSetMultiline(flag) { multiline = flag ? true : false; };
-
- %DefineAccessor($RegExp, 'multiline', GETTER, RegExpGetMultiline, DONT_DELETE);
- %DefineAccessor($RegExp, 'multiline', SETTER, RegExpSetMultiline, DONT_DELETE);
- %DefineAccessor($RegExp, '$*', GETTER, RegExpGetMultiline, DONT_ENUM | DONT_DELETE);
- %DefineAccessor($RegExp, '$*', SETTER, RegExpSetMultiline, DONT_ENUM | DONT_DELETE);
-
-
- function NoOpSetter(ignored) {}
-
-
- // Static properties set by a successful match.
- %DefineAccessor($RegExp, 'lastMatch', GETTER, RegExpGetLastMatch, DONT_DELETE);
- %DefineAccessor($RegExp, 'lastMatch', SETTER, NoOpSetter, DONT_DELETE);
- %DefineAccessor($RegExp, '$&', GETTER, RegExpGetLastMatch, DONT_ENUM | DONT_DELETE);
- %DefineAccessor($RegExp, '$&', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
- %DefineAccessor($RegExp, 'lastParen', GETTER, RegExpGetLastParen, DONT_DELETE);
- %DefineAccessor($RegExp, 'lastParen', SETTER, NoOpSetter, DONT_DELETE);
- %DefineAccessor($RegExp, '$+', GETTER, RegExpGetLastParen, DONT_ENUM | DONT_DELETE);
- %DefineAccessor($RegExp, '$+', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
- %DefineAccessor($RegExp, 'leftContext', GETTER, RegExpGetLeftContext, DONT_DELETE);
- %DefineAccessor($RegExp, 'leftContext', SETTER, NoOpSetter, DONT_DELETE);
- %DefineAccessor($RegExp, '$`', GETTER, RegExpGetLeftContext, DONT_ENUM | DONT_DELETE);
- %DefineAccessor($RegExp, '$`', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
- %DefineAccessor($RegExp, 'rightContext', GETTER, RegExpGetRightContext, DONT_DELETE);
- %DefineAccessor($RegExp, 'rightContext', SETTER, NoOpSetter, DONT_DELETE);
- %DefineAccessor($RegExp, "$'", GETTER, RegExpGetRightContext, DONT_ENUM | DONT_DELETE);
- %DefineAccessor($RegExp, "$'", SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
-
- for (var i = 1; i < 10; ++i) {
- %DefineAccessor($RegExp, '$' + i, GETTER, RegExpMakeCaptureGetter(i), DONT_DELETE);
- %DefineAccessor($RegExp, '$' + i, SETTER, NoOpSetter, DONT_DELETE);
- }
-}
-
-
-SetupRegExp();
diff --git a/src/3rdparty/v8/src/register-allocator-inl.h b/src/3rdparty/v8/src/register-allocator-inl.h
deleted file mode 100644
index 5a68ab0..0000000
--- a/src/3rdparty/v8/src/register-allocator-inl.h
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_REGISTER_ALLOCATOR_INL_H_
-#define V8_REGISTER_ALLOCATOR_INL_H_
-
-#include "codegen.h"
-#include "register-allocator.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/register-allocator-ia32-inl.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/register-allocator-x64-inl.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/register-allocator-arm-inl.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/register-allocator-mips-inl.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-
-namespace v8 {
-namespace internal {
-
-Result::Result(const Result& other) {
- other.CopyTo(this);
-}
-
-
-Result& Result::operator=(const Result& other) {
- if (this != &other) {
- Unuse();
- other.CopyTo(this);
- }
- return *this;
-}
-
-
-Result::~Result() {
- if (is_register()) {
- CodeGeneratorScope::Current(Isolate::Current())->allocator()->Unuse(reg());
- }
-}
-
-
-void Result::Unuse() {
- if (is_register()) {
- CodeGeneratorScope::Current(Isolate::Current())->allocator()->Unuse(reg());
- }
- invalidate();
-}
-
-
-void Result::CopyTo(Result* destination) const {
- destination->value_ = value_;
- if (is_register()) {
- CodeGeneratorScope::Current(Isolate::Current())->allocator()->Use(reg());
- }
-}
-
-
-bool RegisterAllocator::is_used(Register reg) {
- return registers_.is_used(ToNumber(reg));
-}
-
-
-int RegisterAllocator::count(Register reg) {
- return registers_.count(ToNumber(reg));
-}
-
-
-void RegisterAllocator::Use(Register reg) {
- registers_.Use(ToNumber(reg));
-}
-
-
-void RegisterAllocator::Unuse(Register reg) {
- registers_.Unuse(ToNumber(reg));
-}
-
-
-TypeInfo Result::type_info() const {
- ASSERT(is_valid());
- return TypeInfo::FromInt(TypeInfoField::decode(value_));
-}
-
-
-void Result::set_type_info(TypeInfo info) {
- ASSERT(is_valid());
- value_ &= ~TypeInfoField::mask();
- value_ |= TypeInfoField::encode(info.ToInt());
-}
-
-
-bool Result::is_number() const {
- return type_info().IsNumber();
-}
-
-
-bool Result::is_smi() const {
- return type_info().IsSmi();
-}
-
-
-bool Result::is_integer32() const {
- return type_info().IsInteger32();
-}
-
-
-bool Result::is_double() const {
- return type_info().IsDouble();
-}
-
-} } // namespace v8::internal
-
-#endif // V8_REGISTER_ALLOCATOR_INL_H_
diff --git a/src/3rdparty/v8/src/register-allocator.cc b/src/3rdparty/v8/src/register-allocator.cc
deleted file mode 100644
index cb5e35f..0000000
--- a/src/3rdparty/v8/src/register-allocator.cc
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-
-Result::Result(Register reg, TypeInfo info) {
- ASSERT(reg.is_valid() && !RegisterAllocator::IsReserved(reg));
- CodeGeneratorScope::Current(Isolate::Current())->allocator()->Use(reg);
- value_ = TypeField::encode(REGISTER)
- | TypeInfoField::encode(info.ToInt())
- | DataField::encode(reg.code_);
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-
-Result RegisterAllocator::AllocateWithoutSpilling() {
- // Return the first free register, if any.
- int num = registers_.ScanForFreeRegister();
- if (num == RegisterAllocator::kInvalidRegister) {
- return Result();
- }
- return Result(RegisterAllocator::ToRegister(num));
-}
-
-
-Result RegisterAllocator::Allocate() {
- Result result = AllocateWithoutSpilling();
- if (!result.is_valid()) {
- // Ask the current frame to spill a register.
- ASSERT(cgen_->has_valid_frame());
- Register free_reg = cgen_->frame()->SpillAnyRegister();
- if (free_reg.is_valid()) {
- ASSERT(!is_used(free_reg));
- return Result(free_reg);
- }
- }
- return result;
-}
-
-
-Result RegisterAllocator::Allocate(Register target) {
- // If the target is not referenced, it can simply be allocated.
- if (!is_used(RegisterAllocator::ToNumber(target))) {
- return Result(target);
- }
- // If the target is only referenced in the frame, it can be spilled and
- // then allocated.
- ASSERT(cgen_->has_valid_frame());
- if (cgen_->frame()->is_used(RegisterAllocator::ToNumber(target)) &&
- count(target) == 1) {
- cgen_->frame()->Spill(target);
- ASSERT(!is_used(RegisterAllocator::ToNumber(target)));
- return Result(target);
- }
- // Otherwise (if it's referenced outside the frame) we cannot allocate it.
- return Result();
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/register-allocator.h b/src/3rdparty/v8/src/register-allocator.h
deleted file mode 100644
index f0ef9c3..0000000
--- a/src/3rdparty/v8/src/register-allocator.h
+++ /dev/null
@@ -1,310 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_REGISTER_ALLOCATOR_H_
-#define V8_REGISTER_ALLOCATOR_H_
-
-#include "macro-assembler.h"
-#include "type-info.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/register-allocator-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/register-allocator-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/register-allocator-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/register-allocator-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-
-// -------------------------------------------------------------------------
-// Results
-//
-// Results encapsulate the compile-time values manipulated by the code
-// generator. They can represent registers or constants.
-
-class Result BASE_EMBEDDED {
- public:
- enum Type {
- INVALID,
- REGISTER,
- CONSTANT
- };
-
- // Construct an invalid result.
- Result() { invalidate(); }
-
- // Construct a register Result.
- explicit Result(Register reg, TypeInfo info = TypeInfo::Unknown());
-
- // Construct a Result whose value is a compile-time constant.
- explicit Result(Handle<Object> value) {
- ZoneObjectList* constant_list = Isolate::Current()->result_constant_list();
- TypeInfo info = TypeInfo::TypeFromValue(value);
- value_ = TypeField::encode(CONSTANT)
- | TypeInfoField::encode(info.ToInt())
- | IsUntaggedInt32Field::encode(false)
- | DataField::encode(constant_list->length());
- constant_list->Add(value);
- }
-
- // The copy constructor and assignment operators could each create a new
- // register reference.
- inline Result(const Result& other);
-
- inline Result& operator=(const Result& other);
-
- inline ~Result();
-
- inline void Unuse();
-
- Type type() const { return TypeField::decode(value_); }
-
- void invalidate() { value_ = TypeField::encode(INVALID); }
-
- inline TypeInfo type_info() const;
- inline void set_type_info(TypeInfo info);
- inline bool is_number() const;
- inline bool is_smi() const;
- inline bool is_integer32() const;
- inline bool is_double() const;
-
- bool is_valid() const { return type() != INVALID; }
- bool is_register() const { return type() == REGISTER; }
- bool is_constant() const { return type() == CONSTANT; }
-
- // An untagged int32 Result contains a signed int32 in a register
- // or as a constant. These are only allowed in a side-effect-free
- // int32 calculation, and if a non-int32 input shows up or an overflow
- // occurs, we bail out and drop all the int32 values. Constants are
- // not converted to int32 until they are loaded into a register.
- bool is_untagged_int32() const {
- return IsUntaggedInt32Field::decode(value_);
- }
- void set_untagged_int32(bool value) {
- value_ &= ~IsUntaggedInt32Field::mask();
- value_ |= IsUntaggedInt32Field::encode(value);
- }
-
- Register reg() const {
- ASSERT(is_register());
- uint32_t reg = DataField::decode(value_);
- Register result;
- result.code_ = reg;
- return result;
- }
-
- Handle<Object> handle() const {
- ASSERT(type() == CONSTANT);
- return Isolate::Current()->result_constant_list()->
- at(DataField::decode(value_));
- }
-
- // Move this result to an arbitrary register. The register is not
- // necessarily spilled from the frame or even singly-referenced outside
- // it.
- void ToRegister();
-
- // Move this result to a specified register. The register is spilled from
- // the frame, and the register is singly-referenced (by this result)
- // outside the frame.
- void ToRegister(Register reg);
-
- private:
- uint32_t value_;
-
- // Declare BitFields with template parameters <type, start, size>.
- class TypeField: public BitField<Type, 0, 2> {};
- class TypeInfoField : public BitField<int, 2, 6> {};
- class IsUntaggedInt32Field : public BitField<bool, 8, 1> {};
- class DataField: public BitField<uint32_t, 9, 32 - 9> {};
-
- inline void CopyTo(Result* destination) const;
-
- friend class CodeGeneratorScope;
-};
-
-
-// -------------------------------------------------------------------------
-// Register file
-//
-// The register file tracks reference counts for the processor registers.
-// It is used by both the register allocator and the virtual frame.
-
-class RegisterFile BASE_EMBEDDED {
- public:
- RegisterFile() { Reset(); }
-
- void Reset() {
- for (int i = 0; i < kNumRegisters; i++) {
- ref_counts_[i] = 0;
- }
- }
-
- // Predicates and accessors for the reference counts.
- bool is_used(int num) {
- ASSERT(0 <= num && num < kNumRegisters);
- return ref_counts_[num] > 0;
- }
-
- int count(int num) {
- ASSERT(0 <= num && num < kNumRegisters);
- return ref_counts_[num];
- }
-
- // Record a use of a register by incrementing its reference count.
- void Use(int num) {
- ASSERT(0 <= num && num < kNumRegisters);
- ref_counts_[num]++;
- }
-
- // Record that a register will no longer be used by decrementing its
- // reference count.
- void Unuse(int num) {
- ASSERT(is_used(num));
- ref_counts_[num]--;
- }
-
- // Copy the reference counts from this register file to the other.
- void CopyTo(RegisterFile* other) {
- for (int i = 0; i < kNumRegisters; i++) {
- other->ref_counts_[i] = ref_counts_[i];
- }
- }
-
- private:
- // C++ doesn't like zero length arrays, so we make the array length 1 even if
- // we don't need it.
- static const int kNumRegisters =
- (RegisterAllocatorConstants::kNumRegisters == 0) ?
- 1 : RegisterAllocatorConstants::kNumRegisters;
-
- int ref_counts_[kNumRegisters];
-
- // Very fast inlined loop to find a free register. Used in
- // RegisterAllocator::AllocateWithoutSpilling. Returns
- // kInvalidRegister if no free register found.
- int ScanForFreeRegister() {
- for (int i = 0; i < RegisterAllocatorConstants::kNumRegisters; i++) {
- if (!is_used(i)) return i;
- }
- return RegisterAllocatorConstants::kInvalidRegister;
- }
-
- friend class RegisterAllocator;
-};
-
-
-// -------------------------------------------------------------------------
-// Register allocator
-//
-
-class RegisterAllocator BASE_EMBEDDED {
- public:
- static const int kNumRegisters =
- RegisterAllocatorConstants::kNumRegisters;
- static const int kInvalidRegister =
- RegisterAllocatorConstants::kInvalidRegister;
-
- explicit RegisterAllocator(CodeGenerator* cgen) : cgen_(cgen) {}
-
- // True if the register is reserved by the code generator, false if it
- // can be freely used by the allocator Defined in the
- // platform-specific XXX-inl.h files..
- static inline bool IsReserved(Register reg);
-
- // Convert between (unreserved) assembler registers and allocator
- // numbers. Defined in the platform-specific XXX-inl.h files.
- static inline int ToNumber(Register reg);
- static inline Register ToRegister(int num);
-
- // Predicates and accessors for the registers' reference counts.
- bool is_used(int num) { return registers_.is_used(num); }
- inline bool is_used(Register reg);
-
- int count(int num) { return registers_.count(num); }
- inline int count(Register reg);
-
- // Explicitly record a reference to a register.
- void Use(int num) { registers_.Use(num); }
- inline void Use(Register reg);
-
- // Explicitly record that a register will no longer be used.
- void Unuse(int num) { registers_.Unuse(num); }
- inline void Unuse(Register reg);
-
- // Reset the register reference counts to free all non-reserved registers.
- void Reset() { registers_.Reset(); }
-
- // Initialize the register allocator for entry to a JS function. On
- // entry, the (non-reserved) registers used by the JS calling
- // convention are referenced and the other (non-reserved) registers
- // are free.
- inline void Initialize();
-
- // Allocate a free register and return a register result if possible or
- // fail and return an invalid result.
- Result Allocate();
-
- // Allocate a specific register if possible, spilling it from the
- // current frame if necessary, or else fail and return an invalid
- // result.
- Result Allocate(Register target);
-
- // Allocate a free register without spilling any from the current
- // frame or fail and return an invalid result.
- Result AllocateWithoutSpilling();
-
- // Allocate a free byte register without spilling any from the current
- // frame or fail and return an invalid result.
- Result AllocateByteRegisterWithoutSpilling();
-
- // Copy the internal state to a register file, to be restored later by
- // RestoreFrom.
- void SaveTo(RegisterFile* register_file) {
- registers_.CopyTo(register_file);
- }
-
- // Restore the internal state.
- void RestoreFrom(RegisterFile* register_file) {
- register_file->CopyTo(&registers_);
- }
-
- private:
- CodeGenerator* cgen_;
- RegisterFile registers_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_REGISTER_ALLOCATOR_H_
diff --git a/src/3rdparty/v8/src/rewriter.cc b/src/3rdparty/v8/src/rewriter.cc
deleted file mode 100644
index 780314d..0000000
--- a/src/3rdparty/v8/src/rewriter.cc
+++ /dev/null
@@ -1,1024 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "rewriter.h"
-
-#include "ast.h"
-#include "compiler.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-class AstOptimizer: public AstVisitor {
- public:
- explicit AstOptimizer() : has_function_literal_(false) {}
-
- void Optimize(ZoneList<Statement*>* statements);
-
- private:
- // Used for loop condition analysis. Cleared before visiting a loop
- // condition, set when a function literal is visited.
- bool has_function_literal_;
-
- // Helpers
- void OptimizeArguments(ZoneList<Expression*>* arguments);
-
- // Node visitors.
-#define DEF_VISIT(type) \
- virtual void Visit##type(type* node);
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- DISALLOW_COPY_AND_ASSIGN(AstOptimizer);
-};
-
-
-void AstOptimizer::Optimize(ZoneList<Statement*>* statements) {
- int len = statements->length();
- for (int i = 0; i < len; i++) {
- Visit(statements->at(i));
- }
-}
-
-
-void AstOptimizer::OptimizeArguments(ZoneList<Expression*>* arguments) {
- for (int i = 0; i < arguments->length(); i++) {
- Visit(arguments->at(i));
- }
-}
-
-
-void AstOptimizer::VisitBlock(Block* node) {
- Optimize(node->statements());
-}
-
-
-void AstOptimizer::VisitExpressionStatement(ExpressionStatement* node) {
- node->expression()->set_no_negative_zero(true);
- Visit(node->expression());
-}
-
-
-void AstOptimizer::VisitIfStatement(IfStatement* node) {
- node->condition()->set_no_negative_zero(true);
- Visit(node->condition());
- Visit(node->then_statement());
- if (node->HasElseStatement()) {
- Visit(node->else_statement());
- }
-}
-
-
-void AstOptimizer::VisitDoWhileStatement(DoWhileStatement* node) {
- node->cond()->set_no_negative_zero(true);
- Visit(node->cond());
- Visit(node->body());
-}
-
-
-void AstOptimizer::VisitWhileStatement(WhileStatement* node) {
- has_function_literal_ = false;
- node->cond()->set_no_negative_zero(true);
- Visit(node->cond());
- node->set_may_have_function_literal(has_function_literal_);
- Visit(node->body());
-}
-
-
-void AstOptimizer::VisitForStatement(ForStatement* node) {
- if (node->init() != NULL) {
- Visit(node->init());
- }
- if (node->cond() != NULL) {
- has_function_literal_ = false;
- node->cond()->set_no_negative_zero(true);
- Visit(node->cond());
- node->set_may_have_function_literal(has_function_literal_);
- }
- Visit(node->body());
- if (node->next() != NULL) {
- Visit(node->next());
- }
-}
-
-
-void AstOptimizer::VisitForInStatement(ForInStatement* node) {
- Visit(node->each());
- Visit(node->enumerable());
- Visit(node->body());
-}
-
-
-void AstOptimizer::VisitTryCatchStatement(TryCatchStatement* node) {
- Visit(node->try_block());
- Visit(node->catch_var());
- Visit(node->catch_block());
-}
-
-
-void AstOptimizer::VisitTryFinallyStatement(TryFinallyStatement* node) {
- Visit(node->try_block());
- Visit(node->finally_block());
-}
-
-
-void AstOptimizer::VisitSwitchStatement(SwitchStatement* node) {
- node->tag()->set_no_negative_zero(true);
- Visit(node->tag());
- for (int i = 0; i < node->cases()->length(); i++) {
- CaseClause* clause = node->cases()->at(i);
- if (!clause->is_default()) {
- Visit(clause->label());
- }
- Optimize(clause->statements());
- }
-}
-
-
-void AstOptimizer::VisitContinueStatement(ContinueStatement* node) {
- USE(node);
-}
-
-
-void AstOptimizer::VisitBreakStatement(BreakStatement* node) {
- USE(node);
-}
-
-
-void AstOptimizer::VisitDeclaration(Declaration* node) {
- // Will not be reached by the current optimizations.
- USE(node);
-}
-
-
-void AstOptimizer::VisitEmptyStatement(EmptyStatement* node) {
- USE(node);
-}
-
-
-void AstOptimizer::VisitReturnStatement(ReturnStatement* node) {
- Visit(node->expression());
-}
-
-
-void AstOptimizer::VisitWithEnterStatement(WithEnterStatement* node) {
- Visit(node->expression());
-}
-
-
-void AstOptimizer::VisitWithExitStatement(WithExitStatement* node) {
- USE(node);
-}
-
-
-void AstOptimizer::VisitDebuggerStatement(DebuggerStatement* node) {
- USE(node);
-}
-
-
-void AstOptimizer::VisitFunctionLiteral(FunctionLiteral* node) {
- has_function_literal_ = true;
-}
-
-
-void AstOptimizer::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- USE(node);
-}
-
-
-void AstOptimizer::VisitConditional(Conditional* node) {
- node->condition()->set_no_negative_zero(true);
- Visit(node->condition());
- Visit(node->then_expression());
- Visit(node->else_expression());
-}
-
-
-void AstOptimizer::VisitVariableProxy(VariableProxy* node) {
- Variable* var = node->AsVariable();
- if (var != NULL) {
- if (var->type()->IsKnown()) {
- node->type()->CopyFrom(var->type());
- } else if (node->type()->IsLikelySmi()) {
- var->type()->SetAsLikelySmi();
- }
-
- if (FLAG_safe_int32_compiler) {
- if (var->IsStackAllocated() &&
- !var->is_arguments() &&
- var->mode() != Variable::CONST) {
- node->set_side_effect_free(true);
- }
- }
- }
-}
-
-
-void AstOptimizer::VisitLiteral(Literal* node) {
- Handle<Object> literal = node->handle();
- if (literal->IsSmi()) {
- node->type()->SetAsLikelySmi();
- node->set_side_effect_free(true);
- } else if (literal->IsHeapNumber()) {
- if (node->to_int32()) {
- // Any HeapNumber has an int32 value if it is the input to a bit op.
- node->set_side_effect_free(true);
- } else {
- double double_value = HeapNumber::cast(*literal)->value();
- int32_t int32_value = DoubleToInt32(double_value);
- node->set_side_effect_free(double_value == int32_value);
- }
- }
-}
-
-
-void AstOptimizer::VisitRegExpLiteral(RegExpLiteral* node) {
- USE(node);
-}
-
-
-void AstOptimizer::VisitArrayLiteral(ArrayLiteral* node) {
- for (int i = 0; i < node->values()->length(); i++) {
- Visit(node->values()->at(i));
- }
-}
-
-void AstOptimizer::VisitObjectLiteral(ObjectLiteral* node) {
- for (int i = 0; i < node->properties()->length(); i++) {
- Visit(node->properties()->at(i)->key());
- Visit(node->properties()->at(i)->value());
- }
-}
-
-
-void AstOptimizer::VisitCatchExtensionObject(CatchExtensionObject* node) {
- Visit(node->key());
- Visit(node->value());
-}
-
-
-void AstOptimizer::VisitAssignment(Assignment* node) {
- switch (node->op()) {
- case Token::INIT_VAR:
- case Token::INIT_CONST:
- case Token::ASSIGN:
- // No type can be infered from the general assignment.
- break;
- case Token::ASSIGN_BIT_OR:
- case Token::ASSIGN_BIT_XOR:
- case Token::ASSIGN_BIT_AND:
- case Token::ASSIGN_SHL:
- case Token::ASSIGN_SAR:
- case Token::ASSIGN_SHR:
- node->type()->SetAsLikelySmiIfUnknown();
- node->target()->type()->SetAsLikelySmiIfUnknown();
- node->value()->type()->SetAsLikelySmiIfUnknown();
- node->value()->set_to_int32(true);
- node->value()->set_no_negative_zero(true);
- break;
- case Token::ASSIGN_ADD:
- case Token::ASSIGN_SUB:
- case Token::ASSIGN_MUL:
- case Token::ASSIGN_DIV:
- case Token::ASSIGN_MOD:
- if (node->type()->IsLikelySmi()) {
- node->target()->type()->SetAsLikelySmiIfUnknown();
- node->value()->type()->SetAsLikelySmiIfUnknown();
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- Visit(node->target());
- Visit(node->value());
-
- switch (node->op()) {
- case Token::INIT_VAR:
- case Token::INIT_CONST:
- case Token::ASSIGN:
- // Pure assignment copies the type from the value.
- node->type()->CopyFrom(node->value()->type());
- break;
- case Token::ASSIGN_BIT_OR:
- case Token::ASSIGN_BIT_XOR:
- case Token::ASSIGN_BIT_AND:
- case Token::ASSIGN_SHL:
- case Token::ASSIGN_SAR:
- case Token::ASSIGN_SHR:
- // Should have been setup above already.
- break;
- case Token::ASSIGN_ADD:
- case Token::ASSIGN_SUB:
- case Token::ASSIGN_MUL:
- case Token::ASSIGN_DIV:
- case Token::ASSIGN_MOD:
- if (node->type()->IsUnknown()) {
- if (node->target()->type()->IsLikelySmi() ||
- node->value()->type()->IsLikelySmi()) {
- node->type()->SetAsLikelySmi();
- }
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- // Since this is an assignment. We have to propagate this node's type to the
- // variable.
- VariableProxy* proxy = node->target()->AsVariableProxy();
- if (proxy != NULL) {
- Variable* var = proxy->AsVariable();
- if (var != NULL) {
- StaticType* var_type = var->type();
- if (var_type->IsUnknown()) {
- var_type->CopyFrom(node->type());
- } else if (var_type->IsLikelySmi()) {
- // We do not reset likely types to Unknown.
- }
- }
- }
-}
-
-
-void AstOptimizer::VisitThrow(Throw* node) {
- Visit(node->exception());
-}
-
-
-void AstOptimizer::VisitProperty(Property* node) {
- node->key()->set_no_negative_zero(true);
- Visit(node->obj());
- Visit(node->key());
-}
-
-
-void AstOptimizer::VisitCall(Call* node) {
- Visit(node->expression());
- OptimizeArguments(node->arguments());
-}
-
-
-void AstOptimizer::VisitCallNew(CallNew* node) {
- Visit(node->expression());
- OptimizeArguments(node->arguments());
-}
-
-
-void AstOptimizer::VisitCallRuntime(CallRuntime* node) {
- OptimizeArguments(node->arguments());
-}
-
-
-void AstOptimizer::VisitUnaryOperation(UnaryOperation* node) {
- if (node->op() == Token::ADD || node->op() == Token::SUB) {
- node->expression()->set_no_negative_zero(node->no_negative_zero());
- } else {
- node->expression()->set_no_negative_zero(true);
- }
- Visit(node->expression());
- if (FLAG_safe_int32_compiler) {
- switch (node->op()) {
- case Token::BIT_NOT:
- node->expression()->set_no_negative_zero(true);
- node->expression()->set_to_int32(true);
- // Fall through.
- case Token::ADD:
- case Token::SUB:
- node->set_side_effect_free(node->expression()->side_effect_free());
- break;
- case Token::NOT:
- case Token::DELETE:
- case Token::TYPEOF:
- case Token::VOID:
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else if (node->op() == Token::BIT_NOT) {
- node->expression()->set_to_int32(true);
- }
-}
-
-
-void AstOptimizer::VisitIncrementOperation(IncrementOperation* node) {
- UNREACHABLE();
-}
-
-
-void AstOptimizer::VisitCountOperation(CountOperation* node) {
- // Count operations assume that they work on Smis.
- node->expression()->set_no_negative_zero(node->is_prefix() ?
- true :
- node->no_negative_zero());
- node->type()->SetAsLikelySmiIfUnknown();
- node->expression()->type()->SetAsLikelySmiIfUnknown();
- Visit(node->expression());
-}
-
-
-static bool CouldBeNegativeZero(AstNode* node) {
- Literal* literal = node->AsLiteral();
- if (literal != NULL) {
- Handle<Object> handle = literal->handle();
- if (handle->IsString() || handle->IsSmi()) {
- return false;
- } else if (handle->IsHeapNumber()) {
- double double_value = HeapNumber::cast(*handle)->value();
- if (double_value != 0) {
- return false;
- }
- }
- }
- BinaryOperation* binary = node->AsBinaryOperation();
- if (binary != NULL && Token::IsBitOp(binary->op())) {
- return false;
- }
- return true;
-}
-
-
-static bool CouldBePositiveZero(AstNode* node) {
- Literal* literal = node->AsLiteral();
- if (literal != NULL) {
- Handle<Object> handle = literal->handle();
- if (handle->IsSmi()) {
- if (Smi::cast(*handle) != Smi::FromInt(0)) {
- return false;
- }
- } else if (handle->IsHeapNumber()) {
- // Heap number literal can't be +0, because that's a Smi.
- return false;
- }
- }
- return true;
-}
-
-
-void AstOptimizer::VisitBinaryOperation(BinaryOperation* node) {
- // Depending on the operation we can propagate this node's type down the
- // AST nodes.
- Token::Value op = node->op();
- switch (op) {
- case Token::COMMA:
- case Token::OR:
- node->left()->set_no_negative_zero(true);
- node->right()->set_no_negative_zero(node->no_negative_zero());
- break;
- case Token::AND:
- node->left()->set_no_negative_zero(node->no_negative_zero());
- node->right()->set_no_negative_zero(node->no_negative_zero());
- break;
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- node->type()->SetAsLikelySmiIfUnknown();
- node->left()->type()->SetAsLikelySmiIfUnknown();
- node->right()->type()->SetAsLikelySmiIfUnknown();
- node->left()->set_to_int32(true);
- node->right()->set_to_int32(true);
- node->left()->set_no_negative_zero(true);
- node->right()->set_no_negative_zero(true);
- break;
- case Token::MUL: {
- VariableProxy* lvar_proxy = node->left()->AsVariableProxy();
- VariableProxy* rvar_proxy = node->right()->AsVariableProxy();
- if (lvar_proxy != NULL && rvar_proxy != NULL) {
- Variable* lvar = lvar_proxy->AsVariable();
- Variable* rvar = rvar_proxy->AsVariable();
- if (lvar != NULL && rvar != NULL) {
- if (lvar->mode() == Variable::VAR && rvar->mode() == Variable::VAR) {
- Slot* lslot = lvar->AsSlot();
- Slot* rslot = rvar->AsSlot();
- if (lslot->type() == rslot->type() &&
- (lslot->type() == Slot::PARAMETER ||
- lslot->type() == Slot::LOCAL) &&
- lslot->index() == rslot->index()) {
- // A number squared doesn't give negative zero.
- node->set_no_negative_zero(true);
- }
- }
- }
- }
- }
- case Token::ADD:
- case Token::SUB:
- case Token::DIV:
- case Token::MOD: {
- if (node->type()->IsLikelySmi()) {
- node->left()->type()->SetAsLikelySmiIfUnknown();
- node->right()->type()->SetAsLikelySmiIfUnknown();
- }
- if (op == Token::ADD && (!CouldBeNegativeZero(node->left()) ||
- !CouldBeNegativeZero(node->right()))) {
- node->left()->set_no_negative_zero(true);
- node->right()->set_no_negative_zero(true);
- } else if (op == Token::SUB && (!CouldBeNegativeZero(node->left()) ||
- !CouldBePositiveZero(node->right()))) {
- node->left()->set_no_negative_zero(true);
- node->right()->set_no_negative_zero(true);
- } else {
- node->left()->set_no_negative_zero(node->no_negative_zero());
- node->right()->set_no_negative_zero(node->no_negative_zero());
- }
- if (node->op() == Token::DIV) {
- node->right()->set_no_negative_zero(false);
- } else if (node->op() == Token::MOD) {
- node->right()->set_no_negative_zero(true);
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-
- Visit(node->left());
- Visit(node->right());
-
- // After visiting the operand nodes we have to check if this node's type
- // can be updated. If it does, then we can push that information down
- // towards the leaves again if the new information is an upgrade over the
- // previous type of the operand nodes.
- if (node->type()->IsUnknown()) {
- if (node->left()->type()->IsLikelySmi() ||
- node->right()->type()->IsLikelySmi()) {
- node->type()->SetAsLikelySmi();
- }
- if (node->type()->IsLikelySmi()) {
- // The type of this node changed to LIKELY_SMI. Propagate this knowledge
- // down through the nodes.
- if (node->left()->type()->IsUnknown()) {
- node->left()->type()->SetAsLikelySmi();
- Visit(node->left());
- }
- if (node->right()->type()->IsUnknown()) {
- node->right()->type()->SetAsLikelySmi();
- Visit(node->right());
- }
- }
- }
-
- if (FLAG_safe_int32_compiler) {
- switch (node->op()) {
- case Token::COMMA:
- case Token::OR:
- case Token::AND:
- break;
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Add one to the number of bit operations in this expression.
- node->set_num_bit_ops(1);
- // Fall through.
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- node->set_side_effect_free(node->left()->side_effect_free() &&
- node->right()->side_effect_free());
- node->set_num_bit_ops(node->num_bit_ops() +
- node->left()->num_bit_ops() +
- node->right()->num_bit_ops());
- if (!node->no_negative_zero() && node->op() == Token::MUL) {
- node->set_side_effect_free(false);
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void AstOptimizer::VisitCompareOperation(CompareOperation* node) {
- if (node->type()->IsKnown()) {
- // Propagate useful information down towards the leaves.
- node->left()->type()->SetAsLikelySmiIfUnknown();
- node->right()->type()->SetAsLikelySmiIfUnknown();
- }
-
- node->left()->set_no_negative_zero(true);
- // Only [[HasInstance]] has the right argument passed unchanged to it.
- node->right()->set_no_negative_zero(true);
-
- Visit(node->left());
- Visit(node->right());
-
- // After visiting the operand nodes we have to check if this node's type
- // can be updated. If it does, then we can push that information down
- // towards the leaves again if the new information is an upgrade over the
- // previous type of the operand nodes.
- if (node->type()->IsUnknown()) {
- if (node->left()->type()->IsLikelySmi() ||
- node->right()->type()->IsLikelySmi()) {
- node->type()->SetAsLikelySmi();
- }
- if (node->type()->IsLikelySmi()) {
- // The type of this node changed to LIKELY_SMI. Propagate this knowledge
- // down through the nodes.
- if (node->left()->type()->IsUnknown()) {
- node->left()->type()->SetAsLikelySmi();
- Visit(node->left());
- }
- if (node->right()->type()->IsUnknown()) {
- node->right()->type()->SetAsLikelySmi();
- Visit(node->right());
- }
- }
- }
-}
-
-
-void AstOptimizer::VisitCompareToNull(CompareToNull* node) {
- Visit(node->expression());
-}
-
-
-void AstOptimizer::VisitThisFunction(ThisFunction* node) {
- USE(node);
-}
-
-
-class Processor: public AstVisitor {
- public:
- explicit Processor(Variable* result)
- : result_(result),
- result_assigned_(false),
- is_set_(false),
- in_try_(false) {
- }
-
- void Process(ZoneList<Statement*>* statements);
- bool result_assigned() const { return result_assigned_; }
-
- private:
- Variable* result_;
-
- // We are not tracking result usage via the result_'s use
- // counts (we leave the accurate computation to the
- // usage analyzer). Instead we simple remember if
- // there was ever an assignment to result_.
- bool result_assigned_;
-
- // To avoid storing to .result all the time, we eliminate some of
- // the stores by keeping track of whether or not we're sure .result
- // will be overwritten anyway. This is a bit more tricky than what I
- // was hoping for
- bool is_set_;
- bool in_try_;
-
- Expression* SetResult(Expression* value) {
- result_assigned_ = true;
- VariableProxy* result_proxy = new VariableProxy(result_);
- return new Assignment(Token::ASSIGN, result_proxy, value,
- RelocInfo::kNoPosition);
- }
-
- // Node visitors.
-#define DEF_VISIT(type) \
- virtual void Visit##type(type* node);
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- void VisitIterationStatement(IterationStatement* stmt);
-};
-
-
-void Processor::Process(ZoneList<Statement*>* statements) {
- for (int i = statements->length() - 1; i >= 0; --i) {
- Visit(statements->at(i));
- }
-}
-
-
-void Processor::VisitBlock(Block* node) {
- // An initializer block is the rewritten form of a variable declaration
- // with initialization expressions. The initializer block contains the
- // list of assignments corresponding to the initialization expressions.
- // While unclear from the spec (ECMA-262, 3rd., 12.2), the value of
- // a variable declaration with initialization expression is 'undefined'
- // with some JS VMs: For instance, using smjs, print(eval('var x = 7'))
- // returns 'undefined'. To obtain the same behavior with v8, we need
- // to prevent rewriting in that case.
- if (!node->is_initializer_block()) Process(node->statements());
-}
-
-
-void Processor::VisitExpressionStatement(ExpressionStatement* node) {
- // Rewrite : <x>; -> .result = <x>;
- if (!is_set_) {
- node->set_expression(SetResult(node->expression()));
- if (!in_try_) is_set_ = true;
- }
-}
-
-
-void Processor::VisitIfStatement(IfStatement* node) {
- // Rewrite both then and else parts (reversed).
- bool save = is_set_;
- Visit(node->else_statement());
- bool set_after_then = is_set_;
- is_set_ = save;
- Visit(node->then_statement());
- is_set_ = is_set_ && set_after_then;
-}
-
-
-void Processor::VisitIterationStatement(IterationStatement* node) {
- // Rewrite the body.
- bool set_after_loop = is_set_;
- Visit(node->body());
- is_set_ = is_set_ && set_after_loop;
-}
-
-
-void Processor::VisitDoWhileStatement(DoWhileStatement* node) {
- VisitIterationStatement(node);
-}
-
-
-void Processor::VisitWhileStatement(WhileStatement* node) {
- VisitIterationStatement(node);
-}
-
-
-void Processor::VisitForStatement(ForStatement* node) {
- VisitIterationStatement(node);
-}
-
-
-void Processor::VisitForInStatement(ForInStatement* node) {
- VisitIterationStatement(node);
-}
-
-
-void Processor::VisitTryCatchStatement(TryCatchStatement* node) {
- // Rewrite both try and catch blocks (reversed order).
- bool set_after_catch = is_set_;
- Visit(node->catch_block());
- is_set_ = is_set_ && set_after_catch;
- bool save = in_try_;
- in_try_ = true;
- Visit(node->try_block());
- in_try_ = save;
-}
-
-
-void Processor::VisitTryFinallyStatement(TryFinallyStatement* node) {
- // Rewrite both try and finally block (reversed order).
- Visit(node->finally_block());
- bool save = in_try_;
- in_try_ = true;
- Visit(node->try_block());
- in_try_ = save;
-}
-
-
-void Processor::VisitSwitchStatement(SwitchStatement* node) {
- // Rewrite statements in all case clauses in reversed order.
- ZoneList<CaseClause*>* clauses = node->cases();
- bool set_after_switch = is_set_;
- for (int i = clauses->length() - 1; i >= 0; --i) {
- CaseClause* clause = clauses->at(i);
- Process(clause->statements());
- }
- is_set_ = is_set_ && set_after_switch;
-}
-
-
-void Processor::VisitContinueStatement(ContinueStatement* node) {
- is_set_ = false;
-}
-
-
-void Processor::VisitBreakStatement(BreakStatement* node) {
- is_set_ = false;
-}
-
-
-// Do nothing:
-void Processor::VisitDeclaration(Declaration* node) {}
-void Processor::VisitEmptyStatement(EmptyStatement* node) {}
-void Processor::VisitReturnStatement(ReturnStatement* node) {}
-void Processor::VisitWithEnterStatement(WithEnterStatement* node) {}
-void Processor::VisitWithExitStatement(WithExitStatement* node) {}
-void Processor::VisitDebuggerStatement(DebuggerStatement* node) {}
-
-
-// Expressions are never visited yet.
-void Processor::VisitFunctionLiteral(FunctionLiteral* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitConditional(Conditional* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitVariableProxy(VariableProxy* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitLiteral(Literal* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitRegExpLiteral(RegExpLiteral* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitArrayLiteral(ArrayLiteral* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitObjectLiteral(ObjectLiteral* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitCatchExtensionObject(CatchExtensionObject* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitAssignment(Assignment* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitThrow(Throw* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitProperty(Property* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitCall(Call* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitCallNew(CallNew* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitCallRuntime(CallRuntime* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitUnaryOperation(UnaryOperation* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitIncrementOperation(IncrementOperation* node) {
- UNREACHABLE();
-}
-
-
-void Processor::VisitCountOperation(CountOperation* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitBinaryOperation(BinaryOperation* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitCompareOperation(CompareOperation* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitCompareToNull(CompareToNull* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitThisFunction(ThisFunction* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-// Assumes code has been parsed and scopes have been analyzed. Mutates the
-// AST, so the AST should not continue to be used in the case of failure.
-bool Rewriter::Rewrite(CompilationInfo* info) {
- FunctionLiteral* function = info->function();
- ASSERT(function != NULL);
- Scope* scope = function->scope();
- ASSERT(scope != NULL);
- if (scope->is_function_scope()) return true;
-
- ZoneList<Statement*>* body = function->body();
- if (!body->is_empty()) {
- Variable* result = scope->NewTemporary(
- info->isolate()->factory()->result_symbol());
- Processor processor(result);
- processor.Process(body);
- if (processor.HasStackOverflow()) return false;
-
- if (processor.result_assigned()) {
- VariableProxy* result_proxy = new VariableProxy(result);
- body->Add(new ReturnStatement(result_proxy));
- }
- }
-
- return true;
-}
-
-
-// Assumes code has been parsed and scopes have been analyzed. Mutates the
-// AST, so the AST should not continue to be used in the case of failure.
-bool Rewriter::Analyze(CompilationInfo* info) {
- FunctionLiteral* function = info->function();
- ASSERT(function != NULL && function->scope() != NULL);
-
- ZoneList<Statement*>* body = function->body();
- if (FLAG_optimize_ast && !body->is_empty()) {
- AstOptimizer optimizer;
- optimizer.Optimize(body);
- if (optimizer.HasStackOverflow()) return false;
- }
- return true;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/rewriter.h b/src/3rdparty/v8/src/rewriter.h
deleted file mode 100644
index 62e1b7f..0000000
--- a/src/3rdparty/v8/src/rewriter.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_REWRITER_H_
-#define V8_REWRITER_H_
-
-namespace v8 {
-namespace internal {
-
-class CompilationInfo;
-
-class Rewriter {
- public:
- // Rewrite top-level code (ECMA 262 "programs") so as to conservatively
- // include an assignment of the value of the last statement in the code to
- // a compiler-generated temporary variable wherever needed.
- //
- // Assumes code has been parsed and scopes have been analyzed. Mutates the
- // AST, so the AST should not continue to be used in the case of failure.
- static bool Rewrite(CompilationInfo* info);
-
- // Perform a suite of simple non-iterative analyses of the AST. Mark
- // expressions that are likely smis, expressions without side effects,
- // expressions whose value will be converted to Int32, and expressions in a
- // context where +0 and -0 are treated the same.
- //
- // Assumes code has been parsed and scopes have been analyzed. Mutates the
- // AST, so the AST should not continue to be used in the case of failure.
- static bool Analyze(CompilationInfo* info);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_REWRITER_H_
diff --git a/src/3rdparty/v8/src/runtime-profiler.cc b/src/3rdparty/v8/src/runtime-profiler.cc
deleted file mode 100644
index 28755e3..0000000
--- a/src/3rdparty/v8/src/runtime-profiler.cc
+++ /dev/null
@@ -1,478 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "runtime-profiler.h"
-
-#include "assembler.h"
-#include "code-stubs.h"
-#include "compilation-cache.h"
-#include "deoptimizer.h"
-#include "execution.h"
-#include "global-handles.h"
-#include "mark-compact.h"
-#include "platform.h"
-#include "scopeinfo.h"
-
-namespace v8 {
-namespace internal {
-
-
-class PendingListNode : public Malloced {
- public:
- explicit PendingListNode(JSFunction* function);
- ~PendingListNode() { Destroy(); }
-
- PendingListNode* next() const { return next_; }
- void set_next(PendingListNode* node) { next_ = node; }
- Handle<JSFunction> function() { return Handle<JSFunction>::cast(function_); }
-
- // If the function is garbage collected before we've had the chance
- // to optimize it the weak handle will be null.
- bool IsValid() { return !function_.is_null(); }
-
- // Returns the number of microseconds this node has been pending.
- int Delay() const { return static_cast<int>(OS::Ticks() - start_); }
-
- private:
- void Destroy();
- static void WeakCallback(v8::Persistent<v8::Value> object, void* data);
-
- PendingListNode* next_;
- Handle<Object> function_; // Weak handle.
- int64_t start_;
-};
-
-
-// Optimization sampler constants.
-static const int kSamplerFrameCount = 2;
-static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };
-
-static const int kSamplerTicksBetweenThresholdAdjustment = 32;
-
-static const int kSamplerThresholdInit = 3;
-static const int kSamplerThresholdMin = 1;
-static const int kSamplerThresholdDelta = 1;
-
-static const int kSamplerThresholdSizeFactorInit = 3;
-static const int kSamplerThresholdSizeFactorMin = 1;
-static const int kSamplerThresholdSizeFactorDelta = 1;
-
-static const int kSizeLimit = 1500;
-
-
-PendingListNode::PendingListNode(JSFunction* function) : next_(NULL) {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
- function_ = global_handles->Create(function);
- start_ = OS::Ticks();
- global_handles->MakeWeak(function_.location(), this, &WeakCallback);
-}
-
-
-void PendingListNode::Destroy() {
- if (!IsValid()) return;
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
- global_handles->Destroy(function_.location());
- function_= Handle<Object>::null();
-}
-
-
-void PendingListNode::WeakCallback(v8::Persistent<v8::Value>, void* data) {
- reinterpret_cast<PendingListNode*>(data)->Destroy();
-}
-
-
-static bool IsOptimizable(JSFunction* function) {
- Code* code = function->code();
- return code->kind() == Code::FUNCTION && code->optimizable();
-}
-
-
-Atomic32 RuntimeProfiler::state_ = 0;
-// TODO(isolates): Create the semaphore lazily and clean it up when no
-// longer required.
-#ifdef ENABLE_LOGGING_AND_PROFILING
-Semaphore* RuntimeProfiler::semaphore_ = OS::CreateSemaphore(0);
-#endif
-
-
-RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
- : isolate_(isolate),
- sampler_threshold_(kSamplerThresholdInit),
- sampler_threshold_size_factor_(kSamplerThresholdSizeFactorInit),
- sampler_ticks_until_threshold_adjustment_(
- kSamplerTicksBetweenThresholdAdjustment),
- js_ratio_(0),
- sampler_window_position_(0),
- optimize_soon_list_(NULL),
- state_window_position_(0) {
- state_counts_[0] = kStateWindowSize;
- state_counts_[1] = 0;
- memset(state_window_, 0, sizeof(state_window_));
- ClearSampleBuffer();
-}
-
-
-bool RuntimeProfiler::IsEnabled() {
- return V8::UseCrankshaft() && FLAG_opt;
-}
-
-
-void RuntimeProfiler::Optimize(JSFunction* function, bool eager, int delay) {
- ASSERT(IsOptimizable(function));
- if (FLAG_trace_opt) {
- PrintF("[marking (%s) ", eager ? "eagerly" : "lazily");
- function->PrintName();
- PrintF(" for recompilation");
- if (delay > 0) {
- PrintF(" (delayed %0.3f ms)", static_cast<double>(delay) / 1000);
- }
- PrintF("]\n");
- }
-
- // The next call to the function will trigger optimization.
- function->MarkForLazyRecompilation();
-}
-
-
-void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
- // See AlwaysFullCompiler (in compiler.cc) comment on why we need
- // Debug::has_break_points().
- ASSERT(function->IsMarkedForLazyRecompilation());
- if (!FLAG_use_osr ||
- isolate_->debug()->has_break_points() ||
- function->IsBuiltin()) {
- return;
- }
-
- SharedFunctionInfo* shared = function->shared();
- // If the code is not optimizable or references context slots, don't try OSR.
- if (!shared->code()->optimizable() || !shared->allows_lazy_compilation()) {
- return;
- }
-
- // We are not prepared to do OSR for a function that already has an
- // allocated arguments object. The optimized code would bypass it for
- // arguments accesses, which is unsound. Don't try OSR.
- if (shared->scope_info()->HasArgumentsShadow()) return;
-
- // We're using on-stack replacement: patch the unoptimized code so that
- // any back edge in any unoptimized frame will trigger on-stack
- // replacement for that frame.
- if (FLAG_trace_osr) {
- PrintF("[patching stack checks in ");
- function->PrintName();
- PrintF(" for on-stack replacement]\n");
- }
-
- // Get the stack check stub code object to match against. We aren't
- // prepared to generate it, but we don't expect to have to.
- StackCheckStub check_stub;
- Object* check_code;
- MaybeObject* maybe_check_code = check_stub.TryGetCode();
- if (maybe_check_code->ToObject(&check_code)) {
- Code* replacement_code =
- isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
- Code* unoptimized_code = shared->code();
- Deoptimizer::PatchStackCheckCode(unoptimized_code,
- Code::cast(check_code),
- replacement_code);
- }
-}
-
-
-void RuntimeProfiler::ClearSampleBuffer() {
- memset(sampler_window_, 0, sizeof(sampler_window_));
- memset(sampler_window_weight_, 0, sizeof(sampler_window_weight_));
-}
-
-
-int RuntimeProfiler::LookupSample(JSFunction* function) {
- int weight = 0;
- for (int i = 0; i < kSamplerWindowSize; i++) {
- Object* sample = sampler_window_[i];
- if (sample != NULL) {
- if (function == sample) {
- weight += sampler_window_weight_[i];
- }
- }
- }
- return weight;
-}
-
-
-void RuntimeProfiler::AddSample(JSFunction* function, int weight) {
- ASSERT(IsPowerOf2(kSamplerWindowSize));
- sampler_window_[sampler_window_position_] = function;
- sampler_window_weight_[sampler_window_position_] = weight;
- sampler_window_position_ = (sampler_window_position_ + 1) &
- (kSamplerWindowSize - 1);
-}
-
-
-void RuntimeProfiler::OptimizeNow() {
- HandleScope scope(isolate_);
- PendingListNode* current = optimize_soon_list_;
- while (current != NULL) {
- PendingListNode* next = current->next();
- if (current->IsValid()) {
- Handle<JSFunction> function = current->function();
- int delay = current->Delay();
- if (IsOptimizable(*function)) {
- Optimize(*function, true, delay);
- }
- }
- delete current;
- current = next;
- }
- optimize_soon_list_ = NULL;
-
- // Run through the JavaScript frames and collect them. If we already
- // have a sample of the function, we mark it for optimizations
- // (eagerly or lazily).
- JSFunction* samples[kSamplerFrameCount];
- int sample_count = 0;
- int frame_count = 0;
- for (JavaScriptFrameIterator it(isolate_);
- frame_count++ < kSamplerFrameCount && !it.done();
- it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- JSFunction* function = JSFunction::cast(frame->function());
-
- // Adjust threshold each time we have processed
- // a certain number of ticks.
- if (sampler_ticks_until_threshold_adjustment_ > 0) {
- sampler_ticks_until_threshold_adjustment_--;
- if (sampler_ticks_until_threshold_adjustment_ <= 0) {
- // If the threshold is not already at the minimum
- // modify and reset the ticks until next adjustment.
- if (sampler_threshold_ > kSamplerThresholdMin) {
- sampler_threshold_ -= kSamplerThresholdDelta;
- sampler_ticks_until_threshold_adjustment_ =
- kSamplerTicksBetweenThresholdAdjustment;
- }
- }
- }
-
- if (function->IsMarkedForLazyRecompilation()) {
- Code* unoptimized = function->shared()->code();
- int nesting = unoptimized->allow_osr_at_loop_nesting_level();
- if (nesting == 0) AttemptOnStackReplacement(function);
- int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
- unoptimized->set_allow_osr_at_loop_nesting_level(new_nesting);
- }
-
- // Do not record non-optimizable functions.
- if (!IsOptimizable(function)) continue;
- samples[sample_count++] = function;
-
- int function_size = function->shared()->SourceSize();
- int threshold_size_factor = (function_size > kSizeLimit)
- ? sampler_threshold_size_factor_
- : 1;
-
- int threshold = sampler_threshold_ * threshold_size_factor;
- int current_js_ratio = NoBarrier_Load(&js_ratio_);
-
- // Adjust threshold depending on the ratio of time spent
- // in JS code.
- if (current_js_ratio < 20) {
- // If we spend less than 20% of the time in JS code,
- // do not optimize.
- continue;
- } else if (current_js_ratio < 75) {
- // Below 75% of time spent in JS code, only optimize very
- // frequently used functions.
- threshold *= 3;
- }
-
- if (LookupSample(function) >= threshold) {
- Optimize(function, false, 0);
- isolate_->compilation_cache()->MarkForEagerOptimizing(
- Handle<JSFunction>(function));
- }
- }
-
- // Add the collected functions as samples. It's important not to do
- // this as part of collecting them because this will interfere with
- // the sample lookup in case of recursive functions.
- for (int i = 0; i < sample_count; i++) {
- AddSample(samples[i], kSamplerFrameWeight[i]);
- }
-}
-
-
-void RuntimeProfiler::OptimizeSoon(JSFunction* function) {
- if (!IsOptimizable(function)) return;
- PendingListNode* node = new PendingListNode(function);
- node->set_next(optimize_soon_list_);
- optimize_soon_list_ = node;
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-void RuntimeProfiler::UpdateStateRatio(SamplerState current_state) {
- SamplerState old_state = state_window_[state_window_position_];
- state_counts_[old_state]--;
- state_window_[state_window_position_] = current_state;
- state_counts_[current_state]++;
- ASSERT(IsPowerOf2(kStateWindowSize));
- state_window_position_ = (state_window_position_ + 1) &
- (kStateWindowSize - 1);
- NoBarrier_Store(&js_ratio_, state_counts_[IN_JS_STATE] * 100 /
- kStateWindowSize);
-}
-#endif
-
-
-void RuntimeProfiler::NotifyTick() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // Record state sample.
- SamplerState state = IsSomeIsolateInJS()
- ? IN_JS_STATE
- : IN_NON_JS_STATE;
- UpdateStateRatio(state);
- isolate_->stack_guard()->RequestRuntimeProfilerTick();
-#endif
-}
-
-
-void RuntimeProfiler::Setup() {
- ClearSampleBuffer();
- // If the ticker hasn't already started, make sure to do so to get
- // the ticks for the runtime profiler.
- if (IsEnabled()) isolate_->logger()->EnsureTickerStarted();
-}
-
-
-void RuntimeProfiler::Reset() {
- sampler_threshold_ = kSamplerThresholdInit;
- sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
- sampler_ticks_until_threshold_adjustment_ =
- kSamplerTicksBetweenThresholdAdjustment;
-}
-
-
-void RuntimeProfiler::TearDown() {
- // Nothing to do.
-}
-
-
-int RuntimeProfiler::SamplerWindowSize() {
- return kSamplerWindowSize;
-}
-
-
-// Update the pointers in the sampler window after a GC.
-void RuntimeProfiler::UpdateSamplesAfterScavenge() {
- for (int i = 0; i < kSamplerWindowSize; i++) {
- Object* function = sampler_window_[i];
- if (function != NULL && isolate_->heap()->InNewSpace(function)) {
- MapWord map_word = HeapObject::cast(function)->map_word();
- if (map_word.IsForwardingAddress()) {
- sampler_window_[i] = map_word.ToForwardingAddress();
- } else {
- sampler_window_[i] = NULL;
- }
- }
- }
-}
-
-
-void RuntimeProfiler::HandleWakeUp(Isolate* isolate) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // The profiler thread must still be waiting.
- ASSERT(NoBarrier_Load(&state_) >= 0);
- // In IsolateEnteredJS we have already incremented the counter and
- // undid the decrement done by the profiler thread. Increment again
- // to get the right count of active isolates.
- NoBarrier_AtomicIncrement(&state_, 1);
- semaphore_->Signal();
- isolate->ResetEagerOptimizingData();
-#endif
-}
-
-
-bool RuntimeProfiler::IsSomeIsolateInJS() {
- return NoBarrier_Load(&state_) > 0;
-}
-
-
-bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
- ASSERT(old_state >= -1);
- if (old_state != 0) return false;
- semaphore_->Wait();
-#endif
- return true;
-}
-
-
-void RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- semaphore_->Signal();
-#endif
-}
-
-
-void RuntimeProfiler::RemoveDeadSamples() {
- for (int i = 0; i < kSamplerWindowSize; i++) {
- Object* function = sampler_window_[i];
- if (function != NULL && !HeapObject::cast(function)->IsMarked()) {
- sampler_window_[i] = NULL;
- }
- }
-}
-
-
-void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) {
- for (int i = 0; i < kSamplerWindowSize; i++) {
- visitor->VisitPointer(&sampler_window_[i]);
- }
-}
-
-
-bool RuntimeProfilerRateLimiter::SuspendIfNecessary() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- static const int kNonJSTicksThreshold = 100;
- if (RuntimeProfiler::IsSomeIsolateInJS()) {
- non_js_ticks_ = 0;
- } else {
- if (non_js_ticks_ < kNonJSTicksThreshold) {
- ++non_js_ticks_;
- } else {
- return RuntimeProfiler::WaitForSomeIsolateToEnterJS();
- }
- }
-#endif
- return false;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/runtime-profiler.h b/src/3rdparty/v8/src/runtime-profiler.h
deleted file mode 100644
index 8074035..0000000
--- a/src/3rdparty/v8/src/runtime-profiler.h
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_RUNTIME_PROFILER_H_
-#define V8_RUNTIME_PROFILER_H_
-
-#include "allocation.h"
-#include "atomicops.h"
-
-namespace v8 {
-namespace internal {
-
-class Isolate;
-class JSFunction;
-class Object;
-class PendingListNode;
-class Semaphore;
-
-
-enum SamplerState {
- IN_NON_JS_STATE = 0,
- IN_JS_STATE = 1
-};
-
-
-class RuntimeProfiler {
- public:
- explicit RuntimeProfiler(Isolate* isolate);
-
- static bool IsEnabled();
-
- void OptimizeNow();
- void OptimizeSoon(JSFunction* function);
-
- void NotifyTick();
-
- void Setup();
- void Reset();
- void TearDown();
-
- Object** SamplerWindowAddress();
- int SamplerWindowSize();
-
- // Rate limiting support.
-
- // VM thread interface.
- //
- // Called by isolates when their states change.
- static inline void IsolateEnteredJS(Isolate* isolate);
- static inline void IsolateExitedJS(Isolate* isolate);
-
- // Profiler thread interface.
- //
- // IsSomeIsolateInJS():
- // The profiler thread can query whether some isolate is currently
- // running JavaScript code.
- //
- // WaitForSomeIsolateToEnterJS():
- // When no isolates are running JavaScript code for some time the
- // profiler thread suspends itself by calling the wait function. The
- // wait function returns true after it waited or false immediately.
- // While the function was waiting the profiler may have been
- // disabled so it *must check* whether it is allowed to continue.
- static bool IsSomeIsolateInJS();
- static bool WaitForSomeIsolateToEnterJS();
-
- // When shutting down we join the profiler thread. Doing so while
- // it's waiting on a semaphore will cause a deadlock, so we have to
- // wake it up first.
- static void WakeUpRuntimeProfilerThreadBeforeShutdown();
-
- void UpdateSamplesAfterScavenge();
- void RemoveDeadSamples();
- void UpdateSamplesAfterCompact(ObjectVisitor* visitor);
-
- private:
- static const int kSamplerWindowSize = 16;
- static const int kStateWindowSize = 128;
-
- static void HandleWakeUp(Isolate* isolate);
-
- void Optimize(JSFunction* function, bool eager, int delay);
-
- void AttemptOnStackReplacement(JSFunction* function);
-
- void ClearSampleBuffer();
-
- void ClearSampleBufferNewSpaceEntries();
-
- int LookupSample(JSFunction* function);
-
- void AddSample(JSFunction* function, int weight);
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- void UpdateStateRatio(SamplerState current_state);
-#endif
-
- Isolate* isolate_;
-
- int sampler_threshold_;
- int sampler_threshold_size_factor_;
- int sampler_ticks_until_threshold_adjustment_;
-
- // The ratio of ticks spent in JS code in percent.
- Atomic32 js_ratio_;
-
- Object* sampler_window_[kSamplerWindowSize];
- int sampler_window_position_;
- int sampler_window_weight_[kSamplerWindowSize];
-
- // Support for pending 'optimize soon' requests.
- PendingListNode* optimize_soon_list_;
-
- SamplerState state_window_[kStateWindowSize];
- int state_window_position_;
- int state_counts_[2];
-
- // Possible state values:
- // -1 => the profiler thread is waiting on the semaphore
- // 0 or positive => the number of isolates running JavaScript code.
- static Atomic32 state_;
- static Semaphore* semaphore_;
-};
-
-
-// Rate limiter intended to be used in the profiler thread.
-class RuntimeProfilerRateLimiter BASE_EMBEDDED {
- public:
- RuntimeProfilerRateLimiter() : non_js_ticks_(0) { }
-
- // Suspends the current thread (which must be the profiler thread)
- // when not executing JavaScript to minimize CPU usage. Returns
- // whether the thread was suspended (and so must check whether
- // profiling is still active.)
- //
- // Does nothing when runtime profiling is not enabled.
- bool SuspendIfNecessary();
-
- private:
- int non_js_ticks_;
-
- DISALLOW_COPY_AND_ASSIGN(RuntimeProfilerRateLimiter);
-};
-
-
-// Implementation of RuntimeProfiler inline functions.
-
-void RuntimeProfiler::IsolateEnteredJS(Isolate* isolate) {
- Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1);
- if (new_state == 0) {
- // Just incremented from -1 to 0. -1 can only be set by the
- // profiler thread before it suspends itself and starts waiting on
- // the semaphore.
- HandleWakeUp(isolate);
- }
- ASSERT(new_state >= 0);
-}
-
-
-void RuntimeProfiler::IsolateExitedJS(Isolate* isolate) {
- Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, -1);
- ASSERT(new_state >= 0);
- USE(new_state);
-}
-
-} } // namespace v8::internal
-
-#endif // V8_RUNTIME_PROFILER_H_
diff --git a/src/3rdparty/v8/src/runtime.cc b/src/3rdparty/v8/src/runtime.cc
deleted file mode 100644
index ff9f914..0000000
--- a/src/3rdparty/v8/src/runtime.cc
+++ /dev/null
@@ -1,11949 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include "v8.h"
-
-#include "accessors.h"
-#include "api.h"
-#include "arguments.h"
-#include "codegen.h"
-#include "compilation-cache.h"
-#include "compiler.h"
-#include "cpu.h"
-#include "dateparser-inl.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "execution.h"
-#include "global-handles.h"
-#include "jsregexp.h"
-#include "liveedit.h"
-#include "liveobjectlist-inl.h"
-#include "parser.h"
-#include "platform.h"
-#include "runtime.h"
-#include "runtime-profiler.h"
-#include "scopeinfo.h"
-#include "smart-pointer.h"
-#include "stub-cache.h"
-#include "v8threads.h"
-#include "string-search.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define RUNTIME_ASSERT(value) \
- if (!(value)) return isolate->ThrowIllegalOperation();
-
-// Cast the given object to a value of the specified type and store
-// it in a variable with the given name. If the object is not of the
-// expected type call IllegalOperation and return.
-#define CONVERT_CHECKED(Type, name, obj) \
- RUNTIME_ASSERT(obj->Is##Type()); \
- Type* name = Type::cast(obj);
-
-#define CONVERT_ARG_CHECKED(Type, name, index) \
- RUNTIME_ASSERT(args[index]->Is##Type()); \
- Handle<Type> name = args.at<Type>(index);
-
-// Cast the given object to a boolean and store it in a variable with
-// the given name. If the object is not a boolean call IllegalOperation
-// and return.
-#define CONVERT_BOOLEAN_CHECKED(name, obj) \
- RUNTIME_ASSERT(obj->IsBoolean()); \
- bool name = (obj)->IsTrue();
-
-// Cast the given object to a Smi and store its value in an int variable
-// with the given name. If the object is not a Smi call IllegalOperation
-// and return.
-#define CONVERT_SMI_CHECKED(name, obj) \
- RUNTIME_ASSERT(obj->IsSmi()); \
- int name = Smi::cast(obj)->value();
-
-// Cast the given object to a double and store it in a variable with
-// the given name. If the object is not a number (as opposed to
-// the number not-a-number) call IllegalOperation and return.
-#define CONVERT_DOUBLE_CHECKED(name, obj) \
- RUNTIME_ASSERT(obj->IsNumber()); \
- double name = (obj)->Number();
-
-// Call the specified converter on the object *comand store the result in
-// a variable of the specified type with the given name. If the
-// object is not a Number call IllegalOperation and return.
-#define CONVERT_NUMBER_CHECKED(type, name, Type, obj) \
- RUNTIME_ASSERT(obj->IsNumber()); \
- type name = NumberTo##Type(obj);
-
-
-MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
- JSObject* boilerplate) {
- StackLimitCheck check(isolate);
- if (check.HasOverflowed()) return isolate->StackOverflow();
-
- Heap* heap = isolate->heap();
- Object* result;
- { MaybeObject* maybe_result = heap->CopyJSObject(boilerplate);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- JSObject* copy = JSObject::cast(result);
-
- // Deep copy local properties.
- if (copy->HasFastProperties()) {
- FixedArray* properties = copy->properties();
- for (int i = 0; i < properties->length(); i++) {
- Object* value = properties->get(i);
- if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate, js_object);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- properties->set(i, result);
- }
- }
- int nof = copy->map()->inobject_properties();
- for (int i = 0; i < nof; i++) {
- Object* value = copy->InObjectPropertyAt(i);
- if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate, js_object);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- copy->InObjectPropertyAtPut(i, result);
- }
- }
- } else {
- { MaybeObject* maybe_result =
- heap->AllocateFixedArray(copy->NumberOfLocalProperties(NONE));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- FixedArray* names = FixedArray::cast(result);
- copy->GetLocalPropertyNames(names, 0);
- for (int i = 0; i < names->length(); i++) {
- ASSERT(names->get(i)->IsString());
- String* key_string = String::cast(names->get(i));
- PropertyAttributes attributes =
- copy->GetLocalPropertyAttribute(key_string);
- // Only deep copy fields from the object literal expression.
- // In particular, don't try to copy the length attribute of
- // an array.
- if (attributes != NONE) continue;
- Object* value =
- copy->GetProperty(key_string, &attributes)->ToObjectUnchecked();
- if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate, js_object);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- { MaybeObject* maybe_result =
- // Creating object copy for literals. No strict mode needed.
- copy->SetProperty(key_string, result, NONE, kNonStrictMode);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- }
- }
-
- // Deep copy local elements.
- // Pixel elements cannot be created using an object literal.
- ASSERT(!copy->HasExternalArrayElements());
- switch (copy->GetElementsKind()) {
- case JSObject::FAST_ELEMENTS: {
- FixedArray* elements = FixedArray::cast(copy->elements());
- if (elements->map() == heap->fixed_cow_array_map()) {
- isolate->counters()->cow_arrays_created_runtime()->Increment();
-#ifdef DEBUG
- for (int i = 0; i < elements->length(); i++) {
- ASSERT(!elements->get(i)->IsJSObject());
- }
-#endif
- } else {
- for (int i = 0; i < elements->length(); i++) {
- Object* value = elements->get(i);
- if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate,
- js_object);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- elements->set(i, result);
- }
- }
- }
- break;
- }
- case JSObject::DICTIONARY_ELEMENTS: {
- NumberDictionary* element_dictionary = copy->element_dictionary();
- int capacity = element_dictionary->Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* k = element_dictionary->KeyAt(i);
- if (element_dictionary->IsKey(k)) {
- Object* value = element_dictionary->ValueAt(i);
- if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate,
- js_object);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- element_dictionary->ValueAtPut(i, result);
- }
- }
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- return copy;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CloneLiteralBoilerplate) {
- CONVERT_CHECKED(JSObject, boilerplate, args[0]);
- return DeepCopyBoilerplate(isolate, boilerplate);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CloneShallowLiteralBoilerplate) {
- CONVERT_CHECKED(JSObject, boilerplate, args[0]);
- return isolate->heap()->CopyJSObject(boilerplate);
-}
-
-
-static Handle<Map> ComputeObjectLiteralMap(
- Handle<Context> context,
- Handle<FixedArray> constant_properties,
- bool* is_result_from_cache) {
- Isolate* isolate = context->GetIsolate();
- int properties_length = constant_properties->length();
- int number_of_properties = properties_length / 2;
- if (FLAG_canonicalize_object_literal_maps) {
- // Check that there are only symbols and array indices among keys.
- int number_of_symbol_keys = 0;
- for (int p = 0; p != properties_length; p += 2) {
- Object* key = constant_properties->get(p);
- uint32_t element_index = 0;
- if (key->IsSymbol()) {
- number_of_symbol_keys++;
- } else if (key->ToArrayIndex(&element_index)) {
- // An index key does not require space in the property backing store.
- number_of_properties--;
- } else {
- // Bail out as a non-symbol non-index key makes caching impossible.
- // ASSERT to make sure that the if condition after the loop is false.
- ASSERT(number_of_symbol_keys != number_of_properties);
- break;
- }
- }
- // If we only have symbols and array indices among keys then we can
- // use the map cache in the global context.
- const int kMaxKeys = 10;
- if ((number_of_symbol_keys == number_of_properties) &&
- (number_of_symbol_keys < kMaxKeys)) {
- // Create the fixed array with the key.
- Handle<FixedArray> keys =
- isolate->factory()->NewFixedArray(number_of_symbol_keys);
- if (number_of_symbol_keys > 0) {
- int index = 0;
- for (int p = 0; p < properties_length; p += 2) {
- Object* key = constant_properties->get(p);
- if (key->IsSymbol()) {
- keys->set(index++, key);
- }
- }
- ASSERT(index == number_of_symbol_keys);
- }
- *is_result_from_cache = true;
- return isolate->factory()->ObjectLiteralMapFromCache(context, keys);
- }
- }
- *is_result_from_cache = false;
- return isolate->factory()->CopyMap(
- Handle<Map>(context->object_function()->initial_map()),
- number_of_properties);
-}
-
-
-static Handle<Object> CreateLiteralBoilerplate(
- Isolate* isolate,
- Handle<FixedArray> literals,
- Handle<FixedArray> constant_properties);
-
-
-static Handle<Object> CreateObjectLiteralBoilerplate(
- Isolate* isolate,
- Handle<FixedArray> literals,
- Handle<FixedArray> constant_properties,
- bool should_have_fast_elements,
- bool has_function_literal) {
- // Get the global context from the literals array. This is the
- // context in which the function was created and we use the object
- // function from this context to create the object literal. We do
- // not use the object function from the current global context
- // because this might be the object function from another context
- // which we should not have access to.
- Handle<Context> context =
- Handle<Context>(JSFunction::GlobalContextFromLiterals(*literals));
-
- // In case we have function literals, we want the object to be in
- // slow properties mode for now. We don't go in the map cache because
- // maps with constant functions can't be shared if the functions are
- // not the same (which is the common case).
- bool is_result_from_cache = false;
- Handle<Map> map = has_function_literal
- ? Handle<Map>(context->object_function()->initial_map())
- : ComputeObjectLiteralMap(context,
- constant_properties,
- &is_result_from_cache);
-
- Handle<JSObject> boilerplate = isolate->factory()->NewJSObjectFromMap(map);
-
- // Normalize the elements of the boilerplate to save space if needed.
- if (!should_have_fast_elements) NormalizeElements(boilerplate);
-
- // Add the constant properties to the boilerplate.
- int length = constant_properties->length();
- bool should_transform =
- !is_result_from_cache && boilerplate->HasFastProperties();
- if (should_transform || has_function_literal) {
- // Normalize the properties of object to avoid n^2 behavior
- // when extending the object multiple properties. Indicate the number of
- // properties to be added.
- NormalizeProperties(boilerplate, KEEP_INOBJECT_PROPERTIES, length / 2);
- }
-
- for (int index = 0; index < length; index +=2) {
- Handle<Object> key(constant_properties->get(index+0), isolate);
- Handle<Object> value(constant_properties->get(index+1), isolate);
- if (value->IsFixedArray()) {
- // The value contains the constant_properties of a
- // simple object or array literal.
- Handle<FixedArray> array = Handle<FixedArray>::cast(value);
- value = CreateLiteralBoilerplate(isolate, literals, array);
- if (value.is_null()) return value;
- }
- Handle<Object> result;
- uint32_t element_index = 0;
- if (key->IsSymbol()) {
- if (Handle<String>::cast(key)->AsArrayIndex(&element_index)) {
- // Array index as string (uint32).
- result = SetOwnElement(boilerplate,
- element_index,
- value,
- kNonStrictMode);
- } else {
- Handle<String> name(String::cast(*key));
- ASSERT(!name->AsArrayIndex(&element_index));
- result = SetLocalPropertyIgnoreAttributes(boilerplate, name,
- value, NONE);
- }
- } else if (key->ToArrayIndex(&element_index)) {
- // Array index (uint32).
- result = SetOwnElement(boilerplate,
- element_index,
- value,
- kNonStrictMode);
- } else {
- // Non-uint32 number.
- ASSERT(key->IsNumber());
- double num = key->Number();
- char arr[100];
- Vector<char> buffer(arr, ARRAY_SIZE(arr));
- const char* str = DoubleToCString(num, buffer);
- Handle<String> name =
- isolate->factory()->NewStringFromAscii(CStrVector(str));
- result = SetLocalPropertyIgnoreAttributes(boilerplate, name,
- value, NONE);
- }
- // If setting the property on the boilerplate throws an
- // exception, the exception is converted to an empty handle in
- // the handle based operations. In that case, we need to
- // convert back to an exception.
- if (result.is_null()) return result;
- }
-
- // Transform to fast properties if necessary. For object literals with
- // containing function literals we defer this operation until after all
- // computed properties have been assigned so that we can generate
- // constant function properties.
- if (should_transform && !has_function_literal) {
- TransformToFastProperties(boilerplate,
- boilerplate->map()->unused_property_fields());
- }
-
- return boilerplate;
-}
-
-
-static Handle<Object> CreateArrayLiteralBoilerplate(
- Isolate* isolate,
- Handle<FixedArray> literals,
- Handle<FixedArray> elements) {
- // Create the JSArray.
- Handle<JSFunction> constructor(
- JSFunction::GlobalContextFromLiterals(*literals)->array_function());
- Handle<Object> object = isolate->factory()->NewJSObject(constructor);
-
- const bool is_cow =
- (elements->map() == isolate->heap()->fixed_cow_array_map());
- Handle<FixedArray> copied_elements =
- is_cow ? elements : isolate->factory()->CopyFixedArray(elements);
-
- Handle<FixedArray> content = Handle<FixedArray>::cast(copied_elements);
- if (is_cow) {
-#ifdef DEBUG
- // Copy-on-write arrays must be shallow (and simple).
- for (int i = 0; i < content->length(); i++) {
- ASSERT(!content->get(i)->IsFixedArray());
- }
-#endif
- } else {
- for (int i = 0; i < content->length(); i++) {
- if (content->get(i)->IsFixedArray()) {
- // The value contains the constant_properties of a
- // simple object or array literal.
- Handle<FixedArray> fa(FixedArray::cast(content->get(i)));
- Handle<Object> result =
- CreateLiteralBoilerplate(isolate, literals, fa);
- if (result.is_null()) return result;
- content->set(i, *result);
- }
- }
- }
-
- // Set the elements.
- Handle<JSArray>::cast(object)->SetContent(*content);
- return object;
-}
-
-
-static Handle<Object> CreateLiteralBoilerplate(
- Isolate* isolate,
- Handle<FixedArray> literals,
- Handle<FixedArray> array) {
- Handle<FixedArray> elements = CompileTimeValue::GetElements(array);
- const bool kHasNoFunctionLiteral = false;
- switch (CompileTimeValue::GetType(array)) {
- case CompileTimeValue::OBJECT_LITERAL_FAST_ELEMENTS:
- return CreateObjectLiteralBoilerplate(isolate,
- literals,
- elements,
- true,
- kHasNoFunctionLiteral);
- case CompileTimeValue::OBJECT_LITERAL_SLOW_ELEMENTS:
- return CreateObjectLiteralBoilerplate(isolate,
- literals,
- elements,
- false,
- kHasNoFunctionLiteral);
- case CompileTimeValue::ARRAY_LITERAL:
- return CreateArrayLiteralBoilerplate(isolate, literals, elements);
- default:
- UNREACHABLE();
- return Handle<Object>::null();
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralBoilerplate) {
- // Takes a FixedArray of elements containing the literal elements of
- // the array literal and produces JSArray with those elements.
- // Additionally takes the literals array of the surrounding function
- // which contains the context from which to get the Array function
- // to use for creating the array literal.
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_CHECKED(literals_index, args[1]);
- CONVERT_ARG_CHECKED(FixedArray, elements, 2);
-
- Handle<Object> object =
- CreateArrayLiteralBoilerplate(isolate, literals, elements);
- if (object.is_null()) return Failure::Exception();
-
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *object);
- return *object;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteral) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 4);
- CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_CHECKED(literals_index, args[1]);
- CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
- CONVERT_SMI_CHECKED(flags, args[3]);
- bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
- bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
-
- // Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index), isolate);
- if (*boilerplate == isolate->heap()->undefined_value()) {
- boilerplate = CreateObjectLiteralBoilerplate(isolate,
- literals,
- constant_properties,
- should_have_fast_elements,
- has_function_literal);
- if (boilerplate.is_null()) return Failure::Exception();
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *boilerplate);
- }
- return DeepCopyBoilerplate(isolate, JSObject::cast(*boilerplate));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteralShallow) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 4);
- CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_CHECKED(literals_index, args[1]);
- CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
- CONVERT_SMI_CHECKED(flags, args[3]);
- bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
- bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
-
- // Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index), isolate);
- if (*boilerplate == isolate->heap()->undefined_value()) {
- boilerplate = CreateObjectLiteralBoilerplate(isolate,
- literals,
- constant_properties,
- should_have_fast_elements,
- has_function_literal);
- if (boilerplate.is_null()) return Failure::Exception();
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *boilerplate);
- }
- return isolate->heap()->CopyJSObject(JSObject::cast(*boilerplate));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_CHECKED(literals_index, args[1]);
- CONVERT_ARG_CHECKED(FixedArray, elements, 2);
-
- // Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index), isolate);
- if (*boilerplate == isolate->heap()->undefined_value()) {
- boilerplate = CreateArrayLiteralBoilerplate(isolate, literals, elements);
- if (boilerplate.is_null()) return Failure::Exception();
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *boilerplate);
- }
- return DeepCopyBoilerplate(isolate, JSObject::cast(*boilerplate));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralShallow) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_CHECKED(literals_index, args[1]);
- CONVERT_ARG_CHECKED(FixedArray, elements, 2);
-
- // Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index), isolate);
- if (*boilerplate == isolate->heap()->undefined_value()) {
- boilerplate = CreateArrayLiteralBoilerplate(isolate, literals, elements);
- if (boilerplate.is_null()) return Failure::Exception();
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *boilerplate);
- }
- if (JSObject::cast(*boilerplate)->elements()->map() ==
- isolate->heap()->fixed_cow_array_map()) {
- isolate->counters()->cow_arrays_created_runtime()->Increment();
- }
- return isolate->heap()->CopyJSObject(JSObject::cast(*boilerplate));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateCatchExtensionObject) {
- ASSERT(args.length() == 2);
- CONVERT_CHECKED(String, key, args[0]);
- Object* value = args[1];
- // Create a catch context extension object.
- JSFunction* constructor =
- isolate->context()->global_context()->
- context_extension_function();
- Object* object;
- { MaybeObject* maybe_object = isolate->heap()->AllocateJSObject(constructor);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- // Assign the exception value to the catch variable and make sure
- // that the catch variable is DontDelete.
- { MaybeObject* maybe_value =
- // Passing non-strict per ECMA-262 5th Ed. 12.14. Catch, bullet #4.
- JSObject::cast(object)->SetProperty(
- key, value, DONT_DELETE, kNonStrictMode);
- if (!maybe_value->ToObject(&value)) return maybe_value;
- }
- return object;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ClassOf) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- Object* obj = args[0];
- if (!obj->IsJSObject()) return isolate->heap()->null_value();
- return JSObject::cast(obj)->class_name();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
- // See ECMA-262, section 15.3.5.3, page 88 (steps 5 - 8).
- Object* O = args[0];
- Object* V = args[1];
- while (true) {
- Object* prototype = V->GetPrototype();
- if (prototype->IsNull()) return isolate->heap()->false_value();
- if (O == prototype) return isolate->heap()->true_value();
- V = prototype;
- }
-}
-
-
-// Inserts an object as the hidden prototype of another object.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHiddenPrototype) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
- CONVERT_CHECKED(JSObject, jsobject, args[0]);
- CONVERT_CHECKED(JSObject, proto, args[1]);
-
- // Sanity checks. The old prototype (that we are replacing) could
- // theoretically be null, but if it is not null then check that we
- // didn't already install a hidden prototype here.
- RUNTIME_ASSERT(!jsobject->GetPrototype()->IsHeapObject() ||
- !HeapObject::cast(jsobject->GetPrototype())->map()->is_hidden_prototype());
- RUNTIME_ASSERT(!proto->map()->is_hidden_prototype());
-
- // Allocate up front before we start altering state in case we get a GC.
- Object* map_or_failure;
- { MaybeObject* maybe_map_or_failure = proto->map()->CopyDropTransitions();
- if (!maybe_map_or_failure->ToObject(&map_or_failure)) {
- return maybe_map_or_failure;
- }
- }
- Map* new_proto_map = Map::cast(map_or_failure);
-
- { MaybeObject* maybe_map_or_failure = jsobject->map()->CopyDropTransitions();
- if (!maybe_map_or_failure->ToObject(&map_or_failure)) {
- return maybe_map_or_failure;
- }
- }
- Map* new_map = Map::cast(map_or_failure);
-
- // Set proto's prototype to be the old prototype of the object.
- new_proto_map->set_prototype(jsobject->GetPrototype());
- proto->set_map(new_proto_map);
- new_proto_map->set_is_hidden_prototype();
-
- // Set the object's prototype to proto.
- new_map->set_prototype(proto);
- jsobject->set_map(new_map);
-
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsConstructCall) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 0);
- JavaScriptFrameIterator it(isolate);
- return isolate->heap()->ToBoolean(it.frame()->IsConstructor());
-}
-
-
-// Recursively traverses hidden prototypes if property is not found
-static void GetOwnPropertyImplementation(JSObject* obj,
- String* name,
- LookupResult* result) {
- obj->LocalLookupRealNamedProperty(name, result);
-
- if (!result->IsProperty()) {
- Object* proto = obj->GetPrototype();
- if (proto->IsJSObject() &&
- JSObject::cast(proto)->map()->is_hidden_prototype())
- GetOwnPropertyImplementation(JSObject::cast(proto),
- name, result);
- }
-}
-
-
-static bool CheckAccessException(LookupResult* result,
- v8::AccessType access_type) {
- if (result->type() == CALLBACKS) {
- Object* callback = result->GetCallbackObject();
- if (callback->IsAccessorInfo()) {
- AccessorInfo* info = AccessorInfo::cast(callback);
- bool can_access =
- (access_type == v8::ACCESS_HAS &&
- (info->all_can_read() || info->all_can_write())) ||
- (access_type == v8::ACCESS_GET && info->all_can_read()) ||
- (access_type == v8::ACCESS_SET && info->all_can_write());
- return can_access;
- }
- }
-
- return false;
-}
-
-
-static bool CheckAccess(JSObject* obj,
- String* name,
- LookupResult* result,
- v8::AccessType access_type) {
- ASSERT(result->IsProperty());
-
- JSObject* holder = result->holder();
- JSObject* current = obj;
- Isolate* isolate = obj->GetIsolate();
- while (true) {
- if (current->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(current, name, access_type)) {
- // Access check callback denied the access, but some properties
- // can have a special permissions which override callbacks descision
- // (currently see v8::AccessControl).
- break;
- }
-
- if (current == holder) {
- return true;
- }
-
- current = JSObject::cast(current->GetPrototype());
- }
-
- // API callbacks can have per callback access exceptions.
- switch (result->type()) {
- case CALLBACKS: {
- if (CheckAccessException(result, access_type)) {
- return true;
- }
- break;
- }
- case INTERCEPTOR: {
- // If the object has an interceptor, try real named properties.
- // Overwrite the result to fetch the correct property later.
- holder->LookupRealNamedProperty(name, result);
- if (result->IsProperty()) {
- if (CheckAccessException(result, access_type)) {
- return true;
- }
- }
- break;
- }
- default:
- break;
- }
-
- isolate->ReportFailedAccessCheck(current, access_type);
- return false;
-}
-
-
-// TODO(1095): we should traverse hidden prototype hierachy as well.
-static bool CheckElementAccess(JSObject* obj,
- uint32_t index,
- v8::AccessType access_type) {
- if (obj->IsAccessCheckNeeded() &&
- !obj->GetIsolate()->MayIndexedAccess(obj, index, access_type)) {
- return false;
- }
-
- return true;
-}
-
-
-// Enumerator used as indices into the array returned from GetOwnProperty
-enum PropertyDescriptorIndices {
- IS_ACCESSOR_INDEX,
- VALUE_INDEX,
- GETTER_INDEX,
- SETTER_INDEX,
- WRITABLE_INDEX,
- ENUMERABLE_INDEX,
- CONFIGURABLE_INDEX,
- DESCRIPTOR_SIZE
-};
-
-// Returns an array with the property description:
-// if args[1] is not a property on args[0]
-// returns undefined
-// if args[1] is a data property on args[0]
-// [false, value, Writeable, Enumerable, Configurable]
-// if args[1] is an accessor on args[0]
-// [true, GetFunction, SetFunction, Enumerable, Configurable]
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) {
- ASSERT(args.length() == 2);
- Heap* heap = isolate->heap();
- HandleScope scope(isolate);
- Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE);
- Handle<JSArray> desc = isolate->factory()->NewJSArrayWithElements(elms);
- LookupResult result;
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
- CONVERT_ARG_CHECKED(String, name, 1);
-
- // This could be an element.
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- switch (obj->HasLocalElement(index)) {
- case JSObject::UNDEFINED_ELEMENT:
- return heap->undefined_value();
-
- case JSObject::STRING_CHARACTER_ELEMENT: {
- // Special handling of string objects according to ECMAScript 5
- // 15.5.5.2. Note that this might be a string object with elements
- // other than the actual string value. This is covered by the
- // subsequent cases.
- Handle<JSValue> js_value = Handle<JSValue>::cast(obj);
- Handle<String> str(String::cast(js_value->value()));
- Handle<String> substr = SubString(str, index, index + 1, NOT_TENURED);
-
- elms->set(IS_ACCESSOR_INDEX, heap->false_value());
- elms->set(VALUE_INDEX, *substr);
- elms->set(WRITABLE_INDEX, heap->false_value());
- elms->set(ENUMERABLE_INDEX, heap->false_value());
- elms->set(CONFIGURABLE_INDEX, heap->false_value());
- return *desc;
- }
-
- case JSObject::INTERCEPTED_ELEMENT:
- case JSObject::FAST_ELEMENT: {
- elms->set(IS_ACCESSOR_INDEX, heap->false_value());
- Handle<Object> value = GetElement(obj, index);
- RETURN_IF_EMPTY_HANDLE(isolate, value);
- elms->set(VALUE_INDEX, *value);
- elms->set(WRITABLE_INDEX, heap->true_value());
- elms->set(ENUMERABLE_INDEX, heap->true_value());
- elms->set(CONFIGURABLE_INDEX, heap->true_value());
- return *desc;
- }
-
- case JSObject::DICTIONARY_ELEMENT: {
- Handle<JSObject> holder = obj;
- if (obj->IsJSGlobalProxy()) {
- Object* proto = obj->GetPrototype();
- if (proto->IsNull()) return heap->undefined_value();
- ASSERT(proto->IsJSGlobalObject());
- holder = Handle<JSObject>(JSObject::cast(proto));
- }
- NumberDictionary* dictionary = holder->element_dictionary();
- int entry = dictionary->FindEntry(index);
- ASSERT(entry != NumberDictionary::kNotFound);
- PropertyDetails details = dictionary->DetailsAt(entry);
- switch (details.type()) {
- case CALLBACKS: {
- // This is an accessor property with getter and/or setter.
- FixedArray* callbacks =
- FixedArray::cast(dictionary->ValueAt(entry));
- elms->set(IS_ACCESSOR_INDEX, heap->true_value());
- if (CheckElementAccess(*obj, index, v8::ACCESS_GET)) {
- elms->set(GETTER_INDEX, callbacks->get(0));
- }
- if (CheckElementAccess(*obj, index, v8::ACCESS_SET)) {
- elms->set(SETTER_INDEX, callbacks->get(1));
- }
- break;
- }
- case NORMAL: {
- // This is a data property.
- elms->set(IS_ACCESSOR_INDEX, heap->false_value());
- Handle<Object> value = GetElement(obj, index);
- ASSERT(!value.is_null());
- elms->set(VALUE_INDEX, *value);
- elms->set(WRITABLE_INDEX, heap->ToBoolean(!details.IsReadOnly()));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- elms->set(ENUMERABLE_INDEX, heap->ToBoolean(!details.IsDontEnum()));
- elms->set(CONFIGURABLE_INDEX, heap->ToBoolean(!details.IsDontDelete()));
- return *desc;
- }
- }
- }
-
- // Use recursive implementation to also traverse hidden prototypes
- GetOwnPropertyImplementation(*obj, *name, &result);
-
- if (!result.IsProperty()) {
- return heap->undefined_value();
- }
-
- if (!CheckAccess(*obj, *name, &result, v8::ACCESS_HAS)) {
- return heap->false_value();
- }
-
- elms->set(ENUMERABLE_INDEX, heap->ToBoolean(!result.IsDontEnum()));
- elms->set(CONFIGURABLE_INDEX, heap->ToBoolean(!result.IsDontDelete()));
-
- bool is_js_accessor = (result.type() == CALLBACKS) &&
- (result.GetCallbackObject()->IsFixedArray());
-
- if (is_js_accessor) {
- // __defineGetter__/__defineSetter__ callback.
- elms->set(IS_ACCESSOR_INDEX, heap->true_value());
-
- FixedArray* structure = FixedArray::cast(result.GetCallbackObject());
- if (CheckAccess(*obj, *name, &result, v8::ACCESS_GET)) {
- elms->set(GETTER_INDEX, structure->get(0));
- }
- if (CheckAccess(*obj, *name, &result, v8::ACCESS_SET)) {
- elms->set(SETTER_INDEX, structure->get(1));
- }
- } else {
- elms->set(IS_ACCESSOR_INDEX, heap->false_value());
- elms->set(WRITABLE_INDEX, heap->ToBoolean(!result.IsReadOnly()));
-
- PropertyAttributes attrs;
- Object* value;
- // GetProperty will check access and report any violations.
- { MaybeObject* maybe_value = obj->GetProperty(*obj, &result, *name, &attrs);
- if (!maybe_value->ToObject(&value)) return maybe_value;
- }
- elms->set(VALUE_INDEX, value);
- }
-
- return *desc;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PreventExtensions) {
- ASSERT(args.length() == 1);
- CONVERT_CHECKED(JSObject, obj, args[0]);
- return obj->PreventExtensions();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsExtensible) {
- ASSERT(args.length() == 1);
- CONVERT_CHECKED(JSObject, obj, args[0]);
- if (obj->IsJSGlobalProxy()) {
- Object* proto = obj->GetPrototype();
- if (proto->IsNull()) return isolate->heap()->false_value();
- ASSERT(proto->IsJSGlobalObject());
- obj = JSObject::cast(proto);
- }
- return obj->map()->is_extensible() ? isolate->heap()->true_value()
- : isolate->heap()->false_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpCompile) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(JSRegExp, re, 0);
- CONVERT_ARG_CHECKED(String, pattern, 1);
- CONVERT_ARG_CHECKED(String, flags, 2);
- Handle<Object> result = RegExpImpl::Compile(re, pattern, flags);
- if (result.is_null()) return Failure::Exception();
- return *result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateApiFunction) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(FunctionTemplateInfo, data, 0);
- return *isolate->factory()->CreateApiFunction(data);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsTemplate) {
- ASSERT(args.length() == 1);
- Object* arg = args[0];
- bool result = arg->IsObjectTemplateInfo() || arg->IsFunctionTemplateInfo();
- return isolate->heap()->ToBoolean(result);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetTemplateField) {
- ASSERT(args.length() == 2);
- CONVERT_CHECKED(HeapObject, templ, args[0]);
- CONVERT_CHECKED(Smi, field, args[1]);
- int index = field->value();
- int offset = index * kPointerSize + HeapObject::kHeaderSize;
- InstanceType type = templ->map()->instance_type();
- RUNTIME_ASSERT(type == FUNCTION_TEMPLATE_INFO_TYPE ||
- type == OBJECT_TEMPLATE_INFO_TYPE);
- RUNTIME_ASSERT(offset > 0);
- if (type == FUNCTION_TEMPLATE_INFO_TYPE) {
- RUNTIME_ASSERT(offset < FunctionTemplateInfo::kSize);
- } else {
- RUNTIME_ASSERT(offset < ObjectTemplateInfo::kSize);
- }
- return *HeapObject::RawField(templ, offset);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DisableAccessChecks) {
- ASSERT(args.length() == 1);
- CONVERT_CHECKED(HeapObject, object, args[0]);
- Map* old_map = object->map();
- bool needs_access_checks = old_map->is_access_check_needed();
- if (needs_access_checks) {
- // Copy map so it won't interfere constructor's initial map.
- Object* new_map;
- { MaybeObject* maybe_new_map = old_map->CopyDropTransitions();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- }
-
- Map::cast(new_map)->set_is_access_check_needed(false);
- object->set_map(Map::cast(new_map));
- }
- return needs_access_checks ? isolate->heap()->true_value()
- : isolate->heap()->false_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_EnableAccessChecks) {
- ASSERT(args.length() == 1);
- CONVERT_CHECKED(HeapObject, object, args[0]);
- Map* old_map = object->map();
- if (!old_map->is_access_check_needed()) {
- // Copy map so it won't interfere constructor's initial map.
- Object* new_map;
- { MaybeObject* maybe_new_map = old_map->CopyDropTransitions();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- }
-
- Map::cast(new_map)->set_is_access_check_needed(true);
- object->set_map(Map::cast(new_map));
- }
- return isolate->heap()->undefined_value();
-}
-
-
-static Failure* ThrowRedeclarationError(Isolate* isolate,
- const char* type,
- Handle<String> name) {
- HandleScope scope(isolate);
- Handle<Object> type_handle =
- isolate->factory()->NewStringFromAscii(CStrVector(type));
- Handle<Object> args[2] = { type_handle, name };
- Handle<Object> error =
- isolate->factory()->NewTypeError("redeclaration", HandleVector(args, 2));
- return isolate->Throw(*error);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
- ASSERT(args.length() == 4);
- HandleScope scope(isolate);
- Handle<GlobalObject> global = Handle<GlobalObject>(
- isolate->context()->global());
-
- Handle<Context> context = args.at<Context>(0);
- CONVERT_ARG_CHECKED(FixedArray, pairs, 1);
- bool is_eval = Smi::cast(args[2])->value() == 1;
- StrictModeFlag strict_mode =
- static_cast<StrictModeFlag>(Smi::cast(args[3])->value());
- ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
-
- // Compute the property attributes. According to ECMA-262, section
- // 13, page 71, the property must be read-only and
- // non-deletable. However, neither SpiderMonkey nor KJS creates the
- // property as read-only, so we don't either.
- PropertyAttributes base = is_eval ? NONE : DONT_DELETE;
-
- // Traverse the name/value pairs and set the properties.
- int length = pairs->length();
- for (int i = 0; i < length; i += 2) {
- HandleScope scope(isolate);
- Handle<String> name(String::cast(pairs->get(i)));
- Handle<Object> value(pairs->get(i + 1), isolate);
-
- // We have to declare a global const property. To capture we only
- // assign to it when evaluating the assignment for "const x =
- // <expr>" the initial value is the hole.
- bool is_const_property = value->IsTheHole();
-
- if (value->IsUndefined() || is_const_property) {
- // Lookup the property in the global object, and don't set the
- // value of the variable if the property is already there.
- LookupResult lookup;
- global->Lookup(*name, &lookup);
- if (lookup.IsProperty()) {
- // Determine if the property is local by comparing the holder
- // against the global object. The information will be used to
- // avoid throwing re-declaration errors when declaring
- // variables or constants that exist in the prototype chain.
- bool is_local = (*global == lookup.holder());
- // Get the property attributes and determine if the property is
- // read-only.
- PropertyAttributes attributes = global->GetPropertyAttribute(*name);
- bool is_read_only = (attributes & READ_ONLY) != 0;
- if (lookup.type() == INTERCEPTOR) {
- // If the interceptor says the property is there, we
- // just return undefined without overwriting the property.
- // Otherwise, we continue to setting the property.
- if (attributes != ABSENT) {
- // Check if the existing property conflicts with regards to const.
- if (is_local && (is_read_only || is_const_property)) {
- const char* type = (is_read_only) ? "const" : "var";
- return ThrowRedeclarationError(isolate, type, name);
- };
- // The property already exists without conflicting: Go to
- // the next declaration.
- continue;
- }
- // Fall-through and introduce the absent property by using
- // SetProperty.
- } else {
- // For const properties, we treat a callback with this name
- // even in the prototype as a conflicting declaration.
- if (is_const_property && (lookup.type() == CALLBACKS)) {
- return ThrowRedeclarationError(isolate, "const", name);
- }
- // Otherwise, we check for locally conflicting declarations.
- if (is_local && (is_read_only || is_const_property)) {
- const char* type = (is_read_only) ? "const" : "var";
- return ThrowRedeclarationError(isolate, type, name);
- }
- // The property already exists without conflicting: Go to
- // the next declaration.
- continue;
- }
- }
- } else {
- // Copy the function and update its context. Use it as value.
- Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>::cast(value);
- Handle<JSFunction> function =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
- context,
- TENURED);
- value = function;
- }
-
- LookupResult lookup;
- global->LocalLookup(*name, &lookup);
-
- PropertyAttributes attributes = is_const_property
- ? static_cast<PropertyAttributes>(base | READ_ONLY)
- : base;
-
- // There's a local property that we need to overwrite because
- // we're either declaring a function or there's an interceptor
- // that claims the property is absent.
- //
- // Check for conflicting re-declarations. We cannot have
- // conflicting types in case of intercepted properties because
- // they are absent.
- if (lookup.IsProperty() &&
- (lookup.type() != INTERCEPTOR) &&
- (lookup.IsReadOnly() || is_const_property)) {
- const char* type = (lookup.IsReadOnly()) ? "const" : "var";
- return ThrowRedeclarationError(isolate, type, name);
- }
-
- // Safari does not allow the invocation of callback setters for
- // function declarations. To mimic this behavior, we do not allow
- // the invocation of setters for function values. This makes a
- // difference for global functions with the same names as event
- // handlers such as "function onload() {}". Firefox does call the
- // onload setter in those case and Safari does not. We follow
- // Safari for compatibility.
- if (value->IsJSFunction()) {
- // Do not change DONT_DELETE to false from true.
- if (lookup.IsProperty() && (lookup.type() != INTERCEPTOR)) {
- attributes = static_cast<PropertyAttributes>(
- attributes | (lookup.GetAttributes() & DONT_DELETE));
- }
- RETURN_IF_EMPTY_HANDLE(isolate,
- SetLocalPropertyIgnoreAttributes(global,
- name,
- value,
- attributes));
- } else {
- RETURN_IF_EMPTY_HANDLE(isolate,
- SetProperty(global,
- name,
- value,
- attributes,
- strict_mode));
- }
- }
-
- ASSERT(!isolate->has_pending_exception());
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 4);
-
- CONVERT_ARG_CHECKED(Context, context, 0);
- Handle<String> name(String::cast(args[1]));
- PropertyAttributes mode =
- static_cast<PropertyAttributes>(Smi::cast(args[2])->value());
- RUNTIME_ASSERT(mode == READ_ONLY || mode == NONE);
- Handle<Object> initial_value(args[3], isolate);
-
- // Declarations are always done in the function context.
- context = Handle<Context>(context->fcontext());
-
- int index;
- PropertyAttributes attributes;
- ContextLookupFlags flags = DONT_FOLLOW_CHAINS;
- Handle<Object> holder =
- context->Lookup(name, flags, &index, &attributes);
-
- if (attributes != ABSENT) {
- // The name was declared before; check for conflicting
- // re-declarations: This is similar to the code in parser.cc in
- // the AstBuildingParser::Declare function.
- if (((attributes & READ_ONLY) != 0) || (mode == READ_ONLY)) {
- // Functions are not read-only.
- ASSERT(mode != READ_ONLY || initial_value->IsTheHole());
- const char* type = ((attributes & READ_ONLY) != 0) ? "const" : "var";
- return ThrowRedeclarationError(isolate, type, name);
- }
-
- // Initialize it if necessary.
- if (*initial_value != NULL) {
- if (index >= 0) {
- // The variable or constant context slot should always be in
- // the function context or the arguments object.
- if (holder->IsContext()) {
- ASSERT(holder.is_identical_to(context));
- if (((attributes & READ_ONLY) == 0) ||
- context->get(index)->IsTheHole()) {
- context->set(index, *initial_value);
- }
- } else {
- // The holder is an arguments object.
- Handle<JSObject> arguments(Handle<JSObject>::cast(holder));
- Handle<Object> result = SetElement(arguments, index, initial_value,
- kNonStrictMode);
- if (result.is_null()) return Failure::Exception();
- }
- } else {
- // Slow case: The property is not in the FixedArray part of the context.
- Handle<JSObject> context_ext = Handle<JSObject>::cast(holder);
- RETURN_IF_EMPTY_HANDLE(
- isolate,
- SetProperty(context_ext, name, initial_value,
- mode, kNonStrictMode));
- }
- }
-
- } else {
- // The property is not in the function context. It needs to be
- // "declared" in the function context's extension context, or in the
- // global context.
- Handle<JSObject> context_ext;
- if (context->has_extension()) {
- // The function context's extension context exists - use it.
- context_ext = Handle<JSObject>(context->extension());
- } else {
- // The function context's extension context does not exists - allocate
- // it.
- context_ext = isolate->factory()->NewJSObject(
- isolate->context_extension_function());
- // And store it in the extension slot.
- context->set_extension(*context_ext);
- }
- ASSERT(*context_ext != NULL);
-
- // Declare the property by setting it to the initial value if provided,
- // or undefined, and use the correct mode (e.g. READ_ONLY attribute for
- // constant declarations).
- ASSERT(!context_ext->HasLocalProperty(*name));
- Handle<Object> value(isolate->heap()->undefined_value(), isolate);
- if (*initial_value != NULL) value = initial_value;
- // Declaring a const context slot is a conflicting declaration if
- // there is a callback with that name in a prototype. It is
- // allowed to introduce const variables in
- // JSContextExtensionObjects. They are treated specially in
- // SetProperty and no setters are invoked for those since they are
- // not real JSObjects.
- if (initial_value->IsTheHole() &&
- !context_ext->IsJSContextExtensionObject()) {
- LookupResult lookup;
- context_ext->Lookup(*name, &lookup);
- if (lookup.IsProperty() && (lookup.type() == CALLBACKS)) {
- return ThrowRedeclarationError(isolate, "const", name);
- }
- }
- RETURN_IF_EMPTY_HANDLE(isolate,
- SetProperty(context_ext, name, value, mode,
- kNonStrictMode));
- }
-
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
- NoHandleAllocation nha;
- // args[0] == name
- // args[1] == strict_mode
- // args[2] == value (optional)
-
- // Determine if we need to assign to the variable if it already
- // exists (based on the number of arguments).
- RUNTIME_ASSERT(args.length() == 2 || args.length() == 3);
- bool assign = args.length() == 3;
-
- CONVERT_ARG_CHECKED(String, name, 0);
- GlobalObject* global = isolate->context()->global();
- RUNTIME_ASSERT(args[1]->IsSmi());
- StrictModeFlag strict_mode =
- static_cast<StrictModeFlag>(Smi::cast(args[1])->value());
- ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
-
- // According to ECMA-262, section 12.2, page 62, the property must
- // not be deletable.
- PropertyAttributes attributes = DONT_DELETE;
-
- // Lookup the property locally in the global object. If it isn't
- // there, there is a property with this name in the prototype chain.
- // We follow Safari and Firefox behavior and only set the property
- // locally if there is an explicit initialization value that we have
- // to assign to the property.
- // Note that objects can have hidden prototypes, so we need to traverse
- // the whole chain of hidden prototypes to do a 'local' lookup.
- JSObject* real_holder = global;
- LookupResult lookup;
- while (true) {
- real_holder->LocalLookup(*name, &lookup);
- if (lookup.IsProperty()) {
- // Determine if this is a redeclaration of something read-only.
- if (lookup.IsReadOnly()) {
- // If we found readonly property on one of hidden prototypes,
- // just shadow it.
- if (real_holder != isolate->context()->global()) break;
- return ThrowRedeclarationError(isolate, "const", name);
- }
-
- // Determine if this is a redeclaration of an intercepted read-only
- // property and figure out if the property exists at all.
- bool found = true;
- PropertyType type = lookup.type();
- if (type == INTERCEPTOR) {
- HandleScope handle_scope(isolate);
- Handle<JSObject> holder(real_holder);
- PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
- real_holder = *holder;
- if (intercepted == ABSENT) {
- // The interceptor claims the property isn't there. We need to
- // make sure to introduce it.
- found = false;
- } else if ((intercepted & READ_ONLY) != 0) {
- // The property is present, but read-only. Since we're trying to
- // overwrite it with a variable declaration we must throw a
- // re-declaration error. However if we found readonly property
- // on one of hidden prototypes, just shadow it.
- if (real_holder != isolate->context()->global()) break;
- return ThrowRedeclarationError(isolate, "const", name);
- }
- }
-
- if (found && !assign) {
- // The global property is there and we're not assigning any value
- // to it. Just return.
- return isolate->heap()->undefined_value();
- }
-
- // Assign the value (or undefined) to the property.
- Object* value = (assign) ? args[2] : isolate->heap()->undefined_value();
- return real_holder->SetProperty(
- &lookup, *name, value, attributes, strict_mode);
- }
-
- Object* proto = real_holder->GetPrototype();
- if (!proto->IsJSObject())
- break;
-
- if (!JSObject::cast(proto)->map()->is_hidden_prototype())
- break;
-
- real_holder = JSObject::cast(proto);
- }
-
- global = isolate->context()->global();
- if (assign) {
- return global->SetProperty(*name, args[2], attributes, strict_mode);
- }
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
- // All constants are declared with an initial value. The name
- // of the constant is the first argument and the initial value
- // is the second.
- RUNTIME_ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(String, name, 0);
- Handle<Object> value = args.at<Object>(1);
-
- // Get the current global object from top.
- GlobalObject* global = isolate->context()->global();
-
- // According to ECMA-262, section 12.2, page 62, the property must
- // not be deletable. Since it's a const, it must be READ_ONLY too.
- PropertyAttributes attributes =
- static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
-
- // Lookup the property locally in the global object. If it isn't
- // there, we add the property and take special precautions to always
- // add it as a local property even in case of callbacks in the
- // prototype chain (this rules out using SetProperty).
- // We use SetLocalPropertyIgnoreAttributes instead
- LookupResult lookup;
- global->LocalLookup(*name, &lookup);
- if (!lookup.IsProperty()) {
- return global->SetLocalPropertyIgnoreAttributes(*name,
- *value,
- attributes);
- }
-
- // Determine if this is a redeclaration of something not
- // read-only. In case the result is hidden behind an interceptor we
- // need to ask it for the property attributes.
- if (!lookup.IsReadOnly()) {
- if (lookup.type() != INTERCEPTOR) {
- return ThrowRedeclarationError(isolate, "var", name);
- }
-
- PropertyAttributes intercepted = global->GetPropertyAttribute(*name);
-
- // Throw re-declaration error if the intercepted property is present
- // but not read-only.
- if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
- return ThrowRedeclarationError(isolate, "var", name);
- }
-
- // Restore global object from context (in case of GC) and continue
- // with setting the value because the property is either absent or
- // read-only. We also have to do redo the lookup.
- HandleScope handle_scope(isolate);
- Handle<GlobalObject> global(isolate->context()->global());
-
- // BUG 1213575: Handle the case where we have to set a read-only
- // property through an interceptor and only do it if it's
- // uninitialized, e.g. the hole. Nirk...
- // Passing non-strict mode because the property is writable.
- RETURN_IF_EMPTY_HANDLE(isolate,
- SetProperty(global,
- name,
- value,
- attributes,
- kNonStrictMode));
- return *value;
- }
-
- // Set the value, but only we're assigning the initial value to a
- // constant. For now, we determine this by checking if the
- // current value is the hole.
- // Strict mode handling not needed (const disallowed in strict mode).
- PropertyType type = lookup.type();
- if (type == FIELD) {
- FixedArray* properties = global->properties();
- int index = lookup.GetFieldIndex();
- if (properties->get(index)->IsTheHole()) {
- properties->set(index, *value);
- }
- } else if (type == NORMAL) {
- if (global->GetNormalizedProperty(&lookup)->IsTheHole()) {
- global->SetNormalizedProperty(&lookup, *value);
- }
- } else {
- // Ignore re-initialization of constants that have already been
- // assigned a function value.
- ASSERT(lookup.IsReadOnly() && type == CONSTANT_FUNCTION);
- }
-
- // Use the set value as the result of the operation.
- return *value;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
-
- Handle<Object> value(args[0], isolate);
- ASSERT(!value->IsTheHole());
- CONVERT_ARG_CHECKED(Context, context, 1);
- Handle<String> name(String::cast(args[2]));
-
- // Initializations are always done in the function context.
- context = Handle<Context>(context->fcontext());
-
- int index;
- PropertyAttributes attributes;
- ContextLookupFlags flags = FOLLOW_CHAINS;
- Handle<Object> holder =
- context->Lookup(name, flags, &index, &attributes);
-
- // In most situations, the property introduced by the const
- // declaration should be present in the context extension object.
- // However, because declaration and initialization are separate, the
- // property might have been deleted (if it was introduced by eval)
- // before we reach the initialization point.
- //
- // Example:
- //
- // function f() { eval("delete x; const x;"); }
- //
- // In that case, the initialization behaves like a normal assignment
- // to property 'x'.
- if (index >= 0) {
- // Property was found in a context.
- if (holder->IsContext()) {
- // The holder cannot be the function context. If it is, there
- // should have been a const redeclaration error when declaring
- // the const property.
- ASSERT(!holder.is_identical_to(context));
- if ((attributes & READ_ONLY) == 0) {
- Handle<Context>::cast(holder)->set(index, *value);
- }
- } else {
- // The holder is an arguments object.
- ASSERT((attributes & READ_ONLY) == 0);
- Handle<JSObject> arguments(Handle<JSObject>::cast(holder));
- RETURN_IF_EMPTY_HANDLE(
- isolate,
- SetElement(arguments, index, value, kNonStrictMode));
- }
- return *value;
- }
-
- // The property could not be found, we introduce it in the global
- // context.
- if (attributes == ABSENT) {
- Handle<JSObject> global = Handle<JSObject>(
- isolate->context()->global());
- // Strict mode not needed (const disallowed in strict mode).
- RETURN_IF_EMPTY_HANDLE(
- isolate,
- SetProperty(global, name, value, NONE, kNonStrictMode));
- return *value;
- }
-
- // The property was present in a context extension object.
- Handle<JSObject> context_ext = Handle<JSObject>::cast(holder);
-
- if (*context_ext == context->extension()) {
- // This is the property that was introduced by the const
- // declaration. Set it if it hasn't been set before. NOTE: We
- // cannot use GetProperty() to get the current value as it
- // 'unholes' the value.
- LookupResult lookup;
- context_ext->LocalLookupRealNamedProperty(*name, &lookup);
- ASSERT(lookup.IsProperty()); // the property was declared
- ASSERT(lookup.IsReadOnly()); // and it was declared as read-only
-
- PropertyType type = lookup.type();
- if (type == FIELD) {
- FixedArray* properties = context_ext->properties();
- int index = lookup.GetFieldIndex();
- if (properties->get(index)->IsTheHole()) {
- properties->set(index, *value);
- }
- } else if (type == NORMAL) {
- if (context_ext->GetNormalizedProperty(&lookup)->IsTheHole()) {
- context_ext->SetNormalizedProperty(&lookup, *value);
- }
- } else {
- // We should not reach here. Any real, named property should be
- // either a field or a dictionary slot.
- UNREACHABLE();
- }
- } else {
- // The property was found in a different context extension object.
- // Set it if it is not a read-only property.
- if ((attributes & READ_ONLY) == 0) {
- // Strict mode not needed (const disallowed in strict mode).
- RETURN_IF_EMPTY_HANDLE(
- isolate,
- SetProperty(context_ext, name, value, attributes, kNonStrictMode));
- }
- }
-
- return *value;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*,
- Runtime_OptimizeObjectForAddingMultipleProperties) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSObject, object, 0);
- CONVERT_SMI_CHECKED(properties, args[1]);
- if (object->HasFastProperties()) {
- NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties);
- }
- return *object;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 4);
- CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
- CONVERT_ARG_CHECKED(String, subject, 1);
- // Due to the way the JS calls are constructed this must be less than the
- // length of a string, i.e. it is always a Smi. We check anyway for security.
- CONVERT_SMI_CHECKED(index, args[2]);
- CONVERT_ARG_CHECKED(JSArray, last_match_info, 3);
- RUNTIME_ASSERT(last_match_info->HasFastElements());
- RUNTIME_ASSERT(index >= 0);
- RUNTIME_ASSERT(index <= subject->length());
- isolate->counters()->regexp_entry_runtime()->Increment();
- Handle<Object> result = RegExpImpl::Exec(regexp,
- subject,
- index,
- last_match_info);
- if (result.is_null()) return Failure::Exception();
- return *result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
- ASSERT(args.length() == 3);
- CONVERT_SMI_CHECKED(elements_count, args[0]);
- if (elements_count > JSArray::kMaxFastElementsLength) {
- return isolate->ThrowIllegalOperation();
- }
- Object* new_object;
- { MaybeObject* maybe_new_object =
- isolate->heap()->AllocateFixedArrayWithHoles(elements_count);
- if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
- }
- FixedArray* elements = FixedArray::cast(new_object);
- { MaybeObject* maybe_new_object = isolate->heap()->AllocateRaw(
- JSRegExpResult::kSize, NEW_SPACE, OLD_POINTER_SPACE);
- if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
- }
- {
- AssertNoAllocation no_gc;
- HandleScope scope(isolate);
- reinterpret_cast<HeapObject*>(new_object)->
- set_map(isolate->global_context()->regexp_result_map());
- }
- JSArray* array = JSArray::cast(new_object);
- array->set_properties(isolate->heap()->empty_fixed_array());
- array->set_elements(elements);
- array->set_length(Smi::FromInt(elements_count));
- // Write in-object properties after the length of the array.
- array->InObjectPropertyAtPut(JSRegExpResult::kIndexIndex, args[1]);
- array->InObjectPropertyAtPut(JSRegExpResult::kInputIndex, args[2]);
- return array;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
- AssertNoAllocation no_alloc;
- ASSERT(args.length() == 5);
- CONVERT_CHECKED(JSRegExp, regexp, args[0]);
- CONVERT_CHECKED(String, source, args[1]);
-
- Object* global = args[2];
- if (!global->IsTrue()) global = isolate->heap()->false_value();
-
- Object* ignoreCase = args[3];
- if (!ignoreCase->IsTrue()) ignoreCase = isolate->heap()->false_value();
-
- Object* multiline = args[4];
- if (!multiline->IsTrue()) multiline = isolate->heap()->false_value();
-
- Map* map = regexp->map();
- Object* constructor = map->constructor();
- if (constructor->IsJSFunction() &&
- JSFunction::cast(constructor)->initial_map() == map) {
- // If we still have the original map, set in-object properties directly.
- regexp->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, source);
- // TODO(lrn): Consider skipping write barrier on booleans as well.
- // Both true and false should be in oldspace at all times.
- regexp->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex, global);
- regexp->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex, ignoreCase);
- regexp->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex, multiline);
- regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
- Smi::FromInt(0),
- SKIP_WRITE_BARRIER);
- return regexp;
- }
-
- // Map has changed, so use generic, but slower, method.
- PropertyAttributes final =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_ENUM | DONT_DELETE);
- PropertyAttributes writable =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- Heap* heap = isolate->heap();
- MaybeObject* result;
- result = regexp->SetLocalPropertyIgnoreAttributes(heap->source_symbol(),
- source,
- final);
- ASSERT(!result->IsFailure());
- result = regexp->SetLocalPropertyIgnoreAttributes(heap->global_symbol(),
- global,
- final);
- ASSERT(!result->IsFailure());
- result =
- regexp->SetLocalPropertyIgnoreAttributes(heap->ignore_case_symbol(),
- ignoreCase,
- final);
- ASSERT(!result->IsFailure());
- result = regexp->SetLocalPropertyIgnoreAttributes(heap->multiline_symbol(),
- multiline,
- final);
- ASSERT(!result->IsFailure());
- result =
- regexp->SetLocalPropertyIgnoreAttributes(heap->last_index_symbol(),
- Smi::FromInt(0),
- writable);
- ASSERT(!result->IsFailure());
- USE(result);
- return regexp;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FinishArrayPrototypeSetup) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSArray, prototype, 0);
- // This is necessary to enable fast checks for absence of elements
- // on Array.prototype and below.
- prototype->set_elements(isolate->heap()->empty_fixed_array());
- return Smi::FromInt(0);
-}
-
-
-static Handle<JSFunction> InstallBuiltin(Isolate* isolate,
- Handle<JSObject> holder,
- const char* name,
- Builtins::Name builtin_name) {
- Handle<String> key = isolate->factory()->LookupAsciiSymbol(name);
- Handle<Code> code(isolate->builtins()->builtin(builtin_name));
- Handle<JSFunction> optimized =
- isolate->factory()->NewFunction(key,
- JS_OBJECT_TYPE,
- JSObject::kHeaderSize,
- code,
- false);
- optimized->shared()->DontAdaptArguments();
- SetProperty(holder, key, optimized, NONE, kStrictMode);
- return optimized;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SpecialArrayFunctions) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, holder, 0);
-
- InstallBuiltin(isolate, holder, "pop", Builtins::kArrayPop);
- InstallBuiltin(isolate, holder, "push", Builtins::kArrayPush);
- InstallBuiltin(isolate, holder, "shift", Builtins::kArrayShift);
- InstallBuiltin(isolate, holder, "unshift", Builtins::kArrayUnshift);
- InstallBuiltin(isolate, holder, "slice", Builtins::kArraySlice);
- InstallBuiltin(isolate, holder, "splice", Builtins::kArraySplice);
- InstallBuiltin(isolate, holder, "concat", Builtins::kArrayConcat);
-
- return *holder;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetGlobalReceiver) {
- // Returns a real global receiver, not one of builtins object.
- Context* global_context =
- isolate->context()->global()->global_context();
- return global_context->global()->global_receiver();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MaterializeRegExpLiteral) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 4);
- CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- int index = Smi::cast(args[1])->value();
- Handle<String> pattern = args.at<String>(2);
- Handle<String> flags = args.at<String>(3);
-
- // Get the RegExp function from the context in the literals array.
- // This is the RegExp function from the context in which the
- // function was created. We do not use the RegExp function from the
- // current global context because this might be the RegExp function
- // from another context which we should not have access to.
- Handle<JSFunction> constructor =
- Handle<JSFunction>(
- JSFunction::GlobalContextFromLiterals(*literals)->regexp_function());
- // Compute the regular expression literal.
- bool has_pending_exception;
- Handle<Object> regexp =
- RegExpImpl::CreateRegExpLiteral(constructor, pattern, flags,
- &has_pending_exception);
- if (has_pending_exception) {
- ASSERT(isolate->has_pending_exception());
- return Failure::Exception();
- }
- literals->set(index, *regexp);
- return *regexp;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetName) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_CHECKED(JSFunction, f, args[0]);
- return f->shared()->name();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetName) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_CHECKED(JSFunction, f, args[0]);
- CONVERT_CHECKED(String, name, args[1]);
- f->shared()->set_name(name);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_CHECKED(JSFunction, f, args[0]);
- Object* obj = f->RemovePrototype();
- if (obj->IsFailure()) return obj;
-
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScript) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_CHECKED(JSFunction, fun, args[0]);
- Handle<Object> script = Handle<Object>(fun->shared()->script(), isolate);
- if (!script->IsScript()) return isolate->heap()->undefined_value();
-
- return *GetScriptWrapper(Handle<Script>::cast(script));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetSourceCode) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_CHECKED(JSFunction, f, args[0]);
- return f->shared()->GetSourceCode();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScriptSourcePosition) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_CHECKED(JSFunction, fun, args[0]);
- int pos = fun->shared()->start_position();
- return Smi::FromInt(pos);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetPositionForOffset) {
- ASSERT(args.length() == 2);
-
- CONVERT_CHECKED(Code, code, args[0]);
- CONVERT_NUMBER_CHECKED(int, offset, Int32, args[1]);
-
- RUNTIME_ASSERT(0 <= offset && offset < code->Size());
-
- Address pc = code->address() + offset;
- return Smi::FromInt(code->SourcePosition(pc));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetInstanceClassName) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_CHECKED(JSFunction, fun, args[0]);
- CONVERT_CHECKED(String, name, args[1]);
- fun->SetInstanceClassName(name);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetLength) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_CHECKED(JSFunction, fun, args[0]);
- CONVERT_CHECKED(Smi, length, args[1]);
- fun->shared()->set_length(length->value());
- return length;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_CHECKED(JSFunction, fun, args[0]);
- ASSERT(fun->should_have_prototype());
- Object* obj;
- { MaybeObject* maybe_obj =
- Accessors::FunctionSetPrototype(fun, args[1], NULL);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- return args[0]; // return TOS
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsAPIFunction) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_CHECKED(JSFunction, f, args[0]);
- return f->shared()->IsApiFunction() ? isolate->heap()->true_value()
- : isolate->heap()->false_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsBuiltin) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_CHECKED(JSFunction, f, args[0]);
- return f->IsBuiltin() ? isolate->heap()->true_value() :
- isolate->heap()->false_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_CHECKED(JSFunction, target, 0);
- Handle<Object> code = args.at<Object>(1);
-
- Handle<Context> context(target->context());
-
- if (!code->IsNull()) {
- RUNTIME_ASSERT(code->IsJSFunction());
- Handle<JSFunction> fun = Handle<JSFunction>::cast(code);
- Handle<SharedFunctionInfo> shared(fun->shared());
-
- if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
- return Failure::Exception();
- }
- // Since we don't store the source for this we should never
- // optimize this.
- shared->code()->set_optimizable(false);
-
- // Set the code, scope info, formal parameter count,
- // and the length of the target function.
- target->shared()->set_code(shared->code());
- target->ReplaceCode(shared->code());
- target->shared()->set_scope_info(shared->scope_info());
- target->shared()->set_length(shared->length());
- target->shared()->set_formal_parameter_count(
- shared->formal_parameter_count());
- // Set the source code of the target function to undefined.
- // SetCode is only used for built-in constructors like String,
- // Array, and Object, and some web code
- // doesn't like seeing source code for constructors.
- target->shared()->set_script(isolate->heap()->undefined_value());
- target->shared()->code()->set_optimizable(false);
- // Clear the optimization hints related to the compiled code as these are no
- // longer valid when the code is overwritten.
- target->shared()->ClearThisPropertyAssignmentsInfo();
- context = Handle<Context>(fun->context());
-
- // Make sure we get a fresh copy of the literal vector to avoid
- // cross context contamination.
- int number_of_literals = fun->NumberOfLiterals();
- Handle<FixedArray> literals =
- isolate->factory()->NewFixedArray(number_of_literals, TENURED);
- if (number_of_literals > 0) {
- // Insert the object, regexp and array functions in the literals
- // array prefix. These are the functions that will be used when
- // creating object, regexp and array literals.
- literals->set(JSFunction::kLiteralGlobalContextIndex,
- context->global_context());
- }
- // It's okay to skip the write barrier here because the literals
- // are guaranteed to be in old space.
- target->set_literals(*literals, SKIP_WRITE_BARRIER);
- target->set_next_function_link(isolate->heap()->undefined_value());
- }
-
- target->set_context(*context);
- return *target;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetExpectedNumberOfProperties) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSFunction, function, 0);
- CONVERT_SMI_CHECKED(num, args[1]);
- RUNTIME_ASSERT(num >= 0);
- SetExpectedNofProperties(function, num);
- return isolate->heap()->undefined_value();
-}
-
-
-MUST_USE_RESULT static MaybeObject* CharFromCode(Isolate* isolate,
- Object* char_code) {
- uint32_t code;
- if (char_code->ToArrayIndex(&code)) {
- if (code <= 0xffff) {
- return isolate->heap()->LookupSingleCharacterStringFromCode(code);
- }
- }
- return isolate->heap()->empty_string();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCharCodeAt) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_CHECKED(String, subject, args[0]);
- Object* index = args[1];
- RUNTIME_ASSERT(index->IsNumber());
-
- uint32_t i = 0;
- if (index->IsSmi()) {
- int value = Smi::cast(index)->value();
- if (value < 0) return isolate->heap()->nan_value();
- i = value;
- } else {
- ASSERT(index->IsHeapNumber());
- double value = HeapNumber::cast(index)->value();
- i = static_cast<uint32_t>(DoubleToInteger(value));
- }
-
- // Flatten the string. If someone wants to get a char at an index
- // in a cons string, it is likely that more indices will be
- // accessed.
- Object* flat;
- { MaybeObject* maybe_flat = subject->TryFlatten();
- if (!maybe_flat->ToObject(&flat)) return maybe_flat;
- }
- subject = String::cast(flat);
-
- if (i >= static_cast<uint32_t>(subject->length())) {
- return isolate->heap()->nan_value();
- }
-
- return Smi::FromInt(subject->Get(i));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CharFromCode) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- return CharFromCode(isolate, args[0]);
-}
-
-
-class FixedArrayBuilder {
- public:
- explicit FixedArrayBuilder(Isolate* isolate, int initial_capacity)
- : array_(isolate->factory()->NewFixedArrayWithHoles(initial_capacity)),
- length_(0) {
- // Require a non-zero initial size. Ensures that doubling the size to
- // extend the array will work.
- ASSERT(initial_capacity > 0);
- }
-
- explicit FixedArrayBuilder(Handle<FixedArray> backing_store)
- : array_(backing_store),
- length_(0) {
- // Require a non-zero initial size. Ensures that doubling the size to
- // extend the array will work.
- ASSERT(backing_store->length() > 0);
- }
-
- bool HasCapacity(int elements) {
- int length = array_->length();
- int required_length = length_ + elements;
- return (length >= required_length);
- }
-
- void EnsureCapacity(int elements) {
- int length = array_->length();
- int required_length = length_ + elements;
- if (length < required_length) {
- int new_length = length;
- do {
- new_length *= 2;
- } while (new_length < required_length);
- Handle<FixedArray> extended_array =
- array_->GetIsolate()->factory()->NewFixedArrayWithHoles(new_length);
- array_->CopyTo(0, *extended_array, 0, length_);
- array_ = extended_array;
- }
- }
-
- void Add(Object* value) {
- ASSERT(length_ < capacity());
- array_->set(length_, value);
- length_++;
- }
-
- void Add(Smi* value) {
- ASSERT(length_ < capacity());
- array_->set(length_, value);
- length_++;
- }
-
- Handle<FixedArray> array() {
- return array_;
- }
-
- int length() {
- return length_;
- }
-
- int capacity() {
- return array_->length();
- }
-
- Handle<JSArray> ToJSArray() {
- Handle<JSArray> result_array = FACTORY->NewJSArrayWithElements(array_);
- result_array->set_length(Smi::FromInt(length_));
- return result_array;
- }
-
- Handle<JSArray> ToJSArray(Handle<JSArray> target_array) {
- target_array->set_elements(*array_);
- target_array->set_length(Smi::FromInt(length_));
- return target_array;
- }
-
- private:
- Handle<FixedArray> array_;
- int length_;
-};
-
-
-// Forward declarations.
-const int kStringBuilderConcatHelperLengthBits = 11;
-const int kStringBuilderConcatHelperPositionBits = 19;
-
-template <typename schar>
-static inline void StringBuilderConcatHelper(String*,
- schar*,
- FixedArray*,
- int);
-
-typedef BitField<int, 0, kStringBuilderConcatHelperLengthBits>
- StringBuilderSubstringLength;
-typedef BitField<int,
- kStringBuilderConcatHelperLengthBits,
- kStringBuilderConcatHelperPositionBits>
- StringBuilderSubstringPosition;
-
-
-class ReplacementStringBuilder {
- public:
- ReplacementStringBuilder(Heap* heap,
- Handle<String> subject,
- int estimated_part_count)
- : heap_(heap),
- array_builder_(heap->isolate(), estimated_part_count),
- subject_(subject),
- character_count_(0),
- is_ascii_(subject->IsAsciiRepresentation()) {
- // Require a non-zero initial size. Ensures that doubling the size to
- // extend the array will work.
- ASSERT(estimated_part_count > 0);
- }
-
- static inline void AddSubjectSlice(FixedArrayBuilder* builder,
- int from,
- int to) {
- ASSERT(from >= 0);
- int length = to - from;
- ASSERT(length > 0);
- if (StringBuilderSubstringLength::is_valid(length) &&
- StringBuilderSubstringPosition::is_valid(from)) {
- int encoded_slice = StringBuilderSubstringLength::encode(length) |
- StringBuilderSubstringPosition::encode(from);
- builder->Add(Smi::FromInt(encoded_slice));
- } else {
- // Otherwise encode as two smis.
- builder->Add(Smi::FromInt(-length));
- builder->Add(Smi::FromInt(from));
- }
- }
-
-
- void EnsureCapacity(int elements) {
- array_builder_.EnsureCapacity(elements);
- }
-
-
- void AddSubjectSlice(int from, int to) {
- AddSubjectSlice(&array_builder_, from, to);
- IncrementCharacterCount(to - from);
- }
-
-
- void AddString(Handle<String> string) {
- int length = string->length();
- ASSERT(length > 0);
- AddElement(*string);
- if (!string->IsAsciiRepresentation()) {
- is_ascii_ = false;
- }
- IncrementCharacterCount(length);
- }
-
-
- Handle<String> ToString() {
- if (array_builder_.length() == 0) {
- return heap_->isolate()->factory()->empty_string();
- }
-
- Handle<String> joined_string;
- if (is_ascii_) {
- joined_string = NewRawAsciiString(character_count_);
- AssertNoAllocation no_alloc;
- SeqAsciiString* seq = SeqAsciiString::cast(*joined_string);
- char* char_buffer = seq->GetChars();
- StringBuilderConcatHelper(*subject_,
- char_buffer,
- *array_builder_.array(),
- array_builder_.length());
- } else {
- // Non-ASCII.
- joined_string = NewRawTwoByteString(character_count_);
- AssertNoAllocation no_alloc;
- SeqTwoByteString* seq = SeqTwoByteString::cast(*joined_string);
- uc16* char_buffer = seq->GetChars();
- StringBuilderConcatHelper(*subject_,
- char_buffer,
- *array_builder_.array(),
- array_builder_.length());
- }
- return joined_string;
- }
-
-
- void IncrementCharacterCount(int by) {
- if (character_count_ > String::kMaxLength - by) {
- V8::FatalProcessOutOfMemory("String.replace result too large.");
- }
- character_count_ += by;
- }
-
- Handle<JSArray> GetParts() {
- return array_builder_.ToJSArray();
- }
-
- private:
- Handle<String> NewRawAsciiString(int size) {
- CALL_HEAP_FUNCTION(heap_->isolate(),
- heap_->AllocateRawAsciiString(size), String);
- }
-
-
- Handle<String> NewRawTwoByteString(int size) {
- CALL_HEAP_FUNCTION(heap_->isolate(),
- heap_->AllocateRawTwoByteString(size), String);
- }
-
-
- void AddElement(Object* element) {
- ASSERT(element->IsSmi() || element->IsString());
- ASSERT(array_builder_.capacity() > array_builder_.length());
- array_builder_.Add(element);
- }
-
- Heap* heap_;
- FixedArrayBuilder array_builder_;
- Handle<String> subject_;
- int character_count_;
- bool is_ascii_;
-};
-
-
-class CompiledReplacement {
- public:
- CompiledReplacement()
- : parts_(1), replacement_substrings_(0) {}
-
- void Compile(Handle<String> replacement,
- int capture_count,
- int subject_length);
-
- void Apply(ReplacementStringBuilder* builder,
- int match_from,
- int match_to,
- Handle<JSArray> last_match_info);
-
- // Number of distinct parts of the replacement pattern.
- int parts() {
- return parts_.length();
- }
- private:
- enum PartType {
- SUBJECT_PREFIX = 1,
- SUBJECT_SUFFIX,
- SUBJECT_CAPTURE,
- REPLACEMENT_SUBSTRING,
- REPLACEMENT_STRING,
-
- NUMBER_OF_PART_TYPES
- };
-
- struct ReplacementPart {
- static inline ReplacementPart SubjectMatch() {
- return ReplacementPart(SUBJECT_CAPTURE, 0);
- }
- static inline ReplacementPart SubjectCapture(int capture_index) {
- return ReplacementPart(SUBJECT_CAPTURE, capture_index);
- }
- static inline ReplacementPart SubjectPrefix() {
- return ReplacementPart(SUBJECT_PREFIX, 0);
- }
- static inline ReplacementPart SubjectSuffix(int subject_length) {
- return ReplacementPart(SUBJECT_SUFFIX, subject_length);
- }
- static inline ReplacementPart ReplacementString() {
- return ReplacementPart(REPLACEMENT_STRING, 0);
- }
- static inline ReplacementPart ReplacementSubString(int from, int to) {
- ASSERT(from >= 0);
- ASSERT(to > from);
- return ReplacementPart(-from, to);
- }
-
- // If tag <= 0 then it is the negation of a start index of a substring of
- // the replacement pattern, otherwise it's a value from PartType.
- ReplacementPart(int tag, int data)
- : tag(tag), data(data) {
- // Must be non-positive or a PartType value.
- ASSERT(tag < NUMBER_OF_PART_TYPES);
- }
- // Either a value of PartType or a non-positive number that is
- // the negation of an index into the replacement string.
- int tag;
- // The data value's interpretation depends on the value of tag:
- // tag == SUBJECT_PREFIX ||
- // tag == SUBJECT_SUFFIX: data is unused.
- // tag == SUBJECT_CAPTURE: data is the number of the capture.
- // tag == REPLACEMENT_SUBSTRING ||
- // tag == REPLACEMENT_STRING: data is index into array of substrings
- // of the replacement string.
- // tag <= 0: Temporary representation of the substring of the replacement
- // string ranging over -tag .. data.
- // Is replaced by REPLACEMENT_{SUB,}STRING when we create the
- // substring objects.
- int data;
- };
-
- template<typename Char>
- static void ParseReplacementPattern(ZoneList<ReplacementPart>* parts,
- Vector<Char> characters,
- int capture_count,
- int subject_length) {
- int length = characters.length();
- int last = 0;
- for (int i = 0; i < length; i++) {
- Char c = characters[i];
- if (c == '$') {
- int next_index = i + 1;
- if (next_index == length) { // No next character!
- break;
- }
- Char c2 = characters[next_index];
- switch (c2) {
- case '$':
- if (i > last) {
- // There is a substring before. Include the first "$".
- parts->Add(ReplacementPart::ReplacementSubString(last, next_index));
- last = next_index + 1; // Continue after the second "$".
- } else {
- // Let the next substring start with the second "$".
- last = next_index;
- }
- i = next_index;
- break;
- case '`':
- if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i));
- }
- parts->Add(ReplacementPart::SubjectPrefix());
- i = next_index;
- last = i + 1;
- break;
- case '\'':
- if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i));
- }
- parts->Add(ReplacementPart::SubjectSuffix(subject_length));
- i = next_index;
- last = i + 1;
- break;
- case '&':
- if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i));
- }
- parts->Add(ReplacementPart::SubjectMatch());
- i = next_index;
- last = i + 1;
- break;
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9': {
- int capture_ref = c2 - '0';
- if (capture_ref > capture_count) {
- i = next_index;
- continue;
- }
- int second_digit_index = next_index + 1;
- if (second_digit_index < length) {
- // Peek ahead to see if we have two digits.
- Char c3 = characters[second_digit_index];
- if ('0' <= c3 && c3 <= '9') { // Double digits.
- int double_digit_ref = capture_ref * 10 + c3 - '0';
- if (double_digit_ref <= capture_count) {
- next_index = second_digit_index;
- capture_ref = double_digit_ref;
- }
- }
- }
- if (capture_ref > 0) {
- if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i));
- }
- ASSERT(capture_ref <= capture_count);
- parts->Add(ReplacementPart::SubjectCapture(capture_ref));
- last = next_index + 1;
- }
- i = next_index;
- break;
- }
- default:
- i = next_index;
- break;
- }
- }
- }
- if (length > last) {
- if (last == 0) {
- parts->Add(ReplacementPart::ReplacementString());
- } else {
- parts->Add(ReplacementPart::ReplacementSubString(last, length));
- }
- }
- }
-
- ZoneList<ReplacementPart> parts_;
- ZoneList<Handle<String> > replacement_substrings_;
-};
-
-
-void CompiledReplacement::Compile(Handle<String> replacement,
- int capture_count,
- int subject_length) {
- ASSERT(replacement->IsFlat());
- if (replacement->IsAsciiRepresentation()) {
- AssertNoAllocation no_alloc;
- ParseReplacementPattern(&parts_,
- replacement->ToAsciiVector(),
- capture_count,
- subject_length);
- } else {
- ASSERT(replacement->IsTwoByteRepresentation());
- AssertNoAllocation no_alloc;
-
- ParseReplacementPattern(&parts_,
- replacement->ToUC16Vector(),
- capture_count,
- subject_length);
- }
- Isolate* isolate = replacement->GetIsolate();
- // Find substrings of replacement string and create them as String objects.
- int substring_index = 0;
- for (int i = 0, n = parts_.length(); i < n; i++) {
- int tag = parts_[i].tag;
- if (tag <= 0) { // A replacement string slice.
- int from = -tag;
- int to = parts_[i].data;
- replacement_substrings_.Add(
- isolate->factory()->NewSubString(replacement, from, to));
- parts_[i].tag = REPLACEMENT_SUBSTRING;
- parts_[i].data = substring_index;
- substring_index++;
- } else if (tag == REPLACEMENT_STRING) {
- replacement_substrings_.Add(replacement);
- parts_[i].data = substring_index;
- substring_index++;
- }
- }
-}
-
-
-void CompiledReplacement::Apply(ReplacementStringBuilder* builder,
- int match_from,
- int match_to,
- Handle<JSArray> last_match_info) {
- for (int i = 0, n = parts_.length(); i < n; i++) {
- ReplacementPart part = parts_[i];
- switch (part.tag) {
- case SUBJECT_PREFIX:
- if (match_from > 0) builder->AddSubjectSlice(0, match_from);
- break;
- case SUBJECT_SUFFIX: {
- int subject_length = part.data;
- if (match_to < subject_length) {
- builder->AddSubjectSlice(match_to, subject_length);
- }
- break;
- }
- case SUBJECT_CAPTURE: {
- int capture = part.data;
- FixedArray* match_info = FixedArray::cast(last_match_info->elements());
- int from = RegExpImpl::GetCapture(match_info, capture * 2);
- int to = RegExpImpl::GetCapture(match_info, capture * 2 + 1);
- if (from >= 0 && to > from) {
- builder->AddSubjectSlice(from, to);
- }
- break;
- }
- case REPLACEMENT_SUBSTRING:
- case REPLACEMENT_STRING:
- builder->AddString(replacement_substrings_[part.data]);
- break;
- default:
- UNREACHABLE();
- }
- }
-}
-
-
-
-MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
- Isolate* isolate,
- String* subject,
- JSRegExp* regexp,
- String* replacement,
- JSArray* last_match_info) {
- ASSERT(subject->IsFlat());
- ASSERT(replacement->IsFlat());
-
- HandleScope handles(isolate);
-
- int length = subject->length();
- Handle<String> subject_handle(subject);
- Handle<JSRegExp> regexp_handle(regexp);
- Handle<String> replacement_handle(replacement);
- Handle<JSArray> last_match_info_handle(last_match_info);
- Handle<Object> match = RegExpImpl::Exec(regexp_handle,
- subject_handle,
- 0,
- last_match_info_handle);
- if (match.is_null()) {
- return Failure::Exception();
- }
- if (match->IsNull()) {
- return *subject_handle;
- }
-
- int capture_count = regexp_handle->CaptureCount();
-
- // CompiledReplacement uses zone allocation.
- CompilationZoneScope zone(DELETE_ON_EXIT);
- CompiledReplacement compiled_replacement;
- compiled_replacement.Compile(replacement_handle,
- capture_count,
- length);
-
- bool is_global = regexp_handle->GetFlags().is_global();
-
- // Guessing the number of parts that the final result string is built
- // from. Global regexps can match any number of times, so we guess
- // conservatively.
- int expected_parts =
- (compiled_replacement.parts() + 1) * (is_global ? 4 : 1) + 1;
- ReplacementStringBuilder builder(isolate->heap(),
- subject_handle,
- expected_parts);
-
- // Index of end of last match.
- int prev = 0;
-
- // Number of parts added by compiled replacement plus preceeding
- // string and possibly suffix after last match. It is possible for
- // all components to use two elements when encoded as two smis.
- const int parts_added_per_loop = 2 * (compiled_replacement.parts() + 2);
- bool matched = true;
- do {
- ASSERT(last_match_info_handle->HasFastElements());
- // Increase the capacity of the builder before entering local handle-scope,
- // so its internal buffer can safely allocate a new handle if it grows.
- builder.EnsureCapacity(parts_added_per_loop);
-
- HandleScope loop_scope(isolate);
- int start, end;
- {
- AssertNoAllocation match_info_array_is_not_in_a_handle;
- FixedArray* match_info_array =
- FixedArray::cast(last_match_info_handle->elements());
-
- ASSERT_EQ(capture_count * 2 + 2,
- RegExpImpl::GetLastCaptureCount(match_info_array));
- start = RegExpImpl::GetCapture(match_info_array, 0);
- end = RegExpImpl::GetCapture(match_info_array, 1);
- }
-
- if (prev < start) {
- builder.AddSubjectSlice(prev, start);
- }
- compiled_replacement.Apply(&builder,
- start,
- end,
- last_match_info_handle);
- prev = end;
-
- // Only continue checking for global regexps.
- if (!is_global) break;
-
- // Continue from where the match ended, unless it was an empty match.
- int next = end;
- if (start == end) {
- next = end + 1;
- if (next > length) break;
- }
-
- match = RegExpImpl::Exec(regexp_handle,
- subject_handle,
- next,
- last_match_info_handle);
- if (match.is_null()) {
- return Failure::Exception();
- }
- matched = !match->IsNull();
- } while (matched);
-
- if (prev < length) {
- builder.AddSubjectSlice(prev, length);
- }
-
- return *(builder.ToString());
-}
-
-
-template <typename ResultSeqString>
-MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
- Isolate* isolate,
- String* subject,
- JSRegExp* regexp,
- JSArray* last_match_info) {
- ASSERT(subject->IsFlat());
-
- HandleScope handles(isolate);
-
- Handle<String> subject_handle(subject);
- Handle<JSRegExp> regexp_handle(regexp);
- Handle<JSArray> last_match_info_handle(last_match_info);
- Handle<Object> match = RegExpImpl::Exec(regexp_handle,
- subject_handle,
- 0,
- last_match_info_handle);
- if (match.is_null()) return Failure::Exception();
- if (match->IsNull()) return *subject_handle;
-
- ASSERT(last_match_info_handle->HasFastElements());
-
- int start, end;
- {
- AssertNoAllocation match_info_array_is_not_in_a_handle;
- FixedArray* match_info_array =
- FixedArray::cast(last_match_info_handle->elements());
-
- start = RegExpImpl::GetCapture(match_info_array, 0);
- end = RegExpImpl::GetCapture(match_info_array, 1);
- }
-
- int length = subject->length();
- int new_length = length - (end - start);
- if (new_length == 0) {
- return isolate->heap()->empty_string();
- }
- Handle<ResultSeqString> answer;
- if (ResultSeqString::kHasAsciiEncoding) {
- answer = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawAsciiString(new_length));
- } else {
- answer = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawTwoByteString(new_length));
- }
-
- // If the regexp isn't global, only match once.
- if (!regexp_handle->GetFlags().is_global()) {
- if (start > 0) {
- String::WriteToFlat(*subject_handle,
- answer->GetChars(),
- 0,
- start);
- }
- if (end < length) {
- String::WriteToFlat(*subject_handle,
- answer->GetChars() + start,
- end,
- length);
- }
- return *answer;
- }
-
- int prev = 0; // Index of end of last match.
- int next = 0; // Start of next search (prev unless last match was empty).
- int position = 0;
-
- do {
- if (prev < start) {
- // Add substring subject[prev;start] to answer string.
- String::WriteToFlat(*subject_handle,
- answer->GetChars() + position,
- prev,
- start);
- position += start - prev;
- }
- prev = end;
- next = end;
- // Continue from where the match ended, unless it was an empty match.
- if (start == end) {
- next++;
- if (next > length) break;
- }
- match = RegExpImpl::Exec(regexp_handle,
- subject_handle,
- next,
- last_match_info_handle);
- if (match.is_null()) return Failure::Exception();
- if (match->IsNull()) break;
-
- ASSERT(last_match_info_handle->HasFastElements());
- HandleScope loop_scope(isolate);
- {
- AssertNoAllocation match_info_array_is_not_in_a_handle;
- FixedArray* match_info_array =
- FixedArray::cast(last_match_info_handle->elements());
- start = RegExpImpl::GetCapture(match_info_array, 0);
- end = RegExpImpl::GetCapture(match_info_array, 1);
- }
- } while (true);
-
- if (prev < length) {
- // Add substring subject[prev;length] to answer string.
- String::WriteToFlat(*subject_handle,
- answer->GetChars() + position,
- prev,
- length);
- position += length - prev;
- }
-
- if (position == 0) {
- return isolate->heap()->empty_string();
- }
-
- // Shorten string and fill
- int string_size = ResultSeqString::SizeFor(position);
- int allocated_string_size = ResultSeqString::SizeFor(new_length);
- int delta = allocated_string_size - string_size;
-
- answer->set_length(position);
- if (delta == 0) return *answer;
-
- Address end_of_string = answer->address() + string_size;
- isolate->heap()->CreateFillerObjectAt(end_of_string, delta);
-
- return *answer;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceRegExpWithString) {
- ASSERT(args.length() == 4);
-
- CONVERT_CHECKED(String, subject, args[0]);
- if (!subject->IsFlat()) {
- Object* flat_subject;
- { MaybeObject* maybe_flat_subject = subject->TryFlatten();
- if (!maybe_flat_subject->ToObject(&flat_subject)) {
- return maybe_flat_subject;
- }
- }
- subject = String::cast(flat_subject);
- }
-
- CONVERT_CHECKED(String, replacement, args[2]);
- if (!replacement->IsFlat()) {
- Object* flat_replacement;
- { MaybeObject* maybe_flat_replacement = replacement->TryFlatten();
- if (!maybe_flat_replacement->ToObject(&flat_replacement)) {
- return maybe_flat_replacement;
- }
- }
- replacement = String::cast(flat_replacement);
- }
-
- CONVERT_CHECKED(JSRegExp, regexp, args[1]);
- CONVERT_CHECKED(JSArray, last_match_info, args[3]);
-
- ASSERT(last_match_info->HasFastElements());
-
- if (replacement->length() == 0) {
- if (subject->HasOnlyAsciiChars()) {
- return StringReplaceRegExpWithEmptyString<SeqAsciiString>(
- isolate, subject, regexp, last_match_info);
- } else {
- return StringReplaceRegExpWithEmptyString<SeqTwoByteString>(
- isolate, subject, regexp, last_match_info);
- }
- }
-
- return StringReplaceRegExpWithString(isolate,
- subject,
- regexp,
- replacement,
- last_match_info);
-}
-
-
-// Perform string match of pattern on subject, starting at start index.
-// Caller must ensure that 0 <= start_index <= sub->length(),
-// and should check that pat->length() + start_index <= sub->length().
-int Runtime::StringMatch(Isolate* isolate,
- Handle<String> sub,
- Handle<String> pat,
- int start_index) {
- ASSERT(0 <= start_index);
- ASSERT(start_index <= sub->length());
-
- int pattern_length = pat->length();
- if (pattern_length == 0) return start_index;
-
- int subject_length = sub->length();
- if (start_index + pattern_length > subject_length) return -1;
-
- if (!sub->IsFlat()) FlattenString(sub);
- if (!pat->IsFlat()) FlattenString(pat);
-
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
- // Extract flattened substrings of cons strings before determining asciiness.
- String* seq_sub = *sub;
- if (seq_sub->IsConsString()) seq_sub = ConsString::cast(seq_sub)->first();
- String* seq_pat = *pat;
- if (seq_pat->IsConsString()) seq_pat = ConsString::cast(seq_pat)->first();
-
- // dispatch on type of strings
- if (seq_pat->IsAsciiRepresentation()) {
- Vector<const char> pat_vector = seq_pat->ToAsciiVector();
- if (seq_sub->IsAsciiRepresentation()) {
- return SearchString(isolate,
- seq_sub->ToAsciiVector(),
- pat_vector,
- start_index);
- }
- return SearchString(isolate,
- seq_sub->ToUC16Vector(),
- pat_vector,
- start_index);
- }
- Vector<const uc16> pat_vector = seq_pat->ToUC16Vector();
- if (seq_sub->IsAsciiRepresentation()) {
- return SearchString(isolate,
- seq_sub->ToAsciiVector(),
- pat_vector,
- start_index);
- }
- return SearchString(isolate,
- seq_sub->ToUC16Vector(),
- pat_vector,
- start_index);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringIndexOf) {
- HandleScope scope(isolate); // create a new handle scope
- ASSERT(args.length() == 3);
-
- CONVERT_ARG_CHECKED(String, sub, 0);
- CONVERT_ARG_CHECKED(String, pat, 1);
-
- Object* index = args[2];
- uint32_t start_index;
- if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
-
- RUNTIME_ASSERT(start_index <= static_cast<uint32_t>(sub->length()));
- int position =
- Runtime::StringMatch(isolate, sub, pat, start_index);
- return Smi::FromInt(position);
-}
-
-
-template <typename schar, typename pchar>
-static int StringMatchBackwards(Vector<const schar> subject,
- Vector<const pchar> pattern,
- int idx) {
- int pattern_length = pattern.length();
- ASSERT(pattern_length >= 1);
- ASSERT(idx + pattern_length <= subject.length());
-
- if (sizeof(schar) == 1 && sizeof(pchar) > 1) {
- for (int i = 0; i < pattern_length; i++) {
- uc16 c = pattern[i];
- if (c > String::kMaxAsciiCharCode) {
- return -1;
- }
- }
- }
-
- pchar pattern_first_char = pattern[0];
- for (int i = idx; i >= 0; i--) {
- if (subject[i] != pattern_first_char) continue;
- int j = 1;
- while (j < pattern_length) {
- if (pattern[j] != subject[i+j]) {
- break;
- }
- j++;
- }
- if (j == pattern_length) {
- return i;
- }
- }
- return -1;
-}
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) {
- HandleScope scope(isolate); // create a new handle scope
- ASSERT(args.length() == 3);
-
- CONVERT_ARG_CHECKED(String, sub, 0);
- CONVERT_ARG_CHECKED(String, pat, 1);
-
- Object* index = args[2];
- uint32_t start_index;
- if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
-
- uint32_t pat_length = pat->length();
- uint32_t sub_length = sub->length();
-
- if (start_index + pat_length > sub_length) {
- start_index = sub_length - pat_length;
- }
-
- if (pat_length == 0) {
- return Smi::FromInt(start_index);
- }
-
- if (!sub->IsFlat()) FlattenString(sub);
- if (!pat->IsFlat()) FlattenString(pat);
-
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
-
- int position = -1;
-
- if (pat->IsAsciiRepresentation()) {
- Vector<const char> pat_vector = pat->ToAsciiVector();
- if (sub->IsAsciiRepresentation()) {
- position = StringMatchBackwards(sub->ToAsciiVector(),
- pat_vector,
- start_index);
- } else {
- position = StringMatchBackwards(sub->ToUC16Vector(),
- pat_vector,
- start_index);
- }
- } else {
- Vector<const uc16> pat_vector = pat->ToUC16Vector();
- if (sub->IsAsciiRepresentation()) {
- position = StringMatchBackwards(sub->ToAsciiVector(),
- pat_vector,
- start_index);
- } else {
- position = StringMatchBackwards(sub->ToUC16Vector(),
- pat_vector,
- start_index);
- }
- }
-
- return Smi::FromInt(position);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_CHECKED(String, str1, args[0]);
- CONVERT_CHECKED(String, str2, args[1]);
-
- if (str1 == str2) return Smi::FromInt(0); // Equal.
- int str1_length = str1->length();
- int str2_length = str2->length();
-
- // Decide trivial cases without flattening.
- if (str1_length == 0) {
- if (str2_length == 0) return Smi::FromInt(0); // Equal.
- return Smi::FromInt(-str2_length);
- } else {
- if (str2_length == 0) return Smi::FromInt(str1_length);
- }
-
- int end = str1_length < str2_length ? str1_length : str2_length;
-
- // No need to flatten if we are going to find the answer on the first
- // character. At this point we know there is at least one character
- // in each string, due to the trivial case handling above.
- int d = str1->Get(0) - str2->Get(0);
- if (d != 0) return Smi::FromInt(d);
-
- str1->TryFlatten();
- str2->TryFlatten();
-
- StringInputBuffer& buf1 =
- *isolate->runtime_state()->string_locale_compare_buf1();
- StringInputBuffer& buf2 =
- *isolate->runtime_state()->string_locale_compare_buf2();
-
- buf1.Reset(str1);
- buf2.Reset(str2);
-
- for (int i = 0; i < end; i++) {
- uint16_t char1 = buf1.GetNext();
- uint16_t char2 = buf2.GetNext();
- if (char1 != char2) return Smi::FromInt(char1 - char2);
- }
-
- return Smi::FromInt(str1_length - str2_length);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 3);
-
- CONVERT_CHECKED(String, value, args[0]);
- Object* from = args[1];
- Object* to = args[2];
- int start, end;
- // We have a fast integer-only case here to avoid a conversion to double in
- // the common case where from and to are Smis.
- if (from->IsSmi() && to->IsSmi()) {
- start = Smi::cast(from)->value();
- end = Smi::cast(to)->value();
- } else {
- CONVERT_DOUBLE_CHECKED(from_number, from);
- CONVERT_DOUBLE_CHECKED(to_number, to);
- start = FastD2I(from_number);
- end = FastD2I(to_number);
- }
- RUNTIME_ASSERT(end >= start);
- RUNTIME_ASSERT(start >= 0);
- RUNTIME_ASSERT(end <= value->length());
- isolate->counters()->sub_string_runtime()->Increment();
- return value->SubString(start, end);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringMatch) {
- ASSERT_EQ(3, args.length());
-
- CONVERT_ARG_CHECKED(String, subject, 0);
- CONVERT_ARG_CHECKED(JSRegExp, regexp, 1);
- CONVERT_ARG_CHECKED(JSArray, regexp_info, 2);
- HandleScope handles;
-
- Handle<Object> match = RegExpImpl::Exec(regexp, subject, 0, regexp_info);
-
- if (match.is_null()) {
- return Failure::Exception();
- }
- if (match->IsNull()) {
- return isolate->heap()->null_value();
- }
- int length = subject->length();
-
- CompilationZoneScope zone_space(DELETE_ON_EXIT);
- ZoneList<int> offsets(8);
- do {
- int start;
- int end;
- {
- AssertNoAllocation no_alloc;
- FixedArray* elements = FixedArray::cast(regexp_info->elements());
- start = Smi::cast(elements->get(RegExpImpl::kFirstCapture))->value();
- end = Smi::cast(elements->get(RegExpImpl::kFirstCapture + 1))->value();
- }
- offsets.Add(start);
- offsets.Add(end);
- int index = start < end ? end : end + 1;
- if (index > length) break;
- match = RegExpImpl::Exec(regexp, subject, index, regexp_info);
- if (match.is_null()) {
- return Failure::Exception();
- }
- } while (!match->IsNull());
- int matches = offsets.length() / 2;
- Handle<FixedArray> elements = isolate->factory()->NewFixedArray(matches);
- for (int i = 0; i < matches ; i++) {
- int from = offsets.at(i * 2);
- int to = offsets.at(i * 2 + 1);
- Handle<String> match = isolate->factory()->NewSubString(subject, from, to);
- elements->set(i, *match);
- }
- Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(elements);
- result->set_length(Smi::FromInt(matches));
- return *result;
-}
-
-
-// Two smis before and after the match, for very long strings.
-const int kMaxBuilderEntriesPerRegExpMatch = 5;
-
-
-static void SetLastMatchInfoNoCaptures(Handle<String> subject,
- Handle<JSArray> last_match_info,
- int match_start,
- int match_end) {
- // Fill last_match_info with a single capture.
- last_match_info->EnsureSize(2 + RegExpImpl::kLastMatchOverhead);
- AssertNoAllocation no_gc;
- FixedArray* elements = FixedArray::cast(last_match_info->elements());
- RegExpImpl::SetLastCaptureCount(elements, 2);
- RegExpImpl::SetLastInput(elements, *subject);
- RegExpImpl::SetLastSubject(elements, *subject);
- RegExpImpl::SetCapture(elements, 0, match_start);
- RegExpImpl::SetCapture(elements, 1, match_end);
-}
-
-
-template <typename SubjectChar, typename PatternChar>
-static bool SearchStringMultiple(Isolate* isolate,
- Vector<const SubjectChar> subject,
- Vector<const PatternChar> pattern,
- String* pattern_string,
- FixedArrayBuilder* builder,
- int* match_pos) {
- int pos = *match_pos;
- int subject_length = subject.length();
- int pattern_length = pattern.length();
- int max_search_start = subject_length - pattern_length;
- StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
- while (pos <= max_search_start) {
- if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
- *match_pos = pos;
- return false;
- }
- // Position of end of previous match.
- int match_end = pos + pattern_length;
- int new_pos = search.Search(subject, match_end);
- if (new_pos >= 0) {
- // A match.
- if (new_pos > match_end) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- match_end,
- new_pos);
- }
- pos = new_pos;
- builder->Add(pattern_string);
- } else {
- break;
- }
- }
-
- if (pos < max_search_start) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- pos + pattern_length,
- subject_length);
- }
- *match_pos = pos;
- return true;
-}
-
-
-static bool SearchStringMultiple(Isolate* isolate,
- Handle<String> subject,
- Handle<String> pattern,
- Handle<JSArray> last_match_info,
- FixedArrayBuilder* builder) {
- ASSERT(subject->IsFlat());
- ASSERT(pattern->IsFlat());
-
- // Treating as if a previous match was before first character.
- int match_pos = -pattern->length();
-
- for (;;) { // Break when search complete.
- builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
- AssertNoAllocation no_gc;
- if (subject->IsAsciiRepresentation()) {
- Vector<const char> subject_vector = subject->ToAsciiVector();
- if (pattern->IsAsciiRepresentation()) {
- if (SearchStringMultiple(isolate,
- subject_vector,
- pattern->ToAsciiVector(),
- *pattern,
- builder,
- &match_pos)) break;
- } else {
- if (SearchStringMultiple(isolate,
- subject_vector,
- pattern->ToUC16Vector(),
- *pattern,
- builder,
- &match_pos)) break;
- }
- } else {
- Vector<const uc16> subject_vector = subject->ToUC16Vector();
- if (pattern->IsAsciiRepresentation()) {
- if (SearchStringMultiple(isolate,
- subject_vector,
- pattern->ToAsciiVector(),
- *pattern,
- builder,
- &match_pos)) break;
- } else {
- if (SearchStringMultiple(isolate,
- subject_vector,
- pattern->ToUC16Vector(),
- *pattern,
- builder,
- &match_pos)) break;
- }
- }
- }
-
- if (match_pos >= 0) {
- SetLastMatchInfoNoCaptures(subject,
- last_match_info,
- match_pos,
- match_pos + pattern->length());
- return true;
- }
- return false; // No matches at all.
-}
-
-
-static RegExpImpl::IrregexpResult SearchRegExpNoCaptureMultiple(
- Isolate* isolate,
- Handle<String> subject,
- Handle<JSRegExp> regexp,
- Handle<JSArray> last_match_array,
- FixedArrayBuilder* builder) {
- ASSERT(subject->IsFlat());
- int match_start = -1;
- int match_end = 0;
- int pos = 0;
- int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
- if (required_registers < 0) return RegExpImpl::RE_EXCEPTION;
-
- OffsetsVector registers(required_registers);
- Vector<int32_t> register_vector(registers.vector(), registers.length());
- int subject_length = subject->length();
-
- for (;;) { // Break on failure, return on exception.
- RegExpImpl::IrregexpResult result =
- RegExpImpl::IrregexpExecOnce(regexp,
- subject,
- pos,
- register_vector);
- if (result == RegExpImpl::RE_SUCCESS) {
- match_start = register_vector[0];
- builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
- if (match_end < match_start) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- match_end,
- match_start);
- }
- match_end = register_vector[1];
- HandleScope loop_scope(isolate);
- builder->Add(*isolate->factory()->NewSubString(subject,
- match_start,
- match_end));
- if (match_start != match_end) {
- pos = match_end;
- } else {
- pos = match_end + 1;
- if (pos > subject_length) break;
- }
- } else if (result == RegExpImpl::RE_FAILURE) {
- break;
- } else {
- ASSERT_EQ(result, RegExpImpl::RE_EXCEPTION);
- return result;
- }
- }
-
- if (match_start >= 0) {
- if (match_end < subject_length) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- match_end,
- subject_length);
- }
- SetLastMatchInfoNoCaptures(subject,
- last_match_array,
- match_start,
- match_end);
- return RegExpImpl::RE_SUCCESS;
- } else {
- return RegExpImpl::RE_FAILURE; // No matches at all.
- }
-}
-
-
-static RegExpImpl::IrregexpResult SearchRegExpMultiple(
- Isolate* isolate,
- Handle<String> subject,
- Handle<JSRegExp> regexp,
- Handle<JSArray> last_match_array,
- FixedArrayBuilder* builder) {
-
- ASSERT(subject->IsFlat());
- int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
- if (required_registers < 0) return RegExpImpl::RE_EXCEPTION;
-
- OffsetsVector registers(required_registers);
- Vector<int32_t> register_vector(registers.vector(), registers.length());
-
- RegExpImpl::IrregexpResult result =
- RegExpImpl::IrregexpExecOnce(regexp,
- subject,
- 0,
- register_vector);
-
- int capture_count = regexp->CaptureCount();
- int subject_length = subject->length();
-
- // Position to search from.
- int pos = 0;
- // End of previous match. Differs from pos if match was empty.
- int match_end = 0;
- if (result == RegExpImpl::RE_SUCCESS) {
- // Need to keep a copy of the previous match for creating last_match_info
- // at the end, so we have two vectors that we swap between.
- OffsetsVector registers2(required_registers);
- Vector<int> prev_register_vector(registers2.vector(), registers2.length());
-
- do {
- int match_start = register_vector[0];
- builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
- if (match_end < match_start) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- match_end,
- match_start);
- }
- match_end = register_vector[1];
-
- {
- // Avoid accumulating new handles inside loop.
- HandleScope temp_scope(isolate);
- // Arguments array to replace function is match, captures, index and
- // subject, i.e., 3 + capture count in total.
- Handle<FixedArray> elements =
- isolate->factory()->NewFixedArray(3 + capture_count);
- Handle<String> match = isolate->factory()->NewSubString(subject,
- match_start,
- match_end);
- elements->set(0, *match);
- for (int i = 1; i <= capture_count; i++) {
- int start = register_vector[i * 2];
- if (start >= 0) {
- int end = register_vector[i * 2 + 1];
- ASSERT(start <= end);
- Handle<String> substring = isolate->factory()->NewSubString(subject,
- start,
- end);
- elements->set(i, *substring);
- } else {
- ASSERT(register_vector[i * 2 + 1] < 0);
- elements->set(i, isolate->heap()->undefined_value());
- }
- }
- elements->set(capture_count + 1, Smi::FromInt(match_start));
- elements->set(capture_count + 2, *subject);
- builder->Add(*isolate->factory()->NewJSArrayWithElements(elements));
- }
- // Swap register vectors, so the last successful match is in
- // prev_register_vector.
- Vector<int32_t> tmp = prev_register_vector;
- prev_register_vector = register_vector;
- register_vector = tmp;
-
- if (match_end > match_start) {
- pos = match_end;
- } else {
- pos = match_end + 1;
- if (pos > subject_length) {
- break;
- }
- }
-
- result = RegExpImpl::IrregexpExecOnce(regexp,
- subject,
- pos,
- register_vector);
- } while (result == RegExpImpl::RE_SUCCESS);
-
- if (result != RegExpImpl::RE_EXCEPTION) {
- // Finished matching, with at least one match.
- if (match_end < subject_length) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- match_end,
- subject_length);
- }
-
- int last_match_capture_count = (capture_count + 1) * 2;
- int last_match_array_size =
- last_match_capture_count + RegExpImpl::kLastMatchOverhead;
- last_match_array->EnsureSize(last_match_array_size);
- AssertNoAllocation no_gc;
- FixedArray* elements = FixedArray::cast(last_match_array->elements());
- RegExpImpl::SetLastCaptureCount(elements, last_match_capture_count);
- RegExpImpl::SetLastSubject(elements, *subject);
- RegExpImpl::SetLastInput(elements, *subject);
- for (int i = 0; i < last_match_capture_count; i++) {
- RegExpImpl::SetCapture(elements, i, prev_register_vector[i]);
- }
- return RegExpImpl::RE_SUCCESS;
- }
- }
- // No matches at all, return failure or exception result directly.
- return result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) {
- ASSERT(args.length() == 4);
- HandleScope handles(isolate);
-
- CONVERT_ARG_CHECKED(String, subject, 1);
- if (!subject->IsFlat()) { FlattenString(subject); }
- CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
- CONVERT_ARG_CHECKED(JSArray, last_match_info, 2);
- CONVERT_ARG_CHECKED(JSArray, result_array, 3);
-
- ASSERT(last_match_info->HasFastElements());
- ASSERT(regexp->GetFlags().is_global());
- Handle<FixedArray> result_elements;
- if (result_array->HasFastElements()) {
- result_elements =
- Handle<FixedArray>(FixedArray::cast(result_array->elements()));
- } else {
- result_elements = isolate->factory()->NewFixedArrayWithHoles(16);
- }
- FixedArrayBuilder builder(result_elements);
-
- if (regexp->TypeTag() == JSRegExp::ATOM) {
- Handle<String> pattern(
- String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex)));
- ASSERT(pattern->IsFlat());
- if (SearchStringMultiple(isolate, subject, pattern,
- last_match_info, &builder)) {
- return *builder.ToJSArray(result_array);
- }
- return isolate->heap()->null_value();
- }
-
- ASSERT_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
-
- RegExpImpl::IrregexpResult result;
- if (regexp->CaptureCount() == 0) {
- result = SearchRegExpNoCaptureMultiple(isolate,
- subject,
- regexp,
- last_match_info,
- &builder);
- } else {
- result = SearchRegExpMultiple(isolate,
- subject,
- regexp,
- last_match_info,
- &builder);
- }
- if (result == RegExpImpl::RE_SUCCESS) return *builder.ToJSArray(result_array);
- if (result == RegExpImpl::RE_FAILURE) return isolate->heap()->null_value();
- ASSERT_EQ(result, RegExpImpl::RE_EXCEPTION);
- return Failure::Exception();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- // Fast case where the result is a one character string.
- if (args[0]->IsSmi() && args[1]->IsSmi()) {
- int value = Smi::cast(args[0])->value();
- int radix = Smi::cast(args[1])->value();
- if (value >= 0 && value < radix) {
- RUNTIME_ASSERT(radix <= 36);
- // Character array used for conversion.
- static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";
- return isolate->heap()->
- LookupSingleCharacterStringFromCode(kCharTable[value]);
- }
- }
-
- // Slow case.
- CONVERT_DOUBLE_CHECKED(value, args[0]);
- if (isnan(value)) {
- return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
- }
- if (isinf(value)) {
- if (value < 0) {
- return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
- }
- return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
- }
- CONVERT_DOUBLE_CHECKED(radix_number, args[1]);
- int radix = FastD2I(radix_number);
- RUNTIME_ASSERT(2 <= radix && radix <= 36);
- char* str = DoubleToRadixCString(value, radix);
- MaybeObject* result =
- isolate->heap()->AllocateStringFromAscii(CStrVector(str));
- DeleteArray(str);
- return result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToFixed) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_DOUBLE_CHECKED(value, args[0]);
- if (isnan(value)) {
- return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
- }
- if (isinf(value)) {
- if (value < 0) {
- return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
- }
- return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
- }
- CONVERT_DOUBLE_CHECKED(f_number, args[1]);
- int f = FastD2I(f_number);
- RUNTIME_ASSERT(f >= 0);
- char* str = DoubleToFixedCString(value, f);
- MaybeObject* res =
- isolate->heap()->AllocateStringFromAscii(CStrVector(str));
- DeleteArray(str);
- return res;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToExponential) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_DOUBLE_CHECKED(value, args[0]);
- if (isnan(value)) {
- return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
- }
- if (isinf(value)) {
- if (value < 0) {
- return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
- }
- return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
- }
- CONVERT_DOUBLE_CHECKED(f_number, args[1]);
- int f = FastD2I(f_number);
- RUNTIME_ASSERT(f >= -1 && f <= 20);
- char* str = DoubleToExponentialCString(value, f);
- MaybeObject* res =
- isolate->heap()->AllocateStringFromAscii(CStrVector(str));
- DeleteArray(str);
- return res;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPrecision) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_DOUBLE_CHECKED(value, args[0]);
- if (isnan(value)) {
- return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
- }
- if (isinf(value)) {
- if (value < 0) {
- return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
- }
- return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
- }
- CONVERT_DOUBLE_CHECKED(f_number, args[1]);
- int f = FastD2I(f_number);
- RUNTIME_ASSERT(f >= 1 && f <= 21);
- char* str = DoubleToPrecisionCString(value, f);
- MaybeObject* res =
- isolate->heap()->AllocateStringFromAscii(CStrVector(str));
- DeleteArray(str);
- return res;
-}
-
-
-// Returns a single character string where first character equals
-// string->Get(index).
-static Handle<Object> GetCharAt(Handle<String> string, uint32_t index) {
- if (index < static_cast<uint32_t>(string->length())) {
- string->TryFlatten();
- return LookupSingleCharacterStringFromCode(
- string->Get(index));
- }
- return Execution::CharAt(string, index);
-}
-
-
-MaybeObject* Runtime::GetElementOrCharAt(Isolate* isolate,
- Handle<Object> object,
- uint32_t index) {
- // Handle [] indexing on Strings
- if (object->IsString()) {
- Handle<Object> result = GetCharAt(Handle<String>::cast(object), index);
- if (!result->IsUndefined()) return *result;
- }
-
- // Handle [] indexing on String objects
- if (object->IsStringObjectWithCharacterAt(index)) {
- Handle<JSValue> js_value = Handle<JSValue>::cast(object);
- Handle<Object> result =
- GetCharAt(Handle<String>(String::cast(js_value->value())), index);
- if (!result->IsUndefined()) return *result;
- }
-
- if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
- Handle<Object> prototype = GetPrototype(object);
- return prototype->GetElement(index);
- }
-
- return GetElement(object, index);
-}
-
-
-MaybeObject* Runtime::GetElement(Handle<Object> object, uint32_t index) {
- return object->GetElement(index);
-}
-
-
-MaybeObject* Runtime::GetObjectProperty(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key) {
- HandleScope scope(isolate);
-
- if (object->IsUndefined() || object->IsNull()) {
- Handle<Object> args[2] = { key, object };
- Handle<Object> error =
- isolate->factory()->NewTypeError("non_object_property_load",
- HandleVector(args, 2));
- return isolate->Throw(*error);
- }
-
- // Check if the given key is an array index.
- uint32_t index;
- if (key->ToArrayIndex(&index)) {
- return GetElementOrCharAt(isolate, object, index);
- }
-
- // Convert the key to a string - possibly by calling back into JavaScript.
- Handle<String> name;
- if (key->IsString()) {
- name = Handle<String>::cast(key);
- } else {
- bool has_pending_exception = false;
- Handle<Object> converted =
- Execution::ToString(key, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
- name = Handle<String>::cast(converted);
- }
-
- // Check if the name is trivially convertible to an index and get
- // the element if so.
- if (name->AsArrayIndex(&index)) {
- return GetElementOrCharAt(isolate, object, index);
- } else {
- PropertyAttributes attr;
- return object->GetProperty(*name, &attr);
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetProperty) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- Handle<Object> object = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
-
- return Runtime::GetObjectProperty(isolate, object, key);
-}
-
-
-// KeyedStringGetProperty is called from KeyedLoadIC::GenerateGeneric.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- // Fast cases for getting named properties of the receiver JSObject
- // itself.
- //
- // The global proxy objects has to be excluded since LocalLookup on
- // the global proxy object can return a valid result even though the
- // global proxy object never has properties. This is the case
- // because the global proxy object forwards everything to its hidden
- // prototype including local lookups.
- //
- // Additionally, we need to make sure that we do not cache results
- // for objects that require access checks.
- if (args[0]->IsJSObject() &&
- !args[0]->IsJSGlobalProxy() &&
- !args[0]->IsAccessCheckNeeded() &&
- args[1]->IsString()) {
- JSObject* receiver = JSObject::cast(args[0]);
- String* key = String::cast(args[1]);
- if (receiver->HasFastProperties()) {
- // Attempt to use lookup cache.
- Map* receiver_map = receiver->map();
- KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache();
- int offset = keyed_lookup_cache->Lookup(receiver_map, key);
- if (offset != -1) {
- Object* value = receiver->FastPropertyAt(offset);
- return value->IsTheHole() ? isolate->heap()->undefined_value() : value;
- }
- // Lookup cache miss. Perform lookup and update the cache if appropriate.
- LookupResult result;
- receiver->LocalLookup(key, &result);
- if (result.IsProperty() && result.type() == FIELD) {
- int offset = result.GetFieldIndex();
- keyed_lookup_cache->Update(receiver_map, key, offset);
- return receiver->FastPropertyAt(offset);
- }
- } else {
- // Attempt dictionary lookup.
- StringDictionary* dictionary = receiver->property_dictionary();
- int entry = dictionary->FindEntry(key);
- if ((entry != StringDictionary::kNotFound) &&
- (dictionary->DetailsAt(entry).type() == NORMAL)) {
- Object* value = dictionary->ValueAt(entry);
- if (!receiver->IsGlobalObject()) return value;
- value = JSGlobalPropertyCell::cast(value)->value();
- if (!value->IsTheHole()) return value;
- // If value is the hole do the general lookup.
- }
- }
- } else if (args[0]->IsString() && args[1]->IsSmi()) {
- // Fast case for string indexing using [] with a smi index.
- HandleScope scope(isolate);
- Handle<String> str = args.at<String>(0);
- int index = Smi::cast(args[1])->value();
- if (index >= 0 && index < str->length()) {
- Handle<Object> result = GetCharAt(str, index);
- return *result;
- }
- }
-
- // Fall back to GetObjectProperty.
- return Runtime::GetObjectProperty(isolate,
- args.at<Object>(0),
- args.at<Object>(1));
-}
-
-// Implements part of 8.12.9 DefineOwnProperty.
-// There are 3 cases that lead here:
-// Step 4b - define a new accessor property.
-// Steps 9c & 12 - replace an existing data property with an accessor property.
-// Step 12 - update an existing accessor property with an accessor or generic
-// descriptor.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) {
- ASSERT(args.length() == 5);
- HandleScope scope(isolate);
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
- CONVERT_CHECKED(String, name, args[1]);
- CONVERT_CHECKED(Smi, flag_setter, args[2]);
- Object* fun = args[3];
- RUNTIME_ASSERT(fun->IsJSFunction() || fun->IsUndefined());
- CONVERT_CHECKED(Smi, flag_attr, args[4]);
- int unchecked = flag_attr->value();
- RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
- RUNTIME_ASSERT(!obj->IsNull());
- LookupResult result;
- obj->LocalLookupRealNamedProperty(name, &result);
-
- PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
- // If an existing property is either FIELD, NORMAL or CONSTANT_FUNCTION
- // delete it to avoid running into trouble in DefineAccessor, which
- // handles this incorrectly if the property is readonly (does nothing)
- if (result.IsProperty() &&
- (result.type() == FIELD || result.type() == NORMAL
- || result.type() == CONSTANT_FUNCTION)) {
- Object* ok;
- { MaybeObject* maybe_ok =
- obj->DeleteProperty(name, JSObject::NORMAL_DELETION);
- if (!maybe_ok->ToObject(&ok)) return maybe_ok;
- }
- }
- return obj->DefineAccessor(name, flag_setter->value() == 0, fun, attr);
-}
-
-// Implements part of 8.12.9 DefineOwnProperty.
-// There are 3 cases that lead here:
-// Step 4a - define a new data property.
-// Steps 9b & 12 - replace an existing accessor property with a data property.
-// Step 12 - update an existing data property with a data or generic
-// descriptor.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
- ASSERT(args.length() == 4);
- HandleScope scope(isolate);
- CONVERT_ARG_CHECKED(JSObject, js_object, 0);
- CONVERT_ARG_CHECKED(String, name, 1);
- Handle<Object> obj_value = args.at<Object>(2);
-
- CONVERT_CHECKED(Smi, flag, args[3]);
- int unchecked = flag->value();
- RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
-
- PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
-
- // Check if this is an element.
- uint32_t index;
- bool is_element = name->AsArrayIndex(&index);
-
- // Special case for elements if any of the flags are true.
- // If elements are in fast case we always implicitly assume that:
- // DONT_DELETE: false, DONT_ENUM: false, READ_ONLY: false.
- if (((unchecked & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0) &&
- is_element) {
- // Normalize the elements to enable attributes on the property.
- if (js_object->IsJSGlobalProxy()) {
- // We do not need to do access checks here since these has already
- // been performed by the call to GetOwnProperty.
- Handle<Object> proto(js_object->GetPrototype());
- // If proxy is detached, ignore the assignment. Alternatively,
- // we could throw an exception.
- if (proto->IsNull()) return *obj_value;
- js_object = Handle<JSObject>::cast(proto);
- }
- NormalizeElements(js_object);
- Handle<NumberDictionary> dictionary(js_object->element_dictionary());
- // Make sure that we never go back to fast case.
- dictionary->set_requires_slow_elements();
- PropertyDetails details = PropertyDetails(attr, NORMAL);
- NumberDictionarySet(dictionary, index, obj_value, details);
- return *obj_value;
- }
-
- LookupResult result;
- js_object->LookupRealNamedProperty(*name, &result);
-
- // To be compatible with safari we do not change the value on API objects
- // in defineProperty. Firefox disagrees here, and actually changes the value.
- if (result.IsProperty() &&
- (result.type() == CALLBACKS) &&
- result.GetCallbackObject()->IsAccessorInfo()) {
- return isolate->heap()->undefined_value();
- }
-
- // Take special care when attributes are different and there is already
- // a property. For simplicity we normalize the property which enables us
- // to not worry about changing the instance_descriptor and creating a new
- // map. The current version of SetObjectProperty does not handle attributes
- // correctly in the case where a property is a field and is reset with
- // new attributes.
- if (result.IsProperty() &&
- (attr != result.GetAttributes() || result.type() == CALLBACKS)) {
- // New attributes - normalize to avoid writing to instance descriptor
- if (js_object->IsJSGlobalProxy()) {
- // Since the result is a property, the prototype will exist so
- // we don't have to check for null.
- js_object = Handle<JSObject>(JSObject::cast(js_object->GetPrototype()));
- }
- NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
- // Use IgnoreAttributes version since a readonly property may be
- // overridden and SetProperty does not allow this.
- return js_object->SetLocalPropertyIgnoreAttributes(*name,
- *obj_value,
- attr);
- }
-
- return Runtime::ForceSetObjectProperty(isolate,
- js_object,
- name,
- obj_value,
- attr);
-}
-
-
-MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attr,
- StrictModeFlag strict_mode) {
- HandleScope scope(isolate);
-
- if (object->IsUndefined() || object->IsNull()) {
- Handle<Object> args[2] = { key, object };
- Handle<Object> error =
- isolate->factory()->NewTypeError("non_object_property_store",
- HandleVector(args, 2));
- return isolate->Throw(*error);
- }
-
- // If the object isn't a JavaScript object, we ignore the store.
- if (!object->IsJSObject()) return *value;
-
- Handle<JSObject> js_object = Handle<JSObject>::cast(object);
-
- // Check if the given key is an array index.
- uint32_t index;
- if (key->ToArrayIndex(&index)) {
- // In Firefox/SpiderMonkey, Safari and Opera you can access the characters
- // of a string using [] notation. We need to support this too in
- // JavaScript.
- // In the case of a String object we just need to redirect the assignment to
- // the underlying string if the index is in range. Since the underlying
- // string does nothing with the assignment then we can ignore such
- // assignments.
- if (js_object->IsStringObjectWithCharacterAt(index)) {
- return *value;
- }
-
- Handle<Object> result = SetElement(js_object, index, value, strict_mode);
- if (result.is_null()) return Failure::Exception();
- return *value;
- }
-
- if (key->IsString()) {
- Handle<Object> result;
- if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
- result = SetElement(js_object, index, value, strict_mode);
- } else {
- Handle<String> key_string = Handle<String>::cast(key);
- key_string->TryFlatten();
- result = SetProperty(js_object, key_string, value, attr, strict_mode);
- }
- if (result.is_null()) return Failure::Exception();
- return *value;
- }
-
- // Call-back into JavaScript to convert the key to a string.
- bool has_pending_exception = false;
- Handle<Object> converted = Execution::ToString(key, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
- Handle<String> name = Handle<String>::cast(converted);
-
- if (name->AsArrayIndex(&index)) {
- return js_object->SetElement(index, *value, strict_mode);
- } else {
- return js_object->SetProperty(*name, *value, attr, strict_mode);
- }
-}
-
-
-MaybeObject* Runtime::ForceSetObjectProperty(Isolate* isolate,
- Handle<JSObject> js_object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attr) {
- HandleScope scope(isolate);
-
- // Check if the given key is an array index.
- uint32_t index;
- if (key->ToArrayIndex(&index)) {
- // In Firefox/SpiderMonkey, Safari and Opera you can access the characters
- // of a string using [] notation. We need to support this too in
- // JavaScript.
- // In the case of a String object we just need to redirect the assignment to
- // the underlying string if the index is in range. Since the underlying
- // string does nothing with the assignment then we can ignore such
- // assignments.
- if (js_object->IsStringObjectWithCharacterAt(index)) {
- return *value;
- }
-
- return js_object->SetElement(index, *value, kNonStrictMode);
- }
-
- if (key->IsString()) {
- if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
- return js_object->SetElement(index, *value, kNonStrictMode);
- } else {
- Handle<String> key_string = Handle<String>::cast(key);
- key_string->TryFlatten();
- return js_object->SetLocalPropertyIgnoreAttributes(*key_string,
- *value,
- attr);
- }
- }
-
- // Call-back into JavaScript to convert the key to a string.
- bool has_pending_exception = false;
- Handle<Object> converted = Execution::ToString(key, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
- Handle<String> name = Handle<String>::cast(converted);
-
- if (name->AsArrayIndex(&index)) {
- return js_object->SetElement(index, *value, kNonStrictMode);
- } else {
- return js_object->SetLocalPropertyIgnoreAttributes(*name, *value, attr);
- }
-}
-
-
-MaybeObject* Runtime::ForceDeleteObjectProperty(Isolate* isolate,
- Handle<JSObject> js_object,
- Handle<Object> key) {
- HandleScope scope(isolate);
-
- // Check if the given key is an array index.
- uint32_t index;
- if (key->ToArrayIndex(&index)) {
- // In Firefox/SpiderMonkey, Safari and Opera you can access the
- // characters of a string using [] notation. In the case of a
- // String object we just need to redirect the deletion to the
- // underlying string if the index is in range. Since the
- // underlying string does nothing with the deletion, we can ignore
- // such deletions.
- if (js_object->IsStringObjectWithCharacterAt(index)) {
- return isolate->heap()->true_value();
- }
-
- return js_object->DeleteElement(index, JSObject::FORCE_DELETION);
- }
-
- Handle<String> key_string;
- if (key->IsString()) {
- key_string = Handle<String>::cast(key);
- } else {
- // Call-back into JavaScript to convert the key to a string.
- bool has_pending_exception = false;
- Handle<Object> converted = Execution::ToString(key, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
- key_string = Handle<String>::cast(converted);
- }
-
- key_string->TryFlatten();
- return js_object->DeleteProperty(*key_string, JSObject::FORCE_DELETION);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
- NoHandleAllocation ha;
- RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
-
- Handle<Object> object = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
- Handle<Object> value = args.at<Object>(2);
- CONVERT_SMI_CHECKED(unchecked_attributes, args[3]);
- RUNTIME_ASSERT(
- (unchecked_attributes & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
- // Compute attributes.
- PropertyAttributes attributes =
- static_cast<PropertyAttributes>(unchecked_attributes);
-
- StrictModeFlag strict_mode = kNonStrictMode;
- if (args.length() == 5) {
- CONVERT_SMI_CHECKED(strict_unchecked, args[4]);
- RUNTIME_ASSERT(strict_unchecked == kStrictMode ||
- strict_unchecked == kNonStrictMode);
- strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
- }
-
- return Runtime::SetObjectProperty(isolate,
- object,
- key,
- value,
- attributes,
- strict_mode);
-}
-
-
-// Set a local property, even if it is READ_ONLY. If the property does not
-// exist, it will be added with attributes NONE.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
- NoHandleAllocation ha;
- RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
- CONVERT_CHECKED(JSObject, object, args[0]);
- CONVERT_CHECKED(String, name, args[1]);
- // Compute attributes.
- PropertyAttributes attributes = NONE;
- if (args.length() == 4) {
- CONVERT_CHECKED(Smi, value_obj, args[3]);
- int unchecked_value = value_obj->value();
- // Only attribute bits should be set.
- RUNTIME_ASSERT(
- (unchecked_value & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
- attributes = static_cast<PropertyAttributes>(unchecked_value);
- }
-
- return object->
- SetLocalPropertyIgnoreAttributes(name, args[2], attributes);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteProperty) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 3);
-
- CONVERT_CHECKED(JSObject, object, args[0]);
- CONVERT_CHECKED(String, key, args[1]);
- CONVERT_SMI_CHECKED(strict, args[2]);
- return object->DeleteProperty(key, (strict == kStrictMode)
- ? JSObject::STRICT_DELETION
- : JSObject::NORMAL_DELETION);
-}
-
-
-static Object* HasLocalPropertyImplementation(Isolate* isolate,
- Handle<JSObject> object,
- Handle<String> key) {
- if (object->HasLocalProperty(*key)) return isolate->heap()->true_value();
- // Handle hidden prototypes. If there's a hidden prototype above this thing
- // then we have to check it for properties, because they are supposed to
- // look like they are on this object.
- Handle<Object> proto(object->GetPrototype());
- if (proto->IsJSObject() &&
- Handle<JSObject>::cast(proto)->map()->is_hidden_prototype()) {
- return HasLocalPropertyImplementation(isolate,
- Handle<JSObject>::cast(proto),
- key);
- }
- return isolate->heap()->false_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
- CONVERT_CHECKED(String, key, args[1]);
-
- Object* obj = args[0];
- // Only JS objects can have properties.
- if (obj->IsJSObject()) {
- JSObject* object = JSObject::cast(obj);
- // Fast case - no interceptors.
- if (object->HasRealNamedProperty(key)) return isolate->heap()->true_value();
- // Slow case. Either it's not there or we have an interceptor. We should
- // have handles for this kind of deal.
- HandleScope scope(isolate);
- return HasLocalPropertyImplementation(isolate,
- Handle<JSObject>(object),
- Handle<String>(key));
- } else if (obj->IsString()) {
- // Well, there is one exception: Handle [] on strings.
- uint32_t index;
- if (key->AsArrayIndex(&index)) {
- String* string = String::cast(obj);
- if (index < static_cast<uint32_t>(string->length()))
- return isolate->heap()->true_value();
- }
- }
- return isolate->heap()->false_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
- NoHandleAllocation na;
- ASSERT(args.length() == 2);
-
- // Only JS objects can have properties.
- if (args[0]->IsJSObject()) {
- JSObject* object = JSObject::cast(args[0]);
- CONVERT_CHECKED(String, key, args[1]);
- if (object->HasProperty(key)) return isolate->heap()->true_value();
- }
- return isolate->heap()->false_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
- NoHandleAllocation na;
- ASSERT(args.length() == 2);
-
- // Only JS objects can have elements.
- if (args[0]->IsJSObject()) {
- JSObject* object = JSObject::cast(args[0]);
- CONVERT_CHECKED(Smi, index_obj, args[1]);
- uint32_t index = index_obj->value();
- if (object->HasElement(index)) return isolate->heap()->true_value();
- }
- return isolate->heap()->false_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_CHECKED(JSObject, object, args[0]);
- CONVERT_CHECKED(String, key, args[1]);
-
- uint32_t index;
- if (key->AsArrayIndex(&index)) {
- return isolate->heap()->ToBoolean(object->HasElement(index));
- }
-
- PropertyAttributes att = object->GetLocalPropertyAttribute(key);
- return isolate->heap()->ToBoolean(att != ABSENT && (att & DONT_ENUM) == 0);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNames) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, object, 0);
- return *GetKeysFor(object);
-}
-
-
-// Returns either a FixedArray as Runtime_GetPropertyNames,
-// or, if the given object has an enum cache that contains
-// all enumerable properties of the object and its prototypes
-// have none, the map of the object. This is used to speed up
-// the check for deletions during a for-in.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNamesFast) {
- ASSERT(args.length() == 1);
-
- CONVERT_CHECKED(JSObject, raw_object, args[0]);
-
- if (raw_object->IsSimpleEnum()) return raw_object->map();
-
- HandleScope scope(isolate);
- Handle<JSObject> object(raw_object);
- Handle<FixedArray> content = GetKeysInFixedArrayFor(object,
- INCLUDE_PROTOS);
-
- // Test again, since cache may have been built by preceding call.
- if (object->IsSimpleEnum()) return object->map();
-
- return *content;
-}
-
-
-// Find the length of the prototype chain that is to to handled as one. If a
-// prototype object is hidden it is to be viewed as part of the the object it
-// is prototype for.
-static int LocalPrototypeChainLength(JSObject* obj) {
- int count = 1;
- Object* proto = obj->GetPrototype();
- while (proto->IsJSObject() &&
- JSObject::cast(proto)->map()->is_hidden_prototype()) {
- count++;
- proto = JSObject::cast(proto)->GetPrototype();
- }
- return count;
-}
-
-
-// Return the names of the local named properties.
-// args[0]: object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- if (!args[0]->IsJSObject()) {
- return isolate->heap()->undefined_value();
- }
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
-
- // Skip the global proxy as it has no properties and always delegates to the
- // real global object.
- if (obj->IsJSGlobalProxy()) {
- // Only collect names if access is permitted.
- if (obj->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*obj,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*obj, v8::ACCESS_KEYS);
- return *isolate->factory()->NewJSArray(0);
- }
- obj = Handle<JSObject>(JSObject::cast(obj->GetPrototype()));
- }
-
- // Find the number of objects making up this.
- int length = LocalPrototypeChainLength(*obj);
-
- // Find the number of local properties for each of the objects.
- ScopedVector<int> local_property_count(length);
- int total_property_count = 0;
- Handle<JSObject> jsproto = obj;
- for (int i = 0; i < length; i++) {
- // Only collect names if access is permitted.
- if (jsproto->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*jsproto,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*jsproto, v8::ACCESS_KEYS);
- return *isolate->factory()->NewJSArray(0);
- }
- int n;
- n = jsproto->NumberOfLocalProperties(static_cast<PropertyAttributes>(NONE));
- local_property_count[i] = n;
- total_property_count += n;
- if (i < length - 1) {
- jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
- }
- }
-
- // Allocate an array with storage for all the property names.
- Handle<FixedArray> names =
- isolate->factory()->NewFixedArray(total_property_count);
-
- // Get the property names.
- jsproto = obj;
- int proto_with_hidden_properties = 0;
- for (int i = 0; i < length; i++) {
- jsproto->GetLocalPropertyNames(*names,
- i == 0 ? 0 : local_property_count[i - 1]);
- if (!GetHiddenProperties(jsproto, false)->IsUndefined()) {
- proto_with_hidden_properties++;
- }
- if (i < length - 1) {
- jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
- }
- }
-
- // Filter out name of hidden propeties object.
- if (proto_with_hidden_properties > 0) {
- Handle<FixedArray> old_names = names;
- names = isolate->factory()->NewFixedArray(
- names->length() - proto_with_hidden_properties);
- int dest_pos = 0;
- for (int i = 0; i < total_property_count; i++) {
- Object* name = old_names->get(i);
- if (name == isolate->heap()->hidden_symbol()) {
- continue;
- }
- names->set(dest_pos++, name);
- }
- }
-
- return *isolate->factory()->NewJSArrayWithElements(names);
-}
-
-
-// Return the names of the local indexed properties.
-// args[0]: object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalElementNames) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- if (!args[0]->IsJSObject()) {
- return isolate->heap()->undefined_value();
- }
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
-
- int n = obj->NumberOfLocalElements(static_cast<PropertyAttributes>(NONE));
- Handle<FixedArray> names = isolate->factory()->NewFixedArray(n);
- obj->GetLocalElementKeys(*names, static_cast<PropertyAttributes>(NONE));
- return *isolate->factory()->NewJSArrayWithElements(names);
-}
-
-
-// Return information on whether an object has a named or indexed interceptor.
-// args[0]: object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetInterceptorInfo) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- if (!args[0]->IsJSObject()) {
- return Smi::FromInt(0);
- }
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
-
- int result = 0;
- if (obj->HasNamedInterceptor()) result |= 2;
- if (obj->HasIndexedInterceptor()) result |= 1;
-
- return Smi::FromInt(result);
-}
-
-
-// Return property names from named interceptor.
-// args[0]: object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetNamedInterceptorPropertyNames) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
-
- if (obj->HasNamedInterceptor()) {
- v8::Handle<v8::Array> result = GetKeysForNamedInterceptor(obj, obj);
- if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
- }
- return isolate->heap()->undefined_value();
-}
-
-
-// Return element names from indexed interceptor.
-// args[0]: object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetIndexedInterceptorElementNames) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
-
- if (obj->HasIndexedInterceptor()) {
- v8::Handle<v8::Array> result = GetKeysForIndexedInterceptor(obj, obj);
- if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
- }
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) {
- ASSERT_EQ(args.length(), 1);
- CONVERT_CHECKED(JSObject, raw_object, args[0]);
- HandleScope scope(isolate);
- Handle<JSObject> object(raw_object);
-
- if (object->IsJSGlobalProxy()) {
- // Do access checks before going to the global object.
- if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object, isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
- return *isolate->factory()->NewJSArray(0);
- }
-
- Handle<Object> proto(object->GetPrototype());
- // If proxy is detached we simply return an empty array.
- if (proto->IsNull()) return *isolate->factory()->NewJSArray(0);
- object = Handle<JSObject>::cast(proto);
- }
-
- Handle<FixedArray> contents = GetKeysInFixedArrayFor(object,
- LOCAL_ONLY);
- // Some fast paths through GetKeysInFixedArrayFor reuse a cached
- // property array and since the result is mutable we have to create
- // a fresh clone on each invocation.
- int length = contents->length();
- Handle<FixedArray> copy = isolate->factory()->NewFixedArray(length);
- for (int i = 0; i < length; i++) {
- Object* entry = contents->get(i);
- if (entry->IsString()) {
- copy->set(i, entry);
- } else {
- ASSERT(entry->IsNumber());
- HandleScope scope(isolate);
- Handle<Object> entry_handle(entry, isolate);
- Handle<Object> entry_str =
- isolate->factory()->NumberToString(entry_handle);
- copy->set(i, *entry_str);
- }
- }
- return *isolate->factory()->NewJSArrayWithElements(copy);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- // Compute the frame holding the arguments.
- JavaScriptFrameIterator it(isolate);
- it.AdvanceToArgumentsFrame();
- JavaScriptFrame* frame = it.frame();
-
- // Get the actual number of provided arguments.
- const uint32_t n = frame->ComputeParametersCount();
-
- // Try to convert the key to an index. If successful and within
- // index return the the argument from the frame.
- uint32_t index;
- if (args[0]->ToArrayIndex(&index) && index < n) {
- return frame->GetParameter(index);
- }
-
- // Convert the key to a string.
- HandleScope scope(isolate);
- bool exception = false;
- Handle<Object> converted =
- Execution::ToString(args.at<Object>(0), &exception);
- if (exception) return Failure::Exception();
- Handle<String> key = Handle<String>::cast(converted);
-
- // Try to convert the string key into an array index.
- if (key->AsArrayIndex(&index)) {
- if (index < n) {
- return frame->GetParameter(index);
- } else {
- return isolate->initial_object_prototype()->GetElement(index);
- }
- }
-
- // Handle special arguments properties.
- if (key->Equals(isolate->heap()->length_symbol())) return Smi::FromInt(n);
- if (key->Equals(isolate->heap()->callee_symbol())) {
- Object* function = frame->function();
- if (function->IsJSFunction() &&
- JSFunction::cast(function)->shared()->strict_mode()) {
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_arguments_callee", HandleVector<Object>(NULL, 0)));
- }
- return function;
- }
-
- // Lookup in the initial Object.prototype object.
- return isolate->initial_object_prototype()->GetProperty(*key);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) {
- HandleScope scope(isolate);
-
- ASSERT(args.length() == 1);
- Handle<Object> object = args.at<Object>(0);
- if (object->IsJSObject()) {
- Handle<JSObject> js_object = Handle<JSObject>::cast(object);
- if (!js_object->HasFastProperties() && !js_object->IsGlobalObject()) {
- MaybeObject* ok = js_object->TransformToFastProperties(0);
- if (ok->IsRetryAfterGC()) return ok;
- }
- }
- return *object;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ToSlowProperties) {
- HandleScope scope(isolate);
-
- ASSERT(args.length() == 1);
- Handle<Object> object = args.at<Object>(0);
- if (object->IsJSObject() && !object->IsJSGlobalProxy()) {
- Handle<JSObject> js_object = Handle<JSObject>::cast(object);
- NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
- }
- return *object;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ToBool) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- return args[0]->ToBoolean();
-}
-
-
-// Returns the type string of a value; see ECMA-262, 11.4.3 (p 47).
-// Possible optimizations: put the type string into the oddballs.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Typeof) {
- NoHandleAllocation ha;
-
- Object* obj = args[0];
- if (obj->IsNumber()) return isolate->heap()->number_symbol();
- HeapObject* heap_obj = HeapObject::cast(obj);
-
- // typeof an undetectable object is 'undefined'
- if (heap_obj->map()->is_undetectable()) {
- return isolate->heap()->undefined_symbol();
- }
-
- InstanceType instance_type = heap_obj->map()->instance_type();
- if (instance_type < FIRST_NONSTRING_TYPE) {
- return isolate->heap()->string_symbol();
- }
-
- switch (instance_type) {
- case ODDBALL_TYPE:
- if (heap_obj->IsTrue() || heap_obj->IsFalse()) {
- return isolate->heap()->boolean_symbol();
- }
- if (heap_obj->IsNull()) {
- return isolate->heap()->object_symbol();
- }
- ASSERT(heap_obj->IsUndefined());
- return isolate->heap()->undefined_symbol();
- case JS_FUNCTION_TYPE: case JS_REGEXP_TYPE:
- return isolate->heap()->function_symbol();
- default:
- // For any kind of object not handled above, the spec rule for
- // host objects gives that it is okay to return "object"
- return isolate->heap()->object_symbol();
- }
-}
-
-
-static bool AreDigits(const char*s, int from, int to) {
- for (int i = from; i < to; i++) {
- if (s[i] < '0' || s[i] > '9') return false;
- }
-
- return true;
-}
-
-
-static int ParseDecimalInteger(const char*s, int from, int to) {
- ASSERT(to - from < 10); // Overflow is not possible.
- ASSERT(from < to);
- int d = s[from] - '0';
-
- for (int i = from + 1; i < to; i++) {
- d = 10 * d + (s[i] - '0');
- }
-
- return d;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- CONVERT_CHECKED(String, subject, args[0]);
- subject->TryFlatten();
-
- // Fast case: short integer or some sorts of junk values.
- int len = subject->length();
- if (subject->IsSeqAsciiString()) {
- if (len == 0) return Smi::FromInt(0);
-
- char const* data = SeqAsciiString::cast(subject)->GetChars();
- bool minus = (data[0] == '-');
- int start_pos = (minus ? 1 : 0);
-
- if (start_pos == len) {
- return isolate->heap()->nan_value();
- } else if (data[start_pos] > '9') {
- // Fast check for a junk value. A valid string may start from a
- // whitespace, a sign ('+' or '-'), the decimal point, a decimal digit or
- // the 'I' character ('Infinity'). All of that have codes not greater than
- // '9' except 'I'.
- if (data[start_pos] != 'I') {
- return isolate->heap()->nan_value();
- }
- } else if (len - start_pos < 10 && AreDigits(data, start_pos, len)) {
- // The maximal/minimal smi has 10 digits. If the string has less digits we
- // know it will fit into the smi-data type.
- int d = ParseDecimalInteger(data, start_pos, len);
- if (minus) {
- if (d == 0) return isolate->heap()->minus_zero_value();
- d = -d;
- } else if (!subject->HasHashCode() &&
- len <= String::kMaxArrayIndexSize &&
- (len == 1 || data[0] != '0')) {
- // String hash is not calculated yet but all the data are present.
- // Update the hash field to speed up sequential convertions.
- uint32_t hash = StringHasher::MakeArrayIndexHash(d, len);
-#ifdef DEBUG
- subject->Hash(); // Force hash calculation.
- ASSERT_EQ(static_cast<int>(subject->hash_field()),
- static_cast<int>(hash));
-#endif
- subject->set_hash_field(hash);
- }
- return Smi::FromInt(d);
- }
- }
-
- // Slower case.
- return isolate->heap()->NumberFromDouble(StringToDouble(subject, ALLOW_HEX));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringFromCharCodeArray) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_CHECKED(JSArray, codes, args[0]);
- int length = Smi::cast(codes->length())->value();
-
- // Check if the string can be ASCII.
- int i;
- for (i = 0; i < length; i++) {
- Object* element;
- { MaybeObject* maybe_element = codes->GetElement(i);
- // We probably can't get an exception here, but just in order to enforce
- // the checking of inputs in the runtime calls we check here.
- if (!maybe_element->ToObject(&element)) return maybe_element;
- }
- CONVERT_NUMBER_CHECKED(int, chr, Int32, element);
- if ((chr & 0xffff) > String::kMaxAsciiCharCode)
- break;
- }
-
- MaybeObject* maybe_object = NULL;
- if (i == length) { // The string is ASCII.
- maybe_object = isolate->heap()->AllocateRawAsciiString(length);
- } else { // The string is not ASCII.
- maybe_object = isolate->heap()->AllocateRawTwoByteString(length);
- }
-
- Object* object = NULL;
- if (!maybe_object->ToObject(&object)) return maybe_object;
- String* result = String::cast(object);
- for (int i = 0; i < length; i++) {
- Object* element;
- { MaybeObject* maybe_element = codes->GetElement(i);
- if (!maybe_element->ToObject(&element)) return maybe_element;
- }
- CONVERT_NUMBER_CHECKED(int, chr, Int32, element);
- result->Set(i, chr & 0xffff);
- }
- return result;
-}
-
-
-// kNotEscaped is generated by the following:
-//
-// #!/bin/perl
-// for (my $i = 0; $i < 256; $i++) {
-// print "\n" if $i % 16 == 0;
-// my $c = chr($i);
-// my $escaped = 1;
-// $escaped = 0 if $c =~ m#[A-Za-z0-9@*_+./-]#;
-// print $escaped ? "0, " : "1, ";
-// }
-
-
-static bool IsNotEscaped(uint16_t character) {
- // Only for 8 bit characters, the rest are always escaped (in a different way)
- ASSERT(character < 256);
- static const char kNotEscaped[256] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
- 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- };
- return kNotEscaped[character] != 0;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_URIEscape) {
- const char hex_chars[] = "0123456789ABCDEF";
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- CONVERT_CHECKED(String, source, args[0]);
-
- source->TryFlatten();
-
- int escaped_length = 0;
- int length = source->length();
- {
- Access<StringInputBuffer> buffer(
- isolate->runtime_state()->string_input_buffer());
- buffer->Reset(source);
- while (buffer->has_more()) {
- uint16_t character = buffer->GetNext();
- if (character >= 256) {
- escaped_length += 6;
- } else if (IsNotEscaped(character)) {
- escaped_length++;
- } else {
- escaped_length += 3;
- }
- // We don't allow strings that are longer than a maximal length.
- ASSERT(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow.
- if (escaped_length > String::kMaxLength) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
- }
- }
- }
- // No length change implies no change. Return original string if no change.
- if (escaped_length == length) {
- return source;
- }
- Object* o;
- { MaybeObject* maybe_o =
- isolate->heap()->AllocateRawAsciiString(escaped_length);
- if (!maybe_o->ToObject(&o)) return maybe_o;
- }
- String* destination = String::cast(o);
- int dest_position = 0;
-
- Access<StringInputBuffer> buffer(
- isolate->runtime_state()->string_input_buffer());
- buffer->Rewind();
- while (buffer->has_more()) {
- uint16_t chr = buffer->GetNext();
- if (chr >= 256) {
- destination->Set(dest_position, '%');
- destination->Set(dest_position+1, 'u');
- destination->Set(dest_position+2, hex_chars[chr >> 12]);
- destination->Set(dest_position+3, hex_chars[(chr >> 8) & 0xf]);
- destination->Set(dest_position+4, hex_chars[(chr >> 4) & 0xf]);
- destination->Set(dest_position+5, hex_chars[chr & 0xf]);
- dest_position += 6;
- } else if (IsNotEscaped(chr)) {
- destination->Set(dest_position, chr);
- dest_position++;
- } else {
- destination->Set(dest_position, '%');
- destination->Set(dest_position+1, hex_chars[chr >> 4]);
- destination->Set(dest_position+2, hex_chars[chr & 0xf]);
- dest_position += 3;
- }
- }
- return destination;
-}
-
-
-static inline int TwoDigitHex(uint16_t character1, uint16_t character2) {
- static const signed char kHexValue['g'] = {
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1,
- -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 10, 11, 12, 13, 14, 15 };
-
- if (character1 > 'f') return -1;
- int hi = kHexValue[character1];
- if (hi == -1) return -1;
- if (character2 > 'f') return -1;
- int lo = kHexValue[character2];
- if (lo == -1) return -1;
- return (hi << 4) + lo;
-}
-
-
-static inline int Unescape(String* source,
- int i,
- int length,
- int* step) {
- uint16_t character = source->Get(i);
- int32_t hi = 0;
- int32_t lo = 0;
- if (character == '%' &&
- i <= length - 6 &&
- source->Get(i + 1) == 'u' &&
- (hi = TwoDigitHex(source->Get(i + 2),
- source->Get(i + 3))) != -1 &&
- (lo = TwoDigitHex(source->Get(i + 4),
- source->Get(i + 5))) != -1) {
- *step = 6;
- return (hi << 8) + lo;
- } else if (character == '%' &&
- i <= length - 3 &&
- (lo = TwoDigitHex(source->Get(i + 1),
- source->Get(i + 2))) != -1) {
- *step = 3;
- return lo;
- } else {
- *step = 1;
- return character;
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_URIUnescape) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- CONVERT_CHECKED(String, source, args[0]);
-
- source->TryFlatten();
-
- bool ascii = true;
- int length = source->length();
-
- int unescaped_length = 0;
- for (int i = 0; i < length; unescaped_length++) {
- int step;
- if (Unescape(source, i, length, &step) > String::kMaxAsciiCharCode) {
- ascii = false;
- }
- i += step;
- }
-
- // No length change implies no change. Return original string if no change.
- if (unescaped_length == length)
- return source;
-
- Object* o;
- { MaybeObject* maybe_o =
- ascii ?
- isolate->heap()->AllocateRawAsciiString(unescaped_length) :
- isolate->heap()->AllocateRawTwoByteString(unescaped_length);
- if (!maybe_o->ToObject(&o)) return maybe_o;
- }
- String* destination = String::cast(o);
-
- int dest_position = 0;
- for (int i = 0; i < length; dest_position++) {
- int step;
- destination->Set(dest_position, Unescape(source, i, length, &step));
- i += step;
- }
- return destination;
-}
-
-
-static const unsigned int kQuoteTableLength = 128u;
-
-static const int kJsonQuotesCharactersPerEntry = 8;
-static const char* const JsonQuotes =
- "\\u0000 \\u0001 \\u0002 \\u0003 "
- "\\u0004 \\u0005 \\u0006 \\u0007 "
- "\\b \\t \\n \\u000b "
- "\\f \\r \\u000e \\u000f "
- "\\u0010 \\u0011 \\u0012 \\u0013 "
- "\\u0014 \\u0015 \\u0016 \\u0017 "
- "\\u0018 \\u0019 \\u001a \\u001b "
- "\\u001c \\u001d \\u001e \\u001f "
- " ! \\\" # "
- "$ % & ' "
- "( ) * + "
- ", - . / "
- "0 1 2 3 "
- "4 5 6 7 "
- "8 9 : ; "
- "< = > ? "
- "@ A B C "
- "D E F G "
- "H I J K "
- "L M N O "
- "P Q R S "
- "T U V W "
- "X Y Z [ "
- "\\\\ ] ^ _ "
- "` a b c "
- "d e f g "
- "h i j k "
- "l m n o "
- "p q r s "
- "t u v w "
- "x y z { "
- "| } ~ \177 ";
-
-
-// For a string that is less than 32k characters it should always be
-// possible to allocate it in new space.
-static const int kMaxGuaranteedNewSpaceString = 32 * 1024;
-
-
-// Doing JSON quoting cannot make the string more than this many times larger.
-static const int kJsonQuoteWorstCaseBlowup = 6;
-
-
-// Covers the entire ASCII range (all other characters are unchanged by JSON
-// quoting).
-static const byte JsonQuoteLengths[kQuoteTableLength] = {
- 6, 6, 6, 6, 6, 6, 6, 6,
- 2, 2, 2, 6, 2, 2, 6, 6,
- 6, 6, 6, 6, 6, 6, 6, 6,
- 6, 6, 6, 6, 6, 6, 6, 6,
- 1, 1, 2, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 2, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
-};
-
-
-template <typename StringType>
-MaybeObject* AllocateRawString(Isolate* isolate, int length);
-
-
-template <>
-MaybeObject* AllocateRawString<SeqTwoByteString>(Isolate* isolate, int length) {
- return isolate->heap()->AllocateRawTwoByteString(length);
-}
-
-
-template <>
-MaybeObject* AllocateRawString<SeqAsciiString>(Isolate* isolate, int length) {
- return isolate->heap()->AllocateRawAsciiString(length);
-}
-
-
-template <typename Char, typename StringType, bool comma>
-static MaybeObject* SlowQuoteJsonString(Isolate* isolate,
- Vector<const Char> characters) {
- int length = characters.length();
- const Char* read_cursor = characters.start();
- const Char* end = read_cursor + length;
- const int kSpaceForQuotes = 2 + (comma ? 1 :0);
- int quoted_length = kSpaceForQuotes;
- while (read_cursor < end) {
- Char c = *(read_cursor++);
- if (sizeof(Char) > 1u && static_cast<unsigned>(c) >= kQuoteTableLength) {
- quoted_length++;
- } else {
- quoted_length += JsonQuoteLengths[static_cast<unsigned>(c)];
- }
- }
- MaybeObject* new_alloc = AllocateRawString<StringType>(isolate,
- quoted_length);
- Object* new_object;
- if (!new_alloc->ToObject(&new_object)) {
- return new_alloc;
- }
- StringType* new_string = StringType::cast(new_object);
-
- Char* write_cursor = reinterpret_cast<Char*>(
- new_string->address() + SeqAsciiString::kHeaderSize);
- if (comma) *(write_cursor++) = ',';
- *(write_cursor++) = '"';
-
- read_cursor = characters.start();
- while (read_cursor < end) {
- Char c = *(read_cursor++);
- if (sizeof(Char) > 1u && static_cast<unsigned>(c) >= kQuoteTableLength) {
- *(write_cursor++) = c;
- } else {
- int len = JsonQuoteLengths[static_cast<unsigned>(c)];
- const char* replacement = JsonQuotes +
- static_cast<unsigned>(c) * kJsonQuotesCharactersPerEntry;
- for (int i = 0; i < len; i++) {
- *write_cursor++ = *replacement++;
- }
- }
- }
- *(write_cursor++) = '"';
- return new_string;
-}
-
-
-template <typename Char, typename StringType, bool comma>
-static MaybeObject* QuoteJsonString(Isolate* isolate,
- Vector<const Char> characters) {
- int length = characters.length();
- isolate->counters()->quote_json_char_count()->Increment(length);
- const int kSpaceForQuotes = 2 + (comma ? 1 :0);
- int worst_case_length = length * kJsonQuoteWorstCaseBlowup + kSpaceForQuotes;
- if (worst_case_length > kMaxGuaranteedNewSpaceString) {
- return SlowQuoteJsonString<Char, StringType, comma>(isolate, characters);
- }
-
- MaybeObject* new_alloc = AllocateRawString<StringType>(isolate,
- worst_case_length);
- Object* new_object;
- if (!new_alloc->ToObject(&new_object)) {
- return new_alloc;
- }
- if (!isolate->heap()->new_space()->Contains(new_object)) {
- // Even if our string is small enough to fit in new space we still have to
- // handle it being allocated in old space as may happen in the third
- // attempt. See CALL_AND_RETRY in heap-inl.h and similar code in
- // CEntryStub::GenerateCore.
- return SlowQuoteJsonString<Char, StringType, comma>(isolate, characters);
- }
- StringType* new_string = StringType::cast(new_object);
- ASSERT(isolate->heap()->new_space()->Contains(new_string));
-
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
- Char* write_cursor = reinterpret_cast<Char*>(
- new_string->address() + SeqAsciiString::kHeaderSize);
- if (comma) *(write_cursor++) = ',';
- *(write_cursor++) = '"';
-
- const Char* read_cursor = characters.start();
- const Char* end = read_cursor + length;
- while (read_cursor < end) {
- Char c = *(read_cursor++);
- if (sizeof(Char) > 1u && static_cast<unsigned>(c) >= kQuoteTableLength) {
- *(write_cursor++) = c;
- } else {
- int len = JsonQuoteLengths[static_cast<unsigned>(c)];
- const char* replacement = JsonQuotes +
- static_cast<unsigned>(c) * kJsonQuotesCharactersPerEntry;
- write_cursor[0] = replacement[0];
- if (len > 1) {
- write_cursor[1] = replacement[1];
- if (len > 2) {
- ASSERT(len == 6);
- write_cursor[2] = replacement[2];
- write_cursor[3] = replacement[3];
- write_cursor[4] = replacement[4];
- write_cursor[5] = replacement[5];
- }
- }
- write_cursor += len;
- }
- }
- *(write_cursor++) = '"';
-
- int final_length = static_cast<int>(
- write_cursor - reinterpret_cast<Char*>(
- new_string->address() + SeqAsciiString::kHeaderSize));
- isolate->heap()->new_space()->
- template ShrinkStringAtAllocationBoundary<StringType>(
- new_string, final_length);
- return new_string;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONString) {
- NoHandleAllocation ha;
- CONVERT_CHECKED(String, str, args[0]);
- if (!str->IsFlat()) {
- MaybeObject* try_flatten = str->TryFlatten();
- Object* flat;
- if (!try_flatten->ToObject(&flat)) {
- return try_flatten;
- }
- str = String::cast(flat);
- ASSERT(str->IsFlat());
- }
- if (str->IsTwoByteRepresentation()) {
- return QuoteJsonString<uc16, SeqTwoByteString, false>(isolate,
- str->ToUC16Vector());
- } else {
- return QuoteJsonString<char, SeqAsciiString, false>(isolate,
- str->ToAsciiVector());
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringComma) {
- NoHandleAllocation ha;
- CONVERT_CHECKED(String, str, args[0]);
- if (!str->IsFlat()) {
- MaybeObject* try_flatten = str->TryFlatten();
- Object* flat;
- if (!try_flatten->ToObject(&flat)) {
- return try_flatten;
- }
- str = String::cast(flat);
- ASSERT(str->IsFlat());
- }
- if (str->IsTwoByteRepresentation()) {
- return QuoteJsonString<uc16, SeqTwoByteString, true>(isolate,
- str->ToUC16Vector());
- } else {
- return QuoteJsonString<char, SeqAsciiString, true>(isolate,
- str->ToAsciiVector());
- }
-}
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) {
- NoHandleAllocation ha;
-
- CONVERT_CHECKED(String, s, args[0]);
- CONVERT_SMI_CHECKED(radix, args[1]);
-
- s->TryFlatten();
-
- RUNTIME_ASSERT(radix == 0 || (2 <= radix && radix <= 36));
- double value = StringToInt(s, radix);
- return isolate->heap()->NumberFromDouble(value);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseFloat) {
- NoHandleAllocation ha;
- CONVERT_CHECKED(String, str, args[0]);
-
- // ECMA-262 section 15.1.2.3, empty string is NaN
- double value = StringToDouble(str, ALLOW_TRAILING_JUNK, OS::nan_value());
-
- // Create a number object from the value.
- return isolate->heap()->NumberFromDouble(value);
-}
-
-
-template <class Converter>
-MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
- Isolate* isolate,
- String* s,
- int length,
- int input_string_length,
- unibrow::Mapping<Converter, 128>* mapping) {
- // We try this twice, once with the assumption that the result is no longer
- // than the input and, if that assumption breaks, again with the exact
- // length. This may not be pretty, but it is nicer than what was here before
- // and I hereby claim my vaffel-is.
- //
- // Allocate the resulting string.
- //
- // NOTE: This assumes that the upper/lower case of an ascii
- // character is also ascii. This is currently the case, but it
- // might break in the future if we implement more context and locale
- // dependent upper/lower conversions.
- Object* o;
- { MaybeObject* maybe_o = s->IsAsciiRepresentation()
- ? isolate->heap()->AllocateRawAsciiString(length)
- : isolate->heap()->AllocateRawTwoByteString(length);
- if (!maybe_o->ToObject(&o)) return maybe_o;
- }
- String* result = String::cast(o);
- bool has_changed_character = false;
-
- // Convert all characters to upper case, assuming that they will fit
- // in the buffer
- Access<StringInputBuffer> buffer(
- isolate->runtime_state()->string_input_buffer());
- buffer->Reset(s);
- unibrow::uchar chars[Converter::kMaxWidth];
- // We can assume that the string is not empty
- uc32 current = buffer->GetNext();
- for (int i = 0; i < length;) {
- bool has_next = buffer->has_more();
- uc32 next = has_next ? buffer->GetNext() : 0;
- int char_length = mapping->get(current, next, chars);
- if (char_length == 0) {
- // The case conversion of this character is the character itself.
- result->Set(i, current);
- i++;
- } else if (char_length == 1) {
- // Common case: converting the letter resulted in one character.
- ASSERT(static_cast<uc32>(chars[0]) != current);
- result->Set(i, chars[0]);
- has_changed_character = true;
- i++;
- } else if (length == input_string_length) {
- // We've assumed that the result would be as long as the
- // input but here is a character that converts to several
- // characters. No matter, we calculate the exact length
- // of the result and try the whole thing again.
- //
- // Note that this leaves room for optimization. We could just
- // memcpy what we already have to the result string. Also,
- // the result string is the last object allocated we could
- // "realloc" it and probably, in the vast majority of cases,
- // extend the existing string to be able to hold the full
- // result.
- int next_length = 0;
- if (has_next) {
- next_length = mapping->get(next, 0, chars);
- if (next_length == 0) next_length = 1;
- }
- int current_length = i + char_length + next_length;
- while (buffer->has_more()) {
- current = buffer->GetNext();
- // NOTE: we use 0 as the next character here because, while
- // the next character may affect what a character converts to,
- // it does not in any case affect the length of what it convert
- // to.
- int char_length = mapping->get(current, 0, chars);
- if (char_length == 0) char_length = 1;
- current_length += char_length;
- if (current_length > Smi::kMaxValue) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
- }
- }
- // Try again with the real length.
- return Smi::FromInt(current_length);
- } else {
- for (int j = 0; j < char_length; j++) {
- result->Set(i, chars[j]);
- i++;
- }
- has_changed_character = true;
- }
- current = next;
- }
- if (has_changed_character) {
- return result;
- } else {
- // If we didn't actually change anything in doing the conversion
- // we simple return the result and let the converted string
- // become garbage; there is no reason to keep two identical strings
- // alive.
- return s;
- }
-}
-
-
-namespace {
-
-static const uintptr_t kOneInEveryByte = kUintptrAllBitsSet / 0xFF;
-
-
-// Given a word and two range boundaries returns a word with high bit
-// set in every byte iff the corresponding input byte was strictly in
-// the range (m, n). All the other bits in the result are cleared.
-// This function is only useful when it can be inlined and the
-// boundaries are statically known.
-// Requires: all bytes in the input word and the boundaries must be
-// ascii (less than 0x7F).
-static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) {
- // Every byte in an ascii string is less than or equal to 0x7F.
- ASSERT((w & (kOneInEveryByte * 0x7F)) == w);
- // Use strict inequalities since in edge cases the function could be
- // further simplified.
- ASSERT(0 < m && m < n && n < 0x7F);
- // Has high bit set in every w byte less than n.
- uintptr_t tmp1 = kOneInEveryByte * (0x7F + n) - w;
- // Has high bit set in every w byte greater than m.
- uintptr_t tmp2 = w + kOneInEveryByte * (0x7F - m);
- return (tmp1 & tmp2 & (kOneInEveryByte * 0x80));
-}
-
-
-enum AsciiCaseConversion {
- ASCII_TO_LOWER,
- ASCII_TO_UPPER
-};
-
-
-template <AsciiCaseConversion dir>
-struct FastAsciiConverter {
- static bool Convert(char* dst, char* src, int length) {
-#ifdef DEBUG
- char* saved_dst = dst;
- char* saved_src = src;
-#endif
- // We rely on the distance between upper and lower case letters
- // being a known power of 2.
- ASSERT('a' - 'A' == (1 << 5));
- // Boundaries for the range of input characters than require conversion.
- const char lo = (dir == ASCII_TO_LOWER) ? 'A' - 1 : 'a' - 1;
- const char hi = (dir == ASCII_TO_LOWER) ? 'Z' + 1 : 'z' + 1;
- bool changed = false;
- char* const limit = src + length;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- // Process the prefix of the input that requires no conversion one
- // (machine) word at a time.
- while (src <= limit - sizeof(uintptr_t)) {
- uintptr_t w = *reinterpret_cast<uintptr_t*>(src);
- if (AsciiRangeMask(w, lo, hi) != 0) {
- changed = true;
- break;
- }
- *reinterpret_cast<uintptr_t*>(dst) = w;
- src += sizeof(uintptr_t);
- dst += sizeof(uintptr_t);
- }
- // Process the remainder of the input performing conversion when
- // required one word at a time.
- while (src <= limit - sizeof(uintptr_t)) {
- uintptr_t w = *reinterpret_cast<uintptr_t*>(src);
- uintptr_t m = AsciiRangeMask(w, lo, hi);
- // The mask has high (7th) bit set in every byte that needs
- // conversion and we know that the distance between cases is
- // 1 << 5.
- *reinterpret_cast<uintptr_t*>(dst) = w ^ (m >> 2);
- src += sizeof(uintptr_t);
- dst += sizeof(uintptr_t);
- }
-#endif
- // Process the last few bytes of the input (or the whole input if
- // unaligned access is not supported).
- while (src < limit) {
- char c = *src;
- if (lo < c && c < hi) {
- c ^= (1 << 5);
- changed = true;
- }
- *dst = c;
- ++src;
- ++dst;
- }
-#ifdef DEBUG
- CheckConvert(saved_dst, saved_src, length, changed);
-#endif
- return changed;
- }
-
-#ifdef DEBUG
- static void CheckConvert(char* dst, char* src, int length, bool changed) {
- bool expected_changed = false;
- for (int i = 0; i < length; i++) {
- if (dst[i] == src[i]) continue;
- expected_changed = true;
- if (dir == ASCII_TO_LOWER) {
- ASSERT('A' <= src[i] && src[i] <= 'Z');
- ASSERT(dst[i] == src[i] + ('a' - 'A'));
- } else {
- ASSERT(dir == ASCII_TO_UPPER);
- ASSERT('a' <= src[i] && src[i] <= 'z');
- ASSERT(dst[i] == src[i] - ('a' - 'A'));
- }
- }
- ASSERT(expected_changed == changed);
- }
-#endif
-};
-
-
-struct ToLowerTraits {
- typedef unibrow::ToLowercase UnibrowConverter;
-
- typedef FastAsciiConverter<ASCII_TO_LOWER> AsciiConverter;
-};
-
-
-struct ToUpperTraits {
- typedef unibrow::ToUppercase UnibrowConverter;
-
- typedef FastAsciiConverter<ASCII_TO_UPPER> AsciiConverter;
-};
-
-} // namespace
-
-
-template <typename ConvertTraits>
-MUST_USE_RESULT static MaybeObject* ConvertCase(
- Arguments args,
- Isolate* isolate,
- unibrow::Mapping<typename ConvertTraits::UnibrowConverter, 128>* mapping) {
- NoHandleAllocation ha;
- CONVERT_CHECKED(String, s, args[0]);
- s = s->TryFlattenGetString();
-
- const int length = s->length();
- // Assume that the string is not empty; we need this assumption later
- if (length == 0) return s;
-
- // Simpler handling of ascii strings.
- //
- // NOTE: This assumes that the upper/lower case of an ascii
- // character is also ascii. This is currently the case, but it
- // might break in the future if we implement more context and locale
- // dependent upper/lower conversions.
- if (s->IsSeqAsciiString()) {
- Object* o;
- { MaybeObject* maybe_o = isolate->heap()->AllocateRawAsciiString(length);
- if (!maybe_o->ToObject(&o)) return maybe_o;
- }
- SeqAsciiString* result = SeqAsciiString::cast(o);
- bool has_changed_character = ConvertTraits::AsciiConverter::Convert(
- result->GetChars(), SeqAsciiString::cast(s)->GetChars(), length);
- return has_changed_character ? result : s;
- }
-
- Object* answer;
- { MaybeObject* maybe_answer =
- ConvertCaseHelper(isolate, s, length, length, mapping);
- if (!maybe_answer->ToObject(&answer)) return maybe_answer;
- }
- if (answer->IsSmi()) {
- // Retry with correct length.
- { MaybeObject* maybe_answer =
- ConvertCaseHelper(isolate,
- s, Smi::cast(answer)->value(), length, mapping);
- if (!maybe_answer->ToObject(&answer)) return maybe_answer;
- }
- }
- return answer;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToLowerCase) {
- return ConvertCase<ToLowerTraits>(
- args, isolate, isolate->runtime_state()->to_lower_mapping());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToUpperCase) {
- return ConvertCase<ToUpperTraits>(
- args, isolate, isolate->runtime_state()->to_upper_mapping());
-}
-
-
-static inline bool IsTrimWhiteSpace(unibrow::uchar c) {
- return unibrow::WhiteSpace::Is(c) || c == 0x200b;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 3);
-
- CONVERT_CHECKED(String, s, args[0]);
- CONVERT_BOOLEAN_CHECKED(trimLeft, args[1]);
- CONVERT_BOOLEAN_CHECKED(trimRight, args[2]);
-
- s->TryFlatten();
- int length = s->length();
-
- int left = 0;
- if (trimLeft) {
- while (left < length && IsTrimWhiteSpace(s->Get(left))) {
- left++;
- }
- }
-
- int right = length;
- if (trimRight) {
- while (right > left && IsTrimWhiteSpace(s->Get(right - 1))) {
- right--;
- }
- }
- return s->SubString(left, right);
-}
-
-
-template <typename SubjectChar, typename PatternChar>
-void FindStringIndices(Isolate* isolate,
- Vector<const SubjectChar> subject,
- Vector<const PatternChar> pattern,
- ZoneList<int>* indices,
- unsigned int limit) {
- ASSERT(limit > 0);
- // Collect indices of pattern in subject, and the end-of-string index.
- // Stop after finding at most limit values.
- StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
- int pattern_length = pattern.length();
- int index = 0;
- while (limit > 0) {
- index = search.Search(subject, index);
- if (index < 0) return;
- indices->Add(index);
- index += pattern_length;
- limit--;
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
- ASSERT(args.length() == 3);
- HandleScope handle_scope(isolate);
- CONVERT_ARG_CHECKED(String, subject, 0);
- CONVERT_ARG_CHECKED(String, pattern, 1);
- CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[2]);
-
- int subject_length = subject->length();
- int pattern_length = pattern->length();
- RUNTIME_ASSERT(pattern_length > 0);
-
- // The limit can be very large (0xffffffffu), but since the pattern
- // isn't empty, we can never create more parts than ~half the length
- // of the subject.
-
- if (!subject->IsFlat()) FlattenString(subject);
-
- static const int kMaxInitialListCapacity = 16;
-
- ZoneScope scope(DELETE_ON_EXIT);
-
- // Find (up to limit) indices of separator and end-of-string in subject
- int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit);
- ZoneList<int> indices(initial_capacity);
- if (!pattern->IsFlat()) FlattenString(pattern);
-
- // No allocation block.
- {
- AssertNoAllocation nogc;
- if (subject->IsAsciiRepresentation()) {
- Vector<const char> subject_vector = subject->ToAsciiVector();
- if (pattern->IsAsciiRepresentation()) {
- FindStringIndices(isolate,
- subject_vector,
- pattern->ToAsciiVector(),
- &indices,
- limit);
- } else {
- FindStringIndices(isolate,
- subject_vector,
- pattern->ToUC16Vector(),
- &indices,
- limit);
- }
- } else {
- Vector<const uc16> subject_vector = subject->ToUC16Vector();
- if (pattern->IsAsciiRepresentation()) {
- FindStringIndices(isolate,
- subject_vector,
- pattern->ToAsciiVector(),
- &indices,
- limit);
- } else {
- FindStringIndices(isolate,
- subject_vector,
- pattern->ToUC16Vector(),
- &indices,
- limit);
- }
- }
- }
-
- if (static_cast<uint32_t>(indices.length()) < limit) {
- indices.Add(subject_length);
- }
-
- // The list indices now contains the end of each part to create.
-
- // Create JSArray of substrings separated by separator.
- int part_count = indices.length();
-
- Handle<JSArray> result = isolate->factory()->NewJSArray(part_count);
- result->set_length(Smi::FromInt(part_count));
-
- ASSERT(result->HasFastElements());
-
- if (part_count == 1 && indices.at(0) == subject_length) {
- FixedArray::cast(result->elements())->set(0, *subject);
- return *result;
- }
-
- Handle<FixedArray> elements(FixedArray::cast(result->elements()));
- int part_start = 0;
- for (int i = 0; i < part_count; i++) {
- HandleScope local_loop_handle;
- int part_end = indices.at(i);
- Handle<String> substring =
- isolate->factory()->NewSubString(subject, part_start, part_end);
- elements->set(i, *substring);
- part_start = part_end + pattern_length;
- }
-
- return *result;
-}
-
-
-// Copies ascii characters to the given fixed array looking up
-// one-char strings in the cache. Gives up on the first char that is
-// not in the cache and fills the remainder with smi zeros. Returns
-// the length of the successfully copied prefix.
-static int CopyCachedAsciiCharsToArray(Heap* heap,
- const char* chars,
- FixedArray* elements,
- int length) {
- AssertNoAllocation nogc;
- FixedArray* ascii_cache = heap->single_character_string_cache();
- Object* undefined = heap->undefined_value();
- int i;
- for (i = 0; i < length; ++i) {
- Object* value = ascii_cache->get(chars[i]);
- if (value == undefined) break;
- ASSERT(!heap->InNewSpace(value));
- elements->set(i, value, SKIP_WRITE_BARRIER);
- }
- if (i < length) {
- ASSERT(Smi::FromInt(0) == 0);
- memset(elements->data_start() + i, 0, kPointerSize * (length - i));
- }
-#ifdef DEBUG
- for (int j = 0; j < length; ++j) {
- Object* element = elements->get(j);
- ASSERT(element == Smi::FromInt(0) ||
- (element->IsString() && String::cast(element)->LooksValid()));
- }
-#endif
- return i;
-}
-
-
-// Converts a String to JSArray.
-// For example, "foo" => ["f", "o", "o"].
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(String, s, 0);
- CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
-
- s->TryFlatten();
- const int length = static_cast<int>(Min<uint32_t>(s->length(), limit));
-
- Handle<FixedArray> elements;
- if (s->IsFlat() && s->IsAsciiRepresentation()) {
- Object* obj;
- { MaybeObject* maybe_obj =
- isolate->heap()->AllocateUninitializedFixedArray(length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- elements = Handle<FixedArray>(FixedArray::cast(obj), isolate);
-
- Vector<const char> chars = s->ToAsciiVector();
- // Note, this will initialize all elements (not only the prefix)
- // to prevent GC from seeing partially initialized array.
- int num_copied_from_cache = CopyCachedAsciiCharsToArray(isolate->heap(),
- chars.start(),
- *elements,
- length);
-
- for (int i = num_copied_from_cache; i < length; ++i) {
- Handle<Object> str = LookupSingleCharacterStringFromCode(chars[i]);
- elements->set(i, *str);
- }
- } else {
- elements = isolate->factory()->NewFixedArray(length);
- for (int i = 0; i < length; ++i) {
- Handle<Object> str = LookupSingleCharacterStringFromCode(s->Get(i));
- elements->set(i, *str);
- }
- }
-
-#ifdef DEBUG
- for (int i = 0; i < length; ++i) {
- ASSERT(String::cast(elements->get(i))->length() == 1);
- }
-#endif
-
- return *isolate->factory()->NewJSArrayWithElements(elements);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStringWrapper) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- CONVERT_CHECKED(String, value, args[0]);
- return value->ToObject();
-}
-
-
-bool Runtime::IsUpperCaseChar(RuntimeState* runtime_state, uint16_t ch) {
- unibrow::uchar chars[unibrow::ToUppercase::kMaxWidth];
- int char_length = runtime_state->to_upper_mapping()->get(ch, 0, chars);
- return char_length == 0;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToString) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- Object* number = args[0];
- RUNTIME_ASSERT(number->IsNumber());
-
- return isolate->heap()->NumberToString(number);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToStringSkipCache) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- Object* number = args[0];
- RUNTIME_ASSERT(number->IsNumber());
-
- return isolate->heap()->NumberToString(number, false);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToInteger) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_DOUBLE_CHECKED(number, args[0]);
-
- // We do not include 0 so that we don't have to treat +0 / -0 cases.
- if (number > 0 && number <= Smi::kMaxValue) {
- return Smi::FromInt(static_cast<int>(number));
- }
- return isolate->heap()->NumberFromDouble(DoubleToInteger(number));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToIntegerMapMinusZero) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_DOUBLE_CHECKED(number, args[0]);
-
- // We do not include 0 so that we don't have to treat +0 / -0 cases.
- if (number > 0 && number <= Smi::kMaxValue) {
- return Smi::FromInt(static_cast<int>(number));
- }
-
- double double_value = DoubleToInteger(number);
- // Map both -0 and +0 to +0.
- if (double_value == 0) double_value = 0;
-
- return isolate->heap()->NumberFromDouble(double_value);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSUint32) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_NUMBER_CHECKED(int32_t, number, Uint32, args[0]);
- return isolate->heap()->NumberFromUint32(number);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSInt32) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_DOUBLE_CHECKED(number, args[0]);
-
- // We do not include 0 so that we don't have to treat +0 / -0 cases.
- if (number > 0 && number <= Smi::kMaxValue) {
- return Smi::FromInt(static_cast<int>(number));
- }
- return isolate->heap()->NumberFromInt32(DoubleToInt32(number));
-}
-
-
-// Converts a Number to a Smi, if possible. Returns NaN if the number is not
-// a small integer.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToSmi) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- Object* obj = args[0];
- if (obj->IsSmi()) {
- return obj;
- }
- if (obj->IsHeapNumber()) {
- double value = HeapNumber::cast(obj)->value();
- int int_value = FastD2I(value);
- if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
- return Smi::FromInt(int_value);
- }
- }
- return isolate->heap()->nan_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateHeapNumber) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 0);
- return isolate->heap()->AllocateHeapNumber(0);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAdd) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
- return isolate->heap()->NumberFromDouble(x + y);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSub) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
- return isolate->heap()->NumberFromDouble(x - y);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMul) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
- return isolate->heap()->NumberFromDouble(x * y);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberUnaryMinus) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return isolate->heap()->NumberFromDouble(-x);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAlloc) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 0);
-
- return isolate->heap()->NumberFromDouble(9876543210.0);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberDiv) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
- return isolate->heap()->NumberFromDouble(x / y);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMod) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
-
- x = modulo(x, y);
- // NumberFromDouble may return a Smi instead of a Number object
- return isolate->heap()->NumberFromDouble(x);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringAdd) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
- CONVERT_CHECKED(String, str1, args[0]);
- CONVERT_CHECKED(String, str2, args[1]);
- isolate->counters()->string_add_runtime()->Increment();
- return isolate->heap()->AllocateConsString(str1, str2);
-}
-
-
-template <typename sinkchar>
-static inline void StringBuilderConcatHelper(String* special,
- sinkchar* sink,
- FixedArray* fixed_array,
- int array_length) {
- int position = 0;
- for (int i = 0; i < array_length; i++) {
- Object* element = fixed_array->get(i);
- if (element->IsSmi()) {
- // Smi encoding of position and length.
- int encoded_slice = Smi::cast(element)->value();
- int pos;
- int len;
- if (encoded_slice > 0) {
- // Position and length encoded in one smi.
- pos = StringBuilderSubstringPosition::decode(encoded_slice);
- len = StringBuilderSubstringLength::decode(encoded_slice);
- } else {
- // Position and length encoded in two smis.
- Object* obj = fixed_array->get(++i);
- ASSERT(obj->IsSmi());
- pos = Smi::cast(obj)->value();
- len = -encoded_slice;
- }
- String::WriteToFlat(special,
- sink + position,
- pos,
- pos + len);
- position += len;
- } else {
- String* string = String::cast(element);
- int element_length = string->length();
- String::WriteToFlat(string, sink + position, 0, element_length);
- position += element_length;
- }
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 3);
- CONVERT_CHECKED(JSArray, array, args[0]);
- if (!args[1]->IsSmi()) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
- }
- int array_length = Smi::cast(args[1])->value();
- CONVERT_CHECKED(String, special, args[2]);
-
- // This assumption is used by the slice encoding in one or two smis.
- ASSERT(Smi::kMaxValue >= String::kMaxLength);
-
- int special_length = special->length();
- if (!array->HasFastElements()) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
- }
- FixedArray* fixed_array = FixedArray::cast(array->elements());
- if (fixed_array->length() < array_length) {
- array_length = fixed_array->length();
- }
-
- if (array_length == 0) {
- return isolate->heap()->empty_string();
- } else if (array_length == 1) {
- Object* first = fixed_array->get(0);
- if (first->IsString()) return first;
- }
-
- bool ascii = special->HasOnlyAsciiChars();
- int position = 0;
- for (int i = 0; i < array_length; i++) {
- int increment = 0;
- Object* elt = fixed_array->get(i);
- if (elt->IsSmi()) {
- // Smi encoding of position and length.
- int smi_value = Smi::cast(elt)->value();
- int pos;
- int len;
- if (smi_value > 0) {
- // Position and length encoded in one smi.
- pos = StringBuilderSubstringPosition::decode(smi_value);
- len = StringBuilderSubstringLength::decode(smi_value);
- } else {
- // Position and length encoded in two smis.
- len = -smi_value;
- // Get the position and check that it is a positive smi.
- i++;
- if (i >= array_length) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
- }
- Object* next_smi = fixed_array->get(i);
- if (!next_smi->IsSmi()) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
- }
- pos = Smi::cast(next_smi)->value();
- if (pos < 0) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
- }
- }
- ASSERT(pos >= 0);
- ASSERT(len >= 0);
- if (pos > special_length || len > special_length - pos) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
- }
- increment = len;
- } else if (elt->IsString()) {
- String* element = String::cast(elt);
- int element_length = element->length();
- increment = element_length;
- if (ascii && !element->HasOnlyAsciiChars()) {
- ascii = false;
- }
- } else {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
- }
- if (increment > String::kMaxLength - position) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
- }
- position += increment;
- }
-
- int length = position;
- Object* object;
-
- if (ascii) {
- { MaybeObject* maybe_object =
- isolate->heap()->AllocateRawAsciiString(length);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- SeqAsciiString* answer = SeqAsciiString::cast(object);
- StringBuilderConcatHelper(special,
- answer->GetChars(),
- fixed_array,
- array_length);
- return answer;
- } else {
- { MaybeObject* maybe_object =
- isolate->heap()->AllocateRawTwoByteString(length);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- SeqTwoByteString* answer = SeqTwoByteString::cast(object);
- StringBuilderConcatHelper(special,
- answer->GetChars(),
- fixed_array,
- array_length);
- return answer;
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 3);
- CONVERT_CHECKED(JSArray, array, args[0]);
- if (!args[1]->IsSmi()) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
- }
- int array_length = Smi::cast(args[1])->value();
- CONVERT_CHECKED(String, separator, args[2]);
-
- if (!array->HasFastElements()) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
- }
- FixedArray* fixed_array = FixedArray::cast(array->elements());
- if (fixed_array->length() < array_length) {
- array_length = fixed_array->length();
- }
-
- if (array_length == 0) {
- return isolate->heap()->empty_string();
- } else if (array_length == 1) {
- Object* first = fixed_array->get(0);
- if (first->IsString()) return first;
- }
-
- int separator_length = separator->length();
- int max_nof_separators =
- (String::kMaxLength + separator_length - 1) / separator_length;
- if (max_nof_separators < (array_length - 1)) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
- }
- int length = (array_length - 1) * separator_length;
- for (int i = 0; i < array_length; i++) {
- Object* element_obj = fixed_array->get(i);
- if (!element_obj->IsString()) {
- // TODO(1161): handle this case.
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
- }
- String* element = String::cast(element_obj);
- int increment = element->length();
- if (increment > String::kMaxLength - length) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
- }
- length += increment;
- }
-
- Object* object;
- { MaybeObject* maybe_object =
- isolate->heap()->AllocateRawTwoByteString(length);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- SeqTwoByteString* answer = SeqTwoByteString::cast(object);
-
- uc16* sink = answer->GetChars();
-#ifdef DEBUG
- uc16* end = sink + length;
-#endif
-
- String* first = String::cast(fixed_array->get(0));
- int first_length = first->length();
- String::WriteToFlat(first, sink, 0, first_length);
- sink += first_length;
-
- for (int i = 1; i < array_length; i++) {
- ASSERT(sink + separator_length <= end);
- String::WriteToFlat(separator, sink, 0, separator_length);
- sink += separator_length;
-
- String* element = String::cast(fixed_array->get(i));
- int element_length = element->length();
- ASSERT(sink + element_length <= end);
- String::WriteToFlat(element, sink, 0, element_length);
- sink += element_length;
- }
- ASSERT(sink == end);
-
- ASSERT(!answer->HasOnlyAsciiChars()); // Use %_FastAsciiArrayJoin instead.
- return answer;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberOr) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return isolate->heap()->NumberFromInt32(x | y);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAnd) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return isolate->heap()->NumberFromInt32(x & y);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberXor) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return isolate->heap()->NumberFromInt32(x ^ y);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberNot) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- return isolate->heap()->NumberFromInt32(~x);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShl) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return isolate->heap()->NumberFromInt32(x << (y & 0x1f));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShr) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return isolate->heap()->NumberFromUint32(x >> (y & 0x1f));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSar) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return isolate->heap()->NumberFromInt32(ArithmeticShiftRight(x, y & 0x1f));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberEquals) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
- if (isnan(x)) return Smi::FromInt(NOT_EQUAL);
- if (isnan(y)) return Smi::FromInt(NOT_EQUAL);
- if (x == y) return Smi::FromInt(EQUAL);
- Object* result;
- if ((fpclassify(x) == FP_ZERO) && (fpclassify(y) == FP_ZERO)) {
- result = Smi::FromInt(EQUAL);
- } else {
- result = Smi::FromInt(NOT_EQUAL);
- }
- return result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringEquals) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_CHECKED(String, x, args[0]);
- CONVERT_CHECKED(String, y, args[1]);
-
- bool not_equal = !x->Equals(y);
- // This is slightly convoluted because the value that signifies
- // equality is 0 and inequality is 1 so we have to negate the result
- // from String::Equals.
- ASSERT(not_equal == 0 || not_equal == 1);
- STATIC_CHECK(EQUAL == 0);
- STATIC_CHECK(NOT_EQUAL == 1);
- return Smi::FromInt(not_equal);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberCompare) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 3);
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
- if (isnan(x) || isnan(y)) return args[2];
- if (x == y) return Smi::FromInt(EQUAL);
- if (isless(x, y)) return Smi::FromInt(LESS);
- return Smi::FromInt(GREATER);
-}
-
-
-// Compare two Smis as if they were converted to strings and then
-// compared lexicographically.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SmiLexicographicCompare) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- // Extract the integer values from the Smis.
- CONVERT_CHECKED(Smi, x, args[0]);
- CONVERT_CHECKED(Smi, y, args[1]);
- int x_value = x->value();
- int y_value = y->value();
-
- // If the integers are equal so are the string representations.
- if (x_value == y_value) return Smi::FromInt(EQUAL);
-
- // If one of the integers are zero the normal integer order is the
- // same as the lexicographic order of the string representations.
- if (x_value == 0 || y_value == 0) return Smi::FromInt(x_value - y_value);
-
- // If only one of the integers is negative the negative number is
- // smallest because the char code of '-' is less than the char code
- // of any digit. Otherwise, we make both values positive.
- if (x_value < 0 || y_value < 0) {
- if (y_value >= 0) return Smi::FromInt(LESS);
- if (x_value >= 0) return Smi::FromInt(GREATER);
- x_value = -x_value;
- y_value = -y_value;
- }
-
- // Arrays for the individual characters of the two Smis. Smis are
- // 31 bit integers and 10 decimal digits are therefore enough.
- // TODO(isolates): maybe we should simply allocate 20 bytes on the stack.
- int* x_elms = isolate->runtime_state()->smi_lexicographic_compare_x_elms();
- int* y_elms = isolate->runtime_state()->smi_lexicographic_compare_y_elms();
-
-
- // Convert the integers to arrays of their decimal digits.
- int x_index = 0;
- int y_index = 0;
- while (x_value > 0) {
- x_elms[x_index++] = x_value % 10;
- x_value /= 10;
- }
- while (y_value > 0) {
- y_elms[y_index++] = y_value % 10;
- y_value /= 10;
- }
-
- // Loop through the arrays of decimal digits finding the first place
- // where they differ.
- while (--x_index >= 0 && --y_index >= 0) {
- int diff = x_elms[x_index] - y_elms[y_index];
- if (diff != 0) return Smi::FromInt(diff);
- }
-
- // If one array is a suffix of the other array, the longest array is
- // the representation of the largest of the Smis in the
- // lexicographic ordering.
- return Smi::FromInt(x_index - y_index);
-}
-
-
-static Object* StringInputBufferCompare(RuntimeState* state,
- String* x,
- String* y) {
- StringInputBuffer& bufx = *state->string_input_buffer_compare_bufx();
- StringInputBuffer& bufy = *state->string_input_buffer_compare_bufy();
- bufx.Reset(x);
- bufy.Reset(y);
- while (bufx.has_more() && bufy.has_more()) {
- int d = bufx.GetNext() - bufy.GetNext();
- if (d < 0) return Smi::FromInt(LESS);
- else if (d > 0) return Smi::FromInt(GREATER);
- }
-
- // x is (non-trivial) prefix of y:
- if (bufy.has_more()) return Smi::FromInt(LESS);
- // y is prefix of x:
- return Smi::FromInt(bufx.has_more() ? GREATER : EQUAL);
-}
-
-
-static Object* FlatStringCompare(String* x, String* y) {
- ASSERT(x->IsFlat());
- ASSERT(y->IsFlat());
- Object* equal_prefix_result = Smi::FromInt(EQUAL);
- int prefix_length = x->length();
- if (y->length() < prefix_length) {
- prefix_length = y->length();
- equal_prefix_result = Smi::FromInt(GREATER);
- } else if (y->length() > prefix_length) {
- equal_prefix_result = Smi::FromInt(LESS);
- }
- int r;
- if (x->IsAsciiRepresentation()) {
- Vector<const char> x_chars = x->ToAsciiVector();
- if (y->IsAsciiRepresentation()) {
- Vector<const char> y_chars = y->ToAsciiVector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
- } else {
- Vector<const uc16> y_chars = y->ToUC16Vector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
- }
- } else {
- Vector<const uc16> x_chars = x->ToUC16Vector();
- if (y->IsAsciiRepresentation()) {
- Vector<const char> y_chars = y->ToAsciiVector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
- } else {
- Vector<const uc16> y_chars = y->ToUC16Vector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
- }
- }
- Object* result;
- if (r == 0) {
- result = equal_prefix_result;
- } else {
- result = (r < 0) ? Smi::FromInt(LESS) : Smi::FromInt(GREATER);
- }
- ASSERT(result ==
- StringInputBufferCompare(Isolate::Current()->runtime_state(), x, y));
- return result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCompare) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_CHECKED(String, x, args[0]);
- CONVERT_CHECKED(String, y, args[1]);
-
- isolate->counters()->string_compare_runtime()->Increment();
-
- // A few fast case tests before we flatten.
- if (x == y) return Smi::FromInt(EQUAL);
- if (y->length() == 0) {
- if (x->length() == 0) return Smi::FromInt(EQUAL);
- return Smi::FromInt(GREATER);
- } else if (x->length() == 0) {
- return Smi::FromInt(LESS);
- }
-
- int d = x->Get(0) - y->Get(0);
- if (d < 0) return Smi::FromInt(LESS);
- else if (d > 0) return Smi::FromInt(GREATER);
-
- Object* obj;
- { MaybeObject* maybe_obj = isolate->heap()->PrepareForCompare(x);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- { MaybeObject* maybe_obj = isolate->heap()->PrepareForCompare(y);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- return (x->IsFlat() && y->IsFlat()) ? FlatStringCompare(x, y)
- : StringInputBufferCompare(isolate->runtime_state(), x, y);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_acos) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- isolate->counters()->math_acos()->Increment();
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return isolate->transcendental_cache()->Get(TranscendentalCache::ACOS, x);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_asin) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- isolate->counters()->math_asin()->Increment();
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return isolate->transcendental_cache()->Get(TranscendentalCache::ASIN, x);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- isolate->counters()->math_atan()->Increment();
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return isolate->transcendental_cache()->Get(TranscendentalCache::ATAN, x);
-}
-
-
-static const double kPiDividedBy4 = 0.78539816339744830962;
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan2) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
- isolate->counters()->math_atan2()->Increment();
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
- double result;
- if (isinf(x) && isinf(y)) {
- // Make sure that the result in case of two infinite arguments
- // is a multiple of Pi / 4. The sign of the result is determined
- // by the first argument (x) and the sign of the second argument
- // determines the multiplier: one or three.
- int multiplier = (x < 0) ? -1 : 1;
- if (y < 0) multiplier *= 3;
- result = multiplier * kPiDividedBy4;
- } else {
- result = atan2(x, y);
- }
- return isolate->heap()->AllocateHeapNumber(result);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_ceil) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- isolate->counters()->math_ceil()->Increment();
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return isolate->heap()->NumberFromDouble(ceiling(x));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_cos) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- isolate->counters()->math_cos()->Increment();
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return isolate->transcendental_cache()->Get(TranscendentalCache::COS, x);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_exp) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- isolate->counters()->math_exp()->Increment();
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return isolate->transcendental_cache()->Get(TranscendentalCache::EXP, x);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_floor) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- isolate->counters()->math_floor()->Increment();
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return isolate->heap()->NumberFromDouble(floor(x));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- isolate->counters()->math_log()->Increment();
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return isolate->transcendental_cache()->Get(TranscendentalCache::LOG, x);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
- isolate->counters()->math_pow()->Increment();
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
-
- // If the second argument is a smi, it is much faster to call the
- // custom powi() function than the generic pow().
- if (args[1]->IsSmi()) {
- int y = Smi::cast(args[1])->value();
- return isolate->heap()->NumberFromDouble(power_double_int(x, y));
- }
-
- CONVERT_DOUBLE_CHECKED(y, args[1]);
- return isolate->heap()->AllocateHeapNumber(power_double_double(x, y));
-}
-
-// Fast version of Math.pow if we know that y is not an integer and
-// y is not -0.5 or 0.5. Used as slowcase from codegen.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
- if (y == 0) {
- return Smi::FromInt(1);
- } else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
- return isolate->heap()->nan_value();
- } else {
- return isolate->heap()->AllocateHeapNumber(pow(x, y));
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RoundNumber) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- isolate->counters()->math_round()->Increment();
-
- if (!args[0]->IsHeapNumber()) {
- // Must be smi. Return the argument unchanged for all the other types
- // to make fuzz-natives test happy.
- return args[0];
- }
-
- HeapNumber* number = reinterpret_cast<HeapNumber*>(args[0]);
-
- double value = number->value();
- int exponent = number->get_exponent();
- int sign = number->get_sign();
-
- // We compare with kSmiValueSize - 3 because (2^30 - 0.1) has exponent 29 and
- // should be rounded to 2^30, which is not smi.
- if (!sign && exponent <= kSmiValueSize - 3) {
- return Smi::FromInt(static_cast<int>(value + 0.5));
- }
-
- // If the magnitude is big enough, there's no place for fraction part. If we
- // try to add 0.5 to this number, 1.0 will be added instead.
- if (exponent >= 52) {
- return number;
- }
-
- if (sign && value >= -0.5) return isolate->heap()->minus_zero_value();
-
- // Do not call NumberFromDouble() to avoid extra checks.
- return isolate->heap()->AllocateHeapNumber(floor(value + 0.5));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sin) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- isolate->counters()->math_sin()->Increment();
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return isolate->transcendental_cache()->Get(TranscendentalCache::SIN, x);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sqrt) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- isolate->counters()->math_sqrt()->Increment();
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return isolate->heap()->AllocateHeapNumber(sqrt(x));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_tan) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- isolate->counters()->math_tan()->Increment();
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return isolate->transcendental_cache()->Get(TranscendentalCache::TAN, x);
-}
-
-
-static int MakeDay(int year, int month, int day) {
- static const int day_from_month[] = {0, 31, 59, 90, 120, 151,
- 181, 212, 243, 273, 304, 334};
- static const int day_from_month_leap[] = {0, 31, 60, 91, 121, 152,
- 182, 213, 244, 274, 305, 335};
-
- year += month / 12;
- month %= 12;
- if (month < 0) {
- year--;
- month += 12;
- }
-
- ASSERT(month >= 0);
- ASSERT(month < 12);
-
- // year_delta is an arbitrary number such that:
- // a) year_delta = -1 (mod 400)
- // b) year + year_delta > 0 for years in the range defined by
- // ECMA 262 - 15.9.1.1, i.e. upto 100,000,000 days on either side of
- // Jan 1 1970. This is required so that we don't run into integer
- // division of negative numbers.
- // c) there shouldn't be an overflow for 32-bit integers in the following
- // operations.
- static const int year_delta = 399999;
- static const int base_day = 365 * (1970 + year_delta) +
- (1970 + year_delta) / 4 -
- (1970 + year_delta) / 100 +
- (1970 + year_delta) / 400;
-
- int year1 = year + year_delta;
- int day_from_year = 365 * year1 +
- year1 / 4 -
- year1 / 100 +
- year1 / 400 -
- base_day;
-
- if (year % 4 || (year % 100 == 0 && year % 400 != 0)) {
- return day_from_year + day_from_month[month] + day - 1;
- }
-
- return day_from_year + day_from_month_leap[month] + day - 1;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 3);
-
- CONVERT_SMI_CHECKED(year, args[0]);
- CONVERT_SMI_CHECKED(month, args[1]);
- CONVERT_SMI_CHECKED(date, args[2]);
-
- return Smi::FromInt(MakeDay(year, month, date));
-}
-
-
-static const int kDays4Years[] = {0, 365, 2 * 365, 3 * 365 + 1};
-static const int kDaysIn4Years = 4 * 365 + 1;
-static const int kDaysIn100Years = 25 * kDaysIn4Years - 1;
-static const int kDaysIn400Years = 4 * kDaysIn100Years + 1;
-static const int kDays1970to2000 = 30 * 365 + 7;
-static const int kDaysOffset = 1000 * kDaysIn400Years + 5 * kDaysIn400Years -
- kDays1970to2000;
-static const int kYearsOffset = 400000;
-
-static const char kDayInYear[] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31};
-
-static const char kMonthInYear[] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3,
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
- 4, 4, 4, 4, 4, 4,
- 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
- 5, 5, 5, 5, 5,
- 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
- 6, 6, 6, 6, 6, 6,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7,
- 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
- 8, 8, 8, 8, 8,
- 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
- 9, 9, 9, 9, 9, 9,
- 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
- 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
-
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3,
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
- 4, 4, 4, 4, 4, 4,
- 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
- 5, 5, 5, 5, 5,
- 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
- 6, 6, 6, 6, 6, 6,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7,
- 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
- 8, 8, 8, 8, 8,
- 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
- 9, 9, 9, 9, 9, 9,
- 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
- 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
-
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3,
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
- 4, 4, 4, 4, 4, 4,
- 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
- 5, 5, 5, 5, 5,
- 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
- 6, 6, 6, 6, 6, 6,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7,
- 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
- 8, 8, 8, 8, 8,
- 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
- 9, 9, 9, 9, 9, 9,
- 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
- 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
-
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3,
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
- 4, 4, 4, 4, 4, 4,
- 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
- 5, 5, 5, 5, 5,
- 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
- 6, 6, 6, 6, 6, 6,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7,
- 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
- 8, 8, 8, 8, 8,
- 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
- 9, 9, 9, 9, 9, 9,
- 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
- 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11};
-
-
-// This function works for dates from 1970 to 2099.
-static inline void DateYMDFromTimeAfter1970(int date,
- int& year, int& month, int& day) {
-#ifdef DEBUG
- int save_date = date; // Need this for ASSERT in the end.
-#endif
-
- year = 1970 + (4 * date + 2) / kDaysIn4Years;
- date %= kDaysIn4Years;
-
- month = kMonthInYear[date];
- day = kDayInYear[date];
-
- ASSERT(MakeDay(year, month, day) == save_date);
-}
-
-
-static inline void DateYMDFromTimeSlow(int date,
- int& year, int& month, int& day) {
-#ifdef DEBUG
- int save_date = date; // Need this for ASSERT in the end.
-#endif
-
- date += kDaysOffset;
- year = 400 * (date / kDaysIn400Years) - kYearsOffset;
- date %= kDaysIn400Years;
-
- ASSERT(MakeDay(year, 0, 1) + date == save_date);
-
- date--;
- int yd1 = date / kDaysIn100Years;
- date %= kDaysIn100Years;
- year += 100 * yd1;
-
- date++;
- int yd2 = date / kDaysIn4Years;
- date %= kDaysIn4Years;
- year += 4 * yd2;
-
- date--;
- int yd3 = date / 365;
- date %= 365;
- year += yd3;
-
- bool is_leap = (!yd1 || yd2) && !yd3;
-
- ASSERT(date >= -1);
- ASSERT(is_leap || (date >= 0));
- ASSERT((date < 365) || (is_leap && (date < 366)));
- ASSERT(is_leap == ((year % 4 == 0) && (year % 100 || (year % 400 == 0))));
- ASSERT(is_leap || ((MakeDay(year, 0, 1) + date) == save_date));
- ASSERT(!is_leap || ((MakeDay(year, 0, 1) + date + 1) == save_date));
-
- if (is_leap) {
- day = kDayInYear[2*365 + 1 + date];
- month = kMonthInYear[2*365 + 1 + date];
- } else {
- day = kDayInYear[date];
- month = kMonthInYear[date];
- }
-
- ASSERT(MakeDay(year, month, day) == save_date);
-}
-
-
-static inline void DateYMDFromTime(int date,
- int& year, int& month, int& day) {
- if (date >= 0 && date < 32 * kDaysIn4Years) {
- DateYMDFromTimeAfter1970(date, year, month, day);
- } else {
- DateYMDFromTimeSlow(date, year, month, day);
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateYMDFromTime) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_DOUBLE_CHECKED(t, args[0]);
- CONVERT_CHECKED(JSArray, res_array, args[1]);
-
- int year, month, day;
- DateYMDFromTime(static_cast<int>(floor(t / 86400000)), year, month, day);
-
- RUNTIME_ASSERT(res_array->elements()->map() ==
- isolate->heap()->fixed_array_map());
- FixedArray* elms = FixedArray::cast(res_array->elements());
- RUNTIME_ASSERT(elms->length() == 3);
-
- elms->set(0, Smi::FromInt(year));
- elms->set(1, Smi::FromInt(month));
- elms->set(2, Smi::FromInt(day));
-
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 3);
-
- JSFunction* callee = JSFunction::cast(args[0]);
- Object** parameters = reinterpret_cast<Object**>(args[1]);
- const int length = Smi::cast(args[2])->value();
-
- Object* result;
- { MaybeObject* maybe_result =
- isolate->heap()->AllocateArgumentsObject(callee, length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Allocate the elements if needed.
- if (length > 0) {
- // Allocate the fixed array.
- Object* obj;
- { MaybeObject* maybe_obj = isolate->heap()->AllocateRawFixedArray(length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- AssertNoAllocation no_gc;
- FixedArray* array = reinterpret_cast<FixedArray*>(obj);
- array->set_map(isolate->heap()->fixed_array_map());
- array->set_length(length);
-
- WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < length; i++) {
- array->set(i, *--parameters, mode);
- }
- JSObject::cast(result)->set_elements(FixedArray::cast(obj));
- }
- return result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosure) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(Context, context, 0);
- CONVERT_ARG_CHECKED(SharedFunctionInfo, shared, 1);
- CONVERT_BOOLEAN_CHECKED(pretenure, args[2]);
-
- // Allocate global closures in old space and allocate local closures
- // in new space. Additionally pretenure closures that are assigned
- // directly to properties.
- pretenure = pretenure || (context->global_context() == *context);
- PretenureFlag pretenure_flag = pretenure ? TENURED : NOT_TENURED;
- Handle<JSFunction> result =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
- context,
- pretenure_flag);
- return *result;
-}
-
-
-static SmartPointer<Object**> GetNonBoundArguments(int bound_argc,
- int* total_argc) {
- // Find frame containing arguments passed to the caller.
- JavaScriptFrameIterator it;
- JavaScriptFrame* frame = it.frame();
- List<JSFunction*> functions(2);
- frame->GetFunctions(&functions);
- if (functions.length() > 1) {
- int inlined_frame_index = functions.length() - 1;
- JSFunction* inlined_function = functions[inlined_frame_index];
- int args_count = inlined_function->shared()->formal_parameter_count();
- ScopedVector<SlotRef> args_slots(args_count);
- SlotRef::ComputeSlotMappingForArguments(frame,
- inlined_frame_index,
- &args_slots);
-
- *total_argc = bound_argc + args_count;
- SmartPointer<Object**> param_data(NewArray<Object**>(*total_argc));
- for (int i = 0; i < args_count; i++) {
- Handle<Object> val = args_slots[i].GetValue();
- param_data[bound_argc + i] = val.location();
- }
- return param_data;
- } else {
- it.AdvanceToArgumentsFrame();
- frame = it.frame();
- int args_count = frame->ComputeParametersCount();
-
- *total_argc = bound_argc + args_count;
- SmartPointer<Object**> param_data(NewArray<Object**>(*total_argc));
- for (int i = 0; i < args_count; i++) {
- Handle<Object> val = Handle<Object>(frame->GetParameter(i));
- param_data[bound_argc + i] = val.location();
- }
- return param_data;
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- // First argument is a function to use as a constructor.
- CONVERT_ARG_CHECKED(JSFunction, function, 0);
-
- // Second argument is either null or an array of bound arguments.
- Handle<FixedArray> bound_args;
- int bound_argc = 0;
- if (!args[1]->IsNull()) {
- CONVERT_ARG_CHECKED(JSArray, params, 1);
- RUNTIME_ASSERT(params->HasFastElements());
- bound_args = Handle<FixedArray>(FixedArray::cast(params->elements()));
- bound_argc = Smi::cast(params->length())->value();
- }
-
- int total_argc = 0;
- SmartPointer<Object**> param_data =
- GetNonBoundArguments(bound_argc, &total_argc);
- for (int i = 0; i < bound_argc; i++) {
- Handle<Object> val = Handle<Object>(bound_args->get(i));
- param_data[i] = val.location();
- }
-
- bool exception = false;
- Handle<Object> result =
- Execution::New(function, total_argc, *param_data, &exception);
- if (exception) {
- return Failure::Exception();
- }
-
- ASSERT(!result.is_null());
- return *result;
-}
-
-
-static void TrySettingInlineConstructStub(Isolate* isolate,
- Handle<JSFunction> function) {
- Handle<Object> prototype = isolate->factory()->null_value();
- if (function->has_instance_prototype()) {
- prototype = Handle<Object>(function->instance_prototype(), isolate);
- }
- if (function->shared()->CanGenerateInlineConstructor(*prototype)) {
- ConstructStubCompiler compiler;
- MaybeObject* code = compiler.CompileConstructStub(*function);
- if (!code->IsFailure()) {
- function->shared()->set_construct_stub(
- Code::cast(code->ToObjectUnchecked()));
- }
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- Handle<Object> constructor = args.at<Object>(0);
-
- // If the constructor isn't a proper function we throw a type error.
- if (!constructor->IsJSFunction()) {
- Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
- Handle<Object> type_error =
- isolate->factory()->NewTypeError("not_constructor", arguments);
- return isolate->Throw(*type_error);
- }
-
- Handle<JSFunction> function = Handle<JSFunction>::cast(constructor);
-
- // If function should not have prototype, construction is not allowed. In this
- // case generated code bailouts here, since function has no initial_map.
- if (!function->should_have_prototype()) {
- Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
- Handle<Object> type_error =
- isolate->factory()->NewTypeError("not_constructor", arguments);
- return isolate->Throw(*type_error);
- }
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug* debug = isolate->debug();
- // Handle stepping into constructors if step into is active.
- if (debug->StepInActive()) {
- debug->HandleStepIn(function, Handle<Object>::null(), 0, true);
- }
-#endif
-
- if (function->has_initial_map()) {
- if (function->initial_map()->instance_type() == JS_FUNCTION_TYPE) {
- // The 'Function' function ignores the receiver object when
- // called using 'new' and creates a new JSFunction object that
- // is returned. The receiver object is only used for error
- // reporting if an error occurs when constructing the new
- // JSFunction. FACTORY->NewJSObject() should not be used to
- // allocate JSFunctions since it does not properly initialize
- // the shared part of the function. Since the receiver is
- // ignored anyway, we use the global object as the receiver
- // instead of a new JSFunction object. This way, errors are
- // reported the same way whether or not 'Function' is called
- // using 'new'.
- return isolate->context()->global();
- }
- }
-
- // The function should be compiled for the optimization hints to be
- // available. We cannot use EnsureCompiled because that forces a
- // compilation through the shared function info which makes it
- // impossible for us to optimize.
- Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- if (!function->is_compiled()) CompileLazy(function, CLEAR_EXCEPTION);
-
- if (!function->has_initial_map() &&
- shared->IsInobjectSlackTrackingInProgress()) {
- // The tracking is already in progress for another function. We can only
- // track one initial_map at a time, so we force the completion before the
- // function is called as a constructor for the first time.
- shared->CompleteInobjectSlackTracking();
- }
-
- bool first_allocation = !shared->live_objects_may_exist();
- Handle<JSObject> result = isolate->factory()->NewJSObject(function);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
- // Delay setting the stub if inobject slack tracking is in progress.
- if (first_allocation && !shared->IsInobjectSlackTrackingInProgress()) {
- TrySettingInlineConstructStub(isolate, function);
- }
-
- isolate->counters()->constructed_objects()->Increment();
- isolate->counters()->constructed_objects_runtime()->Increment();
-
- return *result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FinalizeInstanceSize) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_ARG_CHECKED(JSFunction, function, 0);
- function->shared()->CompleteInobjectSlackTracking();
- TrySettingInlineConstructStub(isolate, function);
-
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyCompile) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- Handle<JSFunction> function = args.at<JSFunction>(0);
-#ifdef DEBUG
- if (FLAG_trace_lazy && !function->shared()->is_compiled()) {
- PrintF("[lazy: ");
- function->PrintName();
- PrintF("]\n");
- }
-#endif
-
- // Compile the target function. Here we compile using CompileLazyInLoop in
- // order to get the optimized version. This helps code like delta-blue
- // that calls performance-critical routines through constructors. A
- // constructor call doesn't use a CallIC, it uses a LoadIC followed by a
- // direct call. Since the in-loop tracking takes place through CallICs
- // this means that things called through constructors are never known to
- // be in loops. We compile them as if they are in loops here just in case.
- ASSERT(!function->is_compiled());
- if (!CompileLazyInLoop(function, KEEP_EXCEPTION)) {
- return Failure::Exception();
- }
-
- // All done. Return the compiled code.
- ASSERT(function->is_compiled());
- return function->code();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- Handle<JSFunction> function = args.at<JSFunction>(0);
- // If the function is not optimizable or debugger is active continue using the
- // code from the full compiler.
- if (!function->shared()->code()->optimizable() ||
- isolate->debug()->has_break_points()) {
- if (FLAG_trace_opt) {
- PrintF("[failed to optimize ");
- function->PrintName();
- PrintF(": is code optimizable: %s, is debugger enabled: %s]\n",
- function->shared()->code()->optimizable() ? "T" : "F",
- isolate->debug()->has_break_points() ? "T" : "F");
- }
- function->ReplaceCode(function->shared()->code());
- return function->code();
- }
- if (CompileOptimized(function, AstNode::kNoNumber, CLEAR_EXCEPTION)) {
- return function->code();
- }
- if (FLAG_trace_opt) {
- PrintF("[failed to optimize ");
- function->PrintName();
- PrintF(": optimized compilation failed]\n");
- }
- function->ReplaceCode(function->shared()->code());
- return function->code();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- RUNTIME_ASSERT(args[0]->IsSmi());
- Deoptimizer::BailoutType type =
- static_cast<Deoptimizer::BailoutType>(Smi::cast(args[0])->value());
- Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
- ASSERT(isolate->heap()->IsAllocationAllowed());
- int frames = deoptimizer->output_count();
-
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = NULL;
- for (int i = 0; i < frames; i++) {
- if (i != 0) it.Advance();
- frame = it.frame();
- deoptimizer->InsertHeapNumberValues(frames - i - 1, frame);
- }
- delete deoptimizer;
-
- RUNTIME_ASSERT(frame->function()->IsJSFunction());
- Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate);
- Handle<Object> arguments;
- for (int i = frame->ComputeExpressionsCount() - 1; i >= 0; --i) {
- if (frame->GetExpression(i) == isolate->heap()->arguments_marker()) {
- if (arguments.is_null()) {
- // FunctionGetArguments can't throw an exception, so cast away the
- // doubt with an assert.
- arguments = Handle<Object>(
- Accessors::FunctionGetArguments(*function,
- NULL)->ToObjectUnchecked());
- ASSERT(*arguments != isolate->heap()->null_value());
- ASSERT(*arguments != isolate->heap()->undefined_value());
- }
- frame->SetExpression(i, *arguments);
- }
- }
-
- isolate->compilation_cache()->MarkForLazyOptimizing(function);
- if (type == Deoptimizer::EAGER) {
- RUNTIME_ASSERT(function->IsOptimized());
- } else {
- RUNTIME_ASSERT(!function->IsOptimized());
- }
-
- // Avoid doing too much work when running with --always-opt and keep
- // the optimized code around.
- if (FLAG_always_opt || type == Deoptimizer::LAZY) {
- return isolate->heap()->undefined_value();
- }
-
- // Count the number of optimized activations of the function.
- int activations = 0;
- while (!it.done()) {
- JavaScriptFrame* frame = it.frame();
- if (frame->is_optimized() && frame->function() == *function) {
- activations++;
- }
- it.Advance();
- }
-
- // TODO(kasperl): For now, we cannot support removing the optimized
- // code when we have recursive invocations of the same function.
- if (activations == 0) {
- if (FLAG_trace_deopt) {
- PrintF("[removing optimized code for: ");
- function->PrintName();
- PrintF("]\n");
- }
- function->ReplaceCode(function->shared()->code());
- }
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyOSR) {
- Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
- delete deoptimizer;
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeoptimizeFunction) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, function, 0);
- if (!function->IsOptimized()) return isolate->heap()->undefined_value();
-
- Deoptimizer::DeoptimizeFunction(*function);
-
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, function, 0);
-
- // We're not prepared to handle a function with arguments object.
- ASSERT(!function->shared()->scope_info()->HasArgumentsShadow());
-
- // We have hit a back edge in an unoptimized frame for a function that was
- // selected for on-stack replacement. Find the unoptimized code object.
- Handle<Code> unoptimized(function->shared()->code(), isolate);
- // Keep track of whether we've succeeded in optimizing.
- bool succeeded = unoptimized->optimizable();
- if (succeeded) {
- // If we are trying to do OSR when there are already optimized
- // activations of the function, it means (a) the function is directly or
- // indirectly recursive and (b) an optimized invocation has been
- // deoptimized so that we are currently in an unoptimized activation.
- // Check for optimized activations of this function.
- JavaScriptFrameIterator it(isolate);
- while (succeeded && !it.done()) {
- JavaScriptFrame* frame = it.frame();
- succeeded = !frame->is_optimized() || frame->function() != *function;
- it.Advance();
- }
- }
-
- int ast_id = AstNode::kNoNumber;
- if (succeeded) {
- // The top JS function is this one, the PC is somewhere in the
- // unoptimized code.
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
- ASSERT(frame->function() == *function);
- ASSERT(frame->LookupCode() == *unoptimized);
- ASSERT(unoptimized->contains(frame->pc()));
-
- // Use linear search of the unoptimized code's stack check table to find
- // the AST id matching the PC.
- Address start = unoptimized->instruction_start();
- unsigned target_pc_offset = static_cast<unsigned>(frame->pc() - start);
- Address table_cursor = start + unoptimized->stack_check_table_offset();
- uint32_t table_length = Memory::uint32_at(table_cursor);
- table_cursor += kIntSize;
- for (unsigned i = 0; i < table_length; ++i) {
- // Table entries are (AST id, pc offset) pairs.
- uint32_t pc_offset = Memory::uint32_at(table_cursor + kIntSize);
- if (pc_offset == target_pc_offset) {
- ast_id = static_cast<int>(Memory::uint32_at(table_cursor));
- break;
- }
- table_cursor += 2 * kIntSize;
- }
- ASSERT(ast_id != AstNode::kNoNumber);
- if (FLAG_trace_osr) {
- PrintF("[replacing on-stack at AST id %d in ", ast_id);
- function->PrintName();
- PrintF("]\n");
- }
-
- // Try to compile the optimized code. A true return value from
- // CompileOptimized means that compilation succeeded, not necessarily
- // that optimization succeeded.
- if (CompileOptimized(function, ast_id, CLEAR_EXCEPTION) &&
- function->IsOptimized()) {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- function->code()->deoptimization_data());
- if (data->OsrPcOffset()->value() >= 0) {
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement offset %d in optimized code]\n",
- data->OsrPcOffset()->value());
- }
- ASSERT(data->OsrAstId()->value() == ast_id);
- } else {
- // We may never generate the desired OSR entry if we emit an
- // early deoptimize.
- succeeded = false;
- }
- } else {
- succeeded = false;
- }
- }
-
- // Revert to the original stack checks in the original unoptimized code.
- if (FLAG_trace_osr) {
- PrintF("[restoring original stack checks in ");
- function->PrintName();
- PrintF("]\n");
- }
- StackCheckStub check_stub;
- Handle<Code> check_code = check_stub.GetCode();
- Handle<Code> replacement_code = isolate->builtins()->OnStackReplacement();
- Deoptimizer::RevertStackCheckCode(*unoptimized,
- *check_code,
- *replacement_code);
-
- // Allow OSR only at nesting level zero again.
- unoptimized->set_allow_osr_at_loop_nesting_level(0);
-
- // If the optimization attempt succeeded, return the AST id tagged as a
- // smi. This tells the builtin that we need to translate the unoptimized
- // frame to an optimized one.
- if (succeeded) {
- ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
- return Smi::FromInt(ast_id);
- } else {
- if (function->IsMarkedForLazyRecompilation()) {
- function->ReplaceCode(function->shared()->code());
- }
- return Smi::FromInt(-1);
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionDelegate) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- RUNTIME_ASSERT(!args[0]->IsJSFunction());
- return *Execution::GetFunctionDelegate(args.at<Object>(0));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructorDelegate) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- RUNTIME_ASSERT(!args[0]->IsJSFunction());
- return *Execution::GetConstructorDelegate(args.at<Object>(0));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewContext) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_CHECKED(JSFunction, function, args[0]);
- int length = function->shared()->scope_info()->NumberOfContextSlots();
- Object* result;
- { MaybeObject* maybe_result =
- isolate->heap()->AllocateFunctionContext(length, function);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- isolate->set_context(Context::cast(result));
-
- return result; // non-failure
-}
-
-
-MUST_USE_RESULT static MaybeObject* PushContextHelper(Isolate* isolate,
- Object* object,
- bool is_catch_context) {
- // Convert the object to a proper JavaScript object.
- Object* js_object = object;
- if (!js_object->IsJSObject()) {
- MaybeObject* maybe_js_object = js_object->ToObject();
- if (!maybe_js_object->ToObject(&js_object)) {
- if (!Failure::cast(maybe_js_object)->IsInternalError()) {
- return maybe_js_object;
- }
- HandleScope scope(isolate);
- Handle<Object> handle(object, isolate);
- Handle<Object> result =
- isolate->factory()->NewTypeError("with_expression",
- HandleVector(&handle, 1));
- return isolate->Throw(*result);
- }
- }
-
- Object* result;
- { MaybeObject* maybe_result = isolate->heap()->AllocateWithContext(
- isolate->context(), JSObject::cast(js_object), is_catch_context);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- Context* context = Context::cast(result);
- isolate->set_context(context);
-
- return result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushContext) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- return PushContextHelper(isolate, args[0], false);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- return PushContextHelper(isolate, args[0], true);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteContextSlot) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_CHECKED(Context, context, 0);
- CONVERT_ARG_CHECKED(String, name, 1);
-
- int index;
- PropertyAttributes attributes;
- ContextLookupFlags flags = FOLLOW_CHAINS;
- Handle<Object> holder = context->Lookup(name, flags, &index, &attributes);
-
- // If the slot was not found the result is true.
- if (holder.is_null()) {
- return isolate->heap()->true_value();
- }
-
- // If the slot was found in a context, it should be DONT_DELETE.
- if (holder->IsContext()) {
- return isolate->heap()->false_value();
- }
-
- // The slot was found in a JSObject, either a context extension object,
- // the global object, or an arguments object. Try to delete it
- // (respecting DONT_DELETE). For consistency with V8's usual behavior,
- // which allows deleting all parameters in functions that mention
- // 'arguments', we do this even for the case of slots found on an
- // arguments object. The slot was found on an arguments object if the
- // index is non-negative.
- Handle<JSObject> object = Handle<JSObject>::cast(holder);
- if (index >= 0) {
- return object->DeleteElement(index, JSObject::NORMAL_DELETION);
- } else {
- return object->DeleteProperty(*name, JSObject::NORMAL_DELETION);
- }
-}
-
-
-// A mechanism to return a pair of Object pointers in registers (if possible).
-// How this is achieved is calling convention-dependent.
-// All currently supported x86 compiles uses calling conventions that are cdecl
-// variants where a 64-bit value is returned in two 32-bit registers
-// (edx:eax on ia32, r1:r0 on ARM).
-// In AMD-64 calling convention a struct of two pointers is returned in rdx:rax.
-// In Win64 calling convention, a struct of two pointers is returned in memory,
-// allocated by the caller, and passed as a pointer in a hidden first parameter.
-#ifdef V8_HOST_ARCH_64_BIT
-struct ObjectPair {
- MaybeObject* x;
- MaybeObject* y;
-};
-
-static inline ObjectPair MakePair(MaybeObject* x, MaybeObject* y) {
- ObjectPair result = {x, y};
- // Pointers x and y returned in rax and rdx, in AMD-x64-abi.
- // In Win64 they are assigned to a hidden first argument.
- return result;
-}
-#else
-typedef uint64_t ObjectPair;
-static inline ObjectPair MakePair(MaybeObject* x, MaybeObject* y) {
- return reinterpret_cast<uint32_t>(x) |
- (reinterpret_cast<ObjectPair>(y) << 32);
-}
-#endif
-
-
-static inline MaybeObject* Unhole(Heap* heap,
- MaybeObject* x,
- PropertyAttributes attributes) {
- ASSERT(!x->IsTheHole() || (attributes & READ_ONLY) != 0);
- USE(attributes);
- return x->IsTheHole() ? heap->undefined_value() : x;
-}
-
-
-static JSObject* ComputeReceiverForNonGlobal(Isolate* isolate,
- JSObject* holder) {
- ASSERT(!holder->IsGlobalObject());
- Context* top = isolate->context();
- // Get the context extension function.
- JSFunction* context_extension_function =
- top->global_context()->context_extension_function();
- // If the holder isn't a context extension object, we just return it
- // as the receiver. This allows arguments objects to be used as
- // receivers, but only if they are put in the context scope chain
- // explicitly via a with-statement.
- Object* constructor = holder->map()->constructor();
- if (constructor != context_extension_function) return holder;
- // Fall back to using the global object as the receiver if the
- // property turns out to be a local variable allocated in a context
- // extension object - introduced via eval.
- return top->global()->global_receiver();
-}
-
-
-static ObjectPair LoadContextSlotHelper(Arguments args,
- Isolate* isolate,
- bool throw_error) {
- HandleScope scope(isolate);
- ASSERT_EQ(2, args.length());
-
- if (!args[0]->IsContext() || !args[1]->IsString()) {
- return MakePair(isolate->ThrowIllegalOperation(), NULL);
- }
- Handle<Context> context = args.at<Context>(0);
- Handle<String> name = args.at<String>(1);
-
- int index;
- PropertyAttributes attributes;
- ContextLookupFlags flags = FOLLOW_CHAINS;
- Handle<Object> holder = context->Lookup(name, flags, &index, &attributes);
-
- // If the index is non-negative, the slot has been found in a local
- // variable or a parameter. Read it from the context object or the
- // arguments object.
- if (index >= 0) {
- // If the "property" we were looking for is a local variable or an
- // argument in a context, the receiver is the global object; see
- // ECMA-262, 3rd., 10.1.6 and 10.2.3.
- JSObject* receiver =
- isolate->context()->global()->global_receiver();
- MaybeObject* value = (holder->IsContext())
- ? Context::cast(*holder)->get(index)
- : JSObject::cast(*holder)->GetElement(index);
- return MakePair(Unhole(isolate->heap(), value, attributes), receiver);
- }
-
- // If the holder is found, we read the property from it.
- if (!holder.is_null() && holder->IsJSObject()) {
- ASSERT(Handle<JSObject>::cast(holder)->HasProperty(*name));
- JSObject* object = JSObject::cast(*holder);
- JSObject* receiver;
- if (object->IsGlobalObject()) {
- receiver = GlobalObject::cast(object)->global_receiver();
- } else if (context->is_exception_holder(*holder)) {
- receiver = isolate->context()->global()->global_receiver();
- } else {
- receiver = ComputeReceiverForNonGlobal(isolate, object);
- }
- // No need to unhole the value here. This is taken care of by the
- // GetProperty function.
- MaybeObject* value = object->GetProperty(*name);
- return MakePair(value, receiver);
- }
-
- if (throw_error) {
- // The property doesn't exist - throw exception.
- Handle<Object> reference_error =
- isolate->factory()->NewReferenceError("not_defined",
- HandleVector(&name, 1));
- return MakePair(isolate->Throw(*reference_error), NULL);
- } else {
- // The property doesn't exist - return undefined
- return MakePair(isolate->heap()->undefined_value(),
- isolate->heap()->undefined_value());
- }
-}
-
-
-RUNTIME_FUNCTION(ObjectPair, Runtime_LoadContextSlot) {
- return LoadContextSlotHelper(args, isolate, true);
-}
-
-
-RUNTIME_FUNCTION(ObjectPair, Runtime_LoadContextSlotNoReferenceError) {
- return LoadContextSlotHelper(args, isolate, false);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 4);
-
- Handle<Object> value(args[0], isolate);
- CONVERT_ARG_CHECKED(Context, context, 1);
- CONVERT_ARG_CHECKED(String, name, 2);
- CONVERT_SMI_CHECKED(strict_unchecked, args[3]);
- RUNTIME_ASSERT(strict_unchecked == kStrictMode ||
- strict_unchecked == kNonStrictMode);
- StrictModeFlag strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
-
- int index;
- PropertyAttributes attributes;
- ContextLookupFlags flags = FOLLOW_CHAINS;
- Handle<Object> holder = context->Lookup(name, flags, &index, &attributes);
-
- if (index >= 0) {
- if (holder->IsContext()) {
- // Ignore if read_only variable.
- if ((attributes & READ_ONLY) == 0) {
- // Context is a fixed array and set cannot fail.
- Context::cast(*holder)->set(index, *value);
- } else if (strict_mode == kStrictMode) {
- // Setting read only property in strict mode.
- Handle<Object> error =
- isolate->factory()->NewTypeError("strict_cannot_assign",
- HandleVector(&name, 1));
- return isolate->Throw(*error);
- }
- } else {
- ASSERT((attributes & READ_ONLY) == 0);
- Handle<Object> result =
- SetElement(Handle<JSObject>::cast(holder), index, value, strict_mode);
- if (result.is_null()) {
- ASSERT(isolate->has_pending_exception());
- return Failure::Exception();
- }
- }
- return *value;
- }
-
- // Slow case: The property is not in a FixedArray context.
- // It is either in an JSObject extension context or it was not found.
- Handle<JSObject> context_ext;
-
- if (!holder.is_null()) {
- // The property exists in the extension context.
- context_ext = Handle<JSObject>::cast(holder);
- } else {
- // The property was not found. It needs to be stored in the global context.
- ASSERT(attributes == ABSENT);
- attributes = NONE;
- context_ext = Handle<JSObject>(isolate->context()->global());
- }
-
- // Set the property, but ignore if read_only variable on the context
- // extension object itself.
- if ((attributes & READ_ONLY) == 0 ||
- (context_ext->GetLocalPropertyAttribute(*name) == ABSENT)) {
- RETURN_IF_EMPTY_HANDLE(
- isolate,
- SetProperty(context_ext, name, value, NONE, strict_mode));
- } else if (strict_mode == kStrictMode && (attributes & READ_ONLY) != 0) {
- // Setting read only property in strict mode.
- Handle<Object> error =
- isolate->factory()->NewTypeError(
- "strict_cannot_assign", HandleVector(&name, 1));
- return isolate->Throw(*error);
- }
- return *value;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Throw) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- return isolate->Throw(args[0]);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ReThrow) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- return isolate->ReThrow(args[0]);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PromoteScheduledException) {
- ASSERT_EQ(0, args.length());
- return isolate->PromoteScheduledException();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowReferenceError) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- Handle<Object> name(args[0], isolate);
- Handle<Object> reference_error =
- isolate->factory()->NewReferenceError("not_defined",
- HandleVector(&name, 1));
- return isolate->Throw(*reference_error);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
- ASSERT(args.length() == 0);
-
- // First check if this is a real stack overflow.
- if (isolate->stack_guard()->IsStackOverflow()) {
- NoHandleAllocation na;
- return isolate->StackOverflow();
- }
-
- return Execution::HandleStackGuardInterrupt();
-}
-
-
-// NOTE: These PrintXXX functions are defined for all builds (not just
-// DEBUG builds) because we may want to be able to trace function
-// calls in all modes.
-static void PrintString(String* str) {
- // not uncommon to have empty strings
- if (str->length() > 0) {
- SmartPointer<char> s =
- str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- PrintF("%s", *s);
- }
-}
-
-
-static void PrintObject(Object* obj) {
- if (obj->IsSmi()) {
- PrintF("%d", Smi::cast(obj)->value());
- } else if (obj->IsString() || obj->IsSymbol()) {
- PrintString(String::cast(obj));
- } else if (obj->IsNumber()) {
- PrintF("%g", obj->Number());
- } else if (obj->IsFailure()) {
- PrintF("<failure>");
- } else if (obj->IsUndefined()) {
- PrintF("<undefined>");
- } else if (obj->IsNull()) {
- PrintF("<null>");
- } else if (obj->IsTrue()) {
- PrintF("<true>");
- } else if (obj->IsFalse()) {
- PrintF("<false>");
- } else {
- PrintF("%p", reinterpret_cast<void*>(obj));
- }
-}
-
-
-static int StackSize() {
- int n = 0;
- for (JavaScriptFrameIterator it; !it.done(); it.Advance()) n++;
- return n;
-}
-
-
-static void PrintTransition(Object* result) {
- // indentation
- { const int nmax = 80;
- int n = StackSize();
- if (n <= nmax)
- PrintF("%4d:%*s", n, n, "");
- else
- PrintF("%4d:%*s", n, nmax, "...");
- }
-
- if (result == NULL) {
- // constructor calls
- JavaScriptFrameIterator it;
- JavaScriptFrame* frame = it.frame();
- if (frame->IsConstructor()) PrintF("new ");
- // function name
- Object* fun = frame->function();
- if (fun->IsJSFunction()) {
- PrintObject(JSFunction::cast(fun)->shared()->name());
- } else {
- PrintObject(fun);
- }
- // function arguments
- // (we are intentionally only printing the actually
- // supplied parameters, not all parameters required)
- PrintF("(this=");
- PrintObject(frame->receiver());
- const int length = frame->ComputeParametersCount();
- for (int i = 0; i < length; i++) {
- PrintF(", ");
- PrintObject(frame->GetParameter(i));
- }
- PrintF(") {\n");
-
- } else {
- // function result
- PrintF("} -> ");
- PrintObject(result);
- PrintF("\n");
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceEnter) {
- ASSERT(args.length() == 0);
- NoHandleAllocation ha;
- PrintTransition(NULL);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceExit) {
- NoHandleAllocation ha;
- PrintTransition(args[0]);
- return args[0]; // return TOS
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrint) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
-#ifdef DEBUG
- if (args[0]->IsString()) {
- // If we have a string, assume it's a code "marker"
- // and print some interesting cpu debugging info.
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
- PrintF("fp = %p, sp = %p, caller_sp = %p: ",
- frame->fp(), frame->sp(), frame->caller_sp());
- } else {
- PrintF("DebugPrint: ");
- }
- args[0]->Print();
- if (args[0]->IsHeapObject()) {
- PrintF("\n");
- HeapObject::cast(args[0])->map()->Print();
- }
-#else
- // ShortPrint is available in release mode. Print is not.
- args[0]->ShortPrint();
-#endif
- PrintF("\n");
- Flush();
-
- return args[0]; // return TOS
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugTrace) {
- ASSERT(args.length() == 0);
- NoHandleAllocation ha;
- isolate->PrintStack();
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateCurrentTime) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 0);
-
- // According to ECMA-262, section 15.9.1, page 117, the precision of
- // the number in a Date object representing a particular instant in
- // time is milliseconds. Therefore, we floor the result of getting
- // the OS time.
- double millis = floor(OS::TimeCurrentMillis());
- return isolate->heap()->NumberFromDouble(millis);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_CHECKED(String, str, 0);
- FlattenString(str);
-
- CONVERT_ARG_CHECKED(JSArray, output, 1);
- RUNTIME_ASSERT(output->HasFastElements());
-
- AssertNoAllocation no_allocation;
-
- FixedArray* output_array = FixedArray::cast(output->elements());
- RUNTIME_ASSERT(output_array->length() >= DateParser::OUTPUT_SIZE);
- bool result;
- if (str->IsAsciiRepresentation()) {
- result = DateParser::Parse(str->ToAsciiVector(), output_array);
- } else {
- ASSERT(str->IsTwoByteRepresentation());
- result = DateParser::Parse(str->ToUC16Vector(), output_array);
- }
-
- if (result) {
- return *output;
- } else {
- return isolate->heap()->null_value();
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimezone) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- const char* zone = OS::LocalTimezone(x);
- return isolate->heap()->AllocateStringFromUtf8(CStrVector(zone));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimeOffset) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 0);
-
- return isolate->heap()->NumberFromDouble(OS::LocalTimeOffset());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateDaylightSavingsOffset) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return isolate->heap()->NumberFromDouble(OS::DaylightSavingsOffset(x));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalReceiver) {
- ASSERT(args.length() == 1);
- Object* global = args[0];
- if (!global->IsJSGlobalObject()) return isolate->heap()->null_value();
- return JSGlobalObject::cast(global)->global_receiver();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ParseJson) {
- HandleScope scope(isolate);
- ASSERT_EQ(1, args.length());
- CONVERT_ARG_CHECKED(String, source, 0);
-
- Handle<Object> result = JsonParser::Parse(source);
- if (result.is_null()) {
- // Syntax error or stack overflow in scanner.
- ASSERT(isolate->has_pending_exception());
- return Failure::Exception();
- }
- return *result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
- HandleScope scope(isolate);
- ASSERT_EQ(1, args.length());
- CONVERT_ARG_CHECKED(String, source, 0);
-
- // Compile source string in the global context.
- Handle<Context> context(isolate->context()->global_context());
- Handle<SharedFunctionInfo> shared = Compiler::CompileEval(source,
- context,
- true,
- kNonStrictMode);
- if (shared.is_null()) return Failure::Exception();
- Handle<JSFunction> fun =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
- context,
- NOT_TENURED);
- return *fun;
-}
-
-
-static ObjectPair CompileGlobalEval(Isolate* isolate,
- Handle<String> source,
- Handle<Object> receiver,
- StrictModeFlag strict_mode) {
- // Deal with a normal eval call with a string argument. Compile it
- // and return the compiled function bound in the local context.
- Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
- source,
- Handle<Context>(isolate->context()),
- isolate->context()->IsGlobalContext(),
- strict_mode);
- if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
- Handle<JSFunction> compiled =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared, Handle<Context>(isolate->context()), NOT_TENURED);
- return MakePair(*compiled, *receiver);
-}
-
-
-RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
- ASSERT(args.length() == 4);
-
- HandleScope scope(isolate);
- Handle<Object> callee = args.at<Object>(0);
- Handle<Object> receiver; // Will be overwritten.
-
- // Compute the calling context.
- Handle<Context> context = Handle<Context>(isolate->context(), isolate);
-#ifdef DEBUG
- // Make sure Isolate::context() agrees with the old code that traversed
- // the stack frames to compute the context.
- StackFrameLocator locator;
- JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- ASSERT(Context::cast(frame->context()) == *context);
-#endif
-
- // Find where the 'eval' symbol is bound. It is unaliased only if
- // it is bound in the global context.
- int index = -1;
- PropertyAttributes attributes = ABSENT;
- while (true) {
- receiver = context->Lookup(isolate->factory()->eval_symbol(),
- FOLLOW_PROTOTYPE_CHAIN,
- &index, &attributes);
- // Stop search when eval is found or when the global context is
- // reached.
- if (attributes != ABSENT || context->IsGlobalContext()) break;
- if (context->is_function_context()) {
- context = Handle<Context>(Context::cast(context->closure()->context()),
- isolate);
- } else {
- context = Handle<Context>(context->previous(), isolate);
- }
- }
-
- // If eval could not be resolved, it has been deleted and we need to
- // throw a reference error.
- if (attributes == ABSENT) {
- Handle<Object> name = isolate->factory()->eval_symbol();
- Handle<Object> reference_error =
- isolate->factory()->NewReferenceError("not_defined",
- HandleVector(&name, 1));
- return MakePair(isolate->Throw(*reference_error), NULL);
- }
-
- if (!context->IsGlobalContext()) {
- // 'eval' is not bound in the global context. Just call the function
- // with the given arguments. This is not necessarily the global eval.
- if (receiver->IsContext()) {
- context = Handle<Context>::cast(receiver);
- receiver = Handle<Object>(context->get(index), isolate);
- } else if (receiver->IsJSContextExtensionObject()) {
- receiver = Handle<JSObject>(
- isolate->context()->global()->global_receiver(), isolate);
- }
- return MakePair(*callee, *receiver);
- }
-
- // 'eval' is bound in the global context, but it may have been overwritten.
- // Compare it to the builtin 'GlobalEval' function to make sure.
- if (*callee != isolate->global_context()->global_eval_fun() ||
- !args[1]->IsString()) {
- return MakePair(*callee,
- isolate->context()->global()->global_receiver());
- }
-
- ASSERT(args[3]->IsSmi());
- return CompileGlobalEval(isolate,
- args.at<String>(1),
- args.at<Object>(2),
- static_cast<StrictModeFlag>(
- Smi::cast(args[3])->value()));
-}
-
-
-RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEvalNoLookup) {
- ASSERT(args.length() == 4);
-
- HandleScope scope(isolate);
- Handle<Object> callee = args.at<Object>(0);
-
- // 'eval' is bound in the global context, but it may have been overwritten.
- // Compare it to the builtin 'GlobalEval' function to make sure.
- if (*callee != isolate->global_context()->global_eval_fun() ||
- !args[1]->IsString()) {
- return MakePair(*callee,
- isolate->context()->global()->global_receiver());
- }
-
- ASSERT(args[3]->IsSmi());
- return CompileGlobalEval(isolate,
- args.at<String>(1),
- args.at<Object>(2),
- static_cast<StrictModeFlag>(
- Smi::cast(args[3])->value()));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNewFunctionAttributes) {
- // This utility adjusts the property attributes for newly created Function
- // object ("new Function(...)") by changing the map.
- // All it does is changing the prototype property to enumerable
- // as specified in ECMA262, 15.3.5.2.
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, func, 0);
-
- Handle<Map> map = func->shared()->strict_mode()
- ? isolate->strict_mode_function_instance_map()
- : isolate->function_instance_map();
-
- ASSERT(func->map()->instance_type() == map->instance_type());
- ASSERT(func->map()->instance_size() == map->instance_size());
- func->set_map(*map);
- return *func;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
- // Allocate a block of memory in NewSpace (filled with a filler).
- // Use as fallback for allocation in generated code when NewSpace
- // is full.
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(Smi, size_smi, 0);
- int size = size_smi->value();
- RUNTIME_ASSERT(IsAligned(size, kPointerSize));
- RUNTIME_ASSERT(size > 0);
- Heap* heap = isolate->heap();
- const int kMinFreeNewSpaceAfterGC = heap->InitialSemiSpaceSize() * 3/4;
- RUNTIME_ASSERT(size <= kMinFreeNewSpaceAfterGC);
- Object* allocation;
- { MaybeObject* maybe_allocation = heap->new_space()->AllocateRaw(size);
- if (maybe_allocation->ToObject(&allocation)) {
- heap->CreateFillerObjectAt(HeapObject::cast(allocation)->address(), size);
- }
- return maybe_allocation;
- }
-}
-
-
-// Push an object unto an array of objects if it is not already in the
-// array. Returns true if the element was pushed on the stack and
-// false otherwise.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) {
- ASSERT(args.length() == 2);
- CONVERT_CHECKED(JSArray, array, args[0]);
- CONVERT_CHECKED(JSObject, element, args[1]);
- RUNTIME_ASSERT(array->HasFastElements());
- int length = Smi::cast(array->length())->value();
- FixedArray* elements = FixedArray::cast(array->elements());
- for (int i = 0; i < length; i++) {
- if (elements->get(i) == element) return isolate->heap()->false_value();
- }
- Object* obj;
- // Strict not needed. Used for cycle detection in Array join implementation.
- { MaybeObject* maybe_obj = array->SetFastElement(length, element,
- kNonStrictMode);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- return isolate->heap()->true_value();
-}
-
-
-/**
- * A simple visitor visits every element of Array's.
- * The backend storage can be a fixed array for fast elements case,
- * or a dictionary for sparse array. Since Dictionary is a subtype
- * of FixedArray, the class can be used by both fast and slow cases.
- * The second parameter of the constructor, fast_elements, specifies
- * whether the storage is a FixedArray or Dictionary.
- *
- * An index limit is used to deal with the situation that a result array
- * length overflows 32-bit non-negative integer.
- */
-class ArrayConcatVisitor {
- public:
- ArrayConcatVisitor(Isolate* isolate,
- Handle<FixedArray> storage,
- bool fast_elements) :
- isolate_(isolate),
- storage_(Handle<FixedArray>::cast(
- isolate->global_handles()->Create(*storage))),
- index_offset_(0u),
- fast_elements_(fast_elements) { }
-
- ~ArrayConcatVisitor() {
- clear_storage();
- }
-
- void visit(uint32_t i, Handle<Object> elm) {
- if (i >= JSObject::kMaxElementCount - index_offset_) return;
- uint32_t index = index_offset_ + i;
-
- if (fast_elements_) {
- if (index < static_cast<uint32_t>(storage_->length())) {
- storage_->set(index, *elm);
- return;
- }
- // Our initial estimate of length was foiled, possibly by
- // getters on the arrays increasing the length of later arrays
- // during iteration.
- // This shouldn't happen in anything but pathological cases.
- SetDictionaryMode(index);
- // Fall-through to dictionary mode.
- }
- ASSERT(!fast_elements_);
- Handle<NumberDictionary> dict(NumberDictionary::cast(*storage_));
- Handle<NumberDictionary> result =
- isolate_->factory()->DictionaryAtNumberPut(dict, index, elm);
- if (!result.is_identical_to(dict)) {
- // Dictionary needed to grow.
- clear_storage();
- set_storage(*result);
- }
-}
-
- void increase_index_offset(uint32_t delta) {
- if (JSObject::kMaxElementCount - index_offset_ < delta) {
- index_offset_ = JSObject::kMaxElementCount;
- } else {
- index_offset_ += delta;
- }
- }
-
- Handle<JSArray> ToArray() {
- Handle<JSArray> array = isolate_->factory()->NewJSArray(0);
- Handle<Object> length =
- isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
- Handle<Map> map;
- if (fast_elements_) {
- map = isolate_->factory()->GetFastElementsMap(Handle<Map>(array->map()));
- } else {
- map = isolate_->factory()->GetSlowElementsMap(Handle<Map>(array->map()));
- }
- array->set_map(*map);
- array->set_length(*length);
- array->set_elements(*storage_);
- return array;
- }
-
- private:
- // Convert storage to dictionary mode.
- void SetDictionaryMode(uint32_t index) {
- ASSERT(fast_elements_);
- Handle<FixedArray> current_storage(*storage_);
- Handle<NumberDictionary> slow_storage(
- isolate_->factory()->NewNumberDictionary(current_storage->length()));
- uint32_t current_length = static_cast<uint32_t>(current_storage->length());
- for (uint32_t i = 0; i < current_length; i++) {
- HandleScope loop_scope;
- Handle<Object> element(current_storage->get(i));
- if (!element->IsTheHole()) {
- Handle<NumberDictionary> new_storage =
- isolate_->factory()->DictionaryAtNumberPut(slow_storage, i, element);
- if (!new_storage.is_identical_to(slow_storage)) {
- slow_storage = loop_scope.CloseAndEscape(new_storage);
- }
- }
- }
- clear_storage();
- set_storage(*slow_storage);
- fast_elements_ = false;
- }
-
- inline void clear_storage() {
- isolate_->global_handles()->Destroy(
- Handle<Object>::cast(storage_).location());
- }
-
- inline void set_storage(FixedArray* storage) {
- storage_ = Handle<FixedArray>::cast(
- isolate_->global_handles()->Create(storage));
- }
-
- Isolate* isolate_;
- Handle<FixedArray> storage_; // Always a global handle.
- // Index after last seen index. Always less than or equal to
- // JSObject::kMaxElementCount.
- uint32_t index_offset_;
- bool fast_elements_;
-};
-
-
-static uint32_t EstimateElementCount(Handle<JSArray> array) {
- uint32_t length = static_cast<uint32_t>(array->length()->Number());
- int element_count = 0;
- switch (array->GetElementsKind()) {
- case JSObject::FAST_ELEMENTS: {
- // Fast elements can't have lengths that are not representable by
- // a 32-bit signed integer.
- ASSERT(static_cast<int32_t>(FixedArray::kMaxLength) >= 0);
- int fast_length = static_cast<int>(length);
- Handle<FixedArray> elements(FixedArray::cast(array->elements()));
- for (int i = 0; i < fast_length; i++) {
- if (!elements->get(i)->IsTheHole()) element_count++;
- }
- break;
- }
- case JSObject::DICTIONARY_ELEMENTS: {
- Handle<NumberDictionary> dictionary(
- NumberDictionary::cast(array->elements()));
- int capacity = dictionary->Capacity();
- for (int i = 0; i < capacity; i++) {
- Handle<Object> key(dictionary->KeyAt(i));
- if (dictionary->IsKey(*key)) {
- element_count++;
- }
- }
- break;
- }
- default:
- // External arrays are always dense.
- return length;
- }
- // As an estimate, we assume that the prototype doesn't contain any
- // inherited elements.
- return element_count;
-}
-
-
-
-template<class ExternalArrayClass, class ElementType>
-static void IterateExternalArrayElements(Isolate* isolate,
- Handle<JSObject> receiver,
- bool elements_are_ints,
- bool elements_are_guaranteed_smis,
- ArrayConcatVisitor* visitor) {
- Handle<ExternalArrayClass> array(
- ExternalArrayClass::cast(receiver->elements()));
- uint32_t len = static_cast<uint32_t>(array->length());
-
- ASSERT(visitor != NULL);
- if (elements_are_ints) {
- if (elements_are_guaranteed_smis) {
- for (uint32_t j = 0; j < len; j++) {
- HandleScope loop_scope;
- Handle<Smi> e(Smi::FromInt(static_cast<int>(array->get(j))));
- visitor->visit(j, e);
- }
- } else {
- for (uint32_t j = 0; j < len; j++) {
- HandleScope loop_scope;
- int64_t val = static_cast<int64_t>(array->get(j));
- if (Smi::IsValid(static_cast<intptr_t>(val))) {
- Handle<Smi> e(Smi::FromInt(static_cast<int>(val)));
- visitor->visit(j, e);
- } else {
- Handle<Object> e =
- isolate->factory()->NewNumber(static_cast<ElementType>(val));
- visitor->visit(j, e);
- }
- }
- }
- } else {
- for (uint32_t j = 0; j < len; j++) {
- HandleScope loop_scope(isolate);
- Handle<Object> e = isolate->factory()->NewNumber(array->get(j));
- visitor->visit(j, e);
- }
- }
-}
-
-
-// Used for sorting indices in a List<uint32_t>.
-static int compareUInt32(const uint32_t* ap, const uint32_t* bp) {
- uint32_t a = *ap;
- uint32_t b = *bp;
- return (a == b) ? 0 : (a < b) ? -1 : 1;
-}
-
-
-static void CollectElementIndices(Handle<JSObject> object,
- uint32_t range,
- List<uint32_t>* indices) {
- JSObject::ElementsKind kind = object->GetElementsKind();
- switch (kind) {
- case JSObject::FAST_ELEMENTS: {
- Handle<FixedArray> elements(FixedArray::cast(object->elements()));
- uint32_t length = static_cast<uint32_t>(elements->length());
- if (range < length) length = range;
- for (uint32_t i = 0; i < length; i++) {
- if (!elements->get(i)->IsTheHole()) {
- indices->Add(i);
- }
- }
- break;
- }
- case JSObject::DICTIONARY_ELEMENTS: {
- Handle<NumberDictionary> dict(NumberDictionary::cast(object->elements()));
- uint32_t capacity = dict->Capacity();
- for (uint32_t j = 0; j < capacity; j++) {
- HandleScope loop_scope;
- Handle<Object> k(dict->KeyAt(j));
- if (dict->IsKey(*k)) {
- ASSERT(k->IsNumber());
- uint32_t index = static_cast<uint32_t>(k->Number());
- if (index < range) {
- indices->Add(index);
- }
- }
- }
- break;
- }
- default: {
- int dense_elements_length;
- switch (kind) {
- case JSObject::EXTERNAL_PIXEL_ELEMENTS: {
- dense_elements_length =
- ExternalPixelArray::cast(object->elements())->length();
- break;
- }
- case JSObject::EXTERNAL_BYTE_ELEMENTS: {
- dense_elements_length =
- ExternalByteArray::cast(object->elements())->length();
- break;
- }
- case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
- dense_elements_length =
- ExternalUnsignedByteArray::cast(object->elements())->length();
- break;
- }
- case JSObject::EXTERNAL_SHORT_ELEMENTS: {
- dense_elements_length =
- ExternalShortArray::cast(object->elements())->length();
- break;
- }
- case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
- dense_elements_length =
- ExternalUnsignedShortArray::cast(object->elements())->length();
- break;
- }
- case JSObject::EXTERNAL_INT_ELEMENTS: {
- dense_elements_length =
- ExternalIntArray::cast(object->elements())->length();
- break;
- }
- case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- dense_elements_length =
- ExternalUnsignedIntArray::cast(object->elements())->length();
- break;
- }
- case JSObject::EXTERNAL_FLOAT_ELEMENTS: {
- dense_elements_length =
- ExternalFloatArray::cast(object->elements())->length();
- break;
- }
- default:
- UNREACHABLE();
- dense_elements_length = 0;
- break;
- }
- uint32_t length = static_cast<uint32_t>(dense_elements_length);
- if (range <= length) {
- length = range;
- // We will add all indices, so we might as well clear it first
- // and avoid duplicates.
- indices->Clear();
- }
- for (uint32_t i = 0; i < length; i++) {
- indices->Add(i);
- }
- if (length == range) return; // All indices accounted for already.
- break;
- }
- }
-
- Handle<Object> prototype(object->GetPrototype());
- if (prototype->IsJSObject()) {
- // The prototype will usually have no inherited element indices,
- // but we have to check.
- CollectElementIndices(Handle<JSObject>::cast(prototype), range, indices);
- }
-}
-
-
-/**
- * A helper function that visits elements of a JSArray in numerical
- * order.
- *
- * The visitor argument called for each existing element in the array
- * with the element index and the element's value.
- * Afterwards it increments the base-index of the visitor by the array
- * length.
- * Returns false if any access threw an exception, otherwise true.
- */
-static bool IterateElements(Isolate* isolate,
- Handle<JSArray> receiver,
- ArrayConcatVisitor* visitor) {
- uint32_t length = static_cast<uint32_t>(receiver->length()->Number());
- switch (receiver->GetElementsKind()) {
- case JSObject::FAST_ELEMENTS: {
- // Run through the elements FixedArray and use HasElement and GetElement
- // to check the prototype for missing elements.
- Handle<FixedArray> elements(FixedArray::cast(receiver->elements()));
- int fast_length = static_cast<int>(length);
- ASSERT(fast_length <= elements->length());
- for (int j = 0; j < fast_length; j++) {
- HandleScope loop_scope(isolate);
- Handle<Object> element_value(elements->get(j), isolate);
- if (!element_value->IsTheHole()) {
- visitor->visit(j, element_value);
- } else if (receiver->HasElement(j)) {
- // Call GetElement on receiver, not its prototype, or getters won't
- // have the correct receiver.
- element_value = GetElement(receiver, j);
- if (element_value.is_null()) return false;
- visitor->visit(j, element_value);
- }
- }
- break;
- }
- case JSObject::DICTIONARY_ELEMENTS: {
- Handle<NumberDictionary> dict(receiver->element_dictionary());
- List<uint32_t> indices(dict->Capacity() / 2);
- // Collect all indices in the object and the prototypes less
- // than length. This might introduce duplicates in the indices list.
- CollectElementIndices(receiver, length, &indices);
- indices.Sort(&compareUInt32);
- int j = 0;
- int n = indices.length();
- while (j < n) {
- HandleScope loop_scope;
- uint32_t index = indices[j];
- Handle<Object> element = GetElement(receiver, index);
- if (element.is_null()) return false;
- visitor->visit(index, element);
- // Skip to next different index (i.e., omit duplicates).
- do {
- j++;
- } while (j < n && indices[j] == index);
- }
- break;
- }
- case JSObject::EXTERNAL_PIXEL_ELEMENTS: {
- Handle<ExternalPixelArray> pixels(ExternalPixelArray::cast(
- receiver->elements()));
- for (uint32_t j = 0; j < length; j++) {
- Handle<Smi> e(Smi::FromInt(pixels->get(j)));
- visitor->visit(j, e);
- }
- break;
- }
- case JSObject::EXTERNAL_BYTE_ELEMENTS: {
- IterateExternalArrayElements<ExternalByteArray, int8_t>(
- isolate, receiver, true, true, visitor);
- break;
- }
- case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
- IterateExternalArrayElements<ExternalUnsignedByteArray, uint8_t>(
- isolate, receiver, true, true, visitor);
- break;
- }
- case JSObject::EXTERNAL_SHORT_ELEMENTS: {
- IterateExternalArrayElements<ExternalShortArray, int16_t>(
- isolate, receiver, true, true, visitor);
- break;
- }
- case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
- IterateExternalArrayElements<ExternalUnsignedShortArray, uint16_t>(
- isolate, receiver, true, true, visitor);
- break;
- }
- case JSObject::EXTERNAL_INT_ELEMENTS: {
- IterateExternalArrayElements<ExternalIntArray, int32_t>(
- isolate, receiver, true, false, visitor);
- break;
- }
- case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- IterateExternalArrayElements<ExternalUnsignedIntArray, uint32_t>(
- isolate, receiver, true, false, visitor);
- break;
- }
- case JSObject::EXTERNAL_FLOAT_ELEMENTS: {
- IterateExternalArrayElements<ExternalFloatArray, float>(
- isolate, receiver, false, false, visitor);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- visitor->increase_index_offset(length);
- return true;
-}
-
-
-/**
- * Array::concat implementation.
- * See ECMAScript 262, 15.4.4.4.
- * TODO(581): Fix non-compliance for very large concatenations and update to
- * following the ECMAScript 5 specification.
- */
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
- ASSERT(args.length() == 1);
- HandleScope handle_scope(isolate);
-
- CONVERT_ARG_CHECKED(JSArray, arguments, 0);
- int argument_count = static_cast<int>(arguments->length()->Number());
- RUNTIME_ASSERT(arguments->HasFastElements());
- Handle<FixedArray> elements(FixedArray::cast(arguments->elements()));
-
- // Pass 1: estimate the length and number of elements of the result.
- // The actual length can be larger if any of the arguments have getters
- // that mutate other arguments (but will otherwise be precise).
- // The number of elements is precise if there are no inherited elements.
-
- uint32_t estimate_result_length = 0;
- uint32_t estimate_nof_elements = 0;
- {
- for (int i = 0; i < argument_count; i++) {
- HandleScope loop_scope;
- Handle<Object> obj(elements->get(i));
- uint32_t length_estimate;
- uint32_t element_estimate;
- if (obj->IsJSArray()) {
- Handle<JSArray> array(Handle<JSArray>::cast(obj));
- length_estimate =
- static_cast<uint32_t>(array->length()->Number());
- element_estimate =
- EstimateElementCount(array);
- } else {
- length_estimate = 1;
- element_estimate = 1;
- }
- // Avoid overflows by capping at kMaxElementCount.
- if (JSObject::kMaxElementCount - estimate_result_length <
- length_estimate) {
- estimate_result_length = JSObject::kMaxElementCount;
- } else {
- estimate_result_length += length_estimate;
- }
- if (JSObject::kMaxElementCount - estimate_nof_elements <
- element_estimate) {
- estimate_nof_elements = JSObject::kMaxElementCount;
- } else {
- estimate_nof_elements += element_estimate;
- }
- }
- }
-
- // If estimated number of elements is more than half of length, a
- // fixed array (fast case) is more time and space-efficient than a
- // dictionary.
- bool fast_case = (estimate_nof_elements * 2) >= estimate_result_length;
-
- Handle<FixedArray> storage;
- if (fast_case) {
- // The backing storage array must have non-existing elements to
- // preserve holes across concat operations.
- storage = isolate->factory()->NewFixedArrayWithHoles(
- estimate_result_length);
- } else {
- // TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
- uint32_t at_least_space_for = estimate_nof_elements +
- (estimate_nof_elements >> 2);
- storage = Handle<FixedArray>::cast(
- isolate->factory()->NewNumberDictionary(at_least_space_for));
- }
-
- ArrayConcatVisitor visitor(isolate, storage, fast_case);
-
- for (int i = 0; i < argument_count; i++) {
- Handle<Object> obj(elements->get(i));
- if (obj->IsJSArray()) {
- Handle<JSArray> array = Handle<JSArray>::cast(obj);
- if (!IterateElements(isolate, array, &visitor)) {
- return Failure::Exception();
- }
- } else {
- visitor.visit(0, obj);
- visitor.increase_index_offset(1);
- }
- }
-
- return *visitor.ToArray();
-}
-
-
-// This will not allocate (flatten the string), but it may run
-// very slowly for very deeply nested ConsStrings. For debugging use only.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_CHECKED(String, string, args[0]);
- StringInputBuffer buffer(string);
- while (buffer.has_more()) {
- uint16_t character = buffer.GetNext();
- PrintF("%c", character);
- }
- return string;
-}
-
-// Moves all own elements of an object, that are below a limit, to positions
-// starting at zero. All undefined values are placed after non-undefined values,
-// and are followed by non-existing element. Does not change the length
-// property.
-// Returns the number of non-undefined elements collected.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) {
- ASSERT(args.length() == 2);
- CONVERT_CHECKED(JSObject, object, args[0]);
- CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
- return object->PrepareElementsForSort(limit);
-}
-
-
-// Move contents of argument 0 (an array) to argument 1 (an array)
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) {
- ASSERT(args.length() == 2);
- CONVERT_CHECKED(JSArray, from, args[0]);
- CONVERT_CHECKED(JSArray, to, args[1]);
- HeapObject* new_elements = from->elements();
- MaybeObject* maybe_new_map;
- if (new_elements->map() == isolate->heap()->fixed_array_map() ||
- new_elements->map() == isolate->heap()->fixed_cow_array_map()) {
- maybe_new_map = to->map()->GetFastElementsMap();
- } else {
- maybe_new_map = to->map()->GetSlowElementsMap();
- }
- Object* new_map;
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- to->set_map(Map::cast(new_map));
- to->set_elements(new_elements);
- to->set_length(from->length());
- Object* obj;
- { MaybeObject* maybe_obj = from->ResetElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- from->set_length(Smi::FromInt(0));
- return to;
-}
-
-
-// How many elements does this object/array have?
-RUNTIME_FUNCTION(MaybeObject*, Runtime_EstimateNumberOfElements) {
- ASSERT(args.length() == 1);
- CONVERT_CHECKED(JSObject, object, args[0]);
- HeapObject* elements = object->elements();
- if (elements->IsDictionary()) {
- return Smi::FromInt(NumberDictionary::cast(elements)->NumberOfElements());
- } else if (object->IsJSArray()) {
- return JSArray::cast(object)->length();
- } else {
- return Smi::FromInt(FixedArray::cast(elements)->length());
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SwapElements) {
- HandleScope handle_scope(isolate);
-
- ASSERT_EQ(3, args.length());
-
- CONVERT_ARG_CHECKED(JSObject, object, 0);
- Handle<Object> key1 = args.at<Object>(1);
- Handle<Object> key2 = args.at<Object>(2);
-
- uint32_t index1, index2;
- if (!key1->ToArrayIndex(&index1)
- || !key2->ToArrayIndex(&index2)) {
- return isolate->ThrowIllegalOperation();
- }
-
- Handle<JSObject> jsobject = Handle<JSObject>::cast(object);
- Handle<Object> tmp1 = GetElement(jsobject, index1);
- RETURN_IF_EMPTY_HANDLE(isolate, tmp1);
- Handle<Object> tmp2 = GetElement(jsobject, index2);
- RETURN_IF_EMPTY_HANDLE(isolate, tmp2);
-
- RETURN_IF_EMPTY_HANDLE(isolate,
- SetElement(jsobject, index1, tmp2, kStrictMode));
- RETURN_IF_EMPTY_HANDLE(isolate,
- SetElement(jsobject, index2, tmp1, kStrictMode));
-
- return isolate->heap()->undefined_value();
-}
-
-
-// Returns an array that tells you where in the [0, length) interval an array
-// might have elements. Can either return keys (positive integers) or
-// intervals (pair of a negative integer (-start-1) followed by a
-// positive (length)) or undefined values.
-// Intervals can span over some keys that are not in the object.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
- ASSERT(args.length() == 2);
- HandleScope scope(isolate);
- CONVERT_ARG_CHECKED(JSObject, array, 0);
- CONVERT_NUMBER_CHECKED(uint32_t, length, Uint32, args[1]);
- if (array->elements()->IsDictionary()) {
- // Create an array and get all the keys into it, then remove all the
- // keys that are not integers in the range 0 to length-1.
- Handle<FixedArray> keys = GetKeysInFixedArrayFor(array, INCLUDE_PROTOS);
- int keys_length = keys->length();
- for (int i = 0; i < keys_length; i++) {
- Object* key = keys->get(i);
- uint32_t index = 0;
- if (!key->ToArrayIndex(&index) || index >= length) {
- // Zap invalid keys.
- keys->set_undefined(i);
- }
- }
- return *isolate->factory()->NewJSArrayWithElements(keys);
- } else {
- ASSERT(array->HasFastElements());
- Handle<FixedArray> single_interval = isolate->factory()->NewFixedArray(2);
- // -1 means start of array.
- single_interval->set(0, Smi::FromInt(-1));
- uint32_t actual_length =
- static_cast<uint32_t>(FixedArray::cast(array->elements())->length());
- uint32_t min_length = actual_length < length ? actual_length : length;
- Handle<Object> length_object =
- isolate->factory()->NewNumber(static_cast<double>(min_length));
- single_interval->set(1, *length_object);
- return *isolate->factory()->NewJSArrayWithElements(single_interval);
- }
-}
-
-
-// DefineAccessor takes an optional final argument which is the
-// property attributes (eg, DONT_ENUM, DONT_DELETE). IMPORTANT: due
-// to the way accessors are implemented, it is set for both the getter
-// and setter on the first call to DefineAccessor and ignored on
-// subsequent calls.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineAccessor) {
- RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
- // Compute attributes.
- PropertyAttributes attributes = NONE;
- if (args.length() == 5) {
- CONVERT_CHECKED(Smi, attrs, args[4]);
- int value = attrs->value();
- // Only attribute bits should be set.
- ASSERT((value & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
- attributes = static_cast<PropertyAttributes>(value);
- }
-
- CONVERT_CHECKED(JSObject, obj, args[0]);
- CONVERT_CHECKED(String, name, args[1]);
- CONVERT_CHECKED(Smi, flag, args[2]);
- CONVERT_CHECKED(JSFunction, fun, args[3]);
- return obj->DefineAccessor(name, flag->value() == 0, fun, attributes);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) {
- ASSERT(args.length() == 3);
- CONVERT_CHECKED(JSObject, obj, args[0]);
- CONVERT_CHECKED(String, name, args[1]);
- CONVERT_CHECKED(Smi, flag, args[2]);
- return obj->LookupAccessor(name, flag->value() == 0);
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugBreak) {
- ASSERT(args.length() == 0);
- return Execution::DebugBreakHelper();
-}
-
-
-// Helper functions for wrapping and unwrapping stack frame ids.
-static Smi* WrapFrameId(StackFrame::Id id) {
- ASSERT(IsAligned(OffsetFrom(id), static_cast<intptr_t>(4)));
- return Smi::FromInt(id >> 2);
-}
-
-
-static StackFrame::Id UnwrapFrameId(Smi* wrapped) {
- return static_cast<StackFrame::Id>(wrapped->value() << 2);
-}
-
-
-// Adds a JavaScript function as a debug event listener.
-// args[0]: debug event listener function to set or null or undefined for
-// clearing the event listener function
-// args[1]: object supplied during callback
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDebugEventListener) {
- ASSERT(args.length() == 2);
- RUNTIME_ASSERT(args[0]->IsJSFunction() ||
- args[0]->IsUndefined() ||
- args[0]->IsNull());
- Handle<Object> callback = args.at<Object>(0);
- Handle<Object> data = args.at<Object>(1);
- isolate->debugger()->SetEventListener(callback, data);
-
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Break) {
- ASSERT(args.length() == 0);
- isolate->stack_guard()->DebugBreak();
- return isolate->heap()->undefined_value();
-}
-
-
-static MaybeObject* DebugLookupResultValue(Heap* heap,
- Object* receiver,
- String* name,
- LookupResult* result,
- bool* caught_exception) {
- Object* value;
- switch (result->type()) {
- case NORMAL:
- value = result->holder()->GetNormalizedProperty(result);
- if (value->IsTheHole()) {
- return heap->undefined_value();
- }
- return value;
- case FIELD:
- value =
- JSObject::cast(
- result->holder())->FastPropertyAt(result->GetFieldIndex());
- if (value->IsTheHole()) {
- return heap->undefined_value();
- }
- return value;
- case CONSTANT_FUNCTION:
- return result->GetConstantFunction();
- case CALLBACKS: {
- Object* structure = result->GetCallbackObject();
- if (structure->IsProxy() || structure->IsAccessorInfo()) {
- MaybeObject* maybe_value = receiver->GetPropertyWithCallback(
- receiver, structure, name, result->holder());
- if (!maybe_value->ToObject(&value)) {
- if (maybe_value->IsRetryAfterGC()) return maybe_value;
- ASSERT(maybe_value->IsException());
- maybe_value = heap->isolate()->pending_exception();
- heap->isolate()->clear_pending_exception();
- if (caught_exception != NULL) {
- *caught_exception = true;
- }
- return maybe_value;
- }
- return value;
- } else {
- return heap->undefined_value();
- }
- }
- case INTERCEPTOR:
- case MAP_TRANSITION:
- case EXTERNAL_ARRAY_TRANSITION:
- case CONSTANT_TRANSITION:
- case NULL_DESCRIPTOR:
- return heap->undefined_value();
- default:
- UNREACHABLE();
- }
- UNREACHABLE();
- return heap->undefined_value();
-}
-
-
-// Get debugger related details for an object property.
-// args[0]: object holding property
-// args[1]: name of the property
-//
-// The array returned contains the following information:
-// 0: Property value
-// 1: Property details
-// 2: Property value is exception
-// 3: Getter function if defined
-// 4: Setter function if defined
-// Items 2-4 are only filled if the property has either a getter or a setter
-// defined through __defineGetter__ and/or __defineSetter__.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
- HandleScope scope(isolate);
-
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
- CONVERT_ARG_CHECKED(String, name, 1);
-
- // Make sure to set the current context to the context before the debugger was
- // entered (if the debugger is entered). The reason for switching context here
- // is that for some property lookups (accessors and interceptors) callbacks
- // into the embedding application can occour, and the embedding application
- // could have the assumption that its own global context is the current
- // context and not some internal debugger context.
- SaveContext save(isolate);
- if (isolate->debug()->InDebugger()) {
- isolate->set_context(*isolate->debug()->debugger_entry()->GetContext());
- }
-
- // Skip the global proxy as it has no properties and always delegates to the
- // real global object.
- if (obj->IsJSGlobalProxy()) {
- obj = Handle<JSObject>(JSObject::cast(obj->GetPrototype()));
- }
-
-
- // Check if the name is trivially convertible to an index and get the element
- // if so.
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- Handle<FixedArray> details = isolate->factory()->NewFixedArray(2);
- Object* element_or_char;
- { MaybeObject* maybe_element_or_char =
- Runtime::GetElementOrCharAt(isolate, obj, index);
- if (!maybe_element_or_char->ToObject(&element_or_char)) {
- return maybe_element_or_char;
- }
- }
- details->set(0, element_or_char);
- details->set(1, PropertyDetails(NONE, NORMAL).AsSmi());
- return *isolate->factory()->NewJSArrayWithElements(details);
- }
-
- // Find the number of objects making up this.
- int length = LocalPrototypeChainLength(*obj);
-
- // Try local lookup on each of the objects.
- Handle<JSObject> jsproto = obj;
- for (int i = 0; i < length; i++) {
- LookupResult result;
- jsproto->LocalLookup(*name, &result);
- if (result.IsProperty()) {
- // LookupResult is not GC safe as it holds raw object pointers.
- // GC can happen later in this code so put the required fields into
- // local variables using handles when required for later use.
- PropertyType result_type = result.type();
- Handle<Object> result_callback_obj;
- if (result_type == CALLBACKS) {
- result_callback_obj = Handle<Object>(result.GetCallbackObject(),
- isolate);
- }
- Smi* property_details = result.GetPropertyDetails().AsSmi();
- // DebugLookupResultValue can cause GC so details from LookupResult needs
- // to be copied to handles before this.
- bool caught_exception = false;
- Object* raw_value;
- { MaybeObject* maybe_raw_value =
- DebugLookupResultValue(isolate->heap(), *obj, *name,
- &result, &caught_exception);
- if (!maybe_raw_value->ToObject(&raw_value)) return maybe_raw_value;
- }
- Handle<Object> value(raw_value, isolate);
-
- // If the callback object is a fixed array then it contains JavaScript
- // getter and/or setter.
- bool hasJavaScriptAccessors = result_type == CALLBACKS &&
- result_callback_obj->IsFixedArray();
- Handle<FixedArray> details =
- isolate->factory()->NewFixedArray(hasJavaScriptAccessors ? 5 : 2);
- details->set(0, *value);
- details->set(1, property_details);
- if (hasJavaScriptAccessors) {
- details->set(2,
- caught_exception ? isolate->heap()->true_value()
- : isolate->heap()->false_value());
- details->set(3, FixedArray::cast(*result_callback_obj)->get(0));
- details->set(4, FixedArray::cast(*result_callback_obj)->get(1));
- }
-
- return *isolate->factory()->NewJSArrayWithElements(details);
- }
- if (i < length - 1) {
- jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
- }
- }
-
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetProperty) {
- HandleScope scope(isolate);
-
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
- CONVERT_ARG_CHECKED(String, name, 1);
-
- LookupResult result;
- obj->Lookup(*name, &result);
- if (result.IsProperty()) {
- return DebugLookupResultValue(isolate->heap(), *obj, *name, &result, NULL);
- }
- return isolate->heap()->undefined_value();
-}
-
-
-// Return the property type calculated from the property details.
-// args[0]: smi with property details.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyTypeFromDetails) {
- ASSERT(args.length() == 1);
- CONVERT_CHECKED(Smi, details, args[0]);
- PropertyType type = PropertyDetails(details).type();
- return Smi::FromInt(static_cast<int>(type));
-}
-
-
-// Return the property attribute calculated from the property details.
-// args[0]: smi with property details.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyAttributesFromDetails) {
- ASSERT(args.length() == 1);
- CONVERT_CHECKED(Smi, details, args[0]);
- PropertyAttributes attributes = PropertyDetails(details).attributes();
- return Smi::FromInt(static_cast<int>(attributes));
-}
-
-
-// Return the property insertion index calculated from the property details.
-// args[0]: smi with property details.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyIndexFromDetails) {
- ASSERT(args.length() == 1);
- CONVERT_CHECKED(Smi, details, args[0]);
- int index = PropertyDetails(details).index();
- return Smi::FromInt(index);
-}
-
-
-// Return property value from named interceptor.
-// args[0]: object
-// args[1]: property name
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugNamedInterceptorPropertyValue) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
- RUNTIME_ASSERT(obj->HasNamedInterceptor());
- CONVERT_ARG_CHECKED(String, name, 1);
-
- PropertyAttributes attributes;
- return obj->GetPropertyWithInterceptor(*obj, *name, &attributes);
-}
-
-
-// Return element value from indexed interceptor.
-// args[0]: object
-// args[1]: index
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugIndexedInterceptorElementValue) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
- RUNTIME_ASSERT(obj->HasIndexedInterceptor());
- CONVERT_NUMBER_CHECKED(uint32_t, index, Uint32, args[1]);
-
- return obj->GetElementWithInterceptor(*obj, index);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckExecutionState) {
- ASSERT(args.length() >= 1);
- CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- // Check that the break id is valid.
- if (isolate->debug()->break_id() == 0 ||
- break_id != isolate->debug()->break_id()) {
- return isolate->Throw(
- isolate->heap()->illegal_execution_state_symbol());
- }
-
- return isolate->heap()->true_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameCount) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- // Check arguments.
- Object* result;
- { MaybeObject* maybe_result = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- // Count all frames which are relevant to debugging stack trace.
- int n = 0;
- StackFrame::Id id = isolate->debug()->break_frame_id();
- if (id == StackFrame::NO_ID) {
- // If there is no JavaScript stack frame count is 0.
- return Smi::FromInt(0);
- }
- for (JavaScriptFrameIterator it(isolate, id); !it.done(); it.Advance()) n++;
- return Smi::FromInt(n);
-}
-
-
-static const int kFrameDetailsFrameIdIndex = 0;
-static const int kFrameDetailsReceiverIndex = 1;
-static const int kFrameDetailsFunctionIndex = 2;
-static const int kFrameDetailsArgumentCountIndex = 3;
-static const int kFrameDetailsLocalCountIndex = 4;
-static const int kFrameDetailsSourcePositionIndex = 5;
-static const int kFrameDetailsConstructCallIndex = 6;
-static const int kFrameDetailsAtReturnIndex = 7;
-static const int kFrameDetailsDebuggerFrameIndex = 8;
-static const int kFrameDetailsFirstDynamicIndex = 9;
-
-// Return an array with frame details
-// args[0]: number: break id
-// args[1]: number: frame index
-//
-// The array returned contains the following information:
-// 0: Frame id
-// 1: Receiver
-// 2: Function
-// 3: Argument count
-// 4: Local count
-// 5: Source position
-// 6: Constructor call
-// 7: Is at return
-// 8: Debugger frame
-// Arguments name, value
-// Locals name, value
-// Return value if any
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
-
- // Check arguments.
- Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check->ToObject(&check)) return maybe_check;
- }
- CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
- Heap* heap = isolate->heap();
-
- // Find the relevant frame with the requested index.
- StackFrame::Id id = isolate->debug()->break_frame_id();
- if (id == StackFrame::NO_ID) {
- // If there are no JavaScript stack frames return undefined.
- return heap->undefined_value();
- }
- int count = 0;
- JavaScriptFrameIterator it(isolate, id);
- for (; !it.done(); it.Advance()) {
- if (count == index) break;
- count++;
- }
- if (it.done()) return heap->undefined_value();
-
- bool is_optimized_frame =
- it.frame()->LookupCode()->kind() == Code::OPTIMIZED_FUNCTION;
-
- // Traverse the saved contexts chain to find the active context for the
- // selected frame.
- SaveContext* save = isolate->save_context();
- while (save != NULL && !save->below(it.frame())) {
- save = save->prev();
- }
- ASSERT(save != NULL);
-
- // Get the frame id.
- Handle<Object> frame_id(WrapFrameId(it.frame()->id()), isolate);
-
- // Find source position.
- int position =
- it.frame()->LookupCode()->SourcePosition(it.frame()->pc());
-
- // Check for constructor frame.
- bool constructor = it.frame()->IsConstructor();
-
- // Get scope info and read from it for local variable information.
- Handle<JSFunction> function(JSFunction::cast(it.frame()->function()));
- Handle<SerializedScopeInfo> scope_info(function->shared()->scope_info());
- ScopeInfo<> info(*scope_info);
-
- // Get the context.
- Handle<Context> context(Context::cast(it.frame()->context()));
-
- // Get the locals names and values into a temporary array.
- //
- // TODO(1240907): Hide compiler-introduced stack variables
- // (e.g. .result)? For users of the debugger, they will probably be
- // confusing.
- Handle<FixedArray> locals =
- isolate->factory()->NewFixedArray(info.NumberOfLocals() * 2);
-
- // Fill in the names of the locals.
- for (int i = 0; i < info.NumberOfLocals(); i++) {
- locals->set(i * 2, *info.LocalName(i));
- }
-
- // Fill in the values of the locals.
- for (int i = 0; i < info.NumberOfLocals(); i++) {
- if (is_optimized_frame) {
- // If we are inspecting an optimized frame use undefined as the
- // value for all locals.
- //
- // TODO(1140): We should be able to get the correct values
- // for locals in optimized frames.
- locals->set(i * 2 + 1, isolate->heap()->undefined_value());
- } else if (i < info.number_of_stack_slots()) {
- // Get the value from the stack.
- locals->set(i * 2 + 1, it.frame()->GetExpression(i));
- } else {
- // Traverse the context chain to the function context as all local
- // variables stored in the context will be on the function context.
- Handle<String> name = info.LocalName(i);
- while (!context->is_function_context()) {
- context = Handle<Context>(context->previous());
- }
- ASSERT(context->is_function_context());
- locals->set(i * 2 + 1,
- context->get(scope_info->ContextSlotIndex(*name, NULL)));
- }
- }
-
- // Check whether this frame is positioned at return. If not top
- // frame or if the frame is optimized it cannot be at a return.
- bool at_return = false;
- if (!is_optimized_frame && index == 0) {
- at_return = isolate->debug()->IsBreakAtReturn(it.frame());
- }
-
- // If positioned just before return find the value to be returned and add it
- // to the frame information.
- Handle<Object> return_value = isolate->factory()->undefined_value();
- if (at_return) {
- StackFrameIterator it2(isolate);
- Address internal_frame_sp = NULL;
- while (!it2.done()) {
- if (it2.frame()->is_internal()) {
- internal_frame_sp = it2.frame()->sp();
- } else {
- if (it2.frame()->is_java_script()) {
- if (it2.frame()->id() == it.frame()->id()) {
- // The internal frame just before the JavaScript frame contains the
- // value to return on top. A debug break at return will create an
- // internal frame to store the return value (eax/rax/r0) before
- // entering the debug break exit frame.
- if (internal_frame_sp != NULL) {
- return_value =
- Handle<Object>(Memory::Object_at(internal_frame_sp),
- isolate);
- break;
- }
- }
- }
-
- // Indicate that the previous frame was not an internal frame.
- internal_frame_sp = NULL;
- }
- it2.Advance();
- }
- }
-
- // Now advance to the arguments adapter frame (if any). It contains all
- // the provided parameters whereas the function frame always have the number
- // of arguments matching the functions parameters. The rest of the
- // information (except for what is collected above) is the same.
- it.AdvanceToArgumentsFrame();
-
- // Find the number of arguments to fill. At least fill the number of
- // parameters for the function and fill more if more parameters are provided.
- int argument_count = info.number_of_parameters();
- if (argument_count < it.frame()->ComputeParametersCount()) {
- argument_count = it.frame()->ComputeParametersCount();
- }
-
- // Calculate the size of the result.
- int details_size = kFrameDetailsFirstDynamicIndex +
- 2 * (argument_count + info.NumberOfLocals()) +
- (at_return ? 1 : 0);
- Handle<FixedArray> details = isolate->factory()->NewFixedArray(details_size);
-
- // Add the frame id.
- details->set(kFrameDetailsFrameIdIndex, *frame_id);
-
- // Add the function (same as in function frame).
- details->set(kFrameDetailsFunctionIndex, it.frame()->function());
-
- // Add the arguments count.
- details->set(kFrameDetailsArgumentCountIndex, Smi::FromInt(argument_count));
-
- // Add the locals count
- details->set(kFrameDetailsLocalCountIndex,
- Smi::FromInt(info.NumberOfLocals()));
-
- // Add the source position.
- if (position != RelocInfo::kNoPosition) {
- details->set(kFrameDetailsSourcePositionIndex, Smi::FromInt(position));
- } else {
- details->set(kFrameDetailsSourcePositionIndex, heap->undefined_value());
- }
-
- // Add the constructor information.
- details->set(kFrameDetailsConstructCallIndex, heap->ToBoolean(constructor));
-
- // Add the at return information.
- details->set(kFrameDetailsAtReturnIndex, heap->ToBoolean(at_return));
-
- // Add information on whether this frame is invoked in the debugger context.
- details->set(kFrameDetailsDebuggerFrameIndex,
- heap->ToBoolean(*save->context() ==
- *isolate->debug()->debug_context()));
-
- // Fill the dynamic part.
- int details_index = kFrameDetailsFirstDynamicIndex;
-
- // Add arguments name and value.
- for (int i = 0; i < argument_count; i++) {
- // Name of the argument.
- if (i < info.number_of_parameters()) {
- details->set(details_index++, *info.parameter_name(i));
- } else {
- details->set(details_index++, heap->undefined_value());
- }
-
- // Parameter value. If we are inspecting an optimized frame, use
- // undefined as the value.
- //
- // TODO(3141533): We should be able to get the actual parameter
- // value for optimized frames.
- if (!is_optimized_frame &&
- (i < it.frame()->ComputeParametersCount())) {
- details->set(details_index++, it.frame()->GetParameter(i));
- } else {
- details->set(details_index++, heap->undefined_value());
- }
- }
-
- // Add locals name and value from the temporary copy from the function frame.
- for (int i = 0; i < info.NumberOfLocals() * 2; i++) {
- details->set(details_index++, locals->get(i));
- }
-
- // Add the value being returned.
- if (at_return) {
- details->set(details_index++, *return_value);
- }
-
- // Add the receiver (same as in function frame).
- // THIS MUST BE DONE LAST SINCE WE MIGHT ADVANCE
- // THE FRAME ITERATOR TO WRAP THE RECEIVER.
- Handle<Object> receiver(it.frame()->receiver(), isolate);
- if (!receiver->IsJSObject()) {
- // If the receiver is NOT a JSObject we have hit an optimization
- // where a value object is not converted into a wrapped JS objects.
- // To hide this optimization from the debugger, we wrap the receiver
- // by creating correct wrapper object based on the calling frame's
- // global context.
- it.Advance();
- Handle<Context> calling_frames_global_context(
- Context::cast(Context::cast(it.frame()->context())->global_context()));
- receiver =
- isolate->factory()->ToObject(receiver, calling_frames_global_context);
- }
- details->set(kFrameDetailsReceiverIndex, *receiver);
-
- ASSERT_EQ(details_size, details_index);
- return *isolate->factory()->NewJSArrayWithElements(details);
-}
-
-
-// Copy all the context locals into an object used to materialize a scope.
-static bool CopyContextLocalsToScopeObject(
- Isolate* isolate,
- Handle<SerializedScopeInfo> serialized_scope_info,
- ScopeInfo<>& scope_info,
- Handle<Context> context,
- Handle<JSObject> scope_object) {
- // Fill all context locals to the context extension.
- for (int i = Context::MIN_CONTEXT_SLOTS;
- i < scope_info.number_of_context_slots();
- i++) {
- int context_index = serialized_scope_info->ContextSlotIndex(
- *scope_info.context_slot_name(i), NULL);
-
- // Don't include the arguments shadow (.arguments) context variable.
- if (*scope_info.context_slot_name(i) !=
- isolate->heap()->arguments_shadow_symbol()) {
- RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate,
- SetProperty(scope_object,
- scope_info.context_slot_name(i),
- Handle<Object>(context->get(context_index), isolate),
- NONE,
- kNonStrictMode),
- false);
- }
- }
-
- return true;
-}
-
-
-// Create a plain JSObject which materializes the local scope for the specified
-// frame.
-static Handle<JSObject> MaterializeLocalScope(Isolate* isolate,
- JavaScriptFrame* frame) {
- Handle<JSFunction> function(JSFunction::cast(frame->function()));
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
- ScopeInfo<> scope_info(*serialized_scope_info);
-
- // Allocate and initialize a JSObject with all the arguments, stack locals
- // heap locals and extension properties of the debugged function.
- Handle<JSObject> local_scope =
- isolate->factory()->NewJSObject(isolate->object_function());
-
- // First fill all parameters.
- for (int i = 0; i < scope_info.number_of_parameters(); ++i) {
- RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate,
- SetProperty(local_scope,
- scope_info.parameter_name(i),
- Handle<Object>(frame->GetParameter(i), isolate),
- NONE,
- kNonStrictMode),
- Handle<JSObject>());
- }
-
- // Second fill all stack locals.
- for (int i = 0; i < scope_info.number_of_stack_slots(); i++) {
- RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate,
- SetProperty(local_scope,
- scope_info.stack_slot_name(i),
- Handle<Object>(frame->GetExpression(i), isolate),
- NONE,
- kNonStrictMode),
- Handle<JSObject>());
- }
-
- // Third fill all context locals.
- Handle<Context> frame_context(Context::cast(frame->context()));
- Handle<Context> function_context(frame_context->fcontext());
- if (!CopyContextLocalsToScopeObject(isolate,
- serialized_scope_info, scope_info,
- function_context, local_scope)) {
- return Handle<JSObject>();
- }
-
- // Finally copy any properties from the function context extension. This will
- // be variables introduced by eval.
- if (function_context->closure() == *function) {
- if (function_context->has_extension() &&
- !function_context->IsGlobalContext()) {
- Handle<JSObject> ext(JSObject::cast(function_context->extension()));
- Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
- for (int i = 0; i < keys->length(); i++) {
- // Names of variables introduced by eval are strings.
- ASSERT(keys->get(i)->IsString());
- Handle<String> key(String::cast(keys->get(i)));
- RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate,
- SetProperty(local_scope,
- key,
- GetProperty(ext, key),
- NONE,
- kNonStrictMode),
- Handle<JSObject>());
- }
- }
- }
- return local_scope;
-}
-
-
-// Create a plain JSObject which materializes the closure content for the
-// context.
-static Handle<JSObject> MaterializeClosure(Isolate* isolate,
- Handle<Context> context) {
- ASSERT(context->is_function_context());
-
- Handle<SharedFunctionInfo> shared(context->closure()->shared());
- Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
- ScopeInfo<> scope_info(*serialized_scope_info);
-
- // Allocate and initialize a JSObject with all the content of theis function
- // closure.
- Handle<JSObject> closure_scope =
- isolate->factory()->NewJSObject(isolate->object_function());
-
- // Check whether the arguments shadow object exists.
- int arguments_shadow_index =
- shared->scope_info()->ContextSlotIndex(
- isolate->heap()->arguments_shadow_symbol(), NULL);
- if (arguments_shadow_index >= 0) {
- // In this case all the arguments are available in the arguments shadow
- // object.
- Handle<JSObject> arguments_shadow(
- JSObject::cast(context->get(arguments_shadow_index)));
- for (int i = 0; i < scope_info.number_of_parameters(); ++i) {
- // We don't expect exception-throwing getters on the arguments shadow.
- Object* element = arguments_shadow->GetElement(i)->ToObjectUnchecked();
- RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate,
- SetProperty(closure_scope,
- scope_info.parameter_name(i),
- Handle<Object>(element, isolate),
- NONE,
- kNonStrictMode),
- Handle<JSObject>());
- }
- }
-
- // Fill all context locals to the context extension.
- if (!CopyContextLocalsToScopeObject(isolate,
- serialized_scope_info, scope_info,
- context, closure_scope)) {
- return Handle<JSObject>();
- }
-
- // Finally copy any properties from the function context extension. This will
- // be variables introduced by eval.
- if (context->has_extension()) {
- Handle<JSObject> ext(JSObject::cast(context->extension()));
- Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
- for (int i = 0; i < keys->length(); i++) {
- // Names of variables introduced by eval are strings.
- ASSERT(keys->get(i)->IsString());
- Handle<String> key(String::cast(keys->get(i)));
- RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate,
- SetProperty(closure_scope,
- key,
- GetProperty(ext, key),
- NONE,
- kNonStrictMode),
- Handle<JSObject>());
- }
- }
-
- return closure_scope;
-}
-
-
-// Iterate over the actual scopes visible from a stack frame. All scopes are
-// backed by an actual context except the local scope, which is inserted
-// "artifically" in the context chain.
-class ScopeIterator {
- public:
- enum ScopeType {
- ScopeTypeGlobal = 0,
- ScopeTypeLocal,
- ScopeTypeWith,
- ScopeTypeClosure,
- // Every catch block contains an implicit with block (its parameter is
- // a JSContextExtensionObject) that extends current scope with a variable
- // holding exception object. Such with blocks are treated as scopes of their
- // own type.
- ScopeTypeCatch
- };
-
- ScopeIterator(Isolate* isolate, JavaScriptFrame* frame)
- : isolate_(isolate),
- frame_(frame),
- function_(JSFunction::cast(frame->function())),
- context_(Context::cast(frame->context())),
- local_done_(false),
- at_local_(false) {
-
- // Check whether the first scope is actually a local scope.
- if (context_->IsGlobalContext()) {
- // If there is a stack slot for .result then this local scope has been
- // created for evaluating top level code and it is not a real local scope.
- // Checking for the existence of .result seems fragile, but the scope info
- // saved with the code object does not otherwise have that information.
- int index = function_->shared()->scope_info()->
- StackSlotIndex(isolate_->heap()->result_symbol());
- at_local_ = index < 0;
- } else if (context_->is_function_context()) {
- at_local_ = true;
- }
- }
-
- // More scopes?
- bool Done() { return context_.is_null(); }
-
- // Move to the next scope.
- void Next() {
- // If at a local scope mark the local scope as passed.
- if (at_local_) {
- at_local_ = false;
- local_done_ = true;
-
- // If the current context is not associated with the local scope the
- // current context is the next real scope, so don't move to the next
- // context in this case.
- if (context_->closure() != *function_) {
- return;
- }
- }
-
- // The global scope is always the last in the chain.
- if (context_->IsGlobalContext()) {
- context_ = Handle<Context>();
- return;
- }
-
- // Move to the next context.
- if (context_->is_function_context()) {
- context_ = Handle<Context>(Context::cast(context_->closure()->context()));
- } else {
- context_ = Handle<Context>(context_->previous());
- }
-
- // If passing the local scope indicate that the current scope is now the
- // local scope.
- if (!local_done_ &&
- (context_->IsGlobalContext() || (context_->is_function_context()))) {
- at_local_ = true;
- }
- }
-
- // Return the type of the current scope.
- int Type() {
- if (at_local_) {
- return ScopeTypeLocal;
- }
- if (context_->IsGlobalContext()) {
- ASSERT(context_->global()->IsGlobalObject());
- return ScopeTypeGlobal;
- }
- if (context_->is_function_context()) {
- return ScopeTypeClosure;
- }
- ASSERT(context_->has_extension());
- // Current scope is either an explicit with statement or a with statement
- // implicitely generated for a catch block.
- // If the extension object here is a JSContextExtensionObject then
- // current with statement is one frome a catch block otherwise it's a
- // regular with statement.
- if (context_->extension()->IsJSContextExtensionObject()) {
- return ScopeTypeCatch;
- }
- return ScopeTypeWith;
- }
-
- // Return the JavaScript object with the content of the current scope.
- Handle<JSObject> ScopeObject() {
- switch (Type()) {
- case ScopeIterator::ScopeTypeGlobal:
- return Handle<JSObject>(CurrentContext()->global());
- break;
- case ScopeIterator::ScopeTypeLocal:
- // Materialize the content of the local scope into a JSObject.
- return MaterializeLocalScope(isolate_, frame_);
- break;
- case ScopeIterator::ScopeTypeWith:
- case ScopeIterator::ScopeTypeCatch:
- // Return the with object.
- return Handle<JSObject>(CurrentContext()->extension());
- break;
- case ScopeIterator::ScopeTypeClosure:
- // Materialize the content of the closure scope into a JSObject.
- return MaterializeClosure(isolate_, CurrentContext());
- break;
- }
- UNREACHABLE();
- return Handle<JSObject>();
- }
-
- // Return the context for this scope. For the local context there might not
- // be an actual context.
- Handle<Context> CurrentContext() {
- if (at_local_ && context_->closure() != *function_) {
- return Handle<Context>();
- }
- return context_;
- }
-
-#ifdef DEBUG
- // Debug print of the content of the current scope.
- void DebugPrint() {
- switch (Type()) {
- case ScopeIterator::ScopeTypeGlobal:
- PrintF("Global:\n");
- CurrentContext()->Print();
- break;
-
- case ScopeIterator::ScopeTypeLocal: {
- PrintF("Local:\n");
- ScopeInfo<> scope_info(function_->shared()->scope_info());
- scope_info.Print();
- if (!CurrentContext().is_null()) {
- CurrentContext()->Print();
- if (CurrentContext()->has_extension()) {
- Handle<JSObject> extension =
- Handle<JSObject>(CurrentContext()->extension());
- if (extension->IsJSContextExtensionObject()) {
- extension->Print();
- }
- }
- }
- break;
- }
-
- case ScopeIterator::ScopeTypeWith: {
- PrintF("With:\n");
- Handle<JSObject> extension =
- Handle<JSObject>(CurrentContext()->extension());
- extension->Print();
- break;
- }
-
- case ScopeIterator::ScopeTypeCatch: {
- PrintF("Catch:\n");
- Handle<JSObject> extension =
- Handle<JSObject>(CurrentContext()->extension());
- extension->Print();
- break;
- }
-
- case ScopeIterator::ScopeTypeClosure: {
- PrintF("Closure:\n");
- CurrentContext()->Print();
- if (CurrentContext()->has_extension()) {
- Handle<JSObject> extension =
- Handle<JSObject>(CurrentContext()->extension());
- if (extension->IsJSContextExtensionObject()) {
- extension->Print();
- }
- }
- break;
- }
-
- default:
- UNREACHABLE();
- }
- PrintF("\n");
- }
-#endif
-
- private:
- Isolate* isolate_;
- JavaScriptFrame* frame_;
- Handle<JSFunction> function_;
- Handle<Context> context_;
- bool local_done_;
- bool at_local_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
-};
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeCount) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
-
- // Check arguments.
- Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check->ToObject(&check)) return maybe_check;
- }
- CONVERT_CHECKED(Smi, wrapped_id, args[1]);
-
- // Get the frame where the debugging is performed.
- StackFrame::Id id = UnwrapFrameId(wrapped_id);
- JavaScriptFrameIterator it(isolate, id);
- JavaScriptFrame* frame = it.frame();
-
- // Count the visible scopes.
- int n = 0;
- for (ScopeIterator it(isolate, frame); !it.Done(); it.Next()) {
- n++;
- }
-
- return Smi::FromInt(n);
-}
-
-
-static const int kScopeDetailsTypeIndex = 0;
-static const int kScopeDetailsObjectIndex = 1;
-static const int kScopeDetailsSize = 2;
-
-// Return an array with scope details
-// args[0]: number: break id
-// args[1]: number: frame index
-// args[2]: number: scope index
-//
-// The array returned contains the following information:
-// 0: Scope type
-// 1: Scope object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeDetails) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
-
- // Check arguments.
- Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check->ToObject(&check)) return maybe_check;
- }
- CONVERT_CHECKED(Smi, wrapped_id, args[1]);
- CONVERT_NUMBER_CHECKED(int, index, Int32, args[2]);
-
- // Get the frame where the debugging is performed.
- StackFrame::Id id = UnwrapFrameId(wrapped_id);
- JavaScriptFrameIterator frame_it(isolate, id);
- JavaScriptFrame* frame = frame_it.frame();
-
- // Find the requested scope.
- int n = 0;
- ScopeIterator it(isolate, frame);
- for (; !it.Done() && n < index; it.Next()) {
- n++;
- }
- if (it.Done()) {
- return isolate->heap()->undefined_value();
- }
-
- // Calculate the size of the result.
- int details_size = kScopeDetailsSize;
- Handle<FixedArray> details = isolate->factory()->NewFixedArray(details_size);
-
- // Fill in scope details.
- details->set(kScopeDetailsTypeIndex, Smi::FromInt(it.Type()));
- Handle<JSObject> scope_object = it.ScopeObject();
- RETURN_IF_EMPTY_HANDLE(isolate, scope_object);
- details->set(kScopeDetailsObjectIndex, *scope_object);
-
- return *isolate->factory()->NewJSArrayWithElements(details);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrintScopes) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 0);
-
-#ifdef DEBUG
- // Print the scopes for the top frame.
- StackFrameLocator locator;
- JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- for (ScopeIterator it(isolate, frame); !it.Done(); it.Next()) {
- it.DebugPrint();
- }
-#endif
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetThreadCount) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- // Check arguments.
- Object* result;
- { MaybeObject* maybe_result = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- // Count all archived V8 threads.
- int n = 0;
- for (ThreadState* thread =
- isolate->thread_manager()->FirstThreadStateInUse();
- thread != NULL;
- thread = thread->Next()) {
- n++;
- }
-
- // Total number of threads is current thread and archived threads.
- return Smi::FromInt(n + 1);
-}
-
-
-static const int kThreadDetailsCurrentThreadIndex = 0;
-static const int kThreadDetailsThreadIdIndex = 1;
-static const int kThreadDetailsSize = 2;
-
-// Return an array with thread details
-// args[0]: number: break id
-// args[1]: number: thread index
-//
-// The array returned contains the following information:
-// 0: Is current thread?
-// 1: Thread id
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetThreadDetails) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
-
- // Check arguments.
- Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check->ToObject(&check)) return maybe_check;
- }
- CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
-
- // Allocate array for result.
- Handle<FixedArray> details =
- isolate->factory()->NewFixedArray(kThreadDetailsSize);
-
- // Thread index 0 is current thread.
- if (index == 0) {
- // Fill the details.
- details->set(kThreadDetailsCurrentThreadIndex,
- isolate->heap()->true_value());
- details->set(kThreadDetailsThreadIdIndex,
- Smi::FromInt(
- isolate->thread_manager()->CurrentId()));
- } else {
- // Find the thread with the requested index.
- int n = 1;
- ThreadState* thread =
- isolate->thread_manager()->FirstThreadStateInUse();
- while (index != n && thread != NULL) {
- thread = thread->Next();
- n++;
- }
- if (thread == NULL) {
- return isolate->heap()->undefined_value();
- }
-
- // Fill the details.
- details->set(kThreadDetailsCurrentThreadIndex,
- isolate->heap()->false_value());
- details->set(kThreadDetailsThreadIdIndex, Smi::FromInt(thread->id()));
- }
-
- // Convert to JS array and return.
- return *isolate->factory()->NewJSArrayWithElements(details);
-}
-
-
-// Sets the disable break state
-// args[0]: disable break state
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDisableBreak) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_BOOLEAN_CHECKED(disable_break, args[0]);
- isolate->debug()->set_disable_break(disable_break);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetBreakLocations) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_ARG_CHECKED(JSFunction, fun, 0);
- Handle<SharedFunctionInfo> shared(fun->shared());
- // Find the number of break points
- Handle<Object> break_locations = Debug::GetSourceBreakLocations(shared);
- if (break_locations->IsUndefined()) return isolate->heap()->undefined_value();
- // Return array as JS array
- return *isolate->factory()->NewJSArrayWithElements(
- Handle<FixedArray>::cast(break_locations));
-}
-
-
-// Set a break point in a function
-// args[0]: function
-// args[1]: number: break source position (within the function source)
-// args[2]: number: break point object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFunctionBreakPoint) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(JSFunction, fun, 0);
- Handle<SharedFunctionInfo> shared(fun->shared());
- CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
- RUNTIME_ASSERT(source_position >= 0);
- Handle<Object> break_point_object_arg = args.at<Object>(2);
-
- // Set break point.
- isolate->debug()->SetBreakPoint(shared, break_point_object_arg,
- &source_position);
-
- return Smi::FromInt(source_position);
-}
-
-
-Object* Runtime::FindSharedFunctionInfoInScript(Isolate* isolate,
- Handle<Script> script,
- int position) {
- // Iterate the heap looking for SharedFunctionInfo generated from the
- // script. The inner most SharedFunctionInfo containing the source position
- // for the requested break point is found.
- // NOTE: This might require several heap iterations. If the SharedFunctionInfo
- // which is found is not compiled it is compiled and the heap is iterated
- // again as the compilation might create inner functions from the newly
- // compiled function and the actual requested break point might be in one of
- // these functions.
- bool done = false;
- // The current candidate for the source position:
- int target_start_position = RelocInfo::kNoPosition;
- Handle<SharedFunctionInfo> target;
- while (!done) {
- HeapIterator iterator;
- for (HeapObject* obj = iterator.next();
- obj != NULL; obj = iterator.next()) {
- if (obj->IsSharedFunctionInfo()) {
- Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj));
- if (shared->script() == *script) {
- // If the SharedFunctionInfo found has the requested script data and
- // contains the source position it is a candidate.
- int start_position = shared->function_token_position();
- if (start_position == RelocInfo::kNoPosition) {
- start_position = shared->start_position();
- }
- if (start_position <= position &&
- position <= shared->end_position()) {
- // If there is no candidate or this function is within the current
- // candidate this is the new candidate.
- if (target.is_null()) {
- target_start_position = start_position;
- target = shared;
- } else {
- if (target_start_position == start_position &&
- shared->end_position() == target->end_position()) {
- // If a top-level function contain only one function
- // declartion the source for the top-level and the function is
- // the same. In that case prefer the non top-level function.
- if (!shared->is_toplevel()) {
- target_start_position = start_position;
- target = shared;
- }
- } else if (target_start_position <= start_position &&
- shared->end_position() <= target->end_position()) {
- // This containment check includes equality as a function inside
- // a top-level function can share either start or end position
- // with the top-level function.
- target_start_position = start_position;
- target = shared;
- }
- }
- }
- }
- }
- }
-
- if (target.is_null()) {
- return isolate->heap()->undefined_value();
- }
-
- // If the candidate found is compiled we are done. NOTE: when lazy
- // compilation of inner functions is introduced some additional checking
- // needs to be done here to compile inner functions.
- done = target->is_compiled();
- if (!done) {
- // If the candidate is not compiled compile it to reveal any inner
- // functions which might contain the requested source position.
- CompileLazyShared(target, KEEP_EXCEPTION);
- }
- }
-
- return *target;
-}
-
-
-// Changes the state of a break point in a script and returns source position
-// where break point was set. NOTE: Regarding performance see the NOTE for
-// GetScriptFromScriptData.
-// args[0]: script to set break point in
-// args[1]: number: break source position (within the script source)
-// args[2]: number: break point object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScriptBreakPoint) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(JSValue, wrapper, 0);
- CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
- RUNTIME_ASSERT(source_position >= 0);
- Handle<Object> break_point_object_arg = args.at<Object>(2);
-
- // Get the script from the script wrapper.
- RUNTIME_ASSERT(wrapper->value()->IsScript());
- Handle<Script> script(Script::cast(wrapper->value()));
-
- Object* result = Runtime::FindSharedFunctionInfoInScript(
- isolate, script, source_position);
- if (!result->IsUndefined()) {
- Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result));
- // Find position within function. The script position might be before the
- // source position of the first function.
- int position;
- if (shared->start_position() > source_position) {
- position = 0;
- } else {
- position = source_position - shared->start_position();
- }
- isolate->debug()->SetBreakPoint(shared, break_point_object_arg, &position);
- position += shared->start_position();
- return Smi::FromInt(position);
- }
- return isolate->heap()->undefined_value();
-}
-
-
-// Clear a break point
-// args[0]: number: break point object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearBreakPoint) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- Handle<Object> break_point_object_arg = args.at<Object>(0);
-
- // Clear break point.
- isolate->debug()->ClearBreakPoint(break_point_object_arg);
-
- return isolate->heap()->undefined_value();
-}
-
-
-// Change the state of break on exceptions.
-// args[0]: Enum value indicating whether to affect caught/uncaught exceptions.
-// args[1]: Boolean indicating on/off.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ChangeBreakOnException) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- RUNTIME_ASSERT(args[0]->IsNumber());
- CONVERT_BOOLEAN_CHECKED(enable, args[1]);
-
- // If the number doesn't match an enum value, the ChangeBreakOnException
- // function will default to affecting caught exceptions.
- ExceptionBreakType type =
- static_cast<ExceptionBreakType>(NumberToUint32(args[0]));
- // Update break point state.
- isolate->debug()->ChangeBreakOnException(type, enable);
- return isolate->heap()->undefined_value();
-}
-
-
-// Returns the state of break on exceptions
-// args[0]: boolean indicating uncaught exceptions
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsBreakOnException) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- RUNTIME_ASSERT(args[0]->IsNumber());
-
- ExceptionBreakType type =
- static_cast<ExceptionBreakType>(NumberToUint32(args[0]));
- bool result = isolate->debug()->IsBreakOnException(type);
- return Smi::FromInt(result);
-}
-
-
-// Prepare for stepping
-// args[0]: break id for checking execution state
-// args[1]: step action from the enumeration StepAction
-// args[2]: number of times to perform the step, for step out it is the number
-// of frames to step down.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- // Check arguments.
- Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check->ToObject(&check)) return maybe_check;
- }
- if (!args[1]->IsNumber() || !args[2]->IsNumber()) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
- }
-
- // Get the step action and check validity.
- StepAction step_action = static_cast<StepAction>(NumberToInt32(args[1]));
- if (step_action != StepIn &&
- step_action != StepNext &&
- step_action != StepOut &&
- step_action != StepInMin &&
- step_action != StepMin) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
- }
-
- // Get the number of steps.
- int step_count = NumberToInt32(args[2]);
- if (step_count < 1) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
- }
-
- // Clear all current stepping setup.
- isolate->debug()->ClearStepping();
-
- // Prepare step.
- isolate->debug()->PrepareStep(static_cast<StepAction>(step_action),
- step_count);
- return isolate->heap()->undefined_value();
-}
-
-
-// Clear all stepping set by PrepareStep.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearStepping) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 0);
- isolate->debug()->ClearStepping();
- return isolate->heap()->undefined_value();
-}
-
-
-// Creates a copy of the with context chain. The copy of the context chain is
-// is linked to the function context supplied.
-static Handle<Context> CopyWithContextChain(Handle<Context> context_chain,
- Handle<Context> function_context) {
- // At the bottom of the chain. Return the function context to link to.
- if (context_chain->is_function_context()) {
- return function_context;
- }
-
- // Recursively copy the with contexts.
- Handle<Context> previous(context_chain->previous());
- Handle<JSObject> extension(JSObject::cast(context_chain->extension()));
- Handle<Context> context = CopyWithContextChain(function_context, previous);
- return context->GetIsolate()->factory()->NewWithContext(
- context, extension, context_chain->IsCatchContext());
-}
-
-
-// Helper function to find or create the arguments object for
-// Runtime_DebugEvaluate.
-static Handle<Object> GetArgumentsObject(Isolate* isolate,
- JavaScriptFrame* frame,
- Handle<JSFunction> function,
- Handle<SerializedScopeInfo> scope_info,
- const ScopeInfo<>* sinfo,
- Handle<Context> function_context) {
- // Try to find the value of 'arguments' to pass as parameter. If it is not
- // found (that is the debugged function does not reference 'arguments' and
- // does not support eval) then create an 'arguments' object.
- int index;
- if (sinfo->number_of_stack_slots() > 0) {
- index = scope_info->StackSlotIndex(isolate->heap()->arguments_symbol());
- if (index != -1) {
- return Handle<Object>(frame->GetExpression(index), isolate);
- }
- }
-
- if (sinfo->number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
- index = scope_info->ContextSlotIndex(isolate->heap()->arguments_symbol(),
- NULL);
- if (index != -1) {
- return Handle<Object>(function_context->get(index), isolate);
- }
- }
-
- const int length = frame->ComputeParametersCount();
- Handle<JSObject> arguments =
- isolate->factory()->NewArgumentsObject(function, length);
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
-
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < length; i++) {
- array->set(i, frame->GetParameter(i), mode);
- }
- arguments->set_elements(*array);
- return arguments;
-}
-
-
-static const char kSourceStr[] =
- "(function(arguments,__source__){return eval(__source__);})";
-
-
-// Evaluate a piece of JavaScript in the context of a stack frame for
-// debugging. This is accomplished by creating a new context which in its
-// extension part has all the parameters and locals of the function on the
-// stack frame. A function which calls eval with the code to evaluate is then
-// compiled in this context and called in this context. As this context
-// replaces the context of the function on the stack frame a new (empty)
-// function is created as well to be used as the closure for the context.
-// This function and the context acts as replacements for the function on the
-// stack frame presenting the same view of the values of parameters and
-// local variables as if the piece of JavaScript was evaluated at the point
-// where the function on the stack frame is currently stopped.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
- HandleScope scope(isolate);
-
- // Check the execution state and decode arguments frame and source to be
- // evaluated.
- ASSERT(args.length() == 5);
- Object* check_result;
- { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check_result->ToObject(&check_result)) {
- return maybe_check_result;
- }
- }
- CONVERT_CHECKED(Smi, wrapped_id, args[1]);
- CONVERT_ARG_CHECKED(String, source, 2);
- CONVERT_BOOLEAN_CHECKED(disable_break, args[3]);
- Handle<Object> additional_context(args[4]);
-
- // Handle the processing of break.
- DisableBreak disable_break_save(disable_break);
-
- // Get the frame where the debugging is performed.
- StackFrame::Id id = UnwrapFrameId(wrapped_id);
- JavaScriptFrameIterator it(isolate, id);
- JavaScriptFrame* frame = it.frame();
- Handle<JSFunction> function(JSFunction::cast(frame->function()));
- Handle<SerializedScopeInfo> scope_info(function->shared()->scope_info());
- ScopeInfo<> sinfo(*scope_info);
-
- // Traverse the saved contexts chain to find the active context for the
- // selected frame.
- SaveContext* save = isolate->save_context();
- while (save != NULL && !save->below(frame)) {
- save = save->prev();
- }
- ASSERT(save != NULL);
- SaveContext savex(isolate);
- isolate->set_context(*(save->context()));
-
- // Create the (empty) function replacing the function on the stack frame for
- // the purpose of evaluating in the context created below. It is important
- // that this function does not describe any parameters and local variables
- // in the context. If it does then this will cause problems with the lookup
- // in Context::Lookup, where context slots for parameters and local variables
- // are looked at before the extension object.
- Handle<JSFunction> go_between =
- isolate->factory()->NewFunction(isolate->factory()->empty_string(),
- isolate->factory()->undefined_value());
- go_between->set_context(function->context());
-#ifdef DEBUG
- ScopeInfo<> go_between_sinfo(go_between->shared()->scope_info());
- ASSERT(go_between_sinfo.number_of_parameters() == 0);
- ASSERT(go_between_sinfo.number_of_context_slots() == 0);
-#endif
-
- // Materialize the content of the local scope into a JSObject.
- Handle<JSObject> local_scope = MaterializeLocalScope(isolate, frame);
- RETURN_IF_EMPTY_HANDLE(isolate, local_scope);
-
- // Allocate a new context for the debug evaluation and set the extension
- // object build.
- Handle<Context> context =
- isolate->factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS,
- go_between);
- context->set_extension(*local_scope);
- // Copy any with contexts present and chain them in front of this context.
- Handle<Context> frame_context(Context::cast(frame->context()));
- Handle<Context> function_context(frame_context->fcontext());
- context = CopyWithContextChain(frame_context, context);
-
- if (additional_context->IsJSObject()) {
- context = isolate->factory()->NewWithContext(context,
- Handle<JSObject>::cast(additional_context), false);
- }
-
- // Wrap the evaluation statement in a new function compiled in the newly
- // created context. The function has one parameter which has to be called
- // 'arguments'. This it to have access to what would have been 'arguments' in
- // the function being debugged.
- // function(arguments,__source__) {return eval(__source__);}
-
- Handle<String> function_source =
- isolate->factory()->NewStringFromAscii(
- Vector<const char>(kSourceStr, sizeof(kSourceStr) - 1));
-
- // Currently, the eval code will be executed in non-strict mode,
- // even in the strict code context.
- Handle<SharedFunctionInfo> shared =
- Compiler::CompileEval(function_source,
- context,
- context->IsGlobalContext(),
- kNonStrictMode);
- if (shared.is_null()) return Failure::Exception();
- Handle<JSFunction> compiled_function =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context);
-
- // Invoke the result of the compilation to get the evaluation function.
- bool has_pending_exception;
- Handle<Object> receiver(frame->receiver(), isolate);
- Handle<Object> evaluation_function =
- Execution::Call(compiled_function, receiver, 0, NULL,
- &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
-
- Handle<Object> arguments = GetArgumentsObject(isolate, frame,
- function, scope_info,
- &sinfo, function_context);
-
- // Invoke the evaluation function and return the result.
- const int argc = 2;
- Object** argv[argc] = { arguments.location(),
- Handle<Object>::cast(source).location() };
- Handle<Object> result =
- Execution::Call(Handle<JSFunction>::cast(evaluation_function), receiver,
- argc, argv, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
-
- // Skip the global proxy as it has no properties and always delegates to the
- // real global object.
- if (result->IsJSGlobalProxy()) {
- result = Handle<JSObject>(JSObject::cast(result->GetPrototype()));
- }
-
- return *result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
- HandleScope scope(isolate);
-
- // Check the execution state and decode arguments frame and source to be
- // evaluated.
- ASSERT(args.length() == 4);
- Object* check_result;
- { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check_result->ToObject(&check_result)) {
- return maybe_check_result;
- }
- }
- CONVERT_ARG_CHECKED(String, source, 1);
- CONVERT_BOOLEAN_CHECKED(disable_break, args[2]);
- Handle<Object> additional_context(args[3]);
-
- // Handle the processing of break.
- DisableBreak disable_break_save(disable_break);
-
- // Enter the top context from before the debugger was invoked.
- SaveContext save(isolate);
- SaveContext* top = &save;
- while (top != NULL && *top->context() == *isolate->debug()->debug_context()) {
- top = top->prev();
- }
- if (top != NULL) {
- isolate->set_context(*top->context());
- }
-
- // Get the global context now set to the top context from before the
- // debugger was invoked.
- Handle<Context> context = isolate->global_context();
-
- bool is_global = true;
-
- if (additional_context->IsJSObject()) {
- // Create a function context first, than put 'with' context on top of it.
- Handle<JSFunction> go_between = isolate->factory()->NewFunction(
- isolate->factory()->empty_string(),
- isolate->factory()->undefined_value());
- go_between->set_context(*context);
- context =
- isolate->factory()->NewFunctionContext(
- Context::MIN_CONTEXT_SLOTS, go_between);
- context->set_extension(JSObject::cast(*additional_context));
- is_global = false;
- }
-
- // Compile the source to be evaluated.
- // Currently, the eval code will be executed in non-strict mode,
- // even in the strict code context.
- Handle<SharedFunctionInfo> shared =
- Compiler::CompileEval(source, context, is_global, kNonStrictMode);
- if (shared.is_null()) return Failure::Exception();
- Handle<JSFunction> compiled_function =
- Handle<JSFunction>(
- isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
- context));
-
- // Invoke the result of the compilation to get the evaluation function.
- bool has_pending_exception;
- Handle<Object> receiver = isolate->global();
- Handle<Object> result =
- Execution::Call(compiled_function, receiver, 0, NULL,
- &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
- return *result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetLoadedScripts) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 0);
-
- // Fill the script objects.
- Handle<FixedArray> instances = isolate->debug()->GetLoadedScripts();
-
- // Convert the script objects to proper JS objects.
- for (int i = 0; i < instances->length(); i++) {
- Handle<Script> script = Handle<Script>(Script::cast(instances->get(i)));
- // Get the script wrapper in a local handle before calling GetScriptWrapper,
- // because using
- // instances->set(i, *GetScriptWrapper(script))
- // is unsafe as GetScriptWrapper might call GC and the C++ compiler might
- // already have deferenced the instances handle.
- Handle<JSValue> wrapper = GetScriptWrapper(script);
- instances->set(i, *wrapper);
- }
-
- // Return result as a JS array.
- Handle<JSObject> result =
- isolate->factory()->NewJSObject(isolate->array_function());
- Handle<JSArray>::cast(result)->SetContent(*instances);
- return *result;
-}
-
-
-// Helper function used by Runtime_DebugReferencedBy below.
-static int DebugReferencedBy(JSObject* target,
- Object* instance_filter, int max_references,
- FixedArray* instances, int instances_size,
- JSFunction* arguments_function) {
- NoHandleAllocation ha;
- AssertNoAllocation no_alloc;
-
- // Iterate the heap.
- int count = 0;
- JSObject* last = NULL;
- HeapIterator iterator;
- HeapObject* heap_obj = NULL;
- while (((heap_obj = iterator.next()) != NULL) &&
- (max_references == 0 || count < max_references)) {
- // Only look at all JSObjects.
- if (heap_obj->IsJSObject()) {
- // Skip context extension objects and argument arrays as these are
- // checked in the context of functions using them.
- JSObject* obj = JSObject::cast(heap_obj);
- if (obj->IsJSContextExtensionObject() ||
- obj->map()->constructor() == arguments_function) {
- continue;
- }
-
- // Check if the JS object has a reference to the object looked for.
- if (obj->ReferencesObject(target)) {
- // Check instance filter if supplied. This is normally used to avoid
- // references from mirror objects (see Runtime_IsInPrototypeChain).
- if (!instance_filter->IsUndefined()) {
- Object* V = obj;
- while (true) {
- Object* prototype = V->GetPrototype();
- if (prototype->IsNull()) {
- break;
- }
- if (instance_filter == prototype) {
- obj = NULL; // Don't add this object.
- break;
- }
- V = prototype;
- }
- }
-
- if (obj != NULL) {
- // Valid reference found add to instance array if supplied an update
- // count.
- if (instances != NULL && count < instances_size) {
- instances->set(count, obj);
- }
- last = obj;
- count++;
- }
- }
- }
- }
-
- // Check for circular reference only. This can happen when the object is only
- // referenced from mirrors and has a circular reference in which case the
- // object is not really alive and would have been garbage collected if not
- // referenced from the mirror.
- if (count == 1 && last == target) {
- count = 0;
- }
-
- // Return the number of referencing objects found.
- return count;
-}
-
-
-// Scan the heap for objects with direct references to an object
-// args[0]: the object to find references to
-// args[1]: constructor function for instances to exclude (Mirror)
-// args[2]: the the maximum number of objects to return
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
- ASSERT(args.length() == 3);
-
- // First perform a full GC in order to avoid references from dead objects.
- isolate->heap()->CollectAllGarbage(false);
-
- // Check parameters.
- CONVERT_CHECKED(JSObject, target, args[0]);
- Object* instance_filter = args[1];
- RUNTIME_ASSERT(instance_filter->IsUndefined() ||
- instance_filter->IsJSObject());
- CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[2]);
- RUNTIME_ASSERT(max_references >= 0);
-
- // Get the constructor function for context extension and arguments array.
- JSObject* arguments_boilerplate =
- isolate->context()->global_context()->arguments_boilerplate();
- JSFunction* arguments_function =
- JSFunction::cast(arguments_boilerplate->map()->constructor());
-
- // Get the number of referencing objects.
- int count;
- count = DebugReferencedBy(target, instance_filter, max_references,
- NULL, 0, arguments_function);
-
- // Allocate an array to hold the result.
- Object* object;
- { MaybeObject* maybe_object = isolate->heap()->AllocateFixedArray(count);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- FixedArray* instances = FixedArray::cast(object);
-
- // Fill the referencing objects.
- count = DebugReferencedBy(target, instance_filter, max_references,
- instances, count, arguments_function);
-
- // Return result as JS array.
- Object* result;
- { MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
- isolate->context()->global_context()->array_function());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- JSArray::cast(result)->SetContent(instances);
- return result;
-}
-
-
-// Helper function used by Runtime_DebugConstructedBy below.
-static int DebugConstructedBy(JSFunction* constructor, int max_references,
- FixedArray* instances, int instances_size) {
- AssertNoAllocation no_alloc;
-
- // Iterate the heap.
- int count = 0;
- HeapIterator iterator;
- HeapObject* heap_obj = NULL;
- while (((heap_obj = iterator.next()) != NULL) &&
- (max_references == 0 || count < max_references)) {
- // Only look at all JSObjects.
- if (heap_obj->IsJSObject()) {
- JSObject* obj = JSObject::cast(heap_obj);
- if (obj->map()->constructor() == constructor) {
- // Valid reference found add to instance array if supplied an update
- // count.
- if (instances != NULL && count < instances_size) {
- instances->set(count, obj);
- }
- count++;
- }
- }
- }
-
- // Return the number of referencing objects found.
- return count;
-}
-
-
-// Scan the heap for objects constructed by a specific function.
-// args[0]: the constructor to find instances of
-// args[1]: the the maximum number of objects to return
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
- ASSERT(args.length() == 2);
-
- // First perform a full GC in order to avoid dead objects.
- isolate->heap()->CollectAllGarbage(false);
-
- // Check parameters.
- CONVERT_CHECKED(JSFunction, constructor, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[1]);
- RUNTIME_ASSERT(max_references >= 0);
-
- // Get the number of referencing objects.
- int count;
- count = DebugConstructedBy(constructor, max_references, NULL, 0);
-
- // Allocate an array to hold the result.
- Object* object;
- { MaybeObject* maybe_object = isolate->heap()->AllocateFixedArray(count);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- FixedArray* instances = FixedArray::cast(object);
-
- // Fill the referencing objects.
- count = DebugConstructedBy(constructor, max_references, instances, count);
-
- // Return result as JS array.
- Object* result;
- { MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
- isolate->context()->global_context()->array_function());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- JSArray::cast(result)->SetContent(instances);
- return result;
-}
-
-
-// Find the effective prototype object as returned by __proto__.
-// args[0]: the object to find the prototype for.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPrototype) {
- ASSERT(args.length() == 1);
-
- CONVERT_CHECKED(JSObject, obj, args[0]);
-
- // Use the __proto__ accessor.
- return Accessors::ObjectPrototype.getter(obj, NULL);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SystemBreak) {
- ASSERT(args.length() == 0);
- CPU::DebugBreak();
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleFunction) {
-#ifdef DEBUG
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- // Get the function and make sure it is compiled.
- CONVERT_ARG_CHECKED(JSFunction, func, 0);
- Handle<SharedFunctionInfo> shared(func->shared());
- if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
- return Failure::Exception();
- }
- func->code()->PrintLn();
-#endif // DEBUG
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleConstructor) {
-#ifdef DEBUG
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- // Get the function and make sure it is compiled.
- CONVERT_ARG_CHECKED(JSFunction, func, 0);
- Handle<SharedFunctionInfo> shared(func->shared());
- if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
- return Failure::Exception();
- }
- shared->construct_stub()->PrintLn();
-#endif // DEBUG
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetInferredName) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_CHECKED(JSFunction, f, args[0]);
- return f->shared()->inferred_name();
-}
-
-
-static int FindSharedFunctionInfosForScript(Script* script,
- FixedArray* buffer) {
- AssertNoAllocation no_allocations;
-
- int counter = 0;
- int buffer_size = buffer->length();
- HeapIterator iterator;
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- ASSERT(obj != NULL);
- if (!obj->IsSharedFunctionInfo()) {
- continue;
- }
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
- if (shared->script() != script) {
- continue;
- }
- if (counter < buffer_size) {
- buffer->set(counter, shared);
- }
- counter++;
- }
- return counter;
-}
-
-// For a script finds all SharedFunctionInfo's in the heap that points
-// to this script. Returns JSArray of SharedFunctionInfo wrapped
-// in OpaqueReferences.
-RUNTIME_FUNCTION(MaybeObject*,
- Runtime_LiveEditFindSharedFunctionInfosForScript) {
- ASSERT(args.length() == 1);
- HandleScope scope(isolate);
- CONVERT_CHECKED(JSValue, script_value, args[0]);
-
- Handle<Script> script = Handle<Script>(Script::cast(script_value->value()));
-
- const int kBufferSize = 32;
-
- Handle<FixedArray> array;
- array = isolate->factory()->NewFixedArray(kBufferSize);
- int number = FindSharedFunctionInfosForScript(*script, *array);
- if (number > kBufferSize) {
- array = isolate->factory()->NewFixedArray(number);
- FindSharedFunctionInfosForScript(*script, *array);
- }
-
- Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(array);
- result->set_length(Smi::FromInt(number));
-
- LiveEdit::WrapSharedFunctionInfos(result);
-
- return *result;
-}
-
-// For a script calculates compilation information about all its functions.
-// The script source is explicitly specified by the second argument.
-// The source of the actual script is not used, however it is important that
-// all generated code keeps references to this particular instance of script.
-// Returns a JSArray of compilation infos. The array is ordered so that
-// each function with all its descendant is always stored in a continues range
-// with the function itself going first. The root function is a script function.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditGatherCompileInfo) {
- ASSERT(args.length() == 2);
- HandleScope scope(isolate);
- CONVERT_CHECKED(JSValue, script, args[0]);
- CONVERT_ARG_CHECKED(String, source, 1);
- Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
-
- JSArray* result = LiveEdit::GatherCompileInfo(script_handle, source);
-
- if (isolate->has_pending_exception()) {
- return Failure::Exception();
- }
-
- return result;
-}
-
-// Changes the source of the script to a new_source.
-// If old_script_name is provided (i.e. is a String), also creates a copy of
-// the script with its original source and sends notification to debugger.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceScript) {
- ASSERT(args.length() == 3);
- HandleScope scope(isolate);
- CONVERT_CHECKED(JSValue, original_script_value, args[0]);
- CONVERT_ARG_CHECKED(String, new_source, 1);
- Handle<Object> old_script_name(args[2], isolate);
-
- CONVERT_CHECKED(Script, original_script_pointer,
- original_script_value->value());
- Handle<Script> original_script(original_script_pointer);
-
- Object* old_script = LiveEdit::ChangeScriptSource(original_script,
- new_source,
- old_script_name);
-
- if (old_script->IsScript()) {
- Handle<Script> script_handle(Script::cast(old_script));
- return *(GetScriptWrapper(script_handle));
- } else {
- return isolate->heap()->null_value();
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSourceUpdated) {
- ASSERT(args.length() == 1);
- HandleScope scope(isolate);
- CONVERT_ARG_CHECKED(JSArray, shared_info, 0);
- return LiveEdit::FunctionSourceUpdated(shared_info);
-}
-
-
-// Replaces code of SharedFunctionInfo with a new one.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceFunctionCode) {
- ASSERT(args.length() == 2);
- HandleScope scope(isolate);
- CONVERT_ARG_CHECKED(JSArray, new_compile_info, 0);
- CONVERT_ARG_CHECKED(JSArray, shared_info, 1);
-
- return LiveEdit::ReplaceFunctionCode(new_compile_info, shared_info);
-}
-
-// Connects SharedFunctionInfo to another script.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSetScript) {
- ASSERT(args.length() == 2);
- HandleScope scope(isolate);
- Handle<Object> function_object(args[0], isolate);
- Handle<Object> script_object(args[1], isolate);
-
- if (function_object->IsJSValue()) {
- Handle<JSValue> function_wrapper = Handle<JSValue>::cast(function_object);
- if (script_object->IsJSValue()) {
- CONVERT_CHECKED(Script, script, JSValue::cast(*script_object)->value());
- script_object = Handle<Object>(script, isolate);
- }
-
- LiveEdit::SetFunctionScript(function_wrapper, script_object);
- } else {
- // Just ignore this. We may not have a SharedFunctionInfo for some functions
- // and we check it in this function.
- }
-
- return isolate->heap()->undefined_value();
-}
-
-
-// In a code of a parent function replaces original function as embedded object
-// with a substitution one.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceRefToNestedFunction) {
- ASSERT(args.length() == 3);
- HandleScope scope(isolate);
-
- CONVERT_ARG_CHECKED(JSValue, parent_wrapper, 0);
- CONVERT_ARG_CHECKED(JSValue, orig_wrapper, 1);
- CONVERT_ARG_CHECKED(JSValue, subst_wrapper, 2);
-
- LiveEdit::ReplaceRefToNestedFunction(parent_wrapper, orig_wrapper,
- subst_wrapper);
-
- return isolate->heap()->undefined_value();
-}
-
-
-// Updates positions of a shared function info (first parameter) according
-// to script source change. Text change is described in second parameter as
-// array of groups of 3 numbers:
-// (change_begin, change_end, change_end_new_position).
-// Each group describes a change in text; groups are sorted by change_begin.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditPatchFunctionPositions) {
- ASSERT(args.length() == 2);
- HandleScope scope(isolate);
- CONVERT_ARG_CHECKED(JSArray, shared_array, 0);
- CONVERT_ARG_CHECKED(JSArray, position_change_array, 1);
-
- return LiveEdit::PatchFunctionPositions(shared_array, position_change_array);
-}
-
-
-// For array of SharedFunctionInfo's (each wrapped in JSValue)
-// checks that none of them have activations on stacks (of any thread).
-// Returns array of the same length with corresponding results of
-// LiveEdit::FunctionPatchabilityStatus type.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCheckAndDropActivations) {
- ASSERT(args.length() == 2);
- HandleScope scope(isolate);
- CONVERT_ARG_CHECKED(JSArray, shared_array, 0);
- CONVERT_BOOLEAN_CHECKED(do_drop, args[1]);
-
- return *LiveEdit::CheckAndDropActivations(shared_array, do_drop);
-}
-
-// Compares 2 strings line-by-line, then token-wise and returns diff in form
-// of JSArray of triplets (pos1, pos1_end, pos2_end) describing list
-// of diff chunks.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCompareStrings) {
- ASSERT(args.length() == 2);
- HandleScope scope(isolate);
- CONVERT_ARG_CHECKED(String, s1, 0);
- CONVERT_ARG_CHECKED(String, s2, 1);
-
- return *LiveEdit::CompareStrings(s1, s2);
-}
-
-
-// A testing entry. Returns statement position which is the closest to
-// source_position.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionCodePositionFromSource) {
- ASSERT(args.length() == 2);
- HandleScope scope(isolate);
- CONVERT_ARG_CHECKED(JSFunction, function, 0);
- CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
-
- Handle<Code> code(function->code(), isolate);
-
- if (code->kind() != Code::FUNCTION &&
- code->kind() != Code::OPTIMIZED_FUNCTION) {
- return isolate->heap()->undefined_value();
- }
-
- RelocIterator it(*code, RelocInfo::ModeMask(RelocInfo::STATEMENT_POSITION));
- int closest_pc = 0;
- int distance = kMaxInt;
- while (!it.done()) {
- int statement_position = static_cast<int>(it.rinfo()->data());
- // Check if this break point is closer that what was previously found.
- if (source_position <= statement_position &&
- statement_position - source_position < distance) {
- closest_pc =
- static_cast<int>(it.rinfo()->pc() - code->instruction_start());
- distance = statement_position - source_position;
- // Check whether we can't get any closer.
- if (distance == 0) break;
- }
- it.next();
- }
-
- return Smi::FromInt(closest_pc);
-}
-
-
-// Calls specified function with or without entering the debugger.
-// This is used in unit tests to run code as if debugger is entered or simply
-// to have a stack with C++ frame in the middle.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ExecuteInDebugContext) {
- ASSERT(args.length() == 2);
- HandleScope scope(isolate);
- CONVERT_ARG_CHECKED(JSFunction, function, 0);
- CONVERT_BOOLEAN_CHECKED(without_debugger, args[1]);
-
- Handle<Object> result;
- bool pending_exception;
- {
- if (without_debugger) {
- result = Execution::Call(function, isolate->global(), 0, NULL,
- &pending_exception);
- } else {
- EnterDebugger enter_debugger;
- result = Execution::Call(function, isolate->global(), 0, NULL,
- &pending_exception);
- }
- }
- if (!pending_exception) {
- return *result;
- } else {
- return Failure::Exception();
- }
-}
-
-
-// Sets a v8 flag.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFlags) {
- CONVERT_CHECKED(String, arg, args[0]);
- SmartPointer<char> flags =
- arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- FlagList::SetFlagsFromString(*flags, StrLength(*flags));
- return isolate->heap()->undefined_value();
-}
-
-
-// Performs a GC.
-// Presently, it only does a full GC.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectGarbage) {
- isolate->heap()->CollectAllGarbage(true);
- return isolate->heap()->undefined_value();
-}
-
-
-// Gets the current heap usage.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHeapUsage) {
- int usage = static_cast<int>(isolate->heap()->SizeOfObjects());
- if (!Smi::IsValid(usage)) {
- return *isolate->factory()->NewNumberFromInt(usage);
- }
- return Smi::FromInt(usage);
-}
-
-
-// Captures a live object list from the present heap.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLOLEnabled) {
-#ifdef LIVE_OBJECT_LIST
- return isolate->heap()->true_value();
-#else
- return isolate->heap()->false_value();
-#endif
-}
-
-
-// Captures a live object list from the present heap.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CaptureLOL) {
-#ifdef LIVE_OBJECT_LIST
- return LiveObjectList::Capture();
-#else
- return isolate->heap()->undefined_value();
-#endif
-}
-
-
-// Deletes the specified live object list.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteLOL) {
-#ifdef LIVE_OBJECT_LIST
- CONVERT_SMI_CHECKED(id, args[0]);
- bool success = LiveObjectList::Delete(id);
- return success ? isolate->heap()->true_value() :
- isolate->heap()->false_value();
-#else
- return isolate->heap()->undefined_value();
-#endif
-}
-
-
-// Generates the response to a debugger request for a dump of the objects
-// contained in the difference between the captured live object lists
-// specified by id1 and id2.
-// If id1 is 0 (i.e. not a valid lol), then the whole of lol id2 will be
-// dumped.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DumpLOL) {
-#ifdef LIVE_OBJECT_LIST
- HandleScope scope;
- CONVERT_SMI_CHECKED(id1, args[0]);
- CONVERT_SMI_CHECKED(id2, args[1]);
- CONVERT_SMI_CHECKED(start, args[2]);
- CONVERT_SMI_CHECKED(count, args[3]);
- CONVERT_ARG_CHECKED(JSObject, filter_obj, 4);
- EnterDebugger enter_debugger;
- return LiveObjectList::Dump(id1, id2, start, count, filter_obj);
-#else
- return isolate->heap()->undefined_value();
-#endif
-}
-
-
-// Gets the specified object as requested by the debugger.
-// This is only used for obj ids shown in live object lists.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObj) {
-#ifdef LIVE_OBJECT_LIST
- CONVERT_SMI_CHECKED(obj_id, args[0]);
- Object* result = LiveObjectList::GetObj(obj_id);
- return result;
-#else
- return isolate->heap()->undefined_value();
-#endif
-}
-
-
-// Gets the obj id for the specified address if valid.
-// This is only used for obj ids shown in live object lists.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObjId) {
-#ifdef LIVE_OBJECT_LIST
- HandleScope scope;
- CONVERT_ARG_CHECKED(String, address, 0);
- Object* result = LiveObjectList::GetObjId(address);
- return result;
-#else
- return isolate->heap()->undefined_value();
-#endif
-}
-
-
-// Gets the retainers that references the specified object alive.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObjRetainers) {
-#ifdef LIVE_OBJECT_LIST
- HandleScope scope;
- CONVERT_SMI_CHECKED(obj_id, args[0]);
- RUNTIME_ASSERT(args[1]->IsUndefined() || args[1]->IsJSObject());
- RUNTIME_ASSERT(args[2]->IsUndefined() || args[2]->IsBoolean());
- RUNTIME_ASSERT(args[3]->IsUndefined() || args[3]->IsSmi());
- RUNTIME_ASSERT(args[4]->IsUndefined() || args[4]->IsSmi());
- CONVERT_ARG_CHECKED(JSObject, filter_obj, 5);
-
- Handle<JSObject> instance_filter;
- if (args[1]->IsJSObject()) {
- instance_filter = args.at<JSObject>(1);
- }
- bool verbose = false;
- if (args[2]->IsBoolean()) {
- verbose = args[2]->IsTrue();
- }
- int start = 0;
- if (args[3]->IsSmi()) {
- start = Smi::cast(args[3])->value();
- }
- int limit = Smi::kMaxValue;
- if (args[4]->IsSmi()) {
- limit = Smi::cast(args[4])->value();
- }
-
- return LiveObjectList::GetObjRetainers(obj_id,
- instance_filter,
- verbose,
- start,
- limit,
- filter_obj);
-#else
- return isolate->heap()->undefined_value();
-#endif
-}
-
-
-// Gets the reference path between 2 objects.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLPath) {
-#ifdef LIVE_OBJECT_LIST
- HandleScope scope;
- CONVERT_SMI_CHECKED(obj_id1, args[0]);
- CONVERT_SMI_CHECKED(obj_id2, args[1]);
- RUNTIME_ASSERT(args[2]->IsUndefined() || args[2]->IsJSObject());
-
- Handle<JSObject> instance_filter;
- if (args[2]->IsJSObject()) {
- instance_filter = args.at<JSObject>(2);
- }
-
- Object* result =
- LiveObjectList::GetPath(obj_id1, obj_id2, instance_filter);
- return result;
-#else
- return isolate->heap()->undefined_value();
-#endif
-}
-
-
-// Generates the response to a debugger request for a list of all
-// previously captured live object lists.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InfoLOL) {
-#ifdef LIVE_OBJECT_LIST
- CONVERT_SMI_CHECKED(start, args[0]);
- CONVERT_SMI_CHECKED(count, args[1]);
- return LiveObjectList::Info(start, count);
-#else
- return isolate->heap()->undefined_value();
-#endif
-}
-
-
-// Gets a dump of the specified object as requested by the debugger.
-// This is only used for obj ids shown in live object lists.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PrintLOLObj) {
-#ifdef LIVE_OBJECT_LIST
- HandleScope scope;
- CONVERT_SMI_CHECKED(obj_id, args[0]);
- Object* result = LiveObjectList::PrintObj(obj_id);
- return result;
-#else
- return isolate->heap()->undefined_value();
-#endif
-}
-
-
-// Resets and releases all previously captured live object lists.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ResetLOL) {
-#ifdef LIVE_OBJECT_LIST
- LiveObjectList::Reset();
- return isolate->heap()->undefined_value();
-#else
- return isolate->heap()->undefined_value();
-#endif
-}
-
-
-// Generates the response to a debugger request for a summary of the types
-// of objects in the difference between the captured live object lists
-// specified by id1 and id2.
-// If id1 is 0 (i.e. not a valid lol), then the whole of lol id2 will be
-// summarized.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SummarizeLOL) {
-#ifdef LIVE_OBJECT_LIST
- HandleScope scope;
- CONVERT_SMI_CHECKED(id1, args[0]);
- CONVERT_SMI_CHECKED(id2, args[1]);
- CONVERT_ARG_CHECKED(JSObject, filter_obj, 2);
-
- EnterDebugger enter_debugger;
- return LiveObjectList::Summarize(id1, id2, filter_obj);
-#else
- return isolate->heap()->undefined_value();
-#endif
-}
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerResume) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_CHECKED(Smi, smi_modules, args[0]);
- CONVERT_CHECKED(Smi, smi_tag, args[1]);
- v8::V8::ResumeProfilerEx(smi_modules->value(), smi_tag->value());
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerPause) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_CHECKED(Smi, smi_modules, args[0]);
- CONVERT_CHECKED(Smi, smi_tag, args[1]);
- v8::V8::PauseProfilerEx(smi_modules->value(), smi_tag->value());
- return isolate->heap()->undefined_value();
-}
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-// Finds the script object from the script data. NOTE: This operation uses
-// heap traversal to find the function generated for the source position
-// for the requested break point. For lazily compiled functions several heap
-// traversals might be required rendering this operation as a rather slow
-// operation. However for setting break points which is normally done through
-// some kind of user interaction the performance is not crucial.
-static Handle<Object> Runtime_GetScriptFromScriptName(
- Handle<String> script_name) {
- // Scan the heap for Script objects to find the script with the requested
- // script data.
- Handle<Script> script;
- HeapIterator iterator;
- HeapObject* obj = NULL;
- while (script.is_null() && ((obj = iterator.next()) != NULL)) {
- // If a script is found check if it has the script data requested.
- if (obj->IsScript()) {
- if (Script::cast(obj)->name()->IsString()) {
- if (String::cast(Script::cast(obj)->name())->Equals(*script_name)) {
- script = Handle<Script>(Script::cast(obj));
- }
- }
- }
- }
-
- // If no script with the requested script data is found return undefined.
- if (script.is_null()) return FACTORY->undefined_value();
-
- // Return the script found.
- return GetScriptWrapper(script);
-}
-
-
-// Get the script object from script data. NOTE: Regarding performance
-// see the NOTE for GetScriptFromScriptData.
-// args[0]: script data for the script to find the source for
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScript) {
- HandleScope scope(isolate);
-
- ASSERT(args.length() == 1);
-
- CONVERT_CHECKED(String, script_name, args[0]);
-
- // Find the requested script.
- Handle<Object> result =
- Runtime_GetScriptFromScriptName(Handle<String>(script_name));
- return *result;
-}
-
-
-// Determines whether the given stack frame should be displayed in
-// a stack trace. The caller is the error constructor that asked
-// for the stack trace to be collected. The first time a construct
-// call to this function is encountered it is skipped. The seen_caller
-// in/out parameter is used to remember if the caller has been seen
-// yet.
-static bool ShowFrameInStackTrace(StackFrame* raw_frame, Object* caller,
- bool* seen_caller) {
- // Only display JS frames.
- if (!raw_frame->is_java_script())
- return false;
- JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
- Object* raw_fun = frame->function();
- // Not sure when this can happen but skip it just in case.
- if (!raw_fun->IsJSFunction())
- return false;
- if ((raw_fun == caller) && !(*seen_caller)) {
- *seen_caller = true;
- return false;
- }
- // Skip all frames until we've seen the caller. Also, skip the most
- // obvious builtin calls. Some builtin calls (such as Number.ADD
- // which is invoked using 'call') are very difficult to recognize
- // so we're leaving them in for now.
- return *seen_caller && !frame->receiver()->IsJSBuiltinsObject();
-}
-
-
-// Collect the raw data for a stack trace. Returns an array of 4
-// element segments each containing a receiver, function, code and
-// native code offset.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectStackTrace) {
- ASSERT_EQ(args.length(), 2);
- Handle<Object> caller = args.at<Object>(0);
- CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[1]);
-
- HandleScope scope(isolate);
- Factory* factory = isolate->factory();
-
- limit = Max(limit, 0); // Ensure that limit is not negative.
- int initial_size = Min(limit, 10);
- Handle<FixedArray> elements =
- factory->NewFixedArrayWithHoles(initial_size * 4);
-
- StackFrameIterator iter(isolate);
- // If the caller parameter is a function we skip frames until we're
- // under it before starting to collect.
- bool seen_caller = !caller->IsJSFunction();
- int cursor = 0;
- int frames_seen = 0;
- while (!iter.done() && frames_seen < limit) {
- StackFrame* raw_frame = iter.frame();
- if (ShowFrameInStackTrace(raw_frame, *caller, &seen_caller)) {
- frames_seen++;
- JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
- // Set initial size to the maximum inlining level + 1 for the outermost
- // function.
- List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
- frame->Summarize(&frames);
- for (int i = frames.length() - 1; i >= 0; i--) {
- if (cursor + 4 > elements->length()) {
- int new_capacity = JSObject::NewElementsCapacity(elements->length());
- Handle<FixedArray> new_elements =
- factory->NewFixedArrayWithHoles(new_capacity);
- for (int i = 0; i < cursor; i++) {
- new_elements->set(i, elements->get(i));
- }
- elements = new_elements;
- }
- ASSERT(cursor + 4 <= elements->length());
-
- Handle<Object> recv = frames[i].receiver();
- Handle<JSFunction> fun = frames[i].function();
- Handle<Code> code = frames[i].code();
- Handle<Smi> offset(Smi::FromInt(frames[i].offset()));
- elements->set(cursor++, *recv);
- elements->set(cursor++, *fun);
- elements->set(cursor++, *code);
- elements->set(cursor++, *offset);
- }
- }
- iter.Advance();
- }
- Handle<JSArray> result = factory->NewJSArrayWithElements(elements);
- result->set_length(Smi::FromInt(cursor));
- return *result;
-}
-
-
-// Returns V8 version as a string.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetV8Version) {
- ASSERT_EQ(args.length(), 0);
-
- NoHandleAllocation ha;
-
- const char* version_string = v8::V8::GetVersion();
-
- return isolate->heap()->AllocateStringFromAscii(CStrVector(version_string),
- NOT_TENURED);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) {
- ASSERT(args.length() == 2);
- OS::PrintError("abort: %s\n", reinterpret_cast<char*>(args[0]) +
- Smi::cast(args[1])->value());
- isolate->PrintStack();
- OS::Abort();
- UNREACHABLE();
- return NULL;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
- // This is only called from codegen, so checks might be more lax.
- CONVERT_CHECKED(JSFunctionResultCache, cache, args[0]);
- Object* key = args[1];
-
- int finger_index = cache->finger_index();
- Object* o = cache->get(finger_index);
- if (o == key) {
- // The fastest case: hit the same place again.
- return cache->get(finger_index + 1);
- }
-
- for (int i = finger_index - 2;
- i >= JSFunctionResultCache::kEntriesIndex;
- i -= 2) {
- o = cache->get(i);
- if (o == key) {
- cache->set_finger_index(i);
- return cache->get(i + 1);
- }
- }
-
- int size = cache->size();
- ASSERT(size <= cache->length());
-
- for (int i = size - 2; i > finger_index; i -= 2) {
- o = cache->get(i);
- if (o == key) {
- cache->set_finger_index(i);
- return cache->get(i + 1);
- }
- }
-
- // There is no value in the cache. Invoke the function and cache result.
- HandleScope scope(isolate);
-
- Handle<JSFunctionResultCache> cache_handle(cache);
- Handle<Object> key_handle(key);
- Handle<Object> value;
- {
- Handle<JSFunction> factory(JSFunction::cast(
- cache_handle->get(JSFunctionResultCache::kFactoryIndex)));
- // TODO(antonm): consider passing a receiver when constructing a cache.
- Handle<Object> receiver(isolate->global_context()->global());
- // This handle is nor shared, nor used later, so it's safe.
- Object** argv[] = { key_handle.location() };
- bool pending_exception = false;
- value = Execution::Call(factory,
- receiver,
- 1,
- argv,
- &pending_exception);
- if (pending_exception) return Failure::Exception();
- }
-
-#ifdef DEBUG
- cache_handle->JSFunctionResultCacheVerify();
-#endif
-
- // Function invocation may have cleared the cache. Reread all the data.
- finger_index = cache_handle->finger_index();
- size = cache_handle->size();
-
- // If we have spare room, put new data into it, otherwise evict post finger
- // entry which is likely to be the least recently used.
- int index = -1;
- if (size < cache_handle->length()) {
- cache_handle->set_size(size + JSFunctionResultCache::kEntrySize);
- index = size;
- } else {
- index = finger_index + JSFunctionResultCache::kEntrySize;
- if (index == cache_handle->length()) {
- index = JSFunctionResultCache::kEntriesIndex;
- }
- }
-
- ASSERT(index % 2 == 0);
- ASSERT(index >= JSFunctionResultCache::kEntriesIndex);
- ASSERT(index < cache_handle->length());
-
- cache_handle->set(index, *key_handle);
- cache_handle->set(index + 1, *value);
- cache_handle->set_finger_index(index);
-
-#ifdef DEBUG
- cache_handle->JSFunctionResultCacheVerify();
-#endif
-
- return *value;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewMessageObject) {
- HandleScope scope(isolate);
- CONVERT_ARG_CHECKED(String, type, 0);
- CONVERT_ARG_CHECKED(JSArray, arguments, 1);
- return *isolate->factory()->NewJSMessageObject(
- type,
- arguments,
- 0,
- 0,
- isolate->factory()->undefined_value(),
- isolate->factory()->undefined_value(),
- isolate->factory()->undefined_value());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetType) {
- CONVERT_CHECKED(JSMessageObject, message, args[0]);
- return message->type();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetArguments) {
- CONVERT_CHECKED(JSMessageObject, message, args[0]);
- return message->arguments();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetStartPosition) {
- CONVERT_CHECKED(JSMessageObject, message, args[0]);
- return Smi::FromInt(message->start_position());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetScript) {
- CONVERT_CHECKED(JSMessageObject, message, args[0]);
- return message->script();
-}
-
-
-#ifdef DEBUG
-// ListNatives is ONLY used by the fuzz-natives.js in debug mode
-// Exclude the code in release mode.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) {
- ASSERT(args.length() == 0);
- HandleScope scope;
-#define COUNT_ENTRY(Name, argc, ressize) + 1
- int entry_count = 0
- RUNTIME_FUNCTION_LIST(COUNT_ENTRY)
- INLINE_FUNCTION_LIST(COUNT_ENTRY)
- INLINE_RUNTIME_FUNCTION_LIST(COUNT_ENTRY);
-#undef COUNT_ENTRY
- Factory* factory = isolate->factory();
- Handle<FixedArray> elements = factory->NewFixedArray(entry_count);
- int index = 0;
- bool inline_runtime_functions = false;
-#define ADD_ENTRY(Name, argc, ressize) \
- { \
- HandleScope inner; \
- Handle<String> name; \
- /* Inline runtime functions have an underscore in front of the name. */ \
- if (inline_runtime_functions) { \
- name = factory->NewStringFromAscii( \
- Vector<const char>("_" #Name, StrLength("_" #Name))); \
- } else { \
- name = factory->NewStringFromAscii( \
- Vector<const char>(#Name, StrLength(#Name))); \
- } \
- Handle<FixedArray> pair_elements = factory->NewFixedArray(2); \
- pair_elements->set(0, *name); \
- pair_elements->set(1, Smi::FromInt(argc)); \
- Handle<JSArray> pair = factory->NewJSArrayWithElements(pair_elements); \
- elements->set(index++, *pair); \
- }
- inline_runtime_functions = false;
- RUNTIME_FUNCTION_LIST(ADD_ENTRY)
- inline_runtime_functions = true;
- INLINE_FUNCTION_LIST(ADD_ENTRY)
- INLINE_RUNTIME_FUNCTION_LIST(ADD_ENTRY)
-#undef ADD_ENTRY
- ASSERT_EQ(index, entry_count);
- Handle<JSArray> result = factory->NewJSArrayWithElements(elements);
- return *result;
-}
-#endif
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Log) {
- ASSERT(args.length() == 2);
- CONVERT_CHECKED(String, format, args[0]);
- CONVERT_CHECKED(JSArray, elms, args[1]);
- Vector<const char> chars = format->ToAsciiVector();
- LOGGER->LogRuntime(chars, elms);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IS_VAR) {
- UNREACHABLE(); // implemented as macro in the parser
- return NULL;
-}
-
-
-// ----------------------------------------------------------------------------
-// Implementation of Runtime
-
-#define F(name, number_of_args, result_size) \
- { Runtime::k##name, Runtime::RUNTIME, #name, \
- FUNCTION_ADDR(Runtime_##name), number_of_args, result_size },
-
-
-#define I(name, number_of_args, result_size) \
- { Runtime::kInline##name, Runtime::INLINE, \
- "_" #name, NULL, number_of_args, result_size },
-
-static const Runtime::Function kIntrinsicFunctions[] = {
- RUNTIME_FUNCTION_LIST(F)
- INLINE_FUNCTION_LIST(I)
- INLINE_RUNTIME_FUNCTION_LIST(I)
-};
-
-
-MaybeObject* Runtime::InitializeIntrinsicFunctionNames(Heap* heap,
- Object* dictionary) {
- ASSERT(Isolate::Current()->heap() == heap);
- ASSERT(dictionary != NULL);
- ASSERT(StringDictionary::cast(dictionary)->NumberOfElements() == 0);
- for (int i = 0; i < kNumFunctions; ++i) {
- Object* name_symbol;
- { MaybeObject* maybe_name_symbol =
- heap->LookupAsciiSymbol(kIntrinsicFunctions[i].name);
- if (!maybe_name_symbol->ToObject(&name_symbol)) return maybe_name_symbol;
- }
- StringDictionary* string_dictionary = StringDictionary::cast(dictionary);
- { MaybeObject* maybe_dictionary = string_dictionary->Add(
- String::cast(name_symbol),
- Smi::FromInt(i),
- PropertyDetails(NONE, NORMAL));
- if (!maybe_dictionary->ToObject(&dictionary)) {
- // Non-recoverable failure. Calling code must restart heap
- // initialization.
- return maybe_dictionary;
- }
- }
- }
- return dictionary;
-}
-
-
-const Runtime::Function* Runtime::FunctionForSymbol(Handle<String> name) {
- Heap* heap = name->GetHeap();
- int entry = heap->intrinsic_function_names()->FindEntry(*name);
- if (entry != kNotFound) {
- Object* smi_index = heap->intrinsic_function_names()->ValueAt(entry);
- int function_index = Smi::cast(smi_index)->value();
- return &(kIntrinsicFunctions[function_index]);
- }
- return NULL;
-}
-
-
-const Runtime::Function* Runtime::FunctionForId(Runtime::FunctionId id) {
- return &(kIntrinsicFunctions[static_cast<int>(id)]);
-}
-
-
-void Runtime::PerformGC(Object* result) {
- Isolate* isolate = Isolate::Current();
- Failure* failure = Failure::cast(result);
- if (failure->IsRetryAfterGC()) {
- // Try to do a garbage collection; ignore it if it fails. The C
- // entry stub will throw an out-of-memory exception in that case.
- isolate->heap()->CollectGarbage(failure->allocation_space());
- } else {
- // Handle last resort GC and make sure to allow future allocations
- // to grow the heap without causing GCs (if possible).
- isolate->counters()->gc_last_resort_from_js()->Increment();
- isolate->heap()->CollectAllGarbage(false);
- }
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/runtime.h b/src/3rdparty/v8/src/runtime.h
deleted file mode 100644
index 58062ca..0000000
--- a/src/3rdparty/v8/src/runtime.h
+++ /dev/null
@@ -1,643 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_RUNTIME_H_
-#define V8_RUNTIME_H_
-
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-// The interface to C++ runtime functions.
-
-// ----------------------------------------------------------------------------
-// RUNTIME_FUNCTION_LIST_ALWAYS defines runtime calls available in both
-// release and debug mode.
-// This macro should only be used by the macro RUNTIME_FUNCTION_LIST.
-
-// WARNING: RUNTIME_FUNCTION_LIST_ALWAYS_* is a very large macro that caused
-// MSVC Intellisense to crash. It was broken into two macros to work around
-// this problem. Please avoid large recursive macros whenever possible.
-#define RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
- /* Property access */ \
- F(GetProperty, 2, 1) \
- F(KeyedGetProperty, 2, 1) \
- F(DeleteProperty, 3, 1) \
- F(HasLocalProperty, 2, 1) \
- F(HasProperty, 2, 1) \
- F(HasElement, 2, 1) \
- F(IsPropertyEnumerable, 2, 1) \
- F(GetPropertyNames, 1, 1) \
- F(GetPropertyNamesFast, 1, 1) \
- F(GetLocalPropertyNames, 1, 1) \
- F(GetLocalElementNames, 1, 1) \
- F(GetInterceptorInfo, 1, 1) \
- F(GetNamedInterceptorPropertyNames, 1, 1) \
- F(GetIndexedInterceptorElementNames, 1, 1) \
- F(GetArgumentsProperty, 1, 1) \
- F(ToFastProperties, 1, 1) \
- F(ToSlowProperties, 1, 1) \
- F(FinishArrayPrototypeSetup, 1, 1) \
- F(SpecialArrayFunctions, 1, 1) \
- F(GetGlobalReceiver, 0, 1) \
- \
- F(IsInPrototypeChain, 2, 1) \
- F(SetHiddenPrototype, 2, 1) \
- \
- F(IsConstructCall, 0, 1) \
- \
- F(GetOwnProperty, 2, 1) \
- \
- F(IsExtensible, 1, 1) \
- F(PreventExtensions, 1, 1)\
- \
- /* Utilities */ \
- F(GetFunctionDelegate, 1, 1) \
- F(GetConstructorDelegate, 1, 1) \
- F(NewArgumentsFast, 3, 1) \
- F(LazyCompile, 1, 1) \
- F(LazyRecompile, 1, 1) \
- F(NotifyDeoptimized, 1, 1) \
- F(NotifyOSR, 0, 1) \
- F(DeoptimizeFunction, 1, 1) \
- F(CompileForOnStackReplacement, 1, 1) \
- F(SetNewFunctionAttributes, 1, 1) \
- F(AllocateInNewSpace, 1, 1) \
- \
- /* Array join support */ \
- F(PushIfAbsent, 2, 1) \
- F(ArrayConcat, 1, 1) \
- \
- /* Conversions */ \
- F(ToBool, 1, 1) \
- F(Typeof, 1, 1) \
- \
- F(StringToNumber, 1, 1) \
- F(StringFromCharCodeArray, 1, 1) \
- F(StringParseInt, 2, 1) \
- F(StringParseFloat, 1, 1) \
- F(StringToLowerCase, 1, 1) \
- F(StringToUpperCase, 1, 1) \
- F(StringSplit, 3, 1) \
- F(CharFromCode, 1, 1) \
- F(URIEscape, 1, 1) \
- F(URIUnescape, 1, 1) \
- F(QuoteJSONString, 1, 1) \
- F(QuoteJSONStringComma, 1, 1) \
- \
- F(NumberToString, 1, 1) \
- F(NumberToStringSkipCache, 1, 1) \
- F(NumberToInteger, 1, 1) \
- F(NumberToIntegerMapMinusZero, 1, 1) \
- F(NumberToJSUint32, 1, 1) \
- F(NumberToJSInt32, 1, 1) \
- F(NumberToSmi, 1, 1) \
- F(AllocateHeapNumber, 0, 1) \
- \
- /* Arithmetic operations */ \
- F(NumberAdd, 2, 1) \
- F(NumberSub, 2, 1) \
- F(NumberMul, 2, 1) \
- F(NumberDiv, 2, 1) \
- F(NumberMod, 2, 1) \
- F(NumberUnaryMinus, 1, 1) \
- F(NumberAlloc, 0, 1) \
- \
- F(StringAdd, 2, 1) \
- F(StringBuilderConcat, 3, 1) \
- F(StringBuilderJoin, 3, 1) \
- \
- /* Bit operations */ \
- F(NumberOr, 2, 1) \
- F(NumberAnd, 2, 1) \
- F(NumberXor, 2, 1) \
- F(NumberNot, 1, 1) \
- \
- F(NumberShl, 2, 1) \
- F(NumberShr, 2, 1) \
- F(NumberSar, 2, 1) \
- \
- /* Comparisons */ \
- F(NumberEquals, 2, 1) \
- F(StringEquals, 2, 1) \
- \
- F(NumberCompare, 3, 1) \
- F(SmiLexicographicCompare, 2, 1) \
- F(StringCompare, 2, 1) \
- \
- /* Math */ \
- F(Math_acos, 1, 1) \
- F(Math_asin, 1, 1) \
- F(Math_atan, 1, 1) \
- F(Math_atan2, 2, 1) \
- F(Math_ceil, 1, 1) \
- F(Math_cos, 1, 1) \
- F(Math_exp, 1, 1) \
- F(Math_floor, 1, 1) \
- F(Math_log, 1, 1) \
- F(Math_pow, 2, 1) \
- F(Math_pow_cfunction, 2, 1) \
- F(RoundNumber, 1, 1) \
- F(Math_sin, 1, 1) \
- F(Math_sqrt, 1, 1) \
- F(Math_tan, 1, 1) \
- \
- /* Regular expressions */ \
- F(RegExpCompile, 3, 1) \
- F(RegExpExec, 4, 1) \
- F(RegExpExecMultiple, 4, 1) \
- F(RegExpInitializeObject, 5, 1) \
- F(RegExpConstructResult, 3, 1) \
- \
- /* JSON */ \
- F(ParseJson, 1, 1) \
- \
- /* Strings */ \
- F(StringCharCodeAt, 2, 1) \
- F(StringIndexOf, 3, 1) \
- F(StringLastIndexOf, 3, 1) \
- F(StringLocaleCompare, 2, 1) \
- F(SubString, 3, 1) \
- F(StringReplaceRegExpWithString, 4, 1) \
- F(StringMatch, 3, 1) \
- F(StringTrim, 3, 1) \
- F(StringToArray, 2, 1) \
- F(NewStringWrapper, 1, 1) \
- \
- /* Numbers */ \
- F(NumberToRadixString, 2, 1) \
- F(NumberToFixed, 2, 1) \
- F(NumberToExponential, 2, 1) \
- F(NumberToPrecision, 2, 1)
-
-#define RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
- /* Reflection */ \
- F(FunctionSetInstanceClassName, 2, 1) \
- F(FunctionSetLength, 2, 1) \
- F(FunctionSetPrototype, 2, 1) \
- F(FunctionGetName, 1, 1) \
- F(FunctionSetName, 2, 1) \
- F(FunctionRemovePrototype, 1, 1) \
- F(FunctionGetSourceCode, 1, 1) \
- F(FunctionGetScript, 1, 1) \
- F(FunctionGetScriptSourcePosition, 1, 1) \
- F(FunctionGetPositionForOffset, 2, 1) \
- F(FunctionIsAPIFunction, 1, 1) \
- F(FunctionIsBuiltin, 1, 1) \
- F(GetScript, 1, 1) \
- F(CollectStackTrace, 2, 1) \
- F(GetV8Version, 0, 1) \
- \
- F(ClassOf, 1, 1) \
- F(SetCode, 2, 1) \
- F(SetExpectedNumberOfProperties, 2, 1) \
- \
- F(CreateApiFunction, 1, 1) \
- F(IsTemplate, 1, 1) \
- F(GetTemplateField, 2, 1) \
- F(DisableAccessChecks, 1, 1) \
- F(EnableAccessChecks, 1, 1) \
- \
- /* Dates */ \
- F(DateCurrentTime, 0, 1) \
- F(DateParseString, 2, 1) \
- F(DateLocalTimezone, 1, 1) \
- F(DateLocalTimeOffset, 0, 1) \
- F(DateDaylightSavingsOffset, 1, 1) \
- F(DateMakeDay, 3, 1) \
- F(DateYMDFromTime, 2, 1) \
- \
- /* Numbers */ \
- \
- /* Globals */ \
- F(CompileString, 1, 1) \
- F(GlobalPrint, 1, 1) \
- \
- /* Eval */ \
- F(GlobalReceiver, 1, 1) \
- F(ResolvePossiblyDirectEval, 4, 2) \
- F(ResolvePossiblyDirectEvalNoLookup, 4, 2) \
- \
- F(SetProperty, -1 /* 4 or 5 */, 1) \
- F(DefineOrRedefineDataProperty, 4, 1) \
- F(DefineOrRedefineAccessorProperty, 5, 1) \
- F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 */, 1) \
- \
- /* Arrays */ \
- F(RemoveArrayHoles, 2, 1) \
- F(GetArrayKeys, 2, 1) \
- F(MoveArrayContents, 2, 1) \
- F(EstimateNumberOfElements, 1, 1) \
- F(SwapElements, 3, 1) \
- \
- /* Getters and Setters */ \
- F(DefineAccessor, -1 /* 4 or 5 */, 1) \
- F(LookupAccessor, 3, 1) \
- \
- /* Literals */ \
- F(MaterializeRegExpLiteral, 4, 1)\
- F(CreateArrayLiteralBoilerplate, 3, 1) \
- F(CloneLiteralBoilerplate, 1, 1) \
- F(CloneShallowLiteralBoilerplate, 1, 1) \
- F(CreateObjectLiteral, 4, 1) \
- F(CreateObjectLiteralShallow, 4, 1) \
- F(CreateArrayLiteral, 3, 1) \
- F(CreateArrayLiteralShallow, 3, 1) \
- \
- /* Catch context extension objects */ \
- F(CreateCatchExtensionObject, 2, 1) \
- \
- /* Statements */ \
- F(NewClosure, 3, 1) \
- F(NewObject, 1, 1) \
- F(NewObjectFromBound, 2, 1) \
- F(FinalizeInstanceSize, 1, 1) \
- F(Throw, 1, 1) \
- F(ReThrow, 1, 1) \
- F(ThrowReferenceError, 1, 1) \
- F(StackGuard, 0, 1) \
- F(PromoteScheduledException, 0, 1) \
- \
- /* Contexts */ \
- F(NewContext, 1, 1) \
- F(PushContext, 1, 1) \
- F(PushCatchContext, 1, 1) \
- F(DeleteContextSlot, 2, 1) \
- F(LoadContextSlot, 2, 2) \
- F(LoadContextSlotNoReferenceError, 2, 2) \
- F(StoreContextSlot, 4, 1) \
- \
- /* Declarations and initialization */ \
- F(DeclareGlobals, 4, 1) \
- F(DeclareContextSlot, 4, 1) \
- F(InitializeVarGlobal, -1 /* 2 or 3 */, 1) \
- F(InitializeConstGlobal, 2, 1) \
- F(InitializeConstContextSlot, 3, 1) \
- F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
- \
- /* Debugging */ \
- F(DebugPrint, 1, 1) \
- F(DebugTrace, 0, 1) \
- F(TraceEnter, 0, 1) \
- F(TraceExit, 1, 1) \
- F(Abort, 2, 1) \
- /* Logging */ \
- F(Log, 2, 1) \
- /* ES5 */ \
- F(LocalKeys, 1, 1) \
- /* Cache suport */ \
- F(GetFromCache, 2, 1) \
- \
- /* Message objects */ \
- F(NewMessageObject, 2, 1) \
- F(MessageGetType, 1, 1) \
- F(MessageGetArguments, 1, 1) \
- F(MessageGetStartPosition, 1, 1) \
- F(MessageGetScript, 1, 1) \
- \
- /* Pseudo functions - handled as macros by parser */ \
- F(IS_VAR, 1, 1)
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \
- /* Debugger support*/ \
- F(DebugBreak, 0, 1) \
- F(SetDebugEventListener, 2, 1) \
- F(Break, 0, 1) \
- F(DebugGetPropertyDetails, 2, 1) \
- F(DebugGetProperty, 2, 1) \
- F(DebugPropertyTypeFromDetails, 1, 1) \
- F(DebugPropertyAttributesFromDetails, 1, 1) \
- F(DebugPropertyIndexFromDetails, 1, 1) \
- F(DebugNamedInterceptorPropertyValue, 2, 1) \
- F(DebugIndexedInterceptorElementValue, 2, 1) \
- F(CheckExecutionState, 1, 1) \
- F(GetFrameCount, 1, 1) \
- F(GetFrameDetails, 2, 1) \
- F(GetScopeCount, 2, 1) \
- F(GetScopeDetails, 3, 1) \
- F(DebugPrintScopes, 0, 1) \
- F(GetThreadCount, 1, 1) \
- F(GetThreadDetails, 2, 1) \
- F(SetDisableBreak, 1, 1) \
- F(GetBreakLocations, 1, 1) \
- F(SetFunctionBreakPoint, 3, 1) \
- F(SetScriptBreakPoint, 3, 1) \
- F(ClearBreakPoint, 1, 1) \
- F(ChangeBreakOnException, 2, 1) \
- F(IsBreakOnException, 1, 1) \
- F(PrepareStep, 3, 1) \
- F(ClearStepping, 0, 1) \
- F(DebugEvaluate, 5, 1) \
- F(DebugEvaluateGlobal, 4, 1) \
- F(DebugGetLoadedScripts, 0, 1) \
- F(DebugReferencedBy, 3, 1) \
- F(DebugConstructedBy, 2, 1) \
- F(DebugGetPrototype, 1, 1) \
- F(SystemBreak, 0, 1) \
- F(DebugDisassembleFunction, 1, 1) \
- F(DebugDisassembleConstructor, 1, 1) \
- F(FunctionGetInferredName, 1, 1) \
- F(LiveEditFindSharedFunctionInfosForScript, 1, 1) \
- F(LiveEditGatherCompileInfo, 2, 1) \
- F(LiveEditReplaceScript, 3, 1) \
- F(LiveEditReplaceFunctionCode, 2, 1) \
- F(LiveEditFunctionSourceUpdated, 1, 1) \
- F(LiveEditFunctionSetScript, 2, 1) \
- F(LiveEditReplaceRefToNestedFunction, 3, 1) \
- F(LiveEditPatchFunctionPositions, 2, 1) \
- F(LiveEditCheckAndDropActivations, 2, 1) \
- F(LiveEditCompareStrings, 2, 1) \
- F(GetFunctionCodePositionFromSource, 2, 1) \
- F(ExecuteInDebugContext, 2, 1) \
- \
- F(SetFlags, 1, 1) \
- F(CollectGarbage, 1, 1) \
- F(GetHeapUsage, 0, 1) \
- \
- /* LiveObjectList support*/ \
- F(HasLOLEnabled, 0, 1) \
- F(CaptureLOL, 0, 1) \
- F(DeleteLOL, 1, 1) \
- F(DumpLOL, 5, 1) \
- F(GetLOLObj, 1, 1) \
- F(GetLOLObjId, 1, 1) \
- F(GetLOLObjRetainers, 6, 1) \
- F(GetLOLPath, 3, 1) \
- F(InfoLOL, 2, 1) \
- F(PrintLOLObj, 1, 1) \
- F(ResetLOL, 0, 1) \
- F(SummarizeLOL, 3, 1)
-
-#else
-#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
-#endif
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-#define RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F) \
- F(ProfilerResume, 2, 1) \
- F(ProfilerPause, 2, 1)
-#else
-#define RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F)
-#endif
-
-#ifdef DEBUG
-#define RUNTIME_FUNCTION_LIST_DEBUG(F) \
- /* Testing */ \
- F(ListNatives, 0, 1)
-#else
-#define RUNTIME_FUNCTION_LIST_DEBUG(F)
-#endif
-
-// ----------------------------------------------------------------------------
-// RUNTIME_FUNCTION_LIST defines all runtime functions accessed
-// either directly by id (via the code generator), or indirectly
-// via a native call by name (from within JS code).
-
-#define RUNTIME_FUNCTION_LIST(F) \
- RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
- RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
- RUNTIME_FUNCTION_LIST_DEBUG(F) \
- RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \
- RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F)
-
-// ----------------------------------------------------------------------------
-// INLINE_FUNCTION_LIST defines all inlined functions accessed
-// with a native call of the form %_name from within JS code.
-// Entries have the form F(name, number of arguments, number of return values).
-#define INLINE_FUNCTION_LIST(F) \
- F(IsSmi, 1, 1) \
- F(IsNonNegativeSmi, 1, 1) \
- F(IsArray, 1, 1) \
- F(IsRegExp, 1, 1) \
- F(CallFunction, -1 /* receiver + n args + function */, 1) \
- F(ArgumentsLength, 0, 1) \
- F(Arguments, 1, 1) \
- F(ValueOf, 1, 1) \
- F(SetValueOf, 2, 1) \
- F(StringCharFromCode, 1, 1) \
- F(StringCharAt, 2, 1) \
- F(ObjectEquals, 2, 1) \
- F(RandomHeapNumber, 0, 1) \
- F(IsObject, 1, 1) \
- F(IsFunction, 1, 1) \
- F(IsUndetectableObject, 1, 1) \
- F(IsSpecObject, 1, 1) \
- F(IsStringWrapperSafeForDefaultValueOf, 1, 1) \
- F(MathPow, 2, 1) \
- F(MathSin, 1, 1) \
- F(MathCos, 1, 1) \
- F(MathSqrt, 1, 1) \
- F(MathLog, 1, 1) \
- F(IsRegExpEquivalent, 2, 1) \
- F(HasCachedArrayIndex, 1, 1) \
- F(GetCachedArrayIndex, 1, 1) \
- F(FastAsciiArrayJoin, 2, 1)
-
-
-// ----------------------------------------------------------------------------
-// INLINE_AND_RUNTIME_FUNCTION_LIST defines all inlined functions accessed
-// with a native call of the form %_name from within JS code that also have
-// a corresponding runtime function, that is called for slow cases.
-// Entries have the form F(name, number of arguments, number of return values).
-#define INLINE_RUNTIME_FUNCTION_LIST(F) \
- F(IsConstructCall, 0, 1) \
- F(ClassOf, 1, 1) \
- F(StringCharCodeAt, 2, 1) \
- F(Log, 3, 1) \
- F(StringAdd, 2, 1) \
- F(SubString, 3, 1) \
- F(StringCompare, 2, 1) \
- F(RegExpExec, 4, 1) \
- F(RegExpConstructResult, 3, 1) \
- F(GetFromCache, 2, 1) \
- F(NumberToString, 1, 1) \
- F(SwapElements, 3, 1)
-
-
-//---------------------------------------------------------------------------
-// Runtime provides access to all C++ runtime functions.
-
-class RuntimeState {
- public:
-
- StaticResource<StringInputBuffer>* string_input_buffer() {
- return &string_input_buffer_;
- }
- unibrow::Mapping<unibrow::ToUppercase, 128>* to_upper_mapping() {
- return &to_upper_mapping_;
- }
- unibrow::Mapping<unibrow::ToLowercase, 128>* to_lower_mapping() {
- return &to_lower_mapping_;
- }
- StringInputBuffer* string_input_buffer_compare_bufx() {
- return &string_input_buffer_compare_bufx_;
- }
- StringInputBuffer* string_input_buffer_compare_bufy() {
- return &string_input_buffer_compare_bufy_;
- }
- StringInputBuffer* string_locale_compare_buf1() {
- return &string_locale_compare_buf1_;
- }
- StringInputBuffer* string_locale_compare_buf2() {
- return &string_locale_compare_buf2_;
- }
- int* smi_lexicographic_compare_x_elms() {
- return smi_lexicographic_compare_x_elms_;
- }
- int* smi_lexicographic_compare_y_elms() {
- return smi_lexicographic_compare_y_elms_;
- }
-
- private:
- RuntimeState() {}
- // Non-reentrant string buffer for efficient general use in the runtime.
- StaticResource<StringInputBuffer> string_input_buffer_;
- unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping_;
- unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping_;
- StringInputBuffer string_input_buffer_compare_bufx_;
- StringInputBuffer string_input_buffer_compare_bufy_;
- StringInputBuffer string_locale_compare_buf1_;
- StringInputBuffer string_locale_compare_buf2_;
- int smi_lexicographic_compare_x_elms_[10];
- int smi_lexicographic_compare_y_elms_[10];
-
- friend class Isolate;
- friend class Runtime;
-
- DISALLOW_COPY_AND_ASSIGN(RuntimeState);
-};
-
-
-class Runtime : public AllStatic {
- public:
- enum FunctionId {
-#define F(name, nargs, ressize) k##name,
- RUNTIME_FUNCTION_LIST(F)
-#undef F
-#define F(name, nargs, ressize) kInline##name,
- INLINE_FUNCTION_LIST(F)
- INLINE_RUNTIME_FUNCTION_LIST(F)
-#undef F
- kNumFunctions,
- kFirstInlineFunction = kInlineIsSmi
- };
-
- enum IntrinsicType {
- RUNTIME,
- INLINE
- };
-
- // Intrinsic function descriptor.
- struct Function {
- FunctionId function_id;
- IntrinsicType intrinsic_type;
- // The JS name of the function.
- const char* name;
-
- // The C++ (native) entry point. NULL if the function is inlined.
- byte* entry;
-
- // The number of arguments expected. nargs is -1 if the function takes
- // a variable number of arguments.
- int nargs;
- // Size of result. Most functions return a single pointer, size 1.
- int result_size;
- };
-
- static const int kNotFound = -1;
-
- // Add symbols for all the intrinsic function names to a StringDictionary.
- // Returns failure if an allocation fails. In this case, it must be
- // retried with a new, empty StringDictionary, not with the same one.
- // Alternatively, heap initialization can be completely restarted.
- MUST_USE_RESULT static MaybeObject* InitializeIntrinsicFunctionNames(
- Heap* heap, Object* dictionary);
-
- // Get the intrinsic function with the given name, which must be a symbol.
- static const Function* FunctionForSymbol(Handle<String> name);
-
- // Get the intrinsic function with the given FunctionId.
- static const Function* FunctionForId(FunctionId id);
-
- // General-purpose helper functions for runtime system.
- static int StringMatch(Isolate* isolate,
- Handle<String> sub,
- Handle<String> pat,
- int index);
-
- static bool IsUpperCaseChar(RuntimeState* runtime_state, uint16_t ch);
-
- // TODO(1240886): The following three methods are *not* handle safe,
- // but accept handle arguments. This seems fragile.
-
- // Support getting the characters in a string using [] notation as
- // in Firefox/SpiderMonkey, Safari and Opera.
- MUST_USE_RESULT static MaybeObject* GetElementOrCharAt(Isolate* isolate,
- Handle<Object> object,
- uint32_t index);
- MUST_USE_RESULT static MaybeObject* GetElement(Handle<Object> object,
- uint32_t index);
-
- MUST_USE_RESULT static MaybeObject* SetObjectProperty(
- Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attr,
- StrictModeFlag strict_mode);
-
- MUST_USE_RESULT static MaybeObject* ForceSetObjectProperty(
- Isolate* isolate,
- Handle<JSObject> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attr);
-
- MUST_USE_RESULT static MaybeObject* ForceDeleteObjectProperty(
- Isolate* isolate,
- Handle<JSObject> object,
- Handle<Object> key);
-
- MUST_USE_RESULT static MaybeObject* GetObjectProperty(
- Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key);
-
- // This function is used in FunctionNameUsing* tests.
- static Object* FindSharedFunctionInfoInScript(Isolate* isolate,
- Handle<Script> script,
- int position);
-
- // Helper functions used stubs.
- static void PerformGC(Object* result);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_RUNTIME_H_
diff --git a/src/3rdparty/v8/src/runtime.js b/src/3rdparty/v8/src/runtime.js
deleted file mode 100644
index 66d839b..0000000
--- a/src/3rdparty/v8/src/runtime.js
+++ /dev/null
@@ -1,643 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This files contains runtime support implemented in JavaScript.
-
-// CAUTION: Some of the functions specified in this file are called
-// directly from compiled code. These are the functions with names in
-// ALL CAPS. The compiled code passes the first argument in 'this' and
-// it does not push the function onto the stack. This means that you
-// cannot use contexts in all these functions.
-
-
-/* -----------------------------------
- - - - C o m p a r i s o n - - -
- -----------------------------------
-*/
-
-// The following const declarations are shared with other native JS files.
-// They are all declared at this one spot to avoid const redeclaration errors.
-const $Object = global.Object;
-const $Array = global.Array;
-const $String = global.String;
-const $Number = global.Number;
-const $Function = global.Function;
-const $Boolean = global.Boolean;
-const $NaN = 0/0;
-
-
-// ECMA-262, section 11.9.1, page 55.
-function EQUALS(y) {
- if (IS_STRING(this) && IS_STRING(y)) return %StringEquals(this, y);
- var x = this;
-
- // NOTE: We use iteration instead of recursion, because it is
- // difficult to call EQUALS with the correct setting of 'this' in
- // an efficient way.
- while (true) {
- if (IS_NUMBER(x)) {
- if (y == null) return 1; // not equal
- return %NumberEquals(x, %ToNumber(y));
- } else if (IS_STRING(x)) {
- if (IS_STRING(y)) return %StringEquals(x, y);
- if (IS_NUMBER(y)) return %NumberEquals(%ToNumber(x), y);
- if (IS_BOOLEAN(y)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
- if (y == null) return 1; // not equal
- y = %ToPrimitive(y, NO_HINT);
- } else if (IS_BOOLEAN(x)) {
- if (IS_BOOLEAN(y)) {
- return %_ObjectEquals(x, y) ? 0 : 1;
- }
- if (y == null) return 1; // not equal
- return %NumberEquals(%ToNumber(x), %ToNumber(y));
- } else if (x == null) {
- // NOTE: This checks for both null and undefined.
- return (y == null) ? 0 : 1;
- } else {
- // x is not a number, boolean, null or undefined.
- if (y == null) return 1; // not equal
- if (IS_SPEC_OBJECT(y)) {
- return %_ObjectEquals(x, y) ? 0 : 1;
- }
-
- x = %ToPrimitive(x, NO_HINT);
- }
- }
-}
-
-// ECMA-262, section 11.9.4, page 56.
-function STRICT_EQUALS(x) {
- if (IS_STRING(this)) {
- if (!IS_STRING(x)) return 1; // not equal
- return %StringEquals(this, x);
- }
-
- if (IS_NUMBER(this)) {
- if (!IS_NUMBER(x)) return 1; // not equal
- return %NumberEquals(this, x);
- }
-
- // If anything else gets here, we just do simple identity check.
- // Objects (including functions), null, undefined and booleans were
- // checked in the CompareStub, so there should be nothing left.
- return %_ObjectEquals(this, x) ? 0 : 1;
-}
-
-
-// ECMA-262, section 11.8.5, page 53. The 'ncr' parameter is used as
-// the result when either (or both) the operands are NaN.
-function COMPARE(x, ncr) {
- var left;
- var right;
- // Fast cases for string, numbers and undefined compares.
- if (IS_STRING(this)) {
- if (IS_STRING(x)) return %_StringCompare(this, x);
- if (IS_UNDEFINED(x)) return ncr;
- left = this;
- } else if (IS_NUMBER(this)) {
- if (IS_NUMBER(x)) return %NumberCompare(this, x, ncr);
- if (IS_UNDEFINED(x)) return ncr;
- left = this;
- } else if (IS_UNDEFINED(this)) {
- if (!IS_UNDEFINED(x)) {
- %ToPrimitive(x, NUMBER_HINT);
- }
- return ncr;
- } else if (IS_UNDEFINED(x)) {
- %ToPrimitive(this, NUMBER_HINT);
- return ncr;
- } else {
- left = %ToPrimitive(this, NUMBER_HINT);
- }
-
- right = %ToPrimitive(x, NUMBER_HINT);
- if (IS_STRING(left) && IS_STRING(right)) {
- return %_StringCompare(left, right);
- } else {
- var left_number = %ToNumber(left);
- var right_number = %ToNumber(right);
- if (NUMBER_IS_NAN(left_number) || NUMBER_IS_NAN(right_number)) return ncr;
- return %NumberCompare(left_number, right_number, ncr);
- }
-}
-
-
-
-/* -----------------------------------
- - - - A r i t h m e t i c - - -
- -----------------------------------
-*/
-
-// ECMA-262, section 11.6.1, page 50.
-function ADD(x) {
- // Fast case: Check for number operands and do the addition.
- if (IS_NUMBER(this) && IS_NUMBER(x)) return %NumberAdd(this, x);
- if (IS_STRING(this) && IS_STRING(x)) return %_StringAdd(this, x);
-
- // Default implementation.
- var a = %ToPrimitive(this, NO_HINT);
- var b = %ToPrimitive(x, NO_HINT);
-
- if (IS_STRING(a)) {
- return %_StringAdd(a, %ToString(b));
- } else if (IS_STRING(b)) {
- return %_StringAdd(%NonStringToString(a), b);
- } else {
- return %NumberAdd(%ToNumber(a), %ToNumber(b));
- }
-}
-
-
-// Left operand (this) is already a string.
-function STRING_ADD_LEFT(y) {
- if (!IS_STRING(y)) {
- if (IS_STRING_WRAPPER(y) && %_IsStringWrapperSafeForDefaultValueOf(y)) {
- y = %_ValueOf(y);
- } else {
- y = IS_NUMBER(y)
- ? %_NumberToString(y)
- : %ToString(%ToPrimitive(y, NO_HINT));
- }
- }
- return %_StringAdd(this, y);
-}
-
-
-// Right operand (y) is already a string.
-function STRING_ADD_RIGHT(y) {
- var x = this;
- if (!IS_STRING(x)) {
- if (IS_STRING_WRAPPER(x) && %_IsStringWrapperSafeForDefaultValueOf(x)) {
- x = %_ValueOf(x);
- } else {
- x = IS_NUMBER(x)
- ? %_NumberToString(x)
- : %ToString(%ToPrimitive(x, NO_HINT));
- }
- }
- return %_StringAdd(x, y);
-}
-
-
-// ECMA-262, section 11.6.2, page 50.
-function SUB(y) {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- return %NumberSub(x, y);
-}
-
-
-// ECMA-262, section 11.5.1, page 48.
-function MUL(y) {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- return %NumberMul(x, y);
-}
-
-
-// ECMA-262, section 11.5.2, page 49.
-function DIV(y) {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- return %NumberDiv(x, y);
-}
-
-
-// ECMA-262, section 11.5.3, page 49.
-function MOD(y) {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- return %NumberMod(x, y);
-}
-
-
-
-/* -------------------------------------------
- - - - B i t o p e r a t i o n s - - -
- -------------------------------------------
-*/
-
-// ECMA-262, section 11.10, page 57.
-function BIT_OR(y) {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- return %NumberOr(x, y);
-}
-
-
-// ECMA-262, section 11.10, page 57.
-function BIT_AND(y) {
- var x;
- if (IS_NUMBER(this)) {
- x = this;
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- } else {
- x = %NonNumberToNumber(this);
- // Make sure to convert the right operand to a number before
- // bailing out in the fast case, but after converting the
- // left operand. This ensures that valueOf methods on the right
- // operand are always executed.
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- // Optimize for the case where we end up AND'ing a value
- // that doesn't convert to a number. This is common in
- // certain benchmarks.
- if (NUMBER_IS_NAN(x)) return 0;
- }
- return %NumberAnd(x, y);
-}
-
-
-// ECMA-262, section 11.10, page 57.
-function BIT_XOR(y) {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- return %NumberXor(x, y);
-}
-
-
-// ECMA-262, section 11.4.7, page 47.
-function UNARY_MINUS() {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- return %NumberUnaryMinus(x);
-}
-
-
-// ECMA-262, section 11.4.8, page 48.
-function BIT_NOT() {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- return %NumberNot(x);
-}
-
-
-// ECMA-262, section 11.7.1, page 51.
-function SHL(y) {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- return %NumberShl(x, y);
-}
-
-
-// ECMA-262, section 11.7.2, page 51.
-function SAR(y) {
- var x;
- if (IS_NUMBER(this)) {
- x = this;
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- } else {
- x = %NonNumberToNumber(this);
- // Make sure to convert the right operand to a number before
- // bailing out in the fast case, but after converting the
- // left operand. This ensures that valueOf methods on the right
- // operand are always executed.
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- // Optimize for the case where we end up shifting a value
- // that doesn't convert to a number. This is common in
- // certain benchmarks.
- if (NUMBER_IS_NAN(x)) return 0;
- }
- return %NumberSar(x, y);
-}
-
-
-// ECMA-262, section 11.7.3, page 52.
-function SHR(y) {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- return %NumberShr(x, y);
-}
-
-
-
-/* -----------------------------
- - - - H e l p e r s - - -
- -----------------------------
-*/
-
-// ECMA-262, section 11.4.1, page 46.
-function DELETE(key, strict) {
- return %DeleteProperty(%ToObject(this), %ToString(key), strict);
-}
-
-
-// ECMA-262, section 11.8.7, page 54.
-function IN(x) {
- if (!IS_SPEC_OBJECT(x)) {
- throw %MakeTypeError('invalid_in_operator_use', [this, x]);
- }
- return %_IsNonNegativeSmi(this) ? %HasElement(x, this) : %HasProperty(x, %ToString(this));
-}
-
-
-// ECMA-262, section 11.8.6, page 54. To make the implementation more
-// efficient, the return value should be zero if the 'this' is an
-// instance of F, and non-zero if not. This makes it possible to avoid
-// an expensive ToBoolean conversion in the generated code.
-function INSTANCE_OF(F) {
- var V = this;
- if (!IS_FUNCTION(F)) {
- throw %MakeTypeError('instanceof_function_expected', [V]);
- }
-
- // If V is not an object, return false.
- if (!IS_SPEC_OBJECT(V)) {
- return 1;
- }
-
- // Get the prototype of F; if it is not an object, throw an error.
- var O = F.prototype;
- if (!IS_SPEC_OBJECT(O)) {
- throw %MakeTypeError('instanceof_nonobject_proto', [O]);
- }
-
- // Return whether or not O is in the prototype chain of V.
- return %IsInPrototypeChain(O, V) ? 0 : 1;
-}
-
-
-// Get an array of property keys for the given object. Used in
-// for-in statements.
-function GET_KEYS() {
- return %GetPropertyNames(this);
-}
-
-
-// Filter a given key against an object by checking if the object
-// has a property with the given key; return the key as a string if
-// it has. Otherwise returns 0 (smi). Used in for-in statements.
-function FILTER_KEY(key) {
- var string = %ToString(key);
- if (%HasProperty(this, string)) return string;
- return 0;
-}
-
-
-function CALL_NON_FUNCTION() {
- var delegate = %GetFunctionDelegate(this);
- if (!IS_FUNCTION(delegate)) {
- throw %MakeTypeError('called_non_callable', [typeof this]);
- }
- return delegate.apply(this, arguments);
-}
-
-
-function CALL_NON_FUNCTION_AS_CONSTRUCTOR() {
- var delegate = %GetConstructorDelegate(this);
- if (!IS_FUNCTION(delegate)) {
- throw %MakeTypeError('called_non_callable', [typeof this]);
- }
- return delegate.apply(this, arguments);
-}
-
-
-function APPLY_PREPARE(args) {
- var length;
- // First check whether length is a positive Smi and args is an
- // array. This is the fast case. If this fails, we do the slow case
- // that takes care of more eventualities.
- if (IS_ARRAY(args)) {
- length = args.length;
- if (%_IsSmi(length) && length >= 0 && length < 0x800000 && IS_FUNCTION(this)) {
- return length;
- }
- }
-
- length = (args == null) ? 0 : %ToUint32(args.length);
-
- // We can handle any number of apply arguments if the stack is
- // big enough, but sanity check the value to avoid overflow when
- // multiplying with pointer size.
- if (length > 0x800000) {
- throw %MakeRangeError('stack_overflow', []);
- }
-
- if (!IS_FUNCTION(this)) {
- throw %MakeTypeError('apply_non_function', [ %ToString(this), typeof this ]);
- }
-
- // Make sure the arguments list has the right type.
- if (args != null && !IS_ARRAY(args) && !IS_ARGUMENTS(args)) {
- throw %MakeTypeError('apply_wrong_args', []);
- }
-
- // Return the length which is the number of arguments to copy to the
- // stack. It is guaranteed to be a small integer at this point.
- return length;
-}
-
-
-function APPLY_OVERFLOW(length) {
- throw %MakeRangeError('stack_overflow', []);
-}
-
-
-// Convert the receiver to an object - forward to ToObject.
-function TO_OBJECT() {
- return %ToObject(this);
-}
-
-
-// Convert the receiver to a number - forward to ToNumber.
-function TO_NUMBER() {
- return %ToNumber(this);
-}
-
-
-// Convert the receiver to a string - forward to ToString.
-function TO_STRING() {
- return %ToString(this);
-}
-
-
-/* -------------------------------------
- - - - C o n v e r s i o n s - - -
- -------------------------------------
-*/
-
-// ECMA-262, section 9.1, page 30. Use null/undefined for no hint,
-// (1) for number hint, and (2) for string hint.
-function ToPrimitive(x, hint) {
- // Fast case check.
- if (IS_STRING(x)) return x;
- // Normal behavior.
- if (!IS_SPEC_OBJECT(x)) return x;
- if (hint == NO_HINT) hint = (IS_DATE(x)) ? STRING_HINT : NUMBER_HINT;
- return (hint == NUMBER_HINT) ? %DefaultNumber(x) : %DefaultString(x);
-}
-
-
-// ECMA-262, section 9.2, page 30
-function ToBoolean(x) {
- if (IS_BOOLEAN(x)) return x;
- if (IS_STRING(x)) return x.length != 0;
- if (x == null) return false;
- if (IS_NUMBER(x)) return !((x == 0) || NUMBER_IS_NAN(x));
- return true;
-}
-
-
-// ECMA-262, section 9.3, page 31.
-function ToNumber(x) {
- if (IS_NUMBER(x)) return x;
- if (IS_STRING(x)) {
- return %_HasCachedArrayIndex(x) ? %_GetCachedArrayIndex(x)
- : %StringToNumber(x);
- }
- if (IS_BOOLEAN(x)) return x ? 1 : 0;
- if (IS_UNDEFINED(x)) return $NaN;
- return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x));
-}
-
-function NonNumberToNumber(x) {
- if (IS_STRING(x)) {
- return %_HasCachedArrayIndex(x) ? %_GetCachedArrayIndex(x)
- : %StringToNumber(x);
- }
- if (IS_BOOLEAN(x)) return x ? 1 : 0;
- if (IS_UNDEFINED(x)) return $NaN;
- return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x));
-}
-
-
-// ECMA-262, section 9.8, page 35.
-function ToString(x) {
- if (IS_STRING(x)) return x;
- if (IS_NUMBER(x)) return %_NumberToString(x);
- if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
- if (IS_UNDEFINED(x)) return 'undefined';
- return (IS_NULL(x)) ? 'null' : %ToString(%DefaultString(x));
-}
-
-function NonStringToString(x) {
- if (IS_NUMBER(x)) return %_NumberToString(x);
- if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
- if (IS_UNDEFINED(x)) return 'undefined';
- return (IS_NULL(x)) ? 'null' : %ToString(%DefaultString(x));
-}
-
-
-// ECMA-262, section 9.9, page 36.
-function ToObject(x) {
- if (IS_STRING(x)) return new $String(x);
- if (IS_NUMBER(x)) return new $Number(x);
- if (IS_BOOLEAN(x)) return new $Boolean(x);
- if (IS_NULL_OR_UNDEFINED(x) && !IS_UNDETECTABLE(x)) {
- throw %MakeTypeError('null_to_object', []);
- }
- return x;
-}
-
-
-// ECMA-262, section 9.4, page 34.
-function ToInteger(x) {
- if (%_IsSmi(x)) return x;
- return %NumberToInteger(ToNumber(x));
-}
-
-
-// ECMA-262, section 9.6, page 34.
-function ToUint32(x) {
- if (%_IsSmi(x) && x >= 0) return x;
- return %NumberToJSUint32(ToNumber(x));
-}
-
-
-// ECMA-262, section 9.5, page 34
-function ToInt32(x) {
- if (%_IsSmi(x)) return x;
- return %NumberToJSInt32(ToNumber(x));
-}
-
-
-// ES5, section 9.12
-function SameValue(x, y) {
- if (typeof x != typeof y) return false;
- if (IS_NUMBER(x)) {
- if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) return true;
- // x is +0 and y is -0 or vice versa.
- if (x === 0 && y === 0 && (1 / x) != (1 / y)) return false;
- }
- return x === y;
-}
-
-
-/* ---------------------------------
- - - - U t i l i t i e s - - -
- ---------------------------------
-*/
-
-// Returns if the given x is a primitive value - not an object or a
-// function.
-function IsPrimitive(x) {
- // Even though the type of null is "object", null is still
- // considered a primitive value. IS_SPEC_OBJECT handles this correctly
- // (i.e., it will return false if x is null).
- return !IS_SPEC_OBJECT(x);
-}
-
-
-// ECMA-262, section 8.6.2.6, page 28.
-function DefaultNumber(x) {
- var valueOf = x.valueOf;
- if (IS_FUNCTION(valueOf)) {
- var v = %_CallFunction(x, valueOf);
- if (%IsPrimitive(v)) return v;
- }
-
- var toString = x.toString;
- if (IS_FUNCTION(toString)) {
- var s = %_CallFunction(x, toString);
- if (%IsPrimitive(s)) return s;
- }
-
- throw %MakeTypeError('cannot_convert_to_primitive', []);
-}
-
-
-// ECMA-262, section 8.6.2.6, page 28.
-function DefaultString(x) {
- var toString = x.toString;
- if (IS_FUNCTION(toString)) {
- var s = %_CallFunction(x, toString);
- if (%IsPrimitive(s)) return s;
- }
-
- var valueOf = x.valueOf;
- if (IS_FUNCTION(valueOf)) {
- var v = %_CallFunction(x, valueOf);
- if (%IsPrimitive(v)) return v;
- }
-
- throw %MakeTypeError('cannot_convert_to_primitive', []);
-}
-
-
-// NOTE: Setting the prototype for Array must take place as early as
-// possible due to code generation for array literals. When
-// generating code for a array literal a boilerplate array is created
-// that is cloned when running the code. It is essiential that the
-// boilerplate gets the right prototype.
-%FunctionSetPrototype($Array, new $Array(0));
diff --git a/src/3rdparty/v8/src/safepoint-table.cc b/src/3rdparty/v8/src/safepoint-table.cc
deleted file mode 100644
index 28cf6e6..0000000
--- a/src/3rdparty/v8/src/safepoint-table.cc
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "safepoint-table.h"
-
-#include "deoptimizer.h"
-#include "disasm.h"
-#include "macro-assembler.h"
-#include "zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-bool SafepointEntry::HasRegisters() const {
- ASSERT(is_valid());
- ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
- const int num_reg_bytes = kNumSafepointRegisters >> kBitsPerByteLog2;
- for (int i = 0; i < num_reg_bytes; i++) {
- if (bits_[i] != SafepointTable::kNoRegisters) return true;
- }
- return false;
-}
-
-
-bool SafepointEntry::HasRegisterAt(int reg_index) const {
- ASSERT(is_valid());
- ASSERT(reg_index >= 0 && reg_index < kNumSafepointRegisters);
- int byte_index = reg_index >> kBitsPerByteLog2;
- int bit_index = reg_index & (kBitsPerByte - 1);
- return (bits_[byte_index] & (1 << bit_index)) != 0;
-}
-
-
-SafepointTable::SafepointTable(Code* code) {
- ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
- code_ = code;
- Address header = code->instruction_start() + code->safepoint_table_offset();
- length_ = Memory::uint32_at(header + kLengthOffset);
- entry_size_ = Memory::uint32_at(header + kEntrySizeOffset);
- pc_and_deoptimization_indexes_ = header + kHeaderSize;
- entries_ = pc_and_deoptimization_indexes_ +
- (length_ * kPcAndDeoptimizationIndexSize);
- ASSERT(entry_size_ > 0);
- ASSERT_EQ(SafepointEntry::DeoptimizationIndexField::max(),
- Safepoint::kNoDeoptimizationIndex);
-}
-
-
-SafepointEntry SafepointTable::FindEntry(Address pc) const {
- unsigned pc_offset = static_cast<unsigned>(pc - code_->instruction_start());
- for (unsigned i = 0; i < length(); i++) {
- // TODO(kasperl): Replace the linear search with binary search.
- if (GetPcOffset(i) == pc_offset) return GetEntry(i);
- }
- return SafepointEntry();
-}
-
-
-void SafepointTable::PrintEntry(unsigned index) const {
- disasm::NameConverter converter;
- SafepointEntry entry = GetEntry(index);
- uint8_t* bits = entry.bits();
-
- // Print the stack slot bits.
- if (entry_size_ > 0) {
- ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
- const int first = kNumSafepointRegisters >> kBitsPerByteLog2;
- int last = entry_size_ - 1;
- for (int i = first; i < last; i++) PrintBits(bits[i], kBitsPerByte);
- int last_bits = code_->stack_slots() - ((last - first) * kBitsPerByte);
- PrintBits(bits[last], last_bits);
-
- // Print the registers (if any).
- if (!entry.HasRegisters()) return;
- for (int j = 0; j < kNumSafepointRegisters; j++) {
- if (entry.HasRegisterAt(j)) {
- PrintF(" | %s", converter.NameOfCPURegister(j));
- }
- }
- }
-}
-
-
-void SafepointTable::PrintBits(uint8_t byte, int digits) {
- ASSERT(digits >= 0 && digits <= kBitsPerByte);
- for (int i = 0; i < digits; i++) {
- PrintF("%c", ((byte & (1 << i)) == 0) ? '0' : '1');
- }
-}
-
-
-void Safepoint::DefinePointerRegister(Register reg) {
- registers_->Add(reg.code());
-}
-
-
-Safepoint SafepointTableBuilder::DefineSafepoint(
- Assembler* assembler, Safepoint::Kind kind, int arguments,
- int deoptimization_index) {
- ASSERT(deoptimization_index != -1);
- ASSERT(arguments >= 0);
- DeoptimizationInfo pc_and_deoptimization_index;
- pc_and_deoptimization_index.pc = assembler->pc_offset();
- pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
- pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
- pc_and_deoptimization_index.arguments = arguments;
- pc_and_deoptimization_index.has_doubles = (kind & Safepoint::kWithDoubles);
- deoptimization_info_.Add(pc_and_deoptimization_index);
- indexes_.Add(new ZoneList<int>(8));
- registers_.Add((kind & Safepoint::kWithRegisters)
- ? new ZoneList<int>(4)
- : NULL);
- return Safepoint(indexes_.last(), registers_.last());
-}
-
-
-unsigned SafepointTableBuilder::GetCodeOffset() const {
- ASSERT(emitted_);
- return offset_;
-}
-
-
-void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
- // For lazy deoptimization we need space to patch a call after every call.
- // Ensure there is always space for such patching, even if the code ends
- // in a call.
- int target_offset = assembler->pc_offset() + Deoptimizer::patch_size();
- while (assembler->pc_offset() < target_offset) {
- assembler->nop();
- }
-
- // Make sure the safepoint table is properly aligned. Pad with nops.
- assembler->Align(kIntSize);
- assembler->RecordComment(";;; Safepoint table.");
- offset_ = assembler->pc_offset();
-
- // Take the register bits into account.
- bits_per_entry += kNumSafepointRegisters;
-
- // Compute the number of bytes per safepoint entry.
- int bytes_per_entry =
- RoundUp(bits_per_entry, kBitsPerByte) >> kBitsPerByteLog2;
-
- // Emit the table header.
- int length = deoptimization_info_.length();
- assembler->dd(length);
- assembler->dd(bytes_per_entry);
-
- // Emit sorted table of pc offsets together with deoptimization indexes and
- // pc after gap information.
- for (int i = 0; i < length; i++) {
- assembler->dd(deoptimization_info_[i].pc);
- assembler->dd(EncodeExceptPC(deoptimization_info_[i]));
- }
-
- // Emit table of bitmaps.
- ZoneList<uint8_t> bits(bytes_per_entry);
- for (int i = 0; i < length; i++) {
- ZoneList<int>* indexes = indexes_[i];
- ZoneList<int>* registers = registers_[i];
- bits.Clear();
- bits.AddBlock(0, bytes_per_entry);
-
- // Run through the registers (if any).
- ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
- if (registers == NULL) {
- const int num_reg_bytes = kNumSafepointRegisters >> kBitsPerByteLog2;
- for (int j = 0; j < num_reg_bytes; j++) {
- bits[j] = SafepointTable::kNoRegisters;
- }
- } else {
- for (int j = 0; j < registers->length(); j++) {
- int index = registers->at(j);
- ASSERT(index >= 0 && index < kNumSafepointRegisters);
- int byte_index = index >> kBitsPerByteLog2;
- int bit_index = index & (kBitsPerByte - 1);
- bits[byte_index] |= (1 << bit_index);
- }
- }
-
- // Run through the indexes and build a bitmap.
- for (int j = 0; j < indexes->length(); j++) {
- int index = bits_per_entry - 1 - indexes->at(j);
- int byte_index = index >> kBitsPerByteLog2;
- int bit_index = index & (kBitsPerByte - 1);
- bits[byte_index] |= (1U << bit_index);
- }
-
- // Emit the bitmap for the current entry.
- for (int k = 0; k < bytes_per_entry; k++) {
- assembler->db(bits[k]);
- }
- }
- emitted_ = true;
-}
-
-
-uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info) {
- unsigned index = info.deoptimization_index;
- unsigned gap_size = info.pc_after_gap - info.pc;
- uint32_t encoding = SafepointEntry::DeoptimizationIndexField::encode(index);
- encoding |= SafepointEntry::GapCodeSizeField::encode(gap_size);
- encoding |= SafepointEntry::ArgumentsField::encode(info.arguments);
- encoding |= SafepointEntry::SaveDoublesField::encode(info.has_doubles);
- return encoding;
-}
-
-
-int SafepointTableBuilder::CountShortDeoptimizationIntervals(unsigned limit) {
- int result = 0;
- if (!deoptimization_info_.is_empty()) {
- unsigned previous_gap_end = deoptimization_info_[0].pc_after_gap;
- for (int i = 1, n = deoptimization_info_.length(); i < n; i++) {
- DeoptimizationInfo info = deoptimization_info_[i];
- if (static_cast<int>(info.deoptimization_index) !=
- Safepoint::kNoDeoptimizationIndex) {
- if (previous_gap_end + limit > info.pc) {
- result++;
- }
- previous_gap_end = info.pc_after_gap;
- }
- }
- }
- return result;
-}
-
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/safepoint-table.h b/src/3rdparty/v8/src/safepoint-table.h
deleted file mode 100644
index 084a0b4..0000000
--- a/src/3rdparty/v8/src/safepoint-table.h
+++ /dev/null
@@ -1,269 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SAFEPOINT_TABLE_H_
-#define V8_SAFEPOINT_TABLE_H_
-
-#include "heap.h"
-#include "v8memory.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-struct Register;
-
-class SafepointEntry BASE_EMBEDDED {
- public:
- SafepointEntry() : info_(0), bits_(NULL) {}
-
- SafepointEntry(unsigned info, uint8_t* bits) : info_(info), bits_(bits) {
- ASSERT(is_valid());
- }
-
- bool is_valid() const { return bits_ != NULL; }
-
- bool Equals(const SafepointEntry& other) const {
- return info_ == other.info_ && bits_ == other.bits_;
- }
-
- void Reset() {
- info_ = 0;
- bits_ = NULL;
- }
-
- int deoptimization_index() const {
- ASSERT(is_valid());
- return DeoptimizationIndexField::decode(info_);
- }
-
- int gap_code_size() const {
- ASSERT(is_valid());
- return GapCodeSizeField::decode(info_);
- }
-
- int argument_count() const {
- ASSERT(is_valid());
- return ArgumentsField::decode(info_);
- }
-
- bool has_doubles() const {
- ASSERT(is_valid());
- return SaveDoublesField::decode(info_);
- }
-
- uint8_t* bits() {
- ASSERT(is_valid());
- return bits_;
- }
-
- bool HasRegisters() const;
- bool HasRegisterAt(int reg_index) const;
-
- // Reserve 13 bits for the gap code size. On ARM a constant pool can be
- // emitted when generating the gap code. The size of the const pool is less
- // than what can be represented in 12 bits, so 13 bits gives room for having
- // instructions before potentially emitting a constant pool.
- static const int kGapCodeSizeBits = 13;
- static const int kArgumentsFieldBits = 3;
- static const int kSaveDoublesFieldBits = 1;
- static const int kDeoptIndexBits =
- 32 - kGapCodeSizeBits - kArgumentsFieldBits - kSaveDoublesFieldBits;
- class GapCodeSizeField: public BitField<unsigned, 0, kGapCodeSizeBits> {};
- class DeoptimizationIndexField: public BitField<int,
- kGapCodeSizeBits,
- kDeoptIndexBits> {}; // NOLINT
- class ArgumentsField: public BitField<unsigned,
- kGapCodeSizeBits + kDeoptIndexBits,
- kArgumentsFieldBits> {}; // NOLINT
- class SaveDoublesField: public BitField<bool,
- kGapCodeSizeBits + kDeoptIndexBits +
- kArgumentsFieldBits,
- kSaveDoublesFieldBits> { }; // NOLINT
-
- private:
- unsigned info_;
- uint8_t* bits_;
-};
-
-
-class SafepointTable BASE_EMBEDDED {
- public:
- explicit SafepointTable(Code* code);
-
- int size() const {
- return kHeaderSize +
- (length_ * (kPcAndDeoptimizationIndexSize + entry_size_)); }
- unsigned length() const { return length_; }
- unsigned entry_size() const { return entry_size_; }
-
- unsigned GetPcOffset(unsigned index) const {
- ASSERT(index < length_);
- return Memory::uint32_at(GetPcOffsetLocation(index));
- }
-
- SafepointEntry GetEntry(unsigned index) const {
- ASSERT(index < length_);
- unsigned info = Memory::uint32_at(GetInfoLocation(index));
- uint8_t* bits = &Memory::uint8_at(entries_ + (index * entry_size_));
- return SafepointEntry(info, bits);
- }
-
- // Returns the entry for the given pc.
- SafepointEntry FindEntry(Address pc) const;
-
- void PrintEntry(unsigned index) const;
-
- private:
- static const uint8_t kNoRegisters = 0xFF;
-
- static const int kLengthOffset = 0;
- static const int kEntrySizeOffset = kLengthOffset + kIntSize;
- static const int kHeaderSize = kEntrySizeOffset + kIntSize;
-
- static const int kPcSize = kIntSize;
- static const int kDeoptimizationIndexSize = kIntSize;
- static const int kPcAndDeoptimizationIndexSize =
- kPcSize + kDeoptimizationIndexSize;
-
- Address GetPcOffsetLocation(unsigned index) const {
- return pc_and_deoptimization_indexes_ +
- (index * kPcAndDeoptimizationIndexSize);
- }
-
- Address GetInfoLocation(unsigned index) const {
- return GetPcOffsetLocation(index) + kPcSize;
- }
-
- static void PrintBits(uint8_t byte, int digits);
-
- AssertNoAllocation no_allocation_;
- Code* code_;
- unsigned length_;
- unsigned entry_size_;
-
- Address pc_and_deoptimization_indexes_;
- Address entries_;
-
- friend class SafepointTableBuilder;
- friend class SafepointEntry;
-
- DISALLOW_COPY_AND_ASSIGN(SafepointTable);
-};
-
-
-class Safepoint BASE_EMBEDDED {
- public:
- typedef enum {
- kSimple = 0,
- kWithRegisters = 1 << 0,
- kWithDoubles = 1 << 1,
- kWithRegistersAndDoubles = kWithRegisters | kWithDoubles
- } Kind;
-
- static const int kNoDeoptimizationIndex =
- (1 << (SafepointEntry::kDeoptIndexBits)) - 1;
-
- void DefinePointerSlot(int index) { indexes_->Add(index); }
- void DefinePointerRegister(Register reg);
-
- private:
- Safepoint(ZoneList<int>* indexes, ZoneList<int>* registers) :
- indexes_(indexes), registers_(registers) { }
- ZoneList<int>* indexes_;
- ZoneList<int>* registers_;
-
- friend class SafepointTableBuilder;
-};
-
-
-class SafepointTableBuilder BASE_EMBEDDED {
- public:
- SafepointTableBuilder()
- : deoptimization_info_(32),
- indexes_(32),
- registers_(32),
- emitted_(false) { }
-
- // Get the offset of the emitted safepoint table in the code.
- unsigned GetCodeOffset() const;
-
- // Define a new safepoint for the current position in the body.
- Safepoint DefineSafepoint(Assembler* assembler,
- Safepoint::Kind kind,
- int arguments,
- int deoptimization_index);
-
- // Update the last safepoint with the size of the code generated until the
- // end of the gap following it.
- void SetPcAfterGap(int pc) {
- ASSERT(!deoptimization_info_.is_empty());
- int index = deoptimization_info_.length() - 1;
- deoptimization_info_[index].pc_after_gap = pc;
- }
-
- // Get the end pc offset of the last safepoint, including the code generated
- // until the end of the gap following it.
- unsigned GetPcAfterGap() {
- int index = deoptimization_info_.length();
- if (index == 0) return 0;
- return deoptimization_info_[index - 1].pc_after_gap;
- }
-
- // Emit the safepoint table after the body. The number of bits per
- // entry must be enough to hold all the pointer indexes.
- void Emit(Assembler* assembler, int bits_per_entry);
-
- // Count the number of deoptimization points where the next
- // following deoptimization point comes less than limit bytes
- // after the end of this point's gap.
- int CountShortDeoptimizationIntervals(unsigned limit);
-
- private:
- struct DeoptimizationInfo {
- unsigned pc;
- unsigned deoptimization_index;
- unsigned pc_after_gap;
- unsigned arguments;
- bool has_doubles;
- };
-
- uint32_t EncodeExceptPC(const DeoptimizationInfo& info);
-
- ZoneList<DeoptimizationInfo> deoptimization_info_;
- ZoneList<ZoneList<int>*> indexes_;
- ZoneList<ZoneList<int>*> registers_;
-
- unsigned offset_;
- bool emitted_;
-
- DISALLOW_COPY_AND_ASSIGN(SafepointTableBuilder);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_SAFEPOINT_TABLE_H_
diff --git a/src/3rdparty/v8/src/scanner-base.cc b/src/3rdparty/v8/src/scanner-base.cc
deleted file mode 100644
index 2066b5a..0000000
--- a/src/3rdparty/v8/src/scanner-base.cc
+++ /dev/null
@@ -1,964 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Features shared by parsing and pre-parsing scanners.
-
-#include "../include/v8stdint.h"
-#include "scanner-base.h"
-#include "char-predicates-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Compound predicates.
-
-bool ScannerConstants::IsIdentifier(unibrow::CharacterStream* buffer) {
- // Checks whether the buffer contains an identifier (no escape).
- if (!buffer->has_more()) return false;
- if (!kIsIdentifierStart.get(buffer->GetNext())) {
- return false;
- }
- while (buffer->has_more()) {
- if (!kIsIdentifierPart.get(buffer->GetNext())) {
- return false;
- }
- }
- return true;
-}
-
-// ----------------------------------------------------------------------------
-// Scanner
-
-Scanner::Scanner(ScannerConstants* scanner_constants)
- : scanner_constants_(scanner_constants),
- octal_pos_(kNoOctalLocation) {
-}
-
-
-uc32 Scanner::ScanHexEscape(uc32 c, int length) {
- ASSERT(length <= 4); // prevent overflow
-
- uc32 digits[4];
- uc32 x = 0;
- for (int i = 0; i < length; i++) {
- digits[i] = c0_;
- int d = HexValue(c0_);
- if (d < 0) {
- // According to ECMA-262, 3rd, 7.8.4, page 18, these hex escapes
- // should be illegal, but other JS VMs just return the
- // non-escaped version of the original character.
-
- // Push back digits read, except the last one (in c0_).
- for (int j = i-1; j >= 0; j--) {
- PushBack(digits[j]);
- }
- // Notice: No handling of error - treat it as "\u"->"u".
- return c;
- }
- x = x * 16 + d;
- Advance();
- }
-
- return x;
-}
-
-
-// Octal escapes of the forms '\0xx' and '\xxx' are not a part of
-// ECMA-262. Other JS VMs support them.
-uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
- uc32 x = c - '0';
- int i = 0;
- for (; i < length; i++) {
- int d = c0_ - '0';
- if (d < 0 || d > 7) break;
- int nx = x * 8 + d;
- if (nx >= 256) break;
- x = nx;
- Advance();
- }
- // Anything excelt '\0' is an octal escape sequence, illegal in strict mode.
- // Remember the position of octal escape sequences so that better error
- // can be reported later (in strict mode).
- if (c != '0' || i > 0) {
- octal_pos_ = source_pos() - i - 1; // Already advanced
- }
- return x;
-}
-
-
-// ----------------------------------------------------------------------------
-// JavaScriptScanner
-
-JavaScriptScanner::JavaScriptScanner(ScannerConstants* scanner_contants)
- : Scanner(scanner_contants) { }
-
-
-Token::Value JavaScriptScanner::Next() {
- current_ = next_;
- has_line_terminator_before_next_ = false;
- Scan();
- return current_.token;
-}
-
-
-static inline bool IsByteOrderMark(uc32 c) {
- // The Unicode value U+FFFE is guaranteed never to be assigned as a
- // Unicode character; this implies that in a Unicode context the
- // 0xFF, 0xFE byte pattern can only be interpreted as the U+FEFF
- // character expressed in little-endian byte order (since it could
- // not be a U+FFFE character expressed in big-endian byte
- // order). Nevertheless, we check for it to be compatible with
- // Spidermonkey.
- return c == 0xFEFF || c == 0xFFFE;
-}
-
-
-bool JavaScriptScanner::SkipWhiteSpace() {
- int start_position = source_pos();
-
- while (true) {
- // We treat byte-order marks (BOMs) as whitespace for better
- // compatibility with Spidermonkey and other JavaScript engines.
- while (scanner_constants_->IsWhiteSpace(c0_) || IsByteOrderMark(c0_)) {
- // IsWhiteSpace() includes line terminators!
- if (scanner_constants_->IsLineTerminator(c0_)) {
- // Ignore line terminators, but remember them. This is necessary
- // for automatic semicolon insertion.
- has_line_terminator_before_next_ = true;
- }
- Advance();
- }
-
- // If there is an HTML comment end '-->' at the beginning of a
- // line (with only whitespace in front of it), we treat the rest
- // of the line as a comment. This is in line with the way
- // SpiderMonkey handles it.
- if (c0_ == '-' && has_line_terminator_before_next_) {
- Advance();
- if (c0_ == '-') {
- Advance();
- if (c0_ == '>') {
- // Treat the rest of the line as a comment.
- SkipSingleLineComment();
- // Continue skipping white space after the comment.
- continue;
- }
- PushBack('-'); // undo Advance()
- }
- PushBack('-'); // undo Advance()
- }
- // Return whether or not we skipped any characters.
- return source_pos() != start_position;
- }
-}
-
-
-Token::Value JavaScriptScanner::SkipSingleLineComment() {
- Advance();
-
- // The line terminator at the end of the line is not considered
- // to be part of the single-line comment; it is recognized
- // separately by the lexical grammar and becomes part of the
- // stream of input elements for the syntactic grammar (see
- // ECMA-262, section 7.4, page 12).
- while (c0_ >= 0 && !scanner_constants_->IsLineTerminator(c0_)) {
- Advance();
- }
-
- return Token::WHITESPACE;
-}
-
-
-Token::Value JavaScriptScanner::SkipMultiLineComment() {
- ASSERT(c0_ == '*');
- Advance();
-
- while (c0_ >= 0) {
- char ch = c0_;
- Advance();
- // If we have reached the end of the multi-line comment, we
- // consume the '/' and insert a whitespace. This way all
- // multi-line comments are treated as whitespace - even the ones
- // containing line terminators. This contradicts ECMA-262, section
- // 7.4, page 12, that says that multi-line comments containing
- // line terminators should be treated as a line terminator, but it
- // matches the behaviour of SpiderMonkey and KJS.
- if (ch == '*' && c0_ == '/') {
- c0_ = ' ';
- return Token::WHITESPACE;
- }
- }
-
- // Unterminated multi-line comment.
- return Token::ILLEGAL;
-}
-
-
-Token::Value JavaScriptScanner::ScanHtmlComment() {
- // Check for <!-- comments.
- ASSERT(c0_ == '!');
- Advance();
- if (c0_ == '-') {
- Advance();
- if (c0_ == '-') return SkipSingleLineComment();
- PushBack('-'); // undo Advance()
- }
- PushBack('!'); // undo Advance()
- ASSERT(c0_ == '!');
- return Token::LT;
-}
-
-
-void JavaScriptScanner::Scan() {
- next_.literal_chars = NULL;
- Token::Value token;
- do {
- // Remember the position of the next token
- next_.location.beg_pos = source_pos();
-
- switch (c0_) {
- case ' ':
- case '\t':
- Advance();
- token = Token::WHITESPACE;
- break;
-
- case '\n':
- Advance();
- has_line_terminator_before_next_ = true;
- token = Token::WHITESPACE;
- break;
-
- case '"': case '\'':
- token = ScanString();
- break;
-
- case '<':
- // < <= << <<= <!--
- Advance();
- if (c0_ == '=') {
- token = Select(Token::LTE);
- } else if (c0_ == '<') {
- token = Select('=', Token::ASSIGN_SHL, Token::SHL);
- } else if (c0_ == '!') {
- token = ScanHtmlComment();
- } else {
- token = Token::LT;
- }
- break;
-
- case '>':
- // > >= >> >>= >>> >>>=
- Advance();
- if (c0_ == '=') {
- token = Select(Token::GTE);
- } else if (c0_ == '>') {
- // >> >>= >>> >>>=
- Advance();
- if (c0_ == '=') {
- token = Select(Token::ASSIGN_SAR);
- } else if (c0_ == '>') {
- token = Select('=', Token::ASSIGN_SHR, Token::SHR);
- } else {
- token = Token::SAR;
- }
- } else {
- token = Token::GT;
- }
- break;
-
- case '=':
- // = == ===
- Advance();
- if (c0_ == '=') {
- token = Select('=', Token::EQ_STRICT, Token::EQ);
- } else {
- token = Token::ASSIGN;
- }
- break;
-
- case '!':
- // ! != !==
- Advance();
- if (c0_ == '=') {
- token = Select('=', Token::NE_STRICT, Token::NE);
- } else {
- token = Token::NOT;
- }
- break;
-
- case '+':
- // + ++ +=
- Advance();
- if (c0_ == '+') {
- token = Select(Token::INC);
- } else if (c0_ == '=') {
- token = Select(Token::ASSIGN_ADD);
- } else {
- token = Token::ADD;
- }
- break;
-
- case '-':
- // - -- --> -=
- Advance();
- if (c0_ == '-') {
- Advance();
- if (c0_ == '>' && has_line_terminator_before_next_) {
- // For compatibility with SpiderMonkey, we skip lines that
- // start with an HTML comment end '-->'.
- token = SkipSingleLineComment();
- } else {
- token = Token::DEC;
- }
- } else if (c0_ == '=') {
- token = Select(Token::ASSIGN_SUB);
- } else {
- token = Token::SUB;
- }
- break;
-
- case '*':
- // * *=
- token = Select('=', Token::ASSIGN_MUL, Token::MUL);
- break;
-
- case '%':
- // % %=
- token = Select('=', Token::ASSIGN_MOD, Token::MOD);
- break;
-
- case '/':
- // / // /* /=
- Advance();
- if (c0_ == '/') {
- token = SkipSingleLineComment();
- } else if (c0_ == '*') {
- token = SkipMultiLineComment();
- } else if (c0_ == '=') {
- token = Select(Token::ASSIGN_DIV);
- } else {
- token = Token::DIV;
- }
- break;
-
- case '&':
- // & && &=
- Advance();
- if (c0_ == '&') {
- token = Select(Token::AND);
- } else if (c0_ == '=') {
- token = Select(Token::ASSIGN_BIT_AND);
- } else {
- token = Token::BIT_AND;
- }
- break;
-
- case '|':
- // | || |=
- Advance();
- if (c0_ == '|') {
- token = Select(Token::OR);
- } else if (c0_ == '=') {
- token = Select(Token::ASSIGN_BIT_OR);
- } else {
- token = Token::BIT_OR;
- }
- break;
-
- case '^':
- // ^ ^=
- token = Select('=', Token::ASSIGN_BIT_XOR, Token::BIT_XOR);
- break;
-
- case '.':
- // . Number
- Advance();
- if (IsDecimalDigit(c0_)) {
- token = ScanNumber(true);
- } else {
- token = Token::PERIOD;
- }
- break;
-
- case ':':
- token = Select(Token::COLON);
- break;
-
- case ';':
- token = Select(Token::SEMICOLON);
- break;
-
- case ',':
- token = Select(Token::COMMA);
- break;
-
- case '(':
- token = Select(Token::LPAREN);
- break;
-
- case ')':
- token = Select(Token::RPAREN);
- break;
-
- case '[':
- token = Select(Token::LBRACK);
- break;
-
- case ']':
- token = Select(Token::RBRACK);
- break;
-
- case '{':
- token = Select(Token::LBRACE);
- break;
-
- case '}':
- token = Select(Token::RBRACE);
- break;
-
- case '?':
- token = Select(Token::CONDITIONAL);
- break;
-
- case '~':
- token = Select(Token::BIT_NOT);
- break;
-
- default:
- if (scanner_constants_->IsIdentifierStart(c0_)) {
- token = ScanIdentifierOrKeyword();
- } else if (IsDecimalDigit(c0_)) {
- token = ScanNumber(false);
- } else if (SkipWhiteSpace()) {
- token = Token::WHITESPACE;
- } else if (c0_ < 0) {
- token = Token::EOS;
- } else {
- token = Select(Token::ILLEGAL);
- }
- break;
- }
-
- // Continue scanning for tokens as long as we're just skipping
- // whitespace.
- } while (token == Token::WHITESPACE);
-
- next_.location.end_pos = source_pos();
- next_.token = token;
-}
-
-
-void JavaScriptScanner::SeekForward(int pos) {
- // After this call, we will have the token at the given position as
- // the "next" token. The "current" token will be invalid.
- if (pos == next_.location.beg_pos) return;
- int current_pos = source_pos();
- ASSERT_EQ(next_.location.end_pos, current_pos);
- // Positions inside the lookahead token aren't supported.
- ASSERT(pos >= current_pos);
- if (pos != current_pos) {
- source_->SeekForward(pos - source_->pos());
- Advance();
- // This function is only called to seek to the location
- // of the end of a function (at the "}" token). It doesn't matter
- // whether there was a line terminator in the part we skip.
- has_line_terminator_before_next_ = false;
- }
- Scan();
-}
-
-
-void JavaScriptScanner::ScanEscape() {
- uc32 c = c0_;
- Advance();
-
- // Skip escaped newlines.
- if (scanner_constants_->IsLineTerminator(c)) {
- // Allow CR+LF newlines in multiline string literals.
- if (IsCarriageReturn(c) && IsLineFeed(c0_)) Advance();
- // Allow LF+CR newlines in multiline string literals.
- if (IsLineFeed(c) && IsCarriageReturn(c0_)) Advance();
- return;
- }
-
- switch (c) {
- case '\'': // fall through
- case '"' : // fall through
- case '\\': break;
- case 'b' : c = '\b'; break;
- case 'f' : c = '\f'; break;
- case 'n' : c = '\n'; break;
- case 'r' : c = '\r'; break;
- case 't' : c = '\t'; break;
- case 'u' : c = ScanHexEscape(c, 4); break;
- case 'v' : c = '\v'; break;
- case 'x' : c = ScanHexEscape(c, 2); break;
- case '0' : // fall through
- case '1' : // fall through
- case '2' : // fall through
- case '3' : // fall through
- case '4' : // fall through
- case '5' : // fall through
- case '6' : // fall through
- case '7' : c = ScanOctalEscape(c, 2); break;
- }
-
- // According to ECMA-262, 3rd, 7.8.4 (p 18ff) these
- // should be illegal, but they are commonly handled
- // as non-escaped characters by JS VMs.
- AddLiteralChar(c);
-}
-
-
-Token::Value JavaScriptScanner::ScanString() {
- uc32 quote = c0_;
- Advance(); // consume quote
-
- LiteralScope literal(this);
- while (c0_ != quote && c0_ >= 0
- && !scanner_constants_->IsLineTerminator(c0_)) {
- uc32 c = c0_;
- Advance();
- if (c == '\\') {
- if (c0_ < 0) return Token::ILLEGAL;
- ScanEscape();
- } else {
- AddLiteralChar(c);
- }
- }
- if (c0_ != quote) return Token::ILLEGAL;
- literal.Complete();
-
- Advance(); // consume quote
- return Token::STRING;
-}
-
-
-void JavaScriptScanner::ScanDecimalDigits() {
- while (IsDecimalDigit(c0_))
- AddLiteralCharAdvance();
-}
-
-
-Token::Value JavaScriptScanner::ScanNumber(bool seen_period) {
- ASSERT(IsDecimalDigit(c0_)); // the first digit of the number or the fraction
-
- enum { DECIMAL, HEX, OCTAL } kind = DECIMAL;
-
- LiteralScope literal(this);
- if (seen_period) {
- // we have already seen a decimal point of the float
- AddLiteralChar('.');
- ScanDecimalDigits(); // we know we have at least one digit
-
- } else {
- // if the first character is '0' we must check for octals and hex
- if (c0_ == '0') {
- AddLiteralCharAdvance();
-
- // either 0, 0exxx, 0Exxx, 0.xxx, an octal number, or a hex number
- if (c0_ == 'x' || c0_ == 'X') {
- // hex number
- kind = HEX;
- AddLiteralCharAdvance();
- if (!IsHexDigit(c0_)) {
- // we must have at least one hex digit after 'x'/'X'
- return Token::ILLEGAL;
- }
- while (IsHexDigit(c0_)) {
- AddLiteralCharAdvance();
- }
- } else if ('0' <= c0_ && c0_ <= '7') {
- // (possible) octal number
- kind = OCTAL;
- while (true) {
- if (c0_ == '8' || c0_ == '9') {
- kind = DECIMAL;
- break;
- }
- if (c0_ < '0' || '7' < c0_) {
- // Octal literal finished.
- octal_pos_ = next_.location.beg_pos;
- break;
- }
- AddLiteralCharAdvance();
- }
- }
- }
-
- // Parse decimal digits and allow trailing fractional part.
- if (kind == DECIMAL) {
- ScanDecimalDigits(); // optional
- if (c0_ == '.') {
- AddLiteralCharAdvance();
- ScanDecimalDigits(); // optional
- }
- }
- }
-
- // scan exponent, if any
- if (c0_ == 'e' || c0_ == 'E') {
- ASSERT(kind != HEX); // 'e'/'E' must be scanned as part of the hex number
- if (kind == OCTAL) return Token::ILLEGAL; // no exponent for octals allowed
- // scan exponent
- AddLiteralCharAdvance();
- if (c0_ == '+' || c0_ == '-')
- AddLiteralCharAdvance();
- if (!IsDecimalDigit(c0_)) {
- // we must have at least one decimal digit after 'e'/'E'
- return Token::ILLEGAL;
- }
- ScanDecimalDigits();
- }
-
- // The source character immediately following a numeric literal must
- // not be an identifier start or a decimal digit; see ECMA-262
- // section 7.8.3, page 17 (note that we read only one decimal digit
- // if the value is 0).
- if (IsDecimalDigit(c0_) || scanner_constants_->IsIdentifierStart(c0_))
- return Token::ILLEGAL;
-
- literal.Complete();
-
- return Token::NUMBER;
-}
-
-
-uc32 JavaScriptScanner::ScanIdentifierUnicodeEscape() {
- Advance();
- if (c0_ != 'u') return unibrow::Utf8::kBadChar;
- Advance();
- uc32 c = ScanHexEscape('u', 4);
- // We do not allow a unicode escape sequence to start another
- // unicode escape sequence.
- if (c == '\\') return unibrow::Utf8::kBadChar;
- return c;
-}
-
-
-Token::Value JavaScriptScanner::ScanIdentifierOrKeyword() {
- ASSERT(scanner_constants_->IsIdentifierStart(c0_));
- LiteralScope literal(this);
- KeywordMatcher keyword_match;
- // Scan identifier start character.
- if (c0_ == '\\') {
- uc32 c = ScanIdentifierUnicodeEscape();
- // Only allow legal identifier start characters.
- if (!scanner_constants_->IsIdentifierStart(c)) return Token::ILLEGAL;
- AddLiteralChar(c);
- return ScanIdentifierSuffix(&literal);
- }
-
- uc32 first_char = c0_;
- Advance();
- AddLiteralChar(first_char);
- if (!keyword_match.AddChar(first_char)) {
- return ScanIdentifierSuffix(&literal);
- }
-
- // Scan the rest of the identifier characters.
- while (scanner_constants_->IsIdentifierPart(c0_)) {
- if (c0_ != '\\') {
- uc32 next_char = c0_;
- Advance();
- AddLiteralChar(next_char);
- if (keyword_match.AddChar(next_char)) continue;
- }
- // Fallthrough if no loner able to complete keyword.
- return ScanIdentifierSuffix(&literal);
- }
- literal.Complete();
-
- return keyword_match.token();
-}
-
-
-Token::Value JavaScriptScanner::ScanIdentifierSuffix(LiteralScope* literal) {
- // Scan the rest of the identifier characters.
- while (scanner_constants_->IsIdentifierPart(c0_)) {
- if (c0_ == '\\') {
- uc32 c = ScanIdentifierUnicodeEscape();
- // Only allow legal identifier part characters.
- if (!scanner_constants_->IsIdentifierPart(c)) return Token::ILLEGAL;
- AddLiteralChar(c);
- } else {
- AddLiteralChar(c0_);
- Advance();
- }
- }
- literal->Complete();
-
- return Token::IDENTIFIER;
-}
-
-
-bool JavaScriptScanner::ScanRegExpPattern(bool seen_equal) {
- // Scan: ('/' | '/=') RegularExpressionBody '/' RegularExpressionFlags
- bool in_character_class = false;
-
- // Previous token is either '/' or '/=', in the second case, the
- // pattern starts at =.
- next_.location.beg_pos = source_pos() - (seen_equal ? 2 : 1);
- next_.location.end_pos = source_pos() - (seen_equal ? 1 : 0);
-
- // Scan regular expression body: According to ECMA-262, 3rd, 7.8.5,
- // the scanner should pass uninterpreted bodies to the RegExp
- // constructor.
- LiteralScope literal(this);
- if (seen_equal)
- AddLiteralChar('=');
-
- while (c0_ != '/' || in_character_class) {
- if (scanner_constants_->IsLineTerminator(c0_) || c0_ < 0) return false;
- if (c0_ == '\\') { // Escape sequence.
- AddLiteralCharAdvance();
- if (scanner_constants_->IsLineTerminator(c0_) || c0_ < 0) return false;
- AddLiteralCharAdvance();
- // If the escape allows more characters, i.e., \x??, \u????, or \c?,
- // only "safe" characters are allowed (letters, digits, underscore),
- // otherwise the escape isn't valid and the invalid character has
- // its normal meaning. I.e., we can just continue scanning without
- // worrying whether the following characters are part of the escape
- // or not, since any '/', '\\' or '[' is guaranteed to not be part
- // of the escape sequence.
- } else { // Unescaped character.
- if (c0_ == '[') in_character_class = true;
- if (c0_ == ']') in_character_class = false;
- AddLiteralCharAdvance();
- }
- }
- Advance(); // consume '/'
-
- literal.Complete();
-
- return true;
-}
-
-
-bool JavaScriptScanner::ScanRegExpFlags() {
- // Scan regular expression flags.
- LiteralScope literal(this);
- while (scanner_constants_->IsIdentifierPart(c0_)) {
- if (c0_ == '\\') {
- uc32 c = ScanIdentifierUnicodeEscape();
- if (c != static_cast<uc32>(unibrow::Utf8::kBadChar)) {
- // We allow any escaped character, unlike the restriction on
- // IdentifierPart when it is used to build an IdentifierName.
- AddLiteralChar(c);
- continue;
- }
- }
- AddLiteralCharAdvance();
- }
- literal.Complete();
-
- next_.location.end_pos = source_pos() - 1;
- return true;
-}
-
-// ----------------------------------------------------------------------------
-// Keyword Matcher
-
-KeywordMatcher::FirstState KeywordMatcher::first_states_[] = {
- { "break", KEYWORD_PREFIX, Token::BREAK },
- { NULL, C, Token::ILLEGAL },
- { NULL, D, Token::ILLEGAL },
- { NULL, E, Token::ILLEGAL },
- { NULL, F, Token::ILLEGAL },
- { NULL, UNMATCHABLE, Token::ILLEGAL },
- { NULL, UNMATCHABLE, Token::ILLEGAL },
- { NULL, I, Token::ILLEGAL },
- { NULL, UNMATCHABLE, Token::ILLEGAL },
- { NULL, UNMATCHABLE, Token::ILLEGAL },
- { "let", KEYWORD_PREFIX, Token::FUTURE_RESERVED_WORD },
- { NULL, UNMATCHABLE, Token::ILLEGAL },
- { NULL, N, Token::ILLEGAL },
- { NULL, UNMATCHABLE, Token::ILLEGAL },
- { NULL, P, Token::ILLEGAL },
- { NULL, UNMATCHABLE, Token::ILLEGAL },
- { "return", KEYWORD_PREFIX, Token::RETURN },
- { NULL, S, Token::ILLEGAL },
- { NULL, T, Token::ILLEGAL },
- { NULL, UNMATCHABLE, Token::ILLEGAL },
- { NULL, V, Token::ILLEGAL },
- { NULL, W, Token::ILLEGAL },
- { NULL, UNMATCHABLE, Token::ILLEGAL },
- { "yield", KEYWORD_PREFIX, Token::FUTURE_RESERVED_WORD }
-};
-
-
-void KeywordMatcher::Step(unibrow::uchar input) {
- switch (state_) {
- case INITIAL: {
- // matching the first character is the only state with significant fanout.
- // Match only lower-case letters in range 'b'..'y'.
- unsigned int offset = input - kFirstCharRangeMin;
- if (offset < kFirstCharRangeLength) {
- state_ = first_states_[offset].state;
- if (state_ == KEYWORD_PREFIX) {
- keyword_ = first_states_[offset].keyword;
- counter_ = 1;
- keyword_token_ = first_states_[offset].token;
- }
- return;
- }
- break;
- }
- case KEYWORD_PREFIX:
- if (static_cast<unibrow::uchar>(keyword_[counter_]) == input) {
- counter_++;
- if (keyword_[counter_] == '\0') {
- state_ = KEYWORD_MATCHED;
- token_ = keyword_token_;
- }
- return;
- }
- break;
- case KEYWORD_MATCHED:
- token_ = Token::IDENTIFIER;
- break;
- case C:
- if (MatchState(input, 'a', CA)) return;
- if (MatchKeywordStart(input, "class", 1,
- Token::FUTURE_RESERVED_WORD)) return;
- if (MatchState(input, 'o', CO)) return;
- break;
- case CA:
- if (MatchKeywordStart(input, "case", 2, Token::CASE)) return;
- if (MatchKeywordStart(input, "catch", 2, Token::CATCH)) return;
- break;
- case CO:
- if (MatchState(input, 'n', CON)) return;
- break;
- case CON:
- if (MatchKeywordStart(input, "const", 3, Token::CONST)) return;
- if (MatchKeywordStart(input, "continue", 3, Token::CONTINUE)) return;
- break;
- case D:
- if (MatchState(input, 'e', DE)) return;
- if (MatchKeyword(input, 'o', KEYWORD_MATCHED, Token::DO)) return;
- break;
- case DE:
- if (MatchKeywordStart(input, "debugger", 2, Token::DEBUGGER)) return;
- if (MatchKeywordStart(input, "default", 2, Token::DEFAULT)) return;
- if (MatchKeywordStart(input, "delete", 2, Token::DELETE)) return;
- break;
- case E:
- if (MatchKeywordStart(input, "else", 1, Token::ELSE)) return;
- if (MatchKeywordStart(input, "enum", 1,
- Token::FUTURE_RESERVED_WORD)) return;
- if (MatchState(input, 'x', EX)) return;
- break;
- case EX:
- if (MatchKeywordStart(input, "export", 2,
- Token::FUTURE_RESERVED_WORD)) return;
- if (MatchKeywordStart(input, "extends", 2,
- Token::FUTURE_RESERVED_WORD)) return;
- break;
- case F:
- if (MatchKeywordStart(input, "false", 1, Token::FALSE_LITERAL)) return;
- if (MatchKeywordStart(input, "finally", 1, Token::FINALLY)) return;
- if (MatchKeywordStart(input, "for", 1, Token::FOR)) return;
- if (MatchKeywordStart(input, "function", 1, Token::FUNCTION)) return;
- break;
- case I:
- if (MatchKeyword(input, 'f', KEYWORD_MATCHED, Token::IF)) return;
- if (MatchState(input, 'm', IM)) return;
- if (MatchKeyword(input, 'n', IN, Token::IN)) return;
- break;
- case IM:
- if (MatchState(input, 'p', IMP)) return;
- break;
- case IMP:
- if (MatchKeywordStart(input, "implements", 3,
- Token::FUTURE_RESERVED_WORD )) return;
- if (MatchKeywordStart(input, "import", 3,
- Token::FUTURE_RESERVED_WORD)) return;
- break;
- case IN:
- token_ = Token::IDENTIFIER;
- if (MatchKeywordStart(input, "interface", 2,
- Token::FUTURE_RESERVED_WORD)) return;
- if (MatchKeywordStart(input, "instanceof", 2, Token::INSTANCEOF)) return;
- break;
- case N:
- if (MatchKeywordStart(input, "native", 1, Token::NATIVE)) return;
- if (MatchKeywordStart(input, "new", 1, Token::NEW)) return;
- if (MatchKeywordStart(input, "null", 1, Token::NULL_LITERAL)) return;
- break;
- case P:
- if (MatchKeywordStart(input, "package", 1,
- Token::FUTURE_RESERVED_WORD)) return;
- if (MatchState(input, 'r', PR)) return;
- if (MatchKeywordStart(input, "public", 1,
- Token::FUTURE_RESERVED_WORD)) return;
- break;
- case PR:
- if (MatchKeywordStart(input, "private", 2,
- Token::FUTURE_RESERVED_WORD)) return;
- if (MatchKeywordStart(input, "protected", 2,
- Token::FUTURE_RESERVED_WORD)) return;
- break;
- case S:
- if (MatchKeywordStart(input, "static", 1,
- Token::FUTURE_RESERVED_WORD)) return;
- if (MatchKeywordStart(input, "super", 1,
- Token::FUTURE_RESERVED_WORD)) return;
- if (MatchKeywordStart(input, "switch", 1,
- Token::SWITCH)) return;
- break;
- case T:
- if (MatchState(input, 'h', TH)) return;
- if (MatchState(input, 'r', TR)) return;
- if (MatchKeywordStart(input, "typeof", 1, Token::TYPEOF)) return;
- break;
- case TH:
- if (MatchKeywordStart(input, "this", 2, Token::THIS)) return;
- if (MatchKeywordStart(input, "throw", 2, Token::THROW)) return;
- break;
- case TR:
- if (MatchKeywordStart(input, "true", 2, Token::TRUE_LITERAL)) return;
- if (MatchKeyword(input, 'y', KEYWORD_MATCHED, Token::TRY)) return;
- break;
- case V:
- if (MatchKeywordStart(input, "var", 1, Token::VAR)) return;
- if (MatchKeywordStart(input, "void", 1, Token::VOID)) return;
- break;
- case W:
- if (MatchKeywordStart(input, "while", 1, Token::WHILE)) return;
- if (MatchKeywordStart(input, "with", 1, Token::WITH)) return;
- break;
- case UNMATCHABLE:
- break;
- }
- // On fallthrough, it's a failure.
- state_ = UNMATCHABLE;
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/scanner-base.h b/src/3rdparty/v8/src/scanner-base.h
deleted file mode 100644
index 552f387..0000000
--- a/src/3rdparty/v8/src/scanner-base.h
+++ /dev/null
@@ -1,664 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Features shared by parsing and pre-parsing scanners.
-
-#ifndef V8_SCANNER_BASE_H_
-#define V8_SCANNER_BASE_H_
-
-#include "globals.h"
-#include "checks.h"
-#include "allocation.h"
-#include "token.h"
-#include "unicode-inl.h"
-#include "char-predicates.h"
-#include "utils.h"
-#include "list-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Returns the value (0 .. 15) of a hexadecimal character c.
-// If c is not a legal hexadecimal character, returns a value < 0.
-inline int HexValue(uc32 c) {
- c -= '0';
- if (static_cast<unsigned>(c) <= 9) return c;
- c = (c | 0x20) - ('a' - '0'); // detect 0x11..0x16 and 0x31..0x36.
- if (static_cast<unsigned>(c) <= 5) return c + 10;
- return -1;
-}
-
-
-// ---------------------------------------------------------------------
-// Buffered stream of characters, using an internal UC16 buffer.
-
-class UC16CharacterStream {
- public:
- UC16CharacterStream() : pos_(0) { }
- virtual ~UC16CharacterStream() { }
-
- // Returns and advances past the next UC16 character in the input
- // stream. If there are no more characters, it returns a negative
- // value.
- inline uc32 Advance() {
- if (buffer_cursor_ < buffer_end_ || ReadBlock()) {
- pos_++;
- return static_cast<uc32>(*(buffer_cursor_++));
- }
- // Note: currently the following increment is necessary to avoid a
- // parser problem! The scanner treats the final kEndOfInput as
- // a character with a position, and does math relative to that
- // position.
- pos_++;
-
- return kEndOfInput;
- }
-
- // Return the current position in the character stream.
- // Starts at zero.
- inline unsigned pos() const { return pos_; }
-
- // Skips forward past the next character_count UC16 characters
- // in the input, or until the end of input if that comes sooner.
- // Returns the number of characters actually skipped. If less
- // than character_count,
- inline unsigned SeekForward(unsigned character_count) {
- unsigned buffered_chars =
- static_cast<unsigned>(buffer_end_ - buffer_cursor_);
- if (character_count <= buffered_chars) {
- buffer_cursor_ += character_count;
- pos_ += character_count;
- return character_count;
- }
- return SlowSeekForward(character_count);
- }
-
- // Pushes back the most recently read UC16 character (or negative
- // value if at end of input), i.e., the value returned by the most recent
- // call to Advance.
- // Must not be used right after calling SeekForward.
- virtual void PushBack(int32_t character) = 0;
-
- protected:
- static const uc32 kEndOfInput = -1;
-
- // Ensures that the buffer_cursor_ points to the character at
- // position pos_ of the input, if possible. If the position
- // is at or after the end of the input, return false. If there
- // are more characters available, return true.
- virtual bool ReadBlock() = 0;
- virtual unsigned SlowSeekForward(unsigned character_count) = 0;
-
- const uc16* buffer_cursor_;
- const uc16* buffer_end_;
- unsigned pos_;
-};
-
-
-class ScannerConstants {
-// ---------------------------------------------------------------------
-// Constants used by scanners.
- public:
- ScannerConstants() {}
- typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
-
- StaticResource<Utf8Decoder>* utf8_decoder() {
- return &utf8_decoder_;
- }
-
- bool IsIdentifierStart(unibrow::uchar c) { return kIsIdentifierStart.get(c); }
- bool IsIdentifierPart(unibrow::uchar c) { return kIsIdentifierPart.get(c); }
- bool IsLineTerminator(unibrow::uchar c) { return kIsLineTerminator.get(c); }
- bool IsWhiteSpace(unibrow::uchar c) { return kIsWhiteSpace.get(c); }
-
- bool IsIdentifier(unibrow::CharacterStream* buffer);
-
- private:
-
- unibrow::Predicate<IdentifierStart, 128> kIsIdentifierStart;
- unibrow::Predicate<IdentifierPart, 128> kIsIdentifierPart;
- unibrow::Predicate<unibrow::LineTerminator, 128> kIsLineTerminator;
- unibrow::Predicate<unibrow::WhiteSpace, 128> kIsWhiteSpace;
- StaticResource<Utf8Decoder> utf8_decoder_;
-
- DISALLOW_COPY_AND_ASSIGN(ScannerConstants);
-};
-
-// ----------------------------------------------------------------------------
-// LiteralBuffer - Collector of chars of literals.
-
-class LiteralBuffer {
- public:
- LiteralBuffer() : is_ascii_(true), position_(0), backing_store_() { }
-
- ~LiteralBuffer() {
- if (backing_store_.length() > 0) {
- backing_store_.Dispose();
- }
- }
-
- inline void AddChar(uc16 character) {
- if (position_ >= backing_store_.length()) ExpandBuffer();
- if (is_ascii_) {
- if (character < kMaxAsciiCharCodeU) {
- backing_store_[position_] = static_cast<byte>(character);
- position_ += kASCIISize;
- return;
- }
- ConvertToUC16();
- }
- *reinterpret_cast<uc16*>(&backing_store_[position_]) = character;
- position_ += kUC16Size;
- }
-
- bool is_ascii() { return is_ascii_; }
-
- Vector<const uc16> uc16_literal() {
- ASSERT(!is_ascii_);
- ASSERT((position_ & 0x1) == 0);
- return Vector<const uc16>(
- reinterpret_cast<const uc16*>(backing_store_.start()),
- position_ >> 1);
- }
-
- Vector<const char> ascii_literal() {
- ASSERT(is_ascii_);
- return Vector<const char>(
- reinterpret_cast<const char*>(backing_store_.start()),
- position_);
- }
-
- int length() {
- return is_ascii_ ? position_ : (position_ >> 1);
- }
-
- void Reset() {
- position_ = 0;
- is_ascii_ = true;
- }
- private:
- static const int kInitialCapacity = 16;
- static const int kGrowthFactory = 4;
- static const int kMinConversionSlack = 256;
- static const int kMaxGrowth = 1 * MB;
- inline int NewCapacity(int min_capacity) {
- int capacity = Max(min_capacity, backing_store_.length());
- int new_capacity = Min(capacity * kGrowthFactory, capacity + kMaxGrowth);
- return new_capacity;
- }
-
- void ExpandBuffer() {
- Vector<byte> new_store = Vector<byte>::New(NewCapacity(kInitialCapacity));
- memcpy(new_store.start(), backing_store_.start(), position_);
- backing_store_.Dispose();
- backing_store_ = new_store;
- }
-
- void ConvertToUC16() {
- ASSERT(is_ascii_);
- Vector<byte> new_store;
- int new_content_size = position_ * kUC16Size;
- if (new_content_size >= backing_store_.length()) {
- // Ensure room for all currently read characters as UC16 as well
- // as the character about to be stored.
- new_store = Vector<byte>::New(NewCapacity(new_content_size));
- } else {
- new_store = backing_store_;
- }
- char* src = reinterpret_cast<char*>(backing_store_.start());
- uc16* dst = reinterpret_cast<uc16*>(new_store.start());
- for (int i = position_ - 1; i >= 0; i--) {
- dst[i] = src[i];
- }
- if (new_store.start() != backing_store_.start()) {
- backing_store_.Dispose();
- backing_store_ = new_store;
- }
- position_ = new_content_size;
- is_ascii_ = false;
- }
-
- bool is_ascii_;
- int position_;
- Vector<byte> backing_store_;
-
- DISALLOW_COPY_AND_ASSIGN(LiteralBuffer);
-};
-
-
-// ----------------------------------------------------------------------------
-// Scanner base-class.
-
-// Generic functionality used by both JSON and JavaScript scanners.
-class Scanner {
- public:
- // -1 is outside of the range of any real source code.
- static const int kNoOctalLocation = -1;
-
- typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
-
- class LiteralScope {
- public:
- explicit LiteralScope(Scanner* self);
- ~LiteralScope();
- void Complete();
-
- private:
- Scanner* scanner_;
- bool complete_;
- };
-
- explicit Scanner(ScannerConstants* scanner_contants);
-
- // Returns the current token again.
- Token::Value current_token() { return current_.token; }
-
- // One token look-ahead (past the token returned by Next()).
- Token::Value peek() const { return next_.token; }
-
- struct Location {
- Location(int b, int e) : beg_pos(b), end_pos(e) { }
- Location() : beg_pos(0), end_pos(0) { }
-
- bool IsValid() const {
- return beg_pos >= 0 && end_pos >= beg_pos;
- }
-
- int beg_pos;
- int end_pos;
- };
-
- static Location NoLocation() {
- return Location(-1, -1);
- }
-
- // Returns the location information for the current token
- // (the token returned by Next()).
- Location location() const { return current_.location; }
- Location peek_location() const { return next_.location; }
-
- // Returns the location of the last seen octal literal
- int octal_position() const { return octal_pos_; }
- void clear_octal_position() { octal_pos_ = -1; }
-
- // Returns the literal string, if any, for the current token (the
- // token returned by Next()). The string is 0-terminated and in
- // UTF-8 format; they may contain 0-characters. Literal strings are
- // collected for identifiers, strings, and numbers.
- // These functions only give the correct result if the literal
- // was scanned between calls to StartLiteral() and TerminateLiteral().
- bool is_literal_ascii() {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->is_ascii();
- }
- Vector<const char> literal_ascii_string() {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->ascii_literal();
- }
- Vector<const uc16> literal_uc16_string() {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->uc16_literal();
- }
- int literal_length() const {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->length();
- }
-
- // Returns the literal string for the next token (the token that
- // would be returned if Next() were called).
- bool is_next_literal_ascii() {
- ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->is_ascii();
- }
- Vector<const char> next_literal_ascii_string() {
- ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->ascii_literal();
- }
- Vector<const uc16> next_literal_uc16_string() {
- ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->uc16_literal();
- }
- int next_literal_length() const {
- ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->length();
- }
-
- static const int kCharacterLookaheadBufferSize = 1;
-
- protected:
- // The current and look-ahead token.
- struct TokenDesc {
- Token::Value token;
- Location location;
- LiteralBuffer* literal_chars;
- };
-
- // Call this after setting source_ to the input.
- void Init() {
- // Set c0_ (one character ahead)
- ASSERT(kCharacterLookaheadBufferSize == 1);
- Advance();
- // Initialize current_ to not refer to a literal.
- current_.literal_chars = NULL;
- }
-
- // Literal buffer support
- inline void StartLiteral() {
- LiteralBuffer* free_buffer = (current_.literal_chars == &literal_buffer1_) ?
- &literal_buffer2_ : &literal_buffer1_;
- free_buffer->Reset();
- next_.literal_chars = free_buffer;
- }
-
- inline void AddLiteralChar(uc32 c) {
- ASSERT_NOT_NULL(next_.literal_chars);
- next_.literal_chars->AddChar(c);
- }
-
- // Complete scanning of a literal.
- inline void TerminateLiteral() {
- // Does nothing in the current implementation.
- }
-
- // Stops scanning of a literal and drop the collected characters,
- // e.g., due to an encountered error.
- inline void DropLiteral() {
- next_.literal_chars = NULL;
- }
-
- inline void AddLiteralCharAdvance() {
- AddLiteralChar(c0_);
- Advance();
- }
-
- // Low-level scanning support.
- void Advance() { c0_ = source_->Advance(); }
- void PushBack(uc32 ch) {
- source_->PushBack(c0_);
- c0_ = ch;
- }
-
- inline Token::Value Select(Token::Value tok) {
- Advance();
- return tok;
- }
-
- inline Token::Value Select(uc32 next, Token::Value then, Token::Value else_) {
- Advance();
- if (c0_ == next) {
- Advance();
- return then;
- } else {
- return else_;
- }
- }
-
- uc32 ScanHexEscape(uc32 c, int length);
-
- // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
- uc32 ScanOctalEscape(uc32 c, int length);
-
- // Return the current source position.
- int source_pos() {
- return source_->pos() - kCharacterLookaheadBufferSize;
- }
-
- ScannerConstants* scanner_constants_;
-
- // Buffers collecting literal strings, numbers, etc.
- LiteralBuffer literal_buffer1_;
- LiteralBuffer literal_buffer2_;
-
- TokenDesc current_; // desc for current token (as returned by Next())
- TokenDesc next_; // desc for next token (one token look-ahead)
-
- // Input stream. Must be initialized to an UC16CharacterStream.
- UC16CharacterStream* source_;
-
- // Start position of the octal literal last scanned.
- int octal_pos_;
-
- // One Unicode character look-ahead; c0_ < 0 at the end of the input.
- uc32 c0_;
-};
-
-// ----------------------------------------------------------------------------
-// JavaScriptScanner - base logic for JavaScript scanning.
-
-class JavaScriptScanner : public Scanner {
- public:
- // A LiteralScope that disables recording of some types of JavaScript
- // literals. If the scanner is configured to not record the specific
- // type of literal, the scope will not call StartLiteral.
- class LiteralScope {
- public:
- explicit LiteralScope(JavaScriptScanner* self)
- : scanner_(self), complete_(false) {
- scanner_->StartLiteral();
- }
- ~LiteralScope() {
- if (!complete_) scanner_->DropLiteral();
- }
- void Complete() {
- scanner_->TerminateLiteral();
- complete_ = true;
- }
-
- private:
- JavaScriptScanner* scanner_;
- bool complete_;
- };
-
- explicit JavaScriptScanner(ScannerConstants* scanner_contants);
-
- // Returns the next token.
- Token::Value Next();
-
- // Returns true if there was a line terminator before the peek'ed token.
- bool has_line_terminator_before_next() const {
- return has_line_terminator_before_next_;
- }
-
- // Scans the input as a regular expression pattern, previous
- // character(s) must be /(=). Returns true if a pattern is scanned.
- bool ScanRegExpPattern(bool seen_equal);
- // Returns true if regexp flags are scanned (always since flags can
- // be empty).
- bool ScanRegExpFlags();
-
- // Tells whether the buffer contains an identifier (no escapes).
- // Used for checking if a property name is an identifier.
- static bool IsIdentifier(unibrow::CharacterStream* buffer);
-
- // Seek forward to the given position. This operation does not
- // work in general, for instance when there are pushed back
- // characters, but works for seeking forward until simple delimiter
- // tokens, which is what it is used for.
- void SeekForward(int pos);
-
- protected:
- bool SkipWhiteSpace();
- Token::Value SkipSingleLineComment();
- Token::Value SkipMultiLineComment();
-
- // Scans a single JavaScript token.
- void Scan();
-
- void ScanDecimalDigits();
- Token::Value ScanNumber(bool seen_period);
- Token::Value ScanIdentifierOrKeyword();
- Token::Value ScanIdentifierSuffix(LiteralScope* literal);
-
- void ScanEscape();
- Token::Value ScanString();
-
- // Scans a possible HTML comment -- begins with '<!'.
- Token::Value ScanHtmlComment();
-
- // Decodes a unicode escape-sequence which is part of an identifier.
- // If the escape sequence cannot be decoded the result is kBadChar.
- uc32 ScanIdentifierUnicodeEscape();
-
- bool has_line_terminator_before_next_;
-};
-
-
-// ----------------------------------------------------------------------------
-// Keyword matching state machine.
-
-class KeywordMatcher {
-// Incrementally recognize keywords.
-//
-// Recognized keywords:
-// break case catch const* continue debugger* default delete do else
-// finally false for function if in instanceof native* new null
-// return switch this throw true try typeof var void while with
-//
-// *: Actually "future reserved keywords". These are the only ones we
-// recognize, the remaining are allowed as identifiers.
-// In ES5 strict mode, we should disallow all reserved keywords.
- public:
- KeywordMatcher()
- : state_(INITIAL),
- token_(Token::IDENTIFIER),
- keyword_(NULL),
- counter_(0),
- keyword_token_(Token::ILLEGAL) {}
-
- Token::Value token() { return token_; }
-
- inline bool AddChar(unibrow::uchar input) {
- if (state_ != UNMATCHABLE) {
- Step(input);
- }
- return state_ != UNMATCHABLE;
- }
-
- void Fail() {
- token_ = Token::IDENTIFIER;
- state_ = UNMATCHABLE;
- }
-
- private:
- enum State {
- UNMATCHABLE,
- INITIAL,
- KEYWORD_PREFIX,
- KEYWORD_MATCHED,
- C,
- CA,
- CO,
- CON,
- D,
- DE,
- E,
- EX,
- F,
- I,
- IM,
- IMP,
- IN,
- N,
- P,
- PR,
- S,
- T,
- TH,
- TR,
- V,
- W
- };
-
- struct FirstState {
- const char* keyword;
- State state;
- Token::Value token;
- };
-
- // Range of possible first characters of a keyword.
- static const unsigned int kFirstCharRangeMin = 'b';
- static const unsigned int kFirstCharRangeMax = 'y';
- static const unsigned int kFirstCharRangeLength =
- kFirstCharRangeMax - kFirstCharRangeMin + 1;
- // State map for first keyword character range.
- static FirstState first_states_[kFirstCharRangeLength];
-
- // If input equals keyword's character at position, continue matching keyword
- // from that position.
- inline bool MatchKeywordStart(unibrow::uchar input,
- const char* keyword,
- int position,
- Token::Value token_if_match) {
- if (input != static_cast<unibrow::uchar>(keyword[position])) {
- return false;
- }
- state_ = KEYWORD_PREFIX;
- this->keyword_ = keyword;
- this->counter_ = position + 1;
- this->keyword_token_ = token_if_match;
- return true;
- }
-
- // If input equals match character, transition to new state and return true.
- inline bool MatchState(unibrow::uchar input, char match, State new_state) {
- if (input != static_cast<unibrow::uchar>(match)) {
- return false;
- }
- state_ = new_state;
- return true;
- }
-
- inline bool MatchKeyword(unibrow::uchar input,
- char match,
- State new_state,
- Token::Value keyword_token) {
- if (input != static_cast<unibrow::uchar>(match)) {
- return false;
- }
- state_ = new_state;
- token_ = keyword_token;
- return true;
- }
-
- void Step(unibrow::uchar input);
-
- // Current state.
- State state_;
- // Token for currently added characters.
- Token::Value token_;
-
- // Matching a specific keyword string (there is only one possible valid
- // keyword with the current prefix).
- const char* keyword_;
- int counter_;
- Token::Value keyword_token_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_SCANNER_BASE_H_
diff --git a/src/3rdparty/v8/src/scanner.cc b/src/3rdparty/v8/src/scanner.cc
deleted file mode 100755
index d9c2188..0000000
--- a/src/3rdparty/v8/src/scanner.cc
+++ /dev/null
@@ -1,584 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "ast.h"
-#include "handles.h"
-#include "scanner.h"
-#include "unicode-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// BufferedUC16CharacterStreams
-
-BufferedUC16CharacterStream::BufferedUC16CharacterStream()
- : UC16CharacterStream(),
- pushback_limit_(NULL) {
- // Initialize buffer as being empty. First read will fill the buffer.
- buffer_cursor_ = buffer_;
- buffer_end_ = buffer_;
-}
-
-BufferedUC16CharacterStream::~BufferedUC16CharacterStream() { }
-
-void BufferedUC16CharacterStream::PushBack(uc32 character) {
- if (character == kEndOfInput) {
- pos_--;
- return;
- }
- if (pushback_limit_ == NULL && buffer_cursor_ > buffer_) {
- // buffer_ is writable, buffer_cursor_ is const pointer.
- buffer_[--buffer_cursor_ - buffer_] = static_cast<uc16>(character);
- pos_--;
- return;
- }
- SlowPushBack(static_cast<uc16>(character));
-}
-
-
-void BufferedUC16CharacterStream::SlowPushBack(uc16 character) {
- // In pushback mode, the end of the buffer contains pushback,
- // and the start of the buffer (from buffer start to pushback_limit_)
- // contains valid data that comes just after the pushback.
- // We NULL the pushback_limit_ if pushing all the way back to the
- // start of the buffer.
-
- if (pushback_limit_ == NULL) {
- // Enter pushback mode.
- pushback_limit_ = buffer_end_;
- buffer_end_ = buffer_ + kBufferSize;
- buffer_cursor_ = buffer_end_;
- }
- // Ensure that there is room for at least one pushback.
- ASSERT(buffer_cursor_ > buffer_);
- ASSERT(pos_ > 0);
- buffer_[--buffer_cursor_ - buffer_] = character;
- if (buffer_cursor_ == buffer_) {
- pushback_limit_ = NULL;
- } else if (buffer_cursor_ < pushback_limit_) {
- pushback_limit_ = buffer_cursor_;
- }
- pos_--;
-}
-
-
-bool BufferedUC16CharacterStream::ReadBlock() {
- buffer_cursor_ = buffer_;
- if (pushback_limit_ != NULL) {
- // Leave pushback mode.
- buffer_end_ = pushback_limit_;
- pushback_limit_ = NULL;
- // If there were any valid characters left at the
- // start of the buffer, use those.
- if (buffer_cursor_ < buffer_end_) return true;
- // Otherwise read a new block.
- }
- unsigned length = FillBuffer(pos_, kBufferSize);
- buffer_end_ = buffer_ + length;
- return length > 0;
-}
-
-
-unsigned BufferedUC16CharacterStream::SlowSeekForward(unsigned delta) {
- // Leave pushback mode (i.e., ignore that there might be valid data
- // in the buffer before the pushback_limit_ point).
- pushback_limit_ = NULL;
- return BufferSeekForward(delta);
-}
-
-// ----------------------------------------------------------------------------
-// GenericStringUC16CharacterStream
-
-
-GenericStringUC16CharacterStream::GenericStringUC16CharacterStream(
- Handle<String> data,
- unsigned start_position,
- unsigned end_position)
- : string_(data),
- length_(end_position) {
- ASSERT(end_position >= start_position);
- buffer_cursor_ = buffer_;
- buffer_end_ = buffer_;
- pos_ = start_position;
-}
-
-
-GenericStringUC16CharacterStream::~GenericStringUC16CharacterStream() { }
-
-
-unsigned GenericStringUC16CharacterStream::BufferSeekForward(unsigned delta) {
- unsigned old_pos = pos_;
- pos_ = Min(pos_ + delta, length_);
- ReadBlock();
- return pos_ - old_pos;
-}
-
-
-unsigned GenericStringUC16CharacterStream::FillBuffer(unsigned from_pos,
- unsigned length) {
- if (from_pos >= length_) return 0;
- if (from_pos + length > length_) {
- length = length_ - from_pos;
- }
- String::WriteToFlat<uc16>(*string_, buffer_, from_pos, from_pos + length);
- return length;
-}
-
-
-// ----------------------------------------------------------------------------
-// Utf8ToUC16CharacterStream
-Utf8ToUC16CharacterStream::Utf8ToUC16CharacterStream(const byte* data,
- unsigned length)
- : BufferedUC16CharacterStream(),
- raw_data_(data),
- raw_data_length_(length),
- raw_data_pos_(0),
- raw_character_position_(0) {
- ReadBlock();
-}
-
-
-Utf8ToUC16CharacterStream::~Utf8ToUC16CharacterStream() { }
-
-
-unsigned Utf8ToUC16CharacterStream::BufferSeekForward(unsigned delta) {
- unsigned old_pos = pos_;
- unsigned target_pos = pos_ + delta;
- SetRawPosition(target_pos);
- pos_ = raw_character_position_;
- ReadBlock();
- return pos_ - old_pos;
-}
-
-
-unsigned Utf8ToUC16CharacterStream::FillBuffer(unsigned char_position,
- unsigned length) {
- static const unibrow::uchar kMaxUC16Character = 0xffff;
- SetRawPosition(char_position);
- if (raw_character_position_ != char_position) {
- // char_position was not a valid position in the stream (hit the end
- // while spooling to it).
- return 0u;
- }
- unsigned i = 0;
- while (i < length) {
- if (raw_data_pos_ == raw_data_length_) break;
- unibrow::uchar c = raw_data_[raw_data_pos_];
- if (c <= unibrow::Utf8::kMaxOneByteChar) {
- raw_data_pos_++;
- } else {
- c = unibrow::Utf8::CalculateValue(raw_data_ + raw_data_pos_,
- raw_data_length_ - raw_data_pos_,
- &raw_data_pos_);
- // Don't allow characters outside of the BMP.
- if (c > kMaxUC16Character) {
- c = unibrow::Utf8::kBadChar;
- }
- }
- buffer_[i++] = static_cast<uc16>(c);
- }
- raw_character_position_ = char_position + i;
- return i;
-}
-
-
-static const byte kUtf8MultiByteMask = 0xC0;
-static const byte kUtf8MultiByteCharStart = 0xC0;
-static const byte kUtf8MultiByteCharFollower = 0x80;
-
-
-#ifdef DEBUG
-static bool IsUtf8MultiCharacterStart(byte first_byte) {
- return (first_byte & kUtf8MultiByteMask) == kUtf8MultiByteCharStart;
-}
-#endif
-
-
-static bool IsUtf8MultiCharacterFollower(byte later_byte) {
- return (later_byte & kUtf8MultiByteMask) == kUtf8MultiByteCharFollower;
-}
-
-
-// Move the cursor back to point at the preceding UTF-8 character start
-// in the buffer.
-static inline void Utf8CharacterBack(const byte* buffer, unsigned* cursor) {
- byte character = buffer[--*cursor];
- if (character > unibrow::Utf8::kMaxOneByteChar) {
- ASSERT(IsUtf8MultiCharacterFollower(character));
- // Last byte of a multi-byte character encoding. Step backwards until
- // pointing to the first byte of the encoding, recognized by having the
- // top two bits set.
- while (IsUtf8MultiCharacterFollower(buffer[--*cursor])) { }
- ASSERT(IsUtf8MultiCharacterStart(buffer[*cursor]));
- }
-}
-
-
-// Move the cursor forward to point at the next following UTF-8 character start
-// in the buffer.
-static inline void Utf8CharacterForward(const byte* buffer, unsigned* cursor) {
- byte character = buffer[(*cursor)++];
- if (character > unibrow::Utf8::kMaxOneByteChar) {
- // First character of a multi-byte character encoding.
- // The number of most-significant one-bits determines the length of the
- // encoding:
- // 110..... - (0xCx, 0xDx) one additional byte (minimum).
- // 1110.... - (0xEx) two additional bytes.
- // 11110... - (0xFx) three additional bytes (maximum).
- ASSERT(IsUtf8MultiCharacterStart(character));
- // Additional bytes is:
- // 1 if value in range 0xC0 .. 0xDF.
- // 2 if value in range 0xE0 .. 0xEF.
- // 3 if value in range 0xF0 .. 0xF7.
- // Encode that in a single value.
- unsigned additional_bytes =
- ((0x3211u) >> (((character - 0xC0) >> 2) & 0xC)) & 0x03;
- *cursor += additional_bytes;
- ASSERT(!IsUtf8MultiCharacterFollower(buffer[1 + additional_bytes]));
- }
-}
-
-
-void Utf8ToUC16CharacterStream::SetRawPosition(unsigned target_position) {
- if (raw_character_position_ > target_position) {
- // Spool backwards in utf8 buffer.
- do {
- Utf8CharacterBack(raw_data_, &raw_data_pos_);
- raw_character_position_--;
- } while (raw_character_position_ > target_position);
- return;
- }
- // Spool forwards in the utf8 buffer.
- while (raw_character_position_ < target_position) {
- if (raw_data_pos_ == raw_data_length_) return;
- Utf8CharacterForward(raw_data_, &raw_data_pos_);
- raw_character_position_++;
- }
-}
-
-
-// ----------------------------------------------------------------------------
-// ExternalTwoByteStringUC16CharacterStream
-
-ExternalTwoByteStringUC16CharacterStream::
- ~ExternalTwoByteStringUC16CharacterStream() { }
-
-
-ExternalTwoByteStringUC16CharacterStream
- ::ExternalTwoByteStringUC16CharacterStream(
- Handle<ExternalTwoByteString> data,
- int start_position,
- int end_position)
- : UC16CharacterStream(),
- source_(data),
- raw_data_(data->GetTwoByteData(start_position)) {
- buffer_cursor_ = raw_data_,
- buffer_end_ = raw_data_ + (end_position - start_position);
- pos_ = start_position;
-}
-
-
-// ----------------------------------------------------------------------------
-// Scanner::LiteralScope
-
-Scanner::LiteralScope::LiteralScope(Scanner* self)
- : scanner_(self), complete_(false) {
- self->StartLiteral();
-}
-
-
-Scanner::LiteralScope::~LiteralScope() {
- if (!complete_) scanner_->DropLiteral();
-}
-
-
-void Scanner::LiteralScope::Complete() {
- scanner_->TerminateLiteral();
- complete_ = true;
-}
-
-
-// ----------------------------------------------------------------------------
-// V8JavaScriptScanner
-
-
-void V8JavaScriptScanner::Initialize(UC16CharacterStream* source) {
- source_ = source;
- // Need to capture identifiers in order to recognize "get" and "set"
- // in object literals.
- Init();
- // Skip initial whitespace allowing HTML comment ends just like
- // after a newline and scan first token.
- has_line_terminator_before_next_ = true;
- SkipWhiteSpace();
- Scan();
-}
-
-
-// ----------------------------------------------------------------------------
-// JsonScanner
-
-JsonScanner::JsonScanner(ScannerConstants* scanner_constants)
- : Scanner(scanner_constants) { }
-
-
-void JsonScanner::Initialize(UC16CharacterStream* source) {
- source_ = source;
- Init();
- // Skip initial whitespace.
- SkipJsonWhiteSpace();
- // Preload first token as look-ahead.
- ScanJson();
-}
-
-
-Token::Value JsonScanner::Next() {
- // BUG 1215673: Find a thread safe way to set a stack limit in
- // pre-parse mode. Otherwise, we cannot safely pre-parse from other
- // threads.
- current_ = next_;
- // Check for stack-overflow before returning any tokens.
- ScanJson();
- return current_.token;
-}
-
-
-bool JsonScanner::SkipJsonWhiteSpace() {
- int start_position = source_pos();
- // JSON WhiteSpace is tab, carrige-return, newline and space.
- while (c0_ == ' ' || c0_ == '\n' || c0_ == '\r' || c0_ == '\t') {
- Advance();
- }
- return source_pos() != start_position;
-}
-
-
-void JsonScanner::ScanJson() {
- next_.literal_chars = NULL;
- Token::Value token;
- do {
- // Remember the position of the next token
- next_.location.beg_pos = source_pos();
- switch (c0_) {
- case '\t':
- case '\r':
- case '\n':
- case ' ':
- Advance();
- token = Token::WHITESPACE;
- break;
- case '{':
- Advance();
- token = Token::LBRACE;
- break;
- case '}':
- Advance();
- token = Token::RBRACE;
- break;
- case '[':
- Advance();
- token = Token::LBRACK;
- break;
- case ']':
- Advance();
- token = Token::RBRACK;
- break;
- case ':':
- Advance();
- token = Token::COLON;
- break;
- case ',':
- Advance();
- token = Token::COMMA;
- break;
- case '"':
- token = ScanJsonString();
- break;
- case '-':
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9':
- token = ScanJsonNumber();
- break;
- case 't':
- token = ScanJsonIdentifier("true", Token::TRUE_LITERAL);
- break;
- case 'f':
- token = ScanJsonIdentifier("false", Token::FALSE_LITERAL);
- break;
- case 'n':
- token = ScanJsonIdentifier("null", Token::NULL_LITERAL);
- break;
- default:
- if (c0_ < 0) {
- Advance();
- token = Token::EOS;
- } else {
- Advance();
- token = Select(Token::ILLEGAL);
- }
- }
- } while (token == Token::WHITESPACE);
-
- next_.location.end_pos = source_pos();
- next_.token = token;
-}
-
-
-Token::Value JsonScanner::ScanJsonString() {
- ASSERT_EQ('"', c0_);
- Advance();
- LiteralScope literal(this);
- while (c0_ != '"') {
- // Check for control character (0x00-0x1f) or unterminated string (<0).
- if (c0_ < 0x20) return Token::ILLEGAL;
- if (c0_ != '\\') {
- AddLiteralCharAdvance();
- } else {
- Advance();
- switch (c0_) {
- case '"':
- case '\\':
- case '/':
- AddLiteralChar(c0_);
- break;
- case 'b':
- AddLiteralChar('\x08');
- break;
- case 'f':
- AddLiteralChar('\x0c');
- break;
- case 'n':
- AddLiteralChar('\x0a');
- break;
- case 'r':
- AddLiteralChar('\x0d');
- break;
- case 't':
- AddLiteralChar('\x09');
- break;
- case 'u': {
- uc32 value = 0;
- for (int i = 0; i < 4; i++) {
- Advance();
- int digit = HexValue(c0_);
- if (digit < 0) {
- return Token::ILLEGAL;
- }
- value = value * 16 + digit;
- }
- AddLiteralChar(value);
- break;
- }
- default:
- return Token::ILLEGAL;
- }
- Advance();
- }
- }
- literal.Complete();
- Advance();
- return Token::STRING;
-}
-
-
-Token::Value JsonScanner::ScanJsonNumber() {
- LiteralScope literal(this);
- bool negative = false;
-
- if (c0_ == '-') {
- AddLiteralCharAdvance();
- negative = true;
- }
- if (c0_ == '0') {
- AddLiteralCharAdvance();
- // Prefix zero is only allowed if it's the only digit before
- // a decimal point or exponent.
- if ('0' <= c0_ && c0_ <= '9') return Token::ILLEGAL;
- } else {
- int i = 0;
- int digits = 0;
- if (c0_ < '1' || c0_ > '9') return Token::ILLEGAL;
- do {
- i = i * 10 + c0_ - '0';
- digits++;
- AddLiteralCharAdvance();
- } while (c0_ >= '0' && c0_ <= '9');
- if (c0_ != '.' && c0_ != 'e' && c0_ != 'E' && digits < 10) {
- number_ = (negative ? -i : i);
- return Token::NUMBER;
- }
- }
- if (c0_ == '.') {
- AddLiteralCharAdvance();
- if (c0_ < '0' || c0_ > '9') return Token::ILLEGAL;
- do {
- AddLiteralCharAdvance();
- } while (c0_ >= '0' && c0_ <= '9');
- }
- if (AsciiAlphaToLower(c0_) == 'e') {
- AddLiteralCharAdvance();
- if (c0_ == '-' || c0_ == '+') AddLiteralCharAdvance();
- if (c0_ < '0' || c0_ > '9') return Token::ILLEGAL;
- do {
- AddLiteralCharAdvance();
- } while (c0_ >= '0' && c0_ <= '9');
- }
- literal.Complete();
- ASSERT_NOT_NULL(next_.literal_chars);
- number_ = StringToDouble(next_.literal_chars->ascii_literal(),
- NO_FLAGS, // Hex, octal or trailing junk.
- OS::nan_value());
- return Token::NUMBER;
-}
-
-
-Token::Value JsonScanner::ScanJsonIdentifier(const char* text,
- Token::Value token) {
- LiteralScope literal(this);
- while (*text != '\0') {
- if (c0_ != *text) return Token::ILLEGAL;
- Advance();
- text++;
- }
- if (scanner_constants_->IsIdentifierPart(c0_)) return Token::ILLEGAL;
- literal.Complete();
- return token;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/scanner.h b/src/3rdparty/v8/src/scanner.h
deleted file mode 100644
index 776ba53..0000000
--- a/src/3rdparty/v8/src/scanner.h
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SCANNER_H_
-#define V8_SCANNER_H_
-
-#include "token.h"
-#include "char-predicates-inl.h"
-#include "scanner-base.h"
-
-namespace v8 {
-namespace internal {
-
-// A buffered character stream based on a random access character
-// source (ReadBlock can be called with pos_ pointing to any position,
-// even positions before the current).
-class BufferedUC16CharacterStream: public UC16CharacterStream {
- public:
- BufferedUC16CharacterStream();
- virtual ~BufferedUC16CharacterStream();
-
- virtual void PushBack(uc32 character);
-
- protected:
- static const unsigned kBufferSize = 512;
- static const unsigned kPushBackStepSize = 16;
-
- virtual unsigned SlowSeekForward(unsigned delta);
- virtual bool ReadBlock();
- virtual void SlowPushBack(uc16 character);
-
- virtual unsigned BufferSeekForward(unsigned delta) = 0;
- virtual unsigned FillBuffer(unsigned position, unsigned length) = 0;
-
- const uc16* pushback_limit_;
- uc16 buffer_[kBufferSize];
-};
-
-
-// Generic string stream.
-class GenericStringUC16CharacterStream: public BufferedUC16CharacterStream {
- public:
- GenericStringUC16CharacterStream(Handle<String> data,
- unsigned start_position,
- unsigned end_position);
- virtual ~GenericStringUC16CharacterStream();
-
- protected:
- virtual unsigned BufferSeekForward(unsigned delta);
- virtual unsigned FillBuffer(unsigned position, unsigned length);
-
- Handle<String> string_;
- unsigned start_position_;
- unsigned length_;
-};
-
-
-// UC16 stream based on a literal UTF-8 string.
-class Utf8ToUC16CharacterStream: public BufferedUC16CharacterStream {
- public:
- Utf8ToUC16CharacterStream(const byte* data, unsigned length);
- virtual ~Utf8ToUC16CharacterStream();
-
- protected:
- virtual unsigned BufferSeekForward(unsigned delta);
- virtual unsigned FillBuffer(unsigned char_position, unsigned length);
- void SetRawPosition(unsigned char_position);
-
- const byte* raw_data_;
- unsigned raw_data_length_; // Measured in bytes, not characters.
- unsigned raw_data_pos_;
- // The character position of the character at raw_data[raw_data_pos_].
- // Not necessarily the same as pos_.
- unsigned raw_character_position_;
-};
-
-
-// UTF16 buffer to read characters from an external string.
-class ExternalTwoByteStringUC16CharacterStream: public UC16CharacterStream {
- public:
- ExternalTwoByteStringUC16CharacterStream(Handle<ExternalTwoByteString> data,
- int start_position,
- int end_position);
- virtual ~ExternalTwoByteStringUC16CharacterStream();
-
- virtual void PushBack(uc32 character) {
- ASSERT(buffer_cursor_ > raw_data_);
- buffer_cursor_--;
- pos_--;
- }
-
- protected:
- virtual unsigned SlowSeekForward(unsigned delta) {
- // Fast case always handles seeking.
- return 0;
- }
- virtual bool ReadBlock() {
- // Entire string is read at start.
- return false;
- }
- Handle<ExternalTwoByteString> source_;
- const uc16* raw_data_; // Pointer to the actual array of characters.
-};
-
-
-// ----------------------------------------------------------------------------
-// V8JavaScriptScanner
-// JavaScript scanner getting its input from either a V8 String or a unicode
-// CharacterStream.
-
-class V8JavaScriptScanner : public JavaScriptScanner {
- public:
- explicit V8JavaScriptScanner(ScannerConstants* scanner_constants)
- : JavaScriptScanner(scanner_constants) {}
-
- void Initialize(UC16CharacterStream* source);
-};
-
-
-class JsonScanner : public Scanner {
- public:
- explicit JsonScanner(ScannerConstants* scanner_constants);
-
- void Initialize(UC16CharacterStream* source);
-
- // Returns the next token.
- Token::Value Next();
-
- // Returns the value of a number token.
- double number() {
- return number_;
- }
-
-
- protected:
- // Skip past JSON whitespace (only space, tab, newline and carrige-return).
- bool SkipJsonWhiteSpace();
-
- // Scan a single JSON token. The JSON lexical grammar is specified in the
- // ECMAScript 5 standard, section 15.12.1.1.
- // Recognizes all of the single-character tokens directly, or calls a function
- // to scan a number, string or identifier literal.
- // The only allowed whitespace characters between tokens are tab,
- // carriage-return, newline and space.
- void ScanJson();
-
- // A JSON number (production JSONNumber) is a subset of the valid JavaScript
- // decimal number literals.
- // It includes an optional minus sign, must have at least one
- // digit before and after a decimal point, may not have prefixed zeros (unless
- // the integer part is zero), and may include an exponent part (e.g., "e-10").
- // Hexadecimal and octal numbers are not allowed.
- Token::Value ScanJsonNumber();
-
- // A JSON string (production JSONString) is subset of valid JavaScript string
- // literals. The string must only be double-quoted (not single-quoted), and
- // the only allowed backslash-escapes are ", /, \, b, f, n, r, t and
- // four-digit hex escapes (uXXXX). Any other use of backslashes is invalid.
- Token::Value ScanJsonString();
-
- // Used to recognizes one of the literals "true", "false", or "null". These
- // are the only valid JSON identifiers (productions JSONBooleanLiteral,
- // JSONNullLiteral).
- Token::Value ScanJsonIdentifier(const char* text, Token::Value token);
-
- // Holds the value of a scanned number token.
- double number_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_SCANNER_H_
diff --git a/src/3rdparty/v8/src/scopeinfo.cc b/src/3rdparty/v8/src/scopeinfo.cc
deleted file mode 100644
index 58e2ad2..0000000
--- a/src/3rdparty/v8/src/scopeinfo.cc
+++ /dev/null
@@ -1,631 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include "v8.h"
-
-#include "scopeinfo.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-
-static int CompareLocal(Variable* const* v, Variable* const* w) {
- Slot* s = (*v)->AsSlot();
- Slot* t = (*w)->AsSlot();
- // We may have rewritten parameters (that are in the arguments object)
- // and which may have a NULL slot... - find a better solution...
- int x = (s != NULL ? s->index() : 0);
- int y = (t != NULL ? t->index() : 0);
- // Consider sorting them according to type as well?
- return x - y;
-}
-
-
-template<class Allocator>
-ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
- : function_name_(FACTORY->empty_symbol()),
- calls_eval_(scope->calls_eval()),
- parameters_(scope->num_parameters()),
- stack_slots_(scope->num_stack_slots()),
- context_slots_(scope->num_heap_slots()),
- context_modes_(scope->num_heap_slots()) {
- // Add parameters.
- for (int i = 0; i < scope->num_parameters(); i++) {
- ASSERT(parameters_.length() == i);
- parameters_.Add(scope->parameter(i)->name());
- }
-
- // Add stack locals and collect heap locals.
- // We are assuming that the locals' slots are allocated in
- // increasing order, so we can simply add them to the
- // ScopeInfo lists. However, due to usage analysis, this is
- // not true for context-allocated locals: Some of them
- // may be parameters which are allocated before the
- // non-parameter locals. When the non-parameter locals are
- // sorted according to usage, the allocated slot indices may
- // not be in increasing order with the variable list anymore.
- // Thus, we first collect the context-allocated locals, and then
- // sort them by context slot index before adding them to the
- // ScopeInfo list.
- List<Variable*, Allocator> locals(32); // 32 is a wild guess
- ASSERT(locals.is_empty());
- scope->CollectUsedVariables(&locals);
- locals.Sort(&CompareLocal);
-
- List<Variable*, Allocator> heap_locals(locals.length());
- for (int i = 0; i < locals.length(); i++) {
- Variable* var = locals[i];
- if (var->is_used()) {
- Slot* slot = var->AsSlot();
- if (slot != NULL) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- // explicitly added to parameters_ above - ignore
- break;
-
- case Slot::LOCAL:
- ASSERT(stack_slots_.length() == slot->index());
- stack_slots_.Add(var->name());
- break;
-
- case Slot::CONTEXT:
- heap_locals.Add(var);
- break;
-
- case Slot::LOOKUP:
- // This is currently not used.
- UNREACHABLE();
- break;
- }
- }
- }
- }
-
- // Add heap locals.
- if (scope->num_heap_slots() > 0) {
- // Add user-defined slots.
- for (int i = 0; i < heap_locals.length(); i++) {
- ASSERT(heap_locals[i]->AsSlot()->index() - Context::MIN_CONTEXT_SLOTS ==
- context_slots_.length());
- ASSERT(heap_locals[i]->AsSlot()->index() - Context::MIN_CONTEXT_SLOTS ==
- context_modes_.length());
- context_slots_.Add(heap_locals[i]->name());
- context_modes_.Add(heap_locals[i]->mode());
- }
-
- } else {
- ASSERT(heap_locals.length() == 0);
- }
-
- // Add the function context slot, if present.
- // For now, this must happen at the very end because of the
- // ordering of the scope info slots and the respective slot indices.
- if (scope->is_function_scope()) {
- Variable* var = scope->function();
- if (var != NULL &&
- var->is_used() &&
- var->AsSlot()->type() == Slot::CONTEXT) {
- function_name_ = var->name();
- // Note that we must not find the function name in the context slot
- // list - instead it must be handled separately in the
- // Contexts::Lookup() function. Thus record an empty symbol here so we
- // get the correct number of context slots.
- ASSERT(var->AsSlot()->index() - Context::MIN_CONTEXT_SLOTS ==
- context_slots_.length());
- ASSERT(var->AsSlot()->index() - Context::MIN_CONTEXT_SLOTS ==
- context_modes_.length());
- context_slots_.Add(FACTORY->empty_symbol());
- context_modes_.Add(Variable::INTERNAL);
- }
- }
-}
-
-
-// Encoding format in a FixedArray object:
-//
-// - function name
-//
-// - calls eval boolean flag
-//
-// - number of variables in the context object (smi) (= function context
-// slot index + 1)
-// - list of pairs (name, Var mode) of context-allocated variables (starting
-// with context slot 0)
-//
-// - number of parameters (smi)
-// - list of parameter names (starting with parameter 0 first)
-//
-// - number of variables on the stack (smi)
-// - list of names of stack-allocated variables (starting with stack slot 0)
-
-// The ScopeInfo representation could be simplified and the ScopeInfo
-// re-implemented (with almost the same interface). Here is a
-// suggestion for the new format:
-//
-// - have a single list with all variable names (parameters, stack locals,
-// context locals), followed by a list of non-Object* values containing
-// the variables information (what kind, index, attributes)
-// - searching the linear list of names is fast and yields an index into the
-// list if the variable name is found
-// - that list index is then used to find the variable information in the
-// subsequent list
-// - the list entries don't have to be in any particular order, so all the
-// current sorting business can go away
-// - the ScopeInfo lookup routines can be reduced to perhaps a single lookup
-// which returns all information at once
-// - when gathering the information from a Scope, we only need to iterate
-// through the local variables (parameters and context info is already
-// present)
-
-
-static inline Object** ReadInt(Object** p, int* x) {
- *x = (reinterpret_cast<Smi*>(*p++))->value();
- return p;
-}
-
-
-static inline Object** ReadBool(Object** p, bool* x) {
- *x = (reinterpret_cast<Smi*>(*p++))->value() != 0;
- return p;
-}
-
-
-static inline Object** ReadSymbol(Object** p, Handle<String>* s) {
- *s = Handle<String>(reinterpret_cast<String*>(*p++));
- return p;
-}
-
-
-template <class Allocator>
-static Object** ReadList(Object** p, List<Handle<String>, Allocator >* list) {
- ASSERT(list->is_empty());
- int n;
- p = ReadInt(p, &n);
- while (n-- > 0) {
- Handle<String> s;
- p = ReadSymbol(p, &s);
- list->Add(s);
- }
- return p;
-}
-
-
-template <class Allocator>
-static Object** ReadList(Object** p,
- List<Handle<String>, Allocator>* list,
- List<Variable::Mode, Allocator>* modes) {
- ASSERT(list->is_empty());
- int n;
- p = ReadInt(p, &n);
- while (n-- > 0) {
- Handle<String> s;
- int m;
- p = ReadSymbol(p, &s);
- p = ReadInt(p, &m);
- list->Add(s);
- modes->Add(static_cast<Variable::Mode>(m));
- }
- return p;
-}
-
-
-template<class Allocator>
-ScopeInfo<Allocator>::ScopeInfo(SerializedScopeInfo* data)
- : function_name_(FACTORY->empty_symbol()),
- parameters_(4),
- stack_slots_(8),
- context_slots_(8),
- context_modes_(8) {
- if (data->length() > 0) {
- Object** p0 = data->data_start();
- Object** p = p0;
- p = ReadSymbol(p, &function_name_);
- p = ReadBool(p, &calls_eval_);
- p = ReadList<Allocator>(p, &context_slots_, &context_modes_);
- p = ReadList<Allocator>(p, &parameters_);
- p = ReadList<Allocator>(p, &stack_slots_);
- ASSERT((p - p0) == FixedArray::cast(data)->length());
- }
-}
-
-
-static inline Object** WriteInt(Object** p, int x) {
- *p++ = Smi::FromInt(x);
- return p;
-}
-
-
-static inline Object** WriteBool(Object** p, bool b) {
- *p++ = Smi::FromInt(b ? 1 : 0);
- return p;
-}
-
-
-static inline Object** WriteSymbol(Object** p, Handle<String> s) {
- *p++ = *s;
- return p;
-}
-
-
-template <class Allocator>
-static Object** WriteList(Object** p, List<Handle<String>, Allocator >* list) {
- const int n = list->length();
- p = WriteInt(p, n);
- for (int i = 0; i < n; i++) {
- p = WriteSymbol(p, list->at(i));
- }
- return p;
-}
-
-
-template <class Allocator>
-static Object** WriteList(Object** p,
- List<Handle<String>, Allocator>* list,
- List<Variable::Mode, Allocator>* modes) {
- const int n = list->length();
- p = WriteInt(p, n);
- for (int i = 0; i < n; i++) {
- p = WriteSymbol(p, list->at(i));
- p = WriteInt(p, modes->at(i));
- }
- return p;
-}
-
-
-template<class Allocator>
-Handle<SerializedScopeInfo> ScopeInfo<Allocator>::Serialize() {
- // function name, calls eval, length for 3 tables:
- const int extra_slots = 1 + 1 + 3;
- int length = extra_slots +
- context_slots_.length() * 2 +
- parameters_.length() +
- stack_slots_.length();
-
- Handle<SerializedScopeInfo> data(
- SerializedScopeInfo::cast(*FACTORY->NewFixedArray(length, TENURED)));
- AssertNoAllocation nogc;
-
- Object** p0 = data->data_start();
- Object** p = p0;
- p = WriteSymbol(p, function_name_);
- p = WriteBool(p, calls_eval_);
- p = WriteList(p, &context_slots_, &context_modes_);
- p = WriteList(p, &parameters_);
- p = WriteList(p, &stack_slots_);
- ASSERT((p - p0) == length);
-
- return data;
-}
-
-
-template<class Allocator>
-Handle<String> ScopeInfo<Allocator>::LocalName(int i) const {
- // A local variable can be allocated either on the stack or in the context.
- // For variables allocated in the context they are always preceded by
- // Context::MIN_CONTEXT_SLOTS of fixed allocated slots in the context.
- if (i < number_of_stack_slots()) {
- return stack_slot_name(i);
- } else {
- return context_slot_name(i - number_of_stack_slots() +
- Context::MIN_CONTEXT_SLOTS);
- }
-}
-
-
-template<class Allocator>
-int ScopeInfo<Allocator>::NumberOfLocals() const {
- int number_of_locals = number_of_stack_slots();
- if (number_of_context_slots() > 0) {
- ASSERT(number_of_context_slots() >= Context::MIN_CONTEXT_SLOTS);
- number_of_locals += number_of_context_slots() - Context::MIN_CONTEXT_SLOTS;
- }
- return number_of_locals;
-}
-
-
-Handle<SerializedScopeInfo> SerializedScopeInfo::Create(Scope* scope) {
- ScopeInfo<ZoneListAllocationPolicy> sinfo(scope);
- return sinfo.Serialize();
-}
-
-
-SerializedScopeInfo* SerializedScopeInfo::Empty() {
- return reinterpret_cast<SerializedScopeInfo*>(HEAP->empty_fixed_array());
-}
-
-
-Object** SerializedScopeInfo::ContextEntriesAddr() {
- ASSERT(length() > 0);
- return data_start() + 2; // +2 for function name and calls eval.
-}
-
-
-Object** SerializedScopeInfo::ParameterEntriesAddr() {
- ASSERT(length() > 0);
- Object** p = ContextEntriesAddr();
- int number_of_context_slots;
- p = ReadInt(p, &number_of_context_slots);
- return p + number_of_context_slots*2; // *2 for pairs
-}
-
-
-Object** SerializedScopeInfo::StackSlotEntriesAddr() {
- ASSERT(length() > 0);
- Object** p = ParameterEntriesAddr();
- int number_of_parameter_slots;
- p = ReadInt(p, &number_of_parameter_slots);
- return p + number_of_parameter_slots;
-}
-
-
-bool SerializedScopeInfo::CallsEval() {
- if (length() > 0) {
- Object** p = data_start() + 1; // +1 for function name.
- bool calls_eval;
- p = ReadBool(p, &calls_eval);
- return calls_eval;
- }
- return true;
-}
-
-
-int SerializedScopeInfo::NumberOfStackSlots() {
- if (length() > 0) {
- Object** p = StackSlotEntriesAddr();
- int number_of_stack_slots;
- ReadInt(p, &number_of_stack_slots);
- return number_of_stack_slots;
- }
- return 0;
-}
-
-
-int SerializedScopeInfo::NumberOfContextSlots() {
- if (length() > 0) {
- Object** p = ContextEntriesAddr();
- int number_of_context_slots;
- ReadInt(p, &number_of_context_slots);
- return number_of_context_slots + Context::MIN_CONTEXT_SLOTS;
- }
- return 0;
-}
-
-
-bool SerializedScopeInfo::HasHeapAllocatedLocals() {
- if (length() > 0) {
- Object** p = ContextEntriesAddr();
- int number_of_context_slots;
- ReadInt(p, &number_of_context_slots);
- return number_of_context_slots > 0;
- }
- return false;
-}
-
-
-int SerializedScopeInfo::StackSlotIndex(String* name) {
- ASSERT(name->IsSymbol());
- if (length() > 0) {
- // Slots start after length entry.
- Object** p0 = StackSlotEntriesAddr();
- int number_of_stack_slots;
- p0 = ReadInt(p0, &number_of_stack_slots);
- Object** p = p0;
- Object** end = p0 + number_of_stack_slots;
- while (p != end) {
- if (*p == name) return static_cast<int>(p - p0);
- p++;
- }
- }
- return -1;
-}
-
-int SerializedScopeInfo::ContextSlotIndex(String* name, Variable::Mode* mode) {
- ASSERT(name->IsSymbol());
- Isolate* isolate = GetIsolate();
- int result = isolate->context_slot_cache()->Lookup(this, name, mode);
- if (result != ContextSlotCache::kNotFound) return result;
- if (length() > 0) {
- // Slots start after length entry.
- Object** p0 = ContextEntriesAddr();
- int number_of_context_slots;
- p0 = ReadInt(p0, &number_of_context_slots);
- Object** p = p0;
- Object** end = p0 + number_of_context_slots * 2;
- while (p != end) {
- if (*p == name) {
- ASSERT(((p - p0) & 1) == 0);
- int v;
- ReadInt(p + 1, &v);
- Variable::Mode mode_value = static_cast<Variable::Mode>(v);
- if (mode != NULL) *mode = mode_value;
- result = static_cast<int>((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
- isolate->context_slot_cache()->Update(this, name, mode_value, result);
- return result;
- }
- p += 2;
- }
- }
- isolate->context_slot_cache()->Update(this, name, Variable::INTERNAL, -1);
- return -1;
-}
-
-
-int SerializedScopeInfo::ParameterIndex(String* name) {
- ASSERT(name->IsSymbol());
- if (length() > 0) {
- // We must read parameters from the end since for
- // multiply declared parameters the value of the
- // last declaration of that parameter is used
- // inside a function (and thus we need to look
- // at the last index). Was bug# 1110337.
- //
- // Eventually, we should only register such parameters
- // once, with corresponding index. This requires a new
- // implementation of the ScopeInfo code. See also other
- // comments in this file regarding this.
- Object** p = ParameterEntriesAddr();
- int number_of_parameter_slots;
- Object** p0 = ReadInt(p, &number_of_parameter_slots);
- p = p0 + number_of_parameter_slots;
- while (p > p0) {
- p--;
- if (*p == name) return static_cast<int>(p - p0);
- }
- }
- return -1;
-}
-
-
-int SerializedScopeInfo::FunctionContextSlotIndex(String* name) {
- ASSERT(name->IsSymbol());
- if (length() > 0) {
- Object** p = data_start();
- if (*p == name) {
- p = ContextEntriesAddr();
- int number_of_context_slots;
- ReadInt(p, &number_of_context_slots);
- ASSERT(number_of_context_slots != 0);
- // The function context slot is the last entry.
- return number_of_context_slots + Context::MIN_CONTEXT_SLOTS - 1;
- }
- }
- return -1;
-}
-
-
-int ContextSlotCache::Hash(Object* data, String* name) {
- // Uses only lower 32 bits if pointers are larger.
- uintptr_t addr_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(data)) >> 2;
- return static_cast<int>((addr_hash ^ name->Hash()) % kLength);
-}
-
-
-int ContextSlotCache::Lookup(Object* data,
- String* name,
- Variable::Mode* mode) {
- int index = Hash(data, name);
- Key& key = keys_[index];
- if ((key.data == data) && key.name->Equals(name)) {
- Value result(values_[index]);
- if (mode != NULL) *mode = result.mode();
- return result.index() + kNotFound;
- }
- return kNotFound;
-}
-
-
-void ContextSlotCache::Update(Object* data,
- String* name,
- Variable::Mode mode,
- int slot_index) {
- String* symbol;
- ASSERT(slot_index > kNotFound);
- if (HEAP->LookupSymbolIfExists(name, &symbol)) {
- int index = Hash(data, symbol);
- Key& key = keys_[index];
- key.data = data;
- key.name = symbol;
- // Please note value only takes a uint as index.
- values_[index] = Value(mode, slot_index - kNotFound).raw();
-#ifdef DEBUG
- ValidateEntry(data, name, mode, slot_index);
-#endif
- }
-}
-
-
-void ContextSlotCache::Clear() {
- for (int index = 0; index < kLength; index++) keys_[index].data = NULL;
-}
-
-
-#ifdef DEBUG
-
-void ContextSlotCache::ValidateEntry(Object* data,
- String* name,
- Variable::Mode mode,
- int slot_index) {
- String* symbol;
- if (HEAP->LookupSymbolIfExists(name, &symbol)) {
- int index = Hash(data, name);
- Key& key = keys_[index];
- ASSERT(key.data == data);
- ASSERT(key.name->Equals(name));
- Value result(values_[index]);
- ASSERT(result.mode() == mode);
- ASSERT(result.index() + kNotFound == slot_index);
- }
-}
-
-
-template <class Allocator>
-static void PrintList(const char* list_name,
- int nof_internal_slots,
- List<Handle<String>, Allocator>& list) {
- if (list.length() > 0) {
- PrintF("\n // %s\n", list_name);
- if (nof_internal_slots > 0) {
- PrintF(" %2d - %2d [internal slots]\n", 0 , nof_internal_slots - 1);
- }
- for (int i = 0; i < list.length(); i++) {
- PrintF(" %2d ", i + nof_internal_slots);
- list[i]->ShortPrint();
- PrintF("\n");
- }
- }
-}
-
-
-template<class Allocator>
-void ScopeInfo<Allocator>::Print() {
- PrintF("ScopeInfo ");
- if (function_name_->length() > 0)
- function_name_->ShortPrint();
- else
- PrintF("/* no function name */");
- PrintF("{");
-
- PrintList<Allocator>("parameters", 0, parameters_);
- PrintList<Allocator>("stack slots", 0, stack_slots_);
- PrintList<Allocator>("context slots", Context::MIN_CONTEXT_SLOTS,
- context_slots_);
-
- PrintF("}\n");
-}
-#endif // DEBUG
-
-
-// Make sure the classes get instantiated by the template system.
-template class ScopeInfo<FreeStoreAllocationPolicy>;
-template class ScopeInfo<PreallocatedStorage>;
-template class ScopeInfo<ZoneListAllocationPolicy>;
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/scopeinfo.h b/src/3rdparty/v8/src/scopeinfo.h
deleted file mode 100644
index cc9f816..0000000
--- a/src/3rdparty/v8/src/scopeinfo.h
+++ /dev/null
@@ -1,249 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SCOPEINFO_H_
-#define V8_SCOPEINFO_H_
-
-#include "variables.h"
-#include "zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Scope information represents information about a functions's
-// scopes (currently only one, because we don't do any inlining)
-// and the allocation of the scope's variables. Scope information
-// is stored in a compressed form in FixedArray objects and is used
-// at runtime (stack dumps, deoptimization, etc.).
-//
-// Historical note: In other VMs built by this team, ScopeInfo was
-// usually called DebugInfo since the information was used (among
-// other things) for on-demand debugging (Self, Smalltalk). However,
-// DebugInfo seems misleading, since this information is primarily used
-// in debugging-unrelated contexts.
-
-// Forward defined as
-// template <class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
-template<class Allocator>
-class ScopeInfo BASE_EMBEDDED {
- public:
- // Create a ScopeInfo instance from a scope.
- explicit ScopeInfo(Scope* scope);
-
- // Create a ScopeInfo instance from SerializedScopeInfo.
- explicit ScopeInfo(SerializedScopeInfo* data);
-
- // Creates a SerializedScopeInfo holding the serialized scope info.
- Handle<SerializedScopeInfo> Serialize();
-
- // --------------------------------------------------------------------------
- // Lookup
-
- Handle<String> function_name() const { return function_name_; }
-
- Handle<String> parameter_name(int i) const { return parameters_[i]; }
- int number_of_parameters() const { return parameters_.length(); }
-
- Handle<String> stack_slot_name(int i) const { return stack_slots_[i]; }
- int number_of_stack_slots() const { return stack_slots_.length(); }
-
- Handle<String> context_slot_name(int i) const {
- return context_slots_[i - Context::MIN_CONTEXT_SLOTS];
- }
- int number_of_context_slots() const {
- int l = context_slots_.length();
- return l == 0 ? 0 : l + Context::MIN_CONTEXT_SLOTS;
- }
-
- Handle<String> LocalName(int i) const;
- int NumberOfLocals() const;
-
- // --------------------------------------------------------------------------
- // Debugging support
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- Handle<String> function_name_;
- bool calls_eval_;
- List<Handle<String>, Allocator > parameters_;
- List<Handle<String>, Allocator > stack_slots_;
- List<Handle<String>, Allocator > context_slots_;
- List<Variable::Mode, Allocator > context_modes_;
-};
-
-
-// This object provides quick access to scope info details for runtime
-// routines w/o the need to explicitly create a ScopeInfo object.
-class SerializedScopeInfo : public FixedArray {
- public :
-
- static SerializedScopeInfo* cast(Object* object) {
- ASSERT(object->IsFixedArray());
- return reinterpret_cast<SerializedScopeInfo*>(object);
- }
-
- // Does this scope call eval?
- bool CallsEval();
-
- // Does this scope have an arguments shadow?
- bool HasArgumentsShadow() {
- return StackSlotIndex(GetHeap()->arguments_shadow_symbol()) >= 0;
- }
-
- // Return the number of stack slots for code.
- int NumberOfStackSlots();
-
- // Return the number of context slots for code.
- int NumberOfContextSlots();
-
- // Return if this has context slots besides MIN_CONTEXT_SLOTS;
- bool HasHeapAllocatedLocals();
-
- // Lookup support for serialized scope info. Returns the
- // the stack slot index for a given slot name if the slot is
- // present; otherwise returns a value < 0. The name must be a symbol
- // (canonicalized).
- int StackSlotIndex(String* name);
-
- // Lookup support for serialized scope info. Returns the
- // context slot index for a given slot name if the slot is present; otherwise
- // returns a value < 0. The name must be a symbol (canonicalized).
- // If the slot is present and mode != NULL, sets *mode to the corresponding
- // mode for that variable.
- int ContextSlotIndex(String* name, Variable::Mode* mode);
-
- // Lookup support for serialized scope info. Returns the
- // parameter index for a given parameter name if the parameter is present;
- // otherwise returns a value < 0. The name must be a symbol (canonicalized).
- int ParameterIndex(String* name);
-
- // Lookup support for serialized scope info. Returns the
- // function context slot index if the function name is present (named
- // function expressions, only), otherwise returns a value < 0. The name
- // must be a symbol (canonicalized).
- int FunctionContextSlotIndex(String* name);
-
- static Handle<SerializedScopeInfo> Create(Scope* scope);
-
- // Serializes empty scope info.
- static SerializedScopeInfo* Empty();
-
- private:
-
- inline Object** ContextEntriesAddr();
-
- inline Object** ParameterEntriesAddr();
-
- inline Object** StackSlotEntriesAddr();
-};
-
-
-// Cache for mapping (data, property name) into context slot index.
-// The cache contains both positive and negative results.
-// Slot index equals -1 means the property is absent.
-// Cleared at startup and prior to mark sweep collection.
-class ContextSlotCache {
- public:
- // Lookup context slot index for (data, name).
- // If absent, kNotFound is returned.
- int Lookup(Object* data,
- String* name,
- Variable::Mode* mode);
-
- // Update an element in the cache.
- void Update(Object* data,
- String* name,
- Variable::Mode mode,
- int slot_index);
-
- // Clear the cache.
- void Clear();
-
- static const int kNotFound = -2;
- private:
- ContextSlotCache() {
- for (int i = 0; i < kLength; ++i) {
- keys_[i].data = NULL;
- keys_[i].name = NULL;
- values_[i] = kNotFound;
- }
- }
-
- inline static int Hash(Object* data, String* name);
-
-#ifdef DEBUG
- void ValidateEntry(Object* data,
- String* name,
- Variable::Mode mode,
- int slot_index);
-#endif
-
- static const int kLength = 256;
- struct Key {
- Object* data;
- String* name;
- };
-
- struct Value {
- Value(Variable::Mode mode, int index) {
- ASSERT(ModeField::is_valid(mode));
- ASSERT(IndexField::is_valid(index));
- value_ = ModeField::encode(mode) | IndexField::encode(index);
- ASSERT(mode == this->mode());
- ASSERT(index == this->index());
- }
-
- inline Value(uint32_t value) : value_(value) {}
-
- uint32_t raw() { return value_; }
-
- Variable::Mode mode() { return ModeField::decode(value_); }
-
- int index() { return IndexField::decode(value_); }
-
- // Bit fields in value_ (type, shift, size). Must be public so the
- // constants can be embedded in generated code.
- class ModeField: public BitField<Variable::Mode, 0, 3> {};
- class IndexField: public BitField<int, 3, 32-3> {};
- private:
- uint32_t value_;
- };
-
- Key keys_[kLength];
- uint32_t values_[kLength];
-
- friend class Isolate;
- DISALLOW_COPY_AND_ASSIGN(ContextSlotCache);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_SCOPEINFO_H_
diff --git a/src/3rdparty/v8/src/scopes.cc b/src/3rdparty/v8/src/scopes.cc
deleted file mode 100644
index 70e11ed..0000000
--- a/src/3rdparty/v8/src/scopes.cc
+++ /dev/null
@@ -1,1093 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "scopes.h"
-
-#include "bootstrapper.h"
-#include "compiler.h"
-#include "prettyprinter.h"
-#include "scopeinfo.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// A Zone allocator for use with LocalsMap.
-
-// TODO(isolates): It is probably worth it to change the Allocator class to
-// take a pointer to an isolate.
-class ZoneAllocator: public Allocator {
- public:
- /* nothing to do */
- virtual ~ZoneAllocator() {}
-
- virtual void* New(size_t size) { return ZONE->New(static_cast<int>(size)); }
-
- /* ignored - Zone is freed in one fell swoop */
- virtual void Delete(void* p) {}
-};
-
-
-static ZoneAllocator LocalsMapAllocator;
-
-
-// ----------------------------------------------------------------------------
-// Implementation of LocalsMap
-//
-// Note: We are storing the handle locations as key values in the hash map.
-// When inserting a new variable via Declare(), we rely on the fact that
-// the handle location remains alive for the duration of that variable
-// use. Because a Variable holding a handle with the same location exists
-// this is ensured.
-
-static bool Match(void* key1, void* key2) {
- String* name1 = *reinterpret_cast<String**>(key1);
- String* name2 = *reinterpret_cast<String**>(key2);
- ASSERT(name1->IsSymbol());
- ASSERT(name2->IsSymbol());
- return name1 == name2;
-}
-
-
-// Dummy constructor
-VariableMap::VariableMap(bool gotta_love_static_overloading) : HashMap() {}
-
-VariableMap::VariableMap() : HashMap(Match, &LocalsMapAllocator, 8) {}
-VariableMap::~VariableMap() {}
-
-
-Variable* VariableMap::Declare(Scope* scope,
- Handle<String> name,
- Variable::Mode mode,
- bool is_valid_lhs,
- Variable::Kind kind) {
- HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), true);
- if (p->value == NULL) {
- // The variable has not been declared yet -> insert it.
- ASSERT(p->key == name.location());
- p->value = new Variable(scope, name, mode, is_valid_lhs, kind);
- }
- return reinterpret_cast<Variable*>(p->value);
-}
-
-
-Variable* VariableMap::Lookup(Handle<String> name) {
- HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), false);
- if (p != NULL) {
- ASSERT(*reinterpret_cast<String**>(p->key) == *name);
- ASSERT(p->value != NULL);
- return reinterpret_cast<Variable*>(p->value);
- }
- return NULL;
-}
-
-
-// ----------------------------------------------------------------------------
-// Implementation of Scope
-
-
-// Dummy constructor
-Scope::Scope(Type type)
- : inner_scopes_(0),
- variables_(false),
- temps_(0),
- params_(0),
- unresolved_(0),
- decls_(0) {
- SetDefaults(type, NULL, NULL);
- ASSERT(!resolved());
-}
-
-
-Scope::Scope(Scope* outer_scope, Type type)
- : inner_scopes_(4),
- variables_(),
- temps_(4),
- params_(4),
- unresolved_(16),
- decls_(4) {
- SetDefaults(type, outer_scope, NULL);
- // At some point we might want to provide outer scopes to
- // eval scopes (by walking the stack and reading the scope info).
- // In that case, the ASSERT below needs to be adjusted.
- ASSERT((type == GLOBAL_SCOPE || type == EVAL_SCOPE) == (outer_scope == NULL));
- ASSERT(!HasIllegalRedeclaration());
- ASSERT(!resolved());
-}
-
-
-Scope::Scope(Scope* inner_scope, SerializedScopeInfo* scope_info)
- : inner_scopes_(4),
- variables_(),
- temps_(4),
- params_(4),
- unresolved_(16),
- decls_(4) {
- ASSERT(scope_info != NULL);
- SetDefaults(FUNCTION_SCOPE, NULL, scope_info);
- ASSERT(resolved());
- if (scope_info->HasHeapAllocatedLocals()) {
- num_heap_slots_ = scope_info_->NumberOfContextSlots();
- }
-
- AddInnerScope(inner_scope);
-
- // This scope's arguments shadow (if present) is context-allocated if an inner
- // scope accesses this one's parameters. Allocate the arguments_shadow_
- // variable if necessary.
- Isolate* isolate = Isolate::Current();
- Variable::Mode mode;
- int arguments_shadow_index =
- scope_info_->ContextSlotIndex(
- isolate->heap()->arguments_shadow_symbol(), &mode);
- if (arguments_shadow_index >= 0) {
- ASSERT(mode == Variable::INTERNAL);
- arguments_shadow_ = new Variable(
- this,
- isolate->factory()->arguments_shadow_symbol(),
- Variable::INTERNAL,
- true,
- Variable::ARGUMENTS);
- arguments_shadow_->set_rewrite(
- new Slot(arguments_shadow_, Slot::CONTEXT, arguments_shadow_index));
- arguments_shadow_->set_is_used(true);
- }
-}
-
-
-Scope* Scope::DeserializeScopeChain(CompilationInfo* info,
- Scope* global_scope) {
- ASSERT(!info->closure().is_null());
- // If we have a serialized scope info, reuse it.
- Scope* innermost_scope = NULL;
- Scope* scope = NULL;
-
- SerializedScopeInfo* scope_info = info->closure()->shared()->scope_info();
- if (scope_info != SerializedScopeInfo::Empty()) {
- JSFunction* current = *info->closure();
- do {
- current = current->context()->closure();
- SerializedScopeInfo* scope_info = current->shared()->scope_info();
- if (scope_info != SerializedScopeInfo::Empty()) {
- scope = new Scope(scope, scope_info);
- if (innermost_scope == NULL) innermost_scope = scope;
- } else {
- ASSERT(current->context()->IsGlobalContext());
- }
- } while (!current->context()->IsGlobalContext());
- }
-
- global_scope->AddInnerScope(scope);
- if (innermost_scope == NULL) innermost_scope = global_scope;
-
- return innermost_scope;
-}
-
-
-bool Scope::Analyze(CompilationInfo* info) {
- ASSERT(info->function() != NULL);
- Scope* top = info->function()->scope();
-
- while (top->outer_scope() != NULL) top = top->outer_scope();
- top->AllocateVariables(info->calling_context());
-
-#ifdef DEBUG
- if (info->isolate()->bootstrapper()->IsActive()
- ? FLAG_print_builtin_scopes
- : FLAG_print_scopes) {
- info->function()->scope()->Print();
- }
-#endif
-
- info->SetScope(info->function()->scope());
- return true; // Can not fail.
-}
-
-
-void Scope::Initialize(bool inside_with) {
- ASSERT(!resolved());
-
- // Add this scope as a new inner scope of the outer scope.
- if (outer_scope_ != NULL) {
- outer_scope_->inner_scopes_.Add(this);
- scope_inside_with_ = outer_scope_->scope_inside_with_ || inside_with;
- } else {
- scope_inside_with_ = inside_with;
- }
-
- // Declare convenience variables.
- // Declare and allocate receiver (even for the global scope, and even
- // if naccesses_ == 0).
- // NOTE: When loading parameters in the global scope, we must take
- // care not to access them as properties of the global object, but
- // instead load them directly from the stack. Currently, the only
- // such parameter is 'this' which is passed on the stack when
- // invoking scripts
- Variable* var =
- variables_.Declare(this, FACTORY->this_symbol(), Variable::VAR,
- false, Variable::THIS);
- var->set_rewrite(new Slot(var, Slot::PARAMETER, -1));
- receiver_ = var;
-
- if (is_function_scope()) {
- // Declare 'arguments' variable which exists in all functions.
- // Note that it might never be accessed, in which case it won't be
- // allocated during variable allocation.
- variables_.Declare(this, FACTORY->arguments_symbol(), Variable::VAR,
- true, Variable::ARGUMENTS);
- }
-}
-
-
-Variable* Scope::LocalLookup(Handle<String> name) {
- Variable* result = variables_.Lookup(name);
- if (result != NULL || !resolved()) {
- return result;
- }
- // If the scope is resolved, we can find a variable in serialized scope info.
-
- // We should never lookup 'arguments' in this scope
- // as it is implicitly present in any scope.
- ASSERT(*name != *FACTORY->arguments_symbol());
-
- // Assert that there is no local slot with the given name.
- ASSERT(scope_info_->StackSlotIndex(*name) < 0);
-
- // Check context slot lookup.
- Variable::Mode mode;
- int index = scope_info_->ContextSlotIndex(*name, &mode);
- if (index >= 0) {
- Variable* var =
- variables_.Declare(this, name, mode, true, Variable::NORMAL);
- var->set_rewrite(new Slot(var, Slot::CONTEXT, index));
- return var;
- }
-
- index = scope_info_->ParameterIndex(*name);
- if (index >= 0) {
- // ".arguments" must be present in context slots.
- ASSERT(arguments_shadow_ != NULL);
- Variable* var =
- variables_.Declare(this, name, Variable::VAR, true, Variable::NORMAL);
- Property* rewrite =
- new Property(new VariableProxy(arguments_shadow_),
- new Literal(Handle<Object>(Smi::FromInt(index))),
- RelocInfo::kNoPosition,
- Property::SYNTHETIC);
- rewrite->set_is_arguments_access(true);
- var->set_rewrite(rewrite);
- return var;
- }
-
- index = scope_info_->FunctionContextSlotIndex(*name);
- if (index >= 0) {
- // Check that there is no local slot with the given name.
- ASSERT(scope_info_->StackSlotIndex(*name) < 0);
- Variable* var =
- variables_.Declare(this, name, Variable::VAR, true, Variable::NORMAL);
- var->set_rewrite(new Slot(var, Slot::CONTEXT, index));
- return var;
- }
-
- return NULL;
-}
-
-
-Variable* Scope::Lookup(Handle<String> name) {
- for (Scope* scope = this;
- scope != NULL;
- scope = scope->outer_scope()) {
- Variable* var = scope->LocalLookup(name);
- if (var != NULL) return var;
- }
- return NULL;
-}
-
-
-Variable* Scope::DeclareFunctionVar(Handle<String> name) {
- ASSERT(is_function_scope() && function_ == NULL);
- function_ = new Variable(this, name, Variable::CONST, true, Variable::NORMAL);
- return function_;
-}
-
-
-Variable* Scope::DeclareLocal(Handle<String> name, Variable::Mode mode) {
- // DYNAMIC variables are introduces during variable allocation,
- // INTERNAL variables are allocated explicitly, and TEMPORARY
- // variables are allocated via NewTemporary().
- ASSERT(!resolved());
- ASSERT(mode == Variable::VAR || mode == Variable::CONST);
- return variables_.Declare(this, name, mode, true, Variable::NORMAL);
-}
-
-
-Variable* Scope::DeclareGlobal(Handle<String> name) {
- ASSERT(is_global_scope());
- return variables_.Declare(this, name, Variable::DYNAMIC_GLOBAL, true,
- Variable::NORMAL);
-}
-
-
-void Scope::AddParameter(Variable* var) {
- ASSERT(is_function_scope());
- ASSERT(LocalLookup(var->name()) == var);
- params_.Add(var);
-}
-
-
-VariableProxy* Scope::NewUnresolved(Handle<String> name,
- bool inside_with,
- int position) {
- // Note that we must not share the unresolved variables with
- // the same name because they may be removed selectively via
- // RemoveUnresolved().
- ASSERT(!resolved());
- VariableProxy* proxy = new VariableProxy(name, false, inside_with, position);
- unresolved_.Add(proxy);
- return proxy;
-}
-
-
-void Scope::RemoveUnresolved(VariableProxy* var) {
- // Most likely (always?) any variable we want to remove
- // was just added before, so we search backwards.
- for (int i = unresolved_.length(); i-- > 0;) {
- if (unresolved_[i] == var) {
- unresolved_.Remove(i);
- return;
- }
- }
-}
-
-
-Variable* Scope::NewTemporary(Handle<String> name) {
- ASSERT(!resolved());
- Variable* var =
- new Variable(this, name, Variable::TEMPORARY, true, Variable::NORMAL);
- temps_.Add(var);
- return var;
-}
-
-
-void Scope::AddDeclaration(Declaration* declaration) {
- decls_.Add(declaration);
-}
-
-
-void Scope::SetIllegalRedeclaration(Expression* expression) {
- // Record only the first illegal redeclaration.
- if (!HasIllegalRedeclaration()) {
- illegal_redecl_ = expression;
- }
- ASSERT(HasIllegalRedeclaration());
-}
-
-
-void Scope::VisitIllegalRedeclaration(AstVisitor* visitor) {
- ASSERT(HasIllegalRedeclaration());
- illegal_redecl_->Accept(visitor);
-}
-
-
-template<class Allocator>
-void Scope::CollectUsedVariables(List<Variable*, Allocator>* locals) {
- // Collect variables in this scope.
- // Note that the function_ variable - if present - is not
- // collected here but handled separately in ScopeInfo
- // which is the current user of this function).
- for (int i = 0; i < temps_.length(); i++) {
- Variable* var = temps_[i];
- if (var->is_used()) {
- locals->Add(var);
- }
- }
- for (VariableMap::Entry* p = variables_.Start();
- p != NULL;
- p = variables_.Next(p)) {
- Variable* var = reinterpret_cast<Variable*>(p->value);
- if (var->is_used()) {
- locals->Add(var);
- }
- }
-}
-
-
-// Make sure the method gets instantiated by the template system.
-template void Scope::CollectUsedVariables(
- List<Variable*, FreeStoreAllocationPolicy>* locals);
-template void Scope::CollectUsedVariables(
- List<Variable*, PreallocatedStorage>* locals);
-template void Scope::CollectUsedVariables(
- List<Variable*, ZoneListAllocationPolicy>* locals);
-
-
-void Scope::AllocateVariables(Handle<Context> context) {
- ASSERT(outer_scope_ == NULL); // eval or global scopes only
-
- // 1) Propagate scope information.
- // If we are in an eval scope, we may have other outer scopes about
- // which we don't know anything at this point. Thus we must be conservative
- // and assume they may invoke eval themselves. Eventually we could capture
- // this information in the ScopeInfo and then use it here (by traversing
- // the call chain stack, at compile time).
- bool eval_scope = is_eval_scope();
- PropagateScopeInfo(eval_scope, eval_scope);
-
- // 2) Resolve variables.
- Scope* global_scope = NULL;
- if (is_global_scope()) global_scope = this;
- ResolveVariablesRecursively(global_scope, context);
-
- // 3) Allocate variables.
- AllocateVariablesRecursively();
-}
-
-
-bool Scope::AllowsLazyCompilation() const {
- return !force_eager_compilation_ && HasTrivialOuterContext();
-}
-
-
-bool Scope::HasTrivialContext() const {
- // A function scope has a trivial context if it always is the global
- // context. We iteratively scan out the context chain to see if
- // there is anything that makes this scope non-trivial; otherwise we
- // return true.
- for (const Scope* scope = this; scope != NULL; scope = scope->outer_scope_) {
- if (scope->is_eval_scope()) return false;
- if (scope->scope_inside_with_) return false;
- if (scope->num_heap_slots_ > 0) return false;
- }
- return true;
-}
-
-
-bool Scope::HasTrivialOuterContext() const {
- Scope* outer = outer_scope_;
- if (outer == NULL) return true;
- // Note that the outer context may be trivial in general, but the current
- // scope may be inside a 'with' statement in which case the outer context
- // for this scope is not trivial.
- return !scope_inside_with_ && outer->HasTrivialContext();
-}
-
-
-int Scope::ContextChainLength(Scope* scope) {
- int n = 0;
- for (Scope* s = this; s != scope; s = s->outer_scope_) {
- ASSERT(s != NULL); // scope must be in the scope chain
- if (s->num_heap_slots() > 0) n++;
- }
- return n;
-}
-
-
-#ifdef DEBUG
-static const char* Header(Scope::Type type) {
- switch (type) {
- case Scope::EVAL_SCOPE: return "eval";
- case Scope::FUNCTION_SCOPE: return "function";
- case Scope::GLOBAL_SCOPE: return "global";
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-static void Indent(int n, const char* str) {
- PrintF("%*s%s", n, "", str);
-}
-
-
-static void PrintName(Handle<String> name) {
- SmartPointer<char> s = name->ToCString(DISALLOW_NULLS);
- PrintF("%s", *s);
-}
-
-
-static void PrintVar(PrettyPrinter* printer, int indent, Variable* var) {
- if (var->is_used() || var->rewrite() != NULL) {
- Indent(indent, Variable::Mode2String(var->mode()));
- PrintF(" ");
- PrintName(var->name());
- PrintF("; // ");
- if (var->rewrite() != NULL) {
- PrintF("%s, ", printer->Print(var->rewrite()));
- if (var->is_accessed_from_inner_scope()) PrintF(", ");
- }
- if (var->is_accessed_from_inner_scope()) PrintF("inner scope access");
- PrintF("\n");
- }
-}
-
-
-static void PrintMap(PrettyPrinter* printer, int indent, VariableMap* map) {
- for (VariableMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) {
- Variable* var = reinterpret_cast<Variable*>(p->value);
- PrintVar(printer, indent, var);
- }
-}
-
-
-void Scope::Print(int n) {
- int n0 = (n > 0 ? n : 0);
- int n1 = n0 + 2; // indentation
-
- // Print header.
- Indent(n0, Header(type_));
- if (scope_name_->length() > 0) {
- PrintF(" ");
- PrintName(scope_name_);
- }
-
- // Print parameters, if any.
- if (is_function_scope()) {
- PrintF(" (");
- for (int i = 0; i < params_.length(); i++) {
- if (i > 0) PrintF(", ");
- PrintName(params_[i]->name());
- }
- PrintF(")");
- }
-
- PrintF(" {\n");
-
- // Function name, if any (named function literals, only).
- if (function_ != NULL) {
- Indent(n1, "// (local) function name: ");
- PrintName(function_->name());
- PrintF("\n");
- }
-
- // Scope info.
- if (HasTrivialOuterContext()) {
- Indent(n1, "// scope has trivial outer context\n");
- }
- if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
- if (scope_contains_with_) Indent(n1, "// scope contains 'with'\n");
- if (scope_calls_eval_) Indent(n1, "// scope calls 'eval'\n");
- if (outer_scope_calls_eval_) Indent(n1, "// outer scope calls 'eval'\n");
- if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
- if (outer_scope_is_eval_scope_) {
- Indent(n1, "// outer scope is 'eval' scope\n");
- }
- if (num_stack_slots_ > 0) { Indent(n1, "// ");
- PrintF("%d stack slots\n", num_stack_slots_); }
- if (num_heap_slots_ > 0) { Indent(n1, "// ");
- PrintF("%d heap slots\n", num_heap_slots_); }
-
- // Print locals.
- PrettyPrinter printer;
- Indent(n1, "// function var\n");
- if (function_ != NULL) {
- PrintVar(&printer, n1, function_);
- }
-
- Indent(n1, "// temporary vars\n");
- for (int i = 0; i < temps_.length(); i++) {
- PrintVar(&printer, n1, temps_[i]);
- }
-
- Indent(n1, "// local vars\n");
- PrintMap(&printer, n1, &variables_);
-
- Indent(n1, "// dynamic vars\n");
- if (dynamics_ != NULL) {
- PrintMap(&printer, n1, dynamics_->GetMap(Variable::DYNAMIC));
- PrintMap(&printer, n1, dynamics_->GetMap(Variable::DYNAMIC_LOCAL));
- PrintMap(&printer, n1, dynamics_->GetMap(Variable::DYNAMIC_GLOBAL));
- }
-
- // Print inner scopes (disable by providing negative n).
- if (n >= 0) {
- for (int i = 0; i < inner_scopes_.length(); i++) {
- PrintF("\n");
- inner_scopes_[i]->Print(n1);
- }
- }
-
- Indent(n0, "}\n");
-}
-#endif // DEBUG
-
-
-Variable* Scope::NonLocal(Handle<String> name, Variable::Mode mode) {
- if (dynamics_ == NULL) dynamics_ = new DynamicScopePart();
- VariableMap* map = dynamics_->GetMap(mode);
- Variable* var = map->Lookup(name);
- if (var == NULL) {
- // Declare a new non-local.
- var = map->Declare(NULL, name, mode, true, Variable::NORMAL);
- // Allocate it by giving it a dynamic lookup.
- var->set_rewrite(new Slot(var, Slot::LOOKUP, -1));
- }
- return var;
-}
-
-
-// Lookup a variable starting with this scope. The result is either
-// the statically resolved variable belonging to an outer scope, or
-// NULL. It may be NULL because a) we couldn't find a variable, or b)
-// because the variable is just a guess (and may be shadowed by
-// another variable that is introduced dynamically via an 'eval' call
-// or a 'with' statement).
-Variable* Scope::LookupRecursive(Handle<String> name,
- bool inner_lookup,
- Variable** invalidated_local) {
- // If we find a variable, but the current scope calls 'eval', the found
- // variable may not be the correct one (the 'eval' may introduce a
- // property with the same name). In that case, remember that the variable
- // found is just a guess.
- bool guess = scope_calls_eval_;
-
- // Try to find the variable in this scope.
- Variable* var = LocalLookup(name);
-
- if (var != NULL) {
- // We found a variable. If this is not an inner lookup, we are done.
- // (Even if there is an 'eval' in this scope which introduces the
- // same variable again, the resulting variable remains the same.
- // Note that enclosing 'with' statements are handled at the call site.)
- if (!inner_lookup)
- return var;
-
- } else {
- // We did not find a variable locally. Check against the function variable,
- // if any. We can do this for all scopes, since the function variable is
- // only present - if at all - for function scopes.
- //
- // This lookup corresponds to a lookup in the "intermediate" scope sitting
- // between this scope and the outer scope. (ECMA-262, 3rd., requires that
- // the name of named function literal is kept in an intermediate scope
- // in between this scope and the next outer scope.)
- if (function_ != NULL && function_->name().is_identical_to(name)) {
- var = function_;
-
- } else if (outer_scope_ != NULL) {
- var = outer_scope_->LookupRecursive(name, true, invalidated_local);
- // We may have found a variable in an outer scope. However, if
- // the current scope is inside a 'with', the actual variable may
- // be a property introduced via the 'with' statement. Then, the
- // variable we may have found is just a guess.
- if (scope_inside_with_)
- guess = true;
- }
-
- // If we did not find a variable, we are done.
- if (var == NULL)
- return NULL;
- }
-
- ASSERT(var != NULL);
-
- // If this is a lookup from an inner scope, mark the variable.
- if (inner_lookup) {
- var->MarkAsAccessedFromInnerScope();
- }
-
- // If the variable we have found is just a guess, invalidate the
- // result. If the found variable is local, record that fact so we
- // can generate fast code to get it if it is not shadowed by eval.
- if (guess) {
- if (!var->is_global()) *invalidated_local = var;
- var = NULL;
- }
-
- return var;
-}
-
-
-void Scope::ResolveVariable(Scope* global_scope,
- Handle<Context> context,
- VariableProxy* proxy) {
- ASSERT(global_scope == NULL || global_scope->is_global_scope());
-
- // If the proxy is already resolved there's nothing to do
- // (functions and consts may be resolved by the parser).
- if (proxy->var() != NULL) return;
-
- // Otherwise, try to resolve the variable.
- Variable* invalidated_local = NULL;
- Variable* var = LookupRecursive(proxy->name(), false, &invalidated_local);
-
- if (proxy->inside_with()) {
- // If we are inside a local 'with' statement, all bets are off
- // and we cannot resolve the proxy to a local variable even if
- // we found an outer matching variable.
- // Note that we must do a lookup anyway, because if we find one,
- // we must mark that variable as potentially accessed from this
- // inner scope (the property may not be in the 'with' object).
- var = NonLocal(proxy->name(), Variable::DYNAMIC);
-
- } else {
- // We are not inside a local 'with' statement.
-
- if (var == NULL) {
- // We did not find the variable. We have a global variable
- // if we are in the global scope (we know already that we
- // are outside a 'with' statement) or if there is no way
- // that the variable might be introduced dynamically (through
- // a local or outer eval() call, or an outer 'with' statement),
- // or we don't know about the outer scope (because we are
- // in an eval scope).
- if (is_global_scope() ||
- !(scope_inside_with_ || outer_scope_is_eval_scope_ ||
- scope_calls_eval_ || outer_scope_calls_eval_)) {
- // We must have a global variable.
- ASSERT(global_scope != NULL);
- var = global_scope->DeclareGlobal(proxy->name());
-
- } else if (scope_inside_with_) {
- // If we are inside a with statement we give up and look up
- // the variable at runtime.
- var = NonLocal(proxy->name(), Variable::DYNAMIC);
-
- } else if (invalidated_local != NULL) {
- // No with statements are involved and we found a local
- // variable that might be shadowed by eval introduced
- // variables.
- var = NonLocal(proxy->name(), Variable::DYNAMIC_LOCAL);
- var->set_local_if_not_shadowed(invalidated_local);
-
- } else if (outer_scope_is_eval_scope_) {
- // No with statements and we did not find a local and the code
- // is executed with a call to eval. The context contains
- // scope information that we can use to determine if the
- // variable is global if it is not shadowed by eval-introduced
- // variables.
- if (context->GlobalIfNotShadowedByEval(proxy->name())) {
- var = NonLocal(proxy->name(), Variable::DYNAMIC_GLOBAL);
-
- } else {
- var = NonLocal(proxy->name(), Variable::DYNAMIC);
- }
-
- } else {
- // No with statements and we did not find a local and the code
- // is not executed with a call to eval. We know that this
- // variable is global unless it is shadowed by eval-introduced
- // variables.
- var = NonLocal(proxy->name(), Variable::DYNAMIC_GLOBAL);
- }
- }
- }
-
- proxy->BindTo(var);
-}
-
-
-void Scope::ResolveVariablesRecursively(Scope* global_scope,
- Handle<Context> context) {
- ASSERT(global_scope == NULL || global_scope->is_global_scope());
-
- // Resolve unresolved variables for this scope.
- for (int i = 0; i < unresolved_.length(); i++) {
- ResolveVariable(global_scope, context, unresolved_[i]);
- }
-
- // Resolve unresolved variables for inner scopes.
- for (int i = 0; i < inner_scopes_.length(); i++) {
- inner_scopes_[i]->ResolveVariablesRecursively(global_scope, context);
- }
-}
-
-
-bool Scope::PropagateScopeInfo(bool outer_scope_calls_eval,
- bool outer_scope_is_eval_scope) {
- if (outer_scope_calls_eval) {
- outer_scope_calls_eval_ = true;
- }
-
- if (outer_scope_is_eval_scope) {
- outer_scope_is_eval_scope_ = true;
- }
-
- bool calls_eval = scope_calls_eval_ || outer_scope_calls_eval_;
- bool is_eval = is_eval_scope() || outer_scope_is_eval_scope_;
- for (int i = 0; i < inner_scopes_.length(); i++) {
- Scope* inner_scope = inner_scopes_[i];
- if (inner_scope->PropagateScopeInfo(calls_eval, is_eval)) {
- inner_scope_calls_eval_ = true;
- }
- if (inner_scope->force_eager_compilation_) {
- force_eager_compilation_ = true;
- }
- }
-
- return scope_calls_eval_ || inner_scope_calls_eval_;
-}
-
-
-bool Scope::MustAllocate(Variable* var) {
- // Give var a read/write use if there is a chance it might be accessed
- // via an eval() call. This is only possible if the variable has a
- // visible name.
- if ((var->is_this() || var->name()->length() > 0) &&
- (var->is_accessed_from_inner_scope() ||
- scope_calls_eval_ || inner_scope_calls_eval_ ||
- scope_contains_with_)) {
- var->set_is_used(true);
- }
- // Global variables do not need to be allocated.
- return !var->is_global() && var->is_used();
-}
-
-
-bool Scope::MustAllocateInContext(Variable* var) {
- // If var is accessed from an inner scope, or if there is a
- // possibility that it might be accessed from the current or an inner
- // scope (through an eval() call), it must be allocated in the
- // context. Exception: temporary variables are not allocated in the
- // context.
- return
- var->mode() != Variable::TEMPORARY &&
- (var->is_accessed_from_inner_scope() ||
- scope_calls_eval_ || inner_scope_calls_eval_ ||
- scope_contains_with_ || var->is_global());
-}
-
-
-bool Scope::HasArgumentsParameter() {
- for (int i = 0; i < params_.length(); i++) {
- if (params_[i]->name().is_identical_to(FACTORY->arguments_symbol()))
- return true;
- }
- return false;
-}
-
-
-void Scope::AllocateStackSlot(Variable* var) {
- var->set_rewrite(new Slot(var, Slot::LOCAL, num_stack_slots_++));
-}
-
-
-void Scope::AllocateHeapSlot(Variable* var) {
- var->set_rewrite(new Slot(var, Slot::CONTEXT, num_heap_slots_++));
-}
-
-
-void Scope::AllocateParameterLocals() {
- ASSERT(is_function_scope());
- Variable* arguments = LocalLookup(FACTORY->arguments_symbol());
- ASSERT(arguments != NULL); // functions have 'arguments' declared implicitly
-
- // Parameters are rewritten to arguments[i] if 'arguments' is used in
- // a non-strict mode function. Strict mode code doesn't alias arguments.
- bool rewrite_parameters = false;
-
- if (MustAllocate(arguments) && !HasArgumentsParameter()) {
- // 'arguments' is used. Unless there is also a parameter called
- // 'arguments', we must be conservative and access all parameters via
- // the arguments object: The i'th parameter is rewritten into
- // '.arguments[i]' (*). If we have a parameter named 'arguments', a
- // (new) value is always assigned to it via the function
- // invocation. Then 'arguments' denotes that specific parameter value
- // and cannot be used to access the parameters, which is why we don't
- // need to rewrite in that case.
- //
- // (*) Instead of having a parameter called 'arguments', we may have an
- // assignment to 'arguments' in the function body, at some arbitrary
- // point in time (possibly through an 'eval()' call!). After that
- // assignment any re-write of parameters would be invalid (was bug
- // 881452). Thus, we introduce a shadow '.arguments'
- // variable which also points to the arguments object. For rewrites we
- // use '.arguments' which remains valid even if we assign to
- // 'arguments'. To summarize: If we need to rewrite, we allocate an
- // 'arguments' object dynamically upon function invocation. The compiler
- // introduces 2 local variables 'arguments' and '.arguments', both of
- // which originally point to the arguments object that was
- // allocated. All parameters are rewritten into property accesses via
- // the '.arguments' variable. Thus, any changes to properties of
- // 'arguments' are reflected in the variables and vice versa. If the
- // 'arguments' variable is changed, '.arguments' still points to the
- // correct arguments object and the rewrites still work.
-
- // We are using 'arguments'. Tell the code generator that is needs to
- // allocate the arguments object by setting 'arguments_'.
- arguments_ = arguments;
-
- // In strict mode 'arguments' does not alias formal parameters.
- // Therefore in strict mode we allocate parameters as if 'arguments'
- // were not used.
- rewrite_parameters = !is_strict_mode();
- }
-
- if (rewrite_parameters) {
- // We also need the '.arguments' shadow variable. Declare it and create
- // and bind the corresponding proxy. It's ok to declare it only now
- // because it's a local variable that is allocated after the parameters
- // have been allocated.
- //
- // Note: This is "almost" at temporary variable but we cannot use
- // NewTemporary() because the mode needs to be INTERNAL since this
- // variable may be allocated in the heap-allocated context (temporaries
- // are never allocated in the context).
- arguments_shadow_ = new Variable(this,
- FACTORY->arguments_shadow_symbol(),
- Variable::INTERNAL,
- true,
- Variable::ARGUMENTS);
- arguments_shadow_->set_is_used(true);
- temps_.Add(arguments_shadow_);
-
- // Allocate the parameters by rewriting them into '.arguments[i]' accesses.
- for (int i = 0; i < params_.length(); i++) {
- Variable* var = params_[i];
- ASSERT(var->scope() == this);
- if (MustAllocate(var)) {
- if (MustAllocateInContext(var)) {
- // It is ok to set this only now, because arguments is a local
- // variable that is allocated after the parameters have been
- // allocated.
- arguments_shadow_->MarkAsAccessedFromInnerScope();
- }
- Property* rewrite =
- new Property(new VariableProxy(arguments_shadow_),
- new Literal(Handle<Object>(Smi::FromInt(i))),
- RelocInfo::kNoPosition,
- Property::SYNTHETIC);
- rewrite->set_is_arguments_access(true);
- var->set_rewrite(rewrite);
- }
- }
-
- } else {
- // The arguments object is not used, so we can access parameters directly.
- // The same parameter may occur multiple times in the parameters_ list.
- // If it does, and if it is not copied into the context object, it must
- // receive the highest parameter index for that parameter; thus iteration
- // order is relevant!
- for (int i = 0; i < params_.length(); i++) {
- Variable* var = params_[i];
- ASSERT(var->scope() == this);
- if (MustAllocate(var)) {
- if (MustAllocateInContext(var)) {
- ASSERT(var->rewrite() == NULL ||
- (var->AsSlot() != NULL &&
- var->AsSlot()->type() == Slot::CONTEXT));
- if (var->rewrite() == NULL) {
- // Only set the heap allocation if the parameter has not
- // been allocated yet.
- AllocateHeapSlot(var);
- }
- } else {
- ASSERT(var->rewrite() == NULL ||
- (var->AsSlot() != NULL &&
- var->AsSlot()->type() == Slot::PARAMETER));
- // Set the parameter index always, even if the parameter
- // was seen before! (We need to access the actual parameter
- // supplied for the last occurrence of a multiply declared
- // parameter.)
- var->set_rewrite(new Slot(var, Slot::PARAMETER, i));
- }
- }
- }
- }
-}
-
-
-void Scope::AllocateNonParameterLocal(Variable* var) {
- ASSERT(var->scope() == this);
- ASSERT(var->rewrite() == NULL ||
- (!var->IsVariable(FACTORY->result_symbol())) ||
- (var->AsSlot() == NULL || var->AsSlot()->type() != Slot::LOCAL));
- if (var->rewrite() == NULL && MustAllocate(var)) {
- if (MustAllocateInContext(var)) {
- AllocateHeapSlot(var);
- } else {
- AllocateStackSlot(var);
- }
- }
-}
-
-
-void Scope::AllocateNonParameterLocals() {
- // All variables that have no rewrite yet are non-parameter locals.
- for (int i = 0; i < temps_.length(); i++) {
- AllocateNonParameterLocal(temps_[i]);
- }
-
- for (VariableMap::Entry* p = variables_.Start();
- p != NULL;
- p = variables_.Next(p)) {
- Variable* var = reinterpret_cast<Variable*>(p->value);
- AllocateNonParameterLocal(var);
- }
-
- // For now, function_ must be allocated at the very end. If it gets
- // allocated in the context, it must be the last slot in the context,
- // because of the current ScopeInfo implementation (see
- // ScopeInfo::ScopeInfo(FunctionScope* scope) constructor).
- if (function_ != NULL) {
- AllocateNonParameterLocal(function_);
- }
-}
-
-
-void Scope::AllocateVariablesRecursively() {
- // Allocate variables for inner scopes.
- for (int i = 0; i < inner_scopes_.length(); i++) {
- inner_scopes_[i]->AllocateVariablesRecursively();
- }
-
- // If scope is already resolved, we still need to allocate
- // variables in inner scopes which might not had been resolved yet.
- if (resolved()) return;
- // The number of slots required for variables.
- num_stack_slots_ = 0;
- num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
-
- // Allocate variables for this scope.
- // Parameters must be allocated first, if any.
- if (is_function_scope()) AllocateParameterLocals();
- AllocateNonParameterLocals();
-
- // Allocate context if necessary.
- bool must_have_local_context = false;
- if (scope_calls_eval_ || scope_contains_with_) {
- // The context for the eval() call or 'with' statement in this scope.
- // Unless we are in the global or an eval scope, we need a local
- // context even if we didn't statically allocate any locals in it,
- // and the compiler will access the context variable. If we are
- // not in an inner scope, the scope is provided from the outside.
- must_have_local_context = is_function_scope();
- }
-
- // If we didn't allocate any locals in the local context, then we only
- // need the minimal number of slots if we must have a local context.
- if (num_heap_slots_ == Context::MIN_CONTEXT_SLOTS &&
- !must_have_local_context) {
- num_heap_slots_ = 0;
- }
-
- // Allocation done.
- ASSERT(num_heap_slots_ == 0 || num_heap_slots_ >= Context::MIN_CONTEXT_SLOTS);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/scopes.h b/src/3rdparty/v8/src/scopes.h
deleted file mode 100644
index 5f031ed..0000000
--- a/src/3rdparty/v8/src/scopes.h
+++ /dev/null
@@ -1,508 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SCOPES_H_
-#define V8_SCOPES_H_
-
-#include "ast.h"
-#include "hashmap.h"
-
-namespace v8 {
-namespace internal {
-
-class CompilationInfo;
-
-
-// A hash map to support fast variable declaration and lookup.
-class VariableMap: public HashMap {
- public:
- VariableMap();
-
- // Dummy constructor. This constructor doesn't set up the map
- // properly so don't use it unless you have a good reason.
- explicit VariableMap(bool gotta_love_static_overloading);
-
- virtual ~VariableMap();
-
- Variable* Declare(Scope* scope,
- Handle<String> name,
- Variable::Mode mode,
- bool is_valid_lhs,
- Variable::Kind kind);
-
- Variable* Lookup(Handle<String> name);
-};
-
-
-// The dynamic scope part holds hash maps for the variables that will
-// be looked up dynamically from within eval and with scopes. The objects
-// are allocated on-demand from Scope::NonLocal to avoid wasting memory
-// and setup time for scopes that don't need them.
-class DynamicScopePart : public ZoneObject {
- public:
- VariableMap* GetMap(Variable::Mode mode) {
- int index = mode - Variable::DYNAMIC;
- ASSERT(index >= 0 && index < 3);
- return &maps_[index];
- }
-
- private:
- VariableMap maps_[3];
-};
-
-
-// Global invariants after AST construction: Each reference (i.e. identifier)
-// to a JavaScript variable (including global properties) is represented by a
-// VariableProxy node. Immediately after AST construction and before variable
-// allocation, most VariableProxy nodes are "unresolved", i.e. not bound to a
-// corresponding variable (though some are bound during parse time). Variable
-// allocation binds each unresolved VariableProxy to one Variable and assigns
-// a location. Note that many VariableProxy nodes may refer to the same Java-
-// Script variable.
-
-class Scope: public ZoneObject {
- public:
- // ---------------------------------------------------------------------------
- // Construction
-
- enum Type {
- EVAL_SCOPE, // the top-level scope for an 'eval' source
- FUNCTION_SCOPE, // the top-level scope for a function
- GLOBAL_SCOPE // the top-level scope for a program or a top-level eval
- };
-
- Scope(Scope* outer_scope, Type type);
-
- virtual ~Scope() { }
-
- // Compute top scope and allocate variables. For lazy compilation the top
- // scope only contains the single lazily compiled function, so this
- // doesn't re-allocate variables repeatedly.
- static bool Analyze(CompilationInfo* info);
-
- static Scope* DeserializeScopeChain(CompilationInfo* info,
- Scope* innermost_scope);
-
- // The scope name is only used for printing/debugging.
- void SetScopeName(Handle<String> scope_name) { scope_name_ = scope_name; }
-
- virtual void Initialize(bool inside_with);
-
- // Called just before leaving a scope.
- virtual void Leave() {
- // No cleanup or fixup necessary.
- }
-
- // ---------------------------------------------------------------------------
- // Declarations
-
- // Lookup a variable in this scope. Returns the variable or NULL if not found.
- virtual Variable* LocalLookup(Handle<String> name);
-
- // Lookup a variable in this scope or outer scopes.
- // Returns the variable or NULL if not found.
- virtual Variable* Lookup(Handle<String> name);
-
- // Declare the function variable for a function literal. This variable
- // is in an intermediate scope between this function scope and the the
- // outer scope. Only possible for function scopes; at most one variable.
- Variable* DeclareFunctionVar(Handle<String> name);
-
- // Declare a local variable in this scope. If the variable has been
- // declared before, the previously declared variable is returned.
- virtual Variable* DeclareLocal(Handle<String> name, Variable::Mode mode);
-
- // Declare an implicit global variable in this scope which must be a
- // global scope. The variable was introduced (possibly from an inner
- // scope) by a reference to an unresolved variable with no intervening
- // with statements or eval calls.
- Variable* DeclareGlobal(Handle<String> name);
-
- // Add a parameter to the parameter list. The parameter must have been
- // declared via Declare. The same parameter may occur more than once in
- // the parameter list; they must be added in source order, from left to
- // right.
- void AddParameter(Variable* var);
-
- // Create a new unresolved variable.
- virtual VariableProxy* NewUnresolved(Handle<String> name,
- bool inside_with,
- int position = RelocInfo::kNoPosition);
-
- // Remove a unresolved variable. During parsing, an unresolved variable
- // may have been added optimistically, but then only the variable name
- // was used (typically for labels). If the variable was not declared, the
- // addition introduced a new unresolved variable which may end up being
- // allocated globally as a "ghost" variable. RemoveUnresolved removes
- // such a variable again if it was added; otherwise this is a no-op.
- void RemoveUnresolved(VariableProxy* var);
-
- // Creates a new temporary variable in this scope. The name is only used
- // for printing and cannot be used to find the variable. In particular,
- // the only way to get hold of the temporary is by keeping the Variable*
- // around.
- virtual Variable* NewTemporary(Handle<String> name);
-
- // Adds the specific declaration node to the list of declarations in
- // this scope. The declarations are processed as part of entering
- // the scope; see codegen.cc:ProcessDeclarations.
- void AddDeclaration(Declaration* declaration);
-
- // ---------------------------------------------------------------------------
- // Illegal redeclaration support.
-
- // Set an expression node that will be executed when the scope is
- // entered. We only keep track of one illegal redeclaration node per
- // scope - the first one - so if you try to set it multiple times
- // the additional requests will be silently ignored.
- void SetIllegalRedeclaration(Expression* expression);
-
- // Visit the illegal redeclaration expression. Do not call if the
- // scope doesn't have an illegal redeclaration node.
- void VisitIllegalRedeclaration(AstVisitor* visitor);
-
- // Check if the scope has (at least) one illegal redeclaration.
- bool HasIllegalRedeclaration() const { return illegal_redecl_ != NULL; }
-
-
- // ---------------------------------------------------------------------------
- // Scope-specific info.
-
- // Inform the scope that the corresponding code contains a with statement.
- void RecordWithStatement() { scope_contains_with_ = true; }
-
- // Inform the scope that the corresponding code contains an eval call.
- void RecordEvalCall() { scope_calls_eval_ = true; }
-
- // Enable strict mode for the scope (unless disabled by a global flag).
- void EnableStrictMode() {
- strict_mode_ = FLAG_strict_mode;
- }
-
- // ---------------------------------------------------------------------------
- // Predicates.
-
- // Specific scope types.
- bool is_eval_scope() const { return type_ == EVAL_SCOPE; }
- bool is_function_scope() const { return type_ == FUNCTION_SCOPE; }
- bool is_global_scope() const { return type_ == GLOBAL_SCOPE; }
- bool is_strict_mode() const { return strict_mode_; }
-
- // Information about which scopes calls eval.
- bool calls_eval() const { return scope_calls_eval_; }
- bool outer_scope_calls_eval() const { return outer_scope_calls_eval_; }
-
- // Is this scope inside a with statement.
- bool inside_with() const { return scope_inside_with_; }
- // Does this scope contain a with statement.
- bool contains_with() const { return scope_contains_with_; }
-
- // The scope immediately surrounding this scope, or NULL.
- Scope* outer_scope() const { return outer_scope_; }
-
- // ---------------------------------------------------------------------------
- // Accessors.
-
- // A new variable proxy corresponding to the (function) receiver.
- VariableProxy* receiver() const {
- VariableProxy* proxy =
- new VariableProxy(FACTORY->this_symbol(), true, false);
- proxy->BindTo(receiver_);
- return proxy;
- }
-
- // The variable holding the function literal for named function
- // literals, or NULL.
- // Only valid for function scopes.
- Variable* function() const {
- ASSERT(is_function_scope());
- return function_;
- }
-
- // Parameters. The left-most parameter has index 0.
- // Only valid for function scopes.
- Variable* parameter(int index) const {
- ASSERT(is_function_scope());
- return params_[index];
- }
-
- int num_parameters() const { return params_.length(); }
-
- // The local variable 'arguments' if we need to allocate it; NULL otherwise.
- // If arguments() exist, arguments_shadow() exists, too.
- Variable* arguments() const { return arguments_; }
-
- // The '.arguments' shadow variable if we need to allocate it; NULL otherwise.
- // If arguments_shadow() exist, arguments() exists, too.
- Variable* arguments_shadow() const { return arguments_shadow_; }
-
- // Declarations list.
- ZoneList<Declaration*>* declarations() { return &decls_; }
-
-
-
- // ---------------------------------------------------------------------------
- // Variable allocation.
-
- // Collect all used locals in this scope.
- template<class Allocator>
- void CollectUsedVariables(List<Variable*, Allocator>* locals);
-
- // Resolve and fill in the allocation information for all variables
- // in this scopes. Must be called *after* all scopes have been
- // processed (parsed) to ensure that unresolved variables can be
- // resolved properly.
- //
- // In the case of code compiled and run using 'eval', the context
- // parameter is the context in which eval was called. In all other
- // cases the context parameter is an empty handle.
- void AllocateVariables(Handle<Context> context);
-
- // Result of variable allocation.
- int num_stack_slots() const { return num_stack_slots_; }
- int num_heap_slots() const { return num_heap_slots_; }
-
- // Make sure this scope and all outer scopes are eagerly compiled.
- void ForceEagerCompilation() { force_eager_compilation_ = true; }
-
- // Determine if we can use lazy compilation for this scope.
- bool AllowsLazyCompilation() const;
-
- // True if the outer context of this scope is always the global context.
- virtual bool HasTrivialOuterContext() const;
-
- // The number of contexts between this and scope; zero if this == scope.
- int ContextChainLength(Scope* scope);
-
- // ---------------------------------------------------------------------------
- // Strict mode support.
- bool IsDeclared(Handle<String> name) {
- // During formal parameter list parsing the scope only contains
- // two variables inserted at initialization: "this" and "arguments".
- // "this" is an invalid parameter name and "arguments" is invalid parameter
- // name in strict mode. Therefore looking up with the map which includes
- // "this" and "arguments" in addition to all formal parameters is safe.
- return variables_.Lookup(name) != NULL;
- }
-
- // ---------------------------------------------------------------------------
- // Debugging.
-
-#ifdef DEBUG
- void Print(int n = 0); // n = indentation; n < 0 => don't print recursively
-#endif
-
- // ---------------------------------------------------------------------------
- // Implementation.
- protected:
- friend class ParserFactory;
-
- explicit Scope(Type type);
-
- // Scope tree.
- Scope* outer_scope_; // the immediately enclosing outer scope, or NULL
- ZoneList<Scope*> inner_scopes_; // the immediately enclosed inner scopes
-
- // The scope type.
- Type type_;
-
- // Debugging support.
- Handle<String> scope_name_;
-
- // The variables declared in this scope:
- //
- // All user-declared variables (incl. parameters). For global scopes
- // variables may be implicitly 'declared' by being used (possibly in
- // an inner scope) with no intervening with statements or eval calls.
- VariableMap variables_;
- // Compiler-allocated (user-invisible) temporaries.
- ZoneList<Variable*> temps_;
- // Parameter list in source order.
- ZoneList<Variable*> params_;
- // Variables that must be looked up dynamically.
- DynamicScopePart* dynamics_;
- // Unresolved variables referred to from this scope.
- ZoneList<VariableProxy*> unresolved_;
- // Declarations.
- ZoneList<Declaration*> decls_;
- // Convenience variable.
- Variable* receiver_;
- // Function variable, if any; function scopes only.
- Variable* function_;
- // Convenience variable; function scopes only.
- Variable* arguments_;
- // Convenience variable; function scopes only.
- Variable* arguments_shadow_;
-
- // Illegal redeclaration.
- Expression* illegal_redecl_;
-
- // Scope-specific information.
- bool scope_inside_with_; // this scope is inside a 'with' of some outer scope
- bool scope_contains_with_; // this scope contains a 'with' statement
- bool scope_calls_eval_; // this scope contains an 'eval' call
- bool strict_mode_; // this scope is a strict mode scope
-
- // Computed via PropagateScopeInfo.
- bool outer_scope_calls_eval_;
- bool inner_scope_calls_eval_;
- bool outer_scope_is_eval_scope_;
- bool force_eager_compilation_;
-
- // Computed via AllocateVariables; function scopes only.
- int num_stack_slots_;
- int num_heap_slots_;
-
- // Serialized scopes support.
- SerializedScopeInfo* scope_info_;
- bool resolved() { return scope_info_ != NULL; }
-
- // Create a non-local variable with a given name.
- // These variables are looked up dynamically at runtime.
- Variable* NonLocal(Handle<String> name, Variable::Mode mode);
-
- // Variable resolution.
- Variable* LookupRecursive(Handle<String> name,
- bool inner_lookup,
- Variable** invalidated_local);
- void ResolveVariable(Scope* global_scope,
- Handle<Context> context,
- VariableProxy* proxy);
- void ResolveVariablesRecursively(Scope* global_scope,
- Handle<Context> context);
-
- // Scope analysis.
- bool PropagateScopeInfo(bool outer_scope_calls_eval,
- bool outer_scope_is_eval_scope);
- bool HasTrivialContext() const;
-
- // Predicates.
- bool MustAllocate(Variable* var);
- bool MustAllocateInContext(Variable* var);
- bool HasArgumentsParameter();
-
- // Variable allocation.
- void AllocateStackSlot(Variable* var);
- void AllocateHeapSlot(Variable* var);
- void AllocateParameterLocals();
- void AllocateNonParameterLocal(Variable* var);
- void AllocateNonParameterLocals();
- void AllocateVariablesRecursively();
-
- private:
- Scope(Scope* inner_scope, SerializedScopeInfo* scope_info);
-
- void AddInnerScope(Scope* inner_scope) {
- if (inner_scope != NULL) {
- inner_scopes_.Add(inner_scope);
- inner_scope->outer_scope_ = this;
- }
- }
-
- void SetDefaults(Type type,
- Scope* outer_scope,
- SerializedScopeInfo* scope_info) {
- outer_scope_ = outer_scope;
- type_ = type;
- scope_name_ = FACTORY->empty_symbol();
- dynamics_ = NULL;
- receiver_ = NULL;
- function_ = NULL;
- arguments_ = NULL;
- arguments_shadow_ = NULL;
- illegal_redecl_ = NULL;
- scope_inside_with_ = false;
- scope_contains_with_ = false;
- scope_calls_eval_ = false;
- // Inherit the strict mode from the parent scope.
- strict_mode_ = (outer_scope != NULL) && outer_scope->strict_mode_;
- outer_scope_calls_eval_ = false;
- inner_scope_calls_eval_ = false;
- outer_scope_is_eval_scope_ = false;
- force_eager_compilation_ = false;
- num_stack_slots_ = 0;
- num_heap_slots_ = 0;
- scope_info_ = scope_info;
- }
-};
-
-
-// Scope used during pre-parsing.
-class DummyScope : public Scope {
- public:
- DummyScope()
- : Scope(GLOBAL_SCOPE),
- nesting_level_(1), // Allows us to Leave the initial scope.
- inside_with_level_(kNotInsideWith) {
- outer_scope_ = this;
- scope_inside_with_ = false;
- }
-
- virtual void Initialize(bool inside_with) {
- nesting_level_++;
- if (inside_with && inside_with_level_ == kNotInsideWith) {
- inside_with_level_ = nesting_level_;
- }
- ASSERT(inside_with_level_ <= nesting_level_);
- }
-
- virtual void Leave() {
- nesting_level_--;
- ASSERT(nesting_level_ >= 0);
- if (nesting_level_ < inside_with_level_) {
- inside_with_level_ = kNotInsideWith;
- }
- ASSERT(inside_with_level_ <= nesting_level_);
- }
-
- virtual Variable* Lookup(Handle<String> name) { return NULL; }
-
- virtual VariableProxy* NewUnresolved(Handle<String> name,
- bool inside_with,
- int position = RelocInfo::kNoPosition) {
- return NULL;
- }
-
- virtual Variable* NewTemporary(Handle<String> name) { return NULL; }
-
- virtual bool HasTrivialOuterContext() const {
- return (nesting_level_ == 0 || inside_with_level_ <= 0);
- }
-
- private:
- static const int kNotInsideWith = -1;
- // Number of surrounding scopes of the current scope.
- int nesting_level_;
- // Nesting level of outermost scope that is contained in a with statement,
- // or kNotInsideWith if there are no with's around the current scope.
- int inside_with_level_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_SCOPES_H_
diff --git a/src/3rdparty/v8/src/serialize.cc b/src/3rdparty/v8/src/serialize.cc
deleted file mode 100644
index 12e9613..0000000
--- a/src/3rdparty/v8/src/serialize.cc
+++ /dev/null
@@ -1,1574 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "accessors.h"
-#include "api.h"
-#include "execution.h"
-#include "global-handles.h"
-#include "ic-inl.h"
-#include "natives.h"
-#include "platform.h"
-#include "runtime.h"
-#include "serialize.h"
-#include "stub-cache.h"
-#include "v8threads.h"
-#include "bootstrapper.h"
-
-namespace v8 {
-namespace internal {
-
-
-// -----------------------------------------------------------------------------
-// Coding of external references.
-
-// The encoding of an external reference. The type is in the high word.
-// The id is in the low word.
-static uint32_t EncodeExternal(TypeCode type, uint16_t id) {
- return static_cast<uint32_t>(type) << 16 | id;
-}
-
-
-static int* GetInternalPointer(StatsCounter* counter) {
- // All counters refer to dummy_counter, if deserializing happens without
- // setting up counters.
- static int dummy_counter = 0;
- return counter->Enabled() ? counter->GetInternalPointer() : &dummy_counter;
-}
-
-
-// ExternalReferenceTable is a helper class that defines the relationship
-// between external references and their encodings. It is used to build
-// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
-class ExternalReferenceTable {
- public:
- static ExternalReferenceTable* instance(Isolate* isolate) {
- ExternalReferenceTable* external_reference_table =
- isolate->external_reference_table();
- if (external_reference_table == NULL) {
- external_reference_table = new ExternalReferenceTable(isolate);
- isolate->set_external_reference_table(external_reference_table);
- }
- return external_reference_table;
- }
-
- int size() const { return refs_.length(); }
-
- Address address(int i) { return refs_[i].address; }
-
- uint32_t code(int i) { return refs_[i].code; }
-
- const char* name(int i) { return refs_[i].name; }
-
- int max_id(int code) { return max_id_[code]; }
-
- private:
- explicit ExternalReferenceTable(Isolate* isolate) : refs_(64) {
- PopulateTable(isolate);
- }
- ~ExternalReferenceTable() { }
-
- struct ExternalReferenceEntry {
- Address address;
- uint32_t code;
- const char* name;
- };
-
- void PopulateTable(Isolate* isolate);
-
- // For a few types of references, we can get their address from their id.
- void AddFromId(TypeCode type,
- uint16_t id,
- const char* name,
- Isolate* isolate);
-
- // For other types of references, the caller will figure out the address.
- void Add(Address address, TypeCode type, uint16_t id, const char* name);
-
- List<ExternalReferenceEntry> refs_;
- int max_id_[kTypeCodeCount];
-};
-
-
-void ExternalReferenceTable::AddFromId(TypeCode type,
- uint16_t id,
- const char* name,
- Isolate* isolate) {
- Address address;
- switch (type) {
- case C_BUILTIN: {
- ExternalReference ref(static_cast<Builtins::CFunctionId>(id), isolate);
- address = ref.address();
- break;
- }
- case BUILTIN: {
- ExternalReference ref(static_cast<Builtins::Name>(id), isolate);
- address = ref.address();
- break;
- }
- case RUNTIME_FUNCTION: {
- ExternalReference ref(static_cast<Runtime::FunctionId>(id), isolate);
- address = ref.address();
- break;
- }
- case IC_UTILITY: {
- ExternalReference ref(IC_Utility(static_cast<IC::UtilityId>(id)),
- isolate);
- address = ref.address();
- break;
- }
- default:
- UNREACHABLE();
- return;
- }
- Add(address, type, id, name);
-}
-
-
-void ExternalReferenceTable::Add(Address address,
- TypeCode type,
- uint16_t id,
- const char* name) {
- ASSERT_NE(NULL, address);
- ExternalReferenceEntry entry;
- entry.address = address;
- entry.code = EncodeExternal(type, id);
- entry.name = name;
- ASSERT_NE(0, entry.code);
- refs_.Add(entry);
- if (id > max_id_[type]) max_id_[type] = id;
-}
-
-
-void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
- for (int type_code = 0; type_code < kTypeCodeCount; type_code++) {
- max_id_[type_code] = 0;
- }
-
- // The following populates all of the different type of external references
- // into the ExternalReferenceTable.
- //
- // NOTE: This function was originally 100k of code. It has since been
- // rewritten to be mostly table driven, as the callback macro style tends to
- // very easily cause code bloat. Please be careful in the future when adding
- // new references.
-
- struct RefTableEntry {
- TypeCode type;
- uint16_t id;
- const char* name;
- };
-
- static const RefTableEntry ref_table[] = {
- // Builtins
-#define DEF_ENTRY_C(name, ignored) \
- { C_BUILTIN, \
- Builtins::c_##name, \
- "Builtins::" #name },
-
- BUILTIN_LIST_C(DEF_ENTRY_C)
-#undef DEF_ENTRY_C
-
-#define DEF_ENTRY_C(name, ignored) \
- { BUILTIN, \
- Builtins::k##name, \
- "Builtins::" #name },
-#define DEF_ENTRY_A(name, kind, state, extra) DEF_ENTRY_C(name, ignored)
-
- BUILTIN_LIST_C(DEF_ENTRY_C)
- BUILTIN_LIST_A(DEF_ENTRY_A)
- BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A)
-#undef DEF_ENTRY_C
-#undef DEF_ENTRY_A
-
- // Runtime functions
-#define RUNTIME_ENTRY(name, nargs, ressize) \
- { RUNTIME_FUNCTION, \
- Runtime::k##name, \
- "Runtime::" #name },
-
- RUNTIME_FUNCTION_LIST(RUNTIME_ENTRY)
-#undef RUNTIME_ENTRY
-
- // IC utilities
-#define IC_ENTRY(name) \
- { IC_UTILITY, \
- IC::k##name, \
- "IC::" #name },
-
- IC_UTIL_LIST(IC_ENTRY)
-#undef IC_ENTRY
- }; // end of ref_table[].
-
- for (size_t i = 0; i < ARRAY_SIZE(ref_table); ++i) {
- AddFromId(ref_table[i].type,
- ref_table[i].id,
- ref_table[i].name,
- isolate);
- }
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Debug addresses
- Add(Debug_Address(Debug::k_after_break_target_address).address(isolate),
- DEBUG_ADDRESS,
- Debug::k_after_break_target_address << kDebugIdShift,
- "Debug::after_break_target_address()");
- Add(Debug_Address(Debug::k_debug_break_slot_address).address(isolate),
- DEBUG_ADDRESS,
- Debug::k_debug_break_slot_address << kDebugIdShift,
- "Debug::debug_break_slot_address()");
- Add(Debug_Address(Debug::k_debug_break_return_address).address(isolate),
- DEBUG_ADDRESS,
- Debug::k_debug_break_return_address << kDebugIdShift,
- "Debug::debug_break_return_address()");
- Add(Debug_Address(Debug::k_restarter_frame_function_pointer).address(isolate),
- DEBUG_ADDRESS,
- Debug::k_restarter_frame_function_pointer << kDebugIdShift,
- "Debug::restarter_frame_function_pointer_address()");
-#endif
-
- // Stat counters
- struct StatsRefTableEntry {
- StatsCounter* (Counters::*counter)();
- uint16_t id;
- const char* name;
- };
-
- const StatsRefTableEntry stats_ref_table[] = {
-#define COUNTER_ENTRY(name, caption) \
- { &Counters::name, \
- Counters::k_##name, \
- "Counters::" #name },
-
- STATS_COUNTER_LIST_1(COUNTER_ENTRY)
- STATS_COUNTER_LIST_2(COUNTER_ENTRY)
-#undef COUNTER_ENTRY
- }; // end of stats_ref_table[].
-
- Counters* counters = isolate->counters();
- for (size_t i = 0; i < ARRAY_SIZE(stats_ref_table); ++i) {
- Add(reinterpret_cast<Address>(GetInternalPointer(
- (counters->*(stats_ref_table[i].counter))())),
- STATS_COUNTER,
- stats_ref_table[i].id,
- stats_ref_table[i].name);
- }
-
- // Top addresses
-
- const char* AddressNames[] = {
-#define C(name) "Isolate::" #name,
- ISOLATE_ADDRESS_LIST(C)
- ISOLATE_ADDRESS_LIST_PROF(C)
- NULL
-#undef C
- };
-
- for (uint16_t i = 0; i < Isolate::k_isolate_address_count; ++i) {
- Add(isolate->get_address_from_id((Isolate::AddressId)i),
- TOP_ADDRESS, i, AddressNames[i]);
- }
-
- // Accessors
-#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
- Add((Address)&Accessors::name, \
- ACCESSOR, \
- Accessors::k##name, \
- "Accessors::" #name);
-
- ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
-#undef ACCESSOR_DESCRIPTOR_DECLARATION
-
- StubCache* stub_cache = isolate->stub_cache();
-
- // Stub cache tables
- Add(stub_cache->key_reference(StubCache::kPrimary).address(),
- STUB_CACHE_TABLE,
- 1,
- "StubCache::primary_->key");
- Add(stub_cache->value_reference(StubCache::kPrimary).address(),
- STUB_CACHE_TABLE,
- 2,
- "StubCache::primary_->value");
- Add(stub_cache->key_reference(StubCache::kSecondary).address(),
- STUB_CACHE_TABLE,
- 3,
- "StubCache::secondary_->key");
- Add(stub_cache->value_reference(StubCache::kSecondary).address(),
- STUB_CACHE_TABLE,
- 4,
- "StubCache::secondary_->value");
-
- // Runtime entries
- Add(ExternalReference::perform_gc_function(isolate).address(),
- RUNTIME_ENTRY,
- 1,
- "Runtime::PerformGC");
- Add(ExternalReference::fill_heap_number_with_random_function(
- isolate).address(),
- RUNTIME_ENTRY,
- 2,
- "V8::FillHeapNumberWithRandom");
- Add(ExternalReference::random_uint32_function(isolate).address(),
- RUNTIME_ENTRY,
- 3,
- "V8::Random");
- Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
- RUNTIME_ENTRY,
- 4,
- "HandleScope::DeleteExtensions");
-
- // Miscellaneous
- Add(ExternalReference::the_hole_value_location(isolate).address(),
- UNCLASSIFIED,
- 2,
- "Factory::the_hole_value().location()");
- Add(ExternalReference::roots_address(isolate).address(),
- UNCLASSIFIED,
- 3,
- "Heap::roots_address()");
- Add(ExternalReference::address_of_stack_limit(isolate).address(),
- UNCLASSIFIED,
- 4,
- "StackGuard::address_of_jslimit()");
- Add(ExternalReference::address_of_real_stack_limit(isolate).address(),
- UNCLASSIFIED,
- 5,
- "StackGuard::address_of_real_jslimit()");
-#ifndef V8_INTERPRETED_REGEXP
- Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(),
- UNCLASSIFIED,
- 6,
- "RegExpStack::limit_address()");
- Add(ExternalReference::address_of_regexp_stack_memory_address(
- isolate).address(),
- UNCLASSIFIED,
- 7,
- "RegExpStack::memory_address()");
- Add(ExternalReference::address_of_regexp_stack_memory_size(isolate).address(),
- UNCLASSIFIED,
- 8,
- "RegExpStack::memory_size()");
- Add(ExternalReference::address_of_static_offsets_vector(isolate).address(),
- UNCLASSIFIED,
- 9,
- "OffsetsVector::static_offsets_vector");
-#endif // V8_INTERPRETED_REGEXP
- Add(ExternalReference::new_space_start(isolate).address(),
- UNCLASSIFIED,
- 10,
- "Heap::NewSpaceStart()");
- Add(ExternalReference::new_space_mask(isolate).address(),
- UNCLASSIFIED,
- 11,
- "Heap::NewSpaceMask()");
- Add(ExternalReference::heap_always_allocate_scope_depth(isolate).address(),
- UNCLASSIFIED,
- 12,
- "Heap::always_allocate_scope_depth()");
- Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
- UNCLASSIFIED,
- 13,
- "Heap::NewSpaceAllocationLimitAddress()");
- Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
- UNCLASSIFIED,
- 14,
- "Heap::NewSpaceAllocationTopAddress()");
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Add(ExternalReference::debug_break(isolate).address(),
- UNCLASSIFIED,
- 15,
- "Debug::Break()");
- Add(ExternalReference::debug_step_in_fp_address(isolate).address(),
- UNCLASSIFIED,
- 16,
- "Debug::step_in_fp_addr()");
-#endif
- Add(ExternalReference::double_fp_operation(Token::ADD, isolate).address(),
- UNCLASSIFIED,
- 17,
- "add_two_doubles");
- Add(ExternalReference::double_fp_operation(Token::SUB, isolate).address(),
- UNCLASSIFIED,
- 18,
- "sub_two_doubles");
- Add(ExternalReference::double_fp_operation(Token::MUL, isolate).address(),
- UNCLASSIFIED,
- 19,
- "mul_two_doubles");
- Add(ExternalReference::double_fp_operation(Token::DIV, isolate).address(),
- UNCLASSIFIED,
- 20,
- "div_two_doubles");
- Add(ExternalReference::double_fp_operation(Token::MOD, isolate).address(),
- UNCLASSIFIED,
- 21,
- "mod_two_doubles");
- Add(ExternalReference::compare_doubles(isolate).address(),
- UNCLASSIFIED,
- 22,
- "compare_doubles");
-#ifndef V8_INTERPRETED_REGEXP
- Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
- UNCLASSIFIED,
- 23,
- "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
- Add(ExternalReference::re_check_stack_guard_state(isolate).address(),
- UNCLASSIFIED,
- 24,
- "RegExpMacroAssembler*::CheckStackGuardState()");
- Add(ExternalReference::re_grow_stack(isolate).address(),
- UNCLASSIFIED,
- 25,
- "NativeRegExpMacroAssembler::GrowStack()");
- Add(ExternalReference::re_word_character_map().address(),
- UNCLASSIFIED,
- 26,
- "NativeRegExpMacroAssembler::word_character_map");
-#endif // V8_INTERPRETED_REGEXP
- // Keyed lookup cache.
- Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
- UNCLASSIFIED,
- 27,
- "KeyedLookupCache::keys()");
- Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
- UNCLASSIFIED,
- 28,
- "KeyedLookupCache::field_offsets()");
- Add(ExternalReference::transcendental_cache_array_address(isolate).address(),
- UNCLASSIFIED,
- 29,
- "TranscendentalCache::caches()");
- Add(ExternalReference::handle_scope_next_address().address(),
- UNCLASSIFIED,
- 30,
- "HandleScope::next");
- Add(ExternalReference::handle_scope_limit_address().address(),
- UNCLASSIFIED,
- 31,
- "HandleScope::limit");
- Add(ExternalReference::handle_scope_level_address().address(),
- UNCLASSIFIED,
- 32,
- "HandleScope::level");
- Add(ExternalReference::new_deoptimizer_function(isolate).address(),
- UNCLASSIFIED,
- 33,
- "Deoptimizer::New()");
- Add(ExternalReference::compute_output_frames_function(isolate).address(),
- UNCLASSIFIED,
- 34,
- "Deoptimizer::ComputeOutputFrames()");
- Add(ExternalReference::address_of_min_int().address(),
- UNCLASSIFIED,
- 35,
- "LDoubleConstant::min_int");
- Add(ExternalReference::address_of_one_half().address(),
- UNCLASSIFIED,
- 36,
- "LDoubleConstant::one_half");
- Add(ExternalReference::isolate_address().address(),
- UNCLASSIFIED,
- 37,
- "isolate");
- Add(ExternalReference::address_of_minus_zero().address(),
- UNCLASSIFIED,
- 38,
- "LDoubleConstant::minus_zero");
- Add(ExternalReference::address_of_negative_infinity().address(),
- UNCLASSIFIED,
- 39,
- "LDoubleConstant::negative_infinity");
- Add(ExternalReference::power_double_double_function(isolate).address(),
- UNCLASSIFIED,
- 40,
- "power_double_double_function");
- Add(ExternalReference::power_double_int_function(isolate).address(),
- UNCLASSIFIED,
- 41,
- "power_double_int_function");
- Add(ExternalReference::arguments_marker_location(isolate).address(),
- UNCLASSIFIED,
- 42,
- "Factory::arguments_marker().location()");
-}
-
-
-ExternalReferenceEncoder::ExternalReferenceEncoder()
- : encodings_(Match),
- isolate_(Isolate::Current()) {
- ExternalReferenceTable* external_references =
- ExternalReferenceTable::instance(isolate_);
- for (int i = 0; i < external_references->size(); ++i) {
- Put(external_references->address(i), i);
- }
-}
-
-
-uint32_t ExternalReferenceEncoder::Encode(Address key) const {
- int index = IndexOf(key);
- ASSERT(key == NULL || index >= 0);
- return index >=0 ?
- ExternalReferenceTable::instance(isolate_)->code(index) : 0;
-}
-
-
-const char* ExternalReferenceEncoder::NameOfAddress(Address key) const {
- int index = IndexOf(key);
- return index >= 0 ?
- ExternalReferenceTable::instance(isolate_)->name(index) : NULL;
-}
-
-
-int ExternalReferenceEncoder::IndexOf(Address key) const {
- if (key == NULL) return -1;
- HashMap::Entry* entry =
- const_cast<HashMap&>(encodings_).Lookup(key, Hash(key), false);
- return entry == NULL
- ? -1
- : static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
-}
-
-
-void ExternalReferenceEncoder::Put(Address key, int index) {
- HashMap::Entry* entry = encodings_.Lookup(key, Hash(key), true);
- entry->value = reinterpret_cast<void*>(index);
-}
-
-
-ExternalReferenceDecoder::ExternalReferenceDecoder()
- : encodings_(NewArray<Address*>(kTypeCodeCount)),
- isolate_(Isolate::Current()) {
- ExternalReferenceTable* external_references =
- ExternalReferenceTable::instance(isolate_);
- for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
- int max = external_references->max_id(type) + 1;
- encodings_[type] = NewArray<Address>(max + 1);
- }
- for (int i = 0; i < external_references->size(); ++i) {
- Put(external_references->code(i), external_references->address(i));
- }
-}
-
-
-ExternalReferenceDecoder::~ExternalReferenceDecoder() {
- for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
- DeleteArray(encodings_[type]);
- }
- DeleteArray(encodings_);
-}
-
-
-bool Serializer::serialization_enabled_ = false;
-bool Serializer::too_late_to_enable_now_ = false;
-
-
-Deserializer::Deserializer(SnapshotByteSource* source)
- : isolate_(NULL),
- source_(source),
- external_reference_decoder_(NULL) {
-}
-
-
-// This routine both allocates a new object, and also keeps
-// track of where objects have been allocated so that we can
-// fix back references when deserializing.
-Address Deserializer::Allocate(int space_index, Space* space, int size) {
- Address address;
- if (!SpaceIsLarge(space_index)) {
- ASSERT(!SpaceIsPaged(space_index) ||
- size <= Page::kPageSize - Page::kObjectStartOffset);
- MaybeObject* maybe_new_allocation;
- if (space_index == NEW_SPACE) {
- maybe_new_allocation =
- reinterpret_cast<NewSpace*>(space)->AllocateRaw(size);
- } else {
- maybe_new_allocation =
- reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size);
- }
- Object* new_allocation = maybe_new_allocation->ToObjectUnchecked();
- HeapObject* new_object = HeapObject::cast(new_allocation);
- address = new_object->address();
- high_water_[space_index] = address + size;
- } else {
- ASSERT(SpaceIsLarge(space_index));
- LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space);
- Object* new_allocation;
- if (space_index == kLargeData) {
- new_allocation = lo_space->AllocateRaw(size)->ToObjectUnchecked();
- } else if (space_index == kLargeFixedArray) {
- new_allocation =
- lo_space->AllocateRawFixedArray(size)->ToObjectUnchecked();
- } else {
- ASSERT_EQ(kLargeCode, space_index);
- new_allocation = lo_space->AllocateRawCode(size)->ToObjectUnchecked();
- }
- HeapObject* new_object = HeapObject::cast(new_allocation);
- // Record all large objects in the same space.
- address = new_object->address();
- pages_[LO_SPACE].Add(address);
- }
- last_object_address_ = address;
- return address;
-}
-
-
-// This returns the address of an object that has been described in the
-// snapshot as being offset bytes back in a particular space.
-HeapObject* Deserializer::GetAddressFromEnd(int space) {
- int offset = source_->GetInt();
- ASSERT(!SpaceIsLarge(space));
- offset <<= kObjectAlignmentBits;
- return HeapObject::FromAddress(high_water_[space] - offset);
-}
-
-
-// This returns the address of an object that has been described in the
-// snapshot as being offset bytes into a particular space.
-HeapObject* Deserializer::GetAddressFromStart(int space) {
- int offset = source_->GetInt();
- if (SpaceIsLarge(space)) {
- // Large spaces have one object per 'page'.
- return HeapObject::FromAddress(pages_[LO_SPACE][offset]);
- }
- offset <<= kObjectAlignmentBits;
- if (space == NEW_SPACE) {
- // New space has only one space - numbered 0.
- return HeapObject::FromAddress(pages_[space][0] + offset);
- }
- ASSERT(SpaceIsPaged(space));
- int page_of_pointee = offset >> kPageSizeBits;
- Address object_address = pages_[space][page_of_pointee] +
- (offset & Page::kPageAlignmentMask);
- return HeapObject::FromAddress(object_address);
-}
-
-
-void Deserializer::Deserialize() {
- isolate_ = Isolate::Current();
- // Don't GC while deserializing - just expand the heap.
- AlwaysAllocateScope always_allocate;
- // Don't use the free lists while deserializing.
- LinearAllocationScope allocate_linearly;
- // No active threads.
- ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse());
- // No active handles.
- ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
- // Make sure the entire partial snapshot cache is traversed, filling it with
- // valid object pointers.
- isolate_->set_serialize_partial_snapshot_cache_length(
- Isolate::kPartialSnapshotCacheCapacity);
- ASSERT_EQ(NULL, external_reference_decoder_);
- external_reference_decoder_ = new ExternalReferenceDecoder();
- isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
- isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
-
- isolate_->heap()->set_global_contexts_list(
- isolate_->heap()->undefined_value());
-}
-
-
-void Deserializer::DeserializePartial(Object** root) {
- isolate_ = Isolate::Current();
- // Don't GC while deserializing - just expand the heap.
- AlwaysAllocateScope always_allocate;
- // Don't use the free lists while deserializing.
- LinearAllocationScope allocate_linearly;
- if (external_reference_decoder_ == NULL) {
- external_reference_decoder_ = new ExternalReferenceDecoder();
- }
- VisitPointer(root);
-}
-
-
-Deserializer::~Deserializer() {
- ASSERT(source_->AtEOF());
- if (external_reference_decoder_) {
- delete external_reference_decoder_;
- external_reference_decoder_ = NULL;
- }
-}
-
-
-// This is called on the roots. It is the driver of the deserialization
-// process. It is also called on the body of each function.
-void Deserializer::VisitPointers(Object** start, Object** end) {
- // The space must be new space. Any other space would cause ReadChunk to try
- // to update the remembered using NULL as the address.
- ReadChunk(start, end, NEW_SPACE, NULL);
-}
-
-
-// This routine writes the new object into the pointer provided and then
-// returns true if the new object was in young space and false otherwise.
-// The reason for this strange interface is that otherwise the object is
-// written very late, which means the ByteArray map is not set up by the
-// time we need to use it to mark the space at the end of a page free (by
-// making it into a byte array).
-void Deserializer::ReadObject(int space_number,
- Space* space,
- Object** write_back) {
- int size = source_->GetInt() << kObjectAlignmentBits;
- Address address = Allocate(space_number, space, size);
- *write_back = HeapObject::FromAddress(address);
- Object** current = reinterpret_cast<Object**>(address);
- Object** limit = current + (size >> kPointerSizeLog2);
- if (FLAG_log_snapshot_positions) {
- LOG(isolate_, SnapshotPositionEvent(address, source_->position()));
- }
- ReadChunk(current, limit, space_number, address);
-#ifdef DEBUG
- bool is_codespace = (space == HEAP->code_space()) ||
- ((space == HEAP->lo_space()) && (space_number == kLargeCode));
- ASSERT(HeapObject::FromAddress(address)->IsCode() == is_codespace);
-#endif
-}
-
-
-// This macro is always used with a constant argument so it should all fold
-// away to almost nothing in the generated code. It might be nicer to do this
-// with the ternary operator but there are type issues with that.
-#define ASSIGN_DEST_SPACE(space_number) \
- Space* dest_space; \
- if (space_number == NEW_SPACE) { \
- dest_space = isolate->heap()->new_space(); \
- } else if (space_number == OLD_POINTER_SPACE) { \
- dest_space = isolate->heap()->old_pointer_space(); \
- } else if (space_number == OLD_DATA_SPACE) { \
- dest_space = isolate->heap()->old_data_space(); \
- } else if (space_number == CODE_SPACE) { \
- dest_space = isolate->heap()->code_space(); \
- } else if (space_number == MAP_SPACE) { \
- dest_space = isolate->heap()->map_space(); \
- } else if (space_number == CELL_SPACE) { \
- dest_space = isolate->heap()->cell_space(); \
- } else { \
- ASSERT(space_number >= LO_SPACE); \
- dest_space = isolate->heap()->lo_space(); \
- }
-
-
-static const int kUnknownOffsetFromStart = -1;
-
-
-void Deserializer::ReadChunk(Object** current,
- Object** limit,
- int source_space,
- Address address) {
- Isolate* const isolate = isolate_;
- while (current < limit) {
- int data = source_->Get();
- switch (data) {
-#define CASE_STATEMENT(where, how, within, space_number) \
- case where + how + within + space_number: \
- ASSERT((where & ~kPointedToMask) == 0); \
- ASSERT((how & ~kHowToCodeMask) == 0); \
- ASSERT((within & ~kWhereToPointMask) == 0); \
- ASSERT((space_number & ~kSpaceMask) == 0);
-
-#define CASE_BODY(where, how, within, space_number_if_any, offset_from_start) \
- { \
- bool emit_write_barrier = false; \
- bool current_was_incremented = false; \
- int space_number = space_number_if_any == kAnyOldSpace ? \
- (data & kSpaceMask) : space_number_if_any; \
- if (where == kNewObject && how == kPlain && within == kStartOfObject) {\
- ASSIGN_DEST_SPACE(space_number) \
- ReadObject(space_number, dest_space, current); \
- emit_write_barrier = \
- (space_number == NEW_SPACE && source_space != NEW_SPACE); \
- } else { \
- Object* new_object = NULL; /* May not be a real Object pointer. */ \
- if (where == kNewObject) { \
- ASSIGN_DEST_SPACE(space_number) \
- ReadObject(space_number, dest_space, &new_object); \
- } else if (where == kRootArray) { \
- int root_id = source_->GetInt(); \
- new_object = isolate->heap()->roots_address()[root_id]; \
- } else if (where == kPartialSnapshotCache) { \
- int cache_index = source_->GetInt(); \
- new_object = isolate->serialize_partial_snapshot_cache() \
- [cache_index]; \
- } else if (where == kExternalReference) { \
- int reference_id = source_->GetInt(); \
- Address address = external_reference_decoder_-> \
- Decode(reference_id); \
- new_object = reinterpret_cast<Object*>(address); \
- } else if (where == kBackref) { \
- emit_write_barrier = \
- (space_number == NEW_SPACE && source_space != NEW_SPACE); \
- new_object = GetAddressFromEnd(data & kSpaceMask); \
- } else { \
- ASSERT(where == kFromStart); \
- if (offset_from_start == kUnknownOffsetFromStart) { \
- emit_write_barrier = \
- (space_number == NEW_SPACE && source_space != NEW_SPACE); \
- new_object = GetAddressFromStart(data & kSpaceMask); \
- } else { \
- Address object_address = pages_[space_number][0] + \
- (offset_from_start << kObjectAlignmentBits); \
- new_object = HeapObject::FromAddress(object_address); \
- } \
- } \
- if (within == kFirstInstruction) { \
- Code* new_code_object = reinterpret_cast<Code*>(new_object); \
- new_object = reinterpret_cast<Object*>( \
- new_code_object->instruction_start()); \
- } \
- if (how == kFromCode) { \
- Address location_of_branch_data = \
- reinterpret_cast<Address>(current); \
- Assembler::set_target_at(location_of_branch_data, \
- reinterpret_cast<Address>(new_object)); \
- if (within == kFirstInstruction) { \
- location_of_branch_data += Assembler::kCallTargetSize; \
- current = reinterpret_cast<Object**>(location_of_branch_data); \
- current_was_incremented = true; \
- } \
- } else { \
- *current = new_object; \
- } \
- } \
- if (emit_write_barrier) { \
- isolate->heap()->RecordWrite(address, static_cast<int>( \
- reinterpret_cast<Address>(current) - address)); \
- } \
- if (!current_was_incremented) { \
- current++; /* Increment current if it wasn't done above. */ \
- } \
- break; \
- } \
-
-// This generates a case and a body for each space. The large object spaces are
-// very rare in snapshots so they are grouped in one body.
-#define ONE_PER_SPACE(where, how, within) \
- CASE_STATEMENT(where, how, within, NEW_SPACE) \
- CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \
- CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
- CASE_BODY(where, how, within, OLD_DATA_SPACE, kUnknownOffsetFromStart) \
- CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
- CASE_BODY(where, how, within, OLD_POINTER_SPACE, kUnknownOffsetFromStart) \
- CASE_STATEMENT(where, how, within, CODE_SPACE) \
- CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \
- CASE_STATEMENT(where, how, within, CELL_SPACE) \
- CASE_BODY(where, how, within, CELL_SPACE, kUnknownOffsetFromStart) \
- CASE_STATEMENT(where, how, within, MAP_SPACE) \
- CASE_BODY(where, how, within, MAP_SPACE, kUnknownOffsetFromStart) \
- CASE_STATEMENT(where, how, within, kLargeData) \
- CASE_STATEMENT(where, how, within, kLargeCode) \
- CASE_STATEMENT(where, how, within, kLargeFixedArray) \
- CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart)
-
-// This generates a case and a body for the new space (which has to do extra
-// write barrier handling) and handles the other spaces with 8 fall-through
-// cases and one body.
-#define ALL_SPACES(where, how, within) \
- CASE_STATEMENT(where, how, within, NEW_SPACE) \
- CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \
- CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
- CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
- CASE_STATEMENT(where, how, within, CODE_SPACE) \
- CASE_STATEMENT(where, how, within, CELL_SPACE) \
- CASE_STATEMENT(where, how, within, MAP_SPACE) \
- CASE_STATEMENT(where, how, within, kLargeData) \
- CASE_STATEMENT(where, how, within, kLargeCode) \
- CASE_STATEMENT(where, how, within, kLargeFixedArray) \
- CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart)
-
-#define ONE_PER_CODE_SPACE(where, how, within) \
- CASE_STATEMENT(where, how, within, CODE_SPACE) \
- CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \
- CASE_STATEMENT(where, how, within, kLargeCode) \
- CASE_BODY(where, how, within, kLargeCode, kUnknownOffsetFromStart)
-
-#define EMIT_COMMON_REFERENCE_PATTERNS(pseudo_space_number, \
- space_number, \
- offset_from_start) \
- CASE_STATEMENT(kFromStart, kPlain, kStartOfObject, pseudo_space_number) \
- CASE_BODY(kFromStart, kPlain, kStartOfObject, space_number, offset_from_start)
-
- // We generate 15 cases and bodies that process special tags that combine
- // the raw data tag and the length into one byte.
-#define RAW_CASE(index, size) \
- case kRawData + index: { \
- byte* raw_data_out = reinterpret_cast<byte*>(current); \
- source_->CopyRaw(raw_data_out, size); \
- current = reinterpret_cast<Object**>(raw_data_out + size); \
- break; \
- }
- COMMON_RAW_LENGTHS(RAW_CASE)
-#undef RAW_CASE
-
- // Deserialize a chunk of raw data that doesn't have one of the popular
- // lengths.
- case kRawData: {
- int size = source_->GetInt();
- byte* raw_data_out = reinterpret_cast<byte*>(current);
- source_->CopyRaw(raw_data_out, size);
- current = reinterpret_cast<Object**>(raw_data_out + size);
- break;
- }
-
- // Deserialize a new object and write a pointer to it to the current
- // object.
- ONE_PER_SPACE(kNewObject, kPlain, kStartOfObject)
- // Support for direct instruction pointers in functions
- ONE_PER_CODE_SPACE(kNewObject, kPlain, kFirstInstruction)
- // Deserialize a new code object and write a pointer to its first
- // instruction to the current code object.
- ONE_PER_SPACE(kNewObject, kFromCode, kFirstInstruction)
- // Find a recently deserialized object using its offset from the current
- // allocation point and write a pointer to it to the current object.
- ALL_SPACES(kBackref, kPlain, kStartOfObject)
- // Find a recently deserialized code object using its offset from the
- // current allocation point and write a pointer to its first instruction
- // to the current code object or the instruction pointer in a function
- // object.
- ALL_SPACES(kBackref, kFromCode, kFirstInstruction)
- ALL_SPACES(kBackref, kPlain, kFirstInstruction)
- // Find an already deserialized object using its offset from the start
- // and write a pointer to it to the current object.
- ALL_SPACES(kFromStart, kPlain, kStartOfObject)
- ALL_SPACES(kFromStart, kPlain, kFirstInstruction)
- // Find an already deserialized code object using its offset from the
- // start and write a pointer to its first instruction to the current code
- // object.
- ALL_SPACES(kFromStart, kFromCode, kFirstInstruction)
- // Find an already deserialized object at one of the predetermined popular
- // offsets from the start and write a pointer to it in the current object.
- COMMON_REFERENCE_PATTERNS(EMIT_COMMON_REFERENCE_PATTERNS)
- // Find an object in the roots array and write a pointer to it to the
- // current object.
- CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
- CASE_BODY(kRootArray, kPlain, kStartOfObject, 0, kUnknownOffsetFromStart)
- // Find an object in the partial snapshots cache and write a pointer to it
- // to the current object.
- CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
- CASE_BODY(kPartialSnapshotCache,
- kPlain,
- kStartOfObject,
- 0,
- kUnknownOffsetFromStart)
- // Find an code entry in the partial snapshots cache and
- // write a pointer to it to the current object.
- CASE_STATEMENT(kPartialSnapshotCache, kPlain, kFirstInstruction, 0)
- CASE_BODY(kPartialSnapshotCache,
- kPlain,
- kFirstInstruction,
- 0,
- kUnknownOffsetFromStart)
- // Find an external reference and write a pointer to it to the current
- // object.
- CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0)
- CASE_BODY(kExternalReference,
- kPlain,
- kStartOfObject,
- 0,
- kUnknownOffsetFromStart)
- // Find an external reference and write a pointer to it in the current
- // code object.
- CASE_STATEMENT(kExternalReference, kFromCode, kStartOfObject, 0)
- CASE_BODY(kExternalReference,
- kFromCode,
- kStartOfObject,
- 0,
- kUnknownOffsetFromStart)
-
-#undef CASE_STATEMENT
-#undef CASE_BODY
-#undef ONE_PER_SPACE
-#undef ALL_SPACES
-#undef EMIT_COMMON_REFERENCE_PATTERNS
-#undef ASSIGN_DEST_SPACE
-
- case kNewPage: {
- int space = source_->Get();
- pages_[space].Add(last_object_address_);
- if (space == CODE_SPACE) {
- CPU::FlushICache(last_object_address_, Page::kPageSize);
- }
- break;
- }
-
- case kNativesStringResource: {
- int index = source_->Get();
- Vector<const char> source_vector = Natives::GetScriptSource(index);
- NativesExternalStringResource* resource =
- new NativesExternalStringResource(
- isolate->bootstrapper(), source_vector.start());
- *current++ = reinterpret_cast<Object*>(resource);
- break;
- }
-
- case kSynchronize: {
- // If we get here then that indicates that you have a mismatch between
- // the number of GC roots when serializing and deserializing.
- UNREACHABLE();
- }
-
- default:
- UNREACHABLE();
- }
- }
- ASSERT_EQ(current, limit);
-}
-
-
-void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
- const int max_shift = ((kPointerSize * kBitsPerByte) / 7) * 7;
- for (int shift = max_shift; shift > 0; shift -= 7) {
- if (integer >= static_cast<uintptr_t>(1u) << shift) {
- Put((static_cast<int>((integer >> shift)) & 0x7f) | 0x80, "IntPart");
- }
- }
- PutSection(static_cast<int>(integer & 0x7f), "IntLastPart");
-}
-
-#ifdef DEBUG
-
-void Deserializer::Synchronize(const char* tag) {
- int data = source_->Get();
- // If this assert fails then that indicates that you have a mismatch between
- // the number of GC roots when serializing and deserializing.
- ASSERT_EQ(kSynchronize, data);
- do {
- int character = source_->Get();
- if (character == 0) break;
- if (FLAG_debug_serialization) {
- PrintF("%c", character);
- }
- } while (true);
- if (FLAG_debug_serialization) {
- PrintF("\n");
- }
-}
-
-
-void Serializer::Synchronize(const char* tag) {
- sink_->Put(kSynchronize, tag);
- int character;
- do {
- character = *tag++;
- sink_->PutSection(character, "TagCharacter");
- } while (character != 0);
-}
-
-#endif
-
-Serializer::Serializer(SnapshotByteSink* sink)
- : sink_(sink),
- current_root_index_(0),
- external_reference_encoder_(new ExternalReferenceEncoder),
- large_object_total_(0) {
- // The serializer is meant to be used only to generate initial heap images
- // from a context in which there is only one isolate.
- ASSERT(Isolate::Current()->IsDefaultIsolate());
- for (int i = 0; i <= LAST_SPACE; i++) {
- fullness_[i] = 0;
- }
-}
-
-
-Serializer::~Serializer() {
- delete external_reference_encoder_;
-}
-
-
-void StartupSerializer::SerializeStrongReferences() {
- Isolate* isolate = Isolate::Current();
- // No active threads.
- CHECK_EQ(NULL, Isolate::Current()->thread_manager()->FirstThreadStateInUse());
- // No active or weak handles.
- CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
- CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
- // We don't support serializing installed extensions.
- for (RegisteredExtension* ext = v8::RegisteredExtension::first_extension();
- ext != NULL;
- ext = ext->next()) {
- CHECK_NE(v8::INSTALLED, ext->state());
- }
- HEAP->IterateStrongRoots(this, VISIT_ONLY_STRONG);
-}
-
-
-void PartialSerializer::Serialize(Object** object) {
- this->VisitPointer(object);
- Isolate* isolate = Isolate::Current();
-
- // After we have done the partial serialization the partial snapshot cache
- // will contain some references needed to decode the partial snapshot. We
- // fill it up with undefineds so it has a predictable length so the
- // deserialization code doesn't need to know the length.
- for (int index = isolate->serialize_partial_snapshot_cache_length();
- index < Isolate::kPartialSnapshotCacheCapacity;
- index++) {
- isolate->serialize_partial_snapshot_cache()[index] =
- isolate->heap()->undefined_value();
- startup_serializer_->VisitPointer(
- &isolate->serialize_partial_snapshot_cache()[index]);
- }
- isolate->set_serialize_partial_snapshot_cache_length(
- Isolate::kPartialSnapshotCacheCapacity);
-}
-
-
-void Serializer::VisitPointers(Object** start, Object** end) {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsSmi()) {
- sink_->Put(kRawData, "RawData");
- sink_->PutInt(kPointerSize, "length");
- for (int i = 0; i < kPointerSize; i++) {
- sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
- }
- } else {
- SerializeObject(*current, kPlain, kStartOfObject);
- }
- }
-}
-
-
-// This ensures that the partial snapshot cache keeps things alive during GC and
-// tracks their movement. When it is called during serialization of the startup
-// snapshot the partial snapshot is empty, so nothing happens. When the partial
-// (context) snapshot is created, this array is populated with the pointers that
-// the partial snapshot will need. As that happens we emit serialized objects to
-// the startup snapshot that correspond to the elements of this cache array. On
-// deserialization we therefore need to visit the cache array. This fills it up
-// with pointers to deserialized objects.
-void SerializerDeserializer::Iterate(ObjectVisitor* visitor) {
- Isolate* isolate = Isolate::Current();
- visitor->VisitPointers(
- isolate->serialize_partial_snapshot_cache(),
- &isolate->serialize_partial_snapshot_cache()[
- isolate->serialize_partial_snapshot_cache_length()]);
-}
-
-
-// When deserializing we need to set the size of the snapshot cache. This means
-// the root iteration code (above) will iterate over array elements, writing the
-// references to deserialized objects in them.
-void SerializerDeserializer::SetSnapshotCacheSize(int size) {
- Isolate::Current()->set_serialize_partial_snapshot_cache_length(size);
-}
-
-
-int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
- Isolate* isolate = Isolate::Current();
-
- for (int i = 0;
- i < isolate->serialize_partial_snapshot_cache_length();
- i++) {
- Object* entry = isolate->serialize_partial_snapshot_cache()[i];
- if (entry == heap_object) return i;
- }
-
- // We didn't find the object in the cache. So we add it to the cache and
- // then visit the pointer so that it becomes part of the startup snapshot
- // and we can refer to it from the partial snapshot.
- int length = isolate->serialize_partial_snapshot_cache_length();
- CHECK(length < Isolate::kPartialSnapshotCacheCapacity);
- isolate->serialize_partial_snapshot_cache()[length] = heap_object;
- startup_serializer_->VisitPointer(
- &isolate->serialize_partial_snapshot_cache()[length]);
- // We don't recurse from the startup snapshot generator into the partial
- // snapshot generator.
- ASSERT(length == isolate->serialize_partial_snapshot_cache_length());
- isolate->set_serialize_partial_snapshot_cache_length(length + 1);
- return length;
-}
-
-
-int PartialSerializer::RootIndex(HeapObject* heap_object) {
- for (int i = 0; i < Heap::kRootListLength; i++) {
- Object* root = HEAP->roots_address()[i];
- if (root == heap_object) return i;
- }
- return kInvalidRootIndex;
-}
-
-
-// Encode the location of an already deserialized object in order to write its
-// location into a later object. We can encode the location as an offset from
-// the start of the deserialized objects or as an offset backwards from the
-// current allocation pointer.
-void Serializer::SerializeReferenceToPreviousObject(
- int space,
- int address,
- HowToCode how_to_code,
- WhereToPoint where_to_point) {
- int offset = CurrentAllocationAddress(space) - address;
- bool from_start = true;
- if (SpaceIsPaged(space)) {
- // For paged space it is simple to encode back from current allocation if
- // the object is on the same page as the current allocation pointer.
- if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
- (address >> kPageSizeBits)) {
- from_start = false;
- address = offset;
- }
- } else if (space == NEW_SPACE) {
- // For new space it is always simple to encode back from current allocation.
- if (offset < address) {
- from_start = false;
- address = offset;
- }
- }
- // If we are actually dealing with real offsets (and not a numbering of
- // all objects) then we should shift out the bits that are always 0.
- if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
- if (from_start) {
-#define COMMON_REFS_CASE(pseudo_space, actual_space, offset) \
- if (space == actual_space && address == offset && \
- how_to_code == kPlain && where_to_point == kStartOfObject) { \
- sink_->Put(kFromStart + how_to_code + where_to_point + \
- pseudo_space, "RefSer"); \
- } else /* NOLINT */
- COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
-#undef COMMON_REFS_CASE
- { /* NOLINT */
- sink_->Put(kFromStart + how_to_code + where_to_point + space, "RefSer");
- sink_->PutInt(address, "address");
- }
- } else {
- sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer");
- sink_->PutInt(address, "address");
- }
-}
-
-
-void StartupSerializer::SerializeObject(
- Object* o,
- HowToCode how_to_code,
- WhereToPoint where_to_point) {
- CHECK(o->IsHeapObject());
- HeapObject* heap_object = HeapObject::cast(o);
-
- if (address_mapper_.IsMapped(heap_object)) {
- int space = SpaceOfAlreadySerializedObject(heap_object);
- int address = address_mapper_.MappedTo(heap_object);
- SerializeReferenceToPreviousObject(space,
- address,
- how_to_code,
- where_to_point);
- } else {
- // Object has not yet been serialized. Serialize it here.
- ObjectSerializer object_serializer(this,
- heap_object,
- sink_,
- how_to_code,
- where_to_point);
- object_serializer.Serialize();
- }
-}
-
-
-void StartupSerializer::SerializeWeakReferences() {
- for (int i = Isolate::Current()->serialize_partial_snapshot_cache_length();
- i < Isolate::kPartialSnapshotCacheCapacity;
- i++) {
- sink_->Put(kRootArray + kPlain + kStartOfObject, "RootSerialization");
- sink_->PutInt(Heap::kUndefinedValueRootIndex, "root_index");
- }
- HEAP->IterateWeakRoots(this, VISIT_ALL);
-}
-
-
-void PartialSerializer::SerializeObject(
- Object* o,
- HowToCode how_to_code,
- WhereToPoint where_to_point) {
- CHECK(o->IsHeapObject());
- HeapObject* heap_object = HeapObject::cast(o);
-
- int root_index;
- if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
- sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
- sink_->PutInt(root_index, "root_index");
- return;
- }
-
- if (ShouldBeInThePartialSnapshotCache(heap_object)) {
- int cache_index = PartialSnapshotCacheIndex(heap_object);
- sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
- "PartialSnapshotCache");
- sink_->PutInt(cache_index, "partial_snapshot_cache_index");
- return;
- }
-
- // Pointers from the partial snapshot to the objects in the startup snapshot
- // should go through the root array or through the partial snapshot cache.
- // If this is not the case you may have to add something to the root array.
- ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object));
- // All the symbols that the partial snapshot needs should be either in the
- // root table or in the partial snapshot cache.
- ASSERT(!heap_object->IsSymbol());
-
- if (address_mapper_.IsMapped(heap_object)) {
- int space = SpaceOfAlreadySerializedObject(heap_object);
- int address = address_mapper_.MappedTo(heap_object);
- SerializeReferenceToPreviousObject(space,
- address,
- how_to_code,
- where_to_point);
- } else {
- // Object has not yet been serialized. Serialize it here.
- ObjectSerializer serializer(this,
- heap_object,
- sink_,
- how_to_code,
- where_to_point);
- serializer.Serialize();
- }
-}
-
-
-void Serializer::ObjectSerializer::Serialize() {
- int space = Serializer::SpaceOfObject(object_);
- int size = object_->Size();
-
- sink_->Put(kNewObject + reference_representation_ + space,
- "ObjectSerialization");
- sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");
-
- LOG(i::Isolate::Current(),
- SnapshotPositionEvent(object_->address(), sink_->Position()));
-
- // Mark this object as already serialized.
- bool start_new_page;
- int offset = serializer_->Allocate(space, size, &start_new_page);
- serializer_->address_mapper()->AddMapping(object_, offset);
- if (start_new_page) {
- sink_->Put(kNewPage, "NewPage");
- sink_->PutSection(space, "NewPageSpace");
- }
-
- // Serialize the map (first word of the object).
- serializer_->SerializeObject(object_->map(), kPlain, kStartOfObject);
-
- // Serialize the rest of the object.
- CHECK_EQ(0, bytes_processed_so_far_);
- bytes_processed_so_far_ = kPointerSize;
- object_->IterateBody(object_->map()->instance_type(), size, this);
- OutputRawData(object_->address() + size);
-}
-
-
-void Serializer::ObjectSerializer::VisitPointers(Object** start,
- Object** end) {
- Object** current = start;
- while (current < end) {
- while (current < end && (*current)->IsSmi()) current++;
- if (current < end) OutputRawData(reinterpret_cast<Address>(current));
-
- while (current < end && !(*current)->IsSmi()) {
- serializer_->SerializeObject(*current, kPlain, kStartOfObject);
- bytes_processed_so_far_ += kPointerSize;
- current++;
- }
- }
-}
-
-
-void Serializer::ObjectSerializer::VisitExternalReferences(Address* start,
- Address* end) {
- Address references_start = reinterpret_cast<Address>(start);
- OutputRawData(references_start);
-
- for (Address* current = start; current < end; current++) {
- sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
- int reference_id = serializer_->EncodeExternalReference(*current);
- sink_->PutInt(reference_id, "reference id");
- }
- bytes_processed_so_far_ += static_cast<int>((end - start) * kPointerSize);
-}
-
-
-void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
- Address target_start = rinfo->target_address_address();
- OutputRawData(target_start);
- Address target = rinfo->target_address();
- uint32_t encoding = serializer_->EncodeExternalReference(target);
- CHECK(target == NULL ? encoding == 0 : encoding != 0);
- int representation;
- // Can't use a ternary operator because of gcc.
- if (rinfo->IsCodedSpecially()) {
- representation = kStartOfObject + kFromCode;
- } else {
- representation = kStartOfObject + kPlain;
- }
- sink_->Put(kExternalReference + representation, "ExternalReference");
- sink_->PutInt(encoding, "reference id");
- bytes_processed_so_far_ += rinfo->target_address_size();
-}
-
-
-void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
- CHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Address target_start = rinfo->target_address_address();
- OutputRawData(target_start);
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- serializer_->SerializeObject(target, kFromCode, kFirstInstruction);
- bytes_processed_so_far_ += rinfo->target_address_size();
-}
-
-
-void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
- Code* target = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
- OutputRawData(entry_address);
- serializer_->SerializeObject(target, kPlain, kFirstInstruction);
- bytes_processed_so_far_ += kPointerSize;
-}
-
-
-void Serializer::ObjectSerializer::VisitGlobalPropertyCell(RelocInfo* rinfo) {
- // We shouldn't have any global property cell references in code
- // objects in the snapshot.
- UNREACHABLE();
-}
-
-
-void Serializer::ObjectSerializer::VisitExternalAsciiString(
- v8::String::ExternalAsciiStringResource** resource_pointer) {
- Address references_start = reinterpret_cast<Address>(resource_pointer);
- OutputRawData(references_start);
- for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
- Object* source = HEAP->natives_source_cache()->get(i);
- if (!source->IsUndefined()) {
- ExternalAsciiString* string = ExternalAsciiString::cast(source);
- typedef v8::String::ExternalAsciiStringResource Resource;
- Resource* resource = string->resource();
- if (resource == *resource_pointer) {
- sink_->Put(kNativesStringResource, "NativesStringResource");
- sink_->PutSection(i, "NativesStringResourceEnd");
- bytes_processed_so_far_ += sizeof(resource);
- return;
- }
- }
- }
- // One of the strings in the natives cache should match the resource. We
- // can't serialize any other kinds of external strings.
- UNREACHABLE();
-}
-
-
-void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
- Address object_start = object_->address();
- int up_to_offset = static_cast<int>(up_to - object_start);
- int skipped = up_to_offset - bytes_processed_so_far_;
- // This assert will fail if the reloc info gives us the target_address_address
- // locations in a non-ascending order. Luckily that doesn't happen.
- ASSERT(skipped >= 0);
- if (skipped != 0) {
- Address base = object_start + bytes_processed_so_far_;
-#define RAW_CASE(index, length) \
- if (skipped == length) { \
- sink_->PutSection(kRawData + index, "RawDataFixed"); \
- } else /* NOLINT */
- COMMON_RAW_LENGTHS(RAW_CASE)
-#undef RAW_CASE
- { /* NOLINT */
- sink_->Put(kRawData, "RawData");
- sink_->PutInt(skipped, "length");
- }
- for (int i = 0; i < skipped; i++) {
- unsigned int data = base[i];
- sink_->PutSection(data, "Byte");
- }
- bytes_processed_so_far_ += skipped;
- }
-}
-
-
-int Serializer::SpaceOfObject(HeapObject* object) {
- for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
- AllocationSpace s = static_cast<AllocationSpace>(i);
- if (HEAP->InSpace(object, s)) {
- if (i == LO_SPACE) {
- if (object->IsCode()) {
- return kLargeCode;
- } else if (object->IsFixedArray()) {
- return kLargeFixedArray;
- } else {
- return kLargeData;
- }
- }
- return i;
- }
- }
- UNREACHABLE();
- return 0;
-}
-
-
-int Serializer::SpaceOfAlreadySerializedObject(HeapObject* object) {
- for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
- AllocationSpace s = static_cast<AllocationSpace>(i);
- if (HEAP->InSpace(object, s)) {
- return i;
- }
- }
- UNREACHABLE();
- return 0;
-}
-
-
-int Serializer::Allocate(int space, int size, bool* new_page) {
- CHECK(space >= 0 && space < kNumberOfSpaces);
- if (SpaceIsLarge(space)) {
- // In large object space we merely number the objects instead of trying to
- // determine some sort of address.
- *new_page = true;
- large_object_total_ += size;
- return fullness_[LO_SPACE]++;
- }
- *new_page = false;
- if (fullness_[space] == 0) {
- *new_page = true;
- }
- if (SpaceIsPaged(space)) {
- // Paged spaces are a little special. We encode their addresses as if the
- // pages were all contiguous and each page were filled up in the range
- // 0 - Page::kObjectAreaSize. In practice the pages may not be contiguous
- // and allocation does not start at offset 0 in the page, but this scheme
- // means the deserializer can get the page number quickly by shifting the
- // serialized address.
- CHECK(IsPowerOf2(Page::kPageSize));
- int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1));
- CHECK(size <= Page::kObjectAreaSize);
- if (used_in_this_page + size > Page::kObjectAreaSize) {
- *new_page = true;
- fullness_[space] = RoundUp(fullness_[space], Page::kPageSize);
- }
- }
- int allocation_address = fullness_[space];
- fullness_[space] = allocation_address + size;
- return allocation_address;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/serialize.h b/src/3rdparty/v8/src/serialize.h
deleted file mode 100644
index 07c0a25..0000000
--- a/src/3rdparty/v8/src/serialize.h
+++ /dev/null
@@ -1,589 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SERIALIZE_H_
-#define V8_SERIALIZE_H_
-
-#include "hashmap.h"
-
-namespace v8 {
-namespace internal {
-
-// A TypeCode is used to distinguish different kinds of external reference.
-// It is a single bit to make testing for types easy.
-enum TypeCode {
- UNCLASSIFIED, // One-of-a-kind references.
- BUILTIN,
- RUNTIME_FUNCTION,
- IC_UTILITY,
- DEBUG_ADDRESS,
- STATS_COUNTER,
- TOP_ADDRESS,
- C_BUILTIN,
- EXTENSION,
- ACCESSOR,
- RUNTIME_ENTRY,
- STUB_CACHE_TABLE
-};
-
-const int kTypeCodeCount = STUB_CACHE_TABLE + 1;
-const int kFirstTypeCode = UNCLASSIFIED;
-
-const int kReferenceIdBits = 16;
-const int kReferenceIdMask = (1 << kReferenceIdBits) - 1;
-const int kReferenceTypeShift = kReferenceIdBits;
-const int kDebugRegisterBits = 4;
-const int kDebugIdShift = kDebugRegisterBits;
-
-
-class ExternalReferenceEncoder {
- public:
- ExternalReferenceEncoder();
-
- uint32_t Encode(Address key) const;
-
- const char* NameOfAddress(Address key) const;
-
- private:
- HashMap encodings_;
- static uint32_t Hash(Address key) {
- return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >> 2);
- }
-
- int IndexOf(Address key) const;
-
- static bool Match(void* key1, void* key2) { return key1 == key2; }
-
- void Put(Address key, int index);
-
- Isolate* isolate_;
-};
-
-
-class ExternalReferenceDecoder {
- public:
- ExternalReferenceDecoder();
- ~ExternalReferenceDecoder();
-
- Address Decode(uint32_t key) const {
- if (key == 0) return NULL;
- return *Lookup(key);
- }
-
- private:
- Address** encodings_;
-
- Address* Lookup(uint32_t key) const {
- int type = key >> kReferenceTypeShift;
- ASSERT(kFirstTypeCode <= type && type < kTypeCodeCount);
- int id = key & kReferenceIdMask;
- return &encodings_[type][id];
- }
-
- void Put(uint32_t key, Address value) {
- *Lookup(key) = value;
- }
-
- Isolate* isolate_;
-};
-
-
-class SnapshotByteSource {
- public:
- SnapshotByteSource(const byte* array, int length)
- : data_(array), length_(length), position_(0) { }
-
- bool HasMore() { return position_ < length_; }
-
- int Get() {
- ASSERT(position_ < length_);
- return data_[position_++];
- }
-
- inline void CopyRaw(byte* to, int number_of_bytes);
-
- inline int GetInt();
-
- bool AtEOF() {
- return position_ == length_;
- }
-
- int position() { return position_; }
-
- private:
- const byte* data_;
- int length_;
- int position_;
-};
-
-
-// It is very common to have a reference to objects at certain offsets in the
-// heap. These offsets have been determined experimentally. We code
-// references to such objects in a single byte that encodes the way the pointer
-// is written (only plain pointers allowed), the space number and the offset.
-// This only works for objects in the first page of a space. Don't use this for
-// things in newspace since it bypasses the write barrier.
-
-RLYSTC const int k64 = (sizeof(uintptr_t) - 4) / 4;
-
-#define COMMON_REFERENCE_PATTERNS(f) \
- f(kNumberOfSpaces, 2, (11 - k64)) \
- f((kNumberOfSpaces + 1), 2, 0) \
- f((kNumberOfSpaces + 2), 2, (142 - 16 * k64)) \
- f((kNumberOfSpaces + 3), 2, (74 - 15 * k64)) \
- f((kNumberOfSpaces + 4), 2, 5) \
- f((kNumberOfSpaces + 5), 1, 135) \
- f((kNumberOfSpaces + 6), 2, (228 - 39 * k64))
-
-#define COMMON_RAW_LENGTHS(f) \
- f(1, 1) \
- f(2, 2) \
- f(3, 3) \
- f(4, 4) \
- f(5, 5) \
- f(6, 6) \
- f(7, 7) \
- f(8, 8) \
- f(9, 12) \
- f(10, 16) \
- f(11, 20) \
- f(12, 24) \
- f(13, 28) \
- f(14, 32) \
- f(15, 36)
-
-// The Serializer/Deserializer class is a common superclass for Serializer and
-// Deserializer which is used to store common constants and methods used by
-// both.
-class SerializerDeserializer: public ObjectVisitor {
- public:
- RLYSTC void Iterate(ObjectVisitor* visitor);
- RLYSTC void SetSnapshotCacheSize(int size);
-
- protected:
- // Where the pointed-to object can be found:
- enum Where {
- kNewObject = 0, // Object is next in snapshot.
- // 1-8 One per space.
- kRootArray = 0x9, // Object is found in root array.
- kPartialSnapshotCache = 0xa, // Object is in the cache.
- kExternalReference = 0xb, // Pointer to an external reference.
- // 0xc-0xf Free.
- kBackref = 0x10, // Object is described relative to end.
- // 0x11-0x18 One per space.
- // 0x19-0x1f Common backref offsets.
- kFromStart = 0x20, // Object is described relative to start.
- // 0x21-0x28 One per space.
- // 0x29-0x2f Free.
- // 0x30-0x3f Used by misc tags below.
- kPointedToMask = 0x3f
- };
-
- // How to code the pointer to the object.
- enum HowToCode {
- kPlain = 0, // Straight pointer.
- // What this means depends on the architecture:
- kFromCode = 0x40, // A pointer inlined in code.
- kHowToCodeMask = 0x40
- };
-
- // Where to point within the object.
- enum WhereToPoint {
- kStartOfObject = 0,
- kFirstInstruction = 0x80,
- kWhereToPointMask = 0x80
- };
-
- // Misc.
- // Raw data to be copied from the snapshot.
- RLYSTC const int kRawData = 0x30;
- // Some common raw lengths: 0x31-0x3f
- // A tag emitted at strategic points in the snapshot to delineate sections.
- // If the deserializer does not find these at the expected moments then it
- // is an indication that the snapshot and the VM do not fit together.
- // Examine the build process for architecture, version or configuration
- // mismatches.
- RLYSTC const int kSynchronize = 0x70;
- // Used for the source code of the natives, which is in the executable, but
- // is referred to from external strings in the snapshot.
- RLYSTC const int kNativesStringResource = 0x71;
- RLYSTC const int kNewPage = 0x72;
- // 0x73-0x7f Free.
- // 0xb0-0xbf Free.
- // 0xf0-0xff Free.
-
-
- RLYSTC const int kLargeData = LAST_SPACE;
- RLYSTC const int kLargeCode = kLargeData + 1;
- RLYSTC const int kLargeFixedArray = kLargeCode + 1;
- RLYSTC const int kNumberOfSpaces = kLargeFixedArray + 1;
- RLYSTC const int kAnyOldSpace = -1;
-
- // A bitmask for getting the space out of an instruction.
- RLYSTC const int kSpaceMask = 15;
-
- RLYSTC inline bool SpaceIsLarge(int space) { return space >= kLargeData; }
- RLYSTC inline bool SpaceIsPaged(int space) {
- return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
- }
-};
-
-
-int SnapshotByteSource::GetInt() {
- // A little unwind to catch the really small ints.
- int snapshot_byte = Get();
- if ((snapshot_byte & 0x80) == 0) {
- return snapshot_byte;
- }
- int accumulator = (snapshot_byte & 0x7f) << 7;
- while (true) {
- snapshot_byte = Get();
- if ((snapshot_byte & 0x80) == 0) {
- return accumulator | snapshot_byte;
- }
- accumulator = (accumulator | (snapshot_byte & 0x7f)) << 7;
- }
- UNREACHABLE();
- return accumulator;
-}
-
-
-void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) {
- memcpy(to, data_ + position_, number_of_bytes);
- position_ += number_of_bytes;
-}
-
-
-// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
-class Deserializer: public SerializerDeserializer {
- public:
- // Create a deserializer from a snapshot byte source.
- explicit Deserializer(SnapshotByteSource* source);
-
- virtual ~Deserializer();
-
- // Deserialize the snapshot into an empty heap.
- void Deserialize();
-
- // Deserialize a single object and the objects reachable from it.
- void DeserializePartial(Object** root);
-
-#ifdef DEBUG
- virtual void Synchronize(const char* tag);
-#endif
-
- private:
- virtual void VisitPointers(Object** start, Object** end);
-
- virtual void VisitExternalReferences(Address* start, Address* end) {
- UNREACHABLE();
- }
-
- virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
- UNREACHABLE();
- }
-
- void ReadChunk(Object** start, Object** end, int space, Address address);
- HeapObject* GetAddressFromStart(int space);
- inline HeapObject* GetAddressFromEnd(int space);
- Address Allocate(int space_number, Space* space, int size);
- void ReadObject(int space_number, Space* space, Object** write_back);
-
- // Cached current isolate.
- Isolate* isolate_;
-
- // Keep track of the pages in the paged spaces.
- // (In large object space we are keeping track of individual objects
- // rather than pages.) In new space we just need the address of the
- // first object and the others will flow from that.
- List<Address> pages_[SerializerDeserializer::kNumberOfSpaces];
-
- SnapshotByteSource* source_;
- // This is the address of the next object that will be allocated in each
- // space. It is used to calculate the addresses of back-references.
- Address high_water_[LAST_SPACE + 1];
- // This is the address of the most recent object that was allocated. It
- // is used to set the location of the new page when we encounter a
- // START_NEW_PAGE_SERIALIZATION tag.
- Address last_object_address_;
-
- ExternalReferenceDecoder* external_reference_decoder_;
-
- DISALLOW_COPY_AND_ASSIGN(Deserializer);
-};
-
-
-class SnapshotByteSink {
- public:
- virtual ~SnapshotByteSink() { }
- virtual void Put(int byte, const char* description) = 0;
- virtual void PutSection(int byte, const char* description) {
- Put(byte, description);
- }
- void PutInt(uintptr_t integer, const char* description);
- virtual int Position() = 0;
-};
-
-
-// Mapping objects to their location after deserialization.
-// This is used during building, but not at runtime by V8.
-class SerializationAddressMapper {
- public:
- SerializationAddressMapper()
- : serialization_map_(new HashMap(&SerializationMatchFun)),
- no_allocation_(new AssertNoAllocation()) { }
-
- ~SerializationAddressMapper() {
- delete serialization_map_;
- delete no_allocation_;
- }
-
- bool IsMapped(HeapObject* obj) {
- return serialization_map_->Lookup(Key(obj), Hash(obj), false) != NULL;
- }
-
- int MappedTo(HeapObject* obj) {
- ASSERT(IsMapped(obj));
- return static_cast<int>(reinterpret_cast<intptr_t>(
- serialization_map_->Lookup(Key(obj), Hash(obj), false)->value));
- }
-
- void AddMapping(HeapObject* obj, int to) {
- ASSERT(!IsMapped(obj));
- HashMap::Entry* entry =
- serialization_map_->Lookup(Key(obj), Hash(obj), true);
- entry->value = Value(to);
- }
-
- private:
- RLYSTC bool SerializationMatchFun(void* key1, void* key2) {
- return key1 == key2;
- }
-
- RLYSTC uint32_t Hash(HeapObject* obj) {
- return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
- }
-
- RLYSTC void* Key(HeapObject* obj) {
- return reinterpret_cast<void*>(obj->address());
- }
-
- RLYSTC void* Value(int v) {
- return reinterpret_cast<void*>(v);
- }
-
- HashMap* serialization_map_;
- AssertNoAllocation* no_allocation_;
- DISALLOW_COPY_AND_ASSIGN(SerializationAddressMapper);
-};
-
-
-// There can be only one serializer per V8 process.
-STATIC_CLASS Serializer : public SerializerDeserializer {
- public:
- explicit Serializer(SnapshotByteSink* sink);
- ~Serializer();
- void VisitPointers(Object** start, Object** end);
- // You can call this after serialization to find out how much space was used
- // in each space.
- int CurrentAllocationAddress(int space) {
- if (SpaceIsLarge(space)) return large_object_total_;
- return fullness_[space];
- }
-
- RLYSTC void Enable() {
- if (!serialization_enabled_) {
- ASSERT(!too_late_to_enable_now_);
- }
- serialization_enabled_ = true;
- }
-
- RLYSTC void Disable() { serialization_enabled_ = false; }
- // Call this when you have made use of the fact that there is no serialization
- // going on.
- RLYSTC void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
- RLYSTC bool enabled() { return serialization_enabled_; }
- SerializationAddressMapper* address_mapper() { return &address_mapper_; }
-#ifdef DEBUG
- virtual void Synchronize(const char* tag);
-#endif
-
- protected:
- RLYSTC const int kInvalidRootIndex = -1;
- virtual int RootIndex(HeapObject* heap_object) = 0;
- virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;
-
- class ObjectSerializer : public ObjectVisitor {
- public:
- ObjectSerializer(Serializer* serializer,
- Object* o,
- SnapshotByteSink* sink,
- HowToCode how_to_code,
- WhereToPoint where_to_point)
- : serializer_(serializer),
- object_(HeapObject::cast(o)),
- sink_(sink),
- reference_representation_(how_to_code + where_to_point),
- bytes_processed_so_far_(0) { }
- void Serialize();
- void VisitPointers(Object** start, Object** end);
- void VisitExternalReferences(Address* start, Address* end);
- void VisitCodeTarget(RelocInfo* target);
- void VisitCodeEntry(Address entry_address);
- void VisitGlobalPropertyCell(RelocInfo* rinfo);
- void VisitRuntimeEntry(RelocInfo* reloc);
- // Used for seralizing the external strings that hold the natives source.
- void VisitExternalAsciiString(
- v8::String::ExternalAsciiStringResource** resource);
- // We can't serialize a heap with external two byte strings.
- void VisitExternalTwoByteString(
- v8::String::ExternalStringResource** resource) {
- UNREACHABLE();
- }
-
- private:
- void OutputRawData(Address up_to);
-
- Serializer* serializer_;
- HeapObject* object_;
- SnapshotByteSink* sink_;
- int reference_representation_;
- int bytes_processed_so_far_;
- };
-
- virtual void SerializeObject(Object* o,
- HowToCode how_to_code,
- WhereToPoint where_to_point) = 0;
- void SerializeReferenceToPreviousObject(
- int space,
- int address,
- HowToCode how_to_code,
- WhereToPoint where_to_point);
- void InitializeAllocators();
- // This will return the space for an object. If the object is in large
- // object space it may return kLargeCode or kLargeFixedArray in order
- // to indicate to the deserializer what kind of large object allocation
- // to make.
- RLYSTC int SpaceOfObject(HeapObject* object);
- // This just returns the space of the object. It will return LO_SPACE
- // for all large objects since you can't check the type of the object
- // once the map has been used for the serialization address.
- RLYSTC int SpaceOfAlreadySerializedObject(HeapObject* object);
- int Allocate(int space, int size, bool* new_page_started);
- int EncodeExternalReference(Address addr) {
- return external_reference_encoder_->Encode(addr);
- }
-
- // Keep track of the fullness of each space in order to generate
- // relative addresses for back references. Large objects are
- // just numbered sequentially since relative addresses make no
- // sense in large object space.
- int fullness_[LAST_SPACE + 1];
- SnapshotByteSink* sink_;
- int current_root_index_;
- ExternalReferenceEncoder* external_reference_encoder_;
- RLYSTC bool serialization_enabled_;
- // Did we already make use of the fact that serialization was not enabled?
- RLYSTC bool too_late_to_enable_now_;
- int large_object_total_;
- SerializationAddressMapper address_mapper_;
-
- friend class ObjectSerializer;
- friend class Deserializer;
-
- DISALLOW_COPY_AND_ASSIGN(Serializer);
-};
-
-
-class PartialSerializer : public Serializer {
- public:
- PartialSerializer(Serializer* startup_snapshot_serializer,
- SnapshotByteSink* sink)
- : Serializer(sink),
- startup_serializer_(startup_snapshot_serializer) {
- }
-
- // Serialize the objects reachable from a single object pointer.
- virtual void Serialize(Object** o);
- virtual void SerializeObject(Object* o,
- HowToCode how_to_code,
- WhereToPoint where_to_point);
-
- protected:
- virtual int RootIndex(HeapObject* o);
- virtual int PartialSnapshotCacheIndex(HeapObject* o);
- virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
- // Scripts should be referred only through shared function infos. We can't
- // allow them to be part of the partial snapshot because they contain a
- // unique ID, and deserializing several partial snapshots containing script
- // would cause dupes.
- ASSERT(!o->IsScript());
- return o->IsString() || o->IsSharedFunctionInfo() ||
- o->IsHeapNumber() || o->IsCode() ||
- o->map() == HEAP->fixed_cow_array_map();
- }
-
- private:
- Serializer* startup_serializer_;
- DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
-};
-
-
-class StartupSerializer : public Serializer {
- public:
- explicit StartupSerializer(SnapshotByteSink* sink) : Serializer(sink) {
- // Clear the cache of objects used by the partial snapshot. After the
- // strong roots have been serialized we can create a partial snapshot
- // which will repopulate the cache with objects neede by that partial
- // snapshot.
- Isolate::Current()->set_serialize_partial_snapshot_cache_length(0);
- }
- // Serialize the current state of the heap. The order is:
- // 1) Strong references.
- // 2) Partial snapshot cache.
- // 3) Weak references (eg the symbol table).
- virtual void SerializeStrongReferences();
- virtual void SerializeObject(Object* o,
- HowToCode how_to_code,
- WhereToPoint where_to_point);
- void SerializeWeakReferences();
- void Serialize() {
- SerializeStrongReferences();
- SerializeWeakReferences();
- }
-
- private:
- virtual int RootIndex(HeapObject* o) { return kInvalidRootIndex; }
- virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
- return false;
- }
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_SERIALIZE_H_
diff --git a/src/3rdparty/v8/src/shell.h b/src/3rdparty/v8/src/shell.h
deleted file mode 100644
index ca51040..0000000
--- a/src/3rdparty/v8/src/shell.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// A simple interactive shell. Enable with --shell.
-
-#ifndef V8_SHELL_H_
-#define V8_SHELL_H_
-
-#include "../public/debug.h"
-
-namespace v8 {
-namespace internal {
-
-// Debug event handler for interactive debugging.
-void handle_debug_event(v8::DebugEvent event,
- v8::Handle<v8::Object> exec_state,
- v8::Handle<v8::Object> event_data,
- v8::Handle<Value> data);
-
-
-class Shell {
- public:
- static void PrintObject(v8::Handle<v8::Value> obj);
- // Run the read-eval loop, executing code in the specified
- // environment.
- static void Run(v8::Handle<v8::Context> context);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_SHELL_H_
diff --git a/src/3rdparty/v8/src/simulator.h b/src/3rdparty/v8/src/simulator.h
deleted file mode 100644
index 485e930..0000000
--- a/src/3rdparty/v8/src/simulator.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SIMULATOR_H_
-#define V8_SIMULATOR_H_
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/simulator-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/simulator-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/simulator-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/simulator-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-#endif // V8_SIMULATOR_H_
diff --git a/src/3rdparty/v8/src/small-pointer-list.h b/src/3rdparty/v8/src/small-pointer-list.h
deleted file mode 100644
index 6291d9e..0000000
--- a/src/3rdparty/v8/src/small-pointer-list.h
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SMALL_POINTER_LIST_H_
-#define V8_SMALL_POINTER_LIST_H_
-
-#include "checks.h"
-#include "v8globals.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-// SmallPointerList is a list optimized for storing no or just a
-// single value. When more values are given it falls back to ZoneList.
-//
-// The interface tries to be as close to List from list.h as possible.
-template <typename T>
-class SmallPointerList {
- public:
- SmallPointerList() : data_(kEmptyTag) {}
-
- bool is_empty() const { return length() == 0; }
-
- int length() const {
- if ((data_ & kTagMask) == kEmptyTag) return 0;
- if ((data_ & kTagMask) == kSingletonTag) return 1;
- return list()->length();
- }
-
- void Add(T* pointer) {
- ASSERT(IsAligned(reinterpret_cast<intptr_t>(pointer), kPointerAlignment));
- if ((data_ & kTagMask) == kEmptyTag) {
- data_ = reinterpret_cast<intptr_t>(pointer) | kSingletonTag;
- return;
- }
- if ((data_ & kTagMask) == kSingletonTag) {
- PointerList* list = new PointerList(2);
- list->Add(single_value());
- list->Add(pointer);
- ASSERT(IsAligned(reinterpret_cast<intptr_t>(list), kPointerAlignment));
- data_ = reinterpret_cast<intptr_t>(list) | kListTag;
- return;
- }
- list()->Add(pointer);
- }
-
- // Note: returns T* and not T*& (unlike List from list.h).
- // This makes the implementation simpler and more const correct.
- T* at(int i) const {
- ASSERT((data_ & kTagMask) != kEmptyTag);
- if ((data_ & kTagMask) == kSingletonTag) {
- ASSERT(i == 0);
- return single_value();
- }
- return list()->at(i);
- }
-
- // See the note above.
- T* operator[](int i) const { return at(i); }
-
- // Remove the given element from the list (if present).
- void RemoveElement(T* pointer) {
- if ((data_ & kTagMask) == kEmptyTag) return;
- if ((data_ & kTagMask) == kSingletonTag) {
- if (pointer == single_value()) {
- data_ = kEmptyTag;
- }
- return;
- }
- list()->RemoveElement(pointer);
- }
-
- T* RemoveLast() {
- ASSERT((data_ & kTagMask) != kEmptyTag);
- if ((data_ & kTagMask) == kSingletonTag) {
- T* result = single_value();
- data_ = kEmptyTag;
- return result;
- }
- return list()->RemoveLast();
- }
-
- void Rewind(int pos) {
- if ((data_ & kTagMask) == kEmptyTag) {
- ASSERT(pos == 0);
- return;
- }
- if ((data_ & kTagMask) == kSingletonTag) {
- ASSERT(pos == 0 || pos == 1);
- if (pos == 0) {
- data_ = kEmptyTag;
- }
- return;
- }
- list()->Rewind(pos);
- }
-
- int CountOccurrences(T* pointer, int start, int end) const {
- if ((data_ & kTagMask) == kEmptyTag) return 0;
- if ((data_ & kTagMask) == kSingletonTag) {
- if (start == 0 && end >= 0) {
- return (single_value() == pointer) ? 1 : 0;
- }
- return 0;
- }
- return list()->CountOccurrences(pointer, start, end);
- }
-
- private:
- typedef ZoneList<T*> PointerList;
-
- static const intptr_t kEmptyTag = 1;
- static const intptr_t kSingletonTag = 0;
- static const intptr_t kListTag = 2;
- static const intptr_t kTagMask = 3;
- static const intptr_t kValueMask = ~kTagMask;
-
- STATIC_ASSERT(kTagMask + 1 <= kPointerAlignment);
-
- T* single_value() const {
- ASSERT((data_ & kTagMask) == kSingletonTag);
- STATIC_ASSERT(kSingletonTag == 0);
- return reinterpret_cast<T*>(data_);
- }
-
- PointerList* list() const {
- ASSERT((data_ & kTagMask) == kListTag);
- return reinterpret_cast<PointerList*>(data_ & kValueMask);
- }
-
- intptr_t data_;
-
- DISALLOW_COPY_AND_ASSIGN(SmallPointerList);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_SMALL_POINTER_LIST_H_
diff --git a/src/3rdparty/v8/src/smart-pointer.h b/src/3rdparty/v8/src/smart-pointer.h
deleted file mode 100644
index 0fa8224..0000000
--- a/src/3rdparty/v8/src/smart-pointer.h
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SMART_POINTER_H_
-#define V8_SMART_POINTER_H_
-
-namespace v8 {
-namespace internal {
-
-
-// A 'scoped array pointer' that calls DeleteArray on its pointer when the
-// destructor is called.
-template<typename T>
-class SmartPointer {
- public:
-
- // Default constructor. Construct an empty scoped pointer.
- inline SmartPointer() : p(NULL) {}
-
-
- // Construct a scoped pointer from a plain one.
- explicit inline SmartPointer(T* pointer) : p(pointer) {}
-
-
- // Copy constructor removes the pointer from the original to avoid double
- // freeing.
- inline SmartPointer(const SmartPointer<T>& rhs) : p(rhs.p) {
- const_cast<SmartPointer<T>&>(rhs).p = NULL;
- }
-
-
- // When the destructor of the scoped pointer is executed the plain pointer
- // is deleted using DeleteArray. This implies that you must allocate with
- // NewArray.
- inline ~SmartPointer() { if (p) DeleteArray(p); }
-
-
- // You can get the underlying pointer out with the * operator.
- inline T* operator*() { return p; }
-
-
- // You can use [n] to index as if it was a plain pointer
- inline T& operator[](size_t i) {
- return p[i];
- }
-
- // We don't have implicit conversion to a T* since that hinders migration:
- // You would not be able to change a method from returning a T* to
- // returning an SmartPointer<T> and then get errors wherever it is used.
-
-
- // If you want to take out the plain pointer and don't want it automatically
- // deleted then call Detach(). Afterwards, the smart pointer is empty
- // (NULL).
- inline T* Detach() {
- T* temp = p;
- p = NULL;
- return temp;
- }
-
-
- // Assignment requires an empty (NULL) SmartPointer as the receiver. Like
- // the copy constructor it removes the pointer in the original to avoid
- // double freeing.
- inline SmartPointer& operator=(const SmartPointer<T>& rhs) {
- ASSERT(is_empty());
- T* tmp = rhs.p; // swap to handle self-assignment
- const_cast<SmartPointer<T>&>(rhs).p = NULL;
- p = tmp;
- return *this;
- }
-
-
- inline bool is_empty() {
- return p == NULL;
- }
-
-
- private:
- T* p;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_SMART_POINTER_H_
diff --git a/src/3rdparty/v8/src/snapshot-common.cc b/src/3rdparty/v8/src/snapshot-common.cc
deleted file mode 100644
index 7f82895..0000000
--- a/src/3rdparty/v8/src/snapshot-common.cc
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The common functionality when building with or without snapshots.
-
-#include "v8.h"
-
-#include "api.h"
-#include "serialize.h"
-#include "snapshot.h"
-#include "platform.h"
-
-namespace v8 {
-namespace internal {
-
-bool Snapshot::Deserialize(const byte* content, int len) {
- SnapshotByteSource source(content, len);
- Deserializer deserializer(&source);
- return V8::Initialize(&deserializer);
-}
-
-
-bool Snapshot::Initialize(const char* snapshot_file) {
- if (snapshot_file) {
- int len;
- byte* str = ReadBytes(snapshot_file, &len);
- if (!str) return false;
- Deserialize(str, len);
- DeleteArray(str);
- return true;
- } else if (size_ > 0) {
- Deserialize(data_, size_);
- return true;
- }
- return false;
-}
-
-
-Handle<Context> Snapshot::NewContextFromSnapshot() {
- if (context_size_ == 0) {
- return Handle<Context>();
- }
- HEAP->ReserveSpace(new_space_used_,
- pointer_space_used_,
- data_space_used_,
- code_space_used_,
- map_space_used_,
- cell_space_used_,
- large_space_used_);
- SnapshotByteSource source(context_data_, context_size_);
- Deserializer deserializer(&source);
- Object* root;
- deserializer.DeserializePartial(&root);
- CHECK(root->IsContext());
- return Handle<Context>(Context::cast(root));
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/snapshot-empty.cc b/src/3rdparty/v8/src/snapshot-empty.cc
deleted file mode 100644
index cb26eb8..0000000
--- a/src/3rdparty/v8/src/snapshot-empty.cc
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Used for building without snapshots.
-
-#include "v8.h"
-
-#include "snapshot.h"
-
-namespace v8 {
-namespace internal {
-
-const byte Snapshot::data_[] = { 0 };
-const int Snapshot::size_ = 0;
-const byte Snapshot::context_data_[] = { 0 };
-const int Snapshot::context_size_ = 0;
-
-const int Snapshot::new_space_used_ = 0;
-const int Snapshot::pointer_space_used_ = 0;
-const int Snapshot::data_space_used_ = 0;
-const int Snapshot::code_space_used_ = 0;
-const int Snapshot::map_space_used_ = 0;
-const int Snapshot::cell_space_used_ = 0;
-const int Snapshot::large_space_used_ = 0;
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/snapshot.h b/src/3rdparty/v8/src/snapshot.h
deleted file mode 100644
index bedd186..0000000
--- a/src/3rdparty/v8/src/snapshot.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "isolate.h"
-
-#ifndef V8_SNAPSHOT_H_
-#define V8_SNAPSHOT_H_
-
-namespace v8 {
-namespace internal {
-
-STATIC_CLASS Snapshot {
- public:
- // Initialize the VM from the given snapshot file. If snapshot_file is
- // NULL, use the internal snapshot instead. Returns false if no snapshot
- // could be found.
- static bool Initialize(const char* snapshot_file = NULL);
-
- // Create a new context using the internal partial snapshot.
- static Handle<Context> NewContextFromSnapshot();
-
- // Returns whether or not the snapshot is enabled.
- static bool IsEnabled() { return size_ != 0; }
-
- // Write snapshot to the given file. Returns true if snapshot was written
- // successfully.
- static bool WriteToFile(const char* snapshot_file);
-
- private:
- static const byte data_[];
- static const byte context_data_[];
- static const int new_space_used_;
- static const int pointer_space_used_;
- static const int data_space_used_;
- static const int code_space_used_;
- static const int map_space_used_;
- static const int cell_space_used_;
- static const int large_space_used_;
- static const int size_;
- static const int context_size_;
-
- static bool Deserialize(const byte* content, int len);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_SNAPSHOT_H_
diff --git a/src/3rdparty/v8/src/spaces-inl.h b/src/3rdparty/v8/src/spaces-inl.h
deleted file mode 100644
index 070f970..0000000
--- a/src/3rdparty/v8/src/spaces-inl.h
+++ /dev/null
@@ -1,529 +0,0 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SPACES_INL_H_
-#define V8_SPACES_INL_H_
-
-#include "isolate.h"
-#include "spaces.h"
-#include "v8memory.h"
-
-namespace v8 {
-namespace internal {
-
-
-// -----------------------------------------------------------------------------
-// PageIterator
-
-bool PageIterator::has_next() {
- return prev_page_ != stop_page_;
-}
-
-
-Page* PageIterator::next() {
- ASSERT(has_next());
- prev_page_ = (prev_page_ == NULL)
- ? space_->first_page_
- : prev_page_->next_page();
- return prev_page_;
-}
-
-
-// -----------------------------------------------------------------------------
-// Page
-
-Page* Page::next_page() {
- return heap_->isolate()->memory_allocator()->GetNextPage(this);
-}
-
-
-Address Page::AllocationTop() {
- PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
- return owner->PageAllocationTop(this);
-}
-
-
-Address Page::AllocationWatermark() {
- PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
- if (this == owner->AllocationTopPage()) {
- return owner->top();
- }
- return address() + AllocationWatermarkOffset();
-}
-
-
-uint32_t Page::AllocationWatermarkOffset() {
- return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >>
- kAllocationWatermarkOffsetShift);
-}
-
-
-void Page::SetAllocationWatermark(Address allocation_watermark) {
- if ((heap_->gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
- // When iterating intergenerational references during scavenge
- // we might decide to promote an encountered young object.
- // We will allocate a space for such an object and put it
- // into the promotion queue to process it later.
- // If space for object was allocated somewhere beyond allocation
- // watermark this might cause garbage pointers to appear under allocation
- // watermark. To avoid visiting them during dirty regions iteration
- // which might be still in progress we store a valid allocation watermark
- // value and mark this page as having an invalid watermark.
- SetCachedAllocationWatermark(AllocationWatermark());
- InvalidateWatermark(true);
- }
-
- flags_ = (flags_ & kFlagsMask) |
- Offset(allocation_watermark) << kAllocationWatermarkOffsetShift;
- ASSERT(AllocationWatermarkOffset()
- == static_cast<uint32_t>(Offset(allocation_watermark)));
-}
-
-
-void Page::SetCachedAllocationWatermark(Address allocation_watermark) {
- mc_first_forwarded = allocation_watermark;
-}
-
-
-Address Page::CachedAllocationWatermark() {
- return mc_first_forwarded;
-}
-
-
-uint32_t Page::GetRegionMarks() {
- return dirty_regions_;
-}
-
-
-void Page::SetRegionMarks(uint32_t marks) {
- dirty_regions_ = marks;
-}
-
-
-int Page::GetRegionNumberForAddress(Address addr) {
- // Each page is divided into 256 byte regions. Each region has a corresponding
- // dirty mark bit in the page header. Region can contain intergenerational
- // references iff its dirty mark is set.
- // A normal 8K page contains exactly 32 regions so all region marks fit
- // into 32-bit integer field. To calculate a region number we just divide
- // offset inside page by region size.
- // A large page can contain more then 32 regions. But we want to avoid
- // additional write barrier code for distinguishing between large and normal
- // pages so we just ignore the fact that addr points into a large page and
- // calculate region number as if addr pointed into a normal 8K page. This way
- // we get a region number modulo 32 so for large pages several regions might
- // be mapped to a single dirty mark.
- ASSERT_PAGE_ALIGNED(this->address());
- STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt);
-
- // We are using masking with kPageAlignmentMask instead of Page::Offset()
- // to get an offset to the beginning of 8K page containing addr not to the
- // beginning of actual page which can be bigger then 8K.
- intptr_t offset_inside_normal_page = OffsetFrom(addr) & kPageAlignmentMask;
- return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2);
-}
-
-
-uint32_t Page::GetRegionMaskForAddress(Address addr) {
- return 1 << GetRegionNumberForAddress(addr);
-}
-
-
-uint32_t Page::GetRegionMaskForSpan(Address start, int length_in_bytes) {
- uint32_t result = 0;
- if (length_in_bytes >= kPageSize) {
- result = kAllRegionsDirtyMarks;
- } else if (length_in_bytes > 0) {
- int start_region = GetRegionNumberForAddress(start);
- int end_region =
- GetRegionNumberForAddress(start + length_in_bytes - kPointerSize);
- uint32_t start_mask = (~0) << start_region;
- uint32_t end_mask = ~((~1) << end_region);
- result = start_mask & end_mask;
- // if end_region < start_region, the mask is ored.
- if (result == 0) result = start_mask | end_mask;
- }
-#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- uint32_t expected = 0;
- for (Address a = start; a < start + length_in_bytes; a += kPointerSize) {
- expected |= GetRegionMaskForAddress(a);
- }
- ASSERT(expected == result);
- }
-#endif
- return result;
-}
-
-
-void Page::MarkRegionDirty(Address address) {
- SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address));
-}
-
-
-bool Page::IsRegionDirty(Address address) {
- return GetRegionMarks() & GetRegionMaskForAddress(address);
-}
-
-
-void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
- int rstart = GetRegionNumberForAddress(start);
- int rend = GetRegionNumberForAddress(end);
-
- if (reaches_limit) {
- end += 1;
- }
-
- if ((rend - rstart) == 0) {
- return;
- }
-
- uint32_t bitmask = 0;
-
- if ((OffsetFrom(start) & kRegionAlignmentMask) == 0
- || (start == ObjectAreaStart())) {
- // First region is fully covered
- bitmask = 1 << rstart;
- }
-
- while (++rstart < rend) {
- bitmask |= 1 << rstart;
- }
-
- if (bitmask) {
- SetRegionMarks(GetRegionMarks() & ~bitmask);
- }
-}
-
-
-void Page::FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap) {
- heap->page_watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
-}
-
-
-bool Page::IsWatermarkValid() {
- return (flags_ & (1 << WATERMARK_INVALIDATED)) !=
- heap_->page_watermark_invalidated_mark_;
-}
-
-
-void Page::InvalidateWatermark(bool value) {
- if (value) {
- flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
- heap_->page_watermark_invalidated_mark_;
- } else {
- flags_ =
- (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
- (heap_->page_watermark_invalidated_mark_ ^
- (1 << WATERMARK_INVALIDATED));
- }
-
- ASSERT(IsWatermarkValid() == !value);
-}
-
-
-bool Page::GetPageFlag(PageFlag flag) {
- return (flags_ & static_cast<intptr_t>(1 << flag)) != 0;
-}
-
-
-void Page::SetPageFlag(PageFlag flag, bool value) {
- if (value) {
- flags_ |= static_cast<intptr_t>(1 << flag);
- } else {
- flags_ &= ~static_cast<intptr_t>(1 << flag);
- }
-}
-
-
-void Page::ClearPageFlags() {
- flags_ = 0;
-}
-
-
-void Page::ClearGCFields() {
- InvalidateWatermark(true);
- SetAllocationWatermark(ObjectAreaStart());
- if (heap_->gc_state() == Heap::SCAVENGE) {
- SetCachedAllocationWatermark(ObjectAreaStart());
- }
- SetRegionMarks(kAllRegionsCleanMarks);
-}
-
-
-bool Page::WasInUseBeforeMC() {
- return GetPageFlag(WAS_IN_USE_BEFORE_MC);
-}
-
-
-void Page::SetWasInUseBeforeMC(bool was_in_use) {
- SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use);
-}
-
-
-bool Page::IsLargeObjectPage() {
- return !GetPageFlag(IS_NORMAL_PAGE);
-}
-
-
-void Page::SetIsLargeObjectPage(bool is_large_object_page) {
- SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
-}
-
-bool Page::IsPageExecutable() {
- return GetPageFlag(IS_EXECUTABLE);
-}
-
-
-void Page::SetIsPageExecutable(bool is_page_executable) {
- SetPageFlag(IS_EXECUTABLE, is_page_executable);
-}
-
-
-// -----------------------------------------------------------------------------
-// MemoryAllocator
-
-void MemoryAllocator::ChunkInfo::init(Address a, size_t s, PagedSpace* o) {
- address_ = a;
- size_ = s;
- owner_ = o;
- executable_ = (o == NULL) ? NOT_EXECUTABLE : o->executable();
- owner_identity_ = (o == NULL) ? FIRST_SPACE : o->identity();
-}
-
-
-bool MemoryAllocator::IsValidChunk(int chunk_id) {
- if (!IsValidChunkId(chunk_id)) return false;
-
- ChunkInfo& c = chunks_[chunk_id];
- return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL);
-}
-
-
-bool MemoryAllocator::IsValidChunkId(int chunk_id) {
- return (0 <= chunk_id) && (chunk_id < max_nof_chunks_);
-}
-
-
-bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) {
- ASSERT(p->is_valid());
-
- int chunk_id = GetChunkId(p);
- if (!IsValidChunkId(chunk_id)) return false;
-
- ChunkInfo& c = chunks_[chunk_id];
- return (c.address() <= p->address()) &&
- (p->address() < c.address() + c.size()) &&
- (space == c.owner());
-}
-
-
-Page* MemoryAllocator::GetNextPage(Page* p) {
- ASSERT(p->is_valid());
- intptr_t raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
- return Page::FromAddress(AddressFrom<Address>(raw_addr));
-}
-
-
-int MemoryAllocator::GetChunkId(Page* p) {
- ASSERT(p->is_valid());
- return static_cast<int>(p->opaque_header & Page::kPageAlignmentMask);
-}
-
-
-void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
- ASSERT(prev->is_valid());
- int chunk_id = GetChunkId(prev);
- ASSERT_PAGE_ALIGNED(next->address());
- prev->opaque_header = OffsetFrom(next->address()) | chunk_id;
-}
-
-
-PagedSpace* MemoryAllocator::PageOwner(Page* page) {
- int chunk_id = GetChunkId(page);
- ASSERT(IsValidChunk(chunk_id));
- return chunks_[chunk_id].owner();
-}
-
-
-bool MemoryAllocator::InInitialChunk(Address address) {
- if (initial_chunk_ == NULL) return false;
-
- Address start = static_cast<Address>(initial_chunk_->address());
- return (start <= address) && (address < start + initial_chunk_->size());
-}
-
-
-#ifdef ENABLE_HEAP_PROTECTION
-
-void MemoryAllocator::Protect(Address start, size_t size) {
- OS::Protect(start, size);
-}
-
-
-void MemoryAllocator::Unprotect(Address start,
- size_t size,
- Executability executable) {
- OS::Unprotect(start, size, executable);
-}
-
-
-void MemoryAllocator::ProtectChunkFromPage(Page* page) {
- int id = GetChunkId(page);
- OS::Protect(chunks_[id].address(), chunks_[id].size());
-}
-
-
-void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
- int id = GetChunkId(page);
- OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
- chunks_[id].owner()->executable() == EXECUTABLE);
-}
-
-#endif
-
-
-// --------------------------------------------------------------------------
-// PagedSpace
-
-bool PagedSpace::Contains(Address addr) {
- Page* p = Page::FromAddress(addr);
- if (!p->is_valid()) return false;
- return heap()->isolate()->memory_allocator()->IsPageInSpace(p, this);
-}
-
-
-// Try linear allocation in the page of alloc_info's allocation top. Does
-// not contain slow case logic (eg, move to the next page or try free list
-// allocation) so it can be used by all the allocation functions and for all
-// the paged spaces.
-HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
- int size_in_bytes) {
- Address current_top = alloc_info->top;
- Address new_top = current_top + size_in_bytes;
- if (new_top > alloc_info->limit) return NULL;
-
- alloc_info->top = new_top;
- ASSERT(alloc_info->VerifyPagedAllocation());
- accounting_stats_.AllocateBytes(size_in_bytes);
- return HeapObject::FromAddress(current_top);
-}
-
-
-// Raw allocation.
-MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
- ASSERT(HasBeenSetup());
- ASSERT_OBJECT_SIZE(size_in_bytes);
- HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
- if (object != NULL) return object;
-
- object = SlowAllocateRaw(size_in_bytes);
- if (object != NULL) return object;
-
- return Failure::RetryAfterGC(identity());
-}
-
-
-// Reallocating (and promoting) objects during a compacting collection.
-MaybeObject* PagedSpace::MCAllocateRaw(int size_in_bytes) {
- ASSERT(HasBeenSetup());
- ASSERT_OBJECT_SIZE(size_in_bytes);
- HeapObject* object = AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
- if (object != NULL) return object;
-
- object = SlowMCAllocateRaw(size_in_bytes);
- if (object != NULL) return object;
-
- return Failure::RetryAfterGC(identity());
-}
-
-
-// -----------------------------------------------------------------------------
-// LargeObjectChunk
-
-Address LargeObjectChunk::GetStartAddress() {
- // Round the chunk address up to the nearest page-aligned address
- // and return the heap object in that page.
- Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize));
- return page->ObjectAreaStart();
-}
-
-
-void LargeObjectChunk::Free(Executability executable) {
- Isolate* isolate =
- Page::FromAddress(RoundUp(address(), Page::kPageSize))->heap_->isolate();
- isolate->memory_allocator()->FreeRawMemory(address(), size(), executable);
-}
-
-// -----------------------------------------------------------------------------
-// NewSpace
-
-MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes,
- AllocationInfo* alloc_info) {
- Address new_top = alloc_info->top + size_in_bytes;
- if (new_top > alloc_info->limit) return Failure::RetryAfterGC();
-
- Object* obj = HeapObject::FromAddress(alloc_info->top);
- alloc_info->top = new_top;
-#ifdef DEBUG
- SemiSpace* space =
- (alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
- ASSERT(space->low() <= alloc_info->top
- && alloc_info->top <= space->high()
- && alloc_info->limit == space->high());
-#endif
- return obj;
-}
-
-
-intptr_t LargeObjectSpace::Available() {
- return LargeObjectChunk::ObjectSizeFor(
- heap()->isolate()->memory_allocator()->Available());
-}
-
-
-template <typename StringType>
-void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) {
- ASSERT(length <= string->length());
- ASSERT(string->IsSeqString());
- ASSERT(string->address() + StringType::SizeFor(string->length()) ==
- allocation_info_.top);
- allocation_info_.top =
- string->address() + StringType::SizeFor(length);
- string->set_length(length);
-}
-
-
-bool FreeListNode::IsFreeListNode(HeapObject* object) {
- return object->map() == HEAP->raw_unchecked_byte_array_map()
- || object->map() == HEAP->raw_unchecked_one_pointer_filler_map()
- || object->map() == HEAP->raw_unchecked_two_pointer_filler_map();
-}
-
-} } // namespace v8::internal
-
-#endif // V8_SPACES_INL_H_
diff --git a/src/3rdparty/v8/src/spaces.cc b/src/3rdparty/v8/src/spaces.cc
deleted file mode 100644
index eb4fa7d..0000000
--- a/src/3rdparty/v8/src/spaces.cc
+++ /dev/null
@@ -1,3147 +0,0 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "liveobjectlist-inl.h"
-#include "macro-assembler.h"
-#include "mark-compact.h"
-#include "platform.h"
-
-namespace v8 {
-namespace internal {
-
-// For contiguous spaces, top should be in the space (or at the end) and limit
-// should be the end of the space.
-#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
- ASSERT((space).low() <= (info).top \
- && (info).top <= (space).high() \
- && (info).limit == (space).high())
-
-// ----------------------------------------------------------------------------
-// HeapObjectIterator
-
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
- Initialize(space->bottom(), space->top(), NULL);
-}
-
-
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
- HeapObjectCallback size_func) {
- Initialize(space->bottom(), space->top(), size_func);
-}
-
-
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
- Initialize(start, space->top(), NULL);
-}
-
-
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
- HeapObjectCallback size_func) {
- Initialize(start, space->top(), size_func);
-}
-
-
-HeapObjectIterator::HeapObjectIterator(Page* page,
- HeapObjectCallback size_func) {
- Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func);
-}
-
-
-void HeapObjectIterator::Initialize(Address cur, Address end,
- HeapObjectCallback size_f) {
- cur_addr_ = cur;
- end_addr_ = end;
- end_page_ = Page::FromAllocationTop(end);
- size_func_ = size_f;
- Page* p = Page::FromAllocationTop(cur_addr_);
- cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();
-
-#ifdef DEBUG
- Verify();
-#endif
-}
-
-
-HeapObject* HeapObjectIterator::FromNextPage() {
- if (cur_addr_ == end_addr_) return NULL;
-
- Page* cur_page = Page::FromAllocationTop(cur_addr_);
- cur_page = cur_page->next_page();
- ASSERT(cur_page->is_valid());
-
- cur_addr_ = cur_page->ObjectAreaStart();
- cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
-
- if (cur_addr_ == end_addr_) return NULL;
- ASSERT(cur_addr_ < cur_limit_);
-#ifdef DEBUG
- Verify();
-#endif
- return FromCurrentPage();
-}
-
-
-#ifdef DEBUG
-void HeapObjectIterator::Verify() {
- Page* p = Page::FromAllocationTop(cur_addr_);
- ASSERT(p == Page::FromAllocationTop(cur_limit_));
- ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
-}
-#endif
-
-
-// -----------------------------------------------------------------------------
-// PageIterator
-
-PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
- prev_page_ = NULL;
- switch (mode) {
- case PAGES_IN_USE:
- stop_page_ = space->AllocationTopPage();
- break;
- case PAGES_USED_BY_MC:
- stop_page_ = space->MCRelocationTopPage();
- break;
- case ALL_PAGES:
-#ifdef DEBUG
- // Verify that the cached last page in the space is actually the
- // last page.
- for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
- if (!p->next_page()->is_valid()) {
- ASSERT(space->last_page_ == p);
- }
- }
-#endif
- stop_page_ = space->last_page_;
- break;
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// CodeRange
-
-
-CodeRange::CodeRange()
- : code_range_(NULL),
- free_list_(0),
- allocation_list_(0),
- current_allocation_block_index_(0),
- isolate_(NULL) {
-}
-
-
-bool CodeRange::Setup(const size_t requested) {
- ASSERT(code_range_ == NULL);
-
- code_range_ = new VirtualMemory(requested);
- CHECK(code_range_ != NULL);
- if (!code_range_->IsReserved()) {
- delete code_range_;
- code_range_ = NULL;
- return false;
- }
-
- // We are sure that we have mapped a block of requested addresses.
- ASSERT(code_range_->size() == requested);
- LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
- allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
- current_allocation_block_index_ = 0;
- return true;
-}
-
-
-int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
- const FreeBlock* right) {
- // The entire point of CodeRange is that the difference between two
- // addresses in the range can be represented as a signed 32-bit int,
- // so the cast is semantically correct.
- return static_cast<int>(left->start - right->start);
-}
-
-
-void CodeRange::GetNextAllocationBlock(size_t requested) {
- for (current_allocation_block_index_++;
- current_allocation_block_index_ < allocation_list_.length();
- current_allocation_block_index_++) {
- if (requested <= allocation_list_[current_allocation_block_index_].size) {
- return; // Found a large enough allocation block.
- }
- }
-
- // Sort and merge the free blocks on the free list and the allocation list.
- free_list_.AddAll(allocation_list_);
- allocation_list_.Clear();
- free_list_.Sort(&CompareFreeBlockAddress);
- for (int i = 0; i < free_list_.length();) {
- FreeBlock merged = free_list_[i];
- i++;
- // Add adjacent free blocks to the current merged block.
- while (i < free_list_.length() &&
- free_list_[i].start == merged.start + merged.size) {
- merged.size += free_list_[i].size;
- i++;
- }
- if (merged.size > 0) {
- allocation_list_.Add(merged);
- }
- }
- free_list_.Clear();
-
- for (current_allocation_block_index_ = 0;
- current_allocation_block_index_ < allocation_list_.length();
- current_allocation_block_index_++) {
- if (requested <= allocation_list_[current_allocation_block_index_].size) {
- return; // Found a large enough allocation block.
- }
- }
-
- // Code range is full or too fragmented.
- V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
-}
-
-
-
-void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
- ASSERT(current_allocation_block_index_ < allocation_list_.length());
- if (requested > allocation_list_[current_allocation_block_index_].size) {
- // Find an allocation block large enough. This function call may
- // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
- GetNextAllocationBlock(requested);
- }
- // Commit the requested memory at the start of the current allocation block.
- *allocated = RoundUp(requested, Page::kPageSize);
- FreeBlock current = allocation_list_[current_allocation_block_index_];
- if (*allocated >= current.size - Page::kPageSize) {
- // Don't leave a small free block, useless for a large object or chunk.
- *allocated = current.size;
- }
- ASSERT(*allocated <= current.size);
- if (!code_range_->Commit(current.start, *allocated, true)) {
- *allocated = 0;
- return NULL;
- }
- allocation_list_[current_allocation_block_index_].start += *allocated;
- allocation_list_[current_allocation_block_index_].size -= *allocated;
- if (*allocated == current.size) {
- GetNextAllocationBlock(0); // This block is used up, get the next one.
- }
- return current.start;
-}
-
-
-void CodeRange::FreeRawMemory(void* address, size_t length) {
- free_list_.Add(FreeBlock(address, length));
- code_range_->Uncommit(address, length);
-}
-
-
-void CodeRange::TearDown() {
- delete code_range_; // Frees all memory in the virtual memory range.
- code_range_ = NULL;
- free_list_.Free();
- allocation_list_.Free();
-}
-
-
-// -----------------------------------------------------------------------------
-// MemoryAllocator
-//
-
-// 270 is an estimate based on the static default heap size of a pair of 256K
-// semispaces and a 64M old generation.
-const int kEstimatedNumberOfChunks = 270;
-
-
-MemoryAllocator::MemoryAllocator()
- : capacity_(0),
- capacity_executable_(0),
- size_(0),
- size_executable_(0),
- initial_chunk_(NULL),
- chunks_(kEstimatedNumberOfChunks),
- free_chunk_ids_(kEstimatedNumberOfChunks),
- max_nof_chunks_(0),
- top_(0),
- isolate_(NULL) {
-}
-
-
-void MemoryAllocator::Push(int free_chunk_id) {
- ASSERT(max_nof_chunks_ > 0);
- ASSERT(top_ < max_nof_chunks_);
- free_chunk_ids_[top_++] = free_chunk_id;
-}
-
-
-int MemoryAllocator::Pop() {
- ASSERT(top_ > 0);
- return free_chunk_ids_[--top_];
-}
-
-
-bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
- capacity_ = RoundUp(capacity, Page::kPageSize);
- capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
- ASSERT_GE(capacity_, capacity_executable_);
-
- // Over-estimate the size of chunks_ array. It assumes the expansion of old
- // space is always in the unit of a chunk (kChunkSize) except the last
- // expansion.
- //
- // Due to alignment, allocated space might be one page less than required
- // number (kPagesPerChunk) of pages for old spaces.
- //
- // Reserve two chunk ids for semispaces, one for map space, one for old
- // space, and one for code space.
- max_nof_chunks_ =
- static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5;
- if (max_nof_chunks_ > kMaxNofChunks) return false;
-
- size_ = 0;
- size_executable_ = 0;
- ChunkInfo info; // uninitialized element.
- for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
- chunks_.Add(info);
- free_chunk_ids_.Add(i);
- }
- top_ = max_nof_chunks_;
- return true;
-}
-
-
-void MemoryAllocator::TearDown() {
- for (int i = 0; i < max_nof_chunks_; i++) {
- if (chunks_[i].address() != NULL) DeleteChunk(i);
- }
- chunks_.Clear();
- free_chunk_ids_.Clear();
-
- if (initial_chunk_ != NULL) {
- LOG(isolate_, DeleteEvent("InitialChunk", initial_chunk_->address()));
- delete initial_chunk_;
- initial_chunk_ = NULL;
- }
-
- ASSERT(top_ == max_nof_chunks_); // all chunks are free
- top_ = 0;
- capacity_ = 0;
- capacity_executable_ = 0;
- size_ = 0;
- max_nof_chunks_ = 0;
-}
-
-
-void* MemoryAllocator::AllocateRawMemory(const size_t requested,
- size_t* allocated,
- Executability executable) {
- if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
- return NULL;
- }
-
- void* mem;
- if (executable == EXECUTABLE) {
- // Check executable memory limit.
- if (size_executable_ + requested >
- static_cast<size_t>(capacity_executable_)) {
- LOG(isolate_,
- StringEvent("MemoryAllocator::AllocateRawMemory",
- "V8 Executable Allocation capacity exceeded"));
- return NULL;
- }
- // Allocate executable memory either from code range or from the
- // OS.
- if (isolate_->code_range()->exists()) {
- mem = isolate_->code_range()->AllocateRawMemory(requested, allocated);
- } else {
- mem = OS::Allocate(requested, allocated, true);
- }
- // Update executable memory size.
- size_executable_ += static_cast<int>(*allocated);
- } else {
- mem = OS::Allocate(requested, allocated, false);
- }
- int alloced = static_cast<int>(*allocated);
- size_ += alloced;
-
-#ifdef DEBUG
- ZapBlock(reinterpret_cast<Address>(mem), alloced);
-#endif
- isolate_->counters()->memory_allocated()->Increment(alloced);
- return mem;
-}
-
-
-void MemoryAllocator::FreeRawMemory(void* mem,
- size_t length,
- Executability executable) {
-#ifdef DEBUG
- ZapBlock(reinterpret_cast<Address>(mem), length);
-#endif
- if (isolate_->code_range()->contains(static_cast<Address>(mem))) {
- isolate_->code_range()->FreeRawMemory(mem, length);
- } else {
- OS::Free(mem, length);
- }
- isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(length));
- size_ -= static_cast<int>(length);
- if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);
-
- ASSERT(size_ >= 0);
- ASSERT(size_executable_ >= 0);
-}
-
-
-void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
- AllocationAction action,
- size_t size) {
- for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
- MemoryAllocationCallbackRegistration registration =
- memory_allocation_callbacks_[i];
- if ((registration.space & space) == space &&
- (registration.action & action) == action)
- registration.callback(space, action, static_cast<int>(size));
- }
-}
-
-
-bool MemoryAllocator::MemoryAllocationCallbackRegistered(
- MemoryAllocationCallback callback) {
- for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
- if (memory_allocation_callbacks_[i].callback == callback) return true;
- }
- return false;
-}
-
-
-void MemoryAllocator::AddMemoryAllocationCallback(
- MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action) {
- ASSERT(callback != NULL);
- MemoryAllocationCallbackRegistration registration(callback, space, action);
- ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
- return memory_allocation_callbacks_.Add(registration);
-}
-
-
-void MemoryAllocator::RemoveMemoryAllocationCallback(
- MemoryAllocationCallback callback) {
- ASSERT(callback != NULL);
- for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
- if (memory_allocation_callbacks_[i].callback == callback) {
- memory_allocation_callbacks_.Remove(i);
- return;
- }
- }
- UNREACHABLE();
-}
-
-void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
- ASSERT(initial_chunk_ == NULL);
-
- initial_chunk_ = new VirtualMemory(requested);
- CHECK(initial_chunk_ != NULL);
- if (!initial_chunk_->IsReserved()) {
- delete initial_chunk_;
- initial_chunk_ = NULL;
- return NULL;
- }
-
- // We are sure that we have mapped a block of requested addresses.
- ASSERT(initial_chunk_->size() == requested);
- LOG(isolate_,
- NewEvent("InitialChunk", initial_chunk_->address(), requested));
- size_ += static_cast<int>(requested);
- return initial_chunk_->address();
-}
-
-
-static int PagesInChunk(Address start, size_t size) {
- // The first page starts on the first page-aligned address from start onward
- // and the last page ends on the last page-aligned address before
- // start+size. Page::kPageSize is a power of two so we can divide by
- // shifting.
- return static_cast<int>((RoundDown(start + size, Page::kPageSize)
- - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
-}
-
-
-Page* MemoryAllocator::AllocatePages(int requested_pages,
- int* allocated_pages,
- PagedSpace* owner) {
- if (requested_pages <= 0) return Page::FromAddress(NULL);
- size_t chunk_size = requested_pages * Page::kPageSize;
-
- void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
- if (chunk == NULL) return Page::FromAddress(NULL);
- LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));
-
- *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
- // We may 'lose' a page due to alignment.
- ASSERT(*allocated_pages >= kPagesPerChunk - 1);
- if (*allocated_pages == 0) {
- FreeRawMemory(chunk, chunk_size, owner->executable());
- LOG(isolate_, DeleteEvent("PagedChunk", chunk));
- return Page::FromAddress(NULL);
- }
-
- int chunk_id = Pop();
- chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
-
- ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
- PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
- Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);
-
- return new_pages;
-}
-
-
-Page* MemoryAllocator::CommitPages(Address start, size_t size,
- PagedSpace* owner, int* num_pages) {
- ASSERT(start != NULL);
- *num_pages = PagesInChunk(start, size);
- ASSERT(*num_pages > 0);
- ASSERT(initial_chunk_ != NULL);
- ASSERT(InInitialChunk(start));
- ASSERT(InInitialChunk(start + size - 1));
- if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
- return Page::FromAddress(NULL);
- }
-#ifdef DEBUG
- ZapBlock(start, size);
-#endif
- isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
-
- // So long as we correctly overestimated the number of chunks we should not
- // run out of chunk ids.
- CHECK(!OutOfChunkIds());
- int chunk_id = Pop();
- chunks_[chunk_id].init(start, size, owner);
- return InitializePagesInChunk(chunk_id, *num_pages, owner);
-}
-
-
-bool MemoryAllocator::CommitBlock(Address start,
- size_t size,
- Executability executable) {
- ASSERT(start != NULL);
- ASSERT(size > 0);
- ASSERT(initial_chunk_ != NULL);
- ASSERT(InInitialChunk(start));
- ASSERT(InInitialChunk(start + size - 1));
-
- if (!initial_chunk_->Commit(start, size, executable)) return false;
-#ifdef DEBUG
- ZapBlock(start, size);
-#endif
- isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
- return true;
-}
-
-
-bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
- ASSERT(start != NULL);
- ASSERT(size > 0);
- ASSERT(initial_chunk_ != NULL);
- ASSERT(InInitialChunk(start));
- ASSERT(InInitialChunk(start + size - 1));
-
- if (!initial_chunk_->Uncommit(start, size)) return false;
- isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
- return true;
-}
-
-
-void MemoryAllocator::ZapBlock(Address start, size_t size) {
- for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
- Memory::Address_at(start + s) = kZapValue;
- }
-}
-
-
-Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
- PagedSpace* owner) {
- ASSERT(IsValidChunk(chunk_id));
- ASSERT(pages_in_chunk > 0);
-
- Address chunk_start = chunks_[chunk_id].address();
-
- Address low = RoundUp(chunk_start, Page::kPageSize);
-
-#ifdef DEBUG
- size_t chunk_size = chunks_[chunk_id].size();
- Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
- ASSERT(pages_in_chunk <=
- ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
-#endif
-
- Address page_addr = low;
- for (int i = 0; i < pages_in_chunk; i++) {
- Page* p = Page::FromAddress(page_addr);
- p->heap_ = owner->heap();
- p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
- p->InvalidateWatermark(true);
- p->SetIsLargeObjectPage(false);
- p->SetAllocationWatermark(p->ObjectAreaStart());
- p->SetCachedAllocationWatermark(p->ObjectAreaStart());
- page_addr += Page::kPageSize;
- }
-
- // Set the next page of the last page to 0.
- Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
- last_page->opaque_header = OffsetFrom(0) | chunk_id;
-
- return Page::FromAddress(low);
-}
-
-
-Page* MemoryAllocator::FreePages(Page* p) {
- if (!p->is_valid()) return p;
-
- // Find the first page in the same chunk as 'p'
- Page* first_page = FindFirstPageInSameChunk(p);
- Page* page_to_return = Page::FromAddress(NULL);
-
- if (p != first_page) {
- // Find the last page in the same chunk as 'prev'.
- Page* last_page = FindLastPageInSameChunk(p);
- first_page = GetNextPage(last_page); // first page in next chunk
-
- // set the next_page of last_page to NULL
- SetNextPage(last_page, Page::FromAddress(NULL));
- page_to_return = p; // return 'p' when exiting
- }
-
- while (first_page->is_valid()) {
- int chunk_id = GetChunkId(first_page);
- ASSERT(IsValidChunk(chunk_id));
-
- // Find the first page of the next chunk before deleting this chunk.
- first_page = GetNextPage(FindLastPageInSameChunk(first_page));
-
- // Free the current chunk.
- DeleteChunk(chunk_id);
- }
-
- return page_to_return;
-}
-
-
-void MemoryAllocator::FreeAllPages(PagedSpace* space) {
- for (int i = 0, length = chunks_.length(); i < length; i++) {
- if (chunks_[i].owner() == space) {
- DeleteChunk(i);
- }
- }
-}
-
-
-void MemoryAllocator::DeleteChunk(int chunk_id) {
- ASSERT(IsValidChunk(chunk_id));
-
- ChunkInfo& c = chunks_[chunk_id];
-
- // We cannot free a chunk contained in the initial chunk because it was not
- // allocated with AllocateRawMemory. Instead we uncommit the virtual
- // memory.
- if (InInitialChunk(c.address())) {
- // TODO(1240712): VirtualMemory::Uncommit has a return value which
- // is ignored here.
- initial_chunk_->Uncommit(c.address(), c.size());
- Counters* counters = isolate_->counters();
- counters->memory_allocated()->Decrement(static_cast<int>(c.size()));
- } else {
- LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
- ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
- size_t size = c.size();
- FreeRawMemory(c.address(), size, c.executable());
- PerformAllocationCallback(space, kAllocationActionFree, size);
- }
- c.init(NULL, 0, NULL);
- Push(chunk_id);
-}
-
-
-Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
- int chunk_id = GetChunkId(p);
- ASSERT(IsValidChunk(chunk_id));
-
- Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
- return Page::FromAddress(low);
-}
-
-
-Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
- int chunk_id = GetChunkId(p);
- ASSERT(IsValidChunk(chunk_id));
-
- Address chunk_start = chunks_[chunk_id].address();
- size_t chunk_size = chunks_[chunk_id].size();
-
- Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
- ASSERT(chunk_start <= p->address() && p->address() < high);
-
- return Page::FromAddress(high - Page::kPageSize);
-}
-
-
-#ifdef DEBUG
-void MemoryAllocator::ReportStatistics() {
- float pct = static_cast<float>(capacity_ - size_) / capacity_;
- PrintF(" capacity: %" V8_PTR_PREFIX "d"
- ", used: %" V8_PTR_PREFIX "d"
- ", available: %%%d\n\n",
- capacity_, size_, static_cast<int>(pct*100));
-}
-#endif
-
-
-void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
- Page** first_page,
- Page** last_page,
- Page** last_page_in_use) {
- Page* first = NULL;
- Page* last = NULL;
-
- for (int i = 0, length = chunks_.length(); i < length; i++) {
- ChunkInfo& chunk = chunks_[i];
-
- if (chunk.owner() == space) {
- if (first == NULL) {
- Address low = RoundUp(chunk.address(), Page::kPageSize);
- first = Page::FromAddress(low);
- }
- last = RelinkPagesInChunk(i,
- chunk.address(),
- chunk.size(),
- last,
- last_page_in_use);
- }
- }
-
- if (first_page != NULL) {
- *first_page = first;
- }
-
- if (last_page != NULL) {
- *last_page = last;
- }
-}
-
-
-Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
- Address chunk_start,
- size_t chunk_size,
- Page* prev,
- Page** last_page_in_use) {
- Address page_addr = RoundUp(chunk_start, Page::kPageSize);
- int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);
-
- if (prev->is_valid()) {
- SetNextPage(prev, Page::FromAddress(page_addr));
- }
-
- for (int i = 0; i < pages_in_chunk; i++) {
- Page* p = Page::FromAddress(page_addr);
- p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
- page_addr += Page::kPageSize;
-
- p->InvalidateWatermark(true);
- if (p->WasInUseBeforeMC()) {
- *last_page_in_use = p;
- }
- }
-
- // Set the next page of the last page to 0.
- Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
- last_page->opaque_header = OffsetFrom(0) | chunk_id;
-
- if (last_page->WasInUseBeforeMC()) {
- *last_page_in_use = last_page;
- }
-
- return last_page;
-}
-
-
-// -----------------------------------------------------------------------------
-// PagedSpace implementation
-
-PagedSpace::PagedSpace(Heap* heap,
- intptr_t max_capacity,
- AllocationSpace id,
- Executability executable)
- : Space(heap, id, executable) {
- max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
- * Page::kObjectAreaSize;
- accounting_stats_.Clear();
-
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
-
- mc_forwarding_info_.top = NULL;
- mc_forwarding_info_.limit = NULL;
-}
-
-
-bool PagedSpace::Setup(Address start, size_t size) {
- if (HasBeenSetup()) return false;
-
- int num_pages = 0;
- // Try to use the virtual memory range passed to us. If it is too small to
- // contain at least one page, ignore it and allocate instead.
- int pages_in_chunk = PagesInChunk(start, size);
- if (pages_in_chunk > 0) {
- first_page_ = Isolate::Current()->memory_allocator()->CommitPages(
- RoundUp(start, Page::kPageSize),
- Page::kPageSize * pages_in_chunk,
- this, &num_pages);
- } else {
- int requested_pages =
- Min(MemoryAllocator::kPagesPerChunk,
- static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
- first_page_ =
- Isolate::Current()->memory_allocator()->AllocatePages(
- requested_pages, &num_pages, this);
- if (!first_page_->is_valid()) return false;
- }
-
- // We are sure that the first page is valid and that we have at least one
- // page.
- ASSERT(first_page_->is_valid());
- ASSERT(num_pages > 0);
- accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
- ASSERT(Capacity() <= max_capacity_);
-
- // Sequentially clear region marks in the newly allocated
- // pages and cache the current last page in the space.
- for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
- p->SetRegionMarks(Page::kAllRegionsCleanMarks);
- last_page_ = p;
- }
-
- // Use first_page_ for allocation.
- SetAllocationInfo(&allocation_info_, first_page_);
-
- page_list_is_chunk_ordered_ = true;
-
- return true;
-}
-
-
-bool PagedSpace::HasBeenSetup() {
- return (Capacity() > 0);
-}
-
-
-void PagedSpace::TearDown() {
- Isolate::Current()->memory_allocator()->FreeAllPages(this);
- first_page_ = NULL;
- accounting_stats_.Clear();
-}
-
-
-#ifdef ENABLE_HEAP_PROTECTION
-
-void PagedSpace::Protect() {
- Page* page = first_page_;
- while (page->is_valid()) {
- Isolate::Current()->memory_allocator()->ProtectChunkFromPage(page);
- page = Isolate::Current()->memory_allocator()->
- FindLastPageInSameChunk(page)->next_page();
- }
-}
-
-
-void PagedSpace::Unprotect() {
- Page* page = first_page_;
- while (page->is_valid()) {
- Isolate::Current()->memory_allocator()->UnprotectChunkFromPage(page);
- page = Isolate::Current()->memory_allocator()->
- FindLastPageInSameChunk(page)->next_page();
- }
-}
-
-#endif
-
-
-void PagedSpace::MarkAllPagesClean() {
- PageIterator it(this, PageIterator::ALL_PAGES);
- while (it.has_next()) {
- it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
- }
-}
-
-
-MaybeObject* PagedSpace::FindObject(Address addr) {
- // Note: this function can only be called before or after mark-compact GC
- // because it accesses map pointers.
- ASSERT(!heap()->mark_compact_collector()->in_use());
-
- if (!Contains(addr)) return Failure::Exception();
-
- Page* p = Page::FromAddress(addr);
- ASSERT(IsUsed(p));
- Address cur = p->ObjectAreaStart();
- Address end = p->AllocationTop();
- while (cur < end) {
- HeapObject* obj = HeapObject::FromAddress(cur);
- Address next = cur + obj->Size();
- if ((cur <= addr) && (addr < next)) return obj;
- cur = next;
- }
-
- UNREACHABLE();
- return Failure::Exception();
-}
-
-
-bool PagedSpace::IsUsed(Page* page) {
- PageIterator it(this, PageIterator::PAGES_IN_USE);
- while (it.has_next()) {
- if (page == it.next()) return true;
- }
- return false;
-}
-
-
-void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
- alloc_info->top = p->ObjectAreaStart();
- alloc_info->limit = p->ObjectAreaEnd();
- ASSERT(alloc_info->VerifyPagedAllocation());
-}
-
-
-void PagedSpace::MCResetRelocationInfo() {
- // Set page indexes.
- int i = 0;
- PageIterator it(this, PageIterator::ALL_PAGES);
- while (it.has_next()) {
- Page* p = it.next();
- p->mc_page_index = i++;
- }
-
- // Set mc_forwarding_info_ to the first page in the space.
- SetAllocationInfo(&mc_forwarding_info_, first_page_);
- // All the bytes in the space are 'available'. We will rediscover
- // allocated and wasted bytes during GC.
- accounting_stats_.Reset();
-}
-
-
-int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
-#ifdef DEBUG
- // The Contains function considers the address at the beginning of a
- // page in the page, MCSpaceOffsetForAddress considers it is in the
- // previous page.
- if (Page::IsAlignedToPageSize(addr)) {
- ASSERT(Contains(addr - kPointerSize));
- } else {
- ASSERT(Contains(addr));
- }
-#endif
-
- // If addr is at the end of a page, it belongs to previous page
- Page* p = Page::IsAlignedToPageSize(addr)
- ? Page::FromAllocationTop(addr)
- : Page::FromAddress(addr);
- int index = p->mc_page_index;
- return (index * Page::kPageSize) + p->Offset(addr);
-}
-
-
-// Slow case for reallocating and promoting objects during a compacting
-// collection. This function is not space-specific.
-HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
- Page* current_page = TopPageOf(mc_forwarding_info_);
- if (!current_page->next_page()->is_valid()) {
- if (!Expand(current_page)) {
- return NULL;
- }
- }
-
- // There are surely more pages in the space now.
- ASSERT(current_page->next_page()->is_valid());
- // We do not add the top of page block for current page to the space's
- // free list---the block may contain live objects so we cannot write
- // bookkeeping information to it. Instead, we will recover top of page
- // blocks when we move objects to their new locations.
- //
- // We do however write the allocation pointer to the page. The encoding
- // of forwarding addresses is as an offset in terms of live bytes, so we
- // need quick access to the allocation top of each page to decode
- // forwarding addresses.
- current_page->SetAllocationWatermark(mc_forwarding_info_.top);
- current_page->next_page()->InvalidateWatermark(true);
- SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
- return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
-}
-
-
-bool PagedSpace::Expand(Page* last_page) {
- ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
- ASSERT(Capacity() % Page::kObjectAreaSize == 0);
-
- if (Capacity() == max_capacity_) return false;
-
- ASSERT(Capacity() < max_capacity_);
- // Last page must be valid and its next page is invalid.
- ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());
-
- int available_pages =
- static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
- // We don't want to have to handle small chunks near the end so if there are
- // not kPagesPerChunk pages available without exceeding the max capacity then
- // act as if memory has run out.
- if (available_pages < MemoryAllocator::kPagesPerChunk) return false;
-
- int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
- Page* p = heap()->isolate()->memory_allocator()->AllocatePages(
- desired_pages, &desired_pages, this);
- if (!p->is_valid()) return false;
-
- accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
- ASSERT(Capacity() <= max_capacity_);
-
- heap()->isolate()->memory_allocator()->SetNextPage(last_page, p);
-
- // Sequentially clear region marks of new pages and and cache the
- // new last page in the space.
- while (p->is_valid()) {
- p->SetRegionMarks(Page::kAllRegionsCleanMarks);
- last_page_ = p;
- p = p->next_page();
- }
-
- return true;
-}
-
-
-#ifdef DEBUG
-int PagedSpace::CountTotalPages() {
- int count = 0;
- for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
- count++;
- }
- return count;
-}
-#endif
-
-
-void PagedSpace::Shrink() {
- if (!page_list_is_chunk_ordered_) {
- // We can't shrink space if pages is not chunk-ordered
- // (see comment for class MemoryAllocator for definition).
- return;
- }
-
- // Release half of free pages.
- Page* top_page = AllocationTopPage();
- ASSERT(top_page->is_valid());
-
- // Count the number of pages we would like to free.
- int pages_to_free = 0;
- for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
- pages_to_free++;
- }
-
- // Free pages after top_page.
- Page* p = heap()->isolate()->memory_allocator()->
- FreePages(top_page->next_page());
- heap()->isolate()->memory_allocator()->SetNextPage(top_page, p);
-
- // Find out how many pages we failed to free and update last_page_.
- // Please note pages can only be freed in whole chunks.
- last_page_ = top_page;
- for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
- pages_to_free--;
- last_page_ = p;
- }
-
- accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
- ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
-}
-
-
-bool PagedSpace::EnsureCapacity(int capacity) {
- if (Capacity() >= capacity) return true;
-
- // Start from the allocation top and loop to the last page in the space.
- Page* last_page = AllocationTopPage();
- Page* next_page = last_page->next_page();
- while (next_page->is_valid()) {
- last_page = heap()->isolate()->memory_allocator()->
- FindLastPageInSameChunk(next_page);
- next_page = last_page->next_page();
- }
-
- // Expand the space until it has the required capacity or expansion fails.
- do {
- if (!Expand(last_page)) return false;
- ASSERT(last_page->next_page()->is_valid());
- last_page =
- heap()->isolate()->memory_allocator()->FindLastPageInSameChunk(
- last_page->next_page());
- } while (Capacity() < capacity);
-
- return true;
-}
-
-
-#ifdef DEBUG
-void PagedSpace::Print() { }
-#endif
-
-
-#ifdef DEBUG
-// We do not assume that the PageIterator works, because it depends on the
-// invariants we are checking during verification.
-void PagedSpace::Verify(ObjectVisitor* visitor) {
- // The allocation pointer should be valid, and it should be in a page in the
- // space.
- ASSERT(allocation_info_.VerifyPagedAllocation());
- Page* top_page = Page::FromAllocationTop(allocation_info_.top);
- ASSERT(heap()->isolate()->memory_allocator()->IsPageInSpace(top_page, this));
-
- // Loop over all the pages.
- bool above_allocation_top = false;
- Page* current_page = first_page_;
- while (current_page->is_valid()) {
- if (above_allocation_top) {
- // We don't care what's above the allocation top.
- } else {
- Address top = current_page->AllocationTop();
- if (current_page == top_page) {
- ASSERT(top == allocation_info_.top);
- // The next page will be above the allocation top.
- above_allocation_top = true;
- }
-
- // It should be packed with objects from the bottom to the top.
- Address current = current_page->ObjectAreaStart();
- while (current < top) {
- HeapObject* object = HeapObject::FromAddress(current);
-
- // The first word should be a map, and we expect all map pointers to
- // be in map space.
- Map* map = object->map();
- ASSERT(map->IsMap());
- ASSERT(heap()->map_space()->Contains(map));
-
- // Perform space-specific object verification.
- VerifyObject(object);
-
- // The object itself should look OK.
- object->Verify();
-
- // All the interior pointers should be contained in the heap and
- // have page regions covering intergenerational references should be
- // marked dirty.
- int size = object->Size();
- object->IterateBody(map->instance_type(), size, visitor);
-
- current += size;
- }
-
- // The allocation pointer should not be in the middle of an object.
- ASSERT(current == top);
- }
-
- current_page = current_page->next_page();
- }
-}
-#endif
-
-
-// -----------------------------------------------------------------------------
-// NewSpace implementation
-
-
-bool NewSpace::Setup(Address start, int size) {
- // Setup new space based on the preallocated memory block defined by
- // start and size. The provided space is divided into two semi-spaces.
- // To support fast containment testing in the new space, the size of
- // this chunk must be a power of two and it must be aligned to its size.
- int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
- int maximum_semispace_capacity = heap()->MaxSemiSpaceSize();
-
- ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
- ASSERT(IsPowerOf2(maximum_semispace_capacity));
-
- // Allocate and setup the histogram arrays if necessary.
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
- allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
- promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
-
-#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
- promoted_histogram_[name].set_name(#name);
- INSTANCE_TYPE_LIST(SET_NAME)
-#undef SET_NAME
-#endif
-
- ASSERT(size == 2 * heap()->ReservedSemiSpaceSize());
- ASSERT(IsAddressAligned(start, size, 0));
-
- if (!to_space_.Setup(start,
- initial_semispace_capacity,
- maximum_semispace_capacity)) {
- return false;
- }
- if (!from_space_.Setup(start + maximum_semispace_capacity,
- initial_semispace_capacity,
- maximum_semispace_capacity)) {
- return false;
- }
-
- start_ = start;
- address_mask_ = ~(size - 1);
- object_mask_ = address_mask_ | kHeapObjectTagMask;
- object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
-
- allocation_info_.top = to_space_.low();
- allocation_info_.limit = to_space_.high();
- mc_forwarding_info_.top = NULL;
- mc_forwarding_info_.limit = NULL;
-
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
- return true;
-}
-
-
-void NewSpace::TearDown() {
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
- if (allocated_histogram_) {
- DeleteArray(allocated_histogram_);
- allocated_histogram_ = NULL;
- }
- if (promoted_histogram_) {
- DeleteArray(promoted_histogram_);
- promoted_histogram_ = NULL;
- }
-#endif
-
- start_ = NULL;
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
- mc_forwarding_info_.top = NULL;
- mc_forwarding_info_.limit = NULL;
-
- to_space_.TearDown();
- from_space_.TearDown();
-}
-
-
-#ifdef ENABLE_HEAP_PROTECTION
-
-void NewSpace::Protect() {
- heap()->isolate()->memory_allocator()->Protect(ToSpaceLow(), Capacity());
- heap()->isolate()->memory_allocator()->Protect(FromSpaceLow(), Capacity());
-}
-
-
-void NewSpace::Unprotect() {
- heap()->isolate()->memory_allocator()->Unprotect(ToSpaceLow(), Capacity(),
- to_space_.executable());
- heap()->isolate()->memory_allocator()->Unprotect(FromSpaceLow(), Capacity(),
- from_space_.executable());
-}
-
-#endif
-
-
-void NewSpace::Flip() {
- SemiSpace tmp = from_space_;
- from_space_ = to_space_;
- to_space_ = tmp;
-}
-
-
-void NewSpace::Grow() {
- ASSERT(Capacity() < MaximumCapacity());
- if (to_space_.Grow()) {
- // Only grow from space if we managed to grow to space.
- if (!from_space_.Grow()) {
- // If we managed to grow to space but couldn't grow from space,
- // attempt to shrink to space.
- if (!to_space_.ShrinkTo(from_space_.Capacity())) {
- // We are in an inconsistent state because we could not
- // commit/uncommit memory from new space.
- V8::FatalProcessOutOfMemory("Failed to grow new space.");
- }
- }
- }
- allocation_info_.limit = to_space_.high();
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
-
-void NewSpace::Shrink() {
- int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
- int rounded_new_capacity =
- RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
- if (rounded_new_capacity < Capacity() &&
- to_space_.ShrinkTo(rounded_new_capacity)) {
- // Only shrink from space if we managed to shrink to space.
- if (!from_space_.ShrinkTo(rounded_new_capacity)) {
- // If we managed to shrink to space but couldn't shrink from
- // space, attempt to grow to space again.
- if (!to_space_.GrowTo(from_space_.Capacity())) {
- // We are in an inconsistent state because we could not
- // commit/uncommit memory from new space.
- V8::FatalProcessOutOfMemory("Failed to shrink new space.");
- }
- }
- }
- allocation_info_.limit = to_space_.high();
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
-
-void NewSpace::ResetAllocationInfo() {
- allocation_info_.top = to_space_.low();
- allocation_info_.limit = to_space_.high();
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
-
-void NewSpace::MCResetRelocationInfo() {
- mc_forwarding_info_.top = from_space_.low();
- mc_forwarding_info_.limit = from_space_.high();
- ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
-}
-
-
-void NewSpace::MCCommitRelocationInfo() {
- // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
- // valid allocation info for the to space.
- allocation_info_.top = mc_forwarding_info_.top;
- allocation_info_.limit = to_space_.high();
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
-
-#ifdef DEBUG
-// We do not use the SemispaceIterator because verification doesn't assume
-// that it works (it depends on the invariants we are checking).
-void NewSpace::Verify() {
- // The allocation pointer should be in the space or at the very end.
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-
- // There should be objects packed in from the low address up to the
- // allocation pointer.
- Address current = to_space_.low();
- while (current < top()) {
- HeapObject* object = HeapObject::FromAddress(current);
-
- // The first word should be a map, and we expect all map pointers to
- // be in map space.
- Map* map = object->map();
- ASSERT(map->IsMap());
- ASSERT(heap()->map_space()->Contains(map));
-
- // The object should not be code or a map.
- ASSERT(!object->IsMap());
- ASSERT(!object->IsCode());
-
- // The object itself should look OK.
- object->Verify();
-
- // All the interior pointers should be contained in the heap.
- VerifyPointersVisitor visitor;
- int size = object->Size();
- object->IterateBody(map->instance_type(), size, &visitor);
-
- current += size;
- }
-
- // The allocation pointer should not be in the middle of an object.
- ASSERT(current == top());
-}
-#endif
-
-
-bool SemiSpace::Commit() {
- ASSERT(!is_committed());
- if (!heap()->isolate()->memory_allocator()->CommitBlock(
- start_, capacity_, executable())) {
- return false;
- }
- committed_ = true;
- return true;
-}
-
-
-bool SemiSpace::Uncommit() {
- ASSERT(is_committed());
- if (!heap()->isolate()->memory_allocator()->UncommitBlock(
- start_, capacity_)) {
- return false;
- }
- committed_ = false;
- return true;
-}
-
-
-// -----------------------------------------------------------------------------
-// SemiSpace implementation
-
-bool SemiSpace::Setup(Address start,
- int initial_capacity,
- int maximum_capacity) {
- // Creates a space in the young generation. The constructor does not
- // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
- // memory of size 'capacity' when set up, and does not grow or shrink
- // otherwise. In the mark-compact collector, the memory region of the from
- // space is used as the marking stack. It requires contiguous memory
- // addresses.
- initial_capacity_ = initial_capacity;
- capacity_ = initial_capacity;
- maximum_capacity_ = maximum_capacity;
- committed_ = false;
-
- start_ = start;
- address_mask_ = ~(maximum_capacity - 1);
- object_mask_ = address_mask_ | kHeapObjectTagMask;
- object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
- age_mark_ = start_;
-
- return Commit();
-}
-
-
-void SemiSpace::TearDown() {
- start_ = NULL;
- capacity_ = 0;
-}
-
-
-bool SemiSpace::Grow() {
- // Double the semispace size but only up to maximum capacity.
- int maximum_extra = maximum_capacity_ - capacity_;
- int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
- maximum_extra);
- if (!heap()->isolate()->memory_allocator()->CommitBlock(
- high(), extra, executable())) {
- return false;
- }
- capacity_ += extra;
- return true;
-}
-
-
-bool SemiSpace::GrowTo(int new_capacity) {
- ASSERT(new_capacity <= maximum_capacity_);
- ASSERT(new_capacity > capacity_);
- size_t delta = new_capacity - capacity_;
- ASSERT(IsAligned(delta, OS::AllocateAlignment()));
- if (!heap()->isolate()->memory_allocator()->CommitBlock(
- high(), delta, executable())) {
- return false;
- }
- capacity_ = new_capacity;
- return true;
-}
-
-
-bool SemiSpace::ShrinkTo(int new_capacity) {
- ASSERT(new_capacity >= initial_capacity_);
- ASSERT(new_capacity < capacity_);
- size_t delta = capacity_ - new_capacity;
- ASSERT(IsAligned(delta, OS::AllocateAlignment()));
- if (!heap()->isolate()->memory_allocator()->UncommitBlock(
- high() - delta, delta)) {
- return false;
- }
- capacity_ = new_capacity;
- return true;
-}
-
-
-#ifdef DEBUG
-void SemiSpace::Print() { }
-
-
-void SemiSpace::Verify() { }
-#endif
-
-
-// -----------------------------------------------------------------------------
-// SemiSpaceIterator implementation.
-SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
- Initialize(space, space->bottom(), space->top(), NULL);
-}
-
-
-SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
- HeapObjectCallback size_func) {
- Initialize(space, space->bottom(), space->top(), size_func);
-}
-
-
-SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
- Initialize(space, start, space->top(), NULL);
-}
-
-
-void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
- Address end,
- HeapObjectCallback size_func) {
- ASSERT(space->ToSpaceContains(start));
- ASSERT(space->ToSpaceLow() <= end
- && end <= space->ToSpaceHigh());
- space_ = &space->to_space_;
- current_ = start;
- limit_ = end;
- size_func_ = size_func;
-}
-
-
-#ifdef DEBUG
-// heap_histograms is shared, always clear it before using it.
-static void ClearHistograms() {
- Isolate* isolate = Isolate::Current();
- // We reset the name each time, though it hasn't changed.
-#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
- INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
-#undef DEF_TYPE_NAME
-
-#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
- INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
-#undef CLEAR_HISTOGRAM
-
- isolate->js_spill_information()->Clear();
-}
-
-
-static void ClearCodeKindStatistics() {
- Isolate* isolate = Isolate::Current();
- for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
- isolate->code_kind_statistics()[i] = 0;
- }
-}
-
-
-static void ReportCodeKindStatistics() {
- Isolate* isolate = Isolate::Current();
- const char* table[Code::NUMBER_OF_KINDS] = { NULL };
-
-#define CASE(name) \
- case Code::name: table[Code::name] = #name; \
- break
-
- for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
- switch (static_cast<Code::Kind>(i)) {
- CASE(FUNCTION);
- CASE(OPTIMIZED_FUNCTION);
- CASE(STUB);
- CASE(BUILTIN);
- CASE(LOAD_IC);
- CASE(KEYED_LOAD_IC);
- CASE(KEYED_EXTERNAL_ARRAY_LOAD_IC);
- CASE(STORE_IC);
- CASE(KEYED_STORE_IC);
- CASE(KEYED_EXTERNAL_ARRAY_STORE_IC);
- CASE(CALL_IC);
- CASE(KEYED_CALL_IC);
- CASE(BINARY_OP_IC);
- CASE(TYPE_RECORDING_BINARY_OP_IC);
- CASE(COMPARE_IC);
- }
- }
-
-#undef CASE
-
- PrintF("\n Code kind histograms: \n");
- for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
- if (isolate->code_kind_statistics()[i] > 0) {
- PrintF(" %-20s: %10d bytes\n", table[i],
- isolate->code_kind_statistics()[i]);
- }
- }
- PrintF("\n");
-}
-
-
-static int CollectHistogramInfo(HeapObject* obj) {
- Isolate* isolate = Isolate::Current();
- InstanceType type = obj->map()->instance_type();
- ASSERT(0 <= type && type <= LAST_TYPE);
- ASSERT(isolate->heap_histograms()[type].name() != NULL);
- isolate->heap_histograms()[type].increment_number(1);
- isolate->heap_histograms()[type].increment_bytes(obj->Size());
-
- if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
- JSObject::cast(obj)->IncrementSpillStatistics(
- isolate->js_spill_information());
- }
-
- return obj->Size();
-}
-
-
-static void ReportHistogram(bool print_spill) {
- Isolate* isolate = Isolate::Current();
- PrintF("\n Object Histogram:\n");
- for (int i = 0; i <= LAST_TYPE; i++) {
- if (isolate->heap_histograms()[i].number() > 0) {
- PrintF(" %-34s%10d (%10d bytes)\n",
- isolate->heap_histograms()[i].name(),
- isolate->heap_histograms()[i].number(),
- isolate->heap_histograms()[i].bytes());
- }
- }
- PrintF("\n");
-
- // Summarize string types.
- int string_number = 0;
- int string_bytes = 0;
-#define INCREMENT(type, size, name, camel_name) \
- string_number += isolate->heap_histograms()[type].number(); \
- string_bytes += isolate->heap_histograms()[type].bytes();
- STRING_TYPE_LIST(INCREMENT)
-#undef INCREMENT
- if (string_number > 0) {
- PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
- string_bytes);
- }
-
- if (FLAG_collect_heap_spill_statistics && print_spill) {
- isolate->js_spill_information()->Print();
- }
-}
-#endif // DEBUG
-
-
-// Support for statistics gathering for --heap-stats and --log-gc.
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
-void NewSpace::ClearHistograms() {
- for (int i = 0; i <= LAST_TYPE; i++) {
- allocated_histogram_[i].clear();
- promoted_histogram_[i].clear();
- }
-}
-
-// Because the copying collector does not touch garbage objects, we iterate
-// the new space before a collection to get a histogram of allocated objects.
-// This only happens (1) when compiled with DEBUG and the --heap-stats flag is
-// set, or when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
-// flag is set.
-void NewSpace::CollectStatistics() {
- ClearHistograms();
- SemiSpaceIterator it(this);
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
- RecordAllocation(obj);
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-static void DoReportStatistics(Isolate* isolate,
- HistogramInfo* info, const char* description) {
- LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
- // Lump all the string types together.
- int string_number = 0;
- int string_bytes = 0;
-#define INCREMENT(type, size, name, camel_name) \
- string_number += info[type].number(); \
- string_bytes += info[type].bytes();
- STRING_TYPE_LIST(INCREMENT)
-#undef INCREMENT
- if (string_number > 0) {
- LOG(isolate,
- HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
- }
-
- // Then do the other types.
- for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
- if (info[i].number() > 0) {
- LOG(isolate,
- HeapSampleItemEvent(info[i].name(), info[i].number(),
- info[i].bytes()));
- }
- }
- LOG(isolate, HeapSampleEndEvent("NewSpace", description));
-}
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-
-void NewSpace::ReportStatistics() {
-#ifdef DEBUG
- if (FLAG_heap_stats) {
- float pct = static_cast<float>(Available()) / Capacity();
- PrintF(" capacity: %" V8_PTR_PREFIX "d"
- ", available: %" V8_PTR_PREFIX "d, %%%d\n",
- Capacity(), Available(), static_cast<int>(pct*100));
- PrintF("\n Object Histogram:\n");
- for (int i = 0; i <= LAST_TYPE; i++) {
- if (allocated_histogram_[i].number() > 0) {
- PrintF(" %-34s%10d (%10d bytes)\n",
- allocated_histogram_[i].name(),
- allocated_histogram_[i].number(),
- allocated_histogram_[i].bytes());
- }
- }
- PrintF("\n");
- }
-#endif // DEBUG
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (FLAG_log_gc) {
- Isolate* isolate = ISOLATE;
- DoReportStatistics(isolate, allocated_histogram_, "allocated");
- DoReportStatistics(isolate, promoted_histogram_, "promoted");
- }
-#endif // ENABLE_LOGGING_AND_PROFILING
-}
-
-
-void NewSpace::RecordAllocation(HeapObject* obj) {
- InstanceType type = obj->map()->instance_type();
- ASSERT(0 <= type && type <= LAST_TYPE);
- allocated_histogram_[type].increment_number(1);
- allocated_histogram_[type].increment_bytes(obj->Size());
-}
-
-
-void NewSpace::RecordPromotion(HeapObject* obj) {
- InstanceType type = obj->map()->instance_type();
- ASSERT(0 <= type && type <= LAST_TYPE);
- promoted_histogram_[type].increment_number(1);
- promoted_histogram_[type].increment_bytes(obj->Size());
-}
-#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
-
-
-// -----------------------------------------------------------------------------
-// Free lists for old object spaces implementation
-
-void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
- ASSERT(size_in_bytes > 0);
- ASSERT(IsAligned(size_in_bytes, kPointerSize));
-
- // We write a map and possibly size information to the block. If the block
- // is big enough to be a ByteArray with at least one extra word (the next
- // pointer), we set its map to be the byte array map and its size to an
- // appropriate array length for the desired size from HeapObject::Size().
- // If the block is too small (eg, one or two words), to hold both a size
- // field and a next pointer, we give it a filler map that gives it the
- // correct size.
- if (size_in_bytes > ByteArray::kHeaderSize) {
- set_map(heap->raw_unchecked_byte_array_map());
- // Can't use ByteArray::cast because it fails during deserialization.
- ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
- this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
- } else if (size_in_bytes == kPointerSize) {
- set_map(heap->raw_unchecked_one_pointer_filler_map());
- } else if (size_in_bytes == 2 * kPointerSize) {
- set_map(heap->raw_unchecked_two_pointer_filler_map());
- } else {
- UNREACHABLE();
- }
- // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
- // deserialization because the byte array map is not done yet.
-}
-
-
-Address FreeListNode::next(Heap* heap) {
- ASSERT(IsFreeListNode(this));
- if (map() == heap->raw_unchecked_byte_array_map()) {
- ASSERT(Size() >= kNextOffset + kPointerSize);
- return Memory::Address_at(address() + kNextOffset);
- } else {
- return Memory::Address_at(address() + kPointerSize);
- }
-}
-
-
-void FreeListNode::set_next(Heap* heap, Address next) {
- ASSERT(IsFreeListNode(this));
- if (map() == heap->raw_unchecked_byte_array_map()) {
- ASSERT(Size() >= kNextOffset + kPointerSize);
- Memory::Address_at(address() + kNextOffset) = next;
- } else {
- Memory::Address_at(address() + kPointerSize) = next;
- }
-}
-
-
-OldSpaceFreeList::OldSpaceFreeList(Heap* heap, AllocationSpace owner)
- : heap_(heap),
- owner_(owner) {
- Reset();
-}
-
-
-void OldSpaceFreeList::Reset() {
- available_ = 0;
- for (int i = 0; i < kFreeListsLength; i++) {
- free_[i].head_node_ = NULL;
- }
- needs_rebuild_ = false;
- finger_ = kHead;
- free_[kHead].next_size_ = kEnd;
-}
-
-
-void OldSpaceFreeList::RebuildSizeList() {
- ASSERT(needs_rebuild_);
- int cur = kHead;
- for (int i = cur + 1; i < kFreeListsLength; i++) {
- if (free_[i].head_node_ != NULL) {
- free_[cur].next_size_ = i;
- cur = i;
- }
- }
- free_[cur].next_size_ = kEnd;
- needs_rebuild_ = false;
-}
-
-
-int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
-#ifdef DEBUG
- Isolate::Current()->memory_allocator()->ZapBlock(start, size_in_bytes);
-#endif
- FreeListNode* node = FreeListNode::FromAddress(start);
- node->set_size(heap_, size_in_bytes);
-
- // We don't use the freelists in compacting mode. This makes it more like a
- // GC that only has mark-sweep-compact and doesn't have a mark-sweep
- // collector.
- if (FLAG_always_compact) {
- return size_in_bytes;
- }
-
- // Early return to drop too-small blocks on the floor (one or two word
- // blocks cannot hold a map pointer, a size field, and a pointer to the
- // next block in the free list).
- if (size_in_bytes < kMinBlockSize) {
- return size_in_bytes;
- }
-
- // Insert other blocks at the head of an exact free list.
- int index = size_in_bytes >> kPointerSizeLog2;
- node->set_next(heap_, free_[index].head_node_);
- free_[index].head_node_ = node->address();
- available_ += size_in_bytes;
- needs_rebuild_ = true;
- return 0;
-}
-
-
-MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
- ASSERT(0 < size_in_bytes);
- ASSERT(size_in_bytes <= kMaxBlockSize);
- ASSERT(IsAligned(size_in_bytes, kPointerSize));
-
- if (needs_rebuild_) RebuildSizeList();
- int index = size_in_bytes >> kPointerSizeLog2;
- // Check for a perfect fit.
- if (free_[index].head_node_ != NULL) {
- FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
- // If this was the last block of its size, remove the size.
- if ((free_[index].head_node_ = node->next(heap_)) == NULL)
- RemoveSize(index);
- available_ -= size_in_bytes;
- *wasted_bytes = 0;
- ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
- return node;
- }
- // Search the size list for the best fit.
- int prev = finger_ < index ? finger_ : kHead;
- int cur = FindSize(index, &prev);
- ASSERT(index < cur);
- if (cur == kEnd) {
- // No large enough size in list.
- *wasted_bytes = 0;
- return Failure::RetryAfterGC(owner_);
- }
- ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
- int rem = cur - index;
- int rem_bytes = rem << kPointerSizeLog2;
- FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
- ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
- FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
- size_in_bytes);
- // Distinguish the cases prev < rem < cur and rem <= prev < cur
- // to avoid many redundant tests and calls to Insert/RemoveSize.
- if (prev < rem) {
- // Simple case: insert rem between prev and cur.
- finger_ = prev;
- free_[prev].next_size_ = rem;
- // If this was the last block of size cur, remove the size.
- if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
- free_[rem].next_size_ = free_[cur].next_size_;
- } else {
- free_[rem].next_size_ = cur;
- }
- // Add the remainder block.
- rem_node->set_size(heap_, rem_bytes);
- rem_node->set_next(heap_, free_[rem].head_node_);
- free_[rem].head_node_ = rem_node->address();
- } else {
- // If this was the last block of size cur, remove the size.
- if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
- finger_ = prev;
- free_[prev].next_size_ = free_[cur].next_size_;
- }
- if (rem_bytes < kMinBlockSize) {
- // Too-small remainder is wasted.
- rem_node->set_size(heap_, rem_bytes);
- available_ -= size_in_bytes + rem_bytes;
- *wasted_bytes = rem_bytes;
- return cur_node;
- }
- // Add the remainder block and, if needed, insert its size.
- rem_node->set_size(heap_, rem_bytes);
- rem_node->set_next(heap_, free_[rem].head_node_);
- free_[rem].head_node_ = rem_node->address();
- if (rem_node->next(heap_) == NULL) InsertSize(rem);
- }
- available_ -= size_in_bytes;
- *wasted_bytes = 0;
- return cur_node;
-}
-
-
-void OldSpaceFreeList::MarkNodes() {
- for (int i = 0; i < kFreeListsLength; i++) {
- Address cur_addr = free_[i].head_node_;
- while (cur_addr != NULL) {
- FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
- cur_addr = cur_node->next(heap_);
- cur_node->SetMark();
- }
- }
-}
-
-
-#ifdef DEBUG
-bool OldSpaceFreeList::Contains(FreeListNode* node) {
- for (int i = 0; i < kFreeListsLength; i++) {
- Address cur_addr = free_[i].head_node_;
- while (cur_addr != NULL) {
- FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
- if (cur_node == node) return true;
- cur_addr = cur_node->next(heap_);
- }
- }
- return false;
-}
-#endif
-
-
-FixedSizeFreeList::FixedSizeFreeList(Heap* heap,
- AllocationSpace owner,
- int object_size)
- : heap_(heap), owner_(owner), object_size_(object_size) {
- Reset();
-}
-
-
-void FixedSizeFreeList::Reset() {
- available_ = 0;
- head_ = tail_ = NULL;
-}
-
-
-void FixedSizeFreeList::Free(Address start) {
-#ifdef DEBUG
- Isolate::Current()->memory_allocator()->ZapBlock(start, object_size_);
-#endif
- // We only use the freelists with mark-sweep.
- ASSERT(!HEAP->mark_compact_collector()->IsCompacting());
- FreeListNode* node = FreeListNode::FromAddress(start);
- node->set_size(heap_, object_size_);
- node->set_next(heap_, NULL);
- if (head_ == NULL) {
- tail_ = head_ = node->address();
- } else {
- FreeListNode::FromAddress(tail_)->set_next(heap_, node->address());
- tail_ = node->address();
- }
- available_ += object_size_;
-}
-
-
-MaybeObject* FixedSizeFreeList::Allocate() {
- if (head_ == NULL) {
- return Failure::RetryAfterGC(owner_);
- }
-
- ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
- FreeListNode* node = FreeListNode::FromAddress(head_);
- head_ = node->next(heap_);
- available_ -= object_size_;
- return node;
-}
-
-
-void FixedSizeFreeList::MarkNodes() {
- Address cur_addr = head_;
- while (cur_addr != NULL && cur_addr != tail_) {
- FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
- cur_addr = cur_node->next(heap_);
- cur_node->SetMark();
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// OldSpace implementation
-
-void OldSpace::PrepareForMarkCompact(bool will_compact) {
- // Call prepare of the super class.
- PagedSpace::PrepareForMarkCompact(will_compact);
-
- if (will_compact) {
- // Reset relocation info. During a compacting collection, everything in
- // the space is considered 'available' and we will rediscover live data
- // and waste during the collection.
- MCResetRelocationInfo();
- ASSERT(Available() == Capacity());
- } else {
- // During a non-compacting collection, everything below the linear
- // allocation pointer is considered allocated (everything above is
- // available) and we will rediscover available and wasted bytes during
- // the collection.
- accounting_stats_.AllocateBytes(free_list_.available());
- accounting_stats_.FillWastedBytes(Waste());
- }
-
- // Clear the free list before a full GC---it will be rebuilt afterward.
- free_list_.Reset();
-}
-
-
-void OldSpace::MCCommitRelocationInfo() {
- // Update fast allocation info.
- allocation_info_.top = mc_forwarding_info_.top;
- allocation_info_.limit = mc_forwarding_info_.limit;
- ASSERT(allocation_info_.VerifyPagedAllocation());
-
- // The space is compacted and we haven't yet built free lists or
- // wasted any space.
- ASSERT(Waste() == 0);
- ASSERT(AvailableFree() == 0);
-
- // Build the free list for the space.
- int computed_size = 0;
- PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
- while (it.has_next()) {
- Page* p = it.next();
- // Space below the relocation pointer is allocated.
- computed_size +=
- static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
- if (it.has_next()) {
- // Free the space at the top of the page.
- int extra_size =
- static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
- if (extra_size > 0) {
- int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
- extra_size);
- // The bytes we have just "freed" to add to the free list were
- // already accounted as available.
- accounting_stats_.WasteBytes(wasted_bytes);
- }
- }
- }
-
- // Make sure the computed size - based on the used portion of the pages in
- // use - matches the size obtained while computing forwarding addresses.
- ASSERT(computed_size == Size());
-}
-
-
-bool NewSpace::ReserveSpace(int bytes) {
- // We can't reliably unpack a partial snapshot that needs more new space
- // space than the minimum NewSpace size.
- ASSERT(bytes <= InitialCapacity());
- Address limit = allocation_info_.limit;
- Address top = allocation_info_.top;
- return limit - top >= bytes;
-}
-
-
-void PagedSpace::FreePages(Page* prev, Page* last) {
- if (last == AllocationTopPage()) {
- // Pages are already at the end of used pages.
- return;
- }
-
- Page* first = NULL;
-
- // Remove pages from the list.
- if (prev == NULL) {
- first = first_page_;
- first_page_ = last->next_page();
- } else {
- first = prev->next_page();
- heap()->isolate()->memory_allocator()->SetNextPage(
- prev, last->next_page());
- }
-
- // Attach it after the last page.
- heap()->isolate()->memory_allocator()->SetNextPage(last_page_, first);
- last_page_ = last;
- heap()->isolate()->memory_allocator()->SetNextPage(last, NULL);
-
- // Clean them up.
- do {
- first->InvalidateWatermark(true);
- first->SetAllocationWatermark(first->ObjectAreaStart());
- first->SetCachedAllocationWatermark(first->ObjectAreaStart());
- first->SetRegionMarks(Page::kAllRegionsCleanMarks);
- first = first->next_page();
- } while (first != NULL);
-
- // Order of pages in this space might no longer be consistent with
- // order of pages in chunks.
- page_list_is_chunk_ordered_ = false;
-}
-
-
-void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
- const bool add_to_freelist = true;
-
- // Mark used and unused pages to properly fill unused pages
- // after reordering.
- PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
- Page* last_in_use = AllocationTopPage();
- bool in_use = true;
-
- while (all_pages_iterator.has_next()) {
- Page* p = all_pages_iterator.next();
- p->SetWasInUseBeforeMC(in_use);
- if (p == last_in_use) {
- // We passed a page containing allocation top. All consequent
- // pages are not used.
- in_use = false;
- }
- }
-
- if (page_list_is_chunk_ordered_) return;
-
- Page* new_last_in_use = Page::FromAddress(NULL);
- heap()->isolate()->memory_allocator()->RelinkPageListInChunkOrder(
- this, &first_page_, &last_page_, &new_last_in_use);
- ASSERT(new_last_in_use->is_valid());
-
- if (new_last_in_use != last_in_use) {
- // Current allocation top points to a page which is now in the middle
- // of page list. We should move allocation top forward to the new last
- // used page so various object iterators will continue to work properly.
- int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
- last_in_use->AllocationTop());
-
- last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
- if (size_in_bytes > 0) {
- Address start = last_in_use->AllocationTop();
- if (deallocate_blocks) {
- accounting_stats_.AllocateBytes(size_in_bytes);
- DeallocateBlock(start, size_in_bytes, add_to_freelist);
- } else {
- heap()->CreateFillerObjectAt(start, size_in_bytes);
- }
- }
-
- // New last in use page was in the middle of the list before
- // sorting so it full.
- SetTop(new_last_in_use->AllocationTop());
-
- ASSERT(AllocationTopPage() == new_last_in_use);
- ASSERT(AllocationTopPage()->WasInUseBeforeMC());
- }
-
- PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
- while (pages_in_use_iterator.has_next()) {
- Page* p = pages_in_use_iterator.next();
- if (!p->WasInUseBeforeMC()) {
- // Empty page is in the middle of a sequence of used pages.
- // Allocate it as a whole and deallocate immediately.
- int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
- p->ObjectAreaStart());
-
- p->SetAllocationWatermark(p->ObjectAreaStart());
- Address start = p->ObjectAreaStart();
- if (deallocate_blocks) {
- accounting_stats_.AllocateBytes(size_in_bytes);
- DeallocateBlock(start, size_in_bytes, add_to_freelist);
- } else {
- heap()->CreateFillerObjectAt(start, size_in_bytes);
- }
- }
- }
-
- page_list_is_chunk_ordered_ = true;
-}
-
-
-void PagedSpace::PrepareForMarkCompact(bool will_compact) {
- if (will_compact) {
- RelinkPageListInChunkOrder(false);
- }
-}
-
-
-bool PagedSpace::ReserveSpace(int bytes) {
- Address limit = allocation_info_.limit;
- Address top = allocation_info_.top;
- if (limit - top >= bytes) return true;
-
- // There wasn't enough space in the current page. Lets put the rest
- // of the page on the free list and start a fresh page.
- PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
-
- Page* reserved_page = TopPageOf(allocation_info_);
- int bytes_left_to_reserve = bytes;
- while (bytes_left_to_reserve > 0) {
- if (!reserved_page->next_page()->is_valid()) {
- if (heap()->OldGenerationAllocationLimitReached()) return false;
- Expand(reserved_page);
- }
- bytes_left_to_reserve -= Page::kPageSize;
- reserved_page = reserved_page->next_page();
- if (!reserved_page->is_valid()) return false;
- }
- ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
- TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
- SetAllocationInfo(&allocation_info_,
- TopPageOf(allocation_info_)->next_page());
- return true;
-}
-
-
-// You have to call this last, since the implementation from PagedSpace
-// doesn't know that memory was 'promised' to large object space.
-bool LargeObjectSpace::ReserveSpace(int bytes) {
- return heap()->OldGenerationSpaceAvailable() >= bytes;
-}
-
-
-// Slow case for normal allocation. Try in order: (1) allocate in the next
-// page in the space, (2) allocate off the space's free list, (3) expand the
-// space, (4) fail.
-HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
- // Linear allocation in this space has failed. If there is another page
- // in the space, move to that page and allocate there. This allocation
- // should succeed (size_in_bytes should not be greater than a page's
- // object area size).
- Page* current_page = TopPageOf(allocation_info_);
- if (current_page->next_page()->is_valid()) {
- return AllocateInNextPage(current_page, size_in_bytes);
- }
-
- // There is no next page in this space. Try free list allocation unless that
- // is currently forbidden.
- if (!heap()->linear_allocation()) {
- int wasted_bytes;
- Object* result;
- MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
- accounting_stats_.WasteBytes(wasted_bytes);
- if (maybe->ToObject(&result)) {
- accounting_stats_.AllocateBytes(size_in_bytes);
-
- HeapObject* obj = HeapObject::cast(result);
- Page* p = Page::FromAddress(obj->address());
-
- if (obj->address() >= p->AllocationWatermark()) {
- // There should be no hole between the allocation watermark
- // and allocated object address.
- // Memory above the allocation watermark was not swept and
- // might contain garbage pointers to new space.
- ASSERT(obj->address() == p->AllocationWatermark());
- p->SetAllocationWatermark(obj->address() + size_in_bytes);
- }
-
- return obj;
- }
- }
-
- // Free list allocation failed and there is no next page. Fail if we have
- // hit the old generation size limit that should cause a garbage
- // collection.
- if (!heap()->always_allocate() &&
- heap()->OldGenerationAllocationLimitReached()) {
- return NULL;
- }
-
- // Try to expand the space and allocate in the new next page.
- ASSERT(!current_page->next_page()->is_valid());
- if (Expand(current_page)) {
- return AllocateInNextPage(current_page, size_in_bytes);
- }
-
- // Finally, fail.
- return NULL;
-}
-
-
-void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
- current_page->SetAllocationWatermark(allocation_info_.top);
- int free_size =
- static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
- if (free_size > 0) {
- int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
- accounting_stats_.WasteBytes(wasted_bytes);
- }
-}
-
-
-void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
- current_page->SetAllocationWatermark(allocation_info_.top);
- int free_size =
- static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
- // In the fixed space free list all the free list items have the right size.
- // We use up the rest of the page while preserving this invariant.
- while (free_size >= object_size_in_bytes_) {
- free_list_.Free(allocation_info_.top);
- allocation_info_.top += object_size_in_bytes_;
- free_size -= object_size_in_bytes_;
- accounting_stats_.WasteBytes(object_size_in_bytes_);
- }
-}
-
-
-// Add the block at the top of the page to the space's free list, set the
-// allocation info to the next page (assumed to be one), and allocate
-// linearly there.
-HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
- int size_in_bytes) {
- ASSERT(current_page->next_page()->is_valid());
- Page* next_page = current_page->next_page();
- next_page->ClearGCFields();
- PutRestOfCurrentPageOnFreeList(current_page);
- SetAllocationInfo(&allocation_info_, next_page);
- return AllocateLinearly(&allocation_info_, size_in_bytes);
-}
-
-
-void OldSpace::DeallocateBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist) {
- Free(start, size_in_bytes, add_to_freelist);
-}
-
-
-#ifdef DEBUG
-void PagedSpace::ReportCodeStatistics() {
- Isolate* isolate = Isolate::Current();
- CommentStatistic* comments_statistics =
- isolate->paged_space_comments_statistics();
- ReportCodeKindStatistics();
- PrintF("Code comment statistics (\" [ comment-txt : size/ "
- "count (average)\"):\n");
- for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
- const CommentStatistic& cs = comments_statistics[i];
- if (cs.size > 0) {
- PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
- cs.size/cs.count);
- }
- }
- PrintF("\n");
-}
-
-
-void PagedSpace::ResetCodeStatistics() {
- Isolate* isolate = Isolate::Current();
- CommentStatistic* comments_statistics =
- isolate->paged_space_comments_statistics();
- ClearCodeKindStatistics();
- for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
- comments_statistics[i].Clear();
- }
- comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
- comments_statistics[CommentStatistic::kMaxComments].size = 0;
- comments_statistics[CommentStatistic::kMaxComments].count = 0;
-}
-
-
-// Adds comment to 'comment_statistics' table. Performance OK as long as
-// 'kMaxComments' is small
-static void EnterComment(Isolate* isolate, const char* comment, int delta) {
- CommentStatistic* comments_statistics =
- isolate->paged_space_comments_statistics();
- // Do not count empty comments
- if (delta <= 0) return;
- CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
- // Search for a free or matching entry in 'comments_statistics': 'cs'
- // points to result.
- for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
- if (comments_statistics[i].comment == NULL) {
- cs = &comments_statistics[i];
- cs->comment = comment;
- break;
- } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
- cs = &comments_statistics[i];
- break;
- }
- }
- // Update entry for 'comment'
- cs->size += delta;
- cs->count += 1;
-}
-
-
-// Call for each nested comment start (start marked with '[ xxx', end marked
-// with ']'. RelocIterator 'it' must point to a comment reloc info.
-static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
- ASSERT(!it->done());
- ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
- const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
- if (tmp[0] != '[') {
- // Not a nested comment; skip
- return;
- }
-
- // Search for end of nested comment or a new nested comment
- const char* const comment_txt =
- reinterpret_cast<const char*>(it->rinfo()->data());
- const byte* prev_pc = it->rinfo()->pc();
- int flat_delta = 0;
- it->next();
- while (true) {
- // All nested comments must be terminated properly, and therefore exit
- // from loop.
- ASSERT(!it->done());
- if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
- const char* const txt =
- reinterpret_cast<const char*>(it->rinfo()->data());
- flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
- if (txt[0] == ']') break; // End of nested comment
- // A new comment
- CollectCommentStatistics(isolate, it);
- // Skip code that was covered with previous comment
- prev_pc = it->rinfo()->pc();
- }
- it->next();
- }
- EnterComment(isolate, comment_txt, flat_delta);
-}
-
-
-// Collects code size statistics:
-// - by code kind
-// - by code comment
-void PagedSpace::CollectCodeStatistics() {
- Isolate* isolate = heap()->isolate();
- HeapObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
- if (obj->IsCode()) {
- Code* code = Code::cast(obj);
- isolate->code_kind_statistics()[code->kind()] += code->Size();
- RelocIterator it(code);
- int delta = 0;
- const byte* prev_pc = code->instruction_start();
- while (!it.done()) {
- if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
- delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
- CollectCommentStatistics(isolate, &it);
- prev_pc = it.rinfo()->pc();
- }
- it.next();
- }
-
- ASSERT(code->instruction_start() <= prev_pc &&
- prev_pc <= code->instruction_end());
- delta += static_cast<int>(code->instruction_end() - prev_pc);
- EnterComment(isolate, "NoComment", delta);
- }
- }
-}
-
-
-void OldSpace::ReportStatistics() {
- int pct = static_cast<int>(Available() * 100 / Capacity());
- PrintF(" capacity: %" V8_PTR_PREFIX "d"
- ", waste: %" V8_PTR_PREFIX "d"
- ", available: %" V8_PTR_PREFIX "d, %%%d\n",
- Capacity(), Waste(), Available(), pct);
-
- ClearHistograms();
- HeapObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
- CollectHistogramInfo(obj);
- ReportHistogram(true);
-}
-#endif
-
-// -----------------------------------------------------------------------------
-// FixedSpace implementation
-
-void FixedSpace::PrepareForMarkCompact(bool will_compact) {
- // Call prepare of the super class.
- PagedSpace::PrepareForMarkCompact(will_compact);
-
- if (will_compact) {
- // Reset relocation info.
- MCResetRelocationInfo();
-
- // During a compacting collection, everything in the space is considered
- // 'available' (set by the call to MCResetRelocationInfo) and we will
- // rediscover live and wasted bytes during the collection.
- ASSERT(Available() == Capacity());
- } else {
- // During a non-compacting collection, everything below the linear
- // allocation pointer except wasted top-of-page blocks is considered
- // allocated and we will rediscover available bytes during the
- // collection.
- accounting_stats_.AllocateBytes(free_list_.available());
- }
-
- // Clear the free list before a full GC---it will be rebuilt afterward.
- free_list_.Reset();
-}
-
-
-void FixedSpace::MCCommitRelocationInfo() {
- // Update fast allocation info.
- allocation_info_.top = mc_forwarding_info_.top;
- allocation_info_.limit = mc_forwarding_info_.limit;
- ASSERT(allocation_info_.VerifyPagedAllocation());
-
- // The space is compacted and we haven't yet wasted any space.
- ASSERT(Waste() == 0);
-
- // Update allocation_top of each page in use and compute waste.
- int computed_size = 0;
- PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
- while (it.has_next()) {
- Page* page = it.next();
- Address page_top = page->AllocationTop();
- computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
- if (it.has_next()) {
- accounting_stats_.WasteBytes(
- static_cast<int>(page->ObjectAreaEnd() - page_top));
- page->SetAllocationWatermark(page_top);
- }
- }
-
- // Make sure the computed size - based on the used portion of the
- // pages in use - matches the size we adjust during allocation.
- ASSERT(computed_size == Size());
-}
-
-
-// Slow case for normal allocation. Try in order: (1) allocate in the next
-// page in the space, (2) allocate off the space's free list, (3) expand the
-// space, (4) fail.
-HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
- ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
- // Linear allocation in this space has failed. If there is another page
- // in the space, move to that page and allocate there. This allocation
- // should succeed.
- Page* current_page = TopPageOf(allocation_info_);
- if (current_page->next_page()->is_valid()) {
- return AllocateInNextPage(current_page, size_in_bytes);
- }
-
- // There is no next page in this space. Try free list allocation unless
- // that is currently forbidden. The fixed space free list implicitly assumes
- // that all free blocks are of the fixed size.
- if (!heap()->linear_allocation()) {
- Object* result;
- MaybeObject* maybe = free_list_.Allocate();
- if (maybe->ToObject(&result)) {
- accounting_stats_.AllocateBytes(size_in_bytes);
- HeapObject* obj = HeapObject::cast(result);
- Page* p = Page::FromAddress(obj->address());
-
- if (obj->address() >= p->AllocationWatermark()) {
- // There should be no hole between the allocation watermark
- // and allocated object address.
- // Memory above the allocation watermark was not swept and
- // might contain garbage pointers to new space.
- ASSERT(obj->address() == p->AllocationWatermark());
- p->SetAllocationWatermark(obj->address() + size_in_bytes);
- }
-
- return obj;
- }
- }
-
- // Free list allocation failed and there is no next page. Fail if we have
- // hit the old generation size limit that should cause a garbage
- // collection.
- if (!heap()->always_allocate() &&
- heap()->OldGenerationAllocationLimitReached()) {
- return NULL;
- }
-
- // Try to expand the space and allocate in the new next page.
- ASSERT(!current_page->next_page()->is_valid());
- if (Expand(current_page)) {
- return AllocateInNextPage(current_page, size_in_bytes);
- }
-
- // Finally, fail.
- return NULL;
-}
-
-
-// Move to the next page (there is assumed to be one) and allocate there.
-// The top of page block is always wasted, because it is too small to hold a
-// map.
-HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
- int size_in_bytes) {
- ASSERT(current_page->next_page()->is_valid());
- ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
- ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
- Page* next_page = current_page->next_page();
- next_page->ClearGCFields();
- current_page->SetAllocationWatermark(allocation_info_.top);
- accounting_stats_.WasteBytes(page_extra_);
- SetAllocationInfo(&allocation_info_, next_page);
- return AllocateLinearly(&allocation_info_, size_in_bytes);
-}
-
-
-void FixedSpace::DeallocateBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist) {
- // Free-list elements in fixed space are assumed to have a fixed size.
- // We break the free block into chunks and add them to the free list
- // individually.
- int size = object_size_in_bytes();
- ASSERT(size_in_bytes % size == 0);
- Address end = start + size_in_bytes;
- for (Address a = start; a < end; a += size) {
- Free(a, add_to_freelist);
- }
-}
-
-
-#ifdef DEBUG
-void FixedSpace::ReportStatistics() {
- int pct = static_cast<int>(Available() * 100 / Capacity());
- PrintF(" capacity: %" V8_PTR_PREFIX "d"
- ", waste: %" V8_PTR_PREFIX "d"
- ", available: %" V8_PTR_PREFIX "d, %%%d\n",
- Capacity(), Waste(), Available(), pct);
-
- ClearHistograms();
- HeapObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
- CollectHistogramInfo(obj);
- ReportHistogram(false);
-}
-#endif
-
-
-// -----------------------------------------------------------------------------
-// MapSpace implementation
-
-void MapSpace::PrepareForMarkCompact(bool will_compact) {
- // Call prepare of the super class.
- FixedSpace::PrepareForMarkCompact(will_compact);
-
- if (will_compact) {
- // Initialize map index entry.
- int page_count = 0;
- PageIterator it(this, PageIterator::ALL_PAGES);
- while (it.has_next()) {
- ASSERT_MAP_PAGE_INDEX(page_count);
-
- Page* p = it.next();
- ASSERT(p->mc_page_index == page_count);
-
- page_addresses_[page_count++] = p->address();
- }
- }
-}
-
-
-#ifdef DEBUG
-void MapSpace::VerifyObject(HeapObject* object) {
- // The object should be a map or a free-list node.
- ASSERT(object->IsMap() || object->IsByteArray());
-}
-#endif
-
-
-// -----------------------------------------------------------------------------
-// GlobalPropertyCellSpace implementation
-
-#ifdef DEBUG
-void CellSpace::VerifyObject(HeapObject* object) {
- // The object should be a global object property cell or a free-list node.
- ASSERT(object->IsJSGlobalPropertyCell() ||
- object->map() == heap()->two_pointer_filler_map());
-}
-#endif
-
-
-// -----------------------------------------------------------------------------
-// LargeObjectIterator
-
-LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
- current_ = space->first_chunk_;
- size_func_ = NULL;
-}
-
-
-LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
- HeapObjectCallback size_func) {
- current_ = space->first_chunk_;
- size_func_ = size_func;
-}
-
-
-HeapObject* LargeObjectIterator::next() {
- if (current_ == NULL) return NULL;
-
- HeapObject* object = current_->GetObject();
- current_ = current_->next();
- return object;
-}
-
-
-// -----------------------------------------------------------------------------
-// LargeObjectChunk
-
-LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
- Executability executable) {
- size_t requested = ChunkSizeFor(size_in_bytes);
- size_t size;
- Isolate* isolate = Isolate::Current();
- void* mem = isolate->memory_allocator()->AllocateRawMemory(
- requested, &size, executable);
- if (mem == NULL) return NULL;
-
- // The start of the chunk may be overlayed with a page so we have to
- // make sure that the page flags fit in the size field.
- ASSERT((size & Page::kPageFlagMask) == 0);
-
- LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
- if (size < requested) {
- isolate->memory_allocator()->FreeRawMemory(
- mem, size, executable);
- LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
- return NULL;
- }
-
- ObjectSpace space = (executable == EXECUTABLE)
- ? kObjectSpaceCodeSpace
- : kObjectSpaceLoSpace;
- isolate->memory_allocator()->PerformAllocationCallback(
- space, kAllocationActionAllocate, size);
-
- LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
- chunk->size_ = size;
- Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
- page->heap_ = isolate->heap();
- return chunk;
-}
-
-
-int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
- int os_alignment = static_cast<int>(OS::AllocateAlignment());
- if (os_alignment < Page::kPageSize) {
- size_in_bytes += (Page::kPageSize - os_alignment);
- }
- return size_in_bytes + Page::kObjectStartOffset;
-}
-
-// -----------------------------------------------------------------------------
-// LargeObjectSpace
-
-LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
- : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
- first_chunk_(NULL),
- size_(0),
- page_count_(0),
- objects_size_(0) {}
-
-
-bool LargeObjectSpace::Setup() {
- first_chunk_ = NULL;
- size_ = 0;
- page_count_ = 0;
- objects_size_ = 0;
- return true;
-}
-
-
-void LargeObjectSpace::TearDown() {
- while (first_chunk_ != NULL) {
- LargeObjectChunk* chunk = first_chunk_;
- first_chunk_ = first_chunk_->next();
- LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk->address()));
- Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
- Executability executable =
- page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
- ObjectSpace space = kObjectSpaceLoSpace;
- if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
- size_t size = chunk->size();
- heap()->isolate()->memory_allocator()->FreeRawMemory(chunk->address(),
- size,
- executable);
- heap()->isolate()->memory_allocator()->PerformAllocationCallback(
- space, kAllocationActionFree, size);
- }
-
- size_ = 0;
- page_count_ = 0;
- objects_size_ = 0;
-}
-
-
-#ifdef ENABLE_HEAP_PROTECTION
-
-void LargeObjectSpace::Protect() {
- LargeObjectChunk* chunk = first_chunk_;
- while (chunk != NULL) {
- heap()->isolate()->memory_allocator()->Protect(chunk->address(),
- chunk->size());
- chunk = chunk->next();
- }
-}
-
-
-void LargeObjectSpace::Unprotect() {
- LargeObjectChunk* chunk = first_chunk_;
- while (chunk != NULL) {
- bool is_code = chunk->GetObject()->IsCode();
- heap()->isolate()->memory_allocator()->Unprotect(chunk->address(),
- chunk->size(), is_code ? EXECUTABLE : NOT_EXECUTABLE);
- chunk = chunk->next();
- }
-}
-
-#endif
-
-
-MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
- int object_size,
- Executability executable) {
- ASSERT(0 < object_size && object_size <= requested_size);
-
- // Check if we want to force a GC before growing the old space further.
- // If so, fail the allocation.
- if (!heap()->always_allocate() &&
- heap()->OldGenerationAllocationLimitReached()) {
- return Failure::RetryAfterGC(identity());
- }
-
- LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable);
- if (chunk == NULL) {
- return Failure::RetryAfterGC(identity());
- }
-
- size_ += static_cast<int>(chunk->size());
- objects_size_ += requested_size;
- page_count_++;
- chunk->set_next(first_chunk_);
- first_chunk_ = chunk;
-
- // Initialize page header.
- Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
- Address object_address = page->ObjectAreaStart();
-
- // Clear the low order bit of the second word in the page to flag it as a
- // large object page. If the chunk_size happened to be written there, its
- // low order bit should already be clear.
- page->SetIsLargeObjectPage(true);
- page->SetIsPageExecutable(executable);
- page->SetRegionMarks(Page::kAllRegionsCleanMarks);
- return HeapObject::FromAddress(object_address);
-}
-
-
-MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
- ASSERT(0 < size_in_bytes);
- return AllocateRawInternal(size_in_bytes,
- size_in_bytes,
- EXECUTABLE);
-}
-
-
-MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
- ASSERT(0 < size_in_bytes);
- return AllocateRawInternal(size_in_bytes,
- size_in_bytes,
- NOT_EXECUTABLE);
-}
-
-
-MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
- ASSERT(0 < size_in_bytes);
- return AllocateRawInternal(size_in_bytes,
- size_in_bytes,
- NOT_EXECUTABLE);
-}
-
-
-// GC support
-MaybeObject* LargeObjectSpace::FindObject(Address a) {
- for (LargeObjectChunk* chunk = first_chunk_;
- chunk != NULL;
- chunk = chunk->next()) {
- Address chunk_address = chunk->address();
- if (chunk_address <= a && a < chunk_address + chunk->size()) {
- return chunk->GetObject();
- }
- }
- return Failure::Exception();
-}
-
-
-LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
- // TODO(853): Change this implementation to only find executable
- // chunks and use some kind of hash-based approach to speed it up.
- for (LargeObjectChunk* chunk = first_chunk_;
- chunk != NULL;
- chunk = chunk->next()) {
- Address chunk_address = chunk->address();
- if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
- return chunk;
- }
- }
- return NULL;
-}
-
-
-void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
- LargeObjectIterator it(this);
- for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
- // We only have code, sequential strings, or fixed arrays in large
- // object space, and only fixed arrays can possibly contain pointers to
- // the young generation.
- if (object->IsFixedArray()) {
- Page* page = Page::FromAddress(object->address());
- uint32_t marks = page->GetRegionMarks();
- uint32_t newmarks = Page::kAllRegionsCleanMarks;
-
- if (marks != Page::kAllRegionsCleanMarks) {
- // For a large page a single dirty mark corresponds to several
- // regions (modulo 32). So we treat a large page as a sequence of
- // normal pages of size Page::kPageSize having same dirty marks
- // and subsequently iterate dirty regions on each of these pages.
- Address start = object->address();
- Address end = page->ObjectAreaEnd();
- Address object_end = start + object->Size();
-
- // Iterate regions of the first normal page covering object.
- uint32_t first_region_number = page->GetRegionNumberForAddress(start);
- newmarks |=
- heap()->IterateDirtyRegions(marks >> first_region_number,
- start,
- end,
- &Heap::IteratePointersInDirtyRegion,
- copy_object) << first_region_number;
-
- start = end;
- end = start + Page::kPageSize;
- while (end <= object_end) {
- // Iterate next 32 regions.
- newmarks |=
- heap()->IterateDirtyRegions(marks,
- start,
- end,
- &Heap::IteratePointersInDirtyRegion,
- copy_object);
- start = end;
- end = start + Page::kPageSize;
- }
-
- if (start != object_end) {
- // Iterate the last piece of an object which is less than
- // Page::kPageSize.
- newmarks |=
- heap()->IterateDirtyRegions(marks,
- start,
- object_end,
- &Heap::IteratePointersInDirtyRegion,
- copy_object);
- }
-
- page->SetRegionMarks(newmarks);
- }
- }
- }
-}
-
-
-void LargeObjectSpace::FreeUnmarkedObjects() {
- LargeObjectChunk* previous = NULL;
- LargeObjectChunk* current = first_chunk_;
- while (current != NULL) {
- HeapObject* object = current->GetObject();
- if (object->IsMarked()) {
- object->ClearMark();
- heap()->mark_compact_collector()->tracer()->decrement_marked_count();
- previous = current;
- current = current->next();
- } else {
- Page* page = Page::FromAddress(RoundUp(current->address(),
- Page::kPageSize));
- Executability executable =
- page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
- Address chunk_address = current->address();
- size_t chunk_size = current->size();
-
- // Cut the chunk out from the chunk list.
- current = current->next();
- if (previous == NULL) {
- first_chunk_ = current;
- } else {
- previous->set_next(current);
- }
-
- // Free the chunk.
- heap()->mark_compact_collector()->ReportDeleteIfNeeded(
- object, heap()->isolate());
- LiveObjectList::ProcessNonLive(object);
-
- size_ -= static_cast<int>(chunk_size);
- objects_size_ -= object->Size();
- page_count_--;
- ObjectSpace space = kObjectSpaceLoSpace;
- if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
- heap()->isolate()->memory_allocator()->FreeRawMemory(chunk_address,
- chunk_size,
- executable);
- heap()->isolate()->memory_allocator()->PerformAllocationCallback(
- space, kAllocationActionFree, size_);
- LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk_address));
- }
- }
-}
-
-
-bool LargeObjectSpace::Contains(HeapObject* object) {
- Address address = object->address();
- if (heap()->new_space()->Contains(address)) {
- return false;
- }
- Page* page = Page::FromAddress(address);
-
- SLOW_ASSERT(!page->IsLargeObjectPage()
- || !FindObject(address)->IsFailure());
-
- return page->IsLargeObjectPage();
-}
-
-
-#ifdef DEBUG
-// We do not assume that the large object iterator works, because it depends
-// on the invariants we are checking during verification.
-void LargeObjectSpace::Verify() {
- for (LargeObjectChunk* chunk = first_chunk_;
- chunk != NULL;
- chunk = chunk->next()) {
- // Each chunk contains an object that starts at the large object page's
- // object area start.
- HeapObject* object = chunk->GetObject();
- Page* page = Page::FromAddress(object->address());
- ASSERT(object->address() == page->ObjectAreaStart());
-
- // The first word should be a map, and we expect all map pointers to be
- // in map space.
- Map* map = object->map();
- ASSERT(map->IsMap());
- ASSERT(heap()->map_space()->Contains(map));
-
- // We have only code, sequential strings, external strings
- // (sequential strings that have been morphed into external
- // strings), fixed arrays, and byte arrays in large object space.
- ASSERT(object->IsCode() || object->IsSeqString() ||
- object->IsExternalString() || object->IsFixedArray() ||
- object->IsByteArray());
-
- // The object itself should look OK.
- object->Verify();
-
- // Byte arrays and strings don't have interior pointers.
- if (object->IsCode()) {
- VerifyPointersVisitor code_visitor;
- object->IterateBody(map->instance_type(),
- object->Size(),
- &code_visitor);
- } else if (object->IsFixedArray()) {
- // We loop over fixed arrays ourselves, rather then using the visitor,
- // because the visitor doesn't support the start/offset iteration
- // needed for IsRegionDirty.
- FixedArray* array = FixedArray::cast(object);
- for (int j = 0; j < array->length(); j++) {
- Object* element = array->get(j);
- if (element->IsHeapObject()) {
- HeapObject* element_object = HeapObject::cast(element);
- ASSERT(heap()->Contains(element_object));
- ASSERT(element_object->map()->IsMap());
- if (heap()->InNewSpace(element_object)) {
- Address array_addr = object->address();
- Address element_addr = array_addr + FixedArray::kHeaderSize +
- j * kPointerSize;
-
- ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
- }
- }
- }
- }
- }
-}
-
-
-void LargeObjectSpace::Print() {
- LargeObjectIterator it(this);
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
- obj->Print();
- }
-}
-
-
-void LargeObjectSpace::ReportStatistics() {
- PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
- int num_objects = 0;
- ClearHistograms();
- LargeObjectIterator it(this);
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
- num_objects++;
- CollectHistogramInfo(obj);
- }
-
- PrintF(" number of objects %d, "
- "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
- if (num_objects > 0) ReportHistogram(false);
-}
-
-
-void LargeObjectSpace::CollectCodeStatistics() {
- Isolate* isolate = heap()->isolate();
- LargeObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
- if (obj->IsCode()) {
- Code* code = Code::cast(obj);
- isolate->code_kind_statistics()[code->kind()] += code->Size();
- }
- }
-}
-#endif // DEBUG
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/spaces.h b/src/3rdparty/v8/src/spaces.h
deleted file mode 100644
index bd939d1..0000000
--- a/src/3rdparty/v8/src/spaces.h
+++ /dev/null
@@ -1,2368 +0,0 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SPACES_H_
-#define V8_SPACES_H_
-
-#include "list-inl.h"
-#include "log.h"
-
-namespace v8 {
-namespace internal {
-
-class Isolate;
-
-// -----------------------------------------------------------------------------
-// Heap structures:
-//
-// A JS heap consists of a young generation, an old generation, and a large
-// object space. The young generation is divided into two semispaces. A
-// scavenger implements Cheney's copying algorithm. The old generation is
-// separated into a map space and an old object space. The map space contains
-// all (and only) map objects, the rest of old objects go into the old space.
-// The old generation is collected by a mark-sweep-compact collector.
-//
-// The semispaces of the young generation are contiguous. The old and map
-// spaces consists of a list of pages. A page has a page header and an object
-// area. A page size is deliberately chosen as 8K bytes.
-// The first word of a page is an opaque page header that has the
-// address of the next page and its ownership information. The second word may
-// have the allocation top address of this page. Heap objects are aligned to the
-// pointer size.
-//
-// There is a separate large object space for objects larger than
-// Page::kMaxHeapObjectSize, so that they do not have to move during
-// collection. The large object space is paged. Pages in large object space
-// may be larger than 8K.
-//
-// A card marking write barrier is used to keep track of intergenerational
-// references. Old space pages are divided into regions of Page::kRegionSize
-// size. Each region has a corresponding dirty bit in the page header which is
-// set if the region might contain pointers to new space. For details about
-// dirty bits encoding see comments in the Page::GetRegionNumberForAddress()
-// method body.
-//
-// During scavenges and mark-sweep collections we iterate intergenerational
-// pointers without decoding heap object maps so if the page belongs to old
-// pointer space or large object space it is essential to guarantee that
-// the page does not contain any garbage pointers to new space: every pointer
-// aligned word which satisfies the Heap::InNewSpace() predicate must be a
-// pointer to a live heap object in new space. Thus objects in old pointer
-// and large object spaces should have a special layout (e.g. no bare integer
-// fields). This requirement does not apply to map space which is iterated in
-// a special fashion. However we still require pointer fields of dead maps to
-// be cleaned.
-//
-// To enable lazy cleaning of old space pages we use a notion of allocation
-// watermark. Every pointer under watermark is considered to be well formed.
-// Page allocation watermark is not necessarily equal to page allocation top but
-// all alive objects on page should reside under allocation watermark.
-// During scavenge allocation watermark might be bumped and invalid pointers
-// might appear below it. To avoid following them we store a valid watermark
-// into special field in the page header and set a page WATERMARK_INVALIDATED
-// flag. For details see comments in the Page::SetAllocationWatermark() method
-// body.
-//
-
-// Some assertion macros used in the debugging mode.
-
-#define ASSERT_PAGE_ALIGNED(address) \
- ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
-
-#define ASSERT_OBJECT_ALIGNED(address) \
- ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
-
-#define ASSERT_MAP_ALIGNED(address) \
- ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
-
-#define ASSERT_OBJECT_SIZE(size) \
- ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
-
-#define ASSERT_PAGE_OFFSET(offset) \
- ASSERT((Page::kObjectStartOffset <= offset) \
- && (offset <= Page::kPageSize))
-
-#define ASSERT_MAP_PAGE_INDEX(index) \
- ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
-
-
-class PagedSpace;
-class MemoryAllocator;
-class AllocationInfo;
-
-// -----------------------------------------------------------------------------
-// A page normally has 8K bytes. Large object pages may be larger. A page
-// address is always aligned to the 8K page size.
-//
-// Each page starts with a header of Page::kPageHeaderSize size which contains
-// bookkeeping data.
-//
-// The mark-compact collector transforms a map pointer into a page index and a
-// page offset. The exact encoding is described in the comments for
-// class MapWord in objects.h.
-//
-// The only way to get a page pointer is by calling factory methods:
-// Page* p = Page::FromAddress(addr); or
-// Page* p = Page::FromAllocationTop(top);
-class Page {
- public:
- // Returns the page containing a given address. The address ranges
- // from [page_addr .. page_addr + kPageSize[
- //
- // Note that this function only works for addresses in normal paged
- // spaces and addresses in the first 8K of large object pages (i.e.,
- // the start of large objects but not necessarily derived pointers
- // within them).
- INLINE(static Page* FromAddress(Address a)) {
- return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
- }
-
- // Returns the page containing an allocation top. Because an allocation
- // top address can be the upper bound of the page, we need to subtract
- // it with kPointerSize first. The address ranges from
- // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
- INLINE(static Page* FromAllocationTop(Address top)) {
- Page* p = FromAddress(top - kPointerSize);
- ASSERT_PAGE_OFFSET(p->Offset(top));
- return p;
- }
-
- // Returns the start address of this page.
- Address address() { return reinterpret_cast<Address>(this); }
-
- // Checks whether this is a valid page address.
- bool is_valid() { return address() != NULL; }
-
- // Returns the next page of this page.
- inline Page* next_page();
-
- // Return the end of allocation in this page. Undefined for unused pages.
- inline Address AllocationTop();
-
- // Return the allocation watermark for the page.
- // For old space pages it is guaranteed that the area under the watermark
- // does not contain any garbage pointers to new space.
- inline Address AllocationWatermark();
-
- // Return the allocation watermark offset from the beginning of the page.
- inline uint32_t AllocationWatermarkOffset();
-
- inline void SetAllocationWatermark(Address allocation_watermark);
-
- inline void SetCachedAllocationWatermark(Address allocation_watermark);
- inline Address CachedAllocationWatermark();
-
- // Returns the start address of the object area in this page.
- Address ObjectAreaStart() { return address() + kObjectStartOffset; }
-
- // Returns the end address (exclusive) of the object area in this page.
- Address ObjectAreaEnd() { return address() + Page::kPageSize; }
-
- // Checks whether an address is page aligned.
- static bool IsAlignedToPageSize(Address a) {
- return 0 == (OffsetFrom(a) & kPageAlignmentMask);
- }
-
- // True if this page was in use before current compaction started.
- // Result is valid only for pages owned by paged spaces and
- // only after PagedSpace::PrepareForMarkCompact was called.
- inline bool WasInUseBeforeMC();
-
- inline void SetWasInUseBeforeMC(bool was_in_use);
-
- // True if this page is a large object page.
- inline bool IsLargeObjectPage();
-
- inline void SetIsLargeObjectPage(bool is_large_object_page);
-
- inline bool IsPageExecutable();
-
- inline void SetIsPageExecutable(bool is_page_executable);
-
- // Returns the offset of a given address to this page.
- INLINE(int Offset(Address a)) {
- int offset = static_cast<int>(a - address());
- ASSERT_PAGE_OFFSET(offset);
- return offset;
- }
-
- // Returns the address for a given offset to the this page.
- Address OffsetToAddress(int offset) {
- ASSERT_PAGE_OFFSET(offset);
- return address() + offset;
- }
-
- // ---------------------------------------------------------------------
- // Card marking support
-
- static const uint32_t kAllRegionsCleanMarks = 0x0;
- static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;
-
- inline uint32_t GetRegionMarks();
- inline void SetRegionMarks(uint32_t dirty);
-
- inline uint32_t GetRegionMaskForAddress(Address addr);
- inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
- inline int GetRegionNumberForAddress(Address addr);
-
- inline void MarkRegionDirty(Address addr);
- inline bool IsRegionDirty(Address addr);
-
- inline void ClearRegionMarks(Address start,
- Address end,
- bool reaches_limit);
-
- // Page size in bytes. This must be a multiple of the OS page size.
- static const int kPageSize = 1 << kPageSizeBits;
-
- // Page size mask.
- static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
-
- static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
- kIntSize + kPointerSize + kPointerSize;
-
- // The start offset of the object area in a page. Aligned to both maps and
- // code alignment to be suitable for both.
- static const int kObjectStartOffset =
- CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize));
-
- // Object area size in bytes.
- static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
-
- // Maximum object size that fits in a page.
- static const int kMaxHeapObjectSize = kObjectAreaSize;
-
- static const int kDirtyFlagOffset = 2 * kPointerSize;
- static const int kRegionSizeLog2 = 8;
- static const int kRegionSize = 1 << kRegionSizeLog2;
- static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);
-
- STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
-
- enum PageFlag {
- IS_NORMAL_PAGE = 0,
- WAS_IN_USE_BEFORE_MC,
-
- // Page allocation watermark was bumped by preallocation during scavenge.
- // Correct watermark can be retrieved by CachedAllocationWatermark() method
- WATERMARK_INVALIDATED,
- IS_EXECUTABLE,
- NUM_PAGE_FLAGS // Must be last
- };
- static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;
-
- // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
- // scavenge we just invalidate the watermark on each old space page after
- // processing it. And then we flip the meaning of the WATERMARK_INVALIDATED
- // flag at the beginning of the next scavenge and each page becomes marked as
- // having a valid watermark.
- //
- // The following invariant must hold for pages in old pointer and map spaces:
- // If page is in use then page is marked as having invalid watermark at
- // the beginning and at the end of any GC.
- //
- // This invariant guarantees that after flipping flag meaning at the
- // beginning of scavenge all pages in use will be marked as having valid
- // watermark.
- static inline void FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap);
-
- // Returns true if the page allocation watermark was not altered during
- // scavenge.
- inline bool IsWatermarkValid();
-
- inline void InvalidateWatermark(bool value);
-
- inline bool GetPageFlag(PageFlag flag);
- inline void SetPageFlag(PageFlag flag, bool value);
- inline void ClearPageFlags();
-
- inline void ClearGCFields();
-
- static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
- static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
- static const uint32_t kAllocationWatermarkOffsetMask =
- ((1 << kAllocationWatermarkOffsetBits) - 1) <<
- kAllocationWatermarkOffsetShift;
-
- static const uint32_t kFlagsMask =
- ((1 << kAllocationWatermarkOffsetShift) - 1);
-
- STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
- kAllocationWatermarkOffsetBits);
-
- //---------------------------------------------------------------------------
- // Page header description.
- //
- // If a page is not in the large object space, the first word,
- // opaque_header, encodes the next page address (aligned to kPageSize 8K)
- // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use
- // opaque_header. The value range of the opaque_header is [0..kPageSize[,
- // or [next_page_start, next_page_end[. It cannot point to a valid address
- // in the current page. If a page is in the large object space, the first
- // word *may* (if the page start and large object chunk start are the
- // same) contain the address of the next large object chunk.
- intptr_t opaque_header;
-
- // If the page is not in the large object space, the low-order bit of the
- // second word is set. If the page is in the large object space, the
- // second word *may* (if the page start and large object chunk start are
- // the same) contain the large object chunk size. In either case, the
- // low-order bit for large object pages will be cleared.
- // For normal pages this word is used to store page flags and
- // offset of allocation top.
- intptr_t flags_;
-
- // This field contains dirty marks for regions covering the page. Only dirty
- // regions might contain intergenerational references.
- // Only 32 dirty marks are supported so for large object pages several regions
- // might be mapped to a single dirty mark.
- uint32_t dirty_regions_;
-
- // The index of the page in its owner space.
- int mc_page_index;
-
- // During mark-compact collections this field contains the forwarding address
- // of the first live object in this page.
- // During scavenge collection this field is used to store allocation watermark
- // if it is altered during scavenge.
- Address mc_first_forwarded;
-
- Heap* heap_;
-};
-
-
-// ----------------------------------------------------------------------------
-// Space is the abstract superclass for all allocation spaces.
-class Space : public Malloced {
- public:
- Space(Heap* heap, AllocationSpace id, Executability executable)
- : heap_(heap), id_(id), executable_(executable) {}
-
- virtual ~Space() {}
-
- Heap* heap() const { return heap_; }
-
- // Does the space need executable memory?
- Executability executable() { return executable_; }
-
- // Identity used in error reporting.
- AllocationSpace identity() { return id_; }
-
- // Returns allocated size.
- virtual intptr_t Size() = 0;
-
- // Returns size of objects. Can differ from the allocated size
- // (e.g. see LargeObjectSpace).
- virtual intptr_t SizeOfObjects() { return Size(); }
-
-#ifdef ENABLE_HEAP_PROTECTION
- // Protect/unprotect the space by marking it read-only/writable.
- virtual void Protect() = 0;
- virtual void Unprotect() = 0;
-#endif
-
-#ifdef DEBUG
- virtual void Print() = 0;
-#endif
-
- // After calling this we can allocate a certain number of bytes using only
- // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
- // without using freelists or causing a GC. This is used by partial
- // snapshots. It returns true of space was reserved or false if a GC is
- // needed. For paged spaces the space requested must include the space wasted
- // at the end of each when allocating linearly.
- virtual bool ReserveSpace(int bytes) = 0;
-
- private:
- Heap* heap_;
- AllocationSpace id_;
- Executability executable_;
-};
-
-
-// ----------------------------------------------------------------------------
-// All heap objects containing executable code (code objects) must be allocated
-// from a 2 GB range of memory, so that they can call each other using 32-bit
-// displacements. This happens automatically on 32-bit platforms, where 32-bit
-// displacements cover the entire 4GB virtual address space. On 64-bit
-// platforms, we support this using the CodeRange object, which reserves and
-// manages a range of virtual memory.
-class CodeRange {
- public:
- // Reserves a range of virtual memory, but does not commit any of it.
- // Can only be called once, at heap initialization time.
- // Returns false on failure.
- bool Setup(const size_t requested_size);
-
- // Frees the range of virtual memory, and frees the data structures used to
- // manage it.
- void TearDown();
-
- bool exists() { return code_range_ != NULL; }
- bool contains(Address address) {
- if (code_range_ == NULL) return false;
- Address start = static_cast<Address>(code_range_->address());
- return start <= address && address < start + code_range_->size();
- }
-
- // Allocates a chunk of memory from the large-object portion of
- // the code range. On platforms with no separate code range, should
- // not be called.
- MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
- size_t* allocated);
- void FreeRawMemory(void* buf, size_t length);
-
- private:
- CodeRange();
-
- // The reserved range of virtual memory that all code objects are put in.
- VirtualMemory* code_range_;
- // Plain old data class, just a struct plus a constructor.
- class FreeBlock {
- public:
- FreeBlock(Address start_arg, size_t size_arg)
- : start(start_arg), size(size_arg) {}
- FreeBlock(void* start_arg, size_t size_arg)
- : start(static_cast<Address>(start_arg)), size(size_arg) {}
-
- Address start;
- size_t size;
- };
-
- // Freed blocks of memory are added to the free list. When the allocation
- // list is exhausted, the free list is sorted and merged to make the new
- // allocation list.
- List<FreeBlock> free_list_;
- // Memory is allocated from the free blocks on the allocation list.
- // The block at current_allocation_block_index_ is the current block.
- List<FreeBlock> allocation_list_;
- int current_allocation_block_index_;
-
- // Finds a block on the allocation list that contains at least the
- // requested amount of memory. If none is found, sorts and merges
- // the existing free memory blocks, and searches again.
- // If none can be found, terminates V8 with FatalProcessOutOfMemory.
- void GetNextAllocationBlock(size_t requested);
- // Compares the start addresses of two free blocks.
- static int CompareFreeBlockAddress(const FreeBlock* left,
- const FreeBlock* right);
-
- friend class Isolate;
-
- Isolate* isolate_;
-
- DISALLOW_COPY_AND_ASSIGN(CodeRange);
-};
-
-
-// ----------------------------------------------------------------------------
-// A space acquires chunks of memory from the operating system. The memory
-// allocator manages chunks for the paged heap spaces (old space and map
-// space). A paged chunk consists of pages. Pages in a chunk have contiguous
-// addresses and are linked as a list.
-//
-// The allocator keeps an initial chunk which is used for the new space. The
-// leftover regions of the initial chunk are used for the initial chunks of
-// old space and map space if they are big enough to hold at least one page.
-// The allocator assumes that there is one old space and one map space, each
-// expands the space by allocating kPagesPerChunk pages except the last
-// expansion (before running out of space). The first chunk may contain fewer
-// than kPagesPerChunk pages as well.
-//
-// The memory allocator also allocates chunks for the large object space, but
-// they are managed by the space itself. The new space does not expand.
-//
-// The fact that pages for paged spaces are allocated and deallocated in chunks
-// induces a constraint on the order of pages in a linked lists. We say that
-// pages are linked in the chunk-order if and only if every two consecutive
-// pages from the same chunk are consecutive in the linked list.
-//
-
-
-class MemoryAllocator {
- public:
- // Initializes its internal bookkeeping structures.
- // Max capacity of the total space and executable memory limit.
- bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
-
- // Deletes valid chunks.
- void TearDown();
-
- // Reserves an initial address range of virtual memory to be split between
- // the two new space semispaces, the old space, and the map space. The
- // memory is not yet committed or assigned to spaces and split into pages.
- // The initial chunk is unmapped when the memory allocator is torn down.
- // This function should only be called when there is not already a reserved
- // initial chunk (initial_chunk_ should be NULL). It returns the start
- // address of the initial chunk if successful, with the side effect of
- // setting the initial chunk, or else NULL if unsuccessful and leaves the
- // initial chunk NULL.
- void* ReserveInitialChunk(const size_t requested);
-
- // Commits pages from an as-yet-unmanaged block of virtual memory into a
- // paged space. The block should be part of the initial chunk reserved via
- // a call to ReserveInitialChunk. The number of pages is always returned in
- // the output parameter num_pages. This function assumes that the start
- // address is non-null and that it is big enough to hold at least one
- // page-aligned page. The call always succeeds, and num_pages is always
- // greater than zero.
- Page* CommitPages(Address start, size_t size, PagedSpace* owner,
- int* num_pages);
-
- // Commit a contiguous block of memory from the initial chunk. Assumes that
- // the address is not NULL, the size is greater than zero, and that the
- // block is contained in the initial chunk. Returns true if it succeeded
- // and false otherwise.
- bool CommitBlock(Address start, size_t size, Executability executable);
-
- // Uncommit a contiguous block of memory [start..(start+size)[.
- // start is not NULL, the size is greater than zero, and the
- // block is contained in the initial chunk. Returns true if it succeeded
- // and false otherwise.
- bool UncommitBlock(Address start, size_t size);
-
- // Zaps a contiguous block of memory [start..(start+size)[ thus
- // filling it up with a recognizable non-NULL bit pattern.
- void ZapBlock(Address start, size_t size);
-
- // Attempts to allocate the requested (non-zero) number of pages from the
- // OS. Fewer pages might be allocated than requested. If it fails to
- // allocate memory for the OS or cannot allocate a single page, this
- // function returns an invalid page pointer (NULL). The caller must check
- // whether the returned page is valid (by calling Page::is_valid()). It is
- // guaranteed that allocated pages have contiguous addresses. The actual
- // number of allocated pages is returned in the output parameter
- // allocated_pages. If the PagedSpace owner is executable and there is
- // a code range, the pages are allocated from the code range.
- Page* AllocatePages(int requested_pages, int* allocated_pages,
- PagedSpace* owner);
-
- // Frees pages from a given page and after. Requires pages to be
- // linked in chunk-order (see comment for class).
- // If 'p' is the first page of a chunk, pages from 'p' are freed
- // and this function returns an invalid page pointer.
- // Otherwise, the function searches a page after 'p' that is
- // the first page of a chunk. Pages after the found page
- // are freed and the function returns 'p'.
- Page* FreePages(Page* p);
-
- // Frees all pages owned by given space.
- void FreeAllPages(PagedSpace* space);
-
- // Allocates and frees raw memory of certain size.
- // These are just thin wrappers around OS::Allocate and OS::Free,
- // but keep track of allocated bytes as part of heap.
- // If the flag is EXECUTABLE and a code range exists, the requested
- // memory is allocated from the code range. If a code range exists
- // and the freed memory is in it, the code range manages the freed memory.
- MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
- size_t* allocated,
- Executability executable);
- void FreeRawMemory(void* buf,
- size_t length,
- Executability executable);
- void PerformAllocationCallback(ObjectSpace space,
- AllocationAction action,
- size_t size);
-
- void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action);
- void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
- bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
-
- // Returns the maximum available bytes of heaps.
- intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
-
- // Returns allocated spaces in bytes.
- intptr_t Size() { return size_; }
-
- // Returns the maximum available executable bytes of heaps.
- intptr_t AvailableExecutable() {
- if (capacity_executable_ < size_executable_) return 0;
- return capacity_executable_ - size_executable_;
- }
-
- // Returns allocated executable spaces in bytes.
- intptr_t SizeExecutable() { return size_executable_; }
-
- // Returns maximum available bytes that the old space can have.
- intptr_t MaxAvailable() {
- return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
- }
-
- // Links two pages.
- inline void SetNextPage(Page* prev, Page* next);
-
- // Returns the next page of a given page.
- inline Page* GetNextPage(Page* p);
-
- // Checks whether a page belongs to a space.
- inline bool IsPageInSpace(Page* p, PagedSpace* space);
-
- // Returns the space that owns the given page.
- inline PagedSpace* PageOwner(Page* page);
-
- // Finds the first/last page in the same chunk as a given page.
- Page* FindFirstPageInSameChunk(Page* p);
- Page* FindLastPageInSameChunk(Page* p);
-
- // Relinks list of pages owned by space to make it chunk-ordered.
- // Returns new first and last pages of space.
- // Also returns last page in relinked list which has WasInUsedBeforeMC
- // flag set.
- void RelinkPageListInChunkOrder(PagedSpace* space,
- Page** first_page,
- Page** last_page,
- Page** last_page_in_use);
-
-#ifdef ENABLE_HEAP_PROTECTION
- // Protect/unprotect a block of memory by marking it read-only/writable.
- inline void Protect(Address start, size_t size);
- inline void Unprotect(Address start, size_t size,
- Executability executable);
-
- // Protect/unprotect a chunk given a page in the chunk.
- inline void ProtectChunkFromPage(Page* page);
- inline void UnprotectChunkFromPage(Page* page);
-#endif
-
-#ifdef DEBUG
- // Reports statistic info of the space.
- void ReportStatistics();
-#endif
-
- // Due to encoding limitation, we can only have 8K chunks.
- static const int kMaxNofChunks = 1 << kPageSizeBits;
- // If a chunk has at least 16 pages, the maximum heap size is about
- // 8K * 8K * 16 = 1G bytes.
-#ifdef V8_TARGET_ARCH_X64
- static const int kPagesPerChunk = 32;
- // On 64 bit the chunk table consists of 4 levels of 4096-entry tables.
- static const int kPagesPerChunkLog2 = 5;
- static const int kChunkTableLevels = 4;
- static const int kChunkTableBitsPerLevel = 12;
-#else
- static const int kPagesPerChunk = 16;
- // On 32 bit the chunk table consists of 2 levels of 256-entry tables.
- static const int kPagesPerChunkLog2 = 4;
- static const int kChunkTableLevels = 2;
- static const int kChunkTableBitsPerLevel = 8;
-#endif
-
- private:
- MemoryAllocator();
-
- static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
- static const int kChunkSizeLog2 = kPagesPerChunkLog2 + kPageSizeBits;
-
- // Maximum space size in bytes.
- intptr_t capacity_;
- // Maximum subset of capacity_ that can be executable
- intptr_t capacity_executable_;
-
- // Allocated space size in bytes.
- intptr_t size_;
-
- // Allocated executable space size in bytes.
- intptr_t size_executable_;
-
- struct MemoryAllocationCallbackRegistration {
- MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action)
- : callback(callback), space(space), action(action) {
- }
- MemoryAllocationCallback callback;
- ObjectSpace space;
- AllocationAction action;
- };
- // A List of callback that are triggered when memory is allocated or free'd
- List<MemoryAllocationCallbackRegistration>
- memory_allocation_callbacks_;
-
- // The initial chunk of virtual memory.
- VirtualMemory* initial_chunk_;
-
- // Allocated chunk info: chunk start address, chunk size, and owning space.
- class ChunkInfo BASE_EMBEDDED {
- public:
- ChunkInfo() : address_(NULL),
- size_(0),
- owner_(NULL),
- executable_(NOT_EXECUTABLE),
- owner_identity_(FIRST_SPACE) {}
- inline void init(Address a, size_t s, PagedSpace* o);
- Address address() { return address_; }
- size_t size() { return size_; }
- PagedSpace* owner() { return owner_; }
- // We save executability of the owner to allow using it
- // when collecting stats after the owner has been destroyed.
- Executability executable() const { return executable_; }
- AllocationSpace owner_identity() const { return owner_identity_; }
-
- private:
- Address address_;
- size_t size_;
- PagedSpace* owner_;
- Executability executable_;
- AllocationSpace owner_identity_;
- };
-
- // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
- List<ChunkInfo> chunks_;
- List<int> free_chunk_ids_;
- int max_nof_chunks_;
- int top_;
-
- // Push/pop a free chunk id onto/from the stack.
- void Push(int free_chunk_id);
- int Pop();
- bool OutOfChunkIds() { return top_ == 0; }
-
- // Frees a chunk.
- void DeleteChunk(int chunk_id);
-
- // Basic check whether a chunk id is in the valid range.
- inline bool IsValidChunkId(int chunk_id);
-
- // Checks whether a chunk id identifies an allocated chunk.
- inline bool IsValidChunk(int chunk_id);
-
- // Returns the chunk id that a page belongs to.
- inline int GetChunkId(Page* p);
-
- // True if the address lies in the initial chunk.
- inline bool InInitialChunk(Address address);
-
- // Initializes pages in a chunk. Returns the first page address.
- // This function and GetChunkId() are provided for the mark-compact
- // collector to rebuild page headers in the from space, which is
- // used as a marking stack and its page headers are destroyed.
- Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
- PagedSpace* owner);
-
- Page* RelinkPagesInChunk(int chunk_id,
- Address chunk_start,
- size_t chunk_size,
- Page* prev,
- Page** last_page_in_use);
-
- friend class Isolate;
-
- Isolate* isolate_;
-
- DISALLOW_COPY_AND_ASSIGN(MemoryAllocator);
-};
-
-
-// -----------------------------------------------------------------------------
-// Interface for heap object iterator to be implemented by all object space
-// object iterators.
-//
-// NOTE: The space specific object iterators also implements the own next()
-// method which is used to avoid using virtual functions
-// iterating a specific space.
-
-class ObjectIterator : public Malloced {
- public:
- virtual ~ObjectIterator() { }
-
- virtual HeapObject* next_object() = 0;
-};
-
-
-// -----------------------------------------------------------------------------
-// Heap object iterator in new/old/map spaces.
-//
-// A HeapObjectIterator iterates objects from a given address to the
-// top of a space. The given address must be below the current
-// allocation pointer (space top). There are some caveats.
-//
-// (1) If the space top changes upward during iteration (because of
-// allocating new objects), the iterator does not iterate objects
-// above the original space top. The caller must create a new
-// iterator starting from the old top in order to visit these new
-// objects.
-//
-// (2) If new objects are allocated below the original allocation top
-// (e.g., free-list allocation in paged spaces), the new objects
-// may or may not be iterated depending on their position with
-// respect to the current point of iteration.
-//
-// (3) The space top should not change downward during iteration,
-// otherwise the iterator will return not-necessarily-valid
-// objects.
-
-class HeapObjectIterator: public ObjectIterator {
- public:
- // Creates a new object iterator in a given space. If a start
- // address is not given, the iterator starts from the space bottom.
- // If the size function is not given, the iterator calls the default
- // Object::Size().
- explicit HeapObjectIterator(PagedSpace* space);
- HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
- HeapObjectIterator(PagedSpace* space, Address start);
- HeapObjectIterator(PagedSpace* space,
- Address start,
- HeapObjectCallback size_func);
- HeapObjectIterator(Page* page, HeapObjectCallback size_func);
-
- inline HeapObject* next() {
- return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
- }
-
- // implementation of ObjectIterator.
- virtual HeapObject* next_object() { return next(); }
-
- private:
- Address cur_addr_; // current iteration point
- Address end_addr_; // end iteration point
- Address cur_limit_; // current page limit
- HeapObjectCallback size_func_; // size function
- Page* end_page_; // caches the page of the end address
-
- HeapObject* FromCurrentPage() {
- ASSERT(cur_addr_ < cur_limit_);
-
- HeapObject* obj = HeapObject::FromAddress(cur_addr_);
- int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
- ASSERT_OBJECT_SIZE(obj_size);
-
- cur_addr_ += obj_size;
- ASSERT(cur_addr_ <= cur_limit_);
-
- return obj;
- }
-
- // Slow path of next, goes into the next page.
- HeapObject* FromNextPage();
-
- // Initializes fields.
- void Initialize(Address start, Address end, HeapObjectCallback size_func);
-
-#ifdef DEBUG
- // Verifies whether fields have valid values.
- void Verify();
-#endif
-};
-
-
-// -----------------------------------------------------------------------------
-// A PageIterator iterates the pages in a paged space.
-//
-// The PageIterator class provides three modes for iterating pages in a space:
-// PAGES_IN_USE iterates pages containing allocated objects.
-// PAGES_USED_BY_MC iterates pages that hold relocated objects during a
-// mark-compact collection.
-// ALL_PAGES iterates all pages in the space.
-//
-// There are some caveats.
-//
-// (1) If the space expands during iteration, new pages will not be
-// returned by the iterator in any mode.
-//
-// (2) If new objects are allocated during iteration, they will appear
-// in pages returned by the iterator. Allocation may cause the
-// allocation pointer or MC allocation pointer in the last page to
-// change between constructing the iterator and iterating the last
-// page.
-//
-// (3) The space should not shrink during iteration, otherwise the
-// iterator will return deallocated pages.
-
-class PageIterator BASE_EMBEDDED {
- public:
- enum Mode {
- PAGES_IN_USE,
- PAGES_USED_BY_MC,
- ALL_PAGES
- };
-
- PageIterator(PagedSpace* space, Mode mode);
-
- inline bool has_next();
- inline Page* next();
-
- private:
- PagedSpace* space_;
- Page* prev_page_; // Previous page returned.
- Page* stop_page_; // Page to stop at (last page returned by the iterator).
-};
-
-
-// -----------------------------------------------------------------------------
-// A space has a list of pages. The next page can be accessed via
-// Page::next_page() call. The next page of the last page is an
-// invalid page pointer. A space can expand and shrink dynamically.
-
-// An abstraction of allocation and relocation pointers in a page-structured
-// space.
-class AllocationInfo {
- public:
- Address top; // current allocation top
- Address limit; // current allocation limit
-
-#ifdef DEBUG
- bool VerifyPagedAllocation() {
- return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
- && (top <= limit);
- }
-#endif
-};
-
-
-// An abstraction of the accounting statistics of a page-structured space.
-// The 'capacity' of a space is the number of object-area bytes (ie, not
-// including page bookkeeping structures) currently in the space. The 'size'
-// of a space is the number of allocated bytes, the 'waste' in the space is
-// the number of bytes that are not allocated and not available to
-// allocation without reorganizing the space via a GC (eg, small blocks due
-// to internal fragmentation, top of page areas in map space), and the bytes
-// 'available' is the number of unallocated bytes that are not waste. The
-// capacity is the sum of size, waste, and available.
-//
-// The stats are only set by functions that ensure they stay balanced. These
-// functions increase or decrease one of the non-capacity stats in
-// conjunction with capacity, or else they always balance increases and
-// decreases to the non-capacity stats.
-class AllocationStats BASE_EMBEDDED {
- public:
- AllocationStats() { Clear(); }
-
- // Zero out all the allocation statistics (ie, no capacity).
- void Clear() {
- capacity_ = 0;
- available_ = 0;
- size_ = 0;
- waste_ = 0;
- }
-
- // Reset the allocation statistics (ie, available = capacity with no
- // wasted or allocated bytes).
- void Reset() {
- available_ = capacity_;
- size_ = 0;
- waste_ = 0;
- }
-
- // Accessors for the allocation statistics.
- intptr_t Capacity() { return capacity_; }
- intptr_t Available() { return available_; }
- intptr_t Size() { return size_; }
- intptr_t Waste() { return waste_; }
-
- // Grow the space by adding available bytes.
- void ExpandSpace(int size_in_bytes) {
- capacity_ += size_in_bytes;
- available_ += size_in_bytes;
- }
-
- // Shrink the space by removing available bytes.
- void ShrinkSpace(int size_in_bytes) {
- capacity_ -= size_in_bytes;
- available_ -= size_in_bytes;
- }
-
- // Allocate from available bytes (available -> size).
- void AllocateBytes(intptr_t size_in_bytes) {
- available_ -= size_in_bytes;
- size_ += size_in_bytes;
- }
-
- // Free allocated bytes, making them available (size -> available).
- void DeallocateBytes(intptr_t size_in_bytes) {
- size_ -= size_in_bytes;
- available_ += size_in_bytes;
- }
-
- // Waste free bytes (available -> waste).
- void WasteBytes(int size_in_bytes) {
- available_ -= size_in_bytes;
- waste_ += size_in_bytes;
- }
-
- // Consider the wasted bytes to be allocated, as they contain filler
- // objects (waste -> size).
- void FillWastedBytes(intptr_t size_in_bytes) {
- waste_ -= size_in_bytes;
- size_ += size_in_bytes;
- }
-
- private:
- intptr_t capacity_;
- intptr_t available_;
- intptr_t size_;
- intptr_t waste_;
-};
-
-
-class PagedSpace : public Space {
- public:
- // Creates a space with a maximum capacity, and an id.
- PagedSpace(Heap* heap,
- intptr_t max_capacity,
- AllocationSpace id,
- Executability executable);
-
- virtual ~PagedSpace() {}
-
- // Set up the space using the given address range of virtual memory (from
- // the memory allocator's initial chunk) if possible. If the block of
- // addresses is not big enough to contain a single page-aligned page, a
- // fresh chunk will be allocated.
- bool Setup(Address start, size_t size);
-
- // Returns true if the space has been successfully set up and not
- // subsequently torn down.
- bool HasBeenSetup();
-
- // Cleans up the space, frees all pages in this space except those belonging
- // to the initial chunk, uncommits addresses in the initial chunk.
- void TearDown();
-
- // Checks whether an object/address is in this space.
- inline bool Contains(Address a);
- bool Contains(HeapObject* o) { return Contains(o->address()); }
- // Never crashes even if a is not a valid pointer.
- inline bool SafeContains(Address a);
-
- // Given an address occupied by a live object, return that object if it is
- // in this space, or Failure::Exception() if it is not. The implementation
- // iterates over objects in the page containing the address, the cost is
- // linear in the number of objects in the page. It may be slow.
- MUST_USE_RESULT MaybeObject* FindObject(Address addr);
-
- // Checks whether page is currently in use by this space.
- bool IsUsed(Page* page);
-
- void MarkAllPagesClean();
-
- // Prepares for a mark-compact GC.
- virtual void PrepareForMarkCompact(bool will_compact);
-
- // The top of allocation in a page in this space. Undefined if page is unused.
- Address PageAllocationTop(Page* page) {
- return page == TopPageOf(allocation_info_) ? top()
- : PageAllocationLimit(page);
- }
-
- // The limit of allocation for a page in this space.
- virtual Address PageAllocationLimit(Page* page) = 0;
-
- void FlushTopPageWatermark() {
- AllocationTopPage()->SetCachedAllocationWatermark(top());
- AllocationTopPage()->InvalidateWatermark(true);
- }
-
- // Current capacity without growing (Size() + Available() + Waste()).
- intptr_t Capacity() { return accounting_stats_.Capacity(); }
-
- // Total amount of memory committed for this space. For paged
- // spaces this equals the capacity.
- intptr_t CommittedMemory() { return Capacity(); }
-
- // Available bytes without growing.
- intptr_t Available() { return accounting_stats_.Available(); }
-
- // Allocated bytes in this space.
- virtual intptr_t Size() { return accounting_stats_.Size(); }
-
- // Wasted bytes due to fragmentation and not recoverable until the
- // next GC of this space.
- intptr_t Waste() { return accounting_stats_.Waste(); }
-
- // Returns the address of the first object in this space.
- Address bottom() { return first_page_->ObjectAreaStart(); }
-
- // Returns the allocation pointer in this space.
- Address top() { return allocation_info_.top; }
-
- // Allocate the requested number of bytes in the space if possible, return a
- // failure object if not.
- MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
-
- // Allocate the requested number of bytes for relocation during mark-compact
- // collection.
- MUST_USE_RESULT inline MaybeObject* MCAllocateRaw(int size_in_bytes);
-
- virtual bool ReserveSpace(int bytes);
-
- // Used by ReserveSpace.
- virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
-
- // Free all pages in range from prev (exclusive) to last (inclusive).
- // Freed pages are moved to the end of page list.
- void FreePages(Page* prev, Page* last);
-
- // Deallocates a block.
- virtual void DeallocateBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist) = 0;
-
- // Set space allocation info.
- void SetTop(Address top) {
- allocation_info_.top = top;
- allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
- }
-
- // ---------------------------------------------------------------------------
- // Mark-compact collection support functions
-
- // Set the relocation point to the beginning of the space.
- void MCResetRelocationInfo();
-
- // Writes relocation info to the top page.
- void MCWriteRelocationInfoToPage() {
- TopPageOf(mc_forwarding_info_)->
- SetAllocationWatermark(mc_forwarding_info_.top);
- }
-
- // Computes the offset of a given address in this space to the beginning
- // of the space.
- int MCSpaceOffsetForAddress(Address addr);
-
- // Updates the allocation pointer to the relocation top after a mark-compact
- // collection.
- virtual void MCCommitRelocationInfo() = 0;
-
- // Releases half of unused pages.
- void Shrink();
-
- // Ensures that the capacity is at least 'capacity'. Returns false on failure.
- bool EnsureCapacity(int capacity);
-
-#ifdef ENABLE_HEAP_PROTECTION
- // Protect/unprotect the space by marking it read-only/writable.
- void Protect();
- void Unprotect();
-#endif
-
-#ifdef DEBUG
- // Print meta info and objects in this space.
- virtual void Print();
-
- // Verify integrity of this space.
- virtual void Verify(ObjectVisitor* visitor);
-
- // Overridden by subclasses to verify space-specific object
- // properties (e.g., only maps or free-list nodes are in map space).
- virtual void VerifyObject(HeapObject* obj) {}
-
- // Report code object related statistics
- void CollectCodeStatistics();
- static void ReportCodeStatistics();
- static void ResetCodeStatistics();
-#endif
-
- // Returns the page of the allocation pointer.
- Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
-
- void RelinkPageListInChunkOrder(bool deallocate_blocks);
-
- protected:
- // Maximum capacity of this space.
- intptr_t max_capacity_;
-
- // Accounting information for this space.
- AllocationStats accounting_stats_;
-
- // The first page in this space.
- Page* first_page_;
-
- // The last page in this space. Initially set in Setup, updated in
- // Expand and Shrink.
- Page* last_page_;
-
- // True if pages owned by this space are linked in chunk-order.
- // See comment for class MemoryAllocator for definition of chunk-order.
- bool page_list_is_chunk_ordered_;
-
- // Normal allocation information.
- AllocationInfo allocation_info_;
-
- // Relocation information during mark-compact collections.
- AllocationInfo mc_forwarding_info_;
-
- // Bytes of each page that cannot be allocated. Possibly non-zero
- // for pages in spaces with only fixed-size objects. Always zero
- // for pages in spaces with variable sized objects (those pages are
- // padded with free-list nodes).
- int page_extra_;
-
- // Sets allocation pointer to a page bottom.
- static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
-
- // Returns the top page specified by an allocation info structure.
- static Page* TopPageOf(AllocationInfo alloc_info) {
- return Page::FromAllocationTop(alloc_info.limit);
- }
-
- int CountPagesToTop() {
- Page* p = Page::FromAllocationTop(allocation_info_.top);
- PageIterator it(this, PageIterator::ALL_PAGES);
- int counter = 1;
- while (it.has_next()) {
- if (it.next() == p) return counter;
- counter++;
- }
- UNREACHABLE();
- return -1;
- }
-
- // Expands the space by allocating a fixed number of pages. Returns false if
- // it cannot allocate requested number of pages from OS. Newly allocated
- // pages are append to the last_page;
- bool Expand(Page* last_page);
-
- // Generic fast case allocation function that tries linear allocation in
- // the top page of 'alloc_info'. Returns NULL on failure.
- inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
- int size_in_bytes);
-
- // During normal allocation or deserialization, roll to the next page in
- // the space (there is assumed to be one) and allocate there. This
- // function is space-dependent.
- virtual HeapObject* AllocateInNextPage(Page* current_page,
- int size_in_bytes) = 0;
-
- // Slow path of AllocateRaw. This function is space-dependent.
- MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
-
- // Slow path of MCAllocateRaw.
- MUST_USE_RESULT HeapObject* SlowMCAllocateRaw(int size_in_bytes);
-
-#ifdef DEBUG
- // Returns the number of total pages in this space.
- int CountTotalPages();
-#endif
- private:
-
- // Returns a pointer to the page of the relocation pointer.
- Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
-
- friend class PageIterator;
-};
-
-
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
-class NumberAndSizeInfo BASE_EMBEDDED {
- public:
- NumberAndSizeInfo() : number_(0), bytes_(0) {}
-
- int number() const { return number_; }
- void increment_number(int num) { number_ += num; }
-
- int bytes() const { return bytes_; }
- void increment_bytes(int size) { bytes_ += size; }
-
- void clear() {
- number_ = 0;
- bytes_ = 0;
- }
-
- private:
- int number_;
- int bytes_;
-};
-
-
-// HistogramInfo class for recording a single "bar" of a histogram. This
-// class is used for collecting statistics to print to stdout (when compiled
-// with DEBUG) or to the log file (when compiled with
-// ENABLE_LOGGING_AND_PROFILING).
-class HistogramInfo: public NumberAndSizeInfo {
- public:
- HistogramInfo() : NumberAndSizeInfo() {}
-
- const char* name() { return name_; }
- void set_name(const char* name) { name_ = name; }
-
- private:
- const char* name_;
-};
-#endif
-
-
-// -----------------------------------------------------------------------------
-// SemiSpace in young generation
-//
-// A semispace is a contiguous chunk of memory. The mark-compact collector
-// uses the memory in the from space as a marking stack when tracing live
-// objects.
-
-class SemiSpace : public Space {
- public:
- // Constructor.
- explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) {
- start_ = NULL;
- age_mark_ = NULL;
- }
-
- // Sets up the semispace using the given chunk.
- bool Setup(Address start, int initial_capacity, int maximum_capacity);
-
- // Tear down the space. Heap memory was not allocated by the space, so it
- // is not deallocated here.
- void TearDown();
-
- // True if the space has been set up but not torn down.
- bool HasBeenSetup() { return start_ != NULL; }
-
- // Grow the size of the semispace by committing extra virtual memory.
- // Assumes that the caller has checked that the semispace has not reached
- // its maximum capacity (and thus there is space available in the reserved
- // address range to grow).
- bool Grow();
-
- // Grow the semispace to the new capacity. The new capacity
- // requested must be larger than the current capacity.
- bool GrowTo(int new_capacity);
-
- // Shrinks the semispace to the new capacity. The new capacity
- // requested must be more than the amount of used memory in the
- // semispace and less than the current capacity.
- bool ShrinkTo(int new_capacity);
-
- // Returns the start address of the space.
- Address low() { return start_; }
- // Returns one past the end address of the space.
- Address high() { return low() + capacity_; }
-
- // Age mark accessors.
- Address age_mark() { return age_mark_; }
- void set_age_mark(Address mark) { age_mark_ = mark; }
-
- // True if the address is in the address range of this semispace (not
- // necessarily below the allocation pointer).
- bool Contains(Address a) {
- return (reinterpret_cast<uintptr_t>(a) & address_mask_)
- == reinterpret_cast<uintptr_t>(start_);
- }
-
- // True if the object is a heap object in the address range of this
- // semispace (not necessarily below the allocation pointer).
- bool Contains(Object* o) {
- return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
- }
-
- // The offset of an address from the beginning of the space.
- int SpaceOffsetForAddress(Address addr) {
- return static_cast<int>(addr - low());
- }
-
- // If we don't have these here then SemiSpace will be abstract. However
- // they should never be called.
- virtual intptr_t Size() {
- UNREACHABLE();
- return 0;
- }
-
- virtual bool ReserveSpace(int bytes) {
- UNREACHABLE();
- return false;
- }
-
- bool is_committed() { return committed_; }
- bool Commit();
- bool Uncommit();
-
-#ifdef ENABLE_HEAP_PROTECTION
- // Protect/unprotect the space by marking it read-only/writable.
- virtual void Protect() {}
- virtual void Unprotect() {}
-#endif
-
-#ifdef DEBUG
- virtual void Print();
- virtual void Verify();
-#endif
-
- // Returns the current capacity of the semi space.
- int Capacity() { return capacity_; }
-
- // Returns the maximum capacity of the semi space.
- int MaximumCapacity() { return maximum_capacity_; }
-
- // Returns the initial capacity of the semi space.
- int InitialCapacity() { return initial_capacity_; }
-
- private:
- // The current and maximum capacity of the space.
- int capacity_;
- int maximum_capacity_;
- int initial_capacity_;
-
- // The start address of the space.
- Address start_;
- // Used to govern object promotion during mark-compact collection.
- Address age_mark_;
-
- // Masks and comparison values to test for containment in this semispace.
- uintptr_t address_mask_;
- uintptr_t object_mask_;
- uintptr_t object_expected_;
-
- bool committed_;
-
- public:
- TRACK_MEMORY("SemiSpace")
-};
-
-
-// A SemiSpaceIterator is an ObjectIterator that iterates over the active
-// semispace of the heap's new space. It iterates over the objects in the
-// semispace from a given start address (defaulting to the bottom of the
-// semispace) to the top of the semispace. New objects allocated after the
-// iterator is created are not iterated.
-class SemiSpaceIterator : public ObjectIterator {
- public:
- // Create an iterator over the objects in the given space. If no start
- // address is given, the iterator starts from the bottom of the space. If
- // no size function is given, the iterator calls Object::Size().
- explicit SemiSpaceIterator(NewSpace* space);
- SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
- SemiSpaceIterator(NewSpace* space, Address start);
-
- HeapObject* next() {
- if (current_ == limit_) return NULL;
-
- HeapObject* object = HeapObject::FromAddress(current_);
- int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
-
- current_ += size;
- return object;
- }
-
- // Implementation of the ObjectIterator functions.
- virtual HeapObject* next_object() { return next(); }
-
- private:
- void Initialize(NewSpace* space, Address start, Address end,
- HeapObjectCallback size_func);
-
- // The semispace.
- SemiSpace* space_;
- // The current iteration point.
- Address current_;
- // The end of iteration.
- Address limit_;
- // The callback function.
- HeapObjectCallback size_func_;
-};
-
-
-// -----------------------------------------------------------------------------
-// The young generation space.
-//
-// The new space consists of a contiguous pair of semispaces. It simply
-// forwards most functions to the appropriate semispace.
-
-class NewSpace : public Space {
- public:
- // Constructor.
- explicit NewSpace(Heap* heap)
- : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
- to_space_(heap),
- from_space_(heap) {}
-
- // Sets up the new space using the given chunk.
- bool Setup(Address start, int size);
-
- // Tears down the space. Heap memory was not allocated by the space, so it
- // is not deallocated here.
- void TearDown();
-
- // True if the space has been set up but not torn down.
- bool HasBeenSetup() {
- return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
- }
-
- // Flip the pair of spaces.
- void Flip();
-
- // Grow the capacity of the semispaces. Assumes that they are not at
- // their maximum capacity.
- void Grow();
-
- // Shrink the capacity of the semispaces.
- void Shrink();
-
- // True if the address or object lies in the address range of either
- // semispace (not necessarily below the allocation pointer).
- bool Contains(Address a) {
- return (reinterpret_cast<uintptr_t>(a) & address_mask_)
- == reinterpret_cast<uintptr_t>(start_);
- }
- bool Contains(Object* o) {
- return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
- }
-
- // Return the allocated bytes in the active semispace.
- virtual intptr_t Size() { return static_cast<int>(top() - bottom()); }
- // The same, but returning an int. We have to have the one that returns
- // intptr_t because it is inherited, but if we know we are dealing with the
- // new space, which can't get as big as the other spaces then this is useful:
- int SizeAsInt() { return static_cast<int>(Size()); }
-
- // Return the current capacity of a semispace.
- intptr_t Capacity() {
- ASSERT(to_space_.Capacity() == from_space_.Capacity());
- return to_space_.Capacity();
- }
-
- // Return the total amount of memory committed for new space.
- intptr_t CommittedMemory() {
- if (from_space_.is_committed()) return 2 * Capacity();
- return Capacity();
- }
-
- // Return the available bytes without growing in the active semispace.
- intptr_t Available() { return Capacity() - Size(); }
-
- // Return the maximum capacity of a semispace.
- int MaximumCapacity() {
- ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
- return to_space_.MaximumCapacity();
- }
-
- // Returns the initial capacity of a semispace.
- int InitialCapacity() {
- ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
- return to_space_.InitialCapacity();
- }
-
- // Return the address of the allocation pointer in the active semispace.
- Address top() { return allocation_info_.top; }
- // Return the address of the first object in the active semispace.
- Address bottom() { return to_space_.low(); }
-
- // Get the age mark of the inactive semispace.
- Address age_mark() { return from_space_.age_mark(); }
- // Set the age mark in the active semispace.
- void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
-
- // The start address of the space and a bit mask. Anding an address in the
- // new space with the mask will result in the start address.
- Address start() { return start_; }
- uintptr_t mask() { return address_mask_; }
-
- // The allocation top and limit addresses.
- Address* allocation_top_address() { return &allocation_info_.top; }
- Address* allocation_limit_address() { return &allocation_info_.limit; }
-
- MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) {
- return AllocateRawInternal(size_in_bytes, &allocation_info_);
- }
-
- // Allocate the requested number of bytes for relocation during mark-compact
- // collection.
- MUST_USE_RESULT MaybeObject* MCAllocateRaw(int size_in_bytes) {
- return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
- }
-
- // Reset the allocation pointer to the beginning of the active semispace.
- void ResetAllocationInfo();
- // Reset the reloction pointer to the bottom of the inactive semispace in
- // preparation for mark-compact collection.
- void MCResetRelocationInfo();
- // Update the allocation pointer in the active semispace after a
- // mark-compact collection.
- void MCCommitRelocationInfo();
-
- // Get the extent of the inactive semispace (for use as a marking stack).
- Address FromSpaceLow() { return from_space_.low(); }
- Address FromSpaceHigh() { return from_space_.high(); }
-
- // Get the extent of the active semispace (to sweep newly copied objects
- // during a scavenge collection).
- Address ToSpaceLow() { return to_space_.low(); }
- Address ToSpaceHigh() { return to_space_.high(); }
-
- // Offsets from the beginning of the semispaces.
- int ToSpaceOffsetForAddress(Address a) {
- return to_space_.SpaceOffsetForAddress(a);
- }
- int FromSpaceOffsetForAddress(Address a) {
- return from_space_.SpaceOffsetForAddress(a);
- }
-
- // True if the object is a heap object in the address range of the
- // respective semispace (not necessarily below the allocation pointer of the
- // semispace).
- bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
- bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
-
- bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
- bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
-
- virtual bool ReserveSpace(int bytes);
-
- // Resizes a sequential string which must be the most recent thing that was
- // allocated in new space.
- template <typename StringType>
- inline void ShrinkStringAtAllocationBoundary(String* string, int len);
-
-#ifdef ENABLE_HEAP_PROTECTION
- // Protect/unprotect the space by marking it read-only/writable.
- virtual void Protect();
- virtual void Unprotect();
-#endif
-
-#ifdef DEBUG
- // Verify the active semispace.
- virtual void Verify();
- // Print the active semispace.
- virtual void Print() { to_space_.Print(); }
-#endif
-
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
- // Iterates the active semispace to collect statistics.
- void CollectStatistics();
- // Reports previously collected statistics of the active semispace.
- void ReportStatistics();
- // Clears previously collected statistics.
- void ClearHistograms();
-
- // Record the allocation or promotion of a heap object. Note that we don't
- // record every single allocation, but only those that happen in the
- // to space during a scavenge GC.
- void RecordAllocation(HeapObject* obj);
- void RecordPromotion(HeapObject* obj);
-#endif
-
- // Return whether the operation succeded.
- bool CommitFromSpaceIfNeeded() {
- if (from_space_.is_committed()) return true;
- return from_space_.Commit();
- }
-
- bool UncommitFromSpace() {
- if (!from_space_.is_committed()) return true;
- return from_space_.Uncommit();
- }
-
- private:
- // The semispaces.
- SemiSpace to_space_;
- SemiSpace from_space_;
-
- // Start address and bit mask for containment testing.
- Address start_;
- uintptr_t address_mask_;
- uintptr_t object_mask_;
- uintptr_t object_expected_;
-
- // Allocation pointer and limit for normal allocation and allocation during
- // mark-compact collection.
- AllocationInfo allocation_info_;
- AllocationInfo mc_forwarding_info_;
-
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
- HistogramInfo* allocated_histogram_;
- HistogramInfo* promoted_histogram_;
-#endif
-
- // Implementation of AllocateRaw and MCAllocateRaw.
- MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(
- int size_in_bytes,
- AllocationInfo* alloc_info);
-
- friend class SemiSpaceIterator;
-
- public:
- TRACK_MEMORY("NewSpace")
-};
-
-
-// -----------------------------------------------------------------------------
-// Free lists for old object spaces
-//
-// Free-list nodes are free blocks in the heap. They look like heap objects
-// (free-list node pointers have the heap object tag, and they have a map like
-// a heap object). They have a size and a next pointer. The next pointer is
-// the raw address of the next free list node (or NULL).
-class FreeListNode: public HeapObject {
- public:
- // Obtain a free-list node from a raw address. This is not a cast because
- // it does not check nor require that the first word at the address is a map
- // pointer.
- static FreeListNode* FromAddress(Address address) {
- return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
- }
-
- static inline bool IsFreeListNode(HeapObject* object);
-
- // Set the size in bytes, which can be read with HeapObject::Size(). This
- // function also writes a map to the first word of the block so that it
- // looks like a heap object to the garbage collector and heap iteration
- // functions.
- void set_size(Heap* heap, int size_in_bytes);
-
- // Accessors for the next field.
- inline Address next(Heap* heap);
- inline void set_next(Heap* heap, Address next);
-
- private:
- static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
-};
-
-
-// The free list for the old space.
-class OldSpaceFreeList BASE_EMBEDDED {
- public:
- OldSpaceFreeList(Heap* heap, AllocationSpace owner);
-
- // Clear the free list.
- void Reset();
-
- // Return the number of bytes available on the free list.
- intptr_t available() { return available_; }
-
- // Place a node on the free list. The block of size 'size_in_bytes'
- // starting at 'start' is placed on the free list. The return value is the
- // number of bytes that have been lost due to internal fragmentation by
- // freeing the block. Bookkeeping information will be written to the block,
- // ie, its contents will be destroyed. The start address should be word
- // aligned, and the size should be a non-zero multiple of the word size.
- int Free(Address start, int size_in_bytes);
-
- // Allocate a block of size 'size_in_bytes' from the free list. The block
- // is unitialized. A failure is returned if no block is available. The
- // number of bytes lost to fragmentation is returned in the output parameter
- // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
- MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes);
-
- void MarkNodes();
-
- private:
- // The size range of blocks, in bytes. (Smaller allocations are allowed, but
- // will always result in waste.)
- static const int kMinBlockSize = 2 * kPointerSize;
- static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
-
- Heap* heap_;
-
- // The identity of the owning space, for building allocation Failure
- // objects.
- AllocationSpace owner_;
-
- // Total available bytes in all blocks on this free list.
- int available_;
-
- // Blocks are put on exact free lists in an array, indexed by size in words.
- // The available sizes are kept in an increasingly ordered list. Entries
- // corresponding to sizes < kMinBlockSize always have an empty free list
- // (but index kHead is used for the head of the size list).
- struct SizeNode {
- // Address of the head FreeListNode of the implied block size or NULL.
- Address head_node_;
- // Size (words) of the next larger available size if head_node_ != NULL.
- int next_size_;
- };
- static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
- SizeNode free_[kFreeListsLength];
-
- // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[.
- static const int kHead = kMinBlockSize / kPointerSize - 1;
- static const int kEnd = kMaxInt;
-
- // We keep a "finger" in the size list to speed up a common pattern:
- // repeated requests for the same or increasing sizes.
- int finger_;
-
- // Starting from *prev, find and return the smallest size >= index (words),
- // or kEnd. Update *prev to be the largest size < index, or kHead.
- int FindSize(int index, int* prev) {
- int cur = free_[*prev].next_size_;
- while (cur < index) {
- *prev = cur;
- cur = free_[cur].next_size_;
- }
- return cur;
- }
-
- // Remove an existing element from the size list.
- void RemoveSize(int index) {
- int prev = kHead;
- int cur = FindSize(index, &prev);
- ASSERT(cur == index);
- free_[prev].next_size_ = free_[cur].next_size_;
- finger_ = prev;
- }
-
- // Insert a new element into the size list.
- void InsertSize(int index) {
- int prev = kHead;
- int cur = FindSize(index, &prev);
- ASSERT(cur != index);
- free_[prev].next_size_ = index;
- free_[index].next_size_ = cur;
- }
-
- // The size list is not updated during a sequence of calls to Free, but is
- // rebuilt before the next allocation.
- void RebuildSizeList();
- bool needs_rebuild_;
-
-#ifdef DEBUG
- // Does this free list contain a free block located at the address of 'node'?
- bool Contains(FreeListNode* node);
-#endif
-
- DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
-};
-
-
-// The free list for the map space.
-class FixedSizeFreeList BASE_EMBEDDED {
- public:
- FixedSizeFreeList(Heap* heap, AllocationSpace owner, int object_size);
-
- // Clear the free list.
- void Reset();
-
- // Return the number of bytes available on the free list.
- intptr_t available() { return available_; }
-
- // Place a node on the free list. The block starting at 'start' (assumed to
- // have size object_size_) is placed on the free list. Bookkeeping
- // information will be written to the block, ie, its contents will be
- // destroyed. The start address should be word aligned.
- void Free(Address start);
-
- // Allocate a fixed sized block from the free list. The block is unitialized.
- // A failure is returned if no block is available.
- MUST_USE_RESULT MaybeObject* Allocate();
-
- void MarkNodes();
-
- private:
-
- Heap* heap_;
-
- // Available bytes on the free list.
- intptr_t available_;
-
- // The head of the free list.
- Address head_;
-
- // The tail of the free list.
- Address tail_;
-
- // The identity of the owning space, for building allocation Failure
- // objects.
- AllocationSpace owner_;
-
- // The size of the objects in this space.
- int object_size_;
-
- DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
-};
-
-
-// -----------------------------------------------------------------------------
-// Old object space (excluding map objects)
-
-class OldSpace : public PagedSpace {
- public:
- // Creates an old space object with a given maximum capacity.
- // The constructor does not allocate pages from OS.
- OldSpace(Heap* heap,
- intptr_t max_capacity,
- AllocationSpace id,
- Executability executable)
- : PagedSpace(heap, max_capacity, id, executable),
- free_list_(heap, id) {
- page_extra_ = 0;
- }
-
- // The bytes available on the free list (ie, not above the linear allocation
- // pointer).
- intptr_t AvailableFree() { return free_list_.available(); }
-
- // The limit of allocation for a page in this space.
- virtual Address PageAllocationLimit(Page* page) {
- return page->ObjectAreaEnd();
- }
-
- // Give a block of memory to the space's free list. It might be added to
- // the free list or accounted as waste.
- // If add_to_freelist is false then just accounting stats are updated and
- // no attempt to add area to free list is made.
- void Free(Address start, int size_in_bytes, bool add_to_freelist) {
- accounting_stats_.DeallocateBytes(size_in_bytes);
-
- if (add_to_freelist) {
- int wasted_bytes = free_list_.Free(start, size_in_bytes);
- accounting_stats_.WasteBytes(wasted_bytes);
- }
- }
-
- virtual void DeallocateBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist);
-
- // Prepare for full garbage collection. Resets the relocation pointer and
- // clears the free list.
- virtual void PrepareForMarkCompact(bool will_compact);
-
- // Updates the allocation pointer to the relocation top after a mark-compact
- // collection.
- virtual void MCCommitRelocationInfo();
-
- virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
-
- void MarkFreeListNodes() { free_list_.MarkNodes(); }
-
-#ifdef DEBUG
- // Reports statistics for the space
- void ReportStatistics();
-#endif
-
- protected:
- // Virtual function in the superclass. Slow path of AllocateRaw.
- MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
-
- // Virtual function in the superclass. Allocate linearly at the start of
- // the page after current_page (there is assumed to be one).
- HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
-
- private:
- // The space's free list.
- OldSpaceFreeList free_list_;
-
- public:
- TRACK_MEMORY("OldSpace")
-};
-
-
-// -----------------------------------------------------------------------------
-// Old space for objects of a fixed size
-
-class FixedSpace : public PagedSpace {
- public:
- FixedSpace(Heap* heap,
- intptr_t max_capacity,
- AllocationSpace id,
- int object_size_in_bytes,
- const char* name)
- : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
- object_size_in_bytes_(object_size_in_bytes),
- name_(name),
- free_list_(heap, id, object_size_in_bytes) {
- page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
- }
-
- // The limit of allocation for a page in this space.
- virtual Address PageAllocationLimit(Page* page) {
- return page->ObjectAreaEnd() - page_extra_;
- }
-
- int object_size_in_bytes() { return object_size_in_bytes_; }
-
- // Give a fixed sized block of memory to the space's free list.
- // If add_to_freelist is false then just accounting stats are updated and
- // no attempt to add area to free list is made.
- void Free(Address start, bool add_to_freelist) {
- if (add_to_freelist) {
- free_list_.Free(start);
- }
- accounting_stats_.DeallocateBytes(object_size_in_bytes_);
- }
-
- // Prepares for a mark-compact GC.
- virtual void PrepareForMarkCompact(bool will_compact);
-
- // Updates the allocation pointer to the relocation top after a mark-compact
- // collection.
- virtual void MCCommitRelocationInfo();
-
- virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
-
- virtual void DeallocateBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist);
-
- void MarkFreeListNodes() { free_list_.MarkNodes(); }
-
-#ifdef DEBUG
- // Reports statistic info of the space
- void ReportStatistics();
-#endif
-
- protected:
- // Virtual function in the superclass. Slow path of AllocateRaw.
- MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
-
- // Virtual function in the superclass. Allocate linearly at the start of
- // the page after current_page (there is assumed to be one).
- HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
-
- void ResetFreeList() {
- free_list_.Reset();
- }
-
- private:
- // The size of objects in this space.
- int object_size_in_bytes_;
-
- // The name of this space.
- const char* name_;
-
- // The space's free list.
- FixedSizeFreeList free_list_;
-};
-
-
-// -----------------------------------------------------------------------------
-// Old space for all map objects
-
-class MapSpace : public FixedSpace {
- public:
- // Creates a map space object with a maximum capacity.
- MapSpace(Heap* heap,
- intptr_t max_capacity,
- int max_map_space_pages,
- AllocationSpace id)
- : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
- max_map_space_pages_(max_map_space_pages) {
- ASSERT(max_map_space_pages < kMaxMapPageIndex);
- }
-
- // Prepares for a mark-compact GC.
- virtual void PrepareForMarkCompact(bool will_compact);
-
- // Given an index, returns the page address.
- Address PageAddress(int page_index) { return page_addresses_[page_index]; }
-
- static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
-
- // Are map pointers encodable into map word?
- bool MapPointersEncodable() {
- if (!FLAG_use_big_map_space) {
- ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
- return true;
- }
- return CountPagesToTop() <= max_map_space_pages_;
- }
-
- // Should be called after forced sweep to find out if map space needs
- // compaction.
- bool NeedsCompaction(int live_maps) {
- return !MapPointersEncodable() && live_maps <= CompactionThreshold();
- }
-
- Address TopAfterCompaction(int live_maps) {
- ASSERT(NeedsCompaction(live_maps));
-
- int pages_left = live_maps / kMapsPerPage;
- PageIterator it(this, PageIterator::ALL_PAGES);
- while (pages_left-- > 0) {
- ASSERT(it.has_next());
- it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
- }
- ASSERT(it.has_next());
- Page* top_page = it.next();
- top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
- ASSERT(top_page->is_valid());
-
- int offset = live_maps % kMapsPerPage * Map::kSize;
- Address top = top_page->ObjectAreaStart() + offset;
- ASSERT(top < top_page->ObjectAreaEnd());
- ASSERT(Contains(top));
-
- return top;
- }
-
- void FinishCompaction(Address new_top, int live_maps) {
- Page* top_page = Page::FromAddress(new_top);
- ASSERT(top_page->is_valid());
-
- SetAllocationInfo(&allocation_info_, top_page);
- allocation_info_.top = new_top;
-
- int new_size = live_maps * Map::kSize;
- accounting_stats_.DeallocateBytes(accounting_stats_.Size());
- accounting_stats_.AllocateBytes(new_size);
-
- // Flush allocation watermarks.
- for (Page* p = first_page_; p != top_page; p = p->next_page()) {
- p->SetAllocationWatermark(p->AllocationTop());
- }
- top_page->SetAllocationWatermark(new_top);
-
-#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- intptr_t actual_size = 0;
- for (Page* p = first_page_; p != top_page; p = p->next_page())
- actual_size += kMapsPerPage * Map::kSize;
- actual_size += (new_top - top_page->ObjectAreaStart());
- ASSERT(accounting_stats_.Size() == actual_size);
- }
-#endif
-
- Shrink();
- ResetFreeList();
- }
-
- protected:
-#ifdef DEBUG
- virtual void VerifyObject(HeapObject* obj);
-#endif
-
- private:
- static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
-
- // Do map space compaction if there is a page gap.
- int CompactionThreshold() {
- return kMapsPerPage * (max_map_space_pages_ - 1);
- }
-
- const int max_map_space_pages_;
-
- // An array of page start address in a map space.
- Address page_addresses_[kMaxMapPageIndex];
-
- public:
- TRACK_MEMORY("MapSpace")
-};
-
-
-// -----------------------------------------------------------------------------
-// Old space for all global object property cell objects
-
-class CellSpace : public FixedSpace {
- public:
- // Creates a property cell space object with a maximum capacity.
- CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
- : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
- {}
-
- protected:
-#ifdef DEBUG
- virtual void VerifyObject(HeapObject* obj);
-#endif
-
- public:
- TRACK_MEMORY("CellSpace")
-};
-
-
-// -----------------------------------------------------------------------------
-// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
-// the large object space. A large object is allocated from OS heap with
-// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
-// A large object always starts at Page::kObjectStartOffset to a page.
-// Large objects do not move during garbage collections.
-
-// A LargeObjectChunk holds exactly one large object page with exactly one
-// large object.
-class LargeObjectChunk {
- public:
- // Allocates a new LargeObjectChunk that contains a large object page
- // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
- // object) bytes after the object area start of that page.
- static LargeObjectChunk* New(int size_in_bytes, Executability executable);
-
- // Free the memory associated with the chunk.
- inline void Free(Executability executable);
-
- // Interpret a raw address as a large object chunk.
- static LargeObjectChunk* FromAddress(Address address) {
- return reinterpret_cast<LargeObjectChunk*>(address);
- }
-
- // Returns the address of this chunk.
- Address address() { return reinterpret_cast<Address>(this); }
-
- // Accessors for the fields of the chunk.
- LargeObjectChunk* next() { return next_; }
- void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
- size_t size() { return size_ & ~Page::kPageFlagMask; }
-
- // Compute the start address in the chunk.
- inline Address GetStartAddress();
-
- // Returns the object in this chunk.
- HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); }
-
- // Given a requested size returns the physical size of a chunk to be
- // allocated.
- static int ChunkSizeFor(int size_in_bytes);
-
- // Given a chunk size, returns the object size it can accommodate. Used by
- // LargeObjectSpace::Available.
- static intptr_t ObjectSizeFor(intptr_t chunk_size) {
- if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
- return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
- }
-
- private:
- // A pointer to the next large object chunk in the space or NULL.
- LargeObjectChunk* next_;
-
- // The total size of this chunk.
- size_t size_;
-
- public:
- TRACK_MEMORY("LargeObjectChunk")
-};
-
-
-class LargeObjectSpace : public Space {
- public:
- LargeObjectSpace(Heap* heap, AllocationSpace id);
- virtual ~LargeObjectSpace() {}
-
- // Initializes internal data structures.
- bool Setup();
-
- // Releases internal resources, frees objects in this space.
- void TearDown();
-
- // Allocates a (non-FixedArray, non-Code) large object.
- MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes);
- // Allocates a large Code object.
- MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes);
- // Allocates a large FixedArray.
- MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);
-
- // Available bytes for objects in this space.
- inline intptr_t Available();
-
- virtual intptr_t Size() {
- return size_;
- }
-
- virtual intptr_t SizeOfObjects() {
- return objects_size_;
- }
-
- int PageCount() {
- return page_count_;
- }
-
- // Finds an object for a given address, returns Failure::Exception()
- // if it is not found. The function iterates through all objects in this
- // space, may be slow.
- MaybeObject* FindObject(Address a);
-
- // Finds a large object page containing the given pc, returns NULL
- // if such a page doesn't exist.
- LargeObjectChunk* FindChunkContainingPc(Address pc);
-
- // Iterates objects covered by dirty regions.
- void IterateDirtyRegions(ObjectSlotCallback func);
-
- // Frees unmarked objects.
- void FreeUnmarkedObjects();
-
- // Checks whether a heap object is in this space; O(1).
- bool Contains(HeapObject* obj);
-
- // Checks whether the space is empty.
- bool IsEmpty() { return first_chunk_ == NULL; }
-
- // See the comments for ReserveSpace in the Space class. This has to be
- // called after ReserveSpace has been called on the paged spaces, since they
- // may use some memory, leaving less for large objects.
- virtual bool ReserveSpace(int bytes);
-
-#ifdef ENABLE_HEAP_PROTECTION
- // Protect/unprotect the space by marking it read-only/writable.
- void Protect();
- void Unprotect();
-#endif
-
-#ifdef DEBUG
- virtual void Verify();
- virtual void Print();
- void ReportStatistics();
- void CollectCodeStatistics();
-#endif
- // Checks whether an address is in the object area in this space. It
- // iterates all objects in the space. May be slow.
- bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
-
- private:
- // The head of the linked list of large object chunks.
- LargeObjectChunk* first_chunk_;
- intptr_t size_; // allocated bytes
- int page_count_; // number of chunks
- intptr_t objects_size_; // size of objects
-
- // Shared implementation of AllocateRaw, AllocateRawCode and
- // AllocateRawFixedArray.
- MUST_USE_RESULT MaybeObject* AllocateRawInternal(int requested_size,
- int object_size,
- Executability executable);
-
- friend class LargeObjectIterator;
-
- public:
- TRACK_MEMORY("LargeObjectSpace")
-};
-
-
-class LargeObjectIterator: public ObjectIterator {
- public:
- explicit LargeObjectIterator(LargeObjectSpace* space);
- LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
-
- HeapObject* next();
-
- // implementation of ObjectIterator.
- virtual HeapObject* next_object() { return next(); }
-
- private:
- LargeObjectChunk* current_;
- HeapObjectCallback size_func_;
-};
-
-
-#ifdef DEBUG
-struct CommentStatistic {
- const char* comment;
- int size;
- int count;
- void Clear() {
- comment = NULL;
- size = 0;
- count = 0;
- }
- // Must be small, since an iteration is used for lookup.
- static const int kMaxComments = 64;
-};
-#endif
-
-
-} } // namespace v8::internal
-
-#endif // V8_SPACES_H_
diff --git a/src/3rdparty/v8/src/splay-tree-inl.h b/src/3rdparty/v8/src/splay-tree-inl.h
deleted file mode 100644
index 9c2287e..0000000
--- a/src/3rdparty/v8/src/splay-tree-inl.h
+++ /dev/null
@@ -1,310 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SPLAY_TREE_INL_H_
-#define V8_SPLAY_TREE_INL_H_
-
-#include "splay-tree.h"
-
-namespace v8 {
-namespace internal {
-
-
-template<typename Config, class Allocator>
-SplayTree<Config, Allocator>::~SplayTree() {
- NodeDeleter deleter;
- ForEachNode(&deleter);
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::Insert(const Key& key, Locator* locator) {
- if (is_empty()) {
- // If the tree is empty, insert the new node.
- root_ = new Node(key, Config::kNoValue);
- } else {
- // Splay on the key to move the last node on the search path
- // for the key to the root of the tree.
- Splay(key);
- // Ignore repeated insertions with the same key.
- int cmp = Config::Compare(key, root_->key_);
- if (cmp == 0) {
- locator->bind(root_);
- return false;
- }
- // Insert the new node.
- Node* node = new Node(key, Config::kNoValue);
- InsertInternal(cmp, node);
- }
- locator->bind(root_);
- return true;
-}
-
-
-template<typename Config, class Allocator>
-void SplayTree<Config, Allocator>::InsertInternal(int cmp, Node* node) {
- if (cmp > 0) {
- node->left_ = root_;
- node->right_ = root_->right_;
- root_->right_ = NULL;
- } else {
- node->right_ = root_;
- node->left_ = root_->left_;
- root_->left_ = NULL;
- }
- root_ = node;
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::FindInternal(const Key& key) {
- if (is_empty())
- return false;
- Splay(key);
- return Config::Compare(key, root_->key_) == 0;
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::Find(const Key& key, Locator* locator) {
- if (FindInternal(key)) {
- locator->bind(root_);
- return true;
- } else {
- return false;
- }
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::FindGreatestLessThan(const Key& key,
- Locator* locator) {
- if (is_empty())
- return false;
- // Splay on the key to move the node with the given key or the last
- // node on the search path to the top of the tree.
- Splay(key);
- // Now the result is either the root node or the greatest node in
- // the left subtree.
- int cmp = Config::Compare(root_->key_, key);
- if (cmp <= 0) {
- locator->bind(root_);
- return true;
- } else {
- Node* temp = root_;
- root_ = root_->left_;
- bool result = FindGreatest(locator);
- root_ = temp;
- return result;
- }
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::FindLeastGreaterThan(const Key& key,
- Locator* locator) {
- if (is_empty())
- return false;
- // Splay on the key to move the node with the given key or the last
- // node on the search path to the top of the tree.
- Splay(key);
- // Now the result is either the root node or the least node in
- // the right subtree.
- int cmp = Config::Compare(root_->key_, key);
- if (cmp >= 0) {
- locator->bind(root_);
- return true;
- } else {
- Node* temp = root_;
- root_ = root_->right_;
- bool result = FindLeast(locator);
- root_ = temp;
- return result;
- }
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::FindGreatest(Locator* locator) {
- if (is_empty())
- return false;
- Node* current = root_;
- while (current->right_ != NULL)
- current = current->right_;
- locator->bind(current);
- return true;
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::FindLeast(Locator* locator) {
- if (is_empty())
- return false;
- Node* current = root_;
- while (current->left_ != NULL)
- current = current->left_;
- locator->bind(current);
- return true;
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::Move(const Key& old_key,
- const Key& new_key) {
- if (!FindInternal(old_key))
- return false;
- Node* node_to_move = root_;
- RemoveRootNode(old_key);
- Splay(new_key);
- int cmp = Config::Compare(new_key, root_->key_);
- if (cmp == 0) {
- // A node with the target key already exists.
- delete node_to_move;
- return false;
- }
- node_to_move->key_ = new_key;
- InsertInternal(cmp, node_to_move);
- return true;
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::Remove(const Key& key) {
- if (!FindInternal(key))
- return false;
- Node* node_to_remove = root_;
- RemoveRootNode(key);
- delete node_to_remove;
- return true;
-}
-
-
-template<typename Config, class Allocator>
-void SplayTree<Config, Allocator>::RemoveRootNode(const Key& key) {
- if (root_->left_ == NULL) {
- // No left child, so the new tree is just the right child.
- root_ = root_->right_;
- } else {
- // Left child exists.
- Node* right = root_->right_;
- // Make the original left child the new root.
- root_ = root_->left_;
- // Splay to make sure that the new root has an empty right child.
- Splay(key);
- // Insert the original right child as the right child of the new
- // root.
- root_->right_ = right;
- }
-}
-
-
-template<typename Config, class Allocator>
-void SplayTree<Config, Allocator>::Splay(const Key& key) {
- if (is_empty())
- return;
- Node dummy_node(Config::kNoKey, Config::kNoValue);
- // Create a dummy node. The use of the dummy node is a bit
- // counter-intuitive: The right child of the dummy node will hold
- // the L tree of the algorithm. The left child of the dummy node
- // will hold the R tree of the algorithm. Using a dummy node, left
- // and right will always be nodes and we avoid special cases.
- Node* dummy = &dummy_node;
- Node* left = dummy;
- Node* right = dummy;
- Node* current = root_;
- while (true) {
- int cmp = Config::Compare(key, current->key_);
- if (cmp < 0) {
- if (current->left_ == NULL)
- break;
- if (Config::Compare(key, current->left_->key_) < 0) {
- // Rotate right.
- Node* temp = current->left_;
- current->left_ = temp->right_;
- temp->right_ = current;
- current = temp;
- if (current->left_ == NULL)
- break;
- }
- // Link right.
- right->left_ = current;
- right = current;
- current = current->left_;
- } else if (cmp > 0) {
- if (current->right_ == NULL)
- break;
- if (Config::Compare(key, current->right_->key_) > 0) {
- // Rotate left.
- Node* temp = current->right_;
- current->right_ = temp->left_;
- temp->left_ = current;
- current = temp;
- if (current->right_ == NULL)
- break;
- }
- // Link left.
- left->right_ = current;
- left = current;
- current = current->right_;
- } else {
- break;
- }
- }
- // Assemble.
- left->right_ = current->left_;
- right->left_ = current->right_;
- current->left_ = dummy->right_;
- current->right_ = dummy->left_;
- root_ = current;
-}
-
-
-template <typename Config, class Allocator> template <class Callback>
-void SplayTree<Config, Allocator>::ForEach(Callback* callback) {
- NodeToPairAdaptor<Callback> callback_adaptor(callback);
- ForEachNode(&callback_adaptor);
-}
-
-
-template <typename Config, class Allocator> template <class Callback>
-void SplayTree<Config, Allocator>::ForEachNode(Callback* callback) {
- // Pre-allocate some space for tiny trees.
- List<Node*, Allocator> nodes_to_visit(10);
- if (root_ != NULL) nodes_to_visit.Add(root_);
- int pos = 0;
- while (pos < nodes_to_visit.length()) {
- Node* node = nodes_to_visit[pos++];
- if (node->left() != NULL) nodes_to_visit.Add(node->left());
- if (node->right() != NULL) nodes_to_visit.Add(node->right());
- callback->Call(node);
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_SPLAY_TREE_INL_H_
diff --git a/src/3rdparty/v8/src/splay-tree.h b/src/3rdparty/v8/src/splay-tree.h
deleted file mode 100644
index c265276..0000000
--- a/src/3rdparty/v8/src/splay-tree.h
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SPLAY_TREE_H_
-#define V8_SPLAY_TREE_H_
-
-namespace v8 {
-namespace internal {
-
-
-// A splay tree. The config type parameter encapsulates the different
-// configurations of a concrete splay tree:
-//
-// typedef Key: the key type
-// typedef Value: the value type
-// static const kNoKey: the dummy key used when no key is set
-// static const kNoValue: the dummy value used to initialize nodes
-// int (Compare)(Key& a, Key& b) -> {-1, 0, 1}: comparison function
-//
-// The tree is also parameterized by an allocation policy
-// (Allocator). The policy is used for allocating lists in the C free
-// store or the zone; see zone.h.
-
-// Forward defined as
-// template <typename Config, class Allocator = FreeStoreAllocationPolicy>
-// class SplayTree;
-template <typename Config, class Allocator>
-class SplayTree {
- public:
- typedef typename Config::Key Key;
- typedef typename Config::Value Value;
-
- class Locator;
-
- SplayTree() : root_(NULL) { }
- ~SplayTree();
-
- INLINE(void* operator new(size_t size)) {
- return Allocator::New(static_cast<int>(size));
- }
- INLINE(void operator delete(void* p, size_t)) { return Allocator::Delete(p); }
-
- // Inserts the given key in this tree with the given value. Returns
- // true if a node was inserted, otherwise false. If found the locator
- // is enabled and provides access to the mapping for the key.
- bool Insert(const Key& key, Locator* locator);
-
- // Looks up the key in this tree and returns true if it was found,
- // otherwise false. If the node is found the locator is enabled and
- // provides access to the mapping for the key.
- bool Find(const Key& key, Locator* locator);
-
- // Finds the mapping with the greatest key less than or equal to the
- // given key.
- bool FindGreatestLessThan(const Key& key, Locator* locator);
-
- // Find the mapping with the greatest key in this tree.
- bool FindGreatest(Locator* locator);
-
- // Finds the mapping with the least key greater than or equal to the
- // given key.
- bool FindLeastGreaterThan(const Key& key, Locator* locator);
-
- // Find the mapping with the least key in this tree.
- bool FindLeast(Locator* locator);
-
- // Move the node from one key to another.
- bool Move(const Key& old_key, const Key& new_key);
-
- // Remove the node with the given key from the tree.
- bool Remove(const Key& key);
-
- bool is_empty() { return root_ == NULL; }
-
- // Perform the splay operation for the given key. Moves the node with
- // the given key to the top of the tree. If no node has the given
- // key, the last node on the search path is moved to the top of the
- // tree.
- void Splay(const Key& key);
-
- class Node {
- public:
- Node(const Key& key, const Value& value)
- : key_(key),
- value_(value),
- left_(NULL),
- right_(NULL) { }
-
- INLINE(void* operator new(size_t size)) {
- return Allocator::New(static_cast<int>(size));
- }
- INLINE(void operator delete(void* p, size_t)) {
- return Allocator::Delete(p);
- }
-
- Key key() { return key_; }
- Value value() { return value_; }
- Node* left() { return left_; }
- Node* right() { return right_; }
- private:
-
- friend class SplayTree;
- friend class Locator;
- Key key_;
- Value value_;
- Node* left_;
- Node* right_;
- };
-
- // A locator provides access to a node in the tree without actually
- // exposing the node.
- class Locator BASE_EMBEDDED {
- public:
- explicit Locator(Node* node) : node_(node) { }
- Locator() : node_(NULL) { }
- const Key& key() { return node_->key_; }
- Value& value() { return node_->value_; }
- void set_value(const Value& value) { node_->value_ = value; }
- inline void bind(Node* node) { node_ = node; }
- private:
- Node* node_;
- };
-
- template <class Callback>
- void ForEach(Callback* callback);
-
- protected:
-
- // Resets tree root. Existing nodes become unreachable.
- void ResetRoot() { root_ = NULL; }
-
- private:
- // Search for a node with a given key. If found, root_ points
- // to the node.
- bool FindInternal(const Key& key);
-
- // Inserts a node assuming that root_ is already set up.
- void InsertInternal(int cmp, Node* node);
-
- // Removes root_ node.
- void RemoveRootNode(const Key& key);
-
- template<class Callback>
- class NodeToPairAdaptor BASE_EMBEDDED {
- public:
- explicit NodeToPairAdaptor(Callback* callback)
- : callback_(callback) { }
- void Call(Node* node) {
- callback_->Call(node->key(), node->value());
- }
-
- private:
- Callback* callback_;
-
- DISALLOW_COPY_AND_ASSIGN(NodeToPairAdaptor);
- };
-
- class NodeDeleter BASE_EMBEDDED {
- public:
- NodeDeleter() { }
- void Call(Node* node) { delete node; }
-
- private:
-
- DISALLOW_COPY_AND_ASSIGN(NodeDeleter);
- };
-
- template <class Callback>
- void ForEachNode(Callback* callback);
-
- Node* root_;
-
- DISALLOW_COPY_AND_ASSIGN(SplayTree);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_SPLAY_TREE_H_
diff --git a/src/3rdparty/v8/src/string-search.cc b/src/3rdparty/v8/src/string-search.cc
deleted file mode 100644
index 3ae68b5..0000000
--- a/src/3rdparty/v8/src/string-search.cc
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "string-search.h"
-
-namespace v8 {
-namespace internal {
-
-// Storage for constants used by string-search.
-
-// Now in Isolate:
-// bad_char_shift_table()
-// good_suffix_shift_table()
-// suffix_table()
-
-}} // namespace v8::internal
diff --git a/src/3rdparty/v8/src/string-search.h b/src/3rdparty/v8/src/string-search.h
deleted file mode 100644
index 1223db0..0000000
--- a/src/3rdparty/v8/src/string-search.h
+++ /dev/null
@@ -1,568 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_STRING_SEARCH_H_
-#define V8_STRING_SEARCH_H_
-
-namespace v8 {
-namespace internal {
-
-
-//---------------------------------------------------------------------
-// String Search object.
-//---------------------------------------------------------------------
-
-// Class holding constants and methods that apply to all string search variants,
-// independently of subject and pattern char size.
-class StringSearchBase {
- protected:
- // Cap on the maximal shift in the Boyer-Moore implementation. By setting a
- // limit, we can fix the size of tables. For a needle longer than this limit,
- // search will not be optimal, since we only build tables for a suffix
- // of the string, but it is a safe approximation.
- static const int kBMMaxShift = Isolate::kBMMaxShift;
-
- // Reduce alphabet to this size.
- // One of the tables used by Boyer-Moore and Boyer-Moore-Horspool has size
- // proportional to the input alphabet. We reduce the alphabet size by
- // equating input characters modulo a smaller alphabet size. This gives
- // a potentially less efficient searching, but is a safe approximation.
- // For needles using only characters in the same Unicode 256-code point page,
- // there is no search speed degradation.
- static const int kAsciiAlphabetSize = 128;
- static const int kUC16AlphabetSize = Isolate::kUC16AlphabetSize;
-
- // Bad-char shift table stored in the state. It's length is the alphabet size.
- // For patterns below this length, the skip length of Boyer-Moore is too short
- // to compensate for the algorithmic overhead compared to simple brute force.
- static const int kBMMinPatternLength = 7;
-
- static inline bool IsAsciiString(Vector<const char>) {
- return true;
- }
-
- static inline bool IsAsciiString(Vector<const uc16> string) {
- return String::IsAscii(string.start(), string.length());
- }
-
- friend class Isolate;
-};
-
-
-template <typename PatternChar, typename SubjectChar>
-class StringSearch : private StringSearchBase {
- public:
- StringSearch(Isolate* isolate, Vector<const PatternChar> pattern)
- : isolate_(isolate),
- pattern_(pattern),
- start_(Max(0, pattern.length() - kBMMaxShift)) {
- if (sizeof(PatternChar) > sizeof(SubjectChar)) {
- if (!IsAsciiString(pattern_)) {
- strategy_ = &FailSearch;
- return;
- }
- }
- int pattern_length = pattern_.length();
- if (pattern_length < kBMMinPatternLength) {
- if (pattern_length == 1) {
- strategy_ = &SingleCharSearch;
- return;
- }
- strategy_ = &LinearSearch;
- return;
- }
- strategy_ = &InitialSearch;
- }
-
- int Search(Vector<const SubjectChar> subject, int index) {
- return strategy_(this, subject, index);
- }
-
- static inline int AlphabetSize() {
- if (sizeof(PatternChar) == 1) {
- // ASCII needle.
- return kAsciiAlphabetSize;
- } else {
- ASSERT(sizeof(PatternChar) == 2);
- // UC16 needle.
- return kUC16AlphabetSize;
- }
- }
-
- private:
- typedef int (*SearchFunction)( // NOLINT - it's not a cast!
- StringSearch<PatternChar, SubjectChar>*,
- Vector<const SubjectChar>,
- int);
-
- static int FailSearch(StringSearch<PatternChar, SubjectChar>*,
- Vector<const SubjectChar>,
- int) {
- return -1;
- }
-
- static int SingleCharSearch(StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int start_index);
-
- static int LinearSearch(StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int start_index);
-
- static int InitialSearch(StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int start_index);
-
- static int BoyerMooreHorspoolSearch(
- StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int start_index);
-
- static int BoyerMooreSearch(StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int start_index);
-
- void PopulateBoyerMooreHorspoolTable();
-
- void PopulateBoyerMooreTable();
-
- static inline int CharOccurrence(int* bad_char_occurrence,
- SubjectChar char_code) {
- if (sizeof(SubjectChar) == 1) {
- return bad_char_occurrence[static_cast<int>(char_code)];
- }
- if (sizeof(PatternChar) == 1) {
- if (static_cast<unsigned int>(char_code) > String::kMaxAsciiCharCodeU) {
- return -1;
- }
- return bad_char_occurrence[static_cast<unsigned int>(char_code)];
- }
- // Both pattern and subject are UC16. Reduce character to equivalence class.
- int equiv_class = char_code % kUC16AlphabetSize;
- return bad_char_occurrence[equiv_class];
- }
-
- // The following tables are shared by all searches.
- // TODO(lrn): Introduce a way for a pattern to keep its tables
- // between searches (e.g., for an Atom RegExp).
-
- // Store for the BoyerMoore(Horspool) bad char shift table.
- // Return a table covering the last kBMMaxShift+1 positions of
- // pattern.
- int* bad_char_table() {
- return isolate_->bad_char_shift_table();
- }
-
- // Store for the BoyerMoore good suffix shift table.
- int* good_suffix_shift_table() {
- // Return biased pointer that maps the range [start_..pattern_.length()
- // to the kGoodSuffixShiftTable array.
- return isolate_->good_suffix_shift_table() - start_;
- }
-
- // Table used temporarily while building the BoyerMoore good suffix
- // shift table.
- int* suffix_table() {
- // Return biased pointer that maps the range [start_..pattern_.length()
- // to the kSuffixTable array.
- return isolate_->suffix_table() - start_;
- }
-
- Isolate* isolate_;
- // The pattern to search for.
- Vector<const PatternChar> pattern_;
- // Pointer to implementation of the search.
- SearchFunction strategy_;
- // Cache value of Max(0, pattern_length() - kBMMaxShift)
- int start_;
-};
-
-
-//---------------------------------------------------------------------
-// Single Character Pattern Search Strategy
-//---------------------------------------------------------------------
-
-template <typename PatternChar, typename SubjectChar>
-int StringSearch<PatternChar, SubjectChar>::SingleCharSearch(
- StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int index) {
- ASSERT_EQ(1, search->pattern_.length());
- PatternChar pattern_first_char = search->pattern_[0];
- int i = index;
- if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
- const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
- memchr(subject.start() + i,
- pattern_first_char,
- subject.length() - i));
- if (pos == NULL) return -1;
- return static_cast<int>(pos - subject.start());
- } else {
- if (sizeof(PatternChar) > sizeof(SubjectChar)) {
- if (static_cast<uc16>(pattern_first_char) > String::kMaxAsciiCharCodeU) {
- return -1;
- }
- }
- SubjectChar search_char = static_cast<SubjectChar>(pattern_first_char);
- int n = subject.length();
- while (i < n) {
- if (subject[i++] == search_char) return i - 1;
- }
- return -1;
- }
-}
-
-//---------------------------------------------------------------------
-// Linear Search Strategy
-//---------------------------------------------------------------------
-
-
-template <typename PatternChar, typename SubjectChar>
-static inline bool CharCompare(const PatternChar* pattern,
- const SubjectChar* subject,
- int length) {
- ASSERT(length > 0);
- int pos = 0;
- do {
- if (pattern[pos] != subject[pos]) {
- return false;
- }
- pos++;
- } while (pos < length);
- return true;
-}
-
-
-// Simple linear search for short patterns. Never bails out.
-template <typename PatternChar, typename SubjectChar>
-int StringSearch<PatternChar, SubjectChar>::LinearSearch(
- StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int index) {
- Vector<const PatternChar> pattern = search->pattern_;
- ASSERT(pattern.length() > 1);
- int pattern_length = pattern.length();
- PatternChar pattern_first_char = pattern[0];
- int i = index;
- int n = subject.length() - pattern_length;
- while (i <= n) {
- if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
- const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
- memchr(subject.start() + i,
- pattern_first_char,
- n - i + 1));
- if (pos == NULL) return -1;
- i = static_cast<int>(pos - subject.start()) + 1;
- } else {
- if (subject[i++] != pattern_first_char) continue;
- }
- // Loop extracted to separate function to allow using return to do
- // a deeper break.
- if (CharCompare(pattern.start() + 1,
- subject.start() + i,
- pattern_length - 1)) {
- return i - 1;
- }
- }
- return -1;
-}
-
-//---------------------------------------------------------------------
-// Boyer-Moore string search
-//---------------------------------------------------------------------
-
-template <typename PatternChar, typename SubjectChar>
-int StringSearch<PatternChar, SubjectChar>::BoyerMooreSearch(
- StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int start_index) {
- Vector<const PatternChar> pattern = search->pattern_;
- int subject_length = subject.length();
- int pattern_length = pattern.length();
- // Only preprocess at most kBMMaxShift last characters of pattern.
- int start = search->start_;
-
- int* bad_char_occurence = search->bad_char_table();
- int* good_suffix_shift = search->good_suffix_shift_table();
-
- PatternChar last_char = pattern[pattern_length - 1];
- int index = start_index;
- // Continue search from i.
- while (index <= subject_length - pattern_length) {
- int j = pattern_length - 1;
- int c;
- while (last_char != (c = subject[index + j])) {
- int shift =
- j - CharOccurrence(bad_char_occurence, c);
- index += shift;
- if (index > subject_length - pattern_length) {
- return -1;
- }
- }
- while (j >= 0 && pattern[j] == (c = subject[index + j])) j--;
- if (j < 0) {
- return index;
- } else if (j < start) {
- // we have matched more than our tables allow us to be smart about.
- // Fall back on BMH shift.
- index += pattern_length - 1
- - CharOccurrence(bad_char_occurence,
- static_cast<SubjectChar>(last_char));
- } else {
- int gs_shift = good_suffix_shift[j + 1];
- int bc_occ =
- CharOccurrence(bad_char_occurence, c);
- int shift = j - bc_occ;
- if (gs_shift > shift) {
- shift = gs_shift;
- }
- index += shift;
- }
- }
-
- return -1;
-}
-
-
-template <typename PatternChar, typename SubjectChar>
-void StringSearch<PatternChar, SubjectChar>::PopulateBoyerMooreTable() {
- int pattern_length = pattern_.length();
- const PatternChar* pattern = pattern_.start();
- // Only look at the last kBMMaxShift characters of pattern (from start_
- // to pattern_length).
- int start = start_;
- int length = pattern_length - start;
-
- // Biased tables so that we can use pattern indices as table indices,
- // even if we only cover the part of the pattern from offset start.
- int* shift_table = good_suffix_shift_table();
- int* suffix_table = this->suffix_table();
-
- // Initialize table.
- for (int i = start; i < pattern_length; i++) {
- shift_table[i] = length;
- }
- shift_table[pattern_length] = 1;
- suffix_table[pattern_length] = pattern_length + 1;
-
- // Find suffixes.
- PatternChar last_char = pattern[pattern_length - 1];
- int suffix = pattern_length + 1;
- {
- int i = pattern_length;
- while (i > start) {
- PatternChar c = pattern[i - 1];
- while (suffix <= pattern_length && c != pattern[suffix - 1]) {
- if (shift_table[suffix] == length) {
- shift_table[suffix] = suffix - i;
- }
- suffix = suffix_table[suffix];
- }
- suffix_table[--i] = --suffix;
- if (suffix == pattern_length) {
- // No suffix to extend, so we check against last_char only.
- while ((i > start) && (pattern[i - 1] != last_char)) {
- if (shift_table[pattern_length] == length) {
- shift_table[pattern_length] = pattern_length - i;
- }
- suffix_table[--i] = pattern_length;
- }
- if (i > start) {
- suffix_table[--i] = --suffix;
- }
- }
- }
- }
- // Build shift table using suffixes.
- if (suffix < pattern_length) {
- for (int i = start; i <= pattern_length; i++) {
- if (shift_table[i] == length) {
- shift_table[i] = suffix - start;
- }
- if (i == suffix) {
- suffix = suffix_table[suffix];
- }
- }
- }
-}
-
-//---------------------------------------------------------------------
-// Boyer-Moore-Horspool string search.
-//---------------------------------------------------------------------
-
-template <typename PatternChar, typename SubjectChar>
-int StringSearch<PatternChar, SubjectChar>::BoyerMooreHorspoolSearch(
- StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int start_index) {
- Vector<const PatternChar> pattern = search->pattern_;
- int subject_length = subject.length();
- int pattern_length = pattern.length();
- int* char_occurrences = search->bad_char_table();
- int badness = -pattern_length;
-
- // How bad we are doing without a good-suffix table.
- PatternChar last_char = pattern[pattern_length - 1];
- int last_char_shift = pattern_length - 1 -
- CharOccurrence(char_occurrences, static_cast<SubjectChar>(last_char));
- // Perform search
- int index = start_index; // No matches found prior to this index.
- while (index <= subject_length - pattern_length) {
- int j = pattern_length - 1;
- int subject_char;
- while (last_char != (subject_char = subject[index + j])) {
- int bc_occ = CharOccurrence(char_occurrences, subject_char);
- int shift = j - bc_occ;
- index += shift;
- badness += 1 - shift; // at most zero, so badness cannot increase.
- if (index > subject_length - pattern_length) {
- return -1;
- }
- }
- j--;
- while (j >= 0 && pattern[j] == (subject[index + j])) j--;
- if (j < 0) {
- return index;
- } else {
- index += last_char_shift;
- // Badness increases by the number of characters we have
- // checked, and decreases by the number of characters we
- // can skip by shifting. It's a measure of how we are doing
- // compared to reading each character exactly once.
- badness += (pattern_length - j) - last_char_shift;
- if (badness > 0) {
- search->PopulateBoyerMooreTable();
- search->strategy_ = &BoyerMooreSearch;
- return BoyerMooreSearch(search, subject, index);
- }
- }
- }
- return -1;
-}
-
-
-template <typename PatternChar, typename SubjectChar>
-void StringSearch<PatternChar, SubjectChar>::PopulateBoyerMooreHorspoolTable() {
- int pattern_length = pattern_.length();
-
- int* bad_char_occurrence = bad_char_table();
-
- // Only preprocess at most kBMMaxShift last characters of pattern.
- int start = start_;
- // Run forwards to populate bad_char_table, so that *last* instance
- // of character equivalence class is the one registered.
- // Notice: Doesn't include the last character.
- int table_size = AlphabetSize();
- if (start == 0) { // All patterns less than kBMMaxShift in length.
- memset(bad_char_occurrence,
- -1,
- table_size * sizeof(*bad_char_occurrence));
- } else {
- for (int i = 0; i < table_size; i++) {
- bad_char_occurrence[i] = start - 1;
- }
- }
- for (int i = start; i < pattern_length - 1; i++) {
- PatternChar c = pattern_[i];
- int bucket = (sizeof(PatternChar) == 1) ? c : c % AlphabetSize();
- bad_char_occurrence[bucket] = i;
- }
-}
-
-//---------------------------------------------------------------------
-// Linear string search with bailout to BMH.
-//---------------------------------------------------------------------
-
-// Simple linear search for short patterns, which bails out if the string
-// isn't found very early in the subject. Upgrades to BoyerMooreHorspool.
-template <typename PatternChar, typename SubjectChar>
-int StringSearch<PatternChar, SubjectChar>::InitialSearch(
- StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int index) {
- Vector<const PatternChar> pattern = search->pattern_;
- int pattern_length = pattern.length();
- // Badness is a count of how much work we have done. When we have
- // done enough work we decide it's probably worth switching to a better
- // algorithm.
- int badness = -10 - (pattern_length << 2);
-
- // We know our pattern is at least 2 characters, we cache the first so
- // the common case of the first character not matching is faster.
- PatternChar pattern_first_char = pattern[0];
- for (int i = index, n = subject.length() - pattern_length; i <= n; i++) {
- badness++;
- if (badness <= 0) {
- if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
- const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
- memchr(subject.start() + i,
- pattern_first_char,
- n - i + 1));
- if (pos == NULL) {
- return -1;
- }
- i = static_cast<int>(pos - subject.start());
- } else {
- if (subject[i] != pattern_first_char) continue;
- }
- int j = 1;
- do {
- if (pattern[j] != subject[i + j]) {
- break;
- }
- j++;
- } while (j < pattern_length);
- if (j == pattern_length) {
- return i;
- }
- badness += j;
- } else {
- search->PopulateBoyerMooreHorspoolTable();
- search->strategy_ = &BoyerMooreHorspoolSearch;
- return BoyerMooreHorspoolSearch(search, subject, i);
- }
- }
- return -1;
-}
-
-
-// Perform a a single stand-alone search.
-// If searching multiple times for the same pattern, a search
-// object should be constructed once and the Search function then called
-// for each search.
-template <typename SubjectChar, typename PatternChar>
-static int SearchString(Isolate* isolate,
- Vector<const SubjectChar> subject,
- Vector<const PatternChar> pattern,
- int start_index) {
- StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
- return search.Search(subject, start_index);
-}
-
-}} // namespace v8::internal
-
-#endif // V8_STRING_SEARCH_H_
diff --git a/src/3rdparty/v8/src/string-stream.cc b/src/3rdparty/v8/src/string-stream.cc
deleted file mode 100644
index aea1420..0000000
--- a/src/3rdparty/v8/src/string-stream.cc
+++ /dev/null
@@ -1,592 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "factory.h"
-#include "string-stream.h"
-
-namespace v8 {
-namespace internal {
-
-static const int kMentionedObjectCacheMaxSize = 256;
-
-char* HeapStringAllocator::allocate(unsigned bytes) {
- space_ = NewArray<char>(bytes);
- return space_;
-}
-
-
-NoAllocationStringAllocator::NoAllocationStringAllocator(char* memory,
- unsigned size) {
- size_ = size;
- space_ = memory;
-}
-
-
-bool StringStream::Put(char c) {
- if (full()) return false;
- ASSERT(length_ < capacity_);
- // Since the trailing '\0' is not accounted for in length_ fullness is
- // indicated by a difference of 1 between length_ and capacity_. Thus when
- // reaching a difference of 2 we need to grow the buffer.
- if (length_ == capacity_ - 2) {
- unsigned new_capacity = capacity_;
- char* new_buffer = allocator_->grow(&new_capacity);
- if (new_capacity > capacity_) {
- capacity_ = new_capacity;
- buffer_ = new_buffer;
- } else {
- // Reached the end of the available buffer.
- ASSERT(capacity_ >= 5);
- length_ = capacity_ - 1; // Indicate fullness of the stream.
- buffer_[length_ - 4] = '.';
- buffer_[length_ - 3] = '.';
- buffer_[length_ - 2] = '.';
- buffer_[length_ - 1] = '\n';
- buffer_[length_] = '\0';
- return false;
- }
- }
- buffer_[length_] = c;
- buffer_[length_ + 1] = '\0';
- length_++;
- return true;
-}
-
-
-// A control character is one that configures a format element. For
-// instance, in %.5s, .5 are control characters.
-static bool IsControlChar(char c) {
- switch (c) {
- case '0': case '1': case '2': case '3': case '4': case '5':
- case '6': case '7': case '8': case '9': case '.': case '-':
- return true;
- default:
- return false;
- }
-}
-
-
-void StringStream::Add(Vector<const char> format, Vector<FmtElm> elms) {
- // If we already ran out of space then return immediately.
- if (full()) return;
- int offset = 0;
- int elm = 0;
- while (offset < format.length()) {
- if (format[offset] != '%' || elm == elms.length()) {
- Put(format[offset]);
- offset++;
- continue;
- }
- // Read this formatting directive into a temporary buffer
- EmbeddedVector<char, 24> temp;
- int format_length = 0;
- // Skip over the whole control character sequence until the
- // format element type
- temp[format_length++] = format[offset++];
- while (offset < format.length() && IsControlChar(format[offset]))
- temp[format_length++] = format[offset++];
- if (offset >= format.length())
- return;
- char type = format[offset];
- temp[format_length++] = type;
- temp[format_length] = '\0';
- offset++;
- FmtElm current = elms[elm++];
- switch (type) {
- case 's': {
- ASSERT_EQ(FmtElm::C_STR, current.type_);
- const char* value = current.data_.u_c_str_;
- Add(value);
- break;
- }
- case 'w': {
- ASSERT_EQ(FmtElm::LC_STR, current.type_);
- Vector<const uc16> value = *current.data_.u_lc_str_;
- for (int i = 0; i < value.length(); i++)
- Put(static_cast<char>(value[i]));
- break;
- }
- case 'o': {
- ASSERT_EQ(FmtElm::OBJ, current.type_);
- Object* obj = current.data_.u_obj_;
- PrintObject(obj);
- break;
- }
- case 'k': {
- ASSERT_EQ(FmtElm::INT, current.type_);
- int value = current.data_.u_int_;
- if (0x20 <= value && value <= 0x7F) {
- Put(value);
- } else if (value <= 0xff) {
- Add("\\x%02x", value);
- } else {
- Add("\\u%04x", value);
- }
- break;
- }
- case 'i': case 'd': case 'u': case 'x': case 'c': case 'X': {
- int value = current.data_.u_int_;
- EmbeddedVector<char, 24> formatted;
- int length = OS::SNPrintF(formatted, temp.start(), value);
- Add(Vector<const char>(formatted.start(), length));
- break;
- }
- case 'f': case 'g': case 'G': case 'e': case 'E': {
- double value = current.data_.u_double_;
- EmbeddedVector<char, 28> formatted;
- OS::SNPrintF(formatted, temp.start(), value);
- Add(formatted.start());
- break;
- }
- case 'p': {
- void* value = current.data_.u_pointer_;
- EmbeddedVector<char, 20> formatted;
- OS::SNPrintF(formatted, temp.start(), value);
- Add(formatted.start());
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- }
-
- // Verify that the buffer is 0-terminated
- ASSERT(buffer_[length_] == '\0');
-}
-
-
-void StringStream::PrintObject(Object* o) {
- o->ShortPrint(this);
- if (o->IsString()) {
- if (String::cast(o)->length() <= String::kMaxShortPrintLength) {
- return;
- }
- } else if (o->IsNumber() || o->IsOddball()) {
- return;
- }
- if (o->IsHeapObject()) {
- DebugObjectCache* debug_object_cache = Isolate::Current()->
- string_stream_debug_object_cache();
- for (int i = 0; i < debug_object_cache->length(); i++) {
- if ((*debug_object_cache)[i] == o) {
- Add("#%d#", i);
- return;
- }
- }
- if (debug_object_cache->length() < kMentionedObjectCacheMaxSize) {
- Add("#%d#", debug_object_cache->length());
- debug_object_cache->Add(HeapObject::cast(o));
- } else {
- Add("@%p", o);
- }
- }
-}
-
-
-void StringStream::Add(const char* format) {
- Add(CStrVector(format));
-}
-
-
-void StringStream::Add(Vector<const char> format) {
- Add(format, Vector<FmtElm>::empty());
-}
-
-
-void StringStream::Add(const char* format, FmtElm arg0) {
- const char argc = 1;
- FmtElm argv[argc] = { arg0 };
- Add(CStrVector(format), Vector<FmtElm>(argv, argc));
-}
-
-
-void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1) {
- const char argc = 2;
- FmtElm argv[argc] = { arg0, arg1 };
- Add(CStrVector(format), Vector<FmtElm>(argv, argc));
-}
-
-
-void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1,
- FmtElm arg2) {
- const char argc = 3;
- FmtElm argv[argc] = { arg0, arg1, arg2 };
- Add(CStrVector(format), Vector<FmtElm>(argv, argc));
-}
-
-
-void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1,
- FmtElm arg2, FmtElm arg3) {
- const char argc = 4;
- FmtElm argv[argc] = { arg0, arg1, arg2, arg3 };
- Add(CStrVector(format), Vector<FmtElm>(argv, argc));
-}
-
-
-SmartPointer<const char> StringStream::ToCString() const {
- char* str = NewArray<char>(length_ + 1);
- memcpy(str, buffer_, length_);
- str[length_] = '\0';
- return SmartPointer<const char>(str);
-}
-
-
-void StringStream::Log() {
- LOG(ISOLATE, StringEvent("StackDump", buffer_));
-}
-
-
-void StringStream::OutputToFile(FILE* out) {
- // Dump the output to stdout, but make sure to break it up into
- // manageable chunks to avoid losing parts of the output in the OS
- // printing code. This is a problem on Windows in particular; see
- // the VPrint() function implementations in platform-win32.cc.
- unsigned position = 0;
- for (unsigned next; (next = position + 2048) < length_; position = next) {
- char save = buffer_[next];
- buffer_[next] = '\0';
- internal::PrintF(out, "%s", &buffer_[position]);
- buffer_[next] = save;
- }
- internal::PrintF(out, "%s", &buffer_[position]);
-}
-
-
-Handle<String> StringStream::ToString() {
- return FACTORY->NewStringFromUtf8(Vector<const char>(buffer_, length_));
-}
-
-
-void StringStream::ClearMentionedObjectCache() {
- Isolate* isolate = Isolate::Current();
- isolate->set_string_stream_current_security_token(NULL);
- if (isolate->string_stream_debug_object_cache() == NULL) {
- isolate->set_string_stream_debug_object_cache(
- new List<HeapObject*, PreallocatedStorage>(0));
- }
- isolate->string_stream_debug_object_cache()->Clear();
-}
-
-
-#ifdef DEBUG
-bool StringStream::IsMentionedObjectCacheClear() {
- return (
- Isolate::Current()->string_stream_debug_object_cache()->length() == 0);
-}
-#endif
-
-
-bool StringStream::Put(String* str) {
- return Put(str, 0, str->length());
-}
-
-
-bool StringStream::Put(String* str, int start, int end) {
- StringInputBuffer name_buffer(str);
- name_buffer.Seek(start);
- for (int i = start; i < end && name_buffer.has_more(); i++) {
- int c = name_buffer.GetNext();
- if (c >= 127 || c < 32) {
- c = '?';
- }
- if (!Put(c)) {
- return false; // Output was truncated.
- }
- }
- return true;
-}
-
-
-void StringStream::PrintName(Object* name) {
- if (name->IsString()) {
- String* str = String::cast(name);
- if (str->length() > 0) {
- Put(str);
- } else {
- Add("/* anonymous */");
- }
- } else {
- Add("%o", name);
- }
-}
-
-
-void StringStream::PrintUsingMap(JSObject* js_object) {
- Map* map = js_object->map();
- if (!HEAP->Contains(map) ||
- !map->IsHeapObject() ||
- !map->IsMap()) {
- Add("<Invalid map>\n");
- return;
- }
- DescriptorArray* descs = map->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- switch (descs->GetType(i)) {
- case FIELD: {
- Object* key = descs->GetKey(i);
- if (key->IsString() || key->IsNumber()) {
- int len = 3;
- if (key->IsString()) {
- len = String::cast(key)->length();
- }
- for (; len < 18; len++)
- Put(' ');
- if (key->IsString()) {
- Put(String::cast(key));
- } else {
- key->ShortPrint();
- }
- Add(": ");
- Object* value = js_object->FastPropertyAt(descs->GetFieldIndex(i));
- Add("%o\n", value);
- }
- }
- break;
- default:
- break;
- }
- }
-}
-
-
-void StringStream::PrintFixedArray(FixedArray* array, unsigned int limit) {
- Heap* heap = HEAP;
- for (unsigned int i = 0; i < 10 && i < limit; i++) {
- Object* element = array->get(i);
- if (element != heap->the_hole_value()) {
- for (int len = 1; len < 18; len++)
- Put(' ');
- Add("%d: %o\n", i, array->get(i));
- }
- }
- if (limit >= 10) {
- Add(" ...\n");
- }
-}
-
-
-void StringStream::PrintByteArray(ByteArray* byte_array) {
- unsigned int limit = byte_array->length();
- for (unsigned int i = 0; i < 10 && i < limit; i++) {
- byte b = byte_array->get(i);
- Add(" %d: %3d 0x%02x", i, b, b);
- if (b >= ' ' && b <= '~') {
- Add(" '%c'", b);
- } else if (b == '\n') {
- Add(" '\n'");
- } else if (b == '\r') {
- Add(" '\r'");
- } else if (b >= 1 && b <= 26) {
- Add(" ^%c", b + 'A' - 1);
- }
- Add("\n");
- }
- if (limit >= 10) {
- Add(" ...\n");
- }
-}
-
-
-void StringStream::PrintMentionedObjectCache() {
- DebugObjectCache* debug_object_cache =
- Isolate::Current()->string_stream_debug_object_cache();
- Add("==== Key ============================================\n\n");
- for (int i = 0; i < debug_object_cache->length(); i++) {
- HeapObject* printee = (*debug_object_cache)[i];
- Add(" #%d# %p: ", i, printee);
- printee->ShortPrint(this);
- Add("\n");
- if (printee->IsJSObject()) {
- if (printee->IsJSValue()) {
- Add(" value(): %o\n", JSValue::cast(printee)->value());
- }
- PrintUsingMap(JSObject::cast(printee));
- if (printee->IsJSArray()) {
- JSArray* array = JSArray::cast(printee);
- if (array->HasFastElements()) {
- unsigned int limit = FixedArray::cast(array->elements())->length();
- unsigned int length =
- static_cast<uint32_t>(JSArray::cast(array)->length()->Number());
- if (length < limit) limit = length;
- PrintFixedArray(FixedArray::cast(array->elements()), limit);
- }
- }
- } else if (printee->IsByteArray()) {
- PrintByteArray(ByteArray::cast(printee));
- } else if (printee->IsFixedArray()) {
- unsigned int limit = FixedArray::cast(printee)->length();
- PrintFixedArray(FixedArray::cast(printee), limit);
- }
- }
-}
-
-
-void StringStream::PrintSecurityTokenIfChanged(Object* f) {
- Isolate* isolate = Isolate::Current();
- Heap* heap = isolate->heap();
- if (!f->IsHeapObject() || !heap->Contains(HeapObject::cast(f))) {
- return;
- }
- Map* map = HeapObject::cast(f)->map();
- if (!map->IsHeapObject() ||
- !heap->Contains(map) ||
- !map->IsMap() ||
- !f->IsJSFunction()) {
- return;
- }
-
- JSFunction* fun = JSFunction::cast(f);
- Object* perhaps_context = fun->unchecked_context();
- if (perhaps_context->IsHeapObject() &&
- heap->Contains(HeapObject::cast(perhaps_context)) &&
- perhaps_context->IsContext()) {
- Context* context = fun->context();
- if (!heap->Contains(context)) {
- Add("(Function context is outside heap)\n");
- return;
- }
- Object* token = context->global_context()->security_token();
- if (token != isolate->string_stream_current_security_token()) {
- Add("Security context: %o\n", token);
- isolate->set_string_stream_current_security_token(token);
- }
- } else {
- Add("(Function context is corrupt)\n");
- }
-}
-
-
-void StringStream::PrintFunction(Object* f, Object* receiver, Code** code) {
- if (f->IsHeapObject() &&
- HEAP->Contains(HeapObject::cast(f)) &&
- HEAP->Contains(HeapObject::cast(f)->map()) &&
- HeapObject::cast(f)->map()->IsMap()) {
- if (f->IsJSFunction()) {
- JSFunction* fun = JSFunction::cast(f);
- // Common case: on-stack function present and resolved.
- PrintPrototype(fun, receiver);
- *code = fun->code();
- } else if (f->IsSymbol()) {
- // Unresolved and megamorphic calls: Instead of the function
- // we have the function name on the stack.
- PrintName(f);
- Add("/* unresolved */ ");
- } else {
- // Unless this is the frame of a built-in function, we should always have
- // the callee function or name on the stack. If we don't, we have a
- // problem or a change of the stack frame layout.
- Add("%o", f);
- Add("/* warning: no JSFunction object or function name found */ ");
- }
- /* } else if (is_trampoline()) {
- Print("trampoline ");
- */
- } else {
- if (!f->IsHeapObject()) {
- Add("/* warning: 'function' was not a heap object */ ");
- return;
- }
- if (!HEAP->Contains(HeapObject::cast(f))) {
- Add("/* warning: 'function' was not on the heap */ ");
- return;
- }
- if (!HEAP->Contains(HeapObject::cast(f)->map())) {
- Add("/* warning: function's map was not on the heap */ ");
- return;
- }
- if (!HeapObject::cast(f)->map()->IsMap()) {
- Add("/* warning: function's map was not a valid map */ ");
- return;
- }
- Add("/* warning: Invalid JSFunction object found */ ");
- }
-}
-
-
-void StringStream::PrintPrototype(JSFunction* fun, Object* receiver) {
- Object* name = fun->shared()->name();
- bool print_name = false;
- Heap* heap = HEAP;
- for (Object* p = receiver; p != heap->null_value(); p = p->GetPrototype()) {
- if (p->IsJSObject()) {
- Object* key = JSObject::cast(p)->SlowReverseLookup(fun);
- if (key != heap->undefined_value()) {
- if (!name->IsString() ||
- !key->IsString() ||
- !String::cast(name)->Equals(String::cast(key))) {
- print_name = true;
- }
- if (name->IsString() && String::cast(name)->length() == 0) {
- print_name = false;
- }
- name = key;
- }
- } else {
- print_name = true;
- }
- }
- PrintName(name);
- // Also known as - if the name in the function doesn't match the name under
- // which it was looked up.
- if (print_name) {
- Add("(aka ");
- PrintName(fun->shared()->name());
- Put(')');
- }
-}
-
-
-char* HeapStringAllocator::grow(unsigned* bytes) {
- unsigned new_bytes = *bytes * 2;
- // Check for overflow.
- if (new_bytes <= *bytes) {
- return space_;
- }
- char* new_space = NewArray<char>(new_bytes);
- if (new_space == NULL) {
- return space_;
- }
- memcpy(new_space, space_, *bytes);
- *bytes = new_bytes;
- DeleteArray(space_);
- space_ = new_space;
- return new_space;
-}
-
-
-// Only grow once to the maximum allowable size.
-char* NoAllocationStringAllocator::grow(unsigned* bytes) {
- ASSERT(size_ >= *bytes);
- *bytes = size_;
- return space_;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/string-stream.h b/src/3rdparty/v8/src/string-stream.h
deleted file mode 100644
index b3f2e0d..0000000
--- a/src/3rdparty/v8/src/string-stream.h
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_STRING_STREAM_H_
-#define V8_STRING_STREAM_H_
-
-namespace v8 {
-namespace internal {
-
-
-class StringAllocator {
- public:
- virtual ~StringAllocator() {}
- // Allocate a number of bytes.
- virtual char* allocate(unsigned bytes) = 0;
- // Allocate a larger number of bytes and copy the old buffer to the new one.
- // bytes is an input and output parameter passing the old size of the buffer
- // and returning the new size. If allocation fails then we return the old
- // buffer and do not increase the size.
- virtual char* grow(unsigned* bytes) = 0;
-};
-
-
-// Normal allocator uses new[] and delete[].
-class HeapStringAllocator: public StringAllocator {
- public:
- ~HeapStringAllocator() { DeleteArray(space_); }
- char* allocate(unsigned bytes);
- char* grow(unsigned* bytes);
- private:
- char* space_;
-};
-
-
-// Allocator for use when no new c++ heap allocation is allowed.
-// Given a preallocated buffer up front and does no allocation while
-// building message.
-class NoAllocationStringAllocator: public StringAllocator {
- public:
- NoAllocationStringAllocator(char* memory, unsigned size);
- char* allocate(unsigned bytes) { return space_; }
- char* grow(unsigned* bytes);
- private:
- unsigned size_;
- char* space_;
-};
-
-
-class FmtElm {
- public:
- FmtElm(int value) : type_(INT) { // NOLINT
- data_.u_int_ = value;
- }
- explicit FmtElm(double value) : type_(DOUBLE) {
- data_.u_double_ = value;
- }
- FmtElm(const char* value) : type_(C_STR) { // NOLINT
- data_.u_c_str_ = value;
- }
- FmtElm(const Vector<const uc16>& value) : type_(LC_STR) { // NOLINT
- data_.u_lc_str_ = &value;
- }
- FmtElm(Object* value) : type_(OBJ) { // NOLINT
- data_.u_obj_ = value;
- }
- FmtElm(Handle<Object> value) : type_(HANDLE) { // NOLINT
- data_.u_handle_ = value.location();
- }
- FmtElm(void* value) : type_(POINTER) { // NOLINT
- data_.u_pointer_ = value;
- }
- private:
- friend class StringStream;
- enum Type { INT, DOUBLE, C_STR, LC_STR, OBJ, HANDLE, POINTER };
- Type type_;
- union {
- int u_int_;
- double u_double_;
- const char* u_c_str_;
- const Vector<const uc16>* u_lc_str_;
- Object* u_obj_;
- Object** u_handle_;
- void* u_pointer_;
- } data_;
-};
-
-
-class StringStream {
- public:
- explicit StringStream(StringAllocator* allocator):
- allocator_(allocator),
- capacity_(kInitialCapacity),
- length_(0),
- buffer_(allocator_->allocate(kInitialCapacity)) {
- buffer_[0] = 0;
- }
-
- ~StringStream() {
- }
-
- bool Put(char c);
- bool Put(String* str);
- bool Put(String* str, int start, int end);
- void Add(Vector<const char> format, Vector<FmtElm> elms);
- void Add(const char* format);
- void Add(Vector<const char> format);
- void Add(const char* format, FmtElm arg0);
- void Add(const char* format, FmtElm arg0, FmtElm arg1);
- void Add(const char* format, FmtElm arg0, FmtElm arg1, FmtElm arg2);
- void Add(const char* format,
- FmtElm arg0,
- FmtElm arg1,
- FmtElm arg2,
- FmtElm arg3);
-
- // Getting the message out.
- void OutputToFile(FILE* out);
- void OutputToStdOut() { OutputToFile(stdout); }
- void Log();
- Handle<String> ToString();
- SmartPointer<const char> ToCString() const;
- int length() const { return length_; }
-
- // Object printing support.
- void PrintName(Object* o);
- void PrintFixedArray(FixedArray* array, unsigned int limit);
- void PrintByteArray(ByteArray* ba);
- void PrintUsingMap(JSObject* js_object);
- void PrintPrototype(JSFunction* fun, Object* receiver);
- void PrintSecurityTokenIfChanged(Object* function);
- // NOTE: Returns the code in the output parameter.
- void PrintFunction(Object* function, Object* receiver, Code** code);
-
- // Reset the stream.
- void Reset() {
- length_ = 0;
- buffer_[0] = 0;
- }
-
- // Mentioned object cache support.
- void PrintMentionedObjectCache();
- static void ClearMentionedObjectCache();
-#ifdef DEBUG
- static bool IsMentionedObjectCacheClear();
-#endif
-
-
- static const int kInitialCapacity = 16;
-
- private:
- void PrintObject(Object* obj);
-
- StringAllocator* allocator_;
- unsigned capacity_;
- unsigned length_; // does not include terminating 0-character
- char* buffer_;
-
- bool full() const { return (capacity_ - length_) == 1; }
- int space() const { return capacity_ - length_; }
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringStream);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_STRING_STREAM_H_
diff --git a/src/3rdparty/v8/src/string.js b/src/3rdparty/v8/src/string.js
deleted file mode 100644
index d8d402c..0000000
--- a/src/3rdparty/v8/src/string.js
+++ /dev/null
@@ -1,915 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// This file relies on the fact that the following declaration has been made
-// in runtime.js:
-// const $String = global.String;
-// const $NaN = 0/0;
-
-
-// Set the String function and constructor.
-%SetCode($String, function(x) {
- var value = %_ArgumentsLength() == 0 ? '' : TO_STRING_INLINE(x);
- if (%_IsConstructCall()) {
- %_SetValueOf(this, value);
- } else {
- return value;
- }
-});
-
-%FunctionSetPrototype($String, new $String());
-
-// ECMA-262 section 15.5.4.2
-function StringToString() {
- if (!IS_STRING(this) && !IS_STRING_WRAPPER(this))
- throw new $TypeError('String.prototype.toString is not generic');
- return %_ValueOf(this);
-}
-
-
-// ECMA-262 section 15.5.4.3
-function StringValueOf() {
- if (!IS_STRING(this) && !IS_STRING_WRAPPER(this))
- throw new $TypeError('String.prototype.valueOf is not generic');
- return %_ValueOf(this);
-}
-
-
-// ECMA-262, section 15.5.4.4
-function StringCharAt(pos) {
- var result = %_StringCharAt(this, pos);
- if (%_IsSmi(result)) {
- result = %_StringCharAt(TO_STRING_INLINE(this), TO_INTEGER(pos));
- }
- return result;
-}
-
-
-// ECMA-262 section 15.5.4.5
-function StringCharCodeAt(pos) {
- var result = %_StringCharCodeAt(this, pos);
- if (!%_IsSmi(result)) {
- result = %_StringCharCodeAt(TO_STRING_INLINE(this), TO_INTEGER(pos));
- }
- return result;
-}
-
-
-// ECMA-262, section 15.5.4.6
-function StringConcat() {
- var len = %_ArgumentsLength();
- var this_as_string = TO_STRING_INLINE(this);
- if (len === 1) {
- return this_as_string + %_Arguments(0);
- }
- var parts = new InternalArray(len + 1);
- parts[0] = this_as_string;
- for (var i = 0; i < len; i++) {
- var part = %_Arguments(i);
- parts[i + 1] = TO_STRING_INLINE(part);
- }
- return %StringBuilderConcat(parts, len + 1, "");
-}
-
-// Match ES3 and Safari
-%FunctionSetLength(StringConcat, 1);
-
-
-// ECMA-262 section 15.5.4.7
-function StringIndexOf(pattern /* position */) { // length == 1
- var subject = TO_STRING_INLINE(this);
- pattern = TO_STRING_INLINE(pattern);
- var index = 0;
- if (%_ArgumentsLength() > 1) {
- index = %_Arguments(1); // position
- index = TO_INTEGER(index);
- if (index < 0) index = 0;
- if (index > subject.length) index = subject.length;
- }
- return %StringIndexOf(subject, pattern, index);
-}
-
-
-// ECMA-262 section 15.5.4.8
-function StringLastIndexOf(pat /* position */) { // length == 1
- var sub = TO_STRING_INLINE(this);
- var subLength = sub.length;
- var pat = TO_STRING_INLINE(pat);
- var patLength = pat.length;
- var index = subLength - patLength;
- if (%_ArgumentsLength() > 1) {
- var position = ToNumber(%_Arguments(1));
- if (!NUMBER_IS_NAN(position)) {
- position = TO_INTEGER(position);
- if (position < 0) {
- position = 0;
- }
- if (position + patLength < subLength) {
- index = position
- }
- }
- }
- if (index < 0) {
- return -1;
- }
- return %StringLastIndexOf(sub, pat, index);
-}
-
-
-// ECMA-262 section 15.5.4.9
-//
-// This function is implementation specific. For now, we do not
-// do anything locale specific.
-function StringLocaleCompare(other) {
- if (%_ArgumentsLength() === 0) return 0;
- return %StringLocaleCompare(TO_STRING_INLINE(this),
- TO_STRING_INLINE(other));
-}
-
-
-// ECMA-262 section 15.5.4.10
-function StringMatch(regexp) {
- var subject = TO_STRING_INLINE(this);
- if (IS_REGEXP(regexp)) {
- if (!regexp.global) return RegExpExecNoTests(regexp, subject, 0);
- %_Log('regexp', 'regexp-match,%0S,%1r', [subject, regexp]);
- // lastMatchInfo is defined in regexp.js.
- return %StringMatch(subject, regexp, lastMatchInfo);
- }
- // Non-regexp argument.
- regexp = new $RegExp(regexp);
- return RegExpExecNoTests(regexp, subject, 0);
-}
-
-
-// SubString is an internal function that returns the sub string of 'string'.
-// If resulting string is of length 1, we use the one character cache
-// otherwise we call the runtime system.
-function SubString(string, start, end) {
- // Use the one character string cache.
- if (start + 1 == end) return %_StringCharAt(string, start);
- return %_SubString(string, start, end);
-}
-
-
-// This has the same size as the lastMatchInfo array, and can be used for
-// functions that expect that structure to be returned. It is used when the
-// needle is a string rather than a regexp. In this case we can't update
-// lastMatchArray without erroneously affecting the properties on the global
-// RegExp object.
-var reusableMatchInfo = [2, "", "", -1, -1];
-
-
-// ECMA-262, section 15.5.4.11
-function StringReplace(search, replace) {
- var subject = TO_STRING_INLINE(this);
-
- // Delegate to one of the regular expression variants if necessary.
- if (IS_REGEXP(search)) {
- %_Log('regexp', 'regexp-replace,%0r,%1S', [search, subject]);
- if (IS_FUNCTION(replace)) {
- if (search.global) {
- return StringReplaceGlobalRegExpWithFunction(subject, search, replace);
- } else {
- return StringReplaceNonGlobalRegExpWithFunction(subject,
- search,
- replace);
- }
- } else {
- return %StringReplaceRegExpWithString(subject,
- search,
- TO_STRING_INLINE(replace),
- lastMatchInfo);
- }
- }
-
- // Convert the search argument to a string and search for it.
- search = TO_STRING_INLINE(search);
- var start = %StringIndexOf(subject, search, 0);
- if (start < 0) return subject;
- var end = start + search.length;
-
- var builder = new ReplaceResultBuilder(subject);
- // prefix
- builder.addSpecialSlice(0, start);
-
- // Compute the string to replace with.
- if (IS_FUNCTION(replace)) {
- builder.add(%_CallFunction(%GetGlobalReceiver(),
- search,
- start,
- subject,
- replace));
- } else {
- reusableMatchInfo[CAPTURE0] = start;
- reusableMatchInfo[CAPTURE1] = end;
- replace = TO_STRING_INLINE(replace);
- ExpandReplacement(replace, subject, reusableMatchInfo, builder);
- }
-
- // suffix
- builder.addSpecialSlice(end, subject.length);
-
- return builder.generate();
-}
-
-
-// Expand the $-expressions in the string and return a new string with
-// the result.
-function ExpandReplacement(string, subject, matchInfo, builder) {
- var length = string.length;
- var builder_elements = builder.elements;
- var next = %StringIndexOf(string, '$', 0);
- if (next < 0) {
- if (length > 0) builder_elements.push(string);
- return;
- }
-
- // Compute the number of captures; see ECMA-262, 15.5.4.11, p. 102.
- var m = NUMBER_OF_CAPTURES(matchInfo) >> 1; // Includes the match.
-
- if (next > 0) builder_elements.push(SubString(string, 0, next));
-
- while (true) {
- var expansion = '$';
- var position = next + 1;
- if (position < length) {
- var peek = %_StringCharCodeAt(string, position);
- if (peek == 36) { // $$
- ++position;
- builder_elements.push('$');
- } else if (peek == 38) { // $& - match
- ++position;
- builder.addSpecialSlice(matchInfo[CAPTURE0],
- matchInfo[CAPTURE1]);
- } else if (peek == 96) { // $` - prefix
- ++position;
- builder.addSpecialSlice(0, matchInfo[CAPTURE0]);
- } else if (peek == 39) { // $' - suffix
- ++position;
- builder.addSpecialSlice(matchInfo[CAPTURE1], subject.length);
- } else if (peek >= 48 && peek <= 57) { // $n, 0 <= n <= 9
- ++position;
- var n = peek - 48;
- if (position < length) {
- peek = %_StringCharCodeAt(string, position);
- // $nn, 01 <= nn <= 99
- if (n != 0 && peek == 48 || peek >= 49 && peek <= 57) {
- var nn = n * 10 + (peek - 48);
- if (nn < m) {
- // If the two digit capture reference is within range of
- // the captures, we use it instead of the single digit
- // one. Otherwise, we fall back to using the single
- // digit reference. This matches the behavior of
- // SpiderMonkey.
- ++position;
- n = nn;
- }
- }
- }
- if (0 < n && n < m) {
- addCaptureString(builder, matchInfo, n);
- } else {
- // Because of the captures range check in the parsing of two
- // digit capture references, we can only enter here when a
- // single digit capture reference is outside the range of
- // captures.
- builder_elements.push('$');
- --position;
- }
- } else {
- builder_elements.push('$');
- }
- } else {
- builder_elements.push('$');
- }
-
- // Go the the next $ in the string.
- next = %StringIndexOf(string, '$', position);
-
- // Return if there are no more $ characters in the string. If we
- // haven't reached the end, we need to append the suffix.
- if (next < 0) {
- if (position < length) {
- builder_elements.push(SubString(string, position, length));
- }
- return;
- }
-
- // Append substring between the previous and the next $ character.
- if (next > position) {
- builder_elements.push(SubString(string, position, next));
- }
- }
-};
-
-
-// Compute the string of a given regular expression capture.
-function CaptureString(string, lastCaptureInfo, index) {
- // Scale the index.
- var scaled = index << 1;
- // Compute start and end.
- var start = lastCaptureInfo[CAPTURE(scaled)];
- // If start isn't valid, return undefined.
- if (start < 0) return;
- var end = lastCaptureInfo[CAPTURE(scaled + 1)];
- return SubString(string, start, end);
-};
-
-
-// Add the string of a given regular expression capture to the
-// ReplaceResultBuilder
-function addCaptureString(builder, matchInfo, index) {
- // Scale the index.
- var scaled = index << 1;
- // Compute start and end.
- var start = matchInfo[CAPTURE(scaled)];
- if (start < 0) return;
- var end = matchInfo[CAPTURE(scaled + 1)];
- builder.addSpecialSlice(start, end);
-};
-
-// TODO(lrn): This array will survive indefinitely if replace is never
-// called again. However, it will be empty, since the contents are cleared
-// in the finally block.
-var reusableReplaceArray = new InternalArray(16);
-
-// Helper function for replacing regular expressions with the result of a
-// function application in String.prototype.replace.
-function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
- var resultArray = reusableReplaceArray;
- if (resultArray) {
- reusableReplaceArray = null;
- } else {
- // Inside a nested replace (replace called from the replacement function
- // of another replace) or we have failed to set the reusable array
- // back due to an exception in a replacement function. Create a new
- // array to use in the future, or until the original is written back.
- resultArray = new InternalArray(16);
- }
- var res = %RegExpExecMultiple(regexp,
- subject,
- lastMatchInfo,
- resultArray);
- regexp.lastIndex = 0;
- if (IS_NULL(res)) {
- // No matches at all.
- reusableReplaceArray = resultArray;
- return subject;
- }
- var len = res.length;
- var i = 0;
- if (NUMBER_OF_CAPTURES(lastMatchInfo) == 2) {
- var match_start = 0;
- var override = new InternalArray(null, 0, subject);
- var receiver = %GetGlobalReceiver();
- while (i < len) {
- var elem = res[i];
- if (%_IsSmi(elem)) {
- if (elem > 0) {
- match_start = (elem >> 11) + (elem & 0x7ff);
- } else {
- match_start = res[++i] - elem;
- }
- } else {
- override[0] = elem;
- override[1] = match_start;
- lastMatchInfoOverride = override;
- var func_result =
- %_CallFunction(receiver, elem, match_start, subject, replace);
- res[i] = TO_STRING_INLINE(func_result);
- match_start += elem.length;
- }
- i++;
- }
- } else {
- while (i < len) {
- var elem = res[i];
- if (!%_IsSmi(elem)) {
- // elem must be an Array.
- // Use the apply argument as backing for global RegExp properties.
- lastMatchInfoOverride = elem;
- var func_result = replace.apply(null, elem);
- res[i] = TO_STRING_INLINE(func_result);
- }
- i++;
- }
- }
- var resultBuilder = new ReplaceResultBuilder(subject, res);
- var result = resultBuilder.generate();
- resultArray.length = 0;
- reusableReplaceArray = resultArray;
- return result;
-}
-
-
-function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
- var matchInfo = DoRegExpExec(regexp, subject, 0);
- if (IS_NULL(matchInfo)) return subject;
- var result = new ReplaceResultBuilder(subject);
- var index = matchInfo[CAPTURE0];
- result.addSpecialSlice(0, index);
- var endOfMatch = matchInfo[CAPTURE1];
- // Compute the parameter list consisting of the match, captures, index,
- // and subject for the replace function invocation.
- // The number of captures plus one for the match.
- var m = NUMBER_OF_CAPTURES(matchInfo) >> 1;
- var replacement;
- if (m == 1) {
- // No captures, only the match, which is always valid.
- var s = SubString(subject, index, endOfMatch);
- // Don't call directly to avoid exposing the built-in global object.
- replacement =
- %_CallFunction(%GetGlobalReceiver(), s, index, subject, replace);
- } else {
- var parameters = new InternalArray(m + 2);
- for (var j = 0; j < m; j++) {
- parameters[j] = CaptureString(subject, matchInfo, j);
- }
- parameters[j] = index;
- parameters[j + 1] = subject;
-
- replacement = replace.apply(null, parameters);
- }
-
- result.add(replacement); // The add method converts to string if necessary.
- // Can't use matchInfo any more from here, since the function could
- // overwrite it.
- result.addSpecialSlice(endOfMatch, subject.length);
- return result.generate();
-}
-
-
-// ECMA-262 section 15.5.4.12
-function StringSearch(re) {
- var regexp;
- if (IS_STRING(re)) {
- regexp = %_GetFromCache(STRING_TO_REGEXP_CACHE_ID, re);
- } else if (IS_REGEXP(re)) {
- regexp = re;
- } else {
- regexp = new $RegExp(re);
- }
- var match = DoRegExpExec(regexp, TO_STRING_INLINE(this), 0);
- if (match) {
- return match[CAPTURE0];
- }
- return -1;
-}
-
-
-// ECMA-262 section 15.5.4.13
-function StringSlice(start, end) {
- var s = TO_STRING_INLINE(this);
- var s_len = s.length;
- var start_i = TO_INTEGER(start);
- var end_i = s_len;
- if (end !== void 0)
- end_i = TO_INTEGER(end);
-
- if (start_i < 0) {
- start_i += s_len;
- if (start_i < 0)
- start_i = 0;
- } else {
- if (start_i > s_len)
- start_i = s_len;
- }
-
- if (end_i < 0) {
- end_i += s_len;
- if (end_i < 0)
- end_i = 0;
- } else {
- if (end_i > s_len)
- end_i = s_len;
- }
-
- var num_c = end_i - start_i;
- if (num_c < 0)
- num_c = 0;
-
- return SubString(s, start_i, start_i + num_c);
-}
-
-
-// ECMA-262 section 15.5.4.14
-function StringSplit(separator, limit) {
- var subject = TO_STRING_INLINE(this);
- limit = (IS_UNDEFINED(limit)) ? 0xffffffff : TO_UINT32(limit);
- if (limit === 0) return [];
-
- // ECMA-262 says that if separator is undefined, the result should
- // be an array of size 1 containing the entire string. SpiderMonkey
- // and KJS have this behavior only when no separator is given. If
- // undefined is explicitly given, they convert it to a string and
- // use that. We do as SpiderMonkey and KJS.
- if (%_ArgumentsLength() === 0) {
- return [subject];
- }
-
- var length = subject.length;
- if (!IS_REGEXP(separator)) {
- separator = TO_STRING_INLINE(separator);
- var separator_length = separator.length;
-
- // If the separator string is empty then return the elements in the subject.
- if (separator_length === 0) return %StringToArray(subject, limit);
-
- var result = %StringSplit(subject, separator, limit);
-
- return result;
- }
-
- %_Log('regexp', 'regexp-split,%0S,%1r', [subject, separator]);
-
- if (length === 0) {
- if (DoRegExpExec(separator, subject, 0, 0) != null) {
- return [];
- }
- return [subject];
- }
-
- var currentIndex = 0;
- var startIndex = 0;
- var startMatch = 0;
- var result = [];
-
- outer_loop:
- while (true) {
-
- if (startIndex === length) {
- result.push(SubString(subject, currentIndex, length));
- break;
- }
-
- var matchInfo = DoRegExpExec(separator, subject, startIndex);
- if (matchInfo == null || length === (startMatch = matchInfo[CAPTURE0])) {
- result.push(SubString(subject, currentIndex, length));
- break;
- }
- var endIndex = matchInfo[CAPTURE1];
-
- // We ignore a zero-length match at the currentIndex.
- if (startIndex === endIndex && endIndex === currentIndex) {
- startIndex++;
- continue;
- }
-
- if (currentIndex + 1 == startMatch) {
- result.push(%_StringCharAt(subject, currentIndex));
- } else {
- result.push(%_SubString(subject, currentIndex, startMatch));
- }
-
- if (result.length === limit) break;
-
- var matchinfo_len = NUMBER_OF_CAPTURES(matchInfo) + REGEXP_FIRST_CAPTURE;
- for (var i = REGEXP_FIRST_CAPTURE + 2; i < matchinfo_len; ) {
- var start = matchInfo[i++];
- var end = matchInfo[i++];
- if (end != -1) {
- if (start + 1 == end) {
- result.push(%_StringCharAt(subject, start));
- } else {
- result.push(%_SubString(subject, start, end));
- }
- } else {
- result.push(void 0);
- }
- if (result.length === limit) break outer_loop;
- }
-
- startIndex = currentIndex = endIndex;
- }
- return result;
-}
-
-
-// ECMA-262 section 15.5.4.15
-function StringSubstring(start, end) {
- var s = TO_STRING_INLINE(this);
- var s_len = s.length;
-
- var start_i = TO_INTEGER(start);
- if (start_i < 0) {
- start_i = 0;
- } else if (start_i > s_len) {
- start_i = s_len;
- }
-
- var end_i = s_len;
- if (!IS_UNDEFINED(end)) {
- end_i = TO_INTEGER(end);
- if (end_i > s_len) {
- end_i = s_len;
- } else {
- if (end_i < 0) end_i = 0;
- if (start_i > end_i) {
- var tmp = end_i;
- end_i = start_i;
- start_i = tmp;
- }
- }
- }
-
- return (start_i + 1 == end_i
- ? %_StringCharAt(s, start_i)
- : %_SubString(s, start_i, end_i));
-}
-
-
-// This is not a part of ECMA-262.
-function StringSubstr(start, n) {
- var s = TO_STRING_INLINE(this);
- var len;
-
- // Correct n: If not given, set to string length; if explicitly
- // set to undefined, zero, or negative, returns empty string.
- if (n === void 0) {
- len = s.length;
- } else {
- len = TO_INTEGER(n);
- if (len <= 0) return '';
- }
-
- // Correct start: If not given (or undefined), set to zero; otherwise
- // convert to integer and handle negative case.
- if (start === void 0) {
- start = 0;
- } else {
- start = TO_INTEGER(start);
- // If positive, and greater than or equal to the string length,
- // return empty string.
- if (start >= s.length) return '';
- // If negative and absolute value is larger than the string length,
- // use zero.
- if (start < 0) {
- start += s.length;
- if (start < 0) start = 0;
- }
- }
-
- var end = start + len;
- if (end > s.length) end = s.length;
-
- return (start + 1 == end
- ? %_StringCharAt(s, start)
- : %_SubString(s, start, end));
-}
-
-
-// ECMA-262, 15.5.4.16
-function StringToLowerCase() {
- return %StringToLowerCase(TO_STRING_INLINE(this));
-}
-
-
-// ECMA-262, 15.5.4.17
-function StringToLocaleLowerCase() {
- return %StringToLowerCase(TO_STRING_INLINE(this));
-}
-
-
-// ECMA-262, 15.5.4.18
-function StringToUpperCase() {
- return %StringToUpperCase(TO_STRING_INLINE(this));
-}
-
-
-// ECMA-262, 15.5.4.19
-function StringToLocaleUpperCase() {
- return %StringToUpperCase(TO_STRING_INLINE(this));
-}
-
-// ES5, 15.5.4.20
-function StringTrim() {
- return %StringTrim(TO_STRING_INLINE(this), true, true);
-}
-
-function StringTrimLeft() {
- return %StringTrim(TO_STRING_INLINE(this), true, false);
-}
-
-function StringTrimRight() {
- return %StringTrim(TO_STRING_INLINE(this), false, true);
-}
-
-var static_charcode_array = new InternalArray(4);
-
-// ECMA-262, section 15.5.3.2
-function StringFromCharCode(code) {
- var n = %_ArgumentsLength();
- if (n == 1) {
- if (!%_IsSmi(code)) code = ToNumber(code);
- return %_StringCharFromCode(code & 0xffff);
- }
-
- // NOTE: This is not super-efficient, but it is necessary because we
- // want to avoid converting to numbers from within the virtual
- // machine. Maybe we can find another way of doing this?
- var codes = static_charcode_array;
- for (var i = 0; i < n; i++) {
- var code = %_Arguments(i);
- if (!%_IsSmi(code)) code = ToNumber(code);
- codes[i] = code;
- }
- codes.length = n;
- return %StringFromCharCodeArray(codes);
-}
-
-
-// Helper function for very basic XSS protection.
-function HtmlEscape(str) {
- return TO_STRING_INLINE(str).replace(/</g, "&lt;")
- .replace(/>/g, "&gt;")
- .replace(/"/g, "&quot;")
- .replace(/'/g, "&#039;");
-};
-
-
-// Compatibility support for KJS.
-// Tested by mozilla/js/tests/js1_5/Regress/regress-276103.js.
-function StringLink(s) {
- return "<a href=\"" + HtmlEscape(s) + "\">" + this + "</a>";
-}
-
-
-function StringAnchor(name) {
- return "<a name=\"" + HtmlEscape(name) + "\">" + this + "</a>";
-}
-
-
-function StringFontcolor(color) {
- return "<font color=\"" + HtmlEscape(color) + "\">" + this + "</font>";
-}
-
-
-function StringFontsize(size) {
- return "<font size=\"" + HtmlEscape(size) + "\">" + this + "</font>";
-}
-
-
-function StringBig() {
- return "<big>" + this + "</big>";
-}
-
-
-function StringBlink() {
- return "<blink>" + this + "</blink>";
-}
-
-
-function StringBold() {
- return "<b>" + this + "</b>";
-}
-
-
-function StringFixed() {
- return "<tt>" + this + "</tt>";
-}
-
-
-function StringItalics() {
- return "<i>" + this + "</i>";
-}
-
-
-function StringSmall() {
- return "<small>" + this + "</small>";
-}
-
-
-function StringStrike() {
- return "<strike>" + this + "</strike>";
-}
-
-
-function StringSub() {
- return "<sub>" + this + "</sub>";
-}
-
-
-function StringSup() {
- return "<sup>" + this + "</sup>";
-}
-
-
-// ReplaceResultBuilder support.
-function ReplaceResultBuilder(str) {
- if (%_ArgumentsLength() > 1) {
- this.elements = %_Arguments(1);
- } else {
- this.elements = new InternalArray();
- }
- this.special_string = str;
-}
-
-
-ReplaceResultBuilder.prototype.add = function(str) {
- str = TO_STRING_INLINE(str);
- if (str.length > 0) this.elements.push(str);
-}
-
-
-ReplaceResultBuilder.prototype.addSpecialSlice = function(start, end) {
- var len = end - start;
- if (start < 0 || len <= 0) return;
- if (start < 0x80000 && len < 0x800) {
- this.elements.push((start << 11) | len);
- } else {
- // 0 < len <= String::kMaxLength and Smi::kMaxValue >= String::kMaxLength,
- // so -len is a smi.
- var elements = this.elements;
- elements.push(-len);
- elements.push(start);
- }
-}
-
-
-ReplaceResultBuilder.prototype.generate = function() {
- var elements = this.elements;
- return %StringBuilderConcat(elements, elements.length, this.special_string);
-}
-
-
-// -------------------------------------------------------------------
-
-function SetupString() {
- // Setup the constructor property on the String prototype object.
- %SetProperty($String.prototype, "constructor", $String, DONT_ENUM);
-
-
- // Setup the non-enumerable functions on the String object.
- InstallFunctions($String, DONT_ENUM, $Array(
- "fromCharCode", StringFromCharCode
- ));
-
-
- // Setup the non-enumerable functions on the String prototype object.
- InstallFunctionsOnHiddenPrototype($String.prototype, DONT_ENUM, $Array(
- "valueOf", StringValueOf,
- "toString", StringToString,
- "charAt", StringCharAt,
- "charCodeAt", StringCharCodeAt,
- "concat", StringConcat,
- "indexOf", StringIndexOf,
- "lastIndexOf", StringLastIndexOf,
- "localeCompare", StringLocaleCompare,
- "match", StringMatch,
- "replace", StringReplace,
- "search", StringSearch,
- "slice", StringSlice,
- "split", StringSplit,
- "substring", StringSubstring,
- "substr", StringSubstr,
- "toLowerCase", StringToLowerCase,
- "toLocaleLowerCase", StringToLocaleLowerCase,
- "toUpperCase", StringToUpperCase,
- "toLocaleUpperCase", StringToLocaleUpperCase,
- "trim", StringTrim,
- "trimLeft", StringTrimLeft,
- "trimRight", StringTrimRight,
- "link", StringLink,
- "anchor", StringAnchor,
- "fontcolor", StringFontcolor,
- "fontsize", StringFontsize,
- "big", StringBig,
- "blink", StringBlink,
- "bold", StringBold,
- "fixed", StringFixed,
- "italics", StringItalics,
- "small", StringSmall,
- "strike", StringStrike,
- "sub", StringSub,
- "sup", StringSup
- ));
-}
-
-
-SetupString();
diff --git a/src/3rdparty/v8/src/strtod.cc b/src/3rdparty/v8/src/strtod.cc
deleted file mode 100644
index cedbff9..0000000
--- a/src/3rdparty/v8/src/strtod.cc
+++ /dev/null
@@ -1,440 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdarg.h>
-#include <limits.h>
-
-#include "v8.h"
-
-#include "strtod.h"
-#include "bignum.h"
-#include "cached-powers.h"
-#include "double.h"
-
-namespace v8 {
-namespace internal {
-
-// 2^53 = 9007199254740992.
-// Any integer with at most 15 decimal digits will hence fit into a double
-// (which has a 53bit significand) without loss of precision.
-static const int kMaxExactDoubleIntegerDecimalDigits = 15;
-// 2^64 = 18446744073709551616 > 10^19
-static const int kMaxUint64DecimalDigits = 19;
-
-// Max double: 1.7976931348623157 x 10^308
-// Min non-zero double: 4.9406564584124654 x 10^-324
-// Any x >= 10^309 is interpreted as +infinity.
-// Any x <= 10^-324 is interpreted as 0.
-// Note that 2.5e-324 (despite being smaller than the min double) will be read
-// as non-zero (equal to the min non-zero double).
-static const int kMaxDecimalPower = 309;
-static const int kMinDecimalPower = -324;
-
-// 2^64 = 18446744073709551616
-static const uint64_t kMaxUint64 = V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF);
-
-
-static const double exact_powers_of_ten[] = {
- 1.0, // 10^0
- 10.0,
- 100.0,
- 1000.0,
- 10000.0,
- 100000.0,
- 1000000.0,
- 10000000.0,
- 100000000.0,
- 1000000000.0,
- 10000000000.0, // 10^10
- 100000000000.0,
- 1000000000000.0,
- 10000000000000.0,
- 100000000000000.0,
- 1000000000000000.0,
- 10000000000000000.0,
- 100000000000000000.0,
- 1000000000000000000.0,
- 10000000000000000000.0,
- 100000000000000000000.0, // 10^20
- 1000000000000000000000.0,
- // 10^22 = 0x21e19e0c9bab2400000 = 0x878678326eac9 * 2^22
- 10000000000000000000000.0
-};
-static const int kExactPowersOfTenSize = ARRAY_SIZE(exact_powers_of_ten);
-
-// Maximum number of significant digits in the decimal representation.
-// In fact the value is 772 (see conversions.cc), but to give us some margin
-// we round up to 780.
-static const int kMaxSignificantDecimalDigits = 780;
-
-static Vector<const char> TrimLeadingZeros(Vector<const char> buffer) {
- for (int i = 0; i < buffer.length(); i++) {
- if (buffer[i] != '0') {
- return buffer.SubVector(i, buffer.length());
- }
- }
- return Vector<const char>(buffer.start(), 0);
-}
-
-
-static Vector<const char> TrimTrailingZeros(Vector<const char> buffer) {
- for (int i = buffer.length() - 1; i >= 0; --i) {
- if (buffer[i] != '0') {
- return buffer.SubVector(0, i + 1);
- }
- }
- return Vector<const char>(buffer.start(), 0);
-}
-
-
-static void TrimToMaxSignificantDigits(Vector<const char> buffer,
- int exponent,
- char* significant_buffer,
- int* significant_exponent) {
- for (int i = 0; i < kMaxSignificantDecimalDigits - 1; ++i) {
- significant_buffer[i] = buffer[i];
- }
- // The input buffer has been trimmed. Therefore the last digit must be
- // different from '0'.
- ASSERT(buffer[buffer.length() - 1] != '0');
- // Set the last digit to be non-zero. This is sufficient to guarantee
- // correct rounding.
- significant_buffer[kMaxSignificantDecimalDigits - 1] = '1';
- *significant_exponent =
- exponent + (buffer.length() - kMaxSignificantDecimalDigits);
-}
-
-// Reads digits from the buffer and converts them to a uint64.
-// Reads in as many digits as fit into a uint64.
-// When the string starts with "1844674407370955161" no further digit is read.
-// Since 2^64 = 18446744073709551616 it would still be possible read another
-// digit if it was less or equal than 6, but this would complicate the code.
-static uint64_t ReadUint64(Vector<const char> buffer,
- int* number_of_read_digits) {
- uint64_t result = 0;
- int i = 0;
- while (i < buffer.length() && result <= (kMaxUint64 / 10 - 1)) {
- int digit = buffer[i++] - '0';
- ASSERT(0 <= digit && digit <= 9);
- result = 10 * result + digit;
- }
- *number_of_read_digits = i;
- return result;
-}
-
-
-// Reads a DiyFp from the buffer.
-// The returned DiyFp is not necessarily normalized.
-// If remaining_decimals is zero then the returned DiyFp is accurate.
-// Otherwise it has been rounded and has error of at most 1/2 ulp.
-static void ReadDiyFp(Vector<const char> buffer,
- DiyFp* result,
- int* remaining_decimals) {
- int read_digits;
- uint64_t significand = ReadUint64(buffer, &read_digits);
- if (buffer.length() == read_digits) {
- *result = DiyFp(significand, 0);
- *remaining_decimals = 0;
- } else {
- // Round the significand.
- if (buffer[read_digits] >= '5') {
- significand++;
- }
- // Compute the binary exponent.
- int exponent = 0;
- *result = DiyFp(significand, exponent);
- *remaining_decimals = buffer.length() - read_digits;
- }
-}
-
-
-static bool DoubleStrtod(Vector<const char> trimmed,
- int exponent,
- double* result) {
-#if (defined(V8_TARGET_ARCH_IA32) || defined(USE_SIMULATOR)) && !defined(WIN32)
- // On x86 the floating-point stack can be 64 or 80 bits wide. If it is
- // 80 bits wide (as is the case on Linux) then double-rounding occurs and the
- // result is not accurate.
- // We know that Windows32 uses 64 bits and is therefore accurate.
- // Note that the ARM simulator is compiled for 32bits. It therefore exhibits
- // the same problem.
- return false;
-#endif
- if (trimmed.length() <= kMaxExactDoubleIntegerDecimalDigits) {
- int read_digits;
- // The trimmed input fits into a double.
- // If the 10^exponent (resp. 10^-exponent) fits into a double too then we
- // can compute the result-double simply by multiplying (resp. dividing) the
- // two numbers.
- // This is possible because IEEE guarantees that floating-point operations
- // return the best possible approximation.
- if (exponent < 0 && -exponent < kExactPowersOfTenSize) {
- // 10^-exponent fits into a double.
- *result = static_cast<double>(ReadUint64(trimmed, &read_digits));
- ASSERT(read_digits == trimmed.length());
- *result /= exact_powers_of_ten[-exponent];
- return true;
- }
- if (0 <= exponent && exponent < kExactPowersOfTenSize) {
- // 10^exponent fits into a double.
- *result = static_cast<double>(ReadUint64(trimmed, &read_digits));
- ASSERT(read_digits == trimmed.length());
- *result *= exact_powers_of_ten[exponent];
- return true;
- }
- int remaining_digits =
- kMaxExactDoubleIntegerDecimalDigits - trimmed.length();
- if ((0 <= exponent) &&
- (exponent - remaining_digits < kExactPowersOfTenSize)) {
- // The trimmed string was short and we can multiply it with
- // 10^remaining_digits. As a result the remaining exponent now fits
- // into a double too.
- *result = static_cast<double>(ReadUint64(trimmed, &read_digits));
- ASSERT(read_digits == trimmed.length());
- *result *= exact_powers_of_ten[remaining_digits];
- *result *= exact_powers_of_ten[exponent - remaining_digits];
- return true;
- }
- }
- return false;
-}
-
-
-// Returns 10^exponent as an exact DiyFp.
-// The given exponent must be in the range [1; kDecimalExponentDistance[.
-static DiyFp AdjustmentPowerOfTen(int exponent) {
- ASSERT(0 < exponent);
- ASSERT(exponent < PowersOfTenCache::kDecimalExponentDistance);
- // Simply hardcode the remaining powers for the given decimal exponent
- // distance.
- ASSERT(PowersOfTenCache::kDecimalExponentDistance == 8);
- switch (exponent) {
- case 1: return DiyFp(V8_2PART_UINT64_C(0xa0000000, 00000000), -60);
- case 2: return DiyFp(V8_2PART_UINT64_C(0xc8000000, 00000000), -57);
- case 3: return DiyFp(V8_2PART_UINT64_C(0xfa000000, 00000000), -54);
- case 4: return DiyFp(V8_2PART_UINT64_C(0x9c400000, 00000000), -50);
- case 5: return DiyFp(V8_2PART_UINT64_C(0xc3500000, 00000000), -47);
- case 6: return DiyFp(V8_2PART_UINT64_C(0xf4240000, 00000000), -44);
- case 7: return DiyFp(V8_2PART_UINT64_C(0x98968000, 00000000), -40);
- default:
- UNREACHABLE();
- return DiyFp(0, 0);
- }
-}
-
-
-// If the function returns true then the result is the correct double.
-// Otherwise it is either the correct double or the double that is just below
-// the correct double.
-static bool DiyFpStrtod(Vector<const char> buffer,
- int exponent,
- double* result) {
- DiyFp input;
- int remaining_decimals;
- ReadDiyFp(buffer, &input, &remaining_decimals);
- // Since we may have dropped some digits the input is not accurate.
- // If remaining_decimals is different than 0 than the error is at most
- // .5 ulp (unit in the last place).
- // We don't want to deal with fractions and therefore keep a common
- // denominator.
- const int kDenominatorLog = 3;
- const int kDenominator = 1 << kDenominatorLog;
- // Move the remaining decimals into the exponent.
- exponent += remaining_decimals;
- int error = (remaining_decimals == 0 ? 0 : kDenominator / 2);
-
- int old_e = input.e();
- input.Normalize();
- error <<= old_e - input.e();
-
- ASSERT(exponent <= PowersOfTenCache::kMaxDecimalExponent);
- if (exponent < PowersOfTenCache::kMinDecimalExponent) {
- *result = 0.0;
- return true;
- }
- DiyFp cached_power;
- int cached_decimal_exponent;
- PowersOfTenCache::GetCachedPowerForDecimalExponent(exponent,
- &cached_power,
- &cached_decimal_exponent);
-
- if (cached_decimal_exponent != exponent) {
- int adjustment_exponent = exponent - cached_decimal_exponent;
- DiyFp adjustment_power = AdjustmentPowerOfTen(adjustment_exponent);
- input.Multiply(adjustment_power);
- if (kMaxUint64DecimalDigits - buffer.length() >= adjustment_exponent) {
- // The product of input with the adjustment power fits into a 64 bit
- // integer.
- ASSERT(DiyFp::kSignificandSize == 64);
- } else {
- // The adjustment power is exact. There is hence only an error of 0.5.
- error += kDenominator / 2;
- }
- }
-
- input.Multiply(cached_power);
- // The error introduced by a multiplication of a*b equals
- // error_a + error_b + error_a*error_b/2^64 + 0.5
- // Substituting a with 'input' and b with 'cached_power' we have
- // error_b = 0.5 (all cached powers have an error of less than 0.5 ulp),
- // error_ab = 0 or 1 / kDenominator > error_a*error_b/ 2^64
- int error_b = kDenominator / 2;
- int error_ab = (error == 0 ? 0 : 1); // We round up to 1.
- int fixed_error = kDenominator / 2;
- error += error_b + error_ab + fixed_error;
-
- old_e = input.e();
- input.Normalize();
- error <<= old_e - input.e();
-
- // See if the double's significand changes if we add/subtract the error.
- int order_of_magnitude = DiyFp::kSignificandSize + input.e();
- int effective_significand_size =
- Double::SignificandSizeForOrderOfMagnitude(order_of_magnitude);
- int precision_digits_count =
- DiyFp::kSignificandSize - effective_significand_size;
- if (precision_digits_count + kDenominatorLog >= DiyFp::kSignificandSize) {
- // This can only happen for very small denormals. In this case the
- // half-way multiplied by the denominator exceeds the range of an uint64.
- // Simply shift everything to the right.
- int shift_amount = (precision_digits_count + kDenominatorLog) -
- DiyFp::kSignificandSize + 1;
- input.set_f(input.f() >> shift_amount);
- input.set_e(input.e() + shift_amount);
- // We add 1 for the lost precision of error, and kDenominator for
- // the lost precision of input.f().
- error = (error >> shift_amount) + 1 + kDenominator;
- precision_digits_count -= shift_amount;
- }
- // We use uint64_ts now. This only works if the DiyFp uses uint64_ts too.
- ASSERT(DiyFp::kSignificandSize == 64);
- ASSERT(precision_digits_count < 64);
- uint64_t one64 = 1;
- uint64_t precision_bits_mask = (one64 << precision_digits_count) - 1;
- uint64_t precision_bits = input.f() & precision_bits_mask;
- uint64_t half_way = one64 << (precision_digits_count - 1);
- precision_bits *= kDenominator;
- half_way *= kDenominator;
- DiyFp rounded_input(input.f() >> precision_digits_count,
- input.e() + precision_digits_count);
- if (precision_bits >= half_way + error) {
- rounded_input.set_f(rounded_input.f() + 1);
- }
- // If the last_bits are too close to the half-way case than we are too
- // inaccurate and round down. In this case we return false so that we can
- // fall back to a more precise algorithm.
-
- *result = Double(rounded_input).value();
- if (half_way - error < precision_bits && precision_bits < half_way + error) {
- // Too imprecise. The caller will have to fall back to a slower version.
- // However the returned number is guaranteed to be either the correct
- // double, or the next-lower double.
- return false;
- } else {
- return true;
- }
-}
-
-
-// Returns the correct double for the buffer*10^exponent.
-// The variable guess should be a close guess that is either the correct double
-// or its lower neighbor (the nearest double less than the correct one).
-// Preconditions:
-// buffer.length() + exponent <= kMaxDecimalPower + 1
-// buffer.length() + exponent > kMinDecimalPower
-// buffer.length() <= kMaxDecimalSignificantDigits
-static double BignumStrtod(Vector<const char> buffer,
- int exponent,
- double guess) {
- if (guess == V8_INFINITY) {
- return guess;
- }
-
- DiyFp upper_boundary = Double(guess).UpperBoundary();
-
- ASSERT(buffer.length() + exponent <= kMaxDecimalPower + 1);
- ASSERT(buffer.length() + exponent > kMinDecimalPower);
- ASSERT(buffer.length() <= kMaxSignificantDecimalDigits);
- // Make sure that the Bignum will be able to hold all our numbers.
- // Our Bignum implementation has a separate field for exponents. Shifts will
- // consume at most one bigit (< 64 bits).
- // ln(10) == 3.3219...
- ASSERT(((kMaxDecimalPower + 1) * 333 / 100) < Bignum::kMaxSignificantBits);
- Bignum input;
- Bignum boundary;
- input.AssignDecimalString(buffer);
- boundary.AssignUInt64(upper_boundary.f());
- if (exponent >= 0) {
- input.MultiplyByPowerOfTen(exponent);
- } else {
- boundary.MultiplyByPowerOfTen(-exponent);
- }
- if (upper_boundary.e() > 0) {
- boundary.ShiftLeft(upper_boundary.e());
- } else {
- input.ShiftLeft(-upper_boundary.e());
- }
- int comparison = Bignum::Compare(input, boundary);
- if (comparison < 0) {
- return guess;
- } else if (comparison > 0) {
- return Double(guess).NextDouble();
- } else if ((Double(guess).Significand() & 1) == 0) {
- // Round towards even.
- return guess;
- } else {
- return Double(guess).NextDouble();
- }
-}
-
-
-double Strtod(Vector<const char> buffer, int exponent) {
- Vector<const char> left_trimmed = TrimLeadingZeros(buffer);
- Vector<const char> trimmed = TrimTrailingZeros(left_trimmed);
- exponent += left_trimmed.length() - trimmed.length();
- if (trimmed.length() == 0) return 0.0;
- if (trimmed.length() > kMaxSignificantDecimalDigits) {
- char significant_buffer[kMaxSignificantDecimalDigits];
- int significant_exponent;
- TrimToMaxSignificantDigits(trimmed, exponent,
- significant_buffer, &significant_exponent);
- return Strtod(Vector<const char>(significant_buffer,
- kMaxSignificantDecimalDigits),
- significant_exponent);
- }
- if (exponent + trimmed.length() - 1 >= kMaxDecimalPower) return V8_INFINITY;
- if (exponent + trimmed.length() <= kMinDecimalPower) return 0.0;
-
- double guess;
- if (DoubleStrtod(trimmed, exponent, &guess) ||
- DiyFpStrtod(trimmed, exponent, &guess)) {
- return guess;
- }
- return BignumStrtod(trimmed, exponent, guess);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/strtod.h b/src/3rdparty/v8/src/strtod.h
deleted file mode 100644
index 1a5a96c..0000000
--- a/src/3rdparty/v8/src/strtod.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_STRTOD_H_
-#define V8_STRTOD_H_
-
-namespace v8 {
-namespace internal {
-
-// The buffer must only contain digits in the range [0-9]. It must not
-// contain a dot or a sign. It must not start with '0', and must not be empty.
-double Strtod(Vector<const char> buffer, int exponent);
-
-} } // namespace v8::internal
-
-#endif // V8_STRTOD_H_
diff --git a/src/3rdparty/v8/src/stub-cache.cc b/src/3rdparty/v8/src/stub-cache.cc
deleted file mode 100644
index 0c6a7f7..0000000
--- a/src/3rdparty/v8/src/stub-cache.cc
+++ /dev/null
@@ -1,1940 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "arguments.h"
-#include "gdb-jit.h"
-#include "ic-inl.h"
-#include "stub-cache.h"
-#include "vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------
-// StubCache implementation.
-
-
-StubCache::StubCache(Isolate* isolate) : isolate_(isolate) {
- ASSERT(isolate == Isolate::Current());
- memset(primary_, 0, sizeof(primary_[0]) * StubCache::kPrimaryTableSize);
- memset(secondary_, 0, sizeof(secondary_[0]) * StubCache::kSecondaryTableSize);
-}
-
-
-void StubCache::Initialize(bool create_heap_objects) {
- ASSERT(IsPowerOf2(kPrimaryTableSize));
- ASSERT(IsPowerOf2(kSecondaryTableSize));
- if (create_heap_objects) {
- HandleScope scope;
- Clear();
- }
-}
-
-
-Code* StubCache::Set(String* name, Map* map, Code* code) {
- // Get the flags from the code.
- Code::Flags flags = Code::RemoveTypeFromFlags(code->flags());
-
- // Validate that the name does not move on scavenge, and that we
- // can use identity checks instead of string equality checks.
- ASSERT(!heap()->InNewSpace(name));
- ASSERT(name->IsSymbol());
-
- // The state bits are not important to the hash function because
- // the stub cache only contains monomorphic stubs. Make sure that
- // the bits are the least significant so they will be the ones
- // masked out.
- ASSERT(Code::ExtractICStateFromFlags(flags) == MONOMORPHIC);
- ASSERT(Code::kFlagsICStateShift == 0);
-
- // Make sure that the code type is not included in the hash.
- ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Compute the primary entry.
- int primary_offset = PrimaryOffset(name, flags, map);
- Entry* primary = entry(primary_, primary_offset);
- Code* hit = primary->value;
-
- // If the primary entry has useful data in it, we retire it to the
- // secondary cache before overwriting it.
- if (hit != isolate_->builtins()->builtin(Builtins::kIllegal)) {
- Code::Flags primary_flags = Code::RemoveTypeFromFlags(hit->flags());
- int secondary_offset =
- SecondaryOffset(primary->key, primary_flags, primary_offset);
- Entry* secondary = entry(secondary_, secondary_offset);
- *secondary = *primary;
- }
-
- // Update primary cache.
- primary->key = name;
- primary->value = code;
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeLoadNonexistent(String* name,
- JSObject* receiver) {
- ASSERT(receiver->IsGlobalObject() || receiver->HasFastProperties());
- // If no global objects are present in the prototype chain, the load
- // nonexistent IC stub can be shared for all names for a given map
- // and we use the empty string for the map cache in that case. If
- // there are global objects involved, we need to check global
- // property cells in the stub and therefore the stub will be
- // specific to the name.
- String* cache_name = heap()->empty_string();
- if (receiver->IsGlobalObject()) cache_name = name;
- JSObject* last = receiver;
- while (last->GetPrototype() != heap()->null_value()) {
- last = JSObject::cast(last->GetPrototype());
- if (last->IsGlobalObject()) cache_name = name;
- }
- // Compile the stub that is either shared for all names or
- // name specific if there are global objects involved.
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::LOAD_IC, NONEXISTENT);
- Object* code = receiver->map()->FindInCodeCache(cache_name, flags);
- if (code->IsUndefined()) {
- LoadStubCompiler compiler;
- { MaybeObject* maybe_code =
- compiler.CompileLoadNonexistent(cache_name, receiver, last);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), cache_name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, cache_name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(cache_name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
- int field_index) {
- ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, FIELD);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- LoadStubCompiler compiler;
- { MaybeObject* maybe_code =
- compiler.CompileLoadField(receiver, holder, field_index, name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeLoadCallback(String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback) {
- ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
- ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, CALLBACKS);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- LoadStubCompiler compiler;
- { MaybeObject* maybe_code =
- compiler.CompileLoadCallback(name, receiver, holder, callback);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeLoadConstant(String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value) {
- ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::LOAD_IC, CONSTANT_FUNCTION);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- LoadStubCompiler compiler;
- { MaybeObject* maybe_code =
- compiler.CompileLoadConstant(receiver, holder, value, name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeLoadInterceptor(String* name,
- JSObject* receiver,
- JSObject* holder) {
- ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, INTERCEPTOR);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- LoadStubCompiler compiler;
- { MaybeObject* maybe_code =
- compiler.CompileLoadInterceptor(receiver, holder, name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeLoadNormal() {
- return isolate_->builtins()->builtin(Builtins::kLoadIC_Normal);
-}
-
-
-MaybeObject* StubCache::ComputeLoadGlobal(String* name,
- JSObject* receiver,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- bool is_dont_delete) {
- ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- LoadStubCompiler compiler;
- { MaybeObject* maybe_code = compiler.CompileLoadGlobal(receiver,
- holder,
- cell,
- name,
- is_dont_delete);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeKeyedLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
- int field_index) {
- ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, FIELD);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- KeyedLoadStubCompiler compiler;
- { MaybeObject* maybe_code =
- compiler.CompileLoadField(name, receiver, holder, field_index);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeKeyedLoadConstant(String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value) {
- ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CONSTANT_FUNCTION);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- KeyedLoadStubCompiler compiler;
- { MaybeObject* maybe_code =
- compiler.CompileLoadConstant(name, receiver, holder, value);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeKeyedLoadInterceptor(String* name,
- JSObject* receiver,
- JSObject* holder) {
- ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, INTERCEPTOR);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- KeyedLoadStubCompiler compiler;
- { MaybeObject* maybe_code =
- compiler.CompileLoadInterceptor(receiver, holder, name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeKeyedLoadCallback(String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback) {
- ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- KeyedLoadStubCompiler compiler;
- { MaybeObject* maybe_code =
- compiler.CompileLoadCallback(name, receiver, holder, callback);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-
-MaybeObject* StubCache::ComputeKeyedLoadArrayLength(String* name,
- JSArray* receiver) {
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
- ASSERT(receiver->IsJSObject());
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- KeyedLoadStubCompiler compiler;
- { MaybeObject* maybe_code = compiler.CompileLoadArrayLength(name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeKeyedLoadStringLength(String* name,
- String* receiver) {
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
- Map* map = receiver->map();
- Object* code = map->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- KeyedLoadStubCompiler compiler;
- { MaybeObject* maybe_code = compiler.CompileLoadStringLength(name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result = map->UpdateCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeKeyedLoadFunctionPrototype(
- String* name,
- JSFunction* receiver) {
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- KeyedLoadStubCompiler compiler;
- { MaybeObject* maybe_code = compiler.CompileLoadFunctionPrototype(name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeKeyedLoadSpecialized(JSObject* receiver) {
- // Using NORMAL as the PropertyType for array element loads is a misuse. The
- // generated stub always accesses fast elements, not slow-mode fields, but
- // some property type is required for the stub lookup. Note that overloading
- // the NORMAL PropertyType is only safe as long as no stubs are generated for
- // other keyed field loads. This is guaranteed to be the case since all field
- // keyed loads that are not array elements go through a generic builtin stub.
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, NORMAL);
- String* name = heap()->KeyedLoadSpecialized_symbol();
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- KeyedLoadStubCompiler compiler;
- { MaybeObject* maybe_code = compiler.CompileLoadSpecialized(receiver);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), 0));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeStoreField(String* name,
- JSObject* receiver,
- int field_index,
- Map* transition,
- StrictModeFlag strict_mode) {
- PropertyType type = (transition == NULL) ? FIELD : MAP_TRANSITION;
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, type, strict_mode);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- StoreStubCompiler compiler(strict_mode);
- { MaybeObject* maybe_code =
- compiler.CompileStoreField(receiver, field_index, transition, name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeKeyedStoreSpecialized(
- JSObject* receiver,
- StrictModeFlag strict_mode) {
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, NORMAL, strict_mode);
- String* name = heap()->KeyedStoreSpecialized_symbol();
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- KeyedStoreStubCompiler compiler(strict_mode);
- { MaybeObject* maybe_code = compiler.CompileStoreSpecialized(receiver);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), 0));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-namespace {
-
-ExternalArrayType ElementsKindToExternalArrayType(JSObject::ElementsKind kind) {
- switch (kind) {
- case JSObject::EXTERNAL_BYTE_ELEMENTS:
- return kExternalByteArray;
- case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- return kExternalUnsignedByteArray;
- case JSObject::EXTERNAL_SHORT_ELEMENTS:
- return kExternalShortArray;
- case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- return kExternalUnsignedShortArray;
- case JSObject::EXTERNAL_INT_ELEMENTS:
- return kExternalIntArray;
- case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
- return kExternalUnsignedIntArray;
- case JSObject::EXTERNAL_FLOAT_ELEMENTS:
- return kExternalFloatArray;
- case JSObject::EXTERNAL_PIXEL_ELEMENTS:
- return kExternalPixelArray;
- default:
- UNREACHABLE();
- return static_cast<ExternalArrayType>(0);
- }
-}
-
-String* ExternalArrayTypeToStubName(Heap* heap,
- ExternalArrayType array_type,
- bool is_store) {
- if (is_store) {
- switch (array_type) {
- case kExternalByteArray:
- return heap->KeyedStoreExternalByteArray_symbol();
- case kExternalUnsignedByteArray:
- return heap->KeyedStoreExternalUnsignedByteArray_symbol();
- case kExternalShortArray:
- return heap->KeyedStoreExternalShortArray_symbol();
- case kExternalUnsignedShortArray:
- return heap->KeyedStoreExternalUnsignedShortArray_symbol();
- case kExternalIntArray:
- return heap->KeyedStoreExternalIntArray_symbol();
- case kExternalUnsignedIntArray:
- return heap->KeyedStoreExternalUnsignedIntArray_symbol();
- case kExternalFloatArray:
- return heap->KeyedStoreExternalFloatArray_symbol();
- case kExternalPixelArray:
- return heap->KeyedStoreExternalPixelArray_symbol();
- default:
- UNREACHABLE();
- return NULL;
- }
- } else {
- switch (array_type) {
- case kExternalByteArray:
- return heap->KeyedLoadExternalByteArray_symbol();
- case kExternalUnsignedByteArray:
- return heap->KeyedLoadExternalUnsignedByteArray_symbol();
- case kExternalShortArray:
- return heap->KeyedLoadExternalShortArray_symbol();
- case kExternalUnsignedShortArray:
- return heap->KeyedLoadExternalUnsignedShortArray_symbol();
- case kExternalIntArray:
- return heap->KeyedLoadExternalIntArray_symbol();
- case kExternalUnsignedIntArray:
- return heap->KeyedLoadExternalUnsignedIntArray_symbol();
- case kExternalFloatArray:
- return heap->KeyedLoadExternalFloatArray_symbol();
- case kExternalPixelArray:
- return heap->KeyedLoadExternalPixelArray_symbol();
- default:
- UNREACHABLE();
- return NULL;
- }
- }
-}
-
-} // anonymous namespace
-
-
-MaybeObject* StubCache::ComputeKeyedLoadOrStoreExternalArray(
- JSObject* receiver,
- bool is_store,
- StrictModeFlag strict_mode) {
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(
- is_store ? Code::KEYED_EXTERNAL_ARRAY_STORE_IC :
- Code::KEYED_EXTERNAL_ARRAY_LOAD_IC,
- NORMAL,
- strict_mode);
- ExternalArrayType array_type =
- ElementsKindToExternalArrayType(receiver->GetElementsKind());
- String* name = ExternalArrayTypeToStubName(heap(), array_type, is_store);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- ExternalArrayStubCompiler compiler;
- { MaybeObject* maybe_code =
- is_store ?
- compiler.CompileKeyedStoreStub(receiver, array_type, flags) :
- compiler.CompileKeyedLoadStub(receiver, array_type, flags);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- Code::cast(code)->set_external_array_type(array_type);
- if (is_store) {
- PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_EXTERNAL_ARRAY_STORE_IC_TAG,
- Code::cast(code), 0));
- } else {
- PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG,
- Code::cast(code), 0));
- }
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeStoreNormal(StrictModeFlag strict_mode) {
- return isolate_->builtins()->builtin((strict_mode == kStrictMode)
- ? Builtins::kStoreIC_Normal_Strict
- : Builtins::kStoreIC_Normal);
-}
-
-
-MaybeObject* StubCache::ComputeStoreGlobal(String* name,
- GlobalObject* receiver,
- JSGlobalPropertyCell* cell,
- StrictModeFlag strict_mode) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, NORMAL, strict_mode);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- StoreStubCompiler compiler(strict_mode);
- { MaybeObject* maybe_code =
- compiler.CompileStoreGlobal(receiver, cell, name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeStoreCallback(
- String* name,
- JSObject* receiver,
- AccessorInfo* callback,
- StrictModeFlag strict_mode) {
- ASSERT(v8::ToCData<Address>(callback->setter()) != 0);
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, CALLBACKS, strict_mode);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- StoreStubCompiler compiler(strict_mode);
- { MaybeObject* maybe_code =
- compiler.CompileStoreCallback(receiver, callback, name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeStoreInterceptor(
- String* name,
- JSObject* receiver,
- StrictModeFlag strict_mode) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, INTERCEPTOR, strict_mode);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- StoreStubCompiler compiler(strict_mode);
- { MaybeObject* maybe_code =
- compiler.CompileStoreInterceptor(receiver, name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeKeyedStoreField(String* name,
- JSObject* receiver,
- int field_index,
- Map* transition,
- StrictModeFlag strict_mode) {
- PropertyType type = (transition == NULL) ? FIELD : MAP_TRANSITION;
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::KEYED_STORE_IC, type, strict_mode);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- KeyedStoreStubCompiler compiler(strict_mode);
- { MaybeObject* maybe_code =
- compiler.CompileStoreField(receiver, field_index, transition, name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate(),
- CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
- Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-#define CALL_LOGGER_TAG(kind, type) \
- (kind == Code::CALL_IC ? Logger::type : Logger::KEYED_##type)
-
-MaybeObject* StubCache::ComputeCallConstant(int argc,
- InLoopFlag in_loop,
- Code::Kind kind,
- Code::ExtraICState extra_ic_state,
- String* name,
- Object* object,
- JSObject* holder,
- JSFunction* function) {
- // Compute the check type and the map.
- InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(object, holder);
- JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
-
- // Compute check type based on receiver/holder.
- CheckType check = RECEIVER_MAP_CHECK;
- if (object->IsString()) {
- check = STRING_CHECK;
- } else if (object->IsNumber()) {
- check = NUMBER_CHECK;
- } else if (object->IsBoolean()) {
- check = BOOLEAN_CHECK;
- }
-
- Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
- CONSTANT_FUNCTION,
- extra_ic_state,
- cache_holder,
- in_loop,
- argc);
- Object* code = map_holder->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- // If the function hasn't been compiled yet, we cannot do it now
- // because it may cause GC. To avoid this issue, we return an
- // internal error which will make sure we do not update any
- // caches.
- if (!function->is_compiled()) return Failure::InternalError();
- // Compile the stub - only create stubs for fully compiled functions.
- CallStubCompiler compiler(
- argc, in_loop, kind, extra_ic_state, cache_holder);
- { MaybeObject* maybe_code =
- compiler.CompileCallConstant(object, holder, function, name, check);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- Code::cast(code)->set_check_type(check);
- ASSERT_EQ(flags, Code::cast(code)->flags());
- PROFILE(isolate_,
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
- Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- map_holder->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeCallField(int argc,
- InLoopFlag in_loop,
- Code::Kind kind,
- String* name,
- Object* object,
- JSObject* holder,
- int index) {
- // Compute the check type and the map.
- InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(object, holder);
- JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
-
- // TODO(1233596): We cannot do receiver map check for non-JS objects
- // because they may be represented as immediates without a
- // map. Instead, we check against the map in the holder.
- if (object->IsNumber() || object->IsBoolean() || object->IsString()) {
- object = holder;
- }
-
- Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
- FIELD,
- Code::kNoExtraICState,
- cache_holder,
- in_loop,
- argc);
- Object* code = map_holder->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- CallStubCompiler compiler(
- argc, in_loop, kind, Code::kNoExtraICState, cache_holder);
- { MaybeObject* maybe_code =
- compiler.CompileCallField(JSObject::cast(object),
- holder,
- index,
- name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- ASSERT_EQ(flags, Code::cast(code)->flags());
- PROFILE(isolate_,
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
- Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- map_holder->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeCallInterceptor(int argc,
- Code::Kind kind,
- String* name,
- Object* object,
- JSObject* holder) {
- // Compute the check type and the map.
- InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(object, holder);
- JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
-
- // TODO(1233596): We cannot do receiver map check for non-JS objects
- // because they may be represented as immediates without a
- // map. Instead, we check against the map in the holder.
- if (object->IsNumber() || object->IsBoolean() || object->IsString()) {
- object = holder;
- }
-
- Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
- INTERCEPTOR,
- Code::kNoExtraICState,
- cache_holder,
- NOT_IN_LOOP,
- argc);
- Object* code = map_holder->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- CallStubCompiler compiler(
- argc, NOT_IN_LOOP, kind, Code::kNoExtraICState, cache_holder);
- { MaybeObject* maybe_code =
- compiler.CompileCallInterceptor(JSObject::cast(object), holder, name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- ASSERT_EQ(flags, Code::cast(code)->flags());
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
- Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- map_holder->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeCallNormal(int argc,
- InLoopFlag in_loop,
- Code::Kind kind,
- String* name,
- JSObject* receiver) {
- Object* code;
- { MaybeObject* maybe_code = ComputeCallNormal(argc, in_loop, kind);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeCallGlobal(int argc,
- InLoopFlag in_loop,
- Code::Kind kind,
- String* name,
- JSObject* receiver,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function) {
- InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(receiver, holder);
- JSObject* map_holder = IC::GetCodeCacheHolder(receiver, cache_holder);
- Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
- NORMAL,
- Code::kNoExtraICState,
- cache_holder,
- in_loop,
- argc);
- Object* code = map_holder->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- // If the function hasn't been compiled yet, we cannot do it now
- // because it may cause GC. To avoid this issue, we return an
- // internal error which will make sure we do not update any
- // caches.
- if (!function->is_compiled()) return Failure::InternalError();
- CallStubCompiler compiler(
- argc, in_loop, kind, Code::kNoExtraICState, cache_holder);
- { MaybeObject* maybe_code =
- compiler.CompileCallGlobal(receiver, holder, cell, function, name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- ASSERT_EQ(flags, Code::cast(code)->flags());
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
- Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- map_holder->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-static Object* GetProbeValue(Isolate* isolate, Code::Flags flags) {
- // Use raw_unchecked... so we don't get assert failures during GC.
- NumberDictionary* dictionary =
- isolate->heap()->raw_unchecked_non_monomorphic_cache();
- int entry = dictionary->FindEntry(isolate, flags);
- if (entry != -1) return dictionary->ValueAt(entry);
- return isolate->heap()->raw_unchecked_undefined_value();
-}
-
-
-MUST_USE_RESULT static MaybeObject* ProbeCache(Isolate* isolate,
- Code::Flags flags) {
- Heap* heap = isolate->heap();
- Object* probe = GetProbeValue(isolate, flags);
- if (probe != heap->undefined_value()) return probe;
- // Seed the cache with an undefined value to make sure that any
- // generated code object can always be inserted into the cache
- // without causing allocation failures.
- Object* result;
- { MaybeObject* maybe_result =
- heap->non_monomorphic_cache()->AtNumberPut(flags,
- heap->undefined_value());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- heap->public_set_non_monomorphic_cache(NumberDictionary::cast(result));
- return probe;
-}
-
-
-static MaybeObject* FillCache(Isolate* isolate, MaybeObject* maybe_code) {
- Object* code;
- if (maybe_code->ToObject(&code)) {
- if (code->IsCode()) {
- Heap* heap = isolate->heap();
- int entry = heap->non_monomorphic_cache()->FindEntry(
- Code::cast(code)->flags());
- // The entry must be present see comment in ProbeCache.
- ASSERT(entry != -1);
- ASSERT(heap->non_monomorphic_cache()->ValueAt(entry) ==
- heap->undefined_value());
- heap->non_monomorphic_cache()->ValueAtPut(entry, code);
- CHECK(GetProbeValue(isolate, Code::cast(code)->flags()) == code);
- }
- }
- return maybe_code;
-}
-
-
-Code* StubCache::FindCallInitialize(int argc,
- InLoopFlag in_loop,
- Code::Kind kind) {
- Code::Flags flags = Code::ComputeFlags(kind,
- in_loop,
- UNINITIALIZED,
- Code::kNoExtraICState,
- NORMAL,
- argc);
- Object* result = ProbeCache(isolate(), flags)->ToObjectUnchecked();
- ASSERT(result != heap()->undefined_value());
- // This might be called during the marking phase of the collector
- // hence the unchecked cast.
- return reinterpret_cast<Code*>(result);
-}
-
-
-MaybeObject* StubCache::ComputeCallInitialize(int argc,
- InLoopFlag in_loop,
- Code::Kind kind) {
- Code::Flags flags = Code::ComputeFlags(kind,
- in_loop,
- UNINITIALIZED,
- Code::kNoExtraICState,
- NORMAL,
- argc);
- Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- if (!probe->IsUndefined()) return probe;
- StubCompiler compiler;
- return FillCache(isolate_, compiler.CompileCallInitialize(flags));
-}
-
-
-Handle<Code> StubCache::ComputeCallInitialize(int argc, InLoopFlag in_loop) {
- if (in_loop == IN_LOOP) {
- // Force the creation of the corresponding stub outside loops,
- // because it may be used when clearing the ICs later - it is
- // possible for a series of IC transitions to lose the in-loop
- // information, and the IC clearing code can't generate a stub
- // that it needs so we need to ensure it is generated already.
- ComputeCallInitialize(argc, NOT_IN_LOOP);
- }
- CALL_HEAP_FUNCTION(isolate_,
- ComputeCallInitialize(argc, in_loop, Code::CALL_IC), Code);
-}
-
-
-Handle<Code> StubCache::ComputeKeyedCallInitialize(int argc,
- InLoopFlag in_loop) {
- if (in_loop == IN_LOOP) {
- // Force the creation of the corresponding stub outside loops,
- // because it may be used when clearing the ICs later - it is
- // possible for a series of IC transitions to lose the in-loop
- // information, and the IC clearing code can't generate a stub
- // that it needs so we need to ensure it is generated already.
- ComputeKeyedCallInitialize(argc, NOT_IN_LOOP);
- }
- CALL_HEAP_FUNCTION(
- isolate_,
- ComputeCallInitialize(argc, in_loop, Code::KEYED_CALL_IC), Code);
-}
-
-
-MaybeObject* StubCache::ComputeCallPreMonomorphic(int argc,
- InLoopFlag in_loop,
- Code::Kind kind) {
- Code::Flags flags = Code::ComputeFlags(kind,
- in_loop,
- PREMONOMORPHIC,
- Code::kNoExtraICState,
- NORMAL,
- argc);
- Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- if (!probe->IsUndefined()) return probe;
- StubCompiler compiler;
- return FillCache(isolate_, compiler.CompileCallPreMonomorphic(flags));
-}
-
-
-MaybeObject* StubCache::ComputeCallNormal(int argc,
- InLoopFlag in_loop,
- Code::Kind kind) {
- Code::Flags flags = Code::ComputeFlags(kind,
- in_loop,
- MONOMORPHIC,
- Code::kNoExtraICState,
- NORMAL,
- argc);
- Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- if (!probe->IsUndefined()) return probe;
- StubCompiler compiler;
- return FillCache(isolate_, compiler.CompileCallNormal(flags));
-}
-
-
-MaybeObject* StubCache::ComputeCallMegamorphic(int argc,
- InLoopFlag in_loop,
- Code::Kind kind) {
- Code::Flags flags = Code::ComputeFlags(kind,
- in_loop,
- MEGAMORPHIC,
- Code::kNoExtraICState,
- NORMAL,
- argc);
- Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- if (!probe->IsUndefined()) return probe;
- StubCompiler compiler;
- return FillCache(isolate_, compiler.CompileCallMegamorphic(flags));
-}
-
-
-MaybeObject* StubCache::ComputeCallMiss(int argc, Code::Kind kind) {
- // MONOMORPHIC_PROTOTYPE_FAILURE state is used to make sure that miss stubs
- // and monomorphic stubs are not mixed up together in the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- NOT_IN_LOOP,
- MONOMORPHIC_PROTOTYPE_FAILURE,
- Code::kNoExtraICState,
- NORMAL,
- argc,
- OWN_MAP);
- Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- if (!probe->IsUndefined()) return probe;
- StubCompiler compiler;
- return FillCache(isolate_, compiler.CompileCallMiss(flags));
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-MaybeObject* StubCache::ComputeCallDebugBreak(int argc, Code::Kind kind) {
- Code::Flags flags = Code::ComputeFlags(kind,
- NOT_IN_LOOP,
- DEBUG_BREAK,
- Code::kNoExtraICState,
- NORMAL,
- argc);
- Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- if (!probe->IsUndefined()) return probe;
- StubCompiler compiler;
- return FillCache(isolate_, compiler.CompileCallDebugBreak(flags));
-}
-
-
-MaybeObject* StubCache::ComputeCallDebugPrepareStepIn(int argc,
- Code::Kind kind) {
- Code::Flags flags = Code::ComputeFlags(kind,
- NOT_IN_LOOP,
- DEBUG_PREPARE_STEP_IN,
- Code::kNoExtraICState,
- NORMAL,
- argc);
- Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- if (!probe->IsUndefined()) return probe;
- StubCompiler compiler;
- return FillCache(isolate_, compiler.CompileCallDebugPrepareStepIn(flags));
-}
-#endif
-
-
-void StubCache::Clear() {
- for (int i = 0; i < kPrimaryTableSize; i++) {
- primary_[i].key = heap()->empty_string();
- primary_[i].value = isolate_->builtins()->builtin(
- Builtins::kIllegal);
- }
- for (int j = 0; j < kSecondaryTableSize; j++) {
- secondary_[j].key = heap()->empty_string();
- secondary_[j].value = isolate_->builtins()->builtin(
- Builtins::kIllegal);
- }
-}
-
-
-void StubCache::CollectMatchingMaps(ZoneMapList* types,
- String* name,
- Code::Flags flags) {
- for (int i = 0; i < kPrimaryTableSize; i++) {
- if (primary_[i].key == name) {
- Map* map = primary_[i].value->FindFirstMap();
- // Map can be NULL, if the stub is constant function call
- // with a primitive receiver.
- if (map == NULL) continue;
-
- int offset = PrimaryOffset(name, flags, map);
- if (entry(primary_, offset) == &primary_[i]) {
- types->Add(Handle<Map>(map));
- }
- }
- }
-
- for (int i = 0; i < kSecondaryTableSize; i++) {
- if (secondary_[i].key == name) {
- Map* map = secondary_[i].value->FindFirstMap();
- // Map can be NULL, if the stub is constant function call
- // with a primitive receiver.
- if (map == NULL) continue;
-
- // Lookup in primary table and skip duplicates.
- int primary_offset = PrimaryOffset(name, flags, map);
- Entry* primary_entry = entry(primary_, primary_offset);
- if (primary_entry->key == name) {
- Map* primary_map = primary_entry->value->FindFirstMap();
- if (map == primary_map) continue;
- }
-
- // Lookup in secondary table and add matches.
- int offset = SecondaryOffset(name, flags, primary_offset);
- if (entry(secondary_, offset) == &secondary_[i]) {
- types->Add(Handle<Map>(map));
- }
- }
- }
-}
-
-
-// ------------------------------------------------------------------------
-// StubCompiler implementation.
-
-
-RUNTIME_FUNCTION(MaybeObject*, LoadCallbackProperty) {
- ASSERT(args[0]->IsJSObject());
- ASSERT(args[1]->IsJSObject());
- AccessorInfo* callback = AccessorInfo::cast(args[3]);
- Address getter_address = v8::ToCData<Address>(callback->getter());
- v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address);
- ASSERT(fun != NULL);
- v8::AccessorInfo info(&args[0]);
- HandleScope scope(isolate);
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- ExternalCallbackScope call_scope(isolate, getter_address);
- result = fun(v8::Utils::ToLocal(args.at<String>(4)), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (result.IsEmpty()) return HEAP->undefined_value();
- return *v8::Utils::OpenHandle(*result);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty) {
- JSObject* recv = JSObject::cast(args[0]);
- AccessorInfo* callback = AccessorInfo::cast(args[1]);
- Address setter_address = v8::ToCData<Address>(callback->setter());
- v8::AccessorSetter fun = FUNCTION_CAST<v8::AccessorSetter>(setter_address);
- ASSERT(fun != NULL);
- Handle<String> name = args.at<String>(2);
- Handle<Object> value = args.at<Object>(3);
- HandleScope scope(isolate);
- LOG(isolate, ApiNamedPropertyAccess("store", recv, *name));
- CustomArguments custom_args(isolate, callback->data(), recv, recv);
- v8::AccessorInfo info(custom_args.end());
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- ExternalCallbackScope call_scope(isolate, setter_address);
- fun(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return *value;
-}
-
-
-static const int kAccessorInfoOffsetInInterceptorArgs = 2;
-
-
-/**
- * Attempts to load a property with an interceptor (which must be present),
- * but doesn't search the prototype chain.
- *
- * Returns |Heap::no_interceptor_result_sentinel()| if interceptor doesn't
- * provide any value for the given name.
- */
-RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
- Handle<String> name_handle = args.at<String>(0);
- Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(1);
- ASSERT(kAccessorInfoOffsetInInterceptorArgs == 2);
- ASSERT(args[2]->IsJSObject()); // Receiver.
- ASSERT(args[3]->IsJSObject()); // Holder.
- ASSERT(args.length() == 5); // Last arg is data object.
-
- Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
- v8::NamedPropertyGetter getter =
- FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address);
- ASSERT(getter != NULL);
-
- {
- // Use the interceptor getter.
- v8::AccessorInfo info(args.arguments() -
- kAccessorInfoOffsetInInterceptorArgs);
- HandleScope scope(isolate);
- v8::Handle<v8::Value> r;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- r = getter(v8::Utils::ToLocal(name_handle), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!r.IsEmpty()) {
- return *v8::Utils::OpenHandle(*r);
- }
- }
-
- return isolate->heap()->no_interceptor_result_sentinel();
-}
-
-
-static MaybeObject* ThrowReferenceError(String* name) {
- // If the load is non-contextual, just return the undefined result.
- // Note that both keyed and non-keyed loads may end up here, so we
- // can't use either LoadIC or KeyedLoadIC constructors.
- IC ic(IC::NO_EXTRA_FRAME, Isolate::Current());
- ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub());
- if (!ic.SlowIsContextual()) return HEAP->undefined_value();
-
- // Throw a reference error.
- HandleScope scope;
- Handle<String> name_handle(name);
- Handle<Object> error =
- FACTORY->NewReferenceError("not_defined",
- HandleVector(&name_handle, 1));
- return Isolate::Current()->Throw(*error);
-}
-
-
-static MaybeObject* LoadWithInterceptor(Arguments* args,
- PropertyAttributes* attrs) {
- Handle<String> name_handle = args->at<String>(0);
- Handle<InterceptorInfo> interceptor_info = args->at<InterceptorInfo>(1);
- ASSERT(kAccessorInfoOffsetInInterceptorArgs == 2);
- Handle<JSObject> receiver_handle = args->at<JSObject>(2);
- Handle<JSObject> holder_handle = args->at<JSObject>(3);
- ASSERT(args->length() == 5); // Last arg is data object.
-
- Isolate* isolate = receiver_handle->GetIsolate();
-
- Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
- v8::NamedPropertyGetter getter =
- FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address);
- ASSERT(getter != NULL);
-
- {
- // Use the interceptor getter.
- v8::AccessorInfo info(args->arguments() -
- kAccessorInfoOffsetInInterceptorArgs);
- HandleScope scope(isolate);
- v8::Handle<v8::Value> r;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- r = getter(v8::Utils::ToLocal(name_handle), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!r.IsEmpty()) {
- *attrs = NONE;
- return *v8::Utils::OpenHandle(*r);
- }
- }
-
- MaybeObject* result = holder_handle->GetPropertyPostInterceptor(
- *receiver_handle,
- *name_handle,
- attrs);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return result;
-}
-
-
-/**
- * Loads a property with an interceptor performing post interceptor
- * lookup if interceptor failed.
- */
-RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForLoad) {
- PropertyAttributes attr = NONE;
- Object* result;
- { MaybeObject* maybe_result = LoadWithInterceptor(&args, &attr);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- // If the property is present, return it.
- if (attr != ABSENT) return result;
- return ThrowReferenceError(String::cast(args[0]));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForCall) {
- PropertyAttributes attr;
- MaybeObject* result = LoadWithInterceptor(&args, &attr);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- // This is call IC. In this case, we simply return the undefined result which
- // will lead to an exception when trying to invoke the result as a
- // function.
- return result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty) {
- ASSERT(args.length() == 4);
- JSObject* recv = JSObject::cast(args[0]);
- String* name = String::cast(args[1]);
- Object* value = args[2];
- StrictModeFlag strict_mode =
- static_cast<StrictModeFlag>(Smi::cast(args[3])->value());
- ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
- ASSERT(recv->HasNamedInterceptor());
- PropertyAttributes attr = NONE;
- MaybeObject* result = recv->SetPropertyWithInterceptor(
- name, value, attr, strict_mode);
- return result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor) {
- JSObject* receiver = JSObject::cast(args[0]);
- ASSERT(Smi::cast(args[1])->value() >= 0);
- uint32_t index = Smi::cast(args[1])->value();
- return receiver->GetElementWithInterceptor(receiver, index);
-}
-
-
-MaybeObject* StubCompiler::CompileCallInitialize(Code::Flags flags) {
- HandleScope scope(isolate());
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- if (kind == Code::CALL_IC) {
- CallIC::GenerateInitialize(masm(), argc);
- } else {
- KeyedCallIC::GenerateInitialize(masm(), argc);
- }
- Object* result;
- { MaybeObject* maybe_result =
- GetCodeWithFlags(flags, "CompileCallInitialize");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- isolate()->counters()->call_initialize_stubs()->Increment();
- Code* code = Code::cast(result);
- USE(code);
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_INITIALIZE_TAG),
- code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_INITIALIZE, Code::cast(code)));
- return result;
-}
-
-
-MaybeObject* StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
- HandleScope scope(isolate());
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- // The code of the PreMonomorphic stub is the same as the code
- // of the Initialized stub. They just differ on the code object flags.
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- if (kind == Code::CALL_IC) {
- CallIC::GenerateInitialize(masm(), argc);
- } else {
- KeyedCallIC::GenerateInitialize(masm(), argc);
- }
- Object* result;
- { MaybeObject* maybe_result =
- GetCodeWithFlags(flags, "CompileCallPreMonomorphic");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- isolate()->counters()->call_premonomorphic_stubs()->Increment();
- Code* code = Code::cast(result);
- USE(code);
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_PRE_MONOMORPHIC_TAG),
- code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_PRE_MONOMORPHIC, Code::cast(code)));
- return result;
-}
-
-
-MaybeObject* StubCompiler::CompileCallNormal(Code::Flags flags) {
- HandleScope scope(isolate());
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- if (kind == Code::CALL_IC) {
- CallIC::GenerateNormal(masm(), argc);
- } else {
- KeyedCallIC::GenerateNormal(masm(), argc);
- }
- Object* result;
- { MaybeObject* maybe_result = GetCodeWithFlags(flags, "CompileCallNormal");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- isolate()->counters()->call_normal_stubs()->Increment();
- Code* code = Code::cast(result);
- USE(code);
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_NORMAL_TAG),
- code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_NORMAL, Code::cast(code)));
- return result;
-}
-
-
-MaybeObject* StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
- HandleScope scope(isolate());
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- if (kind == Code::CALL_IC) {
- CallIC::GenerateMegamorphic(masm(), argc);
- } else {
- KeyedCallIC::GenerateMegamorphic(masm(), argc);
- }
- Object* result;
- { MaybeObject* maybe_result =
- GetCodeWithFlags(flags, "CompileCallMegamorphic");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- isolate()->counters()->call_megamorphic_stubs()->Increment();
- Code* code = Code::cast(result);
- USE(code);
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
- code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, Code::cast(code)));
- return result;
-}
-
-
-MaybeObject* StubCompiler::CompileCallMiss(Code::Flags flags) {
- HandleScope scope(isolate());
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- if (kind == Code::CALL_IC) {
- CallIC::GenerateMiss(masm(), argc);
- } else {
- KeyedCallIC::GenerateMiss(masm(), argc);
- }
- Object* result;
- { MaybeObject* maybe_result = GetCodeWithFlags(flags, "CompileCallMiss");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- isolate()->counters()->call_megamorphic_stubs()->Increment();
- Code* code = Code::cast(result);
- USE(code);
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MISS_TAG),
- code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_MISS, Code::cast(code)));
- return result;
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-MaybeObject* StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
- HandleScope scope(isolate());
- Debug::GenerateCallICDebugBreak(masm());
- Object* result;
- { MaybeObject* maybe_result =
- GetCodeWithFlags(flags, "CompileCallDebugBreak");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Code* code = Code::cast(result);
- USE(code);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- USE(kind);
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_DEBUG_BREAK_TAG),
- code, code->arguments_count()));
- return result;
-}
-
-
-MaybeObject* StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
- HandleScope scope(isolate());
- // Use the same code for the the step in preparations as we do for
- // the miss case.
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- if (kind == Code::CALL_IC) {
- CallIC::GenerateMiss(masm(), argc);
- } else {
- KeyedCallIC::GenerateMiss(masm(), argc);
- }
- Object* result;
- { MaybeObject* maybe_result =
- GetCodeWithFlags(flags, "CompileCallDebugPrepareStepIn");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Code* code = Code::cast(result);
- USE(code);
- PROFILE(isolate(),
- CodeCreateEvent(
- CALL_LOGGER_TAG(kind, CALL_DEBUG_PREPARE_STEP_IN_TAG),
- code,
- code->arguments_count()));
- return result;
-}
-#endif
-
-#undef CALL_LOGGER_TAG
-
-MaybeObject* StubCompiler::GetCodeWithFlags(Code::Flags flags,
- const char* name) {
- // Check for allocation failures during stub compilation.
- if (failure_->IsFailure()) return failure_;
-
- // Create code object in the heap.
- CodeDesc desc;
- masm_.GetCode(&desc);
- MaybeObject* result = heap()->CreateCode(desc, flags, masm_.CodeObject());
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code_stubs && !result->IsFailure()) {
- Code::cast(result->ToObjectUnchecked())->Disassemble(name);
- }
-#endif
- return result;
-}
-
-
-MaybeObject* StubCompiler::GetCodeWithFlags(Code::Flags flags, String* name) {
- if (FLAG_print_code_stubs && (name != NULL)) {
- return GetCodeWithFlags(flags, *name->ToCString());
- }
- return GetCodeWithFlags(flags, reinterpret_cast<char*>(NULL));
-}
-
-
-void StubCompiler::LookupPostInterceptor(JSObject* holder,
- String* name,
- LookupResult* lookup) {
- holder->LocalLookupRealNamedProperty(name, lookup);
- if (!lookup->IsProperty()) {
- lookup->NotFound();
- Object* proto = holder->GetPrototype();
- if (!proto->IsNull()) {
- proto->Lookup(name, lookup);
- }
- }
-}
-
-
-
-MaybeObject* LoadStubCompiler::GetCode(PropertyType type, String* name) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, type);
- MaybeObject* result = GetCodeWithFlags(flags, name);
- if (!result->IsFailure()) {
- PROFILE(isolate(),
- CodeCreateEvent(Logger::LOAD_IC_TAG,
- Code::cast(result->ToObjectUnchecked()),
- name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC,
- name,
- Code::cast(result->ToObjectUnchecked())));
- }
- return result;
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::GetCode(PropertyType type, String* name) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, type);
- MaybeObject* result = GetCodeWithFlags(flags, name);
- if (!result->IsFailure()) {
- PROFILE(isolate(),
- CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG,
- Code::cast(result->ToObjectUnchecked()),
- name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC,
- name,
- Code::cast(result->ToObjectUnchecked())));
- }
- return result;
-}
-
-
-MaybeObject* StoreStubCompiler::GetCode(PropertyType type, String* name) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, type, strict_mode_);
- MaybeObject* result = GetCodeWithFlags(flags, name);
- if (!result->IsFailure()) {
- PROFILE(isolate(),
- CodeCreateEvent(Logger::STORE_IC_TAG,
- Code::cast(result->ToObjectUnchecked()),
- name));
- GDBJIT(AddCode(GDBJITInterface::STORE_IC,
- name,
- Code::cast(result->ToObjectUnchecked())));
- }
- return result;
-}
-
-
-MaybeObject* KeyedStoreStubCompiler::GetCode(PropertyType type, String* name) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::KEYED_STORE_IC, type, strict_mode_);
- MaybeObject* result = GetCodeWithFlags(flags, name);
- if (!result->IsFailure()) {
- PROFILE(isolate(),
- CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
- Code::cast(result->ToObjectUnchecked()),
- name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC,
- name,
- Code::cast(result->ToObjectUnchecked())));
- }
- return result;
-}
-
-
-CallStubCompiler::CallStubCompiler(int argc,
- InLoopFlag in_loop,
- Code::Kind kind,
- Code::ExtraICState extra_ic_state,
- InlineCacheHolderFlag cache_holder)
- : arguments_(argc),
- in_loop_(in_loop),
- kind_(kind),
- extra_ic_state_(extra_ic_state),
- cache_holder_(cache_holder) {
-}
-
-
-bool CallStubCompiler::HasCustomCallGenerator(JSFunction* function) {
- SharedFunctionInfo* info = function->shared();
- if (info->HasBuiltinFunctionId()) {
- BuiltinFunctionId id = info->builtin_function_id();
-#define CALL_GENERATOR_CASE(name) if (id == k##name) return true;
- CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
-#undef CALL_GENERATOR_CASE
- }
- CallOptimization optimization(function);
- if (optimization.is_simple_api_call()) {
- return true;
- }
- return false;
-}
-
-
-MaybeObject* CallStubCompiler::CompileCustomCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* fname) {
- ASSERT(HasCustomCallGenerator(function));
-
- SharedFunctionInfo* info = function->shared();
- if (info->HasBuiltinFunctionId()) {
- BuiltinFunctionId id = info->builtin_function_id();
-#define CALL_GENERATOR_CASE(name) \
- if (id == k##name) { \
- return CallStubCompiler::Compile##name##Call(object, \
- holder, \
- cell, \
- function, \
- fname); \
- }
- CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
-#undef CALL_GENERATOR_CASE
- }
- CallOptimization optimization(function);
- ASSERT(optimization.is_simple_api_call());
- return CompileFastApiCall(optimization,
- object,
- holder,
- cell,
- function,
- fname);
-}
-
-
-MaybeObject* CallStubCompiler::GetCode(PropertyType type, String* name) {
- int argc = arguments_.immediate();
- Code::Flags flags = Code::ComputeMonomorphicFlags(kind_,
- type,
- extra_ic_state_,
- cache_holder_,
- in_loop_,
- argc);
- return GetCodeWithFlags(flags, name);
-}
-
-
-MaybeObject* CallStubCompiler::GetCode(JSFunction* function) {
- String* function_name = NULL;
- if (function->shared()->name()->IsString()) {
- function_name = String::cast(function->shared()->name());
- }
- return GetCode(CONSTANT_FUNCTION, function_name);
-}
-
-
-MaybeObject* ConstructStubCompiler::GetCode() {
- Code::Flags flags = Code::ComputeFlags(Code::STUB);
- Object* result;
- { MaybeObject* maybe_result = GetCodeWithFlags(flags, "ConstructStub");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Code* code = Code::cast(result);
- USE(code);
- PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, code, "ConstructStub"));
- GDBJIT(AddCode(GDBJITInterface::STUB, "ConstructStub", Code::cast(code)));
- return result;
-}
-
-
-CallOptimization::CallOptimization(LookupResult* lookup) {
- if (!lookup->IsProperty() || !lookup->IsCacheable() ||
- lookup->type() != CONSTANT_FUNCTION) {
- Initialize(NULL);
- } else {
- // We only optimize constant function calls.
- Initialize(lookup->GetConstantFunction());
- }
-}
-
-CallOptimization::CallOptimization(JSFunction* function) {
- Initialize(function);
-}
-
-
-int CallOptimization::GetPrototypeDepthOfExpectedType(JSObject* object,
- JSObject* holder) const {
- ASSERT(is_simple_api_call_);
- if (expected_receiver_type_ == NULL) return 0;
- int depth = 0;
- while (object != holder) {
- if (object->IsInstanceOf(expected_receiver_type_)) return depth;
- object = JSObject::cast(object->GetPrototype());
- ++depth;
- }
- if (holder->IsInstanceOf(expected_receiver_type_)) return depth;
- return kInvalidProtoDepth;
-}
-
-
-void CallOptimization::Initialize(JSFunction* function) {
- constant_function_ = NULL;
- is_simple_api_call_ = false;
- expected_receiver_type_ = NULL;
- api_call_info_ = NULL;
-
- if (function == NULL || !function->is_compiled()) return;
-
- constant_function_ = function;
- AnalyzePossibleApiFunction(function);
-}
-
-
-void CallOptimization::AnalyzePossibleApiFunction(JSFunction* function) {
- SharedFunctionInfo* sfi = function->shared();
- if (!sfi->IsApiFunction()) return;
- FunctionTemplateInfo* info = sfi->get_api_func_data();
-
- // Require a C++ callback.
- if (info->call_code()->IsUndefined()) return;
- api_call_info_ = CallHandlerInfo::cast(info->call_code());
-
- // Accept signatures that either have no restrictions at all or
- // only have restrictions on the receiver.
- if (!info->signature()->IsUndefined()) {
- SignatureInfo* signature = SignatureInfo::cast(info->signature());
- if (!signature->args()->IsUndefined()) return;
- if (!signature->receiver()->IsUndefined()) {
- expected_receiver_type_ =
- FunctionTemplateInfo::cast(signature->receiver());
- }
- }
-
- is_simple_api_call_ = true;
-}
-
-
-MaybeObject* ExternalArrayStubCompiler::GetCode(Code::Flags flags) {
- Object* result;
- { MaybeObject* maybe_result = GetCodeWithFlags(flags, "ExternalArrayStub");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Code* code = Code::cast(result);
- USE(code);
- PROFILE(isolate(),
- CodeCreateEvent(Logger::STUB_TAG, code, "ExternalArrayStub"));
- return result;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/stub-cache.h b/src/3rdparty/v8/src/stub-cache.h
deleted file mode 100644
index c5dcf36..0000000
--- a/src/3rdparty/v8/src/stub-cache.h
+++ /dev/null
@@ -1,866 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_STUB_CACHE_H_
-#define V8_STUB_CACHE_H_
-
-#include "arguments.h"
-#include "macro-assembler.h"
-#include "zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-// The stub cache is used for megamorphic calls and property accesses.
-// It maps (map, name, type)->Code*
-
-// The design of the table uses the inline cache stubs used for
-// mono-morphic calls. The beauty of this, we do not have to
-// invalidate the cache whenever a prototype map is changed. The stub
-// validates the map chain as in the mono-morphic case.
-
-class StubCache;
-
-class SCTableReference {
- public:
- Address address() const { return address_; }
-
- private:
- explicit SCTableReference(Address address) : address_(address) {}
-
- Address address_;
-
- friend class StubCache;
-};
-
-
-class StubCache {
- public:
- struct Entry {
- String* key;
- Code* value;
- };
-
- void Initialize(bool create_heap_objects);
-
-
- // Computes the right stub matching. Inserts the result in the
- // cache before returning. This might compile a stub if needed.
- MUST_USE_RESULT MaybeObject* ComputeLoadNonexistent(
- String* name,
- JSObject* receiver);
-
- MUST_USE_RESULT MaybeObject* ComputeLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
- int field_index);
-
- MUST_USE_RESULT MaybeObject* ComputeLoadCallback(
- String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback);
-
- MUST_USE_RESULT MaybeObject* ComputeLoadConstant(String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value);
-
- MUST_USE_RESULT MaybeObject* ComputeLoadInterceptor(
- String* name,
- JSObject* receiver,
- JSObject* holder);
-
- MUST_USE_RESULT MaybeObject* ComputeLoadNormal();
-
-
- MUST_USE_RESULT MaybeObject* ComputeLoadGlobal(
- String* name,
- JSObject* receiver,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- bool is_dont_delete);
-
-
- // ---
-
- MUST_USE_RESULT MaybeObject* ComputeKeyedLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
- int field_index);
-
- MUST_USE_RESULT MaybeObject* ComputeKeyedLoadCallback(
- String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback);
-
- MUST_USE_RESULT MaybeObject* ComputeKeyedLoadConstant(
- String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value);
-
- MUST_USE_RESULT MaybeObject* ComputeKeyedLoadInterceptor(
- String* name,
- JSObject* receiver,
- JSObject* holder);
-
- MUST_USE_RESULT MaybeObject* ComputeKeyedLoadArrayLength(
- String* name,
- JSArray* receiver);
-
- MUST_USE_RESULT MaybeObject* ComputeKeyedLoadStringLength(
- String* name,
- String* receiver);
-
- MUST_USE_RESULT MaybeObject* ComputeKeyedLoadFunctionPrototype(
- String* name,
- JSFunction* receiver);
-
- MUST_USE_RESULT MaybeObject* ComputeKeyedLoadSpecialized(
- JSObject* receiver);
-
- // ---
-
- MUST_USE_RESULT MaybeObject* ComputeStoreField(
- String* name,
- JSObject* receiver,
- int field_index,
- Map* transition,
- StrictModeFlag strict_mode);
-
- MUST_USE_RESULT MaybeObject* ComputeStoreNormal(
- StrictModeFlag strict_mode);
-
- MUST_USE_RESULT MaybeObject* ComputeStoreGlobal(
- String* name,
- GlobalObject* receiver,
- JSGlobalPropertyCell* cell,
- StrictModeFlag strict_mode);
-
- MUST_USE_RESULT MaybeObject* ComputeStoreCallback(
- String* name,
- JSObject* receiver,
- AccessorInfo* callback,
- StrictModeFlag strict_mode);
-
- MUST_USE_RESULT MaybeObject* ComputeStoreInterceptor(
- String* name,
- JSObject* receiver,
- StrictModeFlag strict_mode);
-
- // ---
-
- MUST_USE_RESULT MaybeObject* ComputeKeyedStoreField(
- String* name,
- JSObject* receiver,
- int field_index,
- Map* transition,
- StrictModeFlag strict_mode);
-
- MUST_USE_RESULT MaybeObject* ComputeKeyedStoreSpecialized(
- JSObject* receiver,
- StrictModeFlag strict_mode);
-
-
- MUST_USE_RESULT MaybeObject* ComputeKeyedLoadOrStoreExternalArray(
- JSObject* receiver,
- bool is_store,
- StrictModeFlag strict_mode);
-
- // ---
-
- MUST_USE_RESULT MaybeObject* ComputeCallField(int argc,
- InLoopFlag in_loop,
- Code::Kind,
- String* name,
- Object* object,
- JSObject* holder,
- int index);
-
- MUST_USE_RESULT MaybeObject* ComputeCallConstant(
- int argc,
- InLoopFlag in_loop,
- Code::Kind,
- Code::ExtraICState extra_ic_state,
- String* name,
- Object* object,
- JSObject* holder,
- JSFunction* function);
-
- MUST_USE_RESULT MaybeObject* ComputeCallNormal(int argc,
- InLoopFlag in_loop,
- Code::Kind,
- String* name,
- JSObject* receiver);
-
- MUST_USE_RESULT MaybeObject* ComputeCallInterceptor(int argc,
- Code::Kind,
- String* name,
- Object* object,
- JSObject* holder);
-
- MUST_USE_RESULT MaybeObject* ComputeCallGlobal(
- int argc,
- InLoopFlag in_loop,
- Code::Kind,
- String* name,
- JSObject* receiver,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function);
-
- // ---
-
- MUST_USE_RESULT MaybeObject* ComputeCallInitialize(int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
-
- Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
-
- Handle<Code> ComputeKeyedCallInitialize(int argc, InLoopFlag in_loop);
-
- MUST_USE_RESULT MaybeObject* ComputeCallPreMonomorphic(
- int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
-
- MUST_USE_RESULT MaybeObject* ComputeCallNormal(int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
-
- MUST_USE_RESULT MaybeObject* ComputeCallMegamorphic(int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
-
- MUST_USE_RESULT MaybeObject* ComputeCallMiss(int argc, Code::Kind kind);
-
- // Finds the Code object stored in the Heap::non_monomorphic_cache().
- MUST_USE_RESULT Code* FindCallInitialize(int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- MUST_USE_RESULT MaybeObject* ComputeCallDebugBreak(int argc, Code::Kind kind);
-
- MUST_USE_RESULT MaybeObject* ComputeCallDebugPrepareStepIn(int argc,
- Code::Kind kind);
-#endif
-
- // Update cache for entry hash(name, map).
- Code* Set(String* name, Map* map, Code* code);
-
- // Clear the lookup table (@ mark compact collection).
- void Clear();
-
- // Collect all maps that match the name and flags.
- void CollectMatchingMaps(ZoneMapList* types,
- String* name,
- Code::Flags flags);
-
- // Generate code for probing the stub cache table.
- // Arguments extra and extra2 may be used to pass additional scratch
- // registers. Set to no_reg if not needed.
- void GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2 = no_reg);
-
- enum Table {
- kPrimary,
- kSecondary
- };
-
-
- SCTableReference key_reference(StubCache::Table table) {
- return SCTableReference(
- reinterpret_cast<Address>(&first_entry(table)->key));
- }
-
-
- SCTableReference value_reference(StubCache::Table table) {
- return SCTableReference(
- reinterpret_cast<Address>(&first_entry(table)->value));
- }
-
-
- StubCache::Entry* first_entry(StubCache::Table table) {
- switch (table) {
- case StubCache::kPrimary: return StubCache::primary_;
- case StubCache::kSecondary: return StubCache::secondary_;
- }
- UNREACHABLE();
- return NULL;
- }
-
- Isolate* isolate() { return isolate_; }
- Heap* heap() { return isolate()->heap(); }
-
- private:
- explicit StubCache(Isolate* isolate);
-
- friend class Isolate;
- friend class SCTableReference;
- static const int kPrimaryTableSize = 2048;
- static const int kSecondaryTableSize = 512;
- Entry primary_[kPrimaryTableSize];
- Entry secondary_[kSecondaryTableSize];
-
- // Computes the hashed offsets for primary and secondary caches.
- RLYSTC int PrimaryOffset(String* name, Code::Flags flags, Map* map) {
- // This works well because the heap object tag size and the hash
- // shift are equal. Shifting down the length field to get the
- // hash code would effectively throw away two bits of the hash
- // code.
- ASSERT(kHeapObjectTagSize == String::kHashShift);
- // Compute the hash of the name (use entire hash field).
- ASSERT(name->HasHashCode());
- uint32_t field = name->hash_field();
- // Using only the low bits in 64-bit mode is unlikely to increase the
- // risk of collision even if the heap is spread over an area larger than
- // 4Gb (and not at all if it isn't).
- uint32_t map_low32bits =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
- // We always set the in_loop bit to zero when generating the lookup code
- // so do it here too so the hash codes match.
- uint32_t iflags =
- (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
- // Base the offset on a simple combination of name, flags, and map.
- uint32_t key = (map_low32bits + field) ^ iflags;
- return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
- }
-
- RLYSTC int SecondaryOffset(String* name, Code::Flags flags, int seed) {
- // Use the seed from the primary cache in the secondary cache.
- uint32_t string_low32bits =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
- // We always set the in_loop bit to zero when generating the lookup code
- // so do it here too so the hash codes match.
- uint32_t iflags =
- (static_cast<uint32_t>(flags) & ~Code::kFlagsICInLoopMask);
- uint32_t key = seed - string_low32bits + iflags;
- return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
- }
-
- // Compute the entry for a given offset in exactly the same way as
- // we do in generated code. We generate an hash code that already
- // ends in String::kHashShift 0s. Then we shift it so it is a multiple
- // of sizeof(Entry). This makes it easier to avoid making mistakes
- // in the hashed offset computations.
- RLYSTC Entry* entry(Entry* table, int offset) {
- const int shift_amount = kPointerSizeLog2 + 1 - String::kHashShift;
- return reinterpret_cast<Entry*>(
- reinterpret_cast<Address>(table) + (offset << shift_amount));
- }
-
- Isolate* isolate_;
-
- DISALLOW_COPY_AND_ASSIGN(StubCache);
-};
-
-
-// ------------------------------------------------------------------------
-
-
-// Support functions for IC stubs for callbacks.
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadCallbackProperty);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty);
-
-
-// Support functions for IC stubs for interceptors.
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForLoad);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForCall);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, CallInterceptorProperty);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor);
-
-
-// The stub compiler compiles stubs for the stub cache.
-class StubCompiler BASE_EMBEDDED {
- public:
- StubCompiler()
- : scope_(), masm_(Isolate::Current(), NULL, 256), failure_(NULL) { }
-
- MUST_USE_RESULT MaybeObject* CompileCallInitialize(Code::Flags flags);
- MUST_USE_RESULT MaybeObject* CompileCallPreMonomorphic(Code::Flags flags);
- MUST_USE_RESULT MaybeObject* CompileCallNormal(Code::Flags flags);
- MUST_USE_RESULT MaybeObject* CompileCallMegamorphic(Code::Flags flags);
- MUST_USE_RESULT MaybeObject* CompileCallMiss(Code::Flags flags);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- MUST_USE_RESULT MaybeObject* CompileCallDebugBreak(Code::Flags flags);
- MUST_USE_RESULT MaybeObject* CompileCallDebugPrepareStepIn(Code::Flags flags);
-#endif
-
- // Static functions for generating parts of stubs.
- static void GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype);
-
- // Generates prototype loading code that uses the objects from the
- // context we were in when this function was called. If the context
- // has changed, a jump to miss is performed. This ties the generated
- // code to a particular context and so must not be used in cases
- // where the generated code is not allowed to have references to
- // objects from a context.
- static void GenerateDirectLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype,
- Label* miss);
-
- static void GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst, Register src,
- JSObject* holder, int index);
-
- static void GenerateLoadArrayLength(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* miss_label);
-
- static void GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label,
- bool support_wrappers);
-
- static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label);
-
- static void GenerateStoreField(MacroAssembler* masm,
- JSObject* object,
- int index,
- Map* transition,
- Register receiver_reg,
- Register name_reg,
- Register scratch,
- Label* miss_label);
-
- static void GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind);
-
- // Generates code that verifies that the property holder has not changed
- // (checking maps of objects in the prototype chain for fast and global
- // objects or doing negative lookup for slow objects, ensures that the
- // property cells for global objects are still empty) and checks that the map
- // of the holder has not changed. If necessary the function also generates
- // code for security check in case of global object holders. Helps to make
- // sure that the current IC is still valid.
- //
- // The scratch and holder registers are always clobbered, but the object
- // register is only clobbered if it the same as the holder register. The
- // function returns a register containing the holder - either object_reg or
- // holder_reg.
- // The function can optionally (when save_at_depth !=
- // kInvalidProtoDepth) save the object at the given depth by moving
- // it to [esp + kPointerSize].
-
- Register CheckPrototypes(JSObject* object,
- Register object_reg,
- JSObject* holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- String* name,
- Label* miss) {
- return CheckPrototypes(object, object_reg, holder, holder_reg, scratch1,
- scratch2, name, kInvalidProtoDepth, miss);
- }
-
- Register CheckPrototypes(JSObject* object,
- Register object_reg,
- JSObject* holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- String* name,
- int save_at_depth,
- Label* miss);
-
- protected:
- MaybeObject* GetCodeWithFlags(Code::Flags flags, const char* name);
- MaybeObject* GetCodeWithFlags(Code::Flags flags, String* name);
-
- MacroAssembler* masm() { return &masm_; }
- void set_failure(Failure* failure) { failure_ = failure; }
-
- void GenerateLoadField(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int index,
- String* name,
- Label* miss);
-
- MaybeObject* GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss);
-
- void GenerateLoadConstant(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Object* value,
- String* name,
- Label* miss);
-
- void GenerateLoadInterceptor(JSObject* object,
- JSObject* holder,
- LookupResult* lookup,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- String* name,
- Label* miss);
-
- static void LookupPostInterceptor(JSObject* holder,
- String* name,
- LookupResult* lookup);
-
- Isolate* isolate() { return scope_.isolate(); }
- Heap* heap() { return isolate()->heap(); }
- Factory* factory() { return isolate()->factory(); }
-
- private:
- HandleScope scope_;
- MacroAssembler masm_;
- Failure* failure_;
-};
-
-
-class LoadStubCompiler: public StubCompiler {
- public:
- MUST_USE_RESULT MaybeObject* CompileLoadNonexistent(String* name,
- JSObject* object,
- JSObject* last);
-
- MUST_USE_RESULT MaybeObject* CompileLoadField(JSObject* object,
- JSObject* holder,
- int index,
- String* name);
-
- MUST_USE_RESULT MaybeObject* CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback);
-
- MUST_USE_RESULT MaybeObject* CompileLoadConstant(JSObject* object,
- JSObject* holder,
- Object* value,
- String* name);
-
- MUST_USE_RESULT MaybeObject* CompileLoadInterceptor(JSObject* object,
- JSObject* holder,
- String* name);
-
- MUST_USE_RESULT MaybeObject* CompileLoadGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- String* name,
- bool is_dont_delete);
-
- private:
- MUST_USE_RESULT MaybeObject* GetCode(PropertyType type, String* name);
-};
-
-
-class KeyedLoadStubCompiler: public StubCompiler {
- public:
- MUST_USE_RESULT MaybeObject* CompileLoadField(String* name,
- JSObject* object,
- JSObject* holder,
- int index);
-
- MUST_USE_RESULT MaybeObject* CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback);
-
- MUST_USE_RESULT MaybeObject* CompileLoadConstant(String* name,
- JSObject* object,
- JSObject* holder,
- Object* value);
-
- MUST_USE_RESULT MaybeObject* CompileLoadInterceptor(JSObject* object,
- JSObject* holder,
- String* name);
-
- MUST_USE_RESULT MaybeObject* CompileLoadArrayLength(String* name);
- MUST_USE_RESULT MaybeObject* CompileLoadStringLength(String* name);
- MUST_USE_RESULT MaybeObject* CompileLoadFunctionPrototype(String* name);
-
- MUST_USE_RESULT MaybeObject* CompileLoadSpecialized(JSObject* receiver);
-
- private:
- MaybeObject* GetCode(PropertyType type, String* name);
-};
-
-
-class StoreStubCompiler: public StubCompiler {
- public:
- explicit StoreStubCompiler(StrictModeFlag strict_mode)
- : strict_mode_(strict_mode) { }
-
- MUST_USE_RESULT MaybeObject* CompileStoreField(JSObject* object,
- int index,
- Map* transition,
- String* name);
-
- MUST_USE_RESULT MaybeObject* CompileStoreCallback(JSObject* object,
- AccessorInfo* callbacks,
- String* name);
- MUST_USE_RESULT MaybeObject* CompileStoreInterceptor(JSObject* object,
- String* name);
- MUST_USE_RESULT MaybeObject* CompileStoreGlobal(GlobalObject* object,
- JSGlobalPropertyCell* holder,
- String* name);
-
-
- private:
- MaybeObject* GetCode(PropertyType type, String* name);
-
- StrictModeFlag strict_mode_;
-};
-
-
-class KeyedStoreStubCompiler: public StubCompiler {
- public:
- explicit KeyedStoreStubCompiler(StrictModeFlag strict_mode)
- : strict_mode_(strict_mode) { }
-
- MUST_USE_RESULT MaybeObject* CompileStoreField(JSObject* object,
- int index,
- Map* transition,
- String* name);
-
- MUST_USE_RESULT MaybeObject* CompileStoreSpecialized(JSObject* receiver);
-
- private:
- MaybeObject* GetCode(PropertyType type, String* name);
-
- StrictModeFlag strict_mode_;
-};
-
-
-// Subset of FUNCTIONS_WITH_ID_LIST with custom constant/global call
-// IC stubs.
-#define CUSTOM_CALL_IC_GENERATORS(V) \
- V(ArrayPush) \
- V(ArrayPop) \
- V(StringCharCodeAt) \
- V(StringCharAt) \
- V(StringFromCharCode) \
- V(MathFloor) \
- V(MathAbs)
-
-
-class CallOptimization;
-
-class CallStubCompiler: public StubCompiler {
- public:
- CallStubCompiler(int argc,
- InLoopFlag in_loop,
- Code::Kind kind,
- Code::ExtraICState extra_ic_state,
- InlineCacheHolderFlag cache_holder);
-
- MUST_USE_RESULT MaybeObject* CompileCallField(JSObject* object,
- JSObject* holder,
- int index,
- String* name);
- MUST_USE_RESULT MaybeObject* CompileCallConstant(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
- CheckType check);
- MUST_USE_RESULT MaybeObject* CompileCallInterceptor(JSObject* object,
- JSObject* holder,
- String* name);
- MUST_USE_RESULT MaybeObject* CompileCallGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name);
-
- static bool HasCustomCallGenerator(JSFunction* function);
-
- private:
- // Compiles a custom call constant/global IC. For constant calls
- // cell is NULL. Returns undefined if there is no custom call code
- // for the given function or it can't be generated.
- MUST_USE_RESULT MaybeObject* CompileCustomCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name);
-
-#define DECLARE_CALL_GENERATOR(name) \
- MUST_USE_RESULT MaybeObject* Compile##name##Call(Object* object, \
- JSObject* holder, \
- JSGlobalPropertyCell* cell, \
- JSFunction* function, \
- String* fname);
- CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR)
-#undef DECLARE_CALL_GENERATOR
-
- MUST_USE_RESULT MaybeObject* CompileFastApiCall(
- const CallOptimization& optimization,
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name);
-
- const ParameterCount arguments_;
- const InLoopFlag in_loop_;
- const Code::Kind kind_;
- const Code::ExtraICState extra_ic_state_;
- const InlineCacheHolderFlag cache_holder_;
-
- const ParameterCount& arguments() { return arguments_; }
-
- MUST_USE_RESULT MaybeObject* GetCode(PropertyType type, String* name);
-
- // Convenience function. Calls GetCode above passing
- // CONSTANT_FUNCTION type and the name of the given function.
- MUST_USE_RESULT MaybeObject* GetCode(JSFunction* function);
-
- void GenerateNameCheck(String* name, Label* miss);
-
- void GenerateGlobalReceiverCheck(JSObject* object,
- JSObject* holder,
- String* name,
- Label* miss);
-
- // Generates code to load the function from the cell checking that
- // it still contains the same function.
- void GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
- JSFunction* function,
- Label* miss);
-
- // Generates a jump to CallIC miss stub. Returns Failure if the jump cannot
- // be generated.
- MUST_USE_RESULT MaybeObject* GenerateMissBranch();
-};
-
-
-class ConstructStubCompiler: public StubCompiler {
- public:
- explicit ConstructStubCompiler() {}
-
- MUST_USE_RESULT MaybeObject* CompileConstructStub(JSFunction* function);
-
- private:
- MaybeObject* GetCode();
-};
-
-
-// Holds information about possible function call optimizations.
-class CallOptimization BASE_EMBEDDED {
- public:
- explicit CallOptimization(LookupResult* lookup);
-
- explicit CallOptimization(JSFunction* function);
-
- bool is_constant_call() const {
- return constant_function_ != NULL;
- }
-
- JSFunction* constant_function() const {
- ASSERT(constant_function_ != NULL);
- return constant_function_;
- }
-
- bool is_simple_api_call() const {
- return is_simple_api_call_;
- }
-
- FunctionTemplateInfo* expected_receiver_type() const {
- ASSERT(is_simple_api_call_);
- return expected_receiver_type_;
- }
-
- CallHandlerInfo* api_call_info() const {
- ASSERT(is_simple_api_call_);
- return api_call_info_;
- }
-
- // Returns the depth of the object having the expected type in the
- // prototype chain between the two arguments.
- int GetPrototypeDepthOfExpectedType(JSObject* object,
- JSObject* holder) const;
-
- private:
- void Initialize(JSFunction* function);
-
- // Determines whether the given function can be called using the
- // fast api call builtin.
- void AnalyzePossibleApiFunction(JSFunction* function);
-
- JSFunction* constant_function_;
- bool is_simple_api_call_;
- FunctionTemplateInfo* expected_receiver_type_;
- CallHandlerInfo* api_call_info_;
-};
-
-class ExternalArrayStubCompiler: public StubCompiler {
- public:
- explicit ExternalArrayStubCompiler() {}
-
- MUST_USE_RESULT MaybeObject* CompileKeyedLoadStub(
- JSObject* receiver, ExternalArrayType array_type, Code::Flags flags);
-
- MUST_USE_RESULT MaybeObject* CompileKeyedStoreStub(
- JSObject* receiver, ExternalArrayType array_type, Code::Flags flags);
-
- private:
- MaybeObject* GetCode(Code::Flags flags);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_STUB_CACHE_H_
diff --git a/src/3rdparty/v8/src/third_party/valgrind/valgrind.h b/src/3rdparty/v8/src/third_party/valgrind/valgrind.h
deleted file mode 100644
index a94dc58..0000000
--- a/src/3rdparty/v8/src/third_party/valgrind/valgrind.h
+++ /dev/null
@@ -1,3925 +0,0 @@
-/* -*- c -*-
- ----------------------------------------------------------------
-
- Notice that the following BSD-style license applies to this one
- file (valgrind.h) only. The rest of Valgrind is licensed under the
- terms of the GNU General Public License, version 2, unless
- otherwise indicated. See the COPYING file in the source
- distribution for details.
-
- ----------------------------------------------------------------
-
- This file is part of Valgrind, a dynamic binary instrumentation
- framework.
-
- Copyright (C) 2000-2007 Julian Seward. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- 2. The origin of this software must not be misrepresented; you must
- not claim that you wrote the original software. If you use this
- software in a product, an acknowledgment in the product
- documentation would be appreciated but is not required.
-
- 3. Altered source versions must be plainly marked as such, and must
- not be misrepresented as being the original software.
-
- 4. The name of the author may not be used to endorse or promote
- products derived from this software without specific prior written
- permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
- OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
- GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- ----------------------------------------------------------------
-
- Notice that the above BSD-style license applies to this one file
- (valgrind.h) only. The entire rest of Valgrind is licensed under
- the terms of the GNU General Public License, version 2. See the
- COPYING file in the source distribution for details.
-
- ----------------------------------------------------------------
-*/
-
-
-/* This file is for inclusion into client (your!) code.
-
- You can use these macros to manipulate and query Valgrind's
- execution inside your own programs.
-
- The resulting executables will still run without Valgrind, just a
- little bit more slowly than they otherwise would, but otherwise
- unchanged. When not running on valgrind, each client request
- consumes very few (eg. 7) instructions, so the resulting performance
- loss is negligible unless you plan to execute client requests
- millions of times per second. Nevertheless, if that is still a
- problem, you can compile with the NVALGRIND symbol defined (gcc
- -DNVALGRIND) so that client requests are not even compiled in. */
-
-#ifndef __VALGRIND_H
-#define __VALGRIND_H
-
-#include <stdarg.h>
-#include <stdint.h>
-
-/* Nb: this file might be included in a file compiled with -ansi. So
- we can't use C++ style "//" comments nor the "asm" keyword (instead
- use "__asm__"). */
-
-/* Derive some tags indicating what the target platform is. Note
- that in this file we're using the compiler's CPP symbols for
- identifying architectures, which are different to the ones we use
- within the rest of Valgrind. Note, __powerpc__ is active for both
- 32 and 64-bit PPC, whereas __powerpc64__ is only active for the
- latter (on Linux, that is). */
-#undef PLAT_x86_linux
-#undef PLAT_amd64_linux
-#undef PLAT_ppc32_linux
-#undef PLAT_ppc64_linux
-#undef PLAT_ppc32_aix5
-#undef PLAT_ppc64_aix5
-
-#if !defined(_AIX) && defined(__i386__)
-# define PLAT_x86_linux 1
-#elif !defined(_AIX) && defined(__x86_64__)
-# define PLAT_amd64_linux 1
-#elif !defined(_AIX) && defined(__powerpc__) && !defined(__powerpc64__)
-# define PLAT_ppc32_linux 1
-#elif !defined(_AIX) && defined(__powerpc__) && defined(__powerpc64__)
-# define PLAT_ppc64_linux 1
-#elif defined(_AIX) && defined(__64BIT__)
-# define PLAT_ppc64_aix5 1
-#elif defined(_AIX) && !defined(__64BIT__)
-# define PLAT_ppc32_aix5 1
-#endif
-
-
-/* If we're not compiling for our target platform, don't generate
- any inline asms. */
-#if !defined(PLAT_x86_linux) && !defined(PLAT_amd64_linux) \
- && !defined(PLAT_ppc32_linux) && !defined(PLAT_ppc64_linux) \
- && !defined(PLAT_ppc32_aix5) && !defined(PLAT_ppc64_aix5)
-# if !defined(NVALGRIND)
-# define NVALGRIND 1
-# endif
-#endif
-
-
-/* ------------------------------------------------------------------ */
-/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */
-/* in here of use to end-users -- skip to the next section. */
-/* ------------------------------------------------------------------ */
-
-#if defined(NVALGRIND)
-
-/* Define NVALGRIND to completely remove the Valgrind magic sequence
- from the compiled code (analogous to NDEBUG's effects on
- assert()) */
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- { \
- (_zzq_rlval) = (_zzq_default); \
- }
-
-#else /* ! NVALGRIND */
-
-/* The following defines the magic code sequences which the JITter
- spots and handles magically. Don't look too closely at them as
- they will rot your brain.
-
- The assembly code sequences for all architectures is in this one
- file. This is because this file must be stand-alone, and we don't
- want to have multiple files.
-
- For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
- value gets put in the return slot, so that everything works when
- this is executed not under Valgrind. Args are passed in a memory
- block, and so there's no intrinsic limit to the number that could
- be passed, but it's currently five.
-
- The macro args are:
- _zzq_rlval result lvalue
- _zzq_default default value (result returned when running on real CPU)
- _zzq_request request code
- _zzq_arg1..5 request params
-
- The other two macros are used to support function wrapping, and are
- a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the
- guest's NRADDR pseudo-register and whatever other information is
- needed to safely run the call original from the wrapper: on
- ppc64-linux, the R2 value at the divert point is also needed. This
- information is abstracted into a user-visible type, OrigFn.
-
- VALGRIND_CALL_NOREDIR_* behaves the same as the following on the
- guest, but guarantees that the branch instruction will not be
- redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64:
- branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a
- complete inline asm, since it needs to be combined with more magic
- inline asm stuff to be useful.
-*/
-
-/* ------------------------- x86-linux ------------------------- */
-
-#if defined(PLAT_x86_linux)
-
-typedef
- struct {
- unsigned int nraddr; /* where's the code? */
- }
- OrigFn;
-
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "roll $3, %%edi ; roll $13, %%edi\n\t" \
- "roll $29, %%edi ; roll $19, %%edi\n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- { volatile unsigned int _zzq_args[6]; \
- volatile unsigned int _zzq_result; \
- _zzq_args[0] = (unsigned int)(_zzq_request); \
- _zzq_args[1] = (unsigned int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned int)(_zzq_arg5); \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %EDX = client_request ( %EAX ) */ \
- "xchgl %%ebx,%%ebx" \
- : "=d" (_zzq_result) \
- : "a" (&_zzq_args[0]), "0" (_zzq_default) \
- : "cc", "memory" \
- ); \
- _zzq_rlval = _zzq_result; \
- }
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- volatile unsigned int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %EAX = guest_NRADDR */ \
- "xchgl %%ecx,%%ecx" \
- : "=a" (__addr) \
- : \
- : "cc", "memory" \
- ); \
- _zzq_orig->nraddr = __addr; \
- }
-
-#define VALGRIND_CALL_NOREDIR_EAX \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* call-noredir *%EAX */ \
- "xchgl %%edx,%%edx\n\t"
-#endif /* PLAT_x86_linux */
-
-/* ------------------------ amd64-linux ------------------------ */
-
-#if defined(PLAT_amd64_linux)
-
-typedef
- struct {
- uint64_t nraddr; /* where's the code? */
- }
- OrigFn;
-
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \
- "rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- { volatile uint64_t _zzq_args[6]; \
- volatile uint64_t _zzq_result; \
- _zzq_args[0] = (uint64_t)(_zzq_request); \
- _zzq_args[1] = (uint64_t)(_zzq_arg1); \
- _zzq_args[2] = (uint64_t)(_zzq_arg2); \
- _zzq_args[3] = (uint64_t)(_zzq_arg3); \
- _zzq_args[4] = (uint64_t)(_zzq_arg4); \
- _zzq_args[5] = (uint64_t)(_zzq_arg5); \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %RDX = client_request ( %RAX ) */ \
- "xchgq %%rbx,%%rbx" \
- : "=d" (_zzq_result) \
- : "a" (&_zzq_args[0]), "0" (_zzq_default) \
- : "cc", "memory" \
- ); \
- _zzq_rlval = _zzq_result; \
- }
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- volatile uint64_t __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %RAX = guest_NRADDR */ \
- "xchgq %%rcx,%%rcx" \
- : "=a" (__addr) \
- : \
- : "cc", "memory" \
- ); \
- _zzq_orig->nraddr = __addr; \
- }
-
-#define VALGRIND_CALL_NOREDIR_RAX \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* call-noredir *%RAX */ \
- "xchgq %%rdx,%%rdx\n\t"
-#endif /* PLAT_amd64_linux */
-
-/* ------------------------ ppc32-linux ------------------------ */
-
-#if defined(PLAT_ppc32_linux)
-
-typedef
- struct {
- unsigned int nraddr; /* where's the code? */
- }
- OrigFn;
-
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
- "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- \
- { unsigned int _zzq_args[6]; \
- unsigned int _zzq_result; \
- unsigned int* _zzq_ptr; \
- _zzq_args[0] = (unsigned int)(_zzq_request); \
- _zzq_args[1] = (unsigned int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned int)(_zzq_arg5); \
- _zzq_ptr = _zzq_args; \
- __asm__ volatile("mr 3,%1\n\t" /*default*/ \
- "mr 4,%2\n\t" /*ptr*/ \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = client_request ( %R4 ) */ \
- "or 1,1,1\n\t" \
- "mr %0,3" /*result*/ \
- : "=b" (_zzq_result) \
- : "b" (_zzq_default), "b" (_zzq_ptr) \
- : "cc", "memory", "r3", "r4"); \
- _zzq_rlval = _zzq_result; \
- }
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- unsigned int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR */ \
- "or 2,2,2\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
- : \
- : "cc", "memory", "r3" \
- ); \
- _zzq_orig->nraddr = __addr; \
- }
-
-#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* branch-and-link-to-noredir *%R11 */ \
- "or 3,3,3\n\t"
-#endif /* PLAT_ppc32_linux */
-
-/* ------------------------ ppc64-linux ------------------------ */
-
-#if defined(PLAT_ppc64_linux)
-
-typedef
- struct {
- uint64_t nraddr; /* where's the code? */
- uint64_t r2; /* what tocptr do we need? */
- }
- OrigFn;
-
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
- "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- \
- { uint64_t _zzq_args[6]; \
- register uint64_t _zzq_result __asm__("r3"); \
- register uint64_t* _zzq_ptr __asm__("r4"); \
- _zzq_args[0] = (uint64_t)(_zzq_request); \
- _zzq_args[1] = (uint64_t)(_zzq_arg1); \
- _zzq_args[2] = (uint64_t)(_zzq_arg2); \
- _zzq_args[3] = (uint64_t)(_zzq_arg3); \
- _zzq_args[4] = (uint64_t)(_zzq_arg4); \
- _zzq_args[5] = (uint64_t)(_zzq_arg5); \
- _zzq_ptr = _zzq_args; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = client_request ( %R4 ) */ \
- "or 1,1,1" \
- : "=r" (_zzq_result) \
- : "0" (_zzq_default), "r" (_zzq_ptr) \
- : "cc", "memory"); \
- _zzq_rlval = _zzq_result; \
- }
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- register uint64_t __addr __asm__("r3"); \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR */ \
- "or 2,2,2" \
- : "=r" (__addr) \
- : \
- : "cc", "memory" \
- ); \
- _zzq_orig->nraddr = __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR_GPR2 */ \
- "or 4,4,4" \
- : "=r" (__addr) \
- : \
- : "cc", "memory" \
- ); \
- _zzq_orig->r2 = __addr; \
- }
-
-#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* branch-and-link-to-noredir *%R11 */ \
- "or 3,3,3\n\t"
-
-#endif /* PLAT_ppc64_linux */
-
-/* ------------------------ ppc32-aix5 ------------------------- */
-
-#if defined(PLAT_ppc32_aix5)
-
-typedef
- struct {
- unsigned int nraddr; /* where's the code? */
- unsigned int r2; /* what tocptr do we need? */
- }
- OrigFn;
-
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
- "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- \
- { unsigned int _zzq_args[7]; \
- register unsigned int _zzq_result; \
- register unsigned int* _zzq_ptr; \
- _zzq_args[0] = (unsigned int)(_zzq_request); \
- _zzq_args[1] = (unsigned int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned int)(_zzq_arg5); \
- _zzq_args[6] = (unsigned int)(_zzq_default); \
- _zzq_ptr = _zzq_args; \
- __asm__ volatile("mr 4,%1\n\t" \
- "lwz 3, 24(4)\n\t" \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = client_request ( %R4 ) */ \
- "or 1,1,1\n\t" \
- "mr %0,3" \
- : "=b" (_zzq_result) \
- : "b" (_zzq_ptr) \
- : "r3", "r4", "cc", "memory"); \
- _zzq_rlval = _zzq_result; \
- }
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- register unsigned int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR */ \
- "or 2,2,2\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
- : \
- : "r3", "cc", "memory" \
- ); \
- _zzq_orig->nraddr = __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR_GPR2 */ \
- "or 4,4,4\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
- : \
- : "r3", "cc", "memory" \
- ); \
- _zzq_orig->r2 = __addr; \
- }
-
-#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* branch-and-link-to-noredir *%R11 */ \
- "or 3,3,3\n\t"
-
-#endif /* PLAT_ppc32_aix5 */
-
-/* ------------------------ ppc64-aix5 ------------------------- */
-
-#if defined(PLAT_ppc64_aix5)
-
-typedef
- struct {
- uint64_t nraddr; /* where's the code? */
- uint64_t r2; /* what tocptr do we need? */
- }
- OrigFn;
-
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
- "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- \
- { uint64_t _zzq_args[7]; \
- register uint64_t _zzq_result; \
- register uint64_t* _zzq_ptr; \
- _zzq_args[0] = (unsigned int long long)(_zzq_request); \
- _zzq_args[1] = (unsigned int long long)(_zzq_arg1); \
- _zzq_args[2] = (unsigned int long long)(_zzq_arg2); \
- _zzq_args[3] = (unsigned int long long)(_zzq_arg3); \
- _zzq_args[4] = (unsigned int long long)(_zzq_arg4); \
- _zzq_args[5] = (unsigned int long long)(_zzq_arg5); \
- _zzq_args[6] = (unsigned int long long)(_zzq_default); \
- _zzq_ptr = _zzq_args; \
- __asm__ volatile("mr 4,%1\n\t" \
- "ld 3, 48(4)\n\t" \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = client_request ( %R4 ) */ \
- "or 1,1,1\n\t" \
- "mr %0,3" \
- : "=b" (_zzq_result) \
- : "b" (_zzq_ptr) \
- : "r3", "r4", "cc", "memory"); \
- _zzq_rlval = _zzq_result; \
- }
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- register uint64_t __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR */ \
- "or 2,2,2\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
- : \
- : "r3", "cc", "memory" \
- ); \
- _zzq_orig->nraddr = __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR_GPR2 */ \
- "or 4,4,4\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
- : \
- : "r3", "cc", "memory" \
- ); \
- _zzq_orig->r2 = __addr; \
- }
-
-#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* branch-and-link-to-noredir *%R11 */ \
- "or 3,3,3\n\t"
-
-#endif /* PLAT_ppc64_aix5 */
-
-/* Insert assembly code for other platforms here... */
-
-#endif /* NVALGRIND */
-
-
-/* ------------------------------------------------------------------ */
-/* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very */
-/* ugly. It's the least-worst tradeoff I can think of. */
-/* ------------------------------------------------------------------ */
-
-/* This section defines magic (a.k.a appalling-hack) macros for doing
- guaranteed-no-redirection macros, so as to get from function
- wrappers to the functions they are wrapping. The whole point is to
- construct standard call sequences, but to do the call itself with a
- special no-redirect call pseudo-instruction that the JIT
- understands and handles specially. This section is long and
- repetitious, and I can't see a way to make it shorter.
-
- The naming scheme is as follows:
-
- CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}
-
- 'W' stands for "word" and 'v' for "void". Hence there are
- different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
- and for each, the possibility of returning a word-typed result, or
- no result.
-*/
-
-/* Use these to write the name of your wrapper. NOTE: duplicates
- VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. */
-
-#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \
- _vgwZU_##soname##_##fnname
-
-#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \
- _vgwZZ_##soname##_##fnname
-
-/* Use this macro from within a wrapper function to collect the
- context (address and possibly other info) of the original function.
- Once you have that you can then use it in one of the CALL_FN_
- macros. The type of the argument _lval is OrigFn. */
-#define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval)
-
-/* Derivatives of the main macros below, for calling functions
- returning void. */
-
-#define CALL_FN_v_v(fnptr) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_v(_junk,fnptr); } while (0)
-
-#define CALL_FN_v_W(fnptr, arg1) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_W(_junk,fnptr,arg1); } while (0)
-
-#define CALL_FN_v_WW(fnptr, arg1,arg2) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0)
-
-#define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)
-
-/* ------------------------- x86-linux ------------------------- */
-
-#if defined(PLAT_x86_linux)
-
-/* These regs are trashed by the hidden call. No need to mention eax
- as gcc can already see that, plus causes gcc to bomb. */
-#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
-
-/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
- long) == 4. */
-
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[1]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[2]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- __asm__ volatile( \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $4, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- __asm__ volatile( \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $8, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[4]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- __asm__ volatile( \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $12, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[5]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- __asm__ volatile( \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $16, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[6]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- __asm__ volatile( \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $20, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[7]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- __asm__ volatile( \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $24, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[8]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- __asm__ volatile( \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $28, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- __asm__ volatile( \
- "pushl 32(%%eax)\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $32, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- __asm__ volatile( \
- "pushl 36(%%eax)\n\t" \
- "pushl 32(%%eax)\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $36, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- __asm__ volatile( \
- "pushl 40(%%eax)\n\t" \
- "pushl 36(%%eax)\n\t" \
- "pushl 32(%%eax)\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $40, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
- arg6,arg7,arg8,arg9,arg10, \
- arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- __asm__ volatile( \
- "pushl 44(%%eax)\n\t" \
- "pushl 40(%%eax)\n\t" \
- "pushl 36(%%eax)\n\t" \
- "pushl 32(%%eax)\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $44, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
- arg6,arg7,arg8,arg9,arg10, \
- arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[13]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- _argvec[12] = (unsigned long)(arg12); \
- __asm__ volatile( \
- "pushl 48(%%eax)\n\t" \
- "pushl 44(%%eax)\n\t" \
- "pushl 40(%%eax)\n\t" \
- "pushl 36(%%eax)\n\t" \
- "pushl 32(%%eax)\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $48, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#endif /* PLAT_x86_linux */
-
-/* ------------------------ amd64-linux ------------------------ */
-
-#if defined(PLAT_amd64_linux)
-
-/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
-
-/* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \
- "rdi", "r8", "r9", "r10", "r11"
-
-/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
- long) == 8. */
-
-/* NB 9 Sept 07. There is a nasty kludge here in all these CALL_FN_
- macros. In order not to trash the stack redzone, we need to drop
- %rsp by 128 before the hidden call, and restore afterwards. The
- nastyness is that it is only by luck that the stack still appears
- to be unwindable during the hidden call - since then the behaviour
- of any routine using this macro does not match what the CFI data
- says. Sigh.
-
- Why is this important? Imagine that a wrapper has a stack
- allocated local, and passes to the hidden call, a pointer to it.
- Because gcc does not know about the hidden call, it may allocate
- that local in the redzone. Unfortunately the hidden call may then
- trash it before it comes to use it. So we must step clear of the
- redzone, for the duration of the hidden call, to make it safe.
-
- Probably the same problem afflicts the other redzone-style ABIs too
- (ppc64-linux, ppc32-aix5, ppc64-aix5); but for those, the stack is
- self describing (none of this CFI nonsense) so at least messing
- with the stack pointer doesn't give a danger of non-unwindable
- stack. */
-
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[1]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[2]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[4]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[5]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[6]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[7]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- "addq $128,%%rsp\n\t" \
- VALGRIND_CALL_NOREDIR_RAX \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[8]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $8, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $16, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $24, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "pushq 80(%%rax)\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $32, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "pushq 88(%%rax)\n\t" \
- "pushq 80(%%rax)\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $40, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[13]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- _argvec[12] = (unsigned long)(arg12); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "pushq 96(%%rax)\n\t" \
- "pushq 88(%%rax)\n\t" \
- "pushq 80(%%rax)\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $48, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#endif /* PLAT_amd64_linux */
-
-/* ------------------------ ppc32-linux ------------------------ */
-
-#if defined(PLAT_ppc32_linux)
-
-/* This is useful for finding out about the on-stack stuff:
-
- extern int f9 ( int,int,int,int,int,int,int,int,int );
- extern int f10 ( int,int,int,int,int,int,int,int,int,int );
- extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
- extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );
-
- int g9 ( void ) {
- return f9(11,22,33,44,55,66,77,88,99);
- }
- int g10 ( void ) {
- return f10(11,22,33,44,55,66,77,88,99,110);
- }
- int g11 ( void ) {
- return f11(11,22,33,44,55,66,77,88,99,110,121);
- }
- int g12 ( void ) {
- return f12(11,22,33,44,55,66,77,88,99,110,121,132);
- }
-*/
-
-/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
-
-/* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS \
- "lr", "ctr", "xer", \
- "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
- "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
- "r11", "r12", "r13"
-
-/* These CALL_FN_ macros assume that on ppc32-linux,
- sizeof(unsigned long) == 4. */
-
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[1]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[2]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[4]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[5]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[6]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[7]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[8]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 9,28(11)\n\t" \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 9,28(11)\n\t" \
- "lwz 10,32(11)\n\t" /* arg8->r10 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "addi 1,1,-16\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,8(1)\n\t" \
- /* args1-8 */ \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 9,28(11)\n\t" \
- "lwz 10,32(11)\n\t" /* arg8->r10 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "addi 1,1,16\n\t" \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- _argvec[10] = (unsigned long)arg10; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "addi 1,1,-16\n\t" \
- /* arg10 */ \
- "lwz 3,40(11)\n\t" \
- "stw 3,12(1)\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,8(1)\n\t" \
- /* args1-8 */ \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 9,28(11)\n\t" \
- "lwz 10,32(11)\n\t" /* arg8->r10 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "addi 1,1,16\n\t" \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- _argvec[10] = (unsigned long)arg10; \
- _argvec[11] = (unsigned long)arg11; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "addi 1,1,-32\n\t" \
- /* arg11 */ \
- "lwz 3,44(11)\n\t" \
- "stw 3,16(1)\n\t" \
- /* arg10 */ \
- "lwz 3,40(11)\n\t" \
- "stw 3,12(1)\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,8(1)\n\t" \
- /* args1-8 */ \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 9,28(11)\n\t" \
- "lwz 10,32(11)\n\t" /* arg8->r10 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "addi 1,1,32\n\t" \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[13]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- _argvec[10] = (unsigned long)arg10; \
- _argvec[11] = (unsigned long)arg11; \
- _argvec[12] = (unsigned long)arg12; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "addi 1,1,-32\n\t" \
- /* arg12 */ \
- "lwz 3,48(11)\n\t" \
- "stw 3,20(1)\n\t" \
- /* arg11 */ \
- "lwz 3,44(11)\n\t" \
- "stw 3,16(1)\n\t" \
- /* arg10 */ \
- "lwz 3,40(11)\n\t" \
- "stw 3,12(1)\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,8(1)\n\t" \
- /* args1-8 */ \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 9,28(11)\n\t" \
- "lwz 10,32(11)\n\t" /* arg8->r10 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "addi 1,1,32\n\t" \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#endif /* PLAT_ppc32_linux */
-
-/* ------------------------ ppc64-linux ------------------------ */
-
-#if defined(PLAT_ppc64_linux)
-
-/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
-
-/* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS \
- "lr", "ctr", "xer", \
- "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
- "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
- "r11", "r12", "r13"
-
-/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
- long) == 8. */
-
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+0]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+1]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+2]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+3]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+4]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+5]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+6]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+7]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+8]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+9]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "addi 1,1,-128\n\t" /* expand stack frame */ \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- "addi 1,1,128" /* restore frame */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+10]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "addi 1,1,-128\n\t" /* expand stack frame */ \
- /* arg10 */ \
- "ld 3,80(11)\n\t" \
- "std 3,120(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- "addi 1,1,128" /* restore frame */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+11]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- _argvec[2+11] = (unsigned long)arg11; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "addi 1,1,-144\n\t" /* expand stack frame */ \
- /* arg11 */ \
- "ld 3,88(11)\n\t" \
- "std 3,128(1)\n\t" \
- /* arg10 */ \
- "ld 3,80(11)\n\t" \
- "std 3,120(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- "addi 1,1,144" /* restore frame */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+12]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- _argvec[2+11] = (unsigned long)arg11; \
- _argvec[2+12] = (unsigned long)arg12; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "addi 1,1,-144\n\t" /* expand stack frame */ \
- /* arg12 */ \
- "ld 3,96(11)\n\t" \
- "std 3,136(1)\n\t" \
- /* arg11 */ \
- "ld 3,88(11)\n\t" \
- "std 3,128(1)\n\t" \
- /* arg10 */ \
- "ld 3,80(11)\n\t" \
- "std 3,120(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- "addi 1,1,144" /* restore frame */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#endif /* PLAT_ppc64_linux */
-
-/* ------------------------ ppc32-aix5 ------------------------- */
-
-#if defined(PLAT_ppc32_aix5)
-
-/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
-
-/* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS \
- "lr", "ctr", "xer", \
- "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
- "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
- "r11", "r12", "r13"
-
-/* Expand the stack frame, copying enough info that unwinding
- still works. Trashes r3. */
-
-#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr) \
- "addi 1,1,-" #_n_fr "\n\t" \
- "lwz 3," #_n_fr "(1)\n\t" \
- "stw 3,0(1)\n\t"
-
-#define VG_CONTRACT_FRAME_BY(_n_fr) \
- "addi 1,1," #_n_fr "\n\t"
-
-/* These CALL_FN_ macros assume that on ppc32-aix5, sizeof(unsigned
- long) == 4. */
-
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+0]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+1]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+2]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+3]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+4]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+5]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+6]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+7]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+8]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+9]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(64) \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,56(1)\n\t" \
- /* args1-8 */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(64) \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+10]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(64) \
- /* arg10 */ \
- "lwz 3,40(11)\n\t" \
- "stw 3,60(1)\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,56(1)\n\t" \
- /* args1-8 */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(64) \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+11]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- _argvec[2+11] = (unsigned long)arg11; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(72) \
- /* arg11 */ \
- "lwz 3,44(11)\n\t" \
- "stw 3,64(1)\n\t" \
- /* arg10 */ \
- "lwz 3,40(11)\n\t" \
- "stw 3,60(1)\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,56(1)\n\t" \
- /* args1-8 */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(72) \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+12]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- _argvec[2+11] = (unsigned long)arg11; \
- _argvec[2+12] = (unsigned long)arg12; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(72) \
- /* arg12 */ \
- "lwz 3,48(11)\n\t" \
- "stw 3,68(1)\n\t" \
- /* arg11 */ \
- "lwz 3,44(11)\n\t" \
- "stw 3,64(1)\n\t" \
- /* arg10 */ \
- "lwz 3,40(11)\n\t" \
- "stw 3,60(1)\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,56(1)\n\t" \
- /* args1-8 */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(72) \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#endif /* PLAT_ppc32_aix5 */
-
-/* ------------------------ ppc64-aix5 ------------------------- */
-
-#if defined(PLAT_ppc64_aix5)
-
-/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
-
-/* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS \
- "lr", "ctr", "xer", \
- "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
- "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
- "r11", "r12", "r13"
-
-/* Expand the stack frame, copying enough info that unwinding
- still works. Trashes r3. */
-
-#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr) \
- "addi 1,1,-" #_n_fr "\n\t" \
- "ld 3," #_n_fr "(1)\n\t" \
- "std 3,0(1)\n\t"
-
-#define VG_CONTRACT_FRAME_BY(_n_fr) \
- "addi 1,1," #_n_fr "\n\t"
-
-/* These CALL_FN_ macros assume that on ppc64-aix5, sizeof(unsigned
- long) == 8. */
-
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+0]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+1]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+2]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+3]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+4]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+5]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+6]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+7]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+8]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+9]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(128) \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(128) \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+10]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(128) \
- /* arg10 */ \
- "ld 3,80(11)\n\t" \
- "std 3,120(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(128) \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+11]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- _argvec[2+11] = (unsigned long)arg11; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(144) \
- /* arg11 */ \
- "ld 3,88(11)\n\t" \
- "std 3,128(1)\n\t" \
- /* arg10 */ \
- "ld 3,80(11)\n\t" \
- "std 3,120(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(144) \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+12]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- _argvec[2+11] = (unsigned long)arg11; \
- _argvec[2+12] = (unsigned long)arg12; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(144) \
- /* arg12 */ \
- "ld 3,96(11)\n\t" \
- "std 3,136(1)\n\t" \
- /* arg11 */ \
- "ld 3,88(11)\n\t" \
- "std 3,128(1)\n\t" \
- /* arg10 */ \
- "ld 3,80(11)\n\t" \
- "std 3,120(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(144) \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#endif /* PLAT_ppc64_aix5 */
-
-
-/* ------------------------------------------------------------------ */
-/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */
-/* */
-/* ------------------------------------------------------------------ */
-
-/* Some request codes. There are many more of these, but most are not
- exposed to end-user view. These are the public ones, all of the
- form 0x1000 + small_number.
-
- Core ones are in the range 0x00000000--0x0000ffff. The non-public
- ones start at 0x2000.
-*/
-
-/* These macros are used by tools -- they must be public, but don't
- embed them into other programs. */
-#define VG_USERREQ_TOOL_BASE(a,b) \
- ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16))
-#define VG_IS_TOOL_USERREQ(a, b, v) \
- (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))
-
-/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
- This enum comprises an ABI exported by Valgrind to programs
- which use client requests. DO NOT CHANGE THE ORDER OF THESE
- ENTRIES, NOR DELETE ANY -- add new ones at the end. */
-typedef
- enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001,
- VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
-
- /* These allow any function to be called from the simulated
- CPU but run on the real CPU. Nb: the first arg passed to
- the function is always the ThreadId of the running
- thread! So CLIENT_CALL0 actually requires a 1 arg
- function, etc. */
- VG_USERREQ__CLIENT_CALL0 = 0x1101,
- VG_USERREQ__CLIENT_CALL1 = 0x1102,
- VG_USERREQ__CLIENT_CALL2 = 0x1103,
- VG_USERREQ__CLIENT_CALL3 = 0x1104,
-
- /* Can be useful in regression testing suites -- eg. can
- send Valgrind's output to /dev/null and still count
- errors. */
- VG_USERREQ__COUNT_ERRORS = 0x1201,
-
- /* These are useful and can be interpreted by any tool that
- tracks malloc() et al, by using vg_replace_malloc.c. */
- VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
- VG_USERREQ__FREELIKE_BLOCK = 0x1302,
- /* Memory pool support. */
- VG_USERREQ__CREATE_MEMPOOL = 0x1303,
- VG_USERREQ__DESTROY_MEMPOOL = 0x1304,
- VG_USERREQ__MEMPOOL_ALLOC = 0x1305,
- VG_USERREQ__MEMPOOL_FREE = 0x1306,
- VG_USERREQ__MEMPOOL_TRIM = 0x1307,
- VG_USERREQ__MOVE_MEMPOOL = 0x1308,
- VG_USERREQ__MEMPOOL_CHANGE = 0x1309,
- VG_USERREQ__MEMPOOL_EXISTS = 0x130a,
-
- /* Allow printfs to valgrind log. */
- VG_USERREQ__PRINTF = 0x1401,
- VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
-
- /* Stack support. */
- VG_USERREQ__STACK_REGISTER = 0x1501,
- VG_USERREQ__STACK_DEREGISTER = 0x1502,
- VG_USERREQ__STACK_CHANGE = 0x1503
- } Vg_ClientRequest;
-
-#if !defined(__GNUC__)
-# define __extension__ /* */
-#endif
-
-/* Returns the number of Valgrinds this code is running under. That
- is, 0 if running natively, 1 if running under Valgrind, 2 if
- running under Valgrind which is running under another Valgrind,
- etc. */
-#define RUNNING_ON_VALGRIND __extension__ \
- ({unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* if not */, \
- VG_USERREQ__RUNNING_ON_VALGRIND, \
- 0, 0, 0, 0, 0); \
- _qzz_res; \
- })
-
-
-/* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
- _qzz_len - 1]. Useful if you are debugging a JITter or some such,
- since it provides a way to make sure valgrind will retranslate the
- invalidated area. Returns no value. */
-#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__DISCARD_TRANSLATIONS, \
- _qzz_addr, _qzz_len, 0, 0, 0); \
- }
-
-
-/* These requests are for getting Valgrind itself to print something.
- Possibly with a backtrace. This is a really ugly hack. */
-
-#if defined(NVALGRIND)
-
-# define VALGRIND_PRINTF(...)
-# define VALGRIND_PRINTF_BACKTRACE(...)
-
-#else /* NVALGRIND */
-
-/* Modern GCC will optimize the static routine out if unused,
- and unused attribute will shut down warnings about it. */
-static int VALGRIND_PRINTF(const char *format, ...)
- __attribute__((format(__printf__, 1, 2), __unused__));
-static int
-VALGRIND_PRINTF(const char *format, ...)
-{
- unsigned long _qzz_res;
- va_list vargs;
- va_start(vargs, format);
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF,
- (unsigned long)format, (unsigned long)vargs,
- 0, 0, 0);
- va_end(vargs);
- return (int)_qzz_res;
-}
-
-static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
- __attribute__((format(__printf__, 1, 2), __unused__));
-static int
-VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
-{
- unsigned long _qzz_res;
- va_list vargs;
- va_start(vargs, format);
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF_BACKTRACE,
- (unsigned long)format, (unsigned long)vargs,
- 0, 0, 0);
- va_end(vargs);
- return (int)_qzz_res;
-}
-
-#endif /* NVALGRIND */
-
-
-/* These requests allow control to move from the simulated CPU to the
- real CPU, calling an arbitary function.
-
- Note that the current ThreadId is inserted as the first argument.
- So this call:
-
- VALGRIND_NON_SIMD_CALL2(f, arg1, arg2)
-
- requires f to have this signature:
-
- Word f(Word tid, Word arg1, Word arg2)
-
- where "Word" is a word-sized type.
-
- Note that these client requests are not entirely reliable. For example,
- if you call a function with them that subsequently calls printf(),
- there's a high chance Valgrind will crash. Generally, your prospects of
- these working are made higher if the called function does not refer to
- any global variables, and does not refer to any libc or other functions
- (printf et al). Any kind of entanglement with libc or dynamic linking is
- likely to have a bad outcome, for tricky reasons which we've grappled
- with a lot in the past.
-*/
-#define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \
- __extension__ \
- ({unsigned long _qyy_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
- VG_USERREQ__CLIENT_CALL0, \
- _qyy_fn, \
- 0, 0, 0, 0); \
- _qyy_res; \
- })
-
-#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \
- __extension__ \
- ({unsigned long _qyy_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
- VG_USERREQ__CLIENT_CALL1, \
- _qyy_fn, \
- _qyy_arg1, 0, 0, 0); \
- _qyy_res; \
- })
-
-#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \
- __extension__ \
- ({unsigned long _qyy_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
- VG_USERREQ__CLIENT_CALL2, \
- _qyy_fn, \
- _qyy_arg1, _qyy_arg2, 0, 0); \
- _qyy_res; \
- })
-
-#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
- __extension__ \
- ({unsigned long _qyy_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
- VG_USERREQ__CLIENT_CALL3, \
- _qyy_fn, \
- _qyy_arg1, _qyy_arg2, \
- _qyy_arg3, 0); \
- _qyy_res; \
- })
-
-
-/* Counts the number of errors that have been recorded by a tool. Nb:
- the tool must record the errors with VG_(maybe_record_error)() or
- VG_(unique_error)() for them to be counted. */
-#define VALGRIND_COUNT_ERRORS \
- __extension__ \
- ({unsigned int _qyy_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
- VG_USERREQ__COUNT_ERRORS, \
- 0, 0, 0, 0, 0); \
- _qyy_res; \
- })
-
-/* Mark a block of memory as having been allocated by a malloc()-like
- function. `addr' is the start of the usable block (ie. after any
- redzone) `rzB' is redzone size if the allocator can apply redzones;
- use '0' if not. Adding redzones makes it more likely Valgrind will spot
- block overruns. `is_zeroed' indicates if the memory is zeroed, as it is
- for calloc(). Put it immediately after the point where a block is
- allocated.
-
- If you're using Memcheck: If you're allocating memory via superblocks,
- and then handing out small chunks of each superblock, if you don't have
- redzones on your small blocks, it's worth marking the superblock with
- VALGRIND_MAKE_MEM_NOACCESS when it's created, so that block overruns are
- detected. But if you can put redzones on, it's probably better to not do
- this, so that messages for small overruns are described in terms of the
- small block rather than the superblock (but if you have a big overrun
- that skips over a redzone, you could miss an error this way). See
- memcheck/tests/custom_alloc.c for an example.
-
- WARNING: if your allocator uses malloc() or 'new' to allocate
- superblocks, rather than mmap() or brk(), this will not work properly --
- you'll likely get assertion failures during leak detection. This is
- because Valgrind doesn't like seeing overlapping heap blocks. Sorry.
-
- Nb: block must be freed via a free()-like function specified
- with VALGRIND_FREELIKE_BLOCK or mismatch errors will occur. */
-#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__MALLOCLIKE_BLOCK, \
- addr, sizeB, rzB, is_zeroed, 0); \
- }
-
-/* Mark a block of memory as having been freed by a free()-like function.
- `rzB' is redzone size; it must match that given to
- VALGRIND_MALLOCLIKE_BLOCK. Memory not freed will be detected by the leak
- checker. Put it immediately after the point where the block is freed. */
-#define VALGRIND_FREELIKE_BLOCK(addr, rzB) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__FREELIKE_BLOCK, \
- addr, rzB, 0, 0, 0); \
- }
-
-/* Create a memory pool. */
-#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__CREATE_MEMPOOL, \
- pool, rzB, is_zeroed, 0, 0); \
- }
-
-/* Destroy a memory pool. */
-#define VALGRIND_DESTROY_MEMPOOL(pool) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__DESTROY_MEMPOOL, \
- pool, 0, 0, 0, 0); \
- }
-
-/* Associate a piece of memory with a memory pool. */
-#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__MEMPOOL_ALLOC, \
- pool, addr, size, 0, 0); \
- }
-
-/* Disassociate a piece of memory from a memory pool. */
-#define VALGRIND_MEMPOOL_FREE(pool, addr) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__MEMPOOL_FREE, \
- pool, addr, 0, 0, 0); \
- }
-
-/* Disassociate any pieces outside a particular range. */
-#define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__MEMPOOL_TRIM, \
- pool, addr, size, 0, 0); \
- }
-
-/* Resize and/or move a piece associated with a memory pool. */
-#define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__MOVE_MEMPOOL, \
- poolA, poolB, 0, 0, 0); \
- }
-
-/* Resize and/or move a piece associated with a memory pool. */
-#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__MEMPOOL_CHANGE, \
- pool, addrA, addrB, size, 0); \
- }
-
-/* Return 1 if a mempool exists, else 0. */
-#define VALGRIND_MEMPOOL_EXISTS(pool) \
- ({unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__MEMPOOL_EXISTS, \
- pool, 0, 0, 0, 0); \
- _qzz_res; \
- })
-
-/* Mark a piece of memory as being a stack. Returns a stack id. */
-#define VALGRIND_STACK_REGISTER(start, end) \
- ({unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__STACK_REGISTER, \
- start, end, 0, 0, 0); \
- _qzz_res; \
- })
-
-/* Unmark the piece of memory associated with a stack id as being a
- stack. */
-#define VALGRIND_STACK_DEREGISTER(id) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__STACK_DEREGISTER, \
- id, 0, 0, 0, 0); \
- }
-
-/* Change the start and end address of the stack id. */
-#define VALGRIND_STACK_CHANGE(id, start, end) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__STACK_CHANGE, \
- id, start, end, 0, 0); \
- }
-
-
-#undef PLAT_x86_linux
-#undef PLAT_amd64_linux
-#undef PLAT_ppc32_linux
-#undef PLAT_ppc64_linux
-#undef PLAT_ppc32_aix5
-#undef PLAT_ppc64_aix5
-
-#endif /* __VALGRIND_H */
diff --git a/src/3rdparty/v8/src/token.cc b/src/3rdparty/v8/src/token.cc
deleted file mode 100644
index feca7be..0000000
--- a/src/3rdparty/v8/src/token.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "../include/v8stdint.h"
-#include "token.h"
-
-namespace v8 {
-namespace internal {
-
-#define T(name, string, precedence) #name,
-const char* const Token::name_[NUM_TOKENS] = {
- TOKEN_LIST(T, T, IGNORE_TOKEN)
-};
-#undef T
-
-
-#define T(name, string, precedence) string,
-const char* const Token::string_[NUM_TOKENS] = {
- TOKEN_LIST(T, T, IGNORE_TOKEN)
-};
-#undef T
-
-
-#define T(name, string, precedence) precedence,
-const int8_t Token::precedence_[NUM_TOKENS] = {
- TOKEN_LIST(T, T, IGNORE_TOKEN)
-};
-#undef T
-
-
-#define KT(a, b, c) 'T',
-#define KK(a, b, c) 'K',
-const char Token::token_type[] = {
- TOKEN_LIST(KT, KK, IGNORE_TOKEN)
-};
-#undef KT
-#undef KK
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/token.h b/src/3rdparty/v8/src/token.h
deleted file mode 100644
index a0afbc1..0000000
--- a/src/3rdparty/v8/src/token.h
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_TOKEN_H_
-#define V8_TOKEN_H_
-
-#include "checks.h"
-
-namespace v8 {
-namespace internal {
-
-// TOKEN_LIST takes a list of 3 macros M, all of which satisfy the
-// same signature M(name, string, precedence), where name is the
-// symbolic token name, string is the corresponding syntactic symbol
-// (or NULL, for literals), and precedence is the precedence (or 0).
-// The parameters are invoked for token categories as follows:
-//
-// T: Non-keyword tokens
-// K: Keyword tokens
-// F: Future (reserved) keyword tokens
-
-// IGNORE_TOKEN is a convenience macro that can be supplied as
-// an argument (at any position) for a TOKEN_LIST call. It does
-// nothing with tokens belonging to the respective category.
-
-#define IGNORE_TOKEN(name, string, precedence)
-
-#define TOKEN_LIST(T, K, F) \
- /* End of source indicator. */ \
- T(EOS, "EOS", 0) \
- \
- /* Punctuators (ECMA-262, section 7.7, page 15). */ \
- T(LPAREN, "(", 0) \
- T(RPAREN, ")", 0) \
- T(LBRACK, "[", 0) \
- T(RBRACK, "]", 0) \
- T(LBRACE, "{", 0) \
- T(RBRACE, "}", 0) \
- T(COLON, ":", 0) \
- T(SEMICOLON, ";", 0) \
- T(PERIOD, ".", 0) \
- T(CONDITIONAL, "?", 3) \
- T(INC, "++", 0) \
- T(DEC, "--", 0) \
- \
- /* Assignment operators. */ \
- /* IsAssignmentOp() and Assignment::is_compound() relies on */ \
- /* this block of enum values being contiguous and sorted in the */ \
- /* same order! */ \
- T(INIT_VAR, "=init_var", 2) /* AST-use only. */ \
- T(INIT_CONST, "=init_const", 2) /* AST-use only. */ \
- T(ASSIGN, "=", 2) \
- T(ASSIGN_BIT_OR, "|=", 2) \
- T(ASSIGN_BIT_XOR, "^=", 2) \
- T(ASSIGN_BIT_AND, "&=", 2) \
- T(ASSIGN_SHL, "<<=", 2) \
- T(ASSIGN_SAR, ">>=", 2) \
- T(ASSIGN_SHR, ">>>=", 2) \
- T(ASSIGN_ADD, "+=", 2) \
- T(ASSIGN_SUB, "-=", 2) \
- T(ASSIGN_MUL, "*=", 2) \
- T(ASSIGN_DIV, "/=", 2) \
- T(ASSIGN_MOD, "%=", 2) \
- \
- /* Binary operators sorted by precedence. */ \
- /* IsBinaryOp() relies on this block of enum values */ \
- /* being contiguous and sorted in the same order! */ \
- T(COMMA, ",", 1) \
- T(OR, "||", 4) \
- T(AND, "&&", 5) \
- T(BIT_OR, "|", 6) \
- T(BIT_XOR, "^", 7) \
- T(BIT_AND, "&", 8) \
- T(SHL, "<<", 11) \
- T(SAR, ">>", 11) \
- T(SHR, ">>>", 11) \
- T(ADD, "+", 12) \
- T(SUB, "-", 12) \
- T(MUL, "*", 13) \
- T(DIV, "/", 13) \
- T(MOD, "%", 13) \
- \
- /* Compare operators sorted by precedence. */ \
- /* IsCompareOp() relies on this block of enum values */ \
- /* being contiguous and sorted in the same order! */ \
- T(EQ, "==", 9) \
- T(NE, "!=", 9) \
- T(EQ_STRICT, "===", 9) \
- T(NE_STRICT, "!==", 9) \
- T(LT, "<", 10) \
- T(GT, ">", 10) \
- T(LTE, "<=", 10) \
- T(GTE, ">=", 10) \
- K(INSTANCEOF, "instanceof", 10) \
- K(IN, "in", 10) \
- \
- /* Unary operators. */ \
- /* IsUnaryOp() relies on this block of enum values */ \
- /* being contiguous and sorted in the same order! */ \
- T(NOT, "!", 0) \
- T(BIT_NOT, "~", 0) \
- K(DELETE, "delete", 0) \
- K(TYPEOF, "typeof", 0) \
- K(VOID, "void", 0) \
- \
- /* Keywords (ECMA-262, section 7.5.2, page 13). */ \
- K(BREAK, "break", 0) \
- K(CASE, "case", 0) \
- K(CATCH, "catch", 0) \
- K(CONTINUE, "continue", 0) \
- K(DEBUGGER, "debugger", 0) \
- K(DEFAULT, "default", 0) \
- /* DELETE */ \
- K(DO, "do", 0) \
- K(ELSE, "else", 0) \
- K(FINALLY, "finally", 0) \
- K(FOR, "for", 0) \
- K(FUNCTION, "function", 0) \
- K(IF, "if", 0) \
- /* IN */ \
- /* INSTANCEOF */ \
- K(NEW, "new", 0) \
- K(RETURN, "return", 0) \
- K(SWITCH, "switch", 0) \
- K(THIS, "this", 0) \
- K(THROW, "throw", 0) \
- K(TRY, "try", 0) \
- /* TYPEOF */ \
- K(VAR, "var", 0) \
- /* VOID */ \
- K(WHILE, "while", 0) \
- K(WITH, "with", 0) \
- \
- /* Literals (ECMA-262, section 7.8, page 16). */ \
- K(NULL_LITERAL, "null", 0) \
- K(TRUE_LITERAL, "true", 0) \
- K(FALSE_LITERAL, "false", 0) \
- T(NUMBER, NULL, 0) \
- T(STRING, NULL, 0) \
- \
- /* Identifiers (not keywords or future reserved words). */ \
- T(IDENTIFIER, NULL, 0) \
- \
- /* Future reserved words (ECMA-262, section 7.6.1.2). */ \
- T(FUTURE_RESERVED_WORD, NULL, 0) \
- K(CONST, "const", 0) \
- K(NATIVE, "native", 0) \
- \
- /* Illegal token - not able to scan. */ \
- T(ILLEGAL, "ILLEGAL", 0) \
- \
- /* Scanner-internal use only. */ \
- T(WHITESPACE, NULL, 0)
-
-
-class Token {
- public:
- // All token values.
-#define T(name, string, precedence) name,
- enum Value {
- TOKEN_LIST(T, T, IGNORE_TOKEN)
- NUM_TOKENS
- };
-#undef T
-
- // Returns a string corresponding to the C++ token name
- // (e.g. "LT" for the token LT).
- static const char* Name(Value tok) {
- ASSERT(tok < NUM_TOKENS); // tok is unsigned
- return name_[tok];
- }
-
- // Predicates
- static bool IsKeyword(Value tok) {
- return token_type[tok] == 'K';
- }
-
- static bool IsAssignmentOp(Value tok) {
- return INIT_VAR <= tok && tok <= ASSIGN_MOD;
- }
-
- static bool IsBinaryOp(Value op) {
- return COMMA <= op && op <= MOD;
- }
-
- static bool IsCompareOp(Value op) {
- return EQ <= op && op <= IN;
- }
-
- static bool IsOrderedCompareOp(Value op) {
- return op == LT || op == LTE || op == GT || op == GTE;
- }
-
- static Value NegateCompareOp(Value op) {
- ASSERT(IsCompareOp(op));
- switch (op) {
- case EQ: return NE;
- case NE: return EQ;
- case EQ_STRICT: return NE_STRICT;
- case LT: return GTE;
- case GT: return LTE;
- case LTE: return GT;
- case GTE: return LT;
- default:
- return op;
- }
- }
-
- static Value InvertCompareOp(Value op) {
- ASSERT(IsCompareOp(op));
- switch (op) {
- case EQ: return NE;
- case NE: return EQ;
- case EQ_STRICT: return NE_STRICT;
- case LT: return GT;
- case GT: return LT;
- case LTE: return GTE;
- case GTE: return LTE;
- default:
- return op;
- }
- }
-
- static bool IsBitOp(Value op) {
- return (BIT_OR <= op && op <= SHR) || op == BIT_NOT;
- }
-
- static bool IsUnaryOp(Value op) {
- return (NOT <= op && op <= VOID) || op == ADD || op == SUB;
- }
-
- static bool IsCountOp(Value op) {
- return op == INC || op == DEC;
- }
-
- static bool IsShiftOp(Value op) {
- return (SHL <= op) && (op <= SHR);
- }
-
- // Returns a string corresponding to the JS token string
- // (.e., "<" for the token LT) or NULL if the token doesn't
- // have a (unique) string (e.g. an IDENTIFIER).
- static const char* String(Value tok) {
- ASSERT(tok < NUM_TOKENS); // tok is unsigned.
- return string_[tok];
- }
-
- // Returns the precedence > 0 for binary and compare
- // operators; returns 0 otherwise.
- static int Precedence(Value tok) {
- ASSERT(tok < NUM_TOKENS); // tok is unsigned.
- return precedence_[tok];
- }
-
- private:
- static const char* const name_[NUM_TOKENS];
- static const char* const string_[NUM_TOKENS];
- static const int8_t precedence_[NUM_TOKENS];
- static const char token_type[NUM_TOKENS];
-};
-
-} } // namespace v8::internal
-
-#endif // V8_TOKEN_H_
diff --git a/src/3rdparty/v8/src/top.cc b/src/3rdparty/v8/src/top.cc
deleted file mode 100644
index abd4ece..0000000
--- a/src/3rdparty/v8/src/top.cc
+++ /dev/null
@@ -1,993 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "bootstrapper.h"
-#include "compiler.h"
-#include "debug.h"
-#include "execution.h"
-#include "messages.h"
-#include "platform.h"
-#include "simulator.h"
-#include "string-stream.h"
-#include "vm-state-inl.h"
-
-
-// TODO(isolates): move to isolate.cc. This stuff is kept here to
-// simplify merging.
-
-namespace v8 {
-namespace internal {
-
-v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
- return TRY_CATCH_FROM_ADDRESS(try_catch_handler_address());
-}
-
-
-void ThreadLocalTop::Initialize() {
- c_entry_fp_ = 0;
- handler_ = 0;
-#ifdef USE_SIMULATOR
-#ifdef V8_TARGET_ARCH_ARM
- simulator_ = Simulator::current(Isolate::Current());
-#elif V8_TARGET_ARCH_MIPS
- simulator_ = Simulator::current(Isolate::Current());
-#endif
-#endif
-#ifdef ENABLE_LOGGING_AND_PROFILING
- js_entry_sp_ = NULL;
- external_callback_ = NULL;
-#endif
-#ifdef ENABLE_VMSTATE_TRACKING
- current_vm_state_ = EXTERNAL;
-#endif
- try_catch_handler_address_ = NULL;
- context_ = NULL;
- int id = Isolate::Current()->thread_manager()->CurrentId();
- thread_id_ = (id == 0) ? ThreadManager::kInvalidId : id;
- external_caught_exception_ = false;
- failed_access_check_callback_ = NULL;
- save_context_ = NULL;
- catcher_ = NULL;
-}
-
-
-Address Isolate::get_address_from_id(Isolate::AddressId id) {
- return isolate_addresses_[id];
-}
-
-
-char* Isolate::Iterate(ObjectVisitor* v, char* thread_storage) {
- ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(thread_storage);
- Iterate(v, thread);
- return thread_storage + sizeof(ThreadLocalTop);
-}
-
-
-void Isolate::IterateThread(ThreadVisitor* v) {
- v->VisitThread(this, thread_local_top());
-}
-
-
-void Isolate::IterateThread(ThreadVisitor* v, char* t) {
- ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t);
- v->VisitThread(this, thread);
-}
-
-
-void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
- // Visit the roots from the top for a given thread.
- Object* pending;
- // The pending exception can sometimes be a failure. We can't show
- // that to the GC, which only understands objects.
- if (thread->pending_exception_->ToObject(&pending)) {
- v->VisitPointer(&pending);
- thread->pending_exception_ = pending; // In case GC updated it.
- }
- v->VisitPointer(&(thread->pending_message_obj_));
- v->VisitPointer(BitCast<Object**>(&(thread->pending_message_script_)));
- v->VisitPointer(BitCast<Object**>(&(thread->context_)));
- Object* scheduled;
- if (thread->scheduled_exception_->ToObject(&scheduled)) {
- v->VisitPointer(&scheduled);
- thread->scheduled_exception_ = scheduled;
- }
-
- for (v8::TryCatch* block = thread->TryCatchHandler();
- block != NULL;
- block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
- v->VisitPointer(BitCast<Object**>(&(block->exception_)));
- v->VisitPointer(BitCast<Object**>(&(block->message_)));
- }
-
- // Iterate over pointers on native execution stack.
- for (StackFrameIterator it(this, thread); !it.done(); it.Advance()) {
- it.frame()->Iterate(v);
- }
-}
-
-
-void Isolate::Iterate(ObjectVisitor* v) {
- ThreadLocalTop* current_t = thread_local_top();
- Iterate(v, current_t);
-}
-
-
-void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) {
- // The ARM simulator has a separate JS stack. We therefore register
- // the C++ try catch handler with the simulator and get back an
- // address that can be used for comparisons with addresses into the
- // JS stack. When running without the simulator, the address
- // returned will be the address of the C++ try catch handler itself.
- Address address = reinterpret_cast<Address>(
- SimulatorStack::RegisterCTryCatch(reinterpret_cast<uintptr_t>(that)));
- thread_local_top()->set_try_catch_handler_address(address);
-}
-
-
-void Isolate::UnregisterTryCatchHandler(v8::TryCatch* that) {
- ASSERT(thread_local_top()->TryCatchHandler() == that);
- thread_local_top()->set_try_catch_handler_address(
- reinterpret_cast<Address>(that->next_));
- thread_local_top()->catcher_ = NULL;
- SimulatorStack::UnregisterCTryCatch();
-}
-
-
-Handle<String> Isolate::StackTraceString() {
- if (stack_trace_nesting_level_ == 0) {
- stack_trace_nesting_level_++;
- HeapStringAllocator allocator;
- StringStream::ClearMentionedObjectCache();
- StringStream accumulator(&allocator);
- incomplete_message_ = &accumulator;
- PrintStack(&accumulator);
- Handle<String> stack_trace = accumulator.ToString();
- incomplete_message_ = NULL;
- stack_trace_nesting_level_ = 0;
- return stack_trace;
- } else if (stack_trace_nesting_level_ == 1) {
- stack_trace_nesting_level_++;
- OS::PrintError(
- "\n\nAttempt to print stack while printing stack (double fault)\n");
- OS::PrintError(
- "If you are lucky you may find a partial stack dump on stdout.\n\n");
- incomplete_message_->OutputToStdOut();
- return factory()->empty_symbol();
- } else {
- OS::Abort();
- // Unreachable
- return factory()->empty_symbol();
- }
-}
-
-
-Handle<JSArray> Isolate::CaptureCurrentStackTrace(
- int frame_limit, StackTrace::StackTraceOptions options) {
- // Ensure no negative values.
- int limit = Max(frame_limit, 0);
- Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit);
-
- Handle<String> column_key = factory()->LookupAsciiSymbol("column");
- Handle<String> line_key = factory()->LookupAsciiSymbol("lineNumber");
- Handle<String> script_key = factory()->LookupAsciiSymbol("scriptName");
-#ifdef QT_BUILD_SCRIPT_LIB
- Handle<String> script_id_key = factory()->LookupAsciiSymbol("scriptId");
-#endif
- Handle<String> name_or_source_url_key =
- factory()->LookupAsciiSymbol("nameOrSourceURL");
- Handle<String> script_name_or_source_url_key =
- factory()->LookupAsciiSymbol("scriptNameOrSourceURL");
- Handle<String> function_key = factory()->LookupAsciiSymbol("functionName");
- Handle<String> eval_key = factory()->LookupAsciiSymbol("isEval");
- Handle<String> constructor_key =
- factory()->LookupAsciiSymbol("isConstructor");
-
- StackTraceFrameIterator it(this);
- int frames_seen = 0;
- while (!it.done() && (frames_seen < limit)) {
- JavaScriptFrame* frame = it.frame();
- // Set initial size to the maximum inlining level + 1 for the outermost
- // function.
- List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
- frame->Summarize(&frames);
- for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
- // Create a JSObject to hold the information for the StackFrame.
- Handle<JSObject> stackFrame = factory()->NewJSObject(object_function());
-
- Handle<JSFunction> fun = frames[i].function();
- Handle<Script> script(Script::cast(fun->shared()->script()));
-
- if (options & StackTrace::kLineNumber) {
- int script_line_offset = script->line_offset()->value();
- int position = frames[i].code()->SourcePosition(frames[i].pc());
- int line_number = GetScriptLineNumber(script, position);
- // line_number is already shifted by the script_line_offset.
- int relative_line_number = line_number - script_line_offset;
- if (options & StackTrace::kColumnOffset && relative_line_number >= 0) {
- Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
- int start = (relative_line_number == 0) ? 0 :
- Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
- int column_offset = position - start;
- if (relative_line_number == 0) {
- // For the case where the code is on the same line as the script
- // tag.
- column_offset += script->column_offset()->value();
- }
- SetLocalPropertyNoThrow(stackFrame, column_key,
- Handle<Smi>(Smi::FromInt(column_offset + 1)));
- }
- SetLocalPropertyNoThrow(stackFrame, line_key,
- Handle<Smi>(Smi::FromInt(line_number + 1)));
- }
-
- if (options & StackTrace::kScriptName) {
- Handle<Object> script_name(script->name(), this);
- SetLocalPropertyNoThrow(stackFrame, script_key, script_name);
- }
-
-#ifdef QT_BUILD_SCRIPT_LIB
- if (options & StackTrace::kScriptId) {
- Handle<Object> script_id(script->id());
- SetLocalPropertyNoThrow(stackFrame, script_id_key, script_id);
- }
-#endif
-
- if (options & StackTrace::kScriptNameOrSourceURL) {
- Handle<Object> script_name(script->name(), this);
- Handle<JSValue> script_wrapper = GetScriptWrapper(script);
- Handle<Object> property = GetProperty(script_wrapper,
- name_or_source_url_key);
- ASSERT(property->IsJSFunction());
- Handle<JSFunction> method = Handle<JSFunction>::cast(property);
- bool caught_exception;
- Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
- NULL, &caught_exception);
- if (caught_exception) {
- result = factory()->undefined_value();
- }
- SetLocalPropertyNoThrow(stackFrame, script_name_or_source_url_key,
- result);
- }
-
- if (options & StackTrace::kFunctionName) {
- Handle<Object> fun_name(fun->shared()->name(), this);
- if (fun_name->ToBoolean()->IsFalse()) {
- fun_name = Handle<Object>(fun->shared()->inferred_name(), this);
- }
- SetLocalPropertyNoThrow(stackFrame, function_key, fun_name);
- }
-
- if (options & StackTrace::kIsEval) {
- int type = Smi::cast(script->compilation_type())->value();
- Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
- factory()->true_value() : factory()->false_value();
- SetLocalPropertyNoThrow(stackFrame, eval_key, is_eval);
- }
-
- if (options & StackTrace::kIsConstructor) {
- Handle<Object> is_constructor = (frames[i].is_constructor()) ?
- factory()->true_value() : factory()->false_value();
- SetLocalPropertyNoThrow(stackFrame, constructor_key, is_constructor);
- }
-
- FixedArray::cast(stack_trace->elements())->set(frames_seen, *stackFrame);
- frames_seen++;
- }
- it.Advance();
- }
-
- stack_trace->set_length(Smi::FromInt(frames_seen));
- return stack_trace;
-}
-
-
-void Isolate::PrintStack() {
- if (stack_trace_nesting_level_ == 0) {
- stack_trace_nesting_level_++;
-
- StringAllocator* allocator;
- if (preallocated_message_space_ == NULL) {
- allocator = new HeapStringAllocator();
- } else {
- allocator = preallocated_message_space_;
- }
-
- StringStream::ClearMentionedObjectCache();
- StringStream accumulator(allocator);
- incomplete_message_ = &accumulator;
- PrintStack(&accumulator);
- accumulator.OutputToStdOut();
- accumulator.Log();
- incomplete_message_ = NULL;
- stack_trace_nesting_level_ = 0;
- if (preallocated_message_space_ == NULL) {
- // Remove the HeapStringAllocator created above.
- delete allocator;
- }
- } else if (stack_trace_nesting_level_ == 1) {
- stack_trace_nesting_level_++;
- OS::PrintError(
- "\n\nAttempt to print stack while printing stack (double fault)\n");
- OS::PrintError(
- "If you are lucky you may find a partial stack dump on stdout.\n\n");
- incomplete_message_->OutputToStdOut();
- }
-}
-
-
-static void PrintFrames(StringStream* accumulator,
- StackFrame::PrintMode mode) {
- StackFrameIterator it;
- for (int i = 0; !it.done(); it.Advance()) {
- it.frame()->Print(accumulator, mode, i++);
- }
-}
-
-
-void Isolate::PrintStack(StringStream* accumulator) {
- if (!IsInitialized()) {
- accumulator->Add(
- "\n==== Stack trace is not available ==========================\n\n");
- accumulator->Add(
- "\n==== Isolate for the thread is not initialized =============\n\n");
- return;
- }
- // The MentionedObjectCache is not GC-proof at the moment.
- AssertNoAllocation nogc;
- ASSERT(StringStream::IsMentionedObjectCacheClear());
-
- // Avoid printing anything if there are no frames.
- if (c_entry_fp(thread_local_top()) == 0) return;
-
- accumulator->Add(
- "\n==== Stack trace ============================================\n\n");
- PrintFrames(accumulator, StackFrame::OVERVIEW);
-
- accumulator->Add(
- "\n==== Details ================================================\n\n");
- PrintFrames(accumulator, StackFrame::DETAILS);
-
- accumulator->PrintMentionedObjectCache();
- accumulator->Add("=====================\n\n");
-}
-
-
-void Isolate::SetFailedAccessCheckCallback(
- v8::FailedAccessCheckCallback callback) {
- thread_local_top()->failed_access_check_callback_ = callback;
-}
-
-
-void Isolate::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
- if (!thread_local_top()->failed_access_check_callback_) return;
-
- ASSERT(receiver->IsAccessCheckNeeded());
- ASSERT(context());
-
- // Get the data object from access check info.
- JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
- if (!constructor->shared()->IsApiFunction()) return;
- Object* data_obj =
- constructor->shared()->get_api_func_data()->access_check_info();
- if (data_obj == heap_.undefined_value()) return;
-
- HandleScope scope;
- Handle<JSObject> receiver_handle(receiver);
- Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
- thread_local_top()->failed_access_check_callback_(
- v8::Utils::ToLocal(receiver_handle),
- type,
- v8::Utils::ToLocal(data));
-}
-
-
-enum MayAccessDecision {
- YES, NO, UNKNOWN
-};
-
-
-static MayAccessDecision MayAccessPreCheck(Isolate* isolate,
- JSObject* receiver,
- v8::AccessType type) {
- // During bootstrapping, callback functions are not enabled yet.
- if (isolate->bootstrapper()->IsActive()) return YES;
-
- if (receiver->IsJSGlobalProxy()) {
- Object* receiver_context = JSGlobalProxy::cast(receiver)->context();
- if (!receiver_context->IsContext()) return NO;
-
- // Get the global context of current top context.
- // avoid using Isolate::global_context() because it uses Handle.
- Context* global_context = isolate->context()->global()->global_context();
- if (receiver_context == global_context) return YES;
-
- if (Context::cast(receiver_context)->security_token() ==
- global_context->security_token())
- return YES;
- }
-
- return UNKNOWN;
-}
-
-
-bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
- v8::AccessType type) {
- ASSERT(receiver->IsAccessCheckNeeded());
-
- // The callers of this method are not expecting a GC.
- AssertNoAllocation no_gc;
-
- // Skip checks for hidden properties access. Note, we do not
- // require existence of a context in this case.
- if (key == heap_.hidden_symbol()) return true;
-
- // Check for compatibility between the security tokens in the
- // current lexical context and the accessed object.
- ASSERT(context());
-
- MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
- if (decision != UNKNOWN) return decision == YES;
-
- // Get named access check callback
- JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
- if (!constructor->shared()->IsApiFunction()) return false;
-
- Object* data_obj =
- constructor->shared()->get_api_func_data()->access_check_info();
- if (data_obj == heap_.undefined_value()) return false;
-
- Object* fun_obj = AccessCheckInfo::cast(data_obj)->named_callback();
- v8::NamedSecurityCallback callback =
- v8::ToCData<v8::NamedSecurityCallback>(fun_obj);
-
- if (!callback) return false;
-
- HandleScope scope(this);
- Handle<JSObject> receiver_handle(receiver, this);
- Handle<Object> key_handle(key, this);
- Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
- LOG(this, ApiNamedSecurityCheck(key));
- bool result = false;
- {
- // Leaving JavaScript.
- VMState state(this, EXTERNAL);
- result = callback(v8::Utils::ToLocal(receiver_handle),
- v8::Utils::ToLocal(key_handle),
- type,
- v8::Utils::ToLocal(data));
- }
- return result;
-}
-
-
-bool Isolate::MayIndexedAccess(JSObject* receiver,
- uint32_t index,
- v8::AccessType type) {
- ASSERT(receiver->IsAccessCheckNeeded());
- // Check for compatibility between the security tokens in the
- // current lexical context and the accessed object.
- ASSERT(context());
-
- MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
- if (decision != UNKNOWN) return decision == YES;
-
- // Get indexed access check callback
- JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
- if (!constructor->shared()->IsApiFunction()) return false;
-
- Object* data_obj =
- constructor->shared()->get_api_func_data()->access_check_info();
- if (data_obj == heap_.undefined_value()) return false;
-
- Object* fun_obj = AccessCheckInfo::cast(data_obj)->indexed_callback();
- v8::IndexedSecurityCallback callback =
- v8::ToCData<v8::IndexedSecurityCallback>(fun_obj);
-
- if (!callback) return false;
-
- HandleScope scope(this);
- Handle<JSObject> receiver_handle(receiver, this);
- Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
- LOG(this, ApiIndexedSecurityCheck(index));
- bool result = false;
- {
- // Leaving JavaScript.
- VMState state(this, EXTERNAL);
- result = callback(v8::Utils::ToLocal(receiver_handle),
- index,
- type,
- v8::Utils::ToLocal(data));
- }
- return result;
-}
-
-
-const char* const Isolate::kStackOverflowMessage =
- "Uncaught RangeError: Maximum call stack size exceeded";
-
-
-Failure* Isolate::StackOverflow() {
- HandleScope scope;
- Handle<String> key = factory()->stack_overflow_symbol();
- Handle<JSObject> boilerplate =
- Handle<JSObject>::cast(GetProperty(js_builtins_object(), key));
- Handle<Object> exception = Copy(boilerplate);
- // TODO(1240995): To avoid having to call JavaScript code to compute
- // the message for stack overflow exceptions which is very likely to
- // double fault with another stack overflow exception, we use a
- // precomputed message.
- DoThrow(*exception, NULL, kStackOverflowMessage);
- return Failure::Exception();
-}
-
-
-Failure* Isolate::TerminateExecution() {
- DoThrow(heap_.termination_exception(), NULL, NULL);
- return Failure::Exception();
-}
-
-
-Failure* Isolate::Throw(Object* exception, MessageLocation* location) {
- DoThrow(exception, location, NULL);
- return Failure::Exception();
-}
-
-
-Failure* Isolate::ReThrow(MaybeObject* exception, MessageLocation* location) {
- bool can_be_caught_externally = false;
- ShouldReportException(&can_be_caught_externally,
- is_catchable_by_javascript(exception));
- thread_local_top()->catcher_ = can_be_caught_externally ?
- try_catch_handler() : NULL;
-
- // Set the exception being re-thrown.
- set_pending_exception(exception);
- return Failure::Exception();
-}
-
-
-Failure* Isolate::ThrowIllegalOperation() {
- return Throw(heap_.illegal_access_symbol());
-}
-
-
-void Isolate::ScheduleThrow(Object* exception) {
- // When scheduling a throw we first throw the exception to get the
- // error reporting if it is uncaught before rescheduling it.
- Throw(exception);
- thread_local_top()->scheduled_exception_ = pending_exception();
- thread_local_top()->external_caught_exception_ = false;
- clear_pending_exception();
-}
-
-
-Failure* Isolate::PromoteScheduledException() {
- MaybeObject* thrown = scheduled_exception();
- clear_scheduled_exception();
- // Re-throw the exception to avoid getting repeated error reporting.
- return ReThrow(thrown);
-}
-
-
-void Isolate::PrintCurrentStackTrace(FILE* out) {
- StackTraceFrameIterator it(this);
- while (!it.done()) {
- HandleScope scope;
- // Find code position if recorded in relocation info.
- JavaScriptFrame* frame = it.frame();
- int pos = frame->LookupCode()->SourcePosition(frame->pc());
- Handle<Object> pos_obj(Smi::FromInt(pos));
- // Fetch function and receiver.
- Handle<JSFunction> fun(JSFunction::cast(frame->function()));
- Handle<Object> recv(frame->receiver());
- // Advance to the next JavaScript frame and determine if the
- // current frame is the top-level frame.
- it.Advance();
- Handle<Object> is_top_level = it.done()
- ? factory()->true_value()
- : factory()->false_value();
- // Generate and print stack trace line.
- Handle<String> line =
- Execution::GetStackTraceLine(recv, fun, pos_obj, is_top_level);
- if (line->length() > 0) {
- line->PrintOn(out);
- fprintf(out, "\n");
- }
- }
-}
-
-
-void Isolate::ComputeLocation(MessageLocation* target) {
- *target = MessageLocation(Handle<Script>(heap_.empty_script()), -1, -1);
- StackTraceFrameIterator it(this);
- if (!it.done()) {
- JavaScriptFrame* frame = it.frame();
- JSFunction* fun = JSFunction::cast(frame->function());
- Object* script = fun->shared()->script();
- if (script->IsScript() &&
- !(Script::cast(script)->source()->IsUndefined())) {
- int pos = frame->LookupCode()->SourcePosition(frame->pc());
- // Compute the location from the function and the reloc info.
- Handle<Script> casted_script(Script::cast(script));
- *target = MessageLocation(casted_script, pos, pos + 1);
- }
- }
-}
-
-
-bool Isolate::ShouldReportException(bool* can_be_caught_externally,
- bool catchable_by_javascript) {
- // Find the top-most try-catch handler.
- StackHandler* handler =
- StackHandler::FromAddress(Isolate::handler(thread_local_top()));
- while (handler != NULL && !handler->is_try_catch()) {
- handler = handler->next();
- }
-
- // Get the address of the external handler so we can compare the address to
- // determine which one is closer to the top of the stack.
- Address external_handler_address =
- thread_local_top()->try_catch_handler_address();
-
- // The exception has been externally caught if and only if there is
- // an external handler which is on top of the top-most try-catch
- // handler.
- *can_be_caught_externally = external_handler_address != NULL &&
- (handler == NULL || handler->address() > external_handler_address ||
- !catchable_by_javascript);
-
- if (*can_be_caught_externally) {
- // Only report the exception if the external handler is verbose.
- return try_catch_handler()->is_verbose_;
- } else {
- // Report the exception if it isn't caught by JavaScript code.
- return handler == NULL;
- }
-}
-
-
-void Isolate::DoThrow(MaybeObject* exception,
- MessageLocation* location,
- const char* message) {
- ASSERT(!has_pending_exception());
-
- HandleScope scope;
- Object* exception_object = Smi::FromInt(0);
- bool is_object = exception->ToObject(&exception_object);
- Handle<Object> exception_handle(exception_object);
-
- // Determine reporting and whether the exception is caught externally.
- bool catchable_by_javascript = is_catchable_by_javascript(exception);
- // Only real objects can be caught by JS.
- ASSERT(!catchable_by_javascript || is_object);
- bool can_be_caught_externally = false;
- bool should_report_exception =
- ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
- bool report_exception = catchable_by_javascript && should_report_exception;
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Notify debugger of exception.
- if (catchable_by_javascript) {
- debugger_->OnException(exception_handle, report_exception);
- }
-#endif
-
- // Generate the message.
- Handle<Object> message_obj;
- MessageLocation potential_computed_location;
- bool try_catch_needs_message =
- can_be_caught_externally &&
- try_catch_handler()->capture_message_;
- if (report_exception || try_catch_needs_message) {
- if (location == NULL) {
- // If no location was specified we use a computed one instead
- ComputeLocation(&potential_computed_location);
- location = &potential_computed_location;
- }
- if (!bootstrapper()->IsActive()) {
- // It's not safe to try to make message objects or collect stack
- // traces while the bootstrapper is active since the infrastructure
- // may not have been properly initialized.
- Handle<String> stack_trace;
- if (FLAG_trace_exception) stack_trace = StackTraceString();
- Handle<JSArray> stack_trace_object;
- if (report_exception && capture_stack_trace_for_uncaught_exceptions_) {
- stack_trace_object = CaptureCurrentStackTrace(
- stack_trace_for_uncaught_exceptions_frame_limit_,
- stack_trace_for_uncaught_exceptions_options_);
- }
- ASSERT(is_object); // Can't use the handle unless there's a real object.
- message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
- location, HandleVector<Object>(&exception_handle, 1), stack_trace,
- stack_trace_object);
- }
- }
-
- // Save the message for reporting if the the exception remains uncaught.
- thread_local_top()->has_pending_message_ = report_exception;
- thread_local_top()->pending_message_ = message;
- if (!message_obj.is_null()) {
- thread_local_top()->pending_message_obj_ = *message_obj;
- if (location != NULL) {
- thread_local_top()->pending_message_script_ = *location->script();
- thread_local_top()->pending_message_start_pos_ = location->start_pos();
- thread_local_top()->pending_message_end_pos_ = location->end_pos();
- }
- }
-
- // Do not forget to clean catcher_ if currently thrown exception cannot
- // be caught. If necessary, ReThrow will update the catcher.
- thread_local_top()->catcher_ = can_be_caught_externally ?
- try_catch_handler() : NULL;
-
- // NOTE: Notifying the debugger or generating the message
- // may have caused new exceptions. For now, we just ignore
- // that and set the pending exception to the original one.
- if (is_object) {
- set_pending_exception(*exception_handle);
- } else {
- // Failures are not on the heap so they neither need nor work with handles.
- ASSERT(exception_handle->IsFailure());
- set_pending_exception(exception);
- }
-}
-
-
-bool Isolate::IsExternallyCaught() {
- ASSERT(has_pending_exception());
-
- if ((thread_local_top()->catcher_ == NULL) ||
- (try_catch_handler() != thread_local_top()->catcher_)) {
- // When throwing the exception, we found no v8::TryCatch
- // which should care about this exception.
- return false;
- }
-
- if (!is_catchable_by_javascript(pending_exception())) {
- return true;
- }
-
- // Get the address of the external handler so we can compare the address to
- // determine which one is closer to the top of the stack.
- Address external_handler_address =
- thread_local_top()->try_catch_handler_address();
- ASSERT(external_handler_address != NULL);
-
- // The exception has been externally caught if and only if there is
- // an external handler which is on top of the top-most try-finally
- // handler.
- // There should be no try-catch blocks as they would prohibit us from
- // finding external catcher in the first place (see catcher_ check above).
- //
- // Note, that finally clause would rethrow an exception unless it's
- // aborted by jumps in control flow like return, break, etc. and we'll
- // have another chances to set proper v8::TryCatch.
- StackHandler* handler =
- StackHandler::FromAddress(Isolate::handler(thread_local_top()));
- while (handler != NULL && handler->address() < external_handler_address) {
- ASSERT(!handler->is_try_catch());
- if (handler->is_try_finally()) return false;
-
- handler = handler->next();
- }
-
- return true;
-}
-
-
-void Isolate::ReportPendingMessages() {
- ASSERT(has_pending_exception());
- // If the pending exception is OutOfMemoryException set out_of_memory in
- // the global context. Note: We have to mark the global context here
- // since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
- // set it.
- bool external_caught = IsExternallyCaught();
- thread_local_top()->external_caught_exception_ = external_caught;
- HandleScope scope(this);
- if (thread_local_top()->pending_exception_ ==
- Failure::OutOfMemoryException()) {
- context()->mark_out_of_memory();
- } else if (thread_local_top()->pending_exception_ ==
- heap_.termination_exception()) {
- if (external_caught) {
- try_catch_handler()->can_continue_ = false;
- try_catch_handler()->exception_ = heap_.null_value();
- }
- } else {
- // At this point all non-object (failure) exceptions have
- // been dealt with so this shouldn't fail.
- Object* pending_exception_object = pending_exception()->ToObjectUnchecked();
- Handle<Object> exception(pending_exception_object);
- thread_local_top()->external_caught_exception_ = false;
- if (external_caught) {
- try_catch_handler()->can_continue_ = true;
- try_catch_handler()->exception_ = thread_local_top()->pending_exception_;
- if (!thread_local_top()->pending_message_obj_->IsTheHole()) {
- try_catch_handler()->message_ =
- thread_local_top()->pending_message_obj_;
- }
- }
- if (thread_local_top()->has_pending_message_) {
- thread_local_top()->has_pending_message_ = false;
- if (thread_local_top()->pending_message_ != NULL) {
- MessageHandler::ReportMessage(thread_local_top()->pending_message_);
- } else if (!thread_local_top()->pending_message_obj_->IsTheHole()) {
- Handle<Object> message_obj(thread_local_top()->pending_message_obj_);
- if (thread_local_top()->pending_message_script_ != NULL) {
- Handle<Script> script(thread_local_top()->pending_message_script_);
- int start_pos = thread_local_top()->pending_message_start_pos_;
- int end_pos = thread_local_top()->pending_message_end_pos_;
- MessageLocation location(script, start_pos, end_pos);
- MessageHandler::ReportMessage(&location, message_obj);
- } else {
- MessageHandler::ReportMessage(NULL, message_obj);
- }
- }
- }
- thread_local_top()->external_caught_exception_ = external_caught;
- set_pending_exception(*exception);
- }
- clear_pending_message();
-}
-
-
-void Isolate::TraceException(bool flag) {
- FLAG_trace_exception = flag; // TODO(isolates): This is an unfortunate use.
-}
-
-
-bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
- // Allways reschedule out of memory exceptions.
- if (!is_out_of_memory()) {
- bool is_termination_exception =
- pending_exception() == heap_.termination_exception();
-
- // Do not reschedule the exception if this is the bottom call.
- bool clear_exception = is_bottom_call;
-
- if (is_termination_exception) {
- if (is_bottom_call) {
- thread_local_top()->external_caught_exception_ = false;
- clear_pending_exception();
- return false;
- }
- } else if (thread_local_top()->external_caught_exception_) {
- // If the exception is externally caught, clear it if there are no
- // JavaScript frames on the way to the C++ frame that has the
- // external handler.
- ASSERT(thread_local_top()->try_catch_handler_address() != NULL);
- Address external_handler_address =
- thread_local_top()->try_catch_handler_address();
- JavaScriptFrameIterator it;
- if (it.done() || (it.frame()->sp() > external_handler_address)) {
- clear_exception = true;
- }
- }
-
- // Clear the exception if needed.
- if (clear_exception) {
- thread_local_top()->external_caught_exception_ = false;
- clear_pending_exception();
- return false;
- }
- }
-
- // Reschedule the exception.
- thread_local_top()->scheduled_exception_ = pending_exception();
- clear_pending_exception();
- return true;
-}
-
-
-void Isolate::SetCaptureStackTraceForUncaughtExceptions(
- bool capture,
- int frame_limit,
- StackTrace::StackTraceOptions options) {
- capture_stack_trace_for_uncaught_exceptions_ = capture;
- stack_trace_for_uncaught_exceptions_frame_limit_ = frame_limit;
- stack_trace_for_uncaught_exceptions_options_ = options;
-}
-
-
-bool Isolate::is_out_of_memory() {
- if (has_pending_exception()) {
- MaybeObject* e = pending_exception();
- if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
- return true;
- }
- }
- if (has_scheduled_exception()) {
- MaybeObject* e = scheduled_exception();
- if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
- return true;
- }
- }
- return false;
-}
-
-
-Handle<Context> Isolate::global_context() {
- GlobalObject* global = thread_local_top()->context_->global();
- return Handle<Context>(global->global_context());
-}
-
-
-Handle<Context> Isolate::GetCallingGlobalContext() {
- JavaScriptFrameIterator it;
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (debug_->InDebugger()) {
- while (!it.done()) {
- JavaScriptFrame* frame = it.frame();
- Context* context = Context::cast(frame->context());
- if (context->global_context() == *debug_->debug_context()) {
- it.Advance();
- } else {
- break;
- }
- }
- }
-#endif // ENABLE_DEBUGGER_SUPPORT
- if (it.done()) return Handle<Context>::null();
- JavaScriptFrame* frame = it.frame();
- Context* context = Context::cast(frame->context());
- return Handle<Context>(context->global_context());
-}
-
-
-char* Isolate::ArchiveThread(char* to) {
- if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
- RuntimeProfiler::IsolateExitedJS(this);
- }
- memcpy(to, reinterpret_cast<char*>(thread_local_top()),
- sizeof(ThreadLocalTop));
- InitializeThreadLocal();
- return to + sizeof(ThreadLocalTop);
-}
-
-
-char* Isolate::RestoreThread(char* from) {
- memcpy(reinterpret_cast<char*>(thread_local_top()), from,
- sizeof(ThreadLocalTop));
- // This might be just paranoia, but it seems to be needed in case a
- // thread_local_ is restored on a separate OS thread.
-#ifdef USE_SIMULATOR
-#ifdef V8_TARGET_ARCH_ARM
- thread_local_top()->simulator_ = Simulator::current(this);
-#elif V8_TARGET_ARCH_MIPS
- thread_local_top()->simulator_ = Simulator::current(this);
-#endif
-#endif
- if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
- RuntimeProfiler::IsolateEnteredJS(this);
- }
- return from + sizeof(ThreadLocalTop);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/type-info.cc b/src/3rdparty/v8/src/type-info.cc
deleted file mode 100644
index 256f48a..0000000
--- a/src/3rdparty/v8/src/type-info.cc
+++ /dev/null
@@ -1,472 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "ast.h"
-#include "compiler.h"
-#include "ic.h"
-#include "macro-assembler.h"
-#include "stub-cache.h"
-#include "type-info.h"
-
-#include "ic-inl.h"
-#include "objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-TypeInfo TypeInfo::TypeFromValue(Handle<Object> value) {
- TypeInfo info;
- if (value->IsSmi()) {
- info = TypeInfo::Smi();
- } else if (value->IsHeapNumber()) {
- info = TypeInfo::IsInt32Double(HeapNumber::cast(*value)->value())
- ? TypeInfo::Integer32()
- : TypeInfo::Double();
- } else if (value->IsString()) {
- info = TypeInfo::String();
- } else {
- info = TypeInfo::Unknown();
- }
- return info;
-}
-
-
-STATIC_ASSERT(DEFAULT_STRING_STUB == Code::kNoExtraICState);
-
-
-TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
- Handle<Context> global_context) {
- global_context_ = global_context;
- PopulateMap(code);
- ASSERT(reinterpret_cast<Address>(*dictionary_.location()) != kHandleZapValue);
-}
-
-
-Handle<Object> TypeFeedbackOracle::GetInfo(int pos) {
- int entry = dictionary_->FindEntry(pos);
- return entry != NumberDictionary::kNotFound
- ? Handle<Object>(dictionary_->ValueAt(entry))
- : Isolate::Current()->factory()->undefined_value();
-}
-
-
-bool TypeFeedbackOracle::LoadIsMonomorphic(Property* expr) {
- Handle<Object> map_or_code(GetInfo(expr->position()));
- if (map_or_code->IsMap()) return true;
- if (map_or_code->IsCode()) {
- Handle<Code> code(Code::cast(*map_or_code));
- return code->kind() == Code::KEYED_EXTERNAL_ARRAY_LOAD_IC &&
- code->FindFirstMap() != NULL;
- }
- return false;
-}
-
-
-bool TypeFeedbackOracle::StoreIsMonomorphic(Assignment* expr) {
- Handle<Object> map_or_code(GetInfo(expr->position()));
- if (map_or_code->IsMap()) return true;
- if (map_or_code->IsCode()) {
- Handle<Code> code(Code::cast(*map_or_code));
- return code->kind() == Code::KEYED_EXTERNAL_ARRAY_STORE_IC &&
- code->FindFirstMap() != NULL;
- }
- return false;
-}
-
-
-bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
- Handle<Object> value = GetInfo(expr->position());
- return value->IsMap() || value->IsSmi();
-}
-
-
-Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
- ASSERT(LoadIsMonomorphic(expr));
- Handle<Object> map_or_code(
- Handle<HeapObject>::cast(GetInfo(expr->position())));
- if (map_or_code->IsCode()) {
- Handle<Code> code(Code::cast(*map_or_code));
- return Handle<Map>(code->FindFirstMap());
- }
- return Handle<Map>(Map::cast(*map_or_code));
-}
-
-
-Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Assignment* expr) {
- ASSERT(StoreIsMonomorphic(expr));
- Handle<HeapObject> map_or_code(
- Handle<HeapObject>::cast(GetInfo(expr->position())));
- if (map_or_code->IsCode()) {
- Handle<Code> code(Code::cast(*map_or_code));
- return Handle<Map>(code->FindFirstMap());
- }
- return Handle<Map>(Map::cast(*map_or_code));
-}
-
-
-ZoneMapList* TypeFeedbackOracle::LoadReceiverTypes(Property* expr,
- Handle<String> name) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
- return CollectReceiverTypes(expr->position(), name, flags);
-}
-
-
-ZoneMapList* TypeFeedbackOracle::StoreReceiverTypes(Assignment* expr,
- Handle<String> name) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, NORMAL);
- return CollectReceiverTypes(expr->position(), name, flags);
-}
-
-
-ZoneMapList* TypeFeedbackOracle::CallReceiverTypes(Call* expr,
- Handle<String> name) {
- int arity = expr->arguments()->length();
- // Note: these flags won't let us get maps from stubs with
- // non-default extra ic state in the megamorphic case. In the more
- // important monomorphic case the map is obtained directly, so it's
- // not a problem until we decide to emit more polymorphic code.
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
- NORMAL,
- Code::kNoExtraICState,
- OWN_MAP,
- NOT_IN_LOOP,
- arity);
- return CollectReceiverTypes(expr->position(), name, flags);
-}
-
-
-CheckType TypeFeedbackOracle::GetCallCheckType(Call* expr) {
- Handle<Object> value = GetInfo(expr->position());
- if (!value->IsSmi()) return RECEIVER_MAP_CHECK;
- CheckType check = static_cast<CheckType>(Smi::cast(*value)->value());
- ASSERT(check != RECEIVER_MAP_CHECK);
- return check;
-}
-
-ExternalArrayType TypeFeedbackOracle::GetKeyedLoadExternalArrayType(
- Property* expr) {
- Handle<Object> stub = GetInfo(expr->position());
- ASSERT(stub->IsCode());
- return Code::cast(*stub)->external_array_type();
-}
-
-ExternalArrayType TypeFeedbackOracle::GetKeyedStoreExternalArrayType(
- Assignment* expr) {
- Handle<Object> stub = GetInfo(expr->position());
- ASSERT(stub->IsCode());
- return Code::cast(*stub)->external_array_type();
-}
-
-Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
- CheckType check) {
- JSFunction* function = NULL;
- switch (check) {
- case RECEIVER_MAP_CHECK:
- UNREACHABLE();
- break;
- case STRING_CHECK:
- function = global_context_->string_function();
- break;
- case NUMBER_CHECK:
- function = global_context_->number_function();
- break;
- case BOOLEAN_CHECK:
- function = global_context_->boolean_function();
- break;
- }
- ASSERT(function != NULL);
- return Handle<JSObject>(JSObject::cast(function->instance_prototype()));
-}
-
-
-bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
- return *GetInfo(expr->position()) ==
- Isolate::Current()->builtins()->builtin(id);
-}
-
-
-TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) {
- Handle<Object> object = GetInfo(expr->position());
- TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) return unknown;
- Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_compare_ic_stub()) return unknown;
-
- CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
- switch (state) {
- case CompareIC::UNINITIALIZED:
- // Uninitialized means never executed.
- // TODO(fschneider): Introduce a separate value for never-executed ICs.
- return unknown;
- case CompareIC::SMIS:
- return TypeInfo::Smi();
- case CompareIC::HEAP_NUMBERS:
- return TypeInfo::Number();
- case CompareIC::OBJECTS:
- // TODO(kasperl): We really need a type for JS objects here.
- return TypeInfo::NonPrimitive();
- case CompareIC::GENERIC:
- default:
- return unknown;
- }
-}
-
-
-TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr) {
- Handle<Object> object = GetInfo(expr->position());
- TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) return unknown;
- Handle<Code> code = Handle<Code>::cast(object);
- if (code->is_binary_op_stub()) {
- BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>(
- code->binary_op_type());
- switch (type) {
- case BinaryOpIC::UNINIT_OR_SMI:
- return TypeInfo::Smi();
- case BinaryOpIC::DEFAULT:
- return (expr->op() == Token::DIV || expr->op() == Token::MUL)
- ? TypeInfo::Double()
- : TypeInfo::Integer32();
- case BinaryOpIC::HEAP_NUMBERS:
- return TypeInfo::Double();
- default:
- return unknown;
- }
- } else if (code->is_type_recording_binary_op_stub()) {
- TRBinaryOpIC::TypeInfo type = static_cast<TRBinaryOpIC::TypeInfo>(
- code->type_recording_binary_op_type());
- TRBinaryOpIC::TypeInfo result_type = static_cast<TRBinaryOpIC::TypeInfo>(
- code->type_recording_binary_op_result_type());
-
- switch (type) {
- case TRBinaryOpIC::UNINITIALIZED:
- // Uninitialized means never executed.
- // TODO(fschneider): Introduce a separate value for never-executed ICs
- return unknown;
- case TRBinaryOpIC::SMI:
- switch (result_type) {
- case TRBinaryOpIC::UNINITIALIZED:
- case TRBinaryOpIC::SMI:
- return TypeInfo::Smi();
- case TRBinaryOpIC::INT32:
- return TypeInfo::Integer32();
- case TRBinaryOpIC::HEAP_NUMBER:
- return TypeInfo::Double();
- default:
- return unknown;
- }
- case TRBinaryOpIC::INT32:
- if (expr->op() == Token::DIV ||
- result_type == TRBinaryOpIC::HEAP_NUMBER) {
- return TypeInfo::Double();
- }
- return TypeInfo::Integer32();
- case TRBinaryOpIC::HEAP_NUMBER:
- return TypeInfo::Double();
- case TRBinaryOpIC::STRING:
- case TRBinaryOpIC::GENERIC:
- return unknown;
- default:
- return unknown;
- }
- }
- return unknown;
-}
-
-
-TypeInfo TypeFeedbackOracle::SwitchType(CaseClause* clause) {
- Handle<Object> object = GetInfo(clause->position());
- TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) return unknown;
- Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_compare_ic_stub()) return unknown;
-
- CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
- switch (state) {
- case CompareIC::UNINITIALIZED:
- // Uninitialized means never executed.
- // TODO(fschneider): Introduce a separate value for never-executed ICs.
- return unknown;
- case CompareIC::SMIS:
- return TypeInfo::Smi();
- case CompareIC::HEAP_NUMBERS:
- return TypeInfo::Number();
- case CompareIC::OBJECTS:
- // TODO(kasperl): We really need a type for JS objects here.
- return TypeInfo::NonPrimitive();
- case CompareIC::GENERIC:
- default:
- return unknown;
- }
-}
-
-
-ZoneMapList* TypeFeedbackOracle::CollectReceiverTypes(int position,
- Handle<String> name,
- Code::Flags flags) {
- Isolate* isolate = Isolate::Current();
- Handle<Object> object = GetInfo(position);
- if (object->IsUndefined() || object->IsSmi()) return NULL;
-
- if (*object == isolate->builtins()->builtin(Builtins::kStoreIC_GlobalProxy)) {
- // TODO(fschneider): We could collect the maps and signal that
- // we need a generic store (or load) here.
- ASSERT(Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC);
- return NULL;
- } else if (object->IsMap()) {
- ZoneMapList* types = new ZoneMapList(1);
- types->Add(Handle<Map>::cast(object));
- return types;
- } else if (Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
- ZoneMapList* types = new ZoneMapList(4);
- ASSERT(object->IsCode());
- isolate->stub_cache()->CollectMatchingMaps(types, *name, flags);
- return types->length() > 0 ? types : NULL;
- } else {
- return NULL;
- }
-}
-
-
-void TypeFeedbackOracle::SetInfo(int position, Object* target) {
- MaybeObject* maybe_result = dictionary_->AtNumberPut(position, target);
- USE(maybe_result);
-#ifdef DEBUG
- Object* result;
- // Dictionary has been allocated with sufficient size for all elements.
- ASSERT(maybe_result->ToObject(&result));
- ASSERT(*dictionary_ == result);
-#endif
-}
-
-
-void TypeFeedbackOracle::PopulateMap(Handle<Code> code) {
- Isolate* isolate = Isolate::Current();
- HandleScope scope(isolate);
-
- const int kInitialCapacity = 16;
- List<int> code_positions(kInitialCapacity);
- List<int> source_positions(kInitialCapacity);
- CollectPositions(*code, &code_positions, &source_positions);
-
- ASSERT(dictionary_.is_null()); // Only initialize once.
- dictionary_ = isolate->factory()->NewNumberDictionary(
- code_positions.length());
-
- int length = code_positions.length();
- ASSERT(source_positions.length() == length);
- for (int i = 0; i < length; i++) {
- AssertNoAllocation no_allocation;
- RelocInfo info(code->instruction_start() + code_positions[i],
- RelocInfo::CODE_TARGET, 0);
- Code* target = Code::GetCodeFromTargetAddress(info.target_address());
- int position = source_positions[i];
- InlineCacheState state = target->ic_state();
- Code::Kind kind = target->kind();
-
- if (kind == Code::BINARY_OP_IC ||
- kind == Code::TYPE_RECORDING_BINARY_OP_IC ||
- kind == Code::COMPARE_IC) {
- // TODO(kasperl): Avoid having multiple ICs with the same
- // position by making sure that we have position information
- // recorded for all binary ICs.
- int entry = dictionary_->FindEntry(position);
- if (entry == NumberDictionary::kNotFound) {
- SetInfo(position, target);
- }
- } else if (state == MONOMORPHIC) {
- if (kind == Code::KEYED_EXTERNAL_ARRAY_LOAD_IC ||
- kind == Code::KEYED_EXTERNAL_ARRAY_STORE_IC) {
- SetInfo(position, target);
- } else if (target->kind() != Code::CALL_IC ||
- target->check_type() == RECEIVER_MAP_CHECK) {
- Map* map = target->FindFirstMap();
- if (map == NULL) {
- SetInfo(position, target);
- } else {
- SetInfo(position, map);
- }
- } else {
- ASSERT(target->kind() == Code::CALL_IC);
- CheckType check = target->check_type();
- ASSERT(check != RECEIVER_MAP_CHECK);
- SetInfo(position, Smi::FromInt(check));
- }
- } else if (state == MEGAMORPHIC) {
- SetInfo(position, target);
- }
- }
- // Allocate handle in the parent scope.
- dictionary_ = scope.CloseAndEscape(dictionary_);
-}
-
-
-void TypeFeedbackOracle::CollectPositions(Code* code,
- List<int>* code_positions,
- List<int>* source_positions) {
- AssertNoAllocation no_allocation;
- int position = 0;
- // Because the ICs we use for global variables access in the full
- // code generator do not have any meaningful positions, we avoid
- // collecting those by filtering out contextual code targets.
- int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::kPositionMask;
- for (RelocIterator it(code, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- RelocInfo::Mode mode = info->rmode();
- if (RelocInfo::IsCodeTarget(mode)) {
- Code* target = Code::GetCodeFromTargetAddress(info->target_address());
- if (target->is_inline_cache_stub()) {
- InlineCacheState state = target->ic_state();
- Code::Kind kind = target->kind();
- if (kind == Code::BINARY_OP_IC) {
- if (target->binary_op_type() == BinaryOpIC::GENERIC) continue;
- } else if (kind == Code::TYPE_RECORDING_BINARY_OP_IC) {
- if (target->type_recording_binary_op_type() ==
- TRBinaryOpIC::GENERIC) {
- continue;
- }
- } else if (kind == Code::COMPARE_IC) {
- if (target->compare_state() == CompareIC::GENERIC) continue;
- } else {
- if (state != MONOMORPHIC && state != MEGAMORPHIC) continue;
- }
- code_positions->Add(
- static_cast<int>(info->pc() - code->instruction_start()));
- source_positions->Add(position);
- }
- } else {
- ASSERT(RelocInfo::IsPosition(mode));
- position = static_cast<int>(info->data());
- }
- }
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/type-info.h b/src/3rdparty/v8/src/type-info.h
deleted file mode 100644
index 9b69526..0000000
--- a/src/3rdparty/v8/src/type-info.h
+++ /dev/null
@@ -1,290 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_TYPE_INFO_H_
-#define V8_TYPE_INFO_H_
-
-#include "globals.h"
-#include "zone.h"
-#include "zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Unknown
-// | |
-// | \--------------|
-// Primitive Non-primitive
-// | \--------| |
-// Number String |
-// / | | |
-// Double Integer32 | /
-// | | / /
-// | Smi / /
-// | | / /
-// | | / /
-// Uninitialized.--/
-
-class TypeInfo {
- public:
- TypeInfo() : type_(kUninitialized) { }
-
- static TypeInfo Unknown() { return TypeInfo(kUnknown); }
- // We know it's a primitive type.
- static TypeInfo Primitive() { return TypeInfo(kPrimitive); }
- // We know it's a number of some sort.
- static TypeInfo Number() { return TypeInfo(kNumber); }
- // We know it's a signed 32 bit integer.
- static TypeInfo Integer32() { return TypeInfo(kInteger32); }
- // We know it's a Smi.
- static TypeInfo Smi() { return TypeInfo(kSmi); }
- // We know it's a heap number.
- static TypeInfo Double() { return TypeInfo(kDouble); }
- // We know it's a string.
- static TypeInfo String() { return TypeInfo(kString); }
- // We know it's a non-primitive (object) type.
- static TypeInfo NonPrimitive() { return TypeInfo(kNonPrimitive); }
- // We haven't started collecting info yet.
- static TypeInfo Uninitialized() { return TypeInfo(kUninitialized); }
-
- // Return compact representation. Very sensitive to enum values below!
- // Compacting drops information about primitive types and strings types.
- // We use the compact representation when we only care about number types.
- int ThreeBitRepresentation() {
- ASSERT(type_ != kUninitialized);
- int answer = type_ & 0xf;
- answer = answer > 6 ? answer - 2 : answer;
- ASSERT(answer >= 0);
- ASSERT(answer <= 7);
- return answer;
- }
-
- // Decode compact representation. Very sensitive to enum values below!
- static TypeInfo ExpandedRepresentation(int three_bit_representation) {
- Type t = static_cast<Type>(three_bit_representation > 4 ?
- three_bit_representation + 2 :
- three_bit_representation);
- t = (t == kUnknown) ? t : static_cast<Type>(t | kPrimitive);
- ASSERT(t == kUnknown ||
- t == kNumber ||
- t == kInteger32 ||
- t == kSmi ||
- t == kDouble);
- return TypeInfo(t);
- }
-
- int ToInt() {
- return type_;
- }
-
- static TypeInfo FromInt(int bit_representation) {
- Type t = static_cast<Type>(bit_representation);
- ASSERT(t == kUnknown ||
- t == kPrimitive ||
- t == kNumber ||
- t == kInteger32 ||
- t == kSmi ||
- t == kDouble ||
- t == kString ||
- t == kNonPrimitive);
- return TypeInfo(t);
- }
-
- // Return the weakest (least precise) common type.
- static TypeInfo Combine(TypeInfo a, TypeInfo b) {
- return TypeInfo(static_cast<Type>(a.type_ & b.type_));
- }
-
-
- // Integer32 is an integer that can be represented as a signed
- // 32-bit integer. It has to be
- // in the range [-2^31, 2^31 - 1]. We also have to check for negative 0
- // as it is not an Integer32.
- static inline bool IsInt32Double(double value) {
- const DoubleRepresentation minus_zero(-0.0);
- DoubleRepresentation rep(value);
- if (rep.bits == minus_zero.bits) return false;
- if (value >= kMinInt && value <= kMaxInt &&
- value == static_cast<int32_t>(value)) {
- return true;
- }
- return false;
- }
-
- static TypeInfo TypeFromValue(Handle<Object> value);
-
- bool Equals(const TypeInfo& other) {
- return type_ == other.type_;
- }
-
- inline bool IsUnknown() {
- ASSERT(type_ != kUninitialized);
- return type_ == kUnknown;
- }
-
- inline bool IsPrimitive() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kPrimitive) == kPrimitive);
- }
-
- inline bool IsNumber() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kNumber) == kNumber);
- }
-
- inline bool IsSmi() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kSmi) == kSmi);
- }
-
- inline bool IsInteger32() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kInteger32) == kInteger32);
- }
-
- inline bool IsDouble() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kDouble) == kDouble);
- }
-
- inline bool IsString() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kString) == kString);
- }
-
- inline bool IsNonPrimitive() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kNonPrimitive) == kNonPrimitive);
- }
-
- inline bool IsUninitialized() {
- return type_ == kUninitialized;
- }
-
- const char* ToString() {
- switch (type_) {
- case kUnknown: return "Unknown";
- case kPrimitive: return "Primitive";
- case kNumber: return "Number";
- case kInteger32: return "Integer32";
- case kSmi: return "Smi";
- case kDouble: return "Double";
- case kString: return "String";
- case kNonPrimitive: return "Object";
- case kUninitialized: return "Uninitialized";
- }
- UNREACHABLE();
- return "Unreachable code";
- }
-
- private:
- enum Type {
- kUnknown = 0, // 0000000
- kPrimitive = 0x10, // 0010000
- kNumber = 0x11, // 0010001
- kInteger32 = 0x13, // 0010011
- kSmi = 0x17, // 0010111
- kDouble = 0x19, // 0011001
- kString = 0x30, // 0110000
- kNonPrimitive = 0x40, // 1000000
- kUninitialized = 0x7f // 1111111
- };
- explicit inline TypeInfo(Type t) : type_(t) { }
-
- Type type_;
-};
-
-
-enum StringStubFeedback {
- DEFAULT_STRING_STUB = 0,
- STRING_INDEX_OUT_OF_BOUNDS = 1
-};
-
-
-// Forward declarations.
-class Assignment;
-class BinaryOperation;
-class Call;
-class CompareOperation;
-class CompilationInfo;
-class Property;
-class CaseClause;
-
-class TypeFeedbackOracle BASE_EMBEDDED {
- public:
- TypeFeedbackOracle(Handle<Code> code, Handle<Context> global_context);
-
- bool LoadIsMonomorphic(Property* expr);
- bool StoreIsMonomorphic(Assignment* expr);
- bool CallIsMonomorphic(Call* expr);
-
- Handle<Map> LoadMonomorphicReceiverType(Property* expr);
- Handle<Map> StoreMonomorphicReceiverType(Assignment* expr);
-
- ZoneMapList* LoadReceiverTypes(Property* expr, Handle<String> name);
- ZoneMapList* StoreReceiverTypes(Assignment* expr, Handle<String> name);
- ZoneMapList* CallReceiverTypes(Call* expr, Handle<String> name);
-
- ExternalArrayType GetKeyedLoadExternalArrayType(Property* expr);
- ExternalArrayType GetKeyedStoreExternalArrayType(Assignment* expr);
-
- CheckType GetCallCheckType(Call* expr);
- Handle<JSObject> GetPrototypeForPrimitiveCheck(CheckType check);
-
- bool LoadIsBuiltin(Property* expr, Builtins::Name id);
-
- // Get type information for arithmetic operations and compares.
- TypeInfo BinaryType(BinaryOperation* expr);
- TypeInfo CompareType(CompareOperation* expr);
- TypeInfo SwitchType(CaseClause* clause);
-
- private:
- ZoneMapList* CollectReceiverTypes(int position,
- Handle<String> name,
- Code::Flags flags);
-
- void SetInfo(int position, Object* target);
-
- void PopulateMap(Handle<Code> code);
-
- void CollectPositions(Code* code,
- List<int>* code_positions,
- List<int>* source_positions);
-
- // Returns an element from the backing store. Returns undefined if
- // there is no information.
- Handle<Object> GetInfo(int pos);
-
- Handle<Context> global_context_;
- Handle<NumberDictionary> dictionary_;
-
- DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_TYPE_INFO_H_
diff --git a/src/3rdparty/v8/src/unbound-queue-inl.h b/src/3rdparty/v8/src/unbound-queue-inl.h
deleted file mode 100644
index fffb1db..0000000
--- a/src/3rdparty/v8/src/unbound-queue-inl.h
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_UNBOUND_QUEUE_INL_H_
-#define V8_UNBOUND_QUEUE_INL_H_
-
-#include "unbound-queue.h"
-
-namespace v8 {
-namespace internal {
-
-template<typename Record>
-struct UnboundQueue<Record>::Node: public Malloced {
- explicit Node(const Record& value)
- : value(value), next(NULL) {
- }
-
- Record value;
- Node* next;
-};
-
-
-template<typename Record>
-UnboundQueue<Record>::UnboundQueue() {
- first_ = new Node(Record());
- divider_ = last_ = reinterpret_cast<AtomicWord>(first_);
-}
-
-
-template<typename Record>
-UnboundQueue<Record>::~UnboundQueue() {
- while (first_ != NULL) DeleteFirst();
-}
-
-
-template<typename Record>
-void UnboundQueue<Record>::DeleteFirst() {
- Node* tmp = first_;
- first_ = tmp->next;
- delete tmp;
-}
-
-
-template<typename Record>
-void UnboundQueue<Record>::Dequeue(Record* rec) {
- ASSERT(divider_ != last_);
- Node* next = reinterpret_cast<Node*>(divider_)->next;
- *rec = next->value;
- OS::ReleaseStore(&divider_, reinterpret_cast<AtomicWord>(next));
-}
-
-
-template<typename Record>
-void UnboundQueue<Record>::Enqueue(const Record& rec) {
- Node*& next = reinterpret_cast<Node*>(last_)->next;
- next = new Node(rec);
- OS::ReleaseStore(&last_, reinterpret_cast<AtomicWord>(next));
- while (first_ != reinterpret_cast<Node*>(divider_)) DeleteFirst();
-}
-
-
-template<typename Record>
-Record* UnboundQueue<Record>::Peek() {
- ASSERT(divider_ != last_);
- Node* next = reinterpret_cast<Node*>(divider_)->next;
- return &next->value;
-}
-
-} } // namespace v8::internal
-
-#endif // V8_UNBOUND_QUEUE_INL_H_
diff --git a/src/3rdparty/v8/src/unbound-queue.h b/src/3rdparty/v8/src/unbound-queue.h
deleted file mode 100644
index 443d5ce..0000000
--- a/src/3rdparty/v8/src/unbound-queue.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_UNBOUND_QUEUE_
-#define V8_UNBOUND_QUEUE_
-
-namespace v8 {
-namespace internal {
-
-
-// Lock-free unbound queue for small records. Intended for
-// transferring small records between a Single producer and a Single
-// consumer. Doesn't have restrictions on the number of queued
-// elements, so producer never blocks. Implemented after Herb
-// Sutter's article:
-// http://www.ddj.com/high-performance-computing/210604448
-template<typename Record>
-class UnboundQueue BASE_EMBEDDED {
- public:
- inline UnboundQueue();
- inline ~UnboundQueue();
-
- INLINE(void Dequeue(Record* rec));
- INLINE(void Enqueue(const Record& rec));
- INLINE(bool IsEmpty()) { return divider_ == last_; }
- INLINE(Record* Peek());
-
- private:
- INLINE(void DeleteFirst());
-
- struct Node;
-
- Node* first_;
- AtomicWord divider_; // Node*
- AtomicWord last_; // Node*
-
- DISALLOW_COPY_AND_ASSIGN(UnboundQueue);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_UNBOUND_QUEUE_
diff --git a/src/3rdparty/v8/src/unicode-inl.h b/src/3rdparty/v8/src/unicode-inl.h
deleted file mode 100644
index c0649d7..0000000
--- a/src/3rdparty/v8/src/unicode-inl.h
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2007-2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_UNICODE_INL_H_
-#define V8_UNICODE_INL_H_
-
-#include "unicode.h"
-
-namespace unibrow {
-
-template <class T, int s> bool Predicate<T, s>::get(uchar code_point) {
- CacheEntry entry = entries_[code_point & kMask];
- if (entry.code_point_ == code_point) return entry.value_;
- return CalculateValue(code_point);
-}
-
-template <class T, int s> bool Predicate<T, s>::CalculateValue(
- uchar code_point) {
- bool result = T::Is(code_point);
- entries_[code_point & kMask] = CacheEntry(code_point, result);
- return result;
-}
-
-template <class T, int s> int Mapping<T, s>::get(uchar c, uchar n,
- uchar* result) {
- CacheEntry entry = entries_[c & kMask];
- if (entry.code_point_ == c) {
- if (entry.offset_ == 0) {
- return 0;
- } else {
- result[0] = c + entry.offset_;
- return 1;
- }
- } else {
- return CalculateValue(c, n, result);
- }
-}
-
-template <class T, int s> int Mapping<T, s>::CalculateValue(uchar c, uchar n,
- uchar* result) {
- bool allow_caching = true;
- int length = T::Convert(c, n, result, &allow_caching);
- if (allow_caching) {
- if (length == 1) {
- entries_[c & kMask] = CacheEntry(c, result[0] - c);
- return 1;
- } else {
- entries_[c & kMask] = CacheEntry(c, 0);
- return 0;
- }
- } else {
- return length;
- }
-}
-
-
-unsigned Utf8::Encode(char* str, uchar c) {
- static const int kMask = ~(1 << 6);
- if (c <= kMaxOneByteChar) {
- str[0] = c;
- return 1;
- } else if (c <= kMaxTwoByteChar) {
- str[0] = 0xC0 | (c >> 6);
- str[1] = 0x80 | (c & kMask);
- return 2;
- } else if (c <= kMaxThreeByteChar) {
- str[0] = 0xE0 | (c >> 12);
- str[1] = 0x80 | ((c >> 6) & kMask);
- str[2] = 0x80 | (c & kMask);
- return 3;
- } else {
- str[0] = 0xF0 | (c >> 18);
- str[1] = 0x80 | ((c >> 12) & kMask);
- str[2] = 0x80 | ((c >> 6) & kMask);
- str[3] = 0x80 | (c & kMask);
- return 4;
- }
-}
-
-
-uchar Utf8::ValueOf(const byte* bytes, unsigned length, unsigned* cursor) {
- if (length <= 0) return kBadChar;
- byte first = bytes[0];
- // Characters between 0000 and 0007F are encoded as a single character
- if (first <= kMaxOneByteChar) {
- *cursor += 1;
- return first;
- }
- return CalculateValue(bytes, length, cursor);
-}
-
-unsigned Utf8::Length(uchar c) {
- if (c <= kMaxOneByteChar) {
- return 1;
- } else if (c <= kMaxTwoByteChar) {
- return 2;
- } else if (c <= kMaxThreeByteChar) {
- return 3;
- } else {
- return 4;
- }
-}
-
-uchar CharacterStream::GetNext() {
- uchar result = DecodeCharacter(buffer_, &cursor_);
- if (remaining_ == 1) {
- cursor_ = 0;
- FillBuffer();
- } else {
- remaining_--;
- }
- return result;
-}
-
-#if __BYTE_ORDER == __LITTLE_ENDIAN
-#define IF_LITTLE(expr) expr
-#define IF_BIG(expr) ((void) 0)
-#elif __BYTE_ORDER == __BIG_ENDIAN
-#define IF_LITTLE(expr) ((void) 0)
-#define IF_BIG(expr) expr
-#else
-#warning Unknown byte ordering
-#endif
-
-bool CharacterStream::EncodeAsciiCharacter(uchar c, byte* buffer,
- unsigned capacity, unsigned& offset) {
- if (offset >= capacity) return false;
- buffer[offset] = c;
- offset += 1;
- return true;
-}
-
-bool CharacterStream::EncodeNonAsciiCharacter(uchar c, byte* buffer,
- unsigned capacity, unsigned& offset) {
- unsigned aligned = (offset + 0x3) & ~0x3;
- if ((aligned + sizeof(uchar)) > capacity)
- return false;
- if (offset == aligned) {
- IF_LITTLE(*reinterpret_cast<uchar*>(buffer + aligned) = (c << 8) | 0x80);
- IF_BIG(*reinterpret_cast<uchar*>(buffer + aligned) = c | (1 << 31));
- } else {
- buffer[offset] = 0x80;
- IF_LITTLE(*reinterpret_cast<uchar*>(buffer + aligned) = c << 8);
- IF_BIG(*reinterpret_cast<uchar*>(buffer + aligned) = c);
- }
- offset = aligned + sizeof(uchar);
- return true;
-}
-
-bool CharacterStream::EncodeCharacter(uchar c, byte* buffer, unsigned capacity,
- unsigned& offset) {
- if (c <= Utf8::kMaxOneByteChar) {
- return EncodeAsciiCharacter(c, buffer, capacity, offset);
- } else {
- return EncodeNonAsciiCharacter(c, buffer, capacity, offset);
- }
-}
-
-uchar CharacterStream::DecodeCharacter(const byte* buffer, unsigned* offset) {
- byte b = buffer[*offset];
- if (b <= Utf8::kMaxOneByteChar) {
- (*offset)++;
- return b;
- } else {
- unsigned aligned = (*offset + 0x3) & ~0x3;
- *offset = aligned + sizeof(uchar);
- IF_LITTLE(return *reinterpret_cast<const uchar*>(buffer + aligned) >> 8);
- IF_BIG(return *reinterpret_cast<const uchar*>(buffer + aligned) &
- ~(1 << 31));
- }
-}
-
-#undef IF_LITTLE
-#undef IF_BIG
-
-template <class R, class I, unsigned s>
-void InputBuffer<R, I, s>::FillBuffer() {
- buffer_ = R::ReadBlock(input_, util_buffer_, s, &remaining_, &offset_);
-}
-
-template <class R, class I, unsigned s>
-void InputBuffer<R, I, s>::Rewind() {
- Reset(input_);
-}
-
-template <class R, class I, unsigned s>
-void InputBuffer<R, I, s>::Reset(unsigned position, I input) {
- input_ = input;
- remaining_ = 0;
- cursor_ = 0;
- offset_ = position;
- buffer_ = R::ReadBlock(input_, util_buffer_, s, &remaining_, &offset_);
-}
-
-template <class R, class I, unsigned s>
-void InputBuffer<R, I, s>::Reset(I input) {
- Reset(0, input);
-}
-
-template <class R, class I, unsigned s>
-void InputBuffer<R, I, s>::Seek(unsigned position) {
- offset_ = position;
- buffer_ = R::ReadBlock(input_, util_buffer_, s, &remaining_, &offset_);
-}
-
-template <unsigned s>
-Utf8InputBuffer<s>::Utf8InputBuffer(const char* data, unsigned length)
- : InputBuffer<Utf8, Buffer<const char*>, s>(Buffer<const char*>(data,
- length)) {
-}
-
-} // namespace unibrow
-
-#endif // V8_UNICODE_INL_H_
diff --git a/src/3rdparty/v8/src/unicode.cc b/src/3rdparty/v8/src/unicode.cc
deleted file mode 100644
index 6e0ac1a..0000000
--- a/src/3rdparty/v8/src/unicode.cc
+++ /dev/null
@@ -1,1624 +0,0 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// This file was generated at 2011-01-03 10:57:02.088925
-
-#include "unicode-inl.h"
-#include <stdlib.h>
-#include <stdio.h>
-
-namespace unibrow {
-
-static const int kStartBit = (1 << 30);
-static const int kChunkBits = (1 << 13);
-static const uchar kSentinel = static_cast<uchar>(-1);
-
-/**
- * \file
- * Implementations of functions for working with unicode.
- */
-
-typedef signed short int16_t; // NOLINT
-typedef unsigned short uint16_t; // NOLINT
-typedef int int32_t; // NOLINT
-
-// All access to the character table should go through this function.
-template <int D>
-static inline uchar TableGet(const int32_t* table, int index) {
- return table[D * index];
-}
-
-static inline uchar GetEntry(int32_t entry) {
- return entry & (kStartBit - 1);
-}
-
-static inline bool IsStart(int32_t entry) {
- return (entry & kStartBit) != 0;
-}
-
-/**
- * Look up a character in the unicode table using a mix of binary and
- * interpolation search. For a uniformly distributed array
- * interpolation search beats binary search by a wide margin. However,
- * in this case interpolation search degenerates because of some very
- * high values in the lower end of the table so this function uses a
- * combination. The average number of steps to look up the information
- * about a character is around 10, slightly higher if there is no
- * information available about the character.
- */
-static bool LookupPredicate(const int32_t* table, uint16_t size, uchar chr) {
- static const int kEntryDist = 1;
- uint16_t value = chr & (kChunkBits - 1);
- unsigned int low = 0;
- unsigned int high = size - 1;
- while (high != low) {
- unsigned int mid = low + ((high - low) >> 1);
- uchar current_value = GetEntry(TableGet<kEntryDist>(table, mid));
- // If we've found an entry less than or equal to this one, and the
- // next one is not also less than this one, we've arrived.
- if ((current_value <= value) &&
- (mid + 1 == size ||
- GetEntry(TableGet<kEntryDist>(table, mid + 1)) > value)) {
- low = mid;
- break;
- } else if (current_value < value) {
- low = mid + 1;
- } else if (current_value > value) {
- // If we've just checked the bottom-most value and it's not
- // the one we're looking for, we're done.
- if (mid == 0) break;
- high = mid - 1;
- }
- }
- int32_t field = TableGet<kEntryDist>(table, low);
- uchar entry = GetEntry(field);
- bool is_start = IsStart(field);
- return (entry == value) || (entry < value && is_start);
-}
-
-template <int kW>
-struct MultiCharacterSpecialCase {
- static const uchar kEndOfEncoding = kSentinel;
- uchar chars[kW];
-};
-
-// Look up the mapping for the given character in the specified table,
-// which is of the specified length and uses the specified special case
-// mapping for multi-char mappings. The next parameter is the character
-// following the one to map. The result will be written in to the result
-// buffer and the number of characters written will be returned. Finally,
-// if the allow_caching_ptr is non-null then false will be stored in
-// it if the result contains multiple characters or depends on the
-// context.
-// If ranges are linear, a match between a start and end point is
-// offset by the distance between the match and the start. Otherwise
-// the result is the same as for the start point on the entire range.
-template <bool ranges_are_linear, int kW>
-static int LookupMapping(const int32_t* table,
- uint16_t size,
- const MultiCharacterSpecialCase<kW>* multi_chars,
- uchar chr,
- uchar next,
- uchar* result,
- bool* allow_caching_ptr) {
- static const int kEntryDist = 2;
- uint16_t key = chr & (kChunkBits - 1);
- uint16_t chunk_start = chr - key;
- unsigned int low = 0;
- unsigned int high = size - 1;
- while (high != low) {
- unsigned int mid = low + ((high - low) >> 1);
- uchar current_value = GetEntry(TableGet<kEntryDist>(table, mid));
- // If we've found an entry less than or equal to this one, and the next one
- // is not also less than this one, we've arrived.
- if ((current_value <= key) &&
- (mid + 1 == size ||
- GetEntry(TableGet<kEntryDist>(table, mid + 1)) > key)) {
- low = mid;
- break;
- } else if (current_value < key) {
- low = mid + 1;
- } else if (current_value > key) {
- // If we've just checked the bottom-most value and it's not
- // the one we're looking for, we're done.
- if (mid == 0) break;
- high = mid - 1;
- }
- }
- int32_t field = TableGet<kEntryDist>(table, low);
- uchar entry = GetEntry(field);
- bool is_start = IsStart(field);
- bool found = (entry == key) || (entry < key && is_start);
- if (found) {
- int32_t value = table[2 * low + 1];
- if (value == 0) {
- // 0 means not present
- return 0;
- } else if ((value & 3) == 0) {
- // Low bits 0 means a constant offset from the given character.
- if (ranges_are_linear) {
- result[0] = chr + (value >> 2);
- } else {
- result[0] = entry + chunk_start + (value >> 2);
- }
- return 1;
- } else if ((value & 3) == 1) {
- // Low bits 1 means a special case mapping
- if (allow_caching_ptr) *allow_caching_ptr = false;
- const MultiCharacterSpecialCase<kW>& mapping = multi_chars[value >> 2];
- int length = 0;
- for (length = 0; length < kW; length++) {
- uchar mapped = mapping.chars[length];
- if (mapped == MultiCharacterSpecialCase<kW>::kEndOfEncoding) break;
- if (ranges_are_linear) {
- result[length] = mapped + (key - entry);
- } else {
- result[length] = mapped;
- }
- }
- return length;
- } else {
- // Low bits 2 means a really really special case
- if (allow_caching_ptr) *allow_caching_ptr = false;
- // The cases of this switch are defined in unicode.py in the
- // really_special_cases mapping.
- switch (value >> 2) {
- case 1:
- // Really special case 1: upper case sigma. This letter
- // converts to two different lower case sigmas depending on
- // whether or not it occurs at the end of a word.
- if (next != 0 && Letter::Is(next)) {
- result[0] = 0x03C3;
- } else {
- result[0] = 0x03C2;
- }
- return 1;
- default:
- return 0;
- }
- return -1;
- }
- } else {
- return 0;
- }
-}
-
-uchar Utf8::CalculateValue(const byte* str,
- unsigned length,
- unsigned* cursor) {
- // We only get called for non-ascii characters.
- if (length == 1) {
- *cursor += 1;
- return kBadChar;
- }
- byte first = str[0];
- byte second = str[1] ^ 0x80;
- if (second & 0xC0) {
- *cursor += 1;
- return kBadChar;
- }
- if (first < 0xE0) {
- if (first < 0xC0) {
- *cursor += 1;
- return kBadChar;
- }
- uchar code_point = ((first << 6) | second) & kMaxTwoByteChar;
- if (code_point <= kMaxOneByteChar) {
- *cursor += 1;
- return kBadChar;
- }
- *cursor += 2;
- return code_point;
- }
- if (length == 2) {
- *cursor += 1;
- return kBadChar;
- }
- byte third = str[2] ^ 0x80;
- if (third & 0xC0) {
- *cursor += 1;
- return kBadChar;
- }
- if (first < 0xF0) {
- uchar code_point = ((((first << 6) | second) << 6) | third)
- & kMaxThreeByteChar;
- if (code_point <= kMaxTwoByteChar) {
- *cursor += 1;
- return kBadChar;
- }
- *cursor += 3;
- return code_point;
- }
- if (length == 3) {
- *cursor += 1;
- return kBadChar;
- }
- byte fourth = str[3] ^ 0x80;
- if (fourth & 0xC0) {
- *cursor += 1;
- return kBadChar;
- }
- if (first < 0xF8) {
- uchar code_point = (((((first << 6 | second) << 6) | third) << 6) | fourth)
- & kMaxFourByteChar;
- if (code_point <= kMaxThreeByteChar) {
- *cursor += 1;
- return kBadChar;
- }
- *cursor += 4;
- return code_point;
- }
- *cursor += 1;
- return kBadChar;
-}
-
-const byte* Utf8::ReadBlock(Buffer<const char*> str, byte* buffer,
- unsigned capacity, unsigned* chars_read_ptr, unsigned* offset_ptr) {
- unsigned offset = *offset_ptr;
- // Bail out early if we've reached the end of the string.
- if (offset == str.length()) {
- *chars_read_ptr = 0;
- return NULL;
- }
- const byte* data = reinterpret_cast<const byte*>(str.data());
- if (data[offset] <= kMaxOneByteChar) {
- // The next character is an ascii char so we scan forward over
- // the following ascii characters and return the next pure ascii
- // substring
- const byte* result = data + offset;
- offset++;
- while ((offset < str.length()) && (data[offset] <= kMaxOneByteChar))
- offset++;
- *chars_read_ptr = offset - *offset_ptr;
- *offset_ptr = offset;
- return result;
- } else {
- // The next character is non-ascii so we just fill the buffer
- unsigned cursor = 0;
- unsigned chars_read = 0;
- while (offset < str.length()) {
- uchar c = data[offset];
- if (c <= kMaxOneByteChar) {
- // Fast case for ascii characters
- if (!CharacterStream::EncodeAsciiCharacter(c,
- buffer,
- capacity,
- cursor))
- break;
- offset += 1;
- } else {
- unsigned chars = 0;
- c = Utf8::ValueOf(data + offset, str.length() - offset, &chars);
- if (!CharacterStream::EncodeNonAsciiCharacter(c,
- buffer,
- capacity,
- cursor))
- break;
- offset += chars;
- }
- chars_read++;
- }
- *offset_ptr = offset;
- *chars_read_ptr = chars_read;
- return buffer;
- }
-}
-
-unsigned CharacterStream::Length() {
- unsigned result = 0;
- while (has_more()) {
- result++;
- GetNext();
- }
- Rewind();
- return result;
-}
-
-void CharacterStream::Seek(unsigned position) {
- Rewind();
- for (unsigned i = 0; i < position; i++) {
- GetNext();
- }
-}
-
-// Uppercase: point.category == 'Lu'
-
-static const uint16_t kUppercaseTable0Size = 430;
-static const int32_t kUppercaseTable0[430] = {
- 1073741889, 90, 1073742016, 214, 1073742040, 222, 256, 258, // NOLINT
- 260, 262, 264, 266, 268, 270, 272, 274, // NOLINT
- 276, 278, 280, 282, 284, 286, 288, 290, // NOLINT
- 292, 294, 296, 298, 300, 302, 304, 306, // NOLINT
- 308, 310, 313, 315, 317, 319, 321, 323, // NOLINT
- 325, 327, 330, 332, 334, 336, 338, 340, // NOLINT
- 342, 344, 346, 348, 350, 352, 354, 356, // NOLINT
- 358, 360, 362, 364, 366, 368, 370, 372, // NOLINT
- 374, 1073742200, 377, 379, 381, 1073742209, 386, 388, // NOLINT
- 1073742214, 391, 1073742217, 395, 1073742222, 401, 1073742227, 404, // NOLINT
- 1073742230, 408, 1073742236, 413, 1073742239, 416, 418, 420, // NOLINT
- 1073742246, 423, 425, 428, 1073742254, 431, 1073742257, 435, // NOLINT
- 437, 1073742263, 440, 444, 452, 455, 458, 461, // NOLINT
- 463, 465, 467, 469, 471, 473, 475, 478, // NOLINT
- 480, 482, 484, 486, 488, 490, 492, 494, // NOLINT
- 497, 500, 1073742326, 504, 506, 508, 510, 512, // NOLINT
- 514, 516, 518, 520, 522, 524, 526, 528, // NOLINT
- 530, 532, 534, 536, 538, 540, 542, 544, // NOLINT
- 546, 548, 550, 552, 554, 556, 558, 560, // NOLINT
- 562, 1073742394, 571, 1073742397, 574, 577, 1073742403, 582, // NOLINT
- 584, 586, 588, 590, 902, 1073742728, 906, 908, // NOLINT
- 1073742734, 911, 1073742737, 929, 1073742755, 939, 1073742802, 980, // NOLINT
- 984, 986, 988, 990, 992, 994, 996, 998, // NOLINT
- 1000, 1002, 1004, 1006, 1012, 1015, 1073742841, 1018, // NOLINT
- 1073742845, 1071, 1120, 1122, 1124, 1126, 1128, 1130, // NOLINT
- 1132, 1134, 1136, 1138, 1140, 1142, 1144, 1146, // NOLINT
- 1148, 1150, 1152, 1162, 1164, 1166, 1168, 1170, // NOLINT
- 1172, 1174, 1176, 1178, 1180, 1182, 1184, 1186, // NOLINT
- 1188, 1190, 1192, 1194, 1196, 1198, 1200, 1202, // NOLINT
- 1204, 1206, 1208, 1210, 1212, 1214, 1073743040, 1217, // NOLINT
- 1219, 1221, 1223, 1225, 1227, 1229, 1232, 1234, // NOLINT
- 1236, 1238, 1240, 1242, 1244, 1246, 1248, 1250, // NOLINT
- 1252, 1254, 1256, 1258, 1260, 1262, 1264, 1266, // NOLINT
- 1268, 1270, 1272, 1274, 1276, 1278, 1280, 1282, // NOLINT
- 1284, 1286, 1288, 1290, 1292, 1294, 1296, 1298, // NOLINT
- 1073743153, 1366, 1073746080, 4293, 7680, 7682, 7684, 7686, // NOLINT
- 7688, 7690, 7692, 7694, 7696, 7698, 7700, 7702, // NOLINT
- 7704, 7706, 7708, 7710, 7712, 7714, 7716, 7718, // NOLINT
- 7720, 7722, 7724, 7726, 7728, 7730, 7732, 7734, // NOLINT
- 7736, 7738, 7740, 7742, 7744, 7746, 7748, 7750, // NOLINT
- 7752, 7754, 7756, 7758, 7760, 7762, 7764, 7766, // NOLINT
- 7768, 7770, 7772, 7774, 7776, 7778, 7780, 7782, // NOLINT
- 7784, 7786, 7788, 7790, 7792, 7794, 7796, 7798, // NOLINT
- 7800, 7802, 7804, 7806, 7808, 7810, 7812, 7814, // NOLINT
- 7816, 7818, 7820, 7822, 7824, 7826, 7828, 7840, // NOLINT
- 7842, 7844, 7846, 7848, 7850, 7852, 7854, 7856, // NOLINT
- 7858, 7860, 7862, 7864, 7866, 7868, 7870, 7872, // NOLINT
- 7874, 7876, 7878, 7880, 7882, 7884, 7886, 7888, // NOLINT
- 7890, 7892, 7894, 7896, 7898, 7900, 7902, 7904, // NOLINT
- 7906, 7908, 7910, 7912, 7914, 7916, 7918, 7920, // NOLINT
- 7922, 7924, 7926, 7928, 1073749768, 7951, 1073749784, 7965, // NOLINT
- 1073749800, 7983, 1073749816, 7999, 1073749832, 8013, 8025, 8027, // NOLINT
- 8029, 8031, 1073749864, 8047, 1073749944, 8123, 1073749960, 8139, // NOLINT
- 1073749976, 8155, 1073749992, 8172, 1073750008, 8187 }; // NOLINT
-static const uint16_t kUppercaseTable1Size = 79;
-static const int32_t kUppercaseTable1[79] = {
- 258, 263, 1073742091, 269, 1073742096, 274, 277, 1073742105, // NOLINT
- 285, 292, 294, 296, 1073742122, 301, 1073742128, 307, // NOLINT
- 1073742142, 319, 325, 387, 1073744896, 3118, 3168, 1073744994, // NOLINT
- 3172, 3175, 3177, 3179, 3189, 3200, 3202, 3204, // NOLINT
- 3206, 3208, 3210, 3212, 3214, 3216, 3218, 3220, // NOLINT
- 3222, 3224, 3226, 3228, 3230, 3232, 3234, 3236, // NOLINT
- 3238, 3240, 3242, 3244, 3246, 3248, 3250, 3252, // NOLINT
- 3254, 3256, 3258, 3260, 3262, 3264, 3266, 3268, // NOLINT
- 3270, 3272, 3274, 3276, 3278, 3280, 3282, 3284, // NOLINT
- 3286, 3288, 3290, 3292, 3294, 3296, 3298 }; // NOLINT
-static const uint16_t kUppercaseTable7Size = 2;
-static const int32_t kUppercaseTable7[2] = {
- 1073749793, 7994 }; // NOLINT
-bool Uppercase::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kUppercaseTable0,
- kUppercaseTable0Size,
- c);
- case 1: return LookupPredicate(kUppercaseTable1,
- kUppercaseTable1Size,
- c);
- case 7: return LookupPredicate(kUppercaseTable7,
- kUppercaseTable7Size,
- c);
- default: return false;
- }
-}
-
-// Lowercase: point.category == 'Ll'
-
-static const uint16_t kLowercaseTable0Size = 449;
-static const int32_t kLowercaseTable0[449] = {
- 1073741921, 122, 170, 181, 186, 1073742047, 246, 1073742072, // NOLINT
- 255, 257, 259, 261, 263, 265, 267, 269, // NOLINT
- 271, 273, 275, 277, 279, 281, 283, 285, // NOLINT
- 287, 289, 291, 293, 295, 297, 299, 301, // NOLINT
- 303, 305, 307, 309, 1073742135, 312, 314, 316, // NOLINT
- 318, 320, 322, 324, 326, 1073742152, 329, 331, // NOLINT
- 333, 335, 337, 339, 341, 343, 345, 347, // NOLINT
- 349, 351, 353, 355, 357, 359, 361, 363, // NOLINT
- 365, 367, 369, 371, 373, 375, 378, 380, // NOLINT
- 1073742206, 384, 387, 389, 392, 1073742220, 397, 402, // NOLINT
- 405, 1073742233, 411, 414, 417, 419, 421, 424, // NOLINT
- 1073742250, 427, 429, 432, 436, 438, 1073742265, 442, // NOLINT
- 1073742269, 447, 454, 457, 460, 462, 464, 466, // NOLINT
- 468, 470, 472, 474, 1073742300, 477, 479, 481, // NOLINT
- 483, 485, 487, 489, 491, 493, 1073742319, 496, // NOLINT
- 499, 501, 505, 507, 509, 511, 513, 515, // NOLINT
- 517, 519, 521, 523, 525, 527, 529, 531, // NOLINT
- 533, 535, 537, 539, 541, 543, 545, 547, // NOLINT
- 549, 551, 553, 555, 557, 559, 561, 1073742387, // NOLINT
- 569, 572, 1073742399, 576, 578, 583, 585, 587, // NOLINT
- 589, 1073742415, 659, 1073742485, 687, 1073742715, 893, 912, // NOLINT
- 1073742764, 974, 1073742800, 977, 1073742805, 983, 985, 987, // NOLINT
- 989, 991, 993, 995, 997, 999, 1001, 1003, // NOLINT
- 1005, 1073742831, 1011, 1013, 1016, 1073742843, 1020, 1073742896, // NOLINT
- 1119, 1121, 1123, 1125, 1127, 1129, 1131, 1133, // NOLINT
- 1135, 1137, 1139, 1141, 1143, 1145, 1147, 1149, // NOLINT
- 1151, 1153, 1163, 1165, 1167, 1169, 1171, 1173, // NOLINT
- 1175, 1177, 1179, 1181, 1183, 1185, 1187, 1189, // NOLINT
- 1191, 1193, 1195, 1197, 1199, 1201, 1203, 1205, // NOLINT
- 1207, 1209, 1211, 1213, 1215, 1218, 1220, 1222, // NOLINT
- 1224, 1226, 1228, 1073743054, 1231, 1233, 1235, 1237, // NOLINT
- 1239, 1241, 1243, 1245, 1247, 1249, 1251, 1253, // NOLINT
- 1255, 1257, 1259, 1261, 1263, 1265, 1267, 1269, // NOLINT
- 1271, 1273, 1275, 1277, 1279, 1281, 1283, 1285, // NOLINT
- 1287, 1289, 1291, 1293, 1295, 1297, 1299, 1073743201, // NOLINT
- 1415, 1073749248, 7467, 1073749346, 7543, 1073749369, 7578, 7681, // NOLINT
- 7683, 7685, 7687, 7689, 7691, 7693, 7695, 7697, // NOLINT
- 7699, 7701, 7703, 7705, 7707, 7709, 7711, 7713, // NOLINT
- 7715, 7717, 7719, 7721, 7723, 7725, 7727, 7729, // NOLINT
- 7731, 7733, 7735, 7737, 7739, 7741, 7743, 7745, // NOLINT
- 7747, 7749, 7751, 7753, 7755, 7757, 7759, 7761, // NOLINT
- 7763, 7765, 7767, 7769, 7771, 7773, 7775, 7777, // NOLINT
- 7779, 7781, 7783, 7785, 7787, 7789, 7791, 7793, // NOLINT
- 7795, 7797, 7799, 7801, 7803, 7805, 7807, 7809, // NOLINT
- 7811, 7813, 7815, 7817, 7819, 7821, 7823, 7825, // NOLINT
- 7827, 1073749653, 7835, 7841, 7843, 7845, 7847, 7849, // NOLINT
- 7851, 7853, 7855, 7857, 7859, 7861, 7863, 7865, // NOLINT
- 7867, 7869, 7871, 7873, 7875, 7877, 7879, 7881, // NOLINT
- 7883, 7885, 7887, 7889, 7891, 7893, 7895, 7897, // NOLINT
- 7899, 7901, 7903, 7905, 7907, 7909, 7911, 7913, // NOLINT
- 7915, 7917, 7919, 7921, 7923, 7925, 7927, 7929, // NOLINT
- 1073749760, 7943, 1073749776, 7957, 1073749792, 7975, 1073749808, 7991, // NOLINT
- 1073749824, 8005, 1073749840, 8023, 1073749856, 8039, 1073749872, 8061, // NOLINT
- 1073749888, 8071, 1073749904, 8087, 1073749920, 8103, 1073749936, 8116, // NOLINT
- 1073749942, 8119, 8126, 1073749954, 8132, 1073749958, 8135, 1073749968, // NOLINT
- 8147, 1073749974, 8151, 1073749984, 8167, 1073750002, 8180, 1073750006, // NOLINT
- 8183 }; // NOLINT
-static const uint16_t kLowercaseTable1Size = 79;
-static const int32_t kLowercaseTable1[79] = {
- 113, 127, 266, 1073742094, 271, 275, 303, 308, // NOLINT
- 313, 1073742140, 317, 1073742150, 329, 334, 388, 1073744944, // NOLINT
- 3166, 3169, 1073744997, 3174, 3176, 3178, 3180, 3188, // NOLINT
- 1073745014, 3191, 3201, 3203, 3205, 3207, 3209, 3211, // NOLINT
- 3213, 3215, 3217, 3219, 3221, 3223, 3225, 3227, // NOLINT
- 3229, 3231, 3233, 3235, 3237, 3239, 3241, 3243, // NOLINT
- 3245, 3247, 3249, 3251, 3253, 3255, 3257, 3259, // NOLINT
- 3261, 3263, 3265, 3267, 3269, 3271, 3273, 3275, // NOLINT
- 3277, 3279, 3281, 3283, 3285, 3287, 3289, 3291, // NOLINT
- 3293, 3295, 3297, 1073745123, 3300, 1073745152, 3365 }; // NOLINT
-static const uint16_t kLowercaseTable7Size = 6;
-static const int32_t kLowercaseTable7[6] = {
- 1073748736, 6918, 1073748755, 6935, 1073749825, 8026 }; // NOLINT
-bool Lowercase::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kLowercaseTable0,
- kLowercaseTable0Size,
- c);
- case 1: return LookupPredicate(kLowercaseTable1,
- kLowercaseTable1Size,
- c);
- case 7: return LookupPredicate(kLowercaseTable7,
- kLowercaseTable7Size,
- c);
- default: return false;
- }
-}
-
-// Letter: point.category in ['Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl' ]
-
-static const uint16_t kLetterTable0Size = 394;
-static const int32_t kLetterTable0[394] = {
- 1073741889, 90, 1073741921, 122, 170, 181, 186, 1073742016, // NOLINT
- 214, 1073742040, 246, 1073742072, 705, 1073742534, 721, 1073742560, // NOLINT
- 740, 750, 1073742714, 893, 902, 1073742728, 906, 908, // NOLINT
- 1073742734, 929, 1073742755, 974, 1073742800, 1013, 1073742839, 1153, // NOLINT
- 1073742986, 1299, 1073743153, 1366, 1369, 1073743201, 1415, 1073743312, // NOLINT
- 1514, 1073743344, 1522, 1073743393, 1594, 1073743424, 1610, 1073743470, // NOLINT
- 1647, 1073743473, 1747, 1749, 1073743589, 1766, 1073743598, 1775, // NOLINT
- 1073743610, 1788, 1791, 1808, 1073743634, 1839, 1073743693, 1901, // NOLINT
- 1073743744, 1957, 1969, 1073743818, 2026, 1073743860, 2037, 2042, // NOLINT
- 1073744132, 2361, 2365, 2384, 1073744216, 2401, 1073744251, 2431, // NOLINT
- 1073744261, 2444, 1073744271, 2448, 1073744275, 2472, 1073744298, 2480, // NOLINT
- 2482, 1073744310, 2489, 2493, 2510, 1073744348, 2525, 1073744351, // NOLINT
- 2529, 1073744368, 2545, 1073744389, 2570, 1073744399, 2576, 1073744403, // NOLINT
- 2600, 1073744426, 2608, 1073744434, 2611, 1073744437, 2614, 1073744440, // NOLINT
- 2617, 1073744473, 2652, 2654, 1073744498, 2676, 1073744517, 2701, // NOLINT
- 1073744527, 2705, 1073744531, 2728, 1073744554, 2736, 1073744562, 2739, // NOLINT
- 1073744565, 2745, 2749, 2768, 1073744608, 2785, 1073744645, 2828, // NOLINT
- 1073744655, 2832, 1073744659, 2856, 1073744682, 2864, 1073744690, 2867, // NOLINT
- 1073744693, 2873, 2877, 1073744732, 2909, 1073744735, 2913, 2929, // NOLINT
- 2947, 1073744773, 2954, 1073744782, 2960, 1073744786, 2965, 1073744793, // NOLINT
- 2970, 2972, 1073744798, 2975, 1073744803, 2980, 1073744808, 2986, // NOLINT
- 1073744814, 3001, 1073744901, 3084, 1073744910, 3088, 1073744914, 3112, // NOLINT
- 1073744938, 3123, 1073744949, 3129, 1073744992, 3169, 1073745029, 3212, // NOLINT
- 1073745038, 3216, 1073745042, 3240, 1073745066, 3251, 1073745077, 3257, // NOLINT
- 3261, 3294, 1073745120, 3297, 1073745157, 3340, 1073745166, 3344, // NOLINT
- 1073745170, 3368, 1073745194, 3385, 1073745248, 3425, 1073745285, 3478, // NOLINT
- 1073745306, 3505, 1073745331, 3515, 3517, 1073745344, 3526, 1073745409, // NOLINT
- 3632, 1073745458, 3635, 1073745472, 3654, 1073745537, 3714, 3716, // NOLINT
- 1073745543, 3720, 3722, 3725, 1073745556, 3735, 1073745561, 3743, // NOLINT
- 1073745569, 3747, 3749, 3751, 1073745578, 3755, 1073745581, 3760, // NOLINT
- 1073745586, 3763, 3773, 1073745600, 3780, 3782, 1073745628, 3805, // NOLINT
- 3840, 1073745728, 3911, 1073745737, 3946, 1073745800, 3979, 1073745920, // NOLINT
- 4129, 1073745955, 4135, 1073745961, 4138, 1073746000, 4181, 1073746080, // NOLINT
- 4293, 1073746128, 4346, 4348, 1073746176, 4441, 1073746271, 4514, // NOLINT
- 1073746344, 4601, 1073746432, 4680, 1073746506, 4685, 1073746512, 4694, // NOLINT
- 4696, 1073746522, 4701, 1073746528, 4744, 1073746570, 4749, 1073746576, // NOLINT
- 4784, 1073746610, 4789, 1073746616, 4798, 4800, 1073746626, 4805, // NOLINT
- 1073746632, 4822, 1073746648, 4880, 1073746706, 4885, 1073746712, 4954, // NOLINT
- 1073746816, 5007, 1073746848, 5108, 1073746945, 5740, 1073747567, 5750, // NOLINT
- 1073747585, 5786, 1073747616, 5866, 1073747694, 5872, 1073747712, 5900, // NOLINT
- 1073747726, 5905, 1073747744, 5937, 1073747776, 5969, 1073747808, 5996, // NOLINT
- 1073747822, 6000, 1073747840, 6067, 6103, 6108, 1073748000, 6263, // NOLINT
- 1073748096, 6312, 1073748224, 6428, 1073748304, 6509, 1073748336, 6516, // NOLINT
- 1073748352, 6569, 1073748417, 6599, 1073748480, 6678, 1073748741, 6963, // NOLINT
- 1073748805, 6987, 1073749248, 7615, 1073749504, 7835, 1073749664, 7929, // NOLINT
- 1073749760, 7957, 1073749784, 7965, 1073749792, 8005, 1073749832, 8013, // NOLINT
- 1073749840, 8023, 8025, 8027, 8029, 1073749855, 8061, 1073749888, // NOLINT
- 8116, 1073749942, 8124, 8126, 1073749954, 8132, 1073749958, 8140, // NOLINT
- 1073749968, 8147, 1073749974, 8155, 1073749984, 8172, 1073750002, 8180, // NOLINT
- 1073750006, 8188 }; // NOLINT
-static const uint16_t kLetterTable1Size = 84;
-static const int32_t kLetterTable1[84] = {
- 113, 127, 1073741968, 148, 258, 263, 1073742090, 275, // NOLINT
- 277, 1073742105, 285, 292, 294, 296, 1073742122, 301, // NOLINT
- 1073742127, 313, 1073742140, 319, 1073742149, 329, 334, 1073742176, // NOLINT
- 388, 1073744896, 3118, 1073744944, 3166, 1073744992, 3180, 1073745012, // NOLINT
- 3191, 1073745024, 3300, 1073745152, 3365, 1073745200, 3429, 3439, // NOLINT
- 1073745280, 3478, 1073745312, 3494, 1073745320, 3502, 1073745328, 3510, // NOLINT
- 1073745336, 3518, 1073745344, 3526, 1073745352, 3534, 1073745360, 3542, // NOLINT
- 1073745368, 3550, 1073745925, 4103, 1073745953, 4137, 1073745969, 4149, // NOLINT
- 1073745976, 4156, 1073745985, 4246, 1073746077, 4255, 1073746081, 4346, // NOLINT
- 1073746172, 4351, 1073746181, 4396, 1073746225, 4494, 1073746336, 4535, // NOLINT
- 1073746416, 4607, 1073746944, 8191 }; // NOLINT
-static const uint16_t kLetterTable2Size = 4;
-static const int32_t kLetterTable2[4] = {
- 1073741824, 3509, 1073745408, 8191 }; // NOLINT
-static const uint16_t kLetterTable3Size = 2;
-static const int32_t kLetterTable3[2] = {
- 1073741824, 8191 }; // NOLINT
-static const uint16_t kLetterTable4Size = 2;
-static const int32_t kLetterTable4[2] = {
- 1073741824, 8123 }; // NOLINT
-static const uint16_t kLetterTable5Size = 16;
-static const int32_t kLetterTable5[16] = {
- 1073741824, 1164, 1073743639, 1818, 1073743872, 2049, 1073743875, 2053, // NOLINT
- 1073743879, 2058, 1073743884, 2082, 1073743936, 2163, 1073744896, 8191 }; // NOLINT
-static const uint16_t kLetterTable6Size = 2;
-static const int32_t kLetterTable6[2] = {
- 1073741824, 6051 }; // NOLINT
-static const uint16_t kLetterTable7Size = 50;
-static const int32_t kLetterTable7[50] = {
- 1073748224, 6701, 1073748528, 6762, 1073748592, 6873, 1073748736, 6918, // NOLINT
- 1073748755, 6935, 6941, 1073748767, 6952, 1073748778, 6966, 1073748792, // NOLINT
- 6972, 6974, 1073748800, 6977, 1073748803, 6980, 1073748806, 7089, // NOLINT
- 1073748947, 7485, 1073749328, 7567, 1073749394, 7623, 1073749488, 7675, // NOLINT
- 1073749616, 7796, 1073749622, 7932, 1073749793, 7994, 1073749825, 8026, // NOLINT
- 1073749862, 8126, 1073749954, 8135, 1073749962, 8143, 1073749970, 8151, // NOLINT
- 1073749978, 8156 }; // NOLINT
-bool Letter::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kLetterTable0,
- kLetterTable0Size,
- c);
- case 1: return LookupPredicate(kLetterTable1,
- kLetterTable1Size,
- c);
- case 2: return LookupPredicate(kLetterTable2,
- kLetterTable2Size,
- c);
- case 3: return LookupPredicate(kLetterTable3,
- kLetterTable3Size,
- c);
- case 4: return LookupPredicate(kLetterTable4,
- kLetterTable4Size,
- c);
- case 5: return LookupPredicate(kLetterTable5,
- kLetterTable5Size,
- c);
- case 6: return LookupPredicate(kLetterTable6,
- kLetterTable6Size,
- c);
- case 7: return LookupPredicate(kLetterTable7,
- kLetterTable7Size,
- c);
- default: return false;
- }
-}
-
-// Space: point.category == 'Zs'
-
-static const uint16_t kSpaceTable0Size = 4;
-static const int32_t kSpaceTable0[4] = {
- 32, 160, 5760, 6158 }; // NOLINT
-static const uint16_t kSpaceTable1Size = 5;
-static const int32_t kSpaceTable1[5] = {
- 1073741824, 10, 47, 95, 4096 }; // NOLINT
-bool Space::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kSpaceTable0,
- kSpaceTable0Size,
- c);
- case 1: return LookupPredicate(kSpaceTable1,
- kSpaceTable1Size,
- c);
- default: return false;
- }
-}
-
-// Number: point.category == 'Nd'
-
-static const uint16_t kNumberTable0Size = 44;
-static const int32_t kNumberTable0[44] = {
- 1073741872, 57, 1073743456, 1641, 1073743600, 1785, 1073743808, 1993, // NOLINT
- 1073744230, 2415, 1073744358, 2543, 1073744486, 2671, 1073744614, 2799, // NOLINT
- 1073744742, 2927, 1073744870, 3055, 1073744998, 3183, 1073745126, 3311, // NOLINT
- 1073745254, 3439, 1073745488, 3673, 1073745616, 3801, 1073745696, 3881, // NOLINT
- 1073745984, 4169, 1073747936, 6121, 1073747984, 6169, 1073748294, 6479, // NOLINT
- 1073748432, 6617, 1073748816, 7001 }; // NOLINT
-static const uint16_t kNumberTable7Size = 2;
-static const int32_t kNumberTable7[2] = {
- 1073749776, 7961 }; // NOLINT
-bool Number::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kNumberTable0,
- kNumberTable0Size,
- c);
- case 7: return LookupPredicate(kNumberTable7,
- kNumberTable7Size,
- c);
- default: return false;
- }
-}
-
-// WhiteSpace: 'Ws' in point.properties
-
-static const uint16_t kWhiteSpaceTable0Size = 7;
-static const int32_t kWhiteSpaceTable0[7] = {
- 1073741833, 13, 32, 133, 160, 5760, 6158 }; // NOLINT
-static const uint16_t kWhiteSpaceTable1Size = 7;
-static const int32_t kWhiteSpaceTable1[7] = {
- 1073741824, 10, 1073741864, 41, 47, 95, 4096 }; // NOLINT
-bool WhiteSpace::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kWhiteSpaceTable0,
- kWhiteSpaceTable0Size,
- c);
- case 1: return LookupPredicate(kWhiteSpaceTable1,
- kWhiteSpaceTable1Size,
- c);
- default: return false;
- }
-}
-
-// LineTerminator: 'Lt' in point.properties
-
-static const uint16_t kLineTerminatorTable0Size = 2;
-static const int32_t kLineTerminatorTable0[2] = {
- 10, 13 }; // NOLINT
-static const uint16_t kLineTerminatorTable1Size = 2;
-static const int32_t kLineTerminatorTable1[2] = {
- 1073741864, 41 }; // NOLINT
-bool LineTerminator::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kLineTerminatorTable0,
- kLineTerminatorTable0Size,
- c);
- case 1: return LookupPredicate(kLineTerminatorTable1,
- kLineTerminatorTable1Size,
- c);
- default: return false;
- }
-}
-
-// CombiningMark: point.category in ['Mn', 'Mc']
-
-static const uint16_t kCombiningMarkTable0Size = 205;
-static const int32_t kCombiningMarkTable0[205] = {
- 1073742592, 879, 1073742979, 1158, 1073743249, 1469, 1471, 1073743297, // NOLINT
- 1474, 1073743300, 1477, 1479, 1073743376, 1557, 1073743435, 1630, // NOLINT
- 1648, 1073743574, 1756, 1073743583, 1764, 1073743591, 1768, 1073743594, // NOLINT
- 1773, 1809, 1073743664, 1866, 1073743782, 1968, 1073743851, 2035, // NOLINT
- 1073744129, 2307, 2364, 1073744190, 2381, 1073744209, 2388, 1073744226, // NOLINT
- 2403, 1073744257, 2435, 2492, 1073744318, 2500, 1073744327, 2504, // NOLINT
- 1073744331, 2509, 2519, 1073744354, 2531, 1073744385, 2563, 2620, // NOLINT
- 1073744446, 2626, 1073744455, 2632, 1073744459, 2637, 1073744496, 2673, // NOLINT
- 1073744513, 2691, 2748, 1073744574, 2757, 1073744583, 2761, 1073744587, // NOLINT
- 2765, 1073744610, 2787, 1073744641, 2819, 2876, 1073744702, 2883, // NOLINT
- 1073744711, 2888, 1073744715, 2893, 1073744726, 2903, 2946, 1073744830, // NOLINT
- 3010, 1073744838, 3016, 1073744842, 3021, 3031, 1073744897, 3075, // NOLINT
- 1073744958, 3140, 1073744966, 3144, 1073744970, 3149, 1073744981, 3158, // NOLINT
- 1073745026, 3203, 3260, 1073745086, 3268, 1073745094, 3272, 1073745098, // NOLINT
- 3277, 1073745109, 3286, 1073745122, 3299, 1073745154, 3331, 1073745214, // NOLINT
- 3395, 1073745222, 3400, 1073745226, 3405, 3415, 1073745282, 3459, // NOLINT
- 3530, 1073745359, 3540, 3542, 1073745368, 3551, 1073745394, 3571, // NOLINT
- 3633, 1073745460, 3642, 1073745479, 3662, 3761, 1073745588, 3769, // NOLINT
- 1073745595, 3772, 1073745608, 3789, 1073745688, 3865, 3893, 3895, // NOLINT
- 3897, 1073745726, 3903, 1073745777, 3972, 1073745798, 3975, 1073745808, // NOLINT
- 3991, 1073745817, 4028, 4038, 1073745964, 4146, 1073745974, 4153, // NOLINT
- 1073746006, 4185, 4959, 1073747730, 5908, 1073747762, 5940, 1073747794, // NOLINT
- 5971, 1073747826, 6003, 1073747894, 6099, 6109, 1073747979, 6157, // NOLINT
- 6313, 1073748256, 6443, 1073748272, 6459, 1073748400, 6592, 1073748424, // NOLINT
- 6601, 1073748503, 6683, 1073748736, 6916, 1073748788, 6980, 1073748843, // NOLINT
- 7027, 1073749440, 7626, 1073749502, 7679 }; // NOLINT
-static const uint16_t kCombiningMarkTable1Size = 9;
-static const int32_t kCombiningMarkTable1[9] = {
- 1073742032, 220, 225, 1073742053, 239, 1073745962, 4143, 1073746073, // NOLINT
- 4250 }; // NOLINT
-static const uint16_t kCombiningMarkTable5Size = 5;
-static const int32_t kCombiningMarkTable5[5] = {
- 2050, 2054, 2059, 1073743907, 2087 }; // NOLINT
-static const uint16_t kCombiningMarkTable7Size = 5;
-static const int32_t kCombiningMarkTable7[5] = {
- 6942, 1073749504, 7695, 1073749536, 7715 }; // NOLINT
-bool CombiningMark::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kCombiningMarkTable0,
- kCombiningMarkTable0Size,
- c);
- case 1: return LookupPredicate(kCombiningMarkTable1,
- kCombiningMarkTable1Size,
- c);
- case 5: return LookupPredicate(kCombiningMarkTable5,
- kCombiningMarkTable5Size,
- c);
- case 7: return LookupPredicate(kCombiningMarkTable7,
- kCombiningMarkTable7Size,
- c);
- default: return false;
- }
-}
-
-// ConnectorPunctuation: point.category == 'Pc'
-
-static const uint16_t kConnectorPunctuationTable0Size = 1;
-static const int32_t kConnectorPunctuationTable0[1] = {
- 95 }; // NOLINT
-static const uint16_t kConnectorPunctuationTable1Size = 3;
-static const int32_t kConnectorPunctuationTable1[3] = {
- 1073741887, 64, 84 }; // NOLINT
-static const uint16_t kConnectorPunctuationTable7Size = 5;
-static const int32_t kConnectorPunctuationTable7[5] = {
- 1073749555, 7732, 1073749581, 7759, 7999 }; // NOLINT
-bool ConnectorPunctuation::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kConnectorPunctuationTable0,
- kConnectorPunctuationTable0Size,
- c);
- case 1: return LookupPredicate(kConnectorPunctuationTable1,
- kConnectorPunctuationTable1Size,
- c);
- case 7: return LookupPredicate(kConnectorPunctuationTable7,
- kConnectorPunctuationTable7Size,
- c);
- default: return false;
- }
-}
-
-static const MultiCharacterSpecialCase<2> kToLowercaseMultiStrings0[2] = { // NOLINT
- {{105, 775}}, {{kSentinel}} }; // NOLINT
-static const uint16_t kToLowercaseTable0Size = 463; // NOLINT
-static const int32_t kToLowercaseTable0[926] = {
- 1073741889, 128, 90, 128, 1073742016, 128, 214, 128, 1073742040, 128, 222, 128, 256, 4, 258, 4, // NOLINT
- 260, 4, 262, 4, 264, 4, 266, 4, 268, 4, 270, 4, 272, 4, 274, 4, // NOLINT
- 276, 4, 278, 4, 280, 4, 282, 4, 284, 4, 286, 4, 288, 4, 290, 4, // NOLINT
- 292, 4, 294, 4, 296, 4, 298, 4, 300, 4, 302, 4, 304, 1, 306, 4, // NOLINT
- 308, 4, 310, 4, 313, 4, 315, 4, 317, 4, 319, 4, 321, 4, 323, 4, // NOLINT
- 325, 4, 327, 4, 330, 4, 332, 4, 334, 4, 336, 4, 338, 4, 340, 4, // NOLINT
- 342, 4, 344, 4, 346, 4, 348, 4, 350, 4, 352, 4, 354, 4, 356, 4, // NOLINT
- 358, 4, 360, 4, 362, 4, 364, 4, 366, 4, 368, 4, 370, 4, 372, 4, // NOLINT
- 374, 4, 376, -484, 377, 4, 379, 4, 381, 4, 385, 840, 386, 4, 388, 4, // NOLINT
- 390, 824, 391, 4, 1073742217, 820, 394, 820, 395, 4, 398, 316, 399, 808, 400, 812, // NOLINT
- 401, 4, 403, 820, 404, 828, 406, 844, 407, 836, 408, 4, 412, 844, 413, 852, // NOLINT
- 415, 856, 416, 4, 418, 4, 420, 4, 422, 872, 423, 4, 425, 872, 428, 4, // NOLINT
- 430, 872, 431, 4, 1073742257, 868, 434, 868, 435, 4, 437, 4, 439, 876, 440, 4, // NOLINT
- 444, 4, 452, 8, 453, 4, 455, 8, 456, 4, 458, 8, 459, 4, 461, 4, // NOLINT
- 463, 4, 465, 4, 467, 4, 469, 4, 471, 4, 473, 4, 475, 4, 478, 4, // NOLINT
- 480, 4, 482, 4, 484, 4, 486, 4, 488, 4, 490, 4, 492, 4, 494, 4, // NOLINT
- 497, 8, 498, 4, 500, 4, 502, -388, 503, -224, 504, 4, 506, 4, 508, 4, // NOLINT
- 510, 4, 512, 4, 514, 4, 516, 4, 518, 4, 520, 4, 522, 4, 524, 4, // NOLINT
- 526, 4, 528, 4, 530, 4, 532, 4, 534, 4, 536, 4, 538, 4, 540, 4, // NOLINT
- 542, 4, 544, -520, 546, 4, 548, 4, 550, 4, 552, 4, 554, 4, 556, 4, // NOLINT
- 558, 4, 560, 4, 562, 4, 570, 43180, 571, 4, 573, -652, 574, 43168, 577, 4, // NOLINT
- 579, -780, 580, 276, 581, 284, 582, 4, 584, 4, 586, 4, 588, 4, 590, 4, // NOLINT
- 902, 152, 1073742728, 148, 906, 148, 908, 256, 1073742734, 252, 911, 252, 1073742737, 128, 929, 128, // NOLINT
- 931, 6, 1073742756, 128, 939, 128, 984, 4, 986, 4, 988, 4, 990, 4, 992, 4, // NOLINT
- 994, 4, 996, 4, 998, 4, 1000, 4, 1002, 4, 1004, 4, 1006, 4, 1012, -240, // NOLINT
- 1015, 4, 1017, -28, 1018, 4, 1073742845, -520, 1023, -520, 1073742848, 320, 1039, 320, 1073742864, 128, // NOLINT
- 1071, 128, 1120, 4, 1122, 4, 1124, 4, 1126, 4, 1128, 4, 1130, 4, 1132, 4, // NOLINT
- 1134, 4, 1136, 4, 1138, 4, 1140, 4, 1142, 4, 1144, 4, 1146, 4, 1148, 4, // NOLINT
- 1150, 4, 1152, 4, 1162, 4, 1164, 4, 1166, 4, 1168, 4, 1170, 4, 1172, 4, // NOLINT
- 1174, 4, 1176, 4, 1178, 4, 1180, 4, 1182, 4, 1184, 4, 1186, 4, 1188, 4, // NOLINT
- 1190, 4, 1192, 4, 1194, 4, 1196, 4, 1198, 4, 1200, 4, 1202, 4, 1204, 4, // NOLINT
- 1206, 4, 1208, 4, 1210, 4, 1212, 4, 1214, 4, 1216, 60, 1217, 4, 1219, 4, // NOLINT
- 1221, 4, 1223, 4, 1225, 4, 1227, 4, 1229, 4, 1232, 4, 1234, 4, 1236, 4, // NOLINT
- 1238, 4, 1240, 4, 1242, 4, 1244, 4, 1246, 4, 1248, 4, 1250, 4, 1252, 4, // NOLINT
- 1254, 4, 1256, 4, 1258, 4, 1260, 4, 1262, 4, 1264, 4, 1266, 4, 1268, 4, // NOLINT
- 1270, 4, 1272, 4, 1274, 4, 1276, 4, 1278, 4, 1280, 4, 1282, 4, 1284, 4, // NOLINT
- 1286, 4, 1288, 4, 1290, 4, 1292, 4, 1294, 4, 1296, 4, 1298, 4, 1073743153, 192, // NOLINT
- 1366, 192, 1073746080, 29056, 4293, 29056, 7680, 4, 7682, 4, 7684, 4, 7686, 4, 7688, 4, // NOLINT
- 7690, 4, 7692, 4, 7694, 4, 7696, 4, 7698, 4, 7700, 4, 7702, 4, 7704, 4, // NOLINT
- 7706, 4, 7708, 4, 7710, 4, 7712, 4, 7714, 4, 7716, 4, 7718, 4, 7720, 4, // NOLINT
- 7722, 4, 7724, 4, 7726, 4, 7728, 4, 7730, 4, 7732, 4, 7734, 4, 7736, 4, // NOLINT
- 7738, 4, 7740, 4, 7742, 4, 7744, 4, 7746, 4, 7748, 4, 7750, 4, 7752, 4, // NOLINT
- 7754, 4, 7756, 4, 7758, 4, 7760, 4, 7762, 4, 7764, 4, 7766, 4, 7768, 4, // NOLINT
- 7770, 4, 7772, 4, 7774, 4, 7776, 4, 7778, 4, 7780, 4, 7782, 4, 7784, 4, // NOLINT
- 7786, 4, 7788, 4, 7790, 4, 7792, 4, 7794, 4, 7796, 4, 7798, 4, 7800, 4, // NOLINT
- 7802, 4, 7804, 4, 7806, 4, 7808, 4, 7810, 4, 7812, 4, 7814, 4, 7816, 4, // NOLINT
- 7818, 4, 7820, 4, 7822, 4, 7824, 4, 7826, 4, 7828, 4, 7840, 4, 7842, 4, // NOLINT
- 7844, 4, 7846, 4, 7848, 4, 7850, 4, 7852, 4, 7854, 4, 7856, 4, 7858, 4, // NOLINT
- 7860, 4, 7862, 4, 7864, 4, 7866, 4, 7868, 4, 7870, 4, 7872, 4, 7874, 4, // NOLINT
- 7876, 4, 7878, 4, 7880, 4, 7882, 4, 7884, 4, 7886, 4, 7888, 4, 7890, 4, // NOLINT
- 7892, 4, 7894, 4, 7896, 4, 7898, 4, 7900, 4, 7902, 4, 7904, 4, 7906, 4, // NOLINT
- 7908, 4, 7910, 4, 7912, 4, 7914, 4, 7916, 4, 7918, 4, 7920, 4, 7922, 4, // NOLINT
- 7924, 4, 7926, 4, 7928, 4, 1073749768, -32, 7951, -32, 1073749784, -32, 7965, -32, 1073749800, -32, // NOLINT
- 7983, -32, 1073749816, -32, 7999, -32, 1073749832, -32, 8013, -32, 8025, -32, 8027, -32, 8029, -32, // NOLINT
- 8031, -32, 1073749864, -32, 8047, -32, 1073749896, -32, 8079, -32, 1073749912, -32, 8095, -32, 1073749928, -32, // NOLINT
- 8111, -32, 1073749944, -32, 8121, -32, 1073749946, -296, 8123, -296, 8124, -36, 1073749960, -344, 8139, -344, // NOLINT
- 8140, -36, 1073749976, -32, 8153, -32, 1073749978, -400, 8155, -400, 1073749992, -32, 8169, -32, 1073749994, -448, // NOLINT
- 8171, -448, 8172, -28, 1073750008, -512, 8185, -512, 1073750010, -504, 8187, -504, 8188, -36 }; // NOLINT
-static const uint16_t kToLowercaseMultiStrings0Size = 2; // NOLINT
-static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings1[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kToLowercaseTable1Size = 69; // NOLINT
-static const int32_t kToLowercaseTable1[138] = {
- 294, -30068, 298, -33532, 299, -33048, 306, 112, 1073742176, 64, 367, 64, 387, 4, 1073743030, 104, // NOLINT
- 1231, 104, 1073744896, 192, 3118, 192, 3168, 4, 3170, -42972, 3171, -15256, 3172, -42908, 3175, 4, // NOLINT
- 3177, 4, 3179, 4, 3189, 4, 3200, 4, 3202, 4, 3204, 4, 3206, 4, 3208, 4, // NOLINT
- 3210, 4, 3212, 4, 3214, 4, 3216, 4, 3218, 4, 3220, 4, 3222, 4, 3224, 4, // NOLINT
- 3226, 4, 3228, 4, 3230, 4, 3232, 4, 3234, 4, 3236, 4, 3238, 4, 3240, 4, // NOLINT
- 3242, 4, 3244, 4, 3246, 4, 3248, 4, 3250, 4, 3252, 4, 3254, 4, 3256, 4, // NOLINT
- 3258, 4, 3260, 4, 3262, 4, 3264, 4, 3266, 4, 3268, 4, 3270, 4, 3272, 4, // NOLINT
- 3274, 4, 3276, 4, 3278, 4, 3280, 4, 3282, 4, 3284, 4, 3286, 4, 3288, 4, // NOLINT
- 3290, 4, 3292, 4, 3294, 4, 3296, 4, 3298, 4 }; // NOLINT
-static const uint16_t kToLowercaseMultiStrings1Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings7[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kToLowercaseTable7Size = 2; // NOLINT
-static const int32_t kToLowercaseTable7[4] = {
- 1073749793, 128, 7994, 128 }; // NOLINT
-static const uint16_t kToLowercaseMultiStrings7Size = 1; // NOLINT
-int ToLowercase::Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupMapping<true>(kToLowercaseTable0,
- kToLowercaseTable0Size,
- kToLowercaseMultiStrings0,
- c,
- n,
- result,
- allow_caching_ptr);
- case 1: return LookupMapping<true>(kToLowercaseTable1,
- kToLowercaseTable1Size,
- kToLowercaseMultiStrings1,
- c,
- n,
- result,
- allow_caching_ptr);
- case 7: return LookupMapping<true>(kToLowercaseTable7,
- kToLowercaseTable7Size,
- kToLowercaseMultiStrings7,
- c,
- n,
- result,
- allow_caching_ptr);
- default: return 0;
- }
-}
-
-static const MultiCharacterSpecialCase<3> kToUppercaseMultiStrings0[62] = { // NOLINT
- {{83, 83, kSentinel}}, {{700, 78, kSentinel}}, {{74, 780, kSentinel}}, {{921, 776, 769}}, // NOLINT
- {{933, 776, 769}}, {{1333, 1362, kSentinel}}, {{72, 817, kSentinel}}, {{84, 776, kSentinel}}, // NOLINT
- {{87, 778, kSentinel}}, {{89, 778, kSentinel}}, {{65, 702, kSentinel}}, {{933, 787, kSentinel}}, // NOLINT
- {{933, 787, 768}}, {{933, 787, 769}}, {{933, 787, 834}}, {{7944, 921, kSentinel}}, // NOLINT
- {{7945, 921, kSentinel}}, {{7946, 921, kSentinel}}, {{7947, 921, kSentinel}}, {{7948, 921, kSentinel}}, // NOLINT
- {{7949, 921, kSentinel}}, {{7950, 921, kSentinel}}, {{7951, 921, kSentinel}}, {{7976, 921, kSentinel}}, // NOLINT
- {{7977, 921, kSentinel}}, {{7978, 921, kSentinel}}, {{7979, 921, kSentinel}}, {{7980, 921, kSentinel}}, // NOLINT
- {{7981, 921, kSentinel}}, {{7982, 921, kSentinel}}, {{7983, 921, kSentinel}}, {{8040, 921, kSentinel}}, // NOLINT
- {{8041, 921, kSentinel}}, {{8042, 921, kSentinel}}, {{8043, 921, kSentinel}}, {{8044, 921, kSentinel}}, // NOLINT
- {{8045, 921, kSentinel}}, {{8046, 921, kSentinel}}, {{8047, 921, kSentinel}}, {{8122, 921, kSentinel}}, // NOLINT
- {{913, 921, kSentinel}}, {{902, 921, kSentinel}}, {{913, 834, kSentinel}}, {{913, 834, 921}}, // NOLINT
- {{8138, 921, kSentinel}}, {{919, 921, kSentinel}}, {{905, 921, kSentinel}}, {{919, 834, kSentinel}}, // NOLINT
- {{919, 834, 921}}, {{921, 776, 768}}, {{921, 834, kSentinel}}, {{921, 776, 834}}, // NOLINT
- {{933, 776, 768}}, {{929, 787, kSentinel}}, {{933, 834, kSentinel}}, {{933, 776, 834}}, // NOLINT
- {{8186, 921, kSentinel}}, {{937, 921, kSentinel}}, {{911, 921, kSentinel}}, {{937, 834, kSentinel}}, // NOLINT
- {{937, 834, 921}}, {{kSentinel}} }; // NOLINT
-static const uint16_t kToUppercaseTable0Size = 554; // NOLINT
-static const int32_t kToUppercaseTable0[1108] = {
- 1073741921, -128, 122, -128, 181, 2972, 223, 1, 1073742048, -128, 246, -128, 1073742072, -128, 254, -128, // NOLINT
- 255, 484, 257, -4, 259, -4, 261, -4, 263, -4, 265, -4, 267, -4, 269, -4, // NOLINT
- 271, -4, 273, -4, 275, -4, 277, -4, 279, -4, 281, -4, 283, -4, 285, -4, // NOLINT
- 287, -4, 289, -4, 291, -4, 293, -4, 295, -4, 297, -4, 299, -4, 301, -4, // NOLINT
- 303, -4, 305, -928, 307, -4, 309, -4, 311, -4, 314, -4, 316, -4, 318, -4, // NOLINT
- 320, -4, 322, -4, 324, -4, 326, -4, 328, -4, 329, 5, 331, -4, 333, -4, // NOLINT
- 335, -4, 337, -4, 339, -4, 341, -4, 343, -4, 345, -4, 347, -4, 349, -4, // NOLINT
- 351, -4, 353, -4, 355, -4, 357, -4, 359, -4, 361, -4, 363, -4, 365, -4, // NOLINT
- 367, -4, 369, -4, 371, -4, 373, -4, 375, -4, 378, -4, 380, -4, 382, -4, // NOLINT
- 383, -1200, 384, 780, 387, -4, 389, -4, 392, -4, 396, -4, 402, -4, 405, 388, // NOLINT
- 409, -4, 410, 652, 414, 520, 417, -4, 419, -4, 421, -4, 424, -4, 429, -4, // NOLINT
- 432, -4, 436, -4, 438, -4, 441, -4, 445, -4, 447, 224, 453, -4, 454, -8, // NOLINT
- 456, -4, 457, -8, 459, -4, 460, -8, 462, -4, 464, -4, 466, -4, 468, -4, // NOLINT
- 470, -4, 472, -4, 474, -4, 476, -4, 477, -316, 479, -4, 481, -4, 483, -4, // NOLINT
- 485, -4, 487, -4, 489, -4, 491, -4, 493, -4, 495, -4, 496, 9, 498, -4, // NOLINT
- 499, -8, 501, -4, 505, -4, 507, -4, 509, -4, 511, -4, 513, -4, 515, -4, // NOLINT
- 517, -4, 519, -4, 521, -4, 523, -4, 525, -4, 527, -4, 529, -4, 531, -4, // NOLINT
- 533, -4, 535, -4, 537, -4, 539, -4, 541, -4, 543, -4, 547, -4, 549, -4, // NOLINT
- 551, -4, 553, -4, 555, -4, 557, -4, 559, -4, 561, -4, 563, -4, 572, -4, // NOLINT
- 578, -4, 583, -4, 585, -4, 587, -4, 589, -4, 591, -4, 595, -840, 596, -824, // NOLINT
- 1073742422, -820, 599, -820, 601, -808, 603, -812, 608, -820, 611, -828, 616, -836, 617, -844, // NOLINT
- 619, 42972, 623, -844, 626, -852, 629, -856, 637, 42908, 640, -872, 643, -872, 648, -872, // NOLINT
- 649, -276, 1073742474, -868, 651, -868, 652, -284, 658, -876, 837, 336, 1073742715, 520, 893, 520, // NOLINT
- 912, 13, 940, -152, 1073742765, -148, 943, -148, 944, 17, 1073742769, -128, 961, -128, 962, -124, // NOLINT
- 1073742787, -128, 971, -128, 972, -256, 1073742797, -252, 974, -252, 976, -248, 977, -228, 981, -188, // NOLINT
- 982, -216, 985, -4, 987, -4, 989, -4, 991, -4, 993, -4, 995, -4, 997, -4, // NOLINT
- 999, -4, 1001, -4, 1003, -4, 1005, -4, 1007, -4, 1008, -344, 1009, -320, 1010, 28, // NOLINT
- 1013, -384, 1016, -4, 1019, -4, 1073742896, -128, 1103, -128, 1073742928, -320, 1119, -320, 1121, -4, // NOLINT
- 1123, -4, 1125, -4, 1127, -4, 1129, -4, 1131, -4, 1133, -4, 1135, -4, 1137, -4, // NOLINT
- 1139, -4, 1141, -4, 1143, -4, 1145, -4, 1147, -4, 1149, -4, 1151, -4, 1153, -4, // NOLINT
- 1163, -4, 1165, -4, 1167, -4, 1169, -4, 1171, -4, 1173, -4, 1175, -4, 1177, -4, // NOLINT
- 1179, -4, 1181, -4, 1183, -4, 1185, -4, 1187, -4, 1189, -4, 1191, -4, 1193, -4, // NOLINT
- 1195, -4, 1197, -4, 1199, -4, 1201, -4, 1203, -4, 1205, -4, 1207, -4, 1209, -4, // NOLINT
- 1211, -4, 1213, -4, 1215, -4, 1218, -4, 1220, -4, 1222, -4, 1224, -4, 1226, -4, // NOLINT
- 1228, -4, 1230, -4, 1231, -60, 1233, -4, 1235, -4, 1237, -4, 1239, -4, 1241, -4, // NOLINT
- 1243, -4, 1245, -4, 1247, -4, 1249, -4, 1251, -4, 1253, -4, 1255, -4, 1257, -4, // NOLINT
- 1259, -4, 1261, -4, 1263, -4, 1265, -4, 1267, -4, 1269, -4, 1271, -4, 1273, -4, // NOLINT
- 1275, -4, 1277, -4, 1279, -4, 1281, -4, 1283, -4, 1285, -4, 1287, -4, 1289, -4, // NOLINT
- 1291, -4, 1293, -4, 1295, -4, 1297, -4, 1299, -4, 1073743201, -192, 1414, -192, 1415, 21, // NOLINT
- 7549, 15256, 7681, -4, 7683, -4, 7685, -4, 7687, -4, 7689, -4, 7691, -4, 7693, -4, // NOLINT
- 7695, -4, 7697, -4, 7699, -4, 7701, -4, 7703, -4, 7705, -4, 7707, -4, 7709, -4, // NOLINT
- 7711, -4, 7713, -4, 7715, -4, 7717, -4, 7719, -4, 7721, -4, 7723, -4, 7725, -4, // NOLINT
- 7727, -4, 7729, -4, 7731, -4, 7733, -4, 7735, -4, 7737, -4, 7739, -4, 7741, -4, // NOLINT
- 7743, -4, 7745, -4, 7747, -4, 7749, -4, 7751, -4, 7753, -4, 7755, -4, 7757, -4, // NOLINT
- 7759, -4, 7761, -4, 7763, -4, 7765, -4, 7767, -4, 7769, -4, 7771, -4, 7773, -4, // NOLINT
- 7775, -4, 7777, -4, 7779, -4, 7781, -4, 7783, -4, 7785, -4, 7787, -4, 7789, -4, // NOLINT
- 7791, -4, 7793, -4, 7795, -4, 7797, -4, 7799, -4, 7801, -4, 7803, -4, 7805, -4, // NOLINT
- 7807, -4, 7809, -4, 7811, -4, 7813, -4, 7815, -4, 7817, -4, 7819, -4, 7821, -4, // NOLINT
- 7823, -4, 7825, -4, 7827, -4, 7829, -4, 7830, 25, 7831, 29, 7832, 33, 7833, 37, // NOLINT
- 7834, 41, 7835, -236, 7841, -4, 7843, -4, 7845, -4, 7847, -4, 7849, -4, 7851, -4, // NOLINT
- 7853, -4, 7855, -4, 7857, -4, 7859, -4, 7861, -4, 7863, -4, 7865, -4, 7867, -4, // NOLINT
- 7869, -4, 7871, -4, 7873, -4, 7875, -4, 7877, -4, 7879, -4, 7881, -4, 7883, -4, // NOLINT
- 7885, -4, 7887, -4, 7889, -4, 7891, -4, 7893, -4, 7895, -4, 7897, -4, 7899, -4, // NOLINT
- 7901, -4, 7903, -4, 7905, -4, 7907, -4, 7909, -4, 7911, -4, 7913, -4, 7915, -4, // NOLINT
- 7917, -4, 7919, -4, 7921, -4, 7923, -4, 7925, -4, 7927, -4, 7929, -4, 1073749760, 32, // NOLINT
- 7943, 32, 1073749776, 32, 7957, 32, 1073749792, 32, 7975, 32, 1073749808, 32, 7991, 32, 1073749824, 32, // NOLINT
- 8005, 32, 8016, 45, 8017, 32, 8018, 49, 8019, 32, 8020, 53, 8021, 32, 8022, 57, // NOLINT
- 8023, 32, 1073749856, 32, 8039, 32, 1073749872, 296, 8049, 296, 1073749874, 344, 8053, 344, 1073749878, 400, // NOLINT
- 8055, 400, 1073749880, 512, 8057, 512, 1073749882, 448, 8059, 448, 1073749884, 504, 8061, 504, 8064, 61, // NOLINT
- 8065, 65, 8066, 69, 8067, 73, 8068, 77, 8069, 81, 8070, 85, 8071, 89, 8072, 61, // NOLINT
- 8073, 65, 8074, 69, 8075, 73, 8076, 77, 8077, 81, 8078, 85, 8079, 89, 8080, 93, // NOLINT
- 8081, 97, 8082, 101, 8083, 105, 8084, 109, 8085, 113, 8086, 117, 8087, 121, 8088, 93, // NOLINT
- 8089, 97, 8090, 101, 8091, 105, 8092, 109, 8093, 113, 8094, 117, 8095, 121, 8096, 125, // NOLINT
- 8097, 129, 8098, 133, 8099, 137, 8100, 141, 8101, 145, 8102, 149, 8103, 153, 8104, 125, // NOLINT
- 8105, 129, 8106, 133, 8107, 137, 8108, 141, 8109, 145, 8110, 149, 8111, 153, 1073749936, 32, // NOLINT
- 8113, 32, 8114, 157, 8115, 161, 8116, 165, 8118, 169, 8119, 173, 8124, 161, 8126, -28820, // NOLINT
- 8130, 177, 8131, 181, 8132, 185, 8134, 189, 8135, 193, 8140, 181, 1073749968, 32, 8145, 32, // NOLINT
- 8146, 197, 8147, 13, 8150, 201, 8151, 205, 1073749984, 32, 8161, 32, 8162, 209, 8163, 17, // NOLINT
- 8164, 213, 8165, 28, 8166, 217, 8167, 221, 8178, 225, 8179, 229, 8180, 233, 8182, 237, // NOLINT
- 8183, 241, 8188, 229 }; // NOLINT
-static const uint16_t kToUppercaseMultiStrings0Size = 62; // NOLINT
-static const MultiCharacterSpecialCase<1> kToUppercaseMultiStrings1[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kToUppercaseTable1Size = 67; // NOLINT
-static const int32_t kToUppercaseTable1[134] = {
- 334, -112, 1073742192, -64, 383, -64, 388, -4, 1073743056, -104, 1257, -104, 1073744944, -192, 3166, -192, // NOLINT
- 3169, -4, 3173, -43180, 3174, -43168, 3176, -4, 3178, -4, 3180, -4, 3190, -4, 3201, -4, // NOLINT
- 3203, -4, 3205, -4, 3207, -4, 3209, -4, 3211, -4, 3213, -4, 3215, -4, 3217, -4, // NOLINT
- 3219, -4, 3221, -4, 3223, -4, 3225, -4, 3227, -4, 3229, -4, 3231, -4, 3233, -4, // NOLINT
- 3235, -4, 3237, -4, 3239, -4, 3241, -4, 3243, -4, 3245, -4, 3247, -4, 3249, -4, // NOLINT
- 3251, -4, 3253, -4, 3255, -4, 3257, -4, 3259, -4, 3261, -4, 3263, -4, 3265, -4, // NOLINT
- 3267, -4, 3269, -4, 3271, -4, 3273, -4, 3275, -4, 3277, -4, 3279, -4, 3281, -4, // NOLINT
- 3283, -4, 3285, -4, 3287, -4, 3289, -4, 3291, -4, 3293, -4, 3295, -4, 3297, -4, // NOLINT
- 3299, -4, 1073745152, -29056, 3365, -29056 }; // NOLINT
-static const uint16_t kToUppercaseMultiStrings1Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<3> kToUppercaseMultiStrings7[12] = { // NOLINT
- {{70, 70, kSentinel}}, {{70, 73, kSentinel}}, {{70, 76, kSentinel}}, {{70, 70, 73}}, // NOLINT
- {{70, 70, 76}}, {{83, 84, kSentinel}}, {{1348, 1350, kSentinel}}, {{1348, 1333, kSentinel}}, // NOLINT
- {{1348, 1339, kSentinel}}, {{1358, 1350, kSentinel}}, {{1348, 1341, kSentinel}}, {{kSentinel}} }; // NOLINT
-static const uint16_t kToUppercaseTable7Size = 14; // NOLINT
-static const int32_t kToUppercaseTable7[28] = {
- 6912, 1, 6913, 5, 6914, 9, 6915, 13, 6916, 17, 6917, 21, 6918, 21, 6931, 25, // NOLINT
- 6932, 29, 6933, 33, 6934, 37, 6935, 41, 1073749825, -128, 8026, -128 }; // NOLINT
-static const uint16_t kToUppercaseMultiStrings7Size = 12; // NOLINT
-int ToUppercase::Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupMapping<true>(kToUppercaseTable0,
- kToUppercaseTable0Size,
- kToUppercaseMultiStrings0,
- c,
- n,
- result,
- allow_caching_ptr);
- case 1: return LookupMapping<true>(kToUppercaseTable1,
- kToUppercaseTable1Size,
- kToUppercaseMultiStrings1,
- c,
- n,
- result,
- allow_caching_ptr);
- case 7: return LookupMapping<true>(kToUppercaseTable7,
- kToUppercaseTable7Size,
- kToUppercaseMultiStrings7,
- c,
- n,
- result,
- allow_caching_ptr);
- default: return 0;
- }
-}
-
-static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings0[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262CanonicalizeTable0Size = 462; // NOLINT
-static const int32_t kEcma262CanonicalizeTable0[924] = {
- 1073741921, -128, 122, -128, 181, 2972, 1073742048, -128, 246, -128, 1073742072, -128, 254, -128, 255, 484, // NOLINT
- 257, -4, 259, -4, 261, -4, 263, -4, 265, -4, 267, -4, 269, -4, 271, -4, // NOLINT
- 273, -4, 275, -4, 277, -4, 279, -4, 281, -4, 283, -4, 285, -4, 287, -4, // NOLINT
- 289, -4, 291, -4, 293, -4, 295, -4, 297, -4, 299, -4, 301, -4, 303, -4, // NOLINT
- 307, -4, 309, -4, 311, -4, 314, -4, 316, -4, 318, -4, 320, -4, 322, -4, // NOLINT
- 324, -4, 326, -4, 328, -4, 331, -4, 333, -4, 335, -4, 337, -4, 339, -4, // NOLINT
- 341, -4, 343, -4, 345, -4, 347, -4, 349, -4, 351, -4, 353, -4, 355, -4, // NOLINT
- 357, -4, 359, -4, 361, -4, 363, -4, 365, -4, 367, -4, 369, -4, 371, -4, // NOLINT
- 373, -4, 375, -4, 378, -4, 380, -4, 382, -4, 384, 780, 387, -4, 389, -4, // NOLINT
- 392, -4, 396, -4, 402, -4, 405, 388, 409, -4, 410, 652, 414, 520, 417, -4, // NOLINT
- 419, -4, 421, -4, 424, -4, 429, -4, 432, -4, 436, -4, 438, -4, 441, -4, // NOLINT
- 445, -4, 447, 224, 453, -4, 454, -8, 456, -4, 457, -8, 459, -4, 460, -8, // NOLINT
- 462, -4, 464, -4, 466, -4, 468, -4, 470, -4, 472, -4, 474, -4, 476, -4, // NOLINT
- 477, -316, 479, -4, 481, -4, 483, -4, 485, -4, 487, -4, 489, -4, 491, -4, // NOLINT
- 493, -4, 495, -4, 498, -4, 499, -8, 501, -4, 505, -4, 507, -4, 509, -4, // NOLINT
- 511, -4, 513, -4, 515, -4, 517, -4, 519, -4, 521, -4, 523, -4, 525, -4, // NOLINT
- 527, -4, 529, -4, 531, -4, 533, -4, 535, -4, 537, -4, 539, -4, 541, -4, // NOLINT
- 543, -4, 547, -4, 549, -4, 551, -4, 553, -4, 555, -4, 557, -4, 559, -4, // NOLINT
- 561, -4, 563, -4, 572, -4, 578, -4, 583, -4, 585, -4, 587, -4, 589, -4, // NOLINT
- 591, -4, 595, -840, 596, -824, 1073742422, -820, 599, -820, 601, -808, 603, -812, 608, -820, // NOLINT
- 611, -828, 616, -836, 617, -844, 619, 42972, 623, -844, 626, -852, 629, -856, 637, 42908, // NOLINT
- 640, -872, 643, -872, 648, -872, 649, -276, 1073742474, -868, 651, -868, 652, -284, 658, -876, // NOLINT
- 837, 336, 1073742715, 520, 893, 520, 940, -152, 1073742765, -148, 943, -148, 1073742769, -128, 961, -128, // NOLINT
- 962, -124, 1073742787, -128, 971, -128, 972, -256, 1073742797, -252, 974, -252, 976, -248, 977, -228, // NOLINT
- 981, -188, 982, -216, 985, -4, 987, -4, 989, -4, 991, -4, 993, -4, 995, -4, // NOLINT
- 997, -4, 999, -4, 1001, -4, 1003, -4, 1005, -4, 1007, -4, 1008, -344, 1009, -320, // NOLINT
- 1010, 28, 1013, -384, 1016, -4, 1019, -4, 1073742896, -128, 1103, -128, 1073742928, -320, 1119, -320, // NOLINT
- 1121, -4, 1123, -4, 1125, -4, 1127, -4, 1129, -4, 1131, -4, 1133, -4, 1135, -4, // NOLINT
- 1137, -4, 1139, -4, 1141, -4, 1143, -4, 1145, -4, 1147, -4, 1149, -4, 1151, -4, // NOLINT
- 1153, -4, 1163, -4, 1165, -4, 1167, -4, 1169, -4, 1171, -4, 1173, -4, 1175, -4, // NOLINT
- 1177, -4, 1179, -4, 1181, -4, 1183, -4, 1185, -4, 1187, -4, 1189, -4, 1191, -4, // NOLINT
- 1193, -4, 1195, -4, 1197, -4, 1199, -4, 1201, -4, 1203, -4, 1205, -4, 1207, -4, // NOLINT
- 1209, -4, 1211, -4, 1213, -4, 1215, -4, 1218, -4, 1220, -4, 1222, -4, 1224, -4, // NOLINT
- 1226, -4, 1228, -4, 1230, -4, 1231, -60, 1233, -4, 1235, -4, 1237, -4, 1239, -4, // NOLINT
- 1241, -4, 1243, -4, 1245, -4, 1247, -4, 1249, -4, 1251, -4, 1253, -4, 1255, -4, // NOLINT
- 1257, -4, 1259, -4, 1261, -4, 1263, -4, 1265, -4, 1267, -4, 1269, -4, 1271, -4, // NOLINT
- 1273, -4, 1275, -4, 1277, -4, 1279, -4, 1281, -4, 1283, -4, 1285, -4, 1287, -4, // NOLINT
- 1289, -4, 1291, -4, 1293, -4, 1295, -4, 1297, -4, 1299, -4, 1073743201, -192, 1414, -192, // NOLINT
- 7549, 15256, 7681, -4, 7683, -4, 7685, -4, 7687, -4, 7689, -4, 7691, -4, 7693, -4, // NOLINT
- 7695, -4, 7697, -4, 7699, -4, 7701, -4, 7703, -4, 7705, -4, 7707, -4, 7709, -4, // NOLINT
- 7711, -4, 7713, -4, 7715, -4, 7717, -4, 7719, -4, 7721, -4, 7723, -4, 7725, -4, // NOLINT
- 7727, -4, 7729, -4, 7731, -4, 7733, -4, 7735, -4, 7737, -4, 7739, -4, 7741, -4, // NOLINT
- 7743, -4, 7745, -4, 7747, -4, 7749, -4, 7751, -4, 7753, -4, 7755, -4, 7757, -4, // NOLINT
- 7759, -4, 7761, -4, 7763, -4, 7765, -4, 7767, -4, 7769, -4, 7771, -4, 7773, -4, // NOLINT
- 7775, -4, 7777, -4, 7779, -4, 7781, -4, 7783, -4, 7785, -4, 7787, -4, 7789, -4, // NOLINT
- 7791, -4, 7793, -4, 7795, -4, 7797, -4, 7799, -4, 7801, -4, 7803, -4, 7805, -4, // NOLINT
- 7807, -4, 7809, -4, 7811, -4, 7813, -4, 7815, -4, 7817, -4, 7819, -4, 7821, -4, // NOLINT
- 7823, -4, 7825, -4, 7827, -4, 7829, -4, 7835, -236, 7841, -4, 7843, -4, 7845, -4, // NOLINT
- 7847, -4, 7849, -4, 7851, -4, 7853, -4, 7855, -4, 7857, -4, 7859, -4, 7861, -4, // NOLINT
- 7863, -4, 7865, -4, 7867, -4, 7869, -4, 7871, -4, 7873, -4, 7875, -4, 7877, -4, // NOLINT
- 7879, -4, 7881, -4, 7883, -4, 7885, -4, 7887, -4, 7889, -4, 7891, -4, 7893, -4, // NOLINT
- 7895, -4, 7897, -4, 7899, -4, 7901, -4, 7903, -4, 7905, -4, 7907, -4, 7909, -4, // NOLINT
- 7911, -4, 7913, -4, 7915, -4, 7917, -4, 7919, -4, 7921, -4, 7923, -4, 7925, -4, // NOLINT
- 7927, -4, 7929, -4, 1073749760, 32, 7943, 32, 1073749776, 32, 7957, 32, 1073749792, 32, 7975, 32, // NOLINT
- 1073749808, 32, 7991, 32, 1073749824, 32, 8005, 32, 8017, 32, 8019, 32, 8021, 32, 8023, 32, // NOLINT
- 1073749856, 32, 8039, 32, 1073749872, 296, 8049, 296, 1073749874, 344, 8053, 344, 1073749878, 400, 8055, 400, // NOLINT
- 1073749880, 512, 8057, 512, 1073749882, 448, 8059, 448, 1073749884, 504, 8061, 504, 1073749936, 32, 8113, 32, // NOLINT
- 8126, -28820, 1073749968, 32, 8145, 32, 1073749984, 32, 8161, 32, 8165, 28 }; // NOLINT
-static const uint16_t kEcma262CanonicalizeMultiStrings0Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings1[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262CanonicalizeTable1Size = 67; // NOLINT
-static const int32_t kEcma262CanonicalizeTable1[134] = {
- 334, -112, 1073742192, -64, 383, -64, 388, -4, 1073743056, -104, 1257, -104, 1073744944, -192, 3166, -192, // NOLINT
- 3169, -4, 3173, -43180, 3174, -43168, 3176, -4, 3178, -4, 3180, -4, 3190, -4, 3201, -4, // NOLINT
- 3203, -4, 3205, -4, 3207, -4, 3209, -4, 3211, -4, 3213, -4, 3215, -4, 3217, -4, // NOLINT
- 3219, -4, 3221, -4, 3223, -4, 3225, -4, 3227, -4, 3229, -4, 3231, -4, 3233, -4, // NOLINT
- 3235, -4, 3237, -4, 3239, -4, 3241, -4, 3243, -4, 3245, -4, 3247, -4, 3249, -4, // NOLINT
- 3251, -4, 3253, -4, 3255, -4, 3257, -4, 3259, -4, 3261, -4, 3263, -4, 3265, -4, // NOLINT
- 3267, -4, 3269, -4, 3271, -4, 3273, -4, 3275, -4, 3277, -4, 3279, -4, 3281, -4, // NOLINT
- 3283, -4, 3285, -4, 3287, -4, 3289, -4, 3291, -4, 3293, -4, 3295, -4, 3297, -4, // NOLINT
- 3299, -4, 1073745152, -29056, 3365, -29056 }; // NOLINT
-static const uint16_t kEcma262CanonicalizeMultiStrings1Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings7[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262CanonicalizeTable7Size = 2; // NOLINT
-static const int32_t kEcma262CanonicalizeTable7[4] = {
- 1073749825, -128, 8026, -128 }; // NOLINT
-static const uint16_t kEcma262CanonicalizeMultiStrings7Size = 1; // NOLINT
-int Ecma262Canonicalize::Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupMapping<true>(kEcma262CanonicalizeTable0,
- kEcma262CanonicalizeTable0Size,
- kEcma262CanonicalizeMultiStrings0,
- c,
- n,
- result,
- allow_caching_ptr);
- case 1: return LookupMapping<true>(kEcma262CanonicalizeTable1,
- kEcma262CanonicalizeTable1Size,
- kEcma262CanonicalizeMultiStrings1,
- c,
- n,
- result,
- allow_caching_ptr);
- case 7: return LookupMapping<true>(kEcma262CanonicalizeTable7,
- kEcma262CanonicalizeTable7Size,
- kEcma262CanonicalizeMultiStrings7,
- c,
- n,
- result,
- allow_caching_ptr);
- default: return 0;
- }
-}
-
-static const MultiCharacterSpecialCase<4> kEcma262UnCanonicalizeMultiStrings0[469] = { // NOLINT
- {{65, 97, kSentinel}}, {{90, 122, kSentinel}}, {{181, 924, 956, kSentinel}}, {{192, 224, kSentinel}}, // NOLINT
- {{214, 246, kSentinel}}, {{216, 248, kSentinel}}, {{222, 254, kSentinel}}, {{255, 376, kSentinel}}, // NOLINT
- {{256, 257, kSentinel}}, {{258, 259, kSentinel}}, {{260, 261, kSentinel}}, {{262, 263, kSentinel}}, // NOLINT
- {{264, 265, kSentinel}}, {{266, 267, kSentinel}}, {{268, 269, kSentinel}}, {{270, 271, kSentinel}}, // NOLINT
- {{272, 273, kSentinel}}, {{274, 275, kSentinel}}, {{276, 277, kSentinel}}, {{278, 279, kSentinel}}, // NOLINT
- {{280, 281, kSentinel}}, {{282, 283, kSentinel}}, {{284, 285, kSentinel}}, {{286, 287, kSentinel}}, // NOLINT
- {{288, 289, kSentinel}}, {{290, 291, kSentinel}}, {{292, 293, kSentinel}}, {{294, 295, kSentinel}}, // NOLINT
- {{296, 297, kSentinel}}, {{298, 299, kSentinel}}, {{300, 301, kSentinel}}, {{302, 303, kSentinel}}, // NOLINT
- {{306, 307, kSentinel}}, {{308, 309, kSentinel}}, {{310, 311, kSentinel}}, {{313, 314, kSentinel}}, // NOLINT
- {{315, 316, kSentinel}}, {{317, 318, kSentinel}}, {{319, 320, kSentinel}}, {{321, 322, kSentinel}}, // NOLINT
- {{323, 324, kSentinel}}, {{325, 326, kSentinel}}, {{327, 328, kSentinel}}, {{330, 331, kSentinel}}, // NOLINT
- {{332, 333, kSentinel}}, {{334, 335, kSentinel}}, {{336, 337, kSentinel}}, {{338, 339, kSentinel}}, // NOLINT
- {{340, 341, kSentinel}}, {{342, 343, kSentinel}}, {{344, 345, kSentinel}}, {{346, 347, kSentinel}}, // NOLINT
- {{348, 349, kSentinel}}, {{350, 351, kSentinel}}, {{352, 353, kSentinel}}, {{354, 355, kSentinel}}, // NOLINT
- {{356, 357, kSentinel}}, {{358, 359, kSentinel}}, {{360, 361, kSentinel}}, {{362, 363, kSentinel}}, // NOLINT
- {{364, 365, kSentinel}}, {{366, 367, kSentinel}}, {{368, 369, kSentinel}}, {{370, 371, kSentinel}}, // NOLINT
- {{372, 373, kSentinel}}, {{374, 375, kSentinel}}, {{377, 378, kSentinel}}, {{379, 380, kSentinel}}, // NOLINT
- {{381, 382, kSentinel}}, {{384, 579, kSentinel}}, {{385, 595, kSentinel}}, {{386, 387, kSentinel}}, // NOLINT
- {{388, 389, kSentinel}}, {{390, 596, kSentinel}}, {{391, 392, kSentinel}}, {{393, 598, kSentinel}}, // NOLINT
- {{394, 599, kSentinel}}, {{395, 396, kSentinel}}, {{398, 477, kSentinel}}, {{399, 601, kSentinel}}, // NOLINT
- {{400, 603, kSentinel}}, {{401, 402, kSentinel}}, {{403, 608, kSentinel}}, {{404, 611, kSentinel}}, // NOLINT
- {{405, 502, kSentinel}}, {{406, 617, kSentinel}}, {{407, 616, kSentinel}}, {{408, 409, kSentinel}}, // NOLINT
- {{410, 573, kSentinel}}, {{412, 623, kSentinel}}, {{413, 626, kSentinel}}, {{414, 544, kSentinel}}, // NOLINT
- {{415, 629, kSentinel}}, {{416, 417, kSentinel}}, {{418, 419, kSentinel}}, {{420, 421, kSentinel}}, // NOLINT
- {{422, 640, kSentinel}}, {{423, 424, kSentinel}}, {{425, 643, kSentinel}}, {{428, 429, kSentinel}}, // NOLINT
- {{430, 648, kSentinel}}, {{431, 432, kSentinel}}, {{433, 650, kSentinel}}, {{434, 651, kSentinel}}, // NOLINT
- {{435, 436, kSentinel}}, {{437, 438, kSentinel}}, {{439, 658, kSentinel}}, {{440, 441, kSentinel}}, // NOLINT
- {{444, 445, kSentinel}}, {{447, 503, kSentinel}}, {{452, 453, 454, kSentinel}}, {{455, 456, 457, kSentinel}}, // NOLINT
- {{458, 459, 460, kSentinel}}, {{461, 462, kSentinel}}, {{463, 464, kSentinel}}, {{465, 466, kSentinel}}, // NOLINT
- {{467, 468, kSentinel}}, {{469, 470, kSentinel}}, {{471, 472, kSentinel}}, {{473, 474, kSentinel}}, // NOLINT
- {{475, 476, kSentinel}}, {{478, 479, kSentinel}}, {{480, 481, kSentinel}}, {{482, 483, kSentinel}}, // NOLINT
- {{484, 485, kSentinel}}, {{486, 487, kSentinel}}, {{488, 489, kSentinel}}, {{490, 491, kSentinel}}, // NOLINT
- {{492, 493, kSentinel}}, {{494, 495, kSentinel}}, {{497, 498, 499, kSentinel}}, {{500, 501, kSentinel}}, // NOLINT
- {{504, 505, kSentinel}}, {{506, 507, kSentinel}}, {{508, 509, kSentinel}}, {{510, 511, kSentinel}}, // NOLINT
- {{512, 513, kSentinel}}, {{514, 515, kSentinel}}, {{516, 517, kSentinel}}, {{518, 519, kSentinel}}, // NOLINT
- {{520, 521, kSentinel}}, {{522, 523, kSentinel}}, {{524, 525, kSentinel}}, {{526, 527, kSentinel}}, // NOLINT
- {{528, 529, kSentinel}}, {{530, 531, kSentinel}}, {{532, 533, kSentinel}}, {{534, 535, kSentinel}}, // NOLINT
- {{536, 537, kSentinel}}, {{538, 539, kSentinel}}, {{540, 541, kSentinel}}, {{542, 543, kSentinel}}, // NOLINT
- {{546, 547, kSentinel}}, {{548, 549, kSentinel}}, {{550, 551, kSentinel}}, {{552, 553, kSentinel}}, // NOLINT
- {{554, 555, kSentinel}}, {{556, 557, kSentinel}}, {{558, 559, kSentinel}}, {{560, 561, kSentinel}}, // NOLINT
- {{562, 563, kSentinel}}, {{570, 11365, kSentinel}}, {{571, 572, kSentinel}}, {{574, 11366, kSentinel}}, // NOLINT
- {{577, 578, kSentinel}}, {{580, 649, kSentinel}}, {{581, 652, kSentinel}}, {{582, 583, kSentinel}}, // NOLINT
- {{584, 585, kSentinel}}, {{586, 587, kSentinel}}, {{588, 589, kSentinel}}, {{590, 591, kSentinel}}, // NOLINT
- {{619, 11362, kSentinel}}, {{637, 11364, kSentinel}}, {{837, 921, 953, 8126}}, {{891, 1021, kSentinel}}, // NOLINT
- {{893, 1023, kSentinel}}, {{902, 940, kSentinel}}, {{904, 941, kSentinel}}, {{906, 943, kSentinel}}, // NOLINT
- {{908, 972, kSentinel}}, {{910, 973, kSentinel}}, {{911, 974, kSentinel}}, {{913, 945, kSentinel}}, // NOLINT
- {{914, 946, 976, kSentinel}}, {{915, 947, kSentinel}}, {{916, 948, kSentinel}}, {{917, 949, 1013, kSentinel}}, // NOLINT
- {{918, 950, kSentinel}}, {{919, 951, kSentinel}}, {{920, 952, 977, kSentinel}}, {{922, 954, 1008, kSentinel}}, // NOLINT
- {{923, 955, kSentinel}}, {{925, 957, kSentinel}}, {{927, 959, kSentinel}}, {{928, 960, 982, kSentinel}}, // NOLINT
- {{929, 961, 1009, kSentinel}}, {{931, 962, 963, kSentinel}}, {{932, 964, kSentinel}}, {{933, 965, kSentinel}}, // NOLINT
- {{934, 966, 981, kSentinel}}, {{935, 967, kSentinel}}, {{939, 971, kSentinel}}, {{984, 985, kSentinel}}, // NOLINT
- {{986, 987, kSentinel}}, {{988, 989, kSentinel}}, {{990, 991, kSentinel}}, {{992, 993, kSentinel}}, // NOLINT
- {{994, 995, kSentinel}}, {{996, 997, kSentinel}}, {{998, 999, kSentinel}}, {{1000, 1001, kSentinel}}, // NOLINT
- {{1002, 1003, kSentinel}}, {{1004, 1005, kSentinel}}, {{1006, 1007, kSentinel}}, {{1010, 1017, kSentinel}}, // NOLINT
- {{1015, 1016, kSentinel}}, {{1018, 1019, kSentinel}}, {{1024, 1104, kSentinel}}, {{1039, 1119, kSentinel}}, // NOLINT
- {{1040, 1072, kSentinel}}, {{1071, 1103, kSentinel}}, {{1120, 1121, kSentinel}}, {{1122, 1123, kSentinel}}, // NOLINT
- {{1124, 1125, kSentinel}}, {{1126, 1127, kSentinel}}, {{1128, 1129, kSentinel}}, {{1130, 1131, kSentinel}}, // NOLINT
- {{1132, 1133, kSentinel}}, {{1134, 1135, kSentinel}}, {{1136, 1137, kSentinel}}, {{1138, 1139, kSentinel}}, // NOLINT
- {{1140, 1141, kSentinel}}, {{1142, 1143, kSentinel}}, {{1144, 1145, kSentinel}}, {{1146, 1147, kSentinel}}, // NOLINT
- {{1148, 1149, kSentinel}}, {{1150, 1151, kSentinel}}, {{1152, 1153, kSentinel}}, {{1162, 1163, kSentinel}}, // NOLINT
- {{1164, 1165, kSentinel}}, {{1166, 1167, kSentinel}}, {{1168, 1169, kSentinel}}, {{1170, 1171, kSentinel}}, // NOLINT
- {{1172, 1173, kSentinel}}, {{1174, 1175, kSentinel}}, {{1176, 1177, kSentinel}}, {{1178, 1179, kSentinel}}, // NOLINT
- {{1180, 1181, kSentinel}}, {{1182, 1183, kSentinel}}, {{1184, 1185, kSentinel}}, {{1186, 1187, kSentinel}}, // NOLINT
- {{1188, 1189, kSentinel}}, {{1190, 1191, kSentinel}}, {{1192, 1193, kSentinel}}, {{1194, 1195, kSentinel}}, // NOLINT
- {{1196, 1197, kSentinel}}, {{1198, 1199, kSentinel}}, {{1200, 1201, kSentinel}}, {{1202, 1203, kSentinel}}, // NOLINT
- {{1204, 1205, kSentinel}}, {{1206, 1207, kSentinel}}, {{1208, 1209, kSentinel}}, {{1210, 1211, kSentinel}}, // NOLINT
- {{1212, 1213, kSentinel}}, {{1214, 1215, kSentinel}}, {{1216, 1231, kSentinel}}, {{1217, 1218, kSentinel}}, // NOLINT
- {{1219, 1220, kSentinel}}, {{1221, 1222, kSentinel}}, {{1223, 1224, kSentinel}}, {{1225, 1226, kSentinel}}, // NOLINT
- {{1227, 1228, kSentinel}}, {{1229, 1230, kSentinel}}, {{1232, 1233, kSentinel}}, {{1234, 1235, kSentinel}}, // NOLINT
- {{1236, 1237, kSentinel}}, {{1238, 1239, kSentinel}}, {{1240, 1241, kSentinel}}, {{1242, 1243, kSentinel}}, // NOLINT
- {{1244, 1245, kSentinel}}, {{1246, 1247, kSentinel}}, {{1248, 1249, kSentinel}}, {{1250, 1251, kSentinel}}, // NOLINT
- {{1252, 1253, kSentinel}}, {{1254, 1255, kSentinel}}, {{1256, 1257, kSentinel}}, {{1258, 1259, kSentinel}}, // NOLINT
- {{1260, 1261, kSentinel}}, {{1262, 1263, kSentinel}}, {{1264, 1265, kSentinel}}, {{1266, 1267, kSentinel}}, // NOLINT
- {{1268, 1269, kSentinel}}, {{1270, 1271, kSentinel}}, {{1272, 1273, kSentinel}}, {{1274, 1275, kSentinel}}, // NOLINT
- {{1276, 1277, kSentinel}}, {{1278, 1279, kSentinel}}, {{1280, 1281, kSentinel}}, {{1282, 1283, kSentinel}}, // NOLINT
- {{1284, 1285, kSentinel}}, {{1286, 1287, kSentinel}}, {{1288, 1289, kSentinel}}, {{1290, 1291, kSentinel}}, // NOLINT
- {{1292, 1293, kSentinel}}, {{1294, 1295, kSentinel}}, {{1296, 1297, kSentinel}}, {{1298, 1299, kSentinel}}, // NOLINT
- {{1329, 1377, kSentinel}}, {{1366, 1414, kSentinel}}, {{4256, 11520, kSentinel}}, {{4293, 11557, kSentinel}}, // NOLINT
- {{7549, 11363, kSentinel}}, {{7680, 7681, kSentinel}}, {{7682, 7683, kSentinel}}, {{7684, 7685, kSentinel}}, // NOLINT
- {{7686, 7687, kSentinel}}, {{7688, 7689, kSentinel}}, {{7690, 7691, kSentinel}}, {{7692, 7693, kSentinel}}, // NOLINT
- {{7694, 7695, kSentinel}}, {{7696, 7697, kSentinel}}, {{7698, 7699, kSentinel}}, {{7700, 7701, kSentinel}}, // NOLINT
- {{7702, 7703, kSentinel}}, {{7704, 7705, kSentinel}}, {{7706, 7707, kSentinel}}, {{7708, 7709, kSentinel}}, // NOLINT
- {{7710, 7711, kSentinel}}, {{7712, 7713, kSentinel}}, {{7714, 7715, kSentinel}}, {{7716, 7717, kSentinel}}, // NOLINT
- {{7718, 7719, kSentinel}}, {{7720, 7721, kSentinel}}, {{7722, 7723, kSentinel}}, {{7724, 7725, kSentinel}}, // NOLINT
- {{7726, 7727, kSentinel}}, {{7728, 7729, kSentinel}}, {{7730, 7731, kSentinel}}, {{7732, 7733, kSentinel}}, // NOLINT
- {{7734, 7735, kSentinel}}, {{7736, 7737, kSentinel}}, {{7738, 7739, kSentinel}}, {{7740, 7741, kSentinel}}, // NOLINT
- {{7742, 7743, kSentinel}}, {{7744, 7745, kSentinel}}, {{7746, 7747, kSentinel}}, {{7748, 7749, kSentinel}}, // NOLINT
- {{7750, 7751, kSentinel}}, {{7752, 7753, kSentinel}}, {{7754, 7755, kSentinel}}, {{7756, 7757, kSentinel}}, // NOLINT
- {{7758, 7759, kSentinel}}, {{7760, 7761, kSentinel}}, {{7762, 7763, kSentinel}}, {{7764, 7765, kSentinel}}, // NOLINT
- {{7766, 7767, kSentinel}}, {{7768, 7769, kSentinel}}, {{7770, 7771, kSentinel}}, {{7772, 7773, kSentinel}}, // NOLINT
- {{7774, 7775, kSentinel}}, {{7776, 7777, 7835, kSentinel}}, {{7778, 7779, kSentinel}}, {{7780, 7781, kSentinel}}, // NOLINT
- {{7782, 7783, kSentinel}}, {{7784, 7785, kSentinel}}, {{7786, 7787, kSentinel}}, {{7788, 7789, kSentinel}}, // NOLINT
- {{7790, 7791, kSentinel}}, {{7792, 7793, kSentinel}}, {{7794, 7795, kSentinel}}, {{7796, 7797, kSentinel}}, // NOLINT
- {{7798, 7799, kSentinel}}, {{7800, 7801, kSentinel}}, {{7802, 7803, kSentinel}}, {{7804, 7805, kSentinel}}, // NOLINT
- {{7806, 7807, kSentinel}}, {{7808, 7809, kSentinel}}, {{7810, 7811, kSentinel}}, {{7812, 7813, kSentinel}}, // NOLINT
- {{7814, 7815, kSentinel}}, {{7816, 7817, kSentinel}}, {{7818, 7819, kSentinel}}, {{7820, 7821, kSentinel}}, // NOLINT
- {{7822, 7823, kSentinel}}, {{7824, 7825, kSentinel}}, {{7826, 7827, kSentinel}}, {{7828, 7829, kSentinel}}, // NOLINT
- {{7840, 7841, kSentinel}}, {{7842, 7843, kSentinel}}, {{7844, 7845, kSentinel}}, {{7846, 7847, kSentinel}}, // NOLINT
- {{7848, 7849, kSentinel}}, {{7850, 7851, kSentinel}}, {{7852, 7853, kSentinel}}, {{7854, 7855, kSentinel}}, // NOLINT
- {{7856, 7857, kSentinel}}, {{7858, 7859, kSentinel}}, {{7860, 7861, kSentinel}}, {{7862, 7863, kSentinel}}, // NOLINT
- {{7864, 7865, kSentinel}}, {{7866, 7867, kSentinel}}, {{7868, 7869, kSentinel}}, {{7870, 7871, kSentinel}}, // NOLINT
- {{7872, 7873, kSentinel}}, {{7874, 7875, kSentinel}}, {{7876, 7877, kSentinel}}, {{7878, 7879, kSentinel}}, // NOLINT
- {{7880, 7881, kSentinel}}, {{7882, 7883, kSentinel}}, {{7884, 7885, kSentinel}}, {{7886, 7887, kSentinel}}, // NOLINT
- {{7888, 7889, kSentinel}}, {{7890, 7891, kSentinel}}, {{7892, 7893, kSentinel}}, {{7894, 7895, kSentinel}}, // NOLINT
- {{7896, 7897, kSentinel}}, {{7898, 7899, kSentinel}}, {{7900, 7901, kSentinel}}, {{7902, 7903, kSentinel}}, // NOLINT
- {{7904, 7905, kSentinel}}, {{7906, 7907, kSentinel}}, {{7908, 7909, kSentinel}}, {{7910, 7911, kSentinel}}, // NOLINT
- {{7912, 7913, kSentinel}}, {{7914, 7915, kSentinel}}, {{7916, 7917, kSentinel}}, {{7918, 7919, kSentinel}}, // NOLINT
- {{7920, 7921, kSentinel}}, {{7922, 7923, kSentinel}}, {{7924, 7925, kSentinel}}, {{7926, 7927, kSentinel}}, // NOLINT
- {{7928, 7929, kSentinel}}, {{7936, 7944, kSentinel}}, {{7943, 7951, kSentinel}}, {{7952, 7960, kSentinel}}, // NOLINT
- {{7957, 7965, kSentinel}}, {{7968, 7976, kSentinel}}, {{7975, 7983, kSentinel}}, {{7984, 7992, kSentinel}}, // NOLINT
- {{7991, 7999, kSentinel}}, {{8000, 8008, kSentinel}}, {{8005, 8013, kSentinel}}, {{8017, 8025, kSentinel}}, // NOLINT
- {{8019, 8027, kSentinel}}, {{8021, 8029, kSentinel}}, {{8023, 8031, kSentinel}}, {{8032, 8040, kSentinel}}, // NOLINT
- {{8039, 8047, kSentinel}}, {{8048, 8122, kSentinel}}, {{8049, 8123, kSentinel}}, {{8050, 8136, kSentinel}}, // NOLINT
- {{8053, 8139, kSentinel}}, {{8054, 8154, kSentinel}}, {{8055, 8155, kSentinel}}, {{8056, 8184, kSentinel}}, // NOLINT
- {{8057, 8185, kSentinel}}, {{8058, 8170, kSentinel}}, {{8059, 8171, kSentinel}}, {{8060, 8186, kSentinel}}, // NOLINT
- {{8061, 8187, kSentinel}}, {{8112, 8120, kSentinel}}, {{8113, 8121, kSentinel}}, {{8144, 8152, kSentinel}}, // NOLINT
- {{8145, 8153, kSentinel}}, {{8160, 8168, kSentinel}}, {{8161, 8169, kSentinel}}, {{8165, 8172, kSentinel}}, // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeTable0Size = 945; // NOLINT
-static const int32_t kEcma262UnCanonicalizeTable0[1890] = {
- 1073741889, 1, 90, 5, 1073741921, 1, 122, 5, 181, 9, 1073742016, 13, 214, 17, 1073742040, 21, // NOLINT
- 222, 25, 1073742048, 13, 246, 17, 1073742072, 21, 254, 25, 255, 29, 256, 33, 257, 33, // NOLINT
- 258, 37, 259, 37, 260, 41, 261, 41, 262, 45, 263, 45, 264, 49, 265, 49, // NOLINT
- 266, 53, 267, 53, 268, 57, 269, 57, 270, 61, 271, 61, 272, 65, 273, 65, // NOLINT
- 274, 69, 275, 69, 276, 73, 277, 73, 278, 77, 279, 77, 280, 81, 281, 81, // NOLINT
- 282, 85, 283, 85, 284, 89, 285, 89, 286, 93, 287, 93, 288, 97, 289, 97, // NOLINT
- 290, 101, 291, 101, 292, 105, 293, 105, 294, 109, 295, 109, 296, 113, 297, 113, // NOLINT
- 298, 117, 299, 117, 300, 121, 301, 121, 302, 125, 303, 125, 306, 129, 307, 129, // NOLINT
- 308, 133, 309, 133, 310, 137, 311, 137, 313, 141, 314, 141, 315, 145, 316, 145, // NOLINT
- 317, 149, 318, 149, 319, 153, 320, 153, 321, 157, 322, 157, 323, 161, 324, 161, // NOLINT
- 325, 165, 326, 165, 327, 169, 328, 169, 330, 173, 331, 173, 332, 177, 333, 177, // NOLINT
- 334, 181, 335, 181, 336, 185, 337, 185, 338, 189, 339, 189, 340, 193, 341, 193, // NOLINT
- 342, 197, 343, 197, 344, 201, 345, 201, 346, 205, 347, 205, 348, 209, 349, 209, // NOLINT
- 350, 213, 351, 213, 352, 217, 353, 217, 354, 221, 355, 221, 356, 225, 357, 225, // NOLINT
- 358, 229, 359, 229, 360, 233, 361, 233, 362, 237, 363, 237, 364, 241, 365, 241, // NOLINT
- 366, 245, 367, 245, 368, 249, 369, 249, 370, 253, 371, 253, 372, 257, 373, 257, // NOLINT
- 374, 261, 375, 261, 376, 29, 377, 265, 378, 265, 379, 269, 380, 269, 381, 273, // NOLINT
- 382, 273, 384, 277, 385, 281, 386, 285, 387, 285, 388, 289, 389, 289, 390, 293, // NOLINT
- 391, 297, 392, 297, 1073742217, 301, 394, 305, 395, 309, 396, 309, 398, 313, 399, 317, // NOLINT
- 400, 321, 401, 325, 402, 325, 403, 329, 404, 333, 405, 337, 406, 341, 407, 345, // NOLINT
- 408, 349, 409, 349, 410, 353, 412, 357, 413, 361, 414, 365, 415, 369, 416, 373, // NOLINT
- 417, 373, 418, 377, 419, 377, 420, 381, 421, 381, 422, 385, 423, 389, 424, 389, // NOLINT
- 425, 393, 428, 397, 429, 397, 430, 401, 431, 405, 432, 405, 1073742257, 409, 434, 413, // NOLINT
- 435, 417, 436, 417, 437, 421, 438, 421, 439, 425, 440, 429, 441, 429, 444, 433, // NOLINT
- 445, 433, 447, 437, 452, 441, 453, 441, 454, 441, 455, 445, 456, 445, 457, 445, // NOLINT
- 458, 449, 459, 449, 460, 449, 461, 453, 462, 453, 463, 457, 464, 457, 465, 461, // NOLINT
- 466, 461, 467, 465, 468, 465, 469, 469, 470, 469, 471, 473, 472, 473, 473, 477, // NOLINT
- 474, 477, 475, 481, 476, 481, 477, 313, 478, 485, 479, 485, 480, 489, 481, 489, // NOLINT
- 482, 493, 483, 493, 484, 497, 485, 497, 486, 501, 487, 501, 488, 505, 489, 505, // NOLINT
- 490, 509, 491, 509, 492, 513, 493, 513, 494, 517, 495, 517, 497, 521, 498, 521, // NOLINT
- 499, 521, 500, 525, 501, 525, 502, 337, 503, 437, 504, 529, 505, 529, 506, 533, // NOLINT
- 507, 533, 508, 537, 509, 537, 510, 541, 511, 541, 512, 545, 513, 545, 514, 549, // NOLINT
- 515, 549, 516, 553, 517, 553, 518, 557, 519, 557, 520, 561, 521, 561, 522, 565, // NOLINT
- 523, 565, 524, 569, 525, 569, 526, 573, 527, 573, 528, 577, 529, 577, 530, 581, // NOLINT
- 531, 581, 532, 585, 533, 585, 534, 589, 535, 589, 536, 593, 537, 593, 538, 597, // NOLINT
- 539, 597, 540, 601, 541, 601, 542, 605, 543, 605, 544, 365, 546, 609, 547, 609, // NOLINT
- 548, 613, 549, 613, 550, 617, 551, 617, 552, 621, 553, 621, 554, 625, 555, 625, // NOLINT
- 556, 629, 557, 629, 558, 633, 559, 633, 560, 637, 561, 637, 562, 641, 563, 641, // NOLINT
- 570, 645, 571, 649, 572, 649, 573, 353, 574, 653, 577, 657, 578, 657, 579, 277, // NOLINT
- 580, 661, 581, 665, 582, 669, 583, 669, 584, 673, 585, 673, 586, 677, 587, 677, // NOLINT
- 588, 681, 589, 681, 590, 685, 591, 685, 595, 281, 596, 293, 1073742422, 301, 599, 305, // NOLINT
- 601, 317, 603, 321, 608, 329, 611, 333, 616, 345, 617, 341, 619, 689, 623, 357, // NOLINT
- 626, 361, 629, 369, 637, 693, 640, 385, 643, 393, 648, 401, 649, 661, 1073742474, 409, // NOLINT
- 651, 413, 652, 665, 658, 425, 837, 697, 1073742715, 701, 893, 705, 902, 709, 1073742728, 713, // NOLINT
- 906, 717, 908, 721, 1073742734, 725, 911, 729, 913, 733, 914, 737, 1073742739, 741, 916, 745, // NOLINT
- 917, 749, 1073742742, 753, 919, 757, 920, 761, 921, 697, 922, 765, 923, 769, 924, 9, // NOLINT
- 1073742749, 773, 927, 777, 928, 781, 929, 785, 931, 789, 1073742756, 793, 933, 797, 934, 801, // NOLINT
- 1073742759, 805, 939, 809, 940, 709, 1073742765, 713, 943, 717, 945, 733, 946, 737, 1073742771, 741, // NOLINT
- 948, 745, 949, 749, 1073742774, 753, 951, 757, 952, 761, 953, 697, 954, 765, 955, 769, // NOLINT
- 956, 9, 1073742781, 773, 959, 777, 960, 781, 961, 785, 962, 789, 963, 789, 1073742788, 793, // NOLINT
- 965, 797, 966, 801, 1073742791, 805, 971, 809, 972, 721, 1073742797, 725, 974, 729, 976, 737, // NOLINT
- 977, 761, 981, 801, 982, 781, 984, 813, 985, 813, 986, 817, 987, 817, 988, 821, // NOLINT
- 989, 821, 990, 825, 991, 825, 992, 829, 993, 829, 994, 833, 995, 833, 996, 837, // NOLINT
- 997, 837, 998, 841, 999, 841, 1000, 845, 1001, 845, 1002, 849, 1003, 849, 1004, 853, // NOLINT
- 1005, 853, 1006, 857, 1007, 857, 1008, 765, 1009, 785, 1010, 861, 1013, 749, 1015, 865, // NOLINT
- 1016, 865, 1017, 861, 1018, 869, 1019, 869, 1073742845, 701, 1023, 705, 1073742848, 873, 1039, 877, // NOLINT
- 1073742864, 881, 1071, 885, 1073742896, 881, 1103, 885, 1073742928, 873, 1119, 877, 1120, 889, 1121, 889, // NOLINT
- 1122, 893, 1123, 893, 1124, 897, 1125, 897, 1126, 901, 1127, 901, 1128, 905, 1129, 905, // NOLINT
- 1130, 909, 1131, 909, 1132, 913, 1133, 913, 1134, 917, 1135, 917, 1136, 921, 1137, 921, // NOLINT
- 1138, 925, 1139, 925, 1140, 929, 1141, 929, 1142, 933, 1143, 933, 1144, 937, 1145, 937, // NOLINT
- 1146, 941, 1147, 941, 1148, 945, 1149, 945, 1150, 949, 1151, 949, 1152, 953, 1153, 953, // NOLINT
- 1162, 957, 1163, 957, 1164, 961, 1165, 961, 1166, 965, 1167, 965, 1168, 969, 1169, 969, // NOLINT
- 1170, 973, 1171, 973, 1172, 977, 1173, 977, 1174, 981, 1175, 981, 1176, 985, 1177, 985, // NOLINT
- 1178, 989, 1179, 989, 1180, 993, 1181, 993, 1182, 997, 1183, 997, 1184, 1001, 1185, 1001, // NOLINT
- 1186, 1005, 1187, 1005, 1188, 1009, 1189, 1009, 1190, 1013, 1191, 1013, 1192, 1017, 1193, 1017, // NOLINT
- 1194, 1021, 1195, 1021, 1196, 1025, 1197, 1025, 1198, 1029, 1199, 1029, 1200, 1033, 1201, 1033, // NOLINT
- 1202, 1037, 1203, 1037, 1204, 1041, 1205, 1041, 1206, 1045, 1207, 1045, 1208, 1049, 1209, 1049, // NOLINT
- 1210, 1053, 1211, 1053, 1212, 1057, 1213, 1057, 1214, 1061, 1215, 1061, 1216, 1065, 1217, 1069, // NOLINT
- 1218, 1069, 1219, 1073, 1220, 1073, 1221, 1077, 1222, 1077, 1223, 1081, 1224, 1081, 1225, 1085, // NOLINT
- 1226, 1085, 1227, 1089, 1228, 1089, 1229, 1093, 1230, 1093, 1231, 1065, 1232, 1097, 1233, 1097, // NOLINT
- 1234, 1101, 1235, 1101, 1236, 1105, 1237, 1105, 1238, 1109, 1239, 1109, 1240, 1113, 1241, 1113, // NOLINT
- 1242, 1117, 1243, 1117, 1244, 1121, 1245, 1121, 1246, 1125, 1247, 1125, 1248, 1129, 1249, 1129, // NOLINT
- 1250, 1133, 1251, 1133, 1252, 1137, 1253, 1137, 1254, 1141, 1255, 1141, 1256, 1145, 1257, 1145, // NOLINT
- 1258, 1149, 1259, 1149, 1260, 1153, 1261, 1153, 1262, 1157, 1263, 1157, 1264, 1161, 1265, 1161, // NOLINT
- 1266, 1165, 1267, 1165, 1268, 1169, 1269, 1169, 1270, 1173, 1271, 1173, 1272, 1177, 1273, 1177, // NOLINT
- 1274, 1181, 1275, 1181, 1276, 1185, 1277, 1185, 1278, 1189, 1279, 1189, 1280, 1193, 1281, 1193, // NOLINT
- 1282, 1197, 1283, 1197, 1284, 1201, 1285, 1201, 1286, 1205, 1287, 1205, 1288, 1209, 1289, 1209, // NOLINT
- 1290, 1213, 1291, 1213, 1292, 1217, 1293, 1217, 1294, 1221, 1295, 1221, 1296, 1225, 1297, 1225, // NOLINT
- 1298, 1229, 1299, 1229, 1073743153, 1233, 1366, 1237, 1073743201, 1233, 1414, 1237, 1073746080, 1241, 4293, 1245, // NOLINT
- 7549, 1249, 7680, 1253, 7681, 1253, 7682, 1257, 7683, 1257, 7684, 1261, 7685, 1261, 7686, 1265, // NOLINT
- 7687, 1265, 7688, 1269, 7689, 1269, 7690, 1273, 7691, 1273, 7692, 1277, 7693, 1277, 7694, 1281, // NOLINT
- 7695, 1281, 7696, 1285, 7697, 1285, 7698, 1289, 7699, 1289, 7700, 1293, 7701, 1293, 7702, 1297, // NOLINT
- 7703, 1297, 7704, 1301, 7705, 1301, 7706, 1305, 7707, 1305, 7708, 1309, 7709, 1309, 7710, 1313, // NOLINT
- 7711, 1313, 7712, 1317, 7713, 1317, 7714, 1321, 7715, 1321, 7716, 1325, 7717, 1325, 7718, 1329, // NOLINT
- 7719, 1329, 7720, 1333, 7721, 1333, 7722, 1337, 7723, 1337, 7724, 1341, 7725, 1341, 7726, 1345, // NOLINT
- 7727, 1345, 7728, 1349, 7729, 1349, 7730, 1353, 7731, 1353, 7732, 1357, 7733, 1357, 7734, 1361, // NOLINT
- 7735, 1361, 7736, 1365, 7737, 1365, 7738, 1369, 7739, 1369, 7740, 1373, 7741, 1373, 7742, 1377, // NOLINT
- 7743, 1377, 7744, 1381, 7745, 1381, 7746, 1385, 7747, 1385, 7748, 1389, 7749, 1389, 7750, 1393, // NOLINT
- 7751, 1393, 7752, 1397, 7753, 1397, 7754, 1401, 7755, 1401, 7756, 1405, 7757, 1405, 7758, 1409, // NOLINT
- 7759, 1409, 7760, 1413, 7761, 1413, 7762, 1417, 7763, 1417, 7764, 1421, 7765, 1421, 7766, 1425, // NOLINT
- 7767, 1425, 7768, 1429, 7769, 1429, 7770, 1433, 7771, 1433, 7772, 1437, 7773, 1437, 7774, 1441, // NOLINT
- 7775, 1441, 7776, 1445, 7777, 1445, 7778, 1449, 7779, 1449, 7780, 1453, 7781, 1453, 7782, 1457, // NOLINT
- 7783, 1457, 7784, 1461, 7785, 1461, 7786, 1465, 7787, 1465, 7788, 1469, 7789, 1469, 7790, 1473, // NOLINT
- 7791, 1473, 7792, 1477, 7793, 1477, 7794, 1481, 7795, 1481, 7796, 1485, 7797, 1485, 7798, 1489, // NOLINT
- 7799, 1489, 7800, 1493, 7801, 1493, 7802, 1497, 7803, 1497, 7804, 1501, 7805, 1501, 7806, 1505, // NOLINT
- 7807, 1505, 7808, 1509, 7809, 1509, 7810, 1513, 7811, 1513, 7812, 1517, 7813, 1517, 7814, 1521, // NOLINT
- 7815, 1521, 7816, 1525, 7817, 1525, 7818, 1529, 7819, 1529, 7820, 1533, 7821, 1533, 7822, 1537, // NOLINT
- 7823, 1537, 7824, 1541, 7825, 1541, 7826, 1545, 7827, 1545, 7828, 1549, 7829, 1549, 7835, 1445, // NOLINT
- 7840, 1553, 7841, 1553, 7842, 1557, 7843, 1557, 7844, 1561, 7845, 1561, 7846, 1565, 7847, 1565, // NOLINT
- 7848, 1569, 7849, 1569, 7850, 1573, 7851, 1573, 7852, 1577, 7853, 1577, 7854, 1581, 7855, 1581, // NOLINT
- 7856, 1585, 7857, 1585, 7858, 1589, 7859, 1589, 7860, 1593, 7861, 1593, 7862, 1597, 7863, 1597, // NOLINT
- 7864, 1601, 7865, 1601, 7866, 1605, 7867, 1605, 7868, 1609, 7869, 1609, 7870, 1613, 7871, 1613, // NOLINT
- 7872, 1617, 7873, 1617, 7874, 1621, 7875, 1621, 7876, 1625, 7877, 1625, 7878, 1629, 7879, 1629, // NOLINT
- 7880, 1633, 7881, 1633, 7882, 1637, 7883, 1637, 7884, 1641, 7885, 1641, 7886, 1645, 7887, 1645, // NOLINT
- 7888, 1649, 7889, 1649, 7890, 1653, 7891, 1653, 7892, 1657, 7893, 1657, 7894, 1661, 7895, 1661, // NOLINT
- 7896, 1665, 7897, 1665, 7898, 1669, 7899, 1669, 7900, 1673, 7901, 1673, 7902, 1677, 7903, 1677, // NOLINT
- 7904, 1681, 7905, 1681, 7906, 1685, 7907, 1685, 7908, 1689, 7909, 1689, 7910, 1693, 7911, 1693, // NOLINT
- 7912, 1697, 7913, 1697, 7914, 1701, 7915, 1701, 7916, 1705, 7917, 1705, 7918, 1709, 7919, 1709, // NOLINT
- 7920, 1713, 7921, 1713, 7922, 1717, 7923, 1717, 7924, 1721, 7925, 1721, 7926, 1725, 7927, 1725, // NOLINT
- 7928, 1729, 7929, 1729, 1073749760, 1733, 7943, 1737, 1073749768, 1733, 7951, 1737, 1073749776, 1741, 7957, 1745, // NOLINT
- 1073749784, 1741, 7965, 1745, 1073749792, 1749, 7975, 1753, 1073749800, 1749, 7983, 1753, 1073749808, 1757, 7991, 1761, // NOLINT
- 1073749816, 1757, 7999, 1761, 1073749824, 1765, 8005, 1769, 1073749832, 1765, 8013, 1769, 8017, 1773, 8019, 1777, // NOLINT
- 8021, 1781, 8023, 1785, 8025, 1773, 8027, 1777, 8029, 1781, 8031, 1785, 1073749856, 1789, 8039, 1793, // NOLINT
- 1073749864, 1789, 8047, 1793, 1073749872, 1797, 8049, 1801, 1073749874, 1805, 8053, 1809, 1073749878, 1813, 8055, 1817, // NOLINT
- 1073749880, 1821, 8057, 1825, 1073749882, 1829, 8059, 1833, 1073749884, 1837, 8061, 1841, 1073749936, 1845, 8113, 1849, // NOLINT
- 1073749944, 1845, 8121, 1849, 1073749946, 1797, 8123, 1801, 8126, 697, 1073749960, 1805, 8139, 1809, 1073749968, 1853, // NOLINT
- 8145, 1857, 1073749976, 1853, 8153, 1857, 1073749978, 1813, 8155, 1817, 1073749984, 1861, 8161, 1865, 8165, 1869, // NOLINT
- 1073749992, 1861, 8169, 1865, 1073749994, 1829, 8171, 1833, 8172, 1869, 1073750008, 1821, 8185, 1825, 1073750010, 1837, // NOLINT
- 8187, 1841 }; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeMultiStrings0Size = 469; // NOLINT
-static const MultiCharacterSpecialCase<2> kEcma262UnCanonicalizeMultiStrings1[71] = { // NOLINT
- {{8498, 8526}}, {{8544, 8560}}, {{8559, 8575}}, {{8579, 8580}}, // NOLINT
- {{9398, 9424}}, {{9423, 9449}}, {{11264, 11312}}, {{11310, 11358}}, // NOLINT
- {{11360, 11361}}, {{619, 11362}}, {{7549, 11363}}, {{637, 11364}}, // NOLINT
- {{570, 11365}}, {{574, 11366}}, {{11367, 11368}}, {{11369, 11370}}, // NOLINT
- {{11371, 11372}}, {{11381, 11382}}, {{11392, 11393}}, {{11394, 11395}}, // NOLINT
- {{11396, 11397}}, {{11398, 11399}}, {{11400, 11401}}, {{11402, 11403}}, // NOLINT
- {{11404, 11405}}, {{11406, 11407}}, {{11408, 11409}}, {{11410, 11411}}, // NOLINT
- {{11412, 11413}}, {{11414, 11415}}, {{11416, 11417}}, {{11418, 11419}}, // NOLINT
- {{11420, 11421}}, {{11422, 11423}}, {{11424, 11425}}, {{11426, 11427}}, // NOLINT
- {{11428, 11429}}, {{11430, 11431}}, {{11432, 11433}}, {{11434, 11435}}, // NOLINT
- {{11436, 11437}}, {{11438, 11439}}, {{11440, 11441}}, {{11442, 11443}}, // NOLINT
- {{11444, 11445}}, {{11446, 11447}}, {{11448, 11449}}, {{11450, 11451}}, // NOLINT
- {{11452, 11453}}, {{11454, 11455}}, {{11456, 11457}}, {{11458, 11459}}, // NOLINT
- {{11460, 11461}}, {{11462, 11463}}, {{11464, 11465}}, {{11466, 11467}}, // NOLINT
- {{11468, 11469}}, {{11470, 11471}}, {{11472, 11473}}, {{11474, 11475}}, // NOLINT
- {{11476, 11477}}, {{11478, 11479}}, {{11480, 11481}}, {{11482, 11483}}, // NOLINT
- {{11484, 11485}}, {{11486, 11487}}, {{11488, 11489}}, {{11490, 11491}}, // NOLINT
- {{4256, 11520}}, {{4293, 11557}}, {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeTable1Size = 133; // NOLINT
-static const int32_t kEcma262UnCanonicalizeTable1[266] = {
- 306, 1, 334, 1, 1073742176, 5, 367, 9, 1073742192, 5, 383, 9, 387, 13, 388, 13, // NOLINT
- 1073743030, 17, 1231, 21, 1073743056, 17, 1257, 21, 1073744896, 25, 3118, 29, 1073744944, 25, 3166, 29, // NOLINT
- 3168, 33, 3169, 33, 3170, 37, 3171, 41, 3172, 45, 3173, 49, 3174, 53, 3175, 57, // NOLINT
- 3176, 57, 3177, 61, 3178, 61, 3179, 65, 3180, 65, 3189, 69, 3190, 69, 3200, 73, // NOLINT
- 3201, 73, 3202, 77, 3203, 77, 3204, 81, 3205, 81, 3206, 85, 3207, 85, 3208, 89, // NOLINT
- 3209, 89, 3210, 93, 3211, 93, 3212, 97, 3213, 97, 3214, 101, 3215, 101, 3216, 105, // NOLINT
- 3217, 105, 3218, 109, 3219, 109, 3220, 113, 3221, 113, 3222, 117, 3223, 117, 3224, 121, // NOLINT
- 3225, 121, 3226, 125, 3227, 125, 3228, 129, 3229, 129, 3230, 133, 3231, 133, 3232, 137, // NOLINT
- 3233, 137, 3234, 141, 3235, 141, 3236, 145, 3237, 145, 3238, 149, 3239, 149, 3240, 153, // NOLINT
- 3241, 153, 3242, 157, 3243, 157, 3244, 161, 3245, 161, 3246, 165, 3247, 165, 3248, 169, // NOLINT
- 3249, 169, 3250, 173, 3251, 173, 3252, 177, 3253, 177, 3254, 181, 3255, 181, 3256, 185, // NOLINT
- 3257, 185, 3258, 189, 3259, 189, 3260, 193, 3261, 193, 3262, 197, 3263, 197, 3264, 201, // NOLINT
- 3265, 201, 3266, 205, 3267, 205, 3268, 209, 3269, 209, 3270, 213, 3271, 213, 3272, 217, // NOLINT
- 3273, 217, 3274, 221, 3275, 221, 3276, 225, 3277, 225, 3278, 229, 3279, 229, 3280, 233, // NOLINT
- 3281, 233, 3282, 237, 3283, 237, 3284, 241, 3285, 241, 3286, 245, 3287, 245, 3288, 249, // NOLINT
- 3289, 249, 3290, 253, 3291, 253, 3292, 257, 3293, 257, 3294, 261, 3295, 261, 3296, 265, // NOLINT
- 3297, 265, 3298, 269, 3299, 269, 1073745152, 273, 3365, 277 }; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeMultiStrings1Size = 71; // NOLINT
-static const MultiCharacterSpecialCase<2> kEcma262UnCanonicalizeMultiStrings7[3] = { // NOLINT
- {{65313, 65345}}, {{65338, 65370}}, {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeTable7Size = 4; // NOLINT
-static const int32_t kEcma262UnCanonicalizeTable7[8] = {
- 1073749793, 1, 7994, 5, 1073749825, 1, 8026, 5 }; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeMultiStrings7Size = 3; // NOLINT
-int Ecma262UnCanonicalize::Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupMapping<true>(kEcma262UnCanonicalizeTable0,
- kEcma262UnCanonicalizeTable0Size,
- kEcma262UnCanonicalizeMultiStrings0,
- c,
- n,
- result,
- allow_caching_ptr);
- case 1: return LookupMapping<true>(kEcma262UnCanonicalizeTable1,
- kEcma262UnCanonicalizeTable1Size,
- kEcma262UnCanonicalizeMultiStrings1,
- c,
- n,
- result,
- allow_caching_ptr);
- case 7: return LookupMapping<true>(kEcma262UnCanonicalizeTable7,
- kEcma262UnCanonicalizeTable7Size,
- kEcma262UnCanonicalizeMultiStrings7,
- c,
- n,
- result,
- allow_caching_ptr);
- default: return 0;
- }
-}
-
-static const MultiCharacterSpecialCase<1> kCanonicalizationRangeMultiStrings0[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kCanonicalizationRangeTable0Size = 70; // NOLINT
-static const int32_t kCanonicalizationRangeTable0[140] = {
- 1073741889, 100, 90, 0, 1073741921, 100, 122, 0, 1073742016, 88, 214, 0, 1073742040, 24, 222, 0, // NOLINT
- 1073742048, 88, 246, 0, 1073742072, 24, 254, 0, 1073742715, 8, 893, 0, 1073742728, 8, 906, 0, // NOLINT
- 1073742749, 8, 927, 0, 1073742759, 16, 939, 0, 1073742765, 8, 943, 0, 1073742781, 8, 959, 0, // NOLINT
- 1073742791, 16, 971, 0, 1073742845, 8, 1023, 0, 1073742848, 60, 1039, 0, 1073742864, 124, 1071, 0, // NOLINT
- 1073742896, 124, 1103, 0, 1073742928, 60, 1119, 0, 1073743153, 148, 1366, 0, 1073743201, 148, 1414, 0, // NOLINT
- 1073746080, 148, 4293, 0, 1073749760, 28, 7943, 0, 1073749768, 28, 7951, 0, 1073749776, 20, 7957, 0, // NOLINT
- 1073749784, 20, 7965, 0, 1073749792, 28, 7975, 0, 1073749800, 28, 7983, 0, 1073749808, 28, 7991, 0, // NOLINT
- 1073749816, 28, 7999, 0, 1073749824, 20, 8005, 0, 1073749832, 20, 8013, 0, 1073749856, 28, 8039, 0, // NOLINT
- 1073749864, 28, 8047, 0, 1073749874, 12, 8053, 0, 1073749960, 12, 8139, 0 }; // NOLINT
-static const uint16_t kCanonicalizationRangeMultiStrings0Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kCanonicalizationRangeMultiStrings1[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kCanonicalizationRangeTable1Size = 14; // NOLINT
-static const int32_t kCanonicalizationRangeTable1[28] = {
- 1073742176, 60, 367, 0, 1073742192, 60, 383, 0, 1073743030, 100, 1231, 0, 1073743056, 100, 1257, 0, // NOLINT
- 1073744896, 184, 3118, 0, 1073744944, 184, 3166, 0, 1073745152, 148, 3365, 0 }; // NOLINT
-static const uint16_t kCanonicalizationRangeMultiStrings1Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kCanonicalizationRangeMultiStrings7[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kCanonicalizationRangeTable7Size = 4; // NOLINT
-static const int32_t kCanonicalizationRangeTable7[8] = {
- 1073749793, 100, 7994, 0, 1073749825, 100, 8026, 0 }; // NOLINT
-static const uint16_t kCanonicalizationRangeMultiStrings7Size = 1; // NOLINT
-int CanonicalizationRange::Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupMapping<false>(kCanonicalizationRangeTable0,
- kCanonicalizationRangeTable0Size,
- kCanonicalizationRangeMultiStrings0,
- c,
- n,
- result,
- allow_caching_ptr);
- case 1: return LookupMapping<false>(kCanonicalizationRangeTable1,
- kCanonicalizationRangeTable1Size,
- kCanonicalizationRangeMultiStrings1,
- c,
- n,
- result,
- allow_caching_ptr);
- case 7: return LookupMapping<false>(kCanonicalizationRangeTable7,
- kCanonicalizationRangeTable7Size,
- kCanonicalizationRangeMultiStrings7,
- c,
- n,
- result,
- allow_caching_ptr);
- default: return 0;
- }
-}
-
-
-const uchar UnicodeData::kMaxCodePoint = 65533;
-
-int UnicodeData::GetByteCount() {
- return kUppercaseTable0Size * sizeof(int32_t) // NOLINT
- + kUppercaseTable1Size * sizeof(int32_t) // NOLINT
- + kUppercaseTable7Size * sizeof(int32_t) // NOLINT
- + kLowercaseTable0Size * sizeof(int32_t) // NOLINT
- + kLowercaseTable1Size * sizeof(int32_t) // NOLINT
- + kLowercaseTable7Size * sizeof(int32_t) // NOLINT
- + kLetterTable0Size * sizeof(int32_t) // NOLINT
- + kLetterTable1Size * sizeof(int32_t) // NOLINT
- + kLetterTable2Size * sizeof(int32_t) // NOLINT
- + kLetterTable3Size * sizeof(int32_t) // NOLINT
- + kLetterTable4Size * sizeof(int32_t) // NOLINT
- + kLetterTable5Size * sizeof(int32_t) // NOLINT
- + kLetterTable6Size * sizeof(int32_t) // NOLINT
- + kLetterTable7Size * sizeof(int32_t) // NOLINT
- + kSpaceTable0Size * sizeof(int32_t) // NOLINT
- + kSpaceTable1Size * sizeof(int32_t) // NOLINT
- + kNumberTable0Size * sizeof(int32_t) // NOLINT
- + kNumberTable7Size * sizeof(int32_t) // NOLINT
- + kWhiteSpaceTable0Size * sizeof(int32_t) // NOLINT
- + kWhiteSpaceTable1Size * sizeof(int32_t) // NOLINT
- + kLineTerminatorTable0Size * sizeof(int32_t) // NOLINT
- + kLineTerminatorTable1Size * sizeof(int32_t) // NOLINT
- + kCombiningMarkTable0Size * sizeof(int32_t) // NOLINT
- + kCombiningMarkTable1Size * sizeof(int32_t) // NOLINT
- + kCombiningMarkTable5Size * sizeof(int32_t) // NOLINT
- + kCombiningMarkTable7Size * sizeof(int32_t) // NOLINT
- + kConnectorPunctuationTable0Size * sizeof(int32_t) // NOLINT
- + kConnectorPunctuationTable1Size * sizeof(int32_t) // NOLINT
- + kConnectorPunctuationTable7Size * sizeof(int32_t) // NOLINT
- + kToLowercaseMultiStrings0Size * sizeof(MultiCharacterSpecialCase<2>) // NOLINT
- + kToLowercaseMultiStrings1Size * sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kToLowercaseMultiStrings7Size * sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kToUppercaseMultiStrings0Size * sizeof(MultiCharacterSpecialCase<3>) // NOLINT
- + kToUppercaseMultiStrings1Size * sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kToUppercaseMultiStrings7Size * sizeof(MultiCharacterSpecialCase<3>) // NOLINT
- + kEcma262CanonicalizeMultiStrings0Size * sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kEcma262CanonicalizeMultiStrings1Size * sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kEcma262CanonicalizeMultiStrings7Size * sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kEcma262UnCanonicalizeMultiStrings0Size * sizeof(MultiCharacterSpecialCase<4>) // NOLINT
- + kEcma262UnCanonicalizeMultiStrings1Size * sizeof(MultiCharacterSpecialCase<2>) // NOLINT
- + kEcma262UnCanonicalizeMultiStrings7Size * sizeof(MultiCharacterSpecialCase<2>) // NOLINT
- + kCanonicalizationRangeMultiStrings0Size * sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kCanonicalizationRangeMultiStrings1Size * sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kCanonicalizationRangeMultiStrings7Size * sizeof(MultiCharacterSpecialCase<1>); // NOLINT
-}
-
-} // namespace unicode
diff --git a/src/3rdparty/v8/src/unicode.h b/src/3rdparty/v8/src/unicode.h
deleted file mode 100644
index 39fc349..0000000
--- a/src/3rdparty/v8/src/unicode.h
+++ /dev/null
@@ -1,280 +0,0 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_UNICODE_H_
-#define V8_UNICODE_H_
-
-#include <sys/types.h>
-
-/**
- * \file
- * Definitions and convenience functions for working with unicode.
- */
-
-namespace unibrow {
-
-typedef unsigned int uchar;
-typedef unsigned char byte;
-
-/**
- * The max length of the result of converting the case of a single
- * character.
- */
-static const int kMaxMappingSize = 4;
-
-template <class T, int size = 256>
-class Predicate {
- public:
- inline Predicate() { }
- inline bool get(uchar c);
- private:
- friend class Test;
- bool CalculateValue(uchar c);
- struct CacheEntry {
- inline CacheEntry() : code_point_(0), value_(0) { }
- inline CacheEntry(uchar code_point, bool value)
- : code_point_(code_point),
- value_(value) { }
- uchar code_point_ : 21;
- bool value_ : 1;
- };
- static const int kSize = size;
- static const int kMask = kSize - 1;
- CacheEntry entries_[kSize];
-};
-
-// A cache used in case conversion. It caches the value for characters
-// that either have no mapping or map to a single character independent
-// of context. Characters that map to more than one character or that
-// map differently depending on context are always looked up.
-template <class T, int size = 256>
-class Mapping {
- public:
- inline Mapping() { }
- inline int get(uchar c, uchar n, uchar* result);
- private:
- friend class Test;
- int CalculateValue(uchar c, uchar n, uchar* result);
- struct CacheEntry {
- inline CacheEntry() : code_point_(kNoChar), offset_(0) { }
- inline CacheEntry(uchar code_point, signed offset)
- : code_point_(code_point),
- offset_(offset) { }
- uchar code_point_;
- signed offset_;
- static const int kNoChar = (1 << 21) - 1;
- };
- static const int kSize = size;
- static const int kMask = kSize - 1;
- CacheEntry entries_[kSize];
-};
-
-class UnicodeData {
- private:
- friend class Test;
- static int GetByteCount();
- static const uchar kMaxCodePoint;
-};
-
-// --- U t f 8 ---
-
-template <typename Data>
-class Buffer {
- public:
- inline Buffer(Data data, unsigned length) : data_(data), length_(length) { }
- inline Buffer() : data_(0), length_(0) { }
- Data data() { return data_; }
- unsigned length() { return length_; }
- private:
- Data data_;
- unsigned length_;
-};
-
-class Utf8 {
- public:
- static inline uchar Length(uchar chr);
- static inline unsigned Encode(char* out, uchar c);
- static const byte* ReadBlock(Buffer<const char*> str, byte* buffer,
- unsigned capacity, unsigned* chars_read, unsigned* offset);
- static uchar CalculateValue(const byte* str,
- unsigned length,
- unsigned* cursor);
- static const uchar kBadChar = 0xFFFD;
- static const unsigned kMaxEncodedSize = 4;
- static const unsigned kMaxOneByteChar = 0x7f;
- static const unsigned kMaxTwoByteChar = 0x7ff;
- static const unsigned kMaxThreeByteChar = 0xffff;
- static const unsigned kMaxFourByteChar = 0x1fffff;
-
- private:
- template <unsigned s> friend class Utf8InputBuffer;
- friend class Test;
- static inline uchar ValueOf(const byte* str,
- unsigned length,
- unsigned* cursor);
-};
-
-// --- C h a r a c t e r S t r e a m ---
-
-class CharacterStream {
- public:
- inline uchar GetNext();
- inline bool has_more() { return remaining_ != 0; }
- // Note that default implementation is not efficient.
- virtual void Seek(unsigned);
- unsigned Length();
- virtual ~CharacterStream() { }
- static inline bool EncodeCharacter(uchar c, byte* buffer, unsigned capacity,
- unsigned& offset);
- static inline bool EncodeAsciiCharacter(uchar c, byte* buffer,
- unsigned capacity, unsigned& offset);
- static inline bool EncodeNonAsciiCharacter(uchar c, byte* buffer,
- unsigned capacity, unsigned& offset);
- static inline uchar DecodeCharacter(const byte* buffer, unsigned* offset);
- virtual void Rewind() = 0;
- protected:
- virtual void FillBuffer() = 0;
- // The number of characters left in the current buffer
- unsigned remaining_;
- // The current offset within the buffer
- unsigned cursor_;
- // The buffer containing the decoded characters.
- const byte* buffer_;
-};
-
-// --- I n p u t B u f f e r ---
-
-/**
- * Provides efficient access to encoded characters in strings. It
- * does so by reading characters one block at a time, rather than one
- * character at a time, which gives string implementations an
- * opportunity to optimize the decoding.
- */
-template <class Reader, class Input = Reader*, unsigned kSize = 256>
-class InputBuffer : public CharacterStream {
- public:
- virtual void Rewind();
- inline void Reset(Input input);
- void Seek(unsigned position);
- inline void Reset(unsigned position, Input input);
- protected:
- InputBuffer() { }
- explicit InputBuffer(Input input) { Reset(input); }
- virtual void FillBuffer();
-
- // A custom offset that can be used by the string implementation to
- // mark progress within the encoded string.
- unsigned offset_;
- // The input string
- Input input_;
- // To avoid heap allocation, we keep an internal buffer to which
- // the encoded string can write its characters. The string
- // implementation is free to decide whether it wants to use this
- // buffer or not.
- byte util_buffer_[kSize];
-};
-
-// --- U t f 8 I n p u t B u f f e r ---
-
-template <unsigned s = 256>
-class Utf8InputBuffer : public InputBuffer<Utf8, Buffer<const char*>, s> {
- public:
- inline Utf8InputBuffer() { }
- inline Utf8InputBuffer(const char* data, unsigned length);
- inline void Reset(const char* data, unsigned length) {
- InputBuffer<Utf8, Buffer<const char*>, s>::Reset(
- Buffer<const char*>(data, length));
- }
-};
-
-
-struct Uppercase {
- static bool Is(uchar c);
-};
-struct Lowercase {
- static bool Is(uchar c);
-};
-struct Letter {
- static bool Is(uchar c);
-};
-struct Space {
- static bool Is(uchar c);
-};
-struct Number {
- static bool Is(uchar c);
-};
-struct WhiteSpace {
- static bool Is(uchar c);
-};
-struct LineTerminator {
- static bool Is(uchar c);
-};
-struct CombiningMark {
- static bool Is(uchar c);
-};
-struct ConnectorPunctuation {
- static bool Is(uchar c);
-};
-struct ToLowercase {
- static const int kMaxWidth = 3;
- static int Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr);
-};
-struct ToUppercase {
- static const int kMaxWidth = 3;
- static int Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr);
-};
-struct Ecma262Canonicalize {
- static const int kMaxWidth = 1;
- static int Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr);
-};
-struct Ecma262UnCanonicalize {
- static const int kMaxWidth = 4;
- static int Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr);
-};
-struct CanonicalizationRange {
- static const int kMaxWidth = 1;
- static int Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr);
-};
-
-} // namespace unibrow
-
-#endif // V8_UNICODE_H_
diff --git a/src/3rdparty/v8/src/uri.js b/src/3rdparty/v8/src/uri.js
deleted file mode 100644
index e94b3fe..0000000
--- a/src/3rdparty/v8/src/uri.js
+++ /dev/null
@@ -1,402 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file contains support for URI manipulations written in
-// JavaScript.
-
-// Expect $String = global.String;
-
-// Lazily initialized.
-var hexCharArray = 0;
-var hexCharCodeArray = 0;
-
-
-function URIAddEncodedOctetToBuffer(octet, result, index) {
- result[index++] = 37; // Char code of '%'.
- result[index++] = hexCharCodeArray[octet >> 4];
- result[index++] = hexCharCodeArray[octet & 0x0F];
- return index;
-}
-
-
-function URIEncodeOctets(octets, result, index) {
- if (hexCharCodeArray === 0) {
- hexCharCodeArray = [48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
- 65, 66, 67, 68, 69, 70];
- }
- index = URIAddEncodedOctetToBuffer(octets[0], result, index);
- if (octets[1]) index = URIAddEncodedOctetToBuffer(octets[1], result, index);
- if (octets[2]) index = URIAddEncodedOctetToBuffer(octets[2], result, index);
- if (octets[3]) index = URIAddEncodedOctetToBuffer(octets[3], result, index);
- return index;
-}
-
-
-function URIEncodeSingle(cc, result, index) {
- var x = (cc >> 12) & 0xF;
- var y = (cc >> 6) & 63;
- var z = cc & 63;
- var octets = new $Array(3);
- if (cc <= 0x007F) {
- octets[0] = cc;
- } else if (cc <= 0x07FF) {
- octets[0] = y + 192;
- octets[1] = z + 128;
- } else {
- octets[0] = x + 224;
- octets[1] = y + 128;
- octets[2] = z + 128;
- }
- return URIEncodeOctets(octets, result, index);
-}
-
-
-function URIEncodePair(cc1 , cc2, result, index) {
- var u = ((cc1 >> 6) & 0xF) + 1;
- var w = (cc1 >> 2) & 0xF;
- var x = cc1 & 3;
- var y = (cc2 >> 6) & 0xF;
- var z = cc2 & 63;
- var octets = new $Array(4);
- octets[0] = (u >> 2) + 240;
- octets[1] = (((u & 3) << 4) | w) + 128;
- octets[2] = ((x << 4) | y) + 128;
- octets[3] = z + 128;
- return URIEncodeOctets(octets, result, index);
-}
-
-
-function URIHexCharsToCharCode(highChar, lowChar) {
- var highCode = HexValueOf(highChar);
- var lowCode = HexValueOf(lowChar);
- if (highCode == -1 || lowCode == -1) {
- throw new $URIError("URI malformed");
- }
- return (highCode << 4) | lowCode;
-}
-
-
-function URIDecodeOctets(octets, result, index) {
- var value;
- var o0 = octets[0];
- if (o0 < 0x80) {
- value = o0;
- } else if (o0 < 0xc2) {
- throw new $URIError("URI malformed");
- } else {
- var o1 = octets[1];
- if (o0 < 0xe0) {
- var a = o0 & 0x1f;
- if ((o1 < 0x80) || (o1 > 0xbf))
- throw new $URIError("URI malformed");
- var b = o1 & 0x3f;
- value = (a << 6) + b;
- if (value < 0x80 || value > 0x7ff)
- throw new $URIError("URI malformed");
- } else {
- var o2 = octets[2];
- if (o0 < 0xf0) {
- var a = o0 & 0x0f;
- if ((o1 < 0x80) || (o1 > 0xbf))
- throw new $URIError("URI malformed");
- var b = o1 & 0x3f;
- if ((o2 < 0x80) || (o2 > 0xbf))
- throw new $URIError("URI malformed");
- var c = o2 & 0x3f;
- value = (a << 12) + (b << 6) + c;
- if ((value < 0x800) || (value > 0xffff))
- throw new $URIError("URI malformed");
- } else {
- var o3 = octets[3];
- if (o0 < 0xf8) {
- var a = (o0 & 0x07);
- if ((o1 < 0x80) || (o1 > 0xbf))
- throw new $URIError("URI malformed");
- var b = (o1 & 0x3f);
- if ((o2 < 0x80) || (o2 > 0xbf))
- throw new $URIError("URI malformed");
- var c = (o2 & 0x3f);
- if ((o3 < 0x80) || (o3 > 0xbf))
- throw new $URIError("URI malformed");
- var d = (o3 & 0x3f);
- value = (a << 18) + (b << 12) + (c << 6) + d;
- if ((value < 0x10000) || (value > 0x10ffff))
- throw new $URIError("URI malformed");
- } else {
- throw new $URIError("URI malformed");
- }
- }
- }
- }
- if (value < 0x10000) {
- result[index++] = value;
- return index;
- } else {
- result[index++] = (value >> 10) + 0xd7c0;
- result[index++] = (value & 0x3ff) + 0xdc00;
- return index;
- }
-}
-
-
-// ECMA-262, section 15.1.3
-function Encode(uri, unescape) {
- var uriLength = uri.length;
- var result = new $Array(uriLength);
- var index = 0;
- for (var k = 0; k < uriLength; k++) {
- var cc1 = uri.charCodeAt(k);
- if (unescape(cc1)) {
- result[index++] = cc1;
- } else {
- if (cc1 >= 0xDC00 && cc1 <= 0xDFFF) throw new $URIError("URI malformed");
- if (cc1 < 0xD800 || cc1 > 0xDBFF) {
- index = URIEncodeSingle(cc1, result, index);
- } else {
- k++;
- if (k == uriLength) throw new $URIError("URI malformed");
- var cc2 = uri.charCodeAt(k);
- if (cc2 < 0xDC00 || cc2 > 0xDFFF) throw new $URIError("URI malformed");
- index = URIEncodePair(cc1, cc2, result, index);
- }
- }
- }
- return %StringFromCharCodeArray(result);
-}
-
-
-// ECMA-262, section 15.1.3
-function Decode(uri, reserved) {
- var uriLength = uri.length;
- var result = new $Array(uriLength);
- var index = 0;
- for (var k = 0; k < uriLength; k++) {
- var ch = uri.charAt(k);
- if (ch == '%') {
- if (k + 2 >= uriLength) throw new $URIError("URI malformed");
- var cc = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k));
- if (cc >> 7) {
- var n = 0;
- while (((cc << ++n) & 0x80) != 0) ;
- if (n == 1 || n > 4) throw new $URIError("URI malformed");
- var octets = new $Array(n);
- octets[0] = cc;
- if (k + 3 * (n - 1) >= uriLength) throw new $URIError("URI malformed");
- for (var i = 1; i < n; i++) {
- if (uri.charAt(++k) != '%') throw new $URIError("URI malformed");
- octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k));
- }
- index = URIDecodeOctets(octets, result, index);
- } else {
- if (reserved(cc)) {
- result[index++] = 37; // Char code of '%'.
- result[index++] = uri.charCodeAt(k - 1);
- result[index++] = uri.charCodeAt(k);
- } else {
- result[index++] = cc;
- }
- }
- } else {
- result[index++] = ch.charCodeAt(0);
- }
- }
- result.length = index;
- return %StringFromCharCodeArray(result);
-}
-
-
-// ECMA-262 - 15.1.3.1.
-function URIDecode(uri) {
- function reservedPredicate(cc) {
- // #$
- if (35 <= cc && cc <= 36) return true;
- // &
- if (cc == 38) return true;
- // +,
- if (43 <= cc && cc <= 44) return true;
- // /
- if (cc == 47) return true;
- // :;
- if (58 <= cc && cc <= 59) return true;
- // =
- if (cc == 61) return true;
- // ?@
- if (63 <= cc && cc <= 64) return true;
-
- return false;
- };
- var string = ToString(uri);
- return Decode(string, reservedPredicate);
-}
-
-
-// ECMA-262 - 15.1.3.2.
-function URIDecodeComponent(component) {
- function reservedPredicate(cc) { return false; };
- var string = ToString(component);
- return Decode(string, reservedPredicate);
-}
-
-
-// Does the char code correspond to an alpha-numeric char.
-function isAlphaNumeric(cc) {
- // a - z
- if (97 <= cc && cc <= 122) return true;
- // A - Z
- if (65 <= cc && cc <= 90) return true;
- // 0 - 9
- if (48 <= cc && cc <= 57) return true;
-
- return false;
-}
-
-
-// ECMA-262 - 15.1.3.3.
-function URIEncode(uri) {
- function unescapePredicate(cc) {
- if (isAlphaNumeric(cc)) return true;
- // !
- if (cc == 33) return true;
- // #$
- if (35 <= cc && cc <= 36) return true;
- // &'()*+,-./
- if (38 <= cc && cc <= 47) return true;
- // :;
- if (58 <= cc && cc <= 59) return true;
- // =
- if (cc == 61) return true;
- // ?@
- if (63 <= cc && cc <= 64) return true;
- // _
- if (cc == 95) return true;
- // ~
- if (cc == 126) return true;
-
- return false;
- };
-
- var string = ToString(uri);
- return Encode(string, unescapePredicate);
-}
-
-
-// ECMA-262 - 15.1.3.4
-function URIEncodeComponent(component) {
- function unescapePredicate(cc) {
- if (isAlphaNumeric(cc)) return true;
- // !
- if (cc == 33) return true;
- // '()*
- if (39 <= cc && cc <= 42) return true;
- // -.
- if (45 <= cc && cc <= 46) return true;
- // _
- if (cc == 95) return true;
- // ~
- if (cc == 126) return true;
-
- return false;
- };
-
- var string = ToString(component);
- return Encode(string, unescapePredicate);
-}
-
-
-function HexValueOf(code) {
- // 0-9
- if (code >= 48 && code <= 57) return code - 48;
- // A-F
- if (code >= 65 && code <= 70) return code - 55;
- // a-f
- if (code >= 97 && code <= 102) return code - 87;
-
- return -1;
-}
-
-
-// Convert a character code to 4-digit hex string representation
-// 64 -> 0040, 62234 -> F31A.
-function CharCodeToHex4Str(cc) {
- var r = "";
- if (hexCharArray === 0) {
- hexCharArray = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
- "A", "B", "C", "D", "E", "F"];
- }
- for (var i = 0; i < 4; ++i) {
- var c = hexCharArray[cc & 0x0F];
- r = c + r;
- cc = cc >>> 4;
- }
- return r;
-}
-
-
-// Returns true if all digits in string s are valid hex numbers
-function IsValidHex(s) {
- for (var i = 0; i < s.length; ++i) {
- var cc = s.charCodeAt(i);
- if ((48 <= cc && cc <= 57) || (65 <= cc && cc <= 70) || (97 <= cc && cc <= 102)) {
- // '0'..'9', 'A'..'F' and 'a' .. 'f'.
- } else {
- return false;
- }
- }
- return true;
-}
-
-
-// ECMA-262 - B.2.1.
-function URIEscape(str) {
- var s = ToString(str);
- return %URIEscape(s);
-}
-
-
-// ECMA-262 - B.2.2.
-function URIUnescape(str) {
- var s = ToString(str);
- return %URIUnescape(s);
-}
-
-
-// -------------------------------------------------------------------
-
-function SetupURI() {
- // Setup non-enumerable URI functions on the global object and set
- // their names.
- InstallFunctions(global, DONT_ENUM, $Array(
- "escape", URIEscape,
- "unescape", URIUnescape,
- "decodeURI", URIDecode,
- "decodeURIComponent", URIDecodeComponent,
- "encodeURI", URIEncode,
- "encodeURIComponent", URIEncodeComponent
- ));
-}
-
-SetupURI();
diff --git a/src/3rdparty/v8/src/utils.cc b/src/3rdparty/v8/src/utils.cc
deleted file mode 100644
index b466301..0000000
--- a/src/3rdparty/v8/src/utils.cc
+++ /dev/null
@@ -1,371 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdarg.h>
-
-#include "v8.h"
-
-#include "platform.h"
-
-#include "sys/stat.h"
-
-namespace v8 {
-namespace internal {
-
-
-void PrintF(const char* format, ...) {
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
-}
-
-
-void PrintF(FILE* out, const char* format, ...) {
- va_list arguments;
- va_start(arguments, format);
- OS::VFPrint(out, format, arguments);
- va_end(arguments);
-}
-
-
-void Flush(FILE* out) {
- fflush(out);
-}
-
-
-char* ReadLine(const char* prompt) {
- char* result = NULL;
- char line_buf[256];
- int offset = 0;
- bool keep_going = true;
- fprintf(stdout, "%s", prompt);
- fflush(stdout);
- while (keep_going) {
- if (fgets(line_buf, sizeof(line_buf), stdin) == NULL) {
- // fgets got an error. Just give up.
- if (result != NULL) {
- DeleteArray(result);
- }
- return NULL;
- }
- int len = StrLength(line_buf);
- if (len > 1 &&
- line_buf[len - 2] == '\\' &&
- line_buf[len - 1] == '\n') {
- // When we read a line that ends with a "\" we remove the escape and
- // append the remainder.
- line_buf[len - 2] = '\n';
- line_buf[len - 1] = 0;
- len -= 1;
- } else if ((len > 0) && (line_buf[len - 1] == '\n')) {
- // Since we read a new line we are done reading the line. This
- // will exit the loop after copying this buffer into the result.
- keep_going = false;
- }
- if (result == NULL) {
- // Allocate the initial result and make room for the terminating '\0'
- result = NewArray<char>(len + 1);
- } else {
- // Allocate a new result with enough room for the new addition.
- int new_len = offset + len + 1;
- char* new_result = NewArray<char>(new_len);
- // Copy the existing input into the new array and set the new
- // array as the result.
- memcpy(new_result, result, offset * kCharSize);
- DeleteArray(result);
- result = new_result;
- }
- // Copy the newly read line into the result.
- memcpy(result + offset, line_buf, len * kCharSize);
- offset += len;
- }
- ASSERT(result != NULL);
- result[offset] = '\0';
- return result;
-}
-
-
-char* ReadCharsFromFile(const char* filename,
- int* size,
- int extra_space,
- bool verbose) {
- FILE* file = OS::FOpen(filename, "rb");
- if (file == NULL || fseek(file, 0, SEEK_END) != 0) {
- if (verbose) {
- OS::PrintError("Cannot read from file %s.\n", filename);
- }
- return NULL;
- }
-
- // Get the size of the file and rewind it.
- *size = ftell(file);
- rewind(file);
-
- char* result = NewArray<char>(*size + extra_space);
- for (int i = 0; i < *size;) {
- int read = static_cast<int>(fread(&result[i], 1, *size - i, file));
- if (read <= 0) {
- fclose(file);
- DeleteArray(result);
- return NULL;
- }
- i += read;
- }
- fclose(file);
- return result;
-}
-
-
-byte* ReadBytes(const char* filename, int* size, bool verbose) {
- char* chars = ReadCharsFromFile(filename, size, 0, verbose);
- return reinterpret_cast<byte*>(chars);
-}
-
-
-Vector<const char> ReadFile(const char* filename,
- bool* exists,
- bool verbose) {
- int size;
- char* result = ReadCharsFromFile(filename, &size, 1, verbose);
- if (!result) {
- *exists = false;
- return Vector<const char>::empty();
- }
- result[size] = '\0';
- *exists = true;
- return Vector<const char>(result, size);
-}
-
-
-int WriteCharsToFile(const char* str, int size, FILE* f) {
- int total = 0;
- while (total < size) {
- int write = static_cast<int>(fwrite(str, 1, size - total, f));
- if (write == 0) {
- return total;
- }
- total += write;
- str += write;
- }
- return total;
-}
-
-
-int AppendChars(const char* filename,
- const char* str,
- int size,
- bool verbose) {
- FILE* f = OS::FOpen(filename, "ab");
- if (f == NULL) {
- if (verbose) {
- OS::PrintError("Cannot open file %s for writing.\n", filename);
- }
- return 0;
- }
- int written = WriteCharsToFile(str, size, f);
- fclose(f);
- return written;
-}
-
-
-int WriteChars(const char* filename,
- const char* str,
- int size,
- bool verbose) {
- FILE* f = OS::FOpen(filename, "wb");
- if (f == NULL) {
- if (verbose) {
- OS::PrintError("Cannot open file %s for writing.\n", filename);
- }
- return 0;
- }
- int written = WriteCharsToFile(str, size, f);
- fclose(f);
- return written;
-}
-
-
-int WriteBytes(const char* filename,
- const byte* bytes,
- int size,
- bool verbose) {
- const char* str = reinterpret_cast<const char*>(bytes);
- return WriteChars(filename, str, size, verbose);
-}
-
-
-StringBuilder::StringBuilder(int size) {
- buffer_ = Vector<char>::New(size);
- position_ = 0;
-}
-
-
-void StringBuilder::AddString(const char* s) {
- AddSubstring(s, StrLength(s));
-}
-
-
-void StringBuilder::AddSubstring(const char* s, int n) {
- ASSERT(!is_finalized() && position_ + n < buffer_.length());
- ASSERT(static_cast<size_t>(n) <= strlen(s));
- memcpy(&buffer_[position_], s, n * kCharSize);
- position_ += n;
-}
-
-
-void StringBuilder::AddFormatted(const char* format, ...) {
- va_list arguments;
- va_start(arguments, format);
- AddFormattedList(format, arguments);
- va_end(arguments);
-}
-
-
-void StringBuilder::AddFormattedList(const char* format, va_list list) {
- ASSERT(!is_finalized() && position_ < buffer_.length());
- int n = OS::VSNPrintF(buffer_ + position_, format, list);
- if (n < 0 || n >= (buffer_.length() - position_)) {
- position_ = buffer_.length();
- } else {
- position_ += n;
- }
-}
-
-
-void StringBuilder::AddPadding(char c, int count) {
- for (int i = 0; i < count; i++) {
- AddCharacter(c);
- }
-}
-
-
-char* StringBuilder::Finalize() {
- ASSERT(!is_finalized() && position_ < buffer_.length());
- buffer_[position_] = '\0';
- // Make sure nobody managed to add a 0-character to the
- // buffer while building the string.
- ASSERT(strlen(buffer_.start()) == static_cast<size_t>(position_));
- position_ = -1;
- ASSERT(is_finalized());
- return buffer_.start();
-}
-
-
-MemoryMappedExternalResource::MemoryMappedExternalResource(const char* filename)
- : filename_(NULL),
- data_(NULL),
- length_(0),
- remove_file_on_cleanup_(false) {
- Init(filename);
-}
-
-
-MemoryMappedExternalResource::
- MemoryMappedExternalResource(const char* filename,
- bool remove_file_on_cleanup)
- : filename_(NULL),
- data_(NULL),
- length_(0),
- remove_file_on_cleanup_(remove_file_on_cleanup) {
- Init(filename);
-}
-
-
-MemoryMappedExternalResource::~MemoryMappedExternalResource() {
- // Release the resources if we had successfully acquired them:
- if (file_ != NULL) {
- delete file_;
- if (remove_file_on_cleanup_) {
- OS::Remove(filename_);
- }
- DeleteArray<char>(filename_);
- }
-}
-
-
-void MemoryMappedExternalResource::Init(const char* filename) {
- file_ = OS::MemoryMappedFile::open(filename);
- if (file_ != NULL) {
- filename_ = StrDup(filename);
- data_ = reinterpret_cast<char*>(file_->memory());
- length_ = file_->size();
- }
-}
-
-
-bool MemoryMappedExternalResource::EnsureIsAscii(bool abort_if_failed) const {
- bool is_ascii = true;
-
- int line_no = 1;
- const char* start_of_line = data_;
- const char* end = data_ + length_;
- for (const char* p = data_; p < end; p++) {
- char c = *p;
- if ((c & 0x80) != 0) {
- // Non-ascii detected:
- is_ascii = false;
-
- // Report the error and abort if appropriate:
- if (abort_if_failed) {
- int char_no = static_cast<int>(p - start_of_line) - 1;
-
- ASSERT(filename_ != NULL);
- PrintF("\n\n\n"
- "Abort: Non-Ascii character 0x%.2x in file %s line %d char %d",
- c, filename_, line_no, char_no);
-
- // Allow for some context up to kNumberOfLeadingContextChars chars
- // before the offending non-ascii char to help the user see where
- // the offending char is.
- const int kNumberOfLeadingContextChars = 10;
- const char* err_context = p - kNumberOfLeadingContextChars;
- if (err_context < data_) {
- err_context = data_;
- }
- // Compute the length of the error context and print it.
- int err_context_length = static_cast<int>(p - err_context);
- if (err_context_length != 0) {
- PrintF(" after \"%.*s\"", err_context_length, err_context);
- }
- PrintF(".\n\n\n");
- OS::Abort();
- }
-
- break; // Non-ascii detected. No need to continue scanning.
- }
- if (c == '\n') {
- start_of_line = p;
- line_no++;
- }
- }
-
- return is_ascii;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/utils.h b/src/3rdparty/v8/src/utils.h
deleted file mode 100644
index b89f284..0000000
--- a/src/3rdparty/v8/src/utils.h
+++ /dev/null
@@ -1,796 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_UTILS_H_
-#define V8_UTILS_H_
-
-#include <stdlib.h>
-#include <string.h>
-
-#include "globals.h"
-#include "checks.h"
-#include "allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// General helper functions
-
-#define IS_POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0)
-
-// Returns true iff x is a power of 2 (or zero). Cannot be used with the
-// maximally negative value of the type T (the -1 overflows).
-template <typename T>
-static inline bool IsPowerOf2(T x) {
- return IS_POWER_OF_TWO(x);
-}
-
-
-// X must be a power of 2. Returns the number of trailing zeros.
-template <typename T>
-static inline int WhichPowerOf2(T x) {
- ASSERT(IsPowerOf2(x));
- ASSERT(x != 0);
- if (x < 0) return 31;
- int bits = 0;
-#ifdef DEBUG
- int original_x = x;
-#endif
- if (x >= 0x10000) {
- bits += 16;
- x >>= 16;
- }
- if (x >= 0x100) {
- bits += 8;
- x >>= 8;
- }
- if (x >= 0x10) {
- bits += 4;
- x >>= 4;
- }
- switch (x) {
- default: UNREACHABLE();
- case 8: bits++; // Fall through.
- case 4: bits++; // Fall through.
- case 2: bits++; // Fall through.
- case 1: break;
- }
- ASSERT_EQ(1 << bits, original_x);
- return bits;
- return 0;
-}
-
-
-// The C++ standard leaves the semantics of '>>' undefined for
-// negative signed operands. Most implementations do the right thing,
-// though.
-static inline int ArithmeticShiftRight(int x, int s) {
- return x >> s;
-}
-
-
-// Compute the 0-relative offset of some absolute value x of type T.
-// This allows conversion of Addresses and integral types into
-// 0-relative int offsets.
-template <typename T>
-static inline intptr_t OffsetFrom(T x) {
- return x - static_cast<T>(0);
-}
-
-
-// Compute the absolute value of type T for some 0-relative offset x.
-// This allows conversion of 0-relative int offsets into Addresses and
-// integral types.
-template <typename T>
-static inline T AddressFrom(intptr_t x) {
- return static_cast<T>(static_cast<T>(0) + x);
-}
-
-
-// Return the largest multiple of m which is <= x.
-template <typename T>
-static inline T RoundDown(T x, int m) {
- ASSERT(IsPowerOf2(m));
- return AddressFrom<T>(OffsetFrom(x) & -m);
-}
-
-
-// Return the smallest multiple of m which is >= x.
-template <typename T>
-static inline T RoundUp(T x, int m) {
- return RoundDown(x + m - 1, m);
-}
-
-
-template <typename T>
-static int Compare(const T& a, const T& b) {
- if (a == b)
- return 0;
- else if (a < b)
- return -1;
- else
- return 1;
-}
-
-
-template <typename T>
-static int PointerValueCompare(const T* a, const T* b) {
- return Compare<T>(*a, *b);
-}
-
-
-// Returns the smallest power of two which is >= x. If you pass in a
-// number that is already a power of two, it is returned as is.
-// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
-// figure 3-3, page 48, where the function is called clp2.
-static inline uint32_t RoundUpToPowerOf2(uint32_t x) {
- ASSERT(x <= 0x80000000u);
- x = x - 1;
- x = x | (x >> 1);
- x = x | (x >> 2);
- x = x | (x >> 4);
- x = x | (x >> 8);
- x = x | (x >> 16);
- return x + 1;
-}
-
-
-
-template <typename T>
-static inline bool IsAligned(T value, T alignment) {
- ASSERT(IsPowerOf2(alignment));
- return (value & (alignment - 1)) == 0;
-}
-
-
-// Returns true if (addr + offset) is aligned.
-static inline bool IsAddressAligned(Address addr,
- intptr_t alignment,
- int offset) {
- intptr_t offs = OffsetFrom(addr + offset);
- return IsAligned(offs, alignment);
-}
-
-
-// Returns the maximum of the two parameters.
-template <typename T>
-static T Max(T a, T b) {
- return a < b ? b : a;
-}
-
-
-// Returns the minimum of the two parameters.
-template <typename T>
-static T Min(T a, T b) {
- return a < b ? a : b;
-}
-
-
-inline int StrLength(const char* string) {
- size_t length = strlen(string);
- ASSERT(length == static_cast<size_t>(static_cast<int>(length)));
- return static_cast<int>(length);
-}
-
-
-// ----------------------------------------------------------------------------
-// BitField is a help template for encoding and decode bitfield with
-// unsigned content.
-template<class T, int shift, int size>
-class BitField {
- public:
- // Tells whether the provided value fits into the bit field.
- static bool is_valid(T value) {
- return (static_cast<uint32_t>(value) & ~((1U << (size)) - 1)) == 0;
- }
-
- // Returns a uint32_t mask of bit field.
- static uint32_t mask() {
- // To use all bits of a uint32 in a bitfield without compiler warnings we
- // have to compute 2^32 without using a shift count of 32.
- return ((1U << shift) << size) - (1U << shift);
- }
-
- // Returns a uint32_t with the bit field value encoded.
- static uint32_t encode(T value) {
- ASSERT(is_valid(value));
- return static_cast<uint32_t>(value) << shift;
- }
-
- // Extracts the bit field from the value.
- static T decode(uint32_t value) {
- return static_cast<T>((value & mask()) >> shift);
- }
-
- // Value for the field with all bits set.
- static T max() {
- return decode(mask());
- }
-};
-
-
-// ----------------------------------------------------------------------------
-// Hash function.
-
-// Thomas Wang, Integer Hash Functions.
-// http://www.concentric.net/~Ttwang/tech/inthash.htm
-static inline uint32_t ComputeIntegerHash(uint32_t key) {
- uint32_t hash = key;
- hash = ~hash + (hash << 15); // hash = (hash << 15) - hash - 1;
- hash = hash ^ (hash >> 12);
- hash = hash + (hash << 2);
- hash = hash ^ (hash >> 4);
- hash = hash * 2057; // hash = (hash + (hash << 3)) + (hash << 11);
- hash = hash ^ (hash >> 16);
- return hash;
-}
-
-
-// ----------------------------------------------------------------------------
-// Miscellaneous
-
-// A static resource holds a static instance that can be reserved in
-// a local scope using an instance of Access. Attempts to re-reserve
-// the instance will cause an error.
-template <typename T>
-class StaticResource {
- public:
- StaticResource() : is_reserved_(false) {}
-
- private:
- template <typename S> friend class Access;
- T instance_;
- bool is_reserved_;
-};
-
-
-// Locally scoped access to a static resource.
-template <typename T>
-class Access {
- public:
- explicit Access(StaticResource<T>* resource)
- : resource_(resource)
- , instance_(&resource->instance_) {
- ASSERT(!resource->is_reserved_);
- resource->is_reserved_ = true;
- }
-
- ~Access() {
- resource_->is_reserved_ = false;
- resource_ = NULL;
- instance_ = NULL;
- }
-
- T* value() { return instance_; }
- T* operator -> () { return instance_; }
-
- private:
- StaticResource<T>* resource_;
- T* instance_;
-};
-
-
-template <typename T>
-class Vector {
- public:
- Vector() : start_(NULL), length_(0) {}
- Vector(T* data, int length) : start_(data), length_(length) {
- ASSERT(length == 0 || (length > 0 && data != NULL));
- }
-
- static Vector<T> New(int length) {
- return Vector<T>(NewArray<T>(length), length);
- }
-
- // Returns a vector using the same backing storage as this one,
- // spanning from and including 'from', to but not including 'to'.
- Vector<T> SubVector(int from, int to) {
- ASSERT(to <= length_);
- ASSERT(from < to);
- ASSERT(0 <= from);
- return Vector<T>(start() + from, to - from);
- }
-
- // Returns the length of the vector.
- int length() const { return length_; }
-
- // Returns whether or not the vector is empty.
- bool is_empty() const { return length_ == 0; }
-
- // Returns the pointer to the start of the data in the vector.
- T* start() const { return start_; }
-
- // Access individual vector elements - checks bounds in debug mode.
- T& operator[](int index) const {
- ASSERT(0 <= index && index < length_);
- return start_[index];
- }
-
- const T& at(int index) const { return operator[](index); }
-
- T& first() { return start_[0]; }
-
- T& last() { return start_[length_ - 1]; }
-
- // Returns a clone of this vector with a new backing store.
- Vector<T> Clone() const {
- T* result = NewArray<T>(length_);
- for (int i = 0; i < length_; i++) result[i] = start_[i];
- return Vector<T>(result, length_);
- }
-
- void Sort(int (*cmp)(const T*, const T*)) {
- typedef int (*RawComparer)(const void*, const void*);
- qsort(start(),
- length(),
- sizeof(T),
- reinterpret_cast<RawComparer>(cmp));
- }
-
- void Sort() {
- Sort(PointerValueCompare<T>);
- }
-
- void Truncate(int length) {
- ASSERT(length <= length_);
- length_ = length;
- }
-
- // Releases the array underlying this vector. Once disposed the
- // vector is empty.
- void Dispose() {
- DeleteArray(start_);
- start_ = NULL;
- length_ = 0;
- }
-
- inline Vector<T> operator+(int offset) {
- ASSERT(offset < length_);
- return Vector<T>(start_ + offset, length_ - offset);
- }
-
- // Factory method for creating empty vectors.
- static Vector<T> empty() { return Vector<T>(NULL, 0); }
-
- template<typename S>
- static Vector<T> cast(Vector<S> input) {
- return Vector<T>(reinterpret_cast<T*>(input.start()),
- input.length() * sizeof(S) / sizeof(T));
- }
-
- protected:
- void set_start(T* start) { start_ = start; }
-
- private:
- T* start_;
- int length_;
-};
-
-
-// A pointer that can only be set once and doesn't allow NULL values.
-template<typename T>
-class SetOncePointer {
- public:
- SetOncePointer() : pointer_(NULL) { }
-
- bool is_set() const { return pointer_ != NULL; }
-
- T* get() const {
- ASSERT(pointer_ != NULL);
- return pointer_;
- }
-
- void set(T* value) {
- ASSERT(pointer_ == NULL && value != NULL);
- pointer_ = value;
- }
-
- private:
- T* pointer_;
-};
-
-
-template <typename T, int kSize>
-class EmbeddedVector : public Vector<T> {
- public:
- EmbeddedVector() : Vector<T>(buffer_, kSize) { }
-
- explicit EmbeddedVector(T initial_value) : Vector<T>(buffer_, kSize) {
- for (int i = 0; i < kSize; ++i) {
- buffer_[i] = initial_value;
- }
- }
-
- // When copying, make underlying Vector to reference our buffer.
- EmbeddedVector(const EmbeddedVector& rhs)
- : Vector<T>(rhs) {
- memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
- set_start(buffer_);
- }
-
- EmbeddedVector& operator=(const EmbeddedVector& rhs) {
- if (this == &rhs) return *this;
- Vector<T>::operator=(rhs);
- memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
- this->set_start(buffer_);
- return *this;
- }
-
- private:
- T buffer_[kSize];
-};
-
-
-template <typename T>
-class ScopedVector : public Vector<T> {
- public:
- explicit ScopedVector(int length) : Vector<T>(NewArray<T>(length), length) { }
- ~ScopedVector() {
- DeleteArray(this->start());
- }
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedVector);
-};
-
-
-inline Vector<const char> CStrVector(const char* data) {
- return Vector<const char>(data, StrLength(data));
-}
-
-inline Vector<char> MutableCStrVector(char* data) {
- return Vector<char>(data, StrLength(data));
-}
-
-inline Vector<char> MutableCStrVector(char* data, int max) {
- int length = StrLength(data);
- return Vector<char>(data, (length < max) ? length : max);
-}
-
-
-/*
- * A class that collects values into a backing store.
- * Specialized versions of the class can allow access to the backing store
- * in different ways.
- * There is no guarantee that the backing store is contiguous (and, as a
- * consequence, no guarantees that consecutively added elements are adjacent
- * in memory). The collector may move elements unless it has guaranteed not
- * to.
- */
-template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
-class Collector {
- public:
- explicit Collector(int initial_capacity = kMinCapacity)
- : index_(0), size_(0) {
- if (initial_capacity < kMinCapacity) {
- initial_capacity = kMinCapacity;
- }
- current_chunk_ = Vector<T>::New(initial_capacity);
- }
-
- virtual ~Collector() {
- // Free backing store (in reverse allocation order).
- current_chunk_.Dispose();
- for (int i = chunks_.length() - 1; i >= 0; i--) {
- chunks_.at(i).Dispose();
- }
- }
-
- // Add a single element.
- inline void Add(T value) {
- if (index_ >= current_chunk_.length()) {
- Grow(1);
- }
- current_chunk_[index_] = value;
- index_++;
- size_++;
- }
-
- // Add a block of contiguous elements and return a Vector backed by the
- // memory area.
- // A basic Collector will keep this vector valid as long as the Collector
- // is alive.
- inline Vector<T> AddBlock(int size, T initial_value) {
- ASSERT(size > 0);
- if (size > current_chunk_.length() - index_) {
- Grow(size);
- }
- T* position = current_chunk_.start() + index_;
- index_ += size;
- size_ += size;
- for (int i = 0; i < size; i++) {
- position[i] = initial_value;
- }
- return Vector<T>(position, size);
- }
-
-
- // Add a contiguous block of elements and return a vector backed
- // by the added block.
- // A basic Collector will keep this vector valid as long as the Collector
- // is alive.
- inline Vector<T> AddBlock(Vector<const T> source) {
- if (source.length() > current_chunk_.length() - index_) {
- Grow(source.length());
- }
- T* position = current_chunk_.start() + index_;
- index_ += source.length();
- size_ += source.length();
- for (int i = 0; i < source.length(); i++) {
- position[i] = source[i];
- }
- return Vector<T>(position, source.length());
- }
-
-
- // Write the contents of the collector into the provided vector.
- void WriteTo(Vector<T> destination) {
- ASSERT(size_ <= destination.length());
- int position = 0;
- for (int i = 0; i < chunks_.length(); i++) {
- Vector<T> chunk = chunks_.at(i);
- for (int j = 0; j < chunk.length(); j++) {
- destination[position] = chunk[j];
- position++;
- }
- }
- for (int i = 0; i < index_; i++) {
- destination[position] = current_chunk_[i];
- position++;
- }
- }
-
- // Allocate a single contiguous vector, copy all the collected
- // elements to the vector, and return it.
- // The caller is responsible for freeing the memory of the returned
- // vector (e.g., using Vector::Dispose).
- Vector<T> ToVector() {
- Vector<T> new_store = Vector<T>::New(size_);
- WriteTo(new_store);
- return new_store;
- }
-
- // Resets the collector to be empty.
- virtual void Reset() {
- for (int i = chunks_.length() - 1; i >= 0; i--) {
- chunks_.at(i).Dispose();
- }
- chunks_.Rewind(0);
- index_ = 0;
- size_ = 0;
- }
-
- // Total number of elements added to collector so far.
- inline int size() { return size_; }
-
- protected:
- static const int kMinCapacity = 16;
- List<Vector<T> > chunks_;
- Vector<T> current_chunk_; // Block of memory currently being written into.
- int index_; // Current index in current chunk.
- int size_; // Total number of elements in collector.
-
- // Creates a new current chunk, and stores the old chunk in the chunks_ list.
- void Grow(int min_capacity) {
- ASSERT(growth_factor > 1);
- int growth = current_chunk_.length() * (growth_factor - 1);
- if (growth > max_growth) {
- growth = max_growth;
- }
- int new_capacity = current_chunk_.length() + growth;
- if (new_capacity < min_capacity) {
- new_capacity = min_capacity + growth;
- }
- Vector<T> new_chunk = Vector<T>::New(new_capacity);
- int new_index = PrepareGrow(new_chunk);
- if (index_ > 0) {
- chunks_.Add(current_chunk_.SubVector(0, index_));
- } else {
- // Can happen if the call to PrepareGrow moves everything into
- // the new chunk.
- current_chunk_.Dispose();
- }
- current_chunk_ = new_chunk;
- index_ = new_index;
- ASSERT(index_ + min_capacity <= current_chunk_.length());
- }
-
- // Before replacing the current chunk, give a subclass the option to move
- // some of the current data into the new chunk. The function may update
- // the current index_ value to represent data no longer in the current chunk.
- // Returns the initial index of the new chunk (after copied data).
- virtual int PrepareGrow(Vector<T> new_chunk) {
- return 0;
- }
-};
-
-
-/*
- * A collector that allows sequences of values to be guaranteed to
- * stay consecutive.
- * If the backing store grows while a sequence is active, the current
- * sequence might be moved, but after the sequence is ended, it will
- * not move again.
- * NOTICE: Blocks allocated using Collector::AddBlock(int) can move
- * as well, if inside an active sequence where another element is added.
- */
-template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
-class SequenceCollector : public Collector<T, growth_factor, max_growth> {
- public:
- explicit SequenceCollector(int initial_capacity)
- : Collector<T, growth_factor, max_growth>(initial_capacity),
- sequence_start_(kNoSequence) { }
-
- virtual ~SequenceCollector() {}
-
- void StartSequence() {
- ASSERT(sequence_start_ == kNoSequence);
- sequence_start_ = this->index_;
- }
-
- Vector<T> EndSequence() {
- ASSERT(sequence_start_ != kNoSequence);
- int sequence_start = sequence_start_;
- sequence_start_ = kNoSequence;
- if (sequence_start == this->index_) return Vector<T>();
- return this->current_chunk_.SubVector(sequence_start, this->index_);
- }
-
- // Drops the currently added sequence, and all collected elements in it.
- void DropSequence() {
- ASSERT(sequence_start_ != kNoSequence);
- int sequence_length = this->index_ - sequence_start_;
- this->index_ = sequence_start_;
- this->size_ -= sequence_length;
- sequence_start_ = kNoSequence;
- }
-
- virtual void Reset() {
- sequence_start_ = kNoSequence;
- this->Collector<T, growth_factor, max_growth>::Reset();
- }
-
- private:
- static const int kNoSequence = -1;
- int sequence_start_;
-
- // Move the currently active sequence to the new chunk.
- virtual int PrepareGrow(Vector<T> new_chunk) {
- if (sequence_start_ != kNoSequence) {
- int sequence_length = this->index_ - sequence_start_;
- // The new chunk is always larger than the current chunk, so there
- // is room for the copy.
- ASSERT(sequence_length < new_chunk.length());
- for (int i = 0; i < sequence_length; i++) {
- new_chunk[i] = this->current_chunk_[sequence_start_ + i];
- }
- this->index_ = sequence_start_;
- sequence_start_ = 0;
- return sequence_length;
- }
- return 0;
- }
-};
-
-
-// Compare ASCII/16bit chars to ASCII/16bit chars.
-template <typename lchar, typename rchar>
-static inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
- const lchar* limit = lhs + chars;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- if (sizeof(*lhs) == sizeof(*rhs)) {
- // Number of characters in a uintptr_t.
- static const int kStepSize = sizeof(uintptr_t) / sizeof(*lhs); // NOLINT
- while (lhs <= limit - kStepSize) {
- if (*reinterpret_cast<const uintptr_t*>(lhs) !=
- *reinterpret_cast<const uintptr_t*>(rhs)) {
- break;
- }
- lhs += kStepSize;
- rhs += kStepSize;
- }
- }
-#endif
- while (lhs < limit) {
- int r = static_cast<int>(*lhs) - static_cast<int>(*rhs);
- if (r != 0) return r;
- ++lhs;
- ++rhs;
- }
- return 0;
-}
-
-
-// Calculate 10^exponent.
-static inline int TenToThe(int exponent) {
- ASSERT(exponent <= 9);
- ASSERT(exponent >= 1);
- int answer = 10;
- for (int i = 1; i < exponent; i++) answer *= 10;
- return answer;
-}
-
-
-// The type-based aliasing rule allows the compiler to assume that pointers of
-// different types (for some definition of different) never alias each other.
-// Thus the following code does not work:
-//
-// float f = foo();
-// int fbits = *(int*)(&f);
-//
-// The compiler 'knows' that the int pointer can't refer to f since the types
-// don't match, so the compiler may cache f in a register, leaving random data
-// in fbits. Using C++ style casts makes no difference, however a pointer to
-// char data is assumed to alias any other pointer. This is the 'memcpy
-// exception'.
-//
-// Bit_cast uses the memcpy exception to move the bits from a variable of one
-// type of a variable of another type. Of course the end result is likely to
-// be implementation dependent. Most compilers (gcc-4.2 and MSVC 2005)
-// will completely optimize BitCast away.
-//
-// There is an additional use for BitCast.
-// Recent gccs will warn when they see casts that may result in breakage due to
-// the type-based aliasing rule. If you have checked that there is no breakage
-// you can use BitCast to cast one pointer type to another. This confuses gcc
-// enough that it can no longer see that you have cast one pointer type to
-// another thus avoiding the warning.
-
-// We need different implementations of BitCast for pointer and non-pointer
-// values. We use partial specialization of auxiliary struct to work around
-// issues with template functions overloading.
-template <class Dest, class Source>
-struct BitCastHelper {
- STATIC_ASSERT(sizeof(Dest) == sizeof(Source));
-
- INLINE(static Dest cast(const Source& source)) {
- Dest dest;
- memcpy(&dest, &source, sizeof(dest));
- return dest;
- }
-};
-
-template <class Dest, class Source>
-struct BitCastHelper<Dest, Source*> {
- INLINE(static Dest cast(Source* source)) {
- return BitCastHelper<Dest, uintptr_t>::
- cast(reinterpret_cast<uintptr_t>(source));
- }
-};
-
-template <class Dest, class Source>
-INLINE(Dest BitCast(const Source& source));
-
-template <class Dest, class Source>
-inline Dest BitCast(const Source& source) {
- return BitCastHelper<Dest, Source>::cast(source);
-}
-
-} } // namespace v8::internal
-
-#endif // V8_UTILS_H_
diff --git a/src/3rdparty/v8/src/v8-counters.cc b/src/3rdparty/v8/src/v8-counters.cc
deleted file mode 100644
index c6aa9cb..0000000
--- a/src/3rdparty/v8/src/v8-counters.cc
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "v8-counters.h"
-
-namespace v8 {
-namespace internal {
-
-Counters::Counters() {
-#define HT(name, caption) \
- HistogramTimer name = { #caption, NULL, false, 0, 0 }; \
- name##_ = name;
- HISTOGRAM_TIMER_LIST(HT)
-#undef HT
-
-#define SC(name, caption) \
- StatsCounter name = { "c:" #caption, NULL, false };\
- name##_ = name;
-
- STATS_COUNTER_LIST_1(SC)
- STATS_COUNTER_LIST_2(SC)
-#undef SC
-
- StatsCounter state_counters[] = {
-#define COUNTER_NAME(name) \
- { "c:V8.State" #name, NULL, false },
- STATE_TAG_LIST(COUNTER_NAME)
-#undef COUNTER_NAME
- };
-
- for (int i = 0; i < kSlidingStateWindowCounterCount; ++i) {
- state_counters_[i] = state_counters[i];
- }
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/v8-counters.h b/src/3rdparty/v8/src/v8-counters.h
deleted file mode 100644
index 5e765b2..0000000
--- a/src/3rdparty/v8/src/v8-counters.h
+++ /dev/null
@@ -1,311 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_V8_COUNTERS_H_
-#define V8_V8_COUNTERS_H_
-
-#include "allocation.h"
-#include "counters.h"
-#include "v8globals.h"
-
-namespace v8 {
-namespace internal {
-
-#define HISTOGRAM_TIMER_LIST(HT) \
- /* Garbage collection timers. */ \
- HT(gc_compactor, V8.GCCompactor) \
- HT(gc_scavenger, V8.GCScavenger) \
- HT(gc_context, V8.GCContext) /* GC context cleanup time */ \
- /* Parsing timers. */ \
- HT(parse, V8.Parse) \
- HT(parse_lazy, V8.ParseLazy) \
- HT(pre_parse, V8.PreParse) \
- /* Total compilation times. */ \
- HT(compile, V8.Compile) \
- HT(compile_eval, V8.CompileEval) \
- HT(compile_lazy, V8.CompileLazy)
-
-
-// WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC
-// Intellisense to crash. It was broken into two macros (each of length 40
-// lines) rather than one macro (of length about 80 lines) to work around
-// this problem. Please avoid using recursive macros of this length when
-// possible.
-#define STATS_COUNTER_LIST_1(SC) \
- /* Global Handle Count*/ \
- SC(global_handles, V8.GlobalHandles) \
- /* Mallocs from PCRE */ \
- SC(pcre_mallocs, V8.PcreMallocCount) \
- /* OS Memory allocated */ \
- SC(memory_allocated, V8.OsMemoryAllocated) \
- SC(normalized_maps, V8.NormalizedMaps) \
- SC(props_to_dictionary, V8.ObjectPropertiesToDictionary) \
- SC(elements_to_dictionary, V8.ObjectElementsToDictionary) \
- SC(alive_after_last_gc, V8.AliveAfterLastGC) \
- SC(objs_since_last_young, V8.ObjsSinceLastYoung) \
- SC(objs_since_last_full, V8.ObjsSinceLastFull) \
- SC(symbol_table_capacity, V8.SymbolTableCapacity) \
- SC(number_of_symbols, V8.NumberOfSymbols) \
- SC(script_wrappers, V8.ScriptWrappers) \
- SC(call_initialize_stubs, V8.CallInitializeStubs) \
- SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs) \
- SC(call_normal_stubs, V8.CallNormalStubs) \
- SC(call_megamorphic_stubs, V8.CallMegamorphicStubs) \
- SC(arguments_adaptors, V8.ArgumentsAdaptors) \
- SC(compilation_cache_hits, V8.CompilationCacheHits) \
- SC(compilation_cache_misses, V8.CompilationCacheMisses) \
- SC(regexp_cache_hits, V8.RegExpCacheHits) \
- SC(regexp_cache_misses, V8.RegExpCacheMisses) \
- SC(string_ctor_calls, V8.StringConstructorCalls) \
- SC(string_ctor_conversions, V8.StringConstructorConversions) \
- SC(string_ctor_cached_number, V8.StringConstructorCachedNumber) \
- SC(string_ctor_string_value, V8.StringConstructorStringValue) \
- SC(string_ctor_gc_required, V8.StringConstructorGCRequired) \
- /* Amount of evaled source code. */ \
- SC(total_eval_size, V8.TotalEvalSize) \
- /* Amount of loaded source code. */ \
- SC(total_load_size, V8.TotalLoadSize) \
- /* Amount of parsed source code. */ \
- SC(total_parse_size, V8.TotalParseSize) \
- /* Amount of source code skipped over using preparsing. */ \
- SC(total_preparse_skipped, V8.TotalPreparseSkipped) \
- /* Number of symbol lookups skipped using preparsing */ \
- SC(total_preparse_symbols_skipped, V8.TotalPreparseSymbolSkipped) \
- /* Amount of compiled source code. */ \
- SC(total_compile_size, V8.TotalCompileSize) \
- /* Amount of source code compiled with the old codegen. */ \
- SC(total_old_codegen_source_size, V8.TotalOldCodegenSourceSize) \
- /* Amount of source code compiled with the full codegen. */ \
- SC(total_full_codegen_source_size, V8.TotalFullCodegenSourceSize) \
- /* Number of contexts created from scratch. */ \
- SC(contexts_created_from_scratch, V8.ContextsCreatedFromScratch) \
- /* Number of contexts created by partial snapshot. */ \
- SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot) \
- /* Number of code objects found from pc. */ \
- SC(pc_to_code, V8.PcToCode) \
- SC(pc_to_code_cached, V8.PcToCodeCached)
-
-
-#define STATS_COUNTER_LIST_2(SC) \
- /* Number of code stubs. */ \
- SC(code_stubs, V8.CodeStubs) \
- /* Amount of stub code. */ \
- SC(total_stubs_code_size, V8.TotalStubsCodeSize) \
- /* Amount of (JS) compiled code. */ \
- SC(total_compiled_code_size, V8.TotalCompiledCodeSize) \
- SC(gc_compactor_caused_by_request, V8.GCCompactorCausedByRequest) \
- SC(gc_compactor_caused_by_promoted_data, \
- V8.GCCompactorCausedByPromotedData) \
- SC(gc_compactor_caused_by_oldspace_exhaustion, \
- V8.GCCompactorCausedByOldspaceExhaustion) \
- SC(gc_compactor_caused_by_weak_handles, \
- V8.GCCompactorCausedByWeakHandles) \
- SC(gc_last_resort_from_js, V8.GCLastResortFromJS) \
- SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles) \
- SC(map_slow_to_fast_elements, V8.MapSlowToFastElements) \
- SC(map_fast_to_slow_elements, V8.MapFastToSlowElements) \
- SC(map_to_external_array_elements, V8.MapToExternalArrayElements) \
- /* How is the generic keyed-load stub used? */ \
- SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \
- SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \
- SC(keyed_load_generic_lookup_cache, V8.KeyedLoadGenericLookupCache) \
- SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow) \
- SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow) \
- /* How is the generic keyed-call stub used? */ \
- SC(keyed_call_generic_smi_fast, V8.KeyedCallGenericSmiFast) \
- SC(keyed_call_generic_smi_dict, V8.KeyedCallGenericSmiDict) \
- SC(keyed_call_generic_lookup_cache, V8.KeyedCallGenericLookupCache) \
- SC(keyed_call_generic_lookup_dict, V8.KeyedCallGenericLookupDict) \
- SC(keyed_call_generic_value_type, V8.KeyedCallGenericValueType) \
- SC(keyed_call_generic_slow, V8.KeyedCallGenericSlow) \
- SC(keyed_call_generic_slow_load, V8.KeyedCallGenericSlowLoad) \
- /* Count how much the monomorphic keyed-load stubs are hit. */ \
- SC(keyed_load_function_prototype, V8.KeyedLoadFunctionPrototype) \
- SC(keyed_load_string_length, V8.KeyedLoadStringLength) \
- SC(keyed_load_array_length, V8.KeyedLoadArrayLength) \
- SC(keyed_load_constant_function, V8.KeyedLoadConstantFunction) \
- SC(keyed_load_field, V8.KeyedLoadField) \
- SC(keyed_load_callback, V8.KeyedLoadCallback) \
- SC(keyed_load_interceptor, V8.KeyedLoadInterceptor) \
- SC(keyed_load_inline, V8.KeyedLoadInline) \
- SC(keyed_load_inline_miss, V8.KeyedLoadInlineMiss) \
- SC(named_load_inline, V8.NamedLoadInline) \
- SC(named_load_inline_miss, V8.NamedLoadInlineMiss) \
- SC(named_load_global_inline, V8.NamedLoadGlobalInline) \
- SC(named_load_global_inline_miss, V8.NamedLoadGlobalInlineMiss) \
- SC(dont_delete_hint_hit, V8.DontDeleteHintHit) \
- SC(dont_delete_hint_miss, V8.DontDeleteHintMiss) \
- SC(named_load_global_stub, V8.NamedLoadGlobalStub) \
- SC(named_load_global_stub_miss, V8.NamedLoadGlobalStubMiss) \
- SC(keyed_store_field, V8.KeyedStoreField) \
- SC(named_store_inline_field, V8.NamedStoreInlineField) \
- SC(keyed_store_inline, V8.KeyedStoreInline) \
- SC(named_load_inline_generic, V8.NamedLoadInlineGeneric) \
- SC(named_load_inline_field, V8.NamedLoadInlineFast) \
- SC(keyed_load_inline_generic, V8.KeyedLoadInlineGeneric) \
- SC(keyed_load_inline_fast, V8.KeyedLoadInlineFast) \
- SC(named_load_full, V8.NamedLoadFull) \
- SC(keyed_load_full, V8.KeyedLoadFull) \
- SC(keyed_store_inline_generic, V8.KeyedStoreInlineGeneric) \
- SC(keyed_store_inline_fast, V8.KeyedStoreInlineFast) \
- SC(named_store_inline_generic, V8.NamedStoreInlineGeneric) \
- SC(named_store_inline_fast, V8.NamedStoreInlineFast) \
- SC(keyed_store_full, V8.KeyedStoreFull) \
- SC(named_store_full, V8.NamedStoreFull) \
- SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss) \
- SC(named_store_global_inline, V8.NamedStoreGlobalInline) \
- SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
- SC(store_normal_miss, V8.StoreNormalMiss) \
- SC(store_normal_hit, V8.StoreNormalHit) \
- SC(cow_arrays_created_stub, V8.COWArraysCreatedStub) \
- SC(cow_arrays_created_runtime, V8.COWArraysCreatedRuntime) \
- SC(cow_arrays_converted, V8.COWArraysConverted) \
- SC(call_miss, V8.CallMiss) \
- SC(keyed_call_miss, V8.KeyedCallMiss) \
- SC(load_miss, V8.LoadMiss) \
- SC(keyed_load_miss, V8.KeyedLoadMiss) \
- SC(call_const, V8.CallConst) \
- SC(call_const_fast_api, V8.CallConstFastApi) \
- SC(call_const_interceptor, V8.CallConstInterceptor) \
- SC(call_const_interceptor_fast_api, V8.CallConstInterceptorFastApi) \
- SC(call_global_inline, V8.CallGlobalInline) \
- SC(call_global_inline_miss, V8.CallGlobalInlineMiss) \
- SC(constructed_objects, V8.ConstructedObjects) \
- SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
- SC(constructed_objects_stub, V8.ConstructedObjectsStub) \
- SC(negative_lookups, V8.NegativeLookups) \
- SC(negative_lookups_miss, V8.NegativeLookupsMiss) \
- SC(array_function_runtime, V8.ArrayFunctionRuntime) \
- SC(array_function_native, V8.ArrayFunctionNative) \
- SC(for_in, V8.ForIn) \
- SC(enum_cache_hits, V8.EnumCacheHits) \
- SC(enum_cache_misses, V8.EnumCacheMisses) \
- SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
- SC(compute_entry_frame, V8.ComputeEntryFrame) \
- SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls) \
- SC(generic_binary_stub_calls_regs, V8.GenericBinaryStubCallsRegs) \
- SC(string_add_runtime, V8.StringAddRuntime) \
- SC(string_add_native, V8.StringAddNative) \
- SC(string_add_runtime_ext_to_ascii, V8.StringAddRuntimeExtToAscii) \
- SC(sub_string_runtime, V8.SubStringRuntime) \
- SC(sub_string_native, V8.SubStringNative) \
- SC(string_add_make_two_char, V8.StringAddMakeTwoChar) \
- SC(string_compare_native, V8.StringCompareNative) \
- SC(string_compare_runtime, V8.StringCompareRuntime) \
- SC(regexp_entry_runtime, V8.RegExpEntryRuntime) \
- SC(regexp_entry_native, V8.RegExpEntryNative) \
- SC(number_to_string_native, V8.NumberToStringNative) \
- SC(number_to_string_runtime, V8.NumberToStringRuntime) \
- SC(math_acos, V8.MathAcos) \
- SC(math_asin, V8.MathAsin) \
- SC(math_atan, V8.MathAtan) \
- SC(math_atan2, V8.MathAtan2) \
- SC(math_ceil, V8.MathCeil) \
- SC(math_cos, V8.MathCos) \
- SC(math_exp, V8.MathExp) \
- SC(math_floor, V8.MathFloor) \
- SC(math_log, V8.MathLog) \
- SC(math_pow, V8.MathPow) \
- SC(math_round, V8.MathRound) \
- SC(math_sin, V8.MathSin) \
- SC(math_sqrt, V8.MathSqrt) \
- SC(math_tan, V8.MathTan) \
- SC(transcendental_cache_hit, V8.TranscendentalCacheHit) \
- SC(transcendental_cache_miss, V8.TranscendentalCacheMiss) \
- SC(stack_interrupts, V8.StackInterrupts) \
- SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks) \
- SC(other_ticks, V8.OtherTicks) \
- SC(js_opt_ticks, V8.JsOptTicks) \
- SC(js_non_opt_ticks, V8.JsNonoptTicks) \
- SC(js_other_ticks, V8.JsOtherTicks) \
- SC(smi_checks_removed, V8.SmiChecksRemoved) \
- SC(map_checks_removed, V8.MapChecksRemoved) \
- SC(quote_json_char_count, V8.QuoteJsonCharacterCount) \
- SC(quote_json_char_recount, V8.QuoteJsonCharacterReCount)
-
-
-// This file contains all the v8 counters that are in use.
-class Counters {
- public:
-#define HT(name, caption) \
- HistogramTimer* name() { return &name##_; }
- HISTOGRAM_TIMER_LIST(HT)
-#undef HT
-
-#define SC(name, caption) \
- StatsCounter* name() { return &name##_; }
- STATS_COUNTER_LIST_1(SC)
- STATS_COUNTER_LIST_2(SC)
-#undef SC
-
- enum Id {
-#define RATE_ID(name, caption) k_##name,
- HISTOGRAM_TIMER_LIST(RATE_ID)
-#undef RATE_ID
-#define COUNTER_ID(name, caption) k_##name,
- STATS_COUNTER_LIST_1(COUNTER_ID)
- STATS_COUNTER_LIST_2(COUNTER_ID)
-#undef COUNTER_ID
-#define COUNTER_ID(name) k_##name,
- STATE_TAG_LIST(COUNTER_ID)
-#undef COUNTER_ID
- stats_counter_count
- };
-
- StatsCounter* state_counters(StateTag state) {
- return &state_counters_[state];
- }
-
- private:
-#define HT(name, caption) \
- HistogramTimer name##_;
- HISTOGRAM_TIMER_LIST(HT)
-#undef HT
-
-#define SC(name, caption) \
- StatsCounter name##_;
- STATS_COUNTER_LIST_1(SC)
- STATS_COUNTER_LIST_2(SC)
-#undef SC
-
- enum {
-#define COUNTER_ID(name) __##name,
- STATE_TAG_LIST(COUNTER_ID)
-#undef COUNTER_ID
- kSlidingStateWindowCounterCount
- };
-
- // Sliding state window counters.
- StatsCounter state_counters_[kSlidingStateWindowCounterCount];
- friend class Isolate;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(Counters);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_V8_COUNTERS_H_
diff --git a/src/3rdparty/v8/src/v8.cc b/src/3rdparty/v8/src/v8.cc
deleted file mode 100644
index f89ed83..0000000
--- a/src/3rdparty/v8/src/v8.cc
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "isolate.h"
-#include "bootstrapper.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "heap-profiler.h"
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "log.h"
-#include "runtime-profiler.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-static Mutex* init_once_mutex = OS::CreateMutex();
-static bool init_once_called = false;
-
-bool V8::is_running_ = false;
-bool V8::has_been_setup_ = false;
-bool V8::has_been_disposed_ = false;
-bool V8::has_fatal_error_ = false;
-bool V8::use_crankshaft_ = true;
-
-
-bool V8::Initialize(Deserializer* des) {
- InitializeOncePerProcess();
-
- // The current thread may not yet had entered an isolate to run.
- // Note the Isolate::Current() may be non-null because for various
- // initialization purposes an initializing thread may be assigned an isolate
- // but not actually enter it.
- if (i::Isolate::CurrentPerIsolateThreadData() == NULL) {
- i::Isolate::EnterDefaultIsolate();
- }
-
- ASSERT(i::Isolate::CurrentPerIsolateThreadData() != NULL);
- ASSERT(i::Isolate::CurrentPerIsolateThreadData()->thread_id() ==
- i::Thread::GetThreadLocalInt(i::Isolate::thread_id_key()));
- ASSERT(i::Isolate::CurrentPerIsolateThreadData()->isolate() ==
- i::Isolate::Current());
-
- if (IsDead()) return false;
-
- Isolate* isolate = Isolate::Current();
- if (isolate->IsInitialized()) return true;
-
- is_running_ = true;
- has_been_setup_ = true;
- has_fatal_error_ = false;
- has_been_disposed_ = false;
-
- return isolate->Init(des);
-}
-
-
-void V8::SetFatalError() {
- is_running_ = false;
- has_fatal_error_ = true;
-}
-
-
-void V8::TearDown() {
- Isolate* isolate = Isolate::Current();
- ASSERT(isolate->IsDefaultIsolate());
-
- if (!has_been_setup_ || has_been_disposed_) return;
- isolate->TearDown();
-
- is_running_ = false;
- has_been_disposed_ = true;
-}
-
-
-static uint32_t random_seed() {
- if (FLAG_random_seed == 0) {
- return random();
- }
- return FLAG_random_seed;
-}
-
-
-typedef struct {
- uint32_t hi;
- uint32_t lo;
-} random_state;
-
-
-// Random number generator using George Marsaglia's MWC algorithm.
-static uint32_t random_base(random_state *state) {
- // Initialize seed using the system random(). If one of the seeds
- // should ever become zero again, or if random() returns zero, we
- // avoid getting stuck with zero bits in hi or lo by re-initializing
- // them on demand.
- if (state->hi == 0) state->hi = random_seed();
- if (state->lo == 0) state->lo = random_seed();
-
- // Mix the bits.
- state->hi = 36969 * (state->hi & 0xFFFF) + (state->hi >> 16);
- state->lo = 18273 * (state->lo & 0xFFFF) + (state->lo >> 16);
- return (state->hi << 16) + (state->lo & 0xFFFF);
-}
-
-
-// Used by JavaScript APIs
-uint32_t V8::Random(Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- // TODO(isolates): move lo and hi to isolate
- static random_state state = {0, 0};
- return random_base(&state);
-}
-
-
-// Used internally by the JIT and memory allocator for security
-// purposes. So, we keep a different state to prevent informations
-// leaks that could be used in an exploit.
-uint32_t V8::RandomPrivate(Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- // TODO(isolates): move lo and hi to isolate
- static random_state state = {0, 0};
- return random_base(&state);
-}
-
-
-bool V8::IdleNotification() {
- // Returning true tells the caller that there is no need to call
- // IdleNotification again.
- if (!FLAG_use_idle_notification) return true;
-
- // Tell the heap that it may want to adjust.
- return HEAP->IdleNotification();
-}
-
-
-// Use a union type to avoid type-aliasing optimizations in GCC.
-typedef union {
- double double_value;
- uint64_t uint64_t_value;
-} double_int_union;
-
-
-Object* V8::FillHeapNumberWithRandom(Object* heap_number, Isolate* isolate) {
- uint64_t random_bits = Random(isolate);
- // Make a double* from address (heap_number + sizeof(double)).
- double_int_union* r = reinterpret_cast<double_int_union*>(
- reinterpret_cast<char*>(heap_number) +
- HeapNumber::kValueOffset - kHeapObjectTag);
- // Convert 32 random bits to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- const double binary_million = 1048576.0;
- r->double_value = binary_million;
- r->uint64_t_value |= random_bits;
- r->double_value -= binary_million;
-
- return heap_number;
-}
-
-
-void V8::InitializeOncePerProcess() {
- ScopedLock lock(init_once_mutex);
- if (init_once_called) return;
- init_once_called = true;
-
- // Setup the platform OS support.
- OS::Setup();
-
-#if defined(V8_TARGET_ARCH_ARM) && !defined(USE_ARM_EABI)
- use_crankshaft_ = false;
-#else
- use_crankshaft_ = FLAG_crankshaft;
-#endif
-
- if (Serializer::enabled()) {
- use_crankshaft_ = false;
- }
-
- CPU::Setup();
- if (!CPU::SupportsCrankshaft()) {
- use_crankshaft_ = false;
- }
-
- // Peephole optimization might interfere with deoptimization.
- FLAG_peephole_optimization = !use_crankshaft_;
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/v8.h b/src/3rdparty/v8/src/v8.h
deleted file mode 100644
index 776fa9c..0000000
--- a/src/3rdparty/v8/src/v8.h
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-//
-// Top include for all V8 .cc files.
-//
-
-#ifndef V8_V8_H_
-#define V8_V8_H_
-
-#if defined(GOOGLE3)
-// Google3 special flag handling.
-#if defined(DEBUG) && defined(NDEBUG)
-// If both are defined in Google3, then we are building an optimized v8 with
-// assertions enabled.
-#undef NDEBUG
-#elif !defined(DEBUG) && !defined(NDEBUG)
-// If neither is defined in Google3, then we are building a debug v8. Mark it
-// as such.
-#define DEBUG
-#endif
-#endif // defined(GOOGLE3)
-
-// V8 only uses DEBUG, but included external files
-// may use NDEBUG - make sure they are consistent.
-#if defined(DEBUG) && defined(NDEBUG)
-#error both DEBUG and NDEBUG are set
-#endif
-
-// Basic includes
-#include "../include/v8.h"
-#include "v8globals.h"
-#include "v8checks.h"
-#include "allocation.h"
-#include "v8utils.h"
-#include "flags.h"
-
-// Objects & heap
-#include "objects-inl.h"
-#include "spaces-inl.h"
-#include "heap-inl.h"
-#include "log-inl.h"
-#include "cpu-profiler-inl.h"
-#include "handles-inl.h"
-
-namespace v8 {
-namespace internal {
-
-class Deserializer;
-
-class V8 : public AllStatic {
- public:
- // Global actions.
-
- // If Initialize is called with des == NULL, the initial state is
- // created from scratch. If a non-null Deserializer is given, the
- // initial state is created by reading the deserialized data into an
- // empty heap.
- static bool Initialize(Deserializer* des);
- static void TearDown();
- static bool IsRunning() { return is_running_; }
- static bool UseCrankshaft() { return use_crankshaft_; }
- // To be dead you have to have lived
- // TODO(isolates): move IsDead to Isolate.
- static bool IsDead() { return has_fatal_error_ || has_been_disposed_; }
- static void SetFatalError();
-
- // Report process out of memory. Implementation found in api.cc.
- static void FatalProcessOutOfMemory(const char* location,
- bool take_snapshot = false);
-
- // Random number generation support. Not cryptographically safe.
- static uint32_t Random(Isolate* isolate);
- // We use random numbers internally in memory allocation and in the
- // compilers for security. In order to prevent information leaks we
- // use a separate random state for internal random number
- // generation.
- static uint32_t RandomPrivate(Isolate* isolate);
- static Object* FillHeapNumberWithRandom(Object* heap_number,
- Isolate* isolate);
-
- // Idle notification directly from the API.
- static bool IdleNotification();
-
- private:
- static void InitializeOncePerProcess();
-
- // True if engine is currently running
- static bool is_running_;
- // True if V8 has ever been run
- static bool has_been_setup_;
- // True if error has been signaled for current engine
- // (reset to false if engine is restarted)
- static bool has_fatal_error_;
- // True if engine has been shut down
- // (reset if engine is restarted)
- static bool has_been_disposed_;
- // True if we are using the crankshaft optimizing compiler.
- static bool use_crankshaft_;
-};
-
-} } // namespace v8::internal
-
-namespace i = v8::internal;
-
-#endif // V8_V8_H_
diff --git a/src/3rdparty/v8/src/v8checks.h b/src/3rdparty/v8/src/v8checks.h
deleted file mode 100644
index 9857f73..0000000
--- a/src/3rdparty/v8/src/v8checks.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_V8CHECKS_H_
-#define V8_V8CHECKS_H_
-
-#include "checks.h"
-
-void API_Fatal(const char* location, const char* format, ...);
-
-namespace v8 {
- class Value;
- template <class T> class Handle;
-
-namespace internal {
- intptr_t HeapObjectTagMask();
-
-} } // namespace v8::internal
-
-
-void CheckNonEqualsHelper(const char* file,
- int line,
- const char* unexpected_source,
- v8::Handle<v8::Value> unexpected,
- const char* value_source,
- v8::Handle<v8::Value> value);
-
-void CheckEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- v8::Handle<v8::Value> expected,
- const char* value_source,
- v8::Handle<v8::Value> value);
-
-#define ASSERT_TAG_ALIGNED(address) \
- ASSERT((reinterpret_cast<intptr_t>(address) & HeapObjectTagMask()) == 0)
-
-#define ASSERT_SIZE_TAG_ALIGNED(size) ASSERT((size & HeapObjectTagMask()) == 0)
-
-#endif // V8_V8CHECKS_H_
diff --git a/src/3rdparty/v8/src/v8dll-main.cc b/src/3rdparty/v8/src/v8dll-main.cc
deleted file mode 100644
index 3d4b3a3..0000000
--- a/src/3rdparty/v8/src/v8dll-main.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <windows.h>
-
-#include "../include/v8.h"
-
-extern "C" {
-BOOL WINAPI DllMain(HANDLE hinstDLL,
- DWORD dwReason,
- LPVOID lpvReserved) {
- // Do nothing.
- return TRUE;
-}
-}
diff --git a/src/3rdparty/v8/src/v8globals.h b/src/3rdparty/v8/src/v8globals.h
deleted file mode 100644
index 2a01dfd..0000000
--- a/src/3rdparty/v8/src/v8globals.h
+++ /dev/null
@@ -1,486 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_V8GLOBALS_H_
-#define V8_V8GLOBALS_H_
-
-#include "globals.h"
-
-namespace v8 {
-namespace internal {
-
-// This file contains constants and global declarations related to the
-// V8 system.
-
-// Mask for the sign bit in a smi.
-const intptr_t kSmiSignMask = kIntptrSignBit;
-
-const int kObjectAlignmentBits = kPointerSizeLog2;
-const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
-const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
-
-// Desired alignment for pointers.
-const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
-const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
-
-// Desired alignment for maps.
-#if V8_HOST_ARCH_64_BIT
-const intptr_t kMapAlignmentBits = kObjectAlignmentBits;
-#else
-const intptr_t kMapAlignmentBits = kObjectAlignmentBits + 3;
-#endif
-const intptr_t kMapAlignment = (1 << kMapAlignmentBits);
-const intptr_t kMapAlignmentMask = kMapAlignment - 1;
-
-// Desired alignment for generated code is 32 bytes (to improve cache line
-// utilization).
-const int kCodeAlignmentBits = 5;
-const intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
-const intptr_t kCodeAlignmentMask = kCodeAlignment - 1;
-
-// Tag information for Failure.
-const int kFailureTag = 3;
-const int kFailureTagSize = 2;
-const intptr_t kFailureTagMask = (1 << kFailureTagSize) - 1;
-
-
-// Zap-value: The value used for zapping dead objects.
-// Should be a recognizable hex value tagged as a failure.
-#ifdef V8_HOST_ARCH_64_BIT
-const Address kZapValue =
- reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeef));
-const Address kHandleZapValue =
- reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddeaf));
-const Address kFromSpaceZapValue =
- reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf));
-const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb);
-const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeef);
-#else
-const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef);
-const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf);
-const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf);
-const uint32_t kSlotsZapValue = 0xbeefdeef;
-const uint32_t kDebugZapValue = 0xbadbaddb;
-#endif
-
-
-// Number of bits to represent the page size for paged spaces. The value of 13
-// gives 8K bytes per page.
-const int kPageSizeBits = 13;
-
-// On Intel architecture, cache line size is 64 bytes.
-// On ARM it may be less (32 bytes), but as far this constant is
-// used for aligning data, it doesn't hurt to align on a greater value.
-const int kProcessorCacheLineSize = 64;
-
-// Constants relevant to double precision floating point numbers.
-
-// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
-// other bits set.
-const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
-// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
-const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
-
-
-// -----------------------------------------------------------------------------
-// Forward declarations for frequently used classes
-// (sorted alphabetically)
-
-class AccessorInfo;
-class Allocation;
-class Arguments;
-class Assembler;
-class AssertNoAllocation;
-class BreakableStatement;
-class Code;
-class CodeGenerator;
-class CodeStub;
-class Context;
-class Debug;
-class Debugger;
-class DebugInfo;
-class Descriptor;
-class DescriptorArray;
-class Expression;
-class ExternalReference;
-class FixedArray;
-class FunctionEntry;
-class FunctionLiteral;
-class FunctionTemplateInfo;
-class NumberDictionary;
-class StringDictionary;
-template <typename T> class Handle;
-class Heap;
-class HeapObject;
-class IC;
-class InterceptorInfo;
-class IterationStatement;
-class JSArray;
-class JSFunction;
-class JSObject;
-class LargeObjectSpace;
-class LookupResult;
-class MacroAssembler;
-class Map;
-class MapSpace;
-class MarkCompactCollector;
-class NewSpace;
-class NodeVisitor;
-class Object;
-class MaybeObject;
-class OldSpace;
-class Property;
-class Proxy;
-class RegExpNode;
-struct RegExpCompileData;
-class RegExpTree;
-class RegExpCompiler;
-class RegExpVisitor;
-class Scope;
-template<class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
-class SerializedScopeInfo;
-class Script;
-class Slot;
-class Smi;
-template <typename Config, class Allocator = FreeStoreAllocationPolicy>
- class SplayTree;
-class Statement;
-class String;
-class Struct;
-class SwitchStatement;
-class AstVisitor;
-class Variable;
-class VariableProxy;
-class RelocInfo;
-class Deserializer;
-class MessageLocation;
-class ObjectGroup;
-class TickSample;
-class VirtualMemory;
-class Mutex;
-
-typedef bool (*WeakSlotCallback)(Object** pointer);
-
-// -----------------------------------------------------------------------------
-// Miscellaneous
-
-// NOTE: SpaceIterator depends on AllocationSpace enumeration values being
-// consecutive.
-enum AllocationSpace {
- NEW_SPACE, // Semispaces collected with copying collector.
- OLD_POINTER_SPACE, // May contain pointers to new space.
- OLD_DATA_SPACE, // Must not have pointers to new space.
- CODE_SPACE, // No pointers to new space, marked executable.
- MAP_SPACE, // Only and all map objects.
- CELL_SPACE, // Only and all cell objects.
- LO_SPACE, // Promoted large objects.
-
- FIRST_SPACE = NEW_SPACE,
- LAST_SPACE = LO_SPACE,
- FIRST_PAGED_SPACE = OLD_POINTER_SPACE,
- LAST_PAGED_SPACE = CELL_SPACE
-};
-const int kSpaceTagSize = 3;
-const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
-
-
-// A flag that indicates whether objects should be pretenured when
-// allocated (allocated directly into the old generation) or not
-// (allocated in the young generation if the object size and type
-// allows).
-enum PretenureFlag { NOT_TENURED, TENURED };
-
-enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
-
-enum Executability { NOT_EXECUTABLE, EXECUTABLE };
-
-enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, VISIT_ONLY_STRONG };
-
-// Flag indicating whether code is built into the VM (one of the natives files).
-enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };
-
-
-// A CodeDesc describes a buffer holding instructions and relocation
-// information. The instructions start at the beginning of the buffer
-// and grow forward, the relocation information starts at the end of
-// the buffer and grows backward.
-//
-// |<--------------- buffer_size ---------------->|
-// |<-- instr_size -->| |<-- reloc_size -->|
-// +==================+========+==================+
-// | instructions | free | reloc info |
-// +==================+========+==================+
-// ^
-// |
-// buffer
-
-struct CodeDesc {
- byte* buffer;
- int buffer_size;
- int instr_size;
- int reloc_size;
- Assembler* origin;
-};
-
-
-// Callback function on object slots, used for iterating heap object slots in
-// HeapObjects, global pointers to heap objects, etc. The callback allows the
-// callback function to change the value of the slot.
-typedef void (*ObjectSlotCallback)(HeapObject** pointer);
-
-
-// Callback function used for iterating objects in heap spaces,
-// for example, scanning heap objects.
-typedef int (*HeapObjectCallback)(HeapObject* obj);
-
-
-// Callback function used for checking constraints when copying/relocating
-// objects. Returns true if an object can be copied/relocated from its
-// old_addr to a new_addr.
-typedef bool (*ConstraintCallback)(Address new_addr, Address old_addr);
-
-
-// Callback function on inline caches, used for iterating over inline caches
-// in compiled code.
-typedef void (*InlineCacheCallback)(Code* code, Address ic);
-
-
-// State for inline cache call sites. Aliased as IC::State.
-enum InlineCacheState {
- // Has never been executed.
- UNINITIALIZED,
- // Has been executed but monomorhic state has been delayed.
- PREMONOMORPHIC,
- // Has been executed and only one receiver type has been seen.
- MONOMORPHIC,
- // Like MONOMORPHIC but check failed due to prototype.
- MONOMORPHIC_PROTOTYPE_FAILURE,
- // Multiple receiver types have been seen.
- MEGAMORPHIC,
- // Special states for debug break or step in prepare stubs.
- DEBUG_BREAK,
- DEBUG_PREPARE_STEP_IN
-};
-
-
-enum CheckType {
- RECEIVER_MAP_CHECK,
- STRING_CHECK,
- NUMBER_CHECK,
- BOOLEAN_CHECK
-};
-
-
-enum InLoopFlag {
- NOT_IN_LOOP,
- IN_LOOP
-};
-
-
-enum CallFunctionFlags {
- NO_CALL_FUNCTION_FLAGS = 0,
- RECEIVER_MIGHT_BE_VALUE = 1 << 0 // Receiver might not be a JSObject.
-};
-
-
-enum InlineCacheHolderFlag {
- OWN_MAP, // For fast properties objects.
- PROTOTYPE_MAP // For slow properties objects (except GlobalObjects).
-};
-
-
-// Type of properties.
-// Order of properties is significant.
-// Must fit in the BitField PropertyDetails::TypeField.
-// A copy of this is in mirror-debugger.js.
-enum PropertyType {
- NORMAL = 0, // only in slow mode
- FIELD = 1, // only in fast mode
- CONSTANT_FUNCTION = 2, // only in fast mode
- CALLBACKS = 3,
- INTERCEPTOR = 4, // only in lookup results, not in descriptors.
- MAP_TRANSITION = 5, // only in fast mode
- EXTERNAL_ARRAY_TRANSITION = 6,
- CONSTANT_TRANSITION = 7, // only in fast mode
- NULL_DESCRIPTOR = 8, // only in fast mode
- // All properties before MAP_TRANSITION are real.
- FIRST_PHANTOM_PROPERTY_TYPE = MAP_TRANSITION,
- // There are no IC stubs for NULL_DESCRIPTORS. Therefore,
- // NULL_DESCRIPTOR can be used as the type flag for IC stubs for
- // nonexistent properties.
- NONEXISTENT = NULL_DESCRIPTOR
-};
-
-
-// Whether to remove map transitions and constant transitions from a
-// DescriptorArray.
-enum TransitionFlag {
- REMOVE_TRANSITIONS,
- KEEP_TRANSITIONS
-};
-
-
-// Union used for fast testing of specific double values.
-union DoubleRepresentation {
- double value;
- int64_t bits;
- DoubleRepresentation(double x) { value = x; }
-};
-
-
-// Union used for customized checking of the IEEE double types
-// inlined within v8 runtime, rather than going to the underlying
-// platform headers and libraries
-union IeeeDoubleLittleEndianArchType {
- double d;
- struct {
- unsigned int man_low :32;
- unsigned int man_high :20;
- unsigned int exp :11;
- unsigned int sign :1;
- } bits;
-};
-
-
-union IeeeDoubleBigEndianArchType {
- double d;
- struct {
- unsigned int sign :1;
- unsigned int exp :11;
- unsigned int man_high :20;
- unsigned int man_low :32;
- } bits;
-};
-
-
-// AccessorCallback
-struct AccessorDescriptor {
- MaybeObject* (*getter)(Object* object, void* data);
- MaybeObject* (*setter)(JSObject* object, Object* value, void* data);
- void* data;
-};
-
-
-// Logging and profiling.
-// A StateTag represents a possible state of the VM. When compiled with
-// ENABLE_VMSTATE_TRACKING, the logger maintains a stack of these.
-// Creating a VMState object enters a state by pushing on the stack, and
-// destroying a VMState object leaves a state by popping the current state
-// from the stack.
-
-#define STATE_TAG_LIST(V) \
- V(JS) \
- V(GC) \
- V(COMPILER) \
- V(OTHER) \
- V(EXTERNAL)
-
-enum StateTag {
-#define DEF_STATE_TAG(name) name,
- STATE_TAG_LIST(DEF_STATE_TAG)
-#undef DEF_STATE_TAG
- // Pseudo-types.
- state_tag_count
-};
-
-
-// -----------------------------------------------------------------------------
-// Macros
-
-// Testers for test.
-
-#define HAS_SMI_TAG(value) \
- ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag)
-
-#define HAS_FAILURE_TAG(value) \
- ((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)
-
-// OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer
-#define OBJECT_POINTER_ALIGN(value) \
- (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)
-
-// POINTER_SIZE_ALIGN returns the value aligned as a pointer.
-#define POINTER_SIZE_ALIGN(value) \
- (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
-
-// MAP_POINTER_ALIGN returns the value aligned as a map pointer.
-#define MAP_POINTER_ALIGN(value) \
- (((value) + kMapAlignmentMask) & ~kMapAlignmentMask)
-
-// CODE_POINTER_ALIGN returns the value aligned as a generated code segment.
-#define CODE_POINTER_ALIGN(value) \
- (((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask)
-
-// Support for tracking C++ memory allocation. Insert TRACK_MEMORY("Fisk")
-// inside a C++ class and new and delete will be overloaded so logging is
-// performed.
-// This file (globals.h) is included before log.h, so we use direct calls to
-// the Logger rather than the LOG macro.
-#ifdef DEBUG
-#define TRACK_MEMORY(name) \
- void* operator new(size_t size) { \
- void* result = ::operator new(size); \
- Logger::NewEventStatic(name, result, size); \
- return result; \
- } \
- void operator delete(void* object) { \
- Logger::DeleteEventStatic(name, object); \
- ::operator delete(object); \
- }
-#else
-#define TRACK_MEMORY(name)
-#endif
-
-
-// Feature flags bit positions. They are mostly based on the CPUID spec.
-// (We assign CPUID itself to one of the currently reserved bits --
-// feel free to change this if needed.)
-// On X86/X64, values below 32 are bits in EDX, values above 32 are bits in ECX.
-enum CpuFeature { SSE4_1 = 32 + 19, // x86
- SSE3 = 32 + 0, // x86
- SSE2 = 26, // x86
- CMOV = 15, // x86
- RDTSC = 4, // x86
- CPUID = 10, // x86
- VFP3 = 1, // ARM
- ARMv7 = 2, // ARM
- SAHF = 0, // x86
- FPU = 1}; // MIPS
-
-// The Strict Mode (ECMA-262 5th edition, 4.2.2).
-enum StrictModeFlag {
- kNonStrictMode,
- kStrictMode,
- // This value is never used, but is needed to prevent GCC 4.5 from failing
- // to compile when we assert that a flag is either kNonStrictMode or
- // kStrictMode.
- kInvalidStrictFlag
-};
-
-} } // namespace v8::internal
-
-#endif // V8_V8GLOBALS_H_
diff --git a/src/3rdparty/v8/src/v8memory.h b/src/3rdparty/v8/src/v8memory.h
deleted file mode 100644
index 901e78d..0000000
--- a/src/3rdparty/v8/src/v8memory.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MEMORY_H_
-#define V8_MEMORY_H_
-
-namespace v8 {
-namespace internal {
-
-// Memory provides an interface to 'raw' memory. It encapsulates the casts
-// that typically are needed when incompatible pointer types are used.
-
-class Memory {
- public:
- static uint8_t& uint8_at(Address addr) {
- return *reinterpret_cast<uint8_t*>(addr);
- }
-
- static uint16_t& uint16_at(Address addr) {
- return *reinterpret_cast<uint16_t*>(addr);
- }
-
- static uint32_t& uint32_at(Address addr) {
- return *reinterpret_cast<uint32_t*>(addr);
- }
-
- static int32_t& int32_at(Address addr) {
- return *reinterpret_cast<int32_t*>(addr);
- }
-
- static uint64_t& uint64_at(Address addr) {
- return *reinterpret_cast<uint64_t*>(addr);
- }
-
- static int& int_at(Address addr) {
- return *reinterpret_cast<int*>(addr);
- }
-
- static double& double_at(Address addr) {
- return *reinterpret_cast<double*>(addr);
- }
-
- static Address& Address_at(Address addr) {
- return *reinterpret_cast<Address*>(addr);
- }
-
- static Object*& Object_at(Address addr) {
- return *reinterpret_cast<Object**>(addr);
- }
-
- static Handle<Object>& Object_Handle_at(Address addr) {
- return *reinterpret_cast<Handle<Object>*>(addr);
- }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_MEMORY_H_
diff --git a/src/3rdparty/v8/src/v8natives.js b/src/3rdparty/v8/src/v8natives.js
deleted file mode 100644
index 4fcf0ac..0000000
--- a/src/3rdparty/v8/src/v8natives.js
+++ /dev/null
@@ -1,1293 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file relies on the fact that the following declarations have been made
-//
-// in runtime.js:
-// const $Object = global.Object;
-// const $Boolean = global.Boolean;
-// const $Number = global.Number;
-// const $Function = global.Function;
-// const $Array = global.Array;
-// const $NaN = 0/0;
-//
-// in math.js:
-// const $floor = MathFloor
-
-const $isNaN = GlobalIsNaN;
-const $isFinite = GlobalIsFinite;
-
-
-// ----------------------------------------------------------------------------
-
-
-// Helper function used to install functions on objects.
-function InstallFunctions(object, attributes, functions) {
- if (functions.length >= 8) {
- %OptimizeObjectForAddingMultipleProperties(object, functions.length >> 1);
- }
- for (var i = 0; i < functions.length; i += 2) {
- var key = functions[i];
- var f = functions[i + 1];
- %FunctionSetName(f, key);
- %FunctionRemovePrototype(f);
- %SetProperty(object, key, f, attributes);
- }
- %ToFastProperties(object);
-}
-
-// Emulates JSC by installing functions on a hidden prototype that
-// lies above the current object/prototype. This lets you override
-// functions on String.prototype etc. and then restore the old function
-// with delete. See http://code.google.com/p/chromium/issues/detail?id=1717
-function InstallFunctionsOnHiddenPrototype(object, attributes, functions) {
- var hidden_prototype = new $Object();
- %SetHiddenPrototype(object, hidden_prototype);
- InstallFunctions(hidden_prototype, attributes, functions);
-}
-
-
-// ----------------------------------------------------------------------------
-
-
-// ECMA 262 - 15.1.4
-function GlobalIsNaN(number) {
- var n = ToNumber(number);
- return NUMBER_IS_NAN(n);
-}
-
-
-// ECMA 262 - 15.1.5
-function GlobalIsFinite(number) {
- if (!IS_NUMBER(number)) number = NonNumberToNumber(number);
-
- // NaN - NaN == NaN, Infinity - Infinity == NaN, -Infinity - -Infinity == NaN.
- return %_IsSmi(number) || number - number == 0;
-}
-
-
-// ECMA-262 - 15.1.2.2
-function GlobalParseInt(string, radix) {
- if (IS_UNDEFINED(radix) || radix === 10 || radix === 0) {
- // Some people use parseInt instead of Math.floor. This
- // optimization makes parseInt on a Smi 12 times faster (60ns
- // vs 800ns). The following optimization makes parseInt on a
- // non-Smi number 9 times faster (230ns vs 2070ns). Together
- // they make parseInt on a string 1.4% slower (274ns vs 270ns).
- if (%_IsSmi(string)) return string;
- if (IS_NUMBER(string) &&
- ((0.01 < string && string < 1e9) ||
- (-1e9 < string && string < -0.01))) {
- // Truncate number.
- return string | 0;
- }
- if (IS_UNDEFINED(radix)) radix = 0;
- } else {
- radix = TO_INT32(radix);
- if (!(radix == 0 || (2 <= radix && radix <= 36)))
- return $NaN;
- }
- string = TO_STRING_INLINE(string);
- if (%_HasCachedArrayIndex(string) &&
- (radix == 0 || radix == 10)) {
- return %_GetCachedArrayIndex(string);
- }
- return %StringParseInt(string, radix);
-}
-
-
-// ECMA-262 - 15.1.2.3
-function GlobalParseFloat(string) {
- string = TO_STRING_INLINE(string);
- if (%_HasCachedArrayIndex(string)) return %_GetCachedArrayIndex(string);
- return %StringParseFloat(string);
-}
-
-
-function GlobalEval(x) {
- if (!IS_STRING(x)) return x;
-
- var global_receiver = %GlobalReceiver(global);
- var this_is_global_receiver = (this === global_receiver);
- var global_is_detached = (global === global_receiver);
-
- if (!this_is_global_receiver || global_is_detached) {
- throw new $EvalError('The "this" object passed to eval must ' +
- 'be the global object from which eval originated');
- }
-
- var f = %CompileString(x);
- if (!IS_FUNCTION(f)) return f;
-
- return %_CallFunction(this, f);
-}
-
-
-// execScript for IE compatibility.
-function GlobalExecScript(expr, lang) {
- // NOTE: We don't care about the character casing.
- if (!lang || /javascript/i.test(lang)) {
- var f = %CompileString(ToString(expr));
- %_CallFunction(%GlobalReceiver(global), f);
- }
- return null;
-}
-
-
-// ----------------------------------------------------------------------------
-
-
-function SetupGlobal() {
- // ECMA 262 - 15.1.1.1.
- %SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE);
-
- // ECMA-262 - 15.1.1.2.
- %SetProperty(global, "Infinity", 1/0, DONT_ENUM | DONT_DELETE);
-
- // ECMA-262 - 15.1.1.3.
- %SetProperty(global, "undefined", void 0, DONT_ENUM | DONT_DELETE);
-
- // Setup non-enumerable function on the global object.
- InstallFunctions(global, DONT_ENUM, $Array(
- "isNaN", GlobalIsNaN,
- "isFinite", GlobalIsFinite,
- "parseInt", GlobalParseInt,
- "parseFloat", GlobalParseFloat,
- "eval", GlobalEval,
- "execScript", GlobalExecScript
- ));
-}
-
-SetupGlobal();
-
-
-// ----------------------------------------------------------------------------
-// Boolean (first part of definition)
-
-
-%SetCode($Boolean, function(x) {
- if (%_IsConstructCall()) {
- %_SetValueOf(this, ToBoolean(x));
- } else {
- return ToBoolean(x);
- }
-});
-
-%FunctionSetPrototype($Boolean, new $Boolean(false));
-
-%SetProperty($Boolean.prototype, "constructor", $Boolean, DONT_ENUM);
-
-// ----------------------------------------------------------------------------
-// Object
-
-$Object.prototype.constructor = $Object;
-
-// ECMA-262 - 15.2.4.2
-function ObjectToString() {
- return "[object " + %_ClassOf(ToObject(this)) + "]";
-}
-
-
-// ECMA-262 - 15.2.4.3
-function ObjectToLocaleString() {
- return this.toString();
-}
-
-
-// ECMA-262 - 15.2.4.4
-function ObjectValueOf() {
- return ToObject(this);
-}
-
-
-// ECMA-262 - 15.2.4.5
-function ObjectHasOwnProperty(V) {
- return %HasLocalProperty(ToObject(this), ToString(V));
-}
-
-
-// ECMA-262 - 15.2.4.6
-function ObjectIsPrototypeOf(V) {
- if (!IS_SPEC_OBJECT(V)) return false;
- return %IsInPrototypeChain(this, V);
-}
-
-
-// ECMA-262 - 15.2.4.6
-function ObjectPropertyIsEnumerable(V) {
- return %IsPropertyEnumerable(ToObject(this), ToString(V));
-}
-
-
-// Extensions for providing property getters and setters.
-function ObjectDefineGetter(name, fun) {
- if (this == null && !IS_UNDETECTABLE(this)) {
- throw new $TypeError('Object.prototype.__defineGetter__: this is Null');
- }
- if (!IS_FUNCTION(fun)) {
- throw new $TypeError('Object.prototype.__defineGetter__: Expecting function');
- }
- var desc = new PropertyDescriptor();
- desc.setGet(fun);
- desc.setEnumerable(true);
- desc.setConfigurable(true);
- DefineOwnProperty(ToObject(this), ToString(name), desc, false);
-}
-
-
-function ObjectLookupGetter(name) {
- if (this == null && !IS_UNDETECTABLE(this)) {
- throw new $TypeError('Object.prototype.__lookupGetter__: this is Null');
- }
- return %LookupAccessor(ToObject(this), ToString(name), GETTER);
-}
-
-
-function ObjectDefineSetter(name, fun) {
- if (this == null && !IS_UNDETECTABLE(this)) {
- throw new $TypeError('Object.prototype.__defineSetter__: this is Null');
- }
- if (!IS_FUNCTION(fun)) {
- throw new $TypeError(
- 'Object.prototype.__defineSetter__: Expecting function');
- }
- var desc = new PropertyDescriptor();
- desc.setSet(fun);
- desc.setEnumerable(true);
- desc.setConfigurable(true);
- DefineOwnProperty(ToObject(this), ToString(name), desc, false);
-}
-
-
-function ObjectLookupSetter(name) {
- if (this == null && !IS_UNDETECTABLE(this)) {
- throw new $TypeError('Object.prototype.__lookupSetter__: this is Null');
- }
- return %LookupAccessor(ToObject(this), ToString(name), SETTER);
-}
-
-
-function ObjectKeys(obj) {
- if (!IS_SPEC_OBJECT(obj))
- throw MakeTypeError("obj_ctor_property_non_object", ["keys"]);
- return %LocalKeys(obj);
-}
-
-
-// ES5 8.10.1.
-function IsAccessorDescriptor(desc) {
- if (IS_UNDEFINED(desc)) return false;
- return desc.hasGetter_ || desc.hasSetter_;
-}
-
-
-// ES5 8.10.2.
-function IsDataDescriptor(desc) {
- if (IS_UNDEFINED(desc)) return false;
- return desc.hasValue_ || desc.hasWritable_;
-}
-
-
-// ES5 8.10.3.
-function IsGenericDescriptor(desc) {
- return !(IsAccessorDescriptor(desc) || IsDataDescriptor(desc));
-}
-
-
-function IsInconsistentDescriptor(desc) {
- return IsAccessorDescriptor(desc) && IsDataDescriptor(desc);
-}
-
-// ES5 8.10.4
-function FromPropertyDescriptor(desc) {
- if (IS_UNDEFINED(desc)) return desc;
- var obj = new $Object();
- if (IsDataDescriptor(desc)) {
- obj.value = desc.getValue();
- obj.writable = desc.isWritable();
- }
- if (IsAccessorDescriptor(desc)) {
- obj.get = desc.getGet();
- obj.set = desc.getSet();
- }
- obj.enumerable = desc.isEnumerable();
- obj.configurable = desc.isConfigurable();
- return obj;
-}
-
-// ES5 8.10.5.
-function ToPropertyDescriptor(obj) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("property_desc_object", [obj]);
- }
- var desc = new PropertyDescriptor();
-
- if ("enumerable" in obj) {
- desc.setEnumerable(ToBoolean(obj.enumerable));
- }
-
- if ("configurable" in obj) {
- desc.setConfigurable(ToBoolean(obj.configurable));
- }
-
- if ("value" in obj) {
- desc.setValue(obj.value);
- }
-
- if ("writable" in obj) {
- desc.setWritable(ToBoolean(obj.writable));
- }
-
- if ("get" in obj) {
- var get = obj.get;
- if (!IS_UNDEFINED(get) && !IS_FUNCTION(get)) {
- throw MakeTypeError("getter_must_be_callable", [get]);
- }
- desc.setGet(get);
- }
-
- if ("set" in obj) {
- var set = obj.set;
- if (!IS_UNDEFINED(set) && !IS_FUNCTION(set)) {
- throw MakeTypeError("setter_must_be_callable", [set]);
- }
- desc.setSet(set);
- }
-
- if (IsInconsistentDescriptor(desc)) {
- throw MakeTypeError("value_and_accessor", [obj]);
- }
- return desc;
-}
-
-
-function PropertyDescriptor() {
- // Initialize here so they are all in-object and have the same map.
- // Default values from ES5 8.6.1.
- this.value_ = void 0;
- this.hasValue_ = false;
- this.writable_ = false;
- this.hasWritable_ = false;
- this.enumerable_ = false;
- this.hasEnumerable_ = false;
- this.configurable_ = false;
- this.hasConfigurable_ = false;
- this.get_ = void 0;
- this.hasGetter_ = false;
- this.set_ = void 0;
- this.hasSetter_ = false;
-}
-
-PropertyDescriptor.prototype.__proto__ = null;
-PropertyDescriptor.prototype.toString = function() {
- return "[object PropertyDescriptor]";
-};
-
-PropertyDescriptor.prototype.setValue = function(value) {
- this.value_ = value;
- this.hasValue_ = true;
-}
-
-
-PropertyDescriptor.prototype.getValue = function() {
- return this.value_;
-}
-
-
-PropertyDescriptor.prototype.hasValue = function() {
- return this.hasValue_;
-}
-
-
-PropertyDescriptor.prototype.setEnumerable = function(enumerable) {
- this.enumerable_ = enumerable;
- this.hasEnumerable_ = true;
-}
-
-
-PropertyDescriptor.prototype.isEnumerable = function () {
- return this.enumerable_;
-}
-
-
-PropertyDescriptor.prototype.hasEnumerable = function() {
- return this.hasEnumerable_;
-}
-
-
-PropertyDescriptor.prototype.setWritable = function(writable) {
- this.writable_ = writable;
- this.hasWritable_ = true;
-}
-
-
-PropertyDescriptor.prototype.isWritable = function() {
- return this.writable_;
-}
-
-
-PropertyDescriptor.prototype.hasWritable = function() {
- return this.hasWritable_;
-}
-
-
-PropertyDescriptor.prototype.setConfigurable = function(configurable) {
- this.configurable_ = configurable;
- this.hasConfigurable_ = true;
-}
-
-
-PropertyDescriptor.prototype.hasConfigurable = function() {
- return this.hasConfigurable_;
-}
-
-
-PropertyDescriptor.prototype.isConfigurable = function() {
- return this.configurable_;
-}
-
-
-PropertyDescriptor.prototype.setGet = function(get) {
- this.get_ = get;
- this.hasGetter_ = true;
-}
-
-
-PropertyDescriptor.prototype.getGet = function() {
- return this.get_;
-}
-
-
-PropertyDescriptor.prototype.hasGetter = function() {
- return this.hasGetter_;
-}
-
-
-PropertyDescriptor.prototype.setSet = function(set) {
- this.set_ = set;
- this.hasSetter_ = true;
-}
-
-
-PropertyDescriptor.prototype.getSet = function() {
- return this.set_;
-}
-
-
-PropertyDescriptor.prototype.hasSetter = function() {
- return this.hasSetter_;
-}
-
-
-// Converts an array returned from Runtime_GetOwnProperty to an actual
-// property descriptor. For a description of the array layout please
-// see the runtime.cc file.
-function ConvertDescriptorArrayToDescriptor(desc_array) {
- if (desc_array === false) {
- throw 'Internal error: invalid desc_array';
- }
-
- if (IS_UNDEFINED(desc_array)) {
- return void 0;
- }
-
- var desc = new PropertyDescriptor();
- // This is an accessor.
- if (desc_array[IS_ACCESSOR_INDEX]) {
- desc.setGet(desc_array[GETTER_INDEX]);
- desc.setSet(desc_array[SETTER_INDEX]);
- } else {
- desc.setValue(desc_array[VALUE_INDEX]);
- desc.setWritable(desc_array[WRITABLE_INDEX]);
- }
- desc.setEnumerable(desc_array[ENUMERABLE_INDEX]);
- desc.setConfigurable(desc_array[CONFIGURABLE_INDEX]);
-
- return desc;
-}
-
-
-// ES5 section 8.12.2.
-function GetProperty(obj, p) {
- var prop = GetOwnProperty(obj);
- if (!IS_UNDEFINED(prop)) return prop;
- var proto = obj.__proto__;
- if (IS_NULL(proto)) return void 0;
- return GetProperty(proto, p);
-}
-
-
-// ES5 section 8.12.6
-function HasProperty(obj, p) {
- var desc = GetProperty(obj, p);
- return IS_UNDEFINED(desc) ? false : true;
-}
-
-
-// ES5 section 8.12.1.
-function GetOwnProperty(obj, p) {
- // GetOwnProperty returns an array indexed by the constants
- // defined in macros.py.
- // If p is not a property on obj undefined is returned.
- var props = %GetOwnProperty(ToObject(obj), ToString(p));
-
- // A false value here means that access checks failed.
- if (props === false) return void 0;
-
- return ConvertDescriptorArrayToDescriptor(props);
-}
-
-
-// ES5 8.12.9.
-function DefineOwnProperty(obj, p, desc, should_throw) {
- var current_or_access = %GetOwnProperty(ToObject(obj), ToString(p));
- // A false value here means that access checks failed.
- if (current_or_access === false) return void 0;
-
- var current = ConvertDescriptorArrayToDescriptor(current_or_access);
- var extensible = %IsExtensible(ToObject(obj));
-
- // Error handling according to spec.
- // Step 3
- if (IS_UNDEFINED(current) && !extensible) {
- if (should_throw) {
- throw MakeTypeError("define_disallowed", ["defineProperty"]);
- } else {
- return;
- }
- }
-
- if (!IS_UNDEFINED(current)) {
- // Step 5 and 6
- if ((IsGenericDescriptor(desc) ||
- IsDataDescriptor(desc) == IsDataDescriptor(current)) &&
- (!desc.hasEnumerable() ||
- SameValue(desc.isEnumerable(), current.isEnumerable())) &&
- (!desc.hasConfigurable() ||
- SameValue(desc.isConfigurable(), current.isConfigurable())) &&
- (!desc.hasWritable() ||
- SameValue(desc.isWritable(), current.isWritable())) &&
- (!desc.hasValue() ||
- SameValue(desc.getValue(), current.getValue())) &&
- (!desc.hasGetter() ||
- SameValue(desc.getGet(), current.getGet())) &&
- (!desc.hasSetter() ||
- SameValue(desc.getSet(), current.getSet()))) {
- return true;
- }
- if (!current.isConfigurable()) {
- // Step 7
- if (desc.isConfigurable() ||
- (desc.hasEnumerable() &&
- desc.isEnumerable() != current.isEnumerable())) {
- if (should_throw) {
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
- } else {
- return;
- }
- }
- // Step 8
- if (!IsGenericDescriptor(desc)) {
- // Step 9a
- if (IsDataDescriptor(current) != IsDataDescriptor(desc)) {
- if (should_throw) {
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
- } else {
- return;
- }
- }
- // Step 10a
- if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
- if (!current.isWritable() && desc.isWritable()) {
- if (should_throw) {
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
- } else {
- return;
- }
- }
- if (!current.isWritable() && desc.hasValue() &&
- !SameValue(desc.getValue(), current.getValue())) {
- if (should_throw) {
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
- } else {
- return;
- }
- }
- }
- // Step 11
- if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
- if (desc.hasSetter() && !SameValue(desc.getSet(), current.getSet())) {
- if (should_throw) {
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
- } else {
- return;
- }
- }
- if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet())) {
- if (should_throw) {
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
- } else {
- return;
- }
- }
- }
- }
- }
- }
-
- // Send flags - enumerable and configurable are common - writable is
- // only send to the data descriptor.
- // Take special care if enumerable and configurable is not defined on
- // desc (we need to preserve the existing values from current).
- var flag = NONE;
- if (desc.hasEnumerable()) {
- flag |= desc.isEnumerable() ? 0 : DONT_ENUM;
- } else if (!IS_UNDEFINED(current)) {
- flag |= current.isEnumerable() ? 0 : DONT_ENUM;
- } else {
- flag |= DONT_ENUM;
- }
-
- if (desc.hasConfigurable()) {
- flag |= desc.isConfigurable() ? 0 : DONT_DELETE;
- } else if (!IS_UNDEFINED(current)) {
- flag |= current.isConfigurable() ? 0 : DONT_DELETE;
- } else
- flag |= DONT_DELETE;
-
- if (IsDataDescriptor(desc) ||
- (IsGenericDescriptor(desc) &&
- (IS_UNDEFINED(current) || IsDataDescriptor(current)))) {
- // There are 3 cases that lead here:
- // Step 4a - defining a new data property.
- // Steps 9b & 12 - replacing an existing accessor property with a data
- // property.
- // Step 12 - updating an existing data property with a data or generic
- // descriptor.
-
- if (desc.hasWritable()) {
- flag |= desc.isWritable() ? 0 : READ_ONLY;
- } else if (!IS_UNDEFINED(current)) {
- flag |= current.isWritable() ? 0 : READ_ONLY;
- } else {
- flag |= READ_ONLY;
- }
-
- var value = void 0; // Default value is undefined.
- if (desc.hasValue()) {
- value = desc.getValue();
- } else if (!IS_UNDEFINED(current) && IsDataDescriptor(current)) {
- value = current.getValue();
- }
-
- %DefineOrRedefineDataProperty(obj, p, value, flag);
- } else if (IsGenericDescriptor(desc)) {
- // Step 12 - updating an existing accessor property with generic
- // descriptor. Changing flags only.
- %DefineOrRedefineAccessorProperty(obj, p, GETTER, current.getGet(), flag);
- } else {
- // There are 3 cases that lead here:
- // Step 4b - defining a new accessor property.
- // Steps 9c & 12 - replacing an existing data property with an accessor
- // property.
- // Step 12 - updating an existing accessor property with an accessor
- // descriptor.
- if (desc.hasGetter()) {
- %DefineOrRedefineAccessorProperty(obj, p, GETTER, desc.getGet(), flag);
- }
- if (desc.hasSetter()) {
- %DefineOrRedefineAccessorProperty(obj, p, SETTER, desc.getSet(), flag);
- }
- }
- return true;
-}
-
-
-// ES5 section 15.2.3.2.
-function ObjectGetPrototypeOf(obj) {
- if (!IS_SPEC_OBJECT(obj))
- throw MakeTypeError("obj_ctor_property_non_object", ["getPrototypeOf"]);
- return obj.__proto__;
-}
-
-
-// ES5 section 15.2.3.3
-function ObjectGetOwnPropertyDescriptor(obj, p) {
- if (!IS_SPEC_OBJECT(obj))
- throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyDescriptor"]);
- var desc = GetOwnProperty(obj, p);
- return FromPropertyDescriptor(desc);
-}
-
-
-// ES5 section 15.2.3.4.
-function ObjectGetOwnPropertyNames(obj) {
- if (!IS_SPEC_OBJECT(obj))
- throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyNames"]);
-
- // Find all the indexed properties.
-
- // Get the local element names.
- var propertyNames = %GetLocalElementNames(obj);
-
- // Get names for indexed interceptor properties.
- if (%GetInterceptorInfo(obj) & 1) {
- var indexedInterceptorNames =
- %GetIndexedInterceptorElementNames(obj);
- if (indexedInterceptorNames)
- propertyNames = propertyNames.concat(indexedInterceptorNames);
- }
-
- // Find all the named properties.
-
- // Get the local property names.
- propertyNames = propertyNames.concat(%GetLocalPropertyNames(obj));
-
- // Get names for named interceptor properties if any.
-
- if (%GetInterceptorInfo(obj) & 2) {
- var namedInterceptorNames =
- %GetNamedInterceptorPropertyNames(obj);
- if (namedInterceptorNames) {
- propertyNames = propertyNames.concat(namedInterceptorNames);
- }
- }
-
- // Property names are expected to be unique strings.
- var propertySet = {};
- var j = 0;
- for (var i = 0; i < propertyNames.length; ++i) {
- var name = ToString(propertyNames[i]);
- // We need to check for the exact property value since for intrinsic
- // properties like toString if(propertySet["toString"]) will always
- // succeed.
- if (propertySet[name] === true)
- continue;
- propertySet[name] = true;
- propertyNames[j++] = name;
- }
- propertyNames.length = j;
-
- return propertyNames;
-}
-
-
-// ES5 section 15.2.3.5.
-function ObjectCreate(proto, properties) {
- if (!IS_SPEC_OBJECT(proto) && proto !== null) {
- throw MakeTypeError("proto_object_or_null", [proto]);
- }
- var obj = new $Object();
- obj.__proto__ = proto;
- if (!IS_UNDEFINED(properties)) ObjectDefineProperties(obj, properties);
- return obj;
-}
-
-
-// ES5 section 15.2.3.6.
-function ObjectDefineProperty(obj, p, attributes) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("obj_ctor_property_non_object", ["defineProperty"]);
- }
- var name = ToString(p);
- var desc = ToPropertyDescriptor(attributes);
- DefineOwnProperty(obj, name, desc, true);
- return obj;
-}
-
-
-// ES5 section 15.2.3.7.
-function ObjectDefineProperties(obj, properties) {
- if (!IS_SPEC_OBJECT(obj))
- throw MakeTypeError("obj_ctor_property_non_object", ["defineProperties"]);
- var props = ToObject(properties);
- var key_values = [];
- for (var key in props) {
- if (%HasLocalProperty(props, key)) {
- key_values.push(key);
- var value = props[key];
- var desc = ToPropertyDescriptor(value);
- key_values.push(desc);
- }
- }
- for (var i = 0; i < key_values.length; i += 2) {
- var key = key_values[i];
- var desc = key_values[i + 1];
- DefineOwnProperty(obj, key, desc, true);
- }
- return obj;
-}
-
-
-// ES5 section 15.2.3.8.
-function ObjectSeal(obj) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("obj_ctor_property_non_object", ["seal"]);
- }
- var names = ObjectGetOwnPropertyNames(obj);
- for (var i = 0; i < names.length; i++) {
- var name = names[i];
- var desc = GetOwnProperty(obj, name);
- if (desc.isConfigurable()) desc.setConfigurable(false);
- DefineOwnProperty(obj, name, desc, true);
- }
- return ObjectPreventExtension(obj);
-}
-
-
-// ES5 section 15.2.3.9.
-function ObjectFreeze(obj) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("obj_ctor_property_non_object", ["freeze"]);
- }
- var names = ObjectGetOwnPropertyNames(obj);
- for (var i = 0; i < names.length; i++) {
- var name = names[i];
- var desc = GetOwnProperty(obj, name);
- if (IsDataDescriptor(desc)) desc.setWritable(false);
- if (desc.isConfigurable()) desc.setConfigurable(false);
- DefineOwnProperty(obj, name, desc, true);
- }
- return ObjectPreventExtension(obj);
-}
-
-
-// ES5 section 15.2.3.10
-function ObjectPreventExtension(obj) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("obj_ctor_property_non_object", ["preventExtension"]);
- }
- %PreventExtensions(obj);
- return obj;
-}
-
-
-// ES5 section 15.2.3.11
-function ObjectIsSealed(obj) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("obj_ctor_property_non_object", ["isSealed"]);
- }
- var names = ObjectGetOwnPropertyNames(obj);
- for (var i = 0; i < names.length; i++) {
- var name = names[i];
- var desc = GetOwnProperty(obj, name);
- if (desc.isConfigurable()) return false;
- }
- if (!ObjectIsExtensible(obj)) {
- return true;
- }
- return false;
-}
-
-
-// ES5 section 15.2.3.12
-function ObjectIsFrozen(obj) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("obj_ctor_property_non_object", ["isFrozen"]);
- }
- var names = ObjectGetOwnPropertyNames(obj);
- for (var i = 0; i < names.length; i++) {
- var name = names[i];
- var desc = GetOwnProperty(obj, name);
- if (IsDataDescriptor(desc) && desc.isWritable()) return false;
- if (desc.isConfigurable()) return false;
- }
- if (!ObjectIsExtensible(obj)) {
- return true;
- }
- return false;
-}
-
-
-// ES5 section 15.2.3.13
-function ObjectIsExtensible(obj) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("obj_ctor_property_non_object", ["preventExtension"]);
- }
- return %IsExtensible(obj);
-}
-
-
-%SetCode($Object, function(x) {
- if (%_IsConstructCall()) {
- if (x == null) return this;
- return ToObject(x);
- } else {
- if (x == null) return { };
- return ToObject(x);
- }
-});
-
-%SetExpectedNumberOfProperties($Object, 4);
-
-// ----------------------------------------------------------------------------
-
-
-function SetupObject() {
- // Setup non-enumerable functions on the Object.prototype object.
- InstallFunctions($Object.prototype, DONT_ENUM, $Array(
- "toString", ObjectToString,
- "toLocaleString", ObjectToLocaleString,
- "valueOf", ObjectValueOf,
- "hasOwnProperty", ObjectHasOwnProperty,
- "isPrototypeOf", ObjectIsPrototypeOf,
- "propertyIsEnumerable", ObjectPropertyIsEnumerable,
- "__defineGetter__", ObjectDefineGetter,
- "__lookupGetter__", ObjectLookupGetter,
- "__defineSetter__", ObjectDefineSetter,
- "__lookupSetter__", ObjectLookupSetter
- ));
- InstallFunctions($Object, DONT_ENUM, $Array(
- "keys", ObjectKeys,
- "create", ObjectCreate,
- "defineProperty", ObjectDefineProperty,
- "defineProperties", ObjectDefineProperties,
- "freeze", ObjectFreeze,
- "getPrototypeOf", ObjectGetPrototypeOf,
- "getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor,
- "getOwnPropertyNames", ObjectGetOwnPropertyNames,
- "isExtensible", ObjectIsExtensible,
- "isFrozen", ObjectIsFrozen,
- "isSealed", ObjectIsSealed,
- "preventExtensions", ObjectPreventExtension,
- "seal", ObjectSeal
- ));
-}
-
-SetupObject();
-
-
-// ----------------------------------------------------------------------------
-// Boolean
-
-function BooleanToString() {
- // NOTE: Both Boolean objects and values can enter here as
- // 'this'. This is not as dictated by ECMA-262.
- var b = this;
- if (!IS_BOOLEAN(b)) {
- if (!IS_BOOLEAN_WRAPPER(b)) {
- throw new $TypeError('Boolean.prototype.toString is not generic');
- }
- b = %_ValueOf(b);
- }
- return b ? 'true' : 'false';
-}
-
-
-function BooleanValueOf() {
- // NOTE: Both Boolean objects and values can enter here as
- // 'this'. This is not as dictated by ECMA-262.
- if (!IS_BOOLEAN(this) && !IS_BOOLEAN_WRAPPER(this))
- throw new $TypeError('Boolean.prototype.valueOf is not generic');
- return %_ValueOf(this);
-}
-
-
-// ----------------------------------------------------------------------------
-
-
-function SetupBoolean() {
- InstallFunctions($Boolean.prototype, DONT_ENUM, $Array(
- "toString", BooleanToString,
- "valueOf", BooleanValueOf
- ));
-}
-
-SetupBoolean();
-
-// ----------------------------------------------------------------------------
-// Number
-
-// Set the Number function and constructor.
-%SetCode($Number, function(x) {
- var value = %_ArgumentsLength() == 0 ? 0 : ToNumber(x);
- if (%_IsConstructCall()) {
- %_SetValueOf(this, value);
- } else {
- return value;
- }
-});
-
-%FunctionSetPrototype($Number, new $Number(0));
-
-// ECMA-262 section 15.7.4.2.
-function NumberToString(radix) {
- // NOTE: Both Number objects and values can enter here as
- // 'this'. This is not as dictated by ECMA-262.
- var number = this;
- if (!IS_NUMBER(this)) {
- if (!IS_NUMBER_WRAPPER(this))
- throw new $TypeError('Number.prototype.toString is not generic');
- // Get the value of this number in case it's an object.
- number = %_ValueOf(this);
- }
- // Fast case: Convert number in radix 10.
- if (IS_UNDEFINED(radix) || radix === 10) {
- return %_NumberToString(number);
- }
-
- // Convert the radix to an integer and check the range.
- radix = TO_INTEGER(radix);
- if (radix < 2 || radix > 36) {
- throw new $RangeError('toString() radix argument must be between 2 and 36');
- }
- // Convert the number to a string in the given radix.
- return %NumberToRadixString(number, radix);
-}
-
-
-// ECMA-262 section 15.7.4.3
-function NumberToLocaleString() {
- return this.toString();
-}
-
-
-// ECMA-262 section 15.7.4.4
-function NumberValueOf() {
- // NOTE: Both Number objects and values can enter here as
- // 'this'. This is not as dictated by ECMA-262.
- if (!IS_NUMBER(this) && !IS_NUMBER_WRAPPER(this))
- throw new $TypeError('Number.prototype.valueOf is not generic');
- return %_ValueOf(this);
-}
-
-
-// ECMA-262 section 15.7.4.5
-function NumberToFixed(fractionDigits) {
- var f = TO_INTEGER(fractionDigits);
- if (f < 0 || f > 20) {
- throw new $RangeError("toFixed() digits argument must be between 0 and 20");
- }
- var x = ToNumber(this);
- return %NumberToFixed(x, f);
-}
-
-
-// ECMA-262 section 15.7.4.6
-function NumberToExponential(fractionDigits) {
- var f = -1;
- if (!IS_UNDEFINED(fractionDigits)) {
- f = TO_INTEGER(fractionDigits);
- if (f < 0 || f > 20) {
- throw new $RangeError("toExponential() argument must be between 0 and 20");
- }
- }
- var x = ToNumber(this);
- return %NumberToExponential(x, f);
-}
-
-
-// ECMA-262 section 15.7.4.7
-function NumberToPrecision(precision) {
- if (IS_UNDEFINED(precision)) return ToString(%_ValueOf(this));
- var p = TO_INTEGER(precision);
- if (p < 1 || p > 21) {
- throw new $RangeError("toPrecision() argument must be between 1 and 21");
- }
- var x = ToNumber(this);
- return %NumberToPrecision(x, p);
-}
-
-
-// ----------------------------------------------------------------------------
-
-function SetupNumber() {
- %OptimizeObjectForAddingMultipleProperties($Number.prototype, 8);
- // Setup the constructor property on the Number prototype object.
- %SetProperty($Number.prototype, "constructor", $Number, DONT_ENUM);
-
- %OptimizeObjectForAddingMultipleProperties($Number, 5);
- // ECMA-262 section 15.7.3.1.
- %SetProperty($Number,
- "MAX_VALUE",
- 1.7976931348623157e+308,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- // ECMA-262 section 15.7.3.2.
- %SetProperty($Number, "MIN_VALUE", 5e-324, DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- // ECMA-262 section 15.7.3.3.
- %SetProperty($Number, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- // ECMA-262 section 15.7.3.4.
- %SetProperty($Number,
- "NEGATIVE_INFINITY",
- -1/0,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- // ECMA-262 section 15.7.3.5.
- %SetProperty($Number,
- "POSITIVE_INFINITY",
- 1/0,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %ToFastProperties($Number);
-
- // Setup non-enumerable functions on the Number prototype object.
- InstallFunctions($Number.prototype, DONT_ENUM, $Array(
- "toString", NumberToString,
- "toLocaleString", NumberToLocaleString,
- "valueOf", NumberValueOf,
- "toFixed", NumberToFixed,
- "toExponential", NumberToExponential,
- "toPrecision", NumberToPrecision
- ));
-}
-
-SetupNumber();
-
-
-// ----------------------------------------------------------------------------
-// Function
-
-$Function.prototype.constructor = $Function;
-
-function FunctionSourceString(func) {
- if (!IS_FUNCTION(func)) {
- throw new $TypeError('Function.prototype.toString is not generic');
- }
-
- var source = %FunctionGetSourceCode(func);
- if (!IS_STRING(source) || %FunctionIsBuiltin(func)) {
- var name = %FunctionGetName(func);
- if (name) {
- // Mimic what KJS does.
- return 'function ' + name + '() { [native code] }';
- } else {
- return 'function () { [native code] }';
- }
- }
-
- var name = %FunctionGetName(func);
- return 'function ' + name + source;
-}
-
-
-function FunctionToString() {
- return FunctionSourceString(this);
-}
-
-
-// ES5 15.3.4.5
-function FunctionBind(this_arg) { // Length is 1.
- if (!IS_FUNCTION(this)) {
- throw new $TypeError('Bind must be called on a function');
- }
- // this_arg is not an argument that should be bound.
- var argc_bound = (%_ArgumentsLength() || 1) - 1;
- var fn = this;
- if (argc_bound == 0) {
- var result = function() {
- if (%_IsConstructCall()) {
- // %NewObjectFromBound implicitly uses arguments passed to this
- // function. We do not pass the arguments object explicitly to avoid
- // materializing it and guarantee that this function will be optimized.
- return %NewObjectFromBound(fn, null);
- }
-
- return fn.apply(this_arg, arguments);
- };
- } else {
- var bound_args = new InternalArray(argc_bound);
- for(var i = 0; i < argc_bound; i++) {
- bound_args[i] = %_Arguments(i+1);
- }
-
- var result = function() {
- // If this is a construct call we use a special runtime method
- // to generate the actual object using the bound function.
- if (%_IsConstructCall()) {
- // %NewObjectFromBound implicitly uses arguments passed to this
- // function. We do not pass the arguments object explicitly to avoid
- // materializing it and guarantee that this function will be optimized.
- return %NewObjectFromBound(fn, bound_args);
- }
-
- // Combine the args we got from the bind call with the args
- // given as argument to the invocation.
- var argc = %_ArgumentsLength();
- var args = new InternalArray(argc + argc_bound);
- // Add bound arguments.
- for (var i = 0; i < argc_bound; i++) {
- args[i] = bound_args[i];
- }
- // Add arguments from call.
- for (var i = 0; i < argc; i++) {
- args[argc_bound + i] = %_Arguments(i);
- }
- return fn.apply(this_arg, args);
- };
- }
-
- // We already have caller and arguments properties on functions,
- // which are non-configurable. It therefore makes no sence to
- // try to redefine these as defined by the spec. The spec says
- // that bind should make these throw a TypeError if get or set
- // is called and make them non-enumerable and non-configurable.
- // To be consistent with our normal functions we leave this as it is.
-
- // Set the correct length.
- var length = (this.length - argc_bound) > 0 ? this.length - argc_bound : 0;
- %FunctionSetLength(result, length);
-
- return result;
-}
-
-
-function NewFunction(arg1) { // length == 1
- var n = %_ArgumentsLength();
- var p = '';
- if (n > 1) {
- p = new InternalArray(n - 1);
- for (var i = 0; i < n - 1; i++) p[i] = %_Arguments(i);
- p = Join(p, n - 1, ',', NonStringToString);
- // If the formal parameters string include ) - an illegal
- // character - it may make the combined function expression
- // compile. We avoid this problem by checking for this early on.
- if (p.indexOf(')') != -1) throw MakeSyntaxError('unable_to_parse',[]);
- }
- var body = (n > 0) ? ToString(%_Arguments(n - 1)) : '';
- var source = '(function(' + p + ') {\n' + body + '\n})';
-
- // The call to SetNewFunctionAttributes will ensure the prototype
- // property of the resulting function is enumerable (ECMA262, 15.3.5.2).
- var f = %CompileString(source)();
- %FunctionSetName(f, "anonymous");
- return %SetNewFunctionAttributes(f);
-}
-
-%SetCode($Function, NewFunction);
-
-// ----------------------------------------------------------------------------
-
-function SetupFunction() {
- InstallFunctions($Function.prototype, DONT_ENUM, $Array(
- "bind", FunctionBind,
- "toString", FunctionToString
- ));
-}
-
-SetupFunction();
diff --git a/src/3rdparty/v8/src/v8preparserdll-main.cc b/src/3rdparty/v8/src/v8preparserdll-main.cc
deleted file mode 100644
index c0344d3..0000000
--- a/src/3rdparty/v8/src/v8preparserdll-main.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <windows.h>
-
-#include "../include/v8-preparser.h"
-
-extern "C" {
-BOOL WINAPI DllMain(HANDLE hinstDLL,
- DWORD dwReason,
- LPVOID lpvReserved) {
- // Do nothing.
- return TRUE;
-}
-}
diff --git a/src/3rdparty/v8/src/v8threads.cc b/src/3rdparty/v8/src/v8threads.cc
deleted file mode 100644
index cecafaa..0000000
--- a/src/3rdparty/v8/src/v8threads.cc
+++ /dev/null
@@ -1,453 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "bootstrapper.h"
-#include "debug.h"
-#include "execution.h"
-#include "v8threads.h"
-#include "regexp-stack.h"
-
-namespace v8 {
-
-
-// Track whether this V8 instance has ever called v8::Locker. This allows the
-// API code to verify that the lock is always held when V8 is being entered.
-bool Locker::active_ = false;
-
-
-// Constructor for the Locker object. Once the Locker is constructed the
-// current thread will be guaranteed to have the big V8 lock.
-Locker::Locker() : has_lock_(false), top_level_(true) {
- // TODO(isolates): When Locker has Isolate parameter and it is provided, grab
- // that one instead of using the current one.
- // We pull default isolate for Locker constructor w/o p[arameter.
- // A thread should not enter an isolate before acquiring a lock,
- // in cases which mandate using Lockers.
- // So getting a lock is the first thing threads do in a scenario where
- // multple threads share an isolate. Hence, we need to access
- // 'locking isolate' before we can actually enter into default isolate.
- internal::Isolate* isolate = internal::Isolate::GetDefaultIsolateForLocking();
- ASSERT(isolate != NULL);
-
- // Record that the Locker has been used at least once.
- active_ = true;
- // Get the big lock if necessary.
- if (!isolate->thread_manager()->IsLockedByCurrentThread()) {
- isolate->thread_manager()->Lock();
- has_lock_ = true;
-
- if (isolate->IsDefaultIsolate()) {
- // This only enters if not yet entered.
- internal::Isolate::EnterDefaultIsolate();
- }
-
- ASSERT(internal::Thread::HasThreadLocal(
- internal::Isolate::thread_id_key()));
-
- // Make sure that V8 is initialized. Archiving of threads interferes
- // with deserialization by adding additional root pointers, so we must
- // initialize here, before anyone can call ~Locker() or Unlocker().
- if (!isolate->IsInitialized()) {
- V8::Initialize();
- }
- // This may be a locker within an unlocker in which case we have to
- // get the saved state for this thread and restore it.
- if (isolate->thread_manager()->RestoreThread()) {
- top_level_ = false;
- } else {
- internal::ExecutionAccess access(isolate);
- isolate->stack_guard()->ClearThread(access);
- isolate->stack_guard()->InitThread(access);
- }
- }
- ASSERT(isolate->thread_manager()->IsLockedByCurrentThread());
-}
-
-
-bool Locker::IsLocked() {
- return internal::Isolate::Current()->thread_manager()->
- IsLockedByCurrentThread();
-}
-
-
-Locker::~Locker() {
- // TODO(isolate): this should use a field storing the isolate it
- // locked instead.
- internal::Isolate* isolate = internal::Isolate::Current();
- ASSERT(isolate->thread_manager()->IsLockedByCurrentThread());
- if (has_lock_) {
- if (top_level_) {
- isolate->thread_manager()->FreeThreadResources();
- } else {
- isolate->thread_manager()->ArchiveThread();
- }
- isolate->thread_manager()->Unlock();
- }
-}
-
-
-Unlocker::Unlocker() {
- internal::Isolate* isolate = internal::Isolate::Current();
- ASSERT(isolate->thread_manager()->IsLockedByCurrentThread());
- isolate->thread_manager()->ArchiveThread();
- isolate->thread_manager()->Unlock();
-}
-
-
-Unlocker::~Unlocker() {
- // TODO(isolates): check it's the isolate we unlocked.
- internal::Isolate* isolate = internal::Isolate::Current();
- ASSERT(!isolate->thread_manager()->IsLockedByCurrentThread());
- isolate->thread_manager()->Lock();
- isolate->thread_manager()->RestoreThread();
-}
-
-
-void Locker::StartPreemption(int every_n_ms) {
- v8::internal::ContextSwitcher::StartPreemption(every_n_ms);
-}
-
-
-void Locker::StopPreemption() {
- v8::internal::ContextSwitcher::StopPreemption();
-}
-
-
-namespace internal {
-
-
-bool ThreadManager::RestoreThread() {
- // First check whether the current thread has been 'lazily archived', ie
- // not archived at all. If that is the case we put the state storage we
- // had prepared back in the free list, since we didn't need it after all.
- if (lazily_archived_thread_.IsSelf()) {
- lazily_archived_thread_.Initialize(ThreadHandle::INVALID);
- ASSERT(Isolate::CurrentPerIsolateThreadData()->thread_state() ==
- lazily_archived_thread_state_);
- lazily_archived_thread_state_->set_id(kInvalidId);
- lazily_archived_thread_state_->LinkInto(ThreadState::FREE_LIST);
- lazily_archived_thread_state_ = NULL;
- Isolate::CurrentPerIsolateThreadData()->set_thread_state(NULL);
- return true;
- }
-
- // Make sure that the preemption thread cannot modify the thread state while
- // it is being archived or restored.
- ExecutionAccess access(isolate_);
-
- // If there is another thread that was lazily archived then we have to really
- // archive it now.
- if (lazily_archived_thread_.IsValid()) {
- EagerlyArchiveThread();
- }
- Isolate::PerIsolateThreadData* per_thread =
- Isolate::CurrentPerIsolateThreadData();
- if (per_thread == NULL || per_thread->thread_state() == NULL) {
- // This is a new thread.
- isolate_->stack_guard()->InitThread(access);
- return false;
- }
- ThreadState* state = per_thread->thread_state();
- char* from = state->data();
- from = isolate_->handle_scope_implementer()->RestoreThread(from);
- from = isolate_->RestoreThread(from);
- from = Relocatable::RestoreState(from);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- from = isolate_->debug()->RestoreDebug(from);
-#endif
- from = isolate_->stack_guard()->RestoreStackGuard(from);
- from = isolate_->regexp_stack()->RestoreStack(from);
- from = isolate_->bootstrapper()->RestoreState(from);
- per_thread->set_thread_state(NULL);
- if (state->terminate_on_restore()) {
- isolate_->stack_guard()->TerminateExecution();
- state->set_terminate_on_restore(false);
- }
- state->set_id(kInvalidId);
- state->Unlink();
- state->LinkInto(ThreadState::FREE_LIST);
- return true;
-}
-
-
-void ThreadManager::Lock() {
- mutex_->Lock();
- mutex_owner_.Initialize(ThreadHandle::SELF);
- ASSERT(IsLockedByCurrentThread());
-}
-
-
-void ThreadManager::Unlock() {
- mutex_owner_.Initialize(ThreadHandle::INVALID);
- mutex_->Unlock();
-}
-
-
-static int ArchiveSpacePerThread() {
- return HandleScopeImplementer::ArchiveSpacePerThread() +
- Isolate::ArchiveSpacePerThread() +
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug::ArchiveSpacePerThread() +
-#endif
- StackGuard::ArchiveSpacePerThread() +
- RegExpStack::ArchiveSpacePerThread() +
- Bootstrapper::ArchiveSpacePerThread() +
- Relocatable::ArchiveSpacePerThread();
-}
-
-
-ThreadState::ThreadState(ThreadManager* thread_manager)
- : id_(ThreadManager::kInvalidId),
- terminate_on_restore_(false),
- next_(this),
- previous_(this),
- thread_manager_(thread_manager) {
-}
-
-
-void ThreadState::AllocateSpace() {
- data_ = NewArray<char>(ArchiveSpacePerThread());
-}
-
-
-void ThreadState::Unlink() {
- next_->previous_ = previous_;
- previous_->next_ = next_;
-}
-
-
-void ThreadState::LinkInto(List list) {
- ThreadState* flying_anchor =
- list == FREE_LIST ? thread_manager_->free_anchor_
- : thread_manager_->in_use_anchor_;
- next_ = flying_anchor->next_;
- previous_ = flying_anchor;
- flying_anchor->next_ = this;
- next_->previous_ = this;
-}
-
-
-ThreadState* ThreadManager::GetFreeThreadState() {
- ThreadState* gotten = free_anchor_->next_;
- if (gotten == free_anchor_) {
- ThreadState* new_thread_state = new ThreadState(this);
- new_thread_state->AllocateSpace();
- return new_thread_state;
- }
- return gotten;
-}
-
-
-// Gets the first in the list of archived threads.
-ThreadState* ThreadManager::FirstThreadStateInUse() {
- return in_use_anchor_->Next();
-}
-
-
-ThreadState* ThreadState::Next() {
- if (next_ == thread_manager_->in_use_anchor_) return NULL;
- return next_;
-}
-
-
-// Thread ids must start with 1, because in TLS having thread id 0 can't
-// be distinguished from not having a thread id at all (since NULL is
-// defined as 0.)
-ThreadManager::ThreadManager()
- : mutex_(OS::CreateMutex()),
- mutex_owner_(ThreadHandle::INVALID),
- lazily_archived_thread_(ThreadHandle::INVALID),
- lazily_archived_thread_state_(NULL),
- free_anchor_(NULL),
- in_use_anchor_(NULL) {
- free_anchor_ = new ThreadState(this);
- in_use_anchor_ = new ThreadState(this);
-}
-
-
-ThreadManager::~ThreadManager() {
- // TODO(isolates): Destroy mutexes.
-}
-
-
-void ThreadManager::ArchiveThread() {
- ASSERT(!lazily_archived_thread_.IsValid());
- ASSERT(!IsArchived());
- ThreadState* state = GetFreeThreadState();
- state->Unlink();
- Isolate::CurrentPerIsolateThreadData()->set_thread_state(state);
- lazily_archived_thread_.Initialize(ThreadHandle::SELF);
- lazily_archived_thread_state_ = state;
- ASSERT(state->id() == kInvalidId);
- state->set_id(CurrentId());
- ASSERT(state->id() != kInvalidId);
-}
-
-
-void ThreadManager::EagerlyArchiveThread() {
- ThreadState* state = lazily_archived_thread_state_;
- state->LinkInto(ThreadState::IN_USE_LIST);
- char* to = state->data();
- // Ensure that data containing GC roots are archived first, and handle them
- // in ThreadManager::Iterate(ObjectVisitor*).
- to = isolate_->handle_scope_implementer()->ArchiveThread(to);
- to = isolate_->ArchiveThread(to);
- to = Relocatable::ArchiveState(to);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- to = isolate_->debug()->ArchiveDebug(to);
-#endif
- to = isolate_->stack_guard()->ArchiveStackGuard(to);
- to = isolate_->regexp_stack()->ArchiveStack(to);
- to = isolate_->bootstrapper()->ArchiveState(to);
- lazily_archived_thread_.Initialize(ThreadHandle::INVALID);
- lazily_archived_thread_state_ = NULL;
-}
-
-
-void ThreadManager::FreeThreadResources() {
- isolate_->handle_scope_implementer()->FreeThreadResources();
- isolate_->FreeThreadResources();
-#ifdef ENABLE_DEBUGGER_SUPPORT
- isolate_->debug()->FreeThreadResources();
-#endif
- isolate_->stack_guard()->FreeThreadResources();
- isolate_->regexp_stack()->FreeThreadResources();
- isolate_->bootstrapper()->FreeThreadResources();
-}
-
-
-bool ThreadManager::IsArchived() {
- Isolate::PerIsolateThreadData* data = Isolate::CurrentPerIsolateThreadData();
- return data != NULL && data->thread_state() != NULL;
-}
-
-
-void ThreadManager::Iterate(ObjectVisitor* v) {
- // Expecting no threads during serialization/deserialization
- for (ThreadState* state = FirstThreadStateInUse();
- state != NULL;
- state = state->Next()) {
- char* data = state->data();
- data = HandleScopeImplementer::Iterate(v, data);
- data = isolate_->Iterate(v, data);
- data = Relocatable::Iterate(v, data);
- }
-}
-
-
-void ThreadManager::IterateArchivedThreads(ThreadVisitor* v) {
- for (ThreadState* state = FirstThreadStateInUse();
- state != NULL;
- state = state->Next()) {
- char* data = state->data();
- data += HandleScopeImplementer::ArchiveSpacePerThread();
- isolate_->IterateThread(v, data);
- }
-}
-
-
-int ThreadManager::CurrentId() {
- return Thread::GetThreadLocalInt(Isolate::thread_id_key());
-}
-
-
-void ThreadManager::TerminateExecution(int thread_id) {
- for (ThreadState* state = FirstThreadStateInUse();
- state != NULL;
- state = state->Next()) {
- if (thread_id == state->id()) {
- state->set_terminate_on_restore(true);
- }
- }
-}
-
-
-ContextSwitcher::ContextSwitcher(Isolate* isolate, int every_n_ms)
- : Thread(isolate, "v8:CtxtSwitcher"),
- keep_going_(true),
- sleep_ms_(every_n_ms) {
-}
-
-
-// Set the scheduling interval of V8 threads. This function starts the
-// ContextSwitcher thread if needed.
-void ContextSwitcher::StartPreemption(int every_n_ms) {
- Isolate* isolate = Isolate::Current();
- ASSERT(Locker::IsLocked());
- if (isolate->context_switcher() == NULL) {
- // If the ContextSwitcher thread is not running at the moment start it now.
- isolate->set_context_switcher(new ContextSwitcher(isolate, every_n_ms));
- isolate->context_switcher()->Start();
- } else {
- // ContextSwitcher thread is already running, so we just change the
- // scheduling interval.
- isolate->context_switcher()->sleep_ms_ = every_n_ms;
- }
-}
-
-
-// Disable preemption of V8 threads. If multiple threads want to use V8 they
-// must cooperatively schedule amongst them from this point on.
-void ContextSwitcher::StopPreemption() {
- Isolate* isolate = Isolate::Current();
- ASSERT(Locker::IsLocked());
- if (isolate->context_switcher() != NULL) {
- // The ContextSwitcher thread is running. We need to stop it and release
- // its resources.
- isolate->context_switcher()->keep_going_ = false;
- // Wait for the ContextSwitcher thread to exit.
- isolate->context_switcher()->Join();
- // Thread has exited, now we can delete it.
- delete(isolate->context_switcher());
- isolate->set_context_switcher(NULL);
- }
-}
-
-
-// Main loop of the ContextSwitcher thread: Preempt the currently running V8
-// thread at regular intervals.
-void ContextSwitcher::Run() {
- while (keep_going_) {
- OS::Sleep(sleep_ms_);
- isolate()->stack_guard()->Preempt();
- }
-}
-
-
-// Acknowledge the preemption by the receiving thread.
-void ContextSwitcher::PreemptionReceived() {
- ASSERT(Locker::IsLocked());
- // There is currently no accounting being done for this. But could be in the
- // future, which is why we leave this in.
-}
-
-
-} // namespace internal
-} // namespace v8
diff --git a/src/3rdparty/v8/src/v8threads.h b/src/3rdparty/v8/src/v8threads.h
deleted file mode 100644
index 1266af7..0000000
--- a/src/3rdparty/v8/src/v8threads.h
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_V8THREADS_H_
-#define V8_V8THREADS_H_
-
-namespace v8 {
-namespace internal {
-
-
-class ThreadState {
- public:
- // Returns NULL after the last one.
- ThreadState* Next();
-
- enum List {FREE_LIST, IN_USE_LIST};
-
- void LinkInto(List list);
- void Unlink();
-
- // Id of thread.
- void set_id(int id) { id_ = id; }
- int id() { return id_; }
-
- // Should the thread be terminated when it is restored?
- bool terminate_on_restore() { return terminate_on_restore_; }
- void set_terminate_on_restore(bool terminate_on_restore) {
- terminate_on_restore_ = terminate_on_restore;
- }
-
- // Get data area for archiving a thread.
- char* data() { return data_; }
- private:
- explicit ThreadState(ThreadManager* thread_manager);
-
- void AllocateSpace();
-
- int id_;
- bool terminate_on_restore_;
- char* data_;
- ThreadState* next_;
- ThreadState* previous_;
-
- ThreadManager* thread_manager_;
-
- friend class ThreadManager;
-};
-
-
-// Defined in top.h
-class ThreadLocalTop;
-
-
-class ThreadVisitor {
- public:
- // ThreadLocalTop may be only available during this call.
- virtual void VisitThread(Isolate* isolate, ThreadLocalTop* top) = 0;
-
- protected:
- virtual ~ThreadVisitor() {}
-};
-
-
-class ThreadManager {
- public:
- void Lock();
- void Unlock();
-
- void ArchiveThread();
- bool RestoreThread();
- void FreeThreadResources();
- bool IsArchived();
-
- void Iterate(ObjectVisitor* v);
- void IterateArchivedThreads(ThreadVisitor* v);
- bool IsLockedByCurrentThread() { return mutex_owner_.IsSelf(); }
-
- int CurrentId();
-
- void TerminateExecution(int thread_id);
-
- // Iterate over in-use states.
- ThreadState* FirstThreadStateInUse();
- ThreadState* GetFreeThreadState();
-
- static const int kInvalidId = -1;
- private:
- ThreadManager();
- ~ThreadManager();
-
- void EagerlyArchiveThread();
-
- Mutex* mutex_;
- ThreadHandle mutex_owner_;
- ThreadHandle lazily_archived_thread_;
- ThreadState* lazily_archived_thread_state_;
-
- // In the following two lists there is always at least one object on the list.
- // The first object is a flying anchor that is only there to simplify linking
- // and unlinking.
- // Head of linked list of free states.
- ThreadState* free_anchor_;
- // Head of linked list of states in use.
- ThreadState* in_use_anchor_;
-
- Isolate* isolate_;
-
- friend class Isolate;
- friend class ThreadState;
-};
-
-
-// The ContextSwitcher thread is used to schedule regular preemptions to
-// multiple running V8 threads. Generally it is necessary to call
-// StartPreemption if there is more than one thread running. If not, a single
-// JavaScript can take full control of V8 and not allow other threads to run.
-class ContextSwitcher: public Thread {
- public:
- // Set the preemption interval for the ContextSwitcher thread.
- static void StartPreemption(int every_n_ms);
-
- // Stop sending preemption requests to threads.
- static void StopPreemption();
-
- // Preempted thread needs to call back to the ContextSwitcher to acknowledge
- // the handling of a preemption request.
- static void PreemptionReceived();
-
- private:
- explicit ContextSwitcher(Isolate* isolate, int every_n_ms);
-
- void Run();
-
- bool keep_going_;
- int sleep_ms_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_V8THREADS_H_
diff --git a/src/3rdparty/v8/src/v8utils.h b/src/3rdparty/v8/src/v8utils.h
deleted file mode 100644
index 87c5e7f..0000000
--- a/src/3rdparty/v8/src/v8utils.h
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_V8UTILS_H_
-#define V8_V8UTILS_H_
-
-#include "utils.h"
-#include "platform.h" // For va_list on Solaris.
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// I/O support.
-
-#if __GNUC__ >= 4
-// On gcc we can ask the compiler to check the types of %d-style format
-// specifiers and their associated arguments. TODO(erikcorry) fix this
-// so it works on MacOSX.
-#if defined(__MACH__) && defined(__APPLE__)
-#define PRINTF_CHECKING
-#define FPRINTF_CHECKING
-#else // MacOsX.
-#define PRINTF_CHECKING __attribute__ ((format (printf, 1, 2)))
-#define FPRINTF_CHECKING __attribute__ ((format (printf, 2, 3)))
-#endif
-#else
-#define PRINTF_CHECKING
-#define FPRINTF_CHECKING
-#endif
-
-// Our version of printf().
-void PRINTF_CHECKING PrintF(const char* format, ...);
-void FPRINTF_CHECKING PrintF(FILE* out, const char* format, ...);
-
-// Our version of fflush.
-void Flush(FILE* out);
-
-inline void Flush() {
- Flush(stdout);
-}
-
-
-// Read a line of characters after printing the prompt to stdout. The resulting
-// char* needs to be disposed off with DeleteArray by the caller.
-char* ReadLine(const char* prompt);
-
-
-// Read and return the raw bytes in a file. the size of the buffer is returned
-// in size.
-// The returned buffer must be freed by the caller.
-byte* ReadBytes(const char* filename, int* size, bool verbose = true);
-
-
-// Append size chars from str to the file given by filename.
-// The file is overwritten. Returns the number of chars written.
-int AppendChars(const char* filename,
- const char* str,
- int size,
- bool verbose = true);
-
-
-// Write size chars from str to the file given by filename.
-// The file is overwritten. Returns the number of chars written.
-int WriteChars(const char* filename,
- const char* str,
- int size,
- bool verbose = true);
-
-
-// Write size bytes to the file given by filename.
-// The file is overwritten. Returns the number of bytes written.
-int WriteBytes(const char* filename,
- const byte* bytes,
- int size,
- bool verbose = true);
-
-
-// Write the C code
-// const char* <varname> = "<str>";
-// const int <varname>_len = <len>;
-// to the file given by filename. Only the first len chars are written.
-int WriteAsCFile(const char* filename, const char* varname,
- const char* str, int size, bool verbose = true);
-
-
-// Data structures
-
-template <typename T>
-inline Vector< Handle<Object> > HandleVector(v8::internal::Handle<T>* elms,
- int length) {
- return Vector< Handle<Object> >(
- reinterpret_cast<v8::internal::Handle<Object>*>(elms), length);
-}
-
-// Memory
-
-// Copies data from |src| to |dst|. The data spans MUST not overlap.
-inline void CopyWords(Object** dst, Object** src, int num_words) {
- ASSERT(Min(dst, src) + num_words <= Max(dst, src));
- ASSERT(num_words > 0);
-
- // Use block copying memcpy if the segment we're copying is
- // enough to justify the extra call/setup overhead.
- static const int kBlockCopyLimit = 16;
-
- if (num_words >= kBlockCopyLimit) {
- memcpy(dst, src, num_words * kPointerSize);
- } else {
- int remaining = num_words;
- do {
- remaining--;
- *dst++ = *src++;
- } while (remaining > 0);
- }
-}
-
-
-template <typename T>
-static inline void MemsetPointer(T** dest, T* value, int counter) {
-#if defined(V8_HOST_ARCH_IA32)
-#define STOS "stosl"
-#elif defined(V8_HOST_ARCH_X64)
-#define STOS "stosq"
-#endif
-
-#if defined(__GNUC__) && defined(STOS)
- asm volatile(
- "cld;"
- "rep ; " STOS
- : "+&c" (counter), "+&D" (dest)
- : "a" (value)
- : "memory", "cc");
-#else
- for (int i = 0; i < counter; i++) {
- dest[i] = value;
- }
-#endif
-
-#undef STOS
-}
-
-
-// Simple wrapper that allows an ExternalString to refer to a
-// Vector<const char>. Doesn't assume ownership of the data.
-class AsciiStringAdapter: public v8::String::ExternalAsciiStringResource {
- public:
- explicit AsciiStringAdapter(Vector<const char> data) : data_(data) {}
-
- virtual const char* data() const { return data_.start(); }
-
- virtual size_t length() const { return data_.length(); }
-
- private:
- Vector<const char> data_;
-};
-
-
-// Simple support to read a file into a 0-terminated C-string.
-// The returned buffer must be freed by the caller.
-// On return, *exits tells whether the file existed.
-Vector<const char> ReadFile(const char* filename,
- bool* exists,
- bool verbose = true);
-
-
-// Helper class for building result strings in a character buffer. The
-// purpose of the class is to use safe operations that checks the
-// buffer bounds on all operations in debug mode.
-class StringBuilder {
- public:
- // Create a string builder with a buffer of the given size. The
- // buffer is allocated through NewArray<char> and must be
- // deallocated by the caller of Finalize().
- explicit StringBuilder(int size);
-
- StringBuilder(char* buffer, int size)
- : buffer_(buffer, size), position_(0) { }
-
- ~StringBuilder() { if (!is_finalized()) Finalize(); }
-
- int size() const { return buffer_.length(); }
-
- // Get the current position in the builder.
- int position() const {
- ASSERT(!is_finalized());
- return position_;
- }
-
- // Reset the position.
- void Reset() { position_ = 0; }
-
- // Add a single character to the builder. It is not allowed to add
- // 0-characters; use the Finalize() method to terminate the string
- // instead.
- void AddCharacter(char c) {
- ASSERT(c != '\0');
- ASSERT(!is_finalized() && position_ < buffer_.length());
- buffer_[position_++] = c;
- }
-
- // Add an entire string to the builder. Uses strlen() internally to
- // compute the length of the input string.
- void AddString(const char* s);
-
- // Add the first 'n' characters of the given string 's' to the
- // builder. The input string must have enough characters.
- void AddSubstring(const char* s, int n);
-
- // Add formatted contents to the builder just like printf().
- void AddFormatted(const char* format, ...);
-
- // Add formatted contents like printf based on a va_list.
- void AddFormattedList(const char* format, va_list list);
-
- // Add character padding to the builder. If count is non-positive,
- // nothing is added to the builder.
- void AddPadding(char c, int count);
-
- // Finalize the string by 0-terminating it and returning the buffer.
- char* Finalize();
-
- private:
- Vector<char> buffer_;
- int position_;
-
- bool is_finalized() const { return position_ < 0; }
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
-};
-
-
-// Copy from ASCII/16bit chars to ASCII/16bit chars.
-template <typename sourcechar, typename sinkchar>
-static inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
- sinkchar* limit = dest + chars;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- if (sizeof(*dest) == sizeof(*src)) {
- if (chars >= static_cast<int>(OS::kMinComplexMemCopy / sizeof(*dest))) {
- OS::MemCopy(dest, src, chars * sizeof(*dest));
- return;
- }
- // Number of characters in a uintptr_t.
- static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest); // NOLINT
- while (dest <= limit - kStepSize) {
- *reinterpret_cast<uintptr_t*>(dest) =
- *reinterpret_cast<const uintptr_t*>(src);
- dest += kStepSize;
- src += kStepSize;
- }
- }
-#endif
- while (dest < limit) {
- *dest++ = static_cast<sinkchar>(*src++);
- }
-}
-
-
-// A resource for using mmapped files to back external strings that are read
-// from files.
-class MemoryMappedExternalResource: public
- v8::String::ExternalAsciiStringResource {
- public:
- explicit MemoryMappedExternalResource(const char* filename);
- MemoryMappedExternalResource(const char* filename,
- bool remove_file_on_cleanup);
- virtual ~MemoryMappedExternalResource();
-
- virtual const char* data() const { return data_; }
- virtual size_t length() const { return length_; }
-
- bool exists() const { return file_ != NULL; }
- bool is_empty() const { return length_ == 0; }
-
- bool EnsureIsAscii(bool abort_if_failed) const;
- bool EnsureIsAscii() const { return EnsureIsAscii(true); }
- bool IsAscii() const { return EnsureIsAscii(false); }
-
- private:
- void Init(const char* filename);
-
- char* filename_;
- OS::MemoryMappedFile* file_;
-
- const char* data_;
- size_t length_;
- bool remove_file_on_cleanup_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_V8UTILS_H_
diff --git a/src/3rdparty/v8/src/variables.cc b/src/3rdparty/v8/src/variables.cc
deleted file mode 100644
index fa7ce1b..0000000
--- a/src/3rdparty/v8/src/variables.cc
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "ast.h"
-#include "scopes.h"
-#include "variables.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Implementation StaticType.
-
-
-const char* StaticType::Type2String(StaticType* type) {
- switch (type->kind_) {
- case UNKNOWN:
- return "UNKNOWN";
- case LIKELY_SMI:
- return "LIKELY_SMI";
- default:
- UNREACHABLE();
- }
- return "UNREACHABLE";
-}
-
-
-// ----------------------------------------------------------------------------
-// Implementation Variable.
-
-
-const char* Variable::Mode2String(Mode mode) {
- switch (mode) {
- case VAR: return "VAR";
- case CONST: return "CONST";
- case DYNAMIC: return "DYNAMIC";
- case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
- case DYNAMIC_LOCAL: return "DYNAMIC_LOCAL";
- case INTERNAL: return "INTERNAL";
- case TEMPORARY: return "TEMPORARY";
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-Property* Variable::AsProperty() const {
- return rewrite_ == NULL ? NULL : rewrite_->AsProperty();
-}
-
-
-Slot* Variable::AsSlot() const {
- return rewrite_ == NULL ? NULL : rewrite_->AsSlot();
-}
-
-
-bool Variable::IsStackAllocated() const {
- Slot* slot = AsSlot();
- return slot != NULL && slot->IsStackAllocated();
-}
-
-
-bool Variable::IsParameter() const {
- Slot* s = AsSlot();
- return s != NULL && s->type() == Slot::PARAMETER;
-}
-
-
-bool Variable::IsStackLocal() const {
- Slot* s = AsSlot();
- return s != NULL && s->type() == Slot::LOCAL;
-}
-
-
-bool Variable::IsContextSlot() const {
- Slot* s = AsSlot();
- return s != NULL && s->type() == Slot::CONTEXT;
-}
-
-
-Variable::Variable(Scope* scope,
- Handle<String> name,
- Mode mode,
- bool is_valid_LHS,
- Kind kind)
- : scope_(scope),
- name_(name),
- mode_(mode),
- kind_(kind),
- local_if_not_shadowed_(NULL),
- rewrite_(NULL),
- is_valid_LHS_(is_valid_LHS),
- is_accessed_from_inner_scope_(false),
- is_used_(false) {
- // names must be canonicalized for fast equality checks
- ASSERT(name->IsSymbol());
-}
-
-
-bool Variable::is_global() const {
- // Temporaries are never global, they must always be allocated in the
- // activation frame.
- return mode_ != TEMPORARY && scope_ != NULL && scope_->is_global_scope();
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/variables.h b/src/3rdparty/v8/src/variables.h
deleted file mode 100644
index 67e1a18..0000000
--- a/src/3rdparty/v8/src/variables.h
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VARIABLES_H_
-#define V8_VARIABLES_H_
-
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-// Variables and AST expression nodes can track their "type" to enable
-// optimizations and removal of redundant checks when generating code.
-
-class StaticType {
- public:
- enum Kind {
- UNKNOWN,
- LIKELY_SMI
- };
-
- StaticType() : kind_(UNKNOWN) {}
-
- bool Is(Kind kind) const { return kind_ == kind; }
-
- bool IsKnown() const { return !Is(UNKNOWN); }
- bool IsUnknown() const { return Is(UNKNOWN); }
- bool IsLikelySmi() const { return Is(LIKELY_SMI); }
-
- void CopyFrom(StaticType* other) {
- kind_ = other->kind_;
- }
-
- static const char* Type2String(StaticType* type);
-
- // LIKELY_SMI accessors
- void SetAsLikelySmi() {
- kind_ = LIKELY_SMI;
- }
-
- void SetAsLikelySmiIfUnknown() {
- if (IsUnknown()) {
- SetAsLikelySmi();
- }
- }
-
- private:
- Kind kind_;
-};
-
-
-// The AST refers to variables via VariableProxies - placeholders for the actual
-// variables. Variables themselves are never directly referred to from the AST,
-// they are maintained by scopes, and referred to from VariableProxies and Slots
-// after binding and variable allocation.
-
-class Variable: public ZoneObject {
- public:
- enum Mode {
- // User declared variables:
- VAR, // declared via 'var', and 'function' declarations
-
- CONST, // declared via 'const' declarations
-
- // Variables introduced by the compiler:
- DYNAMIC, // always require dynamic lookup (we don't know
- // the declaration)
-
- DYNAMIC_GLOBAL, // requires dynamic lookup, but we know that the
- // variable is global unless it has been shadowed
- // by an eval-introduced variable
-
- DYNAMIC_LOCAL, // requires dynamic lookup, but we know that the
- // variable is local and where it is unless it
- // has been shadowed by an eval-introduced
- // variable
-
- INTERNAL, // like VAR, but not user-visible (may or may not
- // be in a context)
-
- TEMPORARY // temporary variables (not user-visible), never
- // in a context
- };
-
- enum Kind {
- NORMAL,
- THIS,
- ARGUMENTS
- };
-
- Variable(Scope* scope,
- Handle<String> name,
- Mode mode,
- bool is_valid_lhs,
- Kind kind);
-
- // Printing support
- static const char* Mode2String(Mode mode);
-
- // Type testing & conversion
- Property* AsProperty() const;
- Slot* AsSlot() const;
-
- bool IsValidLeftHandSide() { return is_valid_LHS_; }
-
- // The source code for an eval() call may refer to a variable that is
- // in an outer scope about which we don't know anything (it may not
- // be the global scope). scope() is NULL in that case. Currently the
- // scope is only used to follow the context chain length.
- Scope* scope() const { return scope_; }
-
- Handle<String> name() const { return name_; }
- Mode mode() const { return mode_; }
- bool is_accessed_from_inner_scope() const {
- return is_accessed_from_inner_scope_;
- }
- void MarkAsAccessedFromInnerScope() {
- is_accessed_from_inner_scope_ = true;
- }
- bool is_used() { return is_used_; }
- void set_is_used(bool flag) { is_used_ = flag; }
-
- bool IsVariable(Handle<String> n) const {
- return !is_this() && name().is_identical_to(n);
- }
-
- bool IsStackAllocated() const;
- bool IsParameter() const; // Includes 'this'.
- bool IsStackLocal() const;
- bool IsContextSlot() const;
-
- bool is_dynamic() const {
- return (mode_ == DYNAMIC ||
- mode_ == DYNAMIC_GLOBAL ||
- mode_ == DYNAMIC_LOCAL);
- }
-
- bool is_global() const;
- bool is_this() const { return kind_ == THIS; }
- bool is_arguments() const { return kind_ == ARGUMENTS; }
-
- // True if the variable is named eval and not known to be shadowed.
- bool is_possibly_eval() const {
- return IsVariable(FACTORY->eval_symbol()) &&
- (mode_ == DYNAMIC || mode_ == DYNAMIC_GLOBAL);
- }
-
- Variable* local_if_not_shadowed() const {
- ASSERT(mode_ == DYNAMIC_LOCAL && local_if_not_shadowed_ != NULL);
- return local_if_not_shadowed_;
- }
-
- void set_local_if_not_shadowed(Variable* local) {
- local_if_not_shadowed_ = local;
- }
-
- Expression* rewrite() const { return rewrite_; }
- void set_rewrite(Expression* expr) { rewrite_ = expr; }
-
- StaticType* type() { return &type_; }
-
- private:
- Scope* scope_;
- Handle<String> name_;
- Mode mode_;
- Kind kind_;
-
- Variable* local_if_not_shadowed_;
-
- // Static type information
- StaticType type_;
-
- // Code generation.
- // rewrite_ is usually a Slot or a Property, but may be any expression.
- Expression* rewrite_;
-
- // Valid as a LHS? (const and this are not valid LHS, for example)
- bool is_valid_LHS_;
-
- // Usage info.
- bool is_accessed_from_inner_scope_; // set by variable resolver
- bool is_used_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_VARIABLES_H_
diff --git a/src/3rdparty/v8/src/version.cc b/src/3rdparty/v8/src/version.cc
deleted file mode 100644
index 52e758d..0000000
--- a/src/3rdparty/v8/src/version.cc
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "version.h"
-
-// These macros define the version number for the current version.
-// NOTE these macros are used by the SCons build script so their names
-// cannot be changed without changing the SCons build script.
-#define MAJOR_VERSION 3
-#define MINOR_VERSION 2
-#define BUILD_NUMBER 8
-#define PATCH_LEVEL 0
-// Use 1 for candidates and 0 otherwise.
-// (Boolean macro values are not supported by all preprocessors.)
-#define IS_CANDIDATE_VERSION 1
-
-// Define SONAME to have the SCons build the put a specific SONAME into the
-// shared library instead the generic SONAME generated from the V8 version
-// number. This define is mainly used by the SCons build script.
-#define SONAME ""
-
-#if IS_CANDIDATE_VERSION
-#define CANDIDATE_STRING " (candidate)"
-#else
-#define CANDIDATE_STRING ""
-#endif
-
-#define SX(x) #x
-#define S(x) SX(x)
-
-#if PATCH_LEVEL > 0
-#define VERSION_STRING \
- S(MAJOR_VERSION) "." S(MINOR_VERSION) "." S(BUILD_NUMBER) "." \
- S(PATCH_LEVEL) CANDIDATE_STRING
-#else
-#define VERSION_STRING \
- S(MAJOR_VERSION) "." S(MINOR_VERSION) "." S(BUILD_NUMBER) \
- CANDIDATE_STRING
-#endif
-
-namespace v8 {
-namespace internal {
-
-int Version::major_ = MAJOR_VERSION;
-int Version::minor_ = MINOR_VERSION;
-int Version::build_ = BUILD_NUMBER;
-int Version::patch_ = PATCH_LEVEL;
-bool Version::candidate_ = (IS_CANDIDATE_VERSION != 0);
-const char* Version::soname_ = SONAME;
-const char* Version::version_string_ = VERSION_STRING;
-
-// Calculate the V8 version string.
-void Version::GetString(Vector<char> str) {
- const char* candidate = IsCandidate() ? " (candidate)" : "";
-#ifdef USE_SIMULATOR
- const char* is_simulator = " SIMULATOR";
-#else
- const char* is_simulator = "";
-#endif // USE_SIMULATOR
- if (GetPatch() > 0) {
- OS::SNPrintF(str, "%d.%d.%d.%d%s%s",
- GetMajor(), GetMinor(), GetBuild(), GetPatch(), candidate,
- is_simulator);
- } else {
- OS::SNPrintF(str, "%d.%d.%d%s%s",
- GetMajor(), GetMinor(), GetBuild(), candidate,
- is_simulator);
- }
-}
-
-
-// Calculate the SONAME for the V8 shared library.
-void Version::GetSONAME(Vector<char> str) {
- if (soname_ == NULL || *soname_ == '\0') {
- // Generate generic SONAME if no specific SONAME is defined.
- const char* candidate = IsCandidate() ? "-candidate" : "";
- if (GetPatch() > 0) {
- OS::SNPrintF(str, "libv8-%d.%d.%d.%d%s.so",
- GetMajor(), GetMinor(), GetBuild(), GetPatch(), candidate);
- } else {
- OS::SNPrintF(str, "libv8-%d.%d.%d%s.so",
- GetMajor(), GetMinor(), GetBuild(), candidate);
- }
- } else {
- // Use specific SONAME.
- OS::SNPrintF(str, "%s", soname_);
- }
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/version.h b/src/3rdparty/v8/src/version.h
deleted file mode 100644
index 4b3e7e2..0000000
--- a/src/3rdparty/v8/src/version.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VERSION_H_
-#define V8_VERSION_H_
-
-namespace v8 {
-namespace internal {
-
-class Version {
- public:
- // Return the various version components.
- static int GetMajor() { return major_; }
- static int GetMinor() { return minor_; }
- static int GetBuild() { return build_; }
- static int GetPatch() { return patch_; }
- static bool IsCandidate() { return candidate_; }
-
- // Calculate the V8 version string.
- static void GetString(Vector<char> str);
-
- // Calculate the SONAME for the V8 shared library.
- static void GetSONAME(Vector<char> str);
-
- static const char* GetVersion() { return version_string_; }
-
- private:
- // NOTE: can't make these really const because of test-version.cc.
- static int major_;
- static int minor_;
- static int build_;
- static int patch_;
- static bool candidate_;
- static const char* soname_;
- static const char* version_string_;
-
- // In test-version.cc.
- friend void SetVersion(int major, int minor, int build, int patch,
- bool candidate, const char* soname);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_VERSION_H_
diff --git a/src/3rdparty/v8/src/virtual-frame-heavy-inl.h b/src/3rdparty/v8/src/virtual-frame-heavy-inl.h
deleted file mode 100644
index cf12eca..0000000
--- a/src/3rdparty/v8/src/virtual-frame-heavy-inl.h
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VIRTUAL_FRAME_HEAVY_INL_H_
-#define V8_VIRTUAL_FRAME_HEAVY_INL_H_
-
-#include "type-info.h"
-#include "register-allocator.h"
-#include "scopes.h"
-#include "register-allocator-inl.h"
-#include "codegen-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// On entry to a function, the virtual frame already contains the receiver,
-// the parameters, and a return address. All frame elements are in memory.
-VirtualFrame::VirtualFrame()
- : elements_(parameter_count() + local_count() + kPreallocatedElements),
- stack_pointer_(parameter_count() + 1) { // 0-based index of TOS.
- for (int i = 0; i <= stack_pointer_; i++) {
- elements_.Add(FrameElement::MemoryElement(TypeInfo::Unknown()));
- }
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- register_locations_[i] = kIllegalIndex;
- }
-}
-
-
-// When cloned, a frame is a deep copy of the original.
-VirtualFrame::VirtualFrame(VirtualFrame* original)
- : elements_(original->element_count()),
- stack_pointer_(original->stack_pointer_) {
- elements_.AddAll(original->elements_);
- // Copy register locations from original.
- memcpy(&register_locations_,
- original->register_locations_,
- sizeof(register_locations_));
-}
-
-
-void VirtualFrame::PushFrameSlotAt(int index) {
- elements_.Add(CopyElementAt(index));
-}
-
-
-void VirtualFrame::Push(Register reg, TypeInfo info) {
- if (is_used(reg)) {
- int index = register_location(reg);
- FrameElement element = CopyElementAt(index, info);
- elements_.Add(element);
- } else {
- Use(reg, element_count());
- FrameElement element =
- FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED, info);
- elements_.Add(element);
- }
-}
-
-
-bool VirtualFrame::ConstantPoolOverflowed() {
- return FrameElement::ConstantPoolOverflowed();
-}
-
-
-bool VirtualFrame::Equals(VirtualFrame* other) {
-#ifdef DEBUG
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (register_location(i) != other->register_location(i)) {
- return false;
- }
- }
- if (element_count() != other->element_count()) return false;
-#endif
- if (stack_pointer_ != other->stack_pointer_) return false;
- for (int i = 0; i < element_count(); i++) {
- if (!elements_[i].Equals(other->elements_[i])) return false;
- }
-
- return true;
-}
-
-
-void VirtualFrame::SetTypeForLocalAt(int index, TypeInfo info) {
- elements_[local0_index() + index].set_type_info(info);
-}
-
-
-// Make the type of all elements be MEMORY.
-void VirtualFrame::SpillAll() {
- for (int i = 0; i < element_count(); i++) {
- SpillElementAt(i);
- }
-}
-
-
-void VirtualFrame::PrepareForReturn() {
- // Spill all locals. This is necessary to make sure all locals have
- // the right value when breaking at the return site in the debugger.
- for (int i = 0; i < expression_base_index(); i++) {
- SpillElementAt(i);
- }
-}
-
-
-void VirtualFrame::SetTypeForParamAt(int index, TypeInfo info) {
- elements_[param0_index() + index].set_type_info(info);
-}
-
-
-void VirtualFrame::Nip(int num_dropped) {
- ASSERT(num_dropped >= 0);
- if (num_dropped == 0) return;
- Result tos = Pop();
- if (num_dropped > 1) {
- Drop(num_dropped - 1);
- }
- SetElementAt(0, &tos);
-}
-
-
-void VirtualFrame::Push(Smi* value) {
- Push(Handle<Object> (value));
-}
-
-
-int VirtualFrame::register_location(Register reg) {
- return register_locations_[RegisterAllocator::ToNumber(reg)];
-}
-
-
-void VirtualFrame::set_register_location(Register reg, int index) {
- register_locations_[RegisterAllocator::ToNumber(reg)] = index;
-}
-
-
-bool VirtualFrame::is_used(Register reg) {
- return register_locations_[RegisterAllocator::ToNumber(reg)]
- != kIllegalIndex;
-}
-
-
-void VirtualFrame::SetElementAt(int index, Handle<Object> value) {
- Result temp(value);
- SetElementAt(index, &temp);
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- return RawCallStub(stub);
-}
-
-
-int VirtualFrame::parameter_count() {
- return cgen()->scope()->num_parameters();
-}
-
-
-int VirtualFrame::local_count() {
- return cgen()->scope()->num_stack_slots();
-}
-
-} } // namespace v8::internal
-
-#endif // V8_VIRTUAL_FRAME_HEAVY_INL_H_
diff --git a/src/3rdparty/v8/src/virtual-frame-heavy.cc b/src/3rdparty/v8/src/virtual-frame-heavy.cc
deleted file mode 100644
index 7270280..0000000
--- a/src/3rdparty/v8/src/virtual-frame-heavy.cc
+++ /dev/null
@@ -1,312 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void VirtualFrame::SetElementAt(int index, Result* value) {
- int frame_index = element_count() - index - 1;
- ASSERT(frame_index >= 0);
- ASSERT(frame_index < element_count());
- ASSERT(value->is_valid());
- FrameElement original = elements_[frame_index];
-
- // Early exit if the element is the same as the one being set.
- bool same_register = original.is_register()
- && value->is_register()
- && original.reg().is(value->reg());
- bool same_constant = original.is_constant()
- && value->is_constant()
- && original.handle().is_identical_to(value->handle());
- if (same_register || same_constant) {
- value->Unuse();
- return;
- }
-
- InvalidateFrameSlotAt(frame_index);
-
- if (value->is_register()) {
- if (is_used(value->reg())) {
- // The register already appears on the frame. Either the existing
- // register element, or the new element at frame_index, must be made
- // a copy.
- int i = register_location(value->reg());
-
- if (i < frame_index) {
- // The register FrameElement is lower in the frame than the new copy.
- elements_[frame_index] = CopyElementAt(i);
- } else {
- // There was an early bailout for the case of setting a
- // register element to itself.
- ASSERT(i != frame_index);
- elements_[frame_index] = elements_[i];
- elements_[i] = CopyElementAt(frame_index);
- if (elements_[frame_index].is_synced()) {
- elements_[i].set_sync();
- }
- elements_[frame_index].clear_sync();
- set_register_location(value->reg(), frame_index);
- for (int j = i + 1; j < element_count(); j++) {
- if (elements_[j].is_copy() && elements_[j].index() == i) {
- elements_[j].set_index(frame_index);
- }
- }
- }
- } else {
- // The register value->reg() was not already used on the frame.
- Use(value->reg(), frame_index);
- elements_[frame_index] =
- FrameElement::RegisterElement(value->reg(),
- FrameElement::NOT_SYNCED,
- value->type_info());
- }
- } else {
- ASSERT(value->is_constant());
- elements_[frame_index] =
- FrameElement::ConstantElement(value->handle(),
- FrameElement::NOT_SYNCED);
- }
- value->Unuse();
-}
-
-
-// Create a duplicate of an existing valid frame element.
-// We can pass an optional number type information that will override the
-// existing information about the backing element. The new information must
-// not conflict with the existing type information and must be equally or
-// more precise. The default parameter value kUninitialized means that there
-// is no additional information.
-FrameElement VirtualFrame::CopyElementAt(int index, TypeInfo info) {
- ASSERT(index >= 0);
- ASSERT(index < element_count());
-
- FrameElement target = elements_[index];
- FrameElement result;
-
- switch (target.type()) {
- case FrameElement::CONSTANT:
- // We do not copy constants and instead return a fresh unsynced
- // constant.
- result = FrameElement::ConstantElement(target.handle(),
- FrameElement::NOT_SYNCED);
- break;
-
- case FrameElement::COPY:
- // We do not allow copies of copies, so we follow one link to
- // the actual backing store of a copy before making a copy.
- index = target.index();
- ASSERT(elements_[index].is_memory() || elements_[index].is_register());
- // Fall through.
-
- case FrameElement::MEMORY: // Fall through.
- case FrameElement::REGISTER: {
- // All copies are backed by memory or register locations.
- result.set_type(FrameElement::COPY);
- result.clear_copied();
- result.clear_sync();
- result.set_index(index);
- elements_[index].set_copied();
- // Update backing element's number information.
- TypeInfo existing = elements_[index].type_info();
- ASSERT(!existing.IsUninitialized());
- // Assert that the new type information (a) does not conflict with the
- // existing one and (b) is equally or more precise.
- ASSERT((info.ToInt() & existing.ToInt()) == existing.ToInt());
- ASSERT((info.ToInt() | existing.ToInt()) == info.ToInt());
-
- elements_[index].set_type_info(!info.IsUninitialized()
- ? info
- : existing);
- break;
- }
- case FrameElement::INVALID:
- // We should not try to copy invalid elements.
- UNREACHABLE();
- break;
- }
- return result;
-}
-
-
-// Modify the state of the virtual frame to match the actual frame by adding
-// extra in-memory elements to the top of the virtual frame. The extra
-// elements will be externally materialized on the actual frame (eg, by
-// pushing an exception handler). No code is emitted.
-void VirtualFrame::Adjust(int count) {
- ASSERT(count >= 0);
- ASSERT(stack_pointer_ == element_count() - 1);
-
- for (int i = 0; i < count; i++) {
- elements_.Add(FrameElement::MemoryElement(TypeInfo::Unknown()));
- }
- stack_pointer_ += count;
-}
-
-
-void VirtualFrame::ForgetElements(int count) {
- ASSERT(count >= 0);
- ASSERT(element_count() >= count);
-
- for (int i = 0; i < count; i++) {
- FrameElement last = elements_.RemoveLast();
- if (last.is_register()) {
- // A hack to properly count register references for the code
- // generator's current frame and also for other frames. The
- // same code appears in PrepareMergeTo.
- if (cgen()->frame() == this) {
- Unuse(last.reg());
- } else {
- set_register_location(last.reg(), kIllegalIndex);
- }
- }
- }
-}
-
-
-// Make the type of the element at a given index be MEMORY.
-void VirtualFrame::SpillElementAt(int index) {
- if (!elements_[index].is_valid()) return;
-
- SyncElementAt(index);
- // Number type information is preserved.
- // Copies get their number information from their backing element.
- TypeInfo info;
- if (!elements_[index].is_copy()) {
- info = elements_[index].type_info();
- } else {
- info = elements_[elements_[index].index()].type_info();
- }
- // The element is now in memory. Its copied flag is preserved.
- FrameElement new_element = FrameElement::MemoryElement(info);
- if (elements_[index].is_copied()) {
- new_element.set_copied();
- }
- if (elements_[index].is_untagged_int32()) {
- new_element.set_untagged_int32(true);
- }
- if (elements_[index].is_register()) {
- Unuse(elements_[index].reg());
- }
- elements_[index] = new_element;
-}
-
-
-// Clear the dirty bit for the element at a given index.
-void VirtualFrame::SyncElementAt(int index) {
- if (index <= stack_pointer_) {
- if (!elements_[index].is_synced()) SyncElementBelowStackPointer(index);
- } else if (index == stack_pointer_ + 1) {
- SyncElementByPushing(index);
- } else {
- SyncRange(stack_pointer_ + 1, index);
- }
-}
-
-
-void VirtualFrame::PrepareMergeTo(VirtualFrame* expected) {
- // Perform state changes on this frame that will make merge to the
- // expected frame simpler or else increase the likelihood that his
- // frame will match another.
- for (int i = 0; i < element_count(); i++) {
- FrameElement source = elements_[i];
- FrameElement target = expected->elements_[i];
-
- if (!target.is_valid() ||
- (target.is_memory() && !source.is_memory() && source.is_synced())) {
- // No code needs to be generated to invalidate valid elements.
- // No code needs to be generated to move values to memory if
- // they are already synced. We perform those moves here, before
- // merging.
- if (source.is_register()) {
- // If the frame is the code generator's current frame, we have
- // to decrement both the frame-internal and global register
- // counts.
- if (cgen()->frame() == this) {
- Unuse(source.reg());
- } else {
- set_register_location(source.reg(), kIllegalIndex);
- }
- }
- elements_[i] = target;
- } else if (target.is_register() && !target.is_synced() &&
- !source.is_memory()) {
- // If an element's target is a register that doesn't need to be
- // synced, and the element is not in memory, then the sync state
- // of the element is irrelevant. We clear the sync bit.
- ASSERT(source.is_valid());
- elements_[i].clear_sync();
- }
- }
-}
-
-
-void VirtualFrame::PrepareForCall(int spilled_args, int dropped_args) {
- ASSERT(height() >= dropped_args);
- ASSERT(height() >= spilled_args);
- ASSERT(dropped_args <= spilled_args);
-
- SyncRange(0, element_count() - 1);
- // Spill registers.
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i)) {
- SpillElementAt(register_location(i));
- }
- }
-
- // Spill the arguments.
- for (int i = element_count() - spilled_args; i < element_count(); i++) {
- if (!elements_[i].is_memory()) {
- SpillElementAt(i);
- }
- }
-
- // Forget the frame elements that will be popped by the call.
- Forget(dropped_args);
-}
-
-
-// If there are any registers referenced only by the frame, spill one.
-Register VirtualFrame::SpillAnyRegister() {
- // Find the leftmost (ordered by register number) register whose only
- // reference is in the frame.
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i) && cgen()->allocator()->count(i) == 1) {
- SpillElementAt(register_location(i));
- ASSERT(!cgen()->allocator()->is_used(i));
- return RegisterAllocator::ToRegister(i);
- }
- }
- return no_reg;
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/virtual-frame-inl.h b/src/3rdparty/v8/src/virtual-frame-inl.h
deleted file mode 100644
index c9f4aac..0000000
--- a/src/3rdparty/v8/src/virtual-frame-inl.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VIRTUAL_FRAME_INL_H_
-#define V8_VIRTUAL_FRAME_INL_H_
-
-#include "virtual-frame.h"
-
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
-#include "virtual-frame-heavy-inl.h"
-#else
-#include "virtual-frame-light-inl.h"
-#endif
-
-#endif // V8_VIRTUAL_FRAME_INL_H_
diff --git a/src/3rdparty/v8/src/virtual-frame-light-inl.h b/src/3rdparty/v8/src/virtual-frame-light-inl.h
deleted file mode 100644
index 681f93f..0000000
--- a/src/3rdparty/v8/src/virtual-frame-light-inl.h
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VIRTUAL_FRAME_LIGHT_INL_H_
-#define V8_VIRTUAL_FRAME_LIGHT_INL_H_
-
-#include "codegen.h"
-#include "register-allocator.h"
-#include "scopes.h"
-#include "type-info.h"
-
-#include "codegen-inl.h"
-#include "jump-target-light-inl.h"
-
-namespace v8 {
-namespace internal {
-
-VirtualFrame::VirtualFrame(InvalidVirtualFrameInitializer* dummy)
- : element_count_(0),
- top_of_stack_state_(NO_TOS_REGISTERS),
- register_allocation_map_(0),
- tos_known_smi_map_(0) { }
-
-
-// On entry to a function, the virtual frame already contains the receiver,
-// the parameters, and a return address. All frame elements are in memory.
-VirtualFrame::VirtualFrame()
- : element_count_(parameter_count() + 2),
- top_of_stack_state_(NO_TOS_REGISTERS),
- register_allocation_map_(0),
- tos_known_smi_map_(0) { }
-
-
-// When cloned, a frame is a deep copy of the original.
-VirtualFrame::VirtualFrame(VirtualFrame* original)
- : element_count_(original->element_count()),
- top_of_stack_state_(original->top_of_stack_state_),
- register_allocation_map_(original->register_allocation_map_),
- tos_known_smi_map_(0) { }
-
-
-bool VirtualFrame::Equals(const VirtualFrame* other) {
- ASSERT(element_count() == other->element_count());
- if (top_of_stack_state_ != other->top_of_stack_state_) return false;
- if (register_allocation_map_ != other->register_allocation_map_) return false;
- if (tos_known_smi_map_ != other->tos_known_smi_map_) return false;
-
- return true;
-}
-
-
-void VirtualFrame::PrepareForReturn() {
- // Don't bother flushing tos registers as returning does not require more
- // access to the expression stack.
- top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-VirtualFrame::RegisterAllocationScope::RegisterAllocationScope(
- CodeGenerator* cgen)
- : cgen_(cgen),
- old_is_spilled_(
- Isolate::Current()->is_virtual_frame_in_spilled_scope()) {
- Isolate::Current()->set_is_virtual_frame_in_spilled_scope(false);
- if (old_is_spilled_) {
- VirtualFrame* frame = cgen->frame();
- if (frame != NULL) {
- frame->AssertIsSpilled();
- }
- }
-}
-
-
-VirtualFrame::RegisterAllocationScope::~RegisterAllocationScope() {
- Isolate::Current()->set_is_virtual_frame_in_spilled_scope(old_is_spilled_);
- if (old_is_spilled_) {
- VirtualFrame* frame = cgen_->frame();
- if (frame != NULL) {
- frame->SpillAll();
- }
- }
-}
-
-
-CodeGenerator* VirtualFrame::cgen() const {
- return CodeGeneratorScope::Current(Isolate::Current());
-}
-
-
-MacroAssembler* VirtualFrame::masm() { return cgen()->masm(); }
-
-
-void VirtualFrame::CallStub(CodeStub* stub, int arg_count) {
- if (arg_count != 0) Forget(arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- masm()->CallStub(stub);
-}
-
-
-int VirtualFrame::parameter_count() const {
- return cgen()->scope()->num_parameters();
-}
-
-
-int VirtualFrame::local_count() const {
- return cgen()->scope()->num_stack_slots();
-}
-
-
-int VirtualFrame::frame_pointer() const { return parameter_count() + 3; }
-
-
-int VirtualFrame::context_index() { return frame_pointer() - 1; }
-
-
-int VirtualFrame::function_index() { return frame_pointer() - 2; }
-
-
-int VirtualFrame::local0_index() const { return frame_pointer() + 2; }
-
-
-int VirtualFrame::fp_relative(int index) {
- ASSERT(index < element_count());
- ASSERT(frame_pointer() < element_count()); // FP is on the frame.
- return (frame_pointer() - index) * kPointerSize;
-}
-
-
-int VirtualFrame::expression_base_index() const {
- return local0_index() + local_count();
-}
-
-
-int VirtualFrame::height() const {
- return element_count() - expression_base_index();
-}
-
-
-MemOperand VirtualFrame::LocalAt(int index) {
- ASSERT(0 <= index);
- ASSERT(index < local_count());
- return MemOperand(fp, kLocal0Offset - index * kPointerSize);
-}
-
-} } // namespace v8::internal
-
-#endif // V8_VIRTUAL_FRAME_LIGHT_INL_H_
diff --git a/src/3rdparty/v8/src/virtual-frame-light.cc b/src/3rdparty/v8/src/virtual-frame-light.cc
deleted file mode 100644
index bbaaaf5..0000000
--- a/src/3rdparty/v8/src/virtual-frame-light.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void VirtualFrame::Adjust(int count) {
- ASSERT(count >= 0);
- RaiseHeight(count, 0);
-}
-
-
-// If there are any registers referenced only by the frame, spill one.
-Register VirtualFrame::SpillAnyRegister() {
- UNIMPLEMENTED();
- return no_reg;
-}
-
-
-InvalidVirtualFrameInitializer* kInvalidVirtualFrameInitializer = NULL;
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/virtual-frame.cc b/src/3rdparty/v8/src/virtual-frame.cc
deleted file mode 100644
index 310ff59..0000000
--- a/src/3rdparty/v8/src/virtual-frame.cc
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// VirtualFrame implementation.
-
-// Specialization of List::ResizeAdd to non-inlined version for FrameElements.
-// The function ResizeAdd becomes a real function, whose implementation is the
-// inlined ResizeAddInternal.
-template <>
-void List<FrameElement,
- FreeStoreAllocationPolicy>::ResizeAdd(const FrameElement& element) {
- ResizeAddInternal(element);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/virtual-frame.h b/src/3rdparty/v8/src/virtual-frame.h
deleted file mode 100644
index 65d1009..0000000
--- a/src/3rdparty/v8/src/virtual-frame.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VIRTUAL_FRAME_H_
-#define V8_VIRTUAL_FRAME_H_
-
-#include "frame-element.h"
-#include "macro-assembler.h"
-
-#include "list-inl.h"
-#include "utils.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/virtual-frame-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/virtual-frame-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/virtual-frame-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/virtual-frame-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-// Add() on List is inlined, ResizeAdd() called by Add() is inlined except for
-// Lists of FrameElements, and ResizeAddInternal() is inlined in ResizeAdd().
-template <>
-void List<FrameElement,
- FreeStoreAllocationPolicy>::ResizeAdd(const FrameElement& element);
-} } // namespace v8::internal
-
-#endif // V8_VIRTUAL_FRAME_H_
diff --git a/src/3rdparty/v8/src/vm-state-inl.h b/src/3rdparty/v8/src/vm-state-inl.h
deleted file mode 100644
index 1f363de..0000000
--- a/src/3rdparty/v8/src/vm-state-inl.h
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VM_STATE_INL_H_
-#define V8_VM_STATE_INL_H_
-
-#include "vm-state.h"
-#include "runtime-profiler.h"
-
-namespace v8 {
-namespace internal {
-
-//
-// VMState class implementation. A simple stack of VM states held by the
-// logger and partially threaded through the call stack. States are pushed by
-// VMState construction and popped by destruction.
-//
-#ifdef ENABLE_VMSTATE_TRACKING
-inline const char* StateToString(StateTag state) {
- switch (state) {
- case JS:
- return "JS";
- case GC:
- return "GC";
- case COMPILER:
- return "COMPILER";
- case OTHER:
- return "OTHER";
- case EXTERNAL:
- return "EXTERNAL";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-VMState::VMState(Isolate* isolate, StateTag tag)
- : isolate_(isolate), previous_tag_(isolate->current_vm_state()) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (FLAG_log_state_changes) {
- LOG(isolate, UncheckedStringEvent("Entering", StateToString(tag)));
- LOG(isolate, UncheckedStringEvent("From", StateToString(previous_tag_)));
- }
-#endif
-
- isolate_->SetCurrentVMState(tag);
-
-#ifdef ENABLE_HEAP_PROTECTION
- if (FLAG_protect_heap) {
- if (tag == EXTERNAL) {
- // We are leaving V8.
- ASSERT(previous_tag_ != EXTERNAL);
- isolate_->heap()->Protect();
- } else if (previous_tag_ = EXTERNAL) {
- // We are entering V8.
- isolate_->heap()->Unprotect();
- }
- }
-#endif
-}
-
-
-VMState::~VMState() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (FLAG_log_state_changes) {
- LOG(isolate_,
- UncheckedStringEvent("Leaving",
- StateToString(isolate_->current_vm_state())));
- LOG(isolate_,
- UncheckedStringEvent("To", StateToString(previous_tag_)));
- }
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-#ifdef ENABLE_HEAP_PROTECTION
- StateTag tag = isolate_->current_vm_state();
-#endif
-
- isolate_->SetCurrentVMState(previous_tag_);
-
-#ifdef ENABLE_HEAP_PROTECTION
- if (FLAG_protect_heap) {
- if (tag == EXTERNAL) {
- // We are reentering V8.
- ASSERT(previous_tag_ != EXTERNAL);
- isolate_->heap()->Unprotect();
- } else if (previous_tag_ == EXTERNAL) {
- // We are leaving V8.
- isolate_->heap()->Protect();
- }
- }
-#endif // ENABLE_HEAP_PROTECTION
-}
-
-#endif // ENABLE_VMSTATE_TRACKING
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback)
- : isolate_(isolate), previous_callback_(isolate->external_callback()) {
- isolate_->set_external_callback(callback);
-}
-
-ExternalCallbackScope::~ExternalCallbackScope() {
- isolate_->set_external_callback(previous_callback_);
-}
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-
-} } // namespace v8::internal
-
-#endif // V8_VM_STATE_INL_H_
diff --git a/src/3rdparty/v8/src/vm-state.h b/src/3rdparty/v8/src/vm-state.h
deleted file mode 100644
index 11fc6d6..0000000
--- a/src/3rdparty/v8/src/vm-state.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VM_STATE_H_
-#define V8_VM_STATE_H_
-
-#include "isolate.h"
-
-namespace v8 {
-namespace internal {
-
-class VMState BASE_EMBEDDED {
-#ifdef ENABLE_VMSTATE_TRACKING
- public:
- inline VMState(Isolate* isolate, StateTag tag);
- inline ~VMState();
-
- private:
- Isolate* isolate_;
- StateTag previous_tag_;
-
-#else
- public:
- VMState(Isolate* isolate, StateTag state) {}
-#endif
-};
-
-
-class ExternalCallbackScope BASE_EMBEDDED {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- public:
- inline ExternalCallbackScope(Isolate* isolate, Address callback);
- inline ~ExternalCallbackScope();
- private:
- Isolate* isolate_;
- Address previous_callback_;
-#else
- public:
- ExternalCallbackScope(Isolate* isolate, Address callback) {}
-#endif
-};
-
-} } // namespace v8::internal
-
-
-#endif // V8_VM_STATE_H_
diff --git a/src/3rdparty/v8/src/win32-headers.h b/src/3rdparty/v8/src/win32-headers.h
deleted file mode 100644
index fca5c13..0000000
--- a/src/3rdparty/v8/src/win32-headers.h
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef WIN32_LEAN_AND_MEAN
-// WIN32_LEAN_AND_MEAN implies NOCRYPT and NOGDI.
-#define WIN32_LEAN_AND_MEAN
-#endif
-#ifndef NOMINMAX
-#define NOMINMAX
-#endif
-#ifndef NOKERNEL
-#define NOKERNEL
-#endif
-#ifndef NOUSER
-#define NOUSER
-#endif
-#ifndef NOSERVICE
-#define NOSERVICE
-#endif
-#ifndef NOSOUND
-#define NOSOUND
-#endif
-#ifndef NOMCX
-#define NOMCX
-#endif
-// Require Windows XP or higher (this is required for the RtlCaptureContext
-// function to be present).
-#ifndef _WIN32_WINNT
-#define _WIN32_WINNT 0x501
-#endif
-
-#include <windows.h>
-
-#ifdef V8_WIN32_HEADERS_FULL
-#include <time.h> // For LocalOffset() implementation.
-#include <mmsystem.h> // For timeGetTime().
-#ifdef __MINGW32__
-// Require Windows XP or higher when compiling with MinGW. This is for MinGW
-// header files to expose getaddrinfo.
-#undef _WIN32_WINNT
-#define _WIN32_WINNT 0x501
-#endif // __MINGW32__
-#ifndef __MINGW32__
-#include <dbghelp.h> // For SymLoadModule64 and al.
-#include <errno.h> // For STRUNCATE
-#endif // __MINGW32__
-#include <limits.h> // For INT_MAX and al.
-#include <tlhelp32.h> // For Module32First and al.
-
-// These additional WIN32 includes have to be right here as the #undef's below
-// makes it impossible to have them elsewhere.
-#include <winsock2.h>
-#include <ws2tcpip.h>
-#include <process.h> // for _beginthreadex()
-#include <stdlib.h>
-#endif // V8_WIN32_HEADERS_FULL
-
-#undef VOID
-#undef DELETE
-#undef IN
-#undef THIS
-#undef CONST
-#undef NAN
-#undef TRUE
-#undef FALSE
-#undef UNKNOWN
-#undef NONE
-#undef ANY
-#undef IGNORE
-#undef GetObject
-#undef CreateMutex
-#undef CreateSemaphore
diff --git a/src/3rdparty/v8/src/x64/assembler-x64-inl.h b/src/3rdparty/v8/src/x64/assembler-x64-inl.h
deleted file mode 100644
index 9541a58..0000000
--- a/src/3rdparty/v8/src/x64/assembler-x64-inl.h
+++ /dev/null
@@ -1,456 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_ASSEMBLER_X64_INL_H_
-#define V8_X64_ASSEMBLER_X64_INL_H_
-
-#include "cpu.h"
-#include "debug.h"
-#include "v8memory.h"
-
-namespace v8 {
-namespace internal {
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Assembler
-
-
-void Assembler::emitl(uint32_t x) {
- Memory::uint32_at(pc_) = x;
- pc_ += sizeof(uint32_t);
-}
-
-
-void Assembler::emitq(uint64_t x, RelocInfo::Mode rmode) {
- Memory::uint64_at(pc_) = x;
- if (rmode != RelocInfo::NONE) {
- RecordRelocInfo(rmode, x);
- }
- pc_ += sizeof(uint64_t);
-}
-
-
-void Assembler::emitw(uint16_t x) {
- Memory::uint16_at(pc_) = x;
- pc_ += sizeof(uint16_t);
-}
-
-
-void Assembler::emit_code_target(Handle<Code> target, RelocInfo::Mode rmode) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- RecordRelocInfo(rmode);
- int current = code_targets_.length();
- if (current > 0 && code_targets_.last().is_identical_to(target)) {
- // Optimization if we keep jumping to the same code target.
- emitl(current - 1);
- } else {
- code_targets_.Add(target);
- emitl(current);
- }
-}
-
-
-void Assembler::emit_rex_64(Register reg, Register rm_reg) {
- emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit());
-}
-
-
-void Assembler::emit_rex_64(XMMRegister reg, Register rm_reg) {
- emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
-}
-
-
-void Assembler::emit_rex_64(Register reg, XMMRegister rm_reg) {
- emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
-}
-
-
-void Assembler::emit_rex_64(Register reg, const Operand& op) {
- emit(0x48 | reg.high_bit() << 2 | op.rex_);
-}
-
-
-void Assembler::emit_rex_64(XMMRegister reg, const Operand& op) {
- emit(0x48 | (reg.code() & 0x8) >> 1 | op.rex_);
-}
-
-
-void Assembler::emit_rex_64(Register rm_reg) {
- ASSERT_EQ(rm_reg.code() & 0xf, rm_reg.code());
- emit(0x48 | rm_reg.high_bit());
-}
-
-
-void Assembler::emit_rex_64(const Operand& op) {
- emit(0x48 | op.rex_);
-}
-
-
-void Assembler::emit_rex_32(Register reg, Register rm_reg) {
- emit(0x40 | reg.high_bit() << 2 | rm_reg.high_bit());
-}
-
-
-void Assembler::emit_rex_32(Register reg, const Operand& op) {
- emit(0x40 | reg.high_bit() << 2 | op.rex_);
-}
-
-
-void Assembler::emit_rex_32(Register rm_reg) {
- emit(0x40 | rm_reg.high_bit());
-}
-
-
-void Assembler::emit_rex_32(const Operand& op) {
- emit(0x40 | op.rex_);
-}
-
-
-void Assembler::emit_optional_rex_32(Register reg, Register rm_reg) {
- byte rex_bits = reg.high_bit() << 2 | rm_reg.high_bit();
- if (rex_bits != 0) emit(0x40 | rex_bits);
-}
-
-
-void Assembler::emit_optional_rex_32(Register reg, const Operand& op) {
- byte rex_bits = reg.high_bit() << 2 | op.rex_;
- if (rex_bits != 0) emit(0x40 | rex_bits);
-}
-
-
-void Assembler::emit_optional_rex_32(XMMRegister reg, const Operand& op) {
- byte rex_bits = (reg.code() & 0x8) >> 1 | op.rex_;
- if (rex_bits != 0) emit(0x40 | rex_bits);
-}
-
-
-void Assembler::emit_optional_rex_32(XMMRegister reg, XMMRegister base) {
- byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
- if (rex_bits != 0) emit(0x40 | rex_bits);
-}
-
-
-void Assembler::emit_optional_rex_32(XMMRegister reg, Register base) {
- byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
- if (rex_bits != 0) emit(0x40 | rex_bits);
-}
-
-
-void Assembler::emit_optional_rex_32(Register reg, XMMRegister base) {
- byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
- if (rex_bits != 0) emit(0x40 | rex_bits);
-}
-
-
-void Assembler::emit_optional_rex_32(Register rm_reg) {
- if (rm_reg.high_bit()) emit(0x41);
-}
-
-
-void Assembler::emit_optional_rex_32(const Operand& op) {
- if (op.rex_ != 0) emit(0x40 | op.rex_);
-}
-
-
-Address Assembler::target_address_at(Address pc) {
- return Memory::int32_at(pc) + pc + 4;
-}
-
-
-void Assembler::set_target_address_at(Address pc, Address target) {
- Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
- CPU::FlushICache(pc, sizeof(int32_t));
-}
-
-Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
- return code_targets_[Memory::int32_at(pc)];
-}
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo
-
-// The modes possibly affected by apply must be in kApplyMask.
-void RelocInfo::apply(intptr_t delta) {
- if (IsInternalReference(rmode_)) {
- // absolute code pointer inside code object moves with the code object.
- Memory::Address_at(pc_) += static_cast<int32_t>(delta);
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (IsCodeTarget(rmode_)) {
- Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
- CPU::FlushICache(pc_, sizeof(int32_t));
- }
-}
-
-
-Address RelocInfo::target_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- if (IsCodeTarget(rmode_)) {
- return Assembler::target_address_at(pc_);
- } else {
- return Memory::Address_at(pc_);
- }
-}
-
-
-Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- return reinterpret_cast<Address>(pc_);
-}
-
-
-int RelocInfo::target_address_size() {
- if (IsCodedSpecially()) {
- return Assembler::kCallTargetSize;
- } else {
- return Assembler::kExternalTargetSize;
- }
-}
-
-
-void RelocInfo::set_target_address(Address target) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- if (IsCodeTarget(rmode_)) {
- Assembler::set_target_address_at(pc_, target);
- } else {
- Memory::Address_at(pc_) = target;
- CPU::FlushICache(pc_, sizeof(Address));
- }
-}
-
-
-Object* RelocInfo::target_object() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Memory::Object_at(pc_);
-}
-
-
-Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- if (rmode_ == EMBEDDED_OBJECT) {
- return Memory::Object_Handle_at(pc_);
- } else {
- return origin->code_target_object_handle_at(pc_);
- }
-}
-
-
-Object** RelocInfo::target_object_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object**>(pc_);
-}
-
-
-Address* RelocInfo::target_reference_address() {
- ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
- return reinterpret_cast<Address*>(pc_);
-}
-
-
-void RelocInfo::set_target_object(Object* target) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- *reinterpret_cast<Object**>(pc_) = target;
- CPU::FlushICache(pc_, sizeof(Address));
-}
-
-
-Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- return Handle<JSGlobalPropertyCell>(
- reinterpret_cast<JSGlobalPropertyCell**>(address));
-}
-
-
-JSGlobalPropertyCell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- Object* object = HeapObject::FromAddress(
- address - JSGlobalPropertyCell::kValueOffset);
- return reinterpret_cast<JSGlobalPropertyCell*>(object);
-}
-
-
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
- Memory::Address_at(pc_) = address;
- CPU::FlushICache(pc_, sizeof(Address));
-}
-
-
-bool RelocInfo::IsPatchedReturnSequence() {
- // The recognized call sequence is:
- // movq(kScratchRegister, immediate64); call(kScratchRegister);
- // It only needs to be distinguished from a return sequence
- // movq(rsp, rbp); pop(rbp); ret(n); int3 *6
- // The 11th byte is int3 (0xCC) in the return sequence and
- // REX.WB (0x48+register bit) for the call sequence.
-#ifdef ENABLE_DEBUGGER_SUPPORT
- return pc_[10] != 0xCC;
-#else
- return false;
-#endif
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- return !Assembler::IsNop(pc());
-}
-
-
-Address RelocInfo::call_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return Memory::Address_at(
- pc_ + Assembler::kRealPatchReturnSequenceAddressOffset);
-}
-
-
-void RelocInfo::set_call_address(Address target) {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- Memory::Address_at(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset) =
- target;
- CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset,
- sizeof(Address));
-}
-
-
-Object* RelocInfo::call_object() {
- return *call_object_address();
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
- *call_object_address() = target;
-}
-
-
-Object** RelocInfo::call_object_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return reinterpret_cast<Object**>(
- pc_ + Assembler::kPatchReturnSequenceAddressOffset);
-}
-
-
-void RelocInfo::Visit(ObjectVisitor* visitor) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitPointer(target_object_address());
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- visitor->VisitGlobalPropertyCell(this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(target_reference_address());
- CPU::FlushICache(pc_, sizeof(Address));
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
- } else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
- visitor->VisitDebugTarget(this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
- visitor->VisitRuntimeEntry(this);
- }
-}
-
-
-template<typename StaticVisitor>
-void RelocInfo::Visit(Heap* heap) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitPointer(heap, target_object_address());
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeTarget(mode)) {
- StaticVisitor::VisitCodeTarget(heap, this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(heap, this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- StaticVisitor::VisitExternalReference(target_reference_address());
- CPU::FlushICache(pc_, sizeof(Address));
-#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (heap->isolate()->debug()->has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
- StaticVisitor::VisitDebugTarget(heap, this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
- StaticVisitor::VisitRuntimeEntry(this);
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Operand
-
-void Operand::set_modrm(int mod, Register rm_reg) {
- ASSERT(is_uint2(mod));
- buf_[0] = mod << 6 | rm_reg.low_bits();
- // Set REX.B to the high bit of rm.code().
- rex_ |= rm_reg.high_bit();
-}
-
-
-void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
- ASSERT(len_ == 1);
- ASSERT(is_uint2(scale));
- // Use SIB with no index register only for base rsp or r12. Otherwise we
- // would skip the SIB byte entirely.
- ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12));
- buf_[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
- rex_ |= index.high_bit() << 1 | base.high_bit();
- len_ = 2;
-}
-
-void Operand::set_disp8(int disp) {
- ASSERT(is_int8(disp));
- ASSERT(len_ == 1 || len_ == 2);
- int8_t* p = reinterpret_cast<int8_t*>(&buf_[len_]);
- *p = disp;
- len_ += sizeof(int8_t);
-}
-
-void Operand::set_disp32(int disp) {
- ASSERT(len_ == 1 || len_ == 2);
- int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
- *p = disp;
- len_ += sizeof(int32_t);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_X64_ASSEMBLER_X64_INL_H_
diff --git a/src/3rdparty/v8/src/x64/assembler-x64.cc b/src/3rdparty/v8/src/x64/assembler-x64.cc
deleted file mode 100644
index de28ae9..0000000
--- a/src/3rdparty/v8/src/x64/assembler-x64.cc
+++ /dev/null
@@ -1,3180 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "macro-assembler.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------------
-// Implementation of CpuFeatures
-
-
-#ifdef DEBUG
-bool CpuFeatures::initialized_ = false;
-#endif
-uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures;
-uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
-
-
-void CpuFeatures::Probe() {
- ASSERT(!initialized_);
-#ifdef DEBUG
- initialized_ = true;
-#endif
- supported_ = kDefaultCpuFeatures;
- if (Serializer::enabled()) {
- supported_ |= OS::CpuFeaturesImpliedByPlatform();
- return; // No features if we might serialize.
- }
-
- const int kBufferSize = 4 * KB;
- VirtualMemory* memory = new VirtualMemory(kBufferSize);
- if (!memory->IsReserved()) {
- delete memory;
- return;
- }
- ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
- if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
- delete memory;
- return;
- }
-
- Assembler assm(NULL, memory->address(), kBufferSize);
- Label cpuid, done;
-#define __ assm.
- // Save old rsp, since we are going to modify the stack.
- __ push(rbp);
- __ pushfq();
- __ push(rcx);
- __ push(rbx);
- __ movq(rbp, rsp);
-
- // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
- __ pushfq();
- __ pop(rax);
- __ movq(rdx, rax);
- __ xor_(rax, Immediate(0x200000)); // Flip bit 21.
- __ push(rax);
- __ popfq();
- __ pushfq();
- __ pop(rax);
- __ xor_(rax, rdx); // Different if CPUID is supported.
- __ j(not_zero, &cpuid);
-
- // CPUID not supported. Clear the supported features in rax.
- __ xor_(rax, rax);
- __ jmp(&done);
-
- // Invoke CPUID with 1 in eax to get feature information in
- // ecx:edx. Temporarily enable CPUID support because we know it's
- // safe here.
- __ bind(&cpuid);
- __ movq(rax, Immediate(1));
- supported_ = kDefaultCpuFeatures | (1 << CPUID);
- { Scope fscope(CPUID);
- __ cpuid();
- // Move the result from ecx:edx to rdi.
- __ movl(rdi, rdx); // Zero-extended to 64 bits.
- __ shl(rcx, Immediate(32));
- __ or_(rdi, rcx);
-
- // Get the sahf supported flag, from CPUID(0x80000001)
- __ movq(rax, 0x80000001, RelocInfo::NONE);
- __ cpuid();
- }
- supported_ = kDefaultCpuFeatures;
-
- // Put the CPU flags in rax.
- // rax = (rcx & 1) | (rdi & ~1) | (1 << CPUID).
- __ movl(rax, Immediate(1));
- __ and_(rcx, rax); // Bit 0 is set if SAHF instruction supported.
- __ not_(rax);
- __ and_(rax, rdi);
- __ or_(rax, rcx);
- __ or_(rax, Immediate(1 << CPUID));
-
- // Done.
- __ bind(&done);
- __ movq(rsp, rbp);
- __ pop(rbx);
- __ pop(rcx);
- __ popfq();
- __ pop(rbp);
- __ ret(0);
-#undef __
-
- typedef uint64_t (*F0)();
- F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
- supported_ = probe();
- found_by_runtime_probing_ = supported_;
- found_by_runtime_probing_ &= ~kDefaultCpuFeatures;
- uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
- supported_ |= os_guarantees;
- found_by_runtime_probing_ &= ~os_guarantees;
- // SSE2 and CMOV must be available on an X64 CPU.
- ASSERT(IsSupported(CPUID));
- ASSERT(IsSupported(SSE2));
- ASSERT(IsSupported(CMOV));
-
- delete memory;
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard int3 instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Load register with immediate 64 and call through a register instructions
- // takes up 13 bytes and int3 takes up one byte.
- static const int kCallCodeSize = 13;
- int code_size = kCallCodeSize + guard_bytes;
-
- // Create a code patcher.
- CodePatcher patcher(pc_, code_size);
-
- // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
- Label check_codesize;
- patcher.masm()->bind(&check_codesize);
-#endif
-
- // Patch the code.
- patcher.masm()->movq(r10, target, RelocInfo::NONE);
- patcher.masm()->call(r10);
-
- // Check that the size of the code generated is as expected.
- ASSERT_EQ(kCallCodeSize,
- patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
-
- // Add the requested number of int3 instructions after the call.
- for (int i = 0; i < guard_bytes; i++) {
- patcher.masm()->int3();
- }
-}
-
-
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- // Patch the code at the current address with the supplied instructions.
- for (int i = 0; i < instruction_count; i++) {
- *(pc_ + i) = *(instructions + i);
- }
-
- // Indicate that code has changed.
- CPU::FlushICache(pc_, instruction_count);
-}
-
-
-// -----------------------------------------------------------------------------
-// Register constants.
-
-const int Register::kRegisterCodeByAllocationIndex[kNumAllocatableRegisters] = {
- // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r15
- 0, 3, 2, 1, 7, 8, 9, 11, 14, 15
-};
-
-const int Register::kAllocationIndexByRegisterCode[kNumRegisters] = {
- 0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, -1, -1, 8, 9
-};
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Operand
-
-Operand::Operand(Register base, int32_t disp) : rex_(0) {
- len_ = 1;
- if (base.is(rsp) || base.is(r12)) {
- // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
- set_sib(times_1, rsp, base);
- }
-
- if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
- set_modrm(0, base);
- } else if (is_int8(disp)) {
- set_modrm(1, base);
- set_disp8(disp);
- } else {
- set_modrm(2, base);
- set_disp32(disp);
- }
-}
-
-
-Operand::Operand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp) : rex_(0) {
- ASSERT(!index.is(rsp));
- len_ = 1;
- set_sib(scale, index, base);
- if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
- // This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
- // possibly set by set_sib.
- set_modrm(0, rsp);
- } else if (is_int8(disp)) {
- set_modrm(1, rsp);
- set_disp8(disp);
- } else {
- set_modrm(2, rsp);
- set_disp32(disp);
- }
-}
-
-
-Operand::Operand(Register index,
- ScaleFactor scale,
- int32_t disp) : rex_(0) {
- ASSERT(!index.is(rsp));
- len_ = 1;
- set_modrm(0, rsp);
- set_sib(scale, index, rbp);
- set_disp32(disp);
-}
-
-
-Operand::Operand(const Operand& operand, int32_t offset) {
- ASSERT(operand.len_ >= 1);
- // Operand encodes REX ModR/M [SIB] [Disp].
- byte modrm = operand.buf_[0];
- ASSERT(modrm < 0xC0); // Disallow mode 3 (register target).
- bool has_sib = ((modrm & 0x07) == 0x04);
- byte mode = modrm & 0xC0;
- int disp_offset = has_sib ? 2 : 1;
- int base_reg = (has_sib ? operand.buf_[1] : modrm) & 0x07;
- // Mode 0 with rbp/r13 as ModR/M or SIB base register always has a 32-bit
- // displacement.
- bool is_baseless = (mode == 0) && (base_reg == 0x05); // No base or RIP base.
- int32_t disp_value = 0;
- if (mode == 0x80 || is_baseless) {
- // Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
- disp_value = *BitCast<const int32_t*>(&operand.buf_[disp_offset]);
- } else if (mode == 0x40) {
- // Mode 1: Byte displacement.
- disp_value = static_cast<signed char>(operand.buf_[disp_offset]);
- }
-
- // Write new operand with same registers, but with modified displacement.
- ASSERT(offset >= 0 ? disp_value + offset > disp_value
- : disp_value + offset < disp_value); // No overflow.
- disp_value += offset;
- rex_ = operand.rex_;
- if (!is_int8(disp_value) || is_baseless) {
- // Need 32 bits of displacement, mode 2 or mode 1 with register rbp/r13.
- buf_[0] = (modrm & 0x3f) | (is_baseless ? 0x00 : 0x80);
- len_ = disp_offset + 4;
- Memory::int32_at(&buf_[disp_offset]) = disp_value;
- } else if (disp_value != 0 || (base_reg == 0x05)) {
- // Need 8 bits of displacement.
- buf_[0] = (modrm & 0x3f) | 0x40; // Mode 1.
- len_ = disp_offset + 1;
- buf_[disp_offset] = static_cast<byte>(disp_value);
- } else {
- // Need no displacement.
- buf_[0] = (modrm & 0x3f); // Mode 0.
- len_ = disp_offset;
- }
- if (has_sib) {
- buf_[1] = operand.buf_[1];
- }
-}
-
-
-bool Operand::AddressUsesRegister(Register reg) const {
- int code = reg.code();
- ASSERT((buf_[0] & 0xC0) != 0xC0); // Always a memory operand.
- // Start with only low three bits of base register. Initial decoding doesn't
- // distinguish on the REX.B bit.
- int base_code = buf_[0] & 0x07;
- if (base_code == rsp.code()) {
- // SIB byte present in buf_[1].
- // Check the index register from the SIB byte + REX.X prefix.
- int index_code = ((buf_[1] >> 3) & 0x07) | ((rex_ & 0x02) << 2);
- // Index code (including REX.X) of 0x04 (rsp) means no index register.
- if (index_code != rsp.code() && index_code == code) return true;
- // Add REX.B to get the full base register code.
- base_code = (buf_[1] & 0x07) | ((rex_ & 0x01) << 3);
- // A base register of 0x05 (rbp) with mod = 0 means no base register.
- if (base_code == rbp.code() && ((buf_[0] & 0xC0) == 0)) return false;
- return code == base_code;
- } else {
- // A base register with low bits of 0x05 (rbp or r13) and mod = 0 means
- // no base register.
- if (base_code == rbp.code() && ((buf_[0] & 0xC0) == 0)) return false;
- base_code |= ((rex_ & 0x01) << 3);
- return code == base_code;
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Assembler.
-
-#ifdef GENERATED_CODE_COVERAGE
-static void InitCoverageLog();
-#endif
-
-Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
- : AssemblerBase(arg_isolate),
- code_targets_(100),
- positions_recorder_(this),
- emit_debug_code_(FLAG_debug_code) {
- if (buffer == NULL) {
- // Do our own buffer management.
- if (buffer_size <= kMinimalBufferSize) {
- buffer_size = kMinimalBufferSize;
-
- if (isolate() != NULL && isolate()->assembler_spare_buffer() != NULL) {
- buffer = isolate()->assembler_spare_buffer();
- isolate()->set_assembler_spare_buffer(NULL);
- }
- }
- if (buffer == NULL) {
- buffer_ = NewArray<byte>(buffer_size);
- } else {
- buffer_ = static_cast<byte*>(buffer);
- }
- buffer_size_ = buffer_size;
- own_buffer_ = true;
- } else {
- // Use externally provided buffer instead.
- ASSERT(buffer_size > 0);
- buffer_ = static_cast<byte*>(buffer);
- buffer_size_ = buffer_size;
- own_buffer_ = false;
- }
-
- // Clear the buffer in debug mode unless it was provided by the
- // caller in which case we can't be sure it's okay to overwrite
- // existing code in it.
-#ifdef DEBUG
- if (own_buffer_) {
- memset(buffer_, 0xCC, buffer_size); // int3
- }
-#endif
-
- // Setup buffer pointers.
- ASSERT(buffer_ != NULL);
- pc_ = buffer_;
- reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
-
- last_pc_ = NULL;
-
-#ifdef GENERATED_CODE_COVERAGE
- InitCoverageLog();
-#endif
-}
-
-
-Assembler::~Assembler() {
- if (own_buffer_) {
- if (isolate() != NULL &&
- isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- }
-}
-
-
-void Assembler::GetCode(CodeDesc* desc) {
- // Finalize code (at this point overflow() may be true, but the gap ensures
- // that we are still not overlapping instructions and relocation info).
- ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
- // Setup code descriptor.
- desc->buffer = buffer_;
- desc->buffer_size = buffer_size_;
- desc->instr_size = pc_offset();
- ASSERT(desc->instr_size > 0); // Zero-size code objects upset the system.
- desc->reloc_size =
- static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
- desc->origin = this;
-}
-
-
-void Assembler::Align(int m) {
- ASSERT(IsPowerOf2(m));
- int delta = (m - (pc_offset() & (m - 1))) & (m - 1);
- while (delta >= 9) {
- nop(9);
- delta -= 9;
- }
- if (delta > 0) {
- nop(delta);
- }
-}
-
-
-void Assembler::CodeTargetAlign() {
- Align(16); // Preferred alignment of jump targets on x64.
-}
-
-
-void Assembler::bind_to(Label* L, int pos) {
- ASSERT(!L->is_bound()); // Label may only be bound once.
- last_pc_ = NULL;
- ASSERT(0 <= pos && pos <= pc_offset()); // Position must be valid.
- if (L->is_linked()) {
- int current = L->pos();
- int next = long_at(current);
- while (next != current) {
- // Relative address, relative to point after address.
- int imm32 = pos - (current + sizeof(int32_t));
- long_at_put(current, imm32);
- current = next;
- next = long_at(next);
- }
- // Fix up last fixup on linked list.
- int last_imm32 = pos - (current + sizeof(int32_t));
- long_at_put(current, last_imm32);
- }
- L->bind_to(pos);
-}
-
-
-void Assembler::bind(Label* L) {
- bind_to(L, pc_offset());
-}
-
-
-void Assembler::bind(NearLabel* L) {
- ASSERT(!L->is_bound());
- last_pc_ = NULL;
- while (L->unresolved_branches_ > 0) {
- int branch_pos = L->unresolved_positions_[L->unresolved_branches_ - 1];
- int disp = pc_offset() - branch_pos;
- ASSERT(is_int8(disp));
- set_byte_at(branch_pos - sizeof(int8_t), disp);
- L->unresolved_branches_--;
- }
- L->bind_to(pc_offset());
-}
-
-
-void Assembler::GrowBuffer() {
- ASSERT(buffer_overflow());
- if (!own_buffer_) FATAL("external code buffer is too small");
-
- // Compute new buffer size.
- CodeDesc desc; // the new buffer
- if (buffer_size_ < 4*KB) {
- desc.buffer_size = 4*KB;
- } else {
- desc.buffer_size = 2*buffer_size_;
- }
- // Some internal data structures overflow for very large buffers,
- // they must ensure that kMaximalBufferSize is not too large.
- if ((desc.buffer_size > kMaximalBufferSize) ||
- (desc.buffer_size > HEAP->MaxOldGenerationSize())) {
- V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
- }
-
- // Setup new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
- desc.instr_size = pc_offset();
- desc.reloc_size =
- static_cast<int>((buffer_ + buffer_size_) - (reloc_info_writer.pos()));
-
- // Clear the buffer in debug mode. Use 'int3' instructions to make
- // sure to get into problems if we ever run uninitialized code.
-#ifdef DEBUG
- memset(desc.buffer, 0xCC, desc.buffer_size);
-#endif
-
- // Copy the data.
- intptr_t pc_delta = desc.buffer - buffer_;
- intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
- (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(rc_delta + reloc_info_writer.pos(),
- reloc_info_writer.pos(), desc.reloc_size);
-
- // Switch buffers.
- if (isolate() != NULL &&
- isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
- pc_ += pc_delta;
- if (last_pc_ != NULL) {
- last_pc_ += pc_delta;
- }
- reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.last_pc() + pc_delta);
-
- // Relocate runtime entries.
- for (RelocIterator it(desc); !it.done(); it.next()) {
- RelocInfo::Mode rmode = it.rinfo()->rmode();
- if (rmode == RelocInfo::INTERNAL_REFERENCE) {
- intptr_t* p = reinterpret_cast<intptr_t*>(it.rinfo()->pc());
- if (*p != 0) { // 0 means uninitialized.
- *p += pc_delta;
- }
- }
- }
-
- ASSERT(!buffer_overflow());
-}
-
-
-void Assembler::emit_operand(int code, const Operand& adr) {
- ASSERT(is_uint3(code));
- const unsigned length = adr.len_;
- ASSERT(length > 0);
-
- // Emit updated ModR/M byte containing the given register.
- ASSERT((adr.buf_[0] & 0x38) == 0);
- pc_[0] = adr.buf_[0] | code << 3;
-
- // Emit the rest of the encoded operand.
- for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
- pc_ += length;
-}
-
-
-// Assembler Instruction implementations.
-
-void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(reg, op);
- emit(opcode);
- emit_operand(reg, op);
-}
-
-
-void Assembler::arithmetic_op(byte opcode, Register reg, Register rm_reg) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT((opcode & 0xC6) == 2);
- if (rm_reg.low_bits() == 4) { // Forces SIB byte.
- // Swap reg and rm_reg and change opcode operand order.
- emit_rex_64(rm_reg, reg);
- emit(opcode ^ 0x02);
- emit_modrm(rm_reg, reg);
- } else {
- emit_rex_64(reg, rm_reg);
- emit(opcode);
- emit_modrm(reg, rm_reg);
- }
-}
-
-
-void Assembler::arithmetic_op_16(byte opcode, Register reg, Register rm_reg) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT((opcode & 0xC6) == 2);
- if (rm_reg.low_bits() == 4) { // Forces SIB byte.
- // Swap reg and rm_reg and change opcode operand order.
- emit(0x66);
- emit_optional_rex_32(rm_reg, reg);
- emit(opcode ^ 0x02);
- emit_modrm(rm_reg, reg);
- } else {
- emit(0x66);
- emit_optional_rex_32(reg, rm_reg);
- emit(opcode);
- emit_modrm(reg, rm_reg);
- }
-}
-
-
-void Assembler::arithmetic_op_16(byte opcode,
- Register reg,
- const Operand& rm_reg) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x66);
- emit_optional_rex_32(reg, rm_reg);
- emit(opcode);
- emit_operand(reg, rm_reg);
-}
-
-
-void Assembler::arithmetic_op_32(byte opcode, Register reg, Register rm_reg) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT((opcode & 0xC6) == 2);
- if (rm_reg.low_bits() == 4) { // Forces SIB byte.
- // Swap reg and rm_reg and change opcode operand order.
- emit_optional_rex_32(rm_reg, reg);
- emit(opcode ^ 0x02); // E.g. 0x03 -> 0x01 for ADD.
- emit_modrm(rm_reg, reg);
- } else {
- emit_optional_rex_32(reg, rm_reg);
- emit(opcode);
- emit_modrm(reg, rm_reg);
- }
-}
-
-
-void Assembler::arithmetic_op_32(byte opcode,
- Register reg,
- const Operand& rm_reg) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(reg, rm_reg);
- emit(opcode);
- emit_operand(reg, rm_reg);
-}
-
-
-void Assembler::immediate_arithmetic_op(byte subcode,
- Register dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_modrm(subcode, dst);
- emit(src.value_);
- } else if (dst.is(rax)) {
- emit(0x05 | (subcode << 3));
- emitl(src.value_);
- } else {
- emit(0x81);
- emit_modrm(subcode, dst);
- emitl(src.value_);
- }
-}
-
-void Assembler::immediate_arithmetic_op(byte subcode,
- const Operand& dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_operand(subcode, dst);
- emit(src.value_);
- } else {
- emit(0x81);
- emit_operand(subcode, dst);
- emitl(src.value_);
- }
-}
-
-
-void Assembler::immediate_arithmetic_op_16(byte subcode,
- Register dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x66); // Operand size override prefix.
- emit_optional_rex_32(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_modrm(subcode, dst);
- emit(src.value_);
- } else if (dst.is(rax)) {
- emit(0x05 | (subcode << 3));
- emitw(src.value_);
- } else {
- emit(0x81);
- emit_modrm(subcode, dst);
- emitw(src.value_);
- }
-}
-
-
-void Assembler::immediate_arithmetic_op_16(byte subcode,
- const Operand& dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x66); // Operand size override prefix.
- emit_optional_rex_32(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_operand(subcode, dst);
- emit(src.value_);
- } else {
- emit(0x81);
- emit_operand(subcode, dst);
- emitw(src.value_);
- }
-}
-
-
-void Assembler::immediate_arithmetic_op_32(byte subcode,
- Register dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_modrm(subcode, dst);
- emit(src.value_);
- } else if (dst.is(rax)) {
- emit(0x05 | (subcode << 3));
- emitl(src.value_);
- } else {
- emit(0x81);
- emit_modrm(subcode, dst);
- emitl(src.value_);
- }
-}
-
-
-void Assembler::immediate_arithmetic_op_32(byte subcode,
- const Operand& dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_operand(subcode, dst);
- emit(src.value_);
- } else {
- emit(0x81);
- emit_operand(subcode, dst);
- emitl(src.value_);
- }
-}
-
-
-void Assembler::immediate_arithmetic_op_8(byte subcode,
- const Operand& dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst);
- ASSERT(is_int8(src.value_) || is_uint8(src.value_));
- emit(0x80);
- emit_operand(subcode, dst);
- emit(src.value_);
-}
-
-
-void Assembler::immediate_arithmetic_op_8(byte subcode,
- Register dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (dst.code() > 3) {
- // Use 64-bit mode byte registers.
- emit_rex_64(dst);
- }
- ASSERT(is_int8(src.value_) || is_uint8(src.value_));
- emit(0x80);
- emit_modrm(subcode, dst);
- emit(src.value_);
-}
-
-
-void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(is_uint6(shift_amount.value_)); // illegal shift count
- if (shift_amount.value_ == 1) {
- emit_rex_64(dst);
- emit(0xD1);
- emit_modrm(subcode, dst);
- } else {
- emit_rex_64(dst);
- emit(0xC1);
- emit_modrm(subcode, dst);
- emit(shift_amount.value_);
- }
-}
-
-
-void Assembler::shift(Register dst, int subcode) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst);
- emit(0xD3);
- emit_modrm(subcode, dst);
-}
-
-
-void Assembler::shift_32(Register dst, int subcode) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst);
- emit(0xD3);
- emit_modrm(subcode, dst);
-}
-
-
-void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(is_uint5(shift_amount.value_)); // illegal shift count
- if (shift_amount.value_ == 1) {
- emit_optional_rex_32(dst);
- emit(0xD1);
- emit_modrm(subcode, dst);
- } else {
- emit_optional_rex_32(dst);
- emit(0xC1);
- emit_modrm(subcode, dst);
- emit(shift_amount.value_);
- }
-}
-
-
-void Assembler::bt(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(src, dst);
- emit(0x0F);
- emit(0xA3);
- emit_operand(src, dst);
-}
-
-
-void Assembler::bts(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(src, dst);
- emit(0x0F);
- emit(0xAB);
- emit_operand(src, dst);
-}
-
-
-void Assembler::call(Label* L) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- // 1110 1000 #32-bit disp.
- emit(0xE8);
- if (L->is_bound()) {
- int offset = L->pos() - pc_offset() - sizeof(int32_t);
- ASSERT(offset <= 0);
- emitl(offset);
- } else if (L->is_linked()) {
- emitl(L->pos());
- L->link_to(pc_offset() - sizeof(int32_t));
- } else {
- ASSERT(L->is_unused());
- int32_t current = pc_offset();
- emitl(current);
- L->link_to(current);
- }
-}
-
-
-void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- // 1110 1000 #32-bit disp.
- emit(0xE8);
- emit_code_target(target, rmode);
-}
-
-
-void Assembler::call(Register adr) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- // Opcode: FF /2 r64.
- emit_optional_rex_32(adr);
- emit(0xFF);
- emit_modrm(0x2, adr);
-}
-
-
-void Assembler::call(const Operand& op) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- // Opcode: FF /2 m64.
- emit_optional_rex_32(op);
- emit(0xFF);
- emit_operand(0x2, op);
-}
-
-
-// Calls directly to the given address using a relative offset.
-// Should only ever be used in Code objects for calls within the
-// same Code object. Should not be used when generating new code (use labels),
-// but only when patching existing code.
-void Assembler::call(Address target) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- // 1110 1000 #32-bit disp.
- emit(0xE8);
- Address source = pc_ + 4;
- intptr_t displacement = target - source;
- ASSERT(is_int32(displacement));
- emitl(static_cast<int32_t>(displacement));
-}
-
-
-void Assembler::clc() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF8);
-}
-
-void Assembler::cld() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xFC);
-}
-
-void Assembler::cdq() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x99);
-}
-
-
-void Assembler::cmovq(Condition cc, Register dst, Register src) {
- if (cc == always) {
- movq(dst, src);
- } else if (cc == never) {
- return;
- }
- // No need to check CpuInfo for CMOV support, it's a required part of the
- // 64-bit architecture.
- ASSERT(cc >= 0); // Use mov for unconditional moves.
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- // Opcode: REX.W 0f 40 + cc /r.
- emit_rex_64(dst, src);
- emit(0x0f);
- emit(0x40 + cc);
- emit_modrm(dst, src);
-}
-
-
-void Assembler::cmovq(Condition cc, Register dst, const Operand& src) {
- if (cc == always) {
- movq(dst, src);
- } else if (cc == never) {
- return;
- }
- ASSERT(cc >= 0);
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- // Opcode: REX.W 0f 40 + cc /r.
- emit_rex_64(dst, src);
- emit(0x0f);
- emit(0x40 + cc);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cmovl(Condition cc, Register dst, Register src) {
- if (cc == always) {
- movl(dst, src);
- } else if (cc == never) {
- return;
- }
- ASSERT(cc >= 0);
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- // Opcode: 0f 40 + cc /r.
- emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x40 + cc);
- emit_modrm(dst, src);
-}
-
-
-void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
- if (cc == always) {
- movl(dst, src);
- } else if (cc == never) {
- return;
- }
- ASSERT(cc >= 0);
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- // Opcode: 0f 40 + cc /r.
- emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x40 + cc);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cmpb_al(Immediate imm8) {
- ASSERT(is_int8(imm8.value_) || is_uint8(imm8.value_));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x3c);
- emit(imm8.value_);
-}
-
-
-void Assembler::cpuid() {
- ASSERT(CpuFeatures::IsEnabled(CPUID));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x0F);
- emit(0xA2);
-}
-
-
-void Assembler::cqo() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64();
- emit(0x99);
-}
-
-
-void Assembler::decq(Register dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst);
- emit(0xFF);
- emit_modrm(0x1, dst);
-}
-
-
-void Assembler::decq(const Operand& dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst);
- emit(0xFF);
- emit_operand(1, dst);
-}
-
-
-void Assembler::decl(Register dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst);
- emit(0xFF);
- emit_modrm(0x1, dst);
-}
-
-
-void Assembler::decl(const Operand& dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst);
- emit(0xFF);
- emit_operand(1, dst);
-}
-
-
-void Assembler::decb(Register dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (dst.code() > 3) {
- // Register is not one of al, bl, cl, dl. Its encoding needs REX.
- emit_rex_32(dst);
- }
- emit(0xFE);
- emit_modrm(0x1, dst);
-}
-
-
-void Assembler::decb(const Operand& dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst);
- emit(0xFE);
- emit_operand(1, dst);
-}
-
-
-void Assembler::enter(Immediate size) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xC8);
- emitw(size.value_); // 16 bit operand, always.
- emit(0);
-}
-
-
-void Assembler::hlt() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF4);
-}
-
-
-void Assembler::idivq(Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(src);
- emit(0xF7);
- emit_modrm(0x7, src);
-}
-
-
-void Assembler::idivl(Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(src);
- emit(0xF7);
- emit_modrm(0x7, src);
-}
-
-
-void Assembler::imul(Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(src);
- emit(0xF7);
- emit_modrm(0x5, src);
-}
-
-
-void Assembler::imul(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0xAF);
- emit_modrm(dst, src);
-}
-
-
-void Assembler::imul(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0xAF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::imul(Register dst, Register src, Immediate imm) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst, src);
- if (is_int8(imm.value_)) {
- emit(0x6B);
- emit_modrm(dst, src);
- emit(imm.value_);
- } else {
- emit(0x69);
- emit_modrm(dst, src);
- emitl(imm.value_);
- }
-}
-
-
-void Assembler::imull(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xAF);
- emit_modrm(dst, src);
-}
-
-
-void Assembler::imull(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xAF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::imull(Register dst, Register src, Immediate imm) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst, src);
- if (is_int8(imm.value_)) {
- emit(0x6B);
- emit_modrm(dst, src);
- emit(imm.value_);
- } else {
- emit(0x69);
- emit_modrm(dst, src);
- emitl(imm.value_);
- }
-}
-
-
-void Assembler::incq(Register dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst);
- emit(0xFF);
- emit_modrm(0x0, dst);
-}
-
-
-void Assembler::incq(const Operand& dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst);
- emit(0xFF);
- emit_operand(0, dst);
-}
-
-
-void Assembler::incl(const Operand& dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst);
- emit(0xFF);
- emit_operand(0, dst);
-}
-
-
-void Assembler::incl(Register dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst);
- emit(0xFF);
- emit_modrm(0, dst);
-}
-
-
-void Assembler::int3() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xCC);
-}
-
-
-void Assembler::j(Condition cc, Label* L) {
- if (cc == always) {
- jmp(L);
- return;
- } else if (cc == never) {
- return;
- }
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(is_uint4(cc));
- if (L->is_bound()) {
- const int short_size = 2;
- const int long_size = 6;
- int offs = L->pos() - pc_offset();
- ASSERT(offs <= 0);
- if (is_int8(offs - short_size)) {
- // 0111 tttn #8-bit disp.
- emit(0x70 | cc);
- emit((offs - short_size) & 0xFF);
- } else {
- // 0000 1111 1000 tttn #32-bit disp.
- emit(0x0F);
- emit(0x80 | cc);
- emitl(offs - long_size);
- }
- } else if (L->is_linked()) {
- // 0000 1111 1000 tttn #32-bit disp.
- emit(0x0F);
- emit(0x80 | cc);
- emitl(L->pos());
- L->link_to(pc_offset() - sizeof(int32_t));
- } else {
- ASSERT(L->is_unused());
- emit(0x0F);
- emit(0x80 | cc);
- int32_t current = pc_offset();
- emitl(current);
- L->link_to(current);
- }
-}
-
-
-void Assembler::j(Condition cc,
- Handle<Code> target,
- RelocInfo::Mode rmode) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(is_uint4(cc));
- // 0000 1111 1000 tttn #32-bit disp.
- emit(0x0F);
- emit(0x80 | cc);
- emit_code_target(target, rmode);
-}
-
-
-void Assembler::j(Condition cc, NearLabel* L, Hint hint) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(0 <= cc && cc < 16);
- if (FLAG_emit_branch_hints && hint != no_hint) emit(hint);
- if (L->is_bound()) {
- const int short_size = 2;
- int offs = L->pos() - pc_offset();
- ASSERT(offs <= 0);
- ASSERT(is_int8(offs - short_size));
- // 0111 tttn #8-bit disp
- emit(0x70 | cc);
- emit((offs - short_size) & 0xFF);
- } else {
- emit(0x70 | cc);
- emit(0x00); // The displacement will be resolved later.
- L->link_to(pc_offset());
- }
-}
-
-
-void Assembler::jmp(Label* L) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- const int short_size = sizeof(int8_t);
- const int long_size = sizeof(int32_t);
- if (L->is_bound()) {
- int offs = L->pos() - pc_offset() - 1;
- ASSERT(offs <= 0);
- if (is_int8(offs - short_size)) {
- // 1110 1011 #8-bit disp.
- emit(0xEB);
- emit((offs - short_size) & 0xFF);
- } else {
- // 1110 1001 #32-bit disp.
- emit(0xE9);
- emitl(offs - long_size);
- }
- } else if (L->is_linked()) {
- // 1110 1001 #32-bit disp.
- emit(0xE9);
- emitl(L->pos());
- L->link_to(pc_offset() - long_size);
- } else {
- // 1110 1001 #32-bit disp.
- ASSERT(L->is_unused());
- emit(0xE9);
- int32_t current = pc_offset();
- emitl(current);
- L->link_to(current);
- }
-}
-
-
-void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- // 1110 1001 #32-bit disp.
- emit(0xE9);
- emit_code_target(target, rmode);
-}
-
-
-void Assembler::jmp(NearLabel* L) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (L->is_bound()) {
- const int short_size = sizeof(int8_t);
- int offs = L->pos() - pc_offset();
- ASSERT(offs <= 0);
- ASSERT(is_int8(offs - short_size));
- // 1110 1011 #8-bit disp.
- emit(0xEB);
- emit((offs - short_size) & 0xFF);
- } else {
- emit(0xEB);
- emit(0x00); // The displacement will be resolved later.
- L->link_to(pc_offset());
- }
-}
-
-
-void Assembler::jmp(Register target) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- // Opcode FF/4 r64.
- emit_optional_rex_32(target);
- emit(0xFF);
- emit_modrm(0x4, target);
-}
-
-
-void Assembler::jmp(const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- // Opcode FF/4 m64.
- emit_optional_rex_32(src);
- emit(0xFF);
- emit_operand(0x4, src);
-}
-
-
-void Assembler::lea(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst, src);
- emit(0x8D);
- emit_operand(dst, src);
-}
-
-
-void Assembler::leal(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst, src);
- emit(0x8D);
- emit_operand(dst, src);
-}
-
-
-void Assembler::load_rax(void* value, RelocInfo::Mode mode) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x48); // REX.W
- emit(0xA1);
- emitq(reinterpret_cast<uintptr_t>(value), mode);
-}
-
-
-void Assembler::load_rax(ExternalReference ref) {
- load_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
-}
-
-
-void Assembler::leave() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xC9);
-}
-
-
-void Assembler::movb(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_32(dst, src);
- emit(0x8A);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movb(Register dst, Immediate imm) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_32(dst);
- emit(0xC6);
- emit_modrm(0x0, dst);
- emit(imm.value_);
-}
-
-
-void Assembler::movb(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_32(src, dst);
- emit(0x88);
- emit_operand(src, dst);
-}
-
-
-void Assembler::movw(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x66);
- emit_optional_rex_32(src, dst);
- emit(0x89);
- emit_operand(src, dst);
-}
-
-
-void Assembler::movl(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst, src);
- emit(0x8B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movl(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (src.low_bits() == 4) {
- emit_optional_rex_32(src, dst);
- emit(0x89);
- emit_modrm(src, dst);
- } else {
- emit_optional_rex_32(dst, src);
- emit(0x8B);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::movl(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(src, dst);
- emit(0x89);
- emit_operand(src, dst);
-}
-
-
-void Assembler::movl(const Operand& dst, Immediate value) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst);
- emit(0xC7);
- emit_operand(0x0, dst);
- emit(value); // Only 32-bit immediates are possible, not 8-bit immediates.
-}
-
-
-void Assembler::movl(Register dst, Immediate value) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst);
- emit(0xC7);
- emit_modrm(0x0, dst);
- emit(value); // Only 32-bit immediates are possible, not 8-bit immediates.
-}
-
-
-void Assembler::movq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst, src);
- emit(0x8B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movq(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (src.low_bits() == 4) {
- emit_rex_64(src, dst);
- emit(0x89);
- emit_modrm(src, dst);
- } else {
- emit_rex_64(dst, src);
- emit(0x8B);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::movq(Register dst, Immediate value) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst);
- emit(0xC7);
- emit_modrm(0x0, dst);
- emit(value); // Only 32-bit immediates are possible, not 8-bit immediates.
-}
-
-
-void Assembler::movq(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(src, dst);
- emit(0x89);
- emit_operand(src, dst);
-}
-
-
-void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
- // This method must not be used with heap object references. The stored
- // address is not GC safe. Use the handle version instead.
- ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- emitq(reinterpret_cast<uintptr_t>(value), rmode);
-}
-
-
-void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
- // Non-relocatable values might not need a 64-bit representation.
- if (rmode == RelocInfo::NONE) {
- // Sadly, there is no zero or sign extending move for 8-bit immediates.
- if (is_int32(value)) {
- movq(dst, Immediate(static_cast<int32_t>(value)));
- return;
- } else if (is_uint32(value)) {
- movl(dst, Immediate(static_cast<int32_t>(value)));
- return;
- }
- // Value cannot be represented by 32 bits, so do a full 64 bit immediate
- // value.
- }
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- emitq(value, rmode);
-}
-
-
-void Assembler::movq(Register dst, ExternalReference ref) {
- int64_t value = reinterpret_cast<int64_t>(ref.address());
- movq(dst, value, RelocInfo::EXTERNAL_REFERENCE);
-}
-
-
-void Assembler::movq(const Operand& dst, Immediate value) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst);
- emit(0xC7);
- emit_operand(0, dst);
- emit(value);
-}
-
-
-// Loads the ip-relative location of the src label into the target location
-// (as a 32-bit offset sign extended to 64-bit).
-void Assembler::movl(const Operand& dst, Label* src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst);
- emit(0xC7);
- emit_operand(0, dst);
- if (src->is_bound()) {
- int offset = src->pos() - pc_offset() - sizeof(int32_t);
- ASSERT(offset <= 0);
- emitl(offset);
- } else if (src->is_linked()) {
- emitl(src->pos());
- src->link_to(pc_offset() - sizeof(int32_t));
- } else {
- ASSERT(src->is_unused());
- int32_t current = pc_offset();
- emitl(current);
- src->link_to(current);
- }
-}
-
-
-void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
- // If there is no relocation info, emit the value of the handle efficiently
- // (possibly using less that 8 bytes for the value).
- if (mode == RelocInfo::NONE) {
- // There is no possible reason to store a heap pointer without relocation
- // info, so it must be a smi.
- ASSERT(value->IsSmi());
- movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE);
- } else {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(value->IsHeapObject());
- ASSERT(!HEAP->InNewSpace(*value));
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- emitq(reinterpret_cast<uintptr_t>(value.location()), mode);
- }
-}
-
-
-void Assembler::movsxbq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0xBE);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movsxwq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0xBF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movsxlq(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst, src);
- emit(0x63);
- emit_modrm(dst, src);
-}
-
-
-void Assembler::movsxlq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst, src);
- emit(0x63);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxbq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB6);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxbl(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB6);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxwq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB7);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxwl(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB7);
- emit_operand(dst, src);
-}
-
-
-void Assembler::repmovsb() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF3);
- emit(0xA4);
-}
-
-
-void Assembler::repmovsw() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x66); // Operand size override.
- emit(0xF3);
- emit(0xA4);
-}
-
-
-void Assembler::repmovsl() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF3);
- emit(0xA5);
-}
-
-
-void Assembler::repmovsq() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF3);
- emit_rex_64();
- emit(0xA5);
-}
-
-
-void Assembler::mul(Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(src);
- emit(0xF7);
- emit_modrm(0x4, src);
-}
-
-
-void Assembler::neg(Register dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst);
- emit(0xF7);
- emit_modrm(0x3, dst);
-}
-
-
-void Assembler::negl(Register dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst);
- emit(0xF7);
- emit_modrm(0x3, dst);
-}
-
-
-void Assembler::neg(const Operand& dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst);
- emit(0xF7);
- emit_operand(3, dst);
-}
-
-
-void Assembler::nop() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x90);
-}
-
-
-void Assembler::not_(Register dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst);
- emit(0xF7);
- emit_modrm(0x2, dst);
-}
-
-
-void Assembler::not_(const Operand& dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(dst);
- emit(0xF7);
- emit_operand(2, dst);
-}
-
-
-void Assembler::notl(Register dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst);
- emit(0xF7);
- emit_modrm(0x2, dst);
-}
-
-
-void Assembler::nop(int n) {
- // The recommended muti-byte sequences of NOP instructions from the Intel 64
- // and IA-32 Architectures Software Developer's Manual.
- //
- // Length Assembly Byte Sequence
- // 2 bytes 66 NOP 66 90H
- // 3 bytes NOP DWORD ptr [EAX] 0F 1F 00H
- // 4 bytes NOP DWORD ptr [EAX + 00H] 0F 1F 40 00H
- // 5 bytes NOP DWORD ptr [EAX + EAX*1 + 00H] 0F 1F 44 00 00H
- // 6 bytes 66 NOP DWORD ptr [EAX + EAX*1 + 00H] 66 0F 1F 44 00 00H
- // 7 bytes NOP DWORD ptr [EAX + 00000000H] 0F 1F 80 00 00 00 00H
- // 8 bytes NOP DWORD ptr [EAX + EAX*1 + 00000000H] 0F 1F 84 00 00 00 00 00H
- // 9 bytes 66 NOP DWORD ptr [EAX + EAX*1 + 66 0F 1F 84 00 00 00 00
- // 00000000H] 00H
-
- ASSERT(1 <= n);
- ASSERT(n <= 9);
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- switch (n) {
- case 1:
- emit(0x90);
- return;
- case 2:
- emit(0x66);
- emit(0x90);
- return;
- case 3:
- emit(0x0f);
- emit(0x1f);
- emit(0x00);
- return;
- case 4:
- emit(0x0f);
- emit(0x1f);
- emit(0x40);
- emit(0x00);
- return;
- case 5:
- emit(0x0f);
- emit(0x1f);
- emit(0x44);
- emit(0x00);
- emit(0x00);
- return;
- case 6:
- emit(0x66);
- emit(0x0f);
- emit(0x1f);
- emit(0x44);
- emit(0x00);
- emit(0x00);
- return;
- case 7:
- emit(0x0f);
- emit(0x1f);
- emit(0x80);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- return;
- case 8:
- emit(0x0f);
- emit(0x1f);
- emit(0x84);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- return;
- case 9:
- emit(0x66);
- emit(0x0f);
- emit(0x1f);
- emit(0x84);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- return;
- }
-}
-
-
-void Assembler::pop(Register dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst);
- emit(0x58 | dst.low_bits());
-}
-
-
-void Assembler::pop(const Operand& dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(dst);
- emit(0x8F);
- emit_operand(0, dst);
-}
-
-
-void Assembler::popfq() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x9D);
-}
-
-
-void Assembler::push(Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(src);
- emit(0x50 | src.low_bits());
-}
-
-
-void Assembler::push(const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(src);
- emit(0xFF);
- emit_operand(6, src);
-}
-
-
-void Assembler::push(Immediate value) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (is_int8(value.value_)) {
- emit(0x6A);
- emit(value.value_); // Emit low byte of value.
- } else {
- emit(0x68);
- emitl(value.value_);
- }
-}
-
-
-void Assembler::push_imm32(int32_t imm32) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x68);
- emitl(imm32);
-}
-
-
-void Assembler::pushfq() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x9C);
-}
-
-
-void Assembler::rdtsc() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x0F);
- emit(0x31);
-}
-
-
-void Assembler::ret(int imm16) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(is_uint16(imm16));
- if (imm16 == 0) {
- emit(0xC3);
- } else {
- emit(0xC2);
- emit(imm16 & 0xFF);
- emit((imm16 >> 8) & 0xFF);
- }
-}
-
-
-void Assembler::setcc(Condition cc, Register reg) {
- if (cc > last_condition) {
- movb(reg, Immediate(cc == always ? 1 : 0));
- return;
- }
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(is_uint4(cc));
- if (reg.code() > 3) { // Use x64 byte registers, where different.
- emit_rex_32(reg);
- }
- emit(0x0F);
- emit(0x90 | cc);
- emit_modrm(0x0, reg);
-}
-
-
-void Assembler::shld(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(src, dst);
- emit(0x0F);
- emit(0xA5);
- emit_modrm(src, dst);
-}
-
-
-void Assembler::shrd(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(src, dst);
- emit(0x0F);
- emit(0xAD);
- emit_modrm(src, dst);
-}
-
-
-void Assembler::xchg(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
- Register other = src.is(rax) ? dst : src;
- emit_rex_64(other);
- emit(0x90 | other.low_bits());
- } else if (dst.low_bits() == 4) {
- emit_rex_64(dst, src);
- emit(0x87);
- emit_modrm(dst, src);
- } else {
- emit_rex_64(src, dst);
- emit(0x87);
- emit_modrm(src, dst);
- }
-}
-
-
-void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x48); // REX.W
- emit(0xA3);
- emitq(reinterpret_cast<uintptr_t>(dst), mode);
-}
-
-
-void Assembler::store_rax(ExternalReference ref) {
- store_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
-}
-
-
-void Assembler::testb(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (src.low_bits() == 4) {
- emit_rex_32(src, dst);
- emit(0x84);
- emit_modrm(src, dst);
- } else {
- if (dst.code() > 3 || src.code() > 3) {
- // Register is not one of al, bl, cl, dl. Its encoding needs REX.
- emit_rex_32(dst, src);
- }
- emit(0x84);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::testb(Register reg, Immediate mask) {
- ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (reg.is(rax)) {
- emit(0xA8);
- emit(mask.value_); // Low byte emitted.
- } else {
- if (reg.code() > 3) {
- // Register is not one of al, bl, cl, dl. Its encoding needs REX.
- emit_rex_32(reg);
- }
- emit(0xF6);
- emit_modrm(0x0, reg);
- emit(mask.value_); // Low byte emitted.
- }
-}
-
-
-void Assembler::testb(const Operand& op, Immediate mask) {
- ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(rax, op);
- emit(0xF6);
- emit_operand(rax, op); // Operation code 0
- emit(mask.value_); // Low byte emitted.
-}
-
-
-void Assembler::testb(const Operand& op, Register reg) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (reg.code() > 3) {
- // Register is not one of al, bl, cl, dl. Its encoding needs REX.
- emit_rex_32(reg, op);
- } else {
- emit_optional_rex_32(reg, op);
- }
- emit(0x84);
- emit_operand(reg, op);
-}
-
-
-void Assembler::testl(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (src.low_bits() == 4) {
- emit_optional_rex_32(src, dst);
- emit(0x85);
- emit_modrm(src, dst);
- } else {
- emit_optional_rex_32(dst, src);
- emit(0x85);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::testl(Register reg, Immediate mask) {
- // testl with a mask that fits in the low byte is exactly testb.
- if (is_uint8(mask.value_)) {
- testb(reg, mask);
- return;
- }
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (reg.is(rax)) {
- emit(0xA9);
- emit(mask);
- } else {
- emit_optional_rex_32(rax, reg);
- emit(0xF7);
- emit_modrm(0x0, reg);
- emit(mask);
- }
-}
-
-
-void Assembler::testl(const Operand& op, Immediate mask) {
- // testl with a mask that fits in the low byte is exactly testb.
- if (is_uint8(mask.value_)) {
- testb(op, mask);
- return;
- }
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(rax, op);
- emit(0xF7);
- emit_operand(rax, op); // Operation code 0
- emit(mask);
-}
-
-
-void Assembler::testq(const Operand& op, Register reg) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_64(reg, op);
- emit(0x85);
- emit_operand(reg, op);
-}
-
-
-void Assembler::testq(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (src.low_bits() == 4) {
- emit_rex_64(src, dst);
- emit(0x85);
- emit_modrm(src, dst);
- } else {
- emit_rex_64(dst, src);
- emit(0x85);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::testq(Register dst, Immediate mask) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (dst.is(rax)) {
- emit_rex_64();
- emit(0xA9);
- emit(mask);
- } else {
- emit_rex_64(dst);
- emit(0xF7);
- emit_modrm(0, dst);
- emit(mask);
- }
-}
-
-
-// FPU instructions.
-
-
-void Assembler::fld(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xD9, 0xC0, i);
-}
-
-
-void Assembler::fld1() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xD9);
- emit(0xE8);
-}
-
-
-void Assembler::fldz() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xD9);
- emit(0xEE);
-}
-
-
-void Assembler::fldpi() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xD9);
- emit(0xEB);
-}
-
-
-void Assembler::fldln2() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xD9);
- emit(0xED);
-}
-
-
-void Assembler::fld_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(adr);
- emit(0xD9);
- emit_operand(0, adr);
-}
-
-
-void Assembler::fld_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(adr);
- emit(0xDD);
- emit_operand(0, adr);
-}
-
-
-void Assembler::fstp_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(adr);
- emit(0xD9);
- emit_operand(3, adr);
-}
-
-
-void Assembler::fstp_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(adr);
- emit(0xDD);
- emit_operand(3, adr);
-}
-
-
-void Assembler::fstp(int index) {
- ASSERT(is_uint3(index));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDD, 0xD8, index);
-}
-
-
-void Assembler::fild_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(adr);
- emit(0xDB);
- emit_operand(0, adr);
-}
-
-
-void Assembler::fild_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(adr);
- emit(0xDF);
- emit_operand(5, adr);
-}
-
-
-void Assembler::fistp_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(adr);
- emit(0xDB);
- emit_operand(3, adr);
-}
-
-
-void Assembler::fisttp_s(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(SSE3));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(adr);
- emit(0xDB);
- emit_operand(1, adr);
-}
-
-
-void Assembler::fisttp_d(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(SSE3));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(adr);
- emit(0xDD);
- emit_operand(1, adr);
-}
-
-
-void Assembler::fist_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(adr);
- emit(0xDB);
- emit_operand(2, adr);
-}
-
-
-void Assembler::fistp_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(adr);
- emit(0xDF);
- emit_operand(7, adr);
-}
-
-
-void Assembler::fabs() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xD9);
- emit(0xE1);
-}
-
-
-void Assembler::fchs() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xD9);
- emit(0xE0);
-}
-
-
-void Assembler::fcos() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xD9);
- emit(0xFF);
-}
-
-
-void Assembler::fsin() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xD9);
- emit(0xFE);
-}
-
-
-void Assembler::fyl2x() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xD9);
- emit(0xF1);
-}
-
-
-void Assembler::fadd(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDC, 0xC0, i);
-}
-
-
-void Assembler::fsub(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDC, 0xE8, i);
-}
-
-
-void Assembler::fisub_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(adr);
- emit(0xDA);
- emit_operand(4, adr);
-}
-
-
-void Assembler::fmul(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDC, 0xC8, i);
-}
-
-
-void Assembler::fdiv(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDC, 0xF8, i);
-}
-
-
-void Assembler::faddp(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDE, 0xC0, i);
-}
-
-
-void Assembler::fsubp(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDE, 0xE8, i);
-}
-
-
-void Assembler::fsubrp(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDE, 0xE0, i);
-}
-
-
-void Assembler::fmulp(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDE, 0xC8, i);
-}
-
-
-void Assembler::fdivp(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDE, 0xF8, i);
-}
-
-
-void Assembler::fprem() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xD9);
- emit(0xF8);
-}
-
-
-void Assembler::fprem1() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xD9);
- emit(0xF5);
-}
-
-
-void Assembler::fxch(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xD9, 0xC8, i);
-}
-
-
-void Assembler::fincstp() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xD9);
- emit(0xF7);
-}
-
-
-void Assembler::ffree(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDD, 0xC0, i);
-}
-
-
-void Assembler::ftst() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xD9);
- emit(0xE4);
-}
-
-
-void Assembler::fucomp(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_farith(0xDD, 0xE8, i);
-}
-
-
-void Assembler::fucompp() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xDA);
- emit(0xE9);
-}
-
-
-void Assembler::fucomi(int i) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xDB);
- emit(0xE8 + i);
-}
-
-
-void Assembler::fucomip() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xDF);
- emit(0xE9);
-}
-
-
-void Assembler::fcompp() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xDE);
- emit(0xD9);
-}
-
-
-void Assembler::fnstsw_ax() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xDF);
- emit(0xE0);
-}
-
-
-void Assembler::fwait() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x9B);
-}
-
-
-void Assembler::frndint() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xD9);
- emit(0xFC);
-}
-
-
-void Assembler::fnclex() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xDB);
- emit(0xE2);
-}
-
-
-void Assembler::sahf() {
- // TODO(X64): Test for presence. Not all 64-bit intel CPU's have sahf
- // in 64-bit mode. Test CpuID.
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x9E);
-}
-
-
-void Assembler::emit_farith(int b1, int b2, int i) {
- ASSERT(is_uint8(b1) && is_uint8(b2)); // wrong opcode
- ASSERT(is_uint3(i)); // illegal stack offset
- emit(b1);
- emit(b2 + i);
-}
-
-// SSE 2 operations.
-
-void Assembler::movd(XMMRegister dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x6E);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movd(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x66);
- emit_optional_rex_32(src, dst);
- emit(0x0F);
- emit(0x7E);
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movq(XMMRegister dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x66);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0x6E);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movq(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x66);
- emit_rex_64(src, dst);
- emit(0x0F);
- emit(0x7E);
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movdqa(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x66);
- emit_rex_64(src, dst);
- emit(0x0F);
- emit(0x7F);
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movdqa(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x66);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0x6F);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
- ASSERT(is_uint2(imm8));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x3A);
- emit(0x17);
- emit_sse_operand(dst, src);
- emit(imm8);
-}
-
-
-void Assembler::movsd(const Operand& dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF2); // double
- emit_optional_rex_32(src, dst);
- emit(0x0F);
- emit(0x11); // store
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movsd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF2); // double
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x10); // load
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movsd(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF2); // double
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x10); // load
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movss(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF3); // single
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x10); // load
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movss(const Operand& src, XMMRegister dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF3); // single
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x11); // store
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvttss2si(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2C);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cvttss2si(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2C);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvttsd2si(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2C);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cvttsd2si(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2C);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF2);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0x2C);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtlsi2sd(XMMRegister dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtlsi2ss(XMMRegister dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF2);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0x2A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtsd2si(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2D);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtsd2siq(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF2);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0x2D);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::addsd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x58);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x59);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::subsd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5C);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::divsd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5E);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::andpd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x54);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::orpd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x56);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x57);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x51);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x2e);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x2e);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movmskpd(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x50);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
- Register ireg = { reg.code() };
- emit_operand(ireg, adr);
-}
-
-
-void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
- emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
-}
-
-void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
- emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
-}
-
-void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
- emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
-}
-
-
-void Assembler::db(uint8_t data) {
- EnsureSpace ensure_space(this);
- emit(data);
-}
-
-
-void Assembler::dd(uint32_t data) {
- EnsureSpace ensure_space(this);
- emitl(data);
-}
-
-
-// Relocation information implementations.
-
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- ASSERT(rmode != RelocInfo::NONE);
- // Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- if (!Serializer::enabled() && !emit_debug_code()) {
- return;
- }
- }
- RelocInfo rinfo(pc_, rmode, data);
- reloc_info_writer.Write(&rinfo);
-}
-
-void Assembler::RecordJSReturn() {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordDebugBreakSlot() {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
-}
-
-
-void Assembler::RecordComment(const char* msg, bool force) {
- if (FLAG_code_comments || force) {
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
-
-const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
- 1 << RelocInfo::INTERNAL_REFERENCE;
-
-
-bool RelocInfo::IsCodedSpecially() {
- // The deserializer needs to know whether a pointer is specially coded. Being
- // specially coded on x64 means that it is a relative 32 bit address, as used
- // by branch instructions.
- return (1 << rmode_) & kApplyMask;
-}
-
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/assembler-x64.h b/src/3rdparty/v8/src/x64/assembler-x64.h
deleted file mode 100644
index f22f80b..0000000
--- a/src/3rdparty/v8/src/x64/assembler-x64.h
+++ /dev/null
@@ -1,1632 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
-
-// A lightweight X64 Assembler.
-
-#ifndef V8_X64_ASSEMBLER_X64_H_
-#define V8_X64_ASSEMBLER_X64_H_
-
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// Utility functions
-
-// Test whether a 64-bit value is in a specific range.
-static inline bool is_uint32(int64_t x) {
- static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
- return static_cast<uint64_t>(x) <= kMaxUInt32;
-}
-
-static inline bool is_int32(int64_t x) {
- static const int64_t kMinInt32 = -V8_INT64_C(0x80000000);
- return is_uint32(x - kMinInt32);
-}
-
-static inline bool uint_is_int32(uint64_t x) {
- static const uint64_t kMaxInt32 = V8_UINT64_C(0x7fffffff);
- return x <= kMaxInt32;
-}
-
-static inline bool is_uint32(uint64_t x) {
- static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
- return x <= kMaxUInt32;
-}
-
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-//
-
-struct Register {
- // The non-allocatable registers are:
- // rsp - stack pointer
- // rbp - frame pointer
- // rsi - context register
- // r10 - fixed scratch register
- // r12 - smi constant register
- // r13 - root register
- static const int kNumRegisters = 16;
- static const int kNumAllocatableRegisters = 10;
-
- static int ToAllocationIndex(Register reg) {
- return kAllocationIndexByRegisterCode[reg.code()];
- }
-
- static Register FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- Register result = { kRegisterCodeByAllocationIndex[index] };
- return result;
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- const char* const names[] = {
- "rax",
- "rbx",
- "rdx",
- "rcx",
- "rdi",
- "r8",
- "r9",
- "r11",
- "r14",
- "r15"
- };
- return names[index];
- }
-
- static Register toRegister(int code) {
- Register r = { code };
- return r;
- }
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- return 1 << code_;
- }
-
- // Return the high bit of the register code as a 0 or 1. Used often
- // when constructing the REX prefix byte.
- int high_bit() const {
- return code_ >> 3;
- }
- // Return the 3 low bits of the register code. Used when encoding registers
- // in modR/M, SIB, and opcode bytes.
- int low_bits() const {
- return code_ & 0x7;
- }
-
- // Unfortunately we can't make this private in a struct when initializing
- // by assignment.
- int code_;
-
- private:
- static const int kRegisterCodeByAllocationIndex[kNumAllocatableRegisters];
- static const int kAllocationIndexByRegisterCode[kNumRegisters];
-};
-
-const Register rax = { 0 };
-const Register rcx = { 1 };
-const Register rdx = { 2 };
-const Register rbx = { 3 };
-const Register rsp = { 4 };
-const Register rbp = { 5 };
-const Register rsi = { 6 };
-const Register rdi = { 7 };
-const Register r8 = { 8 };
-const Register r9 = { 9 };
-const Register r10 = { 10 };
-const Register r11 = { 11 };
-const Register r12 = { 12 };
-const Register r13 = { 13 };
-const Register r14 = { 14 };
-const Register r15 = { 15 };
-const Register no_reg = { -1 };
-
-
-struct XMMRegister {
- static const int kNumRegisters = 16;
- static const int kNumAllocatableRegisters = 15;
-
- static int ToAllocationIndex(XMMRegister reg) {
- ASSERT(reg.code() != 0);
- return reg.code() - 1;
- }
-
- static XMMRegister FromAllocationIndex(int index) {
- ASSERT(0 <= index && index < kNumAllocatableRegisters);
- XMMRegister result = { index + 1 };
- return result;
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- const char* const names[] = {
- "xmm1",
- "xmm2",
- "xmm3",
- "xmm4",
- "xmm5",
- "xmm6",
- "xmm7",
- "xmm8",
- "xmm9",
- "xmm10",
- "xmm11",
- "xmm12",
- "xmm13",
- "xmm14",
- "xmm15"
- };
- return names[index];
- }
-
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(XMMRegister reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
-
- // Return the high bit of the register code as a 0 or 1. Used often
- // when constructing the REX prefix byte.
- int high_bit() const {
- return code_ >> 3;
- }
- // Return the 3 low bits of the register code. Used when encoding registers
- // in modR/M, SIB, and opcode bytes.
- int low_bits() const {
- return code_ & 0x7;
- }
-
- int code_;
-};
-
-const XMMRegister xmm0 = { 0 };
-const XMMRegister xmm1 = { 1 };
-const XMMRegister xmm2 = { 2 };
-const XMMRegister xmm3 = { 3 };
-const XMMRegister xmm4 = { 4 };
-const XMMRegister xmm5 = { 5 };
-const XMMRegister xmm6 = { 6 };
-const XMMRegister xmm7 = { 7 };
-const XMMRegister xmm8 = { 8 };
-const XMMRegister xmm9 = { 9 };
-const XMMRegister xmm10 = { 10 };
-const XMMRegister xmm11 = { 11 };
-const XMMRegister xmm12 = { 12 };
-const XMMRegister xmm13 = { 13 };
-const XMMRegister xmm14 = { 14 };
-const XMMRegister xmm15 = { 15 };
-
-
-typedef XMMRegister DoubleRegister;
-
-
-enum Condition {
- // any value < 0 is considered no_condition
- no_condition = -1,
-
- overflow = 0,
- no_overflow = 1,
- below = 2,
- above_equal = 3,
- equal = 4,
- not_equal = 5,
- below_equal = 6,
- above = 7,
- negative = 8,
- positive = 9,
- parity_even = 10,
- parity_odd = 11,
- less = 12,
- greater_equal = 13,
- less_equal = 14,
- greater = 15,
-
- // Fake conditions that are handled by the
- // opcodes using them.
- always = 16,
- never = 17,
- // aliases
- carry = below,
- not_carry = above_equal,
- zero = equal,
- not_zero = not_equal,
- sign = negative,
- not_sign = positive,
- last_condition = greater
-};
-
-
-// Returns the equivalent of !cc.
-// Negation of the default no_condition (-1) results in a non-default
-// no_condition value (-2). As long as tests for no_condition check
-// for condition < 0, this will work as expected.
-inline Condition NegateCondition(Condition cc) {
- return static_cast<Condition>(cc ^ 1);
-}
-
-
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cc) {
- switch (cc) {
- case below:
- return above;
- case above:
- return below;
- case above_equal:
- return below_equal;
- case below_equal:
- return above_equal;
- case less:
- return greater;
- case greater:
- return less;
- case greater_equal:
- return less_equal;
- case less_equal:
- return greater_equal;
- default:
- return cc;
- };
-}
-
-
-enum Hint {
- no_hint = 0,
- not_taken = 0x2e,
- taken = 0x3e
-};
-
-// The result of negating a hint is as if the corresponding condition
-// were negated by NegateCondition. That is, no_hint is mapped to
-// itself and not_taken and taken are mapped to each other.
-inline Hint NegateHint(Hint hint) {
- return (hint == no_hint)
- ? no_hint
- : ((hint == not_taken) ? taken : not_taken);
-}
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Immediates
-
-class Immediate BASE_EMBEDDED {
- public:
- explicit Immediate(int32_t value) : value_(value) {}
-
- private:
- int32_t value_;
-
- friend class Assembler;
-};
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Operands
-
-enum ScaleFactor {
- times_1 = 0,
- times_2 = 1,
- times_4 = 2,
- times_8 = 3,
- times_int_size = times_4,
- times_pointer_size = times_8
-};
-
-
-class Operand BASE_EMBEDDED {
- public:
- // [base + disp/r]
- Operand(Register base, int32_t disp);
-
- // [base + index*scale + disp/r]
- Operand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp);
-
- // [index*scale + disp/r]
- Operand(Register index,
- ScaleFactor scale,
- int32_t disp);
-
- // Offset from existing memory operand.
- // Offset is added to existing displacement as 32-bit signed values and
- // this must not overflow.
- Operand(const Operand& base, int32_t offset);
-
- // Checks whether either base or index register is the given register.
- // Does not check the "reg" part of the Operand.
- bool AddressUsesRegister(Register reg) const;
-
- // Queries related to the size of the generated instruction.
- // Whether the generated instruction will have a REX prefix.
- bool requires_rex() const { return rex_ != 0; }
- // Size of the ModR/M, SIB and displacement parts of the generated
- // instruction.
- int operand_size() const { return len_; }
-
- private:
- byte rex_;
- byte buf_[6];
- // The number of bytes of buf_ in use.
- byte len_;
-
- // Set the ModR/M byte without an encoded 'reg' register. The
- // register is encoded later as part of the emit_operand operation.
- // set_modrm can be called before or after set_sib and set_disp*.
- inline void set_modrm(int mod, Register rm);
-
- // Set the SIB byte if one is needed. Sets the length to 2 rather than 1.
- inline void set_sib(ScaleFactor scale, Register index, Register base);
-
- // Adds operand displacement fields (offsets added to the memory address).
- // Needs to be called after set_sib, not before it.
- inline void set_disp8(int disp);
- inline void set_disp32(int disp);
-
- friend class Assembler;
-};
-
-
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
-// Example:
-// if (CpuFeatures::IsSupported(SSE3)) {
-// CpuFeatures::Scope fscope(SSE3);
-// // Generate SSE3 floating point code.
-// } else {
-// // Generate standard x87 or SSE2 floating point code.
-// }
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
-
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- ASSERT(initialized_);
- if (f == SSE2 && !FLAG_enable_sse2) return false;
- if (f == SSE3 && !FLAG_enable_sse3) return false;
- if (f == CMOV && !FLAG_enable_cmov) return false;
- if (f == RDTSC && !FLAG_enable_rdtsc) return false;
- if (f == SAHF && !FLAG_enable_sahf) return false;
- return (supported_ & (V8_UINT64_C(1) << f)) != 0;
- }
-
-#ifdef DEBUG
- // Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
- ASSERT(initialized_);
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL) {
- // When no isolate is available, work as if we're running in
- // release mode.
- return IsSupported(f);
- }
- uint64_t enabled = isolate->enabled_cpu_features();
- return (enabled & (V8_UINT64_C(1) << f)) != 0;
- }
-#endif
-
- // Enable a specified feature within a scope.
- class Scope BASE_EMBEDDED {
-#ifdef DEBUG
- public:
- explicit Scope(CpuFeature f) {
- uint64_t mask = V8_UINT64_C(1) << f;
- ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() ||
- (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
- isolate_ = Isolate::UncheckedCurrent();
- old_enabled_ = 0;
- if (isolate_ != NULL) {
- old_enabled_ = isolate_->enabled_cpu_features();
- isolate_->set_enabled_cpu_features(old_enabled_ | mask);
- }
- }
- ~Scope() {
- ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
- if (isolate_ != NULL) {
- isolate_->set_enabled_cpu_features(old_enabled_);
- }
- }
- private:
- Isolate* isolate_;
- uint64_t old_enabled_;
-#else
- public:
- explicit Scope(CpuFeature f) {}
-#endif
- };
-
- private:
- // Safe defaults include SSE2 and CMOV for X64. It is always available, if
- // anyone checks, but they shouldn't need to check.
- // The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
- // fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
- static const uint64_t kDefaultCpuFeatures = (1 << SSE2 | 1 << CMOV);
-
-#ifdef DEBUG
- static bool initialized_;
-#endif
- static uint64_t supported_;
- static uint64_t found_by_runtime_probing_;
-
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
-};
-
-
-class Assembler : public AssemblerBase {
- private:
- // We check before assembling an instruction that there is sufficient
- // space to write an instruction and its relocation information.
- // The relocation writer's position must be kGap bytes above the end of
- // the generated instructions. This leaves enough space for the
- // longest possible x64 instruction, 15 bytes, and the longest possible
- // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
- // (There is a 15 byte limit on x64 instruction length that rules out some
- // otherwise valid instructions.)
- // This allows for a single, fast space check per instruction.
- static const int kGap = 32;
-
- public:
- // Create an assembler. Instructions and relocation information are emitted
- // into a buffer, with the instructions starting from the beginning and the
- // relocation information starting from the end of the buffer. See CodeDesc
- // for a detailed comment on the layout (globals.h).
- //
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
- //
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
- Assembler(Isolate* isolate, void* buffer, int buffer_size);
- ~Assembler();
-
- // Overrides the default provided by FLAG_debug_code.
- void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
-
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- void GetCode(CodeDesc* desc);
-
- // Read/Modify the code target in the relative branch/call instruction at pc.
- // On the x64 architecture, we use relative jumps with a 32-bit displacement
- // to jump to other Code objects in the Code space in the heap.
- // Jumps to C functions are done indirectly through a 64-bit register holding
- // the absolute address of the target.
- // These functions convert between absolute Addresses of Code objects and
- // the relative displacements stored in the code.
- static inline Address target_address_at(Address pc);
- static inline void set_target_address_at(Address pc, Address target);
-
- // This sets the branch destination (which is in the instruction on x64).
- // This is for calls and branches within generated code.
- inline static void set_target_at(Address instruction_payload,
- Address target) {
- set_target_address_at(instruction_payload, target);
- }
-
- // This sets the branch destination (which is a load instruction on x64).
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address instruction_payload,
- Address target) {
- *reinterpret_cast<Address*>(instruction_payload) = target;
- }
-
- inline Handle<Object> code_target_object_handle_at(Address pc);
- // Number of bytes taken up by the branch target in the code.
- static const int kCallTargetSize = 4; // Use 32-bit displacement.
- static const int kExternalTargetSize = 8; // Use 64-bit absolute.
- // Distance between the address of the code target in the call instruction
- // and the return address pushed on the stack.
- static const int kCallTargetAddressOffset = 4; // Use 32-bit displacement.
- // Distance between the start of the JS return sequence and where the
- // 32-bit displacement of a near call would be, relative to the pushed
- // return address. TODO: Use return sequence length instead.
- // Should equal Debug::kX64JSReturnSequenceLength - kCallTargetAddressOffset;
- static const int kPatchReturnSequenceAddressOffset = 13 - 4;
- // Distance between start of patched debug break slot and where the
- // 32-bit displacement of a near call would be, relative to the pushed
- // return address. TODO: Use return sequence length instead.
- // Should equal Debug::kX64JSReturnSequenceLength - kCallTargetAddressOffset;
- static const int kPatchDebugBreakSlotAddressOffset = 13 - 4;
- // TODO(X64): Rename this, removing the "Real", after changing the above.
- static const int kRealPatchReturnSequenceAddressOffset = 2;
-
- // Some x64 JS code is padded with int3 to make it large
- // enough to hold an instruction when the debugger patches it.
- static const int kJumpInstructionLength = 13;
- static const int kCallInstructionLength = 13;
- static const int kJSReturnSequenceLength = 13;
- static const int kShortCallInstructionLength = 5;
-
- // The debug break slot must be able to contain a call instruction.
- static const int kDebugBreakSlotLength = kCallInstructionLength;
-
- // One byte opcode for test eax,0xXXXXXXXX.
- static const byte kTestEaxByte = 0xA9;
- // One byte opcode for test al, 0xXX.
- static const byte kTestAlByte = 0xA8;
- // One byte opcode for nop.
- static const byte kNopByte = 0x90;
-
- // One byte prefix for a short conditional jump.
- static const byte kJccShortPrefix = 0x70;
- static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
- static const byte kJcShortOpcode = kJccShortPrefix | carry;
-
-
-
- // ---------------------------------------------------------------------------
- // Code generation
- //
- // Function names correspond one-to-one to x64 instruction mnemonics.
- // Unless specified otherwise, instructions operate on 64-bit operands.
- //
- // If we need versions of an assembly instruction that operate on different
- // width arguments, we add a single-letter suffix specifying the width.
- // This is done for the following instructions: mov, cmp, inc, dec,
- // add, sub, and test.
- // There are no versions of these instructions without the suffix.
- // - Instructions on 8-bit (byte) operands/registers have a trailing 'b'.
- // - Instructions on 16-bit (word) operands/registers have a trailing 'w'.
- // - Instructions on 32-bit (doubleword) operands/registers use 'l'.
- // - Instructions on 64-bit (quadword) operands/registers use 'q'.
- //
- // Some mnemonics, such as "and", are the same as C++ keywords.
- // Naming conflicts with C++ keywords are resolved by adding a trailing '_'.
-
- // Insert the smallest number of nop instructions
- // possible to align the pc offset to a multiple
- // of m, where m must be a power of 2.
- void Align(int m);
- // Aligns code to something that's optimal for a jump target for the platform.
- void CodeTargetAlign();
-
- // Stack
- void pushfq();
- void popfq();
-
- void push(Immediate value);
- // Push a 32 bit integer, and guarantee that it is actually pushed as a
- // 32 bit value, the normal push will optimize the 8 bit case.
- void push_imm32(int32_t imm32);
- void push(Register src);
- void push(const Operand& src);
-
- void pop(Register dst);
- void pop(const Operand& dst);
-
- void enter(Immediate size);
- void leave();
-
- // Moves
- void movb(Register dst, const Operand& src);
- void movb(Register dst, Immediate imm);
- void movb(const Operand& dst, Register src);
-
- // Move the low 16 bits of a 64-bit register value to a 16-bit
- // memory location.
- void movw(const Operand& dst, Register src);
-
- void movl(Register dst, Register src);
- void movl(Register dst, const Operand& src);
- void movl(const Operand& dst, Register src);
- void movl(const Operand& dst, Immediate imm);
- // Load a 32-bit immediate value, zero-extended to 64 bits.
- void movl(Register dst, Immediate imm32);
-
- // Move 64 bit register value to 64-bit memory location.
- void movq(const Operand& dst, Register src);
- // Move 64 bit memory location to 64-bit register value.
- void movq(Register dst, const Operand& src);
- void movq(Register dst, Register src);
- // Sign extends immediate 32-bit value to 64 bits.
- void movq(Register dst, Immediate x);
- // Move the offset of the label location relative to the current
- // position (after the move) to the destination.
- void movl(const Operand& dst, Label* src);
-
- // Move sign extended immediate to memory location.
- void movq(const Operand& dst, Immediate value);
- // Instructions to load a 64-bit immediate into a register.
- // All 64-bit immediates must have a relocation mode.
- void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
- void movq(Register dst, int64_t value, RelocInfo::Mode rmode);
- void movq(Register dst, const char* s, RelocInfo::Mode rmode);
- // Moves the address of the external reference into the register.
- void movq(Register dst, ExternalReference ext);
- void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
-
- void movsxbq(Register dst, const Operand& src);
- void movsxwq(Register dst, const Operand& src);
- void movsxlq(Register dst, Register src);
- void movsxlq(Register dst, const Operand& src);
- void movzxbq(Register dst, const Operand& src);
- void movzxbl(Register dst, const Operand& src);
- void movzxwq(Register dst, const Operand& src);
- void movzxwl(Register dst, const Operand& src);
-
- // Repeated moves.
-
- void repmovsb();
- void repmovsw();
- void repmovsl();
- void repmovsq();
-
- // Instruction to load from an immediate 64-bit pointer into RAX.
- void load_rax(void* ptr, RelocInfo::Mode rmode);
- void load_rax(ExternalReference ext);
-
- // Conditional moves.
- void cmovq(Condition cc, Register dst, Register src);
- void cmovq(Condition cc, Register dst, const Operand& src);
- void cmovl(Condition cc, Register dst, Register src);
- void cmovl(Condition cc, Register dst, const Operand& src);
-
- // Exchange two registers
- void xchg(Register dst, Register src);
-
- // Arithmetics
- void addl(Register dst, Register src) {
- arithmetic_op_32(0x03, dst, src);
- }
-
- void addl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x0, dst, src);
- }
-
- void addl(Register dst, const Operand& src) {
- arithmetic_op_32(0x03, dst, src);
- }
-
- void addl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x0, dst, src);
- }
-
- void addq(Register dst, Register src) {
- arithmetic_op(0x03, dst, src);
- }
-
- void addq(Register dst, const Operand& src) {
- arithmetic_op(0x03, dst, src);
- }
-
- void addq(const Operand& dst, Register src) {
- arithmetic_op(0x01, src, dst);
- }
-
- void addq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x0, dst, src);
- }
-
- void addq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x0, dst, src);
- }
-
- void sbbl(Register dst, Register src) {
- arithmetic_op_32(0x1b, dst, src);
- }
-
- void sbbq(Register dst, Register src) {
- arithmetic_op(0x1b, dst, src);
- }
-
- void cmpb(Register dst, Immediate src) {
- immediate_arithmetic_op_8(0x7, dst, src);
- }
-
- void cmpb_al(Immediate src);
-
- void cmpb(Register dst, Register src) {
- arithmetic_op(0x3A, dst, src);
- }
-
- void cmpb(Register dst, const Operand& src) {
- arithmetic_op(0x3A, dst, src);
- }
-
- void cmpb(const Operand& dst, Register src) {
- arithmetic_op(0x38, src, dst);
- }
-
- void cmpb(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_8(0x7, dst, src);
- }
-
- void cmpw(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_16(0x7, dst, src);
- }
-
- void cmpw(Register dst, Immediate src) {
- immediate_arithmetic_op_16(0x7, dst, src);
- }
-
- void cmpw(Register dst, const Operand& src) {
- arithmetic_op_16(0x3B, dst, src);
- }
-
- void cmpw(Register dst, Register src) {
- arithmetic_op_16(0x3B, dst, src);
- }
-
- void cmpw(const Operand& dst, Register src) {
- arithmetic_op_16(0x39, src, dst);
- }
-
- void cmpl(Register dst, Register src) {
- arithmetic_op_32(0x3B, dst, src);
- }
-
- void cmpl(Register dst, const Operand& src) {
- arithmetic_op_32(0x3B, dst, src);
- }
-
- void cmpl(const Operand& dst, Register src) {
- arithmetic_op_32(0x39, src, dst);
- }
-
- void cmpl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x7, dst, src);
- }
-
- void cmpl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x7, dst, src);
- }
-
- void cmpq(Register dst, Register src) {
- arithmetic_op(0x3B, dst, src);
- }
-
- void cmpq(Register dst, const Operand& src) {
- arithmetic_op(0x3B, dst, src);
- }
-
- void cmpq(const Operand& dst, Register src) {
- arithmetic_op(0x39, src, dst);
- }
-
- void cmpq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x7, dst, src);
- }
-
- void cmpq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x7, dst, src);
- }
-
- void and_(Register dst, Register src) {
- arithmetic_op(0x23, dst, src);
- }
-
- void and_(Register dst, const Operand& src) {
- arithmetic_op(0x23, dst, src);
- }
-
- void and_(const Operand& dst, Register src) {
- arithmetic_op(0x21, src, dst);
- }
-
- void and_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x4, dst, src);
- }
-
- void and_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x4, dst, src);
- }
-
- void andl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x4, dst, src);
- }
-
- void andl(Register dst, Register src) {
- arithmetic_op_32(0x23, dst, src);
- }
-
- void andl(Register dst, const Operand& src) {
- arithmetic_op_32(0x23, dst, src);
- }
-
- void andb(Register dst, Immediate src) {
- immediate_arithmetic_op_8(0x4, dst, src);
- }
-
- void decq(Register dst);
- void decq(const Operand& dst);
- void decl(Register dst);
- void decl(const Operand& dst);
- void decb(Register dst);
- void decb(const Operand& dst);
-
- // Sign-extends rax into rdx:rax.
- void cqo();
- // Sign-extends eax into edx:eax.
- void cdq();
-
- // Divide rdx:rax by src. Quotient in rax, remainder in rdx.
- void idivq(Register src);
- // Divide edx:eax by lower 32 bits of src. Quotient in eax, rem. in edx.
- void idivl(Register src);
-
- // Signed multiply instructions.
- void imul(Register src); // rdx:rax = rax * src.
- void imul(Register dst, Register src); // dst = dst * src.
- void imul(Register dst, const Operand& src); // dst = dst * src.
- void imul(Register dst, Register src, Immediate imm); // dst = src * imm.
- // Signed 32-bit multiply instructions.
- void imull(Register dst, Register src); // dst = dst * src.
- void imull(Register dst, const Operand& src); // dst = dst * src.
- void imull(Register dst, Register src, Immediate imm); // dst = src * imm.
-
- void incq(Register dst);
- void incq(const Operand& dst);
- void incl(Register dst);
- void incl(const Operand& dst);
-
- void lea(Register dst, const Operand& src);
- void leal(Register dst, const Operand& src);
-
- // Multiply rax by src, put the result in rdx:rax.
- void mul(Register src);
-
- void neg(Register dst);
- void neg(const Operand& dst);
- void negl(Register dst);
-
- void not_(Register dst);
- void not_(const Operand& dst);
- void notl(Register dst);
-
- void or_(Register dst, Register src) {
- arithmetic_op(0x0B, dst, src);
- }
-
- void orl(Register dst, Register src) {
- arithmetic_op_32(0x0B, dst, src);
- }
-
- void or_(Register dst, const Operand& src) {
- arithmetic_op(0x0B, dst, src);
- }
-
- void orl(Register dst, const Operand& src) {
- arithmetic_op_32(0x0B, dst, src);
- }
-
- void or_(const Operand& dst, Register src) {
- arithmetic_op(0x09, src, dst);
- }
-
- void or_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x1, dst, src);
- }
-
- void orl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x1, dst, src);
- }
-
- void or_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x1, dst, src);
- }
-
- void orl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x1, dst, src);
- }
-
-
- void rcl(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x2);
- }
-
- void rol(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x0);
- }
-
- void rcr(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x3);
- }
-
- void ror(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x1);
- }
-
- // Shifts dst:src left by cl bits, affecting only dst.
- void shld(Register dst, Register src);
-
- // Shifts src:dst right by cl bits, affecting only dst.
- void shrd(Register dst, Register src);
-
- // Shifts dst right, duplicating sign bit, by shift_amount bits.
- // Shifting by 1 is handled efficiently.
- void sar(Register dst, Immediate shift_amount) {
- shift(dst, shift_amount, 0x7);
- }
-
- // Shifts dst right, duplicating sign bit, by shift_amount bits.
- // Shifting by 1 is handled efficiently.
- void sarl(Register dst, Immediate shift_amount) {
- shift_32(dst, shift_amount, 0x7);
- }
-
- // Shifts dst right, duplicating sign bit, by cl % 64 bits.
- void sar_cl(Register dst) {
- shift(dst, 0x7);
- }
-
- // Shifts dst right, duplicating sign bit, by cl % 64 bits.
- void sarl_cl(Register dst) {
- shift_32(dst, 0x7);
- }
-
- void shl(Register dst, Immediate shift_amount) {
- shift(dst, shift_amount, 0x4);
- }
-
- void shl_cl(Register dst) {
- shift(dst, 0x4);
- }
-
- void shll_cl(Register dst) {
- shift_32(dst, 0x4);
- }
-
- void shll(Register dst, Immediate shift_amount) {
- shift_32(dst, shift_amount, 0x4);
- }
-
- void shr(Register dst, Immediate shift_amount) {
- shift(dst, shift_amount, 0x5);
- }
-
- void shr_cl(Register dst) {
- shift(dst, 0x5);
- }
-
- void shrl_cl(Register dst) {
- shift_32(dst, 0x5);
- }
-
- void shrl(Register dst, Immediate shift_amount) {
- shift_32(dst, shift_amount, 0x5);
- }
-
- void store_rax(void* dst, RelocInfo::Mode mode);
- void store_rax(ExternalReference ref);
-
- void subq(Register dst, Register src) {
- arithmetic_op(0x2B, dst, src);
- }
-
- void subq(Register dst, const Operand& src) {
- arithmetic_op(0x2B, dst, src);
- }
-
- void subq(const Operand& dst, Register src) {
- arithmetic_op(0x29, src, dst);
- }
-
- void subq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x5, dst, src);
- }
-
- void subq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x5, dst, src);
- }
-
- void subl(Register dst, Register src) {
- arithmetic_op_32(0x2B, dst, src);
- }
-
- void subl(Register dst, const Operand& src) {
- arithmetic_op_32(0x2B, dst, src);
- }
-
- void subl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x5, dst, src);
- }
-
- void subl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x5, dst, src);
- }
-
- void subb(Register dst, Immediate src) {
- immediate_arithmetic_op_8(0x5, dst, src);
- }
-
- void testb(Register dst, Register src);
- void testb(Register reg, Immediate mask);
- void testb(const Operand& op, Immediate mask);
- void testb(const Operand& op, Register reg);
- void testl(Register dst, Register src);
- void testl(Register reg, Immediate mask);
- void testl(const Operand& op, Immediate mask);
- void testq(const Operand& op, Register reg);
- void testq(Register dst, Register src);
- void testq(Register dst, Immediate mask);
-
- void xor_(Register dst, Register src) {
- if (dst.code() == src.code()) {
- arithmetic_op_32(0x33, dst, src);
- } else {
- arithmetic_op(0x33, dst, src);
- }
- }
-
- void xorl(Register dst, Register src) {
- arithmetic_op_32(0x33, dst, src);
- }
-
- void xorl(Register dst, const Operand& src) {
- arithmetic_op_32(0x33, dst, src);
- }
-
- void xorl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x6, dst, src);
- }
-
- void xorl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x6, dst, src);
- }
-
- void xor_(Register dst, const Operand& src) {
- arithmetic_op(0x33, dst, src);
- }
-
- void xor_(const Operand& dst, Register src) {
- arithmetic_op(0x31, src, dst);
- }
-
- void xor_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x6, dst, src);
- }
-
- void xor_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x6, dst, src);
- }
-
- // Bit operations.
- void bt(const Operand& dst, Register src);
- void bts(const Operand& dst, Register src);
-
- // Miscellaneous
- void clc();
- void cld();
- void cpuid();
- void hlt();
- void int3();
- void nop();
- void nop(int n);
- void rdtsc();
- void ret(int imm16);
- void setcc(Condition cc, Register reg);
-
- // Label operations & relative jumps (PPUM Appendix D)
- //
- // Takes a branch opcode (cc) and a label (L) and generates
- // either a backward branch or a forward branch and links it
- // to the label fixup chain. Usage:
- //
- // Label L; // unbound label
- // j(cc, &L); // forward branch to unbound label
- // bind(&L); // bind label to the current pc
- // j(cc, &L); // backward branch to bound label
- // bind(&L); // illegal: a label may be bound only once
- //
- // Note: The same Label can be used for forward and backward branches
- // but it may be bound only once.
-
- void bind(Label* L); // binds an unbound label L to the current code position
- void bind(NearLabel* L);
-
- // Calls
- // Call near relative 32-bit displacement, relative to next instruction.
- void call(Label* L);
- void call(Handle<Code> target, RelocInfo::Mode rmode);
-
- // Calls directly to the given address using a relative offset.
- // Should only ever be used in Code objects for calls within the
- // same Code object. Should not be used when generating new code (use labels),
- // but only when patching existing code.
- void call(Address target);
-
- // Call near absolute indirect, address in register
- void call(Register adr);
-
- // Call near indirect
- void call(const Operand& operand);
-
- // Jumps
- // Jump short or near relative.
- // Use a 32-bit signed displacement.
- void jmp(Label* L); // unconditional jump to L
- void jmp(Handle<Code> target, RelocInfo::Mode rmode);
-
- // Jump near absolute indirect (r64)
- void jmp(Register adr);
-
- // Jump near absolute indirect (m64)
- void jmp(const Operand& src);
-
- // Short jump
- void jmp(NearLabel* L);
-
- // Conditional jumps
- void j(Condition cc, Label* L);
- void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
-
- // Conditional short jump
- void j(Condition cc, NearLabel* L, Hint hint = no_hint);
-
- // Floating-point operations
- void fld(int i);
-
- void fld1();
- void fldz();
- void fldpi();
- void fldln2();
-
- void fld_s(const Operand& adr);
- void fld_d(const Operand& adr);
-
- void fstp_s(const Operand& adr);
- void fstp_d(const Operand& adr);
- void fstp(int index);
-
- void fild_s(const Operand& adr);
- void fild_d(const Operand& adr);
-
- void fist_s(const Operand& adr);
-
- void fistp_s(const Operand& adr);
- void fistp_d(const Operand& adr);
-
- void fisttp_s(const Operand& adr);
- void fisttp_d(const Operand& adr);
-
- void fabs();
- void fchs();
-
- void fadd(int i);
- void fsub(int i);
- void fmul(int i);
- void fdiv(int i);
-
- void fisub_s(const Operand& adr);
-
- void faddp(int i = 1);
- void fsubp(int i = 1);
- void fsubrp(int i = 1);
- void fmulp(int i = 1);
- void fdivp(int i = 1);
- void fprem();
- void fprem1();
-
- void fxch(int i = 1);
- void fincstp();
- void ffree(int i = 0);
-
- void ftst();
- void fucomp(int i);
- void fucompp();
- void fucomi(int i);
- void fucomip();
-
- void fcompp();
- void fnstsw_ax();
- void fwait();
- void fnclex();
-
- void fsin();
- void fcos();
- void fyl2x();
-
- void frndint();
-
- void sahf();
-
- // SSE2 instructions
- void movd(XMMRegister dst, Register src);
- void movd(Register dst, XMMRegister src);
- void movq(XMMRegister dst, Register src);
- void movq(Register dst, XMMRegister src);
- void extractps(Register dst, XMMRegister src, byte imm8);
-
- void movsd(const Operand& dst, XMMRegister src);
- void movsd(XMMRegister dst, XMMRegister src);
- void movsd(XMMRegister dst, const Operand& src);
-
- void movdqa(const Operand& dst, XMMRegister src);
- void movdqa(XMMRegister dst, const Operand& src);
-
- void movss(XMMRegister dst, const Operand& src);
- void movss(const Operand& dst, XMMRegister src);
-
- void cvttss2si(Register dst, const Operand& src);
- void cvttss2si(Register dst, XMMRegister src);
- void cvttsd2si(Register dst, const Operand& src);
- void cvttsd2si(Register dst, XMMRegister src);
- void cvttsd2siq(Register dst, XMMRegister src);
-
- void cvtlsi2sd(XMMRegister dst, const Operand& src);
- void cvtlsi2sd(XMMRegister dst, Register src);
- void cvtqsi2sd(XMMRegister dst, const Operand& src);
- void cvtqsi2sd(XMMRegister dst, Register src);
-
- void cvtlsi2ss(XMMRegister dst, Register src);
-
- void cvtss2sd(XMMRegister dst, XMMRegister src);
- void cvtss2sd(XMMRegister dst, const Operand& src);
- void cvtsd2ss(XMMRegister dst, XMMRegister src);
-
- void cvtsd2si(Register dst, XMMRegister src);
- void cvtsd2siq(Register dst, XMMRegister src);
-
- void addsd(XMMRegister dst, XMMRegister src);
- void subsd(XMMRegister dst, XMMRegister src);
- void mulsd(XMMRegister dst, XMMRegister src);
- void divsd(XMMRegister dst, XMMRegister src);
-
- void andpd(XMMRegister dst, XMMRegister src);
- void orpd(XMMRegister dst, XMMRegister src);
- void xorpd(XMMRegister dst, XMMRegister src);
- void sqrtsd(XMMRegister dst, XMMRegister src);
-
- void ucomisd(XMMRegister dst, XMMRegister src);
- void ucomisd(XMMRegister dst, const Operand& src);
-
- void movmskpd(Register dst, XMMRegister src);
-
- // The first argument is the reg field, the second argument is the r/m field.
- void emit_sse_operand(XMMRegister dst, XMMRegister src);
- void emit_sse_operand(XMMRegister reg, const Operand& adr);
- void emit_sse_operand(XMMRegister dst, Register src);
- void emit_sse_operand(Register dst, XMMRegister src);
-
- // Debugging
- void Print();
-
- // Check the code size generated from label to here.
- int SizeOfCodeGeneratedSince(Label* l) { return pc_offset() - l->pos(); }
-
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
-
- // Mark address of a debug break slot.
- void RecordDebugBreakSlot();
-
- // Record a comment relocation entry that can be used by a disassembler.
- // Use --code-comments to enable.
- void RecordComment(const char* msg, bool force = false);
-
- // Writes a single word of data in the code stream.
- // Used for inline tables, e.g., jump-tables.
- void db(uint8_t data);
- void dd(uint32_t data);
-
- int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
-
- PositionsRecorder* positions_recorder() { return &positions_recorder_; }
-
- // Check if there is less than kGap bytes available in the buffer.
- // If this is the case, we need to grow the buffer before emitting
- // an instruction or relocation information.
- inline bool buffer_overflow() const {
- return pc_ >= reloc_info_writer.pos() - kGap;
- }
-
- // Get the number of bytes available in the buffer.
- inline int available_space() const {
- return static_cast<int>(reloc_info_writer.pos() - pc_);
- }
-
- static bool IsNop(Address addr) { return *addr == 0x90; }
-
- // Avoid overflows for displacements etc.
- static const int kMaximalBufferSize = 512*MB;
- static const int kMinimalBufferSize = 4*KB;
-
- protected:
- bool emit_debug_code() const { return emit_debug_code_; }
-
- private:
- byte* addr_at(int pos) { return buffer_ + pos; }
- byte byte_at(int pos) { return buffer_[pos]; }
- void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
- uint32_t long_at(int pos) {
- return *reinterpret_cast<uint32_t*>(addr_at(pos));
- }
- void long_at_put(int pos, uint32_t x) {
- *reinterpret_cast<uint32_t*>(addr_at(pos)) = x;
- }
-
- // code emission
- void GrowBuffer();
-
- void emit(byte x) { *pc_++ = x; }
- inline void emitl(uint32_t x);
- inline void emitq(uint64_t x, RelocInfo::Mode rmode);
- inline void emitw(uint16_t x);
- inline void emit_code_target(Handle<Code> target, RelocInfo::Mode rmode);
- void emit(Immediate x) { emitl(x.value_); }
-
- // Emits a REX prefix that encodes a 64-bit operand size and
- // the top bit of both register codes.
- // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
- // REX.W is set.
- inline void emit_rex_64(XMMRegister reg, Register rm_reg);
- inline void emit_rex_64(Register reg, XMMRegister rm_reg);
- inline void emit_rex_64(Register reg, Register rm_reg);
-
- // Emits a REX prefix that encodes a 64-bit operand size and
- // the top bit of the destination, index, and base register codes.
- // The high bit of reg is used for REX.R, the high bit of op's base
- // register is used for REX.B, and the high bit of op's index register
- // is used for REX.X. REX.W is set.
- inline void emit_rex_64(Register reg, const Operand& op);
- inline void emit_rex_64(XMMRegister reg, const Operand& op);
-
- // Emits a REX prefix that encodes a 64-bit operand size and
- // the top bit of the register code.
- // The high bit of register is used for REX.B.
- // REX.W is set and REX.R and REX.X are clear.
- inline void emit_rex_64(Register rm_reg);
-
- // Emits a REX prefix that encodes a 64-bit operand size and
- // the top bit of the index and base register codes.
- // The high bit of op's base register is used for REX.B, and the high
- // bit of op's index register is used for REX.X.
- // REX.W is set and REX.R clear.
- inline void emit_rex_64(const Operand& op);
-
- // Emit a REX prefix that only sets REX.W to choose a 64-bit operand size.
- void emit_rex_64() { emit(0x48); }
-
- // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
- // REX.W is clear.
- inline void emit_rex_32(Register reg, Register rm_reg);
-
- // The high bit of reg is used for REX.R, the high bit of op's base
- // register is used for REX.B, and the high bit of op's index register
- // is used for REX.X. REX.W is cleared.
- inline void emit_rex_32(Register reg, const Operand& op);
-
- // High bit of rm_reg goes to REX.B.
- // REX.W, REX.R and REX.X are clear.
- inline void emit_rex_32(Register rm_reg);
-
- // High bit of base goes to REX.B and high bit of index to REX.X.
- // REX.W and REX.R are clear.
- inline void emit_rex_32(const Operand& op);
-
- // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
- // REX.W is cleared. If no REX bits are set, no byte is emitted.
- inline void emit_optional_rex_32(Register reg, Register rm_reg);
-
- // The high bit of reg is used for REX.R, the high bit of op's base
- // register is used for REX.B, and the high bit of op's index register
- // is used for REX.X. REX.W is cleared. If no REX bits are set, nothing
- // is emitted.
- inline void emit_optional_rex_32(Register reg, const Operand& op);
-
- // As for emit_optional_rex_32(Register, Register), except that
- // the registers are XMM registers.
- inline void emit_optional_rex_32(XMMRegister reg, XMMRegister base);
-
- // As for emit_optional_rex_32(Register, Register), except that
- // one of the registers is an XMM registers.
- inline void emit_optional_rex_32(XMMRegister reg, Register base);
-
- // As for emit_optional_rex_32(Register, Register), except that
- // one of the registers is an XMM registers.
- inline void emit_optional_rex_32(Register reg, XMMRegister base);
-
- // As for emit_optional_rex_32(Register, const Operand&), except that
- // the register is an XMM register.
- inline void emit_optional_rex_32(XMMRegister reg, const Operand& op);
-
- // Optionally do as emit_rex_32(Register) if the register number has
- // the high bit set.
- inline void emit_optional_rex_32(Register rm_reg);
-
- // Optionally do as emit_rex_32(const Operand&) if the operand register
- // numbers have a high bit set.
- inline void emit_optional_rex_32(const Operand& op);
-
-
- // Emit the ModR/M byte, and optionally the SIB byte and
- // 1- or 4-byte offset for a memory operand. Also encodes
- // the second operand of the operation, a register or operation
- // subcode, into the reg field of the ModR/M byte.
- void emit_operand(Register reg, const Operand& adr) {
- emit_operand(reg.low_bits(), adr);
- }
-
- // Emit the ModR/M byte, and optionally the SIB byte and
- // 1- or 4-byte offset for a memory operand. Also used to encode
- // a three-bit opcode extension into the ModR/M byte.
- void emit_operand(int rm, const Operand& adr);
-
- // Emit a ModR/M byte with registers coded in the reg and rm_reg fields.
- void emit_modrm(Register reg, Register rm_reg) {
- emit(0xC0 | reg.low_bits() << 3 | rm_reg.low_bits());
- }
-
- // Emit a ModR/M byte with an operation subcode in the reg field and
- // a register in the rm_reg field.
- void emit_modrm(int code, Register rm_reg) {
- ASSERT(is_uint3(code));
- emit(0xC0 | code << 3 | rm_reg.low_bits());
- }
-
- // Emit the code-object-relative offset of the label's position
- inline void emit_code_relative_offset(Label* label);
-
- // Emit machine code for one of the operations ADD, ADC, SUB, SBC,
- // AND, OR, XOR, or CMP. The encodings of these operations are all
- // similar, differing just in the opcode or in the reg field of the
- // ModR/M byte.
- void arithmetic_op_16(byte opcode, Register reg, Register rm_reg);
- void arithmetic_op_16(byte opcode, Register reg, const Operand& rm_reg);
- void arithmetic_op_32(byte opcode, Register reg, Register rm_reg);
- void arithmetic_op_32(byte opcode, Register reg, const Operand& rm_reg);
- void arithmetic_op(byte opcode, Register reg, Register rm_reg);
- void arithmetic_op(byte opcode, Register reg, const Operand& rm_reg);
- void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
- void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
- // Operate on a byte in memory or register.
- void immediate_arithmetic_op_8(byte subcode,
- Register dst,
- Immediate src);
- void immediate_arithmetic_op_8(byte subcode,
- const Operand& dst,
- Immediate src);
- // Operate on a word in memory or register.
- void immediate_arithmetic_op_16(byte subcode,
- Register dst,
- Immediate src);
- void immediate_arithmetic_op_16(byte subcode,
- const Operand& dst,
- Immediate src);
- // Operate on a 32-bit word in memory or register.
- void immediate_arithmetic_op_32(byte subcode,
- Register dst,
- Immediate src);
- void immediate_arithmetic_op_32(byte subcode,
- const Operand& dst,
- Immediate src);
-
- // Emit machine code for a shift operation.
- void shift(Register dst, Immediate shift_amount, int subcode);
- void shift_32(Register dst, Immediate shift_amount, int subcode);
- // Shift dst by cl % 64 bits.
- void shift(Register dst, int subcode);
- void shift_32(Register dst, int subcode);
-
- void emit_farith(int b1, int b2, int i);
-
- // labels
- // void print(Label* L);
- void bind_to(Label* L, int pos);
-
- // record reloc info for current pc_
- void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
-
- friend class CodePatcher;
- friend class EnsureSpace;
- friend class RegExpMacroAssemblerX64;
-
- // Code buffer:
- // The buffer into which code and relocation info are generated.
- byte* buffer_;
- int buffer_size_;
- // True if the assembler owns the buffer, false if buffer is external.
- bool own_buffer_;
-
- // code generation
- byte* pc_; // the program counter; moves forward
- RelocInfoWriter reloc_info_writer;
-
- List< Handle<Code> > code_targets_;
- // push-pop elimination
- byte* last_pc_;
-
- PositionsRecorder positions_recorder_;
-
- bool emit_debug_code_;
-
- friend class PositionsRecorder;
-};
-
-
-// Helper class that ensures that there is enough space for generating
-// instructions and relocation information. The constructor makes
-// sure that there is enough space and (in debug mode) the destructor
-// checks that we did not generate too much.
-class EnsureSpace BASE_EMBEDDED {
- public:
- explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
- if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
-#ifdef DEBUG
- space_before_ = assembler_->available_space();
-#endif
- }
-
-#ifdef DEBUG
- ~EnsureSpace() {
- int bytes_generated = space_before_ - assembler_->available_space();
- ASSERT(bytes_generated < assembler_->kGap);
- }
-#endif
-
- private:
- Assembler* assembler_;
-#ifdef DEBUG
- int space_before_;
-#endif
-};
-
-} } // namespace v8::internal
-
-#endif // V8_X64_ASSEMBLER_X64_H_
diff --git a/src/3rdparty/v8/src/x64/builtins-x64.cc b/src/3rdparty/v8/src/x64/builtins-x64.cc
deleted file mode 100644
index 21d3e54..0000000
--- a/src/3rdparty/v8/src/x64/builtins-x64.cc
+++ /dev/null
@@ -1,1493 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
- CFunctionId id,
- BuiltinExtraArguments extra_args) {
- // ----------- S t a t e -------------
- // -- rax : number of arguments excluding receiver
- // -- rdi : called function (only guaranteed when
- // extra_args requires it)
- // -- rsi : context
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
- // -- ...
- // -- rsp[8 * argc] : first argument (argc == rax)
- // -- rsp[8 * (argc +1)] : receiver
- // -----------------------------------
-
- // Insert extra arguments.
- int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- __ pop(kScratchRegister); // Save return address.
- __ push(rdi);
- __ push(kScratchRegister); // Restore return address.
- } else {
- ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
- }
-
- // JumpToExternalReference expects rax to contain the number of arguments
- // including the receiver and the extra arguments.
- __ addq(rax, Immediate(num_extra_args + 1));
- __ JumpToExternalReference(ExternalReference(id, masm->isolate()), 1);
-}
-
-
-void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax: number of arguments
- // -- rdi: constructor function
- // -----------------------------------
-
- Label non_function_call;
- // Check that function is not a smi.
- __ JumpIfSmi(rdi, &non_function_call);
- // Check that function is a JSFunction.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &non_function_call);
-
- // Jump to the function-specific construct stub.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kConstructStubOffset));
- __ lea(rbx, FieldOperand(rbx, Code::kHeaderSize));
- __ jmp(rbx);
-
- // rdi: called object
- // rax: number of arguments
- __ bind(&non_function_call);
- // Set expected number of arguments to zero (not changing rax).
- __ movq(rbx, Immediate(0));
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
-
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool count_constructions) {
- // Should never count constructions for api objects.
- ASSERT(!is_api_function || !count_constructions);
-
- // Enter a construct frame.
- __ EnterConstructFrame();
-
- // Store a smi-tagged arguments count on the stack.
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
-
- // Push the function to invoke on the stack.
- __ push(rdi);
-
- // Try to allocate the object without transitioning into C code. If any of the
- // preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- Label undo_allocation;
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ movq(kScratchRegister, debug_step_in_fp);
- __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
- __ j(not_equal, &rt_call);
-#endif
-
- // Verified that the constructor is a JSFunction.
- // Load the initial map and verify that it is in fact a map.
- // rdi: constructor
- __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi
- ASSERT(kSmiTag == 0);
- __ JumpIfSmi(rax, &rt_call);
- // rdi: constructor
- // rax: initial map (if proven valid below)
- __ CmpObjectType(rax, MAP_TYPE, rbx);
- __ j(not_equal, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see comments
- // in Runtime_NewObject in runtime.cc). In which case the initial map's
- // instance type would be JS_FUNCTION_TYPE.
- // rdi: constructor
- // rax: initial map
- __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
- __ j(equal, &rt_call);
-
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ decb(FieldOperand(rcx, SharedFunctionInfo::kConstructionCountOffset));
- __ j(not_zero, &allocate);
-
- __ push(rax);
- __ push(rdi);
-
- __ push(rdi); // constructor
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(rdi);
- __ pop(rax);
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
- __ shl(rdi, Immediate(kPointerSizeLog2));
- // rdi: size of new object
- __ AllocateInNewSpace(rdi,
- rbx,
- rdi,
- no_reg,
- &rt_call,
- NO_ALLOCATION_FLAGS);
- // Allocated the JSObject, now initialize the fields.
- // rax: initial map
- // rbx: JSObject (not HeapObject tagged - the actual address).
- // rdi: start of next object
- __ movq(Operand(rbx, JSObject::kMapOffset), rax);
- __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
- __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
- // Set extra fields in the newly allocated object.
- // rax: initial map
- // rbx: JSObject
- // rdi: start of next object
- { Label loop, entry;
- // To allow for truncation.
- if (count_constructions) {
- __ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
- } else {
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- }
- __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(Operand(rcx, 0), rdx);
- __ addq(rcx, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpq(rcx, rdi);
- __ j(less, &loop);
- }
-
- // Add the object tag to make the JSObject real, so that we can continue and
- // jump into the continuation code at any time from now on. Any failures
- // need to undo the allocation, so that the heap is in a consistent state
- // and verifiable.
- // rax: initial map
- // rbx: JSObject
- // rdi: start of next object
- __ or_(rbx, Immediate(kHeapObjectTag));
-
- // Check if a non-empty properties array is needed.
- // Allocate and initialize a FixedArray if it is.
- // rax: initial map
- // rbx: JSObject
- // rdi: start of next object
- // Calculate total properties described map.
- __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
- __ movzxbq(rcx, FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
- __ addq(rdx, rcx);
- // Calculate unused properties past the end of the in-object properties.
- __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
- __ subq(rdx, rcx);
- // Done if no extra properties are to be allocated.
- __ j(zero, &allocated);
- __ Assert(positive, "Property allocation count failed.");
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // rbx: JSObject
- // rdi: start of next object (will be start of FixedArray)
- // rdx: number of elements in properties array
- __ AllocateInNewSpace(FixedArray::kHeaderSize,
- times_pointer_size,
- rdx,
- rdi,
- rax,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
-
- // Initialize the FixedArray.
- // rbx: JSObject
- // rdi: FixedArray
- // rdx: number of elements
- // rax: start of next object
- __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
- __ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map
- __ Integer32ToSmi(rdx, rdx);
- __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
-
- // Initialize the fields to undefined.
- // rbx: JSObject
- // rdi: FixedArray
- // rax: start of next object
- // rdx: number of elements
- { Label loop, entry;
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(Operand(rcx, 0), rdx);
- __ addq(rcx, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpq(rcx, rax);
- __ j(below, &loop);
- }
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject
- // rbx: JSObject
- // rdi: FixedArray
- __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag
- __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
-
-
- // Continue with JSObject being successfully allocated
- // rbx: JSObject
- __ jmp(&allocated);
-
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // rbx: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(rbx);
- }
-
- // Allocate the new receiver object using the runtime call.
- // rdi: function (constructor)
- __ bind(&rt_call);
- // Must restore rdi (constructor) before calling runtime.
- __ movq(rdi, Operand(rsp, 0));
- __ push(rdi);
- __ CallRuntime(Runtime::kNewObject, 1);
- __ movq(rbx, rax); // store result in rbx
-
- // New object allocated.
- // rbx: newly allocated object
- __ bind(&allocated);
- // Retrieve the function from the stack.
- __ pop(rdi);
-
- // Retrieve smi-tagged arguments count from the stack.
- __ movq(rax, Operand(rsp, 0));
- __ SmiToInteger32(rax, rax);
-
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ push(rbx);
- __ push(rbx);
-
- // Setup pointer to last argument.
- __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ movq(rcx, rax);
- __ jmp(&entry);
- __ bind(&loop);
- __ push(Operand(rbx, rcx, times_pointer_size, 0));
- __ bind(&entry);
- __ decq(rcx);
- __ j(greater_equal, &loop);
-
- // Call the function.
- if (is_api_function) {
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION);
- } else {
- ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION);
- }
-
- // Restore context from the frame.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(rax, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(above_equal, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ movq(rax, Operand(rsp, 0));
-
- // Restore the arguments count and leave the construct frame.
- __ bind(&exit);
- __ movq(rbx, Operand(rsp, kPointerSize)); // get arguments count
- __ LeaveConstructFrame();
-
- // Remove caller arguments from the stack and return.
- __ pop(rcx);
- SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
- __ push(rcx);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1);
- __ ret(0);
-}
-
-
-void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
-}
-
-
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
-}
-
-
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
-}
-
-
-static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
- bool is_construct) {
- // Expects five C++ function parameters.
- // - Address entry (ignored)
- // - JSFunction* function (
- // - Object* receiver
- // - int argc
- // - Object*** argv
- // (see Handle::Invoke in execution.cc).
-
- // Platform specific argument handling. After this, the stack contains
- // an internal frame and the pushed function and receiver, and
- // register rax and rbx holds the argument count and argument array,
- // while rdi holds the function pointer and rsi the context.
-#ifdef _WIN64
- // MSVC parameters in:
- // rcx : entry (ignored)
- // rdx : function
- // r8 : receiver
- // r9 : argc
- // [rsp+0x20] : argv
-
- // Clear the context before we push it when entering the JS frame.
- __ Set(rsi, 0);
- __ EnterInternalFrame();
-
- // Load the function context into rsi.
- __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
-
- // Push the function and the receiver onto the stack.
- __ push(rdx);
- __ push(r8);
-
- // Load the number of arguments and setup pointer to the arguments.
- __ movq(rax, r9);
- // Load the previous frame pointer to access C argument on stack
- __ movq(kScratchRegister, Operand(rbp, 0));
- __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
- // Load the function pointer into rdi.
- __ movq(rdi, rdx);
-#else // _WIN64
- // GCC parameters in:
- // rdi : entry (ignored)
- // rsi : function
- // rdx : receiver
- // rcx : argc
- // r8 : argv
-
- __ movq(rdi, rsi);
- // rdi : function
-
- // Clear the context before we push it when entering the JS frame.
- __ Set(rsi, 0);
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Push the function and receiver and setup the context.
- __ push(rdi);
- __ push(rdx);
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Load the number of arguments and setup pointer to the arguments.
- __ movq(rax, rcx);
- __ movq(rbx, r8);
-#endif // _WIN64
-
- // Current stack contents:
- // [rsp + 2 * kPointerSize ... ]: Internal frame
- // [rsp + kPointerSize] : function
- // [rsp] : receiver
- // Current register contents:
- // rax : argc
- // rbx : argv
- // rsi : context
- // rdi : function
-
- // Copy arguments to the stack in a loop.
- // Register rbx points to array of pointers to handle locations.
- // Push the values of these handles.
- Label loop, entry;
- __ Set(rcx, 0); // Set loop variable to 0.
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
- __ push(Operand(kScratchRegister, 0)); // dereference handle
- __ addq(rcx, Immediate(1));
- __ bind(&entry);
- __ cmpq(rcx, rax);
- __ j(not_equal, &loop);
-
- // Invoke the code.
- if (is_construct) {
- // Expects rdi to hold function pointer.
- __ Call(masm->isolate()->builtins()->JSConstructCall(),
- RelocInfo::CODE_TARGET);
- } else {
- ParameterCount actual(rax);
- // Function must be in rdi.
- __ InvokeFunction(rdi, actual, CALL_FUNCTION);
- }
-
- // Exit the JS frame. Notice that this also removes the empty
- // context and the function left on the stack by the code
- // invocation.
- __ LeaveInternalFrame();
- // TODO(X64): Is argument correct? Is there a receiver to remove?
- __ ret(1 * kPointerSize); // remove receiver
-}
-
-
-void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, false);
-}
-
-
-void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, true);
-}
-
-
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
-
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyCompile, 1);
- __ pop(rdi);
-
- // Tear down temporary frame.
- __ LeaveInternalFrame();
-
- // Do a tail-call of the compiled function.
- __ lea(rcx, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rcx);
-}
-
-
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
-
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyRecompile, 1);
-
- // Restore function and tear down temporary frame.
- __ pop(rdi);
- __ LeaveInternalFrame();
-
- // Do a tail-call of the compiled function.
- __ lea(rcx, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rcx);
-}
-
-
-static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
- Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Pass the deoptimization type to the runtime system.
- __ Push(Smi::FromInt(static_cast<int>(type)));
-
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
-
- // Get the full codegen state from the stack and untag it.
- __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
-
- // Switch on the state.
- NearLabel not_no_registers, not_tos_rax;
- __ cmpq(rcx, Immediate(FullCodeGenerator::NO_REGISTERS));
- __ j(not_equal, &not_no_registers);
- __ ret(1 * kPointerSize); // Remove state.
-
- __ bind(&not_no_registers);
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
- __ cmpq(rcx, Immediate(FullCodeGenerator::TOS_REG));
- __ j(not_equal, &not_tos_rax);
- __ ret(2 * kPointerSize); // Remove state, rax.
-
- __ bind(&not_tos_rax);
- __ Abort("no cases left");
-}
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ Pushad();
- __ EnterInternalFrame();
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- __ LeaveInternalFrame();
- __ Popad();
- __ ret(0);
-}
-
-
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
- // Stack Layout:
- // rsp[0]: Return address
- // rsp[1]: Argument n
- // rsp[2]: Argument n-1
- // ...
- // rsp[n]: Argument 1
- // rsp[n+1]: Receiver (function to call)
- //
- // rax contains the number of arguments, n, not counting the receiver.
- //
- // 1. Make sure we have at least one argument.
- { Label done;
- __ testq(rax, rax);
- __ j(not_zero, &done);
- __ pop(rbx);
- __ Push(FACTORY->undefined_value());
- __ push(rbx);
- __ incq(rax);
- __ bind(&done);
- }
-
- // 2. Get the function to call (passed as receiver) from the stack, check
- // if it is a function.
- Label non_function;
- // The function to call is at position n+1 on the stack.
- __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
- __ JumpIfSmi(rdi, &non_function);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &non_function);
-
- // 3a. Patch the first argument if necessary when calling a function.
- Label shift_arguments;
- { Label convert_to_object, use_global_receiver, patch_receiver;
- // Change context eagerly in case we need the global receiver.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(rbx, SharedFunctionInfo::kStrictModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, &shift_arguments);
-
- // Compute the receiver in non-strict mode.
- __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
- __ JumpIfSmi(rbx, &convert_to_object);
-
- __ CompareRoot(rbx, Heap::kNullValueRootIndex);
- __ j(equal, &use_global_receiver);
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &use_global_receiver);
-
- __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &convert_to_object);
- __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
- __ j(below_equal, &shift_arguments);
-
- __ bind(&convert_to_object);
- __ EnterInternalFrame(); // In order to preserve argument count.
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
-
- __ push(rbx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
-
- __ pop(rax);
- __ SmiToInteger32(rax, rax);
- __ LeaveInternalFrame();
- // Restore the function to rdi.
- __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
- __ jmp(&patch_receiver);
-
- // Use the global receiver object from the called function as the
- // receiver.
- __ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(rsi, kGlobalIndex));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
- __ movq(rbx, FieldOperand(rbx, kGlobalIndex));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
-
- __ bind(&patch_receiver);
- __ movq(Operand(rsp, rax, times_pointer_size, 0), rbx);
-
- __ jmp(&shift_arguments);
- }
-
-
- // 3b. Patch the first argument when calling a non-function. The
- // CALL_NON_FUNCTION builtin expects the non-function callee as
- // receiver, so overwrite the first argument which will ultimately
- // become the receiver.
- __ bind(&non_function);
- __ movq(Operand(rsp, rax, times_pointer_size, 0), rdi);
- __ Set(rdi, 0);
-
- // 4. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- __ bind(&shift_arguments);
- { Label loop;
- __ movq(rcx, rax);
- __ bind(&loop);
- __ movq(rbx, Operand(rsp, rcx, times_pointer_size, 0));
- __ movq(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
- __ decq(rcx);
- __ j(not_sign, &loop); // While non-negative (to copy return address).
- __ pop(rbx); // Discard copy of return address.
- __ decq(rax); // One fewer argument (first argument is new receiver).
- }
-
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
- { Label function;
- __ testq(rdi, rdi);
- __ j(not_zero, &function);
- __ Set(rbx, 0);
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- __ bind(&function);
- }
-
- // 5b. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing. If so, jump
- // (tail-call) to the code in register edx without checking arguments.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movsxlq(rbx,
- FieldOperand(rdx,
- SharedFunctionInfo::kFormalParameterCountOffset));
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ cmpq(rax, rbx);
- __ j(not_equal,
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- ParameterCount expected(0);
- __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION);
-}
-
-
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- // Stack at entry:
- // rsp: return address
- // rsp+8: arguments
- // rsp+16: receiver ("this")
- // rsp+24: function
- __ EnterInternalFrame();
- // Stack frame:
- // rbp: Old base pointer
- // rbp[1]: return address
- // rbp[2]: function arguments
- // rbp[3]: receiver
- // rbp[4]: function
- static const int kArgumentsOffset = 2 * kPointerSize;
- static const int kReceiverOffset = 3 * kPointerSize;
- static const int kFunctionOffset = 4 * kPointerSize;
- __ push(Operand(rbp, kFunctionOffset));
- __ push(Operand(rbp, kArgumentsOffset));
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying need to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
- __ movq(rcx, rsp);
- // Make rcx the space we have left. The stack might already be overflowed
- // here which will cause rcx to become negative.
- __ subq(rcx, kScratchRegister);
- // Make rdx the space we need for the array when it is unrolled onto the
- // stack.
- __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmpq(rcx, rdx);
- __ j(greater, &okay); // Signed comparison.
-
- // Out of stack space.
- __ push(Operand(rbp, kFunctionOffset));
- __ push(rax);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- __ bind(&okay);
- // End of stack check.
-
- // Push current index and limit.
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ push(rax); // limit
- __ push(Immediate(0)); // index
-
- // Change context eagerly to get the right global object if
- // necessary.
- __ movq(rdi, Operand(rbp, kFunctionOffset));
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Compute the receiver.
- Label call_to_object, use_global_receiver, push_receiver;
- __ movq(rbx, Operand(rbp, kReceiverOffset));
-
- // Do not transform the receiver for strict mode functions.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, &push_receiver);
-
- // Compute the receiver in non-strict mode.
- __ JumpIfSmi(rbx, &call_to_object);
- __ CompareRoot(rbx, Heap::kNullValueRootIndex);
- __ j(equal, &use_global_receiver);
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &use_global_receiver);
-
- // If given receiver is already a JavaScript object then there's no
- // reason for converting it.
- __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &call_to_object);
- __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
- __ j(below_equal, &push_receiver);
-
- // Convert the receiver to an object.
- __ bind(&call_to_object);
- __ push(rbx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
- __ jmp(&push_receiver);
-
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
- __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
-
- // Push the receiver.
- __ bind(&push_receiver);
- __ push(rbx);
-
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ movq(rax, Operand(rbp, kIndexOffset));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
-
- // Use inline caching to speed up access to arguments.
- Handle<Code> ic =
- masm->isolate()->builtins()->KeyedLoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET);
- // It is important that we do not have a test instruction after the
- // call. A test instruction after the call is used to indicate that
- // we have generated an inline version of the keyed load. In this
- // case, we know that we are not generating a test instruction next.
-
- // Push the nth argument.
- __ push(rax);
-
- // Update the index on the stack and in register rax.
- __ movq(rax, Operand(rbp, kIndexOffset));
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- __ movq(Operand(rbp, kIndexOffset), rax);
-
- __ bind(&entry);
- __ cmpq(rax, Operand(rbp, kLimitOffset));
- __ j(not_equal, &loop);
-
- // Invoke the function.
- ParameterCount actual(rax);
- __ SmiToInteger32(rax, rax);
- __ movq(rdi, Operand(rbp, kFunctionOffset));
- __ InvokeFunction(rdi, actual, CALL_FUNCTION);
-
- __ LeaveInternalFrame();
- __ ret(3 * kPointerSize); // remove function, receiver, and arguments
-}
-
-
-// Number of empty elements to allocate for an empty array.
-static const int kPreallocatedArrayElements = 4;
-
-
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. If the parameter initial_capacity is larger than zero an elements
-// backing store is allocated with this size and filled with the hole values.
-// Otherwise the elements backing store is set to the empty FixedArray.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
- Register array_function,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int initial_capacity,
- Label* gc_required) {
- ASSERT(initial_capacity >= 0);
-
- // Load the initial map from the array function.
- __ movq(scratch1, FieldOperand(array_function,
- JSFunction::kPrototypeOrInitialMapOffset));
-
- // Allocate the JSArray object together with space for a fixed array with the
- // requested elements.
- int size = JSArray::kSize;
- if (initial_capacity > 0) {
- size += FixedArray::SizeFor(initial_capacity);
- }
- __ AllocateInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // scratch1: initial map
- // scratch2: start of next object
- __ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
- __ Move(FieldOperand(result, JSArray::kPropertiesOffset),
- FACTORY->empty_fixed_array());
- // Field JSArray::kElementsOffset is initialized later.
- __ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
-
- // If no storage is requested for the elements array just set the empty
- // fixed array.
- if (initial_capacity == 0) {
- __ Move(FieldOperand(result, JSArray::kElementsOffset),
- FACTORY->empty_fixed_array());
- return;
- }
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // scratch2: start of next object
- __ lea(scratch1, Operand(result, JSArray::kSize));
- __ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);
-
- // Initialize the FixedArray and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // scratch1: elements array
- // scratch2: start of next object
- __ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
- FACTORY->fixed_array_map());
- __ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
- Smi::FromInt(initial_capacity));
-
- // Fill the FixedArray with the hole value. Inline the code if short.
- // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
- static const int kLoopUnfoldLimit = 4;
- ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
- __ Move(scratch3, FACTORY->the_hole_value());
- if (initial_capacity <= kLoopUnfoldLimit) {
- // Use a scratch register here to have only one reloc info when unfolding
- // the loop.
- for (int i = 0; i < initial_capacity; i++) {
- __ movq(FieldOperand(scratch1,
- FixedArray::kHeaderSize + i * kPointerSize),
- scratch3);
- }
- } else {
- Label loop, entry;
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(Operand(scratch1, 0), scratch3);
- __ addq(scratch1, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpq(scratch1, scratch2);
- __ j(below, &loop);
- }
-}
-
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array and elements_array_end (see
-// below for when that is not the case). If the parameter fill_with_holes is
-// true the allocated elements backing store is filled with the hole values
-// otherwise it is left uninitialized. When the backing store is filled the
-// register elements_array is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
- Register array_function, // Array function.
- Register array_size, // As a smi.
- Register result,
- Register elements_array,
- Register elements_array_end,
- Register scratch,
- bool fill_with_hole,
- Label* gc_required) {
- Label not_empty, allocated;
-
- // Load the initial map from the array function.
- __ movq(elements_array,
- FieldOperand(array_function,
- JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check whether an empty sized array is requested.
- __ testq(array_size, array_size);
- __ j(not_zero, &not_empty);
-
- // If an empty array is requested allocate a small elements array anyway. This
- // keeps the code below free of special casing for the empty array.
- int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
- __ AllocateInNewSpace(size,
- result,
- elements_array_end,
- scratch,
- gc_required,
- TAG_OBJECT);
- __ jmp(&allocated);
-
- // Allocate the JSArray object together with space for a FixedArray with the
- // requested elements.
- __ bind(&not_empty);
- SmiIndex index =
- masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
- __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
- index.scale,
- index.reg,
- result,
- elements_array_end,
- scratch,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // elements_array: initial map
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ bind(&allocated);
- __ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
- __ Move(elements_array, FACTORY->empty_fixed_array());
- __ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
- // Field JSArray::kElementsOffset is initialized later.
- __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ lea(elements_array, Operand(result, JSArray::kSize));
- __ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
-
- // Initialize the fixed array. FixedArray length is stored as a smi.
- // result: JSObject
- // elements_array: elements array
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ Move(FieldOperand(elements_array, JSObject::kMapOffset),
- FACTORY->fixed_array_map());
- Label not_empty_2, fill_array;
- __ SmiTest(array_size);
- __ j(not_zero, &not_empty_2);
- // Length of the FixedArray is the number of pre-allocated elements even
- // though the actual JSArray has length 0.
- __ Move(FieldOperand(elements_array, FixedArray::kLengthOffset),
- Smi::FromInt(kPreallocatedArrayElements));
- __ jmp(&fill_array);
- __ bind(&not_empty_2);
- // For non-empty JSArrays the length of the FixedArray and the JSArray is the
- // same.
- __ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
-
- // Fill the allocated FixedArray with the hole value if requested.
- // result: JSObject
- // elements_array: elements array
- // elements_array_end: start of next object
- __ bind(&fill_array);
- if (fill_with_hole) {
- Label loop, entry;
- __ Move(scratch, FACTORY->the_hole_value());
- __ lea(elements_array, Operand(elements_array,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(Operand(elements_array, 0), scratch);
- __ addq(elements_array, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpq(elements_array, elements_array_end);
- __ j(below, &loop);
- }
-}
-
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-// rdi: constructor (built-in Array function)
-// rax: argc
-// rsp[0]: return address
-// rsp[8]: last argument
-// This function is used for both construct and normal calls of Array. The only
-// difference between handling a construct call and a normal call is that for a
-// construct call the constructor function in rdi needs to be preserved for
-// entering the generic code. In both cases argc in rax needs to be preserved.
-// Both registers are preserved by this code so no need to differentiate between
-// a construct call and a normal call.
-static void ArrayNativeCode(MacroAssembler* masm,
- Label *call_generic_code) {
- Label argc_one_or_more, argc_two_or_more;
-
- // Check for array construction with zero arguments.
- __ testq(rax, rax);
- __ j(not_zero, &argc_one_or_more);
-
- // Handle construction of an empty array.
- AllocateEmptyJSArray(masm,
- rdi,
- rbx,
- rcx,
- rdx,
- r8,
- kPreallocatedArrayElements,
- call_generic_code);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->array_function_native(), 1);
- __ movq(rax, rbx);
- __ ret(kPointerSize);
-
- // Check for one argument. Bail out if argument is not smi or if it is
- // negative.
- __ bind(&argc_one_or_more);
- __ cmpq(rax, Immediate(1));
- __ j(not_equal, &argc_two_or_more);
- __ movq(rdx, Operand(rsp, kPointerSize)); // Get the argument from the stack.
- __ JumpUnlessNonNegativeSmi(rdx, call_generic_code);
-
- // Handle construction of an empty array of a certain size. Bail out if size
- // is to large to actually allocate an elements array.
- __ SmiCompare(rdx, Smi::FromInt(JSObject::kInitialMaxFastElementArray));
- __ j(greater_equal, call_generic_code);
-
- // rax: argc
- // rdx: array_size (smi)
- // rdi: constructor
- // esp[0]: return address
- // esp[8]: argument
- AllocateJSArray(masm,
- rdi,
- rdx,
- rbx,
- rcx,
- r8,
- r9,
- true,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1);
- __ movq(rax, rbx);
- __ ret(2 * kPointerSize);
-
- // Handle construction of an array from a list of arguments.
- __ bind(&argc_two_or_more);
- __ movq(rdx, rax);
- __ Integer32ToSmi(rdx, rdx); // Convet argc to a smi.
- // rax: argc
- // rdx: array_size (smi)
- // rdi: constructor
- // esp[0] : return address
- // esp[8] : last argument
- AllocateJSArray(masm,
- rdi,
- rdx,
- rbx,
- rcx,
- r8,
- r9,
- false,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1);
-
- // rax: argc
- // rbx: JSArray
- // rcx: elements_array
- // r8: elements_array_end (untagged)
- // esp[0]: return address
- // esp[8]: last argument
-
- // Location of the last argument
- __ lea(r9, Operand(rsp, kPointerSize));
-
- // Location of the first array element (Parameter fill_with_holes to
- // AllocateJSArrayis false, so the FixedArray is returned in rcx).
- __ lea(rdx, Operand(rcx, FixedArray::kHeaderSize - kHeapObjectTag));
-
- // rax: argc
- // rbx: JSArray
- // rdx: location of the first array element
- // r9: location of the last argument
- // esp[0]: return address
- // esp[8]: last argument
- Label loop, entry;
- __ movq(rcx, rax);
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(kScratchRegister, Operand(r9, rcx, times_pointer_size, 0));
- __ movq(Operand(rdx, 0), kScratchRegister);
- __ addq(rdx, Immediate(kPointerSize));
- __ bind(&entry);
- __ decq(rcx);
- __ j(greater_equal, &loop);
-
- // Remove caller arguments from the stack and return.
- // rax: argc
- // rbx: JSArray
- // esp[0]: return address
- // esp[8]: last argument
- __ pop(rcx);
- __ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
- __ push(rcx);
- __ movq(rax, rbx);
- __ ret(0);
-}
-
-
-void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
- // -----------------------------------
- Label generic_array_code;
-
- // Get the Array function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rdi);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin Array functions should be maps.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
- ASSERT(kSmiTag == 0);
- Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
- __ Check(not_smi, "Unexpected initial map for Array function");
- __ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Check(equal, "Unexpected initial map for Array function");
- }
-
- // Run the native code for the Array function called as a normal function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code in case the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rdi : constructor
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
- // -----------------------------------
- Label generic_constructor;
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the builtin and internal
- // Array functions which always have a map.
- // Initial map for the builtin Array function should be a map.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
- ASSERT(kSmiTag == 0);
- Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
- __ Check(not_smi, "Unexpected initial map for Array function");
- __ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Check(equal, "Unexpected initial map for Array function");
- }
-
- // Run the native code for the Array function called as constructor.
- ArrayNativeCode(masm, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
- // TODO(849): implement custom construct stub.
- // Generate a copy of the generic stub for now.
- Generate_JSConstructStubGeneric(masm);
-}
-
-
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ push(rbp);
- __ movq(rbp, rsp);
-
- // Store the arguments adaptor context sentinel.
- __ Push(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-
- // Push the function on the stack.
- __ push(rdi);
-
- // Preserve the number of arguments on the stack. Must preserve both
- // rax and rbx because these registers are used when copying the
- // arguments and the receiver.
- __ Integer32ToSmi(rcx, rax);
- __ push(rcx);
-}
-
-
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
- // Retrieve the number of arguments from the stack. Number is a Smi.
- __ movq(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- // Leave the frame.
- __ movq(rsp, rbp);
- __ pop(rbp);
-
- // Remove caller arguments from the stack.
- __ pop(rcx);
- SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
- __ push(rcx);
-}
-
-
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : actual number of arguments
- // -- rbx : expected number of arguments
- // -- rdx : code entry to call
- // -----------------------------------
-
- Label invoke, dont_adapt_arguments;
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->arguments_adaptors(), 1);
-
- Label enough, too_few;
- __ cmpq(rax, rbx);
- __ j(less, &too_few);
- __ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
- __ j(equal, &dont_adapt_arguments);
-
- { // Enough parameters: Actual >= expected.
- __ bind(&enough);
- EnterArgumentsAdaptorFrame(masm);
-
- // Copy receiver and all expected arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
- __ movq(rcx, Immediate(-1)); // account for receiver
-
- Label copy;
- __ bind(&copy);
- __ incq(rcx);
- __ push(Operand(rax, 0));
- __ subq(rax, Immediate(kPointerSize));
- __ cmpq(rcx, rbx);
- __ j(less, &copy);
- __ jmp(&invoke);
- }
-
- { // Too few parameters: Actual < expected.
- __ bind(&too_few);
- EnterArgumentsAdaptorFrame(masm);
-
- // Copy receiver and all actual arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
- __ movq(rcx, Immediate(-1)); // account for receiver
-
- Label copy;
- __ bind(&copy);
- __ incq(rcx);
- __ push(Operand(rdi, 0));
- __ subq(rdi, Immediate(kPointerSize));
- __ cmpq(rcx, rax);
- __ j(less, &copy);
-
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ bind(&fill);
- __ incq(rcx);
- __ push(kScratchRegister);
- __ cmpq(rcx, rbx);
- __ j(less, &fill);
-
- // Restore function pointer.
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- }
-
- // Call the entry point.
- __ bind(&invoke);
- __ call(rdx);
-
- // Leave frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ ret(0);
-
- // -------------------------------------------
- // Dont adapt arguments.
- // -------------------------------------------
- __ bind(&dont_adapt_arguments);
- __ jmp(rdx);
-}
-
-
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- // Get the loop depth of the stack guard check. This is recorded in
- // a test(rax, depth) instruction right after the call.
- Label stack_check;
- __ movq(rbx, Operand(rsp, 0)); // return address
- __ movzxbq(rbx, Operand(rbx, 1)); // depth
-
- // Get the loop nesting level at which we allow OSR from the
- // unoptimized code and check if we want to do OSR yet. If not we
- // should perform a stack guard check so we can get interrupts while
- // waiting for on-stack replacement.
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rcx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
- __ cmpb(rbx, FieldOperand(rcx, Code::kAllowOSRAtLoopNestingLevelOffset));
- __ j(greater, &stack_check);
-
- // Pass the function to optimize as the argument to the on-stack
- // replacement runtime function.
- __ EnterInternalFrame();
- __ push(rax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
- __ LeaveInternalFrame();
-
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
- NearLabel skip;
- __ SmiCompare(rax, Smi::FromInt(-1));
- __ j(not_equal, &skip);
- __ ret(0);
-
- // If we decide not to perform on-stack replacement we perform a
- // stack guard check to enable interrupts.
- __ bind(&stack_check);
- NearLabel ok;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok);
-
- StackCheckStub stub;
- __ TailCallStub(&stub);
- __ Abort("Unreachable code: returned from tail call.");
- __ bind(&ok);
- __ ret(0);
-
- __ bind(&skip);
- // Untag the AST id and push it on the stack.
- __ SmiToInteger32(rax, rax);
- __ push(rax);
-
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/code-stubs-x64.cc b/src/3rdparty/v8/src/x64/code-stubs-x64.cc
deleted file mode 100644
index 12c0ec5..0000000
--- a/src/3rdparty/v8/src/x64/code-stubs-x64.cc
+++ /dev/null
@@ -1,5134 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "regexp-macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in eax.
- NearLabel check_heap_number, call_builtin;
- __ SmiTest(rax);
- __ j(not_zero, &check_heap_number);
- __ Ret();
-
- __ bind(&check_heap_number);
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_builtin);
- __ Ret();
-
- __ bind(&call_builtin);
- __ pop(rcx); // Pop return address.
- __ push(rax);
- __ push(rcx); // Push return address.
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
-}
-
-
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in rsi.
- Label gc;
- __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
-
- // Get the function info from the stack.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
-
- int map_index = strict_mode_ == kStrictMode
- ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
- : Context::FUNCTION_MAP_INDEX;
-
- // Compute the function map in the current global context and set that
- // as the map of the allocated object.
- __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
- __ movq(rcx, Operand(rcx, Context::SlotOffset(map_index)));
- __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
- __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
- __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
- __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
- __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
- __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdi);
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
- __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
-
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ pop(rcx); // Temporarily remove return address.
- __ pop(rdx);
- __ push(rsi);
- __ push(rdx);
- __ PushRoot(Heap::kFalseValueRootIndex);
- __ push(rcx); // Restore return address.
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
-}
-
-
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
- rax, rbx, rcx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
-
- // Setup the object header.
- __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
- __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
-
- // Setup the fixed slots.
- __ Set(rbx, 0); // Set to NULL.
- __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
- __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax);
- __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx);
- __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
-
- // Copy the global object from the surrounding context.
- __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
-
- // Initialize the rest of the slots to undefined.
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
- }
-
- // Return and remove the on-stack parameter.
- __ movq(rsi, rax);
- __ ret(1 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewContext, 1, 1);
-}
-
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [rsp + kPointerSize]: constant elements.
- // [rsp + (2 * kPointerSize)]: literal index.
- // [rsp + (3 * kPointerSize)]: literals array.
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
- int size = JSArray::kSize + elements_size;
-
- // Load boilerplate object into rcx and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ movq(rcx, Operand(rsp, 3 * kPointerSize));
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ movq(rcx,
- FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
- __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &slow_case);
-
- if (FLAG_debug_code) {
- const char* message;
- Heap::RootListIndex expected_map_index;
- if (mode_ == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map_index = Heap::kFixedArrayMapRootIndex;
- } else {
- ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
- }
- __ push(rcx);
- __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
- __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
- expected_map_index);
- __ Assert(equal, message);
- __ pop(rcx);
- }
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rax, i), rbx);
- }
- }
-
- if (length_ > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
- __ lea(rdx, Operand(rax, JSArray::kSize));
- __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
-
- // Copy the elements array.
- for (int i = 0; i < elements_size; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rdx, i), rbx);
- }
- }
-
- // Return and remove the on-stack parameters.
- __ ret(3 * kPointerSize);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- NearLabel false_result, true_result, not_string;
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
-
- // 'null' => false.
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
- __ j(equal, &false_result);
-
- // Get the map and type of the heap object.
- // We don't use CmpObjectType because we manipulate the type field.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
-
- // Undetectable => false.
- __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
- __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, &false_result);
-
- // JavaScript object => true.
- __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
- __ j(above_equal, &true_result);
-
- // String value => false iff empty.
- __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
- __ j(above_equal, &not_string);
- __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
- __ SmiTest(rdx);
- __ j(zero, &false_result);
- __ jmp(&true_result);
-
- __ bind(&not_string);
- __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &true_result);
- // HeapNumber => false iff +0, -0, or NaN.
- // These three cases set the zero flag when compared to zero using ucomisd.
- __ xorpd(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
- __ j(zero, &false_result);
- // Fall through to |true_result|.
-
- // Return 1/0 for true/false in rax.
- __ bind(&true_result);
- __ movq(rax, Immediate(1));
- __ ret(1 * kPointerSize);
- __ bind(&false_result);
- __ Set(rax, 0);
- __ ret(1 * kPointerSize);
-}
-
-
-const char* GenericBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
- op_name,
- overwrite_name,
- (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
- args_in_registers_ ? "RegArgs" : "StackArgs",
- args_reversed_ ? "_R" : "",
- static_operands_type_.ToString(),
- BinaryOpIC::GetName(runtime_operands_type_));
- return name_;
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ push(right);
- } else {
- // The calling convention with registers is left in rdx and right in rax.
- Register left_arg = rdx;
- Register right_arg = rax;
- if (!(left.is(left_arg) && right.is(right_arg))) {
- if (left.is(right_arg) && right.is(left_arg)) {
- if (IsOperationCommutative()) {
- SetArgsReversed();
- } else {
- __ xchg(left, right);
- }
- } else if (left.is(left_arg)) {
- __ movq(right_arg, right);
- } else if (right.is(right_arg)) {
- __ movq(left_arg, left);
- } else if (left.is(right_arg)) {
- if (IsOperationCommutative()) {
- __ movq(left_arg, right);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying left argument.
- __ movq(left_arg, left);
- __ movq(right_arg, right);
- }
- } else if (right.is(left_arg)) {
- if (IsOperationCommutative()) {
- __ movq(right_arg, left);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying right argument.
- __ movq(right_arg, right);
- __ movq(left_arg, left);
- }
- } else {
- // Order of moves is not important.
- __ movq(left_arg, left);
- __ movq(right_arg, right);
- }
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->generic_binary_stub_calls_regs(), 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Smi* right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ Push(right);
- } else {
- // The calling convention with registers is left in rdx and right in rax.
- Register left_arg = rdx;
- Register right_arg = rax;
- if (left.is(left_arg)) {
- __ Move(right_arg, right);
- } else if (left.is(right_arg) && IsOperationCommutative()) {
- __ Move(left_arg, right);
- SetArgsReversed();
- } else {
- // For non-commutative operations, left and right_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite left before moving
- // it to left_arg.
- __ movq(left_arg, left);
- __ Move(right_arg, right);
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->generic_binary_stub_calls_regs(), 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Smi* left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ Push(left);
- __ push(right);
- } else {
- // The calling convention with registers is left in rdx and right in rax.
- Register left_arg = rdx;
- Register right_arg = rax;
- if (right.is(right_arg)) {
- __ Move(left_arg, left);
- } else if (right.is(left_arg) && IsOperationCommutative()) {
- __ Move(right_arg, left);
- SetArgsReversed();
- } else {
- // For non-commutative operations, right and left_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite right before moving
- // it to right_arg.
- __ movq(right_arg, right);
- __ Move(left_arg, left);
- }
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->generic_binary_stub_calls_regs(), 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-class FloatingPointHelper : public AllStatic {
- public:
- // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
- // If the operands are not both numbers, jump to not_numbers.
- // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
- // NumberOperands assumes both are smis or heap numbers.
- static void LoadSSE2SmiOperands(MacroAssembler* masm);
- static void LoadSSE2NumberOperands(MacroAssembler* masm);
- static void LoadSSE2UnknownOperands(MacroAssembler* masm,
- Label* not_numbers);
-
- // Takes the operands in rdx and rax and loads them as integers in rax
- // and rcx.
- static void LoadAsIntegers(MacroAssembler* masm,
- Label* operand_conversion_failure,
- Register heap_number_map);
- // As above, but we know the operands to be numbers. In that case,
- // conversion can't fail.
- static void LoadNumbersAsIntegers(MacroAssembler* masm);
-};
-
-
-void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
- // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
- // dividend in rax and rdx free for the division. Use rax, rbx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = rdx;
- Register right = rax;
- if (op_ == Token::DIV || op_ == Token::MOD) {
- left = rax;
- right = rbx;
- if (HasArgsInRegisters()) {
- __ movq(rbx, rax);
- __ movq(rax, rdx);
- }
- }
- if (!HasArgsInRegisters()) {
- __ movq(right, Operand(rsp, 1 * kPointerSize));
- __ movq(left, Operand(rsp, 2 * kPointerSize));
- }
-
- Label not_smis;
- // 2. Smi check both operands.
- if (static_operands_type_.IsSmi()) {
- // Skip smi check if we know that both arguments are smis.
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
- }
- if (op_ == Token::BIT_OR) {
- // Handle OR here, since we do extra smi-checking in the or code below.
- __ SmiOr(right, right, left);
- GenerateReturn(masm);
- return;
- }
- } else {
- if (op_ != Token::BIT_OR) {
- // Skip the check for OR as it is better combined with the
- // actual operation.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- __ JumpIfNotBothSmi(left, right, &not_smis);
- }
- }
-
- // 3. Operands are both smis (except for OR), perform the operation leaving
- // the result in rax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
- switch (op_) {
- case Token::ADD: {
- ASSERT(right.is(rax));
- __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
- break;
- }
-
- case Token::SUB: {
- __ SmiSub(left, left, right, &use_fp_on_smis);
- __ movq(rax, left);
- break;
- }
-
- case Token::MUL:
- ASSERT(right.is(rax));
- __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
- break;
-
- case Token::DIV:
- ASSERT(left.is(rax));
- __ SmiDiv(left, left, right, &use_fp_on_smis);
- break;
-
- case Token::MOD:
- ASSERT(left.is(rax));
- __ SmiMod(left, left, right, slow);
- break;
-
- case Token::BIT_OR:
- ASSERT(right.is(rax));
- __ movq(rcx, right); // Save the right operand.
- __ SmiOr(right, right, left); // BIT_OR is commutative.
- __ testb(right, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smis);
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(rax));
- __ SmiAnd(right, right, left); // BIT_AND is commutative.
- break;
-
- case Token::BIT_XOR:
- ASSERT(right.is(rax));
- __ SmiXor(right, right, left); // BIT_XOR is commutative.
- break;
-
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- switch (op_) {
- case Token::SAR:
- __ SmiShiftArithmeticRight(left, left, right);
- break;
- case Token::SHR:
- __ SmiShiftLogicalRight(left, left, right, slow);
- break;
- case Token::SHL:
- __ SmiShiftLeft(left, left, right);
- break;
- default:
- UNREACHABLE();
- }
- __ movq(rax, left);
- break;
-
- default:
- UNREACHABLE();
- break;
- }
-
- // 4. Emit return of result in rax.
- GenerateReturn(masm);
-
- // 5. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- ASSERT(use_fp_on_smis.is_linked());
- __ bind(&use_fp_on_smis);
- if (op_ == Token::DIV) {
- __ movq(rdx, rax);
- __ movq(rax, rbx);
- }
- // left is rdx, right is rax.
- __ AllocateHeapNumber(rbx, rcx, slow);
- FloatingPointHelper::LoadSSE2SmiOperands(masm);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rbx);
- GenerateReturn(masm);
- }
- default:
- break;
- }
-
- // 6. Non-smi operands, fall out to the non-smi code with the operands in
- // rdx and rax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(&not_smis);
-
- switch (op_) {
- case Token::DIV:
- case Token::MOD:
- // Operands are in rax, rbx at this point.
- __ movq(rdx, rax);
- __ movq(rax, rbx);
- break;
-
- case Token::BIT_OR:
- // Right operand is saved in rcx and rax was destroyed by the smi
- // operation.
- __ movq(rax, rcx);
- break;
-
- default:
- break;
- }
-}
-
-
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
- Label call_runtime;
-
- if (ShouldGenerateSmiCode()) {
- GenerateSmiCode(masm, &call_runtime);
- } else if (op_ != Token::MOD) {
- if (!HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
- }
- // Floating point case.
- if (ShouldGenerateFPCode()) {
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-smi argument occurs
- // (and only if smi code is generated). This is the right moment to
- // patch to HEAP_NUMBERS state. The transition is attempted only for
- // the four basic operations. The stub stays in the DEFAULT state
- // forever for all other operations (also if smi code is skipped).
- GenerateTypeTransition(masm);
- break;
- }
-
- Label not_floats;
- // rax: y
- // rdx: x
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(rdx);
- __ AbortIfNotNumber(rax);
- }
- FloatingPointHelper::LoadSSE2NumberOperands(masm);
- } else {
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
- }
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- // Allocate a heap number, if needed.
- Label skip_allocation;
- OverwriteMode mode = mode_;
- if (HasArgsReversed()) {
- if (mode == OVERWRITE_RIGHT) {
- mode = OVERWRITE_LEFT;
- } else if (mode == OVERWRITE_LEFT) {
- mode = OVERWRITE_RIGHT;
- }
- }
- switch (mode) {
- case OVERWRITE_LEFT:
- __ JumpIfNotSmi(rdx, &skip_allocation);
- __ AllocateHeapNumber(rbx, rcx, &call_runtime);
- __ movq(rdx, rbx);
- __ bind(&skip_allocation);
- __ movq(rax, rdx);
- break;
- case OVERWRITE_RIGHT:
- // If the argument in rax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(rax, &skip_allocation);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep rax and rdx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(rbx, rcx, &call_runtime);
- __ movq(rax, rbx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- GenerateReturn(masm);
- __ bind(&not_floats);
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- !HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-number argument
- // occurs (and only if smi code is skipped from the stub, otherwise
- // the patching has already been done earlier in this case branch).
- // A perfect moment to try patching to STRINGS for ADD operation.
- if (op_ == Token::ADD) {
- GenerateTypeTransition(masm);
- }
- }
- break;
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label skip_allocation, non_smi_shr_result;
- Register heap_number_map = r9;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(rdx);
- __ AbortIfNotNumber(rax);
- }
- FloatingPointHelper::LoadNumbersAsIntegers(masm);
- } else {
- FloatingPointHelper::LoadAsIntegers(masm,
- &call_runtime,
- heap_number_map);
- }
- switch (op_) {
- case Token::BIT_OR: __ orl(rax, rcx); break;
- case Token::BIT_AND: __ andl(rax, rcx); break;
- case Token::BIT_XOR: __ xorl(rax, rcx); break;
- case Token::SAR: __ sarl_cl(rax); break;
- case Token::SHL: __ shll_cl(rax); break;
- case Token::SHR: {
- __ shrl_cl(rax);
- // Check if result is negative. This can only happen for a shift
- // by zero.
- __ testl(rax, rax);
- __ j(negative, &non_smi_shr_result);
- break;
- }
- default: UNREACHABLE();
- }
-
- STATIC_ASSERT(kSmiValueSize == 32);
- // Tag smi result and return.
- __ Integer32ToSmi(rax, rax);
- GenerateReturn(masm);
-
- // All bit-ops except SHR return a signed int32 that can be
- // returned immediately as a smi.
- // We might need to allocate a HeapNumber if we shift a negative
- // number right by zero (i.e., convert to UInt32).
- if (op_ == Token::SHR) {
- ASSERT(non_smi_shr_result.is_linked());
- __ bind(&non_smi_shr_result);
- // Allocate a heap number if needed.
- __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(rax, &skip_allocation);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate heap number in new space.
- // Not using AllocateHeapNumber macro in order to reuse
- // already loaded heap_number_map.
- __ AllocateInNewSpace(HeapNumber::kSize,
- rax,
- rcx,
- no_reg,
- &call_runtime,
- TAG_OBJECT);
- // Set the map.
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
- __ movq(FieldOperand(rax, HeapObject::kMapOffset),
- heap_number_map);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- __ cvtqsi2sd(xmm0, rbx);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- GenerateReturn(masm);
- }
-
- break;
- }
- default: UNREACHABLE(); break;
- }
- }
-
- // If all else fails, use the runtime system to get the correct
- // result. If arguments was passed in registers now place them on the
- // stack in the correct order below the return address.
- __ bind(&call_runtime);
-
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
-
- switch (op_) {
- case Token::ADD: {
- // Registers containing left and right operands respectively.
- Register lhs, rhs;
-
- if (HasArgsReversed()) {
- lhs = rax;
- rhs = rdx;
- } else {
- lhs = rdx;
- rhs = rax;
- }
-
- // Test for string arguments before calling runtime.
- Label not_strings, both_strings, not_string1, string1, string1_smi2;
-
- // If this stub has already generated FP-specific code then the arguments
- // are already in rdx and rax.
- if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
-
- Condition is_smi;
- is_smi = masm->CheckSmi(lhs);
- __ j(is_smi, &not_string1);
- __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
- __ j(above_equal, &not_string1);
-
- // First argument is a a string, test second.
- is_smi = masm->CheckSmi(rhs);
- __ j(is_smi, &string1_smi2);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
- __ j(above_equal, &string1);
-
- // First and second argument are strings.
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&string1_smi2);
- // First argument is a string, second is a smi. Try to lookup the number
- // string for the smi in the number string cache.
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm, rhs, rbx, rcx, r8, true, &string1);
-
- // Replace second argument on stack and tailcall string add stub to make
- // the result.
- __ movq(Operand(rsp, 1 * kPointerSize), rbx);
- __ TailCallStub(&string_add_stub);
-
- // Only first argument is a string.
- __ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
-
- // First argument was not a string, test second.
- __ bind(&not_string1);
- is_smi = masm->CheckSmi(rhs);
- __ j(is_smi, &not_strings);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
- __ j(above_equal, &not_strings);
-
- // Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
-
- __ bind(&not_strings);
- // Neither argument is a string.
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- }
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
- ASSERT(!HasArgsInRegisters());
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-}
-
-
-void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
- // If arguments are not passed in registers remove them from the stack before
- // returning.
- if (!HasArgsInRegisters()) {
- __ ret(2 * kPointerSize); // Remove both operands
- } else {
- __ ret(0);
- }
-}
-
-
-void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- ASSERT(HasArgsInRegisters());
- __ pop(rcx);
- if (HasArgsReversed()) {
- __ push(rax);
- __ push(rdx);
- } else {
- __ push(rdx);
- __ push(rax);
- }
- __ push(rcx);
-}
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- // Ensure the operands are on the stack.
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
-
- // Left and right arguments are already on stack.
- __ pop(rcx); // Save the return address.
-
- // Push this stub's key.
- __ Push(Smi::FromInt(MinorKey()));
-
- // Although the operation and the type info are encoded into the key,
- // the encoding is opaque, so push them too.
- __ Push(Smi::FromInt(op_));
-
- __ Push(Smi::FromInt(runtime_operands_type_));
-
- __ push(rcx); // The return address.
-
- // Perform patching to an appropriate fast case and return the result.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()),
- 5,
- 1);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- GenericBinaryOpStub stub(key, type_info);
- return stub.GetCode();
-}
-
-
-Handle<Code> GetTypeRecordingBinaryOpStub(int key,
- TRBinaryOpIC::TypeInfo type_info,
- TRBinaryOpIC::TypeInfo result_type_info) {
- TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
- return stub.GetCode();
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(rcx); // Save return address.
- __ push(rdx);
- __ push(rax);
- // Left and right arguments are now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ Push(Smi::FromInt(MinorKey()));
- __ Push(Smi::FromInt(op_));
- __ Push(Smi::FromInt(operands_type_));
-
- __ push(rcx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch),
- masm->isolate()),
- 5,
- 1);
-}
-
-
-void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
- switch (operands_type_) {
- case TRBinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case TRBinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case TRBinaryOpIC::INT32:
- UNREACHABLE();
- // The int32 case is identical to the Smi case. We avoid creating this
- // ic state on x64.
- break;
- case TRBinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case TRBinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case TRBinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case TRBinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-const char* TypeRecordingBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "TypeRecordingBinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- TRBinaryOpIC::GetName(operands_type_));
- return name_;
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
- Label* slow,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
-
- // We only generate heapnumber answers for overflowing calculations
- // for the four basic arithmetic operations.
- bool generate_inline_heapnumber_results =
- (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
- (op_ == Token::ADD || op_ == Token::SUB ||
- op_ == Token::MUL || op_ == Token::DIV);
-
- // Arguments to TypeRecordingBinaryOpStub are in rdx and rax.
- Register left = rdx;
- Register right = rax;
-
-
- // Smi check of both operands. If op is BIT_OR, the check is delayed
- // until after the OR operation.
- Label not_smis;
- Label use_fp_on_smis;
- Label restore_MOD_registers; // Only used if op_ == Token::MOD.
-
- if (op_ != Token::BIT_OR) {
- Comment smi_check_comment(masm, "-- Smi check arguments");
- __ JumpIfNotBothSmi(left, right, &not_smis);
- }
-
- // Perform the operation.
- Comment perform_smi(masm, "-- Perform smi operation");
- switch (op_) {
- case Token::ADD:
- ASSERT(right.is(rax));
- __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
- break;
-
- case Token::SUB:
- __ SmiSub(left, left, right, &use_fp_on_smis);
- __ movq(rax, left);
- break;
-
- case Token::MUL:
- ASSERT(right.is(rax));
- __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
- break;
-
- case Token::DIV:
- // SmiDiv will not accept left in rdx or right in rax.
- left = rcx;
- right = rbx;
- __ movq(rbx, rax);
- __ movq(rcx, rdx);
- __ SmiDiv(rax, left, right, &use_fp_on_smis);
- break;
-
- case Token::MOD:
- // SmiMod will not accept left in rdx or right in rax.
- left = rcx;
- right = rbx;
- __ movq(rbx, rax);
- __ movq(rcx, rdx);
- __ SmiMod(rax, left, right, &use_fp_on_smis);
- break;
-
- case Token::BIT_OR: {
- ASSERT(right.is(rax));
- __ movq(rcx, right); // Save the right operand.
- __ SmiOr(right, right, left); // BIT_OR is commutative.
- __ JumpIfNotSmi(right, &not_smis); // Test delayed until after BIT_OR.
- break;
- }
- case Token::BIT_XOR:
- ASSERT(right.is(rax));
- __ SmiXor(right, right, left); // BIT_XOR is commutative.
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(rax));
- __ SmiAnd(right, right, left); // BIT_AND is commutative.
- break;
-
- case Token::SHL:
- __ SmiShiftLeft(left, left, right);
- __ movq(rax, left);
- break;
-
- case Token::SAR:
- __ SmiShiftArithmeticRight(left, left, right);
- __ movq(rax, left);
- break;
-
- case Token::SHR:
- __ SmiShiftLogicalRight(left, left, right, &not_smis);
- __ movq(rax, left);
- break;
-
- default:
- UNREACHABLE();
- }
-
- // 5. Emit return of result in rax. Some operations have registers pushed.
- __ ret(0);
-
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- __ bind(&use_fp_on_smis);
- if (op_ == Token::DIV || op_ == Token::MOD) {
- // Restore left and right to rdx and rax.
- __ movq(rdx, rcx);
- __ movq(rax, rbx);
- }
-
-
- if (generate_inline_heapnumber_results) {
- __ AllocateHeapNumber(rcx, rbx, slow);
- Comment perform_float(masm, "-- Perform float operation on smis");
- FloatingPointHelper::LoadSSE2SmiOperands(masm);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
- }
-
- // 7. Non-smi operands reach the end of the code generated by
- // GenerateSmiCode, and fall through to subsequent code,
- // with the operands in rdx and rax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(&not_smis);
- if (op_ == Token::BIT_OR) {
- __ movq(right, rcx);
- }
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateFloatingPointCode(
- MacroAssembler* masm,
- Label* allocation_failure,
- Label* non_numeric_failure) {
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- GenerateHeapResultAllocation(masm, allocation_failure);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- break;
- }
- case Token::MOD: {
- // For MOD we jump to the allocation_failure label, to call runtime.
- __ jmp(allocation_failure);
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_shr_result;
- Register heap_number_map = r9;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
- heap_number_map);
- switch (op_) {
- case Token::BIT_OR: __ orl(rax, rcx); break;
- case Token::BIT_AND: __ andl(rax, rcx); break;
- case Token::BIT_XOR: __ xorl(rax, rcx); break;
- case Token::SAR: __ sarl_cl(rax); break;
- case Token::SHL: __ shll_cl(rax); break;
- case Token::SHR: {
- __ shrl_cl(rax);
- // Check if result is negative. This can only happen for a shift
- // by zero.
- __ testl(rax, rax);
- __ j(negative, &non_smi_shr_result);
- break;
- }
- default: UNREACHABLE();
- }
- STATIC_ASSERT(kSmiValueSize == 32);
- // Tag smi result and return.
- __ Integer32ToSmi(rax, rax);
- __ Ret();
-
- // Logical shift right can produce an unsigned int32 that is not
- // an int32, and so is not in the smi range. Allocate a heap number
- // in that case.
- if (op_ == Token::SHR) {
- __ bind(&non_smi_shr_result);
- Label allocation_failed;
- __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
- // Allocate heap number in new space.
- // Not using AllocateHeapNumber macro in order to reuse
- // already loaded heap_number_map.
- __ AllocateInNewSpace(HeapNumber::kSize,
- rax,
- rcx,
- no_reg,
- &allocation_failed,
- TAG_OBJECT);
- // Set the map.
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
- __ movq(FieldOperand(rax, HeapObject::kMapOffset),
- heap_number_map);
- __ cvtqsi2sd(xmm0, rbx);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- __ Ret();
-
- __ bind(&allocation_failed);
- // We need tagged values in rdx and rax for the following code,
- // not int32 in rax and rcx.
- __ Integer32ToSmi(rax, rcx);
- __ Integer32ToSmi(rdx, rax);
- __ jmp(allocation_failure);
- }
- break;
- }
- default: UNREACHABLE(); break;
- }
- // No fall-through from this generated code.
- if (FLAG_debug_code) {
- __ Abort("Unexpected fall-through in "
- "TypeRecordingBinaryStub::GenerateFloatingPointCode.");
- }
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- NearLabel left_not_string, call_runtime;
-
- // Registers containing left and right operands respectively.
- Register left = rdx;
- Register right = rax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &left_not_string);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &left_not_string);
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &call_runtime);
-
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_runtime);
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
- GenerateRegisterArgsPush(masm);
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label not_smi;
-
- GenerateSmiCode(masm, &not_smi, NO_HEAPNUMBER_RESULTS);
-
- __ bind(&not_smi);
- GenerateTypeTransition(masm);
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == TRBinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- GenerateStringAddCode(masm);
- // Try to add arguments as strings, otherwise, transition to the generic
- // TRBinaryOpIC type.
- GenerateTypeTransition(masm);
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- Label call_runtime;
-
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateStringAddCode(masm);
- }
-
- // Convert oddball arguments to numbers.
- NearLabel check, done;
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &check);
- if (Token::IsBitOp(op_)) {
- __ xor_(rdx, rdx);
- } else {
- __ LoadRoot(rdx, Heap::kNanValueRootIndex);
- }
- __ jmp(&done);
- __ bind(&check);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &done);
- if (Token::IsBitOp(op_)) {
- __ xor_(rax, rax);
- } else {
- __ LoadRoot(rax, Heap::kNanValueRootIndex);
- }
- __ bind(&done);
-
- GenerateHeapNumberStub(masm);
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- Label gc_required, not_number;
- GenerateFloatingPointCode(masm, &gc_required, &not_number);
-
- __ bind(&not_number);
- GenerateTypeTransition(masm);
-
- __ bind(&gc_required);
- GenerateCallRuntimeCode(masm);
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime;
-
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
-
- GenerateFloatingPointCode(masm, &call_runtime, &call_string_add_or_runtime);
-
- __ bind(&call_string_add_or_runtime);
- if (op_ == Token::ADD) {
- GenerateStringAddCode(masm);
- }
-
- __ bind(&call_runtime);
- GenerateCallRuntimeCode(masm);
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
- MacroAssembler* masm,
- Label* alloc_failure) {
- Label skip_allocation;
- OverwriteMode mode = mode_;
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in rdx is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(rdx, &skip_allocation);
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(rbx, rcx, alloc_failure);
- // Now rdx can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ movq(rdx, rbx);
- __ bind(&skip_allocation);
- // Use object in rdx as a result holder
- __ movq(rax, rdx);
- break;
- }
- case OVERWRITE_RIGHT:
- // If the argument in rax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(rax, &skip_allocation);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep rax and rdx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(rbx, rcx, alloc_failure);
- // Now rax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ movq(rax, rbx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ pop(rcx);
- __ push(rdx);
- __ push(rax);
- __ push(rcx);
-}
-
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // TAGGED case:
- // Input:
- // rsp[8]: argument (should be number).
- // rsp[0]: return address.
- // Output:
- // rax: tagged double result.
- // UNTAGGED case:
- // Input::
- // rsp[0]: return address.
- // xmm1: untagged double input argument
- // Output:
- // xmm1: untagged double result.
-
- Label runtime_call;
- Label runtime_call_clear_stack;
- Label skip_cache;
- const bool tagged = (argument_type_ == TAGGED);
- if (tagged) {
- NearLabel input_not_smi;
- NearLabel loaded;
- // Test that rax is a number.
- __ movq(rax, Operand(rsp, kPointerSize));
- __ JumpIfNotSmi(rax, &input_not_smi);
- // Input is a smi. Untag and load it onto the FPU stack.
- // Then load the bits of the double into rbx.
- __ SmiToInteger32(rax, rax);
- __ subq(rsp, Immediate(kDoubleSize));
- __ cvtlsi2sd(xmm1, rax);
- __ movsd(Operand(rsp, 0), xmm1);
- __ movq(rbx, xmm1);
- __ movq(rdx, xmm1);
- __ fld_d(Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
- __ jmp(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex);
- __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ j(not_equal, &runtime_call);
- // Input is a HeapNumber. Push it on the FPU stack and load its
- // bits into rbx.
- __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rdx, rbx);
-
- __ bind(&loaded);
- } else { // UNTAGGED.
- __ movq(rbx, xmm1);
- __ movq(rdx, xmm1);
- }
-
- // ST[0] == double value, if TAGGED.
- // rbx = bits of double value.
- // rdx = also bits of double value.
- // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
- // h = h0 = bits ^ (bits >> 32);
- // h ^= h >> 16;
- // h ^= h >> 8;
- // h = h & (cacheSize - 1);
- // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
- __ sar(rdx, Immediate(32));
- __ xorl(rdx, rbx);
- __ movl(rcx, rdx);
- __ movl(rax, rdx);
- __ movl(rdi, rdx);
- __ sarl(rdx, Immediate(8));
- __ sarl(rcx, Immediate(16));
- __ sarl(rax, Immediate(24));
- __ xorl(rcx, rdx);
- __ xorl(rax, rdi);
- __ xorl(rcx, rax);
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ andl(rcx, Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // ST[0] == double value.
- // rbx = bits of double value.
- // rcx = TranscendentalCache::hash(double value).
- ExternalReference cache_array =
- ExternalReference::transcendental_cache_array_address(masm->isolate());
- __ movq(rax, cache_array);
- int cache_array_index =
- type_ * sizeof(Isolate::Current()->transcendental_cache()->caches_[0]);
- __ movq(rax, Operand(rax, cache_array_index));
- // rax points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ testq(rax, rax);
- __ j(zero, &runtime_call_clear_stack); // Only clears stack if TAGGED.
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { // NOLINT - doesn't like a single brace on a line.
- TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- // Two uint_32's and a pointer per element.
- CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
- CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
- CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
- CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
- }
-#endif
- // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
- __ addl(rcx, rcx);
- __ lea(rcx, Operand(rax, rcx, times_8, 0));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- NearLabel cache_miss;
- __ cmpq(rbx, Operand(rcx, 0));
- __ j(not_equal, &cache_miss);
- // Cache hit!
- __ movq(rax, Operand(rcx, 2 * kIntSize));
- if (tagged) {
- __ fstp(0); // Clear FPU stack.
- __ ret(kPointerSize);
- } else { // UNTAGGED.
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ Ret();
- }
-
- __ bind(&cache_miss);
- // Update cache with new value.
- if (tagged) {
- __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
- } else { // UNTAGGED.
- __ AllocateHeapNumber(rax, rdi, &skip_cache);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
- __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- }
- GenerateOperation(masm);
- __ movq(Operand(rcx, 0), rbx);
- __ movq(Operand(rcx, 2 * kIntSize), rax);
- __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
- if (tagged) {
- __ ret(kPointerSize);
- } else { // UNTAGGED.
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ Ret();
-
- // Skip cache and return answer directly, only in untagged case.
- __ bind(&skip_cache);
- __ subq(rsp, Immediate(kDoubleSize));
- __ movsd(Operand(rsp, 0), xmm1);
- __ fld_d(Operand(rsp, 0));
- GenerateOperation(masm);
- __ fstp_d(Operand(rsp, 0));
- __ movsd(xmm1, Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
- // We return the value in xmm1 without adding it to the cache, but
- // we cause a scavenging GC so that future allocations will succeed.
- __ EnterInternalFrame();
- // Allocate an unused object bigger than a HeapNumber.
- __ Push(Smi::FromInt(2 * kDoubleSize));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- __ LeaveInternalFrame();
- __ Ret();
- }
-
- // Call runtime, doing whatever allocation and cleanup is necessary.
- if (tagged) {
- __ bind(&runtime_call_clear_stack);
- __ fstp(0);
- __ bind(&runtime_call);
- __ TailCallExternalReference(
- ExternalReference(RuntimeFunction(), masm->isolate()), 1, 1);
- } else { // UNTAGGED.
- __ bind(&runtime_call_clear_stack);
- __ bind(&runtime_call);
- __ AllocateHeapNumber(rax, rdi, &skip_cache);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
- __ EnterInternalFrame();
- __ push(rax);
- __ CallRuntime(RuntimeFunction(), 1);
- __ LeaveInternalFrame();
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ Ret();
- }
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::LOG: return Runtime::kMath_log;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
- // Registers:
- // rax: Newly allocated HeapNumber, which must be preserved.
- // rbx: Bits of input double. Must be preserved.
- // rcx: Pointer to cache entry. Must be preserved.
- // st(0): Input double
- Label done;
- if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) {
- // Both fsin and fcos require arguments in the range +/-2^63 and
- // return NaN for infinities and NaN. They can share all code except
- // the actual fsin/fcos operation.
- Label in_range;
- // If argument is outside the range -2^63..2^63, fsin/cos doesn't
- // work. We must reduce it to the appropriate range.
- __ movq(rdi, rbx);
- // Move exponent and sign bits to low bits.
- __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
- // Remove sign bit.
- __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
- int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
- __ cmpl(rdi, Immediate(supported_exponent_limit));
- __ j(below, &in_range);
- // Check for infinity and NaN. Both return NaN for sin.
- __ cmpl(rdi, Immediate(0x7ff));
- NearLabel non_nan_result;
- __ j(not_equal, &non_nan_result);
- // Input is +/-Infinity or NaN. Result is NaN.
- __ fstp(0);
- __ LoadRoot(kScratchRegister, Heap::kNanValueRootIndex);
- __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&non_nan_result);
-
- // Use fpmod to restrict argument to the range +/-2*PI.
- __ movq(rdi, rax); // Save rax before using fnstsw_ax.
- __ fldpi();
- __ fadd(0);
- __ fld(1);
- // FPU Stack: input, 2*pi, input.
- {
- Label no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear if Illegal Operand or Zero Division exceptions are set.
- __ testl(rax, Immediate(5)); // #IO and #ZD flags of FPU status word.
- __ j(zero, &no_exceptions);
- __ fnclex();
- __ bind(&no_exceptions);
- }
-
- // Compute st(0) % st(1)
- {
- NearLabel partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem1();
- __ fwait();
- __ fnstsw_ax();
- __ testl(rax, Immediate(0x400)); // Check C2 bit of FPU status word.
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
- }
- // FPU Stack: input, 2*pi, input % 2*pi
- __ fstp(2);
- // FPU Stack: input % 2*pi, 2*pi,
- __ fstp(0);
- // FPU Stack: input % 2*pi
- __ movq(rax, rdi); // Restore rax, pointer to the new HeapNumber.
- __ bind(&in_range);
- switch (type_) {
- case TranscendentalCache::SIN:
- __ fsin();
- break;
- case TranscendentalCache::COS:
- __ fcos();
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&done);
- } else {
- ASSERT(type_ == TranscendentalCache::LOG);
- __ fldln2();
- __ fxch();
- __ fyl2x();
- }
-}
-
-
-// Get the integer part of a heap number.
-// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
-void IntegerConvert(MacroAssembler* masm,
- Register result,
- Register source) {
- // Result may be rcx. If result and source are the same register, source will
- // be overwritten.
- ASSERT(!result.is(rdi) && !result.is(rbx));
- // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
- // cvttsd2si (32-bit version) directly.
- Register double_exponent = rbx;
- Register double_value = rdi;
- NearLabel done, exponent_63_plus;
- // Get double and extract exponent.
- __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
- // Clear result preemptively, in case we need to return zero.
- __ xorl(result, result);
- __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
- // Double to remove sign bit, shift exponent down to least significant bits.
- // and subtract bias to get the unshifted, unbiased exponent.
- __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
- __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
- __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
- // Check whether the exponent is too big for a 63 bit unsigned integer.
- __ cmpl(double_exponent, Immediate(63));
- __ j(above_equal, &exponent_63_plus);
- // Handle exponent range 0..62.
- __ cvttsd2siq(result, xmm0);
- __ jmp(&done);
-
- __ bind(&exponent_63_plus);
- // Exponent negative or 63+.
- __ cmpl(double_exponent, Immediate(83));
- // If exponent negative or above 83, number contains no significant bits in
- // the range 0..2^31, so result is zero, and rcx already holds zero.
- __ j(above, &done);
-
- // Exponent in rage 63..83.
- // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
- // the least significant exponent-52 bits.
-
- // Negate low bits of mantissa if value is negative.
- __ addq(double_value, double_value); // Move sign bit to carry.
- __ sbbl(result, result); // And convert carry to -1 in result register.
- // if scratch2 is negative, do (scratch2-1)^-1, otherwise (scratch2-0)^0.
- __ addl(double_value, result);
- // Do xor in opposite directions depending on where we want the result
- // (depending on whether result is rcx or not).
-
- if (result.is(rcx)) {
- __ xorl(double_value, result);
- // Left shift mantissa by (exponent - mantissabits - 1) to save the
- // bits that have positional values below 2^32 (the extra -1 comes from the
- // doubling done above to move the sign bit into the carry flag).
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(double_value);
- __ movl(result, double_value);
- } else {
- // As the then-branch, but move double-value to result before shifting.
- __ xorl(result, double_value);
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(result);
- }
-
- __ bind(&done);
-}
-
-
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
- // Check float operands.
- Label done;
- Label rax_is_smi;
- Label rax_is_object;
- Label rdx_is_object;
-
- __ JumpIfNotSmi(rdx, &rdx_is_object);
- __ SmiToInteger32(rdx, rdx);
- __ JumpIfSmi(rax, &rax_is_smi);
-
- __ bind(&rax_is_object);
- IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
- __ jmp(&done);
-
- __ bind(&rdx_is_object);
- IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
- __ JumpIfNotSmi(rax, &rax_is_object);
- __ bind(&rax_is_smi);
- __ SmiToInteger32(rcx, rax);
-
- __ bind(&done);
- __ movl(rax, rdx);
-}
-
-
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-// Jump to conversion_failure: rdx and rax are unchanged.
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- Label* conversion_failure,
- Register heap_number_map) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- __ JumpIfNotSmi(rdx, &arg1_is_object);
- __ SmiToInteger32(r8, rdx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ movl(r8, Immediate(0));
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg1);
- // Get the untagged integer version of the rdx heap number in rcx.
- IntegerConvert(masm, r8, rdx);
-
- // Here r8 has the untagged integer, rax has a Smi or a heap number.
- __ bind(&load_arg2);
- // Test if arg2 is a Smi.
- __ JumpIfNotSmi(rax, &arg2_is_object);
- __ SmiToInteger32(rcx, rax);
- __ jmp(&done);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ movl(rcx, Immediate(0));
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg2);
- // Get the untagged integer version of the rax heap number in rcx.
- IntegerConvert(masm, rcx, rax);
- __ bind(&done);
- __ movl(rax, r8);
-}
-
-
-void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
-}
-
-
-void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
- Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
- // Load operand in rdx into xmm0.
- __ JumpIfSmi(rdx, &load_smi_rdx);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Load operand in rax into xmm1.
- __ JumpIfSmi(rax, &load_smi_rax);
- __ bind(&load_nonsmi_rax);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_rdx);
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ JumpIfNotSmi(rax, &load_nonsmi_rax);
-
- __ bind(&load_smi_rax);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
- Label* not_numbers) {
- Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
- // Load operand in rdx into xmm0, or branch to not_numbers.
- __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
- __ JumpIfSmi(rdx, &load_smi_rdx);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
- __ j(not_equal, not_numbers); // Argument in rdx is not a number.
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Load operand in rax into xmm1, or branch to not_numbers.
- __ JumpIfSmi(rax, &load_smi_rax);
-
- __ bind(&load_nonsmi_rax);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
- __ j(not_equal, not_numbers);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_rdx);
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ JumpIfNotSmi(rax, &load_nonsmi_rax);
-
- __ bind(&load_smi_rax);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
- __ bind(&done);
-}
-
-
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- Label slow, done;
-
- if (op_ == Token::SUB) {
- if (include_smi_code_) {
- // Check whether the value is a smi.
- Label try_float;
- __ JumpIfNotSmi(rax, &try_float);
- if (negative_zero_ == kIgnoreNegativeZero) {
- __ SmiCompare(rax, Smi::FromInt(0));
- __ j(equal, &done);
- }
- __ SmiNeg(rax, rax, &done);
- __ jmp(&slow); // zero, if not handled above, and Smi::kMinValue.
-
- // Try floating point case.
- __ bind(&try_float);
- } else if (FLAG_debug_code) {
- __ AbortIfSmi(rax);
- }
-
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &slow);
- // Operand is a float, negate its value by flipping sign bit.
- __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(kScratchRegister, Immediate(0x01));
- __ shl(kScratchRegister, Immediate(63));
- __ xor_(rdx, kScratchRegister); // Flip sign.
- // rdx is value to store.
- if (overwrite_ == UNARY_OVERWRITE) {
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
- } else {
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // rcx: allocated 'empty' number
- __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
- __ movq(rax, rcx);
- }
- } else if (op_ == Token::BIT_NOT) {
- if (include_smi_code_) {
- Label try_float;
- __ JumpIfNotSmi(rax, &try_float);
- __ SmiNot(rax, rax);
- __ jmp(&done);
- // Try floating point case.
- __ bind(&try_float);
- } else if (FLAG_debug_code) {
- __ AbortIfSmi(rax);
- }
-
- // Check if the operand is a heap number.
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &slow);
-
- // Convert the heap number in rax to an untagged integer in rcx.
- IntegerConvert(masm, rax, rax);
-
- // Do the bitwise operation and smi tag the result.
- __ notl(rax);
- __ Integer32ToSmi(rax, rax);
- }
-
- // Return from the stub.
- __ bind(&done);
- __ StubReturn(1);
-
- // Handle the slow case by jumping to the JavaScript builtin.
- __ bind(&slow);
- __ pop(rcx); // pop return address
- __ push(rax);
- __ push(rcx); // push return address
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void MathPowStub::Generate(MacroAssembler* masm) {
- // Registers are used as follows:
- // rdx = base
- // rax = exponent
- // rcx = temporary, result
-
- Label allocate_return, call_runtime;
-
- // Load input parameters.
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
-
- // Save 1 in xmm3 - we need this several times later on.
- __ movl(rcx, Immediate(1));
- __ cvtlsi2sd(xmm3, rcx);
-
- Label exponent_nonsmi;
- Label base_nonsmi;
- // If the exponent is a heap number go to that specific case.
- __ JumpIfNotSmi(rax, &exponent_nonsmi);
- __ JumpIfNotSmi(rdx, &base_nonsmi);
-
- // Optimized version when both exponent and base are smis.
- Label powi;
- __ SmiToInteger32(rdx, rdx);
- __ cvtlsi2sd(xmm0, rdx);
- __ jmp(&powi);
- // Exponent is a smi and base is a heapnumber.
- __ bind(&base_nonsmi);
- __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
-
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
-
- // Optimized version of pow if exponent is a smi.
- // xmm0 contains the base.
- __ bind(&powi);
- __ SmiToInteger32(rax, rax);
-
- // Save exponent in base as we need to check if exponent is negative later.
- // We know that base and exponent are in different registers.
- __ movq(rdx, rax);
-
- // Get absolute value of exponent.
- NearLabel no_neg;
- __ cmpl(rax, Immediate(0));
- __ j(greater_equal, &no_neg);
- __ negl(rax);
- __ bind(&no_neg);
-
- // Load xmm1 with 1.
- __ movsd(xmm1, xmm3);
- NearLabel while_true;
- NearLabel no_multiply;
-
- __ bind(&while_true);
- __ shrl(rax, Immediate(1));
- __ j(not_carry, &no_multiply);
- __ mulsd(xmm1, xmm0);
- __ bind(&no_multiply);
- __ mulsd(xmm0, xmm0);
- __ j(not_zero, &while_true);
-
- // Base has the original value of the exponent - if the exponent is
- // negative return 1/result.
- __ testl(rdx, rdx);
- __ j(positive, &allocate_return);
- // Special case if xmm1 has reached infinity.
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ xorpd(xmm0, xmm0);
- __ ucomisd(xmm0, xmm1);
- __ j(equal, &call_runtime);
-
- __ jmp(&allocate_return);
-
- // Exponent (or both) is a heapnumber - no matter what we should now work
- // on doubles.
- __ bind(&exponent_nonsmi);
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- // Test if exponent is nan.
- __ ucomisd(xmm1, xmm1);
- __ j(parity_even, &call_runtime);
-
- NearLabel base_not_smi;
- NearLabel handle_special_cases;
- __ JumpIfNotSmi(rdx, &base_not_smi);
- __ SmiToInteger32(rdx, rdx);
- __ cvtlsi2sd(xmm0, rdx);
- __ jmp(&handle_special_cases);
-
- __ bind(&base_not_smi);
- __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ movl(rcx, FieldOperand(rdx, HeapNumber::kExponentOffset));
- __ andl(rcx, Immediate(HeapNumber::kExponentMask));
- __ cmpl(rcx, Immediate(HeapNumber::kExponentMask));
- // base is NaN or +/-Infinity
- __ j(greater_equal, &call_runtime);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
-
- // base is in xmm0 and exponent is in xmm1.
- __ bind(&handle_special_cases);
- NearLabel not_minus_half;
- // Test for -0.5.
- // Load xmm2 with -0.5.
- __ movq(rcx, V8_UINT64_C(0xBFE0000000000000), RelocInfo::NONE);
- __ movq(xmm2, rcx);
- // xmm2 now has -0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &not_minus_half);
-
- // Calculates reciprocal of square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorpd(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
-
- // Test for 0.5.
- __ bind(&not_minus_half);
- // Load xmm2 with 0.5.
- // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
- __ addsd(xmm2, xmm3);
- // xmm2 now has 0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &call_runtime);
- // Calculates square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorpd(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
-
- __ bind(&allocate_return);
- __ AllocateHeapNumber(rcx, rax, &call_runtime);
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm1);
- __ movq(rax, rcx);
- __ ret(2 * kPointerSize);
-
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The key is in rdx and the parameter count is in rax.
-
- // The displacement is used for skipping the frame pointer on the
- // stack. It is the offset of the last parameter (if any) relative
- // to the frame pointer.
- static const int kDisplacement = 1 * kPointerSize;
-
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(rdx, &slow);
-
- // Check if the calling frame is an arguments adaptor frame. We look at the
- // context offset, and if the frame is not a regular one, then we find a
- // Smi instead of the context. We can't use SmiCompare here, because that
- // only works for comparing two smis.
- Label adaptor;
- __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor);
-
- // Check index against formal parameters count limit passed in
- // through register rax. Use unsigned comparison to get negative
- // check for free.
- __ cmpq(rdx, rax);
- __ j(above_equal, &slow);
-
- // Read the argument from the stack and return it.
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
- index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
- __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
- __ Ret();
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmpq(rdx, rcx);
- __ j(above_equal, &slow);
-
- // Read the argument from the stack and return it.
- index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
- __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
- index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
- __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
- __ Ret();
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ pop(rbx); // Return address.
- __ push(rdx);
- __ push(rbx);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
- // rsp[0] : return address
- // rsp[8] : number of parameters
- // rsp[16] : receiver displacement
- // rsp[24] : function
-
- // The displacement is used for skipping the return address and the
- // frame pointer on the stack. It is the offset of the last
- // parameter (if any) relative to the frame pointer.
- static const int kDisplacement = 2 * kPointerSize;
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(rdx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor_frame);
-
- // Get the length from the frame.
- __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
- __ jmp(&try_allocate);
-
- // Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ SmiToInteger32(rcx,
- Operand(rdx,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- // Space on stack must already hold a smi.
- __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
- // Do not clobber the length index for the indexing operation since
- // it is used compute the size for allocation later.
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
-
- // Try the new space allocation. Start out with computing the size of
- // the arguments object and the elements array.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ testl(rcx, rcx);
- __ j(zero, &add_arguments_object);
- __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
- __ bind(&add_arguments_object);
- __ addl(rcx, Immediate(GetArgumentsObjectSize()));
-
- // Do the allocation of both objects in one go.
- __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
-
- // Get the arguments boilerplate from the current (global) context.
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
- __ movq(rdi, Operand(rdi,
- Context::SlotOffset(GetArgumentsBoilerplateIndex())));
-
- // Copy the JS object part.
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
- __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
- __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
- __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
- __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
- __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
-
- if (type_ == NEW_NON_STRICT) {
- // Setup the callee in-object property.
- ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize),
- kScratchRegister);
- }
-
- // Get the length (smi tagged) and set that as an in-object property too.
- ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize),
- rcx);
-
- // If there are no actual arguments, we're done.
- Label done;
- __ SmiTest(rcx);
- __ j(zero, &done);
-
- // Get the parameters pointer from the stack and untag the length.
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
- // Setup the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ lea(rdi, Operand(rax, GetArgumentsObjectSize()));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
- __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
- __ SmiToInteger32(rcx, rcx); // Untag length for the loop below.
-
- // Copy the fixed array slots.
- Label loop;
- __ bind(&loop);
- __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
- __ addq(rdi, Immediate(kPointerSize));
- __ subq(rdx, Immediate(kPointerSize));
- __ decl(rcx);
- __ j(not_zero, &loop);
-
- // Return and remove the on-stack parameters.
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void RegExpExecStub::Generate(MacroAssembler* masm) {
- // Just jump directly to runtime if native RegExp is not selected at compile
- // time or if regexp entry in generated code is turned off runtime switch or
- // at compilation.
-#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else // V8_INTERPRETED_REGEXP
- if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
- return;
- }
-
- // Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: last_match_info (expected JSArray)
- // rsp[16]: previous index
- // rsp[24]: subject string
- // rsp[32]: JSRegExp object
-
- static const int kLastMatchInfoOffset = 1 * kPointerSize;
- static const int kPreviousIndexOffset = 2 * kPointerSize;
- static const int kSubjectOffset = 3 * kPointerSize;
- static const int kJSRegExpOffset = 4 * kPointerSize;
-
- Label runtime;
- // Ensure that a RegExp stack is allocated.
- Isolate* isolate = masm->isolate();
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate);
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate);
- __ Load(kScratchRegister, address_of_regexp_stack_memory_size);
- __ testq(kScratchRegister, kScratchRegister);
- __ j(zero, &runtime);
-
-
- // Check that the first argument is a JSRegExp object.
- __ movq(rax, Operand(rsp, kJSRegExpOffset));
- __ JumpIfSmi(rax, &runtime);
- __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
- __ j(not_equal, &runtime);
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ movq(rax, FieldOperand(rax, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- Condition is_smi = masm->CheckSmi(rax);
- __ Check(NegateCondition(is_smi),
- "Unexpected type for RegExp data, FixedArray expected");
- __ CmpObjectType(rax, FIXED_ARRAY_TYPE, kScratchRegister);
- __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
- }
-
- // rax: RegExp data (FixedArray)
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ SmiToInteger32(rbx, FieldOperand(rax, JSRegExp::kDataTagOffset));
- __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
- __ j(not_equal, &runtime);
-
- // rax: RegExp data (FixedArray)
- // Check that the number of captures fit in the static offsets vector buffer.
- __ SmiToInteger32(rdx,
- FieldOperand(rax, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- __ leal(rdx, Operand(rdx, rdx, times_1, 2));
- // Check that the static offsets vector buffer is large enough.
- __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
- __ j(above, &runtime);
-
- // rax: RegExp data (FixedArray)
- // rdx: Number of capture registers
- // Check that the second argument is a string.
- __ movq(rdi, Operand(rsp, kSubjectOffset));
- __ JumpIfSmi(rdi, &runtime);
- Condition is_string = masm->IsObjectStringType(rdi, rbx, rbx);
- __ j(NegateCondition(is_string), &runtime);
-
- // rdi: Subject string.
- // rax: RegExp data (FixedArray).
- // rdx: Number of capture registers.
- // Check that the third argument is a positive smi less than the string
- // length. A negative value will be greater (unsigned comparison).
- __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
- __ JumpIfNotSmi(rbx, &runtime);
- __ SmiCompare(rbx, FieldOperand(rdi, String::kLengthOffset));
- __ j(above_equal, &runtime);
-
- // rax: RegExp data (FixedArray)
- // rdx: Number of capture registers
- // Check that the fourth object is a JSArray object.
- __ movq(rdi, Operand(rsp, kLastMatchInfoOffset));
- __ JumpIfSmi(rdi, &runtime);
- __ CmpObjectType(rdi, JS_ARRAY_TYPE, kScratchRegister);
- __ j(not_equal, &runtime);
- // Check that the JSArray is in fast case.
- __ movq(rbx, FieldOperand(rdi, JSArray::kElementsOffset));
- __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information. Ensure no overflow in add.
- STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
- __ SmiToInteger32(rdi, FieldOperand(rbx, FixedArray::kLengthOffset));
- __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmpl(rdx, rdi);
- __ j(greater, &runtime);
-
- // rax: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- NearLabel seq_ascii_string, seq_two_byte_string, check_code;
- __ movq(rdi, Operand(rsp, kSubjectOffset));
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- // First check for flat two byte string.
- __ andb(rbx, Immediate(
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
- STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string);
- // Any other flat string must be a flat ascii string.
- __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
- __ j(zero, &seq_ascii_string);
-
- // Check for flat cons string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- STATIC_ASSERT(kExternalStringTag !=0);
- STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
- __ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
- __ j(not_zero, &runtime);
- // String is a cons string.
- __ CompareRoot(FieldOperand(rdi, ConsString::kSecondOffset),
- Heap::kEmptyStringRootIndex);
- __ j(not_equal, &runtime);
- __ movq(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
- // String is a cons string with empty second part.
- // rdi: first part of cons string.
- // rbx: map of first part of cons string.
- // Is first part a flat two byte string?
- __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
- Immediate(kStringRepresentationMask | kStringEncodingMask));
- STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string);
- // Any other flat string must be ascii.
- __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
- Immediate(kStringRepresentationMask));
- __ j(not_zero, &runtime);
-
- __ bind(&seq_ascii_string);
- // rdi: subject string (sequential ascii)
- // rax: RegExp data (FixedArray)
- __ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
- __ Set(rcx, 1); // Type is ascii.
- __ jmp(&check_code);
-
- __ bind(&seq_two_byte_string);
- // rdi: subject string (flat two-byte)
- // rax: RegExp data (FixedArray)
- __ movq(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
- __ Set(rcx, 0); // Type is two byte.
-
- __ bind(&check_code);
- // Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object otherwise it contains
- // the hole.
- __ CmpObjectType(r11, CODE_TYPE, kScratchRegister);
- __ j(not_equal, &runtime);
-
- // rdi: subject string
- // rcx: encoding of subject string (1 if ascii, 0 if two_byte);
- // r11: code
- // Load used arguments before starting to push arguments for call to native
- // RegExp code to avoid handling changing stack height.
- __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
-
- // rdi: subject string
- // rbx: previous index
- // rcx: encoding of subject string (1 if ascii 0 if two_byte);
- // r11: code
- // All checks done. Now push arguments for native regexp code.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->regexp_entry_native(), 1);
-
- // Isolates: note we add an additional parameter here (isolate pointer).
- static const int kRegExpExecuteArguments = 8;
- int argument_slots_on_stack =
- masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
- __ EnterApiExitFrame(argument_slots_on_stack);
-
- // Argument 8: Pass current isolate address.
- // __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
- // Immediate(ExternalReference::isolate_address()));
- __ LoadAddress(kScratchRegister, ExternalReference::isolate_address());
- __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
- kScratchRegister);
-
- // Argument 7: Indicate that this is a direct call from JavaScript.
- __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize),
- Immediate(1));
-
- // Argument 6: Start (high end) of backtracking stack memory area.
- __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
- __ movq(r9, Operand(kScratchRegister, 0));
- __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
- __ addq(r9, Operand(kScratchRegister, 0));
- // Argument 6 passed in r9 on Linux and on the stack on Windows.
-#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
-#endif
-
- // Argument 5: static offsets vector buffer.
- __ LoadAddress(r8,
- ExternalReference::address_of_static_offsets_vector(isolate));
- // Argument 5 passed in r8 on Linux and on the stack on Windows.
-#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize), r8);
-#endif
-
- // First four arguments are passed in registers on both Linux and Windows.
-#ifdef _WIN64
- Register arg4 = r9;
- Register arg3 = r8;
- Register arg2 = rdx;
- Register arg1 = rcx;
-#else
- Register arg4 = rcx;
- Register arg3 = rdx;
- Register arg2 = rsi;
- Register arg1 = rdi;
-#endif
-
- // Keep track on aliasing between argX defined above and the registers used.
- // rdi: subject string
- // rbx: previous index
- // rcx: encoding of subject string (1 if ascii 0 if two_byte);
- // r11: code
-
- // Argument 4: End of string data
- // Argument 3: Start of string data
- NearLabel setup_two_byte, setup_rest;
- __ testb(rcx, rcx); // Last use of rcx as encoding of subject string.
- __ j(zero, &setup_two_byte);
- __ SmiToInteger32(rcx, FieldOperand(rdi, String::kLengthOffset));
- __ lea(arg4, FieldOperand(rdi, rcx, times_1, SeqAsciiString::kHeaderSize));
- __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqAsciiString::kHeaderSize));
- __ jmp(&setup_rest);
- __ bind(&setup_two_byte);
- __ SmiToInteger32(rcx, FieldOperand(rdi, String::kLengthOffset));
- __ lea(arg4, FieldOperand(rdi, rcx, times_2, SeqTwoByteString::kHeaderSize));
- __ lea(arg3, FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
-
- __ bind(&setup_rest);
- // Argument 2: Previous index.
- __ movq(arg2, rbx);
-
- // Argument 1: Subject string.
-#ifdef _WIN64
- __ movq(arg1, rdi);
-#else
- // Already there in AMD64 calling convention.
- ASSERT(arg1.is(rdi));
-#endif
-
- // Locate the code entry and call it.
- __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(r11);
-
- __ LeaveApiExitFrame();
-
- // Check the result.
- NearLabel success;
- Label exception;
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
- __ j(equal, &success);
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
- __ j(equal, &exception);
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
- // If none of the above, it can only be retry.
- // Handle that in the runtime system.
- __ j(not_equal, &runtime);
-
- // For failure return null.
- __ LoadRoot(rax, Heap::kNullValueRootIndex);
- __ ret(4 * kPointerSize);
-
- // Load RegExp data.
- __ bind(&success);
- __ movq(rax, Operand(rsp, kJSRegExpOffset));
- __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
- __ SmiToInteger32(rax,
- FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- __ leal(rdx, Operand(rax, rax, times_1, 2));
-
- // rdx: Number of capture registers
- // Load last_match_info which is still known to be a fast case JSArray.
- __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
- __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
-
- // rbx: last_match_info backing store (FixedArray)
- // rdx: number of capture registers
- // Store the capture count.
- __ Integer32ToSmi(kScratchRegister, rdx);
- __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
- kScratchRegister);
- // Store last subject and last input.
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
- __ movq(rcx, rbx);
- __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
- __ movq(rcx, rbx);
- __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
-
- // Get the static offsets vector filled by the native regexp code.
- __ LoadAddress(rcx,
- ExternalReference::address_of_static_offsets_vector(isolate));
-
- // rbx: last_match_info backing store (FixedArray)
- // rcx: offsets vector
- // rdx: number of capture registers
- NearLabel next_capture, done;
- // Capture register counter starts from number of capture registers and
- // counts down until wraping after zero.
- __ bind(&next_capture);
- __ subq(rdx, Immediate(1));
- __ j(negative, &done);
- // Read the value from the static offsets vector buffer and make it a smi.
- __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
- __ Integer32ToSmi(rdi, rdi);
- // Store the smi value in the last match info.
- __ movq(FieldOperand(rbx,
- rdx,
- times_pointer_size,
- RegExpImpl::kFirstCaptureOffset),
- rdi);
- __ jmp(&next_capture);
- __ bind(&done);
-
- // Return last match info.
- __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
- __ ret(4 * kPointerSize);
-
- __ bind(&exception);
- // Result must now be exception. If there is no pending exception already a
- // stack overflow (on the backtrack stack) was detected in RegExp code but
- // haven't created the exception yet. Handle that in the runtime system.
- // TODO(592): Rerunning the RegExp to get the stack overflow exception.
- ExternalReference pending_exception_address(
- Isolate::k_pending_exception_address, isolate);
- Operand pending_exception_operand =
- masm->ExternalOperand(pending_exception_address, rbx);
- __ movq(rax, pending_exception_operand);
- __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ cmpq(rax, rdx);
- __ j(equal, &runtime);
- __ movq(pending_exception_operand, rdx);
-
- __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
- NearLabel termination_exception;
- __ j(equal, &termination_exception);
- __ Throw(rax);
-
- __ bind(&termination_exception);
- __ ThrowUncatchable(TERMINATION, rax);
-
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#endif // V8_INTERPRETED_REGEXP
-}
-
-
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- const int kMaxInlineLength = 100;
- Label slowcase;
- Label done;
- __ movq(r8, Operand(rsp, kPointerSize * 3));
- __ JumpIfNotSmi(r8, &slowcase);
- __ SmiToInteger32(rbx, r8);
- __ cmpl(rbx, Immediate(kMaxInlineLength));
- __ j(above, &slowcase);
- // Smi-tagging is equivalent to multiplying by 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- // Allocate RegExpResult followed by FixedArray with size in rbx.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
- times_pointer_size,
- rbx, // In: Number of elements.
- rax, // Out: Start of allocation (tagged).
- rcx, // Out: End of allocation.
- rdx, // Scratch register
- &slowcase,
- TAG_OBJECT);
- // rax: Start of allocated area, object-tagged.
- // rbx: Number of array elements as int32.
- // r8: Number of array elements as smi.
-
- // Set JSArray map to global.regexp_result_map().
- __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_INDEX));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
- __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
-
- // Set empty properties FixedArray.
- __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
-
- // Set elements to point to FixedArray allocated right after the JSArray.
- __ lea(rcx, Operand(rax, JSRegExpResult::kSize));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
-
- // Set input, index and length fields from arguments.
- __ movq(r8, Operand(rsp, kPointerSize * 1));
- __ movq(FieldOperand(rax, JSRegExpResult::kInputOffset), r8);
- __ movq(r8, Operand(rsp, kPointerSize * 2));
- __ movq(FieldOperand(rax, JSRegExpResult::kIndexOffset), r8);
- __ movq(r8, Operand(rsp, kPointerSize * 3));
- __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
-
- // Fill out the elements FixedArray.
- // rax: JSArray.
- // rcx: FixedArray.
- // rbx: Number of elements in array as int32.
-
- // Set map.
- __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(rcx, HeapObject::kMapOffset), kScratchRegister);
- // Set length.
- __ Integer32ToSmi(rdx, rbx);
- __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
- // Fill contents of fixed-array with the-hole.
- __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
- // Fill fixed array elements with hole.
- // rax: JSArray.
- // rbx: Number of elements in array that remains to be filled, as int32.
- // rcx: Start of elements in FixedArray.
- // rdx: the hole.
- Label loop;
- __ testl(rbx, rbx);
- __ bind(&loop);
- __ j(less_equal, &done); // Jump if rcx is negative or zero.
- __ subl(rbx, Immediate(1));
- __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
- __ jmp(&loop);
-
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- __ bind(&slowcase);
- __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
-}
-
-
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ SmiToInteger32(
- mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shrl(mask, Immediate(1));
- __ subq(mask, Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label is_smi;
- Label load_result_from_cache;
- if (!object_is_smi) {
- __ JumpIfSmi(object, &is_smi);
- __ CheckMap(object, FACTORY->heap_number_map(), not_found, true);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
-
- Register index = scratch;
- Register probe = mask;
- __ movq(probe,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- ASSERT(CpuFeatures::IsSupported(SSE2));
- CpuFeatures::Scope fscope(SSE2);
- __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache);
- }
-
- __ bind(&is_smi);
- __ SmiToInteger32(scratch, object);
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
-
- Register index = scratch;
- // Check if the entry is the smi we are looking for.
- __ cmpq(object,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ movq(result,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize + kPointerSize));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->number_to_string_native(), 1);
-}
-
-
-void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
- Register hash,
- Register mask) {
- __ and_(hash, mask);
- // Each entry in string cache consists of two pointer sized fields,
- // but times_twice_pointer_size (multiplication by 16) scale factor
- // is not supported by addrmode on x64 platform.
- // So we have to premultiply entry index before lookup.
- __ shl(hash, Immediate(kPointerSizeLog2 + 1));
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ movq(rbx, Operand(rsp, kPointerSize));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
-static int NegativeComparisonResult(Condition cc) {
- ASSERT(cc != equal);
- ASSERT((cc == less) || (cc == less_equal)
- || (cc == greater) || (cc == greater_equal));
- return (cc == greater || cc == greater_equal) ? LESS : GREATER;
-}
-
-
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-
- Label check_unequal_objects, done;
-
- // Compare two smis if required.
- if (include_smi_compare_) {
- Label non_smi, smi_done;
- __ JumpIfNotBothSmi(rax, rdx, &non_smi);
- __ subq(rdx, rax);
- __ j(no_overflow, &smi_done);
- __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
- __ bind(&smi_done);
- __ movq(rax, rdx);
- __ ret(0);
- __ bind(&non_smi);
- } else if (FLAG_debug_code) {
- Label ok;
- __ JumpIfNotSmi(rdx, &ok);
- __ JumpIfNotSmi(rax, &ok);
- __ Abort("CompareStub: smi operands");
- __ bind(&ok);
- }
-
- // The compare stub returns a positive, negative, or zero 64-bit integer
- // value in rax, corresponding to result of comparing the two inputs.
- // NOTICE! This code is only reached after a smi-fast-case check, so
- // it is certain that at least one operand isn't a smi.
-
- // Two identical objects are equal unless they are both NaN or undefined.
- {
- NearLabel not_identical;
- __ cmpq(rax, rdx);
- __ j(not_equal, &not_identical);
-
- if (cc_ != equal) {
- // Check for undefined. undefined OP undefined is false even though
- // undefined == undefined.
- NearLabel check_for_nan;
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &check_for_nan);
- __ Set(rax, NegativeComparisonResult(cc_));
- __ ret(0);
- __ bind(&check_for_nan);
- }
-
- // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
- // so we do the second best thing - test it ourselves.
- // Note: if cc_ != equal, never_nan_nan_ is not used.
- // We cannot set rax to EQUAL until just before return because
- // rax must be unchanged on jump to not_identical.
-
- if (never_nan_nan_ && (cc_ == equal)) {
- __ Set(rax, EQUAL);
- __ ret(0);
- } else {
- NearLabel heap_number;
- // If it's not a heap number, then return equal for (in)equality operator.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- FACTORY->heap_number_map());
- __ j(equal, &heap_number);
- if (cc_ != equal) {
- // Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(above_equal, &not_identical);
- }
- __ Set(rax, EQUAL);
- __ ret(0);
-
- __ bind(&heap_number);
- // It is a heap number, so return equal if it's not NaN.
- // For NaN, return 1 for every condition except greater and
- // greater-equal. Return -1 for them, so the comparison yields
- // false for all conditions except not-equal.
- __ Set(rax, EQUAL);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm0);
- __ setcc(parity_even, rax);
- // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
- if (cc_ == greater_equal || cc_ == greater) {
- __ neg(rax);
- }
- __ ret(0);
- }
-
- __ bind(&not_identical);
- }
-
- if (cc_ == equal) { // Both strict and non-strict.
- Label slow; // Fallthrough label.
-
- // If we're doing a strict equality comparison, we don't have to do
- // type conversion, so we generate code to do fast comparison for objects
- // and oddballs. Non-smi numbers and strings still go through the usual
- // slow-case code.
- if (strict_) {
- // If either is a Smi (we know that not both are), then they can only
- // be equal if the other is a HeapNumber. If so, use the slow case.
- {
- Label not_smis;
- __ SelectNonSmi(rbx, rax, rdx, &not_smis);
-
- // Check if the non-smi operand is a heap number.
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- FACTORY->heap_number_map());
- // If heap number, handle it in the slow case.
- __ j(equal, &slow);
- // Return non-equal. ebx (the lower half of rbx) is not zero.
- __ movq(rax, rbx);
- __ ret(0);
-
- __ bind(&not_smis);
- }
-
- // If either operand is a JSObject or an oddball value, then they are not
- // equal since their pointers are different
- // There is no test for undetectability in strict equality.
-
- // If the first object is a JS object, we have done pointer comparison.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- NearLabel first_non_object;
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &first_non_object);
- // Return non-zero (eax (not rax) is not zero)
- Label return_not_equal;
- STATIC_ASSERT(kHeapObjectTag != 0);
- __ bind(&return_not_equal);
- __ ret(0);
-
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(rcx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(above_equal, &return_not_equal);
-
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(rcx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- // Fall through to the general case.
- }
- __ bind(&slow);
- }
-
- // Generate the number comparison code.
- if (include_number_compare_) {
- Label non_number_comparison;
- NearLabel unordered;
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
- __ xorl(rax, rax);
- __ xorl(rcx, rcx);
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ setcc(above, rax);
- __ setcc(below, rcx);
- __ subq(rax, rcx);
- __ ret(0);
-
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc_ != not_equal);
- if (cc_ == less || cc_ == less_equal) {
- __ Set(rax, 1);
- } else {
- __ Set(rax, -1);
- }
- __ ret(0);
-
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
- }
-
- // Fast negative check for symbol-to-symbol equality.
- Label check_for_strings;
- if (cc_ == equal) {
- BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
- BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
-
- // We've already checked for object identity, so if both operands
- // are symbols they aren't equal. Register eax (not rax) already holds a
- // non-zero value, which indicates not equal, so just return.
- __ ret(0);
- }
-
- __ bind(&check_for_strings);
-
- __ JumpIfNotBothSequentialAsciiStrings(
- rdx, rax, rcx, rbx, &check_unequal_objects);
-
- // Inline comparison of ascii strings.
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- rdx,
- rax,
- rcx,
- rbx,
- rdi,
- r8);
-
-#ifdef DEBUG
- __ Abort("Unexpected fall-through from string comparison");
-#endif
-
- __ bind(&check_unequal_objects);
- if (cc_ == equal && !strict_) {
- // Not strict equality. Objects are unequal if
- // they are both JSObjects and not undetectable,
- // and their pointers are different.
- NearLabel not_both_objects, return_unequal;
- // At most one is a smi, so we can test for smi by adding the two.
- // A smi plus a heap object has the low bit set, a heap object plus
- // a heap object has the low bit clear.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagMask == 1);
- __ lea(rcx, Operand(rax, rdx, times_1, 0));
- __ testb(rcx, Immediate(kSmiTagMask));
- __ j(not_zero, &not_both_objects);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
- __ j(below, &not_both_objects);
- __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &not_both_objects);
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &return_unequal);
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &return_unequal);
- // The objects are both undetectable, so they both compare as the value
- // undefined, and are equal.
- __ Set(rax, EQUAL);
- __ bind(&return_unequal);
- // Return non-equal by returning the non-zero object pointer in rax,
- // or return equal if we fell through to here.
- __ ret(0);
- __ bind(&not_both_objects);
- }
-
- // Push arguments below the return address to prepare jump to builtin.
- __ pop(rcx);
- __ push(rdx);
- __ push(rax);
-
- // Figure out which native to call and setup the arguments.
- Builtins::JavaScript builtin;
- if (cc_ == equal) {
- builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
- } else {
- builtin = Builtins::COMPARE;
- __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
- }
-
- // Restore return address on the stack.
- __ push(rcx);
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(builtin, JUMP_FUNCTION);
-}
-
-
-void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ JumpIfSmi(object, label);
- __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(scratch,
- FieldOperand(scratch, Map::kInstanceTypeOffset));
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- STATIC_ASSERT(kSymbolTag != 0);
- __ testb(scratch, Immediate(kIsSymbolMask));
- __ j(zero, label);
-}
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- Label slow;
-
- // If the receiver might be a value (string, number or boolean) check for this
- // and box it if it is.
- if (ReceiverMightBeValue()) {
- // Get the receiver from the stack.
- // +1 ~ return address
- Label receiver_is_value, receiver_is_js_object;
- __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
-
- // Check if receiver is a smi (which is a number value).
- __ JumpIfSmi(rax, &receiver_is_value);
-
- // Check if the receiver is a valid JS object.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
- __ j(above_equal, &receiver_is_js_object);
-
- // Call the runtime to box the value.
- __ bind(&receiver_is_value);
- __ EnterInternalFrame();
- __ push(rax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ LeaveInternalFrame();
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
-
- __ bind(&receiver_is_js_object);
- }
-
- // Get the function to call from the stack.
- // +2 ~ receiver, return address
- __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
-
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(rdi, &slow);
- // Goto slow case if we do not have a function.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &slow);
-
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc_);
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
-
- // Slow-case: Non-function called.
- __ bind(&slow);
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
- __ Set(rax, argc_);
- __ Set(rbx, 0);
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor =
- Isolate::Current()->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
-}
-
-
-bool CEntryStub::NeedsImmovableCode() {
- return false;
-}
-
-
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- // Throw exception in eax.
- __ Throw(rax);
-}
-
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate_scope) {
- // rax: result parameter for PerformGC, if any.
- // rbx: pointer to C function (C callee-saved).
- // rbp: frame pointer (restored after C call).
- // rsp: stack pointer (restored after C call).
- // r14: number of arguments including receiver (C callee-saved).
- // r15: pointer to the first argument (C callee-saved).
- // This pointer is reused in LeaveExitFrame(), so it is stored in a
- // callee-saved register.
-
- // Simple results returned in rax (both AMD64 and Win64 calling conventions).
- // Complex results must be written to address passed as first argument.
- // AMD64 calling convention: a struct of two pointers in rax+rdx
-
- // Check stack alignment.
- if (FLAG_debug_code) {
- __ CheckStackAlignment();
- }
-
- if (do_gc) {
- // Pass failure code returned from last attempt as first argument to
- // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
- // stack is known to be aligned. This function takes one argument which is
- // passed in register.
-#ifdef _WIN64
- __ movq(rcx, rax);
-#else // _WIN64
- __ movq(rdi, rax);
-#endif
- __ movq(kScratchRegister,
- FUNCTION_ADDR(Runtime::PerformGC),
- RelocInfo::RUNTIME_ENTRY);
- __ call(kScratchRegister);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
- if (always_allocate_scope) {
- Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
- __ incl(scope_depth_operand);
- }
-
- // Call C function.
-#ifdef _WIN64
- // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
- // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
- __ movq(StackSpaceOperand(0), r14); // argc.
- __ movq(StackSpaceOperand(1), r15); // argv.
- if (result_size_ < 2) {
- // Pass a pointer to the Arguments object as the first argument.
- // Return result in single register (rax).
- __ lea(rcx, StackSpaceOperand(0));
- __ LoadAddress(rdx, ExternalReference::isolate_address());
- } else {
- ASSERT_EQ(2, result_size_);
- // Pass a pointer to the result location as the first argument.
- __ lea(rcx, StackSpaceOperand(2));
- // Pass a pointer to the Arguments object as the second argument.
- __ lea(rdx, StackSpaceOperand(0));
- __ LoadAddress(r8, ExternalReference::isolate_address());
- }
-
-#else // _WIN64
- // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
- __ movq(rdi, r14); // argc.
- __ movq(rsi, r15); // argv.
- __ movq(rdx, ExternalReference::isolate_address());
-#endif
- __ call(rbx);
- // Result is in rax - do not destroy this register!
-
- if (always_allocate_scope) {
- Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
- __ decl(scope_depth_operand);
- }
-
- // Check for failure result.
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
-#ifdef _WIN64
- // If return value is on the stack, pop it to registers.
- if (result_size_ > 1) {
- ASSERT_EQ(2, result_size_);
- // Read result values stored on stack. Result is stored
- // above the four argument mirror slots and the two
- // Arguments object slots.
- __ movq(rax, Operand(rsp, 6 * kPointerSize));
- __ movq(rdx, Operand(rsp, 7 * kPointerSize));
- }
-#endif
- __ lea(rcx, Operand(rax, 1));
- // Lower 2 bits of rcx are 0 iff rax has failure tag.
- __ testl(rcx, Immediate(kFailureTagMask));
- __ j(zero, &failure_returned);
-
- // Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles_);
- __ ret(0);
-
- // Handling of failure.
- __ bind(&failure_returned);
-
- NearLabel retry;
- // If the returned exception is RETRY_AFTER_GC continue at retry label
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ j(zero, &retry);
-
- // Special handling of out of memory exceptions.
- __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
- __ cmpq(rax, kScratchRegister);
- __ j(equal, throw_out_of_memory_exception);
-
- // Retrieve the pending exception and clear the variable.
- ExternalReference pending_exception_address(
- Isolate::k_pending_exception_address, masm->isolate());
- Operand pending_exception_operand =
- masm->ExternalOperand(pending_exception_address);
- __ movq(rax, pending_exception_operand);
- __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ movq(pending_exception_operand, rdx);
-
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
- __ j(equal, throw_termination_exception);
-
- // Handle normal exception.
- __ jmp(throw_normal_exception);
-
- // Retry.
- __ bind(&retry);
-}
-
-
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- __ ThrowUncatchable(type, rax);
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // rax: number of arguments including receiver
- // rbx: pointer to C function (C callee-saved)
- // rbp: frame pointer of calling JS frame (restored after C call)
- // rsp: stack pointer (restored after C call)
- // rsi: current context (restored)
-
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
-
- // Enter the exit frame that transitions from JavaScript to C++.
-#ifdef _WIN64
- int arg_stack_space = (result_size_ < 2 ? 2 : 4);
-#else
- int arg_stack_space = 0;
-#endif
- __ EnterExitFrame(arg_stack_space, save_doubles_);
-
- // rax: Holds the context at this point, but should not be used.
- // On entry to code generated by GenerateCore, it must hold
- // a failure result if the collect_garbage argument to GenerateCore
- // is true. This failure result can be the result of code
- // generated by a previous call to GenerateCore. The value
- // of rax is then passed to Runtime::PerformGC.
- // rbx: pointer to builtin function (C callee-saved).
- // rbp: frame pointer of exit frame (restored after C call).
- // rsp: stack pointer (restored after C call).
- // r14: number of arguments including receiver (C callee-saved).
- // r15: argv pointer (C callee-saved).
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ movq(rax, failure, RelocInfo::NONE);
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
-
- __ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
-
- __ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
-}
-
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- Label invoke, exit;
-#ifdef ENABLE_LOGGING_AND_PROFILING
- Label not_outermost_js, not_outermost_js_2;
-#endif
- { // NOLINT. Scope block confuses linter.
- MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
- // Setup frame.
- __ push(rbp);
- __ movq(rbp, rsp);
-
- // Push the stack frame type marker twice.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- // Scratch register is neither callee-save, nor an argument register on any
- // platform. It's free to use at this point.
- // Cannot use smi-register for loading yet.
- __ movq(kScratchRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
- RelocInfo::NONE);
- __ push(kScratchRegister); // context slot
- __ push(kScratchRegister); // function slot
- // Save callee-saved registers (X64/Win64 calling conventions).
- __ push(r12);
- __ push(r13);
- __ push(r14);
- __ push(r15);
-#ifdef _WIN64
- __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
- __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
-#endif
- __ push(rbx);
- // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low low 64 bits are
- // callee save as well.
-
- // Set up the roots and smi constant registers.
- // Needs to be done before any further smi loads.
- __ InitializeSmiConstantRegister();
- __ InitializeRootRegister();
- }
-
- Isolate* isolate = masm->isolate();
-
- // Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Isolate::k_c_entry_fp_address, isolate);
- {
- Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
- __ push(c_entry_fp_operand);
- }
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, isolate);
- __ Load(rax, js_entry_sp);
- __ testq(rax, rax);
- __ j(not_zero, &not_outermost_js);
- __ movq(rax, rbp);
- __ Store(js_entry_sp, rax);
- __ bind(&not_outermost_js);
-#endif
-
- // Call a faked try-block that does the invoke.
- __ call(&invoke);
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception(Isolate::k_pending_exception_address,
- isolate);
- __ Store(pending_exception, rax);
- __ movq(rax, Failure::Exception(), RelocInfo::NONE);
- __ jmp(&exit);
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
-
- // Clear any pending exceptions.
- __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
- __ Store(pending_exception, rax);
-
- // Fake a receiver (NULL).
- __ push(Immediate(0)); // receiver
-
- // Invoke the function by calling through JS entry trampoline
- // builtin and pop the faked function when we return. We load the address
- // from an external reference instead of inlining the call target address
- // directly in the code, because the builtin stubs may not have been
- // generated yet at the time this code is generated.
- if (is_construct) {
- ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
- isolate);
- __ Load(rax, construct_entry);
- } else {
- ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
- __ Load(rax, entry);
- }
- __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
- __ call(kScratchRegister);
-
- // Unlink this frame from the handler chain.
- Operand handler_operand =
- masm->ExternalOperand(ExternalReference(Isolate::k_handler_address,
- isolate));
- __ pop(handler_operand);
- // Pop next_sp.
- __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If current RBP value is the same as js_entry_sp value, it means that
- // the current function is the outermost.
- __ movq(kScratchRegister, js_entry_sp);
- __ cmpq(rbp, Operand(kScratchRegister, 0));
- __ j(not_equal, &not_outermost_js_2);
- __ movq(Operand(kScratchRegister, 0), Immediate(0));
- __ bind(&not_outermost_js_2);
-#endif
-
- // Restore the top frame descriptor from the stack.
- __ bind(&exit);
- {
- Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
- __ pop(c_entry_fp_operand);
- }
-
- // Restore callee-saved registers (X64 conventions).
- __ pop(rbx);
-#ifdef _WIN64
- // Callee save on in Win64 ABI, arguments/volatile in AMD64 ABI.
- __ pop(rsi);
- __ pop(rdi);
-#endif
- __ pop(r15);
- __ pop(r14);
- __ pop(r13);
- __ pop(r12);
- __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
-
- // Restore frame pointer and return.
- __ pop(rbp);
- __ ret(0);
-}
-
-
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Implements "value instanceof function" operator.
- // Expected input state with no inline cache:
- // rsp[0] : return address
- // rsp[1] : function pointer
- // rsp[2] : value
- // Expected input state with an inline one-element cache:
- // rsp[0] : return address
- // rsp[1] : offset from return address to location of inline cache
- // rsp[2] : function pointer
- // rsp[3] : value
- // Returns a bitwise zero to indicate that the value
- // is and instance of the function and anything else to
- // indicate that the value is not an instance.
-
- static const int kOffsetToMapCheckValue = 5;
- static const int kOffsetToResultValue = 21;
- // The last 4 bytes of the instruction sequence
- // movq(rax, FieldOperand(rdi, HeapObject::kMapOffset)
- // Move(kScratchRegister, FACTORY->the_hole_value())
- // in front of the hole value address.
- static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78;
- // The last 4 bytes of the instruction sequence
- // __ j(not_equal, &cache_miss);
- // __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
- // before the offset of the hole value in the root array.
- static const unsigned int kWordBeforeResultValue = 0x458B4909;
- // Only the inline check flag is supported on X64.
- ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
- int extra_stack_space = HasCallSiteInlineCheck() ? kPointerSize : 0;
-
- // Get the object - go slow case if it's a smi.
- Label slow;
-
- __ movq(rax, Operand(rsp, 2 * kPointerSize + extra_stack_space));
- __ JumpIfSmi(rax, &slow);
-
- // Check that the left hand is a JS object. Leave its map in rax.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
- __ j(below, &slow);
- __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
- __ j(above, &slow);
-
- // Get the prototype of the function.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize + extra_stack_space));
- // rdx is function, rax is map.
-
- // If there is a call site cache don't look in the global cache, but do the
- // real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck()) {
- // Look up the function and the map in the instanceof cache.
- NearLabel miss;
- __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
- __ j(not_equal, &miss);
- __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
- __ j(not_equal, &miss);
- __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
- __ bind(&miss);
- }
-
- __ TryGetFunctionPrototype(rdx, rbx, &slow);
-
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(rbx, &slow);
- __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
- __ j(below, &slow);
- __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
- __ j(above, &slow);
-
- // Register mapping:
- // rax is object map.
- // rdx is function.
- // rbx is function prototype.
- if (!HasCallSiteInlineCheck()) {
- __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
- } else {
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
- __ movq(Operand(kScratchRegister, kOffsetToMapCheckValue), rax);
- if (FLAG_debug_code) {
- __ movl(rdi, Immediate(kWordBeforeMapCheckValue));
- __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
- __ Assert(equal, "InstanceofStub unexpected call site cache.");
- }
- }
-
- __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
-
- // Loop through the prototype chain looking for the function prototype.
- NearLabel loop, is_instance, is_not_instance;
- __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
- __ bind(&loop);
- __ cmpq(rcx, rbx);
- __ j(equal, &is_instance);
- __ cmpq(rcx, kScratchRegister);
- // The code at is_not_instance assumes that kScratchRegister contains a
- // non-zero GCable value (the null object in this case).
- __ j(equal, &is_not_instance);
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
- __ jmp(&loop);
-
- __ bind(&is_instance);
- if (!HasCallSiteInlineCheck()) {
- __ xorl(rax, rax);
- // Store bitwise zero in the cache. This is a Smi in GC terms.
- STATIC_ASSERT(kSmiTag == 0);
- __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- } else {
- // Store offset of true in the root array at the inline check site.
- ASSERT((Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias
- == 0xB0 - 0x100);
- __ movl(rax, Immediate(0xB0)); // TrueValue is at -10 * kPointerSize.
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
- __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
- if (FLAG_debug_code) {
- __ movl(rax, Immediate(kWordBeforeResultValue));
- __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
- __ Assert(equal, "InstanceofStub unexpected call site cache.");
- }
- __ xorl(rax, rax);
- }
- __ ret(2 * kPointerSize + extra_stack_space);
-
- __ bind(&is_not_instance);
- if (!HasCallSiteInlineCheck()) {
- // We have to store a non-zero value in the cache.
- __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
- } else {
- // Store offset of false in the root array at the inline check site.
- ASSERT((Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias
- == 0xB8 - 0x100);
- __ movl(rax, Immediate(0xB8)); // FalseValue is at -9 * kPointerSize.
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
- __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
- if (FLAG_debug_code) {
- __ movl(rax, Immediate(kWordBeforeResultValue));
- __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
- __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
- }
- }
- __ ret(2 * kPointerSize + extra_stack_space);
-
- // Slow-case: Go through the JavaScript implementation.
- __ bind(&slow);
- if (HasCallSiteInlineCheck()) {
- // Remove extra value from the stack.
- __ pop(rcx);
- __ pop(rax);
- __ push(rcx);
- }
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
-}
-
-
-// Passing arguments in registers is not supported.
-Register InstanceofStub::left() { return no_reg; }
-
-
-Register InstanceofStub::right() { return no_reg; }
-
-
-int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate
- // stubs the never NaN NaN condition is only taken into account if the
- // condition is equals.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- return ConditionField::encode(static_cast<unsigned>(cc_))
- | RegisterField::encode(false) // lhs_ and rhs_ are not used
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_)
- | IncludeSmiCompareField::encode(include_smi_compare_);
-}
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
-
- const char* cc_name;
- switch (cc_) {
- case less: cc_name = "LT"; break;
- case greater: cc_name = "GT"; break;
- case less_equal: cc_name = "LE"; break;
- case greater_equal: cc_name = "GE"; break;
- case equal: cc_name = "EQ"; break;
- case not_equal: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
-
- const char* strict_name = "";
- if (strict_ && (cc_ == equal || cc_ == not_equal)) {
- strict_name = "_STRICT";
- }
-
- const char* never_nan_nan_name = "";
- if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
- never_nan_nan_name = "_NO_NAN";
- }
-
- const char* include_number_compare_name = "";
- if (!include_number_compare_) {
- include_number_compare_name = "_NO_NUMBER";
- }
-
- const char* include_smi_compare_name = "";
- if (!include_smi_compare_) {
- include_smi_compare_name = "_NO_SMI";
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "CompareStub_%s%s%s%s",
- cc_name,
- strict_name,
- never_nan_nan_name,
- include_number_compare_name,
- include_smi_compare_name);
- return name_;
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharCodeAtGenerator
-
-void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- Label flat_string;
- Label ascii_string;
- Label got_char_code;
-
- // If the receiver is a smi trigger the non-string case.
- __ JumpIfSmi(object_, receiver_not_string_);
-
- // Fetch the instance type of the receiver into result register.
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the receiver is not a string trigger the non-string case.
- __ testb(result_, Immediate(kIsNotStringMask));
- __ j(not_zero, receiver_not_string_);
-
- // If the index is non-smi trigger the non-smi case.
- __ JumpIfNotSmi(index_, &index_not_smi_);
-
- // Put smi-tagged index into scratch register.
- __ movq(scratch_, index_);
- __ bind(&got_smi_index_);
-
- // Check for index out of range.
- __ SmiCompare(scratch_, FieldOperand(object_, String::kLengthOffset));
- __ j(above_equal, index_out_of_range_);
-
- // We need special handling for non-flat strings.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(result_, Immediate(kStringRepresentationMask));
- __ j(zero, &flat_string);
-
- // Handle non-flat strings.
- __ testb(result_, Immediate(kIsConsStringMask));
- __ j(zero, &call_runtime_);
-
- // ConsString.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ CompareRoot(FieldOperand(object_, ConsString::kSecondOffset),
- Heap::kEmptyStringRootIndex);
- __ j(not_equal, &call_runtime_);
- // Get the first of the two strings and load its instance type.
- __ movq(object_, FieldOperand(object_, ConsString::kFirstOffset));
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the first cons component is also non-flat, then go to runtime.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(result_, Immediate(kStringRepresentationMask));
- __ j(not_zero, &call_runtime_);
-
- // Check for 1-byte or 2-byte string.
- __ bind(&flat_string);
- STATIC_ASSERT(kAsciiStringTag != 0);
- __ testb(result_, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string);
-
- // 2-byte string.
- // Load the 2-byte character code into the result register.
- __ SmiToInteger32(scratch_, scratch_);
- __ movzxwl(result_, FieldOperand(object_,
- scratch_, times_2,
- SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ SmiToInteger32(scratch_, scratch_);
- __ movzxbl(result_, FieldOperand(object_,
- scratch_, times_1,
- SeqAsciiString::kHeaderSize));
- __ bind(&got_char_code);
- __ Integer32ToSmi(result_, result_);
- __ bind(&exit_);
-}
-
-
-void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
-
- // Index is not a smi.
- __ bind(&index_not_smi_);
- // If index is a heap number, try converting it to an integer.
- __ CheckMap(index_, FACTORY->heap_number_map(), index_not_number_, true);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_);
- __ push(index_); // Consumed by runtime conversion function.
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
- } else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
- }
- if (!scratch_.is(rax)) {
- // Save the conversion result before the pop instructions below
- // have a chance to overwrite it.
- __ movq(scratch_, rax);
- }
- __ pop(index_);
- __ pop(object_);
- // Reload the instance type.
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- call_helper.AfterCall(masm);
- // If index is still not a smi, it must be out of range.
- __ JumpIfNotSmi(scratch_, index_out_of_range_);
- // Otherwise, return to the fast path.
- __ jmp(&got_smi_index_);
-
- // Call runtime. We get here when the receiver is a string and the
- // index is a number, but the code of getting the actual character
- // is too complex (e.g., when the string needs to be flattened).
- __ bind(&call_runtime_);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- if (!result_.is(rax)) {
- __ movq(result_, rax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- __ JumpIfNotSmi(code_, &slow_case_);
- __ SmiCompare(code_, Smi::FromInt(String::kMaxAsciiCharCode));
- __ j(above, &slow_case_);
-
- __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
- __ movq(result_, FieldOperand(result_, index.reg, index.scale,
- FixedArray::kHeaderSize));
- __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
- __ j(equal, &slow_case_);
- __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- if (!result_.is(rax)) {
- __ movq(result_, rax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharAtGenerator
-
-void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
- char_code_at_generator_.GenerateFast(masm);
- char_from_code_generator_.GenerateFast(masm);
-}
-
-
-void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, call_helper);
- char_from_code_generator_.GenerateSlow(masm, call_helper);
-}
-
-
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label string_add_runtime, call_builtin;
- Builtins::JavaScript builtin_id = Builtins::ADD;
-
- // Load the two arguments.
- __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument (left).
- __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument (right).
-
- // Make sure that both arguments are strings if not known in advance.
- if (flags_ == NO_STRING_ADD_FLAGS) {
- Condition is_smi;
- is_smi = masm->CheckSmi(rax);
- __ j(is_smi, &string_add_runtime);
- __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
- __ j(above_equal, &string_add_runtime);
-
- // First argument is a a string, test second.
- is_smi = masm->CheckSmi(rdx);
- __ j(is_smi, &string_add_runtime);
- __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
- __ j(above_equal, &string_add_runtime);
- } else {
- // Here at least one of the arguments is definitely a string.
- // We convert the one that is not known to be a string.
- if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
- GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
- GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
- }
-
- // Both arguments are strings.
- // rax: first string
- // rdx: second string
- // Check if either of the strings are empty. In that case return the other.
- NearLabel second_not_zero_length, both_not_zero_length;
- __ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
- __ SmiTest(rcx);
- __ j(not_zero, &second_not_zero_length);
- // Second string is empty, result is first string which is already in rax.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
- __ bind(&second_not_zero_length);
- __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
- __ SmiTest(rbx);
- __ j(not_zero, &both_not_zero_length);
- // First string is empty, result is second string which is in rdx.
- __ movq(rax, rdx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Both strings are non-empty.
- // rax: first string
- // rbx: length of first string
- // rcx: length of second string
- // rdx: second string
- // r8: map of first string (if flags_ == NO_STRING_ADD_FLAGS)
- // r9: map of second string (if flags_ == NO_STRING_ADD_FLAGS)
- Label string_add_flat_result, longer_than_two;
- __ bind(&both_not_zero_length);
-
- // If arguments where known to be strings, maps are not loaded to r8 and r9
- // by the code above.
- if (flags_ != NO_STRING_ADD_FLAGS) {
- __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
- __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
- }
- // Get the instance types of the two strings as they will be needed soon.
- __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
- __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
-
- // Look at the length of the result of adding the two strings.
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
- __ SmiAdd(rbx, rbx, rcx);
- // Use the symbol table when adding two one character strings, as it
- // helps later optimizations to return a symbol here.
- __ SmiCompare(rbx, Smi::FromInt(2));
- __ j(not_equal, &longer_than_two);
-
- // Check that both strings are non-external ascii strings.
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
- &string_add_runtime);
-
- // Get the two characters forming the sub string.
- __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
- __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
-
- // Try to lookup two character string in symbol table. If it is not found
- // just allocate a new one.
- Label make_two_character_string, make_flat_ascii_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, rbx, rcx, r14, r11, rdi, r15, &make_two_character_string);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&make_two_character_string);
- __ Set(rbx, 2);
- __ jmp(&make_flat_ascii_string);
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ SmiCompare(rbx, Smi::FromInt(String::kMinNonFlatLength));
- __ j(below, &string_add_flat_result);
- // Handle exceptionally long strings in the runtime system.
- STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
- __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
- __ j(above, &string_add_runtime);
-
- // If result is not supposed to be flat, allocate a cons string object. If
- // both strings are ascii the result is an ascii cons string.
- // rax: first string
- // rbx: length of resulting flat string
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of second string
- Label non_ascii, allocated, ascii_data;
- __ movl(rcx, r8);
- __ and_(rcx, r9);
- STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
- __ testl(rcx, Immediate(kAsciiStringTag));
- __ j(zero, &non_ascii);
- __ bind(&ascii_data);
- // Allocate an acsii cons string.
- __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
- __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
- __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
- __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
- __ movq(rax, rcx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only ascii characters.
- // rcx: first instance type AND second instance type.
- // r8: first instance type.
- // r9: second instance type.
- __ testb(rcx, Immediate(kAsciiDataHintMask));
- __ j(not_zero, &ascii_data);
- __ xor_(r8, r9);
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
- __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
- __ j(equal, &ascii_data);
- // Allocate a two byte cons string.
- __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
- __ jmp(&allocated);
-
- // Handle creating a flat result. First check that both strings are not
- // external strings.
- // rax: first string
- // rbx: length of resulting flat string as smi
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of first string
- __ bind(&string_add_flat_result);
- __ SmiToInteger32(rbx, rbx);
- __ movl(rcx, r8);
- __ and_(rcx, Immediate(kStringRepresentationMask));
- __ cmpl(rcx, Immediate(kExternalStringTag));
- __ j(equal, &string_add_runtime);
- __ movl(rcx, r9);
- __ and_(rcx, Immediate(kStringRepresentationMask));
- __ cmpl(rcx, Immediate(kExternalStringTag));
- __ j(equal, &string_add_runtime);
- // Now check if both strings are ascii strings.
- // rax: first string
- // rbx: length of resulting flat string
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of second string
- Label non_ascii_string_add_flat_result;
- STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
- __ testl(r8, Immediate(kAsciiStringTag));
- __ j(zero, &non_ascii_string_add_flat_result);
- __ testl(r9, Immediate(kAsciiStringTag));
- __ j(zero, &string_add_runtime);
-
- __ bind(&make_flat_ascii_string);
- // Both strings are ascii strings. As they are short they are both flat.
- __ AllocateAsciiString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
- // rcx: result string
- __ movq(rbx, rcx);
- // Locate first character of result.
- __ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Locate first character of first argument
- __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
- __ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // rax: first char of first argument
- // rbx: result string
- // rcx: first character of result
- // rdx: second string
- // rdi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, true);
- // Locate first character of second argument.
- __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
- __ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // rbx: result string
- // rcx: next character of result
- // rdx: first char of second argument
- // rdi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
- __ movq(rax, rbx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Handle creating a flat two byte result.
- // rax: first string - known to be two byte
- // rbx: length of resulting flat string
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of first string
- __ bind(&non_ascii_string_add_flat_result);
- __ and_(r9, Immediate(kAsciiStringTag));
- __ j(not_zero, &string_add_runtime);
- // Both strings are two byte strings. As they are short they are both
- // flat.
- __ AllocateTwoByteString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
- // rcx: result string
- __ movq(rbx, rcx);
- // Locate first character of result.
- __ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Locate first character of first argument.
- __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
- __ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // rax: first char of first argument
- // rbx: result string
- // rcx: first character of result
- // rdx: second argument
- // rdi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, false);
- // Locate first character of second argument.
- __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
- __ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // rbx: result string
- // rcx: next character of result
- // rdx: first char of second argument
- // rdi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
- __ movq(rax, rbx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Just jump to runtime to add the two strings.
- __ bind(&string_add_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-
- if (call_builtin.is_linked()) {
- __ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
-}
-
-
-void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* slow) {
- // First check if the argument is already a string.
- Label not_string, done;
- __ JumpIfSmi(arg, &not_string);
- __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
- __ j(below, &done);
-
- // Check the number to string cache.
- Label not_cached;
- __ bind(&not_string);
- // Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- false,
- &not_cached);
- __ movq(arg, scratch1);
- __ movq(Operand(rsp, stack_offset), arg);
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
- __ j(not_equal, slow);
- __ testb(FieldOperand(scratch1, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ j(zero, slow);
- __ movq(arg, FieldOperand(arg, JSValue::kValueOffset));
- __ movq(Operand(rsp, stack_offset), arg);
-
- __ bind(&done);
-}
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii) {
- Label loop;
- __ bind(&loop);
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (ascii) {
- __ movb(kScratchRegister, Operand(src, 0));
- __ movb(Operand(dest, 0), kScratchRegister);
- __ incq(src);
- __ incq(dest);
- } else {
- __ movzxwl(kScratchRegister, Operand(src, 0));
- __ movw(Operand(dest, 0), kScratchRegister);
- __ addq(src, Immediate(2));
- __ addq(dest, Immediate(2));
- }
- __ decl(count);
- __ j(not_zero, &loop);
-}
-
-
-void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii) {
- // Copy characters using rep movs of doublewords. Align destination on 4 byte
- // boundary before starting rep movs. Copy remaining characters after running
- // rep movs.
- // Count is positive int32, dest and src are character pointers.
- ASSERT(dest.is(rdi)); // rep movs destination
- ASSERT(src.is(rsi)); // rep movs source
- ASSERT(count.is(rcx)); // rep movs count
-
- // Nothing to do for zero characters.
- NearLabel done;
- __ testl(count, count);
- __ j(zero, &done);
-
- // Make count the number of bytes to copy.
- if (!ascii) {
- STATIC_ASSERT(2 == sizeof(uc16));
- __ addl(count, count);
- }
-
- // Don't enter the rep movs if there are less than 4 bytes to copy.
- NearLabel last_bytes;
- __ testl(count, Immediate(~7));
- __ j(zero, &last_bytes);
-
- // Copy from edi to esi using rep movs instruction.
- __ movl(kScratchRegister, count);
- __ shr(count, Immediate(3)); // Number of doublewords to copy.
- __ repmovsq();
-
- // Find number of bytes left.
- __ movl(count, kScratchRegister);
- __ and_(count, Immediate(7));
-
- // Check if there are more bytes to copy.
- __ bind(&last_bytes);
- __ testl(count, count);
- __ j(zero, &done);
-
- // Copy remaining characters.
- Label loop;
- __ bind(&loop);
- __ movb(kScratchRegister, Operand(src, 0));
- __ movb(Operand(dest, 0), kScratchRegister);
- __ incq(src);
- __ incq(dest);
- __ decl(count);
- __ j(not_zero, &loop);
-
- __ bind(&done);
-}
-
-void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits as such strings has a
- // different hash algorithm. Don't try to look for these in the symbol table.
- NearLabel not_array_index;
- __ leal(scratch, Operand(c1, -'0'));
- __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(above, &not_array_index);
- __ leal(scratch, Operand(c2, -'0'));
- __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(below_equal, not_found);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- GenerateHashInit(masm, hash, c1, scratch);
- GenerateHashAddCharacter(masm, hash, c2, scratch);
- GenerateHashGetHash(masm, hash, scratch);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ shl(c2, Immediate(kBitsPerByte));
- __ orl(chars, c2);
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load the symbol table.
- Register symbol_table = c2;
- __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
-
- // Calculate capacity mask from the symbol table capacity.
- Register mask = scratch2;
- __ SmiToInteger32(mask,
- FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
- __ decl(mask);
-
- Register map = scratch4;
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string (32-bit int)
- // symbol_table: symbol table
- // mask: capacity mask (32-bit int)
- // map: -
- // scratch: -
-
- // Perform a number of probes in the symbol table.
- static const int kProbes = 4;
- Label found_in_symbol_table;
- Label next_probe[kProbes];
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in symbol table.
- __ movl(scratch, hash);
- if (i > 0) {
- __ addl(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
- }
- __ andl(scratch, mask);
-
- // Load the entry from the symbol table.
- Register candidate = scratch; // Scratch register contains candidate.
- STATIC_ASSERT(SymbolTable::kEntrySize == 1);
- __ movq(candidate,
- FieldOperand(symbol_table,
- scratch,
- times_pointer_size,
- SymbolTable::kElementsStartOffset));
-
- // If entry is undefined no string with this hash can be found.
- NearLabel is_string;
- __ CmpObjectType(candidate, ODDBALL_TYPE, map);
- __ j(not_equal, &is_string);
-
- __ CompareRoot(candidate, Heap::kUndefinedValueRootIndex);
- __ j(equal, not_found);
- // Must be null (deleted entry).
- __ jmp(&next_probe[i]);
-
- __ bind(&is_string);
-
- // If length is not 2 the string is not a candidate.
- __ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
- Smi::FromInt(2));
- __ j(not_equal, &next_probe[i]);
-
- // We use kScratchRegister as a temporary register in assumption that
- // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly
- Register temp = kScratchRegister;
-
- // Check that the candidate is a non-external ascii string.
- __ movzxbl(temp, FieldOperand(map, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(
- temp, temp, &next_probe[i]);
-
- // Check if the two characters match.
- __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
- __ andl(temp, Immediate(0x0000ffff));
- __ cmpl(chars, temp);
- __ j(equal, &found_in_symbol_table);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = scratch;
- __ bind(&found_in_symbol_table);
- if (!result.is(rax)) {
- __ movq(rax, result);
- }
-}
-
-
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash = character + (character << 10);
- __ movl(hash, character);
- __ shll(hash, Immediate(10));
- __ addl(hash, character);
- // hash ^= hash >> 6;
- __ movl(scratch, hash);
- __ sarl(scratch, Immediate(6));
- __ xorl(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash += character;
- __ addl(hash, character);
- // hash += hash << 10;
- __ movl(scratch, hash);
- __ shll(scratch, Immediate(10));
- __ addl(hash, scratch);
- // hash ^= hash >> 6;
- __ movl(scratch, hash);
- __ sarl(scratch, Immediate(6));
- __ xorl(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch) {
- // hash += hash << 3;
- __ leal(hash, Operand(hash, hash, times_8, 0));
- // hash ^= hash >> 11;
- __ movl(scratch, hash);
- __ sarl(scratch, Immediate(11));
- __ xorl(hash, scratch);
- // hash += hash << 15;
- __ movl(scratch, hash);
- __ shll(scratch, Immediate(15));
- __ addl(hash, scratch);
-
- // if (hash == 0) hash = 27;
- Label hash_not_zero;
- __ j(not_zero, &hash_not_zero);
- __ movl(hash, Immediate(27));
- __ bind(&hash_not_zero);
-}
-
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: to
- // rsp[16]: from
- // rsp[24]: string
-
- const int kToOffset = 1 * kPointerSize;
- const int kFromOffset = kToOffset + kPointerSize;
- const int kStringOffset = kFromOffset + kPointerSize;
- const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
-
- // Make sure first argument is a string.
- __ movq(rax, Operand(rsp, kStringOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ testl(rax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
- __ j(NegateCondition(is_string), &runtime);
-
- // rax: string
- // rbx: instance type
- // Calculate length of sub string using the smi values.
- Label result_longer_than_two;
- __ movq(rcx, Operand(rsp, kToOffset));
- __ movq(rdx, Operand(rsp, kFromOffset));
- __ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
-
- __ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
- __ cmpq(FieldOperand(rax, String::kLengthOffset), rcx);
- Label return_rax;
- __ j(equal, &return_rax);
- // Special handling of sub-strings of length 1 and 2. One character strings
- // are handled in the runtime system (looked up in the single character
- // cache). Two character strings are looked for in the symbol cache.
- __ SmiToInteger32(rcx, rcx);
- __ cmpl(rcx, Immediate(2));
- __ j(greater, &result_longer_than_two);
- __ j(less, &runtime);
-
- // Sub string of length 2 requested.
- // rax: string
- // rbx: instance type
- // rcx: sub string length (value is 2)
- // rdx: from index (smi)
- __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &runtime);
-
- // Get the two characters forming the sub string.
- __ SmiToInteger32(rdx, rdx); // From index is no longer smi.
- __ movzxbq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
- __ movzxbq(rcx,
- FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize + 1));
-
- // Try to lookup two character string in symbol table.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, rbx, rcx, rax, rdx, rdi, r14, &make_two_character_string);
- __ ret(3 * kPointerSize);
-
- __ bind(&make_two_character_string);
- // Setup registers for allocating the two character string.
- __ movq(rax, Operand(rsp, kStringOffset));
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ Set(rcx, 2);
-
- __ bind(&result_longer_than_two);
-
- // rax: string
- // rbx: instance type
- // rcx: result string length
- // Check for flat ascii string
- Label non_ascii_flat;
- __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &non_ascii_flat);
-
- // Allocate the result.
- __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime);
-
- // rax: result string
- // rcx: result string length
- __ movq(rdx, rsi); // esi used by following code.
- // Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
- // Load string argument and locate character of sub string start.
- __ movq(rsi, Operand(rsp, kStringOffset));
- __ movq(rbx, Operand(rsp, kFromOffset));
- {
- SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_1);
- __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
- SeqAsciiString::kHeaderSize - kHeapObjectTag));
- }
-
- // rax: result string
- // rcx: result length
- // rdx: original value of rsi
- // rdi: first character of result
- // rsi: character of sub string start
- StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
- __ movq(rsi, rdx); // Restore rsi.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(kArgumentsSize);
-
- __ bind(&non_ascii_flat);
- // rax: string
- // rbx: instance type & kStringRepresentationMask | kStringEncodingMask
- // rcx: result string length
- // Check for sequential two byte string
- __ cmpb(rbx, Immediate(kSeqStringTag | kTwoByteStringTag));
- __ j(not_equal, &runtime);
-
- // Allocate the result.
- __ AllocateTwoByteString(rax, rcx, rbx, rdx, rdi, &runtime);
-
- // rax: result string
- // rcx: result string length
- __ movq(rdx, rsi); // esi used by following code.
- // Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
- // Load string argument and locate character of sub string start.
- __ movq(rsi, Operand(rsp, kStringOffset));
- __ movq(rbx, Operand(rsp, kFromOffset));
- {
- SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_2);
- __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
- SeqAsciiString::kHeaderSize - kHeapObjectTag));
- }
-
- // rax: result string
- // rcx: result length
- // rdx: original value of rsi
- // rdi: first character of result
- // rsi: character of sub string start
- StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
- __ movq(rsi, rdx); // Restore esi.
-
- __ bind(&return_rax);
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(kArgumentsSize);
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
-}
-
-
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- // Ensure that you can always subtract a string length from a non-negative
- // number (e.g. another length).
- STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
-
- // Find minimum length and length difference.
- __ movq(scratch1, FieldOperand(left, String::kLengthOffset));
- __ movq(scratch4, scratch1);
- __ SmiSub(scratch4,
- scratch4,
- FieldOperand(right, String::kLengthOffset));
- // Register scratch4 now holds left.length - right.length.
- const Register length_difference = scratch4;
- NearLabel left_shorter;
- __ j(less, &left_shorter);
- // The right string isn't longer that the left one.
- // Get the right string's length by subtracting the (non-negative) difference
- // from the left string's length.
- __ SmiSub(scratch1, scratch1, length_difference);
- __ bind(&left_shorter);
- // Register scratch1 now holds Min(left.length, right.length).
- const Register min_length = scratch1;
-
- NearLabel compare_lengths;
- // If min-length is zero, go directly to comparing lengths.
- __ SmiTest(min_length);
- __ j(zero, &compare_lengths);
-
- __ SmiToInteger32(min_length, min_length);
-
- // Registers scratch2 and scratch3 are free.
- NearLabel result_not_equal;
- Label loop;
- {
- // Check characters 0 .. min_length - 1 in a loop.
- // Use scratch3 as loop index, min_length as limit and scratch2
- // for computation.
- const Register index = scratch3;
- __ movl(index, Immediate(0)); // Index into strings.
- __ bind(&loop);
- // Compare characters.
- // TODO(lrn): Could we load more than one character at a time?
- __ movb(scratch2, FieldOperand(left,
- index,
- times_1,
- SeqAsciiString::kHeaderSize));
- // Increment index and use -1 modifier on next load to give
- // the previous load extra time to complete.
- __ addl(index, Immediate(1));
- __ cmpb(scratch2, FieldOperand(right,
- index,
- times_1,
- SeqAsciiString::kHeaderSize - 1));
- __ j(not_equal, &result_not_equal);
- __ cmpl(index, min_length);
- __ j(not_equal, &loop);
- }
- // Completed loop without finding different characters.
- // Compare lengths (precomputed).
- __ bind(&compare_lengths);
- __ SmiTest(length_difference);
- __ j(not_zero, &result_not_equal);
-
- // Result is EQUAL.
- __ Move(rax, Smi::FromInt(EQUAL));
- __ ret(0);
-
- NearLabel result_greater;
- __ bind(&result_not_equal);
- // Unequal comparison of left to right, either character or length.
- __ j(greater, &result_greater);
-
- // Result is LESS.
- __ Move(rax, Smi::FromInt(LESS));
- __ ret(0);
-
- // Result is GREATER.
- __ bind(&result_greater);
- __ Move(rax, Smi::FromInt(GREATER));
- __ ret(0);
-}
-
-
-void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: right string
- // rsp[16]: left string
-
- __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // left
- __ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
-
- // Check for identity.
- NearLabel not_same;
- __ cmpq(rdx, rax);
- __ j(not_equal, &not_same);
- __ Move(rax, Smi::FromInt(EQUAL));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_compare_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&not_same);
-
- // Check that both are sequential ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
-
- // Inline comparison of ascii strings.
- __ IncrementCounter(counters->string_compare_native(), 1);
- // Drop arguments from the stack
- __ pop(rcx);
- __ addq(rsp, Immediate(2 * kPointerSize));
- __ push(rcx);
- GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
-}
-
-
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMIS);
- NearLabel miss;
- __ JumpIfNotBothSmi(rdx, rax, &miss);
-
- if (GetCondition() == equal) {
- // For equality we do not care about the sign of the result.
- __ subq(rax, rdx);
- } else {
- NearLabel done;
- __ subq(rdx, rax);
- __ j(no_overflow, &done);
- // Correct sign of result in case of overflow.
- __ SmiNot(rdx, rdx);
- __ bind(&done);
- __ movq(rax, rdx);
- }
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBERS);
-
- NearLabel generic_stub;
- NearLabel unordered;
- NearLabel miss;
- Condition either_smi = masm->CheckEitherSmi(rax, rdx);
- __ j(either_smi, &generic_stub);
-
- __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
- __ j(not_equal, &miss);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
- __ j(not_equal, &miss);
-
- // Load left and right operand
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
-
- // Compare operands
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered);
-
- // Return a result of -1, 0, or 1, based on EFLAGS.
- // Performing mov, because xor would destroy the flag register.
- __ movl(rax, Immediate(0));
- __ movl(rcx, Immediate(0));
- __ setcc(above, rax); // Add one to zero if carry clear and not equal.
- __ sbbq(rax, rcx); // Subtract one if below (aka. carry set).
- __ ret(0);
-
- __ bind(&unordered);
-
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
- __ bind(&generic_stub);
- __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECTS);
- NearLabel miss;
- Condition either_smi = masm->CheckEitherSmi(rdx, rax);
- __ j(either_smi, &miss);
-
- __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &miss, not_taken);
- __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &miss, not_taken);
-
- ASSERT(GetCondition() == equal);
- __ subq(rax, rdx);
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- // Save the registers.
- __ pop(rcx);
- __ push(rdx);
- __ push(rax);
- __ push(rcx);
-
- // Call the runtime system in a fresh internal frame.
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
- __ EnterInternalFrame();
- __ push(rdx);
- __ push(rax);
- __ Push(Smi::FromInt(op_));
- __ CallExternalReference(miss, 3);
- __ LeaveInternalFrame();
-
- // Compute the entry point of the rewritten stub.
- __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
-
- // Restore registers.
- __ pop(rcx);
- __ pop(rax);
- __ pop(rdx);
- __ push(rcx);
-
- // Do a tail call to the rewritten stub.
- __ jmp(rdi);
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/code-stubs-x64.h b/src/3rdparty/v8/src/x64/code-stubs-x64.h
deleted file mode 100644
index 246650a..0000000
--- a/src/3rdparty/v8/src/x64/code-stubs-x64.h
+++ /dev/null
@@ -1,477 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_CODE_STUBS_X64_H_
-#define V8_X64_CODE_STUBS_X64_H_
-
-#include "ic-inl.h"
-#include "type-info.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
- public:
- enum ArgumentType {
- TAGGED = 0,
- UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
- };
-
- explicit TranscendentalCacheStub(TranscendentalCache::Type type,
- ArgumentType argument_type)
- : type_(type), argument_type_(argument_type) {}
- void Generate(MacroAssembler* masm);
- private:
- TranscendentalCache::Type type_;
- ArgumentType argument_type_;
-
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_ | argument_type_; }
- Runtime::FunctionId RuntimeFunction();
- void GenerateOperation(MacroAssembler* masm);
-};
-
-
-class ToBooleanStub: public CodeStub {
- public:
- ToBooleanStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return 0; }
-};
-
-
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
- NO_GENERIC_BINARY_FLAGS = 0,
- NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
-};
-
-
-class GenericBinaryOpStub: public CodeStub {
- public:
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- GenericBinaryFlags flags,
- TypeInfo operands_type = TypeInfo::Unknown())
- : op_(op),
- mode_(mode),
- flags_(flags),
- args_in_registers_(false),
- args_reversed_(false),
- static_operands_type_(operands_type),
- runtime_operands_type_(BinaryOpIC::DEFAULT),
- name_(NULL) {
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- flags_(FlagBits::decode(key)),
- args_in_registers_(ArgsInRegistersBits::decode(key)),
- args_reversed_(ArgsReversedBits::decode(key)),
- static_operands_type_(TypeInfo::ExpandedRepresentation(
- StaticTypeInfoBits::decode(key))),
- runtime_operands_type_(runtime_operands_type),
- name_(NULL) {
- }
-
- // Generate code to call the stub with the supplied arguments. This will add
- // code at the call site to prepare arguments either in registers or on the
- // stack together with the actual call.
- void GenerateCall(MacroAssembler* masm, Register left, Register right);
- void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
- void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
-
- bool ArgsInRegistersSupported() {
- return (op_ == Token::ADD) || (op_ == Token::SUB)
- || (op_ == Token::MUL) || (op_ == Token::DIV);
- }
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- GenericBinaryFlags flags_;
- bool args_in_registers_; // Arguments passed in registers not on the stack.
- bool args_reversed_; // Left and right argument are swapped.
-
- // Number type information of operands, determined by code generator.
- TypeInfo static_operands_type_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo runtime_operands_type_;
-
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("GenericBinaryOpStub %d (op %s), "
- "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- static_cast<int>(flags_),
- static_cast<int>(args_in_registers_),
- static_cast<int>(args_reversed_),
- static_operands_type_.ToString());
- }
-#endif
-
- // Minor key encoding in 17 bits TTNNNFRAOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class ArgsInRegistersBits: public BitField<bool, 9, 1> {};
- class ArgsReversedBits: public BitField<bool, 10, 1> {};
- class FlagBits: public BitField<GenericBinaryFlags, 11, 1> {};
- class StaticTypeInfoBits: public BitField<int, 12, 3> {};
- class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 15, 3> {};
-
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- // Encode the parameters in a unique 18 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FlagBits::encode(flags_)
- | ArgsInRegistersBits::encode(args_in_registers_)
- | ArgsReversedBits::encode(args_reversed_)
- | StaticTypeInfoBits::encode(
- static_operands_type_.ThreeBitRepresentation())
- | RuntimeTypeInfoBits::encode(runtime_operands_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateSmiCode(MacroAssembler* masm, Label* slow);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
-
- bool IsOperationCommutative() {
- return (op_ == Token::ADD) || (op_ == Token::MUL);
- }
-
- void SetArgsInRegisters() { args_in_registers_ = true; }
- void SetArgsReversed() { args_reversed_ = true; }
- bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
- bool HasArgsInRegisters() { return args_in_registers_; }
- bool HasArgsReversed() { return args_reversed_; }
-
- bool ShouldGenerateSmiCode() {
- return HasSmiCodeInStub() &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- bool ShouldGenerateFPCode() {
- return runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(runtime_operands_type_);
- }
-
- friend class CodeGenerator;
- friend class LCodeGen;
-};
-
-
-class TypeRecordingBinaryOpStub: public CodeStub {
- public:
- TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- operands_type_(TRBinaryOpIC::UNINITIALIZED),
- result_type_(TRBinaryOpIC::UNINITIALIZED),
- name_(NULL) {
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- TypeRecordingBinaryOpStub(
- int key,
- TRBinaryOpIC::TypeInfo operands_type,
- TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- operands_type_(operands_type),
- result_type_(result_type),
- name_(NULL) { }
-
- private:
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- Token::Value op_;
- OverwriteMode mode_;
-
- // Operand type information determined at runtime.
- TRBinaryOpIC::TypeInfo operands_type_;
- TRBinaryOpIC::TypeInfo result_type_;
-
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("TypeRecordingBinaryOpStub %d (op %s), "
- "(mode %d, runtime_type_info %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- TRBinaryOpIC::GetName(operands_type_));
- }
-#endif
-
- // Minor key encoding in 15 bits RRRTTTOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 9, 3> {};
- class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 12, 3> {};
-
- Major MajorKey() { return TypeRecordingBinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | OperandTypeInfoBits::encode(operands_type_)
- | ResultTypeInfoBits::encode(result_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateSmiCode(MacroAssembler* masm,
- Label* slow,
- SmiCodeGenerateHeapNumberResults heapnumber_results);
- void GenerateFloatingPointCode(MacroAssembler* masm,
- Label* allocation_failure,
- Label* non_numeric_failure);
- void GenerateStringAddCode(MacroAssembler* masm);
- void GenerateCallRuntimeCode(MacroAssembler* masm);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
-
- void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return TRBinaryOpIC::ToState(operands_type_);
- }
-
- virtual void FinishCode(Code* code) {
- code->set_type_recording_binary_op_type(operands_type_);
- code->set_type_recording_binary_op_result_type(result_type_);
- }
-
- friend class CodeGenerator;
-};
-
-
-class StringHelper : public AllStatic {
- public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersREP adds too much
- // overhead. Copying of overlapping regions is not supported.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii);
-
- // Generate code for copying characters using the rep movs instruction.
- // Copies rcx characters from rsi to rdi. Copying of overlapping regions is
- // not supported.
- static void GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest, // Must be rdi.
- Register src, // Must be rsi.
- Register count, // Must be rcx.
- bool ascii);
-
-
- // Probe the symbol table for a two character string. If the string is
- // not found by probing a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the symbol table. If the
- // string is found the code falls through with the string in register rax.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* not_found);
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- // Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
- // Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
- // Omit both string checks in stub.
- NO_STRING_CHECK_IN_STUB =
- NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
-};
-
-
-class StringAddStub: public CodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return flags_; }
-
- void Generate(MacroAssembler* masm);
-
- void GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* slow);
-
- const StringAddFlags flags_;
-};
-
-
-class SubStringStub: public CodeStub {
- public:
- SubStringStub() {}
-
- private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public CodeStub {
- public:
- explicit StringCompareStub() {}
-
- // Compare two flat ascii strings and returns result in rax after popping two
- // arguments from the stack.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- private:
- Major MajorKey() { return StringCompare; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class NumberToStringStub: public CodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found);
-
- private:
- static void GenerateConvertHashCodeToIndex(MacroAssembler* masm,
- Register hash,
- Register mask);
-
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "NumberToStringStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("NumberToStringStub\n");
- }
-#endif
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_X64_CODE_STUBS_X64_H_
diff --git a/src/3rdparty/v8/src/x64/codegen-x64-inl.h b/src/3rdparty/v8/src/x64/codegen-x64-inl.h
deleted file mode 100644
index 53caf91..0000000
--- a/src/3rdparty/v8/src/x64/codegen-x64-inl.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_X64_CODEGEN_X64_INL_H_
-#define V8_X64_CODEGEN_X64_INL_H_
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// Platform-specific inline functions.
-
-void DeferredCode::Jump() { __ jmp(&entry_label_); }
-void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_X64_CODEGEN_X64_INL_H_
diff --git a/src/3rdparty/v8/src/x64/codegen-x64.cc b/src/3rdparty/v8/src/x64/codegen-x64.cc
deleted file mode 100644
index 9cf85c4..0000000
--- a/src/3rdparty/v8/src/x64/codegen-x64.cc
+++ /dev/null
@@ -1,8843 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-// -------------------------------------------------------------------------
-// Platform-specific FrameRegisterState functions.
-
-void FrameRegisterState::Save(MacroAssembler* masm) const {
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- int action = registers_[i];
- if (action == kPush) {
- __ push(RegisterAllocator::ToRegister(i));
- } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
- __ movq(Operand(rbp, action), RegisterAllocator::ToRegister(i));
- }
- }
-}
-
-
-void FrameRegisterState::Restore(MacroAssembler* masm) const {
- // Restore registers in reverse order due to the stack.
- for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
- int action = registers_[i];
- if (action == kPush) {
- __ pop(RegisterAllocator::ToRegister(i));
- } else if (action != kIgnore) {
- action &= ~kSyncedFlag;
- __ movq(RegisterAllocator::ToRegister(i), Operand(rbp, action));
- }
- }
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm_)
-
-// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-void DeferredCode::SaveRegisters() {
- frame_state_.Save(masm_);
-}
-
-
-void DeferredCode::RestoreRegisters() {
- frame_state_.Restore(masm_);
-}
-
-
-// -------------------------------------------------------------------------
-// Platform-specific RuntimeCallHelper functions.
-
-void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- frame_state_->Save(masm);
-}
-
-
-void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- frame_state_->Restore(masm);
-}
-
-
-void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterInternalFrame();
-}
-
-
-void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveInternalFrame();
-}
-
-
-// -------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
- : owner_(owner),
- destination_(NULL),
- previous_(NULL) {
- owner_->set_state(this);
-}
-
-
-CodeGenState::CodeGenState(CodeGenerator* owner,
- ControlDestination* destination)
- : owner_(owner),
- destination_(destination),
- previous_(owner->state()) {
- owner_->set_state(this);
-}
-
-
-CodeGenState::~CodeGenState() {
- ASSERT(owner_->state() == this);
- owner_->set_state(previous_);
-}
-
-
-// -------------------------------------------------------------------------
-// CodeGenerator implementation.
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
- : deferred_(8),
- masm_(masm),
- info_(NULL),
- frame_(NULL),
- allocator_(NULL),
- state_(NULL),
- loop_nesting_(0),
- function_return_is_shadowed_(false),
- in_spilled_code_(false) {
-}
-
-
-// Calling conventions:
-// rbp: caller's frame pointer
-// rsp: stack pointer
-// rdi: called JS function
-// rsi: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
- // Record the position for debugging purposes.
- CodeForFunctionPosition(info->function());
- Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
-
- // Initialize state.
- info_ = info;
- ASSERT(allocator_ == NULL);
- RegisterAllocator register_allocator(this);
- allocator_ = &register_allocator;
- ASSERT(frame_ == NULL);
- frame_ = new VirtualFrame();
- set_in_spilled_code(false);
-
- // Adjust for function-level loop nesting.
- ASSERT_EQ(0, loop_nesting_);
- loop_nesting_ = info->is_in_loop() ? 1 : 0;
-
- Isolate::Current()->set_jump_target_compiling_deferred_code(false);
-
- {
- CodeGenState state(this);
- // Entry:
- // Stack: receiver, arguments, return address.
- // rbp: caller's frame pointer
- // rsp: stack pointer
- // rdi: called JS function
- // rsi: callee's context
- allocator_->Initialize();
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- frame_->SpillAll();
- __ int3();
- }
-#endif
-
- frame_->Enter();
-
- // Allocate space for locals and initialize them.
- frame_->AllocateStackSlots();
-
- // Allocate the local context if needed.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment cmnt(masm_, "[ allocate local context");
- // Allocate local context.
- // Get outer context and create a new context based on it.
- frame_->PushFunction();
- Result context;
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- context = frame_->CallStub(&stub, 1);
- } else {
- context = frame_->CallRuntime(Runtime::kNewContext, 1);
- }
-
- // Update context local.
- frame_->SaveContextRegister();
-
- // Verify that the runtime call result and rsi agree.
- if (FLAG_debug_code) {
- __ cmpq(context.reg(), rsi);
- __ Assert(equal, "Runtime::NewContext should end up in rsi");
- }
- }
-
- // TODO(1241774): Improve this code:
- // 1) only needed if we have a context
- // 2) no need to recompute context ptr every single time
- // 3) don't copy parameter operand code from SlotOperand!
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- for (int i = 0; i < scope()->num_parameters(); i++) {
- Variable* par = scope()->parameter(i);
- Slot* slot = par->AsSlot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- // The use of SlotOperand below is safe in unspilled code
- // because the slot is guaranteed to be a context slot.
- //
- // There are no parameters in the global scope.
- ASSERT(!scope()->is_global_scope());
- frame_->PushParameterAt(i);
- Result value = frame_->Pop();
- value.ToRegister();
-
- // SlotOperand loads context.reg() with the context object
- // stored to, used below in RecordWrite.
- Result context = allocator_->Allocate();
- ASSERT(context.is_valid());
- __ movq(SlotOperand(slot, context.reg()), value.reg());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- frame_->Spill(context.reg());
- frame_->Spill(value.reg());
- __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
- }
- }
- }
-
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in
- // the context.
- if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
- StoreArgumentsObject(true);
- }
-
- // Initialize ThisFunction reference if present.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- frame_->Push(FACTORY->the_hole_value());
- StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
- }
-
- // Initialize the function return target after the locals are set
- // up, because it needs the expected frame height from the frame.
- function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
- function_return_is_shadowed_ = false;
-
- // Generate code to 'execute' declarations and initialize functions
- // (source elements). In case of an illegal redeclaration we need to
- // handle that instead of processing the declarations.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ illegal redeclarations");
- scope()->VisitIllegalRedeclaration(this);
- } else {
- Comment cmnt(masm_, "[ declarations");
- ProcessDeclarations(scope()->declarations());
- // Bail out if a stack-overflow exception occurred when processing
- // declarations.
- if (HasStackOverflow()) return;
- }
-
- if (FLAG_trace) {
- frame_->CallRuntime(Runtime::kTraceEnter, 0);
- // Ignore the return value.
- }
- CheckStack();
-
- // Compile the body of the function in a vanilla state. Don't
- // bother compiling all the code if the scope has an illegal
- // redeclaration.
- if (!scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ function body");
-#ifdef DEBUG
- bool is_builtin = Isolate::Current()->bootstrapper()->IsActive();
- bool should_trace =
- is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
- if (should_trace) {
- frame_->CallRuntime(Runtime::kDebugTrace, 0);
- // Ignore the return value.
- }
-#endif
- VisitStatements(info->function()->body());
-
- // Handle the return from the function.
- if (has_valid_frame()) {
- // If there is a valid frame, control flow can fall off the end of
- // the body. In that case there is an implicit return statement.
- ASSERT(!function_return_is_shadowed_);
- CodeForReturnPosition(info->function());
- frame_->PrepareForReturn();
- Result undefined(FACTORY->undefined_value());
- if (function_return_.is_bound()) {
- function_return_.Jump(&undefined);
- } else {
- function_return_.Bind(&undefined);
- GenerateReturnSequence(&undefined);
- }
- } else if (function_return_.is_linked()) {
- // If the return target has dangling jumps to it, then we have not
- // yet generated the return sequence. This can happen when (a)
- // control does not flow off the end of the body so we did not
- // compile an artificial return statement just above, and (b) there
- // are return statements in the body but (c) they are all shadowed.
- Result return_value;
- function_return_.Bind(&return_value);
- GenerateReturnSequence(&return_value);
- }
- }
- }
-
- // Adjust for function-level loop nesting.
- ASSERT_EQ(loop_nesting_, info->is_in_loop() ? 1 : 0);
- loop_nesting_ = 0;
-
- // Code generation state must be reset.
- ASSERT(state_ == NULL);
- ASSERT(!function_return_is_shadowed_);
- function_return_.Unuse();
- DeleteFrame();
-
- // Process any deferred code using the register allocator.
- if (!HasStackOverflow()) {
- info->isolate()->set_jump_target_compiling_deferred_code(true);
- ProcessDeferred();
- info->isolate()->set_jump_target_compiling_deferred_code(false);
- }
-
- // There is no need to delete the register allocator, it is a
- // stack-allocated local.
- allocator_ = NULL;
-}
-
-
-Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
- // Currently, this assertion will fail if we try to assign to
- // a constant variable that is constant because it is read-only
- // (such as the variable referring to a named function expression).
- // We need to implement assignments to read-only variables.
- // Ideally, we should do this during AST generation (by converting
- // such assignments into expression statements); however, in general
- // we may not be able to make the decision until past AST generation,
- // that is when the entire program is known.
- ASSERT(slot != NULL);
- int index = slot->index();
- switch (slot->type()) {
- case Slot::PARAMETER:
- return frame_->ParameterAt(index);
-
- case Slot::LOCAL:
- return frame_->LocalAt(index);
-
- case Slot::CONTEXT: {
- // Follow the context chain if necessary.
- ASSERT(!tmp.is(rsi)); // do not overwrite context register
- Register context = rsi;
- int chain_length = scope()->ContextChainLength(slot->var()->scope());
- for (int i = 0; i < chain_length; i++) {
- // Load the closure.
- // (All contexts, even 'with' contexts, have a closure,
- // and it is the same for all contexts inside a function.
- // There is no need to go to the function context first.)
- __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- // Load the function context (which is the incoming, outer context).
- __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
- context = tmp;
- }
- // We may have a 'with' context now. Get the function context.
- // (In fact this mov may never be the needed, since the scope analysis
- // may not permit a direct context access in this case and thus we are
- // always at a function context. However it is safe to dereference be-
- // cause the function context of a function context is itself. Before
- // deleting this mov we should try to create a counter-example first,
- // though...)
- __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp, index);
- }
-
- default:
- UNREACHABLE();
- return Operand(rsp, 0);
- }
-}
-
-
-Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
- Result tmp,
- JumpTarget* slow) {
- ASSERT(slot->type() == Slot::CONTEXT);
- ASSERT(tmp.is_register());
- Register context = rsi;
-
- for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- slow->Branch(not_equal, not_taken);
- }
- __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
- __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- context = tmp.reg();
- }
- }
- // Check that last extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- slow->Branch(not_equal, not_taken);
- __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp.reg(), slot->index());
-}
-
-
-// Emit code to load the value of an expression to the top of the
-// frame. If the expression is boolean-valued it may be compiled (or
-// partially compiled) into control flow to the control destination.
-// If force_control is true, control flow is forced.
-void CodeGenerator::LoadCondition(Expression* expr,
- ControlDestination* dest,
- bool force_control) {
- ASSERT(!in_spilled_code());
- int original_height = frame_->height();
-
- { CodeGenState new_state(this, dest);
- Visit(expr);
-
- // If we hit a stack overflow, we may not have actually visited
- // the expression. In that case, we ensure that we have a
- // valid-looking frame state because we will continue to generate
- // code as we unwind the C++ stack.
- //
- // It's possible to have both a stack overflow and a valid frame
- // state (eg, a subexpression overflowed, visiting it returned
- // with a dummied frame state, and visiting this expression
- // returned with a normal-looking state).
- if (HasStackOverflow() &&
- !dest->is_used() &&
- frame_->height() == original_height) {
- dest->Goto(true);
- }
- }
-
- if (force_control && !dest->is_used()) {
- // Convert the TOS value into flow to the control destination.
- ToBoolean(dest);
- }
-
- ASSERT(!(force_control && !dest->is_used()));
- ASSERT(dest->is_used() || frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadAndSpill(Expression* expression) {
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- Load(expression);
- frame_->SpillAll();
- set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::Load(Expression* expr) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- ASSERT(!in_spilled_code());
- JumpTarget true_target;
- JumpTarget false_target;
- ControlDestination dest(&true_target, &false_target, true);
- LoadCondition(expr, &dest, false);
-
- if (dest.false_was_fall_through()) {
- // The false target was just bound.
- JumpTarget loaded;
- frame_->Push(FACTORY->false_value());
- // There may be dangling jumps to the true target.
- if (true_target.is_linked()) {
- loaded.Jump();
- true_target.Bind();
- frame_->Push(FACTORY->true_value());
- loaded.Bind();
- }
-
- } else if (dest.is_used()) {
- // There is true, and possibly false, control flow (with true as
- // the fall through).
- JumpTarget loaded;
- frame_->Push(FACTORY->true_value());
- if (false_target.is_linked()) {
- loaded.Jump();
- false_target.Bind();
- frame_->Push(FACTORY->false_value());
- loaded.Bind();
- }
-
- } else {
- // We have a valid value on top of the frame, but we still may
- // have dangling jumps to the true and false targets from nested
- // subexpressions (eg, the left subexpressions of the
- // short-circuited boolean operators).
- ASSERT(has_valid_frame());
- if (true_target.is_linked() || false_target.is_linked()) {
- JumpTarget loaded;
- loaded.Jump(); // Don't lose the current TOS.
- if (true_target.is_linked()) {
- true_target.Bind();
- frame_->Push(FACTORY->true_value());
- if (false_target.is_linked()) {
- loaded.Jump();
- }
- }
- if (false_target.is_linked()) {
- false_target.Bind();
- frame_->Push(FACTORY->false_value());
- }
- loaded.Bind();
- }
- }
-
- ASSERT(has_valid_frame());
- ASSERT(frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadGlobal() {
- if (in_spilled_code()) {
- frame_->EmitPush(GlobalObjectOperand());
- } else {
- Result temp = allocator_->Allocate();
- __ movq(temp.reg(), GlobalObjectOperand());
- frame_->Push(&temp);
- }
-}
-
-
-void CodeGenerator::LoadGlobalReceiver() {
- Result temp = allocator_->Allocate();
- Register reg = temp.reg();
- __ movq(reg, GlobalObjectOperand());
- __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
- frame_->Push(&temp);
-}
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* expr) {
- // Special handling of identifiers as subexpressions of typeof.
- Variable* variable = expr->AsVariableProxy()->AsVariable();
- if (variable != NULL && !variable->is_this() && variable->is_global()) {
- // For a global variable we build the property reference
- // <global>.<variable> and perform a (regular non-contextual) property
- // load to make sure we do not get reference errors.
- Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
- Literal key(variable->name());
- Property property(&global, &key, RelocInfo::kNoPosition);
- Reference ref(this, &property);
- ref.GetValue();
- } else if (variable != NULL && variable->AsSlot() != NULL) {
- // For a variable that rewrites to a slot, we signal it is the immediate
- // subexpression of a typeof.
- LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
- } else {
- // Anything else can be handled normally.
- Load(expr);
- }
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
- if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
-
- // In strict mode there is no need for shadow arguments.
- ASSERT(scope()->arguments_shadow() != NULL || scope()->is_strict_mode());
- // We don't want to do lazy arguments allocation for functions that
- // have heap-allocated contexts, because it interfers with the
- // uninitialized const tracking in the context objects.
- return (scope()->num_heap_slots() > 0 || scope()->is_strict_mode())
- ? EAGER_ARGUMENTS_ALLOCATION
- : LAZY_ARGUMENTS_ALLOCATION;
-}
-
-
-Result CodeGenerator::StoreArgumentsObject(bool initial) {
- ArgumentsAllocationMode mode = ArgumentsMode();
- ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
-
- Comment cmnt(masm_, "[ store arguments object");
- if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
- // When using lazy arguments allocation, we store the arguments marker value
- // as a sentinel indicating that the arguments object hasn't been
- // allocated yet.
- frame_->Push(FACTORY->arguments_marker());
- } else {
- ArgumentsAccessStub stub(is_strict_mode()
- ? ArgumentsAccessStub::NEW_STRICT
- : ArgumentsAccessStub::NEW_NON_STRICT);
- frame_->PushFunction();
- frame_->PushReceiverSlotAddress();
- frame_->Push(Smi::FromInt(scope()->num_parameters()));
- Result result = frame_->CallStub(&stub, 3);
- frame_->Push(&result);
- }
-
- Variable* arguments = scope()->arguments();
- Variable* shadow = scope()->arguments_shadow();
- ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
- ASSERT((shadow != NULL && shadow->AsSlot() != NULL) ||
- scope()->is_strict_mode());
-
- JumpTarget done;
- bool skip_arguments = false;
- if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
- // We have to skip storing into the arguments slot if it has
- // already been written to. This can happen if the a function
- // has a local variable named 'arguments'.
- LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF);
- Result probe = frame_->Pop();
- if (probe.is_constant()) {
- // We have to skip updating the arguments object if it has
- // been assigned a proper value.
- skip_arguments = !probe.handle()->IsArgumentsMarker();
- } else {
- __ CompareRoot(probe.reg(), Heap::kArgumentsMarkerRootIndex);
- probe.Unuse();
- done.Branch(not_equal);
- }
- }
- if (!skip_arguments) {
- StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
- if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
- }
- if (shadow != NULL) {
- StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
- }
- return frame_->Pop();
-}
-
-//------------------------------------------------------------------------------
-// CodeGenerator implementation of variables, lookups, and stores.
-
-Reference::Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get)
- : cgen_(cgen),
- expression_(expression),
- type_(ILLEGAL),
- persist_after_get_(persist_after_get) {
- cgen->LoadReference(this);
-}
-
-
-Reference::~Reference() {
- ASSERT(is_unloaded() || is_illegal());
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
- // References are loaded from both spilled and unspilled code. Set the
- // state to unspilled to allow that (and explicitly spill after
- // construction at the construction sites).
- bool was_in_spilled_code = in_spilled_code_;
- in_spilled_code_ = false;
-
- Comment cmnt(masm_, "[ LoadReference");
- Expression* e = ref->expression();
- Property* property = e->AsProperty();
- Variable* var = e->AsVariableProxy()->AsVariable();
-
- if (property != NULL) {
- // The expression is either a property or a variable proxy that rewrites
- // to a property.
- Load(property->obj());
- if (property->key()->IsPropertyName()) {
- ref->set_type(Reference::NAMED);
- } else {
- Load(property->key());
- ref->set_type(Reference::KEYED);
- }
- } else if (var != NULL) {
- // The expression is a variable proxy that does not rewrite to a
- // property. Global variables are treated as named property references.
- if (var->is_global()) {
- // If rax is free, the register allocator prefers it. Thus the code
- // generator will load the global object into rax, which is where
- // LoadIC wants it. Most uses of Reference call LoadIC directly
- // after the reference is created.
- frame_->Spill(rax);
- LoadGlobal();
- ref->set_type(Reference::NAMED);
- } else {
- ASSERT(var->AsSlot() != NULL);
- ref->set_type(Reference::SLOT);
- }
- } else {
- // Anything else is a runtime error.
- Load(e);
- frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
- }
-
- in_spilled_code_ = was_in_spilled_code;
-}
-
-
-void CodeGenerator::UnloadReference(Reference* ref) {
- // Pop a reference from the stack while preserving TOS.
- Comment cmnt(masm_, "[ UnloadReference");
- frame_->Nip(ref->size());
- ref->set_unloaded();
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
-// convert it to a boolean in the condition code register or jump to
-// 'false_target'/'true_target' as appropriate.
-void CodeGenerator::ToBoolean(ControlDestination* dest) {
- Comment cmnt(masm_, "[ ToBoolean");
-
- // The value to convert should be popped from the frame.
- Result value = frame_->Pop();
- value.ToRegister();
-
- if (value.is_number()) {
- // Fast case if TypeInfo indicates only numbers.
- if (FLAG_debug_code) {
- __ AbortIfNotNumber(value.reg());
- }
- // Smi => false iff zero.
- __ Cmp(value.reg(), Smi::FromInt(0));
- if (value.is_smi()) {
- value.Unuse();
- dest->Split(not_zero);
- } else {
- dest->false_target()->Branch(equal);
- Condition is_smi = masm_->CheckSmi(value.reg());
- dest->true_target()->Branch(is_smi);
- __ xorpd(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset));
- value.Unuse();
- dest->Split(not_zero);
- }
- } else {
- // Fast case checks.
- // 'false' => false.
- __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
- dest->false_target()->Branch(equal);
-
- // 'true' => true.
- __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
- dest->true_target()->Branch(equal);
-
- // 'undefined' => false.
- __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
- dest->false_target()->Branch(equal);
-
- // Smi => false iff zero.
- __ Cmp(value.reg(), Smi::FromInt(0));
- dest->false_target()->Branch(equal);
- Condition is_smi = masm_->CheckSmi(value.reg());
- dest->true_target()->Branch(is_smi);
-
- // Call the stub for all other cases.
- frame_->Push(&value); // Undo the Pop() from above.
- ToBooleanStub stub;
- Result temp = frame_->CallStub(&stub, 1);
- // Convert the result to a condition code.
- __ testq(temp.reg(), temp.reg());
- temp.Unuse();
- dest->Split(not_equal);
- }
-}
-
-
-// Call the specialized stub for a binary operation.
-class DeferredInlineBinaryOperation: public DeferredCode {
- public:
- DeferredInlineBinaryOperation(Token::Value op,
- Register dst,
- Register left,
- Register right,
- OverwriteMode mode)
- : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
- set_comment("[ DeferredInlineBinaryOperation");
- }
-
- virtual void Generate();
-
- private:
- Token::Value op_;
- Register dst_;
- Register left_;
- Register right_;
- OverwriteMode mode_;
-};
-
-
-void DeferredInlineBinaryOperation::Generate() {
- Label done;
- if ((op_ == Token::ADD)
- || (op_ == Token::SUB)
- || (op_ == Token::MUL)
- || (op_ == Token::DIV)) {
- Label call_runtime;
- Label left_smi, right_smi, load_right, do_op;
- __ JumpIfSmi(left_, &left_smi);
- __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ movsd(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
- if (mode_ == OVERWRITE_LEFT) {
- __ movq(dst_, left_);
- }
- __ jmp(&load_right);
-
- __ bind(&left_smi);
- __ SmiToInteger32(left_, left_);
- __ cvtlsi2sd(xmm0, left_);
- __ Integer32ToSmi(left_, left_);
- if (mode_ == OVERWRITE_LEFT) {
- Label alloc_failure;
- __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
- }
-
- __ bind(&load_right);
- __ JumpIfSmi(right_, &right_smi);
- __ CompareRoot(FieldOperand(right_, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ movsd(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
- if (mode_ == OVERWRITE_RIGHT) {
- __ movq(dst_, right_);
- } else if (mode_ == NO_OVERWRITE) {
- Label alloc_failure;
- __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
- }
- __ jmp(&do_op);
-
- __ bind(&right_smi);
- __ SmiToInteger32(right_, right_);
- __ cvtlsi2sd(xmm1, right_);
- __ Integer32ToSmi(right_, right_);
- if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
- Label alloc_failure;
- __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
- }
-
- __ bind(&do_op);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
- __ jmp(&done);
-
- __ bind(&call_runtime);
- }
- GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
- stub.GenerateCall(masm_, left_, right_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
- __ bind(&done);
-}
-
-
-static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
- Token::Value op,
- const Result& right,
- const Result& left) {
- // Set TypeInfo of result according to the operation performed.
- // We rely on the fact that smis have a 32 bit payload on x64.
- STATIC_ASSERT(kSmiValueSize == 32);
- switch (op) {
- case Token::COMMA:
- return right.type_info();
- case Token::OR:
- case Token::AND:
- // Result type can be either of the two input types.
- return operands_type;
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- // Result is always a smi.
- return TypeInfo::Smi();
- case Token::SAR:
- case Token::SHL:
- // Result is always a smi.
- return TypeInfo::Smi();
- case Token::SHR:
- // Result of x >>> y is always a smi if masked y >= 1, otherwise a number.
- return (right.is_constant() && right.handle()->IsSmi()
- && (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
- ? TypeInfo::Smi()
- : TypeInfo::Number();
- case Token::ADD:
- if (operands_type.IsNumber()) {
- return TypeInfo::Number();
- } else if (left.type_info().IsString() || right.type_info().IsString()) {
- return TypeInfo::String();
- } else {
- return TypeInfo::Unknown();
- }
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- // Result is always a number.
- return TypeInfo::Number();
- default:
- UNREACHABLE();
- }
- UNREACHABLE();
- return TypeInfo::Unknown();
-}
-
-
-void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
- OverwriteMode overwrite_mode) {
- Comment cmnt(masm_, "[ BinaryOperation");
- Token::Value op = expr->op();
- Comment cmnt_token(masm_, Token::String(op));
-
- if (op == Token::COMMA) {
- // Simply discard left value.
- frame_->Nip(1);
- return;
- }
-
- Result right = frame_->Pop();
- Result left = frame_->Pop();
-
- if (op == Token::ADD) {
- const bool left_is_string = left.type_info().IsString();
- const bool right_is_string = right.type_info().IsString();
- // Make sure constant strings have string type info.
- ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
- left_is_string);
- ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
- right_is_string);
- if (left_is_string || right_is_string) {
- frame_->Push(&left);
- frame_->Push(&right);
- Result answer;
- if (left_is_string) {
- if (right_is_string) {
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- answer = frame_->CallStub(&stub, 2);
- } else {
- answer =
- frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
- }
- } else if (right_is_string) {
- answer =
- frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
- }
- answer.set_type_info(TypeInfo::String());
- frame_->Push(&answer);
- return;
- }
- // Neither operand is known to be a string.
- }
-
- bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
- bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
- bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
- bool right_is_non_smi_constant =
- right.is_constant() && !right.handle()->IsSmi();
-
- if (left_is_smi_constant && right_is_smi_constant) {
- // Compute the constant result at compile time, and leave it on the frame.
- int left_int = Smi::cast(*left.handle())->value();
- int right_int = Smi::cast(*right.handle())->value();
- if (FoldConstantSmis(op, left_int, right_int)) return;
- }
-
- // Get number type of left and right sub-expressions.
- TypeInfo operands_type =
- TypeInfo::Combine(left.type_info(), right.type_info());
-
- TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
-
- Result answer;
- if (left_is_non_smi_constant || right_is_non_smi_constant) {
- // Go straight to the slow case, with no smi code.
- GenericBinaryOpStub stub(op,
- overwrite_mode,
- NO_SMI_CODE_IN_STUB,
- operands_type);
- answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
- } else if (right_is_smi_constant) {
- answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
- false, overwrite_mode);
- } else if (left_is_smi_constant) {
- answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
- true, overwrite_mode);
- } else {
- // Set the flags based on the operation, type and loop nesting level.
- // Bit operations always assume they likely operate on smis. Still only
- // generate the inline Smi check code if this operation is part of a loop.
- // For all other operations only inline the Smi check code for likely smis
- // if the operation is part of a loop.
- if (loop_nesting() > 0 &&
- (Token::IsBitOp(op) ||
- operands_type.IsInteger32() ||
- expr->type()->IsLikelySmi())) {
- answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
- } else {
- GenericBinaryOpStub stub(op,
- overwrite_mode,
- NO_GENERIC_BINARY_FLAGS,
- operands_type);
- answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
- }
- }
-
- answer.set_type_info(result_type);
- frame_->Push(&answer);
-}
-
-
-bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
- Object* answer_object = HEAP->undefined_value();
- switch (op) {
- case Token::ADD:
- // Use intptr_t to detect overflow of 32-bit int.
- if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
- answer_object = Smi::FromInt(left + right);
- }
- break;
- case Token::SUB:
- // Use intptr_t to detect overflow of 32-bit int.
- if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
- answer_object = Smi::FromInt(left - right);
- }
- break;
- case Token::MUL: {
- double answer = static_cast<double>(left) * right;
- if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
- // If the product is zero and the non-zero factor is negative,
- // the spec requires us to return floating point negative zero.
- if (answer != 0 || (left >= 0 && right >= 0)) {
- answer_object = Smi::FromInt(static_cast<int>(answer));
- }
- }
- }
- break;
- case Token::DIV:
- case Token::MOD:
- break;
- case Token::BIT_OR:
- answer_object = Smi::FromInt(left | right);
- break;
- case Token::BIT_AND:
- answer_object = Smi::FromInt(left & right);
- break;
- case Token::BIT_XOR:
- answer_object = Smi::FromInt(left ^ right);
- break;
-
- case Token::SHL: {
- int shift_amount = right & 0x1F;
- if (Smi::IsValid(left << shift_amount)) {
- answer_object = Smi::FromInt(left << shift_amount);
- }
- break;
- }
- case Token::SHR: {
- int shift_amount = right & 0x1F;
- unsigned int unsigned_left = left;
- unsigned_left >>= shift_amount;
- if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
- answer_object = Smi::FromInt(unsigned_left);
- }
- break;
- }
- case Token::SAR: {
- int shift_amount = right & 0x1F;
- unsigned int unsigned_left = left;
- if (left < 0) {
- // Perform arithmetic shift of a negative number by
- // complementing number, logical shifting, complementing again.
- unsigned_left = ~unsigned_left;
- unsigned_left >>= shift_amount;
- unsigned_left = ~unsigned_left;
- } else {
- unsigned_left >>= shift_amount;
- }
- ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
- answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- if (answer_object->IsUndefined()) {
- return false;
- }
- frame_->Push(Handle<Object>(answer_object));
- return true;
-}
-
-
-void CodeGenerator::JumpIfBothSmiUsingTypeInfo(Result* left,
- Result* right,
- JumpTarget* both_smi) {
- TypeInfo left_info = left->type_info();
- TypeInfo right_info = right->type_info();
- if (left_info.IsDouble() || left_info.IsString() ||
- right_info.IsDouble() || right_info.IsString()) {
- // We know that left and right are not both smi. Don't do any tests.
- return;
- }
-
- if (left->reg().is(right->reg())) {
- if (!left_info.IsSmi()) {
- Condition is_smi = masm()->CheckSmi(left->reg());
- both_smi->Branch(is_smi);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
- left->Unuse();
- right->Unuse();
- both_smi->Jump();
- }
- } else if (!left_info.IsSmi()) {
- if (!right_info.IsSmi()) {
- Condition is_smi = masm()->CheckBothSmi(left->reg(), right->reg());
- both_smi->Branch(is_smi);
- } else {
- Condition is_smi = masm()->CheckSmi(left->reg());
- both_smi->Branch(is_smi);
- }
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
- if (!right_info.IsSmi()) {
- Condition is_smi = masm()->CheckSmi(right->reg());
- both_smi->Branch(is_smi);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
- left->Unuse();
- right->Unuse();
- both_smi->Jump();
- }
- }
-}
-
-
-void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg,
- TypeInfo type,
- DeferredCode* deferred) {
- if (!type.IsSmi()) {
- __ JumpIfNotSmi(reg, deferred->entry_label());
- }
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(reg);
- }
-}
-
-
-void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
- Register right,
- TypeInfo left_info,
- TypeInfo right_info,
- DeferredCode* deferred) {
- if (!left_info.IsSmi() && !right_info.IsSmi()) {
- __ JumpIfNotBothSmi(left, right, deferred->entry_label());
- } else if (!left_info.IsSmi()) {
- __ JumpIfNotSmi(left, deferred->entry_label());
- } else if (!right_info.IsSmi()) {
- __ JumpIfNotSmi(right, deferred->entry_label());
- }
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
- }
-}
-
-
-// Implements a binary operation using a deferred code object and some
-// inline code to operate on smis quickly.
-Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode) {
- // Copy the type info because left and right may be overwritten.
- TypeInfo left_type_info = left->type_info();
- TypeInfo right_type_info = right->type_info();
- Token::Value op = expr->op();
- Result answer;
- // Special handling of div and mod because they use fixed registers.
- if (op == Token::DIV || op == Token::MOD) {
- // We need rax as the quotient register, rdx as the remainder
- // register, neither left nor right in rax or rdx, and left copied
- // to rax.
- Result quotient;
- Result remainder;
- bool left_is_in_rax = false;
- // Step 1: get rax for quotient.
- if ((left->is_register() && left->reg().is(rax)) ||
- (right->is_register() && right->reg().is(rax))) {
- // One or both is in rax. Use a fresh non-rdx register for
- // them.
- Result fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- if (fresh.reg().is(rdx)) {
- remainder = fresh;
- fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- }
- if (left->is_register() && left->reg().is(rax)) {
- quotient = *left;
- *left = fresh;
- left_is_in_rax = true;
- }
- if (right->is_register() && right->reg().is(rax)) {
- quotient = *right;
- *right = fresh;
- }
- __ movq(fresh.reg(), rax);
- } else {
- // Neither left nor right is in rax.
- quotient = allocator_->Allocate(rax);
- }
- ASSERT(quotient.is_register() && quotient.reg().is(rax));
- ASSERT(!(left->is_register() && left->reg().is(rax)));
- ASSERT(!(right->is_register() && right->reg().is(rax)));
-
- // Step 2: get rdx for remainder if necessary.
- if (!remainder.is_valid()) {
- if ((left->is_register() && left->reg().is(rdx)) ||
- (right->is_register() && right->reg().is(rdx))) {
- Result fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- if (left->is_register() && left->reg().is(rdx)) {
- remainder = *left;
- *left = fresh;
- }
- if (right->is_register() && right->reg().is(rdx)) {
- remainder = *right;
- *right = fresh;
- }
- __ movq(fresh.reg(), rdx);
- } else {
- // Neither left nor right is in rdx.
- remainder = allocator_->Allocate(rdx);
- }
- }
- ASSERT(remainder.is_register() && remainder.reg().is(rdx));
- ASSERT(!(left->is_register() && left->reg().is(rdx)));
- ASSERT(!(right->is_register() && right->reg().is(rdx)));
-
- left->ToRegister();
- right->ToRegister();
- frame_->Spill(rax);
- frame_->Spill(rdx);
-
- // Check that left and right are smi tagged.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- (op == Token::DIV) ? rax : rdx,
- left->reg(),
- right->reg(),
- overwrite_mode);
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
- left_type_info, right_type_info, deferred);
-
- if (op == Token::DIV) {
- __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- answer = quotient;
- } else {
- ASSERT(op == Token::MOD);
- __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- answer = remainder;
- }
- ASSERT(answer.is_valid());
- return answer;
- }
-
- // Special handling of shift operations because they use fixed
- // registers.
- if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
- // Move left out of rcx if necessary.
- if (left->is_register() && left->reg().is(rcx)) {
- *left = allocator_->Allocate();
- ASSERT(left->is_valid());
- __ movq(left->reg(), rcx);
- }
- right->ToRegister(rcx);
- left->ToRegister();
- ASSERT(left->is_register() && !left->reg().is(rcx));
- ASSERT(right->is_register() && right->reg().is(rcx));
-
- // We will modify right, it must be spilled.
- frame_->Spill(rcx);
-
- // Use a fresh answer register to avoid spilling the left operand.
- answer = allocator_->Allocate();
- ASSERT(answer.is_valid());
- // Check that both operands are smis using the answer register as a
- // temporary.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- answer.reg(),
- left->reg(),
- rcx,
- overwrite_mode);
-
- Label do_op;
- // Left operand must be unchanged in left->reg() for deferred code.
- // Left operand is in answer.reg(), possibly converted to int32, for
- // inline code.
- __ movq(answer.reg(), left->reg());
- if (right_type_info.IsSmi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(right->reg());
- }
- // If left is not known to be a smi, check if it is.
- // If left is not known to be a number, and it isn't a smi, check if
- // it is a HeapNumber.
- if (!left_type_info.IsSmi()) {
- __ JumpIfSmi(answer.reg(), &do_op);
- if (!left_type_info.IsNumber()) {
- // Branch if not a heapnumber.
- __ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
- FACTORY->heap_number_map());
- deferred->Branch(not_equal);
- }
- // Load integer value into answer register using truncation.
- __ cvttsd2si(answer.reg(),
- FieldOperand(answer.reg(), HeapNumber::kValueOffset));
- // Branch if we might have overflowed.
- // (False negative for Smi::kMinValue)
- __ cmpl(answer.reg(), Immediate(0x80000000));
- deferred->Branch(equal);
- // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging.
- __ Integer32ToSmi(answer.reg(), answer.reg());
- } else {
- // Fast case - both are actually smis.
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left->reg());
- }
- }
- } else {
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), rcx,
- left_type_info, right_type_info, deferred);
- }
- __ bind(&do_op);
-
- // Perform the operation.
- switch (op) {
- case Token::SAR:
- __ SmiShiftArithmeticRight(answer.reg(), answer.reg(), rcx);
- break;
- case Token::SHR: {
- __ SmiShiftLogicalRight(answer.reg(),
- answer.reg(),
- rcx,
- deferred->entry_label());
- break;
- }
- case Token::SHL: {
- __ SmiShiftLeft(answer.reg(),
- answer.reg(),
- rcx);
- break;
- }
- default:
- UNREACHABLE();
- }
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- ASSERT(answer.is_valid());
- return answer;
- }
-
- // Handle the other binary operations.
- left->ToRegister();
- right->ToRegister();
- // A newly allocated register answer is used to hold the answer. The
- // registers containing left and right are not modified so they don't
- // need to be spilled in the fast case.
- answer = allocator_->Allocate();
- ASSERT(answer.is_valid());
-
- // Perform the smi tag check.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- answer.reg(),
- left->reg(),
- right->reg(),
- overwrite_mode);
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
- left_type_info, right_type_info, deferred);
-
- switch (op) {
- case Token::ADD:
- __ SmiAdd(answer.reg(),
- left->reg(),
- right->reg(),
- deferred->entry_label());
- break;
-
- case Token::SUB:
- __ SmiSub(answer.reg(),
- left->reg(),
- right->reg(),
- deferred->entry_label());
- break;
-
- case Token::MUL: {
- __ SmiMul(answer.reg(),
- left->reg(),
- right->reg(),
- deferred->entry_label());
- break;
- }
-
- case Token::BIT_OR:
- __ SmiOr(answer.reg(), left->reg(), right->reg());
- break;
-
- case Token::BIT_AND:
- __ SmiAnd(answer.reg(), left->reg(), right->reg());
- break;
-
- case Token::BIT_XOR:
- __ SmiXor(answer.reg(), left->reg(), right->reg());
- break;
-
- default:
- UNREACHABLE();
- break;
- }
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- ASSERT(answer.is_valid());
- return answer;
-}
-
-
-// Call the appropriate binary operation stub to compute src op value
-// and leave the result in dst.
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
- DeferredInlineSmiOperation(Token::Value op,
- Register dst,
- Register src,
- Smi* value,
- OverwriteMode overwrite_mode)
- : op_(op),
- dst_(dst),
- src_(src),
- value_(value),
- overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiOperation");
- }
-
- virtual void Generate();
-
- private:
- Token::Value op_;
- Register dst_;
- Register src_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiOperation::Generate() {
- // For mod we don't generate all the Smi code inline.
- GenericBinaryOpStub stub(
- op_,
- overwrite_mode_,
- (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
- stub.GenerateCall(masm_, src_, value_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-// Call the appropriate binary operation stub to compute value op src
-// and leave the result in dst.
-class DeferredInlineSmiOperationReversed: public DeferredCode {
- public:
- DeferredInlineSmiOperationReversed(Token::Value op,
- Register dst,
- Smi* value,
- Register src,
- OverwriteMode overwrite_mode)
- : op_(op),
- dst_(dst),
- value_(value),
- src_(src),
- overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiOperationReversed");
- }
-
- virtual void Generate();
-
- private:
- Token::Value op_;
- Register dst_;
- Smi* value_;
- Register src_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiOperationReversed::Generate() {
- GenericBinaryOpStub stub(
- op_,
- overwrite_mode_,
- NO_SMI_CODE_IN_STUB);
- stub.GenerateCall(masm_, value_, src_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-class DeferredInlineSmiAdd: public DeferredCode {
- public:
- DeferredInlineSmiAdd(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiAdd");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiAdd::Generate() {
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
- igostub.GenerateCall(masm_, dst_, value_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-// The result of value + src is in dst. It either overflowed or was not
-// smi tagged. Undo the speculative addition and call the appropriate
-// specialized stub for add. The result is left in dst.
-class DeferredInlineSmiAddReversed: public DeferredCode {
- public:
- DeferredInlineSmiAddReversed(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiAddReversed");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiAddReversed::Generate() {
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
- igostub.GenerateCall(masm_, value_, dst_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-class DeferredInlineSmiSub: public DeferredCode {
- public:
- DeferredInlineSmiSub(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiSub");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiSub::Generate() {
- GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
- igostub.GenerateCall(masm_, dst_, value_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
- Result* operand,
- Handle<Object> value,
- bool reversed,
- OverwriteMode overwrite_mode) {
- // Generate inline code for a binary operation when one of the
- // operands is a constant smi. Consumes the argument "operand".
- if (IsUnsafeSmi(value)) {
- Result unsafe_operand(value);
- if (reversed) {
- return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
- overwrite_mode);
- } else {
- return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
- overwrite_mode);
- }
- }
-
- // Get the literal value.
- Smi* smi_value = Smi::cast(*value);
- int int_value = smi_value->value();
-
- Token::Value op = expr->op();
- Result answer;
- switch (op) {
- case Token::ADD: {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredCode* deferred = NULL;
- if (reversed) {
- deferred = new DeferredInlineSmiAddReversed(operand->reg(),
- smi_value,
- overwrite_mode);
- } else {
- deferred = new DeferredInlineSmiAdd(operand->reg(),
- smi_value,
- overwrite_mode);
- }
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiAddConstant(operand->reg(),
- operand->reg(),
- smi_value,
- deferred->entry_label());
- deferred->BindExit();
- answer = *operand;
- break;
- }
-
- case Token::SUB: {
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- answer = *operand;
- DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- // A smi currently fits in a 32-bit Immediate.
- __ SmiSubConstant(operand->reg(),
- operand->reg(),
- smi_value,
- deferred->entry_label());
- deferred->BindExit();
- operand->Unuse();
- }
- break;
- }
-
- case Token::SAR:
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiShiftArithmeticRightConstant(operand->reg(),
- operand->reg(),
- shift_value);
- deferred->BindExit();
- answer = *operand;
- }
- break;
-
- case Token::SHR:
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- answer.reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiShiftLogicalRightConstant(answer.reg(),
- operand->reg(),
- shift_value,
- deferred->entry_label());
- deferred->BindExit();
- operand->Unuse();
- }
- break;
-
- case Token::SHL:
- if (reversed) {
- operand->ToRegister();
-
- // We need rcx to be available to hold operand, and to be spilled.
- // SmiShiftLeft implicitly modifies rcx.
- if (operand->reg().is(rcx)) {
- frame_->Spill(operand->reg());
- answer = allocator()->Allocate();
- } else {
- Result rcx_reg = allocator()->Allocate(rcx);
- // answer must not be rcx.
- answer = allocator()->Allocate();
- // rcx_reg goes out of scope.
- }
-
- DeferredInlineSmiOperationReversed* deferred =
- new DeferredInlineSmiOperationReversed(op,
- answer.reg(),
- smi_value,
- operand->reg(),
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
-
- __ Move(answer.reg(), smi_value);
- __ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg());
- operand->Unuse();
-
- deferred->BindExit();
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- if (shift_value == 0) {
- // Spill operand so it can be overwritten in the slow case.
- frame_->Spill(operand->reg());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- deferred->BindExit();
- answer = *operand;
- } else {
- // Use a fresh temporary for nonzero shift values.
- answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- answer.reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiShiftLeftConstant(answer.reg(),
- operand->reg(),
- shift_value);
- deferred->BindExit();
- operand->Unuse();
- }
- }
- break;
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND: {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- if (reversed) {
- // Bit operations with a constant smi are commutative.
- // We can swap left and right operands with no problem.
- // Swap left and right overwrite modes. 0->0, 1->2, 2->1.
- overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
- }
- DeferredCode* deferred = new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- if (op == Token::BIT_AND) {
- __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
- } else if (op == Token::BIT_XOR) {
- if (int_value != 0) {
- __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
- }
- } else {
- ASSERT(op == Token::BIT_OR);
- if (int_value != 0) {
- __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
- }
- }
- deferred->BindExit();
- answer = *operand;
- break;
- }
-
- // Generate inline code for mod of powers of 2 and negative powers of 2.
- case Token::MOD:
- if (!reversed &&
- int_value != 0 &&
- (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- __ JumpUnlessNonNegativeSmi(operand->reg(), deferred->entry_label());
- if (int_value < 0) int_value = -int_value;
- if (int_value == 1) {
- __ Move(operand->reg(), Smi::FromInt(0));
- } else {
- __ SmiAndConstant(operand->reg(),
- operand->reg(),
- Smi::FromInt(int_value - 1));
- }
- deferred->BindExit();
- answer = *operand;
- break; // This break only applies if we generated code for MOD.
- }
- // Fall through if we did not find a power of 2 on the right hand side!
- // The next case must be the default.
-
- default: {
- Result constant_operand(value);
- if (reversed) {
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
- overwrite_mode);
- }
- break;
- }
- }
- ASSERT(answer.is_valid());
- return answer;
-}
-
-
-static bool CouldBeNaN(const Result& result) {
- if (result.type_info().IsSmi()) return false;
- if (result.type_info().IsInteger32()) return false;
- if (!result.is_constant()) return true;
- if (!result.handle()->IsHeapNumber()) return false;
- return isnan(HeapNumber::cast(*result.handle())->value());
-}
-
-
-// Convert from signed to unsigned comparison to match the way EFLAGS are set
-// by FPU and XMM compare instructions.
-static Condition DoubleCondition(Condition cc) {
- switch (cc) {
- case less: return below;
- case equal: return equal;
- case less_equal: return below_equal;
- case greater: return above;
- case greater_equal: return above_equal;
- default: UNREACHABLE();
- }
- UNREACHABLE();
- return equal;
-}
-
-
-static CompareFlags ComputeCompareFlags(NaNInformation nan_info,
- bool inline_number_compare) {
- CompareFlags flags = NO_SMI_COMPARE_IN_STUB;
- if (nan_info == kCantBothBeNaN) {
- flags = static_cast<CompareFlags>(flags | CANT_BOTH_BE_NAN);
- }
- if (inline_number_compare) {
- flags = static_cast<CompareFlags>(flags | NO_NUMBER_COMPARE_IN_STUB);
- }
- return flags;
-}
-
-
-void CodeGenerator::Comparison(AstNode* node,
- Condition cc,
- bool strict,
- ControlDestination* dest) {
- // Strict only makes sense for equality comparisons.
- ASSERT(!strict || cc == equal);
-
- Result left_side;
- Result right_side;
- // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
- if (cc == greater || cc == less_equal) {
- cc = ReverseCondition(cc);
- left_side = frame_->Pop();
- right_side = frame_->Pop();
- } else {
- right_side = frame_->Pop();
- left_side = frame_->Pop();
- }
- ASSERT(cc == less || cc == equal || cc == greater_equal);
-
- // If either side is a constant smi, optimize the comparison.
- bool left_side_constant_smi = false;
- bool left_side_constant_null = false;
- bool left_side_constant_1_char_string = false;
- if (left_side.is_constant()) {
- left_side_constant_smi = left_side.handle()->IsSmi();
- left_side_constant_null = left_side.handle()->IsNull();
- left_side_constant_1_char_string =
- (left_side.handle()->IsString() &&
- String::cast(*left_side.handle())->length() == 1 &&
- String::cast(*left_side.handle())->IsAsciiRepresentation());
- }
- bool right_side_constant_smi = false;
- bool right_side_constant_null = false;
- bool right_side_constant_1_char_string = false;
- if (right_side.is_constant()) {
- right_side_constant_smi = right_side.handle()->IsSmi();
- right_side_constant_null = right_side.handle()->IsNull();
- right_side_constant_1_char_string =
- (right_side.handle()->IsString() &&
- String::cast(*right_side.handle())->length() == 1 &&
- String::cast(*right_side.handle())->IsAsciiRepresentation());
- }
-
- if (left_side_constant_smi || right_side_constant_smi) {
- bool is_loop_condition = (node->AsExpression() != NULL) &&
- node->AsExpression()->is_loop_condition();
- ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
- left_side_constant_smi, right_side_constant_smi,
- is_loop_condition);
- } else if (left_side_constant_1_char_string ||
- right_side_constant_1_char_string) {
- if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
- // Trivial case, comparing two constants.
- int left_value = String::cast(*left_side.handle())->Get(0);
- int right_value = String::cast(*right_side.handle())->Get(0);
- switch (cc) {
- case less:
- dest->Goto(left_value < right_value);
- break;
- case equal:
- dest->Goto(left_value == right_value);
- break;
- case greater_equal:
- dest->Goto(left_value >= right_value);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // Only one side is a constant 1 character string.
- // If left side is a constant 1-character string, reverse the operands.
- // Since one side is a constant string, conversion order does not matter.
- if (left_side_constant_1_char_string) {
- Result temp = left_side;
- left_side = right_side;
- right_side = temp;
- cc = ReverseCondition(cc);
- // This may reintroduce greater or less_equal as the value of cc.
- // CompareStub and the inline code both support all values of cc.
- }
- // Implement comparison against a constant string, inlining the case
- // where both sides are strings.
- left_side.ToRegister();
-
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
- JumpTarget is_not_string, is_string;
- Register left_reg = left_side.reg();
- Handle<Object> right_val = right_side.handle();
- ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
- Condition is_smi = masm()->CheckSmi(left_reg);
- is_not_string.Branch(is_smi, &left_side);
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(),
- FieldOperand(left_reg, HeapObject::kMapOffset));
- __ movzxbl(temp.reg(),
- FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
- // If we are testing for equality then make use of the symbol shortcut.
- // Check if the left hand side has the same type as the right hand
- // side (which is always a symbol).
- if (cc == equal) {
- Label not_a_symbol;
- STATIC_ASSERT(kSymbolTag != 0);
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- __ testb(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
- __ j(zero, &not_a_symbol);
- // They are symbols, so do identity compare.
- __ Cmp(left_reg, right_side.handle());
- dest->true_target()->Branch(equal);
- dest->false_target()->Branch(not_equal);
- __ bind(&not_a_symbol);
- }
- // Call the compare stub if the left side is not a flat ascii string.
- __ andb(temp.reg(),
- Immediate(kIsNotStringMask |
- kStringRepresentationMask |
- kStringEncodingMask));
- __ cmpb(temp.reg(),
- Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
- temp.Unuse();
- is_string.Branch(equal, &left_side);
-
- // Setup and call the compare stub.
- is_not_string.Bind(&left_side);
- CompareFlags flags =
- static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
- CompareStub stub(cc, strict, flags);
- Result result = frame_->CallStub(&stub, &left_side, &right_side);
- result.ToRegister();
- __ testq(result.reg(), result.reg());
- result.Unuse();
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
-
- is_string.Bind(&left_side);
- // left_side is a sequential ASCII string.
- ASSERT(left_side.reg().is(left_reg));
- right_side = Result(right_val);
- Result temp2 = allocator_->Allocate();
- ASSERT(temp2.is_valid());
- // Test string equality and comparison.
- if (cc == equal) {
- Label comparison_done;
- __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
- Smi::FromInt(1));
- __ j(not_equal, &comparison_done);
- uint8_t char_value =
- static_cast<uint8_t>(String::cast(*right_val)->Get(0));
- __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
- Immediate(char_value));
- __ bind(&comparison_done);
- } else {
- __ movq(temp2.reg(),
- FieldOperand(left_side.reg(), String::kLengthOffset));
- __ SmiSubConstant(temp2.reg(), temp2.reg(), Smi::FromInt(1));
- Label comparison;
- // If the length is 0 then the subtraction gave -1 which compares less
- // than any character.
- __ j(negative, &comparison);
- // Otherwise load the first character.
- __ movzxbl(temp2.reg(),
- FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
- __ bind(&comparison);
- // Compare the first character of the string with the
- // constant 1-character string.
- uint8_t char_value =
- static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
- __ cmpb(temp2.reg(), Immediate(char_value));
- Label characters_were_different;
- __ j(not_equal, &characters_were_different);
- // If the first character is the same then the long string sorts after
- // the short one.
- __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
- Smi::FromInt(1));
- __ bind(&characters_were_different);
- }
- temp2.Unuse();
- left_side.Unuse();
- right_side.Unuse();
- dest->Split(cc);
- }
- } else {
- // Neither side is a constant Smi, constant 1-char string, or constant null.
- // If either side is a non-smi constant, or known to be a heap number,
- // skip the smi check.
- bool known_non_smi =
- (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
- (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
- left_side.type_info().IsDouble() ||
- right_side.type_info().IsDouble();
-
- NaNInformation nan_info =
- (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
- kBothCouldBeNaN :
- kCantBothBeNaN;
-
- // Inline number comparison handling any combination of smi's and heap
- // numbers if:
- // code is in a loop
- // the compare operation is different from equal
- // compare is not a for-loop comparison
- // The reason for excluding equal is that it will most likely be done
- // with smi's (not heap numbers) and the code to comparing smi's is inlined
- // separately. The same reason applies for for-loop comparison which will
- // also most likely be smi comparisons.
- bool is_loop_condition = (node->AsExpression() != NULL)
- && node->AsExpression()->is_loop_condition();
- bool inline_number_compare =
- loop_nesting() > 0 && cc != equal && !is_loop_condition;
-
- // Left and right needed in registers for the following code.
- left_side.ToRegister();
- right_side.ToRegister();
-
- if (known_non_smi) {
- // Inlined equality check:
- // If at least one of the objects is not NaN, then if the objects
- // are identical, they are equal.
- if (nan_info == kCantBothBeNaN && cc == equal) {
- __ cmpq(left_side.reg(), right_side.reg());
- dest->true_target()->Branch(equal);
- }
-
- // Inlined number comparison:
- if (inline_number_compare) {
- GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
- }
-
- // End of in-line compare, call out to the compare stub. Don't include
- // number comparison in the stub if it was inlined.
- CompareFlags flags = ComputeCompareFlags(nan_info, inline_number_compare);
- CompareStub stub(cc, strict, flags);
- Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flag.
- answer.Unuse();
- dest->Split(cc);
- } else {
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
- JumpTarget is_smi;
- Register left_reg = left_side.reg();
- Register right_reg = right_side.reg();
-
- // In-line check for comparing two smis.
- JumpIfBothSmiUsingTypeInfo(&left_side, &right_side, &is_smi);
-
- if (has_valid_frame()) {
- // Inline the equality check if both operands can't be a NaN. If both
- // objects are the same they are equal.
- if (nan_info == kCantBothBeNaN && cc == equal) {
- __ cmpq(left_side.reg(), right_side.reg());
- dest->true_target()->Branch(equal);
- }
-
- // Inlined number comparison:
- if (inline_number_compare) {
- GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
- }
-
- // End of in-line compare, call out to the compare stub. Don't include
- // number comparison in the stub if it was inlined.
- CompareFlags flags =
- ComputeCompareFlags(nan_info, inline_number_compare);
- CompareStub stub(cc, strict, flags);
- Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags.
- answer.Unuse();
- if (is_smi.is_linked()) {
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
- } else {
- dest->Split(cc);
- }
- }
-
- if (is_smi.is_linked()) {
- is_smi.Bind();
- left_side = Result(left_reg);
- right_side = Result(right_reg);
- __ SmiCompare(left_side.reg(), right_side.reg());
- right_side.Unuse();
- left_side.Unuse();
- dest->Split(cc);
- }
- }
- }
-}
-
-
-void CodeGenerator::ConstantSmiComparison(Condition cc,
- bool strict,
- ControlDestination* dest,
- Result* left_side,
- Result* right_side,
- bool left_side_constant_smi,
- bool right_side_constant_smi,
- bool is_loop_condition) {
- if (left_side_constant_smi && right_side_constant_smi) {
- // Trivial case, comparing two constants.
- int left_value = Smi::cast(*left_side->handle())->value();
- int right_value = Smi::cast(*right_side->handle())->value();
- switch (cc) {
- case less:
- dest->Goto(left_value < right_value);
- break;
- case equal:
- dest->Goto(left_value == right_value);
- break;
- case greater_equal:
- dest->Goto(left_value >= right_value);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // Only one side is a constant Smi.
- // If left side is a constant Smi, reverse the operands.
- // Since one side is a constant Smi, conversion order does not matter.
- if (left_side_constant_smi) {
- Result* temp = left_side;
- left_side = right_side;
- right_side = temp;
- cc = ReverseCondition(cc);
- // This may re-introduce greater or less_equal as the value of cc.
- // CompareStub and the inline code both support all values of cc.
- }
- // Implement comparison against a constant Smi, inlining the case
- // where both sides are smis.
- left_side->ToRegister();
- Register left_reg = left_side->reg();
- Smi* constant_smi = Smi::cast(*right_side->handle());
-
- if (left_side->is_smi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left_reg);
- }
- // Test smi equality and comparison by signed int comparison.
- __ SmiCompare(left_reg, constant_smi);
- left_side->Unuse();
- right_side->Unuse();
- dest->Split(cc);
- } else {
- // Only the case where the left side could possibly be a non-smi is left.
- JumpTarget is_smi;
- if (cc == equal) {
- // We can do the equality comparison before the smi check.
- __ Cmp(left_reg, constant_smi);
- dest->true_target()->Branch(equal);
- Condition left_is_smi = masm_->CheckSmi(left_reg);
- dest->false_target()->Branch(left_is_smi);
- } else {
- // Do the smi check, then the comparison.
- Condition left_is_smi = masm_->CheckSmi(left_reg);
- is_smi.Branch(left_is_smi, left_side, right_side);
- }
-
- // Jump or fall through to here if we are comparing a non-smi to a
- // constant smi. If the non-smi is a heap number and this is not
- // a loop condition, inline the floating point code.
- if (!is_loop_condition) {
- // Right side is a constant smi and left side has been checked
- // not to be a smi.
- JumpTarget not_number;
- __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
- FACTORY->heap_number_map());
- not_number.Branch(not_equal, left_side);
- __ movsd(xmm1,
- FieldOperand(left_reg, HeapNumber::kValueOffset));
- int value = constant_smi->value();
- if (value == 0) {
- __ xorpd(xmm0, xmm0);
- } else {
- Result temp = allocator()->Allocate();
- __ movl(temp.reg(), Immediate(value));
- __ cvtlsi2sd(xmm0, temp.reg());
- temp.Unuse();
- }
- __ ucomisd(xmm1, xmm0);
- // Jump to builtin for NaN.
- not_number.Branch(parity_even, left_side);
- left_side->Unuse();
- dest->true_target()->Branch(DoubleCondition(cc));
- dest->false_target()->Jump();
- not_number.Bind(left_side);
- }
-
- // Setup and call the compare stub.
- CompareFlags flags =
- static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
- CompareStub stub(cc, strict, flags);
- Result result = frame_->CallStub(&stub, left_side, right_side);
- result.ToRegister();
- __ testq(result.reg(), result.reg());
- result.Unuse();
- if (cc == equal) {
- dest->Split(cc);
- } else {
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
-
- // It is important for performance for this case to be at the end.
- is_smi.Bind(left_side, right_side);
- __ SmiCompare(left_reg, constant_smi);
- left_side->Unuse();
- right_side->Unuse();
- dest->Split(cc);
- }
- }
- }
-}
-
-
-// Load a comparison operand into into a XMM register. Jump to not_numbers jump
-// target passing the left and right result if the operand is not a number.
-static void LoadComparisonOperand(MacroAssembler* masm_,
- Result* operand,
- XMMRegister xmm_reg,
- Result* left_side,
- Result* right_side,
- JumpTarget* not_numbers) {
- Label done;
- if (operand->type_info().IsDouble()) {
- // Operand is known to be a heap number, just load it.
- __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- } else if (operand->type_info().IsSmi()) {
- // Operand is known to be a smi. Convert it to double and keep the original
- // smi.
- __ SmiToInteger32(kScratchRegister, operand->reg());
- __ cvtlsi2sd(xmm_reg, kScratchRegister);
- } else {
- // Operand type not known, check for smi or heap number.
- Label smi;
- __ JumpIfSmi(operand->reg(), &smi);
- if (!operand->type_info().IsNumber()) {
- __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
- __ cmpq(FieldOperand(operand->reg(), HeapObject::kMapOffset),
- kScratchRegister);
- not_numbers->Branch(not_equal, left_side, right_side, taken);
- }
- __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&smi);
- // Comvert smi to float and keep the original smi.
- __ SmiToInteger32(kScratchRegister, operand->reg());
- __ cvtlsi2sd(xmm_reg, kScratchRegister);
- __ jmp(&done);
- }
- __ bind(&done);
-}
-
-
-void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
- Result* right_side,
- Condition cc,
- ControlDestination* dest) {
- ASSERT(left_side->is_register());
- ASSERT(right_side->is_register());
-
- JumpTarget not_numbers;
- // Load left and right operand into registers xmm0 and xmm1 and compare.
- LoadComparisonOperand(masm_, left_side, xmm0, left_side, right_side,
- &not_numbers);
- LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
- &not_numbers);
- __ ucomisd(xmm0, xmm1);
- // Bail out if a NaN is involved.
- not_numbers.Branch(parity_even, left_side, right_side);
-
- // Split to destination targets based on comparison.
- left_side->Unuse();
- right_side->Unuse();
- dest->true_target()->Branch(DoubleCondition(cc));
- dest->false_target()->Jump();
-
- not_numbers.Bind(left_side, right_side);
-}
-
-
-// Call the function just below TOS on the stack with the given
-// arguments. The receiver is the TOS.
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
- CallFunctionFlags flags,
- int position) {
- // Push the arguments ("left-to-right") on the stack.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Record the position for debugging purposes.
- CodeForSourcePosition(position);
-
- // Use the shared code stub to call the function.
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop, flags);
- Result answer = frame_->CallStub(&call_function, arg_count + 1);
- // Restore context and replace function on the stack with the
- // result of the stub invocation.
- frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &answer);
-}
-
-
-void CodeGenerator::CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position) {
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments).
- // If the arguments object of the scope has not been allocated,
- // and x.apply is Function.prototype.apply, this optimization
- // just copies y and the arguments of the current function on the
- // stack, as receiver and arguments, and calls x.
- // In the implementation comments, we call x the applicand
- // and y the receiver.
- ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
- ASSERT(arguments->IsArguments());
-
- // Load applicand.apply onto the stack. This will usually
- // give us a megamorphic load site. Not super, but it works.
- Load(applicand);
- frame()->Dup();
- Handle<String> name = FACTORY->LookupAsciiSymbol("apply");
- frame()->Push(name);
- Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
- __ nop();
- frame()->Push(&answer);
-
- // Load the receiver and the existing arguments object onto the
- // expression stack. Avoid allocating the arguments object here.
- Load(receiver);
- LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
-
- // Emit the source position information after having loaded the
- // receiver and the arguments.
- CodeForSourcePosition(position);
- // Contents of frame at this point:
- // Frame[0]: arguments object of the current function or the hole.
- // Frame[1]: receiver
- // Frame[2]: applicand.apply
- // Frame[3]: applicand.
-
- // Check if the arguments object has been lazily allocated
- // already. If so, just use that instead of copying the arguments
- // from the stack. This also deals with cases where a local variable
- // named 'arguments' has been introduced.
- frame_->Dup();
- Result probe = frame_->Pop();
- { VirtualFrame::SpilledScope spilled_scope;
- Label slow, done;
- bool try_lazy = true;
- if (probe.is_constant()) {
- try_lazy = probe.handle()->IsArgumentsMarker();
- } else {
- __ CompareRoot(probe.reg(), Heap::kArgumentsMarkerRootIndex);
- probe.Unuse();
- __ j(not_equal, &slow);
- }
-
- if (try_lazy) {
- Label build_args;
- // Get rid of the arguments object probe.
- frame_->Drop(); // Can be called on a spilled frame.
- // Stack now has 3 elements on it.
- // Contents of stack at this point:
- // rsp[0]: receiver
- // rsp[1]: applicand.apply
- // rsp[2]: applicand.
-
- // Check that the receiver really is a JavaScript object.
- __ movq(rax, Operand(rsp, 0));
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, &build_args);
- // We allow all JSObjects including JSFunctions. As long as
- // JS_FUNCTION_TYPE is the last instance type and it is right
- // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
- // bound.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &build_args);
-
- // Check that applicand.apply is Function.prototype.apply.
- __ movq(rax, Operand(rsp, kPointerSize));
- is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, &build_args);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &build_args);
- __ movq(rcx, FieldOperand(rax, JSFunction::kCodeEntryOffset));
- __ subq(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- Handle<Code> apply_code = Isolate::Current()->builtins()->FunctionApply();
- __ Cmp(rcx, apply_code);
- __ j(not_equal, &build_args);
-
- // Check that applicand is a function.
- __ movq(rdi, Operand(rsp, 2 * kPointerSize));
- is_smi = masm_->CheckSmi(rdi);
- __ j(is_smi, &build_args);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &build_args);
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- Label invoke, adapted;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(rdx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adapted);
-
- // No arguments adaptor frame. Copy fixed number of arguments.
- __ Set(rax, scope()->num_parameters());
- for (int i = 0; i < scope()->num_parameters(); i++) {
- __ push(frame_->ParameterAt(i));
- }
- __ jmp(&invoke);
-
- // Arguments adaptor frame present. Copy arguments from there, but
- // avoid copying too many arguments to avoid stack overflows.
- __ bind(&adapted);
- static const uint32_t kArgumentsLimit = 1 * KB;
- __ SmiToInteger32(rax,
- Operand(rdx,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movl(rcx, rax);
- __ cmpl(rax, Immediate(kArgumentsLimit));
- __ j(above, &build_args);
-
- // Loop through the arguments pushing them onto the execution
- // stack. We don't inform the virtual frame of the push, so we don't
- // have to worry about getting rid of the elements from the virtual
- // frame.
- Label loop;
- // rcx is a small non-negative integer, due to the test above.
- __ testl(rcx, rcx);
- __ j(zero, &invoke);
- __ bind(&loop);
- __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
- __ decl(rcx);
- __ j(not_zero, &loop);
-
- // Invoke the function.
- __ bind(&invoke);
- ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION);
- // Drop applicand.apply and applicand from the stack, and push
- // the result of the function call, but leave the spilled frame
- // unchanged, with 3 elements, so it is correct when we compile the
- // slow-case code.
- __ addq(rsp, Immediate(2 * kPointerSize));
- __ push(rax);
- // Stack now has 1 element:
- // rsp[0]: result
- __ jmp(&done);
-
- // Slow-case: Allocate the arguments object since we know it isn't
- // there, and fall-through to the slow-case where we call
- // applicand.apply.
- __ bind(&build_args);
- // Stack now has 3 elements, because we have jumped from where:
- // rsp[0]: receiver
- // rsp[1]: applicand.apply
- // rsp[2]: applicand.
-
- // StoreArgumentsObject requires a correct frame, and may modify it.
- Result arguments_object = StoreArgumentsObject(false);
- frame_->SpillAll();
- arguments_object.ToRegister();
- frame_->EmitPush(arguments_object.reg());
- arguments_object.Unuse();
- // Stack and frame now have 4 elements.
- __ bind(&slow);
- }
-
- // Generic computation of x.apply(y, args) with no special optimization.
- // Flip applicand.apply and applicand on the stack, so
- // applicand looks like the receiver of the applicand.apply call.
- // Then process it as a normal function call.
- __ movq(rax, Operand(rsp, 3 * kPointerSize));
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
- __ movq(Operand(rsp, 2 * kPointerSize), rax);
- __ movq(Operand(rsp, 3 * kPointerSize), rbx);
-
- CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
- Result res = frame_->CallStub(&call_function, 3);
- // The function and its two arguments have been dropped.
- frame_->Drop(1); // Drop the receiver as well.
- res.ToRegister();
- frame_->EmitPush(res.reg());
- // Stack now has 1 element:
- // rsp[0]: result
- if (try_lazy) __ bind(&done);
- } // End of spilled scope.
- // Restore the context register after a call.
- frame_->RestoreContextRegister();
-}
-
-
-class DeferredStackCheck: public DeferredCode {
- public:
- DeferredStackCheck() {
- set_comment("[ DeferredStackCheck");
- }
-
- virtual void Generate();
-};
-
-
-void DeferredStackCheck::Generate() {
- StackCheckStub stub;
- __ CallStub(&stub);
-}
-
-
-void CodeGenerator::CheckStack() {
- DeferredStackCheck* deferred = new DeferredStackCheck;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- deferred->Branch(below);
- deferred->BindExit();
-}
-
-
-void CodeGenerator::VisitAndSpill(Statement* statement) {
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- Visit(statement);
- if (frame_ != NULL) {
- frame_->SpillAll();
- }
- set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- VisitStatements(statements);
- if (frame_ != NULL) {
- frame_->SpillAll();
- }
- set_in_spilled_code(true);
-
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- ASSERT(!in_spilled_code());
- for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
- Visit(statements->at(i));
- }
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ Block");
- CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- VisitStatements(node->statements());
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals. The inevitable call
- // will sync frame elements to memory anyway, so we do it eagerly to
- // allow us to push the arguments directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
- frame_->EmitPush(rsi); // The context is the first argument.
- frame_->EmitPush(kScratchRegister);
- frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
- frame_->EmitPush(Smi::FromInt(strict_mode_flag()));
- Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
- // Return value is ignored.
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
- Comment cmnt(masm_, "[ Declaration");
- Variable* var = node->proxy()->var();
- ASSERT(var != NULL); // must have been resolved
- Slot* slot = var->AsSlot();
-
- // If it was not possible to allocate the variable at compile time,
- // we need to "declare" it at runtime to make sure it actually
- // exists in the local context.
- if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Variables with a "LOOKUP" slot were introduced as non-locals
- // during variable resolution and must have mode DYNAMIC.
- ASSERT(var->is_dynamic());
- // For now, just do a runtime call. Sync the virtual frame eagerly
- // so we can simply push the arguments into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(rsi);
- __ movq(kScratchRegister, var->name(), RelocInfo::EMBEDDED_OBJECT);
- frame_->EmitPush(kScratchRegister);
- // Declaration nodes are always introduced in one of two modes.
- ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
- PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
- frame_->EmitPush(Smi::FromInt(attr));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (node->mode() == Variable::CONST) {
- frame_->EmitPush(Heap::kTheHoleValueRootIndex);
- } else if (node->fun() != NULL) {
- Load(node->fun());
- } else {
- frame_->EmitPush(Smi::FromInt(0)); // no initial value!
- }
- Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
- // Ignore the return value (declarations are statements).
- return;
- }
-
- ASSERT(!var->is_global());
-
- // If we have a function or a constant, we need to initialize the variable.
- Expression* val = NULL;
- if (node->mode() == Variable::CONST) {
- val = new Literal(FACTORY->the_hole_value());
- } else {
- val = node->fun(); // NULL if we don't have a function
- }
-
- if (val != NULL) {
- {
- // Set the initial value.
- Reference target(this, node->proxy());
- Load(val);
- target.SetValue(NOT_CONST_INIT);
- // The reference is removed from the stack (preserving TOS) when
- // it goes out of scope.
- }
- // Get rid of the assigned value (declarations are statements).
- frame_->Drop();
- }
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ExpressionStatement");
- CodeForStatementPosition(node);
- Expression* expression = node->expression();
- expression->MarkAsStatement();
- Load(expression);
- // Remove the lingering expression result from the top of stack.
- frame_->Drop();
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "// EmptyStatement");
- CodeForStatementPosition(node);
- // nothing to do
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ IfStatement");
- // Generate different code depending on which parts of the if statement
- // are present or not.
- bool has_then_stm = node->HasThenStatement();
- bool has_else_stm = node->HasElseStatement();
-
- CodeForStatementPosition(node);
- JumpTarget exit;
- if (has_then_stm && has_else_stm) {
- JumpTarget then;
- JumpTarget else_;
- ControlDestination dest(&then, &else_, true);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // The else target was bound, so we compile the else part first.
- Visit(node->else_statement());
-
- // We may have dangling jumps to the then part.
- if (then.is_linked()) {
- if (has_valid_frame()) exit.Jump();
- then.Bind();
- Visit(node->then_statement());
- }
- } else {
- // The then target was bound, so we compile the then part first.
- Visit(node->then_statement());
-
- if (else_.is_linked()) {
- if (has_valid_frame()) exit.Jump();
- else_.Bind();
- Visit(node->else_statement());
- }
- }
-
- } else if (has_then_stm) {
- ASSERT(!has_else_stm);
- JumpTarget then;
- ControlDestination dest(&then, &exit, true);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // The exit label was bound. We may have dangling jumps to the
- // then part.
- if (then.is_linked()) {
- exit.Unuse();
- exit.Jump();
- then.Bind();
- Visit(node->then_statement());
- }
- } else {
- // The then label was bound.
- Visit(node->then_statement());
- }
-
- } else if (has_else_stm) {
- ASSERT(!has_then_stm);
- JumpTarget else_;
- ControlDestination dest(&exit, &else_, false);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.true_was_fall_through()) {
- // The exit label was bound. We may have dangling jumps to the
- // else part.
- if (else_.is_linked()) {
- exit.Unuse();
- exit.Jump();
- else_.Bind();
- Visit(node->else_statement());
- }
- } else {
- // The else label was bound.
- Visit(node->else_statement());
- }
-
- } else {
- ASSERT(!has_then_stm && !has_else_stm);
- // We only care about the condition's side effects (not its value
- // or control flow effect). LoadCondition is called without
- // forcing control flow.
- ControlDestination dest(&exit, &exit, true);
- LoadCondition(node->condition(), &dest, false);
- if (!dest.is_used()) {
- // We got a value on the frame rather than (or in addition to)
- // control flow.
- frame_->Drop();
- }
- }
-
- if (exit.is_linked()) {
- exit.Bind();
- }
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ContinueStatement");
- CodeForStatementPosition(node);
- node->target()->continue_target()->Jump();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ BreakStatement");
- CodeForStatementPosition(node);
- node->target()->break_target()->Jump();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ReturnStatement");
-
- CodeForStatementPosition(node);
- Load(node->expression());
- Result return_value = frame_->Pop();
- masm()->positions_recorder()->WriteRecordedPositions();
- if (function_return_is_shadowed_) {
- function_return_.Jump(&return_value);
- } else {
- frame_->PrepareForReturn();
- if (function_return_.is_bound()) {
- // If the function return label is already bound we reuse the
- // code by jumping to the return site.
- function_return_.Jump(&return_value);
- } else {
- function_return_.Bind(&return_value);
- GenerateReturnSequence(&return_value);
- }
- }
-}
-
-
-void CodeGenerator::GenerateReturnSequence(Result* return_value) {
- // The return value is a live (but not currently reference counted)
- // reference to rax. This is safe because the current frame does not
- // contain a reference to rax (it is prepared for the return by spilling
- // all registers).
- if (FLAG_trace) {
- frame_->Push(return_value);
- *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
- }
- return_value->ToRegister(rax);
-
- // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
-
- // Leave the frame and return popping the arguments and the
- // receiver.
- frame_->Exit();
- int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
- __ Ret(arguments_bytes, rcx);
- DeleteFrame();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Add padding that will be overwritten by a debugger breakpoint.
- // The shortest return sequence generated is "movq rsp, rbp; pop rbp; ret k"
- // with length 7 (3 + 1 + 3).
- const int kPadding = Assembler::kJSReturnSequenceLength - 7;
- for (int i = 0; i < kPadding; ++i) {
- masm_->int3();
- }
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- ASSERT(Assembler::kJSReturnSequenceLength <=
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
-}
-
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ WithEnterStatement");
- CodeForStatementPosition(node);
- Load(node->expression());
- Result context;
- if (node->is_catch_block()) {
- context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
- } else {
- context = frame_->CallRuntime(Runtime::kPushContext, 1);
- }
-
- // Update context local.
- frame_->SaveContextRegister();
-
- // Verify that the runtime call result and rsi agree.
- if (FLAG_debug_code) {
- __ cmpq(context.reg(), rsi);
- __ Assert(equal, "Runtime::NewContext should end up in rsi");
- }
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ WithExitStatement");
- CodeForStatementPosition(node);
- // Pop context.
- __ movq(rsi, ContextOperand(rsi, Context::PREVIOUS_INDEX));
- // Update context local.
- frame_->SaveContextRegister();
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ SwitchStatement");
- CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
- // Compile the switch value.
- Load(node->tag());
-
- ZoneList<CaseClause*>* cases = node->cases();
- int length = cases->length();
- CaseClause* default_clause = NULL;
-
- JumpTarget next_test;
- // Compile the case label expressions and comparisons. Exit early
- // if a comparison is unconditionally true. The target next_test is
- // bound before the loop in order to indicate control flow to the
- // first comparison.
- next_test.Bind();
- for (int i = 0; i < length && !next_test.is_unused(); i++) {
- CaseClause* clause = cases->at(i);
- // The default is not a test, but remember it for later.
- if (clause->is_default()) {
- default_clause = clause;
- continue;
- }
-
- Comment cmnt(masm_, "[ Case comparison");
- // We recycle the same target next_test for each test. Bind it if
- // the previous test has not done so and then unuse it for the
- // loop.
- if (next_test.is_linked()) {
- next_test.Bind();
- }
- next_test.Unuse();
-
- // Duplicate the switch value.
- frame_->Dup();
-
- // Compile the label expression.
- Load(clause->label());
-
- // Compare and branch to the body if true or the next test if
- // false. Prefer the next test as a fall through.
- ControlDestination dest(clause->body_target(), &next_test, false);
- Comparison(node, equal, true, &dest);
-
- // If the comparison fell through to the true target, jump to the
- // actual body.
- if (dest.true_was_fall_through()) {
- clause->body_target()->Unuse();
- clause->body_target()->Jump();
- }
- }
-
- // If there was control flow to a next test from the last one
- // compiled, compile a jump to the default or break target.
- if (!next_test.is_unused()) {
- if (next_test.is_linked()) {
- next_test.Bind();
- }
- // Drop the switch value.
- frame_->Drop();
- if (default_clause != NULL) {
- default_clause->body_target()->Jump();
- } else {
- node->break_target()->Jump();
- }
- }
-
- // The last instruction emitted was a jump, either to the default
- // clause or the break target, or else to a case body from the loop
- // that compiles the tests.
- ASSERT(!has_valid_frame());
- // Compile case bodies as needed.
- for (int i = 0; i < length; i++) {
- CaseClause* clause = cases->at(i);
-
- // There are two ways to reach the body: from the corresponding
- // test or as the fall through of the previous body.
- if (clause->body_target()->is_linked() || has_valid_frame()) {
- if (clause->body_target()->is_linked()) {
- if (has_valid_frame()) {
- // If we have both a jump to the test and a fall through, put
- // a jump on the fall through path to avoid the dropping of
- // the switch value on the test path. The exception is the
- // default which has already had the switch value dropped.
- if (clause->is_default()) {
- clause->body_target()->Bind();
- } else {
- JumpTarget body;
- body.Jump();
- clause->body_target()->Bind();
- frame_->Drop();
- body.Bind();
- }
- } else {
- // No fall through to worry about.
- clause->body_target()->Bind();
- if (!clause->is_default()) {
- frame_->Drop();
- }
- }
- } else {
- // Otherwise, we have only fall through.
- ASSERT(has_valid_frame());
- }
-
- // We are now prepared to compile the body.
- Comment cmnt(masm_, "[ Case body");
- VisitStatements(clause->statements());
- }
- clause->body_target()->Unuse();
- }
-
- // We may not have a valid frame here so bind the break target only
- // if needed.
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ DoWhileStatement");
- CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- JumpTarget body(JumpTarget::BIDIRECTIONAL);
- IncrementLoopNesting();
-
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- // Label the top of the loop for the backward jump if necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // Use the continue target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- break;
- case ALWAYS_FALSE:
- // No need to label it.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- break;
- case DONT_KNOW:
- // Continue is the test, so use the backward body target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- body.Bind();
- break;
- }
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- // Compile the test.
- switch (info) {
- case ALWAYS_TRUE:
- // If control flow can fall off the end of the body, jump back
- // to the top and bind the break target at the exit.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- break;
- case ALWAYS_FALSE:
- // We may have had continues or breaks in the body.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- break;
- case DONT_KNOW:
- // We have to compile the test expression if it can be reached by
- // control flow falling out of the body or via continue.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (has_valid_frame()) {
- Comment cmnt(masm_, "[ DoWhileCondition");
- CodeForDoWhileConditionPosition(node);
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), &dest, true);
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- break;
- }
-
- DecrementLoopNesting();
- node->continue_target()->Unuse();
- node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ WhileStatement");
- CodeForStatementPosition(node);
-
- // If the condition is always false and has no side effects, we do not
- // need to compile anything.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- if (info == ALWAYS_FALSE) return;
-
- // Do not duplicate conditions that may have function literal
- // subexpressions. This can cause us to compile the function literal
- // twice.
- bool test_at_bottom = !node->may_have_function_literal();
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- IncrementLoopNesting();
- JumpTarget body;
- if (test_at_bottom) {
- body.set_direction(JumpTarget::BIDIRECTIONAL);
- }
-
- // Based on the condition analysis, compile the test as necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // We will not compile the test expression. Label the top of the
- // loop with the continue target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- break;
- case DONT_KNOW: {
- if (test_at_bottom) {
- // Continue is the test at the bottom, no need to label the test
- // at the top. The body is a backward target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- } else {
- // Label the test at the top as the continue target. The body
- // is a forward-only target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- }
- // Compile the test with the body as the true target and preferred
- // fall-through and with the break target as the false target.
- ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // If we got the break target as fall-through, the test may have
- // been unconditionally false (if there are no jumps to the
- // body).
- if (!body.is_linked()) {
- DecrementLoopNesting();
- return;
- }
-
- // Otherwise, jump around the body on the fall through and then
- // bind the body target.
- node->break_target()->Unuse();
- node->break_target()->Jump();
- body.Bind();
- }
- break;
- }
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- // Based on the condition analysis, compile the backward jump as
- // necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // The loop body has been labeled with the continue target.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- break;
- case DONT_KNOW:
- if (test_at_bottom) {
- // If we have chosen to recompile the test at the bottom,
- // then it is the continue target.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (has_valid_frame()) {
- // The break target is the fall-through (body is a backward
- // jump from here and thus an invalid fall-through).
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), &dest, true);
- }
- } else {
- // If we have chosen not to recompile the test at the bottom,
- // jump back to the one at the top.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- }
- break;
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- // The break target may be already bound (by the condition), or there
- // may not be a valid frame. Bind it only if needed.
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
-}
-
-
-void CodeGenerator::SetTypeForStackSlot(Slot* slot, TypeInfo info) {
- ASSERT(slot->type() == Slot::LOCAL || slot->type() == Slot::PARAMETER);
- if (slot->type() == Slot::LOCAL) {
- frame_->SetTypeForLocalAt(slot->index(), info);
- } else {
- frame_->SetTypeForParamAt(slot->index(), info);
- }
- if (FLAG_debug_code && info.IsSmi()) {
- if (slot->type() == Slot::LOCAL) {
- frame_->PushLocalAt(slot->index());
- } else {
- frame_->PushParameterAt(slot->index());
- }
- Result var = frame_->Pop();
- var.ToRegister();
- __ AbortIfNotSmi(var.reg());
- }
-}
-
-
-void CodeGenerator::GenerateFastSmiLoop(ForStatement* node) {
- // A fast smi loop is a for loop with an initializer
- // that is a simple assignment of a smi to a stack variable,
- // a test that is a simple test of that variable against a smi constant,
- // and a step that is a increment/decrement of the variable, and
- // where the variable isn't modified in the loop body.
- // This guarantees that the variable is always a smi.
-
- Variable* loop_var = node->loop_variable();
- Smi* initial_value = *Handle<Smi>::cast(node->init()
- ->StatementAsSimpleAssignment()->value()->AsLiteral()->handle());
- Smi* limit_value = *Handle<Smi>::cast(
- node->cond()->AsCompareOperation()->right()->AsLiteral()->handle());
- Token::Value compare_op =
- node->cond()->AsCompareOperation()->op();
- bool increments =
- node->next()->StatementAsCountOperation()->op() == Token::INC;
-
- // Check that the condition isn't initially false.
- bool initially_false = false;
- int initial_int_value = initial_value->value();
- int limit_int_value = limit_value->value();
- switch (compare_op) {
- case Token::LT:
- initially_false = initial_int_value >= limit_int_value;
- break;
- case Token::LTE:
- initially_false = initial_int_value > limit_int_value;
- break;
- case Token::GT:
- initially_false = initial_int_value <= limit_int_value;
- break;
- case Token::GTE:
- initially_false = initial_int_value < limit_int_value;
- break;
- default:
- UNREACHABLE();
- }
- if (initially_false) return;
-
- // Only check loop condition at the end.
-
- Visit(node->init());
-
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
- // Set type and stack height of BreakTargets.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
- IncrementLoopNesting();
- loop.Bind();
-
- // Set number type of the loop variable to smi.
- CheckStack(); // TODO(1222600): ignore if body contains calls.
-
- SetTypeForStackSlot(loop_var->AsSlot(), TypeInfo::Smi());
- Visit(node->body());
-
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
-
- if (has_valid_frame()) {
- CodeForStatementPosition(node);
- Slot* loop_var_slot = loop_var->AsSlot();
- if (loop_var_slot->type() == Slot::LOCAL) {
- frame_->TakeLocalAt(loop_var_slot->index());
- } else {
- ASSERT(loop_var_slot->type() == Slot::PARAMETER);
- frame_->TakeParameterAt(loop_var_slot->index());
- }
- Result loop_var_result = frame_->Pop();
- if (!loop_var_result.is_register()) {
- loop_var_result.ToRegister();
- }
- Register loop_var_reg = loop_var_result.reg();
- frame_->Spill(loop_var_reg);
- if (increments) {
- __ SmiAddConstant(loop_var_reg,
- loop_var_reg,
- Smi::FromInt(1));
- } else {
- __ SmiSubConstant(loop_var_reg,
- loop_var_reg,
- Smi::FromInt(1));
- }
-
- frame_->Push(&loop_var_result);
- if (loop_var_slot->type() == Slot::LOCAL) {
- frame_->StoreToLocalAt(loop_var_slot->index());
- } else {
- ASSERT(loop_var_slot->type() == Slot::PARAMETER);
- frame_->StoreToParameterAt(loop_var_slot->index());
- }
- frame_->Drop();
-
- __ SmiCompare(loop_var_reg, limit_value);
- Condition condition;
- switch (compare_op) {
- case Token::LT:
- condition = less;
- break;
- case Token::LTE:
- condition = less_equal;
- break;
- case Token::GT:
- condition = greater;
- break;
- case Token::GTE:
- condition = greater_equal;
- break;
- default:
- condition = never;
- UNREACHABLE();
- }
- loop.Branch(condition);
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ForStatement");
- CodeForStatementPosition(node);
-
- if (node->is_fast_smi_loop()) {
- GenerateFastSmiLoop(node);
- return;
- }
-
- // Compile the init expression if present.
- if (node->init() != NULL) {
- Visit(node->init());
- }
-
- // If the condition is always false and has no side effects, we do not
- // need to compile anything else.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- if (info == ALWAYS_FALSE) return;
-
- // Do not duplicate conditions that may have function literal
- // subexpressions. This can cause us to compile the function literal
- // twice.
- bool test_at_bottom = !node->may_have_function_literal();
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- IncrementLoopNesting();
-
- // Target for backward edge if no test at the bottom, otherwise
- // unused.
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-
- // Target for backward edge if there is a test at the bottom,
- // otherwise used as target for test at the top.
- JumpTarget body;
- if (test_at_bottom) {
- body.set_direction(JumpTarget::BIDIRECTIONAL);
- }
-
- // Based on the condition analysis, compile the test as necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // We will not compile the test expression. Label the top of the
- // loop.
- if (node->next() == NULL) {
- // Use the continue target if there is no update expression.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- } else {
- // Otherwise use the backward loop target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- loop.Bind();
- }
- break;
- case DONT_KNOW: {
- if (test_at_bottom) {
- // Continue is either the update expression or the test at the
- // bottom, no need to label the test at the top.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- } else if (node->next() == NULL) {
- // We are not recompiling the test at the bottom and there is no
- // update expression.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- } else {
- // We are not recompiling the test at the bottom and there is an
- // update expression.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- loop.Bind();
- }
-
- // Compile the test with the body as the true target and preferred
- // fall-through and with the break target as the false target.
- ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // If we got the break target as fall-through, the test may have
- // been unconditionally false (if there are no jumps to the
- // body).
- if (!body.is_linked()) {
- DecrementLoopNesting();
- return;
- }
-
- // Otherwise, jump around the body on the fall through and then
- // bind the body target.
- node->break_target()->Unuse();
- node->break_target()->Jump();
- body.Bind();
- }
- break;
- }
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
-
- Visit(node->body());
-
- // If there is an update expression, compile it if necessary.
- if (node->next() != NULL) {
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
-
- // Control can reach the update by falling out of the body or by a
- // continue.
- if (has_valid_frame()) {
- // Record the source position of the statement as this code which
- // is after the code for the body actually belongs to the loop
- // statement and not the body.
- CodeForStatementPosition(node);
- Visit(node->next());
- }
- }
-
- // Based on the condition analysis, compile the backward jump as
- // necessary.
- switch (info) {
- case ALWAYS_TRUE:
- if (has_valid_frame()) {
- if (node->next() == NULL) {
- node->continue_target()->Jump();
- } else {
- loop.Jump();
- }
- }
- break;
- case DONT_KNOW:
- if (test_at_bottom) {
- if (node->continue_target()->is_linked()) {
- // We can have dangling jumps to the continue target if there
- // was no update expression.
- node->continue_target()->Bind();
- }
- // Control can reach the test at the bottom by falling out of
- // the body, by a continue in the body, or from the update
- // expression.
- if (has_valid_frame()) {
- // The break target is the fall-through (body is a backward
- // jump from here).
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), &dest, true);
- }
- } else {
- // Otherwise, jump back to the test at the top.
- if (has_valid_frame()) {
- if (node->next() == NULL) {
- node->continue_target()->Jump();
- } else {
- loop.Jump();
- }
- }
- }
- break;
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- // The break target may be already bound (by the condition), or there
- // may not be a valid frame. Bind it only if needed.
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
- ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ ForInStatement");
- CodeForStatementPosition(node);
-
- JumpTarget primitive;
- JumpTarget jsobject;
- JumpTarget fixed_array;
- JumpTarget entry(JumpTarget::BIDIRECTIONAL);
- JumpTarget end_del_check;
- JumpTarget exit;
-
- // Get the object to enumerate over (converted to JSObject).
- LoadAndSpill(node->enumerable());
-
- // Both SpiderMonkey and kjs ignore null and undefined in contrast
- // to the specification. 12.6.4 mandates a call to ToObject.
- frame_->EmitPop(rax);
-
- // rax: value to be iterated over
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- exit.Branch(equal);
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
- exit.Branch(equal);
-
- // Stack layout in body:
- // [iteration counter (smi)] <- slot 0
- // [length of array] <- slot 1
- // [FixedArray] <- slot 2
- // [Map or 0] <- slot 3
- // [Object] <- slot 4
-
- // Check if enumerable is already a JSObject
- // rax: value to be iterated over
- Condition is_smi = masm_->CheckSmi(rax);
- primitive.Branch(is_smi);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- jsobject.Branch(above_equal);
-
- primitive.Bind();
- frame_->EmitPush(rax);
- frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
- // function call returns the value in rax, which is where we want it below
-
- jsobject.Bind();
- // Get the set of properties (as a FixedArray or Map).
- // rax: value to be iterated over
- frame_->EmitPush(rax); // Push the object being iterated over.
-
-
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- JumpTarget call_runtime;
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
- JumpTarget check_prototype;
- JumpTarget use_cache;
- __ movq(rcx, rax);
- loop.Bind();
- // Check that there are no elements.
- __ movq(rdx, FieldOperand(rcx, JSObject::kElementsOffset));
- __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
- call_runtime.Branch(not_equal);
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in ebx for the subsequent
- // prototype load.
- __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
- __ CompareRoot(rdx, Heap::kEmptyDescriptorArrayRootIndex);
- call_runtime.Branch(equal);
- // Check that there in an enum cache in the non-empty instance
- // descriptors. This is the case if the next enumeration index
- // field does not contain a smi.
- __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
- is_smi = masm_->CheckSmi(rdx);
- call_runtime.Branch(is_smi);
- // For all objects but the receiver, check that the cache is empty.
- __ cmpq(rcx, rax);
- check_prototype.Branch(equal);
- __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
- call_runtime.Branch(not_equal);
- check_prototype.Bind();
- // Load the prototype from the map and loop if non-null.
- __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- __ CompareRoot(rcx, Heap::kNullValueRootIndex);
- loop.Branch(not_equal);
- // The enum cache is valid. Load the map of the object being
- // iterated over and use the cache for the iteration.
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
- use_cache.Jump();
-
- call_runtime.Bind();
- // Call the runtime to get the property names for the object.
- frame_->EmitPush(rax); // push the Object (slot 4) for the runtime call
- frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
- // If we got a Map, we can do a fast modification check.
- // Otherwise, we got a FixedArray, and we have to do a slow check.
- // rax: map or fixed array (result from call to
- // Runtime::kGetPropertyNamesFast)
- __ movq(rdx, rax);
- __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ CompareRoot(rcx, Heap::kMetaMapRootIndex);
- fixed_array.Branch(not_equal);
-
- use_cache.Bind();
- // Get enum cache
- // rax: map (either the result from a call to
- // Runtime::kGetPropertyNamesFast or has been fetched directly from
- // the object)
- __ movq(rcx, rax);
- __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
- // Get the bridge array held in the enumeration index field.
- __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
- // Get the cache from the bridge array.
- __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
- frame_->EmitPush(rax); // <- slot 3
- frame_->EmitPush(rdx); // <- slot 2
- __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
- frame_->EmitPush(rax); // <- slot 1
- frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
- entry.Jump();
-
- fixed_array.Bind();
- // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
- frame_->EmitPush(Smi::FromInt(0)); // <- slot 3
- frame_->EmitPush(rax); // <- slot 2
-
- // Push the length of the array and the initial index onto the stack.
- __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
- frame_->EmitPush(rax); // <- slot 1
- frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
-
- // Condition.
- entry.Bind();
- // Grab the current frame's height for the break and continue
- // targets only after all the state is pushed on the frame.
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
- __ movq(rax, frame_->ElementAt(0)); // load the current count
- __ SmiCompare(frame_->ElementAt(1), rax); // compare to the array length
- node->break_target()->Branch(below_equal);
-
- // Get the i'th entry of the array.
- __ movq(rdx, frame_->ElementAt(2));
- SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2);
- __ movq(rbx,
- FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize));
-
- // Get the expected map from the stack or a zero map in the
- // permanent slow case rax: current iteration count rbx: i'th entry
- // of the enum cache
- __ movq(rdx, frame_->ElementAt(3));
- // Check if the expected map still matches that of the enumerable.
- // If not, we have to filter the key.
- // rax: current iteration count
- // rbx: i'th entry of the enum cache
- // rdx: expected map value
- __ movq(rcx, frame_->ElementAt(4));
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ cmpq(rcx, rdx);
- end_del_check.Branch(equal);
-
- // Convert the entry to a string (or null if it isn't a property anymore).
- frame_->EmitPush(frame_->ElementAt(4)); // push enumerable
- frame_->EmitPush(rbx); // push entry
- frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
- __ movq(rbx, rax);
-
- // If the property has been removed while iterating, we just skip it.
- __ Cmp(rbx, Smi::FromInt(0));
- node->continue_target()->Branch(equal);
-
- end_del_check.Bind();
- // Store the entry in the 'each' expression and take another spin in the
- // loop. rdx: i'th entry of the enum cache (or string there of)
- frame_->EmitPush(rbx);
- { Reference each(this, node->each());
- // Loading a reference may leave the frame in an unspilled state.
- frame_->SpillAll();
- if (!each.is_illegal()) {
- if (each.size() > 0) {
- frame_->EmitPush(frame_->ElementAt(each.size()));
- each.SetValue(NOT_CONST_INIT);
- frame_->Drop(2); // Drop the original and the copy of the element.
- } else {
- // If the reference has size zero then we can use the value below
- // the reference as if it were above the reference, instead of pushing
- // a new copy of it above the reference.
- each.SetValue(NOT_CONST_INIT);
- frame_->Drop(); // Drop the original of the element.
- }
- }
- }
- // Unloading a reference may leave the frame in an unspilled state.
- frame_->SpillAll();
-
- // Body.
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- VisitAndSpill(node->body());
-
- // Next. Reestablish a spilled frame in case we are coming here via
- // a continue in the body.
- node->continue_target()->Bind();
- frame_->SpillAll();
- frame_->EmitPop(rax);
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- frame_->EmitPush(rax);
- entry.Jump();
-
- // Cleanup. No need to spill because VirtualFrame::Drop is safe for
- // any frame.
- node->break_target()->Bind();
- frame_->Drop(5);
-
- // Exit.
- exit.Bind();
-
- node->continue_target()->Unuse();
- node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
- ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ TryCatchStatement");
- CodeForStatementPosition(node);
-
- JumpTarget try_block;
- JumpTarget exit;
-
- try_block.Call();
- // --- Catch block ---
- frame_->EmitPush(rax);
-
- // Store the caught exception in the catch variable.
- Variable* catch_var = node->catch_var()->var();
- ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
- StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
-
- // Remove the exception from the stack.
- frame_->Drop();
-
- VisitStatementsAndSpill(node->catch_block()->statements());
- if (has_valid_frame()) {
- exit.Jump();
- }
-
-
- // --- Try block ---
- try_block.Bind();
-
- frame_->PushTryHandler(TRY_CATCH_HANDLER);
- int handler_height = frame_->height();
-
- // Shadow the jump targets for all escapes from the try block, including
- // returns. During shadowing, the original target is hidden as the
- // ShadowTarget and operations on the original actually affect the
- // shadowing target.
- //
- // We should probably try to unify the escaping targets and the return
- // target.
- int nof_escapes = node->escaping_targets()->length();
- List<ShadowTarget*> shadows(1 + nof_escapes);
-
- // Add the shadow target for the function return.
- static const int kReturnShadowIndex = 0;
- shadows.Add(new ShadowTarget(&function_return_));
- bool function_return_was_shadowed = function_return_is_shadowed_;
- function_return_is_shadowed_ = true;
- ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
- // Add the remaining shadow targets.
- for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
- }
-
- // Generate code for the statements in the try block.
- VisitStatementsAndSpill(node->try_block()->statements());
-
- // Stop the introduced shadowing and count the number of required unlinks.
- // After shadowing stops, the original targets are unshadowed and the
- // ShadowTargets represent the formerly shadowing targets.
- bool has_unlinks = false;
- for (int i = 0; i < shadows.length(); i++) {
- shadows[i]->StopShadowing();
- has_unlinks = has_unlinks || shadows[i]->is_linked();
- }
- function_return_is_shadowed_ = function_return_was_shadowed;
-
- // Get an external reference to the handler address.
- ExternalReference handler_address(Isolate::k_handler_address, isolate());
-
- // Make sure that there's nothing left on the stack above the
- // handler structure.
- if (FLAG_debug_code) {
- __ movq(kScratchRegister, handler_address);
- __ cmpq(rsp, Operand(kScratchRegister, 0));
- __ Assert(equal, "stack pointer should point to top handler");
- }
-
- // If we can fall off the end of the try block, unlink from try chain.
- if (has_valid_frame()) {
- // The next handler address is on top of the frame. Unlink from
- // the handler list and drop the rest of this handler from the
- // frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ movq(kScratchRegister, handler_address);
- frame_->EmitPop(Operand(kScratchRegister, 0));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
- if (has_unlinks) {
- exit.Jump();
- }
- }
-
- // Generate unlink code for the (formerly) shadowing targets that
- // have been jumped to. Deallocate each shadow target.
- Result return_value;
- for (int i = 0; i < shadows.length(); i++) {
- if (shadows[i]->is_linked()) {
- // Unlink from try chain; be careful not to destroy the TOS if
- // there is one.
- if (i == kReturnShadowIndex) {
- shadows[i]->Bind(&return_value);
- return_value.ToRegister(rax);
- } else {
- shadows[i]->Bind();
- }
- // Because we can be jumping here (to spilled code) from
- // unspilled code, we need to reestablish a spilled frame at
- // this block.
- frame_->SpillAll();
-
- // Reload sp from the top handler, because some statements that we
- // break from (eg, for...in) may have left stuff on the stack.
- __ movq(kScratchRegister, handler_address);
- __ movq(rsp, Operand(kScratchRegister, 0));
- frame_->Forget(frame_->height() - handler_height);
-
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ movq(kScratchRegister, handler_address);
- frame_->EmitPop(Operand(kScratchRegister, 0));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- if (i == kReturnShadowIndex) {
- if (!function_return_is_shadowed_) frame_->PrepareForReturn();
- shadows[i]->other_target()->Jump(&return_value);
- } else {
- shadows[i]->other_target()->Jump();
- }
- }
- }
-
- exit.Bind();
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
- ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ TryFinallyStatement");
- CodeForStatementPosition(node);
-
- // State: Used to keep track of reason for entering the finally
- // block. Should probably be extended to hold information for
- // break/continue from within the try block.
- enum { FALLING, THROWING, JUMPING };
-
- JumpTarget try_block;
- JumpTarget finally_block;
-
- try_block.Call();
-
- frame_->EmitPush(rax);
- // In case of thrown exceptions, this is where we continue.
- __ Move(rcx, Smi::FromInt(THROWING));
- finally_block.Jump();
-
- // --- Try block ---
- try_block.Bind();
-
- frame_->PushTryHandler(TRY_FINALLY_HANDLER);
- int handler_height = frame_->height();
-
- // Shadow the jump targets for all escapes from the try block, including
- // returns. During shadowing, the original target is hidden as the
- // ShadowTarget and operations on the original actually affect the
- // shadowing target.
- //
- // We should probably try to unify the escaping targets and the return
- // target.
- int nof_escapes = node->escaping_targets()->length();
- List<ShadowTarget*> shadows(1 + nof_escapes);
-
- // Add the shadow target for the function return.
- static const int kReturnShadowIndex = 0;
- shadows.Add(new ShadowTarget(&function_return_));
- bool function_return_was_shadowed = function_return_is_shadowed_;
- function_return_is_shadowed_ = true;
- ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
- // Add the remaining shadow targets.
- for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
- }
-
- // Generate code for the statements in the try block.
- VisitStatementsAndSpill(node->try_block()->statements());
-
- // Stop the introduced shadowing and count the number of required unlinks.
- // After shadowing stops, the original targets are unshadowed and the
- // ShadowTargets represent the formerly shadowing targets.
- int nof_unlinks = 0;
- for (int i = 0; i < shadows.length(); i++) {
- shadows[i]->StopShadowing();
- if (shadows[i]->is_linked()) nof_unlinks++;
- }
- function_return_is_shadowed_ = function_return_was_shadowed;
-
- // Get an external reference to the handler address.
- ExternalReference handler_address(Isolate::k_handler_address, isolate());
-
- // If we can fall off the end of the try block, unlink from the try
- // chain and set the state on the frame to FALLING.
- if (has_valid_frame()) {
- // The next handler address is on top of the frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ movq(kScratchRegister, handler_address);
- frame_->EmitPop(Operand(kScratchRegister, 0));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- // Fake a top of stack value (unneeded when FALLING) and set the
- // state in ecx, then jump around the unlink blocks if any.
- frame_->EmitPush(Heap::kUndefinedValueRootIndex);
- __ Move(rcx, Smi::FromInt(FALLING));
- if (nof_unlinks > 0) {
- finally_block.Jump();
- }
- }
-
- // Generate code to unlink and set the state for the (formerly)
- // shadowing targets that have been jumped to.
- for (int i = 0; i < shadows.length(); i++) {
- if (shadows[i]->is_linked()) {
- // If we have come from the shadowed return, the return value is
- // on the virtual frame. We must preserve it until it is
- // pushed.
- if (i == kReturnShadowIndex) {
- Result return_value;
- shadows[i]->Bind(&return_value);
- return_value.ToRegister(rax);
- } else {
- shadows[i]->Bind();
- }
- // Because we can be jumping here (to spilled code) from
- // unspilled code, we need to reestablish a spilled frame at
- // this block.
- frame_->SpillAll();
-
- // Reload sp from the top handler, because some statements that
- // we break from (eg, for...in) may have left stuff on the
- // stack.
- __ movq(kScratchRegister, handler_address);
- __ movq(rsp, Operand(kScratchRegister, 0));
- frame_->Forget(frame_->height() - handler_height);
-
- // Unlink this handler and drop it from the frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ movq(kScratchRegister, handler_address);
- frame_->EmitPop(Operand(kScratchRegister, 0));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- if (i == kReturnShadowIndex) {
- // If this target shadowed the function return, materialize
- // the return value on the stack.
- frame_->EmitPush(rax);
- } else {
- // Fake TOS for targets that shadowed breaks and continues.
- frame_->EmitPush(Heap::kUndefinedValueRootIndex);
- }
- __ Move(rcx, Smi::FromInt(JUMPING + i));
- if (--nof_unlinks > 0) {
- // If this is not the last unlink block, jump around the next.
- finally_block.Jump();
- }
- }
- }
-
- // --- Finally block ---
- finally_block.Bind();
-
- // Push the state on the stack.
- frame_->EmitPush(rcx);
-
- // We keep two elements on the stack - the (possibly faked) result
- // and the state - while evaluating the finally block.
- //
- // Generate code for the statements in the finally block.
- VisitStatementsAndSpill(node->finally_block()->statements());
-
- if (has_valid_frame()) {
- // Restore state and return value or faked TOS.
- frame_->EmitPop(rcx);
- frame_->EmitPop(rax);
- }
-
- // Generate code to jump to the right destination for all used
- // formerly shadowing targets. Deallocate each shadow target.
- for (int i = 0; i < shadows.length(); i++) {
- if (has_valid_frame() && shadows[i]->is_bound()) {
- BreakTarget* original = shadows[i]->other_target();
- __ SmiCompare(rcx, Smi::FromInt(JUMPING + i));
- if (i == kReturnShadowIndex) {
- // The return value is (already) in rax.
- Result return_value = allocator_->Allocate(rax);
- ASSERT(return_value.is_valid());
- if (function_return_is_shadowed_) {
- original->Branch(equal, &return_value);
- } else {
- // Branch around the preparation for return which may emit
- // code.
- JumpTarget skip;
- skip.Branch(not_equal);
- frame_->PrepareForReturn();
- original->Jump(&return_value);
- skip.Bind();
- }
- } else {
- original->Branch(equal);
- }
- }
- }
-
- if (has_valid_frame()) {
- // Check if we need to rethrow the exception.
- JumpTarget exit;
- __ SmiCompare(rcx, Smi::FromInt(THROWING));
- exit.Branch(not_equal);
-
- // Rethrow exception.
- frame_->EmitPush(rax); // undo pop from above
- frame_->CallRuntime(Runtime::kReThrow, 1);
-
- // Done.
- exit.Bind();
- }
-}
-
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ DebuggerStatement");
- CodeForStatementPosition(node);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Spill everything, even constants, to the frame.
- frame_->SpillAll();
-
- frame_->DebugBreak();
- // Ignore the return value.
-#endif
-}
-
-
-void CodeGenerator::InstantiateFunction(
- Handle<SharedFunctionInfo> function_info,
- bool pretenure) {
- // The inevitable call will sync frame elements to memory anyway, so
- // we do it eagerly to allow us to push the arguments directly into
- // place.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- if (!pretenure &&
- scope()->is_function_scope() &&
- function_info->num_literals() == 0) {
- FastNewClosureStub stub(
- function_info->strict_mode() ? kStrictMode : kNonStrictMode);
- frame_->Push(function_info);
- Result answer = frame_->CallStub(&stub, 1);
- frame_->Push(&answer);
- } else {
- // Call the runtime to instantiate the function based on the
- // shared function info.
- frame_->EmitPush(rsi);
- frame_->EmitPush(function_info);
- frame_->EmitPush(pretenure
- ? FACTORY->true_value()
- : FACTORY->false_value());
- Result result = frame_->CallRuntime(Runtime::kNewClosure, 3);
- frame_->Push(&result);
- }
-}
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
- Comment cmnt(masm_, "[ FunctionLiteral");
-
- // Build the function info and instantiate it.
- Handle<SharedFunctionInfo> function_info =
- Compiler::BuildFunctionInfo(node, script());
- // Check for stack-overflow exception.
- if (function_info.is_null()) {
- SetStackOverflow();
- return;
- }
- InstantiateFunction(function_info, node->pretenure());
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
- InstantiateFunction(node->shared_function_info(), false);
-}
-
-
-void CodeGenerator::VisitConditional(Conditional* node) {
- Comment cmnt(masm_, "[ Conditional");
- JumpTarget then;
- JumpTarget else_;
- JumpTarget exit;
- ControlDestination dest(&then, &else_, true);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // The else target was bound, so we compile the else part first.
- Load(node->else_expression());
-
- if (then.is_linked()) {
- exit.Jump();
- then.Bind();
- Load(node->then_expression());
- }
- } else {
- // The then target was bound, so we compile the then part first.
- Load(node->then_expression());
-
- if (else_.is_linked()) {
- exit.Jump();
- else_.Bind();
- Load(node->else_expression());
- }
- }
-
- exit.Bind();
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
-
- JumpTarget slow;
- JumpTarget done;
- Result value;
-
- // Generate fast case for loading from slots that correspond to
- // local/global variables or arguments unless they are shadowed by
- // eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(slot,
- typeof_state,
- &value,
- &slow,
- &done);
-
- slow.Bind();
- // A runtime call is inevitable. We eagerly sync frame elements
- // to memory so that we can push the arguments directly into place
- // on top of the frame.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(rsi);
- __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
- frame_->EmitPush(kScratchRegister);
- if (typeof_state == INSIDE_TYPEOF) {
- value =
- frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- } else {
- value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
- }
-
- done.Bind(&value);
- frame_->Push(&value);
-
- } else if (slot->var()->mode() == Variable::CONST) {
- // Const slots may contain 'the hole' value (the constant hasn't been
- // initialized yet) which needs to be converted into the 'undefined'
- // value.
- //
- // We currently spill the virtual frame because constants use the
- // potentially unsafe direct-frame access of SlotOperand.
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Load const");
- JumpTarget exit;
- __ movq(rcx, SlotOperand(slot, rcx));
- __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
- exit.Branch(not_equal);
- __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
- exit.Bind();
- frame_->EmitPush(rcx);
-
- } else if (slot->type() == Slot::PARAMETER) {
- frame_->PushParameterAt(slot->index());
-
- } else if (slot->type() == Slot::LOCAL) {
- frame_->PushLocalAt(slot->index());
-
- } else {
- // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
- // here.
- //
- // The use of SlotOperand below is safe for an unspilled frame
- // because it will always be a context slot.
- ASSERT(slot->type() == Slot::CONTEXT);
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
- frame_->Push(&temp);
- }
-}
-
-
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
- TypeofState state) {
- LoadFromSlot(slot, state);
-
- // Bail out quickly if we're not using lazy arguments allocation.
- if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
-
- // ... or if the slot isn't a non-parameter arguments slot.
- if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
-
- // Pop the loaded value from the stack.
- Result value = frame_->Pop();
-
- // If the loaded value is a constant, we know if the arguments
- // object has been lazily loaded yet.
- if (value.is_constant()) {
- if (value.handle()->IsArgumentsMarker()) {
- Result arguments = StoreArgumentsObject(false);
- frame_->Push(&arguments);
- } else {
- frame_->Push(&value);
- }
- return;
- }
-
- // The loaded value is in a register. If it is the sentinel that
- // indicates that we haven't loaded the arguments object yet, we
- // need to do it now.
- JumpTarget exit;
- __ CompareRoot(value.reg(), Heap::kArgumentsMarkerRootIndex);
- frame_->Push(&value);
- exit.Branch(not_equal);
- Result arguments = StoreArgumentsObject(false);
- frame_->SetElementAt(0, &arguments);
- exit.Bind();
-}
-
-
-Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
- Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow) {
- // Check that no extension objects have been created by calls to
- // eval from the current scope to the global scope.
- Register context = rsi;
- Result tmp = allocator_->Allocate();
- ASSERT(tmp.is_valid()); // All non-reserved registers were available.
-
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- slow->Branch(not_equal, not_taken);
- }
- // Load next context in chain.
- __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
- __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- context = tmp.reg();
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions. If we have reached an eval scope, we check
- // all extensions from this point.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s->is_eval_scope()) {
- // Loop up the context chain. There is no frame effect so it is
- // safe to use raw labels here.
- Label next, fast;
- if (!context.is(tmp.reg())) {
- __ movq(tmp.reg(), context);
- }
- // Load map for comparison into register, outside loop.
- __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
- __ bind(&next);
- // Terminate at global context.
- __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
- __ j(equal, &fast);
- // Check that extension is NULL.
- __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
- slow->Branch(not_equal);
- // Load next context in chain.
- __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
- __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- __ jmp(&next);
- __ bind(&fast);
- }
- tmp.Unuse();
-
- // All extension objects were empty and it is safe to use a global
- // load IC call.
- LoadGlobal();
- frame_->Push(slot->var()->name());
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Result answer = frame_->CallLoadIC(mode);
- // A test rax instruction following the call signals that the inobject
- // property case was inlined. Ensure that there is not a test rax
- // instruction here.
- masm_->nop();
- return answer;
-}
-
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- Result* result,
- JumpTarget* slow,
- JumpTarget* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
- *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
- done->Jump(result);
-
- } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
- Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
- if (potential_slot != NULL) {
- // Generate fast case for locals that rewrite to slots.
- // Allocate a fresh register to use as a temp in
- // ContextSlotOperandCheckExtensions and to hold the result
- // value.
- *result = allocator_->Allocate();
- ASSERT(result->is_valid());
- __ movq(result->reg(),
- ContextSlotOperandCheckExtensions(potential_slot,
- *result,
- slow));
- if (potential_slot->var()->mode() == Variable::CONST) {
- __ CompareRoot(result->reg(), Heap::kTheHoleValueRootIndex);
- done->Branch(not_equal, result);
- __ LoadRoot(result->reg(), Heap::kUndefinedValueRootIndex);
- }
- done->Jump(result);
- } else if (rewrite != NULL) {
- // Generate fast case for argument loads.
- Property* property = rewrite->AsProperty();
- if (property != NULL) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- Literal* key_literal = property->key()->AsLiteral();
- if (obj_proxy != NULL &&
- key_literal != NULL &&
- obj_proxy->IsArguments() &&
- key_literal->handle()->IsSmi()) {
- // Load arguments object if there are no eval-introduced
- // variables. Then load the argument from the arguments
- // object using keyed load.
- Result arguments = allocator()->Allocate();
- ASSERT(arguments.is_valid());
- __ movq(arguments.reg(),
- ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
- arguments,
- slow));
- frame_->Push(&arguments);
- frame_->Push(key_literal->handle());
- *result = EmitKeyedLoad();
- done->Jump(result);
- }
- }
- }
- }
-}
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
-
- // For now, just do a runtime call. Since the call is inevitable,
- // we eagerly sync the virtual frame so we can directly push the
- // arguments into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- frame_->EmitPush(rsi);
- frame_->EmitPush(slot->var()->name());
-
- Result value;
- if (init_state == CONST_INIT) {
- // Same as the case for a normal store, but ignores attribute
- // (e.g. READ_ONLY) of context slot so that we can initialize const
- // properties (introduced via eval("const foo = (some expr);")). Also,
- // uses the current function context instead of the top context.
- //
- // Note that we must declare the foo upon entry of eval(), via a
- // context slot declaration, but we cannot initialize it at the same
- // time, because the const declaration may be at the end of the eval
- // code (sigh...) and the const variable may have been used before
- // (where its value is 'undefined'). Thus, we can only do the
- // initialization when we actually encounter the expression and when
- // the expression operands are defined and valid, and thus we need the
- // split into 2 operations: declaration of the context slot followed
- // by initialization.
- value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- frame_->Push(Smi::FromInt(strict_mode_flag()));
- value = frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
- }
- // Storing a variable must keep the (new) value on the expression
- // stack. This is necessary for compiling chained assignment
- // expressions.
- frame_->Push(&value);
- } else {
- ASSERT(!slot->var()->is_dynamic());
-
- JumpTarget exit;
- if (init_state == CONST_INIT) {
- ASSERT(slot->var()->mode() == Variable::CONST);
- // Only the first const initialization must be executed (the slot
- // still contains 'the hole' value). When the assignment is executed,
- // the code is identical to a normal store (see below).
- //
- // We spill the frame in the code below because the direct-frame
- // access of SlotOperand is potentially unsafe with an unspilled
- // frame.
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Init const");
- __ movq(rcx, SlotOperand(slot, rcx));
- __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
- exit.Branch(not_equal);
- }
-
- // We must execute the store. Storing a variable must keep the (new)
- // value on the stack. This is necessary for compiling assignment
- // expressions.
- //
- // Note: We will reach here even with slot->var()->mode() ==
- // Variable::CONST because of const declarations which will initialize
- // consts to 'the hole' value and by doing so, end up calling this code.
- if (slot->type() == Slot::PARAMETER) {
- frame_->StoreToParameterAt(slot->index());
- } else if (slot->type() == Slot::LOCAL) {
- frame_->StoreToLocalAt(slot->index());
- } else {
- // The other slot types (LOOKUP and GLOBAL) cannot reach here.
- //
- // The use of SlotOperand below is safe for an unspilled frame
- // because the slot is a context slot.
- ASSERT(slot->type() == Slot::CONTEXT);
- frame_->Dup();
- Result value = frame_->Pop();
- value.ToRegister();
- Result start = allocator_->Allocate();
- ASSERT(start.is_valid());
- __ movq(SlotOperand(slot, start.reg()), value.reg());
- // RecordWrite may destroy the value registers.
- //
- // TODO(204): Avoid actually spilling when the value is not
- // needed (probably the common case).
- frame_->Spill(value.reg());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
- // The results start, value, and temp are unused by going out of
- // scope.
- }
-
- exit.Bind();
- }
-}
-
-
-void CodeGenerator::VisitSlot(Slot* node) {
- Comment cmnt(masm_, "[ Slot");
- LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
-}
-
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
- Comment cmnt(masm_, "[ VariableProxy");
- Variable* var = node->var();
- Expression* expr = var->rewrite();
- if (expr != NULL) {
- Visit(expr);
- } else {
- ASSERT(var->is_global());
- Reference ref(this, node);
- ref.GetValue();
- }
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
- Comment cmnt(masm_, "[ Literal");
- frame_->Push(node->handle());
-}
-
-
-void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
- UNIMPLEMENTED();
- // TODO(X64): Implement security policy for loads of smis.
-}
-
-
-bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
- return false;
-}
-
-
-// Materialize the regexp literal 'node' in the literals array
-// 'literals' of the function. Leave the regexp boilerplate in
-// 'boilerplate'.
-class DeferredRegExpLiteral: public DeferredCode {
- public:
- DeferredRegExpLiteral(Register boilerplate,
- Register literals,
- RegExpLiteral* node)
- : boilerplate_(boilerplate), literals_(literals), node_(node) {
- set_comment("[ DeferredRegExpLiteral");
- }
-
- void Generate();
-
- private:
- Register boilerplate_;
- Register literals_;
- RegExpLiteral* node_;
-};
-
-
-void DeferredRegExpLiteral::Generate() {
- // Since the entry is undefined we call the runtime system to
- // compute the literal.
- // Literal array (0).
- __ push(literals_);
- // Literal index (1).
- __ Push(Smi::FromInt(node_->literal_index()));
- // RegExp pattern (2).
- __ Push(node_->pattern());
- // RegExp flags (3).
- __ Push(node_->flags());
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
-}
-
-
-class DeferredAllocateInNewSpace: public DeferredCode {
- public:
- DeferredAllocateInNewSpace(int size,
- Register target,
- int registers_to_save = 0)
- : size_(size), target_(target), registers_to_save_(registers_to_save) {
- ASSERT(size >= kPointerSize && size <= HEAP->MaxObjectSizeInNewSpace());
- set_comment("[ DeferredAllocateInNewSpace");
- }
- void Generate();
-
- private:
- int size_;
- Register target_;
- int registers_to_save_;
-};
-
-
-void DeferredAllocateInNewSpace::Generate() {
- for (int i = 0; i < kNumRegs; i++) {
- if (registers_to_save_ & (1 << i)) {
- Register save_register = { i };
- __ push(save_register);
- }
- }
- __ Push(Smi::FromInt(size_));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- if (!target_.is(rax)) {
- __ movq(target_, rax);
- }
- for (int i = kNumRegs - 1; i >= 0; i--) {
- if (registers_to_save_ & (1 << i)) {
- Register save_register = { i };
- __ pop(save_register);
- }
- }
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
- Comment cmnt(masm_, "[ RegExp Literal");
-
- // Retrieve the literals array and check the allocated entry. Begin
- // with a writable copy of the function of this activation in a
- // register.
- frame_->PushFunction();
- Result literals = frame_->Pop();
- literals.ToRegister();
- frame_->Spill(literals.reg());
-
- // Load the literals array of the function.
- __ movq(literals.reg(),
- FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
- // Load the literal at the ast saved index.
- Result boilerplate = allocator_->Allocate();
- ASSERT(boilerplate.is_valid());
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
-
- // Check whether we need to materialize the RegExp object. If so,
- // jump to the deferred code passing the literals array.
- DeferredRegExpLiteral* deferred =
- new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
- __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
- deferred->Branch(equal);
- deferred->BindExit();
-
- // Register of boilerplate contains RegExp object.
-
- Result tmp = allocator()->Allocate();
- ASSERT(tmp.is_valid());
-
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
-
- DeferredAllocateInNewSpace* allocate_fallback =
- new DeferredAllocateInNewSpace(size, literals.reg());
- frame_->Push(&boilerplate);
- frame_->SpillTop();
- __ AllocateInNewSpace(size,
- literals.reg(),
- tmp.reg(),
- no_reg,
- allocate_fallback->entry_label(),
- TAG_OBJECT);
- allocate_fallback->BindExit();
- boilerplate = frame_->Pop();
- // Copy from boilerplate to clone and return clone.
-
- for (int i = 0; i < size; i += kPointerSize) {
- __ movq(tmp.reg(), FieldOperand(boilerplate.reg(), i));
- __ movq(FieldOperand(literals.reg(), i), tmp.reg());
- }
- frame_->Push(&literals);
-}
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
- Comment cmnt(masm_, "[ ObjectLiteral");
-
- // Load a writable copy of the function of this activation in a
- // register.
- frame_->PushFunction();
- Result literals = frame_->Pop();
- literals.ToRegister();
- frame_->Spill(literals.reg());
-
- // Load the literals array of the function.
- __ movq(literals.reg(),
- FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
- // Literal array.
- frame_->Push(&literals);
- // Literal index.
- frame_->Push(Smi::FromInt(node->literal_index()));
- // Constant properties.
- frame_->Push(node->constant_properties());
- // Should the object literal have fast elements?
- frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0));
- Result clone;
- if (node->depth() > 1) {
- clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
- clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
- }
- frame_->Push(&clone);
-
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- node->CalculateEmitStore();
-
- for (int i = 0; i < node->properties()->length(); i++) {
- ObjectLiteral::Property* property = node->properties()->at(i);
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- break;
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
- // else fall through.
- case ObjectLiteral::Property::COMPUTED: {
- Handle<Object> key(property->key()->handle());
- if (key->IsSymbol()) {
- // Duplicate the object as the IC receiver.
- frame_->Dup();
- Load(property->value());
- if (property->emit_store()) {
- Result ignored =
- frame_->CallStoreIC(Handle<String>::cast(key), false,
- strict_mode_flag());
- // A test rax instruction following the store IC call would
- // indicate the presence of an inlined version of the
- // store. Add a nop to indicate that there is no such
- // inlined version.
- __ nop();
- } else {
- frame_->Drop(2);
- }
- break;
- }
- // Fall through
- }
- case ObjectLiteral::Property::PROTOTYPE: {
- // Duplicate the object as an argument to the runtime call.
- frame_->Dup();
- Load(property->key());
- Load(property->value());
- if (property->emit_store()) {
- frame_->Push(Smi::FromInt(NONE)); // PropertyAttributes
- // Ignore the result.
- Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 4);
- } else {
- frame_->Drop(3);
- }
- break;
- }
- case ObjectLiteral::Property::SETTER: {
- // Duplicate the object as an argument to the runtime call.
- frame_->Dup();
- Load(property->key());
- frame_->Push(Smi::FromInt(1));
- Load(property->value());
- Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
- // Ignore the result.
- break;
- }
- case ObjectLiteral::Property::GETTER: {
- // Duplicate the object as an argument to the runtime call.
- frame_->Dup();
- Load(property->key());
- frame_->Push(Smi::FromInt(0));
- Load(property->value());
- Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
- // Ignore the result.
- break;
- }
- default: UNREACHABLE();
- }
- }
-}
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- // Load a writable copy of the function of this activation in a
- // register.
- frame_->PushFunction();
- Result literals = frame_->Pop();
- literals.ToRegister();
- frame_->Spill(literals.reg());
-
- // Load the literals array of the function.
- __ movq(literals.reg(),
- FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
- frame_->Push(&literals);
- frame_->Push(Smi::FromInt(node->literal_index()));
- frame_->Push(node->constant_elements());
- int length = node->values()->length();
- Result clone;
- if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
- clone = frame_->CallStub(&stub, 3);
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->cow_arrays_created_stub(), 1);
- } else if (node->depth() > 1) {
- clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- } else {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
- clone = frame_->CallStub(&stub, 3);
- }
- frame_->Push(&clone);
-
- // Generate code to set the elements in the array that are not
- // literals.
- for (int i = 0; i < length; i++) {
- Expression* value = node->values()->at(i);
-
- if (!CompileTimeValue::ArrayLiteralElementNeedsInitialization(value)) {
- continue;
- }
-
- // The property must be set by generated code.
- Load(value);
-
- // Get the property value off the stack.
- Result prop_value = frame_->Pop();
- prop_value.ToRegister();
-
- // Fetch the array literal while leaving a copy on the stack and
- // use it to get the elements array.
- frame_->Dup();
- Result elements = frame_->Pop();
- elements.ToRegister();
- frame_->Spill(elements.reg());
- // Get the elements FixedArray.
- __ movq(elements.reg(),
- FieldOperand(elements.reg(), JSObject::kElementsOffset));
-
- // Write to the indexed properties array.
- int offset = i * kPointerSize + FixedArray::kHeaderSize;
- __ movq(FieldOperand(elements.reg(), offset), prop_value.reg());
-
- // Update the write barrier for the array address.
- frame_->Spill(prop_value.reg()); // Overwritten by the write barrier.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
- }
-}
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
- ASSERT(!in_spilled_code());
- // Call runtime routine to allocate the catch extension object and
- // assign the exception value to the catch variable.
- Comment cmnt(masm_, "[ CatchExtensionObject");
- Load(node->key());
- Load(node->value());
- Result result =
- frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::EmitSlotAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Comment cmnt(masm(), "[ Variable Assignment");
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- ASSERT(var != NULL);
- Slot* slot = var->AsSlot();
- ASSERT(slot != NULL);
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- Load(node->value());
-
- // Perform the binary operation.
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- // Construct the implicit binary operation.
- BinaryOperation expr(node);
- GenericBinaryOperation(&expr,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- }
-
- // Perform the assignment.
- if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
- CodeForSourcePosition(node->position());
- StoreToSlot(slot,
- node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
- }
- ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Comment cmnt(masm(), "[ Named Property Assignment");
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- Property* prop = node->target()->AsProperty();
- ASSERT(var == NULL || (prop == NULL && var->is_global()));
-
- // Initialize name and evaluate the receiver sub-expression if necessary. If
- // the receiver is trivial it is not placed on the stack at this point, but
- // loaded whenever actually needed.
- Handle<String> name;
- bool is_trivial_receiver = false;
- if (var != NULL) {
- name = var->name();
- } else {
- Literal* lit = prop->key()->AsLiteral();
- ASSERT_NOT_NULL(lit);
- name = Handle<String>::cast(lit->handle());
- // Do not materialize the receiver on the frame if it is trivial.
- is_trivial_receiver = prop->obj()->IsTrivial();
- if (!is_trivial_receiver) Load(prop->obj());
- }
-
- // Change to slow case in the beginning of an initialization block to
- // avoid the quadratic behavior of repeatedly adding fast properties.
- if (node->starts_initialization_block()) {
- // Initialization block consists of assignments of the form expr.x = ..., so
- // this will never be an assignment to a variable, so there must be a
- // receiver object.
- ASSERT_EQ(NULL, var);
- if (is_trivial_receiver) {
- frame()->Push(prop->obj());
- } else {
- frame()->Dup();
- }
- Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- // Change to fast case at the end of an initialization block. To prepare for
- // that add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- if (node->ends_initialization_block() && !is_trivial_receiver) {
- frame()->Dup();
- }
-
- // Stack layout:
- // [tos] : receiver (only materialized if non-trivial)
- // [tos+1] : receiver if at the end of an initialization block
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- if (is_trivial_receiver) {
- frame()->Push(prop->obj());
- } else if (var != NULL) {
- // The LoadIC stub expects the object in rax.
- // Freeing rax causes the code generator to load the global into it.
- frame_->Spill(rax);
- LoadGlobal();
- } else {
- frame()->Dup();
- }
- Result value = EmitNamedLoad(name, var != NULL);
- frame()->Push(&value);
- Load(node->value());
-
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- // Construct the implicit binary operation.
- BinaryOperation expr(node);
- GenericBinaryOperation(&expr,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- }
-
- // Stack layout:
- // [tos] : value
- // [tos+1] : receiver (only materialized if non-trivial)
- // [tos+2] : receiver if at the end of an initialization block
-
- // Perform the assignment. It is safe to ignore constants here.
- ASSERT(var == NULL || var->mode() != Variable::CONST);
- ASSERT_NE(Token::INIT_CONST, node->op());
- if (is_trivial_receiver) {
- Result value = frame()->Pop();
- frame()->Push(prop->obj());
- frame()->Push(&value);
- }
- CodeForSourcePosition(node->position());
- bool is_contextual = (var != NULL);
- Result answer = EmitNamedStore(name, is_contextual);
- frame()->Push(&answer);
-
- // Stack layout:
- // [tos] : result
- // [tos+1] : receiver if at the end of an initialization block
-
- if (node->ends_initialization_block()) {
- ASSERT_EQ(NULL, var);
- // The argument to the runtime call is the receiver.
- if (is_trivial_receiver) {
- frame()->Push(prop->obj());
- } else {
- // A copy of the receiver is below the value of the assignment. Swap
- // the receiver and the value of the assignment expression.
- Result result = frame()->Pop();
- Result receiver = frame()->Pop();
- frame()->Push(&result);
- frame()->Push(&receiver);
- }
- Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- // Stack layout:
- // [tos] : result
-
- ASSERT_EQ(frame()->height(), original_height + 1);
-}
-
-
-void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Comment cmnt(masm_, "[ Keyed Property Assignment");
- Property* prop = node->target()->AsProperty();
- ASSERT_NOT_NULL(prop);
-
- // Evaluate the receiver subexpression.
- Load(prop->obj());
-
- // Change to slow case in the beginning of an initialization block to
- // avoid the quadratic behavior of repeatedly adding fast properties.
- if (node->starts_initialization_block()) {
- frame_->Dup();
- Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- // Change to fast case at the end of an initialization block. To prepare for
- // that add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- if (node->ends_initialization_block()) {
- frame_->Dup();
- }
-
- // Evaluate the key subexpression.
- Load(prop->key());
-
- // Stack layout:
- // [tos] : key
- // [tos+1] : receiver
- // [tos+2] : receiver if at the end of an initialization block
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- // Duplicate receiver and key for loading the current property value.
- frame()->PushElementAt(1);
- frame()->PushElementAt(1);
- Result value = EmitKeyedLoad();
- frame()->Push(&value);
- Load(node->value());
-
- // Perform the binary operation.
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- BinaryOperation expr(node);
- GenericBinaryOperation(&expr,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- }
-
- // Stack layout:
- // [tos] : value
- // [tos+1] : key
- // [tos+2] : receiver
- // [tos+3] : receiver if at the end of an initialization block
-
- // Perform the assignment. It is safe to ignore constants here.
- ASSERT(node->op() != Token::INIT_CONST);
- CodeForSourcePosition(node->position());
- Result answer = EmitKeyedStore(prop->key()->type());
- frame()->Push(&answer);
-
- // Stack layout:
- // [tos] : result
- // [tos+1] : receiver if at the end of an initialization block
-
- // Change to fast case at the end of an initialization block.
- if (node->ends_initialization_block()) {
- // The argument to the runtime call is the extra copy of the receiver,
- // which is below the value of the assignment. Swap the receiver and
- // the value of the assignment expression.
- Result result = frame()->Pop();
- Result receiver = frame()->Pop();
- frame()->Push(&result);
- frame()->Push(&receiver);
- Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- // Stack layout:
- // [tos] : result
-
- ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- Property* prop = node->target()->AsProperty();
-
- if (var != NULL && !var->is_global()) {
- EmitSlotAssignment(node);
-
- } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
- (var != NULL && var->is_global())) {
- // Properties whose keys are property names and global variables are
- // treated as named property references. We do not need to consider
- // global 'this' because it is not a valid left-hand side.
- EmitNamedPropertyAssignment(node);
-
- } else if (prop != NULL) {
- // Other properties (including rewritten parameters for a function that
- // uses arguments) are keyed property assignments.
- EmitKeyedPropertyAssignment(node);
-
- } else {
- // Invalid left-hand side.
- Load(node->target());
- Result result = frame()->CallRuntime(Runtime::kThrowReferenceError, 1);
- // The runtime call doesn't actually return but the code generator will
- // still generate code and expects a certain frame height.
- frame()->Push(&result);
- }
-
- ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
- Comment cmnt(masm_, "[ Throw");
- Load(node->exception());
- Result result = frame_->CallRuntime(Runtime::kThrow, 1);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
- Comment cmnt(masm_, "[ Property");
- Reference property(this, node);
- property.GetValue();
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
- Comment cmnt(masm_, "[ Call");
-
- ZoneList<Expression*>* args = node->arguments();
-
- // Check if the function is a variable or a property.
- Expression* function = node->expression();
- Variable* var = function->AsVariableProxy()->AsVariable();
- Property* property = function->AsProperty();
-
- // ------------------------------------------------------------------------
- // Fast-case: Use inline caching.
- // ---
- // According to ECMA-262, section 11.2.3, page 44, the function to call
- // must be resolved after the arguments have been evaluated. The IC code
- // automatically handles this by loading the arguments before the function
- // is resolved in cache misses (this also holds for megamorphic calls).
- // ------------------------------------------------------------------------
-
- if (var != NULL && var->is_possibly_eval()) {
- // ----------------------------------
- // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
- // ----------------------------------
-
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
- // call. Then we call the resolved function using the given
- // arguments.
-
- // Prepare the stack for the call to the resolved function.
- Load(function);
-
- // Allocate a frame slot for the receiver.
- frame_->Push(FACTORY->undefined_value());
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Result to hold the result of the function resolution and the
- // final result of the eval call.
- Result result;
-
- // If we know that eval can only be shadowed by eval-introduced
- // variables we attempt to load the global eval function directly
- // in generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- JumpTarget done;
- if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
- ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
- JumpTarget slow;
- // Prepare the stack for the call to
- // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
- // function, the first argument to the eval call and the
- // receiver.
- Result fun = LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &slow);
- frame_->Push(&fun);
- if (arg_count > 0) {
- frame_->PushElementAt(arg_count);
- } else {
- frame_->Push(FACTORY->undefined_value());
- }
- frame_->PushParameterAt(-1);
-
- // Push the strict mode flag.
- frame_->Push(Smi::FromInt(strict_mode_flag()));
-
- // Resolve the call.
- result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
-
- done.Jump(&result);
- slow.Bind();
- }
-
- // Prepare the stack for the call to ResolvePossiblyDirectEval by
- // pushing the loaded function, the first argument to the eval
- // call and the receiver.
- frame_->PushElementAt(arg_count + 1);
- if (arg_count > 0) {
- frame_->PushElementAt(arg_count);
- } else {
- frame_->Push(FACTORY->undefined_value());
- }
- frame_->PushParameterAt(-1);
-
- // Push the strict mode flag.
- frame_->Push(Smi::FromInt(strict_mode_flag()));
-
- // Resolve the call.
- result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
-
- // If we generated fast-case code bind the jump-target where fast
- // and slow case merge.
- if (done.is_linked()) done.Bind(&result);
-
- // The runtime call returns a pair of values in rax (function) and
- // rdx (receiver). Touch up the stack with the right values.
- Result receiver = allocator_->Allocate(rdx);
- frame_->SetElementAt(arg_count + 1, &result);
- frame_->SetElementAt(arg_count, &receiver);
- receiver.Unuse();
-
- // Call the function.
- CodeForSourcePosition(node->position());
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
- result = frame_->CallStub(&call_function, arg_count + 1);
-
- // Restore the context and overwrite the function on the stack with
- // the result.
- frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &result);
-
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is global
- // ----------------------------------
-
- // Pass the global object as the receiver and let the IC stub
- // patch the stack to use the global proxy as 'this' in the
- // invoked function.
- LoadGlobal();
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Push the name of the function on the frame.
- frame_->Push(var->name());
-
- // Call the IC initialization code.
- CodeForSourcePosition(node->position());
- Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
- arg_count,
- loop_nesting());
- frame_->RestoreContextRegister();
- // Replace the function on the stack with the result.
- frame_->Push(&result);
-
- } else if (var != NULL && var->AsSlot() != NULL &&
- var->AsSlot()->type() == Slot::LOOKUP) {
- // ----------------------------------
- // JavaScript examples:
- //
- // with (obj) foo(1, 2, 3) // foo may be in obj.
- //
- // function f() {};
- // function g() {
- // eval(...);
- // f(); // f could be in extension object.
- // }
- // ----------------------------------
-
- JumpTarget slow, done;
- Result function;
-
- // Generate fast case for loading functions from slots that
- // correspond to local/global variables or arguments unless they
- // are shadowed by eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &function,
- &slow,
- &done);
-
- slow.Bind();
- // Load the function from the context. Sync the frame so we can
- // push the arguments directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(rsi);
- frame_->EmitPush(var->name());
- frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
- // The runtime call returns a pair of values in rax and rdx. The
- // looked-up function is in rax and the receiver is in rdx. These
- // register references are not ref counted here. We spill them
- // eagerly since they are arguments to an inevitable call (and are
- // not sharable by the arguments).
- ASSERT(!allocator()->is_used(rax));
- frame_->EmitPush(rax);
-
- // Load the receiver.
- ASSERT(!allocator()->is_used(rdx));
- frame_->EmitPush(rdx);
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- JumpTarget call;
- call.Jump();
- done.Bind(&function);
- frame_->Push(&function);
- LoadGlobalReceiver();
- call.Bind();
- }
-
- // Call the function.
- CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
-
- } else if (property != NULL) {
- // Check if the key is a literal string.
- Literal* literal = property->key()->AsLiteral();
-
- if (literal != NULL && literal->handle()->IsSymbol()) {
- // ------------------------------------------------------------------
- // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
- // ------------------------------------------------------------------
-
- Handle<String> name = Handle<String>::cast(literal->handle());
-
- if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
- name->IsEqualTo(CStrVector("apply")) &&
- args->length() == 2 &&
- args->at(1)->AsVariableProxy() != NULL &&
- args->at(1)->AsVariableProxy()->IsArguments()) {
- // Use the optimized Function.prototype.apply that avoids
- // allocating lazily allocated arguments objects.
- CallApplyLazy(property->obj(),
- args->at(0),
- args->at(1)->AsVariableProxy(),
- node->position());
-
- } else {
- // Push the receiver onto the frame.
- Load(property->obj());
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Push the name of the function onto the frame.
- frame_->Push(name);
-
- // Call the IC initialization code.
- CodeForSourcePosition(node->position());
- Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET,
- arg_count,
- loop_nesting());
- frame_->RestoreContextRegister();
- frame_->Push(&result);
- }
-
- } else {
- // -------------------------------------------
- // JavaScript example: 'array[index](1, 2, 3)'
- // -------------------------------------------
-
- // Load the function to call from the property through a reference.
- if (property->is_synthetic()) {
- Reference ref(this, property, false);
- ref.GetValue();
- // Use global object as receiver.
- LoadGlobalReceiver();
- // Call the function.
- CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
- } else {
- // Push the receiver onto the frame.
- Load(property->obj());
-
- // Load the name of the function.
- Load(property->key());
-
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- Result key = frame_->Pop();
- Result receiver = frame_->Pop();
- frame_->Push(&key);
- frame_->Push(&receiver);
- key.Unuse();
- receiver.Unuse();
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Place the key on top of stack and call the IC initialization code.
- frame_->PushElementAt(arg_count + 1);
- CodeForSourcePosition(node->position());
- Result result = frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET,
- arg_count,
- loop_nesting());
- frame_->Drop(); // Drop the key still on the stack.
- frame_->RestoreContextRegister();
- frame_->Push(&result);
- }
- }
- } else {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is not global
- // ----------------------------------
-
- // Load the function.
- Load(function);
-
- // Pass the global proxy as the receiver.
- LoadGlobalReceiver();
-
- // Call the function.
- CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
- }
-}
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
- Comment cmnt(masm_, "[ CallNew");
-
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments. This is different from ordinary calls, where the
- // actual function to call is resolved after the arguments have been
- // evaluated.
-
- // Push constructor on the stack. If it's not a function it's used as
- // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
- // ignored.
- Load(node->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = node->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- CodeForSourcePosition(node->position());
- Result result = frame_->CallConstructor(arg_count);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition is_smi = masm_->CheckSmi(value.reg());
- value.Unuse();
- destination()->Split(is_smi);
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (ShouldGenerateLog(args->at(0))) {
- Load(args->at(1));
- Load(args->at(2));
- frame_->CallRuntime(Runtime::kLog, 2);
- }
-#endif
- // Finally, we're expected to leave a value on the top of the stack.
- frame_->Push(FACTORY->undefined_value());
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition non_negative_smi = masm_->CheckNonNegativeSmi(value.reg());
- value.Unuse();
- destination()->Split(non_negative_smi);
-}
-
-
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
- DeferredStringCharCodeAt(Register object,
- Register index,
- Register scratch,
- Register result)
- : result_(result),
- char_code_at_generator_(object,
- index,
- scratch,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharCodeAtGenerator* fast_case_generator() {
- return &char_code_at_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_code_at_generator_.GenerateSlow(masm(), call_helper);
-
- __ bind(&need_conversion_);
- // Move the undefined value into the result register, which will
- // trigger conversion.
- __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
- __ jmp(exit_label());
-
- __ bind(&index_out_of_range_);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ LoadRoot(result_, Heap::kNanValueRootIndex);
- __ jmp(exit_label());
- }
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharCodeAtGenerator char_code_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charCodeAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharCodeAt");
- ASSERT(args->length() == 2);
-
- Load(args->at(0));
- Load(args->at(1));
- Result index = frame_->Pop();
- Result object = frame_->Pop();
- object.ToRegister();
- index.ToRegister();
- // We might mutate the object register.
- frame_->Spill(object.reg());
-
- // We need two extra registers.
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
-
- DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(object.reg(),
- index.reg(),
- scratch.reg(),
- result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
-
-
-class DeferredStringCharFromCode : public DeferredCode {
- public:
- DeferredStringCharFromCode(Register code,
- Register result)
- : char_from_code_generator_(code, result) {}
-
- StringCharFromCodeGenerator* fast_case_generator() {
- return &char_from_code_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_from_code_generator_.GenerateSlow(masm(), call_helper);
- }
-
- private:
- StringCharFromCodeGenerator char_from_code_generator_;
-};
-
-
-// Generates code for creating a one-char string from a char code.
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharFromCode");
- ASSERT(args->length() == 1);
-
- Load(args->at(0));
-
- Result code = frame_->Pop();
- code.ToRegister();
- ASSERT(code.is_valid());
-
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
-
- DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
- code.reg(), result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
-
-
-class DeferredStringCharAt : public DeferredCode {
- public:
- DeferredStringCharAt(Register object,
- Register index,
- Register scratch1,
- Register scratch2,
- Register result)
- : result_(result),
- char_at_generator_(object,
- index,
- scratch1,
- scratch2,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharAtGenerator* fast_case_generator() {
- return &char_at_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_at_generator_.GenerateSlow(masm(), call_helper);
-
- __ bind(&need_conversion_);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ Move(result_, Smi::FromInt(0));
- __ jmp(exit_label());
-
- __ bind(&index_out_of_range_);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
- __ jmp(exit_label());
- }
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharAtGenerator char_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharAt");
- ASSERT(args->length() == 2);
-
- Load(args->at(0));
- Load(args->at(1));
- Result index = frame_->Pop();
- Result object = frame_->Pop();
- object.ToRegister();
- index.ToRegister();
- // We might mutate the object register.
- frame_->Spill(object.reg());
-
- // We need three extra registers.
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
- Result scratch1 = allocator()->Allocate();
- ASSERT(scratch1.is_valid());
- Result scratch2 = allocator()->Allocate();
- ASSERT(scratch2.is_valid());
-
- DeferredStringCharAt* deferred =
- new DeferredStringCharAt(object.reg(),
- index.reg(),
- scratch1.reg(),
- scratch2.reg(),
- result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition is_smi = masm_->CheckSmi(value.reg());
- destination()->false_target()->Branch(is_smi);
- // It is a heap object - get map.
- // Check if the object is a JS array or not.
- __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
- value.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition is_smi = masm_->CheckSmi(value.reg());
- destination()->false_target()->Branch(is_smi);
- // It is a heap object - get map.
- // Check if the object is a regexp.
- __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister);
- value.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- Condition is_smi = masm_->CheckSmi(obj.reg());
- destination()->false_target()->Branch(is_smi);
-
- __ Move(kScratchRegister, FACTORY->null_value());
- __ cmpq(obj.reg(), kScratchRegister);
- destination()->true_target()->Branch(equal);
-
- __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- destination()->false_target()->Branch(not_zero);
- __ movzxbq(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
- __ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
- destination()->false_target()->Branch(below);
- __ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
- obj.Unuse();
- destination()->Split(below_equal);
-}
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
- // typeof(arg) == function).
- // It includes undetectable objects (as opposed to IsObject).
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition is_smi = masm_->CheckSmi(value.reg());
- destination()->false_target()->Branch(is_smi);
- // Check that this is an object.
- __ CmpObjectType(value.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
- value.Unuse();
- destination()->Split(above_equal);
-}
-
-
-// Deferred code to check whether the String JavaScript object is safe for using
-// default value of. This code is called after the bit caching this information
-// in the map has been checked with the map for the object in the map_result_
-// register. On return the register map_result_ contains 1 for true and 0 for
-// false.
-class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
- public:
- DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
- Register map_result,
- Register scratch1,
- Register scratch2)
- : object_(object),
- map_result_(map_result),
- scratch1_(scratch1),
- scratch2_(scratch2) { }
-
- virtual void Generate() {
- Label false_result;
-
- // Check that map is loaded as expected.
- if (FLAG_debug_code) {
- __ cmpq(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ Assert(equal, "Map not in expected register");
- }
-
- // Check for fast case object. Generate false result for slow case object.
- __ movq(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset));
- __ movq(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
- __ CompareRoot(scratch1_, Heap::kHashTableMapRootIndex);
- __ j(equal, &false_result);
-
- // Look for valueOf symbol in the descriptor array, and indicate false if
- // found. The type is not checked, so if it is a transition it is a false
- // negative.
- __ movq(map_result_,
- FieldOperand(map_result_, Map::kInstanceDescriptorsOffset));
- __ movq(scratch1_, FieldOperand(map_result_, FixedArray::kLengthOffset));
- // map_result_: descriptor array
- // scratch1_: length of descriptor array
- // Calculate the end of the descriptor array.
- SmiIndex index = masm_->SmiToIndex(scratch2_, scratch1_, kPointerSizeLog2);
- __ lea(scratch1_,
- Operand(
- map_result_, index.reg, index.scale, FixedArray::kHeaderSize));
- // Calculate location of the first key name.
- __ addq(map_result_,
- Immediate(FixedArray::kHeaderSize +
- DescriptorArray::kFirstIndex * kPointerSize));
- // Loop through all the keys in the descriptor array. If one of these is the
- // symbol valueOf the result is false.
- Label entry, loop;
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(scratch2_, FieldOperand(map_result_, 0));
- __ Cmp(scratch2_, FACTORY->value_of_symbol());
- __ j(equal, &false_result);
- __ addq(map_result_, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpq(map_result_, scratch1_);
- __ j(not_equal, &loop);
-
- // Reload map as register map_result_ was used as temporary above.
- __ movq(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
-
- // If a valueOf property is not found on the object check that it's
- // prototype is the un-modified String prototype. If not result is false.
- __ movq(scratch1_, FieldOperand(map_result_, Map::kPrototypeOffset));
- __ testq(scratch1_, Immediate(kSmiTagMask));
- __ j(zero, &false_result);
- __ movq(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
- __ movq(scratch2_,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(scratch2_,
- FieldOperand(scratch2_, GlobalObject::kGlobalContextOffset));
- __ cmpq(scratch1_,
- ContextOperand(
- scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, &false_result);
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ or_(FieldOperand(map_result_, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ Set(map_result_, 1);
- __ jmp(exit_label());
- __ bind(&false_result);
- // Set false result.
- __ Set(map_result_, 0);
- }
-
- private:
- Register object_;
- Register map_result_;
- Register scratch1_;
- Register scratch2_;
-};
-
-
-void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop(); // Pop the string wrapper.
- obj.ToRegister();
- ASSERT(obj.is_valid());
- if (FLAG_debug_code) {
- __ AbortIfSmi(obj.reg());
- }
-
- // Check whether this map has already been checked to be safe for default
- // valueOf.
- Result map_result = allocator()->Allocate();
- ASSERT(map_result.is_valid());
- __ movq(map_result.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ testb(FieldOperand(map_result.reg(), Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- destination()->true_target()->Branch(not_zero);
-
- // We need an additional two scratch registers for the deferred code.
- Result temp1 = allocator()->Allocate();
- ASSERT(temp1.is_valid());
- Result temp2 = allocator()->Allocate();
- ASSERT(temp2.is_valid());
-
- DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
- new DeferredIsStringWrapperSafeForDefaultValueOf(
- obj.reg(), map_result.reg(), temp1.reg(), temp2.reg());
- deferred->Branch(zero);
- deferred->BindExit();
- __ testq(map_result.reg(), map_result.reg());
- obj.Unuse();
- map_result.Unuse();
- temp1.Unuse();
- temp2.Unuse();
- destination()->Split(not_equal);
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (%_ClassOf(arg) === 'Function')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- Condition is_smi = masm_->CheckSmi(obj.reg());
- destination()->false_target()->Branch(is_smi);
- __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
- obj.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- Condition is_smi = masm_->CheckSmi(obj.reg());
- destination()->false_target()->Branch(is_smi);
- __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ movzxbl(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kBitFieldOffset));
- __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
- obj.Unuse();
- destination()->Split(not_zero);
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- // Get the frame pointer for the calling frame.
- Result fp = allocator()->Allocate();
- __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ Cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &check_frame_marker);
- __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ Cmp(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
- fp.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- Result fp = allocator_->Allocate();
- Result result = allocator_->Allocate();
- ASSERT(fp.is_valid() && result.is_valid());
-
- Label exit;
-
- // Get the number of formal parameters.
- __ Move(result.reg(), Smi::FromInt(scope()->num_parameters()));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &exit);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ movq(result.reg(),
- Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- result.set_type_info(TypeInfo::Smi());
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(result.reg());
- }
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- JumpTarget leave, null, function, non_function_constructor;
- Load(args->at(0)); // Load the object.
- Result obj = frame_->Pop();
- obj.ToRegister();
- frame_->Spill(obj.reg());
-
- // If the object is a smi, we return null.
- Condition is_smi = masm_->CheckSmi(obj.reg());
- null.Branch(is_smi);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
-
- __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
- null.Branch(below);
-
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
- function.Branch(equal);
-
- // Check if the constructor in the map is a function.
- __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
- __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
- non_function_constructor.Branch(not_equal);
-
- // The obj register now contains the constructor function. Grab the
- // instance class name from there.
- __ movq(obj.reg(),
- FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
- __ movq(obj.reg(),
- FieldOperand(obj.reg(),
- SharedFunctionInfo::kInstanceClassNameOffset));
- frame_->Push(&obj);
- leave.Jump();
-
- // Functions have class 'Function'.
- function.Bind();
- frame_->Push(FACTORY->function_class_symbol());
- leave.Jump();
-
- // Objects with a non-function constructor have class 'Object'.
- non_function_constructor.Bind();
- frame_->Push(FACTORY->Object_symbol());
- leave.Jump();
-
- // Non-JS objects have class null.
- null.Bind();
- frame_->Push(FACTORY->null_value());
-
- // All done.
- leave.Bind();
-}
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- JumpTarget leave;
- Load(args->at(0)); // Load the object.
- frame_->Dup();
- Result object = frame_->Pop();
- object.ToRegister();
- ASSERT(object.is_valid());
- // if (object->IsSmi()) return object.
- Condition is_smi = masm_->CheckSmi(object.reg());
- leave.Branch(is_smi);
- // It is a heap object - get map.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- // if (!object->IsJSValue()) return object.
- __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
- leave.Branch(not_equal);
- __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
- object.Unuse();
- frame_->SetElementAt(0, &temp);
- leave.Bind();
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
- JumpTarget leave;
- Load(args->at(0)); // Load the object.
- Load(args->at(1)); // Load the value.
- Result value = frame_->Pop();
- Result object = frame_->Pop();
- value.ToRegister();
- object.ToRegister();
-
- // if (object->IsSmi()) return value.
- Condition is_smi = masm_->CheckSmi(object.reg());
- leave.Branch(is_smi, &value);
-
- // It is a heap object - get its map.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- // if (!object->IsJSValue()) return value.
- __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
- leave.Branch(not_equal, &value);
-
- // Store the value.
- __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- Result duplicate_value = allocator_->Allocate();
- ASSERT(duplicate_value.is_valid());
- __ movq(duplicate_value.reg(), value.reg());
- // The object register is also overwritten by the write barrier and
- // possibly aliased in the frame.
- frame_->Spill(object.reg());
- __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
- scratch.reg());
- object.Unuse();
- scratch.Unuse();
- duplicate_value.Unuse();
-
- // Leave.
- leave.Bind(&value);
- frame_->Push(&value);
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in rdx and the formal
- // parameter count in rax.
- Load(args->at(0));
- Result key = frame_->Pop();
- // Explicitly create a constant result.
- Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
- // Call the shared stub to get to arguments[key].
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- Result result = frame_->CallStub(&stub, &key, &count);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- Load(args->at(0));
- Load(args->at(1));
- Result right = frame_->Pop();
- Result left = frame_->Pop();
- right.ToRegister();
- left.ToRegister();
- __ cmpq(right.reg(), left.reg());
- right.Unuse();
- left.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
- // RBP value is aligned, so it should be tagged as a smi (without necesarily
- // being padded as a smi, so it should not be treated as a smi.).
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- Result rbp_as_smi = allocator_->Allocate();
- ASSERT(rbp_as_smi.is_valid());
- __ movq(rbp_as_smi.reg(), rbp);
- frame_->Push(&rbp_as_smi);
-}
-
-
-void CodeGenerator::GenerateRandomHeapNumber(
- ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
- frame_->SpillAll();
-
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
- __ AllocateHeapNumber(rbx, rcx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ movq(rbx, rax);
-
- __ bind(&heapnumber_allocated);
-
- // Return a random uint32 number in rax.
- // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
- __ PrepareCallCFunction(1);
-#ifdef _WIN64
- __ LoadAddress(rcx, ExternalReference::isolate_address());
-#else
- __ LoadAddress(rdi, ExternalReference::isolate_address());
-#endif
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- // Convert 32 random bits in rax to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- __ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm1, rcx);
- __ movd(xmm0, rax);
- __ cvtss2sd(xmm1, xmm1);
- __ xorpd(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
- __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
-
- __ movq(rax, rbx);
- Result result = allocator_->Allocate(rax);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
-
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
-
- SubStringStub stub;
- Result answer = frame_->CallStub(&stub, 3);
- frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
-
- StringCompareStub stub;
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 4);
-
- // Load the arguments on the stack and call the runtime system.
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
- Load(args->at(3));
- RegExpExecStub stub;
- Result result = frame_->CallStub(&stub, 4);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
- ASSERT_EQ(3, args->length());
- Load(args->at(0)); // Size of array, smi.
- Load(args->at(1)); // "index" property value.
- Load(args->at(2)); // "input" property value.
- RegExpConstructResultStub stub;
- Result result = frame_->CallStub(&stub, 3);
- frame_->Push(&result);
-}
-
-
-class DeferredSearchCache: public DeferredCode {
- public:
- DeferredSearchCache(Register dst,
- Register cache,
- Register key,
- Register scratch)
- : dst_(dst), cache_(cache), key_(key), scratch_(scratch) {
- set_comment("[ DeferredSearchCache");
- }
-
- virtual void Generate();
-
- private:
- Register dst_; // on invocation index of finger (as int32), on exit
- // holds value being looked up.
- Register cache_; // instance of JSFunctionResultCache.
- Register key_; // key being looked up.
- Register scratch_;
-};
-
-
-// Return a position of the element at |index| + |additional_offset|
-// in FixedArray pointer to which is held in |array|. |index| is int32.
-static Operand ArrayElement(Register array,
- Register index,
- int additional_offset = 0) {
- int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
- return FieldOperand(array, index, times_pointer_size, offset);
-}
-
-
-void DeferredSearchCache::Generate() {
- Label first_loop, search_further, second_loop, cache_miss;
-
- Immediate kEntriesIndexImm = Immediate(JSFunctionResultCache::kEntriesIndex);
- Immediate kEntrySizeImm = Immediate(JSFunctionResultCache::kEntrySize);
-
- // Check the cache from finger to start of the cache.
- __ bind(&first_loop);
- __ subl(dst_, kEntrySizeImm);
- __ cmpl(dst_, kEntriesIndexImm);
- __ j(less, &search_further);
-
- __ cmpq(ArrayElement(cache_, dst_), key_);
- __ j(not_equal, &first_loop);
-
- __ Integer32ToSmiField(
- FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
- __ movq(dst_, ArrayElement(cache_, dst_, 1));
- __ jmp(exit_label());
-
- __ bind(&search_further);
-
- // Check the cache from end of cache up to finger.
- __ SmiToInteger32(dst_,
- FieldOperand(cache_,
- JSFunctionResultCache::kCacheSizeOffset));
- __ SmiToInteger32(scratch_,
- FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
-
- __ bind(&second_loop);
- __ subl(dst_, kEntrySizeImm);
- __ cmpl(dst_, scratch_);
- __ j(less_equal, &cache_miss);
-
- __ cmpq(ArrayElement(cache_, dst_), key_);
- __ j(not_equal, &second_loop);
-
- __ Integer32ToSmiField(
- FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
- __ movq(dst_, ArrayElement(cache_, dst_, 1));
- __ jmp(exit_label());
-
- __ bind(&cache_miss);
- __ push(cache_); // store a reference to cache
- __ push(key_); // store a key
- __ push(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ push(key_);
- // On x64 function must be in rdi.
- __ movq(rdi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
- ParameterCount expected(1);
- __ InvokeFunction(rdi, expected, CALL_FUNCTION);
-
- // Find a place to put new cached value into.
- Label add_new_entry, update_cache;
- __ movq(rcx, Operand(rsp, kPointerSize)); // restore the cache
- // Possible optimization: cache size is constant for the given cache
- // so technically we could use a constant here. However, if we have
- // cache miss this optimization would hardly matter much.
-
- // Check if we could add new entry to cache.
- __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ SmiToInteger32(r9,
- FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
- __ cmpl(rbx, r9);
- __ j(greater, &add_new_entry);
-
- // Check if we could evict entry after finger.
- __ SmiToInteger32(rdx,
- FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
- __ addl(rdx, kEntrySizeImm);
- Label forward;
- __ cmpl(rbx, rdx);
- __ j(greater, &forward);
- // Need to wrap over the cache.
- __ movl(rdx, kEntriesIndexImm);
- __ bind(&forward);
- __ movl(r9, rdx);
- __ jmp(&update_cache);
-
- __ bind(&add_new_entry);
- // r9 holds cache size as int32.
- __ leal(rbx, Operand(r9, JSFunctionResultCache::kEntrySize));
- __ Integer32ToSmiField(
- FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
-
- // Update the cache itself.
- // r9 holds the index as int32.
- __ bind(&update_cache);
- __ pop(rbx); // restore the key
- __ Integer32ToSmiField(
- FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
- // Store key.
- __ movq(ArrayElement(rcx, r9), rbx);
- __ RecordWrite(rcx, 0, rbx, r9);
-
- // Store value.
- __ pop(rcx); // restore the cache.
- __ SmiToInteger32(rdx,
- FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
- __ incl(rdx);
- // Backup rax, because the RecordWrite macro clobbers its arguments.
- __ movq(rbx, rax);
- __ movq(ArrayElement(rcx, rdx), rax);
- __ RecordWrite(rcx, 0, rbx, rdx);
-
- if (!dst_.is(rax)) {
- __ movq(dst_, rax);
- }
-}
-
-
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- Isolate::Current()->global_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
- frame_->Push(FACTORY->undefined_value());
- return;
- }
-
- Load(args->at(1));
- Result key = frame_->Pop();
- key.ToRegister();
-
- Result cache = allocator()->Allocate();
- ASSERT(cache.is_valid());
- __ movq(cache.reg(), ContextOperand(rsi, Context::GLOBAL_INDEX));
- __ movq(cache.reg(),
- FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset));
- __ movq(cache.reg(),
- ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ movq(cache.reg(),
- FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id)));
-
- Result tmp = allocator()->Allocate();
- ASSERT(tmp.is_valid());
-
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
-
- DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
- cache.reg(),
- key.reg(),
- scratch.reg());
-
- const int kFingerOffset =
- FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
- // tmp.reg() now holds finger offset as a smi.
- __ SmiToInteger32(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
- __ cmpq(key.reg(), FieldOperand(cache.reg(),
- tmp.reg(), times_pointer_size,
- FixedArray::kHeaderSize));
- deferred->Branch(not_equal);
- __ movq(tmp.reg(), FieldOperand(cache.reg(),
- tmp.reg(), times_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
-
- deferred->BindExit();
- frame_->Push(&tmp);
-}
-
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
-
- // Load the argument on the stack and jump to the runtime.
- Load(args->at(0));
-
- NumberToStringStub stub;
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-class DeferredSwapElements: public DeferredCode {
- public:
- DeferredSwapElements(Register object, Register index1, Register index2)
- : object_(object), index1_(index1), index2_(index2) {
- set_comment("[ DeferredSwapElements");
- }
-
- virtual void Generate();
-
- private:
- Register object_, index1_, index2_;
-};
-
-
-void DeferredSwapElements::Generate() {
- __ push(object_);
- __ push(index1_);
- __ push(index2_);
- __ CallRuntime(Runtime::kSwapElements, 3);
-}
-
-
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
- Comment cmnt(masm_, "[ GenerateSwapElements");
-
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
-
- Result index2 = frame_->Pop();
- index2.ToRegister();
-
- Result index1 = frame_->Pop();
- index1.ToRegister();
-
- Result object = frame_->Pop();
- object.ToRegister();
-
- Result tmp1 = allocator()->Allocate();
- tmp1.ToRegister();
- Result tmp2 = allocator()->Allocate();
- tmp2.ToRegister();
-
- frame_->Spill(object.reg());
- frame_->Spill(index1.reg());
- frame_->Spill(index2.reg());
-
- DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(),
- index1.reg(),
- index2.reg());
-
- // Fetch the map and check if array is in fast case.
- // Check that object doesn't require security checks and
- // has no indexed interceptor.
- __ CmpObjectType(object.reg(), JS_ARRAY_TYPE, tmp1.reg());
- deferred->Branch(not_equal);
- __ testb(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
- Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
- deferred->Branch(not_zero);
-
- // Check the object's elements are in fast case and writable.
- __ movq(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- deferred->Branch(not_equal);
-
- // Check that both indices are smis.
- Condition both_smi = masm()->CheckBothSmi(index1.reg(), index2.reg());
- deferred->Branch(NegateCondition(both_smi));
-
- // Check that both indices are valid.
- __ movq(tmp2.reg(), FieldOperand(object.reg(), JSArray::kLengthOffset));
- __ SmiCompare(tmp2.reg(), index1.reg());
- deferred->Branch(below_equal);
- __ SmiCompare(tmp2.reg(), index2.reg());
- deferred->Branch(below_equal);
-
- // Bring addresses into index1 and index2.
- __ SmiToInteger32(index1.reg(), index1.reg());
- __ lea(index1.reg(), FieldOperand(tmp1.reg(),
- index1.reg(),
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ SmiToInteger32(index2.reg(), index2.reg());
- __ lea(index2.reg(), FieldOperand(tmp1.reg(),
- index2.reg(),
- times_pointer_size,
- FixedArray::kHeaderSize));
-
- // Swap elements.
- __ movq(object.reg(), Operand(index1.reg(), 0));
- __ movq(tmp2.reg(), Operand(index2.reg(), 0));
- __ movq(Operand(index2.reg(), 0), object.reg());
- __ movq(Operand(index1.reg(), 0), tmp2.reg());
-
- Label done;
- __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
- // Possible optimization: do a check that both values are smis
- // (or them and test against Smi mask.)
-
- __ movq(tmp2.reg(), tmp1.reg());
- __ RecordWriteHelper(tmp1.reg(), index1.reg(), object.reg());
- __ RecordWriteHelper(tmp2.reg(), index2.reg(), object.reg());
- __ bind(&done);
-
- deferred->BindExit();
- frame_->Push(FACTORY->undefined_value());
-}
-
-
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
- Comment cmnt(masm_, "[ GenerateCallFunction");
-
- ASSERT(args->length() >= 2);
-
- int n_args = args->length() - 2; // for receiver and function.
- Load(args->at(0)); // receiver
- for (int i = 0; i < n_args; i++) {
- Load(args->at(i + 1));
- }
- Load(args->at(n_args + 1)); // function
- Result result = frame_->CallJSFunction(n_args);
- frame_->Push(&result);
-}
-
-
-// Generates the Math.pow method. Only handles special cases and
-// branches to the runtime system for everything else. Please note
-// that this function assumes that the callsite has executed ToNumber
-// on both arguments.
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
- Load(args->at(0));
- Load(args->at(1));
-
- Label allocate_return;
- // Load the two operands while leaving the values on the frame.
- frame()->Dup();
- Result exponent = frame()->Pop();
- exponent.ToRegister();
- frame()->Spill(exponent.reg());
- frame()->PushElementAt(1);
- Result base = frame()->Pop();
- base.ToRegister();
- frame()->Spill(base.reg());
-
- Result answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- ASSERT(!exponent.reg().is(base.reg()));
- JumpTarget call_runtime;
-
- // Save 1 in xmm3 - we need this several times later on.
- __ movl(answer.reg(), Immediate(1));
- __ cvtlsi2sd(xmm3, answer.reg());
-
- Label exponent_nonsmi;
- Label base_nonsmi;
- // If the exponent is a heap number go to that specific case.
- __ JumpIfNotSmi(exponent.reg(), &exponent_nonsmi);
- __ JumpIfNotSmi(base.reg(), &base_nonsmi);
-
- // Optimized version when y is an integer.
- Label powi;
- __ SmiToInteger32(base.reg(), base.reg());
- __ cvtlsi2sd(xmm0, base.reg());
- __ jmp(&powi);
- // exponent is smi and base is a heapnumber.
- __ bind(&base_nonsmi);
- __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- call_runtime.Branch(not_equal);
-
- __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
- // Optimized version of pow if y is an integer.
- __ bind(&powi);
- __ SmiToInteger32(exponent.reg(), exponent.reg());
-
- // Save exponent in base as we need to check if exponent is negative later.
- // We know that base and exponent are in different registers.
- __ movl(base.reg(), exponent.reg());
-
- // Get absolute value of exponent.
- Label no_neg;
- __ cmpl(exponent.reg(), Immediate(0));
- __ j(greater_equal, &no_neg);
- __ negl(exponent.reg());
- __ bind(&no_neg);
-
- // Load xmm1 with 1.
- __ movsd(xmm1, xmm3);
- Label while_true;
- Label no_multiply;
-
- __ bind(&while_true);
- __ shrl(exponent.reg(), Immediate(1));
- __ j(not_carry, &no_multiply);
- __ mulsd(xmm1, xmm0);
- __ bind(&no_multiply);
- __ testl(exponent.reg(), exponent.reg());
- __ mulsd(xmm0, xmm0);
- __ j(not_zero, &while_true);
-
- // x has the original value of y - if y is negative return 1/result.
- __ testl(base.reg(), base.reg());
- __ j(positive, &allocate_return);
- // Special case if xmm1 has reached infinity.
- __ movl(answer.reg(), Immediate(0x7FB00000));
- __ movd(xmm0, answer.reg());
- __ cvtss2sd(xmm0, xmm0);
- __ ucomisd(xmm0, xmm1);
- call_runtime.Branch(equal);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
-
- // exponent (or both) is a heapnumber - no matter what we should now work
- // on doubles.
- __ bind(&exponent_nonsmi);
- __ CompareRoot(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- call_runtime.Branch(not_equal);
- __ movsd(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
- // Test if exponent is nan.
- __ ucomisd(xmm1, xmm1);
- call_runtime.Branch(parity_even);
-
- Label base_not_smi;
- Label handle_special_cases;
- __ JumpIfNotSmi(base.reg(), &base_not_smi);
- __ SmiToInteger32(base.reg(), base.reg());
- __ cvtlsi2sd(xmm0, base.reg());
- __ jmp(&handle_special_cases);
- __ bind(&base_not_smi);
- __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- call_runtime.Branch(not_equal);
- __ movl(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
- __ andl(answer.reg(), Immediate(HeapNumber::kExponentMask));
- __ cmpl(answer.reg(), Immediate(HeapNumber::kExponentMask));
- // base is NaN or +/-Infinity
- call_runtime.Branch(greater_equal);
- __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
- // base is in xmm0 and exponent is in xmm1.
- __ bind(&handle_special_cases);
- Label not_minus_half;
- // Test for -0.5.
- // Load xmm2 with -0.5.
- __ movl(answer.reg(), Immediate(0xBF000000));
- __ movd(xmm2, answer.reg());
- __ cvtss2sd(xmm2, xmm2);
- // xmm2 now has -0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &not_minus_half);
-
- // Calculates reciprocal of square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorpd(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
-
- // Test for 0.5.
- __ bind(&not_minus_half);
- // Load xmm2 with 0.5.
- // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
- __ addsd(xmm2, xmm3);
- // xmm2 now has 0.5.
- __ ucomisd(xmm2, xmm1);
- call_runtime.Branch(not_equal);
-
- // Calculates square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorpd(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
-
- JumpTarget done;
- Label failure, success;
- __ bind(&allocate_return);
- // Make a copy of the frame to enable us to handle allocation
- // failure after the JumpTarget jump.
- VirtualFrame* clone = new VirtualFrame(frame());
- __ AllocateHeapNumber(answer.reg(), exponent.reg(), &failure);
- __ movsd(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
- // Remove the two original values from the frame - we only need those
- // in the case where we branch to runtime.
- frame()->Drop(2);
- exponent.Unuse();
- base.Unuse();
- done.Jump(&answer);
- // Use the copy of the original frame as our current frame.
- RegisterFile empty_regs;
- SetFrame(clone, &empty_regs);
- // If we experience an allocation failure we branch to runtime.
- __ bind(&failure);
- call_runtime.Bind();
- answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
-
- done.Bind(&answer);
- frame()->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-// Generates the Math.sqrt method. Please note - this function assumes that
-// the callsite has executed ToNumber on the argument.
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
-
- // Leave original value on the frame if we need to call runtime.
- frame()->Dup();
- Result result = frame()->Pop();
- result.ToRegister();
- frame()->Spill(result.reg());
- Label runtime;
- Label non_smi;
- Label load_done;
- JumpTarget end;
-
- __ JumpIfNotSmi(result.reg(), &non_smi);
- __ SmiToInteger32(result.reg(), result.reg());
- __ cvtlsi2sd(xmm0, result.reg());
- __ jmp(&load_done);
- __ bind(&non_smi);
- __ CompareRoot(FieldOperand(result.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &runtime);
- __ movsd(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
-
- __ bind(&load_done);
- __ sqrtsd(xmm0, xmm0);
- // A copy of the virtual frame to allow us to go to runtime after the
- // JumpTarget jump.
- Result scratch = allocator()->Allocate();
- VirtualFrame* clone = new VirtualFrame(frame());
- __ AllocateHeapNumber(result.reg(), scratch.reg(), &runtime);
-
- __ movsd(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
- frame()->Drop(1);
- scratch.Unuse();
- end.Jump(&result);
- // We only branch to runtime if we have an allocation error.
- // Use the copy of the original frame as our current frame.
- RegisterFile empty_regs;
- SetFrame(clone, &empty_regs);
- __ bind(&runtime);
- result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
-
- end.Bind(&result);
- frame()->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
- Load(args->at(0));
- Load(args->at(1));
- Result right_res = frame_->Pop();
- Result left_res = frame_->Pop();
- right_res.ToRegister();
- left_res.ToRegister();
- Result tmp_res = allocator()->Allocate();
- ASSERT(tmp_res.is_valid());
- Register right = right_res.reg();
- Register left = left_res.reg();
- Register tmp = tmp_res.reg();
- right_res.Unuse();
- left_res.Unuse();
- tmp_res.Unuse();
- __ cmpq(left, right);
- destination()->true_target()->Branch(equal);
- // Fail if either is a non-HeapObject.
- Condition either_smi =
- masm()->CheckEitherSmi(left, right, tmp);
- destination()->false_target()->Branch(either_smi);
- __ movq(tmp, FieldOperand(left, HeapObject::kMapOffset));
- __ cmpb(FieldOperand(tmp, Map::kInstanceTypeOffset),
- Immediate(JS_REGEXP_TYPE));
- destination()->false_target()->Branch(not_equal);
- __ cmpq(tmp, FieldOperand(right, HeapObject::kMapOffset));
- destination()->false_target()->Branch(not_equal);
- __ movq(tmp, FieldOperand(left, JSRegExp::kDataOffset));
- __ cmpq(tmp, FieldOperand(right, JSRegExp::kDataOffset));
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- __ testl(FieldOperand(value.reg(), String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- value.Unuse();
- destination()->Split(zero);
-}
-
-
-void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result string = frame_->Pop();
- string.ToRegister();
-
- Result number = allocator()->Allocate();
- ASSERT(number.is_valid());
- __ movl(number.reg(), FieldOperand(string.reg(), String::kHashFieldOffset));
- __ IndexFromHash(number.reg(), number.reg());
- string.Unuse();
- frame_->Push(&number);
-}
-
-
-void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
- frame_->Push(FACTORY->undefined_value());
-}
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
- if (CheckForInlineRuntimeCall(node)) {
- return;
- }
-
- ZoneList<Expression*>* args = node->arguments();
- Comment cmnt(masm_, "[ CallRuntime");
- const Runtime::Function* function = node->function();
-
- if (function == NULL) {
- // Push the builtins object found in the current global object.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(), GlobalObjectOperand());
- __ movq(temp.reg(),
- FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
- frame_->Push(&temp);
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- if (function == NULL) {
- // Call the JS runtime function.
- frame_->Push(node->name());
- Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
- arg_count,
- loop_nesting_);
- frame_->RestoreContextRegister();
- frame_->Push(&answer);
- } else {
- // Call the C runtime function.
- Result answer = frame_->CallRuntime(function, arg_count);
- frame_->Push(&answer);
- }
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
- Comment cmnt(masm_, "[ UnaryOperation");
-
- Token::Value op = node->op();
-
- if (op == Token::NOT) {
- // Swap the true and false targets but keep the same actual label
- // as the fall through.
- destination()->Invert();
- LoadCondition(node->expression(), destination(), true);
- // Swap the labels back.
- destination()->Invert();
-
- } else if (op == Token::DELETE) {
- Property* property = node->expression()->AsProperty();
- if (property != NULL) {
- Load(property->obj());
- Load(property->key());
- frame_->Push(Smi::FromInt(strict_mode_flag()));
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3);
- frame_->Push(&answer);
- return;
- }
-
- Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
- if (variable != NULL) {
- // Delete of an unqualified identifier is disallowed in strict mode
- // but "delete this" is.
- ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
- Slot* slot = variable->AsSlot();
- if (variable->is_global()) {
- LoadGlobal();
- frame_->Push(variable->name());
- frame_->Push(Smi::FromInt(kNonStrictMode));
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
- CALL_FUNCTION, 3);
- frame_->Push(&answer);
-
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Call the runtime to delete from the context holding the named
- // variable. Sync the virtual frame eagerly so we can push the
- // arguments directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(rsi);
- frame_->EmitPush(variable->name());
- Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
- frame_->Push(&answer);
- } else {
- // Default: Result of deleting non-global, not dynamically
- // introduced variables is false.
- frame_->Push(FACTORY->false_value());
- }
- } else {
- // Default: Result of deleting expressions is true.
- Load(node->expression()); // may have side-effects
- frame_->SetElementAt(0, FACTORY->true_value());
- }
-
- } else if (op == Token::TYPEOF) {
- // Special case for loading the typeof expression; see comment on
- // LoadTypeofExpression().
- LoadTypeofExpression(node->expression());
- Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
- frame_->Push(&answer);
-
- } else if (op == Token::VOID) {
- Expression* expression = node->expression();
- if (expression && expression->AsLiteral() && (
- expression->AsLiteral()->IsTrue() ||
- expression->AsLiteral()->IsFalse() ||
- expression->AsLiteral()->handle()->IsNumber() ||
- expression->AsLiteral()->handle()->IsString() ||
- expression->AsLiteral()->handle()->IsJSRegExp() ||
- expression->AsLiteral()->IsNull())) {
- // Omit evaluating the value of the primitive literal.
- // It will be discarded anyway, and can have no side effect.
- frame_->Push(FACTORY->undefined_value());
- } else {
- Load(node->expression());
- frame_->SetElementAt(0, FACTORY->undefined_value());
- }
-
- } else {
- bool can_overwrite = node->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- bool no_negative_zero = node->expression()->no_negative_zero();
- Load(node->expression());
- switch (op) {
- case Token::NOT:
- case Token::DELETE:
- case Token::TYPEOF:
- UNREACHABLE(); // handled above
- break;
-
- case Token::SUB: {
- GenericUnaryOpStub stub(
- Token::SUB,
- overwrite,
- NO_UNARY_FLAGS,
- no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
- Result operand = frame_->Pop();
- Result answer = frame_->CallStub(&stub, &operand);
- answer.set_type_info(TypeInfo::Number());
- frame_->Push(&answer);
- break;
- }
-
- case Token::BIT_NOT: {
- // Smi check.
- JumpTarget smi_label;
- JumpTarget continue_label;
- Result operand = frame_->Pop();
- operand.ToRegister();
-
- Condition is_smi = masm_->CheckSmi(operand.reg());
- smi_label.Branch(is_smi, &operand);
-
- GenericUnaryOpStub stub(Token::BIT_NOT,
- overwrite,
- NO_UNARY_SMI_CODE_IN_STUB);
- Result answer = frame_->CallStub(&stub, &operand);
- continue_label.Jump(&answer);
-
- smi_label.Bind(&answer);
- answer.ToRegister();
- frame_->Spill(answer.reg());
- __ SmiNot(answer.reg(), answer.reg());
- continue_label.Bind(&answer);
- answer.set_type_info(TypeInfo::Smi());
- frame_->Push(&answer);
- break;
- }
-
- case Token::ADD: {
- // Smi check.
- JumpTarget continue_label;
- Result operand = frame_->Pop();
- TypeInfo operand_info = operand.type_info();
- operand.ToRegister();
- Condition is_smi = masm_->CheckSmi(operand.reg());
- continue_label.Branch(is_smi, &operand);
- frame_->Push(&operand);
- Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
- CALL_FUNCTION, 1);
-
- continue_label.Bind(&answer);
- if (operand_info.IsSmi()) {
- answer.set_type_info(TypeInfo::Smi());
- } else if (operand_info.IsInteger32()) {
- answer.set_type_info(TypeInfo::Integer32());
- } else {
- answer.set_type_info(TypeInfo::Number());
- }
- frame_->Push(&answer);
- break;
- }
- default:
- UNREACHABLE();
- }
- }
-}
-
-
-// The value in dst was optimistically incremented or decremented.
-// The result overflowed or was not smi tagged. Call into the runtime
-// to convert the argument to a number, and call the specialized add
-// or subtract stub. The result is left in dst.
-class DeferredPrefixCountOperation: public DeferredCode {
- public:
- DeferredPrefixCountOperation(Register dst,
- bool is_increment,
- TypeInfo input_type)
- : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
- set_comment("[ DeferredCountOperation");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- bool is_increment_;
- TypeInfo input_type_;
-};
-
-
-void DeferredPrefixCountOperation::Generate() {
- Register left;
- if (input_type_.IsNumber()) {
- left = dst_;
- } else {
- __ push(dst_);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- left = rax;
- }
-
- GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS,
- TypeInfo::Number());
- stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-// The value in dst was optimistically incremented or decremented.
-// The result overflowed or was not smi tagged. Call into the runtime
-// to convert the argument to a number. Update the original value in
-// old. Call the specialized add or subtract stub. The result is
-// left in dst.
-class DeferredPostfixCountOperation: public DeferredCode {
- public:
- DeferredPostfixCountOperation(Register dst,
- Register old,
- bool is_increment,
- TypeInfo input_type)
- : dst_(dst),
- old_(old),
- is_increment_(is_increment),
- input_type_(input_type) {
- set_comment("[ DeferredCountOperation");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Register old_;
- bool is_increment_;
- TypeInfo input_type_;
-};
-
-
-void DeferredPostfixCountOperation::Generate() {
- Register left;
- if (input_type_.IsNumber()) {
- __ push(dst_); // Save the input to use as the old value.
- left = dst_;
- } else {
- __ push(dst_);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- __ push(rax); // Save the result of ToNumber to use as the old value.
- left = rax;
- }
-
- GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS,
- TypeInfo::Number());
- stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
- if (!dst_.is(rax)) __ movq(dst_, rax);
- __ pop(old_);
-}
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
- Comment cmnt(masm_, "[ CountOperation");
-
- bool is_postfix = node->is_postfix();
- bool is_increment = node->op() == Token::INC;
-
- Variable* var = node->expression()->AsVariableProxy()->AsVariable();
- bool is_const = (var != NULL && var->mode() == Variable::CONST);
-
- // Postfix operations need a stack slot under the reference to hold
- // the old value while the new value is being stored. This is so that
- // in the case that storing the new value requires a call, the old
- // value will be in the frame to be spilled.
- if (is_postfix) frame_->Push(Smi::FromInt(0));
-
- // A constant reference is not saved to, so the reference is not a
- // compound assignment reference.
- { Reference target(this, node->expression(), !is_const);
- if (target.is_illegal()) {
- // Spoof the virtual frame to have the expected height (one higher
- // than on entry).
- if (!is_postfix) frame_->Push(Smi::FromInt(0));
- return;
- }
- target.TakeValue();
-
- Result new_value = frame_->Pop();
- new_value.ToRegister();
-
- Result old_value; // Only allocated in the postfix case.
- if (is_postfix) {
- // Allocate a temporary to preserve the old value.
- old_value = allocator_->Allocate();
- ASSERT(old_value.is_valid());
- __ movq(old_value.reg(), new_value.reg());
-
- // The return value for postfix operations is ToNumber(input).
- // Keep more precise type info if the input is some kind of
- // number already. If the input is not a number we have to wait
- // for the deferred code to convert it.
- if (new_value.type_info().IsNumber()) {
- old_value.set_type_info(new_value.type_info());
- }
- }
- // Ensure the new value is writable.
- frame_->Spill(new_value.reg());
-
- DeferredCode* deferred = NULL;
- if (is_postfix) {
- deferred = new DeferredPostfixCountOperation(new_value.reg(),
- old_value.reg(),
- is_increment,
- new_value.type_info());
- } else {
- deferred = new DeferredPrefixCountOperation(new_value.reg(),
- is_increment,
- new_value.type_info());
- }
-
- if (new_value.is_smi()) {
- if (FLAG_debug_code) { __ AbortIfNotSmi(new_value.reg()); }
- } else {
- __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
- }
- if (is_increment) {
- __ SmiAddConstant(new_value.reg(),
- new_value.reg(),
- Smi::FromInt(1),
- deferred->entry_label());
- } else {
- __ SmiSubConstant(new_value.reg(),
- new_value.reg(),
- Smi::FromInt(1),
- deferred->entry_label());
- }
- deferred->BindExit();
-
- // Postfix count operations return their input converted to
- // number. The case when the input is already a number is covered
- // above in the allocation code for old_value.
- if (is_postfix && !new_value.type_info().IsNumber()) {
- old_value.set_type_info(TypeInfo::Number());
- }
-
- new_value.set_type_info(TypeInfo::Number());
-
- // Postfix: store the old value in the allocated slot under the
- // reference.
- if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
-
- frame_->Push(&new_value);
- // Non-constant: update the reference.
- if (!is_const) target.SetValue(NOT_CONST_INIT);
- }
-
- // Postfix: drop the new value and use the old.
- if (is_postfix) frame_->Drop();
-}
-
-
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
- // According to ECMA-262 section 11.11, page 58, the binary logical
- // operators must yield the result of one of the two expressions
- // before any ToBoolean() conversions. This means that the value
- // produced by a && or || operator is not necessarily a boolean.
-
- // NOTE: If the left hand side produces a materialized value (not
- // control flow), we force the right hand side to do the same. This
- // is necessary because we assume that if we get control flow on the
- // last path out of an expression we got it on all paths.
- if (node->op() == Token::AND) {
- JumpTarget is_true;
- ControlDestination dest(&is_true, destination()->false_target(), true);
- LoadCondition(node->left(), &dest, false);
-
- if (dest.false_was_fall_through()) {
- // The current false target was used as the fall-through. If
- // there are no dangling jumps to is_true then the left
- // subexpression was unconditionally false. Otherwise we have
- // paths where we do have to evaluate the right subexpression.
- if (is_true.is_linked()) {
- // We need to compile the right subexpression. If the jump to
- // the current false target was a forward jump then we have a
- // valid frame, we have just bound the false target, and we
- // have to jump around the code for the right subexpression.
- if (has_valid_frame()) {
- destination()->false_target()->Unuse();
- destination()->false_target()->Jump();
- }
- is_true.Bind();
- // The left subexpression compiled to control flow, so the
- // right one is free to do so as well.
- LoadCondition(node->right(), destination(), false);
- } else {
- // We have actually just jumped to or bound the current false
- // target but the current control destination is not marked as
- // used.
- destination()->Use(false);
- }
-
- } else if (dest.is_used()) {
- // The left subexpression compiled to control flow (and is_true
- // was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), destination(), false);
-
- } else {
- // We have a materialized value on the frame, so we exit with
- // one on all paths. There are possibly also jumps to is_true
- // from nested subexpressions.
- JumpTarget pop_and_continue;
- JumpTarget exit;
-
- // Avoid popping the result if it converts to 'false' using the
- // standard ToBoolean() conversion as described in ECMA-262,
- // section 9.2, page 30.
- //
- // Duplicate the TOS value. The duplicate will be popped by
- // ToBoolean.
- frame_->Dup();
- ControlDestination dest(&pop_and_continue, &exit, true);
- ToBoolean(&dest);
-
- // Pop the result of evaluating the first part.
- frame_->Drop();
-
- // Compile right side expression.
- is_true.Bind();
- Load(node->right());
-
- // Exit (always with a materialized value).
- exit.Bind();
- }
-
- } else {
- ASSERT(node->op() == Token::OR);
- JumpTarget is_false;
- ControlDestination dest(destination()->true_target(), &is_false, false);
- LoadCondition(node->left(), &dest, false);
-
- if (dest.true_was_fall_through()) {
- // The current true target was used as the fall-through. If
- // there are no dangling jumps to is_false then the left
- // subexpression was unconditionally true. Otherwise we have
- // paths where we do have to evaluate the right subexpression.
- if (is_false.is_linked()) {
- // We need to compile the right subexpression. If the jump to
- // the current true target was a forward jump then we have a
- // valid frame, we have just bound the true target, and we
- // have to jump around the code for the right subexpression.
- if (has_valid_frame()) {
- destination()->true_target()->Unuse();
- destination()->true_target()->Jump();
- }
- is_false.Bind();
- // The left subexpression compiled to control flow, so the
- // right one is free to do so as well.
- LoadCondition(node->right(), destination(), false);
- } else {
- // We have just jumped to or bound the current true target but
- // the current control destination is not marked as used.
- destination()->Use(true);
- }
-
- } else if (dest.is_used()) {
- // The left subexpression compiled to control flow (and is_false
- // was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), destination(), false);
-
- } else {
- // We have a materialized value on the frame, so we exit with
- // one on all paths. There are possibly also jumps to is_false
- // from nested subexpressions.
- JumpTarget pop_and_continue;
- JumpTarget exit;
-
- // Avoid popping the result if it converts to 'true' using the
- // standard ToBoolean() conversion as described in ECMA-262,
- // section 9.2, page 30.
- //
- // Duplicate the TOS value. The duplicate will be popped by
- // ToBoolean.
- frame_->Dup();
- ControlDestination dest(&exit, &pop_and_continue, false);
- ToBoolean(&dest);
-
- // Pop the result of evaluating the first part.
- frame_->Drop();
-
- // Compile right side expression.
- is_false.Bind();
- Load(node->right());
-
- // Exit (always with a materialized value).
- exit.Bind();
- }
- }
-}
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
- Comment cmnt(masm_, "[ BinaryOperation");
-
- if (node->op() == Token::AND || node->op() == Token::OR) {
- GenerateLogicalBooleanOperation(node);
- } else {
- // NOTE: The code below assumes that the slow cases (calls to runtime)
- // never return a constant/immutable object.
- OverwriteMode overwrite_mode = NO_OVERWRITE;
- if (node->left()->ResultOverwriteAllowed()) {
- overwrite_mode = OVERWRITE_LEFT;
- } else if (node->right()->ResultOverwriteAllowed()) {
- overwrite_mode = OVERWRITE_RIGHT;
- }
-
- if (node->left()->IsTrivial()) {
- Load(node->right());
- Result right = frame_->Pop();
- frame_->Push(node->left());
- frame_->Push(&right);
- } else {
- Load(node->left());
- Load(node->right());
- }
- GenericBinaryOperation(node, overwrite_mode);
- }
-}
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
- frame_->PushFunction();
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
- Comment cmnt(masm_, "[ CompareOperation");
-
- // Get the expressions from the node.
- Expression* left = node->left();
- Expression* right = node->right();
- Token::Value op = node->op();
- // To make typeof testing for natives implemented in JavaScript really
- // efficient, we generate special code for expressions of the form:
- // 'typeof <expression> == <string>'.
- UnaryOperation* operation = left->AsUnaryOperation();
- if ((op == Token::EQ || op == Token::EQ_STRICT) &&
- (operation != NULL && operation->op() == Token::TYPEOF) &&
- (right->AsLiteral() != NULL &&
- right->AsLiteral()->handle()->IsString())) {
- Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
-
- // Load the operand and move it to a register.
- LoadTypeofExpression(operation->expression());
- Result answer = frame_->Pop();
- answer.ToRegister();
-
- if (check->Equals(HEAP->number_symbol())) {
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->true_target()->Branch(is_smi);
- frame_->Spill(answer.reg());
- __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
- answer.Unuse();
- destination()->Split(equal);
-
- } else if (check->Equals(HEAP->string_symbol())) {
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->false_target()->Branch(is_smi);
-
- // It can be an undetectable string object.
- __ movq(kScratchRegister,
- FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- destination()->false_target()->Branch(not_zero);
- __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
- answer.Unuse();
- destination()->Split(below); // Unsigned byte comparison needed.
-
- } else if (check->Equals(HEAP->boolean_symbol())) {
- __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
- destination()->true_target()->Branch(equal);
- __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
- answer.Unuse();
- destination()->Split(equal);
-
- } else if (check->Equals(HEAP->undefined_symbol())) {
- __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
- destination()->true_target()->Branch(equal);
-
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->false_target()->Branch(is_smi);
-
- // It can be an undetectable object.
- __ movq(kScratchRegister,
- FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- answer.Unuse();
- destination()->Split(not_zero);
-
- } else if (check->Equals(HEAP->function_symbol())) {
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->false_target()->Branch(is_smi);
- frame_->Spill(answer.reg());
- __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
- destination()->true_target()->Branch(equal);
- // Regular expressions are callable so typeof == 'function'.
- __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
- answer.Unuse();
- destination()->Split(equal);
-
- } else if (check->Equals(HEAP->object_symbol())) {
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->false_target()->Branch(is_smi);
- __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
- destination()->true_target()->Branch(equal);
-
- // Regular expressions are typeof == 'function', not 'object'.
- __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister);
- destination()->false_target()->Branch(equal);
-
- // It can be an undetectable object.
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- destination()->false_target()->Branch(not_zero);
- __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
- destination()->false_target()->Branch(below);
- __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
- answer.Unuse();
- destination()->Split(below_equal);
- } else {
- // Uncommon case: typeof testing against a string literal that is
- // never returned from the typeof operator.
- answer.Unuse();
- destination()->Goto(false);
- }
- return;
- }
-
- Condition cc = no_condition;
- bool strict = false;
- switch (op) {
- case Token::EQ_STRICT:
- strict = true;
- // Fall through
- case Token::EQ:
- cc = equal;
- break;
- case Token::LT:
- cc = less;
- break;
- case Token::GT:
- cc = greater;
- break;
- case Token::LTE:
- cc = less_equal;
- break;
- case Token::GTE:
- cc = greater_equal;
- break;
- case Token::IN: {
- Load(left);
- Load(right);
- Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
- frame_->Push(&answer); // push the result
- return;
- }
- case Token::INSTANCEOF: {
- Load(left);
- Load(right);
- InstanceofStub stub(InstanceofStub::kNoFlags);
- Result answer = frame_->CallStub(&stub, 2);
- answer.ToRegister();
- __ testq(answer.reg(), answer.reg());
- answer.Unuse();
- destination()->Split(zero);
- return;
- }
- default:
- UNREACHABLE();
- }
-
- if (left->IsTrivial()) {
- Load(right);
- Result right_result = frame_->Pop();
- frame_->Push(left);
- frame_->Push(&right_result);
- } else {
- Load(left);
- Load(right);
- }
-
- Comparison(node, cc, strict, destination());
-}
-
-
-void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
- Comment cmnt(masm_, "[ CompareToNull");
-
- Load(node->expression());
- Result operand = frame_->Pop();
- operand.ToRegister();
- __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
- if (node->is_strict()) {
- operand.Unuse();
- destination()->Split(equal);
- } else {
- // The 'null' value is only equal to 'undefined' if using non-strict
- // comparisons.
- destination()->true_target()->Branch(equal);
- __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
- destination()->true_target()->Branch(equal);
- Condition is_smi = masm_->CheckSmi(operand.reg());
- destination()->false_target()->Branch(is_smi);
-
- // It can be an undetectable object.
- // Use a scratch register in preference to spilling operand.reg().
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(),
- FieldOperand(operand.reg(), HeapObject::kMapOffset));
- __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- temp.Unuse();
- operand.Unuse();
- destination()->Split(not_zero);
- }
-}
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() {
- return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
- && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
- && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
- && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
- && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
- && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
- && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
- && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
- && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
- && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0));
-}
-#endif
-
-
-
-// Emit a LoadIC call to get the value from receiver and leave it in
-// dst. The receiver register is restored after the call.
-class DeferredReferenceGetNamedValue: public DeferredCode {
- public:
- DeferredReferenceGetNamedValue(Register dst,
- Register receiver,
- Handle<String> name)
- : dst_(dst), receiver_(receiver), name_(name) {
- set_comment("[ DeferredReferenceGetNamedValue");
- }
-
- virtual void Generate();
-
- Label* patch_site() { return &patch_site_; }
-
- private:
- Label patch_site_;
- Register dst_;
- Register receiver_;
- Handle<String> name_;
-};
-
-
-void DeferredReferenceGetNamedValue::Generate() {
- if (!receiver_.is(rax)) {
- __ movq(rax, receiver_);
- }
- __ Move(rcx, name_);
- Handle<Code> ic = Isolate::Current()->builtins()->LoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The call must be followed by a test rax instruction to indicate
- // that the inobject property case was inlined.
- //
- // Store the delta to the map check instruction here in the test
- // instruction. Use masm_-> instead of the __ macro since the
- // latter can't return a value.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- masm_->testl(rax, Immediate(-delta_to_patch_site));
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->named_load_inline_miss(), 1);
-
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-class DeferredReferenceGetKeyedValue: public DeferredCode {
- public:
- explicit DeferredReferenceGetKeyedValue(Register dst,
- Register receiver,
- Register key)
- : dst_(dst), receiver_(receiver), key_(key) {
- set_comment("[ DeferredReferenceGetKeyedValue");
- }
-
- virtual void Generate();
-
- Label* patch_site() { return &patch_site_; }
-
- private:
- Label patch_site_;
- Register dst_;
- Register receiver_;
- Register key_;
-};
-
-
-void DeferredReferenceGetKeyedValue::Generate() {
- if (receiver_.is(rdx)) {
- if (!key_.is(rax)) {
- __ movq(rax, key_);
- } // else do nothing.
- } else if (receiver_.is(rax)) {
- if (key_.is(rdx)) {
- __ xchg(rax, rdx);
- } else if (key_.is(rax)) {
- __ movq(rdx, receiver_);
- } else {
- __ movq(rdx, receiver_);
- __ movq(rax, key_);
- }
- } else if (key_.is(rax)) {
- __ movq(rdx, receiver_);
- } else {
- __ movq(rax, key_);
- __ movq(rdx, receiver_);
- }
- // Calculate the delta from the IC call instruction to the map check
- // movq instruction in the inlined version. This delta is stored in
- // a test(rax, delta) instruction after the call so that we can find
- // it in the IC initialization code and patch the movq instruction.
- // This means that we cannot allow test instructions after calls to
- // KeyedLoadIC stubs in other places.
- Handle<Code> ic = Isolate::Current()->builtins()->KeyedLoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The delta from the start of the map-compare instruction to the
- // test instruction. We use masm_-> directly here instead of the __
- // macro because the macro sometimes uses macro expansion to turn
- // into something that can't return a value. This is encountered
- // when doing generated code coverage tests.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- // TODO(X64): Consider whether it's worth switching the test to a
- // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
- // be generated normally.
- masm_->testl(rax, Immediate(-delta_to_patch_site));
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_inline_miss(), 1);
-
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-class DeferredReferenceSetKeyedValue: public DeferredCode {
- public:
- DeferredReferenceSetKeyedValue(Register value,
- Register key,
- Register receiver,
- StrictModeFlag strict_mode)
- : value_(value),
- key_(key),
- receiver_(receiver),
- strict_mode_(strict_mode) {
- set_comment("[ DeferredReferenceSetKeyedValue");
- }
-
- virtual void Generate();
-
- Label* patch_site() { return &patch_site_; }
-
- private:
- Register value_;
- Register key_;
- Register receiver_;
- Label patch_site_;
- StrictModeFlag strict_mode_;
-};
-
-
-void DeferredReferenceSetKeyedValue::Generate() {
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_store_inline_miss(), 1);
- // Move value, receiver, and key to registers rax, rdx, and rcx, as
- // the IC stub expects.
- // Move value to rax, using xchg if the receiver or key is in rax.
- if (!value_.is(rax)) {
- if (!receiver_.is(rax) && !key_.is(rax)) {
- __ movq(rax, value_);
- } else {
- __ xchg(rax, value_);
- // Update receiver_ and key_ if they are affected by the swap.
- if (receiver_.is(rax)) {
- receiver_ = value_;
- } else if (receiver_.is(value_)) {
- receiver_ = rax;
- }
- if (key_.is(rax)) {
- key_ = value_;
- } else if (key_.is(value_)) {
- key_ = rax;
- }
- }
- }
- // Value is now in rax. Its original location is remembered in value_,
- // and the value is restored to value_ before returning.
- // The variables receiver_ and key_ are not preserved.
- // Move receiver and key to rdx and rcx, swapping if necessary.
- if (receiver_.is(rdx)) {
- if (!key_.is(rcx)) {
- __ movq(rcx, key_);
- } // Else everything is already in the right place.
- } else if (receiver_.is(rcx)) {
- if (key_.is(rdx)) {
- __ xchg(rcx, rdx);
- } else if (key_.is(rcx)) {
- __ movq(rdx, receiver_);
- } else {
- __ movq(rdx, receiver_);
- __ movq(rcx, key_);
- }
- } else if (key_.is(rcx)) {
- __ movq(rdx, receiver_);
- } else {
- __ movq(rcx, key_);
- __ movq(rdx, receiver_);
- }
-
- // Call the IC stub.
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- (strict_mode_ == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict
- : Builtins::kKeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The delta from the start of the map-compare instructions (initial movq)
- // to the test instruction. We use masm_-> directly here instead of the
- // __ macro because the macro sometimes uses macro expansion to turn
- // into something that can't return a value. This is encountered
- // when doing generated code coverage tests.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- masm_->testl(rax, Immediate(-delta_to_patch_site));
- // Restore value (returned from store IC).
- if (!value_.is(rax)) __ movq(value_, rax);
-}
-
-
-Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Result result;
- // Do not inline the inobject property case for loads from the global
- // object. Also do not inline for unoptimized code. This saves time
- // in the code generator. Unoptimized code is toplevel code or code
- // that is not in a loop.
- if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
- Comment cmnt(masm(), "[ Load from named Property");
- frame()->Push(name);
-
- RelocInfo::Mode mode = is_contextual
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- result = frame()->CallLoadIC(mode);
- // A test rax instruction following the call signals that the
- // inobject property case was inlined. Ensure that there is not
- // a test rax instruction here.
- __ nop();
- } else {
- // Inline the inobject property case.
- Comment cmnt(masm(), "[ Inlined named property load");
- Result receiver = frame()->Pop();
- receiver.ToRegister();
- result = allocator()->Allocate();
- ASSERT(result.is_valid());
-
- // r12 is now a reserved register, so it cannot be the receiver.
- // If it was, the distance to the fixup location would not be constant.
- ASSERT(!receiver.reg().is(r12));
-
- DeferredReferenceGetNamedValue* deferred =
- new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
-
- // Check that the receiver is a heap object.
- __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
- __ bind(deferred->patch_site());
- // This is the map check instruction that will be patched (so we can't
- // use the double underscore macro that may insert instructions).
- // Initially use an invalid map to force a failure.
- masm()->movq(kScratchRegister, FACTORY->null_value(),
- RelocInfo::EMBEDDED_OBJECT);
- masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- kScratchRegister);
- // This branch is always a forwards branch so it's always a fixed
- // size which allows the assert below to succeed and patching to work.
- // Don't use deferred->Branch(...), since that might add coverage code.
- masm()->j(not_equal, deferred->entry_label());
-
- // The delta from the patch label to the load offset must be
- // statically known.
- ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
- LoadIC::kOffsetToLoadInstruction);
- // The initial (invalid) offset has to be large enough to force
- // a 32-bit instruction encoding to allow patching with an
- // arbitrary offset. Use kMaxInt (minus kHeapObjectTag).
- int offset = kMaxInt;
- masm()->movq(result.reg(), FieldOperand(receiver.reg(), offset));
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->named_load_inline(), 1);
- deferred->BindExit();
- }
- ASSERT(frame()->height() == original_height - 1);
- return result;
-}
-
-
-Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
- int expected_height = frame()->height() - (is_contextual ? 1 : 2);
-#endif
-
- Result result;
- if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
- result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
- // A test rax instruction following the call signals that the inobject
- // property case was inlined. Ensure that there is not a test rax
- // instruction here.
- __ nop();
- } else {
- // Inline the in-object property case.
- JumpTarget slow, done;
- Label patch_site;
-
- // Get the value and receiver from the stack.
- Result value = frame()->Pop();
- value.ToRegister();
- Result receiver = frame()->Pop();
- receiver.ToRegister();
-
- // Allocate result register.
- result = allocator()->Allocate();
- ASSERT(result.is_valid() && receiver.is_valid() && value.is_valid());
-
- // r12 is now a reserved register, so it cannot be the receiver.
- // If it was, the distance to the fixup location would not be constant.
- ASSERT(!receiver.reg().is(r12));
-
- // Check that the receiver is a heap object.
- Condition is_smi = masm()->CheckSmi(receiver.reg());
- slow.Branch(is_smi, &value, &receiver);
-
- // This is the map check instruction that will be patched.
- // Initially use an invalid map to force a failure. The exact
- // instruction sequence is important because we use the
- // kOffsetToStoreInstruction constant for patching. We avoid using
- // the __ macro for the following two instructions because it
- // might introduce extra instructions.
- __ bind(&patch_site);
- masm()->movq(kScratchRegister, FACTORY->null_value(),
- RelocInfo::EMBEDDED_OBJECT);
- masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- kScratchRegister);
- // This branch is always a forwards branch so it's always a fixed size
- // which allows the assert below to succeed and patching to work.
- slow.Branch(not_equal, &value, &receiver);
-
- // The delta from the patch label to the store offset must be
- // statically known.
- ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) ==
- StoreIC::kOffsetToStoreInstruction);
-
- // The initial (invalid) offset has to be large enough to force a 32-bit
- // instruction encoding to allow patching with an arbitrary offset. Use
- // kMaxInt (minus kHeapObjectTag).
- int offset = kMaxInt;
- __ movq(FieldOperand(receiver.reg(), offset), value.reg());
- __ movq(result.reg(), value.reg());
-
- // Allocate scratch register for write barrier.
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
-
- // The write barrier clobbers all input registers, so spill the
- // receiver and the value.
- frame_->Spill(receiver.reg());
- frame_->Spill(value.reg());
-
- // If the receiver and the value share a register allocate a new
- // register for the receiver.
- if (receiver.reg().is(value.reg())) {
- receiver = allocator()->Allocate();
- ASSERT(receiver.is_valid());
- __ movq(receiver.reg(), value.reg());
- }
-
- // Update the write barrier. To save instructions in the inlined
- // version we do not filter smis.
- Label skip_write_barrier;
- __ InNewSpace(receiver.reg(), value.reg(), equal, &skip_write_barrier);
- int delta_to_record_write = masm_->SizeOfCodeGeneratedSince(&patch_site);
- __ lea(scratch.reg(), Operand(receiver.reg(), offset));
- __ RecordWriteHelper(receiver.reg(), scratch.reg(), value.reg());
- if (FLAG_debug_code) {
- __ movq(receiver.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- __ movq(value.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- __ movq(scratch.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- }
- __ bind(&skip_write_barrier);
- value.Unuse();
- scratch.Unuse();
- receiver.Unuse();
- done.Jump(&result);
-
- slow.Bind(&value, &receiver);
- frame()->Push(&receiver);
- frame()->Push(&value);
- result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
- // Encode the offset to the map check instruction and the offset
- // to the write barrier store address computation in a test rax
- // instruction.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site);
- __ testl(rax,
- Immediate((delta_to_record_write << 16) | delta_to_patch_site));
- done.Bind(&result);
- }
-
- ASSERT_EQ(expected_height, frame()->height());
- return result;
-}
-
-
-Result CodeGenerator::EmitKeyedLoad() {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Result result;
- // Inline array load code if inside of a loop. We do not know
- // the receiver map yet, so we initially generate the code with
- // a check against an invalid map. In the inline cache code, we
- // patch the map check if appropriate.
- if (loop_nesting() > 0) {
- Comment cmnt(masm_, "[ Inlined load from keyed Property");
-
- // Use a fresh temporary to load the elements without destroying
- // the receiver which is needed for the deferred slow case.
- // Allocate the temporary early so that we use rax if it is free.
- Result elements = allocator()->Allocate();
- ASSERT(elements.is_valid());
-
- Result key = frame_->Pop();
- Result receiver = frame_->Pop();
- key.ToRegister();
- receiver.ToRegister();
-
- // If key and receiver are shared registers on the frame, their values will
- // be automatically saved and restored when going to deferred code.
- // The result is returned in elements, which is not shared.
- DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(elements.reg(),
- receiver.reg(),
- key.reg());
-
- __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
- // Check that the receiver has the expected map.
- // Initially, use an invalid map. The map is patched in the IC
- // initialization code.
- __ bind(deferred->patch_site());
- // Use masm-> here instead of the double underscore macro since extra
- // coverage code can interfere with the patching. Do not use a load
- // from the root array to load null_value, since the load must be patched
- // with the expected receiver map, which is not in the root array.
- masm_->movq(kScratchRegister, FACTORY->null_value(),
- RelocInfo::EMBEDDED_OBJECT);
- masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- kScratchRegister);
- deferred->Branch(not_equal);
-
- __ JumpUnlessNonNegativeSmi(key.reg(), deferred->entry_label());
-
- // Get the elements array from the receiver.
- __ movq(elements.reg(),
- FieldOperand(receiver.reg(), JSObject::kElementsOffset));
- __ AssertFastElements(elements.reg());
-
- // Check that key is within bounds.
- __ SmiCompare(key.reg(),
- FieldOperand(elements.reg(), FixedArray::kLengthOffset));
- deferred->Branch(above_equal);
-
- // Load and check that the result is not the hole. We could
- // reuse the index or elements register for the value.
- //
- // TODO(206): Consider whether it makes sense to try some
- // heuristic about which register to reuse. For example, if
- // one is rax, the we can reuse that one because the value
- // coming from the deferred code will be in rax.
- SmiIndex index =
- masm_->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
- __ movq(elements.reg(),
- FieldOperand(elements.reg(),
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
- result = elements;
- __ CompareRoot(result.reg(), Heap::kTheHoleValueRootIndex);
- deferred->Branch(equal);
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_inline(), 1);
-
- deferred->BindExit();
- } else {
- Comment cmnt(masm_, "[ Load from keyed Property");
- result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed load. The explicit nop instruction is here because
- // the push that follows might be peep-hole optimized away.
- __ nop();
- }
- ASSERT(frame()->height() == original_height - 2);
- return result;
-}
-
-
-Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Result result;
- // Generate inlined version of the keyed store if the code is in a loop
- // and the key is likely to be a smi.
- if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
- Comment cmnt(masm(), "[ Inlined store to keyed Property");
-
- // Get the receiver, key and value into registers.
- result = frame()->Pop();
- Result key = frame()->Pop();
- Result receiver = frame()->Pop();
-
- Result tmp = allocator_->Allocate();
- ASSERT(tmp.is_valid());
- Result tmp2 = allocator_->Allocate();
- ASSERT(tmp2.is_valid());
-
- // Determine whether the value is a constant before putting it in a
- // register.
- bool value_is_constant = result.is_constant();
-
- // Make sure that value, key and receiver are in registers.
- result.ToRegister();
- key.ToRegister();
- receiver.ToRegister();
-
- DeferredReferenceSetKeyedValue* deferred =
- new DeferredReferenceSetKeyedValue(result.reg(),
- key.reg(),
- receiver.reg(),
- strict_mode_flag());
-
- // Check that the receiver is not a smi.
- __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
- // Check that the key is a smi.
- if (!key.is_smi()) {
- __ JumpIfNotSmi(key.reg(), deferred->entry_label());
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(key.reg());
- }
-
- // Check that the receiver is a JSArray.
- __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
- deferred->Branch(not_equal);
-
- // Get the elements array from the receiver and check that it is not a
- // dictionary.
- __ movq(tmp.reg(),
- FieldOperand(receiver.reg(), JSArray::kElementsOffset));
-
- // Check whether it is possible to omit the write barrier. If the elements
- // array is in new space or the value written is a smi we can safely update
- // the elements array without write barrier.
- Label in_new_space;
- __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
- if (!value_is_constant) {
- __ JumpIfNotSmi(result.reg(), deferred->entry_label());
- }
-
- __ bind(&in_new_space);
- // Bind the deferred code patch site to be able to locate the fixed
- // array map comparison. When debugging, we patch this comparison to
- // always fail so that we will hit the IC call in the deferred code
- // which will allow the debugger to break for fast case stores.
- __ bind(deferred->patch_site());
- // Avoid using __ to ensure the distance from patch_site
- // to the map address is always the same.
- masm()->movq(kScratchRegister, FACTORY->fixed_array_map(),
- RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
- kScratchRegister);
- deferred->Branch(not_equal);
-
- // Check that the key is within bounds. Both the key and the length of
- // the JSArray are smis (because the fixed array check above ensures the
- // elements are in fast case). Use unsigned comparison to handle negative
- // keys.
- __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
- key.reg());
- deferred->Branch(below_equal);
-
- // Store the value.
- SmiIndex index =
- masm()->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
- __ movq(FieldOperand(tmp.reg(),
- index.reg,
- index.scale,
- FixedArray::kHeaderSize),
- result.reg());
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_store_inline(), 1);
-
- deferred->BindExit();
- } else {
- result = frame()->CallKeyedStoreIC(strict_mode_flag());
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed store.
- __ nop();
- }
- ASSERT(frame()->height() == original_height - 3);
- return result;
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-Handle<String> Reference::GetName() {
- ASSERT(type_ == NAMED);
- Property* property = expression_->AsProperty();
- if (property == NULL) {
- // Global variable reference treated as a named property reference.
- VariableProxy* proxy = expression_->AsVariableProxy();
- ASSERT(proxy->AsVariable() != NULL);
- ASSERT(proxy->AsVariable()->is_global());
- return proxy->name();
- } else {
- Literal* raw_name = property->key()->AsLiteral();
- ASSERT(raw_name != NULL);
- return Handle<String>(String::cast(*raw_name->handle()));
- }
-}
-
-
-void Reference::GetValue() {
- ASSERT(!cgen_->in_spilled_code());
- ASSERT(cgen_->HasValidEntryRegisters());
- ASSERT(!is_illegal());
- MacroAssembler* masm = cgen_->masm();
-
- // Record the source position for the property load.
- Property* property = expression_->AsProperty();
- if (property != NULL) {
- cgen_->CodeForSourcePosition(property->position());
- }
-
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Load from Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- ASSERT(slot != NULL);
- cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- break;
- }
-
- case NAMED: {
- Variable* var = expression_->AsVariableProxy()->AsVariable();
- bool is_global = var != NULL;
- ASSERT(!is_global || var->is_global());
- if (persist_after_get_) {
- cgen_->frame()->Dup();
- }
- Result result = cgen_->EmitNamedLoad(GetName(), is_global);
- cgen_->frame()->Push(&result);
- break;
- }
-
- case KEYED: {
- // A load of a bare identifier (load from global) cannot be keyed.
- ASSERT(expression_->AsVariableProxy()->AsVariable() == NULL);
- if (persist_after_get_) {
- cgen_->frame()->PushElementAt(1);
- cgen_->frame()->PushElementAt(1);
- }
- Result value = cgen_->EmitKeyedLoad();
- cgen_->frame()->Push(&value);
- break;
- }
-
- default:
- UNREACHABLE();
- }
-
- if (!persist_after_get_) {
- set_unloaded();
- }
-}
-
-
-void Reference::TakeValue() {
- // TODO(X64): This function is completely architecture independent. Move
- // it somewhere shared.
-
- // For non-constant frame-allocated slots, we invalidate the value in the
- // slot. For all others, we fall back on GetValue.
- ASSERT(!cgen_->in_spilled_code());
- ASSERT(!is_illegal());
- if (type_ != SLOT) {
- GetValue();
- return;
- }
-
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- ASSERT(slot != NULL);
- if (slot->type() == Slot::LOOKUP ||
- slot->type() == Slot::CONTEXT ||
- slot->var()->mode() == Variable::CONST ||
- slot->is_arguments()) {
- GetValue();
- return;
- }
-
- // Only non-constant, frame-allocated parameters and locals can reach
- // here. Be careful not to use the optimizations for arguments
- // object access since it may not have been initialized yet.
- ASSERT(!slot->is_arguments());
- if (slot->type() == Slot::PARAMETER) {
- cgen_->frame()->TakeParameterAt(slot->index());
- } else {
- ASSERT(slot->type() == Slot::LOCAL);
- cgen_->frame()->TakeLocalAt(slot->index());
- }
-
- ASSERT(persist_after_get_);
- // Do not unload the reference, because it is used in SetValue.
-}
-
-
-void Reference::SetValue(InitState init_state) {
- ASSERT(cgen_->HasValidEntryRegisters());
- ASSERT(!is_illegal());
- MacroAssembler* masm = cgen_->masm();
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Store to Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- ASSERT(slot != NULL);
- cgen_->StoreToSlot(slot, init_state);
- set_unloaded();
- break;
- }
-
- case NAMED: {
- Comment cmnt(masm, "[ Store to named Property");
- Result answer = cgen_->EmitNamedStore(GetName(), false);
- cgen_->frame()->Push(&answer);
- set_unloaded();
- break;
- }
-
- case KEYED: {
- Comment cmnt(masm, "[ Store to keyed Property");
- Property* property = expression()->AsProperty();
- ASSERT(property != NULL);
-
- Result answer = cgen_->EmitKeyedStore(property->key()->type());
- cgen_->frame()->Push(&answer);
- set_unloaded();
- break;
- }
-
- case UNLOADED:
- case ILLEGAL:
- UNREACHABLE();
- }
-}
-
-
-Result CodeGenerator::GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
- Result* left,
- Result* right) {
- if (stub->ArgsInRegistersSupported()) {
- stub->SetArgsInRegisters();
- return frame_->CallStub(stub, left, right);
- } else {
- frame_->Push(left);
- frame_->Push(right);
- return frame_->CallStub(stub, 2);
- }
-}
-
-#undef __
-
-#define __ masm.
-
-#ifdef _WIN64
-typedef double (*ModuloFunction)(double, double);
-// Define custom fmod implementation.
-ModuloFunction CreateModuloFunction() {
- size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
- CHECK(buffer);
- Assembler masm(NULL, buffer, static_cast<int>(actual_size));
- // Generated code is put into a fixed, unmovable, buffer, and not into
- // the V8 heap. We can't, and don't, refer to any relocatable addresses
- // (e.g. the JavaScript nan-object).
-
- // Windows 64 ABI passes double arguments in xmm0, xmm1 and
- // returns result in xmm0.
- // Argument backing space is allocated on the stack above
- // the return address.
-
- // Compute x mod y.
- // Load y and x (use argument backing store as temporary storage).
- __ movsd(Operand(rsp, kPointerSize * 2), xmm1);
- __ movsd(Operand(rsp, kPointerSize), xmm0);
- __ fld_d(Operand(rsp, kPointerSize * 2));
- __ fld_d(Operand(rsp, kPointerSize));
-
- // Clear exception flags before operation.
- {
- Label no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear if Illegal Operand or Zero Division exceptions are set.
- __ testb(rax, Immediate(5));
- __ j(zero, &no_exceptions);
- __ fnclex();
- __ bind(&no_exceptions);
- }
-
- // Compute st(0) % st(1)
- {
- Label partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem();
- __ fwait();
- __ fnstsw_ax();
- __ testl(rax, Immediate(0x400 /* C2 */));
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
- }
-
- Label valid_result;
- Label return_result;
- // If Invalid Operand or Zero Division exceptions are set,
- // return NaN.
- __ testb(rax, Immediate(5));
- __ j(zero, &valid_result);
- __ fstp(0); // Drop result in st(0).
- int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
- __ movq(rcx, kNaNValue, RelocInfo::NONE);
- __ movq(Operand(rsp, kPointerSize), rcx);
- __ movsd(xmm0, Operand(rsp, kPointerSize));
- __ jmp(&return_result);
-
- // If result is valid, return that.
- __ bind(&valid_result);
- __ fstp_d(Operand(rsp, kPointerSize));
- __ movsd(xmm0, Operand(rsp, kPointerSize));
-
- // Clean up FPU stack and exceptions and return xmm0
- __ bind(&return_result);
- __ fstp(0); // Unload y.
-
- Label clear_exceptions;
- __ testb(rax, Immediate(0x3f /* Any Exception*/));
- __ j(not_zero, &clear_exceptions);
- __ ret(0);
- __ bind(&clear_exceptions);
- __ fnclex();
- __ ret(0);
-
- CodeDesc desc;
- masm.GetCode(&desc);
- // Call the function from C++ through this pointer.
- return FUNCTION_CAST<ModuloFunction>(buffer);
-}
-
-#endif
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/codegen-x64.h b/src/3rdparty/v8/src/x64/codegen-x64.h
deleted file mode 100644
index 9a70907..0000000
--- a/src/3rdparty/v8/src/x64/codegen-x64.h
+++ /dev/null
@@ -1,753 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_CODEGEN_X64_H_
-#define V8_X64_CODEGEN_X64_H_
-
-#include "ast.h"
-#include "ic-inl.h"
-#include "jump-target-heavy.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations
-class CompilationInfo;
-class DeferredCode;
-class RegisterAllocator;
-class RegisterFile;
-
-enum InitState { CONST_INIT, NOT_CONST_INIT };
-enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
-
-// -------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that puts a
-// reference on the virtual frame. The reference may be consumed
-// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
-// When the lifetime (scope) of a valid reference ends, it must have
-// been consumed, and be in state UNLOADED.
-class Reference BASE_EMBEDDED {
- public:
- // The values of the types is important, see size().
- enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
-
- Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get = false);
- ~Reference();
-
- Expression* expression() const { return expression_; }
- Type type() const { return type_; }
- void set_type(Type value) {
- ASSERT_EQ(ILLEGAL, type_);
- type_ = value;
- }
-
- void set_unloaded() {
- ASSERT_NE(ILLEGAL, type_);
- ASSERT_NE(UNLOADED, type_);
- type_ = UNLOADED;
- }
- // The size the reference takes up on the stack.
- int size() const {
- return (type_ < SLOT) ? 0 : type_;
- }
-
- bool is_illegal() const { return type_ == ILLEGAL; }
- bool is_slot() const { return type_ == SLOT; }
- bool is_property() const { return type_ == NAMED || type_ == KEYED; }
- bool is_unloaded() const { return type_ == UNLOADED; }
-
- // Return the name. Only valid for named property references.
- Handle<String> GetName();
-
- // Generate code to push the value of the reference on top of the
- // expression stack. The reference is expected to be already on top of
- // the expression stack, and it is consumed by the call unless the
- // reference is for a compound assignment.
- // If the reference is not consumed, it is left in place under its value.
- void GetValue();
-
- // Like GetValue except that the slot is expected to be written to before
- // being read from again. The value of the reference may be invalidated,
- // causing subsequent attempts to read it to fail.
- void TakeValue();
-
- // Generate code to store the value on top of the expression stack in the
- // reference. The reference is expected to be immediately below the value
- // on the expression stack. The value is stored in the location specified
- // by the reference, and is left on top of the stack, after the reference
- // is popped from beneath it (unloaded).
- void SetValue(InitState init_state);
-
- private:
- CodeGenerator* cgen_;
- Expression* expression_;
- Type type_;
- bool persist_after_get_;
-};
-
-
-// -------------------------------------------------------------------------
-// Control destinations.
-
-// A control destination encapsulates a pair of jump targets and a
-// flag indicating which one is the preferred fall-through. The
-// preferred fall-through must be unbound, the other may be already
-// bound (ie, a backward target).
-//
-// The true and false targets may be jumped to unconditionally or
-// control may split conditionally. Unconditional jumping and
-// splitting should be emitted in tail position (as the last thing
-// when compiling an expression) because they can cause either label
-// to be bound or the non-fall through to be jumped to leaving an
-// invalid virtual frame.
-//
-// The labels in the control destination can be extracted and
-// manipulated normally without affecting the state of the
-// destination.
-
-class ControlDestination BASE_EMBEDDED {
- public:
- ControlDestination(JumpTarget* true_target,
- JumpTarget* false_target,
- bool true_is_fall_through)
- : true_target_(true_target),
- false_target_(false_target),
- true_is_fall_through_(true_is_fall_through),
- is_used_(false) {
- ASSERT(true_is_fall_through ? !true_target->is_bound()
- : !false_target->is_bound());
- }
-
- // Accessors for the jump targets. Directly jumping or branching to
- // or binding the targets will not update the destination's state.
- JumpTarget* true_target() const { return true_target_; }
- JumpTarget* false_target() const { return false_target_; }
-
- // True if the the destination has been jumped to unconditionally or
- // control has been split to both targets. This predicate does not
- // test whether the targets have been extracted and manipulated as
- // raw jump targets.
- bool is_used() const { return is_used_; }
-
- // True if the destination is used and the true target (respectively
- // false target) was the fall through. If the target is backward,
- // "fall through" included jumping unconditionally to it.
- bool true_was_fall_through() const {
- return is_used_ && true_is_fall_through_;
- }
-
- bool false_was_fall_through() const {
- return is_used_ && !true_is_fall_through_;
- }
-
- // Emit a branch to one of the true or false targets, and bind the
- // other target. Because this binds the fall-through target, it
- // should be emitted in tail position (as the last thing when
- // compiling an expression).
- void Split(Condition cc) {
- ASSERT(!is_used_);
- if (true_is_fall_through_) {
- false_target_->Branch(NegateCondition(cc));
- true_target_->Bind();
- } else {
- true_target_->Branch(cc);
- false_target_->Bind();
- }
- is_used_ = true;
- }
-
- // Emit an unconditional jump in tail position, to the true target
- // (if the argument is true) or the false target. The "jump" will
- // actually bind the jump target if it is forward, jump to it if it
- // is backward.
- void Goto(bool where) {
- ASSERT(!is_used_);
- JumpTarget* target = where ? true_target_ : false_target_;
- if (target->is_bound()) {
- target->Jump();
- } else {
- target->Bind();
- }
- is_used_ = true;
- true_is_fall_through_ = where;
- }
-
- // Mark this jump target as used as if Goto had been called, but
- // without generating a jump or binding a label (the control effect
- // should have already happened). This is used when the left
- // subexpression of the short-circuit boolean operators are
- // compiled.
- void Use(bool where) {
- ASSERT(!is_used_);
- ASSERT((where ? true_target_ : false_target_)->is_bound());
- is_used_ = true;
- true_is_fall_through_ = where;
- }
-
- // Swap the true and false targets but keep the same actual label as
- // the fall through. This is used when compiling negated
- // expressions, where we want to swap the targets but preserve the
- // state.
- void Invert() {
- JumpTarget* temp_target = true_target_;
- true_target_ = false_target_;
- false_target_ = temp_target;
-
- true_is_fall_through_ = !true_is_fall_through_;
- }
-
- private:
- // True and false jump targets.
- JumpTarget* true_target_;
- JumpTarget* false_target_;
-
- // Before using the destination: true if the true target is the
- // preferred fall through, false if the false target is. After
- // using the destination: true if the true target was actually used
- // as the fall through, false if the false target was.
- bool true_is_fall_through_;
-
- // True if the Split or Goto functions have been called.
- bool is_used_;
-};
-
-
-// -------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the jump target pair). It is threaded through
-// the call stack. Constructing a state implicitly pushes it on the owning
-// code generator's stack of states, and destroying one implicitly pops it.
-//
-// The code generator state is only used for expressions, so statements have
-// the initial state.
-
-class CodeGenState BASE_EMBEDDED {
- public:
- // Create an initial code generator state. Destroying the initial state
- // leaves the code generator with a NULL state.
- explicit CodeGenState(CodeGenerator* owner);
-
- // Create a code generator state based on a code generator's current
- // state. The new state has its own control destination.
- CodeGenState(CodeGenerator* owner, ControlDestination* destination);
-
- // Destroy a code generator state and restore the owning code generator's
- // previous state.
- ~CodeGenState();
-
- // Accessors for the state.
- ControlDestination* destination() const { return destination_; }
-
- private:
- // The owning code generator.
- CodeGenerator* owner_;
-
- // A control destination in case the expression has a control-flow
- // effect.
- ControlDestination* destination_;
-
- // The previous state of the owning code generator, restored when
- // this state is destroyed.
- CodeGenState* previous_;
-};
-
-
-// -------------------------------------------------------------------------
-// Arguments allocation mode
-
-enum ArgumentsAllocationMode {
- NO_ARGUMENTS_ALLOCATION,
- EAGER_ARGUMENTS_ALLOCATION,
- LAZY_ARGUMENTS_ALLOCATION
-};
-
-
-// -------------------------------------------------------------------------
-// CodeGenerator
-
-class CodeGenerator: public AstVisitor {
- public:
- static bool MakeCode(CompilationInfo* info);
-
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info);
-
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- static bool ShouldGenerateLog(Expression* type);
-#endif
-
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
- // Accessors
- MacroAssembler* masm() { return masm_; }
- VirtualFrame* frame() const { return frame_; }
- inline Handle<Script> script();
-
- bool has_valid_frame() const { return frame_ != NULL; }
-
- // Set the virtual frame to be new_frame, with non-frame register
- // reference counts given by non_frame_registers. The non-frame
- // register reference counts of the old frame are returned in
- // non_frame_registers.
- void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
- void DeleteFrame();
-
- RegisterAllocator* allocator() const { return allocator_; }
-
- CodeGenState* state() { return state_; }
- void set_state(CodeGenState* state) { state_ = state; }
-
- void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
- bool in_spilled_code() const { return in_spilled_code_; }
- void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
-
- private:
- // Type of a member function that generates inline code for a native function.
- typedef void (CodeGenerator::*InlineFunctionGenerator)
- (ZoneList<Expression*>*);
-
- static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
- // Construction/Destruction
- explicit CodeGenerator(MacroAssembler* masm);
-
- // Accessors
- inline bool is_eval();
- inline Scope* scope();
- inline bool is_strict_mode();
- inline StrictModeFlag strict_mode_flag();
-
- // Generating deferred code.
- void ProcessDeferred();
-
- // State
- ControlDestination* destination() const { return state_->destination(); }
-
- // Track loop nesting level.
- int loop_nesting() const { return loop_nesting_; }
- void IncrementLoopNesting() { loop_nesting_++; }
- void DecrementLoopNesting() { loop_nesting_--; }
-
-
- // Node visitors.
- void VisitStatements(ZoneList<Statement*>* statements);
-
- virtual void VisitSlot(Slot* node);
-#define DEF_VISIT(type) \
- virtual void Visit##type(type* node);
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- // Visit a statement and then spill the virtual frame if control flow can
- // reach the end of the statement (ie, it does not exit via break,
- // continue, return, or throw). This function is used temporarily while
- // the code generator is being transformed.
- void VisitAndSpill(Statement* statement);
-
- // Visit a list of statements and then spill the virtual frame if control
- // flow can reach the end of the list.
- void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
-
- // Main code generation function
- void Generate(CompilationInfo* info);
-
- // Generate the return sequence code. Should be called no more than
- // once per compiled function, immediately after binding the return
- // target (which can not be done more than once).
- void GenerateReturnSequence(Result* return_value);
-
- // Generate code for a fast smi loop.
- void GenerateFastSmiLoop(ForStatement* node);
-
- // Returns the arguments allocation mode.
- ArgumentsAllocationMode ArgumentsMode();
-
- // Store the arguments object and allocate it if necessary.
- Result StoreArgumentsObject(bool initial);
-
- // The following are used by class Reference.
- void LoadReference(Reference* ref);
- void UnloadReference(Reference* ref);
-
- Operand SlotOperand(Slot* slot, Register tmp);
-
- Operand ContextSlotOperandCheckExtensions(Slot* slot,
- Result tmp,
- JumpTarget* slow);
-
- // Expressions
- void LoadCondition(Expression* x,
- ControlDestination* destination,
- bool force_control);
- void Load(Expression* expr);
- void LoadGlobal();
- void LoadGlobalReceiver();
-
- // Generate code to push the value of an expression on top of the frame
- // and then spill the frame fully to memory. This function is used
- // temporarily while the code generator is being transformed.
- void LoadAndSpill(Expression* expression);
-
- // Read a value from a slot and leave it on top of the expression stack.
- void LoadFromSlot(Slot* slot, TypeofState typeof_state);
- void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
- Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow);
-
- // Support for loading from local/global variables and arguments
- // whose location is known unless they are shadowed by
- // eval-introduced bindings. Generates no code for unsupported slot
- // types and therefore expects to fall through to the slow jump target.
- void EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- Result* result,
- JumpTarget* slow,
- JumpTarget* done);
-
- // Store the value on top of the expression stack into a slot, leaving the
- // value in place.
- void StoreToSlot(Slot* slot, InitState init_state);
-
- // Support for compiling assignment expressions.
- void EmitSlotAssignment(Assignment* node);
- void EmitNamedPropertyAssignment(Assignment* node);
- void EmitKeyedPropertyAssignment(Assignment* node);
-
- // Receiver is passed on the frame and not consumed.
- Result EmitNamedLoad(Handle<String> name, bool is_contextual);
-
- // If the store is contextual, value is passed on the frame and consumed.
- // Otherwise, receiver and value are passed on the frame and consumed.
- Result EmitNamedStore(Handle<String> name, bool is_contextual);
-
- // Load a property of an object, returning it in a Result.
- // The object and the property name are passed on the stack, and
- // not changed.
- Result EmitKeyedLoad();
-
- // Receiver, key, and value are passed on the frame and consumed.
- Result EmitKeyedStore(StaticType* key_type);
-
- // Special code for typeof expressions: Unfortunately, we must
- // be careful when loading the expression in 'typeof'
- // expressions. We are not allowed to throw reference errors for
- // non-existing properties of the global object, so we must make it
- // look like an explicit property access, instead of an access
- // through the context chain.
- void LoadTypeofExpression(Expression* x);
-
- // Translate the value on top of the frame into control flow to the
- // control destination.
- void ToBoolean(ControlDestination* destination);
-
- // Generate code that computes a shortcutting logical operation.
- void GenerateLogicalBooleanOperation(BinaryOperation* node);
-
- void GenericBinaryOperation(BinaryOperation* expr,
- OverwriteMode overwrite_mode);
-
- // Generate a stub call from the virtual frame.
- Result GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
- Result* left,
- Result* right);
-
- // Emits code sequence that jumps to a JumpTarget if the inputs
- // are both smis. Cannot be in MacroAssembler because it takes
- // advantage of TypeInfo to skip unneeded checks.
- void JumpIfBothSmiUsingTypeInfo(Result* left,
- Result* right,
- JumpTarget* both_smi);
-
- // Emits code sequence that jumps to deferred code if the input
- // is not a smi. Cannot be in MacroAssembler because it takes
- // advantage of TypeInfo to skip unneeded checks.
- void JumpIfNotSmiUsingTypeInfo(Register reg,
- TypeInfo type,
- DeferredCode* deferred);
-
- // Emits code sequence that jumps to deferred code if the inputs
- // are not both smis. Cannot be in MacroAssembler because it takes
- // advantage of TypeInfo to skip unneeded checks.
- void JumpIfNotBothSmiUsingTypeInfo(Register left,
- Register right,
- TypeInfo left_info,
- TypeInfo right_info,
- DeferredCode* deferred);
-
- // If possible, combine two constant smi values using op to produce
- // a smi result, and push it on the virtual frame, all at compile time.
- // Returns true if it succeeds. Otherwise it has no effect.
- bool FoldConstantSmis(Token::Value op, int left, int right);
-
- // Emit code to perform a binary operation on a constant
- // smi and a likely smi. Consumes the Result *operand.
- Result ConstantSmiBinaryOperation(BinaryOperation* expr,
- Result* operand,
- Handle<Object> constant_operand,
- bool reversed,
- OverwriteMode overwrite_mode);
-
- // Emit code to perform a binary operation on two likely smis.
- // The code to handle smi arguments is produced inline.
- // Consumes the Results *left and *right.
- Result LikelySmiBinaryOperation(BinaryOperation* expr,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode);
-
- void Comparison(AstNode* node,
- Condition cc,
- bool strict,
- ControlDestination* destination);
-
- // If at least one of the sides is a constant smi, generate optimized code.
- void ConstantSmiComparison(Condition cc,
- bool strict,
- ControlDestination* destination,
- Result* left_side,
- Result* right_side,
- bool left_side_constant_smi,
- bool right_side_constant_smi,
- bool is_loop_condition);
-
- void GenerateInlineNumberComparison(Result* left_side,
- Result* right_side,
- Condition cc,
- ControlDestination* dest);
-
- // To prevent long attacker-controlled byte sequences, integer constants
- // from the JavaScript source are loaded in two parts if they are larger
- // than 16 bits.
- static const int kMaxSmiInlinedBits = 16;
- bool IsUnsafeSmi(Handle<Object> value);
- // Load an integer constant x into a register target using
- // at most 16 bits of user-controlled data per assembly operation.
- void LoadUnsafeSmi(Register target, Handle<Object> value);
-
- void CallWithArguments(ZoneList<Expression*>* arguments,
- CallFunctionFlags flags,
- int position);
-
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments). We call x the applicand and y the receiver.
- // The optimization avoids allocating an arguments object if possible.
- void CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position);
-
- void CheckStack();
-
- bool CheckForInlineRuntimeCall(CallRuntime* node);
-
- void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
- // Declare global variables and functions in the given array of
- // name/value pairs.
- void DeclareGlobals(Handle<FixedArray> pairs);
-
- // Instantiate the function based on the shared function info.
- void InstantiateFunction(Handle<SharedFunctionInfo> function_info,
- bool pretenure);
-
- // Support for type checks.
- void GenerateIsSmi(ZoneList<Expression*>* args);
- void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
- void GenerateIsArray(ZoneList<Expression*>* args);
- void GenerateIsRegExp(ZoneList<Expression*>* args);
- void GenerateIsObject(ZoneList<Expression*>* args);
- void GenerateIsSpecObject(ZoneList<Expression*>* args);
- void GenerateIsFunction(ZoneList<Expression*>* args);
- void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
- void GenerateIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args);
-
- // Support for construct call checks.
- void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
- // Support for arguments.length and arguments[?].
- void GenerateArgumentsLength(ZoneList<Expression*>* args);
- void GenerateArguments(ZoneList<Expression*>* args);
-
- // Support for accessing the class and value fields of an object.
- void GenerateClassOf(ZoneList<Expression*>* args);
- void GenerateValueOf(ZoneList<Expression*>* args);
- void GenerateSetValueOf(ZoneList<Expression*>* args);
-
- // Fast support for charCodeAt(n).
- void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateStringCharFromCode(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateStringCharAt(ZoneList<Expression*>* args);
-
- // Fast support for object equality testing.
- void GenerateObjectEquals(ZoneList<Expression*>* args);
-
- void GenerateLog(ZoneList<Expression*>* args);
-
- void GenerateGetFramePointer(ZoneList<Expression*>* args);
-
- // Fast support for Math.random().
- void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
- // Fast support for StringAdd.
- void GenerateStringAdd(ZoneList<Expression*>* args);
-
- // Fast support for SubString.
- void GenerateSubString(ZoneList<Expression*>* args);
-
- // Fast support for StringCompare.
- void GenerateStringCompare(ZoneList<Expression*>* args);
-
- // Support for direct calls from JavaScript to native RegExp code.
- void GenerateRegExpExec(ZoneList<Expression*>* args);
-
- void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
-
- // Support for fast native caches.
- void GenerateGetFromCache(ZoneList<Expression*>* args);
-
- // Fast support for number to string.
- void GenerateNumberToString(ZoneList<Expression*>* args);
-
- // Fast swapping of elements. Takes three expressions, the object and two
- // indices. This should only be used if the indices are known to be
- // non-negative and within bounds of the elements array at the call site.
- void GenerateSwapElements(ZoneList<Expression*>* args);
-
- // Fast call for custom callbacks.
- void GenerateCallFunction(ZoneList<Expression*>* args);
-
- // Fast call to math functions.
- void GenerateMathPow(ZoneList<Expression*>* args);
- void GenerateMathSin(ZoneList<Expression*>* args);
- void GenerateMathCos(ZoneList<Expression*>* args);
- void GenerateMathSqrt(ZoneList<Expression*>* args);
- void GenerateMathLog(ZoneList<Expression*>* args);
-
- // Check whether two RegExps are equivalent.
- void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
-
- void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
- void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
- void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
-
- // Simple condition analysis.
- enum ConditionAnalysis {
- ALWAYS_TRUE,
- ALWAYS_FALSE,
- DONT_KNOW
- };
- ConditionAnalysis AnalyzeCondition(Expression* cond);
-
- // Methods used to indicate which source code is generated for. Source
- // positions are collected by the assembler and emitted with the relocation
- // information.
- void CodeForFunctionPosition(FunctionLiteral* fun);
- void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(Statement* node);
- void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
- void CodeForSourcePosition(int pos);
-
- void SetTypeForStackSlot(Slot* slot, TypeInfo info);
-
-#ifdef DEBUG
- // True if the registers are valid for entry to a block. There should
- // be no frame-external references to (non-reserved) registers.
- bool HasValidEntryRegisters();
-#endif
-
- ZoneList<DeferredCode*> deferred_;
-
- // Assembler
- MacroAssembler* masm_; // to generate code
-
- CompilationInfo* info_;
-
- // Code generation state
- VirtualFrame* frame_;
- RegisterAllocator* allocator_;
- CodeGenState* state_;
- int loop_nesting_;
-
- // Jump targets.
- // The target of the return from the function.
- BreakTarget function_return_;
-
- // True if the function return is shadowed (ie, jumping to the target
- // function_return_ does not jump to the true function return, but rather
- // to some unlinking code).
- bool function_return_is_shadowed_;
-
- // True when we are in code that expects the virtual frame to be fully
- // spilled. Some virtual frame function are disabled in DEBUG builds when
- // called from spilled code, because they do not leave the virtual frame
- // in a spilled state.
- bool in_spilled_code_;
-
- friend class VirtualFrame;
- friend class Isolate;
- friend class JumpTarget;
- friend class Reference;
- friend class Result;
- friend class FastCodeGenerator;
- friend class FullCodeGenerator;
- friend class FullCodeGenSyntaxChecker;
-
- friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
- friend class InlineRuntimeFunctionsTable;
-
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_X64_CODEGEN_X64_H_
diff --git a/src/3rdparty/v8/src/x64/cpu-x64.cc b/src/3rdparty/v8/src/x64/cpu-x64.cc
deleted file mode 100644
index e637ba1..0000000
--- a/src/3rdparty/v8/src/x64/cpu-x64.cc
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// CPU specific code for x64 independent of OS goes here.
-
-#ifdef __GNUC__
-#include "third_party/valgrind/valgrind.h"
-#endif
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "cpu.h"
-#include "macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-void CPU::Setup() {
- CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
- return true; // Yay!
-}
-
-
-void CPU::FlushICache(void* start, size_t size) {
- // No need to flush the instruction cache on Intel. On Intel instruction
- // cache flushing is only necessary when multiple cores running the same
- // code simultaneously. V8 (and JavaScript) is single threaded and when code
- // is patched on an intel CPU the core performing the patching will have its
- // own instruction cache updated automatically.
-
- // If flushing of the instruction cache becomes necessary Windows has the
- // API function FlushInstructionCache.
-
- // By default, valgrind only checks the stack for writes that might need to
- // invalidate already cached translated code. This leads to random
- // instability when code patches or moves are sometimes unnoticed. One
- // solution is to run valgrind with --smc-check=all, but this comes at a big
- // performance cost. We can notify valgrind to invalidate its cache.
-#ifdef VALGRIND_DISCARD_TRANSLATIONS
- VALGRIND_DISCARD_TRANSLATIONS(start, size);
-#endif
-}
-
-
-void CPU::DebugBreak() {
-#ifdef _MSC_VER
- // To avoid Visual Studio runtime support the following code can be used
- // instead
- // __asm { int 3 }
- __debugbreak();
-#else
- asm("int $3");
-#endif
-}
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/debug-x64.cc b/src/3rdparty/v8/src/x64/debug-x64.cc
deleted file mode 100644
index 0398465..0000000
--- a/src/3rdparty/v8/src/x64/debug-x64.cc
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "debug.h"
-
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-bool BreakLocationIterator::IsDebugBreakAtReturn() {
- return Debug::IsDebugBreakAtReturn(rinfo());
-}
-
-
-// Patch the JS frame exit code with a debug break call. See
-// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-x64.cc
-// for the precise return instructions sequence.
-void BreakLocationIterator::SetDebugBreakAtReturn() {
- ASSERT(Assembler::kJSReturnSequenceLength >=
- Assembler::kCallInstructionLength);
- rinfo()->PatchCodeWithCall(
- Isolate::Current()->debug()->debug_break_return()->entry(),
- Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
-}
-
-
-// Restore the JS frame exit code.
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kJSReturnSequenceLength);
-}
-
-
-// A debug break in the frame exit code is identified by the JS frame exit code
-// having been patched with a call instruction.
-bool Debug::IsDebugBreakAtReturn(v8::internal::RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
- return rinfo->IsPatchedReturnSequence();
-}
-
-
-bool BreakLocationIterator::IsDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- // Check whether the debug break slot instructions have been patched.
- return !Assembler::IsNop(rinfo()->pc());
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- rinfo()->PatchCodeWithCall(
- Isolate::Current()->debug()->debug_break_slot()->entry(),
- Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
-}
-
-
-void BreakLocationIterator::ClearDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-
-static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList object_regs,
- RegList non_object_regs,
- bool convert_call_to_jmp) {
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as as two smis causing it to be untouched by GC.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- ASSERT(!reg.is(kScratchRegister));
- if ((object_regs & (1 << r)) != 0) {
- __ push(reg);
- }
- // Store the 64-bit value as two smis.
- if ((non_object_regs & (1 << r)) != 0) {
- __ movq(kScratchRegister, reg);
- __ Integer32ToSmi(reg, reg);
- __ push(reg);
- __ sar(kScratchRegister, Immediate(32));
- __ Integer32ToSmi(kScratchRegister, kScratchRegister);
- __ push(kScratchRegister);
- }
- }
-
-#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
-#endif
- __ Set(rax, 0); // No arguments (argc == 0).
- __ movq(rbx, ExternalReference::debug_break(masm->isolate()));
-
- CEntryStub ceb(1);
- __ CallStub(&ceb);
-
- // Restore the register values from the expression stack.
- for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if (FLAG_debug_code) {
- __ Set(reg, kDebugZapValue);
- }
- if ((object_regs & (1 << r)) != 0) {
- __ pop(reg);
- }
- // Reconstruct the 64-bit value from two smis.
- if ((non_object_regs & (1 << r)) != 0) {
- __ pop(kScratchRegister);
- __ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ shl(kScratchRegister, Immediate(32));
- __ pop(reg);
- __ SmiToInteger32(reg, reg);
- __ or_(reg, kScratchRegister);
- }
- }
-
- // Get rid of the internal frame.
- __ LeaveInternalFrame();
-
- // If this call did not replace a call but patched other code then there will
- // be an unwanted return address left on the stack. Here we get rid of that.
- if (convert_call_to_jmp) {
- __ addq(rsp, Immediate(kPointerSize));
- }
-
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
- __ movq(kScratchRegister, after_break_target);
- __ jmp(Operand(kScratchRegister, 0));
-}
-
-
-void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- // Register state for IC load call (from ic-x64.cc).
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit(), 0, false);
-}
-
-
-void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // Register state for IC store call (from ic-x64.cc).
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -----------------------------------
- Generate_DebugBreakCallHelper(
- masm, rax.bit() | rcx.bit() | rdx.bit(), 0, false);
-}
-
-
-void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC load call (from ic-x64.cc).
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit() | rdx.bit(), 0, false);
-}
-
-
-void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC load call (from ic-x64.cc).
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -----------------------------------
- Generate_DebugBreakCallHelper(
- masm, rax.bit() | rcx.bit() | rdx.bit(), 0, false);
-}
-
-
-void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- // Register state for IC call call (from ic-x64.cc)
- // ----------- S t a t e -------------
- // -- rcx: function name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rcx.bit(), 0, false);
-}
-
-
-void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
- // Register state just before return from JS function (from codegen-x64.cc).
- // rax is the actual number of arguments not encoded as a smi, see comment
- // above IC call.
- // ----------- S t a t e -------------
- // -- rax: number of arguments
- // -----------------------------------
- // The number of arguments in rax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, rdi.bit(), rax.bit(), false);
-}
-
-
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
- // Register state just before return from JS function (from codegen-x64.cc).
- // ----------- S t a t e -------------
- // -- rax: return value
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit(), 0, true);
-}
-
-
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
- // Register state for stub CallFunction (from CallFunctionStub in ic-x64.cc).
- // ----------- S t a t e -------------
- // No registers used on entry.
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0, 0, false);
-}
-
-
-void Debug::GenerateSlot(MacroAssembler* masm) {
- // Generate enough nop's to make space for a call instruction.
- Label check_codesize;
- __ bind(&check_codesize);
- __ RecordDebugBreakSlot();
- for (int i = 0; i < Assembler::kDebugBreakSlotLength; i++) {
- __ nop();
- }
- ASSERT_EQ(Assembler::kDebugBreakSlotLength,
- masm->SizeOfCodeGeneratedSince(&check_codesize));
-}
-
-
-void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
- // In the places where a debug break slot is inserted no registers can contain
- // object pointers.
- Generate_DebugBreakCallHelper(masm, 0, 0, true);
-}
-
-
-void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->ret(0);
-}
-
-
-void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference(Debug_Address::RestarterFrameFunctionPointer(),
- masm->isolate());
- __ movq(rax, restarter_frame_function_slot);
- __ movq(Operand(rax, 0), Immediate(0));
-
- // We do not know our frame height, but set rsp based on rbp.
- __ lea(rsp, Operand(rbp, -1 * kPointerSize));
-
- __ pop(rdi); // Function.
- __ pop(rbp);
-
- // Load context from the function.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Get function code.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
-
- // Re-run JSFunction, rdi is function, rsi is context.
- __ jmp(rdx);
-}
-
-const bool Debug::kFrameDropperSupported = true;
-
-#undef __
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/deoptimizer-x64.cc b/src/3rdparty/v8/src/x64/deoptimizer-x64.cc
deleted file mode 100644
index e33d061..0000000
--- a/src/3rdparty/v8/src/x64/deoptimizer-x64.cc
+++ /dev/null
@@ -1,816 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "safepoint-table.h"
-
-namespace v8 {
-namespace internal {
-
-
-int Deoptimizer::table_entry_size_ = 10;
-
-
-int Deoptimizer::patch_size() {
- return MacroAssembler::kCallInstructionLength;
-}
-
-
-#ifdef DEBUG
-// Overwrites code with int3 instructions.
-static void ZapCodeRange(Address from, Address to) {
- CHECK(from <= to);
- int length = static_cast<int>(to - from);
- CodePatcher destroyer(from, length);
- while (length-- > 0) {
- destroyer.masm()->int3();
- }
-}
-#endif
-
-
-// Iterate through the entries of a SafepointTable that corresponds to
-// deoptimization points.
-class SafepointTableDeoptimiztionEntryIterator {
- public:
- explicit SafepointTableDeoptimiztionEntryIterator(Code* code)
- : code_(code), table_(code), index_(-1), limit_(table_.length()) {
- FindNextIndex();
- }
-
- SafepointEntry Next(Address* pc) {
- if (index_ >= limit_) {
- *pc = NULL;
- return SafepointEntry(); // Invalid entry.
- }
- *pc = code_->instruction_start() + table_.GetPcOffset(index_);
- SafepointEntry entry = table_.GetEntry(index_);
- FindNextIndex();
- return entry;
- }
-
- private:
- void FindNextIndex() {
- ASSERT(index_ < limit_);
- while (++index_ < limit_) {
- if (table_.GetEntry(index_).deoptimization_index() !=
- Safepoint::kNoDeoptimizationIndex) {
- return;
- }
- }
- }
-
- Code* code_;
- SafepointTable table_;
- // Index of next deoptimization entry. If negative after calling
- // FindNextIndex, there are no more, and Next will return an invalid
- // SafepointEntry.
- int index_;
- // Table length.
- int limit_;
-};
-
-
-void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
- // TODO(1276): Implement.
-}
-
-
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- HandleScope scope;
- AssertNoAllocation no_allocation;
-
- if (!function->IsOptimized()) return;
-
- // Get the optimized code.
- Code* code = function->code();
-
- // Invalidate the relocation information, as it will become invalid by the
- // code patching below, and is not needed any more.
- code->InvalidateRelocation();
-
- // For each return after a safepoint insert a absolute call to the
- // corresponding deoptimization entry, or a short call to an absolute
- // jump if space is short. The absolute jumps are put in a table just
- // before the safepoint table (space was allocated there when the Code
- // object was created, if necessary).
-
- Address instruction_start = function->code()->instruction_start();
- Address jump_table_address =
- instruction_start + function->code()->safepoint_table_offset();
- Address previous_pc = instruction_start;
-
- SafepointTableDeoptimiztionEntryIterator deoptimizations(function->code());
- Address entry_pc = NULL;
-
- SafepointEntry current_entry = deoptimizations.Next(&entry_pc);
- while (current_entry.is_valid()) {
- int gap_code_size = current_entry.gap_code_size();
- unsigned deoptimization_index = current_entry.deoptimization_index();
-
-#ifdef DEBUG
- // Destroy the code which is not supposed to run again.
- ZapCodeRange(previous_pc, entry_pc);
-#endif
- // Position where Call will be patched in.
- Address call_address = entry_pc + gap_code_size;
- // End of call instruction, if using a direct call to a 64-bit address.
- Address call_end_address =
- call_address + MacroAssembler::kCallInstructionLength;
-
- // Find next deoptimization entry, if any.
- Address next_pc = NULL;
- SafepointEntry next_entry = deoptimizations.Next(&next_pc);
-
- if (!next_entry.is_valid() || next_pc >= call_end_address) {
- // Room enough to write a long call instruction.
- CodePatcher patcher(call_address, Assembler::kCallInstructionLength);
- patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY),
- RelocInfo::NONE);
- previous_pc = call_end_address;
- } else {
- // Not room enough for a long Call instruction. Write a short call
- // instruction to a long jump placed elsewhere in the code.
- Address short_call_end_address =
- call_address + MacroAssembler::kShortCallInstructionLength;
- ASSERT(next_pc >= short_call_end_address);
-
- // Write jump in jump-table.
- jump_table_address -= MacroAssembler::kJumpInstructionLength;
- CodePatcher jump_patcher(jump_table_address,
- MacroAssembler::kJumpInstructionLength);
- jump_patcher.masm()->Jump(
- GetDeoptimizationEntry(deoptimization_index, LAZY),
- RelocInfo::NONE);
-
- // Write call to jump at call_offset.
- CodePatcher call_patcher(call_address,
- MacroAssembler::kShortCallInstructionLength);
- call_patcher.masm()->call(jump_table_address);
- previous_pc = short_call_end_address;
- }
-
- // Continue with next deoptimization entry.
- current_entry = next_entry;
- entry_pc = next_pc;
- }
-
-#ifdef DEBUG
- // Destroy the code which is not supposed to run again.
- ZapCodeRange(previous_pc, jump_table_address);
-#endif
-
- // Add the deoptimizing code to the list.
- DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
- node->set_next(data->deoptimizing_code_list_);
- data->deoptimizing_code_list_ = node;
-
- // Set the code for the function to non-optimized version.
- function->ReplaceCode(function->shared()->code());
-
- if (FLAG_trace_deopt) {
- PrintF("[forced deoptimization: ");
- function->PrintName();
- PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
-#ifdef DEBUG
- if (FLAG_print_code) {
- code->PrintLn();
- }
-#endif
- }
-}
-
-
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT(check_code->entry() ==
- Assembler::target_address_at(call_target_address));
- // The stack check code matches the pattern:
- //
- // cmp rsp, <limit>
- // jae ok
- // call <stack guard>
- // test rax, <loop nesting depth>
- // ok: ...
- //
- // We will patch away the branch so the code is:
- //
- // cmp rsp, <limit> ;; Not changed
- // nop
- // nop
- // call <on-stack replacment>
- // test rax, <loop nesting depth>
- // ok:
- //
- ASSERT(*(call_target_address - 3) == 0x73 && // jae
- *(call_target_address - 2) == 0x07 && // offset
- *(call_target_address - 1) == 0xe8); // call
- *(call_target_address - 3) = 0x90; // nop
- *(call_target_address - 2) = 0x90; // nop
- Assembler::set_target_address_at(call_target_address,
- replacement_code->entry());
-}
-
-
-void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT(replacement_code->entry() ==
- Assembler::target_address_at(call_target_address));
- // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
- // restore the conditional branch.
- ASSERT(*(call_target_address - 3) == 0x90 && // nop
- *(call_target_address - 2) == 0x90 && // nop
- *(call_target_address - 1) == 0xe8); // call
- *(call_target_address - 3) = 0x73; // jae
- *(call_target_address - 2) = 0x07; // offset
- Assembler::set_target_address_at(call_target_address,
- check_code->entry());
-}
-
-
-static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
- // TODO(kasperl): This should not be the bailout_id_. It should be
- // the ast id. Confusing.
- ASSERT(bailout_id_ == ast_id);
-
- int bailout_id = LookupBailoutId(data, ast_id);
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
- USE(function);
- ASSERT(function == function_);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = static_cast<unsigned>(input_->GetFrameSize());
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => node=%u, frame=%d->%d]\n",
- ast_id,
- input_frame_size,
- output_frame_size);
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- intptr_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [rsp + %d] <- 0x%08" V8PRIxPTR " ; [rsp + %d] "
- "(fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<intptr_t>(from_));
- } else {
- // Setup the frame pointer and the context pointer.
- output_[0]->SetRegister(rbp.code(), input_->GetRegister(rbp.code()));
- output_[0]->SetRegister(rsi.code(), input_->GetRegister(rsi.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- intptr_t pc = reinterpret_cast<intptr_t>(
- optimized_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation =
- function->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<intptr_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function));
- function->PrintName();
- PrintF(" => pc=0x%0" V8PRIxPTR "]\n", output_[0]->GetPc());
- }
-}
-
-
-void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
- int frame_index) {
- // Read the ast node id, function, and frame height for this output frame.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- USE(opcode);
- ASSERT(Translation::FRAME == opcode);
- int node_id = iterator->Next();
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating ");
- function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
- }
-
- // The 'fixed' part of the frame consists of the incoming parameters and
- // the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeFixedSize(function);
- unsigned input_frame_size = static_cast<unsigned>(input_->GetFrameSize());
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
-
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
- ASSERT(frame_index >= 0 && frame_index < output_count_);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address for the bottommost output frame can be computed from
- // the input frame pointer and the output frame's height. For all
- // subsequent output frames, it can be computed from the previous one's
- // top address and the current frame's size.
- intptr_t top_address;
- if (is_bottommost) {
- // 2 = context and function in the frame.
- top_address =
- input_->GetRegister(rbp.code()) - (2 * kPointerSize) - height_in_bytes;
- } else {
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- }
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = function->shared()->formal_parameter_count() + 1;
- unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- input_offset -= (parameter_count * kPointerSize);
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Synthesize their values and set them up
- // explicitly.
- //
- // The caller's pc for the bottommost output frame is the same as in the
- // input frame. For all subsequent output frames, it can be read from the
- // previous one. This frame's pc can be computed from the non-optimized
- // function code and AST id of the bailout.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- intptr_t value;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetPc();
- }
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The caller's frame pointer for the bottommost output frame is the same
- // as in the input frame. For all subsequent output frames, it can be
- // read from the previous one. Also compute and set this frame's frame
- // pointer.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetFp();
- }
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost || input_->GetRegister(rbp.code()) == fp_value);
- output_frame->SetFp(fp_value);
- if (is_topmost) output_frame->SetRegister(rbp.code(), fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // For the bottommost output frame the context can be gotten from the input
- // frame. For all subsequent output frames it can be gotten from the function
- // so long as we don't inline functions that need local contexts.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = reinterpret_cast<intptr_t>(function->context());
- }
- output_frame->SetFrameSlot(output_offset, value);
- if (is_topmost) output_frame->SetRegister(rsi.code(), value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR "; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The function was mentioned explicitly in the BEGIN_FRAME.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- // The function for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR "; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Translate the rest of the frame.
- for (unsigned i = 0; i < height; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- ASSERT(0 == output_offset);
-
- // Compute this frame's PC, state, and continuation.
- Code* non_optimized_code = function->shared()->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- Address start = non_optimized_code->instruction_start();
- unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
- intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
- output_frame->SetPc(pc_value);
-
- FullCodeGenerator::State state =
- FullCodeGenerator::StateField::decode(pc_and_state);
- output_frame->SetState(Smi::FromInt(state));
-
- // Set the continuation for the topmost frame.
- if (is_topmost) {
- Code* continuation = (bailout_type_ == EAGER)
- ? isolate_->builtins()->builtin(Builtins::kNotifyDeoptimized)
- : isolate_->builtins()->builtin(Builtins::kNotifyLazyDeoptimized);
- output_frame->SetContinuation(
- reinterpret_cast<intptr_t>(continuation->entry()));
- }
-
- if (output_count_ - 1 == frame_index) iterator->Done();
-}
-
-
-#define __ masm()->
-
-void Deoptimizer::EntryGenerator::Generate() {
- GeneratePrologue();
- CpuFeatures::Scope scope(SSE2);
-
- // Save all general purpose registers before messing with them.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- const int kDoubleRegsSize = kDoubleSize *
- XMMRegister::kNumAllocatableRegisters;
- __ subq(rsp, Immediate(kDoubleRegsSize));
-
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ movsd(Operand(rsp, offset), xmm_reg);
- }
-
- // We push all registers onto the stack, even though we do not need
- // to restore all later.
- for (int i = 0; i < kNumberOfRegisters; i++) {
- Register r = Register::toRegister(i);
- __ push(r);
- }
-
- const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
- kDoubleRegsSize;
-
- // When calling new_deoptimizer_function we need to pass the last argument
- // on the stack on windows and in r8 on linux. The remaining arguments are
- // all passed in registers (different ones on linux and windows though).
-
-#ifdef _WIN64
- Register arg4 = r9;
- Register arg3 = r8;
- Register arg2 = rdx;
- Register arg1 = rcx;
-#else
- Register arg4 = rcx;
- Register arg3 = rdx;
- Register arg2 = rsi;
- Register arg1 = rdi;
-#endif
-
- // We use this to keep the value of the fifth argument temporarily.
- // Unfortunately we can't store it directly in r8 (used for passing
- // this on linux), since it is another parameter passing register on windows.
- Register arg5 = r11;
-
- // Get the bailout id from the stack.
- __ movq(arg3, Operand(rsp, kSavedRegistersAreaSize));
-
- // Get the address of the location in the code object if possible
- // and compute the fp-to-sp delta in register arg5.
- if (type() == EAGER) {
- __ Set(arg4, 0);
- __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
- } else {
- __ movq(arg4, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
- __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize));
- }
-
- __ subq(arg5, rbp);
- __ neg(arg5);
-
- // Allocate a new deoptimizer object.
- __ PrepareCallCFunction(6);
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(arg1, rax);
- __ movq(arg2, Immediate(type()));
- // Args 3 and 4 are already in the right registers.
-
- // On windows put the arguments on the stack (PrepareCallCFunction
- // has created space for this). On linux pass the arguments in r8 and r9.
-#ifdef _WIN64
- __ movq(Operand(rsp, 4 * kPointerSize), arg5);
- __ LoadAddress(arg5, ExternalReference::isolate_address());
- __ movq(Operand(rsp, 5 * kPointerSize), arg5);
-#else
- __ movq(r8, arg5);
- __ LoadAddress(r9, ExternalReference::isolate_address());
-#endif
-
- Isolate* isolate = masm()->isolate();
-
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
- // Preserve deoptimizer object in register rax and get the input
- // frame descriptor pointer.
- __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
-
- // Fill in the input registers.
- for (int i = kNumberOfRegisters -1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ pop(Operand(rbx, offset));
- }
-
- // Fill in the double input registers.
- int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- __ pop(Operand(rbx, dst_offset));
- }
-
- // Remove the bailout id from the stack.
- if (type() == EAGER) {
- __ addq(rsp, Immediate(kPointerSize));
- } else {
- __ addq(rsp, Immediate(2 * kPointerSize));
- }
-
- // Compute a pointer to the unwinding limit in register rcx; that is
- // the first stack slot not part of the input frame.
- __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
- __ addq(rcx, rsp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
- Label pop_loop;
- __ bind(&pop_loop);
- __ pop(Operand(rdx, 0));
- __ addq(rdx, Immediate(sizeof(intptr_t)));
- __ cmpq(rcx, rsp);
- __ j(not_equal, &pop_loop);
-
- // Compute the output frame in the deoptimizer.
- __ push(rax);
- __ PrepareCallCFunction(2);
- __ movq(arg1, rax);
- __ LoadAddress(arg2, ExternalReference::isolate_address());
- __ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 2);
- __ pop(rax);
-
- // Replace the current frame with the output frames.
- Label outer_push_loop, inner_push_loop;
- // Outer loop state: rax = current FrameDescription**, rdx = one past the
- // last FrameDescription**.
- __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
- __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
- __ lea(rdx, Operand(rax, rdx, times_8, 0));
- __ bind(&outer_push_loop);
- // Inner loop state: rbx = current FrameDescription*, rcx = loop index.
- __ movq(rbx, Operand(rax, 0));
- __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
- __ bind(&inner_push_loop);
- __ subq(rcx, Immediate(sizeof(intptr_t)));
- __ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
- __ testq(rcx, rcx);
- __ j(not_zero, &inner_push_loop);
- __ addq(rax, Immediate(kPointerSize));
- __ cmpq(rax, rdx);
- __ j(below, &outer_push_loop);
-
- // In case of OSR, we have to restore the XMM registers.
- if (type() == OSR) {
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
- __ movsd(xmm_reg, Operand(rbx, src_offset));
- }
- }
-
- // Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ push(Operand(rbx, FrameDescription::state_offset()));
- }
- __ push(Operand(rbx, FrameDescription::pc_offset()));
- __ push(Operand(rbx, FrameDescription::continuation_offset()));
-
- // Push the registers from the last output frame.
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ push(Operand(rbx, offset));
- }
-
- // Restore the registers from the stack.
- for (int i = kNumberOfRegisters - 1; i >= 0 ; i--) {
- Register r = Register::toRegister(i);
- // Do not restore rsp, simply pop the value into the next register
- // and overwrite this afterwards.
- if (r.is(rsp)) {
- ASSERT(i > 0);
- r = Register::toRegister(i - 1);
- }
- __ pop(r);
- }
-
- // Set up the roots register.
- __ InitializeRootRegister();
- __ InitializeSmiConstantRegister();
-
- // Return to the continuation point.
- __ ret(0);
-}
-
-
-void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- // Create a sequence of deoptimization entries.
- Label done;
- for (int i = 0; i < count(); i++) {
- int start = masm()->pc_offset();
- USE(start);
- __ push_imm32(i);
- __ jmp(&done);
- ASSERT(masm()->pc_offset() - start == table_entry_size_);
- }
- __ bind(&done);
-}
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/disasm-x64.cc b/src/3rdparty/v8/src/x64/disasm-x64.cc
deleted file mode 100644
index 189ee42..0000000
--- a/src/3rdparty/v8/src/x64/disasm-x64.cc
+++ /dev/null
@@ -1,1752 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <assert.h>
-#include <stdio.h>
-#include <stdarg.h>
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "disasm.h"
-
-namespace disasm {
-
-enum OperandType {
- UNSET_OP_ORDER = 0,
- // Operand size decides between 16, 32 and 64 bit operands.
- REG_OPER_OP_ORDER = 1, // Register destination, operand source.
- OPER_REG_OP_ORDER = 2, // Operand destination, register source.
- // Fixed 8-bit operands.
- BYTE_SIZE_OPERAND_FLAG = 4,
- BYTE_REG_OPER_OP_ORDER = REG_OPER_OP_ORDER | BYTE_SIZE_OPERAND_FLAG,
- BYTE_OPER_REG_OP_ORDER = OPER_REG_OP_ORDER | BYTE_SIZE_OPERAND_FLAG
-};
-
-//------------------------------------------------------------------
-// Tables
-//------------------------------------------------------------------
-struct ByteMnemonic {
- int b; // -1 terminates, otherwise must be in range (0..255)
- OperandType op_order_;
- const char* mnem;
-};
-
-
-static ByteMnemonic two_operands_instr[] = {
- { 0x00, BYTE_OPER_REG_OP_ORDER, "add" },
- { 0x01, OPER_REG_OP_ORDER, "add" },
- { 0x02, BYTE_REG_OPER_OP_ORDER, "add" },
- { 0x03, REG_OPER_OP_ORDER, "add" },
- { 0x08, BYTE_OPER_REG_OP_ORDER, "or" },
- { 0x09, OPER_REG_OP_ORDER, "or" },
- { 0x0A, BYTE_REG_OPER_OP_ORDER, "or" },
- { 0x0B, REG_OPER_OP_ORDER, "or" },
- { 0x10, BYTE_OPER_REG_OP_ORDER, "adc" },
- { 0x11, OPER_REG_OP_ORDER, "adc" },
- { 0x12, BYTE_REG_OPER_OP_ORDER, "adc" },
- { 0x13, REG_OPER_OP_ORDER, "adc" },
- { 0x18, BYTE_OPER_REG_OP_ORDER, "sbb" },
- { 0x19, OPER_REG_OP_ORDER, "sbb" },
- { 0x1A, BYTE_REG_OPER_OP_ORDER, "sbb" },
- { 0x1B, REG_OPER_OP_ORDER, "sbb" },
- { 0x20, BYTE_OPER_REG_OP_ORDER, "and" },
- { 0x21, OPER_REG_OP_ORDER, "and" },
- { 0x22, BYTE_REG_OPER_OP_ORDER, "and" },
- { 0x23, REG_OPER_OP_ORDER, "and" },
- { 0x28, BYTE_OPER_REG_OP_ORDER, "sub" },
- { 0x29, OPER_REG_OP_ORDER, "sub" },
- { 0x2A, BYTE_REG_OPER_OP_ORDER, "sub" },
- { 0x2B, REG_OPER_OP_ORDER, "sub" },
- { 0x30, BYTE_OPER_REG_OP_ORDER, "xor" },
- { 0x31, OPER_REG_OP_ORDER, "xor" },
- { 0x32, BYTE_REG_OPER_OP_ORDER, "xor" },
- { 0x33, REG_OPER_OP_ORDER, "xor" },
- { 0x38, BYTE_OPER_REG_OP_ORDER, "cmp" },
- { 0x39, OPER_REG_OP_ORDER, "cmp" },
- { 0x3A, BYTE_REG_OPER_OP_ORDER, "cmp" },
- { 0x3B, REG_OPER_OP_ORDER, "cmp" },
- { 0x63, REG_OPER_OP_ORDER, "movsxlq" },
- { 0x84, BYTE_REG_OPER_OP_ORDER, "test" },
- { 0x85, REG_OPER_OP_ORDER, "test" },
- { 0x86, BYTE_REG_OPER_OP_ORDER, "xchg" },
- { 0x87, REG_OPER_OP_ORDER, "xchg" },
- { 0x88, BYTE_OPER_REG_OP_ORDER, "mov" },
- { 0x89, OPER_REG_OP_ORDER, "mov" },
- { 0x8A, BYTE_REG_OPER_OP_ORDER, "mov" },
- { 0x8B, REG_OPER_OP_ORDER, "mov" },
- { 0x8D, REG_OPER_OP_ORDER, "lea" },
- { -1, UNSET_OP_ORDER, "" }
-};
-
-
-static ByteMnemonic zero_operands_instr[] = {
- { 0xC3, UNSET_OP_ORDER, "ret" },
- { 0xC9, UNSET_OP_ORDER, "leave" },
- { 0xF4, UNSET_OP_ORDER, "hlt" },
- { 0xCC, UNSET_OP_ORDER, "int3" },
- { 0x60, UNSET_OP_ORDER, "pushad" },
- { 0x61, UNSET_OP_ORDER, "popad" },
- { 0x9C, UNSET_OP_ORDER, "pushfd" },
- { 0x9D, UNSET_OP_ORDER, "popfd" },
- { 0x9E, UNSET_OP_ORDER, "sahf" },
- { 0x99, UNSET_OP_ORDER, "cdq" },
- { 0x9B, UNSET_OP_ORDER, "fwait" },
- { 0xA4, UNSET_OP_ORDER, "movs" },
- { 0xA5, UNSET_OP_ORDER, "movs" },
- { 0xA6, UNSET_OP_ORDER, "cmps" },
- { 0xA7, UNSET_OP_ORDER, "cmps" },
- { -1, UNSET_OP_ORDER, "" }
-};
-
-
-static ByteMnemonic call_jump_instr[] = {
- { 0xE8, UNSET_OP_ORDER, "call" },
- { 0xE9, UNSET_OP_ORDER, "jmp" },
- { -1, UNSET_OP_ORDER, "" }
-};
-
-
-static ByteMnemonic short_immediate_instr[] = {
- { 0x05, UNSET_OP_ORDER, "add" },
- { 0x0D, UNSET_OP_ORDER, "or" },
- { 0x15, UNSET_OP_ORDER, "adc" },
- { 0x1D, UNSET_OP_ORDER, "sbb" },
- { 0x25, UNSET_OP_ORDER, "and" },
- { 0x2D, UNSET_OP_ORDER, "sub" },
- { 0x35, UNSET_OP_ORDER, "xor" },
- { 0x3D, UNSET_OP_ORDER, "cmp" },
- { -1, UNSET_OP_ORDER, "" }
-};
-
-
-static const char* conditional_code_suffix[] = {
- "o", "no", "c", "nc", "z", "nz", "na", "a",
- "s", "ns", "pe", "po", "l", "ge", "le", "g"
-};
-
-
-enum InstructionType {
- NO_INSTR,
- ZERO_OPERANDS_INSTR,
- TWO_OPERANDS_INSTR,
- JUMP_CONDITIONAL_SHORT_INSTR,
- REGISTER_INSTR,
- PUSHPOP_INSTR, // Has implicit 64-bit operand size.
- MOVE_REG_INSTR,
- CALL_JUMP_INSTR,
- SHORT_IMMEDIATE_INSTR
-};
-
-
-enum Prefixes {
- ESCAPE_PREFIX = 0x0F,
- OPERAND_SIZE_OVERRIDE_PREFIX = 0x66,
- ADDRESS_SIZE_OVERRIDE_PREFIX = 0x67,
- REPNE_PREFIX = 0xF2,
- REP_PREFIX = 0xF3,
- REPEQ_PREFIX = REP_PREFIX
-};
-
-
-struct InstructionDesc {
- const char* mnem;
- InstructionType type;
- OperandType op_order_;
- bool byte_size_operation; // Fixed 8-bit operation.
-};
-
-
-class InstructionTable {
- public:
- InstructionTable();
- const InstructionDesc& Get(byte x) const {
- return instructions_[x];
- }
-
- private:
- InstructionDesc instructions_[256];
- void Clear();
- void Init();
- void CopyTable(ByteMnemonic bm[], InstructionType type);
- void SetTableRange(InstructionType type, byte start, byte end, bool byte_size,
- const char* mnem);
- void AddJumpConditionalShort();
-};
-
-
-InstructionTable::InstructionTable() {
- Clear();
- Init();
-}
-
-
-void InstructionTable::Clear() {
- for (int i = 0; i < 256; i++) {
- instructions_[i].mnem = "(bad)";
- instructions_[i].type = NO_INSTR;
- instructions_[i].op_order_ = UNSET_OP_ORDER;
- instructions_[i].byte_size_operation = false;
- }
-}
-
-
-void InstructionTable::Init() {
- CopyTable(two_operands_instr, TWO_OPERANDS_INSTR);
- CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
- CopyTable(call_jump_instr, CALL_JUMP_INSTR);
- CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
- AddJumpConditionalShort();
- SetTableRange(PUSHPOP_INSTR, 0x50, 0x57, false, "push");
- SetTableRange(PUSHPOP_INSTR, 0x58, 0x5F, false, "pop");
- SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, false, "mov");
-}
-
-
-void InstructionTable::CopyTable(ByteMnemonic bm[], InstructionType type) {
- for (int i = 0; bm[i].b >= 0; i++) {
- InstructionDesc* id = &instructions_[bm[i].b];
- id->mnem = bm[i].mnem;
- OperandType op_order = bm[i].op_order_;
- id->op_order_ =
- static_cast<OperandType>(op_order & ~BYTE_SIZE_OPERAND_FLAG);
- ASSERT_EQ(NO_INSTR, id->type); // Information not already entered
- id->type = type;
- id->byte_size_operation = ((op_order & BYTE_SIZE_OPERAND_FLAG) != 0);
- }
-}
-
-
-void InstructionTable::SetTableRange(InstructionType type,
- byte start,
- byte end,
- bool byte_size,
- const char* mnem) {
- for (byte b = start; b <= end; b++) {
- InstructionDesc* id = &instructions_[b];
- ASSERT_EQ(NO_INSTR, id->type); // Information not already entered
- id->mnem = mnem;
- id->type = type;
- id->byte_size_operation = byte_size;
- }
-}
-
-
-void InstructionTable::AddJumpConditionalShort() {
- for (byte b = 0x70; b <= 0x7F; b++) {
- InstructionDesc* id = &instructions_[b];
- ASSERT_EQ(NO_INSTR, id->type); // Information not already entered
- id->mnem = NULL; // Computed depending on condition code.
- id->type = JUMP_CONDITIONAL_SHORT_INSTR;
- }
-}
-
-
-static InstructionTable instruction_table;
-
-
-static InstructionDesc cmov_instructions[16] = {
- {"cmovo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovno", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovc", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovnc", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovz", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovnz", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovna", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmova", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovs", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovns", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovpe", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovpo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovl", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovge", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovle", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovg", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false}
-};
-
-//------------------------------------------------------------------------------
-// DisassemblerX64 implementation.
-
-enum UnimplementedOpcodeAction {
- CONTINUE_ON_UNIMPLEMENTED_OPCODE,
- ABORT_ON_UNIMPLEMENTED_OPCODE
-};
-
-// A new DisassemblerX64 object is created to disassemble each instruction.
-// The object can only disassemble a single instruction.
-class DisassemblerX64 {
- public:
- DisassemblerX64(const NameConverter& converter,
- UnimplementedOpcodeAction unimplemented_action =
- ABORT_ON_UNIMPLEMENTED_OPCODE)
- : converter_(converter),
- tmp_buffer_pos_(0),
- abort_on_unimplemented_(
- unimplemented_action == ABORT_ON_UNIMPLEMENTED_OPCODE),
- rex_(0),
- operand_size_(0),
- group_1_prefix_(0),
- byte_size_operand_(false) {
- tmp_buffer_[0] = '\0';
- }
-
- virtual ~DisassemblerX64() {
- }
-
- // Writes one disassembled instruction into 'buffer' (0-terminated).
- // Returns the length of the disassembled machine instruction in bytes.
- int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction);
-
- private:
- enum OperandSize {
- BYTE_SIZE = 0,
- WORD_SIZE = 1,
- DOUBLEWORD_SIZE = 2,
- QUADWORD_SIZE = 3
- };
-
- const NameConverter& converter_;
- v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
- unsigned int tmp_buffer_pos_;
- bool abort_on_unimplemented_;
- // Prefixes parsed
- byte rex_;
- byte operand_size_; // 0x66 or (if no group 3 prefix is present) 0x0.
- byte group_1_prefix_; // 0xF2, 0xF3, or (if no group 1 prefix is present) 0.
- // Byte size operand override.
- bool byte_size_operand_;
-
- void setRex(byte rex) {
- ASSERT_EQ(0x40, rex & 0xF0);
- rex_ = rex;
- }
-
- bool rex() { return rex_ != 0; }
-
- bool rex_b() { return (rex_ & 0x01) != 0; }
-
- // Actual number of base register given the low bits and the rex.b state.
- int base_reg(int low_bits) { return low_bits | ((rex_ & 0x01) << 3); }
-
- bool rex_x() { return (rex_ & 0x02) != 0; }
-
- bool rex_r() { return (rex_ & 0x04) != 0; }
-
- bool rex_w() { return (rex_ & 0x08) != 0; }
-
- OperandSize operand_size() {
- if (byte_size_operand_) return BYTE_SIZE;
- if (rex_w()) return QUADWORD_SIZE;
- if (operand_size_ != 0) return WORD_SIZE;
- return DOUBLEWORD_SIZE;
- }
-
- char operand_size_code() {
- return "bwlq"[operand_size()];
- }
-
- const char* NameOfCPURegister(int reg) const {
- return converter_.NameOfCPURegister(reg);
- }
-
- const char* NameOfByteCPURegister(int reg) const {
- return converter_.NameOfByteCPURegister(reg);
- }
-
- const char* NameOfXMMRegister(int reg) const {
- return converter_.NameOfXMMRegister(reg);
- }
-
- const char* NameOfAddress(byte* addr) const {
- return converter_.NameOfAddress(addr);
- }
-
- // Disassembler helper functions.
- void get_modrm(byte data,
- int* mod,
- int* regop,
- int* rm) {
- *mod = (data >> 6) & 3;
- *regop = ((data & 0x38) >> 3) | (rex_r() ? 8 : 0);
- *rm = (data & 7) | (rex_b() ? 8 : 0);
- }
-
- void get_sib(byte data,
- int* scale,
- int* index,
- int* base) {
- *scale = (data >> 6) & 3;
- *index = ((data >> 3) & 7) | (rex_x() ? 8 : 0);
- *base = (data & 7) | (rex_b() ? 8 : 0);
- }
-
- typedef const char* (DisassemblerX64::*RegisterNameMapping)(int reg) const;
-
- int PrintRightOperandHelper(byte* modrmp,
- RegisterNameMapping register_name);
- int PrintRightOperand(byte* modrmp);
- int PrintRightByteOperand(byte* modrmp);
- int PrintRightXMMOperand(byte* modrmp);
- int PrintOperands(const char* mnem,
- OperandType op_order,
- byte* data);
- int PrintImmediate(byte* data, OperandSize size);
- int PrintImmediateOp(byte* data);
- const char* TwoByteMnemonic(byte opcode);
- int TwoByteOpcodeInstruction(byte* data);
- int F6F7Instruction(byte* data);
- int ShiftInstruction(byte* data);
- int JumpShort(byte* data);
- int JumpConditional(byte* data);
- int JumpConditionalShort(byte* data);
- int SetCC(byte* data);
- int FPUInstruction(byte* data);
- int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
- int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
- void AppendToBuffer(const char* format, ...);
-
- void UnimplementedInstruction() {
- if (abort_on_unimplemented_) {
- CHECK(false);
- } else {
- AppendToBuffer("'Unimplemented Instruction'");
- }
- }
-};
-
-
-void DisassemblerX64::AppendToBuffer(const char* format, ...) {
- v8::internal::Vector<char> buf = tmp_buffer_ + tmp_buffer_pos_;
- va_list args;
- va_start(args, format);
- int result = v8::internal::OS::VSNPrintF(buf, format, args);
- va_end(args);
- tmp_buffer_pos_ += result;
-}
-
-
-int DisassemblerX64::PrintRightOperandHelper(
- byte* modrmp,
- RegisterNameMapping direct_register_name) {
- int mod, regop, rm;
- get_modrm(*modrmp, &mod, &regop, &rm);
- RegisterNameMapping register_name = (mod == 3) ? direct_register_name :
- &DisassemblerX64::NameOfCPURegister;
- switch (mod) {
- case 0:
- if ((rm & 7) == 5) {
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 1);
- AppendToBuffer("[0x%x]", disp);
- return 5;
- } else if ((rm & 7) == 4) {
- // Codes for SIB byte.
- byte sib = *(modrmp + 1);
- int scale, index, base;
- get_sib(sib, &scale, &index, &base);
- if (index == 4 && (base & 7) == 4 && scale == 0 /*times_1*/) {
- // index == rsp means no index. Only use sib byte with no index for
- // rsp and r12 base.
- AppendToBuffer("[%s]", NameOfCPURegister(base));
- return 2;
- } else if (base == 5) {
- // base == rbp means no base register (when mod == 0).
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
- AppendToBuffer("[%s*%d+0x%x]",
- NameOfCPURegister(index),
- 1 << scale, disp);
- return 6;
- } else if (index != 4 && base != 5) {
- // [base+index*scale]
- AppendToBuffer("[%s+%s*%d]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
- 1 << scale);
- return 2;
- } else {
- UnimplementedInstruction();
- return 1;
- }
- } else {
- AppendToBuffer("[%s]", NameOfCPURegister(rm));
- return 1;
- }
- break;
- case 1: // fall through
- case 2:
- if ((rm & 7) == 4) {
- byte sib = *(modrmp + 1);
- int scale, index, base;
- get_sib(sib, &scale, &index, &base);
- int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 2)
- : *reinterpret_cast<char*>(modrmp + 2);
- if (index == 4 && (base & 7) == 4 && scale == 0 /*times_1*/) {
- if (-disp > 0) {
- AppendToBuffer("[%s-0x%x]", NameOfCPURegister(base), -disp);
- } else {
- AppendToBuffer("[%s+0x%x]", NameOfCPURegister(base), disp);
- }
- } else {
- if (-disp > 0) {
- AppendToBuffer("[%s+%s*%d-0x%x]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
- 1 << scale,
- -disp);
- } else {
- AppendToBuffer("[%s+%s*%d+0x%x]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
- 1 << scale,
- disp);
- }
- }
- return mod == 2 ? 6 : 3;
- } else {
- // No sib.
- int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 1)
- : *reinterpret_cast<char*>(modrmp + 1);
- if (-disp > 0) {
- AppendToBuffer("[%s-0x%x]", NameOfCPURegister(rm), -disp);
- } else {
- AppendToBuffer("[%s+0x%x]", NameOfCPURegister(rm), disp);
- }
- return (mod == 2) ? 5 : 2;
- }
- break;
- case 3:
- AppendToBuffer("%s", (this->*register_name)(rm));
- return 1;
- default:
- UnimplementedInstruction();
- return 1;
- }
- UNREACHABLE();
-}
-
-
-int DisassemblerX64::PrintImmediate(byte* data, OperandSize size) {
- int64_t value;
- int count;
- switch (size) {
- case BYTE_SIZE:
- value = *data;
- count = 1;
- break;
- case WORD_SIZE:
- value = *reinterpret_cast<int16_t*>(data);
- count = 2;
- break;
- case DOUBLEWORD_SIZE:
- value = *reinterpret_cast<uint32_t*>(data);
- count = 4;
- break;
- case QUADWORD_SIZE:
- value = *reinterpret_cast<int32_t*>(data);
- count = 4;
- break;
- default:
- UNREACHABLE();
- value = 0; // Initialize variables on all paths to satisfy the compiler.
- count = 0;
- }
- AppendToBuffer("%" V8_PTR_PREFIX "x", value);
- return count;
-}
-
-
-int DisassemblerX64::PrintRightOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp,
- &DisassemblerX64::NameOfCPURegister);
-}
-
-
-int DisassemblerX64::PrintRightByteOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp,
- &DisassemblerX64::NameOfByteCPURegister);
-}
-
-
-int DisassemblerX64::PrintRightXMMOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp,
- &DisassemblerX64::NameOfXMMRegister);
-}
-
-
-// Returns number of bytes used including the current *data.
-// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
-int DisassemblerX64::PrintOperands(const char* mnem,
- OperandType op_order,
- byte* data) {
- byte modrm = *data;
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- int advance = 0;
- const char* register_name =
- byte_size_operand_ ? NameOfByteCPURegister(regop)
- : NameOfCPURegister(regop);
- switch (op_order) {
- case REG_OPER_OP_ORDER: {
- AppendToBuffer("%s%c %s,",
- mnem,
- operand_size_code(),
- register_name);
- advance = byte_size_operand_ ? PrintRightByteOperand(data)
- : PrintRightOperand(data);
- break;
- }
- case OPER_REG_OP_ORDER: {
- AppendToBuffer("%s%c ", mnem, operand_size_code());
- advance = byte_size_operand_ ? PrintRightByteOperand(data)
- : PrintRightOperand(data);
- AppendToBuffer(",%s", register_name);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- return advance;
-}
-
-
-// Returns number of bytes used by machine instruction, including *data byte.
-// Writes immediate instructions to 'tmp_buffer_'.
-int DisassemblerX64::PrintImmediateOp(byte* data) {
- bool byte_size_immediate = (*data & 0x02) != 0;
- byte modrm = *(data + 1);
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- const char* mnem = "Imm???";
- switch (regop) {
- case 0:
- mnem = "add";
- break;
- case 1:
- mnem = "or";
- break;
- case 2:
- mnem = "adc";
- break;
- case 4:
- mnem = "and";
- break;
- case 5:
- mnem = "sub";
- break;
- case 6:
- mnem = "xor";
- break;
- case 7:
- mnem = "cmp";
- break;
- default:
- UnimplementedInstruction();
- }
- AppendToBuffer("%s%c ", mnem, operand_size_code());
- int count = PrintRightOperand(data + 1);
- AppendToBuffer(",0x");
- OperandSize immediate_size = byte_size_immediate ? BYTE_SIZE : operand_size();
- count += PrintImmediate(data + 1 + count, immediate_size);
- return 1 + count;
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX64::F6F7Instruction(byte* data) {
- ASSERT(*data == 0xF7 || *data == 0xF6);
- byte modrm = *(data + 1);
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- if (mod == 3 && regop != 0) {
- const char* mnem = NULL;
- switch (regop) {
- case 2:
- mnem = "not";
- break;
- case 3:
- mnem = "neg";
- break;
- case 4:
- mnem = "mul";
- break;
- case 7:
- mnem = "idiv";
- break;
- default:
- UnimplementedInstruction();
- }
- AppendToBuffer("%s%c %s",
- mnem,
- operand_size_code(),
- NameOfCPURegister(rm));
- return 2;
- } else if (regop == 0) {
- AppendToBuffer("test%c ", operand_size_code());
- int count = PrintRightOperand(data + 1); // Use name of 64-bit register.
- AppendToBuffer(",0x");
- count += PrintImmediate(data + 1 + count, operand_size());
- return 1 + count;
- } else {
- UnimplementedInstruction();
- return 2;
- }
-}
-
-
-int DisassemblerX64::ShiftInstruction(byte* data) {
- byte op = *data & (~1);
- if (op != 0xD0 && op != 0xD2 && op != 0xC0) {
- UnimplementedInstruction();
- return 1;
- }
- byte modrm = *(data + 1);
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- regop &= 0x7; // The REX.R bit does not affect the operation.
- int imm8 = -1;
- int num_bytes = 2;
- if (mod != 3) {
- UnimplementedInstruction();
- return num_bytes;
- }
- const char* mnem = NULL;
- switch (regop) {
- case 0:
- mnem = "rol";
- break;
- case 1:
- mnem = "ror";
- break;
- case 2:
- mnem = "rcl";
- break;
- case 3:
- mnem = "rcr";
- break;
- case 4:
- mnem = "shl";
- break;
- case 5:
- mnem = "shr";
- break;
- case 7:
- mnem = "sar";
- break;
- default:
- UnimplementedInstruction();
- return num_bytes;
- }
- ASSERT_NE(NULL, mnem);
- if (op == 0xD0) {
- imm8 = 1;
- } else if (op == 0xC0) {
- imm8 = *(data + 2);
- num_bytes = 3;
- }
- AppendToBuffer("%s%c %s,",
- mnem,
- operand_size_code(),
- byte_size_operand_ ? NameOfByteCPURegister(rm)
- : NameOfCPURegister(rm));
- if (op == 0xD2) {
- AppendToBuffer("cl");
- } else {
- AppendToBuffer("%d", imm8);
- }
- return num_bytes;
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX64::JumpShort(byte* data) {
- ASSERT_EQ(0xEB, *data);
- byte b = *(data + 1);
- byte* dest = data + static_cast<int8_t>(b) + 2;
- AppendToBuffer("jmp %s", NameOfAddress(dest));
- return 2;
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX64::JumpConditional(byte* data) {
- ASSERT_EQ(0x0F, *data);
- byte cond = *(data + 1) & 0x0F;
- byte* dest = data + *reinterpret_cast<int32_t*>(data + 2) + 6;
- const char* mnem = conditional_code_suffix[cond];
- AppendToBuffer("j%s %s", mnem, NameOfAddress(dest));
- return 6; // includes 0x0F
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX64::JumpConditionalShort(byte* data) {
- byte cond = *data & 0x0F;
- byte b = *(data + 1);
- byte* dest = data + static_cast<int8_t>(b) + 2;
- const char* mnem = conditional_code_suffix[cond];
- AppendToBuffer("j%s %s", mnem, NameOfAddress(dest));
- return 2;
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX64::SetCC(byte* data) {
- ASSERT_EQ(0x0F, *data);
- byte cond = *(data + 1) & 0x0F;
- const char* mnem = conditional_code_suffix[cond];
- AppendToBuffer("set%s%c ", mnem, operand_size_code());
- PrintRightByteOperand(data + 2);
- return 3; // includes 0x0F
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX64::FPUInstruction(byte* data) {
- byte escape_opcode = *data;
- ASSERT_EQ(0xD8, escape_opcode & 0xF8);
- byte modrm_byte = *(data+1);
-
- if (modrm_byte >= 0xC0) {
- return RegisterFPUInstruction(escape_opcode, modrm_byte);
- } else {
- return MemoryFPUInstruction(escape_opcode, modrm_byte, data+1);
- }
-}
-
-int DisassemblerX64::MemoryFPUInstruction(int escape_opcode,
- int modrm_byte,
- byte* modrm_start) {
- const char* mnem = "?";
- int regop = (modrm_byte >> 3) & 0x7; // reg/op field of modrm byte.
- switch (escape_opcode) {
- case 0xD9: switch (regop) {
- case 0: mnem = "fld_s"; break;
- case 3: mnem = "fstp_s"; break;
- case 7: mnem = "fstcw"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDB: switch (regop) {
- case 0: mnem = "fild_s"; break;
- case 1: mnem = "fisttp_s"; break;
- case 2: mnem = "fist_s"; break;
- case 3: mnem = "fistp_s"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDD: switch (regop) {
- case 0: mnem = "fld_d"; break;
- case 3: mnem = "fstp_d"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDF: switch (regop) {
- case 5: mnem = "fild_d"; break;
- case 7: mnem = "fistp_d"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(modrm_start);
- return count + 1;
-}
-
-int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
- byte modrm_byte) {
- bool has_register = false; // Is the FPU register encoded in modrm_byte?
- const char* mnem = "?";
-
- switch (escape_opcode) {
- case 0xD8:
- UnimplementedInstruction();
- break;
-
- case 0xD9:
- switch (modrm_byte & 0xF8) {
- case 0xC0:
- mnem = "fld";
- has_register = true;
- break;
- case 0xC8:
- mnem = "fxch";
- has_register = true;
- break;
- default:
- switch (modrm_byte) {
- case 0xE0: mnem = "fchs"; break;
- case 0xE1: mnem = "fabs"; break;
- case 0xE4: mnem = "ftst"; break;
- case 0xE8: mnem = "fld1"; break;
- case 0xEB: mnem = "fldpi"; break;
- case 0xED: mnem = "fldln2"; break;
- case 0xEE: mnem = "fldz"; break;
- case 0xF1: mnem = "fyl2x"; break;
- case 0xF5: mnem = "fprem1"; break;
- case 0xF7: mnem = "fincstp"; break;
- case 0xF8: mnem = "fprem"; break;
- case 0xFE: mnem = "fsin"; break;
- case 0xFF: mnem = "fcos"; break;
- default: UnimplementedInstruction();
- }
- }
- break;
-
- case 0xDA:
- if (modrm_byte == 0xE9) {
- mnem = "fucompp";
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xDB:
- if ((modrm_byte & 0xF8) == 0xE8) {
- mnem = "fucomi";
- has_register = true;
- } else if (modrm_byte == 0xE2) {
- mnem = "fclex";
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xDC:
- has_register = true;
- switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "fadd"; break;
- case 0xE8: mnem = "fsub"; break;
- case 0xC8: mnem = "fmul"; break;
- case 0xF8: mnem = "fdiv"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDD:
- has_register = true;
- switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "ffree"; break;
- case 0xD8: mnem = "fstp"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDE:
- if (modrm_byte == 0xD9) {
- mnem = "fcompp";
- } else {
- has_register = true;
- switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "faddp"; break;
- case 0xE8: mnem = "fsubp"; break;
- case 0xC8: mnem = "fmulp"; break;
- case 0xF8: mnem = "fdivp"; break;
- default: UnimplementedInstruction();
- }
- }
- break;
-
- case 0xDF:
- if (modrm_byte == 0xE0) {
- mnem = "fnstsw_ax";
- } else if ((modrm_byte & 0xF8) == 0xE8) {
- mnem = "fucomip";
- has_register = true;
- }
- break;
-
- default: UnimplementedInstruction();
- }
-
- if (has_register) {
- AppendToBuffer("%s st%d", mnem, modrm_byte & 0x7);
- } else {
- AppendToBuffer("%s", mnem);
- }
- return 2;
-}
-
-
-
-// Handle all two-byte opcodes, which start with 0x0F.
-// These instructions may be affected by an 0x66, 0xF2, or 0xF3 prefix.
-// We do not use any three-byte opcodes, which start with 0x0F38 or 0x0F3A.
-int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
- byte opcode = *(data + 1);
- byte* current = data + 2;
- // At return, "current" points to the start of the next instruction.
- const char* mnemonic = TwoByteMnemonic(opcode);
- if (operand_size_ == 0x66) {
- // 0x66 0x0F prefix.
- int mod, regop, rm;
- if (opcode == 0x3A) {
- byte third_byte = *current;
- current = data + 3;
- if (third_byte == 0x17) {
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("extractps "); // reg/m32, xmm, imm8
- current += PrintRightOperand(current);
- AppendToBuffer(", %s, %d", NameOfCPURegister(regop), (*current) & 3);
- current += 1;
- } else {
- UnimplementedInstruction();
- }
- } else {
- get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x6E) {
- AppendToBuffer("mov%c %s,",
- rex_w() ? 'q' : 'd',
- NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- } else if (opcode == 0x6F) {
- AppendToBuffer("movdqa %s,",
- NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- } else if (opcode == 0x7E) {
- AppendToBuffer("mov%c ",
- rex_w() ? 'q' : 'd');
- current += PrintRightOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
- } else if (opcode == 0x7F) {
- AppendToBuffer("movdqa ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
- } else {
- const char* mnemonic = "?";
- if (opcode == 0x50) {
- mnemonic = "movmskpd";
- } else if (opcode == 0x54) {
- mnemonic = "andpd";
- } else if (opcode == 0x56) {
- mnemonic = "orpd";
- } else if (opcode == 0x57) {
- mnemonic = "xorpd";
- } else if (opcode == 0x2E) {
- mnemonic = "ucomisd";
- } else if (opcode == 0x2F) {
- mnemonic = "comisd";
- } else {
- UnimplementedInstruction();
- }
- AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- }
- }
- } else if (group_1_prefix_ == 0xF2) {
- // Beginning of instructions with prefix 0xF2.
-
- if (opcode == 0x11 || opcode == 0x10) {
- // MOVSD: Move scalar double-precision fp to/from/between XMM registers.
- AppendToBuffer("movsd ");
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x11) {
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else {
- AppendToBuffer("%s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- }
- } else if (opcode == 0x2A) {
- // CVTSI2SD: integer to XMM double conversion.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%sd %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- } else if (opcode == 0x2C) {
- // CVTTSD2SI:
- // Convert with truncation scalar double-precision FP to integer.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("cvttsd2si%c %s,",
- operand_size_code(), NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
- } else if (opcode == 0x2D) {
- // CVTSD2SI: Convert scalar double-precision FP to integer.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("cvtsd2si%c %s,",
- operand_size_code(), NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
- } else if ((opcode & 0xF8) == 0x58 || opcode == 0x51) {
- // XMM arithmetic. Mnemonic was retrieved at the start of this function.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- } else {
- UnimplementedInstruction();
- }
- } else if (group_1_prefix_ == 0xF3) {
- // Instructions with prefix 0xF3.
- if (opcode == 0x11 || opcode == 0x10) {
- // MOVSS: Move scalar double-precision fp to/from/between XMM registers.
- AppendToBuffer("movss ");
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x11) {
- current += PrintRightOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else {
- AppendToBuffer("%s,", NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- }
- } else if (opcode == 0x2A) {
- // CVTSI2SS: integer to XMM single conversion.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%ss %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- } else if (opcode == 0x2C) {
- // CVTTSS2SI:
- // Convert with truncation scalar single-precision FP to dword integer.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("cvttss2si%c %s,",
- operand_size_code(), NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
- } else if (opcode == 0x5A) {
- // CVTSS2SD:
- // Convert scalar single-precision FP to scalar double-precision FP.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- } else {
- UnimplementedInstruction();
- }
- } else if (opcode == 0x1F) {
- // NOP
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- current++;
- if (regop == 4) { // SIB byte present.
- current++;
- }
- if (mod == 1) { // Byte displacement.
- current += 1;
- } else if (mod == 2) { // 32-bit displacement.
- current += 4;
- } // else no immediate displacement.
- AppendToBuffer("nop");
- } else if (opcode == 0xA2 || opcode == 0x31) {
- // RDTSC or CPUID
- AppendToBuffer("%s", mnemonic);
-
- } else if ((opcode & 0xF0) == 0x40) {
- // CMOVcc: conditional move.
- int condition = opcode & 0x0F;
- const InstructionDesc& idesc = cmov_instructions[condition];
- byte_size_operand_ = idesc.byte_size_operation;
- current += PrintOperands(idesc.mnem, idesc.op_order_, current);
-
- } else if ((opcode & 0xF0) == 0x80) {
- // Jcc: Conditional jump (branch).
- current = data + JumpConditional(data);
-
- } else if (opcode == 0xBE || opcode == 0xBF || opcode == 0xB6 ||
- opcode == 0xB7 || opcode == 0xAF) {
- // Size-extending moves, IMUL.
- current += PrintOperands(mnemonic, REG_OPER_OP_ORDER, current);
-
- } else if ((opcode & 0xF0) == 0x90) {
- // SETcc: Set byte on condition. Needs pointer to beginning of instruction.
- current = data + SetCC(data);
-
- } else if (opcode == 0xAB || opcode == 0xA5 || opcode == 0xAD) {
- // SHLD, SHRD (double-precision shift), BTS (bit set).
- AppendToBuffer("%s ", mnemonic);
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- current += PrintRightOperand(current);
- if (opcode == 0xAB) {
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- } else {
- AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
- }
- } else {
- UnimplementedInstruction();
- }
- return static_cast<int>(current - data);
-}
-
-
-// Mnemonics for two-byte opcode instructions starting with 0x0F.
-// The argument is the second byte of the two-byte opcode.
-// Returns NULL if the instruction is not handled here.
-const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
- switch (opcode) {
- case 0x1F:
- return "nop";
- case 0x2A: // F2/F3 prefix.
- return "cvtsi2s";
- case 0x31:
- return "rdtsc";
- case 0x51: // F2 prefix.
- return "sqrtsd";
- case 0x58: // F2 prefix.
- return "addsd";
- case 0x59: // F2 prefix.
- return "mulsd";
- case 0x5C: // F2 prefix.
- return "subsd";
- case 0x5E: // F2 prefix.
- return "divsd";
- case 0xA2:
- return "cpuid";
- case 0xA5:
- return "shld";
- case 0xAB:
- return "bts";
- case 0xAD:
- return "shrd";
- case 0xAF:
- return "imul";
- case 0xB6:
- return "movzxb";
- case 0xB7:
- return "movzxw";
- case 0xBE:
- return "movsxb";
- case 0xBF:
- return "movsxw";
- default:
- return NULL;
- }
-}
-
-
-// Disassembles the instruction at instr, and writes it into out_buffer.
-int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
- byte* instr) {
- tmp_buffer_pos_ = 0; // starting to write as position 0
- byte* data = instr;
- bool processed = true; // Will be set to false if the current instruction
- // is not in 'instructions' table.
- byte current;
-
- // Scan for prefixes.
- while (true) {
- current = *data;
- if (current == OPERAND_SIZE_OVERRIDE_PREFIX) { // Group 3 prefix.
- operand_size_ = current;
- } else if ((current & 0xF0) == 0x40) { // REX prefix.
- setRex(current);
- if (rex_w()) AppendToBuffer("REX.W ");
- } else if ((current & 0xFE) == 0xF2) { // Group 1 prefix (0xF2 or 0xF3).
- group_1_prefix_ = current;
- } else { // Not a prefix - an opcode.
- break;
- }
- data++;
- }
-
- const InstructionDesc& idesc = instruction_table.Get(current);
- byte_size_operand_ = idesc.byte_size_operation;
- switch (idesc.type) {
- case ZERO_OPERANDS_INSTR:
- if (current >= 0xA4 && current <= 0xA7) {
- // String move or compare operations.
- if (group_1_prefix_ == REP_PREFIX) {
- // REP.
- AppendToBuffer("rep ");
- }
- if (rex_w()) AppendToBuffer("REX.W ");
- AppendToBuffer("%s%c", idesc.mnem, operand_size_code());
- } else {
- AppendToBuffer("%s", idesc.mnem, operand_size_code());
- }
- data++;
- break;
-
- case TWO_OPERANDS_INSTR:
- data++;
- data += PrintOperands(idesc.mnem, idesc.op_order_, data);
- break;
-
- case JUMP_CONDITIONAL_SHORT_INSTR:
- data += JumpConditionalShort(data);
- break;
-
- case REGISTER_INSTR:
- AppendToBuffer("%s%c %s",
- idesc.mnem,
- operand_size_code(),
- NameOfCPURegister(base_reg(current & 0x07)));
- data++;
- break;
- case PUSHPOP_INSTR:
- AppendToBuffer("%s %s",
- idesc.mnem,
- NameOfCPURegister(base_reg(current & 0x07)));
- data++;
- break;
- case MOVE_REG_INSTR: {
- byte* addr = NULL;
- switch (operand_size()) {
- case WORD_SIZE:
- addr = reinterpret_cast<byte*>(*reinterpret_cast<int16_t*>(data + 1));
- data += 3;
- break;
- case DOUBLEWORD_SIZE:
- addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
- data += 5;
- break;
- case QUADWORD_SIZE:
- addr = reinterpret_cast<byte*>(*reinterpret_cast<int64_t*>(data + 1));
- data += 9;
- break;
- default:
- UNREACHABLE();
- }
- AppendToBuffer("mov%c %s,%s",
- operand_size_code(),
- NameOfCPURegister(base_reg(current & 0x07)),
- NameOfAddress(addr));
- break;
- }
-
- case CALL_JUMP_INSTR: {
- byte* addr = data + *reinterpret_cast<int32_t*>(data + 1) + 5;
- AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
- data += 5;
- break;
- }
-
- case SHORT_IMMEDIATE_INSTR: {
- byte* addr =
- reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
- AppendToBuffer("%s rax, %s", idesc.mnem, NameOfAddress(addr));
- data += 5;
- break;
- }
-
- case NO_INSTR:
- processed = false;
- break;
-
- default:
- UNIMPLEMENTED(); // This type is not implemented.
- }
-
- // The first byte didn't match any of the simple opcodes, so we
- // need to do special processing on it.
- if (!processed) {
- switch (*data) {
- case 0xC2:
- AppendToBuffer("ret 0x%x", *reinterpret_cast<uint16_t*>(data + 1));
- data += 3;
- break;
-
- case 0x69: // fall through
- case 0x6B: {
- int mod, regop, rm;
- get_modrm(*(data + 1), &mod, &regop, &rm);
- int32_t imm = *data == 0x6B ? *(data + 2)
- : *reinterpret_cast<int32_t*>(data + 2);
- AppendToBuffer("imul%c %s,%s,0x%x",
- operand_size_code(),
- NameOfCPURegister(regop),
- NameOfCPURegister(rm), imm);
- data += 2 + (*data == 0x6B ? 1 : 4);
- break;
- }
-
- case 0x81: // fall through
- case 0x83: // 0x81 with sign extension bit set
- data += PrintImmediateOp(data);
- break;
-
- case 0x0F:
- data += TwoByteOpcodeInstruction(data);
- break;
-
- case 0x8F: {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (regop == 0) {
- AppendToBuffer("pop ");
- data += PrintRightOperand(data);
- }
- }
- break;
-
- case 0xFF: {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- const char* mnem = NULL;
- switch (regop) {
- case 0:
- mnem = "inc";
- break;
- case 1:
- mnem = "dec";
- break;
- case 2:
- mnem = "call";
- break;
- case 4:
- mnem = "jmp";
- break;
- case 6:
- mnem = "push";
- break;
- default:
- mnem = "???";
- }
- AppendToBuffer(((regop <= 1) ? "%s%c " : "%s "),
- mnem,
- operand_size_code());
- data += PrintRightOperand(data);
- }
- break;
-
- case 0xC7: // imm32, fall through
- case 0xC6: // imm8
- {
- bool is_byte = *data == 0xC6;
- data++;
- if (is_byte) {
- AppendToBuffer("movb ");
- data += PrintRightByteOperand(data);
- int32_t imm = *data;
- AppendToBuffer(",0x%x", imm);
- data++;
- } else {
- AppendToBuffer("mov%c ", operand_size_code());
- data += PrintRightOperand(data);
- int32_t imm = *reinterpret_cast<int32_t*>(data);
- AppendToBuffer(",0x%x", imm);
- data += 4;
- }
- }
- break;
-
- case 0x80: {
- data++;
- AppendToBuffer("cmpb ");
- data += PrintRightByteOperand(data);
- int32_t imm = *data;
- AppendToBuffer(",0x%x", imm);
- data++;
- }
- break;
-
- case 0x88: // 8bit, fall through
- case 0x89: // 32bit
- {
- bool is_byte = *data == 0x88;
- int mod, regop, rm;
- data++;
- get_modrm(*data, &mod, &regop, &rm);
- if (is_byte) {
- AppendToBuffer("movb ");
- data += PrintRightByteOperand(data);
- AppendToBuffer(",%s", NameOfByteCPURegister(regop));
- } else {
- AppendToBuffer("mov%c ", operand_size_code());
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- }
- }
- break;
-
- case 0x90:
- case 0x91:
- case 0x92:
- case 0x93:
- case 0x94:
- case 0x95:
- case 0x96:
- case 0x97: {
- int reg = (*data & 0x7) | (rex_b() ? 8 : 0);
- if (reg == 0) {
- AppendToBuffer("nop"); // Common name for xchg rax,rax.
- } else {
- AppendToBuffer("xchg%c rax, %s",
- operand_size_code(),
- NameOfCPURegister(reg));
- }
- data++;
- }
- break;
-
- case 0xFE: {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (regop == 1) {
- AppendToBuffer("decb ");
- data += PrintRightByteOperand(data);
- } else {
- UnimplementedInstruction();
- }
- }
- break;
-
- case 0x68:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data + 1));
- data += 5;
- break;
-
- case 0x6A:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int8_t*>(data + 1));
- data += 2;
- break;
-
- case 0xA1: // Fall through.
- case 0xA3:
- switch (operand_size()) {
- case DOUBLEWORD_SIZE: {
- const char* memory_location = NameOfAddress(
- reinterpret_cast<byte*>(
- *reinterpret_cast<int32_t*>(data + 1)));
- if (*data == 0xA1) { // Opcode 0xA1
- AppendToBuffer("movzxlq rax,(%s)", memory_location);
- } else { // Opcode 0xA3
- AppendToBuffer("movzxlq (%s),rax", memory_location);
- }
- data += 5;
- break;
- }
- case QUADWORD_SIZE: {
- // New x64 instruction mov rax,(imm_64).
- const char* memory_location = NameOfAddress(
- *reinterpret_cast<byte**>(data + 1));
- if (*data == 0xA1) { // Opcode 0xA1
- AppendToBuffer("movq rax,(%s)", memory_location);
- } else { // Opcode 0xA3
- AppendToBuffer("movq (%s),rax", memory_location);
- }
- data += 9;
- break;
- }
- default:
- UnimplementedInstruction();
- data += 2;
- }
- break;
-
- case 0xA8:
- AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data + 1));
- data += 2;
- break;
-
- case 0xA9: {
- int64_t value = 0;
- switch (operand_size()) {
- case WORD_SIZE:
- value = *reinterpret_cast<uint16_t*>(data + 1);
- data += 3;
- break;
- case DOUBLEWORD_SIZE:
- value = *reinterpret_cast<uint32_t*>(data + 1);
- data += 5;
- break;
- case QUADWORD_SIZE:
- value = *reinterpret_cast<int32_t*>(data + 1);
- data += 5;
- break;
- default:
- UNREACHABLE();
- }
- AppendToBuffer("test%c rax,0x%"V8_PTR_PREFIX"x",
- operand_size_code(),
- value);
- break;
- }
- case 0xD1: // fall through
- case 0xD3: // fall through
- case 0xC1:
- data += ShiftInstruction(data);
- break;
- case 0xD0: // fall through
- case 0xD2: // fall through
- case 0xC0:
- byte_size_operand_ = true;
- data += ShiftInstruction(data);
- break;
-
- case 0xD9: // fall through
- case 0xDA: // fall through
- case 0xDB: // fall through
- case 0xDC: // fall through
- case 0xDD: // fall through
- case 0xDE: // fall through
- case 0xDF:
- data += FPUInstruction(data);
- break;
-
- case 0xEB:
- data += JumpShort(data);
- break;
-
- case 0xF6:
- byte_size_operand_ = true; // fall through
- case 0xF7:
- data += F6F7Instruction(data);
- break;
-
- default:
- UnimplementedInstruction();
- data += 1;
- }
- } // !processed
-
- if (tmp_buffer_pos_ < sizeof tmp_buffer_) {
- tmp_buffer_[tmp_buffer_pos_] = '\0';
- }
-
- int instr_len = static_cast<int>(data - instr);
- ASSERT(instr_len > 0); // Ensure progress.
-
- int outp = 0;
- // Instruction bytes.
- for (byte* bp = instr; bp < data; bp++) {
- outp += v8::internal::OS::SNPrintF(out_buffer + outp, "%02x", *bp);
- }
- for (int i = 6 - instr_len; i >= 0; i--) {
- outp += v8::internal::OS::SNPrintF(out_buffer + outp, " ");
- }
-
- outp += v8::internal::OS::SNPrintF(out_buffer + outp, " %s",
- tmp_buffer_.start());
- return instr_len;
-}
-
-//------------------------------------------------------------------------------
-
-
-static const char* cpu_regs[16] = {
- "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
- "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
-};
-
-
-static const char* byte_cpu_regs[16] = {
- "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil",
- "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l"
-};
-
-
-static const char* xmm_regs[16] = {
- "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
- "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
-};
-
-
-const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
- return tmp_buffer_.start();
-}
-
-
-const char* NameConverter::NameOfConstant(byte* addr) const {
- return NameOfAddress(addr);
-}
-
-
-const char* NameConverter::NameOfCPURegister(int reg) const {
- if (0 <= reg && reg < 16)
- return cpu_regs[reg];
- return "noreg";
-}
-
-
-const char* NameConverter::NameOfByteCPURegister(int reg) const {
- if (0 <= reg && reg < 16)
- return byte_cpu_regs[reg];
- return "noreg";
-}
-
-
-const char* NameConverter::NameOfXMMRegister(int reg) const {
- if (0 <= reg && reg < 16)
- return xmm_regs[reg];
- return "noxmmreg";
-}
-
-
-const char* NameConverter::NameInCode(byte* addr) const {
- // X64 does not embed debug strings at the moment.
- UNREACHABLE();
- return "";
-}
-
-//------------------------------------------------------------------------------
-
-Disassembler::Disassembler(const NameConverter& converter)
- : converter_(converter) { }
-
-Disassembler::~Disassembler() { }
-
-
-int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
- byte* instruction) {
- DisassemblerX64 d(converter_, CONTINUE_ON_UNIMPLEMENTED_OPCODE);
- return d.InstructionDecode(buffer, instruction);
-}
-
-
-// The X64 assembler does not use constant pools.
-int Disassembler::ConstantPoolSizeAt(byte* instruction) {
- return -1;
-}
-
-
-void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
- NameConverter converter;
- Disassembler d(converter);
- for (byte* pc = begin; pc < end;) {
- v8::internal::EmbeddedVector<char, 128> buffer;
- buffer[0] = '\0';
- byte* prev_pc = pc;
- pc += d.InstructionDecode(buffer, pc);
- fprintf(f, "%p", prev_pc);
- fprintf(f, " ");
-
- for (byte* bp = prev_pc; bp < pc; bp++) {
- fprintf(f, "%02x", *bp);
- }
- for (int i = 6 - static_cast<int>(pc - prev_pc); i >= 0; i--) {
- fprintf(f, " ");
- }
- fprintf(f, " %s\n", buffer.start());
- }
-}
-
-} // namespace disasm
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/frames-x64.cc b/src/3rdparty/v8/src/x64/frames-x64.cc
deleted file mode 100644
index 6c58bc9..0000000
--- a/src/3rdparty/v8/src/x64/frames-x64.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "frames-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-Address ExitFrame::ComputeStackPointer(Address fp) {
- return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/frames-x64.h b/src/3rdparty/v8/src/x64/frames-x64.h
deleted file mode 100644
index b14267c..0000000
--- a/src/3rdparty/v8/src/x64/frames-x64.h
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_FRAMES_X64_H_
-#define V8_X64_FRAMES_X64_H_
-
-namespace v8 {
-namespace internal {
-
-static const int kNumRegs = 16;
-static const RegList kJSCallerSaved =
- 1 << 0 | // rax
- 1 << 1 | // rcx
- 1 << 2 | // rdx
- 1 << 3 | // rbx - used as a caller-saved register in JavaScript code
- 1 << 7; // rdi - callee function
-
-static const int kNumJSCallerSaved = 5;
-
-typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
-
-// Number of registers for which space is reserved in safepoints.
-static const int kNumSafepointRegisters = 16;
-
-// ----------------------------------------------------
-
-class StackHandlerConstants : public AllStatic {
- public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kFPOffset = 1 * kPointerSize;
- static const int kStateOffset = 2 * kPointerSize;
- static const int kPCOffset = 3 * kPointerSize;
-
- static const int kSize = 4 * kPointerSize;
-};
-
-
-class EntryFrameConstants : public AllStatic {
- public:
-#ifdef _WIN64
- static const int kCallerFPOffset = -10 * kPointerSize;
-#else
- static const int kCallerFPOffset = -8 * kPointerSize;
-#endif
- static const int kArgvOffset = 6 * kPointerSize;
-};
-
-
-class ExitFrameConstants : public AllStatic {
- public:
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
-
- static const int kCallerFPOffset = +0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
-
- // FP-relative displacement of the caller's SP. It points just
- // below the saved PC.
- static const int kCallerSPDisplacement = +2 * kPointerSize;
-};
-
-
-class StandardFrameConstants : public AllStatic {
- public:
- static const int kExpressionsOffset = -3 * kPointerSize;
- static const int kMarkerOffset = -2 * kPointerSize;
- static const int kContextOffset = -1 * kPointerSize;
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
- static const int kCallerSPOffset = +2 * kPointerSize;
-};
-
-
-class JavaScriptFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
-
- // Caller SP-relative.
- static const int kParam0Offset = -2 * kPointerSize;
- static const int kReceiverOffset = -1 * kPointerSize;
-};
-
-
-class ArgumentsAdaptorFrameConstants : public AllStatic {
- public:
- static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
-class InternalFrameConstants : public AllStatic {
- public:
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
-inline Object* JavaScriptFrame::function_slot_object() const {
- const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-} } // namespace v8::internal
-
-#endif // V8_X64_FRAMES_X64_H_
diff --git a/src/3rdparty/v8/src/x64/full-codegen-x64.cc b/src/3rdparty/v8/src/x64/full-codegen-x64.cc
deleted file mode 100644
index 4bf84a8..0000000
--- a/src/3rdparty/v8/src/x64/full-codegen-x64.cc
+++ /dev/null
@@ -1,4339 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "parser.h"
-#include "scopes.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-
-class JumpPatchSite BASE_EMBEDDED {
- public:
- explicit JumpPatchSite(MacroAssembler* masm)
- : masm_(masm) {
-#ifdef DEBUG
- info_emitted_ = false;
-#endif
- }
-
- ~JumpPatchSite() {
- ASSERT(patch_site_.is_bound() == info_emitted_);
- }
-
- void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
- __ testb(reg, Immediate(kSmiTagMask));
- EmitJump(not_carry, target); // Always taken before patched.
- }
-
- void EmitJumpIfSmi(Register reg, NearLabel* target) {
- __ testb(reg, Immediate(kSmiTagMask));
- EmitJump(carry, target); // Never taken before patched.
- }
-
- void EmitPatchInfo() {
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
- ASSERT(is_int8(delta_to_patch_site));
- __ testl(rax, Immediate(delta_to_patch_site));
-#ifdef DEBUG
- info_emitted_ = true;
-#endif
- }
-
- bool is_bound() const { return patch_site_.is_bound(); }
-
- private:
- // jc will be patched with jz, jnc will become jnz.
- void EmitJump(Condition cc, NearLabel* target) {
- ASSERT(!patch_site_.is_bound() && !info_emitted_);
- ASSERT(cc == carry || cc == not_carry);
- __ bind(&patch_site_);
- __ j(cc, target);
- }
-
- MacroAssembler* masm_;
- Label patch_site_;
-#ifdef DEBUG
- bool info_emitted_;
-#endif
-};
-
-
-// Generate code for a JS function. On entry to the function the receiver
-// and arguments have been pushed on the stack left to right, with the
-// return address on top of them. The actual argument count matches the
-// formal parameter count expected by the function.
-//
-// The live registers are:
-// o rdi: the JS function object being called (ie, ourselves)
-// o rsi: our context
-// o rbp: our caller's frame pointer
-// o rsp: stack pointer (pointing to return address)
-//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-x64.h for its layout.
-void FullCodeGenerator::Generate(CompilationInfo* info) {
- ASSERT(info_ == NULL);
- info_ = info;
- SetFunctionPosition(function());
- Comment cmnt(masm_, "[ function compiled by full code generator");
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
-#endif
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS Function.
-
- { Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = scope()->num_stack_slots();
- if (locals_count == 1) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- } else if (locals_count > 1) {
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < locals_count; i++) {
- __ push(rdx);
- }
- }
- }
-
- bool function_in_register = true;
-
- // Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment cmnt(masm_, "[ Allocate local context");
- // Argument to NewContext is the function, which is still in rdi.
- __ push(rdi);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewContext, 1);
- }
- function_in_register = false;
- // Context is returned in both rax and rsi. It replaces the context
- // passed to us. It's saved in the stack and kept live in rsi.
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
-
- // Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Slot* slot = scope()->parameter(i)->AsSlot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ movq(rax, Operand(rbp, parameter_offset));
- // Store it in the context.
- int context_offset = Context::SlotOffset(slot->index());
- __ movq(Operand(rsi, context_offset), rax);
- // Update the write barrier. This clobbers all involved
- // registers, so we have use a third register to avoid
- // clobbering rsi.
- __ movq(rcx, rsi);
- __ RecordWrite(rcx, context_offset, rax, rbx);
- }
- }
- }
-
- // Possibly allocate an arguments object.
- Variable* arguments = scope()->arguments();
- if (arguments != NULL) {
- // Arguments object must be allocated after the context object, in
- // case the "arguments" or ".arguments" variables are in the context.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (function_in_register) {
- __ push(rdi);
- } else {
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- }
- // The receiver is just before the parameters on the caller's stack.
- int offset = scope()->num_parameters() * kPointerSize;
- __ lea(rdx,
- Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
- __ push(rdx);
- __ Push(Smi::FromInt(scope()->num_parameters()));
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(
- is_strict_mode() ? ArgumentsAccessStub::NEW_STRICT
- : ArgumentsAccessStub::NEW_NON_STRICT);
- __ CallStub(&stub);
-
- Variable* arguments_shadow = scope()->arguments_shadow();
- if (arguments_shadow != NULL) {
- // Store new arguments object in both "arguments" and ".arguments" slots.
- __ movq(rcx, rax);
- Move(arguments_shadow->AsSlot(), rcx, rbx, rdx);
- }
- Move(arguments->AsSlot(), rax, rbx, rdx);
- }
-
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
-
- // Visit the declarations and body unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ Declarations");
- scope()->VisitIllegalRedeclaration(this);
- } else {
- { Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- EmitDeclaration(scope()->function(), Variable::CONST, NULL);
- }
- VisitDeclarations(scope()->declarations());
- }
-
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailout(info->function(), NO_REGISTERS);
- NearLabel ok;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ bind(&ok);
- }
-
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(function()->body());
- ASSERT(loop_depth() == 0);
- }
- }
-
- // Always emit a 'return undefined' in case control fell off the end of
- // the body.
- { Comment cmnt(masm_, "[ return <undefined>;");
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- EmitReturnSequence();
- }
-}
-
-
-void FullCodeGenerator::ClearAccumulator() {
- __ Set(rax, 0);
-}
-
-
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
- Comment cmnt(masm_, "[ Stack check");
- NearLabel ok;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok);
- StackCheckStub stub;
- __ CallStub(&stub);
- // Record a mapping of this PC offset to the OSR id. This is used to find
- // the AST id from the unoptimized code in order to use it as a key into
- // the deoptimization input data found in the optimized code.
- RecordStackCheck(stmt->OsrEntryId());
-
- // Loop stack checks can be patched to perform on-stack replacement. In
- // order to decide whether or not to perform OSR we embed the loop depth
- // in a test instruction after the call so we can extract it from the OSR
- // builtin.
- ASSERT(loop_depth() > 0);
- __ testl(rax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
-
- __ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
- // Record a mapping of the OSR id to this PC. This is used if the OSR
- // entry becomes the target of a bailout. We don't expect it to be, but
- // we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::EmitReturnSequence() {
- Comment cmnt(masm_, "[ Return sequence");
- if (return_label_.is_bound()) {
- __ jmp(&return_label_);
- } else {
- __ bind(&return_label_);
- if (FLAG_trace) {
- __ push(rax);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
- CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
- __ RecordJSReturn();
- // Do not use the leave instruction here because it is too short to
- // patch with the code required by the debugger.
- __ movq(rsp, rbp);
- __ pop(rbp);
-
- int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
- __ Ret(arguments_bytes, rcx);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Add padding that will be overwritten by a debugger breakpoint. We
- // have just generated at least 7 bytes: "movq rsp, rbp; pop rbp; ret k"
- // (3 + 1 + 3).
- const int kPadding = Assembler::kJSReturnSequenceLength - 7;
- for (int i = 0; i < kPadding; ++i) {
- masm_->int3();
- }
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- ASSERT(Assembler::kJSReturnSequenceLength <=
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
- }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
- MemOperand slot_operand = codegen()->EmitSlotSearch(slot, result_register());
- __ movq(result_register(), slot_operand);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
- MemOperand slot_operand = codegen()->EmitSlotSearch(slot, result_register());
- __ push(slot_operand);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
- codegen()->Move(result_register(), slot);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Heap::RootListIndex index) const {
- __ LoadRoot(result_register(), index);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Heap::RootListIndex index) const {
- __ PushRoot(index);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
- true,
- true_label_,
- false_label_);
- if (index == Heap::kUndefinedValueRootIndex ||
- index == Heap::kNullValueRootIndex ||
- index == Heap::kFalseValueRootIndex) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else if (index == Heap::kTrueValueRootIndex) {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- } else {
- __ LoadRoot(result_register(), index);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Handle<Object> lit) const {
- __ Move(result_register(), lit);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
- __ Push(lit);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
- true,
- true_label_,
- false_label_);
- ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
- if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else if (lit->IsTrue() || lit->IsJSObject()) {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- } else if (lit->IsString()) {
- if (String::cast(*lit)->length() == 0) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- }
- } else if (lit->IsSmi()) {
- if (Smi::cast(*lit)->value() == 0) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- }
- } else {
- // For simplicity we always test the accumulator register.
- __ Move(result_register(), lit);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count,
- Register reg) const {
- ASSERT(count > 0);
- __ Drop(count);
- __ Move(result_register(), reg);
-}
-
-
-void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- if (count > 1) __ Drop(count - 1);
- __ movq(Operand(rsp, 0), reg);
-}
-
-
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- // For simplicity we always test the accumulator register.
- __ Drop(count);
- __ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- ASSERT(materialize_true == materialize_false);
- __ bind(materialize_true);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- NearLabel done;
- __ bind(materialize_true);
- __ Move(result_register(), isolate()->factory()->true_value());
- __ jmp(&done);
- __ bind(materialize_false);
- __ Move(result_register(), isolate()->factory()->false_value());
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- NearLabel done;
- __ bind(materialize_true);
- __ Push(isolate()->factory()->true_value());
- __ jmp(&done);
- __ bind(materialize_false);
- __ Push(isolate()->factory()->false_value());
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- ASSERT(materialize_true == true_label_);
- ASSERT(materialize_false == false_label_);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
- Heap::RootListIndex value_root_index =
- flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- __ LoadRoot(result_register(), value_root_index);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
- Heap::RootListIndex value_root_index =
- flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- __ PushRoot(value_root_index);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
- true,
- true_label_,
- false_label_);
- if (flag) {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- } else {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- }
-}
-
-
-void FullCodeGenerator::DoTest(Label* if_true,
- Label* if_false,
- Label* fall_through) {
- // Emit the inlined tests assumed by the stub.
- __ CompareRoot(result_register(), Heap::kUndefinedValueRootIndex);
- __ j(equal, if_false);
- __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
- __ j(equal, if_true);
- __ CompareRoot(result_register(), Heap::kFalseValueRootIndex);
- __ j(equal, if_false);
- STATIC_ASSERT(kSmiTag == 0);
- __ Cmp(result_register(), Smi::FromInt(0));
- __ j(equal, if_false);
- Condition is_smi = masm_->CheckSmi(result_register());
- __ j(is_smi, if_true);
-
- // Call the ToBoolean stub for all other cases.
- ToBooleanStub stub;
- __ push(result_register());
- __ CallStub(&stub);
- __ testq(rax, rax);
-
- // The stub returns nonzero for true.
- Split(not_zero, if_true, if_false, fall_through);
-}
-
-
-void FullCodeGenerator::Split(Condition cc,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (if_false == fall_through) {
- __ j(cc, if_true);
- } else if (if_true == fall_through) {
- __ j(NegateCondition(cc), if_false);
- } else {
- __ j(cc, if_true);
- __ jmp(if_false);
- }
-}
-
-
-MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- return Operand(rbp, SlotOffset(slot));
- case Slot::CONTEXT: {
- int context_chain_length =
- scope()->ContextChainLength(slot->var()->scope());
- __ LoadContext(scratch, context_chain_length);
- return ContextOperand(scratch, slot->index());
- }
- case Slot::LOOKUP:
- UNREACHABLE();
- }
- UNREACHABLE();
- return Operand(rax, 0);
-}
-
-
-void FullCodeGenerator::Move(Register destination, Slot* source) {
- MemOperand location = EmitSlotSearch(source, destination);
- __ movq(destination, location);
-}
-
-
-void FullCodeGenerator::Move(Slot* dst,
- Register src,
- Register scratch1,
- Register scratch2) {
- ASSERT(dst->type() != Slot::LOOKUP); // Not yet implemented.
- ASSERT(!scratch1.is(src) && !scratch2.is(src));
- MemOperand location = EmitSlotSearch(dst, scratch1);
- __ movq(location, src);
- // Emit the write barrier code if the location is in the heap.
- if (dst->type() == Slot::CONTEXT) {
- int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
- __ RecordWrite(scratch1, offset, src, scratch2);
- }
-}
-
-
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
- bool should_normalize,
- Label* if_true,
- Label* if_false) {
- // Only prepare for bailouts before splits if we're in a test
- // context. Otherwise, we let the Visit function deal with the
- // preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest() || !info_->IsOptimizable()) return;
-
- NearLabel skip;
- if (should_normalize) __ jmp(&skip);
-
- ForwardBailoutStack* current = forward_bailout_stack_;
- while (current != NULL) {
- PrepareForBailout(current->expr(), state);
- current = current->parent();
- }
-
- if (should_normalize) {
- __ CompareRoot(rax, Heap::kTrueValueRootIndex);
- Split(equal, if_true, if_false, NULL);
- __ bind(&skip);
- }
-}
-
-
-void FullCodeGenerator::EmitDeclaration(Variable* variable,
- Variable::Mode mode,
- FunctionLiteral* function) {
- Comment cmnt(masm_, "[ Declaration");
- ASSERT(variable != NULL); // Must have been resolved.
- Slot* slot = variable->AsSlot();
- Property* prop = variable->AsProperty();
-
- if (slot != NULL) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- if (mode == Variable::CONST) {
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ movq(Operand(rbp, SlotOffset(slot)), kScratchRegister);
- } else if (function != NULL) {
- VisitForAccumulatorValue(function);
- __ movq(Operand(rbp, SlotOffset(slot)), result_register());
- }
- break;
-
- case Slot::CONTEXT:
- // We bypass the general EmitSlotSearch because we know more about
- // this specific context.
-
- // The variable in the decl always resides in the current context.
- ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (FLAG_debug_code) {
- // Check if we have the correct context pointer.
- __ movq(rbx, ContextOperand(rsi, Context::FCONTEXT_INDEX));
- __ cmpq(rbx, rsi);
- __ Check(equal, "Unexpected declaration in current context.");
- }
- if (mode == Variable::CONST) {
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ movq(ContextOperand(rsi, slot->index()), kScratchRegister);
- // No write barrier since the hole value is in old space.
- } else if (function != NULL) {
- VisitForAccumulatorValue(function);
- __ movq(ContextOperand(rsi, slot->index()), result_register());
- int offset = Context::SlotOffset(slot->index());
- __ movq(rbx, rsi);
- __ RecordWrite(rbx, offset, result_register(), rcx);
- }
- break;
-
- case Slot::LOOKUP: {
- __ push(rsi);
- __ Push(variable->name());
- // Declaration nodes are always introduced in one of two modes.
- ASSERT(mode == Variable::VAR || mode == Variable::CONST);
- PropertyAttributes attr = (mode == Variable::VAR) ? NONE : READ_ONLY;
- __ Push(Smi::FromInt(attr));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (mode == Variable::CONST) {
- __ PushRoot(Heap::kTheHoleValueRootIndex);
- } else if (function != NULL) {
- VisitForStackValue(function);
- } else {
- __ Push(Smi::FromInt(0)); // no initial value!
- }
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-
- } else if (prop != NULL) {
- if (function != NULL || mode == Variable::CONST) {
- // We are declaring a function or constant that rewrites to a
- // property. Use (keyed) IC to set the initial value. We
- // cannot visit the rewrite because it's shared and we risk
- // recording duplicate AST IDs for bailouts from optimized code.
- ASSERT(prop->obj()->AsVariableProxy() != NULL);
- { AccumulatorValueContext for_object(this);
- EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
- }
- if (function != NULL) {
- __ push(rax);
- VisitForAccumulatorValue(function);
- __ pop(rdx);
- } else {
- __ movq(rdx, rax);
- __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
- }
- ASSERT(prop->key()->AsLiteral() != NULL &&
- prop->key()->AsLiteral()->handle()->IsSmi());
- __ Move(rcx, prop->key()->AsLiteral()->handle());
-
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- }
- }
-}
-
-
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
- EmitDeclaration(decl->proxy()->var(), decl->mode(), decl->fun());
-}
-
-
-void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals.
- __ push(rsi); // The context is the first argument.
- __ Push(pairs);
- __ Push(Smi::FromInt(is_eval() ? 1 : 0));
- __ Push(Smi::FromInt(strict_mode_flag()));
- __ CallRuntime(Runtime::kDeclareGlobals, 4);
- // Return value is ignored.
-}
-
-
-void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
- Comment cmnt(masm_, "[ SwitchStatement");
- Breakable nested_statement(this, stmt);
- SetStatementPosition(stmt);
-
- // Keep the switch value on the stack until a case matches.
- VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
-
- ZoneList<CaseClause*>* clauses = stmt->cases();
- CaseClause* default_clause = NULL; // Can occur anywhere in the list.
-
- Label next_test; // Recycled for each test.
- // Compile all the tests with branches to their bodies.
- for (int i = 0; i < clauses->length(); i++) {
- CaseClause* clause = clauses->at(i);
- clause->body_target()->entry_label()->Unuse();
-
- // The default is not a test, but remember it as final fall through.
- if (clause->is_default()) {
- default_clause = clause;
- continue;
- }
-
- Comment cmnt(masm_, "[ Case comparison");
- __ bind(&next_test);
- next_test.Unuse();
-
- // Compile the label expression.
- VisitForAccumulatorValue(clause->label());
-
- // Perform the comparison as if via '==='.
- __ movq(rdx, Operand(rsp, 0)); // Switch value.
- bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- NearLabel slow_case;
- __ movq(rcx, rdx);
- __ or_(rcx, rax);
- patch_site.EmitJumpIfNotSmi(rcx, &slow_case);
-
- __ cmpq(rdx, rax);
- __ j(not_equal, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target()->entry_label());
- __ bind(&slow_case);
- }
-
- // Record position before stub call for type feedback.
- SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- EmitCallIC(ic, &patch_site);
-
- __ testq(rax, rax);
- __ j(not_equal, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target()->entry_label());
- }
-
- // Discard the test value and jump to the default if present, otherwise to
- // the end of the statement.
- __ bind(&next_test);
- __ Drop(1); // Switch value is no longer needed.
- if (default_clause == NULL) {
- __ jmp(nested_statement.break_target());
- } else {
- __ jmp(default_clause->body_target()->entry_label());
- }
-
- // Compile all the case bodies.
- for (int i = 0; i < clauses->length(); i++) {
- Comment cmnt(masm_, "[ Case body");
- CaseClause* clause = clauses->at(i);
- __ bind(clause->body_target()->entry_label());
- PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
- VisitStatements(clause->statements());
- }
-
- __ bind(nested_statement.break_target());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
- Comment cmnt(masm_, "[ ForInStatement");
- SetStatementPosition(stmt);
-
- Label loop, exit;
- ForIn loop_statement(this, stmt);
- increment_loop_depth();
-
- // Get the object to enumerate over. Both SpiderMonkey and JSC
- // ignore null and undefined in contrast to the specification; see
- // ECMA-262 section 12.6.4.
- VisitForAccumulatorValue(stmt->enumerable());
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(equal, &exit);
- Register null_value = rdi;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmpq(rax, null_value);
- __ j(equal, &exit);
-
- // Convert the object to a JS object.
- Label convert, done_convert;
- __ JumpIfSmi(rax, &convert);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(above_equal, &done_convert);
- __ bind(&convert);
- __ push(rax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ bind(&done_convert);
- __ push(rax);
-
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- Label next, call_runtime;
- Register empty_fixed_array_value = r8;
- __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- Register empty_descriptor_array_value = r9;
- __ LoadRoot(empty_descriptor_array_value,
- Heap::kEmptyDescriptorArrayRootIndex);
- __ movq(rcx, rax);
- __ bind(&next);
-
- // Check that there are no elements. Register rcx contains the
- // current JS object we've reached through the prototype chain.
- __ cmpq(empty_fixed_array_value,
- FieldOperand(rcx, JSObject::kElementsOffset));
- __ j(not_equal, &call_runtime);
-
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in rbx for the subsequent
- // prototype load.
- __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
- __ cmpq(rdx, empty_descriptor_array_value);
- __ j(equal, &call_runtime);
-
- // Check that there is an enum cache in the non-empty instance
- // descriptors (rdx). This is the case if the next enumeration
- // index field does not contain a smi.
- __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
- __ JumpIfSmi(rdx, &call_runtime);
-
- // For all objects but the receiver, check that the cache is empty.
- NearLabel check_prototype;
- __ cmpq(rcx, rax);
- __ j(equal, &check_prototype);
- __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- __ cmpq(rdx, empty_fixed_array_value);
- __ j(not_equal, &call_runtime);
-
- // Load the prototype from the map and loop if non-null.
- __ bind(&check_prototype);
- __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- __ cmpq(rcx, null_value);
- __ j(not_equal, &next);
-
- // The enum cache is valid. Load the map of the object being
- // iterated over and use the cache for the iteration.
- NearLabel use_cache;
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
- __ jmp(&use_cache);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(rax); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
- // If we got a map from the runtime call, we can do a fast
- // modification check. Otherwise, we got a fixed array, and we have
- // to do a slow check.
- NearLabel fixed_array;
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kMetaMapRootIndex);
- __ j(not_equal, &fixed_array);
-
- // We got a map in register rax. Get the enumeration cache from it.
- __ bind(&use_cache);
- __ movq(rcx, FieldOperand(rax, Map::kInstanceDescriptorsOffset));
- __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
- __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
- // Setup the four remaining stack slots.
- __ push(rax); // Map.
- __ push(rdx); // Enumeration cache.
- __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
- __ push(rax); // Enumeration cache length (as smi).
- __ Push(Smi::FromInt(0)); // Initial index.
- __ jmp(&loop);
-
- // We got a fixed array in register rax. Iterate through that.
- __ bind(&fixed_array);
- __ Push(Smi::FromInt(0)); // Map (0) - force slow check.
- __ push(rax);
- __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
- __ push(rax); // Fixed array length (as smi).
- __ Push(Smi::FromInt(0)); // Initial index.
-
- // Generate code for doing the condition check.
- __ bind(&loop);
- __ movq(rax, Operand(rsp, 0 * kPointerSize)); // Get the current index.
- __ cmpq(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
- __ j(above_equal, loop_statement.break_target());
-
- // Get the current entry of the array into register rbx.
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
- SmiIndex index = masm()->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ movq(rbx, FieldOperand(rbx,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
-
- // Get the expected map from the stack or a zero map in the
- // permanent slow case into register rdx.
- __ movq(rdx, Operand(rsp, 3 * kPointerSize));
-
- // Check if the expected map still matches that of the enumerable.
- // If not, we have to filter the key.
- NearLabel update_each;
- __ movq(rcx, Operand(rsp, 4 * kPointerSize));
- __ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ j(equal, &update_each);
-
- // Convert the entry to a string or null if it isn't a property
- // anymore. If the property has been removed while iterating, we
- // just skip it.
- __ push(rcx); // Enumerable.
- __ push(rbx); // Current entry.
- __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
- __ Cmp(rax, Smi::FromInt(0));
- __ j(equal, loop_statement.continue_target());
- __ movq(rbx, rax);
-
- // Update the 'each' property or variable from the possibly filtered
- // entry in register rbx.
- __ bind(&update_each);
- __ movq(result_register(), rbx);
- // Perform the assignment as if via '='.
- { EffectContext context(this);
- EmitAssignment(stmt->each(), stmt->AssignmentId());
- }
-
- // Generate code for the body of the loop.
- Visit(stmt->body());
-
- // Generate code for going to the next element by incrementing the
- // index (smi) stored on top of the stack.
- __ bind(loop_statement.continue_target());
- __ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1));
-
- EmitStackCheck(stmt);
- __ jmp(&loop);
-
- // Remove the pointers stored on the stack.
- __ bind(loop_statement.break_target());
- __ addq(rsp, Immediate(5 * kPointerSize));
-
- // Exit and decrement the loop depth.
- __ bind(&exit);
- decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
- bool pretenure) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning. If
- // we're running with the --always-opt or the --prepare-always-opt
- // flag, we need to use the runtime function so that the new function
- // we are creating here gets a chance to have its code optimized and
- // doesn't just get a copy of the existing unoptimized code.
- if (!FLAG_always_opt &&
- !FLAG_prepare_always_opt &&
- !pretenure &&
- scope()->is_function_scope() &&
- info->num_literals() == 0) {
- FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
- __ Push(info);
- __ CallStub(&stub);
- } else {
- __ push(rsi);
- __ Push(info);
- __ Push(pretenure
- ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value());
- __ CallRuntime(Runtime::kNewClosure, 3);
- }
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr->var());
-}
-
-
-void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
- Slot* slot,
- TypeofState typeof_state,
- Label* slow) {
- Register context = rsi;
- Register temp = rdx;
-
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
- }
- // Load next context in chain.
- __ movq(temp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ movq(temp, FieldOperand(temp, JSFunction::kContextOffset));
- // Walk the rest of the chain without clobbering rsi.
- context = temp;
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions. If we have reached an eval scope, we check
- // all extensions from this point.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s != NULL && s->is_eval_scope()) {
- // Loop up the context chain. There is no frame effect so it is
- // safe to use raw labels here.
- NearLabel next, fast;
- if (!context.is(temp)) {
- __ movq(temp, context);
- }
- // Load map for comparison into register, outside loop.
- __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
- __ bind(&next);
- // Terminate at global context.
- __ cmpq(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
- __ j(equal, &fast);
- // Check that extension is NULL.
- __ cmpq(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
- // Load next context in chain.
- __ movq(temp, ContextOperand(temp, Context::CLOSURE_INDEX));
- __ movq(temp, FieldOperand(temp, JSFunction::kContextOffset));
- __ jmp(&next);
- __ bind(&fast);
- }
-
- // All extension objects were empty and it is safe to use a global
- // load IC call.
- __ movq(rax, GlobalObjectOperand());
- __ Move(rcx, slot->var()->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- EmitCallIC(ic, mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
- Slot* slot,
- Label* slow) {
- ASSERT(slot->type() == Slot::CONTEXT);
- Register context = rsi;
- Register temp = rbx;
-
- for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
- }
- __ movq(temp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ movq(temp, FieldOperand(temp, JSFunction::kContextOffset));
- // Walk the rest of the chain without clobbering rsi.
- context = temp;
- }
- }
- // Check that last extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
-
- // This function is used only for loads, not stores, so it's safe to
- // return an rsi-based operand (the write barrier cannot be allowed to
- // destroy the rsi register).
- return ContextOperand(context, slot->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
- Slot* slot,
- TypeofState typeof_state,
- Label* slow,
- Label* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
- EmitLoadGlobalSlotCheckExtensions(slot, typeof_state, slow);
- __ jmp(done);
- } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
- Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
- if (potential_slot != NULL) {
- // Generate fast case for locals that rewrite to slots.
- __ movq(rax,
- ContextSlotOperandCheckExtensions(potential_slot, slow));
- if (potential_slot->var()->mode() == Variable::CONST) {
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, done);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- }
- __ jmp(done);
- } else if (rewrite != NULL) {
- // Generate fast case for calls of an argument function.
- Property* property = rewrite->AsProperty();
- if (property != NULL) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- Literal* key_literal = property->key()->AsLiteral();
- if (obj_proxy != NULL &&
- key_literal != NULL &&
- obj_proxy->IsArguments() &&
- key_literal->handle()->IsSmi()) {
- // Load arguments object if there are no eval-introduced
- // variables. Then load the argument from the arguments
- // object using keyed load.
- __ movq(rdx,
- ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
- slow));
- __ Move(rax, key_literal->handle());
- Handle<Code> ic =
- isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- __ jmp(done);
- }
- }
- }
- }
-}
-
-
-void FullCodeGenerator::EmitVariableLoad(Variable* var) {
- // Four cases: non-this global variables, lookup slots, all other
- // types of slots, and parameters that rewrite to explicit property
- // accesses on the arguments object.
- Slot* slot = var->AsSlot();
- Property* property = var->AsProperty();
-
- if (var->is_global() && !var->is_this()) {
- Comment cmnt(masm_, "Global variable");
- // Use inline caching. Variable name is passed in rcx and the global
- // object on the stack.
- __ Move(rcx, var->name());
- __ movq(rax, GlobalObjectOperand());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
- context()->Plug(rax);
-
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- Label done, slow;
-
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLoadFromSlotFastCase(slot, NOT_INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- Comment cmnt(masm_, "Lookup slot");
- __ push(rsi); // Context.
- __ Push(var->name());
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ bind(&done);
-
- context()->Plug(rax);
-
- } else if (slot != NULL) {
- Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
- ? "Context slot"
- : "Stack slot");
- if (var->mode() == Variable::CONST) {
- // Constants may be the hole value if they have not been initialized.
- // Unhole them.
- NearLabel done;
- MemOperand slot_operand = EmitSlotSearch(slot, rax);
- __ movq(rax, slot_operand);
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &done);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
- context()->Plug(rax);
- } else {
- context()->Plug(slot);
- }
-
- } else {
- Comment cmnt(masm_, "Rewritten parameter");
- ASSERT_NOT_NULL(property);
- // Rewritten parameter accesses are of the form "slot[literal]".
-
- // Assert that the object is in a slot.
- Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(object_var);
- Slot* object_slot = object_var->AsSlot();
- ASSERT_NOT_NULL(object_slot);
-
- // Load the object.
- MemOperand object_loc = EmitSlotSearch(object_slot, rax);
- __ movq(rdx, object_loc);
-
- // Assert that the key is a smi.
- Literal* key_literal = property->key()->AsLiteral();
- ASSERT_NOT_NULL(key_literal);
- ASSERT(key_literal->handle()->IsSmi());
-
- // Load the key.
- __ Move(rax, key_literal->handle());
-
- // Do a keyed property load.
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- context()->Plug(rax);
- }
-}
-
-
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // rdi = JS function.
- // rcx = literals array.
- // rbx = regexp literal.
- // rax = regexp literal clone.
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ movq(rbx, FieldOperand(rcx, literal_offset));
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &materialized);
-
- // Create regexp literal using runtime function
- // Result will be in rax.
- __ push(rcx);
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(expr->pattern());
- __ Push(expr->flags());
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ movq(rbx, rax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(rbx);
- __ Push(Smi::FromInt(size));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(rbx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ movq(rdx, FieldOperand(rbx, i));
- __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
- __ movq(FieldOperand(rax, i), rdx);
- __ movq(FieldOperand(rax, i + kPointerSize), rcx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
- __ movq(FieldOperand(rax, size - kPointerSize), rdx);
- }
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- Comment cmnt(masm_, "[ ObjectLiteral");
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(expr->constant_properties());
- int flags = expr->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- flags |= expr->has_function()
- ? ObjectLiteral::kHasFunction
- : ObjectLiteral::kNoFlags;
- __ Push(Smi::FromInt(flags));
- if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
- }
-
- // If result_saved is true the result is on top of the stack. If
- // result_saved is false the result is in rax.
- bool result_saved = false;
-
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- expr->CalculateEmitStore();
-
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
- if (property->IsCompileTimeValue()) continue;
-
- Literal* key = property->key();
- Expression* value = property->value();
- if (!result_saved) {
- __ push(rax); // Save result on the stack
- result_saved = true;
- }
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- UNREACHABLE();
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
- // Fall through.
- case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
- VisitForAccumulatorValue(value);
- __ Move(rcx, key->handle());
- __ movq(rdx, Operand(rsp, 0));
- if (property->emit_store()) {
- Handle<Code> ic = isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- PrepareForBailoutForId(key->id(), NO_REGISTERS);
- }
- break;
- }
- // Fall through.
- case ObjectLiteral::Property::PROTOTYPE:
- __ push(Operand(rsp, 0)); // Duplicate receiver.
- VisitForStackValue(key);
- VisitForStackValue(value);
- if (property->emit_store()) {
- __ Push(Smi::FromInt(NONE)); // PropertyAttributes
- __ CallRuntime(Runtime::kSetProperty, 4);
- } else {
- __ Drop(3);
- }
- break;
- case ObjectLiteral::Property::SETTER:
- case ObjectLiteral::Property::GETTER:
- __ push(Operand(rsp, 0)); // Duplicate receiver.
- VisitForStackValue(key);
- __ Push(property->kind() == ObjectLiteral::Property::SETTER ?
- Smi::FromInt(1) :
- Smi::FromInt(0));
- VisitForStackValue(value);
- __ CallRuntime(Runtime::kDefineAccessor, 4);
- break;
- }
- }
-
- if (expr->has_function()) {
- ASSERT(result_saved);
- __ push(Operand(rsp, 0));
- __ CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(rax);
- }
-}
-
-
-void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- ZoneList<Expression*>* subexprs = expr->values();
- int length = subexprs->length();
-
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(expr->constant_elements());
- if (expr->constant_elements()->map() ==
- isolate()->heap()->fixed_cow_array_map()) {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
- __ CallStub(&stub);
- __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
- } else if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- } else {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
- __ CallStub(&stub);
- }
-
- bool result_saved = false; // Is the result saved to the stack?
-
- // Emit code to evaluate all the non-constant subexpressions and to store
- // them into the newly cloned array.
- for (int i = 0; i < length; i++) {
- Expression* subexpr = subexprs->at(i);
- // If the subexpression is a literal or a simple materialized literal it
- // is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
-
- if (!result_saved) {
- __ push(rax);
- result_saved = true;
- }
- VisitForAccumulatorValue(subexpr);
-
- // Store the subexpression value in the array's elements.
- __ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
- __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ movq(FieldOperand(rbx, offset), result_register());
-
- // Update the write barrier for the array store.
- __ RecordWrite(rbx, offset, result_register(), rcx);
-
- PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(rax);
- }
-}
-
-
-void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* property = expr->target()->AsProperty();
- if (property != NULL) {
- assign_type = (property->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- // Evaluate LHS expression.
- switch (assign_type) {
- case VARIABLE:
- // Nothing to do here.
- break;
- case NAMED_PROPERTY:
- if (expr->is_compound()) {
- // We need the receiver both on the stack and in the accumulator.
- VisitForAccumulatorValue(property->obj());
- __ push(result_register());
- } else {
- VisitForStackValue(property->obj());
- }
- break;
- case KEYED_PROPERTY: {
- if (expr->is_compound()) {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), rcx);
- __ push(slot_operand);
- __ Move(rax, property->key()->AsLiteral()->handle());
- } else {
- VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
- }
- __ movq(rdx, Operand(rsp, 0));
- __ push(rax);
- } else {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), rcx);
- __ push(slot_operand);
- __ Push(property->key()->AsLiteral()->handle());
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
- }
- break;
- }
- }
-
- // For compound assignments we need another deoptimization point after the
- // variable/property load.
- if (expr->is_compound()) {
- { AccumulatorValueContext context(this);
- switch (assign_type) {
- case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy()->var());
- PrepareForBailout(expr->target(), TOS_REG);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
- break;
- }
- }
-
- Token::Value op = expr->binary_op();
- __ push(rax); // Left operand goes on the stack.
- VisitForAccumulatorValue(expr->value());
-
- OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
- ? OVERWRITE_RIGHT
- : NO_OVERWRITE;
- SetSourcePosition(expr->position() + 1);
- AccumulatorValueContext context(this);
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr,
- op,
- mode,
- expr->target(),
- expr->value());
- } else {
- EmitBinaryOp(op, mode);
- }
- // Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), TOS_REG);
- } else {
- VisitForAccumulatorValue(expr->value());
- }
-
- // Record source position before possible IC call.
- SetSourcePosition(expr->position());
-
- // Store the value.
- switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(rax);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyAssignment(expr);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyAssignment(expr);
- break;
- }
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Literal* key = prop->key()->AsLiteral();
- __ Move(rcx, key->handle());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
-}
-
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
- Token::Value op,
- OverwriteMode mode,
- Expression* left,
- Expression* right) {
- // Do combined smi check of the operands. Left operand is on the
- // stack (popped into rdx). Right operand is in rax but moved into
- // rcx to make the shifts easier.
- NearLabel done, stub_call, smi_case;
- __ pop(rdx);
- __ movq(rcx, rax);
- __ or_(rax, rdx);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(rax, &smi_case);
-
- __ bind(&stub_call);
- __ movq(rax, rcx);
- TypeRecordingBinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), &patch_site);
- __ jmp(&done);
-
- __ bind(&smi_case);
- switch (op) {
- case Token::SAR:
- __ SmiShiftArithmeticRight(rax, rdx, rcx);
- break;
- case Token::SHL:
- __ SmiShiftLeft(rax, rdx, rcx);
- break;
- case Token::SHR:
- __ SmiShiftLogicalRight(rax, rdx, rcx, &stub_call);
- break;
- case Token::ADD:
- __ SmiAdd(rax, rdx, rcx, &stub_call);
- break;
- case Token::SUB:
- __ SmiSub(rax, rdx, rcx, &stub_call);
- break;
- case Token::MUL:
- __ SmiMul(rax, rdx, rcx, &stub_call);
- break;
- case Token::BIT_OR:
- __ SmiOr(rax, rdx, rcx);
- break;
- case Token::BIT_AND:
- __ SmiAnd(rax, rdx, rcx);
- break;
- case Token::BIT_XOR:
- __ SmiXor(rax, rdx, rcx);
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- __ bind(&done);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
- OverwriteMode mode) {
- __ pop(rdx);
- TypeRecordingBinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), NULL); // NULL signals no inlined smi code.
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
- // Invalid left-hand sides are rewritten to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->AsProperty();
- if (prop != NULL) {
- assign_type = (prop->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- switch (assign_type) {
- case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
- EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN);
- break;
- }
- case NAMED_PROPERTY: {
- __ push(rax); // Preserve value.
- VisitForAccumulatorValue(prop->obj());
- __ movq(rdx, rax);
- __ pop(rax); // Restore value.
- __ Move(rcx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- break;
- }
- case KEYED_PROPERTY: {
- __ push(rax); // Preserve value.
- if (prop->is_synthetic()) {
- ASSERT(prop->obj()->AsVariableProxy() != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
- { AccumulatorValueContext for_object(this);
- EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
- }
- __ movq(rdx, rax);
- __ Move(rcx, prop->key()->AsLiteral()->handle());
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ movq(rcx, rax);
- __ pop(rdx);
- }
- __ pop(rax); // Restore value.
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- break;
- }
- }
- PrepareForBailoutForId(bailout_ast_id, TOS_REG);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
- // Left-hand sides that rewrite to explicit property accesses do not reach
- // here.
- ASSERT(var != NULL);
- ASSERT(var->is_global() || var->AsSlot() != NULL);
-
- if (var->is_global()) {
- ASSERT(!var->is_this());
- // Assignment to a global variable. Use inline caching for the
- // assignment. Right-hand-side value is passed in rax, variable name in
- // rcx, and the global object on the stack.
- __ Move(rcx, var->name());
- __ movq(rdx, GlobalObjectOperand());
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
-
- } else if (op == Token::INIT_CONST) {
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are able
- // to drill a hole to that function context, even from inside a 'with'
- // context. We thus bypass the normal static scope lookup.
- Slot* slot = var->AsSlot();
- Label skip;
- switch (slot->type()) {
- case Slot::PARAMETER:
- // No const parameters.
- UNREACHABLE();
- break;
- case Slot::LOCAL:
- __ movq(rdx, Operand(rbp, SlotOffset(slot)));
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &skip);
- __ movq(Operand(rbp, SlotOffset(slot)), rax);
- break;
- case Slot::CONTEXT: {
- __ movq(rcx, ContextOperand(rsi, Context::FCONTEXT_INDEX));
- __ movq(rdx, ContextOperand(rcx, slot->index()));
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &skip);
- __ movq(ContextOperand(rcx, slot->index()), rax);
- int offset = Context::SlotOffset(slot->index());
- __ movq(rdx, rax); // Preserve the stored value in eax.
- __ RecordWrite(rcx, offset, rdx, rbx);
- break;
- }
- case Slot::LOOKUP:
- __ push(rax);
- __ push(rsi);
- __ Push(var->name());
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- break;
- }
- __ bind(&skip);
-
- } else if (var->mode() != Variable::CONST) {
- // Perform the assignment for non-const variables. Const assignments
- // are simply skipped.
- Slot* slot = var->AsSlot();
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- // Perform the assignment.
- __ movq(Operand(rbp, SlotOffset(slot)), rax);
- break;
-
- case Slot::CONTEXT: {
- MemOperand target = EmitSlotSearch(slot, rcx);
- // Perform the assignment and issue the write barrier.
- __ movq(target, rax);
- // The value of the assignment is in rax. RecordWrite clobbers its
- // register arguments.
- __ movq(rdx, rax);
- int offset = Context::SlotOffset(slot->index());
- __ RecordWrite(rcx, offset, rdx, rbx);
- break;
- }
-
- case Slot::LOOKUP:
- // Call the runtime for the assignment.
- __ push(rax); // Value.
- __ push(rsi); // Context.
- __ Push(var->name());
- __ Push(Smi::FromInt(strict_mode_flag()));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a named store IC.
- Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
-
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- __ push(result_register());
- __ push(Operand(rsp, kPointerSize)); // Receiver is now under value.
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- __ pop(result_register());
- }
-
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
- __ Move(rcx, prop->key()->AsLiteral()->handle());
- if (expr->ends_initialization_block()) {
- __ movq(rdx, Operand(rsp, 0));
- } else {
- __ pop(rdx);
- }
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
-
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ push(rax); // Result of assignment, saved even if not needed.
- __ push(Operand(rsp, kPointerSize)); // Receiver is under value.
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(rax);
- __ Drop(1);
- }
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a keyed store IC.
-
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- __ push(result_register());
- // Receiver is now under the key and value.
- __ push(Operand(rsp, 2 * kPointerSize));
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- __ pop(result_register());
- }
-
- __ pop(rcx);
- if (expr->ends_initialization_block()) {
- __ movq(rdx, Operand(rsp, 0)); // Leave receiver on the stack for later.
- } else {
- __ pop(rdx);
- }
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
-
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ pop(rdx);
- __ push(rax); // Result of assignment, saved even if not needed.
- __ push(rdx);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(rax);
- }
-
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- Expression* key = expr->key();
-
- if (key->IsPropertyName()) {
- VisitForAccumulatorValue(expr->obj());
- EmitNamedPropertyLoad(expr);
- context()->Plug(rax);
- } else {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ pop(rdx);
- EmitKeyedPropertyLoad(expr);
- context()->Plug(rax);
- }
-}
-
-
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- __ Move(rcx, name);
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic =
- ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
- EmitCallIC(ic, mode);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key,
- RelocInfo::Mode mode) {
- // Load the key.
- VisitForAccumulatorValue(key);
-
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- __ pop(rcx);
- __ push(rax);
- __ push(rcx);
-
- // Load the arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic =
- ISOLATE->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
- __ movq(rcx, Operand(rsp, (arg_count + 1) * kPointerSize)); // Key.
- EmitCallIC(ic, mode);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, rax); // Drop the key still on the stack.
-}
-
-
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
- // Code common for calls using the call stub.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- // Discard the function left on TOS.
- context()->DropAndPlug(1, rax);
-}
-
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
- int arg_count) {
- // Push copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ push(Operand(rsp, arg_count * kPointerSize));
- } else {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- }
-
- // Push the receiver of the enclosing function and do runtime call.
- __ push(Operand(rbp, (2 + scope()->num_parameters()) * kPointerSize));
-
- // Push the strict mode flag.
- __ Push(Smi::FromInt(strict_mode_flag()));
-
- __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
- ? Runtime::kResolvePossiblyDirectEvalNoLookup
- : Runtime::kResolvePossiblyDirectEval, 4);
-}
-
-
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* fun = expr->expression();
- Variable* var = fun->AsVariableProxy()->AsVariable();
-
- if (var != NULL && var->is_possibly_eval()) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
- // call. Then we call the resolved function using the given
- // arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope pos_scope(masm()->positions_recorder());
- VisitForStackValue(fun);
- __ PushRoot(Heap::kUndefinedValueRootIndex); // Reserved receiver slot.
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // If we know that eval can only be shadowed by eval-introduced
- // variables we attempt to load the global eval function directly
- // in generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- Label done;
- if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
- Label slow;
- EmitLoadGlobalSlotCheckExtensions(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &slow);
- // Push the function and resolve eval.
- __ push(rax);
- EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
- __ jmp(&done);
- __ bind(&slow);
- }
-
- // Push copy of the function (found below the arguments) and
- // resolve eval.
- __ push(Operand(rsp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
- if (done.is_linked()) {
- __ bind(&done);
- }
-
- // The runtime call returns a pair of values in rax (function) and
- // rdx (receiver). Touch up the stack with the right values.
- __ movq(Operand(rsp, (arg_count + 0) * kPointerSize), rdx);
- __ movq(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, rax);
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // Call to a global variable.
- // Push global object as receiver for the call IC lookup.
- __ push(GlobalObjectOperand());
- EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (var != NULL && var->AsSlot() != NULL &&
- var->AsSlot()->type() == Slot::LOOKUP) {
- // Call to a lookup slot (dynamically introduced variable).
- Label slow, done;
-
- { PreservePositionScope scope(masm()->positions_recorder());
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &slow,
- &done);
-
- __ bind(&slow);
- }
- // Call the runtime to find the function to call (returned in rax)
- // and the object holding it (returned in rdx).
- __ push(context_register());
- __ Push(var->name());
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ push(rax); // Function.
- __ push(rdx); // Receiver.
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- NearLabel call;
- __ jmp(&call);
- __ bind(&done);
- // Push function.
- __ push(rax);
- // Push global receiver.
- __ movq(rbx, GlobalObjectOperand());
- __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
- __ bind(&call);
- }
-
- EmitCallWithStub(expr);
- } else if (fun->AsProperty() != NULL) {
- // Call to an object property.
- Property* prop = fun->AsProperty();
- Literal* key = prop->key()->AsLiteral();
- if (key != NULL && key->handle()->IsSymbol()) {
- // Call to a named property, use call IC.
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(prop->obj());
- }
- EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
- } else {
- // Call to a keyed property.
- // For a synthetic property use keyed load IC followed by function call,
- // for a regular property use keyed EmitCallIC.
- if (prop->is_synthetic()) {
- // Do not visit the object and key subexpressions (they are shared
- // by all occurrences of the same rewritten parameter).
- ASSERT(prop->obj()->AsVariableProxy() != NULL);
- ASSERT(prop->obj()->AsVariableProxy()->var()->AsSlot() != NULL);
- Slot* slot = prop->obj()->AsVariableProxy()->var()->AsSlot();
- MemOperand operand = EmitSlotSearch(slot, rdx);
- __ movq(rdx, operand);
-
- ASSERT(prop->key()->AsLiteral() != NULL);
- ASSERT(prop->key()->AsLiteral()->handle()->IsSmi());
- __ Move(rax, prop->key()->AsLiteral()->handle());
-
- // Record source code position for IC call.
- SetSourcePosition(prop->position());
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- // Push result (function).
- __ push(rax);
- // Push Global receiver.
- __ movq(rcx, GlobalObjectOperand());
- __ push(FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
- EmitCallWithStub(expr);
- } else {
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(prop->obj());
- }
- EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
- }
- }
- } else {
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(fun);
- }
- // Load global receiver object.
- __ movq(rbx, GlobalObjectOperand());
- __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
- // Emit function call.
- EmitCallWithStub(expr);
- }
-
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- ASSERT(expr->return_is_recorded_);
-#endif
-}
-
-
-void FullCodeGenerator::VisitCallNew(CallNew* expr) {
- Comment cmnt(masm_, "[ CallNew");
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments.
-
- // Push constructor on the stack. If it's not a function it's used as
- // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
- // ignored.
- VisitForStackValue(expr->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetSourcePosition(expr->position());
-
- // Load function and argument count into rdi and rax.
- __ Set(rax, arg_count);
- __ movq(rdi, Operand(rsp, arg_count * kPointerSize));
-
- Handle<Code> construct_builtin =
- isolate()->builtins()->JSConstructCall();
- __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ JumpIfSmi(rax, if_true);
- __ jmp(if_false);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Condition non_negative_smi = masm()->CheckNonNegativeSmi(rax);
- Split(non_negative_smi, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
- __ j(equal, if_true);
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_false);
- __ movzxbq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ cmpq(rbx, Immediate(FIRST_JS_OBJECT_TYPE));
- __ j(below, if_false);
- __ cmpq(rbx, Immediate(LAST_JS_OBJECT_TYPE));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(below_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(above_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(not_zero, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- if (FLAG_debug_code) __ AbortIfSmi(rax);
-
- // Check whether this map has already been checked to be safe for default
- // valueOf.
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ testb(FieldOperand(rbx, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ j(not_zero, if_true);
-
- // Check for fast case object. Generate false result for slow case object.
- __ movq(rcx, FieldOperand(rax, JSObject::kPropertiesOffset));
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ CompareRoot(rcx, Heap::kHashTableMapRootIndex);
- __ j(equal, if_false);
-
- // Look for valueOf symbol in the descriptor array, and indicate false if
- // found. The type is not checked, so if it is a transition it is a false
- // negative.
- __ movq(rbx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
- __ movq(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
- // rbx: descriptor array
- // rcx: length of descriptor array
- // Calculate the end of the descriptor array.
- SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2);
- __ lea(rcx,
- Operand(
- rbx, index.reg, index.scale, FixedArray::kHeaderSize));
- // Calculate location of the first key name.
- __ addq(rbx,
- Immediate(FixedArray::kHeaderSize +
- DescriptorArray::kFirstIndex * kPointerSize));
- // Loop through all the keys in the descriptor array. If one of these is the
- // symbol valueOf the result is false.
- Label entry, loop;
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(rdx, FieldOperand(rbx, 0));
- __ Cmp(rdx, FACTORY->value_of_symbol());
- __ j(equal, if_false);
- __ addq(rbx, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpq(rbx, rcx);
- __ j(not_equal, &loop);
-
- // Reload map as register rbx was used as temporary above.
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
-
- // If a valueOf property is not found on the object check that it's
- // prototype is the un-modified String prototype. If not result is false.
- __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- __ testq(rcx, Immediate(kSmiTagMask));
- __ j(zero, if_false);
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
- __ cmpq(rcx,
- ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, if_false);
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ or_(FieldOperand(rbx, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ jmp(if_true);
-
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, JS_ARRAY_TYPE, rbx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, JS_REGEXP_TYPE, rbx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ movq(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ Cmp(Operand(rax, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &check_frame_marker);
- __ movq(rax, Operand(rax, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ Cmp(Operand(rax, StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ pop(rbx);
- __ cmpq(rax, rbx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in rdx and the formal
- // parameter count in rax.
- VisitForAccumulatorValue(args->at(0));
- __ movq(rdx, rax);
- __ Move(rax, Smi::FromInt(scope()->num_parameters()));
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- NearLabel exit;
- // Get the number of formal parameters.
- __ Move(rax, Smi::FromInt(scope()->num_parameters()));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &exit);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ movq(rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- if (FLAG_debug_code) __ AbortIfNotSmi(rax);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Label done, null, function, non_function_constructor;
-
- VisitForAccumulatorValue(args->at(0));
-
- // If the object is a smi, we return null.
- __ JumpIfSmi(rax, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax); // Map is now in rax.
- __ j(below, &null);
-
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
- __ j(equal, &function);
-
- // Check if the constructor in the map is a function.
- __ movq(rax, FieldOperand(rax, Map::kConstructorOffset));
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
- __ j(not_equal, &non_function_constructor);
-
- // rax now contains the constructor function. Grab the
- // instance class name from there.
- __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rax, FieldOperand(rax, SharedFunctionInfo::kInstanceClassNameOffset));
- __ jmp(&done);
-
- // Functions have class 'Function'.
- __ bind(&function);
- __ Move(rax, isolate()->factory()->function_class_symbol());
- __ jmp(&done);
-
- // Objects with a non-function constructor have class 'Object'.
- __ bind(&non_function_constructor);
- __ Move(rax, isolate()->factory()->Object_symbol());
- __ jmp(&done);
-
- // Non-JS objects have class null.
- __ bind(&null);
- __ LoadRoot(rax, Heap::kNullValueRootIndex);
-
- // All done.
- __ bind(&done);
-
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
- }
-#endif
- // Finally, we're expected to leave a value on the top of the stack.
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ AllocateHeapNumber(rbx, rcx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ movq(rbx, rax);
-
- __ bind(&heapnumber_allocated);
-
- // Return a random uint32 number in rax.
- // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
- __ PrepareCallCFunction(1);
-#ifdef _WIN64
- __ LoadAddress(rcx, ExternalReference::isolate_address());
-#else
- __ LoadAddress(rdi, ExternalReference::isolate_address());
-#endif
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- // Convert 32 random bits in rax to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- __ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm1, rcx);
- __ movd(xmm0, rax);
- __ cvtss2sd(xmm1, xmm1);
- __ xorpd(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
- __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
-
- __ movq(rax, rbx);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
- // Load the arguments on the stack and call the stub.
- SubStringStub stub;
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
- // Load the arguments on the stack and call the stub.
- RegExpExecStub stub;
- ASSERT(args->length() == 4);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- VisitForStackValue(args->at(3));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(rax, &done);
- // If the object is not a value type, return the object.
- __ CmpObjectType(rax, JS_VALUE_TYPE, rbx);
- __ j(not_equal, &done);
- __ movq(rax, FieldOperand(rax, JSValue::kValueOffset));
-
- __ bind(&done);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
- // Load the arguments on the stack and call the runtime function.
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- MathPowStub stub;
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0)); // Load the object.
- VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(rbx); // rax = value. rbx = object.
-
- Label done;
- // If the object is a smi, return the value.
- __ JumpIfSmi(rbx, &done);
-
- // If the object is not a value type, return the value.
- __ CmpObjectType(rbx, JS_VALUE_TYPE, rcx);
- __ j(not_equal, &done);
-
- // Store the value.
- __ movq(FieldOperand(rbx, JSValue::kValueOffset), rax);
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- __ movq(rdx, rax);
- __ RecordWrite(rbx, JSValue::kValueOffset, rdx, rcx);
-
- __ bind(&done);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
-
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
-
- NumberToStringStub stub;
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label done;
- StringCharFromCodeGenerator generator(rax, rbx);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(rbx);
-}
-
-
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = rbx;
- Register index = rax;
- Register scratch = rcx;
- Register result = rdx;
-
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharCodeAtGenerator generator(object,
- index,
- scratch,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ LoadRoot(result, Heap::kNanValueRootIndex);
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move the undefined value into the result register, which will
- // trigger conversion.
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = rbx;
- Register index = rax;
- Register scratch1 = rcx;
- Register scratch2 = rdx;
- Register result = rax;
-
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch1,
- scratch2,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result, Heap::kEmptyStringRootIndex);
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ Move(result, Smi::FromInt(0));
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringCompareStub stub;
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
- // Load the argument on the stack and call the runtime function.
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sqrt, 1);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
- ASSERT(args->length() >= 2);
-
- int arg_count = args->length() - 2; // For receiver and function.
- VisitForStackValue(args->at(0)); // Receiver.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i + 1));
- }
- VisitForAccumulatorValue(args->at(arg_count + 1)); // Function.
-
- // InvokeFunction requires function in rdi. Move it in there.
- if (!result_register().is(rdi)) __ movq(rdi, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(rdi, count, CALL_FUNCTION);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
- RegExpConstructResultStub stub;
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- Label done;
- Label slow_case;
- Register object = rax;
- Register index_1 = rbx;
- Register index_2 = rcx;
- Register elements = rdi;
- Register temp = rdx;
- __ movq(object, Operand(rsp, 2 * kPointerSize));
- // Fetch the map and check if array is in fast case.
- // Check that object doesn't require security checks and
- // has no indexed interceptor.
- __ CmpObjectType(object, JS_ARRAY_TYPE, temp);
- __ j(not_equal, &slow_case);
- __ testb(FieldOperand(temp, Map::kBitFieldOffset),
- Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
- __ j(not_zero, &slow_case);
-
- // Check the object's elements are in fast case and writable.
- __ movq(elements, FieldOperand(object, JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &slow_case);
-
- // Check that both indices are smis.
- __ movq(index_1, Operand(rsp, 1 * kPointerSize));
- __ movq(index_2, Operand(rsp, 0 * kPointerSize));
- __ JumpIfNotBothSmi(index_1, index_2, &slow_case);
-
- // Check that both indices are valid.
- // The JSArray length field is a smi since the array is in fast case mode.
- __ movq(temp, FieldOperand(object, JSArray::kLengthOffset));
- __ SmiCompare(temp, index_1);
- __ j(below_equal, &slow_case);
- __ SmiCompare(temp, index_2);
- __ j(below_equal, &slow_case);
-
- __ SmiToInteger32(index_1, index_1);
- __ SmiToInteger32(index_2, index_2);
- // Bring addresses into index1 and index2.
- __ lea(index_1, FieldOperand(elements, index_1, times_pointer_size,
- FixedArray::kHeaderSize));
- __ lea(index_2, FieldOperand(elements, index_2, times_pointer_size,
- FixedArray::kHeaderSize));
-
- // Swap elements. Use object and temp as scratch registers.
- __ movq(object, Operand(index_1, 0));
- __ movq(temp, Operand(index_2, 0));
- __ movq(Operand(index_2, 0), object);
- __ movq(Operand(index_1, 0), temp);
-
- Label new_space;
- __ InNewSpace(elements, temp, equal, &new_space);
-
- __ movq(object, elements);
- __ RecordWriteHelper(object, index_1, temp);
- __ RecordWriteHelper(elements, index_2, temp);
-
- __ bind(&new_space);
- // We are done. Drop elements from the stack, and return undefined.
- __ addq(rsp, Immediate(3 * kPointerSize));
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
-
- __ bind(&slow_case);
- __ CallRuntime(Runtime::kSwapElements, 3);
-
- __ bind(&done);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- isolate()->global_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- context()->Plug(rax);
- return;
- }
-
- VisitForAccumulatorValue(args->at(1));
-
- Register key = rax;
- Register cache = rbx;
- Register tmp = rcx;
- __ movq(cache, ContextOperand(rsi, Context::GLOBAL_INDEX));
- __ movq(cache,
- FieldOperand(cache, GlobalObject::kGlobalContextOffset));
- __ movq(cache,
- ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ movq(cache,
- FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
-
- NearLabel done, not_found;
- // tmp now holds finger offset as a smi.
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ movq(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
- SmiIndex index =
- __ SmiToIndex(kScratchRegister, tmp, kPointerSizeLog2);
- __ cmpq(key, FieldOperand(cache,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
- __ j(not_equal, &not_found);
- __ movq(rax, FieldOperand(cache,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize + kPointerSize));
- __ jmp(&done);
-
- __ bind(&not_found);
- // Call runtime to perform the lookup.
- __ push(cache);
- __ push(key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
-
- __ bind(&done);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Register right = rax;
- Register left = rbx;
- Register tmp = rcx;
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
- __ pop(left);
-
- NearLabel done, fail, ok;
- __ cmpq(left, right);
- __ j(equal, &ok);
- // Fail if either is a non-HeapObject.
- Condition either_smi = masm()->CheckEitherSmi(left, right, tmp);
- __ j(either_smi, &fail);
- __ j(zero, &fail);
- __ movq(tmp, FieldOperand(left, HeapObject::kMapOffset));
- __ cmpb(FieldOperand(tmp, Map::kInstanceTypeOffset),
- Immediate(JS_REGEXP_TYPE));
- __ j(not_equal, &fail);
- __ cmpq(tmp, FieldOperand(right, HeapObject::kMapOffset));
- __ j(not_equal, &fail);
- __ movq(tmp, FieldOperand(left, JSRegExp::kDataOffset));
- __ cmpq(tmp, FieldOperand(right, JSRegExp::kDataOffset));
- __ j(equal, &ok);
- __ bind(&fail);
- __ Move(rax, isolate()->factory()->false_value());
- __ jmp(&done);
- __ bind(&ok);
- __ Move(rax, isolate()->factory()->true_value());
- __ bind(&done);
-
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ testl(FieldOperand(rax, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ j(zero, if_true);
- __ jmp(if_false);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- if (FLAG_debug_code) {
- __ AbortIfNotString(rax);
- }
-
- __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
- ASSERT(String::kHashShift >= kSmiTagSize);
- __ IndexFromHash(rax, rax);
-
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
- Label bailout, return_result, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
- ASSERT(args->length() == 2);
- // We will leave the separator on the stack until the end of the function.
- VisitForStackValue(args->at(1));
- // Load this to rax (= array)
- VisitForAccumulatorValue(args->at(0));
- // All aliases of the same register have disjoint lifetimes.
- Register array = rax;
- Register elements = no_reg; // Will be rax.
-
- Register index = rdx;
-
- Register string_length = rcx;
-
- Register string = rsi;
-
- Register scratch = rbx;
-
- Register array_length = rdi;
- Register result_pos = no_reg; // Will be rdi.
-
- Operand separator_operand = Operand(rsp, 2 * kPointerSize);
- Operand result_operand = Operand(rsp, 1 * kPointerSize);
- Operand array_length_operand = Operand(rsp, 0 * kPointerSize);
- // Separator operand is already pushed. Make room for the two
- // other stack fields, and clear the direction flag in anticipation
- // of calling CopyBytes.
- __ subq(rsp, Immediate(2 * kPointerSize));
- __ cld();
- // Check that the array is a JSArray
- __ JumpIfSmi(array, &bailout);
- __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &bailout);
-
- // Check that the array has fast elements.
- __ testb(FieldOperand(scratch, Map::kBitField2Offset),
- Immediate(1 << Map::kHasFastElements));
- __ j(zero, &bailout);
-
- // Array has fast elements, so its length must be a smi.
- // If the array has length zero, return the empty string.
- __ movq(array_length, FieldOperand(array, JSArray::kLengthOffset));
- __ SmiCompare(array_length, Smi::FromInt(0));
- __ j(not_zero, &non_trivial_array);
- __ LoadRoot(rax, Heap::kEmptyStringRootIndex);
- __ jmp(&return_result);
-
- // Save the array length on the stack.
- __ bind(&non_trivial_array);
- __ SmiToInteger32(array_length, array_length);
- __ movl(array_length_operand, array_length);
-
- // Save the FixedArray containing array's elements.
- // End of array's live range.
- elements = array;
- __ movq(elements, FieldOperand(array, JSArray::kElementsOffset));
- array = no_reg;
-
-
- // Check that all array elements are sequential ASCII strings, and
- // accumulate the sum of their lengths, as a smi-encoded value.
- __ Set(index, 0);
- __ Set(string_length, 0);
- // Loop condition: while (index < array_length).
- // Live loop registers: index(int32), array_length(int32), string(String*),
- // scratch, string_length(int32), elements(FixedArray*).
- if (FLAG_debug_code) {
- __ cmpq(index, array_length);
- __ Assert(below, "No empty arrays here in EmitFastAsciiArrayJoin");
- }
- __ bind(&loop);
- __ movq(string, FieldOperand(elements,
- index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(string, &bailout);
- __ movq(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ andb(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmpb(scratch, Immediate(kStringTag | kAsciiStringTag | kSeqStringTag));
- __ j(not_equal, &bailout);
- __ AddSmiField(string_length,
- FieldOperand(string, SeqAsciiString::kLengthOffset));
- __ j(overflow, &bailout);
- __ incl(index);
- __ cmpl(index, array_length);
- __ j(less, &loop);
-
- // Live registers:
- // string_length: Sum of string lengths.
- // elements: FixedArray of strings.
- // index: Array length.
- // array_length: Array length.
-
- // If array_length is 1, return elements[0], a string.
- __ cmpl(array_length, Immediate(1));
- __ j(not_equal, &not_size_one_array);
- __ movq(rax, FieldOperand(elements, FixedArray::kHeaderSize));
- __ jmp(&return_result);
-
- __ bind(&not_size_one_array);
-
- // End of array_length live range.
- result_pos = array_length;
- array_length = no_reg;
-
- // Live registers:
- // string_length: Sum of string lengths.
- // elements: FixedArray of strings.
- // index: Array length.
-
- // Check that the separator is a sequential ASCII string.
- __ movq(string, separator_operand);
- __ JumpIfSmi(string, &bailout);
- __ movq(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ andb(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmpb(scratch, Immediate(kStringTag | kAsciiStringTag | kSeqStringTag));
- __ j(not_equal, &bailout);
-
- // Live registers:
- // string_length: Sum of string lengths.
- // elements: FixedArray of strings.
- // index: Array length.
- // string: Separator string.
-
- // Add (separator length times (array_length - 1)) to string_length.
- __ SmiToInteger32(scratch,
- FieldOperand(string, SeqAsciiString::kLengthOffset));
- __ decl(index);
- __ imull(scratch, index);
- __ j(overflow, &bailout);
- __ addl(string_length, scratch);
- __ j(overflow, &bailout);
-
- // Live registers and stack values:
- // string_length: Total length of result string.
- // elements: FixedArray of strings.
- __ AllocateAsciiString(result_pos, string_length, scratch,
- index, string, &bailout);
- __ movq(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
-
- __ movq(string, separator_operand);
- __ SmiCompare(FieldOperand(string, SeqAsciiString::kLengthOffset),
- Smi::FromInt(1));
- __ j(equal, &one_char_separator);
- __ j(greater, &long_separator);
-
-
- // Empty separator case:
- __ Set(index, 0);
- __ movl(scratch, array_length_operand);
- __ jmp(&loop_1_condition);
- // Loop condition: while (index < array_length).
- __ bind(&loop_1);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
- // elements: the FixedArray of strings we are joining.
- // scratch: array length.
-
- // Get string = array[index].
- __ movq(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ SmiToInteger32(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ CopyBytes(result_pos, string, string_length);
- __ incl(index);
- __ bind(&loop_1_condition);
- __ cmpl(index, scratch);
- __ j(less, &loop_1); // Loop while (index < array_length).
- __ jmp(&done);
-
- // Generic bailout code used from several places.
- __ bind(&bailout);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ jmp(&return_result);
-
-
- // One-character separator case
- __ bind(&one_char_separator);
- // Get the separator ascii character value.
- // Register "string" holds the separator.
- __ movzxbl(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ Set(index, 0);
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator
- __ jmp(&loop_2_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_2);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // elements: The FixedArray of strings we are joining.
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
- // scratch: Separator character.
-
- // Copy the separator character to the result.
- __ movb(Operand(result_pos, 0), scratch);
- __ incq(result_pos);
-
- __ bind(&loop_2_entry);
- // Get string = array[index].
- __ movq(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ SmiToInteger32(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ CopyBytes(result_pos, string, string_length);
- __ incl(index);
- __ cmpl(index, array_length_operand);
- __ j(less, &loop_2); // End while (index < length).
- __ jmp(&done);
-
-
- // Long separator case (separator is more than one character).
- __ bind(&long_separator);
-
- // Make elements point to end of elements array, and index
- // count from -array_length to zero, so we don't need to maintain
- // a loop limit.
- __ movl(index, array_length_operand);
- __ lea(elements, FieldOperand(elements, index, times_pointer_size,
- FixedArray::kHeaderSize));
- __ neg(index);
-
- // Replace separator string with pointer to its first character, and
- // make scratch be its length.
- __ movq(string, separator_operand);
- __ SmiToInteger32(scratch,
- FieldOperand(string, String::kLengthOffset));
- __ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ movq(separator_operand, string);
-
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator
- __ jmp(&loop_3_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_3);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
- // scratch: Separator length.
- // separator_operand (rsp[0x10]): Address of first char of separator.
-
- // Copy the separator to the result.
- __ movq(string, separator_operand);
- __ movl(string_length, scratch);
- __ CopyBytes(result_pos, string, string_length, 2);
-
- __ bind(&loop_3_entry);
- // Get string = array[index].
- __ movq(string, Operand(elements, index, times_pointer_size, 0));
- __ SmiToInteger32(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ CopyBytes(result_pos, string, string_length);
- __ incq(index);
- __ j(not_equal, &loop_3); // Loop while (index < 0).
-
- __ bind(&done);
- __ movq(rax, result_operand);
-
- __ bind(&return_result);
- // Drop temp values from the stack, and restore context register.
- __ addq(rsp, Immediate(3 * kPointerSize));
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
- Comment cmnt(masm_, "[ InlineRuntimeCall");
- EmitInlineRuntimeCall(expr);
- return;
- }
-
- Comment cmnt(masm_, "[ CallRuntime");
- ZoneList<Expression*>* args = expr->arguments();
-
- if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
- __ movq(rax, GlobalObjectOperand());
- __ push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- if (expr->is_jsruntime()) {
- // Call the JS runtime function using a call IC.
- __ Move(rcx, expr->name());
- InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic =
- ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- } else {
- __ CallRuntime(expr->function(), arg_count);
- }
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::DELETE: {
- Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
- Property* prop = expr->expression()->AsProperty();
- Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
-
- if (prop != NULL) {
- if (prop->is_synthetic()) {
- // Result of deleting parameters is false, even when they rewrite
- // to accesses on the arguments object.
- context()->Plug(false);
- } else {
- VisitForStackValue(prop->obj());
- VisitForStackValue(prop->key());
- __ Push(Smi::FromInt(strict_mode_flag()));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(rax);
- }
- } else if (var != NULL) {
- // Delete of an unqualified identifier is disallowed in strict mode
- // but "delete this" is.
- ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
- if (var->is_global()) {
- __ push(GlobalObjectOperand());
- __ Push(var->name());
- __ Push(Smi::FromInt(kNonStrictMode));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(rax);
- } else if (var->AsSlot() != NULL &&
- var->AsSlot()->type() != Slot::LOOKUP) {
- // Result of deleting non-global, non-dynamic variables is false.
- // The subexpression does not have side effects.
- context()->Plug(false);
- } else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ push(context_register());
- __ Push(var->name());
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
- context()->Plug(rax);
- }
- } else {
- // Result of deleting non-property, non-variable reference is true.
- // The subexpression may have side effects.
- VisitForEffect(expr->expression());
- context()->Plug(true);
- }
- break;
- }
-
- case Token::VOID: {
- Comment cmnt(masm_, "[ UnaryOperation (VOID)");
- VisitForEffect(expr->expression());
- context()->Plug(Heap::kUndefinedValueRootIndex);
- break;
- }
-
- case Token::NOT: {
- Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- if (context()->IsEffect()) {
- // Unary NOT has no side effects so it's only necessary to visit the
- // subexpression. Match the optimizing compiler by not branching.
- VisitForEffect(expr->expression());
- } else {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- // Notice that the labels are swapped.
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- VisitForControl(expr->expression(), if_true, if_false, fall_through);
- context()->Plug(if_false, if_true); // Labels swapped.
- }
- break;
- }
-
- case Token::TYPEOF: {
- Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- { StackValueContext context(this);
- VisitForTypeofValue(expr->expression());
- }
- __ CallRuntime(Runtime::kTypeof, 1);
- context()->Plug(rax);
- break;
- }
-
- case Token::ADD: {
- Comment cmt(masm_, "[ UnaryOperation (ADD)");
- VisitForAccumulatorValue(expr->expression());
- Label no_conversion;
- Condition is_smi = masm_->CheckSmi(result_register());
- __ j(is_smi, &no_conversion);
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
- context()->Plug(result_register());
- break;
- }
-
- case Token::SUB: {
- Comment cmt(masm_, "[ UnaryOperation (SUB)");
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
- // GenericUnaryOpStub expects the argument to be in the
- // accumulator register rax.
- VisitForAccumulatorValue(expr->expression());
- __ CallStub(&stub);
- context()->Plug(rax);
- break;
- }
-
- case Token::BIT_NOT: {
- Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
- // The generic unary operation stub expects the argument to be
- // in the accumulator register rax.
- VisitForAccumulatorValue(expr->expression());
- Label done;
- bool inline_smi_case = ShouldInlineSmiCase(expr->op());
- if (inline_smi_case) {
- Label call_stub;
- __ JumpIfNotSmi(rax, &call_stub);
- __ SmiNot(rax, rax);
- __ jmp(&done);
- __ bind(&call_stub);
- }
- bool overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode mode =
- overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpFlags flags = inline_smi_case
- ? NO_UNARY_SMI_CODE_IN_STUB
- : NO_UNARY_FLAGS;
- GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
- __ CallStub(&stub);
- __ bind(&done);
- context()->Plug(rax);
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- Comment cmnt(masm_, "[ CountOperation");
- SetSourcePosition(expr->position());
-
- // Invalid left-hand-sides are rewritten to have a 'throw
- // ReferenceError' as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
- // Expression can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->expression()->AsProperty();
- // In case of a property we use the uninitialized expression context
- // of the key to detect a named property.
- if (prop != NULL) {
- assign_type =
- (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
- }
-
- // Evaluate expression and get value.
- if (assign_type == VARIABLE) {
- ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
- AccumulatorValueContext context(this);
- EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
- } else {
- // Reserve space for result of postfix operation.
- if (expr->is_postfix() && !context()->IsEffect()) {
- __ Push(Smi::FromInt(0));
- }
- if (assign_type == NAMED_PROPERTY) {
- VisitForAccumulatorValue(prop->obj());
- __ push(rax); // Copy of receiver, needed for later store.
- EmitNamedPropertyLoad(prop);
- } else {
- if (prop->is_arguments_access()) {
- VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), rcx);
- __ push(slot_operand);
- __ Move(rax, prop->key()->AsLiteral()->handle());
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- }
- __ movq(rdx, Operand(rsp, 0)); // Leave receiver on stack
- __ push(rax); // Copy of key, needed for later store.
- EmitKeyedPropertyLoad(prop);
- }
- }
-
- // We need a second deoptimization point after loading the value
- // in case evaluating the property load my have a side effect.
- if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), TOS_REG);
- } else {
- PrepareForBailout(expr->increment(), TOS_REG);
- }
-
- // Call ToNumber only if operand is not a smi.
- NearLabel no_conversion;
- Condition is_smi;
- is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, &no_conversion);
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(rax);
- break;
- case NAMED_PROPERTY:
- __ movq(Operand(rsp, kPointerSize), rax);
- break;
- case KEYED_PROPERTY:
- __ movq(Operand(rsp, 2 * kPointerSize), rax);
- break;
- }
- }
- }
-
- // Inline smi case if we are in a loop.
- NearLabel stub_call, done;
- JumpPatchSite patch_site(masm_);
-
- if (ShouldInlineSmiCase(expr->op())) {
- if (expr->op() == Token::INC) {
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- } else {
- __ SmiSubConstant(rax, rax, Smi::FromInt(1));
- }
- __ j(overflow, &stub_call);
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(rax, &done);
-
- __ bind(&stub_call);
- // Call stub. Undo operation first.
- if (expr->op() == Token::INC) {
- __ SmiSubConstant(rax, rax, Smi::FromInt(1));
- } else {
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- }
- }
-
- // Record position before stub call.
- SetSourcePosition(expr->position());
-
- // Call stub for +1/-1.
- TypeRecordingBinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
- if (expr->op() == Token::INC) {
- __ Move(rdx, Smi::FromInt(1));
- } else {
- __ movq(rdx, rax);
- __ Move(rax, Smi::FromInt(1));
- }
- EmitCallIC(stub.GetCode(), &patch_site);
- __ bind(&done);
-
- // Store the value returned in rax.
- switch (assign_type) {
- case VARIABLE:
- if (expr->is_postfix()) {
- // Perform the assignment as if via '='.
- { EffectContext context(this);
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context.Plug(rax);
- }
- // For all contexts except kEffect: We have the result on
- // top of the stack.
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- // Perform the assignment as if via '='.
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(rax);
- }
- break;
- case NAMED_PROPERTY: {
- __ Move(rcx, prop->key()->AsLiteral()->handle());
- __ pop(rdx);
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(rax);
- }
- break;
- }
- case KEYED_PROPERTY: {
- __ pop(rcx);
- __ pop(rdx);
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(rax);
- }
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
- VariableProxy* proxy = expr->AsVariableProxy();
- ASSERT(!context()->IsEffect());
- ASSERT(!context()->IsTest());
-
- if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
- Comment cmnt(masm_, "Global variable");
- __ Move(rcx, proxy->name());
- __ movq(rax, GlobalObjectOperand());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- PrepareForBailout(expr, TOS_REG);
- context()->Plug(rax);
- } else if (proxy != NULL &&
- proxy->var()->AsSlot() != NULL &&
- proxy->var()->AsSlot()->type() == Slot::LOOKUP) {
- Label done, slow;
-
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- Slot* slot = proxy->var()->AsSlot();
- EmitDynamicLoadFromSlotFastCase(slot, INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- __ push(rsi);
- __ Push(proxy->name());
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- PrepareForBailout(expr, TOS_REG);
- __ bind(&done);
-
- context()->Plug(rax);
- } else {
- // This expression cannot throw a reference error at the top level.
- context()->HandleExpression(expr);
- }
-}
-
-
-bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
- Expression* left,
- Expression* right,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (op != Token::EQ && op != Token::EQ_STRICT) return false;
-
- // Check for the pattern: typeof <expression> == <string literal>.
- Literal* right_literal = right->AsLiteral();
- if (right_literal == NULL) return false;
- Handle<Object> right_literal_value = right_literal->handle();
- if (!right_literal_value->IsString()) return false;
- UnaryOperation* left_unary = left->AsUnaryOperation();
- if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
- Handle<String> check = Handle<String>::cast(right_literal_value);
-
- { AccumulatorValueContext context(this);
- VisitForTypeofValue(left_unary->expression());
- }
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
- if (check->Equals(isolate()->heap()->number_symbol())) {
- __ JumpIfSmi(rax, if_true);
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
- __ CompareRoot(rax, Heap::kHeapNumberMapRootIndex);
- Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_symbol())) {
- __ JumpIfSmi(rax, if_false);
- // Check for undetectable objects => false.
- __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
- __ j(above_equal, if_false);
- __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- Split(zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
- __ CompareRoot(rax, Heap::kTrueValueRootIndex);
- __ j(equal, if_true);
- __ CompareRoot(rax, Heap::kFalseValueRootIndex);
- Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(equal, if_true);
- __ JumpIfSmi(rax, if_false);
- // Check for undetectable objects => true.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- Split(not_zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_symbol())) {
- __ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, FIRST_FUNCTION_CLASS_TYPE, rdx);
- Split(above_equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_symbol())) {
- __ JumpIfSmi(rax, if_false);
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
- __ j(equal, if_true);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdx);
- __ j(below, if_false);
- __ CmpInstanceType(rdx, FIRST_FUNCTION_CLASS_TYPE);
- __ j(above_equal, if_false);
- // Check for undetectable objects => false.
- __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- Split(zero, if_true, if_false, fall_through);
- } else {
- if (if_false != fall_through) __ jmp(if_false);
- }
-
- return true;
-}
-
-
-void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- Comment cmnt(masm_, "[ CompareOperation");
- SetSourcePosition(expr->position());
-
- // Always perform the comparison for its control flow. Pack the result
- // into the expression's context after the comparison is performed.
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- Token::Value op = expr->op();
- Expression* left = expr->left();
- Expression* right = expr->right();
- if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
- context()->Plug(if_true, if_false);
- return;
- }
-
- VisitForStackValue(expr->left());
- switch (op) {
- case Token::IN:
- VisitForStackValue(expr->right());
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- __ CompareRoot(rax, Heap::kTrueValueRootIndex);
- Split(equal, if_true, if_false, fall_through);
- break;
-
- case Token::INSTANCEOF: {
- VisitForStackValue(expr->right());
- InstanceofStub stub(InstanceofStub::kNoFlags);
- __ CallStub(&stub);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ testq(rax, rax);
- // The stub returns 0 for true.
- Split(zero, if_true, if_false, fall_through);
- break;
- }
-
- default: {
- VisitForAccumulatorValue(expr->right());
- Condition cc = no_condition;
- bool strict = false;
- switch (op) {
- case Token::EQ_STRICT:
- strict = true;
- // Fall through.
- case Token::EQ:
- cc = equal;
- __ pop(rdx);
- break;
- case Token::LT:
- cc = less;
- __ pop(rdx);
- break;
- case Token::GT:
- // Reverse left and right sizes to obtain ECMA-262 conversion order.
- cc = less;
- __ movq(rdx, result_register());
- __ pop(rax);
- break;
- case Token::LTE:
- // Reverse left and right sizes to obtain ECMA-262 conversion order.
- cc = greater_equal;
- __ movq(rdx, result_register());
- __ pop(rax);
- break;
- case Token::GTE:
- cc = greater_equal;
- __ pop(rdx);
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
-
- bool inline_smi_code = ShouldInlineSmiCase(op);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- NearLabel slow_case;
- __ movq(rcx, rdx);
- __ or_(rcx, rax);
- patch_site.EmitJumpIfNotSmi(rcx, &slow_case);
- __ cmpq(rdx, rax);
- Split(cc, if_true, if_false, NULL);
- __ bind(&slow_case);
- }
-
- // Record position and call the compare IC.
- SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
- EmitCallIC(ic, &patch_site);
-
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ testq(rax, rax);
- Split(cc, if_true, if_false, fall_through);
- }
- }
-
- // Convert the result of the comparison into one expected for this
- // expression's context.
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
- Comment cmnt(masm_, "[ CompareToNull");
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- VisitForAccumulatorValue(expr->expression());
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
- if (expr->is_strict()) {
- Split(equal, if_true, if_false, fall_through);
- } else {
- __ j(equal, if_true);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(equal, if_true);
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, if_false);
- // It can be an undetectable object.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- Split(not_zero, if_true, if_false, fall_through);
- }
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(rax);
-}
-
-
-Register FullCodeGenerator::result_register() {
- return rax;
-}
-
-
-Register FullCodeGenerator::context_register() {
- return rsi;
-}
-
-
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
- ASSERT(mode == RelocInfo::CODE_TARGET ||
- mode == RelocInfo::CODE_TARGET_CONTEXT);
- Counters* counters = isolate()->counters();
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(counters->named_load_full(), 1);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(counters->keyed_load_full(), 1);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(counters->named_store_full(), 1);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(counters->keyed_store_full(), 1);
- default:
- break;
- }
-
- __ call(ic, mode);
-
- // Crankshaft doesn't need patching of inlined loads and stores.
- // When compiling the snapshot we need to produce code that works
- // with and without Crankshaft.
- if (V8::UseCrankshaft() && !Serializer::enabled()) {
- return;
- }
-
- // If we're calling a (keyed) load or store stub, we have to mark
- // the call as containing no inlined code so we will not attempt to
- // patch it.
- switch (ic->kind()) {
- case Code::LOAD_IC:
- case Code::KEYED_LOAD_IC:
- case Code::STORE_IC:
- case Code::KEYED_STORE_IC:
- __ nop(); // Signals no inlined code.
- break;
- default:
- // Do nothing.
- break;
- }
-}
-
-
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
- Counters* counters = isolate()->counters();
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(counters->named_load_full(), 1);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(counters->keyed_load_full(), 1);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(counters->named_store_full(), 1);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(counters->keyed_store_full(), 1);
- default:
- break;
- }
-
- __ call(ic, RelocInfo::CODE_TARGET);
- if (patch_site != NULL && patch_site->is_bound()) {
- patch_site->EmitPatchInfo();
- } else {
- __ nop(); // Signals no inlined code.
- }
-}
-
-
-void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- ASSERT(IsAligned(frame_offset, kPointerSize));
- __ movq(Operand(rbp, frame_offset), value);
-}
-
-
-void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ movq(dst, ContextOperand(rsi, context_index));
-}
-
-
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-
-void FullCodeGenerator::EnterFinallyBlock() {
- ASSERT(!result_register().is(rdx));
- ASSERT(!result_register().is(rcx));
- // Cook return address on top of stack (smi encoded Code* delta)
- __ movq(rdx, Operand(rsp, 0));
- __ Move(rcx, masm_->CodeObject());
- __ subq(rdx, rcx);
- __ Integer32ToSmi(rdx, rdx);
- __ movq(Operand(rsp, 0), rdx);
- // Store result register while executing finally block.
- __ push(result_register());
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
- ASSERT(!result_register().is(rdx));
- ASSERT(!result_register().is(rcx));
- // Restore result register from stack.
- __ pop(result_register());
- // Uncook return address.
- __ movq(rdx, Operand(rsp, 0));
- __ SmiToInteger32(rdx, rdx);
- __ Move(rcx, masm_->CodeObject());
- __ addq(rdx, rcx);
- __ movq(Operand(rsp, 0), rdx);
- // And return.
- __ ret(0);
-}
-
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/ic-x64.cc b/src/3rdparty/v8/src/x64/ic-x64.cc
deleted file mode 100644
index 9180465..0000000
--- a/src/3rdparty/v8/src/x64/ic-x64.cc
+++ /dev/null
@@ -1,1752 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmpb(type, Immediate(JS_GLOBAL_OBJECT_TYPE));
- __ j(equal, global_object);
- __ cmpb(type, Immediate(JS_BUILTINS_OBJECT_TYPE));
- __ j(equal, global_object);
- __ cmpb(type, Immediate(JS_GLOBAL_PROXY_TYPE));
- __ j(equal, global_object);
-}
-
-
-// Generated code falls through if the receiver is a regular non-global
-// JS object with slow properties and no interceptors.
-static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register r0,
- Register r1,
- Label* miss) {
- // Register usage:
- // receiver: holds the receiver on entry and is unchanged.
- // r0: used to hold receiver instance type.
- // Holds the property dictionary on fall through.
- // r1: used to hold receivers map.
-
- __ JumpIfSmi(receiver, miss);
-
- // Check that the receiver is a valid JS object.
- __ movq(r1, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movb(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
- __ cmpb(r0, Immediate(FIRST_JS_OBJECT_TYPE));
- __ j(below, miss);
-
- // If this assert fails, we have to check upper bound too.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-
- GenerateGlobalInstanceTypeCheck(masm, r0, miss);
-
- // Check for non-global object that requires access check.
- __ testb(FieldOperand(r1, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasNamedInterceptor)));
- __ j(not_zero, miss);
-
- __ movq(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ CompareRoot(FieldOperand(r0, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, miss);
-}
-
-
-// Probe the string dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found leaving the
-// index into the dictionary in |r1|. Jump to the |miss| label
-// otherwise.
-static void GenerateStringDictionaryProbes(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1) {
- // Assert that name contains a string.
- if (FLAG_debug_code) __ AbortIfNotString(name);
-
- // Compute the capacity mask.
- const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
- __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
- __ decl(r0);
-
- // Generate an unrolled loop that performs a few probes before
- // giving up. Measurements done on Gmail indicate that 2 probes
- // cover ~93% of loads from dictionaries.
- static const int kProbes = 4;
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- for (int i = 0; i < kProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
- __ shrl(r1, Immediate(String::kHashShift));
- if (i > 0) {
- __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
- }
- __ and_(r1, r0);
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
-
- // Check if the key is identical to the name.
- __ cmpq(name, Operand(elements, r1, times_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- if (i != kProbes - 1) {
- __ j(equal, done);
- } else {
- __ j(not_equal, miss);
- }
- }
-}
-
-
-// Helper function used to load a property from a dictionary backing storage.
-// This function may return false negatives, so miss_label
-// must always call a backup property load that is complete.
-// This function is safe to call if name is not a symbol, and will jump to
-// the miss_label in that case.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register r0,
- Register r1,
- Register result) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is unchanged.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // r0 - used to hold the capacity of the property dictionary.
- //
- // r1 - used to hold the index into the property dictionary.
- //
- // result - holds the result on exit if the load succeeded.
-
- Label done;
-
- // Probe the dictionary.
- GenerateStringDictionaryProbes(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
-
- // If probing finds an entry in the dictionary, r0 contains the
- // index into the dictionary. Check that the value is a normal
- // property.
- __ bind(&done);
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ Test(Operand(elements, r1, times_pointer_size,
- kDetailsOffset - kHeapObjectTag),
- Smi::FromInt(PropertyDetails::TypeField::mask()));
- __ j(not_zero, miss_label);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ movq(result,
- Operand(elements, r1, times_pointer_size,
- kValueOffset - kHeapObjectTag));
-}
-
-
-// Helper function used to store a property to a dictionary backing
-// storage. This function may fail to store a property even though it
-// is in the dictionary, so code at miss_label must always call a
-// backup property store that is complete. This function is safe to
-// call if name is not a symbol, and will jump to the miss_label in
-// that case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register value,
- Register scratch0,
- Register scratch1) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is clobbered.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // value - holds the value to store and is unchanged.
- //
- // scratch0 - used for index into the property dictionary and is clobbered.
- //
- // scratch1 - used to hold the capacity of the property dictionary and is
- // clobbered.
- Label done;
-
- // Probe the dictionary.
- GenerateStringDictionaryProbes(masm,
- miss_label,
- &done,
- elements,
- name,
- scratch0,
- scratch1);
-
- // If probing finds an entry in the dictionary, scratch0 contains the
- // index into the dictionary. Check that the value is a normal
- // property that is not read only.
- __ bind(&done);
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- const int kTypeAndReadOnlyMask
- = (PropertyDetails::TypeField::mask() |
- PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
- __ Test(Operand(elements,
- scratch1,
- times_pointer_size,
- kDetailsOffset - kHeapObjectTag),
- Smi::FromInt(kTypeAndReadOnlyMask));
- __ j(not_zero, miss_label);
-
- // Store the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ lea(scratch1, Operand(elements,
- scratch1,
- times_pointer_size,
- kValueOffset - kHeapObjectTag));
- __ movq(Operand(scratch1, 0), value);
-
- // Update write barrier. Make sure not to clobber the value.
- __ movq(scratch0, value);
- __ RecordWrite(elements, scratch1, scratch0);
-}
-
-
-static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
- Register result) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // Scratch registers:
- //
- // r0 - holds the untagged key on entry and holds the hash once computed.
- //
- // r1 - used to hold the capacity mask of the dictionary
- //
- // r2 - used for the index into the dictionary.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'key' or 'result'.
- // Unchanged on bailout so 'key' or 'result' can be used
- // in further computation.
-
- Label done;
-
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- __ movl(r1, r0);
- __ notl(r0);
- __ shll(r1, Immediate(15));
- __ addl(r0, r1);
- // hash = hash ^ (hash >> 12);
- __ movl(r1, r0);
- __ shrl(r1, Immediate(12));
- __ xorl(r0, r1);
- // hash = hash + (hash << 2);
- __ leal(r0, Operand(r0, r0, times_4, 0));
- // hash = hash ^ (hash >> 4);
- __ movl(r1, r0);
- __ shrl(r1, Immediate(4));
- __ xorl(r0, r1);
- // hash = hash * 2057;
- __ imull(r0, r0, Immediate(2057));
- // hash = hash ^ (hash >> 16);
- __ movl(r1, r0);
- __ shrl(r1, Immediate(16));
- __ xorl(r0, r1);
-
- // Compute capacity mask.
- __ SmiToInteger32(r1,
- FieldOperand(elements, NumberDictionary::kCapacityOffset));
- __ decl(r1);
-
- // Generate an unrolled loop that performs a few probes before giving up.
- const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
- // Use r2 for index calculations and keep the hash intact in r0.
- __ movq(r2, r0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- __ addl(r2, Immediate(NumberDictionary::GetProbeOffset(i)));
- }
- __ and_(r2, r1);
-
- // Scale the index by multiplying by the entry size.
- ASSERT(NumberDictionary::kEntrySize == 3);
- __ lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
-
- // Check if the key matches.
- __ cmpq(key, FieldOperand(elements,
- r2,
- times_pointer_size,
- NumberDictionary::kElementsStartOffset));
- if (i != (kProbes - 1)) {
- __ j(equal, &done);
- } else {
- __ j(not_equal, miss);
- }
- }
-
- __ bind(&done);
- // Check that the value is a normal propety.
- const int kDetailsOffset =
- NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- ASSERT_EQ(NORMAL, 0);
- __ Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
- Smi::FromInt(PropertyDetails::TypeField::mask()));
- __ j(not_zero, miss);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset =
- NumberDictionary::kElementsStartOffset + kPointerSize;
- __ movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
-}
-
-
-// The offset from the inlined patch site to the start of the inlined
-// load instruction.
-const int LoadIC::kOffsetToLoadInstruction = 20;
-
-
-void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadArrayLength(masm, rax, rdx, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadStringLength(masm, rax, rdx, rbx, &miss,
- support_wrappers);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadFunctionPrototype(masm, rax, rdx, rbx, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map,
- int interceptor_bit,
- Label* slow) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // Scratch registers:
- // map - used to hold the map of the receiver.
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
-
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing
- // into string objects work as intended.
- ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ CmpObjectType(receiver, JS_OBJECT_TYPE, map);
- __ j(below, slow);
-
- // Check bit field.
- __ testb(FieldOperand(map, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsAccessCheckNeeded) |
- (1 << interceptor_bit)));
- __ j(not_zero, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register scratch,
- Register result,
- Label* not_fast_array,
- Label* out_of_range) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // elements - holds the elements of the receiver on exit.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the the same as 'receiver' or 'key'.
- // Unchanged on bailout so 'receiver' and 'key' can be safely
- // used by further computation.
- //
- // Scratch registers:
- //
- // scratch - used to hold elements of the receiver and the loaded value.
-
- __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, not_fast_array);
- } else {
- __ AssertFastElements(elements);
- }
- // Check that the key (index) is within bounds.
- __ SmiCompare(key, FieldOperand(elements, FixedArray::kLengthOffset));
- // Unsigned comparison rejects negative indices.
- __ j(above_equal, out_of_range);
- // Fast case: Do the load.
- SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2);
- __ movq(scratch, FieldOperand(elements,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
- __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ j(equal, out_of_range);
- if (!result.is(scratch)) {
- __ movq(result, scratch);
- }
-}
-
-
-// Checks whether a key is an array index string or a symbol string.
-// Falls through if the key is a symbol.
-static void GenerateKeyStringCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_symbol) {
- // Register use:
- // key - holds the key and is unchanged. Assumed to be non-smi.
- // Scratch registers:
- // map - used to hold the map of the key.
- // hash - used to hold the hash of the key.
- __ CmpObjectType(key, FIRST_NONSTRING_TYPE, map);
- __ j(above_equal, not_symbol);
- // Is the string an array index, with cached numeric value?
- __ movl(hash, FieldOperand(key, String::kHashFieldOffset));
- __ testl(hash, Immediate(String::kContainsCachedArrayIndexMask));
- __ j(zero, index_string); // The value in hash is used at jump target.
-
- // Is the string a symbol?
- ASSERT(kSymbolTag != 0);
- __ testb(FieldOperand(map, Map::kInstanceTypeOffset),
- Immediate(kIsSymbolMask));
- __ j(zero, not_symbol);
-}
-
-
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, check_string, index_smi, index_string, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(rax, &check_string);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, rdx, rcx, Map::kHasIndexedInterceptor, &slow);
-
- // Check the "has fast elements" bit in the receiver's map which is
- // now in rcx.
- __ testb(FieldOperand(rcx, Map::kBitField2Offset),
- Immediate(1 << Map::kHasFastElements));
- __ j(zero, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm,
- rdx,
- rax,
- rcx,
- rbx,
- rax,
- NULL,
- &slow);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
- __ ret(0);
-
- __ bind(&check_number_dictionary);
- __ SmiToInteger32(rbx, rax);
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
-
- // Check whether the elements is a number dictionary.
- // rdx: receiver
- // rax: key
- // rbx: key as untagged int32
- // rcx: elements
- __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, &slow);
- GenerateNumberDictionaryLoad(masm, &slow, rcx, rax, rbx, r9, rdi, rax);
- __ ret(0);
-
- __ bind(&slow);
- // Slow case: Jump to runtime.
- // rdx: receiver
- // rax: key
- __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, rax, rcx, rbx, &index_string, &slow);
-
- GenerateKeyedLoadReceiverCheck(
- masm, rdx, rcx, Map::kHasNamedInterceptor, &slow);
-
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary leaving result in rcx.
- __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(equal, &probe_dictionary);
-
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the string hash.
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ movl(rcx, rbx);
- __ shr(rcx, Immediate(KeyedLookupCache::kMapHashShift));
- __ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
- __ shr(rdi, Immediate(String::kHashShift));
- __ xor_(rcx, rdi);
- __ and_(rcx, Immediate(KeyedLookupCache::kCapacityMask));
-
- // Load the key (consisting of map and symbol) from the cache and
- // check for match.
- ExternalReference cache_keys
- = ExternalReference::keyed_lookup_cache_keys(masm->isolate());
- __ movq(rdi, rcx);
- __ shl(rdi, Immediate(kPointerSizeLog2 + 1));
- __ LoadAddress(kScratchRegister, cache_keys);
- __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, 0));
- __ j(not_equal, &slow);
- __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, kPointerSize));
- __ j(not_equal, &slow);
-
- // Get field offset, which is a 32-bit integer.
- ExternalReference cache_field_offsets
- = ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
- __ LoadAddress(kScratchRegister, cache_field_offsets);
- __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
- __ subq(rdi, rcx);
- __ j(above_equal, &property_array_property);
-
- // Load in-object property.
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
- __ addq(rcx, rdi);
- __ movq(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
- __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
- __ ret(0);
-
- // Load property array property.
- __ bind(&property_array_property);
- __ movq(rax, FieldOperand(rdx, JSObject::kPropertiesOffset));
- __ movq(rax, FieldOperand(rax, rdi, times_pointer_size,
- FixedArray::kHeaderSize));
- __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
- __ ret(0);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- // rdx: receiver
- // rax: key
- // rbx: elements
-
- __ movq(rcx, FieldOperand(rdx, JSObject::kMapOffset));
- __ movb(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, rcx, &slow);
-
- GenerateDictionaryLoad(masm, &slow, rbx, rax, rcx, rdi, rax);
- __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
- __ ret(0);
-
- __ bind(&index_string);
- __ IndexFromHash(rbx, rax);
- __ jmp(&index_smi);
-}
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Register receiver = rdx;
- Register index = rax;
- Register scratch1 = rbx;
- Register scratch2 = rcx;
- Register result = rax;
-
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch1,
- scratch2,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX);
- char_at_generator.GenerateFast(masm);
- __ ret(0);
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &slow);
-
- // Check that the key is an array index, that is Uint32.
- STATIC_ASSERT(kSmiValueSize <= 32);
- __ JumpUnlessNonNegativeSmi(rax, &slow);
-
- // Get the map of the receiver.
- __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ movb(rcx, FieldOperand(rcx, Map::kBitFieldOffset));
- __ andb(rcx, Immediate(kSlowCaseBitFieldMask));
- __ cmpb(rcx, Immediate(1 << Map::kHasIndexedInterceptor));
- __ j(not_zero, &slow);
-
- // Everything is fine, call runtime.
- __ pop(rcx);
- __ push(rdx); // receiver
- __ push(rax); // key
- __ push(rcx); // return address
-
- // Perform tail call to the entry.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
- masm->isolate()),
- 2,
- 1);
-
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, slow_with_tagged_index, fast, array, extra;
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &slow_with_tagged_index);
- // Get the map from the receiver.
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &slow_with_tagged_index);
- // Check that the key is a smi.
- __ JumpIfNotSmi(rcx, &slow_with_tagged_index);
- __ SmiToInteger32(rcx, rcx);
-
- __ CmpInstanceType(rbx, JS_ARRAY_TYPE);
- __ j(equal, &array);
- // Check that the object is some kind of JS object.
- __ CmpInstanceType(rbx, FIRST_JS_OBJECT_TYPE);
- __ j(below, &slow);
-
- // Object case: Check key against length in the elements array.
- // rax: value
- // rdx: JSObject
- // rcx: index
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- // Check that the object is in fast mode and writable.
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &slow);
- __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
- // rax: value
- // rbx: FixedArray
- // rcx: index
- __ j(above, &fast);
-
- // Slow case: call runtime.
- __ bind(&slow);
- __ Integer32ToSmi(rcx, rcx);
- __ bind(&slow_with_tagged_index);
- GenerateRuntimeSetProperty(masm, strict_mode);
- // Never returns to here.
-
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
- __ bind(&extra);
- // rax: value
- // rdx: receiver (a JSArray)
- // rbx: receiver's elements array (a FixedArray)
- // rcx: index
- // flags: smicompare (rdx.length(), rbx)
- __ j(not_equal, &slow); // do not leave holes in the array
- __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
- __ j(below_equal, &slow);
- // Increment index to get new length.
- __ leal(rdi, Operand(rcx, 1));
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
- __ jmp(&fast);
-
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
- // is the length is always a smi.
- __ bind(&array);
- // rax: value
- // rdx: receiver (a JSArray)
- // rcx: index
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &slow);
-
- // Check the key against the length in the array, compute the
- // address to store into and fall through to fast case.
- __ SmiCompareInteger32(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
- __ j(below_equal, &extra);
-
- // Fast case: Do the store.
- __ bind(&fast);
- // rax: value
- // rbx: receiver's elements array (a FixedArray)
- // rcx: index
- NearLabel non_smi_value;
- __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
- rax);
- __ JumpIfNotSmi(rax, &non_smi_value);
- __ ret(0);
- __ bind(&non_smi_value);
- // Slow case that needs to retain rcx for use by RecordWrite.
- // Update write barrier for the elements array address.
- __ movq(rdx, rax);
- __ RecordWriteNonSmi(rbx, 0, rdx, rcx);
- __ ret(0);
-}
-
-
-// The generated code does not accept smi keys.
-// The generated code falls through if both probes miss.
-static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rdx : receiver
- // -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- NOT_IN_LOOP,
- MONOMORPHIC,
- Code::kNoExtraICState,
- NORMAL,
- argc);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
- rax);
-
- // If the stub cache probing failed, the receiver might be a value.
- // For value objects, we use the map of the prototype objects for
- // the corresponding JSValue for the cache and that is what we need
- // to probe.
- //
- // Check for number.
- __ JumpIfSmi(rdx, &number);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rbx);
- __ j(not_equal, &non_number);
- __ bind(&number);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::NUMBER_FUNCTION_INDEX, rdx);
- __ jmp(&probe);
-
- // Check for string.
- __ bind(&non_number);
- __ CmpInstanceType(rbx, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &non_string);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::STRING_FUNCTION_INDEX, rdx);
- __ jmp(&probe);
-
- // Check for boolean.
- __ bind(&non_string);
- __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
- __ j(equal, &boolean);
- __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
- __ j(not_equal, &miss);
- __ bind(&boolean);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::BOOLEAN_FUNCTION_INDEX, rdx);
-
- // Probe the stub cache for the value object.
- __ bind(&probe);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
- no_reg);
-
- __ bind(&miss);
-}
-
-
-static void GenerateFunctionTailCall(MacroAssembler* masm,
- int argc,
- Label* miss) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rdi : function
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- __ JumpIfSmi(rdi, miss);
- // Check that the value is a JavaScript function.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rdx);
- __ j(not_equal, miss);
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
-}
-
-
-// The generated code falls through if the call should be handled by runtime.
-static void GenerateCallNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label miss;
-
- // Get the receiver of the function from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- GenerateStringDictionaryReceiverCheck(masm, rdx, rax, rbx, &miss);
-
- // rax: elements
- // Search the dictionary placing the result in rdi.
- GenerateDictionaryLoad(masm, &miss, rax, rcx, rbx, rdi, rdi);
-
- GenerateFunctionTailCall(masm, argc, &miss);
-
- __ bind(&miss);
-}
-
-
-static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- Counters* counters = masm->isolate()->counters();
- if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(counters->call_miss(), 1);
- } else {
- __ IncrementCounter(counters->keyed_call_miss(), 1);
- }
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Push the receiver and the name of the function.
- __ push(rdx);
- __ push(rcx);
-
- // Call the entry.
- CEntryStub stub(1);
- __ movq(rax, Immediate(2));
- __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
- __ CallStub(&stub);
-
- // Move result to rdi and exit the internal frame.
- __ movq(rdi, rax);
- __ LeaveInternalFrame();
-
- // Check if the receiver is a global object of some sort.
- // This can happen only for regular CallIC but not KeyedCallIC.
- if (id == IC::kCallIC_Miss) {
- Label invoke, global;
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); // receiver
- __ JumpIfSmi(rdx, &invoke);
- __ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
- __ j(equal, &global);
- __ CmpInstanceType(rcx, JS_BUILTINS_OBJECT_TYPE);
- __ j(not_equal, &invoke);
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
- __ bind(&invoke);
- }
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
-}
-
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
- GenerateMiss(masm, argc);
-}
-
-
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- GenerateCallNormal(masm, argc);
- GenerateMiss(masm, argc);
-}
-
-
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
-}
-
-
-void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- Label do_call, slow_call, slow_load;
- Label check_number_dictionary, check_string, lookup_monomorphic_cache;
- Label index_smi, index_string;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(rcx, &check_string);
-
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, rdx, rax, Map::kHasIndexedInterceptor, &slow_call);
-
- GenerateFastArrayLoad(
- masm, rdx, rcx, rax, rbx, rdi, &check_number_dictionary, &slow_load);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1);
-
- __ bind(&do_call);
- // receiver in rdx is not used after this point.
- // rcx: key
- // rdi: function
- GenerateFunctionTailCall(masm, argc, &slow_call);
-
- __ bind(&check_number_dictionary);
- // rax: elements
- // rcx: smi key
- // Check whether the elements is a number dictionary.
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, &slow_load);
- __ SmiToInteger32(rbx, rcx);
- // ebx: untagged index
- GenerateNumberDictionaryLoad(masm, &slow_load, rax, rcx, rbx, r9, rdi, rdi);
- __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1);
- __ jmp(&do_call);
-
- __ bind(&slow_load);
- // This branch is taken when calling KeyedCallIC_Miss is neither required
- // nor beneficial.
- __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
- __ EnterInternalFrame();
- __ push(rcx); // save the key
- __ push(rdx); // pass the receiver
- __ push(rcx); // pass the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(rcx); // restore the key
- __ LeaveInternalFrame();
- __ movq(rdi, rax);
- __ jmp(&do_call);
-
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, rcx, rax, rbx, &index_string, &slow_call);
-
- // The key is known to be a symbol.
- // If the receiver is a regular JS object with slow properties then do
- // a quick inline probe of the receiver's dictionary.
- // Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(
- masm, rdx, rax, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
-
- __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, &lookup_monomorphic_cache);
-
- GenerateDictionaryLoad(masm, &slow_load, rbx, rcx, rax, rdi, rdi);
- __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1);
- __ jmp(&do_call);
-
- __ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
- GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
- // Fall through on miss.
-
- __ bind(&slow_call);
- // This branch is taken if:
- // - the receiver requires boxing or access check,
- // - the key is neither smi nor symbol,
- // - the value loaded is not a function,
- // - there is hope that the runtime will create a monomorphic call stub
- // that will get fetched next time.
- __ IncrementCounter(counters->keyed_call_generic_slow(), 1);
- GenerateMiss(masm, argc);
-
- __ bind(&index_string);
- __ IndexFromHash(rbx, rcx);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- // Check if the name is a string.
- Label miss;
- __ JumpIfSmi(rcx, &miss);
- Condition cond = masm->IsObjectStringType(rcx, rax, rax);
- __ j(NegateCondition(cond), &miss);
- GenerateCallNormal(masm, argc);
- __ bind(&miss);
- GenerateMiss(masm, argc);
-}
-
-
-void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
-}
-
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
- NOT_IN_LOOP,
- MONOMORPHIC);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rax, rcx, rbx,
- rdx);
-
- // Cache miss: Jump to runtime.
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- GenerateStringDictionaryReceiverCheck(masm, rax, rdx, rbx, &miss);
-
- // rdx: elements
- // Search the dictionary placing the result in rax.
- GenerateDictionaryLoad(masm, &miss, rdx, rcx, rbx, rdi, rax);
- __ ret(0);
-
- // Cache miss: Jump to runtime.
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->load_miss(), 1);
-
- __ pop(rbx);
- __ push(rax); // receiver
- __ push(rcx); // name
- __ push(rbx); // return address
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
- if (V8::UseCrankshaft()) return false;
-
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
- // If the instruction following the call is not a test rax, nothing
- // was inlined.
- if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
- Address delta_address = test_instruction_address + 1;
- // The delta to the start of the map check instruction.
- int delta = *reinterpret_cast<int*>(delta_address);
-
- // The map address is the last 8 bytes of the 10-byte
- // immediate move instruction, so we add 2 to get the
- // offset to the last 8 bytes.
- Address map_address = test_instruction_address + delta + 2;
- *(reinterpret_cast<Object**>(map_address)) = map;
-
- // The offset is in the 32-bit displacement of a seven byte
- // memory-to-register move instruction (REX.W 0x88 ModR/M disp32),
- // so we add 3 to get the offset of the displacement.
- Address offset_address =
- test_instruction_address + delta + kOffsetToLoadInstruction + 3;
- *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
- return true;
-}
-
-
-bool LoadIC::PatchInlinedContextualLoad(Address address,
- Object* map,
- Object* cell,
- bool is_dont_delete) {
- // TODO(<bug#>): implement this.
- return false;
-}
-
-
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
- if (V8::UseCrankshaft()) return false;
-
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test rax, nothing
- // was inlined.
- if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
- // Extract the encoded deltas from the test rax instruction.
- Address encoded_offsets_address = test_instruction_address + 1;
- int encoded_offsets = *reinterpret_cast<int*>(encoded_offsets_address);
- int delta_to_map_check = -(encoded_offsets & 0xFFFF);
- int delta_to_record_write = encoded_offsets >> 16;
-
- // Patch the map to check. The map address is the last 8 bytes of
- // the 10-byte immediate move instruction.
- Address map_check_address = test_instruction_address + delta_to_map_check;
- Address map_address = map_check_address + 2;
- *(reinterpret_cast<Object**>(map_address)) = map;
-
- // Patch the offset in the store instruction. The offset is in the
- // last 4 bytes of a 7 byte register-to-memory move instruction.
- Address offset_address =
- map_check_address + StoreIC::kOffsetToStoreInstruction + 3;
- // The offset should have initial value (kMaxInt - 1), cleared value
- // (-1) or we should be clearing the inlined version.
- ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt - 1 ||
- *reinterpret_cast<int*>(offset_address) == -1 ||
- (offset == 0 && map == HEAP->null_value()));
- *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
- // Patch the offset in the write-barrier code. The offset is the
- // last 4 bytes of a 7 byte lea instruction.
- offset_address = map_check_address + delta_to_record_write + 3;
- // The offset should have initial value (kMaxInt), cleared value
- // (-1) or we should be clearing the inlined version.
- ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt ||
- *reinterpret_cast<int*>(offset_address) == -1 ||
- (offset == 0 && map == HEAP->null_value()));
- *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
- return true;
-}
-
-
-static bool PatchInlinedMapCheck(Address address, Object* map) {
- if (V8::UseCrankshaft()) return false;
-
- // Arguments are address of start of call sequence that called
- // the IC,
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
- // The keyed load has a fast inlined case if the IC call instruction
- // is immediately followed by a test instruction.
- if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
- // Fetch the offset from the test instruction to the map compare
- // instructions (starting with the 64-bit immediate mov of the map
- // address). This offset is stored in the last 4 bytes of the 5
- // byte test instruction.
- Address delta_address = test_instruction_address + 1;
- int delta = *reinterpret_cast<int*>(delta_address);
- // Compute the map address. The map address is in the last 8 bytes
- // of the 10-byte immediate mov instruction (incl. REX prefix), so we add 2
- // to the offset to get the map address.
- Address map_address = test_instruction_address + delta + 2;
- // Patch the map check.
- *(reinterpret_cast<Object**>(map_address)) = map;
- return true;
-}
-
-
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
- return PatchInlinedMapCheck(address, map);
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
- return PatchInlinedMapCheck(address, map);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_miss(), 1);
-
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rax); // name
- __ push(rbx); // return address
-
- // Perform tail call to the entry.
- ExternalReference ref
- = ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rax); // name
- __ push(rbx); // return address
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- // Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
- NOT_IN_LOOP,
- MONOMORPHIC,
- strict_mode);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
- no_reg);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // name
- __ push(rax); // value
- __ push(rbx); // return address
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-// The offset from the inlined patch site to the start of the inlined
-// store instruction.
-const int StoreIC::kOffsetToStoreInstruction = 20;
-
-
-void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSObject::SetElementsLength accepts
- // (currently anything except for external and pixel arrays which means
- // anything with elements of FixedArray type.), but currently is restricted
- // to JSArray.
- // Value must be a number, but only smis are accepted as the most common case.
-
- Label miss;
-
- Register receiver = rdx;
- Register value = rax;
- Register scratch = rbx;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ movq(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
- __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ pop(scratch);
- __ push(receiver);
- __ push(value);
- __ push(scratch); // return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- Label miss;
-
- GenerateStringDictionaryReceiverCheck(masm, rdx, rbx, rdi, &miss);
-
- GenerateDictionaryStore(masm, &miss, rbx, rcx, rax, r8, r9);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1);
- __ ret(0);
-
- __ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1);
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- __ pop(rbx);
- __ push(rdx);
- __ push(rcx);
- __ push(rax);
- __ Push(Smi::FromInt(NONE)); // PropertyAttributes
- __ Push(Smi::FromInt(strict_mode));
- __ push(rbx); // return address
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
- __ Push(Smi::FromInt(NONE)); // PropertyAttributes
- __ Push(Smi::FromInt(strict_mode)); // Strict mode.
- __ push(rbx); // return address
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
- __ push(rbx); // return address
-
- // Do tail-call to runtime routine.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-#undef __
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return equal;
- case Token::LT:
- return less;
- case Token::GT:
- // Reverse left and right operands to obtain ECMA-262 conversion order.
- return less;
- case Token::LTE:
- // Reverse left and right operands to obtain ECMA-262 conversion order.
- return greater_equal;
- case Token::GTE:
- return greater_equal;
- default:
- UNREACHABLE();
- return no_condition;
- }
-}
-
-
-static bool HasInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test al, nothing
- // was inlined.
- return *test_instruction_address == Assembler::kTestAlByte;
-}
-
-
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope;
- Handle<Code> rewritten;
- State previous_state = GetState();
-
- State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
- if (state == GENERIC) {
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
- rewritten = stub.GetCode();
- } else {
- ICCompareStub stub(op_, state);
- rewritten = stub.GetCode();
- }
- set_target(*rewritten);
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[CompareIC (%s->%s)#%s]\n",
- GetStateName(previous_state),
- GetStateName(state),
- Token::Name(op_));
- }
-#endif
-
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address());
- }
-}
-
-void PatchInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test al, nothing
- // was inlined.
- if (*test_instruction_address != Assembler::kTestAlByte) {
- ASSERT(*test_instruction_address == Assembler::kNopByte);
- return;
- }
-
- Address delta_address = test_instruction_address + 1;
- // The delta to the start of the map check instruction and the
- // condition code uses at the patched jump.
- int8_t delta = *reinterpret_cast<int8_t*>(delta_address);
- if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, test=%p, delta=%d\n",
- address, test_instruction_address, delta);
- }
-
- // Patch with a short conditional jump. There must be a
- // short jump-if-carry/not-carry at this position.
- Address jmp_address = test_instruction_address - delta;
- ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
- *jmp_address == Assembler::kJcShortOpcode);
- Condition cc = *jmp_address == Assembler::kJncShortOpcode
- ? not_zero
- : zero;
- *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/jump-target-x64.cc b/src/3rdparty/v8/src/x64/jump-target-x64.cc
deleted file mode 100644
index e715604..0000000
--- a/src/3rdparty/v8/src/x64/jump-target-x64.cc
+++ /dev/null
@@ -1,437 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-void JumpTarget::DoJump() {
- ASSERT(cgen()->has_valid_frame());
- // Live non-frame registers are not allowed at unconditional jumps
- // because we have no way of invalidating the corresponding results
- // which are still live in the C++ code.
- ASSERT(cgen()->HasValidEntryRegisters());
-
- if (is_bound()) {
- // Backward jump. There is an expected frame to merge to.
- ASSERT(direction_ == BIDIRECTIONAL);
- cgen()->frame()->PrepareMergeTo(entry_frame_);
- cgen()->frame()->MergeTo(entry_frame_);
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- } else if (entry_frame_ != NULL) {
- // Forward jump with a preconfigured entry frame. Assert the
- // current frame matches the expected one and jump to the block.
- ASSERT(cgen()->frame()->Equals(entry_frame_));
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- } else {
- // Forward jump. Remember the current frame and emit a jump to
- // its merge code.
- AddReachingFrame(cgen()->frame());
- RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
- __ jmp(&merge_labels_.last());
- }
-}
-
-
-void JumpTarget::DoBranch(Condition cc, Hint b) {
- ASSERT(cgen() != NULL);
- ASSERT(cgen()->has_valid_frame());
-
- if (is_bound()) {
- ASSERT(direction_ == BIDIRECTIONAL);
- // Backward branch. We have an expected frame to merge to on the
- // backward edge.
-
- // Swap the current frame for a copy (we do the swapping to get
- // the off-frame registers off the fall through) to use for the
- // branch.
- VirtualFrame* fall_through_frame = cgen()->frame();
- VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
- RegisterFile non_frame_registers;
- cgen()->SetFrame(branch_frame, &non_frame_registers);
-
- // Check if we can avoid merge code.
- cgen()->frame()->PrepareMergeTo(entry_frame_);
- if (cgen()->frame()->Equals(entry_frame_)) {
- // Branch right in to the block.
- cgen()->DeleteFrame();
- __ j(cc, &entry_label_);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- return;
- }
-
- // Check if we can reuse existing merge code.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (reaching_frames_[i] != NULL &&
- cgen()->frame()->Equals(reaching_frames_[i])) {
- // Branch to the merge code.
- cgen()->DeleteFrame();
- __ j(cc, &merge_labels_[i]);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- return;
- }
- }
-
- // To emit the merge code here, we negate the condition and branch
- // around the merge code on the fall through path.
- Label original_fall_through;
- __ j(NegateCondition(cc), &original_fall_through);
- cgen()->frame()->MergeTo(entry_frame_);
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- __ bind(&original_fall_through);
-
- } else if (entry_frame_ != NULL) {
- // Forward branch with a preconfigured entry frame. Assert the
- // current frame matches the expected one and branch to the block.
- ASSERT(cgen()->frame()->Equals(entry_frame_));
- // Explicitly use the macro assembler instead of __ as forward
- // branches are expected to be a fixed size (no inserted
- // coverage-checking instructions please). This is used in
- // Reference::GetValue.
- cgen()->masm()->j(cc, &entry_label_);
-
- } else {
- // Forward branch. A copy of the current frame is remembered and
- // a branch to the merge code is emitted. Explicitly use the
- // macro assembler instead of __ as forward branches are expected
- // to be a fixed size (no inserted coverage-checking instructions
- // please). This is used in Reference::GetValue.
- AddReachingFrame(new VirtualFrame(cgen()->frame()));
- cgen()->masm()->j(cc, &merge_labels_.last());
- }
-}
-
-
-void JumpTarget::Call() {
- // Call is used to push the address of the catch block on the stack as
- // a return address when compiling try/catch and try/finally. We
- // fully spill the frame before making the call. The expected frame
- // at the label (which should be the only one) is the spilled current
- // frame plus an in-memory return address. The "fall-through" frame
- // at the return site is the spilled current frame.
- ASSERT(cgen() != NULL);
- ASSERT(cgen()->has_valid_frame());
- // There are no non-frame references across the call.
- ASSERT(cgen()->HasValidEntryRegisters());
- ASSERT(!is_linked());
-
- cgen()->frame()->SpillAll();
- VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
- target_frame->Adjust(1);
- // We do not expect a call with a preconfigured entry frame.
- ASSERT(entry_frame_ == NULL);
- AddReachingFrame(target_frame);
- __ call(&merge_labels_.last());
-}
-
-
-void JumpTarget::DoBind() {
- ASSERT(cgen() != NULL);
- ASSERT(!is_bound());
-
- // Live non-frame registers are not allowed at the start of a basic
- // block.
- ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
-
- // Fast case: the jump target was manually configured with an entry
- // frame to use.
- if (entry_frame_ != NULL) {
- // Assert no reaching frames to deal with.
- ASSERT(reaching_frames_.is_empty());
- ASSERT(!cgen()->has_valid_frame());
-
- RegisterFile empty;
- if (direction_ == BIDIRECTIONAL) {
- // Copy the entry frame so the original can be used for a
- // possible backward jump.
- cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
- } else {
- // Take ownership of the entry frame.
- cgen()->SetFrame(entry_frame_, &empty);
- entry_frame_ = NULL;
- }
- __ bind(&entry_label_);
- return;
- }
-
- if (!is_linked()) {
- ASSERT(cgen()->has_valid_frame());
- if (direction_ == FORWARD_ONLY) {
- // Fast case: no forward jumps and no possible backward jumps.
- // The stack pointer can be floating above the top of the
- // virtual frame before the bind. Afterward, it should not.
- VirtualFrame* frame = cgen()->frame();
- int difference = frame->stack_pointer_ - (frame->element_count() - 1);
- if (difference > 0) {
- frame->stack_pointer_ -= difference;
- __ addq(rsp, Immediate(difference * kPointerSize));
- }
- } else {
- ASSERT(direction_ == BIDIRECTIONAL);
- // Fast case: no forward jumps, possible backward ones. Remove
- // constants and copies above the watermark on the fall-through
- // frame and use it as the entry frame.
- cgen()->frame()->MakeMergable();
- entry_frame_ = new VirtualFrame(cgen()->frame());
- }
- __ bind(&entry_label_);
- return;
- }
-
- if (direction_ == FORWARD_ONLY &&
- !cgen()->has_valid_frame() &&
- reaching_frames_.length() == 1) {
- // Fast case: no fall-through, a single forward jump, and no
- // possible backward jumps. Pick up the only reaching frame, take
- // ownership of it, and use it for the block about to be emitted.
- VirtualFrame* frame = reaching_frames_[0];
- RegisterFile empty;
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[0] = NULL;
- __ bind(&merge_labels_[0]);
-
- // The stack pointer can be floating above the top of the
- // virtual frame before the bind. Afterward, it should not.
- int difference = frame->stack_pointer_ - (frame->element_count() - 1);
- if (difference > 0) {
- frame->stack_pointer_ -= difference;
- __ addq(rsp, Immediate(difference * kPointerSize));
- }
-
- __ bind(&entry_label_);
- return;
- }
-
- // If there is a current frame, record it as the fall-through. It
- // is owned by the reaching frames for now.
- bool had_fall_through = false;
- if (cgen()->has_valid_frame()) {
- had_fall_through = true;
- AddReachingFrame(cgen()->frame()); // Return value ignored.
- RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
- }
-
- // Compute the frame to use for entry to the block.
- ComputeEntryFrame();
-
- // Some moves required to merge to an expected frame require purely
- // frame state changes, and do not require any code generation.
- // Perform those first to increase the possibility of finding equal
- // frames below.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (reaching_frames_[i] != NULL) {
- reaching_frames_[i]->PrepareMergeTo(entry_frame_);
- }
- }
-
- if (is_linked()) {
- // There were forward jumps. Handle merging the reaching frames
- // to the entry frame.
-
- // Loop over the (non-null) reaching frames and process any that
- // need merge code. Iterate backwards through the list to handle
- // the fall-through frame first. Set frames that will be
- // processed after 'i' to NULL if we want to avoid processing
- // them.
- for (int i = reaching_frames_.length() - 1; i >= 0; i--) {
- VirtualFrame* frame = reaching_frames_[i];
-
- if (frame != NULL) {
- // Does the frame (probably) need merge code?
- if (!frame->Equals(entry_frame_)) {
- // We could have a valid frame as the fall through to the
- // binding site or as the fall through from a previous merge
- // code block. Jump around the code we are about to
- // generate.
- if (cgen()->has_valid_frame()) {
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- }
- // Pick up the frame for this block. Assume ownership if
- // there cannot be backward jumps.
- RegisterFile empty;
- if (direction_ == BIDIRECTIONAL) {
- cgen()->SetFrame(new VirtualFrame(frame), &empty);
- } else {
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[i] = NULL;
- }
- __ bind(&merge_labels_[i]);
-
- // Loop over the remaining (non-null) reaching frames,
- // looking for any that can share merge code with this one.
- for (int j = 0; j < i; j++) {
- VirtualFrame* other = reaching_frames_[j];
- if (other != NULL && other->Equals(cgen()->frame())) {
- // Set the reaching frame element to null to avoid
- // processing it later, and then bind its entry label.
- reaching_frames_[j] = NULL;
- __ bind(&merge_labels_[j]);
- }
- }
-
- // Emit the merge code.
- cgen()->frame()->MergeTo(entry_frame_);
- } else if (i == reaching_frames_.length() - 1 && had_fall_through) {
- // If this is the fall through frame, and it didn't need
- // merge code, we need to pick up the frame so we can jump
- // around subsequent merge blocks if necessary.
- RegisterFile empty;
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[i] = NULL;
- }
- }
- }
-
- // The code generator may not have a current frame if there was no
- // fall through and none of the reaching frames needed merging.
- // In that case, clone the entry frame as the current frame.
- if (!cgen()->has_valid_frame()) {
- RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
- }
-
- // There may be unprocessed reaching frames that did not need
- // merge code. They will have unbound merge labels. Bind their
- // merge labels to be the same as the entry label and deallocate
- // them.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (!merge_labels_[i].is_bound()) {
- reaching_frames_[i] = NULL;
- __ bind(&merge_labels_[i]);
- }
- }
-
- // There are non-NULL reaching frames with bound labels for each
- // merge block, but only on backward targets.
- } else {
- // There were no forward jumps. There must be a current frame and
- // this must be a bidirectional target.
- ASSERT(reaching_frames_.length() == 1);
- ASSERT(reaching_frames_[0] != NULL);
- ASSERT(direction_ == BIDIRECTIONAL);
-
- // Use a copy of the reaching frame so the original can be saved
- // for possible reuse as a backward merge block.
- RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
- __ bind(&merge_labels_[0]);
- cgen()->frame()->MergeTo(entry_frame_);
- }
-
- __ bind(&entry_label_);
-}
-
-
-void BreakTarget::Jump() {
- // Drop leftover statement state from the frame before merging, without
- // emitting code.
- ASSERT(cgen()->has_valid_frame());
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- DoJump();
-}
-
-
-void BreakTarget::Jump(Result* arg) {
- // Drop leftover statement state from the frame before merging, without
- // emitting code.
- ASSERT(cgen()->has_valid_frame());
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- cgen()->frame()->Push(arg);
- DoJump();
-}
-
-
-void BreakTarget::Bind() {
-#ifdef DEBUG
- // All the forward-reaching frames should have been adjusted at the
- // jumps to this target.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- ASSERT(reaching_frames_[i] == NULL ||
- reaching_frames_[i]->height() == expected_height_);
- }
-#endif
- // Drop leftover statement state from the frame before merging, even on
- // the fall through. This is so we can bind the return target with state
- // on the frame.
- if (cgen()->has_valid_frame()) {
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- }
- DoBind();
-}
-
-
-void BreakTarget::Bind(Result* arg) {
-#ifdef DEBUG
- // All the forward-reaching frames should have been adjusted at the
- // jumps to this target.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- ASSERT(reaching_frames_[i] == NULL ||
- reaching_frames_[i]->height() == expected_height_ + 1);
- }
-#endif
- // Drop leftover statement state from the frame before merging, even on
- // the fall through. This is so we can bind the return target with state
- // on the frame.
- if (cgen()->has_valid_frame()) {
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- cgen()->frame()->Push(arg);
- }
- DoBind();
- *arg = cgen()->frame()->Pop();
-}
-
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc b/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc
deleted file mode 100644
index 7ceff76..0000000
--- a/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc
+++ /dev/null
@@ -1,3970 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "x64/lithium-codegen-x64.h"
-#include "code-stubs.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-// When invoking builtins, we need to record the safepoint in the middle of
-// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator : public CallWrapper {
- public:
- SafepointGenerator(LCodeGen* codegen,
- LPointerMap* pointers,
- int deoptimization_index)
- : codegen_(codegen),
- pointers_(pointers),
- deoptimization_index_(deoptimization_index) { }
- virtual ~SafepointGenerator() { }
-
- virtual void BeforeCall(int call_size) {
- ASSERT(call_size >= 0);
- // Ensure that we have enough space after the previous safepoint position
- // for the jump generated there.
- int call_end = codegen_->masm()->pc_offset() + call_size;
- int prev_jump_end = codegen_->LastSafepointEnd() + kMinSafepointSize;
- if (call_end < prev_jump_end) {
- int padding_size = prev_jump_end - call_end;
- STATIC_ASSERT(kMinSafepointSize <= 9); // One multibyte nop is enough.
- codegen_->masm()->nop(padding_size);
- }
- }
-
- virtual void AfterCall() {
- codegen_->RecordSafepoint(pointers_, deoptimization_index_);
- }
-
- private:
- static const int kMinSafepointSize =
- MacroAssembler::kShortCallInstructionLength;
- LCodeGen* codegen_;
- LPointerMap* pointers_;
- int deoptimization_index_;
-};
-
-
-#define __ masm()->
-
-bool LCodeGen::GenerateCode() {
- HPhase phase("Code generation", chunk());
- ASSERT(is_unused());
- status_ = GENERATING;
- return GeneratePrologue() &&
- GenerateBody() &&
- GenerateDeferredCode() &&
- GenerateJumpTable() &&
- GenerateSafepointTable();
-}
-
-
-void LCodeGen::FinishCode(Handle<Code> code) {
- ASSERT(is_done());
- code->set_stack_slots(StackSlotCount());
- code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- PopulateDeoptimizationData(code);
- Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
-}
-
-
-void LCodeGen::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LCodeGen in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
- status_ = ABORTED;
-}
-
-
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- int length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
-}
-
-
-bool LCodeGen::GeneratePrologue() {
- ASSERT(is_generating());
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
-#endif
-
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS function.
-
- // Reserve space for the stack slots needed by the code.
- int slots = StackSlotCount();
- if (slots > 0) {
- if (FLAG_debug_code) {
- __ movl(rax, Immediate(slots));
- __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE);
- Label loop;
- __ bind(&loop);
- __ push(kScratchRegister);
- __ decl(rax);
- __ j(not_zero, &loop);
- } else {
- __ subq(rsp, Immediate(slots * kPointerSize));
-#ifdef _MSC_VER
- // On windows, you may not access the stack more than one page below
- // the most recently mapped page. To make the allocated area randomly
- // accessible, we write to each page in turn (the value is irrelevant).
- const int kPageSize = 4 * KB;
- for (int offset = slots * kPointerSize - kPageSize;
- offset > 0;
- offset -= kPageSize) {
- __ movq(Operand(rsp, offset), rax);
- }
-#endif
- }
- }
-
- // Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment(";;; Allocate local context");
- // Argument to NewContext is the function, which is still in rdi.
- __ push(rdi);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewContext, 1);
- }
- RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
- // Context is returned in both rax and rsi. It replaces the context
- // passed to us. It's saved in the stack and kept live in rsi.
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
-
- // Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Slot* slot = scope()->parameter(i)->AsSlot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ movq(rax, Operand(rbp, parameter_offset));
- // Store it in the context.
- int context_offset = Context::SlotOffset(slot->index());
- __ movq(Operand(rsi, context_offset), rax);
- // Update the write barrier. This clobbers all involved
- // registers, so we have use a third register to avoid
- // clobbering rsi.
- __ movq(rcx, rsi);
- __ RecordWrite(rcx, context_offset, rax, rbx);
- }
- }
- Comment(";;; End allocate local context");
- }
-
- // Trace the call.
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
- if (instr->IsLabel()) {
- LLabel* label = LLabel::cast(instr);
- emit_instructions = !label->HasReplacement();
- }
-
- if (emit_instructions) {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
- instr->CompileToNative(this);
- }
- }
- return !is_aborted();
-}
-
-
-LInstruction* LCodeGen::GetNextInstruction() {
- if (current_instruction_ < instructions_->length() - 1) {
- return instructions_->at(current_instruction_ + 1);
- } else {
- return NULL;
- }
-}
-
-
-bool LCodeGen::GenerateJumpTable() {
- for (int i = 0; i < jump_table_.length(); i++) {
- __ bind(&jump_table_[i].label);
- __ Jump(jump_table_[i].address, RelocInfo::RUNTIME_ENTRY);
- }
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateDeferredCode() {
- ASSERT(is_generating());
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
- __ bind(code->entry());
- code->Generate();
- __ jmp(code->exit());
- }
-
- // Deferred code is the last part of the instruction sequence. Mark
- // the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateSafepointTable() {
- ASSERT(is_done());
- // Ensure that there is space at the end of the code to write a number
- // of jump instructions, as well as to afford writing a call near the end
- // of the code.
- // The jumps are used when there isn't room in the code stream to write
- // a long call instruction. Instead it writes a shorter call to a
- // jump instruction in the same code object.
- // The calls are used when lazy deoptimizing a function and calls to a
- // deoptimization function.
- int short_deopts = safepoints_.CountShortDeoptimizationIntervals(
- static_cast<unsigned>(MacroAssembler::kJumpInstructionLength));
- int byte_count = (short_deopts) * MacroAssembler::kJumpInstructionLength;
- while (byte_count-- > 0) {
- __ int3();
- }
- safepoints_.Emit(masm(), StackSlotCount());
- return !is_aborted();
-}
-
-
-Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
-}
-
-
-XMMRegister LCodeGen::ToDoubleRegister(int index) const {
- return XMMRegister::FromAllocationIndex(index);
-}
-
-
-Register LCodeGen::ToRegister(LOperand* op) const {
- ASSERT(op->IsRegister());
- return ToRegister(op->index());
-}
-
-
-XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
- ASSERT(op->IsDoubleRegister());
- return ToDoubleRegister(op->index());
-}
-
-
-bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
- return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsInteger32();
-}
-
-
-bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
- return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsTagged();
-}
-
-
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
- Handle<Object> value = chunk_->LookupLiteral(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
- ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
- value->Number());
- return static_cast<int32_t>(value->Number());
-}
-
-
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- Handle<Object> literal = chunk_->LookupLiteral(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
- return literal;
-}
-
-
-Operand LCodeGen::ToOperand(LOperand* op) const {
- // Does not handle registers. In X64 assembler, plain registers are not
- // representable as an Operand.
- ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- int index = op->index();
- if (index >= 0) {
- // Local or spill slot. Skip the frame pointer, function, and
- // context in the fixed part of the frame.
- return Operand(rbp, -(index + 3) * kPointerSize);
- } else {
- // Incoming parameter. Skip the return address.
- return Operand(rbp, -(index - 1) * kPointerSize);
- }
-}
-
-
-void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation) {
- if (environment == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = environment->values()->length();
- // The output frame height does not include the parameters.
- int height = translation_size - environment->parameter_count();
-
- WriteTranslation(environment->outer(), translation);
- int closure_id = DefineDeoptimizationLiteral(environment->closure());
- translation->BeginFrame(environment->ast_id(), closure_id, height);
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = environment->values()->at(i);
- // spilled_registers_ and spilled_double_registers_ are either
- // both NULL or both set.
- if (environment->spilled_registers() != NULL && value != NULL) {
- if (value->IsRegister() &&
- environment->spilled_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(translation,
- environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i));
- } else if (
- value->IsDoubleRegister() &&
- environment->spilled_double_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(
- translation,
- environment->spilled_double_registers()[value->index()],
- false);
- }
- }
-
- AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
- }
-}
-
-
-void LCodeGen::AddToTranslation(Translation* translation,
- LOperand* op,
- bool is_tagged) {
- if (op == NULL) {
- // TODO(twuerthinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed from the deoptimizer. Currently
- // this is only used for the arguments object.
- translation->StoreArgumentsObject();
- } else if (op->IsStackSlot()) {
- if (is_tagged) {
- translation->StoreStackSlot(op->index());
- } else {
- translation->StoreInt32StackSlot(op->index());
- }
- } else if (op->IsDoubleStackSlot()) {
- translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = StackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
- } else if (op->IsRegister()) {
- Register reg = ToRegister(op);
- if (is_tagged) {
- translation->StoreRegister(reg);
- } else {
- translation->StoreInt32Register(reg);
- }
- } else if (op->IsDoubleRegister()) {
- XMMRegister reg = ToDoubleRegister(op);
- translation->StoreDoubleRegister(reg);
- } else if (op->IsConstantOperand()) {
- Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(literal);
- translation->StoreLiteral(src_index);
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr) {
- ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- __ call(code, mode);
- RegisterLazyDeoptimization(instr);
-
- // Signal that we don't inline smi code before these stubs in the
- // optimizing code generator.
- if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
- code->kind() == Code::COMPARE_IC) {
- __ nop();
- }
-}
-
-
-void LCodeGen::CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr) {
- ASSERT(instr != NULL);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
-
- __ CallRuntime(function, num_arguments);
- RegisterLazyDeoptimization(instr);
-}
-
-
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
- // Create the environment to bailout to. If the call has side effects
- // execution has to continue after the call otherwise execution can continue
- // from a previous bailout point repeating the call.
- LEnvironment* deoptimization_environment;
- if (instr->HasDeoptimizationEnvironment()) {
- deoptimization_environment = instr->deoptimization_environment();
- } else {
- deoptimization_environment = instr->environment();
- }
-
- RegisterEnvironmentForDeoptimization(deoptimization_environment);
- RecordSafepoint(instr->pointer_map(),
- deoptimization_environment->deoptimization_index());
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
- if (!environment->HasBeenRegistered()) {
- // Physical stack frame layout:
- // -x ............. -4 0 ..................................... y
- // [incoming arguments] [spill slots] [pushed outgoing arguments]
-
- // Layout of the environment:
- // 0 ..................................................... size-1
- // [parameters] [locals] [expression stack including arguments]
-
- // Layout of the translation:
- // 0 ........................................................ size - 1 + 4
- // [expression stack including arguments] [locals] [4 words] [parameters]
- // |>------------ translation_size ------------<|
-
- int frame_count = 0;
- for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
- ++frame_count;
- }
- Translation translation(&translations_, frame_count);
- WriteTranslation(environment, &translation);
- int deoptimization_index = deoptimizations_.length();
- environment->Register(deoptimization_index, translation.index());
- deoptimizations_.Add(environment);
- }
-}
-
-
-void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
- RegisterEnvironmentForDeoptimization(environment);
- ASSERT(environment->HasBeenRegistered());
- int id = environment->deoptimization_index();
- Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
- ASSERT(entry != NULL);
- if (entry == NULL) {
- Abort("bailout was not prepared");
- return;
- }
-
- if (cc == no_condition) {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- // We often have several deopts to the same entry, reuse the last
- // jump entry if this is the case.
- if (jump_table_.is_empty() ||
- jump_table_.last().address != entry) {
- jump_table_.Add(entry);
- }
- __ j(cc, &jump_table_.last().label);
- }
-}
-
-
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- ASSERT(FLAG_deopt);
- Handle<DeoptimizationInputData> data =
- factory()->NewDeoptimizationInputData(length, TENURED);
-
- Handle<ByteArray> translations = translations_.CreateByteArray();
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, Smi::FromInt(env->ast_id()));
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
- int result = deoptimization_literals_.length();
- for (int i = 0; i < deoptimization_literals_.length(); ++i) {
- if (deoptimization_literals_[i].is_identical_to(literal)) return i;
- }
- deoptimization_literals_.Add(literal);
- return result;
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- ASSERT(deoptimization_literals_.length() == 0);
-
- const ZoneList<Handle<JSFunction> >* inlined_closures =
- chunk()->inlined_closures();
-
- for (int i = 0, length = inlined_closures->length();
- i < length;
- i++) {
- DefineDeoptimizationLiteral(inlined_closures->at(i));
- }
-
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
-void LCodeGen::RecordSafepoint(
- LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- int deoptimization_index) {
- const ZoneList<LOperand*>* operands = pointers->operands();
-
- Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
- kind, arguments, deoptimization_index);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index());
- } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer));
- }
- }
- if (kind & Safepoint::kWithRegisters) {
- // Register rsi always contains a pointer to the context.
- safepoint.DefinePointerRegister(rsi);
- }
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- int deoptimization_index) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
-}
-
-
-void LCodeGen::RecordSafepoint(int deoptimization_index) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition);
- RecordSafepoint(&empty_pointers, deoptimization_index);
-}
-
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- int deoptimization_index) {
- RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
- deoptimization_index);
-}
-
-
-void LCodeGen::RecordPosition(int position) {
- if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
- masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::DoLabel(LLabel* label) {
- if (label->is_loop_header()) {
- Comment(";;; B%d - LOOP entry", label->block_id());
- } else {
- Comment(";;; B%d", label->block_id());
- }
- __ bind(label->label());
- current_block_ = label->block_id();
- LCodeGen::DoGap(label);
-}
-
-
-void LCodeGen::DoParallelMove(LParallelMove* move) {
- resolver_.Resolve(move);
-}
-
-
-void LCodeGen::DoGap(LGap* gap) {
- for (int i = LGap::FIRST_INNER_POSITION;
- i <= LGap::LAST_INNER_POSITION;
- i++) {
- LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
- LParallelMove* move = gap->GetParallelMove(inner_pos);
- if (move != NULL) DoParallelMove(move);
- }
-
- LInstruction* next = GetNextInstruction();
- if (next != NULL && next->IsLazyBailout()) {
- int pc = masm()->pc_offset();
- safepoints_.SetPcAfterGap(pc);
- }
-}
-
-
-void LCodeGen::DoParameter(LParameter* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoCallStub(LCallStub* instr) {
- ASSERT(ToRegister(instr->result()).is(rax));
- switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpConstructResult: {
- RegExpConstructResultStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::RegExpExec: {
- RegExpExecStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::SubString: {
- SubStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringAdd: {
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringCompare: {
- StringCompareStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::TranscendentalCache: {
- TranscendentalCacheStub stub(instr->transcendental_type(),
- TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->InputAt(0));
-
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
-
- if (divisor < 0) divisor = -divisor;
-
- NearLabel positive_dividend, done;
- __ testl(dividend, dividend);
- __ j(not_sign, &positive_dividend);
- __ negl(dividend);
- __ andl(dividend, Immediate(divisor - 1));
- __ negl(dividend);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ j(not_zero, &done);
- DeoptimizeIf(no_condition, instr->environment());
- }
- __ bind(&positive_dividend);
- __ andl(dividend, Immediate(divisor - 1));
- __ bind(&done);
- } else {
- LOperand* right = instr->InputAt(1);
- Register right_reg = ToRegister(right);
-
- ASSERT(ToRegister(instr->result()).is(rdx));
- ASSERT(ToRegister(instr->InputAt(0)).is(rax));
- ASSERT(!right_reg.is(rax));
- ASSERT(!right_reg.is(rdx));
-
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ testl(right_reg, right_reg);
- DeoptimizeIf(zero, instr->environment());
- }
-
- // Sign extend eax to edx.
- // (We are using only the low 32 bits of the values.)
- __ cdq();
-
- // Check for (0 % -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- NearLabel positive_left;
- NearLabel done;
- __ testl(rax, rax);
- __ j(not_sign, &positive_left);
- __ idivl(right_reg);
-
- // Test the remainder for 0, because then the result would be -0.
- __ testl(rdx, rdx);
- __ j(not_zero, &done);
-
- DeoptimizeIf(no_condition, instr->environment());
- __ bind(&positive_left);
- __ idivl(right_reg);
- __ bind(&done);
- } else {
- __ idivl(right_reg);
- }
- }
-}
-
-
-void LCodeGen::DoDivI(LDivI* instr) {
- LOperand* right = instr->InputAt(1);
- ASSERT(ToRegister(instr->result()).is(rax));
- ASSERT(ToRegister(instr->InputAt(0)).is(rax));
- ASSERT(!ToRegister(instr->InputAt(1)).is(rax));
- ASSERT(!ToRegister(instr->InputAt(1)).is(rdx));
-
- Register left_reg = rax;
-
- // Check for x / 0.
- Register right_reg = ToRegister(right);
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ testl(right_reg, right_reg);
- DeoptimizeIf(zero, instr->environment());
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- NearLabel left_not_zero;
- __ testl(left_reg, left_reg);
- __ j(not_zero, &left_not_zero);
- __ testl(right_reg, right_reg);
- DeoptimizeIf(sign, instr->environment());
- __ bind(&left_not_zero);
- }
-
- // Check for (-kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- NearLabel left_not_min_int;
- __ cmpl(left_reg, Immediate(kMinInt));
- __ j(not_zero, &left_not_min_int);
- __ cmpl(right_reg, Immediate(-1));
- DeoptimizeIf(zero, instr->environment());
- __ bind(&left_not_min_int);
- }
-
- // Sign extend to rdx.
- __ cdq();
- __ idivl(right_reg);
-
- // Deoptimize if remainder is not 0.
- __ testl(rdx, rdx);
- DeoptimizeIf(not_zero, instr->environment());
-}
-
-
-void LCodeGen::DoMulI(LMulI* instr) {
- Register left = ToRegister(instr->InputAt(0));
- LOperand* right = instr->InputAt(1);
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ movl(kScratchRegister, left);
- }
-
- bool can_overflow =
- instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- if (right->IsConstantOperand()) {
- int right_value = ToInteger32(LConstantOperand::cast(right));
- if (right_value == -1) {
- __ negl(left);
- } else if (right_value == 0) {
- __ xorl(left, left);
- } else if (right_value == 2) {
- __ addl(left, left);
- } else if (!can_overflow) {
- // If the multiplication is known to not overflow, we
- // can use operations that don't set the overflow flag
- // correctly.
- switch (right_value) {
- case 1:
- // Do nothing.
- break;
- case 3:
- __ leal(left, Operand(left, left, times_2, 0));
- break;
- case 4:
- __ shll(left, Immediate(2));
- break;
- case 5:
- __ leal(left, Operand(left, left, times_4, 0));
- break;
- case 8:
- __ shll(left, Immediate(3));
- break;
- case 9:
- __ leal(left, Operand(left, left, times_8, 0));
- break;
- case 16:
- __ shll(left, Immediate(4));
- break;
- default:
- __ imull(left, left, Immediate(right_value));
- break;
- }
- } else {
- __ imull(left, left, Immediate(right_value));
- }
- } else if (right->IsStackSlot()) {
- __ imull(left, ToOperand(right));
- } else {
- __ imull(left, ToRegister(right));
- }
-
- if (can_overflow) {
- DeoptimizeIf(overflow, instr->environment());
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Bail out if the result is supposed to be negative zero.
- NearLabel done;
- __ testl(left, left);
- __ j(not_zero, &done);
- if (right->IsConstantOperand()) {
- if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
- DeoptimizeIf(no_condition, instr->environment());
- }
- } else if (right->IsStackSlot()) {
- __ or_(kScratchRegister, ToOperand(right));
- DeoptimizeIf(sign, instr->environment());
- } else {
- // Test the non-zero operand for negative sign.
- __ or_(kScratchRegister, ToRegister(right));
- DeoptimizeIf(sign, instr->environment());
- }
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
- ASSERT(left->IsRegister());
-
- if (right->IsConstantOperand()) {
- int right_operand = ToInteger32(LConstantOperand::cast(right));
- switch (instr->op()) {
- case Token::BIT_AND:
- __ andl(ToRegister(left), Immediate(right_operand));
- break;
- case Token::BIT_OR:
- __ orl(ToRegister(left), Immediate(right_operand));
- break;
- case Token::BIT_XOR:
- __ xorl(ToRegister(left), Immediate(right_operand));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else if (right->IsStackSlot()) {
- switch (instr->op()) {
- case Token::BIT_AND:
- __ andl(ToRegister(left), ToOperand(right));
- break;
- case Token::BIT_OR:
- __ orl(ToRegister(left), ToOperand(right));
- break;
- case Token::BIT_XOR:
- __ xorl(ToRegister(left), ToOperand(right));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- ASSERT(right->IsRegister());
- switch (instr->op()) {
- case Token::BIT_AND:
- __ andl(ToRegister(left), ToRegister(right));
- break;
- case Token::BIT_OR:
- __ orl(ToRegister(left), ToRegister(right));
- break;
- case Token::BIT_XOR:
- __ xorl(ToRegister(left), ToRegister(right));
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
- ASSERT(left->IsRegister());
- if (right->IsRegister()) {
- ASSERT(ToRegister(right).is(rcx));
-
- switch (instr->op()) {
- case Token::SAR:
- __ sarl_cl(ToRegister(left));
- break;
- case Token::SHR:
- __ shrl_cl(ToRegister(left));
- if (instr->can_deopt()) {
- __ testl(ToRegister(left), ToRegister(left));
- DeoptimizeIf(negative, instr->environment());
- }
- break;
- case Token::SHL:
- __ shll_cl(ToRegister(left));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- int value = ToInteger32(LConstantOperand::cast(right));
- uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
- switch (instr->op()) {
- case Token::SAR:
- if (shift_count != 0) {
- __ sarl(ToRegister(left), Immediate(shift_count));
- }
- break;
- case Token::SHR:
- if (shift_count == 0 && instr->can_deopt()) {
- __ testl(ToRegister(left), ToRegister(left));
- DeoptimizeIf(negative, instr->environment());
- } else {
- __ shrl(ToRegister(left), Immediate(shift_count));
- }
- break;
- case Token::SHL:
- if (shift_count != 0) {
- __ shll(ToRegister(left), Immediate(shift_count));
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
-
- if (right->IsConstantOperand()) {
- __ subl(ToRegister(left),
- Immediate(ToInteger32(LConstantOperand::cast(right))));
- } else if (right->IsRegister()) {
- __ subl(ToRegister(left), ToRegister(right));
- } else {
- __ subl(ToRegister(left), ToOperand(right));
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
- }
-}
-
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
- ASSERT(instr->result()->IsRegister());
- __ movl(ToRegister(instr->result()), Immediate(instr->value()));
-}
-
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
- ASSERT(instr->result()->IsDoubleRegister());
- XMMRegister res = ToDoubleRegister(instr->result());
- double v = instr->value();
- uint64_t int_val = BitCast<uint64_t, double>(v);
- // Use xor to produce +0.0 in a fast and compact way, but avoid to
- // do so if the constant is -0.0.
- if (int_val == 0) {
- __ xorpd(res, res);
- } else {
- Register tmp = ToRegister(instr->TempAt(0));
- __ Set(tmp, int_val);
- __ movq(res, tmp);
- }
-}
-
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
- ASSERT(instr->result()->IsRegister());
- __ Move(ToRegister(instr->result()), instr->value());
-}
-
-
-void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
- __ movq(result, FieldOperand(array, JSArray::kLengthOffset));
-}
-
-
-void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
- __ movq(result, FieldOperand(array, FixedArray::kLengthOffset));
-}
-
-
-void LCodeGen::DoExternalArrayLength(LExternalArrayLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
- __ movl(result, FieldOperand(array, ExternalPixelArray::kLengthOffset));
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- ASSERT(input.is(result));
- NearLabel done;
- // If the object is a smi return the object.
- __ JumpIfSmi(input, &done);
-
- // If the object is not a value type, return the object.
- __ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
- __ j(not_equal, &done);
- __ movq(result, FieldOperand(input, JSValue::kValueOffset));
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoBitNotI(LBitNotI* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->Equals(instr->result()));
- __ not_(ToRegister(input));
-}
-
-
-void LCodeGen::DoThrow(LThrow* instr) {
- __ push(ToRegister(instr->InputAt(0)));
- CallRuntime(Runtime::kThrow, 1, instr);
-
- if (FLAG_debug_code) {
- Comment("Unreachable code.");
- __ int3();
- }
-}
-
-
-void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
-
- if (right->IsConstantOperand()) {
- __ addl(ToRegister(left),
- Immediate(ToInteger32(LConstantOperand::cast(right))));
- } else if (right->IsRegister()) {
- __ addl(ToRegister(left), ToRegister(right));
- } else {
- __ addl(ToRegister(left), ToOperand(right));
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
- }
-}
-
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- XMMRegister left = ToDoubleRegister(instr->InputAt(0));
- XMMRegister right = ToDoubleRegister(instr->InputAt(1));
- XMMRegister result = ToDoubleRegister(instr->result());
- // All operations except MOD are computed in-place.
- ASSERT(instr->op() == Token::MOD || left.is(result));
- switch (instr->op()) {
- case Token::ADD:
- __ addsd(left, right);
- break;
- case Token::SUB:
- __ subsd(left, right);
- break;
- case Token::MUL:
- __ mulsd(left, right);
- break;
- case Token::DIV:
- __ divsd(left, right);
- break;
- case Token::MOD:
- __ PrepareCallCFunction(2);
- __ movsd(xmm0, left);
- ASSERT(right.is(xmm1));
- __ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ movsd(result, xmm0);
- break;
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(rdx));
- ASSERT(ToRegister(instr->InputAt(1)).is(rax));
- ASSERT(ToRegister(instr->result()).is(rax));
-
- TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-int LCodeGen::GetNextEmittedBlock(int block) {
- for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
- LLabel* label = chunk_->GetLabel(i);
- if (!label->HasReplacement()) return i;
- }
- return -1;
-}
-
-
-void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
- int next_block = GetNextEmittedBlock(current_block_);
- right_block = chunk_->LookupDestination(right_block);
- left_block = chunk_->LookupDestination(left_block);
-
- if (right_block == left_block) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
- } else if (right_block == next_block) {
- __ j(cc, chunk_->GetAssemblyLabel(left_block));
- } else {
- __ j(cc, chunk_->GetAssemblyLabel(left_block));
- if (cc != always) {
- __ jmp(chunk_->GetAssemblyLabel(right_block));
- }
- }
-}
-
-
-void LCodeGen::DoBranch(LBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Representation r = instr->hydrogen()->representation();
- if (r.IsInteger32()) {
- Register reg = ToRegister(instr->InputAt(0));
- __ testl(reg, reg);
- EmitBranch(true_block, false_block, not_zero);
- } else if (r.IsDouble()) {
- XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
- __ xorpd(xmm0, xmm0);
- __ ucomisd(reg, xmm0);
- EmitBranch(true_block, false_block, not_equal);
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->InputAt(0));
- HType type = instr->hydrogen()->type();
- if (type.IsBoolean()) {
- __ CompareRoot(reg, Heap::kTrueValueRootIndex);
- EmitBranch(true_block, false_block, equal);
- } else if (type.IsSmi()) {
- __ SmiCompare(reg, Smi::FromInt(0));
- EmitBranch(true_block, false_block, not_equal);
- } else {
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
- __ j(equal, false_label);
- __ CompareRoot(reg, Heap::kTrueValueRootIndex);
- __ j(equal, true_label);
- __ CompareRoot(reg, Heap::kFalseValueRootIndex);
- __ j(equal, false_label);
- __ Cmp(reg, Smi::FromInt(0));
- __ j(equal, false_label);
- __ JumpIfSmi(reg, true_label);
-
- // Test for double values. Plus/minus zero and NaN are false.
- NearLabel call_stub;
- __ CompareRoot(FieldOperand(reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_stub);
-
- // HeapNumber => false iff +0, -0, or NaN. These three cases set the
- // zero flag when compared to zero using ucomisd.
- __ xorpd(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
- __ j(zero, false_label);
- __ jmp(true_label);
-
- // The conversion stub doesn't cause garbage collections so it's
- // safe to not record a safepoint after the call.
- __ bind(&call_stub);
- ToBooleanStub stub;
- __ Pushad();
- __ push(reg);
- __ CallStub(&stub);
- __ testq(rax, rax);
- __ Popad();
- EmitBranch(true_block, false_block, not_zero);
- }
- }
-}
-
-
-void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
- block = chunk_->LookupDestination(block);
- int next_block = GetNextEmittedBlock(current_block_);
- if (block != next_block) {
- // Perform stack overflow check if this goto needs it before jumping.
- if (deferred_stack_check != NULL) {
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, chunk_->GetAssemblyLabel(block));
- __ jmp(deferred_stack_check->entry());
- deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
- } else {
- __ jmp(chunk_->GetAssemblyLabel(block));
- }
- }
-}
-
-
-void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
- __ Pushad();
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
- __ Popad();
-}
-
-
-void LCodeGen::DoGoto(LGoto* instr) {
- class DeferredStackCheck: public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- private:
- LGoto* instr_;
- };
-
- DeferredStackCheck* deferred = NULL;
- if (instr->include_stack_check()) {
- deferred = new DeferredStackCheck(this, instr);
- }
- EmitGoto(instr->block_id(), deferred);
-}
-
-
-inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
- Condition cond = no_condition;
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT:
- cond = equal;
- break;
- case Token::LT:
- cond = is_unsigned ? below : less;
- break;
- case Token::GT:
- cond = is_unsigned ? above : greater;
- break;
- case Token::LTE:
- cond = is_unsigned ? below_equal : less_equal;
- break;
- case Token::GTE:
- cond = is_unsigned ? above_equal : greater_equal;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
- return cond;
-}
-
-
-void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
- if (right->IsConstantOperand()) {
- int32_t value = ToInteger32(LConstantOperand::cast(right));
- if (left->IsRegister()) {
- __ cmpl(ToRegister(left), Immediate(value));
- } else {
- __ cmpl(ToOperand(left), Immediate(value));
- }
- } else if (right->IsRegister()) {
- __ cmpl(ToRegister(left), ToRegister(right));
- } else {
- __ cmpl(ToRegister(left), ToOperand(right));
- }
-}
-
-
-void LCodeGen::DoCmpID(LCmpID* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- LOperand* result = instr->result();
-
- NearLabel unordered;
- if (instr->is_double()) {
- // Don't base result on EFLAGS when a NaN is involved. Instead
- // jump to the unordered case, which produces a false value.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, &unordered);
- } else {
- EmitCmpI(left, right);
- }
-
- NearLabel done;
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
- __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
- __ j(cc, &done);
-
- __ bind(&unordered);
- __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- if (instr->is_double()) {
- // Don't base result on EFLAGS when a NaN is involved. Instead
- // jump to the false block.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
- } else {
- EmitCmpI(left, right);
- }
-
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
- EmitBranch(true_block, false_block, cc);
-}
-
-
-void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- Register result = ToRegister(instr->result());
-
- NearLabel different, done;
- __ cmpq(left, right);
- __ j(not_equal, &different);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done);
- __ bind(&different);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- __ cmpq(left, right);
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoIsNull(LIsNull* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- // If the expression is known to be a smi, then it's
- // definitely not null. Materialize false.
- // Consider adding other type and representation tests too.
- if (instr->hydrogen()->value()->type().IsSmi()) {
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- return;
- }
-
- __ CompareRoot(reg, Heap::kNullValueRootIndex);
- if (instr->is_strict()) {
- __ movl(result, Immediate(Heap::kTrueValueRootIndex));
- NearLabel load;
- __ j(equal, &load);
- __ movl(result, Immediate(Heap::kFalseValueRootIndex));
- __ bind(&load);
- __ LoadRootIndexed(result, result, 0);
- } else {
- NearLabel true_value, false_value, done;
- __ j(equal, &true_value);
- __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
- __ j(equal, &true_value);
- __ JumpIfSmi(reg, &false_value);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- Register scratch = result;
- __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, &true_value);
- __ bind(&false_value);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ jmp(&done);
- __ bind(&true_value);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
-
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- if (instr->hydrogen()->representation().IsSpecialization() ||
- instr->hydrogen()->type().IsSmi()) {
- // If the expression is known to untagged or smi, then it's definitely
- // not null, and it can't be a an undetectable object.
- // Jump directly to the false block.
- EmitGoto(false_block);
- return;
- }
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- __ CompareRoot(reg, Heap::kNullValueRootIndex);
- if (instr->is_strict()) {
- EmitBranch(true_block, false_block, equal);
- } else {
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ j(equal, true_label);
- __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
- __ j(equal, true_label);
- __ JumpIfSmi(reg, false_label);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- Register scratch = ToRegister(instr->TempAt(0));
- __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, not_zero);
- }
-}
-
-
-Condition LCodeGen::EmitIsObject(Register input,
- Label* is_not_object,
- Label* is_object) {
- ASSERT(!input.is(kScratchRegister));
-
- __ JumpIfSmi(input, is_not_object);
-
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- __ j(equal, is_object);
-
- __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined.
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, is_not_object);
-
- __ movzxbl(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
- __ cmpb(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
- __ j(below, is_not_object);
- __ cmpb(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
- return below_equal;
-}
-
-
-void LCodeGen::DoIsObject(LIsObject* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Label is_false, is_true, done;
-
- Condition true_cond = EmitIsObject(reg, &is_false, &is_true);
- __ j(true_cond, &is_true);
-
- __ bind(&is_false);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ jmp(&done);
-
- __ bind(&is_true);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition true_cond = EmitIsObject(reg, false_label, true_label);
-
- EmitBranch(true_block, false_block, true_cond);
-}
-
-
-void LCodeGen::DoIsSmi(LIsSmi* instr) {
- LOperand* input_operand = instr->InputAt(0);
- Register result = ToRegister(instr->result());
- if (input_operand->IsRegister()) {
- Register input = ToRegister(input_operand);
- __ CheckSmiToIndicator(result, input);
- } else {
- Operand input = ToOperand(instr->InputAt(0));
- __ CheckSmiToIndicator(result, input);
- }
- // result is zero if input is a smi, and one otherwise.
- ASSERT(Heap::kFalseValueRootIndex == Heap::kTrueValueRootIndex + 1);
- __ LoadRootIndexed(result, result, Heap::kTrueValueRootIndex);
-}
-
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Condition is_smi;
- if (instr->InputAt(0)->IsRegister()) {
- Register input = ToRegister(instr->InputAt(0));
- is_smi = masm()->CheckSmi(input);
- } else {
- Operand input = ToOperand(instr->InputAt(0));
- is_smi = masm()->CheckSmi(input);
- }
- EmitBranch(true_block, false_block, is_smi);
-}
-
-
-static InstanceType TestType(HHasInstanceType* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == FIRST_TYPE) return to;
- ASSERT(from == to || to == LAST_TYPE);
- return from;
-}
-
-
-static Condition BranchCondition(HHasInstanceType* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == to) return equal;
- if (to == LAST_TYPE) return above_equal;
- if (from == FIRST_TYPE) return below_equal;
- UNREACHABLE();
- return equal;
-}
-
-
-void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ testl(input, Immediate(kSmiTagMask));
- NearLabel done, is_false;
- __ j(zero, &is_false);
- __ CmpObjectType(input, TestType(instr->hydrogen()), result);
- __ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done);
- __ bind(&is_false);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- __ JumpIfSmi(input, false_label);
-
- __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
- EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
-}
-
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- if (FLAG_debug_code) {
- __ AbortIfNotString(input);
- }
-
- __ movl(result, FieldOperand(input, String::kHashFieldOffset));
- ASSERT(String::kHashShift >= kSmiTagSize);
- __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ testl(FieldOperand(input, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- NearLabel done;
- __ j(zero, &done);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
- LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ testl(FieldOperand(input, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, equal);
-}
-
-
-// Branches to a label or falls through with the answer in the z flag.
-// Trashes the temp register and possibly input (if it and temp are aliased).
-void LCodeGen::EmitClassOfTest(Label* is_true,
- Label* is_false,
- Handle<String> class_name,
- Register input,
- Register temp) {
- __ JumpIfSmi(input, is_false);
- __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
- __ j(below, is_false);
-
- // Map is now in temp.
- // Functions have class 'Function'.
- __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
- if (class_name->IsEqualTo(CStrVector("Function"))) {
- __ j(equal, is_true);
- } else {
- __ j(equal, is_false);
- }
-
- // Check if the constructor in the map is a function.
- __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
-
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-
- // Objects with a non-function constructor have class 'Object'.
- __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
- if (class_name->IsEqualTo(CStrVector("Object"))) {
- __ j(not_equal, is_true);
- } else {
- __ j(not_equal, is_false);
- }
-
- // temp now contains the constructor function. Grab the
- // instance class name from there.
- __ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ movq(temp, FieldOperand(temp,
- SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is a symbol because it's a literal.
- // The name in the constructor is a symbol because of the way the context is
- // booted. This routine isn't expected to work for random API-created
- // classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are symbols it is sufficient to use an identity
- // comparison.
- ASSERT(class_name->IsSymbol());
- __ Cmp(temp, class_name);
- // End with the answer in the z flag.
-}
-
-
-void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- ASSERT(input.is(result));
- Register temp = ToRegister(instr->TempAt(0));
- Handle<String> class_name = instr->hydrogen()->class_name();
- NearLabel done;
- Label is_true, is_false;
-
- EmitClassOfTest(&is_true, &is_false, class_name, input, temp);
-
- __ j(not_equal, &is_false);
-
- __ bind(&is_true);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done);
-
- __ bind(&is_false);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
- Handle<String> class_name = instr->hydrogen()->class_name();
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- EmitClassOfTest(true_label, false_label, class_name, input, temp);
-
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- int true_block = instr->true_block_id();
- int false_block = instr->false_block_id();
-
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- InstanceofStub stub(InstanceofStub::kNoFlags);
- __ push(ToRegister(instr->InputAt(0)));
- __ push(ToRegister(instr->InputAt(1)));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- NearLabel true_value, done;
- __ testq(rax, rax);
- __ j(zero, &true_value);
- __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
- __ jmp(&done);
- __ bind(&true_value);
- __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- InstanceofStub stub(InstanceofStub::kNoFlags);
- __ push(ToRegister(instr->InputAt(0)));
- __ push(ToRegister(instr->InputAt(1)));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ testq(rax, rax);
- EmitBranch(true_block, false_block, zero);
-}
-
-
-void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
- public:
- DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
- }
-
- Label* map_check() { return &map_check_; }
-
- private:
- LInstanceOfKnownGlobal* instr_;
- Label map_check_;
- };
-
-
- DeferredInstanceOfKnownGlobal* deferred;
- deferred = new DeferredInstanceOfKnownGlobal(this, instr);
-
- Label done, false_result;
- Register object = ToRegister(instr->InputAt(0));
-
- // A Smi is not an instance of anything.
- __ JumpIfSmi(object, &false_result);
-
- // This is the inlined call site instanceof cache. The two occurences of the
- // hole value will be patched to the last map/result pair generated by the
- // instanceof stub.
- NearLabel cache_miss;
- // Use a temp register to avoid memory operands with variable lengths.
- Register map = ToRegister(instr->TempAt(0));
- __ movq(map, FieldOperand(object, HeapObject::kMapOffset));
- __ bind(deferred->map_check()); // Label for calculating code patching.
- __ movq(kScratchRegister, factory()->the_hole_value(),
- RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(map, kScratchRegister); // Patched to cached map.
- __ j(not_equal, &cache_miss);
- // Patched to load either true or false.
- __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
-#ifdef DEBUG
- // Check that the code size between patch label and patch sites is invariant.
- Label end_of_patched_code;
- __ bind(&end_of_patched_code);
- ASSERT(true);
-#endif
- __ jmp(&done);
-
- // The inlined call site cache did not match. Check for null and string
- // before calling the deferred code.
- __ bind(&cache_miss); // Null is not an instance of anything.
- __ CompareRoot(object, Heap::kNullValueRootIndex);
- __ j(equal, &false_result);
-
- // String values are not instances of anything.
- __ JumpIfNotString(object, kScratchRegister, deferred->entry());
-
- __ bind(&false_result);
- __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
-
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
- __ PushSafepointRegisters();
- InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
- InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
- InstanceofStub stub(flags);
-
- __ push(ToRegister(instr->InputAt(0)));
- __ Push(instr->function());
- Register temp = ToRegister(instr->TempAt(0));
- ASSERT(temp.is(rdi));
- static const int kAdditionalDelta = 16;
- int delta =
- masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
- __ movq(temp, Immediate(delta));
- __ push(temp);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ movq(kScratchRegister, rax);
- __ PopSafepointRegisters();
- __ testq(kScratchRegister, kScratchRegister);
- Label load_false;
- Label done;
- __ j(not_zero, &load_false);
- __ LoadRoot(rax, Heap::kTrueValueRootIndex);
- __ jmp(&done);
- __ bind(&load_false);
- __ LoadRoot(rax, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
- Token::Value op = instr->op();
-
- Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- Condition condition = TokenToCondition(op, false);
- if (op == Token::GT || op == Token::LTE) {
- condition = ReverseCondition(condition);
- }
- NearLabel true_value, done;
- __ testq(rax, rax);
- __ j(condition, &true_value);
- __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
- __ jmp(&done);
- __ bind(&true_value);
- __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
- Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- // The compare stub expects compare condition and the input operands
- // reversed for GT and LTE.
- Condition condition = TokenToCondition(op, false);
- if (op == Token::GT || op == Token::LTE) {
- condition = ReverseCondition(condition);
- }
- __ testq(rax, rax);
- EmitBranch(true_block, false_block, condition);
-}
-
-
-void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace) {
- // Preserve the return value on the stack and rely on the runtime
- // call to return the value in the same register.
- __ push(rax);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- __ movq(rsp, rbp);
- __ pop(rbp);
- __ Ret((ParameterCount() + 1) * kPointerSize, rcx);
-}
-
-
-void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
- Register result = ToRegister(instr->result());
- if (result.is(rax)) {
- __ load_rax(instr->hydrogen()->cell().location(),
- RelocInfo::GLOBAL_PROPERTY_CELL);
- } else {
- __ movq(result, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
- __ movq(result, Operand(result, 0));
- }
- if (instr->hydrogen()->check_hole_value()) {
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
- }
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->global_object()).is(rax));
- ASSERT(ToRegister(instr->result()).is(rax));
-
- __ Move(rcx, instr->name());
- RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
- RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr);
-}
-
-
-void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register value = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
- ASSERT(!value.is(temp));
- bool check_hole = instr->hydrogen()->check_hole_value();
- if (!check_hole && value.is(rax)) {
- __ store_rax(instr->hydrogen()->cell().location(),
- RelocInfo::GLOBAL_PROPERTY_CELL);
- return;
- }
- // If the cell we are storing to contains the hole it could have
- // been deleted from the property dictionary. In that case, we need
- // to update the property details in the property dictionary to mark
- // it as no longer deleted. We deoptimize in that case.
- __ movq(temp, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
- if (check_hole) {
- __ CompareRoot(Operand(temp, 0), Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
- }
- __ movq(Operand(temp, 0), value);
-}
-
-
-void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->global_object()).is(rdx));
- ASSERT(ToRegister(instr->value()).is(rax));
-
- __ Move(rcx, instr->name());
- Handle<Code> ic = isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
-
-void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ movq(result, ContextOperand(context, instr->slot_index()));
-}
-
-
-void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register value = ToRegister(instr->value());
- __ movq(ContextOperand(context, instr->slot_index()), value);
- if (instr->needs_write_barrier()) {
- int offset = Context::SlotOffset(instr->slot_index());
- Register scratch = ToRegister(instr->TempAt(0));
- __ RecordWrite(context, offset, value, scratch);
- }
-}
-
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Register object = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
- __ movq(result, FieldOperand(object, instr->hydrogen()->offset()));
- } else {
- __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ movq(result, FieldOperand(result, instr->hydrogen()->offset()));
- }
-}
-
-
-void LCodeGen::EmitLoadField(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name) {
- LookupResult lookup;
- type->LookupInDescriptors(NULL, *name, &lookup);
- ASSERT(lookup.IsProperty() && lookup.type() == FIELD);
- int index = lookup.GetLocalFieldIndexFromMap(*type);
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- __ movq(result, FieldOperand(object, offset + type->instance_size()));
- } else {
- // Non-negative property indices are in the properties array.
- __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ movq(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
- }
-}
-
-
-void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
-
- int map_count = instr->hydrogen()->types()->length();
- Handle<String> name = instr->hydrogen()->name();
-
- if (map_count == 0) {
- ASSERT(instr->hydrogen()->need_generic());
- __ Move(rcx, instr->hydrogen()->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- } else {
- NearLabel done;
- for (int i = 0; i < map_count - 1; ++i) {
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- NearLabel next;
- __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
- __ j(not_equal, &next);
- EmitLoadField(result, object, map, name);
- __ jmp(&done);
- __ bind(&next);
- }
- Handle<Map> map = instr->hydrogen()->types()->last();
- __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
- if (instr->hydrogen()->need_generic()) {
- NearLabel generic;
- __ j(not_equal, &generic);
- EmitLoadField(result, object, map, name);
- __ jmp(&done);
- __ bind(&generic);
- __ Move(rcx, instr->hydrogen()->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- } else {
- DeoptimizeIf(not_equal, instr->environment());
- EmitLoadField(result, object, map, name);
- }
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(rax));
- ASSERT(ToRegister(instr->result()).is(rax));
-
- __ Move(rcx, instr->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Register function = ToRegister(instr->function());
- Register result = ToRegister(instr->result());
-
- // Check that the function really is a function.
- __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
- DeoptimizeIf(not_equal, instr->environment());
-
- // Check whether the function has an instance prototype.
- NearLabel non_instance;
- __ testb(FieldOperand(result, Map::kBitFieldOffset),
- Immediate(1 << Map::kHasNonInstancePrototype));
- __ j(not_zero, &non_instance);
-
- // Get the prototype or initial map from the function.
- __ movq(result,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check that the function has a prototype or an initial map.
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
-
- // If the function does not have an initial map, we're done.
- NearLabel done;
- __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
- __ j(not_equal, &done);
-
- // Get the prototype from the initial map.
- __ movq(result, FieldOperand(result, Map::kPrototypeOffset));
- __ jmp(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in the function's map.
- __ bind(&non_instance);
- __ movq(result, FieldOperand(result, Map::kConstructorOffset));
-
- // All done.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->InputAt(0));
- __ movq(result, FieldOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- NearLabel done;
- __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(equal, &done);
- __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
- Heap::kFixedCOWArrayMapRootIndex);
- __ j(equal, &done);
- Register temp((result.is(rax)) ? rbx : rax);
- __ push(temp);
- __ movq(temp, FieldOperand(result, HeapObject::kMapOffset));
- __ movzxbq(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ subq(temp, Immediate(FIRST_EXTERNAL_ARRAY_TYPE));
- __ cmpq(temp, Immediate(kExternalArrayTypeCount));
- __ pop(temp);
- __ Check(below, "Check for fast elements failed.");
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadExternalArrayPointer(
- LLoadExternalArrayPointer* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->InputAt(0));
- __ movq(result, FieldOperand(input,
- ExternalPixelArray::kExternalPointerOffset));
-}
-
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- Register arguments = ToRegister(instr->arguments());
- Register length = ToRegister(instr->length());
- Register result = ToRegister(instr->result());
-
- if (instr->index()->IsRegister()) {
- __ subl(length, ToRegister(instr->index()));
- } else {
- __ subl(length, ToOperand(instr->index()));
- }
- DeoptimizeIf(below_equal, instr->environment());
-
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them add one more.
- __ movq(result, Operand(arguments, length, times_pointer_size, kPointerSize));
-}
-
-
-void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
- Register elements = ToRegister(instr->elements());
- Register key = ToRegister(instr->key());
- Register result = ToRegister(instr->result());
- ASSERT(result.is(elements));
-
- // Load the result.
- __ movq(result, FieldOperand(elements,
- key,
- times_pointer_size,
- FixedArray::kHeaderSize));
-
- // Check for the hole value.
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
-}
-
-
-void LCodeGen::DoLoadKeyedSpecializedArrayElement(
- LLoadKeyedSpecializedArrayElement* instr) {
- Register external_pointer = ToRegister(instr->external_pointer());
- Register key = ToRegister(instr->key());
- ExternalArrayType array_type = instr->array_type();
- if (array_type == kExternalFloatArray) {
- XMMRegister result(ToDoubleRegister(instr->result()));
- __ movss(result, Operand(external_pointer, key, times_4, 0));
- __ cvtss2sd(result, result);
- } else {
- Register result(ToRegister(instr->result()));
- switch (array_type) {
- case kExternalByteArray:
- __ movsxbq(result, Operand(external_pointer, key, times_1, 0));
- break;
- case kExternalUnsignedByteArray:
- case kExternalPixelArray:
- __ movzxbq(result, Operand(external_pointer, key, times_1, 0));
- break;
- case kExternalShortArray:
- __ movsxwq(result, Operand(external_pointer, key, times_2, 0));
- break;
- case kExternalUnsignedShortArray:
- __ movzxwq(result, Operand(external_pointer, key, times_2, 0));
- break;
- case kExternalIntArray:
- __ movsxlq(result, Operand(external_pointer, key, times_4, 0));
- break;
- case kExternalUnsignedIntArray:
- __ movl(result, Operand(external_pointer, key, times_4, 0));
- __ testl(result, result);
- // TODO(danno): we could be more clever here, perhaps having a special
- // version of the stub that detects if the overflow case actually
- // happens, and generate code that returns a double rather than int.
- DeoptimizeIf(negative, instr->environment());
- break;
- case kExternalFloatArray:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(rdx));
- ASSERT(ToRegister(instr->key()).is(rax));
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- Register result = ToRegister(instr->result());
-
- // Check for arguments adapter frame.
- NearLabel done, adapted;
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adapted);
-
- // No arguments adaptor frame.
- __ movq(result, rbp);
- __ jmp(&done);
-
- // Arguments adaptor frame present.
- __ bind(&adapted);
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- // Result is the frame pointer for the frame if not adapted and for the real
- // frame below the adaptor frame if adapted.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Register result = ToRegister(instr->result());
-
- NearLabel done;
-
- // If no arguments adaptor frame the number of arguments is fixed.
- if (instr->InputAt(0)->IsRegister()) {
- __ cmpq(rbp, ToRegister(instr->InputAt(0)));
- } else {
- __ cmpq(rbp, ToOperand(instr->InputAt(0)));
- }
- __ movq(result, Immediate(scope()->num_parameters()));
- __ j(equal, &done);
-
- // Arguments adaptor frame present. Get argument length from there.
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(result, Operand(result,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToInteger32(result, result);
-
- // Argument length is in result register.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
- ASSERT(receiver.is(rax)); // Used for parameter count.
- ASSERT(function.is(rdi)); // Required by InvokeFunction.
- ASSERT(ToRegister(instr->result()).is(rax));
-
- // If the receiver is null or undefined, we have to pass the global object
- // as a receiver.
- NearLabel global_object, receiver_ok;
- __ CompareRoot(receiver, Heap::kNullValueRootIndex);
- __ j(equal, &global_object);
- __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
- __ j(equal, &global_object);
-
- // The receiver should be a JS object.
- Condition is_smi = __ CheckSmi(receiver);
- DeoptimizeIf(is_smi, instr->environment());
- __ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, kScratchRegister);
- DeoptimizeIf(below, instr->environment());
- __ jmp(&receiver_ok);
-
- __ bind(&global_object);
- // TODO(kmillikin): We have a hydrogen value for the global object. See
- // if it's better to use it than to explicitly fetch it from the context
- // here.
- __ movq(receiver, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ movq(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX));
- __ bind(&receiver_ok);
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- const uint32_t kArgumentsLimit = 1 * KB;
- __ cmpq(length, Immediate(kArgumentsLimit));
- DeoptimizeIf(above, instr->environment());
-
- __ push(receiver);
- __ movq(receiver, length);
-
- // Loop through the arguments pushing them onto the execution
- // stack.
- NearLabel invoke, loop;
- // length is a small non-negative integer, due to the test above.
- __ testl(length, length);
- __ j(zero, &invoke);
- __ bind(&loop);
- __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
- __ decl(length);
- __ j(not_zero, &loop);
-
- // Invoke the function.
- __ bind(&invoke);
- ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
- LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
- RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
- SafepointGenerator safepoint_generator(this,
- pointers,
- env->deoptimization_index());
- v8::internal::ParameterCount actual(rax);
- __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
-}
-
-
-void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->InputAt(0);
- if (argument->IsConstantOperand()) {
- EmitPushConstantOperand(argument);
- } else if (argument->IsRegister()) {
- __ push(ToRegister(argument));
- } else {
- ASSERT(!argument->IsDoubleRegister());
- __ push(ToOperand(argument));
- }
-}
-
-
-void LCodeGen::DoContext(LContext* instr) {
- Register result = ToRegister(instr->result());
- __ movq(result, Operand(rbp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoOuterContext(LOuterContext* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ movq(result,
- Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ movq(result, FieldOperand(result, JSFunction::kContextOffset));
-}
-
-
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Register result = ToRegister(instr->result());
- __ movq(result, GlobalObjectOperand());
-}
-
-
-void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global());
- Register result = ToRegister(instr->result());
- __ movq(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
-}
-
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int arity,
- LInstruction* instr) {
- // Change context if needed.
- bool change_context =
- (info()->closure()->context() != function->context()) ||
- scope()->contains_with() ||
- (scope()->num_heap_slots() > 0);
- if (change_context) {
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- }
-
- // Set rax to arguments count if adaption is not needed. Assumes that rax
- // is available to write to at this point.
- if (!function->NeedsArgumentsAdaption()) {
- __ Set(rax, arity);
- }
-
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
-
- // Invoke function.
- if (*function == *info()->closure()) {
- __ CallSelf();
- } else {
- __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- }
-
- // Setup deoptimization.
- RegisterLazyDeoptimization(instr);
-
- // Restore context.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
- ASSERT(ToRegister(instr->result()).is(rax));
- __ Move(rdi, instr->function());
- CallKnownFunction(instr->function(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- Register input_reg = ToRegister(instr->InputAt(0));
- __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(not_equal, instr->environment());
-
- Label done;
- Register tmp = input_reg.is(rax) ? rcx : rax;
- Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
-
- // Preserve the value of all registers.
- __ PushSafepointRegisters();
-
- Label negative;
- __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- // Check the sign of the argument. If the argument is positive, just
- // return it. We do not need to patch the stack since |input| and
- // |result| are the same register and |input| will be restored
- // unchanged by popping safepoint registers.
- __ testl(tmp, Immediate(HeapNumber::kSignMask));
- __ j(not_zero, &negative);
- __ jmp(&done);
-
- __ bind(&negative);
-
- Label allocated, slow;
- __ AllocateHeapNumber(tmp, tmp2, &slow);
- __ jmp(&allocated);
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
- // Set the pointer to the new heap number in tmp.
- if (!tmp.is(rax)) {
- __ movq(tmp, rax);
- }
-
- // Restore input_reg after call to runtime.
- __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
-
- __ bind(&allocated);
- __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ shl(tmp2, Immediate(1));
- __ shr(tmp2, Immediate(1));
- __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
- __ StoreToSafepointRegisterSlot(input_reg, tmp);
-
- __ bind(&done);
- __ PopSafepointRegisters();
-}
-
-
-void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
- Register input_reg = ToRegister(instr->InputAt(0));
- __ testl(input_reg, input_reg);
- Label is_positive;
- __ j(not_sign, &is_positive);
- __ negl(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr->environment());
- __ bind(&is_positive);
-}
-
-
-void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
- // Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
- public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
- LUnaryMathOperation* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
- }
- private:
- LUnaryMathOperation* instr_;
- };
-
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
- Representation r = instr->hydrogen()->value()->representation();
-
- if (r.IsDouble()) {
- XMMRegister scratch = xmm0;
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
- __ xorpd(scratch, scratch);
- __ subsd(scratch, input_reg);
- __ andpd(input_reg, scratch);
- } else if (r.IsInteger32()) {
- EmitIntegerMathAbs(instr);
- } else { // Tagged case.
- DeferredMathAbsTaggedHeapNumber* deferred =
- new DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input_reg = ToRegister(instr->InputAt(0));
- // Smi check.
- __ JumpIfNotSmi(input_reg, deferred->entry());
- EmitIntegerMathAbs(instr);
- __ bind(deferred->exit());
- }
-}
-
-
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- XMMRegister xmm_scratch = xmm0;
- Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
- __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
- __ ucomisd(input_reg, xmm_scratch);
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(below_equal, instr->environment());
- } else {
- DeoptimizeIf(below, instr->environment());
- }
-
- // Use truncating instruction (OK because input is positive).
- __ cvttsd2si(output_reg, input_reg);
-
- // Overflow is signalled with minint.
- __ cmpl(output_reg, Immediate(0x80000000));
- DeoptimizeIf(equal, instr->environment());
-}
-
-
-void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- const XMMRegister xmm_scratch = xmm0;
- Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
-
- // xmm_scratch = 0.5
- __ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE);
- __ movq(xmm_scratch, kScratchRegister);
-
- // input = input + 0.5
- __ addsd(input_reg, xmm_scratch);
-
- // We need to return -0 for the input range [-0.5, 0[, otherwise
- // compute Math.floor(value + 0.5).
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(below_equal, instr->environment());
- } else {
- // If we don't need to bailout on -0, we check only bailout
- // on negative inputs.
- __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
- __ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(below, instr->environment());
- }
-
- // Compute Math.floor(value + 0.5).
- // Use truncating instruction (OK because input is positive).
- __ cvttsd2si(output_reg, input_reg);
-
- // Overflow is signalled with minint.
- __ cmpl(output_reg, Immediate(0x80000000));
- DeoptimizeIf(equal, instr->environment());
-}
-
-
-void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
- ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
- __ sqrtsd(input_reg, input_reg);
-}
-
-
-void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
- XMMRegister xmm_scratch = xmm0;
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
- ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
- __ xorpd(xmm_scratch, xmm_scratch);
- __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
- __ sqrtsd(input_reg, input_reg);
-}
-
-
-void LCodeGen::DoPower(LPower* instr) {
- LOperand* left = instr->InputAt(0);
- XMMRegister left_reg = ToDoubleRegister(left);
- ASSERT(!left_reg.is(xmm1));
- LOperand* right = instr->InputAt(1);
- XMMRegister result_reg = ToDoubleRegister(instr->result());
- Representation exponent_type = instr->hydrogen()->right()->representation();
- if (exponent_type.IsDouble()) {
- __ PrepareCallCFunction(2);
- // Move arguments to correct registers
- __ movsd(xmm0, left_reg);
- ASSERT(ToDoubleRegister(right).is(xmm1));
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 2);
- } else if (exponent_type.IsInteger32()) {
- __ PrepareCallCFunction(2);
- // Move arguments to correct registers: xmm0 and edi (not rdi).
- // On Windows, the registers are xmm0 and edx.
- __ movsd(xmm0, left_reg);
-#ifdef _WIN64
- ASSERT(ToRegister(right).is(rdx));
-#else
- ASSERT(ToRegister(right).is(rdi));
-#endif
- __ CallCFunction(
- ExternalReference::power_double_int_function(isolate()), 2);
- } else {
- ASSERT(exponent_type.IsTagged());
- Register right_reg = ToRegister(right);
-
- Label non_smi, call;
- __ JumpIfNotSmi(right_reg, &non_smi);
- __ SmiToInteger32(right_reg, right_reg);
- __ cvtlsi2sd(xmm1, right_reg);
- __ jmp(&call);
-
- __ bind(&non_smi);
- __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE , kScratchRegister);
- DeoptimizeIf(not_equal, instr->environment());
- __ movsd(xmm1, FieldOperand(right_reg, HeapNumber::kValueOffset));
-
- __ bind(&call);
- __ PrepareCallCFunction(2);
- // Move arguments to correct registers xmm0 and xmm1.
- __ movsd(xmm0, left_reg);
- // Right argument is already in xmm1.
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 2);
- }
- // Return value is in xmm0.
- __ movsd(result_reg, xmm0);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathAbs:
- DoMathAbs(instr);
- break;
- case kMathFloor:
- DoMathFloor(instr);
- break;
- case kMathRound:
- DoMathRound(instr);
- break;
- case kMathSqrt:
- DoMathSqrt(instr);
- break;
- case kMathPowHalf:
- DoMathPowHalf(instr);
- break;
- case kMathCos:
- DoMathCos(instr);
- break;
- case kMathSin:
- DoMathSin(instr);
- break;
- case kMathLog:
- DoMathLog(instr);
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- ASSERT(ToRegister(instr->key()).is(rcx));
- ASSERT(ToRegister(instr->result()).is(rax));
-
- int arity = instr->arity();
- Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(
- arity, NOT_IN_LOOP);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallNamed(LCallNamed* instr) {
- ASSERT(ToRegister(instr->result()).is(rax));
-
- int arity = instr->arity();
- Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
- arity, NOT_IN_LOOP);
- __ Move(rcx, instr->name());
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- ASSERT(ToRegister(instr->result()).is(rax));
-
- int arity = instr->arity();
- CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ Drop(1);
-}
-
-
-void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(rax));
- int arity = instr->arity();
- Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
- arity, NOT_IN_LOOP);
- __ Move(rcx, instr->name());
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(rax));
- __ Move(rdi, instr->target());
- CallKnownFunction(instr->target(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoCallNew(LCallNew* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
- ASSERT(ToRegister(instr->result()).is(rax));
-
- Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
- __ Set(rax, instr->arity());
- CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
- Register object = ToRegister(instr->object());
- Register value = ToRegister(instr->value());
- int offset = instr->offset();
-
- if (!instr->transition().is_null()) {
- __ Move(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
- }
-
- // Do the store.
- if (instr->is_in_object()) {
- __ movq(FieldOperand(object, offset), value);
- if (instr->needs_write_barrier()) {
- Register temp = ToRegister(instr->TempAt(0));
- // Update the write barrier for the object for in-object properties.
- __ RecordWrite(object, offset, value, temp);
- }
- } else {
- Register temp = ToRegister(instr->TempAt(0));
- __ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
- __ movq(FieldOperand(temp, offset), value);
- if (instr->needs_write_barrier()) {
- // Update the write barrier for the properties array.
- // object is used as a scratch register.
- __ RecordWrite(temp, offset, value, object);
- }
- }
-}
-
-
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(rdx));
- ASSERT(ToRegister(instr->value()).is(rax));
-
- __ Move(rcx, instr->hydrogen()->name());
- Handle<Code> ic = info_->is_strict()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoStoreKeyedSpecializedArrayElement(
- LStoreKeyedSpecializedArrayElement* instr) {
- Register external_pointer = ToRegister(instr->external_pointer());
- Register key = ToRegister(instr->key());
- ExternalArrayType array_type = instr->array_type();
- if (array_type == kExternalFloatArray) {
- XMMRegister value(ToDoubleRegister(instr->value()));
- __ cvtsd2ss(value, value);
- __ movss(Operand(external_pointer, key, times_4, 0), value);
- } else {
- Register value(ToRegister(instr->value()));
- switch (array_type) {
- case kExternalPixelArray:
- { // Clamp the value to [0..255].
- NearLabel done;
- __ testl(value, Immediate(0xFFFFFF00));
- __ j(zero, &done);
- __ setcc(negative, value); // 1 if negative, 0 if positive.
- __ decb(value); // 0 if negative, 255 if positive.
- __ bind(&done);
- __ movb(Operand(external_pointer, key, times_1, 0), value);
- }
- break;
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ movb(Operand(external_pointer, key, times_1, 0), value);
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ movw(Operand(external_pointer, key, times_2, 0), value);
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ movl(Operand(external_pointer, key, times_4, 0), value);
- break;
- case kExternalFloatArray:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- if (instr->length()->IsRegister()) {
- __ cmpq(ToRegister(instr->index()), ToRegister(instr->length()));
- } else {
- __ cmpq(ToRegister(instr->index()), ToOperand(instr->length()));
- }
- DeoptimizeIf(above_equal, instr->environment());
-}
-
-
-void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->object());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
-
- // Do the store.
- if (instr->key()->IsConstantOperand()) {
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- int offset =
- ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
- __ movq(FieldOperand(elements, offset), value);
- } else {
- __ movq(FieldOperand(elements,
- key,
- times_pointer_size,
- FixedArray::kHeaderSize),
- value);
- }
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- // Compute address of modified element and store it into key register.
- __ lea(key, FieldOperand(elements,
- key,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ RecordWrite(elements, key, value);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(rdx));
- ASSERT(ToRegister(instr->key()).is(rcx));
- ASSERT(ToRegister(instr->value()).is(rax));
-
- Handle<Code> ic = info_->is_strict()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
- public:
- DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- private:
- LStringCharCodeAt* instr_;
- };
-
- Register string = ToRegister(instr->string());
- Register index = no_reg;
- int const_index = -1;
- if (instr->index()->IsConstantOperand()) {
- const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
- if (!Smi::IsValid(const_index)) {
- // Guaranteed to be out of bounds because of the assert above.
- // So the bounds check that must dominate this instruction must
- // have deoptimized already.
- if (FLAG_debug_code) {
- __ Abort("StringCharCodeAt: out of bounds index.");
- }
- // No code needs to be generated.
- return;
- }
- } else {
- index = ToRegister(instr->index());
- }
- Register result = ToRegister(instr->result());
-
- DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(this, instr);
-
- NearLabel flat_string, ascii_string, done;
-
- // Fetch the instance type of the receiver into result register.
- __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for non-sequential strings.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(result, Immediate(kStringRepresentationMask));
- __ j(zero, &flat_string);
-
- // Handle cons strings and go to deferred code for the rest.
- __ testb(result, Immediate(kIsConsStringMask));
- __ j(zero, deferred->entry());
-
- // ConsString.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
- Heap::kEmptyStringRootIndex);
- __ j(not_equal, deferred->entry());
- // Get the first of the two strings and load its instance type.
- __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
- __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
- // If the first cons component is also non-flat, then go to runtime.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(result, Immediate(kStringRepresentationMask));
- __ j(not_zero, deferred->entry());
-
- // Check for ASCII or two-byte string.
- __ bind(&flat_string);
- STATIC_ASSERT(kAsciiStringTag != 0);
- __ testb(result, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string);
-
- // Two-byte string.
- // Load the two-byte character code into the result register.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- if (instr->index()->IsConstantOperand()) {
- __ movzxwl(result,
- FieldOperand(string,
- SeqTwoByteString::kHeaderSize +
- (kUC16Size * const_index)));
- } else {
- __ movzxwl(result, FieldOperand(string,
- index,
- times_2,
- SeqTwoByteString::kHeaderSize));
- }
- __ jmp(&done);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- if (instr->index()->IsConstantOperand()) {
- __ movzxbl(result, FieldOperand(string,
- SeqAsciiString::kHeaderSize + const_index));
- } else {
- __ movzxbl(result, FieldOperand(string,
- index,
- times_1,
- SeqAsciiString::kHeaderSize));
- }
- __ bind(&done);
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, 0);
-
- __ PushSafepointRegisters();
- __ push(string);
- // Push the index as a smi. This is safe because of the checks in
- // DoStringCharCodeAt above.
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
- if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- __ Push(Smi::FromInt(const_index));
- } else {
- Register index = ToRegister(instr->index());
- __ Integer32ToSmi(index, index);
- __ push(index);
- }
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(rax);
- }
- __ SmiToInteger32(rax, rax);
- __ StoreToSafepointRegisterSlot(result, rax);
- __ PopSafepointRegisters();
-}
-
-
-void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
- public:
- DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- private:
- LStringCharFromCode* instr_;
- };
-
- DeferredStringCharFromCode* deferred =
- new DeferredStringCharFromCode(this, instr);
-
- ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
- ASSERT(!char_code.is(result));
-
- __ cmpl(char_code, Immediate(String::kMaxAsciiCharCode));
- __ j(above, deferred->entry());
- __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
- __ movq(result, FieldOperand(result,
- char_code, times_pointer_size,
- FixedArray::kHeaderSize));
- __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
- __ j(equal, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, 0);
-
- __ PushSafepointRegisters();
- __ Integer32ToSmi(char_code, char_code);
- __ push(char_code);
- __ CallRuntimeSaveDoubles(Runtime::kCharFromCode);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 1, Safepoint::kNoDeoptimizationIndex);
- __ StoreToSafepointRegisterSlot(result, rax);
- __ PopSafepointRegisters();
-}
-
-
-void LCodeGen::DoStringLength(LStringLength* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- __ movq(result, FieldOperand(string, String::kLengthOffset));
-}
-
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister() || input->IsStackSlot());
- LOperand* output = instr->result();
- ASSERT(output->IsDoubleRegister());
- if (input->IsRegister()) {
- __ cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
- } else {
- __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
- }
-}
-
-
-void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
-
- __ Integer32ToSmi(reg, reg);
-}
-
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
- public:
- DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- private:
- LNumberTagD* instr_;
- };
-
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
- Register reg = ToRegister(instr->result());
- Register tmp = ToRegister(instr->TempAt(0));
-
- DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
- if (FLAG_inline_new) {
- __ AllocateHeapNumber(reg, tmp, deferred->entry());
- } else {
- __ jmp(deferred->entry());
- }
- __ bind(deferred->exit());
- __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
-}
-
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register reg = ToRegister(instr->result());
- __ Move(reg, Smi::FromInt(0));
-
- __ PushSafepointRegisters();
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
- // Ensure that value in rax survives popping registers.
- __ movq(kScratchRegister, rax);
- __ PopSafepointRegisters();
- __ movq(reg, kScratchRegister);
-}
-
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
- Register input = ToRegister(instr->InputAt(0));
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ Integer32ToSmi(input, input);
-}
-
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
- Register input = ToRegister(instr->InputAt(0));
- if (instr->needs_check()) {
- Condition is_smi = __ CheckSmi(input);
- DeoptimizeIf(NegateCondition(is_smi), instr->environment());
- }
- __ SmiToInteger32(input, input);
-}
-
-
-void LCodeGen::EmitNumberUntagD(Register input_reg,
- XMMRegister result_reg,
- LEnvironment* env) {
- NearLabel load_smi, heap_number, done;
-
- // Smi check.
- __ JumpIfSmi(input_reg, &load_smi);
-
- // Heap number map check.
- __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(equal, &heap_number);
-
- __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, env);
-
- // Convert undefined to NaN. Compute NaN as 0/0.
- __ xorpd(result_reg, result_reg);
- __ divsd(result_reg, result_reg);
- __ jmp(&done);
-
- // Heap number to XMM conversion.
- __ bind(&heap_number);
- __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- // Smi to XMM conversion
- __ bind(&load_smi);
- __ SmiToInteger32(kScratchRegister, input_reg);
- __ cvtlsi2sd(result_reg, kScratchRegister);
- __ bind(&done);
-}
-
-
-class DeferredTaggedToI: public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- private:
- LTaggedToI* instr_;
-};
-
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- NearLabel done, heap_number;
- Register input_reg = ToRegister(instr->InputAt(0));
-
- // Heap number map check.
- __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
-
- if (instr->truncating()) {
- __ j(equal, &heap_number);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
- __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, instr->environment());
- __ movl(input_reg, Immediate(0));
- __ jmp(&done);
-
- __ bind(&heap_number);
-
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2siq(input_reg, xmm0);
- __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
- __ cmpl(input_reg, kScratchRegister);
- DeoptimizeIf(equal, instr->environment());
- } else {
- // Deoptimize if we don't have a heap number.
- DeoptimizeIf(not_equal, instr->environment());
-
- XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(input_reg, xmm0);
- __ cvtlsi2sd(xmm_temp, input_reg);
- __ ucomisd(xmm0, xmm_temp);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ testl(input_reg, input_reg);
- __ j(not_zero, &done);
- __ movmskpd(input_reg, xmm0);
- __ andl(input_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- }
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister());
- ASSERT(input->Equals(instr->result()));
-
- Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
- __ JumpIfNotSmi(input_reg, deferred->entry());
- __ SmiToInteger32(input_reg, input_reg);
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister());
- LOperand* result = instr->result();
- ASSERT(result->IsDoubleRegister());
-
- Register input_reg = ToRegister(input);
- XMMRegister result_reg = ToDoubleRegister(result);
-
- EmitNumberUntagD(input_reg, result_reg, instr->environment());
-}
-
-
-void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsDoubleRegister());
- LOperand* result = instr->result();
- ASSERT(result->IsRegister());
-
- XMMRegister input_reg = ToDoubleRegister(input);
- Register result_reg = ToRegister(result);
-
- if (instr->truncating()) {
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations.
- __ cvttsd2siq(result_reg, input_reg);
- __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE);
- __ cmpl(result_reg, kScratchRegister);
- DeoptimizeIf(equal, instr->environment());
- } else {
- __ cvttsd2si(result_reg, input_reg);
- __ cvtlsi2sd(xmm0, result_reg);
- __ ucomisd(xmm0, input_reg);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- NearLabel done;
- // The integer converted back is equal to the original. We
- // only have to test if we got -0 as an input.
- __ testl(result_reg, result_reg);
- __ j(not_zero, &done);
- __ movmskpd(result_reg, input_reg);
- // Bit 0 contains the sign of the double in input_reg.
- // If input was positive, we are ok and return 0, otherwise
- // deoptimize.
- __ andl(result_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- __ bind(&done);
- }
- }
-}
-
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->InputAt(0);
- Condition cc = masm()->CheckSmi(ToRegister(input));
- DeoptimizeIf(NegateCondition(cc), instr->environment());
-}
-
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->InputAt(0);
- Condition cc = masm()->CheckSmi(ToRegister(input));
- DeoptimizeIf(cc, instr->environment());
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->InputAt(0));
- InstanceType first = instr->hydrogen()->first();
- InstanceType last = instr->hydrogen()->last();
-
- __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
-
- // If there is only one type in the interval check for equality.
- if (first == last) {
- __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
- Immediate(static_cast<int8_t>(first)));
- DeoptimizeIf(not_equal, instr->environment());
- } else if (first == FIRST_STRING_TYPE && last == LAST_STRING_TYPE) {
- // String has a dedicated bit in instance type.
- __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
- Immediate(kIsNotStringMask));
- DeoptimizeIf(not_zero, instr->environment());
- } else {
- __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
- Immediate(static_cast<int8_t>(first)));
- DeoptimizeIf(below, instr->environment());
- // Omit check for the last type.
- if (last != LAST_TYPE) {
- __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
- Immediate(static_cast<int8_t>(last)));
- DeoptimizeIf(above, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- ASSERT(instr->InputAt(0)->IsRegister());
- Register reg = ToRegister(instr->InputAt(0));
- __ Cmp(reg, instr->hydrogen()->target());
- DeoptimizeIf(not_equal, instr->environment());
-}
-
-
-void LCodeGen::DoCheckMap(LCheckMap* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister());
- Register reg = ToRegister(input);
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
- instr->hydrogen()->map());
- DeoptimizeIf(not_equal, instr->environment());
-}
-
-
-void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
- if (heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- factory()->NewJSGlobalPropertyCell(object);
- __ movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
- __ movq(result, Operand(result, 0));
- } else {
- __ Move(result, object);
- }
-}
-
-
-void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- Register reg = ToRegister(instr->TempAt(0));
-
- Handle<JSObject> holder = instr->holder();
- Handle<JSObject> current_prototype = instr->prototype();
-
- // Load prototype object.
- LoadHeapObject(reg, current_prototype);
-
- // Check prototype maps up to the holder.
- while (!current_prototype.is_identical_to(holder)) {
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Handle<Map>(current_prototype->map()));
- DeoptimizeIf(not_equal, instr->environment());
- current_prototype =
- Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
- // Load next prototype object.
- LoadHeapObject(reg, current_prototype);
- }
-
- // Check the holder map.
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Handle<Map>(current_prototype->map()));
- DeoptimizeIf(not_equal, instr->environment());
-}
-
-
-void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- // Setup the parameters to the stub/runtime call.
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
- __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(instr->hydrogen()->constant_elements());
-
- // Pick the right runtime function or stub to call.
- int length = instr->hydrogen()->length();
- if (instr->hydrogen()->IsCopyOnWrite()) {
- ASSERT(instr->hydrogen()->depth() == 1);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
- } else {
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- // Setup the parameters to the stub/runtime call.
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
- __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(instr->hydrogen()->constant_properties());
- __ Push(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0));
-
- // Pick the right runtime function to call.
- if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else {
- CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
- }
-}
-
-
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(rax));
- __ push(rax);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- NearLabel materialized;
- // Registers will be used as follows:
- // rdi = JS function.
- // rcx = literals array.
- // rbx = regexp literal.
- // rax = regexp literal clone.
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
- int literal_offset = FixedArray::kHeaderSize +
- instr->hydrogen()->literal_index() * kPointerSize;
- __ movq(rbx, FieldOperand(rcx, literal_offset));
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &materialized);
-
- // Create regexp literal using runtime function
- // Result will be in rax.
- __ push(rcx);
- __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(instr->hydrogen()->pattern());
- __ Push(instr->hydrogen()->flags());
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ movq(rbx, rax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(rbx);
- __ Push(Smi::FromInt(size));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(rbx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ movq(rdx, FieldOperand(rbx, i));
- __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
- __ movq(FieldOperand(rax, i), rdx);
- __ movq(FieldOperand(rax, i + kPointerSize), rcx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
- __ movq(FieldOperand(rax, size - kPointerSize), rdx);
- }
-}
-
-
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- Handle<SharedFunctionInfo> shared_info = instr->shared_info();
- bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(
- shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
- __ Push(shared_info);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else {
- __ push(rsi);
- __ Push(shared_info);
- __ PushRoot(pretenure ?
- Heap::kTrueValueRootIndex :
- Heap::kFalseValueRootIndex);
- CallRuntime(Runtime::kNewClosure, 3, instr);
- }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
- LOperand* input = instr->InputAt(0);
- if (input->IsConstantOperand()) {
- __ Push(ToHandle(LConstantOperand::cast(input)));
- } else if (input->IsRegister()) {
- __ push(ToRegister(input));
- } else {
- ASSERT(input->IsStackSlot());
- __ push(ToOperand(input));
- }
- CallRuntime(Runtime::kTypeof, 1, instr);
-}
-
-
-void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Label true_label;
- Label false_label;
- NearLabel done;
-
- Condition final_branch_condition = EmitTypeofIs(&true_label,
- &false_label,
- input,
- instr->type_literal());
- __ j(final_branch_condition, &true_label);
- __ bind(&false_label);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ jmp(&done);
-
- __ bind(&true_label);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::EmitPushConstantOperand(LOperand* operand) {
- ASSERT(operand->IsConstantOperand());
- LConstantOperand* const_op = LConstantOperand::cast(operand);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
- __ push(Immediate(static_cast<int32_t>(literal->Number())));
- } else if (r.IsDouble()) {
- Abort("unsupported double immediate");
- } else {
- ASSERT(r.IsTagged());
- __ Push(literal);
- }
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition final_branch_condition = EmitTypeofIs(true_label,
- false_label,
- input,
- instr->type_literal());
-
- EmitBranch(true_block, false_block, final_branch_condition);
-}
-
-
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name) {
- Condition final_branch_condition = no_condition;
- if (type_name->Equals(heap()->number_symbol())) {
- __ JumpIfSmi(input, true_label);
- __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
-
- final_branch_condition = equal;
-
- } else if (type_name->Equals(heap()->string_symbol())) {
- __ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
- __ j(above_equal, false_label);
- __ testb(FieldOperand(input, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- final_branch_condition = zero;
-
- } else if (type_name->Equals(heap()->boolean_symbol())) {
- __ CompareRoot(input, Heap::kTrueValueRootIndex);
- __ j(equal, true_label);
- __ CompareRoot(input, Heap::kFalseValueRootIndex);
- final_branch_condition = equal;
-
- } else if (type_name->Equals(heap()->undefined_symbol())) {
- __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
- __ j(equal, true_label);
- __ JumpIfSmi(input, false_label);
- // Check for undetectable objects => true.
- __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
- __ testb(FieldOperand(input, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- final_branch_condition = not_zero;
-
- } else if (type_name->Equals(heap()->function_symbol())) {
- __ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, FIRST_FUNCTION_CLASS_TYPE, input);
- final_branch_condition = above_equal;
-
- } else if (type_name->Equals(heap()->object_symbol())) {
- __ JumpIfSmi(input, false_label);
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- __ j(equal, true_label);
- __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, input);
- __ j(below, false_label);
- __ CmpInstanceType(input, FIRST_FUNCTION_CLASS_TYPE);
- __ j(above_equal, false_label);
- // Check for undetectable objects => false.
- __ testb(FieldOperand(input, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- final_branch_condition = zero;
-
- } else {
- final_branch_condition = never;
- __ jmp(false_label);
- }
-
- return final_branch_condition;
-}
-
-
-void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
- Register result = ToRegister(instr->result());
- NearLabel true_label;
- NearLabel false_label;
- NearLabel done;
-
- EmitIsConstructCall(result);
- __ j(equal, &true_label);
-
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ jmp(&done);
-
- __ bind(&true_label);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp = ToRegister(instr->TempAt(0));
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- EmitIsConstructCall(temp);
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp) {
- // Get the frame pointer for the calling frame.
- __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- NearLabel check_frame_marker;
- __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &check_frame_marker);
- __ movq(temp, Operand(rax, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
-}
-
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- // No code for lazy bailout instruction. Used to capture environment after a
- // call for populating the safepoint data with deoptimization data.
-}
-
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- DeoptimizeIf(no_condition, instr->environment());
-}
-
-
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- LOperand* obj = instr->object();
- LOperand* key = instr->key();
- // Push object.
- if (obj->IsRegister()) {
- __ push(ToRegister(obj));
- } else {
- __ push(ToOperand(obj));
- }
- // Push key.
- if (key->IsConstantOperand()) {
- EmitPushConstantOperand(key);
- } else if (key->IsRegister()) {
- __ push(ToRegister(key));
- } else {
- __ push(ToOperand(key));
- }
- ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
- LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
- RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
- // Create safepoint generator that will also ensure enough space in the
- // reloc info for patching in deoptimization (since this is invoking a
- // builtin)
- SafepointGenerator safepoint_generator(this,
- pointers,
- env->deoptimization_index());
- __ Push(Smi::FromInt(strict_mode_flag()));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
-}
-
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- // Perform stack overflow check.
- NearLabel done;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &done);
-
- StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- // This is a pseudo-instruction that ensures that the environment here is
- // properly registered for deoptimization and records the assembler's PC
- // offset.
- LEnvironment* environment = instr->environment();
- environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
- instr->SpilledDoubleRegisterArray());
-
- // If the environment were already registered, we would have no way of
- // backpatching it with the spill slot operands.
- ASSERT(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment);
- ASSERT(osr_pc_offset_ == -1);
- osr_pc_offset_ = masm()->pc_offset();
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/lithium-codegen-x64.h b/src/3rdparty/v8/src/x64/lithium-codegen-x64.h
deleted file mode 100644
index f44fdb9..0000000
--- a/src/3rdparty/v8/src/x64/lithium-codegen-x64.h
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_LITHIUM_CODEGEN_X64_H_
-#define V8_X64_LITHIUM_CODEGEN_X64_H_
-
-#include "x64/lithium-x64.h"
-
-#include "checks.h"
-#include "deoptimizer.h"
-#include "safepoint-table.h"
-#include "scopes.h"
-#include "x64/lithium-gap-resolver-x64.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-class SafepointGenerator;
-
-class LCodeGen BASE_EMBEDDED {
- public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : chunk_(chunk),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
- deoptimizations_(4),
- jump_table_(4),
- deoptimization_literals_(8),
- inlined_function_count_(0),
- scope_(info->scope()),
- status_(UNUSED),
- deferred_(8),
- osr_pc_offset_(-1),
- resolver_(this) {
- PopulateDeoptimizationLiteralsWithInlinedFunctions();
- }
-
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
-
- // Support for converting LOperands to assembler types.
- Register ToRegister(LOperand* op) const;
- XMMRegister ToDoubleRegister(LOperand* op) const;
- bool IsInteger32Constant(LConstantOperand* op) const;
- int ToInteger32(LConstantOperand* op) const;
- bool IsTaggedConstant(LConstantOperand* op) const;
- Handle<Object> ToHandle(LConstantOperand* op) const;
- Operand ToOperand(LOperand* op) const;
-
- // Try to generate code for the entire chunk, but it may fail if the
- // chunk contains constructs we cannot handle. Returns true if the
- // code generation attempt succeeded.
- bool GenerateCode();
-
- // Finish the code by setting stack height, safepoint, and bailout
- // information on it.
- void FinishCode(Handle<Code> code);
-
- // Deferred code support.
- void DoDeferredNumberTagD(LNumberTagD* instr);
- void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
- void DoDeferredStackCheck(LGoto* instr);
- void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
- void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
-
- // Parallel move support.
- void DoParallelMove(LParallelMove* move);
-
- // Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment, Translation* translation);
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- int strict_mode_flag() const {
- return info()->is_strict() ? kStrictMode : kNonStrictMode;
- }
-
- LChunk* chunk() const { return chunk_; }
- Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk_->graph(); }
-
- int GetNextEmittedBlock(int block);
- LInstruction* GetNextInstruction();
-
- void EmitClassOfTest(Label* if_true,
- Label* if_false,
- Handle<String> class_name,
- Register input,
- Register temporary);
-
- int StackSlotCount() const { return chunk()->spill_slot_count(); }
- int ParameterCount() const { return scope()->num_parameters(); }
-
- void Abort(const char* format, ...);
- void Comment(const char* format, ...);
-
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
-
- // Code generation passes. Returns true if code generation should
- // continue.
- bool GeneratePrologue();
- bool GenerateBody();
- bool GenerateDeferredCode();
- bool GenerateJumpTable();
- bool GenerateSafepointTable();
-
- void CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr);
- void CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr);
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
- LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, num_arguments, instr);
- }
-
- // Generate a direct call to a known function. Expects the function
- // to be in edi.
- void CallKnownFunction(Handle<JSFunction> function,
- int arity,
- LInstruction* instr);
-
- void LoadHeapObject(Register result, Handle<HeapObject> object);
-
- void RegisterLazyDeoptimization(LInstruction* instr);
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
- void DeoptimizeIf(Condition cc, LEnvironment* environment);
-
- void AddToTranslation(Translation* translation,
- LOperand* op,
- bool is_tagged);
- void PopulateDeoptimizationData(Handle<Code> code);
- int DefineDeoptimizationLiteral(Handle<Object> literal);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
-
- Register ToRegister(int index) const;
- XMMRegister ToDoubleRegister(int index) const;
-
- // Specific math operations - used from DoUnaryMathOperation.
- void EmitIntegerMathAbs(LUnaryMathOperation* instr);
- void DoMathAbs(LUnaryMathOperation* instr);
- void DoMathFloor(LUnaryMathOperation* instr);
- void DoMathRound(LUnaryMathOperation* instr);
- void DoMathSqrt(LUnaryMathOperation* instr);
- void DoMathPowHalf(LUnaryMathOperation* instr);
- void DoMathLog(LUnaryMathOperation* instr);
- void DoMathCos(LUnaryMathOperation* instr);
- void DoMathSin(LUnaryMathOperation* instr);
-
- // Support for recording safepoint and position information.
- void RecordSafepoint(LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- int deoptimization_index);
- void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
- void RecordSafepoint(int deoptimization_index);
- void RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- int deoptimization_index);
- void RecordPosition(int position);
- int LastSafepointEnd() {
- return static_cast<int>(safepoints_.GetPcAfterGap());
- }
-
- static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
- void EmitBranch(int left_block, int right_block, Condition cc);
- void EmitCmpI(LOperand* left, LOperand* right);
- void EmitNumberUntagD(Register input, XMMRegister result, LEnvironment* env);
-
- // Emits optimized code for typeof x == "y". Modifies input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label, Label* false_label,
- Register input, Handle<String> type_name);
-
- // Emits optimized code for %_IsObject(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsObject(Register input,
- Label* is_not_object,
- Label* is_object);
-
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp);
-
- void EmitLoadField(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name);
-
- // Emits code for pushing a constant operand.
- void EmitPushConstantOperand(LOperand* operand);
-
- struct JumpTableEntry {
- inline JumpTableEntry(Address entry)
- : label(),
- address(entry) { }
- Label label;
- Address address;
- };
-
- LChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
-
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
- ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<JumpTableEntry> jump_table_;
- ZoneList<Handle<Object> > deoptimization_literals_;
- int inlined_function_count_;
- Scope* const scope_;
- Status status_;
- TranslationBuffer translations_;
- ZoneList<LDeferredCode*> deferred_;
- int osr_pc_offset_;
-
- // Builder that keeps track of safepoints in the code. The table
- // itself is emitted at the end of the generated code.
- SafepointTableBuilder safepoints_;
-
- // Compiler from a set of parallel moves to a sequential list of moves.
- LGapResolver resolver_;
-
- friend class LDeferredCode;
- friend class LEnvironment;
- friend class SafepointGenerator;
- DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-
-class LDeferredCode: public ZoneObject {
- public:
- explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen), external_exit_(NULL) {
- codegen->AddDeferredCode(this);
- }
-
- virtual ~LDeferredCode() { }
- virtual void Generate() = 0;
-
- void SetExit(Label *exit) { external_exit_ = exit; }
- Label* entry() { return &entry_; }
- Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
-
- protected:
- LCodeGen* codegen() const { return codegen_; }
- MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
- LCodeGen* codegen_;
- Label entry_;
- Label exit_;
- Label* external_exit_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_X64_LITHIUM_CODEGEN_X64_H_
diff --git a/src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.cc b/src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.cc
deleted file mode 100644
index cedd025..0000000
--- a/src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.cc
+++ /dev/null
@@ -1,320 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "x64/lithium-gap-resolver-x64.h"
-#include "x64/lithium-codegen-x64.h"
-
-namespace v8 {
-namespace internal {
-
-LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner), moves_(32) {}
-
-
-void LGapResolver::Resolve(LParallelMove* parallel_move) {
- ASSERT(moves_.is_empty());
- // Build up a worklist of moves.
- BuildInitialMoveList(parallel_move);
-
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands move = moves_[i];
- // Skip constants to perform them last. They don't block other moves
- // and skipping such moves with register destinations keeps those
- // registers free for the whole algorithm.
- if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
- PerformMove(i);
- }
- }
-
- // Perform the moves with constant sources.
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated()) {
- ASSERT(moves_[i].source()->IsConstantOperand());
- EmitMove(i);
- }
- }
-
- moves_.Rewind(0);
-}
-
-
-void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
- // Perform a linear sweep of the moves to add them to the initial list of
- // moves to perform, ignoring any move that is redundant (the source is
- // the same as the destination, the destination is ignored and
- // unallocated, or the move was already eliminated).
- const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) moves_.Add(move);
- }
- Verify();
-}
-
-
-void LGapResolver::PerformMove(int index) {
- // Each call to this function performs a move and deletes it from the move
- // graph. We first recursively perform any move blocking this one. We
- // mark a move as "pending" on entry to PerformMove in order to detect
- // cycles in the move graph. We use operand swaps to resolve cycles,
- // which means that a call to PerformMove could change any source operand
- // in the move graph.
-
- ASSERT(!moves_[index].IsPending());
- ASSERT(!moves_[index].IsRedundant());
-
- // Clear this move's destination to indicate a pending move. The actual
- // destination is saved in a stack-allocated local. Recursion may allow
- // multiple moves to be pending.
- ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
- LOperand* destination = moves_[index].destination();
- moves_[index].set_destination(NULL);
-
- // Perform a depth-first traversal of the move graph to resolve
- // dependencies. Any unperformed, unpending move with a source the same
- // as this one's destination blocks this one so recursively perform all
- // such moves.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination) && !other_move.IsPending()) {
- // Though PerformMove can change any source operand in the move graph,
- // this call cannot create a blocking move via a swap (this loop does
- // not miss any). Assume there is a non-blocking move with source A
- // and this move is blocked on source B and there is a swap of A and
- // B. Then A and B must be involved in the same cycle (or they would
- // not be swapped). Since this move's destination is B and there is
- // only a single incoming edge to an operand, this move must also be
- // involved in the same cycle. In that case, the blocking move will
- // be created but will be "pending" when we return from PerformMove.
- PerformMove(i);
- }
- }
-
- // We are about to resolve this move and don't need it marked as
- // pending, so restore its destination.
- moves_[index].set_destination(destination);
-
- // This move's source may have changed due to swaps to resolve cycles and
- // so it may now be the last move in the cycle. If so remove it.
- if (moves_[index].source()->Equals(destination)) {
- moves_[index].Eliminate();
- return;
- }
-
- // The move may be blocked on a (at most one) pending move, in which case
- // we have a cycle. Search for such a blocking move and perform a swap to
- // resolve it.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination)) {
- ASSERT(other_move.IsPending());
- EmitSwap(index);
- return;
- }
- }
-
- // This move is not blocked.
- EmitMove(index);
-}
-
-
-void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_ASSERTS
- // No operand should be the destination for more than one move.
- for (int i = 0; i < moves_.length(); ++i) {
- LOperand* destination = moves_[i].destination();
- for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
- }
- }
-#endif
-}
-
-
-#define __ ACCESS_MASM(cgen_->masm())
-
-
-void LGapResolver::EmitMove(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- Register src = cgen_->ToRegister(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- __ movq(dst, src);
- } else {
- ASSERT(destination->IsStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- __ movq(dst, src);
- }
-
- } else if (source->IsStackSlot()) {
- Operand src = cgen_->ToOperand(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- __ movq(dst, src);
- } else {
- ASSERT(destination->IsStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- __ movq(kScratchRegister, src);
- __ movq(dst, kScratchRegister);
- }
-
- } else if (source->IsConstantOperand()) {
- LConstantOperand* constant_source = LConstantOperand::cast(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsInteger32Constant(constant_source)) {
- __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
- } else {
- __ Move(dst, cgen_->ToHandle(constant_source));
- }
- } else {
- ASSERT(destination->IsStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- if (cgen_->IsInteger32Constant(constant_source)) {
- // Allow top 32 bits of an untagged Integer32 to be arbitrary.
- __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
- } else {
- __ Move(dst, cgen_->ToHandle(constant_source));
- }
- }
-
- } else if (source->IsDoubleRegister()) {
- XMMRegister src = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- __ movsd(cgen_->ToDoubleRegister(destination), src);
- } else {
- ASSERT(destination->IsDoubleStackSlot());
- __ movsd(cgen_->ToOperand(destination), src);
- }
- } else if (source->IsDoubleStackSlot()) {
- Operand src = cgen_->ToOperand(source);
- if (destination->IsDoubleRegister()) {
- __ movsd(cgen_->ToDoubleRegister(destination), src);
- } else {
- ASSERT(destination->IsDoubleStackSlot());
- __ movsd(xmm0, src);
- __ movsd(cgen_->ToOperand(destination), xmm0);
- }
- } else {
- UNREACHABLE();
- }
-
- moves_[index].Eliminate();
-}
-
-
-void LGapResolver::EmitSwap(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister() && destination->IsRegister()) {
- // Swap two general-purpose registers.
- Register src = cgen_->ToRegister(source);
- Register dst = cgen_->ToRegister(destination);
- __ xchg(dst, src);
-
- } else if ((source->IsRegister() && destination->IsStackSlot()) ||
- (source->IsStackSlot() && destination->IsRegister())) {
- // Swap a general-purpose register and a stack slot.
- Register reg =
- cgen_->ToRegister(source->IsRegister() ? source : destination);
- Operand mem =
- cgen_->ToOperand(source->IsRegister() ? destination : source);
- __ movq(kScratchRegister, mem);
- __ movq(mem, reg);
- __ movq(reg, kScratchRegister);
-
- } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
- (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
- // Swap two stack slots or two double stack slots.
- Operand src = cgen_->ToOperand(source);
- Operand dst = cgen_->ToOperand(destination);
- __ movsd(xmm0, src);
- __ movq(kScratchRegister, dst);
- __ movsd(dst, xmm0);
- __ movq(src, kScratchRegister);
-
- } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
- // Swap two double registers.
- XMMRegister source_reg = cgen_->ToDoubleRegister(source);
- XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
- __ movsd(xmm0, source_reg);
- __ movsd(source_reg, destination_reg);
- __ movsd(destination_reg, xmm0);
-
- } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
- // Swap a double register and a double stack slot.
- ASSERT((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
- (source->IsDoubleStackSlot() && destination->IsDoubleRegister()));
- XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
- ? source
- : destination);
- LOperand* other = source->IsDoubleRegister() ? destination : source;
- ASSERT(other->IsDoubleStackSlot());
- Operand other_operand = cgen_->ToOperand(other);
- __ movsd(xmm0, other_operand);
- __ movsd(other_operand, reg);
- __ movsd(reg, xmm0);
-
- } else {
- // No other combinations are possible.
- UNREACHABLE();
- }
-
- // The swap of source and destination has executed a move from source to
- // destination.
- moves_[index].Eliminate();
-
- // Any unperformed (including pending) move with a source of either
- // this move's source or destination needs to have their source
- // changed to reflect the state of affairs after the swap.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(source)) {
- moves_[i].set_source(destination);
- } else if (other_move.Blocks(destination)) {
- moves_[i].set_source(source);
- }
- }
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.h b/src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.h
deleted file mode 100644
index d828455..0000000
--- a/src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.h
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
-#define V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
-
-#include "v8.h"
-
-#include "lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-class LGapResolver;
-
-class LGapResolver BASE_EMBEDDED {
- public:
- explicit LGapResolver(LCodeGen* owner);
-
- // Resolve a set of parallel moves, emitting assembler instructions.
- void Resolve(LParallelMove* parallel_move);
-
- private:
- // Build the initial list of moves.
- void BuildInitialMoveList(LParallelMove* parallel_move);
-
- // Perform the move at the moves_ index in question (possibly requiring
- // other moves to satisfy dependencies).
- void PerformMove(int index);
-
- // Emit a move and remove it from the move graph.
- void EmitMove(int index);
-
- // Execute a move by emitting a swap of two operands. The move from
- // source to destination is removed from the move graph.
- void EmitSwap(int index);
-
- // Verify the move list before performing moves.
- void Verify();
-
- LCodeGen* cgen_;
-
- // List of moves not yet resolved.
- ZoneList<LMoveOperands> moves_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
diff --git a/src/3rdparty/v8/src/x64/lithium-x64.cc b/src/3rdparty/v8/src/x64/lithium-x64.cc
deleted file mode 100644
index d0091e5..0000000
--- a/src/3rdparty/v8/src/x64/lithium-x64.cc
+++ /dev/null
@@ -1,2117 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "lithium-allocator-inl.h"
-#include "x64/lithium-x64.h"
-#include "x64/lithium-codegen-x64.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- void L##type::CompileToNative(LCodeGen* generator) { \
- generator->Do##type(this); \
- }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
- register_spills_[i] = NULL;
- }
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
- double_register_spills_[i] = NULL;
- }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsStackSlot());
- ASSERT(register_spills_[allocation_index] == NULL);
- register_spills_[allocation_index] = spill_operand;
-}
-
-
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsDoubleStackSlot());
- ASSERT(double_register_spills_[allocation_index] == NULL);
- double_register_spills_[allocation_index] = spill_operand;
-}
-
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as
- // temporaries and outputs because all registers
- // are blocked by the calling convention.
- // Inputs must use a fixed register.
- ASSERT(Output() == NULL ||
- LUnallocated::cast(Output())->HasFixedPolicy() ||
- !LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); it.HasNext(); it.Advance()) {
- LOperand* operand = it.Next();
- ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
- !LUnallocated::cast(operand)->HasRegisterPolicy());
- }
- for (TempIterator it(this); it.HasNext(); it.Advance()) {
- LOperand* operand = it.Next();
- ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
- !LUnallocated::cast(operand)->HasRegisterPolicy());
- }
-}
-#endif
-
-
-void LInstruction::PrintTo(StringStream* stream) {
- stream->Add("%s ", this->Mnemonic());
-
- PrintOutputOperandTo(stream);
-
- PrintDataTo(stream);
-
- if (HasEnvironment()) {
- stream->Add(" ");
- environment()->PrintTo(stream);
- }
-
- if (HasPointerMap()) {
- stream->Add(" ");
- pointer_map()->PrintTo(stream);
- }
-}
-
-
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- inputs_.PrintOperandsTo(stream);
-}
-
-
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
- results_.PrintOperandsTo(stream);
-}
-
-
-template<typename T, int N>
-void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) {
- for (int i = 0; i < N; i++) {
- if (i > 0) stream->Add(" ");
- elems_[i]->PrintTo(stream);
- }
-}
-
-
-void LLabel::PrintDataTo(StringStream* stream) {
- LGap::PrintDataTo(stream);
- LLabel* rep = replacement();
- if (rep != NULL) {
- stream->Add(" Dead block replaced with B%d", rep->block_id());
- }
-}
-
-
-bool LGap::IsRedundant() const {
- for (int i = 0; i < 4; i++) {
- if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
- return false;
- }
- }
-
- return true;
-}
-
-
-void LGap::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < 4; i++) {
- stream->Add("(");
- if (parallel_moves_[i] != NULL) {
- parallel_moves_[i]->PrintDataTo(stream);
- }
- stream->Add(") ");
- }
-}
-
-
-const char* LArithmeticD::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-d";
- case Token::SUB: return "sub-d";
- case Token::MUL: return "mul-d";
- case Token::DIV: return "div-d";
- case Token::MOD: return "mod-d";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-const char* LArithmeticT::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-t";
- case Token::SUB: return "sub-t";
- case Token::MUL: return "mul-t";
- case Token::MOD: return "mod-t";
- case Token::DIV: return "div-t";
- case Token::BIT_AND: return "bit-and-t";
- case Token::BIT_OR: return "bit-or-t";
- case Token::BIT_XOR: return "bit-xor-t";
- case Token::SHL: return "sal-t";
- case Token::SAR: return "sar-t";
- case Token::SHR: return "shr-t";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) {
- stream->Add("B%d", block_id());
-}
-
-
-void LBranch::PrintDataTo(StringStream* stream) {
- stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- InputAt(0)->PrintTo(stream);
-}
-
-
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- InputAt(0)->PrintTo(stream);
- stream->Add(" %s ", Token::String(op()));
- InputAt(1)->PrintTo(stream);
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- InputAt(0)->PrintTo(stream);
- stream->Add(is_strict() ? " === null" : " == null");
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_object(");
- InputAt(0)->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_smi(");
- InputAt(0)->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_instance_type(");
- InputAt(0)->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_cached_array_index(");
- InputAt(0)->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if class_of_test(");
- InputAt(0)->PrintTo(stream);
- stream->Add(", \"%o\") then B%d else B%d",
- *hydrogen()->class_name(),
- true_block_id(),
- false_block_id());
-}
-
-
-void LTypeofIs::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
- stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
-}
-
-
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if typeof ");
- InputAt(0)->PrintTo(stream);
- stream->Add(" == \"%s\" then B%d else B%d",
- *hydrogen()->type_literal()->ToCString(),
- true_block_id(), false_block_id());
-}
-
-
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
- stream->Add("/%s ", hydrogen()->OpName());
- InputAt(0)->PrintTo(stream);
-}
-
-
-void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
- stream->Add("[%d]", slot_index());
-}
-
-
-void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
- stream->Add("[%d] <- ", slot_index());
- InputAt(1)->PrintTo(stream);
-}
-
-
-void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[rcx] #%d / ", arity());
-}
-
-
-void LCallNamed::PrintDataTo(StringStream* stream) {
- SmartPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallGlobal::PrintDataTo(StringStream* stream) {
- SmartPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- InputAt(0)->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LClassOfTest::PrintDataTo(StringStream* stream) {
- stream->Add("= class_of_test(");
- InputAt(0)->PrintTo(stream);
- stream->Add(", \"%o\")", *hydrogen()->class_name());
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
- arguments()->PrintTo(stream);
-
- stream->Add(" length ");
- length()->PrintTo(stream);
-
- stream->Add(" index ");
- index()->PrintTo(stream);
-}
-
-
-int LChunk::GetNextSpillIndex(bool is_double) {
- return spill_slot_count_++;
-}
-
-
-LOperand* LChunk::GetNextSpillSlot(bool is_double) {
- // All stack slots are Double stack slots on x64.
- // Alternatively, at some point, start using half-size
- // stack slots for int32 values.
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
- return LDoubleStackSlot::Create(index);
- } else {
- return LStackSlot::Create(index);
- }
-}
-
-
-void LChunk::MarkEmptyBlocks() {
- HPhase phase("Mark empty blocks", this);
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- int first = block->first_instruction_index();
- int last = block->last_instruction_index();
- LInstruction* first_instr = instructions()->at(first);
- LInstruction* last_instr = instructions()->at(last);
-
- LLabel* label = LLabel::cast(first_instr);
- if (last_instr->IsGoto()) {
- LGoto* goto_instr = LGoto::cast(last_instr);
- if (!goto_instr->include_stack_check() &&
- label->IsRedundant() &&
- !label->is_loop_header()) {
- bool can_eliminate = true;
- for (int i = first + 1; i < last && can_eliminate; ++i) {
- LInstruction* cur = instructions()->at(i);
- if (cur->IsGap()) {
- LGap* gap = LGap::cast(cur);
- if (!gap->IsRedundant()) {
- can_eliminate = false;
- }
- } else {
- can_eliminate = false;
- }
- }
-
- if (can_eliminate) {
- label->set_replacement(GetLabel(goto_instr->block_id()));
- }
- }
- }
- }
-}
-
-
-void LStoreNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
-void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
- LGap* gap = new LGap(block);
- int index = -1;
- if (instr->IsControl()) {
- instructions_.Add(gap);
- index = instructions_.length();
- instructions_.Add(instr);
- } else {
- index = instructions_.length();
- instructions_.Add(instr);
- instructions_.Add(gap);
- }
- if (instr->HasPointerMap()) {
- pointer_maps_.Add(instr->pointer_map());
- instr->pointer_map()->set_lithium_position(index);
- }
-}
-
-
-LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
- return LConstantOperand::Create(constant->id());
-}
-
-
-int LChunk::GetParameterStackSlot(int index) const {
- // The receiver is at index 0, the first parameter at index 1, so we
- // shift all parameter indexes down by the number of parameters, and
- // make sure they end up negative so they are distinguishable from
- // spill slots.
- int result = index - info()->scope()->num_parameters() - 1;
- ASSERT(result < 0);
- return result;
-}
-
-// A parameter relative to ebp in the arguments stub.
-int LChunk::ParameterAt(int index) {
- ASSERT(-1 <= index); // -1 is the receiver.
- return (1 + info()->scope()->num_parameters() - index) *
- kPointerSize;
-}
-
-
-LGap* LChunk::GetGapAt(int index) const {
- return LGap::cast(instructions_[index]);
-}
-
-
-bool LChunk::IsGapAt(int index) const {
- return instructions_[index]->IsGap();
-}
-
-
-int LChunk::NearestGapPos(int index) const {
- while (!IsGapAt(index)) index--;
- return index;
-}
-
-
-void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
- GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
-}
-
-
-Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
- return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
-}
-
-
-Representation LChunk::LookupLiteralRepresentation(
- LConstantOperand* operand) const {
- return graph_->LookupValue(operand->index())->representation();
-}
-
-
-LChunk* LChunkBuilder::Build() {
- ASSERT(is_unused());
- chunk_ = new LChunk(info(), graph());
- HPhase phase("Building chunk", chunk_);
- status_ = BUILDING;
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- HBasicBlock* next = NULL;
- if (i < blocks->length() - 1) next = blocks->at(i + 1);
- DoBasicBlock(blocks->at(i), next);
- if (is_aborted()) return NULL;
- }
- status_ = DONE;
- return chunk_;
-}
-
-
-void LChunkBuilder::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LChunk building in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
- status_ = ABORTED;
-}
-
-
-LRegister* LChunkBuilder::ToOperand(Register reg) {
- return LRegister::Create(Register::ToAllocationIndex(reg));
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
- return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- XMMRegister::ToAllocationIndex(reg));
-}
-
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
- return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) {
- return Use(value, ToUnallocated(reg));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
- return Use(value,
- new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::NONE));
-}
-
-
-LOperand* LChunkBuilder::UseAtStart(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::NONE,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value);
-}
-
-
-LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegister(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegisterAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseAny(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value, new LUnallocated(LUnallocated::ANY));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
- if (value->EmitAtUses()) {
- HInstruction* instr = HInstruction::cast(value);
- VisitInstruction(instr);
- }
- allocator_->RecordUse(value, operand);
- return operand;
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result) {
- allocator_->RecordDefinition(current_instruction_, result);
- instr->set_result(result);
- return instr;
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::NONE));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateInstruction<1, I, T>* instr,
- int index) {
- return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateInstruction<1, I, T>* instr,
- XMMRegister reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
- HEnvironment* hydrogen_env = current_block_->last_environment();
- instr->set_environment(CreateEnvironment(hydrogen_env));
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
- LInstruction* instr, int ast_id) {
- ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
- instruction_pending_deoptimization_environment_ = instr;
- pending_deoptimization_ast_id_ = ast_id;
- return instr;
-}
-
-
-void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
- instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = AstNode::kNoNumber;
-}
-
-
-LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize) {
-#ifdef DEBUG
- instr->VerifyCall();
-#endif
- instr->MarkAsCall();
- instr = AssignPointerMap(instr);
-
- if (hinstr->HasSideEffects()) {
- ASSERT(hinstr->next()->IsSimulate());
- HSimulate* sim = HSimulate::cast(hinstr->next());
- instr = SetInstructionPendingDeoptimizationEnvironment(
- instr, sim->ast_id());
- }
-
- // If instruction does not have side-effects lazy deoptimization
- // after the call will try to deoptimize to the point before the call.
- // Thus we still need to attach environment to this call even if
- // call sequence can not deoptimize eagerly.
- bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
- if (needs_environment && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
-
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
- instr->MarkAsSaveDoubles();
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new LPointerMap(position_));
- return instr;
-}
-
-
-LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- allocator_->RecordTemporary(operand);
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(Register reg) {
- LUnallocated* operand = ToUnallocated(reg);
- allocator_->RecordTemporary(operand);
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
- LUnallocated* operand = ToUnallocated(reg);
- allocator_->RecordTemporary(operand);
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- return new LLabel(instr->block());
-}
-
-
-LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoBit(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineSameAsFirst(new LBitI(op, left, right));
- } else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LArithmeticT* result = new LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoShift(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LArithmeticT* result = new LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
- }
-
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->OperandAt(0)->representation().IsInteger32());
- ASSERT(instr->OperandAt(1)->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->OperandAt(0));
-
- HValue* right_value = instr->OperandAt(1);
- LOperand* right = NULL;
- int constant_value = 0;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- } else {
- right = UseFixed(right_value, rcx);
- }
-
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- bool can_deopt = (op == Token::SHR && constant_value == 0);
- if (can_deopt) {
- bool can_truncate = true;
- for (int i = 0; i < instr->uses()->length(); i++) {
- if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) {
- can_truncate = false;
- break;
- }
- }
- can_deopt = !can_truncate;
- }
-
- LShiftI* result = new LShiftI(op, left, right, can_deopt);
- return can_deopt
- ? AssignEnvironment(DefineSameAsFirst(result))
- : DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
- HValue* left = instr->left();
- HValue* right = instr->right();
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
- LOperand* left_operand = UseFixed(left, rdx);
- LOperand* right_operand = UseFixed(right, rax);
- LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
- ASSERT(is_building());
- current_block_ = block;
- next_block_ = next_block;
- if (block->IsStartBlock()) {
- block->UpdateEnvironment(graph_->start_environment());
- argument_count_ = 0;
- } else if (block->predecessors()->length() == 1) {
- // We have a single predecessor => copy environment and outgoing
- // argument count from the predecessor.
- ASSERT(block->phis()->length() == 0);
- HBasicBlock* pred = block->predecessors()->at(0);
- HEnvironment* last_environment = pred->last_environment();
- ASSERT(last_environment != NULL);
- // Only copy the environment, if it is later used again.
- if (pred->end()->SecondSuccessor() == NULL) {
- ASSERT(pred->end()->FirstSuccessor() == block);
- } else {
- if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
- pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
- last_environment = last_environment->Copy();
- }
- }
- block->UpdateEnvironment(last_environment);
- ASSERT(pred->argument_count() >= 0);
- argument_count_ = pred->argument_count();
- } else {
- // We are at a state join => process phis.
- HBasicBlock* pred = block->predecessors()->at(0);
- // No need to copy the environment, it cannot be used later.
- HEnvironment* last_environment = pred->last_environment();
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- last_environment->SetValueAt(phi->merged_index(), phi);
- }
- for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
- }
- block->UpdateEnvironment(last_environment);
- // Pick up the outgoing argument count of one of the predecessors.
- argument_count_ = pred->argument_count();
- }
- HInstruction* current = block->first();
- int start = chunk_->instructions()->length();
- while (current != NULL && !is_aborted()) {
- // Code for constants in registers is generated lazily.
- if (!current->EmitAtUses()) {
- VisitInstruction(current);
- }
- current = current->next();
- }
- int end = chunk_->instructions()->length() - 1;
- if (end >= start) {
- block->set_first_instruction_index(start);
- block->set_last_instruction_index(end);
- }
- block->set_argument_count(argument_count_);
- next_block_ = NULL;
- current_block_ = NULL;
-}
-
-
-void LChunkBuilder::VisitInstruction(HInstruction* current) {
- HInstruction* old_current = current_instruction_;
- current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
-
- if (instr != NULL) {
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- if (current->IsTest() && !instr->IsGoto()) {
- ASSERT(instr->IsControl());
- HTest* test = HTest::cast(current);
- instr->set_hydrogen_value(test->value());
- HBasicBlock* first = test->FirstSuccessor();
- HBasicBlock* second = test->SecondSuccessor();
- ASSERT(first != NULL && second != NULL);
- instr->SetBranchTargets(first->block_id(), second->block_id());
- } else {
- instr->set_hydrogen_value(current);
- }
-
- chunk_->AddInstruction(instr, current_block_);
- }
- current_instruction_ = old_current;
-}
-
-
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
- if (hydrogen_env == NULL) return NULL;
-
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
- int ast_id = hydrogen_env->ast_id();
- ASSERT(ast_id != AstNode::kNoNumber);
- int value_count = hydrogen_env->length();
- LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer);
- int argument_index = 0;
- for (int i = 0; i < value_count; ++i) {
- HValue* value = hydrogen_env->values()->at(i);
- LOperand* op = NULL;
- if (value->IsArgumentsObject()) {
- op = NULL;
- } else if (value->IsPushArgument()) {
- op = new LArgument(argument_index++);
- } else {
- op = UseAny(value);
- }
- result->AddValue(op, value->representation());
- }
-
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- LGoto* result = new LGoto(instr->FirstSuccessor()->block_id(),
- instr->include_stack_check());
- return (instr->include_stack_check())
- ? AssignPointerMap(result)
- : result;
-}
-
-
-LInstruction* LChunkBuilder::DoTest(HTest* instr) {
- HValue* v = instr->value();
- if (v->EmitAtUses()) {
- if (v->IsClassOfTest()) {
- HClassOfTest* compare = HClassOfTest::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
- TempRegister());
- } else if (v->IsCompare()) {
- HCompare* compare = HCompare::cast(v);
- Token::Value op = compare->token();
- HValue* left = compare->left();
- HValue* right = compare->right();
- Representation r = compare->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(left->representation().IsInteger32());
- ASSERT(right->representation().IsInteger32());
-
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseOrConstantAtStart(right));
- } else if (r.IsDouble()) {
- ASSERT(left->representation().IsDouble());
- ASSERT(right->representation().IsDouble());
-
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- } else {
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
- bool reversed = op == Token::GT || op == Token::LTE;
- LOperand* left_operand = UseFixed(left, reversed ? rax : rdx);
- LOperand* right_operand = UseFixed(right, reversed ? rdx : rax);
- LCmpTAndBranch* result = new LCmpTAndBranch(left_operand,
- right_operand);
- return MarkAsCall(result, instr);
- }
- } else if (v->IsIsSmi()) {
- HIsSmi* compare = HIsSmi::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LIsSmiAndBranch(Use(compare->value()));
- } else if (v->IsHasInstanceType()) {
- HHasInstanceType* compare = HHasInstanceType::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LHasInstanceTypeAndBranch(
- UseRegisterAtStart(compare->value()));
- } else if (v->IsHasCachedArrayIndex()) {
- HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(compare->value()));
- } else if (v->IsIsNull()) {
- HIsNull* compare = HIsNull::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- // We only need a temp register for non-strict compare.
- LOperand* temp = compare->is_strict() ? NULL : TempRegister();
- return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
- temp);
- } else if (v->IsIsObject()) {
- HIsObject* compare = HIsObject::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()));
- } else if (v->IsCompareJSObjectEq()) {
- HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
- return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsInstanceOf()) {
- HInstanceOf* instance_of = HInstanceOf::cast(v);
- LInstanceOfAndBranch* result =
- new LInstanceOfAndBranch(UseFixed(instance_of->left(), rax),
- UseFixed(instance_of->right(), rdx));
- return MarkAsCall(result, instr);
- } else if (v->IsTypeofIs()) {
- HTypeofIs* typeof_is = HTypeofIs::cast(v);
- return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
- } else if (v->IsIsConstructCall()) {
- return new LIsConstructCallAndBranch(TempRegister());
- } else {
- if (v->IsConstant()) {
- if (HConstant::cast(v)->handle()->IsTrue()) {
- return new LGoto(instr->FirstSuccessor()->block_id());
- } else if (HConstant::cast(v)->handle()->IsFalse()) {
- return new LGoto(instr->SecondSuccessor()->block_id());
- }
- }
- Abort("Undefined compare before branch");
- return NULL;
- }
- }
- return new LBranch(UseRegisterAtStart(v));
-}
-
-
-LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new LCmpMapAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
- return DefineAsRegister(new LArgumentsLength(Use(length->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- return DefineAsRegister(new LArgumentsElements);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LOperand* left = UseFixed(instr->left(), rax);
- LOperand* right = UseFixed(instr->right(), rdx);
- LInstanceOf* result = new LInstanceOf(left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
- HInstanceOfKnownGlobal* instr) {
- LInstanceOfKnownGlobal* result =
- new LInstanceOfKnownGlobal(UseFixed(instr->value(), rax),
- FixedTemp(rdi));
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
- LOperand* function = UseFixed(instr->function(), rdi);
- LOperand* receiver = UseFixed(instr->receiver(), rax);
- LOperand* length = UseFixed(instr->length(), rbx);
- LOperand* elements = UseFixed(instr->elements(), rcx);
- LApplyArguments* result = new LApplyArguments(function,
- receiver,
- length,
- elements);
- return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
- LOperand* argument = UseOrConstant(instr->argument());
- return new LPushArgument(argument);
-}
-
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return DefineAsRegister(new LContext);
-}
-
-
-LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LOuterContext(context));
-}
-
-
-LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- return DefineAsRegister(new LGlobalObject);
-}
-
-
-LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LGlobalReceiver(global_object));
-}
-
-
-LInstruction* LChunkBuilder::DoCallConstantFunction(
- HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallConstantFunction, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- BuiltinFunctionId op = instr->op();
- if (op == kMathLog || op == kMathSin || op == kMathCos) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LUnaryMathOperation* result = new LUnaryMathOperation(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
- } else {
- LOperand* input = UseRegisterAtStart(instr->value());
- LUnaryMathOperation* result = new LUnaryMathOperation(input);
- switch (op) {
- case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- case kMathFloor:
- return AssignEnvironment(DefineAsRegister(result));
- case kMathRound:
- return AssignEnvironment(DefineAsRegister(result));
- case kMathSqrt:
- return DefineSameAsFirst(result);
- case kMathPowHalf:
- return DefineSameAsFirst(result);
- default:
- UNREACHABLE();
- return NULL;
- }
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- ASSERT(instr->key()->representation().IsTagged());
- LOperand* key = UseFixed(instr->key(), rcx);
- argument_count_ -= instr->argument_count();
- LCallKeyed* result = new LCallKeyed(key);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallNamed, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallGlobal, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallKnownGlobal, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* constructor = UseFixed(instr->constructor(), rdi);
- argument_count_ -= instr->argument_count();
- LCallNew* result = new LCallNew(constructor);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- argument_count_ -= instr->argument_count();
- LCallFunction* result = new LCallFunction();
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallRuntime, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- return DoShift(Token::SHR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- return DoShift(Token::SAR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- return DoShift(Token::SHL, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
- return DoBit(Token::BIT_AND, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
- ASSERT(instr->value()->representation().IsInteger32());
- ASSERT(instr->representation().IsInteger32());
- LOperand* input = UseRegisterAtStart(instr->value());
- LBitNotI* result = new LBitNotI(input);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
- return DoBit(Token::BIT_OR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
- return DoBit(Token::BIT_XOR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsInteger32()) {
- // The temporary operand is necessary to ensure that right is not allocated
- // into rdx.
- LOperand* temp = FixedTemp(rdx);
- LOperand* dividend = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* result = new LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(result, rax));
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::DIV, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LInstruction* result;
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LModI* mod = new LModI(value, UseOrConstant(instr->right()), NULL);
- result = DefineSameAsFirst(mod);
- } else {
- // The temporary operand is necessary to ensure that right is not
- // allocated into edx.
- LOperand* temp = FixedTemp(rdx);
- LOperand* value = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LModI* mod = new LModI(value, divisor, temp);
- result = DefineFixed(mod, rdx);
- }
-
- return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero))
- ? AssignEnvironment(result)
- : result;
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
- } else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC.
- // We need to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LOperand* left = UseFixedDouble(instr->left(), xmm2);
- LOperand* right = UseFixedDouble(instr->right(), xmm1);
- LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
- LMulI* mul = new LMulI(left, right);
- return AssignEnvironment(DefineSameAsFirst(mul));
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MUL, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::MUL, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LSubI* sub = new LSubI(left, right);
- LInstruction* result = DefineSameAsFirst(sub);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::SUB, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::SUB, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- LAddI* add = new LAddI(left, right);
- LInstruction* result = DefineSameAsFirst(add);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::ADD, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::ADD, instr);
- }
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double power. It can't trigger a GC.
- // We need to use fixed result register for the call.
- Representation exponent_type = instr->right()->representation();
- ASSERT(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), xmm2);
- LOperand* right = exponent_type.IsDouble() ?
- UseFixedDouble(instr->right(), xmm1) :
-#ifdef _WIN64
- UseFixed(instr->right(), rdx);
-#else
- UseFixed(instr->right(), rdi);
-#endif
- LPower* result = new LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr,
- CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
- Token::Value op = instr->token();
- Representation r = instr->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- return DefineAsRegister(new LCmpID(left, right));
- } else if (r.IsDouble()) {
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return DefineAsRegister(new LCmpID(left, right));
- } else {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- bool reversed = (op == Token::GT || op == Token::LTE);
- LOperand* left = UseFixed(instr->left(), reversed ? rax : rdx);
- LOperand* right = UseFixed(instr->right(), reversed ? rdx : rax);
- LCmpT* result = new LCmpT(left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareJSObjectEq(
- HCompareJSObjectEq* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsNull(value));
-}
-
-
-LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
-
- return DefineAsRegister(new LIsObject(value));
-}
-
-
-LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseAtStart(instr->value());
-
- return DefineAsRegister(new LIsSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LHasInstanceType(value));
-}
-
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
- HGetCachedArrayIndex* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
- HHasCachedArrayIndex* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
- return DefineAsRegister(new LHasCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
- Abort("Unimplemented: %s", "DoClassOfTest");
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LJSArrayLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LFixedArrayLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoExternalArrayLength(
- HExternalArrayLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LExternalArrayLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- LOperand* object = UseRegister(instr->value());
- LValueOf* result = new LValueOf(object);
- return AssignEnvironment(DefineSameAsFirst(result));
-}
-
-
-LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
- Use(instr->length())));
-}
-
-
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- LOperand* value = UseFixed(instr->value(), rax);
- return MarkAsCall(new LThrow(value), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Representation from = instr->from();
- Representation to = instr->to();
- if (from.IsTagged()) {
- if (to.IsDouble()) {
- LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new LNumberUntagD(value);
- return AssignEnvironment(DefineAsRegister(res));
- } else {
- ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- bool needs_check = !instr->value()->type().IsSmi();
- if (needs_check) {
- LOperand* xmm_temp =
- (instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
- ? NULL
- : FixedTemp(xmm1);
- LTaggedToI* res = new LTaggedToI(value, xmm_temp);
- return AssignEnvironment(DefineSameAsFirst(res));
- } else {
- return DefineSameAsFirst(new LSmiUntag(value, needs_check));
- }
- }
- } else if (from.IsDouble()) {
- if (to.IsTagged()) {
- LOperand* value = UseRegister(instr->value());
- LOperand* temp = TempRegister();
-
- // Make sure that temp and result_temp are different registers.
- LUnallocated* result_temp = TempRegister();
- LNumberTagD* result = new LNumberTagD(value, temp);
- return AssignPointerMap(Define(result, result_temp));
- } else {
- ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(DefineAsRegister(new LDoubleToI(value)));
- }
- } else if (from.IsInteger32()) {
- if (to.IsTagged()) {
- HValue* val = instr->value();
- LOperand* value = UseRegister(val);
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return DefineSameAsFirst(new LSmiTag(value));
- } else {
- LNumberTagI* result = new LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- }
- } else {
- ASSERT(to.IsDouble());
- return DefineAsRegister(new LInteger32ToDouble(Use(instr->value())));
- }
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckNonSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LCheckInstanceType* result = new LCheckInstanceType(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LOperand* temp = TempRegister();
- LCheckPrototypeMaps* result = new LCheckPrototypeMaps(temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckFunction(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LCheckMap* result = new LCheckMap(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- return new LReturn(UseFixed(instr->value(), rax));
-}
-
-
-LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
- Representation r = instr->representation();
- if (r.IsInteger32()) {
- return DefineAsRegister(new LConstantI);
- } else if (r.IsDouble()) {
- LOperand* temp = TempRegister();
- return DefineAsRegister(new LConstantD(temp));
- } else if (r.IsTagged()) {
- return DefineAsRegister(new LConstantT);
- } else {
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new LLoadGlobalCell;
- return instr->check_hole_value()
- ? AssignEnvironment(DefineAsRegister(result))
- : DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
- LOperand* global_object = UseFixed(instr->global_object(), rax);
- LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LStoreGlobalCell* result =
- new LStoreGlobalCell(UseRegister(instr->value()), TempRegister());
- return instr->check_hole_value() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
- LOperand* global_object = UseFixed(instr->global_object(), rdx);
- LOperand* value = UseFixed(instr->value(), rax);
- LStoreGlobalGeneric* result = new LStoreGlobalGeneric(global_object, value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadContextSlot(context));
-}
-
-
-LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* context;
- LOperand* value;
- LOperand* temp;
- if (instr->NeedsWriteBarrier()) {
- context = UseTempRegister(instr->context());
- value = UseTempRegister(instr->value());
- temp = TempRegister();
- } else {
- context = UseRegister(instr->context());
- value = UseRegister(instr->value());
- temp = NULL;
- }
- return new LStoreContextSlot(context, value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- ASSERT(instr->representation().IsTagged());
- LOperand* obj = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new LLoadNamedField(obj));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
- HLoadNamedFieldPolymorphic* instr) {
- ASSERT(instr->representation().IsTagged());
- if (instr->need_generic()) {
- LOperand* obj = UseFixed(instr->object(), rax);
- LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
- return MarkAsCall(DefineFixed(result, rax), instr);
- } else {
- LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- LOperand* object = UseFixed(instr->object(), rax);
- LLoadNamedGeneric* result = new LLoadNamedGeneric(object);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
- HLoadFunctionPrototype* instr) {
- return AssignEnvironment(DefineAsRegister(
- new LLoadFunctionPrototype(UseRegister(instr->function()))));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadElements(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
- HLoadExternalArrayPointer* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadExternalArrayPointer(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
- HLoadKeyedFastElement* instr) {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
- LOperand* obj = UseRegisterAtStart(instr->object());
- LOperand* key = UseRegisterAtStart(instr->key());
- LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
- return AssignEnvironment(DefineSameAsFirst(result));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
- HLoadKeyedSpecializedArrayElement* instr) {
- ExternalArrayType array_type = instr->array_type();
- Representation representation(instr->representation());
- ASSERT((representation.IsInteger32() && array_type != kExternalFloatArray) ||
- (representation.IsDouble() && array_type == kExternalFloatArray));
- ASSERT(instr->key()->representation().IsInteger32());
- LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* key = UseRegister(instr->key());
- LLoadKeyedSpecializedArrayElement* result =
- new LLoadKeyedSpecializedArrayElement(external_pointer, key);
- LInstruction* load_instr = DefineAsRegister(result);
- // An unsigned int array load might overflow and cause a deopt, make sure it
- // has an environment.
- return (array_type == kExternalUnsignedIntArray) ?
- AssignEnvironment(load_instr) : load_instr;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- LOperand* object = UseFixed(instr->object(), rdx);
- LOperand* key = UseFixed(instr->key(), rax);
-
- LLoadKeyedGeneric* result = new LLoadKeyedGeneric(object, key);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
- HStoreKeyedFastElement* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- ASSERT(instr->value()->representation().IsTagged());
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
-
- LOperand* obj = UseTempRegister(instr->object());
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- LOperand* key = needs_write_barrier
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
-
- return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
- HStoreKeyedSpecializedArrayElement* instr) {
- Representation representation(instr->value()->representation());
- ExternalArrayType array_type = instr->array_type();
- ASSERT((representation.IsInteger32() && array_type != kExternalFloatArray) ||
- (representation.IsDouble() && array_type == kExternalFloatArray));
- ASSERT(instr->external_pointer()->representation().IsExternal());
- ASSERT(instr->key()->representation().IsInteger32());
-
- LOperand* external_pointer = UseRegister(instr->external_pointer());
- bool val_is_temp_register = array_type == kExternalPixelArray ||
- array_type == kExternalFloatArray;
- LOperand* val = val_is_temp_register
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
- LOperand* key = UseRegister(instr->key());
-
- return new LStoreKeyedSpecializedArrayElement(external_pointer,
- key,
- val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- LOperand* object = UseFixed(instr->object(), rdx);
- LOperand* key = UseFixed(instr->key(), rcx);
- LOperand* value = UseFixed(instr->value(), rax);
-
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsTagged());
- ASSERT(instr->value()->representation().IsTagged());
-
- LStoreKeyedGeneric* result = new LStoreKeyedGeneric(object, key, value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
-
- LOperand* obj = needs_write_barrier
- ? UseTempRegister(instr->object())
- : UseRegisterAtStart(instr->object());
-
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
-
- // We only need a scratch register if we have a write barrier or we
- // have a store into the properties array (not in-object-property).
- LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
- ? TempRegister() : NULL;
-
- return new LStoreNamedField(obj, val, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- LOperand* object = UseFixed(instr->object(), rdx);
- LOperand* value = UseFixed(instr->value(), rax);
-
- LStoreNamedGeneric* result = new LStoreNamedGeneric(object, value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegisterOrConstant(instr->index());
- LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
- LOperand* char_code = UseRegister(instr->value());
- LStringCharFromCode* result = new LStringCharFromCode(char_code);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
- LOperand* string = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LStringLength(string));
-}
-
-
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- return MarkAsCall(DefineFixed(new LArrayLiteral, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- return MarkAsCall(DefineFixed(new LObjectLiteral, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- return MarkAsCall(DefineFixed(new LRegExpLiteral, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- return MarkAsCall(DefineFixed(new LFunctionLiteral, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LDeleteProperty* result =
- new LDeleteProperty(Use(instr->object()), UseOrConstant(instr->key()));
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- allocator_->MarkAsOsrEntry();
- current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new LOsrEntry);
-}
-
-
-LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new LParameter, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- return DefineAsSpilled(new LUnknownOSRValue, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallStub, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object.
- // arguments.length and element access are supported directly on
- // stack arguments, and any real arguments object use causes a bailout.
- // So this value is never used.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- LOperand* arguments = UseRegister(instr->arguments());
- LOperand* length = UseTempRegister(instr->length());
- LOperand* index = Use(instr->index());
- LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
- return AssignEnvironment(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), rax);
- LToFastProperties* result = new LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LTypeof* result = new LTypeof(UseAtStart(instr->value()));
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
- return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
- return DefineAsRegister(new LIsConstructCall);
-}
-
-
-LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = 0; i < instr->values()->length(); ++i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
-
- // If there is an instruction pending deoptimization environment create a
- // lazy bailout instruction to capture the environment.
- if (pending_deoptimization_ast_id_ == instr->ast_id()) {
- LLazyBailout* lazy_bailout = new LLazyBailout;
- LInstruction* result = AssignEnvironment(lazy_bailout);
- instruction_pending_deoptimization_environment_->
- set_deoptimization_environment(result->environment());
- ClearInstructionPendingDeoptimizationEnvironment();
- return result;
- }
-
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- return MarkAsCall(new LStackCheck, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
- HEnvironment* outer = current_block_->last_environment();
- HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->function(),
- false,
- undefined);
- current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedClosure(instr->closure());
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- HEnvironment* outer = current_block_->last_environment()->outer();
- current_block_->UpdateEnvironment(outer);
- return NULL;
-}
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/lithium-x64.h b/src/3rdparty/v8/src/x64/lithium-x64.h
deleted file mode 100644
index 512abbb..0000000
--- a/src/3rdparty/v8/src/x64/lithium-x64.h
+++ /dev/null
@@ -1,2161 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_LITHIUM_X64_H_
-#define V8_X64_LITHIUM_X64_H_
-
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "lithium.h"
-#include "safepoint-table.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LCodeGen;
-
-#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
- V(ControlInstruction) \
- V(Call) \
- LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddI) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(ArrayLiteral) \
- V(BitI) \
- V(BitNotI) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallConstantFunction) \
- V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
- V(CallNew) \
- V(CallRuntime) \
- V(CallStub) \
- V(CheckFunction) \
- V(CheckInstanceType) \
- V(CheckMap) \
- V(CheckNonSmi) \
- V(CheckPrototypeMaps) \
- V(CheckSmi) \
- V(ClassOfTest) \
- V(ClassOfTestAndBranch) \
- V(CmpID) \
- V(CmpIDAndBranch) \
- V(CmpJSObjectEq) \
- V(CmpJSObjectEqAndBranch) \
- V(CmpMapAndBranch) \
- V(CmpT) \
- V(CmpTAndBranch) \
- V(ConstantD) \
- V(ConstantI) \
- V(ConstantT) \
- V(Context) \
- V(DeleteProperty) \
- V(Deoptimize) \
- V(DivI) \
- V(DoubleToI) \
- V(ExternalArrayLength) \
- V(FixedArrayLength) \
- V(FunctionLiteral) \
- V(Gap) \
- V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
- V(Goto) \
- V(HasInstanceType) \
- V(HasInstanceTypeAndBranch) \
- V(HasCachedArrayIndex) \
- V(HasCachedArrayIndexAndBranch) \
- V(InstanceOf) \
- V(InstanceOfAndBranch) \
- V(InstanceOfKnownGlobal) \
- V(Integer32ToDouble) \
- V(IsNull) \
- V(IsNullAndBranch) \
- V(IsObject) \
- V(IsObjectAndBranch) \
- V(IsSmi) \
- V(IsSmiAndBranch) \
- V(JSArrayLength) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadElements) \
- V(LoadExternalArrayPointer) \
- V(LoadGlobalCell) \
- V(LoadGlobalGeneric) \
- V(LoadKeyedFastElement) \
- V(LoadKeyedGeneric) \
- V(LoadKeyedSpecializedArrayElement) \
- V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
- V(LoadNamedGeneric) \
- V(LoadFunctionPrototype) \
- V(ModI) \
- V(MulI) \
- V(NumberTagD) \
- V(NumberTagI) \
- V(NumberUntagD) \
- V(ObjectLiteral) \
- V(OsrEntry) \
- V(OuterContext) \
- V(Parameter) \
- V(Power) \
- V(PushArgument) \
- V(RegExpLiteral) \
- V(Return) \
- V(ShiftI) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreContextSlot) \
- V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
- V(StoreKeyedFastElement) \
- V(StoreKeyedGeneric) \
- V(StoreKeyedSpecializedArrayElement) \
- V(StoreNamedField) \
- V(StoreNamedGeneric) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringLength) \
- V(SubI) \
- V(TaggedToI) \
- V(ToFastProperties) \
- V(Throw) \
- V(Typeof) \
- V(TypeofIs) \
- V(TypeofIsAndBranch) \
- V(IsConstructCall) \
- V(IsConstructCallAndBranch) \
- V(UnaryMathOperation) \
- V(UnknownOSRValue) \
- V(ValueOf)
-
-
-#define DECLARE_INSTRUCTION(type) \
- virtual bool Is##type() const { return true; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
- }
-
-
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- DECLARE_INSTRUCTION(type)
-
-
-#define DECLARE_HYDROGEN_ACCESSOR(type) \
- H##type* hydrogen() const { \
- return H##type::cast(hydrogen_value()); \
- }
-
-
-class LInstruction: public ZoneObject {
- public:
- LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- is_call_(false),
- is_save_doubles_(false) { }
-
- virtual ~LInstruction() { }
-
- virtual void CompileToNative(LCodeGen* generator) = 0;
- virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream) = 0;
- virtual void PrintOutputOperandTo(StringStream* stream) = 0;
-
- // Declare virtual type testers.
-#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
- LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- virtual bool IsControl() const { return false; }
- virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
-
- void set_environment(LEnvironment* env) { environment_ = env; }
- LEnvironment* environment() const { return environment_; }
- bool HasEnvironment() const { return environment_ != NULL; }
-
- void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
- LPointerMap* pointer_map() const { return pointer_map_.get(); }
- bool HasPointerMap() const { return pointer_map_.is_set(); }
-
- void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
- HValue* hydrogen_value() const { return hydrogen_value_; }
-
- void set_deoptimization_environment(LEnvironment* env) {
- deoptimization_environment_.set(env);
- }
- LEnvironment* deoptimization_environment() const {
- return deoptimization_environment_.get();
- }
- bool HasDeoptimizationEnvironment() const {
- return deoptimization_environment_.is_set();
- }
-
- void MarkAsCall() { is_call_ = true; }
- void MarkAsSaveDoubles() { is_save_doubles_ = true; }
-
- // Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return is_call_; }
- bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
-
- virtual bool HasResult() const = 0;
- virtual LOperand* result() = 0;
-
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
- LOperand* FirstInput() { return InputAt(0); }
- LOperand* Output() { return HasResult() ? result() : NULL; }
-
-#ifdef DEBUG
- void VerifyCall();
-#endif
-
- private:
- LEnvironment* environment_;
- SetOncePointer<LPointerMap> pointer_map_;
- HValue* hydrogen_value_;
- SetOncePointer<LEnvironment> deoptimization_environment_;
- bool is_call_;
- bool is_save_doubles_;
-};
-
-
-template<typename ElementType, int NumElements>
-class OperandContainer {
- public:
- OperandContainer() {
- for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
- }
- int length() { return NumElements; }
- ElementType& operator[](int i) {
- ASSERT(i < length());
- return elems_[i];
- }
- void PrintOperandsTo(StringStream* stream);
-
- private:
- ElementType elems_[NumElements];
-};
-
-
-template<typename ElementType>
-class OperandContainer<ElementType, 0> {
- public:
- int length() { return 0; }
- void PrintOperandsTo(StringStream* stream) { }
- ElementType& operator[](int i) {
- UNREACHABLE();
- static ElementType t = 0;
- return t;
- }
-};
-
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
- public:
- // Allow 0 or 1 output operands.
- STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0; }
- void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() { return results_[0]; }
-
- int InputCount() { return I; }
- LOperand* InputAt(int i) { return inputs_[i]; }
-
- int TempCount() { return T; }
- LOperand* TempAt(int i) { return temps_[i]; }
-
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
- protected:
- OperandContainer<LOperand*, R> results_;
- OperandContainer<LOperand*, I> inputs_;
- OperandContainer<LOperand*, T> temps_;
-};
-
-
-class LGap: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGap(HBasicBlock* block)
- : block_(block) {
- parallel_moves_[BEFORE] = NULL;
- parallel_moves_[START] = NULL;
- parallel_moves_[END] = NULL;
- parallel_moves_[AFTER] = NULL;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
- virtual void PrintDataTo(StringStream* stream);
-
- bool IsRedundant() const;
-
- HBasicBlock* block() const { return block_; }
-
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
-
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
- if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
- return parallel_moves_[pos];
- }
-
- LParallelMove* GetParallelMove(InnerPosition pos) {
- return parallel_moves_[pos];
- }
-
- private:
- LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
- HBasicBlock* block_;
-};
-
-
-class LGoto: public LTemplateInstruction<0, 0, 0> {
- public:
- LGoto(int block_id, bool include_stack_check = false)
- : block_id_(block_id), include_stack_check_(include_stack_check) { }
-
- DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
-
- int block_id() const { return block_id_; }
- bool include_stack_check() const { return include_stack_check_; }
-
- private:
- int block_id_;
- bool include_stack_check_;
-};
-
-
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
- public:
- LLazyBailout() : gap_instructions_size_(0) { }
-
- DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-
- void set_gap_instructions_size(int gap_instructions_size) {
- gap_instructions_size_ = gap_instructions_size;
- }
- int gap_instructions_size() { return gap_instructions_size_; }
-
- private:
- int gap_instructions_size_;
-};
-
-
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
-};
-
-
-class LLabel: public LGap {
- public:
- explicit LLabel(HBasicBlock* block)
- : LGap(block), replacement_(NULL) { }
-
- DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
- virtual void PrintDataTo(StringStream* stream);
-
- int block_id() const { return block()->block_id(); }
- bool is_loop_header() const { return block()->IsLoopHeader(); }
- Label* label() { return &label_; }
- LLabel* replacement() const { return replacement_; }
- void set_replacement(LLabel* label) { replacement_ = label; }
- bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
- Label label_;
- LLabel* replacement_;
-};
-
-
-class LParameter: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-};
-
-
-class LCallStub: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
- DECLARE_HYDROGEN_ACCESSOR(CallStub)
-
- TranscendentalCache::Type transcendental_type() {
- return hydrogen()->transcendental_type();
- }
-};
-
-
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-
-template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
- public:
- DECLARE_INSTRUCTION(ControlInstruction)
- virtual bool IsControl() const { return true; }
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
- void SetBranchTargets(int true_block_id, int false_block_id) {
- true_block_id_ = true_block_id;
- false_block_id_ = false_block_id;
- }
-
- private:
- int true_block_id_;
- int false_block_id_;
-};
-
-
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
- public:
- LApplyArguments(LOperand* function,
- LOperand* receiver,
- LOperand* length,
- LOperand* elements) {
- inputs_[0] = function;
- inputs_[1] = receiver;
- inputs_[2] = length;
- inputs_[3] = elements;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
-
- LOperand* function() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* length() { return inputs_[2]; }
- LOperand* elements() { return inputs_[3]; }
-};
-
-
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
- public:
- LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
- inputs_[0] = arguments;
- inputs_[1] = length;
- inputs_[2] = index;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
- LOperand* arguments() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArgumentsLength(LOperand* elements) {
- inputs_[0] = elements;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
-};
-
-
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
- public:
- LArgumentsElements() { }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
-};
-
-
-class LModI: public LTemplateInstruction<1, 2, 1> {
- public:
- LModI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-};
-
-
-class LDivI: public LTemplateInstruction<1, 2, 1> {
- public:
- LDivI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-};
-
-
-class LMulI: public LTemplateInstruction<1, 2, 0> {
- public:
- LMulI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-class LCmpID: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpID(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
- }
-};
-
-
-class LCmpIDAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpIDAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUnaryMathOperation(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-
- virtual void PrintDataTo(StringStream* stream);
- BuiltinFunctionId op() const { return hydrogen()->op(); }
-};
-
-
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpJSObjectEq(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
-};
-
-
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
- "cmp-jsobject-eq-and-branch")
-};
-
-
-class LIsNull: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsNull(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
- DECLARE_HYDROGEN_ACCESSOR(IsNull)
-
- bool is_strict() const { return hydrogen()->is_strict(); }
-};
-
-
-class LIsNullAndBranch: public LControlInstruction<1, 1> {
- public:
- LIsNullAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNull)
-
- bool is_strict() const { return hydrogen()->is_strict(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsObject: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsObject(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
-};
-
-
-class LIsObjectAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LIsObjectAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsSmi: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
- DECLARE_HYDROGEN_ACCESSOR(IsSmi)
-};
-
-
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LIsSmiAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LHasInstanceType(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
-};
-
-
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LHasInstanceTypeAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
- "has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGetCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LHasCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
- "has-cached-array-index-and-branch")
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LClassOfTest: public LTemplateInstruction<1, 1, 1> {
- public:
- LClassOfTest(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
- public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
- "class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpT(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LCmpTAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpTAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
- public:
- LInstanceOf(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
-class LInstanceOfAndBranch: public LControlInstruction<2, 0> {
- public:
- LInstanceOfAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
-};
-
-
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
- public:
- LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
- "instance-of-known-global")
- DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
-
- Handle<JSFunction> function() const { return hydrogen()->function(); }
-};
-
-
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
- public:
- LBoundsCheck(LOperand* index, LOperand* length) {
- inputs_[0] = index;
- inputs_[1] = length;
- }
-
- LOperand* index() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
-};
-
-
-class LBitI: public LTemplateInstruction<1, 2, 0> {
- public:
- LBitI(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
-
- DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-
- private:
- Token::Value op_;
-};
-
-
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
- public:
- LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : op_(op), can_deopt_(can_deopt) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
-
- bool can_deopt() const { return can_deopt_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
-
- private:
- Token::Value op_;
- bool can_deopt_;
-};
-
-
-class LSubI: public LTemplateInstruction<1, 2, 0> {
- public:
- LSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- int32_t value() const { return hydrogen()->Integer32Value(); }
-};
-
-
-class LConstantD: public LTemplateInstruction<1, 0, 1> {
- public:
- explicit LConstantD(LOperand* temp) {
- temps_[0] = temp;
- }
- DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- double value() const { return hydrogen()->DoubleValue(); }
-};
-
-
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Handle<Object> value() const { return hydrogen()->handle(); }
-};
-
-
-class LBranch: public LControlInstruction<1, 0> {
- public:
- explicit LBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Value)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCmpMapAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
- virtual bool IsControl() const { return true; }
-
- Handle<Map> map() const { return hydrogen()->map(); }
- int true_block_id() const {
- return hydrogen()->FirstSuccessor()->block_id();
- }
- int false_block_id() const {
- return hydrogen()->SecondSuccessor()->block_id();
- }
-};
-
-
-class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LJSArrayLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
- DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
-};
-
-
-class LExternalArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LExternalArrayLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ExternalArrayLength, "external-array-length")
- DECLARE_HYDROGEN_ACCESSOR(ExternalArrayLength)
-};
-
-
-class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFixedArrayLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed-array-length")
- DECLARE_HYDROGEN_ACCESSOR(FixedArrayLength)
-};
-
-
-class LValueOf: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LValueOf(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-};
-
-
-class LThrow: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LThrow(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
-};
-
-
-class LBitNotI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LBitNotI(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
-};
-
-
-class LAddI: public LTemplateInstruction<1, 2, 0> {
- public:
- LAddI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LPower: public LTemplateInstruction<1, 2, 0> {
- public:
- LPower(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Power, "power")
- DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
-
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
-
- private:
- Token::Value op_;
-};
-
-
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
-
- Token::Value op() const { return op_; }
-
- private:
- Token::Value op_;
-};
-
-
-class LReturn: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LReturn(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Return, "return")
-};
-
-
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedField(LOperand* object) {
- inputs_[0] = object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-};
-
-
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedFieldPolymorphic(LOperand* object) {
- inputs_[0] = object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-
- LOperand* object() { return inputs_[0]; }
-};
-
-
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedGeneric(LOperand* object) {
- inputs_[0] = object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
- LOperand* object() { return inputs_[0]; }
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadFunctionPrototype(LOperand* function) {
- inputs_[0] = function;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
- DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-
- LOperand* function() { return inputs_[0]; }
-};
-
-
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
-};
-
-
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadExternalArrayPointer(LOperand* object) {
- inputs_[0] = object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
- "load-external-array-pointer")
-};
-
-
-class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
- inputs_[0] = elements;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-};
-
-
-class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
- LOperand* key) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
- "load-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
-
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- ExternalArrayType array_type() const {
- return hydrogen()->array_type();
- }
-};
-
-
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-};
-
-
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
-};
-
-
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadGlobalGeneric(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
- LOperand* global_object() { return inputs_[0]; }
- Handle<Object> name() const { return hydrogen()->name(); }
- bool for_typeof() const { return hydrogen()->for_typeof(); }
-};
-
-
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
- public:
- explicit LStoreGlobalCell(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-};
-
-
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
- public:
- explicit LStoreGlobalGeneric(LOperand* global_object,
- LOperand* value) {
- inputs_[0] = global_object;
- inputs_[1] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
-
- LOperand* global_object() { return InputAt(0); }
- Handle<Object> name() const { return hydrogen()->name(); }
- LOperand* value() { return InputAt(1); }
-};
-
-
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadContextSlot(LOperand* context) {
- inputs_[0] = context;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-
- LOperand* context() { return InputAt(0); }
- int slot_index() { return hydrogen()->slot_index(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
-
- LOperand* context() { return InputAt(0); }
- LOperand* value() { return InputAt(1); }
- int slot_index() { return hydrogen()->slot_index(); }
- int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LPushArgument(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
-};
-
-
-class LContext: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Context, "context")
-};
-
-
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LOuterContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
-
- LOperand* context() { return InputAt(0); }
-};
-
-
-class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-};
-
-
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalReceiver(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
-
- LOperand* global() { return InputAt(0); }
-};
-
-
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
- DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> function() { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallKeyed(LOperand* key) {
- inputs_[0] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
- DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
-
- LOperand* key() { return inputs_[0]; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNamed: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
- DECLARE_HYDROGEN_ACCESSOR(CallNamed)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> name() const { return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallFunction: public LTemplateInstruction<1, 0, 0> {
- public:
- LCallFunction() {}
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- int arity() const { return hydrogen()->argument_count() - 2; }
-};
-
-
-class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
- DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> name() const {return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
- DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> target() const { return hydrogen()->target(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNew: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallNew(LOperand* constructor) {
- inputs_[0] = constructor;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
- DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
- const Runtime::Function* function() const { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count(); }
-};
-
-
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
-};
-
-
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagI(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
-};
-
-
-class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LNumberTagD(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
-};
-
-
-// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleToI(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
- public:
- LTaggedToI(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LSmiTag(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
-};
-
-
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberUntagD(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
-};
-
-
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
- public:
- LSmiUntag(LOperand* value, bool needs_check)
- : needs_check_(needs_check) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
- bool needs_check() const { return needs_check_; }
-
- private:
- bool needs_check_;
-};
-
-
-class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
- inputs_[0] = object;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
- virtual void PrintDataTo(StringStream* stream);
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- Handle<Object> name() const { return hydrogen()->name(); }
- bool is_in_object() { return hydrogen()->is_in_object(); }
- int offset() { return hydrogen()->offset(); }
- bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
- Handle<Map> transition() const { return hydrogen()->transition(); }
-};
-
-
-class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreNamedGeneric(LOperand* object, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
- virtual void PrintDataTo(StringStream* stream);
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
-class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
- inputs_[0] = obj;
- inputs_[1] = key;
- inputs_[2] = val;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
-
- virtual void PrintDataTo(StringStream* stream);
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-};
-
-
-class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
- LOperand* key,
- LOperand* val) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- inputs_[2] = val;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
- "store-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
-
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- ExternalArrayType array_type() const {
- return hydrogen()->array_type();
- }
-};
-
-
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedGeneric(LOperand* object, LOperand* key, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = key;
- inputs_[2] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
-
- virtual void PrintDataTo(StringStream* stream);
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-};
-
-
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
- public:
- LStringCharCodeAt(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-};
-
-
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LStringCharFromCode(LOperand* char_code) {
- inputs_[0] = char_code;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-
- LOperand* char_code() { return inputs_[0]; }
-};
-
-
-class LStringLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LStringLength(LOperand* string) {
- inputs_[0] = string;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
- DECLARE_HYDROGEN_ACCESSOR(StringLength)
-
- LOperand* string() { return inputs_[0]; }
-};
-
-
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckFunction(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
-};
-
-
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckInstanceType(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-};
-
-
-class LCheckMap: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckMap(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
- DECLARE_HYDROGEN_ACCESSOR(CheckMap)
-};
-
-
-class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
- public:
- explicit LCheckPrototypeMaps(LOperand* temp) {
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
-
- Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
- Handle<JSObject> holder() const { return hydrogen()->holder(); }
-};
-
-
-class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
-};
-
-
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckNonSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
-};
-
-
-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
-};
-
-
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
-};
-
-
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
- DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-
- Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
-};
-
-
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LTypeof(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LTypeofIs(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LTypeofIsAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
- DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
-};
-
-
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) {
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
-class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
- public:
- LDeleteProperty(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-};
-
-
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
- public:
- LOsrEntry();
-
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
- LOperand** SpilledRegisterArray() { return register_spills_; }
- LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand);
-
- private:
- // Arrays of spill slot operands for registers with an assigned spill
- // slot, i.e., that must also be restored to the spill slot on OSR entry.
- // NULL if the register has no assigned spill slot. Indexed by allocation
- // index.
- LOperand* register_spills_[Register::kNumAllocatableRegisters];
- LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
-};
-
-
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
-};
-
-
-class LChunkBuilder;
-class LChunk: public ZoneObject {
- public:
- explicit LChunk(CompilationInfo* info, HGraph* graph)
- : spill_slot_count_(0),
- info_(info),
- graph_(graph),
- instructions_(32),
- pointer_maps_(8),
- inlined_closures_(1) { }
-
- void AddInstruction(LInstruction* instruction, HBasicBlock* block);
- LConstantOperand* DefineConstantOperand(HConstant* constant);
- Handle<Object> LookupLiteral(LConstantOperand* operand) const;
- Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
-
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
-
- int ParameterAt(int index);
- int GetParameterStackSlot(int index) const;
- int spill_slot_count() const { return spill_slot_count_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
- void AddGapMove(int index, LOperand* from, LOperand* to);
- LGap* GetGapAt(int index) const;
- bool IsGapAt(int index) const;
- int NearestGapPos(int index) const;
- void MarkEmptyBlocks();
- const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
- LLabel* GetLabel(int block_id) const {
- HBasicBlock* block = graph_->blocks()->at(block_id);
- int first_instruction = block->first_instruction_index();
- return LLabel::cast(instructions_[first_instruction]);
- }
- int LookupDestination(int block_id) const {
- LLabel* cur = GetLabel(block_id);
- while (cur->replacement() != NULL) {
- cur = cur->replacement();
- }
- return cur->block_id();
- }
- Label* GetAssemblyLabel(int block_id) const {
- LLabel* label = GetLabel(block_id);
- ASSERT(!label->HasReplacement());
- return label->label();
- }
-
- const ZoneList<Handle<JSFunction> >* inlined_closures() const {
- return &inlined_closures_;
- }
-
- void AddInlinedClosure(Handle<JSFunction> closure) {
- inlined_closures_.Add(closure);
- }
-
- private:
- int spill_slot_count_;
- CompilationInfo* info_;
- HGraph* const graph_;
- ZoneList<LInstruction*> instructions_;
- ZoneList<LPointerMap*> pointer_maps_;
- ZoneList<Handle<JSFunction> > inlined_closures_;
-};
-
-
-class LChunkBuilder BASE_EMBEDDED {
- public:
- LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : chunk_(NULL),
- info_(info),
- graph_(graph),
- status_(UNUSED),
- current_instruction_(NULL),
- current_block_(NULL),
- next_block_(NULL),
- argument_count_(0),
- allocator_(allocator),
- position_(RelocInfo::kNoPosition),
- instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
-
- // Build the sequence for the graph.
- LChunk* Build();
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- enum Status {
- UNUSED,
- BUILDING,
- DONE,
- ABORTED
- };
-
- LChunk* chunk() const { return chunk_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_building() const { return status_ == BUILDING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- void Abort(const char* format, ...);
-
- // Methods for getting operands for Use / Define / Temp.
- LRegister* ToOperand(Register reg);
- LUnallocated* ToUnallocated(Register reg);
- LUnallocated* ToUnallocated(XMMRegister reg);
-
- // Methods for setting up define-use relationships.
- MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
- MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
- MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
- XMMRegister fixed_register);
-
- // A value that is guaranteed to be allocated to a register.
- // Operand created by UseRegister is guaranteed to be live until the end of
- // instruction. This means that register allocator will not reuse it's
- // register for any other operand inside instruction.
- // Operand created by UseRegisterAtStart is guaranteed to be live only at
- // instruction start. Register allocator is free to assign the same register
- // to some other operand used inside instruction (i.e. temporary or
- // output).
- MUST_USE_RESULT LOperand* UseRegister(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
-
- // An input operand in a register that may be trashed.
- MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
-
- // An input operand in a register or stack slot.
- MUST_USE_RESULT LOperand* Use(HValue* value);
- MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
-
- // An input operand in a register, stack slot or a constant operand.
- MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
-
- // An input operand in a register or a constant operand.
- MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
-
- // An input operand in register, stack slot or a constant operand.
- // Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value);
-
- // Temporary operand that must be in a register.
- MUST_USE_RESULT LUnallocated* TempRegister();
- MUST_USE_RESULT LOperand* FixedTemp(Register reg);
- MUST_USE_RESULT LOperand* FixedTemp(XMMRegister reg);
-
- // Methods for setting up define-use relationships.
- // Return the same instruction that they are passed.
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result);
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
- int index);
- template<int I, int T>
- LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg);
- template<int I, int T>
- LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
- XMMRegister reg);
- LInstruction* AssignEnvironment(LInstruction* instr);
- LInstruction* AssignPointerMap(LInstruction* instr);
-
- enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
-
- // By default we assume that instruction sequences generated for calls
- // cannot deoptimize eagerly and we do not attach environment to this
- // instruction.
- LInstruction* MarkAsCall(
- LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
- LInstruction* MarkAsSaveDoubles(LInstruction* instr);
-
- LInstruction* SetInstructionPendingDeoptimizationEnvironment(
- LInstruction* instr, int ast_id);
- void ClearInstructionPendingDeoptimizationEnvironment();
-
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
-
- void VisitInstruction(HInstruction* current);
-
- void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr);
- LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
-
- LChunk* chunk_;
- CompilationInfo* info_;
- HGraph* const graph_;
- Status status_;
- HInstruction* current_instruction_;
- HBasicBlock* current_block_;
- HBasicBlock* next_block_;
- int argument_count_;
- LAllocator* allocator_;
- int position_;
- LInstruction* instruction_pending_deoptimization_environment_;
- int pending_deoptimization_ast_id_;
-
- DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
-};
-
-#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_INSTRUCTION
-#undef DECLARE_CONCRETE_INSTRUCTION
-
-} } // namespace v8::int
-
-#endif // V8_X64_LITHIUM_X64_H_
diff --git a/src/3rdparty/v8/src/x64/macro-assembler-x64.cc b/src/3rdparty/v8/src/x64/macro-assembler-x64.cc
deleted file mode 100644
index 3a90343..0000000
--- a/src/3rdparty/v8/src/x64/macro-assembler-x64.cc
+++ /dev/null
@@ -1,2912 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "bootstrapper.h"
-#include "codegen-inl.h"
-#include "assembler-x64.h"
-#include "macro-assembler-x64.h"
-#include "serialize.h"
-#include "debug.h"
-#include "heap.h"
-
-namespace v8 {
-namespace internal {
-
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
- : Assembler(arg_isolate, buffer, size),
- generating_stub_(false),
- allow_stub_calls_(true),
- root_array_available_(true) {
- if (isolate() != NULL) {
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
- }
-}
-
-
-static intptr_t RootRegisterDelta(ExternalReference other, Isolate* isolate) {
- Address roots_register_value = kRootRegisterBias +
- reinterpret_cast<Address>(isolate->heap()->roots_address());
- intptr_t delta = other.address() - roots_register_value;
- return delta;
-}
-
-
-Operand MacroAssembler::ExternalOperand(ExternalReference target,
- Register scratch) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(target, isolate());
- if (is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- return Operand(kRootRegister, static_cast<int32_t>(delta));
- }
- }
- movq(scratch, target);
- return Operand(scratch, 0);
-}
-
-
-void MacroAssembler::Load(Register destination, ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(source, isolate());
- if (is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
- return;
- }
- }
- // Safe code.
- if (destination.is(rax)) {
- load_rax(source);
- } else {
- movq(kScratchRegister, source);
- movq(destination, Operand(kScratchRegister, 0));
- }
-}
-
-
-void MacroAssembler::Store(ExternalReference destination, Register source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(destination, isolate());
- if (is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
- return;
- }
- }
- // Safe code.
- if (source.is(rax)) {
- store_rax(destination);
- } else {
- movq(kScratchRegister, destination);
- movq(Operand(kScratchRegister, 0), source);
- }
-}
-
-
-void MacroAssembler::LoadAddress(Register destination,
- ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(source, isolate());
- if (is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
- return;
- }
- }
- // Safe code.
- movq(destination, source);
-}
-
-
-int MacroAssembler::LoadAddressSize(ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
- // This calculation depends on the internals of LoadAddress.
- // It's correctness is ensured by the asserts in the Call
- // instruction below.
- intptr_t delta = RootRegisterDelta(source, isolate());
- if (is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- // Operand is lea(scratch, Operand(kRootRegister, delta));
- // Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
- int size = 4;
- if (!is_int8(static_cast<int32_t>(delta))) {
- size += 3; // Need full four-byte displacement in lea.
- }
- return size;
- }
- }
- // Size of movq(destination, src);
- return 10;
-}
-
-
-void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- movq(destination, Operand(kRootRegister,
- (index << kPointerSizeLog2) - kRootRegisterBias));
-}
-
-
-void MacroAssembler::LoadRootIndexed(Register destination,
- Register variable_offset,
- int fixed_offset) {
- ASSERT(root_array_available_);
- movq(destination,
- Operand(kRootRegister,
- variable_offset, times_pointer_size,
- (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
-}
-
-
-void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
- source);
-}
-
-
-void MacroAssembler::PushRoot(Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
-}
-
-
-void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- cmpq(with, Operand(kRootRegister,
- (index << kPointerSizeLog2) - kRootRegisterBias));
-}
-
-
-void MacroAssembler::CompareRoot(const Operand& with,
- Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- ASSERT(!with.AddressUsesRegister(kScratchRegister));
- LoadRoot(kScratchRegister, index);
- cmpq(with, kScratchRegister);
-}
-
-
-void MacroAssembler::RecordWriteHelper(Register object,
- Register addr,
- Register scratch) {
- if (emit_debug_code()) {
- // Check that the object is not in new space.
- NearLabel not_in_new_space;
- InNewSpace(object, scratch, not_equal, &not_in_new_space);
- Abort("new-space object passed to RecordWriteHelper");
- bind(&not_in_new_space);
- }
-
- // Compute the page start address from the heap object pointer, and reuse
- // the 'object' register for it.
- and_(object, Immediate(~Page::kPageAlignmentMask));
-
- // Compute number of region covering addr. See Page::GetRegionNumberForAddress
- // method for more details.
- shrl(addr, Immediate(Page::kRegionSizeLog2));
- andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2));
-
- // Set dirty mark for region.
- bts(Operand(object, Page::kDirtyFlagOffset), addr);
-}
-
-
-void MacroAssembler::RecordWrite(Register object,
- int offset,
- Register value,
- Register index) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are rsi.
- ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
-
- // First, check if a write barrier is even needed. The tests below
- // catch stores of smis and stores into the young generation.
- Label done;
- JumpIfSmi(value, &done);
-
- RecordWriteNonSmi(object, offset, value, index);
- bind(&done);
-
- // Clobber all input registers when running with the debug-code flag
- // turned on to provoke errors. This clobbering repeats the
- // clobbering done inside RecordWriteNonSmi but it's necessary to
- // avoid having the fast case for smis leave the registers
- // unchanged.
- if (emit_debug_code()) {
- movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- }
-}
-
-
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are rsi.
- ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
-
- // First, check if a write barrier is even needed. The tests below
- // catch stores of smis and stores into the young generation.
- Label done;
- JumpIfSmi(value, &done);
-
- InNewSpace(object, value, equal, &done);
-
- RecordWriteHelper(object, address, value);
-
- bind(&done);
-
- // Clobber all input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- }
-}
-
-
-void MacroAssembler::RecordWriteNonSmi(Register object,
- int offset,
- Register scratch,
- Register index) {
- Label done;
-
- if (emit_debug_code()) {
- NearLabel okay;
- JumpIfNotSmi(object, &okay);
- Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
- bind(&okay);
-
- if (offset == 0) {
- // index must be int32.
- Register tmp = index.is(rax) ? rbx : rax;
- push(tmp);
- movl(tmp, index);
- cmpq(tmp, index);
- Check(equal, "Index register for RecordWrite must be untagged int32.");
- pop(tmp);
- }
- }
-
- // Test that the object address is not in the new space. We cannot
- // update page dirty marks for new space pages.
- InNewSpace(object, scratch, equal, &done);
-
- // The offset is relative to a tagged or untagged HeapObject pointer,
- // so either offset or offset + kHeapObjectTag must be a
- // multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize) ||
- IsAligned(offset + kHeapObjectTag, kPointerSize));
-
- Register dst = index;
- if (offset != 0) {
- lea(dst, Operand(object, offset));
- } else {
- // array access: calculate the destination address in the same manner as
- // KeyedStoreIC::GenerateGeneric.
- lea(dst, FieldOperand(object,
- index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- }
- RecordWriteHelper(object, dst, scratch);
-
- bind(&done);
-
- // Clobber all input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- }
-}
-
-void MacroAssembler::Assert(Condition cc, const char* msg) {
- if (emit_debug_code()) Check(cc, msg);
-}
-
-
-void MacroAssembler::AssertFastElements(Register elements) {
- if (emit_debug_code()) {
- NearLabel ok;
- CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- j(equal, &ok);
- CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
- Heap::kFixedCOWArrayMapRootIndex);
- j(equal, &ok);
- Abort("JSObject with fast elements map has slow elements");
- bind(&ok);
- }
-}
-
-
-void MacroAssembler::Check(Condition cc, const char* msg) {
- NearLabel L;
- j(cc, &L);
- Abort(msg);
- // will not return here
- bind(&L);
-}
-
-
-void MacroAssembler::CheckStackAlignment() {
- int frame_alignment = OS::ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment > kPointerSize) {
- ASSERT(IsPowerOf2(frame_alignment));
- NearLabel alignment_as_expected;
- testq(rsp, Immediate(frame_alignment_mask));
- j(zero, &alignment_as_expected);
- // Abort if stack is not aligned.
- int3();
- bind(&alignment_as_expected);
- }
-}
-
-
-void MacroAssembler::NegativeZeroTest(Register result,
- Register op,
- Label* then_label) {
- NearLabel ok;
- testl(result, result);
- j(not_zero, &ok);
- testl(op, op);
- j(sign, then_label);
- bind(&ok);
-}
-
-
-void MacroAssembler::Abort(const char* msg) {
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- // Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag.
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
-#ifdef DEBUG
- if (msg != NULL) {
- RecordComment("Abort message: ");
- RecordComment(msg);
- }
-#endif
- // Disable stub call restrictions to always allow calls to abort.
- AllowStubCallsScope allow_scope(this, true);
-
- push(rax);
- movq(kScratchRegister, p0, RelocInfo::NONE);
- push(kScratchRegister);
- movq(kScratchRegister,
- reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
- RelocInfo::NONE);
- push(kScratchRegister);
- CallRuntime(Runtime::kAbort, 2);
- // will not return here
- int3();
-}
-
-
-void MacroAssembler::CallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
- Call(stub->GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
- MaybeObject* result = stub->TryGetCode();
- if (!result->IsFailure()) {
- call(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
- RelocInfo::CODE_TARGET);
- }
- return result;
-}
-
-
-void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
- MaybeObject* result = stub->TryGetCode();
- if (!result->IsFailure()) {
- jmp(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
- RelocInfo::CODE_TARGET);
- }
- return result;
-}
-
-
-void MacroAssembler::StubReturn(int argc) {
- ASSERT(argc >= 1 && generating_stub());
- ret((argc - 1) * kPointerSize);
-}
-
-
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- addq(rsp, Immediate(num_arguments * kPointerSize));
- }
- LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-}
-
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
- // The assert checks that the constants for the maximum number of digits
- // for an array index cached in the hash field and the number of bits
- // reserved for it does not conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. Even if we subsequently go to
- // the slow case, converting the key to a smi is always valid.
- // key: string key
- // hash: key's hash field, including its array index value.
- and_(hash, Immediate(String::kArrayIndexValueMask));
- shr(hash, Immediate(String::kHashShift));
- // Here we actually clobber the key which will be used if calling into
- // runtime later. However as the new key is the numeric value of a string key
- // there is no difference in using either key.
- Integer32ToSmi(index, hash);
-}
-
-
-void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- Set(rax, function->nargs);
- LoadAddress(rbx, ExternalReference(function, isolate()));
- CEntryStub ces(1);
- ces.SaveDoubles();
- CallStub(&ces);
-}
-
-
-MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
- int num_arguments) {
- return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
- // If the expected number of arguments of the runtime function is
- // constant, we check that the actual number of arguments match the
- // expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(rax, num_arguments);
- LoadAddress(rbx, ExternalReference(f, isolate()));
- CEntryStub ces(f->result_size);
- CallStub(&ces);
-}
-
-
-MaybeObject* MacroAssembler::TryCallRuntime(const Runtime::Function* f,
- int num_arguments) {
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- // Since we did not call the stub, there was no allocation failure.
- // Return some non-failure object.
- return HEAP->undefined_value();
- }
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(rax, num_arguments);
- LoadAddress(rbx, ExternalReference(f, isolate()));
- CEntryStub ces(f->result_size);
- return TryCallStub(&ces);
-}
-
-
-void MacroAssembler::CallExternalReference(const ExternalReference& ext,
- int num_arguments) {
- Set(rax, num_arguments);
- LoadAddress(rbx, ext);
-
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : argument num_arguments - 1
- // ...
- // -- rsp[8 * num_arguments] : argument 0 (receiver)
- // -----------------------------------
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(rax, num_arguments);
- JumpToExternalReference(ext, result_size);
-}
-
-
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : argument num_arguments - 1
- // ...
- // -- rsp[8 * num_arguments] : argument 0 (receiver)
- // -----------------------------------
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(rax, num_arguments);
- return TryJumpToExternalReference(ext, result_size);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
-}
-
-
-MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- return TryTailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
-}
-
-
-static int Offset(ExternalReference ref0, ExternalReference ref1) {
- int64_t offset = (ref0.address() - ref1.address());
- // Check that fits into int.
- ASSERT(static_cast<int>(offset) == offset);
- return static_cast<int>(offset);
-}
-
-
-void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
-#ifdef _WIN64
- // We need to prepare a slot for result handle on stack and put
- // a pointer to it into 1st arg register.
- EnterApiExitFrame(arg_stack_space + 1);
-
- // rcx must be used to pass the pointer to the return value slot.
- lea(rcx, StackSpaceOperand(arg_stack_space));
-#else
- EnterApiExitFrame(arg_stack_space);
-#endif
-}
-
-
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
- ApiFunction* function, int stack_space) {
- Label empty_result;
- Label prologue;
- Label promote_scheduled_exception;
- Label delete_allocated_handles;
- Label leave_exit_frame;
- Label write_back;
-
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address();
- const int kNextOffset = 0;
- const int kLimitOffset = Offset(
- ExternalReference::handle_scope_limit_address(),
- next_address);
- const int kLevelOffset = Offset(
- ExternalReference::handle_scope_level_address(),
- next_address);
- ExternalReference scheduled_exception_address =
- ExternalReference::scheduled_exception_address(isolate());
-
- // Allocate HandleScope in callee-save registers.
- Register prev_next_address_reg = r14;
- Register prev_limit_reg = rbx;
- Register base_reg = r15;
- movq(base_reg, next_address);
- movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
- movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
- addl(Operand(base_reg, kLevelOffset), Immediate(1));
- // Call the api function!
- movq(rax,
- reinterpret_cast<int64_t>(function->address()),
- RelocInfo::RUNTIME_ENTRY);
- call(rax);
-
-#ifdef _WIN64
- // rax keeps a pointer to v8::Handle, unpack it.
- movq(rax, Operand(rax, 0));
-#endif
- // Check if the result handle holds 0.
- testq(rax, rax);
- j(zero, &empty_result);
- // It was non-zero. Dereference to get the result value.
- movq(rax, Operand(rax, 0));
- bind(&prologue);
-
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- subl(Operand(base_reg, kLevelOffset), Immediate(1));
- movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
- cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
- j(not_equal, &delete_allocated_handles);
- bind(&leave_exit_frame);
-
- // Check if the function scheduled an exception.
- movq(rsi, scheduled_exception_address);
- Cmp(Operand(rsi, 0), FACTORY->the_hole_value());
- j(not_equal, &promote_scheduled_exception);
-
- LeaveApiExitFrame();
- ret(stack_space * kPointerSize);
-
- bind(&promote_scheduled_exception);
- MaybeObject* result = TryTailCallRuntime(Runtime::kPromoteScheduledException,
- 0, 1);
- if (result->IsFailure()) {
- return result;
- }
-
- bind(&empty_result);
- // It was zero; the result is undefined.
- Move(rax, FACTORY->undefined_value());
- jmp(&prologue);
-
- // HandleScope limit has changed. Delete allocated extensions.
- bind(&delete_allocated_handles);
- movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
- movq(prev_limit_reg, rax);
-#ifdef _WIN64
- LoadAddress(rcx, ExternalReference::isolate_address());
-#else
- LoadAddress(rdi, ExternalReference::isolate_address());
-#endif
- LoadAddress(rax,
- ExternalReference::delete_handle_scope_extensions(isolate()));
- call(rax);
- movq(rax, prev_limit_reg);
- jmp(&leave_exit_frame);
-
- return result;
-}
-
-
-void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
- int result_size) {
- // Set the entry point and jump to the C entry runtime stub.
- LoadAddress(rbx, ext);
- CEntryStub ces(result_size);
- jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
- const ExternalReference& ext, int result_size) {
- // Set the entry point and jump to the C entry runtime stub.
- LoadAddress(rbx, ext);
- CEntryStub ces(result_size);
- return TryTailCallStub(&ces);
-}
-
-
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- CallWrapper* call_wrapper) {
- // Calls are not allowed in some stubs.
- ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
-
- // Rely on the assertion to check that the number of provided
- // arguments match the expected number of arguments. Fake a
- // parameter count to avoid emitting code to do the check.
- ParameterCount expected(0);
- GetBuiltinEntry(rdx, id);
- InvokeCode(rdx, expected, expected, flag, call_wrapper);
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- Builtins::JavaScript id) {
- // Load the builtins object into target register.
- movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
- movq(target, FieldOperand(target,
- JSBuiltinsObject::OffsetOfFunctionWithId(id)));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(rdi));
- // Load the JavaScript builtin function from the builtins object.
- GetBuiltinFunction(rdi, id);
- movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-}
-
-
-void MacroAssembler::Set(Register dst, int64_t x) {
- if (x == 0) {
- xorl(dst, dst);
- } else if (is_int32(x)) {
- movq(dst, Immediate(static_cast<int32_t>(x)));
- } else if (is_uint32(x)) {
- movl(dst, Immediate(static_cast<uint32_t>(x)));
- } else {
- movq(dst, x, RelocInfo::NONE);
- }
-}
-
-void MacroAssembler::Set(const Operand& dst, int64_t x) {
- if (is_int32(x)) {
- movq(dst, Immediate(static_cast<int32_t>(x)));
- } else {
- movq(kScratchRegister, x, RelocInfo::NONE);
- movq(dst, kScratchRegister);
- }
-}
-
-// ----------------------------------------------------------------------------
-// Smi tagging, untagging and tag detection.
-
-Register MacroAssembler::GetSmiConstant(Smi* source) {
- int value = source->value();
- if (value == 0) {
- xorl(kScratchRegister, kScratchRegister);
- return kScratchRegister;
- }
- if (value == 1) {
- return kSmiConstantRegister;
- }
- LoadSmiConstant(kScratchRegister, source);
- return kScratchRegister;
-}
-
-void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
- if (emit_debug_code()) {
- movq(dst,
- reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
- RelocInfo::NONE);
- cmpq(dst, kSmiConstantRegister);
- if (allow_stub_calls()) {
- Assert(equal, "Uninitialized kSmiConstantRegister");
- } else {
- NearLabel ok;
- j(equal, &ok);
- int3();
- bind(&ok);
- }
- }
- int value = source->value();
- if (value == 0) {
- xorl(dst, dst);
- return;
- }
- bool negative = value < 0;
- unsigned int uvalue = negative ? -value : value;
-
- switch (uvalue) {
- case 9:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
- break;
- case 8:
- xorl(dst, dst);
- lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
- break;
- case 4:
- xorl(dst, dst);
- lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
- break;
- case 5:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
- break;
- case 3:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
- break;
- case 2:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
- break;
- case 1:
- movq(dst, kSmiConstantRegister);
- break;
- case 0:
- UNREACHABLE();
- return;
- default:
- movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE);
- return;
- }
- if (negative) {
- neg(dst);
- }
-}
-
-
-void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
- ASSERT_EQ(0, kSmiTag);
- if (!dst.is(src)) {
- movl(dst, src);
- }
- shl(dst, Immediate(kSmiShift));
-}
-
-
-void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
- if (emit_debug_code()) {
- testb(dst, Immediate(0x01));
- NearLabel ok;
- j(zero, &ok);
- if (allow_stub_calls()) {
- Abort("Integer32ToSmiField writing to non-smi location");
- } else {
- int3();
- }
- bind(&ok);
- }
- ASSERT(kSmiShift % kBitsPerByte == 0);
- movl(Operand(dst, kSmiShift / kBitsPerByte), src);
-}
-
-
-void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
- Register src,
- int constant) {
- if (dst.is(src)) {
- addl(dst, Immediate(constant));
- } else {
- leal(dst, Operand(src, constant));
- }
- shl(dst, Immediate(kSmiShift));
-}
-
-
-void MacroAssembler::SmiToInteger32(Register dst, Register src) {
- ASSERT_EQ(0, kSmiTag);
- if (!dst.is(src)) {
- movq(dst, src);
- }
- shr(dst, Immediate(kSmiShift));
-}
-
-
-void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
- movl(dst, Operand(src, kSmiShift / kBitsPerByte));
-}
-
-
-void MacroAssembler::SmiToInteger64(Register dst, Register src) {
- ASSERT_EQ(0, kSmiTag);
- if (!dst.is(src)) {
- movq(dst, src);
- }
- sar(dst, Immediate(kSmiShift));
-}
-
-
-void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
- movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
-}
-
-
-void MacroAssembler::SmiTest(Register src) {
- testq(src, src);
-}
-
-
-void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
- if (emit_debug_code()) {
- AbortIfNotSmi(smi1);
- AbortIfNotSmi(smi2);
- }
- cmpq(smi1, smi2);
-}
-
-
-void MacroAssembler::SmiCompare(Register dst, Smi* src) {
- if (emit_debug_code()) {
- AbortIfNotSmi(dst);
- }
- Cmp(dst, src);
-}
-
-
-void MacroAssembler::Cmp(Register dst, Smi* src) {
- ASSERT(!dst.is(kScratchRegister));
- if (src->value() == 0) {
- testq(dst, dst);
- } else {
- Register constant_reg = GetSmiConstant(src);
- cmpq(dst, constant_reg);
- }
-}
-
-
-void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
- if (emit_debug_code()) {
- AbortIfNotSmi(dst);
- AbortIfNotSmi(src);
- }
- cmpq(dst, src);
-}
-
-
-void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
- if (emit_debug_code()) {
- AbortIfNotSmi(dst);
- AbortIfNotSmi(src);
- }
- cmpq(dst, src);
-}
-
-
-void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
- if (emit_debug_code()) {
- AbortIfNotSmi(dst);
- }
- cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
-}
-
-
-void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
- // The Operand cannot use the smi register.
- Register smi_reg = GetSmiConstant(src);
- ASSERT(!dst.AddressUsesRegister(smi_reg));
- cmpq(dst, smi_reg);
-}
-
-
-void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
- cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
-}
-
-
-void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
- Register src,
- int power) {
- ASSERT(power >= 0);
- ASSERT(power < 64);
- if (power == 0) {
- SmiToInteger64(dst, src);
- return;
- }
- if (!dst.is(src)) {
- movq(dst, src);
- }
- if (power < kSmiShift) {
- sar(dst, Immediate(kSmiShift - power));
- } else if (power > kSmiShift) {
- shl(dst, Immediate(power - kSmiShift));
- }
-}
-
-
-void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
- Register src,
- int power) {
- ASSERT((0 <= power) && (power < 32));
- if (dst.is(src)) {
- shr(dst, Immediate(power + kSmiShift));
- } else {
- UNIMPLEMENTED(); // Not used.
- }
-}
-
-
-Condition MacroAssembler::CheckSmi(Register src) {
- ASSERT_EQ(0, kSmiTag);
- testb(src, Immediate(kSmiTagMask));
- return zero;
-}
-
-
-Condition MacroAssembler::CheckSmi(const Operand& src) {
- ASSERT_EQ(0, kSmiTag);
- testb(src, Immediate(kSmiTagMask));
- return zero;
-}
-
-
-Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
- ASSERT_EQ(0, kSmiTag);
- // Test that both bits of the mask 0x8000000000000001 are zero.
- movq(kScratchRegister, src);
- rol(kScratchRegister, Immediate(1));
- testb(kScratchRegister, Immediate(3));
- return zero;
-}
-
-
-Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
- if (first.is(second)) {
- return CheckSmi(first);
- }
- ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
- leal(kScratchRegister, Operand(first, second, times_1, 0));
- testb(kScratchRegister, Immediate(0x03));
- return zero;
-}
-
-
-Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
- Register second) {
- if (first.is(second)) {
- return CheckNonNegativeSmi(first);
- }
- movq(kScratchRegister, first);
- or_(kScratchRegister, second);
- rol(kScratchRegister, Immediate(1));
- testl(kScratchRegister, Immediate(3));
- return zero;
-}
-
-
-Condition MacroAssembler::CheckEitherSmi(Register first,
- Register second,
- Register scratch) {
- if (first.is(second)) {
- return CheckSmi(first);
- }
- if (scratch.is(second)) {
- andl(scratch, first);
- } else {
- if (!scratch.is(first)) {
- movl(scratch, first);
- }
- andl(scratch, second);
- }
- testb(scratch, Immediate(kSmiTagMask));
- return zero;
-}
-
-
-Condition MacroAssembler::CheckIsMinSmi(Register src) {
- ASSERT(!src.is(kScratchRegister));
- // If we overflow by subtracting one, it's the minimal smi value.
- cmpq(src, kSmiConstantRegister);
- return overflow;
-}
-
-
-Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
- // A 32-bit integer value can always be converted to a smi.
- return always;
-}
-
-
-Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
- // An unsigned 32-bit integer value is valid as long as the high bit
- // is not set.
- testl(src, src);
- return positive;
-}
-
-
-void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
- if (dst.is(src)) {
- andl(dst, Immediate(kSmiTagMask));
- } else {
- movl(dst, Immediate(kSmiTagMask));
- andl(dst, src);
- }
-}
-
-
-void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
- if (!(src.AddressUsesRegister(dst))) {
- movl(dst, Immediate(kSmiTagMask));
- andl(dst, src);
- } else {
- movl(dst, src);
- andl(dst, Immediate(kSmiTagMask));
- }
-}
-
-
-void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
- if (constant->value() == 0) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- return;
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- switch (constant->value()) {
- case 1:
- addq(dst, kSmiConstantRegister);
- return;
- case 2:
- lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
- return;
- case 4:
- lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
- return;
- case 8:
- lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
- return;
- default:
- Register constant_reg = GetSmiConstant(constant);
- addq(dst, constant_reg);
- return;
- }
- } else {
- switch (constant->value()) {
- case 1:
- lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
- return;
- case 2:
- lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
- return;
- case 4:
- lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
- return;
- case 8:
- lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
- return;
- default:
- LoadSmiConstant(dst, constant);
- addq(dst, src);
- return;
- }
- }
-}
-
-
-void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
- if (constant->value() != 0) {
- addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
- }
-}
-
-
-void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
- if (constant->value() == 0) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- Register constant_reg = GetSmiConstant(constant);
- subq(dst, constant_reg);
- } else {
- if (constant->value() == Smi::kMinValue) {
- LoadSmiConstant(dst, constant);
- // Adding and subtracting the min-value gives the same result, it only
- // differs on the overflow bit, which we don't check here.
- addq(dst, src);
- } else {
- // Subtract by adding the negation.
- LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
- addq(dst, src);
- }
- }
-}
-
-
-void MacroAssembler::SmiAdd(Register dst,
- Register src1,
- Register src2) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible.
- ASSERT(!dst.is(src2));
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- addq(dst, src2);
- Assert(no_overflow, "Smi addition overflow");
-}
-
-
-void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible (e.g., subtracting two positive smis).
- ASSERT(!dst.is(src2));
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- subq(dst, src2);
- Assert(no_overflow, "Smi subtraction overflow");
-}
-
-
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- const Operand& src2) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible (e.g., subtracting two positive smis).
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- subq(dst, src2);
- Assert(no_overflow, "Smi subtraction overflow");
-}
-
-
-void MacroAssembler::SmiNot(Register dst, Register src) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src.is(kScratchRegister));
- // Set tag and padding bits before negating, so that they are zero afterwards.
- movl(kScratchRegister, Immediate(~0));
- if (dst.is(src)) {
- xor_(dst, kScratchRegister);
- } else {
- lea(dst, Operand(src, kScratchRegister, times_1, 0));
- }
- not_(dst);
-}
-
-
-void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
- ASSERT(!dst.is(src2));
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- and_(dst, src2);
-}
-
-
-void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
- if (constant->value() == 0) {
- Set(dst, 0);
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- Register constant_reg = GetSmiConstant(constant);
- and_(dst, constant_reg);
- } else {
- LoadSmiConstant(dst, constant);
- and_(dst, src);
- }
-}
-
-
-void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- or_(dst, src2);
-}
-
-
-void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
- if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- Register constant_reg = GetSmiConstant(constant);
- or_(dst, constant_reg);
- } else {
- LoadSmiConstant(dst, constant);
- or_(dst, src);
- }
-}
-
-
-void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- xor_(dst, src2);
-}
-
-
-void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
- if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- Register constant_reg = GetSmiConstant(constant);
- xor_(dst, constant_reg);
- } else {
- LoadSmiConstant(dst, constant);
- xor_(dst, src);
- }
-}
-
-
-void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
- Register src,
- int shift_value) {
- ASSERT(is_uint5(shift_value));
- if (shift_value > 0) {
- if (dst.is(src)) {
- sar(dst, Immediate(shift_value + kSmiShift));
- shl(dst, Immediate(kSmiShift));
- } else {
- UNIMPLEMENTED(); // Not used.
- }
- }
-}
-
-
-void MacroAssembler::SmiShiftLeftConstant(Register dst,
- Register src,
- int shift_value) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- if (shift_value > 0) {
- shl(dst, Immediate(shift_value));
- }
-}
-
-
-void MacroAssembler::SmiShiftLeft(Register dst,
- Register src1,
- Register src2) {
- ASSERT(!dst.is(rcx));
- NearLabel result_ok;
- // Untag shift amount.
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- SmiToInteger32(rcx, src2);
- // Shift amount specified by lower 5 bits, not six as the shl opcode.
- and_(rcx, Immediate(0x1f));
- shl_cl(dst);
-}
-
-
-void MacroAssembler::SmiShiftArithmeticRight(Register dst,
- Register src1,
- Register src2) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(rcx));
- if (src1.is(rcx)) {
- movq(kScratchRegister, src1);
- } else if (src2.is(rcx)) {
- movq(kScratchRegister, src2);
- }
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- SmiToInteger32(rcx, src2);
- orl(rcx, Immediate(kSmiShift));
- sar_cl(dst); // Shift 32 + original rcx & 0x1f.
- shl(dst, Immediate(kSmiShift));
- if (src1.is(rcx)) {
- movq(src1, kScratchRegister);
- } else if (src2.is(rcx)) {
- movq(src2, kScratchRegister);
- }
-}
-
-
-SmiIndex MacroAssembler::SmiToIndex(Register dst,
- Register src,
- int shift) {
- ASSERT(is_uint6(shift));
- // There is a possible optimization if shift is in the range 60-63, but that
- // will (and must) never happen.
- if (!dst.is(src)) {
- movq(dst, src);
- }
- if (shift < kSmiShift) {
- sar(dst, Immediate(kSmiShift - shift));
- } else {
- shl(dst, Immediate(shift - kSmiShift));
- }
- return SmiIndex(dst, times_1);
-}
-
-SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
- Register src,
- int shift) {
- // Register src holds a positive smi.
- ASSERT(is_uint6(shift));
- if (!dst.is(src)) {
- movq(dst, src);
- }
- neg(dst);
- if (shift < kSmiShift) {
- sar(dst, Immediate(kSmiShift - shift));
- } else {
- shl(dst, Immediate(shift - kSmiShift));
- }
- return SmiIndex(dst, times_1);
-}
-
-
-void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
- ASSERT_EQ(0, kSmiShift % kBitsPerByte);
- addl(dst, Operand(src, kSmiShift / kBitsPerByte));
-}
-
-
-
-void MacroAssembler::Move(Register dst, Register src) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
-}
-
-
-void MacroAssembler::Move(Register dst, Handle<Object> source) {
- ASSERT(!source->IsFailure());
- if (source->IsSmi()) {
- Move(dst, Smi::cast(*source));
- } else {
- movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
- }
-}
-
-
-void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
- ASSERT(!source->IsFailure());
- if (source->IsSmi()) {
- Move(dst, Smi::cast(*source));
- } else {
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
- movq(dst, kScratchRegister);
- }
-}
-
-
-void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
- if (source->IsSmi()) {
- Cmp(dst, Smi::cast(*source));
- } else {
- Move(kScratchRegister, source);
- cmpq(dst, kScratchRegister);
- }
-}
-
-
-void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
- if (source->IsSmi()) {
- Cmp(dst, Smi::cast(*source));
- } else {
- ASSERT(source->IsHeapObject());
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
- cmpq(dst, kScratchRegister);
- }
-}
-
-
-void MacroAssembler::Push(Handle<Object> source) {
- if (source->IsSmi()) {
- Push(Smi::cast(*source));
- } else {
- ASSERT(source->IsHeapObject());
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister);
- }
-}
-
-
-void MacroAssembler::Push(Smi* source) {
- intptr_t smi = reinterpret_cast<intptr_t>(source);
- if (is_int32(smi)) {
- push(Immediate(static_cast<int32_t>(smi)));
- } else {
- Register constant = GetSmiConstant(source);
- push(constant);
- }
-}
-
-
-void MacroAssembler::Drop(int stack_elements) {
- if (stack_elements > 0) {
- addq(rsp, Immediate(stack_elements * kPointerSize));
- }
-}
-
-
-void MacroAssembler::Test(const Operand& src, Smi* source) {
- testl(Operand(src, kIntSize), Immediate(source->value()));
-}
-
-
-void MacroAssembler::Jump(ExternalReference ext) {
- LoadAddress(kScratchRegister, ext);
- jmp(kScratchRegister);
-}
-
-
-void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
- movq(kScratchRegister, destination, rmode);
- jmp(kScratchRegister);
-}
-
-
-void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
- // TODO(X64): Inline this
- jmp(code_object, rmode);
-}
-
-
-int MacroAssembler::CallSize(ExternalReference ext) {
- // Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
- const int kCallInstructionSize = 3;
- return LoadAddressSize(ext) + kCallInstructionSize;
-}
-
-
-void MacroAssembler::Call(ExternalReference ext) {
-#ifdef DEBUG
- int end_position = pc_offset() + CallSize(ext);
-#endif
- LoadAddress(kScratchRegister, ext);
- call(kScratchRegister);
-#ifdef DEBUG
- CHECK_EQ(end_position, pc_offset());
-#endif
-}
-
-
-void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
-#ifdef DEBUG
- int end_position = pc_offset() + CallSize(destination, rmode);
-#endif
- movq(kScratchRegister, destination, rmode);
- call(kScratchRegister);
-#ifdef DEBUG
- CHECK_EQ(pc_offset(), end_position);
-#endif
-}
-
-
-void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
-#ifdef DEBUG
- int end_position = pc_offset() + CallSize(code_object);
-#endif
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- call(code_object, rmode);
-#ifdef DEBUG
- CHECK_EQ(end_position, pc_offset());
-#endif
-}
-
-
-void MacroAssembler::Pushad() {
- push(rax);
- push(rcx);
- push(rdx);
- push(rbx);
- // Not pushing rsp or rbp.
- push(rsi);
- push(rdi);
- push(r8);
- push(r9);
- // r10 is kScratchRegister.
- push(r11);
- // r12 is kSmiConstantRegister.
- // r13 is kRootRegister.
- push(r14);
- push(r15);
- STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
- // Use lea for symmetry with Popad.
- int sp_delta =
- (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
- lea(rsp, Operand(rsp, -sp_delta));
-}
-
-
-void MacroAssembler::Popad() {
- // Popad must not change the flags, so use lea instead of addq.
- int sp_delta =
- (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
- lea(rsp, Operand(rsp, sp_delta));
- pop(r15);
- pop(r14);
- pop(r11);
- pop(r9);
- pop(r8);
- pop(rdi);
- pop(rsi);
- pop(rbx);
- pop(rdx);
- pop(rcx);
- pop(rax);
-}
-
-
-void MacroAssembler::Dropad() {
- addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
-}
-
-
-// Order general registers are pushed by Pushad:
-// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
-int MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
- 0,
- 1,
- 2,
- 3,
- -1,
- -1,
- 4,
- 5,
- 6,
- 7,
- -1,
- 8,
- -1,
- -1,
- 9,
- 10
-};
-
-
-void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
- movq(SafepointRegisterSlot(dst), src);
-}
-
-
-void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
- movq(dst, SafepointRegisterSlot(src));
-}
-
-
-Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
- return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
-}
-
-
-void MacroAssembler::PushTryHandler(CodeLocation try_location,
- HandlerType type) {
- // Adjust this code if not the case.
- ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
- // The pc (return address) is already on TOS. This code pushes state,
- // frame pointer and current handler. Check that they are expected
- // next on the stack, in that order.
- ASSERT_EQ(StackHandlerConstants::kStateOffset,
- StackHandlerConstants::kPCOffset - kPointerSize);
- ASSERT_EQ(StackHandlerConstants::kFPOffset,
- StackHandlerConstants::kStateOffset - kPointerSize);
- ASSERT_EQ(StackHandlerConstants::kNextOffset,
- StackHandlerConstants::kFPOffset - kPointerSize);
-
- if (try_location == IN_JAVASCRIPT) {
- if (type == TRY_CATCH_HANDLER) {
- push(Immediate(StackHandler::TRY_CATCH));
- } else {
- push(Immediate(StackHandler::TRY_FINALLY));
- }
- push(rbp);
- } else {
- ASSERT(try_location == IN_JS_ENTRY);
- // The frame pointer does not point to a JS frame so we save NULL
- // for rbp. We expect the code throwing an exception to check rbp
- // before dereferencing it to restore the context.
- push(Immediate(StackHandler::ENTRY));
- push(Immediate(0)); // NULL frame pointer.
- }
- // Save the current handler.
- Operand handler_operand =
- ExternalOperand(ExternalReference(Isolate::k_handler_address, isolate()));
- push(handler_operand);
- // Link this handler.
- movq(handler_operand, rsp);
-}
-
-
-void MacroAssembler::PopTryHandler() {
- ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
- // Unlink this handler.
- Operand handler_operand =
- ExternalOperand(ExternalReference(Isolate::k_handler_address, isolate()));
- pop(handler_operand);
- // Remove the remaining fields.
- addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
-}
-
-
-void MacroAssembler::Throw(Register value) {
- // Check that stack should contain next handler, frame pointer, state and
- // return address in that order.
- STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
- StackHandlerConstants::kStateOffset);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
- StackHandlerConstants::kPCOffset);
- // Keep thrown value in rax.
- if (!value.is(rax)) {
- movq(rax, value);
- }
-
- ExternalReference handler_address(Isolate::k_handler_address, isolate());
- Operand handler_operand = ExternalOperand(handler_address);
- movq(rsp, handler_operand);
- // get next in chain
- pop(handler_operand);
- pop(rbp); // pop frame pointer
- pop(rdx); // remove state
-
- // Before returning we restore the context from the frame pointer if not NULL.
- // The frame pointer is NULL in the exception handler of a JS entry frame.
- Set(rsi, 0); // Tentatively set context pointer to NULL
- NearLabel skip;
- cmpq(rbp, Immediate(0));
- j(equal, &skip);
- movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- bind(&skip);
- ret(0);
-}
-
-
-void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
- Register value) {
- // Keep thrown value in rax.
- if (!value.is(rax)) {
- movq(rax, value);
- }
- // Fetch top stack handler.
- ExternalReference handler_address(Isolate::k_handler_address, isolate());
- Load(rsp, handler_address);
-
- // Unwind the handlers until the ENTRY handler is found.
- NearLabel loop, done;
- bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
- j(equal, &done);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- movq(rsp, Operand(rsp, kNextOffset));
- jmp(&loop);
- bind(&done);
-
- // Set the top handler address to next handler past the current ENTRY handler.
- Operand handler_operand = ExternalOperand(handler_address);
- pop(handler_operand);
-
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(
- Isolate::k_external_caught_exception_address, isolate());
- movq(rax, Immediate(false));
- Store(external_caught, rax);
-
- // Set pending exception and rax to out of memory exception.
- ExternalReference pending_exception(Isolate::k_pending_exception_address,
- isolate());
- movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
- Store(pending_exception, rax);
- }
-
- // Clear the context pointer.
- Set(rsi, 0);
-
- // Restore registers from handler.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset + kPointerSize ==
- StackHandlerConstants::kFPOffset);
- pop(rbp); // FP
- STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
- StackHandlerConstants::kStateOffset);
- pop(rdx); // State
-
- STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
- StackHandlerConstants::kPCOffset);
- ret(0);
-}
-
-
-void MacroAssembler::Ret() {
- ret(0);
-}
-
-
-void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
- if (is_uint16(bytes_dropped)) {
- ret(bytes_dropped);
- } else {
- pop(scratch);
- addq(rsp, Immediate(bytes_dropped));
- push(scratch);
- ret(0);
- }
-}
-
-
-void MacroAssembler::FCmp() {
- fucomip();
- fstp(0);
-}
-
-
-void MacroAssembler::CmpObjectType(Register heap_object,
- InstanceType type,
- Register map) {
- movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
- CmpInstanceType(map, type);
-}
-
-
-void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
- cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
- Immediate(static_cast<int8_t>(type)));
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Handle<Map> map,
- Label* fail,
- bool is_heap_object) {
- if (!is_heap_object) {
- JumpIfSmi(obj, fail);
- }
- Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
- j(not_equal, fail);
-}
-
-
-void MacroAssembler::AbortIfNotNumber(Register object) {
- NearLabel ok;
- Condition is_smi = CheckSmi(object);
- j(is_smi, &ok);
- Cmp(FieldOperand(object, HeapObject::kMapOffset),
- FACTORY->heap_number_map());
- Assert(equal, "Operand not a number");
- bind(&ok);
-}
-
-
-void MacroAssembler::AbortIfSmi(Register object) {
- NearLabel ok;
- Condition is_smi = CheckSmi(object);
- Assert(NegateCondition(is_smi), "Operand is a smi");
-}
-
-
-void MacroAssembler::AbortIfNotSmi(Register object) {
- Condition is_smi = CheckSmi(object);
- Assert(is_smi, "Operand is not a smi");
-}
-
-
-void MacroAssembler::AbortIfNotSmi(const Operand& object) {
- Condition is_smi = CheckSmi(object);
- Assert(is_smi, "Operand is not a smi");
-}
-
-
-void MacroAssembler::AbortIfNotString(Register object) {
- testb(object, Immediate(kSmiTagMask));
- Assert(not_equal, "Operand is not a string");
- push(object);
- movq(object, FieldOperand(object, HeapObject::kMapOffset));
- CmpInstanceType(object, FIRST_NONSTRING_TYPE);
- pop(object);
- Assert(below, "Operand is not a string");
-}
-
-
-void MacroAssembler::AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message) {
- ASSERT(!src.is(kScratchRegister));
- LoadRoot(kScratchRegister, root_value_index);
- cmpq(src, kScratchRegister);
- Check(equal, message);
-}
-
-
-
-Condition MacroAssembler::IsObjectStringType(Register heap_object,
- Register map,
- Register instance_type) {
- movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
- movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- ASSERT(kNotStringTag != 0);
- testb(instance_type, Immediate(kIsNotStringMask));
- return zero;
-}
-
-
-void MacroAssembler::TryGetFunctionPrototype(Register function,
- Register result,
- Label* miss) {
- // Check that the receiver isn't a smi.
- testl(function, Immediate(kSmiTagMask));
- j(zero, miss);
-
- // Check that the function really is a function.
- CmpObjectType(function, JS_FUNCTION_TYPE, result);
- j(not_equal, miss);
-
- // Make sure that the function has an instance prototype.
- NearLabel non_instance;
- testb(FieldOperand(result, Map::kBitFieldOffset),
- Immediate(1 << Map::kHasNonInstancePrototype));
- j(not_zero, &non_instance);
-
- // Get the prototype or initial map from the function.
- movq(result,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it and
- // simply miss the cache instead. This will allow us to allocate a
- // prototype object on-demand in the runtime system.
- CompareRoot(result, Heap::kTheHoleValueRootIndex);
- j(equal, miss);
-
- // If the function does not have an initial map, we're done.
- NearLabel done;
- CmpObjectType(result, MAP_TYPE, kScratchRegister);
- j(not_equal, &done);
-
- // Get the prototype from the initial map.
- movq(result, FieldOperand(result, Map::kPrototypeOffset));
- jmp(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- movq(result, FieldOperand(result, Map::kConstructorOffset));
-
- // All done.
- bind(&done);
-}
-
-
-void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
- if (FLAG_native_code_counters && counter->Enabled()) {
- Operand counter_operand = ExternalOperand(ExternalReference(counter));
- movq(counter_operand, Immediate(value));
- }
-}
-
-
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Operand counter_operand = ExternalOperand(ExternalReference(counter));
- if (value == 1) {
- incl(counter_operand);
- } else {
- addl(counter_operand, Immediate(value));
- }
- }
-}
-
-
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Operand counter_operand = ExternalOperand(ExternalReference(counter));
- if (value == 1) {
- decl(counter_operand);
- } else {
- subl(counter_operand, Immediate(value));
- }
- }
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void MacroAssembler::DebugBreak() {
- ASSERT(allow_stub_calls());
- Set(rax, 0); // No arguments.
- LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
- CEntryStub ces(1);
- Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
-}
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-void MacroAssembler::InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- CallWrapper* call_wrapper) {
- NearLabel done;
- InvokePrologue(expected,
- actual,
- Handle<Code>::null(),
- code,
- &done,
- flag,
- call_wrapper);
- if (flag == CALL_FUNCTION) {
- if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(code));
- call(code);
- if (call_wrapper != NULL) call_wrapper->AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- jmp(code);
- }
- bind(&done);
-}
-
-
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- CallWrapper* call_wrapper) {
- NearLabel done;
- Register dummy = rax;
- InvokePrologue(expected,
- actual,
- code,
- dummy,
- &done,
- flag,
- call_wrapper);
- if (flag == CALL_FUNCTION) {
- if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(code));
- Call(code, rmode);
- if (call_wrapper != NULL) call_wrapper->AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- Jump(code, rmode);
- }
- bind(&done);
-}
-
-
-void MacroAssembler::InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
- CallWrapper* call_wrapper) {
- ASSERT(function.is(rdi));
- movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
- movsxlq(rbx,
- FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
- // Advances rdx to the end of the Code object header, to the start of
- // the executable code.
- movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-
- ParameterCount expected(rbx);
- InvokeCode(rdx, expected, actual, flag, call_wrapper);
-}
-
-
-void MacroAssembler::InvokeFunction(JSFunction* function,
- const ParameterCount& actual,
- InvokeFlag flag,
- CallWrapper* call_wrapper) {
- ASSERT(function->is_compiled());
- // Get the function and setup the context.
- Move(rdi, Handle<JSFunction>(function));
- movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- if (V8::UseCrankshaft()) {
- // Since Crankshaft can recompile a function, we need to load
- // the Code object every time we call the function.
- movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- ParameterCount expected(function->shared()->formal_parameter_count());
- InvokeCode(rdx, expected, actual, flag, call_wrapper);
- } else {
- // Invoke the cached code.
- Handle<Code> code(function->code());
- ParameterCount expected(function->shared()->formal_parameter_count());
- InvokeCode(code,
- expected,
- actual,
- RelocInfo::CODE_TARGET,
- flag,
- call_wrapper);
- }
-}
-
-
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
- push(rbp);
- movq(rbp, rsp);
- push(rsi); // Context.
- Push(Smi::FromInt(type));
- movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister);
- if (emit_debug_code()) {
- movq(kScratchRegister,
- FACTORY->undefined_value(),
- RelocInfo::EMBEDDED_OBJECT);
- cmpq(Operand(rsp, 0), kScratchRegister);
- Check(not_equal, "code object not properly patched");
- }
-}
-
-
-void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- if (emit_debug_code()) {
- Move(kScratchRegister, Smi::FromInt(type));
- cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
- Check(equal, "stack frame types must match");
- }
- movq(rsp, rbp);
- pop(rbp);
-}
-
-
-void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
- // Setup the frame structure on the stack.
- // All constants are relative to the frame pointer of the exit frame.
- ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
- push(rbp);
- movq(rbp, rsp);
-
- // Reserve room for entry stack pointer and push the code object.
- ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
- push(Immediate(0)); // Saved entry sp, patched before call.
- movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister); // Accessed from EditFrame::code_slot.
-
- // Save the frame pointer and the context in top.
- if (save_rax) {
- movq(r14, rax); // Backup rax in callee-save register.
- }
-
- Store(ExternalReference(Isolate::k_c_entry_fp_address, isolate()), rbp);
- Store(ExternalReference(Isolate::k_context_address, isolate()), rsi);
-}
-
-
-void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
- bool save_doubles) {
-#ifdef _WIN64
- const int kShadowSpace = 4;
- arg_stack_space += kShadowSpace;
-#endif
- // Optionally save all XMM registers.
- if (save_doubles) {
- CpuFeatures::Scope scope(SSE2);
- int space = XMMRegister::kNumRegisters * kDoubleSize +
- arg_stack_space * kPointerSize;
- subq(rsp, Immediate(space));
- int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
- XMMRegister reg = XMMRegister::FromAllocationIndex(i);
- movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
- }
- } else if (arg_stack_space > 0) {
- subq(rsp, Immediate(arg_stack_space * kPointerSize));
- }
-
- // Get the required frame alignment for the OS.
- const int kFrameAlignment = OS::ActivationFrameAlignment();
- if (kFrameAlignment > 0) {
- ASSERT(IsPowerOf2(kFrameAlignment));
- movq(kScratchRegister, Immediate(-kFrameAlignment));
- and_(rsp, kScratchRegister);
- }
-
- // Patch the saved entry sp.
- movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
-}
-
-
-void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
- EnterExitFramePrologue(true);
-
- // Setup argv in callee-saved register r15. It is reused in LeaveExitFrame,
- // so it must be retained across the C-call.
- int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- lea(r15, Operand(rbp, r14, times_pointer_size, offset));
-
- EnterExitFrameEpilogue(arg_stack_space, save_doubles);
-}
-
-
-void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
- EnterExitFramePrologue(false);
- EnterExitFrameEpilogue(arg_stack_space, false);
-}
-
-
-void MacroAssembler::LeaveExitFrame(bool save_doubles) {
- // Registers:
- // r15 : argv
- if (save_doubles) {
- int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
- XMMRegister reg = XMMRegister::FromAllocationIndex(i);
- movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
- }
- }
- // Get the return address from the stack and restore the frame pointer.
- movq(rcx, Operand(rbp, 1 * kPointerSize));
- movq(rbp, Operand(rbp, 0 * kPointerSize));
-
- // Drop everything up to and including the arguments and the receiver
- // from the caller stack.
- lea(rsp, Operand(r15, 1 * kPointerSize));
-
- // Push the return address to get ready to return.
- push(rcx);
-
- LeaveExitFrameEpilogue();
-}
-
-
-void MacroAssembler::LeaveApiExitFrame() {
- movq(rsp, rbp);
- pop(rbp);
-
- LeaveExitFrameEpilogue();
-}
-
-
-void MacroAssembler::LeaveExitFrameEpilogue() {
- // Restore current context from top and clear it in debug mode.
- ExternalReference context_address(Isolate::k_context_address, isolate());
- Operand context_operand = ExternalOperand(context_address);
- movq(rsi, context_operand);
-#ifdef DEBUG
- movq(context_operand, Immediate(0));
-#endif
-
- // Clear the top frame.
- ExternalReference c_entry_fp_address(Isolate::k_c_entry_fp_address,
- isolate());
- Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
- movq(c_entry_fp_operand, Immediate(0));
-}
-
-
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss) {
- Label same_contexts;
-
- ASSERT(!holder_reg.is(scratch));
- ASSERT(!scratch.is(kScratchRegister));
- // Load current lexical context from the stack frame.
- movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
-
- // When generating debug code, make sure the lexical context is set.
- if (emit_debug_code()) {
- cmpq(scratch, Immediate(0));
- Check(not_equal, "we should not have an empty lexical context");
- }
- // Load the global context of the current context.
- int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- movq(scratch, FieldOperand(scratch, offset));
- movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
-
- // Check the context is a global context.
- if (emit_debug_code()) {
- Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
- FACTORY->global_context_map());
- Check(equal, "JSGlobalObject::global_context should be a global context.");
- }
-
- // Check if both contexts are the same.
- cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
- j(equal, &same_contexts);
-
- // Compare security tokens.
- // Check that the security token in the calling global object is
- // compatible with the security token in the receiving global
- // object.
-
- // Check the context is a global context.
- if (emit_debug_code()) {
- // Preserve original value of holder_reg.
- push(holder_reg);
- movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
- CompareRoot(holder_reg, Heap::kNullValueRootIndex);
- Check(not_equal, "JSGlobalProxy::context() should not be null.");
-
- // Read the first word and compare to global_context_map(),
- movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
- CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex);
- Check(equal, "JSGlobalObject::global_context should be a global context.");
- pop(holder_reg);
- }
-
- movq(kScratchRegister,
- FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
- int token_offset =
- Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
- movq(scratch, FieldOperand(scratch, token_offset));
- cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
- j(not_equal, miss);
-
- bind(&same_contexts);
-}
-
-
-void MacroAssembler::LoadAllocationTopHelper(Register result,
- Register scratch,
- AllocationFlags flags) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Just return if allocation top is already known.
- if ((flags & RESULT_CONTAINS_TOP) != 0) {
- // No use of scratch if allocation top is provided.
- ASSERT(!scratch.is_valid());
-#ifdef DEBUG
- // Assert that result actually contains top on entry.
- Operand top_operand = ExternalOperand(new_space_allocation_top);
- cmpq(result, top_operand);
- Check(equal, "Unexpected allocation top");
-#endif
- return;
- }
-
- // Move address of new object to result. Use scratch register if available,
- // and keep address in scratch until call to UpdateAllocationTopHelper.
- if (scratch.is_valid()) {
- LoadAddress(scratch, new_space_allocation_top);
- movq(result, Operand(scratch, 0));
- } else {
- Load(result, new_space_allocation_top);
- }
-}
-
-
-void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
- Register scratch) {
- if (emit_debug_code()) {
- testq(result_end, Immediate(kObjectAlignmentMask));
- Check(zero, "Unaligned allocation in new space");
- }
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Update new top.
- if (scratch.is_valid()) {
- // Scratch already contains address of allocation top.
- movq(Operand(scratch, 0), result_end);
- } else {
- Store(new_space_allocation_top, result_end);
- }
-}
-
-
-void MacroAssembler::AllocateInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- movl(result, Immediate(0x7091));
- if (result_end.is_valid()) {
- movl(result_end, Immediate(0x7191));
- }
- if (scratch.is_valid()) {
- movl(scratch, Immediate(0x7291));
- }
- }
- jmp(gc_required);
- return;
- }
- ASSERT(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- Register top_reg = result_end.is_valid() ? result_end : result;
-
- if (!top_reg.is(result)) {
- movq(top_reg, result);
- }
- addq(top_reg, Immediate(object_size));
- j(carry, gc_required);
- Operand limit_operand = ExternalOperand(new_space_allocation_limit);
- cmpq(top_reg, limit_operand);
- j(above, gc_required);
-
- // Update allocation top.
- UpdateAllocationTopHelper(top_reg, scratch);
-
- if (top_reg.is(result)) {
- if ((flags & TAG_OBJECT) != 0) {
- subq(result, Immediate(object_size - kHeapObjectTag));
- } else {
- subq(result, Immediate(object_size));
- }
- } else if ((flags & TAG_OBJECT) != 0) {
- // Tag the result if requested.
- addq(result, Immediate(kHeapObjectTag));
- }
-}
-
-
-void MacroAssembler::AllocateInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- movl(result, Immediate(0x7091));
- movl(result_end, Immediate(0x7191));
- if (scratch.is_valid()) {
- movl(scratch, Immediate(0x7291));
- }
- // Register element_count is not modified by the function.
- }
- jmp(gc_required);
- return;
- }
- ASSERT(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- // We assume that element_count*element_size + header_size does not
- // overflow.
- lea(result_end, Operand(element_count, element_size, header_size));
- addq(result_end, result);
- j(carry, gc_required);
- Operand limit_operand = ExternalOperand(new_space_allocation_limit);
- cmpq(result_end, limit_operand);
- j(above, gc_required);
-
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
-
- // Tag the result if requested.
- if ((flags & TAG_OBJECT) != 0) {
- addq(result, Immediate(kHeapObjectTag));
- }
-}
-
-
-void MacroAssembler::AllocateInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- movl(result, Immediate(0x7091));
- movl(result_end, Immediate(0x7191));
- if (scratch.is_valid()) {
- movl(scratch, Immediate(0x7291));
- }
- // object_size is left unchanged by this function.
- }
- jmp(gc_required);
- return;
- }
- ASSERT(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
- if (!object_size.is(result_end)) {
- movq(result_end, object_size);
- }
- addq(result_end, result);
- j(carry, gc_required);
- Operand limit_operand = ExternalOperand(new_space_allocation_limit);
- cmpq(result_end, limit_operand);
- j(above, gc_required);
-
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
-
- // Tag the result if requested.
- if ((flags & TAG_OBJECT) != 0) {
- addq(result, Immediate(kHeapObjectTag));
- }
-}
-
-
-void MacroAssembler::UndoAllocationInNewSpace(Register object) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Make sure the object has no tag before resetting top.
- and_(object, Immediate(~kHeapObjectTagMask));
- Operand top_operand = ExternalOperand(new_space_allocation_top);
-#ifdef DEBUG
- cmpq(object, top_operand);
- Check(below, "Undo allocation of non allocated memory");
-#endif
- movq(top_operand, object);
-}
-
-
-void MacroAssembler::AllocateHeapNumber(Register result,
- Register scratch,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch,
- no_reg,
- gc_required,
- TAG_OBJECT);
-
- // Set the map.
- LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
-void MacroAssembler::AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
- kObjectAlignmentMask;
- ASSERT(kShortSize == 2);
- // scratch1 = length * 2 + kObjectAlignmentMask.
- lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
- kHeaderAlignment));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
- if (kHeaderAlignment > 0) {
- subq(scratch1, Immediate(kHeaderAlignment));
- }
-
- // Allocate two byte string in new space.
- AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
- times_1,
- scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
- Integer32ToSmi(scratch1, length);
- movq(FieldOperand(result, String::kLengthOffset), scratch1);
- movq(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- const int kHeaderAlignment = SeqAsciiString::kHeaderSize &
- kObjectAlignmentMask;
- movl(scratch1, length);
- ASSERT(kCharSize == 1);
- addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
- if (kHeaderAlignment > 0) {
- subq(scratch1, Immediate(kHeaderAlignment));
- }
-
- // Allocate ascii string in new space.
- AllocateInNewSpace(SeqAsciiString::kHeaderSize,
- times_1,
- scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
- Integer32ToSmi(scratch1, length);
- movq(FieldOperand(result, String::kLengthOffset), scratch1);
- movq(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map. The other fields are left uninitialized.
- LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map. The other fields are left uninitialized.
- LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
-// Copy memory, byte-by-byte, from source to destination. Not optimized for
-// long or aligned copies. The contents of scratch and length are destroyed.
-// Destination is incremented by length, source, length and scratch are
-// clobbered.
-// A simpler loop is faster on small copies, but slower on large ones.
-// The cld() instruction must have been emitted, to set the direction flag(),
-// before calling this function.
-void MacroAssembler::CopyBytes(Register destination,
- Register source,
- Register length,
- int min_length,
- Register scratch) {
- ASSERT(min_length >= 0);
- if (FLAG_debug_code) {
- cmpl(length, Immediate(min_length));
- Assert(greater_equal, "Invalid min_length");
- }
- Label loop, done, short_string, short_loop;
-
- const int kLongStringLimit = 20;
- if (min_length <= kLongStringLimit) {
- cmpl(length, Immediate(kLongStringLimit));
- j(less_equal, &short_string);
- }
-
- ASSERT(source.is(rsi));
- ASSERT(destination.is(rdi));
- ASSERT(length.is(rcx));
-
- // Because source is 8-byte aligned in our uses of this function,
- // we keep source aligned for the rep movs operation by copying the odd bytes
- // at the end of the ranges.
- movq(scratch, length);
- shrl(length, Immediate(3));
- repmovsq();
- // Move remaining bytes of length.
- andl(scratch, Immediate(0x7));
- movq(length, Operand(source, scratch, times_1, -8));
- movq(Operand(destination, scratch, times_1, -8), length);
- addq(destination, scratch);
-
- if (min_length <= kLongStringLimit) {
- jmp(&done);
-
- bind(&short_string);
- if (min_length == 0) {
- testl(length, length);
- j(zero, &done);
- }
- lea(scratch, Operand(destination, length, times_1, 0));
-
- bind(&short_loop);
- movb(length, Operand(source, 0));
- movb(Operand(destination, 0), length);
- incq(source);
- incq(destination);
- cmpq(destination, scratch);
- j(not_equal, &short_loop);
-
- bind(&done);
- }
-}
-
-
-void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
- if (context_chain_length > 0) {
- // Move up the chain of contexts to the context containing the slot.
- movq(dst, Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
- // Load the function context (which is the incoming, outer context).
- movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
- for (int i = 1; i < context_chain_length; i++) {
- movq(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
- movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
- }
- // The context may be an intermediate context, not a function context.
- movq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- } else {
- // Slot is in the current function context. Move it into the
- // destination register in case we store into it (the write barrier
- // cannot be allowed to destroy the context in rsi).
- movq(dst, rsi);
- }
-
- // We should not have found a 'with' context by walking the context chain
- // (i.e., the static scope chain and runtime context chain do not agree).
- // A variable occurring in such a scope should have slot type LOOKUP and
- // not CONTEXT.
- if (emit_debug_code()) {
- cmpq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- Check(equal, "Yo dawg, I heard you liked function contexts "
- "so I put function contexts in all your contexts");
- }
-}
-
-#ifdef _WIN64
-static const int kRegisterPassedArguments = 4;
-#else
-static const int kRegisterPassedArguments = 6;
-#endif
-
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- movq(function, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
- movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
- movq(function, Operand(function, Context::SlotOffset(index)));
-}
-
-
-void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
- Register map) {
- // Load the initial map. The global functions all have initial maps.
- movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (emit_debug_code()) {
- Label ok, fail;
- CheckMap(map, FACTORY->meta_map(), &fail, false);
- jmp(&ok);
- bind(&fail);
- Abort("Global functions must have initial map");
- bind(&ok);
- }
-}
-
-
-int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
- // On Windows 64 stack slots are reserved by the caller for all arguments
- // including the ones passed in registers, and space is always allocated for
- // the four register arguments even if the function takes fewer than four
- // arguments.
- // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
- // and the caller does not reserve stack slots for them.
- ASSERT(num_arguments >= 0);
-#ifdef _WIN64
- const int kMinimumStackSlots = kRegisterPassedArguments;
- if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
- return num_arguments;
-#else
- if (num_arguments < kRegisterPassedArguments) return 0;
- return num_arguments - kRegisterPassedArguments;
-#endif
-}
-
-
-void MacroAssembler::PrepareCallCFunction(int num_arguments) {
- int frame_alignment = OS::ActivationFrameAlignment();
- ASSERT(frame_alignment != 0);
- ASSERT(num_arguments >= 0);
-
- // Make stack end at alignment and allocate space for arguments and old rsp.
- movq(kScratchRegister, rsp);
- ASSERT(IsPowerOf2(frame_alignment));
- int argument_slots_on_stack =
- ArgumentStackSlotsForCFunctionCall(num_arguments);
- subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
- and_(rsp, Immediate(-frame_alignment));
- movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
-}
-
-
-void MacroAssembler::CallCFunction(ExternalReference function,
- int num_arguments) {
- LoadAddress(rax, function);
- CallCFunction(rax, num_arguments);
-}
-
-
-void MacroAssembler::CallCFunction(Register function, int num_arguments) {
- // Check stack alignment.
- if (emit_debug_code()) {
- CheckStackAlignment();
- }
-
- call(function);
- ASSERT(OS::ActivationFrameAlignment() != 0);
- ASSERT(num_arguments >= 0);
- int argument_slots_on_stack =
- ArgumentStackSlotsForCFunctionCall(num_arguments);
- movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
-}
-
-
-CodePatcher::CodePatcher(byte* address, int size)
- : address_(address),
- size_(size),
- masm_(Isolate::Current(), address, size + Assembler::kGap) {
- // Create a new macro assembler pointing to the address of the code to patch.
- // The size is adjusted with kGap on order for the assembler to generate size
- // bytes of instructions without failing with buffer size constraints.
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-CodePatcher::~CodePatcher() {
- // Indicate that code has changed.
- CPU::FlushICache(address_, size_);
-
- // Check that the code was patched as expected.
- ASSERT(masm_.pc_ == address_ + size_);
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/macro-assembler-x64.h b/src/3rdparty/v8/src/x64/macro-assembler-x64.h
deleted file mode 100644
index 9fde18d..0000000
--- a/src/3rdparty/v8/src/x64/macro-assembler-x64.h
+++ /dev/null
@@ -1,1984 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
-#define V8_X64_MACRO_ASSEMBLER_X64_H_
-
-#include "assembler.h"
-
-namespace v8 {
-namespace internal {
-
-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
- // No special flags.
- NO_ALLOCATION_FLAGS = 0,
- // Return the pointer to the allocated already tagged as a heap object.
- TAG_OBJECT = 1 << 0,
- // The content of the result register already contains the allocation top in
- // new space.
- RESULT_CONTAINS_TOP = 1 << 1
-};
-
-// Default scratch register used by MacroAssembler (and other code that needs
-// a spare register). The register isn't callee save, and not used by the
-// function calling convention.
-static const Register kScratchRegister = { 10 }; // r10.
-static const Register kSmiConstantRegister = { 12 }; // r12 (callee save).
-static const Register kRootRegister = { 13 }; // r13 (callee save).
-// Value of smi in kSmiConstantRegister.
-static const int kSmiConstantRegisterValue = 1;
-// Actual value of root register is offset from the root array's start
-// to take advantage of negitive 8-bit displacement values.
-static const int kRootRegisterBias = 128;
-
-// Convenience for platform-independent signatures.
-typedef Operand MemOperand;
-
-// Forward declaration.
-class JumpTarget;
-class CallWrapper;
-
-struct SmiIndex {
- SmiIndex(Register index_register, ScaleFactor scale)
- : reg(index_register),
- scale(scale) {}
- Register reg;
- ScaleFactor scale;
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler: public Assembler {
- public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
-
- // Prevent the use of the RootArray during the lifetime of this
- // scope object.
- class NoRootArrayScope BASE_EMBEDDED {
- public:
- explicit NoRootArrayScope(MacroAssembler* assembler)
- : variable_(&assembler->root_array_available_),
- old_value_(assembler->root_array_available_) {
- assembler->root_array_available_ = false;
- }
- ~NoRootArrayScope() {
- *variable_ = old_value_;
- }
- private:
- bool* variable_;
- bool old_value_;
- };
-
- // Operand pointing to an external reference.
- // May emit code to set up the scratch register. The operand is
- // only guaranteed to be correct as long as the scratch register
- // isn't changed.
- // If the operand is used more than once, use a scratch register
- // that is guaranteed not to be clobbered.
- Operand ExternalOperand(ExternalReference reference,
- Register scratch = kScratchRegister);
- // Loads and stores the value of an external reference.
- // Special case code for load and store to take advantage of
- // load_rax/store_rax if possible/necessary.
- // For other operations, just use:
- // Operand operand = ExternalOperand(extref);
- // operation(operand, ..);
- void Load(Register destination, ExternalReference source);
- void Store(ExternalReference destination, Register source);
- // Loads the address of the external reference into the destination
- // register.
- void LoadAddress(Register destination, ExternalReference source);
- // Returns the size of the code generated by LoadAddress.
- // Used by CallSize(ExternalReference) to find the size of a call.
- int LoadAddressSize(ExternalReference source);
-
- // Operations on roots in the root-array.
- void LoadRoot(Register destination, Heap::RootListIndex index);
- void StoreRoot(Register source, Heap::RootListIndex index);
- // Load a root value where the index (or part of it) is variable.
- // The variable_offset register is added to the fixed_offset value
- // to get the index into the root-array.
- void LoadRootIndexed(Register destination,
- Register variable_offset,
- int fixed_offset);
- void CompareRoot(Register with, Heap::RootListIndex index);
- void CompareRoot(const Operand& with, Heap::RootListIndex index);
- void PushRoot(Heap::RootListIndex index);
-
- // ---------------------------------------------------------------------------
- // GC Support
-
- // For page containing |object| mark region covering |addr| dirty.
- // RecordWriteHelper only works if the object is not in new
- // space.
- void RecordWriteHelper(Register object,
- Register addr,
- Register scratch);
-
- // Check if object is in new space. The condition cc can be equal or
- // not_equal. If it is equal a jump will be done if the object is on new
- // space. The register scratch can be object itself, but it will be clobbered.
- template <typename LabelType>
- void InNewSpace(Register object,
- Register scratch,
- Condition cc,
- LabelType* branch);
-
- // For page containing |object| mark region covering [object+offset]
- // dirty. |object| is the object being stored into, |value| is the
- // object being stored. If |offset| is zero, then the |scratch|
- // register contains the array index into the elements array
- // represented as an untagged 32-bit integer. All registers are
- // clobbered by the operation. RecordWrite filters out smis so it
- // does not update the write barrier if the value is a smi.
- void RecordWrite(Register object,
- int offset,
- Register value,
- Register scratch);
-
- // For page containing |object| mark region covering [address]
- // dirty. |object| is the object being stored into, |value| is the
- // object being stored. All registers are clobbered by the
- // operation. RecordWrite filters out smis so it does not update
- // the write barrier if the value is a smi.
- void RecordWrite(Register object,
- Register address,
- Register value);
-
- // For page containing |object| mark region covering [object+offset] dirty.
- // The value is known to not be a smi.
- // object is the object being stored into, value is the object being stored.
- // If offset is zero, then the scratch register contains the array index into
- // the elements array represented as an untagged 32-bit integer.
- // All registers are clobbered by the operation.
- void RecordWriteNonSmi(Register object,
- int offset,
- Register value,
- Register scratch);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // ---------------------------------------------------------------------------
- // Debugger Support
-
- void DebugBreak();
-#endif
-
- // ---------------------------------------------------------------------------
- // Activation frames
-
- void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
- void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
- void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
- void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
- // Enter specific kind of exit frame; either in normal or
- // debug mode. Expects the number of arguments in register rax and
- // sets up the number of arguments in register rdi and the pointer
- // to the first argument in register rsi.
- //
- // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
- // accessible via StackSpaceOperand.
- void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false);
-
- // Enter specific kind of exit frame. Allocates arg_stack_space * kPointerSize
- // memory (not GCed) on the stack accessible via StackSpaceOperand.
- void EnterApiExitFrame(int arg_stack_space);
-
- // Leave the current exit frame. Expects/provides the return value in
- // register rax:rdx (untouched) and the pointer to the first
- // argument in register rsi.
- void LeaveExitFrame(bool save_doubles = false);
-
- // Leave the current exit frame. Expects/provides the return value in
- // register rax (untouched).
- void LeaveApiExitFrame();
-
- // Push and pop the registers that can hold pointers.
- void PushSafepointRegisters() { Pushad(); }
- void PopSafepointRegisters() { Popad(); }
- // Store the value in register src in the safepoint register stack
- // slot for register dst.
- void StoreToSafepointRegisterSlot(Register dst, Register src);
- void LoadFromSafepointRegisterSlot(Register dst, Register src);
-
- void InitializeRootRegister() {
- ExternalReference roots_address =
- ExternalReference::roots_address(isolate());
- movq(kRootRegister, roots_address);
- addq(kRootRegister, Immediate(kRootRegisterBias));
- }
-
- // ---------------------------------------------------------------------------
- // JavaScript invokes
-
- // Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- CallWrapper* call_wrapper = NULL);
-
- void InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- CallWrapper* call_wrapper = NULL);
-
- // Invoke the JavaScript function in the given register. Changes the
- // current context to the context in the function before invoking.
- void InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
- CallWrapper* call_wrapper = NULL);
-
- void InvokeFunction(JSFunction* function,
- const ParameterCount& actual,
- InvokeFlag flag,
- CallWrapper* call_wrapper = NULL);
-
- // Invoke specified builtin JavaScript function. Adds an entry to
- // the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- CallWrapper* call_wrapper = NULL);
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, Builtins::JavaScript id);
-
- // Store the code object for the given builtin in the target register.
- void GetBuiltinEntry(Register target, Builtins::JavaScript id);
-
-
- // ---------------------------------------------------------------------------
- // Smi tagging, untagging and operations on tagged smis.
-
- void InitializeSmiConstantRegister() {
- movq(kSmiConstantRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
- RelocInfo::NONE);
- }
-
- // Conversions between tagged smi values and non-tagged integer values.
-
- // Tag an integer value. The result must be known to be a valid smi value.
- // Only uses the low 32 bits of the src register. Sets the N and Z flags
- // based on the value of the resulting smi.
- void Integer32ToSmi(Register dst, Register src);
-
- // Stores an integer32 value into a memory field that already holds a smi.
- void Integer32ToSmiField(const Operand& dst, Register src);
-
- // Adds constant to src and tags the result as a smi.
- // Result must be a valid smi.
- void Integer64PlusConstantToSmi(Register dst, Register src, int constant);
-
- // Convert smi to 32-bit integer. I.e., not sign extended into
- // high 32 bits of destination.
- void SmiToInteger32(Register dst, Register src);
- void SmiToInteger32(Register dst, const Operand& src);
-
- // Convert smi to 64-bit integer (sign extended if necessary).
- void SmiToInteger64(Register dst, Register src);
- void SmiToInteger64(Register dst, const Operand& src);
-
- // Multiply a positive smi's integer value by a power of two.
- // Provides result as 64-bit integer value.
- void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
- Register src,
- int power);
-
- // Divide a positive smi's integer value by a power of two.
- // Provides result as 32-bit integer value.
- void PositiveSmiDivPowerOfTwoToInteger32(Register dst,
- Register src,
- int power);
-
-
- // Simple comparison of smis. Both sides must be known smis to use these,
- // otherwise use Cmp.
- void SmiCompare(Register smi1, Register smi2);
- void SmiCompare(Register dst, Smi* src);
- void SmiCompare(Register dst, const Operand& src);
- void SmiCompare(const Operand& dst, Register src);
- void SmiCompare(const Operand& dst, Smi* src);
- // Compare the int32 in src register to the value of the smi stored at dst.
- void SmiCompareInteger32(const Operand& dst, Register src);
- // Sets sign and zero flags depending on value of smi in register.
- void SmiTest(Register src);
-
- // Functions performing a check on a known or potential smi. Returns
- // a condition that is satisfied if the check is successful.
-
- // Is the value a tagged smi.
- Condition CheckSmi(Register src);
- Condition CheckSmi(const Operand& src);
-
- // Is the value a non-negative tagged smi.
- Condition CheckNonNegativeSmi(Register src);
-
- // Are both values tagged smis.
- Condition CheckBothSmi(Register first, Register second);
-
- // Are both values non-negative tagged smis.
- Condition CheckBothNonNegativeSmi(Register first, Register second);
-
- // Are either value a tagged smi.
- Condition CheckEitherSmi(Register first,
- Register second,
- Register scratch = kScratchRegister);
-
- // Is the value the minimum smi value (since we are using
- // two's complement numbers, negating the value is known to yield
- // a non-smi value).
- Condition CheckIsMinSmi(Register src);
-
- // Checks whether an 32-bit integer value is a valid for conversion
- // to a smi.
- Condition CheckInteger32ValidSmiValue(Register src);
-
- // Checks whether an 32-bit unsigned integer value is a valid for
- // conversion to a smi.
- Condition CheckUInteger32ValidSmiValue(Register src);
-
- // Check whether src is a Smi, and set dst to zero if it is a smi,
- // and to one if it isn't.
- void CheckSmiToIndicator(Register dst, Register src);
- void CheckSmiToIndicator(Register dst, const Operand& src);
-
- // Test-and-jump functions. Typically combines a check function
- // above with a conditional jump.
-
- // Jump if the value cannot be represented by a smi.
- template <typename LabelType>
- void JumpIfNotValidSmiValue(Register src, LabelType* on_invalid);
-
- // Jump if the unsigned integer value cannot be represented by a smi.
- template <typename LabelType>
- void JumpIfUIntNotValidSmiValue(Register src, LabelType* on_invalid);
-
- // Jump to label if the value is a tagged smi.
- template <typename LabelType>
- void JumpIfSmi(Register src, LabelType* on_smi);
-
- // Jump to label if the value is not a tagged smi.
- template <typename LabelType>
- void JumpIfNotSmi(Register src, LabelType* on_not_smi);
-
- // Jump to label if the value is not a non-negative tagged smi.
- template <typename LabelType>
- void JumpUnlessNonNegativeSmi(Register src, LabelType* on_not_smi);
-
- // Jump to label if the value, which must be a tagged smi, has value equal
- // to the constant.
- template <typename LabelType>
- void JumpIfSmiEqualsConstant(Register src,
- Smi* constant,
- LabelType* on_equals);
-
- // Jump if either or both register are not smi values.
- template <typename LabelType>
- void JumpIfNotBothSmi(Register src1,
- Register src2,
- LabelType* on_not_both_smi);
-
- // Jump if either or both register are not non-negative smi values.
- template <typename LabelType>
- void JumpUnlessBothNonNegativeSmi(Register src1, Register src2,
- LabelType* on_not_both_smi);
-
- // Operations on tagged smi values.
-
- // Smis represent a subset of integers. The subset is always equivalent to
- // a two's complement interpretation of a fixed number of bits.
-
- // Optimistically adds an integer constant to a supposed smi.
- // If the src is not a smi, or the result is not a smi, jump to
- // the label.
- template <typename LabelType>
- void SmiTryAddConstant(Register dst,
- Register src,
- Smi* constant,
- LabelType* on_not_smi_result);
-
- // Add an integer constant to a tagged smi, giving a tagged smi as result.
- // No overflow testing on the result is done.
- void SmiAddConstant(Register dst, Register src, Smi* constant);
-
- // Add an integer constant to a tagged smi, giving a tagged smi as result.
- // No overflow testing on the result is done.
- void SmiAddConstant(const Operand& dst, Smi* constant);
-
- // Add an integer constant to a tagged smi, giving a tagged smi as result,
- // or jumping to a label if the result cannot be represented by a smi.
- template <typename LabelType>
- void SmiAddConstant(Register dst,
- Register src,
- Smi* constant,
- LabelType* on_not_smi_result);
-
- // Subtract an integer constant from a tagged smi, giving a tagged smi as
- // result. No testing on the result is done. Sets the N and Z flags
- // based on the value of the resulting integer.
- void SmiSubConstant(Register dst, Register src, Smi* constant);
-
- // Subtract an integer constant from a tagged smi, giving a tagged smi as
- // result, or jumping to a label if the result cannot be represented by a smi.
- template <typename LabelType>
- void SmiSubConstant(Register dst,
- Register src,
- Smi* constant,
- LabelType* on_not_smi_result);
-
- // Negating a smi can give a negative zero or too large positive value.
- // NOTICE: This operation jumps on success, not failure!
- template <typename LabelType>
- void SmiNeg(Register dst,
- Register src,
- LabelType* on_smi_result);
-
- // Adds smi values and return the result as a smi.
- // If dst is src1, then src1 will be destroyed, even if
- // the operation is unsuccessful.
- template <typename LabelType>
- void SmiAdd(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result);
- template <typename LabelType>
- void SmiAdd(Register dst,
- Register src1,
- const Operand& src2,
- LabelType* on_not_smi_result);
-
- void SmiAdd(Register dst,
- Register src1,
- Register src2);
-
- // Subtracts smi values and return the result as a smi.
- // If dst is src1, then src1 will be destroyed, even if
- // the operation is unsuccessful.
- template <typename LabelType>
- void SmiSub(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result);
-
- void SmiSub(Register dst,
- Register src1,
- Register src2);
-
- template <typename LabelType>
- void SmiSub(Register dst,
- Register src1,
- const Operand& src2,
- LabelType* on_not_smi_result);
-
- void SmiSub(Register dst,
- Register src1,
- const Operand& src2);
-
- // Multiplies smi values and return the result as a smi,
- // if possible.
- // If dst is src1, then src1 will be destroyed, even if
- // the operation is unsuccessful.
- template <typename LabelType>
- void SmiMul(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result);
-
- // Divides one smi by another and returns the quotient.
- // Clobbers rax and rdx registers.
- template <typename LabelType>
- void SmiDiv(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result);
-
- // Divides one smi by another and returns the remainder.
- // Clobbers rax and rdx registers.
- template <typename LabelType>
- void SmiMod(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result);
-
- // Bitwise operations.
- void SmiNot(Register dst, Register src);
- void SmiAnd(Register dst, Register src1, Register src2);
- void SmiOr(Register dst, Register src1, Register src2);
- void SmiXor(Register dst, Register src1, Register src2);
- void SmiAndConstant(Register dst, Register src1, Smi* constant);
- void SmiOrConstant(Register dst, Register src1, Smi* constant);
- void SmiXorConstant(Register dst, Register src1, Smi* constant);
-
- void SmiShiftLeftConstant(Register dst,
- Register src,
- int shift_value);
- template <typename LabelType>
- void SmiShiftLogicalRightConstant(Register dst,
- Register src,
- int shift_value,
- LabelType* on_not_smi_result);
- void SmiShiftArithmeticRightConstant(Register dst,
- Register src,
- int shift_value);
-
- // Shifts a smi value to the left, and returns the result if that is a smi.
- // Uses and clobbers rcx, so dst may not be rcx.
- void SmiShiftLeft(Register dst,
- Register src1,
- Register src2);
- // Shifts a smi value to the right, shifting in zero bits at the top, and
- // returns the unsigned intepretation of the result if that is a smi.
- // Uses and clobbers rcx, so dst may not be rcx.
- template <typename LabelType>
- void SmiShiftLogicalRight(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result);
- // Shifts a smi value to the right, sign extending the top, and
- // returns the signed intepretation of the result. That will always
- // be a valid smi value, since it's numerically smaller than the
- // original.
- // Uses and clobbers rcx, so dst may not be rcx.
- void SmiShiftArithmeticRight(Register dst,
- Register src1,
- Register src2);
-
- // Specialized operations
-
- // Select the non-smi register of two registers where exactly one is a
- // smi. If neither are smis, jump to the failure label.
- template <typename LabelType>
- void SelectNonSmi(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smis);
-
- // Converts, if necessary, a smi to a combination of number and
- // multiplier to be used as a scaled index.
- // The src register contains a *positive* smi value. The shift is the
- // power of two to multiply the index value by (e.g.
- // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
- // The returned index register may be either src or dst, depending
- // on what is most efficient. If src and dst are different registers,
- // src is always unchanged.
- SmiIndex SmiToIndex(Register dst, Register src, int shift);
-
- // Converts a positive smi to a negative index.
- SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
-
- // Add the value of a smi in memory to an int32 register.
- // Sets flags as a normal add.
- void AddSmiField(Register dst, const Operand& src);
-
- // Basic Smi operations.
- void Move(Register dst, Smi* source) {
- LoadSmiConstant(dst, source);
- }
-
- void Move(const Operand& dst, Smi* source) {
- Register constant = GetSmiConstant(source);
- movq(dst, constant);
- }
-
- void Push(Smi* smi);
- void Test(const Operand& dst, Smi* source);
-
- // ---------------------------------------------------------------------------
- // String macros.
-
- // If object is a string, its map is loaded into object_map.
- template <typename LabelType>
- void JumpIfNotString(Register object,
- Register object_map,
- LabelType* not_string);
-
-
- template <typename LabelType>
- void JumpIfNotBothSequentialAsciiStrings(Register first_object,
- Register second_object,
- Register scratch1,
- Register scratch2,
- LabelType* on_not_both_flat_ascii);
-
- // Check whether the instance type represents a flat ascii string. Jump to the
- // label if not. If the instance type can be scratched specify same register
- // for both instance type and scratch.
- template <typename LabelType>
- void JumpIfInstanceTypeIsNotSequentialAscii(
- Register instance_type,
- Register scratch,
- LabelType *on_not_flat_ascii_string);
-
- template <typename LabelType>
- void JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- LabelType* on_fail);
-
- // ---------------------------------------------------------------------------
- // Macro instructions.
-
- // Load a register with a long value as efficiently as possible.
- void Set(Register dst, int64_t x);
- void Set(const Operand& dst, int64_t x);
-
- // Move if the registers are not identical.
- void Move(Register target, Register source);
-
- // Handle support
- void Move(Register dst, Handle<Object> source);
- void Move(const Operand& dst, Handle<Object> source);
- void Cmp(Register dst, Handle<Object> source);
- void Cmp(const Operand& dst, Handle<Object> source);
- void Cmp(Register dst, Smi* src);
- void Cmp(const Operand& dst, Smi* src);
- void Push(Handle<Object> source);
-
- // Emit code to discard a non-negative number of pointer-sized elements
- // from the stack, clobbering only the rsp register.
- void Drop(int stack_elements);
-
- void Call(Label* target) { call(target); }
-
- // Control Flow
- void Jump(Address destination, RelocInfo::Mode rmode);
- void Jump(ExternalReference ext);
- void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
-
- void Call(Address destination, RelocInfo::Mode rmode);
- void Call(ExternalReference ext);
- void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
-
- // The size of the code generated for different call instructions.
- int CallSize(Address destination, RelocInfo::Mode rmode) {
- return kCallInstructionLength;
- }
- int CallSize(ExternalReference ext);
- int CallSize(Handle<Code> code_object) {
- // Code calls use 32-bit relative addressing.
- return kShortCallInstructionLength;
- }
- int CallSize(Register target) {
- // Opcode: REX_opt FF /2 m64
- return (target.high_bit() != 0) ? 3 : 2;
- }
- int CallSize(const Operand& target) {
- // Opcode: REX_opt FF /2 m64
- return (target.requires_rex() ? 2 : 1) + target.operand_size();
- }
-
- // Emit call to the code we are currently generating.
- void CallSelf() {
- Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
- Call(self, RelocInfo::CODE_TARGET);
- }
-
- // Non-x64 instructions.
- // Push/pop all general purpose registers.
- // Does not push rsp/rbp nor any of the assembler's special purpose registers
- // (kScratchRegister, kSmiConstantRegister, kRootRegister).
- void Pushad();
- void Popad();
- // Sets the stack as after performing Popad, without actually loading the
- // registers.
- void Dropad();
-
- // Compare object type for heap object.
- // Always use unsigned comparisons: above and below, not less and greater.
- // Incoming register is heap_object and outgoing register is map.
- // They may be the same register, and may be kScratchRegister.
- void CmpObjectType(Register heap_object, InstanceType type, Register map);
-
- // Compare instance type for map.
- // Always use unsigned comparisons: above and below, not less and greater.
- void CmpInstanceType(Register map, InstanceType type);
-
- // Check if the map of an object is equal to a specified map and
- // branch to label if not. Skip the smi check if not required
- // (object is known to be a heap object)
- void CheckMap(Register obj,
- Handle<Map> map,
- Label* fail,
- bool is_heap_object);
-
- // Check if the object in register heap_object is a string. Afterwards the
- // register map contains the object map and the register instance_type
- // contains the instance_type. The registers map and instance_type can be the
- // same in which case it contains the instance type afterwards. Either of the
- // registers map and instance_type can be the same as heap_object.
- Condition IsObjectStringType(Register heap_object,
- Register map,
- Register instance_type);
-
- // FCmp compares and pops the two values on top of the FPU stack.
- // The flag results are similar to integer cmp, but requires unsigned
- // jcc instructions (je, ja, jae, jb, jbe, je, and jz).
- void FCmp();
-
- // Abort execution if argument is not a number. Used in debug code.
- void AbortIfNotNumber(Register object);
-
- // Abort execution if argument is a smi. Used in debug code.
- void AbortIfSmi(Register object);
-
- // Abort execution if argument is not a smi. Used in debug code.
- void AbortIfNotSmi(Register object);
- void AbortIfNotSmi(const Operand& object);
-
- // Abort execution if argument is a string. Used in debug code.
- void AbortIfNotString(Register object);
-
- // Abort execution if argument is not the root value with the given index.
- void AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message);
-
- // ---------------------------------------------------------------------------
- // Exception handling
-
- // Push a new try handler and link into try handler chain. The return
- // address must be pushed before calling this helper.
- void PushTryHandler(CodeLocation try_location, HandlerType type);
-
- // Unlink the stack handler on top of the stack from the try handler chain.
- void PopTryHandler();
-
- // Activate the top handler in the try hander chain and pass the
- // thrown value.
- void Throw(Register value);
-
- // Propagate an uncatchable exception out of the current JS stack.
- void ThrowUncatchable(UncatchableExceptionType type, Register value);
-
- // ---------------------------------------------------------------------------
- // Inline caching support
-
- // Generate code for checking access rights - used for security checks
- // on access to global objects across environments. The holder register
- // is left untouched, but the scratch register and kScratchRegister,
- // which must be different, are clobbered.
- void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss);
-
-
- // ---------------------------------------------------------------------------
- // Allocation support
-
- // Allocate an object in new space. If the new space is exhausted control
- // continues at the gc_required label. The allocated object is returned in
- // result and end of the new object is returned in result_end. The register
- // scratch can be passed as no_reg in which case an additional object
- // reference will be added to the reloc info. The returned pointers in result
- // and result_end have not yet been tagged as heap objects. If
- // result_contains_top_on_entry is true the content of result is known to be
- // the allocation top on entry (could be result_end from a previous call to
- // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
- // should be no_reg as it is never used.
- void AllocateInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- // Undo allocation in new space. The object passed and objects allocated after
- // it will no longer be allocated. Make sure that no pointers are left to the
- // object(s) no longer allocated as they would be invalid when allocation is
- // un-done.
- void UndoAllocationInNewSpace(Register object);
-
- // Allocate a heap number in new space with undefined value. Returns
- // tagged pointer in result register, or jumps to gc_required if new
- // space is full.
- void AllocateHeapNumber(Register result,
- Register scratch,
- Label* gc_required);
-
- // Allocate a sequential string. All the header fields of the string object
- // are initialized.
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
-
- // Allocate a raw cons string object. Only the map field of the result is
- // initialized.
- void AllocateConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateAsciiConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // ---------------------------------------------------------------------------
- // Support functions.
-
- // Check if result is zero and op is negative.
- void NegativeZeroTest(Register result, Register op, Label* then_label);
-
- // Check if result is zero and op is negative in code using jump targets.
- void NegativeZeroTest(CodeGenerator* cgen,
- Register result,
- Register op,
- JumpTarget* then_target);
-
- // Check if result is zero and any of op1 and op2 are negative.
- // Register scratch is destroyed, and it must be different from op2.
- void NegativeZeroTest(Register result, Register op1, Register op2,
- Register scratch, Label* then_label);
-
- // Try to get function prototype of a function and puts the value in
- // the result register. Checks that the function really is a
- // function and jumps to the miss label if the fast checks fail. The
- // function register will be untouched; the other register may be
- // clobbered.
- void TryGetFunctionPrototype(Register function,
- Register result,
- Label* miss);
-
- // Generates code for reporting that an illegal operation has
- // occurred.
- void IllegalOperation(int num_arguments);
-
- // Picks out an array index from the hash field.
- // Register use:
- // hash - holds the index's hash. Clobbered.
- // index - holds the overwritten index on exit.
- void IndexFromHash(Register hash, Register index);
-
- // Find the function context up the context chain.
- void LoadContext(Register dst, int context_chain_length);
-
- // Load the global function with the given index.
- void LoadGlobalFunction(int index, Register function);
-
- // Load the initial map from the global function. The registers
- // function and map can be the same.
- void LoadGlobalFunctionInitialMap(Register function, Register map);
-
- // ---------------------------------------------------------------------------
- // Runtime calls
-
- // Call a code stub.
- void CallStub(CodeStub* stub);
-
- // Call a code stub and return the code object called. Try to generate
- // the code if necessary. Do not perform a GC but instead return a retry
- // after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub);
-
- // Tail call a code stub (jump).
- void TailCallStub(CodeStub* stub);
-
- // Tail call a code stub (jump) and return the code object called. Try to
- // generate the code if necessary. Do not perform a GC but instead return
- // a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub);
-
- // Return from a code stub after popping its arguments.
- void StubReturn(int argc);
-
- // Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
-
- // Call a runtime function and save the value of XMM registers.
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
-
- // Call a runtime function, returning the CodeStub object called.
- // Try to generate the stub code if necessary. Do not perform a GC
- // but instead return a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
- int num_arguments);
-
- // Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments);
-
- // Convenience function: Same as above, but takes the fid instead.
- MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::FunctionId id,
- int num_arguments);
-
- // Convenience function: call an external reference.
- void CallExternalReference(const ExternalReference& ext,
- int num_arguments);
-
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
-
- MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size);
-
- // Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
-
- MUST_USE_RESULT MaybeObject* TryTailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
-
- // Jump to a runtime routine.
- void JumpToExternalReference(const ExternalReference& ext, int result_size);
-
- // Jump to a runtime routine.
- MaybeObject* TryJumpToExternalReference(const ExternalReference& ext,
- int result_size);
-
- // Prepares stack to put arguments (aligns and so on).
- // WIN64 calling convention requires to put the pointer to the return value
- // slot into rcx (rcx must be preserverd until TryCallApiFunctionAndReturn).
- // Saves context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
- // inside the exit frame (not GCed) accessible via StackSpaceOperand.
- void PrepareCallApiFunction(int arg_stack_space);
-
- // Calls an API function. Allocates HandleScope, extracts
- // returned value from handle and propagates exceptions.
- // Clobbers r14, r15, rbx and caller-save registers. Restores context.
- // On return removes stack_space * kPointerSize (GCed).
- MUST_USE_RESULT MaybeObject* TryCallApiFunctionAndReturn(
- ApiFunction* function, int stack_space);
-
- // Before calling a C-function from generated code, align arguments on stack.
- // After aligning the frame, arguments must be stored in esp[0], esp[4],
- // etc., not pushed. The argument count assumes all arguments are word sized.
- // The number of slots reserved for arguments depends on platform. On Windows
- // stack slots are reserved for the arguments passed in registers. On other
- // platforms stack slots are only reserved for the arguments actually passed
- // on the stack.
- void PrepareCallCFunction(int num_arguments);
-
- // Calls a C function and cleans up the space for arguments allocated
- // by PrepareCallCFunction. The called function is not allowed to trigger a
- // garbage collection, since that might move the code and invalidate the
- // return address (unless this is somehow accounted for by the called
- // function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
-
- // Calculate the number of stack slots to reserve for arguments when calling a
- // C function.
- int ArgumentStackSlotsForCFunctionCall(int num_arguments);
-
- // ---------------------------------------------------------------------------
- // Utilities
-
- void Ret();
-
- // Return and drop arguments from stack, where the number of arguments
- // may be bigger than 2^16 - 1. Requires a scratch register.
- void Ret(int bytes_dropped, Register scratch);
-
- Handle<Object> CodeObject() {
- ASSERT(!code_object_.is_null());
- return code_object_;
- }
-
- // Copy length bytes from source to destination.
- // Uses scratch register internally (if you have a low-eight register
- // free, do use it, otherwise kScratchRegister will be used).
- // The min_length is a minimum limit on the value that length will have.
- // The algorithm has some special cases that might be omitted if the string
- // is known to always be long.
- void CopyBytes(Register destination,
- Register source,
- Register length,
- int min_length = 0,
- Register scratch = kScratchRegister);
-
-
- // ---------------------------------------------------------------------------
- // StatsCounter support
-
- void SetCounter(StatsCounter* counter, int value);
- void IncrementCounter(StatsCounter* counter, int value);
- void DecrementCounter(StatsCounter* counter, int value);
-
-
- // ---------------------------------------------------------------------------
- // Debugging
-
- // Calls Abort(msg) if the condition cc is not satisfied.
- // Use --debug_code to enable.
- void Assert(Condition cc, const char* msg);
-
- void AssertFastElements(Register elements);
-
- // Like Assert(), but always enabled.
- void Check(Condition cc, const char* msg);
-
- // Print a message to stdout and abort execution.
- void Abort(const char* msg);
-
- // Check that the stack is aligned.
- void CheckStackAlignment();
-
- // Verify restrictions about code generated in stubs.
- void set_generating_stub(bool value) { generating_stub_ = value; }
- bool generating_stub() { return generating_stub_; }
- void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
- bool allow_stub_calls() { return allow_stub_calls_; }
-
- private:
- // Order general registers are pushed by Pushad.
- // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
- static int kSafepointPushRegisterIndices[Register::kNumRegisters];
- static const int kNumSafepointSavedRegisters = 11;
-
- bool generating_stub_;
- bool allow_stub_calls_;
- bool root_array_available_;
-
- // Returns a register holding the smi value. The register MUST NOT be
- // modified. It may be the "smi 1 constant" register.
- Register GetSmiConstant(Smi* value);
-
- // Moves the smi value to the destination register.
- void LoadSmiConstant(Register dst, Smi* value);
-
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
-
- // Helper functions for generating invokes.
- template <typename LabelType>
- void InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_register,
- LabelType* done,
- InvokeFlag flag,
- CallWrapper* call_wrapper);
-
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
- void EnterExitFramePrologue(bool save_rax);
-
- // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
- // accessible via StackSpaceOperand.
- void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);
-
- void LeaveExitFrameEpilogue();
-
- // Allocation support helpers.
- // Loads the top of new-space into the result register.
- // Otherwise the address of the new-space top is loaded into scratch (if
- // scratch is valid), and the new-space top is loaded into result.
- void LoadAllocationTopHelper(Register result,
- Register scratch,
- AllocationFlags flags);
- // Update allocation top with value in result_end register.
- // If scratch is valid, it contains the address of the allocation top.
- void UpdateAllocationTopHelper(Register result_end, Register scratch);
-
- // Helper for PopHandleScope. Allowed to perform a GC and returns
- // NULL if gc_allowed. Does not perform a GC if !gc_allowed, and
- // possibly returns a failure object indicating an allocation failure.
- Object* PopHandleScopeHelper(Register saved,
- Register scratch,
- bool gc_allowed);
-
-
- // Compute memory operands for safepoint stack slots.
- Operand SafepointRegisterSlot(Register reg);
- static int SafepointRegisterStackIndex(int reg_code) {
- return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
- }
-
- // Needs access to SafepointRegisterStackIndex for optimized frame
- // traversal.
- friend class OptimizedFrame;
-};
-
-
-// The code patcher is used to patch (typically) small parts of code e.g. for
-// debugging and other types of instrumentation. When using the code patcher
-// the exact number of bytes specified must be emitted. Is not legal to emit
-// relocation information. If any of these constraints are violated it causes
-// an assertion.
-class CodePatcher {
- public:
- CodePatcher(byte* address, int size);
- virtual ~CodePatcher();
-
- // Macro assembler to emit code.
- MacroAssembler* masm() { return &masm_; }
-
- private:
- byte* address_; // The address of the code being patched.
- int size_; // Number of bytes of the expected patch size.
- MacroAssembler masm_; // Macro assembler used to generate the code.
-};
-
-
-// Helper class for generating code or data associated with the code
-// right before or after a call instruction. As an example this can be used to
-// generate safepoint data after calls for crankshaft.
-class CallWrapper {
- public:
- CallWrapper() { }
- virtual ~CallWrapper() { }
- // Called just before emitting a call. Argument is the size of the generated
- // call code.
- virtual void BeforeCall(int call_size) = 0;
- // Called just after emitting a call, i.e., at the return site for the call.
- virtual void AfterCall() = 0;
-};
-
-
-// -----------------------------------------------------------------------------
-// Static helper functions.
-
-// Generate an Operand for loading a field from an object.
-static inline Operand FieldOperand(Register object, int offset) {
- return Operand(object, offset - kHeapObjectTag);
-}
-
-
-// Generate an Operand for loading an indexed field from an object.
-static inline Operand FieldOperand(Register object,
- Register index,
- ScaleFactor scale,
- int offset) {
- return Operand(object, index, scale, offset - kHeapObjectTag);
-}
-
-
-static inline Operand ContextOperand(Register context, int index) {
- return Operand(context, Context::SlotOffset(index));
-}
-
-
-static inline Operand GlobalObjectOperand() {
- return ContextOperand(rsi, Context::GLOBAL_INDEX);
-}
-
-
-// Provides access to exit frame stack space (not GCed).
-static inline Operand StackSpaceOperand(int index) {
-#ifdef _WIN64
- const int kShaddowSpace = 4;
- return Operand(rsp, (index + kShaddowSpace) * kPointerSize);
-#else
- return Operand(rsp, index * kPointerSize);
-#endif
-}
-
-
-
-#ifdef GENERATED_CODE_COVERAGE
-extern void LogGeneratedCodeCoverage(const char* file_line);
-#define CODE_COVERAGE_STRINGIFY(x) #x
-#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
-#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) { \
- byte* x64_coverage_function = \
- reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
- masm->pushfd(); \
- masm->pushad(); \
- masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
- masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY); \
- masm->pop(rax); \
- masm->popad(); \
- masm->popfd(); \
- } \
- masm->
-#else
-#define ACCESS_MASM(masm) masm->
-#endif
-
-// -----------------------------------------------------------------------------
-// Template implementations.
-
-static int kSmiShift = kSmiTagSize + kSmiShiftSize;
-
-
-template <typename LabelType>
-void MacroAssembler::SmiNeg(Register dst,
- Register src,
- LabelType* on_smi_result) {
- if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- movq(kScratchRegister, src);
- neg(dst); // Low 32 bits are retained as zero by negation.
- // Test if result is zero or Smi::kMinValue.
- cmpq(dst, kScratchRegister);
- j(not_equal, on_smi_result);
- movq(src, kScratchRegister);
- } else {
- movq(dst, src);
- neg(dst);
- cmpq(dst, src);
- // If the result is zero or Smi::kMinValue, negation failed to create a smi.
- j(not_equal, on_smi_result);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiAdd(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!dst.is(src2));
- if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- j(overflow, on_not_smi_result);
- movq(dst, kScratchRegister);
- } else {
- movq(dst, src1);
- addq(dst, src2);
- j(overflow, on_not_smi_result);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiAdd(Register dst,
- Register src1,
- const Operand& src2,
- LabelType* on_not_smi_result) {
- ASSERT_NOT_NULL(on_not_smi_result);
- if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- j(overflow, on_not_smi_result);
- movq(dst, kScratchRegister);
- } else {
- ASSERT(!src2.AddressUsesRegister(dst));
- movq(dst, src1);
- addq(dst, src2);
- j(overflow, on_not_smi_result);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!dst.is(src2));
- if (dst.is(src1)) {
- cmpq(dst, src2);
- j(overflow, on_not_smi_result);
- subq(dst, src2);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- const Operand& src2,
- LabelType* on_not_smi_result) {
- ASSERT_NOT_NULL(on_not_smi_result);
- if (dst.is(src1)) {
- movq(kScratchRegister, src2);
- cmpq(src1, kScratchRegister);
- j(overflow, on_not_smi_result);
- subq(src1, kScratchRegister);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiMul(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result) {
- ASSERT(!dst.is(src2));
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
-
- if (dst.is(src1)) {
- NearLabel failure, zero_correct_result;
- movq(kScratchRegister, src1); // Create backup for later testing.
- SmiToInteger64(dst, src1);
- imul(dst, src2);
- j(overflow, &failure);
-
- // Check for negative zero result. If product is zero, and one
- // argument is negative, go to slow case.
- NearLabel correct_result;
- testq(dst, dst);
- j(not_zero, &correct_result);
-
- movq(dst, kScratchRegister);
- xor_(dst, src2);
- j(positive, &zero_correct_result); // Result was positive zero.
-
- bind(&failure); // Reused failure exit, restores src1.
- movq(src1, kScratchRegister);
- jmp(on_not_smi_result);
-
- bind(&zero_correct_result);
- Set(dst, 0);
-
- bind(&correct_result);
- } else {
- SmiToInteger64(dst, src1);
- imul(dst, src2);
- j(overflow, on_not_smi_result);
- // Check for negative zero result. If product is zero, and one
- // argument is negative, go to slow case.
- NearLabel correct_result;
- testq(dst, dst);
- j(not_zero, &correct_result);
- // One of src1 and src2 is zero, the check whether the other is
- // negative.
- movq(kScratchRegister, src1);
- xor_(kScratchRegister, src2);
- j(negative, on_not_smi_result);
- bind(&correct_result);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiTryAddConstant(Register dst,
- Register src,
- Smi* constant,
- LabelType* on_not_smi_result) {
- // Does not assume that src is a smi.
- ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
- ASSERT_EQ(0, kSmiTag);
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src.is(kScratchRegister));
-
- JumpIfNotSmi(src, on_not_smi_result);
- Register tmp = (dst.is(src) ? kScratchRegister : dst);
- LoadSmiConstant(tmp, constant);
- addq(tmp, src);
- j(overflow, on_not_smi_result);
- if (dst.is(src)) {
- movq(dst, tmp);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiAddConstant(Register dst,
- Register src,
- Smi* constant,
- LabelType* on_not_smi_result) {
- if (constant->value() == 0) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
-
- LoadSmiConstant(kScratchRegister, constant);
- addq(kScratchRegister, src);
- j(overflow, on_not_smi_result);
- movq(dst, kScratchRegister);
- } else {
- LoadSmiConstant(dst, constant);
- addq(dst, src);
- j(overflow, on_not_smi_result);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiSubConstant(Register dst,
- Register src,
- Smi* constant,
- LabelType* on_not_smi_result) {
- if (constant->value() == 0) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- if (constant->value() == Smi::kMinValue) {
- // Subtracting min-value from any non-negative value will overflow.
- // We test the non-negativeness before doing the subtraction.
- testq(src, src);
- j(not_sign, on_not_smi_result);
- LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
- } else {
- // Subtract by adding the negation.
- LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
- addq(kScratchRegister, dst);
- j(overflow, on_not_smi_result);
- movq(dst, kScratchRegister);
- }
- } else {
- if (constant->value() == Smi::kMinValue) {
- // Subtracting min-value from any non-negative value will overflow.
- // We test the non-negativeness before doing the subtraction.
- testq(src, src);
- j(not_sign, on_not_smi_result);
- LoadSmiConstant(dst, constant);
- // Adding and subtracting the min-value gives the same result, it only
- // differs on the overflow bit, which we don't check here.
- addq(dst, src);
- } else {
- // Subtract by adding the negation.
- LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
- addq(dst, src);
- j(overflow, on_not_smi_result);
- }
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiDiv(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result) {
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src2.is(rax));
- ASSERT(!src2.is(rdx));
- ASSERT(!src1.is(rdx));
-
- // Check for 0 divisor (result is +/-Infinity).
- NearLabel positive_divisor;
- testq(src2, src2);
- j(zero, on_not_smi_result);
-
- if (src1.is(rax)) {
- movq(kScratchRegister, src1);
- }
- SmiToInteger32(rax, src1);
- // We need to rule out dividing Smi::kMinValue by -1, since that would
- // overflow in idiv and raise an exception.
- // We combine this with negative zero test (negative zero only happens
- // when dividing zero by a negative number).
-
- // We overshoot a little and go to slow case if we divide min-value
- // by any negative value, not just -1.
- NearLabel safe_div;
- testl(rax, Immediate(0x7fffffff));
- j(not_zero, &safe_div);
- testq(src2, src2);
- if (src1.is(rax)) {
- j(positive, &safe_div);
- movq(src1, kScratchRegister);
- jmp(on_not_smi_result);
- } else {
- j(negative, on_not_smi_result);
- }
- bind(&safe_div);
-
- SmiToInteger32(src2, src2);
- // Sign extend src1 into edx:eax.
- cdq();
- idivl(src2);
- Integer32ToSmi(src2, src2);
- // Check that the remainder is zero.
- testl(rdx, rdx);
- if (src1.is(rax)) {
- NearLabel smi_result;
- j(zero, &smi_result);
- movq(src1, kScratchRegister);
- jmp(on_not_smi_result);
- bind(&smi_result);
- } else {
- j(not_zero, on_not_smi_result);
- }
- if (!dst.is(src1) && src1.is(rax)) {
- movq(src1, kScratchRegister);
- }
- Integer32ToSmi(dst, rax);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiMod(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!src2.is(rax));
- ASSERT(!src2.is(rdx));
- ASSERT(!src1.is(rdx));
- ASSERT(!src1.is(src2));
-
- testq(src2, src2);
- j(zero, on_not_smi_result);
-
- if (src1.is(rax)) {
- movq(kScratchRegister, src1);
- }
- SmiToInteger32(rax, src1);
- SmiToInteger32(src2, src2);
-
- // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
- NearLabel safe_div;
- cmpl(rax, Immediate(Smi::kMinValue));
- j(not_equal, &safe_div);
- cmpl(src2, Immediate(-1));
- j(not_equal, &safe_div);
- // Retag inputs and go slow case.
- Integer32ToSmi(src2, src2);
- if (src1.is(rax)) {
- movq(src1, kScratchRegister);
- }
- jmp(on_not_smi_result);
- bind(&safe_div);
-
- // Sign extend eax into edx:eax.
- cdq();
- idivl(src2);
- // Restore smi tags on inputs.
- Integer32ToSmi(src2, src2);
- if (src1.is(rax)) {
- movq(src1, kScratchRegister);
- }
- // Check for a negative zero result. If the result is zero, and the
- // dividend is negative, go slow to return a floating point negative zero.
- NearLabel smi_result;
- testl(rdx, rdx);
- j(not_zero, &smi_result);
- testq(src1, src1);
- j(negative, on_not_smi_result);
- bind(&smi_result);
- Integer32ToSmi(dst, rdx);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiShiftLogicalRightConstant(
- Register dst, Register src, int shift_value, LabelType* on_not_smi_result) {
- // Logic right shift interprets its result as an *unsigned* number.
- if (dst.is(src)) {
- UNIMPLEMENTED(); // Not used.
- } else {
- movq(dst, src);
- if (shift_value == 0) {
- testq(dst, dst);
- j(negative, on_not_smi_result);
- }
- shr(dst, Immediate(shift_value + kSmiShift));
- shl(dst, Immediate(kSmiShift));
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiShiftLogicalRight(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(rcx));
- // dst and src1 can be the same, because the one case that bails out
- // is a shift by 0, which leaves dst, and therefore src1, unchanged.
- NearLabel result_ok;
- if (src1.is(rcx) || src2.is(rcx)) {
- movq(kScratchRegister, rcx);
- }
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- SmiToInteger32(rcx, src2);
- orl(rcx, Immediate(kSmiShift));
- shr_cl(dst); // Shift is rcx modulo 0x1f + 32.
- shl(dst, Immediate(kSmiShift));
- testq(dst, dst);
- if (src1.is(rcx) || src2.is(rcx)) {
- NearLabel positive_result;
- j(positive, &positive_result);
- if (src1.is(rcx)) {
- movq(src1, kScratchRegister);
- } else {
- movq(src2, kScratchRegister);
- }
- jmp(on_not_smi_result);
- bind(&positive_result);
- } else {
- j(negative, on_not_smi_result); // src2 was zero and src1 negative.
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SelectNonSmi(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smis) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(src1));
- ASSERT(!dst.is(src2));
- // Both operands must not be smis.
-#ifdef DEBUG
- if (allow_stub_calls()) { // Check contains a stub call.
- Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
- Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
- }
-#endif
- ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(0, Smi::FromInt(0));
- movl(kScratchRegister, Immediate(kSmiTagMask));
- and_(kScratchRegister, src1);
- testl(kScratchRegister, src2);
- // If non-zero then both are smis.
- j(not_zero, on_not_smis);
-
- // Exactly one operand is a smi.
- ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
- // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
- subq(kScratchRegister, Immediate(1));
- // If src1 is a smi, then scratch register all 1s, else it is all 0s.
- movq(dst, src1);
- xor_(dst, src2);
- and_(dst, kScratchRegister);
- // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
- xor_(dst, src1);
- // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfSmi(Register src, LabelType* on_smi) {
- ASSERT_EQ(0, kSmiTag);
- Condition smi = CheckSmi(src);
- j(smi, on_smi);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotSmi(Register src, LabelType* on_not_smi) {
- Condition smi = CheckSmi(src);
- j(NegateCondition(smi), on_not_smi);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpUnlessNonNegativeSmi(
- Register src, LabelType* on_not_smi_or_negative) {
- Condition non_negative_smi = CheckNonNegativeSmi(src);
- j(NegateCondition(non_negative_smi), on_not_smi_or_negative);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
- Smi* constant,
- LabelType* on_equals) {
- SmiCompare(src, constant);
- j(equal, on_equals);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotValidSmiValue(Register src,
- LabelType* on_invalid) {
- Condition is_valid = CheckInteger32ValidSmiValue(src);
- j(NegateCondition(is_valid), on_invalid);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
- LabelType* on_invalid) {
- Condition is_valid = CheckUInteger32ValidSmiValue(src);
- j(NegateCondition(is_valid), on_invalid);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotBothSmi(Register src1,
- Register src2,
- LabelType* on_not_both_smi) {
- Condition both_smi = CheckBothSmi(src1, src2);
- j(NegateCondition(both_smi), on_not_both_smi);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
- Register src2,
- LabelType* on_not_both_smi) {
- Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
- j(NegateCondition(both_smi), on_not_both_smi);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotString(Register object,
- Register object_map,
- LabelType* not_string) {
- Condition is_smi = CheckSmi(object);
- j(is_smi, not_string);
- CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
- j(above_equal, not_string);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
- Register second_object,
- Register scratch1,
- Register scratch2,
- LabelType* on_fail) {
- // Check that both objects are not smis.
- Condition either_smi = CheckEitherSmi(first_object, second_object);
- j(either_smi, on_fail);
-
- // Load instance type for both strings.
- movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
- movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
- movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
- movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
-
- // Check that both are flat ascii strings.
- ASSERT(kNotStringTag != 0);
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-
- andl(scratch1, Immediate(kFlatAsciiStringMask));
- andl(scratch2, Immediate(kFlatAsciiStringMask));
- // Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
- j(not_equal, on_fail);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
- Register instance_type,
- Register scratch,
- LabelType *failure) {
- if (!scratch.is(instance_type)) {
- movl(scratch, instance_type);
- }
-
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-
- andl(scratch, Immediate(kFlatAsciiStringMask));
- cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
- j(not_equal, failure);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- LabelType* on_fail) {
- // Load instance type for both strings.
- movq(scratch1, first_object_instance_type);
- movq(scratch2, second_object_instance_type);
-
- // Check that both are flat ascii strings.
- ASSERT(kNotStringTag != 0);
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-
- andl(scratch1, Immediate(kFlatAsciiStringMask));
- andl(scratch2, Immediate(kFlatAsciiStringMask));
- // Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
- j(not_equal, on_fail);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::InNewSpace(Register object,
- Register scratch,
- Condition cc,
- LabelType* branch) {
- if (Serializer::enabled()) {
- // Can't do arithmetic on external references if it might get serialized.
- // The mask isn't really an address. We load it as an external reference in
- // case the size of the new space is different between the snapshot maker
- // and the running system.
- if (scratch.is(object)) {
- movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
- and_(scratch, kScratchRegister);
- } else {
- movq(scratch, ExternalReference::new_space_mask(isolate()));
- and_(scratch, object);
- }
- movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
- cmpq(scratch, kScratchRegister);
- j(cc, branch);
- } else {
- ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
- intptr_t new_space_start =
- reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
- movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
- if (scratch.is(object)) {
- addq(scratch, kScratchRegister);
- } else {
- lea(scratch, Operand(object, kScratchRegister, times_1, 0));
- }
- and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
- j(cc, branch);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_register,
- LabelType* done,
- InvokeFlag flag,
- CallWrapper* call_wrapper) {
- bool definitely_matches = false;
- NearLabel invoke;
- if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
- if (expected.immediate() == actual.immediate()) {
- definitely_matches = true;
- } else {
- Set(rax, actual.immediate());
- if (expected.immediate() ==
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
- // Don't worry about adapting arguments for built-ins that
- // don't want that done. Skip adaption code by making it look
- // like we have a match between expected and actual number of
- // arguments.
- definitely_matches = true;
- } else {
- Set(rbx, expected.immediate());
- }
- }
- } else {
- if (actual.is_immediate()) {
- // Expected is in register, actual is immediate. This is the
- // case when we invoke function values without going through the
- // IC mechanism.
- cmpq(expected.reg(), Immediate(actual.immediate()));
- j(equal, &invoke);
- ASSERT(expected.reg().is(rbx));
- Set(rax, actual.immediate());
- } else if (!expected.reg().is(actual.reg())) {
- // Both expected and actual are in (different) registers. This
- // is the case when we invoke functions using call and apply.
- cmpq(expected.reg(), actual.reg());
- j(equal, &invoke);
- ASSERT(actual.reg().is(rax));
- ASSERT(expected.reg().is(rbx));
- }
- }
-
- if (!definitely_matches) {
- Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
- if (!code_constant.is_null()) {
- movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
- addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- } else if (!code_register.is(rdx)) {
- movq(rdx, code_register);
- }
-
- if (flag == CALL_FUNCTION) {
- if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(adaptor));
- Call(adaptor, RelocInfo::CODE_TARGET);
- if (call_wrapper != NULL) call_wrapper->AfterCall();
- jmp(done);
- } else {
- Jump(adaptor, RelocInfo::CODE_TARGET);
- }
- bind(&invoke);
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_X64_MACRO_ASSEMBLER_X64_H_
diff --git a/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.cc b/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.cc
deleted file mode 100644
index 03f91fa..0000000
--- a/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.cc
+++ /dev/null
@@ -1,1398 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "serialize.h"
-#include "unicode.h"
-#include "log.h"
-#include "regexp-stack.h"
-#include "macro-assembler.h"
-#include "regexp-macro-assembler.h"
-#include "x64/regexp-macro-assembler-x64.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-
-/*
- * This assembler uses the following register assignment convention
- * - rdx : currently loaded character(s) as ASCII or UC16. Must be loaded using
- * LoadCurrentCharacter before using any of the dispatch methods.
- * - rdi : current position in input, as negative offset from end of string.
- * Please notice that this is the byte offset, not the character
- * offset! Is always a 32-bit signed (negative) offset, but must be
- * maintained sign-extended to 64 bits, since it is used as index.
- * - rsi : end of input (points to byte after last character in input),
- * so that rsi+rdi points to the current character.
- * - rbp : frame pointer. Used to access arguments, local variables and
- * RegExp registers.
- * - rsp : points to tip of C stack.
- * - rcx : points to tip of backtrack stack. The backtrack stack contains
- * only 32-bit values. Most are offsets from some base (e.g., character
- * positions from end of string or code location from Code* pointer).
- * - r8 : code object pointer. Used to convert between absolute and
- * code-object-relative addresses.
- *
- * The registers rax, rbx, r9 and r11 are free to use for computations.
- * If changed to use r12+, they should be saved as callee-save registers.
- * The macro assembler special registers r12 and r13 (kSmiConstantRegister,
- * kRootRegister) aren't special during execution of RegExp code (they don't
- * hold the values assumed when creating JS code), so no Smi or Root related
- * macro operations can be used.
- *
- * Each call to a C++ method should retain these registers.
- *
- * The stack will have the following content, in some order, indexable from the
- * frame pointer (see, e.g., kStackHighEnd):
- * - Isolate* isolate (Address of the current isolate)
- * - direct_call (if 1, direct call from JavaScript code, if 0 call
- * through the runtime system)
- * - stack_area_base (High end of the memory area to use as
- * backtracking stack)
- * - int* capture_array (int[num_saved_registers_], for output).
- * - end of input (Address of end of string)
- * - start of input (Address of first character in string)
- * - start index (character index of start)
- * - String* input_string (input string)
- * - return address
- * - backup of callee save registers (rbx, possibly rsi and rdi).
- * - Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a non-position.
- * - At start of string (if 1, we are starting at the start of the
- * string, otherwise 0)
- * - register 0 rbp[-n] (Only positions must be stored in the first
- * - register 1 rbp[-n-8] num_saved_registers_ registers)
- * - ...
- *
- * The first num_saved_registers_ registers are initialized to point to
- * "character -1" in the string (i.e., char_size() bytes before the first
- * character of the string). The remaining registers starts out uninitialized.
- *
- * The first seven values must be provided by the calling code by
- * calling the code's entry address cast to a function pointer with the
- * following signature:
- * int (*match)(String* input_string,
- * int start_index,
- * Address start,
- * Address end,
- * int* capture_output_array,
- * bool at_start,
- * byte* stack_area_base,
- * bool direct_call)
- */
-
-#define __ ACCESS_MASM((&masm_))
-
-RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(
- Mode mode,
- int registers_to_save)
- : masm_(Isolate::Current(), NULL, kRegExpCodeSize),
- no_root_array_scope_(&masm_),
- code_relative_fixup_positions_(4),
- mode_(mode),
- num_registers_(registers_to_save),
- num_saved_registers_(registers_to_save),
- entry_label_(),
- start_label_(),
- success_label_(),
- backtrack_label_(),
- exit_label_() {
- ASSERT_EQ(0, registers_to_save % 2);
- __ jmp(&entry_label_); // We'll write the entry code when we know more.
- __ bind(&start_label_); // And then continue from here.
-}
-
-
-RegExpMacroAssemblerX64::~RegExpMacroAssemblerX64() {
- // Unuse labels in case we throw away the assembler without calling GetCode.
- entry_label_.Unuse();
- start_label_.Unuse();
- success_label_.Unuse();
- backtrack_label_.Unuse();
- exit_label_.Unuse();
- check_preempt_label_.Unuse();
- stack_overflow_label_.Unuse();
-}
-
-
-int RegExpMacroAssemblerX64::stack_limit_slack() {
- return RegExpStack::kStackLimitSlack;
-}
-
-
-void RegExpMacroAssemblerX64::AdvanceCurrentPosition(int by) {
- if (by != 0) {
- __ addq(rdi, Immediate(by * char_size()));
- }
-}
-
-
-void RegExpMacroAssemblerX64::AdvanceRegister(int reg, int by) {
- ASSERT(reg >= 0);
- ASSERT(reg < num_registers_);
- if (by != 0) {
- __ addq(register_location(reg), Immediate(by));
- }
-}
-
-
-void RegExpMacroAssemblerX64::Backtrack() {
- CheckPreemption();
- // Pop Code* offset from backtrack stack, add Code* and jump to location.
- Pop(rbx);
- __ addq(rbx, code_object_pointer());
- __ jmp(rbx);
-}
-
-
-void RegExpMacroAssemblerX64::Bind(Label* label) {
- __ bind(label);
-}
-
-
-void RegExpMacroAssemblerX64::CheckCharacter(uint32_t c, Label* on_equal) {
- __ cmpl(current_character(), Immediate(c));
- BranchOrBacktrack(equal, on_equal);
-}
-
-
-void RegExpMacroAssemblerX64::CheckCharacterGT(uc16 limit, Label* on_greater) {
- __ cmpl(current_character(), Immediate(limit));
- BranchOrBacktrack(greater, on_greater);
-}
-
-
-void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, &not_at_start);
- // If we did, are we still at the start of the input?
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
- __ cmpq(rax, Operand(rbp, kInputStart));
- BranchOrBacktrack(equal, on_at_start);
- __ bind(&not_at_start);
-}
-
-
-void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, on_not_at_start);
- // If we did, are we still at the start of the input?
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
- __ cmpq(rax, Operand(rbp, kInputStart));
- BranchOrBacktrack(not_equal, on_not_at_start);
-}
-
-
-void RegExpMacroAssemblerX64::CheckCharacterLT(uc16 limit, Label* on_less) {
- __ cmpl(current_character(), Immediate(limit));
- BranchOrBacktrack(less, on_less);
-}
-
-
-void RegExpMacroAssemblerX64::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
-#ifdef DEBUG
- // If input is ASCII, don't even bother calling here if the string to
- // match contains a non-ascii character.
- if (mode_ == ASCII) {
- ASSERT(String::IsAscii(str.start(), str.length()));
- }
-#endif
- int byte_length = str.length() * char_size();
- int byte_offset = cp_offset * char_size();
- if (check_end_of_string) {
- // Check that there are at least str.length() characters left in the input.
- __ cmpl(rdi, Immediate(-(byte_offset + byte_length)));
- BranchOrBacktrack(greater, on_failure);
- }
-
- if (on_failure == NULL) {
- // Instead of inlining a backtrack, (re)use the global backtrack target.
- on_failure = &backtrack_label_;
- }
-
- // Do one character test first to minimize loading for the case that
- // we don't match at all (loading more than one character introduces that
- // chance of reading unaligned and reading across cache boundaries).
- // If the first character matches, expect a larger chance of matching the
- // string, and start loading more characters at a time.
- if (mode_ == ASCII) {
- __ cmpb(Operand(rsi, rdi, times_1, byte_offset),
- Immediate(static_cast<int8_t>(str[0])));
- } else {
- // Don't use 16-bit immediate. The size changing prefix throws off
- // pre-decoding.
- __ movzxwl(rax,
- Operand(rsi, rdi, times_1, byte_offset));
- __ cmpl(rax, Immediate(static_cast<int32_t>(str[0])));
- }
- BranchOrBacktrack(not_equal, on_failure);
-
- __ lea(rbx, Operand(rsi, rdi, times_1, 0));
- for (int i = 1, n = str.length(); i < n; ) {
- if (mode_ == ASCII) {
- if (i + 8 <= n) {
- uint64_t combined_chars =
- (static_cast<uint64_t>(str[i + 0]) << 0) ||
- (static_cast<uint64_t>(str[i + 1]) << 8) ||
- (static_cast<uint64_t>(str[i + 2]) << 16) ||
- (static_cast<uint64_t>(str[i + 3]) << 24) ||
- (static_cast<uint64_t>(str[i + 4]) << 32) ||
- (static_cast<uint64_t>(str[i + 5]) << 40) ||
- (static_cast<uint64_t>(str[i + 6]) << 48) ||
- (static_cast<uint64_t>(str[i + 7]) << 56);
- __ movq(rax, combined_chars, RelocInfo::NONE);
- __ cmpq(rax, Operand(rbx, byte_offset + i));
- i += 8;
- } else if (i + 4 <= n) {
- uint32_t combined_chars =
- (static_cast<uint32_t>(str[i + 0]) << 0) ||
- (static_cast<uint32_t>(str[i + 1]) << 8) ||
- (static_cast<uint32_t>(str[i + 2]) << 16) ||
- (static_cast<uint32_t>(str[i + 3]) << 24);
- __ cmpl(Operand(rbx, byte_offset + i), Immediate(combined_chars));
- i += 4;
- } else {
- __ cmpb(Operand(rbx, byte_offset + i),
- Immediate(static_cast<int8_t>(str[i])));
- i++;
- }
- } else {
- ASSERT(mode_ == UC16);
- if (i + 4 <= n) {
- uint64_t combined_chars = *reinterpret_cast<const uint64_t*>(&str[i]);
- __ movq(rax, combined_chars, RelocInfo::NONE);
- __ cmpq(rax,
- Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)));
- i += 4;
- } else if (i + 2 <= n) {
- uint32_t combined_chars = *reinterpret_cast<const uint32_t*>(&str[i]);
- __ cmpl(Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)),
- Immediate(combined_chars));
- i += 2;
- } else {
- __ movzxwl(rax,
- Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)));
- __ cmpl(rax, Immediate(str[i]));
- i++;
- }
- }
- BranchOrBacktrack(not_equal, on_failure);
- }
-}
-
-
-void RegExpMacroAssemblerX64::CheckGreedyLoop(Label* on_equal) {
- Label fallthrough;
- __ cmpl(rdi, Operand(backtrack_stackpointer(), 0));
- __ j(not_equal, &fallthrough);
- Drop();
- BranchOrBacktrack(no_condition, on_equal);
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
- __ movq(rdx, register_location(start_reg)); // Offset of start of capture
- __ movq(rbx, register_location(start_reg + 1)); // Offset of end of capture
- __ subq(rbx, rdx); // Length of capture.
-
- // -----------------------
- // rdx = Start offset of capture.
- // rbx = Length of capture
-
- // If length is negative, this code will fail (it's a symptom of a partial or
- // illegal capture where start of capture after end of capture).
- // This must not happen (no back-reference can reference a capture that wasn't
- // closed before in the reg-exp, and we must not generate code that can cause
- // this condition).
-
- // If length is zero, either the capture is empty or it is nonparticipating.
- // In either case succeed immediately.
- __ j(equal, &fallthrough);
-
- if (mode_ == ASCII) {
- Label loop_increment;
- if (on_no_match == NULL) {
- on_no_match = &backtrack_label_;
- }
-
- __ lea(r9, Operand(rsi, rdx, times_1, 0));
- __ lea(r11, Operand(rsi, rdi, times_1, 0));
- __ addq(rbx, r9); // End of capture
- // ---------------------
- // r11 - current input character address
- // r9 - current capture character address
- // rbx - end of capture
-
- Label loop;
- __ bind(&loop);
- __ movzxbl(rdx, Operand(r9, 0));
- __ movzxbl(rax, Operand(r11, 0));
- // al - input character
- // dl - capture character
- __ cmpb(rax, rdx);
- __ j(equal, &loop_increment);
-
- // Mismatch, try case-insensitive match (converting letters to lower-case).
- // I.e., if or-ing with 0x20 makes values equal and in range 'a'-'z', it's
- // a match.
- __ or_(rax, Immediate(0x20)); // Convert match character to lower-case.
- __ or_(rdx, Immediate(0x20)); // Convert capture character to lower-case.
- __ cmpb(rax, rdx);
- __ j(not_equal, on_no_match); // Definitely not equal.
- __ subb(rax, Immediate('a'));
- __ cmpb(rax, Immediate('z' - 'a'));
- __ j(above, on_no_match); // Weren't letters anyway.
-
- __ bind(&loop_increment);
- // Increment pointers into match and capture strings.
- __ addq(r11, Immediate(1));
- __ addq(r9, Immediate(1));
- // Compare to end of capture, and loop if not done.
- __ cmpq(r9, rbx);
- __ j(below, &loop);
-
- // Compute new value of character position after the matched part.
- __ movq(rdi, r11);
- __ subq(rdi, rsi);
- } else {
- ASSERT(mode_ == UC16);
- // Save important/volatile registers before calling C function.
-#ifndef _WIN64
- // Caller save on Linux and callee save in Windows.
- __ push(rsi);
- __ push(rdi);
-#endif
- __ push(backtrack_stackpointer());
-
- static const int num_arguments = 4;
- __ PrepareCallCFunction(num_arguments);
-
- // Put arguments into parameter registers. Parameters are
- // Address byte_offset1 - Address captured substring's start.
- // Address byte_offset2 - Address of current character position.
- // size_t byte_length - length of capture in bytes(!)
- // Isolate* isolate
-#ifdef _WIN64
- // Compute and set byte_offset1 (start of capture).
- __ lea(rcx, Operand(rsi, rdx, times_1, 0));
- // Set byte_offset2.
- __ lea(rdx, Operand(rsi, rdi, times_1, 0));
- // Set byte_length.
- __ movq(r8, rbx);
- // Isolate.
- __ LoadAddress(r9, ExternalReference::isolate_address());
-#else // AMD64 calling convention
- // Compute byte_offset2 (current position = rsi+rdi).
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
- // Compute and set byte_offset1 (start of capture).
- __ lea(rdi, Operand(rsi, rdx, times_1, 0));
- // Set byte_offset2.
- __ movq(rsi, rax);
- // Set byte_length.
- __ movq(rdx, rbx);
- // Isolate.
- __ LoadAddress(rcx, ExternalReference::isolate_address());
-#endif
- ExternalReference compare =
- ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
- __ CallCFunction(compare, num_arguments);
-
- // Restore original values before reacting on result value.
- __ Move(code_object_pointer(), masm_.CodeObject());
- __ pop(backtrack_stackpointer());
-#ifndef _WIN64
- __ pop(rdi);
- __ pop(rsi);
-#endif
-
- // Check if function returned non-zero for success or zero for failure.
- __ testq(rax, rax);
- BranchOrBacktrack(zero, on_no_match);
- // On success, increment position by length of capture.
- // Requires that rbx is callee save (true for both Win64 and AMD64 ABIs).
- __ addq(rdi, rbx);
- }
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerX64::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
-
- // Find length of back-referenced capture.
- __ movq(rdx, register_location(start_reg));
- __ movq(rax, register_location(start_reg + 1));
- __ subq(rax, rdx); // Length to check.
-
- // Fail on partial or illegal capture (start of capture after end of capture).
- // This must not happen (no back-reference can reference a capture that wasn't
- // closed before in the reg-exp).
- __ Check(greater_equal, "Invalid capture referenced");
-
- // Succeed on empty capture (including non-participating capture)
- __ j(equal, &fallthrough);
-
- // -----------------------
- // rdx - Start of capture
- // rax - length of capture
-
- // Check that there are sufficient characters left in the input.
- __ movl(rbx, rdi);
- __ addl(rbx, rax);
- BranchOrBacktrack(greater, on_no_match);
-
- // Compute pointers to match string and capture string
- __ lea(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match.
- __ addq(rdx, rsi); // Start of capture.
- __ lea(r9, Operand(rdx, rax, times_1, 0)); // End of capture
-
- // -----------------------
- // rbx - current capture character address.
- // rbx - current input character address .
- // r9 - end of input to match (capture length after rbx).
-
- Label loop;
- __ bind(&loop);
- if (mode_ == ASCII) {
- __ movzxbl(rax, Operand(rdx, 0));
- __ cmpb(rax, Operand(rbx, 0));
- } else {
- ASSERT(mode_ == UC16);
- __ movzxwl(rax, Operand(rdx, 0));
- __ cmpw(rax, Operand(rbx, 0));
- }
- BranchOrBacktrack(not_equal, on_no_match);
- // Increment pointers into capture and match string.
- __ addq(rbx, Immediate(char_size()));
- __ addq(rdx, Immediate(char_size()));
- // Check if we have reached end of match area.
- __ cmpq(rdx, r9);
- __ j(below, &loop);
-
- // Success.
- // Set current character position to position after match.
- __ movq(rdi, rbx);
- __ subq(rdi, rsi);
-
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerX64::CheckNotRegistersEqual(int reg1,
- int reg2,
- Label* on_not_equal) {
- __ movq(rax, register_location(reg1));
- __ cmpq(rax, register_location(reg2));
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerX64::CheckNotCharacter(uint32_t c,
- Label* on_not_equal) {
- __ cmpl(current_character(), Immediate(c));
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerX64::CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal) {
- __ movl(rax, current_character());
- __ and_(rax, Immediate(mask));
- __ cmpl(rax, Immediate(c));
- BranchOrBacktrack(equal, on_equal);
-}
-
-
-void RegExpMacroAssemblerX64::CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_not_equal) {
- __ movl(rax, current_character());
- __ and_(rax, Immediate(mask));
- __ cmpl(rax, Immediate(c));
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerX64::CheckNotCharacterAfterMinusAnd(
- uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal) {
- ASSERT(minus < String::kMaxUC16CharCode);
- __ lea(rax, Operand(current_character(), -minus));
- __ and_(rax, Immediate(mask));
- __ cmpl(rax, Immediate(c));
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match) {
- // Range checks (c in min..max) are generally implemented by an unsigned
- // (c - min) <= (max - min) check, using the sequence:
- // lea(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min))
- // cmp(rax, Immediate(max - min))
- switch (type) {
- case 's':
- // Match space-characters
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- Label success;
- __ cmpl(current_character(), Immediate(' '));
- __ j(equal, &success);
- // Check range 0x09..0x0d
- __ lea(rax, Operand(current_character(), -'\t'));
- __ cmpl(rax, Immediate('\r' - '\t'));
- BranchOrBacktrack(above, on_no_match);
- __ bind(&success);
- return true;
- }
- return false;
- case 'S':
- // Match non-space characters.
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- __ cmpl(current_character(), Immediate(' '));
- BranchOrBacktrack(equal, on_no_match);
- __ lea(rax, Operand(current_character(), -'\t'));
- __ cmpl(rax, Immediate('\r' - '\t'));
- BranchOrBacktrack(below_equal, on_no_match);
- return true;
- }
- return false;
- case 'd':
- // Match ASCII digits ('0'..'9')
- __ lea(rax, Operand(current_character(), -'0'));
- __ cmpl(rax, Immediate('9' - '0'));
- BranchOrBacktrack(above, on_no_match);
- return true;
- case 'D':
- // Match non ASCII-digits
- __ lea(rax, Operand(current_character(), -'0'));
- __ cmpl(rax, Immediate('9' - '0'));
- BranchOrBacktrack(below_equal, on_no_match);
- return true;
- case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ movl(rax, current_character());
- __ xor_(rax, Immediate(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ subl(rax, Immediate(0x0b));
- __ cmpl(rax, Immediate(0x0c - 0x0b));
- BranchOrBacktrack(below_equal, on_no_match);
- if (mode_ == UC16) {
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ subl(rax, Immediate(0x2028 - 0x0b));
- __ cmpl(rax, Immediate(0x2029 - 0x2028));
- BranchOrBacktrack(below_equal, on_no_match);
- }
- return true;
- }
- case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ movl(rax, current_character());
- __ xor_(rax, Immediate(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ subl(rax, Immediate(0x0b));
- __ cmpl(rax, Immediate(0x0c - 0x0b));
- if (mode_ == ASCII) {
- BranchOrBacktrack(above, on_no_match);
- } else {
- Label done;
- BranchOrBacktrack(below_equal, &done);
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ subl(rax, Immediate(0x2028 - 0x0b));
- __ cmpl(rax, Immediate(0x2029 - 0x2028));
- BranchOrBacktrack(above, on_no_match);
- __ bind(&done);
- }
- return true;
- }
- case 'w': {
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
- __ cmpl(current_character(), Immediate('z'));
- BranchOrBacktrack(above, on_no_match);
- }
- __ movq(rbx, ExternalReference::re_word_character_map());
- ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
- ExternalReference word_map = ExternalReference::re_word_character_map();
- __ testb(Operand(rbx, current_character(), times_1, 0),
- current_character());
- BranchOrBacktrack(zero, on_no_match);
- return true;
- }
- case 'W': {
- Label done;
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
- __ cmpl(current_character(), Immediate('z'));
- __ j(above, &done);
- }
- __ movq(rbx, ExternalReference::re_word_character_map());
- ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
- ExternalReference word_map = ExternalReference::re_word_character_map();
- __ testb(Operand(rbx, current_character(), times_1, 0),
- current_character());
- BranchOrBacktrack(not_zero, on_no_match);
- if (mode_ != ASCII) {
- __ bind(&done);
- }
- return true;
- }
-
- case '*':
- // Match any character.
- return true;
- // No custom implementation (yet): s(UC16), S(UC16).
- default:
- return false;
- }
-}
-
-
-void RegExpMacroAssemblerX64::Fail() {
- ASSERT(FAILURE == 0); // Return value for failure is zero.
- __ Set(rax, 0);
- __ jmp(&exit_label_);
-}
-
-
-Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
- // Finalize code - write the entry point code now we know how many
- // registers we need.
- // Entry code:
- __ bind(&entry_label_);
- // Start new stack frame.
- __ push(rbp);
- __ movq(rbp, rsp);
- // Save parameters and callee-save registers. Order here should correspond
- // to order of kBackup_ebx etc.
-#ifdef _WIN64
- // MSVC passes arguments in rcx, rdx, r8, r9, with backing stack slots.
- // Store register parameters in pre-allocated stack slots,
- __ movq(Operand(rbp, kInputString), rcx);
- __ movq(Operand(rbp, kStartIndex), rdx); // Passed as int32 in edx.
- __ movq(Operand(rbp, kInputStart), r8);
- __ movq(Operand(rbp, kInputEnd), r9);
- // Callee-save on Win64.
- __ push(rsi);
- __ push(rdi);
- __ push(rbx);
-#else
- // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9 (and then on stack).
- // Push register parameters on stack for reference.
- ASSERT_EQ(kInputString, -1 * kPointerSize);
- ASSERT_EQ(kStartIndex, -2 * kPointerSize);
- ASSERT_EQ(kInputStart, -3 * kPointerSize);
- ASSERT_EQ(kInputEnd, -4 * kPointerSize);
- ASSERT_EQ(kRegisterOutput, -5 * kPointerSize);
- ASSERT_EQ(kStackHighEnd, -6 * kPointerSize);
- __ push(rdi);
- __ push(rsi);
- __ push(rdx);
- __ push(rcx);
- __ push(r8);
- __ push(r9);
-
- __ push(rbx); // Callee-save
-#endif
-
- __ push(Immediate(0)); // Make room for "at start" constant.
-
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_.isolate());
- __ movq(rcx, rsp);
- __ movq(kScratchRegister, stack_limit);
- __ subq(rcx, Operand(kScratchRegister, 0));
- // Handle it if the stack pointer is already below the stack limit.
- __ j(below_equal, &stack_limit_hit);
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ cmpq(rcx, Immediate(num_registers_ * kPointerSize));
- __ j(above_equal, &stack_ok);
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ movq(rax, Immediate(EXCEPTION));
- __ jmp(&exit_label_);
-
- __ bind(&stack_limit_hit);
- __ Move(code_object_pointer(), masm_.CodeObject());
- CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp.
- __ testq(rax, rax);
- // If returned value is non-zero, we exit with the returned value as result.
- __ j(not_zero, &exit_label_);
-
- __ bind(&stack_ok);
-
- // Allocate space on stack for registers.
- __ subq(rsp, Immediate(num_registers_ * kPointerSize));
- // Load string length.
- __ movq(rsi, Operand(rbp, kInputEnd));
- // Load input position.
- __ movq(rdi, Operand(rbp, kInputStart));
- // Set up rdi to be negative offset from string end.
- __ subq(rdi, rsi);
- // Set rax to address of char before start of the string
- // (effectively string position -1).
- __ movq(rbx, Operand(rbp, kStartIndex));
- __ neg(rbx);
- if (mode_ == UC16) {
- __ lea(rax, Operand(rdi, rbx, times_2, -char_size()));
- } else {
- __ lea(rax, Operand(rdi, rbx, times_1, -char_size()));
- }
- // Store this value in a local variable, for use when clearing
- // position registers.
- __ movq(Operand(rbp, kInputStartMinusOne), rax);
-
- if (num_saved_registers_ > 0) {
- // Fill saved registers with initial value = start offset - 1
- // Fill in stack push order, to avoid accessing across an unwritten
- // page (a problem on Windows).
- __ movq(rcx, Immediate(kRegisterZero));
- Label init_loop;
- __ bind(&init_loop);
- __ movq(Operand(rbp, rcx, times_1, 0), rax);
- __ subq(rcx, Immediate(kPointerSize));
- __ cmpq(rcx,
- Immediate(kRegisterZero - num_saved_registers_ * kPointerSize));
- __ j(greater, &init_loop);
- }
- // Ensure that we have written to each stack page, in order. Skipping a page
- // on Windows can cause segmentation faults. Assuming page size is 4k.
- const int kPageSize = 4096;
- const int kRegistersPerPage = kPageSize / kPointerSize;
- for (int i = num_saved_registers_ + kRegistersPerPage - 1;
- i < num_registers_;
- i += kRegistersPerPage) {
- __ movq(register_location(i), rax); // One write every page.
- }
-
- // Initialize backtrack stack pointer.
- __ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
- // Initialize code object pointer.
- __ Move(code_object_pointer(), masm_.CodeObject());
- // Load previous char as initial value of current-character.
- Label at_start;
- __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
- __ j(equal, &at_start);
- LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
- __ jmp(&start_label_);
- __ bind(&at_start);
- __ movq(current_character(), Immediate('\n'));
- __ jmp(&start_label_);
-
-
- // Exit code:
- if (success_label_.is_linked()) {
- // Save captures when successful.
- __ bind(&success_label_);
- if (num_saved_registers_ > 0) {
- // copy captures to output
- __ movq(rdx, Operand(rbp, kStartIndex));
- __ movq(rbx, Operand(rbp, kRegisterOutput));
- __ movq(rcx, Operand(rbp, kInputEnd));
- __ subq(rcx, Operand(rbp, kInputStart));
- if (mode_ == UC16) {
- __ lea(rcx, Operand(rcx, rdx, times_2, 0));
- } else {
- __ addq(rcx, rdx);
- }
- for (int i = 0; i < num_saved_registers_; i++) {
- __ movq(rax, register_location(i));
- __ addq(rax, rcx); // Convert to index from start, not end.
- if (mode_ == UC16) {
- __ sar(rax, Immediate(1)); // Convert byte index to character index.
- }
- __ movl(Operand(rbx, i * kIntSize), rax);
- }
- }
- __ movq(rax, Immediate(SUCCESS));
- }
-
- // Exit and return rax
- __ bind(&exit_label_);
-
-#ifdef _WIN64
- // Restore callee save registers.
- __ lea(rsp, Operand(rbp, kLastCalleeSaveRegister));
- __ pop(rbx);
- __ pop(rdi);
- __ pop(rsi);
- // Stack now at rbp.
-#else
- // Restore callee save register.
- __ movq(rbx, Operand(rbp, kBackup_rbx));
- // Skip rsp to rbp.
- __ movq(rsp, rbp);
-#endif
- // Exit function frame, restore previous one.
- __ pop(rbp);
- __ ret(0);
-
- // Backtrack code (branch target for conditional backtracks).
- if (backtrack_label_.is_linked()) {
- __ bind(&backtrack_label_);
- Backtrack();
- }
-
- Label exit_with_exception;
-
- // Preempt-code
- if (check_preempt_label_.is_linked()) {
- SafeCallTarget(&check_preempt_label_);
-
- __ push(backtrack_stackpointer());
- __ push(rdi);
-
- CallCheckStackGuardState();
- __ testq(rax, rax);
- // If returning non-zero, we should end execution with the given
- // result as return value.
- __ j(not_zero, &exit_label_);
-
- // Restore registers.
- __ Move(code_object_pointer(), masm_.CodeObject());
- __ pop(rdi);
- __ pop(backtrack_stackpointer());
- // String might have moved: Reload esi from frame.
- __ movq(rsi, Operand(rbp, kInputEnd));
- SafeReturn();
- }
-
- // Backtrack stack overflow code.
- if (stack_overflow_label_.is_linked()) {
- SafeCallTarget(&stack_overflow_label_);
- // Reached if the backtrack-stack limit has been hit.
-
- Label grow_failed;
- // Save registers before calling C function
-#ifndef _WIN64
- // Callee-save in Microsoft 64-bit ABI, but not in AMD64 ABI.
- __ push(rsi);
- __ push(rdi);
-#endif
-
- // Call GrowStack(backtrack_stackpointer())
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments);
-#ifdef _WIN64
- // Microsoft passes parameters in rcx, rdx, r8.
- // First argument, backtrack stackpointer, is already in rcx.
- __ lea(rdx, Operand(rbp, kStackHighEnd)); // Second argument
- __ LoadAddress(r8, ExternalReference::isolate_address());
-#else
- // AMD64 ABI passes parameters in rdi, rsi, rdx.
- __ movq(rdi, backtrack_stackpointer()); // First argument.
- __ lea(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
- __ LoadAddress(rdx, ExternalReference::isolate_address());
-#endif
- ExternalReference grow_stack =
- ExternalReference::re_grow_stack(masm_.isolate());
- __ CallCFunction(grow_stack, num_arguments);
- // If return NULL, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
- __ testq(rax, rax);
- __ j(equal, &exit_with_exception);
- // Otherwise use return value as new stack pointer.
- __ movq(backtrack_stackpointer(), rax);
- // Restore saved registers and continue.
- __ Move(code_object_pointer(), masm_.CodeObject());
-#ifndef _WIN64
- __ pop(rdi);
- __ pop(rsi);
-#endif
- SafeReturn();
- }
-
- if (exit_with_exception.is_linked()) {
- // If any of the code above needed to exit with an exception.
- __ bind(&exit_with_exception);
- // Exit with Result EXCEPTION(-1) to signal thrown exception.
- __ movq(rax, Immediate(EXCEPTION));
- __ jmp(&exit_label_);
- }
-
- FixupCodeRelativePositions();
-
- CodeDesc code_desc;
- masm_.GetCode(&code_desc);
- Isolate* isolate = ISOLATE;
- Handle<Code> code = isolate->factory()->NewCode(
- code_desc, Code::ComputeFlags(Code::REGEXP),
- masm_.CodeObject());
- PROFILE(isolate, RegExpCodeCreateEvent(*code, *source));
- return Handle<Object>::cast(code);
-}
-
-
-void RegExpMacroAssemblerX64::GoTo(Label* to) {
- BranchOrBacktrack(no_condition, to);
-}
-
-
-void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
- int comparand,
- Label* if_ge) {
- __ cmpq(register_location(reg), Immediate(comparand));
- BranchOrBacktrack(greater_equal, if_ge);
-}
-
-
-void RegExpMacroAssemblerX64::IfRegisterLT(int reg,
- int comparand,
- Label* if_lt) {
- __ cmpq(register_location(reg), Immediate(comparand));
- BranchOrBacktrack(less, if_lt);
-}
-
-
-void RegExpMacroAssemblerX64::IfRegisterEqPos(int reg,
- Label* if_eq) {
- __ cmpq(rdi, register_location(reg));
- BranchOrBacktrack(equal, if_eq);
-}
-
-
-RegExpMacroAssembler::IrregexpImplementation
- RegExpMacroAssemblerX64::Implementation() {
- return kX64Implementation;
-}
-
-
-void RegExpMacroAssemblerX64::LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters) {
- ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
- ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
- if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
- }
- LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
-
-
-void RegExpMacroAssemblerX64::PopCurrentPosition() {
- Pop(rdi);
-}
-
-
-void RegExpMacroAssemblerX64::PopRegister(int register_index) {
- Pop(rax);
- __ movq(register_location(register_index), rax);
-}
-
-
-void RegExpMacroAssemblerX64::PushBacktrack(Label* label) {
- Push(label);
- CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerX64::PushCurrentPosition() {
- Push(rdi);
-}
-
-
-void RegExpMacroAssemblerX64::PushRegister(int register_index,
- StackCheckFlag check_stack_limit) {
- __ movq(rax, register_location(register_index));
- Push(rax);
- if (check_stack_limit) CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerX64::ReadCurrentPositionFromRegister(int reg) {
- __ movq(rdi, register_location(reg));
-}
-
-
-void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) {
- __ movq(backtrack_stackpointer(), register_location(reg));
- __ addq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
-}
-
-
-void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
- NearLabel after_position;
- __ cmpq(rdi, Immediate(-by * char_size()));
- __ j(greater_equal, &after_position);
- __ movq(rdi, Immediate(-by * char_size()));
- // On RegExp code entry (where this operation is used), the character before
- // the current position is expected to be already loaded.
- // We have advanced the position, so it's safe to read backwards.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&after_position);
-}
-
-
-void RegExpMacroAssemblerX64::SetRegister(int register_index, int to) {
- ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
- __ movq(register_location(register_index), Immediate(to));
-}
-
-
-void RegExpMacroAssemblerX64::Succeed() {
- __ jmp(&success_label_);
-}
-
-
-void RegExpMacroAssemblerX64::WriteCurrentPositionToRegister(int reg,
- int cp_offset) {
- if (cp_offset == 0) {
- __ movq(register_location(reg), rdi);
- } else {
- __ lea(rax, Operand(rdi, cp_offset * char_size()));
- __ movq(register_location(reg), rax);
- }
-}
-
-
-void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) {
- ASSERT(reg_from <= reg_to);
- __ movq(rax, Operand(rbp, kInputStartMinusOne));
- for (int reg = reg_from; reg <= reg_to; reg++) {
- __ movq(register_location(reg), rax);
- }
-}
-
-
-void RegExpMacroAssemblerX64::WriteStackPointerToRegister(int reg) {
- __ movq(rax, backtrack_stackpointer());
- __ subq(rax, Operand(rbp, kStackHighEnd));
- __ movq(register_location(reg), rax);
-}
-
-
-// Private methods:
-
-void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
- // This function call preserves no register values. Caller should
- // store anything volatile in a C call or overwritten by this function.
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments);
-#ifdef _WIN64
- // Second argument: Code* of self. (Do this before overwriting r8).
- __ movq(rdx, code_object_pointer());
- // Third argument: RegExp code frame pointer.
- __ movq(r8, rbp);
- // First argument: Next address on the stack (will be address of
- // return address).
- __ lea(rcx, Operand(rsp, -kPointerSize));
-#else
- // Third argument: RegExp code frame pointer.
- __ movq(rdx, rbp);
- // Second argument: Code* of self.
- __ movq(rsi, code_object_pointer());
- // First argument: Next address on the stack (will be address of
- // return address).
- __ lea(rdi, Operand(rsp, -kPointerSize));
-#endif
- ExternalReference stack_check =
- ExternalReference::re_check_stack_guard_state(masm_.isolate());
- __ CallCFunction(stack_check, num_arguments);
-}
-
-
-// Helper function for reading a value out of a stack frame.
-template <typename T>
-static T& frame_entry(Address re_frame, int frame_offset) {
- return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
-}
-
-
-int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame) {
- Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
- if (isolate->stack_guard()->IsStackOverflow()) {
- isolate->StackOverflow();
- return EXCEPTION;
- }
-
- // If not real stack overflow the stack guard was used to interrupt
- // execution for another purpose.
-
- // If this is a direct call from JavaScript retry the RegExp forcing the call
- // through the runtime system. Currently the direct call cannot handle a GC.
- if (frame_entry<int>(re_frame, kDirectCall) == 1) {
- return RETRY;
- }
-
- // Prepare for possible GC.
- HandleScope handles;
- Handle<Code> code_handle(re_code);
-
- Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
- // Current string.
- bool is_ascii = subject->IsAsciiRepresentation();
-
- ASSERT(re_code->instruction_start() <= *return_address);
- ASSERT(*return_address <=
- re_code->instruction_start() + re_code->instruction_size());
-
- MaybeObject* result = Execution::HandleStackGuardInterrupt();
-
- if (*code_handle != re_code) { // Return address no longer valid
- intptr_t delta = *code_handle - re_code;
- // Overwrite the return address on the stack.
- *return_address += delta;
- }
-
- if (result->IsException()) {
- return EXCEPTION;
- }
-
- // String might have changed.
- if (subject->IsAsciiRepresentation() != is_ascii) {
- // If we changed between an ASCII and an UC16 string, the specialized
- // code cannot be used, and we need to restart regexp matching from
- // scratch (including, potentially, compiling a new version of the code).
- return RETRY;
- }
-
- // Otherwise, the content of the string might have moved. It must still
- // be a sequential or external string with the same content.
- // Update the start and end pointers in the stack frame to the current
- // location (whether it has actually moved or not).
- ASSERT(StringShape(*subject).IsSequential() ||
- StringShape(*subject).IsExternal());
-
- // The original start address of the characters to match.
- const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
-
- // Find the current start address of the same character at the current string
- // position.
- int start_index = frame_entry<int>(re_frame, kStartIndex);
- const byte* new_address = StringCharacterPosition(*subject, start_index);
-
- if (start_address != new_address) {
- // If there is a difference, update the object pointer and start and end
- // addresses in the RegExp stack frame to match the new value.
- const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
- int byte_length = static_cast<int>(end_address - start_address);
- frame_entry<const String*>(re_frame, kInputString) = *subject;
- frame_entry<const byte*>(re_frame, kInputStart) = new_address;
- frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
- }
-
- return 0;
-}
-
-
-Operand RegExpMacroAssemblerX64::register_location(int register_index) {
- ASSERT(register_index < (1<<30));
- if (num_registers_ <= register_index) {
- num_registers_ = register_index + 1;
- }
- return Operand(rbp, kRegisterZero - register_index * kPointerSize);
-}
-
-
-void RegExpMacroAssemblerX64::CheckPosition(int cp_offset,
- Label* on_outside_input) {
- __ cmpl(rdi, Immediate(-cp_offset * char_size()));
- BranchOrBacktrack(greater_equal, on_outside_input);
-}
-
-
-void RegExpMacroAssemblerX64::BranchOrBacktrack(Condition condition,
- Label* to) {
- if (condition < 0) { // No condition
- if (to == NULL) {
- Backtrack();
- return;
- }
- __ jmp(to);
- return;
- }
- if (to == NULL) {
- __ j(condition, &backtrack_label_);
- return;
- }
- __ j(condition, to);
-}
-
-
-void RegExpMacroAssemblerX64::SafeCall(Label* to) {
- __ call(to);
-}
-
-
-void RegExpMacroAssemblerX64::SafeCallTarget(Label* label) {
- __ bind(label);
- __ subq(Operand(rsp, 0), code_object_pointer());
-}
-
-
-void RegExpMacroAssemblerX64::SafeReturn() {
- __ addq(Operand(rsp, 0), code_object_pointer());
- __ ret(0);
-}
-
-
-void RegExpMacroAssemblerX64::Push(Register source) {
- ASSERT(!source.is(backtrack_stackpointer()));
- // Notice: This updates flags, unlike normal Push.
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
- __ movl(Operand(backtrack_stackpointer(), 0), source);
-}
-
-
-void RegExpMacroAssemblerX64::Push(Immediate value) {
- // Notice: This updates flags, unlike normal Push.
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
- __ movl(Operand(backtrack_stackpointer(), 0), value);
-}
-
-
-void RegExpMacroAssemblerX64::FixupCodeRelativePositions() {
- for (int i = 0, n = code_relative_fixup_positions_.length(); i < n; i++) {
- int position = code_relative_fixup_positions_[i];
- // The position succeeds a relative label offset from position.
- // Patch the relative offset to be relative to the Code object pointer
- // instead.
- int patch_position = position - kIntSize;
- int offset = masm_.long_at(patch_position);
- masm_.long_at_put(patch_position,
- offset
- + position
- + Code::kHeaderSize
- - kHeapObjectTag);
- }
- code_relative_fixup_positions_.Clear();
-}
-
-
-void RegExpMacroAssemblerX64::Push(Label* backtrack_target) {
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
- __ movl(Operand(backtrack_stackpointer(), 0), backtrack_target);
- MarkPositionForCodeRelativeFixup();
-}
-
-
-void RegExpMacroAssemblerX64::Pop(Register target) {
- ASSERT(!target.is(backtrack_stackpointer()));
- __ movsxlq(target, Operand(backtrack_stackpointer(), 0));
- // Notice: This updates flags, unlike normal Pop.
- __ addq(backtrack_stackpointer(), Immediate(kIntSize));
-}
-
-
-void RegExpMacroAssemblerX64::Drop() {
- __ addq(backtrack_stackpointer(), Immediate(kIntSize));
-}
-
-
-void RegExpMacroAssemblerX64::CheckPreemption() {
- // Check for preemption.
- Label no_preempt;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_.isolate());
- __ load_rax(stack_limit);
- __ cmpq(rsp, rax);
- __ j(above, &no_preempt);
-
- SafeCall(&check_preempt_label_);
-
- __ bind(&no_preempt);
-}
-
-
-void RegExpMacroAssemblerX64::CheckStackLimit() {
- Label no_stack_overflow;
- ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit(masm_.isolate());
- __ load_rax(stack_limit);
- __ cmpq(backtrack_stackpointer(), rax);
- __ j(above, &no_stack_overflow);
-
- SafeCall(&stack_overflow_label_);
-
- __ bind(&no_stack_overflow);
-}
-
-
-void RegExpMacroAssemblerX64::LoadCurrentCharacterUnchecked(int cp_offset,
- int characters) {
- if (mode_ == ASCII) {
- if (characters == 4) {
- __ movl(current_character(), Operand(rsi, rdi, times_1, cp_offset));
- } else if (characters == 2) {
- __ movzxwl(current_character(), Operand(rsi, rdi, times_1, cp_offset));
- } else {
- ASSERT(characters == 1);
- __ movzxbl(current_character(), Operand(rsi, rdi, times_1, cp_offset));
- }
- } else {
- ASSERT(mode_ == UC16);
- if (characters == 2) {
- __ movl(current_character(),
- Operand(rsi, rdi, times_1, cp_offset * sizeof(uc16)));
- } else {
- ASSERT(characters == 1);
- __ movzxwl(current_character(),
- Operand(rsi, rdi, times_1, cp_offset * sizeof(uc16)));
- }
- }
-}
-
-#undef __
-
-#endif // V8_INTERPRETED_REGEXP
-
-}} // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.h b/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.h
deleted file mode 100644
index a83f8cb..0000000
--- a/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.h
+++ /dev/null
@@ -1,282 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
-#define V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-
-class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
- public:
- RegExpMacroAssemblerX64(Mode mode, int registers_to_save);
- virtual ~RegExpMacroAssemblerX64();
- virtual int stack_limit_slack();
- virtual void AdvanceCurrentPosition(int by);
- virtual void AdvanceRegister(int reg, int by);
- virtual void Backtrack();
- virtual void Bind(Label* label);
- virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckCharacter(uint32_t c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal);
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
- virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
- // A "greedy loop" is a loop that is both greedy and with a simple
- // body. It has a particularly simple implementation.
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- Label* on_no_match);
- virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
- virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal);
- // Checks whether the given offset from the current position is before
- // the end of the string.
- virtual void CheckPosition(int cp_offset, Label* on_outside_input);
- virtual bool CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match);
- virtual void Fail();
- virtual Handle<Object> GetCode(Handle<String> source);
- virtual void GoTo(Label* label);
- virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
- virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
- virtual void IfRegisterEqPos(int reg, Label* if_eq);
- virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1);
- virtual void PopCurrentPosition();
- virtual void PopRegister(int register_index);
- virtual void PushBacktrack(Label* label);
- virtual void PushCurrentPosition();
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit);
- virtual void ReadCurrentPositionFromRegister(int reg);
- virtual void ReadStackPointerFromRegister(int reg);
- virtual void SetCurrentPositionFromEnd(int by);
- virtual void SetRegister(int register_index, int to);
- virtual void Succeed();
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
- virtual void ClearRegisters(int reg_from, int reg_to);
- virtual void WriteStackPointerToRegister(int reg);
-
- static Result Match(Handle<Code> regexp,
- Handle<String> subject,
- int* offsets_vector,
- int offsets_vector_length,
- int previous_index,
- Isolate* isolate);
-
- static Result Execute(Code* code,
- String* input,
- int start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- bool at_start);
-
- // Called from RegExp if the stack-guard is triggered.
- // If the code object is relocated, the return address is fixed before
- // returning.
- static int CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame);
-
- private:
- // Offsets from rbp of function parameters and stored registers.
- static const int kFramePointer = 0;
- // Above the frame pointer - function parameters and return address.
- static const int kReturn_eip = kFramePointer + kPointerSize;
- static const int kFrameAlign = kReturn_eip + kPointerSize;
-
-#ifdef _WIN64
- // Parameters (first four passed as registers, but with room on stack).
- // In Microsoft 64-bit Calling Convention, there is room on the callers
- // stack (before the return address) to spill parameter registers. We
- // use this space to store the register passed parameters.
- static const int kInputString = kFrameAlign;
- // StartIndex is passed as 32 bit int.
- static const int kStartIndex = kInputString + kPointerSize;
- static const int kInputStart = kStartIndex + kPointerSize;
- static const int kInputEnd = kInputStart + kPointerSize;
- static const int kRegisterOutput = kInputEnd + kPointerSize;
- static const int kStackHighEnd = kRegisterOutput + kPointerSize;
- // DirectCall is passed as 32 bit int (values 0 or 1).
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
-#else
- // In AMD64 ABI Calling Convention, the first six integer parameters
- // are passed as registers, and caller must allocate space on the stack
- // if it wants them stored. We push the parameters after the frame pointer.
- static const int kInputString = kFramePointer - kPointerSize;
- static const int kStartIndex = kInputString - kPointerSize;
- static const int kInputStart = kStartIndex - kPointerSize;
- static const int kInputEnd = kInputStart - kPointerSize;
- static const int kRegisterOutput = kInputEnd - kPointerSize;
- static const int kStackHighEnd = kRegisterOutput - kPointerSize;
- static const int kDirectCall = kFrameAlign;
- static const int kIsolate = kDirectCall + kPointerSize;
-#endif
-
-#ifdef _WIN64
- // Microsoft calling convention has three callee-saved registers
- // (that we are using). We push these after the frame pointer.
- static const int kBackup_rsi = kFramePointer - kPointerSize;
- static const int kBackup_rdi = kBackup_rsi - kPointerSize;
- static const int kBackup_rbx = kBackup_rdi - kPointerSize;
- static const int kLastCalleeSaveRegister = kBackup_rbx;
-#else
- // AMD64 Calling Convention has only one callee-save register that
- // we use. We push this after the frame pointer (and after the
- // parameters).
- static const int kBackup_rbx = kStackHighEnd - kPointerSize;
- static const int kLastCalleeSaveRegister = kBackup_rbx;
-#endif
-
- // When adding local variables remember to push space for them in
- // the frame in GetCode.
- static const int kInputStartMinusOne =
- kLastCalleeSaveRegister - kPointerSize;
-
- // First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
-
- // Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
-
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
- // Check whether preemption has been requested.
- void CheckPreemption();
-
- // Check whether we are exceeding the stack limit on the backtrack stack.
- void CheckStackLimit();
-
- // Generate a call to CheckStackGuardState.
- void CallCheckStackGuardState();
-
- // The rbp-relative location of a regexp register.
- Operand register_location(int register_index);
-
- // The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return rdx; }
-
- // The register containing the backtrack stack top. Provides a meaningful
- // name to the register.
- inline Register backtrack_stackpointer() { return rcx; }
-
- // The registers containing a self pointer to this code's Code object.
- inline Register code_object_pointer() { return r8; }
-
- // Byte size of chars in the string to match (decided by the Mode argument)
- inline int char_size() { return static_cast<int>(mode_); }
-
- // Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
- void BranchOrBacktrack(Condition condition, Label* to);
-
- void MarkPositionForCodeRelativeFixup() {
- code_relative_fixup_positions_.Add(masm_.pc_offset());
- }
-
- void FixupCodeRelativePositions();
-
- // Call and return internally in the generated code in a way that
- // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
- inline void SafeCall(Label* to);
- inline void SafeCallTarget(Label* label);
- inline void SafeReturn();
-
- // Pushes the value of a register on the backtrack stack. Decrements the
- // stack pointer (rcx) by a word size and stores the register's value there.
- inline void Push(Register source);
-
- // Pushes a value on the backtrack stack. Decrements the stack pointer (rcx)
- // by a word size and stores the value there.
- inline void Push(Immediate value);
-
- // Pushes the Code object relative offset of a label on the backtrack stack
- // (i.e., a backtrack target). Decrements the stack pointer (rcx)
- // by a word size and stores the value there.
- inline void Push(Label* label);
-
- // Pops a value from the backtrack stack. Reads the word at the stack pointer
- // (rcx) and increments it by a word size.
- inline void Pop(Register target);
-
- // Drops the top value from the backtrack stack without reading it.
- // Increments the stack pointer (rcx) by a word size.
- inline void Drop();
-
- MacroAssembler masm_;
- MacroAssembler::NoRootArrayScope no_root_array_scope_;
-
- ZoneList<int> code_relative_fixup_positions_;
-
- // Which mode to generate code for (ASCII or UC16).
- Mode mode_;
-
- // One greater than maximal register index actually used.
- int num_registers_;
-
- // Number of registers to output at the end (the saved registers
- // are always 0..num_saved_registers_-1)
- int num_saved_registers_;
-
- // Labels used internally.
- Label entry_label_;
- Label start_label_;
- Label success_label_;
- Label backtrack_label_;
- Label exit_label_;
- Label check_preempt_label_;
- Label stack_overflow_label_;
-};
-
-#endif // V8_INTERPRETED_REGEXP
-
-}} // namespace v8::internal
-
-#endif // V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
diff --git a/src/3rdparty/v8/src/x64/register-allocator-x64-inl.h b/src/3rdparty/v8/src/x64/register-allocator-x64-inl.h
deleted file mode 100644
index 5df3d54..0000000
--- a/src/3rdparty/v8/src/x64/register-allocator-x64-inl.h
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
-#define V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
- return reg.is(rsp) || reg.is(rbp) || reg.is(rsi) ||
- reg.is(kScratchRegister) || reg.is(kRootRegister) ||
- reg.is(kSmiConstantRegister);
-}
-
-
-// The register allocator uses small integers to represent the
-// non-reserved assembler registers.
-int RegisterAllocator::ToNumber(Register reg) {
- ASSERT(reg.is_valid() && !IsReserved(reg));
- const int kNumbers[] = {
- 0, // rax
- 2, // rcx
- 3, // rdx
- 1, // rbx
- -1, // rsp Stack pointer.
- -1, // rbp Frame pointer.
- -1, // rsi Context.
- 4, // rdi
- 5, // r8
- 6, // r9
- -1, // r10 Scratch register.
- 8, // r11
- -1, // r12 Smi constant.
- -1, // r13 Roots array. This is callee saved.
- 7, // r14
- 9 // r15
- };
- return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
- ASSERT(num >= 0 && num < kNumRegisters);
- const Register kRegisters[] =
- { rax, rbx, rcx, rdx, rdi, r8, r9, r14, r11, r15 };
- return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
- Reset();
- // The non-reserved rdi register is live on JS function entry.
- Use(rdi); // JS function.
-}
-} } // namespace v8::internal
-
-#endif // V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
diff --git a/src/3rdparty/v8/src/x64/register-allocator-x64.cc b/src/3rdparty/v8/src/x64/register-allocator-x64.cc
deleted file mode 100644
index 65189f5..0000000
--- a/src/3rdparty/v8/src/x64/register-allocator-x64.cc
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-void Result::ToRegister() {
- ASSERT(is_valid());
- if (is_constant()) {
- CodeGenerator* code_generator =
- CodeGeneratorScope::Current(Isolate::Current());
- Result fresh = code_generator->allocator()->Allocate();
- ASSERT(fresh.is_valid());
- code_generator->masm()->Move(fresh.reg(), handle());
- // This result becomes a copy of the fresh one.
- fresh.set_type_info(type_info());
- *this = fresh;
- }
- ASSERT(is_register());
-}
-
-
-void Result::ToRegister(Register target) {
- ASSERT(is_valid());
- CodeGenerator* code_generator =
- CodeGeneratorScope::Current(Isolate::Current());
- if (!is_register() || !reg().is(target)) {
- Result fresh = code_generator->allocator()->Allocate(target);
- ASSERT(fresh.is_valid());
- if (is_register()) {
- code_generator->masm()->movq(fresh.reg(), reg());
- } else {
- ASSERT(is_constant());
- code_generator->masm()->Move(fresh.reg(), handle());
- }
- fresh.set_type_info(type_info());
- *this = fresh;
- } else if (is_register() && reg().is(target)) {
- ASSERT(code_generator->has_valid_frame());
- code_generator->frame()->Spill(target);
- ASSERT(code_generator->allocator()->count(target) == 1);
- }
- ASSERT(is_register());
- ASSERT(reg().is(target));
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
- // This function is not used in 64-bit code.
- UNREACHABLE();
- return Result();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/register-allocator-x64.h b/src/3rdparty/v8/src/x64/register-allocator-x64.h
deleted file mode 100644
index a2884d9..0000000
--- a/src/3rdparty/v8/src/x64/register-allocator-x64.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_REGISTER_ALLOCATOR_X64_H_
-#define V8_X64_REGISTER_ALLOCATOR_X64_H_
-
-namespace v8 {
-namespace internal {
-
-class RegisterAllocatorConstants : public AllStatic {
- public:
- static const int kNumRegisters = 10;
- static const int kInvalidRegister = -1;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_X64_REGISTER_ALLOCATOR_X64_H_
diff --git a/src/3rdparty/v8/src/x64/simulator-x64.cc b/src/3rdparty/v8/src/x64/simulator-x64.cc
deleted file mode 100644
index 209aa2d..0000000
--- a/src/3rdparty/v8/src/x64/simulator-x64.cc
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/src/3rdparty/v8/src/x64/simulator-x64.h b/src/3rdparty/v8/src/x64/simulator-x64.h
deleted file mode 100644
index cfaa5b8..0000000
--- a/src/3rdparty/v8/src/x64/simulator-x64.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_SIMULATOR_X64_H_
-#define V8_X64_SIMULATOR_X64_H_
-
-#include "allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// Since there is no simulator for the x64 architecture the only thing we can
-// do is to call the entry directly.
-// TODO(X64): Don't pass p0, since it isn't used?
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-typedef int (*regexp_matcher)(String*, int, const byte*,
- const byte*, int*, Address, int, Isolate*);
-
-// Call the generated regexp code directly. The code at the entry address should
-// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
- (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7))
-
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- (reinterpret_cast<TryCatch*>(try_catch_address))
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on x64 uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch() { }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_X64_SIMULATOR_X64_H_
diff --git a/src/3rdparty/v8/src/x64/stub-cache-x64.cc b/src/3rdparty/v8/src/x64/stub-cache-x64.cc
deleted file mode 100644
index 7494fe0..0000000
--- a/src/3rdparty/v8/src/x64/stub-cache-x64.cc
+++ /dev/null
@@ -1,3460 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "ic-inl.h"
-#include "codegen-inl.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register name,
- Register offset) {
- ASSERT_EQ(8, kPointerSize);
- ASSERT_EQ(16, sizeof(StubCache::Entry));
- // The offset register holds the entry offset times four (due to masking
- // and shifting optimizations).
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- Label miss;
-
- __ LoadAddress(kScratchRegister, key_offset);
- // Check that the key in the entry matches the name.
- // Multiply entry offset by 16 to get the entry address. Since the
- // offset register already holds the entry offset times four, multiply
- // by a further four.
- __ cmpl(name, Operand(kScratchRegister, offset, times_4, 0));
- __ j(not_equal, &miss);
- // Get the code entry from the cache.
- // Use key_offset + kPointerSize, rather than loading value_offset.
- __ movq(kScratchRegister,
- Operand(kScratchRegister, offset, times_4, kPointerSize));
- // Check that the flags match what we're looking for.
- __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
- __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
- __ cmpl(offset, Immediate(flags));
- __ j(not_equal, &miss);
-
- // Jump to the first instruction in the code stub.
- __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(kScratchRegister);
-
- __ bind(&miss);
-}
-
-
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be a symbol and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- String* name,
- Register r0,
- Register r1) {
- ASSERT(name->IsSymbol());
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1);
-
- Label done;
- __ movq(r0, FieldOperand(receiver, HeapObject::kMapOffset));
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- __ testb(FieldOperand(r0, Map::kBitFieldOffset),
- Immediate(kInterceptorOrAccessCheckNeededMask));
- __ j(not_zero, miss_label);
-
- // Check that receiver is a JSObject.
- __ CmpInstanceType(r0, FIRST_JS_OBJECT_TYPE);
- __ j(below, miss_label);
-
- // Load properties array.
- Register properties = r0;
- __ movq(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
-
- // Check that the properties array is a dictionary.
- __ CompareRoot(FieldOperand(properties, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, miss_label);
-
- // Compute the capacity mask.
- const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
-
- // Generate an unrolled loop that performs a few probes before
- // giving up.
- static const int kProbes = 4;
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
-
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the null value).
- for (int i = 0; i < kProbes; i++) {
- // r0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = r1;
- // Capacity is smi 2^n.
- __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
- __ decl(index);
- __ and_(index,
- Immediate(name->Hash() + StringDictionary::GetProbeOffset(i)));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
-
- Register entity_name = r1;
- // Having undefined at this place means the name is not contained.
- ASSERT_EQ(kSmiTagSize, 1);
- __ movq(entity_name, Operand(properties, index, times_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
- // __ jmp(miss_label);
- if (i != kProbes - 1) {
- __ j(equal, &done);
-
- // Stop if found the property.
- __ Cmp(entity_name, Handle<String>(name));
- __ j(equal, miss_label);
-
- // Check if the entry name is not a symbol.
- __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
- __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
- Immediate(kIsSymbolMask));
- __ j(zero, miss_label);
- } else {
- // Give up probing if still not found the undefined value.
- __ j(not_equal, miss_label);
- }
- }
-
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1);
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2) {
- Isolate* isolate = masm->isolate();
- Label miss;
- USE(extra); // The register extra is not used on the X64 platform.
- USE(extra2); // The register extra2 is not used on the X64 platform.
- // Make sure that code is valid. The shifting code relies on the
- // entry size being 16.
- ASSERT(sizeof(Entry) == 16);
-
- // Make sure the flags do not name a specific type.
- ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Make sure that there are no register conflicts.
- ASSERT(!scratch.is(receiver));
- ASSERT(!scratch.is(name));
-
- // Check scratch register is valid, extra and extra2 are unused.
- ASSERT(!scratch.is(no_reg));
- ASSERT(extra2.is(no_reg));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ movl(scratch, FieldOperand(name, String::kHashFieldOffset));
- // Use only the low 32 bits of the map pointer.
- __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
-
- // Probe the primary table.
- ProbeTable(isolate, masm, flags, kPrimary, name, scratch);
-
- // Primary miss: Compute hash for secondary probe.
- __ movl(scratch, FieldOperand(name, String::kHashFieldOffset));
- __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
- __ subl(scratch, name);
- __ addl(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
-
- // Probe the secondary table.
- ProbeTable(isolate, masm, flags, kSecondary, name, scratch);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
-}
-
-
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype) {
- // Load the global or builtins object from the current context.
- __ movq(prototype,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
- __ movq(prototype,
- FieldOperand(prototype, GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
- __ movq(prototype, Operand(prototype, Context::SlotOffset(index)));
- // Load the initial map. The global functions all have initial maps.
- __ movq(prototype,
- FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the prototype from the initial map.
- __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
- Isolate* isolate = masm->isolate();
- // Check we're still in the same context.
- __ Move(prototype, isolate->global());
- __ cmpq(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)),
- prototype);
- __ j(not_equal, miss);
- // Get the global function with the given index.
- JSFunction* function =
- JSFunction::cast(isolate->global_context()->get(index));
- // Load its initial map. The global functions all have initial maps.
- __ Move(prototype, Handle<Map>(function->initial_map()));
- // Load the prototype from the initial map.
- __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* miss_label) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss_label);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, miss_label);
-
- // Load length directly from the JS array.
- __ movq(rax, FieldOperand(receiver, JSArray::kLengthOffset));
- __ ret(0);
-}
-
-
-// Generate code to check if an object is a string. If the object is
-// a string, the map's instance type is left in the scratch register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* smi,
- Label* non_string_object) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, smi);
-
- // Check that the object is a string.
- __ movq(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movzxbq(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- ASSERT(kNotStringTag != 0);
- __ testl(scratch, Immediate(kNotStringTag));
- __ j(not_zero, non_string_object);
-}
-
-
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss,
- bool support_wrappers) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss,
- support_wrappers ? &check_wrapper : miss);
-
- // Load length directly from the string.
- __ movq(rax, FieldOperand(receiver, String::kLengthOffset));
- __ ret(0);
-
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
- __ j(not_equal, miss);
-
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
- }
-}
-
-
-void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
- Register receiver,
- Register result,
- Register scratch,
- Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, result, miss_label);
- if (!result.is(rax)) __ movq(rax, result);
- __ ret(0);
-}
-
-
-// Load a fast property out of a holder object (src). In-object properties
-// are loaded directly otherwise the property is loaded from the properties
-// fixed array.
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst, Register src,
- JSObject* holder, int index) {
- // Adjust for the number of properties stored in the holder.
- index -= holder->map()->inobject_properties();
- if (index < 0) {
- // Get the property straight out of the holder.
- int offset = holder->map()->instance_size() + (index * kPointerSize);
- __ movq(dst, FieldOperand(src, offset));
- } else {
- // Calculate the offset into the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- __ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset));
- __ movq(dst, FieldOperand(dst, offset));
- }
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- JSObject* holder_obj) {
- __ push(name);
- InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
- __ Move(kScratchRegister, Handle<Object>(interceptor));
- __ push(kScratchRegister);
- __ push(receiver);
- __ push(holder);
- __ push(FieldOperand(kScratchRegister, InterceptorInfo::kDataOffset));
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- JSObject* holder_obj) {
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
- masm->isolate());
- __ movq(rax, Immediate(5));
- __ LoadAddress(rbx, ref);
-
- CEntryStub stub(1);
- __ CallStub(&stub);
-}
-
-
-// Number of pointers to be reserved on stack for fast API call.
-static const int kFastApiCallArguments = 3;
-
-
-// Reserves space for the extra arguments to API function in the
-// caller's frame.
-//
-// These arguments are set by CheckPrototypes and GenerateFastApiCall.
-static void ReserveSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : last argument in the internal frame of the caller
- // -----------------------------------
- __ movq(scratch, Operand(rsp, 0));
- __ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
- __ movq(Operand(rsp, 0), scratch);
- __ Move(scratch, Smi::FromInt(0));
- for (int i = 1; i <= kFastApiCallArguments; i++) {
- __ movq(Operand(rsp, i * kPointerSize), scratch);
- }
-}
-
-
-// Undoes the effects of ReserveSpaceForFastApiCall.
-static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address.
- // -- rsp[8] : last fast api call extra argument.
- // -- ...
- // -- rsp[kFastApiCallArguments * 8] : first fast api call extra argument.
- // -- rsp[kFastApiCallArguments * 8 + 8] : last argument in the internal
- // frame.
- // -----------------------------------
- __ movq(scratch, Operand(rsp, 0));
- __ movq(Operand(rsp, kFastApiCallArguments * kPointerSize), scratch);
- __ addq(rsp, Immediate(kPointerSize * kFastApiCallArguments));
-}
-
-
-// Generates call to API function.
-static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : object passing the type check
- // (last fast api call extra argument,
- // set by CheckPrototypes)
- // -- rsp[16] : api function
- // (first fast api call extra argument)
- // -- rsp[24] : api call data
- // -- rsp[32] : last argument
- // -- ...
- // -- rsp[(argc + 3) * 8] : first argument
- // -- rsp[(argc + 4) * 8] : receiver
- // -----------------------------------
- // Get the function and setup the context.
- JSFunction* function = optimization.constant_function();
- __ Move(rdi, Handle<JSFunction>(function));
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Pass the additional arguments.
- __ movq(Operand(rsp, 2 * kPointerSize), rdi);
- Object* call_data = optimization.api_call_info()->data();
- Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
- if (masm->isolate()->heap()->InNewSpace(call_data)) {
- __ Move(rcx, api_call_info_handle);
- __ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
- __ movq(Operand(rsp, 3 * kPointerSize), rbx);
- } else {
- __ Move(Operand(rsp, 3 * kPointerSize), Handle<Object>(call_data));
- }
-
- // Prepare arguments.
- __ lea(rbx, Operand(rsp, 3 * kPointerSize));
-
- Object* callback = optimization.api_call_info()->callback();
- Address api_function_address = v8::ToCData<Address>(callback);
- ApiFunction fun(api_function_address);
-
-#ifdef _WIN64
- // Win64 uses first register--rcx--for returned value.
- Register arguments_arg = rdx;
-#else
- Register arguments_arg = rdi;
-#endif
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 4;
-
- __ PrepareCallApiFunction(kApiStackSpace);
-
- __ movq(StackSpaceOperand(0), rbx); // v8::Arguments::implicit_args_.
- __ addq(rbx, Immediate(argc * kPointerSize));
- __ movq(StackSpaceOperand(1), rbx); // v8::Arguments::values_.
- __ Set(StackSpaceOperand(2), argc); // v8::Arguments::length_.
- // v8::Arguments::is_construct_call_.
- __ Set(StackSpaceOperand(3), 0);
-
- // v8::InvocationCallback's argument.
- __ lea(arguments_arg, StackSpaceOperand(0));
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
- return masm->TryCallApiFunctionAndReturn(&fun,
- argc + kFastApiCallArguments + 1);
-}
-
-
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(StubCompiler* stub_compiler,
- const ParameterCount& arguments,
- Register name)
- : stub_compiler_(stub_compiler),
- arguments_(arguments),
- name_(name) {}
-
- MaybeObject* Compile(MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- CallOptimization optimization(lookup);
-
- if (optimization.is_constant_call()) {
- return CompileCacheable(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- holder,
- lookup,
- name,
- optimization,
- miss);
- } else {
- CompileRegular(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- name,
- holder,
- miss);
- return masm->isolate()->heap()->undefined_value(); // Success.
- }
- }
-
- private:
- MaybeObject* CompileCacheable(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- const CallOptimization& optimization,
- Label* miss_label) {
- ASSERT(optimization.is_constant_call());
- ASSERT(!lookup->holder()->IsGlobalObject());
-
- int depth1 = kInvalidProtoDepth;
- int depth2 = kInvalidProtoDepth;
- bool can_do_fast_api_call = false;
- if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 =
- optimization.GetPrototypeDepthOfExpectedType(object,
- interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 =
- optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
- lookup->holder());
- }
- can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
- (depth2 != kInvalidProtoDepth);
- }
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->call_const_interceptor(), 1);
-
- if (can_do_fast_api_call) {
- __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1);
- ReserveSpaceForFastApiCall(masm, scratch1);
- }
-
- // Check that the maps from receiver to interceptor's holder
- // haven't changed and thus we can invoke interceptor.
- Label miss_cleanup;
- Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver,
- interceptor_holder, scratch1,
- scratch2, scratch3, name, depth1, miss);
-
- // Invoke an interceptor and if it provides a value,
- // branch to |regular_invoke|.
- Label regular_invoke;
- LoadWithInterceptor(masm, receiver, holder, interceptor_holder,
- &regular_invoke);
-
- // Interceptor returned nothing for this property. Try to use cached
- // constant function.
-
- // Check that the maps from interceptor's holder to constant function's
- // holder haven't changed and thus we can use cached constant function.
- if (interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- lookup->holder(), scratch1,
- scratch2, scratch3, name, depth2, miss);
- } else {
- // CheckPrototypes has a side effect of fetching a 'holder'
- // for API (object which is instanceof for the signature). It's
- // safe to omit it here, as if present, it should be fetched
- // by the previous CheckPrototypes.
- ASSERT(depth2 == kInvalidProtoDepth);
- }
-
- // Invoke function.
- if (can_do_fast_api_call) {
- MaybeObject* result = GenerateFastApiCall(masm,
- optimization,
- arguments_.immediate());
- if (result->IsFailure()) return result;
- } else {
- __ InvokeFunction(optimization.constant_function(), arguments_,
- JUMP_FUNCTION);
- }
-
- // Deferred code for fast API call case---clean preallocated space.
- if (can_do_fast_api_call) {
- __ bind(&miss_cleanup);
- FreeSpaceForFastApiCall(masm, scratch1);
- __ jmp(miss_label);
- }
-
- // Invoke a regular function.
- __ bind(&regular_invoke);
- if (can_do_fast_api_call) {
- FreeSpaceForFastApiCall(masm, scratch1);
- }
-
- return masm->isolate()->heap()->undefined_value(); // Success.
- }
-
- void CompileRegular(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- String* name,
- JSObject* interceptor_holder,
- Label* miss_label) {
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3, name,
- miss_label);
-
- __ EnterInternalFrame();
- // Save the name_ register across the call.
- __ push(name_);
-
- PushInterceptorArguments(masm,
- receiver,
- holder,
- name_,
- interceptor_holder);
-
- __ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
- masm->isolate()),
- 5);
-
- // Restore the name_ register.
- __ pop(name_);
- __ LeaveInternalFrame();
- }
-
- void LoadWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- JSObject* holder_obj,
- Label* interceptor_succeeded) {
- __ EnterInternalFrame();
- __ push(holder); // Save the holder.
- __ push(name_); // Save the name.
-
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
-
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- __ LeaveInternalFrame();
-
- __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
- __ j(not_equal, interceptor_succeeded);
- }
-
- StubCompiler* stub_compiler_;
- const ParameterCount& arguments_;
- Register name_;
-};
-
-
-void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Code* code = NULL;
- if (kind == Code::LOAD_IC) {
- code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
- } else {
- code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
- }
-
- Handle<Code> ic(code);
- __ Jump(ic, RelocInfo::CODE_TARGET);
-}
-
-
-// Both name_reg and receiver_reg are preserved on jumps to miss_label,
-// but may be destroyed if store is successful.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- JSObject* object,
- int index,
- Map* transition,
- Register receiver_reg,
- Register name_reg,
- Register scratch,
- Label* miss_label) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver_reg, miss_label);
-
- // Check that the map of the object hasn't changed.
- __ Cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
- Handle<Map>(object->map()));
- __ j(not_equal, miss_label);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- // Perform map transition for the receiver if necessary.
- if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
- // The properties must be extended before we can store the value.
- // We jump to a runtime call that extends the properties array.
- __ pop(scratch); // Return address.
- __ push(receiver_reg);
- __ Push(Handle<Map>(transition));
- __ push(rax);
- __ push(scratch);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
- masm->isolate()),
- 3,
- 1);
- return;
- }
-
- if (transition != NULL) {
- // Update the map of the object; no write barrier updating is
- // needed because the map is never in new space.
- __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset),
- Handle<Map>(transition));
- }
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties is not going to change.
- index -= object->map()->inobject_properties();
-
- if (index < 0) {
- // Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ movq(FieldOperand(receiver_reg, offset), rax);
-
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, rax);
- __ RecordWrite(receiver_reg, offset, name_reg, scratch);
- } else {
- // Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- // Get the properties array (optimistically).
- __ movq(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ movq(FieldOperand(scratch, offset), rax);
-
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, rax);
- __ RecordWrite(scratch, offset, name_reg, receiver_reg);
- }
-
- // Return the value (register rax).
- __ ret(0);
-}
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
- MacroAssembler* masm,
- GlobalObject* global,
- String* name,
- Register scratch,
- Label* miss) {
- Object* probe;
- { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
- ASSERT(cell->value()->IsTheHole());
- __ Move(scratch, Handle<Object>(cell));
- __ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
- masm->isolate()->factory()->the_hole_value());
- __ j(not_equal, miss);
- return cell;
-}
-
-
-#undef __
-#define __ ACCESS_MASM((masm()))
-
-
-Register StubCompiler::CheckPrototypes(JSObject* object,
- Register object_reg,
- JSObject* holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- String* name,
- int save_at_depth,
- Label* miss) {
- // Make sure there's no overlap between holder and object registers.
- ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
-
- // Keep track of the current object in register reg. On the first
- // iteration, reg is an alias for object_reg, on later iterations,
- // it is an alias for holder_reg.
- Register reg = object_reg;
- int depth = 0;
-
- if (save_at_depth == depth) {
- __ movq(Operand(rsp, kPointerSize), object_reg);
- }
-
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- JSObject* current = object;
- while (current != holder) {
- depth++;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
-
- JSObject* prototype = JSObject::cast(current->GetPrototype());
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
- if (!name->IsSymbol()) {
- MaybeObject* lookup_result = heap()->LookupSymbol(name);
- if (lookup_result->IsFailure()) {
- set_failure(Failure::cast(lookup_result));
- return reg;
- } else {
- name = String::cast(lookup_result->ToObjectUnchecked());
- }
- }
- ASSERT(current->property_dictionary()->FindEntry(name) ==
- StringDictionary::kNotFound);
-
- GenerateDictionaryNegativeLookup(masm(),
- miss,
- reg,
- name,
- scratch1,
- scratch2);
- __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // from now the object is in holder_reg
- __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else if (heap()->InNewSpace(prototype)) {
- // Get the map of the current object.
- __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- __ Cmp(scratch1, Handle<Map>(current->map()));
- // Branch on the result of the map check.
- __ j(not_equal, miss);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
-
- // Restore scratch register to be the map of the object.
- // We load the prototype from the map in the scratch register.
- __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- }
- // The prototype is in new space; we cannot store a reference
- // to it in the code. Load it from the map.
- reg = holder_reg; // from now the object is in holder_reg
- __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-
- } else {
- // Check the map of the current object.
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Handle<Map>(current->map()));
- // Branch on the result of the map check.
- __ j(not_equal, miss);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
- // The prototype is in old space; load it directly.
- reg = holder_reg; // from now the object is in holder_reg
- __ Move(reg, Handle<JSObject>(prototype));
- }
-
- if (save_at_depth == depth) {
- __ movq(Operand(rsp, kPointerSize), reg);
- }
-
- // Go to the next object in the prototype chain.
- current = prototype;
- }
-
- // Check the holder map.
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map()));
- __ j(not_equal, miss);
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- // Perform security check for access to the global object and return
- // the holder register.
- ASSERT(current == holder);
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
-
- // If we've skipped any global objects, it's not enough to verify
- // that their maps haven't changed. We also need to check that the
- // property cell for the property is still empty.
- current = object;
- while (current != holder) {
- if (current->IsGlobalObject()) {
- MaybeObject* cell = GenerateCheckPropertyCell(masm(),
- GlobalObject::cast(current),
- name,
- scratch1,
- miss);
- if (cell->IsFailure()) {
- set_failure(Failure::cast(cell));
- return reg;
- }
- }
- current = JSObject::cast(current->GetPrototype());
- }
-
- // Return the register containing the holder.
- return reg;
-}
-
-
-void StubCompiler::GenerateLoadField(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int index,
- String* name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check the prototype chain.
- Register reg =
- CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, scratch3, name, miss);
-
- // Get the value from the properties.
- GenerateFastPropertyLoad(masm(), rax, reg, holder, index);
- __ ret(0);
-}
-
-
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder, scratch1,
- scratch2, scratch3, name, miss);
-
- Handle<AccessorInfo> callback_handle(callback);
-
- // Insert additional parameters into the stack frame above return address.
- ASSERT(!scratch2.is(reg));
- __ pop(scratch2); // Get return address to place it below.
-
- __ push(receiver); // receiver
- __ push(reg); // holder
- if (heap()->InNewSpace(callback_handle->data())) {
- __ Move(scratch1, callback_handle);
- __ push(FieldOperand(scratch1, AccessorInfo::kDataOffset)); // data
- } else {
- __ Push(Handle<Object>(callback_handle->data()));
- }
- __ push(name_reg); // name
- // Save a pointer to where we pushed the arguments pointer.
- // This will be passed as the const AccessorInfo& to the C++ callback.
-
-#ifdef _WIN64
- // Win64 uses first register--rcx--for returned value.
- Register accessor_info_arg = r8;
- Register name_arg = rdx;
-#else
- Register accessor_info_arg = rsi;
- Register name_arg = rdi;
-#endif
-
- ASSERT(!name_arg.is(scratch2));
- __ movq(name_arg, rsp);
- __ push(scratch2); // Restore return address.
-
- // Do call through the api.
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
-
- // 3 elements array for v8::Agruments::values_ and handler for name.
- const int kStackSpace = 4;
-
- // Allocate v8::AccessorInfo in non-GCed stack space.
- const int kArgStackSpace = 1;
-
- __ PrepareCallApiFunction(kArgStackSpace);
- __ lea(rax, Operand(name_arg, 3 * kPointerSize));
-
- // v8::AccessorInfo::args_.
- __ movq(StackSpaceOperand(0), rax);
-
- // The context register (rsi) has been saved in PrepareCallApiFunction and
- // could be used to pass arguments.
- __ lea(accessor_info_arg, StackSpaceOperand(0));
-
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
- return masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
-}
-
-
-void StubCompiler::GenerateLoadConstant(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Object* value,
- String* name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, scratch3, name, miss);
-
- // Return the constant value.
- __ Move(rax, Handle<Object>(value));
- __ ret(0);
-}
-
-
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- String* name,
- Label* miss) {
- ASSERT(interceptor_holder->HasNamedInterceptor());
- ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // So far the most popular follow ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only them, other cases may be added
- // later.
- bool compile_followup_inline = false;
- if (lookup->IsProperty() && lookup->IsCacheable()) {
- if (lookup->type() == FIELD) {
- compile_followup_inline = true;
- } else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo() &&
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
- compile_followup_inline = true;
- }
- }
-
- if (compile_followup_inline) {
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- __ EnterInternalFrame();
-
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- // CALLBACKS case needs a receiver to be passed into C++ callback.
- __ push(receiver);
- }
- __ push(holder_reg);
- __ push(name_reg);
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
- holder_reg,
- name_reg,
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
- __ j(equal, &interceptor_failed);
- __ LeaveInternalFrame();
- __ ret(0);
-
- __ bind(&interceptor_failed);
- __ pop(name_reg);
- __ pop(holder_reg);
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- __ pop(receiver);
- }
-
- __ LeaveInternalFrame();
-
- // Check that the maps from interceptor's holder to lookup's holder
- // haven't changed. And load lookup's holder into |holder| register.
- if (interceptor_holder != lookup->holder()) {
- holder_reg = CheckPrototypes(interceptor_holder,
- holder_reg,
- lookup->holder(),
- scratch1,
- scratch2,
- scratch3,
- name,
- miss);
- }
-
- if (lookup->type() == FIELD) {
- // We found FIELD property in prototype chain of interceptor's holder.
- // Retrieve a field from field's holder.
- GenerateFastPropertyLoad(masm(), rax, holder_reg,
- lookup->holder(), lookup->GetFieldIndex());
- __ ret(0);
- } else {
- // We found CALLBACKS property in prototype chain of interceptor's
- // holder.
- ASSERT(lookup->type() == CALLBACKS);
- ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
- ASSERT(callback != NULL);
- ASSERT(callback->getter() != NULL);
-
- // Tail call to runtime.
- // Important invariant in CALLBACKS case: the code above must be
- // structured to never clobber |receiver| register.
- __ pop(scratch2); // return address
- __ push(receiver);
- __ push(holder_reg);
- __ Move(holder_reg, Handle<AccessorInfo>(callback));
- __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
- __ push(holder_reg);
- __ push(name_reg);
- __ push(scratch2); // restore return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
- isolate());
- __ TailCallExternalReference(ref, 5, 1);
- }
- } else { // !compile_followup_inline
- // Call the runtime system to load the interceptor.
- // Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- __ pop(scratch2); // save old return address
- PushInterceptorArguments(masm(), receiver, holder_reg,
- name_reg, interceptor_holder);
- __ push(scratch2); // restore old return address
-
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate());
- __ TailCallExternalReference(ref, 5, 1);
- }
-}
-
-
-void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
- if (kind_ == Code::KEYED_CALL_IC) {
- __ Cmp(rcx, Handle<String>(name));
- __ j(not_equal, miss);
- }
-}
-
-
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
- JSObject* holder,
- String* name,
- Label* miss) {
- ASSERT(holder->IsGlobalObject());
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- // Get the receiver from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // If the object is the holder then we know that it's a global
- // object which can only happen for contextual calls. In this case,
- // the receiver cannot be a smi.
- if (object != holder) {
- __ JumpIfSmi(rdx, miss);
- }
-
- // Check that the maps haven't changed.
- CheckPrototypes(object, rdx, holder, rbx, rax, rdi, name, miss);
-}
-
-
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
- JSFunction* function,
- Label* miss) {
- // Get the value from the cell.
- __ Move(rdi, Handle<JSGlobalPropertyCell>(cell));
- __ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
-
- // Check that the cell contains the same function.
- if (heap()->InNewSpace(function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- __ JumpIfSmi(rdi, miss);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
- __ j(not_equal, miss);
-
- // Check the shared function info. Make sure it hasn't changed.
- __ Move(rax, Handle<SharedFunctionInfo>(function->shared()));
- __ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rax);
- __ j(not_equal, miss);
- } else {
- __ Cmp(rdi, Handle<JSFunction>(function));
- __ j(not_equal, miss);
- }
-}
-
-
-MaybeObject* CallStubCompiler::GenerateMissBranch() {
- MaybeObject* maybe_obj = isolate()->stub_cache()->ComputeCallMiss(
- arguments().immediate(), kind_);
- Object* obj;
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
- return obj;
-}
-
-
-MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
- JSObject* holder,
- int index,
- String* name) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- // Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, rdx, holder, rbx, rax, rdi,
- name, &miss);
-
- GenerateFastPropertyLoad(masm(), rdi, reg, holder, index);
-
- // Check that the function really is a function.
- __ JumpIfSmi(rdi, &miss);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rbx);
- __ j(not_equal, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
- }
-
- // Invoke the function.
- __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION);
-
- // Handle call cache miss.
- __ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(FIELD, name);
-}
-
-
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- rcx : name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
-
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- CheckPrototypes(JSObject::cast(object),
- rdx,
- holder,
- rbx,
- rax,
- rdi,
- name,
- &miss);
-
- if (argc == 0) {
- // Noop, return the length.
- __ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset));
- __ ret((argc + 1) * kPointerSize);
- } else {
- Label call_builtin;
-
- // Get the elements array of the object.
- __ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- factory()->fixed_array_map());
- __ j(not_equal, &call_builtin);
-
- if (argc == 1) { // Otherwise fall through to call builtin.
- Label exit, with_write_barrier, attempt_to_grow_elements;
-
- // Get the array's length into rax and calculate new length.
- __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
- STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
- __ addl(rax, Immediate(argc));
-
- // Get the element's length into rcx.
- __ SmiToInteger32(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmpl(rax, rcx);
- __ j(greater, &attempt_to_grow_elements);
-
- // Save new length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
- // Push the element.
- __ movq(rcx, Operand(rsp, argc * kPointerSize));
- __ lea(rdx, FieldOperand(rbx,
- rax, times_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ movq(Operand(rdx, 0), rcx);
-
- // Check if value is a smi.
- __ Integer32ToSmi(rax, rax); // Return new length as smi.
-
- __ JumpIfNotSmi(rcx, &with_write_barrier);
-
- __ bind(&exit);
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&with_write_barrier);
-
- __ InNewSpace(rbx, rcx, equal, &exit);
-
- __ RecordWriteHelper(rbx, rdx, rcx);
-
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&attempt_to_grow_elements);
- if (!FLAG_inline_new) {
- __ jmp(&call_builtin);
- }
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- const int kAllocationDelta = 4;
- // Load top.
- __ Load(rcx, new_space_allocation_top);
-
- // Check if it's the end of elements.
- __ lea(rdx, FieldOperand(rbx,
- rax, times_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ cmpq(rdx, rcx);
- __ j(not_equal, &call_builtin);
- __ addq(rcx, Immediate(kAllocationDelta * kPointerSize));
- Operand limit_operand =
- masm()->ExternalOperand(new_space_allocation_limit);
- __ cmpq(rcx, limit_operand);
- __ j(above, &call_builtin);
-
- // We fit and could grow elements.
- __ Store(new_space_allocation_top, rcx);
- __ movq(rcx, Operand(rsp, argc * kPointerSize));
-
- // Push the argument...
- __ movq(Operand(rdx, 0), rcx);
- // ... and fill the rest with holes.
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < kAllocationDelta; i++) {
- __ movq(Operand(rdx, i * kPointerSize), kScratchRegister);
- }
-
- // Restore receiver to rdx as finish sequence assumes it's here.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Increment element's and array's sizes.
- __ SmiAddConstant(FieldOperand(rbx, FixedArray::kLengthOffset),
- Smi::FromInt(kAllocationDelta));
-
- // Make new length a smi before returning it.
- __ Integer32ToSmi(rax, rax);
- __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
- // Elements are in new space, so write barrier is not required.
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
- isolate()),
- argc + 1,
- 1);
- }
-
- __ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- rcx : name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
-
- Label miss, return_undefined, call_builtin;
-
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- CheckPrototypes(JSObject::cast(object), rdx,
- holder, rbx,
- rax, rdi, name, &miss);
-
- // Get the elements array of the object.
- __ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &call_builtin);
-
- // Get the array's length into rcx and calculate new length.
- __ SmiToInteger32(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- __ subl(rcx, Immediate(1));
- __ j(negative, &return_undefined);
-
- // Get the last element.
- __ LoadRoot(r9, Heap::kTheHoleValueRootIndex);
- __ movq(rax, FieldOperand(rbx,
- rcx, times_pointer_size,
- FixedArray::kHeaderSize));
- // Check if element is already the hole.
- __ cmpq(rax, r9);
- // If so, call slow-case to also check prototypes for value.
- __ j(equal, &call_builtin);
-
- // Set the array's length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
-
- // Fill with the hole and return original value.
- __ movq(FieldOperand(rbx,
- rcx, times_pointer_size,
- FixedArray::kHeaderSize),
- r9);
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&return_undefined);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPop, isolate()),
- argc + 1,
- 1);
-
- __ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- rcx : function name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return heap()->undefined_value();
-
- const int argc = arguments().immediate();
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
- index_out_of_range_label = &miss;
- }
-
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- rax,
- &miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, rdi, name, &miss);
-
- Register receiver = rbx;
- Register index = rdi;
- Register scratch = rdx;
- Register result = rax;
- __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
- if (argc > 0) {
- __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharCodeAtGenerator char_code_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_code_at_generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- char_code_at_generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(rax, Heap::kNanValueRootIndex);
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&miss);
- // Restore function name in rcx.
- __ Move(rcx, Handle<String>(name));
- __ bind(&name_miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- rcx : function name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return heap()->undefined_value();
-
- const int argc = arguments().immediate();
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
- index_out_of_range_label = &miss;
- }
-
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- rax,
- &miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, rdi, name, &miss);
-
- Register receiver = rax;
- Register index = rdi;
- Register scratch1 = rbx;
- Register scratch2 = rdx;
- Register result = rax;
- __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
- if (argc > 0) {
- __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch1,
- scratch2,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_at_generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(rax, Heap::kEmptyStringRootIndex);
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&miss);
- // Restore function name in rcx.
- __ Move(rcx, Handle<String>(name));
- __ bind(&name_miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- rcx : function name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell == NULL) {
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
- __ JumpIfSmi(rdx, &miss);
-
- CheckPrototypes(JSObject::cast(object), rdx, holder, rbx, rax, rdi, name,
- &miss);
- } else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the char code argument.
- Register code = rbx;
- __ movq(code, Operand(rsp, 1 * kPointerSize));
-
- // Check the code is a smi.
- Label slow;
- __ JumpIfNotSmi(code, &slow);
-
- // Convert the smi code to uint16.
- __ SmiAndConstant(code, code, Smi::FromInt(0xffff));
-
- StringCharFromCodeGenerator char_from_code_generator(code, rax);
- char_from_code_generator.GenerateFast(masm());
- __ ret(2 * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- char_from_code_generator.GenerateSlow(masm(), call_helper);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
-
- __ bind(&miss);
- // rcx: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
-}
-
-
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // TODO(872): implement this.
- return heap()->undefined_value();
-}
-
-
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- rcx : function name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell == NULL) {
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
- __ JumpIfSmi(rdx, &miss);
-
- CheckPrototypes(JSObject::cast(object), rdx, holder, rbx, rax, rdi, name,
- &miss);
- } else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into rax.
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
-
- // Check if the argument is a smi.
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(rax, &not_smi);
- __ SmiToInteger32(rax, rax);
-
- // Set ebx to 1...1 (== -1) if the argument is negative, or to 0...0
- // otherwise.
- __ movl(rbx, rax);
- __ sarl(rbx, Immediate(kBitsPerInt - 1));
-
- // Do bitwise not or do nothing depending on ebx.
- __ xorl(rax, rbx);
-
- // Add 1 or do nothing depending on ebx.
- __ subl(rax, rbx);
-
- // If the result is still negative, go to the slow case.
- // This only happens for the most negative smi.
- Label slow;
- __ j(negative, &slow);
-
- // Smi case done.
- __ Integer32ToSmi(rax, rax);
- __ ret(2 * kPointerSize);
-
- // Check if the argument is a heap number and load its value.
- __ bind(&not_smi);
- __ CheckMap(rax, factory()->heap_number_map(), &slow, true);
- __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
-
- // Check the sign of the argument. If the argument is positive,
- // just return it.
- Label negative_sign;
- const int sign_mask_shift =
- (HeapNumber::kExponentOffset - HeapNumber::kValueOffset) * kBitsPerByte;
- __ movq(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift,
- RelocInfo::NONE);
- __ testq(rbx, rdi);
- __ j(not_zero, &negative_sign);
- __ ret(2 * kPointerSize);
-
- // If the argument is negative, clear the sign, and return a new
- // number. We still have the sign mask in rdi.
- __ bind(&negative_sign);
- __ xor_(rbx, rdi);
- __ AllocateHeapNumber(rax, rdx, &slow);
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rbx);
- __ ret(2 * kPointerSize);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
-
- __ bind(&miss);
- // rcx: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
-}
-
-
-MaybeObject* CallStubCompiler::CompileFastApiCall(
- const CallOptimization& optimization,
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- ASSERT(optimization.is_simple_api_call());
- // Bail out if object is a global object as we don't want to
- // repatch it to global receiver.
- if (object->IsGlobalObject()) return heap()->undefined_value();
- if (cell != NULL) return heap()->undefined_value();
- int depth = optimization.GetPrototypeDepthOfExpectedType(
- JSObject::cast(object), holder);
- if (depth == kInvalidProtoDepth) return heap()->undefined_value();
-
- Label miss, miss_before_stack_reserved;
-
- GenerateNameCheck(name, &miss_before_stack_reserved);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss_before_stack_reserved);
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_const(), 1);
- __ IncrementCounter(counters->call_const_fast_api(), 1);
-
- // Allocate space for v8::Arguments implicit values. Must be initialized
- // before calling any runtime function.
- __ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
-
- // Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(JSObject::cast(object), rdx, holder,
- rbx, rax, rdi, name, depth, &miss);
-
- // Move the return address on top of the stack.
- __ movq(rax, Operand(rsp, 3 * kPointerSize));
- __ movq(Operand(rsp, 0 * kPointerSize), rax);
-
- MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
- if (result->IsFailure()) return result;
-
- __ bind(&miss);
- __ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
-
- __ bind(&miss_before_stack_reserved);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
- CheckType check) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, NULL, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
- }
-
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- if (check != NUMBER_CHECK) {
- __ JumpIfSmi(rdx, &miss);
- }
-
- // Make sure that it's okay not to patch the on stack receiver
- // unless we're doing a receiver map check.
- ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
- Counters* counters = isolate()->counters();
- SharedFunctionInfo* function_info = function->shared();
- switch (check) {
- case RECEIVER_MAP_CHECK:
- __ IncrementCounter(counters->call_const(), 1);
-
- // Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), rdx, holder,
- rbx, rax, rdi, name, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
- }
- break;
-
- case STRING_CHECK:
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- } else {
- // Check that the object is a two-byte string or a symbol.
- __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax);
- __ j(above_equal, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, rdi, name, &miss);
- }
- break;
-
- case NUMBER_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- } else {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(rdx, &fast);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rax);
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, rdi, name, &miss);
- }
- break;
- }
-
- case BOOLEAN_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- } else {
- Label fast;
- // Check that the object is a boolean.
- __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
- __ j(equal, &fast);
- __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, rdi, name, &miss);
- }
- break;
- }
-
- default:
- UNREACHABLE();
- }
-
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
-
- // Handle call cache miss.
- __ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- LookupResult lookup;
- LookupPostInterceptor(holder, name, &lookup);
-
- // Get the receiver from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- CallInterceptorCompiler compiler(this, arguments(), rcx);
- MaybeObject* result = compiler.Compile(masm(),
- object,
- holder,
- name,
- &lookup,
- rdx,
- rbx,
- rdi,
- rax,
- &miss);
- if (result->IsFailure()) return result;
-
- // Restore receiver.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Check that the function really is a function.
- __ JumpIfSmi(rax, &miss);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
- __ j(not_equal, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
- }
-
- // Invoke the function.
- __ movq(rdi, rax);
- __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION);
-
- // Handle load cache miss.
- __ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(INTERCEPTOR, name);
-}
-
-
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, cell, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
- }
-
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- GenerateGlobalReceiverCheck(object, holder, name, &miss);
-
- GenerateLoadFunctionFromCell(cell, function, &miss);
-
- // Patch the receiver on the stack with the global proxy.
- if (object->IsGlobalObject()) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
- }
-
- // Setup the context (function already in rdi).
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_global_inline(), 1);
- ASSERT(function->is_compiled());
- ParameterCount expected(function->shared()->formal_parameter_count());
- if (V8::UseCrankshaft()) {
- // TODO(kasperl): For now, we always call indirectly through the
- // code field in the function to allow recompilation to take effect
- // without changing any of the call sites.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION);
- } else {
- Handle<Code> code(function->code());
- __ InvokeCode(code, expected, arguments(),
- RelocInfo::CODE_TARGET, JUMP_FUNCTION);
- }
- // Handle call cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->call_global_inline_miss(), 1);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(NORMAL, name);
-}
-
-
-MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
- int index,
- Map* transition,
- String* name) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Generate store field code. Preserves receiver and name on jump to miss.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- rdx, rcx, rbx,
- &miss);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
-}
-
-
-MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
- AccessorInfo* callback,
- String* name) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- // Check that the map of the object hasn't changed.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Handle<Map>(object->map()));
- __ j(not_equal, &miss);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(rdx, rbx, &miss);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- __ pop(rbx); // remove the return address
- __ push(rdx); // receiver
- __ Push(Handle<AccessorInfo>(callback)); // callback info
- __ push(rcx); // name
- __ push(rax); // value
- __ push(rbx); // restore return address
-
- // Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(CALLBACKS, name);
-}
-
-
-MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
- String* name) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- // Check that the map of the object hasn't changed.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Handle<Map>(receiver->map()));
- __ j(not_equal, &miss);
-
- // Perform global security token check if needed.
- if (receiver->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(rdx, rbx, &miss);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
-
- __ pop(rbx); // remove the return address
- __ push(rdx); // receiver
- __ push(rcx); // name
- __ push(rax); // value
- __ Push(Smi::FromInt(strict_mode_));
- __ push(rbx); // restore return address
-
- // Do tail-call to the runtime system.
- ExternalReference store_ic_property =
- ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
- __ TailCallExternalReference(store_ic_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(INTERCEPTOR, name);
-}
-
-
-MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
- JSGlobalPropertyCell* cell,
- String* name) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the global has not changed.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Handle<Map>(object->map()));
- __ j(not_equal, &miss);
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted and reintroducing the global needs
- // to update the property details in the property dictionary of the
- // global object. We bail out to the runtime system to do that.
- __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
- __ CompareRoot(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
- Heap::kTheHoleValueRootIndex);
- __ j(equal, &miss);
-
- // Store the value in the cell.
- __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rax);
-
- // Return the value (register rax).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_store_global_inline(), 1);
- __ ret(0);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->named_store_global_inline_miss(), 1);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(NORMAL, name);
-}
-
-
-MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
- int index,
- Map* transition,
- String* name) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_store_field(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rcx, Handle<String>(name));
- __ j(not_equal, &miss);
-
- // Generate store field code. Preserves receiver and name on jump to miss.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- rdx, rcx, rbx,
- &miss);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_store_field(), 1);
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
-}
-
-
-MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
- JSObject* receiver) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- // Check that the map matches.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Handle<Map>(receiver->map()));
- __ j(not_equal, &miss);
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(rcx, &miss);
-
- // Get the elements array and make sure it is a fast element array, not 'cow'.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
- factory()->fixed_array_map());
- __ j(not_equal, &miss);
-
- // Check that the key is within bounds.
- if (receiver->IsJSArray()) {
- __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- __ j(above_equal, &miss);
- } else {
- __ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
- __ j(above_equal, &miss);
- }
-
- // Do the store and update the write barrier. Make sure to preserve
- // the value in register eax.
- __ movq(rdx, rax);
- __ SmiToInteger32(rcx, rcx);
- __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
- rax);
- __ RecordWrite(rdi, 0, rdx, rcx);
-
- // Done.
- __ ret(0);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(NORMAL, NULL);
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
- JSObject* object,
- JSObject* last) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Chech that receiver is not a smi.
- __ JumpIfSmi(rax, &miss);
-
- // Check the maps of the full prototype chain. Also check that
- // global property cells up to (but not including) the last object
- // in the prototype chain are empty.
- CheckPrototypes(object, rax, last, rbx, rdx, rdi, name, &miss);
-
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (last->IsGlobalObject()) {
- MaybeObject* cell = GenerateCheckPropertyCell(masm(),
- GlobalObject::cast(last),
- name,
- rdx,
- &miss);
- if (cell->IsFailure()) {
- miss.Unuse();
- return cell;
- }
- }
-
- // Return undefined if maps of the full prototype chain are still the
- // same and no global property with this name contains a value.
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ ret(0);
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(NONEXISTENT, heap()->empty_string());
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
- JSObject* holder,
- int index,
- String* name) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- GenerateLoadField(object, holder, rax, rbx, rdx, rdi, index, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(FIELD, name);
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- MaybeObject* result = GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx,
- rdi, callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(CALLBACKS, name);
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
- JSObject* holder,
- Object* value,
- String* name) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- GenerateLoadConstant(object, holder, rax, rbx, rdx, rdi, value, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
- JSObject* holder,
- String* name) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- LookupResult lookup;
- LookupPostInterceptor(holder, name, &lookup);
-
- // TODO(368): Compile in the whole chain: all the interceptors in
- // prototypes and ultimate answer.
- GenerateLoadInterceptor(receiver,
- holder,
- &lookup,
- rax,
- rcx,
- rdx,
- rbx,
- rdi,
- name,
- &miss);
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(INTERCEPTOR, name);
-}
-
-
-MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- String* name,
- bool is_dont_delete) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // If the object is the holder then we know that it's a global
- // object which can only happen for contextual loads. In this case,
- // the receiver cannot be a smi.
- if (object != holder) {
- __ JumpIfSmi(rax, &miss);
- }
-
- // Check that the maps haven't changed.
- CheckPrototypes(object, rax, holder, rbx, rdx, rdi, name, &miss);
-
- // Get the value from the cell.
- __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
- __ movq(rbx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
-
- // Check for deleted property if property can actually be deleted.
- if (!is_dont_delete) {
- __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
- __ j(equal, &miss);
- } else if (FLAG_debug_code) {
- __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
- __ Check(not_equal, "DontDelete cells can't contain the hole");
- }
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1);
- __ movq(rax, rbx);
- __ ret(0);
-
- __ bind(&miss);
- __ IncrementCounter(counters->named_load_global_stub_miss(), 1);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(NORMAL, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
- int index) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_field(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
- __ j(not_equal, &miss);
-
- GenerateLoadField(receiver, holder, rdx, rbx, rcx, rdi, index, name, &miss);
-
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_field(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(FIELD, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
- String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_callback(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
- __ j(not_equal, &miss);
-
- MaybeObject* result = GenerateLoadCallback(receiver, holder, rdx, rax, rbx,
- rcx, rdi, callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
-
- __ bind(&miss);
-
- __ DecrementCounter(counters->keyed_load_callback(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(CALLBACKS, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_constant_function(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
- __ j(not_equal, &miss);
-
- GenerateLoadConstant(receiver, holder, rdx, rbx, rcx, rdi,
- value, name, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_constant_function(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
- JSObject* holder,
- String* name) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_interceptor(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
- __ j(not_equal, &miss);
-
- LookupResult lookup;
- LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver,
- holder,
- &lookup,
- rdx,
- rax,
- rcx,
- rbx,
- rdi,
- name,
- &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_interceptor(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(INTERCEPTOR, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_array_length(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
- __ j(not_equal, &miss);
-
- GenerateLoadArrayLength(masm(), rdx, rcx, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_array_length(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(CALLBACKS, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_string_length(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
- __ j(not_equal, &miss);
-
- GenerateLoadStringLength(masm(), rdx, rcx, rbx, &miss, true);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_string_length(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(CALLBACKS, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_function_prototype(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
- __ j(not_equal, &miss);
-
- GenerateLoadFunctionPrototype(masm(), rdx, rcx, rbx, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_function_prototype(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(CALLBACKS, name);
-}
-
-
-MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- // Check that the map matches.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Handle<Map>(receiver->map()));
- __ j(not_equal, &miss);
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(rax, &miss);
-
- // Get the elements array.
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ AssertFastElements(rcx);
-
- // Check that the key is within bounds.
- __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ j(above_equal, &miss);
-
- // Load the result and make sure it's not the hole.
- SmiIndex index = masm()->SmiToIndex(rbx, rax, kPointerSizeLog2);
- __ movq(rbx, FieldOperand(rcx,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
- __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
- __ j(equal, &miss);
- __ movq(rax, rbx);
- __ ret(0);
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(NORMAL, NULL);
-}
-
-
-// Specialized stub for constructing objects from functions which only have only
-// simple assignments of the form this.x = ...; in their body.
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rdi : constructor
- // -- rsp[0] : return address
- // -- rsp[4] : last argument
- // -----------------------------------
- Label generic_stub_call;
-
- // Use r8 for holding undefined which is used in several places below.
- __ Move(r8, factory()->undefined_value());
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check to see whether there are any break points in the function code. If
- // there are jump to the generic constructor stub which calls the actual
- // code for the function thereby hitting the break points.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kDebugInfoOffset));
- __ cmpq(rbx, r8);
- __ j(not_equal, &generic_stub_call);
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
- ASSERT(kSmiTag == 0);
- __ JumpIfSmi(rbx, &generic_stub_call);
- __ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ j(not_equal, &generic_stub_call);
-
-#ifdef DEBUG
- // Cannot construct functions this way.
- // rdi: constructor
- // rbx: initial map
- __ CmpInstanceType(rbx, JS_FUNCTION_TYPE);
- __ Assert(not_equal, "Function constructed by construct stub.");
-#endif
-
- // Now allocate the JSObject in new space.
- // rdi: constructor
- // rbx: initial map
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
- __ shl(rcx, Immediate(kPointerSizeLog2));
- __ AllocateInNewSpace(rcx,
- rdx,
- rcx,
- no_reg,
- &generic_stub_call,
- NO_ALLOCATION_FLAGS);
-
- // Allocated the JSObject, now initialize the fields and add the heap tag.
- // rbx: initial map
- // rdx: JSObject (untagged)
- __ movq(Operand(rdx, JSObject::kMapOffset), rbx);
- __ Move(rbx, factory()->empty_fixed_array());
- __ movq(Operand(rdx, JSObject::kPropertiesOffset), rbx);
- __ movq(Operand(rdx, JSObject::kElementsOffset), rbx);
-
- // rax: argc
- // rdx: JSObject (untagged)
- // Load the address of the first in-object property into r9.
- __ lea(r9, Operand(rdx, JSObject::kHeaderSize));
- // Calculate the location of the first argument. The stack contains only the
- // return address on top of the argc arguments.
- __ lea(rcx, Operand(rsp, rax, times_pointer_size, 0));
-
- // rax: argc
- // rcx: first argument
- // rdx: JSObject (untagged)
- // r8: undefined
- // r9: first in-object property of the JSObject
- // Fill the initialized properties with a constant value or a passed argument
- // depending on the this.x = ...; assignment in the function.
- SharedFunctionInfo* shared = function->shared();
- for (int i = 0; i < shared->this_property_assignments_count(); i++) {
- if (shared->IsThisPropertyAssignmentArgument(i)) {
- // Check if the argument assigned to the property is actually passed.
- // If argument is not passed the property is set to undefined,
- // otherwise find it on the stack.
- int arg_number = shared->GetThisPropertyAssignmentArgument(i);
- __ movq(rbx, r8);
- __ cmpq(rax, Immediate(arg_number));
- __ cmovq(above, rbx, Operand(rcx, arg_number * -kPointerSize));
- // Store value in the property.
- __ movq(Operand(r9, i * kPointerSize), rbx);
- } else {
- // Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
- __ Move(Operand(r9, i * kPointerSize), constant);
- }
- }
-
- // Fill the unused in-object property fields with undefined.
- ASSERT(function->has_initial_map());
- for (int i = shared->this_property_assignments_count();
- i < function->initial_map()->inobject_properties();
- i++) {
- __ movq(Operand(r9, i * kPointerSize), r8);
- }
-
- // rax: argc
- // rdx: JSObject (untagged)
- // Move argc to rbx and the JSObject to return to rax and tag it.
- __ movq(rbx, rax);
- __ movq(rax, rdx);
- __ or_(rax, Immediate(kHeapObjectTag));
-
- // rax: JSObject
- // rbx: argc
- // Remove caller arguments and receiver from the stack and return.
- __ pop(rcx);
- __ lea(rsp, Operand(rsp, rbx, times_pointer_size, 1 * kPointerSize));
- __ push(rcx);
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1);
- __ IncrementCounter(counters->constructed_objects_stub(), 1);
- __ ret(0);
-
- // Jump to the generic stub in case the specialized code cannot handle the
- // construction.
- __ bind(&generic_stub_call);
- Code* code =
- isolate()->builtins()->builtin(Builtins::kJSConstructStubGeneric);
- Handle<Code> generic_construct_stub(code);
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode();
-}
-
-
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
- JSObject* receiver, ExternalArrayType array_type, Code::Flags flags) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow;
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &slow);
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(rax, &slow);
-
- // Check that the map matches.
- __ CheckMap(rdx, Handle<Map>(receiver->map()), &slow, false);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
-
- // Check that the index is in range.
- __ SmiToInteger32(rcx, rax);
- __ cmpl(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
-
- // rax: index (as a smi)
- // rdx: receiver (JSObject)
- // rcx: untagged index
- // rbx: elements array
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rbx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
- __ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
- break;
- case kExternalPixelArray:
- case kExternalUnsignedByteArray:
- __ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
- break;
- case kExternalShortArray:
- __ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
- break;
- case kExternalUnsignedShortArray:
- __ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
- break;
- case kExternalIntArray:
- __ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
- break;
- case kExternalUnsignedIntArray:
- __ movl(rcx, Operand(rbx, rcx, times_4, 0));
- break;
- case kExternalFloatArray:
- __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- // rax: index
- // rdx: receiver
- // For integer array types:
- // rcx: value
- // For floating-point array type:
- // xmm0: value as double.
-
- ASSERT(kSmiValueSize == 32);
- if (array_type == kExternalUnsignedIntArray) {
- // For the UnsignedInt array type, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- NearLabel box_int;
-
- __ JumpIfUIntNotValidSmiValue(rcx, &box_int);
-
- __ Integer32ToSmi(rax, rcx);
- __ ret(0);
-
- __ bind(&box_int);
-
- // Allocate a HeapNumber for the int and perform int-to-double
- // conversion.
- // The value is zero-extended since we loaded the value from memory
- // with movl.
- __ cvtqsi2sd(xmm0, rcx);
-
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // Set the value.
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
- } else if (array_type == kExternalFloatArray) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // Set the value.
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
- } else {
- __ Integer32ToSmi(rax, rcx);
- __ ret(0);
- }
-
- // Slow case: Jump to runtime.
- __ bind(&slow);
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
-
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rax); // name
- __ push(rbx); // return address
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-
- // Return the generated code.
- return GetCode(flags);
-}
-
-
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
- JSObject* receiver, ExternalArrayType array_type, Code::Flags flags) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow;
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &slow);
-
- // Check that the map matches.
- __ CheckMap(rdx, Handle<Map>(receiver->map()), &slow, false);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(rcx, &slow);
-
- // Check that the index is in range.
- __ SmiToInteger32(rdi, rcx); // Untag the index.
- __ cmpl(rdi, FieldOperand(rbx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- // rbx: elements array
- // rdi: untagged key
- NearLabel check_heap_number;
- if (array_type == kExternalPixelArray) {
- // Float to pixel conversion is only implemented in the runtime for now.
- __ JumpIfNotSmi(rax, &slow);
- } else {
- __ JumpIfNotSmi(rax, &check_heap_number);
- }
- // No more branches to slow case on this path. Key and receiver not needed.
- __ SmiToInteger32(rdx, rax);
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rbx: base pointer of external storage
- switch (array_type) {
- case kExternalPixelArray:
- { // Clamp the value to [0..255].
- NearLabel done;
- __ testl(rdx, Immediate(0xFFFFFF00));
- __ j(zero, &done);
- __ setcc(negative, rdx); // 1 if negative, 0 if positive.
- __ decb(rdx); // 0 if negative, 255 if positive.
- __ bind(&done);
- }
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ movw(Operand(rbx, rdi, times_2, 0), rdx);
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ movl(Operand(rbx, rdi, times_4, 0), rdx);
- break;
- case kExternalFloatArray:
- // Need to perform int-to-float conversion.
- __ cvtlsi2ss(xmm0, rdx);
- __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
- break;
- default:
- UNREACHABLE();
- break;
- }
- __ ret(0);
-
- // TODO(danno): handle heap number -> pixel array conversion
- if (array_type != kExternalPixelArray) {
- __ bind(&check_heap_number);
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- // rbx: elements array
- // rdi: untagged key
- __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
- __ j(not_equal, &slow);
- // No more branches to slow case on this path.
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rdi: untagged index
- // rbx: base pointer of external storage
- // top of FPU stack: value
- if (array_type == kExternalFloatArray) {
- __ cvtsd2ss(xmm0, xmm0);
- __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
- __ ret(0);
- } else {
- // Perform float-to-int conversion with truncation (round-to-zero)
- // behavior.
-
- // Convert to int32 and store the low byte/word.
- // If the value is NaN or +/-infinity, the result is 0x80000000,
- // which is automatically zero when taken mod 2^n, n < 32.
- // rdx: value (converted to an untagged integer)
- // rdi: untagged index
- // rbx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ cvttsd2si(rdx, xmm0);
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ cvttsd2si(rdx, xmm0);
- __ movw(Operand(rbx, rdi, times_2, 0), rdx);
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray: {
- // Convert to int64, so that NaN and infinities become
- // 0x8000000000000000, which is zero mod 2^32.
- __ cvttsd2siq(rdx, xmm0);
- __ movl(Operand(rbx, rdi, times_4, 0), rdx);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- __ ret(0);
- }
- }
-
- // Slow case: call runtime.
- __ bind(&slow);
-
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
- __ Push(Smi::FromInt(NONE)); // PropertyAttributes
- __ Push(Smi::FromInt(
- Code::ExtractExtraICStateFromFlags(flags) & kStrictMode));
- __ push(rbx); // return address
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-
- return GetCode(flags);
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/virtual-frame-x64.cc b/src/3rdparty/v8/src/x64/virtual-frame-x64.cc
deleted file mode 100644
index 10c327a..0000000
--- a/src/3rdparty/v8/src/x64/virtual-frame-x64.cc
+++ /dev/null
@@ -1,1296 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "stub-cache.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::Enter() {
- // Registers live on entry to a JS frame:
- // rsp: stack pointer, points to return address from this function.
- // rbp: base pointer, points to previous JS, ArgumentsAdaptor, or
- // Trampoline frame.
- // rsi: context of this function call.
- // rdi: pointer to this function object.
- Comment cmnt(masm(), "[ Enter JS frame");
-
-#ifdef DEBUG
- if (FLAG_debug_code) {
- // Verify that rdi contains a JS function. The following code
- // relies on rax being available for use.
- Condition not_smi = NegateCondition(masm()->CheckSmi(rdi));
- __ Check(not_smi,
- "VirtualFrame::Enter - rdi is not a function (smi check).");
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
- __ Check(equal,
- "VirtualFrame::Enter - rdi is not a function (map check).");
- }
-#endif
-
- EmitPush(rbp);
-
- __ movq(rbp, rsp);
-
- // Store the context in the frame. The context is kept in rsi and a
- // copy is stored in the frame. The external reference to rsi
- // remains.
- EmitPush(rsi);
-
- // Store the function in the frame. The frame owns the register
- // reference now (ie, it can keep it in rdi or spill it later).
- Push(rdi);
- SyncElementAt(element_count() - 1);
- cgen()->allocator()->Unuse(rdi);
-}
-
-
-void VirtualFrame::Exit() {
- Comment cmnt(masm(), "[ Exit JS frame");
- // Record the location of the JS exit code for patching when setting
- // break point.
- __ RecordJSReturn();
-
- // Avoid using the leave instruction here, because it is too
- // short. We need the return sequence to be a least the size of a
- // call instruction to support patching the exit code in the
- // debugger. See GenerateReturnSequence for the full return sequence.
- // TODO(X64): A patched call will be very long now. Make sure we
- // have enough room.
- __ movq(rsp, rbp);
- stack_pointer_ = frame_pointer();
- for (int i = element_count() - 1; i > stack_pointer_; i--) {
- FrameElement last = elements_.RemoveLast();
- if (last.is_register()) {
- Unuse(last.reg());
- }
- }
-
- EmitPop(rbp);
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
- int count = local_count();
- if (count > 0) {
- Comment cmnt(masm(), "[ Allocate space for locals");
- // The locals are initialized to a constant (the undefined value), but
- // we sync them with the actual frame to allocate space for spilling
- // them later. First sync everything above the stack pointer so we can
- // use pushes to allocate and initialize the locals.
- SyncRange(stack_pointer_ + 1, element_count() - 1);
- Handle<Object> undefined = FACTORY->undefined_value();
- FrameElement initial_value =
- FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
- if (count < kLocalVarBound) {
- // For fewer locals the unrolled loop is more compact.
-
- // Hope for one of the first eight registers, where the push operation
- // takes only one byte (kScratchRegister needs the REX.W bit).
- Result tmp = cgen()->allocator()->Allocate();
- ASSERT(tmp.is_valid());
- __ movq(tmp.reg(), undefined, RelocInfo::EMBEDDED_OBJECT);
- for (int i = 0; i < count; i++) {
- __ push(tmp.reg());
- }
- } else {
- // For more locals a loop in generated code is more compact.
- Label alloc_locals_loop;
- Result cnt = cgen()->allocator()->Allocate();
- ASSERT(cnt.is_valid());
- __ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT);
-#ifdef DEBUG
- Label loop_size;
- __ bind(&loop_size);
-#endif
- if (is_uint8(count)) {
- // Loading imm8 is shorter than loading imm32.
- // Loading only partial byte register, and using decb below.
- __ movb(cnt.reg(), Immediate(count));
- } else {
- __ movl(cnt.reg(), Immediate(count));
- }
- __ bind(&alloc_locals_loop);
- __ push(kScratchRegister);
- if (is_uint8(count)) {
- __ decb(cnt.reg());
- } else {
- __ decl(cnt.reg());
- }
- __ j(not_zero, &alloc_locals_loop);
-#ifdef DEBUG
- CHECK(masm()->SizeOfCodeGeneratedSince(&loop_size) < kLocalVarBound);
-#endif
- }
- for (int i = 0; i < count; i++) {
- elements_.Add(initial_value);
- stack_pointer_++;
- }
- }
-}
-
-
-void VirtualFrame::SaveContextRegister() {
- ASSERT(elements_[context_index()].is_memory());
- __ movq(Operand(rbp, fp_relative(context_index())), rsi);
-}
-
-
-void VirtualFrame::RestoreContextRegister() {
- ASSERT(elements_[context_index()].is_memory());
- __ movq(rsi, Operand(rbp, fp_relative(context_index())));
-}
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ lea(temp.reg(), ParameterAt(-1));
- Push(&temp);
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_--;
- elements_.RemoveLast();
- __ pop(reg);
-}
-
-
-void VirtualFrame::EmitPop(const Operand& operand) {
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_--;
- elements_.RemoveLast();
- __ pop(operand);
-}
-
-
-void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ push(reg);
-}
-
-
-void VirtualFrame::EmitPush(const Operand& operand, TypeInfo info) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ push(operand);
-}
-
-
-void VirtualFrame::EmitPush(Immediate immediate, TypeInfo info) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ push(immediate);
-}
-
-
-void VirtualFrame::EmitPush(Smi* smi_value) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(TypeInfo::Smi()));
- stack_pointer_++;
- __ Push(smi_value);
-}
-
-
-void VirtualFrame::EmitPush(Handle<Object> value) {
- ASSERT(stack_pointer_ == element_count() - 1);
- TypeInfo info = TypeInfo::TypeFromValue(value);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ Push(value);
-}
-
-
-void VirtualFrame::EmitPush(Heap::RootListIndex index, TypeInfo info) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ PushRoot(index);
-}
-
-
-void VirtualFrame::Push(Expression* expr) {
- ASSERT(expr->IsTrivial());
-
- Literal* lit = expr->AsLiteral();
- if (lit != NULL) {
- Push(lit->handle());
- return;
- }
-
- VariableProxy* proxy = expr->AsVariableProxy();
- if (proxy != NULL) {
- Slot* slot = proxy->var()->AsSlot();
- if (slot->type() == Slot::LOCAL) {
- PushLocalAt(slot->index());
- return;
- }
- if (slot->type() == Slot::PARAMETER) {
- PushParameterAt(slot->index());
- return;
- }
- }
- UNREACHABLE();
-}
-
-
-void VirtualFrame::Push(Handle<Object> value) {
- if (ConstantPoolOverflowed()) {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- if (value->IsSmi()) {
- __ Move(temp.reg(), Smi::cast(*value));
- } else {
- __ movq(temp.reg(), value, RelocInfo::EMBEDDED_OBJECT);
- }
- Push(&temp);
- } else {
- FrameElement element =
- FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED);
- elements_.Add(element);
- }
-}
-
-
-void VirtualFrame::Drop(int count) {
- ASSERT(count >= 0);
- ASSERT(height() >= count);
- int num_virtual_elements = (element_count() - 1) - stack_pointer_;
-
- // Emit code to lower the stack pointer if necessary.
- if (num_virtual_elements < count) {
- int num_dropped = count - num_virtual_elements;
- stack_pointer_ -= num_dropped;
- __ addq(rsp, Immediate(num_dropped * kPointerSize));
- }
-
- // Discard elements from the virtual frame and free any registers.
- for (int i = 0; i < count; i++) {
- FrameElement dropped = elements_.RemoveLast();
- if (dropped.is_register()) {
- Unuse(dropped.reg());
- }
- }
-}
-
-
-int VirtualFrame::InvalidateFrameSlotAt(int index) {
- FrameElement original = elements_[index];
-
- // Is this element the backing store of any copies?
- int new_backing_index = kIllegalIndex;
- if (original.is_copied()) {
- // Verify it is copied, and find first copy.
- for (int i = index + 1; i < element_count(); i++) {
- if (elements_[i].is_copy() && elements_[i].index() == index) {
- new_backing_index = i;
- break;
- }
- }
- }
-
- if (new_backing_index == kIllegalIndex) {
- // No copies found, return kIllegalIndex.
- if (original.is_register()) {
- Unuse(original.reg());
- }
- elements_[index] = FrameElement::InvalidElement();
- return kIllegalIndex;
- }
-
- // This is the backing store of copies.
- Register backing_reg;
- if (original.is_memory()) {
- Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid());
- Use(fresh.reg(), new_backing_index);
- backing_reg = fresh.reg();
- __ movq(backing_reg, Operand(rbp, fp_relative(index)));
- } else {
- // The original was in a register.
- backing_reg = original.reg();
- set_register_location(backing_reg, new_backing_index);
- }
- // Invalidate the element at index.
- elements_[index] = FrameElement::InvalidElement();
- // Set the new backing element.
- if (elements_[new_backing_index].is_synced()) {
- elements_[new_backing_index] =
- FrameElement::RegisterElement(backing_reg,
- FrameElement::SYNCED,
- original.type_info());
- } else {
- elements_[new_backing_index] =
- FrameElement::RegisterElement(backing_reg,
- FrameElement::NOT_SYNCED,
- original.type_info());
- }
- // Update the other copies.
- for (int i = new_backing_index + 1; i < element_count(); i++) {
- if (elements_[i].is_copy() && elements_[i].index() == index) {
- elements_[i].set_index(new_backing_index);
- elements_[new_backing_index].set_copied();
- }
- }
- return new_backing_index;
-}
-
-
-void VirtualFrame::TakeFrameSlotAt(int index) {
- ASSERT(index >= 0);
- ASSERT(index <= element_count());
- FrameElement original = elements_[index];
- int new_backing_store_index = InvalidateFrameSlotAt(index);
- if (new_backing_store_index != kIllegalIndex) {
- elements_.Add(CopyElementAt(new_backing_store_index));
- return;
- }
-
- switch (original.type()) {
- case FrameElement::MEMORY: {
- // Emit code to load the original element's data into a register.
- // Push that register as a FrameElement on top of the frame.
- Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid());
- FrameElement new_element =
- FrameElement::RegisterElement(fresh.reg(),
- FrameElement::NOT_SYNCED,
- original.type_info());
- Use(fresh.reg(), element_count());
- elements_.Add(new_element);
- __ movq(fresh.reg(), Operand(rbp, fp_relative(index)));
- break;
- }
- case FrameElement::REGISTER:
- Use(original.reg(), element_count());
- // Fall through.
- case FrameElement::CONSTANT:
- case FrameElement::COPY:
- original.clear_sync();
- elements_.Add(original);
- break;
- case FrameElement::INVALID:
- UNREACHABLE();
- break;
- }
-}
-
-
-void VirtualFrame::StoreToFrameSlotAt(int index) {
- // Store the value on top of the frame to the virtual frame slot at
- // a given index. The value on top of the frame is left in place.
- // This is a duplicating operation, so it can create copies.
- ASSERT(index >= 0);
- ASSERT(index < element_count());
-
- int top_index = element_count() - 1;
- FrameElement top = elements_[top_index];
- FrameElement original = elements_[index];
- if (top.is_copy() && top.index() == index) return;
- ASSERT(top.is_valid());
-
- InvalidateFrameSlotAt(index);
-
- // InvalidateFrameSlotAt can potentially change any frame element, due
- // to spilling registers to allocate temporaries in order to preserve
- // the copy-on-write semantics of aliased elements. Reload top from
- // the frame.
- top = elements_[top_index];
-
- if (top.is_copy()) {
- // There are two cases based on the relative positions of the
- // stored-to slot and the backing slot of the top element.
- int backing_index = top.index();
- ASSERT(backing_index != index);
- if (backing_index < index) {
- // 1. The top element is a copy of a slot below the stored-to
- // slot. The stored-to slot becomes an unsynced copy of that
- // same backing slot.
- elements_[index] = CopyElementAt(backing_index);
- } else {
- // 2. The top element is a copy of a slot above the stored-to
- // slot. The stored-to slot becomes the new (unsynced) backing
- // slot and both the top element and the element at the former
- // backing slot become copies of it. The sync state of the top
- // and former backing elements is preserved.
- FrameElement backing_element = elements_[backing_index];
- ASSERT(backing_element.is_memory() || backing_element.is_register());
- if (backing_element.is_memory()) {
- // Because sets of copies are canonicalized to be backed by
- // their lowest frame element, and because memory frame
- // elements are backed by the corresponding stack address, we
- // have to move the actual value down in the stack.
- //
- // TODO(209): considering allocating the stored-to slot to the
- // temp register. Alternatively, allow copies to appear in
- // any order in the frame and lazily move the value down to
- // the slot.
- __ movq(kScratchRegister, Operand(rbp, fp_relative(backing_index)));
- __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
- } else {
- set_register_location(backing_element.reg(), index);
- if (backing_element.is_synced()) {
- // If the element is a register, we will not actually move
- // anything on the stack but only update the virtual frame
- // element.
- backing_element.clear_sync();
- }
- }
- elements_[index] = backing_element;
-
- // The old backing element becomes a copy of the new backing
- // element.
- FrameElement new_element = CopyElementAt(index);
- elements_[backing_index] = new_element;
- if (backing_element.is_synced()) {
- elements_[backing_index].set_sync();
- }
-
- // All the copies of the old backing element (including the top
- // element) become copies of the new backing element.
- for (int i = backing_index + 1; i < element_count(); i++) {
- if (elements_[i].is_copy() && elements_[i].index() == backing_index) {
- elements_[i].set_index(index);
- }
- }
- }
- return;
- }
-
- // Move the top element to the stored-to slot and replace it (the
- // top element) with a copy.
- elements_[index] = top;
- if (top.is_memory()) {
- // TODO(209): consider allocating the stored-to slot to the temp
- // register. Alternatively, allow copies to appear in any order
- // in the frame and lazily move the value down to the slot.
- FrameElement new_top = CopyElementAt(index);
- new_top.set_sync();
- elements_[top_index] = new_top;
-
- // The sync state of the former top element is correct (synced).
- // Emit code to move the value down in the frame.
- __ movq(kScratchRegister, Operand(rsp, 0));
- __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
- } else if (top.is_register()) {
- set_register_location(top.reg(), index);
- // The stored-to slot has the (unsynced) register reference and
- // the top element becomes a copy. The sync state of the top is
- // preserved.
- FrameElement new_top = CopyElementAt(index);
- if (top.is_synced()) {
- new_top.set_sync();
- elements_[index].clear_sync();
- }
- elements_[top_index] = new_top;
- } else {
- // The stored-to slot holds the same value as the top but
- // unsynced. (We do not have copies of constants yet.)
- ASSERT(top.is_constant());
- elements_[index].clear_sync();
- }
-}
-
-
-void VirtualFrame::MakeMergable() {
- for (int i = 0; i < element_count(); i++) {
- FrameElement element = elements_[i];
-
- // In all cases we have to reset the number type information
- // to unknown for a mergable frame because of incoming back edges.
- if (element.is_constant() || element.is_copy()) {
- if (element.is_synced()) {
- // Just spill.
- elements_[i] = FrameElement::MemoryElement(TypeInfo::Unknown());
- } else {
- // Allocate to a register.
- FrameElement backing_element; // Invalid if not a copy.
- if (element.is_copy()) {
- backing_element = elements_[element.index()];
- }
- Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid()); // A register was spilled if all were in use.
- elements_[i] =
- FrameElement::RegisterElement(fresh.reg(),
- FrameElement::NOT_SYNCED,
- TypeInfo::Unknown());
- Use(fresh.reg(), i);
-
- // Emit a move.
- if (element.is_constant()) {
- __ Move(fresh.reg(), element.handle());
- } else {
- ASSERT(element.is_copy());
- // Copies are only backed by register or memory locations.
- if (backing_element.is_register()) {
- // The backing store may have been spilled by allocating,
- // but that's OK. If it was, the value is right where we
- // want it.
- if (!fresh.reg().is(backing_element.reg())) {
- __ movq(fresh.reg(), backing_element.reg());
- }
- } else {
- ASSERT(backing_element.is_memory());
- __ movq(fresh.reg(), Operand(rbp, fp_relative(element.index())));
- }
- }
- }
- // No need to set the copied flag --- there are no copies.
- } else {
- // Clear the copy flag of non-constant, non-copy elements.
- // They cannot be copied because copies are not allowed.
- // The copy flag is not relied on before the end of this loop,
- // including when registers are spilled.
- elements_[i].clear_copied();
- elements_[i].set_type_info(TypeInfo::Unknown());
- }
- }
-}
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected) {
- Comment cmnt(masm(), "[ Merge frame");
- // We should always be merging the code generator's current frame to an
- // expected frame.
- ASSERT(cgen()->frame() == this);
-
- // Adjust the stack pointer upward (toward the top of the virtual
- // frame) if necessary.
- if (stack_pointer_ < expected->stack_pointer_) {
- int difference = expected->stack_pointer_ - stack_pointer_;
- stack_pointer_ = expected->stack_pointer_;
- __ subq(rsp, Immediate(difference * kPointerSize));
- }
-
- MergeMoveRegistersToMemory(expected);
- MergeMoveRegistersToRegisters(expected);
- MergeMoveMemoryToRegisters(expected);
-
- // Adjust the stack pointer downward if necessary.
- if (stack_pointer_ > expected->stack_pointer_) {
- int difference = stack_pointer_ - expected->stack_pointer_;
- stack_pointer_ = expected->stack_pointer_;
- __ addq(rsp, Immediate(difference * kPointerSize));
- }
-
- // At this point, the frames should be identical.
- ASSERT(Equals(expected));
-}
-
-
-void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
- ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
- // Move registers, constants, and copies to memory. Perform moves
- // from the top downward in the frame in order to leave the backing
- // stores of copies in registers.
- for (int i = element_count() - 1; i >= 0; i--) {
- FrameElement target = expected->elements_[i];
- if (target.is_register()) continue; // Handle registers later.
- if (target.is_memory()) {
- FrameElement source = elements_[i];
- switch (source.type()) {
- case FrameElement::INVALID:
- // Not a legal merge move.
- UNREACHABLE();
- break;
-
- case FrameElement::MEMORY:
- // Already in place.
- break;
-
- case FrameElement::REGISTER:
- Unuse(source.reg());
- if (!source.is_synced()) {
- __ movq(Operand(rbp, fp_relative(i)), source.reg());
- }
- break;
-
- case FrameElement::CONSTANT:
- if (!source.is_synced()) {
- __ Move(Operand(rbp, fp_relative(i)), source.handle());
- }
- break;
-
- case FrameElement::COPY:
- if (!source.is_synced()) {
- int backing_index = source.index();
- FrameElement backing_element = elements_[backing_index];
- if (backing_element.is_memory()) {
- __ movq(kScratchRegister,
- Operand(rbp, fp_relative(backing_index)));
- __ movq(Operand(rbp, fp_relative(i)), kScratchRegister);
- } else {
- ASSERT(backing_element.is_register());
- __ movq(Operand(rbp, fp_relative(i)), backing_element.reg());
- }
- }
- break;
- }
- }
- elements_[i] = target;
- }
-}
-
-
-void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
- // We have already done X-to-memory moves.
- ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- // Move the right value into register i if it is currently in a register.
- int index = expected->register_location(i);
- int use_index = register_location(i);
- // Skip if register i is unused in the target or else if source is
- // not a register (this is not a register-to-register move).
- if (index == kIllegalIndex || !elements_[index].is_register()) continue;
-
- Register target = RegisterAllocator::ToRegister(i);
- Register source = elements_[index].reg();
- if (index != use_index) {
- if (use_index == kIllegalIndex) { // Target is currently unused.
- // Copy contents of source from source to target.
- // Set frame element register to target.
- Use(target, index);
- Unuse(source);
- __ movq(target, source);
- } else {
- // Exchange contents of registers source and target.
- // Nothing except the register backing use_index has changed.
- elements_[use_index].set_reg(source);
- set_register_location(target, index);
- set_register_location(source, use_index);
- __ xchg(source, target);
- }
- }
-
- if (!elements_[index].is_synced() &&
- expected->elements_[index].is_synced()) {
- __ movq(Operand(rbp, fp_relative(index)), target);
- }
- elements_[index] = expected->elements_[index];
- }
-}
-
-
-void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
- // Move memory, constants, and copies to registers. This is the
- // final step and since it is not done from the bottom up, but in
- // register code order, we have special code to ensure that the backing
- // elements of copies are in their correct locations when we
- // encounter the copies.
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- int index = expected->register_location(i);
- if (index != kIllegalIndex) {
- FrameElement source = elements_[index];
- FrameElement target = expected->elements_[index];
- Register target_reg = RegisterAllocator::ToRegister(i);
- ASSERT(target.reg().is(target_reg));
- switch (source.type()) {
- case FrameElement::INVALID: // Fall through.
- UNREACHABLE();
- break;
- case FrameElement::REGISTER:
- ASSERT(source.Equals(target));
- // Go to next iteration. Skips Use(target_reg) and syncing
- // below. It is safe to skip syncing because a target
- // register frame element would only be synced if all source
- // elements were.
- continue;
- break;
- case FrameElement::MEMORY:
- ASSERT(index <= stack_pointer_);
- __ movq(target_reg, Operand(rbp, fp_relative(index)));
- break;
-
- case FrameElement::CONSTANT:
- __ Move(target_reg, source.handle());
- break;
-
- case FrameElement::COPY: {
- int backing_index = source.index();
- FrameElement backing = elements_[backing_index];
- ASSERT(backing.is_memory() || backing.is_register());
- if (backing.is_memory()) {
- ASSERT(backing_index <= stack_pointer_);
- // Code optimization if backing store should also move
- // to a register: move backing store to its register first.
- if (expected->elements_[backing_index].is_register()) {
- FrameElement new_backing = expected->elements_[backing_index];
- Register new_backing_reg = new_backing.reg();
- ASSERT(!is_used(new_backing_reg));
- elements_[backing_index] = new_backing;
- Use(new_backing_reg, backing_index);
- __ movq(new_backing_reg,
- Operand(rbp, fp_relative(backing_index)));
- __ movq(target_reg, new_backing_reg);
- } else {
- __ movq(target_reg, Operand(rbp, fp_relative(backing_index)));
- }
- } else {
- __ movq(target_reg, backing.reg());
- }
- }
- }
- // Ensure the proper sync state.
- if (target.is_synced() && !source.is_synced()) {
- __ movq(Operand(rbp, fp_relative(index)), target_reg);
- }
- Use(target_reg, index);
- elements_[index] = target;
- }
- }
-}
-
-
-Result VirtualFrame::Pop() {
- FrameElement element = elements_.RemoveLast();
- int index = element_count();
- ASSERT(element.is_valid());
-
- // Get number type information of the result.
- TypeInfo info;
- if (!element.is_copy()) {
- info = element.type_info();
- } else {
- info = elements_[element.index()].type_info();
- }
-
- bool pop_needed = (stack_pointer_ == index);
- if (pop_needed) {
- stack_pointer_--;
- if (element.is_memory()) {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ pop(temp.reg());
- temp.set_type_info(info);
- return temp;
- }
-
- __ addq(rsp, Immediate(kPointerSize));
- }
- ASSERT(!element.is_memory());
-
- // The top element is a register, constant, or a copy. Unuse
- // registers and follow copies to their backing store.
- if (element.is_register()) {
- Unuse(element.reg());
- } else if (element.is_copy()) {
- ASSERT(element.index() < index);
- index = element.index();
- element = elements_[index];
- }
- ASSERT(!element.is_copy());
-
- // The element is memory, a register, or a constant.
- if (element.is_memory()) {
- // Memory elements could only be the backing store of a copy.
- // Allocate the original to a register.
- ASSERT(index <= stack_pointer_);
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- Use(temp.reg(), index);
- FrameElement new_element =
- FrameElement::RegisterElement(temp.reg(),
- FrameElement::SYNCED,
- element.type_info());
- // Preserve the copy flag on the element.
- if (element.is_copied()) new_element.set_copied();
- elements_[index] = new_element;
- __ movq(temp.reg(), Operand(rbp, fp_relative(index)));
- return Result(temp.reg(), info);
- } else if (element.is_register()) {
- return Result(element.reg(), info);
- } else {
- ASSERT(element.is_constant());
- return Result(element.handle());
- }
-}
-
-
-Result VirtualFrame::RawCallStub(CodeStub* stub) {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallStub(stub);
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
- PrepareForCall(0, 0);
- arg->ToRegister(rax);
- arg->Unuse();
- return RawCallStub(stub);
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
- PrepareForCall(0, 0);
-
- if (arg0->is_register() && arg0->reg().is(rax)) {
- if (arg1->is_register() && arg1->reg().is(rdx)) {
- // Wrong registers.
- __ xchg(rax, rdx);
- } else {
- // Register rdx is free for arg0, which frees rax for arg1.
- arg0->ToRegister(rdx);
- arg1->ToRegister(rax);
- }
- } else {
- // Register rax is free for arg1, which guarantees rdx is free for
- // arg0.
- arg1->ToRegister(rax);
- arg0->ToRegister(rdx);
- }
-
- arg0->Unuse();
- arg1->Unuse();
- return RawCallStub(stub);
-}
-
-
-Result VirtualFrame::CallJSFunction(int arg_count) {
- Result function = Pop();
-
- // InvokeFunction requires function in rdi. Move it in there.
- function.ToRegister(rdi);
- function.Unuse();
-
- // +1 for receiver.
- PrepareForCall(arg_count + 1, arg_count + 1);
- ASSERT(cgen()->HasValidEntryRegisters());
- ParameterCount count(arg_count);
- __ InvokeFunction(rdi, count, CALL_FUNCTION);
- RestoreContextRegister();
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-void VirtualFrame::SyncElementBelowStackPointer(int index) {
- // Emit code to write elements below the stack pointer to their
- // (already allocated) stack address.
- ASSERT(index <= stack_pointer_);
- FrameElement element = elements_[index];
- ASSERT(!element.is_synced());
- switch (element.type()) {
- case FrameElement::INVALID:
- break;
-
- case FrameElement::MEMORY:
- // This function should not be called with synced elements.
- // (memory elements are always synced).
- UNREACHABLE();
- break;
-
- case FrameElement::REGISTER:
- __ movq(Operand(rbp, fp_relative(index)), element.reg());
- break;
-
- case FrameElement::CONSTANT:
- __ Move(Operand(rbp, fp_relative(index)), element.handle());
- break;
-
- case FrameElement::COPY: {
- int backing_index = element.index();
- FrameElement backing_element = elements_[backing_index];
- if (backing_element.is_memory()) {
- __ movq(kScratchRegister, Operand(rbp, fp_relative(backing_index)));
- __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
- } else {
- ASSERT(backing_element.is_register());
- __ movq(Operand(rbp, fp_relative(index)), backing_element.reg());
- }
- break;
- }
- }
- elements_[index].set_sync();
-}
-
-
-void VirtualFrame::SyncElementByPushing(int index) {
- // Sync an element of the frame that is just above the stack pointer
- // by pushing it.
- ASSERT(index == stack_pointer_ + 1);
- stack_pointer_++;
- FrameElement element = elements_[index];
-
- switch (element.type()) {
- case FrameElement::INVALID:
- __ Push(Smi::FromInt(0));
- break;
-
- case FrameElement::MEMORY:
- // No memory elements exist above the stack pointer.
- UNREACHABLE();
- break;
-
- case FrameElement::REGISTER:
- __ push(element.reg());
- break;
-
- case FrameElement::CONSTANT:
- __ Move(kScratchRegister, element.handle());
- __ push(kScratchRegister);
- break;
-
- case FrameElement::COPY: {
- int backing_index = element.index();
- FrameElement backing = elements_[backing_index];
- ASSERT(backing.is_memory() || backing.is_register());
- if (backing.is_memory()) {
- __ push(Operand(rbp, fp_relative(backing_index)));
- } else {
- __ push(backing.reg());
- }
- break;
- }
- }
- elements_[index].set_sync();
-}
-
-
-// Clear the dirty bits for the range of elements in
-// [min(stack_pointer_ + 1,begin), end].
-void VirtualFrame::SyncRange(int begin, int end) {
- ASSERT(begin >= 0);
- ASSERT(end < element_count());
- // Sync elements below the range if they have not been materialized
- // on the stack.
- int start = Min(begin, stack_pointer_ + 1);
- int end_or_stack_pointer = Min(stack_pointer_, end);
- // Emit normal push instructions for elements above stack pointer
- // and use mov instructions if we are below stack pointer.
- int i = start;
-
- while (i <= end_or_stack_pointer) {
- if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
- i++;
- }
- while (i <= end) {
- SyncElementByPushing(i);
- i++;
- }
-}
-
-
-//------------------------------------------------------------------------------
-// Virtual frame stub and IC calling functions.
-
-Result VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(f, arg_count);
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(id, arg_count);
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void VirtualFrame::DebugBreak() {
- PrepareForCall(0, 0);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ DebugBreak();
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
-}
-#endif
-
-
-Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ InvokeBuiltin(id, flag);
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode) {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ Call(code, rmode);
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-// This function assumes that the only results that could be in a_reg or b_reg
-// are a and b. Other results can be live, but must not be in a_reg or b_reg.
-void VirtualFrame::MoveResultsToRegisters(Result* a,
- Result* b,
- Register a_reg,
- Register b_reg) {
- ASSERT(!a_reg.is(b_reg));
- // Assert that cgen()->allocator()->count(a_reg) is accounted for by a and b.
- ASSERT(cgen()->allocator()->count(a_reg) <= 2);
- ASSERT(cgen()->allocator()->count(a_reg) != 2 || a->reg().is(a_reg));
- ASSERT(cgen()->allocator()->count(a_reg) != 2 || b->reg().is(a_reg));
- ASSERT(cgen()->allocator()->count(a_reg) != 1 ||
- (a->is_register() && a->reg().is(a_reg)) ||
- (b->is_register() && b->reg().is(a_reg)));
- // Assert that cgen()->allocator()->count(b_reg) is accounted for by a and b.
- ASSERT(cgen()->allocator()->count(b_reg) <= 2);
- ASSERT(cgen()->allocator()->count(b_reg) != 2 || a->reg().is(b_reg));
- ASSERT(cgen()->allocator()->count(b_reg) != 2 || b->reg().is(b_reg));
- ASSERT(cgen()->allocator()->count(b_reg) != 1 ||
- (a->is_register() && a->reg().is(b_reg)) ||
- (b->is_register() && b->reg().is(b_reg)));
-
- if (a->is_register() && a->reg().is(a_reg)) {
- b->ToRegister(b_reg);
- } else if (!cgen()->allocator()->is_used(a_reg)) {
- a->ToRegister(a_reg);
- b->ToRegister(b_reg);
- } else if (cgen()->allocator()->is_used(b_reg)) {
- // a must be in b_reg, b in a_reg.
- __ xchg(a_reg, b_reg);
- // Results a and b will be invalidated, so it is ok if they are switched.
- } else {
- b->ToRegister(b_reg);
- a->ToRegister(a_reg);
- }
- a->Unuse();
- b->Unuse();
-}
-
-
-Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
- // Name and receiver are on the top of the frame. Both are dropped.
- // The IC expects name in rcx and receiver in rax.
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- Builtins::kLoadIC_Initialize));
- Result name = Pop();
- Result receiver = Pop();
- PrepareForCall(0, 0);
- MoveResultsToRegisters(&name, &receiver, rcx, rax);
-
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
- // Key and receiver are on top of the frame. Put them in rax and rdx.
- Result key = Pop();
- Result receiver = Pop();
- PrepareForCall(0, 0);
- MoveResultsToRegisters(&key, &receiver, rax, rdx);
-
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- Builtins::kKeyedLoadIC_Initialize));
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallStoreIC(Handle<String> name,
- bool is_contextual,
- StrictModeFlag strict_mode) {
- // Value and (if not contextual) receiver are on top of the frame.
- // The IC expects name in rcx, value in rax, and receiver in rdx.
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- (strict_mode == kStrictMode) ? Builtins::kStoreIC_Initialize_Strict
- : Builtins::kStoreIC_Initialize));
- Result value = Pop();
- RelocInfo::Mode mode;
- if (is_contextual) {
- PrepareForCall(0, 0);
- value.ToRegister(rax);
- __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- value.Unuse();
- mode = RelocInfo::CODE_TARGET_CONTEXT;
- } else {
- Result receiver = Pop();
- PrepareForCall(0, 0);
- MoveResultsToRegisters(&value, &receiver, rax, rdx);
- mode = RelocInfo::CODE_TARGET;
- }
- __ Move(rcx, name);
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
- // Value, key, and receiver are on the top of the frame. The IC
- // expects value in rax, key in rcx, and receiver in rdx.
- Result value = Pop();
- Result key = Pop();
- Result receiver = Pop();
- PrepareForCall(0, 0);
- if (!cgen()->allocator()->is_used(rax) ||
- (value.is_register() && value.reg().is(rax))) {
- if (!cgen()->allocator()->is_used(rax)) {
- value.ToRegister(rax);
- }
- MoveResultsToRegisters(&key, &receiver, rcx, rdx);
- value.Unuse();
- } else if (!cgen()->allocator()->is_used(rcx) ||
- (key.is_register() && key.reg().is(rcx))) {
- if (!cgen()->allocator()->is_used(rcx)) {
- key.ToRegister(rcx);
- }
- MoveResultsToRegisters(&value, &receiver, rax, rdx);
- key.Unuse();
- } else if (!cgen()->allocator()->is_used(rdx) ||
- (receiver.is_register() && receiver.reg().is(rdx))) {
- if (!cgen()->allocator()->is_used(rdx)) {
- receiver.ToRegister(rdx);
- }
- MoveResultsToRegisters(&key, &value, rcx, rax);
- receiver.Unuse();
- } else {
- // All three registers are used, and no value is in the correct place.
- // We have one of the two circular permutations of rax, rcx, rdx.
- ASSERT(value.is_register());
- if (value.reg().is(rcx)) {
- __ xchg(rax, rdx);
- __ xchg(rax, rcx);
- } else {
- __ xchg(rax, rcx);
- __ xchg(rax, rdx);
- }
- value.Unuse();
- key.Unuse();
- receiver.Unuse();
- }
-
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- (strict_mode == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict
- : Builtins::kKeyedStoreIC_Initialize));
- return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
-}
-
-
-Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
- int arg_count,
- int loop_nesting) {
- // Function name, arguments, and receiver are found on top of the frame
- // and dropped by the call. The IC expects the name in rcx and the rest
- // on the stack, and drops them all.
- InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic =
- ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
- Result name = Pop();
- // Spill args, receiver, and function. The call will drop args and
- // receiver.
- PrepareForCall(arg_count + 1, arg_count + 1);
- name.ToRegister(rcx);
- name.Unuse();
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedCallIC(RelocInfo::Mode mode,
- int arg_count,
- int loop_nesting) {
- // Function name, arguments, and receiver are found on top of the frame
- // and dropped by the call. The IC expects the name in rcx and the rest
- // on the stack, and drops them all.
- InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic =
- ISOLATE->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
- Result name = Pop();
- // Spill args, receiver, and function. The call will drop args and
- // receiver.
- PrepareForCall(arg_count + 1, arg_count + 1);
- name.ToRegister(rcx);
- name.Unuse();
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallConstructor(int arg_count) {
- // Arguments, receiver, and function are on top of the frame. The
- // IC expects arg count in rax, function in rdi, and the arguments
- // and receiver on the stack.
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- Builtins::kJSConstructCall));
- // Duplicate the function before preparing the frame.
- PushElementAt(arg_count);
- Result function = Pop();
- PrepareForCall(arg_count + 1, arg_count + 1); // Spill function and args.
- function.ToRegister(rdi);
-
- // Constructors are called with the number of arguments in register
- // rax for now. Another option would be to have separate construct
- // call trampolines per different arguments counts encountered.
- Result num_args = cgen()->allocator()->Allocate(rax);
- ASSERT(num_args.is_valid());
- __ Set(num_args.reg(), arg_count);
-
- function.Unuse();
- num_args.Unuse();
- return RawCallCodeObject(ic, RelocInfo::CONSTRUCT_CALL);
-}
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
- ASSERT(cgen()->HasValidEntryRegisters());
- // Grow the expression stack by handler size less one (the return
- // address is already pushed by a call instruction).
- Adjust(kHandlerSize - 1);
- __ PushTryHandler(IN_JAVASCRIPT, type);
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/virtual-frame-x64.h b/src/3rdparty/v8/src/x64/virtual-frame-x64.h
deleted file mode 100644
index aac9864..0000000
--- a/src/3rdparty/v8/src/x64/virtual-frame-x64.h
+++ /dev/null
@@ -1,597 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_VIRTUAL_FRAME_X64_H_
-#define V8_X64_VIRTUAL_FRAME_X64_H_
-
-#include "type-info.h"
-#include "register-allocator.h"
-#include "scopes.h"
-#include "codegen.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame. It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack. It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
-
-class VirtualFrame : public ZoneObject {
- public:
- // A utility class to introduce a scope where the virtual frame is
- // expected to remain spilled. The constructor spills the code
- // generator's current frame, but no attempt is made to require it
- // to stay spilled. It is intended as documentation while the code
- // generator is being transformed.
- class SpilledScope BASE_EMBEDDED {
- public:
- SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
- ASSERT(cgen()->has_valid_frame());
- cgen()->frame()->SpillAll();
- cgen()->set_in_spilled_code(true);
- }
-
- ~SpilledScope() {
- cgen()->set_in_spilled_code(previous_state_);
- }
-
- private:
- bool previous_state_;
-
- CodeGenerator* cgen() {
- return CodeGeneratorScope::Current(Isolate::Current());
- }
- };
-
- // An illegal index into the virtual frame.
- static const int kIllegalIndex = -1;
-
- // Construct an initial virtual frame on entry to a JS function.
- inline VirtualFrame();
-
- // Construct a virtual frame as a clone of an existing one.
- explicit inline VirtualFrame(VirtualFrame* original);
-
- CodeGenerator* cgen() {
- return CodeGeneratorScope::Current(Isolate::Current());
- }
-
- MacroAssembler* masm() { return cgen()->masm(); }
-
- // Create a duplicate of an existing valid frame element.
- FrameElement CopyElementAt(int index,
- TypeInfo info = TypeInfo::Uninitialized());
-
- // The number of elements on the virtual frame.
- int element_count() { return elements_.length(); }
-
- // The height of the virtual expression stack.
- int height() {
- return element_count() - expression_base_index();
- }
-
- int register_location(int num) {
- ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
- return register_locations_[num];
- }
-
- inline int register_location(Register reg);
-
- inline void set_register_location(Register reg, int index);
-
- bool is_used(int num) {
- ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
- return register_locations_[num] != kIllegalIndex;
- }
-
- inline bool is_used(Register reg);
-
- // Add extra in-memory elements to the top of the frame to match an actual
- // frame (eg, the frame after an exception handler is pushed). No code is
- // emitted.
- void Adjust(int count);
-
- // Forget count elements from the top of the frame all in-memory
- // (including synced) and adjust the stack pointer downward, to
- // match an external frame effect (examples include a call removing
- // its arguments, and exiting a try/catch removing an exception
- // handler). No code will be emitted.
- void Forget(int count) {
- ASSERT(count >= 0);
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_ -= count;
- ForgetElements(count);
- }
-
- // Forget count elements from the top of the frame without adjusting
- // the stack pointer downward. This is used, for example, before
- // merging frames at break, continue, and return targets.
- void ForgetElements(int count);
-
- // Spill all values from the frame to memory.
- inline void SpillAll();
-
- // Spill all occurrences of a specific register from the frame.
- void Spill(Register reg) {
- if (is_used(reg)) SpillElementAt(register_location(reg));
- }
-
- // Spill all occurrences of an arbitrary register if possible. Return the
- // register spilled or no_reg if it was not possible to free any register
- // (ie, they all have frame-external references).
- Register SpillAnyRegister();
-
- // Spill the top element of the frame to memory.
- void SpillTop() { SpillElementAt(element_count() - 1); }
-
- // Sync the range of elements in [begin, end] with memory.
- void SyncRange(int begin, int end);
-
- // Make this frame so that an arbitrary frame of the same height can
- // be merged to it. Copies and constants are removed from the frame.
- void MakeMergable();
-
- // Prepare this virtual frame for merging to an expected frame by
- // performing some state changes that do not require generating
- // code. It is guaranteed that no code will be generated.
- void PrepareMergeTo(VirtualFrame* expected);
-
- // Make this virtual frame have a state identical to an expected virtual
- // frame. As a side effect, code may be emitted to make this frame match
- // the expected one.
- void MergeTo(VirtualFrame* expected);
-
- // Detach a frame from its code generator, perhaps temporarily. This
- // tells the register allocator that it is free to use frame-internal
- // registers. Used when the code generator's frame is switched from this
- // one to NULL by an unconditional jump.
- void DetachFromCodeGenerator() {
- RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i)) cgen_allocator->Unuse(i);
- }
- }
-
- // (Re)attach a frame to its code generator. This informs the register
- // allocator that the frame-internal register references are active again.
- // Used when a code generator's frame is switched from NULL to this one by
- // binding a label.
- void AttachToCodeGenerator() {
- RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i)) cgen_allocator->Use(i);
- }
- }
-
- // Emit code for the physical JS entry and exit frame sequences. After
- // calling Enter, the virtual frame is ready for use; and after calling
- // Exit it should not be used. Note that Enter does not allocate space in
- // the physical frame for storing frame-allocated locals.
- void Enter();
- void Exit();
-
- // Prepare for returning from the frame by spilling locals. This
- // avoids generating unnecessary merge code when jumping to the
- // shared return site. Emits code for spills.
- inline void PrepareForReturn();
-
- // Number of local variables after when we use a loop for allocating.
- static const int kLocalVarBound = 14;
-
- // Allocate and initialize the frame-allocated locals.
- void AllocateStackSlots();
-
- // An element of the expression stack as an assembly operand.
- Operand ElementAt(int index) const {
- return Operand(rsp, index * kPointerSize);
- }
-
- // Random-access store to a frame-top relative frame element. The result
- // becomes owned by the frame and is invalidated.
- void SetElementAt(int index, Result* value);
-
- // Set a frame element to a constant. The index is frame-top relative.
- inline void SetElementAt(int index, Handle<Object> value);
-
- void PushElementAt(int index) {
- PushFrameSlotAt(element_count() - index - 1);
- }
-
- void StoreToElementAt(int index) {
- StoreToFrameSlotAt(element_count() - index - 1);
- }
-
- // A frame-allocated local as an assembly operand.
- Operand LocalAt(int index) {
- ASSERT(0 <= index);
- ASSERT(index < local_count());
- return Operand(rbp, kLocal0Offset - index * kPointerSize);
- }
-
- // Push a copy of the value of a local frame slot on top of the frame.
- void PushLocalAt(int index) {
- PushFrameSlotAt(local0_index() + index);
- }
-
- // Push the value of a local frame slot on top of the frame and invalidate
- // the local slot. The slot should be written to before trying to read
- // from it again.
- void TakeLocalAt(int index) {
- TakeFrameSlotAt(local0_index() + index);
- }
-
- // Store the top value on the virtual frame into a local frame slot. The
- // value is left in place on top of the frame.
- void StoreToLocalAt(int index) {
- StoreToFrameSlotAt(local0_index() + index);
- }
-
- // Push the address of the receiver slot on the frame.
- void PushReceiverSlotAddress();
-
- // Push the function on top of the frame.
- void PushFunction() { PushFrameSlotAt(function_index()); }
-
- // Save the value of the esi register to the context frame slot.
- void SaveContextRegister();
-
- // Restore the esi register from the value of the context frame
- // slot.
- void RestoreContextRegister();
-
- // A parameter as an assembly operand.
- Operand ParameterAt(int index) {
- ASSERT(-1 <= index); // -1 is the receiver.
- ASSERT(index < parameter_count());
- return Operand(rbp, (1 + parameter_count() - index) * kPointerSize);
- }
-
- // Push a copy of the value of a parameter frame slot on top of the frame.
- void PushParameterAt(int index) {
- PushFrameSlotAt(param0_index() + index);
- }
-
- // Push the value of a paramter frame slot on top of the frame and
- // invalidate the parameter slot. The slot should be written to before
- // trying to read from it again.
- void TakeParameterAt(int index) {
- TakeFrameSlotAt(param0_index() + index);
- }
-
- // Store the top value on the virtual frame into a parameter frame slot.
- // The value is left in place on top of the frame.
- void StoreToParameterAt(int index) {
- StoreToFrameSlotAt(param0_index() + index);
- }
-
- // The receiver frame slot.
- Operand Receiver() { return ParameterAt(-1); }
-
- // Push a try-catch or try-finally handler on top of the virtual frame.
- void PushTryHandler(HandlerType type);
-
- // Call stub given the number of arguments it expects on (and
- // removes from) the stack.
- inline Result CallStub(CodeStub* stub, int arg_count);
-
- // Call stub that takes a single argument passed in eax. The
- // argument is given as a result which does not have to be eax or
- // even a register. The argument is consumed by the call.
- Result CallStub(CodeStub* stub, Result* arg);
-
- // Call stub that takes a pair of arguments passed in edx (arg0, rdx) and
- // eax (arg1, rax). The arguments are given as results which do not have
- // to be in the proper registers or even in registers. The
- // arguments are consumed by the call.
- Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
-
- // Call JS function from top of the stack with arguments
- // taken from the stack.
- Result CallJSFunction(int arg_count);
-
- // Call runtime given the number of arguments expected on (and
- // removed from) the stack.
- Result CallRuntime(const Runtime::Function* f, int arg_count);
- Result CallRuntime(Runtime::FunctionId id, int arg_count);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- void DebugBreak();
-#endif
-
- // Invoke builtin given the number of arguments it expects on (and
- // removes from) the stack.
- Result InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- int arg_count);
-
- // Call load IC. Name and receiver are found on top of the frame.
- // Both are dropped.
- Result CallLoadIC(RelocInfo::Mode mode);
-
- // Call keyed load IC. Key and receiver are found on top of the
- // frame. Both are dropped.
- Result CallKeyedLoadIC(RelocInfo::Mode mode);
-
- // Call store IC. If the load is contextual, value is found on top of the
- // frame. If not, value and receiver are on the frame. Both are dropped.
- Result CallStoreIC(Handle<String> name, bool is_contextual,
- StrictModeFlag strict_mode);
-
- // Call keyed store IC. Value, key, and receiver are found on top
- Result CallKeyedStoreIC(StrictModeFlag strict_mode);
-
- // Call call IC. Function name, arguments, and receiver are found on top
- // of the frame and dropped by the call.
- // The argument count does not include the receiver.
- Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
-
- // Call keyed call IC. Same calling convention as CallCallIC.
- Result CallKeyedCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
-
- // Allocate and call JS function as constructor. Arguments,
- // receiver (global object), and function are found on top of the
- // frame. Function is not dropped. The argument count does not
- // include the receiver.
- Result CallConstructor(int arg_count);
-
- // Drop a number of elements from the top of the expression stack. May
- // emit code to affect the physical frame. Does not clobber any registers
- // excepting possibly the stack pointer.
- void Drop(int count);
-
- // Drop one element.
- void Drop() { Drop(1); }
-
- // Duplicate the top element of the frame.
- void Dup() { PushFrameSlotAt(element_count() - 1); }
-
- // Duplicate the n'th element from the top of the frame.
- // Dup(1) is equivalent to Dup().
- void Dup(int n) {
- ASSERT(n > 0);
- PushFrameSlotAt(element_count() - n);
- }
-
- // Pop an element from the top of the expression stack. Returns a
- // Result, which may be a constant or a register.
- Result Pop();
-
- // Pop and save an element from the top of the expression stack and
- // emit a corresponding pop instruction.
- void EmitPop(Register reg);
- void EmitPop(const Operand& operand);
-
- // Push an element on top of the expression stack and emit a
- // corresponding push instruction.
- void EmitPush(Register reg,
- TypeInfo info = TypeInfo::Unknown());
- void EmitPush(const Operand& operand,
- TypeInfo info = TypeInfo::Unknown());
- void EmitPush(Heap::RootListIndex index,
- TypeInfo info = TypeInfo::Unknown());
- void EmitPush(Immediate immediate,
- TypeInfo info = TypeInfo::Unknown());
- void EmitPush(Smi* value);
- // Uses kScratchRegister, emits appropriate relocation info.
- void EmitPush(Handle<Object> value);
-
- inline bool ConstantPoolOverflowed();
-
- // Push an element on the virtual frame.
- void Push(Handle<Object> value);
- inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown());
- inline void Push(Smi* value);
-
- // Pushing a result invalidates it (its contents become owned by the
- // frame).
- void Push(Result* result) {
- if (result->is_register()) {
- Push(result->reg(), result->type_info());
- } else {
- ASSERT(result->is_constant());
- Push(result->handle());
- }
- result->Unuse();
- }
-
- // Pushing an expression expects that the expression is trivial (according
- // to Expression::IsTrivial).
- void Push(Expression* expr);
-
- // Nip removes zero or more elements from immediately below the top
- // of the frame, leaving the previous top-of-frame value on top of
- // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
- inline void Nip(int num_dropped);
-
- inline void SetTypeForLocalAt(int index, TypeInfo info);
- inline void SetTypeForParamAt(int index, TypeInfo info);
-
- private:
- static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
- static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
- static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
- static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
- static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
-
- ZoneList<FrameElement> elements_;
-
- // The index of the element that is at the processor's stack pointer
- // (the esp register).
- int stack_pointer_;
-
- // The index of the register frame element using each register, or
- // kIllegalIndex if a register is not on the frame.
- int register_locations_[RegisterAllocator::kNumRegisters];
-
- // The number of frame-allocated locals and parameters respectively.
- inline int parameter_count();
- inline int local_count();
-
- // The index of the element that is at the processor's frame pointer
- // (the ebp register). The parameters, receiver, and return address
- // are below the frame pointer.
- int frame_pointer() { return parameter_count() + 2; }
-
- // The index of the first parameter. The receiver lies below the first
- // parameter.
- int param0_index() { return 1; }
-
- // The index of the context slot in the frame. It is immediately
- // above the frame pointer.
- int context_index() { return frame_pointer() + 1; }
-
- // The index of the function slot in the frame. It is above the frame
- // pointer and the context slot.
- int function_index() { return frame_pointer() + 2; }
-
- // The index of the first local. Between the frame pointer and the
- // locals lie the context and the function.
- int local0_index() { return frame_pointer() + 3; }
-
- // The index of the base of the expression stack.
- int expression_base_index() { return local0_index() + local_count(); }
-
- // Convert a frame index into a frame pointer relative offset into the
- // actual stack.
- int fp_relative(int index) {
- ASSERT(index < element_count());
- ASSERT(frame_pointer() < element_count()); // FP is on the frame.
- return (frame_pointer() - index) * kPointerSize;
- }
-
- // Record an occurrence of a register in the virtual frame. This has the
- // effect of incrementing the register's external reference count and
- // of updating the index of the register's location in the frame.
- void Use(Register reg, int index) {
- ASSERT(!is_used(reg));
- set_register_location(reg, index);
- cgen()->allocator()->Use(reg);
- }
-
- // Record that a register reference has been dropped from the frame. This
- // decrements the register's external reference count and invalidates the
- // index of the register's location in the frame.
- void Unuse(Register reg) {
- ASSERT(is_used(reg));
- set_register_location(reg, kIllegalIndex);
- cgen()->allocator()->Unuse(reg);
- }
-
- // Spill the element at a particular index---write it to memory if
- // necessary, free any associated register, and forget its value if
- // constant.
- void SpillElementAt(int index);
-
- // Sync the element at a particular index. If it is a register or
- // constant that disagrees with the value on the stack, write it to memory.
- // Keep the element type as register or constant, and clear the dirty bit.
- void SyncElementAt(int index);
-
- // Sync a single unsynced element that lies beneath or at the stack pointer.
- void SyncElementBelowStackPointer(int index);
-
- // Sync a single unsynced element that lies just above the stack pointer.
- void SyncElementByPushing(int index);
-
- // Push a copy of a frame slot (typically a local or parameter) on top of
- // the frame.
- inline void PushFrameSlotAt(int index);
-
- // Push a the value of a frame slot (typically a local or parameter) on
- // top of the frame and invalidate the slot.
- void TakeFrameSlotAt(int index);
-
- // Store the value on top of the frame to a frame slot (typically a local
- // or parameter).
- void StoreToFrameSlotAt(int index);
-
- // Spill all elements in registers. Spill the top spilled_args elements
- // on the frame. Sync all other frame elements.
- // Then drop dropped_args elements from the virtual frame, to match
- // the effect of an upcoming call that will drop them from the stack.
- void PrepareForCall(int spilled_args, int dropped_args);
-
- // Move frame elements currently in registers or constants, that
- // should be in memory in the expected frame, to memory.
- void MergeMoveRegistersToMemory(VirtualFrame* expected);
-
- // Make the register-to-register moves necessary to
- // merge this frame with the expected frame.
- // Register to memory moves must already have been made,
- // and memory to register moves must follow this call.
- // This is because some new memory-to-register moves are
- // created in order to break cycles of register moves.
- // Used in the implementation of MergeTo().
- void MergeMoveRegistersToRegisters(VirtualFrame* expected);
-
- // Make the memory-to-register and constant-to-register moves
- // needed to make this frame equal the expected frame.
- // Called after all register-to-memory and register-to-register
- // moves have been made. After this function returns, the frames
- // should be equal.
- void MergeMoveMemoryToRegisters(VirtualFrame* expected);
-
- // Invalidates a frame slot (puts an invalid frame element in it).
- // Copies on the frame are correctly handled, and if this slot was
- // the backing store of copies, the index of the new backing store
- // is returned. Otherwise, returns kIllegalIndex.
- // Register counts are correctly updated.
- int InvalidateFrameSlotAt(int index);
-
- // This function assumes that a and b are the only results that could be in
- // the registers a_reg or b_reg. Other results can be live, but must not
- // be in the registers a_reg or b_reg. The results a and b are invalidated.
- void MoveResultsToRegisters(Result* a,
- Result* b,
- Register a_reg,
- Register b_reg);
-
- // Call a code stub that has already been prepared for calling (via
- // PrepareForCall).
- Result RawCallStub(CodeStub* stub);
-
- // Calls a code object which has already been prepared for calling
- // (via PrepareForCall).
- Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
-
- inline bool Equals(VirtualFrame* other);
-
- // Classes that need raw access to the elements_ array.
- friend class FrameRegisterState;
- friend class JumpTarget;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_X64_VIRTUAL_FRAME_X64_H_
diff --git a/src/3rdparty/v8/src/zone-inl.h b/src/3rdparty/v8/src/zone-inl.h
deleted file mode 100644
index 17e83dc..0000000
--- a/src/3rdparty/v8/src/zone-inl.h
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ZONE_INL_H_
-#define V8_ZONE_INL_H_
-
-#include "isolate.h"
-#include "zone.h"
-#include "v8-counters.h"
-
-namespace v8 {
-namespace internal {
-
-
-AssertNoZoneAllocation::AssertNoZoneAllocation()
- : prev_(Isolate::Current()->zone_allow_allocation()) {
- Isolate::Current()->set_zone_allow_allocation(false);
-}
-
-
-AssertNoZoneAllocation::~AssertNoZoneAllocation() {
- Isolate::Current()->set_zone_allow_allocation(prev_);
-}
-
-
-inline void* Zone::New(int size) {
- ASSERT(Isolate::Current()->zone_allow_allocation());
- ASSERT(ZoneScope::nesting() > 0);
- // Round up the requested size to fit the alignment.
- size = RoundUp(size, kAlignment);
-
- // Check if the requested size is available without expanding.
- Address result = position_;
- if ((position_ += size) > limit_) result = NewExpand(size);
-
- // Check that the result has the proper alignment and return it.
- ASSERT(IsAddressAligned(result, kAlignment, 0));
- allocation_size_ += size;
- return reinterpret_cast<void*>(result);
-}
-
-
-template <typename T>
-T* Zone::NewArray(int length) {
- return static_cast<T*>(New(length * sizeof(T)));
-}
-
-
-bool Zone::excess_allocation() {
- return segment_bytes_allocated_ > zone_excess_limit_;
-}
-
-
-void Zone::adjust_segment_bytes_allocated(int delta) {
- segment_bytes_allocated_ += delta;
- isolate_->counters()->zone_segment_bytes()->Set(segment_bytes_allocated_);
-}
-
-
-template <typename Config>
-ZoneSplayTree<Config>::~ZoneSplayTree() {
- // Reset the root to avoid unneeded iteration over all tree nodes
- // in the destructor. For a zone-allocated tree, nodes will be
- // freed by the Zone.
- SplayTree<Config, ZoneListAllocationPolicy>::ResetRoot();
-}
-
-
-// TODO(isolates): for performance reasons, this should be replaced with a new
-// operator that takes the zone in which the object should be
-// allocated.
-void* ZoneObject::operator new(size_t size) {
- return ZONE->New(static_cast<int>(size));
-}
-
-void* ZoneObject::operator new(size_t size, Zone* zone) {
- return zone->New(static_cast<int>(size));
-}
-
-
-inline void* ZoneListAllocationPolicy::New(int size) {
- return ZONE->New(size);
-}
-
-
-ZoneScope::ZoneScope(ZoneScopeMode mode)
- : isolate_(Isolate::Current()),
- mode_(mode) {
- isolate_->zone()->scope_nesting_++;
-}
-
-
-bool ZoneScope::ShouldDeleteOnExit() {
- return isolate_->zone()->scope_nesting_ == 1 && mode_ == DELETE_ON_EXIT;
-}
-
-
-int ZoneScope::nesting() {
- return Isolate::Current()->zone()->scope_nesting_;
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_ZONE_INL_H_
diff --git a/src/3rdparty/v8/src/zone.cc b/src/3rdparty/v8/src/zone.cc
deleted file mode 100644
index 42ce8c5..0000000
--- a/src/3rdparty/v8/src/zone.cc
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "zone-inl.h"
-#include "splay-tree-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-Zone::Zone()
- : zone_excess_limit_(256 * MB),
- segment_bytes_allocated_(0),
- position_(0),
- limit_(0),
- scope_nesting_(0),
- segment_head_(NULL) {
-}
-unsigned Zone::allocation_size_ = 0;
-
-
-ZoneScope::~ZoneScope() {
- ASSERT_EQ(Isolate::Current(), isolate_);
- if (ShouldDeleteOnExit()) isolate_->zone()->DeleteAll();
- isolate_->zone()->scope_nesting_--;
-}
-
-
-// Segments represent chunks of memory: They have starting address
-// (encoded in the this pointer) and a size in bytes. Segments are
-// chained together forming a LIFO structure with the newest segment
-// available as segment_head_. Segments are allocated using malloc()
-// and de-allocated using free().
-
-class Segment {
- public:
- Segment* next() const { return next_; }
- void clear_next() { next_ = NULL; }
-
- int size() const { return size_; }
- int capacity() const { return size_ - sizeof(Segment); }
-
- Address start() const { return address(sizeof(Segment)); }
- Address end() const { return address(size_); }
-
- private:
- // Computes the address of the nth byte in this segment.
- Address address(int n) const {
- return Address(this) + n;
- }
-
- Segment* next_;
- int size_;
-
- friend class Zone;
-};
-
-
-// Creates a new segment, sets it size, and pushes it to the front
-// of the segment chain. Returns the new segment.
-Segment* Zone::NewSegment(int size) {
- Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
- adjust_segment_bytes_allocated(size);
- if (result != NULL) {
- result->next_ = segment_head_;
- result->size_ = size;
- segment_head_ = result;
- }
- return result;
-}
-
-
-// Deletes the given segment. Does not touch the segment chain.
-void Zone::DeleteSegment(Segment* segment, int size) {
- adjust_segment_bytes_allocated(-size);
- Malloced::Delete(segment);
-}
-
-
-void Zone::DeleteAll() {
-#ifdef DEBUG
- // Constant byte value used for zapping dead memory in debug mode.
- static const unsigned char kZapDeadByte = 0xcd;
-#endif
-
- // Find a segment with a suitable size to keep around.
- Segment* keep = segment_head_;
- while (keep != NULL && keep->size() > kMaximumKeptSegmentSize) {
- keep = keep->next();
- }
-
- // Traverse the chained list of segments, zapping (in debug mode)
- // and freeing every segment except the one we wish to keep.
- Segment* current = segment_head_;
- while (current != NULL) {
- Segment* next = current->next();
- if (current == keep) {
- // Unlink the segment we wish to keep from the list.
- current->clear_next();
- } else {
- int size = current->size();
-#ifdef DEBUG
- // Zap the entire current segment (including the header).
- memset(current, kZapDeadByte, size);
-#endif
- DeleteSegment(current, size);
- }
- current = next;
- }
-
- // If we have found a segment we want to keep, we must recompute the
- // variables 'position' and 'limit' to prepare for future allocate
- // attempts. Otherwise, we must clear the position and limit to
- // force a new segment to be allocated on demand.
- if (keep != NULL) {
- Address start = keep->start();
- position_ = RoundUp(start, kAlignment);
- limit_ = keep->end();
-#ifdef DEBUG
- // Zap the contents of the kept segment (but not the header).
- memset(start, kZapDeadByte, keep->capacity());
-#endif
- } else {
- position_ = limit_ = 0;
- }
-
- // Update the head segment to be the kept segment (if any).
- segment_head_ = keep;
-}
-
-
-Address Zone::NewExpand(int size) {
- // Make sure the requested size is already properly aligned and that
- // there isn't enough room in the Zone to satisfy the request.
- ASSERT(size == RoundDown(size, kAlignment));
- ASSERT(position_ + size > limit_);
-
- // Compute the new segment size. We use a 'high water mark'
- // strategy, where we increase the segment size every time we expand
- // except that we employ a maximum segment size when we delete. This
- // is to avoid excessive malloc() and free() overhead.
- Segment* head = segment_head_;
- int old_size = (head == NULL) ? 0 : head->size();
- static const int kSegmentOverhead = sizeof(Segment) + kAlignment;
- int new_size = kSegmentOverhead + size + (old_size << 1);
- if (new_size < kMinimumSegmentSize) {
- new_size = kMinimumSegmentSize;
- } else if (new_size > kMaximumSegmentSize) {
- // Limit the size of new segments to avoid growing the segment size
- // exponentially, thus putting pressure on contiguous virtual address space.
- // All the while making sure to allocate a segment large enough to hold the
- // requested size.
- new_size = Max(kSegmentOverhead + size, kMaximumSegmentSize);
- }
- Segment* segment = NewSegment(new_size);
- if (segment == NULL) {
- V8::FatalProcessOutOfMemory("Zone");
- return NULL;
- }
-
- // Recompute 'top' and 'limit' based on the new segment.
- Address result = RoundUp(segment->start(), kAlignment);
- position_ = result + size;
- limit_ = segment->end();
- ASSERT(position_ <= limit_);
- return result;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/zone.h b/src/3rdparty/v8/src/zone.h
deleted file mode 100644
index 9efe4f5..0000000
--- a/src/3rdparty/v8/src/zone.h
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ZONE_H_
-#define V8_ZONE_H_
-
-namespace v8 {
-namespace internal {
-
-
-// Zone scopes are in one of two modes. Either they delete the zone
-// on exit or they do not.
-enum ZoneScopeMode {
- DELETE_ON_EXIT,
- DONT_DELETE_ON_EXIT
-};
-
-class Segment;
-
-// The Zone supports very fast allocation of small chunks of
-// memory. The chunks cannot be deallocated individually, but instead
-// the Zone supports deallocating all chunks in one fast
-// operation. The Zone is used to hold temporary data structures like
-// the abstract syntax tree, which is deallocated after compilation.
-
-// Note: There is no need to initialize the Zone; the first time an
-// allocation is attempted, a segment of memory will be requested
-// through a call to malloc().
-
-// Note: The implementation is inherently not thread safe. Do not use
-// from multi-threaded code.
-
-class Zone {
- public:
- // Allocate 'size' bytes of memory in the Zone; expands the Zone by
- // allocating new segments of memory on demand using malloc().
- inline void* New(int size);
-
- template <typename T>
- inline T* NewArray(int length);
-
- // Delete all objects and free all memory allocated in the Zone.
- void DeleteAll();
-
- // Returns true if more memory has been allocated in zones than
- // the limit allows.
- inline bool excess_allocation();
-
- inline void adjust_segment_bytes_allocated(int delta);
-
- static unsigned allocation_size_;
-
- private:
- friend class Isolate;
- friend class ZoneScope;
-
- // All pointers returned from New() have this alignment.
- static const int kAlignment = kPointerSize;
-
- // Never allocate segments smaller than this size in bytes.
- static const int kMinimumSegmentSize = 8 * KB;
-
- // Never allocate segments larger than this size in bytes.
- static const int kMaximumSegmentSize = 1 * MB;
-
- // Never keep segments larger than this size in bytes around.
- static const int kMaximumKeptSegmentSize = 64 * KB;
-
- // Report zone excess when allocation exceeds this limit.
- int zone_excess_limit_;
-
- // The number of bytes allocated in segments. Note that this number
- // includes memory allocated from the OS but not yet allocated from
- // the zone.
- int segment_bytes_allocated_;
-
- // Each isolate gets its own zone.
- Zone();
-
- // Expand the Zone to hold at least 'size' more bytes and allocate
- // the bytes. Returns the address of the newly allocated chunk of
- // memory in the Zone. Should only be called if there isn't enough
- // room in the Zone already.
- Address NewExpand(int size);
-
- // Creates a new segment, sets it size, and pushes it to the front
- // of the segment chain. Returns the new segment.
- Segment* NewSegment(int size);
-
- // Deletes the given segment. Does not touch the segment chain.
- void DeleteSegment(Segment* segment, int size);
-
- // The free region in the current (front) segment is represented as
- // the half-open interval [position, limit). The 'position' variable
- // is guaranteed to be aligned as dictated by kAlignment.
- Address position_;
- Address limit_;
-
- int scope_nesting_;
-
- Segment* segment_head_;
- Isolate* isolate_;
-};
-
-
-// ZoneObject is an abstraction that helps define classes of objects
-// allocated in the Zone. Use it as a base class; see ast.h.
-class ZoneObject {
- public:
- // Allocate a new ZoneObject of 'size' bytes in the Zone.
- inline void* operator new(size_t size);
- inline void* operator new(size_t size, Zone* zone);
-
- // Ideally, the delete operator should be private instead of
- // public, but unfortunately the compiler sometimes synthesizes
- // (unused) destructors for classes derived from ZoneObject, which
- // require the operator to be visible. MSVC requires the delete
- // operator to be public.
-
- // ZoneObjects should never be deleted individually; use
- // Zone::DeleteAll() to delete all zone objects in one go.
- void operator delete(void*, size_t) { UNREACHABLE(); }
-};
-
-
-class AssertNoZoneAllocation {
- public:
- inline AssertNoZoneAllocation();
- inline ~AssertNoZoneAllocation();
- private:
- bool prev_;
-};
-
-
-// The ZoneListAllocationPolicy is used to specialize the GenericList
-// implementation to allocate ZoneLists and their elements in the
-// Zone.
-class ZoneListAllocationPolicy {
- public:
- // Allocate 'size' bytes of memory in the zone.
- static inline void* New(int size);
-
- // De-allocation attempts are silently ignored.
- static void Delete(void* p) { }
-};
-
-
-// ZoneLists are growable lists with constant-time access to the
-// elements. The list itself and all its elements are allocated in the
-// Zone. ZoneLists cannot be deleted individually; you can delete all
-// objects in the Zone by calling Zone::DeleteAll().
-template<typename T>
-class ZoneList: public List<T, ZoneListAllocationPolicy> {
- public:
- // Construct a new ZoneList with the given capacity; the length is
- // always zero. The capacity must be non-negative.
- explicit ZoneList(int capacity)
- : List<T, ZoneListAllocationPolicy>(capacity) { }
-
- // Construct a new ZoneList by copying the elements of the given ZoneList.
- explicit ZoneList(const ZoneList<T>& other)
- : List<T, ZoneListAllocationPolicy>(other.length()) {
- AddAll(other);
- }
-};
-
-
-// Introduce a convenience type for zone lists of map handles.
-typedef ZoneList<Handle<Map> > ZoneMapList;
-
-
-// ZoneScopes keep track of the current parsing and compilation
-// nesting and cleans up generated ASTs in the Zone when exiting the
-// outer-most scope.
-class ZoneScope BASE_EMBEDDED {
- public:
- // TODO(isolates): pass isolate pointer here.
- inline explicit ZoneScope(ZoneScopeMode mode);
-
- virtual ~ZoneScope();
-
- inline bool ShouldDeleteOnExit();
-
- // For ZoneScopes that do not delete on exit by default, call this
- // method to request deletion on exit.
- void DeleteOnExit() {
- mode_ = DELETE_ON_EXIT;
- }
-
- inline static int nesting();
-
- private:
- Isolate* isolate_;
- ZoneScopeMode mode_;
-};
-
-
-// A zone splay tree. The config type parameter encapsulates the
-// different configurations of a concrete splay tree (see splay-tree.h).
-// The tree itself and all its elements are allocated in the Zone.
-template <typename Config>
-class ZoneSplayTree: public SplayTree<Config, ZoneListAllocationPolicy> {
- public:
- ZoneSplayTree()
- : SplayTree<Config, ZoneListAllocationPolicy>() {}
- ~ZoneSplayTree();
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_ZONE_H_
diff --git a/src/3rdparty/v8/tools/codemap.js b/src/3rdparty/v8/tools/codemap.js
deleted file mode 100644
index 71a99cc..0000000
--- a/src/3rdparty/v8/tools/codemap.js
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-/**
- * Constructs a mapper that maps addresses into code entries.
- *
- * @constructor
- */
-function CodeMap() {
- /**
- * Dynamic code entries. Used for JIT compiled code.
- */
- this.dynamics_ = new SplayTree();
-
- /**
- * Name generator for entries having duplicate names.
- */
- this.dynamicsNameGen_ = new CodeMap.NameGenerator();
-
- /**
- * Static code entries. Used for statically compiled code.
- */
- this.statics_ = new SplayTree();
-
- /**
- * Libraries entries. Used for the whole static code libraries.
- */
- this.libraries_ = new SplayTree();
-
- /**
- * Map of memory pages occupied with static code.
- */
- this.pages_ = [];
-};
-
-
-/**
- * The number of alignment bits in a page address.
- */
-CodeMap.PAGE_ALIGNMENT = 12;
-
-
-/**
- * Page size in bytes.
- */
-CodeMap.PAGE_SIZE =
- 1 << CodeMap.PAGE_ALIGNMENT;
-
-
-/**
- * Adds a dynamic (i.e. moveable and discardable) code entry.
- *
- * @param {number} start The starting address.
- * @param {CodeMap.CodeEntry} codeEntry Code entry object.
- */
-CodeMap.prototype.addCode = function(start, codeEntry) {
- this.dynamics_.insert(start, codeEntry);
-};
-
-
-/**
- * Moves a dynamic code entry. Throws an exception if there is no dynamic
- * code entry with the specified starting address.
- *
- * @param {number} from The starting address of the entry being moved.
- * @param {number} to The destination address.
- */
-CodeMap.prototype.moveCode = function(from, to) {
- var removedNode = this.dynamics_.remove(from);
- this.dynamics_.insert(to, removedNode.value);
-};
-
-
-/**
- * Discards a dynamic code entry. Throws an exception if there is no dynamic
- * code entry with the specified starting address.
- *
- * @param {number} start The starting address of the entry being deleted.
- */
-CodeMap.prototype.deleteCode = function(start) {
- var removedNode = this.dynamics_.remove(start);
-};
-
-
-/**
- * Adds a library entry.
- *
- * @param {number} start The starting address.
- * @param {CodeMap.CodeEntry} codeEntry Code entry object.
- */
-CodeMap.prototype.addLibrary = function(
- start, codeEntry) {
- this.markPages_(start, start + codeEntry.size);
- this.libraries_.insert(start, codeEntry);
-};
-
-
-/**
- * Adds a static code entry.
- *
- * @param {number} start The starting address.
- * @param {CodeMap.CodeEntry} codeEntry Code entry object.
- */
-CodeMap.prototype.addStaticCode = function(
- start, codeEntry) {
- this.statics_.insert(start, codeEntry);
-};
-
-
-/**
- * @private
- */
-CodeMap.prototype.markPages_ = function(start, end) {
- for (var addr = start; addr <= end;
- addr += CodeMap.PAGE_SIZE) {
- this.pages_[addr >>> CodeMap.PAGE_ALIGNMENT] = 1;
- }
-};
-
-
-/**
- * @private
- */
-CodeMap.prototype.isAddressBelongsTo_ = function(addr, node) {
- return addr >= node.key && addr < (node.key + node.value.size);
-};
-
-
-/**
- * @private
- */
-CodeMap.prototype.findInTree_ = function(tree, addr) {
- var node = tree.findGreatestLessThan(addr);
- return node && this.isAddressBelongsTo_(addr, node) ? node.value : null;
-};
-
-
-/**
- * Finds a code entry that contains the specified address. Both static and
- * dynamic code entries are considered.
- *
- * @param {number} addr Address.
- */
-CodeMap.prototype.findEntry = function(addr) {
- var pageAddr = addr >>> CodeMap.PAGE_ALIGNMENT;
- if (pageAddr in this.pages_) {
- // Static code entries can contain "holes" of unnamed code.
- // In this case, the whole library is assigned to this address.
- return this.findInTree_(this.statics_, addr) ||
- this.findInTree_(this.libraries_, addr);
- }
- var min = this.dynamics_.findMin();
- var max = this.dynamics_.findMax();
- if (max != null && addr < (max.key + max.value.size) && addr >= min.key) {
- var dynaEntry = this.findInTree_(this.dynamics_, addr);
- if (dynaEntry == null) return null;
- // Dedupe entry name.
- if (!dynaEntry.nameUpdated_) {
- dynaEntry.name = this.dynamicsNameGen_.getName(dynaEntry.name);
- dynaEntry.nameUpdated_ = true;
- }
- return dynaEntry;
- }
- return null;
-};
-
-
-/**
- * Returns a dynamic code entry using its starting address.
- *
- * @param {number} addr Address.
- */
-CodeMap.prototype.findDynamicEntryByStartAddress =
- function(addr) {
- var node = this.dynamics_.find(addr);
- return node ? node.value : null;
-};
-
-
-/**
- * Returns an array of all dynamic code entries.
- */
-CodeMap.prototype.getAllDynamicEntries = function() {
- return this.dynamics_.exportValues();
-};
-
-
-/**
- * Returns an array of all static code entries.
- */
-CodeMap.prototype.getAllStaticEntries = function() {
- return this.statics_.exportValues();
-};
-
-
-/**
- * Returns an array of all libraries entries.
- */
-CodeMap.prototype.getAllLibrariesEntries = function() {
- return this.libraries_.exportValues();
-};
-
-
-/**
- * Creates a code entry object.
- *
- * @param {number} size Code entry size in bytes.
- * @param {string} opt_name Code entry name.
- * @constructor
- */
-CodeMap.CodeEntry = function(size, opt_name) {
- this.size = size;
- this.name = opt_name || '';
- this.nameUpdated_ = false;
-};
-
-
-CodeMap.CodeEntry.prototype.getName = function() {
- return this.name;
-};
-
-
-CodeMap.CodeEntry.prototype.toString = function() {
- return this.name + ': ' + this.size.toString(16);
-};
-
-
-CodeMap.NameGenerator = function() {
- this.knownNames_ = {};
-};
-
-
-CodeMap.NameGenerator.prototype.getName = function(name) {
- if (!(name in this.knownNames_)) {
- this.knownNames_[name] = 0;
- return name;
- }
- var count = ++this.knownNames_[name];
- return name + ' {' + count + '}';
-};
diff --git a/src/3rdparty/v8/tools/consarray.js b/src/3rdparty/v8/tools/consarray.js
deleted file mode 100644
index c67abb7..0000000
--- a/src/3rdparty/v8/tools/consarray.js
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-/**
- * Constructs a ConsArray object. It is used mainly for tree traversal.
- * In this use case we have lots of arrays that we need to iterate
- * sequentally. The internal Array implementation is horribly slow
- * when concatenating on large (10K items) arrays due to memory copying.
- * That's why we avoid copying memory and insead build a linked list
- * of arrays to iterate through.
- *
- * @constructor
- */
-function ConsArray() {
- this.tail_ = new ConsArray.Cell(null, null);
- this.currCell_ = this.tail_;
- this.currCellPos_ = 0;
-};
-
-
-/**
- * Concatenates another array for iterating. Empty arrays are ignored.
- * This operation can be safely performed during ongoing ConsArray
- * iteration.
- *
- * @param {Array} arr Array to concatenate.
- */
-ConsArray.prototype.concat = function(arr) {
- if (arr.length > 0) {
- this.tail_.data = arr;
- this.tail_ = this.tail_.next = new ConsArray.Cell(null, null);
- }
-};
-
-
-/**
- * Whether the end of iteration is reached.
- */
-ConsArray.prototype.atEnd = function() {
- return this.currCell_ === null ||
- this.currCell_.data === null ||
- this.currCellPos_ >= this.currCell_.data.length;
-};
-
-
-/**
- * Returns the current item, moves to the next one.
- */
-ConsArray.prototype.next = function() {
- var result = this.currCell_.data[this.currCellPos_++];
- if (this.currCellPos_ >= this.currCell_.data.length) {
- this.currCell_ = this.currCell_.next;
- this.currCellPos_ = 0;
- }
- return result;
-};
-
-
-/**
- * A cell object used for constructing a list in ConsArray.
- *
- * @constructor
- */
-ConsArray.Cell = function(data, next) {
- this.data = data;
- this.next = next;
-};
-
diff --git a/src/3rdparty/v8/tools/csvparser.js b/src/3rdparty/v8/tools/csvparser.js
deleted file mode 100644
index c7d46b5..0000000
--- a/src/3rdparty/v8/tools/csvparser.js
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-/**
- * Creates a CSV lines parser.
- */
-function CsvParser() {
-};
-
-
-/**
- * A regex for matching a CSV field.
- * @private
- */
-CsvParser.CSV_FIELD_RE_ = /^"((?:[^"]|"")*)"|([^,]*)/;
-
-
-/**
- * A regex for matching a double quote.
- * @private
- */
-CsvParser.DOUBLE_QUOTE_RE_ = /""/g;
-
-
-/**
- * Parses a line of CSV-encoded values. Returns an array of fields.
- *
- * @param {string} line Input line.
- */
-CsvParser.prototype.parseLine = function(line) {
- var fieldRe = CsvParser.CSV_FIELD_RE_;
- var doubleQuoteRe = CsvParser.DOUBLE_QUOTE_RE_;
- var pos = 0;
- var endPos = line.length;
- var fields = [];
- if (endPos > 0) {
- do {
- var fieldMatch = fieldRe.exec(line.substr(pos));
- if (typeof fieldMatch[1] === "string") {
- var field = fieldMatch[1];
- pos += field.length + 3; // Skip comma and quotes.
- fields.push(field.replace(doubleQuoteRe, '"'));
- } else {
- // The second field pattern will match anything, thus
- // in the worst case the match will be an empty string.
- var field = fieldMatch[2];
- pos += field.length + 1; // Skip comma.
- fields.push(field);
- }
- } while (pos <= endPos);
- }
- return fields;
-};
diff --git a/src/3rdparty/v8/tools/disasm.py b/src/3rdparty/v8/tools/disasm.py
deleted file mode 100644
index c326382..0000000
--- a/src/3rdparty/v8/tools/disasm.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2011 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import os
-import re
-import subprocess
-import tempfile
-
-
-# Avoid using the slow (google-specific) wrapper around objdump.
-OBJDUMP_BIN = "/usr/bin/objdump"
-if not os.path.exists(OBJDUMP_BIN):
- OBJDUMP_BIN = "objdump"
-
-
-_COMMON_DISASM_OPTIONS = ["-M", "intel-mnemonic", "-C"]
-
-_DISASM_HEADER_RE = re.compile(r"[a-f0-9]+\s+<.*:$")
-_DISASM_LINE_RE = re.compile(r"\s*([a-f0-9]+):\s*(\S.*)")
-
-# Keys must match constants in Logger::LogCodeInfo.
-_ARCH_MAP = {
- "ia32": "-m i386",
- "x64": "-m i386 -M x86-64",
- "arm": "-m arm" # Not supported by our objdump build.
-}
-
-
-def GetDisasmLines(filename, offset, size, arch, inplace):
- tmp_name = None
- if not inplace:
- # Create a temporary file containing a copy of the code.
- assert arch in _ARCH_MAP, "Unsupported architecture '%s'" % arch
- arch_flags = _ARCH_MAP[arch]
- tmp_name = tempfile.mktemp(".v8code")
- command = "dd if=%s of=%s bs=1 count=%d skip=%d && " \
- "%s %s -D -b binary %s %s" % (
- filename, tmp_name, size, offset,
- OBJDUMP_BIN, ' '.join(_COMMON_DISASM_OPTIONS), arch_flags,
- tmp_name)
- else:
- command = "%s %s --start-address=%d --stop-address=%d -d %s " % (
- OBJDUMP_BIN, ' '.join(_COMMON_DISASM_OPTIONS),
- offset,
- offset + size,
- filename)
- process = subprocess.Popen(command,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- out, err = process.communicate()
- lines = out.split("\n")
- header_line = 0
- for i, line in enumerate(lines):
- if _DISASM_HEADER_RE.match(line):
- header_line = i
- break
- if tmp_name:
- os.unlink(tmp_name)
- split_lines = []
- for line in lines[header_line + 1:]:
- match = _DISASM_LINE_RE.match(line)
- if match:
- line_address = int(match.group(1), 16)
- split_lines.append((line_address, match.group(2)))
- return split_lines
diff --git a/src/3rdparty/v8/tools/freebsd-tick-processor b/src/3rdparty/v8/tools/freebsd-tick-processor
deleted file mode 100755
index 2bb2618..0000000
--- a/src/3rdparty/v8/tools/freebsd-tick-processor
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-
-# A wrapper script to call 'linux-tick-processor'.
-
-# Known issues on FreeBSD:
-# No ticks from C++ code.
-# You must have d8 built and in your path before calling this.
-
-tools_path=`cd $(dirname "$0");pwd`
-$tools_path/linux-tick-processor "$@"
diff --git a/src/3rdparty/v8/tools/gc-nvp-trace-processor.py b/src/3rdparty/v8/tools/gc-nvp-trace-processor.py
deleted file mode 100755
index 2c173ab..0000000
--- a/src/3rdparty/v8/tools/gc-nvp-trace-processor.py
+++ /dev/null
@@ -1,328 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2010 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-#
-# This is an utility for plotting charts based on GC traces produced by V8 when
-# run with flags --trace-gc --trace-gc-nvp. Relies on gnuplot for actual
-# plotting.
-#
-# Usage: gc-nvp-trace-processor.py <GC-trace-filename>
-#
-
-
-from __future__ import with_statement
-import sys, types, re, subprocess, math
-
-def flatten(l):
- flat = []
- for i in l: flat.extend(i)
- return flat
-
-def split_nvp(s):
- t = {}
- for (name, value) in re.findall(r"(\w+)=([-\w]+)", s):
- try:
- t[name] = int(value)
- except ValueError:
- t[name] = value
-
- return t
-
-def parse_gc_trace(input):
- trace = []
- with open(input) as f:
- for line in f:
- info = split_nvp(line)
- if info and 'pause' in info and info['pause'] > 0:
- info['i'] = len(trace)
- trace.append(info)
- return trace
-
-def extract_field_names(script):
- fields = { 'data': true, 'in': true }
-
- for m in re.finditer(r"$(\w+)", script):
- field_name = m.group(1)
- if field_name not in fields:
- fields[field] = field_count
- field_count = field_count + 1
-
- return fields
-
-def gnuplot(script):
- gnuplot = subprocess.Popen(["gnuplot"], stdin=subprocess.PIPE)
- gnuplot.stdin.write(script)
- gnuplot.stdin.close()
- gnuplot.wait()
-
-x1y1 = 'x1y1'
-x1y2 = 'x1y2'
-x2y1 = 'x2y1'
-x2y2 = 'x2y2'
-
-class Item(object):
- def __init__(self, title, field, axis = x1y1, **keywords):
- self.title = title
- self.axis = axis
- self.props = keywords
- if type(field) is types.ListType:
- self.field = field
- else:
- self.field = [field]
-
- def fieldrefs(self):
- return self.field
-
- def to_gnuplot(self, context):
- args = ['"%s"' % context.datafile,
- 'using %s' % context.format_fieldref(self.field),
- 'title "%s"' % self.title,
- 'axis %s' % self.axis]
- if 'style' in self.props:
- args.append('with %s' % self.props['style'])
- if 'lc' in self.props:
- args.append('lc rgb "%s"' % self.props['lc'])
- if 'fs' in self.props:
- args.append('fs %s' % self.props['fs'])
- return ' '.join(args)
-
-class Plot(object):
- def __init__(self, *items):
- self.items = items
-
- def fieldrefs(self):
- return flatten([item.fieldrefs() for item in self.items])
-
- def to_gnuplot(self, ctx):
- return 'plot ' + ', '.join([item.to_gnuplot(ctx) for item in self.items])
-
-class Set(object):
- def __init__(self, value):
- self.value = value
-
- def to_gnuplot(self, ctx):
- return 'set ' + self.value
-
- def fieldrefs(self):
- return []
-
-class Context(object):
- def __init__(self, datafile, field_to_index):
- self.datafile = datafile
- self.field_to_index = field_to_index
-
- def format_fieldref(self, fieldref):
- return ':'.join([str(self.field_to_index[field]) for field in fieldref])
-
-def collect_fields(plot):
- field_to_index = {}
- fields = []
-
- def add_field(field):
- if field not in field_to_index:
- fields.append(field)
- field_to_index[field] = len(fields)
-
- for field in flatten([item.fieldrefs() for item in plot]):
- add_field(field)
-
- return (fields, field_to_index)
-
-def is_y2_used(plot):
- for subplot in plot:
- if isinstance(subplot, Plot):
- for item in subplot.items:
- if item.axis == x1y2 or item.axis == x2y2:
- return True
- return False
-
-def get_field(trace_line, field):
- t = type(field)
- if t is types.StringType:
- return trace_line[field]
- elif t is types.FunctionType:
- return field(trace_line)
-
-def generate_datafile(datafile_name, trace, fields):
- with open(datafile_name, 'w') as datafile:
- for line in trace:
- data_line = [str(get_field(line, field)) for field in fields]
- datafile.write('\t'.join(data_line))
- datafile.write('\n')
-
-def generate_script_and_datafile(plot, trace, datafile, output):
- (fields, field_to_index) = collect_fields(plot)
- generate_datafile(datafile, trace, fields)
- script = [
- 'set terminal png',
- 'set output "%s"' % output,
- 'set autoscale',
- 'set ytics nomirror',
- 'set xtics nomirror',
- 'set key below'
- ]
-
- if is_y2_used(plot):
- script.append('set autoscale y2')
- script.append('set y2tics')
-
- context = Context(datafile, field_to_index)
-
- for item in plot:
- script.append(item.to_gnuplot(context))
-
- return '\n'.join(script)
-
-def plot_all(plots, trace, prefix):
- charts = []
-
- for plot in plots:
- outfilename = "%s_%d.png" % (prefix, len(charts))
- charts.append(outfilename)
- script = generate_script_and_datafile(plot, trace, '~datafile', outfilename)
- print 'Plotting %s...' % outfilename
- gnuplot(script)
-
- return charts
-
-def reclaimed_bytes(row):
- return row['total_size_before'] - row['total_size_after']
-
-def other_scope(r):
- return r['pause'] - r['mark'] - r['sweep'] - r['compact']
-
-plots = [
- [
- Set('style fill solid 0.5 noborder'),
- Set('style histogram rowstacked'),
- Set('style data histograms'),
- Plot(Item('Marking', 'mark', lc = 'purple'),
- Item('Sweep', 'sweep', lc = 'blue'),
- Item('Compaction', 'compact', lc = 'red'),
- Item('Other', other_scope, lc = 'grey'))
- ],
- [
- Set('style histogram rowstacked'),
- Set('style data histograms'),
- Plot(Item('Heap Size (before GC)', 'total_size_before', x1y2,
- fs = 'solid 0.4 noborder',
- lc = 'green'),
- Item('Total holes (after GC)', 'holes_size_before', x1y2,
- fs = 'solid 0.4 noborder',
- lc = 'red'),
- Item('GC Time', ['i', 'pause'], style = 'lines', lc = 'red'))
- ],
- [
- Set('style histogram rowstacked'),
- Set('style data histograms'),
- Plot(Item('Heap Size (after GC)', 'total_size_after', x1y2,
- fs = 'solid 0.4 noborder',
- lc = 'green'),
- Item('Total holes (after GC)', 'holes_size_after', x1y2,
- fs = 'solid 0.4 noborder',
- lc = 'red'),
- Item('GC Time', ['i', 'pause'],
- style = 'lines',
- lc = 'red'))
- ],
- [
- Set('style fill solid 0.5 noborder'),
- Set('style data histograms'),
- Plot(Item('Allocated', 'allocated'),
- Item('Reclaimed', reclaimed_bytes),
- Item('Promoted', 'promoted', style = 'lines', lc = 'black'))
- ],
-]
-
-def freduce(f, field, trace, init):
- return reduce(lambda t,r: f(t, r[field]), trace, init)
-
-def calc_total(trace, field):
- return freduce(lambda t,v: t + v, field, trace, 0)
-
-def calc_max(trace, field):
- return freduce(lambda t,r: max(t, r), field, trace, 0)
-
-def count_nonzero(trace, field):
- return freduce(lambda t,r: t if r == 0 else t + 1, field, trace, 0)
-
-
-def process_trace(filename):
- trace = parse_gc_trace(filename)
-
- marksweeps = filter(lambda r: r['gc'] == 'ms', trace)
- markcompacts = filter(lambda r: r['gc'] == 'mc', trace)
- scavenges = filter(lambda r: r['gc'] == 's', trace)
-
- charts = plot_all(plots, trace, filename)
-
- def stats(out, prefix, trace, field):
- n = len(trace)
- total = calc_total(trace, field)
- max = calc_max(trace, field)
- if n > 0:
- avg = total / n
- else:
- avg = 0
- if n > 1:
- dev = math.sqrt(freduce(lambda t,r: (r - avg) ** 2, field, trace, 0) /
- (n - 1))
- else:
- dev = 0
-
- out.write('<tr><td>%s</td><td>%d</td><td>%d</td>'
- '<td>%d</td><td>%d [dev %f]</td></tr>' %
- (prefix, n, total, max, avg, dev))
-
-
- with open(filename + '.html', 'w') as out:
- out.write('<html><body>')
- out.write('<table>')
- out.write('<tr><td>Phase</td><td>Count</td><td>Time (ms)</td>')
- out.write('<td>Max</td><td>Avg</td></tr>')
- stats(out, 'Total in GC', trace, 'pause')
- stats(out, 'Scavenge', scavenges, 'pause')
- stats(out, 'MarkSweep', marksweeps, 'pause')
- stats(out, 'MarkCompact', markcompacts, 'pause')
- stats(out, 'Mark', filter(lambda r: r['mark'] != 0, trace), 'mark')
- stats(out, 'Sweep', filter(lambda r: r['sweep'] != 0, trace), 'sweep')
- stats(out, 'Compact', filter(lambda r: r['compact'] != 0, trace), 'compact')
- out.write('</table>')
- for chart in charts:
- out.write('<img src="%s">' % chart)
- out.write('</body></html>')
-
- print "%s generated." % (filename + '.html')
-
-if len(sys.argv) != 2:
- print "Usage: %s <GC-trace-filename>" % sys.argv[0]
- sys.exit(1)
-
-process_trace(sys.argv[1])
diff --git a/src/3rdparty/v8/tools/generate-ten-powers.scm b/src/3rdparty/v8/tools/generate-ten-powers.scm
deleted file mode 100644
index eaeb7f4..0000000
--- a/src/3rdparty/v8/tools/generate-ten-powers.scm
+++ /dev/null
@@ -1,286 +0,0 @@
-;; Copyright 2010 the V8 project authors. All rights reserved.
-;; Redistribution and use in source and binary forms, with or without
-;; modification, are permitted provided that the following conditions are
-;; met:
-;;
-;; * Redistributions of source code must retain the above copyright
-;; notice, this list of conditions and the following disclaimer.
-;; * Redistributions in binary form must reproduce the above
-;; copyright notice, this list of conditions and the following
-;; disclaimer in the documentation and/or other materials provided
-;; with the distribution.
-;; * Neither the name of Google Inc. nor the names of its
-;; contributors may be used to endorse or promote products derived
-;; from this software without specific prior written permission.
-;;
-;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-;; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-;; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-;; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-;; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-;; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-;; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-;; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-;; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-;; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-;; This is a Scheme script for the Bigloo compiler. Bigloo must be compiled with
-;; support for bignums. The compilation of the script can be done as follows:
-;; bigloo -static-bigloo -o generate-ten-powers generate-ten-powers.scm
-;;
-;; Generate approximations of 10^k.
-
-(module gen-ten-powers
- (static (class Cached-Fast
- v::bignum
- e::bint
- exact?::bool))
- (main my-main))
-
-
-;;----------------bignum shifts -----------------------------------------------
-(define (bit-lshbx::bignum x::bignum by::bint)
- (if (<fx by 0)
- #z0
- (*bx x (exptbx #z2 (fixnum->bignum by)))))
-
-(define (bit-rshbx::bignum x::bignum by::bint)
- (if (<fx by 0)
- #z0
- (/bx x (exptbx #z2 (fixnum->bignum by)))))
-
-;;----------------the actual power generation -------------------------------
-
-;; e should be an indication. it might be too small.
-(define (round-n-cut n e nb-bits)
- (define max-container (- (bit-lshbx #z1 nb-bits) 1))
- (define (round n)
- (case *round*
- ((down) n)
- ((up)
- (+bx n
- ;; with the -1 it will only round up if the cut off part is
- ;; non-zero
- (-bx (bit-lshbx #z1
- (-fx (+fx e nb-bits) 1))
- #z1)))
- ((round)
- (+bx n
- (bit-lshbx #z1
- (-fx (+fx e nb-bits) 2))))))
- (let* ((shift (-fx (+fx e nb-bits) 1))
- (cut (bit-rshbx (round n) shift))
- (exact? (=bx n (bit-lshbx cut shift))))
- (if (<=bx cut max-container)
- (values cut e exact?)
- (round-n-cut n (+fx e 1) nb-bits))))
-
-(define (rounded-/bx x y)
- (case *round*
- ((down) (/bx x y))
- ((up) (+bx (/bx x y) #z1))
- ((round) (let ((tmp (/bx (*bx #z2 x) y)))
- (if (zerobx? (remainderbx tmp #z2))
- (/bx tmp #z2)
- (+bx (/bx tmp #z2) #z1))))))
-
-(define (generate-powers from to mantissa-size)
- (let* ((nb-bits mantissa-size)
- (offset (- from))
- (nb-elements (+ (- from) to 1))
- (vec (make-vector nb-elements))
- (max-container (- (bit-lshbx #z1 nb-bits) 1)))
- ;; the negative ones. 10^-1, 10^-2, etc.
- ;; We already know, that we can't be exact, so exact? will always be #f.
- ;; Basically we will have a ten^i that we will *10 at each iteration. We
- ;; want to create the matissa of 1/ten^i. However the mantissa must be
- ;; normalized (start with a 1). -> we have to shift the number.
- ;; We shift by multiplying with two^e. -> We encode two^e*(1/ten^i) ==
- ;; two^e/ten^i.
- (let loop ((i 1)
- (ten^i #z10)
- (two^e #z1)
- (e 0))
- (unless (< (- i) from)
- (if (>bx (/bx (*bx #z2 two^e) ten^i) max-container)
- ;; another shift would make the number too big. We are
- ;; hence normalized now.
- (begin
- (vector-set! vec (-fx offset i)
- (instantiate::Cached-Fast
- (v (rounded-/bx two^e ten^i))
- (e (negfx e))
- (exact? #f)))
- (loop (+fx i 1) (*bx ten^i #z10) two^e e))
- (loop i ten^i (bit-lshbx two^e 1) (+fx e 1)))))
- ;; the positive ones 10^0, 10^1, etc.
- ;; start with 1.0. mantissa: 10...0 (1 followed by nb-bits-1 bits)
- ;; -> e = -(nb-bits-1)
- ;; exact? is true when the container can still hold the complete 10^i
- (let loop ((i 0)
- (n (bit-lshbx #z1 (-fx nb-bits 1)))
- (e (-fx 1 nb-bits)))
- (when (<= i to)
- (receive (cut e exact?)
- (round-n-cut n e nb-bits)
- (vector-set! vec (+fx i offset)
- (instantiate::Cached-Fast
- (v cut)
- (e e)
- (exact? exact?)))
- (loop (+fx i 1) (*bx n #z10) e))))
- vec))
-
-(define (print-c powers from to struct-type
- cache-name max-distance-name offset-name macro64)
- (define (display-power power k)
- (with-access::Cached-Fast power (v e exact?)
- (let ((tmp-p (open-output-string)))
- ;; really hackish way of getting the digits
- (display (format "~x" v) tmp-p)
- (let ((str (close-output-port tmp-p)))
- (printf " {~a(0x~a, ~a), ~a, ~a},\n"
- macro64
- (substring str 0 8)
- (substring str 8 16)
- e
- k)))))
- (define (print-powers-reduced n)
- (print "static const " struct-type " " cache-name
- "(" n ")"
- "[] = {")
- (let loop ((i 0)
- (nb-elements 0)
- (last-e 0)
- (max-distance 0))
- (cond
- ((>= i (vector-length powers))
- (print " };")
- (print "static const int " max-distance-name "(" n ") = "
- max-distance ";")
- (print "// nb elements (" n "): " nb-elements))
- (else
- (let* ((power (vector-ref powers i))
- (e (Cached-Fast-e power)))
- (display-power power (+ i from))
- (loop (+ i n)
- (+ nb-elements 1)
- e
- (cond
- ((=fx i 0) max-distance)
- ((> (- e last-e) max-distance) (- e last-e))
- (else max-distance))))))))
- (print "// Copyright 2010 the V8 project authors. All rights reserved.")
- (print "// ------------ GENERATED FILE ----------------")
- (print "// command used:")
- (print "// "
- (apply string-append (map (lambda (str)
- (string-append " " str))
- *main-args*))
- " // NOLINT")
- (print)
- (print
- "// This file is intended to be included inside another .h or .cc files\n"
- "// with the following defines set:\n"
- "// GRISU_CACHE_STRUCT: should expand to the name of a struct that will\n"
- "// hold the cached powers of ten. Each entry will hold a 64-bit\n"
- "// significand, a 16-bit signed binary exponent, and a 16-bit\n"
- "// signed decimal exponent. Each entry will be constructed as follows:\n"
- "// { significand, binary_exponent, decimal_exponent }.\n"
- "// GRISU_CACHE_NAME(i): generates the name for the different caches.\n"
- "// The parameter i will be a number in the range 1-20. A cache will\n"
- "// hold every i'th element of a full cache. GRISU_CACHE_NAME(1) will\n"
- "// thus hold all elements. The higher i the fewer elements it has.\n"
- "// Ideally the user should only reference one cache and let the\n"
- "// compiler remove the unused ones.\n"
- "// GRISU_CACHE_MAX_DISTANCE(i): generates the name for the maximum\n"
- "// binary exponent distance between all elements of a given cache.\n"
- "// GRISU_CACHE_OFFSET: is used as variable name for the decimal\n"
- "// exponent offset. It is equal to -cache[0].decimal_exponent.\n"
- "// GRISU_UINT64_C: used to construct 64-bit values in a platform\n"
- "// independent way. In order to encode 0x123456789ABCDEF0 the macro\n"
- "// will be invoked as follows: GRISU_UINT64_C(0x12345678,9ABCDEF0).\n")
- (print)
- (print-powers-reduced 1)
- (print-powers-reduced 2)
- (print-powers-reduced 3)
- (print-powers-reduced 4)
- (print-powers-reduced 5)
- (print-powers-reduced 6)
- (print-powers-reduced 7)
- (print-powers-reduced 8)
- (print-powers-reduced 9)
- (print-powers-reduced 10)
- (print-powers-reduced 11)
- (print-powers-reduced 12)
- (print-powers-reduced 13)
- (print-powers-reduced 14)
- (print-powers-reduced 15)
- (print-powers-reduced 16)
- (print-powers-reduced 17)
- (print-powers-reduced 18)
- (print-powers-reduced 19)
- (print-powers-reduced 20)
- (print "static const int GRISU_CACHE_OFFSET = " (- from) ";"))
-
-;;----------------main --------------------------------------------------------
-(define *main-args* #f)
-(define *mantissa-size* #f)
-(define *dest* #f)
-(define *round* #f)
-(define *from* #f)
-(define *to* #f)
-
-(define (my-main args)
- (set! *main-args* args)
- (args-parse (cdr args)
- (section "Help")
- (("?") (args-parse-usage #f))
- ((("-h" "--help") (help "?, -h, --help" "This help message"))
- (args-parse-usage #f))
- (section "Misc")
- (("-o" ?file (help "The output file"))
- (set! *dest* file))
- (("--mantissa-size" ?size (help "Container-size in bits"))
- (set! *mantissa-size* (string->number size)))
- (("--round" ?direction (help "Round bignums (down, round or up)"))
- (set! *round* (string->symbol direction)))
- (("--from" ?from (help "start at 10^from"))
- (set! *from* (string->number from)))
- (("--to" ?to (help "go up to 10^to"))
- (set! *to* (string->number to)))
- (else
- (print "Illegal argument `" else "'. Usage:")
- (args-parse-usage #f)))
- (when (not *from*)
- (error "generate-ten-powers"
- "Missing from"
- #f))
- (when (not *to*)
- (error "generate-ten-powers"
- "Missing to"
- #f))
- (when (not *mantissa-size*)
- (error "generate-ten-powers"
- "Missing mantissa size"
- #f))
- (when (not (memv *round* '(up down round)))
- (error "generate-ten-powers"
- "Missing round-method"
- *round*))
-
- (let ((dividers (generate-powers *from* *to* *mantissa-size*))
- (p (if (not *dest*)
- (current-output-port)
- (open-output-file *dest*))))
- (unwind-protect
- (with-output-to-port p
- (lambda ()
- (print-c dividers *from* *to*
- "GRISU_CACHE_STRUCT" "GRISU_CACHE_NAME"
- "GRISU_CACHE_MAX_DISTANCE" "GRISU_CACHE_OFFSET"
- "GRISU_UINT64_C"
- )))
- (if *dest*
- (close-output-port p)))))
diff --git a/src/3rdparty/v8/tools/grokdump.py b/src/3rdparty/v8/tools/grokdump.py
deleted file mode 100755
index de681b2..0000000
--- a/src/3rdparty/v8/tools/grokdump.py
+++ /dev/null
@@ -1,840 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2011 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import ctypes
-import mmap
-import optparse
-import os
-import disasm
-import sys
-import types
-import codecs
-import re
-
-
-USAGE="""usage: %prog [OPTION]...
-
-Minidump analyzer.
-
-Shows the processor state at the point of exception including the
-stack of the active thread and the referenced objects in the V8
-heap. Code objects are disassembled and the addresses linked from the
-stack (pushed return addresses) are marked with "=>".
-
-
-Examples:
- $ %prog 12345678-1234-1234-1234-123456789abcd-full.dmp
-"""
-
-DEBUG=False
-
-
-def DebugPrint(s):
- if not DEBUG: return
- print s
-
-
-class Descriptor(object):
- """Descriptor of a structure in a memory."""
-
- def __init__(self, fields):
- self.fields = fields
- self.is_flexible = False
- for _, type_or_func in fields:
- if isinstance(type_or_func, types.FunctionType):
- self.is_flexible = True
- break
- if not self.is_flexible:
- self.ctype = Descriptor._GetCtype(fields)
- self.size = ctypes.sizeof(self.ctype)
-
- def Read(self, memory, offset):
- if self.is_flexible:
- fields_copy = self.fields[:]
- last = 0
- for name, type_or_func in fields_copy:
- if isinstance(type_or_func, types.FunctionType):
- partial_ctype = Descriptor._GetCtype(fields_copy[:last])
- partial_object = partial_ctype.from_buffer(memory, offset)
- type = type_or_func(partial_object)
- if type is not None:
- fields_copy[last] = (name, type)
- last += 1
- else:
- last += 1
- complete_ctype = Descriptor._GetCtype(fields_copy[:last])
- else:
- complete_ctype = self.ctype
- return complete_ctype.from_buffer(memory, offset)
-
- @staticmethod
- def _GetCtype(fields):
- class Raw(ctypes.Structure):
- _fields_ = fields
- _pack_ = 1
-
- def __str__(self):
- return "{" + ", ".join("%s: %s" % (field, self.__getattribute__(field))
- for field, _ in Raw._fields_) + "}"
- return Raw
-
-
-# Set of structures and constants that describe the layout of minidump
-# files. Based on MSDN and Google Breakpad.
-
-MINIDUMP_HEADER = Descriptor([
- ("signature", ctypes.c_uint32),
- ("version", ctypes.c_uint32),
- ("stream_count", ctypes.c_uint32),
- ("stream_directories_rva", ctypes.c_uint32),
- ("checksum", ctypes.c_uint32),
- ("time_date_stampt", ctypes.c_uint32),
- ("flags", ctypes.c_uint64)
-])
-
-MINIDUMP_LOCATION_DESCRIPTOR = Descriptor([
- ("data_size", ctypes.c_uint32),
- ("rva", ctypes.c_uint32)
-])
-
-MINIDUMP_DIRECTORY = Descriptor([
- ("stream_type", ctypes.c_uint32),
- ("location", MINIDUMP_LOCATION_DESCRIPTOR.ctype)
-])
-
-MD_EXCEPTION_MAXIMUM_PARAMETERS = 15
-
-MINIDUMP_EXCEPTION = Descriptor([
- ("code", ctypes.c_uint32),
- ("flags", ctypes.c_uint32),
- ("record", ctypes.c_uint64),
- ("address", ctypes.c_uint64),
- ("parameter_count", ctypes.c_uint32),
- ("unused_alignment", ctypes.c_uint32),
- ("information", ctypes.c_uint64 * MD_EXCEPTION_MAXIMUM_PARAMETERS)
-])
-
-MINIDUMP_EXCEPTION_STREAM = Descriptor([
- ("thread_id", ctypes.c_uint32),
- ("unused_alignment", ctypes.c_uint32),
- ("exception", MINIDUMP_EXCEPTION.ctype),
- ("thread_context", MINIDUMP_LOCATION_DESCRIPTOR.ctype)
-])
-
-# Stream types.
-MD_UNUSED_STREAM = 0
-MD_RESERVED_STREAM_0 = 1
-MD_RESERVED_STREAM_1 = 2
-MD_THREAD_LIST_STREAM = 3
-MD_MODULE_LIST_STREAM = 4
-MD_MEMORY_LIST_STREAM = 5
-MD_EXCEPTION_STREAM = 6
-MD_SYSTEM_INFO_STREAM = 7
-MD_THREAD_EX_LIST_STREAM = 8
-MD_MEMORY_64_LIST_STREAM = 9
-MD_COMMENT_STREAM_A = 10
-MD_COMMENT_STREAM_W = 11
-MD_HANDLE_DATA_STREAM = 12
-MD_FUNCTION_TABLE_STREAM = 13
-MD_UNLOADED_MODULE_LIST_STREAM = 14
-MD_MISC_INFO_STREAM = 15
-MD_MEMORY_INFO_LIST_STREAM = 16
-MD_THREAD_INFO_LIST_STREAM = 17
-MD_HANDLE_OPERATION_LIST_STREAM = 18
-
-MD_FLOATINGSAVEAREA_X86_REGISTERAREA_SIZE = 80
-
-MINIDUMP_FLOATING_SAVE_AREA_X86 = Descriptor([
- ("control_word", ctypes.c_uint32),
- ("status_word", ctypes.c_uint32),
- ("tag_word", ctypes.c_uint32),
- ("error_offset", ctypes.c_uint32),
- ("error_selector", ctypes.c_uint32),
- ("data_offset", ctypes.c_uint32),
- ("data_selector", ctypes.c_uint32),
- ("register_area", ctypes.c_uint8 * MD_FLOATINGSAVEAREA_X86_REGISTERAREA_SIZE),
- ("cr0_npx_state", ctypes.c_uint32)
-])
-
-MD_CONTEXT_X86_EXTENDED_REGISTERS_SIZE = 512
-
-# Context flags.
-MD_CONTEXT_X86 = 0x00010000
-MD_CONTEXT_X86_CONTROL = (MD_CONTEXT_X86 | 0x00000001)
-MD_CONTEXT_X86_INTEGER = (MD_CONTEXT_X86 | 0x00000002)
-MD_CONTEXT_X86_SEGMENTS = (MD_CONTEXT_X86 | 0x00000004)
-MD_CONTEXT_X86_FLOATING_POINT = (MD_CONTEXT_X86 | 0x00000008)
-MD_CONTEXT_X86_DEBUG_REGISTERS = (MD_CONTEXT_X86 | 0x00000010)
-MD_CONTEXT_X86_EXTENDED_REGISTERS = (MD_CONTEXT_X86 | 0x00000020)
-
-def EnableOnFlag(type, flag):
- return lambda o: [None, type][int((o.context_flags & flag) != 0)]
-
-MINIDUMP_CONTEXT_X86 = Descriptor([
- ("context_flags", ctypes.c_uint32),
- # MD_CONTEXT_X86_DEBUG_REGISTERS.
- ("dr0", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)),
- ("dr1", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)),
- ("dr2", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)),
- ("dr3", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)),
- ("dr6", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)),
- ("dr7", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)),
- # MD_CONTEXT_X86_FLOATING_POINT.
- ("float_save", EnableOnFlag(MINIDUMP_FLOATING_SAVE_AREA_X86.ctype,
- MD_CONTEXT_X86_FLOATING_POINT)),
- # MD_CONTEXT_X86_SEGMENTS.
- ("gs", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_SEGMENTS)),
- ("fs", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_SEGMENTS)),
- ("es", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_SEGMENTS)),
- ("ds", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_SEGMENTS)),
- # MD_CONTEXT_X86_INTEGER.
- ("edi", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)),
- ("esi", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)),
- ("ebx", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)),
- ("edx", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)),
- ("ecx", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)),
- ("eax", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)),
- # MD_CONTEXT_X86_CONTROL.
- ("ebp", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)),
- ("eip", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)),
- ("cs", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)),
- ("eflags", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)),
- ("esp", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)),
- ("ss", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)),
- # MD_CONTEXT_X86_EXTENDED_REGISTERS.
- ("extended_registers",
- EnableOnFlag(ctypes.c_uint8 * MD_CONTEXT_X86_EXTENDED_REGISTERS_SIZE,
- MD_CONTEXT_X86_EXTENDED_REGISTERS))
-])
-
-MINIDUMP_MEMORY_DESCRIPTOR = Descriptor([
- ("start", ctypes.c_uint64),
- ("memory", MINIDUMP_LOCATION_DESCRIPTOR.ctype)
-])
-
-MINIDUMP_MEMORY_DESCRIPTOR64 = Descriptor([
- ("start", ctypes.c_uint64),
- ("size", ctypes.c_uint64)
-])
-
-MINIDUMP_MEMORY_LIST = Descriptor([
- ("range_count", ctypes.c_uint32),
- ("ranges", lambda m: MINIDUMP_MEMORY_DESCRIPTOR.ctype * m.range_count)
-])
-
-MINIDUMP_MEMORY_LIST64 = Descriptor([
- ("range_count", ctypes.c_uint64),
- ("base_rva", ctypes.c_uint64),
- ("ranges", lambda m: MINIDUMP_MEMORY_DESCRIPTOR64.ctype * m.range_count)
-])
-
-MINIDUMP_THREAD = Descriptor([
- ("id", ctypes.c_uint32),
- ("suspend_count", ctypes.c_uint32),
- ("priority_class", ctypes.c_uint32),
- ("priority", ctypes.c_uint32),
- ("ted", ctypes.c_uint64),
- ("stack", MINIDUMP_MEMORY_DESCRIPTOR.ctype),
- ("context", MINIDUMP_LOCATION_DESCRIPTOR.ctype)
-])
-
-MINIDUMP_THREAD_LIST = Descriptor([
- ("thread_count", ctypes.c_uint32),
- ("threads", lambda t: MINIDUMP_THREAD.ctype * t.thread_count)
-])
-
-
-class MinidumpReader(object):
- """Minidump (.dmp) reader."""
-
- _HEADER_MAGIC = 0x504d444d
-
- def __init__(self, options, minidump_name):
- self.minidump_name = minidump_name
- self.minidump_file = open(minidump_name, "r")
- self.minidump = mmap.mmap(self.minidump_file.fileno(), 0, mmap.MAP_PRIVATE)
- self.header = MINIDUMP_HEADER.Read(self.minidump, 0)
- if self.header.signature != MinidumpReader._HEADER_MAGIC:
- print >>sys.stderr, "Warning: unsupported minidump header magic"
- DebugPrint(self.header)
- directories = []
- offset = self.header.stream_directories_rva
- for _ in xrange(self.header.stream_count):
- directories.append(MINIDUMP_DIRECTORY.Read(self.minidump, offset))
- offset += MINIDUMP_DIRECTORY.size
- self.exception = None
- self.exception_context = None
- self.memory_list = None
- self.thread_map = {}
- for d in directories:
- DebugPrint(d)
- # TODO(vitalyr): extract system info including CPU features.
- if d.stream_type == MD_EXCEPTION_STREAM:
- self.exception = MINIDUMP_EXCEPTION_STREAM.Read(
- self.minidump, d.location.rva)
- DebugPrint(self.exception)
- self.exception_context = MINIDUMP_CONTEXT_X86.Read(
- self.minidump, self.exception.thread_context.rva)
- DebugPrint(self.exception_context)
- elif d.stream_type == MD_THREAD_LIST_STREAM:
- thread_list = MINIDUMP_THREAD_LIST.Read(self.minidump, d.location.rva)
- assert ctypes.sizeof(thread_list) == d.location.data_size
- DebugPrint(thread_list)
- for thread in thread_list.threads:
- DebugPrint(thread)
- self.thread_map[thread.id] = thread
- elif d.stream_type == MD_MEMORY_LIST_STREAM:
- print >>sys.stderr, "Warning: not a full minidump"
- ml = MINIDUMP_MEMORY_LIST.Read(self.minidump, d.location.rva)
- DebugPrint(ml)
- for m in ml.ranges:
- DebugPrint(m)
- elif d.stream_type == MD_MEMORY_64_LIST_STREAM:
- assert self.memory_list is None
- self.memory_list = MINIDUMP_MEMORY_LIST64.Read(
- self.minidump, d.location.rva)
- assert ctypes.sizeof(self.memory_list) == d.location.data_size
- DebugPrint(self.memory_list)
-
- def IsValidAddress(self, address):
- return self.FindLocation(address) is not None
-
- def ReadU8(self, address):
- location = self.FindLocation(address)
- return ctypes.c_uint8.from_buffer(self.minidump, location).value
-
- def ReadU32(self, address):
- location = self.FindLocation(address)
- return ctypes.c_uint32.from_buffer(self.minidump, location).value
-
- def ReadBytes(self, address, size):
- location = self.FindLocation(address)
- return self.minidump[location:location + size]
-
- def FindLocation(self, address):
- # TODO(vitalyr): only works for full minidumps (...64 structure variants).
- offset = 0
- for r in self.memory_list.ranges:
- if r.start <= address < r.start + r.size:
- return self.memory_list.base_rva + offset + address - r.start
- offset += r.size
- return None
-
- def GetDisasmLines(self, address, size):
- location = self.FindLocation(address)
- if location is None: return []
- return disasm.GetDisasmLines(self.minidump_name,
- location,
- size,
- "ia32",
- False)
-
-
- def Dispose(self):
- self.minidump.close()
- self.minidump_file.close()
-
-
-# List of V8 instance types. Obtained by adding the code below to any .cc file.
-#
-# #define DUMP_TYPE(T) printf("%d: \"%s\",\n", T, #T);
-# struct P {
-# P() {
-# printf("{\n");
-# INSTANCE_TYPE_LIST(DUMP_TYPE)
-# printf("}\n");
-# }
-# };
-# static P p;
-INSTANCE_TYPES = {
- 64: "SYMBOL_TYPE",
- 68: "ASCII_SYMBOL_TYPE",
- 65: "CONS_SYMBOL_TYPE",
- 69: "CONS_ASCII_SYMBOL_TYPE",
- 66: "EXTERNAL_SYMBOL_TYPE",
- 74: "EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE",
- 70: "EXTERNAL_ASCII_SYMBOL_TYPE",
- 0: "STRING_TYPE",
- 4: "ASCII_STRING_TYPE",
- 1: "CONS_STRING_TYPE",
- 5: "CONS_ASCII_STRING_TYPE",
- 2: "EXTERNAL_STRING_TYPE",
- 10: "EXTERNAL_STRING_WITH_ASCII_DATA_TYPE",
- 6: "EXTERNAL_ASCII_STRING_TYPE",
- 6: "PRIVATE_EXTERNAL_ASCII_STRING_TYPE",
- 128: "MAP_TYPE",
- 129: "CODE_TYPE",
- 130: "ODDBALL_TYPE",
- 131: "JS_GLOBAL_PROPERTY_CELL_TYPE",
- 132: "HEAP_NUMBER_TYPE",
- 133: "PROXY_TYPE",
- 134: "BYTE_ARRAY_TYPE",
- 135: "PIXEL_ARRAY_TYPE",
- 136: "EXTERNAL_BYTE_ARRAY_TYPE",
- 137: "EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE",
- 138: "EXTERNAL_SHORT_ARRAY_TYPE",
- 139: "EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE",
- 140: "EXTERNAL_INT_ARRAY_TYPE",
- 141: "EXTERNAL_UNSIGNED_INT_ARRAY_TYPE",
- 142: "EXTERNAL_FLOAT_ARRAY_TYPE",
- 143: "FILLER_TYPE",
- 144: "ACCESSOR_INFO_TYPE",
- 145: "ACCESS_CHECK_INFO_TYPE",
- 146: "INTERCEPTOR_INFO_TYPE",
- 147: "CALL_HANDLER_INFO_TYPE",
- 148: "FUNCTION_TEMPLATE_INFO_TYPE",
- 149: "OBJECT_TEMPLATE_INFO_TYPE",
- 150: "SIGNATURE_INFO_TYPE",
- 151: "TYPE_SWITCH_INFO_TYPE",
- 152: "SCRIPT_TYPE",
- 153: "CODE_CACHE_TYPE",
- 156: "FIXED_ARRAY_TYPE",
- 157: "SHARED_FUNCTION_INFO_TYPE",
- 158: "JS_MESSAGE_OBJECT_TYPE",
- 159: "JS_VALUE_TYPE",
- 160: "JS_OBJECT_TYPE",
- 161: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 162: "JS_GLOBAL_OBJECT_TYPE",
- 163: "JS_BUILTINS_OBJECT_TYPE",
- 164: "JS_GLOBAL_PROXY_TYPE",
- 165: "JS_ARRAY_TYPE",
- 166: "JS_REGEXP_TYPE",
- 167: "JS_FUNCTION_TYPE",
- 154: "DEBUG_INFO_TYPE",
- 155: "BREAK_POINT_INFO_TYPE",
-}
-
-
-class Printer(object):
- """Printer with indentation support."""
-
- def __init__(self):
- self.indent = 0
-
- def Indent(self):
- self.indent += 2
-
- def Dedent(self):
- self.indent -= 2
-
- def Print(self, string):
- print "%s%s" % (self._IndentString(), string)
-
- def PrintLines(self, lines):
- indent = self._IndentString()
- print "\n".join("%s%s" % (indent, line) for line in lines)
-
- def _IndentString(self):
- return self.indent * " "
-
-
-ADDRESS_RE = re.compile(r"0x[0-9a-fA-F]+")
-
-
-def FormatDisasmLine(start, heap, line):
- line_address = start + line[0]
- stack_slot = heap.stack_map.get(line_address)
- marker = " "
- if stack_slot:
- marker = "=>"
- code = AnnotateAddresses(heap, line[1])
- return "%s%08x %08x: %s" % (marker, line_address, line[0], code)
-
-
-def AnnotateAddresses(heap, line):
- extra = []
- for m in ADDRESS_RE.finditer(line):
- maybe_address = int(m.group(0), 16)
- object = heap.FindObject(maybe_address)
- if not object: continue
- extra.append(str(object))
- if len(extra) == 0: return line
- return "%s ;; %s" % (line, ", ".join(extra))
-
-
-class HeapObject(object):
- def __init__(self, heap, map, address):
- self.heap = heap
- self.map = map
- self.address = address
-
- def Is(self, cls):
- return isinstance(self, cls)
-
- def Print(self, p):
- p.Print(str(self))
-
- def __str__(self):
- return "HeapObject(%08x, %s)" % (self.address,
- INSTANCE_TYPES[self.map.instance_type])
-
- def ObjectField(self, offset):
- field_value = self.heap.reader.ReadU32(self.address + offset)
- return self.heap.FindObjectOrSmi(field_value)
-
- def SmiField(self, offset):
- field_value = self.heap.reader.ReadU32(self.address + offset)
- assert (field_value & 1) == 0
- return field_value / 2
-
-
-class Map(HeapObject):
- INSTANCE_TYPE_OFFSET = 8
-
- def __init__(self, heap, map, address):
- HeapObject.__init__(self, heap, map, address)
- self.instance_type = \
- heap.reader.ReadU8(self.address + Map.INSTANCE_TYPE_OFFSET)
-
-
-class String(HeapObject):
- LENGTH_OFFSET = 4
-
- def __init__(self, heap, map, address):
- HeapObject.__init__(self, heap, map, address)
- self.length = self.SmiField(String.LENGTH_OFFSET)
-
- def GetChars(self):
- return "?string?"
-
- def Print(self, p):
- p.Print(str(self))
-
- def __str__(self):
- return "\"%s\"" % self.GetChars()
-
-
-class SeqString(String):
- CHARS_OFFSET = 12
-
- def __init__(self, heap, map, address):
- String.__init__(self, heap, map, address)
- self.chars = heap.reader.ReadBytes(self.address + SeqString.CHARS_OFFSET,
- self.length)
-
- def GetChars(self):
- return self.chars
-
-
-class ExternalString(String):
- RESOURCE_OFFSET = 12
-
- WEBKIT_RESOUCE_STRING_IMPL_OFFSET = 4
- WEBKIT_STRING_IMPL_CHARS_OFFSET = 8
-
- def __init__(self, heap, map, address):
- String.__init__(self, heap, map, address)
- reader = heap.reader
- self.resource = \
- reader.ReadU32(self.address + ExternalString.RESOURCE_OFFSET)
- self.chars = "?external string?"
- if not reader.IsValidAddress(self.resource): return
- string_impl_address = self.resource + \
- ExternalString.WEBKIT_RESOUCE_STRING_IMPL_OFFSET
- if not reader.IsValidAddress(string_impl_address): return
- string_impl = reader.ReadU32(string_impl_address)
- chars_ptr_address = string_impl + \
- ExternalString.WEBKIT_STRING_IMPL_CHARS_OFFSET
- if not reader.IsValidAddress(chars_ptr_address): return
- chars_ptr = reader.ReadU32(chars_ptr_address)
- if not reader.IsValidAddress(chars_ptr): return
- raw_chars = reader.ReadBytes(chars_ptr, 2 * self.length)
- self.chars = codecs.getdecoder("utf16")(raw_chars)[0]
-
- def GetChars(self):
- return self.chars
-
-
-class ConsString(String):
- LEFT_OFFSET = 12
- RIGHT_OFFSET = 16
-
- def __init__(self, heap, map, address):
- String.__init__(self, heap, map, address)
- self.left = self.ObjectField(ConsString.LEFT_OFFSET)
- self.right = self.ObjectField(ConsString.RIGHT_OFFSET)
-
- def GetChars(self):
- return self.left.GetChars() + self.right.GetChars()
-
-
-class Oddball(HeapObject):
- TO_STRING_OFFSET = 4
-
- def __init__(self, heap, map, address):
- HeapObject.__init__(self, heap, map, address)
- self.to_string = self.ObjectField(Oddball.TO_STRING_OFFSET)
-
- def Print(self, p):
- p.Print(str(self))
-
- def __str__(self):
- return "<%s>" % self.to_string.GetChars()
-
-
-class FixedArray(HeapObject):
- LENGTH_OFFSET = 4
- ELEMENTS_OFFSET = 8
-
- def __init__(self, heap, map, address):
- HeapObject.__init__(self, heap, map, address)
- self.length = self.SmiField(FixedArray.LENGTH_OFFSET)
-
- def Print(self, p):
- p.Print("FixedArray(%08x) {" % self.address)
- p.Indent()
- p.Print("length: %d" % self.length)
- for i in xrange(self.length):
- offset = FixedArray.ELEMENTS_OFFSET + 4 * i
- p.Print("[%08d] = %s" % (i, self.ObjectField(offset)))
- p.Dedent()
- p.Print("}")
-
- def __str__(self):
- return "FixedArray(%08x, length=%d)" % (self.address, self.length)
-
-
-class JSFunction(HeapObject):
- CODE_ENTRY_OFFSET = 12
- SHARED_OFFSET = 20
-
- def __init__(self, heap, map, address):
- HeapObject.__init__(self, heap, map, address)
- code_entry = \
- heap.reader.ReadU32(self.address + JSFunction.CODE_ENTRY_OFFSET)
- self.code = heap.FindObject(code_entry - Code.ENTRY_OFFSET + 1)
- self.shared = self.ObjectField(JSFunction.SHARED_OFFSET)
-
- def Print(self, p):
- source = "\n".join(" %s" % line for line in self._GetSource().split("\n"))
- p.Print("JSFunction(%08x) {" % self.address)
- p.Indent()
- p.Print("inferred name: %s" % self.shared.inferred_name)
- if self.shared.script.Is(Script) and self.shared.script.name.Is(String):
- p.Print("script name: %s" % self.shared.script.name)
- p.Print("source:")
- p.PrintLines(self._GetSource().split("\n"))
- p.Print("code:")
- self.code.Print(p)
- if self.code != self.shared.code:
- p.Print("unoptimized code:")
- self.shared.code.Print(p)
- p.Dedent()
- p.Print("}")
-
- def __str__(self):
- inferred_name = ""
- if self.shared.Is(SharedFunctionInfo):
- inferred_name = self.shared.inferred_name
- return "JSFunction(%08x, %s)" % (self.address, inferred_name)
-
- def _GetSource(self):
- source = "?source?"
- start = self.shared.start_position
- end = self.shared.end_position
- if not self.shared.script.Is(Script): return source
- script_source = self.shared.script.source
- if not script_source.Is(String): return source
- return script_source.GetChars()[start:end]
-
-
-class SharedFunctionInfo(HeapObject):
- CODE_OFFSET = 2 * 4
- SCRIPT_OFFSET = 7 * 4
- INFERRED_NAME_OFFSET = 9 * 4
- START_POSITION_AND_TYPE_OFFSET = 17 * 4
- END_POSITION_OFFSET = 18 * 4
-
- def __init__(self, heap, map, address):
- HeapObject.__init__(self, heap, map, address)
- self.code = self.ObjectField(SharedFunctionInfo.CODE_OFFSET)
- self.script = self.ObjectField(SharedFunctionInfo.SCRIPT_OFFSET)
- self.inferred_name = \
- self.ObjectField(SharedFunctionInfo.INFERRED_NAME_OFFSET)
- start_position_and_type = \
- self.SmiField(SharedFunctionInfo.START_POSITION_AND_TYPE_OFFSET)
- self.start_position = start_position_and_type >> 2
- self.end_position = self.SmiField(SharedFunctionInfo.END_POSITION_OFFSET)
-
-
-class Script(HeapObject):
- SOURCE_OFFSET = 4
- NAME_OFFSET = 8
-
- def __init__(self, heap, map, address):
- HeapObject.__init__(self, heap, map, address)
- self.source = self.ObjectField(Script.SOURCE_OFFSET)
- self.name = self.ObjectField(Script.NAME_OFFSET)
-
-
-class Code(HeapObject):
- INSTRUCTION_SIZE_OFFSET = 4
- ENTRY_OFFSET = 32
-
- def __init__(self, heap, map, address):
- HeapObject.__init__(self, heap, map, address)
- self.entry = self.address + Code.ENTRY_OFFSET
- self.instruction_size = \
- heap.reader.ReadU32(self.address + Code.INSTRUCTION_SIZE_OFFSET)
-
- def Print(self, p):
- lines = self.heap.reader.GetDisasmLines(self.entry, self.instruction_size)
- p.Print("Code(%08x) {" % self.address)
- p.Indent()
- p.Print("instruction_size: %d" % self.instruction_size)
- p.PrintLines(self._FormatLine(line) for line in lines)
- p.Dedent()
- p.Print("}")
-
- def _FormatLine(self, line):
- return FormatDisasmLine(self.entry, self.heap, line)
-
-
-class V8Heap(object):
- CLASS_MAP = {
- "SYMBOL_TYPE": SeqString,
- "ASCII_SYMBOL_TYPE": SeqString,
- "CONS_SYMBOL_TYPE": ConsString,
- "CONS_ASCII_SYMBOL_TYPE": ConsString,
- "EXTERNAL_SYMBOL_TYPE": ExternalString,
- "EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE": ExternalString,
- "EXTERNAL_ASCII_SYMBOL_TYPE": ExternalString,
- "STRING_TYPE": SeqString,
- "ASCII_STRING_TYPE": SeqString,
- "CONS_STRING_TYPE": ConsString,
- "CONS_ASCII_STRING_TYPE": ConsString,
- "EXTERNAL_STRING_TYPE": ExternalString,
- "EXTERNAL_STRING_WITH_ASCII_DATA_TYPE": ExternalString,
- "EXTERNAL_ASCII_STRING_TYPE": ExternalString,
-
- "MAP_TYPE": Map,
- "ODDBALL_TYPE": Oddball,
- "FIXED_ARRAY_TYPE": FixedArray,
- "JS_FUNCTION_TYPE": JSFunction,
- "SHARED_FUNCTION_INFO_TYPE": SharedFunctionInfo,
- "SCRIPT_TYPE": Script,
- "CODE_TYPE": Code
- }
-
- def __init__(self, reader, stack_map):
- self.reader = reader
- self.stack_map = stack_map
- self.objects = {}
-
- def FindObjectOrSmi(self, tagged_address):
- if (tagged_address & 1) == 0: return tagged_address / 2
- return self.FindObject(tagged_address)
-
- def FindObject(self, tagged_address):
- if tagged_address in self.objects:
- return self.objects[tagged_address]
- if (tagged_address & 1) != 1: return None
- address = tagged_address - 1
- if not self.reader.IsValidAddress(address): return None
- map_tagged_address = self.reader.ReadU32(address)
- if tagged_address == map_tagged_address:
- # Meta map?
- meta_map = Map(self, None, address)
- instance_type_name = INSTANCE_TYPES.get(meta_map.instance_type)
- if instance_type_name != "MAP_TYPE": return None
- meta_map.map = meta_map
- object = meta_map
- else:
- map = self.FindObject(map_tagged_address)
- if map is None: return None
- instance_type_name = INSTANCE_TYPES.get(map.instance_type)
- if instance_type_name is None: return None
- cls = V8Heap.CLASS_MAP.get(instance_type_name, HeapObject)
- object = cls(self, map, address)
- self.objects[tagged_address] = object
- return object
-
-
-EIP_PROXIMITY = 64
-
-
-def AnalyzeMinidump(options, minidump_name):
- reader = MinidumpReader(options, minidump_name)
- DebugPrint("========================================")
- if reader.exception is None:
- print "Minidump has no exception info"
- return
- print "Exception info:"
- exception_thread = reader.thread_map[reader.exception.thread_id]
- print " thread id: %d" % exception_thread.id
- print " code: %08X" % reader.exception.exception.code
- print " context:"
- print " eax: %08x" % reader.exception_context.eax
- print " ebx: %08x" % reader.exception_context.ebx
- print " ecx: %08x" % reader.exception_context.ecx
- print " edx: %08x" % reader.exception_context.edx
- print " edi: %08x" % reader.exception_context.edi
- print " esi: %08x" % reader.exception_context.esi
- print " ebp: %08x" % reader.exception_context.ebp
- print " esp: %08x" % reader.exception_context.esp
- print " eip: %08x" % reader.exception_context.eip
- # TODO(vitalyr): decode eflags.
- print " eflags: %s" % bin(reader.exception_context.eflags)[2:]
- print
-
- stack_bottom = exception_thread.stack.start + \
- exception_thread.stack.memory.data_size
- stack_map = {reader.exception_context.eip: -1}
- for slot in xrange(reader.exception_context.esp, stack_bottom, 4):
- maybe_address = reader.ReadU32(slot)
- if not maybe_address in stack_map:
- stack_map[maybe_address] = slot
- heap = V8Heap(reader, stack_map)
-
- print "Disassembly around exception.eip:"
- start = reader.exception_context.eip - EIP_PROXIMITY
- lines = reader.GetDisasmLines(start, 2 * EIP_PROXIMITY)
- for line in lines:
- print FormatDisasmLine(start, heap, line)
- print
-
- print "Annotated stack (from exception.esp to bottom):"
- for slot in xrange(reader.exception_context.esp, stack_bottom, 4):
- maybe_address = reader.ReadU32(slot)
- heap_object = heap.FindObject(maybe_address)
- print "%08x: %08x" % (slot, maybe_address)
- if heap_object:
- heap_object.Print(Printer())
- print
-
- reader.Dispose()
-
-
-if __name__ == "__main__":
- parser = optparse.OptionParser(USAGE)
- options, args = parser.parse_args()
- if len(args) != 1:
- parser.print_help()
- sys.exit(1)
- AnalyzeMinidump(options, args[0])
diff --git a/src/3rdparty/v8/tools/gyp/v8.gyp b/src/3rdparty/v8/tools/gyp/v8.gyp
deleted file mode 100644
index 8804454..0000000
--- a/src/3rdparty/v8/tools/gyp/v8.gyp
+++ /dev/null
@@ -1,844 +0,0 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-{
- 'variables': {
- 'use_system_v8%': 0,
- 'msvs_use_common_release': 0,
- 'gcc_version%': 'unknown',
- 'v8_target_arch%': '<(target_arch)',
- 'v8_use_snapshot%': 'true',
- 'v8_use_liveobjectlist%': 'false',
- },
- 'conditions': [
- ['use_system_v8==0', {
- 'target_defaults': {
- 'defines': [
- 'ENABLE_LOGGING_AND_PROFILING',
- 'ENABLE_DEBUGGER_SUPPORT',
- 'ENABLE_VMSTATE_TRACKING',
- 'V8_FAST_TLS',
- ],
- 'conditions': [
- ['OS!="mac"', {
- # TODO(mark): The OS!="mac" conditional is temporary. It can be
- # removed once the Mac Chromium build stops setting target_arch to
- # ia32 and instead sets it to mac. Other checks in this file for
- # OS=="mac" can be removed at that time as well. This can be cleaned
- # up once http://crbug.com/44205 is fixed.
- 'conditions': [
- ['v8_target_arch=="arm"', {
- 'defines': [
- 'V8_TARGET_ARCH_ARM',
- ],
- }],
- ['v8_target_arch=="ia32"', {
- 'defines': [
- 'V8_TARGET_ARCH_IA32',
- ],
- }],
- ['v8_target_arch=="x64"', {
- 'defines': [
- 'V8_TARGET_ARCH_X64',
- ],
- }],
- ],
- }],
- ['v8_use_liveobjectlist=="true"', {
- 'defines': [
- 'ENABLE_DEBUGGER_SUPPORT',
- 'INSPECTOR',
- 'OBJECT_PRINT',
- 'LIVEOBJECTLIST',
- ],
- }],
- ],
- 'configurations': {
- 'Debug': {
- 'defines': [
- 'DEBUG',
- '_DEBUG',
- 'ENABLE_DISASSEMBLER',
- 'V8_ENABLE_CHECKS',
- 'OBJECT_PRINT',
- ],
- 'msvs_settings': {
- 'VCCLCompilerTool': {
- 'Optimization': '0',
-
- 'conditions': [
- ['OS=="win" and component=="shared_library"', {
- 'RuntimeLibrary': '3', # /MDd
- }, {
- 'RuntimeLibrary': '1', # /MTd
- }],
- ],
- },
- 'VCLinkerTool': {
- 'LinkIncremental': '2',
- },
- },
- 'conditions': [
- ['OS=="freebsd" or OS=="openbsd"', {
- 'cflags': [ '-I/usr/local/include' ],
- }],
- ],
- },
- 'Release': {
- 'conditions': [
- ['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
- 'cflags!': [
- '-O2',
- '-Os',
- ],
- 'cflags': [
- '-fomit-frame-pointer',
- '-O3',
- ],
- 'conditions': [
- [ 'gcc_version==44', {
- 'cflags': [
- # Avoid crashes with gcc 4.4 in the v8 test suite.
- '-fno-tree-vrp',
- ],
- }],
- ],
- }],
- ['OS=="freebsd" or OS=="openbsd"', {
- 'cflags': [ '-I/usr/local/include' ],
- }],
- ['OS=="mac"', {
- 'xcode_settings': {
- 'GCC_OPTIMIZATION_LEVEL': '3', # -O3
-
- # -fstrict-aliasing. Mainline gcc
- # enables this at -O2 and above,
- # but Apple gcc does not unless it
- # is specified explicitly.
- 'GCC_STRICT_ALIASING': 'YES',
- },
- }],
- ['OS=="win"', {
- 'msvs_configuration_attributes': {
- 'OutputDirectory': '$(SolutionDir)$(ConfigurationName)',
- 'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
- 'CharacterSet': '1',
- },
- 'msvs_settings': {
- 'VCCLCompilerTool': {
- 'Optimization': '2',
- 'InlineFunctionExpansion': '2',
- 'EnableIntrinsicFunctions': 'true',
- 'FavorSizeOrSpeed': '0',
- 'OmitFramePointers': 'true',
- 'StringPooling': 'true',
-
- 'conditions': [
- ['OS=="win" and component=="shared_library"', {
- 'RuntimeLibrary': '2', #/MD
- }, {
- 'RuntimeLibrary': '0', #/MT
- }],
- ],
- },
- 'VCLinkerTool': {
- 'LinkIncremental': '1',
- 'OptimizeReferences': '2',
- 'OptimizeForWindows98': '1',
- 'EnableCOMDATFolding': '2',
- },
- },
- }],
- ],
- },
- },
- },
- 'targets': [
- {
- 'target_name': 'v8',
- 'conditions': [
- ['v8_use_snapshot=="true"', {
- 'dependencies': ['v8_snapshot'],
- },
- {
- 'dependencies': ['v8_nosnapshot'],
- }],
- ['OS=="win" and component=="shared_library"', {
- 'type': '<(component)',
- 'sources': [
- '../../src/v8dll-main.cc',
- ],
- 'defines': [
- 'BUILDING_V8_SHARED'
- ],
- 'direct_dependent_settings': {
- 'defines': [
- 'USING_V8_SHARED',
- ],
- },
- },
- {
- 'type': 'none',
- }],
- ],
- 'direct_dependent_settings': {
- 'include_dirs': [
- '../../include',
- ],
- },
- },
- {
- 'target_name': 'v8_snapshot',
- 'type': '<(library)',
- 'conditions': [
- ['OS=="win" and component=="shared_library"', {
- 'defines': [
- 'BUILDING_V8_SHARED',
- ],
- }],
- ],
- 'dependencies': [
- 'mksnapshot#host',
- 'js2c#host',
- 'v8_base',
- ],
- 'include_dirs+': [
- '../../src',
- ],
- 'sources': [
- '<(SHARED_INTERMEDIATE_DIR)/libraries-empty.cc',
- '<(INTERMEDIATE_DIR)/snapshot.cc',
- ],
- 'actions': [
- {
- 'action_name': 'run_mksnapshot',
- 'inputs': [
- '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
- ],
- 'outputs': [
- '<(INTERMEDIATE_DIR)/snapshot.cc',
- ],
- 'action': ['<@(_inputs)', '<@(_outputs)'],
- },
- ],
- },
- {
- 'target_name': 'v8_nosnapshot',
- 'type': '<(library)',
- 'toolsets': ['host', 'target'],
- 'dependencies': [
- 'js2c#host',
- 'v8_base',
- ],
- 'include_dirs+': [
- '../../src',
- ],
- 'sources': [
- '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
- '../../src/snapshot-empty.cc',
- ],
- 'conditions': [
- # The ARM assembler assumes the host is 32 bits, so force building
- # 32-bit host tools.
- ['v8_target_arch=="arm" and host_arch=="x64" and _toolset=="host"', {
- 'cflags': ['-m32'],
- 'ldflags': ['-m32'],
- }],
- ['OS=="win" and component=="shared_library"', {
- 'defines': [
- 'BUILDING_V8_SHARED',
- ],
- }],
- ]
- },
- {
- 'target_name': 'v8_base',
- 'type': '<(library)',
- 'toolsets': ['host', 'target'],
- 'include_dirs+': [
- '../../src',
- ],
- 'sources': [
- '../../src/accessors.cc',
- '../../src/accessors.h',
- '../../src/allocation.cc',
- '../../src/allocation.h',
- '../../src/api.cc',
- '../../src/api.h',
- '../../src/apiutils.h',
- '../../src/arguments.h',
- '../../src/assembler.cc',
- '../../src/assembler.h',
- '../../src/ast.cc',
- '../../src/ast-inl.h',
- '../../src/ast.h',
- '../../src/atomicops_internals_x86_gcc.cc',
- '../../src/bignum.cc',
- '../../src/bignum.h',
- '../../src/bignum-dtoa.cc',
- '../../src/bignum-dtoa.h',
- '../../src/bootstrapper.cc',
- '../../src/bootstrapper.h',
- '../../src/builtins.cc',
- '../../src/builtins.h',
- '../../src/bytecodes-irregexp.h',
- '../../src/cached-powers.cc',
- '../../src/cached-powers.h',
- '../../src/char-predicates-inl.h',
- '../../src/char-predicates.h',
- '../../src/checks.cc',
- '../../src/checks.h',
- '../../src/circular-queue-inl.h',
- '../../src/circular-queue.cc',
- '../../src/circular-queue.h',
- '../../src/code-stubs.cc',
- '../../src/code-stubs.h',
- '../../src/code.h',
- '../../src/codegen-inl.h',
- '../../src/codegen.cc',
- '../../src/codegen.h',
- '../../src/compilation-cache.cc',
- '../../src/compilation-cache.h',
- '../../src/compiler.cc',
- '../../src/compiler.h',
- '../../src/contexts.cc',
- '../../src/contexts.h',
- '../../src/conversions-inl.h',
- '../../src/conversions.cc',
- '../../src/conversions.h',
- '../../src/counters.cc',
- '../../src/counters.h',
- '../../src/cpu.h',
- '../../src/cpu-profiler-inl.h',
- '../../src/cpu-profiler.cc',
- '../../src/cpu-profiler.h',
- '../../src/data-flow.cc',
- '../../src/data-flow.h',
- '../../src/dateparser.cc',
- '../../src/dateparser.h',
- '../../src/dateparser-inl.h',
- '../../src/debug.cc',
- '../../src/debug.h',
- '../../src/debug-agent.cc',
- '../../src/debug-agent.h',
- '../../src/deoptimizer.cc',
- '../../src/deoptimizer.h',
- '../../src/disasm.h',
- '../../src/disassembler.cc',
- '../../src/disassembler.h',
- '../../src/dtoa.cc',
- '../../src/dtoa.h',
- '../../src/diy-fp.cc',
- '../../src/diy-fp.h',
- '../../src/double.h',
- '../../src/execution.cc',
- '../../src/execution.h',
- '../../src/factory.cc',
- '../../src/factory.h',
- '../../src/fast-dtoa.cc',
- '../../src/fast-dtoa.h',
- '../../src/flag-definitions.h',
- '../../src/fixed-dtoa.cc',
- '../../src/fixed-dtoa.h',
- '../../src/flags.cc',
- '../../src/flags.h',
- '../../src/frame-element.cc',
- '../../src/frame-element.h',
- '../../src/frames-inl.h',
- '../../src/frames.cc',
- '../../src/frames.h',
- '../../src/full-codegen.cc',
- '../../src/full-codegen.h',
- '../../src/func-name-inferrer.cc',
- '../../src/func-name-inferrer.h',
- '../../src/global-handles.cc',
- '../../src/global-handles.h',
- '../../src/globals.h',
- '../../src/handles-inl.h',
- '../../src/handles.cc',
- '../../src/handles.h',
- '../../src/hashmap.cc',
- '../../src/hashmap.h',
- '../../src/heap-inl.h',
- '../../src/heap.cc',
- '../../src/heap.h',
- '../../src/heap-profiler.cc',
- '../../src/heap-profiler.h',
- '../../src/hydrogen.cc',
- '../../src/hydrogen.h',
- '../../src/hydrogen-instructions.cc',
- '../../src/hydrogen-instructions.h',
- '../../src/ic-inl.h',
- '../../src/ic.cc',
- '../../src/ic.h',
- '../../src/inspector.cc',
- '../../src/inspector.h',
- '../../src/interpreter-irregexp.cc',
- '../../src/interpreter-irregexp.h',
- '../../src/jump-target-inl.h',
- '../../src/jump-target.cc',
- '../../src/jump-target.h',
- '../../src/jsregexp.cc',
- '../../src/jsregexp.h',
- '../../src/isolate.cc',
- '../../src/isolate.h',
- '../../src/list-inl.h',
- '../../src/list.h',
- '../../src/lithium.cc',
- '../../src/lithium.h',
- '../../src/lithium-allocator.cc',
- '../../src/lithium-allocator.h',
- '../../src/lithium-allocator-inl.h',
- '../../src/liveedit.cc',
- '../../src/liveedit.h',
- '../../src/liveobjectlist-inl.h',
- '../../src/liveobjectlist.cc',
- '../../src/liveobjectlist.h',
- '../../src/log-inl.h',
- '../../src/log-utils.cc',
- '../../src/log-utils.h',
- '../../src/log.cc',
- '../../src/log.h',
- '../../src/macro-assembler.h',
- '../../src/mark-compact.cc',
- '../../src/mark-compact.h',
- '../../src/messages.cc',
- '../../src/messages.h',
- '../../src/natives.h',
- '../../src/objects-debug.cc',
- '../../src/objects-printer.cc',
- '../../src/objects-inl.h',
- '../../src/objects-visiting.cc',
- '../../src/objects-visiting.h',
- '../../src/objects.cc',
- '../../src/objects.h',
- '../../src/parser.cc',
- '../../src/parser.h',
- '../../src/platform-tls-mac.h',
- '../../src/platform-tls-win32.h',
- '../../src/platform-tls.h',
- '../../src/platform.h',
- '../../src/preparse-data.cc',
- '../../src/preparse-data.h',
- '../../src/preparser.cc',
- '../../src/preparser.h',
- '../../src/prettyprinter.cc',
- '../../src/prettyprinter.h',
- '../../src/property.cc',
- '../../src/property.h',
- '../../src/profile-generator-inl.h',
- '../../src/profile-generator.cc',
- '../../src/profile-generator.h',
- '../../src/regexp-macro-assembler-irregexp-inl.h',
- '../../src/regexp-macro-assembler-irregexp.cc',
- '../../src/regexp-macro-assembler-irregexp.h',
- '../../src/regexp-macro-assembler-tracer.cc',
- '../../src/regexp-macro-assembler-tracer.h',
- '../../src/regexp-macro-assembler.cc',
- '../../src/regexp-macro-assembler.h',
- '../../src/regexp-stack.cc',
- '../../src/regexp-stack.h',
- '../../src/register-allocator.h',
- '../../src/register-allocator-inl.h',
- '../../src/register-allocator.cc',
- '../../src/rewriter.cc',
- '../../src/rewriter.h',
- '../../src/runtime.cc',
- '../../src/runtime.h',
- '../../src/runtime-profiler.cc',
- '../../src/runtime-profiler.h',
- '../../src/safepoint-table.cc',
- '../../src/safepoint-table.h',
- '../../src/scanner-base.cc',
- '../../src/scanner-base.h',
- '../../src/scanner.cc',
- '../../src/scanner.h',
- '../../src/scopeinfo.cc',
- '../../src/scopeinfo.h',
- '../../src/scopes.cc',
- '../../src/scopes.h',
- '../../src/serialize.cc',
- '../../src/serialize.h',
- '../../src/shell.h',
- '../../src/small-pointer-list.h',
- '../../src/smart-pointer.h',
- '../../src/snapshot-common.cc',
- '../../src/snapshot.h',
- '../../src/spaces-inl.h',
- '../../src/spaces.cc',
- '../../src/spaces.h',
- '../../src/string-search.cc',
- '../../src/string-search.h',
- '../../src/string-stream.cc',
- '../../src/string-stream.h',
- '../../src/strtod.cc',
- '../../src/strtod.h',
- '../../src/stub-cache.cc',
- '../../src/stub-cache.h',
- '../../src/token.cc',
- '../../src/token.h',
- '../../src/top.cc',
- '../../src/top.h',
- '../../src/type-info.cc',
- '../../src/type-info.h',
- '../../src/unbound-queue-inl.h',
- '../../src/unbound-queue.h',
- '../../src/unicode-inl.h',
- '../../src/unicode.cc',
- '../../src/unicode.h',
- '../../src/utils.cc',
- '../../src/utils.h',
- '../../src/v8-counters.cc',
- '../../src/v8-counters.h',
- '../../src/v8.cc',
- '../../src/v8.h',
- '../../src/v8checks.h',
- '../../src/v8globals.h',
- '../../src/v8memory.h',
- '../../src/v8threads.cc',
- '../../src/v8threads.h',
- '../../src/v8utils.h',
- '../../src/variables.cc',
- '../../src/variables.h',
- '../../src/version.cc',
- '../../src/version.h',
- '../../src/virtual-frame-inl.h',
- '../../src/virtual-frame.cc',
- '../../src/virtual-frame.h',
- '../../src/vm-state-inl.h',
- '../../src/vm-state.h',
- '../../src/zone-inl.h',
- '../../src/zone.cc',
- '../../src/zone.h',
- '../../src/extensions/externalize-string-extension.cc',
- '../../src/extensions/externalize-string-extension.h',
- '../../src/extensions/gc-extension.cc',
- '../../src/extensions/gc-extension.h',
- ],
- 'conditions': [
- ['v8_target_arch=="arm"', {
- 'include_dirs+': [
- '../../src/arm',
- ],
- 'sources': [
- '../../src/jump-target-light.h',
- '../../src/jump-target-light-inl.h',
- '../../src/jump-target-light.cc',
- '../../src/virtual-frame-light-inl.h',
- '../../src/virtual-frame-light.cc',
- '../../src/arm/assembler-arm-inl.h',
- '../../src/arm/assembler-arm.cc',
- '../../src/arm/assembler-arm.h',
- '../../src/arm/builtins-arm.cc',
- '../../src/arm/code-stubs-arm.cc',
- '../../src/arm/code-stubs-arm.h',
- '../../src/arm/codegen-arm.cc',
- '../../src/arm/codegen-arm.h',
- '../../src/arm/constants-arm.h',
- '../../src/arm/constants-arm.cc',
- '../../src/arm/cpu-arm.cc',
- '../../src/arm/debug-arm.cc',
- '../../src/arm/deoptimizer-arm.cc',
- '../../src/arm/disasm-arm.cc',
- '../../src/arm/frames-arm.cc',
- '../../src/arm/frames-arm.h',
- '../../src/arm/full-codegen-arm.cc',
- '../../src/arm/ic-arm.cc',
- '../../src/arm/jump-target-arm.cc',
- '../../src/arm/lithium-arm.cc',
- '../../src/arm/lithium-arm.h',
- '../../src/arm/lithium-codegen-arm.cc',
- '../../src/arm/lithium-codegen-arm.h',
- '../../src/arm/lithium-gap-resolver-arm.cc',
- '../../src/arm/lithium-gap-resolver-arm.h',
- '../../src/arm/macro-assembler-arm.cc',
- '../../src/arm/macro-assembler-arm.h',
- '../../src/arm/regexp-macro-assembler-arm.cc',
- '../../src/arm/regexp-macro-assembler-arm.h',
- '../../src/arm/register-allocator-arm.cc',
- '../../src/arm/simulator-arm.cc',
- '../../src/arm/stub-cache-arm.cc',
- '../../src/arm/virtual-frame-arm-inl.h',
- '../../src/arm/virtual-frame-arm.cc',
- '../../src/arm/virtual-frame-arm.h',
- ],
- 'conditions': [
- # The ARM assembler assumes the host is 32 bits,
- # so force building 32-bit host tools.
- ['host_arch=="x64" and _toolset=="host"', {
- 'cflags': ['-m32'],
- 'ldflags': ['-m32'],
- }]
- ]
- }],
- ['v8_target_arch=="ia32" or v8_target_arch=="mac" or OS=="mac"', {
- 'include_dirs+': [
- '../../src/ia32',
- ],
- 'sources': [
- '../../src/jump-target-heavy.h',
- '../../src/jump-target-heavy-inl.h',
- '../../src/jump-target-heavy.cc',
- '../../src/virtual-frame-heavy-inl.h',
- '../../src/virtual-frame-heavy.cc',
- '../../src/ia32/assembler-ia32-inl.h',
- '../../src/ia32/assembler-ia32.cc',
- '../../src/ia32/assembler-ia32.h',
- '../../src/ia32/builtins-ia32.cc',
- '../../src/ia32/code-stubs-ia32.cc',
- '../../src/ia32/code-stubs-ia32.h',
- '../../src/ia32/codegen-ia32.cc',
- '../../src/ia32/codegen-ia32.h',
- '../../src/ia32/cpu-ia32.cc',
- '../../src/ia32/debug-ia32.cc',
- '../../src/ia32/deoptimizer-ia32.cc',
- '../../src/ia32/disasm-ia32.cc',
- '../../src/ia32/frames-ia32.cc',
- '../../src/ia32/frames-ia32.h',
- '../../src/ia32/full-codegen-ia32.cc',
- '../../src/ia32/ic-ia32.cc',
- '../../src/ia32/jump-target-ia32.cc',
- '../../src/ia32/lithium-codegen-ia32.cc',
- '../../src/ia32/lithium-codegen-ia32.h',
- '../../src/ia32/lithium-gap-resolver-ia32.cc',
- '../../src/ia32/lithium-gap-resolver-ia32.h',
- '../../src/ia32/lithium-ia32.cc',
- '../../src/ia32/lithium-ia32.h',
- '../../src/ia32/macro-assembler-ia32.cc',
- '../../src/ia32/macro-assembler-ia32.h',
- '../../src/ia32/regexp-macro-assembler-ia32.cc',
- '../../src/ia32/regexp-macro-assembler-ia32.h',
- '../../src/ia32/register-allocator-ia32.cc',
- '../../src/ia32/stub-cache-ia32.cc',
- '../../src/ia32/virtual-frame-ia32.cc',
- '../../src/ia32/virtual-frame-ia32.h',
- ],
- }],
- ['v8_target_arch=="x64" or v8_target_arch=="mac" or OS=="mac"', {
- 'include_dirs+': [
- '../../src/x64',
- ],
- 'sources': [
- '../../src/jump-target-heavy.h',
- '../../src/jump-target-heavy-inl.h',
- '../../src/jump-target-heavy.cc',
- '../../src/virtual-frame-heavy-inl.h',
- '../../src/virtual-frame-heavy.cc',
- '../../src/x64/assembler-x64-inl.h',
- '../../src/x64/assembler-x64.cc',
- '../../src/x64/assembler-x64.h',
- '../../src/x64/builtins-x64.cc',
- '../../src/x64/code-stubs-x64.cc',
- '../../src/x64/code-stubs-x64.h',
- '../../src/x64/codegen-x64.cc',
- '../../src/x64/codegen-x64.h',
- '../../src/x64/cpu-x64.cc',
- '../../src/x64/debug-x64.cc',
- '../../src/x64/deoptimizer-x64.cc',
- '../../src/x64/disasm-x64.cc',
- '../../src/x64/frames-x64.cc',
- '../../src/x64/frames-x64.h',
- '../../src/x64/full-codegen-x64.cc',
- '../../src/x64/ic-x64.cc',
- '../../src/x64/jump-target-x64.cc',
- '../../src/x64/lithium-codegen-x64.cc',
- '../../src/x64/lithium-codegen-x64.h',
- '../../src/x64/lithium-gap-resolver-x64.cc',
- '../../src/x64/lithium-gap-resolver-x64.h',
- '../../src/x64/lithium-x64.cc',
- '../../src/x64/lithium-x64.h',
- '../../src/x64/macro-assembler-x64.cc',
- '../../src/x64/macro-assembler-x64.h',
- '../../src/x64/regexp-macro-assembler-x64.cc',
- '../../src/x64/regexp-macro-assembler-x64.h',
- '../../src/x64/register-allocator-x64.cc',
- '../../src/x64/stub-cache-x64.cc',
- '../../src/x64/virtual-frame-x64.cc',
- '../../src/x64/virtual-frame-x64.h',
- ],
- }],
- ['OS=="linux"', {
- 'link_settings': {
- 'libraries': [
- # Needed for clock_gettime() used by src/platform-linux.cc.
- '-lrt',
- ]},
- 'sources': [
- '../../src/platform-linux.cc',
- '../../src/platform-posix.cc'
- ],
- }
- ],
- ['OS=="freebsd"', {
- 'link_settings': {
- 'libraries': [
- '-L/usr/local/lib -lexecinfo',
- ]},
- 'sources': [
- '../../src/platform-freebsd.cc',
- '../../src/platform-posix.cc'
- ],
- }
- ],
- ['OS=="openbsd"', {
- 'link_settings': {
- 'libraries': [
- '-L/usr/local/lib -lexecinfo',
- ]},
- 'sources': [
- '../../src/platform-openbsd.cc',
- '../../src/platform-posix.cc'
- ],
- }
- ],
- ['OS=="mac"', {
- 'sources': [
- '../../src/platform-macos.cc',
- '../../src/platform-posix.cc'
- ]},
- ],
- ['OS=="win"', {
- 'sources': [
- '../../src/platform-win32.cc',
- ],
- 'msvs_disabled_warnings': [4351, 4355, 4800],
- 'link_settings': {
- 'libraries': [ '-lwinmm.lib' ],
- },
- }],
- ['OS=="win" and component=="shared_library"', {
- 'defines': [
- 'BUILDING_V8_SHARED'
- ],
- }],
- ],
- },
- {
- 'target_name': 'js2c',
- 'type': 'none',
- 'toolsets': ['host'],
- 'variables': {
- 'library_files': [
- '../../src/runtime.js',
- '../../src/v8natives.js',
- '../../src/array.js',
- '../../src/string.js',
- '../../src/uri.js',
- '../../src/math.js',
- '../../src/messages.js',
- '../../src/apinatives.js',
- '../../src/debug-debugger.js',
- '../../src/mirror-debugger.js',
- '../../src/liveedit-debugger.js',
- '../../src/date.js',
- '../../src/json.js',
- '../../src/regexp.js',
- '../../src/macros.py',
- ],
- },
- 'actions': [
- {
- 'action_name': 'js2c',
- 'inputs': [
- '../../tools/js2c.py',
- '<@(library_files)',
- ],
- 'outputs': [
- '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
- '<(SHARED_INTERMEDIATE_DIR)/libraries-empty.cc',
- ],
- 'action': [
- 'python',
- '../../tools/js2c.py',
- '<@(_outputs)',
- 'CORE',
- '<@(library_files)'
- ],
- },
- ],
- },
- {
- 'target_name': 'mksnapshot',
- 'type': 'executable',
- 'toolsets': ['host'],
- 'dependencies': [
- 'v8_nosnapshot',
- ],
- 'include_dirs+': [
- '../../src',
- ],
- 'sources': [
- '../../src/mksnapshot.cc',
- ],
- 'conditions': [
- # The ARM assembler assumes the host is 32 bits, so force building
- # 32-bit host tools.
- ['v8_target_arch=="arm" and host_arch=="x64" and _toolset=="host"', {
- 'cflags': ['-m32'],
- 'ldflags': ['-m32'],
- }]
- ]
- },
- {
- 'target_name': 'v8_shell',
- 'type': 'executable',
- 'dependencies': [
- 'v8'
- ],
- 'sources': [
- '../../samples/shell.cc',
- ],
- 'conditions': [
- ['OS=="win"', {
- # This could be gotten by not setting chromium_code, if that's OK.
- 'defines': ['_CRT_SECURE_NO_WARNINGS'],
- }],
- ],
- },
- ],
- }, { # use_system_v8 != 0
- 'targets': [
- {
- 'target_name': 'v8',
- 'type': 'settings',
- 'link_settings': {
- 'libraries': [
- '-lv8',
- ],
- },
- },
- {
- 'target_name': 'v8_shell',
- 'type': 'none',
- 'dependencies': [
- 'v8'
- ],
- },
- ],
- }],
- ],
-}
diff --git a/src/3rdparty/v8/tools/js2c.py b/src/3rdparty/v8/tools/js2c.py
deleted file mode 100755
index 2da132f..0000000
--- a/src/3rdparty/v8/tools/js2c.py
+++ /dev/null
@@ -1,380 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2006-2008 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# This is a utility for converting JavaScript source code into C-style
-# char arrays. It is used for embedded JavaScript code in the V8
-# library.
-
-import os, re, sys, string
-import jsmin
-
-
-def ToCArray(lines):
- result = []
- for chr in lines:
- value = ord(chr)
- assert value < 128
- result.append(str(value))
- result.append("0")
- return ", ".join(result)
-
-
-def RemoveCommentsAndTrailingWhitespace(lines):
- lines = re.sub(r'//.*\n', '\n', lines) # end-of-line comments
- lines = re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', lines) # comments.
- lines = re.sub(r'\s+\n+', '\n', lines) # trailing whitespace
- return lines
-
-
-def ReadFile(filename):
- file = open(filename, "rt")
- try:
- lines = file.read()
- finally:
- file.close()
- return lines
-
-
-def ReadLines(filename):
- result = []
- for line in open(filename, "rt"):
- if '#' in line:
- line = line[:line.index('#')]
- line = line.strip()
- if len(line) > 0:
- result.append(line)
- return result
-
-
-def LoadConfigFrom(name):
- import ConfigParser
- config = ConfigParser.ConfigParser()
- config.read(name)
- return config
-
-
-def ParseValue(string):
- string = string.strip()
- if string.startswith('[') and string.endswith(']'):
- return string.lstrip('[').rstrip(']').split()
- else:
- return string
-
-
-EVAL_PATTERN = re.compile(r'\beval\s*\(');
-WITH_PATTERN = re.compile(r'\bwith\s*\(');
-
-
-def Validate(lines, file):
- lines = RemoveCommentsAndTrailingWhitespace(lines)
- # Because of simplified context setup, eval and with is not
- # allowed in the natives files.
- eval_match = EVAL_PATTERN.search(lines)
- if eval_match:
- raise ("Eval disallowed in natives: %s" % file)
- with_match = WITH_PATTERN.search(lines)
- if with_match:
- raise ("With statements disallowed in natives: %s" % file)
-
-
-def ExpandConstants(lines, constants):
- for key, value in constants:
- lines = key.sub(str(value), lines)
- return lines
-
-
-def ExpandMacros(lines, macros):
- # We allow macros to depend on the previously declared macros, but
- # we don't allow self-dependecies or recursion.
- for name_pattern, macro in reversed(macros):
- pattern_match = name_pattern.search(lines, 0)
- while pattern_match is not None:
- # Scan over the arguments
- height = 1
- start = pattern_match.start()
- end = pattern_match.end()
- assert lines[end - 1] == '('
- last_match = end
- arg_index = 0
- mapping = { }
- def add_arg(str):
- # Remember to expand recursively in the arguments
- replacement = ExpandMacros(str.strip(), macros)
- mapping[macro.args[arg_index]] = replacement
- while end < len(lines) and height > 0:
- # We don't count commas at higher nesting levels.
- if lines[end] == ',' and height == 1:
- add_arg(lines[last_match:end])
- last_match = end + 1
- elif lines[end] in ['(', '{', '[']:
- height = height + 1
- elif lines[end] in [')', '}', ']']:
- height = height - 1
- end = end + 1
- # Remember to add the last match.
- add_arg(lines[last_match:end-1])
- result = macro.expand(mapping)
- # Replace the occurrence of the macro with the expansion
- lines = lines[:start] + result + lines[end:]
- pattern_match = name_pattern.search(lines, start + len(result))
- return lines
-
-class TextMacro:
- def __init__(self, args, body):
- self.args = args
- self.body = body
- def expand(self, mapping):
- result = self.body
- for key, value in mapping.items():
- result = result.replace(key, value)
- return result
-
-class PythonMacro:
- def __init__(self, args, fun):
- self.args = args
- self.fun = fun
- def expand(self, mapping):
- args = []
- for arg in self.args:
- args.append(mapping[arg])
- return str(self.fun(*args))
-
-CONST_PATTERN = re.compile(r'^const\s+([a-zA-Z0-9_]+)\s*=\s*([^;]*);$')
-MACRO_PATTERN = re.compile(r'^macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
-PYTHON_MACRO_PATTERN = re.compile(r'^python\s+macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
-
-
-def ReadMacros(lines):
- constants = []
- macros = []
- for line in lines:
- hash = line.find('#')
- if hash != -1: line = line[:hash]
- line = line.strip()
- if len(line) is 0: continue
- const_match = CONST_PATTERN.match(line)
- if const_match:
- name = const_match.group(1)
- value = const_match.group(2).strip()
- constants.append((re.compile("\\b%s\\b" % name), value))
- else:
- macro_match = MACRO_PATTERN.match(line)
- if macro_match:
- name = macro_match.group(1)
- args = map(string.strip, macro_match.group(2).split(','))
- body = macro_match.group(3).strip()
- macros.append((re.compile("\\b%s\\(" % name), TextMacro(args, body)))
- else:
- python_match = PYTHON_MACRO_PATTERN.match(line)
- if python_match:
- name = python_match.group(1)
- args = map(string.strip, python_match.group(2).split(','))
- body = python_match.group(3).strip()
- fun = eval("lambda " + ",".join(args) + ': ' + body)
- macros.append((re.compile("\\b%s\\(" % name), PythonMacro(args, fun)))
- else:
- raise ("Illegal line: " + line)
- return (constants, macros)
-
-
-HEADER_TEMPLATE = """\
-// Copyright 2008 Google Inc. All Rights Reserved.
-
-// This file was generated from .js source files by SCons. If you
-// want to make changes to this file you should either change the
-// javascript source files or the SConstruct script.
-
-#include "v8.h"
-#include "natives.h"
-
-namespace v8 {
-namespace internal {
-
-%(source_lines)s\
-
- template <>
- int NativesCollection<%(type)s>::GetBuiltinsCount() {
- return %(builtin_count)i;
- }
-
- template <>
- int NativesCollection<%(type)s>::GetDebuggerCount() {
- return %(debugger_count)i;
- }
-
- template <>
- int NativesCollection<%(type)s>::GetIndex(const char* name) {
-%(get_index_cases)s\
- return -1;
- }
-
- template <>
- Vector<const char> NativesCollection<%(type)s>::GetScriptSource(int index) {
-%(get_script_source_cases)s\
- return Vector<const char>("", 0);
- }
-
- template <>
- Vector<const char> NativesCollection<%(type)s>::GetScriptName(int index) {
-%(get_script_name_cases)s\
- return Vector<const char>("", 0);
- }
-
-} // internal
-} // v8
-"""
-
-
-SOURCE_DECLARATION = """\
- static const char %(id)s[] = { %(data)s };
-"""
-
-
-GET_DEBUGGER_INDEX_CASE = """\
- if (strcmp(name, "%(id)s") == 0) return %(i)i;
-"""
-
-
-GET_DEBUGGER_SCRIPT_SOURCE_CASE = """\
- if (index == %(i)i) return Vector<const char>(%(id)s, %(length)i);
-"""
-
-
-GET_DEBUGGER_SCRIPT_NAME_CASE = """\
- if (index == %(i)i) return Vector<const char>("%(name)s", %(length)i);
-"""
-
-def JS2C(source, target, env):
- ids = []
- debugger_ids = []
- modules = []
- # Locate the macros file name.
- consts = []
- macros = []
- for s in source:
- if 'macros.py' == (os.path.split(str(s))[1]):
- (consts, macros) = ReadMacros(ReadLines(str(s)))
- else:
- modules.append(s)
-
- # Build source code lines
- source_lines = [ ]
-
- minifier = jsmin.JavaScriptMinifier()
-
- source_lines_empty = []
- for module in modules:
- filename = str(module)
- debugger = filename.endswith('-debugger.js')
- lines = ReadFile(filename)
- lines = ExpandConstants(lines, consts)
- lines = ExpandMacros(lines, macros)
- Validate(lines, filename)
- lines = minifier.JSMinify(lines)
- data = ToCArray(lines)
- id = (os.path.split(filename)[1])[:-3]
- if debugger: id = id[:-9]
- if debugger:
- debugger_ids.append((id, len(lines)))
- else:
- ids.append((id, len(lines)))
- source_lines.append(SOURCE_DECLARATION % { 'id': id, 'data': data })
- source_lines_empty.append(SOURCE_DECLARATION % { 'id': id, 'data': data })
-
- # Build debugger support functions
- get_index_cases = [ ]
- get_script_source_cases = [ ]
- get_script_name_cases = [ ]
-
- i = 0
- for (id, length) in debugger_ids:
- native_name = "native %s.js" % id
- get_index_cases.append(GET_DEBUGGER_INDEX_CASE % { 'id': id, 'i': i })
- get_script_source_cases.append(GET_DEBUGGER_SCRIPT_SOURCE_CASE % {
- 'id': id,
- 'length': length,
- 'i': i
- })
- get_script_name_cases.append(GET_DEBUGGER_SCRIPT_NAME_CASE % {
- 'name': native_name,
- 'length': len(native_name),
- 'i': i
- });
- i = i + 1
-
- for (id, length) in ids:
- native_name = "native %s.js" % id
- get_index_cases.append(GET_DEBUGGER_INDEX_CASE % { 'id': id, 'i': i })
- get_script_source_cases.append(GET_DEBUGGER_SCRIPT_SOURCE_CASE % {
- 'id': id,
- 'length': length,
- 'i': i
- })
- get_script_name_cases.append(GET_DEBUGGER_SCRIPT_NAME_CASE % {
- 'name': native_name,
- 'length': len(native_name),
- 'i': i
- });
- i = i + 1
-
- # Emit result
- output = open(str(target[0]), "w")
- output.write(HEADER_TEMPLATE % {
- 'builtin_count': len(ids) + len(debugger_ids),
- 'debugger_count': len(debugger_ids),
- 'source_lines': "\n".join(source_lines),
- 'get_index_cases': "".join(get_index_cases),
- 'get_script_source_cases': "".join(get_script_source_cases),
- 'get_script_name_cases': "".join(get_script_name_cases),
- 'type': env['TYPE']
- })
- output.close()
-
- if len(target) > 1:
- output = open(str(target[1]), "w")
- output.write(HEADER_TEMPLATE % {
- 'builtin_count': len(ids) + len(debugger_ids),
- 'debugger_count': len(debugger_ids),
- 'source_lines': "\n".join(source_lines_empty),
- 'get_index_cases': "".join(get_index_cases),
- 'get_script_source_cases': "".join(get_script_source_cases),
- 'get_script_name_cases': "".join(get_script_name_cases),
- 'type': env['TYPE']
- })
- output.close()
-
-def main():
- natives = sys.argv[1]
- natives_empty = sys.argv[2]
- type = sys.argv[3]
- source_files = sys.argv[4:]
- JS2C(source_files, [natives, natives_empty], { 'TYPE': type })
-
-if __name__ == "__main__":
- main()
diff --git a/src/3rdparty/v8/tools/jsmin.py b/src/3rdparty/v8/tools/jsmin.py
deleted file mode 100644
index 646bf14..0000000
--- a/src/3rdparty/v8/tools/jsmin.py
+++ /dev/null
@@ -1,280 +0,0 @@
-#!/usr/bin/python2.4
-
-# Copyright 2009 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""A JavaScript minifier.
-
-It is far from being a complete JS parser, so there are many valid
-JavaScript programs that will be ruined by it. Another strangeness is that
-it accepts $ and % as parts of identifiers. It doesn't merge lines or strip
-out blank lines in order to ease debugging. Variables at the top scope are
-properties of the global object so we can't rename them. It is assumed that
-you introduce variables with var as if JavaScript followed C++ scope rules
-around curly braces, so the declaration must be above the first use.
-
-Use as:
-import jsmin
-minifier = JavaScriptMinifier()
-program1 = minifier.JSMinify(program1)
-program2 = minifier.JSMinify(program2)
-"""
-
-import re
-
-
-class JavaScriptMinifier(object):
- """An object that you can feed code snippets to to get them minified."""
-
- def __init__(self):
- # We prepopulate the list of identifiers that shouldn't be used. These
- # short language keywords could otherwise be used by the script as variable
- # names.
- self.seen_identifiers = {"do": True, "in": True}
- self.identifier_counter = 0
- self.in_comment = False
- self.map = {}
- self.nesting = 0
-
- def LookAtIdentifier(self, m):
- """Records identifiers or keywords that we see in use.
-
- (So we can avoid renaming variables to these strings.)
- Args:
- m: The match object returned by re.search.
-
- Returns:
- Nothing.
- """
- identifier = m.group(1)
- self.seen_identifiers[identifier] = True
-
- def Push(self):
- """Called when we encounter a '{'."""
- self.nesting += 1
-
- def Pop(self):
- """Called when we encounter a '}'."""
- self.nesting -= 1
- # We treat each top-level opening brace as a single scope that can span
- # several sets of nested braces.
- if self.nesting == 0:
- self.map = {}
- self.identifier_counter = 0
-
- def Declaration(self, m):
- """Rewrites bits of the program selected by a regexp.
-
- These can be curly braces, literal strings, function declarations and var
- declarations. (These last two must be on one line including the opening
- curly brace of the function for their variables to be renamed).
-
- Args:
- m: The match object returned by re.search.
-
- Returns:
- The string that should replace the match in the rewritten program.
- """
- matched_text = m.group(0)
- if matched_text == "{":
- self.Push()
- return matched_text
- if matched_text == "}":
- self.Pop()
- return matched_text
- if re.match("[\"'/]", matched_text):
- return matched_text
- m = re.match(r"var ", matched_text)
- if m:
- var_names = matched_text[m.end():]
- var_names = re.split(r",", var_names)
- return "var " + ",".join(map(self.FindNewName, var_names))
- m = re.match(r"(function\b[^(]*)\((.*)\)\{$", matched_text)
- if m:
- up_to_args = m.group(1)
- args = m.group(2)
- args = re.split(r",", args)
- self.Push()
- return up_to_args + "(" + ",".join(map(self.FindNewName, args)) + "){"
-
- if matched_text in self.map:
- return self.map[matched_text]
-
- return matched_text
-
- def CharFromNumber(self, number):
- """A single-digit base-52 encoding using a-zA-Z."""
- if number < 26:
- return chr(number + 97)
- number -= 26
- return chr(number + 65)
-
- def FindNewName(self, var_name):
- """Finds a new 1-character or 2-character name for a variable.
-
- Enters it into the mapping table for this scope.
-
- Args:
- var_name: The name of the variable before renaming.
-
- Returns:
- The new name of the variable.
- """
- new_identifier = ""
- # Variable names that end in _ are member variables of the global object,
- # so they can be visible from code in a different scope. We leave them
- # alone.
- if var_name in self.map:
- return self.map[var_name]
- if self.nesting == 0:
- return var_name
- while True:
- identifier_first_char = self.identifier_counter % 52
- identifier_second_char = self.identifier_counter / 52
- new_identifier = self.CharFromNumber(identifier_first_char)
- if identifier_second_char != 0:
- new_identifier = (
- self.CharFromNumber(identifier_second_char - 1) + new_identifier)
- self.identifier_counter += 1
- if not new_identifier in self.seen_identifiers:
- break
-
- self.map[var_name] = new_identifier
- return new_identifier
-
- def RemoveSpaces(self, m):
- """Returns literal strings unchanged, replaces other inputs with group 2.
-
- Other inputs are replaced with the contents of capture 1. This is either
- a single space or an empty string.
-
- Args:
- m: The match object returned by re.search.
-
- Returns:
- The string that should be inserted instead of the matched text.
- """
- entire_match = m.group(0)
- replacement = m.group(1)
- if re.match(r"'.*'$", entire_match):
- return entire_match
- if re.match(r'".*"$', entire_match):
- return entire_match
- if re.match(r"/.+/$", entire_match):
- return entire_match
- return replacement
-
- def JSMinify(self, text):
- """The main entry point. Takes a text and returns a compressed version.
-
- The compressed version hopefully does the same thing. Line breaks are
- preserved.
-
- Args:
- text: The text of the code snippet as a multiline string.
-
- Returns:
- The compressed text of the code snippet as a multiline string.
- """
- new_lines = []
- for line in re.split(r"\n", text):
- line = line.replace("\t", " ")
- if self.in_comment:
- m = re.search(r"\*/", line)
- if m:
- line = line[m.end():]
- self.in_comment = False
- else:
- new_lines.append("")
- continue
-
- if not self.in_comment:
- line = re.sub(r"/\*.*?\*/", " ", line)
- line = re.sub(r"//.*", "", line)
- m = re.search(r"/\*", line)
- if m:
- line = line[:m.start()]
- self.in_comment = True
-
- # Strip leading and trailing spaces.
- line = re.sub(r"^ +", "", line)
- line = re.sub(r" +$", "", line)
- # A regexp that matches a literal string surrounded by "double quotes".
- # This regexp can handle embedded backslash-escaped characters including
- # embedded backslash-escaped double quotes.
- double_quoted_string = r'"(?:[^"\\]|\\.)*"'
- # A regexp that matches a literal string surrounded by 'double quotes'.
- single_quoted_string = r"'(?:[^'\\]|\\.)*'"
- # A regexp that matches a regexp literal surrounded by /slashes/.
- # Don't allow a regexp to have a ) before the first ( since that's a
- # syntax error and it's probably just two unrelated slashes.
- slash_quoted_regexp = r"/(?:(?=\()|(?:[^()/\\]|\\.)+)(?:\([^/\\]|\\.)*/"
- # Replace multiple spaces with a single space.
- line = re.sub("|".join([double_quoted_string,
- single_quoted_string,
- slash_quoted_regexp,
- "( )+"]),
- self.RemoveSpaces,
- line)
- # Strip single spaces unless they have an identifier character both before
- # and after the space. % and $ are counted as identifier characters.
- line = re.sub("|".join([double_quoted_string,
- single_quoted_string,
- slash_quoted_regexp,
- r"(?<![a-zA-Z_0-9$%]) | (?![a-zA-Z_0-9$%])()"]),
- self.RemoveSpaces,
- line)
- # Collect keywords and identifiers that are already in use.
- if self.nesting == 0:
- re.sub(r"([a-zA-Z0-9_$%]+)", self.LookAtIdentifier, line)
- function_declaration_regexp = (
- r"\bfunction" # Function definition keyword...
- r"( [\w$%]+)?" # ...optional function name...
- r"\([\w$%,]+\)\{") # ...argument declarations.
- # Unfortunately the keyword-value syntax { key:value } makes the key look
- # like a variable where in fact it is a literal string. We use the
- # presence or absence of a question mark to try to distinguish between
- # this case and the ternary operator: "condition ? iftrue : iffalse".
- if re.search(r"\?", line):
- block_trailing_colon = r""
- else:
- block_trailing_colon = r"(?![:\w$%])"
- # Variable use. Cannot follow a period precede a colon.
- variable_use_regexp = r"(?<![.\w$%])[\w$%]+" + block_trailing_colon
- line = re.sub("|".join([double_quoted_string,
- single_quoted_string,
- slash_quoted_regexp,
- r"\{", # Curly braces.
- r"\}",
- r"\bvar [\w$%,]+", # var declarations.
- function_declaration_regexp,
- variable_use_regexp]),
- self.Declaration,
- line)
- new_lines.append(line)
-
- return "\n".join(new_lines) + "\n"
diff --git a/src/3rdparty/v8/tools/linux-tick-processor b/src/3rdparty/v8/tools/linux-tick-processor
deleted file mode 100755
index 9789697..0000000
--- a/src/3rdparty/v8/tools/linux-tick-processor
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/sh
-
-tools_path=`cd $(dirname "$0");pwd`
-if [ ! "$D8_PATH" ]; then
- d8_public=`which d8`
- if [ -x $d8_public ]; then D8_PATH=$(dirname "$d8_public"); fi
-fi
-[ "$D8_PATH" ] || D8_PATH=$tools_path/..
-d8_exec=$D8_PATH/d8
-
-if [ "$1" = "--no-build" ]; then
- shift
-else
-# compile d8 if it doesn't exist, assuming this script
-# resides in the repository.
- [ -x $d8_exec ] || scons -j4 -C $D8_PATH -Y $tools_path/.. d8
-fi
-
-
-# find the name of the log file to process, it must not start with a dash.
-log_file="v8.log"
-for arg in "$@"
-do
- if ! expr "X${arg}" : "^X-" > /dev/null; then
- log_file=${arg}
- fi
-done
-
-
-# nm spits out 'no symbols found' messages to stderr.
-cat $log_file | $d8_exec $tools_path/splaytree.js $tools_path/codemap.js \
- $tools_path/csvparser.js $tools_path/consarray.js \
- $tools_path/profile.js $tools_path/profile_view.js \
- $tools_path/logreader.js $tools_path/tickprocessor.js \
- $tools_path/tickprocessor-driver.js -- $@ 2>/dev/null
diff --git a/src/3rdparty/v8/tools/ll_prof.py b/src/3rdparty/v8/tools/ll_prof.py
deleted file mode 100755
index 7f12c13..0000000
--- a/src/3rdparty/v8/tools/ll_prof.py
+++ /dev/null
@@ -1,919 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2010 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import bisect
-import collections
-import ctypes
-import disasm
-import mmap
-import optparse
-import os
-import re
-import subprocess
-import sys
-import time
-
-
-USAGE="""usage: %prog [OPTION]...
-
-Analyses V8 and perf logs to produce profiles.
-
-Perf logs can be collected using a command like:
- $ perf record -R -e cycles -c 10000 -f -i ./shell bench.js --ll-prof
- # -R: collect all data
- # -e cycles: use cpu-cycles event (run "perf list" for details)
- # -c 10000: write a sample after each 10000 events
- # -f: force output file overwrite
- # -i: limit profiling to our process and the kernel
- # --ll-prof shell flag enables the right V8 logs
-This will produce a binary trace file (perf.data) that %prog can analyse.
-
-Examples:
- # Print flat profile with annotated disassembly for the 10 top
- # symbols. Use default log names and include the snapshot log.
- $ %prog --snapshot --disasm-top=10
-
- # Print flat profile with annotated disassembly for all used symbols.
- # Use default log names and include kernel symbols into analysis.
- $ %prog --disasm-all --kernel
-
- # Print flat profile. Use custom log names.
- $ %prog --log=foo.log --snapshot-log=snap-foo.log --trace=foo.data --snapshot
-"""
-
-
-# Must match kGcFakeMmap.
-V8_GC_FAKE_MMAP = "/tmp/__v8_gc__"
-
-JS_ORIGIN = "js"
-JS_SNAPSHOT_ORIGIN = "js-snapshot"
-
-OBJDUMP_BIN = disasm.OBJDUMP_BIN
-
-
-class Code(object):
- """Code object."""
-
- _id = 0
-
- def __init__(self, name, start_address, end_address, origin, origin_offset):
- self.id = Code._id
- Code._id += 1
- self.name = name
- self.other_names = None
- self.start_address = start_address
- self.end_address = end_address
- self.origin = origin
- self.origin_offset = origin_offset
- self.self_ticks = 0
- self.self_ticks_map = None
- self.callee_ticks = None
-
- def AddName(self, name):
- assert self.name != name
- if self.other_names is None:
- self.other_names = [name]
- return
- if not name in self.other_names:
- self.other_names.append(name)
-
- def FullName(self):
- if self.other_names is None:
- return self.name
- self.other_names.sort()
- return "%s (aka %s)" % (self.name, ", ".join(self.other_names))
-
- def IsUsed(self):
- return self.self_ticks > 0 or self.callee_ticks is not None
-
- def Tick(self, pc):
- self.self_ticks += 1
- if self.self_ticks_map is None:
- self.self_ticks_map = collections.defaultdict(lambda: 0)
- offset = pc - self.start_address
- self.self_ticks_map[offset] += 1
-
- def CalleeTick(self, callee):
- if self.callee_ticks is None:
- self.callee_ticks = collections.defaultdict(lambda: 0)
- self.callee_ticks[callee] += 1
-
- def PrintAnnotated(self, code_info, options):
- if self.self_ticks_map is None:
- ticks_map = []
- else:
- ticks_map = self.self_ticks_map.items()
- # Convert the ticks map to offsets and counts arrays so that later
- # we can do binary search in the offsets array.
- ticks_map.sort(key=lambda t: t[0])
- ticks_offsets = [t[0] for t in ticks_map]
- ticks_counts = [t[1] for t in ticks_map]
- # Get a list of disassembled lines and their addresses.
- lines = self._GetDisasmLines(code_info, options)
- if len(lines) == 0:
- return
- # Print annotated lines.
- address = lines[0][0]
- total_count = 0
- for i in xrange(len(lines)):
- start_offset = lines[i][0] - address
- if i == len(lines) - 1:
- end_offset = self.end_address - self.start_address
- else:
- end_offset = lines[i + 1][0] - address
- # Ticks (reported pc values) are not always precise, i.e. not
- # necessarily point at instruction starts. So we have to search
- # for ticks that touch the current instruction line.
- j = bisect.bisect_left(ticks_offsets, end_offset)
- count = 0
- for offset, cnt in reversed(zip(ticks_offsets[:j], ticks_counts[:j])):
- if offset < start_offset:
- break
- count += cnt
- total_count += count
- count = 100.0 * count / self.self_ticks
- if count >= 0.01:
- print "%15.2f %x: %s" % (count, lines[i][0], lines[i][1])
- else:
- print "%s %x: %s" % (" " * 15, lines[i][0], lines[i][1])
- print
- assert total_count == self.self_ticks, \
- "Lost ticks (%d != %d) in %s" % (total_count, self.self_ticks, self)
-
- def __str__(self):
- return "%s [0x%x, 0x%x) size: %d origin: %s" % (
- self.name,
- self.start_address,
- self.end_address,
- self.end_address - self.start_address,
- self.origin)
-
- def _GetDisasmLines(self, code_info, options):
- if self.origin == JS_ORIGIN or self.origin == JS_SNAPSHOT_ORIGIN:
- inplace = False
- filename = options.log + ".code"
- else:
- inplace = True
- filename = self.origin
- return disasm.GetDisasmLines(filename,
- self.origin_offset,
- self.end_address - self.start_address,
- code_info.arch,
- inplace)
-
-
-class CodePage(object):
- """Group of adjacent code objects."""
-
- SHIFT = 12 # 4K pages
- SIZE = (1 << SHIFT)
- MASK = ~(SIZE - 1)
-
- @staticmethod
- def PageAddress(address):
- return address & CodePage.MASK
-
- @staticmethod
- def PageId(address):
- return address >> CodePage.SHIFT
-
- @staticmethod
- def PageAddressFromId(id):
- return id << CodePage.SHIFT
-
- def __init__(self, address):
- self.address = address
- self.code_objects = []
-
- def Add(self, code):
- self.code_objects.append(code)
-
- def Remove(self, code):
- self.code_objects.remove(code)
-
- def Find(self, pc):
- code_objects = self.code_objects
- for i, code in enumerate(code_objects):
- if code.start_address <= pc < code.end_address:
- code_objects[0], code_objects[i] = code, code_objects[0]
- return code
- return None
-
- def __iter__(self):
- return self.code_objects.__iter__()
-
-
-class CodeMap(object):
- """Code object map."""
-
- def __init__(self):
- self.pages = {}
- self.min_address = 1 << 64
- self.max_address = -1
-
- def Add(self, code, max_pages=-1):
- page_id = CodePage.PageId(code.start_address)
- limit_id = CodePage.PageId(code.end_address + CodePage.SIZE - 1)
- pages = 0
- while page_id < limit_id:
- if max_pages >= 0 and pages > max_pages:
- print >>sys.stderr, \
- "Warning: page limit (%d) reached for %s [%s]" % (
- max_pages, code.name, code.origin)
- break
- if page_id in self.pages:
- page = self.pages[page_id]
- else:
- page = CodePage(CodePage.PageAddressFromId(page_id))
- self.pages[page_id] = page
- page.Add(code)
- page_id += 1
- pages += 1
- self.min_address = min(self.min_address, code.start_address)
- self.max_address = max(self.max_address, code.end_address)
-
- def Remove(self, code):
- page_id = CodePage.PageId(code.start_address)
- limit_id = CodePage.PageId(code.end_address + CodePage.SIZE - 1)
- removed = False
- while page_id < limit_id:
- if page_id not in self.pages:
- page_id += 1
- continue
- page = self.pages[page_id]
- page.Remove(code)
- removed = True
- page_id += 1
- return removed
-
- def AllCode(self):
- for page in self.pages.itervalues():
- for code in page:
- if CodePage.PageAddress(code.start_address) == page.address:
- yield code
-
- def UsedCode(self):
- for code in self.AllCode():
- if code.IsUsed():
- yield code
-
- def Print(self):
- for code in self.AllCode():
- print code
-
- def Find(self, pc):
- if pc < self.min_address or pc >= self.max_address:
- return None
- page_id = CodePage.PageId(pc)
- if page_id not in self.pages:
- return None
- return self.pages[page_id].Find(pc)
-
-
-class CodeInfo(object):
- """Generic info about generated code objects."""
-
- def __init__(self, arch, header_size):
- self.arch = arch
- self.header_size = header_size
-
-
-class CodeLogReader(object):
- """V8 code event log reader."""
-
- _CODE_INFO_RE = re.compile(
- r"code-info,([^,]+),(\d+)")
-
- _CODE_CREATE_RE = re.compile(
- r"code-creation,([^,]+),(0x[a-f0-9]+),(\d+),\"(.*)\"(?:,(0x[a-f0-9]+),([~*])?)?(?:,(\d+))?")
-
- _CODE_MOVE_RE = re.compile(
- r"code-move,(0x[a-f0-9]+),(0x[a-f0-9]+)")
-
- _CODE_DELETE_RE = re.compile(
- r"code-delete,(0x[a-f0-9]+)")
-
- _SNAPSHOT_POS_RE = re.compile(
- r"snapshot-pos,(0x[a-f0-9]+),(\d+)")
-
- _CODE_MOVING_GC = "code-moving-gc"
-
- def __init__(self, log_name, code_map, is_snapshot, snapshot_pos_to_name):
- self.log = open(log_name, "r")
- self.code_map = code_map
- self.is_snapshot = is_snapshot
- self.snapshot_pos_to_name = snapshot_pos_to_name
- self.address_to_snapshot_name = {}
-
- def ReadCodeInfo(self):
- line = self.log.readline() or ""
- match = CodeLogReader._CODE_INFO_RE.match(line)
- assert match, "No code info in log"
- return CodeInfo(arch=match.group(1), header_size=int(match.group(2)))
-
- def ReadUpToGC(self, code_info):
- made_progress = False
- code_header_size = code_info.header_size
- while True:
- line = self.log.readline()
- if not line:
- return made_progress
- made_progress = True
-
- if line.startswith(CodeLogReader._CODE_MOVING_GC):
- self.address_to_snapshot_name.clear()
- return made_progress
-
- match = CodeLogReader._CODE_CREATE_RE.match(line)
- if match:
- start_address = int(match.group(2), 16) + code_header_size
- end_address = start_address + int(match.group(3)) - code_header_size
- if start_address in self.address_to_snapshot_name:
- name = self.address_to_snapshot_name[start_address]
- origin = JS_SNAPSHOT_ORIGIN
- else:
- tag = match.group(1)
- optimization_status = match.group(6)
- func_name = match.group(4)
- if optimization_status:
- name = "%s:%s%s" % (tag, optimization_status, func_name)
- else:
- name = "%s:%s" % (tag, func_name)
- origin = JS_ORIGIN
- if self.is_snapshot:
- origin_offset = 0
- else:
- origin_offset = int(match.group(7))
- code = Code(name, start_address, end_address, origin, origin_offset)
- conficting_code = self.code_map.Find(start_address)
- if conficting_code:
- CodeLogReader._HandleCodeConflict(conficting_code, code)
- # TODO(vitalyr): this warning is too noisy because of our
- # attempts to reconstruct code log from the snapshot.
- # print >>sys.stderr, \
- # "Warning: Skipping duplicate code log entry %s" % code
- continue
- self.code_map.Add(code)
- continue
-
- match = CodeLogReader._CODE_MOVE_RE.match(line)
- if match:
- old_start_address = int(match.group(1), 16) + code_header_size
- new_start_address = int(match.group(2), 16) + code_header_size
- if old_start_address == new_start_address:
- # Skip useless code move entries.
- continue
- code = self.code_map.Find(old_start_address)
- if not code:
- print >>sys.stderr, "Warning: Not found %x" % old_start_address
- continue
- assert code.start_address == old_start_address, \
- "Inexact move address %x for %s" % (old_start_address, code)
- self.code_map.Remove(code)
- size = code.end_address - code.start_address
- code.start_address = new_start_address
- code.end_address = new_start_address + size
- self.code_map.Add(code)
- continue
-
- match = CodeLogReader._CODE_DELETE_RE.match(line)
- if match:
- old_start_address = int(match.group(1), 16) + code_header_size
- code = self.code_map.Find(old_start_address)
- if not code:
- print >>sys.stderr, "Warning: Not found %x" % old_start_address
- continue
- assert code.start_address == old_start_address, \
- "Inexact delete address %x for %s" % (old_start_address, code)
- self.code_map.Remove(code)
- continue
-
- match = CodeLogReader._SNAPSHOT_POS_RE.match(line)
- if match:
- start_address = int(match.group(1), 16) + code_header_size
- snapshot_pos = int(match.group(2))
- if self.is_snapshot:
- code = self.code_map.Find(start_address)
- if code:
- assert code.start_address == start_address, \
- "Inexact snapshot address %x for %s" % (start_address, code)
- self.snapshot_pos_to_name[snapshot_pos] = code.name
- else:
- if snapshot_pos in self.snapshot_pos_to_name:
- self.address_to_snapshot_name[start_address] = \
- self.snapshot_pos_to_name[snapshot_pos]
-
- def Dispose(self):
- self.log.close()
-
- @staticmethod
- def _HandleCodeConflict(old_code, new_code):
- assert (old_code.start_address == new_code.start_address and
- old_code.end_address == new_code.end_address), \
- "Conficting code log entries %s and %s" % (old_code, new_code)
- CodeLogReader._UpdateNames(old_code, new_code)
-
- @staticmethod
- def _UpdateNames(old_code, new_code):
- if old_code.name == new_code.name:
- return
- # Kludge: there are code objects with custom names that don't
- # match their flags.
- misnamed_code = set(["Builtin:CpuFeatures::Probe"])
- if old_code.name in misnamed_code:
- return
- # Code object may be shared by a few functions. Collect the full
- # set of names.
- old_code.AddName(new_code.name)
-
-
-class Descriptor(object):
- """Descriptor of a structure in the binary trace log."""
-
- CTYPE_MAP = {
- "u16": ctypes.c_uint16,
- "u32": ctypes.c_uint32,
- "u64": ctypes.c_uint64
- }
-
- def __init__(self, fields):
- class TraceItem(ctypes.Structure):
- _fields_ = Descriptor.CtypesFields(fields)
-
- def __str__(self):
- return ", ".join("%s: %s" % (field, self.__getattribute__(field))
- for field, _ in TraceItem._fields_)
-
- self.ctype = TraceItem
-
- def Read(self, trace, offset):
- return self.ctype.from_buffer(trace, offset)
-
- @staticmethod
- def CtypesFields(fields):
- return [(field, Descriptor.CTYPE_MAP[format]) for (field, format) in fields]
-
-
-# Please see http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=tree;f=tools/perf
-# for the gory details.
-
-
-TRACE_HEADER_DESC = Descriptor([
- ("magic", "u64"),
- ("size", "u64"),
- ("attr_size", "u64"),
- ("attrs_offset", "u64"),
- ("attrs_size", "u64"),
- ("data_offset", "u64"),
- ("data_size", "u64"),
- ("event_types_offset", "u64"),
- ("event_types_size", "u64")
-])
-
-
-PERF_EVENT_ATTR_DESC = Descriptor([
- ("type", "u32"),
- ("size", "u32"),
- ("config", "u64"),
- ("sample_period_or_freq", "u64"),
- ("sample_type", "u64"),
- ("read_format", "u64"),
- ("flags", "u64"),
- ("wakeup_events_or_watermark", "u32"),
- ("bt_type", "u32"),
- ("bp_addr", "u64"),
- ("bp_len", "u64"),
-])
-
-
-PERF_EVENT_HEADER_DESC = Descriptor([
- ("type", "u32"),
- ("misc", "u16"),
- ("size", "u16")
-])
-
-
-PERF_MMAP_EVENT_BODY_DESC = Descriptor([
- ("pid", "u32"),
- ("tid", "u32"),
- ("addr", "u64"),
- ("len", "u64"),
- ("pgoff", "u64")
-])
-
-
-# perf_event_attr.sample_type bits control the set of
-# perf_sample_event fields.
-PERF_SAMPLE_IP = 1 << 0
-PERF_SAMPLE_TID = 1 << 1
-PERF_SAMPLE_TIME = 1 << 2
-PERF_SAMPLE_ADDR = 1 << 3
-PERF_SAMPLE_READ = 1 << 4
-PERF_SAMPLE_CALLCHAIN = 1 << 5
-PERF_SAMPLE_ID = 1 << 6
-PERF_SAMPLE_CPU = 1 << 7
-PERF_SAMPLE_PERIOD = 1 << 8
-PERF_SAMPLE_STREAM_ID = 1 << 9
-PERF_SAMPLE_RAW = 1 << 10
-
-
-PERF_SAMPLE_EVENT_BODY_FIELDS = [
- ("ip", "u64", PERF_SAMPLE_IP),
- ("pid", "u32", PERF_SAMPLE_TID),
- ("tid", "u32", PERF_SAMPLE_TID),
- ("time", "u64", PERF_SAMPLE_TIME),
- ("addr", "u64", PERF_SAMPLE_ADDR),
- ("id", "u64", PERF_SAMPLE_ID),
- ("stream_id", "u64", PERF_SAMPLE_STREAM_ID),
- ("cpu", "u32", PERF_SAMPLE_CPU),
- ("res", "u32", PERF_SAMPLE_CPU),
- ("period", "u64", PERF_SAMPLE_PERIOD),
- # Don't want to handle read format that comes after the period and
- # before the callchain and has variable size.
- ("nr", "u64", PERF_SAMPLE_CALLCHAIN)
- # Raw data follows the callchain and is ignored.
-]
-
-
-PERF_SAMPLE_EVENT_IP_FORMAT = "u64"
-
-
-PERF_RECORD_MMAP = 1
-PERF_RECORD_SAMPLE = 9
-
-
-class TraceReader(object):
- """Perf (linux-2.6/tools/perf) trace file reader."""
-
- _TRACE_HEADER_MAGIC = 4993446653023372624
-
- def __init__(self, trace_name):
- self.trace_file = open(trace_name, "r")
- self.trace = mmap.mmap(self.trace_file.fileno(), 0, mmap.MAP_PRIVATE)
- self.trace_header = TRACE_HEADER_DESC.Read(self.trace, 0)
- if self.trace_header.magic != TraceReader._TRACE_HEADER_MAGIC:
- print >>sys.stderr, "Warning: unsupported trace header magic"
- self.offset = self.trace_header.data_offset
- self.limit = self.trace_header.data_offset + self.trace_header.data_size
- assert self.limit <= self.trace.size(), \
- "Trace data limit exceeds trace file size"
- self.header_size = ctypes.sizeof(PERF_EVENT_HEADER_DESC.ctype)
- assert self.trace_header.attrs_size != 0, \
- "No perf event attributes found in the trace"
- perf_event_attr = PERF_EVENT_ATTR_DESC.Read(self.trace,
- self.trace_header.attrs_offset)
- self.sample_event_body_desc = self._SampleEventBodyDesc(
- perf_event_attr.sample_type)
- self.callchain_supported = \
- (perf_event_attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0
- if self.callchain_supported:
- self.ip_struct = Descriptor.CTYPE_MAP[PERF_SAMPLE_EVENT_IP_FORMAT]
- self.ip_size = ctypes.sizeof(self.ip_struct)
-
- def ReadEventHeader(self):
- if self.offset >= self.limit:
- return None, 0
- offset = self.offset
- header = PERF_EVENT_HEADER_DESC.Read(self.trace, self.offset)
- self.offset += header.size
- return header, offset
-
- def ReadMmap(self, header, offset):
- mmap_info = PERF_MMAP_EVENT_BODY_DESC.Read(self.trace,
- offset + self.header_size)
- # Read null-padded filename.
- filename = self.trace[offset + self.header_size + ctypes.sizeof(mmap_info):
- offset + header.size].rstrip(chr(0))
- mmap_info.filename = filename
- return mmap_info
-
- def ReadSample(self, header, offset):
- sample = self.sample_event_body_desc.Read(self.trace,
- offset + self.header_size)
- if not self.callchain_supported:
- return sample
- sample.ips = []
- offset += self.header_size + ctypes.sizeof(sample)
- for _ in xrange(sample.nr):
- sample.ips.append(
- self.ip_struct.from_buffer(self.trace, offset).value)
- offset += self.ip_size
- return sample
-
- def Dispose(self):
- self.trace.close()
- self.trace_file.close()
-
- def _SampleEventBodyDesc(self, sample_type):
- assert (sample_type & PERF_SAMPLE_READ) == 0, \
- "Can't hande read format in samples"
- fields = [(field, format)
- for (field, format, bit) in PERF_SAMPLE_EVENT_BODY_FIELDS
- if (bit & sample_type) != 0]
- return Descriptor(fields)
-
-
-OBJDUMP_SECTION_HEADER_RE = re.compile(
- r"^\s*\d+\s(\.\S+)\s+[a-f0-9]")
-OBJDUMP_SYMBOL_LINE_RE = re.compile(
- r"^([a-f0-9]+)\s(.{7})\s(\S+)\s+([a-f0-9]+)\s+(?:\.hidden\s+)?(.*)$")
-OBJDUMP_DYNAMIC_SYMBOLS_START_RE = re.compile(
- r"^DYNAMIC SYMBOL TABLE")
-KERNEL_ALLSYMS_FILE = "/proc/kallsyms"
-PERF_KERNEL_ALLSYMS_RE = re.compile(
- r".*kallsyms.*")
-KERNEL_ALLSYMS_LINE_RE = re.compile(
- r"^([a-f0-9]+)\s(?:t|T)\s(\S+)$")
-
-
-class LibraryRepo(object):
- def __init__(self):
- self.infos = []
- self.names = set()
- self.ticks = {}
-
- def Load(self, mmap_info, code_map, options):
- # Skip kernel mmaps when requested using the fact that their tid
- # is 0.
- if mmap_info.tid == 0 and not options.kernel:
- return True
- if PERF_KERNEL_ALLSYMS_RE.match(mmap_info.filename):
- return self._LoadKernelSymbols(code_map)
- self.infos.append(mmap_info)
- mmap_info.ticks = 0
- mmap_info.unique_name = self._UniqueMmapName(mmap_info)
- if not os.path.exists(mmap_info.filename):
- return True
- # Request section headers (-h), symbols (-t), and dynamic symbols
- # (-T) from objdump.
- # Unfortunately, section headers span two lines, so we have to
- # keep the just seen section name (from the first line in each
- # section header) in the after_section variable.
- process = subprocess.Popen(
- "%s -h -t -T -C %s" % (OBJDUMP_BIN, mmap_info.filename),
- shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- pipe = process.stdout
- after_section = None
- code_sections = set()
- reloc_sections = set()
- dynamic = False
- try:
- for line in pipe:
- if after_section:
- if line.find("CODE") != -1:
- code_sections.add(after_section)
- if line.find("RELOC") != -1:
- reloc_sections.add(after_section)
- after_section = None
- continue
-
- match = OBJDUMP_SECTION_HEADER_RE.match(line)
- if match:
- after_section = match.group(1)
- continue
-
- if OBJDUMP_DYNAMIC_SYMBOLS_START_RE.match(line):
- dynamic = True
- continue
-
- match = OBJDUMP_SYMBOL_LINE_RE.match(line)
- if match:
- start_address = int(match.group(1), 16)
- origin_offset = start_address
- flags = match.group(2)
- section = match.group(3)
- if section in code_sections:
- if dynamic or section in reloc_sections:
- start_address += mmap_info.addr
- size = int(match.group(4), 16)
- name = match.group(5)
- origin = mmap_info.filename
- code_map.Add(Code(name, start_address, start_address + size,
- origin, origin_offset))
- finally:
- pipe.close()
- assert process.wait() == 0, "Failed to objdump %s" % mmap_info.filename
-
- def Tick(self, pc):
- for i, mmap_info in enumerate(self.infos):
- if mmap_info.addr <= pc < (mmap_info.addr + mmap_info.len):
- mmap_info.ticks += 1
- self.infos[0], self.infos[i] = mmap_info, self.infos[0]
- return True
- return False
-
- def _UniqueMmapName(self, mmap_info):
- name = mmap_info.filename
- index = 1
- while name in self.names:
- name = "%s-%d" % (mmap_info.filename, index)
- index += 1
- self.names.add(name)
- return name
-
- def _LoadKernelSymbols(self, code_map):
- if not os.path.exists(KERNEL_ALLSYMS_FILE):
- print >>sys.stderr, "Warning: %s not found" % KERNEL_ALLSYMS_FILE
- return False
- kallsyms = open(KERNEL_ALLSYMS_FILE, "r")
- code = None
- for line in kallsyms:
- match = KERNEL_ALLSYMS_LINE_RE.match(line)
- if match:
- start_address = int(match.group(1), 16)
- end_address = start_address
- name = match.group(2)
- if code:
- code.end_address = start_address
- code_map.Add(code, 16)
- code = Code(name, start_address, end_address, "kernel", 0)
- return True
-
-
-def PrintReport(code_map, library_repo, code_info, options):
- print "Ticks per symbol:"
- used_code = [code for code in code_map.UsedCode()]
- used_code.sort(key=lambda x: x.self_ticks, reverse=True)
- for i, code in enumerate(used_code):
- print "%10d %s [%s]" % (code.self_ticks, code.FullName(), code.origin)
- if options.disasm_all or i < options.disasm_top:
- code.PrintAnnotated(code_info, options)
- print
- print "Ticks per library:"
- mmap_infos = [m for m in library_repo.infos]
- mmap_infos.sort(key=lambda m: m.ticks, reverse=True)
- for mmap_info in mmap_infos:
- print "%10d %s" % (mmap_info.ticks, mmap_info.unique_name)
-
-
-def PrintDot(code_map, options):
- print "digraph G {"
- for code in code_map.UsedCode():
- if code.self_ticks < 10:
- continue
- print "n%d [shape=box,label=\"%s\"];" % (code.id, code.name)
- if code.callee_ticks:
- for callee, ticks in code.callee_ticks.iteritems():
- print "n%d -> n%d [label=\"%d\"];" % (code.id, callee.id, ticks)
- print "}"
-
-
-if __name__ == "__main__":
- parser = optparse.OptionParser(USAGE)
- parser.add_option("--snapshot-log",
- default="obj/release/snapshot.log",
- help="V8 snapshot log file name [default: %default]")
- parser.add_option("--log",
- default="v8.log",
- help="V8 log file name [default: %default]")
- parser.add_option("--snapshot",
- default=False,
- action="store_true",
- help="process V8 snapshot log [default: %default]")
- parser.add_option("--trace",
- default="perf.data",
- help="perf trace file name [default: %default]")
- parser.add_option("--kernel",
- default=False,
- action="store_true",
- help="process kernel entries [default: %default]")
- parser.add_option("--disasm-top",
- default=0,
- type="int",
- help=("number of top symbols to disassemble and annotate "
- "[default: %default]"))
- parser.add_option("--disasm-all",
- default=False,
- action="store_true",
- help=("disassemble and annotate all used symbols "
- "[default: %default]"))
- parser.add_option("--dot",
- default=False,
- action="store_true",
- help="produce dot output (WIP) [default: %default]")
- parser.add_option("--quiet", "-q",
- default=False,
- action="store_true",
- help="no auxiliary messages [default: %default]")
- options, args = parser.parse_args()
-
- if not options.quiet:
- if options.snapshot:
- print "V8 logs: %s, %s, %s.code" % (options.snapshot_log,
- options.log,
- options.log)
- else:
- print "V8 log: %s, %s.code (no snapshot)" % (options.log, options.log)
- print "Perf trace file: %s" % options.trace
-
- # Stats.
- events = 0
- ticks = 0
- missed_ticks = 0
- really_missed_ticks = 0
- mmap_time = 0
- sample_time = 0
-
- # Initialize the log reader and get the code info.
- code_map = CodeMap()
- snapshot_name_map = {}
- log_reader = CodeLogReader(log_name=options.log,
- code_map=code_map,
- is_snapshot=False,
- snapshot_pos_to_name=snapshot_name_map)
- code_info = log_reader.ReadCodeInfo()
- if not options.quiet:
- print "Generated code architecture: %s" % code_info.arch
- print
-
- # Process the snapshot log to fill the snapshot name map.
- if options.snapshot:
- snapshot_log_reader = CodeLogReader(log_name=options.snapshot_log,
- code_map=CodeMap(),
- is_snapshot=True,
- snapshot_pos_to_name=snapshot_name_map)
- while snapshot_log_reader.ReadUpToGC(code_info):
- pass
-
- # Process the code and trace logs.
- library_repo = LibraryRepo()
- log_reader.ReadUpToGC(code_info)
- trace_reader = TraceReader(options.trace)
- while True:
- header, offset = trace_reader.ReadEventHeader()
- if not header:
- break
- events += 1
- if header.type == PERF_RECORD_MMAP:
- start = time.time()
- mmap_info = trace_reader.ReadMmap(header, offset)
- if mmap_info.filename == V8_GC_FAKE_MMAP:
- log_reader.ReadUpToGC(code_info)
- else:
- library_repo.Load(mmap_info, code_map, options)
- mmap_time += time.time() - start
- elif header.type == PERF_RECORD_SAMPLE:
- ticks += 1
- start = time.time()
- sample = trace_reader.ReadSample(header, offset)
- code = code_map.Find(sample.ip)
- if code:
- code.Tick(sample.ip)
- else:
- missed_ticks += 1
- if not library_repo.Tick(sample.ip) and not code:
- really_missed_ticks += 1
- if trace_reader.callchain_supported:
- for ip in sample.ips:
- caller_code = code_map.Find(ip)
- if caller_code:
- if code:
- caller_code.CalleeTick(code)
- code = caller_code
- sample_time += time.time() - start
-
- if options.dot:
- PrintDot(code_map, options)
- else:
- PrintReport(code_map, library_repo, code_info, options)
-
- if not options.quiet:
- print
- print "Stats:"
- print "%10d total trace events" % events
- print "%10d total ticks" % ticks
- print "%10d ticks not in symbols" % missed_ticks
- print "%10d unaccounted ticks" % really_missed_ticks
- print "%10d total symbols" % len([c for c in code_map.AllCode()])
- print "%10d used symbols" % len([c for c in code_map.UsedCode()])
- print "%9.2fs library processing time" % mmap_time
- print "%9.2fs tick processing time" % sample_time
-
- log_reader.Dispose()
- trace_reader.Dispose()
diff --git a/src/3rdparty/v8/tools/logreader.js b/src/3rdparty/v8/tools/logreader.js
deleted file mode 100644
index 315e721..0000000
--- a/src/3rdparty/v8/tools/logreader.js
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/**
- * @fileoverview Log Reader is used to process log file produced by V8.
- */
-
-
-/**
- * Base class for processing log files.
- *
- * @param {Array.<Object>} dispatchTable A table used for parsing and processing
- * log records.
- * @constructor
- */
-function LogReader(dispatchTable) {
- /**
- * @type {Array.<Object>}
- */
- this.dispatchTable_ = dispatchTable;
-
- /**
- * Current line.
- * @type {number}
- */
- this.lineNum_ = 0;
-
- /**
- * CSV lines parser.
- * @type {CsvParser}
- */
- this.csvParser_ = new CsvParser();
-};
-
-
-/**
- * Used for printing error messages.
- *
- * @param {string} str Error message.
- */
-LogReader.prototype.printError = function(str) {
- // Do nothing.
-};
-
-
-/**
- * Processes a portion of V8 profiler event log.
- *
- * @param {string} chunk A portion of log.
- */
-LogReader.prototype.processLogChunk = function(chunk) {
- this.processLog_(chunk.split('\n'));
-};
-
-
-/**
- * Processes a line of V8 profiler event log.
- *
- * @param {string} line A line of log.
- */
-LogReader.prototype.processLogLine = function(line) {
- this.processLog_([line]);
-};
-
-
-/**
- * Processes stack record.
- *
- * @param {number} pc Program counter.
- * @param {number} func JS Function.
- * @param {Array.<string>} stack String representation of a stack.
- * @return {Array.<number>} Processed stack.
- */
-LogReader.prototype.processStack = function(pc, func, stack) {
- var fullStack = func ? [pc, func] : [pc];
- var prevFrame = pc;
- for (var i = 0, n = stack.length; i < n; ++i) {
- var frame = stack[i];
- var firstChar = frame.charAt(0);
- if (firstChar == '+' || firstChar == '-') {
- // An offset from the previous frame.
- prevFrame += parseInt(frame, 16);
- fullStack.push(prevFrame);
- // Filter out possible 'overflow' string.
- } else if (firstChar != 'o') {
- fullStack.push(parseInt(frame, 16));
- }
- }
- return fullStack;
-};
-
-
-/**
- * Returns whether a particular dispatch must be skipped.
- *
- * @param {!Object} dispatch Dispatch record.
- * @return {boolean} True if dispatch must be skipped.
- */
-LogReader.prototype.skipDispatch = function(dispatch) {
- return false;
-};
-
-
-/**
- * Does a dispatch of a log record.
- *
- * @param {Array.<string>} fields Log record.
- * @private
- */
-LogReader.prototype.dispatchLogRow_ = function(fields) {
- // Obtain the dispatch.
- var command = fields[0];
- if (!(command in this.dispatchTable_)) {
- throw new Error('unknown command: ' + command);
- }
- var dispatch = this.dispatchTable_[command];
-
- if (dispatch === null || this.skipDispatch(dispatch)) {
- return;
- }
-
- // Parse fields.
- var parsedFields = [];
- for (var i = 0; i < dispatch.parsers.length; ++i) {
- var parser = dispatch.parsers[i];
- if (parser === null) {
- parsedFields.push(fields[1 + i]);
- } else if (typeof parser == 'function') {
- parsedFields.push(parser(fields[1 + i]));
- } else {
- // var-args
- parsedFields.push(fields.slice(1 + i));
- break;
- }
- }
-
- // Run the processor.
- dispatch.processor.apply(this, parsedFields);
-};
-
-
-/**
- * Processes log lines.
- *
- * @param {Array.<string>} lines Log lines.
- * @private
- */
-LogReader.prototype.processLog_ = function(lines) {
- for (var i = 0, n = lines.length; i < n; ++i, ++this.lineNum_) {
- var line = lines[i];
- if (!line) {
- continue;
- }
- try {
- var fields = this.csvParser_.parseLine(line);
- this.dispatchLogRow_(fields);
- } catch (e) {
- this.printError('line ' + (this.lineNum_ + 1) + ': ' + (e.message || e));
- }
- }
-};
diff --git a/src/3rdparty/v8/tools/mac-nm b/src/3rdparty/v8/tools/mac-nm
deleted file mode 100755
index 07efb07..0000000
--- a/src/3rdparty/v8/tools/mac-nm
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/sh
-
-# This script is a wrapper for OS X nm(1) tool. nm(1) perform C++ function
-# names demangling, so we're piping its output to c++filt(1) tool which does it.
-# But c++filt(1) comes with XCode (as a part of GNU binutils), so it doesn't
-# guaranteed to exist on a system.
-#
-# An alternative approach is to perform demangling in tick processor, but
-# for GNU C++ ABI this is a complex process (see cp-demangle.c sources), and
-# can't be done partially, because term boundaries are plain text symbols, such
-# as 'N', 'E', so one can't just do a search through a function name, it really
-# needs to be parsed, which requires a lot of knowledge to be coded in.
-
-if [ "`which c++filt`" == "" ]; then
- nm "$@"
-else
- nm "$@" | c++filt -p -i
-fi
diff --git a/src/3rdparty/v8/tools/mac-tick-processor b/src/3rdparty/v8/tools/mac-tick-processor
deleted file mode 100755
index 5fba622..0000000
--- a/src/3rdparty/v8/tools/mac-tick-processor
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh
-
-# A wrapper script to call 'linux-tick-processor' with Mac-specific settings.
-
-tools_path=`cd $(dirname "$0");pwd`
-$tools_path/linux-tick-processor --mac --nm=$tools_path/mac-nm $@
diff --git a/src/3rdparty/v8/tools/oom_dump/README b/src/3rdparty/v8/tools/oom_dump/README
deleted file mode 100644
index 0be7511..0000000
--- a/src/3rdparty/v8/tools/oom_dump/README
+++ /dev/null
@@ -1,31 +0,0 @@
-oom_dump extracts useful information from Google Chrome OOM minidumps.
-
-To build one needs a google-breakpad checkout
-(http://code.google.com/p/google-breakpad/).
-
-First, one needs to build and install breakpad itself. For instructions
-check google-breakpad, but currently it's as easy as:
-
- ./configure
- make
- sudo make install
-
-(the catch: breakpad installs .so into /usr/local/lib, so you might
-need some additional tweaking to make it discoverable, for example,
-put a soft link into /usr/lib directory).
-
-Next step is to build v8. Note: you should build x64 version of v8,
-if you're on 64-bit platform, otherwise you would get a link error when
-building oom_dump.
-
-The last step is to build oom_dump itself. The following command should work:
-
- cd <v8 working copy>/tools/oom_dump
- scons BREAKPAD_DIR=<path to google-breakpad working copy>
-
-(Additionally you can control v8 working copy dir, but the default should work.)
-
-If everything goes fine, oom_dump <path to minidump> should print
-some useful information about the OOM crash.
-
-Note: currently only 32-bit Windows minidumps are supported.
diff --git a/src/3rdparty/v8/tools/oom_dump/SConstruct b/src/3rdparty/v8/tools/oom_dump/SConstruct
deleted file mode 100644
index f228c89..0000000
--- a/src/3rdparty/v8/tools/oom_dump/SConstruct
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2010 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-vars = Variables('custom.py')
-vars.Add(PathVariable('BREAKPAD_DIR',
- 'Path to checkout of google-breakpad project',
- '~/google-breakpad',
- PathVariable.PathIsDir))
-vars.Add(PathVariable('V8_DIR',
- 'Path to checkout of v8 project',
- '../..',
- PathVariable.PathIsDir))
-
-env = Environment(variables = vars,
- CPPPATH = ['${BREAKPAD_DIR}/src', '${V8_DIR}/src'],
- LIBPATH = ['/usr/local/lib', '${V8_DIR}'])
-
-env.Program('oom_dump.cc', LIBS = ['breakpad', 'v8', 'pthread'])
diff --git a/src/3rdparty/v8/tools/oom_dump/oom_dump.cc b/src/3rdparty/v8/tools/oom_dump/oom_dump.cc
deleted file mode 100644
index 1bf5ac1..0000000
--- a/src/3rdparty/v8/tools/oom_dump/oom_dump.cc
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#include <algorithm>
-
-#include <google_breakpad/processor/minidump.h>
-
-#define ENABLE_DEBUGGER_SUPPORT
-
-#include <v8.h>
-
-namespace {
-
-using google_breakpad::Minidump;
-using google_breakpad::MinidumpContext;
-using google_breakpad::MinidumpThread;
-using google_breakpad::MinidumpThreadList;
-using google_breakpad::MinidumpException;
-using google_breakpad::MinidumpMemoryRegion;
-
-const char* InstanceTypeToString(int type) {
- static char const* names[v8::internal::LAST_TYPE] = {0};
- if (names[v8::internal::STRING_TYPE] == NULL) {
- using namespace v8::internal;
-#define SET(type) names[type] = #type;
- INSTANCE_TYPE_LIST(SET)
-#undef SET
- }
- return names[type];
-}
-
-
-u_int32_t ReadPointedValue(MinidumpMemoryRegion* region,
- u_int64_t base,
- int offset) {
- u_int32_t ptr = 0;
- CHECK(region->GetMemoryAtAddress(base + 4 * offset, &ptr));
- u_int32_t value = 0;
- CHECK(region->GetMemoryAtAddress(ptr, &value));
- return value;
-}
-
-
-void ReadArray(MinidumpMemoryRegion* region,
- u_int64_t array_ptr,
- int size,
- int* output) {
- for (int i = 0; i < size; i++) {
- u_int32_t value;
- CHECK(region->GetMemoryAtAddress(array_ptr + 4 * i, &value));
- output[i] = value;
- }
-}
-
-
-u_int32_t ReadArrayFrom(MinidumpMemoryRegion* region,
- u_int64_t base,
- int offset,
- int size,
- int* output) {
- u_int32_t ptr = 0;
- CHECK(region->GetMemoryAtAddress(base + 4 * offset, &ptr));
- ReadArray(region, ptr, size, output);
-}
-
-
-double toM(int size) {
- return size / (1024. * 1024.);
-}
-
-
-class IndirectSorter {
- public:
- explicit IndirectSorter(int* a) : a_(a) { }
-
- bool operator() (int i0, int i1) {
- return a_[i0] > a_[i1];
- }
-
- private:
- int* a_;
-};
-
-void DumpHeapStats(const char *minidump_file) {
- Minidump minidump(minidump_file);
- CHECK(minidump.Read());
-
- MinidumpException *exception = minidump.GetException();
- CHECK(exception);
-
- MinidumpContext* crash_context = exception->GetContext();
- CHECK(crash_context);
-
- u_int32_t exception_thread_id = 0;
- CHECK(exception->GetThreadID(&exception_thread_id));
-
- MinidumpThreadList* thread_list = minidump.GetThreadList();
- CHECK(thread_list);
-
- MinidumpThread* exception_thread =
- thread_list->GetThreadByID(exception_thread_id);
- CHECK(exception_thread);
-
- // Currently only 32-bit Windows minidumps are supported.
- CHECK_EQ(MD_CONTEXT_X86, crash_context->GetContextCPU());
-
- const MDRawContextX86* contextX86 = crash_context->GetContextX86();
- CHECK(contextX86);
-
- const u_int32_t esp = contextX86->esp;
-
- MinidumpMemoryRegion* memory_region = exception_thread->GetMemory();
- CHECK(memory_region);
-
- const u_int64_t last = memory_region->GetBase() + memory_region->GetSize();
-
- u_int64_t heap_stats_addr = 0;
- for (u_int64_t addr = esp; addr < last; addr += 4) {
- u_int32_t value = 0;
- CHECK(memory_region->GetMemoryAtAddress(addr, &value));
- if (value >= esp && value < last) {
- u_int32_t value2 = 0;
- CHECK(memory_region->GetMemoryAtAddress(value, &value2));
- if (value2 == v8::internal::HeapStats::kStartMarker) {
- heap_stats_addr = addr;
- break;
- }
- }
- }
- CHECK(heap_stats_addr);
-
- // Read heap stats.
-
-#define READ_FIELD(offset) \
- ReadPointedValue(memory_region, heap_stats_addr, offset)
-
- CHECK(READ_FIELD(0) == v8::internal::HeapStats::kStartMarker);
- CHECK(READ_FIELD(24) == v8::internal::HeapStats::kEndMarker);
-
- const int new_space_size = READ_FIELD(1);
- const int new_space_capacity = READ_FIELD(2);
- const int old_pointer_space_size = READ_FIELD(3);
- const int old_pointer_space_capacity = READ_FIELD(4);
- const int old_data_space_size = READ_FIELD(5);
- const int old_data_space_capacity = READ_FIELD(6);
- const int code_space_size = READ_FIELD(7);
- const int code_space_capacity = READ_FIELD(8);
- const int map_space_size = READ_FIELD(9);
- const int map_space_capacity = READ_FIELD(10);
- const int cell_space_size = READ_FIELD(11);
- const int cell_space_capacity = READ_FIELD(12);
- const int lo_space_size = READ_FIELD(13);
- const int global_handle_count = READ_FIELD(14);
- const int weak_global_handle_count = READ_FIELD(15);
- const int pending_global_handle_count = READ_FIELD(16);
- const int near_death_global_handle_count = READ_FIELD(17);
- const int destroyed_global_handle_count = READ_FIELD(18);
- const int memory_allocator_size = READ_FIELD(19);
- const int memory_allocator_capacity = READ_FIELD(20);
- const int os_error = READ_FIELD(23);
-#undef READ_FIELD
-
- int objects_per_type[v8::internal::LAST_TYPE + 1] = {0};
- ReadArrayFrom(memory_region, heap_stats_addr, 21,
- v8::internal::LAST_TYPE + 1, objects_per_type);
-
- int size_per_type[v8::internal::LAST_TYPE + 1] = {0};
- ReadArrayFrom(memory_region, heap_stats_addr, 22, v8::internal::LAST_TYPE + 1,
- size_per_type);
-
- int js_global_objects =
- objects_per_type[v8::internal::JS_GLOBAL_OBJECT_TYPE];
- int js_builtins_objects =
- objects_per_type[v8::internal::JS_BUILTINS_OBJECT_TYPE];
- int js_global_proxies =
- objects_per_type[v8::internal::JS_GLOBAL_PROXY_TYPE];
-
- int indices[v8::internal::LAST_TYPE + 1];
- for (int i = 0; i <= v8::internal::LAST_TYPE; i++) {
- indices[i] = i;
- }
-
- std::stable_sort(indices, indices + sizeof(indices)/sizeof(indices[0]),
- IndirectSorter(size_per_type));
-
- int total_size = 0;
- for (int i = 0; i <= v8::internal::LAST_TYPE; i++) {
- total_size += size_per_type[i];
- }
-
- // Print heap stats.
-
- printf("exception thread ID: %" PRIu32 " (%#" PRIx32 ")\n",
- exception_thread_id, exception_thread_id);
- printf("heap stats address: %#" PRIx64 "\n", heap_stats_addr);
-#define PRINT_INT_STAT(stat) \
- printf("\t%-25s\t% 10d\n", #stat ":", stat);
-#define PRINT_MB_STAT(stat) \
- printf("\t%-25s\t% 10.3f MB\n", #stat ":", toM(stat));
- PRINT_MB_STAT(new_space_size);
- PRINT_MB_STAT(new_space_capacity);
- PRINT_MB_STAT(old_pointer_space_size);
- PRINT_MB_STAT(old_pointer_space_capacity);
- PRINT_MB_STAT(old_data_space_size);
- PRINT_MB_STAT(old_data_space_capacity);
- PRINT_MB_STAT(code_space_size);
- PRINT_MB_STAT(code_space_capacity);
- PRINT_MB_STAT(map_space_size);
- PRINT_MB_STAT(map_space_capacity);
- PRINT_MB_STAT(cell_space_size);
- PRINT_MB_STAT(cell_space_capacity);
- PRINT_MB_STAT(lo_space_size);
- PRINT_INT_STAT(global_handle_count);
- PRINT_INT_STAT(weak_global_handle_count);
- PRINT_INT_STAT(pending_global_handle_count);
- PRINT_INT_STAT(near_death_global_handle_count);
- PRINT_INT_STAT(destroyed_global_handle_count);
- PRINT_MB_STAT(memory_allocator_size);
- PRINT_MB_STAT(memory_allocator_capacity);
- PRINT_INT_STAT(os_error);
-#undef PRINT_STAT
-
- printf("\n");
-
- printf(
- "\tJS_GLOBAL_OBJECT_TYPE/JS_BUILTINS_OBJECT_TYPE/JS_GLOBAL_PROXY_TYPE: "
- "%d/%d/%d\n\n",
- js_global_objects, js_builtins_objects, js_global_proxies);
-
- int running_size = 0;
- for (int i = 0; i <= v8::internal::LAST_TYPE; i++) {
- int type = indices[i];
- const char* name = InstanceTypeToString(type);
- if (name == NULL) {
- // Unknown instance type. Check that there is no objects of that type.
- CHECK_EQ(0, objects_per_type[type]);
- CHECK_EQ(0, size_per_type[type]);
- continue;
- }
- int size = size_per_type[type];
- running_size += size;
- printf("\t%-37s% 9d% 11.3f MB% 10.3f%%% 10.3f%%\n",
- name, objects_per_type[type], toM(size),
- 100. * size / total_size, 100. * running_size / total_size);
- }
- printf("\t%-37s% 9d% 11.3f MB% 10.3f%%% 10.3f%%\n",
- "total", 0, toM(total_size), 100., 100.);
-}
-
-} // namespace
-
-int main(int argc, char **argv) {
- if (argc != 2) {
- fprintf(stderr, "usage: %s <minidump>\n", argv[0]);
- return 1;
- }
-
- DumpHeapStats(argv[1]);
-
- return 0;
-}
diff --git a/src/3rdparty/v8/tools/presubmit.py b/src/3rdparty/v8/tools/presubmit.py
deleted file mode 100755
index 1d80f92..0000000
--- a/src/3rdparty/v8/tools/presubmit.py
+++ /dev/null
@@ -1,305 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-try:
- import hashlib
- md5er = hashlib.md5
-except ImportError, e:
- import md5
- md5er = md5.new
-
-
-import optparse
-import os
-from os.path import abspath, join, dirname, basename, exists
-import pickle
-import re
-import sys
-import subprocess
-
-# Disabled LINT rules and reason.
-# build/include_what_you_use: Started giving false positives for variables
-# named "string" and "map" assuming that you needed to include STL headers.
-
-ENABLED_LINT_RULES = """
-build/class
-build/deprecated
-build/endif_comment
-build/forward_decl
-build/include_order
-build/printf_format
-build/storage_class
-legal/copyright
-readability/boost
-readability/braces
-readability/casting
-readability/check
-readability/constructors
-readability/fn_size
-readability/function
-readability/multiline_comment
-readability/multiline_string
-readability/streams
-readability/todo
-readability/utf8
-runtime/arrays
-runtime/casting
-runtime/deprecated_fn
-runtime/explicit
-runtime/int
-runtime/memset
-runtime/mutex
-runtime/nonconf
-runtime/printf
-runtime/printf_format
-runtime/references
-runtime/rtti
-runtime/sizeof
-runtime/string
-runtime/virtual
-runtime/vlog
-whitespace/blank_line
-whitespace/braces
-whitespace/comma
-whitespace/comments
-whitespace/end_of_line
-whitespace/ending_newline
-whitespace/indent
-whitespace/labels
-whitespace/line_length
-whitespace/newline
-whitespace/operators
-whitespace/parens
-whitespace/tab
-whitespace/todo
-""".split()
-
-
-class FileContentsCache(object):
-
- def __init__(self, sums_file_name):
- self.sums = {}
- self.sums_file_name = sums_file_name
-
- def Load(self):
- try:
- sums_file = None
- try:
- sums_file = open(self.sums_file_name, 'r')
- self.sums = pickle.load(sums_file)
- except IOError:
- # File might not exist, this is OK.
- pass
- finally:
- if sums_file:
- sums_file.close()
-
- def Save(self):
- try:
- sums_file = open(self.sums_file_name, 'w')
- pickle.dump(self.sums, sums_file)
- finally:
- sums_file.close()
-
- def FilterUnchangedFiles(self, files):
- changed_or_new = []
- for file in files:
- try:
- handle = open(file, "r")
- file_sum = md5er(handle.read()).digest()
- if not file in self.sums or self.sums[file] != file_sum:
- changed_or_new.append(file)
- self.sums[file] = file_sum
- finally:
- handle.close()
- return changed_or_new
-
- def RemoveFile(self, file):
- if file in self.sums:
- self.sums.pop(file)
-
-
-class SourceFileProcessor(object):
- """
- Utility class that can run through a directory structure, find all relevant
- files and invoke a custom check on the files.
- """
-
- def Run(self, path):
- all_files = []
- for file in self.GetPathsToSearch():
- all_files += self.FindFilesIn(join(path, file))
- if not self.ProcessFiles(all_files, path):
- return False
- return True
-
- def IgnoreDir(self, name):
- return name.startswith('.') or name == 'data' or name == 'sputniktests'
-
- def IgnoreFile(self, name):
- return name.startswith('.')
-
- def FindFilesIn(self, path):
- result = []
- for (root, dirs, files) in os.walk(path):
- for ignored in [x for x in dirs if self.IgnoreDir(x)]:
- dirs.remove(ignored)
- for file in files:
- if not self.IgnoreFile(file) and self.IsRelevant(file):
- result.append(join(root, file))
- return result
-
-
-class CppLintProcessor(SourceFileProcessor):
- """
- Lint files to check that they follow the google code style.
- """
-
- def IsRelevant(self, name):
- return name.endswith('.cc') or name.endswith('.h')
-
- def IgnoreDir(self, name):
- return (super(CppLintProcessor, self).IgnoreDir(name)
- or (name == 'third_party'))
-
- IGNORE_LINT = ['flag-definitions.h']
-
- def IgnoreFile(self, name):
- return (super(CppLintProcessor, self).IgnoreFile(name)
- or (name in CppLintProcessor.IGNORE_LINT))
-
- def GetPathsToSearch(self):
- return ['src', 'preparser', 'include', 'samples', join('test', 'cctest')]
-
- def ProcessFiles(self, files, path):
- good_files_cache = FileContentsCache('.cpplint-cache')
- good_files_cache.Load()
- files = good_files_cache.FilterUnchangedFiles(files)
- if len(files) == 0:
- print 'No changes in files detected. Skipping cpplint check.'
- return True
-
- filt = '-,' + ",".join(['+' + n for n in ENABLED_LINT_RULES])
- command = ['cpplint.py', '--filter', filt] + join(files)
- local_cpplint = join(path, "tools", "cpplint.py")
- if exists(local_cpplint):
- command = ['python', local_cpplint, '--filter', filt] + join(files)
-
- process = subprocess.Popen(command, stderr=subprocess.PIPE)
- LINT_ERROR_PATTERN = re.compile(r'^(.+)[:(]\d+[:)]')
- while True:
- out_line = process.stderr.readline()
- if out_line == '' and process.poll() != None:
- break
- sys.stderr.write(out_line)
- m = LINT_ERROR_PATTERN.match(out_line)
- if m:
- good_files_cache.RemoveFile(m.group(1))
-
- good_files_cache.Save()
- return process.returncode == 0
-
-
-COPYRIGHT_HEADER_PATTERN = re.compile(
- r'Copyright [\d-]*20[0-1][0-9] the V8 project authors. All rights reserved.')
-
-class SourceProcessor(SourceFileProcessor):
- """
- Check that all files include a copyright notice.
- """
-
- RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c', 'SConscript',
- 'SConstruct', '.status']
- def IsRelevant(self, name):
- for ext in SourceProcessor.RELEVANT_EXTENSIONS:
- if name.endswith(ext):
- return True
- return False
-
- def GetPathsToSearch(self):
- return ['.']
-
- def IgnoreDir(self, name):
- return (super(SourceProcessor, self).IgnoreDir(name)
- or (name == 'third_party')
- or (name == 'obj'))
-
- IGNORE_COPYRIGHTS = ['earley-boyer.js', 'raytrace.js', 'crypto.js',
- 'libraries.cc', 'libraries-empty.cc', 'jsmin.py', 'regexp-pcre.js']
- IGNORE_TABS = IGNORE_COPYRIGHTS + ['unicode-test.js',
- 'html-comments.js']
-
- def ProcessContents(self, name, contents):
- result = True
- base = basename(name)
- if not base in SourceProcessor.IGNORE_TABS:
- if '\t' in contents:
- print "%s contains tabs" % name
- result = False
- if not base in SourceProcessor.IGNORE_COPYRIGHTS:
- if not COPYRIGHT_HEADER_PATTERN.search(contents):
- print "%s is missing a correct copyright header." % name
- result = False
- return result
-
- def ProcessFiles(self, files, path):
- success = True
- for file in files:
- try:
- handle = open(file)
- contents = handle.read()
- success = self.ProcessContents(file, contents) and success
- finally:
- handle.close()
- return success
-
-
-def GetOptions():
- result = optparse.OptionParser()
- result.add_option('--no-lint', help="Do not run cpplint", default=False,
- action="store_true")
- return result
-
-
-def Main():
- workspace = abspath(join(dirname(sys.argv[0]), '..'))
- parser = GetOptions()
- (options, args) = parser.parse_args()
- success = True
- if not options.no_lint:
- success = CppLintProcessor().Run(workspace) and success
- success = SourceProcessor().Run(workspace) and success
- if success:
- return 0
- else:
- return 1
-
-
-if __name__ == '__main__':
- sys.exit(Main())
diff --git a/src/3rdparty/v8/tools/process-heap-prof.py b/src/3rdparty/v8/tools/process-heap-prof.py
deleted file mode 100755
index 6a2c397..0000000
--- a/src/3rdparty/v8/tools/process-heap-prof.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2009 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# This is an utility for converting V8 heap logs into .hp files that can
-# be further processed using 'hp2ps' tool (bundled with GHC and Valgrind)
-# to produce heap usage histograms.
-
-# Sample usage:
-# $ ./shell --log-gc script.js
-# $ tools/process-heap-prof.py v8.log | hp2ps -c > script-heap-graph.ps
-# ('-c' enables color, see hp2ps manual page for more options)
-# or
-# $ tools/process-heap-prof.py --js-cons-profile v8.log | hp2ps -c > script-heap-graph.ps
-# to get JS constructor profile
-
-
-import csv, sys, time, optparse
-
-def ProcessLogFile(filename, options):
- if options.js_cons_profile:
- itemname = 'heap-js-cons-item'
- else:
- itemname = 'heap-sample-item'
-
- first_call_time = None
- sample_time = 0.0
- sampling = False
- try:
- logfile = open(filename, 'rb')
- try:
- logreader = csv.reader(logfile)
-
- print('JOB "v8"')
- print('DATE "%s"' % time.asctime(time.localtime()))
- print('SAMPLE_UNIT "seconds"')
- print('VALUE_UNIT "bytes"')
-
- for row in logreader:
- if row[0] == 'heap-sample-begin' and row[1] == 'Heap':
- sample_time = float(row[3])/1000.0
- if first_call_time == None:
- first_call_time = sample_time
- sample_time -= first_call_time
- print('BEGIN_SAMPLE %.2f' % sample_time)
- sampling = True
- elif row[0] == 'heap-sample-end' and row[1] == 'Heap':
- print('END_SAMPLE %.2f' % sample_time)
- sampling = False
- elif row[0] == itemname and sampling:
- print(row[1]),
- if options.count:
- print('%d' % (int(row[2]))),
- if options.size:
- print('%d' % (int(row[3]))),
- print
- finally:
- logfile.close()
- except:
- sys.exit('can\'t open %s' % filename)
-
-
-def BuildOptions():
- result = optparse.OptionParser()
- result.add_option("--js_cons_profile", help="Constructor profile",
- default=False, action="store_true")
- result.add_option("--size", help="Report object size",
- default=False, action="store_true")
- result.add_option("--count", help="Report object count",
- default=False, action="store_true")
- return result
-
-
-def ProcessOptions(options):
- if not options.size and not options.count:
- options.size = True
- return True
-
-
-def Main():
- parser = BuildOptions()
- (options, args) = parser.parse_args()
- if not ProcessOptions(options):
- parser.print_help()
- sys.exit();
-
- if not args:
- print "Missing logfile"
- sys.exit();
-
- ProcessLogFile(args[0], options)
-
-
-if __name__ == '__main__':
- sys.exit(Main())
diff --git a/src/3rdparty/v8/tools/profile.js b/src/3rdparty/v8/tools/profile.js
deleted file mode 100644
index c9c9437..0000000
--- a/src/3rdparty/v8/tools/profile.js
+++ /dev/null
@@ -1,751 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-/**
- * Creates a profile object for processing profiling-related events
- * and calculating function execution times.
- *
- * @constructor
- */
-function Profile() {
- this.codeMap_ = new CodeMap();
- this.topDownTree_ = new CallTree();
- this.bottomUpTree_ = new CallTree();
-};
-
-
-/**
- * Returns whether a function with the specified name must be skipped.
- * Should be overriden by subclasses.
- *
- * @param {string} name Function name.
- */
-Profile.prototype.skipThisFunction = function(name) {
- return false;
-};
-
-
-/**
- * Enum for profiler operations that involve looking up existing
- * code entries.
- *
- * @enum {number}
- */
-Profile.Operation = {
- MOVE: 0,
- DELETE: 1,
- TICK: 2
-};
-
-
-/**
- * Enum for code state regarding its dynamic optimization.
- *
- * @enum {number}
- */
-Profile.CodeState = {
- COMPILED: 0,
- OPTIMIZABLE: 1,
- OPTIMIZED: 2
-};
-
-
-/**
- * Called whenever the specified operation has failed finding a function
- * containing the specified address. Should be overriden by subclasses.
- * See the Profile.Operation enum for the list of
- * possible operations.
- *
- * @param {number} operation Operation.
- * @param {number} addr Address of the unknown code.
- * @param {number} opt_stackPos If an unknown address is encountered
- * during stack strace processing, specifies a position of the frame
- * containing the address.
- */
-Profile.prototype.handleUnknownCode = function(
- operation, addr, opt_stackPos) {
-};
-
-
-/**
- * Registers a library.
- *
- * @param {string} name Code entry name.
- * @param {number} startAddr Starting address.
- * @param {number} endAddr Ending address.
- */
-Profile.prototype.addLibrary = function(
- name, startAddr, endAddr) {
- var entry = new CodeMap.CodeEntry(
- endAddr - startAddr, name);
- this.codeMap_.addLibrary(startAddr, entry);
- return entry;
-};
-
-
-/**
- * Registers statically compiled code entry.
- *
- * @param {string} name Code entry name.
- * @param {number} startAddr Starting address.
- * @param {number} endAddr Ending address.
- */
-Profile.prototype.addStaticCode = function(
- name, startAddr, endAddr) {
- var entry = new CodeMap.CodeEntry(
- endAddr - startAddr, name);
- this.codeMap_.addStaticCode(startAddr, entry);
- return entry;
-};
-
-
-/**
- * Registers dynamic (JIT-compiled) code entry.
- *
- * @param {string} type Code entry type.
- * @param {string} name Code entry name.
- * @param {number} start Starting address.
- * @param {number} size Code entry size.
- */
-Profile.prototype.addCode = function(
- type, name, start, size) {
- var entry = new Profile.DynamicCodeEntry(size, type, name);
- this.codeMap_.addCode(start, entry);
- return entry;
-};
-
-
-/**
- * Registers dynamic (JIT-compiled) code entry.
- *
- * @param {string} type Code entry type.
- * @param {string} name Code entry name.
- * @param {number} start Starting address.
- * @param {number} size Code entry size.
- * @param {number} funcAddr Shared function object address.
- * @param {Profile.CodeState} state Optimization state.
- */
-Profile.prototype.addFuncCode = function(
- type, name, start, size, funcAddr, state) {
- // As code and functions are in the same address space,
- // it is safe to put them in a single code map.
- var func = this.codeMap_.findDynamicEntryByStartAddress(funcAddr);
- if (!func) {
- func = new Profile.FunctionEntry(name);
- this.codeMap_.addCode(funcAddr, func);
- } else if (func.name !== name) {
- // Function object has been overwritten with a new one.
- func.name = name;
- }
- var entry = new Profile.DynamicFuncCodeEntry(size, type, func, state);
- this.codeMap_.addCode(start, entry);
- return entry;
-};
-
-
-/**
- * Reports about moving of a dynamic code entry.
- *
- * @param {number} from Current code entry address.
- * @param {number} to New code entry address.
- */
-Profile.prototype.moveCode = function(from, to) {
- try {
- this.codeMap_.moveCode(from, to);
- } catch (e) {
- this.handleUnknownCode(Profile.Operation.MOVE, from);
- }
-};
-
-
-/**
- * Reports about deletion of a dynamic code entry.
- *
- * @param {number} start Starting address.
- */
-Profile.prototype.deleteCode = function(start) {
- try {
- this.codeMap_.deleteCode(start);
- } catch (e) {
- this.handleUnknownCode(Profile.Operation.DELETE, start);
- }
-};
-
-
-/**
- * Reports about moving of a dynamic code entry.
- *
- * @param {number} from Current code entry address.
- * @param {number} to New code entry address.
- */
-Profile.prototype.moveFunc = function(from, to) {
- if (this.codeMap_.findDynamicEntryByStartAddress(from)) {
- this.codeMap_.moveCode(from, to);
- }
-};
-
-
-/**
- * Retrieves a code entry by an address.
- *
- * @param {number} addr Entry address.
- */
-Profile.prototype.findEntry = function(addr) {
- return this.codeMap_.findEntry(addr);
-};
-
-
-/**
- * Records a tick event. Stack must contain a sequence of
- * addresses starting with the program counter value.
- *
- * @param {Array<number>} stack Stack sample.
- */
-Profile.prototype.recordTick = function(stack) {
- var processedStack = this.resolveAndFilterFuncs_(stack);
- this.bottomUpTree_.addPath(processedStack);
- processedStack.reverse();
- this.topDownTree_.addPath(processedStack);
-};
-
-
-/**
- * Translates addresses into function names and filters unneeded
- * functions.
- *
- * @param {Array<number>} stack Stack sample.
- */
-Profile.prototype.resolveAndFilterFuncs_ = function(stack) {
- var result = [];
- for (var i = 0; i < stack.length; ++i) {
- var entry = this.codeMap_.findEntry(stack[i]);
- if (entry) {
- var name = entry.getName();
- if (!this.skipThisFunction(name)) {
- result.push(name);
- }
- } else {
- this.handleUnknownCode(
- Profile.Operation.TICK, stack[i], i);
- }
- }
- return result;
-};
-
-
-/**
- * Performs a BF traversal of the top down call graph.
- *
- * @param {function(CallTree.Node)} f Visitor function.
- */
-Profile.prototype.traverseTopDownTree = function(f) {
- this.topDownTree_.traverse(f);
-};
-
-
-/**
- * Performs a BF traversal of the bottom up call graph.
- *
- * @param {function(CallTree.Node)} f Visitor function.
- */
-Profile.prototype.traverseBottomUpTree = function(f) {
- this.bottomUpTree_.traverse(f);
-};
-
-
-/**
- * Calculates a top down profile for a node with the specified label.
- * If no name specified, returns the whole top down calls tree.
- *
- * @param {string} opt_label Node label.
- */
-Profile.prototype.getTopDownProfile = function(opt_label) {
- return this.getTreeProfile_(this.topDownTree_, opt_label);
-};
-
-
-/**
- * Calculates a bottom up profile for a node with the specified label.
- * If no name specified, returns the whole bottom up calls tree.
- *
- * @param {string} opt_label Node label.
- */
-Profile.prototype.getBottomUpProfile = function(opt_label) {
- return this.getTreeProfile_(this.bottomUpTree_, opt_label);
-};
-
-
-/**
- * Helper function for calculating a tree profile.
- *
- * @param {Profile.CallTree} tree Call tree.
- * @param {string} opt_label Node label.
- */
-Profile.prototype.getTreeProfile_ = function(tree, opt_label) {
- if (!opt_label) {
- tree.computeTotalWeights();
- return tree;
- } else {
- var subTree = tree.cloneSubtree(opt_label);
- subTree.computeTotalWeights();
- return subTree;
- }
-};
-
-
-/**
- * Calculates a flat profile of callees starting from a node with
- * the specified label. If no name specified, starts from the root.
- *
- * @param {string} opt_label Starting node label.
- */
-Profile.prototype.getFlatProfile = function(opt_label) {
- var counters = new CallTree();
- var rootLabel = opt_label || CallTree.ROOT_NODE_LABEL;
- var precs = {};
- precs[rootLabel] = 0;
- var root = counters.findOrAddChild(rootLabel);
-
- this.topDownTree_.computeTotalWeights();
- this.topDownTree_.traverseInDepth(
- function onEnter(node) {
- if (!(node.label in precs)) {
- precs[node.label] = 0;
- }
- var nodeLabelIsRootLabel = node.label == rootLabel;
- if (nodeLabelIsRootLabel || precs[rootLabel] > 0) {
- if (precs[rootLabel] == 0) {
- root.selfWeight += node.selfWeight;
- root.totalWeight += node.totalWeight;
- } else {
- var rec = root.findOrAddChild(node.label);
- rec.selfWeight += node.selfWeight;
- if (nodeLabelIsRootLabel || precs[node.label] == 0) {
- rec.totalWeight += node.totalWeight;
- }
- }
- precs[node.label]++;
- }
- },
- function onExit(node) {
- if (node.label == rootLabel || precs[rootLabel] > 0) {
- precs[node.label]--;
- }
- },
- null);
-
- if (!opt_label) {
- // If we have created a flat profile for the whole program, we don't
- // need an explicit root in it. Thus, replace the counters tree
- // root with the node corresponding to the whole program.
- counters.root_ = root;
- } else {
- // Propagate weights so percents can be calculated correctly.
- counters.getRoot().selfWeight = root.selfWeight;
- counters.getRoot().totalWeight = root.totalWeight;
- }
- return counters;
-};
-
-
-/**
- * Creates a dynamic code entry.
- *
- * @param {number} size Code size.
- * @param {string} type Code type.
- * @param {string} name Function name.
- * @constructor
- */
-Profile.DynamicCodeEntry = function(size, type, name) {
- CodeMap.CodeEntry.call(this, size, name);
- this.type = type;
-};
-
-
-/**
- * Returns node name.
- */
-Profile.DynamicCodeEntry.prototype.getName = function() {
- return this.type + ': ' + this.name;
-};
-
-
-/**
- * Returns raw node name (without type decoration).
- */
-Profile.DynamicCodeEntry.prototype.getRawName = function() {
- return this.name;
-};
-
-
-Profile.DynamicCodeEntry.prototype.isJSFunction = function() {
- return false;
-};
-
-
-/**
- * Creates a dynamic code entry.
- *
- * @param {number} size Code size.
- * @param {string} type Code type.
- * @param {Profile.FunctionEntry} func Shared function entry.
- * @param {Profile.CodeState} state Code optimization state.
- * @constructor
- */
-Profile.DynamicFuncCodeEntry = function(size, type, func, state) {
- CodeMap.CodeEntry.call(this, size);
- this.type = type;
- this.func = func;
- this.state = state;
-};
-
-Profile.DynamicFuncCodeEntry.STATE_PREFIX = ["", "~", "*"];
-
-/**
- * Returns node name.
- */
-Profile.DynamicFuncCodeEntry.prototype.getName = function() {
- var name = this.func.getName();
- return this.type + ': ' + Profile.DynamicFuncCodeEntry.STATE_PREFIX[this.state] + name;
-};
-
-
-/**
- * Returns raw node name (without type decoration).
- */
-Profile.DynamicFuncCodeEntry.prototype.getRawName = function() {
- return this.func.getName();
-};
-
-
-Profile.DynamicFuncCodeEntry.prototype.isJSFunction = function() {
- return true;
-};
-
-
-/**
- * Creates a shared function object entry.
- *
- * @param {string} name Function name.
- * @constructor
- */
-Profile.FunctionEntry = function(name) {
- CodeMap.CodeEntry.call(this, 0, name);
-};
-
-
-/**
- * Returns node name.
- */
-Profile.FunctionEntry.prototype.getName = function() {
- var name = this.name;
- if (name.length == 0) {
- name = '<anonymous>';
- } else if (name.charAt(0) == ' ') {
- // An anonymous function with location: " aaa.js:10".
- name = '<anonymous>' + name;
- }
- return name;
-};
-
-
-/**
- * Constructs a call graph.
- *
- * @constructor
- */
-function CallTree() {
- this.root_ = new CallTree.Node(
- CallTree.ROOT_NODE_LABEL);
-};
-
-
-/**
- * The label of the root node.
- */
-CallTree.ROOT_NODE_LABEL = '';
-
-
-/**
- * @private
- */
-CallTree.prototype.totalsComputed_ = false;
-
-
-/**
- * Returns the tree root.
- */
-CallTree.prototype.getRoot = function() {
- return this.root_;
-};
-
-
-/**
- * Adds the specified call path, constructing nodes as necessary.
- *
- * @param {Array<string>} path Call path.
- */
-CallTree.prototype.addPath = function(path) {
- if (path.length == 0) {
- return;
- }
- var curr = this.root_;
- for (var i = 0; i < path.length; ++i) {
- curr = curr.findOrAddChild(path[i]);
- }
- curr.selfWeight++;
- this.totalsComputed_ = false;
-};
-
-
-/**
- * Finds an immediate child of the specified parent with the specified
- * label, creates a child node if necessary. If a parent node isn't
- * specified, uses tree root.
- *
- * @param {string} label Child node label.
- */
-CallTree.prototype.findOrAddChild = function(label) {
- return this.root_.findOrAddChild(label);
-};
-
-
-/**
- * Creates a subtree by cloning and merging all subtrees rooted at nodes
- * with a given label. E.g. cloning the following call tree on label 'A'
- * will give the following result:
- *
- * <A>--<B> <B>
- * / /
- * <root> == clone on 'A' ==> <root>--<A>
- * \ \
- * <C>--<A>--<D> <D>
- *
- * And <A>'s selfWeight will be the sum of selfWeights of <A>'s from the
- * source call tree.
- *
- * @param {string} label The label of the new root node.
- */
-CallTree.prototype.cloneSubtree = function(label) {
- var subTree = new CallTree();
- this.traverse(function(node, parent) {
- if (!parent && node.label != label) {
- return null;
- }
- var child = (parent ? parent : subTree).findOrAddChild(node.label);
- child.selfWeight += node.selfWeight;
- return child;
- });
- return subTree;
-};
-
-
-/**
- * Computes total weights in the call graph.
- */
-CallTree.prototype.computeTotalWeights = function() {
- if (this.totalsComputed_) {
- return;
- }
- this.root_.computeTotalWeight();
- this.totalsComputed_ = true;
-};
-
-
-/**
- * Traverses the call graph in preorder. This function can be used for
- * building optionally modified tree clones. This is the boilerplate code
- * for this scenario:
- *
- * callTree.traverse(function(node, parentClone) {
- * var nodeClone = cloneNode(node);
- * if (parentClone)
- * parentClone.addChild(nodeClone);
- * return nodeClone;
- * });
- *
- * @param {function(CallTree.Node, *)} f Visitor function.
- * The second parameter is the result of calling 'f' on the parent node.
- */
-CallTree.prototype.traverse = function(f) {
- var pairsToProcess = new ConsArray();
- pairsToProcess.concat([{node: this.root_, param: null}]);
- while (!pairsToProcess.atEnd()) {
- var pair = pairsToProcess.next();
- var node = pair.node;
- var newParam = f(node, pair.param);
- var morePairsToProcess = [];
- node.forEachChild(function (child) {
- morePairsToProcess.push({node: child, param: newParam}); });
- pairsToProcess.concat(morePairsToProcess);
- }
-};
-
-
-/**
- * Performs an indepth call graph traversal.
- *
- * @param {function(CallTree.Node)} enter A function called
- * prior to visiting node's children.
- * @param {function(CallTree.Node)} exit A function called
- * after visiting node's children.
- */
-CallTree.prototype.traverseInDepth = function(enter, exit) {
- function traverse(node) {
- enter(node);
- node.forEachChild(traverse);
- exit(node);
- }
- traverse(this.root_);
-};
-
-
-/**
- * Constructs a call graph node.
- *
- * @param {string} label Node label.
- * @param {CallTree.Node} opt_parent Node parent.
- */
-CallTree.Node = function(label, opt_parent) {
- this.label = label;
- this.parent = opt_parent;
- this.children = {};
-};
-
-
-/**
- * Node self weight (how many times this node was the last node in
- * a call path).
- * @type {number}
- */
-CallTree.Node.prototype.selfWeight = 0;
-
-
-/**
- * Node total weight (includes weights of all children).
- * @type {number}
- */
-CallTree.Node.prototype.totalWeight = 0;
-
-
-/**
- * Adds a child node.
- *
- * @param {string} label Child node label.
- */
-CallTree.Node.prototype.addChild = function(label) {
- var child = new CallTree.Node(label, this);
- this.children[label] = child;
- return child;
-};
-
-
-/**
- * Computes node's total weight.
- */
-CallTree.Node.prototype.computeTotalWeight =
- function() {
- var totalWeight = this.selfWeight;
- this.forEachChild(function(child) {
- totalWeight += child.computeTotalWeight(); });
- return this.totalWeight = totalWeight;
-};
-
-
-/**
- * Returns all node's children as an array.
- */
-CallTree.Node.prototype.exportChildren = function() {
- var result = [];
- this.forEachChild(function (node) { result.push(node); });
- return result;
-};
-
-
-/**
- * Finds an immediate child with the specified label.
- *
- * @param {string} label Child node label.
- */
-CallTree.Node.prototype.findChild = function(label) {
- return this.children[label] || null;
-};
-
-
-/**
- * Finds an immediate child with the specified label, creates a child
- * node if necessary.
- *
- * @param {string} label Child node label.
- */
-CallTree.Node.prototype.findOrAddChild = function(label) {
- return this.findChild(label) || this.addChild(label);
-};
-
-
-/**
- * Calls the specified function for every child.
- *
- * @param {function(CallTree.Node)} f Visitor function.
- */
-CallTree.Node.prototype.forEachChild = function(f) {
- for (var c in this.children) {
- f(this.children[c]);
- }
-};
-
-
-/**
- * Walks up from the current node up to the call tree root.
- *
- * @param {function(CallTree.Node)} f Visitor function.
- */
-CallTree.Node.prototype.walkUpToRoot = function(f) {
- for (var curr = this; curr != null; curr = curr.parent) {
- f(curr);
- }
-};
-
-
-/**
- * Tries to find a node with the specified path.
- *
- * @param {Array<string>} labels The path.
- * @param {function(CallTree.Node)} opt_f Visitor function.
- */
-CallTree.Node.prototype.descendToChild = function(
- labels, opt_f) {
- for (var pos = 0, curr = this; pos < labels.length && curr != null; pos++) {
- var child = curr.findChild(labels[pos]);
- if (opt_f) {
- opt_f(child, pos);
- }
- curr = child;
- }
- return curr;
-};
diff --git a/src/3rdparty/v8/tools/profile_view.js b/src/3rdparty/v8/tools/profile_view.js
deleted file mode 100644
index e041909..0000000
--- a/src/3rdparty/v8/tools/profile_view.js
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-/**
- * Creates a Profile View builder object.
- *
- * @param {number} samplingRate Number of ms between profiler ticks.
- * @constructor
- */
-function ViewBuilder(samplingRate) {
- this.samplingRate = samplingRate;
-};
-
-
-/**
- * Builds a profile view for the specified call tree.
- *
- * @param {CallTree} callTree A call tree.
- * @param {boolean} opt_bottomUpViewWeights Whether remapping
- * of self weights for a bottom up view is needed.
- */
-ViewBuilder.prototype.buildView = function(
- callTree, opt_bottomUpViewWeights) {
- var head;
- var samplingRate = this.samplingRate;
- var createViewNode = this.createViewNode;
- callTree.traverse(function(node, viewParent) {
- var totalWeight = node.totalWeight * samplingRate;
- var selfWeight = node.selfWeight * samplingRate;
- if (opt_bottomUpViewWeights === true) {
- if (viewParent === head) {
- selfWeight = totalWeight;
- } else {
- selfWeight = 0;
- }
- }
- var viewNode = createViewNode(node.label, totalWeight, selfWeight, head);
- if (viewParent) {
- viewParent.addChild(viewNode);
- } else {
- head = viewNode;
- }
- return viewNode;
- });
- var view = this.createView(head);
- return view;
-};
-
-
-/**
- * Factory method for a profile view.
- *
- * @param {ProfileView.Node} head View head node.
- * @return {ProfileView} Profile view.
- */
-ViewBuilder.prototype.createView = function(head) {
- return new ProfileView(head);
-};
-
-
-/**
- * Factory method for a profile view node.
- *
- * @param {string} internalFuncName A fully qualified function name.
- * @param {number} totalTime Amount of time that application spent in the
- * corresponding function and its descendants (not that depending on
- * profile they can be either callees or callers.)
- * @param {number} selfTime Amount of time that application spent in the
- * corresponding function only.
- * @param {ProfileView.Node} head Profile view head.
- * @return {ProfileView.Node} Profile view node.
- */
-ViewBuilder.prototype.createViewNode = function(
- funcName, totalTime, selfTime, head) {
- return new ProfileView.Node(
- funcName, totalTime, selfTime, head);
-};
-
-
-/**
- * Creates a Profile View object. It allows to perform sorting
- * and filtering actions on the profile.
- *
- * @param {ProfileView.Node} head Head (root) node.
- * @constructor
- */
-function ProfileView(head) {
- this.head = head;
-};
-
-
-/**
- * Sorts the profile view using the specified sort function.
- *
- * @param {function(ProfileView.Node,
- * ProfileView.Node):number} sortFunc A sorting
- * functions. Must comply with Array.sort sorting function requirements.
- */
-ProfileView.prototype.sort = function(sortFunc) {
- this.traverse(function (node) {
- node.sortChildren(sortFunc);
- });
-};
-
-
-/**
- * Traverses profile view nodes in preorder.
- *
- * @param {function(ProfileView.Node)} f Visitor function.
- */
-ProfileView.prototype.traverse = function(f) {
- var nodesToTraverse = new ConsArray();
- nodesToTraverse.concat([this.head]);
- while (!nodesToTraverse.atEnd()) {
- var node = nodesToTraverse.next();
- f(node);
- nodesToTraverse.concat(node.children);
- }
-};
-
-
-/**
- * Constructs a Profile View node object. Each node object corresponds to
- * a function call.
- *
- * @param {string} internalFuncName A fully qualified function name.
- * @param {number} totalTime Amount of time that application spent in the
- * corresponding function and its descendants (not that depending on
- * profile they can be either callees or callers.)
- * @param {number} selfTime Amount of time that application spent in the
- * corresponding function only.
- * @param {ProfileView.Node} head Profile view head.
- * @constructor
- */
-ProfileView.Node = function(
- internalFuncName, totalTime, selfTime, head) {
- this.internalFuncName = internalFuncName;
- this.totalTime = totalTime;
- this.selfTime = selfTime;
- this.head = head;
- this.parent = null;
- this.children = [];
-};
-
-
-/**
- * Returns a share of the function's total time in application's total time.
- */
-ProfileView.Node.prototype.__defineGetter__(
- 'totalPercent',
- function() { return this.totalTime /
- (this.head ? this.head.totalTime : this.totalTime) * 100.0; });
-
-
-/**
- * Returns a share of the function's self time in application's total time.
- */
-ProfileView.Node.prototype.__defineGetter__(
- 'selfPercent',
- function() { return this.selfTime /
- (this.head ? this.head.totalTime : this.totalTime) * 100.0; });
-
-
-/**
- * Returns a share of the function's total time in its parent's total time.
- */
-ProfileView.Node.prototype.__defineGetter__(
- 'parentTotalPercent',
- function() { return this.totalTime /
- (this.parent ? this.parent.totalTime : this.totalTime) * 100.0; });
-
-
-/**
- * Adds a child to the node.
- *
- * @param {ProfileView.Node} node Child node.
- */
-ProfileView.Node.prototype.addChild = function(node) {
- node.parent = this;
- this.children.push(node);
-};
-
-
-/**
- * Sorts all the node's children recursively.
- *
- * @param {function(ProfileView.Node,
- * ProfileView.Node):number} sortFunc A sorting
- * functions. Must comply with Array.sort sorting function requirements.
- */
-ProfileView.Node.prototype.sortChildren = function(
- sortFunc) {
- this.children.sort(sortFunc);
-};
diff --git a/src/3rdparty/v8/tools/run-valgrind.py b/src/3rdparty/v8/tools/run-valgrind.py
deleted file mode 100755
index 49c1b70..0000000
--- a/src/3rdparty/v8/tools/run-valgrind.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright 2009 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# Simple wrapper for running valgrind and checking the output on
-# stderr for memory leaks.
-
-import subprocess
-import sys
-import re
-
-VALGRIND_ARGUMENTS = [
- 'valgrind',
- '--error-exitcode=1',
- '--leak-check=full',
- '--smc-check=all'
-]
-
-# Compute the command line.
-command = VALGRIND_ARGUMENTS + sys.argv[1:]
-
-# Run valgrind.
-process = subprocess.Popen(command, stderr=subprocess.PIPE)
-code = process.wait();
-errors = process.stderr.readlines();
-
-# If valgrind produced an error, we report that to the user.
-if code != 0:
- sys.stderr.writelines(errors)
- sys.exit(code)
-
-# Look through the leak details and make sure that we don't
-# have any definitely, indirectly, and possibly lost bytes.
-LEAK_RE = r"(?:definitely|indirectly|possibly) lost: "
-LEAK_LINE_MATCHER = re.compile(LEAK_RE)
-LEAK_OKAY_MATCHER = re.compile(r"lost: 0 bytes in 0 blocks")
-leaks = []
-for line in errors:
- if LEAK_LINE_MATCHER.search(line):
- leaks.append(line)
- if not LEAK_OKAY_MATCHER.search(line):
- sys.stderr.writelines(errors)
- sys.exit(1)
-
-# Make sure we found between 2 and 3 leak lines.
-if len(leaks) < 2 or len(leaks) > 3:
- sys.stderr.writelines(errors)
- sys.stderr.write('\n\n#### Malformed valgrind output.\n#### Exiting.\n')
- sys.exit(1)
-
-# No leaks found.
-sys.exit(0)
diff --git a/src/3rdparty/v8/tools/splaytree.js b/src/3rdparty/v8/tools/splaytree.js
deleted file mode 100644
index 1c9aab9..0000000
--- a/src/3rdparty/v8/tools/splaytree.js
+++ /dev/null
@@ -1,316 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-/**
- * Constructs a Splay tree. A splay tree is a self-balancing binary
- * search tree with the additional property that recently accessed
- * elements are quick to access again. It performs basic operations
- * such as insertion, look-up and removal in O(log(n)) amortized time.
- *
- * @constructor
- */
-function SplayTree() {
-};
-
-
-/**
- * Pointer to the root node of the tree.
- *
- * @type {SplayTree.Node}
- * @private
- */
-SplayTree.prototype.root_ = null;
-
-
-/**
- * @return {boolean} Whether the tree is empty.
- */
-SplayTree.prototype.isEmpty = function() {
- return !this.root_;
-};
-
-
-
-/**
- * Inserts a node into the tree with the specified key and value if
- * the tree does not already contain a node with the specified key. If
- * the value is inserted, it becomes the root of the tree.
- *
- * @param {number} key Key to insert into the tree.
- * @param {*} value Value to insert into the tree.
- */
-SplayTree.prototype.insert = function(key, value) {
- if (this.isEmpty()) {
- this.root_ = new SplayTree.Node(key, value);
- return;
- }
- // Splay on the key to move the last node on the search path for
- // the key to the root of the tree.
- this.splay_(key);
- if (this.root_.key == key) {
- return;
- }
- var node = new SplayTree.Node(key, value);
- if (key > this.root_.key) {
- node.left = this.root_;
- node.right = this.root_.right;
- this.root_.right = null;
- } else {
- node.right = this.root_;
- node.left = this.root_.left;
- this.root_.left = null;
- }
- this.root_ = node;
-};
-
-
-/**
- * Removes a node with the specified key from the tree if the tree
- * contains a node with this key. The removed node is returned. If the
- * key is not found, an exception is thrown.
- *
- * @param {number} key Key to find and remove from the tree.
- * @return {SplayTree.Node} The removed node.
- */
-SplayTree.prototype.remove = function(key) {
- if (this.isEmpty()) {
- throw Error('Key not found: ' + key);
- }
- this.splay_(key);
- if (this.root_.key != key) {
- throw Error('Key not found: ' + key);
- }
- var removed = this.root_;
- if (!this.root_.left) {
- this.root_ = this.root_.right;
- } else {
- var right = this.root_.right;
- this.root_ = this.root_.left;
- // Splay to make sure that the new root has an empty right child.
- this.splay_(key);
- // Insert the original right child as the right child of the new
- // root.
- this.root_.right = right;
- }
- return removed;
-};
-
-
-/**
- * Returns the node having the specified key or null if the tree doesn't contain
- * a node with the specified key.
- *
- * @param {number} key Key to find in the tree.
- * @return {SplayTree.Node} Node having the specified key.
- */
-SplayTree.prototype.find = function(key) {
- if (this.isEmpty()) {
- return null;
- }
- this.splay_(key);
- return this.root_.key == key ? this.root_ : null;
-};
-
-
-/**
- * @return {SplayTree.Node} Node having the minimum key value.
- */
-SplayTree.prototype.findMin = function() {
- if (this.isEmpty()) {
- return null;
- }
- var current = this.root_;
- while (current.left) {
- current = current.left;
- }
- return current;
-};
-
-
-/**
- * @return {SplayTree.Node} Node having the maximum key value.
- */
-SplayTree.prototype.findMax = function(opt_startNode) {
- if (this.isEmpty()) {
- return null;
- }
- var current = opt_startNode || this.root_;
- while (current.right) {
- current = current.right;
- }
- return current;
-};
-
-
-/**
- * @return {SplayTree.Node} Node having the maximum key value that
- * is less or equal to the specified key value.
- */
-SplayTree.prototype.findGreatestLessThan = function(key) {
- if (this.isEmpty()) {
- return null;
- }
- // Splay on the key to move the node with the given key or the last
- // node on the search path to the top of the tree.
- this.splay_(key);
- // Now the result is either the root node or the greatest node in
- // the left subtree.
- if (this.root_.key <= key) {
- return this.root_;
- } else if (this.root_.left) {
- return this.findMax(this.root_.left);
- } else {
- return null;
- }
-};
-
-
-/**
- * @return {Array<*>} An array containing all the values of tree's nodes.
- */
-SplayTree.prototype.exportValues = function() {
- var result = [];
- this.traverse_(function(node) { result.push(node.value); });
- return result;
-};
-
-
-/**
- * Perform the splay operation for the given key. Moves the node with
- * the given key to the top of the tree. If no node has the given
- * key, the last node on the search path is moved to the top of the
- * tree. This is the simplified top-down splaying algorithm from:
- * "Self-adjusting Binary Search Trees" by Sleator and Tarjan
- *
- * @param {number} key Key to splay the tree on.
- * @private
- */
-SplayTree.prototype.splay_ = function(key) {
- if (this.isEmpty()) {
- return;
- }
- // Create a dummy node. The use of the dummy node is a bit
- // counter-intuitive: The right child of the dummy node will hold
- // the L tree of the algorithm. The left child of the dummy node
- // will hold the R tree of the algorithm. Using a dummy node, left
- // and right will always be nodes and we avoid special cases.
- var dummy, left, right;
- dummy = left = right = new SplayTree.Node(null, null);
- var current = this.root_;
- while (true) {
- if (key < current.key) {
- if (!current.left) {
- break;
- }
- if (key < current.left.key) {
- // Rotate right.
- var tmp = current.left;
- current.left = tmp.right;
- tmp.right = current;
- current = tmp;
- if (!current.left) {
- break;
- }
- }
- // Link right.
- right.left = current;
- right = current;
- current = current.left;
- } else if (key > current.key) {
- if (!current.right) {
- break;
- }
- if (key > current.right.key) {
- // Rotate left.
- var tmp = current.right;
- current.right = tmp.left;
- tmp.left = current;
- current = tmp;
- if (!current.right) {
- break;
- }
- }
- // Link left.
- left.right = current;
- left = current;
- current = current.right;
- } else {
- break;
- }
- }
- // Assemble.
- left.right = current.left;
- right.left = current.right;
- current.left = dummy.right;
- current.right = dummy.left;
- this.root_ = current;
-};
-
-
-/**
- * Performs a preorder traversal of the tree.
- *
- * @param {function(SplayTree.Node)} f Visitor function.
- * @private
- */
-SplayTree.prototype.traverse_ = function(f) {
- var nodesToVisit = [this.root_];
- while (nodesToVisit.length > 0) {
- var node = nodesToVisit.shift();
- if (node == null) {
- continue;
- }
- f(node);
- nodesToVisit.push(node.left);
- nodesToVisit.push(node.right);
- }
-};
-
-
-/**
- * Constructs a Splay tree node.
- *
- * @param {number} key Key.
- * @param {*} value Value.
- */
-SplayTree.Node = function(key, value) {
- this.key = key;
- this.value = value;
-};
-
-
-/**
- * @type {SplayTree.Node}
- */
-SplayTree.Node.prototype.left = null;
-
-
-/**
- * @type {SplayTree.Node}
- */
-SplayTree.Node.prototype.right = null;
diff --git a/src/3rdparty/v8/tools/stats-viewer.py b/src/3rdparty/v8/tools/stats-viewer.py
deleted file mode 100755
index 05cb762..0000000
--- a/src/3rdparty/v8/tools/stats-viewer.py
+++ /dev/null
@@ -1,468 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-"""A cross-platform execution counter viewer.
-
-The stats viewer reads counters from a binary file and displays them
-in a window, re-reading and re-displaying with regular intervals.
-"""
-
-import mmap
-import optparse
-import os
-import re
-import struct
-import sys
-import time
-import Tkinter
-
-
-# The interval, in milliseconds, between ui updates
-UPDATE_INTERVAL_MS = 100
-
-
-# Mapping from counter prefix to the formatting to be used for the counter
-COUNTER_LABELS = {"t": "%i ms.", "c": "%i"}
-
-
-# The magic numbers used to check if a file is not a counters file
-COUNTERS_FILE_MAGIC_NUMBER = 0xDEADFACE
-CHROME_COUNTERS_FILE_MAGIC_NUMBER = 0x13131313
-
-
-class StatsViewer(object):
- """The main class that keeps the data used by the stats viewer."""
-
- def __init__(self, data_name, name_filter):
- """Creates a new instance.
-
- Args:
- data_name: the name of the file containing the counters.
- name_filter: The regexp filter to apply to counter names.
- """
- self.data_name = data_name
- self.name_filter = name_filter
-
- # The handle created by mmap.mmap to the counters file. We need
- # this to clean it up on exit.
- self.shared_mmap = None
-
- # A mapping from counter names to the ui element that displays
- # them
- self.ui_counters = {}
-
- # The counter collection used to access the counters file
- self.data = None
-
- # The Tkinter root window object
- self.root = None
-
- def Run(self):
- """The main entry-point to running the stats viewer."""
- try:
- self.data = self.MountSharedData()
- # OpenWindow blocks until the main window is closed
- self.OpenWindow()
- finally:
- self.CleanUp()
-
- def MountSharedData(self):
- """Mount the binary counters file as a memory-mapped file. If
- something goes wrong print an informative message and exit the
- program."""
- if not os.path.exists(self.data_name):
- maps_name = "/proc/%s/maps" % self.data_name
- if not os.path.exists(maps_name):
- print "\"%s\" is neither a counter file nor a PID." % self.data_name
- sys.exit(1)
- maps_file = open(maps_name, "r")
- try:
- m = re.search(r"/dev/shm/\S*", maps_file.read())
- if m is not None and os.path.exists(m.group(0)):
- self.data_name = m.group(0)
- else:
- print "Can't find counter file in maps for PID %s." % self.data_name
- sys.exit(1)
- finally:
- maps_file.close()
- data_file = open(self.data_name, "r")
- size = os.fstat(data_file.fileno()).st_size
- fileno = data_file.fileno()
- self.shared_mmap = mmap.mmap(fileno, size, access=mmap.ACCESS_READ)
- data_access = SharedDataAccess(self.shared_mmap)
- if data_access.IntAt(0) == COUNTERS_FILE_MAGIC_NUMBER:
- return CounterCollection(data_access)
- elif data_access.IntAt(0) == CHROME_COUNTERS_FILE_MAGIC_NUMBER:
- return ChromeCounterCollection(data_access)
- print "File %s is not stats data." % self.data_name
- sys.exit(1)
-
- def CleanUp(self):
- """Cleans up the memory mapped file if necessary."""
- if self.shared_mmap:
- self.shared_mmap.close()
-
- def UpdateCounters(self):
- """Read the contents of the memory-mapped file and update the ui if
- necessary. If the same counters are present in the file as before
- we just update the existing labels. If any counters have been added
- or removed we scrap the existing ui and draw a new one.
- """
- changed = False
- counters_in_use = self.data.CountersInUse()
- if counters_in_use != len(self.ui_counters):
- self.RefreshCounters()
- changed = True
- else:
- for i in xrange(self.data.CountersInUse()):
- counter = self.data.Counter(i)
- name = counter.Name()
- if name in self.ui_counters:
- value = counter.Value()
- ui_counter = self.ui_counters[name]
- counter_changed = ui_counter.Set(value)
- changed = (changed or counter_changed)
- else:
- self.RefreshCounters()
- changed = True
- break
- if changed:
- # The title of the window shows the last time the file was
- # changed.
- self.UpdateTime()
- self.ScheduleUpdate()
-
- def UpdateTime(self):
- """Update the title of the window with the current time."""
- self.root.title("Stats Viewer [updated %s]" % time.strftime("%H:%M:%S"))
-
- def ScheduleUpdate(self):
- """Schedules the next ui update."""
- self.root.after(UPDATE_INTERVAL_MS, lambda: self.UpdateCounters())
-
- def RefreshCounters(self):
- """Tear down and rebuild the controls in the main window."""
- counters = self.ComputeCounters()
- self.RebuildMainWindow(counters)
-
- def ComputeCounters(self):
- """Group the counters by the suffix of their name.
-
- Since the same code-level counter (for instance "X") can result in
- several variables in the binary counters file that differ only by a
- two-character prefix (for instance "c:X" and "t:X") counters are
- grouped by suffix and then displayed with custom formatting
- depending on their prefix.
-
- Returns:
- A mapping from suffixes to a list of counters with that suffix,
- sorted by prefix.
- """
- names = {}
- for i in xrange(self.data.CountersInUse()):
- counter = self.data.Counter(i)
- name = counter.Name()
- names[name] = counter
-
- # By sorting the keys we ensure that the prefixes always come in the
- # same order ("c:" before "t:") which looks more consistent in the
- # ui.
- sorted_keys = names.keys()
- sorted_keys.sort()
-
- # Group together the names whose suffix after a ':' are the same.
- groups = {}
- for name in sorted_keys:
- counter = names[name]
- if ":" in name:
- name = name[name.find(":")+1:]
- if not name in groups:
- groups[name] = []
- groups[name].append(counter)
-
- return groups
-
- def RebuildMainWindow(self, groups):
- """Tear down and rebuild the main window.
-
- Args:
- groups: the groups of counters to display
- """
- # Remove elements in the current ui
- self.ui_counters.clear()
- for child in self.root.children.values():
- child.destroy()
-
- # Build new ui
- index = 0
- sorted_groups = groups.keys()
- sorted_groups.sort()
- for counter_name in sorted_groups:
- counter_objs = groups[counter_name]
- if self.name_filter.match(counter_name):
- name = Tkinter.Label(self.root, width=50, anchor=Tkinter.W,
- text=counter_name)
- name.grid(row=index, column=0, padx=1, pady=1)
- count = len(counter_objs)
- for i in xrange(count):
- counter = counter_objs[i]
- name = counter.Name()
- var = Tkinter.StringVar()
- if self.name_filter.match(name):
- value = Tkinter.Label(self.root, width=15, anchor=Tkinter.W,
- textvariable=var)
- value.grid(row=index, column=(1 + i), padx=1, pady=1)
-
- # If we know how to interpret the prefix of this counter then
- # add an appropriate formatting to the variable
- if (":" in name) and (name[0] in COUNTER_LABELS):
- format = COUNTER_LABELS[name[0]]
- else:
- format = "%i"
- ui_counter = UiCounter(var, format)
- self.ui_counters[name] = ui_counter
- ui_counter.Set(counter.Value())
- index += 1
- self.root.update()
-
- def OpenWindow(self):
- """Create and display the root window."""
- self.root = Tkinter.Tk()
-
- # Tkinter is no good at resizing so we disable it
- self.root.resizable(width=False, height=False)
- self.RefreshCounters()
- self.ScheduleUpdate()
- self.root.mainloop()
-
-
-class UiCounter(object):
- """A counter in the ui."""
-
- def __init__(self, var, format):
- """Creates a new ui counter.
-
- Args:
- var: the Tkinter string variable for updating the ui
- format: the format string used to format this counter
- """
- self.var = var
- self.format = format
- self.last_value = None
-
- def Set(self, value):
- """Updates the ui for this counter.
-
- Args:
- value: The value to display
-
- Returns:
- True if the value had changed, otherwise False. The first call
- always returns True.
- """
- if value == self.last_value:
- return False
- else:
- self.last_value = value
- self.var.set(self.format % value)
- return True
-
-
-class SharedDataAccess(object):
- """A utility class for reading data from the memory-mapped binary
- counters file."""
-
- def __init__(self, data):
- """Create a new instance.
-
- Args:
- data: A handle to the memory-mapped file, as returned by mmap.mmap.
- """
- self.data = data
-
- def ByteAt(self, index):
- """Return the (unsigned) byte at the specified byte index."""
- return ord(self.CharAt(index))
-
- def IntAt(self, index):
- """Return the little-endian 32-byte int at the specified byte index."""
- word_str = self.data[index:index+4]
- result, = struct.unpack("I", word_str)
- return result
-
- def CharAt(self, index):
- """Return the ascii character at the specified byte index."""
- return self.data[index]
-
-
-class Counter(object):
- """A pointer to a single counter withing a binary counters file."""
-
- def __init__(self, data, offset):
- """Create a new instance.
-
- Args:
- data: the shared data access object containing the counter
- offset: the byte offset of the start of this counter
- """
- self.data = data
- self.offset = offset
-
- def Value(self):
- """Return the integer value of this counter."""
- return self.data.IntAt(self.offset)
-
- def Name(self):
- """Return the ascii name of this counter."""
- result = ""
- index = self.offset + 4
- current = self.data.ByteAt(index)
- while current:
- result += chr(current)
- index += 1
- current = self.data.ByteAt(index)
- return result
-
-
-class CounterCollection(object):
- """An overlay over a counters file that provides access to the
- individual counters contained in the file."""
-
- def __init__(self, data):
- """Create a new instance.
-
- Args:
- data: the shared data access object
- """
- self.data = data
- self.max_counters = data.IntAt(4)
- self.max_name_size = data.IntAt(8)
-
- def CountersInUse(self):
- """Return the number of counters in active use."""
- return self.data.IntAt(12)
-
- def Counter(self, index):
- """Return the index'th counter."""
- return Counter(self.data, 16 + index * self.CounterSize())
-
- def CounterSize(self):
- """Return the size of a single counter."""
- return 4 + self.max_name_size
-
-
-class ChromeCounter(object):
- """A pointer to a single counter withing a binary counters file."""
-
- def __init__(self, data, name_offset, value_offset):
- """Create a new instance.
-
- Args:
- data: the shared data access object containing the counter
- name_offset: the byte offset of the start of this counter's name
- value_offset: the byte offset of the start of this counter's value
- """
- self.data = data
- self.name_offset = name_offset
- self.value_offset = value_offset
-
- def Value(self):
- """Return the integer value of this counter."""
- return self.data.IntAt(self.value_offset)
-
- def Name(self):
- """Return the ascii name of this counter."""
- result = ""
- index = self.name_offset
- current = self.data.ByteAt(index)
- while current:
- result += chr(current)
- index += 1
- current = self.data.ByteAt(index)
- return result
-
-
-class ChromeCounterCollection(object):
- """An overlay over a counters file that provides access to the
- individual counters contained in the file."""
-
- _HEADER_SIZE = 4 * 4
- _NAME_SIZE = 32
-
- def __init__(self, data):
- """Create a new instance.
-
- Args:
- data: the shared data access object
- """
- self.data = data
- self.max_counters = data.IntAt(8)
- self.max_threads = data.IntAt(12)
- self.counter_names_offset = \
- self._HEADER_SIZE + self.max_threads * (self._NAME_SIZE + 2 * 4)
- self.counter_values_offset = \
- self.counter_names_offset + self.max_counters * self._NAME_SIZE
-
- def CountersInUse(self):
- """Return the number of counters in active use."""
- for i in xrange(self.max_counters):
- if self.data.ByteAt(self.counter_names_offset + i * self._NAME_SIZE) == 0:
- return i
- return self.max_counters
-
- def Counter(self, i):
- """Return the i'th counter."""
- return ChromeCounter(self.data,
- self.counter_names_offset + i * self._NAME_SIZE,
- self.counter_values_offset + i * self.max_threads * 4)
-
-
-def Main(data_file, name_filter):
- """Run the stats counter.
-
- Args:
- data_file: The counters file to monitor.
- name_filter: The regexp filter to apply to counter names.
- """
- StatsViewer(data_file, name_filter).Run()
-
-
-if __name__ == "__main__":
- parser = optparse.OptionParser("usage: %prog [--filter=re] "
- "<stats data>|<test_shell pid>")
- parser.add_option("--filter",
- default=".*",
- help=("regexp filter for counter names "
- "[default: %default]"))
- (options, args) = parser.parse_args()
- if len(args) != 1:
- parser.print_help()
- sys.exit(1)
- Main(args[0], re.compile(options.filter))
diff --git a/src/3rdparty/v8/tools/test.py b/src/3rdparty/v8/tools/test.py
deleted file mode 100755
index 707e725..0000000
--- a/src/3rdparty/v8/tools/test.py
+++ /dev/null
@@ -1,1490 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import imp
-import optparse
-import os
-from os.path import join, dirname, abspath, basename, isdir, exists
-import platform
-import re
-import signal
-import subprocess
-import sys
-import tempfile
-import time
-import threading
-import utils
-from Queue import Queue, Empty
-
-
-VERBOSE = False
-
-
-# ---------------------------------------------
-# --- P r o g r e s s I n d i c a t o r s ---
-# ---------------------------------------------
-
-
-class ProgressIndicator(object):
-
- def __init__(self, cases):
- self.cases = cases
- self.queue = Queue(len(cases))
- for case in cases:
- self.queue.put_nowait(case)
- self.succeeded = 0
- self.remaining = len(cases)
- self.total = len(cases)
- self.failed = [ ]
- self.crashed = 0
- self.terminate = False
- self.lock = threading.Lock()
-
- def PrintFailureHeader(self, test):
- if test.IsNegative():
- negative_marker = '[negative] '
- else:
- negative_marker = ''
- print "=== %(label)s %(negative)s===" % {
- 'label': test.GetLabel(),
- 'negative': negative_marker
- }
- print "Path: %s" % "/".join(test.path)
-
- def Run(self, tasks):
- self.Starting()
- threads = []
- # Spawn N-1 threads and then use this thread as the last one.
- # That way -j1 avoids threading altogether which is a nice fallback
- # in case of threading problems.
- for i in xrange(tasks - 1):
- thread = threading.Thread(target=self.RunSingle, args=[])
- threads.append(thread)
- thread.start()
- try:
- self.RunSingle()
- # Wait for the remaining threads
- for thread in threads:
- # Use a timeout so that signals (ctrl-c) will be processed.
- thread.join(timeout=10000000)
- except Exception, e:
- # If there's an exception we schedule an interruption for any
- # remaining threads.
- self.terminate = True
- # ...and then reraise the exception to bail out
- raise
- self.Done()
- return not self.failed
-
- def RunSingle(self):
- while not self.terminate:
- try:
- test = self.queue.get_nowait()
- except Empty:
- return
- case = test.case
- self.lock.acquire()
- self.AboutToRun(case)
- self.lock.release()
- try:
- start = time.time()
- output = case.Run()
- case.duration = (time.time() - start)
- except IOError, e:
- assert self.terminate
- return
- if self.terminate:
- return
- self.lock.acquire()
- if output.UnexpectedOutput():
- self.failed.append(output)
- if output.HasCrashed():
- self.crashed += 1
- else:
- self.succeeded += 1
- self.remaining -= 1
- self.HasRun(output)
- self.lock.release()
-
-
-def EscapeCommand(command):
- parts = []
- for part in command:
- if ' ' in part:
- # Escape spaces. We may need to escape more characters for this
- # to work properly.
- parts.append('"%s"' % part)
- else:
- parts.append(part)
- return " ".join(parts)
-
-
-class SimpleProgressIndicator(ProgressIndicator):
-
- def Starting(self):
- print 'Running %i tests' % len(self.cases)
-
- def Done(self):
- print
- for failed in self.failed:
- self.PrintFailureHeader(failed.test)
- if failed.output.stderr:
- print "--- stderr ---"
- print failed.output.stderr.strip()
- if failed.output.stdout:
- print "--- stdout ---"
- print failed.output.stdout.strip()
- print "Command: %s" % EscapeCommand(failed.command)
- if failed.HasCrashed():
- print "--- CRASHED ---"
- if failed.HasTimedOut():
- print "--- TIMEOUT ---"
- if len(self.failed) == 0:
- print "==="
- print "=== All tests succeeded"
- print "==="
- else:
- print
- print "==="
- print "=== %i tests failed" % len(self.failed)
- if self.crashed > 0:
- print "=== %i tests CRASHED" % self.crashed
- print "==="
-
-
-class VerboseProgressIndicator(SimpleProgressIndicator):
-
- def AboutToRun(self, case):
- print 'Starting %s...' % case.GetLabel()
- sys.stdout.flush()
-
- def HasRun(self, output):
- if output.UnexpectedOutput():
- if output.HasCrashed():
- outcome = 'CRASH'
- else:
- outcome = 'FAIL'
- else:
- outcome = 'pass'
- print 'Done running %s: %s' % (output.test.GetLabel(), outcome)
-
-
-class DotsProgressIndicator(SimpleProgressIndicator):
-
- def AboutToRun(self, case):
- pass
-
- def HasRun(self, output):
- total = self.succeeded + len(self.failed)
- if (total > 1) and (total % 50 == 1):
- sys.stdout.write('\n')
- if output.UnexpectedOutput():
- if output.HasCrashed():
- sys.stdout.write('C')
- sys.stdout.flush()
- elif output.HasTimedOut():
- sys.stdout.write('T')
- sys.stdout.flush()
- else:
- sys.stdout.write('F')
- sys.stdout.flush()
- else:
- sys.stdout.write('.')
- sys.stdout.flush()
-
-
-class CompactProgressIndicator(ProgressIndicator):
-
- def __init__(self, cases, templates):
- super(CompactProgressIndicator, self).__init__(cases)
- self.templates = templates
- self.last_status_length = 0
- self.start_time = time.time()
-
- def Starting(self):
- pass
-
- def Done(self):
- self.PrintProgress('Done')
-
- def AboutToRun(self, case):
- self.PrintProgress(case.GetLabel())
-
- def HasRun(self, output):
- if output.UnexpectedOutput():
- self.ClearLine(self.last_status_length)
- self.PrintFailureHeader(output.test)
- stdout = output.output.stdout.strip()
- if len(stdout):
- print self.templates['stdout'] % stdout
- stderr = output.output.stderr.strip()
- if len(stderr):
- print self.templates['stderr'] % stderr
- print "Command: %s" % EscapeCommand(output.command)
- if output.HasCrashed():
- print "--- CRASHED ---"
- if output.HasTimedOut():
- print "--- TIMEOUT ---"
-
- def Truncate(self, str, length):
- if length and (len(str) > (length - 3)):
- return str[:(length-3)] + "..."
- else:
- return str
-
- def PrintProgress(self, name):
- self.ClearLine(self.last_status_length)
- elapsed = time.time() - self.start_time
- status = self.templates['status_line'] % {
- 'passed': self.succeeded,
- 'remaining': (((self.total - self.remaining) * 100) // self.total),
- 'failed': len(self.failed),
- 'test': name,
- 'mins': int(elapsed) / 60,
- 'secs': int(elapsed) % 60
- }
- status = self.Truncate(status, 78)
- self.last_status_length = len(status)
- print status,
- sys.stdout.flush()
-
-
-class ColorProgressIndicator(CompactProgressIndicator):
-
- def __init__(self, cases):
- templates = {
- 'status_line': "[%(mins)02i:%(secs)02i|\033[34m%%%(remaining) 4d\033[0m|\033[32m+%(passed) 4d\033[0m|\033[31m-%(failed) 4d\033[0m]: %(test)s",
- 'stdout': "\033[1m%s\033[0m",
- 'stderr': "\033[31m%s\033[0m",
- }
- super(ColorProgressIndicator, self).__init__(cases, templates)
-
- def ClearLine(self, last_line_length):
- print "\033[1K\r",
-
-
-class MonochromeProgressIndicator(CompactProgressIndicator):
-
- def __init__(self, cases):
- templates = {
- 'status_line': "[%(mins)02i:%(secs)02i|%%%(remaining) 4d|+%(passed) 4d|-%(failed) 4d]: %(test)s",
- 'stdout': '%s',
- 'stderr': '%s',
- 'clear': lambda last_line_length: ("\r" + (" " * last_line_length) + "\r"),
- 'max_length': 78
- }
- super(MonochromeProgressIndicator, self).__init__(cases, templates)
-
- def ClearLine(self, last_line_length):
- print ("\r" + (" " * last_line_length) + "\r"),
-
-
-PROGRESS_INDICATORS = {
- 'verbose': VerboseProgressIndicator,
- 'dots': DotsProgressIndicator,
- 'color': ColorProgressIndicator,
- 'mono': MonochromeProgressIndicator
-}
-
-
-# -------------------------
-# --- F r a m e w o r k ---
-# -------------------------
-
-
-class CommandOutput(object):
-
- def __init__(self, exit_code, timed_out, stdout, stderr):
- self.exit_code = exit_code
- self.timed_out = timed_out
- self.stdout = stdout
- self.stderr = stderr
- self.failed = None
-
-
-class TestCase(object):
-
- def __init__(self, context, path, mode):
- self.path = path
- self.context = context
- self.duration = None
- self.mode = mode
-
- def IsNegative(self):
- return False
-
- def TestsIsolates(self):
- return False
-
- def CompareTime(self, other):
- return cmp(other.duration, self.duration)
-
- def DidFail(self, output):
- if output.failed is None:
- output.failed = self.IsFailureOutput(output)
- return output.failed
-
- def IsFailureOutput(self, output):
- return output.exit_code != 0
-
- def GetSource(self):
- return "(no source available)"
-
- def RunCommand(self, command):
- full_command = self.context.processor(command)
- output = Execute(full_command,
- self.context,
- self.context.GetTimeout(self, self.mode))
- self.Cleanup()
- return TestOutput(self,
- full_command,
- output,
- self.context.store_unexpected_output)
-
- def BeforeRun(self):
- pass
-
- def AfterRun(self, result):
- pass
-
- def GetCustomFlags(self, mode):
- return None
-
- def Run(self):
- self.BeforeRun()
- result = "exception"
- try:
- result = self.RunCommand(self.GetCommand())
- finally:
- self.AfterRun(result)
- return result
-
- def Cleanup(self):
- return
-
-
-class TestOutput(object):
-
- def __init__(self, test, command, output, store_unexpected_output):
- self.test = test
- self.command = command
- self.output = output
- self.store_unexpected_output = store_unexpected_output
-
- def UnexpectedOutput(self):
- if self.HasCrashed():
- outcome = CRASH
- elif self.HasTimedOut():
- outcome = TIMEOUT
- elif self.HasFailed():
- outcome = FAIL
- else:
- outcome = PASS
- return not outcome in self.test.outcomes
-
- def HasPreciousOutput(self):
- return self.UnexpectedOutput() and self.store_unexpected_output
-
- def HasCrashed(self):
- if utils.IsWindows():
- return 0x80000000 & self.output.exit_code and not (0x3FFFFF00 & self.output.exit_code)
- else:
- # Timed out tests will have exit_code -signal.SIGTERM.
- if self.output.timed_out:
- return False
- return self.output.exit_code < 0 and \
- self.output.exit_code != -signal.SIGABRT
-
- def HasTimedOut(self):
- return self.output.timed_out;
-
- def HasFailed(self):
- execution_failed = self.test.DidFail(self.output)
- if self.test.IsNegative():
- return not execution_failed
- else:
- return execution_failed
-
-
-def KillProcessWithID(pid):
- if utils.IsWindows():
- os.popen('taskkill /T /F /PID %d' % pid)
- else:
- os.kill(pid, signal.SIGTERM)
-
-
-MAX_SLEEP_TIME = 0.1
-INITIAL_SLEEP_TIME = 0.0001
-SLEEP_TIME_FACTOR = 1.25
-
-SEM_INVALID_VALUE = -1
-SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
-
-def Win32SetErrorMode(mode):
- prev_error_mode = SEM_INVALID_VALUE
- try:
- import ctypes
- prev_error_mode = ctypes.windll.kernel32.SetErrorMode(mode);
- except ImportError:
- pass
- return prev_error_mode
-
-def RunProcess(context, timeout, args, **rest):
- if context.verbose: print "#", " ".join(args)
- popen_args = args
- prev_error_mode = SEM_INVALID_VALUE;
- if utils.IsWindows():
- popen_args = '"' + subprocess.list2cmdline(args) + '"'
- if context.suppress_dialogs:
- # Try to change the error mode to avoid dialogs on fatal errors. Don't
- # touch any existing error mode flags by merging the existing error mode.
- # See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
- error_mode = SEM_NOGPFAULTERRORBOX;
- prev_error_mode = Win32SetErrorMode(error_mode);
- Win32SetErrorMode(error_mode | prev_error_mode);
- process = subprocess.Popen(
- shell = utils.IsWindows(),
- args = popen_args,
- **rest
- )
- if utils.IsWindows() and context.suppress_dialogs and prev_error_mode != SEM_INVALID_VALUE:
- Win32SetErrorMode(prev_error_mode)
- # Compute the end time - if the process crosses this limit we
- # consider it timed out.
- if timeout is None: end_time = None
- else: end_time = time.time() + timeout
- timed_out = False
- # Repeatedly check the exit code from the process in a
- # loop and keep track of whether or not it times out.
- exit_code = None
- sleep_time = INITIAL_SLEEP_TIME
- while exit_code is None:
- if (not end_time is None) and (time.time() >= end_time):
- # Kill the process and wait for it to exit.
- KillProcessWithID(process.pid)
- exit_code = process.wait()
- timed_out = True
- else:
- exit_code = process.poll()
- time.sleep(sleep_time)
- sleep_time = sleep_time * SLEEP_TIME_FACTOR
- if sleep_time > MAX_SLEEP_TIME:
- sleep_time = MAX_SLEEP_TIME
- return (process, exit_code, timed_out)
-
-
-def PrintError(str):
- sys.stderr.write(str)
- sys.stderr.write('\n')
-
-
-def CheckedUnlink(name):
- # On Windows, when run with -jN in parallel processes,
- # OS often fails to unlink the temp file. Not sure why.
- # Need to retry.
- # Idea from https://bugs.webkit.org/attachment.cgi?id=75982&action=prettypatch
- retry_count = 0
- while retry_count < 30:
- try:
- os.unlink(name)
- return
- except OSError, e:
- retry_count += 1;
- time.sleep(retry_count * 0.1)
- PrintError("os.unlink() " + str(e))
-
-def Execute(args, context, timeout=None):
- (fd_out, outname) = tempfile.mkstemp()
- (fd_err, errname) = tempfile.mkstemp()
- (process, exit_code, timed_out) = RunProcess(
- context,
- timeout,
- args = args,
- stdout = fd_out,
- stderr = fd_err,
- )
- os.close(fd_out)
- os.close(fd_err)
- output = file(outname).read()
- errors = file(errname).read()
- CheckedUnlink(outname)
- CheckedUnlink(errname)
- return CommandOutput(exit_code, timed_out, output, errors)
-
-
-def ExecuteNoCapture(args, context, timeout=None):
- (process, exit_code, timed_out) = RunProcess(
- context,
- timeout,
- args = args,
- )
- return CommandOutput(exit_code, False, "", "")
-
-
-def CarCdr(path):
- if len(path) == 0:
- return (None, [ ])
- else:
- return (path[0], path[1:])
-
-
-class TestConfiguration(object):
-
- def __init__(self, context, root):
- self.context = context
- self.root = root
-
- def Contains(self, path, file):
- if len(path) > len(file):
- return False
- for i in xrange(len(path)):
- if not path[i].match(file[i]):
- return False
- return True
-
- def GetTestStatus(self, sections, defs):
- pass
-
-
-class TestSuite(object):
-
- def __init__(self, name):
- self.name = name
-
- def GetName(self):
- return self.name
-
-
-# Use this to run several variants of the tests, e.g.:
-# VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
-VARIANT_FLAGS = [[],
- ['--stress-opt', '--always-opt'],
- ['--nocrankshaft']]
-
-
-class TestRepository(TestSuite):
-
- def __init__(self, path):
- normalized_path = abspath(path)
- super(TestRepository, self).__init__(basename(normalized_path))
- self.path = normalized_path
- self.is_loaded = False
- self.config = None
-
- def GetConfiguration(self, context):
- if self.is_loaded:
- return self.config
- self.is_loaded = True
- file = None
- try:
- (file, pathname, description) = imp.find_module('testcfg', [ self.path ])
- module = imp.load_module('testcfg', file, pathname, description)
- self.config = module.GetConfiguration(context, self.path)
- finally:
- if file:
- file.close()
- return self.config
-
- def GetBuildRequirements(self, path, context):
- return self.GetConfiguration(context).GetBuildRequirements()
-
- def AddTestsToList(self, result, current_path, path, context, mode):
- for v in VARIANT_FLAGS:
- tests = self.GetConfiguration(context).ListTests(current_path, path, mode, v)
- for t in tests: t.variant_flags = v
- result += tests
-
-
- def GetTestStatus(self, context, sections, defs):
- self.GetConfiguration(context).GetTestStatus(sections, defs)
-
-
-class LiteralTestSuite(TestSuite):
-
- def __init__(self, tests):
- super(LiteralTestSuite, self).__init__('root')
- self.tests = tests
-
- def GetBuildRequirements(self, path, context):
- (name, rest) = CarCdr(path)
- result = [ ]
- for test in self.tests:
- if not name or name.match(test.GetName()):
- result += test.GetBuildRequirements(rest, context)
- return result
-
- def ListTests(self, current_path, path, context, mode, variant_flags):
- (name, rest) = CarCdr(path)
- result = [ ]
- for test in self.tests:
- test_name = test.GetName()
- if not name or name.match(test_name):
- full_path = current_path + [test_name]
- test.AddTestsToList(result, full_path, path, context, mode)
- return result
-
- def GetTestStatus(self, context, sections, defs):
- for test in self.tests:
- test.GetTestStatus(context, sections, defs)
-
-
-SUFFIX = {
- 'debug' : '_g',
- 'release' : '' }
-FLAGS = {
- 'debug' : ['--enable-slow-asserts', '--debug-code', '--verify-heap'],
- 'release' : []}
-TIMEOUT_SCALEFACTOR = {
- 'debug' : 4,
- 'release' : 1 }
-
-
-class Context(object):
-
- def __init__(self, workspace, buildspace, verbose, vm, timeout, processor, suppress_dialogs, store_unexpected_output):
- self.workspace = workspace
- self.buildspace = buildspace
- self.verbose = verbose
- self.vm_root = vm
- self.timeout = timeout
- self.processor = processor
- self.suppress_dialogs = suppress_dialogs
- self.store_unexpected_output = store_unexpected_output
-
- def GetVm(self, mode):
- name = self.vm_root + SUFFIX[mode]
- if utils.IsWindows() and not name.endswith('.exe'):
- name = name + '.exe'
- return name
-
- def GetVmCommand(self, testcase, mode):
- return [self.GetVm(mode)] + self.GetVmFlags(testcase, mode)
-
- def GetVmFlags(self, testcase, mode):
- flags = testcase.GetCustomFlags(mode)
- if flags is None:
- flags = FLAGS[mode]
- return testcase.variant_flags + flags
-
- def GetTimeout(self, testcase, mode):
- result = self.timeout * TIMEOUT_SCALEFACTOR[mode]
- if '--stress-opt' in self.GetVmFlags(testcase, mode):
- return result * 2
- else:
- return result
-
-def RunTestCases(cases_to_run, progress, tasks):
- progress = PROGRESS_INDICATORS[progress](cases_to_run)
- return progress.Run(tasks)
-
-
-def BuildRequirements(context, requirements, mode, scons_flags):
- command_line = (['scons', '-Y', context.workspace, 'mode=' + ",".join(mode)]
- + requirements
- + scons_flags)
- output = ExecuteNoCapture(command_line, context)
- return output.exit_code == 0
-
-
-# -------------------------------------------
-# --- T e s t C o n f i g u r a t i o n ---
-# -------------------------------------------
-
-
-SKIP = 'skip'
-FAIL = 'fail'
-PASS = 'pass'
-OKAY = 'okay'
-TIMEOUT = 'timeout'
-CRASH = 'crash'
-SLOW = 'slow'
-
-
-class Expression(object):
- pass
-
-
-class Constant(Expression):
-
- def __init__(self, value):
- self.value = value
-
- def Evaluate(self, env, defs):
- return self.value
-
-
-class Variable(Expression):
-
- def __init__(self, name):
- self.name = name
-
- def GetOutcomes(self, env, defs):
- if self.name in env: return ListSet([env[self.name]])
- else: return Nothing()
-
- def Evaluate(self, env, defs):
- return env[self.name]
-
-
-class Outcome(Expression):
-
- def __init__(self, name):
- self.name = name
-
- def GetOutcomes(self, env, defs):
- if self.name in defs:
- return defs[self.name].GetOutcomes(env, defs)
- else:
- return ListSet([self.name])
-
-
-class Set(object):
- pass
-
-
-class ListSet(Set):
-
- def __init__(self, elms):
- self.elms = elms
-
- def __str__(self):
- return "ListSet%s" % str(self.elms)
-
- def Intersect(self, that):
- if not isinstance(that, ListSet):
- return that.Intersect(self)
- return ListSet([ x for x in self.elms if x in that.elms ])
-
- def Union(self, that):
- if not isinstance(that, ListSet):
- return that.Union(self)
- return ListSet(self.elms + [ x for x in that.elms if x not in self.elms ])
-
- def IsEmpty(self):
- return len(self.elms) == 0
-
-
-class Everything(Set):
-
- def Intersect(self, that):
- return that
-
- def Union(self, that):
- return self
-
- def IsEmpty(self):
- return False
-
-
-class Nothing(Set):
-
- def Intersect(self, that):
- return self
-
- def Union(self, that):
- return that
-
- def IsEmpty(self):
- return True
-
-
-class Operation(Expression):
-
- def __init__(self, left, op, right):
- self.left = left
- self.op = op
- self.right = right
-
- def Evaluate(self, env, defs):
- if self.op == '||' or self.op == ',':
- return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
- elif self.op == 'if':
- return False
- elif self.op == '==':
- inter = self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
- return not inter.IsEmpty()
- else:
- assert self.op == '&&'
- return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)
-
- def GetOutcomes(self, env, defs):
- if self.op == '||' or self.op == ',':
- return self.left.GetOutcomes(env, defs).Union(self.right.GetOutcomes(env, defs))
- elif self.op == 'if':
- if self.right.Evaluate(env, defs): return self.left.GetOutcomes(env, defs)
- else: return Nothing()
- else:
- assert self.op == '&&'
- return self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
-
-
-def IsAlpha(str):
- for char in str:
- if not (char.isalpha() or char.isdigit() or char == '_'):
- return False
- return True
-
-
-class Tokenizer(object):
- """A simple string tokenizer that chops expressions into variables,
- parens and operators"""
-
- def __init__(self, expr):
- self.index = 0
- self.expr = expr
- self.length = len(expr)
- self.tokens = None
-
- def Current(self, length = 1):
- if not self.HasMore(length): return ""
- return self.expr[self.index:self.index+length]
-
- def HasMore(self, length = 1):
- return self.index < self.length + (length - 1)
-
- def Advance(self, count = 1):
- self.index = self.index + count
-
- def AddToken(self, token):
- self.tokens.append(token)
-
- def SkipSpaces(self):
- while self.HasMore() and self.Current().isspace():
- self.Advance()
-
- def Tokenize(self):
- self.tokens = [ ]
- while self.HasMore():
- self.SkipSpaces()
- if not self.HasMore():
- return None
- if self.Current() == '(':
- self.AddToken('(')
- self.Advance()
- elif self.Current() == ')':
- self.AddToken(')')
- self.Advance()
- elif self.Current() == '$':
- self.AddToken('$')
- self.Advance()
- elif self.Current() == ',':
- self.AddToken(',')
- self.Advance()
- elif IsAlpha(self.Current()):
- buf = ""
- while self.HasMore() and IsAlpha(self.Current()):
- buf += self.Current()
- self.Advance()
- self.AddToken(buf)
- elif self.Current(2) == '&&':
- self.AddToken('&&')
- self.Advance(2)
- elif self.Current(2) == '||':
- self.AddToken('||')
- self.Advance(2)
- elif self.Current(2) == '==':
- self.AddToken('==')
- self.Advance(2)
- else:
- return None
- return self.tokens
-
-
-class Scanner(object):
- """A simple scanner that can serve out tokens from a given list"""
-
- def __init__(self, tokens):
- self.tokens = tokens
- self.length = len(tokens)
- self.index = 0
-
- def HasMore(self):
- return self.index < self.length
-
- def Current(self):
- return self.tokens[self.index]
-
- def Advance(self):
- self.index = self.index + 1
-
-
-def ParseAtomicExpression(scan):
- if scan.Current() == "true":
- scan.Advance()
- return Constant(True)
- elif scan.Current() == "false":
- scan.Advance()
- return Constant(False)
- elif IsAlpha(scan.Current()):
- name = scan.Current()
- scan.Advance()
- return Outcome(name.lower())
- elif scan.Current() == '$':
- scan.Advance()
- if not IsAlpha(scan.Current()):
- return None
- name = scan.Current()
- scan.Advance()
- return Variable(name.lower())
- elif scan.Current() == '(':
- scan.Advance()
- result = ParseLogicalExpression(scan)
- if (not result) or (scan.Current() != ')'):
- return None
- scan.Advance()
- return result
- else:
- return None
-
-
-BINARIES = ['==']
-def ParseOperatorExpression(scan):
- left = ParseAtomicExpression(scan)
- if not left: return None
- while scan.HasMore() and (scan.Current() in BINARIES):
- op = scan.Current()
- scan.Advance()
- right = ParseOperatorExpression(scan)
- if not right:
- return None
- left = Operation(left, op, right)
- return left
-
-
-def ParseConditionalExpression(scan):
- left = ParseOperatorExpression(scan)
- if not left: return None
- while scan.HasMore() and (scan.Current() == 'if'):
- scan.Advance()
- right = ParseOperatorExpression(scan)
- if not right:
- return None
- left= Operation(left, 'if', right)
- return left
-
-
-LOGICALS = ["&&", "||", ","]
-def ParseLogicalExpression(scan):
- left = ParseConditionalExpression(scan)
- if not left: return None
- while scan.HasMore() and (scan.Current() in LOGICALS):
- op = scan.Current()
- scan.Advance()
- right = ParseConditionalExpression(scan)
- if not right:
- return None
- left = Operation(left, op, right)
- return left
-
-
-def ParseCondition(expr):
- """Parses a logical expression into an Expression object"""
- tokens = Tokenizer(expr).Tokenize()
- if not tokens:
- print "Malformed expression: '%s'" % expr
- return None
- scan = Scanner(tokens)
- ast = ParseLogicalExpression(scan)
- if not ast:
- print "Malformed expression: '%s'" % expr
- return None
- if scan.HasMore():
- print "Malformed expression: '%s'" % expr
- return None
- return ast
-
-
-class ClassifiedTest(object):
-
- def __init__(self, case, outcomes):
- self.case = case
- self.outcomes = outcomes
-
- def TestsIsolates(self):
- return self.case.TestsIsolates()
-
-
-class Configuration(object):
- """The parsed contents of a configuration file"""
-
- def __init__(self, sections, defs):
- self.sections = sections
- self.defs = defs
-
- def ClassifyTests(self, cases, env):
- sections = [s for s in self.sections if s.condition.Evaluate(env, self.defs)]
- all_rules = reduce(list.__add__, [s.rules for s in sections], [])
- unused_rules = set(all_rules)
- result = [ ]
- all_outcomes = set([])
- for case in cases:
- matches = [ r for r in all_rules if r.Contains(case.path) ]
- outcomes = set([])
- for rule in matches:
- outcomes = outcomes.union(rule.GetOutcomes(env, self.defs))
- unused_rules.discard(rule)
- if not outcomes:
- outcomes = [PASS]
- case.outcomes = outcomes
- all_outcomes = all_outcomes.union(outcomes)
- result.append(ClassifiedTest(case, outcomes))
- return (result, list(unused_rules), all_outcomes)
-
-
-class Section(object):
- """A section of the configuration file. Sections are enabled or
- disabled prior to running the tests, based on their conditions"""
-
- def __init__(self, condition):
- self.condition = condition
- self.rules = [ ]
-
- def AddRule(self, rule):
- self.rules.append(rule)
-
-
-class Rule(object):
- """A single rule that specifies the expected outcome for a single
- test."""
-
- def __init__(self, raw_path, path, value):
- self.raw_path = raw_path
- self.path = path
- self.value = value
-
- def GetOutcomes(self, env, defs):
- set = self.value.GetOutcomes(env, defs)
- assert isinstance(set, ListSet)
- return set.elms
-
- def Contains(self, path):
- if len(self.path) > len(path):
- return False
- for i in xrange(len(self.path)):
- if not self.path[i].match(path[i]):
- return False
- return True
-
-
-HEADER_PATTERN = re.compile(r'\[([^]]+)\]')
-RULE_PATTERN = re.compile(r'\s*([^: ]*)\s*:(.*)')
-DEF_PATTERN = re.compile(r'^def\s*(\w+)\s*=(.*)$')
-PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w\_\.\-\/]+)$')
-
-
-def ReadConfigurationInto(path, sections, defs):
- current_section = Section(Constant(True))
- sections.append(current_section)
- prefix = []
- for line in utils.ReadLinesFrom(path):
- header_match = HEADER_PATTERN.match(line)
- if header_match:
- condition_str = header_match.group(1).strip()
- condition = ParseCondition(condition_str)
- new_section = Section(condition)
- sections.append(new_section)
- current_section = new_section
- continue
- rule_match = RULE_PATTERN.match(line)
- if rule_match:
- path = prefix + SplitPath(rule_match.group(1).strip())
- value_str = rule_match.group(2).strip()
- value = ParseCondition(value_str)
- if not value:
- return False
- current_section.AddRule(Rule(rule_match.group(1), path, value))
- continue
- def_match = DEF_PATTERN.match(line)
- if def_match:
- name = def_match.group(1).lower()
- value = ParseCondition(def_match.group(2).strip())
- if not value:
- return False
- defs[name] = value
- continue
- prefix_match = PREFIX_PATTERN.match(line)
- if prefix_match:
- prefix = SplitPath(prefix_match.group(1).strip())
- continue
- print "Malformed line: '%s'." % line
- return False
- return True
-
-
-# ---------------
-# --- M a i n ---
-# ---------------
-
-
-ARCH_GUESS = utils.GuessArchitecture()
-
-
-def BuildOptions():
- result = optparse.OptionParser()
- result.add_option("-m", "--mode", help="The test modes in which to run (comma-separated)",
- default='release')
- result.add_option("-v", "--verbose", help="Verbose output",
- default=False, action="store_true")
- result.add_option("-S", dest="scons_flags", help="Flag to pass through to scons",
- default=[], action="append")
- result.add_option("-p", "--progress",
- help="The style of progress indicator (verbose, dots, color, mono)",
- choices=PROGRESS_INDICATORS.keys(), default="mono")
- result.add_option("--no-build", help="Don't build requirements",
- default=False, action="store_true")
- result.add_option("--build-only", help="Only build requirements, don't run the tests",
- default=False, action="store_true")
- result.add_option("--report", help="Print a summary of the tests to be run",
- default=False, action="store_true")
- result.add_option("-s", "--suite", help="A test suite",
- default=[], action="append")
- result.add_option("-t", "--timeout", help="Timeout in seconds",
- default=60, type="int")
- result.add_option("--arch", help='The architecture to run tests for',
- default='none')
- result.add_option("--snapshot", help="Run the tests with snapshot turned on",
- default=False, action="store_true")
- result.add_option("--simulator", help="Run tests with architecture simulator",
- default='none')
- result.add_option("--special-command", default=None)
- result.add_option("--valgrind", help="Run tests through valgrind",
- default=False, action="store_true")
- result.add_option("--cat", help="Print the source of the tests",
- default=False, action="store_true")
- result.add_option("--warn-unused", help="Report unused rules",
- default=False, action="store_true")
- result.add_option("-j", help="The number of parallel tasks to run",
- default=1, type="int")
- result.add_option("--time", help="Print timing information after running",
- default=False, action="store_true")
- result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests",
- dest="suppress_dialogs", default=True, action="store_true")
- result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
- dest="suppress_dialogs", action="store_false")
- result.add_option("--shell", help="Path to V8 shell", default="shell")
- result.add_option("--isolates", help="Whether to test isolates", default=False, action="store_true")
- result.add_option("--store-unexpected-output",
- help="Store the temporary JS files from tests that fails",
- dest="store_unexpected_output", default=True, action="store_true")
- result.add_option("--no-store-unexpected-output",
- help="Deletes the temporary JS files from tests that fails",
- dest="store_unexpected_output", action="store_false")
- result.add_option("--stress-only",
- help="Only run tests with --always-opt --stress-opt",
- default=False, action="store_true")
- result.add_option("--nostress",
- help="Don't run crankshaft --always-opt --stress-op test",
- default=False, action="store_true")
- result.add_option("--crankshaft",
- help="Run with the --crankshaft flag",
- default=False, action="store_true")
- result.add_option("--shard-count",
- help="Split testsuites into this number of shards",
- default=1, type="int")
- result.add_option("--shard-run",
- help="Run this shard from the split up tests.",
- default=1, type="int")
- result.add_option("--noprof", help="Disable profiling support",
- default=False)
- return result
-
-
-def ProcessOptions(options):
- global VERBOSE
- VERBOSE = options.verbose
- options.mode = options.mode.split(',')
- for mode in options.mode:
- if not mode in ['debug', 'release']:
- print "Unknown mode %s" % mode
- return False
- if options.simulator != 'none':
- # Simulator argument was set. Make sure arch and simulator agree.
- if options.simulator != options.arch:
- if options.arch == 'none':
- options.arch = options.simulator
- else:
- print "Architecture %s does not match sim %s" %(options.arch, options.simulator)
- return False
- # Ensure that the simulator argument is handed down to scons.
- options.scons_flags.append("simulator=" + options.simulator)
- else:
- # If options.arch is not set by the command line and no simulator setting
- # was found, set the arch to the guess.
- if options.arch == 'none':
- options.arch = ARCH_GUESS
- options.scons_flags.append("arch=" + options.arch)
- if options.snapshot:
- options.scons_flags.append("snapshot=on")
- global VARIANT_FLAGS
- if options.stress_only:
- VARIANT_FLAGS = [['--stress-opt', '--always-opt']]
- if options.nostress:
- VARIANT_FLAGS = [[],['--nocrankshaft']]
- if options.crankshaft:
- if options.special_command:
- options.special_command += " --crankshaft"
- else:
- options.special_command = "@--crankshaft"
- if options.noprof:
- options.scons_flags.append("prof=off")
- options.scons_flags.append("profilingsupport=off")
- return True
-
-
-REPORT_TEMPLATE = """\
-Total: %(total)i tests
- * %(skipped)4d tests will be skipped
- * %(nocrash)4d tests are expected to be flaky but not crash
- * %(pass)4d tests are expected to pass
- * %(fail_ok)4d tests are expected to fail that we won't fix
- * %(fail)4d tests are expected to fail that we should fix\
-"""
-
-def PrintReport(cases):
- def IsFlaky(o):
- return (PASS in o) and (FAIL in o) and (not CRASH in o) and (not OKAY in o)
- def IsFailOk(o):
- return (len(o) == 2) and (FAIL in o) and (OKAY in o)
- unskipped = [c for c in cases if not SKIP in c.outcomes]
- print REPORT_TEMPLATE % {
- 'total': len(cases),
- 'skipped': len(cases) - len(unskipped),
- 'nocrash': len([t for t in unskipped if IsFlaky(t.outcomes)]),
- 'pass': len([t for t in unskipped if list(t.outcomes) == [PASS]]),
- 'fail_ok': len([t for t in unskipped if IsFailOk(t.outcomes)]),
- 'fail': len([t for t in unskipped if list(t.outcomes) == [FAIL]])
- }
-
-
-class Pattern(object):
-
- def __init__(self, pattern):
- self.pattern = pattern
- self.compiled = None
-
- def match(self, str):
- if not self.compiled:
- pattern = "^" + self.pattern.replace('*', '.*') + "$"
- self.compiled = re.compile(pattern)
- return self.compiled.match(str)
-
- def __str__(self):
- return self.pattern
-
-
-def SplitPath(s):
- stripped = [ c.strip() for c in s.split('/') ]
- return [ Pattern(s) for s in stripped if len(s) > 0 ]
-
-
-def GetSpecialCommandProcessor(value):
- if (not value) or (value.find('@') == -1):
- def ExpandCommand(args):
- return args
- return ExpandCommand
- else:
- pos = value.find('@')
- import urllib
- prefix = urllib.unquote(value[:pos]).split()
- suffix = urllib.unquote(value[pos+1:]).split()
- def ExpandCommand(args):
- return prefix + args + suffix
- return ExpandCommand
-
-
-BUILT_IN_TESTS = ['mjsunit', 'cctest', 'message', 'preparser']
-
-
-def GetSuites(test_root):
- def IsSuite(path):
- return isdir(path) and exists(join(path, 'testcfg.py'))
- return [ f for f in os.listdir(test_root) if IsSuite(join(test_root, f)) ]
-
-
-def FormatTime(d):
- millis = round(d * 1000) % 1000
- return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
-
-def ShardTests(tests, options):
- if options.shard_count < 2:
- return tests
- if options.shard_run < 1 or options.shard_run > options.shard_count:
- print "shard-run not a valid number, should be in [1:shard-count]"
- print "defaulting back to running all tests"
- return tests
- count = 0;
- shard = []
- for test in tests:
- if count % options.shard_count == options.shard_run - 1:
- shard.append(test);
- count += 1
- return shard
-
-def Main():
- parser = BuildOptions()
- (options, args) = parser.parse_args()
- if not ProcessOptions(options):
- parser.print_help()
- return 1
-
- workspace = abspath(join(dirname(sys.argv[0]), '..'))
- suites = GetSuites(join(workspace, 'test'))
- repositories = [TestRepository(join(workspace, 'test', name)) for name in suites]
- repositories += [TestRepository(a) for a in options.suite]
-
- root = LiteralTestSuite(repositories)
- if len(args) == 0:
- paths = [SplitPath(t) for t in BUILT_IN_TESTS]
- else:
- paths = [ ]
- for arg in args:
- path = SplitPath(arg)
- paths.append(path)
-
- # Check for --valgrind option. If enabled, we overwrite the special
- # command flag with a command that uses the run-valgrind.py script.
- if options.valgrind:
- run_valgrind = join(workspace, "tools", "run-valgrind.py")
- options.special_command = "python -u " + run_valgrind + " @"
-
- shell = abspath(options.shell)
- buildspace = dirname(shell)
-
- context = Context(workspace, buildspace, VERBOSE,
- shell,
- options.timeout,
- GetSpecialCommandProcessor(options.special_command),
- options.suppress_dialogs,
- options.store_unexpected_output)
- # First build the required targets
- if not options.no_build:
- reqs = [ ]
- for path in paths:
- reqs += root.GetBuildRequirements(path, context)
- reqs = list(set(reqs))
- if len(reqs) > 0:
- if options.j != 1:
- options.scons_flags += ['-j', str(options.j)]
- if not BuildRequirements(context, reqs, options.mode, options.scons_flags):
- return 1
-
- # Just return if we are only building the targets for running the tests.
- if options.build_only:
- return 0
-
- # Get status for tests
- sections = [ ]
- defs = { }
- root.GetTestStatus(context, sections, defs)
- config = Configuration(sections, defs)
-
- # List the tests
- all_cases = [ ]
- all_unused = [ ]
- unclassified_tests = [ ]
- globally_unused_rules = None
- for path in paths:
- for mode in options.mode:
- env = {
- 'mode': mode,
- 'system': utils.GuessOS(),
- 'arch': options.arch,
- 'simulator': options.simulator,
- 'crankshaft': options.crankshaft
- }
- test_list = root.ListTests([], path, context, mode, [])
- unclassified_tests += test_list
- (cases, unused_rules, all_outcomes) = config.ClassifyTests(test_list, env)
- if globally_unused_rules is None:
- globally_unused_rules = set(unused_rules)
- else:
- globally_unused_rules = globally_unused_rules.intersection(unused_rules)
- all_cases += ShardTests(cases, options)
- all_unused.append(unused_rules)
-
- if options.cat:
- visited = set()
- for test in unclassified_tests:
- key = tuple(test.path)
- if key in visited:
- continue
- visited.add(key)
- print "--- begin source: %s ---" % test.GetLabel()
- source = test.GetSource().strip()
- print source
- print "--- end source: %s ---" % test.GetLabel()
- return 0
-
- if options.warn_unused:
- for rule in globally_unused_rules:
- print "Rule for '%s' was not used." % '/'.join([str(s) for s in rule.path])
-
- if options.report:
- PrintReport(all_cases)
-
- result = None
- def DoSkip(case):
- return SKIP in case.outcomes or SLOW in case.outcomes
- cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
- if not options.isolates:
- cases_to_run = [c for c in cases_to_run if not c.TestsIsolates()]
- if len(cases_to_run) == 0:
- print "No tests to run."
- return 0
- else:
- try:
- start = time.time()
- if RunTestCases(cases_to_run, options.progress, options.j):
- result = 0
- else:
- result = 1
- duration = time.time() - start
- except KeyboardInterrupt:
- print "Interrupted"
- return 1
-
- if options.time:
- # Write the times to stderr to make it easy to separate from the
- # test output.
- print
- sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration))
- timed_tests = [ t.case for t in cases_to_run if not t.case.duration is None ]
- timed_tests.sort(lambda a, b: a.CompareTime(b))
- index = 1
- for entry in timed_tests[:20]:
- t = FormatTime(entry.duration)
- sys.stderr.write("%4i (%s) %s\n" % (index, t, entry.GetLabel()))
- index += 1
-
- return result
-
-
-if __name__ == '__main__':
- sys.exit(Main())
diff --git a/src/3rdparty/v8/tools/tickprocessor-driver.js b/src/3rdparty/v8/tools/tickprocessor-driver.js
deleted file mode 100644
index 4201e43..0000000
--- a/src/3rdparty/v8/tools/tickprocessor-driver.js
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// Tick Processor's code flow.
-
-function processArguments(args) {
- var processor = new ArgumentsProcessor(args);
- if (processor.parse()) {
- return processor.result();
- } else {
- processor.printUsageAndExit();
- }
-}
-
-var entriesProviders = {
- 'unix': UnixCppEntriesProvider,
- 'windows': WindowsCppEntriesProvider,
- 'mac': MacCppEntriesProvider
-};
-
-var params = processArguments(arguments);
-var snapshotLogProcessor;
-if (params.snapshotLogFileName) {
- snapshotLogProcessor = new SnapshotLogProcessor();
- snapshotLogProcessor.processLogFile(params.snapshotLogFileName);
-}
-var tickProcessor = new TickProcessor(
- new (entriesProviders[params.platform])(params.nm),
- params.separateIc,
- params.ignoreUnknown,
- params.stateFilter,
- snapshotLogProcessor);
-tickProcessor.processLogFile(params.logFileName);
-tickProcessor.printStatistics();
diff --git a/src/3rdparty/v8/tools/tickprocessor.js b/src/3rdparty/v8/tools/tickprocessor.js
deleted file mode 100644
index 9d6bfb6..0000000
--- a/src/3rdparty/v8/tools/tickprocessor.js
+++ /dev/null
@@ -1,877 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-function inherits(childCtor, parentCtor) {
- childCtor.prototype.__proto__ = parentCtor.prototype;
-};
-
-
-function V8Profile(separateIc) {
- Profile.call(this);
- if (!separateIc) {
- this.skipThisFunction = function(name) { return V8Profile.IC_RE.test(name); };
- }
-};
-inherits(V8Profile, Profile);
-
-
-V8Profile.IC_RE =
- /^(?:CallIC|LoadIC|StoreIC)|(?:Builtin: (?:Keyed)?(?:Call|Load|Store)IC_)/;
-
-
-/**
- * A thin wrapper around shell's 'read' function showing a file name on error.
- */
-function readFile(fileName) {
- try {
- return read(fileName);
- } catch (e) {
- print(fileName + ': ' + (e.message || e));
- throw e;
- }
-}
-
-
-/**
- * Parser for dynamic code optimization state.
- */
-function parseState(s) {
- switch (s) {
- case "": return Profile.CodeState.COMPILED;
- case "~": return Profile.CodeState.OPTIMIZABLE;
- case "*": return Profile.CodeState.OPTIMIZED;
- }
- throw new Error("unknown code state: " + s);
-}
-
-
-function SnapshotLogProcessor() {
- LogReader.call(this, {
- 'code-creation': {
- parsers: [null, parseInt, parseInt, null, 'var-args'],
- processor: this.processCodeCreation },
- 'code-move': { parsers: [parseInt, parseInt],
- processor: this.processCodeMove },
- 'code-delete': { parsers: [parseInt],
- processor: this.processCodeDelete },
- 'function-creation': null,
- 'function-move': null,
- 'function-delete': null,
- 'sfi-move': null,
- 'snapshot-pos': { parsers: [parseInt, parseInt],
- processor: this.processSnapshotPosition }});
-
- V8Profile.prototype.handleUnknownCode = function(operation, addr) {
- var op = Profile.Operation;
- switch (operation) {
- case op.MOVE:
- print('Snapshot: Code move event for unknown code: 0x' +
- addr.toString(16));
- break;
- case op.DELETE:
- print('Snapshot: Code delete event for unknown code: 0x' +
- addr.toString(16));
- break;
- }
- };
-
- this.profile_ = new V8Profile();
- this.serializedEntries_ = [];
-}
-inherits(SnapshotLogProcessor, LogReader);
-
-
-SnapshotLogProcessor.prototype.processCodeCreation = function(
- type, start, size, name, maybe_func) {
- if (maybe_func.length) {
- var funcAddr = parseInt(maybe_func[0]);
- var state = parseState(maybe_func[1]);
- this.profile_.addFuncCode(type, name, start, size, funcAddr, state);
- } else {
- this.profile_.addCode(type, name, start, size);
- }
-};
-
-
-SnapshotLogProcessor.prototype.processCodeMove = function(from, to) {
- this.profile_.moveCode(from, to);
-};
-
-
-SnapshotLogProcessor.prototype.processCodeDelete = function(start) {
- this.profile_.deleteCode(start);
-};
-
-
-SnapshotLogProcessor.prototype.processSnapshotPosition = function(addr, pos) {
- this.serializedEntries_[pos] = this.profile_.findEntry(addr);
-};
-
-
-SnapshotLogProcessor.prototype.processLogFile = function(fileName) {
- var contents = readFile(fileName);
- this.processLogChunk(contents);
-};
-
-
-SnapshotLogProcessor.prototype.getSerializedEntryName = function(pos) {
- var entry = this.serializedEntries_[pos];
- return entry ? entry.getRawName() : null;
-};
-
-
-function TickProcessor(
- cppEntriesProvider, separateIc, ignoreUnknown, stateFilter, snapshotLogProcessor) {
- LogReader.call(this, {
- 'shared-library': { parsers: [null, parseInt, parseInt],
- processor: this.processSharedLibrary },
- 'code-creation': {
- parsers: [null, parseInt, parseInt, null, 'var-args'],
- processor: this.processCodeCreation },
- 'code-move': { parsers: [parseInt, parseInt],
- processor: this.processCodeMove },
- 'code-delete': { parsers: [parseInt],
- processor: this.processCodeDelete },
- 'sfi-move': { parsers: [parseInt, parseInt],
- processor: this.processFunctionMove },
- 'snapshot-pos': { parsers: [parseInt, parseInt],
- processor: this.processSnapshotPosition },
- 'tick': {
- parsers: [parseInt, parseInt, parseInt,
- parseInt, parseInt, 'var-args'],
- processor: this.processTick },
- 'heap-sample-begin': { parsers: [null, null, parseInt],
- processor: this.processHeapSampleBegin },
- 'heap-sample-end': { parsers: [null, null],
- processor: this.processHeapSampleEnd },
- 'heap-js-prod-item': { parsers: [null, 'var-args'],
- processor: this.processJSProducer },
- // Ignored events.
- 'profiler': null,
- 'function-creation': null,
- 'function-move': null,
- 'function-delete': null,
- 'heap-sample-stats': null,
- 'heap-sample-item': null,
- 'heap-js-cons-item': null,
- 'heap-js-ret-item': null,
- // Obsolete row types.
- 'code-allocate': null,
- 'begin-code-region': null,
- 'end-code-region': null });
-
- this.cppEntriesProvider_ = cppEntriesProvider;
- this.ignoreUnknown_ = ignoreUnknown;
- this.stateFilter_ = stateFilter;
- this.snapshotLogProcessor_ = snapshotLogProcessor;
- this.deserializedEntriesNames_ = [];
- var ticks = this.ticks_ =
- { total: 0, unaccounted: 0, excluded: 0, gc: 0 };
-
- V8Profile.prototype.handleUnknownCode = function(
- operation, addr, opt_stackPos) {
- var op = Profile.Operation;
- switch (operation) {
- case op.MOVE:
- print('Code move event for unknown code: 0x' + addr.toString(16));
- break;
- case op.DELETE:
- print('Code delete event for unknown code: 0x' + addr.toString(16));
- break;
- case op.TICK:
- // Only unknown PCs (the first frame) are reported as unaccounted,
- // otherwise tick balance will be corrupted (this behavior is compatible
- // with the original tickprocessor.py script.)
- if (opt_stackPos == 0) {
- ticks.unaccounted++;
- }
- break;
- }
- };
-
- this.profile_ = new V8Profile(separateIc);
- this.codeTypes_ = {};
- // Count each tick as a time unit.
- this.viewBuilder_ = new ViewBuilder(1);
- this.lastLogFileName_ = null;
-
- this.generation_ = 1;
- this.currentProducerProfile_ = null;
-};
-inherits(TickProcessor, LogReader);
-
-
-TickProcessor.VmStates = {
- JS: 0,
- GC: 1,
- COMPILER: 2,
- OTHER: 3,
- EXTERNAL: 4
-};
-
-
-TickProcessor.CodeTypes = {
- CPP: 0,
- SHARED_LIB: 1
-};
-// Otherwise, this is JS-related code. We are not adding it to
-// codeTypes_ map because there can be zillions of them.
-
-
-TickProcessor.CALL_PROFILE_CUTOFF_PCT = 2.0;
-
-
-/**
- * @override
- */
-TickProcessor.prototype.printError = function(str) {
- print(str);
-};
-
-
-TickProcessor.prototype.setCodeType = function(name, type) {
- this.codeTypes_[name] = TickProcessor.CodeTypes[type];
-};
-
-
-TickProcessor.prototype.isSharedLibrary = function(name) {
- return this.codeTypes_[name] == TickProcessor.CodeTypes.SHARED_LIB;
-};
-
-
-TickProcessor.prototype.isCppCode = function(name) {
- return this.codeTypes_[name] == TickProcessor.CodeTypes.CPP;
-};
-
-
-TickProcessor.prototype.isJsCode = function(name) {
- return !(name in this.codeTypes_);
-};
-
-
-TickProcessor.prototype.processLogFile = function(fileName) {
- this.lastLogFileName_ = fileName;
- var line;
- while (line = readline()) {
- this.processLogLine(line);
- }
-};
-
-
-TickProcessor.prototype.processLogFileInTest = function(fileName) {
- // Hack file name to avoid dealing with platform specifics.
- this.lastLogFileName_ = 'v8.log';
- var contents = readFile(fileName);
- this.processLogChunk(contents);
-};
-
-
-TickProcessor.prototype.processSharedLibrary = function(
- name, startAddr, endAddr) {
- var entry = this.profile_.addLibrary(name, startAddr, endAddr);
- this.setCodeType(entry.getName(), 'SHARED_LIB');
-
- var self = this;
- var libFuncs = this.cppEntriesProvider_.parseVmSymbols(
- name, startAddr, endAddr, function(fName, fStart, fEnd) {
- self.profile_.addStaticCode(fName, fStart, fEnd);
- self.setCodeType(fName, 'CPP');
- });
-};
-
-
-TickProcessor.prototype.processCodeCreation = function(
- type, start, size, name, maybe_func) {
- name = this.deserializedEntriesNames_[start] || name;
- if (maybe_func.length) {
- var funcAddr = parseInt(maybe_func[0]);
- var state = parseState(maybe_func[1]);
- this.profile_.addFuncCode(type, name, start, size, funcAddr, state);
- } else {
- this.profile_.addCode(type, name, start, size);
- }
-};
-
-
-TickProcessor.prototype.processCodeMove = function(from, to) {
- this.profile_.moveCode(from, to);
-};
-
-
-TickProcessor.prototype.processCodeDelete = function(start) {
- this.profile_.deleteCode(start);
-};
-
-
-TickProcessor.prototype.processFunctionMove = function(from, to) {
- this.profile_.moveFunc(from, to);
-};
-
-
-TickProcessor.prototype.processSnapshotPosition = function(addr, pos) {
- if (this.snapshotLogProcessor_) {
- this.deserializedEntriesNames_[addr] =
- this.snapshotLogProcessor_.getSerializedEntryName(pos);
- }
-};
-
-
-TickProcessor.prototype.includeTick = function(vmState) {
- return this.stateFilter_ == null || this.stateFilter_ == vmState;
-};
-
-TickProcessor.prototype.processTick = function(pc,
- sp,
- is_external_callback,
- tos_or_external_callback,
- vmState,
- stack) {
- this.ticks_.total++;
- if (vmState == TickProcessor.VmStates.GC) this.ticks_.gc++;
- if (!this.includeTick(vmState)) {
- this.ticks_.excluded++;
- return;
- }
- if (is_external_callback) {
- // Don't use PC when in external callback code, as it can point
- // inside callback's code, and we will erroneously report
- // that a callback calls itself. Instead we use tos_or_external_callback,
- // as simply resetting PC will produce unaccounted ticks.
- pc = tos_or_external_callback;
- tos_or_external_callback = 0;
- } else if (tos_or_external_callback) {
- // Find out, if top of stack was pointing inside a JS function
- // meaning that we have encountered a frameless invocation.
- var funcEntry = this.profile_.findEntry(tos_or_external_callback);
- if (!funcEntry || !funcEntry.isJSFunction || !funcEntry.isJSFunction()) {
- tos_or_external_callback = 0;
- }
- }
-
- this.profile_.recordTick(this.processStack(pc, tos_or_external_callback, stack));
-};
-
-
-TickProcessor.prototype.processHeapSampleBegin = function(space, state, ticks) {
- if (space != 'Heap') return;
- this.currentProducerProfile_ = new CallTree();
-};
-
-
-TickProcessor.prototype.processHeapSampleEnd = function(space, state) {
- if (space != 'Heap' || !this.currentProducerProfile_) return;
-
- print('Generation ' + this.generation_ + ':');
- var tree = this.currentProducerProfile_;
- tree.computeTotalWeights();
- var producersView = this.viewBuilder_.buildView(tree);
- // Sort by total time, desc, then by name, desc.
- producersView.sort(function(rec1, rec2) {
- return rec2.totalTime - rec1.totalTime ||
- (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1); });
- this.printHeavyProfile(producersView.head.children);
-
- this.currentProducerProfile_ = null;
- this.generation_++;
-};
-
-
-TickProcessor.prototype.processJSProducer = function(constructor, stack) {
- if (!this.currentProducerProfile_) return;
- if (stack.length == 0) return;
- var first = stack.shift();
- var processedStack =
- this.profile_.resolveAndFilterFuncs_(this.processStack(first, 0, stack));
- processedStack.unshift(constructor);
- this.currentProducerProfile_.addPath(processedStack);
-};
-
-
-TickProcessor.prototype.printStatistics = function() {
- print('Statistical profiling result from ' + this.lastLogFileName_ +
- ', (' + this.ticks_.total +
- ' ticks, ' + this.ticks_.unaccounted + ' unaccounted, ' +
- this.ticks_.excluded + ' excluded).');
-
- if (this.ticks_.total == 0) return;
-
- // Print the unknown ticks percentage if they are not ignored.
- if (!this.ignoreUnknown_ && this.ticks_.unaccounted > 0) {
- this.printHeader('Unknown');
- this.printCounter(this.ticks_.unaccounted, this.ticks_.total);
- }
-
- var flatProfile = this.profile_.getFlatProfile();
- var flatView = this.viewBuilder_.buildView(flatProfile);
- // Sort by self time, desc, then by name, desc.
- flatView.sort(function(rec1, rec2) {
- return rec2.selfTime - rec1.selfTime ||
- (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1); });
- var totalTicks = this.ticks_.total;
- if (this.ignoreUnknown_) {
- totalTicks -= this.ticks_.unaccounted;
- }
- // Our total time contains all the ticks encountered,
- // while profile only knows about the filtered ticks.
- flatView.head.totalTime = totalTicks;
-
- // Count library ticks
- var flatViewNodes = flatView.head.children;
- var self = this;
- var libraryTicks = 0;
- this.processProfile(flatViewNodes,
- function(name) { return self.isSharedLibrary(name); },
- function(rec) { libraryTicks += rec.selfTime; });
- var nonLibraryTicks = totalTicks - libraryTicks;
-
- this.printHeader('Shared libraries');
- this.printEntries(flatViewNodes, null,
- function(name) { return self.isSharedLibrary(name); });
-
- this.printHeader('JavaScript');
- this.printEntries(flatViewNodes, nonLibraryTicks,
- function(name) { return self.isJsCode(name); });
-
- this.printHeader('C++');
- this.printEntries(flatViewNodes, nonLibraryTicks,
- function(name) { return self.isCppCode(name); });
-
- this.printHeader('GC');
- this.printCounter(this.ticks_.gc, totalTicks);
-
- this.printHeavyProfHeader();
- var heavyProfile = this.profile_.getBottomUpProfile();
- var heavyView = this.viewBuilder_.buildView(heavyProfile);
- // To show the same percentages as in the flat profile.
- heavyView.head.totalTime = totalTicks;
- // Sort by total time, desc, then by name, desc.
- heavyView.sort(function(rec1, rec2) {
- return rec2.totalTime - rec1.totalTime ||
- (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1); });
- this.printHeavyProfile(heavyView.head.children);
-};
-
-
-function padLeft(s, len) {
- s = s.toString();
- if (s.length < len) {
- var padLength = len - s.length;
- if (!(padLength in padLeft)) {
- padLeft[padLength] = new Array(padLength + 1).join(' ');
- }
- s = padLeft[padLength] + s;
- }
- return s;
-};
-
-
-TickProcessor.prototype.printHeader = function(headerTitle) {
- print('\n [' + headerTitle + ']:');
- print(' ticks total nonlib name');
-};
-
-
-TickProcessor.prototype.printHeavyProfHeader = function() {
- print('\n [Bottom up (heavy) profile]:');
- print(' Note: percentage shows a share of a particular caller in the ' +
- 'total\n' +
- ' amount of its parent calls.');
- print(' Callers occupying less than ' +
- TickProcessor.CALL_PROFILE_CUTOFF_PCT.toFixed(1) +
- '% are not shown.\n');
- print(' ticks parent name');
-};
-
-
-TickProcessor.prototype.printCounter = function(ticksCount, totalTicksCount) {
- var pct = ticksCount * 100.0 / totalTicksCount;
- print(' ' + padLeft(ticksCount, 5) + ' ' + padLeft(pct.toFixed(1), 5) + '%');
-};
-
-
-TickProcessor.prototype.processProfile = function(
- profile, filterP, func) {
- for (var i = 0, n = profile.length; i < n; ++i) {
- var rec = profile[i];
- if (!filterP(rec.internalFuncName)) {
- continue;
- }
- func(rec);
- }
-};
-
-
-TickProcessor.prototype.printEntries = function(
- profile, nonLibTicks, filterP) {
- this.processProfile(profile, filterP, function (rec) {
- if (rec.selfTime == 0) return;
- var nonLibPct = nonLibTicks != null ?
- rec.selfTime * 100.0 / nonLibTicks : 0.0;
- print(' ' + padLeft(rec.selfTime, 5) + ' ' +
- padLeft(rec.selfPercent.toFixed(1), 5) + '% ' +
- padLeft(nonLibPct.toFixed(1), 5) + '% ' +
- rec.internalFuncName);
- });
-};
-
-
-TickProcessor.prototype.printHeavyProfile = function(profile, opt_indent) {
- var self = this;
- var indent = opt_indent || 0;
- var indentStr = padLeft('', indent);
- this.processProfile(profile, function() { return true; }, function (rec) {
- // Cut off too infrequent callers.
- if (rec.parentTotalPercent < TickProcessor.CALL_PROFILE_CUTOFF_PCT) return;
- print(' ' + padLeft(rec.totalTime, 5) + ' ' +
- padLeft(rec.parentTotalPercent.toFixed(1), 5) + '% ' +
- indentStr + rec.internalFuncName);
- // Limit backtrace depth.
- if (indent < 10) {
- self.printHeavyProfile(rec.children, indent + 2);
- }
- // Delimit top-level functions.
- if (indent == 0) {
- print('');
- }
- });
-};
-
-
-function CppEntriesProvider() {
-};
-
-
-CppEntriesProvider.prototype.parseVmSymbols = function(
- libName, libStart, libEnd, processorFunc) {
- this.loadSymbols(libName);
-
- var prevEntry;
-
- function addEntry(funcInfo) {
- // Several functions can be mapped onto the same address. To avoid
- // creating zero-sized entries, skip such duplicates.
- // Also double-check that function belongs to the library address space.
- if (prevEntry && !prevEntry.end &&
- prevEntry.start < funcInfo.start &&
- prevEntry.start >= libStart && funcInfo.start <= libEnd) {
- processorFunc(prevEntry.name, prevEntry.start, funcInfo.start);
- }
- if (funcInfo.end &&
- (!prevEntry || prevEntry.start != funcInfo.start) &&
- funcInfo.start >= libStart && funcInfo.end <= libEnd) {
- processorFunc(funcInfo.name, funcInfo.start, funcInfo.end);
- }
- prevEntry = funcInfo;
- }
-
- while (true) {
- var funcInfo = this.parseNextLine();
- if (funcInfo === null) {
- continue;
- } else if (funcInfo === false) {
- break;
- }
- if (funcInfo.start < libStart && funcInfo.start < libEnd - libStart) {
- funcInfo.start += libStart;
- }
- if (funcInfo.size) {
- funcInfo.end = funcInfo.start + funcInfo.size;
- }
- addEntry(funcInfo);
- }
- addEntry({name: '', start: libEnd});
-};
-
-
-CppEntriesProvider.prototype.loadSymbols = function(libName) {
-};
-
-
-CppEntriesProvider.prototype.parseNextLine = function() {
- return false;
-};
-
-
-function UnixCppEntriesProvider(nmExec) {
- this.symbols = [];
- this.parsePos = 0;
- this.nmExec = nmExec;
- this.FUNC_RE = /^([0-9a-fA-F]{8,16}) ([0-9a-fA-F]{8,16} )?[tTwW] (.*)$/;
-};
-inherits(UnixCppEntriesProvider, CppEntriesProvider);
-
-
-UnixCppEntriesProvider.prototype.loadSymbols = function(libName) {
- this.parsePos = 0;
- try {
- this.symbols = [
- os.system(this.nmExec, ['-C', '-n', '-S', libName], -1, -1),
- os.system(this.nmExec, ['-C', '-n', '-S', '-D', libName], -1, -1)
- ];
- } catch (e) {
- // If the library cannot be found on this system let's not panic.
- this.symbols = ['', ''];
- }
-};
-
-
-UnixCppEntriesProvider.prototype.parseNextLine = function() {
- if (this.symbols.length == 0) {
- return false;
- }
- var lineEndPos = this.symbols[0].indexOf('\n', this.parsePos);
- if (lineEndPos == -1) {
- this.symbols.shift();
- this.parsePos = 0;
- return this.parseNextLine();
- }
-
- var line = this.symbols[0].substring(this.parsePos, lineEndPos);
- this.parsePos = lineEndPos + 1;
- var fields = line.match(this.FUNC_RE);
- var funcInfo = null;
- if (fields) {
- funcInfo = { name: fields[3], start: parseInt(fields[1], 16) };
- if (fields[2]) {
- funcInfo.size = parseInt(fields[2], 16);
- }
- }
- return funcInfo;
-};
-
-
-function MacCppEntriesProvider(nmExec) {
- UnixCppEntriesProvider.call(this, nmExec);
- // Note an empty group. It is required, as UnixCppEntriesProvider expects 3 groups.
- this.FUNC_RE = /^([0-9a-fA-F]{8,16}) ()[iItT] (.*)$/;
-};
-inherits(MacCppEntriesProvider, UnixCppEntriesProvider);
-
-
-MacCppEntriesProvider.prototype.loadSymbols = function(libName) {
- this.parsePos = 0;
- try {
- this.symbols = [os.system(this.nmExec, ['-n', '-f', libName], -1, -1), ''];
- } catch (e) {
- // If the library cannot be found on this system let's not panic.
- this.symbols = '';
- }
-};
-
-
-function WindowsCppEntriesProvider() {
- this.symbols = '';
- this.parsePos = 0;
-};
-inherits(WindowsCppEntriesProvider, CppEntriesProvider);
-
-
-WindowsCppEntriesProvider.FILENAME_RE = /^(.*)\.([^.]+)$/;
-
-
-WindowsCppEntriesProvider.FUNC_RE =
- /^\s+0001:[0-9a-fA-F]{8}\s+([_\?@$0-9a-zA-Z]+)\s+([0-9a-fA-F]{8}).*$/;
-
-
-WindowsCppEntriesProvider.IMAGE_BASE_RE =
- /^\s+0000:00000000\s+___ImageBase\s+([0-9a-fA-F]{8}).*$/;
-
-
-// This is almost a constant on Windows.
-WindowsCppEntriesProvider.EXE_IMAGE_BASE = 0x00400000;
-
-
-WindowsCppEntriesProvider.prototype.loadSymbols = function(libName) {
- var fileNameFields = libName.match(WindowsCppEntriesProvider.FILENAME_RE);
- if (!fileNameFields) return;
- var mapFileName = fileNameFields[1] + '.map';
- this.moduleType_ = fileNameFields[2].toLowerCase();
- try {
- this.symbols = read(mapFileName);
- } catch (e) {
- // If .map file cannot be found let's not panic.
- this.symbols = '';
- }
-};
-
-
-WindowsCppEntriesProvider.prototype.parseNextLine = function() {
- var lineEndPos = this.symbols.indexOf('\r\n', this.parsePos);
- if (lineEndPos == -1) {
- return false;
- }
-
- var line = this.symbols.substring(this.parsePos, lineEndPos);
- this.parsePos = lineEndPos + 2;
-
- // Image base entry is above all other symbols, so we can just
- // terminate parsing.
- var imageBaseFields = line.match(WindowsCppEntriesProvider.IMAGE_BASE_RE);
- if (imageBaseFields) {
- var imageBase = parseInt(imageBaseFields[1], 16);
- if ((this.moduleType_ == 'exe') !=
- (imageBase == WindowsCppEntriesProvider.EXE_IMAGE_BASE)) {
- return false;
- }
- }
-
- var fields = line.match(WindowsCppEntriesProvider.FUNC_RE);
- return fields ?
- { name: this.unmangleName(fields[1]), start: parseInt(fields[2], 16) } :
- null;
-};
-
-
-/**
- * Performs very simple unmangling of C++ names.
- *
- * Does not handle arguments and template arguments. The mangled names have
- * the form:
- *
- * ?LookupInDescriptor@JSObject@internal@v8@@...arguments info...
- */
-WindowsCppEntriesProvider.prototype.unmangleName = function(name) {
- // Empty or non-mangled name.
- if (name.length < 1 || name.charAt(0) != '?') return name;
- var nameEndPos = name.indexOf('@@');
- var components = name.substring(1, nameEndPos).split('@');
- components.reverse();
- return components.join('::');
-};
-
-
-function ArgumentsProcessor(args) {
- this.args_ = args;
- this.result_ = ArgumentsProcessor.DEFAULTS;
-
- this.argsDispatch_ = {
- '-j': ['stateFilter', TickProcessor.VmStates.JS,
- 'Show only ticks from JS VM state'],
- '-g': ['stateFilter', TickProcessor.VmStates.GC,
- 'Show only ticks from GC VM state'],
- '-c': ['stateFilter', TickProcessor.VmStates.COMPILER,
- 'Show only ticks from COMPILER VM state'],
- '-o': ['stateFilter', TickProcessor.VmStates.OTHER,
- 'Show only ticks from OTHER VM state'],
- '-e': ['stateFilter', TickProcessor.VmStates.EXTERNAL,
- 'Show only ticks from EXTERNAL VM state'],
- '--ignore-unknown': ['ignoreUnknown', true,
- 'Exclude ticks of unknown code entries from processing'],
- '--separate-ic': ['separateIc', true,
- 'Separate IC entries'],
- '--unix': ['platform', 'unix',
- 'Specify that we are running on *nix platform'],
- '--windows': ['platform', 'windows',
- 'Specify that we are running on Windows platform'],
- '--mac': ['platform', 'mac',
- 'Specify that we are running on Mac OS X platform'],
- '--nm': ['nm', 'nm',
- 'Specify the \'nm\' executable to use (e.g. --nm=/my_dir/nm)'],
- '--snapshot-log': ['snapshotLogFileName', 'snapshot.log',
- 'Specify snapshot log file to use (e.g. --snapshot-log=snapshot.log)']
- };
- this.argsDispatch_['--js'] = this.argsDispatch_['-j'];
- this.argsDispatch_['--gc'] = this.argsDispatch_['-g'];
- this.argsDispatch_['--compiler'] = this.argsDispatch_['-c'];
- this.argsDispatch_['--other'] = this.argsDispatch_['-o'];
- this.argsDispatch_['--external'] = this.argsDispatch_['-e'];
-};
-
-
-ArgumentsProcessor.DEFAULTS = {
- logFileName: 'v8.log',
- snapshotLogFileName: null,
- platform: 'unix',
- stateFilter: null,
- ignoreUnknown: false,
- separateIc: false,
- nm: 'nm'
-};
-
-
-ArgumentsProcessor.prototype.parse = function() {
- while (this.args_.length) {
- var arg = this.args_[0];
- if (arg.charAt(0) != '-') {
- break;
- }
- this.args_.shift();
- var userValue = null;
- var eqPos = arg.indexOf('=');
- if (eqPos != -1) {
- userValue = arg.substr(eqPos + 1);
- arg = arg.substr(0, eqPos);
- }
- if (arg in this.argsDispatch_) {
- var dispatch = this.argsDispatch_[arg];
- this.result_[dispatch[0]] = userValue == null ? dispatch[1] : userValue;
- } else {
- return false;
- }
- }
-
- if (this.args_.length >= 1) {
- this.result_.logFileName = this.args_.shift();
- }
- return true;
-};
-
-
-ArgumentsProcessor.prototype.result = function() {
- return this.result_;
-};
-
-
-ArgumentsProcessor.prototype.printUsageAndExit = function() {
-
- function padRight(s, len) {
- s = s.toString();
- if (s.length < len) {
- s = s + (new Array(len - s.length + 1).join(' '));
- }
- return s;
- }
-
- print('Cmdline args: [options] [log-file-name]\n' +
- 'Default log file name is "' +
- ArgumentsProcessor.DEFAULTS.logFileName + '".\n');
- print('Options:');
- for (var arg in this.argsDispatch_) {
- var synonims = [arg];
- var dispatch = this.argsDispatch_[arg];
- for (var synArg in this.argsDispatch_) {
- if (arg !== synArg && dispatch === this.argsDispatch_[synArg]) {
- synonims.push(synArg);
- delete this.argsDispatch_[synArg];
- }
- }
- print(' ' + padRight(synonims.join(', '), 20) + dispatch[2]);
- }
- quit(2);
-};
-
diff --git a/src/3rdparty/v8/tools/utils.py b/src/3rdparty/v8/tools/utils.py
deleted file mode 100644
index fb94d14..0000000
--- a/src/3rdparty/v8/tools/utils.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright 2008 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import platform
-import re
-
-
-# Reads a .list file into an array of strings
-def ReadLinesFrom(name):
- list = []
- for line in open(name):
- if '#' in line:
- line = line[:line.find('#')]
- line = line.strip()
- if len(line) == 0:
- continue
- list.append(line)
- return list
-
-
-def GuessOS():
- id = platform.system()
- if id == 'Linux':
- return 'linux'
- elif id == 'Darwin':
- return 'macos'
- elif id.find('CYGWIN') >= 0:
- return 'cygwin'
- elif id == 'Windows' or id == 'Microsoft':
- # On Windows Vista platform.system() can return 'Microsoft' with some
- # versions of Python, see http://bugs.python.org/issue1082
- return 'win32'
- elif id == 'FreeBSD':
- return 'freebsd'
- elif id == 'OpenBSD':
- return 'openbsd'
- elif id == 'SunOS':
- return 'solaris'
- else:
- return None
-
-
-# This will default to building the 32 bit VM even on machines that are capable
-# of running the 64 bit VM. Use the scons option --arch=x64 to force it to build
-# the 64 bit VM.
-def GuessArchitecture():
- id = platform.machine()
- id = id.lower() # Windows 7 capitalizes 'AMD64'.
- if id.startswith('arm'):
- return 'arm'
- elif (not id) or (not re.match('(x|i[3-6])86$', id) is None):
- return 'ia32'
- elif id == 'i86pc':
- return 'ia32'
- elif id == 'x86_64':
- return 'ia32'
- elif id == 'amd64':
- return 'ia32'
- else:
- return None
-
-
-def GuessWordsize():
- if '64' in platform.machine():
- return '64'
- else:
- return '32'
-
-
-def IsWindows():
- return GuessOS() == 'win32'
diff --git a/src/3rdparty/v8/tools/visual_studio/README.txt b/src/3rdparty/v8/tools/visual_studio/README.txt
deleted file mode 100644
index c46aa37..0000000
--- a/src/3rdparty/v8/tools/visual_studio/README.txt
+++ /dev/null
@@ -1,70 +0,0 @@
-This directory contains Microsoft Visual Studio project files for including v8
-in a Visual Studio/Visual C++ Express solution. All these project files have
-been created for use with Microsoft Visual Studio 2005. They can however also
-be used in both Visual Studio 2008 and Visual C++ 2008 Express Edition. When
-using the project files in the 2008 editions minor upgrades to the files will
-be performed by Visual Studio.
-
-v8_base.vcproj
---------------
-Base V8 library containing all the V8 code but no JavaScript library code.
-
-v8.vcproj
----------
-V8 library containing all the V8 and JavaScript library code embedded as source
-which is compiled as V8 is running.
-
-v8_mksnapshot.vcproj
---------------------
-Executable v8_mksnapshot.exe for building a heap snapshot from a running V8.
-
-v8_snapshot_cc.vcproj
----------------------
-Uses v8_mksnapshot.exe to generate snapshot.cc, which is used in
-v8_snapshot.vcproj.
-
-v8_snapshot.vcproj
-------------------
-V8 library containing all the V8 and JavaScript library code embedded as a heap
-snapshot instead of source to be compiled as V8 is running. Using this library
-provides significantly faster startup time than v8.vcproj.
-
-The property sheets common.vsprops, debug.vsprops and release.vsprops contains
-most of the configuration options and are inhireted by the project files
-described above. The location of the output directory used are defined in
-common.vsprops.
-
-With regard to Platform SDK version V8 has no specific requriments and builds
-with either what is supplied with Visual Studio 2005 or the latest Platform SDK
-from Microsoft.
-
-When adding these projects to a solution the following dependencies needs to be
-in place:
-
- v8.vcproj depends on v8_base.vcproj
- v8_mksnapshot.vcproj depends on v8.vcproj
- v8_snapshot_cc.vcproj depends on v8_mksnapshot.vcproj
- v8_snapshot.vcproj depends on v8_snapshot_cc.vcproj and v8_base.vcproj
-
-A project which uses V8 should then depend on v8_snapshot.vcproj.
-
-If V8 without snapshot if preferred only v8_base.vcproj and v8.vcproj are
-required and a project which uses V8 should depend on v8.vcproj.
-
-Two sample project files are available as well. These are v8_shell_sample.vcproj
-for building the sample in samples\shell.cc and v8_process_sample.vcproj for
-building the sample in samples\process.cc. Add either of these (or both) to a
-solution with v8_base, v8, v8_mksnapshot and v8_snapshot set up as described
-solution with v8_base, v8, v8_mksnapshot and v8_snapshot set up as described
-above and have them depend on v8_snapshot.
-
-Finally a sample Visual Studio solution file for is provided. This solution file
-includes the two sample projects together with the V8 projects and with the
-dependencies configured as described above.
-
-Python requirements
--------------------
-When using the Microsoft Visual Studio project files Python version 2.4 or later
-is required. Make sure that python.exe is on the path before running Visual
-Studio. The use of Python is in the command script js2c.cmd which is used in the
-Custom Build Step for v8natives.js in the v8.vcproj project.
diff --git a/src/3rdparty/v8/tools/visual_studio/arm.vsprops b/src/3rdparty/v8/tools/visual_studio/arm.vsprops
deleted file mode 100644
index 98d0f70..0000000
--- a/src/3rdparty/v8/tools/visual_studio/arm.vsprops
+++ /dev/null
@@ -1,14 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioPropertySheet
- ProjectType="Visual C++"
- Version="8.00"
- OutputDirectory="$(SolutionDir)$(ConfigurationName)Arm"
- IntermediateDirectory="$(SolutionDir)$(ConfigurationName)Arm\obj\$(ProjectName)"
- Name="arm"
- >
- <Tool
- Name="VCCLCompilerTool"
- PreprocessorDefinitions="_USE_32BIT_TIME_T;V8_TARGET_ARCH_ARM"
- DisableSpecificWarnings="4996"
- />
-</VisualStudioPropertySheet>
diff --git a/src/3rdparty/v8/tools/visual_studio/common.vsprops b/src/3rdparty/v8/tools/visual_studio/common.vsprops
deleted file mode 100644
index fa78cdc..0000000
--- a/src/3rdparty/v8/tools/visual_studio/common.vsprops
+++ /dev/null
@@ -1,34 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioPropertySheet
- ProjectType="Visual C++"
- Version="8.00"
- Name="essential"
- CharacterSet="1"
- >
- <Tool
- Name="VCCLCompilerTool"
- AdditionalIncludeDirectories="$(ProjectDir)\..\..\src;$(IntDir)\DerivedSources"
- PreprocessorDefinitions="WIN32;_CRT_SECURE_NO_DEPRECATE;_CRT_NONSTDC_NO_DEPRECATE;_HAS_EXCEPTIONS=0;ENABLE_VMSTATE_TRACKING;ENABLE_LOGGING_AND_PROFILING;ENABLE_DEBUGGER_SUPPORT"
- MinimalRebuild="false"
- ExceptionHandling="0"
- RuntimeTypeInfo="false"
- WarningLevel="3"
- WarnAsError="true"
- Detect64BitPortabilityProblems="false"
- DebugInformationFormat="3"
- DisableSpecificWarnings="4351;4355;4800"
- EnableFunctionLevelLinking="true"
- />
- <Tool
- Name="VCLibrarianTool"
- OutputFile="$(OutDir)\lib\$(ProjectName).lib"
- />
- <Tool
- Name="VCLinkerTool"
- GenerateDebugInformation="true"
- MapFileName="$(OutDir)\$(TargetName).map"
- ImportLibrary="$(OutDir)\lib\$(TargetName).lib"
- FixedBaseAddress="1"
- AdditionalOptions="/IGNORE:4221 /NXCOMPAT"
- />
-</VisualStudioPropertySheet>
diff --git a/src/3rdparty/v8/tools/visual_studio/d8js2c.cmd b/src/3rdparty/v8/tools/visual_studio/d8js2c.cmd
deleted file mode 100644
index df2b89c..0000000
--- a/src/3rdparty/v8/tools/visual_studio/d8js2c.cmd
+++ /dev/null
@@ -1,6 +0,0 @@
-@echo off
-set SOURCE_DIR=%1
-set TARGET_DIR=%2
-set PYTHON="..\..\..\third_party\python_24\python.exe"
-if not exist %PYTHON% set PYTHON=python.exe
-%PYTHON% ..\js2c.py %TARGET_DIR%\natives.cc %TARGET_DIR%\natives-empty.cc D8 %SOURCE_DIR%\macros.py %SOURCE_DIR%\d8.js
diff --git a/src/3rdparty/v8/tools/visual_studio/debug.vsprops b/src/3rdparty/v8/tools/visual_studio/debug.vsprops
deleted file mode 100644
index 60b79fe..0000000
--- a/src/3rdparty/v8/tools/visual_studio/debug.vsprops
+++ /dev/null
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioPropertySheet
- ProjectType="Visual C++"
- Version="8.00"
- Name="debug"
- >
- <Tool
- Name="VCCLCompilerTool"
- Optimization="0"
- PreprocessorDefinitions="DEBUG;_DEBUG;ENABLE_DISASSEMBLER;V8_ENABLE_CHECKS,OBJECT_PRINT"
- RuntimeLibrary="1"
- />
- <Tool
- Name="VCLinkerTool"
- LinkIncremental="2"
- />
-</VisualStudioPropertySheet>
diff --git a/src/3rdparty/v8/tools/visual_studio/ia32.vsprops b/src/3rdparty/v8/tools/visual_studio/ia32.vsprops
deleted file mode 100644
index b574660..0000000
--- a/src/3rdparty/v8/tools/visual_studio/ia32.vsprops
+++ /dev/null
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioPropertySheet
- ProjectType="Visual C++"
- Version="8.00"
- OutputDirectory="$(SolutionDir)$(ConfigurationName)"
- IntermediateDirectory="$(SolutionDir)$(ConfigurationName)\obj\$(ProjectName)"
- Name="ia32"
- >
- <Tool
- Name="VCCLCompilerTool"
- PreprocessorDefinitions="_USE_32BIT_TIME_T;V8_TARGET_ARCH_IA32"
- />
- <Tool
- Name="VCLinkerTool"
- TargetMachine="1"
- />
-</VisualStudioPropertySheet>
diff --git a/src/3rdparty/v8/tools/visual_studio/js2c.cmd b/src/3rdparty/v8/tools/visual_studio/js2c.cmd
deleted file mode 100644
index 82722ff..0000000
--- a/src/3rdparty/v8/tools/visual_studio/js2c.cmd
+++ /dev/null
@@ -1,6 +0,0 @@
-@echo off
-set SOURCE_DIR=%1
-set TARGET_DIR=%2
-set PYTHON="..\..\..\third_party\python_24\python.exe"
-if not exist %PYTHON% set PYTHON=python.exe
-%PYTHON% ..\js2c.py %TARGET_DIR%\natives.cc %TARGET_DIR%\natives-empty.cc CORE %SOURCE_DIR%\macros.py %SOURCE_DIR%\runtime.js %SOURCE_DIR%\v8natives.js %SOURCE_DIR%\array.js %SOURCE_DIR%\string.js %SOURCE_DIR%\uri.js %SOURCE_DIR%\math.js %SOURCE_DIR%\messages.js %SOURCE_DIR%\apinatives.js %SOURCE_DIR%\debug-debugger.js %SOURCE_DIR%\liveedit-debugger.js %SOURCE_DIR%\mirror-debugger.js %SOURCE_DIR%\date.js %SOURCE_DIR%\regexp.js %SOURCE_DIR%\json.js
diff --git a/src/3rdparty/v8/tools/visual_studio/release.vsprops b/src/3rdparty/v8/tools/visual_studio/release.vsprops
deleted file mode 100644
index d7b26bc..0000000
--- a/src/3rdparty/v8/tools/visual_studio/release.vsprops
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioPropertySheet
- ProjectType="Visual C++"
- Version="8.00"
- Name="release"
- >
- <Tool
- Name="VCCLCompilerTool"
- RuntimeLibrary="0"
- Optimization="2"
- InlineFunctionExpansion="2"
- EnableIntrinsicFunctions="true"
- FavorSizeOrSpeed="0"
- OmitFramePointers="true"
- StringPooling="true"
- />
- <Tool
- Name="VCLinkerTool"
- LinkIncremental="1"
- OptimizeReferences="2"
- OptimizeForWindows98="1"
- EnableCOMDATFolding="2"
- />
-</VisualStudioPropertySheet>
diff --git a/src/3rdparty/v8/tools/visual_studio/x64.vsprops b/src/3rdparty/v8/tools/visual_studio/x64.vsprops
deleted file mode 100644
index 04d9c65..0000000
--- a/src/3rdparty/v8/tools/visual_studio/x64.vsprops
+++ /dev/null
@@ -1,18 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioPropertySheet
- ProjectType="Visual C++"
- Version="8.00"
- OutputDirectory="$(SolutionDir)$(ConfigurationName)64"
- IntermediateDirectory="$(SolutionDir)$(ConfigurationName)64\obj\$(ProjectName)"
- Name="x64"
- >
- <Tool
- Name="VCCLCompilerTool"
- PreprocessorDefinitions="V8_TARGET_ARCH_X64"
- />
- <Tool
- Name="VCLinkerTool"
- StackReserveSize="2091752"
- TargetMachine="17"
- />
-</VisualStudioPropertySheet>
diff --git a/src/3rdparty/v8/tools/windows-tick-processor.bat b/src/3rdparty/v8/tools/windows-tick-processor.bat
deleted file mode 100755
index d67f047..0000000
--- a/src/3rdparty/v8/tools/windows-tick-processor.bat
+++ /dev/null
@@ -1,30 +0,0 @@
-@echo off
-
-SET tools_dir=%~dp0
-IF 1%D8_PATH% == 1 (SET D8_PATH=%tools_dir%..)
-
-SET log_file=v8.log
-
-rem find the name of the log file to process, it must not start with a dash.
-rem we prepend cmdline args with a number (in fact, any letter or number)
-rem to cope with empty arguments.
-SET arg1=1%1
-IF NOT %arg1:~0,2% == 1 (IF NOT %arg1:~0,2% == 1- SET log_file=%1)
-SET arg2=2%2
-IF NOT %arg2:~0,2% == 2 (IF NOT %arg2:~0,2% == 2- SET log_file=%2)
-SET arg3=3%3
-IF NOT %arg3:~0,2% == 3 (IF NOT %arg3:~0,2% == 3- SET log_file=%3)
-SET arg4=4%4
-IF NOT %arg4:~0,2% == 4 (IF NOT %arg4:~0,2% == 4- SET log_file=%4)
-SET arg5=5%5
-IF NOT %arg5:~0,2% == 5 (IF NOT %arg5:~0,2% == 5- SET log_file=%5)
-SET arg6=6%6
-IF NOT %arg6:~0,2% == 6 (IF NOT %arg6:~0,2% == 6- SET log_file=%6)
-SET arg7=7%7
-IF NOT %arg7:~0,2% == 7 (IF NOT %arg7:~0,2% == 7- SET log_file=%7)
-SET arg8=8%8
-IF NOT %arg8:~0,2% == 8 (IF NOT %arg8:~0,2% == 8- SET log_file=%8)
-SET arg9=9%9
-IF NOT %arg9:~0,2% == 9 (IF NOT %arg9:~0,2% == 9- SET log_file=%9)
-
-type %log_file% | %D8_PATH%\d8 %tools_dir%splaytree.js %tools_dir%codemap.js %tools_dir%csvparser.js %tools_dir%consarray.js %tools_dir%profile.js %tools_dir%profile_view.js %tools_dir%logreader.js %tools_dir%tickprocessor.js %tools_dir%tickprocessor-driver.js -- --windows %*